path: root/erts/emulator/sys
author    Erlang/OTP <[email protected]>  2009-11-20 14:54:40 +0000
committer Erlang/OTP <[email protected]>  2009-11-20 14:54:40 +0000
commit    84adefa331c4159d432d22840663c38f155cd4c1 (patch)
tree      bff9a9c66adda4df2106dfd0e5c053ab182a12bd /erts/emulator/sys
The R13B03 release. (tag: OTP_R13B03)
Diffstat (limited to 'erts/emulator/sys')
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c  1912
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h  96
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c  1452
-rw-r--r--  erts/emulator/sys/common/erl_mseg.h  97
-rw-r--r--  erts/emulator/sys/common/erl_mtrace_sys_wrap.c  245
-rw-r--r--  erts/emulator/sys/common/erl_poll.c  2693
-rw-r--r--  erts/emulator/sys/common/erl_poll.h  246
-rw-r--r--  erts/emulator/sys/unix/driver_int.h  41
-rw-r--r--  erts/emulator/sys/unix/erl9_start.c  130
-rw-r--r--  erts/emulator/sys/unix/erl_child_setup.c  122
-rw-r--r--  erts/emulator/sys/unix/erl_main.c  31
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h  339
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys_ddll.c  280
-rw-r--r--  erts/emulator/sys/unix/sys.c  3346
-rw-r--r--  erts/emulator/sys/unix/sys_float.c  815
-rw-r--r--  erts/emulator/sys/unix/sys_time.c  134
-rw-r--r--  erts/emulator/sys/vxworks/driver_int.h  30
-rw-r--r--  erts/emulator/sys/vxworks/erl_main.c  45
-rw-r--r--  erts/emulator/sys/vxworks/erl_vxworks_sys.h  183
-rw-r--r--  erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c  253
-rw-r--r--  erts/emulator/sys/vxworks/sys.c  2594
-rw-r--r--  erts/emulator/sys/win32/dosmap.c  282
-rw-r--r--  erts/emulator/sys/win32/driver_int.h  39
-rw-r--r--  erts/emulator/sys/win32/erl.def  4
-rw-r--r--  erts/emulator/sys/win32/erl_main.c  29
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c  1361
-rw-r--r--  erts/emulator/sys/win32/erl_win32_sys_ddll.c  206
-rw-r--r--  erts/emulator/sys/win32/erl_win_dyn_driver.h  489
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h  212
-rw-r--r--  erts/emulator/sys/win32/sys.c  3093
-rw-r--r--  erts/emulator/sys/win32/sys_env.c  261
-rw-r--r--  erts/emulator/sys/win32/sys_float.c  145
-rw-r--r--  erts/emulator/sys/win32/sys_interrupt.c  142
-rw-r--r--  erts/emulator/sys/win32/sys_time.c  96
34 files changed, 21443 insertions, 0 deletions
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
new file mode 100644
index 0000000000..218bd79584
--- /dev/null
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -0,0 +1,1912 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Check I/O
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define ERL_CHECK_IO_C__
+#define ERTS_WANT_BREAK_HANDLING
+#ifndef WANT_NONBLOCKING
+# define WANT_NONBLOCKING
+#endif
+#include "sys.h"
+#include "global.h"
+#include "erl_check_io.h"
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+# define ERTS_DRV_EV_STATE_EXTRA_SIZE 128
+#else
+# include "safe_hash.h"
+# define DRV_EV_STATE_HTAB_SIZE 1024
+#endif
+
+typedef char EventStateType;
+#define ERTS_EV_TYPE_NONE ((EventStateType) 0)
+#define ERTS_EV_TYPE_DRV_SEL ((EventStateType) 1) /* driver_select */
+#define ERTS_EV_TYPE_DRV_EV ((EventStateType) 2) /* driver_event */
+#define ERTS_EV_TYPE_STOP_USE ((EventStateType) 3) /* pending stop_select */
+
+typedef char EventStateFlags;
+#define ERTS_EV_FLAG_USED ((EventStateFlags) 1) /* ERL_DRV_USE has been turned on */
+
+
+#if defined(ERTS_KERNEL_POLL_VERSION)
+# define ERTS_CIO_EXPORT(FUNC) FUNC ## _kp
+#elif defined(ERTS_NO_KERNEL_POLL_VERSION)
+# define ERTS_CIO_EXPORT(FUNC) FUNC ## _nkp
+#else
+# define ERTS_CIO_EXPORT(FUNC) FUNC
+#endif
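+
+/* For example, when this file is compiled with ERTS_KERNEL_POLL_VERSION
+ * defined, ERTS_CIO_EXPORT(driver_select) expands to driver_select_kp;
+ * with ERTS_NO_KERNEL_POLL_VERSION it expands to driver_select_nkp;
+ * otherwise the plain name driver_select is used (cf. the declarations
+ * in erl_check_io.h). */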
+
+#define ERTS_CIO_HAVE_DRV_EVENT \
+ (ERTS_POLL_USE_POLL && !ERTS_POLL_USE_KERNEL_POLL)
+
+#define ERTS_CIO_POLL_CTL ERTS_POLL_EXPORT(erts_poll_control)
+#define ERTS_CIO_POLL_WAIT ERTS_POLL_EXPORT(erts_poll_wait)
+#define ERTS_CIO_POLL_INTR ERTS_POLL_EXPORT(erts_poll_interrupt)
+#define ERTS_CIO_POLL_INTR_TMD ERTS_POLL_EXPORT(erts_poll_interrupt_timed)
+#define ERTS_CIO_NEW_POLLSET ERTS_POLL_EXPORT(erts_poll_create_pollset)
+#define ERTS_CIO_FREE_POLLSET ERTS_POLL_EXPORT(erts_poll_destroy_pollset)
+#define ERTS_CIO_POLL_MAX_FDS ERTS_POLL_EXPORT(erts_poll_max_fds)
+#define ERTS_CIO_POLL_INIT ERTS_POLL_EXPORT(erts_poll_init)
+#define ERTS_CIO_POLL_INFO ERTS_POLL_EXPORT(erts_poll_info)
+
+static struct pollset_info
+{
+ ErtsPollSet ps;
+ erts_smp_atomic_t in_poll_wait; /* set while doing poll */
+#ifdef ERTS_SMP
+ struct removed_fd* removed_list; /* list of deselected fd's*/
+ erts_smp_spinlock_t removed_list_lock;
+#endif
+}pollset;
+#define NUM_OF_POLLSETS 1
+
+typedef struct {
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ SafeHashBucket hb;
+#endif
+ ErtsSysFdType fd;
+ union {
+ ErtsDrvEventDataState *event; /* ERTS_EV_TYPE_DRV_EV */
+ ErtsDrvSelectDataState *select; /* ERTS_EV_TYPE_DRV_SEL */
+ erts_driver_t* drv_ptr; /* ERTS_EV_TYPE_STOP_USE */
+ } driver;
+ ErtsPollEvents events;
+ unsigned short remove_cnt; /* number of removed_fd's referring to this fd */
+ EventStateType type;
+ EventStateFlags flags;
+} ErtsDrvEventState;
+
+#ifdef ERTS_SMP
+struct removed_fd {
+ struct removed_fd *next;
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ ErtsSysFdType fd;
+#else
+ ErtsDrvEventState* state;
+ #ifdef DEBUG
+ ErtsSysFdType fd;
+ #endif
+#endif
+
+};
+#endif
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+static int max_fds = -1;
+#endif
+#define DRV_EV_STATE_LOCK_CNT 16
+static union {
+ erts_smp_mtx_t lck;
+ byte _cache_line_alignment[64];
+}drv_ev_state_locks[DRV_EV_STATE_LOCK_CNT];
+
+#ifdef ERTS_SMP
+static ERTS_INLINE erts_smp_mtx_t* fd_mtx(ErtsSysFdType fd)
+{
+ int hash = (int)fd;
+# ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ hash ^= (hash >> 9);
+# endif
+ return &drv_ev_state_locks[hash % DRV_EV_STATE_LOCK_CNT].lck;
+}
+#else
+# define fd_mtx(fd) NULL
+#endif
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+
+static erts_smp_atomic_t drv_ev_state_len;
+static ErtsDrvEventState *drv_ev_state;
+static erts_smp_mtx_t drv_ev_state_grow_lock; /* prevent lock-hogging of racing growers */
+
+#else
+static SafeHash drv_ev_state_tab;
+static int num_state_prealloc;
+static ErtsDrvEventState *state_prealloc_first;
+erts_smp_spinlock_t state_prealloc_lock;
+
+static ERTS_INLINE ErtsDrvEventState *hash_get_drv_ev_state(ErtsSysFdType fd)
+{
+ ErtsDrvEventState tmpl;
+ tmpl.fd = fd;
+ return (ErtsDrvEventState *) safe_hash_get(&drv_ev_state_tab, (void *) &tmpl);
+}
+
+static ERTS_INLINE ErtsDrvEventState* hash_new_drv_ev_state(ErtsSysFdType fd)
+{
+ ErtsDrvEventState tmpl;
+ tmpl.fd = fd;
+ tmpl.driver.select = NULL;
+ tmpl.events = 0;
+ tmpl.remove_cnt = 0;
+ tmpl.type = ERTS_EV_TYPE_NONE;
+ tmpl.flags = 0;
+ return (ErtsDrvEventState *) safe_hash_put(&drv_ev_state_tab, (void *) &tmpl);
+}
+
+static ERTS_INLINE void hash_erase_drv_ev_state(ErtsDrvEventState *state)
+{
+ ASSERT(state->remove_cnt == 0);
+ safe_hash_erase(&drv_ev_state_tab, (void *) state);
+}
+
+#endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */
+
+static void stale_drv_select(Eterm id, ErtsDrvEventState *state, int mode);
+static void select_steal(ErlDrvPort ix, ErtsDrvEventState *state,
+ int mode, int on);
+static void print_select_op(erts_dsprintf_buf_t *dsbufp,
+ ErlDrvPort ix, ErtsSysFdType fd, int mode, int on);
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+static void select_large_fd_error(ErlDrvPort, ErtsSysFdType, int, int);
+#endif
+#if ERTS_CIO_HAVE_DRV_EVENT
+static void event_steal(ErlDrvPort ix, ErtsDrvEventState *state,
+ ErlDrvEventData event_data);
+static void print_event_op(erts_dsprintf_buf_t *dsbufp,
+ ErlDrvPort, ErtsSysFdType, ErlDrvEventData);
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+static void event_large_fd_error(ErlDrvPort, ErtsSysFdType, ErlDrvEventData);
+#endif
+#endif
+static void steal_pending_stop_select(erts_dsprintf_buf_t*, ErlDrvPort,
+ ErtsDrvEventState*, int mode, int on);
+static ERTS_INLINE Eterm
+drvport2id(ErlDrvPort dp)
+{
+ Port *pp = erts_drvport2port(dp);
+ if (pp)
+ return pp->id;
+ else {
+ ASSERT(0);
+ return am_undefined;
+ }
+}
+
+#ifdef ERTS_SMP
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_FD_LIST)
+#endif
+
+static ERTS_INLINE void
+remember_removed(ErtsDrvEventState *state, struct pollset_info* psi)
+{
+#ifdef ERTS_SMP
+ struct removed_fd *fdlp;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd)));
+ if (erts_smp_atomic_read(&psi->in_poll_wait)) {
+ state->remove_cnt++;
+ ASSERT(state->remove_cnt > 0);
+ fdlp = removed_fd_alloc();
+ #if defined(ERTS_SYS_CONTINOUS_FD_NUMBERS) || defined(DEBUG)
+ fdlp->fd = state->fd;
+ #endif
+ #ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ fdlp->state = state;
+ #endif
+ erts_smp_spin_lock(&psi->removed_list_lock);
+ fdlp->next = psi->removed_list;
+ psi->removed_list = fdlp;
+ erts_smp_spin_unlock(&psi->removed_list_lock);
+ }
+#endif
+}
+
+
+static ERTS_INLINE int
+is_removed(ErtsDrvEventState *state)
+{
+#ifdef ERTS_SMP
+ /* Note that there is a possible race here, where an fd is removed
+ (increasing remove_cnt) and then added again just before erts_poll_wait
+ is called by erts_check_io. Any polled event on the re-added fd will then
+ be falsely ignored. But that does not matter, as the event will trigger
+ again next time erl_check_io is called. */
+ return state->remove_cnt > 0;
+#else
+ return 0;
+#endif
+}
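+
+/* Schematic of the benign race described in is_removed() above, assuming
+ * a concurrent erts_poll_wait() is in progress (in_poll_wait is set):
+ *
+ *   port thread:  deselects fd        -> remember_removed(): remove_cnt++
+ *   port thread:  selects fd again    -> fd re-added, remove_cnt still > 0
+ *   check-io:     fd reported ready   -> is_removed() true, event skipped
+ *   check-io:     forget_removed()    -> remove_cnt drops back to 0
+ *   next erts_check_io() call         -> the event is reported and handled
+ *                                        as usual
+ */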
+
+static void
+forget_removed(struct pollset_info* psi)
+{
+#ifdef ERTS_SMP
+ struct removed_fd* fdlp;
+ struct removed_fd* tofree;
+
+ /* Fast track: if (atomic_ptr(removed_list)==NULL) return; */
+
+ erts_smp_spin_lock(&psi->removed_list_lock);
+ fdlp = psi->removed_list;
+ psi->removed_list = NULL;
+ erts_smp_spin_unlock(&psi->removed_list_lock);
+
+ while (fdlp) {
+ erts_driver_t* drv_ptr = NULL;
+ erts_smp_mtx_t* mtx;
+ ErtsSysFdType fd;
+ ErtsDrvEventState *state;
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ fd = fdlp->fd;
+ mtx = fd_mtx(fd);
+ erts_smp_mtx_lock(mtx);
+ state = &drv_ev_state[(int) fd];
+#else
+ state = fdlp->state;
+ fd = state->fd;
+ ASSERT(fd == fdlp->fd);
+ mtx = fd_mtx(fd);
+ erts_smp_mtx_lock(mtx);
+#endif
+ ASSERT(state->remove_cnt > 0);
+ if (--state->remove_cnt == 0) {
+ switch (state->type) {
+ case ERTS_EV_TYPE_STOP_USE:
+ /* Now we can call stop_select */
+ drv_ptr = state->driver.drv_ptr;
+ ASSERT(drv_ptr);
+ state->type = ERTS_EV_TYPE_NONE;
+ state->flags = 0;
+ state->driver.drv_ptr = NULL;
+ /* Fall through */
+ case ERTS_EV_TYPE_NONE:
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ hash_erase_drv_ev_state(state);
+#endif
+ break;
+ case ERTS_EV_TYPE_DRV_SEL:
+ case ERTS_EV_TYPE_DRV_EV:
+ break;
+ default:
+ ASSERT(0);
+ }
+ }
+ erts_smp_mtx_unlock(mtx);
+ if (drv_ptr) {
+ int was_unmasked = erts_block_fpe();
+ (*drv_ptr->stop_select) (fd, NULL);
+ erts_unblock_fpe(was_unmasked);
+ if (drv_ptr->handle) {
+ erts_ddll_dereference_driver(drv_ptr->handle);
+ }
+ }
+ tofree = fdlp;
+ fdlp = fdlp->next;
+ removed_fd_free(tofree);
+ }
+#endif /* ERTS_SMP */
+}
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+static void
+grow_drv_ev_state(int min_ix)
+{
+ int i;
+ int new_len = min_ix + 1 + ERTS_DRV_EV_STATE_EXTRA_SIZE;
+ if (new_len > max_fds)
+ new_len = max_fds;
+
+ erts_smp_mtx_lock(&drv_ev_state_grow_lock);
+ if (erts_smp_atomic_read(&drv_ev_state_len) <= min_ix) {
+ for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { /* lock all fd's */
+ erts_smp_mtx_lock(&drv_ev_state_locks[i].lck);
+ }
+ drv_ev_state = (drv_ev_state
+ ? erts_realloc(ERTS_ALC_T_DRV_EV_STATE,
+ drv_ev_state,
+ sizeof(ErtsDrvEventState)*new_len)
+ : erts_alloc(ERTS_ALC_T_DRV_EV_STATE,
+ sizeof(ErtsDrvEventState)*new_len));
+ for (i = erts_smp_atomic_read(&drv_ev_state_len); i < new_len; i++) {
+ drv_ev_state[i].fd = (ErtsSysFdType) i;
+ drv_ev_state[i].driver.select = NULL;
+ drv_ev_state[i].events = 0;
+ drv_ev_state[i].remove_cnt = 0;
+ drv_ev_state[i].type = ERTS_EV_TYPE_NONE;
+ drv_ev_state[i].flags = 0;
+ }
+ erts_smp_atomic_set(&drv_ev_state_len, new_len);
+ for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) {
+ erts_smp_mtx_unlock(&drv_ev_state_locks[i].lck);
+ }
+ }
+ /*else already grown by racing thread */
+
+ erts_smp_mtx_unlock(&drv_ev_state_grow_lock);
+}
+#endif /* ERTS_SYS_CONTINOUS_FD_NUMBERS */
+
+
+static ERTS_INLINE void
+abort_task(Eterm id, ErtsPortTaskHandle *pthp, EventStateType type)
+{
+ if (is_nil(id)) {
+ ASSERT(type == ERTS_EV_TYPE_NONE
+ || !erts_port_task_is_scheduled(pthp));
+ }
+ else if (erts_port_task_is_scheduled(pthp)) {
+ erts_port_task_abort(id, pthp);
+ ASSERT(erts_is_port_alive(id));
+ }
+}
+
+static ERTS_INLINE void
+abort_tasks(ErtsDrvEventState *state, int mode)
+{
+ switch (mode) {
+ case 0: check_type:
+ switch (state->type) {
+#if ERTS_CIO_HAVE_DRV_EVENT
+ case ERTS_EV_TYPE_DRV_EV:
+ abort_task(state->driver.event->port,
+ &state->driver.event->task,
+ ERTS_EV_TYPE_DRV_EV);
+ return;
+#endif
+ case ERTS_EV_TYPE_NONE:
+ return;
+ default:
+ ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL);
+ /* Fall through */
+ }
+ case ERL_DRV_READ|ERL_DRV_WRITE:
+ case ERL_DRV_WRITE:
+ ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL);
+ abort_task(state->driver.select->outport,
+ &state->driver.select->outtask,
+ state->type);
+ if (mode == ERL_DRV_WRITE)
+ break;
+ case ERL_DRV_READ:
+ ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL);
+ abort_task(state->driver.select->inport,
+ &state->driver.select->intask,
+ state->type);
+ break;
+ default:
+ goto check_type;
+ }
+}
+
+static void
+deselect(ErtsDrvEventState *state, int mode)
+{
+ int do_wake = 0;
+ ErtsPollEvents rm_events;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd)));
+ ASSERT(state->events);
+
+ abort_tasks(state, mode);
+
+ if (!mode)
+ rm_events = state->events;
+ else {
+ rm_events = 0;
+ ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL);
+ if (mode & ERL_DRV_READ) {
+ state->driver.select->inport = NIL;
+ rm_events |= ERTS_POLL_EV_IN;
+ }
+ if (mode & ERL_DRV_WRITE) {
+ state->driver.select->outport = NIL;
+ rm_events |= ERTS_POLL_EV_OUT;
+ }
+ }
+
+ state->events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd, rm_events, 0, &do_wake);
+
+ if (!(state->events)) {
+ switch (state->type) {
+ case ERTS_EV_TYPE_DRV_SEL:
+ ASSERT(!erts_port_task_is_scheduled(&state->driver.select->intask));
+ ASSERT(!erts_port_task_is_scheduled(&state->driver.select->outtask));
+ erts_free(ERTS_ALC_T_DRV_SEL_D_STATE,
+ state->driver.select);
+ break;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ case ERTS_EV_TYPE_DRV_EV:
+ ASSERT(!erts_port_task_is_scheduled(&state->driver.event->task));
+ erts_free(ERTS_ALC_T_DRV_EV_D_STATE,
+ state->driver.event);
+ break;
+#endif
+ case ERTS_EV_TYPE_NONE:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ state->driver.select = NULL;
+ state->type = ERTS_EV_TYPE_NONE;
+ state->flags = 0;
+ remember_removed(state, &pollset);
+ }
+}
+
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+# define IS_FD_UNKNOWN(state) ((state)->type == ERTS_EV_TYPE_NONE && (state)->remove_cnt == 0)
+#else
+# define IS_FD_UNKNOWN(state) ((state) == NULL)
+#endif
+
+
+int
+ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
+ ErlDrvEvent e,
+ int mode,
+ int on)
+{
+ void (*stop_select_fn)(ErlDrvEvent, void*) = NULL;
+ Eterm id = drvport2id(ix);
+ ErtsSysFdType fd = (ErtsSysFdType) e;
+ ErtsPollEvents ctl_events = (ErtsPollEvents) 0;
+ ErtsPollEvents new_events, old_events;
+ ErtsDrvEventState *state;
+ int wake_poller;
+ int ret;
+
+ ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
+ && erts_lc_is_port_locked(erts_drvport2port(ix)));
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if ((unsigned)fd >= (unsigned)erts_smp_atomic_read(&drv_ev_state_len)) {
+ if (fd < 0) {
+ return -1;
+ }
+ if (fd >= max_fds) {
+ select_large_fd_error(ix, fd, mode, on);
+ return -1;
+ }
+ grow_drv_ev_state(fd);
+ }
+#endif
+
+ erts_smp_mtx_lock(fd_mtx(fd));
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ state = &drv_ev_state[(int) fd];
+#else
+ state = hash_get_drv_ev_state(fd); /* may be NULL! */
+#endif
+
+ if (!on && (mode&ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
+ if (IS_FD_UNKNOWN(state)) {
+ /* fast track to stop_select callback */
+ stop_select_fn = erts_drvport2port(ix)->drv_ptr->stop_select;
+ ret = 0;
+ goto done_unknown;
+ }
+ mode |= (ERL_DRV_READ | ERL_DRV_WRITE);
+ wake_poller = 1; /* to eject fd from pollset (if needed) */
+ }
+ else wake_poller = 0;
+
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (state == NULL) {
+ state = hash_new_drv_ev_state(fd);
+ }
+#endif
+
+#if ERTS_CIO_HAVE_DRV_EVENT
+ if (state->type == ERTS_EV_TYPE_DRV_EV)
+ select_steal(ix, state, mode, on);
+#endif
+ if (state->type == ERTS_EV_TYPE_STOP_USE) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_select_op(dsbufp, ix, state->fd, mode, on);
+ steal_pending_stop_select(dsbufp, ix, state, mode, on);
+ if (state->type == ERTS_EV_TYPE_STOP_USE) {
+ ret = 0;
+ goto done; /* stop_select still pending */
+ }
+ ASSERT(state->type == ERTS_EV_TYPE_NONE);
+ }
+
+ if (mode & ERL_DRV_READ) {
+ if (state->type == ERTS_EV_TYPE_DRV_SEL) {
+ Eterm owner = state->driver.select->inport;
+ if (owner != id && is_not_nil(owner))
+ select_steal(ix, state, mode, on);
+ }
+ ctl_events |= ERTS_POLL_EV_IN;
+ }
+ if (mode & ERL_DRV_WRITE) {
+ if (state->type == ERTS_EV_TYPE_DRV_SEL) {
+ Eterm owner = state->driver.select->outport;
+ if (owner != id && is_not_nil(owner))
+ select_steal(ix, state, mode, on);
+ }
+ ctl_events |= ERTS_POLL_EV_OUT;
+ }
+
+ ASSERT((state->type == ERTS_EV_TYPE_DRV_SEL) ||
+ (state->type == ERTS_EV_TYPE_NONE && !state->events));
+
+ if (!on && !(state->flags & ERTS_EV_FLAG_USED)
+ && state->events && !(state->events & ~ctl_events)) {
+ /* Old driver removing all events. At least wake poller.
+ It will not make close() 100% safe but it will prevent
+ actions delayed by poll timeout. */
+ wake_poller = 1;
+ }
+
+ new_events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd, ctl_events, on, &wake_poller);
+
+ if (new_events & (ERTS_POLL_EV_ERR|ERTS_POLL_EV_NVAL)) {
+ if (state->type == ERTS_EV_TYPE_DRV_SEL && !state->events) {
+ state->type = ERTS_EV_TYPE_NONE;
+ state->flags = 0;
+ erts_free(ERTS_ALC_T_DRV_SEL_D_STATE, state->driver.select);
+ state->driver.select = NULL;
+ }
+ ret = -1;
+ goto done;
+ }
+
+ old_events = state->events;
+
+ ASSERT(on
+ ? (new_events == (state->events | ctl_events))
+ : (new_events == (state->events & ~ctl_events)));
+
+ ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL
+ || state->type == ERTS_EV_TYPE_NONE);
+
+ state->events = new_events;
+ if (ctl_events) {
+ if (on) {
+ if (state->type == ERTS_EV_TYPE_NONE) {
+ ErtsDrvSelectDataState *dsdsp
+ = erts_alloc(ERTS_ALC_T_DRV_SEL_D_STATE,
+ sizeof(ErtsDrvSelectDataState));
+ dsdsp->inport = NIL;
+ dsdsp->outport = NIL;
+ erts_port_task_handle_init(&dsdsp->intask);
+ erts_port_task_handle_init(&dsdsp->outtask);
+ ASSERT(state->driver.select == NULL);
+ state->driver.select = dsdsp;
+ state->type = ERTS_EV_TYPE_DRV_SEL;
+ }
+ ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL);
+ if (ctl_events & ERTS_POLL_EV_IN)
+ state->driver.select->inport = id;
+ if (ctl_events & ERTS_POLL_EV_OUT)
+ state->driver.select->outport = id;
+ if (mode & ERL_DRV_USE) {
+ state->flags |= ERTS_EV_FLAG_USED;
+ }
+ }
+ else { /* off */
+ if (state->type == ERTS_EV_TYPE_DRV_SEL) {
+ if (ctl_events & ERTS_POLL_EV_IN) {
+ abort_tasks(state, ERL_DRV_READ);
+ state->driver.select->inport = NIL;
+ }
+ if (ctl_events & ERTS_POLL_EV_OUT) {
+ abort_tasks(state, ERL_DRV_WRITE);
+ state->driver.select->outport = NIL;
+ }
+ if (new_events == 0) {
+ ASSERT(!erts_port_task_is_scheduled(&state->driver.select->intask));
+ ASSERT(!erts_port_task_is_scheduled(&state->driver.select->outtask));
+ if (old_events != 0) {
+ remember_removed(state, &pollset);
+ }
+ if ((mode & ERL_DRV_USE) || !(state->flags & ERTS_EV_FLAG_USED)) {
+ state->type = ERTS_EV_TYPE_NONE;
+ state->flags = 0;
+ erts_free(ERTS_ALC_T_DRV_SEL_D_STATE,
+ state->driver.select);
+ state->driver.select = NULL;
+ }
+ /*else keep it, as fd will probably be selected upon again */
+ }
+ }
+ if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
+ erts_driver_t* drv_ptr = erts_drvport2port(ix)->drv_ptr;
+ ASSERT(new_events==0);
+ if (state->remove_cnt == 0 || !wake_poller) {
+ /* Safe to close fd now as it is not in pollset
+ or there was no need to eject fd (kernel poll) */
+ stop_select_fn = drv_ptr->stop_select;
+ }
+ else {
+ /* Not safe to close fd, postpone stop_select callback. */
+ state->type = ERTS_EV_TYPE_STOP_USE;
+ state->driver.drv_ptr = drv_ptr;
+ if (drv_ptr->handle) {
+ erts_ddll_reference_referenced_driver(drv_ptr->handle);
+ }
+ }
+ }
+ }
+ }
+
+ ret = 0;
+
+done:;
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (state->type == ERTS_EV_TYPE_NONE && state->remove_cnt == 0) {
+ hash_erase_drv_ev_state(state);
+ }
+#endif
+done_unknown:
+ erts_smp_mtx_unlock(fd_mtx(fd));
+ if (stop_select_fn) {
+ int was_unmasked = erts_block_fpe();
+ (*stop_select_fn)(e, NULL);
+ erts_unblock_fpe(was_unmasked);
+ }
+ return ret;
+}
+
+int
+ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
+ ErlDrvEvent e,
+ ErlDrvEventData event_data)
+{
+#if !ERTS_CIO_HAVE_DRV_EVENT
+ return -1;
+#else
+ ErtsSysFdType fd = (ErtsSysFdType) e;
+ ErtsPollEvents events;
+ ErtsPollEvents add_events;
+ ErtsPollEvents remove_events;
+ Eterm id = drvport2id(ix);
+ ErtsDrvEventState *state;
+ int do_wake = 0;
+ int ret;
+
+ ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
+ && erts_lc_is_port_locked(erts_drvport2port(ix)));
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if ((unsigned)fd >= (unsigned)erts_smp_atomic_read(&drv_ev_state_len)) {
+ if (fd < 0)
+ return -1;
+ if (fd >= max_fds) {
+ event_large_fd_error(ix, fd, event_data);
+ return -1;
+ }
+ grow_drv_ev_state(fd);
+ }
+#endif
+
+ erts_smp_mtx_lock(fd_mtx(fd));
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ state = &drv_ev_state[(int) fd];
+#else
+ /* Could use hash_new directly, but want to keep the normal case fast */
+ state = hash_get_drv_ev_state(fd);
+ if (state == NULL) {
+ state = hash_new_drv_ev_state(fd);
+ }
+#endif
+
+ switch (state->type) {
+ case ERTS_EV_TYPE_DRV_EV:
+ if (state->driver.event->port == id) break;
+ /*fall through*/
+ case ERTS_EV_TYPE_DRV_SEL:
+ event_steal(ix, state, event_data);
+ break;
+ case ERTS_EV_TYPE_STOP_USE: {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_event_op(dsbufp, ix, fd, event_data);
+ steal_pending_stop_select(dsbufp, ix, state, 0, 1);
+ break;
+ }
+ }
+
+ ASSERT(state->type == ERTS_EV_TYPE_DRV_EV
+ || state->type == ERTS_EV_TYPE_NONE);
+
+ events = state->events;
+
+ if (!event_data) {
+ remove_events = events;
+ add_events = 0;
+ }
+ else {
+ remove_events = ~event_data->events & events;
+ add_events = ~events & event_data->events;
+ }
+
+ if (add_events) {
+ events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd, add_events, 1, &do_wake);
+ if (events & (ERTS_POLL_EV_ERR|ERTS_POLL_EV_NVAL)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ if (remove_events) {
+ events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd, remove_events, 0, &do_wake);
+ if (events & (ERTS_POLL_EV_ERR|ERTS_POLL_EV_NVAL)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ if (event_data && event_data->events != 0) {
+ if (state->type == ERTS_EV_TYPE_DRV_EV) {
+ state->driver.event->removed_events &= ~add_events;
+ state->driver.event->removed_events |= remove_events;
+ }
+ else {
+ state->driver.event
+ = erts_alloc(ERTS_ALC_T_DRV_EV_D_STATE,
+ sizeof(ErtsDrvEventDataState));
+ erts_port_task_handle_init(&state->driver.event->task);
+ state->driver.event->port = id;
+ state->driver.event->removed_events = (ErtsPollEvents) 0;
+ state->type = ERTS_EV_TYPE_DRV_EV;
+ }
+ state->driver.event->data = event_data;
+ }
+ else {
+ if (state->type == ERTS_EV_TYPE_DRV_EV) {
+ abort_tasks(state, 0);
+ erts_free(ERTS_ALC_T_DRV_EV_D_STATE,
+ state->driver.event);
+ }
+ state->driver.select = NULL;
+ state->type = ERTS_EV_TYPE_NONE;
+ remember_removed(state, &pollset);
+ }
+ state->events = events;
+ ASSERT(event_data ? events == event_data->events : events == 0);
+
+ ret = 0;
+
+done:
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (state->type == ERTS_EV_TYPE_NONE && state->remove_cnt == 0) {
+ hash_erase_drv_ev_state(state);
+ }
+#endif
+ erts_smp_mtx_unlock(fd_mtx(fd));
+ return ret;
+#endif
+}
+
+static ERTS_INLINE int
+chk_stale(Eterm id, ErtsDrvEventState *state, int mode)
+{
+ if (is_nil(id))
+ return 0;
+ if (erts_is_port_alive(id))
+ return 1; /* Steal */
+ stale_drv_select(id, state, mode);
+ return 0;
+}
+
+static int
+need2steal(ErtsDrvEventState *state, int mode)
+{
+ int do_steal = 0;
+ switch (state->type) {
+ case ERTS_EV_TYPE_DRV_SEL:
+ if (mode & ERL_DRV_READ)
+ do_steal |= chk_stale(state->driver.select->inport,
+ state,
+ ERL_DRV_READ);
+ if (mode & ERL_DRV_WRITE)
+ do_steal |= chk_stale(state->driver.select->outport,
+ state,
+ ERL_DRV_WRITE);
+ break;
+#if ERTS_CIO_HAVE_DRV_EVENT
+ case ERTS_EV_TYPE_DRV_EV:
+ do_steal |= chk_stale(state->driver.event->port, state, 0);
+ break;
+#endif
+ case ERTS_EV_TYPE_STOP_USE:
+ ASSERT(0);
+ break;
+ default:
+ break;
+ }
+ return do_steal;
+}
+
+static void
+print_driver_name(erts_dsprintf_buf_t *dsbufp, Eterm id)
+{
+ ErtsPortNames *pnp = erts_get_port_names(id);
+ if (!pnp->name && !pnp->driver_name)
+ erts_dsprintf(dsbufp, "%s ", "<unknown>");
+ else {
+ if (pnp->name) {
+ if (!pnp->driver_name || strcmp(pnp->driver_name, pnp->name) == 0)
+ erts_dsprintf(dsbufp, "%s ", pnp->name);
+ else
+ erts_dsprintf(dsbufp, "%s (%s) ", pnp->driver_name, pnp->name);
+ }
+ else if (pnp->driver_name) {
+ erts_dsprintf(dsbufp, "%s ", pnp->driver_name);
+ }
+ }
+ erts_free_port_names(pnp);
+}
+
+static void
+steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode)
+{
+ erts_dsprintf(dsbufp, "stealing control of fd=%d from ", (int) state->fd);
+ switch (state->type) {
+ case ERTS_EV_TYPE_DRV_SEL: {
+ int deselect_mode = 0;
+ Eterm iid = state->driver.select->inport;
+ Eterm oid = state->driver.select->outport;
+ if ((mode & ERL_DRV_READ) && (is_not_nil(iid))) {
+ erts_dsprintf(dsbufp, "input driver ");
+ print_driver_name(dsbufp, iid);
+ erts_dsprintf(dsbufp, "%T ", iid);
+ deselect_mode |= ERL_DRV_READ;
+ }
+ if ((mode & ERL_DRV_WRITE) && is_not_nil(oid)) {
+ if (deselect_mode) {
+ erts_dsprintf(dsbufp, "and ");
+ }
+ erts_dsprintf(dsbufp, "output driver ");
+ print_driver_name(dsbufp, oid);
+ erts_dsprintf(dsbufp, "%T ", oid);
+ deselect_mode |= ERL_DRV_WRITE;
+ }
+ if (deselect_mode)
+ deselect(state, deselect_mode);
+ else {
+ erts_dsprintf(dsbufp, "no one", (int) state->fd);
+ ASSERT(0);
+ }
+ erts_dsprintf(dsbufp, "\n");
+ break;
+ }
+#if ERTS_CIO_HAVE_DRV_EVENT
+ case ERTS_EV_TYPE_DRV_EV: {
+ Eterm eid = state->driver.event->port;
+ if (is_nil(eid)) {
+ erts_dsprintf(dsbufp, "no one", (int) state->fd);
+ ASSERT(0);
+ }
+ else {
+ erts_dsprintf(dsbufp, "event driver ");
+ print_driver_name(dsbufp, eid);
+ erts_dsprintf(dsbufp, "%T ", eid);
+ }
+ erts_dsprintf(dsbufp, "\n");
+ deselect(state, 0);
+ break;
+ }
+#endif
+ case ERTS_EV_TYPE_STOP_USE: {
+ ASSERT(0);
+ break;
+ }
+ default:
+ erts_dsprintf(dsbufp, "no one\n", (int) state->fd);
+ ASSERT(0);
+ }
+}
+
+static void
+print_select_op(erts_dsprintf_buf_t *dsbufp,
+ ErlDrvPort ix, ErtsSysFdType fd, int mode, int on)
+{
+ Port *pp = erts_drvport2port(ix);
+ erts_dsprintf(dsbufp,
+ "driver_select(%p, %d,%s%s%s%s, %d) "
+ "by ",
+ ix,
+ (int) fd,
+ mode & ERL_DRV_READ ? " ERL_DRV_READ" : "",
+ mode & ERL_DRV_WRITE ? " ERL_DRV_WRITE" : "",
+ mode & ERL_DRV_USE ? " ERL_DRV_USE" : "",
+ mode & (ERL_DRV_USE_NO_CALLBACK & ~ERL_DRV_USE) ? "_NO_CALLBACK" : "",
+ on);
+ print_driver_name(dsbufp, pp->id);
+ erts_dsprintf(dsbufp, "driver %T ", pp ? pp->id : NIL);
+}
+
+static void
+select_steal(ErlDrvPort ix, ErtsDrvEventState *state, int mode, int on)
+{
+ if (need2steal(state, mode)) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_select_op(dsbufp, ix, state->fd, mode, on);
+ steal(dsbufp, state, mode);
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
+}
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+static void
+large_fd_error_common(erts_dsprintf_buf_t *dsbufp, ErtsSysFdType fd)
+{
+ erts_dsprintf(dsbufp,
+ "fd=%d is larger than the largest allowed fd=%d\n",
+ (int) fd, max_fds - 1);
+}
+
+static void
+select_large_fd_error(ErlDrvPort ix, ErtsSysFdType fd, int mode, int on)
+{
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_select_op(dsbufp, ix, fd, mode, on);
+ erts_dsprintf(dsbufp, "failed: ");
+ large_fd_error_common(dsbufp, fd);
+ erts_send_error_to_logger_nogl(dsbufp);
+}
+#endif /* ERTS_SYS_CONTINOUS_FD_NUMBERS */
+
+
+
+static void
+steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix,
+ ErtsDrvEventState *state, int mode, int on)
+{
+ ASSERT(state->type == ERTS_EV_TYPE_STOP_USE);
+ erts_dsprintf(dsbufp, "failed: fd=%d (re)selected before stop_select "
+ "was called for driver %s\n",
+ (int) state->fd, state->driver.drv_ptr->name);
+ erts_send_error_to_logger_nogl(dsbufp);
+
+ if (on) {
+ /* Either fd-owner changed its mind about closing
+ * or closed fd before stop_select callback and fd is now reused.
+ * In either case stop_select should not be called.
+ */
+ state->type = ERTS_EV_TYPE_NONE;
+ state->flags = 0;
+ if (state->driver.drv_ptr->handle) {
+ erts_ddll_dereference_driver(state->driver.drv_ptr->handle);
+ }
+ state->driver.drv_ptr = NULL;
+ }
+ else if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
+ erts_driver_t* drv_ptr = erts_drvport2port(ix)->drv_ptr;
+ if (drv_ptr != state->driver.drv_ptr) {
+ /* Some other driver wants the stop_select callback */
+ if (state->driver.drv_ptr->handle) {
+ erts_ddll_dereference_driver(state->driver.drv_ptr->handle);
+ }
+ if (drv_ptr->handle) {
+ erts_ddll_reference_referenced_driver(drv_ptr->handle);
+ }
+ state->driver.drv_ptr = drv_ptr;
+ }
+ }
+
+}
+
+
+#if ERTS_CIO_HAVE_DRV_EVENT
+
+static void
+print_event_op(erts_dsprintf_buf_t *dsbufp,
+ ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data)
+{
+ Port *pp = erts_drvport2port(ix);
+ erts_dsprintf(dsbufp, "driver_event(%p, %d, ", ix, (int) fd);
+ if (!event_data)
+ erts_dsprintf(dsbufp, "NULL");
+ else
+ erts_dsprintf(dsbufp, "{0x%x, 0x%x}",
+ (unsigned int) event_data->events,
+ (unsigned int) event_data->revents);
+ erts_dsprintf(dsbufp, ") by ");
+ print_driver_name(dsbufp, pp->id);
+ erts_dsprintf(dsbufp, "driver %T ", pp ? pp->id : NIL);
+}
+
+static void
+event_steal(ErlDrvPort ix, ErtsDrvEventState *state, ErlDrvEventData event_data)
+{
+ if (need2steal(state, ERL_DRV_READ|ERL_DRV_WRITE)) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_event_op(dsbufp, ix, state->fd, event_data);
+ steal(dsbufp, state, ERL_DRV_READ|ERL_DRV_WRITE);
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
+ else if (state->type == ERTS_EV_TYPE_DRV_SEL) {
+ ASSERT(state->flags & ERTS_EV_FLAG_USED);
+ deselect(state, 0);
+ }
+}
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+static void
+event_large_fd_error(ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data)
+{
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ print_event_op(dsbufp, ix, fd, event_data);
+ erts_dsprintf(dsbufp, "failed: ");
+ large_fd_error_common(dsbufp, fd);
+ erts_send_error_to_logger_nogl(dsbufp);
+}
+#endif
+#endif
+
+static ERTS_INLINE void
+iready(Eterm id, ErtsDrvEventState *state)
+{
+ if (erts_port_task_schedule(id,
+ &state->driver.select->intask,
+ ERTS_PORT_TASK_INPUT,
+ (ErlDrvEvent) state->fd,
+ NULL) != 0) {
+ stale_drv_select(id, state, ERL_DRV_READ);
+ }
+}
+
+static ERTS_INLINE void
+oready(Eterm id, ErtsDrvEventState *state)
+{
+ if (erts_port_task_schedule(id,
+ &state->driver.select->outtask,
+ ERTS_PORT_TASK_OUTPUT,
+ (ErlDrvEvent) state->fd,
+ NULL) != 0) {
+ stale_drv_select(id, state, ERL_DRV_WRITE);
+ }
+}
+
+#if ERTS_CIO_HAVE_DRV_EVENT
+static ERTS_INLINE void
+eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data)
+{
+ if (erts_port_task_schedule(id,
+ &state->driver.event->task,
+ ERTS_PORT_TASK_EVENT,
+ (ErlDrvEvent) state->fd,
+ event_data) != 0) {
+ stale_drv_select(id, state, 0);
+ }
+}
+#endif
+
+static void bad_fd_in_pollset( ErtsDrvEventState *, Eterm, Eterm, ErtsPollEvents);
+
+void
+ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set)
+{
+ ERTS_CIO_POLL_INTR(pollset.ps, set);
+}
+
+void
+ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set, long msec)
+{
+ ERTS_CIO_POLL_INTR_TMD(pollset.ps, set, msec);
+}
+
+void
+ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
+{
+ ErtsPollResFd pollres[256];
+ int pollres_len;
+ SysTimeval wait_time;
+ int poll_ret, i;
+
+ restart:
+
+ /* Figure out timeout value */
+ if (do_wait) {
+ erts_time_remaining(&wait_time);
+ } else { /* poll only */
+ wait_time.tv_sec = 0;
+ wait_time.tv_usec = 0;
+ }
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0); /* No locks should be locked */
+#endif
+ erts_smp_activity_begin(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+ pollres_len = sizeof(pollres)/sizeof(ErtsPollResFd);
+
+ erts_smp_atomic_set(&pollset.in_poll_wait, 1);
+
+ poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, &wait_time);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0); /* No locks should be locked */
+#endif
+ erts_smp_activity_end(ERTS_ACTIVITY_WAIT, NULL, NULL, NULL);
+
+ erts_deliver_time(); /* sync the machine's idea of time */
+
+#ifdef ERTS_BREAK_REQUESTED
+ if (ERTS_BREAK_REQUESTED)
+ erts_do_break_handling();
+#endif
+
+ if (poll_ret != 0) {
+ erts_smp_atomic_set(&pollset.in_poll_wait, 0);
+ forget_removed(&pollset);
+ if (poll_ret == EAGAIN) {
+ goto restart;
+ }
+
+ if (poll_ret != ETIMEDOUT
+ && poll_ret != EINTR
+#ifdef ERRNO_BLOCK
+ && poll_ret != ERRNO_BLOCK
+#endif
+ ) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "erts_poll_wait() failed: %s (%d)\n",
+ erl_errno_id(poll_ret), poll_ret);
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
+ return;
+ }
+
+ for (i = 0; i < pollres_len; i++) {
+
+ ErtsSysFdType fd = (ErtsSysFdType) pollres[i].fd;
+ ErtsDrvEventState *state;
+
+ erts_smp_mtx_lock(fd_mtx(fd));
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ state = &drv_ev_state[ (int) fd];
+#else
+ state = hash_get_drv_ev_state(fd);
+ if (!state) {
+ goto next_pollres;
+ }
+#endif
+
+ /* Skip this fd if it was removed from pollset */
+ if (is_removed(state)) {
+ goto next_pollres;
+ }
+
+ switch (state->type) {
+ case ERTS_EV_TYPE_DRV_SEL: { /* Requested via driver_select()... */
+ ErtsPollEvents revents;
+ ErtsPollEvents revent_mask;
+
+ revent_mask = ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT);
+ revent_mask |= state->events;
+ revents = pollres[i].events & revent_mask;
+
+ if (revents & ERTS_POLL_EV_ERR) {
+ /*
+ * Let the driver handle the error condition. Only input,
+ * only output, or nothing might have been selected.
+ * We *do not* want to call a callback that corresponds
+ * to an event not selected. revents might give us a clue
+ * on which one to call.
+ */
+ if ((revents & ERTS_POLL_EV_IN)
+ || (!(revents & ERTS_POLL_EV_OUT)
+ && state->events & ERTS_POLL_EV_IN)) {
+ iready(state->driver.select->inport, state);
+ }
+ else if (state->events & ERTS_POLL_EV_OUT) {
+ oready(state->driver.select->outport, state);
+ }
+ }
+ else if (revents & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
+ if (revents & ERTS_POLL_EV_OUT) {
+ oready(state->driver.select->outport, state);
+ }
+ /* Someone might have deselected input since revents
+ was read (true also on the non-smp emulator since
+ oready() may have been called); therefore, update
+ revents... */
+ revents &= ~(~state->events & ERTS_POLL_EV_IN);
+ if (revents & ERTS_POLL_EV_IN) {
+ iready(state->driver.select->inport, state);
+ }
+ }
+ else if (revents & ERTS_POLL_EV_NVAL) {
+ bad_fd_in_pollset(state,
+ state->driver.select->inport,
+ state->driver.select->outport,
+ state->events);
+ }
+ break;
+ }
+
+#if ERTS_CIO_HAVE_DRV_EVENT
+ case ERTS_EV_TYPE_DRV_EV: { /* Requested via driver_event()... */
+ ErlDrvEventData event_data;
+ ErtsPollEvents revents;
+ ASSERT(state->driver.event);
+ ASSERT(state->driver.event->data);
+ event_data = state->driver.event->data;
+ revents = pollres[i].events;
+ revents &= ~state->driver.event->removed_events;
+
+ if (revents) {
+ event_data->events = state->events;
+ event_data->revents = revents;
+
+ eready(state->driver.event->port, state, event_data);
+ }
+ break;
+ }
+#endif
+
+ case ERTS_EV_TYPE_NONE: /* Deselected ... */
+ break;
+
+ default: { /* Error */
+ erts_dsprintf_buf_t *dsbufp;
+ dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp,
+ "Invalid event request type for fd in erts_poll()! "
+ "fd=%d, event request type=%sd\n", (int) state->fd,
+ (int) state->type);
+ ASSERT(0);
+ deselect(state, 0);
+ break;
+ }
+ }
+
+ next_pollres:;
+#ifdef ERTS_SMP
+ erts_smp_mtx_unlock(fd_mtx(fd));
+#endif
+ }
+
+ erts_smp_atomic_set(&pollset.in_poll_wait, 0);
+ forget_removed(&pollset);
+}
+
+static void
+bad_fd_in_pollset(ErtsDrvEventState *state, Eterm inport,
+ Eterm outport, ErtsPollEvents events)
+{
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+
+ if (events & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
+ char *io_str;
+ Eterm port = NIL;
+ if ((events & ERTS_POLL_EV_IN) && (events & ERTS_POLL_EV_OUT)) {
+ io_str = "input/output";
+ if (inport == outport)
+ port = inport;
+ }
+ else {
+ if (events & ERTS_POLL_EV_IN) {
+ io_str = "input";
+ port = inport;
+ }
+ else {
+ io_str = "output";
+ port = outport;
+ }
+ }
+ erts_dsprintf(dsbufp,
+ "Bad %s fd in erts_poll()! fd=%d, ",
+ io_str, (int) state->fd);
+ if (is_nil(port)) {
+ ErtsPortNames *ipnp = erts_get_port_names(inport);
+ ErtsPortNames *opnp = erts_get_port_names(outport);
+ erts_dsprintf(dsbufp, "ports=%T/%T, drivers=%s/%s, names=%s/%s\n",
+ is_nil(inport) ? am_undefined : inport,
+ is_nil(outport) ? am_undefined : outport,
+ ipnp->driver_name ? ipnp->driver_name : "<unknown>",
+ opnp->driver_name ? opnp->driver_name : "<unknown>",
+ ipnp->name ? ipnp->name : "<unknown>",
+ opnp->name ? opnp->name : "<unknown>");
+ erts_free_port_names(ipnp);
+ erts_free_port_names(opnp);
+ }
+ else {
+ ErtsPortNames *pnp = erts_get_port_names(port);
+ erts_dsprintf(dsbufp, "port=%T, driver=%s, name=%s\n",
+ is_nil(port) ? am_undefined : port,
+ pnp->driver_name ? pnp->driver_name : "<unknown>",
+ pnp->name ? pnp->name : "<unknown>");
+ erts_free_port_names(pnp);
+ }
+ }
+ else {
+ erts_dsprintf(dsbufp, "Bad fd in erts_poll()! fd=%d\n", (int) state->fd);
+ }
+ erts_send_error_to_logger_nogl(dsbufp);
+
+ /* unmap entry */
+ deselect(state, 0);
+}
+
+static void
+stale_drv_select(Eterm id, ErtsDrvEventState *state, int mode)
+{
+ erts_stale_drv_select(id, (ErlDrvEvent) state->fd, mode, 0);
+ deselect(state, mode);
+}
+
+#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
+static SafeHashValue drv_ev_state_hash(void *des)
+{
+ SafeHashValue val = (SafeHashValue) ((ErtsDrvEventState *) des)->fd;
+ return val ^ (val >> 8); /* Good enough for aligned pointer values? */
+}
+
+static int drv_ev_state_cmp(void *des1, void *des2)
+{
+ return ( ((ErtsDrvEventState *) des1)->fd == ((ErtsDrvEventState *) des2)->fd
+ ? 0 : 1);
+}
+
+static void *drv_ev_state_alloc(void *des_tmpl)
+{
+ ErtsDrvEventState *evstate;
+ erts_smp_spin_lock(&state_prealloc_lock);
+ if (state_prealloc_first == NULL) {
+ erts_smp_spin_unlock(&state_prealloc_lock);
+ evstate = (ErtsDrvEventState *)
+ erts_alloc(ERTS_ALC_T_DRV_EV_STATE, sizeof(ErtsDrvEventState));
+ } else {
+ evstate = state_prealloc_first;
+ state_prealloc_first = (ErtsDrvEventState *) evstate->hb.next;
+ --num_state_prealloc;
+ erts_smp_spin_unlock(&state_prealloc_lock);
+ }
+ /* XXX: Already valid data if prealloced, could ignore template! */
+ *evstate = *((ErtsDrvEventState *) des_tmpl);
+
+ return (void *) evstate;
+}
+
+static void drv_ev_state_free(void *des)
+{
+ erts_smp_spin_lock(&state_prealloc_lock);
+ ((ErtsDrvEventState *) des)->hb.next = &state_prealloc_first->hb;
+ state_prealloc_first = (ErtsDrvEventState *) des;
+ ++num_state_prealloc;
+ erts_smp_spin_unlock(&state_prealloc_lock);
+}
+#endif
+
+void
+ERTS_CIO_EXPORT(erts_init_check_io)(void)
+{
+ erts_smp_atomic_init(&pollset.in_poll_wait, 0);
+ ERTS_CIO_POLL_INIT();
+ pollset.ps = ERTS_CIO_NEW_POLLSET();
+
+#ifdef ERTS_SMP
+ init_removed_fd_alloc();
+ pollset.removed_list = NULL;
+ erts_smp_spinlock_init(&pollset.removed_list_lock,
+ "pollset_rm_list");
+ {
+ int i;
+ for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) {
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_smp_mtx_init_x(&drv_ev_state_locks[i].lck, "drv_ev_state", make_small(i));
+#else
+ erts_smp_mtx_init(&drv_ev_state_locks[i].lck, "drv_ev_state");
+#endif
+ }
+ }
+#endif
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ max_fds = ERTS_CIO_POLL_MAX_FDS();
+ erts_smp_atomic_init(&drv_ev_state_len, 0);
+ drv_ev_state = NULL;
+ erts_smp_mtx_init(&drv_ev_state_grow_lock, "drv_ev_state_grow");
+#else
+ {
+ SafeHashFunctions hf;
+ hf.hash = &drv_ev_state_hash;
+ hf.cmp = &drv_ev_state_cmp;
+ hf.alloc = &drv_ev_state_alloc;
+ hf.free = &drv_ev_state_free;
+ num_state_prealloc = 0;
+ state_prealloc_first = NULL;
+ erts_smp_spinlock_init(&state_prealloc_lock,"state_prealloc");
+
+ safe_hash_init(ERTS_ALC_T_DRV_EV_STATE, &drv_ev_state_tab, "drv_ev_state_tab",
+ DRV_EV_STATE_HTAB_SIZE, hf);
+ }
+#endif
+}
+
+int
+ERTS_CIO_EXPORT(erts_check_io_max_files)(void)
+{
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ return max_fds;
+#else
+ return ERTS_POLL_EXPORT(erts_poll_max_fds)();
+#endif
+}
+
+Uint
+ERTS_CIO_EXPORT(erts_check_io_size)(void)
+{
+ Uint res;
+ ErtsPollInfo pi;
+ ERTS_CIO_POLL_INFO(pollset.ps, &pi);
+ res = pi.memory_size;
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ res += sizeof(ErtsDrvEventState) * erts_smp_atomic_read(&drv_ev_state_len);
+#else
+ res += safe_hash_table_sz(&drv_ev_state_tab);
+ {
+ SafeHashInfo hi;
+ safe_hash_get_info(&hi, &drv_ev_state_tab);
+ res += hi.objs * sizeof(ErtsDrvEventState);
+ }
+ erts_smp_spin_lock(&state_prealloc_lock);
+ res += num_state_prealloc * sizeof(ErtsDrvEventState);
+ erts_smp_spin_unlock(&state_prealloc_lock);
+#endif
+ return res;
+}
+
+Eterm
+ERTS_CIO_EXPORT(erts_check_io_info)(void *proc)
+{
+ Process *p = (Process *) proc;
+ Eterm tags[15], values[15], res;
+ Uint sz, *szp, *hp, **hpp, memory_size;
+ Sint i;
+ ErtsPollInfo pi;
+
+ ERTS_CIO_POLL_INFO(pollset.ps, &pi);
+ memory_size = pi.memory_size;
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read(&drv_ev_state_len);
+#else
+ memory_size += safe_hash_table_sz(&drv_ev_state_tab);
+ {
+ SafeHashInfo hi;
+ safe_hash_get_info(&hi, &drv_ev_state_tab);
+ memory_size += hi.objs * sizeof(ErtsDrvEventState);
+ }
+ erts_smp_spin_lock(&state_prealloc_lock);
+ memory_size += num_state_prealloc * sizeof(ErtsDrvEventState);
+ erts_smp_spin_unlock(&state_prealloc_lock);
+#endif
+
+ hpp = NULL;
+ szp = &sz;
+ sz = 0;
+
+ bld_it:
+ i = 0;
+
+ tags[i] = erts_bld_atom(hpp, szp, "name");
+ values[i++] = erts_bld_atom(hpp, szp, "erts_poll");
+
+ tags[i] = erts_bld_atom(hpp, szp, "primary");
+ values[i++] = erts_bld_atom(hpp, szp, pi.primary);
+
+ tags[i] = erts_bld_atom(hpp, szp, "fallback");
+ values[i++] = erts_bld_atom(hpp, szp, pi.fallback ? pi.fallback : "false");
+
+ tags[i] = erts_bld_atom(hpp, szp, "kernel_poll");
+ values[i++] = erts_bld_atom(hpp, szp,
+ pi.kernel_poll ? pi.kernel_poll : "false");
+
+ tags[i] = erts_bld_atom(hpp, szp, "memory_size");
+ values[i++] = erts_bld_uint(hpp, szp, memory_size);
+
+ tags[i] = erts_bld_atom(hpp, szp, "total_poll_set_size");
+ values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.poll_set_size);
+
+ if (pi.fallback) {
+ tags[i] = erts_bld_atom(hpp, szp, "fallback_poll_set_size");
+ values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.fallback_poll_set_size);
+ }
+
+ tags[i] = erts_bld_atom(hpp, szp, "lazy_updates");
+ values[i++] = pi.lazy_updates ? am_true : am_false;
+
+ if (pi.lazy_updates) {
+ tags[i] = erts_bld_atom(hpp, szp, "pending_updates");
+ values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.pending_updates);
+ }
+
+ tags[i] = erts_bld_atom(hpp, szp, "batch_updates");
+ values[i++] = pi.batch_updates ? am_true : am_false;
+
+ tags[i] = erts_bld_atom(hpp, szp, "concurrent_updates");
+ values[i++] = pi.concurrent_updates ? am_true : am_false;
+
+ tags[i] = erts_bld_atom(hpp, szp, "max_fds");
+ values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.max_fds);
+
+#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
+ tags[i] = erts_bld_atom(hpp, szp, "no_avoided_wakeups");
+ values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.no_avoided_wakeups);
+
+ tags[i] = erts_bld_atom(hpp, szp, "no_avoided_interrupts");
+ values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.no_avoided_interrupts);
+
+ tags[i] = erts_bld_atom(hpp, szp, "no_interrupt_timed");
+ values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.no_interrupt_timed);
+#endif
+
+ res = erts_bld_2tup_list(hpp, szp, i, tags, values);
+
+ if (!hpp) {
+ hp = HAlloc(p, sz);
+ hpp = &hp;
+ szp = NULL;
+ goto bld_it;
+ }
+
+ return res;
+}
+
+static ERTS_INLINE ErtsPollEvents
+print_events(ErtsPollEvents ev)
+{
+ int first = 1;
+ if(ev & ERTS_POLL_EV_IN) {
+ ev &= ~ERTS_POLL_EV_IN;
+ erts_printf("%s%s", first ? "" : "|", "IN");
+ first = 0;
+ }
+ if(ev & ERTS_POLL_EV_OUT) {
+ ev &= ~ERTS_POLL_EV_OUT;
+ erts_printf("%s%s", first ? "" : "|", "OUT");
+ first = 0;
+ }
+ /* The following should not appear... */
+ if(ev & ERTS_POLL_EV_NVAL) {
+ erts_printf("%s%s", first ? "" : "|", "NVAL");
+ first = 0;
+ }
+ if(ev & ERTS_POLL_EV_ERR) {
+ erts_printf("%s%s", first ? "" : "|", "ERR");
+ first = 0;
+ }
+ if (ev)
+ erts_printf("%s0x%b32x", first ? "" : "|", (Uint32) ev);
+ return ev;
+}
+
+typedef struct {
+ int used_fds;
+ int num_errors;
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ int internal_fds;
+ ErtsPollEvents *epep;
+#endif
+} IterDebugCounters;
+
+static void doit_erts_check_io_debug(void *vstate, void *vcounters)
+{
+ ErtsDrvEventState *state = (ErtsDrvEventState *) vstate;
+ IterDebugCounters *counters = (IterDebugCounters *) vcounters;
+ ErtsPollEvents cio_events = state->events;
+ ErtsSysFdType fd = state->fd;
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ int internal = 0;
+ ErtsPollEvents ep_events = counters->epep[(int) fd];
+#endif
+ int err = 0;
+
+#if defined(HAVE_FSTAT) && !defined(NO_FSTAT_ON_SYS_FD_TYPE)
+ struct stat stat_buf;
+#endif
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (state->events || ep_events) {
+ if (ep_events & ERTS_POLL_EV_NVAL) {
+ ep_events &= ~ERTS_POLL_EV_NVAL;
+ internal = 1;
+ counters->internal_fds++;
+ }
+ else
+ counters->used_fds++;
+#else
+ if (state->events) {
+ counters->used_fds++;
+#endif
+
+ erts_printf("fd=%d ", (int) fd);
+
+#if defined(HAVE_FSTAT) && !defined(NO_FSTAT_ON_SYS_FD_TYPE)
+ if (fstat((int) fd, &stat_buf) < 0)
+ erts_printf("type=unknown ");
+ else {
+ erts_printf("type=");
+#ifdef S_ISSOCK
+ if (S_ISSOCK(stat_buf.st_mode))
+ erts_printf("sock ");
+ else
+#endif
+#ifdef S_ISFIFO
+ if (S_ISFIFO(stat_buf.st_mode))
+ erts_printf("fifo ");
+ else
+#endif
+#ifdef S_ISCHR
+ if (S_ISCHR(stat_buf.st_mode))
+ erts_printf("chr ");
+ else
+#endif
+#ifdef S_ISDIR
+ if (S_ISDIR(stat_buf.st_mode))
+ erts_printf("dir ");
+ else
+#endif
+#ifdef S_ISBLK
+ if (S_ISBLK(stat_buf.st_mode))
+ erts_printf("blk ");
+ else
+#endif
+#ifdef S_ISREG
+ if (S_ISREG(stat_buf.st_mode))
+ erts_printf("reg ");
+ else
+#endif
+#ifdef S_ISLNK
+ if (S_ISLNK(stat_buf.st_mode))
+ erts_printf("lnk ");
+ else
+#endif
+#ifdef S_ISDOOR
+ if (S_ISDOOR(stat_buf.st_mode))
+ erts_printf("door ");
+ else
+#endif
+#ifdef S_ISWHT
+ if (S_ISWHT(stat_buf.st_mode))
+ erts_printf("wht ");
+ else
+#endif
+#ifdef S_ISXATTR
+ if (S_ISXATTR(stat_buf.st_mode))
+ erts_printf("xattr ");
+ else
+#endif
+ erts_printf("unknown ");
+ }
+#else
+ erts_printf("type=unknown ");
+#endif
+
+ if (state->type == ERTS_EV_TYPE_DRV_SEL) {
+ erts_printf("driver_select ");
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (internal) {
+ erts_printf("internal ");
+ err = 1;
+ }
+
+ if (cio_events == ep_events) {
+ erts_printf("ev=");
+ if (print_events(cio_events) != 0)
+ err = 1;
+ }
+ else {
+ err = 1;
+ erts_printf("cio_ev=");
+ print_events(cio_events);
+ erts_printf(" ep_ev=");
+ print_events(ep_events);
+ }
+#else
+ if (print_events(cio_events) != 0)
+ err = 1;
+#endif
+ erts_printf(" ");
+ if (cio_events & ERTS_POLL_EV_IN) {
+ Eterm id = state->driver.select->inport;
+ if (is_nil(id)) {
+ erts_printf("inport=none inname=none indrv=none ");
+ err = 1;
+ }
+ else {
+ ErtsPortNames *pnp = erts_get_port_names(id);
+ erts_printf(" inport=%T inname=%s indrv=%s ",
+ id,
+ pnp->name ? pnp->name : "unknown",
+ (pnp->driver_name
+ ? pnp->driver_name
+ : "unknown"));
+ erts_free_port_names(pnp);
+ }
+ }
+ if (cio_events & ERTS_POLL_EV_OUT) {
+ Eterm id = state->driver.select->outport;
+ if (is_nil(id)) {
+ erts_printf("outport=none outname=none outdrv=none ");
+ err = 1;
+ }
+ else {
+ ErtsPortNames *pnp = erts_get_port_names(id);
+ erts_printf(" outport=%T outname=%s outdrv=%s ",
+ id,
+ pnp->name ? pnp->name : "unknown",
+ (pnp->driver_name
+ ? pnp->driver_name
+ : "unknown"));
+ erts_free_port_names(pnp);
+ }
+ }
+ }
+ else if (state->type == ERTS_EV_TYPE_DRV_EV) {
+ Eterm id;
+ erts_printf("driver_event ");
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (internal) {
+ erts_printf("internal ");
+ err = 1;
+ }
+ if (cio_events == ep_events) {
+ erts_printf("ev=0x%b32x", (Uint32) cio_events);
+ }
+ else {
+ err = 1;
+ erts_printf("cio_ev=0x%b32x", (Uint32) cio_events);
+ erts_printf(" ep_ev=0x%b32x", (Uint32) ep_events);
+ }
+#else
+ erts_printf("ev=0x%b32x", (Uint32) cio_events);
+#endif
+ id = state->driver.event->port;
+ if (is_nil(id)) {
+ erts_printf(" port=none name=none drv=none ");
+ err = 1;
+ }
+ else {
+ ErtsPortNames *pnp = erts_get_port_names(id);
+ erts_printf(" port=%T name=%s drv=%s ",
+ id,
+ pnp->name ? pnp->name : "unknown",
+ (pnp->driver_name
+ ? pnp->driver_name
+ : "unknown"));
+ erts_free_port_names(pnp);
+ }
+ }
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ else if (internal) {
+ erts_printf("internal ");
+ if (cio_events) {
+ err = 1;
+ erts_printf("cio_ev=");
+ print_events(cio_events);
+ }
+ if (ep_events) {
+ erts_printf("ep_ev=");
+ print_events(ep_events);
+ }
+ }
+#endif
+ else {
+ err = 1;
+ erts_printf("control_type=%d ", (int)state->type);
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (cio_events == ep_events) {
+ erts_printf("ev=0x%b32x", (Uint32) cio_events);
+ }
+ else {
+ erts_printf("cio_ev=0x%b32x", (Uint32) cio_events);
+ erts_printf(" ep_ev=0x%b32x", (Uint32) ep_events);
+ }
+#else
+ erts_printf("ev=0x%b32x", (Uint32) cio_events);
+#endif
+ }
+
+ if (err) {
+ counters->num_errors++;
+ erts_printf(" ERROR");
+ }
+ erts_printf("\n");
+ }
+}
+
+int
+ERTS_CIO_EXPORT(erts_check_io_debug)(void)
+{
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ int fd, len;
+#endif
+ IterDebugCounters counters;
+ ErtsDrvEventState null_des;
+
+ null_des.driver.select = NULL;
+ null_des.events = 0;
+ null_des.remove_cnt = 0;
+ null_des.type = ERTS_EV_TYPE_NONE;
+
+ erts_printf("--- fds in pollset --------------------------------------\n");
+
+#ifdef ERTS_SMP
+# ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_check_exact(NULL, 0); /* No locks should be locked */
+# endif
+ erts_block_system(0); /* stop the world to avoid messy locking */
+#endif
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ counters.epep = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollEvents)*max_fds);
+ ERTS_POLL_EXPORT(erts_poll_get_selected_events)(pollset.ps, counters.epep, max_fds);
+ counters.internal_fds = 0;
+#endif
+ counters.used_fds = 0;
+ counters.num_errors = 0;
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ len = erts_smp_atomic_read(&drv_ev_state_len);
+ for (fd = 0; fd < len; fd++) {
+ doit_erts_check_io_debug((void *) &drv_ev_state[fd], (void *) &counters);
+ }
+ for ( ; fd < max_fds; fd++) {
+ null_des.fd = fd;
+ doit_erts_check_io_debug((void *) &null_des, (void *) &counters);
+ }
+#else
+ safe_hash_for_each(&drv_ev_state_tab, &doit_erts_check_io_debug, (void *) &counters);
+#endif
+
+#ifdef ERTS_SMP
+ erts_release_system();
+#endif
+
+ erts_printf("\n");
+ erts_printf("used fds=%d\n", counters.used_fds);
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ erts_printf("internal fds=%d\n", counters.internal_fds);
+#endif
+ erts_printf("---------------------------------------------------------\n");
+ fflush(stdout);
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ erts_free(ERTS_ALC_T_TMP, (void *) counters.epep);
+#endif
+ return counters.num_errors;
+}
+
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
new file mode 100644
index 0000000000..9b45a63913
--- /dev/null
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -0,0 +1,96 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Check I/O
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_CHECK_IO_H__
+#define ERL_CHECK_IO_H__
+
+#include "erl_sys_driver.h"
+
+#ifdef ERTS_ENABLE_KERNEL_POLL
+
+int driver_select_kp(ErlDrvPort, ErlDrvEvent, int, int);
+int driver_select_nkp(ErlDrvPort, ErlDrvEvent, int, int);
+int driver_event_kp(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
+int driver_event_nkp(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
+Uint erts_check_io_size_kp(void);
+Uint erts_check_io_size_nkp(void);
+Eterm erts_check_io_info_kp(void *);
+Eterm erts_check_io_info_nkp(void *);
+int erts_check_io_max_files_kp(void);
+int erts_check_io_max_files_nkp(void);
+void erts_check_io_interrupt_kp(int);
+void erts_check_io_interrupt_nkp(int);
+void erts_check_io_interrupt_timed_kp(int, long);
+void erts_check_io_interrupt_timed_nkp(int, long);
+void erts_check_io_kp(int);
+void erts_check_io_nkp(int);
+void erts_init_check_io_kp(void);
+void erts_init_check_io_nkp(void);
+int erts_check_io_debug_kp(void);
+int erts_check_io_debug_nkp(void);
+
+#else /* !ERTS_ENABLE_KERNEL_POLL */
+
+Uint erts_check_io_size(void);
+Eterm erts_check_io_info(void *);
+int erts_check_io_max_files(void);
+void erts_check_io_interrupt(int);
+void erts_check_io_interrupt_timed(int, long);
+void erts_check_io(int);
+void erts_init_check_io(void);
+
+#endif
+
+#endif /* ERL_CHECK_IO_H__ */
+
+#if !defined(ERL_CHECK_IO_C__) && !defined(ERTS_ALLOC_C__)
+#define ERL_CHECK_IO_INTERNAL__
+#endif
+
+#ifndef ERL_CHECK_IO_INTERNAL__
+#define ERL_CHECK_IO_INTERNAL__
+#include "erl_poll.h"
+#include "erl_port_task.h"
+
+/*
+ * ErtsDrvEventDataState is used by driver_event() which is almost never
+ * used. We allocate ErtsDrvEventDataState separately since we don't want
+ * the size of ErtsDrvEventState to increase due to driver_event()
+ * information.
+ */
+typedef struct {
+ Eterm port;
+ ErlDrvEventData data;
+ ErtsPollEvents removed_events;
+ ErtsPortTaskHandle task;
+} ErtsDrvEventDataState;
+
+typedef struct {
+ Eterm inport;
+ Eterm outport;
+ ErtsPortTaskHandle intask;
+ ErtsPortTaskHandle outtask;
+} ErtsDrvSelectDataState;
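+
+/* Illustrative note: the per-fd ErtsDrvEventState in erl_check_io.c keeps
+ * only a pointer to one of the structures above in a union, roughly
+ *
+ *   union {
+ *       ErtsDrvEventDataState *event;    for ERTS_EV_TYPE_DRV_EV
+ *       ErtsDrvSelectDataState *select;  for ERTS_EV_TYPE_DRV_SEL
+ *   } driver;
+ *
+ * so the rarely used driver_event() data adds no per-fd overhead. */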
+#endif /* #ifndef ERL_CHECK_IO_INTERNAL__ */
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
new file mode 100644
index 0000000000..f4e21bc05f
--- /dev/null
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -0,0 +1,1452 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: A memory segment allocator. Segments that are deallocated
+ * are kept for a while in a segment "cache" before they are
+ * destroyed. When segments are allocated, cached segments
+ * are used if possible instead of creating new segments.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_mseg.h"
+#include "global.h"
+#include "erl_threads.h"
+#include "erl_mtrace.h"
+#include "big.h"
+
+#if HAVE_ERTS_MSEG
+
+#if defined(USE_THREADS) && !defined(ERTS_SMP)
+# define ERTS_THREADS_NO_SMP
+#endif
+
+#define SEGTYPE ERTS_MTRACE_SEGMENT_ID
+
+#ifndef HAVE_GETPAGESIZE
+#define HAVE_GETPAGESIZE 0
+#endif
+
+#ifdef _SC_PAGESIZE
+# define GET_PAGE_SIZE sysconf(_SC_PAGESIZE)
+#elif HAVE_GETPAGESIZE
+# define GET_PAGE_SIZE getpagesize()
+#else
+# error "Page size unknown"
+ /* Implement some other way to get the real page size if needed! */
+#endif
+
+#define MAX_CACHE_SIZE 30
+
+#undef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#undef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+
+#undef PAGE_MASK
+#define INV_PAGE_MASK ((Uint) (page_size - 1))
+#define PAGE_MASK (~INV_PAGE_MASK)
+#define PAGE_FLOOR(X) ((X) & PAGE_MASK)
+#define PAGE_CEILING(X) PAGE_FLOOR((X) + INV_PAGE_MASK)
+#define PAGES(X) ((X) >> page_shift)
+
+static int atoms_initialized;
+
+static Uint cache_check_interval;
+
+static void check_cache(void *unused);
+static void mseg_clear_cache(void);
+static int is_cache_check_scheduled;
+#ifdef ERTS_THREADS_NO_SMP
+static int is_cache_check_requested;
+#endif
+
+#if HAVE_MMAP
+/* Mmap ... */
+
+#define MMAP_PROT (PROT_READ|PROT_WRITE)
+#ifdef MAP_ANON
+# define MMAP_FLAGS (MAP_ANON|MAP_PRIVATE)
+# define MMAP_FD (-1)
+#else
+# define MMAP_FLAGS (MAP_PRIVATE)
+# define MMAP_FD mmap_fd
+static int mmap_fd;
+#endif
+
+#if HAVE_MREMAP
+# define HAVE_MSEG_RECREATE 1
+#else
+# define HAVE_MSEG_RECREATE 0
+#endif
+
+#define CAN_PARTLY_DESTROY 1
+#else /* #if HAVE_MMAP */
+#define CAN_PARTLY_DESTROY 0
+#error "Not supported"
+#endif /* #if HAVE_MMAP */
+
+
+#if defined(ERTS_MSEG_FAKE_SEGMENTS)
+#undef CAN_PARTLY_DESTROY
+#define CAN_PARTLY_DESTROY 0
+#endif
+
+static const ErtsMsegOpt_t default_opt = ERTS_MSEG_DEFAULT_OPT_INITIALIZER;
+
+typedef struct cache_desc_t_ {
+ void *seg;
+ Uint size;
+ struct cache_desc_t_ *next;
+ struct cache_desc_t_ *prev;
+} cache_desc_t;
+
+typedef struct {
+ Uint32 giga_no;
+ Uint32 no;
+} CallCounter;
+
+static int is_init_done;
+static Uint page_size;
+static Uint page_shift;
+
+static struct {
+ CallCounter alloc;
+ CallCounter dealloc;
+ CallCounter realloc;
+ CallCounter create;
+ CallCounter destroy;
+#if HAVE_MSEG_RECREATE
+ CallCounter recreate;
+#endif
+ CallCounter clear_cache;
+ CallCounter check_cache;
+} calls;
+
+static cache_desc_t cache_descs[MAX_CACHE_SIZE];
+static cache_desc_t *free_cache_descs;
+static cache_desc_t *cache;
+static cache_desc_t *cache_end;
+static Uint cache_hits;
+static Uint cache_size;
+static Uint min_cached_seg_size;
+static Uint max_cached_seg_size;
+
+static Uint max_cache_size;
+static Uint abs_max_cache_bad_fit;
+static Uint rel_max_cache_bad_fit;
+
+#if CAN_PARTLY_DESTROY
+static Uint min_seg_size;
+#endif
+
+struct {
+ struct {
+ Uint watermark;
+ Uint no;
+ Uint sz;
+ } current;
+ struct {
+ Uint no;
+ Uint sz;
+ } max;
+ struct {
+ Uint no;
+ Uint sz;
+ } max_ever;
+} segments;
+
+#define ERTS_MSEG_ALLOC_STAT(SZ) \
+do { \
+ segments.current.no++; \
+ if (segments.max.no < segments.current.no) \
+ segments.max.no = segments.current.no; \
+ if (segments.current.watermark < segments.current.no) \
+ segments.current.watermark = segments.current.no; \
+ segments.current.sz += (SZ); \
+ if (segments.max.sz < segments.current.sz) \
+ segments.max.sz = segments.current.sz; \
+} while (0)
+
+#define ERTS_MSEG_DEALLOC_STAT(SZ) \
+do { \
+ ASSERT(segments.current.no > 0); \
+ segments.current.no--; \
+ ASSERT(segments.current.sz >= (SZ)); \
+ segments.current.sz -= (SZ); \
+} while (0)
+
+#define ERTS_MSEG_REALLOC_STAT(OSZ, NSZ) \
+do { \
+ ASSERT(segments.current.sz >= (OSZ)); \
+ segments.current.sz -= (OSZ); \
+ segments.current.sz += (NSZ); \
+} while (0)
+
+#define ONE_GIGA (1000000000)
+
+#define ZERO_CC(CC) (calls.CC.no = 0, calls.CC.giga_no = 0)
+
+#define INC_CC(CC) (calls.CC.no == ONE_GIGA - 1 \
+ ? (calls.CC.giga_no++, calls.CC.no = 0) \
+ : calls.CC.no++)
+
+#define DEC_CC(CC) (calls.CC.no == 0 \
+ ? (calls.CC.giga_no--, \
+ calls.CC.no = ONE_GIGA - 1) \
+ : calls.CC.no--)
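The CallCounter/INC_CC machinery above keeps each call count as giga_no * 10^9 + no, so a 32-bit field never silently wraps; the statistics code further down (PRINT_CC and info_calls()) prints the pair as one decimal number. A small self-contained sketch of the same scheme, using plain C types instead of Uint32:

#include <stdio.h>
#include <inttypes.h>

#define ONE_GIGA 1000000000u

typedef struct { uint32_t giga_no; uint32_t no; } counter_t;

static void inc(counter_t *c)
{
    if (c->no == ONE_GIGA - 1) {       /* roll one full billion into giga_no */
        c->giga_no++;
        c->no = 0;
    } else {
        c->no++;
    }
}

int main(void)
{
    counter_t c = {0, ONE_GIGA - 2};
    inc(&c); inc(&c); inc(&c);
    if (c.giga_no == 0)                /* printed the same way PRINT_CC does */
        printf("%" PRIu32 "\n", c.no);
    else
        printf("%" PRIu32 "%09" PRIu32 "\n", c.giga_no, c.no);  /* 1000000001 */
    return 0;
}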
+
+
+static erts_mtx_t mseg_mutex; /* Also needed when !USE_THREADS */
+static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */
+
+#ifdef USE_THREADS
+#ifdef ERTS_THREADS_NO_SMP
+static erts_tid_t main_tid;
+static int async_handle = -1;
+#endif
+
+static void thread_safe_init(void)
+{
+ erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
+ erts_mtx_init(&mseg_mutex, "mseg");
+#ifdef ERTS_THREADS_NO_SMP
+ main_tid = erts_thr_self();
+#endif
+}
+
+#endif
+
+static ErlTimer cache_check_timer;
+
+static ERTS_INLINE void
+schedule_cache_check(void)
+{
+ if (!is_cache_check_scheduled && is_init_done) {
+#ifdef ERTS_THREADS_NO_SMP
+ if (!erts_equal_tids(erts_thr_self(), main_tid)) {
+ if (!is_cache_check_requested) {
+ is_cache_check_requested = 1;
+ sys_async_ready(async_handle);
+ }
+ }
+ else
+#endif
+ {
+ cache_check_timer.active = 0;
+ erl_set_timer(&cache_check_timer,
+ check_cache,
+ NULL,
+ NULL,
+ cache_check_interval);
+ is_cache_check_scheduled = 1;
+#ifdef ERTS_THREADS_NO_SMP
+ is_cache_check_requested = 0;
+#endif
+ }
+ }
+}
+
+#ifdef ERTS_THREADS_NO_SMP
+
+static void
+check_schedule_cache_check(void)
+{
+ erts_mtx_lock(&mseg_mutex);
+ if (is_cache_check_requested
+ && !is_cache_check_scheduled) {
+ schedule_cache_check();
+ }
+ erts_mtx_unlock(&mseg_mutex);
+}
+
+#endif
+
+static void
+mseg_shutdown(void)
+{
+#ifdef ERTS_SMP
+ erts_mtx_lock(&mseg_mutex);
+#endif
+ mseg_clear_cache();
+#ifdef ERTS_SMP
+ erts_mtx_unlock(&mseg_mutex);
+#endif
+}
+
+static ERTS_INLINE void *
+mseg_create(Uint size)
+{
+ void *seg;
+
+ ASSERT(size % page_size == 0);
+
+#if defined(ERTS_MSEG_FAKE_SEGMENTS)
+ seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
+#elif HAVE_MMAP
+ seg = (void *) mmap((void *) 0, (size_t) size,
+ MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
+ if (seg == (void *) MAP_FAILED)
+ seg = NULL;
+#else
+#error "Missing mseg_create() implementation"
+#endif
+
+ INC_CC(create);
+
+ return seg;
+}
+
+static ERTS_INLINE void
+mseg_destroy(void *seg, Uint size)
+{
+#if defined(ERTS_MSEG_FAKE_SEGMENTS)
+ erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
+#elif HAVE_MMAP
+
+#ifdef DEBUG
+ int res =
+#endif
+
+ munmap((void *) seg, size);
+
+ ASSERT(size % page_size == 0);
+ ASSERT(res == 0);
+#else
+#error "Missing mseg_destroy() implementation"
+#endif
+
+ INC_CC(destroy);
+
+}
+
+#if HAVE_MSEG_RECREATE
+
+static ERTS_INLINE void *
+mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
+{
+ void *new_seg;
+
+ ASSERT(old_size % page_size == 0);
+ ASSERT(new_size % page_size == 0);
+
+#if defined(ERTS_MSEG_FAKE_SEGMENTS)
+ new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
+#elif HAVE_MREMAP
+ new_seg = (void *) mremap((void *) old_seg,
+ (size_t) old_size,
+ (size_t) new_size,
+ MREMAP_MAYMOVE);
+ if (new_seg == (void *) MAP_FAILED)
+ new_seg = NULL;
+#else
+#error "Missing mseg_recreate() implementation"
+#endif
+
+ INC_CC(recreate);
+
+ return new_seg;
+}
+
+#endif /* #if HAVE_MSEG_RECREATE */
+
+
+static ERTS_INLINE cache_desc_t *
+alloc_cd(void)
+{
+ cache_desc_t *cd = free_cache_descs;
+ if (cd)
+ free_cache_descs = cd->next;
+ return cd;
+}
+
+static ERTS_INLINE void
+free_cd(cache_desc_t *cd)
+{
+ cd->next = free_cache_descs;
+ free_cache_descs = cd;
+}
+
+
+static ERTS_INLINE void
+link_cd(cache_desc_t *cd)
+{
+ if (cache)
+ cache->prev = cd;
+ cd->next = cache;
+ cd->prev = NULL;
+ cache = cd;
+
+ if (!cache_end) {
+ ASSERT(!cd->next);
+ cache_end = cd;
+ }
+
+ cache_size++;
+}
+
+static ERTS_INLINE void
+end_link_cd(cache_desc_t *cd)
+{
+ if (cache_end)
+ cache_end->next = cd;
+ cd->next = NULL;
+ cd->prev = cache_end;
+ cache_end = cd;
+
+ if (!cache) {
+ ASSERT(!cd->prev);
+ cache = cd;
+ }
+
+ cache_size++;
+}
+
+static ERTS_INLINE void
+unlink_cd(cache_desc_t *cd)
+{
+
+ if (cd->next)
+ cd->next->prev = cd->prev;
+ else
+ cache_end = cd->prev;
+
+ if (cd->prev)
+ cd->prev->next = cd->next;
+ else
+ cache = cd->next;
+ ASSERT(cache_size > 0);
+ cache_size--;
+}
+
+static ERTS_INLINE void
+check_cache_limits(void)
+{
+ cache_desc_t *cd;
+ max_cached_seg_size = 0;
+ min_cached_seg_size = ~((Uint) 0);
+ for (cd = cache; cd; cd = cd->next) {
+ if (cd->size < min_cached_seg_size)
+ min_cached_seg_size = cd->size;
+ if (cd->size > max_cached_seg_size)
+ max_cached_seg_size = cd->size;
+ }
+
+}
+
+static ERTS_INLINE void
+adjust_cache_size(int force_check_limits)
+{
+ cache_desc_t *cd;
+ int check_limits = force_check_limits;
+ Sint max_cached = ((Sint) segments.current.watermark
+ - (Sint) segments.current.no);
+
+ while (((Sint) cache_size) > max_cached && ((Sint) cache_size) > 0) {
+ ASSERT(cache_end);
+ cd = cache_end;
+ if (!check_limits &&
+ !(min_cached_seg_size < cd->size
+ && cd->size < max_cached_seg_size)) {
+ check_limits = 1;
+ }
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
+ mseg_destroy(cd->seg, cd->size);
+ unlink_cd(cd);
+ free_cd(cd);
+ }
+
+ if (check_limits)
+ check_cache_limits();
+
+}
+
+static void
+check_cache(void *unused)
+{
+#ifdef ERTS_SMP
+ erts_mtx_lock(&mseg_mutex);
+#endif
+
+ is_cache_check_scheduled = 0;
+
+ if (segments.current.watermark > segments.current.no)
+ segments.current.watermark--;
+ adjust_cache_size(0);
+
+ if (cache_size)
+ schedule_cache_check();
+
+ INC_CC(check_cache);
+
+#ifdef ERTS_SMP
+ erts_mtx_unlock(&mseg_mutex);
+#endif
+
+}
+
+static void
+mseg_clear_cache(void)
+{
+ segments.current.watermark = 0;
+
+ adjust_cache_size(1);
+
+ ASSERT(!cache);
+ ASSERT(!cache_end);
+ ASSERT(!cache_size);
+
+ segments.current.watermark = segments.current.no;
+
+ INC_CC(clear_cache);
+}
+
+static void *
+mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
+{
+
+ Uint max, min, diff_size, size;
+ cache_desc_t *cd, *cand_cd;
+ void *seg;
+
+ INC_CC(alloc);
+
+ size = PAGE_CEILING(*size_p);
+
+#if CAN_PARTLY_DESTROY
+ if (size < min_seg_size)
+ min_seg_size = size;
+#endif
+
+ if (!opt->cache) {
+ create_seg:
+ adjust_cache_size(0);
+ seg = mseg_create(size);
+ if (!seg) {
+ mseg_clear_cache();
+ seg = mseg_create(size);
+ if (!seg)
+ size = 0;
+ }
+
+ *size_p = size;
+ if (seg) {
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
+ ERTS_MSEG_ALLOC_STAT(size);
+ }
+ return seg;
+ }
+
+ if (size > max_cached_seg_size)
+ goto create_seg;
+
+ if (size < min_cached_seg_size) {
+
+ diff_size = min_cached_seg_size - size;
+
+ if (diff_size > abs_max_cache_bad_fit)
+ goto create_seg;
+
+ if (100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size))
+ goto create_seg;
+
+ }
+
+ max = 0;
+ min = ~((Uint) 0);
+ cand_cd = NULL;
+
+ for (cd = cache; cd; cd = cd->next) {
+ if (cd->size >= size) {
+ if (!cand_cd) {
+ cand_cd = cd;
+ continue;
+ }
+ else if (cd->size < cand_cd->size) {
+ if (max < cand_cd->size)
+ max = cand_cd->size;
+ if (min > cand_cd->size)
+ min = cand_cd->size;
+ cand_cd = cd;
+ continue;
+ }
+ }
+ if (max < cd->size)
+ max = cd->size;
+ if (min > cd->size)
+ min = cd->size;
+ }
+
+ min_cached_seg_size = min;
+ max_cached_seg_size = max;
+
+ if (!cand_cd)
+ goto create_seg;
+
+ diff_size = cand_cd->size - size;
+
+ if (diff_size > abs_max_cache_bad_fit
+ || 100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) {
+ if (max_cached_seg_size < cand_cd->size)
+ max_cached_seg_size = cand_cd->size;
+ if (min_cached_seg_size > cand_cd->size)
+ min_cached_seg_size = cand_cd->size;
+ goto create_seg;
+ }
+
+ cache_hits++;
+
+ size = cand_cd->size;
+ seg = cand_cd->seg;
+
+ unlink_cd(cand_cd);
+ free_cd(cand_cd);
+
+ *size_p = size;
+
+ if (erts_mtrace_enabled) {
+ erts_mtrace_crr_free(SEGTYPE, SEGTYPE, seg);
+ erts_mtrace_crr_alloc(seg, atype, SEGTYPE, size);
+ }
+
+ if (seg)
+ ERTS_MSEG_ALLOC_STAT(size);
+ return seg;
+}
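The cache lookup in mseg_alloc() above only reuses a cached segment when the waste is acceptable: the candidate must be at least as large as the request, the absolute waste must not exceed abs_max_cache_bad_fit, and the waste measured in pages must not exceed rel_max_cache_bad_fit percent of the requested pages. Below is a standalone sketch of that test (not part of this commit); with the defaults from erl_mseg.h (amcbf = 4 MB, rmcbf = 20), a 1 MB cached segment is reused for a 900 kB request (roughly 14 % waste), while an 8 MB cached segment is not reused for a 1 MB request.

#include <stdio.h>

#define PAGE_SZ   4096u
#define PAGES(X)  ((X) / PAGE_SZ)

static int fits_cache(unsigned long want, unsigned long cached,
                      unsigned long amcbf, unsigned long rmcbf)
{
    unsigned long waste;
    if (cached < want)
        return 0;                              /* too small, never reused */
    waste = cached - want;
    if (waste > amcbf)
        return 0;                              /* absolute bad fit */
    if (100 * PAGES(waste) > rmcbf * PAGES(want))
        return 0;                              /* relative bad fit */
    return 1;
}

int main(void)
{
    printf("%d\n", fits_cache(900u * 1024, 1024u * 1024, 4u << 20, 20));  /* 1 */
    printf("%d\n", fits_cache(1024u * 1024, 8u << 20, 4u << 20, 20));     /* 0 */
    return 0;
}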
+
+
+static void
+mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size,
+ const ErtsMsegOpt_t *opt)
+{
+ cache_desc_t *cd;
+
+ ERTS_MSEG_DEALLOC_STAT(size);
+
+ if (!opt->cache || max_cache_size == 0) {
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_free(atype, SEGTYPE, seg);
+ mseg_destroy(seg, size);
+ }
+ else {
+ int check_limits = 0;
+
+ if (size < min_cached_seg_size)
+ min_cached_seg_size = size;
+ if (size > max_cached_seg_size)
+ max_cached_seg_size = size;
+
+ if (!free_cache_descs) {
+ cd = cache_end;
+ if (!(min_cached_seg_size < cd->size
+ && cd->size < max_cached_seg_size)) {
+ check_limits = 1;
+ }
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
+ mseg_destroy(cd->seg, cd->size);
+ unlink_cd(cd);
+ free_cd(cd);
+ }
+
+ cd = alloc_cd();
+ ASSERT(cd);
+ cd->seg = seg;
+ cd->size = size;
+ link_cd(cd);
+
+ if (erts_mtrace_enabled) {
+ erts_mtrace_crr_free(atype, SEGTYPE, seg);
+ erts_mtrace_crr_alloc(seg, SEGTYPE, SEGTYPE, size);
+ }
+
+ /* ASSERT(segments.current.watermark >= segments.current.no + cache_size); */
+
+ if (check_limits)
+ check_cache_limits();
+
+ schedule_cache_check();
+
+ }
+
+ INC_CC(dealloc);
+}
+
+static void *
+mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p,
+ const ErtsMsegOpt_t *opt)
+{
+ void *new_seg;
+ Uint new_size;
+
+ if (!seg || !old_size) {
+ new_seg = mseg_alloc(atype, new_size_p, opt);
+ DEC_CC(alloc);
+ return new_seg;
+ }
+
+ if (!(*new_size_p)) {
+ mseg_dealloc(atype, seg, old_size, opt);
+ DEC_CC(dealloc);
+ return NULL;
+ }
+
+ new_seg = seg;
+ new_size = PAGE_CEILING(*new_size_p);
+
+ if (new_size == old_size)
+ ;
+ else if (new_size < old_size) {
+ Uint shrink_sz = old_size - new_size;
+
+#if CAN_PARTLY_DESTROY
+ if (new_size < min_seg_size)
+ min_seg_size = new_size;
+#endif
+
+ if (shrink_sz < opt->abs_shrink_th
+ && 100*PAGES(shrink_sz) < opt->rel_shrink_th*PAGES(old_size)) {
+ new_size = old_size;
+ }
+ else {
+
+#if CAN_PARTLY_DESTROY
+
+ if (shrink_sz > min_seg_size
+ && free_cache_descs
+ && opt->cache) {
+ cache_desc_t *cd;
+
+ cd = alloc_cd();
+ ASSERT(cd);
+ cd->seg = ((char *) seg) + new_size;
+ cd->size = shrink_sz;
+ end_link_cd(cd);
+
+ if (erts_mtrace_enabled) {
+ erts_mtrace_crr_realloc(new_seg,
+ atype,
+ SEGTYPE,
+ seg,
+ new_size);
+ erts_mtrace_crr_alloc(cd->seg, SEGTYPE, SEGTYPE, cd->size);
+ }
+ schedule_cache_check();
+ }
+ else {
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_realloc(new_seg,
+ atype,
+ SEGTYPE,
+ seg,
+ new_size);
+ mseg_destroy(((char *) seg) + new_size, shrink_sz);
+ }
+
+#elif HAVE_MSEG_RECREATE
+
+ goto do_recreate;
+
+#else
+
+ new_seg = mseg_alloc(atype, &new_size, opt);
+ if (!new_seg)
+ new_size = old_size;
+ else {
+ sys_memcpy(((char *) new_seg),
+ ((char *) seg),
+ MIN(new_size, old_size));
+ mseg_dealloc(atype, seg, old_size, opt);
+ }
+
+#endif
+
+ }
+ }
+ else {
+
+ if (!opt->preserv) {
+ mseg_dealloc(atype, seg, old_size, opt);
+ new_seg = mseg_alloc(atype, &new_size, opt);
+ }
+ else {
+#if HAVE_MSEG_RECREATE
+#if !CAN_PARTLY_DESTROY
+ do_recreate:
+#endif
+ new_seg = mseg_recreate((void *) seg, old_size, new_size);
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
+ if (!new_seg)
+ new_size = old_size;
+#else
+ new_seg = mseg_alloc(atype, &new_size, opt);
+ if (!new_seg)
+ new_size = old_size;
+ else {
+ sys_memcpy(((char *) new_seg),
+ ((char *) seg),
+ MIN(new_size, old_size));
+ mseg_dealloc(atype, seg, old_size, opt);
+ }
+#endif
+ }
+ }
+
+ INC_CC(realloc);
+
+ *new_size_p = new_size;
+
+ ERTS_MSEG_REALLOC_STAT(old_size, new_size);
+
+ return new_seg;
+}
+
+/* --- Info stuff ---------------------------------------------------------- */
+
+static struct {
+ Eterm version;
+
+ Eterm options;
+ Eterm amcbf;
+ Eterm rmcbf;
+ Eterm mcs;
+ Eterm cci;
+
+ Eterm status;
+ Eterm cached_segments;
+ Eterm cache_hits;
+ Eterm segments;
+ Eterm segments_size;
+ Eterm segments_watermark;
+
+
+ Eterm calls;
+ Eterm mseg_alloc;
+ Eterm mseg_dealloc;
+ Eterm mseg_realloc;
+ Eterm mseg_create;
+ Eterm mseg_destroy;
+#if HAVE_MSEG_RECREATE
+ Eterm mseg_recreate;
+#endif
+ Eterm mseg_clear_cache;
+ Eterm mseg_check_cache;
+
+#ifdef DEBUG
+ Eterm end_of_atoms;
+#endif
+} am;
+
+static void ERTS_INLINE atom_init(Eterm *atom, char *name)
+{
+ *atom = am_atom_put(name, strlen(name));
+}
+#define AM_INIT(AM) atom_init(&am.AM, #AM)
+
+static void
+init_atoms(void)
+{
+#ifdef DEBUG
+ Eterm *atom;
+#endif
+
+ erts_mtx_unlock(&mseg_mutex);
+ erts_mtx_lock(&init_atoms_mutex);
+
+ if (!atoms_initialized) {
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom <= &am.end_of_atoms; atom++) {
+ *atom = THE_NON_VALUE;
+ }
+#endif
+
+ AM_INIT(version);
+
+ AM_INIT(options);
+ AM_INIT(amcbf);
+ AM_INIT(rmcbf);
+ AM_INIT(mcs);
+ AM_INIT(cci);
+
+ AM_INIT(status);
+ AM_INIT(cached_segments);
+ AM_INIT(cache_hits);
+ AM_INIT(segments);
+ AM_INIT(segments_size);
+ AM_INIT(segments_watermark);
+
+ AM_INIT(calls);
+ AM_INIT(mseg_alloc);
+ AM_INIT(mseg_dealloc);
+ AM_INIT(mseg_realloc);
+ AM_INIT(mseg_create);
+ AM_INIT(mseg_destroy);
+#if HAVE_MSEG_RECREATE
+ AM_INIT(mseg_recreate);
+#endif
+ AM_INIT(mseg_clear_cache);
+ AM_INIT(mseg_check_cache);
+
+#ifdef DEBUG
+ for (atom = (Eterm *) &am; atom < &am.end_of_atoms; atom++) {
+ ASSERT(*atom != THE_NON_VALUE);
+ }
+#endif
+ }
+
+ erts_mtx_lock(&mseg_mutex);
+ atoms_initialized = 1;
+ erts_mtx_unlock(&init_atoms_mutex);
+}
+
+
+#define bld_uint erts_bld_uint
+#define bld_cons erts_bld_cons
+#define bld_tuple erts_bld_tuple
+#define bld_string erts_bld_string
+#define bld_2tup_list erts_bld_2tup_list
+
+
+/*
+ * bld_unstable_uint() (instead of bld_uint()) is used when values may
+ * change between the size check and the actual build. This is because a
+ * value that would fit in a small when the size check is done may need to
+ * be built as a big when the actual build is performed. The caller is
+ * required to HRelease after the build.
+ */
+static ERTS_INLINE Eterm
+bld_unstable_uint(Uint **hpp, Uint *szp, Uint ui)
+{
+ Eterm res = THE_NON_VALUE;
+ if (szp)
+ *szp += BIG_UINT_HEAP_SIZE;
+ if (hpp) {
+ if (IS_USMALL(0, ui))
+ res = make_small(ui);
+ else {
+ res = uint_to_big(ui, *hpp);
+ *hpp += BIG_UINT_HEAP_SIZE;
+ }
+ }
+ return res;
+}
+
+static ERTS_INLINE void
+add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2)
+{
+ *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 2, el1, el2), *lp);
+}
+
+static ERTS_INLINE void
+add_3tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2, Eterm el3)
+{
+ *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 3, el1, el2, el3), *lp);
+}
+
+static ERTS_INLINE void
+add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
+ Eterm el1, Eterm el2, Eterm el3, Eterm el4)
+{
+ *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp);
+}
+
+static Eterm
+info_options(char *prefix,
+ int *print_to_p,
+ void *print_to_arg,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+ erts_print(to, arg, "%samcbf: %bpu\n", prefix, abs_max_cache_bad_fit);
+ erts_print(to, arg, "%srmcbf: %bpu\n", prefix, rel_max_cache_bad_fit);
+ erts_print(to, arg, "%smcs: %bpu\n", prefix, max_cache_size);
+ erts_print(to, arg, "%scci: %bpu\n", prefix, cache_check_interval);
+ }
+
+ if (hpp || szp) {
+
+ if (!atoms_initialized)
+ init_atoms();
+
+ res = NIL;
+ add_2tup(hpp, szp, &res,
+ am.cci,
+ bld_uint(hpp, szp, cache_check_interval));
+ add_2tup(hpp, szp, &res,
+ am.mcs,
+ bld_uint(hpp, szp, max_cache_size));
+ add_2tup(hpp, szp, &res,
+ am.rmcbf,
+ bld_uint(hpp, szp, rel_max_cache_bad_fit));
+ add_2tup(hpp, szp, &res,
+ am.amcbf,
+ bld_uint(hpp, szp, abs_max_cache_bad_fit));
+
+ }
+
+ return res;
+}
+
+static Eterm
+info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+
+ if (print_to_p) {
+
+#define PRINT_CC(TO, TOA, CC) \
+ if (calls.CC.giga_no == 0) \
+ erts_print(TO, TOA, "mseg_%s calls: %bpu\n", #CC, calls.CC.no); \
+ else \
+ erts_print(TO, TOA, "mseg_%s calls: %bpu%09bpu\n", #CC, \
+ calls.CC.giga_no, calls.CC.no)
+
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+
+ PRINT_CC(to, arg, alloc);
+ PRINT_CC(to, arg, dealloc);
+ PRINT_CC(to, arg, realloc);
+ PRINT_CC(to, arg, create);
+ PRINT_CC(to, arg, destroy);
+#if HAVE_MSEG_RECREATE
+ PRINT_CC(to, arg, recreate);
+#endif
+ PRINT_CC(to, arg, clear_cache);
+ PRINT_CC(to, arg, check_cache);
+
+#undef PRINT_CC
+
+ }
+
+ if (hpp || szp) {
+
+ res = NIL;
+
+ add_3tup(hpp, szp, &res,
+ am.mseg_check_cache,
+ bld_unstable_uint(hpp, szp, calls.check_cache.giga_no),
+ bld_unstable_uint(hpp, szp, calls.check_cache.no));
+ add_3tup(hpp, szp, &res,
+ am.mseg_clear_cache,
+ bld_unstable_uint(hpp, szp, calls.clear_cache.giga_no),
+ bld_unstable_uint(hpp, szp, calls.clear_cache.no));
+
+#if HAVE_MSEG_RECREATE
+ add_3tup(hpp, szp, &res,
+ am.mseg_recreate,
+ bld_unstable_uint(hpp, szp, calls.recreate.giga_no),
+ bld_unstable_uint(hpp, szp, calls.recreate.no));
+#endif
+ add_3tup(hpp, szp, &res,
+ am.mseg_destroy,
+ bld_unstable_uint(hpp, szp, calls.destroy.giga_no),
+ bld_unstable_uint(hpp, szp, calls.destroy.no));
+ add_3tup(hpp, szp, &res,
+ am.mseg_create,
+ bld_unstable_uint(hpp, szp, calls.create.giga_no),
+ bld_unstable_uint(hpp, szp, calls.create.no));
+
+
+ add_3tup(hpp, szp, &res,
+ am.mseg_realloc,
+ bld_unstable_uint(hpp, szp, calls.realloc.giga_no),
+ bld_unstable_uint(hpp, szp, calls.realloc.no));
+ add_3tup(hpp, szp, &res,
+ am.mseg_dealloc,
+ bld_unstable_uint(hpp, szp, calls.dealloc.giga_no),
+ bld_unstable_uint(hpp, szp, calls.dealloc.no));
+ add_3tup(hpp, szp, &res,
+ am.mseg_alloc,
+ bld_unstable_uint(hpp, szp, calls.alloc.giga_no),
+ bld_unstable_uint(hpp, szp, calls.alloc.no));
+ }
+
+ return res;
+}
+
+static Eterm
+info_status(int *print_to_p,
+ void *print_to_arg,
+ int begin_new_max_period,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+
+ if (segments.max_ever.no < segments.max.no)
+ segments.max_ever.no = segments.max.no;
+ if (segments.max_ever.sz < segments.max.sz)
+ segments.max_ever.sz = segments.max.sz;
+
+ if (print_to_p) {
+ int to = *print_to_p;
+ void *arg = print_to_arg;
+
+ erts_print(to, arg, "cached_segments: %bpu\n", cache_size);
+ erts_print(to, arg, "cache_hits: %bpu\n", cache_hits);
+ erts_print(to, arg, "segments: %bpu %bpu %bpu\n",
+ segments.current.no, segments.max.no, segments.max_ever.no);
+ erts_print(to, arg, "segments_size: %bpu %bpu %bpu\n",
+ segments.current.sz, segments.max.sz, segments.max_ever.sz);
+ erts_print(to, arg, "segments_watermark: %bpu\n",
+ segments.current.watermark);
+ }
+
+ if (hpp || szp) {
+ res = NIL;
+ add_2tup(hpp, szp, &res,
+ am.segments_watermark,
+ bld_unstable_uint(hpp, szp, segments.current.watermark));
+ add_4tup(hpp, szp, &res,
+ am.segments_size,
+ bld_unstable_uint(hpp, szp, segments.current.sz),
+ bld_unstable_uint(hpp, szp, segments.max.sz),
+ bld_unstable_uint(hpp, szp, segments.max_ever.sz));
+ add_4tup(hpp, szp, &res,
+ am.segments,
+ bld_unstable_uint(hpp, szp, segments.current.no),
+ bld_unstable_uint(hpp, szp, segments.max.no),
+ bld_unstable_uint(hpp, szp, segments.max_ever.no));
+ add_2tup(hpp, szp, &res,
+ am.cache_hits,
+ bld_unstable_uint(hpp, szp, cache_hits));
+ add_2tup(hpp, szp, &res,
+ am.cached_segments,
+ bld_unstable_uint(hpp, szp, cache_size));
+
+ }
+
+ if (begin_new_max_period) {
+ segments.max.no = segments.current.no;
+ segments.max.sz = segments.current.sz;
+ }
+
+ return res;
+}
+
+static Eterm
+info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+
+ if (print_to_p) {
+ erts_print(*print_to_p, print_to_arg, "version: %s\n",
+ ERTS_MSEG_VSN_STR);
+ }
+
+ if (hpp || szp) {
+ res = bld_string(hpp, szp, ERTS_MSEG_VSN_STR);
+ }
+
+ return res;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
+ * Exported functions *
+\* */
+
+Eterm
+erts_mseg_info_options(int *print_to_p, void *print_to_arg,
+ Uint **hpp, Uint *szp)
+{
+ Eterm res;
+
+ erts_mtx_lock(&mseg_mutex);
+
+ res = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+
+ erts_mtx_unlock(&mseg_mutex);
+
+ return res;
+}
+
+Eterm
+erts_mseg_info(int *print_to_p,
+ void *print_to_arg,
+ int begin_max_per,
+ Uint **hpp,
+ Uint *szp)
+{
+ Eterm res = THE_NON_VALUE;
+ Eterm atoms[4];
+ Eterm values[4];
+
+ erts_mtx_lock(&mseg_mutex);
+
+ if (hpp || szp) {
+
+ if (!atoms_initialized)
+ init_atoms();
+
+ atoms[0] = am.version;
+ atoms[1] = am.options;
+ atoms[2] = am.status;
+ atoms[3] = am.calls;
+ }
+
+ values[0] = info_version(print_to_p, print_to_arg, hpp, szp);
+ values[1] = info_options("option ", print_to_p, print_to_arg, hpp, szp);
+ values[2] = info_status(print_to_p, print_to_arg, begin_max_per, hpp, szp);
+ values[3] = info_calls(print_to_p, print_to_arg, hpp, szp);
+
+ if (hpp || szp)
+ res = bld_2tup_list(hpp, szp, 4, atoms, values);
+
+ erts_mtx_unlock(&mseg_mutex);
+
+ return res;
+}
+
+void *
+erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
+{
+ void *seg;
+ erts_mtx_lock(&mseg_mutex);
+ seg = mseg_alloc(atype, size_p, opt);
+ erts_mtx_unlock(&mseg_mutex);
+ return seg;
+}
+
+void *
+erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p)
+{
+ return erts_mseg_alloc_opt(atype, size_p, &default_opt);
+}
+
+void
+erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, Uint size,
+ const ErtsMsegOpt_t *opt)
+{
+ erts_mtx_lock(&mseg_mutex);
+ mseg_dealloc(atype, seg, size, opt);
+ erts_mtx_unlock(&mseg_mutex);
+}
+
+void
+erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size)
+{
+ erts_mseg_dealloc_opt(atype, seg, size, &default_opt);
+}
+
+void *
+erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, Uint old_size,
+ Uint *new_size_p, const ErtsMsegOpt_t *opt)
+{
+ void *new_seg;
+ erts_mtx_lock(&mseg_mutex);
+ new_seg = mseg_realloc(atype, seg, old_size, new_size_p, opt);
+ erts_mtx_unlock(&mseg_mutex);
+ return new_seg;
+}
+
+void *
+erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size,
+ Uint *new_size_p)
+{
+ return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &default_opt);
+}
+
+void
+erts_mseg_clear_cache(void)
+{
+ erts_mtx_lock(&mseg_mutex);
+ mseg_clear_cache();
+ erts_mtx_unlock(&mseg_mutex);
+}
+
+Uint
+erts_mseg_no(void)
+{
+ Uint n;
+ erts_mtx_lock(&mseg_mutex);
+ n = segments.current.no;
+ erts_mtx_unlock(&mseg_mutex);
+ return n;
+}
+
+Uint
+erts_mseg_unit_size(void)
+{
+ return page_size;
+}
+
+void
+erts_mseg_init(ErtsMsegInit_t *init)
+{
+ unsigned i;
+
+ atoms_initialized = 0;
+ is_init_done = 0;
+
+ /* Options ... */
+
+ abs_max_cache_bad_fit = init->amcbf;
+ rel_max_cache_bad_fit = init->rmcbf;
+ max_cache_size = init->mcs;
+ cache_check_interval = init->cci;
+
+ /* */
+
+#ifdef USE_THREADS
+ thread_safe_init();
+#endif
+
+#if HAVE_MMAP && !defined(MAP_ANON)
+ mmap_fd = open("/dev/zero", O_RDWR);
+ if (mmap_fd < 0)
+ erl_exit(ERTS_ABORT_EXIT, "erts_mseg: unable to open /dev/zero\n");
+#endif
+
+ page_size = GET_PAGE_SIZE;
+
+ page_shift = 1;
+ while ((page_size >> page_shift) != 1) {
+ if ((page_size & (1 << (page_shift - 1))) != 0)
+ erl_exit(ERTS_ABORT_EXIT,
+ "erts_mseg: Unexpected page_size %bpu\n", page_size);
+ page_shift++;
+ }
+
+ sys_memzero((void *) &calls, sizeof(calls));
+
+#if CAN_PARTLY_DESTROY
+ min_seg_size = ~((Uint) 0);
+#endif
+
+ cache = NULL;
+ cache_end = NULL;
+ cache_hits = 0;
+ max_cached_seg_size = 0;
+ min_cached_seg_size = ~((Uint) 0);
+ cache_size = 0;
+
+ is_cache_check_scheduled = 0;
+#ifdef ERTS_THREADS_NO_SMP
+ is_cache_check_requested = 0;
+#endif
+
+ if (max_cache_size > MAX_CACHE_SIZE)
+ max_cache_size = MAX_CACHE_SIZE;
+
+ if (max_cache_size > 0) {
+ for (i = 0; i < max_cache_size - 1; i++)
+ cache_descs[i].next = &cache_descs[i + 1];
+ cache_descs[max_cache_size - 1].next = NULL;
+ free_cache_descs = &cache_descs[0];
+ }
+ else
+ free_cache_descs = NULL;
+
+ segments.current.watermark = 0;
+ segments.current.no = 0;
+ segments.current.sz = 0;
+ segments.max.no = 0;
+ segments.max.sz = 0;
+ segments.max_ever.no = 0;
+ segments.max_ever.sz = 0;
+}
+
+
+/*
+ * erts_mseg_late_init() has to be called after all allocators,
+ * threads and timers have been initialized.
+ */
+void
+erts_mseg_late_init(void)
+{
+#ifdef ERTS_THREADS_NO_SMP
+ int handle =
+ erts_register_async_ready_callback(
+ check_schedule_cache_check);
+#endif
+ erts_mtx_lock(&mseg_mutex);
+ is_init_done = 1;
+#ifdef ERTS_THREADS_NO_SMP
+ async_handle = handle;
+#endif
+ if (cache_size)
+ schedule_cache_check();
+ erts_mtx_unlock(&mseg_mutex);
+}
+
+void
+erts_mseg_exit(void)
+{
+ mseg_shutdown();
+}
+
+#endif /* #if HAVE_ERTS_MSEG */
+
+unsigned long
+erts_mseg_test(unsigned long op,
+ unsigned long a1,
+ unsigned long a2,
+ unsigned long a3)
+{
+ switch (op) {
+#if HAVE_ERTS_MSEG
+ case 0x400: /* Have erts_mseg */
+ return (unsigned long) 1;
+ case 0x401:
+ return (unsigned long) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1);
+ case 0x402:
+ erts_mseg_dealloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2);
+ return (unsigned long) 0;
+ case 0x403:
+ return (unsigned long) erts_mseg_realloc(ERTS_ALC_A_INVALID,
+ (void *) a1,
+ (Uint) a2,
+ (Uint *) a3);
+ case 0x404:
+ erts_mseg_clear_cache();
+ return (unsigned long) 0;
+ case 0x405:
+ return (unsigned long) erts_mseg_no();
+ case 0x406: {
+ unsigned long res;
+ erts_mtx_lock(&mseg_mutex);
+ res = (unsigned long) cache_size;
+ erts_mtx_unlock(&mseg_mutex);
+ return res;
+ }
+#else /* #if HAVE_ERTS_MSEG */
+ case 0x400: /* Have erts_mseg */
+ return (unsigned long) 0;
+#endif /* #if HAVE_ERTS_MSEG */
+ default: ASSERT(0); return ~((unsigned long) 0);
+ }
+
+}
+
+
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
new file mode 100644
index 0000000000..1c5aa63e90
--- /dev/null
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -0,0 +1,97 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_MSEG_H_
+#define ERL_MSEG_H_
+
+#include "sys.h"
+#include "erl_alloc_types.h"
+
+#ifndef HAVE_MMAP
+# define HAVE_MMAP 0
+#endif
+#ifndef HAVE_MREMAP
+# define HAVE_MREMAP 0
+#endif
+
+#if HAVE_MMAP
+# define HAVE_ERTS_MSEG 1
+#else
+# define HAVE_ERTS_MSEG 0
+#endif
+
+#if HAVE_ERTS_MSEG
+
+#define ERTS_MSEG_VSN_STR "0.9"
+
+typedef struct {
+ Uint amcbf;
+ Uint rmcbf;
+ Uint mcs;
+ Uint cci;
+} ErtsMsegInit_t;
+
+#define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \
+{ \
+ 4*1024*1024, /* amcbf: Absolute max cache bad fit */ \
+ 20, /* rmcbf: Relative max cache bad fit */ \
+ 5, /* mcs: Max cache size */ \
+ 1000 /* cci: Cache check interval */ \
+}
+
+typedef struct {
+ int cache;
+ int preserv;
+ Uint abs_shrink_th;
+ Uint rel_shrink_th;
+} ErtsMsegOpt_t;
+
+#define ERTS_MSEG_DEFAULT_OPT_INITIALIZER \
+{ \
+ 1, /* Use cache */ \
+ 1, /* Preserv data */ \
+ 0, /* Absolute shrink threshold */ \
+ 0 /* Relative shrink threshold */ \
+}
+
+void *erts_mseg_alloc(ErtsAlcType_t, Uint *);
+void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, const ErtsMsegOpt_t *);
+void erts_mseg_dealloc(ErtsAlcType_t, void *, Uint);
+void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, Uint, const ErtsMsegOpt_t *);
+void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *);
+void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *,
+ const ErtsMsegOpt_t *);
+void erts_mseg_clear_cache(void);
+Uint erts_mseg_no(void);
+Uint erts_mseg_unit_size(void);
+void erts_mseg_init(ErtsMsegInit_t *init);
+void erts_mseg_late_init(void); /* Has to be called after all allocators,
+ threads and timers have been initialized. */
+void erts_mseg_exit(void);
+Eterm erts_mseg_info_options(int *, void*, Uint **, Uint *);
+Eterm erts_mseg_info(int *, void*, int, Uint **, Uint *);
+
+#endif /* #if HAVE_ERTS_MSEG */
+
+unsigned long erts_mseg_test(unsigned long,
+ unsigned long,
+ unsigned long,
+ unsigned long);
+
+#endif /* #ifndef ERL_MSEG_H_ */
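A hedged usage sketch (not part of this commit) of the interface declared above. erts_mseg_init() and erts_mseg_late_init() are assumed to have been run during emulator start-up, and ERTS_ALC_A_SYSTEM is used purely as an example allocator type. The requested size is passed by pointer because the allocator rounds it up to whole pages and reports the actual segment size back to the caller.

#include "erl_mseg.h"

static void mseg_usage_sketch(void)
{
#if HAVE_ERTS_MSEG
    Uint size = 1024 * 1024;                       /* want at least 1 MB */
    void *seg = erts_mseg_alloc(ERTS_ALC_A_SYSTEM, &size);

    if (seg) {
        /* size now holds the real, page-aligned size of the segment */
        ASSERT(size % erts_mseg_unit_size() == 0);
        erts_mseg_dealloc(ERTS_ALC_A_SYSTEM, seg, size);
    }
#endif
}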
diff --git a/erts/emulator/sys/common/erl_mtrace_sys_wrap.c b/erts/emulator/sys/common/erl_mtrace_sys_wrap.c
new file mode 100644
index 0000000000..408aa7e016
--- /dev/null
+++ b/erts/emulator/sys/common/erl_mtrace_sys_wrap.c
@@ -0,0 +1,245 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2004-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "erl_mtrace.h"
+
+#ifdef ERTS_CAN_TRACK_MALLOC
+#if defined(HAVE_END_SYMBOL)
+extern char end;
+#elif defined(HAVE__END_SYMBOL)
+extern char _end;
+#endif
+
+static int inited = 0;
+static int init(void);
+
+static volatile char *heap_start = NULL;
+static volatile char *heap_end = NULL;
+
+#if defined(ERTS___AFTER_MORECORE_HOOK_CAN_TRACK_MALLOC) /* ----------------- */
+
+#ifdef HAVE_MALLOC_H
+# include <malloc.h>
+#endif
+
+#undef SBRK_0
+#define SBRK_0 sbrk(0)
+
+static void
+init_hook(void)
+{
+ __after_morecore_hook = erts_mtrace_update_heap_size;
+ if (inited)
+ return;
+ heap_end = NULL;
+#if defined(HAVE_END_SYMBOL)
+ heap_start = &end;
+#elif defined(HAVE__END_SYMBOL)
+ heap_start = &_end;
+#else
+ heap_start = SBRK_0;
+ if (heap_start == (SBRK_RET_TYPE) -1) {
+ heap_start = NULL;
+ return;
+ }
+#endif
+ inited = 1;
+}
+
+static int
+init(void)
+{
+ init_hook();
+ return inited;
+}
+
+void (*__malloc_initialize_hook)(void) = init_hook;
+
+#elif defined(ERTS_BRK_WRAPPERS_CAN_TRACK_MALLOC) /* ------------------------ */
+#ifdef HAVE_DLFCN_H
+# include <dlfcn.h>
+#endif
+
+#undef SBRK_0
+#define SBRK_0 (*real_sbrk)(0)
+
+#ifndef HAVE_SBRK
+# error no sbrk()
+#endif
+#if !defined(HAVE_END_SYMBOL) && !defined(HAVE__END_SYMBOL)
+# error no 'end' nor '_end'
+#endif
+
+static void update_heap_size(char *new_end);
+
+#define SBRK_IMPL(RET_TYPE, FUNC, ARG_TYPE) \
+RET_TYPE FUNC (ARG_TYPE); \
+static RET_TYPE (*real_ ## FUNC)(ARG_TYPE) = NULL; \
+RET_TYPE FUNC (ARG_TYPE arg) \
+{ \
+ RET_TYPE res; \
+ if (!inited && !init()) \
+ return (RET_TYPE) -1; \
+ res = (*real_ ## FUNC)(arg); \
+ if (erts_mtrace_enabled && res != ((RET_TYPE) -1)) \
+ update_heap_size((char *) (*real_ ## FUNC)(0)); \
+ return res; \
+}
+
+#define BRK_IMPL(RET_TYPE, FUNC, ARG_TYPE) \
+RET_TYPE FUNC (ARG_TYPE); \
+static RET_TYPE (*real_ ## FUNC)(ARG_TYPE) = NULL; \
+RET_TYPE FUNC (ARG_TYPE arg) \
+{ \
+ RET_TYPE res; \
+ if (!inited && !init()) \
+ return (RET_TYPE) -1; \
+ res = (*real_ ## FUNC)(arg); \
+ if (erts_mtrace_enabled && res != ((RET_TYPE) -1)) \
+ update_heap_size((char *) arg); \
+ return res; \
+}
+
+SBRK_IMPL(SBRK_RET_TYPE, sbrk, SBRK_ARG_TYPE)
+#ifdef HAVE_BRK
+ BRK_IMPL(BRK_RET_TYPE, brk, BRK_ARG_TYPE)
+#endif
+
+#ifdef HAVE__SBRK
+ SBRK_IMPL(SBRK_RET_TYPE, _sbrk, SBRK_ARG_TYPE)
+#endif
+#ifdef HAVE__BRK
+ BRK_IMPL(BRK_RET_TYPE, _brk, BRK_ARG_TYPE)
+#endif
+
+#ifdef HAVE___SBRK
+ SBRK_IMPL(SBRK_RET_TYPE, __sbrk, SBRK_ARG_TYPE)
+#endif
+#ifdef HAVE___BRK
+ BRK_IMPL(BRK_RET_TYPE, __brk, BRK_ARG_TYPE)
+#endif
+
+static int
+init(void)
+{
+ if (inited)
+ return 1;
+
+#define INIT_XBRK_SYM(SYM) \
+do { \
+ if (!real_ ## SYM) { \
+ real_ ## SYM = dlsym(RTLD_NEXT, #SYM); \
+ if (!real_ ## SYM) { \
+ errno = ENOMEM; \
+ return 0; \
+ } \
+ } \
+} while (0)
+
+ heap_end = NULL;
+#if defined(HAVE_END_SYMBOL)
+ heap_start = &end;
+#elif defined(HAVE__END_SYMBOL)
+ heap_start = &_end;
+#endif
+
+ INIT_XBRK_SYM(sbrk);
+#ifdef HAVE_BRK
+ INIT_XBRK_SYM(brk);
+#endif
+#ifdef HAVE__SBRK
+ INIT_XBRK_SYM(_sbrk);
+#endif
+#ifdef HAVE__BRK
+ INIT_XBRK_SYM(_brk);
+#endif
+#ifdef HAVE___SBRK
+ INIT_XBRK_SYM(__sbrk);
+#endif
+#ifdef HAVE___BRK
+ INIT_XBRK_SYM(__brk);
+#endif
+
+ return inited = 1;
+#undef INIT_XBRK_SYM
+}
+
+#endif /* #elif defined(ERTS_BRK_WRAPPERS_CAN_TRACK_MALLOC) */ /* ----------- */
+
+static void
+update_heap_size(char *new_end)
+{
+ volatile char *new_start, *old_start, *old_end;
+ Uint size;
+
+ if (new_end == ((char *) -1))
+ return;
+
+ new_start = (old_start = heap_start);
+ old_end = heap_end;
+ heap_end = new_end;
+ if (new_end < old_start || !old_start)
+ heap_start = (new_start = new_end);
+
+ size = (Uint) (new_end - new_start);
+
+ if (!old_end) {
+ if (size)
+ erts_mtrace_crr_alloc((void *) new_start,
+ ERTS_ALC_A_SYSTEM,
+ ERTS_MTRACE_SEGMENT_ID,
+ size);
+ else
+ heap_end = NULL;
+ }
+ else {
+ if (old_end != new_end || old_start != new_start) {
+
+ if (size)
+ erts_mtrace_crr_realloc((void *) new_start,
+ ERTS_ALC_A_SYSTEM,
+ ERTS_MTRACE_SEGMENT_ID,
+ (void *) old_start,
+ size);
+ else {
+ if (old_start)
+ erts_mtrace_crr_free(ERTS_ALC_A_SYSTEM,
+ ERTS_MTRACE_SEGMENT_ID,
+ (void *) old_start);
+ heap_end = NULL;
+ }
+ }
+ }
+}
+
+#endif /* #ifdef ERTS_CAN_TRACK_MALLOC */
+
+void
+erts_mtrace_update_heap_size(void)
+{
+#ifdef ERTS_CAN_TRACK_MALLOC
+ if (erts_mtrace_enabled && (inited || init()))
+ update_heap_size((char *) SBRK_0);
+#endif
+}
+
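The wrappers above resolve the real libc entry points with dlsym(RTLD_NEXT, ...) and rely on the config-detected SBRK_RET_TYPE/SBRK_ARG_TYPE. Below is a standalone sketch of the same interposition pattern (not part of this commit), assuming the common glibc prototype void *sbrk(intptr_t) and merely counting calls instead of updating the mtrace heap bookkeeping; it would be built as a shared object and activated with LD_PRELOAD.

#define _GNU_SOURCE                    /* for RTLD_NEXT on glibc */
#include <dlfcn.h>
#include <stdint.h>
#include <unistd.h>

static void *(*real_sbrk)(intptr_t) = NULL;
static unsigned long sbrk_calls;       /* stand-in for the real bookkeeping */

void *sbrk(intptr_t increment)
{
    if (!real_sbrk) {
        real_sbrk = (void *(*)(intptr_t)) dlsym(RTLD_NEXT, "sbrk");
        if (!real_sbrk)
            return (void *) -1;        /* could not resolve the next sbrk() */
    }
    sbrk_calls++;
    return real_sbrk(increment);
}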
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
new file mode 100644
index 0000000000..169d4579a2
--- /dev/null
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -0,0 +1,2693 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Poll interface suitable for ERTS with or without
+ * SMP support.
+ *
+ * The interface is currently implemented using:
+ * - select
+ * - poll
+ * - /dev/poll
+ * - epoll with poll or select as fallback
+ * - kqueue with poll or select as fallback
+ *
+ * Some time in the future it will also be
+ * implemented using Solaris ports.
+ *
+ *
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifndef WANT_NONBLOCKING
+# define WANT_NONBLOCKING
+#endif
+#define ERTS_WANT_GOT_SIGUSR1
+
+#include "erl_poll.h"
+#if ERTS_POLL_USE_KQUEUE
+# include <sys/types.h>
+# include <sys/event.h>
+# include <sys/time.h>
+#endif
+#if ERTS_POLL_USE_SELECT
+# ifdef SYS_SELECT_H
+# include <sys/select.h>
+# endif
+# ifdef VXWORKS
+# include <selectLib.h>
+# endif
+#endif
+#ifndef VXWORKS
+# ifdef NO_SYSCONF
+# if ERTS_POLL_USE_SELECT
+# include <sys/param.h>
+# else
+# include <limits.h>
+# endif
+# endif
+#endif
+#include "erl_driver.h"
+#include "erl_alloc.h"
+
+#if !defined(ERTS_POLL_USE_EPOLL) \
+ && !defined(ERTS_POLL_USE_DEVPOLL) \
+ && !defined(ERTS_POLL_USE_POLL) \
+ && !defined(ERTS_POLL_USE_SELECT)
+#error "Missing implementation of erts_poll()"
+#endif
+
+#if defined(ERTS_KERNEL_POLL_VERSION) && !ERTS_POLL_USE_KERNEL_POLL
+#error "Missing kernel poll implementation of erts_poll()"
+#endif
+
+#if defined(ERTS_NO_KERNEL_POLL_VERSION) && ERTS_POLL_USE_KERNEL_POLL
+#error "Kernel poll used when it shouldn't be used"
+#endif
+
+#if 0
+#define ERTS_POLL_DEBUG_PRINT
+#endif
+
+#if defined(DEBUG) && 0
+#define HARD_DEBUG
+#endif
+
+#define ERTS_POLL_USE_BATCH_UPDATE_POLLSET (ERTS_POLL_USE_DEVPOLL \
+ || ERTS_POLL_USE_KQUEUE)
+#define ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE \
+ (defined(ERTS_SMP) || ERTS_POLL_USE_KERNEL_POLL || ERTS_POLL_USE_POLL)
+
+#define ERTS_POLL_USE_CONCURRENT_UPDATE \
+ (defined(ERTS_SMP) && ERTS_POLL_USE_EPOLL)
+
+#define ERTS_POLL_COALESCE_KP_RES (ERTS_POLL_USE_KQUEUE || ERTS_POLL_USE_EPOLL)
+
+#define FDS_STATUS_EXTRA_FREE_SIZE 128
+#define POLL_FDS_EXTRA_FREE_SIZE 128
+
+#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+# define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 1
+#else
+# define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 0
+#endif
+
+#define ERTS_POLL_USE_WAKEUP_PIPE \
+ (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP))
+
+#ifdef ERTS_SMP
+
+#define ERTS_POLLSET_LOCK(PS) \
+ erts_smp_mtx_lock(&(PS)->mtx)
+#define ERTS_POLLSET_UNLOCK(PS) \
+ erts_smp_mtx_unlock(&(PS)->mtx)
+
+#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
+ ((int) erts_smp_atomic_xchg(&(PS)->polled, (long) 1))
+#define ERTS_POLLSET_UNSET_POLLED(PS) \
+ erts_smp_atomic_set(&(PS)->polled, (long) 0)
+#define ERTS_POLLSET_IS_POLLED(PS) \
+ ((int) erts_smp_atomic_read(&(PS)->polled))
+
+#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) \
+ ((int) erts_smp_atomic_xchg(&(PS)->woken, (long) 1))
+#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
+ erts_smp_atomic_set(&(PS)->woken, (long) 1)
+#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
+ erts_smp_atomic_set(&(PS)->woken, (long) 0)
+#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
+ ((int) erts_smp_atomic_read(&(PS)->woken))
+
+#else
+
+#define ERTS_POLLSET_LOCK(PS)
+#define ERTS_POLLSET_UNLOCK(PS)
+#define ERTS_POLLSET_SET_POLLED_CHK(PS) 0
+#define ERTS_POLLSET_UNSET_POLLED(PS)
+#define ERTS_POLLSET_IS_POLLED(PS) 0
+
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+
+/*
+ * Ideally, the ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) operation would
+ * be atomic. This operation isn't, but we will do okay anyway. The
+ * "woken check" is only an optimization. The only requirement we have:
+ * If (PS)->woken is set to a value != 0 when interrupting, we have to
+ * write on the wakeup pipe at least once. Multiple writes are okay.
+ */
+#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) ((PS)->woken++)
+#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) ((PS)->woken = 1, (void) 0)
+#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) ((PS)->woken = 0, (void) 0)
+#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) ((PS)->woken)
+
+#else
+
+#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) 1
+#define ERTS_POLLSET_SET_POLLER_WOKEN(PS)
+#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS)
+#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) 1
+
+#endif
+
+#endif
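The woken flag above implements a "wake at most once" gate: in the SMP case an atomic exchange decides which caller gets to touch the wakeup pipe (see wake_poller() further down), while in the single-threaded case a plain flag is sufficient. A standalone sketch of the same gating (not part of this commit), using C11 atomics instead of the erts_smp_atomic_* wrappers:

#include <stdatomic.h>
#include <unistd.h>

static atomic_int woken;               /* cleared by the poller before it sleeps */

static void wake(int wake_fd)
{
    /* Only the first waker since the flag was last cleared writes a byte;
     * later wakers see the old value 1 and do nothing. */
    if (atomic_exchange(&woken, 1) == 0)
        (void) write(wake_fd, "!", 1);
}

static void poller_about_to_sleep(void)
{
    atomic_store(&woken, 0);           /* re-arm the gate */
}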
+
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \
+ erts_smp_atomic_set(&(PS)->have_update_requests, (long) 1)
+#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \
+ erts_smp_atomic_set(&(PS)->have_update_requests, (long) 0)
+#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \
+ ((int) erts_smp_atomic_read(&(PS)->have_update_requests))
+#else
+#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS)
+#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS)
+#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) 0
+#endif
+
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
+
+#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) unset_interrupted_chk((PS))
+#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) ((PS)->interrupt = 0, (void) 0)
+#define ERTS_POLLSET_SET_INTERRUPTED(PS) ((PS)->interrupt = 1, (void) 0)
+#define ERTS_POLLSET_IS_INTERRUPTED(PS) ((PS)->interrupt)
+
+#else
+
+#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) \
+ ((int) erts_smp_atomic_xchg(&(PS)->interrupt, (long) 0))
+#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
+ erts_smp_atomic_set(&(PS)->interrupt, (long) 0)
+#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
+ erts_smp_atomic_set(&(PS)->interrupt, (long) 1)
+#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
+ ((int) erts_smp_atomic_read(&(PS)->interrupt))
+
+#endif
+
+#if ERTS_POLL_USE_FALLBACK
+# if ERTS_POLL_USE_POLL
+# define ERTS_POLL_NEED_FALLBACK(PS) ((PS)->no_poll_fds > 1)
+# elif ERTS_POLL_USE_SELECT
+# define ERTS_POLL_NEED_FALLBACK(PS) ((PS)->no_select_fds > 1)
+# endif
+#endif
+/*
+ * --- Data types ------------------------------------------------------------
+ */
+
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+#define ERTS_POLLSET_UPDATE_REQ_BLOCK_SIZE 128
+
+typedef struct ErtsPollSetUpdateRequestsBlock_ ErtsPollSetUpdateRequestsBlock;
+struct ErtsPollSetUpdateRequestsBlock_ {
+ ErtsPollSetUpdateRequestsBlock *next;
+ int len;
+ int fds[ERTS_POLLSET_UPDATE_REQ_BLOCK_SIZE];
+};
+
+#endif
+
+
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+# define ERTS_POLL_FD_FLG_INURQ (((unsigned short) 1) << 0)
+#endif
+#if ERTS_POLL_USE_FALLBACK
+# define ERTS_POLL_FD_FLG_INFLBCK (((unsigned short) 1) << 1)
+# define ERTS_POLL_FD_FLG_USEFLBCK (((unsigned short) 1) << 2)
+#endif
+#if ERTS_POLL_USE_KERNEL_POLL || defined(ERTS_SMP)
+# define ERTS_POLL_FD_FLG_RST (((unsigned short) 1) << 3)
+#endif
+typedef struct {
+#if ERTS_POLL_USE_POLL
+ int pix;
+#endif
+ ErtsPollEvents used_events;
+ ErtsPollEvents events;
+#if ERTS_POLL_COALESCE_KP_RES
+ unsigned short res_ev_ix;
+#endif
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE || ERTS_POLL_USE_FALLBACK
+ unsigned short flags;
+#endif
+
+} ErtsFdStatus;
+
+
+#if ERTS_POLL_COALESCE_KP_RES
+/* res_ev_ix max value */
+#define ERTS_POLL_MAX_RES ((1 << sizeof(unsigned short)*8) - 1)
+#endif
+
+#if ERTS_POLL_USE_KQUEUE
+
+#define ERTS_POLL_KQ_OP_HANDLED 1
+#define ERTS_POLL_KQ_OP_DEL_R 2
+#define ERTS_POLL_KQ_OP_DEL_W 3
+#define ERTS_POLL_KQ_OP_ADD_R 4
+#define ERTS_POLL_KQ_OP_ADD_W 5
+#define ERTS_POLL_KQ_OP_ADD2_R 6
+#define ERTS_POLL_KQ_OP_ADD2_W 7
+
+#endif
+
+struct ErtsPollSet_ {
+ ErtsPollSet next;
+ int internal_fd_limit;
+ ErtsFdStatus *fds_status;
+ int no_of_user_fds;
+ int fds_status_len;
+#if ERTS_POLL_USE_KERNEL_POLL
+ int kp_fd;
+ int res_events_len;
+#if ERTS_POLL_USE_EPOLL
+ struct epoll_event *res_events;
+#elif ERTS_POLL_USE_KQUEUE
+ struct kevent *res_events;
+#elif ERTS_POLL_USE_DEVPOLL
+ struct pollfd *res_events;
+#endif
+#endif /* ERTS_POLL_USE_KERNEL_POLL */
+#if ERTS_POLL_USE_POLL
+ int next_poll_fds_ix;
+ int no_poll_fds;
+ int poll_fds_len;
+ struct pollfd*poll_fds;
+#elif ERTS_POLL_USE_SELECT
+ int next_sel_fd;
+ int max_fd;
+#if ERTS_POLL_USE_FALLBACK
+ int no_select_fds;
+#endif
+ fd_set input_fds;
+ fd_set res_input_fds;
+ fd_set output_fds;
+ fd_set res_output_fds;
+#endif
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ ErtsPollSetUpdateRequestsBlock update_requests;
+ ErtsPollSetUpdateRequestsBlock *curr_upd_req_block;
+ erts_smp_atomic_t have_update_requests;
+#endif
+#ifdef ERTS_SMP
+ erts_smp_atomic_t polled;
+ erts_smp_atomic_t woken;
+ erts_smp_mtx_t mtx;
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ volatile int woken;
+#endif
+#if ERTS_POLL_USE_WAKEUP_PIPE
+ int wake_fds[2];
+#endif
+#if ERTS_POLL_USE_FALLBACK
+ int fallback_used;
+#endif
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
+ volatile int interrupt;
+#else
+ erts_smp_atomic_t interrupt;
+#endif
+ erts_smp_atomic_t timeout;
+#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
+ erts_smp_atomic_t no_avoided_wakeups;
+ erts_smp_atomic_t no_avoided_interrupts;
+ erts_smp_atomic_t no_interrupt_timed;
+#endif
+};
+
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
+
+static ERTS_INLINE int
+unset_interrupted_chk(ErtsPollSet ps)
+{
+ /* This operation isn't atomic, but we have no need at all for an
+ atomic operation here... */
+ int res = ps->interrupt;
+ ps->interrupt = 0;
+ return res;
+}
+
+#endif
+
+static void fatal_error(char *format, ...);
+static void fatal_error_async_signal_safe(char *error_str);
+
+static int max_fds = -1;
+static ErtsPollSet pollsets;
+static erts_smp_spinlock_t pollsets_lock;
+
+#if ERTS_POLL_USE_POLL
+
+static ERTS_INLINE short
+ev2pollev(ErtsPollEvents ev)
+{
+#if !ERTS_POLL_USE_FALLBACK || ERTS_POLL_USE_KQUEUE
+ return ERTS_POLL_EV_E2N(ev);
+#else /* Note, we only map events we are interested in */
+ short res_ev = (short) 0;
+ if (ev & ERTS_POLL_EV_IN)
+ res_ev |= ERTS_POLL_EV_NKP_IN;
+ if (ev & ERTS_POLL_EV_OUT)
+ res_ev |= ERTS_POLL_EV_NKP_OUT;
+ return res_ev;
+#endif
+}
+
+static ERTS_INLINE ErtsPollEvents
+pollev2ev(short ev)
+{
+#if !ERTS_POLL_USE_FALLBACK || ERTS_POLL_USE_KQUEUE
+ return ERTS_POLL_EV_N2E(ev);
+#else /* Note, we only map events we are interested in */
+ ErtsPollEvents res_ev = (ErtsPollEvents) 0;
+ if (ev & ERTS_POLL_EV_NKP_IN)
+ res_ev |= ERTS_POLL_EV_IN;
+ if (ev & ERTS_POLL_EV_NKP_OUT)
+ res_ev |= ERTS_POLL_EV_OUT;
+ if (ev & ERTS_POLL_EV_NKP_ERR)
+ res_ev |= ERTS_POLL_EV_ERR;
+ if (ev & ERTS_POLL_EV_NKP_NVAL)
+ res_ev |= ERTS_POLL_EV_NVAL;
+ return res_ev;
+#endif
+}
+
+#endif
+
+#ifdef HARD_DEBUG
+static void check_poll_result(ErtsPollResFd pr[], int len);
+#if ERTS_POLL_USE_DEVPOLL
+static void check_poll_status(ErtsPollSet ps);
+#endif /* ERTS_POLL_USE_DEVPOLL */
+#endif /* HARD_DEBUG */
+#ifdef ERTS_POLL_DEBUG_PRINT
+static void print_misc_debug_info(void);
+#endif
+
+/*
+ * --- Wakeup pipe -----------------------------------------------------------
+ */
+
+#if ERTS_POLL_USE_WAKEUP_PIPE
+
+static ERTS_INLINE void
+wake_poller(ErtsPollSet ps)
+{
+ /*
+ * NOTE: This function might be called from signal handlers in the
+ * non-smp case; therefore, it has to be async-signal safe in
+ * the non-smp case.
+ */
+ if (!ERTS_POLLSET_SET_POLLER_WOKEN_CHK(ps)) {
+ ssize_t res;
+ if (ps->wake_fds[1] < 0)
+ return; /* Not initialized yet */
+ do {
+ /* write() is async-signal safe (according to POSIX) */
+ res = write(ps->wake_fds[1], "!", 1);
+ } while (res < 0 && errno == EINTR);
+ if (res <= 0 && errno != ERRNO_BLOCK) {
+ fatal_error_async_signal_safe(__FILE__
+ ":XXX:wake_poller(): "
+ "Failed to write on wakeup pipe\n");
+ }
+ }
+}
+
+static ERTS_INLINE void
+cleanup_wakeup_pipe(ErtsPollSet ps)
+{
+ int fd = ps->wake_fds[0];
+ int res;
+ do {
+ char buf[32];
+ res = read(fd, buf, sizeof(buf));
+ } while (res > 0 || (res < 0 && errno == EINTR));
+ if (res < 0 && errno != ERRNO_BLOCK) {
+ fatal_error("%s:%d:cleanup_wakeup_pipe(): "
+ "Failed to read on wakeup pipe fd=%d: "
+ "%s (%d)\n",
+ __FILE__, __LINE__,
+ fd,
+ erl_errno_id(errno), errno);
+ }
+}
+
+static void
+create_wakeup_pipe(ErtsPollSet ps)
+{
+ int do_wake = 0;
+ int wake_fds[2];
+ ps->wake_fds[0] = -1;
+ ps->wake_fds[1] = -1;
+ if (pipe(wake_fds) < 0) {
+ fatal_error("%s:%d:create_wakeup_pipe(): "
+ "Failed to create pipe: %s (%d)\n",
+ __FILE__,
+ __LINE__,
+ erl_errno_id(errno),
+ errno);
+ }
+ SET_NONBLOCKING(wake_fds[0]);
+ SET_NONBLOCKING(wake_fds[1]);
+
+#ifdef ERTS_POLL_DEBUG_PRINT
+ erts_printf("wakeup fds = {%d, %d}\n", wake_fds[0], wake_fds[1]);
+#endif
+
+ ERTS_POLL_EXPORT(erts_poll_control)(ps,
+ wake_fds[0],
+ ERTS_POLL_EV_IN,
+ 1, &do_wake);
+#if ERTS_POLL_USE_FALLBACK
+ /* We depend on the wakeup pipe being handled by kernel poll */
+ if (ps->fds_status[wake_fds[0]].flags & ERTS_POLL_FD_FLG_INFLBCK)
+ fatal_error("%s:%d:create_wakeup_pipe(): Internal error\n",
+ __FILE__, __LINE__);
+#endif
+ if (ps->internal_fd_limit <= wake_fds[1])
+ ps->internal_fd_limit = wake_fds[1] + 1;
+ if (ps->internal_fd_limit <= wake_fds[0])
+ ps->internal_fd_limit = wake_fds[0] + 1;
+ ps->wake_fds[0] = wake_fds[0];
+ ps->wake_fds[1] = wake_fds[1];
+}
+
+#endif /* ERTS_POLL_USE_WAKEUP_PIPE */
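For reference, a standalone sketch (not part of this commit) of the wakeup-pipe idea implemented above: the read end is part of the polled set, so writing a single byte to the write end makes a blocking poll() return immediately, after which the poller drains the pipe much as cleanup_wakeup_pipe() does. Error handling and the non-blocking setup are reduced to the bare minimum here.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    char buf[32];
    struct pollfd pfd;

    if (pipe(fds) != 0)
        return 1;
    fcntl(fds[0], F_SETFL, O_NONBLOCK);
    fcntl(fds[1], F_SETFL, O_NONBLOCK);

    /* "Interrupt" the poller before it even starts waiting. */
    (void) write(fds[1], "!", 1);

    pfd.fd = fds[0];
    pfd.events = POLLIN;
    if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
        while (read(fds[0], buf, sizeof(buf)) > 0)
            ;                          /* drain so the next poll() can block */
        puts("woken");
    }
    return 0;
}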
+
+/*
+ * --- Poll set update requests ----------------------------------------------
+ */
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+
+static ERTS_INLINE void
+enqueue_update_request(ErtsPollSet ps, int fd)
+{
+ ErtsPollSetUpdateRequestsBlock *urqbp;
+
+ ASSERT(fd < ps->fds_status_len);
+
+ if (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INURQ)
+ return;
+
+ if (ps->update_requests.len == 0)
+ ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(ps);
+
+ urqbp = ps->curr_upd_req_block;
+
+ if (urqbp->len == ERTS_POLLSET_UPDATE_REQ_BLOCK_SIZE) {
+ ASSERT(!urqbp->next);
+ urqbp = erts_alloc(ERTS_ALC_T_POLLSET_UPDREQ,
+ sizeof(ErtsPollSetUpdateRequestsBlock));
+ ps->curr_upd_req_block->next = urqbp;
+ ps->curr_upd_req_block = urqbp;
+ urqbp->next = NULL;
+ urqbp->len = 0;
+ }
+
+ ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INURQ;
+ urqbp->fds[urqbp->len++] = fd;
+}
+
+static ERTS_INLINE void
+free_update_requests_block(ErtsPollSet ps,
+ ErtsPollSetUpdateRequestsBlock *urqbp)
+{
+ if (urqbp != &ps->update_requests)
+ erts_free(ERTS_ALC_T_POLLSET_UPDREQ, (void *) urqbp);
+ else {
+ urqbp->next = NULL;
+ urqbp->len = 0;
+ }
+}
+
+#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
+
+/*
+ * --- Growing poll set structures -------------------------------------------
+ */
+
+#if ERTS_POLL_USE_KERNEL_POLL
+static void
+grow_res_events(ErtsPollSet ps, int new_len)
+{
+ size_t new_size = sizeof(
+#if ERTS_POLL_USE_EPOLL
+ struct epoll_event
+#elif ERTS_POLL_USE_DEVPOLL
+ struct pollfd
+#elif ERTS_POLL_USE_KQUEUE
+ struct kevent
+#endif
+ )*new_len;
+ /* We do not need to save previously stored data */
+ if (ps->res_events)
+ erts_free(ERTS_ALC_T_POLL_RES_EVS, ps->res_events);
+ ps->res_events = erts_alloc(ERTS_ALC_T_POLL_RES_EVS, new_size);
+ ps->res_events_len = new_len;
+}
+#endif /* ERTS_POLL_USE_KERNEL_POLL */
+
+#if ERTS_POLL_USE_POLL
+static void
+grow_poll_fds(ErtsPollSet ps, int min_ix)
+{
+ int i;
+ int new_len = min_ix + 1 + POLL_FDS_EXTRA_FREE_SIZE;
+ if (new_len > max_fds)
+ new_len = max_fds;
+ ps->poll_fds = (ps->poll_fds_len
+ ? erts_realloc(ERTS_ALC_T_POLL_FDS,
+ ps->poll_fds,
+ sizeof(struct pollfd)*new_len)
+ : erts_alloc(ERTS_ALC_T_POLL_FDS,
+ sizeof(struct pollfd)*new_len));
+ for (i = ps->poll_fds_len; i < new_len; i++) {
+ ps->poll_fds[i].fd = -1;
+ ps->poll_fds[i].events = (short) 0;
+ ps->poll_fds[i].revents = (short) 0;
+ }
+ ps->poll_fds_len = new_len;
+}
+#endif
+
+static void
+grow_fds_status(ErtsPollSet ps, int min_fd)
+{
+ int i;
+ int new_len = min_fd + 1 + FDS_STATUS_EXTRA_FREE_SIZE;
+ ASSERT(min_fd < max_fds);
+ if (new_len > max_fds)
+ new_len = max_fds;
+ ps->fds_status = (ps->fds_status_len
+ ? erts_realloc(ERTS_ALC_T_FD_STATUS,
+ ps->fds_status,
+ sizeof(ErtsFdStatus)*new_len)
+ : erts_alloc(ERTS_ALC_T_FD_STATUS,
+ sizeof(ErtsFdStatus)*new_len));
+ for (i = ps->fds_status_len; i < new_len; i++) {
+#if ERTS_POLL_USE_POLL
+ ps->fds_status[i].pix = -1;
+#endif
+ ps->fds_status[i].used_events = (ErtsPollEvents) 0;
+ ps->fds_status[i].events = (ErtsPollEvents) 0;
+#if ERTS_POLL_COALESCE_KP_RES
+ ps->fds_status[i].res_ev_ix = (unsigned short) ERTS_POLL_MAX_RES;
+#endif
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE || ERTS_POLL_USE_FALLBACK
+ ps->fds_status[i].flags = (unsigned short) 0;
+#endif
+ }
+ ps->fds_status_len = new_len;
+}
+
+/*
+ * --- Selecting fd to poll on -----------------------------------------------
+ */
+
+#if ERTS_POLL_USE_FALLBACK
+static int update_fallback_pollset(ErtsPollSet ps, int fd);
+#endif
+
+static ERTS_INLINE int
+need_update(ErtsPollSet ps, int fd)
+{
+#if ERTS_POLL_USE_KERNEL_POLL
+ int reset;
+#endif
+
+ ASSERT(fd < ps->fds_status_len);
+
+#if ERTS_POLL_USE_KERNEL_POLL
+ reset = (int) (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST);
+ if (reset && !ps->fds_status[fd].used_events) {
+ ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_RST;
+ reset = 0;
+ }
+#elif defined(ERTS_SMP)
+ ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_RST;
+#endif
+
+ if (ps->fds_status[fd].used_events != ps->fds_status[fd].events)
+ return 1;
+
+#if ERTS_POLL_USE_KERNEL_POLL
+ return reset;
+#else
+ return 0;
+#endif
+}
+
+#if ERTS_POLL_USE_BATCH_UPDATE_POLLSET
+
+#if ERTS_POLL_USE_KQUEUE
+#define ERTS_POLL_MIN_BATCH_BUF_SIZE 128
+#else
+#define ERTS_POLL_MIN_BATCH_BUF_SIZE 64
+#endif
+
+typedef struct {
+ int len;
+ int size;
+#if ERTS_POLL_USE_DEVPOLL
+ struct pollfd *buf;
+#elif ERTS_POLL_USE_KQUEUE
+ struct kevent *buf;
+ struct kevent *ebuf;
+#endif
+} ErtsPollBatchBuf;
+
+
+static ERTS_INLINE void
+setup_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
+{
+ bbp->len = 0;
+#if ERTS_POLL_USE_DEVPOLL
+ bbp->size = ps->res_events_len;
+ bbp->buf = ps->res_events;
+#elif ERTS_POLL_USE_KQUEUE
+ bbp->size = ps->res_events_len/2;
+ bbp->buf = ps->res_events;
+ bbp->ebuf = bbp->buf + bbp->size;
+#endif
+}
+
+
+#if ERTS_POLL_USE_DEVPOLL
+
+static void
+write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
+{
+ ssize_t wres;
+ char *buf = (char *) bbp->buf;
+ size_t buf_size = sizeof(struct pollfd)*bbp->len;
+
+ while (1) {
+ wres = write(ps->kp_fd, (void *) buf, buf_size);
+ if (wres < 0) {
+ if (errno == EINTR)
+ continue;
+ fatal_error("%s:%d:write_batch_buf(): "
+ "Failed to write to /dev/poll: "
+ "%s (%d)\n",
+ __FILE__, __LINE__,
+ erl_errno_id(errno), errno);
+ }
+ buf_size -= wres;
+ if (buf_size <= 0)
+ break;
+ buf += wres;
+ }
+
+ if (buf_size < 0) {
+ fatal_error("%s:%d:write_devpoll_buf(): Internal error\n",
+ __FILE__, __LINE__);
+ }
+ bbp->len = 0;
+}
+
+#elif ERTS_POLL_USE_KQUEUE
+
+static void
+write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
+{
+ int res;
+ int len = bbp->len;
+ struct kevent *buf = bbp->buf;
+ struct timespec ts = {0, 0};
+
+ do {
+ res = kevent(ps->kp_fd, buf, len, NULL, 0, &ts);
+ } while (res < 0 && errno == EINTR);
+ if (res < 0) {
+ int i;
+ struct kevent *ebuf = bbp->ebuf;
+ do {
+ res = kevent(ps->kp_fd, buf, len, ebuf, len, &ts);
+ } while (res < 0 && errno == EINTR);
+ if (res < 0) {
+ fatal_error("%s:%d: kevent() failed: %s (%d)\n",
+ __FILE__, __LINE__, erl_errno_id(errno), errno);
+ }
+ for (i = 0; i < res; i++) {
+ if (ebuf[i].flags & EV_ERROR) {
+ short filter;
+ int fd = (int) ebuf[i].ident;
+
+ switch ((int) ebuf[i].udata) {
+
+ /*
+ * Since we use a lazy update approach, EV_DELETE will
+ * frequently fail, because kqueue automatically removes
+ * a closed file descriptor from the poll set.
+ */
+ case ERTS_POLL_KQ_OP_DEL_R:
+ case ERTS_POLL_KQ_OP_DEL_W:
+ case ERTS_POLL_KQ_OP_HANDLED:
+ break;
+
+ /*
+ * According to the kqueue man page, EVFILT_READ support
+ * does not imply EVFILT_WRITE support; therefore, if an
+ * EV_ADD fails, we may have to remove other events on
+ * this fd from the kqueue pollset before adding the fd
+ * to the fallback pollset.
+ */
+ case ERTS_POLL_KQ_OP_ADD_W:
+ if (ps->fds_status[fd].used_events & ERTS_POLL_EV_IN) {
+ filter = EVFILT_READ;
+ goto rm_add_fb;
+ }
+ goto add_fb;
+ case ERTS_POLL_KQ_OP_ADD_R:
+ if (ps->fds_status[fd].used_events & ERTS_POLL_EV_OUT) {
+ filter = EVFILT_WRITE;
+ goto rm_add_fb;
+ }
+ goto add_fb;
+ case ERTS_POLL_KQ_OP_ADD2_W:
+ case ERTS_POLL_KQ_OP_ADD2_R: {
+ int j;
+ for (j = i+1; j < res; j++) {
+ if (fd == (int) ebuf[j].ident) {
+ int other_op = (int) ebuf[j].udata;
+ ebuf[j].udata = (void *) ERTS_POLL_KQ_OP_HANDLED;
+ if (!(ebuf[j].flags & EV_ERROR)) {
+ switch (other_op) {
+ case ERTS_POLL_KQ_OP_ADD2_W:
+ filter = EVFILT_WRITE;
+ goto rm_add_fb;
+ case ERTS_POLL_KQ_OP_ADD2_R:
+ filter = EVFILT_READ;
+ goto rm_add_fb;
+ default:
+ fatal_error("%s:%d:write_batch_buf(): "
+ "Internal error",
+ __FILE__, __LINE__);
+ break;
+ }
+ }
+ goto add_fb;
+ }
+ }
+ /* The other add succeeded... */
+ filter = (((int) ebuf[i].udata == ERTS_POLL_KQ_OP_ADD2_W)
+ ? EVFILT_READ
+ : EVFILT_WRITE);
+ rm_add_fb:
+ {
+ struct kevent kev;
+ struct timespec ts = {0, 0};
+ EV_SET(&kev, fd, filter, EV_DELETE, 0, 0, 0);
+ (void) kevent(ps->kp_fd, &kev, 1, NULL, 0, &ts);
+ }
+
+ add_fb:
+ ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
+ ASSERT(ps->fds_status[fd].used_events);
+ ps->fds_status[fd].used_events = 0;
+ ps->no_of_user_fds--;
+ update_fallback_pollset(ps, fd);
+ ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
+ break;
+ }
+ default:
+ fatal_error("%s:%d:write_batch_buf(): Internal error",
+ __FILE__, __LINE__);
+ break;
+ }
+ }
+ }
+ }
+ bbp->len = 0;
+}
+
+#endif /* ERTS_POLL_USE_KQUEUE */
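+
+/*
+ * A minimal sketch of the batching idiom used by write_batch_buf() above,
+ * assuming 'kq' and 'sock' are existing descriptors and 'on_failed_change'
+ * is a hypothetical handler: each change is tagged through the udata field
+ * so that a failed operation can be recognized in the returned eventlist by
+ * its EV_ERROR flag (the errno value is delivered in the data field).
+ *
+ *     struct kevent chg[2], res[2];
+ *     struct timespec ts = {0, 0};
+ *     int i, n;
+ *     EV_SET(&chg[0], sock, EVFILT_READ, EV_ADD, 0, 0, (void *) 1);
+ *     EV_SET(&chg[1], sock, EVFILT_WRITE, EV_ADD, 0, 0, (void *) 2);
+ *     n = kevent(kq, chg, 2, res, 2, &ts);
+ *     for (i = 0; i < n; i++)
+ *         if (res[i].flags & EV_ERROR)
+ *             on_failed_change((int) res[i].ident,
+ *                              (int) res[i].udata,
+ *                              (int) res[i].data);
+ */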
+
+static ERTS_INLINE void
+batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp)
+{
+ int buf_len;
+#if ERTS_POLL_USE_DEVPOLL
+ short events;
+ struct pollfd *buf;
+#elif ERTS_POLL_USE_KQUEUE
+ struct kevent *buf;
+#endif
+
+#ifdef ERTS_POLL_DEBUG_PRINT
+ erts_printf("Doing lazy update on fd=%d\n", fd);
+#endif
+
+ if (!need_update(ps, fd))
+ return;
+
+ /* Make sure we have room for at least the maximum number
+ of entries per fd */
+ if (bbp->size - bbp->len < 2)
+ write_batch_buf(ps, bbp);
+
+ buf_len = bbp->len;
+ buf = bbp->buf;
+
+ ASSERT(fd < ps->fds_status_len);
+
+#if ERTS_POLL_USE_DEVPOLL
+ events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events);
+ if (!events) {
+ buf[buf_len].events = POLLREMOVE;
+ ps->no_of_user_fds--;
+ }
+ else if (!ps->fds_status[fd].used_events) {
+ buf[buf_len].events = events;
+ ps->no_of_user_fds++;
+ }
+ else {
+ if ((ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)
+ || (ps->fds_status[fd].used_events & ~events)) {
+ /* Reset or removed events... */
+ buf[buf_len].fd = fd;
+ buf[buf_len].events = POLLREMOVE;
+ buf[buf_len++].revents = 0;
+ }
+ buf[buf_len].events = events;
+ }
+ buf[buf_len].fd = fd;
+ buf[buf_len++].revents = 0;
+
+#elif ERTS_POLL_USE_KQUEUE
+
+ if (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK) {
+ if (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_USEFLBCK)
+ update_fallback_pollset(ps, fd);
+ else { /* Remove from fallback and try kqueue */
+ ErtsPollEvents events = ps->fds_status[fd].events;
+ ps->fds_status[fd].events = (ErtsPollEvents) 0;
+ update_fallback_pollset(ps, fd);
+ ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK));
+ if (events) {
+ ps->fds_status[fd].events = events;
+ goto try_kqueue;
+ }
+ }
+ }
+ else {
+ ErtsPollEvents events, used_events;
+ int mod_w, mod_r;
+ try_kqueue:
+ events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events);
+ used_events = ERTS_POLL_EV_E2N(ps->fds_status[fd].used_events);
+ if (!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)) {
+ if (!used_events &&
+ (events & ERTS_POLL_EV_IN) && (events & ERTS_POLL_EV_OUT))
+ goto do_add_rw;
+ mod_r = ((events & ERTS_POLL_EV_IN)
+ != (used_events & ERTS_POLL_EV_IN));
+ mod_w = ((events & ERTS_POLL_EV_OUT)
+ != (used_events & ERTS_POLL_EV_OUT));
+ goto do_mod;
+ }
+ else { /* Reset */
+ if ((events & ERTS_POLL_EV_IN) && (events & ERTS_POLL_EV_OUT)) {
+ do_add_rw:
+ EV_SET(&buf[buf_len], fd, EVFILT_READ, EV_ADD,
+ 0, 0, (void *) ERTS_POLL_KQ_OP_ADD2_R);
+ buf_len++;
+ EV_SET(&buf[buf_len], fd, EVFILT_WRITE, EV_ADD,
+ 0, 0, (void *) ERTS_POLL_KQ_OP_ADD2_W);
+ buf_len++;
+
+ }
+ else {
+ mod_r = 1;
+ mod_w = 1;
+ do_mod:
+ if (mod_r) {
+ if (events & ERTS_POLL_EV_IN) {
+ EV_SET(&buf[buf_len], fd, EVFILT_READ, EV_ADD,
+ 0, 0, (void *) ERTS_POLL_KQ_OP_ADD_R);
+ buf_len++;
+ }
+ else if (used_events & ERTS_POLL_EV_IN) {
+ EV_SET(&buf[buf_len], fd, EVFILT_READ, EV_DELETE,
+ 0, 0, (void *) ERTS_POLL_KQ_OP_DEL_R);
+ buf_len++;
+ }
+ }
+ if (mod_w) {
+ if (events & ERTS_POLL_EV_OUT) {
+ EV_SET(&buf[buf_len], fd, EVFILT_WRITE, EV_ADD,
+ 0, 0, (void *) ERTS_POLL_KQ_OP_ADD_W);
+ buf_len++;
+ }
+ else if (used_events & ERTS_POLL_EV_OUT) {
+ EV_SET(&buf[buf_len], fd, EVFILT_WRITE, EV_DELETE,
+ 0, 0, (void *) ERTS_POLL_KQ_OP_DEL_W);
+ buf_len++;
+ }
+ }
+ }
+ }
+ if (used_events) {
+ if (!events) {
+ ps->no_of_user_fds--;
+ }
+ }
+ else {
+ if (events)
+ ps->no_of_user_fds++;
+ }
+ ASSERT((events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
+ ASSERT((used_events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
+ }
+
+#endif
+
+ ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_RST;
+ ps->fds_status[fd].used_events = ps->fds_status[fd].events;
+
+ bbp->len = buf_len;
+}
+
+#else /* !ERTS_POLL_USE_BATCH_UPDATE_POLLSET */
+
+#if ERTS_POLL_USE_EPOLL
+static int
+#if ERTS_POLL_USE_CONCURRENT_UPDATE
+conc_update_pollset(ErtsPollSet ps, int fd, int *update_fallback)
+#else
+update_pollset(ErtsPollSet ps, int fd)
+#endif
+{
+ int res;
+ int op;
+ struct epoll_event epe_templ;
+ struct epoll_event epe;
+
+ ASSERT(fd < ps->fds_status_len);
+
+ if (!need_update(ps, fd))
+ return 0;
+
+#ifdef ERTS_POLL_DEBUG_PRINT
+ erts_printf("Doing update on fd=%d\n", fd);
+#endif
+ if (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK) {
+#if ERTS_POLL_USE_CONCURRENT_UPDATE
+ if (!*update_fallback) {
+ *update_fallback = 1;
+ return 0;
+ }
+#endif
+ if (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_USEFLBCK) {
+ return update_fallback_pollset(ps, fd);
+ }
+ else { /* Remove from fallback and try epoll */
+ ErtsPollEvents events = ps->fds_status[fd].events;
+ ps->fds_status[fd].events = (ErtsPollEvents) 0;
+ res = update_fallback_pollset(ps, fd);
+ ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK));
+ if (!events)
+ return res;
+ ps->fds_status[fd].events = events;
+ }
+ }
+
+ epe_templ.events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events);
+ epe_templ.data.fd = fd;
+
+#ifdef VALGRIND
+ /* Silence invalid valgrind warning ... */
+ memset((void *) &epe.data, 0, sizeof(epoll_data_t));
+#endif
+
+ if (epe_templ.events && ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST) {
+ do {
+ /* We init 'epe' every time since epoll_ctl() may modify it
+ (not declared const and not documented as const). */
+ epe.events = epe_templ.events;
+ epe.data.fd = epe_templ.data.fd;
+ res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe);
+ } while (res != 0 && errno == EINTR);
+ ps->no_of_user_fds--;
+ ps->fds_status[fd].used_events = 0;
+ }
+
+ if (!epe_templ.events) {
+ /* A note on EPOLL_CTL_DEL: Linux kernel versions before 2.6.9
+ need a non-NULL event pointer even though it is ignored... */
+ op = EPOLL_CTL_DEL;
+ ps->no_of_user_fds--;
+ }
+ else if (!ps->fds_status[fd].used_events) {
+ op = EPOLL_CTL_ADD;
+ ps->no_of_user_fds++;
+ }
+ else {
+ op = EPOLL_CTL_MOD;
+ }
+
+ do {
+ /* We init 'epe' every time since epoll_ctl() may modify it
+ (not declared const and not documented as const). */
+ epe.events = epe_templ.events;
+ epe.data.fd = epe_templ.data.fd;
+ res = epoll_ctl(ps->kp_fd, op, fd, &epe);
+ } while (res != 0 && errno == EINTR);
+
+#if defined(ERTS_POLL_DEBUG_PRINT) && 1
+ {
+ int saved_errno = errno;
+ erts_printf("%s = epoll_ctl(%d, %s, %d, {Ox%x, %d})\n",
+ res == 0 ? "0" : erl_errno_id(errno),
+ ps->kp_fd,
+ (op == EPOLL_CTL_ADD
+ ? "EPOLL_CTL_ADD"
+ : (op == EPOLL_CTL_MOD
+ ? "EPOLL_CTL_MOD"
+ : (op == EPOLL_CTL_DEL
+ ? "EPOLL_CTL_DEL"
+ : "UNKNOWN"))),
+ fd,
+ epe_templ.events,
+ fd);
+ errno = saved_errno;
+ }
+#endif
+ if (res == 0)
+ ps->fds_status[fd].used_events = ps->fds_status[fd].events;
+ else {
+ switch (op) {
+ case EPOLL_CTL_MOD:
+ epe.events = 0;
+ do {
+ /* We init 'epe' every time since epoll_ctl() may modify it
+ (not declared const and not documented as const). */
+ epe.events = 0;
+ epe.data.fd = fd;
+ res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe);
+ } while (res != 0 && errno == EINTR);
+ ps->fds_status[fd].used_events = 0;
+ /* Fall through ... */
+ case EPOLL_CTL_ADD: {
+ ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
+ ps->no_of_user_fds--;
+#if ERTS_POLL_USE_CONCURRENT_UPDATE
+ if (!*update_fallback) {
+ *update_fallback = 1;
+ return 0;
+ }
+#endif
+ ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK));
+ res = update_fallback_pollset(ps, fd);
+ ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
+ break;
+ }
+ case EPOLL_CTL_DEL: {
+ /*
+ * Since we use a lazy update approach, EPOLL_CTL_DEL will
+ * frequently fail, because epoll automatically removes a
+ * closed file descriptor from the poll set.
+ */
+ ps->fds_status[fd].used_events = 0;
+ res = 0;
+ break;
+ }
+ default:
+ fatal_error("%s:%d:update_pollset(): Internal error\n",
+ __FILE__, __LINE__);
+ break;
+ }
+ }
+ ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_RST;
+ return res;
+}
+
+#if ERTS_POLL_USE_CONCURRENT_UPDATE
+static int
+update_pollset(ErtsPollSet ps, int fd)
+{
+ int update_fallback = 1;
+ return conc_update_pollset(ps, fd, &update_fallback);
+}
+#endif
+
+#endif /* ERTS_POLL_USE_EPOLL */
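+
+/*
+ * The epoll control path above boils down to ordinary epoll_ctl() calls; a
+ * minimal sketch, assuming 'epfd' is an epoll instance and 'sock' is a
+ * descriptor to watch (only the EINTR retry that the code above also
+ * performs is kept as error handling):
+ *
+ *     struct epoll_event ev;
+ *     int res;
+ *     ev.events = EPOLLIN | EPOLLOUT;
+ *     ev.data.fd = sock;
+ *     do {
+ *         res = epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);
+ *     } while (res != 0 && errno == EINTR);
+ *     if (res != 0 && errno == EEXIST)
+ *         res = epoll_ctl(epfd, EPOLL_CTL_MOD, sock, &ev);
+ */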
+
+#endif /* ERTS_POLL_USE_BATCH_UPDATE_POLLSET */
+
+#if ERTS_POLL_USE_POLL || ERTS_POLL_USE_SELECT || ERTS_POLL_USE_FALLBACK
+
+#if ERTS_POLL_USE_FALLBACK
+static int update_fallback_pollset(ErtsPollSet ps, int fd)
+#else
+static int update_pollset(ErtsPollSet ps, int fd)
+#endif
+{
+#ifdef ERTS_POLL_DEBUG_PRINT
+#if ERTS_POLL_USE_FALLBACK
+ erts_printf("Doing fallback update on fd=%d\n", fd);
+#else
+ erts_printf("Doing update on fd=%d\n", fd);
+#endif
+#endif
+
+ ASSERT(fd < ps->fds_status_len);
+#if ERTS_POLL_USE_FALLBACK
+ ASSERT(ps->fds_status[fd].used_events
+ ? (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK)
+ : (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_USEFLBCK));
+#endif
+
+ if (!need_update(ps, fd))
+ return 0;
+
+#if ERTS_POLL_USE_FALLBACK
+ ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_RST;
+#endif
+
+#if ERTS_POLL_USE_POLL /* --- poll -------------------------------- */
+ if (!ps->fds_status[fd].events) {
+ int pix = ps->fds_status[fd].pix;
+ int last_pix;
+ if (pix < 0) {
+#if ERTS_POLL_USE_FALLBACK
+ ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK));
+#endif
+ return -1;
+ }
+#if ERTS_POLL_USE_FALLBACK
+ ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
+#endif
+ ps->no_of_user_fds--;
+ last_pix = --ps->no_poll_fds;
+ if (pix != last_pix) {
+ /* Move last pix to this pix */
+ ps->poll_fds[pix].fd = ps->poll_fds[last_pix].fd;
+ ps->poll_fds[pix].events = ps->poll_fds[last_pix].events;
+ ps->poll_fds[pix].revents = ps->poll_fds[last_pix].revents;
+ ps->fds_status[ps->poll_fds[pix].fd].pix = pix;
+ }
+ /* Clear last pix */
+ ps->poll_fds[last_pix].fd = -1;
+ ps->poll_fds[last_pix].events = (short) 0;
+ ps->poll_fds[last_pix].revents = (short) 0;
+ /* Clear this fd status */
+ ps->fds_status[fd].pix = -1;
+ ps->fds_status[fd].used_events = (ErtsPollEvents) 0;
+#if ERTS_POLL_USE_FALLBACK
+ ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_INFLBCK;
+#endif
+ }
+ else {
+ int pix = ps->fds_status[fd].pix;
+ if (pix < 0) {
+#if ERTS_POLL_USE_FALLBACK
+ ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK)
+ || fd == ps->kp_fd);
+#endif
+ ps->no_of_user_fds++;
+ ps->fds_status[fd].pix = pix = ps->no_poll_fds++;
+ if (pix >= ps->poll_fds_len)
+ grow_poll_fds(ps, pix);
+ ps->poll_fds[pix].fd = fd;
+ ps->fds_status[fd].pix = pix;
+#if ERTS_POLL_USE_FALLBACK
+ ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK;
+#endif
+ }
+
+#if ERTS_POLL_USE_FALLBACK
+ ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
+#endif
+
+ /* Events to be used in next poll */
+ ps->poll_fds[pix].events = ev2pollev(ps->fds_status[fd].events);
+ if (ps->poll_fds[pix].revents) {
+ /* Remove result events that we should not poll for anymore */
+ ps->poll_fds[pix].revents
+ &= ev2pollev(~(~ps->fds_status[fd].used_events
+ & ps->fds_status[fd].events));
+ }
+ /* Save events to be used in next poll */
+ ps->fds_status[fd].used_events = ps->fds_status[fd].events;
+ }
+ return 0;
+#elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */
+ {
+ ErtsPollEvents events = ps->fds_status[fd].events;
+ if ((ERTS_POLL_EV_IN & events)
+ != (ERTS_POLL_EV_IN & ps->fds_status[fd].used_events)) {
+ if (ERTS_POLL_EV_IN & events) {
+ FD_SET(fd, &ps->input_fds);
+ }
+ else {
+ FD_CLR(fd, &ps->input_fds);
+ }
+ }
+ if ((ERTS_POLL_EV_OUT & events)
+ != (ERTS_POLL_EV_OUT & ps->fds_status[fd].used_events)) {
+ if (ERTS_POLL_EV_OUT & events) {
+ FD_SET(fd, &ps->output_fds);
+ }
+ else {
+ FD_CLR(fd, &ps->output_fds);
+ }
+ }
+
+ if (!ps->fds_status[fd].used_events) {
+ ASSERT(events);
+ ps->no_of_user_fds++;
+#if ERTS_POLL_USE_FALLBACK
+ ps->no_select_fds++;
+ ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK;
+#endif
+ }
+ else if (!events) {
+ ASSERT(ps->fds_status[fd].used_events);
+ ps->no_of_user_fds--;
+ ps->fds_status[fd].events = events;
+#if ERTS_POLL_USE_FALLBACK
+ ps->no_select_fds--;
+ ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_INFLBCK;
+#endif
+ }
+
+ ps->fds_status[fd].used_events = events;
+
+ if (events && fd > ps->max_fd)
+ ps->max_fd = fd;
+ else if (!events && fd == ps->max_fd) {
+ int max = ps->max_fd;
+ for (max = ps->max_fd; max >= 0; max--)
+ if (ps->fds_status[max].used_events)
+ break;
+ ps->max_fd = max;
+ }
+ }
+ return 0;
+#endif
+}
+
+#endif /* ERTS_POLL_USE_POLL || ERTS_POLL_USE_SELECT || ERTS_POLL_USE_FALLBACK */
+
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+
+static void
+handle_update_requests(ErtsPollSet ps)
+{
+ ErtsPollSetUpdateRequestsBlock *urqbp = &ps->update_requests;
+#if ERTS_POLL_USE_BATCH_UPDATE_POLLSET
+ ErtsPollBatchBuf bb;
+ setup_batch_buf(ps, &bb);
+#endif
+
+ while (urqbp) {
+ ErtsPollSetUpdateRequestsBlock *free_urqbp = urqbp;
+ int i;
+ int len = urqbp->len;
+ for (i = 0; i < len; i++) {
+ int fd = urqbp->fds[i];
+ ASSERT(fd < ps->fds_status_len);
+ ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_INURQ;
+#if ERTS_POLL_USE_BATCH_UPDATE_POLLSET
+ batch_update_pollset(ps, fd, &bb);
+#else
+ update_pollset(ps, fd);
+#endif
+ }
+
+ free_urqbp = urqbp;
+ urqbp = urqbp->next;
+
+ free_update_requests_block(ps, free_urqbp);
+
+ }
+
+#if ERTS_POLL_USE_BATCH_UPDATE_POLLSET
+ if (bb.len)
+ write_batch_buf(ps, &bb);
+#endif
+
+ ps->curr_upd_req_block = &ps->update_requests;
+
+#if ERTS_POLL_USE_DEVPOLL && defined(HARD_DEBUG)
+ check_poll_status(ps);
+#endif
+
+ ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(ps);
+}
+
+#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
+
+static ERTS_INLINE ErtsPollEvents
+poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on,
+ int *have_set_have_update_requests,
+ int *do_wake)
+{
+ ErtsPollEvents new_events;
+
+ if (fd < ps->internal_fd_limit || fd >= max_fds) {
+ if (fd < 0) {
+ new_events = ERTS_POLL_EV_ERR;
+ goto done;
+ }
+#if ERTS_POLL_USE_KERNEL_POLL
+ if (fd == ps->kp_fd) {
+ new_events = ERTS_POLL_EV_NVAL;
+ goto done;
+ }
+#endif
+#if ERTS_POLL_USE_WAKEUP_PIPE
+ if (fd == ps->wake_fds[0] || fd == ps->wake_fds[1]) {
+ new_events = ERTS_POLL_EV_NVAL;
+ goto done;
+ }
+#endif
+ }
+
+ if (fd >= ps->fds_status_len)
+ grow_fds_status(ps, fd);
+
+ ASSERT(fd < ps->fds_status_len);
+
+ new_events = ps->fds_status[fd].events;
+
+ if (events == 0) {
+ *do_wake = 0;
+ goto done;
+ }
+
+ if (on)
+ new_events |= events;
+ else
+ new_events &= ~events;
+
+ if (new_events == (ErtsPollEvents) 0) {
+#if ERTS_POLL_USE_KERNEL_POLL || defined(ERTS_SMP)
+ ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_RST;
+#endif
+#if ERTS_POLL_USE_FALLBACK
+ ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_USEFLBCK;
+#endif
+ }
+
+ ps->fds_status[fd].events = new_events;
+
+ if (new_events == ps->fds_status[fd].used_events
+#if ERTS_POLL_USE_KERNEL_POLL || defined(ERTS_SMP)
+ && !(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)
+#endif
+ ) {
+ *do_wake = 0;
+ goto done;
+ }
+
+#if !ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ if (update_pollset(ps, fd) != 0)
+ new_events = ERTS_POLL_EV_ERR;
+#else /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
+
+#if ERTS_POLL_USE_CONCURRENT_UPDATE
+ if (ERTS_POLLSET_IS_POLLED(ps)) {
+ int update_fallback = 0;
+ conc_update_pollset(ps, fd, &update_fallback);
+ if (!update_fallback) {
+ *do_wake = 0; /* no need to wake kernel poller */
+ goto done;
+ }
+ }
+#endif
+
+ enqueue_update_request(ps, fd);
+
+#ifdef ERTS_SMP
+ /*
+ * If new events have been added, we need to wake up the
+ * polling thread, but if events have been removed we don't.
+ */
+ if ((new_events && (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST))
+ || (~ps->fds_status[fd].used_events & new_events))
+ *do_wake = 1;
+#endif /* ERTS_SMP */
+
+#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
+
+ done:
+#ifdef ERTS_POLL_DEBUG_PRINT
+ erts_printf("0x%x = poll_control(ps, %d, 0x%x, %s) do_wake=%d\n",
+ (int) new_events, fd, (int) events, (on ? "on" : "off"), *do_wake);
+#endif
+ return new_events;
+}
+
+void
+ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps,
+ ErtsPollControlEntry pcev[],
+ int len)
+{
+ int i;
+ int hshur = 0;
+ int do_wake;
+ int final_do_wake = 0;
+
+ ERTS_POLLSET_LOCK(ps);
+
+ for (i = 0; i < len; i++) {
+ do_wake = 0;
+ pcev[i].events = poll_control(ps,
+ pcev[i].fd,
+ pcev[i].events,
+ pcev[i].on,
+ &hshur,
+ &do_wake);
+ final_do_wake |= do_wake;
+ }
+
+#ifdef ERTS_SMP
+ if (final_do_wake)
+ wake_poller(ps);
+#endif /* ERTS_SMP */
+
+ ERTS_POLLSET_UNLOCK(ps);
+}
+
+ErtsPollEvents
+ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps,
+ ErtsSysFdType fd,
+ ErtsPollEvents events,
+ int on,
+ int* do_wake) /* In: Wake up polling thread */
+ /* Out: Poller is woken */
+{
+ int hshur = 0;
+ ErtsPollEvents res;
+
+ ERTS_POLLSET_LOCK(ps);
+
+ res = poll_control(ps, fd, events, on, &hshur, do_wake);
+
+#ifdef ERTS_SMP
+ if (*do_wake) {
+ wake_poller(ps);
+ }
+#endif /* ERTS_SMP */
+
+ ERTS_POLLSET_UNLOCK(ps);
+ return res;
+}
+
+/*
+ * --- Wait on poll set ------------------------------------------------------
+ */
+
+#if ERTS_POLL_USE_KERNEL_POLL
+
+static ERTS_INLINE int
+save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res)
+{
+ int res = 0;
+ int i;
+ int n = chk_fds_res < max_res ? chk_fds_res : max_res;
+#if ERTS_POLL_USE_WAKEUP_PIPE
+ int wake_fd = ps->wake_fds[0];
+#endif
+
+ for (i = 0; i < n; i++) {
+
+#if ERTS_POLL_USE_EPOLL /* --- epoll ------------------------------- */
+
+ if (ps->res_events[i].events) {
+ int fd = ps->res_events[i].data.fd;
+ int ix;
+ ErtsPollEvents revents;
+#if ERTS_POLL_USE_WAKEUP_PIPE
+ if (fd == wake_fd) {
+ cleanup_wakeup_pipe(ps);
+ continue;
+ }
+#endif
+ ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK));
+ /* epoll_wait() can repeat the same fd in result array... */
+ ix = (int) ps->fds_status[fd].res_ev_ix;
+ ASSERT(ix >= 0);
+ if (ix >= res || pr[ix].fd != fd) {
+ ix = res;
+ pr[ix].fd = fd;
+ pr[ix].events = (ErtsPollEvents) 0;
+ }
+
+ revents = ERTS_POLL_EV_N2E(ps->res_events[i].events);
+ pr[ix].events |= revents;
+ if (revents) {
+ if (res == ix) {
+ ps->fds_status[fd].res_ev_ix = (unsigned short) ix;
+ res++;
+ }
+ }
+ }
+
+#elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */
+
+ struct kevent *ev;
+ int fd;
+ int ix;
+
+ ev = &ps->res_events[i];
+ fd = (int) ev->ident;
+ ASSERT(fd < ps->fds_status_len);
+ ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK));
+ ix = (int) ps->fds_status[fd].res_ev_ix;
+
+ ASSERT(ix >= 0);
+ if (ix >= res || pr[ix].fd != fd) {
+ ix = res;
+ pr[ix].fd = (int) ev->ident;
+ pr[ix].events = (ErtsPollEvents) 0;
+ }
+
+ if (ev->filter == EVFILT_READ) {
+#if ERTS_POLL_USE_WAKEUP_PIPE
+ if (fd == wake_fd) {
+ cleanup_wakeup_pipe(ps);
+ continue;
+ }
+#endif
+ pr[ix].events |= ERTS_POLL_EV_IN;
+ }
+ else if (ev->filter == EVFILT_WRITE)
+ pr[ix].events |= ERTS_POLL_EV_OUT;
+ if (ev->flags & (EV_ERROR|EV_EOF)) {
+ if ((ev->flags & EV_ERROR) && (((int) ev->data) == EBADF))
+ pr[ix].events |= ERTS_POLL_EV_NVAL;
+ else
+ pr[ix].events |= ERTS_POLL_EV_ERR;
+ }
+ if (pr[ix].events) {
+ if (res == ix) {
+ ps->fds_status[fd].res_ev_ix = (unsigned short) ix;
+ res++;
+ }
+ }
+
+#elif ERTS_POLL_USE_DEVPOLL /* --- devpoll ----------------------------- */
+
+ if (ps->res_events[i].revents) {
+ int fd = ps->res_events[i].fd;
+ ErtsPollEvents revents;
+#if ERTS_POLL_USE_WAKEUP_PIPE
+ if (fd == wake_fd) {
+ cleanup_wakeup_pipe(ps);
+ continue;
+ }
+#endif
+ revents = ERTS_POLL_EV_N2E(ps->res_events[i].events);
+ pr[res].fd = fd;
+ pr[res].events = revents;
+ res++;
+ }
+
+#endif
+
+ }
+
+ return res;
+}
+
+#endif /* ERTS_POLL_USE_KERNEL_POLL */
+
+#if ERTS_POLL_USE_FALLBACK
+
+static int
+get_kp_results(ErtsPollSet ps, ErtsPollResFd pr[], int max_res)
+{
+ int res;
+#if ERTS_POLL_USE_KQUEUE
+ struct timespec ts = {0, 0};
+#endif
+
+ if (max_res > ps->res_events_len)
+ grow_res_events(ps, max_res);
+
+ do {
+#if ERTS_POLL_USE_EPOLL
+ res = epoll_wait(ps->kp_fd, ps->res_events, max_res, 0);
+#elif ERTS_POLL_USE_KQUEUE
+ res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
+#endif
+ } while (res < 0 && errno == EINTR);
+
+ if (res < 0) {
+ fatal_error("%s:%d: %s() failed: %s (%d)\n",
+ __FILE__, __LINE__,
+#if ERTS_POLL_USE_EPOLL
+ "epoll_wait",
+#elif ERTS_POLL_USE_KQUEUE
+ "kevent",
+#endif
+ erl_errno_id(errno), errno);
+ }
+
+ return save_kp_result(ps, pr, max_res, res);
+}
+
+#endif /* ERTS_POLL_USE_FALLBACK */
+
+
+
+static ERTS_INLINE int
+save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res,
+ int chk_fds_res, int ebadf)
+{
+#if ERTS_POLL_USE_DEVPOLL
+ return save_kp_result(ps, pr, max_res, chk_fds_res);
+#elif ERTS_POLL_USE_FALLBACK
+ if (!ps->fallback_used)
+ return save_kp_result(ps, pr, max_res, chk_fds_res);
+ else
+#endif /* ERTS_POLL_USE_FALLBACK */
+ {
+
+#if ERTS_POLL_USE_POLL /* --- poll -------------------------------- */
+ int res = 0;
+#if ERTS_POLL_USE_WAKEUP_PIPE && !ERTS_POLL_USE_FALLBACK
+ int wake_fd = ps->wake_fds[0];
+#endif
+ int i, first_ix, end_ix;
+
+ /*
+ * In order to be somewhat fair, we continue on the poll_fds
+ * index where we stopped last time.
+ */
+ first_ix = i = ((ps->next_poll_fds_ix < ps->no_poll_fds)
+ ? ps->next_poll_fds_ix
+ : 0);
+ end_ix = ps->no_poll_fds;
+
+ while (1) {
+ while (i < end_ix && res < max_res) {
+ if (ps->poll_fds[i].revents != (short) 0) {
+ int fd = ps->poll_fds[i].fd;
+ ErtsPollEvents revents;
+#if ERTS_POLL_USE_FALLBACK
+ if (fd == ps->kp_fd) {
+ res += get_kp_results(ps, &pr[res], max_res-res);
+ i++;
+ continue;
+ }
+#elif ERTS_POLL_USE_WAKEUP_PIPE
+ if (fd == wake_fd) {
+ cleanup_wakeup_pipe(ps);
+ i++;
+ continue;
+ }
+#endif
+ revents = pollev2ev(ps->poll_fds[i].revents);
+ pr[res].fd = fd;
+ pr[res].events = revents;
+ res++;
+ }
+ i++;
+ }
+ if (res == max_res || i == first_ix)
+ break;
+ ASSERT(i == ps->no_poll_fds);
+ i = 0;
+ end_ix = first_ix;
+ }
+
+ ps->next_poll_fds_ix = i;
+ return res;
+
+#elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */
+ int res = 0;
+#if ERTS_POLL_USE_WAKEUP_PIPE && !ERTS_POLL_USE_FALLBACK
+ int wake_fd = ps->wake_fds[0];
+#endif
+ int fd, first_fd, end_fd;
+
+ /*
+ * In order to be fair, we continue on the fd where we stopped
+ * last time.
+ */
+ first_fd = fd = ps->next_sel_fd <= ps->max_fd ? ps->next_sel_fd : 0;
+ end_fd = ps->max_fd + 1;
+
+ if (!ebadf) {
+ while (1) {
+ while (fd < end_fd && res < max_res) {
+
+ pr[res].events = (ErtsPollEvents) 0;
+ if (FD_ISSET(fd, &ps->res_input_fds)) {
+#if ERTS_POLL_USE_FALLBACK
+ if (fd == ps->kp_fd) {
+ res += get_kp_results(ps, &pr[res], max_res-res);
+ fd++;
+ continue;
+ }
+#elif ERTS_POLL_USE_WAKEUP_PIPE
+ if (fd == wake_fd) {
+ cleanup_wakeup_pipe(ps);
+ fd++;
+ continue;
+ }
+#endif
+ pr[res].events |= ERTS_POLL_EV_IN;
+ }
+ if (FD_ISSET(fd, &ps->res_output_fds))
+ pr[res].events |= ERTS_POLL_EV_OUT;
+ if (pr[res].events) {
+ pr[res].fd = fd;
+ res++;
+ }
+ fd++;
+ }
+ if (res == max_res || fd == first_fd)
+ break;
+ ASSERT(fd == ps->max_fd + 1);
+ fd = 0;
+ end_fd = first_fd;
+ }
+ }
+ else {
+ /*
+ * Bad file descriptors in poll set.
+ *
+ * This only happens when running poorly written
+ * drivers. This code could be optimized, but we
+ * don't bother since it should never happen...
+ */
+ while (1) {
+ while (fd < end_fd && res < max_res) {
+ if (ps->fds_status[fd].events) {
+ int sres;
+ fd_set *iset = NULL;
+ fd_set *oset = NULL;
+ if (ps->fds_status[fd].events & ERTS_POLL_EV_IN) {
+ iset = &ps->res_input_fds;
+ FD_ZERO(iset);
+ FD_SET(fd, iset);
+ }
+ if (ps->fds_status[fd].events & ERTS_POLL_EV_OUT) {
+ oset = &ps->res_output_fds;
+ FD_ZERO(oset);
+ FD_SET(fd, oset);
+
+ }
+ do {
+ /* Initialize 'tv' each time;
+ select() may modify it */
+ SysTimeval tv = {0, 0};
+ sres = select(ps->max_fd+1, iset, oset, NULL, &tv);
+ } while (sres < 0 && errno == EINTR);
+ if (sres < 0) {
+#if ERTS_POLL_USE_FALLBACK
+ if (fd == ps->kp_fd) {
+ res += get_kp_results(ps,
+ &pr[res],
+ max_res-res);
+ fd++;
+ continue;
+ }
+#elif ERTS_POLL_USE_WAKEUP_PIPE
+ if (fd == wake_fd) {
+ cleanup_wakeup_pipe(ps);
+ fd++;
+ continue;
+ }
+#endif
+ pr[res].fd = fd;
+ pr[res].events = ERTS_POLL_EV_NVAL;
+ res++;
+ }
+ else if (sres > 0) {
+ pr[res].fd = fd;
+ if (iset && FD_ISSET(fd, iset)) {
+#if ERTS_POLL_USE_FALLBACK
+ if (fd == ps->kp_fd) {
+ res += get_kp_results(ps,
+ &pr[res],
+ max_res-res);
+ fd++;
+ continue;
+ }
+#elif ERTS_POLL_USE_WAKEUP_PIPE
+ if (fd == wake_fd) {
+ cleanup_wakeup_pipe(ps);
+ fd++;
+ continue;
+ }
+#endif
+ pr[res].events |= ERTS_POLL_EV_IN;
+ }
+ if (oset && FD_ISSET(fd, oset)) {
+ pr[res].events |= ERTS_POLL_EV_OUT;
+ }
+ ASSERT(pr[res].events);
+ res++;
+ }
+ }
+ fd++;
+ }
+ if (res == max_res || fd == first_fd)
+ break;
+ ASSERT(fd == ps->max_fd + 1);
+ fd = 0;
+ end_fd = first_fd;
+ }
+ }
+ ps->next_sel_fd = fd;
+ return res;
+#endif
+ }
+}
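+
+/*
+ * The fairness scheme above amounts to a round-robin scan that resumes at
+ * the slot following the one where the previous call stopped; in isolation
+ * (with a hypothetical 'ready' table of 'len' slots) it looks like:
+ *
+ *     static int next_ix = 0;
+ *     int scanned, ix = next_ix < len ? next_ix : 0;
+ *     for (scanned = 0; scanned < len; scanned++) {
+ *         if (ready[ix]) {
+ *             next_ix = ix + 1;
+ *             return ix;
+ *         }
+ *         ix++;
+ *         if (ix == len)
+ *             ix = 0;
+ *     }
+ *     return -1;
+ */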
+
+static ERTS_INLINE int
+check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked)
+{
+ ASSERT(!*ps_locked);
+ if (ps->no_of_user_fds == 0 && tv->tv_usec == 0 && tv->tv_sec == 0) {
+ /* Nothing to poll and zero timeout; done... */
+ return 0;
+ }
+ else {
+ long timeout = tv->tv_sec*1000 + tv->tv_usec/1000;
+ ASSERT(timeout >= 0);
+ erts_smp_atomic_set(&ps->timeout, timeout);
+#if ERTS_POLL_USE_FALLBACK
+ if (!(ps->fallback_used = ERTS_POLL_NEED_FALLBACK(ps))) {
+
+#if ERTS_POLL_USE_EPOLL /* --- epoll ------------------------------- */
+ if (timeout > INT_MAX)
+ timeout = INT_MAX;
+ if (max_res > ps->res_events_len)
+ grow_res_events(ps, max_res);
+ return epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout);
+#elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */
+ struct timespec ts;
+ ts.tv_sec = tv->tv_sec;
+ ts.tv_nsec = tv->tv_usec*1000;
+ if (max_res > ps->res_events_len)
+ grow_res_events(ps, max_res);
+ return kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
+#endif /* ----------------------------------------- */
+
+ }
+ else /* use fallback (i.e. poll() or select()) */
+#endif /* ERTS_POLL_USE_FALLBACK */
+ {
+
+#if ERTS_POLL_USE_DEVPOLL /* --- devpoll ----------------------------- */
+ /*
+ * The ioctl() will fail with EINVAL on Solaris 10 if dp_nfds
+ * is set too high. dp_nfds should not be set greater than
+ * the maximum number of file descriptors in the poll set.
+ */
+ struct dvpoll poll_res;
+ int nfds = ps->no_of_user_fds;
+#ifdef ERTS_SMP
+ nfds++; /* Wakeup pipe */
+#endif
+ if (timeout > INT_MAX)
+ timeout = INT_MAX;
+ poll_res.dp_nfds = nfds < max_res ? nfds : max_res;
+ if (poll_res.dp_nfds > ps->res_events_len)
+ grow_res_events(ps, poll_res.dp_nfds);
+ poll_res.dp_fds = ps->res_events;
+ poll_res.dp_timeout = (int) timeout;
+ return ioctl(ps->kp_fd, DP_POLL, &poll_res);
+#elif ERTS_POLL_USE_POLL /* --- poll -------------------------------- */
+ if (timeout > INT_MAX)
+ timeout = INT_MAX;
+ return poll(ps->poll_fds, ps->no_poll_fds, (int) timeout);
+#elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */
+ int res;
+ ps->res_input_fds = ps->input_fds;
+ ps->res_output_fds = ps->output_fds;
+ res = select(ps->max_fd + 1,
+ &ps->res_input_fds,
+ &ps->res_output_fds,
+ NULL,
+ tv);
+#ifdef ERTS_SMP
+ if (res < 0
+ && errno == EBADF
+ && ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) {
+ /*
+ * This may have happened because another thread deselected
+ * a fd in our poll set and then closed it, i.e., the driver
+ * behaved correctly. We want to avoid looking for a bad fd
+ * that may not even exist anymore. Therefore, handle update
+ * requests and try again.
+ *
+ * We don't know how much of the timeout is left; therefore,
+ * we use a zero timeout. If no error occurs and no events
+ * have triggered, we fake an EAGAIN error and let the caller
+ * restart us.
+ */
+ SysTimeval zero_tv = {0, 0};
+ *ps_locked = 1;
+ ERTS_POLLSET_LOCK(ps);
+ handle_update_requests(ps);
+ res = select(ps->max_fd + 1,
+ &ps->res_input_fds,
+ &ps->res_output_fds,
+ NULL,
+ &zero_tv);
+ if (res == 0) {
+ errno = EAGAIN;
+ res = -1;
+ }
+ }
+#endif /* ERTS_SMP */
+ return res;
+#endif /* ----------------------------------------- */
+ }
+ }
+}
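+
+/*
+ * For reference, the /dev/poll branch above follows the documented Solaris
+ * usage; a minimal sketch, assuming 'dp_fd' was obtained from
+ * open("/dev/poll", O_RDWR) and 'sock' is a descriptor of interest:
+ *
+ *     struct pollfd reg;
+ *     struct pollfd results[16];
+ *     struct dvpoll dvp;
+ *     int n;
+ *     reg.fd = sock;
+ *     reg.events = POLLIN;
+ *     reg.revents = 0;
+ *     (void) write(dp_fd, &reg, sizeof(reg));
+ *     dvp.dp_fds = results;
+ *     dvp.dp_nfds = 16;
+ *     dvp.dp_timeout = 1000;
+ *     n = ioctl(dp_fd, DP_POLL, &dvp);
+ */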
+
+int
+ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
+ ErtsPollResFd pr[],
+ int *len,
+ SysTimeval *utvp)
+{
+ int res, no_fds;
+ int ebadf = 0;
+ int ps_locked;
+ SysTimeval *tvp;
+ SysTimeval itv;
+
+ no_fds = *len;
+#ifdef ERTS_POLL_MAX_RES
+ if (no_fds >= ERTS_POLL_MAX_RES)
+ no_fds = ERTS_POLL_MAX_RES;
+#endif
+
+ *len = 0;
+
+ ASSERT(utvp);
+
+ tvp = utvp;
+
+#ifdef ERTS_POLL_DEBUG_PRINT
+ erts_printf("Entering erts_poll_wait(), timeout=%d\n",
+ (int) tvp->tv_sec*1000 + tvp->tv_usec/1000);
+#endif
+
+ ERTS_POLLSET_UNSET_POLLER_WOKEN(ps);
+ if (ERTS_POLLSET_SET_POLLED_CHK(ps)) {
+ res = EINVAL; /* Another thread is in erts_poll_wait()
+ on this pollset... */
+ goto done;
+ }
+
+ if (ERTS_POLLSET_IS_INTERRUPTED(ps)) {
+ /* Interrupted; use a zero timeout */
+ itv.tv_sec = 0;
+ itv.tv_usec = 0;
+ tvp = &itv;
+ }
+
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ if (ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) {
+ ERTS_POLLSET_LOCK(ps);
+ handle_update_requests(ps);
+ ERTS_POLLSET_UNLOCK(ps);
+ }
+#endif
+
+ ps_locked = 0;
+ res = check_fd_events(ps, tvp, no_fds, &ps_locked);
+
+ ERTS_POLLSET_SET_POLLER_WOKEN(ps);
+
+ if (res == 0) {
+ res = ETIMEDOUT;
+ }
+ else if (res < 0) {
+#if ERTS_POLL_USE_SELECT
+ if (errno == EBADF) {
+ ebadf = 1;
+ goto save_results;
+ }
+#endif
+ res = errno;
+ }
+ else {
+#if ERTS_POLL_USE_SELECT
+ save_results:
+#endif
+
+#ifdef ERTS_SMP
+ if (!ps_locked) {
+ ps_locked = 1;
+ ERTS_POLLSET_LOCK(ps);
+ }
+#endif
+
+ no_fds = save_poll_result(ps, pr, no_fds, res, ebadf);
+
+#ifdef HARD_DEBUG
+ check_poll_result(pr, no_fds);
+#endif
+
+ res = (no_fds == 0
+ ? (ERTS_POLLSET_UNSET_INTERRUPTED_CHK(ps) ? EINTR : EAGAIN)
+ : 0);
+ *len = no_fds;
+ }
+
+#ifdef ERTS_SMP
+ if (ps_locked)
+ ERTS_POLLSET_UNLOCK(ps);
+ ERTS_POLLSET_UNSET_POLLED(ps);
+#endif
+
+ done:
+ erts_smp_atomic_set(&ps->timeout, LONG_MAX);
+#ifdef ERTS_POLL_DEBUG_PRINT
+ erts_printf("Leaving %s = erts_poll_wait()\n",
+ res == 0 ? "0" : erl_errno_id(res));
+#endif
+
+ return res;
+}
+
+/*
+ * --- Interrupt a thread doing erts_poll_wait() -----------------------------
+ */
+
+void
+ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
+{
+ /*
+ * NOTE: This function might be called from signal handlers in the
+ * non-smp case; therefore, it has to be async-signal safe in
+ * the non-smp case.
+ */
+ if (set) {
+ ERTS_POLLSET_SET_INTERRUPTED(ps);
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
+ wake_poller(ps);
+#endif
+ }
+ else {
+ ERTS_POLLSET_UNSET_INTERRUPTED(ps);
+ }
+}
+
+/*
+ * erts_poll_interrupt_timed():
+ * If 'set' != 0, interrupt a thread blocked in erts_poll_wait() if it
+ * is not guaranteed to time out within 'msec' milliseconds.
+ */
+void
+ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps, int set, long msec)
+{
+ if (set) {
+ if (erts_smp_atomic_read(&ps->timeout) > msec) {
+ ERTS_POLLSET_SET_INTERRUPTED(ps);
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
+ wake_poller(ps);
+#endif
+ }
+#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
+ else {
+ if (ERTS_POLLSET_IS_POLLED(ps))
+ erts_smp_atomic_inc(&ps->no_avoided_wakeups);
+ erts_smp_atomic_inc(&ps->no_avoided_interrupts);
+ }
+ erts_smp_atomic_inc(&ps->no_interrupt_timed);
+#endif
+ }
+ else {
+ ERTS_POLLSET_UNSET_INTERRUPTED(ps);
+ }
+}
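+
+/*
+ * A thread that manages its own timer queue would typically use the timed
+ * variant to avoid pointless wakeups; a hedged sketch, where 'timer_queue'
+ * and 'next_timeout_in_ms' are hypothetical and the ERTS_POLL_EXPORT name
+ * suffix is omitted:
+ *
+ *     long left = next_timeout_in_ms(timer_queue);
+ *     erts_poll_interrupt_timed(ps, 1, left);
+ *
+ * The poller is only woken if it is not already guaranteed to time out
+ * within 'left' milliseconds.
+ */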
+
+int
+ERTS_POLL_EXPORT(erts_poll_max_fds)(void)
+{
+ return max_fds;
+}
+/*
+ * --- Initialization --------------------------------------------------------
+ */
+
+#ifdef VXWORKS
+extern int erts_vxworks_max_files;
+#endif
+
+void
+ERTS_POLL_EXPORT(erts_poll_init)(void)
+{
+ erts_smp_spinlock_init(&pollsets_lock, "pollsets_lock");
+ pollsets = NULL;
+
+ errno = 0;
+
+#if defined(VXWORKS)
+ max_fds = erts_vxworks_max_files;
+#elif !defined(NO_SYSCONF)
+ max_fds = sysconf(_SC_OPEN_MAX);
+#elif ERTS_POLL_USE_SELECT
+ max_fds = NOFILE;
+#else
+ max_fds = OPEN_MAX;
+#endif
+
+#if ERTS_POLL_USE_SELECT && defined(FD_SETSIZE)
+ if (max_fds > FD_SETSIZE)
+ max_fds = FD_SETSIZE;
+#endif
+
+ if (max_fds < 0)
+ fatal_error("erts_poll_init(): Failed to get max number of files: %s\n",
+ erl_errno_id(errno));
+
+#ifdef ERTS_POLL_DEBUG_PRINT
+ print_misc_debug_info();
+#endif
+}
+
+ErtsPollSet
+ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
+{
+#if ERTS_POLL_USE_KERNEL_POLL
+ int kp_fd;
+#endif
+ ErtsPollSet ps = erts_alloc(ERTS_ALC_T_POLLSET,
+ sizeof(struct ErtsPollSet_));
+ ps->internal_fd_limit = 0;
+ ps->fds_status = NULL;
+ ps->fds_status_len = 0;
+ ps->no_of_user_fds = 0;
+#if ERTS_POLL_USE_KERNEL_POLL
+ ps->kp_fd = -1;
+#if ERTS_POLL_USE_EPOLL
+ kp_fd = epoll_create(256);
+ ps->res_events_len = 0;
+ ps->res_events = NULL;
+#elif ERTS_POLL_USE_DEVPOLL
+ kp_fd = open("/dev/poll", O_RDWR);
+ ps->res_events_len = 0;
+ ps->res_events = NULL;
+#elif ERTS_POLL_USE_KQUEUE
+ kp_fd = kqueue();
+ ps->res_events_len = 0;
+ ps->res_events = NULL;
+#endif
+ if (kp_fd < 0)
+ fatal_error("erts_poll_create_pollset(): Failed to "
+#if ERTS_POLL_USE_EPOLL
+ "create epoll set"
+#elif ERTS_POLL_USE_DEVPOLL
+ "to open /dev/poll"
+#elif ERTS_POLL_USE_KQUEUE
+ "create kqueue"
+#endif
+ ": %s (%d)\n",
+ erl_errno_id(errno), errno);
+#endif /* ERTS_POLL_USE_KERNEL_POLL */
+#if ERTS_POLL_USE_BATCH_UPDATE_POLLSET
+ /* res_events is also used as write buffer */
+ grow_res_events(ps, ERTS_POLL_MIN_BATCH_BUF_SIZE);
+#endif
+#if ERTS_POLL_USE_POLL
+ ps->next_poll_fds_ix = 0;
+ ps->no_poll_fds = 0;
+ ps->poll_fds_len = 0;
+ ps->poll_fds = NULL;
+#elif ERTS_POLL_USE_SELECT
+ ps->next_sel_fd = 0;
+ ps->max_fd = -1;
+#if ERTS_POLL_USE_FALLBACK
+ ps->no_select_fds = 0;
+#endif
+ FD_ZERO(&ps->input_fds);
+ FD_ZERO(&ps->res_input_fds);
+ FD_ZERO(&ps->output_fds);
+ FD_ZERO(&ps->res_output_fds);
+#endif
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ ps->update_requests.next = NULL;
+ ps->update_requests.len = 0;
+ ps->curr_upd_req_block = &ps->update_requests;
+ erts_smp_atomic_init(&ps->have_update_requests, 0);
+#endif
+#ifdef ERTS_SMP
+ erts_smp_atomic_init(&ps->polled, 0);
+ erts_smp_atomic_init(&ps->woken, 0);
+ erts_smp_mtx_init(&ps->mtx, "pollset");
+#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
+ ps->woken = 0;
+#endif
+#if ERTS_POLL_USE_WAKEUP_PIPE
+ create_wakeup_pipe(ps);
+#endif
+#if ERTS_POLL_USE_FALLBACK
+ if (kp_fd >= ps->fds_status_len)
+ grow_fds_status(ps, kp_fd);
+ /* Force kernel poll fd into fallback (poll/select) set */
+ ps->fds_status[kp_fd].flags
+ |= ERTS_POLL_FD_FLG_INFLBCK|ERTS_POLL_FD_FLG_USEFLBCK;
+ {
+ int do_wake = 0;
+ ERTS_POLL_EXPORT(erts_poll_control)(ps, kp_fd, ERTS_POLL_EV_IN, 1,
+ &do_wake);
+ }
+#endif
+#if ERTS_POLL_USE_KERNEL_POLL
+ if (ps->internal_fd_limit <= kp_fd)
+ ps->internal_fd_limit = kp_fd + 1;
+ ps->kp_fd = kp_fd;
+#endif
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
+ ps->interrupt = 0;
+#else
+ erts_smp_atomic_init(&ps->interrupt, 0);
+#endif
+ erts_smp_atomic_init(&ps->timeout, LONG_MAX);
+#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
+ erts_smp_atomic_init(&ps->no_avoided_wakeups, 0);
+ erts_smp_atomic_init(&ps->no_avoided_interrupts, 0);
+ erts_smp_atomic_init(&ps->no_interrupt_timed, 0);
+#endif
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ handle_update_requests(ps);
+#endif
+#if ERTS_POLL_USE_FALLBACK
+ ps->fallback_used = 0;
+#endif
+ ps->no_of_user_fds = 0; /* Don't count wakeup pipe and fallback fd */
+
+ erts_smp_spin_lock(&pollsets_lock);
+ ps->next = pollsets;
+ pollsets = ps;
+ erts_smp_spin_unlock(&pollsets_lock);
+
+ return ps;
+}
+
+void
+ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps)
+{
+
+ if (ps->fds_status)
+ erts_free(ERTS_ALC_T_FD_STATUS, (void *) ps->fds_status);
+
+#if ERTS_POLL_USE_EPOLL
+ if (ps->kp_fd >= 0)
+ close(ps->kp_fd);
+ if (ps->res_events)
+ erts_free(ERTS_ALC_T_POLL_RES_EVS, (void *) ps->res_events);
+#elif ERTS_POLL_USE_DEVPOLL
+ if (ps->kp_fd >= 0)
+ close(ps->kp_fd);
+ if (ps->res_events)
+ erts_free(ERTS_ALC_T_POLL_RES_EVS, (void *) ps->res_events);
+#elif ERTS_POLL_USE_POLL
+ if (ps->poll_fds)
+ erts_free(ERTS_ALC_T_POLL_FDS, (void *) ps->poll_fds);
+#elif ERTS_POLL_USE_SELECT
+#endif
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ {
+ ErtsPollSetUpdateRequestsBlock *urqbp = ps->update_requests.next;
+ while (urqbp) {
+ ErtsPollSetUpdateRequestsBlock *free_urqbp = urqbp;
+ urqbp = urqbp->next;
+ free_update_requests_block(ps, free_urqbp);
+ }
+ }
+#endif
+#ifdef ERTS_SMP
+ erts_smp_mtx_destroy(&ps->mtx);
+#endif
+#if ERTS_POLL_USE_WAKEUP_PIPE
+ if (ps->wake_fds[0] >= 0)
+ close(ps->wake_fds[0]);
+ if (ps->wake_fds[1] >= 0)
+ close(ps->wake_fds[1]);
+#endif
+
+ erts_smp_spin_lock(&pollsets_lock);
+ if (ps == pollsets)
+ pollsets = pollsets->next;
+ else {
+ ErtsPollSet prev_ps;
+ for (prev_ps = pollsets; ps != prev_ps->next; prev_ps = prev_ps->next);
+ ASSERT(ps == prev_ps->next);
+ prev_ps->next = ps->next;
+ }
+ erts_smp_spin_unlock(&pollsets_lock);
+
+ erts_free(ERTS_ALC_T_POLLSET, (void *) ps);
+}
+
+/*
+ * --- Info ------------------------------------------------------------------
+ */
+
+void
+ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
+{
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ int pending_updates;
+#endif
+ Uint size = 0;
+
+ ERTS_POLLSET_LOCK(ps);
+
+ size += sizeof(struct ErtsPollSet_);
+ size += ps->fds_status_len*sizeof(ErtsFdStatus);
+
+#if ERTS_POLL_USE_EPOLL
+ size += ps->res_events_len*sizeof(struct epoll_event);
+#elif ERTS_POLL_USE_DEVPOLL
+ size += ps->res_events_len*sizeof(struct pollfd);
+#elif ERTS_POLL_USE_KQUEUE
+ size += ps->res_events_len*sizeof(struct kevent);
+#endif
+
+#if ERTS_POLL_USE_POLL
+ size += ps->poll_fds_len*sizeof(struct pollfd);
+#elif ERTS_POLL_USE_SELECT
+#endif
+
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ {
+ ErtsPollSetUpdateRequestsBlock *urqbp = ps->update_requests.next;
+ pending_updates = ps->update_requests.len;
+ while (urqbp) {
+ size += sizeof(ErtsPollSetUpdateRequestsBlock);
+ pending_updates += urqbp->len;
+ urqbp = urqbp->next;
+ }
+ }
+#endif
+
+ pip->primary =
+#if ERTS_POLL_USE_KQUEUE
+ "kqueue"
+#elif ERTS_POLL_USE_EPOLL
+ "epoll"
+#elif ERTS_POLL_USE_DEVPOLL
+ "/dev/poll"
+#elif ERTS_POLL_USE_POLL
+ "poll"
+#elif ERTS_POLL_USE_SELECT
+ "select"
+#endif
+ ;
+
+ pip->fallback =
+#if !ERTS_POLL_USE_FALLBACK
+ NULL
+#elif ERTS_POLL_USE_POLL
+ "poll"
+#elif ERTS_POLL_USE_SELECT
+ "select"
+#endif
+ ;
+
+ pip->kernel_poll =
+#if !ERTS_POLL_USE_KERNEL_POLL
+ NULL
+#elif ERTS_POLL_USE_KQUEUE
+ "kqueue"
+#elif ERTS_POLL_USE_EPOLL
+ "epoll"
+#elif ERTS_POLL_USE_DEVPOLL
+ "/dev/poll"
+#endif
+ ;
+
+ pip->memory_size = size;
+
+ pip->poll_set_size = ps->no_of_user_fds;
+#ifdef ERTS_SMP
+ pip->poll_set_size++; /* Wakeup pipe */
+#endif
+
+ pip->fallback_poll_set_size =
+#if !ERTS_POLL_USE_FALLBACK
+ 0
+#elif ERTS_POLL_USE_POLL
+ ps->no_poll_fds
+#elif ERTS_POLL_USE_SELECT
+ ps->no_select_fds
+#endif
+ ;
+
+#if ERTS_POLL_USE_FALLBACK
+ /* If only kp_fd is in fallback poll set we don't use fallback... */
+ if (pip->fallback_poll_set_size == 1)
+ pip->fallback_poll_set_size = 0;
+ else
+ pip->poll_set_size++; /* kp_fd */
+#endif
+
+ pip->lazy_updates =
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ 1
+#else
+ 0
+#endif
+ ;
+
+ pip->pending_updates =
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ pending_updates
+#else
+ 0
+#endif
+ ;
+
+ pip->batch_updates =
+#if ERTS_POLL_USE_BATCH_UPDATE_POLLSET
+ 1
+#else
+ 0
+#endif
+ ;
+
+ pip->concurrent_updates =
+#if ERTS_POLL_USE_CONCURRENT_UPDATE
+ 1
+#else
+ 0
+#endif
+ ;
+
+ pip->max_fds = max_fds;
+
+#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
+ pip->no_avoided_wakeups = erts_smp_atomic_read(&ps->no_avoided_wakeups);
+ pip->no_avoided_interrupts = erts_smp_atomic_read(&ps->no_avoided_interrupts);
+ pip->no_interrupt_timed = erts_smp_atomic_read(&ps->no_interrupt_timed);
+#endif
+
+ ERTS_POLLSET_UNLOCK(ps);
+
+}
+
+/*
+ * Fatal error...
+ */
+
+#ifndef ERTS_GOT_SIGUSR1
+# define ERTS_GOT_SIGUSR1 0
+#endif
+
+static void
+fatal_error(char *format, ...)
+{
+ va_list ap;
+
+ if (ERTS_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
+ /*
+ * Crash dump writing and reception of SIGUSR1 (which will
+ * result in a crash dump) close all file descriptors. This
+ * typically results in a fatal error for erts_poll() (wakeup
+ * pipes and kernel poll fds are closed).
+ *
+ * We ignore the error and let the crash dump writing continue...
+ */
+ return;
+ }
+ va_start(ap, format);
+ erts_vfprintf(stderr, format, ap);
+ va_end(ap);
+ abort();
+}
+
+static void
+fatal_error_async_signal_safe(char *error_str)
+{
+ if (ERTS_IS_CRASH_DUMPING || ERTS_GOT_SIGUSR1) {
+ /* See comment above in fatal_error() */
+ return;
+ }
+ if (error_str) {
+ int len = 0;
+ while (error_str[len])
+ len++;
+ if (len)
+ (void) write(2, error_str, len); /* async signal safe */
+ }
+ abort();
+}
+
+/*
+ * --- Debug -----------------------------------------------------------------
+ */
+
+void
+ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet ps,
+ ErtsPollEvents ev[],
+ int len)
+{
+ int fd;
+ ERTS_POLLSET_LOCK(ps);
+ for (fd = 0; fd < len; fd++) {
+ if (fd >= ps->fds_status_len)
+ ev[fd] = 0;
+ else {
+ ev[fd] = ps->fds_status[fd].events;
+#if ERTS_POLL_USE_WAKEUP_PIPE
+ if (fd == ps->wake_fds[0] || fd == ps->wake_fds[1])
+ ev[fd] |= ERTS_POLL_EV_NVAL;
+#endif
+#if ERTS_POLL_USE_KERNEL_POLL
+ if (fd == ps->kp_fd)
+ ev[fd] |= ERTS_POLL_EV_NVAL;
+#endif
+ }
+ }
+ ERTS_POLLSET_UNLOCK(ps);
+
+}
+
+#ifdef HARD_DEBUG
+
+static void
+check_poll_result(ErtsPollResFd pr[], int len)
+{
+ int i, j;
+
+ for (i = 0; i < len; i++) {
+ ASSERT(pr[i].fd >= 0);
+ ASSERT(pr[i].fd < max_fds);
+ for (j = 0; j < len; j++) {
+ ASSERT(i == j || pr[i].fd != pr[j].fd);
+ }
+ }
+}
+
+
+#if ERTS_POLL_USE_DEVPOLL
+
+static void
+check_poll_status(ErtsPollSet ps)
+{
+ int i;
+ for (i = 0; i < ps->fds_status_len; i++) {
+ int ires;
+ struct pollfd dp_fd;
+ short events = ERTS_POLL_EV_E2N(ps->fds_status[i].events);
+
+ dp_fd.fd = i;
+ dp_fd.events = (short) 0;
+ dp_fd.revents = (short) 0;
+
+ ires = ioctl(ps->kp_fd, DP_ISPOLLED, &dp_fd);
+
+ if (ires == 0) {
+ ASSERT(!events);
+ }
+ else if (ires == 1) {
+ ASSERT(events);
+ ASSERT(events == dp_fd.revents);
+ }
+ else {
+ ASSERT(0);
+ }
+ ASSERT(dp_fd.fd == i);
+ ASSERT(ps->fds_status[i].events == ps->fds_status[i].used_events);
+ }
+}
+
+#endif /* ERTS_POLL_USE_DEVPOLL */
+#endif /* HARD_DEBUG */
+
+#ifdef ERTS_POLL_DEBUG_PRINT
+static void
+print_misc_debug_info(void)
+{
+ erts_printf("erts_poll using: %s lazy_updates:%s batch_updates:%s\n",
+#if ERTS_POLL_USE_KQUEUE
+ "kqueue"
+#elif ERTS_POLL_USE_EPOLL
+ "epoll"
+#elif ERTS_POLL_USE_DEVPOLL
+ "/dev/poll"
+#endif
+#if ERTS_POLL_USE_FALLBACK
+ "-"
+#endif
+#if ERTS_POLL_USE_POLL
+ "poll"
+#elif ERTS_POLL_USE_SELECT
+ "select"
+#endif
+ ,
+#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
+ "true"
+#else
+ "false"
+#endif
+ ,
+#if ERTS_POLL_USE_BATCH_UPDATE_POLLSET
+ "true"
+#else
+ "false"
+#endif
+ );
+
+ erts_printf("ERTS_POLL_EV_IN=0x%x\n"
+ "ERTS_POLL_EV_OUT=0x%x\n"
+ "ERTS_POLL_EV_NVAL=0x%x\n"
+ "ERTS_POLL_EV_ERR=0x%x\n",
+ ERTS_POLL_EV_IN,
+ ERTS_POLL_EV_OUT,
+ ERTS_POLL_EV_NVAL,
+ ERTS_POLL_EV_ERR);
+
+#ifdef FD_SETSIZE
+ erts_printf("FD_SETSIZE=%d\n", FD_SETSIZE);
+#endif
+}
+
+#endif
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
new file mode 100644
index 0000000000..725a77a152
--- /dev/null
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -0,0 +1,246 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Poll interface suitable for ERTS with or without
+ * SMP support.
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_POLL_H__
+#define ERL_POLL_H__
+
+#include "sys.h"
+
+#if 0
+#define ERTS_POLL_COUNT_AVOIDED_WAKEUPS
+#endif
+
+#ifdef ERTS_ENABLE_KERNEL_POLL
+# if defined(ERTS_KERNEL_POLL_VERSION)
+# define ERTS_POLL_EXPORT(FUNC) FUNC ## _kp
+# else
+# define ERTS_POLL_EXPORT(FUNC) FUNC ## _nkp
+# undef ERTS_POLL_DISABLE_KERNEL_POLL
+# define ERTS_POLL_DISABLE_KERNEL_POLL
+# endif
+#else
+# define ERTS_POLL_EXPORT(FUNC) FUNC
+# undef ERTS_POLL_DISABLE_KERNEL_POLL
+# define ERTS_POLL_DISABLE_KERNEL_POLL
+#endif
+
+#ifdef ERTS_POLL_DISABLE_KERNEL_POLL
+# undef HAVE_SYS_EPOLL_H
+# undef HAVE_SYS_EVENT_H
+# undef HAVE_SYS_DEVPOLL_H
+#endif
+
+#undef ERTS_POLL_USE_KERNEL_POLL
+#define ERTS_POLL_USE_KERNEL_POLL 0
+
+#undef ERTS_POLL_USE_KQUEUE
+#define ERTS_POLL_USE_KQUEUE 0
+#undef ERTS_POLL_USE_EPOLL
+#define ERTS_POLL_USE_EPOLL 0
+#undef ERTS_POLL_USE_DEVPOLL
+#define ERTS_POLL_USE_DEVPOLL 0
+#undef ERTS_POLL_USE_POLL
+#define ERTS_POLL_USE_POLL 0
+#undef ERTS_POLL_USE_SELECT
+#define ERTS_POLL_USE_SELECT 0
+
+#if defined(HAVE_SYS_EVENT_H)
+# undef ERTS_POLL_USE_KQUEUE
+# define ERTS_POLL_USE_KQUEUE 1
+# undef ERTS_POLL_USE_KERNEL_POLL
+# define ERTS_POLL_USE_KERNEL_POLL 1
+#elif defined(HAVE_SYS_EPOLL_H)
+# undef ERTS_POLL_USE_EPOLL
+# define ERTS_POLL_USE_EPOLL 1
+# undef ERTS_POLL_USE_KERNEL_POLL
+# define ERTS_POLL_USE_KERNEL_POLL 1
+#elif defined(HAVE_SYS_DEVPOLL_H)
+# undef ERTS_POLL_USE_DEVPOLL
+# define ERTS_POLL_USE_DEVPOLL 1
+# undef ERTS_POLL_USE_KERNEL_POLL
+# define ERTS_POLL_USE_KERNEL_POLL 1
+#endif
+
+#define ERTS_POLL_USE_FALLBACK (ERTS_POLL_USE_KQUEUE || ERTS_POLL_USE_EPOLL)
+
+#if !ERTS_POLL_USE_KERNEL_POLL || ERTS_POLL_USE_FALLBACK
+# if defined(ERTS_USE_POLL)
+# undef ERTS_POLL_USE_POLL
+# define ERTS_POLL_USE_POLL 1
+# elif !defined(__WIN32__)
+# undef ERTS_POLL_USE_SELECT
+# define ERTS_POLL_USE_SELECT 1
+# endif
+#endif
+
+typedef Uint32 ErtsPollEvents;
+#undef ERTS_POLL_EV_E2N
+
+#if defined(__WIN32__) /* --- win32 ------------------------------- */
+
+#define ERTS_POLL_EV_IN 1
+#define ERTS_POLL_EV_OUT 2
+#define ERTS_POLL_EV_ERR 4
+#define ERTS_POLL_EV_NVAL 8
+
+#elif ERTS_POLL_USE_EPOLL /* --- epoll ------------------------------- */
+
+#include <sys/epoll.h>
+
+#define ERTS_POLL_EV_E2N(EV) \
+ ((__uint32_t) (EV))
+#define ERTS_POLL_EV_N2E(EV) \
+ ((ErtsPollEvents) (EV))
+
+#define ERTS_POLL_EV_IN ERTS_POLL_EV_N2E(EPOLLIN)
+#define ERTS_POLL_EV_OUT ERTS_POLL_EV_N2E(EPOLLOUT)
+#define ERTS_POLL_EV_NVAL ERTS_POLL_EV_N2E(EPOLLET)
+#define ERTS_POLL_EV_ERR ERTS_POLL_EV_N2E(EPOLLERR|EPOLLHUP)
+
+#elif ERTS_POLL_USE_DEVPOLL /* --- devpoll ----------------------------- */
+
+#include <sys/devpoll.h>
+
+#define ERTS_POLL_EV_E2N(EV) \
+ ((short) ((EV) & ~((~((ErtsPollEvents) 0)) << 8*SIZEOF_SHORT)))
+#define ERTS_POLL_EV_N2E(EV) \
+ ((ErtsPollEvents) ((unsigned short) (EV)))
+
+#define ERTS_POLL_EV_IN ERTS_POLL_EV_N2E(POLLIN)
+#define ERTS_POLL_EV_OUT ERTS_POLL_EV_N2E(POLLOUT)
+#define ERTS_POLL_EV_NVAL ERTS_POLL_EV_N2E(POLLNVAL)
+#define ERTS_POLL_EV_ERR ERTS_POLL_EV_N2E(POLLERR|POLLHUP)
+
+#elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */
+/* Kqueue uses the fallback defines (poll() or select()) */
+#endif
+
+#if ERTS_POLL_USE_POLL /* --- poll -------------------------------- */
+
+#include <poll.h>
+
+#define ERTS_POLL_EV_NKP_E2N(EV) \
+ ((short) ((EV) & ~((~((ErtsPollEvents) 0)) << 8*SIZEOF_SHORT)))
+#define ERTS_POLL_EV_NKP_N2E(EV) \
+ ((ErtsPollEvents) ((unsigned short) (EV)))
+
+/* At least on FreeBSD, we need POLLRDNORM for normal files, not POLLIN; */
+/* whether this is a FreeBSD bug is unclear. */
+#ifdef POLLRDNORM
+#define ERTS_POLL_EV_NKP_IN ERTS_POLL_EV_N2E(POLLIN|POLLRDNORM)
+#else
+#define ERTS_POLL_EV_NKP_IN ERTS_POLL_EV_N2E(POLLIN)
+#endif
+#define ERTS_POLL_EV_NKP_OUT ERTS_POLL_EV_N2E(POLLOUT)
+#define ERTS_POLL_EV_NKP_NVAL ERTS_POLL_EV_N2E(POLLNVAL)
+#define ERTS_POLL_EV_NKP_ERR ERTS_POLL_EV_N2E(POLLERR|POLLHUP)
+
+#elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */
+
+#define ERTS_POLL_EV_NKP_E2N(EV) (EV)
+#define ERTS_POLL_EV_NKP_N2E(EV) (EV)
+
+#define ERTS_POLL_EV_NKP_IN (((ErtsPollEvents) 1) << 0)
+#define ERTS_POLL_EV_NKP_OUT (((ErtsPollEvents) 1) << 1)
+#define ERTS_POLL_EV_NKP_NVAL (((ErtsPollEvents) 1) << 2)
+#define ERTS_POLL_EV_NKP_ERR (((ErtsPollEvents) 1) << 3)
+
+#endif /* ----------------------------------------- */
+
+
+#if !defined(ERTS_POLL_EV_E2N) && defined(ERTS_POLL_EV_NKP_E2N)
+/* poll(), select(), and kqueue() */
+
+#define ERTS_POLL_EV_E2N(EV) ERTS_POLL_EV_NKP_E2N((EV))
+#define ERTS_POLL_EV_N2E(EV) ERTS_POLL_EV_NKP_N2E((EV))
+
+#define ERTS_POLL_EV_IN ERTS_POLL_EV_NKP_IN
+#define ERTS_POLL_EV_OUT ERTS_POLL_EV_NKP_OUT
+#define ERTS_POLL_EV_NVAL ERTS_POLL_EV_NKP_NVAL
+#define ERTS_POLL_EV_ERR ERTS_POLL_EV_NKP_ERR
+
+#endif
+
+typedef struct ErtsPollSet_ *ErtsPollSet;
+
+typedef struct {
+ ErtsSysFdType fd;
+ ErtsPollEvents events;
+ int on;
+} ErtsPollControlEntry;
+
+typedef struct {
+ ErtsSysFdType fd;
+ ErtsPollEvents events;
+} ErtsPollResFd;
+
+typedef struct {
+ char *primary;
+ char *fallback;
+ char *kernel_poll;
+ Uint memory_size;
+ int poll_set_size;
+ int fallback_poll_set_size;
+ int lazy_updates;
+ int pending_updates;
+ int batch_updates;
+ int concurrent_updates;
+ int max_fds;
+#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
+ long no_avoided_wakeups;
+ long no_avoided_interrupts;
+ long no_interrupt_timed;
+#endif
+} ErtsPollInfo;
+
+void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet,
+ int);
+void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet,
+ int,
+ long);
+ErtsPollEvents ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet,
+ ErtsSysFdType,
+ ErtsPollEvents,
+ int on,
+ int* wake_poller);
+void ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet,
+ ErtsPollControlEntry [],
+ int len);
+int ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet,
+ ErtsPollResFd [],
+ int *,
+ SysTimeval *);
+int ERTS_POLL_EXPORT(erts_poll_max_fds)(void);
+void ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet,
+ ErtsPollInfo *);
+ErtsPollSet ERTS_POLL_EXPORT(erts_poll_create_pollset)(void);
+void ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet);
+void ERTS_POLL_EXPORT(erts_poll_init)(void);
+void ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet,
+ ErtsPollEvents [],
+ int);
+
+#endif /* #ifndef ERL_POLL_H__ */
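+
+/*
+ * Putting the interface together: a caller would create a pollset, register
+ * interest with erts_poll_control(), and then loop over erts_poll_wait().
+ * A hedged sketch (the ERTS_POLL_EXPORT name suffix and all error handling
+ * are omitted; 'fd' and 'dispatch_fd' are hypothetical):
+ *
+ *     ErtsPollSet ps = erts_poll_create_pollset();
+ *     ErtsPollResFd pr[256];
+ *     SysTimeval tv = {1, 0};
+ *     int wake = 0, len, i;
+ *
+ *     erts_poll_control(ps, fd, ERTS_POLL_EV_IN, 1, &wake);
+ *     for (;;) {
+ *         len = sizeof(pr)/sizeof(pr[0]);
+ *         if (erts_poll_wait(ps, pr, &len, &tv) == 0)
+ *             for (i = 0; i < len; i++)
+ *                 dispatch_fd(pr[i].fd, pr[i].events);
+ *     }
+ */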
diff --git a/erts/emulator/sys/unix/driver_int.h b/erts/emulator/sys/unix/driver_int.h
new file mode 100644
index 0000000000..a7ee8087ab
--- /dev/null
+++ b/erts/emulator/sys/unix/driver_int.h
@@ -0,0 +1,41 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * System-dependent driver declarations
+ */
+
+#ifndef __DRIVER_INT_H__
+#define __DRIVER_INT_H__
+
+#ifdef HAVE_SYS_UIO_H
+#include <sys/types.h>
+#include <sys/uio.h>
+
+typedef struct iovec SysIOVec;
+
+#else
+
+typedef struct {
+ char* iov_base;
+ int iov_len;
+} SysIOVec;
+
+#endif
+
+#endif
diff --git a/erts/emulator/sys/unix/erl9_start.c b/erts/emulator/sys/unix/erl9_start.c
new file mode 100644
index 0000000000..578062d7e2
--- /dev/null
+++ b/erts/emulator/sys/unix/erl9_start.c
@@ -0,0 +1,130 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+/*
+ * XXX This is a temporary dummy to make sys.c happy until we rewrite it.
+ */
+unsigned preloaded_size_ring0 = 1;
+unsigned char preloaded_ring0[1] = {0};
+
+Preload pre_loaded[] = {
+ {"ring0", 1, preloaded_ring0},
+ {0, 0, 0}
+};
+
+int
+main(int argc, char** argv)
+{
+ char sbuf[1024];
+ struct {
+ void* p;
+ int sz;
+ } bins[2];
+ int bin_num = 0;
+ FILE* fp;
+ char* progname = argv[0];
+ char* eq;
+
+ argv++, argc--;
+
+ if (argc > 0 && argv[0][0] == '-') {
+ argv++, argc--;
+ }
+ if (argc < 1) {
+ abort();
+ }
+ if ((fp = fopen(argv[0], "r")) == NULL) {
+ abort();
+ }
+
+ /* Needs to be called before any memory allocation */
+ erts_short_init();
+
+ while (fgets(sbuf, sizeof sbuf, fp)) {
+ if (sbuf[0] == '#') {
+ continue; /* Comment */
+ } else if (sbuf[0] == 'e' && strncmp("exec", sbuf, 4) == 0) {
+ continue; /* Comment ;-) */
+ } else if ((eq = strchr(sbuf, '=')) != NULL) {
+ char* val;
+ char* p = strchr(sbuf, '\n');
+ if (p) {
+ *p = '\0';
+ }
+ *eq = '\0';
+ val = erts_read_env(sbuf);
+ if (val == NULL) {
+ *eq = '=';
+ erts_sys_putenv(sbuf, eq - &sbuf[0]);
+ }
+ erts_free_read_env(val);
+ } else if (sbuf[0] == ':' && '0' <= sbuf[1] && sbuf[1] <= '9') {
+ int load_size = atoi(sbuf+1);
+ void* bin;
+
+ bin = malloc(load_size);
+ if (fread(bin, 1, load_size, fp) != load_size) {
+ abort();
+ }
+ bins[bin_num].p = bin;
+ bins[bin_num].sz = load_size;
+ bin_num++;
+ } else if (strcmp(sbuf, "--end--\n") == 0) {
+ int rval;
+ Eterm mod = NIL;
+ char *val;
+
+ fclose(fp);
+
+ if (bin_num != 2) {
+ abort();
+ }
+
+ val = erts_read_env("ERLBREAKHANDLER");
+ if (val) {
+ init_break_handler();
+ }
+ erts_free_read_env(val);
+
+ if ((rval = erts_load_module(NULL, 0, NIL, &mod, bins[0].p, bins[0].sz)) < 0) {
+ fprintf(stderr, "%s: Load of initial module failed: %d\n",
+ progname, rval);
+ abort();
+ }
+ erts_first_process(mod, bins[1].p, bins[1].sz, argc, argv);
+ free(bins[0].p);
+ free(bins[1].p);
+ process_main();
+ abort();
+ } else {
+ fprintf(stderr, "%s: bad line: %s\n", progname, sbuf);
+ abort();
+ }
+ }
+ abort();
+}
diff --git a/erts/emulator/sys/unix/erl_child_setup.c b/erts/emulator/sys/unix/erl_child_setup.c
new file mode 100644
index 0000000000..7c6e4a2f37
--- /dev/null
+++ b/erts/emulator/sys/unix/erl_child_setup.c
@@ -0,0 +1,122 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * After a vfork() (or fork()) the child exec()s to this program which
+ * sets up the child and exec()s to the user program (see spawn_start()
+ * in sys.c and ticket OTP-4389).
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define NEED_CHILD_SETUP_DEFINES
+#include "sys.h"
+#include "erl_misc_utils.h"
+
+#ifdef SIG_SIGSET /* Old SysV */
+void sys_sigrelease(int sig)
+{
+ sigrelse(sig);
+}
+#else /* !SIG_SIGSET */
+#ifdef SIG_SIGNAL /* Old BSD */
+sys_sigrelease(int sig)
+{
+ sigsetmask(sigblock(0) & ~sigmask(sig));
+}
+#else /* !SIG_SIGNAL */ /* The True Way - POSIX!:-) */
+void sys_sigrelease(int sig)
+{
+ sigset_t mask;
+
+ sigemptyset(&mask);
+ sigaddset(&mask, sig);
+ sigprocmask(SIG_UNBLOCK, &mask, (sigset_t *)NULL);
+}
+#endif /* !SIG_SIGNAL */
+#endif /* !SIG_SIGSET */
+
+int
+main(int argc, char *argv[])
+{
+ int i, from, to;
+ int erts_spawn_executable = 0;
+
+ /* OBSERVE!
+ * Keep child setup after fork() (implemented in sys.c) up to date
+ * if changes are made here.
+ */
+
+ if (argc != CS_ARGV_NO_OF_ARGS) {
+ if (argc < CS_ARGV_NO_OF_ARGS) {
+ return 1;
+ } else {
+ erts_spawn_executable = 1;
+ }
+ }
+
+ if (strcmp("false", argv[CS_ARGV_UNBIND_IX]) != 0)
+ if (erts_unbind_from_cpu_str(argv[CS_ARGV_UNBIND_IX]) != 0)
+ return 1;
+
+ for (i = 0; i < CS_ARGV_NO_OF_DUP2_OPS; i++) {
+ if (argv[CS_ARGV_DUP2_OP_IX(i)][0] == '-'
+ && argv[CS_ARGV_DUP2_OP_IX(i)][1] == '\0')
+ break;
+ if (sscanf(argv[CS_ARGV_DUP2_OP_IX(i)], "%d:%d", &from, &to) != 2)
+ return 1;
+ if (dup2(from, to) < 0)
+ return 1;
+ }
+
+ if (sscanf(argv[CS_ARGV_FD_CR_IX], "%d:%d", &from, &to) != 2)
+ return 1;
+ for (i = from; i <= to; i++)
+ (void) close(i);
+
+ if (!(argv[CS_ARGV_WD_IX][0] == '.' && argv[CS_ARGV_WD_IX][1] == '\0')
+ && chdir(argv[CS_ARGV_WD_IX]) < 0)
+ return 1;
+
+#if defined(USE_SETPGRP_NOARGS) /* SysV */
+ (void) setpgrp();
+#elif defined(USE_SETPGRP) /* BSD */
+ (void) setpgrp(0, getpid());
+#else /* POSIX */
+ (void) setsid();
+#endif
+
+ sys_sigrelease(SIGCHLD);
+ sys_sigrelease(SIGINT);
+ sys_sigrelease(SIGUSR1);
+
+ if (erts_spawn_executable) {
+ if (argv[CS_ARGV_NO_OF_ARGS + 1] == NULL) {
+ execl(argv[CS_ARGV_NO_OF_ARGS],argv[CS_ARGV_NO_OF_ARGS],
+ (char *) NULL);
+ } else {
+ execv(argv[CS_ARGV_NO_OF_ARGS],&(argv[CS_ARGV_NO_OF_ARGS + 1]));
+ }
+ } else {
+ execl("/bin/sh", "sh", "-c", argv[CS_ARGV_CMD_IX], (char *) NULL);
+ }
+ return 1;
+}
diff --git a/erts/emulator/sys/unix/erl_main.c b/erts/emulator/sys/unix/erl_main.c
new file mode 100644
index 0000000000..b26f93f77e
--- /dev/null
+++ b/erts/emulator/sys/unix/erl_main.c
@@ -0,0 +1,31 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+
+int
+main(int argc, char **argv)
+{
+ erl_start(argc, argv);
+ return 0;
+}
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
new file mode 100644
index 0000000000..2d5ef882f6
--- /dev/null
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -0,0 +1,339 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ *
+ * This file handles differences between different Unix systems.
+ * This should be the only place with conditional compilation
+ * depending on the type of OS.
+ */
+
+#ifndef _ERL_UNIX_SYS_H
+#define _ERL_UNIX_SYS_H
+
+#include <stdio.h>
+#include <math.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef QNX
+#include <memory.h>
+#endif
+
+#if defined(__sun__) && defined(__SVR4) && !defined(__EXTENSIONS__)
+# define __EXTENSIONS__
+# include <sys/types.h>
+# undef __EXTENSIONS__
+#else
+# include <sys/types.h>
+#endif
+#include <sys/stat.h>
+#include <sys/param.h>
+#include <fcntl.h>
+#include "erl_errno.h"
+#include <signal.h>
+
+
+#if HAVE_SYS_SOCKETIO_H
+# include <sys/socketio.h>
+#endif
+#if HAVE_SYS_SOCKIO_H
+# include <sys/sockio.h>
+#endif
+
+#ifdef HAVE_NET_ERRNO_H
+#include <net/errno.h>
+#endif
+
+#ifdef HAVE_DIRENT_H
+# include <dirent.h>
+#endif
+
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+
+#ifndef HAVE_MMAP
+# define HAVE_MMAP 0
+#endif
+
+#if HAVE_MMAP
+# include <sys/mman.h>
+#endif
+
+#if TIME_WITH_SYS_TIME
+# include <sys/time.h>
+# include <time.h>
+#else
+# if HAVE_SYS_TIME_H
+# include <sys/time.h>
+# else
+# include <time.h>
+# endif
+#endif
+
+#include <sys/times.h>
+
+#ifdef HAVE_IEEEFP_H
+#include <ieeefp.h>
+#endif
+
+#ifdef QNX
+#include <process.h>
+#include <sys/qnx_glob.h>
+#endif
+
+#include <pwd.h>
+
+#ifndef HZ
+#define HZ 60
+#endif
+
+#ifdef NETDB_H_NEEDS_IN_H
+#include <netinet/in.h>
+#endif
+#include <netdb.h>
+
+/*
+ * Make sure that MAXPATHLEN is defined.
+ */
+#ifdef GETHRTIME_WITH_CLOCK_GETTIME
+#undef HAVE_GETHRTIME
+#define HAVE_GETHRTIME 1
+#endif
+
+#ifndef MAXPATHLEN
+# ifdef PATH_MAX
+# define MAXPATHLEN PATH_MAX
+# else
+# define MAXPATHLEN 2048
+# endif
+#endif
+
+/* File descriptors are numbers and are allocated consecutively on Unix */
+#define ERTS_SYS_CONTINOUS_FD_NUMBERS
+
+#define HAVE_ERTS_CHECK_IO_DEBUG
+int erts_check_io_debug(void);
+
+
+#ifndef ENABLE_CHILD_WAITER_THREAD
+# undef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+# define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+# ifdef ERTS_SMP
+# define ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
+void erts_check_children(void);
+# endif
+#endif
+
+typedef void *GETENV_STATE;
+
+/*
+** For the erl_timer_sup module.
+*/
+
+typedef struct timeval SysTimeval;
+
+#define sys_gettimeofday(Arg) ((void) gettimeofday((Arg), NULL))
+
+typedef struct tms SysTimes;
+
+extern int erts_ticks_per_sec;
+
+#define SYS_CLK_TCK (erts_ticks_per_sec)
+
+#define sys_times(Arg) times(Arg)
+
+#define ERTS_WRAP_SYS_TIMES 1
+extern int erts_ticks_per_sec_wrap;
+#define SYS_CLK_TCK_WRAP (erts_ticks_per_sec_wrap)
+extern clock_t sys_times_wrap(void);
+
+#ifdef HAVE_GETHRTIME
+#ifdef GETHRTIME_WITH_CLOCK_GETTIME
+typedef long long SysHrTime;
+
+extern SysHrTime sys_gethrtime(void);
+#define sys_init_hrtime() /* Nothing */
+
+#else /* Real gethrtime (Solaris) */
+
+typedef hrtime_t SysHrTime;
+
+#define sys_gethrtime() gethrtime()
+#define sys_init_hrtime() /* Nothing */
+
+#endif /* GETHRTIME_WITH_CLOCK_GETTIME */
+#endif /* HAVE_GETHRTIME */
+
+#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME))
+typedef long long SysCpuTime;
+typedef struct timespec SysTimespec;
+
+#if defined(HAVE_GETHRVTIME)
+#define sys_gethrvtime() gethrvtime()
+#define sys_get_proc_cputime(t,tp) (t) = sys_gethrvtime(), \
+ (tp).tv_sec = (time_t)((t)/1000000000LL), \
+ (tp).tv_nsec = (long)((t)%1000000000LL)
+int sys_start_hrvtime(void);
+int sys_stop_hrvtime(void);
+
+#elif defined(HAVE_CLOCK_GETTIME)
+#define sys_clock_gettime(cid,tp) clock_gettime((cid),&(tp))
+#define sys_get_proc_cputime(t,tp) sys_clock_gettime(CLOCK_PROCESS_CPUTIME_ID,(tp))
+
+#endif
+#endif
+
+/* No use in having resolutions other than 1 ms. */
+#define SYS_CLOCK_RESOLUTION 1
+
+/* These are defined in sys.c */
+#if defined(SIG_SIGSET) /* Old SysV */
+RETSIGTYPE (*sys_sigset())();
+#elif defined(SIG_SIGNAL) /* Old BSD */
+RETSIGTYPE (*sys_sigset())();
+#else
+RETSIGTYPE (*sys_sigset(int, RETSIGTYPE (*func)(int)))(int);
+#endif
+extern void sys_sigrelease(int);
+extern void sys_sigblock(int);
+extern void sys_stop_cat(void);
+
+/*
+ * Handling of floating point exceptions.
+ */
+
+#ifdef USE_ISINF_ISNAN /* simulate finite() */
+# define finite(f) (!isinf(f) && !isnan(f))
+# define HAVE_FINITE
+#endif
+
+#ifdef NO_FPE_SIGNALS
+
+#define erts_get_current_fp_exception() NULL
+#ifdef ERTS_SMP
+#define erts_thread_init_fp_exception() do{}while(0)
+#endif
+# define __ERTS_FP_CHECK_INIT(fpexnp) do {} while (0)
+# define __ERTS_FP_ERROR(fpexnp, f, Action) if (!finite(f)) { Action; } else {}
+# define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) __ERTS_FP_ERROR(fpexnp, f, Action)
+# define __ERTS_SAVE_FP_EXCEPTION(fpexnp)
+# define __ERTS_RESTORE_FP_EXCEPTION(fpexnp)
+
+#define erts_sys_block_fpe() 0
+#define erts_sys_unblock_fpe(x) do{}while(0)
+
+#else /* !NO_FPE_SIGNALS */
+
+extern volatile unsigned long *erts_get_current_fp_exception(void);
+#ifdef ERTS_SMP
+extern void erts_thread_init_fp_exception(void);
+#endif
+# if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+# define erts_fwait(fpexnp,f) \
+ __asm__ __volatile__("fwait" : "=m"(*(fpexnp)) : "m"(f))
+# elif (defined(__powerpc__) || defined(__ppc__)) && defined(__GNUC__)
+# define erts_fwait(fpexnp,f) \
+ __asm__ __volatile__("" : "=m"(*(fpexnp)) : "fm"(f))
+# elif defined(__sparc__) && defined(__linux__) && defined(__GNUC__)
+# define erts_fwait(fpexnp,f) \
+ __asm__ __volatile__("" : "=m"(*(fpexnp)) : "em"(f))
+# else
+# define erts_fwait(fpexnp,f) \
+ __asm__ __volatile__("" : "=m"(*(fpexnp)) : "g"(f))
+# endif
+# if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+ extern void erts_restore_fpu(void);
+# else
+# define erts_restore_fpu() /*empty*/
+# endif
+# if (!defined(__GNUC__) || \
+ (__GNUC__ < 2) || \
+ (__GNUC__ == 2 && __GNUC_MINOR < 96)) && \
+ !defined(__builtin_expect)
+# define __builtin_expect(x, expected_value) (x)
+# endif
+static __inline__ int erts_check_fpe(volatile unsigned long *fp_exception, double f)
+{
+ erts_fwait(fp_exception, f);
+ if (__builtin_expect(*fp_exception == 0, 1))
+ return 0;
+ *fp_exception = 0;
+ erts_restore_fpu();
+ return 1;
+}
+# undef erts_fwait
+# undef erts_restore_fpu
+extern void erts_fp_check_init_error(volatile unsigned long *fp_exception);
+static __inline__ void __ERTS_FP_CHECK_INIT(volatile unsigned long *fp_exception)
+{
+ if (__builtin_expect(*fp_exception == 0, 1))
+ return;
+ erts_fp_check_init_error(fp_exception);
+}
+# define __ERTS_FP_ERROR(fpexnp, f, Action) do { if (erts_check_fpe((fpexnp),(f))) { Action; } } while (0)
+# define __ERTS_SAVE_FP_EXCEPTION(fpexnp) unsigned long old_erl_fp_exception = *(fpexnp)
+# define __ERTS_RESTORE_FP_EXCEPTION(fpexnp) \
+ do { *(fpexnp) = old_erl_fp_exception; } while (0)
+ /* This is for library calls where we don't trust the external
+ code to always throw floating-point exceptions on errors. */
+static __inline__ int erts_check_fpe_thorough(volatile unsigned long *fp_exception, double f)
+{
+ return erts_check_fpe(fp_exception, f) || !finite(f);
+}
+# define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) \
+ do { if (erts_check_fpe_thorough((fpexnp),(f))) { Action; } } while (0)
+
+int erts_sys_block_fpe(void);
+void erts_sys_unblock_fpe(int);
+
+#endif /* !NO_FPE_SIGNALS */
+
+#define ERTS_FP_CHECK_INIT(p) __ERTS_FP_CHECK_INIT(&(p)->fp_exception)
+#define ERTS_FP_ERROR(p, f, A) __ERTS_FP_ERROR(&(p)->fp_exception, f, A)
+#define ERTS_FP_ERROR_THOROUGH(p, f, A) __ERTS_FP_ERROR_THOROUGH(&(p)->fp_exception, f, A)
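+
+/*
+ * Illustrative usage sketch (not compiled), assuming a process pointer
+ * whose struct has the fp_exception field these wrappers expect: a
+ * floating point operation is bracketed by ERTS_FP_CHECK_INIT() and
+ * ERTS_FP_ERROR*(); the Action argument runs if the result is not finite.
+ */
+#if 0
+static double example_fp_divide(Process *p, double x, double y)
+{
+    double res;
+    ERTS_FP_CHECK_INIT(p);
+    res = x / y;                        /* may trap or produce inf/nan */
+    ERTS_FP_ERROR_THOROUGH(p, res, return 0.0);
+    return res;
+}
+#endif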
+
+
+#ifdef NEED_CHILD_SETUP_DEFINES
+/* The child setup argv[] */
+#define CS_ARGV_PROGNAME_IX 0 /* Program name */
+#define CS_ARGV_UNBIND_IX 1 /* Unbind from cpu */
+#define CS_ARGV_WD_IX 2 /* Working directory */
+#define CS_ARGV_CMD_IX 3 /* Command */
+#define CS_ARGV_FD_CR_IX 4 /* Fd close range */
+#define CS_ARGV_DUP2_OP_IX(N) ((N) + 5) /* dup2 operations */
+
+#define CS_ARGV_NO_OF_DUP2_OPS 3 /* Number of dup2 ops */
+#define CS_ARGV_NO_OF_ARGS 8 /* Number of arguments */
+#endif /* #ifdef NEED_CHILD_SETUP_DEFINES */
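+
+/*
+ * Illustrative example (all values hypothetical): for a port opened with
+ * default options, spawn_start() in sys.c builds an argv for the child
+ * setup program along the lines of
+ *
+ *   { "<bindir>/child_setup",   CS_ARGV_PROGNAME_IX
+ *     "false",                  CS_ARGV_UNBIND_IX
+ *     ".",                      CS_ARGV_WD_IX
+ *     "exec myprog",            CS_ARGV_CMD_IX
+ *     "3:1023",                 CS_ARGV_FD_CR_IX
+ *     "8:1", "9:0", "-",        CS_ARGV_DUP2_OP_IX(0..2)
+ *     NULL }
+ *
+ * i.e. CS_ARGV_NO_OF_ARGS entries plus the terminating NULL; the fd
+ * numbers and program name above are made up for illustration only.
+ */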
+
+/* Threads */
+#ifdef USE_THREADS
+extern int init_async(int);
+extern int exit_async(void);
+#endif
+
+#define ERTS_EXIT_AFTER_DUMP _exit
+
+#ifdef ERTS_TIMER_THREAD
+struct erts_iwait; /* opaque for clients */
+extern struct erts_iwait *erts_iwait_init(void);
+extern void erts_iwait_wait(struct erts_iwait *iwait, struct timeval *delay);
+extern void erts_iwait_interrupt(struct erts_iwait *iwait);
+#endif /* ERTS_TIMER_THREAD */
+
+#endif /* #ifndef _ERL_UNIX_SYS_H */
diff --git a/erts/emulator/sys/unix/erl_unix_sys_ddll.c b/erts/emulator/sys/unix/erl_unix_sys_ddll.c
new file mode 100644
index 0000000000..336d9586c4
--- /dev/null
+++ b/erts/emulator/sys/unix/erl_unix_sys_ddll.c
@@ -0,0 +1,280 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Interface functions to the dynamic linker using dl* functions.
+ * (As far as I know it works on SunOS 4, 5, Linux and FreeBSD. /Seb)
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#ifdef HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+
+/* Some systems do not have RTLD_NOW defined and require the "mode"
+ * argument to dlopen() to always be 1.
+ */
+#ifndef RTLD_NOW
+# define RTLD_NOW 1
+#endif
+
+#define MAX_NAME_LEN 255 /* XXX should we get the system path size? */
+#define EXT_LEN 3
+#define FILE_EXT ".so" /* extension appended to the filename */
+
+static char **errcodes = NULL;
+static int num_errcodes = 0;
+static int num_errcodes_allocated = 0;
+
+#define my_strdup(WHAT) my_strdup_in(ERTS_ALC_T_DDLL_ERRCODES, WHAT)
+
+static char *my_strdup_in(ErtsAlcType_t type, char *what)
+{
+ char *res = erts_alloc(type, strlen(what) + 1);
+ strcpy(res, what);
+ return res;
+}
+
+
+static int find_errcode(char *string, ErtsSysDdllError* err)
+{
+ int i;
+
+ if (err != NULL) {
+ erts_sys_ddll_free_error(err); /* in case we ignored an earlier error */
+ err->str = my_strdup_in(ERTS_ALC_T_DDLL_TMP_BUF, string);
+ return 0;
+ }
+ for(i=0;i<num_errcodes;++i) {
+ if (!strcmp(string, errcodes[i])) {
+ return i;
+ }
+ }
+ if (num_errcodes_allocated == num_errcodes) {
+ errcodes = (num_errcodes_allocated == 0)
+ ? erts_alloc(ERTS_ALC_T_DDLL_ERRCODES,
+ (num_errcodes_allocated = 10) * sizeof(char *))
+ : erts_realloc(ERTS_ALC_T_DDLL_ERRCODES, errcodes,
+ (num_errcodes_allocated += 10) * sizeof(char *));
+ }
+ errcodes[num_errcodes++] = my_strdup(string);
+ return (num_errcodes - 1);
+}
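+
+/*
+ * Note on the error reporting scheme above: when the caller passes an
+ * ErtsSysDdllError, the dl error string is copied into it and 0 is
+ * returned. Otherwise the string is interned in the errcodes table and
+ * its index is returned; callers turn that index into a driver error
+ * code as ERL_DE_DYNAMIC_ERROR_OFFSET - index, and erts_sys_ddll_error()
+ * further down maps such a code back to the stored string.
+ */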
+
+void erl_sys_ddll_init(void) {
+#if defined(HAVE_DLOPEN) && defined(ERTS_NEED_DLOPEN_BEFORE_DLERROR)
+ /*
+ * dlopen() needs to be called before we make the first call to
+ * dlerror(); otherwise, dlerror() might dump core. At least
+ * some versions of linuxthread suffer from this bug.
+ */
+ void *handle = dlopen("/nonexistinglib", RTLD_NOW);
+ if (handle)
+ dlclose(handle);
+#endif
+ return;
+}
+
+/*
+ * Open a shared object
+ */
+int erts_sys_ddll_open2(char *full_name, void **handle, ErtsSysDdllError* err)
+{
+#if defined(HAVE_DLOPEN)
+ char* dlname;
+ int len = sys_strlen(full_name);
+ int ret;
+
+ dlname = erts_alloc(ERTS_ALC_T_TMP, len + EXT_LEN + 1);
+ sys_strcpy(dlname, full_name);
+ sys_strcpy(dlname+len, FILE_EXT);
+
+ ret = erts_sys_ddll_open_noext(dlname, handle, err);
+
+ erts_free(ERTS_ALC_T_TMP, (void *) dlname);
+ return ret;
+#else
+ return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
+#endif
+}
+
+int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err)
+{
+ int ret = ERL_DE_NO_ERROR;
+ char *str;
+ dlerror();
+ if ((*handle = dlopen(dlname, RTLD_NOW)) == NULL) {
+ str = dlerror();
+
+ if (err == NULL) {
+ /*
+ * Remove the filename prefix to avoid an exploding number of
+ * error codes under extreme usage.
+ */
+ if (strstr(str,dlname) == str) {
+ char *save_str = str;
+ str += strlen(dlname);
+ while (*str == ':' || *str == ' ') {
+ ++str;
+ }
+ if (*str == '\0') { /* Better with filename than nothing... */
+ str = save_str;
+ }
+ }
+ }
+ ret = ERL_DE_DYNAMIC_ERROR_OFFSET - find_errcode(str, err);
+ }
+ return ret;
+}
+
+/*
+ * Find a symbol in the shared object
+ */
+int erts_sys_ddll_sym2(void *handle, char *func_name, void **function,
+ ErtsSysDdllError* err)
+{
+#if defined(HAVE_DLOPEN)
+ void *sym;
+ char *e;
+ int ret;
+ dlerror();
+ sym = dlsym(handle, func_name);
+ if ((e = dlerror()) != NULL) {
+ ret = ERL_DE_DYNAMIC_ERROR_OFFSET - find_errcode(e, err);
+ } else {
+ *function = sym;
+ ret = ERL_DE_NO_ERROR;
+ }
+ return ret;
+#else
+ return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
+#endif
+}
+
+/* XXX:PaN These two will be changed with new driver interface! */
+
+/*
+ * Load the driver init function; it might appear under different names depending on the object format...
+ */
+
+int erts_sys_ddll_load_driver_init(void *handle, void **function)
+{
+ void *fn;
+ int res;
+ if ((res = erts_sys_ddll_sym2(handle, "driver_init", &fn, NULL)) != ERL_DE_NO_ERROR) {
+ res = erts_sys_ddll_sym2(handle, "_driver_init", &fn, NULL);
+ }
+ if (res == ERL_DE_NO_ERROR) {
+ *function = fn;
+ }
+ return res;
+}
+
+int erts_sys_ddll_load_nif_init(void *handle, void **function, ErtsSysDdllError* err)
+{
+ void *fn;
+ int res;
+ if ((res = erts_sys_ddll_sym2(handle, "nif_init", &fn, err)) != ERL_DE_NO_ERROR) {
+ res = erts_sys_ddll_sym2(handle, "_nif_init", &fn, err);
+ }
+ if (res == ERL_DE_NO_ERROR) {
+ *function = fn;
+ }
+ return res;
+}
+
+/*
+ * Call the driver_init function, whatever it's really called, simple on unix...
+ */
+void *erts_sys_ddll_call_init(void *function) {
+ void *(*initfn)(void) = function;
+ return (*initfn)();
+}
+void *erts_sys_ddll_call_nif_init(void *function) {
+ return erts_sys_ddll_call_init(function);
+}
+
+
+
+/*
+ * Close a shared object
+ */
+int erts_sys_ddll_close2(void *handle, ErtsSysDdllError* err)
+{
+#if defined(HAVE_DLOPEN)
+ int ret;
+ char *s;
+ dlerror();
+ if (dlclose(handle) == 0) {
+ ret = ERL_DE_NO_ERROR;
+ } else {
+ if ((s = dlerror()) == NULL) {
+ find_errcode("unspecified error", err);
+ ret = ERL_DE_ERROR_UNSPECIFIED;
+ } else {
+ ret = ERL_DE_DYNAMIC_ERROR_OFFSET - find_errcode(s, err);
+ }
+ }
+ return ret;
+#else
+ return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
+#endif
+}
+
+
+/*
+ * Return string that describes the (current) error
+ */
+char *erts_sys_ddll_error(int code)
+{
+ int actual_code;
+
+ if (code > ERL_DE_DYNAMIC_ERROR_OFFSET) {
+ return "Unspecified error";
+ }
+ actual_code = -1*(code - ERL_DE_DYNAMIC_ERROR_OFFSET);
+#if defined(HAVE_DLOPEN)
+ {
+ char *msg;
+
+ if (actual_code >= num_errcodes) {
+ msg = "Unknown dlload error";
+ } else {
+ msg = errcodes[actual_code];
+ }
+ return msg;
+ }
+#endif
+ return "no error";
+}
+
+void erts_sys_ddll_free_error(ErtsSysDdllError* err)
+{
+ if (err->str != NULL) {
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, err->str);
+ }
+}
+
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
new file mode 100644
index 0000000000..183525b222
--- /dev/null
+++ b/erts/emulator/sys/unix/sys.c
@@ -0,0 +1,3346 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef ISC32
+#define _POSIX_SOURCE
+#define _XOPEN_SOURCE
+#endif
+
+#include <sys/times.h> /* ! */
+#include <time.h>
+#include <signal.h>
+#include <sys/wait.h>
+#include <sys/uio.h>
+#include <termios.h>
+#include <ctype.h>
+#include <sys/utsname.h>
+
+#ifdef ISC32
+#include <sys/bsdtypes.h>
+#endif
+
+#include <termios.h>
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#define NEED_CHILD_SETUP_DEFINES
+#define ERTS_WANT_BREAK_HANDLING
+#define ERTS_WANT_GOT_SIGUSR1
+#define WANT_NONBLOCKING /* must define this to pull in defs from sys.h */
+#include "sys.h"
+
+#ifdef USE_THREADS
+#include "erl_threads.h"
+#endif
+
+#include "erl_mseg.h"
+
+extern char **environ;
+static erts_smp_rwmtx_t environ_rwmtx;
+
+#define MAX_VSIZE 16 /* Max number of entries allowed in an I/O
+ * vector in sock_sendv().
+ */
+
+/*
+ * Don't need global.h, but bif_table.h (included by bif.h),
+ * won't compile otherwise
+ */
+#include "global.h"
+#include "bif.h"
+
+#include "erl_sys_driver.h"
+#include "erl_check_io.h"
+
+#ifndef DISABLE_VFORK
+#define DISABLE_VFORK 0
+#endif
+
+#ifdef USE_THREADS
+# ifdef ENABLE_CHILD_WAITER_THREAD
+# define CHLDWTHR ENABLE_CHILD_WAITER_THREAD
+# else
+# define CHLDWTHR 0
+# endif
+#else
+# define CHLDWTHR 0
+#endif
+/*
+ * [OTP-3906]
+ * Solaris signal management gets confused when threads are used and a
+ * lot of child processes die. The confusion results in SIGCHLD
+ * signals not being delivered to the emulator, which in turn results in
+ * a lot of defunct processes in the system.
+ *
+ * The problem seems to appear when a signal is frequently
+ * blocked/unblocked at the same time as the signal is frequently
+ * propagated. The child waiter thread is a workaround for this problem.
+ * The SIGCHLD signal is always blocked (in all threads), and the child
+ * waiter thread fetches the signal by a call to sigwait(). See
+ * child_waiter().
+ */
+
+typedef struct ErtsSysReportExit_ ErtsSysReportExit;
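+/*
+ * Note derived from the parser below: the file named on the command line
+ * is read line by line and may contain
+ *
+ *   #...         a comment, ignored
+ *   exec ...     ignored as well (lets the file double as a shell script)
+ *   NAME=VALUE   set NAME in the environment unless it is already set
+ *   :<N>         followed by exactly N bytes of binary data; the first
+ *                chunk is the module to load, the second is handed to
+ *                the first process
+ *   --end--      stop reading, load the module and start the first process
+ */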
+struct ErtsSysReportExit_ {
+ ErtsSysReportExit *next;
+ Eterm port;
+ int pid;
+ int ifd;
+ int ofd;
+#if CHLDWTHR && !defined(ERTS_SMP)
+ int status;
+#endif
+};
+
+static ErtsSysReportExit *report_exit_list;
+#if CHLDWTHR && !defined(ERTS_SMP)
+static ErtsSysReportExit *report_exit_transit_list;
+#endif
+
+extern int check_async_ready(void);
+extern int driver_interrupt(int, int);
+/*EXTERN_FUNCTION(void, increment_time, (int));*/
+/*EXTERN_FUNCTION(int, next_time, (_VOID_));*/
+extern void do_break(void);
+
+extern void erl_sys_args(int*, char**);
+
+/* The following two defs should probably be moved somewhere else */
+
+extern void erts_sys_init_float(void);
+
+extern void erl_crash_dump(char* file, int line, char* fmt, ...);
+
+#define DIR_SEPARATOR_CHAR '/'
+
+#if defined(DEBUG)
+#define ERL_BUILD_TYPE_MARKER ".debug"
+#elif defined(PURIFY)
+#define ERL_BUILD_TYPE_MARKER ".purify"
+#elif defined(QUANTIFY)
+#define ERL_BUILD_TYPE_MARKER ".quantify"
+#elif defined(PURECOV)
+#define ERL_BUILD_TYPE_MARKER ".purecov"
+#elif defined(VALGRIND)
+#define ERL_BUILD_TYPE_MARKER ".valgrind"
+#else /* opt */
+#define ERL_BUILD_TYPE_MARKER
+#endif
+
+#define CHILD_SETUP_PROG_NAME "child_setup" ERL_BUILD_TYPE_MARKER
+#if !DISABLE_VFORK
+static char *child_setup_prog;
+#endif
+
+#ifdef DEBUG
+static int debug_log = 0;
+#endif
+
+#ifdef ERTS_SMP
+erts_smp_atomic_t erts_got_sigusr1;
+#define ERTS_SET_GOT_SIGUSR1 \
+ erts_smp_atomic_set(&erts_got_sigusr1, 1)
+#define ERTS_UNSET_GOT_SIGUSR1 \
+ erts_smp_atomic_set(&erts_got_sigusr1, 0)
+static erts_smp_atomic_t have_prepared_crash_dump;
+#define ERTS_PREPARED_CRASH_DUMP \
+ ((int) erts_smp_atomic_xchg(&have_prepared_crash_dump, 1))
+#else
+volatile int erts_got_sigusr1;
+#define ERTS_SET_GOT_SIGUSR1 (erts_got_sigusr1 = 1)
+#define ERTS_UNSET_GOT_SIGUSR1 (erts_got_sigusr1 = 0)
+static volatile int have_prepared_crash_dump;
+#define ERTS_PREPARED_CRASH_DUMP \
+ (have_prepared_crash_dump++)
+#endif
+
+static erts_smp_atomic_t sys_misc_mem_sz;
+
+#if defined(ERTS_SMP)
+static void smp_sig_notify(char c);
+static int sig_notify_fds[2] = {-1, -1};
+#elif defined(USE_THREADS)
+static int async_fd[2];
+#endif
+
+#if CHLDWTHR || defined(ERTS_SMP)
+erts_mtx_t chld_stat_mtx;
+#endif
+#if CHLDWTHR
+static erts_tid_t child_waiter_tid;
+/* chld_stat_mtx is used to protect against concurrent accesses
+ of the driver_data fields pid, alive, and status. */
+erts_cnd_t chld_stat_cnd;
+static long children_alive;
+#define CHLD_STAT_LOCK erts_mtx_lock(&chld_stat_mtx)
+#define CHLD_STAT_UNLOCK erts_mtx_unlock(&chld_stat_mtx)
+#define CHLD_STAT_WAIT erts_cnd_wait(&chld_stat_cnd, &chld_stat_mtx)
+#define CHLD_STAT_SIGNAL erts_cnd_signal(&chld_stat_cnd)
+#elif defined(ERTS_SMP) /* ------------------------------------------------- */
+#define CHLD_STAT_LOCK erts_mtx_lock(&chld_stat_mtx)
+#define CHLD_STAT_UNLOCK erts_mtx_unlock(&chld_stat_mtx)
+
+#else /* ------------------------------------------------------------------- */
+#define CHLD_STAT_LOCK
+#define CHLD_STAT_UNLOCK
+static volatile int children_died;
+#endif
+
+
+static struct fd_data {
+ char pbuf[4]; /* hold partial packet bytes */
+ int psz; /* size of pbuf */
+ char *buf;
+ char *cpos;
+ int sz;
+ int remain; /* for input on fd */
+} *fd_data; /* indexed by fd */
+
+/* static FUNCTION(int, write_fill, (int, char*, int)); unused? */
+static FUNCTION(void, note_child_death, (int, int));
+
+#if CHLDWTHR
+static FUNCTION(void *, child_waiter, (void *));
+#endif
+
+/********************* General functions ****************************/
+
+/* This is used by both the drivers and general I/O, must be set early */
+static int max_files = -1;
+
+/*
+ * a few variables used by the break handler
+ */
+#ifdef ERTS_SMP
+erts_smp_atomic_t erts_break_requested;
+#define ERTS_SET_BREAK_REQUESTED \
+ erts_smp_atomic_set(&erts_break_requested, (long) 1)
+#define ERTS_UNSET_BREAK_REQUESTED \
+ erts_smp_atomic_set(&erts_break_requested, (long) 0)
+#else
+volatile int erts_break_requested = 0;
+#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
+#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0)
+#endif
+/* set early so the break handler has access to initial mode */
+static struct termios initial_tty_mode;
+static int replace_intr = 0;
+/* assume yes initially, ttsl_init will clear it */
+int using_oldshell = 1;
+
+#ifdef ERTS_ENABLE_KERNEL_POLL
+
+int erts_use_kernel_poll = 0;
+
+struct {
+ int (*select)(ErlDrvPort, ErlDrvEvent, int, int);
+ int (*event)(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
+ void (*check_io_interrupt)(int);
+ void (*check_io_interrupt_tmd)(int, long);
+ void (*check_io)(int);
+ Uint (*size)(void);
+ Eterm (*info)(void *);
+ int (*check_io_debug)(void);
+} io_func = {0};
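+
+/*
+ * The *_kp/*_nkp functions installed below come from the kernel poll and
+ * non kernel poll builds of the common check I/O code; which set is used
+ * is decided once at startup from erts_use_kernel_poll (set by the +K
+ * emulator flag), and all later calls go through this dispatch table.
+ */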
+
+
+int
+driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on)
+{
+ return (*io_func.select)(port, event, mode, on);
+}
+
+int
+driver_event(ErlDrvPort port, ErlDrvEvent event, ErlDrvEventData event_data)
+{
+ return (*io_func.event)(port, event, event_data);
+}
+
+Eterm erts_check_io_info(void *p)
+{
+ return (*io_func.info)(p);
+}
+
+int
+erts_check_io_debug(void)
+{
+ return (*io_func.check_io_debug)();
+}
+
+
+static void
+init_check_io(void)
+{
+ if (erts_use_kernel_poll) {
+ io_func.select = driver_select_kp;
+ io_func.event = driver_event_kp;
+ io_func.check_io_interrupt = erts_check_io_interrupt_kp;
+ io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_kp;
+ io_func.check_io = erts_check_io_kp;
+ io_func.size = erts_check_io_size_kp;
+ io_func.info = erts_check_io_info_kp;
+ io_func.check_io_debug = erts_check_io_debug_kp;
+ erts_init_check_io_kp();
+ max_files = erts_check_io_max_files_kp();
+ }
+ else {
+ io_func.select = driver_select_nkp;
+ io_func.event = driver_event_nkp;
+ io_func.check_io_interrupt = erts_check_io_interrupt_nkp;
+ io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_nkp;
+ io_func.check_io = erts_check_io_nkp;
+ io_func.size = erts_check_io_size_nkp;
+ io_func.info = erts_check_io_info_nkp;
+ io_func.check_io_debug = erts_check_io_debug_nkp;
+ erts_init_check_io_nkp();
+ max_files = erts_check_io_max_files_nkp();
+ }
+}
+
+#define ERTS_CHK_IO_INTR (*io_func.check_io_interrupt)
+#define ERTS_CHK_IO_INTR_TMD (*io_func.check_io_interrupt_tmd)
+#define ERTS_CHK_IO (*io_func.check_io)
+#define ERTS_CHK_IO_SZ (*io_func.size)
+
+#else /* !ERTS_ENABLE_KERNEL_POLL */
+
+static void
+init_check_io(void)
+{
+ erts_init_check_io();
+ max_files = erts_check_io_max_files();
+}
+
+#define ERTS_CHK_IO_INTR erts_check_io_interrupt
+#define ERTS_CHK_IO_INTR_TMD erts_check_io_interrupt_timed
+#define ERTS_CHK_IO erts_check_io
+#define ERTS_CHK_IO_SZ erts_check_io_size
+
+#endif
+
+#ifdef ERTS_SMP
+void
+erts_sys_schedule_interrupt(int set)
+{
+ ERTS_CHK_IO_INTR(set);
+}
+
+void
+erts_sys_schedule_interrupt_timed(int set, long msec)
+{
+ ERTS_CHK_IO_INTR_TMD(set, msec);
+}
+#endif
+
+Uint
+erts_sys_misc_mem_sz(void)
+{
+ Uint res = ERTS_CHK_IO_SZ();
+ res += erts_smp_atomic_read(&sys_misc_mem_sz);
+ return res;
+}
+
+/*
+ * reset the terminal to the original settings on exit
+ */
+void sys_tty_reset(void)
+{
+ if (using_oldshell && !replace_intr) {
+ SET_BLOCKING(0);
+ }
+ else if (isatty(0)) {
+ tcsetattr(0,TCSANOW,&initial_tty_mode);
+ }
+}
+
+#ifdef __tile__
+/* Direct malloc to spread memory around the caches of multiple tiles. */
+#include <malloc.h>
+MALLOC_USE_HASH(1);
+#endif
+
+#ifdef USE_THREADS
+static void *ethr_internal_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
+}
+static void *ethr_internal_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
+}
+static void ethr_internal_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
+}
+
+#ifdef ERTS_THR_HAVE_SIG_FUNCS
+/*
+ * Child thread inherits parents signal mask at creation. In order to
+ * guarantee that the main thread will receive all SIGINT, SIGCHLD, and
+ * SIGUSR1 signals sent to the process, we block these signals in the
+ * parent thread when creating a new thread.
+ */
+
+static sigset_t thr_create_sigmask;
+
+#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
+
+typedef struct {
+#ifdef ERTS_THR_HAVE_SIG_FUNCS
+ sigset_t saved_sigmask;
+#endif
+ int unbind_child;
+} erts_thr_create_data_t;
+
+/*
+ * thr_create_prepare() is called in parent thread before thread creation.
+ * Returned value is passed as argument to thr_create_cleanup().
+ */
+static void *
+thr_create_prepare(void)
+{
+ erts_thr_create_data_t *tcdp;
+ ErtsSchedulerData *esdp;
+
+ tcdp = erts_alloc(ERTS_ALC_T_TMP, sizeof(erts_thr_create_data_t));
+
+#ifdef ERTS_THR_HAVE_SIG_FUNCS
+ erts_thr_sigmask(SIG_BLOCK, &thr_create_sigmask, &tcdp->saved_sigmask);
+#endif
+ esdp = erts_get_scheduler_data();
+ tcdp->unbind_child = esdp && erts_is_scheduler_bound(esdp);
+
+ return (void *) tcdp;
+}
+
+
+/* thr_create_cleanup() is called in parent thread after thread creation. */
+static void
+thr_create_cleanup(void *vtcdp)
+{
+ erts_thr_create_data_t *tcdp = (erts_thr_create_data_t *) vtcdp;
+
+#ifdef ERTS_THR_HAVE_SIG_FUNCS
+ /* Restore signalmask... */
+ erts_thr_sigmask(SIG_SETMASK, &tcdp->saved_sigmask, NULL);
+#endif
+
+ erts_free(ERTS_ALC_T_TMP, tcdp);
+}
+
+static void
+thr_create_prepare_child(void *vtcdp)
+{
+ erts_thr_create_data_t *tcdp = (erts_thr_create_data_t *) vtcdp;
+
+#ifndef NO_FPE_SIGNALS
+ /*
+ * We do not want fp exceptions in threads other than the
+ * scheduler threads. We enable fpe explicitly in the scheduler
+ * threads after this.
+ */
+ erts_thread_disable_fpe();
+#endif
+
+ if (tcdp->unbind_child) {
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+ erts_unbind_from_cpu(erts_cpuinfo);
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+ }
+
+}
+
+#endif /* #ifdef USE_THREADS */
+
+void
+erts_sys_pre_init(void)
+{
+ erts_printf_add_cr_to_stdout = 1;
+ erts_printf_add_cr_to_stderr = 1;
+#ifdef USE_THREADS
+ {
+ erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
+ eid.alloc = ethr_internal_alloc;
+ eid.realloc = ethr_internal_realloc;
+ eid.free = ethr_internal_free;
+
+ eid.thread_create_child_func = thr_create_prepare_child;
+ /* Before creation in parent */
+ eid.thread_create_prepare_func = thr_create_prepare;
+ /* After creation in parent */
+ eid.thread_create_parent_func = thr_create_cleanup;
+
+#ifdef ERTS_THR_HAVE_SIG_FUNCS
+ sigemptyset(&thr_create_sigmask);
+ sigaddset(&thr_create_sigmask, SIGINT); /* block interrupt */
+ sigaddset(&thr_create_sigmask, SIGCHLD); /* block child signals */
+ sigaddset(&thr_create_sigmask, SIGUSR1); /* block user defined signal */
+#endif
+
+ erts_thr_init(&eid);
+
+ report_exit_list = NULL;
+
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init();
+#endif
+
+#if CHLDWTHR || defined(ERTS_SMP)
+ erts_mtx_init(&chld_stat_mtx, "child_status");
+#endif
+#if CHLDWTHR
+#ifndef ERTS_SMP
+ report_exit_transit_list = NULL;
+#endif
+ erts_cnd_init(&chld_stat_cnd);
+ children_alive = 0;
+#endif
+ }
+#ifdef ERTS_SMP
+ erts_smp_atomic_init(&erts_break_requested, 0);
+ erts_smp_atomic_init(&erts_got_sigusr1, 0);
+ erts_smp_atomic_init(&have_prepared_crash_dump, 0);
+#else
+ erts_break_requested = 0;
+ erts_got_sigusr1 = 0;
+ have_prepared_crash_dump = 0;
+#endif
+#if !CHLDWTHR && !defined(ERTS_SMP)
+ children_died = 0;
+#endif
+#endif /* USE_THREADS */
+ erts_smp_atomic_init(&sys_misc_mem_sz, 0);
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
+}
+
+void
+erl_sys_init(void)
+{
+#if !DISABLE_VFORK
+ int res;
+ char bindir[MAXPATHLEN];
+ size_t bindirsz = sizeof(bindir);
+ Uint csp_path_sz;
+
+ res = erts_sys_getenv("BINDIR", bindir, &bindirsz);
+ if (res != 0) {
+ if (res < 0)
+ erl_exit(-1,
+ "Environment variable BINDIR is not set\n");
+ if (res > 0)
+ erl_exit(-1,
+ "Value of environment variable BINDIR is too large\n");
+ }
+ if (bindir[0] != DIR_SEPARATOR_CHAR)
+ erl_exit(-1,
+ "Environment variable BINDIR does not contain an"
+ " absolute path\n");
+ csp_path_sz = (strlen(bindir)
+ + 1 /* DIR_SEPARATOR_CHAR */
+ + sizeof(CHILD_SETUP_PROG_NAME)
+ + 1);
+ child_setup_prog = erts_alloc(ERTS_ALC_T_CS_PROG_PATH, csp_path_sz);
+ erts_smp_atomic_add(&sys_misc_mem_sz, csp_path_sz);
+ sprintf(child_setup_prog,
+ "%s%c%s",
+ bindir,
+ DIR_SEPARATOR_CHAR,
+ CHILD_SETUP_PROG_NAME);
+#endif
+
+#ifdef USE_SETLINEBUF
+ setlinebuf(stdout);
+#else
+ setvbuf(stdout, (char *)NULL, _IOLBF, BUFSIZ);
+#endif
+
+ erts_sys_init_float();
+
+ /* we save this so the break handler can set and reset it properly */
+ /* also so that we can reset on exit (break handler or not) */
+ if (isatty(0)) {
+ tcgetattr(0,&initial_tty_mode);
+ }
+ tzset(); /* Required at least for NetBSD with localtime_r() */
+}
+
+/* signal handling */
+
+#ifdef SIG_SIGSET /* Old SysV */
+RETSIGTYPE (*sys_sigset(sig, func))()
+int sig;
+RETSIGTYPE (*func)();
+{
+ return(sigset(sig, func));
+}
+void sys_sigblock(int sig)
+{
+ sighold(sig);
+}
+void sys_sigrelease(int sig)
+{
+ sigrelse(sig);
+}
+#else /* !SIG_SIGSET */
+#ifdef SIG_SIGNAL /* Old BSD */
+RETSIGTYPE (*sys_sigset(sig, func))(int, int)
+int sig;
+RETSIGTYPE (*func)();
+{
+ return(signal(sig, func));
+}
+sys_sigblock(int sig)
+{
+ sigblock(sig);
+}
+sys_sigrelease(int sig)
+{
+ sigsetmask(sigblock(0) & ~sigmask(sig));
+}
+#else /* !SIG_SIGNAL */ /* The True Way - POSIX!:-) */
+RETSIGTYPE (*sys_sigset(int sig, RETSIGTYPE (*func)(int)))(int)
+{
+ struct sigaction act, oact;
+
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = 0;
+ act.sa_handler = func;
+ sigaction(sig, &act, &oact);
+ return(oact.sa_handler);
+}
+
+#ifdef USE_THREADS
+#undef sigprocmask
+#define sigprocmask erts_thr_sigmask
+#endif
+
+void sys_sigblock(int sig)
+{
+ sigset_t mask;
+
+ sigemptyset(&mask);
+ sigaddset(&mask, sig);
+ sigprocmask(SIG_BLOCK, &mask, (sigset_t *)NULL);
+}
+
+void sys_sigrelease(int sig)
+{
+ sigset_t mask;
+
+ sigemptyset(&mask);
+ sigaddset(&mask, sig);
+ sigprocmask(SIG_UNBLOCK, &mask, (sigset_t *)NULL);
+}
+#endif /* !SIG_SIGNAL */
+#endif /* !SIG_SIGSET */
+
+#if (0) /* not used? -- gordon */
+static void (*break_func)();
+static RETSIGTYPE break_handler(int sig)
+{
+#ifdef QNX
+ /* Turn off SIGCHLD during break processing */
+ sys_sigblock(SIGCHLD);
+#endif
+ (*break_func)();
+#ifdef QNX
+ sys_sigrelease(SIGCHLD);
+#endif
+}
+#endif /* 0 */
+
+static ERTS_INLINE void
+prepare_crash_dump(void)
+{
+ int i, max;
+ char env[21]; /* enough to hold any 64-bit integer */
+ size_t envsz;
+
+ if (ERTS_PREPARED_CRASH_DUMP)
+ return; /* We have already been called */
+
+ /* Make sure we unregister at epmd (unknown fd) and get at least
+ one free file descriptor (for erl_crash.dump) */
+ max = max_files;
+ if (max < 1024)
+ max = 1024;
+ for (i = 3; i < max; i++) {
+#if defined(ERTS_SMP)
+ /* We don't want to close the signal notification pipe... */
+ if (i == sig_notify_fds[0] || i == sig_notify_fds[1])
+ continue;
+#elif defined(USE_THREADS)
+ /* We don't want to close the async notification pipe... */
+ if (i == async_fd[0] || i == async_fd[1])
+ continue;
+#endif
+ close(i);
+ }
+
+ envsz = sizeof(env);
+ i = erts_sys_getenv("ERL_CRASH_DUMP_NICE", env, &envsz);
+ if (i >= 0) {
+ int nice_val;
+ nice_val = i != 0 ? 0 : atoi(env);
+ if (nice_val > 39) {
+ nice_val = 39;
+ }
+ nice(nice_val);
+ }
+
+ envsz = sizeof(env);
+ i = erts_sys_getenv("ERL_CRASH_DUMP_SECONDS", env, &envsz);
+ if (i >= 0) {
+ unsigned sec;
+ sec = (unsigned) i != 0 ? 0 : atoi(env);
+ alarm(sec);
+ }
+
+}
+
+void
+erts_sys_prepare_crash_dump(void)
+{
+ prepare_crash_dump();
+}
+
+static ERTS_INLINE void
+break_requested(void)
+{
+ /*
+ * just set a flag - checked for and handled by
+ * scheduler threads erts_check_io() (not signal handler).
+ */
+#ifdef DEBUG
+ fprintf(stderr,"break!\n");
+#endif
+ if (ERTS_BREAK_REQUESTED)
+ erl_exit(ERTS_INTR_EXIT, "");
+
+ ERTS_SET_BREAK_REQUESTED;
+ ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
+}
+
+/* set up signal handlers for break and quit */
+#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
+static RETSIGTYPE request_break(void)
+#else
+static RETSIGTYPE request_break(int signum)
+#endif
+{
+#ifdef ERTS_SMP
+ smp_sig_notify('I');
+#else
+ break_requested();
+#endif
+}
+
+static ERTS_INLINE void
+sigusr1_exit(void)
+{
+ /* We do this at interrupt level, since the main reason for
+ wanting to generate a crash dump in this way is that the emulator
+ is hung somewhere, so it won't be able to poll any flag we set here.
+ */
+ ERTS_SET_GOT_SIGUSR1;
+ prepare_crash_dump();
+ erl_exit(1, "Received SIGUSR1\n");
+}
+
+#ifdef ETHR_UNUSABLE_SIGUSRX
+#warning "Unusable SIGUSR1 & SIGUSR2. Disabling use of these signals"
+#endif
+
+#ifndef ETHR_UNUSABLE_SIGUSRX
+
+#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
+static RETSIGTYPE user_signal1(void)
+#else
+static RETSIGTYPE user_signal1(int signum)
+#endif
+{
+#ifdef ERTS_SMP
+ smp_sig_notify('1');
+#else
+ sigusr1_exit();
+#endif
+}
+
+#ifdef QUANTIFY
+#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
+static RETSIGTYPE user_signal2(void)
+#else
+static RETSIGTYPE user_signal2(int signum)
+#endif
+{
+#ifdef ERTS_SMP
+ smp_sig_notify('2');
+#else
+ quantify_save_data();
+#endif
+}
+#endif
+
+#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
+
+static void
+quit_requested(void)
+{
+ erl_exit(ERTS_INTR_EXIT, "");
+}
+
+#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
+static RETSIGTYPE do_quit(void)
+#else
+static RETSIGTYPE do_quit(int signum)
+#endif
+{
+#ifdef ERTS_SMP
+ smp_sig_notify('Q');
+#else
+ quit_requested();
+#endif
+}
+
+/* Disable break */
+void erts_set_ignore_break(void) {
+ sys_sigset(SIGINT, SIG_IGN);
+ sys_sigset(SIGQUIT, SIG_IGN);
+ sys_sigset(SIGTSTP, SIG_IGN);
+}
+
+/* Don't use ctrl-c for break handler but let it be
+ used by the shell instead (see user_drv.erl) */
+void erts_replace_intr(void) {
+ struct termios mode;
+
+ if (isatty(0)) {
+ tcgetattr(0, &mode);
+
+ /* here's an example of how to replace ctrl-c with ctrl-u */
+ /* mode.c_cc[VKILL] = 0;
+ mode.c_cc[VINTR] = CKILL; */
+
+ mode.c_cc[VINTR] = 0; /* disable ctrl-c */
+ tcsetattr(0, TCSANOW, &mode);
+ replace_intr = 1;
+ }
+}
+
+void init_break_handler(void)
+{
+ sys_sigset(SIGINT, request_break);
+#ifndef ETHR_UNUSABLE_SIGUSRX
+ sys_sigset(SIGUSR1, user_signal1);
+#ifdef QUANTIFY
+ sys_sigset(SIGUSR2, user_signal2);
+#endif
+#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
+ sys_sigset(SIGQUIT, do_quit);
+}
+
+int sys_max_files(void)
+{
+ return(max_files);
+}
+
+static void block_signals(void)
+{
+#if !CHLDWTHR
+ sys_sigblock(SIGCHLD);
+#endif
+#ifndef ERTS_SMP
+ sys_sigblock(SIGINT);
+#ifndef ETHR_UNUSABLE_SIGUSRX
+ sys_sigblock(SIGUSR1);
+#endif
+#endif
+}
+
+static void unblock_signals(void)
+{
+ /* Update erl_child_setup.c if changed */
+#if !CHLDWTHR
+ sys_sigrelease(SIGCHLD);
+#endif
+#ifndef ERTS_SMP
+ sys_sigrelease(SIGINT);
+#ifndef ETHR_UNUSABLE_SIGUSRX
+ sys_sigrelease(SIGUSR1);
+#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
+#endif
+}
+/************************** Time stuff **************************/
+#ifdef HAVE_GETHRTIME
+#ifdef GETHRTIME_WITH_CLOCK_GETTIME
+
+SysHrTime sys_gethrtime(void)
+{
+ struct timespec ts;
+ long long result;
+ if (clock_gettime(CLOCK_MONOTONIC,&ts) != 0) {
+ erl_exit(1,"Fatal, could not get clock_monotonic value!, "
+ "errno = %d\n", errno);
+ }
+ result = ((long long) ts.tv_sec) * 1000000000LL +
+ ((long long) ts.tv_nsec);
+ return (SysHrTime) result;
+}
+#endif
+#endif
+
+/************************** OS info *******************************/
+
+/* Used by erlang:info/1. */
+/* (This code was formerly in drv.XXX/XXX_os_drv.c) */
+
+char os_type[] = "unix";
+
+static int
+get_number(char **str_ptr)
+{
+ char* s = *str_ptr; /* Pointer to beginning of string. */
+ char* dot; /* Pointer to dot in string or NULL. */
+
+ if (!isdigit((int) *s))
+ return 0;
+ if ((dot = strchr(s, '.')) == NULL) {
+ *str_ptr = s+strlen(s);
+ return atoi(s);
+ } else {
+ *dot = '\0';
+ *str_ptr = dot+1;
+ return atoi(s);
+ }
+}
+
+void
+os_flavor(char* namebuf, /* Where to return the name. */
+ unsigned size) /* Size of name buffer. */
+{
+ static int called = 0;
+ static struct utsname uts; /* Information about the system. */
+
+ if (!called) {
+ char* s;
+
+ (void) uname(&uts);
+ called = 1;
+ for (s = uts.sysname; *s; s++) {
+ if (isupper((int) *s)) {
+ *s = tolower((int) *s);
+ }
+ }
+ }
+ strcpy(namebuf, uts.sysname);
+}
+
+void
+os_version(pMajor, pMinor, pBuild)
+int* pMajor; /* Pointer to major version. */
+int* pMinor; /* Pointer to minor version. */
+int* pBuild; /* Pointer to build number. */
+{
+ struct utsname uts; /* Information about the system. */
+ char* release; /* Pointer to the release string:
+ * X.Y or X.Y.Z.
+ */
+
+ (void) uname(&uts);
+ release = uts.release;
+ *pMajor = get_number(&release);
+ *pMinor = get_number(&release);
+ *pBuild = get_number(&release);
+}
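+
+/*
+ * Example (illustrative): a release string of "2.6.31" yields major 2,
+ * minor 6 and build 31, while "5.10" yields major 5, minor 10 and
+ * build 0.
+ */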
+
+void init_getenv_state(GETENV_STATE *state)
+{
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ *state = NULL;
+}
+
+char *getenv_string(GETENV_STATE *state0)
+{
+ char **state = (char **) *state0;
+ char *cp;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
+
+ if (state == NULL)
+ state = environ;
+
+ cp = *state++;
+ *state0 = (GETENV_STATE) state;
+
+ return cp;
+}
+
+void fini_getenv_state(GETENV_STATE *state)
+{
+ *state = NULL;
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+}
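+
+/*
+ * Illustrative usage sketch (not compiled): how a caller is expected to
+ * iterate over the environment with the GETENV_STATE protocol above. The
+ * read lock taken by init_getenv_state() is held until the matching
+ * fini_getenv_state() call.
+ */
+#if 0
+static void dump_environment_example(void)
+{
+    GETENV_STATE state;
+    char *entry;
+
+    init_getenv_state(&state);
+    while ((entry = getenv_string(&state)) != NULL)
+        fprintf(stderr, "%s\n", entry);
+    fini_getenv_state(&state);
+}
+#endif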
+
+
+/************************** Port I/O *******************************/
+
+
+
+/* I. Common stuff */
+
+/*
+ * Decreasing the size of the read buffer (ERTS_SYS_READ_BUF_SZ) below 16384 is not allowed.
+ */
+
+/* II. The spawn/fd/vanilla drivers */
+
+#define ERTS_SYS_READ_BUF_SZ (64*1024)
+
+/* This data is shared by these drivers - initialized by spawn_init() */
+static struct driver_data {
+ int port_num, ofd, packet_bytes;
+ ErtsSysReportExit *report_exit;
+ int pid;
+ int alive;
+ int status;
+} *driver_data; /* indexed by fd */
+
+/* Driver interfaces */
+static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*);
+static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
+static int fd_control(ErlDrvData, unsigned int, char *, int, char **, int);
+static ErlDrvData vanilla_start(ErlDrvPort, char*, SysDriverOpts*);
+static int spawn_init(void);
+static void fd_stop(ErlDrvData);
+static void stop(ErlDrvData);
+static void ready_input(ErlDrvData, ErlDrvEvent);
+static void ready_output(ErlDrvData, ErlDrvEvent);
+static void output(ErlDrvData, char*, int);
+static void outputv(ErlDrvData, ErlIOVec*);
+static void stop_select(ErlDrvEvent, void*);
+
+struct erl_drv_entry spawn_driver_entry = {
+ spawn_init,
+ spawn_start,
+ stop,
+ output,
+ ready_input,
+ ready_output,
+ "spawn",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL, NULL,
+ stop_select
+};
+struct erl_drv_entry fd_driver_entry = {
+ NULL,
+ fd_start,
+ fd_stop,
+ output,
+ ready_input,
+ ready_output,
+ "fd",
+ NULL,
+ NULL,
+ fd_control,
+ NULL,
+ outputv,
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL, /* handle2 */
+ NULL, /* process_exit */
+ stop_select
+};
+struct erl_drv_entry vanilla_driver_entry = {
+ NULL,
+ vanilla_start,
+ stop,
+ output,
+ ready_input,
+ ready_output,
+ "vanilla",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL, /* handle2 */
+ NULL, /* process_exit */
+ stop_select
+};
+
+#if defined(USE_THREADS) && !defined(ERTS_SMP)
+static int async_drv_init(void);
+static ErlDrvData async_drv_start(ErlDrvPort, char*, SysDriverOpts*);
+static void async_drv_stop(ErlDrvData);
+static void async_drv_input(ErlDrvData, ErlDrvEvent);
+
+/* INTERNAL use only */
+
+struct erl_drv_entry async_driver_entry = {
+ async_drv_init,
+ async_drv_start,
+ async_drv_stop,
+ NULL,
+ async_drv_input,
+ NULL,
+ "async",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+#endif
+
+/* Handle SIGCHLD signals. */
+#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
+static RETSIGTYPE onchld(void)
+#else
+static RETSIGTYPE onchld(int signum)
+#endif
+{
+#if CHLDWTHR
+ ASSERT(0); /* We should *never* catch a SIGCHLD signal */
+#elif defined(ERTS_SMP)
+ smp_sig_notify('C');
+#else
+ children_died = 1;
+ ERTS_CHK_IO_INTR(1); /* Make sure we don't sleep in poll */
+#endif
+}
+
+static int set_driver_data(int port_num,
+ int ifd,
+ int ofd,
+ int packet_bytes,
+ int read_write,
+ int exit_status,
+ int pid)
+{
+ ErtsSysReportExit *report_exit;
+
+ if (!exit_status)
+ report_exit = NULL;
+ else {
+ report_exit = erts_alloc(ERTS_ALC_T_PRT_REP_EXIT,
+ sizeof(ErtsSysReportExit));
+ report_exit->next = report_exit_list;
+ report_exit->port = erts_port[port_num].id;
+ report_exit->pid = pid;
+ report_exit->ifd = read_write & DO_READ ? ifd : -1;
+ report_exit->ofd = read_write & DO_WRITE ? ofd : -1;
+#if CHLDWTHR && !defined(ERTS_SMP)
+ report_exit->status = 0;
+#endif
+ report_exit_list = report_exit;
+ }
+
+ if (read_write & DO_READ) {
+ driver_data[ifd].packet_bytes = packet_bytes;
+ driver_data[ifd].port_num = port_num;
+ driver_data[ifd].report_exit = report_exit;
+ driver_data[ifd].pid = pid;
+ driver_data[ifd].alive = 1;
+ driver_data[ifd].status = 0;
+ if (read_write & DO_WRITE) {
+ driver_data[ifd].ofd = ofd;
+ if (ifd != ofd)
+ driver_data[ofd] = driver_data[ifd]; /* structure copy */
+ } else { /* DO_READ only */
+ driver_data[ifd].ofd = -1;
+ }
+ (void) driver_select(port_num, ifd, (ERL_DRV_READ|ERL_DRV_USE), 1);
+ return(ifd);
+ } else { /* DO_WRITE only */
+ driver_data[ofd].packet_bytes = packet_bytes;
+ driver_data[ofd].port_num = port_num;
+ driver_data[ofd].report_exit = report_exit;
+ driver_data[ofd].ofd = ofd;
+ driver_data[ofd].pid = pid;
+ driver_data[ofd].alive = 1;
+ driver_data[ofd].status = 0;
+ return(ofd);
+ }
+}
+
+static int spawn_init()
+{
+ int i;
+#if CHLDWTHR
+ erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
+ thr_opts.detached = 0;
+ thr_opts.suggested_stack_size = 0; /* Smallest possible */
+#endif
+
+ sys_sigset(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */
+ driver_data = (struct driver_data *)
+ erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
+ erts_smp_atomic_add(&sys_misc_mem_sz,
+ max_files * sizeof(struct driver_data));
+
+ for (i = 0; i < max_files; i++)
+ driver_data[i].pid = -1;
+
+#if CHLDWTHR
+ sys_sigblock(SIGCHLD);
+#endif
+
+ sys_sigset(SIGCHLD, onchld); /* Reap children */
+
+#if CHLDWTHR
+ erts_thr_create(&child_waiter_tid, child_waiter, NULL, &thr_opts);
+#endif
+
+ return 1;
+}
+
+static void close_pipes(int ifd[2], int ofd[2], int read_write)
+{
+ if (read_write & DO_READ) {
+ (void) close(ifd[0]);
+ (void) close(ifd[1]);
+ }
+ if (read_write & DO_WRITE) {
+ (void) close(ofd[0]);
+ (void) close(ofd[1]);
+ }
+}
+
+static void init_fd_data(int fd, int prt)
+{
+ fd_data[fd].buf = NULL;
+ fd_data[fd].cpos = NULL;
+ fd_data[fd].remain = 0;
+ fd_data[fd].sz = 0;
+ fd_data[fd].psz = 0;
+}
+
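+/*
+ * Note derived from the code below: `block' is the environment
+ * specification from the port options, a sequence of NUL-terminated
+ * "NAME=VALUE" strings ending with an extra NUL. The result is a
+ * NULL-terminated vector in which entries from `block' override
+ * inherited entries in `environ' with the same NAME, and an entry of
+ * the form "NAME=" (empty value) removes NAME altogether.
+ */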
+static char **build_unix_environment(char *block)
+{
+ int i;
+ int j;
+ int len;
+ char *cp;
+ char **cpp;
+ char** old_env;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
+
+ cp = block;
+ len = 0;
+ while (*cp != '\0') {
+ cp += strlen(cp) + 1;
+ len++;
+ }
+ old_env = environ;
+ while (*old_env++ != NULL) {
+ len++;
+ }
+
+ cpp = (char **) erts_alloc_fnf(ERTS_ALC_T_ENVIRONMENT,
+ sizeof(char *) * (len+1));
+ if (cpp == NULL) {
+ return NULL;
+ }
+
+ cp = block;
+ len = 0;
+ while (*cp != '\0') {
+ cpp[len] = cp;
+ cp += strlen(cp) + 1;
+ len++;
+ }
+
+ i = len;
+ for (old_env = environ; *old_env; old_env++) {
+ char* old = *old_env;
+
+ for (j = 0; j < len; j++) {
+ char *s, *t;
+
+ s = cpp[j];
+ t = old;
+ while (*s == *t && *s != '=') {
+ s++, t++;
+ }
+ if (*s == '=' && *t == '=') {
+ break;
+ }
+ }
+
+ if (j == len) { /* New version not found */
+ cpp[len++] = old;
+ }
+ }
+
+ for (j = 0; j < i; j++) {
+ if (cpp[j][strlen(cpp[j])-1] == '=') {
+ cpp[j] = cpp[--len];
+ }
+ }
+
+ cpp[len] = NULL;
+ return cpp;
+}
+
+/*
+ [arndt] In most Unix systems, including Solaris 2.5, 'fork' allocates memory
+ in swap space for the child of a 'fork', whereas 'vfork' does not do this.
+ The natural call to use here is therefore 'vfork'. Due to a bug in
+ 'vfork' in Solaris 2.5 (apparently fixed in 2.6), using 'vfork'
+ can be dangerous in what seems to be these circumstances:
+ If the child code under a vfork sets the signal action to SIG_DFL
+ (or SIG_IGN) for any signal which was previously set to a signal handler, the
+ state of the parent is clobbered, so that the later arrival of
+ such a signal yields a sigsegv in the parent. If the signal was
+ not set to a signal handler, but ignored, all seems to work.
+ If you change the forking code below, beware of this.
+ */
+
+static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
+{
+#define CMD_LINE_PREFIX_STR "exec "
+#define CMD_LINE_PREFIX_STR_SZ (sizeof(CMD_LINE_PREFIX_STR) - 1)
+
+ int ifd[2], ofd[2], len, pid, i;
+ char **volatile new_environ; /* volatile since a vfork() then cannot
+ cause 'new_environ' to be clobbered
+ in the parent process. */
+ int saved_errno;
+ long res;
+ char *cmd_line;
+#ifndef QNX
+ int unbind;
+#endif
+#if !DISABLE_VFORK
+ int no_vfork;
+ size_t no_vfork_sz = sizeof(no_vfork);
+
+ no_vfork = (erts_sys_getenv("ERL_NO_VFORK",
+ (char *) &no_vfork,
+ &no_vfork_sz) >= 0);
+#endif
+
+ switch (opts->read_write) {
+ case DO_READ:
+ if (pipe(ifd) < 0)
+ return ERL_DRV_ERROR_ERRNO;
+ if (ifd[0] >= max_files) {
+ close_pipes(ifd, ofd, opts->read_write);
+ errno = EMFILE;
+ return ERL_DRV_ERROR_ERRNO;
+ }
+ ofd[1] = -1; /* keep purify happy */
+ break;
+ case DO_WRITE:
+ if (pipe(ofd) < 0) return ERL_DRV_ERROR_ERRNO;
+ if (ofd[1] >= max_files) {
+ close_pipes(ifd, ofd, opts->read_write);
+ errno = EMFILE;
+ return ERL_DRV_ERROR_ERRNO;
+ }
+ ifd[0] = -1; /* keep purify happy */
+ break;
+ case DO_READ|DO_WRITE:
+ if (pipe(ifd) < 0) return ERL_DRV_ERROR_ERRNO;
+ errno = EMFILE; /* default for next two conditions */
+ if (ifd[0] >= max_files || pipe(ofd) < 0) {
+ close_pipes(ifd, ofd, DO_READ);
+ return ERL_DRV_ERROR_ERRNO;
+ }
+ if (ofd[1] >= max_files) {
+ close_pipes(ifd, ofd, opts->read_write);
+ errno = EMFILE;
+ return ERL_DRV_ERROR_ERRNO;
+ }
+ break;
+ default:
+ ASSERT(0);
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ if (opts->spawn_type == ERTS_SPAWN_EXECUTABLE) {
+ /* started with spawn_executable, not with spawn */
+ len = strlen(name);
+ cmd_line = (char *) erts_alloc_fnf(ERTS_ALC_T_TMP, len + 1);
+ if (!cmd_line) {
+ close_pipes(ifd, ofd, opts->read_write);
+ errno = ENOMEM;
+ return ERL_DRV_ERROR_ERRNO;
+ }
+ memcpy((void *) cmd_line,(void *) name, len);
+ cmd_line[len] = '\0';
+ if (access(cmd_line,X_OK) != 0) {
+ int save_errno = errno;
+ erts_free(ERTS_ALC_T_TMP, cmd_line);
+ errno = save_errno;
+ return ERL_DRV_ERROR_ERRNO;
+ }
+ } else {
+ /* make the string suitable for giving to "sh" */
+ len = strlen(name);
+ cmd_line = (char *) erts_alloc_fnf(ERTS_ALC_T_TMP,
+ CMD_LINE_PREFIX_STR_SZ + len + 1);
+ if (!cmd_line) {
+ close_pipes(ifd, ofd, opts->read_write);
+ errno = ENOMEM;
+ return ERL_DRV_ERROR_ERRNO;
+ }
+ memcpy((void *) cmd_line,
+ (void *) CMD_LINE_PREFIX_STR,
+ CMD_LINE_PREFIX_STR_SZ);
+ memcpy((void *) (cmd_line + CMD_LINE_PREFIX_STR_SZ), (void *) name, len);
+ cmd_line[CMD_LINE_PREFIX_STR_SZ + len] = '\0';
+ }
+
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+
+ if (opts->envir == NULL) {
+ new_environ = environ;
+ } else if ((new_environ = build_unix_environment(opts->envir)) == NULL) {
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
+ errno = ENOMEM;
+ return ERL_DRV_ERROR_ERRNO;
+ }
+
+#ifndef QNX
+ /* Block child from SIGINT and SIGUSR1. Must be before fork()
+ to be safe. */
+ block_signals();
+
+ CHLD_STAT_LOCK;
+
+ unbind = erts_is_scheduler_bound(NULL);
+ if (unbind)
+ erts_smp_rwmtx_rlock(&erts_cpu_bind_rwmtx);
+
+#if !DISABLE_VFORK
+ /* See fork/vfork discussion before this function. */
+ if (no_vfork) {
+#endif
+
+ DEBUGF(("Using fork\n"));
+ pid = fork();
+
+ if (pid == 0) {
+ /* The child! Setup child... */
+
+ if (unbind && erts_unbind_from_cpu(erts_cpuinfo) != 0)
+ goto child_error;
+
+ /* OBSERVE!
+ * Keep child setup after vfork() (implemented below and in
+ * erl_child_setup.c) up to date if changes are made here.
+ */
+
+ if (opts->use_stdio) {
+ if (opts->read_write & DO_READ) {
+ /* stdout for process */
+ if (dup2(ifd[1], 1) < 0)
+ goto child_error;
+ if(opts->redir_stderr)
+ /* stderr for process */
+ if (dup2(ifd[1], 2) < 0)
+ goto child_error;
+ }
+ if (opts->read_write & DO_WRITE)
+ /* stdin for process */
+ if (dup2(ofd[0], 0) < 0)
+ goto child_error;
+ }
+ else { /* XXX will fail if ofd[0] == 4 (unlikely..) */
+ if (opts->read_write & DO_READ)
+ if (dup2(ifd[1], 4) < 0)
+ goto child_error;
+ if (opts->read_write & DO_WRITE)
+ if (dup2(ofd[0], 3) < 0)
+ goto child_error;
+ }
+
+ for (i = opts->use_stdio ? 3 : 5; i < max_files; i++)
+ (void) close(i);
+
+ if (opts->wd && chdir(opts->wd) < 0)
+ goto child_error;
+
+#if defined(USE_SETPGRP_NOARGS) /* SysV */
+ (void) setpgrp();
+#elif defined(USE_SETPGRP) /* BSD */
+ (void) setpgrp(0, getpid());
+#else /* POSIX */
+ (void) setsid();
+#endif
+
+ unblock_signals();
+
+ if (opts->spawn_type == ERTS_SPAWN_EXECUTABLE) {
+ if (opts->argv == NULL) {
+ execle(cmd_line,cmd_line,(char *) NULL, new_environ);
+ } else {
+ if (opts->argv[0] == erts_default_arg0) {
+ opts->argv[0] = cmd_line;
+ }
+ execve(cmd_line, opts->argv, new_environ);
+ if (opts->argv[0] == cmd_line) {
+ opts->argv[0] = erts_default_arg0;
+ }
+ }
+ } else {
+ execle("/bin/sh", "sh", "-c", cmd_line, (char *) NULL, new_environ);
+ }
+ child_error:
+ _exit(1);
+ }
+#if !DISABLE_VFORK
+ }
+ else { /* Use vfork() */
+ char **cs_argv= erts_alloc(ERTS_ALC_T_TMP,(CS_ARGV_NO_OF_ARGS + 1)*
+ sizeof(char *));
+ char fd_close_range[44]; /* 44 bytes are enough to */
+ char dup2_op[CS_ARGV_NO_OF_DUP2_OPS][44]; /* hold any "%d:%d" string */
+ /* on a 64-bit machine. */
+
+ /* Setup argv[] for the child setup program (implemented in
+ erl_child_setup.c) */
+ i = 0;
+ if (opts->use_stdio) {
+ if (opts->read_write & DO_READ){
+ /* stdout for process */
+ sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 1);
+ if(opts->redir_stderr)
+ /* stderr for process */
+ sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 2);
+ }
+ if (opts->read_write & DO_WRITE)
+ /* stdin for process */
+ sprintf(&dup2_op[i++][0], "%d:%d", ofd[0], 0);
+ } else { /* XXX will fail if ofd[0] == 4 (unlikely..) */
+ if (opts->read_write & DO_READ)
+ sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 4);
+ if (opts->read_write & DO_WRITE)
+ sprintf(&dup2_op[i++][0], "%d:%d", ofd[0], 3);
+ }
+ for (; i < CS_ARGV_NO_OF_DUP2_OPS; i++)
+ strcpy(&dup2_op[i][0], "-");
+ sprintf(fd_close_range, "%d:%d", opts->use_stdio ? 3 : 5, max_files-1);
+
+ cs_argv[CS_ARGV_PROGNAME_IX] = child_setup_prog;
+ cs_argv[CS_ARGV_WD_IX] = opts->wd ? opts->wd : ".";
+ cs_argv[CS_ARGV_UNBIND_IX]
+ = (unbind ? erts_get_unbind_from_cpu_str(erts_cpuinfo) : "false");
+ cs_argv[CS_ARGV_FD_CR_IX] = fd_close_range;
+ for (i = 0; i < CS_ARGV_NO_OF_DUP2_OPS; i++)
+ cs_argv[CS_ARGV_DUP2_OP_IX(i)] = &dup2_op[i][0];
+
+ if (opts->spawn_type == ERTS_SPAWN_EXECUTABLE) {
+ int num = 0;
+ int j = 0;
+ if (opts->argv != NULL) {
+ for(; opts->argv[num] != NULL; ++num)
+ ;
+ }
+ cs_argv = erts_realloc(ERTS_ALC_T_TMP,cs_argv, (CS_ARGV_NO_OF_ARGS + 1 + num + 1) * sizeof(char *));
+ cs_argv[CS_ARGV_CMD_IX] = "-";
+ cs_argv[CS_ARGV_NO_OF_ARGS] = cmd_line;
+ if (opts->argv != NULL) {
+ for (;opts->argv[j] != NULL; ++j) {
+ if (opts->argv[j] == erts_default_arg0) {
+ cs_argv[CS_ARGV_NO_OF_ARGS + 1 + j] = cmd_line;
+ } else {
+ cs_argv[CS_ARGV_NO_OF_ARGS + 1 + j] = opts->argv[j];
+ }
+ }
+ }
+ cs_argv[CS_ARGV_NO_OF_ARGS + 1 + j] = NULL;
+ } else {
+ cs_argv[CS_ARGV_CMD_IX] = cmd_line; /* Command */
+ cs_argv[CS_ARGV_NO_OF_ARGS] = NULL;
+ }
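+	/* Reading aid (hypothetical fd numbers and slot count): for a plain
+	 * spawn of "ls" with use_stdio set, the argv handed to the child
+	 * setup program ends up roughly as
+	 *
+	 *   { child_setup_prog, ".", "false", "3:1023",
+	 *     "9:1", "10:0", "-", "exec ls", NULL }
+	 *
+	 * i.e. the working directory, the unbind string, the fd close range
+	 * built above, one "fd:fd" string (or "-" for an unused slot) per
+	 * dup2 operation, and finally the command line. The exact index
+	 * order is given by the CS_ARGV_*_IX macros defined elsewhere in
+	 * this file.
+	 */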
+ DEBUGF(("Using vfork\n"));
+ pid = vfork();
+
+ if (pid == 0) {
+ /* The child! */
+
+ /* Observe!
+ * OTP-4389: The child setup program (implemented in
+ * erl_child_setup.c) will perform the necessary setup of the
+	     * child before it execs to the user program. This is because
+	     * vfork() only allows an *immediate* execve() or _exit() in the
+	     * child.
+ */
+ execve(child_setup_prog, cs_argv, new_environ);
+ _exit(1);
+ }
+ erts_free(ERTS_ALC_T_TMP,cs_argv);
+ }
+#endif
+
+ if (unbind)
+ erts_smp_rwmtx_runlock(&erts_cpu_bind_rwmtx);
+
+ if (pid == -1) {
+ saved_errno = errno;
+ CHLD_STAT_UNLOCK;
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
+ unblock_signals();
+ close_pipes(ifd, ofd, opts->read_write);
+ errno = saved_errno;
+ return ERL_DRV_ERROR_ERRNO;
+ }
+#else /* QNX */
+ if (opts->use_stdio) {
+ if (opts->read_write & DO_READ)
+ qnx_spawn_options.iov[1] = ifd[1]; /* stdout for process */
+ if (opts->read_write & DO_WRITE)
+ qnx_spawn_options.iov[0] = ofd[0]; /* stdin for process */
+ }
+ else {
+ if (opts->read_write & DO_READ)
+ qnx_spawn_options.iov[4] = ifd[1];
+ if (opts->read_write & DO_WRITE)
+ qnx_spawn_options.iov[3] = ofd[0];
+ }
+ /* Close fds on exec */
+ for (i = 3; i < max_files; i++)
+ fcntl(i, F_SETFD, 1);
+
+ qnx_spawn_options.flags = _SPAWN_SETSID;
+ if ((pid = spawnl(P_NOWAIT, "/bin/sh", "/bin/sh", "-c", cmd_line,
+ (char *) 0)) < 0) {
+ erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
+ reset_qnx_spawn();
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ close_pipes(ifd, ofd, opts->read_write);
+ return ERL_DRV_ERROR_GENERAL;
+ }
+ reset_qnx_spawn();
+#endif /* QNX */
+
+ erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
+
+ if (new_environ != environ)
+ erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ);
+
+ if (opts->read_write & DO_READ)
+ (void) close(ifd[1]);
+ if (opts->read_write & DO_WRITE)
+ (void) close(ofd[0]);
+
+ if (opts->read_write & DO_READ) {
+ SET_NONBLOCKING(ifd[0]);
+ init_fd_data(ifd[0], port_num);
+ }
+ if (opts->read_write & DO_WRITE) {
+ SET_NONBLOCKING(ofd[1]);
+ init_fd_data(ofd[1], port_num);
+ }
+
+ res = set_driver_data(port_num, ifd[0], ofd[1], opts->packet_bytes,
+ opts->read_write, opts->exit_status, pid);
+ /* Don't unblock SIGCHLD until now, since the call above must
+ first complete putting away the info about our new subprocess. */
+ unblock_signals();
+
+#if CHLDWTHR
+ ASSERT(children_alive >= 0);
+
+ if (!(children_alive++))
+	CHLD_STAT_SIGNAL; /* Wake up the child waiter thread if no children
+			     were alive before we fork()ed ... */
+#endif
+    /* Don't unlock chld_stat_mtx until now, for the same reason as above */
+ CHLD_STAT_UNLOCK;
+
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+
+ return (ErlDrvData)res;
+#undef CMD_LINE_PREFIX_STR
+#undef CMD_LINE_PREFIX_STR_SZ
+}
+
+#ifdef QNX
+static int reset_qnx_spawn(void)
+{
+ int i;
+
+ /* Reset qnx_spawn_options */
+ qnx_spawn_options.flags = 0;
+ qnx_spawn_options.iov[0] = 0xff;
+ qnx_spawn_options.iov[1] = 0xff;
+ qnx_spawn_options.iov[2] = 0xff;
+ qnx_spawn_options.iov[3] = 0xff;
+}
+#endif
+
+#define FD_DEF_HEIGHT 24
+#define FD_DEF_WIDTH 80
+/* Control op */
+#define FD_CTRL_OP_GET_WINSIZE 100
+
+static int fd_get_window_size(int fd, Uint32 *width, Uint32 *height)
+{
+#ifdef TIOCGWINSZ
+ struct winsize ws;
+ if (ioctl(fd,TIOCGWINSZ,&ws) == 0) {
+ *width = (Uint32) ws.ws_col;
+ *height = (Uint32) ws.ws_row;
+ return 0;
+ }
+#endif
+ return -1;
+}
+
+static int fd_control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, int len,
+ char **rbuf, int rlen)
+{
+ int fd = (int)(long)drv_data;
+ char resbuff[2*sizeof(Uint32)];
+ switch (command) {
+ case FD_CTRL_OP_GET_WINSIZE:
+ {
+ Uint32 w,h;
+ if (fd_get_window_size(fd,&w,&h))
+ return 0;
+ memcpy(resbuff,&w,sizeof(Uint32));
+ memcpy(resbuff+sizeof(Uint32),&h,sizeof(Uint32));
+ }
+ break;
+ default:
+ return 0;
+ }
+ if (rlen < 2*sizeof(Uint32)) {
+ *rbuf = driver_alloc(2*sizeof(Uint32));
+ }
+ memcpy(*rbuf,resbuff,2*sizeof(Uint32));
+ return 2*sizeof(Uint32);
+}
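+
+/* Illustrative sketch (not part of the original driver): a successful
+ * FD_CTRL_OP_GET_WINSIZE reply produced above is 2*sizeof(Uint32) bytes,
+ * width followed by height, in native byte order; a reply length of 0
+ * means the command was unknown or the window size could not be
+ * determined. A caller on the C side could decode the reply like this
+ * (the function name is hypothetical):
+ */
+#if 0
+static void example_decode_winsize(const char *resbuff,
+				    Uint32 *width, Uint32 *height)
+{
+    memcpy(width, resbuff, sizeof(Uint32));
+    memcpy(height, resbuff + sizeof(Uint32), sizeof(Uint32));
+}
+#endif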
+
+static ErlDrvData fd_start(ErlDrvPort port_num, char* name,
+ SysDriverOpts* opts)
+{
+ ErlDrvData res;
+
+ if (((opts->read_write & DO_READ) && opts->ifd >= max_files) ||
+ ((opts->read_write & DO_WRITE) && opts->ofd >= max_files))
+ return ERL_DRV_ERROR_GENERAL;
+
+ /*
+ * Historical:
+ *
+ * "Note about nonblocking I/O.
+ *
+ * At least on Solaris, setting the write end of a TTY to nonblocking,
+ * will set the input end to nonblocking as well (and vice-versa).
+ * If erl is run in a pipeline like this: cat | erl
+ * the input end of the TTY will be the standard input of cat.
+ * And cat is not prepared to handle nonblocking I/O."
+ *
+ * Actually, the reason for this is not that the tty itself gets set
+ * in non-blocking mode, but that the "input end" (cat's stdin) and
+ * the "output end" (erlang's stdout) are typically the "same" file
+ * descriptor, dup()'ed from a single fd by one of this process'
+ * ancestors.
+ *
+ * The workaround for this problem used to be a rather bad kludge,
+ * interposing an extra process ("internal cat") between erlang's
+ * stdout and the original stdout, allowing erlang to set its stdout
+ * in non-blocking mode without affecting the stdin of the preceding
+ * process in the pipeline - and being a kludge, it caused all kinds
+ * of weird problems.
+ *
+ * So, this is the current logic:
+ *
+ * The only reason to set non-blocking mode on the output fd at all is
+ * if it's something that can cause a write() to block, of course,
+ * i.e. primarily if it points to a tty, socket, pipe, or fifo.
+ *
+ * If we don't set non-blocking mode when we "should" have, and output
+ * becomes blocked, the entire runtime system will be suspended - this
+ * is normally bad of course, and can happen fairly "easily" - e.g. user
+ * hits ^S on tty - but doesn't necessarily happen.
+ *
+ * If we do set non-blocking mode when we "shouldn't" have, the runtime
+ * system will end up seeing EOF on the input fd (due to the preceding
+ * process dying), which typically will cause the entire runtime system
+ * to terminate immediately (due to whatever erlang process is seeing
+ * the EOF taking it as a signal to halt the system). This is *very* bad.
+ *
+ * I.e. we should take a conservative approach, and only set non-
+ * blocking mode when we a) need to, and b) are reasonably certain
+ * that it won't be a problem. And as in the example above, the problem
+ * occurs when input fd and output fd point to different "things".
+ *
+ * However, determining that they are not just the same "type" of
+ * "thing", but actually the same instance of that type of thing, is
+ * unreasonably complex in many/most cases.
+ *
+ * Also, with pipes, sockets, and fifos it's far from obvious that the
+ * user *wants* non-blocking output: If you're running erlang inside
+ * some complex pipeline, you're probably not running a real-time system
+ * that must never stop, but rather *want* it to suspend if the output
+ * channel is "full".
+ *
+ * So, the bottom line: We will only set the output fd non-blocking if
+ * it points to a tty, and either a) the input fd also points to a tty,
+ * or b) we can make sure that setting the output fd non-blocking
+ * doesn't interfere with someone else's input, via a somewhat milder
+ * kludge than the above.
+ *
+ * Also keep in mind that while this code is almost exclusively run as
+ * a result of an erlang open_port({fd,0,1}, ...), that isn't the only
+ * case - it can be called with any old pre-existing file descriptors,
+ * the relations between which (if they're even two) we can only guess
+ * at - still, we try our best...
+ */
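+
+    /* Reading aid: summary of the decision implemented below.
+     *
+     *   read end?  ofd a tty?  ifd a tty?  action
+     *   ---------  ----------  ----------  -------------------------------
+     *   no         -           -           leave the output fd blocking
+     *   yes        no          -           leave the output fd blocking
+     *   yes        yes         yes         SET_NONBLOCKING(opts->ofd)
+     *   yes        yes         no          try to re-open the tty; on
+     *                                      success dup2() it over opts->ofd
+     *                                      and set that fd non-blocking
+     */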
+
+ if (opts->read_write & DO_READ) {
+ init_fd_data(opts->ifd, port_num);
+ }
+ if (opts->read_write & DO_WRITE) {
+ init_fd_data(opts->ofd, port_num);
+
+ /* If we don't have a read end, all bets are off - no non-blocking. */
+ if (opts->read_write & DO_READ) {
+
+ if (isatty(opts->ofd)) { /* output fd is a tty:-) */
+
+ if (isatty(opts->ifd)) { /* input fd is also a tty */
+
+ /* To really do this "right", we should also check that
+ input and output fd point to the *same* tty - but
+ this seems like overkill; ttyname() isn't for free,
+ and this is a very common case - and it's hard to
+ imagine a scenario where setting non-blocking mode
+ here would cause problems - go ahead and do it. */
+
+ SET_NONBLOCKING(opts->ofd);
+
+ } else { /* output fd is a tty, input fd isn't */
+
+ /* This is a "problem case", but also common (see the
+ example above) - i.e. it makes sense to try a bit
+ harder before giving up on non-blocking mode: Try to
+ re-open the tty that the output fd points to, and if
+ successful replace the original one with the "new" fd
+ obtained this way, and set *that* one in non-blocking
+ mode. (Yes, this is a kludge.)
+
+ However, re-opening the tty may fail in a couple of
+ (unusual) cases:
+
+ 1) The name of the tty (or an equivalent one, i.e.
+ same major/minor number) can't be found, because
+ it actually lives somewhere other than /dev (or
+ wherever ttyname() looks for it), and isn't
+ equivalent to any of those that do live in the
+ "standard" place - this should be *very* unusual.
+
+ 2) Permissions on the tty don't allow us to open it -
+ it's perfectly possible to have an fd open to an
+ object whose permissions wouldn't allow us to open
+ it. This is not as unusual as it sounds, one case
+ is if the user has su'ed to someone else (not
+ root) - we have a read/write fd open to the tty
+ (because it has been inherited all the way down
+ here), but we have neither read nor write
+ permission for the tty.
+
+ In these cases, we finally give up, and don't set the
+ output fd in non-blocking mode. */
+
+ char *tty;
+ int nfd;
+
+ if ((tty = ttyname(opts->ofd)) != NULL &&
+ (nfd = open(tty, O_WRONLY)) != -1) {
+ dup2(nfd, opts->ofd);
+ close(nfd);
+ SET_NONBLOCKING(opts->ofd);
+ }
+ }
+ }
+ }
+ }
+ CHLD_STAT_LOCK;
+ res = (ErlDrvData)(long)set_driver_data(port_num, opts->ifd, opts->ofd,
+ opts->packet_bytes,
+ opts->read_write, 0, -1);
+ CHLD_STAT_UNLOCK;
+ return res;
+}
+
+static void clear_fd_data(int fd)
+{
+ if (fd_data[fd].sz > 0) {
+ erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fd_data[fd].buf);
+ ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= fd_data[fd].sz);
+ erts_smp_atomic_add(&sys_misc_mem_sz, -1*fd_data[fd].sz);
+ }
+ fd_data[fd].buf = NULL;
+ fd_data[fd].sz = 0;
+ fd_data[fd].remain = 0;
+ fd_data[fd].cpos = NULL;
+ fd_data[fd].psz = 0;
+}
+
+static void nbio_stop_fd(int prt, int fd)
+{
+ driver_select(prt,fd,DO_READ|DO_WRITE,0);
+ clear_fd_data(fd);
+ SET_BLOCKING(fd);
+}
+
+static void fd_stop(ErlDrvData fd) /* Does not close the fds */
+{
+ int ofd;
+
+ nbio_stop_fd(driver_data[(int)(long)fd].port_num, (int)(long)fd);
+ ofd = driver_data[(int)(long)fd].ofd;
+ if (ofd != (int)(long)fd && ofd != -1)
+ nbio_stop_fd(driver_data[(int)(long)fd].port_num, (int)(long)ofd);
+}
+
+static ErlDrvData vanilla_start(ErlDrvPort port_num, char* name,
+ SysDriverOpts* opts)
+{
+ int flags, fd;
+ ErlDrvData res;
+
+ flags = (opts->read_write == DO_READ ? O_RDONLY :
+ opts->read_write == DO_WRITE ? O_WRONLY|O_CREAT|O_TRUNC :
+ O_RDWR|O_CREAT);
+ if ((fd = open(name, flags, 0666)) < 0)
+ return ERL_DRV_ERROR_GENERAL;
+ if (fd >= max_files) {
+ close(fd);
+ return ERL_DRV_ERROR_GENERAL;
+ }
+ SET_NONBLOCKING(fd);
+ init_fd_data(fd, port_num);
+
+ CHLD_STAT_LOCK;
+ res = (ErlDrvData)(long)set_driver_data(port_num, fd, fd,
+ opts->packet_bytes,
+ opts->read_write, 0, -1);
+ CHLD_STAT_UNLOCK;
+ return res;
+}
+
+/* Note that driver_data[fd].ifd == fd if the port was opened for reading, */
+/* otherwise (i.e. write only) driver_data[fd].ofd = fd. */
+
+static void stop(ErlDrvData fd)
+{
+ int prt, ofd;
+
+ prt = driver_data[(int)(long)fd].port_num;
+ nbio_stop_fd(prt, (int)(long)fd);
+
+ ofd = driver_data[(int)(long)fd].ofd;
+ if (ofd != (int)(long)fd && (int)(long)ofd != -1)
+ nbio_stop_fd(prt, ofd);
+ else
+ ofd = -1;
+
+ CHLD_STAT_LOCK;
+
+ /* Mark as unused. Maybe resetting the 'port_num' slot is better? */
+ driver_data[(int)(long)fd].pid = -1;
+
+ CHLD_STAT_UNLOCK;
+
+ /* SMP note: Close has to be last thing done (open file descriptors work
+ as locks on driver_data[] entries) */
+ driver_select(prt, (int)(long)fd, ERL_DRV_USE, 0); /* close(fd); */
+ if (ofd >= 0) {
+ driver_select(prt, (int)(long)ofd, ERL_DRV_USE, 0); /* close(ofd); */
+ }
+}
+
+static void outputv(ErlDrvData e, ErlIOVec* ev)
+{
+ int fd = (int)(long)e;
+ int ix = driver_data[fd].port_num;
+ int pb = driver_data[fd].packet_bytes;
+ int ofd = driver_data[fd].ofd;
+ int n;
+ int sz;
+ char lb[4];
+ char* lbp;
+ int len = ev->size;
+
+ /* (len > ((unsigned long)-1 >> (4-pb)*8)) */
+ if (((pb == 2) && (len > 0xffff)) || (pb == 1 && len > 0xff)) {
+ driver_failure_posix(ix, EINVAL);
+ return; /* -1; */
+ }
+ put_int32(len, lb);
+ lbp = lb + (4-pb);
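+    /* Reading aid (example values): with pb == 2 and len == 5, put_int32()
+     * stores the big-endian bytes 00 00 00 05 in lb[], and lbp points at
+     * the last two of them, so a 2-byte header 00 05 precedes the payload.
+     * With pb == 0, lbp points just past lb[] and no header is sent. */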
+
+ ev->iov[0].iov_base = lbp;
+ ev->iov[0].iov_len = pb;
+ ev->size += pb;
+ if ((sz = driver_sizeq(ix)) > 0) {
+ driver_enqv(ix, ev, 0);
+ if (sz + ev->size >= (1 << 13))
+ set_busy_port(ix, 1);
+ }
+ else {
+ int vsize = ev->vsize > MAX_VSIZE ? MAX_VSIZE : ev->vsize;
+
+ n = writev(ofd, (const void *) (ev->iov), vsize);
+ if (n == ev->size)
+ return; /* 0;*/
+ if (n < 0) {
+ if ((errno != EINTR) && (errno != ERRNO_BLOCK)) {
+ driver_failure_posix(ix, errno);
+ return; /* -1;*/
+ }
+ n = 0;
+ }
+ driver_enqv(ix, ev, n); /* n is the skip value */
+ driver_select(ix, ofd, ERL_DRV_WRITE|ERL_DRV_USE, 1);
+ }
+ /* return 0;*/
+}
+
+
+static void output(ErlDrvData e, char* buf, int len)
+{
+ int fd = (int)(long)e;
+ int ix = driver_data[fd].port_num;
+ int pb = driver_data[fd].packet_bytes;
+ int ofd = driver_data[fd].ofd;
+ int n;
+ int sz;
+ char lb[4];
+ char* lbp;
+ struct iovec iv[2];
+
+ /* (len > ((unsigned long)-1 >> (4-pb)*8)) */
+ if (((pb == 2) && (len > 0xffff)) || (pb == 1 && len > 0xff)) {
+ driver_failure_posix(ix, EINVAL);
+ return; /* -1; */
+ }
+ put_int32(len, lb);
+ lbp = lb + (4-pb);
+
+ if ((sz = driver_sizeq(ix)) > 0) {
+ driver_enq(ix, lbp, pb);
+ driver_enq(ix, buf, len);
+ if (sz + len + pb >= (1 << 13))
+ set_busy_port(ix, 1);
+ }
+ else {
+ iv[0].iov_base = lbp;
+ iv[0].iov_len = pb; /* should work for pb=0 */
+ iv[1].iov_base = buf;
+ iv[1].iov_len = len;
+ n = writev(ofd, iv, 2);
+ if (n == pb+len)
+ return; /* 0; */
+ if (n < 0) {
+ if ((errno != EINTR) && (errno != ERRNO_BLOCK)) {
+ driver_failure_posix(ix, errno);
+ return; /* -1; */
+ }
+ n = 0;
+ }
+ if (n < pb) {
+ driver_enq(ix, lbp+n, pb-n);
+ driver_enq(ix, buf, len);
+ }
+ else {
+ n -= pb;
+ driver_enq(ix, buf+n, len-n);
+ }
+ driver_select(ix, ofd, ERL_DRV_WRITE|ERL_DRV_USE, 1);
+ }
+ return; /* 0; */
+}
+
+static int port_inp_failure(int port_num, int ready_fd, int res)
+ /* Result: 0 (eof) or -1 (error) */
+{
+ int err = errno;
+
+ ASSERT(res <= 0);
+ (void) driver_select(port_num, ready_fd, ERL_DRV_READ|ERL_DRV_WRITE, 0);
+ clear_fd_data(ready_fd);
+ if (res == 0) {
+ if (driver_data[ready_fd].report_exit) {
+ CHLD_STAT_LOCK;
+
+ if (driver_data[ready_fd].alive) {
+ /*
+ * We have eof and want to report exit status, but the process
+ * hasn't exited yet. When it does report_exit_status() will
+ * driver_select() this fd which will make sure that we get
+ * back here with driver_data[ready_fd].alive == 0 and
+ * driver_data[ready_fd].status set.
+ */
+ CHLD_STAT_UNLOCK;
+ return 0;
+ }
+ else {
+ int status = driver_data[ready_fd].status;
+ CHLD_STAT_UNLOCK;
+
+ /* We need not be prepared for stopped/continued processes. */
+ if (WIFSIGNALED(status))
+ status = 128 + WTERMSIG(status);
+ else
+ status = WEXITSTATUS(status);
+
+ driver_report_exit(driver_data[ready_fd].port_num, status);
+ }
+ }
+ driver_failure_eof(port_num);
+ } else {
+ driver_failure_posix(port_num, err);
+ }
+ return 0;
+}
+
+/* fd is the drv_data that is returned from the */
+/* initial start routine */
+/* ready_fd is the descriptor that is ready to read */
+
+static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
+{
+ int fd = (int)(long)e;
+ int port_num;
+ int packet_bytes;
+ int res;
+ Uint h;
+
+ port_num = driver_data[fd].port_num;
+ packet_bytes = driver_data[fd].packet_bytes;
+
+ if (packet_bytes == 0) {
+ byte *read_buf = (byte *) erts_alloc(ERTS_ALC_T_SYS_READ_BUF,
+ ERTS_SYS_READ_BUF_SZ);
+ res = read(ready_fd, read_buf, ERTS_SYS_READ_BUF_SZ);
+ if (res < 0) {
+ if ((errno != EINTR) && (errno != ERRNO_BLOCK))
+ port_inp_failure(port_num, ready_fd, res);
+ }
+ else if (res == 0)
+ port_inp_failure(port_num, ready_fd, res);
+ else
+ driver_output(port_num, (char*) read_buf, res);
+ erts_free(ERTS_ALC_T_SYS_READ_BUF, (void *) read_buf);
+ }
+ else if (fd_data[ready_fd].remain > 0) { /* We try to read the remainder */
+ /* space is allocated in buf */
+ res = read(ready_fd, fd_data[ready_fd].cpos,
+ fd_data[ready_fd].remain);
+ if (res < 0) {
+ if ((errno != EINTR) && (errno != ERRNO_BLOCK))
+ port_inp_failure(port_num, ready_fd, res);
+ }
+ else if (res == 0) {
+ port_inp_failure(port_num, ready_fd, res);
+ }
+ else if (res == fd_data[ready_fd].remain) { /* we're done */
+ driver_output(port_num, fd_data[ready_fd].buf,
+ fd_data[ready_fd].sz);
+ clear_fd_data(ready_fd);
+ }
+ else { /* if (res < fd_data[ready_fd].remain) */
+ fd_data[ready_fd].cpos += res;
+ fd_data[ready_fd].remain -= res;
+ }
+ }
+ else if (fd_data[ready_fd].remain == 0) { /* clean fd */
+ byte *read_buf = (byte *) erts_alloc(ERTS_ALC_T_SYS_READ_BUF,
+ ERTS_SYS_READ_BUF_SZ);
+ /* We make one read attempt and see what happens */
+ res = read(ready_fd, read_buf, ERTS_SYS_READ_BUF_SZ);
+ if (res < 0) {
+ if ((errno != EINTR) && (errno != ERRNO_BLOCK))
+ port_inp_failure(port_num, ready_fd, res);
+ }
+ else if (res == 0) { /* eof */
+ port_inp_failure(port_num, ready_fd, res);
+ }
+ else if (res < packet_bytes - fd_data[ready_fd].psz) {
+ memcpy(fd_data[ready_fd].pbuf+fd_data[ready_fd].psz,
+ read_buf, res);
+ fd_data[ready_fd].psz += res;
+ }
+ else { /* if (res >= packet_bytes) */
+ unsigned char* cpos = read_buf;
+ int bytes_left = res;
+
+ while (1) {
+ int psz = fd_data[ready_fd].psz;
+ char* pbp = fd_data[ready_fd].pbuf + psz;
+
+ while(bytes_left && (psz < packet_bytes)) {
+ *pbp++ = *cpos++;
+ bytes_left--;
+ psz++;
+ }
+
+ if (psz < packet_bytes) {
+ fd_data[ready_fd].psz = psz;
+ break;
+ }
+ fd_data[ready_fd].psz = 0;
+
+ switch (packet_bytes) {
+ case 1: h = get_int8(fd_data[ready_fd].pbuf); break;
+ case 2: h = get_int16(fd_data[ready_fd].pbuf); break;
+ case 4: h = get_int32(fd_data[ready_fd].pbuf); break;
+ default: ASSERT(0); return; /* -1; */
+ }
+
+ if (h <= (bytes_left)) {
+ driver_output(port_num, (char*) cpos, h);
+ cpos += h;
+ bytes_left -= h;
+ continue;
+ }
+ else { /* The last message we got was split */
+ char *buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
+ if (!buf) {
+ errno = ENOMEM;
+ port_inp_failure(port_num, ready_fd, -1);
+ }
+ else {
+ erts_smp_atomic_add(&sys_misc_mem_sz, h);
+ sys_memcpy(buf, cpos, bytes_left);
+ fd_data[ready_fd].buf = buf;
+ fd_data[ready_fd].sz = h;
+ fd_data[ready_fd].remain = h - bytes_left;
+ fd_data[ready_fd].cpos = buf + bytes_left;
+ }
+ break;
+ }
+ }
+ }
+ erts_free(ERTS_ALC_T_SYS_READ_BUF, (void *) read_buf);
+ }
+}
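+
+/* Reading aid (example values, not part of the original code): assume
+ * packet_bytes == 2 and a single read() above returns the nine bytes
+ *
+ *   00 03 'a' 'b' 'c' 00 05 'd' 'e'
+ *
+ * The loop first assembles the header 00 03, finds the complete 3-byte
+ * packet "abc" and delivers it with driver_output(). It then assembles
+ * the header 00 05 but only 2 of the 5 payload bytes are available, so a
+ * 5-byte buffer is allocated, "de" is copied into it, and
+ * fd_data[ready_fd].remain is set to 3; the next ready_input() call takes
+ * the "remain > 0" branch and reads the missing bytes into that buffer.
+ */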
+
+
+/* fd is the drv_data that is returned from the */
+/* initial start routine */
+/* ready_fd is the descriptor that is ready to read */
+
+static void ready_output(ErlDrvData e, ErlDrvEvent ready_fd)
+{
+ int fd = (int)(long)e;
+ int ix = driver_data[fd].port_num;
+ int n;
+ struct iovec* iv;
+ int vsize;
+
+
+ if ((iv = (struct iovec*) driver_peekq(ix, &vsize)) == NULL) {
+ driver_select(ix, ready_fd, ERL_DRV_WRITE, 0);
+ return; /* 0; */
+ }
+ vsize = vsize > MAX_VSIZE ? MAX_VSIZE : vsize;
+ if ((n = writev(ready_fd, iv, vsize)) > 0) {
+ if (driver_deq(ix, n) == 0)
+ set_busy_port(ix, 0);
+ }
+ else if (n < 0) {
+ if (errno == ERRNO_BLOCK || errno == EINTR)
+ return; /* 0; */
+ else {
+ int res = errno;
+ driver_select(ix, ready_fd, ERL_DRV_WRITE, 0);
+ driver_failure_posix(ix, res);
+ return; /* -1; */
+ }
+ }
+ return; /* 0; */
+}
+
+static void stop_select(ErlDrvEvent fd, void* _)
+{
+ close((int)fd);
+}
+
+/*
+** Async operation support
+*/
+#if defined(USE_THREADS) && !defined(ERTS_SMP)
+static void
+sys_async_ready_failed(int fd, int r, int err)
+{
+ char buf[120];
+ sprintf(buf, "sys_async_ready(): Fatal error: fd=%d, r=%d, errno=%d\n",
+ fd, r, err);
+ (void) write(2, buf, strlen(buf));
+ abort();
+}
+
+/* called from threads !! */
+void sys_async_ready(int fd)
+{
+ int r;
+ while (1) {
+	r = write(fd, "0", 1); /* signal main thread; fd MUST be async_fd[1] */
+ if (r == 1) {
+ DEBUGF(("sys_async_ready(): r = 1\r\n"));
+ break;
+ }
+ if (r < 0 && errno == EINTR) {
+ DEBUGF(("sys_async_ready(): r = %d\r\n", r));
+ continue;
+ }
+ sys_async_ready_failed(fd, r, errno);
+ }
+}
+
+static int async_drv_init(void)
+{
+ async_fd[0] = -1;
+ async_fd[1] = -1;
+ return 0;
+}
+
+static ErlDrvData async_drv_start(ErlDrvPort port_num,
+ char* name, SysDriverOpts* opts)
+{
+ if (async_fd[0] != -1)
+ return ERL_DRV_ERROR_GENERAL;
+ if (pipe(async_fd) < 0)
+ return ERL_DRV_ERROR_GENERAL;
+
+ DEBUGF(("async_drv_start: %d\r\n", port_num));
+
+ SET_NONBLOCKING(async_fd[0]);
+ driver_select(port_num, async_fd[0], ERL_DRV_READ, 1);
+
+ if (init_async(async_fd[1]) < 0)
+ return ERL_DRV_ERROR_GENERAL;
+ return (ErlDrvData)port_num;
+}
+
+static void async_drv_stop(ErlDrvData e)
+{
+ int port_num = (int)(long)e;
+
+ DEBUGF(("async_drv_stop: %d\r\n", port_num));
+
+ exit_async();
+
+ driver_select(port_num, async_fd[0], ERL_DRV_READ, 0);
+
+ close(async_fd[0]);
+ close(async_fd[1]);
+ async_fd[0] = async_fd[1] = -1;
+}
+
+
+static void async_drv_input(ErlDrvData e, ErlDrvEvent fd)
+{
+    char buf[32];
+ DEBUGF(("async_drv_input\r\n"));
+ while (read((int) fd, (void *) buf, 32) > 0); /* fd MUST be async_fd[0] */
+ check_async_ready(); /* invoke all async_ready */
+}
+#endif
+
+void erts_do_break_handling(void)
+{
+ struct termios temp_mode;
+ int saved = 0;
+
+ /*
+ * Most functions that do_break() calls are intentionally not thread safe;
+ * therefore, make sure that all threads but this one are blocked before
+ * proceeding!
+ */
+ erts_smp_block_system(0);
+ /*
+ * NOTE: since we allow gc we are not allowed to lock
+ * (any) process main locks while blocking system...
+ */
+
+ /* during break we revert to initial settings */
+ /* this is done differently for oldshell */
+ if (using_oldshell && !replace_intr) {
+ SET_BLOCKING(1);
+ }
+ else if (isatty(0)) {
+ tcgetattr(0,&temp_mode);
+ tcsetattr(0,TCSANOW,&initial_tty_mode);
+ saved = 1;
+ }
+
+ /* call the break handling function, reset the flag */
+ do_break();
+
+ ERTS_UNSET_BREAK_REQUESTED;
+
+ fflush(stdout);
+
+ /* after break we go back to saved settings */
+ if (using_oldshell && !replace_intr) {
+ SET_NONBLOCKING(1);
+ }
+ else if (saved) {
+ tcsetattr(0,TCSANOW,&temp_mode);
+ }
+
+ erts_smp_release_system();
+}
+
+/* Fills in the system's representation of the jam/beam process identifier.
+** The pid is put in STRING representation in the supplied buffer;
+** no interpretation of it should be done by the rest of the
+** emulator. The buffer should be at least 21 bytes long.
+*/
+void sys_get_pid(char *buffer){
+ pid_t p = getpid();
+ /* Assume the pid is scalar and can rest in an unsigned long... */
+ sprintf(buffer,"%lu",(unsigned long) p);
+}
+
+int
+erts_sys_putenv(char *buffer, int sep_ix)
+{
+ int res;
+ char *env;
+#ifdef HAVE_COPYING_PUTENV
+ env = buffer;
+#else
+ Uint sz = strlen(buffer)+1;
+ env = erts_alloc(ERTS_ALC_T_PUTENV_STR, sz);
+ erts_smp_atomic_add(&sys_misc_mem_sz, sz);
+ strcpy(env,buffer);
+#endif
+ erts_smp_rwmtx_rwlock(&environ_rwmtx);
+ res = putenv(env);
+ erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+ return res;
+}
+
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ char *orig_value;
+ int res;
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ orig_value = getenv(key);
+ if (!orig_value)
+ res = -1;
+ else {
+ size_t len = sys_strlen(orig_value);
+ if (len >= *size) {
+ *size = len + 1;
+ res = 1;
+ }
+ else {
+ *size = len;
+ sys_memcpy((void *) value, (void *) orig_value, len+1);
+ res = 0;
+ }
+ }
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ return res;
+}
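+
+/* Illustrative sketch (not part of the build): erts_sys_getenv() above
+ * returns -1 when the variable is not set, 1 when the caller's buffer is
+ * too small (*size is then set to the required size, including the
+ * terminating NUL), and 0 on success (*size is then the string length).
+ * A typical caller looks like this (names are hypothetical):
+ */
+#if 0
+static int example_env_set_and_not_false(char *key)
+{
+    char buf[10];
+    size_t sz = sizeof(buf);
+    int res = erts_sys_getenv(key, buf, &sz);
+    if (res < 0)
+	return 0;		/* not set at all */
+    if (res > 0)
+	return 1;		/* set, and too long to be "false" */
+    return sys_strcmp(buf, "false") != 0;
+}
+#endif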
+
+void
+sys_init_io(void)
+{
+ fd_data = (struct fd_data *)
+ erts_alloc(ERTS_ALC_T_FD_TAB, max_files * sizeof(struct fd_data));
+ erts_smp_atomic_add(&sys_misc_mem_sz,
+ max_files * sizeof(struct fd_data));
+
+#ifdef USE_THREADS
+#ifdef ERTS_SMP
+ if (init_async(-1) < 0)
+ erl_exit(1, "Failed to initialize async-threads\n");
+#else
+ {
+	/* This is special stuff: starting a driver from the
+	 * system routines, but it is a nice way of handling things
+	 * the Erlang way.
+	 */
+ SysDriverOpts dopts;
+ int ret;
+
+ sys_memset((void*)&dopts, 0, sizeof(SysDriverOpts));
+ add_driver_entry(&async_driver_entry);
+ ret = erts_open_driver(NULL, NIL, "async", &dopts, NULL);
+ DEBUGF(("open_driver = %d\n", ret));
+ if (ret < 0)
+ erl_exit(1, "Failed to open async driver\n");
+ erts_port[ret].status |= ERTS_PORT_SFLG_IMMORTAL;
+ }
+#endif
+#endif
+
+}
+
+#if (0) /* unused? */
+static int write_fill(int fd, char *buf, int len)
+{
+ int i, done = 0;
+
+ do {
+ if ((i = write(fd, buf+done, len-done)) < 0) {
+ if (errno != EINTR)
+ return (i);
+ i = 0;
+ }
+ done += i;
+ } while (done < len);
+ return (len);
+}
+#endif
+
+extern const char pre_loaded_code[];
+extern Preload pre_loaded[];
+
+void erts_sys_alloc_init(void)
+{
+ elib_ensure_initialized();
+}
+
+void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
+{
+ void *res = malloc((size_t) sz);
+#if HAVE_ERTS_MSEG
+ if (!res) {
+ erts_mseg_clear_cache();
+ return malloc((size_t) sz);
+ }
+#endif
+ return res;
+}
+
+void *erts_sys_realloc(ErtsAlcType_t t, void *x, void *p, Uint sz)
+{
+ void *res = realloc(p, (size_t) sz);
+#if HAVE_ERTS_MSEG
+ if (!res) {
+ erts_mseg_clear_cache();
+ return realloc(p, (size_t) sz);
+ }
+#endif
+ return res;
+}
+
+void erts_sys_free(ErtsAlcType_t t, void *x, void *p)
+{
+ free(p);
+}
+
+/* Return a pointer to a vector of names of preloaded modules */
+
+Preload*
+sys_preloaded(void)
+{
+ return pre_loaded;
+}
+
+/* Return a pointer to preloaded code for module "module" */
+unsigned char*
+sys_preload_begin(Preload* p)
+{
+ return p->code;
+}
+
+/* Clean up if allocated */
+void sys_preload_end(Preload* p)
+{
+ /* Nothing */
+}
+
+/* Read a key from console (?) */
+
+int sys_get_key(int fd)
+{
+ int c;
+ unsigned char rbuf[64];
+
+ fflush(stdout); /* Flush query ??? */
+
+ if ((c = read(fd,rbuf,64)) <= 0) {
+ return c;
+ }
+
+ return rbuf[0];
+}
+
+
+#ifdef DEBUG
+
+extern int erts_initialized;
+void
+erl_assert_error(char* expr, char* file, int line)
+{
+ fflush(stdout);
+ fprintf(stderr, "Assertion failed: %s in %s, line %d\n",
+ expr, file, line);
+ fflush(stderr);
+#if !defined(ERTS_SMP) && 0
+    /* Writing a crashdump from a failed assertion when smp support
+     * is enabled is almost guaranteed to deadlock, so don't even bother.
+ *
+ * It could maybe be useful (but I'm not convinced) to write the
+ * crashdump if smp support is disabled...
+ */
+ if (erts_initialized)
+ erl_crash_dump(file, line, "Assertion failed: %s\n", expr);
+#endif
+ abort();
+}
+
+void
+erl_debug(char* fmt, ...)
+{
+ char sbuf[1024]; /* Temporary buffer. */
+ va_list va;
+
+ if (debug_log) {
+ va_start(va, fmt);
+ vsprintf(sbuf, fmt, va);
+ va_end(va);
+ fprintf(stderr, "%s", sbuf);
+ }
+}
+
+#endif /* DEBUG */
+
+static ERTS_INLINE void
+report_exit_status(ErtsSysReportExit *rep, int status)
+{
+ Port *pp;
+#ifdef ERTS_SMP
+ CHLD_STAT_UNLOCK;
+#endif
+ pp = erts_id2port_sflgs(rep->port,
+ NULL,
+ 0,
+ ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+#ifdef ERTS_SMP
+ CHLD_STAT_LOCK;
+#endif
+ if (pp) {
+ if (rep->ifd >= 0) {
+ driver_data[rep->ifd].alive = 0;
+ driver_data[rep->ifd].status = status;
+ (void) driver_select((ErlDrvPort) internal_port_index(pp->id),
+ rep->ifd,
+ (ERL_DRV_READ|ERL_DRV_USE),
+ 1);
+ }
+ if (rep->ofd >= 0) {
+ driver_data[rep->ofd].alive = 0;
+ driver_data[rep->ofd].status = status;
+ (void) driver_select((ErlDrvPort) internal_port_index(pp->id),
+ rep->ofd,
+ (ERL_DRV_WRITE|ERL_DRV_USE),
+ 1);
+ }
+ erts_port_release(pp);
+ }
+ erts_free(ERTS_ALC_T_PRT_REP_EXIT, rep);
+}
+
+#if !CHLDWTHR /* ---------------------------------------------------------- */
+
+#define ERTS_REPORT_EXIT_STATUS report_exit_status
+
+static int check_children(void)
+{
+ int res = 0;
+ int pid;
+ int status;
+
+#ifndef ERTS_SMP
+ if (children_died)
+#endif
+ {
+ sys_sigblock(SIGCHLD);
+ CHLD_STAT_LOCK;
+ while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
+ note_child_death(pid, status);
+#ifndef ERTS_SMP
+ children_died = 0;
+#endif
+ CHLD_STAT_UNLOCK;
+ sys_sigrelease(SIGCHLD);
+ res = 1;
+ }
+ return res;
+}
+
+#ifdef ERTS_SMP
+
+void
+erts_check_children(void)
+{
+ (void) check_children();
+}
+
+#endif
+
+#elif CHLDWTHR && defined(ERTS_SMP) /* ------------------------------------- */
+
+#define ERTS_REPORT_EXIT_STATUS report_exit_status
+
+#define check_children() (0)
+
+
+#else /* CHLDWTHR && !defined(ERTS_SMP) ------------------------------------ */
+
+#define ERTS_REPORT_EXIT_STATUS initiate_report_exit_status
+
+static ERTS_INLINE void
+initiate_report_exit_status(ErtsSysReportExit *rep, int status)
+{
+ rep->next = report_exit_transit_list;
+ rep->status = status;
+ report_exit_transit_list = rep;
+ /*
+ * We need the scheduler thread to call check_children().
+ * If the scheduler thread is sleeping in a poll with a
+ * timeout, we need to wake the scheduler thread. We use the
+ * functionality of the async driver to do this, instead of
+ * implementing yet another driver doing the same thing. A
+ * little bit ugly, but it works...
+ */
+ sys_async_ready(async_fd[1]);
+}
+
+static int check_children(void)
+{
+ int res;
+ ErtsSysReportExit *rep;
+ CHLD_STAT_LOCK;
+ rep = report_exit_transit_list;
+ res = rep != NULL;
+ while (rep) {
+ ErtsSysReportExit *curr_rep = rep;
+ rep = rep->next;
+ report_exit_status(curr_rep, curr_rep->status);
+ }
+ report_exit_transit_list = NULL;
+ CHLD_STAT_UNLOCK;
+ return res;
+}
+
+#endif /* ------------------------------------------------------------------ */
+
+static void note_child_death(int pid, int status)
+{
+ ErtsSysReportExit **repp = &report_exit_list;
+ ErtsSysReportExit *rep = report_exit_list;
+
+ while (rep) {
+ if (pid == rep->pid) {
+ *repp = rep->next;
+ ERTS_REPORT_EXIT_STATUS(rep, status);
+ break;
+ }
+ repp = &rep->next;
+ rep = rep->next;
+ }
+}
+
+#if CHLDWTHR
+
+static void *
+child_waiter(void *unused)
+{
+ int pid;
+ int status;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_set_thread_name("child waiter");
+#endif
+
+ while(1) {
+#ifdef DEBUG
+ int waitpid_errno;
+#endif
+ pid = waitpid(-1, &status, 0);
+#ifdef DEBUG
+ waitpid_errno = errno;
+#endif
+ CHLD_STAT_LOCK;
+ if (pid < 0) {
+ ASSERT(waitpid_errno == ECHILD);
+ }
+ else {
+ children_alive--;
+ ASSERT(children_alive >= 0);
+ note_child_death(pid, status);
+ }
+ while (!children_alive)
+ CHLD_STAT_WAIT; /* Wait for children to wait on... :) */
+ CHLD_STAT_UNLOCK;
+ }
+
+ return NULL;
+}
+
+#endif
+
+/*
+ * Called from schedule() when it runs out of runnable processes,
+ * or when Erlang code has performed INPUT_REDUCTIONS reduction
+ * steps. runnable == 0 iff there are no runnable Erlang processes.
+ */
+void
+erl_sys_schedule(int runnable)
+{
+#ifdef ERTS_SMP
+ ERTS_CHK_IO(!runnable);
+ ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+#else
+ ERTS_CHK_IO_INTR(0);
+ if (runnable) {
+ ERTS_CHK_IO(0); /* Poll for I/O */
+ check_async_ready(); /* Check async completions */
+ } else {
+ int wait_for_io = !check_async_ready();
+ if (wait_for_io)
+ wait_for_io = !check_children();
+ ERTS_CHK_IO(wait_for_io);
+ }
+ (void) check_children();
+#endif
+}
+
+
+#ifdef ERTS_SMP
+
+static erts_smp_tid_t sig_dispatcher_tid;
+
+static void
+smp_sig_notify(char c)
+{
+ int res;
+ do {
+ /* write() is async-signal safe (according to posix) */
+ res = write(sig_notify_fds[1], &c, 1);
+ } while (res < 0 && errno == EINTR);
+ if (res != 1) {
+ char msg[] =
+ "smp_sig_notify(): Failed to notify signal-dispatcher thread "
+ "about received signal";
+ (void) write(2, msg, sizeof(msg));
+ abort();
+ }
+}
+
+static void *
+signal_dispatcher_thread_func(void *unused)
+{
+ int initialized = 0;
+#if !CHLDWTHR
+ int notify_check_children = 0;
+#endif
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_set_thread_name("signal_dispatcher");
+#endif
+ while (1) {
+ char buf[32];
+ int res, i;
+ /* Block on read() waiting for a signal notification to arrive... */
+ res = read(sig_notify_fds[0], (void *) &buf[0], 32);
+ if (res < 0) {
+ if (errno == EINTR)
+ continue;
+ erl_exit(ERTS_ABORT_EXIT,
+ "signal-dispatcher thread got unexpected error: %s (%d)\n",
+ erl_errno_id(errno),
+ errno);
+ }
+ for (i = 0; i < res; i++) {
+ /*
+ * NOTE 1: The signal dispatcher thread should not do work
+ * that takes a substantial amount of time (except
+ * perhaps in test and debug builds). It needs to
+ * be responsive, i.e, it should only dispatch work
+ * to other threads.
+ *
+ * NOTE 2: The signal dispatcher thread is not a blockable
+ * thread (i.e., it hasn't called
+ * erts_register_blockable_thread()). This is
+ * intentional. We want to be able to interrupt
+ * writing of a crash dump by hitting C-c twice.
+ * Since it isn't a blockable thread it is important
+ * that it doesn't change the state of any data that
+ * a blocking thread expects to have exclusive access
+ * to (unless the signal dispatcher itself explicitly
+ * is blocking all blockable threads).
+ */
+ switch (buf[i]) {
+ case 0: /* Emulator initialized */
+ initialized = 1;
+#if !CHLDWTHR
+ if (!notify_check_children)
+#endif
+ break;
+#if !CHLDWTHR
+ case 'C': /* SIGCHLD */
+ if (initialized)
+ erts_smp_notify_check_children_needed();
+ else
+ notify_check_children = 1;
+ break;
+#endif
+ case 'I': /* SIGINT */
+ break_requested();
+ break;
+ case 'Q': /* SIGQUIT */
+ quit_requested();
+ break;
+ case '1': /* SIGUSR1 */
+ sigusr1_exit();
+ break;
+#ifdef QUANTIFY
+ case '2': /* SIGUSR2 */
+ quantify_save_data(); /* Might take a substantial amount of
+ time, but this is a test/debug
+ build */
+ break;
+#endif
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "signal-dispatcher thread received unknown "
+ "signal notification: '%c'\n",
+ buf[i]);
+ }
+ }
+ ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+ }
+ return NULL;
+}
+
+static void
+init_smp_sig_notify(void)
+{
+ erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+ thr_opts.detached = 1;
+
+ if (pipe(sig_notify_fds) < 0) {
+ erl_exit(ERTS_ABORT_EXIT,
+ "Failed to create signal-dispatcher pipe: %s (%d)\n",
+ erl_errno_id(errno),
+ errno);
+ }
+
+ /* Start signal handler thread */
+ erts_smp_thr_create(&sig_dispatcher_tid,
+ signal_dispatcher_thread_func,
+ NULL,
+ &thr_opts);
+}
+
+void
+erts_sys_main_thread(void)
+{
+ erts_thread_disable_fpe();
+ /* Become signal receiver thread... */
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_set_thread_name("signal_receiver");
+#endif
+
+ smp_sig_notify(0); /* Notify initialized */
+ while (1) {
+ /* Wait for a signal to arrive... */
+#ifdef DEBUG
+ int res =
+#else
+ (void)
+#endif
+ select(0, NULL, NULL, NULL, NULL);
+ ASSERT(res < 0);
+ ASSERT(errno == EINTR);
+ }
+}
+
+#endif /* ERTS_SMP */
+
+#ifdef ERTS_ENABLE_KERNEL_POLL /* get_value() is currently only used when
+ kernel-poll is enabled */
+
+/* get_value() marks a handled argument by
+   putting NULL in argv */
+static char *
+get_value(char* rest, char** argv, int* ip)
+{
+ char *param = argv[*ip]+1;
+ argv[*ip] = NULL;
+ if (*rest == '\0') {
+ char *next = argv[*ip + 1];
+ if (next[0] == '-'
+ && next[1] == '-'
+ && next[2] == '\0') {
+ erts_fprintf(stderr, "bad \"%s\" value: \n", param);
+ erts_usage();
+ }
+ (*ip)++;
+ argv[*ip] = NULL;
+ return next;
+ }
+ return rest;
+}
+
+#endif /* ERTS_ENABLE_KERNEL_POLL */
+
+void
+erl_sys_args(int* argc, char** argv)
+{
+ int i, j;
+
+ i = 1;
+
+ ASSERT(argc && argv);
+
+ while (i < *argc) {
+ if(argv[i][0] == '-') {
+ switch (argv[i][1]) {
+#ifdef ERTS_ENABLE_KERNEL_POLL
+ case 'K': {
+ char *arg = get_value(argv[i] + 2, argv, &i);
+ if (strcmp("true", arg) == 0) {
+ erts_use_kernel_poll = 1;
+ }
+ else if (strcmp("false", arg) == 0) {
+ erts_use_kernel_poll = 0;
+ }
+ else {
+ erts_fprintf(stderr, "bad \"K\" value: %s\n", arg);
+ erts_usage();
+ }
+ break;
+ }
+#endif
+ case '-':
+ goto done_parsing;
+ default:
+ break;
+ }
+ }
+ i++;
+ }
+
+ done_parsing:
+
+#ifdef ERTS_ENABLE_KERNEL_POLL
+ if (erts_use_kernel_poll) {
+ char no_kp[10];
+ size_t no_kp_sz = sizeof(no_kp);
+ int res = erts_sys_getenv("ERL_NO_KERNEL_POLL", no_kp, &no_kp_sz);
+ if (res > 0
+ || (res == 0
+ && sys_strcmp("false", no_kp) != 0
+ && sys_strcmp("FALSE", no_kp) != 0)) {
+ erts_use_kernel_poll = 0;
+ }
+ }
+#endif
+
+ init_check_io();
+
+#ifdef ERTS_SMP
+ init_smp_sig_notify();
+#endif
+
+ /* Handled arguments have been marked with NULL. Slide arguments
+ not handled towards the beginning of argv. */
+ for (i = 0, j = 0; i < *argc; i++) {
+ if (argv[i])
+ argv[j++] = argv[i];
+ }
+ *argc = j;
+}
+
+
+#ifdef ERTS_TIMER_THREAD
+
+/*
+ * Interruptible-wait facility: low-level synchronisation state
+ * and methods that are implementation dependent.
+ *
+ * Constraint: Every implementation must define 'struct erts_iwait'
+ * with a field 'erts_smp_atomic_t state;'.
+ */
+
+/* values for struct erts_iwait's state field */
+#define IWAIT_WAITING 0
+#define IWAIT_AWAKE 1
+#define IWAIT_INTERRUPT 2
+
+#if 0 /* XXX: needs feature test in erts/configure.in */
+
+/*
+ * This is an implementation of the interruptible wait facility on
+ * top of Linux-specific futexes.
+ */
+#include <asm/unistd.h>
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+static int sys_futex(void *futex, int op, int val, const struct timespec *timeout)
+{
+ return syscall(__NR_futex, futex, op, val, timeout);
+}
+
+struct erts_iwait {
+ erts_smp_atomic_t state; /* &state.counter is our futex */
+};
+
+static void iwait_lowlevel_init(struct erts_iwait *iwait) { /* empty */ }
+
+static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
+{
+ struct timespec timeout;
+ int res;
+
+ timeout.tv_sec = delay->tv_sec;
+ timeout.tv_nsec = delay->tv_usec * 1000;
+ res = sys_futex((void*)&iwait->state.counter, FUTEX_WAIT, IWAIT_WAITING, &timeout);
+ if (res < 0 && errno != ETIMEDOUT && errno != EWOULDBLOCK && errno != EINTR)
+ perror("FUTEX_WAIT");
+}
+
+static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
+{
+ int res = sys_futex((void*)&iwait->state.counter, FUTEX_WAKE, 1, NULL);
+ if (res < 0)
+ perror("FUTEX_WAKE");
+}
+
+#else /* using poll() or select() */
+
+/*
+ * This is an implementation of the interruptible wait facility on
+ * top of pipe(), poll() or select(), read(), and write().
+ */
+struct erts_iwait {
+ erts_smp_atomic_t state;
+ int read_fd; /* wait polls and reads this fd */
+ int write_fd; /* interrupt writes this fd */
+};
+
+static void iwait_lowlevel_init(struct erts_iwait *iwait)
+{
+ int fds[2];
+
+ if (pipe(fds) < 0) {
+ perror("pipe()");
+ exit(1);
+ }
+ iwait->read_fd = fds[0];
+ iwait->write_fd = fds[1];
+}
+
+#if defined(ERTS_USE_POLL)
+
+#include <sys/poll.h>
+#define PERROR_POLL "poll()"
+
+static int iwait_lowlevel_poll(int read_fd, struct timeval *delay)
+{
+ struct pollfd pollfd;
+ int timeout;
+
+ pollfd.fd = read_fd;
+ pollfd.events = POLLIN;
+ pollfd.revents = 0;
+ timeout = delay->tv_sec * 1000 + delay->tv_usec / 1000;
+ return poll(&pollfd, 1, timeout);
+}
+
+#else /* !ERTS_USE_POLL */
+
+#include <sys/select.h>
+#define PERROR_POLL "select()"
+
+static int iwait_lowlevel_poll(int read_fd, struct timeval *delay)
+{
+ fd_set readfds;
+
+ FD_ZERO(&readfds);
+ FD_SET(read_fd, &readfds);
+ return select(read_fd + 1, &readfds, NULL, NULL, delay);
+}
+
+#endif /* !ERTS_USE_POLL */
+
+static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
+{
+ int res;
+ char buf[64];
+
+ res = iwait_lowlevel_poll(iwait->read_fd, delay);
+ if (res > 0)
+ (void)read(iwait->read_fd, buf, sizeof buf);
+ else if (res < 0 && errno != EINTR)
+ perror(PERROR_POLL);
+}
+
+static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
+{
+ int res = write(iwait->write_fd, "!", 1);
+ if (res < 0)
+ perror("write()");
+}
+
+#endif /* using poll() or select() */
+
+#if 0 /* not using poll() or select() */
+/*
+ * This is an implementation of the interruptible wait facility on
+ * top of pthread_cond_timedwait(). This has two problems:
+ * 1. pthread_cond_timedwait() requires an absolute time point,
+ * so the relative delay must be converted to absolute time.
+ * Worse, this breaks if the machine's time is adjusted while
+ * we're preparing to wait.
+ * 2. Each cond operation requires additional mutex lock/unlock operations.
+ *
+ * Problem 2 is probably not too bad on Linux (they'll just become
+ * relatively cheap futex operations), but problem 1 is the real killer.
+ * Only use this implementation if no better alternatives are available!
+ */
+struct erts_iwait {
+ erts_smp_atomic_t state;
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+};
+
+static void iwait_lowlevel_init(struct erts_iwait *iwait)
+{
+ iwait->cond = (pthread_cond_t) PTHREAD_COND_INITIALIZER;
+ iwait->mutex = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
+}
+
+static void iwait_lowlevel_wait(struct erts_iwait *iwait, struct timeval *delay)
+{
+ struct timeval tmp;
+ struct timespec timeout;
+
+ /* Due to pthread_cond_timedwait()'s use of absolute
+ time, this must be the real gettimeofday(), _not_
+ the "smoothed" one beam/erl_time_sup.c implements. */
+ gettimeofday(&tmp, NULL);
+
+ tmp.tv_sec += delay->tv_sec;
+ tmp.tv_usec += delay->tv_usec;
+ if (tmp.tv_usec >= 1000*1000) {
+ tmp.tv_usec -= 1000*1000;
+ tmp.tv_sec += 1;
+ }
+ timeout.tv_sec = tmp.tv_sec;
+ timeout.tv_nsec = tmp.tv_usec * 1000;
+ pthread_mutex_lock(&iwait->mutex);
+ pthread_cond_timedwait(&iwait->cond, &iwait->mutex, &timeout);
+ pthread_mutex_unlock(&iwait->mutex);
+}
+
+static void iwait_lowlevel_interrupt(struct erts_iwait *iwait)
+{
+ pthread_mutex_lock(&iwait->mutex);
+ pthread_cond_signal(&iwait->cond);
+ pthread_mutex_unlock(&iwait->mutex);
+}
+
+#endif /* not using POLL */
+
+/*
+ * Interruptible-wait facility. This is just a wrapper around the
+ * low-level synchronisation code, where we maintain our logical
+ * state in order to suppress some state transitions.
+ */
+
+struct erts_iwait *erts_iwait_init(void)
+{
+ struct erts_iwait *iwait = malloc(sizeof *iwait);
+ if (!iwait) {
+ perror("malloc");
+ exit(1);
+ }
+ iwait_lowlevel_init(iwait);
+ erts_smp_atomic_init(&iwait->state, IWAIT_AWAKE);
+ return iwait;
+}
+
+void erts_iwait_wait(struct erts_iwait *iwait, struct timeval *delay)
+{
+ if (erts_smp_atomic_xchg(&iwait->state, IWAIT_WAITING) != IWAIT_INTERRUPT)
+ iwait_lowlevel_wait(iwait, delay);
+ erts_smp_atomic_set(&iwait->state, IWAIT_AWAKE);
+}
+
+void erts_iwait_interrupt(struct erts_iwait *iwait)
+{
+ if (erts_smp_atomic_xchg(&iwait->state, IWAIT_INTERRUPT) == IWAIT_WAITING)
+ iwait_lowlevel_interrupt(iwait);
+}
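+
+/* Illustrative usage sketch (not part of the build): one thread waits with
+ * a bounded delay while another thread may cut the wait short. Assumes an
+ * iwait object created with erts_iwait_init(); the names and the delay
+ * value are hypothetical.
+ */
+#if 0
+static struct erts_iwait *example_iwait; /* = erts_iwait_init() at startup */
+
+static void example_waiter(void)
+{
+    struct timeval delay;
+    delay.tv_sec = 1;
+    delay.tv_usec = 0;
+    /* Returns when the delay expires, or immediately if some other
+       thread has already called (or calls) erts_iwait_interrupt(). */
+    erts_iwait_wait(example_iwait, &delay);
+}
+
+static void example_interrupter(void)
+{
+    erts_iwait_interrupt(example_iwait);
+}
+#endif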
+
+#endif /* ERTS_TIMER_THREAD */
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
new file mode 100644
index 0000000000..15da6ab45c
--- /dev/null
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -0,0 +1,815 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2001-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "global.h"
+#include "erl_process.h"
+
+
+#ifdef NO_FPE_SIGNALS
+
+void
+erts_sys_init_float(void)
+{
+# ifdef SIGFPE
+ sys_sigset(SIGFPE, SIG_IGN); /* Ignore so we can test for NaN and Inf */
+# endif
+}
+
+static ERTS_INLINE void set_current_fp_exception(unsigned long pc)
+{
+ /* nothing to do */
+}
+
+#else /* !NO_FPE_SIGNALS */
+
+#ifdef ERTS_SMP
+static erts_tsd_key_t fpe_key;
+
+/* once-only initialisation early in the main thread (via erts_sys_init_float()) */
+static void erts_init_fp_exception(void)
+{
+ /* XXX: the wrappers prevent using a pthread destructor to
+ deallocate the key's value; so when/where do we do that? */
+ erts_tsd_key_create(&fpe_key);
+}
+
+void erts_thread_init_fp_exception(void)
+{
+ unsigned long *fpe = erts_alloc(ERTS_ALC_T_FP_EXCEPTION, sizeof(*fpe));
+ *fpe = 0L;
+ erts_tsd_set(fpe_key, fpe);
+}
+
+static ERTS_INLINE volatile unsigned long *erts_thread_get_fp_exception(void)
+{
+ return (volatile unsigned long*)erts_tsd_get(fpe_key);
+}
+#else /* !SMP */
+#define erts_init_fp_exception() /*empty*/
+static volatile unsigned long fp_exception;
+#define erts_thread_get_fp_exception() (&fp_exception)
+#endif /* SMP */
+
+volatile unsigned long *erts_get_current_fp_exception(void)
+{
+ Process *c_p;
+
+ c_p = erts_get_current_process();
+ if (c_p)
+ return &c_p->fp_exception;
+ return erts_thread_get_fp_exception();
+}
+
+static void set_current_fp_exception(unsigned long pc)
+{
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+ ASSERT(fpexnp != NULL);
+ *fpexnp = pc;
+}
+
+void erts_fp_check_init_error(volatile unsigned long *fpexnp)
+{
+ char buf[64];
+ snprintf(buf, sizeof buf, "ERTS_FP_CHECK_INIT at %p: detected unhandled FPE at %p\r\n",
+ __builtin_return_address(0), (void*)*fpexnp);
+ write(2, buf, strlen(buf));
+ *fpexnp = 0;
+#if defined(__i386__) || defined(__x86_64__)
+ erts_restore_fpu();
+#endif
+}
+
+/* Is there no standard identifier for Darwin/MacOSX ? */
+#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
+#define __DARWIN__ 1
+#endif
+
+#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
+
+static void unmask_x87(void)
+{
+ unsigned short cw;
+
+ __asm__ __volatile__("fstcw %0" : "=m"(cw));
+ cw &= ~(0x01|0x04|0x08); /* unmask IM, ZM, OM */
+ __asm__ __volatile__("fldcw %0" : : "m"(cw));
+}
+
+/* mask x87 FPE, return true if the previous state was unmasked */
+static int mask_x87(void)
+{
+ unsigned short cw;
+ int unmasked;
+
+ __asm__ __volatile__("fstcw %0" : "=m"(cw));
+ unmasked = (cw & (0x01|0x04|0x08)) == 0;
+ /* or just set cw = 0x37f */
+ cw |= (0x01|0x04|0x08); /* mask IM, ZM, OM */
+ __asm__ __volatile__("fldcw %0" : : "m"(cw));
+ return unmasked;
+}
+
+static void unmask_sse2(void)
+{
+ unsigned int mxcsr;
+
+ __asm__ __volatile__("stmxcsr %0" : "=m"(mxcsr));
+ mxcsr &= ~(0x003F|0x0680); /* clear exn flags, unmask OM, ZM, IM (not PM, UM, DM) */
+ __asm__ __volatile__("ldmxcsr %0" : : "m"(mxcsr));
+}
+
+/* mask SSE2 FPE, return true if the previous state was unmasked */
+static int mask_sse2(void)
+{
+ unsigned int mxcsr;
+ int unmasked;
+
+ __asm__ __volatile__("stmxcsr %0" : "=m"(mxcsr));
+ unmasked = (mxcsr & 0x0680) == 0;
+ /* or just set mxcsr = 0x1f80 */
+ mxcsr &= ~0x003F; /* clear exn flags */
+ mxcsr |= 0x0680; /* mask OM, ZM, IM (not PM, UM, DM) */
+ __asm__ __volatile__("ldmxcsr %0" : : "m"(mxcsr));
+ return unmasked;
+}
+
+#if defined(__x86_64__)
+
+static inline int cpu_has_sse2(void) { return 1; }
+
+#else /* !__x86_64__ */
+
+/*
+ * Check if an x86-32 processor has SSE2.
+ */
+static unsigned int xor_eflags(unsigned int mask)
+{
+ unsigned int eax, edx;
+
+ eax = mask; /* eax = mask */
+ __asm__("pushfl\n\t"
+ "popl %0\n\t" /* edx = original EFLAGS */
+ "xorl %0, %1\n\t" /* eax = mask ^ EFLAGS */
+ "pushl %1\n\t"
+ "popfl\n\t" /* new EFLAGS = mask ^ original EFLAGS */
+ "pushfl\n\t"
+ "popl %1\n\t" /* eax = new EFLAGS */
+ "xorl %0, %1\n\t" /* eax = new EFLAGS ^ old EFLAGS */
+ "pushl %0\n\t"
+ "popfl" /* restore original EFLAGS */
+ : "=d"(edx), "=a"(eax)
+ : "1"(eax));
+ return eax;
+}
+
+static __inline__ unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax, save_ebx;
+
+ /* In PIC mode i386 reserves EBX. So we must save
+ and restore it ourselves to not upset gcc. */
+ __asm__(
+ "movl %%ebx, %1\n\t"
+ "cpuid\n\t"
+ "movl %1, %%ebx"
+ : "=a"(eax), "=m"(save_ebx)
+ : "0"(op)
+ : "cx", "dx");
+ return eax;
+}
+
+static __inline__ unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, edx, save_ebx;
+
+ /* In PIC mode i386 reserves EBX. So we must save
+ and restore it ourselves to not upset gcc. */
+ __asm__(
+ "movl %%ebx, %2\n\t"
+ "cpuid\n\t"
+ "movl %2, %%ebx"
+ : "=a"(eax), "=d"(edx), "=m"(save_ebx)
+ : "0"(op)
+ : "cx");
+ return edx;
+}
+
+/* The AC bit, bit #18, is a new bit introduced in the EFLAGS
+ * register on the Intel486 processor to generate alignment
+ * faults. This bit cannot be set on the Intel386 processor.
+ */
+static __inline__ int is_386(void)
+{
+ return ((xor_eflags(1<<18) >> 18) & 1) == 0;
+}
+
+/* Newer x86 processors have a CPUID instruction, as indicated by
+ * the ID bit (#21) in EFLAGS being modifiable.
+ */
+static __inline__ int has_CPUID(void)
+{
+ return (xor_eflags(1<<21) >> 21) & 1;
+}
+
+static int cpu_has_sse2(void)
+{
+ unsigned int maxlev, features;
+ static int has_sse2 = -1;
+
+ if (has_sse2 >= 0)
+ return has_sse2;
+ has_sse2 = 0;
+
+ if (is_386())
+ return 0;
+ if (!has_CPUID())
+ return 0;
+ maxlev = cpuid_eax(0);
+ /* Intel A-step Pentium had a preliminary version of CPUID.
+ It also didn't have SSE2. */
+ if ((maxlev & 0xFFFFFF00) == 0x0500)
+ return 0;
+ /* If max level is zero then CPUID cannot report any features. */
+ if (maxlev == 0)
+ return 0;
+ features = cpuid_edx(1);
+ has_sse2 = (features & (1 << 26)) != 0;
+
+ return has_sse2;
+}
+#endif /* !__x86_64__ */
+
+static void unmask_fpe(void)
+{
+ __asm__ __volatile__("fnclex");
+ unmask_x87();
+ if (cpu_has_sse2())
+ unmask_sse2();
+}
+
+static void unmask_fpe_conditional(int unmasked)
+{
+ if (unmasked)
+ unmask_fpe();
+}
+
+/* mask x86 FPE, return true if the previous state was unmasked */
+static int mask_fpe(void)
+{
+ int unmasked;
+
+ unmasked = mask_x87();
+ if (cpu_has_sse2())
+ unmasked |= mask_sse2();
+ return unmasked;
+}
+
+void erts_restore_fpu(void)
+{
+ __asm__ __volatile__("fninit");
+ unmask_x87();
+ if (cpu_has_sse2())
+ unmask_sse2();
+}
+
+#elif defined(__sparc__) && defined(__linux__)
+
+#if defined(__arch64__)
+#define LDX "ldx"
+#define STX "stx"
+#else
+#define LDX "ld"
+#define STX "st"
+#endif
+
+static void unmask_fpe(void)
+{
+ unsigned long fsr;
+
+ __asm__(STX " %%fsr, %0" : "=m"(fsr));
+ fsr &= ~(0x1FUL << 23); /* clear FSR[TEM] field */
+ fsr |= (0x1AUL << 23); /* enable NV, OF, DZ exceptions */
+ __asm__ __volatile__(LDX " %0, %%fsr" : : "m"(fsr));
+}
+
+static void unmask_fpe_conditional(int unmasked)
+{
+ if (unmasked)
+ unmask_fpe();
+}
+
+/* mask SPARC FPE, return true if the previous state was unmasked */
+static int mask_fpe(void)
+{
+ unsigned long fsr;
+ int unmasked;
+
+ __asm__(STX " %%fsr, %0" : "=m"(fsr));
+ unmasked = ((fsr >> 23) & 0x1A) == 0x1A;
+ fsr &= ~(0x1FUL << 23); /* clear FSR[TEM] field */
+ __asm__ __volatile__(LDX " %0, %%fsr" : : "m"(fsr));
+ return unmasked;
+}
+
+#elif (defined(__powerpc__) && defined(__linux__)) || (defined(__ppc__) && defined(__DARWIN__))
+
+#if defined(__linux__)
+#include <sys/prctl.h>
+
+static void set_fpexc_precise(void)
+{
+ if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) < 0) {
+ perror("PR_SET_FPEXC");
+ exit(1);
+ }
+}
+
+#elif defined(__DARWIN__)
+
+#include <mach/mach.h>
+#include <pthread.h>
+
+/*
+ * FE0 FE1 MSR bits
+ * 0 0 floating-point exceptions disabled
+ * 0 1 floating-point imprecise nonrecoverable
+ * 1 0 floating-point imprecise recoverable
+ * 1 1 floating-point precise mode
+ *
+ * Apparently:
+ * - Darwin 5.5 (MacOS X <= 10.1) starts with FE0 == FE1 == 0,
+ * and resets FE0 and FE1 to 0 after each SIGFPE.
+ * - Darwin 6.0 (MacOS X 10.2) starts with FE0 == FE1 == 1,
+ * and does not reset FE0 or FE1 after a SIGFPE.
+ */
+#define FE0_MASK (1<<11)
+#define FE1_MASK (1<<8)
+
+/* a thread cannot get or set its own MSR bits */
+static void *fpu_fpe_enable(void *arg)
+{
+ thread_t t = *(thread_t*)arg;
+ struct ppc_thread_state state;
+ unsigned int state_size = PPC_THREAD_STATE_COUNT;
+
+ if (thread_get_state(t, PPC_THREAD_STATE, (natural_t*)&state, &state_size) != KERN_SUCCESS) {
+ perror("thread_get_state");
+ exit(1);
+ }
+ if ((state.srr1 & (FE1_MASK|FE0_MASK)) != (FE1_MASK|FE0_MASK)) {
+#if 1
+ /* This would also have to be performed in the SIGFPE handler
+ to work around the MSR reset older Darwin releases do. */
+ state.srr1 |= (FE1_MASK|FE0_MASK);
+ thread_set_state(t, PPC_THREAD_STATE, (natural_t*)&state, state_size);
+#else
+ fprintf(stderr, "srr1 == 0x%08x, your Darwin is too old\n", state.srr1);
+ exit(1);
+#endif
+ }
+ return NULL; /* Ok, we appear to be on Darwin 6.0 or later */
+}
+
+static void set_fpexc_precise(void)
+{
+ thread_t self = mach_thread_self();
+ pthread_t enabler;
+
+ if (pthread_create(&enabler, NULL, fpu_fpe_enable, &self)) {
+ perror("pthread_create");
+ } else if (pthread_join(enabler, NULL)) {
+ perror("pthread_join");
+ }
+}
+
+#endif
+
+static void set_fpscr(unsigned int fpscr)
+{
+ union {
+ double d;
+ unsigned int fpscr[2];
+ } u;
+
+ u.fpscr[0] = 0xFFF80000;
+ u.fpscr[1] = fpscr;
+ __asm__ __volatile__("mtfsf 255,%0" : : "f"(u.d));
+}
+
+static unsigned int get_fpscr(void)
+{
+ union {
+ double d;
+ unsigned int fpscr[2];
+ } u;
+
+ __asm__("mffs %0" : "=f"(u.d));
+ return u.fpscr[1];
+}
+
+static void unmask_fpe(void)
+{
+ set_fpexc_precise();
+ set_fpscr(0x80|0x40|0x10); /* VE, OE, ZE; not UE or XE */
+}
+
+static void unmask_fpe_conditional(int unmasked)
+{
+ if (unmasked)
+ unmask_fpe();
+}
+
+/* mask PowerPC FPE, return true if the previous state was unmasked */
+static int mask_fpe(void)
+{
+ int unmasked;
+
+ unmasked = (get_fpscr() & (0x80|0x40|0x10)) == (0x80|0x40|0x10);
+ set_fpscr(0x00);
+ return unmasked;
+}
+
+#else
+
+static void unmask_fpe(void)
+{
+ fpsetmask(FP_X_INV | FP_X_OFL | FP_X_DZ);
+}
+
+static void unmask_fpe_conditional(int unmasked)
+{
+ if (unmasked)
+ unmask_fpe();
+}
+
+/* mask IEEE FPE, return true if previous state was unmasked */
+static int mask_fpe(void)
+{
+ const fp_except unmasked_mask = FP_X_INV | FP_X_OFL | FP_X_DZ;
+ fp_except old_mask;
+
+ old_mask = fpsetmask(0);
+ return (old_mask & unmasked_mask) == unmasked_mask;
+}
+
+#endif
+
+#if (defined(__linux__) && (defined(__i386__) || defined(__x86_64__) || defined(__sparc__) || defined(__powerpc__))) || (defined(__DARWIN__) && (defined(__i386__) || defined(__x86_64__) || defined(__ppc__))) || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__))) || (defined(__OpenBSD__) && defined(__x86_64__)) || (defined(__sun__) && defined(__x86_64__))
+
+#if defined(__linux__) && defined(__i386__)
+#if !defined(X86_FXSR_MAGIC)
+#define X86_FXSR_MAGIC 0x0000
+#endif
+#elif defined(__FreeBSD__) && defined(__x86_64__)
+#include <sys/types.h>
+#include <machine/fpu.h>
+#elif defined(__FreeBSD__) && defined(__i386__)
+#include <sys/types.h>
+#include <machine/npx.h>
+#elif defined(__DARWIN__)
+#include <machine/signal.h>
+#elif defined(__OpenBSD__) && defined(__x86_64__)
+#include <sys/types.h>
+#include <machine/fpu.h>
+#endif
+#if !(defined(__OpenBSD__) && defined(__x86_64__))
+#include <ucontext.h>
+#endif
+#include <string.h>
+
+#if defined(__linux__) && defined(__x86_64__)
+#define mc_pc(mc) ((mc)->gregs[REG_RIP])
+#elif defined(__linux__) && defined(__i386__)
+#define mc_pc(mc) ((mc)->gregs[REG_EIP])
+#elif defined(__DARWIN__) && defined(__i386__)
+#ifdef DARWIN_MODERN_MCONTEXT
+#define mc_pc(mc) ((mc)->__ss.__eip)
+#else
+#define mc_pc(mc) ((mc)->ss.eip)
+#endif
+#elif defined(__DARWIN__) && defined(__x86_64__)
+#ifdef DARWIN_MODERN_MCONTEXT
+#define mc_pc(mc) ((mc)->__ss.__rip)
+#else
+#define mc_pc(mc) ((mc)->ss.rip)
+#endif
+#elif defined(__FreeBSD__) && defined(__x86_64__)
+#define mc_pc(mc) ((mc)->mc_rip)
+#elif defined(__FreeBSD__) && defined(__i386__)
+#define mc_pc(mc) ((mc)->mc_eip)
+#elif defined(__OpenBSD__) && defined(__x86_64__)
+#define mc_pc(mc) ((mc)->sc_rip)
+#elif defined(__sun__) && defined(__x86_64__)
+#define mc_pc(mc) ((mc)->gregs[REG_RIP])
+#endif
+
+static void fpe_sig_action(int sig, siginfo_t *si, void *puc)
+{
+ ucontext_t *uc = puc;
+ unsigned long pc;
+
+#if defined(__linux__)
+#if defined(__x86_64__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ fpregset_t fpstate = mc->fpregs;
+ pc = mc_pc(mc);
+ /* A failed SSE2 instruction will restart. To avoid
+ looping we mask SSE2 exceptions now and unmask them
+ again later in erts_check_fpe()/erts_restore_fpu().
+ On RISCs we update PC to skip the failed instruction,
+ but the ever increasing complexity of the x86 instruction
+ set encoding makes that a poor solution here. */
+ fpstate->mxcsr = 0x1F80;
+ fpstate->swd &= ~0xFF;
+#elif defined(__i386__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ fpregset_t fpstate = mc->fpregs;
+ pc = mc_pc(mc);
+ if ((fpstate->status >> 16) == X86_FXSR_MAGIC)
+ ((struct _fpstate*)fpstate)->mxcsr = 0x1F80;
+ fpstate->sw &= ~0xFF;
+#elif defined(__sparc__) && defined(__arch64__)
+ /* on SPARC the 3rd parameter points to a sigcontext not a ucontext */
+ struct sigcontext *sc = (struct sigcontext*)puc;
+ pc = sc->sigc_regs.tpc;
+ sc->sigc_regs.tpc = sc->sigc_regs.tnpc;
+ sc->sigc_regs.tnpc += 4;
+#elif defined(__sparc__)
+ /* on SPARC the 3rd parameter points to a sigcontext not a ucontext */
+ struct sigcontext *sc = (struct sigcontext*)puc;
+ pc = sc->si_regs.pc;
+ sc->si_regs.pc = sc->si_regs.npc;
+ sc->si_regs.npc = (unsigned long)sc->si_regs.npc + 4;
+#elif defined(__powerpc__)
+#if defined(__powerpc64__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ unsigned long *regs = &mc->gp_regs[0];
+#else
+ mcontext_t *mc = uc->uc_mcontext.uc_regs;
+ unsigned long *regs = &mc->gregs[0];
+#endif
+ pc = regs[PT_NIP];
+ regs[PT_NIP] += 4;
+ regs[PT_FPSCR] = 0x80|0x40|0x10; /* VE, OE, ZE; not UE or XE */
+#endif
+#elif defined(__DARWIN__) && (defined(__i386__) || defined(__x86_64__))
+#ifdef DARWIN_MODERN_MCONTEXT
+ mcontext_t mc = uc->uc_mcontext;
+ pc = mc_pc(mc);
+ mc->__fs.__fpu_mxcsr = 0x1F80;
+ *(unsigned short *)&mc->__fs.__fpu_fsw &= ~0xFF;
+#else
+ mcontext_t mc = uc->uc_mcontext;
+ pc = mc_pc(mc);
+ mc->fs.fpu_mxcsr = 0x1F80;
+ *(unsigned short *)&mc->fs.fpu_fsw &= ~0xFF;
+#endif /* DARWIN_MODERN_MCONTEXT */
+#elif defined(__DARWIN__) && defined(__ppc__)
+ mcontext_t mc = uc->uc_mcontext;
+ pc = mc->ss.srr0;
+ mc->ss.srr0 += 4;
+ mc->fs.fpscr = 0x80|0x40|0x10;
+#elif defined(__FreeBSD__) && defined(__x86_64__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ struct savefpu *savefpu = (struct savefpu*)&mc->mc_fpstate;
+ struct envxmm *envxmm = &savefpu->sv_env;
+ pc = mc_pc(mc);
+ envxmm->en_mxcsr = 0x1F80;
+ envxmm->en_sw &= ~0xFF;
+#elif defined(__FreeBSD__) && defined(__i386__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ union savefpu *savefpu = (union savefpu*)&mc->mc_fpstate;
+ pc = mc_pc(mc);
+ if (mc->mc_fpformat == _MC_FPFMT_XMM) {
+ struct envxmm *envxmm = &savefpu->sv_xmm.sv_env;
+ envxmm->en_mxcsr = 0x1F80;
+ envxmm->en_sw &= ~0xFF;
+ } else {
+ struct env87 *env87 = &savefpu->sv_87.sv_env;
+ env87->en_sw &= ~0xFF;
+ }
+#elif defined(__OpenBSD__) && defined(__x86_64__)
+ struct fxsave64 *fxsave = uc->sc_fpstate;
+ pc = mc_pc(uc);
+ fxsave->fx_mxcsr = 0x1F80;
+ fxsave->fx_fsw &= ~0xFF;
+#elif defined(__sun__) && defined(__x86_64__)
+ mcontext_t *mc = &uc->uc_mcontext;
+ struct fpchip_state *fpstate = &mc->fpregs.fp_reg_set.fpchip_state;
+ pc = mc_pc(mc);
+ fpstate->mxcsr = 0x1F80;
+ fpstate->sw &= ~0xFF;
+#endif
+#if 0
+ {
+ char buf[64];
+ snprintf(buf, sizeof buf, "%s: FPE at %p\r\n", __FUNCTION__, (void*)pc);
+ write(2, buf, strlen(buf));
+ }
+#endif
+ set_current_fp_exception(pc);
+}
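Editor's note (not part of this commit), added to explain the magic constants
used repeatedly in the handler above:

    /* 0x1F80 is the MXCSR reset value: the six exception-mask bits (bits
       7-12, IM/DM/ZM/OM/UM/PM) are set, so all SSE exceptions are masked,
       and the exception-flag bits (bits 0-5) are cleared.  The "& ~0xFF"
       on the x87 status word likewise clears the pending exception flags
       (including the error-summary bit), so the faulting state does not
       immediately re-trigger when the interrupted code resumes. */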
+
+static void erts_thread_catch_fp_exceptions(void)
+{
+ struct sigaction act;
+ memset(&act, 0, sizeof act);
+ act.sa_sigaction = fpe_sig_action;
+ act.sa_flags = SA_SIGINFO;
+ sigaction(SIGFPE, &act, NULL);
+ unmask_fpe();
+}
+
+#else /* !((__linux__ && (__i386__ || __x86_64__ || __powerpc__)) || (__DARWIN__ && (__i386__ || __x86_64__ || __ppc__))) */
+
+static void fpe_sig_handler(int sig)
+{
+ set_current_fp_exception(1); /* XXX: convert to sigaction so we can get the trap PC */
+}
+
+static void erts_thread_catch_fp_exceptions(void)
+{
+ sys_sigset(SIGFPE, fpe_sig_handler);
+ unmask_fpe();
+}
+
+#endif /* (__linux__ && (__i386__ || __x86_64__ || __powerpc__)) || (__DARWIN__ && (__i386__ || __x86_64__ || __ppc__))) */
+
+/* once-only initialisation early in the main thread */
+void erts_sys_init_float(void)
+{
+ erts_init_fp_exception();
+ erts_thread_catch_fp_exceptions();
+ erts_printf_block_fpe = erts_sys_block_fpe;
+ erts_printf_unblock_fpe = erts_sys_unblock_fpe;
+}
+
+#endif /* NO_FPE_SIGNALS */
+
+void erts_thread_init_float(void)
+{
+#ifdef ERTS_SMP
+ /* This allows Erlang schedulers to leave Erlang-process context
+ and still have working FP exceptions. XXX: is this needed? */
+ erts_thread_init_fp_exception();
+#endif
+
+#ifndef NO_FPE_SIGNALS
+ /* NOTE:
+ * erts_thread_disable_fpe() is called in all threads at
+ * creation. We at least need to call unmask_fpe()
+ */
+#if defined(__DARWIN__) || defined(__FreeBSD__)
+ /* Darwin (7.9.0) does not appear to propagate FP exception settings
+ to a new thread from its parent. So if we want FP exceptions, we
+ must manually re-enable them in each new thread.
+ FreeBSD 6.1 appears to suffer from a similar issue. */
+ erts_thread_catch_fp_exceptions();
+#else
+ unmask_fpe();
+#endif
+
+#endif
+}
+
+void erts_thread_disable_fpe(void)
+{
+#if !defined(NO_FPE_SIGNALS)
+ (void)mask_fpe();
+#endif
+}
+
+#if !defined(NO_FPE_SIGNALS)
+int erts_sys_block_fpe(void)
+{
+ return mask_fpe();
+}
+
+void erts_sys_unblock_fpe(int unmasked)
+{
+ unmask_fpe_conditional(unmasked);
+}
+#endif
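Editor's sketch (not part of this commit): the intended pairing of the two
hooks above. The demo function name is hypothetical; the pattern is to save
whether FP exceptions were unmasked, run code that must not trap on them,
and then conditionally unmask again.

    static void demo_fp_block(void)
    {
        int unmasked = erts_sys_block_fpe();  /* mask FPEs, remember old state */
        /* ... FP-exception-unsafe work, e.g. the float formatting that goes
           through the erts_printf hooks registered in erts_sys_init_float() ... */
        erts_sys_unblock_fpe(unmasked);       /* unmask only if previously unmasked */
    }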
+
+/* The following check is incorporated from the Vee machine */
+
+#define ISDIGIT(d) ((d) >= '0' && (d) <= '9')
+
+/*
+ ** Convert a double to ascii format 0.dddde[+|-]ddd
+ ** return number of characters converted
+ **
+ ** These two functions should perhaps use localeconv() to pick up
+ ** the current radix character, but since it is uncertain how
+ ** expensive such a call is, and since no radix characters other
+ ** than '.' and ',' are known to be in use, an ad-hoc solution
+ ** with low execution time is used instead.

+ */
+
+int
+sys_double_to_chars(double fp, char *buf)
+{
+ char *s = buf;
+
+ (void) sprintf(buf, "%.20e", fp);
+ /* Search up to the decimal point */
+ if (*s == '+' || *s == '-') s++;
+ while (ISDIGIT(*s)) s++;
+ if (*s == ',') *s++ = '.'; /* Replace ',' with '.' */
+ /* Scan to end of string */
+ while (*s) s++;
+ return s-buf; /* i.e strlen(buf) */
+}
+
+/* Float conversion */
+
+int
+sys_chars_to_double(char* buf, double* fp)
+{
+#ifndef NO_FPE_SIGNALS
+ volatile unsigned long *fpexnp = erts_get_current_fp_exception();
+#endif
+ char *s = buf, *t, *dp;
+
+ /* Robert says that something like this is what he really wanted:
+ * (The [.,] radix test is NOT what Robert wanted - it was added later)
+ *
+ * 7 == sscanf(Tbuf, "%[+-]%[0-9][.,]%[0-9]%[eE]%[+-]%[0-9]%s", ....);
+ * if (*s2 == 0 || *s3 == 0 || *s4 == 0 || *s6 == 0 || *s7)
+ * break;
+ */
+
+ /* Scan string to check syntax. */
+ if (*s == '+' || *s == '-') s++;
+ if (!ISDIGIT(*s)) /* Leading digits. */
+ return -1;
+ while (ISDIGIT(*s)) s++;
+ if (*s != '.' && *s != ',') /* Decimal part. */
+ return -1;
+ dp = s++; /* Remember decimal point pos just in case */
+ if (!ISDIGIT(*s))
+ return -1;
+ while (ISDIGIT(*s)) s++;
+ if (*s == 'e' || *s == 'E') {
+ /* There is an exponent. */
+ s++;
+ if (*s == '+' || *s == '-') s++;
+ if (!ISDIGIT(*s))
+ return -1;
+ while (ISDIGIT(*s)) s++;
+ }
+ if (*s) /* That should be it */
+ return -1;
+
+#ifdef NO_FPE_SIGNALS
+ errno = 0;
+#endif
+ __ERTS_FP_CHECK_INIT(fpexnp);
+ *fp = strtod(buf, &t);
+ __ERTS_FP_ERROR_THOROUGH(fpexnp, *fp, return -1);
+ if (t != s) { /* Whole string not scanned */
+ /* Try again with other radix char */
+ *dp = (*dp == '.') ? ',' : '.';
+ errno = 0;
+ __ERTS_FP_CHECK_INIT(fpexnp);
+ *fp = strtod(buf, &t);
+ __ERTS_FP_ERROR_THOROUGH(fpexnp, *fp, return -1);
+ }
+
+#ifdef NO_FPE_SIGNALS
+ if (errno == ERANGE && (*fp == 0.0 || *fp == HUGE_VAL || *fp == -HUGE_VAL)) {
+ return -1;
+ }
+#endif
+ return 0;
+}
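Editor's sketch (not part of this commit): round-tripping a value through the
two conversion routines above. The demo function and the 32-byte buffer size
are assumptions; the "%.20e" output fits well within 32 bytes.

    static void demo_double_roundtrip(void)
    {
        char buf[32];
        double d;
        int len = sys_double_to_chars(3.14, buf);  /* "%.20e" rendering of 3.14 */
        if (sys_chars_to_double(buf, &d) == 0) {
            /* d holds 3.14 again; input without a fraction part ("3") or with
               trailing junk ("3.14x") makes sys_chars_to_double() return -1 */
        }
        (void)len;
    }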
+
+int
+matherr(struct exception *exc)
+{
+#if !defined(NO_FPE_SIGNALS)
+ set_current_fp_exception((unsigned long)__builtin_return_address(0));
+#endif
+ return 1;
+}
diff --git a/erts/emulator/sys/unix/sys_time.c b/erts/emulator/sys/unix/sys_time.c
new file mode 100644
index 0000000000..fcce54a2c4
--- /dev/null
+++ b/erts/emulator/sys/unix/sys_time.c
@@ -0,0 +1,134 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+/* These need to be #undef'ed so as not to break activation of
+ * micro-level process accounting on /proc/self
+ */
+#ifdef _LARGEFILE_SOURCE
+# undef _LARGEFILE_SOURCE
+#endif
+#ifdef _FILE_OFFSET_BITS
+# undef _FILE_OFFSET_BITS
+#endif
+
+#include "sys.h"
+#include "global.h"
+
+#ifdef NO_SYSCONF
+# define TICKS_PER_SEC() HZ
+#else
+#define TICKS_PER_SEC() sysconf(_SC_CLK_TCK)
+#endif
+
+#ifdef HAVE_GETHRVTIME_PROCFS_IOCTL
+# include <unistd.h>
+# include <sys/types.h>
+# include <sys/stat.h>
+# include <sys/signal.h>
+# include <sys/fault.h>
+# include <sys/syscall.h>
+# include <sys/procfs.h>
+# include <fcntl.h>
+#endif
+
+/******************* Routines for time measurement *********************/
+
+int erts_ticks_per_sec = 0; /* Will be SYS_CLK_TCK in erl_unix_sys.h */
+int erts_ticks_per_sec_wrap = 0; /* Will be SYS_CLK_TCK_WRAP */
+static int ticks_bsr = 0; /* Shift wrapped tick value this much to the right */
+
+/*
+ * Initialize timers, choose a tick length, and return it.
+ * Unix is privileged when it comes to time, as erl_time_sup.c
+ * does almost everything. Other platforms have to
+ * emulate Unix in this sense.
+ */
+int sys_init_time(void)
+{
+ /*
+ * This (erts_ticks_per_sec) is only for times() (CLK_TCK);
+ * the resolution is always one millisecond.
+ */
+ if ((erts_ticks_per_sec = TICKS_PER_SEC()) < 0)
+ erl_exit(1, "Can't get clock ticks/sec\n");
+ if (erts_ticks_per_sec >= 1000) {
+ /* Workaround for beta Linux kernels; this needs to be done at runtime
+ so that Erlang runs on both 2.4 and 2.5 kernels. In the future, the
+ kernel ticks might as well be used as a high-resolution timer instead,
+ but that is for when the majority uses kernels with HZ == 1024 */
+ ticks_bsr = 3;
+ } else {
+ ticks_bsr = 0;
+ }
+ erts_ticks_per_sec_wrap = (erts_ticks_per_sec >> ticks_bsr);
+ return SYS_CLOCK_RESOLUTION;
+}
+
+clock_t sys_times_wrap(void)
+{
+ SysTimes dummy;
+ clock_t result = (sys_times(&dummy) >> ticks_bsr);
+ return result;
+}
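Editor's note (not part of this commit): a worked example of the shift above,
assuming a kernel where sysconf(_SC_CLK_TCK) reports 1024:

    /* erts_ticks_per_sec      = 1024   (>= 1000, so the workaround kicks in) */
    /* ticks_bsr               = 3                                            */
    /* erts_ticks_per_sec_wrap = 1024 >> 3 = 128                              */
    /* sys_times_wrap()        = sys_times(&dummy) >> 3, i.e. the wrapped     */
    /* tick counter advances at 128 Hz instead of 1024 Hz                     */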
+
+
+
+
+#ifdef HAVE_GETHRVTIME_PROCFS_IOCTL
+
+int sys_start_hrvtime(void)
+{
+ long msacct = PR_MSACCT;
+ int fd;
+
+ if ( (fd = open("/proc/self", O_WRONLY)) == -1) {
+ return -1;
+ }
+ if (ioctl(fd, PIOCSET, &msacct) < 0) {
+ close(fd);
+ return -2;
+ }
+ close(fd);
+ return 0;
+}
+
+int sys_stop_hrvtime(void)
+{
+ long msacct = PR_MSACCT;
+ int fd;
+
+ if ( (fd = open("/proc/self", O_WRONLY)) == -1) {
+ return -1;
+ }
+ if (ioctl(fd, PIOCRESET, &msacct) < 0) {
+ close(fd);
+ return -2;
+ }
+ close(fd);
+ return 0;
+}
+
+#endif /* HAVE_GETHRVTIME_PROCFS_IOCTL */
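Editor's sketch (not part of this commit): once PR_MSACCT has been switched on
via the ioctl above, Solaris' gethrvtime() delivers per-LWP virtual time with
nanosecond resolution. The demo function is hypothetical, and it assumes the
Solaris declarations of gethrvtime()/hrtime_t are visible.

    static void demo_hrvtime(void)
    {
        hrtime_t t0, used;
        if (sys_start_hrvtime() == 0) {
            t0 = gethrvtime();
            /* ... work whose virtual (CPU) time we want to measure ... */
            used = gethrvtime() - t0;   /* nanoseconds of virtual time */
            (void)used;
            (void)sys_stop_hrvtime();
        }
    }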
+
+
diff --git a/erts/emulator/sys/vxworks/driver_int.h b/erts/emulator/sys/vxworks/driver_int.h
new file mode 100644
index 0000000000..f6bc71a799
--- /dev/null
+++ b/erts/emulator/sys/vxworks/driver_int.h
@@ -0,0 +1,30 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*----------------------------------------------------------------------
+** Purpose : System dependent driver declarations
+**---------------------------------------------------------------------- */
+
+#ifndef __DRIVER_INT_H__
+#define __DRIVER_INT_H__
+
+#include <ioLib.h>
+
+typedef struct iovec SysIOVec;
+
+#endif
diff --git a/erts/emulator/sys/vxworks/erl_main.c b/erts/emulator/sys/vxworks/erl_main.c
new file mode 100644
index 0000000000..c9b44a635a
--- /dev/null
+++ b/erts/emulator/sys/vxworks/erl_main.c
@@ -0,0 +1,45 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "erl_vm.h"
+
+#if defined(__GNUC__)
+/*
+ * The generated assembler does the usual trick (relative
+ * branch-and-link to next instruction) to get a copy of the
+ * instruction ptr. Instead of branching to an explicit zero offset,
+ * it branches to the symbol `__eabi' --- which is expected to be
+ * undefined and thus zero (if it is defined as non-zero, things will
+ * be interesting --- as in the Chinese curse). To shut up the VxWorks
+ * linker, we define `__eabi' as zero.
+ *
+ * This is just a workaround. It's really Wind River's GCC's code
+ * generator that should be fixed.
+ */
+__asm__(".equ __eabi, 0");
+#endif
+
+void
+erl_main(int argc, char **argv)
+{
+ erl_start(argc, argv);
+}
diff --git a/erts/emulator/sys/vxworks/erl_vxworks_sys.h b/erts/emulator/sys/vxworks/erl_vxworks_sys.h
new file mode 100644
index 0000000000..ae46403600
--- /dev/null
+++ b/erts/emulator/sys/vxworks/erl_vxworks_sys.h
@@ -0,0 +1,183 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifndef __ERL_VXWORKS_SYS_H__
+#define __ERL_VXWORKS_SYS_H__
+
+/* stdarg.h doesn't work without this one... */
+#include <vxWorks.h>
+
+#include <stdio.h>
+#include <math.h>
+#include <limits.h>
+#include <stdlib.h>
+#define index StringIndexFunctionThatIDontWantDeclared
+#include <string.h>
+#undef index
+
+
+
+#include <sys/times.h>
+#include <time.h>/* xxxP */
+
+#include <dirent.h>
+#include <sys/stat.h>
+
+/* xxxP from unix_sys.h begin */
+
+/*
+ * Make sure that MAXPATHLEN is defined.
+ */
+
+#ifndef MAXPATHLEN
+# ifdef PATH_MAX
+# define MAXPATHLEN PATH_MAX
+# else
+# define MAXPATHLEN 2048
+# endif
+#endif
+
+/* xxxP end */
+
+
+/* Unimplemented math functions */
+#define NO_ASINH
+#define NO_ACOSH
+#define NO_ATANH
+#define NO_ERF
+#define NO_ERFC
+
+/* Stuff that is useful for port programs, drivers, etc */
+#ifndef VXWORKS
+#define VXWORKS
+#endif
+
+#define DONT_USE_MAIN
+#define NO_FSYNC
+#define NO_MKDIR_MODE
+#define NO_UMASK
+#define NO_SYMBOLIC_LINKS
+#define NO_DEVICE_FILES
+#define NO_UID
+#define NO_ACCESS
+#define NO_FCNTL
+#define NO_SYSLOG
+#define NO_SYSCONF
+#define NO_PWD /* XXX Means what? */
+#define NO_DAEMON
+/* This chooses ~250 reductions instead of 500 in config.h */
+#if (CPU == CPU32)
+#define SLOW_PROCESSOR
+#endif
+
+/*
+ * Even though we do not always have small memories on VxWorks,
+ * we certainly do not have virtual memory.
+ */
+#if !defined(LARGE_MEMORY)
+#define SMALL_MEMORY
+#endif
+
+/*************** Floating point exception handling ***************/
+
+/* There are no known ways to customize the handling of invalid floating
+ point operations, such as matherr() or ieee_handler(), in VxWorks 5.1. */
+
+#if (CPU == MC68040 || CPU == CPU32 || CPU == PPC860 || CPU == PPC32 || \
+ CPU == PPC603 || CPU == PPC604 || CPU == SIMSPARCSOLARIS)
+
+/* VxWorks 5.1 on Motorola 68040 never generates SIGFPE, but sets the
+ result of invalid floating point ops to Inf and NaN - unfortunately
+ the way to test for those values is undocumented and hidden in a
+ "private" include file... */
+/* Haven't found any better way, as of yet, for ppc860 xxxP*/
+
+#include <private/mathP.h>
+#define NO_FPE_SIGNALS
+#define erts_get_current_fp_exception() NULL
+#define __ERTS_FP_CHECK_INIT(fpexnp) do {} while (0)
+#define __ERTS_FP_ERROR(fpexnp, f, Action) if (isInf(f) || isNan(f)) { Action; } else {}
+#define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) __ERTS_FP_ERROR(fpexnp, f, Action)
+#define __ERTS_SAVE_FP_EXCEPTION(fpexnp)
+#define __ERTS_RESTORE_FP_EXCEPTION(fpexnp)
+
+#define ERTS_FP_CHECK_INIT(p) __ERTS_FP_CHECK_INIT(&(p)->fp_exception)
+#define ERTS_FP_ERROR(p, f, A) __ERTS_FP_ERROR(&(p)->fp_exception, f, A)
+#define ERTS_SAVE_FP_EXCEPTION(p) __ERTS_SAVE_FP_EXCEPTION(&(p)->fp_exception)
+#define ERTS_RESTORE_FP_EXCEPTION(p) __ERTS_RESTORE_FP_EXCEPTION(&(p)->fp_exception)
+#define ERTS_FP_ERROR_THOROUGH(p, f, A) __ERTS_FP_ERROR_THOROUGH(&(p)->fp_exception, f, A)
+
+#define erts_sys_block_fpe() 0
+#define erts_sys_unblock_fpe(x) do{}while(0)
+
+#if (CPU == PPC603)
+/* Need fppLib to change the Floating point registers
+ (fix_registers in sys.c)*/
+
+#include <fppLib.h>
+
+#endif /* PPC603 */
+
+#else
+
+Unsupported CPU value !
+
+#endif
+
+typedef void *GETENV_STATE;
+
+#define HAVE_GETHRTIME
+
+extern int erts_clock_rate;
+
+#define SYS_CLK_TCK (erts_clock_rate)
+
+#define SYS_CLOCK_RESOLUTION 1
+
+typedef struct _vxworks_tms {
+ clock_t tms_utime;
+ clock_t tms_stime;
+ clock_t tms_cutime;
+ clock_t tms_cstime;
+} SysTimes;
+
+typedef long long SysHrTime;
+
+typedef struct timeval SysTimeval;
+
+extern int sys_init_hrtime(void);
+extern SysHrTime sys_gethrtime(void);
+extern void sys_gettimeofday(SysTimeval *tvp);
+extern clock_t sys_times(SysTimes *t);
+
+#define SIZEOF_SHORT 2
+#define SIZEOF_INT 4
+#define SIZEOF_LONG 4
+#define SIZEOF_VOID_P 4
+#define SIZEOF_SIZE_T 4
+#define SIZEOF_OFF_T 4
+
+/*
+ * Temporary buffer *only* used in sys code.
+ */
+#define SYS_TMP_BUF_SIZE 65536
+
+/* Need to be able to interrupt erts_poll_wait() from signal handler */
+#define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
+
+#endif /* __ERL_VXWORKS_SYS_H__ */
diff --git a/erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c b/erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c
new file mode 100644
index 0000000000..c56c633b2f
--- /dev/null
+++ b/erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c
@@ -0,0 +1,253 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Interface functions to the dynamic linker, implemented here with the
+ * VxWorks loadLib/symLib facilities rather than the Unix dl* functions.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include <vxWorks.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <a_out.h>
+#include <symLib.h>
+#include <loadLib.h>
+#include <unldLib.h>
+#include <moduleLib.h>
+#include <sysSymTbl.h>
+#include "sys.h"
+#include "global.h"
+#include "erl_alloc.h"
+#include "erl_driver.h"
+
+#define EXT_LEN 4
+#define FILE_EXT ".eld"
+#define ALT_FILE_EXT ".o"
+/* ALT_FILE_EXT must not be longer than FILE_EXT */
+#define DRIVER_INIT_SUFFIX "_init"
+
+static MODULE_ID get_mid(char *);
+static FUNCPTR lookup(char *);
+
+typedef enum {
+ NoError,
+ ModuleNotFound,
+ ModuleNotUnloadable,
+ UnknownError
+} FakeSytemError;
+
+static char *errcode_tab[] = {
+ "No error",
+ "Module/file not found",
+ "Module cannot be unloaded",
+ "Unknown error"
+};
+
+void erl_sys_ddll_init(void) {
+ return;
+}
+/*
+ * Open a shared object
+ */
+int erts_sys_ddll_open2(char *full_name, void **handle, ErtsSysDdllError* err)
+{
+ int len;
+
+ if (erts_sys_ddll_open_noext(full_name, handle, err) == ERL_DE_NO_ERROR) {
+ return ERL_DE_NO_ERROR;
+ }
+ if ((len = sys_strlen(full_name)) > PATH_MAX-EXT_LEN) {
+ return ERL_DE_LOAD_ERROR_NAME_TO_LONG;
+ } else {
+ static char dlname[PATH_MAX + 1];
+
+ sys_strcpy(dlname, full_name);
+ sys_strcpy(dlname+len, FILE_EXT);
+ if (erts_sys_ddll_open_noext(dlname, handle, err) == ERL_DE_NO_ERROR) {
+ return ERL_DE_NO_ERROR;
+ }
+ sys_strcpy(dlname+len, ALT_FILE_EXT);
+ return erts_sys_ddll_open_noext(dlname, handle, err);
+ }
+}
+int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err)
+{
+ MODULE_ID mid;
+
+ if((mid = get_mid(dlname)) == NULL) {
+ return ERL_DE_DYNAMIC_ERROR_OFFSET - ((int) ModuleNotFound);
+ }
+ *handle = (void *) mid;
+ return ERL_DE_NO_ERROR;
+}
+
+/*
+ * Find a symbol in the shared object
+ */
+#define PREALLOC_BUFFER_SIZE 256
+int erts_sys_ddll_sym2(void *handle, char *func_name, void **function, ErtsSysDdllError* err)
+{
+ FUNCPTR proc;
+ static char statbuf[PREALLOC_BUFFER_SIZE];
+ char *buf = statbuf;
+ int need;
+
+ if ((proc = lookup(func_name)) == NULL) {
+ if ((need = strlen(func_name)+2) > PREALLOC_BUFFER_SIZE) {
+ buf = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF,need);
+ }
+ buf[0] = '_';
+ sys_strcpy(buf+1,func_name);
+ proc = lookup(buf);
+ if (buf != statbuf) {
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, buf);
+ }
+ if (proc == NULL) {
+ return ERL_DE_LOOKUP_ERROR_NOT_FOUND;
+ }
+ }
+ *function = (void *) proc;
+ return ERL_DE_NO_ERROR;
+}
+
+/* XXX:PaN These two will be changed with new driver interface! */
+
+/*
+ * Load the driver init function, might appear under different names depending on object arch...
+ */
+
+int erts_sys_ddll_load_driver_init(void *handle, void **function)
+{
+ MODULE_ID mid = (MODULE_ID) handle;
+ char *modname;
+ char *cp;
+ static char statbuf[PREALLOC_BUFFER_SIZE];
+ char *fname = statbuf;
+ int len;
+ int res;
+ void *func;
+ int need;
+
+ if((modname = moduleNameGet(mid)) == NULL) {
+ return ERL_DE_DYNAMIC_ERROR_OFFSET - ((int) ModuleNotFound);
+ }
+
+ if((cp = strrchr(modname, '.')) == NULL) {
+ len = strlen(modname);
+ } else {
+ len = cp - modname;
+ }
+
+ need = len + strlen(DRIVER_INIT_SUFFIX) + 1;
+ if (need > PREALLOC_BUFFER_SIZE) {
+ fname = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, need); /* erts_alloc exits on failure */
+ }
+ sys_strncpy(fname, modname, len);
+ fname[len] = '\0';
+ sys_strcat(fname, DRIVER_INIT_SUFFIX);
+ res = erts_sys_ddll_sym(handle, fname, &func);
+ if (fname != statbuf) {
+ erts_free(ERTS_ALC_T_DDLL_TMP_BUF, fname);
+ }
+ if ( res != ERL_DE_NO_ERROR) {
+ return res;
+ }
+ *function = func;
+ return ERL_DE_NO_ERROR;
+}
+
+int erts_sys_ddll_load_nif_init(void *handle, void **function, ErtsSysDdllError* err)
+{
+ /* NIFs not implemented for vxworks */
+ return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
+}
+
+/*
+ * Call the driver_init function, whatever it's really called, simple on unix...
+*/
+void *erts_sys_ddll_call_init(void *function) {
+ void *(*initfn)(void) = function;
+ return (*initfn)();
+}
+void *erts_sys_ddll_call_nif_init(void *function) {
+ return erts_sys_ddll_call_init(function);
+}
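Editor's sketch (not part of this commit): how the routines above fit together
when loading a dynamically linked-in driver. The path "/erlang/lib/mydrv" and
the demo function are hypothetical.

    static void demo_load_driver(void)
    {
        void *handle, *init_fn;

        /* open2() tries "mydrv", then "mydrv.eld", then "mydrv.o" */
        if (erts_sys_ddll_open2("/erlang/lib/mydrv", &handle, NULL) == ERL_DE_NO_ERROR
            && erts_sys_ddll_load_driver_init(handle, &init_fn) == ERL_DE_NO_ERROR) {
            /* resolves "mydrv_init" (or "_mydrv_init") and calls it */
            ErlDrvEntry *dp = erts_sys_ddll_call_init(init_fn);
            (void)dp;
        }
    }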
+
+
+/*
+ * Close a shared object
+ */
+int erts_sys_ddll_close2(void *handle, ErtsSysDdllError* err)
+{
+ MODULE_ID mid = (MODULE_ID) handle;
+ if (unld(mid, 0) < 0) {
+ return ERL_DE_DYNAMIC_ERROR_OFFSET - ((int) ModuleNotUnloadable);
+ }
+ return ERL_DE_NO_ERROR;
+}
+
+/*
+ * Return string that describes the (current) error
+ */
+char *erts_sys_ddll_error(int code)
+{
+ int actual_code;
+ if (code > ERL_DE_DYNAMIC_ERROR_OFFSET) {
+ return "Unspecified error";
+ }
+ actual_code = -1*(code - ERL_DE_DYNAMIC_ERROR_OFFSET);
+ if (actual_code > ((int) UnknownError)) {
+ actual_code = UnknownError;
+ }
+ return errcode_tab[actual_code];
+}
+
+static FUNCPTR lookup(char *sym)
+{
+ FUNCPTR entry;
+ SYM_TYPE type;
+
+ if (symFindByNameAndType(sysSymTbl, sym, (char **)&entry,
+ &type, N_EXT | N_TEXT, N_EXT | N_TEXT) != OK) {
+ return NULL ;
+ }
+ return entry;
+}
+
+static MODULE_ID get_mid(char* name)
+{
+ int fd;
+ MODULE_ID mid = NULL;
+
+ if((fd = open(name, O_RDONLY, 0664)) >= 0) {
+ mid = loadModule(fd, GLOBAL_SYMBOLS);
+ close(fd);
+ }
+ return mid;
+}
+
+void erts_sys_ddll_free_error(ErtsSysDdllError* err)
+{
+ /* NYI */
+}
+
diff --git a/erts/emulator/sys/vxworks/sys.c b/erts/emulator/sys/vxworks/sys.c
new file mode 100644
index 0000000000..abddc7e107
--- /dev/null
+++ b/erts/emulator/sys/vxworks/sys.c
@@ -0,0 +1,2594 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * system-dependent functions
+ *
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include <vxWorks.h>
+#include <version.h>
+#include <string.h>
+#include <types.h>
+#include <sigLib.h>
+#include <ioLib.h>
+#include <iosLib.h>
+#include <envLib.h>
+#include <fioLib.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <symLib.h>
+#include <sysLib.h>
+#include <sysSymTbl.h>
+#include <loadLib.h>
+#include <taskLib.h>
+#include <taskVarLib.h>
+#include <taskHookLib.h>
+#include <tickLib.h>
+#include <time.h>
+#include <rngLib.h>
+#include <semLib.h>
+#include <selectLib.h>
+#include <sockLib.h>
+#include <a_out.h>
+#include <wdLib.h>
+#include <timers.h>
+#include <ctype.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <stdarg.h>
+
+
+#ifndef WANT_NONBLOCKING
+#define WANT_NONBLOCKING
+#endif
+
+#include "sys.h"
+#include "erl_alloc.h"
+
+/* don't need global.h, but bif_table.h (included by bif.h) won't compile otherwise */
+#include "global.h"
+#include "bif.h"
+
+#include "erl_sys_driver.h"
+
+#include "elib_stat.h"
+
+#include "reclaim_private.h" /* Some more or less private reclaim facilities */
+
+#ifndef RETSIGTYPE
+#define RETSIGTYPE void
+#endif
+
+EXTERN_FUNCTION(void, erl_start, (int, char**));
+EXTERN_FUNCTION(void, erl_exit, (int n, char*, _DOTS_));
+EXTERN_FUNCTION(void, erl_error, (char*, va_list));
+EXTERN_FUNCTION(int, driver_interrupt, (int, int));
+EXTERN_FUNCTION(void, increment_time, (int));
+EXTERN_FUNCTION(int, next_time, (_VOID_));
+EXTERN_FUNCTION(void, set_reclaim_free_function, (FreeFunction));
+EXTERN_FUNCTION(int, erl_mem_info_get, (MEM_PART_STATS *));
+EXTERN_FUNCTION(void, erl_crash_dump, (char* file, int line, char* fmt, ...));
+
+#define ISREG(st) (((st).st_mode&S_IFMT) == S_IFREG)
+
+/* these are defined in usrLib.c */
+extern int spTaskPriority, spTaskOptions;
+
+/* forward declarations */
+static FUNCTION(FUNCPTR, lookup, (char*));
+static FUNCTION(int, read_fill, (int, char*, int));
+#if (CPU == SPARC)
+static FUNCTION(RETSIGTYPE, fpe_sig_handler, (int)); /*where is this fun? */
+#elif (CPU == PPC603)
+static FUNCTION(void, fix_registers, (void));
+#endif
+static FUNCTION(void, close_pipes, (int*, int*, int));
+static FUNCTION(void, delete_hook, (void));
+static FUNCTION(void, initialize_allocation, (void));
+
+FUNCTION(STATUS, uxPipeDrv, (void));
+FUNCTION(STATUS, pipe, (int*));
+FUNCTION(void, uxPipeShow, (int));
+
+void erl_main(int argc, char **argv);
+void argcall(char *args);
+
+/* Malloc-related functions called from the VxWorks shell */
+EXTERN_FUNCTION(int, erl_set_memory_block,
+ (int, int, int, int, int, int, int, int, int, int));
+EXTERN_FUNCTION(int, erl_memory_show,
+ (int, int, int, int, int, int, int, int, int, int));
+
+#define DEFAULT_PORT_STACK_SIZE 100000
+static int port_stack_size;
+
+static int erlang_id = 0; /* Inited at loading, set/reset at each run */
+
+/* interval time reported to emulator */
+static int sys_itime;
+
+/* XXX - This is defined in .../config/all/configAll.h (NUM_FILES),
+ and not easily accessible at compile or run time - however,
+ in VxWorks 5.1 it is stored in the (undocumented?) maxFiles variable;
+ probably shouldn't depend on it, but we try to pick it up... */
+static int max_files = 50; /* default configAll.h */
+
+int erts_vxworks_max_files;
+
+/*
+ * used by the break handler (set by signal handler on ctl-c)
+ */
+volatile int erts_break_requested;
+
+/********************* General functions ****************************/
+
+Uint
+erts_sys_misc_mem_sz(void)
+{
+ Uint res = erts_check_io_size();
+ /* res += FIXME */
+ return res;
+}
+
+/*
+ * XXX This declaration should not be here.
+ */
+void erl_sys_schedule_loop(void);
+
+#ifdef SOFTDEBUG
+static void do_trace(int line, char *file, char *format, ...)
+{
+ va_list va;
+ int tid = taskIdSelf();
+ char buff[512];
+
+ va_start(va, format);
+ sprintf(buff,"Trace: Task: 0x%08x, %s:%d - ",
+ tid, file, line);
+ vsprintf(buff + strlen(buff), format, va);
+ va_end(va);
+ strcat(buff,"\r\n");
+ write(2,buff,strlen(buff));
+}
+
+#define TRACE() do_trace(__LINE__, __FILE__,"")
+#define TRACEF(Args...) do_trace(__LINE__,__FILE__, ## Args)
+#endif
+
+void
+erts_sys_pre_init(void)
+{
+ if (erlang_id != 0) {
+ /* NOTE: This particular case must *not* call erl_exit() */
+ erts_fprintf(stderr, "Sorry, erlang is already running (as task %d)\n",
+ erlang_id);
+ exit(1);
+ }
+
+ /* This must be done as early as possible... */
+ if(!reclaim_init())
+ fprintf(stderr, "Warning : reclaim facility should be initiated before "
+ "erlang is started!\n");
+ erts_vxworks_max_files = max_files = reclaim_max_files();
+
+ /* Floating point exceptions */
+#if (CPU == SPARC)
+ sys_sigset(SIGFPE, fpe_sig_handler);
+#elif (CPU == PPC603)
+ fix_registers();
+#endif
+
+ /* register the private delete hook in reclaim */
+ save_delete_hook((FUNCPTR)delete_hook, (caddr_t)0);
+ erlang_id = taskIdSelf();
+#ifdef DEBUG
+ printf("emulator task id = 0x%x\n", erlang_id);
+#endif
+}
+
+void erts_sys_alloc_init(void)
+{
+ initialize_allocation();
+}
+
+void
+erl_sys_init(void)
+{
+ setvbuf(stdout, (char *)NULL, _IOLBF, BUFSIZ);
+ /* XXX Bug in VxWorks stdio loses fputch()'ed output after the
+ setvbuf() but before a *printf(), and possibly worse (malloc
+ errors, crash?) - so let's give it a *printf().... */
+ fprintf(stdout, "%s","");
+}
+
+void
+erl_sys_args(int* argc, char** argv)
+{
+ erts_init_check_io();
+ max_files = erts_check_io_max_files();
+ ASSERT(max_files <= erts_vxworks_max_files);
+}
+
+/*
+ * Called from schedule() when it runs out of runnable processes,
+ * or when Erlang code has performed INPUT_REDUCTIONS reduction
+ * steps. runnable == 0 iff there are no runnable Erlang processes.
+ */
+void
+erl_sys_schedule(int runnable)
+{
+ erts_check_io_interrupt(0);
+ erts_check_io(!runnable);
+}
+
+void erts_do_break_handling(void)
+{
+ SET_BLOCKING(0);
+ /* call the break handling function, reset the flag */
+ do_break();
+ erts_break_requested = 0;
+ SET_NONBLOCKING(0);
+}
+
+/* signal handling */
+RETSIGTYPE (*sys_sigset(sig, func))()
+ int sig;
+ RETSIGTYPE (*func)();
+{
+ struct sigaction act, oact;
+
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = 0;
+ act.sa_handler = func;
+ sigaction(sig, &act, &oact);
+ return(oact.sa_handler);
+}
+
+void sys_sigblock(int sig)
+{
+ sigset_t mask;
+
+ sigemptyset(&mask);
+ sigaddset(&mask, sig);
+ sigprocmask(SIG_BLOCK, &mask, (sigset_t *)NULL);
+}
+
+void sys_sigrelease(int sig)
+{
+ sigset_t mask;
+
+ sigemptyset(&mask);
+ sigaddset(&mask, sig);
+ sigprocmask(SIG_UNBLOCK, &mask, (sigset_t *)NULL);
+}
+
+void
+erts_sys_prepare_crash_dump(void)
+{
+
+}
+
+/* register signal handlers XXX - they don't work, need to find out why... */
+/* set up signal handlers for break and quit */
+static void request_break(void)
+{
+ /* just set a flag - checked for and handled
+ * in main thread (not signal handler).
+ * see check_io()
+ */
+#ifdef DEBUG
+ fprintf(stderr,"break!\n");
+#endif
+ erts_break_requested = 1;
+ erts_check_io_interrupt(1); /* Make sure we don't sleep in erts_poll_wait */
+}
+
+static void do_quit(void)
+{
+ halt_0(0);
+}
+
+void erts_set_ignore_break(void) {
+}
+
+void init_break_handler(void)
+{
+ sys_sigset(SIGINT, request_break);
+ sys_sigset(SIGQUIT, do_quit);
+}
+
+void erts_replace_intr(void) {
+}
+
+int sys_max_files(void)
+{
+ return(max_files);
+}
+
+/******************* Routines for time measurement *********************/
+
+int sys_init_time(void)
+{
+ erts_clock_rate = sysClkRateGet();
+ /*
+ ** One could imagine that it would be better returning
+ ** a resolution more near the clock rate, like in:
+ ** return 1000 / erts_clock_rate;
+ ** but tests show that such isn't the case (rounding errors?)
+ ** Well, we go for the Unix variant of returning 1
+ ** as a constant virtual clock rate.
+ */
+ return SYS_CLOCK_RESOLUTION;
+}
+
+int erts_clock_rate;
+static volatile int ticks_inuse;
+static volatile unsigned long ticks_collected; /* will wrap */
+static WDOG_ID watchdog_id;
+static ULONG user_time;
+static int this_task_id, sys_itime;
+static SysHrTime hrtime_wrap;
+static unsigned long last_tick_count;
+
+static void tolerant_time_clockint(int count)
+{
+ if (watchdog_id != NULL) {
+ if (taskIsReady(this_task_id))
+ user_time += 1;
+ ++count;
+ if (!ticks_inuse) {
+ ticks_collected += count;
+ count = 0;
+ }
+ wdStart(watchdog_id, 1, (FUNCPTR)tolerant_time_clockint, count);
+ }
+}
+
+int sys_init_hrtime(void)
+{
+ this_task_id = taskIdSelf(); /* OK, this only works for one single task
+ in the system... */
+ user_time = 0;
+
+ ticks_inuse = 0;
+ ticks_collected = 0;
+ hrtime_wrap = 0;
+ last_tick_count = 0;
+
+ sys_itime = 1000 / erts_clock_rate;
+ watchdog_id = wdCreate();
+ wdStart(watchdog_id, 1, (FUNCPTR) tolerant_time_clockint, 0);
+ return 0;
+}
+
+SysHrTime sys_gethrtime(void)
+{
+ SysHrTime ticks;
+
+ ++ticks_inuse;
+ ticks = (SysHrTime) (ticks_collected & 0x7FFFFFFF);
+ ticks_inuse = 0;
+ if (ticks < (SysHrTime) last_tick_count) {
+ hrtime_wrap += 1UL << 31;
+ }
+ last_tick_count = ticks;
+ return (ticks + hrtime_wrap) * ((SysHrTime) (1000000000UL /
+ erts_clock_rate));
+}
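Editor's note (not part of this commit): a worked example of the conversion
above, assuming a 60 Hz system clock (erts_clock_rate == 60):

    /* one collected tick corresponds to 1000000000 / 60 = 16666666 ns        */
    /* ticks_collected is kept to 31 bits; each time it wraps, hrtime_wrap    */
    /* grows by 2^31, so (ticks + hrtime_wrap) stays monotonic and            */
    /* sys_gethrtime() = (ticks + hrtime_wrap) * 16666666 nanoseconds         */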
+
+void sys_gettimeofday(SysTimeval *tvp)
+{
+ struct timespec now;
+
+ clock_gettime(CLOCK_REALTIME, &now);
+ tvp->tv_sec = now.tv_sec;
+ tvp->tv_usec = now.tv_nsec / 1000;
+}
+
+clock_t sys_times(SysTimes *t)
+{
+ t->tms_stime = t->tms_cutime = t->tms_cstime = 0;
+ ++ticks_inuse;
+ t->tms_utime = user_time;
+ ticks_inuse = 0;
+ return tickGet(); /* The best we can do... */
+}
+
+/* This is called when *this task* is deleted */
+static void delete_hook(void)
+{
+ if (watchdog_id != NULL) {
+ wdDelete(watchdog_id);
+ watchdog_id = NULL;
+ }
+ erlang_id = 0;
+ this_task_id = 0;
+}
+
+/************************** OS info *******************************/
+
+/* Used by erlang:info/1. */
+/* (This code was formerly in drv.XXX/XXX_os_drv.c) */
+
+#define MAX_VER_STR 9 /* Number of characters to
+ consider in version string */
+
+static FUNCTION(int, get_number, (char** str_ptr));
+
+char os_type[] = "vxworks";
+
+static int
+get_number(char **str_ptr)
+{
+ char* s = *str_ptr; /* Pointer to beginning of string. */
+ char* dot; /* Pointer to dot in string or NULL. */
+
+ if (!isdigit(*s))
+ return 0;
+ if ((dot = strchr(s, '.')) == NULL) {
+ *str_ptr = s+strlen(s);
+ return atoi(s);
+ } else {
+ *dot = '\0';
+ *str_ptr = dot+1;
+ return atoi(s);
+ }
+}
+
+/* namebuf; Where to return the name. */
+/* size; Size of name buffer. */
+void
+os_flavor(char *namebuf, unsigned size)
+{
+ strcpy(namebuf, "-");
+}
+
+/* int* pMajor; Pointer to major version. */
+/* int* pMinor; Pointer to minor version. */
+/* int* pBuild; Pointer to build number. */
+void
+os_version(int *pMajor, int *pMinor, int *pBuild)
+{
+ char os_ver[MAX_VER_STR+2];
+ char* release; /* Pointer to the release string:
+ * X.Y or X.Y.Z.
+ */
+ strncpy(os_ver, vxWorksVersion, MAX_VER_STR);
+ release = os_ver;
+ *pMajor = get_number(&release);
+ *pMinor = get_number(&release);
+ *pBuild = get_number(&release);
+}
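Editor's note (not part of this commit): a worked example, assuming
vxWorksVersion holds a string such as "5.3.1":

    /* os_version() then yields *pMajor == 5, *pMinor == 3, *pBuild == 1;     */
    /* get_number() consumes up to each '.' and returns 0 for a component     */
    /* that does not start with a digit.                                      */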
+
+void init_getenv_state(GETENV_STATE *state)
+{
+ *state = NULL;
+}
+
+char *getenv_string(GETENV_STATE *state0)
+{
+ return NULL;
+}
+
+void fini_getenv_state(GETENV_STATE *state)
+{
+ *state = NULL;
+}
+
+/************************** Port I/O *******************************/
+
+
+/* I. Common stuff */
+
+#define TMP_BUF_MAX (tmp_buf_size - 1024)
+static byte *tmp_buf;
+static Uint tmp_buf_size;
+
+/* II. The spawn/fd/vanilla drivers */
+
+/* This data is shared by these drivers - initialized by spawn_init() */
+static struct driver_data {
+ int port_num, ofd, packet_bytes, report_exit;
+ int exitcode, exit_reported; /* For returning of exit codes. */
+} *driver_data; /* indexed by fd */
+
+/*
+ * Locking only for exitcodes and exit_reported, one global sem for all
+ * spawn ports as this is rare.
+ */
+static SEM_ID driver_data_sem = NULL;
+/*
+ * Also locking when looking up entries in the load table
+ */
+static SEM_ID entry_data_sem = NULL;
+
+/* We maintain a linked FIFO queue of these structs in order */
+/* to manage unfinished reads and writes on different fd's */
+
+typedef struct pend {
+ char *cpos;
+ int fd;
+ int remain;
+ struct pend *next;
+ char buf[1]; /* this is a trick to be able to malloc one chunk */
+} Pend;
+
+static struct fd_data {
+ int inport, outport;
+ char *buf, *cpos;
+ int sz, remain; /* for input on fd */
+ Pend* pending; /* pending outputs */
+
+} *fd_data; /* indexed by fd */
+
+
+/* Driver interfaces */
+static ErlDrvData spawn_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts);
+static ErlDrvData fd_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts);
+static ErlDrvData vanilla_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts);
+static int spawn_init(void);
+static void fd_stop(ErlDrvData);
+static void stop(ErlDrvData);
+static void ready_input(ErlDrvData fd, ErlDrvEvent ready_fd);
+static void ready_output(ErlDrvData fd, ErlDrvEvent ready_fd);
+static void output(ErlDrvData fd, char *buf, int len);
+static void stop_select(ErlDrvEvent, void*);
+
+struct erl_drv_entry spawn_driver_entry = {
+ spawn_init,
+ spawn_start,
+ stop,
+ output,
+ ready_input,
+ ready_output,
+ "spawn",
+ NULL, /* finish */
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL, /* handle2 */
+ NULL, /* process_exit */
+ stop_select
+
+};
+struct erl_drv_entry fd_driver_entry = {
+ NULL,
+ fd_start,
+ fd_stop,
+ output,
+ ready_input,
+ ready_output,
+ "fd",
+ NULL, /* finish */
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL, /* handle2 */
+ NULL, /* process_exit */
+ stop_select
+};
+struct erl_drv_entry vanilla_driver_entry = {
+ NULL,
+ vanilla_start,
+ stop,
+ output,
+ ready_input,
+ ready_output,
+ "vanilla",
+ NULL, /* finish */
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL, /* handle2 */
+ NULL, /* process_exit */
+ stop_select
+};
+
+/*
+** Set up enough of the driver_data structure to be able to report exit status.
+** Some things may be initialized again, but that is no real problem.
+*/
+static int pre_set_driver_data(int ifd, int ofd,
+ int read_write, int report_exit) {
+ if (read_write & DO_READ) {
+ driver_data[ifd].report_exit = report_exit;
+ driver_data[ifd].exitcode = 0;
+ driver_data[ifd].exit_reported = 0;
+ if (read_write & DO_WRITE) {
+ driver_data[ifd].ofd = ofd;
+ if (ifd != ofd) {
+ driver_data[ofd] = driver_data[ifd];
+ driver_data[ofd].report_exit = 0;
+ }
+ } else { /* DO_READ only */
+ driver_data[ifd].ofd = -1;
+ }
+ return(ifd);
+ } else { /* DO_WRITE only */
+ driver_data[ofd].report_exit = 0;
+ driver_data[ofd].exitcode = 0;
+ driver_data[ofd].exit_reported = 0;
+ driver_data[ofd].ofd = ofd;
+ return(ofd);
+ }
+}
+
+/*
+** Set up the driver_data structure; it may have been partly initialized
+** by the function above, but we don't care.
+*/
+static int set_driver_data(int port_num, int ifd, int ofd,
+ int packet_bytes, int read_write,
+ int report_exit)
+{
+ if (read_write & DO_READ) {
+ driver_data[ifd].packet_bytes = packet_bytes;
+ driver_data[ifd].port_num = port_num;
+ driver_data[ifd].report_exit = report_exit;
+ if (read_write & DO_WRITE) {
+ driver_data[ifd].ofd = ofd;
+ if (ifd != ofd) {
+ driver_data[ofd] = driver_data[ifd];
+ driver_data[ofd].report_exit = 0;
+ }
+ } else { /* DO_READ only */
+ driver_data[ifd].ofd = -1;
+ }
+ (void) driver_select(port_num, ifd, ERL_DRV_READ|ERL_DRV_USE, 1);
+ return(ifd);
+ } else { /* DO_WRITE only */
+ driver_data[ofd].packet_bytes = packet_bytes;
+ driver_data[ofd].port_num = port_num;
+ driver_data[ofd].report_exit = 0;
+ driver_data[ofd].ofd = ofd;
+ return(ofd);
+ }
+}
+
+static int need_new_sems = 1;
+
+static int spawn_init(void)
+{
+ char *stackenv;
+ int size;
+ driver_data = (struct driver_data *)
+ erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
+ if (need_new_sems) {
+ driver_data_sem = semMCreate
+ (SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE);
+ entry_data_sem = semMCreate
+ (SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE);
+ }
+ if (driver_data_sem == NULL || entry_data_sem == NULL) {
+ erl_exit(1,"Could not allocate driver locking semaphore.");
+ }
+ need_new_sems = 0;
+
+ (void)uxPipeDrv(); /* Install pipe driver */
+
+ if ((stackenv = getenv("ERLPORTSTACKSIZE")) != NULL &&
+ (size = atoi(stackenv)) > 0)
+ port_stack_size = size;
+ else
+ port_stack_size = DEFAULT_PORT_STACK_SIZE;
+ return 0;
+}
+
+/* Argv has to be built with the save_xxx routines, not with whatever
+ sys_xxx2 has in mind... */
+#define argv_alloc save_malloc
+#define argv_realloc save_realloc
+#define argv_free save_free
+/* Build argv, return argc or -1 on failure */
+static int build_argv(char *name, char ***argvp)
+{
+ int argvsize = 10, argc = 0;
+ char *args, *arglast = NULL, *argp;
+ char **argv;
+
+#ifdef DEBUG
+ fdprintf(2, "Building argv, %s =>\n", name);
+#endif
+ if ((argv = (char **)argv_alloc(argvsize * sizeof(char *))) == NULL)
+ return(-1);
+ if ((args = argv_alloc(strlen(name) + 1)) == NULL)
+ return(-1);
+ strcpy(args, name);
+ argp = strtok_r(args, " \t", &arglast);
+ while (argp != NULL) {
+ if (argc + 1 >= argvsize) {
+ argvsize += 10;
+ argv = (char **)argv_realloc((char *)argv, argvsize*sizeof(char *));
+ if (argv == NULL) {
+ argv_free(args);
+ return(-1);
+ }
+ }
+#ifdef DEBUG
+ fdprintf(2, "%s\n", argp);
+#endif
+ argv[argc++] = argp;
+ argp = strtok_r((char *)NULL, " \t", &arglast);
+ }
+ argv[argc] = NULL;
+ *argvp = argv;
+ return(argc);
+}
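Editor's note (not part of this commit): a worked example of the tokenizer
above, with a hypothetical command string:

    /* build_argv("port_test -o/tmp/erl", &argv) returns argc == 2 with       */
    /* argv[0] = "port_test", argv[1] = "-o/tmp/erl", argv[2] = NULL;         */
    /* tokens are split on spaces and tabs, and -1 is returned on             */
    /* allocation failure.                                                    */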
+#undef argv_alloc
+#undef argv_realloc
+#undef argv_free
+
+
+/* Lookup and return global text symbol or NULL on failure
+ Symbol name is null-terminated and without the leading '_' */
+static FUNCPTR
+lookup(char *sym)
+{
+ char buf[256];
+ char *symname = buf;
+ int len;
+ FUNCPTR entry;
+ SYM_TYPE type;
+
+ len = strlen(sym);
+ if (len > 254 && (symname = malloc(len+2)) == NULL)
+ return(NULL);
+#if defined _ARCH_PPC || defined SIMSPARCSOLARIS
+ /* GCC for PPC and SIMSPARC doesn't add a leading _ to symbols */
+ strcpy(symname, sym);
+#else
+ sprintf(symname, "_%s", sym);
+#endif
+ if (symFindByNameAndType(sysSymTbl, symname, (char **)&entry,
+ &type, N_EXT | N_TEXT, N_EXT | N_TEXT) != OK)
+ entry = NULL;
+ if (symname != buf)
+ free(symname);
+ return(entry);
+}
+
+/* This function is spawned to build argc, argv, lookup the symbol to call,
+ connect and set up file descriptors, and make the actual call.
+ N.B. 'name' was allocated by the Erlang task (through plain_malloc) and
+ is freed by this port program task.
+ Note: 'name' may be a path containing '/'. */
+
+static void call_proc(char *name, int ifd, int ofd, int read_write,
+ int redir_stderr, int driver_index,
+ int p6, int p7, int p8, int p9)
+{
+ int argc;
+ char **argv, *bname;
+ FUNCPTR entry;
+ int ret = -1;
+
+ /* Must consume 'name' */
+ argc = build_argv(name, &argv);
+ plain_free(name);
+ /* Find basename of path */
+ if ((bname = strrchr(argv[0], '/')) != NULL) {
+ bname++;
+ } else {
+ bname = argv[0];
+ }
+#ifdef DEBUG
+ fdprintf(2, "Port program name: %s\n", bname);
+#endif
+ semTake(entry_data_sem, WAIT_FOREVER);
+
+ if (argc > 0) {
+ if ((entry = lookup(bname)) == NULL) {
+ int fd;
+ char *fn;
+ /* NOTE: We don't check the return value of loadModule,
+ since that was incompatibly changed from 5.0.2b to 5.1,
+ but rather do a repeated lookup(). */
+ if ((fd = open(argv[0], O_RDONLY)) > 0) {
+ (void) loadModule(fd, GLOBAL_SYMBOLS);
+ close(fd);
+ entry = lookup(bname);
+ }
+ if (entry == NULL) {
+ /* filename == func failed, try func.o */
+ if ((fn = malloc(strlen(argv[0]) + 3)) != NULL) { /* ".o\0" */
+ strcpy(fn, argv[0]);
+ strcat(fn, ".o");
+ if ((fd = open(fn, O_RDONLY)) > 0) {
+ (void) loadModule(fd, GLOBAL_SYMBOLS);
+ close(fd);
+ entry = lookup(bname);
+ }
+ free(fn);
+ }
+ }
+ }
+ } else {
+ entry = NULL;
+ }
+ semGive(entry_data_sem);
+
+ if (read_write & DO_READ) { /* emulator read */
+ save_fd(ofd);
+ ioTaskStdSet(0, 1, ofd); /* stdout for process */
+ if(redir_stderr)
+ ioTaskStdSet(0, 2, ofd);/* stderr for process */
+ }
+ if (read_write & DO_WRITE) { /* emulator write */
+ save_fd(ifd);
+ ioTaskStdSet(0, 0, ifd); /* stdin for process */
+ }
+ if (entry != NULL) {
+ ret = (*entry)(argc, argv, (char **)NULL); /* NULL for envp */
+ } else {
+ fdprintf(2, "Could not exec \"%s\"\n", argv[0]);
+ ret = -1;
+ }
+ if (driver_data[driver_index].report_exit) {
+ semTake(driver_data_sem, WAIT_FOREVER);
+ driver_data[driver_index].exitcode = ret;
+ driver_data[driver_index].exit_reported = 1;
+ semGive(driver_data_sem);
+ }
+ /* We *don't* want to close the pipes here, but let the delete
+ hook take care of it - it might want to flush stdout and there'd
+ better be an open descriptor to flush to... */
+ exit(ret);
+}
+
+static void close_pipes(int ifd[2], int ofd[2], int read_write)
+{
+ if (read_write & DO_READ) {
+ (void) close(ifd[0]);
+ (void) close(ifd[1]);
+ }
+ if (read_write & DO_WRITE) {
+ (void) close(ofd[0]);
+ (void) close(ofd[1]);
+ }
+}
+
+static void init_fd_data(int fd, int port_unused_argument)
+{
+ SET_NONBLOCKING(fd);
+ fd_data[fd].pending = NULL;
+ fd_data[fd].buf = fd_data[fd].cpos = NULL;
+ fd_data[fd].remain = fd_data[fd].sz = 0;
+}
+
+static ErlDrvData spawn_start(ErlDrvPort port_num, char *name,SysDriverOpts* opts)
+{
+ int ifd[2], ofd[2], len, nl, id;
+ char taskname[11], *progname, *bname;
+ char *space_in_command;
+ int packet_bytes = opts->packet_bytes;
+ int read_write = opts->read_write;
+ int use_stdio = opts->use_stdio;
+ int redir_stderr = opts->redir_stderr;
+ int driver_index;
+
+ if (!use_stdio){
+ return (ErlDrvData) -3;
+ }
+
+ /* Create pipes and set the Erlang task as owner of its
+ * read and write ends (through save_fd()).
+ */
+ switch (read_write) {
+ case DO_READ:
+ if (pipe(ifd) < 0){
+ return (ErlDrvData) -2;
+ }
+ if (ifd[0] >= max_files) {
+ close_pipes(ifd, ofd, read_write);
+ errno = ENFILE;
+ return (ErlDrvData) -2;
+ }
+ save_fd(ifd[0]);
+ break;
+ case DO_WRITE:
+ if (pipe(ofd) < 0) {
+ return (ErlDrvData) -2;
+ }
+ if (ofd[1] >= max_files) {
+ close_pipes(ifd, ofd, read_write);
+ errno = ENFILE;
+ return (ErlDrvData) -2;
+ }
+ save_fd(ofd[1]);
+ break;
+ case DO_READ|DO_WRITE:
+ if (pipe(ifd) < 0){
+ return (ErlDrvData) -2;
+ }
+ if (ifd[0] >= max_files || pipe(ofd) < 0) {
+ close_pipes(ifd, ofd, DO_READ);
+ errno = ENFILE;
+ return (ErlDrvData) -2;
+ }
+ if (ofd[1] >= max_files) {
+ close_pipes(ifd, ofd, read_write);
+ errno = ENFILE;
+ return (ErlDrvData) -2;
+ }
+ save_fd(ifd[0]);
+ save_fd(ofd[1]);
+ break;
+ default:
+ return (ErlDrvData) -1;
+ }
+
+ /* Allocate space for program name to be freed by the
+ * spawned task. We use plain_malloc so that the allocated
+ * space is not owned by the Erlang task.
+ */
+
+ if ((progname = plain_malloc(strlen(name) + 1)) == NULL) {
+ close_pipes(ifd, ofd, read_write);
+ errno = ENOMEM;
+ return (ErlDrvData) -2;
+ }
+ strcpy(progname, name);
+
+ /* Check if name contains a space
+ * (e.g "port_test -o/home/gandalf/tornado/wind/target/erlang")
+ */
+ if ((space_in_command = strrchr(progname, ' ')) != NULL) {
+ *space_in_command = '\0';
+ }
+
+ /* resulting in "port_test" */
+ if ((bname = strrchr(progname, '/')) != NULL)
+ bname++;
+ else
+ bname = progname;
+
+ /* resulting in "port_test" */
+ len = strlen(bname);
+ nl = len > 10 ? 10 : len;
+ strncpy(taskname, bname, nl);
+ taskname[nl] = '\0';
+ if (space_in_command != NULL)
+ *space_in_command = ' ';
+ driver_index = pre_set_driver_data(ifd[0], ofd[1],
+ read_write, opts->exit_status);
+
+ /* resetting to "port_test -o/home/gandalf/tornado/wind/target/erlang" */
+ if ((id = taskSpawn(taskname, spTaskPriority, spTaskOptions,
+ port_stack_size, (FUNCPTR)call_proc, (int)progname,
+ ofd[0], ifd[1], read_write, redir_stderr, driver_index,
+ 0,0,0,0))
+ == ERROR) {
+ close_pipes(ifd, ofd, read_write);
+ plain_free(progname); /* only when spawn fails */
+ errno = ENOMEM;
+ return (ErlDrvData) -2;
+ }
+#ifdef DEBUG
+ fdprintf(2, "Spawned %s as %s[0x%x]\n", name, taskname, id);
+#endif
+ if (read_write & DO_READ)
+ init_fd_data(ifd[0], port_num);
+ if (read_write & DO_WRITE)
+ init_fd_data(ofd[1], port_num);
+ return (ErlDrvData) (set_driver_data(port_num, ifd[0], ofd[1],
+ packet_bytes,read_write,
+ opts->exit_status));
+}
+
+static ErlDrvData fd_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts)
+{
+ if (((opts->read_write & DO_READ) && opts->ifd >= max_files) ||
+ ((opts->read_write & DO_WRITE) && opts->ofd >= max_files)) {
+ return (ErlDrvData) -1;
+ }
+
+ if (opts->read_write & DO_READ)
+ init_fd_data(opts->ifd, port_num);
+ if (opts->read_write & DO_WRITE)
+ init_fd_data(opts->ofd, port_num);
+ return (ErlDrvData) (set_driver_data(port_num, opts->ifd, opts->ofd,
+ opts->packet_bytes, opts->read_write, 0));
+}
+
+static void clear_fd_data(int fd)
+{
+
+ if (fd_data[fd].sz > 0)
+ erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fd_data[fd].buf);
+ fd_data[fd].buf = NULL;
+ fd_data[fd].sz = 0;
+ fd_data[fd].remain = 0;
+ fd_data[fd].cpos = NULL;
+}
+
+static void nbio_stop_fd(int port_num, int fd)
+{
+ Pend *p, *p1;
+
+ driver_select(port_num, fd, ERL_DRV_READ|ERL_DRV_WRITE, 0);
+ clear_fd_data(fd);
+ p = fd_data[fd].pending;
+ SET_BLOCKING(fd);
+ while (p) {
+ p1 = p->next;
+ free(p);
+ p = p1;
+ }
+ fd_data[fd].pending = NULL;
+}
+
+static void fd_stop(ErlDrvData drv_data)
+{
+ int ofd;
+ int fd = (int) drv_data;
+
+ nbio_stop_fd(driver_data[fd].port_num, (int)fd);
+ ofd = driver_data[fd].ofd;
+ if (ofd != fd && ofd != -1)
+ nbio_stop_fd(driver_data[fd].port_num, (int)ofd); /* XXX fd = ofd? */
+}
+
+static ErlDrvData
+vanilla_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts)
+{
+ int flags, fd;
+ struct stat statbuf;
+
+ DEBUGF(("vanilla_start, name: %s [r=%1i w=%1i]\n", name,
+ opts->read_write & DO_READ,
+ opts->read_write & DO_WRITE));
+
+ flags = (opts->read_write == DO_READ ? O_RDONLY :
+ opts->read_write == DO_WRITE ? O_WRONLY|O_CREAT|O_TRUNC :
+ O_RDWR|O_CREAT);
+ if ((fd = open(name, flags, 0666)) < 0){
+ errno = ENFILE;
+ return (ErlDrvData) -2;
+ }
+ if (fd >= max_files) {
+ close(fd);
+ errno = ENFILE;
+ return (ErlDrvData) -2;
+ }
+ if (fstat(fd, &statbuf) < 0) {
+ close(fd);
+ errno = ENFILE;
+ return (ErlDrvData) -2;
+ }
+
+ /* Return error for reading regular files (doesn't work) */
+ if (ISREG(statbuf) && ((opts->read_write) & DO_READ)) {
+ close(fd);
+ return (ErlDrvData) -3;
+ }
+ init_fd_data(fd, port_num);
+ return (ErlDrvData) (set_driver_data(port_num, fd, fd,
+ opts->packet_bytes, opts->read_write, 0));
+}
+
+/* Note that driver_data[fd].ifd == fd if the port was opened for reading; */
+/* otherwise (i.e. write only) driver_data[fd].ofd == fd. */
+
+static void stop(ErlDrvData drv_data)
+{
+ int port_num, ofd;
+ int fd = (int) drv_data;
+
+ port_num = driver_data[fd].port_num;
+ nbio_stop_fd(port_num, fd);
+ driver_select(port_num, fd, ERL_DRV_USE, 0); /* close(fd) */
+
+ ofd = driver_data[fd].ofd;
+ if (ofd != fd && ofd != -1) {
+ nbio_stop_fd(port_num, ofd);
+	driver_select(port_num, ofd, ERL_DRV_USE, 0); /* close(ofd) */
+ }
+}
+
+static int sched_write(int port_num,int fd, char *buf, int len, int pb)
+{
+ Pend *p, *p2, *p3;
+ int p_bytes = len;
+
+ p = (Pend*) erts_alloc_fnf(ERTS_ALC_T_PEND_DATA, pb + len + sizeof(Pend));
+ if (!p) {
+ driver_failure(port_num, -1);
+ return(-1);
+ }
+
+ switch(pb) {
+ case 4: put_int32(len, p->buf); break;
+ case 2: put_int16(len, p->buf); break;
+ case 1: put_int8(len, p->buf); break;
+ case 0: break; /* Handles this case too */
+ }
+ sys_memcpy(p->buf + pb, buf, len);
+ driver_select(port_num, fd, ERL_DRV_WRITE|ERL_DRV_USE, 1);
+ p->cpos = p->buf;
+ p->fd = fd;
+ p->next = NULL;
+ p->remain = len + pb;
+ p2 = fd_data[fd].pending;
+ if (p2 == NULL)
+ fd_data[fd].pending = p;
+ else {
+ p3 = p2->next;
+ while(p3) {
+ p_bytes += p2->remain;
+ p2 = p2->next;
+ p3 = p3->next;
+ }
+ p2->next = p;
+ }
+ if (p_bytes > (1 << 13)) /* More than 8 k pending */
+ set_busy_port(port_num, 1);
+ return(0);
+}
+
+/* Fd is the value returned as drv_data by the start func */
+static void output(ErlDrvData drv_data, char *buf, int len)
+{
+ int buf_done, port_num, wval, pb, ofd;
+ byte lb[4];
+ struct iovec iv[2];
+ int fd = (int) drv_data;
+
+ pb = driver_data[fd].packet_bytes;
+ port_num = driver_data[fd].port_num;
+
+ if ((ofd = driver_data[fd].ofd) == -1) {
+ return;
+ }
+
+ if (fd_data[ofd].pending) {
+ sched_write(port_num, ofd, buf, len, pb);
+ return;
+ }
+
+ if ((pb == 2 && len > 65535) || (pb == 1 && len > 255)) {
+ driver_failure_posix(port_num, EINVAL);
+ return;
+ }
+ if (pb == 0) {
+ wval = write(ofd, buf, len);
+ } else {
+ lb[0] = (len >> 24) & 255; /* MSB */
+ lb[1] = (len >> 16) & 255;
+ lb[2] = (len >> 8) & 255;
+ lb[3] = len & 255; /* LSB */
+ iv[0].iov_base = (char*) lb + (4 - pb);
+ iv[0].iov_len = pb;
+ iv[1].iov_base = buf;
+ iv[1].iov_len = len;
+ wval = writev(ofd, iv, 2);
+ }
+ if (wval == pb + len ) {
+ return;
+ }
+ if (wval < 0) {
+ if ((errno == EINTR) || (errno == ERRNO_BLOCK)) {
+ if (pb) {
+ sched_write(port_num, ofd, buf ,len, pb);
+ } else if (pb == 0) {
+ sched_write(port_num, ofd, buf ,len, 0);
+ }
+ return;
+ }
+ driver_failure_posix(driver_data[fd].port_num, EINVAL);
+ return;
+ }
+ if (wval < pb) {
+ sched_write(port_num, ofd, (lb +4 -pb) + wval, pb-wval, 0);
+ sched_write(port_num, ofd, buf ,len, 0);
+ return;
+ }
+
+ /* we now know that wval < (pb + len) */
+ buf_done = wval - pb;
+ sched_write(port_num, ofd, buf + buf_done, len - buf_done,0);
+}
+
+static void stop_select(ErlDrvEvent fd, void* _)
+{
+ close((int)fd);
+}
+
+static int ensure_header(int fd,char *buf,int packet_size, int sofar)
+{
+ int res = 0;
+ int remaining = packet_size - sofar;
+
+ SET_BLOCKING(fd);
+ if (read_fill(fd, buf+sofar, remaining) != remaining)
+ return -1;
+ switch (packet_size) {
+ case 1: res = get_int8(buf); break;
+ case 2: res = get_int16(buf); break;
+ case 4: res = get_int32(buf); break;
+ }
+ SET_NONBLOCKING(fd);
+ return(res);
+}
+
+static int port_inp_failure(int port_num, int ready_fd, int res)
+{
+ (void) driver_select(port_num, ready_fd, ERL_DRV_READ|ERL_DRV_WRITE, 0);
+ clear_fd_data(ready_fd);
+ if (res == 0) {
+ if (driver_data[ready_fd].report_exit) {
+ int tmpexit = 0;
+ int reported;
+ /* Lock the driver_data structure */
+ semTake(driver_data_sem, WAIT_FOREVER);
+ if ((reported = driver_data[ready_fd].exit_reported))
+ tmpexit = driver_data[ready_fd].exitcode;
+ semGive(driver_data_sem);
+ if (reported) {
+ erts_fprintf(stderr,"Exitcode %d reported\r\n", tmpexit);
+ driver_report_exit(port_num, tmpexit);
+ }
+ }
+ driver_failure_eof(port_num);
+ } else {
+ driver_failure(port_num, res);
+ }
+ return 0;
+}
+
+/* fd is the drv_data that is returned from the */
+/* initial start routine */
+/* ready_fd is the descriptor that is ready to read */
+
+static void ready_input(ErlDrvData drv_data, ErlDrvEvent drv_event)
+{
+ int port_num, packet_bytes, res;
+ Uint h = 0;
+ char *buf;
+ int fd = (int) drv_data;
+ int ready_fd = (int) drv_event;
+
+ port_num = driver_data[fd].port_num;
+ packet_bytes = driver_data[fd].packet_bytes;
+
+ if (packet_bytes == 0) {
+ if ((res = read(ready_fd, tmp_buf, tmp_buf_size)) > 0) {
+ driver_output(port_num, (char*)tmp_buf, res);
+ return;
+ }
+ port_inp_failure(port_num, ready_fd, res);
+ return;
+ }
+
+ if (fd_data[ready_fd].remain > 0) { /* We try to read the remainder */
+ /* space is allocated in buf */
+ res = read(ready_fd, fd_data[ready_fd].cpos,
+ fd_data[ready_fd].remain);
+ if (res < 0) {
+ if ((errno == EINTR) || (errno == ERRNO_BLOCK)) {
+ ;
+ } else {
+ port_inp_failure(port_num, ready_fd, res);
+ }
+ } else if (res == 0) {
+ port_inp_failure(port_num, ready_fd, res);
+ } else if (res == fd_data[ready_fd].remain) { /* we're done */
+ driver_output(port_num, fd_data[ready_fd].buf,
+ fd_data[ready_fd].sz);
+ clear_fd_data(ready_fd);
+ } else { /* if (res < fd_data[ready_fd].remain) */
+ fd_data[ready_fd].cpos += res;
+ fd_data[ready_fd].remain -= res;
+ }
+ return;
+ }
+
+
+ if (fd_data[ready_fd].remain == 0) { /* clean fd */
+ /* We make one read attempt and see what happens */
+ res = read(ready_fd, tmp_buf, tmp_buf_size);
+ if (res < 0) {
+ if ((errno == EINTR) || (errno == ERRNO_BLOCK))
+ return;
+ port_inp_failure(port_num, ready_fd, res);
+ return;
+ }
+ else if (res == 0) { /* eof */
+ port_inp_failure(port_num, ready_fd, res);
+ return;
+ }
+	else if (res < packet_bytes) { /* Ugly case... read at least the header */
+ if ((h = ensure_header(ready_fd, tmp_buf, packet_bytes, res))==-1) {
+ port_inp_failure(port_num, ready_fd, -1);
+ return;
+ }
+ buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
+ if (!buf) {
+ port_inp_failure(port_num, ready_fd, -1);
+ return;
+ }
+ fd_data[ready_fd].buf = buf;
+ fd_data[ready_fd].sz = h;
+ fd_data[ready_fd].remain = h;
+ fd_data[ready_fd].cpos = buf;
+ return;
+ }
+ else { /* if (res >= packet_bytes) */
+ unsigned char* cpos = tmp_buf;
+ int bytes_left = res;
+ while (1) { /* driver_output as many as possible */
+ if (bytes_left == 0) {
+ clear_fd_data(ready_fd);
+ return;
+ }
+ if (bytes_left < packet_bytes) { /* Yet an ugly case */
+ if((h=ensure_header(ready_fd, cpos,
+ packet_bytes, bytes_left))==-1) {
+ port_inp_failure(port_num, ready_fd, -1);
+ return;
+ }
+ buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
+		if (!buf) {
+		    port_inp_failure(port_num, ready_fd, -1);
+		    return;
+		}
+ fd_data[ready_fd].buf = buf;
+ fd_data[ready_fd].sz = h;
+ fd_data[ready_fd].remain = h;
+ fd_data[ready_fd].cpos = buf;
+ return;
+ }
+ switch (packet_bytes) {
+ case 1: h = get_int8(cpos); cpos += 1; break;
+ case 2: h = get_int16(cpos); cpos += 2; break;
+ case 4: h = get_int32(cpos); cpos += 4; break;
+ }
+ bytes_left -= packet_bytes;
+ /* we've got the header, now check if we've got the data */
+ if (h <= (bytes_left)) {
+ driver_output(port_num, (char*) cpos, h);
+ cpos += h;
+ bytes_left -= h;
+ continue;
+ }
+ else { /* The last message we got was split */
+ buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
+ if (!buf) {
+		    port_inp_failure(port_num, ready_fd, -1);
+		    return;
+		}
+ sys_memcpy(buf, cpos, bytes_left);
+ fd_data[ready_fd].buf = buf;
+ fd_data[ready_fd].sz = h;
+ fd_data[ready_fd].remain = h - bytes_left;
+ fd_data[ready_fd].cpos = buf + bytes_left;
+ return;
+ }
+ }
+ return;
+ }
+ }
+ fprintf(stderr, "remain %d \n", fd_data[ready_fd].remain);
+ port_inp_failure(port_num, ready_fd, -1);
+}
+
+
+/* fd is the drv_data that is returned from the */
+/* initial start routine */
+/* ready_fd is the descriptor that is ready to read */
+
+static void ready_output(ErlDrvData drv_data, ErlDrvEvent drv_event)
+{
+ Pend *p;
+ int wval;
+
+ int fd = (int) drv_data;
+ int ready_fd = (int) drv_event;
+
+ while(1) {
+ if ((p = fd_data[ready_fd].pending) == NULL) {
+ driver_select(driver_data[fd].port_num, ready_fd,
+ ERL_DRV_WRITE, 0);
+ return;
+ }
+ wval = write(p->fd, p->cpos, p->remain);
+ if (wval == p->remain) {
+ fd_data[ready_fd].pending = p->next;
+ erts_free(ERTS_ALC_T_PEND_DATA, p);
+ if (fd_data[ready_fd].pending == NULL) {
+ driver_select(driver_data[fd].port_num, ready_fd,
+ ERL_DRV_WRITE, 0);
+ set_busy_port(driver_data[fd].port_num, 0);
+ return;
+ }
+ else
+ continue;
+ }
+ else if (wval < 0) {
+ if (errno == ERRNO_BLOCK || errno == EINTR)
+ return;
+ else {
+ driver_select(driver_data[fd].port_num, ready_fd,
+ ERL_DRV_WRITE, 0);
+ driver_failure(driver_data[fd].port_num, -1);
+ return;
+ }
+ }
+ else if (wval < p->remain) {
+ p->cpos += wval;
+ p->remain -= wval;
+ return;
+ }
+ }
+}
+
+/* Fills in the system's representation of the jam/beam process identifier.
+** The pid is put in STRING representation in the supplied buffer; no
+** interpretation of it should be done by the rest of the emulator.
+** The buffer should be at least 21 bytes long.
+*/
+void sys_get_pid(char *buffer){
+ int p = taskIdSelf(); /* Hmm, may be negative??? requires some GB of
+ memory to make the TCB address convert to a
+ negative value. */
+ sprintf(buffer,"%d", p);
+}
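+
+/* Usage sketch (hypothetical caller, not from the original code): the
+** caller supplies a buffer of at least 21 bytes and treats the result
+** as an opaque string, e.g.
+**
+**     char pidbuf[21];
+**     sys_get_pid(pidbuf);
+**     erts_fprintf(stderr, "emulator task id: %s\r\n", pidbuf);
+*/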
+
+int
+erts_sys_putenv(char *buffer, int sep_ix)
+{
+ return putenv(buffer);
+}
+
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ char *orig_value;
+ int res;
+ orig_value = getenv(key);
+ if (!orig_value)
+ res = -1;
+ else {
+ size_t len = sys_strlen(orig_value);
+ if (len >= *size) {
+ *size = len + 1;
+ res = 1;
+ }
+ else {
+ *size = len;
+ sys_memcpy((void *) value, (void *) orig_value, len+1);
+ res = 0;
+ }
+ }
+ return res;
+}
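+
+/* Usage sketch (hypothetical caller): the return convention above is
+** intended to support a retry when the supplied buffer is too small:
+**
+**     char buf[16];
+**     size_t sz = sizeof(buf);
+**     int res = erts_sys_getenv("PATH", buf, &sz);
+**
+** res == 0  the value was copied into buf and sz is its length;
+** res == 1  buf was too small and sz now holds the size needed
+**           (value length plus terminating NUL) for a second call;
+** res == -1 the variable is not set in the environment.
+*/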
+
+void
+sys_init_io(void)
+{
+ tmp_buf = (byte *) erts_alloc(ERTS_ALC_T_SYS_TMP_BUF, SYS_TMP_BUF_SIZE);
+ tmp_buf_size = SYS_TMP_BUF_SIZE;
+ fd_data = (struct fd_data *)
+ erts_alloc(ERTS_ALC_T_FD_TAB, max_files * sizeof(struct fd_data));
+}
+
+
+/* Fill buffer, return buffer length, 0 for EOF, < 0 for error. */
+
+static int read_fill(int fd, char *buf, int len)
+{
+ int i, got = 0;
+ do {
+ if ((i = read(fd, buf+got, len-got)) <= 0) {
+ return i;
+ }
+ got += i;
+ } while (got < len);
+ return (len);
+}
+
+
+/************************** Misc... *******************************/
+
+extern const char pre_loaded_code[];
+extern char* const pre_loaded[];
+
+
+/* Float conversion */
+
+int sys_chars_to_double(char *buf, double *fp)
+{
+ char *s = buf;
+
+ /* The following check is incorporated from the Vee machine */
+
+#define ISDIGIT(d) ((d) >= '0' && (d) <= '9')
+
+ /* Robert says that something like this is what he really wanted:
+ *
+ * 7 == sscanf(Tbuf, "%[+-]%[0-9].%[0-9]%[eE]%[+-]%[0-9]%s", ....);
+ * if (*s2 == 0 || *s3 == 0 || *s4 == 0 || *s6 == 0 || *s7)
+ * break;
+ */
+
+ /* Scan string to check syntax. */
+ if (*s == '+' || *s == '-')
+ s++;
+
+ if (!ISDIGIT(*s)) /* Leading digits. */
+ return -1;
+ while (ISDIGIT(*s)) s++;
+ if (*s++ != '.') /* Decimal part. */
+ return -1;
+ if (!ISDIGIT(*s))
+ return -1;
+ while (ISDIGIT(*s)) s++;
+ if (*s == 'e' || *s == 'E') {
+ /* There is an exponent. */
+ s++;
+ if (*s == '+' || *s == '-')
+ s++;
+ if (!ISDIGIT(*s))
+ return -1;
+ while (ISDIGIT(*s)) s++;
+ }
+ if (*s) /* That should be it */
+ return -1;
+
+ if (sscanf(buf, "%lf", fp) != 1)
+ return -1;
+ return 0;
+}
+
+/*
+ ** Convert a double to ascii format 0.dddde[+|-]ddd
+ ** return number of characters converted
+ */
+
+int sys_double_to_chars(double fp, char *buf)
+{
+ (void) sprintf(buf, "%.20e", fp);
+ return strlen(buf);
+}
+
+
+/* Floating point exceptions */
+
+#if (CPU == SPARC)
+jmp_buf fpe_jmp;
+
+RETSIGTYPE fpe_sig_handler(int sig)
+{
+ longjmp(fpe_jmp, 1);
+}
+
+#elif (CPU == PPC603)
+static void fix_registers(void){
+ FP_CONTEXT fpcontext;
+ fppSave(&fpcontext);
+ fpcontext.fpcsr &= ~(_PPC_FPSCR_INIT);
+ fppRestore(&fpcontext);
+}
+#endif
+
+
+/* Return a pointer to a vector of names of preloaded modules */
+
+Preload* sys_preloaded(void)
+{
+ return (Preload *) pre_loaded;
+}
+
+/* Return a pointer to preloaded code for module "module" */
+unsigned char* sys_preload_begin(Preload *pp)
+{
+ return pp->code;
+}
+
+/* Clean up if allocated */
+void sys_preload_end(Preload *pp)
+{
+ /* Nothing */
+}
+
+/* Read a key from console (?) */
+
+int sys_get_key(int fd)
+{
+ int c;
+ unsigned char rbuf[64];
+
+ fflush(stdout); /* Flush query ??? */
+
+ if ((c = read(fd,rbuf,64)) <= 0)
+ return c;
+ return rbuf[0];
+}
+
+
+/* A real printf that does the equivalent of fprintf(stdout, ...) */
+
+/* ARGSUSED */
+static STATUS
+stdio_write(char *buf, int nchars, int fp)
+{
+ if (fwrite(buf, sizeof(char), nchars, (FILE *)fp) == 0)
+ return(ERROR);
+ return(OK);
+}
+
+int real_printf(const char *fmt, ...)
+{
+ va_list ap;
+ int err;
+
+ va_start(ap, fmt);
+ err = fioFormatV(fmt, ap, stdio_write, (int)stdout);
+ va_end(ap);
+ return(err);
+}
+
+
+/*
+ * Little function to do argc, argv calls from (e.g.) VxWorks shell
+ * The arguments should be in the form of a single ""-enclosed string
+ * NOTE: This isn't really part of the emulator, just included here
+ * so we can use the handy functions and memory reclamation.
+ */
+void argcall(char *args)
+{
+ int argc;
+ char **argv;
+ FUNCPTR entry;
+
+ if (args != NULL) {
+ if ((argc = build_argv(args, &argv)) > 0) {
+ if ((entry = lookup(argv[0])) != NULL)
+ (*entry)(argc, argv, (char **)NULL); /* NULL for envp */
+ else
+ fprintf(stderr, "Couldn't find %s\n", argv[0]);
+ } else
+ fprintf(stderr, "Failed to build argv!\n");
+ } else
+ fprintf(stderr, "No argument list!\n");
+}
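+
+/* Usage sketch (hypothetical shell session; "my_entry" is a placeholder
+** for a symbol that lookup() can resolve): from the VxWorks shell the
+** whole command line is passed as one quoted string, e.g.
+**
+**     -> argcall("my_entry arg1 arg2")
+**
+** which, assuming build_argv() splits on spaces, ends up calling
+** my_entry(3, argv, NULL) with argv = {"my_entry", "arg1", "arg2"}.
+*/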
+
+
+/* That concludes the Erlang stuff - now we just need to implement an OS...
+ - Just kidding, but resource reclamation isn't the strength of VxWorks */
+#undef calloc
+#undef free
+#undef cfree
+#undef malloc
+#undef realloc
+#undef open
+#undef creat
+#undef socket
+#undef accept
+#undef close
+#undef fopen
+#undef fdopen
+#undef freopen
+#undef fclose
+
+/********************* Using elib_malloc ****************************/
+/* This gives us yet another level of malloc wrappers. The purpose */
+/* is to be able to select between different varieties of memory */
+/* allocation without recompiling. */
+/* Maybe the performance is somewhat degraded by this, but */
+/* on the other hand, performance may be much better if the most */
+/* suitable malloc is used (not to mention the much lower */
+/* fragmentation). */
+/* /Patrik N */
+/********************************************************************/
+
+/*
+ * I don't want to include the whole elib header, especially
+ * as it uses char * for generic pointers. Let's fool ANSI C instead.
+ */
+extern void *elib_malloc(size_t);
+extern void *elib_realloc(void *, size_t);
+extern void elib_free(void *);
+extern void elib_init(void *, int);
+extern void elib_force_init(void *, int);
+extern size_t elib_sizeof(void *);
+
+/* Flags */
+#define USING_ELIB_MALLOC 1 /* We are using the elib_malloc */
+#define WARN_MALLOC_MIX 2 /* Warn if plain malloc or save_malloc
+ is mixed with sys_free2 or
+ sys_realloc2 */
+#define REALLOC_MOVES 4 /* Always move on realloc
+ (less fragmentation) */
+#define USER_POOL 8 /* The user supplied the memory
+ pool, it was not save_alloced. */
+#define RECLAIM_USER_POOL 16 /* Use the reclaim mechanism in the
+ user pool. */
+#define NEW_USER_POOL 32 /* The user pool is newly supplied,
+ any old pool should be discarded */
+
+
+#define ELIB_LOCK \
+if(alloc_flags & USING_ELIB_MALLOC) \
+ semTake(elib_malloc_sem, WAIT_FOREVER)
+
+#define ELIB_UNLOCK \
+if(alloc_flags & USING_ELIB_MALLOC) \
+ semGive(elib_malloc_sem)
+
+#define USER_RECLAIM() ((alloc_flags & USING_ELIB_MALLOC) && \
+ (alloc_flags & USER_POOL) && \
+ (alloc_flags & RECLAIM_USER_POOL))
+
+/*
+ * Global state
+ * The use of function pointers for the malloc/realloc/free functions
+ * is actually only useful in the malloc case; we must know what kind of
+ * realloc/free we are going to use anyway, so we could call elib_xxx
+ * directly. However, as the overhead is small and this construction
+ * makes it fairly easy to add another malloc algorithm, the function
+ * pointers are used in realloc/free too.
+ */
+static MallocFunction actual_alloc = &save_malloc;
+static ReallocFunction actual_realloc = &save_realloc;
+static FreeFunction actual_free = &save_free;
+static int alloc_flags = 0;
+static int alloc_pool_size = 0;
+static void *alloc_pool_ptr = NULL;
+static SEM_ID elib_malloc_sem = NULL;
+
+/*
+ * Decide if we should use save_free instead of elib_free or,
+ * in the case of the free used in a delete hook, if we should
+ * use plain free instead of elib_free.
+ */
+static int use_save_free(void *ptr){
+ register int diff = ((char *) ptr) - ((char *) alloc_pool_ptr);
+ /*
+ * Hmmm... should it be save_free even if diff is exactly 0?
+ * The answer is Yes if the whole area is save_alloced and No if not,
+ * so reclaim_free_hook is NOT run in the case of one save_alloced area.
+ */
+ return (!(alloc_flags & USING_ELIB_MALLOC) ||
+ (diff < 0 || diff >= alloc_pool_size));
+}
+
+/*
+ * A free function used by the task deletion hook for the save_xxx functions.
+ * Set with the set_reclaim_free_function function.
+ */
+static void reclaim_free_hook(void *ptr){
+ if(use_save_free(ptr)){
+ free(ptr);
+ } else {
+ ELIB_LOCK;
+ (*actual_free)(ptr);
+ ELIB_UNLOCK;
+ }
+}
+
+
+/*
+ * Initialization: sets the allocation function pointers based on
+ * either nothing (the default) or what was previously set by the
+ * erl_set_memory_block function.
+ */
+static void initialize_allocation(void){
+ set_reclaim_free_function(NULL);
+ if(alloc_pool_size == 0){
+ actual_alloc = (void *(*)(size_t))&save_malloc;
+ actual_realloc = (void *(*)(void *, size_t))&save_realloc;
+ actual_free = &save_free;
+ alloc_flags &= ~(USING_ELIB_MALLOC | USER_POOL | RECLAIM_USER_POOL);
+ } else {
+ if(elib_malloc_sem == NULL)
+ elib_malloc_sem = semMCreate
+ (SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE);
+ if(elib_malloc_sem == NULL)
+ erl_exit(1,"Could not create mutex semaphore for elib_malloc");
+ if(!(alloc_flags & USER_POOL)){
+ if((alloc_pool_ptr = save_malloc(alloc_pool_size)) == NULL)
+ erl_exit(1,"Erlang set to allocate a %d byte block initially;"
+ " not enough memory available.", alloc_pool_size);
+ elib_force_init(alloc_pool_ptr, alloc_pool_size);
+ } else if(alloc_flags & NEW_USER_POOL){
+ elib_force_init(alloc_pool_ptr, alloc_pool_size);
+ }
+ actual_alloc=&elib_malloc;
+ actual_realloc=&elib_realloc;
+ actual_free=&elib_free;
+ alloc_flags |= USING_ELIB_MALLOC;
+	/* We MUST see to it that the right free function is used,
+	   otherwise we'll get a very nasty crash! */
+ if(USER_RECLAIM())
+ set_reclaim_free_function(&reclaim_free_hook);
+ }
+ alloc_flags &= ~(NEW_USER_POOL); /* It's never new after initialization*/
+}
+
+/* This does not exist on other platforms; we just use it in sys.c
+ and the BSD resolver */
+void *sys_calloc2(Uint nelem, Uint elsize){
+ void *ptr = erts_alloc_fnf(ERTS_ALC_T_UNDEF, nelem*elsize);
+ if(ptr != NULL)
+ memset(ptr,0,nelem*elsize);
+ return ptr;
+}
+
+/*
+ * The malloc wrapper
+ */
+void *
+erts_sys_alloc(ErtsAlcType_t type, void *extra, Uint size)
+{
+ register void *ret;
+ ELIB_LOCK;
+ if(USER_RECLAIM())
+ ret = save_malloc2((size_t)size,actual_alloc);
+ else
+ ret = (*actual_alloc)((size_t)size);
+ ELIB_UNLOCK;
+ return ret;
+}
+
+/*
+ * The realloc wrapper; it may respond to the "realloc-always-moves" flag
+ * if the area is initially allocated with elib_malloc.
+ */
+void *
+erts_sys_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
+{
+ register void *ret;
+ if(use_save_free(ptr)){
+ if((alloc_flags & WARN_MALLOC_MIX) &&
+ (alloc_flags & USING_ELIB_MALLOC))
+ erts_fprintf(stderr,"Warning, save_malloced data realloced "
+ "by sys_realloc2\n");
+ return save_realloc(ptr, (size_t) size);
+ } else {
+ ELIB_LOCK;
+ if((alloc_flags & REALLOC_MOVES) &&
+ (alloc_flags & USING_ELIB_MALLOC)){
+ size_t osz = elib_sizeof(ptr);
+ if(USER_RECLAIM())
+ ret = save_malloc2((size_t) size, actual_alloc);
+ else
+ ret = (*actual_alloc)((size_t) size);
+ if(ret != NULL){
+ memcpy(ret,ptr,(((size_t)size) < osz) ? ((size_t)size) : osz);
+ if(USER_RECLAIM())
+ save_free2(ptr,actual_free);
+ else
+ (*actual_free)(ptr);
+ }
+ } else {
+ if(USER_RECLAIM())
+ ret = save_realloc2(ptr,(size_t)size,actual_realloc);
+ else
+ ret = (*actual_realloc)(ptr,(size_t)size);
+ }
+ ELIB_UNLOCK;
+ return ret;
+ }
+}
+
+/*
+ * Wrapped free().
+ */
+void
+erts_sys_free(ErtsAlcType_t type, void *extra, void *ptr)
+{
+ if(use_save_free(ptr)){
+ /*
+	 * This might happen when linked-in drivers use save_malloc etc
+ * directly.
+ */
+ if((alloc_flags & WARN_MALLOC_MIX) &&
+ (alloc_flags & USING_ELIB_MALLOC))
+ erts_fprintf(stderr,"Warning, save_malloced data freed by "
+ "sys_free2\n");
+ save_free(ptr);
+ } else {
+ ELIB_LOCK;
+ if(USER_RECLAIM())
+ save_free2(ptr,actual_free);
+ else
+ (*actual_free)(ptr);
+ ELIB_UNLOCK;
+ }
+}
+
+/*
+ * External interface to be called before erlang is started
+ * Parameters:
+ * isize: The size of the memory block where erlang should malloc().
+ * iptr: (optional) A pointer to a user supplied memory block of
+ * size isize.
+ * warn_save: Instructs sys_free2 and sys_realloc2 to warn if
+ * memory allocation/reallocation/freeing is mixed between
+ * pure malloc/save_malloc/sys_alloc2 routines (only
+ * warns if elib is actually used in the sys_alloc2 routines).
+ * realloc_moves: Always allocate a fresh memory block on reallocation
+ * (less fragmentation).
+ * reclaim_in_supplied: Use memory reclaim mechanisms inside the user
+ * supplied area, this makes one area reusable between
+ * starts of erlang and might be nice for drivers etc.
+ */
+
+int erl_set_memory_block(int isize, int iptr, int warn_save,
+ int realloc_moves, int reclaim_in_supplied, int p5,
+ int p6, int p7, int p8, int p9){
+ if(erlang_id != 0){
+ erts_fprintf(stderr,"Error, cannot set erlang memory block while an "
+ "erlang task is running!\n");
+ return 1;
+ }
+ if(isize < 8 * 1024 *1024)
+ erts_fprintf(stderr,
+		     "Warning, the memory pool of %dMb may be too small to "
+ "run erlang in!\n", isize / (1024 * 1024));
+ alloc_pool_size = (size_t) isize;
+ alloc_pool_ptr = (void *) iptr;
+ alloc_flags = 0;
+ /* USING_ELIB_MALLOC gets set by the initialization routine */
+ if((void *)iptr != NULL)
+ alloc_flags |= (USER_POOL | NEW_USER_POOL);
+ if(realloc_moves)
+ alloc_flags |= REALLOC_MOVES;
+ if(warn_save)
+ alloc_flags |= WARN_MALLOC_MIX;
+ if((void *)iptr != NULL && reclaim_in_supplied)
+ alloc_flags |= RECLAIM_USER_POOL;
+ return 0;
+}
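+
+/* Usage sketch (hypothetical call, values made up for illustration):
+** reserving a 32 Mb pool that erlang save_malloc's itself, with
+** malloc-mix warnings enabled, before the emulator is started:
+**
+**     erl_set_memory_block(32*1024*1024, 0, 1, 0, 0, 0, 0, 0, 0, 0);
+**
+** Passing a non-zero iptr instead hands erlang a caller-owned pool, and
+** reclaim_in_supplied = 1 then makes that pool reusable between restarts.
+*/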
+
+/* External statistics interface */
+int erl_memory_show(int p0, int p1, int p2, int p3, int p4, int p5,
+ int p6, int p7, int p8, int p9){
+ struct elib_stat statistics;
+ if(!(alloc_flags & USING_ELIB_MALLOC) && erlang_id != 0){
+ erts_printf("Using plain save_alloc, use memShow instead.\n");
+ return 1;
+ }
+ if(erlang_id == 0 && !((alloc_flags & USER_POOL) &&
+ !(alloc_flags & NEW_USER_POOL))){
+ erts_printf("Sorry, no allocation statistics until erlang "
+ "is started.\n");
+ return 1;
+ }
+ erts_printf("Allocation settings:\n");
+ erts_printf("Using elib_malloc with memory pool size of %lu bytes.\n",
+ (unsigned long) alloc_pool_size);
+ erts_printf("Realloc-always-moves is %s\n",
+ (alloc_flags & REALLOC_MOVES) ? "on" : "off");
+ erts_printf("Warnings about mixed malloc/free's are %s\n",
+ (alloc_flags & WARN_MALLOC_MIX) ? "on" : "off");
+ if(alloc_flags & USER_POOL){
+ erts_printf("The memory block used by elib is user supplied "
+ "at 0x%08x.\n", (unsigned int) alloc_pool_ptr);
+ if(alloc_flags & RECLAIM_USER_POOL)
+ erts_printf("Allocated memory within the user supplied pool\n"
+ " will be automatically reclaimed at task exit.\n");
+ } else {
+ erts_printf("The memory block used by elib is save_malloc'ed "
+ "at 0x%08x.\n", (unsigned int) alloc_pool_ptr);
+ }
+#ifdef NO_FIX_ALLOC
+ erts_printf("Fix_alloc is disabled in this build\n");
+#endif
+ erts_printf("Statistics from elib_malloc:\n");
+ ELIB_LOCK;
+
+ elib_stat(&statistics);
+ ELIB_UNLOCK;
+ erts_printf("Type Size (bytes) Number of blocks\n");
+ erts_printf("============= ============ ================\n");
+ erts_printf("Total: %12lu %16lu\n",
+ (unsigned long) statistics.mem_total*4,
+ (unsigned long) statistics.mem_blocks);
+ erts_printf("Allocated: %12lu %16lu\n",
+ (unsigned long) statistics.mem_alloc*4,
+ (unsigned long) statistics.mem_blocks-statistics.free_blocks);
+ erts_printf("Free: %12lu %16lu\n",
+ (unsigned long) statistics.mem_free*4,
+ (unsigned long) statistics.free_blocks);
+ erts_printf("Largest free: %12lu -\n\n",
+ (unsigned long) statistics.max_free*4);
+ return 0;
+}
+
+
+/*
+** More programmer friendly (as opposed to user friendly ;-) interface
+** to the memory statistics. Resembles the VxWorks memPartInfoGet but
+** does not take a partition id as parameter...
+*/
+int erl_mem_info_get(MEM_PART_STATS *stats){
+ struct elib_stat statistics;
+ if(!(alloc_flags & USING_ELIB_MALLOC))
+ return -1;
+ ELIB_LOCK;
+ elib_stat(&statistics);
+ ELIB_UNLOCK;
+ stats->numBytesFree = statistics.mem_free*4;
+ stats->numBlocksFree = statistics.free_blocks;
+ stats->maxBlockSizeFree = statistics.max_free*4;
+ stats->numBytesAlloc = statistics.mem_alloc*4;
+ stats->numBlocksAlloc = statistics.mem_blocks-statistics.free_blocks;
+ return 0;
+}
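+
+/* Usage sketch (hypothetical caller): erl_mem_info_get() fills in the same
+** MEM_PART_STATS structure that memPartInfoGet() uses, e.g.
+**
+**     MEM_PART_STATS s;
+**     if (erl_mem_info_get(&s) == 0)
+**         printf("free: %lu bytes in %lu blocks\n",
+**                (unsigned long) s.numBytesFree,
+**                (unsigned long) s.numBlocksFree);
+**
+** It returns -1 when elib_malloc is not in use, as checked above.
+*/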
+
+/********************* Pipe driver **********************************/
+/*
+ * Purpose: Pipe driver with Unix (unnamed) pipe semantics.
+ * Author: Peter Hogfeldt ([email protected]) from an outline
+ * by Per Hedeland ([email protected]).
+ *
+ * Note: This driver must *not* use the reclaim facilities, hence it
+ * is placed here. (after the #undef's of open,malloc etc)
+ *
+ * This driver supports select() and non-blocking I/O via
+ * ioctl(fd, FIONBIO, val).
+ *
+ * 1997-03-21 Peter Hogfeldt
+ * Added non-blocking I/O.
+ *
+ */
+
+/*
+ * SEMAPHORES
+ *
+ * Each end of a pipe has two semaphores: semExcl for serialising access to
+ * the pipe end, and semBlock for blocking I/O.
+ *
+ * reader->semBlock is available (full) if and only if the pipe is
+ * not empty, or the write end is closed. Otherwise
+ * it is unavailable (empty). It is initially
+ * unavailable.
+ *
+ * writer->semBlock is available (full) if and only if the pipe is
+ * not full, or if the reader end is closed.
+ * Otherwise it is unavailable. It is initially
+ * available.
+ */
+
+#define UXPIPE_SIZE 4096
+
+/* Forward declaration */
+typedef struct uxPipeDev UXPIPE_DEV;
+
+/*
+ * Pipe descriptor (one for each open pipe).
+ */
+typedef struct {
+ int drvNum;
+ UXPIPE_DEV *reader, *writer;
+ RING_ID ringId;
+} UXPIPE;
+
+/*
+ * Device descriptor (one for each of the read and write
+ * ends of an open pipe).
+ */
+struct uxPipeDev {
+ UXPIPE *pipe;
+ int blocking;
+ SEL_WAKEUP_LIST wakeupList;
+ SEM_ID semExcl;
+ SEM_ID semBlock;
+};
+
+int uxPipeDrvNum = 0; /* driver number of pipe driver */
+
+#define PIPE_NAME "/uxpipe" /* only used internally */
+#define PIPE_READ "/r" /* ditto */
+#define PIPE_WRITE "/w" /* ditto */
+
+LOCAL char pipeRead[64], pipeWrite[64];
+LOCAL DEV_HDR devHdr;
+LOCAL UXPIPE *newPipe; /* communicate btwn open()s in pipe() */
+LOCAL SEM_ID pipeSem; /* mutual exclusion in pipe() */
+
+/* forward declarations */
+LOCAL int uxPipeOpen(DEV_HDR *pDv, char *name, int mode);
+LOCAL int uxPipeClose(UXPIPE_DEV *pDev);
+LOCAL int uxPipeRead(UXPIPE_DEV *pDev, char *buffer, int maxbytes);
+LOCAL int uxPipeWrite(UXPIPE_DEV *pDev, char *buffer, int nbytes);
+LOCAL STATUS uxPipeIoctl(FAST UXPIPE_DEV *pDev, FAST int function, int arg);
+
+
+/***************************************************************************
+ *
+ * uxPipeDrv - install Unix pipe driver
+ *
+ * This routine initializes the Unix pipe driver. It must be called
+ * before any other routine in this driver.
+ *
+ * RETURNS:
+ * OK, or ERROR if I/O system is unable to install driver.
+ */
+
+STATUS
+uxPipeDrv(void)
+{
+ if (uxPipeDrvNum > 0)
+ return (OK); /* driver already installed */
+ if ((uxPipeDrvNum = iosDrvInstall((FUNCPTR) NULL, (FUNCPTR) NULL,
+ uxPipeOpen, uxPipeClose, uxPipeRead,
+ uxPipeWrite, uxPipeIoctl)) == ERROR)
+ return (ERROR);
+ if (iosDevAdd(&devHdr, PIPE_NAME, uxPipeDrvNum) == ERROR)
+ return (ERROR);
+ strcpy(pipeRead, PIPE_NAME);
+ strcat(pipeRead, PIPE_READ);
+ strcpy(pipeWrite, PIPE_NAME);
+ strcat(pipeWrite, PIPE_WRITE);
+ if ((pipeSem = semMCreate(SEM_Q_PRIORITY | SEM_DELETE_SAFE)) == NULL)
+ return (ERROR);
+ return (OK);
+}
+
+/***************************************************************************
+ *
+ * uxPipeOpen - open a pipe
+ *
+ * RETURNS: Pointer to device descriptor, or ERROR if memory cannot be
+ * allocated (errno = ENOMEM), or invalid argument (errno = EINVAL).
+ */
+
+/*
+ * DEV_HDR *pDv; pointer to device header (dummy)
+ * char *name; name of pipe to open ("/r" or "/w")
+ * int mode; access mode (O_RDONLY or O_WRONLY)
+ */
+LOCAL int
+uxPipeOpen(DEV_HDR *pDv, char *name, int mode)
+{
+ UXPIPE_DEV *reader, *writer;
+
+ if (mode == O_RDONLY && strcmp(name, PIPE_READ) == 0) {
+ /* reader open */
+ if ((newPipe = (UXPIPE *) malloc(sizeof(UXPIPE))) != NULL) {
+ if ((newPipe->ringId = rngCreate(UXPIPE_SIZE)) != NULL) {
+ if ((reader = (UXPIPE_DEV *) malloc(sizeof(UXPIPE_DEV))) != NULL) {
+ if ((reader->semExcl = semBCreate(SEM_Q_FIFO, SEM_FULL)) != NULL) {
+ if ((reader->semBlock = semBCreate(SEM_Q_FIFO, SEM_EMPTY)) != NULL) {
+ reader->pipe = newPipe;
+ reader->blocking = 1;
+ selWakeupListInit(&reader->wakeupList);
+ newPipe->reader = reader;
+ newPipe->writer = NULL;
+ newPipe->drvNum = uxPipeDrvNum;
+ return ((int) reader);
+ }
+ semDelete(reader->semExcl);
+ }
+ free(reader);
+ }
+ rngDelete(newPipe->ringId);
+ }
+ free(newPipe);
+ newPipe = NULL;
+ errno = ENOMEM;
+ }
+ } else if (mode == O_WRONLY && strcmp(name, PIPE_WRITE) == 0) {
+ /* writer open */
+ if (newPipe != NULL &&
+ (writer = (UXPIPE_DEV *) malloc(sizeof(UXPIPE_DEV))) != NULL) {
+ if ((writer->semExcl = semBCreate(SEM_Q_FIFO, SEM_FULL)) != NULL) {
+ if ((writer->semBlock = semBCreate(SEM_Q_FIFO, SEM_FULL)) != NULL) {
+ writer->blocking = 1;
+ writer->pipe = newPipe;
+ selWakeupListInit(&writer->wakeupList);
+ newPipe->writer = writer;
+ newPipe = NULL;
+ return ((int) writer);
+ }
+ semDelete(writer->semExcl);
+ }
+ free(writer);
+ }
+ if (newPipe != NULL)
+ free(newPipe);
+ newPipe = NULL;
+ errno = ENOMEM;
+ } else {
+ errno = EINVAL;
+ }
+ return (ERROR);
+}
+
+/***************************************************************************
+ *
+ * uxPipeClose - close read or write end of a pipe.
+ *
+ * RETURNS:
+ * OK, or ERROR if device descriptor does not refer to an open read or
+ * write end of a pipe (errno = EBADF).
+ */
+
+LOCAL int
+uxPipeClose(UXPIPE_DEV *pDev)
+{
+ UXPIPE *pajp = pDev->pipe;
+
+ taskLock();
+ if (pDev == pajp->reader) {
+ /* Close this end */
+ semDelete(pDev->semExcl);
+ semDelete(pDev->semBlock);
+ free(pDev);
+ pajp->reader = NULL;
+ /* Inform the other end */
+ if (pajp->writer != NULL) {
+ selWakeupAll(&pajp->writer->wakeupList, SELWRITE);
+ semGive(pajp->writer->semBlock);
+ }
+ } else if (pDev == pajp->writer) {
+ /* Close this end */
+ semDelete(pDev->semExcl);
+ semDelete(pDev->semBlock);
+ free(pDev);
+ pajp->writer = NULL;
+ /* Inform the other end */
+ if (pajp->reader != NULL) {
+ selWakeupAll(&pajp->reader->wakeupList, SELREAD);
+ semGive(pajp->reader->semBlock);
+ }
+ } else {
+ errno = EBADF;
+ taskUnlock();
+ return (ERROR);
+ }
+ if (pajp->reader == NULL && pajp->writer == NULL) {
+ rngDelete(pajp->ringId);
+ pajp->drvNum = 0;
+ free(pajp);
+ }
+ taskUnlock();
+ return (OK);
+}
+/***************************************************************************
+ *
+ * uxPipeRead - read from a pipe.
+ *
+ * Reads at most maxbytes bytes from the pipe. Blocks if blocking mode is
+ * set and the pipe is empty.
+ *
+ * RETURNS:
+ * number of bytes read, 0 on EOF, or ERROR if device descriptor does
+ * not refer to an open read end of a pipe (errno = EBADF), or if
+ * non-blocking mode is set and the pipe is empty (errno = EWOULDBLOCK).
+ */
+
+LOCAL int
+uxPipeRead(UXPIPE_DEV *pDev, char *buffer, int maxbytes)
+{
+ UXPIPE *pajp = pDev->pipe;
+ int nbytes = 0;
+
+ if (pDev != pajp->reader) {
+ errno = EBADF;
+ return (ERROR);
+ }
+ if (maxbytes == 0)
+ return (0);
+ semTake(pDev->semExcl, WAIT_FOREVER);
+ /*
+ * Note that semBlock may be full, although there is nothing to read.
+ * This happens e.g. after the following sequence of operations: a
+ * reader task blocks, a writer task writes two times (the first
+ * write unblocks the reader task, the second write makes semBlock
+ * full).
+ */
+ while (nbytes == 0) {
+ if (pDev->blocking)
+ semTake(pDev->semBlock, WAIT_FOREVER);
+ /*
+ * Reading and updating of the write end must not be interleaved
+ * with a write from another task - hence we lock this task.
+ */
+ taskLock();
+ nbytes = rngBufGet(pajp->ringId, buffer, maxbytes);
+ if (nbytes > 0) {
+ /* Give own semaphore if bytes remain or if write end is closed */
+ if ((!rngIsEmpty(pajp->ringId) || pajp->writer == NULL) &&
+ pDev->blocking)
+ semGive(pDev->semBlock);
+ /* Inform write end */
+ if (pajp->writer != NULL) {
+ if (pajp->writer->blocking)
+ semGive(pajp->writer->semBlock);
+ selWakeupAll(&pajp->writer->wakeupList, SELWRITE);
+ }
+ } else if (pajp->writer == NULL) {
+ nbytes = 0; /* EOF */
+ /* Give semaphore when write end is closed */
+ if (pDev->blocking)
+ semGive(pDev->semBlock);
+ taskUnlock();
+ semGive(pDev->semExcl);
+ return (nbytes);
+ } else if (!pDev->blocking) {
+ taskUnlock();
+ semGive(pDev->semExcl);
+ errno = EWOULDBLOCK;
+ return (ERROR);
+ }
+ taskUnlock();
+ }
+ semGive(pDev->semExcl);
+ return (nbytes);
+}
+
+/***************************************************************************
+ *
+ * uxPipeWrite - write to a pipe.
+ *
+ * Writes nbytes bytes to the pipe. Blocks if blocking mode is set and
+ * the pipe is full.
+ *
+ * RETURNS:
+ * number of bytes written, or ERROR if the device descriptor does not
+ * refer to an open write end of a pipe (errno = EBADF); or if the read end
+ * of the pipe is closed (errno = EPIPE); or if non-blocking mode is set
+ * and the pipe is full (errno = EWOULDBLOCK).
+ *
+ */
+
+LOCAL int
+uxPipeWrite(UXPIPE_DEV *pDev, char *buffer, int nbytes)
+{
+
+ UXPIPE *pajp = pDev->pipe;
+ int sofar = 0, written;
+
+ if (pDev != pajp->writer) {
+ errno = EBADF;
+ return (ERROR);
+ }
+ if (pajp->reader == NULL) {
+ errno = EPIPE;
+ return (ERROR);
+ }
+ if (nbytes == 0)
+ return (0);
+ semTake(pDev->semExcl, WAIT_FOREVER);
+ while (sofar < nbytes) {
+ if (pDev->blocking)
+ semTake(pDev->semBlock, WAIT_FOREVER);
+ if (pajp->reader == NULL) {
+ errno = EPIPE;
+ semGive(pDev->semBlock);
+ semGive(pDev->semExcl);
+ return (ERROR);
+ }
+ /* Writing and updating of the read end must not be interleaved
+ * with a read from another task - hence we lock this task.
+ */
+ taskLock();
+ written = rngBufPut(pajp->ringId, buffer + sofar, nbytes - sofar);
+ sofar += written;
+ /* Inform the read end if we really wrote something */
+ if (written > 0 && pajp->reader != NULL) {
+ selWakeupAll(&pajp->reader->wakeupList, SELREAD);
+ if (pajp->reader->blocking)
+ semGive(pajp->reader->semBlock);
+ }
+ taskUnlock();
+ if (!pDev->blocking) {
+ if (sofar == 0) {
+ errno = EWOULDBLOCK;
+ sofar = ERROR;
+ }
+ break;
+ }
+ }
+ /* Give own semaphore if space remains */
+ if (!rngIsFull(pajp->ringId) && pDev->blocking)
+ semGive(pDev->semBlock);
+ semGive(pDev->semExcl);
+ return (sofar);
+}
+
+/***************************************************************************
+ *
+ * uxPipeIoctl - do device specific I/O control
+ *
+ * RETURNS:
+ * OK or ERROR.
+ */
+
+LOCAL STATUS
+uxPipeIoctl(FAST UXPIPE_DEV *pDev, FAST int function, int arg)
+
+{
+ UXPIPE *pajp = pDev->pipe;
+ int status = OK;
+
+ switch (function) {
+ case FIONBIO:
+ pDev->blocking = (*(int *)arg) ? 0 : 1;
+ break;
+ case FIOSELECT:
+ taskLock();
+ selNodeAdd(&pDev->wakeupList, (SEL_WAKEUP_NODE *) arg);
+ if (selWakeupType((SEL_WAKEUP_NODE *) arg) == SELREAD &&
+ pDev == pajp->reader &&
+ (!rngIsEmpty(pajp->ringId) || pajp->writer == NULL))
+ selWakeup((SEL_WAKEUP_NODE *) arg);
+ if (selWakeupType((SEL_WAKEUP_NODE *) arg) == SELWRITE &&
+ pDev == pajp->writer &&
+ (!rngIsFull(pajp->ringId) || pajp->reader == NULL))
+ selWakeup((SEL_WAKEUP_NODE *) arg);
+ taskUnlock();
+ break;
+ case FIOUNSELECT:
+ selNodeDelete(&pDev->wakeupList, (SEL_WAKEUP_NODE *) arg);
+ break;
+ default:
+ status = ERROR;
+ break;
+ }
+ return (status);
+}
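+
+/* Usage sketch (hypothetical caller): as the FIONBIO case above shows, the
+** ioctl() argument is a pointer to an int, so a pipe end is switched to
+** non-blocking mode with something like
+**
+**     int on = 1;
+**     ioctl(fd[1], FIONBIO, (int) &on);
+**
+** and back to blocking mode by passing on = 0.
+*/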
+
+/***************************************************************************
+ *
+ * pipe - create an intertask channel
+ *
+ * Creates a pipe. fd[0] (fd[1]) is the read (write) file descriptor.
+ *
+ * RETURNS:
+ * OK or ERROR, if the pipe could not be created.
+ */
+
+STATUS
+pipe(int fd[2])
+{
+ semTake(pipeSem, WAIT_FOREVER);
+ if ((fd[0] = open(pipeRead, O_RDONLY, 0)) != ERROR) {
+ if ((fd[1] = open(pipeWrite, O_WRONLY, 0)) != ERROR) {
+ semGive(pipeSem);
+ return (OK);
+ }
+ (void) close(fd[0]);
+ }
+ errno &= 0xFFFF;
+ if((errno & 0xFFFF) == EINTR) /* Why on earth EINTR??? */
+ errno = ENFILE; /* It means we are out of file descriptors...*/
+ semGive(pipeSem);
+ return (ERROR);
+}
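+
+/* Usage sketch (hypothetical caller): once uxPipeDrv() has installed the
+** driver, a pipe is created and used much like its Unix counterpart:
+**
+**     int fd[2];
+**     char c;
+**     if (uxPipeDrv() == OK && pipe(fd) == OK) {
+**         write(fd[1], "x", 1);
+**         read(fd[0], &c, 1);
+**         close(fd[0]);
+**         close(fd[1]);
+**     }
+**
+** after which c holds 'x'.
+*/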
+
+/***************************************************************************
+ *
+ * uxPipeShow - display pipe information
+ *
+ * RETURNS:
+ * N/A.
+ */
+
+void
+uxPipeShow(int fd)
+{
+ UXPIPE_DEV *pDev;
+ UXPIPE *pajp;
+ int drvValue;
+
+ if ((drvValue = iosFdValue(fd)) == ERROR) {
+ erts_fprintf(stderr, "Error: file descriptor invalid\n");
+ return;
+ }
+ pDev = (UXPIPE_DEV *)drvValue;
+ pajp = pDev->pipe;
+ if (pajp->drvNum != uxPipeDrvNum) {
+ erts_fprintf(stderr, "Error: Not a ux pipe device\n");
+ return;
+ }
+ erts_fprintf(stderr, "Device : 0x%x\n", (int) pDev);
+ erts_fprintf(stderr, "Buffer size : %d\n", UXPIPE_SIZE);
+ erts_fprintf(stderr, "Bytes in buffer : %d\n\n", rngNBytes(pajp->ringId));
+ erts_fprintf(stderr, "READ END\n\n");
+ if (pajp->reader != NULL) {
+ erts_fprintf(stderr, "Mode : ");
+ erts_fprintf(stderr, "%s\n",
+ (pajp->reader->blocking) ? "blocking" : "non-blocking");
+ }
+ erts_fprintf(stderr, "Status : ");
+ if (pajp->reader != NULL) {
+ erts_fprintf(stderr, "OPEN\n");
+ erts_fprintf(stderr, "Wake-up list : %d\n\n",
+ selWakeupListLen(&pajp->reader->wakeupList));
+ erts_fprintf(stderr, "Exclusion Semaphore\n");
+ semShow(pajp->reader->semExcl, 1);
+ erts_fprintf(stderr, "Blocking Semaphore\n");
+ semShow(pajp->reader->semBlock, 1);
+ } else
+ erts_fprintf(stderr, "CLOSED\n\n");
+ erts_fprintf(stderr, "WRITE END\n\n");
+ if (pajp->writer != NULL) {
+ erts_fprintf(stderr, "Mode : ");
+ erts_fprintf(stderr, "%s\n",
+ (pajp->writer->blocking) ? "blocking" : "non-blocking");
+ }
+ erts_fprintf(stderr, "Status : ");
+ if (pajp->writer != NULL) {
+ erts_fprintf(stderr, "OPEN\n");
+ erts_fprintf(stderr, "Wake-up list : %d\n\n",
+ selWakeupListLen(&pajp->writer->wakeupList));
+ erts_fprintf(stderr, "Exclusion Semaphore\n");
+ semShow(pajp->writer->semExcl, 1);
+ erts_fprintf(stderr, "Blocking Semaphore\n");
+ semShow(pajp->writer->semBlock, 1);
+ } else
+ erts_fprintf(stderr, "CLOSED\n\n");
+}
+
+#ifdef DEBUG
+void
+erl_assert_error(char* expr, char* file, int line)
+{
+ fflush(stdout);
+ fprintf(stderr, "Assertion failed: %s in %s, line %d\n",
+ expr, file, line);
+ fflush(stderr);
+ erl_crash_dump(file, line, "Assertion failed: %s\n", expr);
+ abort();
+}
+void
+erl_debug(char* fmt, ...)
+{
+ char sbuf[1024]; /* Temporary buffer. */
+ va_list va;
+
+ va_start(va, fmt);
+ vsprintf(sbuf, fmt, va);
+ va_end(va);
+ fprintf(stderr, "%s\n", sbuf);
+}
+#endif
diff --git a/erts/emulator/sys/win32/dosmap.c b/erts/emulator/sys/win32/dosmap.c
new file mode 100644
index 0000000000..15416a66c5
--- /dev/null
+++ b/erts/emulator/sys/win32/dosmap.c
@@ -0,0 +1,282 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1998-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * _dosmaperr: maps Windows OS errors to Unix System V errno values
+ *
+ * Contributor: Michael Regen
+ */
+
+/* Only use for win32 if linking to MSVCR??.DLL and not if statically linking
+ to LIBCMT.LIB */
+#if defined(WIN32) && defined(_MT) && defined(_DLL)
+
+#include <errno.h>
+#include <winerror.h>
+#include <stdlib.h>
+
+/* The position in the table is the Windows OS error; the value is the Posix errno.
+** The exception for ERROR_NOT_ENOUGH_QUOTA (1816) is handled in _dosmaperr().
+*/
+static const unsigned char errMapTable[] = {
+ EINVAL, /* ERROR_SUCCESS 0 */
+ EINVAL, /* ERROR_INVALID_FUNCTION 1 */
+ ENOENT, /* ERROR_FILE_NOT_FOUND 2 */
+ ENOENT, /* ERROR_PATH_NOT_FOUND 3 */
+ EMFILE, /* ERROR_TOO_MANY_OPEN_FILES 4 */
+ EACCES, /* ERROR_ACCESS_DENIED 5 */
+ EBADF, /* ERROR_INVALID_HANDLE 6 */
+ ENOMEM, /* ERROR_ARENA_TRASHED 7 */
+ ENOMEM, /* ERROR_NOT_ENOUGH_MEMORY 8 */
+ ENOMEM, /* ERROR_INVALID_BLOCK 9 */
+ E2BIG, /* ERROR_BAD_ENVIRONMENT 10 */
+ ENOEXEC, /* ERROR_BAD_FORMAT 11 */
+ EINVAL, /* ERROR_INVALID_ACCESS 12 */
+ EINVAL, /* ERROR_INVALID_DATA 13 */
+ EINVAL, /* ERROR_OUTOFMEMORY 14 */
+ ENOENT, /* ERROR_INVALID_DRIVE 15 */
+ EACCES, /* ERROR_CURRENT_DIRECTORY 16 */
+ EXDEV, /* ERROR_NOT_SAME_DEVICE 17 */
+ ENOENT, /* ERROR_NO_MORE_FILES 18 */
+ EACCES, /* ERROR_WRITE_PROTECT 19 */
+ EACCES, /* ERROR_BAD_UNIT 20 */
+ EACCES, /* ERROR_NOT_READY 21 */
+ EACCES, /* ERROR_BAD_COMMAND 22 */
+ EACCES, /* ERROR_CRC 23 */
+ EACCES, /* ERROR_BAD_LENGTH 24 */
+ EACCES, /* ERROR_SEEK 25 */
+ EACCES, /* ERROR_NOT_DOS_DISK 26 */
+ EACCES, /* ERROR_SECTOR_NOT_FOUND 27 */
+ EACCES, /* ERROR_OUT_OF_PAPER 28 */
+ EACCES, /* ERROR_WRITE_FAULT 29 */
+ EACCES, /* ERROR_READ_FAULT 30 */
+ EACCES, /* ERROR_GEN_FAILURE 31 */
+ EACCES, /* ERROR_SHARING_VIOLATION 32 */
+ EACCES, /* ERROR_LOCK_VIOLATION 33 */
+ EACCES, /* ERROR_WRONG_DISK 34 */
+ EACCES, /* 35 */
+ EACCES, /* ERROR_SHARING_BUFFER_EXCEEDED 36 */
+ EINVAL, /* 37 */
+ EINVAL, /* ERROR_HANDLE_EOF 38 */
+ EINVAL, /* ERROR_HANDLE_DISK_FULL 39 */
+ EINVAL, /* 40 */
+ EINVAL, /* 41 */
+ EINVAL, /* 42 */
+ EINVAL, /* 43 */
+ EINVAL, /* 44 */
+ EINVAL, /* 45 */
+ EINVAL, /* 46 */
+ EINVAL, /* 47 */
+ EINVAL, /* 48 */
+ EINVAL, /* 49 */
+ EINVAL, /* ERROR_NOT_SUPPORTED 50 */
+ EINVAL, /* ERROR_REM_NOT_LIST 51 */
+ EINVAL, /* ERROR_DUP_NAME 52 */
+ ENOENT, /* ERROR_BAD_NETPATH 53 */
+ EINVAL, /* ERROR_NETWORK_BUSY 54 */
+ EINVAL, /* ERROR_DEV_NOT_EXIST 55 */
+ EINVAL, /* ERROR_TOO_MANY_CMDS 56 */
+ EINVAL, /* ERROR_ADAP_HDW_ERR 57 */
+ EINVAL, /* ERROR_BAD_NET_RESP 58 */
+ EINVAL, /* ERROR_UNEXP_NET_ERR 59 */
+ EINVAL, /* ERROR_BAD_REM_ADAP 60 */
+ EINVAL, /* ERROR_PRINTQ_FULL 61 */
+ EINVAL, /* ERROR_NO_SPOOL_SPACE 62 */
+ EINVAL, /* ERROR_PRINT_CANCELLED 63 */
+ EINVAL, /* ERROR_NETNAME_DELETED 64 */
+ EACCES, /* ERROR_NETWORK_ACCESS_DENIED 65 */
+ EINVAL, /* ERROR_BAD_DEV_TYPE 66 */
+ ENOENT, /* ERROR_BAD_NET_NAME 67 */
+ EINVAL, /* ERROR_TOO_MANY_NAMES 68 */
+ EINVAL, /* ERROR_TOO_MANY_SESS 69 */
+ EINVAL, /* ERROR_SHARING_PAUSED 70 */
+ EINVAL, /* ERROR_REQ_NOT_ACCEP 71 */
+ EINVAL, /* ERROR_REDIR_PAUSED 72 */
+ EINVAL, /* 73 */
+ EINVAL, /* 74 */
+ EINVAL, /* 75 */
+ EINVAL, /* 76 */
+ EINVAL, /* 77 */
+ EINVAL, /* 78 */
+ EINVAL, /* 79 */
+ EEXIST, /* ERROR_FILE_EXISTS 80 */
+ EINVAL, /* 81 */
+ EACCES, /* ERROR_CANNOT_MAKE 82 */
+ EACCES, /* ERROR_FAIL_I24 83 */
+ EINVAL, /* ERROR_OUT_OF_STRUCTURES 84 */
+ EINVAL, /* ERROR_ALREADY_ASSIGNED 85 */
+ EINVAL, /* ERROR_INVALID_PASSWORD 86 */
+ EINVAL, /* ERROR_INVALID_PARAMETER 87 */
+ EINVAL, /* ERROR_NET_WRITE_FAULT 88 */
+ EAGAIN, /* ERROR_NO_PROC_SLOTS 89 */
+ EINVAL, /* 90 */
+ EINVAL, /* 91 */
+ EINVAL, /* 92 */
+ EINVAL, /* 93 */
+ EINVAL, /* 94 */
+ EINVAL, /* 95 */
+ EINVAL, /* 96 */
+ EINVAL, /* 97 */
+ EINVAL, /* 98 */
+ EINVAL, /* 99 */
+ EINVAL, /* ERROR_TOO_MANY_SEMAPHORES 100 */
+ EINVAL, /* ERROR_EXCL_SEM_ALREADY_OWNED 101 */
+ EINVAL, /* ERROR_SEM_IS_SET 102 */
+ EINVAL, /* ERROR_TOO_MANY_SEM_REQUESTS 103 */
+ EINVAL, /* ERROR_INVALID_AT_INTERRUPT_TIME 104 */
+ EINVAL, /* ERROR_SEM_OWNER_DIED 105 */
+ EINVAL, /* ERROR_SEM_USER_LIMIT 106 */
+ EINVAL, /* ERROR_DISK_CHANGE 107 */
+ EACCES, /* ERROR_DRIVE_LOCKED 108 */
+ EPIPE, /* ERROR_BROKEN_PIPE 109 */
+ EINVAL, /* ERROR_OPEN_FAILED 110 */
+ EINVAL, /* ERROR_BUFFER_OVERFLOW 111 */
+ ENOSPC, /* ERROR_DISK_FULL 112 */
+ EINVAL, /* ERROR_NO_MORE_SEARCH_HANDLES 113 */
+ EBADF, /* ERROR_INVALID_TARGET_HANDLE 114 */
+ EINVAL, /* 115 */
+ EINVAL, /* 116 */
+ EINVAL, /* ERROR_INVALID_CATEGORY 117 */
+ EINVAL, /* ERROR_INVALID_VERIFY_SWITCH 118 */
+ EINVAL, /* ERROR_BAD_DRIVER_LEVEL 119 */
+ EINVAL, /* ERROR_CALL_NOT_IMPLEMENTED 120 */
+ EINVAL, /* ERROR_SEM_TIMEOUT 121 */
+ EINVAL, /* ERROR_INSUFFICIENT_BUFFER 122 */
+ EINVAL, /* ERROR_INVALID_NAME 123 */
+ EINVAL, /* ERROR_INVALID_LEVEL 124 */
+ EINVAL, /* ERROR_NO_VOLUME_LABEL 125 */
+ EINVAL, /* ERROR_MOD_NOT_FOUND 126 */
+ EINVAL, /* ERROR_PROC_NOT_FOUND 127 */
+ ECHILD, /* ERROR_WAIT_NO_CHILDREN 128 */
+ ECHILD, /* ERROR_CHILD_NOT_COMPLETE 129 */
+ EBADF, /* ERROR_DIRECT_ACCESS_HANDLE 130 */
+ EINVAL, /* ERROR_NEGATIVE_SEEK 131 */
+ EACCES, /* ERROR_SEEK_ON_DEVICE 132 */
+ EINVAL, /* ERROR_IS_JOIN_TARGET 133 */
+ EINVAL, /* ERROR_IS_JOINED 134 */
+ EINVAL, /* ERROR_IS_SUBSTED 135 */
+ EINVAL, /* ERROR_NOT_JOINED 136 */
+ EINVAL, /* ERROR_NOT_SUBSTED 137 */
+ EINVAL, /* ERROR_JOIN_TO_JOIN 138 */
+ EINVAL, /* ERROR_SUBST_TO_SUBST 139 */
+ EINVAL, /* ERROR_JOIN_TO_SUBST 140 */
+ EINVAL, /* ERROR_SUBST_TO_JOIN 141 */
+ EINVAL, /* ERROR_BUSY_DRIVE 142 */
+ EINVAL, /* ERROR_SAME_DRIVE 143 */
+ EINVAL, /* ERROR_DIR_NOT_ROOT 144 */
+ ENOTEMPTY, /* ERROR_DIR_NOT_EMPTY 145 */
+ EINVAL, /* ERROR_IS_SUBST_PATH 146 */
+ EINVAL, /* ERROR_IS_JOIN_PATH 147 */
+ EINVAL, /* ERROR_PATH_BUSY 148 */
+ EINVAL, /* ERROR_IS_SUBST_TARGET 149 */
+ EINVAL, /* ERROR_SYSTEM_TRACE 150 */
+ EINVAL, /* ERROR_INVALID_EVENT_COUNT 151 */
+ EINVAL, /* ERROR_TOO_MANY_MUXWAITERS 152 */
+ EINVAL, /* ERROR_INVALID_LIST_FORMAT 153 */
+ EINVAL, /* ERROR_LABEL_TOO_LONG 154 */
+ EINVAL, /* ERROR_TOO_MANY_TCBS 155 */
+ EINVAL, /* ERROR_SIGNAL_REFUSED 156 */
+ EINVAL, /* ERROR_DISCARDED 157 */
+ EACCES, /* ERROR_NOT_LOCKED 158 */
+ EINVAL, /* ERROR_BAD_THREADID_ADDR 159 */
+ EINVAL, /* ERROR_BAD_ARGUMENTS 160 */
+ ENOENT, /* ERROR_BAD_PATHNAME 161 */
+ EINVAL, /* ERROR_SIGNAL_PENDING 162 */
+ EINVAL, /* 163 */
+ EAGAIN, /* ERROR_MAX_THRDS_REACHED 164 */
+ EINVAL, /* 165 */
+ EINVAL, /* 166 */
+ EACCES, /* ERROR_LOCK_FAILED 167 */
+ EINVAL, /* 168 */
+ EINVAL, /* 169 */
+ EINVAL, /* ERROR_BUSY 170 */
+ EINVAL, /* 171 */
+ EINVAL, /* 172 */
+ EINVAL, /* ERROR_CANCEL_VIOLATION 173 */
+ EINVAL, /* ERROR_ATOMIC_LOCKS_NOT_SUPPORTED 174 */
+ EINVAL, /* 175 */
+ EINVAL, /* 176 */
+ EINVAL, /* 177 */
+ EINVAL, /* 178 */
+ EINVAL, /* 179 */
+ EINVAL, /* ERROR_INVALID_SEGMENT_NUMBER 180 */
+ EINVAL, /* 181 */
+ EINVAL, /* ERROR_INVALID_ORDINAL 182 */
+ EEXIST, /* ERROR_ALREADY_EXISTS 183 */
+ EINVAL, /* 184 */
+ EINVAL, /* 185 */
+ EINVAL, /* ERROR_INVALID_FLAG_NUMBER 186 */
+ EINVAL, /* ERROR_SEM_NOT_FOUND 187 */
+ ENOEXEC, /* ERROR_INVALID_STARTING_CODESEG 188 */
+ ENOEXEC, /* ERROR_INVALID_STACKSEG 189 */
+ ENOEXEC, /* ERROR_INVALID_MODULETYPE 190 */
+ ENOEXEC, /* ERROR_INVALID_EXE_SIGNATURE 191 */
+ ENOEXEC, /* ERROR_EXE_MARKED_INVALID 192 */
+ ENOEXEC, /* ERROR_BAD_EXE_FORMAT 193 */
+ ENOEXEC, /* ERROR_ITERATED_DATA_EXCEEDS_64k 194 */
+ ENOEXEC, /* ERROR_INVALID_MINALLOCSIZE 195 */
+ ENOEXEC, /* ERROR_DYNLINK_FROM_INVALID_RING 196 */
+ ENOEXEC, /* ERROR_IOPL_NOT_ENABLED 197 */
+ ENOEXEC, /* ERROR_INVALID_SEGDPL 198 */
+ ENOEXEC, /* ERROR_AUTODATASEG_EXCEEDS_64k 199 */
+ ENOEXEC, /* ERROR_RING2SEG_MUST_BE_MOVABLE 200 */
+ ENOEXEC, /* ERROR_RELOC_CHAIN_XEEDS_SEGLIM 201 */
+ ENOEXEC, /* ERROR_INFLOOP_IN_RELOC_CHAIN 202 */
+ EINVAL, /* ERROR_ENVVAR_NOT_FOUND 203 */
+ EINVAL, /* 204 */
+ EINVAL, /* ERROR_NO_SIGNAL_SENT 205 */
+ ENOENT, /* ERROR_FILENAME_EXCED_RANGE 206 */
+ EINVAL, /* ERROR_RING2_STACK_IN_USE 207 */
+ EINVAL, /* ERROR_META_EXPANSION_TOO_LONG 208 */
+ EINVAL, /* ERROR_INVALID_SIGNAL_NUMBER 209 */
+ EINVAL, /* ERROR_THREAD_1_INACTIVE 210 */
+ EINVAL, /* 211 */
+ EINVAL, /* ERROR_LOCKED 212 */
+ EINVAL, /* 213 */
+ EINVAL, /* ERROR_TOO_MANY_MODULES 214 */
+ EAGAIN /* ERROR_NESTING_NOT_ALLOWED 215 */
+};
+
+/* size of the table */
+#define ERRMAPTABLESIZE (sizeof(errMapTable)/sizeof(errMapTable[0]))
+
+/*
+** void __cdecl _dosmaperr(winerrno)
+**
+** Takes a Windows error number and tries to map it to a Unix System V errno.
+** Sets:
+** _doserrno = Windows error number
+** errno = Unix System V errno.
+*/
+void __cdecl _dosmaperr(unsigned long winerrno)
+{
+ _doserrno = winerrno;
+
+ if (winerrno >= ERRMAPTABLESIZE) {
+ if (winerrno == ERROR_NOT_ENOUGH_QUOTA) { /* exception for 1816 */
+ errno = ENOMEM;
+ } else {
+ errno = EINVAL;
+ }
+ } else {
+ errno = (unsigned int) errMapTable[winerrno];
+ }
+}
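+
+/* Usage sketch (hypothetical caller; GetLastError() is declared in
+** <windows.h>, which is not included by this file): code holding a Win32
+** error code maps it into errno with
+**
+**     _dosmaperr(GetLastError());
+**
+** after which errno holds the corresponding System V value, e.g.
+** ERROR_FILE_NOT_FOUND (2) becomes ENOENT.
+*/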
+
+#endif /* WIN32 && _MT && _DLL */
+
diff --git a/erts/emulator/sys/win32/driver_int.h b/erts/emulator/sys/win32/driver_int.h
new file mode 100644
index 0000000000..97e188816e
--- /dev/null
+++ b/erts/emulator/sys/win32/driver_int.h
@@ -0,0 +1,39 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*----------------------------------------------------------------------
+** Purpose : System dependent driver declarations
+**---------------------------------------------------------------------- */
+
+#ifndef __DRIVER_INT_H__
+#define __DRIVER_INT_H__
+
+#if !defined __WIN32__
+# define __WIN32__
+#endif
+
+/*
+ * This structure can be cast to a WSABUF structure.
+ */
+
+typedef struct _SysIOVec {
+ unsigned long iov_len;
+ char* iov_base;
+} SysIOVec;
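+
+/*
+ * Usage sketch (hypothetical; "sock" is an assumed SOCKET and WSASend()
+ * is declared in <winsock2.h>): because the two members mirror WSABUF's
+ * "u_long len; char FAR *buf;" layout, an I/O vector can be handed to the
+ * Winsock scatter/gather calls directly:
+ *
+ *     SysIOVec iov[2];
+ *     DWORD sent;
+ *     ... fill in iov[i].iov_base and iov[i].iov_len ...
+ *     WSASend(sock, (LPWSABUF) iov, 2, &sent, 0, NULL, NULL);
+ */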
+
+#endif
diff --git a/erts/emulator/sys/win32/erl.def b/erts/emulator/sys/win32/erl.def
new file mode 100644
index 0000000000..59e940847d
--- /dev/null
+++ b/erts/emulator/sys/win32/erl.def
@@ -0,0 +1,4 @@
+EXPORTS
+ erl_start
+ sys_get_key
+ sys_primitive_init
diff --git a/erts/emulator/sys/win32/erl_main.c b/erts/emulator/sys/win32/erl_main.c
new file mode 100644
index 0000000000..5471bffb52
--- /dev/null
+++ b/erts/emulator/sys/win32/erl_main.c
@@ -0,0 +1,29 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2000-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "global.h"
+
+void
+main(int argc, char **argv)
+{
+ erl_start(argc, argv);
+}
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
new file mode 100644
index 0000000000..d816cc2c07
--- /dev/null
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -0,0 +1,1361 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2007-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#define WANT_NONBLOCKING
+
+#include "sys.h"
+#include "erl_alloc.h"
+#include "erl_poll.h"
+
+/*
+ * Some debug macros
+ */
+
+/*#define HARDDEBUG */
+#ifdef HARDDEBUG
+#ifdef HARDTRACE
+#define HARDTRACEF(X) my_debug_printf##X
+#else
+#define HARDTRACEF(X)
+#endif
+
+#define HARDDEBUGF(X) my_debug_printf##X
+static void my_debug_printf(char *fmt, ...)
+{
+ char buffer[1024];
+ va_list args;
+
+ va_start(args, fmt);
+ erts_vsnprintf(buffer,1024,fmt,args);
+ va_end(args);
+ erts_fprintf(stderr,"%s\r\n",buffer);
+}
+#else
+#define HARDTRACEF(X)
+#define HARDDEBUGF(X)
+#endif
+
+#ifdef DEBUG
+#define NoMansLandFill 0xFD /* fill no-man's land with this */
+#define DeadLandFill 0xDD /* fill free objects with this */
+#define CleanLandFill 0xCD /* fill new objects with this */
+
+static void consistency_check(struct _Waiter* w);
+static void* debug_alloc(ErtsAlcType_t, Uint);
+static void* debug_realloc(ErtsAlcType_t, void *, Uint, Uint);
+
+# define SEL_ALLOC debug_alloc
+# define SEL_REALLOC debug_realloc
+# define SEL_FREE erts_free
+
+static void *debug_alloc(ErtsAlcType_t type, Uint size)
+{
+ void* p = erts_alloc(type, size);
+ memset(p, CleanLandFill, size);
+ return p;
+}
+
+static void *debug_realloc(ErtsAlcType_t type, void *ptr, Uint prev_size,
+ Uint size)
+{
+ void *p;
+
+ if (prev_size > size) {
+ size_t fill_size = (size_t) (prev_size - size);
+ void *fill_ptr = (void *) (((char *) ptr) + size);
+ memset(fill_ptr, NoMansLandFill, fill_size);
+ }
+
+ p = erts_realloc(type, ptr, size);
+
+ if (size > prev_size) {
+ size_t fill_size = (size_t) (size - prev_size);
+ void *fill_ptr = (void *) (((char *) p) + prev_size);
+ memset(fill_ptr, CleanLandFill, fill_size);
+ }
+
+ return p;
+}
+#else
+# define SEL_ALLOC erts_alloc
+# define SEL_REALLOC realloc_wrap
+# define SEL_FREE erts_free
+
+static ERTS_INLINE void *
+realloc_wrap(ErtsAlcType_t t, void *p, Uint ps, Uint s)
+{
+ return erts_realloc(t, p, s);
+}
+#endif
+
+
+#ifdef HARD_POLL_DEBUG
+#define OP_SELECT 1
+#define OP_DESELECT 2
+#define OP_FIRED 3
+#define OP_READ_BEGIN 4
+#define OP_READ_DONE 5
+#define OP_WRITE_BEGIN 6
+#define OP_WRITE_DONE 7
+#define OP_REPORTED 8
+#define OP_DIED 9
+#define OP_ASYNC_INIT 10
+#define OP_ASYNC_IMMED 11
+#define OP_FD_MOVED 12
+
+static struct {
+ int op;
+ ErtsSysFdType active;
+ int xdata;
+} debug_save_ops[1024];
+
+static int num_debug_save_ops = 0;
+
+static ErtsSysFdType active_debug_fd;
+static int active_debug_fd_set = 0;
+
+static erts_mtx_t save_ops_mtx;
+
+static void poll_debug_init(void)
+{
+ erts_mtx_init(&save_ops_mtx, "save_ops_lock");
+}
+
+void poll_debug_set_active_fd(ErtsSysFdType fd)
+{
+ erts_mtx_lock(&save_ops_mtx);
+ active_debug_fd_set = 1;
+ active_debug_fd = fd;
+ erts_mtx_unlock(&save_ops_mtx);
+}
+
+static void do_save_op(ErtsSysFdType fd, int op, int xdata)
+{
+ erts_mtx_lock(&save_ops_mtx);
+ if (fd == active_debug_fd && num_debug_save_ops < 1024) {
+ int x = num_debug_save_ops++;
+ debug_save_ops[x].op = op;
+ debug_save_ops[x].active = fd;
+ debug_save_ops[x].xdata = xdata;
+ }
+ erts_mtx_unlock(&save_ops_mtx);
+}
+
+void poll_debug_moved(ErtsSysFdType fd, int s1, int s2)
+{
+ do_save_op(fd,OP_FD_MOVED,s1 | (s2 << 16));
+}
+
+void poll_debug_select(ErtsSysFdType fd, int mode)
+{
+ do_save_op(fd,OP_SELECT,mode);
+}
+
+void poll_debug_deselect(ErtsSysFdType fd)
+{
+ do_save_op(fd,OP_DESELECT,0);
+}
+
+void poll_debug_fired(ErtsSysFdType fd)
+{
+ do_save_op(fd,OP_FIRED,0);
+}
+
+void poll_debug_read_begin(ErtsSysFdType fd)
+{
+ do_save_op(fd,OP_READ_BEGIN,0);
+}
+
+void poll_debug_read_done(ErtsSysFdType fd, int bytes)
+{
+ do_save_op(fd,OP_READ_DONE,bytes);
+}
+
+void poll_debug_async_initialized(ErtsSysFdType fd)
+{
+ do_save_op(fd,OP_ASYNC_INIT,0);
+}
+
+void poll_debug_async_immediate(ErtsSysFdType fd, int bytes)
+{
+ do_save_op(fd,OP_ASYNC_IMMED,bytes);
+}
+
+void poll_debug_write_begin(ErtsSysFdType fd)
+{
+ do_save_op(fd,OP_WRITE_BEGIN,0);
+}
+
+void poll_debug_write_done(ErtsSysFdType fd, int bytes)
+{
+ do_save_op(fd,OP_WRITE_DONE,bytes);
+}
+
+void poll_debug_reported(ErtsSysFdType fd, int mode)
+{
+ do_save_op(fd,OP_REPORTED,mode);
+}
+
+void poll_debug_died(ErtsSysFdType fd)
+{
+ do_save_op(fd,OP_DIED,0);
+}
+
+#endif /* HARD_POLL_DEBUG */
+
+/*
+ * End of debug macros
+ */
+
+
+
+/*
+ * Handles that we poll, but that are actually signalled from outside
+ * this module
+ */
+
+extern HANDLE erts_service_event;
+extern HANDLE erts_sys_break_event;
+
+
+/*
+ * The structure we hold for each event (i.e. fd)
+ */
+typedef struct _EventData {
+ HANDLE event; /* For convenience. */
+ ErtsPollEvents mode; /* The current select mode. */
+ struct _EventData *next; /* Next in free or delete lists. */
+} EventData;
+
+/*
+ * The structure to represent a waiter thread
+ */
+typedef struct _Waiter {
+ HANDLE events[MAXIMUM_WAIT_OBJECTS]; /* The events. */
+ EventData* evdata[MAXIMUM_WAIT_OBJECTS]; /* Pointers to associated data. */
+ int active_events; /* Number of events to wait for */
+ int total_events; /* Total number of events in the arrays. */
+ int highwater; /* Events processed up to here */
+ EventData evdata_heap[MAXIMUM_WAIT_OBJECTS]; /* Pre-allocated EventDatas */
+ EventData* first_free_evdata; /* Head of the free list of EventData objects. */
+ HANDLE go_ahead; /* The waiter may continue. (Auto-reset) */
+ void *xdata; /* Extra data for the waiter thread (the pollset). */
+ erts_tid_t this; /* Thread "handle" of this waiter */
+ erts_mtx_t mtx; /* Mutex for updating/reading the event arrays; the
+ part currently being waited on additionally requires
+ stopping the waiter thread before it can be updated */
+} Waiter;
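+
+/*
+ * Sketch of the layout maintained over events[]/evdata[] (as implied by
+ * remove_event_from_set(), threaded_waiter() and erts_poll_wait() below):
+ *
+ *     index 0                    the waiter's own stop/signal event
+ *     [1, active_events)         events currently waited on with
+ *                                WaitForMultipleObjects()
+ *     [active_events, highwater) events that have fired but have not yet
+ *                                been reported by erts_poll_wait()
+ *     [highwater, total_events)  events already reported, parked until
+ *                                the pollset restores them
+ */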
+
+/*
+ * The structure for a pollset. There can currently be only one...
+ */
+struct ErtsPollSet_ {
+ Waiter** waiter;
+ int allocated_waiters; /* Size of waiter array */
+ int num_waiters; /* Number of waiter threads. */
+ erts_atomic_t sys_io_ready; /* Tells us there is I/O ready (already). */
+ int restore_events; /* Tells us to restore waiters events
+ next time around */
+ HANDLE event_io_ready; /* To be used when waiting for io */
+ /* These are used to wait for workers to enter standby */
+ volatile int standby_wait_counter; /* Number of threads to wait for */
+ CRITICAL_SECTION standby_crit; /* CS to guard the counter */
+ HANDLE standby_wait_event; /* Event signalled when counter == 0 */
+#ifdef ERTS_SMP
+ erts_smp_atomic_t woken;
+ erts_smp_mtx_t mtx;
+ erts_smp_atomic_t interrupt;
+#endif
+ erts_smp_atomic_t timeout;
+};
+
+#ifdef ERTS_SMP
+
+#define ERTS_POLLSET_LOCK(PS) \
+ erts_smp_mtx_lock(&(PS)->mtx)
+#define ERTS_POLLSET_UNLOCK(PS) \
+ erts_smp_mtx_unlock(&(PS)->mtx)
+#define ERTS_POLLSET_SET_POLLED_CHK(PS) \
+ ((int) erts_smp_atomic_xchg(&(PS)->polled, (long) 1))
+#define ERTS_POLLSET_SET_POLLED(PS) \
+ erts_smp_atomic_set(&(PS)->polled, (long) 1)
+#define ERTS_POLLSET_UNSET_POLLED(PS) \
+ erts_smp_atomic_set(&(PS)->polled, (long) 0)
+#define ERTS_POLLSET_IS_POLLED(PS) \
+ ((int) erts_smp_atomic_read(&(PS)->polled))
+#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) \
+ ((int) erts_smp_atomic_xchg(&(PS)->woken, (long) 1))
+#define ERTS_POLLSET_SET_POLLER_WOKEN(PS) \
+ erts_smp_atomic_set(&(PS)->woken, (long) 1)
+#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS) \
+ erts_smp_atomic_set(&(PS)->woken, (long) 0)
+#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) \
+ ((int) erts_smp_atomic_read(&(PS)->woken))
+
+#define ERTS_POLLSET_UNSET_INTERRUPTED_CHK(PS) \
+ ((int) erts_smp_atomic_xchg(&(PS)->interrupt, (long) 0))
+#define ERTS_POLLSET_UNSET_INTERRUPTED(PS) \
+ erts_smp_atomic_set(&(PS)->interrupt, (long) 0)
+#define ERTS_POLLSET_SET_INTERRUPTED(PS) \
+ erts_smp_atomic_set(&(PS)->interrupt, (long) 1)
+#define ERTS_POLLSET_IS_INTERRUPTED(PS) \
+ ((int) erts_smp_atomic_read(&(PS)->interrupt))
+
+#else
+
+#define ERTS_POLLSET_LOCK(PS)
+#define ERTS_POLLSET_UNLOCK(PS)
+#define ERTS_POLLSET_SET_POLLED_CHK(PS) 0
+#define ERTS_POLLSET_UNSET_POLLED(PS)
+#define ERTS_POLLSET_IS_POLLED(PS) 0
+#define ERTS_POLLSET_SET_POLLER_WOKEN_CHK(PS) 1
+#define ERTS_POLLSET_SET_POLLER_WOKEN(PS)
+#define ERTS_POLLSET_UNSET_POLLER_WOKEN(PS)
+#define ERTS_POLLSET_IS_POLLER_WOKEN(PS) 1
+
+
+#endif
+
+/*
+ * While atomics are not yet implemented for Windows in the common library...
+ *
+ * The MSDN documentation states that on SMP machines, and with old
+ * compilers, InterlockedExchange is required to read and write
+ * interlocked variables correctly; otherwise the accesses may be
+ * reordered and the ordering of the atomic operations is destroyed.
+ * Although this is only mentioned in white papers, the problem in
+ * VS2003 is due to the IA64 architecture, so we can still count on the
+ * CPU not reordering accesses to volatile variables on the x86
+ * architecture, even with the slightly older compiler...
+ *
+ * So here is (hopefully) a subset of the generally working atomic
+ * variable accesses...
+ */
+
+#if defined(__GNUC__)
+# if defined(__i386__) || defined(__x86_64__)
+# define VOLATILE_IN_SEQUENCE 1
+# else
+# define VOLATILE_IN_SEQUENCE 0
+# endif
+#elif defined(_MSC_VER)
+# if _MSC_VER < 1300
+# define VOLATILE_IN_SEQUENCE 0 /* Don't trust really old compilers */
+# else
+# if defined(_M_IX86)
+# define VOLATILE_IN_SEQUENCE 1
+# else /* I.e. IA64 */
+# if _MSC_VER >= 1400
+# define VOLATILE_IN_SEQUENCE 1
+# else
+# define VOLATILE_IN_SEQUENCE 0
+# endif
+# endif
+# endif
+#else
+# define VOLATILE_IN_SEQUENCE 0
+#endif
+
+
+
+/*
+ * Communication with sys_interrupt
+ */
+
+#ifdef ERTS_SMP
+extern erts_smp_atomic_t erts_break_requested;
+#define ERTS_SET_BREAK_REQUESTED \
+ erts_smp_atomic_set(&erts_break_requested, (long) 1)
+#define ERTS_UNSET_BREAK_REQUESTED \
+ erts_smp_atomic_set(&erts_break_requested, (long) 0)
+#else
+extern volatile int erts_break_requested;
+#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
+#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0)
+#endif
+
+static erts_mtx_t break_waiter_lock;
+static HANDLE break_happened_event;
+static erts_atomic_t break_waiter_state;
+#define BREAK_WAITER_GOT_BREAK 1
+#define BREAK_WAITER_GOT_HALT 2
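+
+/*
+ * Sketch of the break/halt flow (as implemented by break_waiter() and
+ * erts_poll_wait() below): sys_interrupt signals erts_sys_break_event
+ * (or the service handler signals erts_service_event), the break_waiter
+ * thread records which one fired in break_waiter_state and sets
+ * break_happened_event, which is one of the two handles that
+ * erts_poll_wait() blocks on.
+ */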
+
+
+/*
+ * Forward declarations
+ */
+
+static void *threaded_waiter(void *param);
+static void *break_waiter(void *param);
+
+/*
+ * Synchronization macros and functions
+ */
+#define START_WAITER(PS, w) \
+ SetEvent((w)->go_ahead)
+
+#define STOP_WAITER(PS,w) \
+do { \
+ setup_standby_wait((PS),1); \
+ SetEvent((w)->events[0]); \
+ wait_standby(PS); \
+} while(0)
+
+#define START_WAITERS(PS) \
+do { \
+ int i; \
+ for (i = 0; i < (PS)->num_waiters; i++) { \
+ SetEvent((PS)->waiter[i]->go_ahead); \
+ } \
+ } while(0)
+
+#define STOP_WAITERS(PS) \
+do { \
+ int i; \
+ setup_standby_wait((PS),(PS)->num_waiters); \
+ for (i = 0; i < (PS)->num_waiters; i++) { \
+ SetEvent((PS)->waiter[i]->events[0]); \
+ } \
+ wait_standby(PS); \
+ } while(0)
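+
+/*
+ * Sketch of the intended stop/modify/start protocol (as used by
+ * set_driver_select() and cancel_driver_select() below): a thread that
+ * wants to touch a waiter's event arrays first parks the waiter thread,
+ * edits the arrays under the waiter mutex, and then lets it continue:
+ *
+ *     STOP_WAITER(ps, w);
+ *     erts_mtx_lock(&w->mtx);
+ *     ...add or remove events...
+ *     erts_mtx_unlock(&w->mtx);
+ *     START_WAITER(ps, w);
+ */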
+
+#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT && !defined(ERTS_SMP)
+
+static ERTS_INLINE int
+unset_interrupted_chk(ErtsPollSet ps)
+{
+ /* This operation isn't atomic, but we have no need at all for an
+ atomic operation here... */
+ int res = ps->interrupt;
+ ps->interrupt = 0;
+ return res;
+}
+
+#endif
+
+#ifdef ERTS_SMP
+static ERTS_INLINE void
+wake_poller(ErtsPollSet ps)
+{
+ if (!ERTS_POLLSET_SET_POLLER_WOKEN_CHK(ps)) {
+ SetEvent(ps->event_io_ready);
+ }
+}
+#endif
+
+static void setup_standby_wait(ErtsPollSet ps, int num_threads)
+{
+ EnterCriticalSection(&(ps->standby_crit));
+ ps->standby_wait_counter = num_threads;
+ ResetEvent(ps->standby_wait_event);
+ LeaveCriticalSection(&(ps->standby_crit));
+}
+
+static void signal_standby(ErtsPollSet ps)
+{
+ EnterCriticalSection(&(ps->standby_crit));
+ --(ps->standby_wait_counter);
+ if (ps->standby_wait_counter < 0) {
+ LeaveCriticalSection(&(ps->standby_crit));
+ erl_exit(1,"Standby signalled by more threads than expected");
+ }
+ if (!(ps->standby_wait_counter)) {
+ SetEvent(ps->standby_wait_event);
+ }
+ LeaveCriticalSection(&(ps->standby_crit));
+}
+
+static void wait_standby(ErtsPollSet ps)
+{
+ WaitForSingleObject(ps->standby_wait_event,INFINITE);
+}
+
+static void remove_event_from_set(Waiter *w, int j)
+{
+ w->evdata[j]->event = INVALID_HANDLE_VALUE;
+ w->evdata[j]->mode = 0;
+ w->evdata[j]->next = w->first_free_evdata;
+ w->first_free_evdata = w->evdata[j];
+
+ /*
+ * If the event is active, we will overwrite it
+ * with the last active event and make the hole
+ * the first non-active event.
+ */
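+ /*
+ * Worked example (illustrative): with active_events = 3, highwater = 5,
+ * total_events = 6 and j = 1, the last active event (index 2) fills
+ * the hole at index 1, the last fired-but-unreported event (index 4)
+ * moves to index 2, the last parked event (index 5) moves to index 4,
+ * and all three counters are decremented.
+ */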
+
+ if (j < w->active_events) {
+ w->active_events--;
+ w->highwater--;
+ w->total_events--;
+ w->events[j] = w->events[w->active_events];
+ w->evdata[j] = w->evdata[w->active_events];
+ w->events[w->active_events] = w->events[w->highwater];
+ w->evdata[w->active_events] = w->evdata[w->highwater];
+ w->events[w->highwater] = w->events[w->total_events];
+ w->evdata[w->highwater] = w->evdata[w->total_events];
+ } else if (j < w->highwater) {
+ w->highwater--;
+ w->total_events--;
+ w->events[j] = w->events[w->highwater];
+ w->evdata[j] = w->evdata[w->highwater];
+ w->events[w->highwater] = w->events[w->total_events];
+ w->evdata[w->highwater] = w->evdata[w->total_events];
+ } else {
+ w->total_events--;
+ w->events[j] = w->events[w->total_events];
+ w->evdata[j] = w->evdata[w->total_events];
+ }
+
+#ifdef DEBUG
+ w->events[w->total_events] = (HANDLE) CleanLandFill;
+ w->evdata[w->total_events] = (EventData *) CleanLandFill;
+ consistency_check(w);
+#endif
+}
+
+/*
+ * Thread handling
+ */
+
+#ifdef DEBUG
+static void consistency_check(Waiter* w)
+{
+ int i;
+
+ ASSERT(w->active_events <= w->total_events);
+ ASSERT(w->evdata[0] == NULL);
+
+ for (i = 1; i < w->total_events; i++) {
+ ASSERT(w->events[i] == w->evdata[i]->event);
+ ASSERT(w->evdata[i]->mode != 0);
+ }
+}
+
+#endif
+
+static void new_waiter(ErtsPollSet ps)
+{
+ register Waiter* w;
+ erts_tid_t thread;
+ int i;
+
+ if (ps->num_waiters == ps->allocated_waiters) {
+ Uint old_size = sizeof(Waiter *)*ps->allocated_waiters;
+ ps->allocated_waiters += 64;
+ ps->waiter = SEL_REALLOC(ERTS_ALC_T_WAITER_OBJ,
+ (void *) ps->waiter,
+ old_size,
+ sizeof(Waiter *) * (ps->allocated_waiters));
+ }
+
+ w = (Waiter *) SEL_ALLOC(ERTS_ALC_T_WAITER_OBJ, sizeof(Waiter));
+ ps->waiter[ps->num_waiters] = w;
+
+ w->events[0] = CreateAutoEvent(FALSE);
+ w->evdata[0] = NULL; /* Should never be used. */
+ w->active_events = 1;
+ w->highwater = 1;
+ w->total_events = 1;
+ erts_mtx_init(&w->mtx, "pollwaiter");
+
+
+ /*
+ * Form the free list of EventData objects.
+ */
+
+ w->evdata_heap[0].next = 0; /* Last in free list. */
+ for (i = 1; i < MAXIMUM_WAIT_OBJECTS; i++) {
+ w->evdata_heap[i].next = w->evdata_heap+i-1;
+ }
+ w->first_free_evdata = w->evdata_heap+MAXIMUM_WAIT_OBJECTS-1;
+
+ /*
+ * Create the other events.
+ */
+
+ w->go_ahead = CreateAutoEvent(FALSE);
+
+ /*
+ * Create the thread.
+ */
+ w->xdata = ps;
+ erts_thr_create(&thread, &threaded_waiter, w, NULL);
+ w->this = thread;
+
+ /*
+ * Finally, done.
+ */
+
+ (ps->num_waiters)++;
+}
+
+static void *break_waiter(void *param)
+{
+ HANDLE harr[2];
+ int i = 0;
+ harr[i++] = erts_sys_break_event;
+ if (erts_service_event != NULL) {
+ harr[i++] = erts_service_event;
+ }
+
+ for(;;) {
+ switch (WaitForMultipleObjects(i,harr,FALSE,INFINITE)) {
+ case WAIT_OBJECT_0:
+ ResetEvent(harr[0]);
+ erts_mtx_lock(&break_waiter_lock);
+ erts_atomic_set(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
+ SetEvent(break_happened_event);
+ erts_mtx_unlock(&break_waiter_lock);
+ break;
+ case (WAIT_OBJECT_0+1):
+ ResetEvent(harr[1]);
+ erts_mtx_lock(&break_waiter_lock);
+ erts_atomic_set(&break_waiter_state,BREAK_WAITER_GOT_HALT);
+ SetEvent(break_happened_event);
+ erts_mtx_unlock(&break_waiter_lock);
+ break;
+ default:
+ erl_exit(1,"Unexpected event in break_waiter");
+ }
+ }
+}
+
+static void *threaded_waiter(void *param)
+{
+ register Waiter* w = (Waiter *) param;
+ ErtsPollSet ps = (ErtsPollSet) w->xdata;
+#ifdef HARD_POLL_DEBUG2
+ HANDLE oold_fired[64];
+ int num_oold_fired;
+ HANDLE old_fired[64];
+ int num_old_fired = 0;
+ HANDLE fired[64];
+ int num_fired = 0;
+ HANDLE errors[1024];
+ int num_errors = 0;
+ HANDLE save_events[64];
+ int save_active_events;
+ int save_total_events;
+ int save_highwater;
+#endif
+
+ again:
+ WaitForSingleObject(w->go_ahead, INFINITE);
+ /* Atomic enough when just checking, skip lock */
+ if (w->total_events == 0) {
+ return NULL;
+ }
+ if (w->active_events == 0) {
+ goto again;
+ }
+ ASSERT(w->evdata[0] == NULL);
+#ifdef HARD_POLL_DEBUG2
+ num_oold_fired = num_old_fired;
+ memcpy(oold_fired,old_fired,num_old_fired*sizeof(HANDLE));
+ num_old_fired = num_fired;
+ memcpy(old_fired,fired,num_fired*sizeof(HANDLE));
+ num_fired = 0;
+#endif
+ for (;;) {
+ int i;
+ int j;
+#ifdef HARD_POLL_DEBUG2
+ erts_mtx_lock(&w->mtx);
+ memcpy(save_events,w->events,w->active_events*sizeof(HANDLE));
+ save_active_events = w->active_events;
+ save_total_events = w->total_events;
+ save_highwater = w->highwater;
+ erts_mtx_unlock(&w->mtx);
+#endif
+ i = WaitForMultipleObjects(w->active_events, w->events, FALSE, INFINITE);
+ switch (i) {
+ case WAIT_FAILED:
+ DEBUGF(("Wait failed: %s\n", last_error()));
+ erts_mtx_lock(&w->mtx);
+ /* Don't wait for our signal event */
+ for (j = 1; j < w->active_events; j++) {
+ int tmp;
+ if ((tmp = WaitForSingleObject(w->events[j], 0))
+ == WAIT_FAILED) {
+ DEBUGF(("Invalid handle: i = %d, handle = 0x%0x\n",
+ j, w->events[j]));
+#ifdef HARD_POLL_DEBUG2
+ if (num_errors < 1024)
+ errors[num_errors++] = w->events[j];
+#endif
+#ifdef HARD_POLL_DEBUG
+ poll_debug_died(w->events[j]);
+#endif
+ remove_event_from_set(w,j);
+#ifdef DEBUG
+ consistency_check(w);
+#endif
+ } else if (tmp == WAIT_OBJECT_0) {
+ i = WAIT_OBJECT_0 + j;
+ goto event_happened;
+ }
+ }
+ erts_mtx_unlock(&w->mtx);
+ break;
+ case WAIT_OBJECT_0:
+ signal_standby(ps);
+ goto again;
+#ifdef DEBUG
+ case WAIT_TIMEOUT:
+ ASSERT(0);
+#endif
+ default:
+ erts_mtx_lock(&w->mtx);
+#ifdef HARD_POLL_DEBUG2
+ {
+ int x = memcmp(save_events,w->events,w->active_events*sizeof(HANDLE));
+ ASSERT(x == 0 && save_active_events == w->active_events);
+ }
+#endif
+event_happened:
+#ifdef DEBUG
+ consistency_check(w);
+#endif
+ ASSERT(WAIT_OBJECT_0 < i && i < WAIT_OBJECT_0+w->active_events);
+ if (!erts_atomic_xchg(&ps->sys_io_ready,1)) {
+ HARDDEBUGF(("SET EventIoReady (%d)",erts_atomic_read(&ps->sys_io_ready)));
+ SetEvent(ps->event_io_ready);
+ } else {
+ HARDDEBUGF(("DONT SET EventIoReady"));
+ }
+
+ /*
+ * The main thread won't start working on our arrays until we're
+ * stopped, so we can work in peace even though the main thread runs
+ */
+ ASSERT(i >= WAIT_OBJECT_0+1);
+ i -= WAIT_OBJECT_0;
+ ASSERT(i >= 1);
+ w->active_events--;
+ HARDDEBUGF(("i = %d, a,h,t = %d,%d,%d",i,
+ w->active_events, w->highwater, w->total_events));
+#ifdef HARD_POLL_DEBUG2
+ fired[num_fired++] = w->events[i];
+#endif
+#ifdef HARD_POLL_DEBUG
+ poll_debug_fired(w->events[i]);
+#endif
+ if (i < w->active_events) {
+ HANDLE te = w->events[i];
+ EventData* tp = w->evdata[i];
+ w->events[i] = w->events[w->active_events];
+ w->evdata[i] = w->evdata[w->active_events];
+ w->events[w->active_events] = te;
+ w->evdata[w->active_events] = tp;
+ }
+ HARDDEBUGF(("i = %d, a,h,t = %d,%d,%d",i,
+ w->active_events, w->highwater, w->total_events));
+#ifdef DEBUG
+ consistency_check(w);
+#endif
+ erts_mtx_unlock(&w->mtx);
+ break;
+ }
+ }
+}
+
+/*
+ * The actual adding and removing from pollset utilities
+ */
+
+static int set_driver_select(ErtsPollSet ps, HANDLE event, ErtsPollEvents mode)
+{
+ int i;
+ int best_waiter = -1; /* The waiter with lowest number of events. */
+ int lowest = MAXIMUM_WAIT_OBJECTS; /* Lowest number of events
+ * in any waiter.
+ */
+ EventData* ev;
+ Waiter* w;
+
+ /*
+ * Find the waiter which is least busy.
+ */
+
+#ifdef HARD_POLL_DEBUG
+ poll_debug_select(event, mode);
+#endif
+
+ /* total_events can no longer be read without the lock; it is changed by the waiter thread */
+ for (i = 0; i < ps->num_waiters; i++) {
+ erts_mtx_lock(&(ps->waiter[i]->mtx));
+ if (ps->waiter[i]->total_events < lowest) {
+ lowest = ps->waiter[i]->total_events;
+ best_waiter = i;
+ }
+ erts_mtx_unlock(&(ps->waiter[i]->mtx));
+ }
+
+ /*
+ * Stop the selected waiter, or start a new waiter if all were busy.
+ */
+
+ if (best_waiter >= 0) {
+ w = ps->waiter[best_waiter];
+ STOP_WAITER(ps,w);
+ erts_mtx_lock(&w->mtx);
+ } else {
+ new_waiter(ps);
+ w = ps->waiter[(ps->num_waiters)-1];
+ erts_mtx_lock(&w->mtx);
+ }
+
+#ifdef DEBUG
+ consistency_check(w);
+#endif
+
+ /*
+ * Allocate and initialize an EventData structure.
+ */
+
+ ev = w->first_free_evdata;
+ w->first_free_evdata = ev->next;
+ ev->event = event;
+ ev->mode = mode;
+ ev->next = NULL;
+
+ /*
+ * At this point, the selected waiter (newly-created or not) is
+ * standing by. Put the new event into the active part of the array.
+ */
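+ /*
+ * Illustrative example: with active_events = 2, highwater = 3 and
+ * total_events = 4, the parked event at index 3 moves to index 4, the
+ * fired-but-unreported event at index 2 moves to index 3, the new
+ * event is stored at index 2, and all three counters grow by one.
+ */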
+
+ if (w->active_events < w->total_events) {
+ /*
+ * Make room for the new event at the end of the active part of
+ * the array by moving the first non-active event up to the old
+ * highwater slot and the first parked event to the very end.
+ */
+
+#ifdef HARD_POLL_DEBUG
+ poll_debug_moved(w->events[w->highwater],w->highwater,w->total_events);
+#endif
+ w->events[w->total_events] = w->events[w->highwater];
+ w->evdata[w->total_events] = w->evdata[w->highwater];
+#ifdef HARD_POLL_DEBUG
+ poll_debug_moved(w->events[w->active_events],w->active_events,w->highwater);
+#endif
+ w->events[w->highwater] = w->events[w->active_events];
+ w->evdata[w->highwater] = w->evdata[w->active_events];
+
+ }
+ w->events[w->active_events] = event;
+ w->evdata[w->active_events] = ev;
+ w->active_events++;
+ w->highwater++;
+ w->total_events++;
+
+#ifdef DEBUG
+ consistency_check(w);
+#endif
+ erts_mtx_unlock(&w->mtx);
+ START_WAITER(ps,w);
+ HARDDEBUGF(("add select %d %d %d %d",best_waiter,
+ w->active_events,w->highwater,w->total_events));
+ return mode;
+}
+
+
+static int cancel_driver_select(ErtsPollSet ps, HANDLE event)
+{
+ int i;
+
+ ASSERT(event != INVALID_HANDLE_VALUE);
+ restart:
+ for (i = 0; i < ps->num_waiters; i++) {
+ Waiter* w = ps->waiter[i];
+ int j;
+
+ erts_mtx_lock(&w->mtx);
+#ifdef DEBUG
+ consistency_check(w);
+#endif
+ for (j = 0; j < w->total_events; j++) {
+ if (w->events[j] == event) {
+ int stopped = 0;
+ /*
+ * Free the event's EventData structure.
+ */
+
+ if (j < w->active_events) {
+ HARDDEBUGF(("Stopped in remove select"));
+ stopped = 1;
+ erts_mtx_unlock(&w->mtx);
+ STOP_WAITER(ps,w);
+ erts_mtx_lock(&w->mtx);
+ if ( j >= w->active_events || w->events[j] != event) {
+ /* things happened while unlocked */
+ START_WAITER(ps,w);
+ erts_mtx_unlock(&w->mtx);
+ goto restart;
+ }
+ }
+#ifdef HARD_POLL_DEBUG
+ poll_debug_deselect(w->events[j]);
+#endif
+ remove_event_from_set(w, j);
+ if (stopped) {
+ START_WAITER(ps,w);
+ }
+ HARDDEBUGF(("removed select %d,%d %d %d %d",i,j,
+ w->active_events,w->highwater,w->total_events));
+ break;
+ }
+ }
+ erts_mtx_unlock(&w->mtx);
+ }
+ return 0;
+}
+
+/*
+ * Interface functions
+ */
+
+void erts_poll_interrupt(ErtsPollSet ps, int set /* bool */)
+{
+ HARDTRACEF(("In erts_poll_interrupt(%d)",set));
+#ifdef ERTS_SMP
+ if (set) {
+ ERTS_POLLSET_SET_INTERRUPTED(ps);
+ wake_poller(ps);
+ }
+ else {
+ ERTS_POLLSET_UNSET_INTERRUPTED(ps);
+ }
+#endif
+ HARDTRACEF(("Out erts_poll_interrupt(%d)",set));
+}
+
+void erts_poll_interrupt_timed(ErtsPollSet ps,
+ int set /* bool */,
+ long msec)
+{
+ HARDTRACEF(("In erts_poll_interrupt_timed(%d,%ld)",set,msec));
+#ifdef ERTS_SMP
+ if (set) {
+ if (erts_smp_atomic_read(&ps->timeout) > msec) {
+ ERTS_POLLSET_SET_INTERRUPTED(ps);
+ wake_poller(ps);
+ }
+ }
+ else {
+ ERTS_POLLSET_UNSET_INTERRUPTED(ps);
+ }
+#endif
+ HARDTRACEF(("Out erts_poll_interrupt_timed"));
+}
+
+
+/*
+ * Windows is special, there is actually only one event type, and
+ * the only difference between ERTS_POLL_EV_IN and ERTS_POLL_EV_OUT
+ * is which driver callback will eventually be called.
+ */
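+/*
+ * The mapping applied below (only one wait mode per handle):
+ *
+ *     pe contains ERTS_POLL_EV_IN, or neither flag  -> "input" mode
+ *     pe contains ERTS_POLL_EV_OUT only             -> "output" mode
+ */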
+static ErtsPollEvents do_poll_control(ErtsPollSet ps,
+ ErtsSysFdType fd,
+ ErtsPollEvents pe,
+ int on /* bool */)
+{
+ HANDLE event = (HANDLE) fd;
+ ErtsPollEvents mode;
+ ErtsPollEvents result;
+ ASSERT(event != INVALID_HANDLE_VALUE);
+
+ if (on) {
+ if (pe & ERTS_POLL_EV_IN || !(pe & ERTS_POLL_EV_OUT )) {
+ mode = ERTS_POLL_EV_IN;
+ } else {
+ mode = ERTS_POLL_EV_OUT; /* ready output only in this case */
+ }
+ result = set_driver_select(ps, event, mode);
+ } else {
+ result = cancel_driver_select(ps, event);
+ }
+ return result;
+}
+
+ErtsPollEvents erts_poll_control(ErtsPollSet ps,
+ ErtsSysFdType fd,
+ ErtsPollEvents pe,
+ int on,
+ int* do_wake) /* In: Wake up polling thread */
+ /* Out: Poller is woken */
+{
+ ErtsPollEvents result;
+ HARDTRACEF(("In erts_poll_control(0x%08X, %u, %d)",(unsigned long) fd, (unsigned) pe, on));
+ ERTS_POLLSET_LOCK(ps);
+ result=do_poll_control(ps,fd,pe,on);
+ ERTS_POLLSET_UNLOCK(ps);
+ *do_wake = 0; /* Never any need to wake polling threads on windows */
+ HARDTRACEF(("Out erts_poll_control -> %u",(unsigned) result));
+ return result;
+}
+
+void erts_poll_controlv(ErtsPollSet ps,
+ ErtsPollControlEntry pcev[],
+ int len)
+{
+ int i;
+
+ HARDTRACEF(("In erts_poll_controlv(%d)",len));
+ ERTS_POLLSET_LOCK(ps);
+
+ for (i = 0; i < len; i++) {
+ pcev[i].events = do_poll_control(ps,
+ pcev[i].fd,
+ pcev[i].events,
+ pcev[i].on);
+ }
+ ERTS_POLLSET_UNLOCK(ps);
+ HARDTRACEF(("Out erts_poll_controlv"));
+}
+
+int erts_poll_wait(ErtsPollSet ps,
+ ErtsPollResFd pr[],
+ int *len,
+ SysTimeval *utvp)
+{
+ SysTimeval *tvp = utvp;
+ SysTimeval itv;
+ int no_fds;
+ DWORD timeout;
+ EventData* ev;
+ int res = 0;
+ int num = 0;
+ int n;
+ int i;
+ int break_state;
+
+ HARDTRACEF(("In erts_poll_wait"));
+ ERTS_POLLSET_LOCK(ps);
+
+ if (!erts_atomic_read(&ps->sys_io_ready) && ps->restore_events) {
+ HARDDEBUGF(("Restore events: %d",ps->num_waiters));
+ ps->restore_events = 0;
+ for (i = 0; i < ps->num_waiters; ++i) {
+ Waiter* w = ps->waiter[i];
+ erts_mtx_lock(&w->mtx);
+ HARDDEBUGF(("Maybe reset %d %d %d %d",i,
+ w->active_events,w->highwater,w->total_events));
+ if (w->active_events < w->total_events) {
+ erts_mtx_unlock(&w->mtx);
+ STOP_WAITER(ps,w);
+ HARDDEBUGF(("Need reset %d %d %d %d",i,
+ w->active_events,w->highwater,w->total_events));
+ erts_mtx_lock(&w->mtx);
+ /* Needs a reset; just check that it hasn't got more to tell */
+ if (w->highwater != w->active_events) {
+ HARDDEBUGF(("Oups!"));
+ /* Oups, got signalled before we took the lock, can't reset */
+ if(erts_atomic_read(&ps->sys_io_ready) == 0) {
+ erl_exit(1,"Internal error: "
+ "Inconsistent io structures in erl_poll.\n");
+ }
+ START_WAITER(ps,w);
+ erts_mtx_unlock(&w->mtx);
+ ps->restore_events = 1;
+ continue;
+ }
+ w->active_events = w->highwater = w->total_events;
+ START_WAITER(ps,w);
+ erts_mtx_unlock(&w->mtx);
+ } else {
+ erts_mtx_unlock(&w->mtx);
+ }
+ }
+ }
+
+ no_fds = *len;
+
+#ifdef ERTS_POLL_MAX_RES
+ if (no_fds >= ERTS_POLL_MAX_RES)
+ no_fds = ERTS_POLL_MAX_RES;
+#endif
+
+
+ ResetEvent(ps->event_io_ready);
+ ERTS_POLLSET_UNSET_POLLER_WOKEN(ps);
+
+#ifdef ERTS_SMP
+ if (ERTS_POLLSET_IS_INTERRUPTED(ps)) {
+ /* Interrupted; use zero timeout */
+ itv.tv_sec = 0;
+ itv.tv_usec = 0;
+ tvp = &itv;
+ }
+#endif
+
+ timeout = tvp->tv_sec * 1000 + tvp->tv_usec / 1000;
+ /*HARDDEBUGF(("timeout = %ld",(long) timeout));*/
+ erts_smp_atomic_set(&ps->timeout, timeout);
+
+ if (timeout > 0 && ! erts_atomic_read(&ps->sys_io_ready) && ! erts_atomic_read(&break_waiter_state)) {
+ HANDLE harr[2] = {ps->event_io_ready, break_happened_event};
+ int num_h = 2;
+
+ HARDDEBUGF(("Start waiting %d [%d]",num_h, (long) timeout));
+ ERTS_POLLSET_UNLOCK(ps);
+ WaitForMultipleObjects(num_h, harr, FALSE, timeout);
+ ERTS_POLLSET_LOCK(ps);
+ HARDDEBUGF(("Stop waiting %d [%d]",num_h, (long) timeout));
+ }
+
+ ERTS_UNSET_BREAK_REQUESTED;
+ if(erts_atomic_read(&break_waiter_state)) {
+ erts_mtx_lock(&break_waiter_lock);
+ break_state = erts_atomic_read(&break_waiter_state);
+ erts_atomic_set(&break_waiter_state,0);
+ ResetEvent(break_happened_event);
+ erts_mtx_unlock(&break_waiter_lock);
+ switch (break_state) {
+ case BREAK_WAITER_GOT_BREAK:
+ ERTS_SET_BREAK_REQUESTED;
+ break;
+ case BREAK_WAITER_GOT_HALT:
+ erl_exit(0,"");
+ break;
+ default:
+ break;
+ }
+ }
+
+ ERTS_POLLSET_SET_POLLER_WOKEN(ps);
+
+ if (!erts_atomic_read(&ps->sys_io_ready)) {
+ res = EINTR;
+ HARDDEBUGF(("EINTR!"));
+ goto done;
+ }
+
+ erts_atomic_set(&ps->sys_io_ready,0);
+
+ n = ps->num_waiters;
+
+ for (i = 0; i < n; i++) {
+ Waiter* w = ps->waiter[i];
+ int j;
+ int first;
+ int last;
+ erts_mtx_lock(&w->mtx);
+#ifdef DEBUG
+ consistency_check(w);
+#endif
+
+ first = w->active_events;
+ last = w->highwater;
+ w->highwater = w->active_events;
+
+ for (j = last-1; j >= first; --j) {
+ if (num >= no_fds) {
+ w->highwater=j+1;
+ erts_mtx_unlock(&w->mtx);
+ /* This might mean we still have data to report, set
+ back the global flag! */
+ erts_atomic_set(&ps->sys_io_ready,1);
+ HARDDEBUGF(("To many FD's to report!"));
+ goto done;
+ }
+ HARDDEBUGF(("SET! Restore events"));
+ ps->restore_events = 1;
+ HARDDEBUGF(("Report %d,%d",i,j));
+ pr[num].fd = (ErtsSysFdType) w->events[j];
+ pr[num].events = w->evdata[j]->mode;
+#ifdef HARD_POLL_DEBUG
+ poll_debug_reported(w->events[j],w->highwater | (j << 16));
+ poll_debug_reported(w->events[j],first | (last << 16));
+#endif
+ ++num;
+ }
+
+#ifdef DEBUG
+ consistency_check(w);
+#endif
+ erts_mtx_unlock(&w->mtx);
+ }
+ done:
+ erts_smp_atomic_set(&ps->timeout, LONG_MAX);
+ *len = num;
+ ERTS_POLLSET_UNLOCK(ps);
+ HARDTRACEF(("Out erts_poll_wait"));
+ return res;
+
+}
+
+int erts_poll_max_fds(void)
+{
+ int res = sys_max_files();
+ HARDTRACEF(("In/Out erts_poll_max_fds -> %d",res));
+ return res;
+}
+
+void erts_poll_info(ErtsPollSet ps,
+ ErtsPollInfo *pip)
+{
+ Uint size = 0;
+ Uint num_events = 0;
+ int i;
+
+ HARDTRACEF(("In erts_poll_info"));
+ ERTS_POLLSET_LOCK(ps);
+
+ size += sizeof(struct ErtsPollSet_);
+ size += sizeof(Waiter *) * ps->allocated_waiters;
+ for (i = 0; i < ps->num_waiters; ++i) {
+ Waiter *w = ps->waiter[i];
+ if (w != NULL) {
+ size += sizeof(Waiter);
+ erts_mtx_lock(&w->mtx);
+ size += sizeof(EventData) * w->total_events;
+ num_events += (w->total_events - 1); /* First event is internal */
+ erts_mtx_unlock(&w->mtx);
+ }
+ }
+
+ pip->primary = "WaitForMultipleObjects";
+
+ pip->fallback = NULL;
+
+ pip->kernel_poll = NULL;
+
+ pip->memory_size = size;
+
+ pip->poll_set_size = num_events;
+
+ pip->fallback_poll_set_size = 0;
+
+ pip->lazy_updates = 0;
+
+ pip->pending_updates = 0;
+
+ pip->batch_updates = 0;
+
+ pip->concurrent_updates = 0;
+ ERTS_POLLSET_UNLOCK(ps);
+
+ pip->max_fds = erts_poll_max_fds();
+ HARDTRACEF(("Out erts_poll_info"));
+
+}
+
+ErtsPollSet erts_poll_create_pollset(void)
+{
+ ErtsPollSet ps = SEL_ALLOC(ERTS_ALC_T_POLLSET,
+ sizeof(struct ErtsPollSet_));
+ HARDTRACEF(("In erts_poll_create_pollset"));
+
+ ps->num_waiters = 0;
+ ps->allocated_waiters = 64;
+ ps->waiter = SEL_ALLOC(ERTS_ALC_T_WAITER_OBJ,
+ sizeof(Waiter *)*ps->allocated_waiters);
+ InitializeCriticalSection(&(ps->standby_crit));
+ ps->standby_wait_counter = 0;
+ ps->event_io_ready = CreateManualEvent(FALSE);
+ ps->standby_wait_event = CreateManualEvent(FALSE);
+ erts_atomic_init(&ps->sys_io_ready,0);
+ ps->restore_events = 0;
+
+#ifdef ERTS_SMP
+ erts_smp_atomic_init(&ps->woken, 0);
+ erts_smp_mtx_init(&ps->mtx, "pollset");
+ erts_smp_atomic_init(&ps->interrupt, 0);
+#endif
+ erts_smp_atomic_init(&ps->timeout, LONG_MAX);
+
+ HARDTRACEF(("Out erts_poll_create_pollset"));
+ return ps;
+}
+
+void erts_poll_destroy_pollset(ErtsPollSet ps)
+{
+ int i;
+ HARDTRACEF(("In erts_poll_destroy_pollset"));
+ ERTS_POLLSET_LOCK(ps);
+ STOP_WAITERS(ps);
+ for (i=0;i<ps->num_waiters;++i) {
+ Waiter *w = ps->waiter[i];
+ void *dummy;
+ erts_tid_t t = w->this;
+ /* Assume we're alone, no locking here... */
+ w->active_events = w->total_events = w->highwater = 0;
+ START_WAITER(ps,w);
+ erts_thr_join(t,&dummy);
+ CloseHandle(w->go_ahead);
+ CloseHandle(w->events[0]);
+ erts_mtx_destroy(&w->mtx);
+ SEL_FREE(ERTS_ALC_T_WAITER_OBJ, (void *) w);
+ }
+ SEL_FREE(ERTS_ALC_T_WAITER_OBJ,ps->waiter);
+ CloseHandle(ps->event_io_ready);
+ CloseHandle(ps->standby_wait_event);
+ ERTS_POLLSET_UNLOCK(ps);
+#ifdef ERTS_SMP
+ erts_smp_mtx_destroy(&ps->mtx);
+#endif
+ SEL_FREE(ERTS_ALC_T_POLLSET, (void *) ps);
+ HARDTRACEF(("Out erts_poll_destroy_pollset"));
+}
+
+/*
+ * Actually mostly initializes the friend module sys_interrupt...
+ */
+void erts_poll_init(void)
+{
+ erts_tid_t thread;
+
+#ifdef HARD_POLL_DEBUG
+ poll_debug_init();
+#endif
+
+ HARDTRACEF(("In erts_poll_init"));
+ erts_sys_break_event = CreateManualEvent(FALSE);
+
+ erts_mtx_init(&break_waiter_lock,"break_waiter_lock");
+ break_happened_event = CreateManualEvent(FALSE);
+ erts_atomic_init(&break_waiter_state, 0);
+
+ erts_thr_create(&thread, &break_waiter, NULL, NULL);
+ ERTS_UNSET_BREAK_REQUESTED;
+ HARDTRACEF(("Out erts_poll_init"));
+}
+
+/*
+ * Non-Windows-friendly interface; not used when fds are not contiguous
+ */
+void erts_poll_get_selected_events(ErtsPollSet ps,
+ ErtsPollEvents ev[],
+ int len)
+{
+ int i;
+ HARDTRACEF(("In erts_poll_get_selected_events"));
+ for (i = 0; i < len; ++i)
+ ev[i] = 0;
+ HARDTRACEF(("Out erts_poll_get_selected_events"));
+}
diff --git a/erts/emulator/sys/win32/erl_win32_sys_ddll.c b/erts/emulator/sys/win32/erl_win32_sys_ddll.c
new file mode 100644
index 0000000000..a19f49af10
--- /dev/null
+++ b/erts/emulator/sys/win32/erl_win32_sys_ddll.c
@@ -0,0 +1,206 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2006-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Interface functions to the Windows dynamic linker
+ * (LoadLibrary/GetProcAddress/FreeLibrary).
+ */
+
+#include <windows.h>
+
+#define GET_ERTS_ALC_TEST
+#include "sys.h"
+#include "global.h"
+#include "erl_alloc.h"
+
+#include "erl_driver.h"
+#include "erl_win_dyn_driver.h"
+
+#include "erl_nif.h"
+
+#define EXT_LEN 4
+#define FILE_EXT ".dll"
+
+static DWORD tls_index = 0;
+static TWinDynDriverCallbacks wddc;
+static TWinDynNifCallbacks nif_callbacks;
+
+void erl_sys_ddll_init(void) {
+ tls_index = TlsAlloc();
+ ERL_INIT_CALLBACK_STRUCTURE(wddc);
+
+#define ERL_NIF_API_FUNC_DECL(RET,NAME,ARGS) nif_callbacks.NAME = NAME
+#include "erl_nif_api_funcs.h"
+#undef ERL_NIF_API_FUNC_DECL
+
+ return;
+}
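+
+/*
+ * A sketch of what the include above expands to (illustrative):
+ * erl_nif_api_funcs.h lists every NIF API function as
+ * ERL_NIF_API_FUNC_DECL(Ret, Name, Args), so redefining that macro turns
+ * the list into one assignment per function, e.g.
+ *
+ *     nif_callbacks.enif_alloc = enif_alloc;
+ *
+ * which fills in the call-through table handed to dynamically loaded
+ * NIF libraries by erts_sys_ddll_call_nif_init() below.
+ */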
+
+/*
+ * Open a shared object
+ */
+int erts_sys_ddll_open2(char *full_name, void **handle, ErtsSysDdllError* err)
+{
+ int len;
+ char dlname[MAXPATHLEN + 1];
+
+ if ((len = sys_strlen(full_name)) >= MAXPATHLEN - EXT_LEN) {
+ if (err != NULL) {
+ err->str = "Library name too long";
+ }
+ return ERL_DE_LOAD_ERROR_NAME_TO_LONG;
+ }
+ sys_strcpy(dlname, full_name);
+ sys_strcpy(dlname+len, FILE_EXT);
+ return erts_sys_ddll_open_noext(dlname, handle, err);
+}
+int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err)
+{
+ HINSTANCE hinstance;
+
+ if ((hinstance = LoadLibrary(dlname)) == NULL) {
+ int code = ERL_DE_DYNAMIC_ERROR_OFFSET - GetLastError();
+ if (err != NULL) {
+ err->str = erts_sys_ddll_error(code);
+ }
+ return code;
+ } else {
+ *handle = (void *) hinstance;
+ return ERL_DE_NO_ERROR;
+ }
+}
+
+/*
+ * Find a symbol in the shared object
+ */
+int erts_sys_ddll_sym2(void *handle, char *func_name, void **function,
+ ErtsSysDdllError* err)
+{
+ FARPROC proc;
+ if ((proc = GetProcAddress( (HINSTANCE) handle, func_name)) == NULL) {
+ int code = ERL_DE_DYNAMIC_ERROR_OFFSET - GetLastError();
+ if (err != NULL) {
+ err->str = erts_sys_ddll_error(code);
+ }
+ return code;
+ }
+ *function = (void *) proc;
+ return ERL_DE_NO_ERROR;
+}
+
+/* XXX:PaN These two will be changed with new driver interface! */
+
+/*
+ * Load the driver init function, might appear under different names depending on object arch...
+ */
+
+int erts_sys_ddll_load_driver_init(void *handle, void **function)
+{
+ void *fn;
+ int res;
+ if ((res = erts_sys_ddll_sym(handle, "driver_init", &fn)) != ERL_DE_NO_ERROR) {
+ return res;
+ }
+ *function = fn;
+ return res;
+}
+
+int erts_sys_ddll_load_nif_init(void *handle, void **function, ErtsSysDdllError* err)
+{
+ void *fn;
+ int res;
+ if ((res = erts_sys_ddll_sym2(handle, "nif_init", &fn, err)) != ERL_DE_NO_ERROR) {
+ return res;
+ }
+ *function = fn;
+ return res;
+}
+
+
+/*
+ * Call the driver_init function, whatever it is really called; this is simple on Unix...
+ */
+void *erts_sys_ddll_call_init(void *function) {
+ void *(*initfn)(TWinDynDriverCallbacks *) = function;
+ return (*initfn)(&wddc);
+}
+
+void *erts_sys_ddll_call_nif_init(void *function) {
+ void *(*initfn)(TWinDynNifCallbacks *) = function;
+ return (*initfn)(&nif_callbacks);
+}
+
+
+/*
+ * Close a shared object
+ */
+int erts_sys_ddll_close2(void *handle, ErtsSysDdllError* err)
+{
+ if (!FreeLibrary((HINSTANCE) handle)) {
+ int code = ERL_DE_DYNAMIC_ERROR_OFFSET - GetLastError();
+ if (err != NULL) {
+ err->str = erts_sys_ddll_error(code);
+ }
+ return code;
+ }
+ return ERL_DE_NO_ERROR;
+}
+
+/*
+ * Return string that describes the (current) error
+ */
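+/*
+ * Worked example of the error encoding used in this file: a Win32 error
+ * such as ERROR_MOD_NOT_FOUND (126) is returned to callers as
+ * ERL_DE_DYNAMIC_ERROR_OFFSET - 126, and the negation below recovers
+ * 126 again before it is handed to FormatMessage().
+ */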
+#define MAX_ERROR 255
+char *erts_sys_ddll_error(int code)
+{
+ int actual_code;
+ char *local_ptr;
+ if (code > ERL_DE_DYNAMIC_ERROR_OFFSET) {
+ return "Unspecified error";
+ }
+ actual_code = -1*(code - ERL_DE_DYNAMIC_ERROR_OFFSET);
+
+ local_ptr = TlsGetValue(tls_index);
+ if (local_ptr == NULL) {
+ local_ptr = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, MAX_ERROR);
+ TlsSetValue(tls_index,local_ptr);
+ }
+ if (!FormatMessage(
+ FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL,
+ (DWORD) actual_code,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ local_ptr,
+ MAX_ERROR, NULL )) {
+ return "Unspecified error";
+ } else {
+ char *ptr = local_ptr + strlen(local_ptr) - 1;
+ while (ptr >= local_ptr && (*ptr == '\r' || *ptr == '\n')) {
+ *ptr-- = '\0';
+ }
+ }
+ return local_ptr;
+}
+
+void erts_sys_ddll_free_error(ErtsSysDdllError* err)
+{
+ /* err->str may be either a static string or reused as thread local data,
+ * so we don't bother to free it.
+ */
+}
+
diff --git a/erts/emulator/sys/win32/erl_win_dyn_driver.h b/erts/emulator/sys/win32/erl_win_dyn_driver.h
new file mode 100644
index 0000000000..4949998abc
--- /dev/null
+++ b/erts/emulator/sys/win32/erl_win_dyn_driver.h
@@ -0,0 +1,489 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2003-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Include file for erlang driver writers using dynamic drivers on windows.
+ */
+
+/* Maybe this should be auto-generated, but I'll leave that for now... */
+
+#ifndef _ERL_WIN_DYN_DRIVER_H
+#define _ERL_WIN_DYN_DRIVER_H
+
+#define WDD_FTYPE(FunctionName) TWinDynDriver##FunctionName
+
+#define WDD_TYPEDEF(RetType, FunctionName, Params) \
+ typedef RetType WDD_FTYPE(FunctionName) Params
+
+WDD_TYPEDEF(int, null_func,(void));
+WDD_TYPEDEF(int, driver_failure_atom,(ErlDrvPort, char *));
+WDD_TYPEDEF(int, driver_failure_posix,(ErlDrvPort, int));
+WDD_TYPEDEF(int, driver_failure,(ErlDrvPort, int));
+WDD_TYPEDEF(int, driver_exit, (ErlDrvPort, int));
+WDD_TYPEDEF(int, driver_failure_eof, (ErlDrvPort));
+WDD_TYPEDEF(int, driver_select, (ErlDrvPort, ErlDrvEvent, int, int));
+WDD_TYPEDEF(int, driver_event, (ErlDrvPort, ErlDrvEvent,ErlDrvEventData));
+WDD_TYPEDEF(int, driver_output, (ErlDrvPort, char *, int));
+WDD_TYPEDEF(int, driver_output2, (ErlDrvPort, char *, int,char *, int));
+WDD_TYPEDEF(int, driver_output_binary, (ErlDrvPort, char *, int,ErlDrvBinary*, int, int));
+WDD_TYPEDEF(int, driver_outputv, (ErlDrvPort, char*, int, ErlIOVec *,int));
+WDD_TYPEDEF(int, driver_vec_to_buf, (ErlIOVec *, char *, int));
+WDD_TYPEDEF(int, driver_set_timer, (ErlDrvPort, unsigned long));
+WDD_TYPEDEF(int, driver_cancel_timer, (ErlDrvPort));
+WDD_TYPEDEF(int, driver_read_timer, (ErlDrvPort, unsigned long *));
+WDD_TYPEDEF(char *, erl_errno_id, (int));
+WDD_TYPEDEF(void, set_busy_port, (ErlDrvPort, int));
+WDD_TYPEDEF(void, set_port_control_flags, (ErlDrvPort, int));
+WDD_TYPEDEF(int, get_port_flags, (ErlDrvPort));
+WDD_TYPEDEF(ErlDrvBinary *, driver_alloc_binary, (int));
+WDD_TYPEDEF(ErlDrvBinary *, driver_realloc_binary, (ErlDrvBinary *, int));
+WDD_TYPEDEF(void, driver_free_binary, (ErlDrvBinary *));
+WDD_TYPEDEF(void *, driver_alloc, (size_t));
+WDD_TYPEDEF(void *, driver_realloc, (void *, size_t));
+WDD_TYPEDEF(void, driver_free, (void *));
+WDD_TYPEDEF(int, driver_enq, (ErlDrvPort, char*, int));
+WDD_TYPEDEF(int, driver_pushq, (ErlDrvPort, char*, int));
+WDD_TYPEDEF(int, driver_deq, (ErlDrvPort, int));
+WDD_TYPEDEF(int, driver_sizeq, (ErlDrvPort));
+WDD_TYPEDEF(int, driver_enq_bin, (ErlDrvPort, ErlDrvBinary *, int,int));
+WDD_TYPEDEF(int, driver_pushq_bin, (ErlDrvPort, ErlDrvBinary *, int,int));
+WDD_TYPEDEF(int, driver_peekqv, (ErlDrvPort, ErlIOVec *));
+WDD_TYPEDEF(SysIOVec *, driver_peekq, (ErlDrvPort, int *));
+WDD_TYPEDEF(int, driver_enqv, (ErlDrvPort, ErlIOVec *, int));
+WDD_TYPEDEF(int, driver_pushqv, (ErlDrvPort, ErlIOVec *, int));
+WDD_TYPEDEF(void, add_driver_entry, (ErlDrvEntry *));
+WDD_TYPEDEF(int, remove_driver_entry, (ErlDrvEntry *));
+WDD_TYPEDEF(ErlDrvTermData, driver_mk_atom, (char*));
+WDD_TYPEDEF(ErlDrvTermData, driver_mk_port,(ErlDrvPort));
+WDD_TYPEDEF(ErlDrvTermData, driver_connected,(ErlDrvPort));
+WDD_TYPEDEF(ErlDrvTermData, driver_caller,(ErlDrvPort));
+WDD_TYPEDEF(ErlDrvTermData, driver_mk_term_nil,(void));
+WDD_TYPEDEF(int, driver_output_term, (ErlDrvPort, ErlDrvTermData*, int));
+WDD_TYPEDEF(int, driver_send_term, (ErlDrvPort, ErlDrvTermData, ErlDrvTermData*, int));
+WDD_TYPEDEF(long, driver_async, (ErlDrvPort,unsigned int*,void (*)(void*),void*,void (*)(void*)));
+WDD_TYPEDEF(int, driver_async_cancel, (unsigned int));
+WDD_TYPEDEF(int, driver_lock_driver, (ErlDrvPort));
+WDD_TYPEDEF(void *, driver_dl_open, (char *));
+WDD_TYPEDEF(void *, driver_dl_sym, (void *, char *));
+WDD_TYPEDEF(int, driver_dl_close, (void *));
+WDD_TYPEDEF(char *, driver_dl_error, (void));
+WDD_TYPEDEF(unsigned long, erts_alc_test, (unsigned long,
+ unsigned long,
+ unsigned long,
+ unsigned long));
+WDD_TYPEDEF(long, driver_binary_get_refc, (ErlDrvBinary *dbp));
+WDD_TYPEDEF(long, driver_binary_inc_refc, (ErlDrvBinary *dbp));
+WDD_TYPEDEF(long, driver_binary_dec_refc, (ErlDrvBinary *dbp));
+WDD_TYPEDEF(ErlDrvPDL, driver_pdl_create, (ErlDrvPort));
+WDD_TYPEDEF(void, driver_pdl_lock, (ErlDrvPDL));
+WDD_TYPEDEF(void, driver_pdl_unlock, (ErlDrvPDL));
+WDD_TYPEDEF(long, driver_pdl_get_refc, (ErlDrvPDL));
+WDD_TYPEDEF(long, driver_pdl_inc_refc, (ErlDrvPDL));
+WDD_TYPEDEF(long, driver_pdl_dec_refc, (ErlDrvPDL));
+WDD_TYPEDEF(void, driver_system_info, (ErlDrvSysInfo *, size_t));
+WDD_TYPEDEF(int, driver_get_now, (ErlDrvNowData *));
+WDD_TYPEDEF(int, driver_monitor_process, (ErlDrvPort port,
+ ErlDrvTermData process,
+ ErlDrvMonitor *monitor));
+WDD_TYPEDEF(int, driver_demonitor_process, (ErlDrvPort port,
+ const ErlDrvMonitor *monitor));
+WDD_TYPEDEF(ErlDrvTermData, driver_get_monitored_process,
+ (ErlDrvPort port, const ErlDrvMonitor *monitor));
+WDD_TYPEDEF(int, driver_compare_monitors,
+ (const ErlDrvMonitor *, const ErlDrvMonitor *));
+WDD_TYPEDEF(ErlDrvMutex *, erl_drv_mutex_create, (char *name));
+WDD_TYPEDEF(void, erl_drv_mutex_destroy, (ErlDrvMutex *mtx));
+WDD_TYPEDEF(int, erl_drv_mutex_trylock, (ErlDrvMutex *mtx));
+WDD_TYPEDEF(void, erl_drv_mutex_lock, (ErlDrvMutex *mtx));
+WDD_TYPEDEF(void, erl_drv_mutex_unlock, (ErlDrvMutex *mtx));
+WDD_TYPEDEF(ErlDrvCond *, erl_drv_cond_create, (char *name));
+WDD_TYPEDEF(void, erl_drv_cond_destroy, (ErlDrvCond *cnd));
+WDD_TYPEDEF(void, erl_drv_cond_signal, (ErlDrvCond *cnd));
+WDD_TYPEDEF(void, erl_drv_cond_broadcast, (ErlDrvCond *cnd));
+WDD_TYPEDEF(void, erl_drv_cond_wait, (ErlDrvCond *cnd, ErlDrvMutex *mtx));
+WDD_TYPEDEF(ErlDrvRWLock *, erl_drv_rwlock_create, (char *name));
+WDD_TYPEDEF(void, erl_drv_rwlock_destroy, (ErlDrvRWLock *rwlck));
+WDD_TYPEDEF(int, erl_drv_rwlock_tryrlock, (ErlDrvRWLock *rwlck));
+WDD_TYPEDEF(void, erl_drv_rwlock_rlock, (ErlDrvRWLock *rwlck));
+WDD_TYPEDEF(void, erl_drv_rwlock_runlock, (ErlDrvRWLock *rwlck));
+WDD_TYPEDEF(int, erl_drv_rwlock_tryrwlock, (ErlDrvRWLock *rwlck));
+WDD_TYPEDEF(void, erl_drv_rwlock_rwlock, (ErlDrvRWLock *rwlck));
+WDD_TYPEDEF(void, erl_drv_rwlock_rwunlock, (ErlDrvRWLock *rwlck));
+WDD_TYPEDEF(int, erl_drv_tsd_key_create, (char *name, ErlDrvTSDKey *key));
+WDD_TYPEDEF(void, erl_drv_tsd_key_destroy, (ErlDrvTSDKey key));
+WDD_TYPEDEF(void, erl_drv_tsd_set, (ErlDrvTSDKey key, void *data));
+WDD_TYPEDEF(void *, erl_drv_tsd_get, (ErlDrvTSDKey key));
+WDD_TYPEDEF(ErlDrvThreadOpts *, erl_drv_thread_opts_create, (char *name));
+WDD_TYPEDEF(void, erl_drv_thread_opts_destroy, (ErlDrvThreadOpts *opts));
+WDD_TYPEDEF(int, erl_drv_thread_create, (char *name,
+ ErlDrvTid *tid,
+ void * (*func)(void *),
+ void *args,
+ ErlDrvThreadOpts *opts));
+WDD_TYPEDEF(ErlDrvTid, erl_drv_thread_self, (void));
+WDD_TYPEDEF(int, erl_drv_equal_tids, (ErlDrvTid tid1, ErlDrvTid tid2));
+WDD_TYPEDEF(void, erl_drv_thread_exit, (void *resp));
+WDD_TYPEDEF(int, erl_drv_thread_join, (ErlDrvTid, void **respp));
+WDD_TYPEDEF(int, erl_drv_putenv, (char *key, char *value));
+WDD_TYPEDEF(int, erl_drv_getenv, (char *key, char *value, size_t *value_size));
+
+typedef struct {
+ WDD_FTYPE(null_func) *null_func;
+ WDD_FTYPE(driver_failure_atom) *driver_failure_atom;
+ WDD_FTYPE(driver_failure_posix) *driver_failure_posix;
+ WDD_FTYPE(driver_failure) *driver_failure;
+ WDD_FTYPE(driver_exit) *driver_exit;
+ WDD_FTYPE(driver_failure_eof) *driver_failure_eof;
+ WDD_FTYPE(driver_select) *driver_select;
+ WDD_FTYPE(driver_event) *driver_event;
+ WDD_FTYPE(driver_output) *driver_output;
+ WDD_FTYPE(driver_output2) *driver_output2;
+ WDD_FTYPE(driver_output_binary) *driver_output_binary;
+ WDD_FTYPE(driver_outputv) *driver_outputv;
+ WDD_FTYPE(driver_vec_to_buf) *driver_vec_to_buf;
+ WDD_FTYPE(driver_set_timer) *driver_set_timer;
+ WDD_FTYPE(driver_cancel_timer) *driver_cancel_timer;
+ WDD_FTYPE(driver_read_timer) *driver_read_timer;
+ WDD_FTYPE(erl_errno_id) *erl_errno_id;
+ WDD_FTYPE(set_busy_port)* set_busy_port;
+ WDD_FTYPE(set_port_control_flags) *set_port_control_flags;
+ WDD_FTYPE(get_port_flags) *get_port_flags;
+ WDD_FTYPE(driver_alloc_binary) *driver_alloc_binary;
+ WDD_FTYPE(driver_realloc_binary) *driver_realloc_binary;
+ WDD_FTYPE(driver_free_binary) *driver_free_binary;
+ WDD_FTYPE(driver_alloc) *driver_alloc;
+ WDD_FTYPE(driver_realloc) *driver_realloc;
+ WDD_FTYPE(driver_free) *driver_free;
+ WDD_FTYPE(driver_enq) *driver_enq;
+ WDD_FTYPE(driver_pushq) *driver_pushq;
+ WDD_FTYPE(driver_deq) *driver_deq;
+ WDD_FTYPE(driver_sizeq) *driver_sizeq;
+ WDD_FTYPE(driver_enq_bin)* driver_enq_bin;
+ WDD_FTYPE(driver_pushq_bin) *driver_pushq_bin;
+ WDD_FTYPE(driver_peekqv) *driver_peekqv;
+ WDD_FTYPE(driver_peekq) *driver_peekq;
+ WDD_FTYPE(driver_enqv) *driver_enqv;
+ WDD_FTYPE(driver_pushqv) *driver_pushqv;
+ WDD_FTYPE(add_driver_entry) *add_driver_entry;
+ WDD_FTYPE(remove_driver_entry) *remove_driver_entry;
+ WDD_FTYPE(driver_mk_atom) *driver_mk_atom;
+ WDD_FTYPE(driver_mk_port) *driver_mk_port;
+ WDD_FTYPE(driver_connected) *driver_connected;
+ WDD_FTYPE(driver_caller) *driver_caller;
+ WDD_FTYPE(driver_mk_term_nil) *driver_mk_term_nil;
+ WDD_FTYPE(driver_output_term) *driver_output_term;
+ WDD_FTYPE(driver_send_term) *driver_send_term;
+ WDD_FTYPE(driver_async) *driver_async;
+ WDD_FTYPE(driver_async_cancel) *driver_async_cancel;
+ WDD_FTYPE(driver_lock_driver) *driver_lock_driver;
+ WDD_FTYPE(driver_dl_open) *driver_dl_open;
+ WDD_FTYPE(driver_dl_sym) *driver_dl_sym;
+ WDD_FTYPE(driver_dl_close) *driver_dl_close;
+ WDD_FTYPE(driver_dl_error) *driver_dl_error;
+ WDD_FTYPE(erts_alc_test) *erts_alc_test;
+ WDD_FTYPE(driver_binary_get_refc) *driver_binary_get_refc;
+ WDD_FTYPE(driver_binary_inc_refc) *driver_binary_inc_refc;
+ WDD_FTYPE(driver_binary_dec_refc) *driver_binary_dec_refc;
+ WDD_FTYPE(driver_pdl_create) *driver_pdl_create;
+ WDD_FTYPE(driver_pdl_lock) *driver_pdl_lock;
+ WDD_FTYPE(driver_pdl_unlock) *driver_pdl_unlock;
+ WDD_FTYPE(driver_pdl_get_refc) *driver_pdl_get_refc;
+ WDD_FTYPE(driver_pdl_inc_refc) *driver_pdl_inc_refc;
+ WDD_FTYPE(driver_pdl_dec_refc) *driver_pdl_dec_refc;
+ WDD_FTYPE(driver_system_info) *driver_system_info;
+ WDD_FTYPE(driver_get_now) *driver_get_now;
+ WDD_FTYPE(driver_monitor_process) *driver_monitor_process;
+ WDD_FTYPE(driver_demonitor_process) *driver_demonitor_process;
+ WDD_FTYPE(driver_get_monitored_process) *driver_get_monitored_process;
+ WDD_FTYPE(driver_compare_monitors) *driver_compare_monitors;
+ WDD_FTYPE(erl_drv_mutex_create) *erl_drv_mutex_create;
+ WDD_FTYPE(erl_drv_mutex_destroy) *erl_drv_mutex_destroy;
+ WDD_FTYPE(erl_drv_mutex_trylock) *erl_drv_mutex_trylock;
+ WDD_FTYPE(erl_drv_mutex_lock) *erl_drv_mutex_lock;
+ WDD_FTYPE(erl_drv_mutex_unlock) *erl_drv_mutex_unlock;
+ WDD_FTYPE(erl_drv_cond_create) *erl_drv_cond_create;
+ WDD_FTYPE(erl_drv_cond_destroy) *erl_drv_cond_destroy;
+ WDD_FTYPE(erl_drv_cond_signal) *erl_drv_cond_signal;
+ WDD_FTYPE(erl_drv_cond_broadcast) *erl_drv_cond_broadcast;
+ WDD_FTYPE(erl_drv_cond_wait) *erl_drv_cond_wait;
+ WDD_FTYPE(erl_drv_rwlock_create) *erl_drv_rwlock_create;
+ WDD_FTYPE(erl_drv_rwlock_destroy) *erl_drv_rwlock_destroy;
+ WDD_FTYPE(erl_drv_rwlock_tryrlock) *erl_drv_rwlock_tryrlock;
+ WDD_FTYPE(erl_drv_rwlock_rlock) *erl_drv_rwlock_rlock;
+ WDD_FTYPE(erl_drv_rwlock_runlock) *erl_drv_rwlock_runlock;
+ WDD_FTYPE(erl_drv_rwlock_tryrwlock) *erl_drv_rwlock_tryrwlock;
+ WDD_FTYPE(erl_drv_rwlock_rwlock) *erl_drv_rwlock_rwlock;
+ WDD_FTYPE(erl_drv_rwlock_rwunlock) *erl_drv_rwlock_rwunlock;
+ WDD_FTYPE(erl_drv_tsd_key_create) *erl_drv_tsd_key_create;
+ WDD_FTYPE(erl_drv_tsd_key_destroy) *erl_drv_tsd_key_destroy;
+ WDD_FTYPE(erl_drv_tsd_set) *erl_drv_tsd_set;
+ WDD_FTYPE(erl_drv_tsd_get) *erl_drv_tsd_get;
+ WDD_FTYPE(erl_drv_thread_opts_create) *erl_drv_thread_opts_create;
+ WDD_FTYPE(erl_drv_thread_opts_destroy) *erl_drv_thread_opts_destroy;
+ WDD_FTYPE(erl_drv_thread_create) *erl_drv_thread_create;
+ WDD_FTYPE(erl_drv_thread_self) *erl_drv_thread_self;
+ WDD_FTYPE(erl_drv_equal_tids) *erl_drv_equal_tids;
+ WDD_FTYPE(erl_drv_thread_exit) *erl_drv_thread_exit;
+ WDD_FTYPE(erl_drv_thread_join) *erl_drv_thread_join;
+ WDD_FTYPE(erl_drv_putenv) *erl_drv_putenv;
+ WDD_FTYPE(erl_drv_getenv) *erl_drv_getenv;
+ /* Add new calls here */
+} TWinDynDriverCallbacks;
+
+/* This header is included explicitly by the ddll static driver; it mustn't define things then */
+#ifndef STATIC_ERLANG_DRIVER
+
+extern TWinDynDriverCallbacks WinDynDriverCallbacks;
+
+#define null_func (WinDynDriverCallbacks.null_func)
+#define driver_failure_atom (WinDynDriverCallbacks.driver_failure_atom)
+#define driver_failure_posix (WinDynDriverCallbacks.driver_failure_posix)
+#define driver_failure (WinDynDriverCallbacks.driver_failure)
+#define driver_exit (WinDynDriverCallbacks.driver_exit)
+#define driver_failure_eof (WinDynDriverCallbacks.driver_failure_eof)
+#define driver_select (WinDynDriverCallbacks.driver_select)
+#define driver_event (WinDynDriverCallbacks.driver_event)
+#define driver_output (WinDynDriverCallbacks.driver_output)
+#define driver_output2 (WinDynDriverCallbacks.driver_output2)
+#define driver_output_binary (WinDynDriverCallbacks.driver_output_binary)
+#define driver_outputv (WinDynDriverCallbacks.driver_outputv)
+#define driver_vec_to_buf (WinDynDriverCallbacks.driver_vec_to_buf)
+#define driver_set_timer (WinDynDriverCallbacks.driver_set_timer)
+#define driver_cancel_timer (WinDynDriverCallbacks.driver_cancel_timer)
+#define driver_read_timer (WinDynDriverCallbacks.driver_read_timer)
+#define erl_errno_id (WinDynDriverCallbacks.erl_errno_id)
+#define set_busy_port (WinDynDriverCallbacks.set_busy_port)
+#define set_port_control_flags (WinDynDriverCallbacks.set_port_control_flags)
+#define get_port_flags (WinDynDriverCallbacks.get_port_flags)
+#define driver_alloc_binary (WinDynDriverCallbacks.driver_alloc_binary)
+#define driver_realloc_binary (WinDynDriverCallbacks.driver_realloc_binary)
+#define driver_free_binary (WinDynDriverCallbacks.driver_free_binary)
+#define driver_alloc (WinDynDriverCallbacks.driver_alloc)
+#define driver_realloc (WinDynDriverCallbacks.driver_realloc)
+#define driver_free (WinDynDriverCallbacks.driver_free)
+#define driver_enq (WinDynDriverCallbacks.driver_enq)
+#define driver_pushq (WinDynDriverCallbacks.driver_pushq)
+#define driver_deq (WinDynDriverCallbacks.driver_deq)
+#define driver_sizeq (WinDynDriverCallbacks.driver_sizeq)
+#define driver_enq_bin (WinDynDriverCallbacks.driver_enq_bin)
+#define driver_pushq_bin (WinDynDriverCallbacks.driver_pushq_bin)
+#define driver_peekqv (WinDynDriverCallbacks.driver_peekqv)
+#define driver_peekq (WinDynDriverCallbacks.driver_peekq)
+#define driver_enqv (WinDynDriverCallbacks.driver_enqv)
+#define driver_pushqv (WinDynDriverCallbacks.driver_pushqv)
+#define add_driver_entry (WinDynDriverCallbacks.add_driver_entry)
+#define remove_driver_entry (WinDynDriverCallbacks.remove_driver_entry)
+#define driver_mk_atom (WinDynDriverCallbacks.driver_mk_atom)
+#define driver_mk_port (WinDynDriverCallbacks.driver_mk_port)
+#define driver_connected (WinDynDriverCallbacks.driver_connected)
+#define driver_caller (WinDynDriverCallbacks.driver_caller)
+#define driver_mk_term_nil (WinDynDriverCallbacks.driver_mk_term_nil)
+#define driver_output_term (WinDynDriverCallbacks.driver_output_term)
+#define driver_send_term (WinDynDriverCallbacks.driver_send_term)
+#define driver_async (WinDynDriverCallbacks.driver_async)
+#define driver_async_cancel (WinDynDriverCallbacks.driver_async_cancel)
+#define driver_lock_driver (WinDynDriverCallbacks.driver_lock_driver)
+#define driver_dl_open (WinDynDriverCallbacks.driver_dl_open)
+#define driver_dl_sym (WinDynDriverCallbacks.driver_dl_sym)
+#define driver_dl_close (WinDynDriverCallbacks.driver_dl_close)
+#define driver_dl_error (WinDynDriverCallbacks.driver_dl_error)
+#define erts_alc_test (WinDynDriverCallbacks.erts_alc_test)
+#define driver_binary_get_refc (WinDynDriverCallbacks.driver_binary_get_refc)
+#define driver_binary_inc_refc (WinDynDriverCallbacks.driver_binary_inc_refc)
+#define driver_binary_dec_refc (WinDynDriverCallbacks.driver_binary_dec_refc)
+#define driver_pdl_create (WinDynDriverCallbacks.driver_pdl_create)
+#define driver_pdl_lock (WinDynDriverCallbacks.driver_pdl_lock)
+#define driver_pdl_unlock (WinDynDriverCallbacks.driver_pdl_unlock)
+#define driver_pdl_get_refc (WinDynDriverCallbacks.driver_pdl_get_refc)
+#define driver_pdl_inc_refc (WinDynDriverCallbacks.driver_pdl_inc_refc)
+#define driver_pdl_dec_refc (WinDynDriverCallbacks.driver_pdl_dec_refc)
+#define driver_system_info (WinDynDriverCallbacks.driver_system_info)
+#define driver_get_now (WinDynDriverCallbacks.driver_get_now)
+#define driver_monitor_process \
+(WinDynDriverCallbacks.driver_monitor_process)
+#define driver_demonitor_process \
+(WinDynDriverCallbacks.driver_demonitor_process)
+#define driver_get_monitored_process \
+(WinDynDriverCallbacks.driver_get_monitored_process)
+#define driver_compare_monitors \
+(WinDynDriverCallbacks.driver_compare_monitors)
+#define erl_drv_mutex_create (WinDynDriverCallbacks.erl_drv_mutex_create)
+#define erl_drv_mutex_destroy (WinDynDriverCallbacks.erl_drv_mutex_destroy)
+#define erl_drv_mutex_trylock (WinDynDriverCallbacks.erl_drv_mutex_trylock)
+#define erl_drv_mutex_lock (WinDynDriverCallbacks.erl_drv_mutex_lock)
+#define erl_drv_mutex_unlock (WinDynDriverCallbacks.erl_drv_mutex_unlock)
+#define erl_drv_cond_create (WinDynDriverCallbacks.erl_drv_cond_create)
+#define erl_drv_cond_destroy (WinDynDriverCallbacks.erl_drv_cond_destroy)
+#define erl_drv_cond_signal (WinDynDriverCallbacks.erl_drv_cond_signal)
+#define erl_drv_cond_broadcast (WinDynDriverCallbacks.erl_drv_cond_broadcast)
+#define erl_drv_cond_wait (WinDynDriverCallbacks.erl_drv_cond_wait)
+#define erl_drv_rwlock_create (WinDynDriverCallbacks.erl_drv_rwlock_create)
+#define erl_drv_rwlock_destroy (WinDynDriverCallbacks.erl_drv_rwlock_destroy)
+#define erl_drv_rwlock_tryrlock (WinDynDriverCallbacks.erl_drv_rwlock_tryrlock)
+#define erl_drv_rwlock_rlock (WinDynDriverCallbacks.erl_drv_rwlock_rlock)
+#define erl_drv_rwlock_runlock (WinDynDriverCallbacks.erl_drv_rwlock_runlock)
+#define erl_drv_rwlock_tryrwlock \
+(WinDynDriverCallbacks.erl_drv_rwlock_tryrwlock)
+#define erl_drv_rwlock_rwlock (WinDynDriverCallbacks.erl_drv_rwlock_rwlock)
+#define erl_drv_rwlock_rwunlock (WinDynDriverCallbacks.erl_drv_rwlock_rwunlock)
+#define erl_drv_tsd_key_create (WinDynDriverCallbacks.erl_drv_tsd_key_create)
+#define erl_drv_tsd_key_destroy (WinDynDriverCallbacks.erl_drv_tsd_key_destroy)
+#define erl_drv_tsd_set (WinDynDriverCallbacks.erl_drv_tsd_set)
+#define erl_drv_tsd_get (WinDynDriverCallbacks.erl_drv_tsd_get)
+#define erl_drv_thread_opts_create \
+(WinDynDriverCallbacks.erl_drv_thread_opts_create)
+#define erl_drv_thread_opts_destroy \
+(WinDynDriverCallbacks.erl_drv_thread_opts_destroy)
+#define erl_drv_thread_create (WinDynDriverCallbacks.erl_drv_thread_create)
+#define erl_drv_thread_self (WinDynDriverCallbacks.erl_drv_thread_self)
+#define erl_drv_equal_tids (WinDynDriverCallbacks.erl_drv_equal_tids)
+#define erl_drv_thread_exit (WinDynDriverCallbacks.erl_drv_thread_exit)
+#define erl_drv_thread_join (WinDynDriverCallbacks.erl_drv_thread_join)
+#define erl_drv_putenv (WinDynDriverCallbacks.erl_drv_putenv)
+#define erl_drv_getenv (WinDynDriverCallbacks.erl_drv_getenv)
+
+/* The only variable in the interface... */
+#define driver_term_nil (driver_mk_term_nil())
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define DRIVER_INIT(DriverName) \
+ErlDrvEntry *erl_dyndriver_real_driver_init(void); \
+TWinDynDriverCallbacks WinDynDriverCallbacks; \
+__declspec(dllexport) ErlDrvEntry *driver_init(TWinDynDriverCallbacks *callbacks) \
+{ \
+ memcpy(&WinDynDriverCallbacks,callbacks,sizeof(TWinDynDriverCallbacks)); \
+ return erl_dyndriver_real_driver_init(); \
+} \
+ErlDrvEntry *erl_dyndriver_real_driver_init(void)
+
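+/*
+ * Illustrative sketch (not part of this header): a dynamically loaded
+ * driver built for Windows would normally use the macro above as
+ * follows, where my_driver_entry is a hypothetical ErlDrvEntry defined
+ * by the driver itself:
+ *
+ *     DRIVER_INIT(my_driver)
+ *     {
+ *         return &my_driver_entry;
+ *     }
+ *
+ * The generated driver_init() first copies the emulator's callback
+ * table into WinDynDriverCallbacks, so that all driver_* calls made by
+ * the driver go through that table.
+ */
+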
+/* This is to keep erl_driver.h from changing what's done here */
+#define ERL_DRIVER_TYPES_ONLY
+
+#else /* defined(STATIC_ERLANG_DRIVER) */
+/* This is for the ddll driver */
+
+#define ERL_INIT_CALLBACK_STRUCTURE(W) \
+do { \
+((W).null_func) = null_func; \
+((W).driver_failure_atom) = driver_failure_atom; \
+((W).driver_failure_posix) = driver_failure_posix; \
+((W).driver_failure) = driver_failure; \
+((W).driver_exit) = driver_exit; \
+((W).driver_failure_eof) = driver_failure_eof; \
+((W).driver_select) = driver_select; \
+((W).driver_event) = driver_event; \
+((W).driver_output) = driver_output; \
+((W).driver_output2) = driver_output2; \
+((W).driver_output_binary) = driver_output_binary; \
+((W).driver_outputv) = driver_outputv; \
+((W).driver_vec_to_buf) = driver_vec_to_buf; \
+((W).driver_set_timer) = driver_set_timer; \
+((W).driver_cancel_timer) = driver_cancel_timer; \
+((W).driver_read_timer) = driver_read_timer; \
+((W).erl_errno_id) = erl_errno_id; \
+((W).set_busy_port) = set_busy_port; \
+((W).set_port_control_flags) = set_port_control_flags; \
+((W).get_port_flags) = get_port_flags; \
+((W).driver_alloc_binary) = driver_alloc_binary; \
+((W).driver_realloc_binary) = driver_realloc_binary; \
+((W).driver_free_binary) = driver_free_binary; \
+((W).driver_alloc) = driver_alloc; \
+((W).driver_realloc) = driver_realloc; \
+((W).driver_free) = driver_free; \
+((W).driver_enq) = driver_enq; \
+((W).driver_pushq) = driver_pushq; \
+((W).driver_deq) = driver_deq; \
+((W).driver_sizeq) = driver_sizeq; \
+((W).driver_enq_bin) = driver_enq_bin; \
+((W).driver_pushq_bin) = driver_pushq_bin; \
+((W).driver_peekqv) = driver_peekqv; \
+((W).driver_peekq) = driver_peekq; \
+((W).driver_enqv) = driver_enqv; \
+((W).driver_pushqv) = driver_pushqv; \
+((W).add_driver_entry) = add_driver_entry; \
+((W).remove_driver_entry) = remove_driver_entry; \
+((W).driver_mk_atom) = driver_mk_atom; \
+((W).driver_mk_port) = driver_mk_port; \
+((W).driver_connected) = driver_connected; \
+((W).driver_caller) = driver_caller; \
+((W).driver_mk_term_nil) = driver_mk_term_nil; \
+((W).driver_output_term) = driver_output_term; \
+((W).driver_send_term) = driver_send_term; \
+((W).driver_async) = driver_async; \
+((W).driver_async_cancel) = driver_async_cancel; \
+((W).driver_lock_driver) = driver_lock_driver; \
+((W).driver_dl_open) = driver_dl_open; \
+((W).driver_dl_sym) = driver_dl_sym; \
+((W).driver_dl_close) = driver_dl_close; \
+((W).driver_dl_error) = driver_dl_error; \
+((W).erts_alc_test) = erts_alc_test; \
+((W).driver_binary_get_refc) = driver_binary_get_refc; \
+((W).driver_binary_inc_refc) = driver_binary_inc_refc; \
+((W).driver_binary_dec_refc) = driver_binary_dec_refc; \
+((W).driver_pdl_create) = driver_pdl_create; \
+((W).driver_pdl_lock) = driver_pdl_lock; \
+((W).driver_pdl_unlock) = driver_pdl_unlock; \
+((W).driver_pdl_get_refc) = driver_pdl_get_refc; \
+((W).driver_pdl_inc_refc) = driver_pdl_inc_refc; \
+((W).driver_pdl_dec_refc) = driver_pdl_dec_refc; \
+((W).driver_system_info) = driver_system_info; \
+((W).driver_get_now) = driver_get_now; \
+((W).driver_monitor_process) = driver_monitor_process; \
+((W).driver_demonitor_process) = driver_demonitor_process; \
+((W).driver_get_monitored_process) = driver_get_monitored_process; \
+((W).driver_compare_monitors) = driver_compare_monitors;\
+((W).erl_drv_mutex_create) = erl_drv_mutex_create; \
+((W).erl_drv_mutex_destroy) = erl_drv_mutex_destroy; \
+((W).erl_drv_mutex_trylock) = erl_drv_mutex_trylock; \
+((W).erl_drv_mutex_lock) = erl_drv_mutex_lock; \
+((W).erl_drv_mutex_unlock) = erl_drv_mutex_unlock; \
+((W).erl_drv_cond_create) = erl_drv_cond_create; \
+((W).erl_drv_cond_destroy) = erl_drv_cond_destroy; \
+((W).erl_drv_cond_signal) = erl_drv_cond_signal; \
+((W).erl_drv_cond_broadcast) = erl_drv_cond_broadcast; \
+((W).erl_drv_cond_wait) = erl_drv_cond_wait; \
+((W).erl_drv_rwlock_create) = erl_drv_rwlock_create; \
+((W).erl_drv_rwlock_destroy) = erl_drv_rwlock_destroy; \
+((W).erl_drv_rwlock_tryrlock) = erl_drv_rwlock_tryrlock;\
+((W).erl_drv_rwlock_rlock) = erl_drv_rwlock_rlock; \
+((W).erl_drv_rwlock_runlock) = erl_drv_rwlock_runlock; \
+((W).erl_drv_rwlock_tryrwlock) = erl_drv_rwlock_tryrwlock;\
+((W).erl_drv_rwlock_rwlock) = erl_drv_rwlock_rwlock; \
+((W).erl_drv_rwlock_rwunlock) = erl_drv_rwlock_rwunlock;\
+((W).erl_drv_tsd_key_create) = erl_drv_tsd_key_create; \
+((W).erl_drv_tsd_key_destroy) = erl_drv_tsd_key_destroy;\
+((W).erl_drv_tsd_set) = erl_drv_tsd_set; \
+((W).erl_drv_tsd_get) = erl_drv_tsd_get; \
+((W).erl_drv_thread_opts_create) = erl_drv_thread_opts_create;\
+((W).erl_drv_thread_opts_destroy) = erl_drv_thread_opts_destroy;\
+((W).erl_drv_thread_create) = erl_drv_thread_create; \
+((W).erl_drv_thread_self) = erl_drv_thread_self; \
+((W).erl_drv_equal_tids) = erl_drv_equal_tids; \
+((W).erl_drv_thread_exit) = erl_drv_thread_exit; \
+((W).erl_drv_thread_join) = erl_drv_thread_join; \
+((W).erl_drv_putenv) = erl_drv_putenv; \
+((W).erl_drv_getenv) = erl_drv_getenv; \
+} while (0)
+
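+/*
+ * Illustrative sketch (an assumption about the loader side, not code
+ * from this header): when the ddll code loads a driver DLL it fills in
+ * a callback table and hands it to the DLL's exported driver_init(),
+ * roughly like this (variable names are made up):
+ *
+ *     TWinDynDriverCallbacks wddc;
+ *     ErlDrvEntry *(*init_func)(TWinDynDriverCallbacks *);
+ *     ErlDrvEntry *entry;
+ *
+ *     ERL_INIT_CALLBACK_STRUCTURE(wddc);
+ *     init_func = (ErlDrvEntry *(*)(TWinDynDriverCallbacks *))
+ *         GetProcAddress(dll_handle, "driver_init");
+ *     entry = (*init_func)(&wddc);
+ */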
+
+
+#endif /* STATIC_ERLANG_DRIVER */
+#endif /* _ERL_WIN_DYN_DRIVER_H */
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
new file mode 100644
index 0000000000..92d8577537
--- /dev/null
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -0,0 +1,212 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * This file handles differences between operating systems.
+ * This should be the only place with conditional compilation
+ * depending on the type of OS.
+ */
+
+#ifndef _ERL_WIN_SYS_H
+#define _ERL_WIN_SYS_H
+
+#define HAS_STDARG
+
+#ifdef __GNUC__
+#ifdef pid_t
+/* Really... */
+#undef pid_t
+#endif
+#endif
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <limits.h>
+#include <process.h>
+#include <malloc.h>
+#ifndef __GNUC__
+#include <direct.h>
+#endif
+#include "erl_errno.h"
+#include <io.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <time.h>
+#include <sys/timeb.h>
+#pragma comment(linker,"/manifestdependency:\"type='win32' "\
+ "name='Microsoft.Windows.Common-Controls' "\
+ "version='6.0.0.0' processorArchitecture='*' "\
+ "publicKeyToken='6595b64144ccf1df' language='*'\"")
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+
+/*
+ * Define MAXPATHLEN in terms of MAXPATH if available.
+ */
+
+#ifndef MAXPATH
+#define MAXPATH MAX_PATH
+#endif /* MAXPATH */
+
+#ifndef MAXPATHLEN
+#define MAXPATHLEN MAXPATH
+#endif /* MAXPATHLEN */
+
+/*
+ * Various configuration options, used to be in the Makefile.
+ */
+
+#define NO_ASINH
+#define NO_ACOSH
+#define NO_ATANH
+#define NO_ERF
+#define NO_ERFC
+
+#define NO_SYSLOG
+#define NO_SYSCONF
+#define NO_DAEMON
+#define NO_PWD
+/*#define HAVE_MEMMOVE*/
+
+#define strncasecmp _strnicmp
+
+/*
+ * Practical Windows-specific macros.
+ */
+
+#define CreateAutoEvent(state) CreateEvent(NULL, FALSE, state, NULL)
+#define CreateManualEvent(state) CreateEvent(NULL, TRUE, state, NULL)
+
+
+/*
+ * Our own type of "FDs"
+ */
+#define ERTS_SYS_FD_TYPE HANDLE
+#define NO_FSTAT_ON_SYS_FD_TYPE 1 /* They are events, not files */
+
+#define HAVE_ERTS_CHECK_IO_DEBUG
+int erts_check_io_debug(void);
+
+/*
+ * For erl_time_sup
+ */
+#define HAVE_GETHRTIME
+
+#define sys_init_hrtime() /* Nothing */
+
+#define SYS_CLK_TCK 1000
+#define SYS_CLOCK_RESOLUTION 1
+
+typedef struct {
+ long tv_sec;
+ long tv_usec;
+} SysTimeval;
+
+typedef struct {
+ clock_t tms_utime;
+ clock_t tms_stime;
+ clock_t tms_cutime;
+ clock_t tms_cstime;
+} SysTimes;
+
+#define HAVE_INT64 1
+#if defined (__GNUC__)
+typedef unsigned long long Uint64;
+typedef long long Sint64;
+
+typedef long long SysHrTime;
+#else
+typedef ULONGLONG Uint64;
+typedef LONGLONG Sint64;
+
+typedef LONGLONG SysHrTime;
+#endif
+
+extern int sys_init_time(void);
+extern void sys_gettimeofday(SysTimeval *tv);
+extern SysHrTime sys_gethrtime(void);
+extern clock_t sys_times(SysTimes *buffer);
+
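+/*
+ * Illustrative sketch (assumption): typical use of the time interface
+ * declared above from emulator code:
+ *
+ *     SysTimeval tv;
+ *     Uint64 now_us;
+ *
+ *     sys_gettimeofday(&tv);
+ *     now_us = (Uint64) tv.tv_sec * 1000000 + tv.tv_usec;
+ */
+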
+extern char *win_build_environment(char *);
+
+typedef struct {
+ char *environment_strings;
+ char *next_string;
+} GETENV_STATE;
+
+void erts_sys_env_init(void);
+
+/*
+ * These are to avoid irritating warnings.
+ */
+#pragma warning(disable : 4244)
+#pragma warning(disable : 4018)
+
+/*
+ * Floating point support.
+ */
+
+extern volatile int erl_fp_exception;
+
+#include <float.h>
+#if defined (__GNUC__)
+int _finite(double x);
+#endif
+#endif
+
+/*#define NO_FPE_SIGNALS*/
+#define erts_get_current_fp_exception() NULL
+#define __ERTS_FP_CHECK_INIT(fpexnp) do {} while (0)
+#define __ERTS_FP_ERROR(fpexnp, f, Action) if (!_finite(f)) { Action; } else {}
+#define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) __ERTS_FP_ERROR(fpexnp, f, Action)
+#define __ERTS_SAVE_FP_EXCEPTION(fpexnp)
+#define __ERTS_RESTORE_FP_EXCEPTION(fpexnp)
+
+#define ERTS_FP_CHECK_INIT(p) __ERTS_FP_CHECK_INIT(&(p)->fp_exception)
+#define ERTS_FP_ERROR(p, f, A) __ERTS_FP_ERROR(&(p)->fp_exception, f, A)
+#define ERTS_SAVE_FP_EXCEPTION(p) __ERTS_SAVE_FP_EXCEPTION(&(p)->fp_exception)
+#define ERTS_RESTORE_FP_EXCEPTION(p) __ERTS_RESTORE_FP_EXCEPTION(&(p)->fp_exception)
+#define ERTS_FP_ERROR_THOROUGH(p, f, A) __ERTS_FP_ERROR_THOROUGH(&(p)->fp_exception, f, A)
+
+#define erts_sys_block_fpe() 0
+#define erts_sys_unblock_fpe(x) do{}while(0)
+
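+/*
+ * Illustrative sketch (assumption): emulator code typically guards a
+ * floating point result like this, where p is a process structure with
+ * an fp_exception field:
+ *
+ *     ERTS_FP_CHECK_INIT(p);
+ *     res = x / y;
+ *     ERTS_FP_ERROR(p, res, goto badarith);
+ *
+ * On Windows these macros reduce to a plain _finite() test; no FP
+ * exceptions are trapped.
+ */
+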
+#define SIZEOF_SHORT 2
+#define SIZEOF_INT 4
+#define SIZEOF_LONG 4
+#define SIZEOF_VOID_P 4
+#define SIZEOF_SIZE_T 4
+#define SIZEOF_OFF_T 4
+
+/*
+ * Seems to be missing.
+ */
+#ifndef __GNUC__
+typedef long ssize_t;
+#endif
+
+/* Threads */
+#ifdef USE_THREADS
+int init_async(int);
+int exit_async(void);
+#endif
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
new file mode 100644
index 0000000000..3194493ac8
--- /dev/null
+++ b/erts/emulator/sys/win32/sys.c
@@ -0,0 +1,3093 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * system-dependent functions
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_alloc.h"
+#include "erl_sys_driver.h"
+#include "global.h"
+#include "erl_threads.h"
+#include "../../drivers/win32/win_con.h"
+
+
+void erts_sys_init_float(void);
+
+void erl_start(int, char**);
+void erl_exit(int n, char*, _DOTS_);
+void erl_error(char*, va_list);
+void erl_crash_dump(char*, int, char*, ...);
+
+/*
+ * Microsoft-specific function to map a WIN32 error code to a Posix errno.
+ */
+extern void _dosmaperr(DWORD);
+
+#ifdef ERL_RUN_SHARED_LIB
+#ifdef __argc
+#undef __argc
+#endif
+#define __argc e_argc
+#ifdef __argv
+#undef __argv
+#endif
+#define __argv e_argv
+#endif
+
+static void init_console();
+static int get_and_remove_option(int* argc, char** argv, const char* option);
+static char *get_and_remove_option2(int *argc, char **argv,
+ const char *option);
+static int init_async_io(struct async_io* aio, int use_threads);
+static void release_async_io(struct async_io* aio, ErlDrvPort);
+static void async_read_file(struct async_io* aio, LPVOID buf, DWORD numToRead);
+static int async_write_file(struct async_io* aio, LPVOID buf, DWORD numToWrite);
+static int get_overlapped_result(struct async_io* aio,
+ LPDWORD pBytesRead, BOOL wait);
+static FUNCTION(BOOL, CreateChildProcess, (char *, HANDLE, HANDLE,
+ HANDLE, LPHANDLE, BOOL,
+ LPVOID, LPTSTR, unsigned,
+ char **, int *));
+static int create_pipe(LPHANDLE, LPHANDLE, BOOL, BOOL);
+static int ApplicationType(const char* originalName, char fullPath[MAX_PATH],
+ BOOL search_in_path, BOOL handle_quotes,
+ int *error_return);
+
+HANDLE erts_service_event;
+
+#ifdef ERTS_SMP
+static erts_smp_tsd_key_t win32_errstr_key;
+#endif
+
+static erts_smp_atomic_t pipe_creation_counter;
+
+static erts_smp_mtx_t sys_driver_data_lock;
+
+
+/* Results from ApplicationType is one of */
+#define APPL_NONE 0
+#define APPL_DOS 1
+#define APPL_WIN3X 2
+#define APPL_WIN32 3
+
+static FUNCTION(int, driver_write, (long, HANDLE, byte*, int));
+static void common_stop(int);
+static int create_file_thread(struct async_io* aio, int mode);
+static DWORD WINAPI threaded_reader(LPVOID param);
+static DWORD WINAPI threaded_writer(LPVOID param);
+static DWORD WINAPI threaded_exiter(LPVOID param);
+
+#ifdef DEBUG
+static void debug_console(void);
+#endif
+
+BOOL WINAPI ctrl_handler(DWORD dwCtrlType);
+
+#define PORT_BUFSIZ 4096
+
+#define PORT_FREE (-1)
+#define PORT_EXITING (-2)
+
+#define DRV_BUF_ALLOC(SZ) \
+ erts_alloc_fnf(ERTS_ALC_T_DRV_DATA_BUF, (SZ))
+#define DRV_BUF_REALLOC(P, SZ) \
+ erts_realloc_fnf(ERTS_ALC_T_DRV_DATA_BUF, (P), (SZ))
+#define DRV_BUF_FREE(P) \
+ erts_free(ERTS_ALC_T_DRV_DATA_BUF, (P))
+
+/********************* General functions ****************************/
+
+/*
+ * Whether create_pipe() should use a named pipe or an anonymous pipe.
+ * (Named pipes are not supported on Windows 95.)
+ */
+
+static int max_files = 1024;
+
+static BOOL use_named_pipes;
+static BOOL win_console = FALSE;
+
+
+static OSVERSIONINFO int_os_version; /* Version information for Win32. */
+
+
+/* This is the system's main function (which may or may not be called "main")
+ - do general system-dependent initialization
+ - call erl_start() to parse arguments and do other init
+*/
+
+static erts_smp_atomic_t sys_misc_mem_sz;
+
+HMODULE beam_module = NULL;
+
+void erl_sys_init();
+
+void erl_sys_args(int* argc, char** argv);
+
+int nohup;
+#ifndef __GNUC__
+void erts_sys_invalid_parameter_handler(const wchar_t * expression,
+ const wchar_t * function,
+ const wchar_t * file,
+ unsigned int line,
+ uintptr_t pReserved
+ )
+{
+#ifdef DEBUG
+ fprintf(stderr,
+ "Debug: Invalid parameter\"%ls\" "
+ "(detected in \"%ls\" [%ls:%d]) \n",
+ (expression) ? expression : L"(unknown)",
+ (function) ? function : L"(unknown)",
+ (file) ? file : L"(unknown)",
+ line);
+#endif
+ return;
+}
+#endif
+
+void sys_primitive_init(HMODULE beam)
+{
+#ifndef __GNUC__
+ /* Initialize this module handle (the beam.dll module handle) and
+ take care of the standard library's aggressive invalid parameter
+ handling... */
+ _set_invalid_parameter_handler(&erts_sys_invalid_parameter_handler);
+#endif
+ beam_module = (HMODULE) beam;
+}
+
+Uint
+erts_sys_misc_mem_sz(void)
+{
+ Uint res = (Uint) erts_check_io_size();
+ res += (Uint) erts_smp_atomic_read(&sys_misc_mem_sz);
+ return res;
+}
+
+void erl_sys_args(int* argc, char** argv)
+{
+ char *event_name;
+ nohup = get_and_remove_option(argc, argv, "-nohup");
+
+#ifdef DEBUG
+ /*
+ * Start a debug console if -console option given.
+ */
+
+ if (get_and_remove_option(argc, argv, "-console")) {
+ debug_console();
+ }
+#endif
+
+ if (nohup && (event_name = get_and_remove_option2(argc, argv,
+ "-service_event"))) {
+ if ((erts_service_event =
+ OpenEvent(EVENT_ALL_ACCESS,FALSE,event_name)) == NULL) {
+ erts_fprintf(stderr,
+ "Warning: could not open service event: %s\r\n",
+ event_name);
+ }
+ } else {
+ erts_service_event = NULL;
+ }
+
+#ifdef DEBUG
+ /*
+ * Given the "-threads" option, always use threads instead of
+ * named pipes.
+ */
+
+ if (get_and_remove_option(argc, argv, "-threads")) {
+ use_named_pipes = FALSE;
+ }
+#endif
+}
+
+void
+erts_sys_prepare_crash_dump(void)
+{
+ /* Windows - free file descriptors are hopefully available */
+ return;
+}
+
+static void
+init_console()
+{
+ char* mode = erts_read_env("ERL_CONSOLE_MODE");
+
+ if (!mode || strcmp(mode, "window") == 0) {
+ win_console = TRUE;
+ ConInit();
+ /*nohup = 0;*/
+ } else if (strncmp(mode, "tty:", 4) == 0) {
+ if (mode[5] == 'c') {
+ setvbuf(stdout, NULL, _IONBF, 0);
+ }
+ if (mode[6] == 'c') {
+ setvbuf(stderr, NULL, _IONBF, 0);
+ }
+ }
+
+ erts_free_read_env(mode);
+}
+
+int sys_max_files()
+{
+ return max_files;
+}
+
+/*
+ * Looks for the given option in the argv vector. If it is found,
+ * it will be removed from the argv vector.
+ *
+ * When the option is removed, *argc is decremented by this function;
+ * the caller does not need to adjust it.
+ *
+ * Returns: 0 if the option wasn't found, 1 if it was found
+ */
+
+static int
+get_and_remove_option(argc, argv, option)
+ int* argc; /* Number of arguments. */
+ char* argv[]; /* The argument vector. */
+ const char* option; /* Option to search for and remove. */
+{
+ int i;
+
+ for (i = 1; i < *argc; i++) {
+ if (strcmp(argv[i], option) == 0) {
+ (*argc)--;
+ while (i < *argc) {
+ argv[i] = argv[i+1];
+ i++;
+ }
+ argv[i] = NULL;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static char *get_and_remove_option2(int *argc, char **argv,
+ const char *option)
+{
+ char *ret;
+ int i;
+
+ for (i = 1; i < *argc; i++) {
+ if (strcmp(argv[i], option) == 0) {
+ if (i+1 < *argc) {
+ ret = argv[i+1];
+ (*argc) -= 2;
+ while (i < *argc) {
+ argv[i] = argv[i+2];
+ i++;
+ }
+ argv[i] = NULL;
+ return ret;
+ }
+ }
+ }
+ return NULL;
+}
+
+
+/************************** OS info *******************************/
+
+/* Used by erlang:info/1. */
+/* (This code was formerly in drv.XXX/XXX_os_drv.c) */
+
+char os_type[] = "win32";
+
+void
+os_flavor(namebuf, size)
+char* namebuf; /* Where to return the name. */
+unsigned size; /* Size of name buffer. */
+{
+ switch (int_os_version.dwPlatformId) {
+ case VER_PLATFORM_WIN32_WINDOWS:
+ strcpy(namebuf, "windows");
+ break;
+ case VER_PLATFORM_WIN32_NT:
+ strcpy(namebuf, "nt");
+ break;
+ default: /* Can't happen. */
+ strcpy(namebuf, "unknown");
+ break;
+ }
+}
+
+void
+os_version(pMajor, pMinor, pBuild)
+int* pMajor; /* Pointer to major version. */
+int* pMinor; /* Pointer to minor version. */
+int* pBuild; /* Pointer to build number. */
+{
+ *pMajor = int_os_version.dwMajorVersion;
+ *pMinor = int_os_version.dwMinorVersion;
+ *pBuild = int_os_version.dwBuildNumber;
+}
+
+/************************** Port I/O *******************************/
+
+/* I. Common stuff */
+
+/* II. The spawn/fd/vanilla drivers */
+
+/*
+ * Definitions for driver flags.
+ */
+
+#define DF_OVR_READY 1 /* Overlapped result is ready. */
+#define DF_EXIT_THREAD 2 /* The thread should exit. */
+#define DF_XLAT_CR 4 /* The thread should translate CRs. */
+#define DF_DROP_IF_INVH 8       /* Drop packets instead of crashing if
+					the handle is invalid (stderr) */
+
+#define OV_BUFFER_PTR(dp) ((LPVOID) ((dp)->ov.Internal))
+#define OV_NUM_TO_READ(dp) ((dp)->ov.InternalHigh)
+
+/*
+ * This data is used to make overlapped I/O operations work on both
+ * Windows NT (using true overlapped I/O) and Windows 95 (using threads).
+ */
+
+typedef struct async_io {
+ unsigned flags; /* Driver flags, definitions found above. */
+ HANDLE thread; /* If -1, overlapped I/O is used (Windows NT).
+ * Otherwise, it is the handle of the thread used
+ * for simulating overlapped I/O (Windows 95 and
+ * the console for Windows NT).
+ */
+ HANDLE fd; /* Handle for file or pipe. */
+#ifdef ERTS_SMP
+ int async_io_active; /* if true, a close of the file will signal the event in ov */
+#endif
+ OVERLAPPED ov; /* Control structure for overlapped reading.
+ * When overlapped reading is simulated with
+ * a thread, the fields are used as follows:
+ * ov.Internal - Read buffer.
+ * ov.InternalHigh - Number of bytes to read.
+ * See macros above.
+ */
+ HANDLE ioAllowed; /* The thread will wait for this event
+ * before starting a new read or write.
+ */
+    DWORD pendingError;	/* Used to delay presenting an error to Erlang
+ * until the check_io function is entered.
+ */
+ DWORD bytesTransferred; /* Bytes read or write in the last operation.
+ * Valid only when DF_OVR_READY is set.
+ */
+} AsyncIo;
+
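+/*
+ * Illustrative sketch (assumption, simplified): the simulated
+ * (threaded) overlapped path is driven as follows.  The emulator side
+ * stores the request in the OVERLAPPED struct and signals ioAllowed;
+ * the reader/writer thread performs the blocking call and signals
+ * ov.hEvent when it is done:
+ *
+ *     OV_BUFFER_PTR(aio) = buf;                  (emulator side)
+ *     OV_NUM_TO_READ(aio) = len;
+ *     ResetEvent(aio->ov.hEvent);
+ *     SetEvent(aio->ioAllowed);
+ *     ...
+ *     WaitForSingleObject(aio->ov.hEvent, ...);  (pick up the result)
+ */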
+
+/*
+ * Input thread for fd_driver (if fd_driver is running).
+ */
+static AsyncIo* fd_driver_input = NULL;
+static BOOL (WINAPI *fpSetHandleInformation)(HANDLE,DWORD,DWORD);
+
+/*
+ * This data is used by the spawn and vanilla drivers.
+ * There will be one entry for each port, even if the input
+ * and output HANDLES are different. Since handles are not
+ * guaranteed to be small numbers in Win32, we cannot index
+ * with them.  That is, the index of an entry is not related to
+ * any of its file handles.
+ */
+
+typedef struct driver_data {
+ int totalNeeded; /* Total number of bytes needed to fill
+ * up the packet header or packet. */
+ int bytesInBuffer; /* Number of bytes read so far in
+ * the input buffer.
+ */
+ int inBufSize; /* Size of input buffer. */
+ byte *inbuf; /* Buffer to use for overlapped read. */
+ int outBufSize; /* Size of output buffer. */
+ byte *outbuf; /* Buffer to use for overlapped write. */
+ ErlDrvPort port_num; /* The port number. */
+    int packet_bytes;		/* 0: continuous stream, 1, 2, or 4: the number
+ * of bytes in the packet header.
+ */
+ HANDLE port_pid; /* PID of the port process. */
+ AsyncIo in; /* Control block for overlapped reading. */
+ AsyncIo out; /* Control block for overlapped writing. */
+ int report_exit; /* Do report exit status for the port */
+} DriverData;
+
+static DriverData* driver_data; /* Pointer to array of driver data. */
+
+/* Driver interfaces */
+static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*);
+static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
+static ErlDrvData vanilla_start(ErlDrvPort, char*, SysDriverOpts*);
+static int spawn_init(void);
+static int fd_init(void);
+static void fd_stop(ErlDrvData);
+static void stop(ErlDrvData);
+static void output(ErlDrvData, char*, int);
+static void ready_input(ErlDrvData, ErlDrvEvent);
+static void ready_output(ErlDrvData, ErlDrvEvent);
+static void stop_select(ErlDrvEvent, void*);
+
+struct erl_drv_entry spawn_driver_entry = {
+ spawn_init,
+ spawn_start,
+ stop,
+ output,
+ ready_input,
+ ready_output,
+ "spawn",
+ NULL, /* finish */
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL,
+ NULL, /* process_exit */
+ stop_select
+};
+
+#ifdef HARD_POLL_DEBUG
+extern void poll_debug_set_active_fd(ErtsSysFdType fd);
+extern void poll_debug_read_begin(ErtsSysFdType fd);
+extern void poll_debug_read_done(ErtsSysFdType fd, int bytes);
+extern void poll_debug_async_initialized(ErtsSysFdType fd);
+extern void poll_debug_async_immediate(ErtsSysFdType fd, int bytes);
+extern void poll_debug_write_begin(ErtsSysFdType fd);
+extern void poll_debug_write_done(ErtsSysFdType fd, int bytes);
+#endif
+
+extern int null_func(void);
+
+struct erl_drv_entry fd_driver_entry = {
+ fd_init,
+ fd_start,
+ fd_stop,
+ output,
+ ready_input,
+ ready_output,
+ "fd",
+ NULL, /* finish */
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL,
+ NULL, /* process_exit */
+ stop_select
+};
+
+struct erl_drv_entry vanilla_driver_entry = {
+ null_func,
+ vanilla_start,
+ stop,
+ output,
+ ready_input,
+ ready_output,
+ "vanilla",
+ NULL, /* finish */
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL,
+ NULL, /* process_exit */
+ stop_select
+};
+
+#if defined(USE_THREADS) && !defined(ERTS_SMP)
+
+static int async_drv_init(void);
+static ErlDrvData async_drv_start(ErlDrvPort, char*, SysDriverOpts*);
+static void async_drv_stop(ErlDrvData);
+static void async_drv_input(ErlDrvData, ErlDrvEvent);
+
+/* INTERNAL use only */
+
+void null_output(ErlDrvData drv_data, char* buf, int len)
+{
+}
+
+void null_ready_output(ErlDrvData drv_data, ErlDrvEvent event)
+{
+}
+
+struct erl_drv_entry async_driver_entry = {
+ async_drv_init,
+ async_drv_start,
+ async_drv_stop,
+ null_output,
+ async_drv_input,
+ null_ready_output,
+ "async",
+ NULL, /* finish */
+ NULL, /* handle */
+ NULL, /* control */
+ NULL, /* timeout */
+ NULL, /* outputv */
+ NULL, /* ready_async */
+ NULL, /* flush */
+ NULL, /* call */
+ NULL, /* event */
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ 0, /* ERL_DRV_FLAGs */
+ NULL,
+ NULL, /* process_exit */
+ stop_select
+};
+
+#endif
+
+/*
+ * Initialises a DriverData structure.
+ *
+ * Results: Returns a pointer to a DriverData structure, or NULL
+ * if the initialisation failed.
+ */
+
+static DriverData*
+new_driver_data(port_num, packet_bytes, wait_objs_required, use_threads)
+ int port_num; /* The port number. */
+ int packet_bytes; /* Number of bytes in header. */
+    int wait_objs_required;	/* The number of objects this port is going
+				 * to wait for (typically 1 or 2). */
+ int use_threads; /* TRUE if threads are intended to be used. */
+{
+ DriverData* dp;
+
+ erts_smp_mtx_lock(&sys_driver_data_lock);
+
+ DEBUGF(("new_driver_data(port_num %d, pb %d)\n",
+ port_num, packet_bytes));
+
+ /*
+     * We used to check first of all that there was enough room in the
+     * array used by WaitForMultipleObjects(), but that is no longer
+     * necessary, since driver_select() can't fail.
+ */
+
+ /*
+ * Search for a free slot.
+ */
+
+ for (dp = driver_data; dp < driver_data+max_files; dp++) {
+ if (dp->port_num == PORT_FREE) {
+ dp->bytesInBuffer = 0;
+ dp->totalNeeded = packet_bytes;
+ dp->inBufSize = PORT_BUFSIZ;
+ dp->inbuf = DRV_BUF_ALLOC(dp->inBufSize);
+ if (dp->inbuf == NULL) {
+ erts_smp_mtx_unlock(&sys_driver_data_lock);
+ return NULL;
+ }
+ erts_smp_atomic_add(&sys_misc_mem_sz, dp->inBufSize);
+ dp->outBufSize = 0;
+ dp->outbuf = NULL;
+ dp->port_num = port_num;
+ dp->packet_bytes = packet_bytes;
+ dp->port_pid = INVALID_HANDLE_VALUE;
+ if (init_async_io(&dp->in, use_threads) == -1)
+ break;
+ if (init_async_io(&dp->out, use_threads) == -1)
+ break;
+ erts_smp_mtx_unlock(&sys_driver_data_lock);
+ return dp;
+ }
+ }
+
+ /*
+ * Error or no free driver data.
+ */
+
+ if (dp < driver_data+max_files) {
+ release_async_io(&dp->in, dp->port_num);
+ release_async_io(&dp->out, dp->port_num);
+ }
+ erts_smp_mtx_unlock(&sys_driver_data_lock);
+ return NULL;
+}
+
+static void
+release_driver_data(DriverData* dp)
+{
+ erts_smp_mtx_lock(&sys_driver_data_lock);
+
+#ifdef ERTS_SMP
+    /* This is a workaround for the fact that CancelIo can't cancel
+       requests issued by another thread and that we still can't use
+       CancelIoEx as that is only available in Vista etc. */
+ if(dp->in.async_io_active && dp->in.fd != INVALID_HANDLE_VALUE) {
+ CloseHandle(dp->in.fd);
+ dp->in.fd = INVALID_HANDLE_VALUE;
+ DEBUGF(("Waiting for the in event thingie"));
+ WaitForSingleObject(dp->in.ov.hEvent,INFINITE);
+ DEBUGF(("...done\n"));
+ }
+ if(dp->out.async_io_active && dp->out.fd != INVALID_HANDLE_VALUE) {
+ CloseHandle(dp->out.fd);
+ dp->out.fd = INVALID_HANDLE_VALUE;
+ DEBUGF(("Waiting for the out event thingie"));
+ WaitForSingleObject(dp->out.ov.hEvent,INFINITE);
+ DEBUGF(("...done\n"));
+ }
+#else
+ if (dp->out.thread == (HANDLE) -1 && dp->in.fd != INVALID_HANDLE_VALUE) {
+ CancelIo(dp->in.fd);
+ }
+ if (dp->out.thread == (HANDLE) -1 && dp->out.fd != INVALID_HANDLE_VALUE) {
+ CancelIo(dp->out.fd);
+ }
+#endif
+
+ if (dp->inbuf != NULL) {
+ ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->inBufSize);
+ erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->inBufSize);
+ DRV_BUF_FREE(dp->inbuf);
+ dp->inBufSize = 0;
+ dp->inbuf = NULL;
+ }
+ ASSERT(dp->inBufSize == 0);
+
+ if (dp->outbuf != NULL) {
+ ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->outBufSize);
+ erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->outBufSize);
+ DRV_BUF_FREE(dp->outbuf);
+ dp->outBufSize = 0;
+ dp->outbuf = NULL;
+ }
+ ASSERT(dp->outBufSize == 0);
+
+ if (dp->port_pid != INVALID_HANDLE_VALUE) {
+ CloseHandle(dp->port_pid);
+ dp->port_pid = INVALID_HANDLE_VALUE;
+ }
+
+ release_async_io(&dp->in, dp->port_num);
+ release_async_io(&dp->out, dp->port_num);
+
+ /*
+ * This must be last, because this function might be executed from
+ * the exit thread.
+ */
+
+ dp->port_num = PORT_FREE;
+ erts_smp_mtx_unlock(&sys_driver_data_lock);
+}
+
+/*
+ * Stores input and output file descriptors in the DriverData structure,
+ * and calls driver_select().
+ *
+ * This function fortunately can't fail!
+ */
+
+static ErlDrvData
+set_driver_data(dp, ifd, ofd, read_write, report_exit)
+ DriverData* dp;
+ HANDLE ifd;
+ HANDLE ofd;
+ int read_write;
+ int report_exit;
+{
+ int index = dp - driver_data;
+ int result;
+
+ dp->in.fd = ifd;
+ dp->out.fd = ofd;
+ dp->report_exit = report_exit;
+
+ if (read_write & DO_READ) {
+ result = driver_select(dp->port_num, (ErlDrvEvent)dp->in.ov.hEvent,
+ ERL_DRV_READ|ERL_DRV_USE, 1);
+ ASSERT(result != -1);
+ async_read_file(&dp->in, dp->inbuf, dp->inBufSize);
+ }
+
+ if (read_write & DO_WRITE) {
+ result = driver_select(dp->port_num, (ErlDrvEvent)dp->out.ov.hEvent,
+ ERL_DRV_WRITE|ERL_DRV_USE, 1);
+ ASSERT(result != -1);
+ }
+ return (ErlDrvData)index;
+}
+
+/*
+ * Initialises an AsyncIo structure.
+ */
+
+static int
+init_async_io(AsyncIo* aio, int use_threads)
+{
+ aio->flags = 0;
+ aio->thread = (HANDLE) -1;
+ aio->fd = INVALID_HANDLE_VALUE;
+ aio->ov.hEvent = NULL;
+ aio->ov.Offset = 0L;
+ aio->ov.OffsetHigh = 0L;
+ aio->ioAllowed = NULL;
+ aio->pendingError = 0;
+ aio->bytesTransferred = 0;
+#ifdef ERTS_SMP
+ aio->async_io_active = 0;
+#endif
+ aio->ov.hEvent = CreateManualEvent(FALSE);
+ if (aio->ov.hEvent == NULL)
+ return -1;
+ if (use_threads) {
+ aio->ioAllowed = CreateAutoEvent(FALSE);
+ if (aio->ioAllowed == NULL)
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Releases everything allocated in an AsyncIo structure.
+ */
+
+static void
+release_async_io(AsyncIo* aio, ErlDrvPort port_num)
+{
+ aio->flags = 0;
+
+ if (aio->thread != (HANDLE) -1)
+ CloseHandle(aio->thread);
+ aio->thread = (HANDLE) -1;
+
+ if (aio->fd != INVALID_HANDLE_VALUE)
+ CloseHandle(aio->fd);
+ aio->fd = INVALID_HANDLE_VALUE;
+
+ if (aio->ov.hEvent != NULL) {
+ (void) driver_select(port_num,
+ (ErlDrvEvent)aio->ov.hEvent,
+ ERL_DRV_USE, 0);
+ /* was CloseHandle(aio->ov.hEvent); */
+ }
+
+ aio->ov.hEvent = NULL;
+
+ if (aio->ioAllowed != NULL)
+ CloseHandle(aio->ioAllowed);
+ aio->ioAllowed = NULL;
+}
+
+/* ----------------------------------------------------------------------
+ * async_read_file --
+ *	Initiates an asynchronous file read, or simulates that using
+ * the thread associated with this driver data. To get the results,
+ * call get_overlapped_result().
+ *
+ * Results:
+ * None.
+ * ----------------------------------------------------------------------
+ */
+
+static void
+async_read_file(aio, buf, numToRead)
+ AsyncIo* aio; /* Pointer to driver data. */
+ LPVOID buf; /* Pointer to buffer to receive data. */
+ DWORD numToRead; /* Number of bytes to read. */
+{
+ aio->pendingError = NO_ERROR;
+#ifdef HARD_POLL_DEBUG
+ poll_debug_async_initialized(aio->ov.hEvent);
+#endif
+ if (aio->thread != (HANDLE) -1) {
+ DEBUGF(("async_read_file: signaling thread 0x%x, event 0x%x\n",
+ aio->thread, aio->ioAllowed));
+ OV_BUFFER_PTR(aio) = buf;
+ OV_NUM_TO_READ(aio) = numToRead;
+ ResetEvent(aio->ov.hEvent);
+ SetEvent(aio->ioAllowed);
+ } else {
+#ifdef ERTS_SMP
+ aio->async_io_active = 1; /* Will get 0 when the event actually happened */
+#endif
+ if (ReadFile(aio->fd, buf, numToRead,
+ &aio->bytesTransferred, &aio->ov)) {
+	    DEBUGF(("async_read_file: ReadFile() succeeded: %d bytes\n",
+ aio->bytesTransferred));
+#ifdef HARD_POLL_DEBUG
+ poll_debug_async_immediate(aio->ov.hEvent, aio->bytesTransferred);
+#endif
+ aio->flags |= DF_OVR_READY;
+ SetEvent(aio->ov.hEvent);
+ } else {
+ DWORD error = GetLastError();
+ if (error != ERROR_IO_PENDING) {
+#ifdef HARD_POLL_DEBUG
+ poll_debug_async_immediate(aio->ov.hEvent, 0);
+#endif
+ aio->pendingError = error;
+ SetEvent(aio->ov.hEvent);
+ }
+ DEBUGF(("async_read_file: ReadFile() -> %s\n", win32_errorstr(error)));
+ }
+ }
+}
+
+/* ----------------------------------------------------------------------
+ * async_write_file --
+ *	Initiates an asynchronous file write, or simulates that using
+ * the output thread associated with this driver data.
+ * To get the results, call get_overlapped_result().
+ *
+ * Results:
+ * None.
+ * ----------------------------------------------------------------------
+ */
+static int
+async_write_file(aio, buf, numToWrite)
+ AsyncIo* aio; /* Pointer to async control block. */
+ LPVOID buf; /* Pointer to buffer with data to write. */
+ DWORD numToWrite; /* Number of bytes to write. */
+{
+ aio->pendingError = NO_ERROR;
+ if (aio->thread != (HANDLE) -1) {
+ DEBUGF(("async_write_file: signaling thread 0x%x, event 0x%x\n",
+ aio->thread, aio->ioAllowed));
+ OV_BUFFER_PTR(aio) = buf;
+ OV_NUM_TO_READ(aio) = numToWrite;
+ ResetEvent(aio->ov.hEvent);
+ SetEvent(aio->ioAllowed);
+ } else {
+#ifdef ERTS_SMP
+ aio->async_io_active = 1; /* Will get 0 when the event actually happened */
+#endif
+ if (WriteFile(aio->fd, buf, numToWrite,
+ &aio->bytesTransferred, &aio->ov)) {
+	    DEBUGF(("async_write_file: WriteFile() succeeded: %d bytes\n",
+ aio->bytesTransferred));
+#ifdef ERTS_SMP
+ aio->async_io_active = 0; /* The event will not be signalled */
+#endif
+ ResetEvent(aio->ov.hEvent);
+ return TRUE;
+ } else {
+ DWORD error = GetLastError();
+ if (error != ERROR_IO_PENDING) {
+ aio->pendingError = error;
+ SetEvent(aio->ov.hEvent);
+ }
+ DEBUGF(("async_write_file: WriteFile() -> %s\n", win32_errorstr(error)));
+ }
+ }
+ return FALSE;
+}
+
+/* ----------------------------------------------------------------------
+ * get_overlapped_result --
+ *
+ * Results:
+ * Returns the error code for the overlapped result, or NO_ERROR
+ * if no error.
+ * ----------------------------------------------------------------------
+ */
+static int
+get_overlapped_result(aio, pBytesRead, wait)
+ AsyncIo* aio; /* Pointer to async control block. */
+ LPDWORD pBytesRead; /* Where to place the number of bytes
+ * transferred.
+ */
+ BOOL wait; /* If true, wait until result is ready. */
+{
+ DWORD error = NO_ERROR; /* Error status from last function. */
+
+ if (aio->thread != (HANDLE) -1) {
+
+ /*
+ * Simulate overlapped io with a thread.
+ */
+ DEBUGF(("get_overlapped_result: about to wait for event 0x%x\n",
+ aio->ov.hEvent));
+ error = WaitForSingleObject(aio->ov.hEvent, wait ? INFINITE : 0);
+ switch (error) {
+ case WAIT_OBJECT_0:
+ error = aio->pendingError;
+ aio->pendingError = NO_ERROR;
+ *pBytesRead = aio->bytesTransferred;
+ ResetEvent(aio->ov.hEvent);
+ DEBUGF(("get_overlapped_result -> %s\n",
+ win32_errorstr(error)));
+ return error;
+ case WAIT_TIMEOUT:
+	    DEBUGF(("get_overlapped_result -> %s\n",
+		    win32_errorstr(ERROR_IO_INCOMPLETE)));
+ return ERROR_IO_INCOMPLETE;
+ case WAIT_FAILED: /* XXX: Shouldn't happen? */
+ error = GetLastError();
+ DEBUGF(("get_overlapped_result (WAIT_FAILED) -> %s\n",
+ win32_errorstr(error)));
+ return error;
+ }
+ } else if (aio->pendingError != NO_ERROR) { /* Pending error. */
+ error = aio->pendingError;
+ aio->pendingError = NO_ERROR;
+ ResetEvent(aio->ov.hEvent);
+ DEBUGF(("get_overlapped_result: pending error: %s\n",
+ win32_errorstr(error)));
+ return error;
+    } else if (aio->flags & DF_OVR_READY) { /* Operation succeeded. */
+ aio->flags &= ~DF_OVR_READY;
+ *pBytesRead = aio->bytesTransferred;
+ ResetEvent(aio->ov.hEvent);
+ DEBUGF(("get_overlapped_result: delayed success: %d bytes\n",
+ aio->bytesTransferred));
+ } else if (!GetOverlappedResult(aio->fd, &aio->ov, pBytesRead, wait)) {
+ error = GetLastError();
+ ResetEvent(aio->ov.hEvent);
+ DEBUGF(("get_overlapped_result: error: %s\n", win32_errorstr(error)));
+ return error;
+ } else { /* Success. */
+ DEBUGF(("get_overlapped_result: success\n"));
+ ResetEvent(aio->ov.hEvent);
+ }
+ return NO_ERROR;
+}
+
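+/*
+ * Illustrative sketch (assumption, simplified): a ready_input style
+ * caller pairs the two halves of the asynchronous interface like this:
+ *
+ *     DWORD bytesRead;
+ *     int error = get_overlapped_result(&dp->in, &bytesRead, TRUE);
+ *
+ *     if (error == NO_ERROR) {
+ *         ... consume bytesRead bytes from dp->inbuf ...
+ *         async_read_file(&dp->in, dp->inbuf, dp->inBufSize);
+ *     }
+ */
+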
+static int
+fd_init(void)
+{
+ char kernel_dll_name[] = "kernel32";
+ HMODULE module;
+ module = GetModuleHandle(kernel_dll_name);
+ fpSetHandleInformation = (module != NULL) ?
+ (BOOL (WINAPI *)(HANDLE,DWORD,DWORD))
+ GetProcAddress(module,"SetHandleInformation") :
+ NULL;
+
+ return 0;
+}
+static int
+spawn_init()
+{
+ int i;
+
+ driver_data = (struct driver_data *)
+ erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
+ erts_smp_atomic_add(&sys_misc_mem_sz, max_files*sizeof(struct driver_data));
+ for (i = 0; i < max_files; i++)
+ driver_data[i].port_num = PORT_FREE;
+ return 0;
+}
+
+static ErlDrvData
+spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
+{
+ HANDLE hToChild = INVALID_HANDLE_VALUE; /* Write handle to child. */
+ HANDLE hFromChild = INVALID_HANDLE_VALUE; /* Read handle from child. */
+ HANDLE hChildStdin = INVALID_HANDLE_VALUE; /* Child's stdin. */
+    HANDLE hChildStdout = INVALID_HANDLE_VALUE; /* Child's stdout. */
+    HANDLE hChildStderr = INVALID_HANDLE_VALUE; /* Child's stderr. */
+ int close_child_stderr = 0;
+ DriverData* dp; /* Pointer to driver data. */
+ ErlDrvData retval = ERL_DRV_ERROR_GENERAL; /* Return value. */
+ int ok;
+ int neededSelects = 0;
+ SECURITY_ATTRIBUTES sa = {sizeof(SECURITY_ATTRIBUTES), NULL, TRUE};
+ char* envir = opts->envir;
+ int errno_return = -1;
+
+ if (opts->read_write & DO_READ)
+ neededSelects++;
+ if (opts->read_write & DO_WRITE)
+ neededSelects++;
+
+ if ((dp = new_driver_data(port_num, opts->packet_bytes, neededSelects,
+ !use_named_pipes)) == NULL)
+ return ERL_DRV_ERROR_GENERAL;
+
+ /*
+ * Create two pipes to communicate with the port program.
+ */
+
+ if (opts->read_write & DO_READ) {
+ if (!create_pipe(&hFromChild, &hChildStdout, FALSE,
+ opts->overlapped_io))
+ goto error;
+ } else {
+ hChildStdout = CreateFile("nul", GENERIC_WRITE, 0,
+ &sa, OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL, NULL);
+ DEBUGF(("Created nul file for hChildStdout = %d\n",hChildStdout));
+ }
+ if (opts->read_write & DO_WRITE) {
+ if (!create_pipe(&hChildStdin, &hToChild, TRUE, opts->overlapped_io)) {
+ CloseHandle(hFromChild);
+ hFromChild = INVALID_HANDLE_VALUE;
+ CloseHandle(hChildStdout);
+ goto error;
+ }
+ } else {
+ hChildStdin = CreateFile("nul", GENERIC_READ, 0,
+ &sa, OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL, NULL);
+ DEBUGF(("Created nul file for hChildStdin = %d\n",hChildStdin));
+ }
+
+ /*
+     * Make sure that standard error is a valid handle, because a Command
+     * Prompt window will not work properly otherwise.  We leave standard
+     * error alone if it is okay and no redirection was specified.
+ */
+ hChildStderr = GetStdHandle(STD_ERROR_HANDLE);
+ if (opts->redir_stderr) {
+ hChildStderr = hChildStdout;
+ } else if (hChildStderr == INVALID_HANDLE_VALUE || hChildStderr == 0) {
+ hChildStderr = CreateFile("nul", GENERIC_WRITE, 0, &sa, OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL, NULL);
+ close_child_stderr = 1;
+ }
+ if (fpSetHandleInformation != NULL) {
+ (*fpSetHandleInformation)(hChildStderr, HANDLE_FLAG_INHERIT, 1);
+ }
+ /*
+ * Spawn the port program.
+ */
+
+ DEBUGF(("Spawning \"%s\"\n", name));
+ envir = win_build_environment(envir);
+ ok = CreateChildProcess(name,
+ hChildStdin,
+ hChildStdout,
+ hChildStderr,
+ &dp->port_pid,
+ opts->hide_window,
+ (LPVOID) envir,
+ (LPTSTR) opts->wd,
+ opts->spawn_type,
+ opts->argv,
+ &errno_return);
+ CloseHandle(hChildStdin);
+ CloseHandle(hChildStdout);
+ if (close_child_stderr && hChildStderr != INVALID_HANDLE_VALUE &&
+ hChildStderr != 0) {
+ CloseHandle(hChildStderr);
+ }
+ if (envir != NULL) {
+ erts_free(ERTS_ALC_T_ENVIRONMENT, envir);
+ }
+
+ if (!ok) {
+ dp->port_pid = INVALID_HANDLE_VALUE;
+ if (errno_return >= 0) {
+ retval = ERL_DRV_ERROR_ERRNO;
+ }
+ } else {
+ if (!use_named_pipes) {
+ if ((opts->read_write & DO_READ) &&
+ !create_file_thread(&dp->in, DO_READ))
+ goto error;
+ if ((opts->read_write & DO_WRITE) &&
+ !create_file_thread(&dp->out, DO_WRITE)) {
+ dp->in.flags = DF_EXIT_THREAD;
+ SetEvent(dp->in.ioAllowed);
+ WaitForSingleObject(dp->in.thread, INFINITE);
+ dp->in.thread = (HANDLE) -1;
+ goto error;
+ }
+ }
+#ifdef HARD_POLL_DEBUG
+ if (strncmp(name,"inet_gethost",12) == 0) {
+ erts_printf("Debugging \"%s\"\n", name);
+ poll_debug_set_active_fd(dp->in.ov.hEvent);
+ }
+#endif
+ retval = set_driver_data(dp, hFromChild, hToChild, opts->read_write,
+ opts->exit_status);
+ }
+
+ if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO)
+ return retval;
+
+ error:
+ if (hFromChild != INVALID_HANDLE_VALUE)
+ CloseHandle(hFromChild);
+ if (hToChild != INVALID_HANDLE_VALUE)
+ CloseHandle(hToChild);
+ release_driver_data(dp);
+ if (retval == ERL_DRV_ERROR_ERRNO) {
+ errno = errno_return;
+ }
+ return retval;
+}
+
+static int
+create_file_thread(AsyncIo* aio, int mode)
+{
+ DWORD tid; /* Id for thread. */
+
+ aio->thread = (HANDLE)
+ _beginthreadex(NULL, 0,
+ (mode & DO_WRITE) ? threaded_writer : threaded_reader,
+ aio, 0, &tid);
+
+ return aio->thread != (HANDLE) -1;
+}
+
+/*
+ * A helper function used by CreateChildProcess().
+ * Parses a command line with arguments and returns the length of the
+ * first part containing the program name.
+ * Example: input = "\"Program Files\"\\erl arg1 arg2"
+ * gives 19 as result.
+ * The length returned is equivalent to length(argv[0]) if the
+ * command line had been prepared by _setargv for the main function.
+ */
+int parse_command(char* cmd){
+#define NORMAL 2
+#define STRING 1
+#define STOP 0
+ int i =0;
+ int state = NORMAL;
+ while (cmd[i]) {
+ switch (cmd[i]) {
+ case '"':
+ if (state == NORMAL)
+ state = STRING;
+ else
+ state = NORMAL;
+ break;
+ case '\\':
+ if ((state == STRING) && (cmd[i+1]=='"'))
+ i++;
+ break;
+ case ' ':
+ if (state == NORMAL)
+ state = STOP;
+ break;
+ default:
+ break;
+ }
+ if (state == STOP) {
+ return i;
+ }
+ i++;
+ }
+ return i;
+}
+
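+/*
+ * Further example (illustrative): parse_command("notepad.exe foo.txt")
+ * returns 11, the length of "notepad.exe".
+ */
+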
+BOOL need_quotes(char *str)
+{
+ int in_quote = 0;
+ int backslashed = 0;
+ int naked_space = 0;
+ while (*str != '\0') {
+ switch (*str) {
+ case '\\' :
+ backslashed = !backslashed;
+ break;
+ case '"':
+ if (backslashed) {
+ backslashed=0;
+ } else {
+ in_quote = !in_quote;
+ }
+ break;
+ case ' ':
+ backslashed = 0;
+ if (!(backslashed || in_quote)) {
+ naked_space++;
+ }
+ break;
+ default:
+ backslashed = 0;
+ }
+ ++str;
+ }
+ return (naked_space > 0);
+}
+
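+/*
+ * Illustrative examples: need_quotes() returns TRUE for
+ * C:\Program Files\x.exe (the space is unquoted), but FALSE for
+ * "a b" written with surrounding quote characters, since the space is
+ * then inside a quoted section.
+ */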
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * CreateChildProcess --
+ *
+ * Create a child process that has pipes as its
+ * standard input, output, and error. The child process runs
+ * synchronously under Win32s and asynchronously under Windows NT
+ * and Windows 95, and runs with the same environment variables
+ * as the creating process.
+ *
+ * The complete Windows search path is searched to find the specified
+ * executable. If an executable by the given name is not found,
+ * automatically tries appending ".com", ".exe", and ".bat" to the
+ * executable name.
+ *
+ * Results:
+ *	The return value is FALSE if there was a problem creating the child
+ *	process.  Otherwise, the return value is TRUE and *phPid is
+ *	filled with a handle to the child process.
+ *
+ * Side effects:
+ * A process is created.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static BOOL
+CreateChildProcess
+(
+ char *origcmd, /* Command line for child process (including
+ * name of executable). Or whole executable if st is
+ * ERTS_SPAWN_EXECUTABLE
+ */
+ HANDLE hStdin, /* The standard input handle for child. */
+ HANDLE hStdout, /* The standard output handle for child. */
+ HANDLE hStderr, /* The standard error handle for child. */
+    LPHANDLE phPid,  /* Pointer to variable that receives the process handle. */
+ BOOL hide, /* Hide the window unconditionally. */
+ LPVOID env, /* Environment for the child */
+ LPTSTR wd, /* Working dir for the child */
+ unsigned st, /* Flags for spawn, tells us how to interpret origcmd */
+ char **argv, /* Argument vector if given. */
+ int *errno_return /* Place to put an errno in in case of failure */
+ )
+{
+ PROCESS_INFORMATION piProcInfo = {0};
+ STARTUPINFO siStartInfo = {0};
+ BOOL ok = FALSE;
+ int applType;
+ /* Not to be changed for different types of executables */
+ int staticCreateFlags = GetPriorityClass(GetCurrentProcess());
+ int createFlags = DETACHED_PROCESS;
+ char *newcmdline = NULL;
+ char execPath[MAX_PATH];
+ int cmdlength;
+ char* thecommand;
+ LPTSTR appname = NULL;
+ HANDLE hProcess = GetCurrentProcess();
+
+ *errno_return = -1;
+
+ siStartInfo.cb = sizeof(STARTUPINFO);
+ siStartInfo.dwFlags = STARTF_USESTDHANDLES;
+ siStartInfo.hStdInput = hStdin;
+ siStartInfo.hStdOutput = hStdout;
+ siStartInfo.hStdError = hStderr;
+
+
+ if (st != ERTS_SPAWN_EXECUTABLE) {
+ /*
+ * Parse out the program name from the command line (it can be quoted and
+ * contain spaces).
+ */
+ newcmdline = erts_alloc(ERTS_ALC_T_TMP, 2048);
+ cmdlength = parse_command(origcmd);
+ thecommand = (char *) erts_alloc(ERTS_ALC_T_TMP, cmdlength+1);
+ strncpy(thecommand, origcmd, cmdlength);
+ thecommand[cmdlength] = '\0';
+ DEBUGF(("spawn command: %s\n", thecommand));
+
+ applType = ApplicationType(thecommand, execPath, TRUE,
+ TRUE, errno_return);
+ DEBUGF(("ApplicationType returned for (%s) is %d\n", thecommand, applType));
+ erts_free(ERTS_ALC_T_TMP, (void *) thecommand);
+ if (applType == APPL_NONE) {
+ erts_free(ERTS_ALC_T_TMP,newcmdline);
+ return FALSE;
+ }
+ newcmdline[0] = '\0';
+
+ if (applType == APPL_DOS) {
+ /*
+ * Under NT, 16-bit DOS applications will not run unless they
+ * can be attached to a console. Run the 16-bit program as
+ * a normal process inside of a hidden console application,
+ * and then run that hidden console as a detached process.
+ */
+
+ siStartInfo.wShowWindow = SW_HIDE;
+ siStartInfo.dwFlags |= STARTF_USESHOWWINDOW;
+ createFlags = CREATE_NEW_CONSOLE;
+ strcat(newcmdline, "cmd.exe /c ");
+ } else if (hide) {
+ DEBUGF(("hiding window\n"));
+ siStartInfo.wShowWindow = SW_HIDE;
+ siStartInfo.dwFlags |= STARTF_USESHOWWINDOW;
+ createFlags = 0;
+ }
+
+ strcat(newcmdline, execPath);
+ strcat(newcmdline, origcmd+cmdlength);
+ } else { /* ERTS_SPAWN_EXECUTABLE */
+ int run_cmd = 0;
+ applType = ApplicationType(origcmd, execPath, FALSE, FALSE,
+ errno_return);
+ if (applType == APPL_NONE) {
+ return FALSE;
+ }
+ if (applType == APPL_DOS) {
+ /*
+ * See comment above
+ */
+
+ siStartInfo.wShowWindow = SW_HIDE;
+ siStartInfo.dwFlags |= STARTF_USESHOWWINDOW;
+ createFlags = CREATE_NEW_CONSOLE;
+ run_cmd = 1;
+ } else if (hide) {
+ DEBUGF(("hiding window\n"));
+ siStartInfo.wShowWindow = SW_HIDE;
+ siStartInfo.dwFlags |= STARTF_USESHOWWINDOW;
+ createFlags = 0;
+ }
+ if (run_cmd) {
+ char cmdPath[MAX_PATH];
+ int cmdType;
+ cmdType = ApplicationType("cmd.exe", cmdPath, TRUE, FALSE, errno_return);
+ if (cmdType == APPL_NONE || cmdType == APPL_DOS) {
+ return FALSE;
+ }
+ appname = (char *) erts_alloc(ERTS_ALC_T_TMP, strlen(cmdPath)+1);
+ strcpy(appname,cmdPath);
+ } else {
+ appname = (char *) erts_alloc(ERTS_ALC_T_TMP, strlen(execPath)+1);
+ strcpy(appname,execPath);
+ }
+ if (argv == NULL) {
+ BOOL orig_need_q = need_quotes(execPath);
+ char *ptr;
+ int ocl = strlen(execPath);
+ if (run_cmd) {
+ newcmdline = (char *) erts_alloc(ERTS_ALC_T_TMP,
+ ocl + ((orig_need_q) ? 3 : 1)
+ + 11);
+ memcpy(newcmdline,"cmd.exe /c ",11);
+ ptr = newcmdline + 11;
+ } else {
+ newcmdline = (char *) erts_alloc(ERTS_ALC_T_TMP,
+ ocl + ((orig_need_q) ? 3 : 1));
+ ptr = newcmdline;
+ }
+ if (orig_need_q) {
+ *ptr++ = '"';
+ }
+ memcpy(ptr,execPath,ocl);
+ ptr += ocl;
+ if (orig_need_q) {
+ *ptr++ = '"';
+ }
+ *ptr = '\0';
+ } else {
+ int sum = 1; /* '\0' */
+ char **ar = argv;
+ char *n;
+ char *save_arg0 = NULL;
+ if (argv[0] == erts_default_arg0 || run_cmd) {
+ save_arg0 = argv[0];
+ argv[0] = execPath;
+ }
+ if (run_cmd) {
+ sum += 11; /* cmd.exe /c */
+ }
+ while (*ar != NULL) {
+ sum += strlen(*ar);
+ if (need_quotes(*ar)) {
+ sum += 2; /* quotes */
+ }
+ sum++; /* space */
+ ++ar;
+ }
+ ar = argv;
+ newcmdline = erts_alloc(ERTS_ALC_T_TMP, sum);
+ n = newcmdline;
+ if (run_cmd) {
+ memcpy(n,"cmd.exe /c ",11);
+ n += 11;
+ }
+ while (*ar != NULL) {
+ int q = need_quotes(*ar);
+ sum = strlen(*ar);
+ if (q) {
+ *n++ = '"';
+ }
+ memcpy(n,*ar,sum);
+ n += sum;
+ if (q) {
+ *n++ = '"';
+ }
+ *n++ = ' ';
+ ++ar;
+ }
+ ASSERT(n > newcmdline);
+ *(n-1) = '\0';
+ if (save_arg0 != NULL) {
+ argv[0] = save_arg0;
+ }
+ }
+
+ }
+ DEBUGF(("Creating child process: %s, createFlags = %d\n", newcmdline, createFlags));
+ ok = CreateProcess(appname,
+ newcmdline,
+ NULL,
+ NULL,
+ TRUE,
+ createFlags | staticCreateFlags,
+ env,
+ wd,
+ &siStartInfo,
+ &piProcInfo);
+
+ if (newcmdline != NULL) {
+ erts_free(ERTS_ALC_T_TMP,newcmdline);
+ }
+ if (appname != NULL) {
+ erts_free(ERTS_ALC_T_TMP,appname);
+ }
+ if (!ok) {
+ DEBUGF(("CreateProcess failed: %s\n", last_error()));
+ if (*errno_return < 0) {
+ *errno_return = EACCES;
+ }
+ return FALSE;
+ }
+ CloseHandle(piProcInfo.hThread); /* Necessary to avoid resource leak. */
+ *phPid = piProcInfo.hProcess;
+
+ if (applType == APPL_DOS) {
+ WaitForSingleObject(hProcess, 50);
+ }
+
+ /*
+ * When an application spawns a process repeatedly, a new thread
+ * instance will be created for each process but the previous
+ * instances may not be cleaned up. This results in a significant
+ * virtual memory loss each time the process is spawned. If there
+ * is a WaitForInputIdle() call between CreateProcess() and
+ * CloseHandle(), the problem does not occur. PSS ID Number: Q124121
+ */
+
+ WaitForInputIdle(piProcInfo.hProcess, 5000);
+
+ return ok;
+}
+
+/*
+ * Note: inheritRead == FALSE means "inheritWrite", i.e. one of the
+ * pipe ends is always expected to be inherited. The pipe end that should
+ * be inherited is opened without overlapped io flags, as the child program
+ * would expect stdout not to demand overlapped I/O.
+ */
+static int create_pipe(HANDLE *phRead, HANDLE *phWrite, BOOL inheritRead, BOOL overlapped_io)
+{
+ SECURITY_ATTRIBUTES sa = {sizeof(SECURITY_ATTRIBUTES), NULL, TRUE};
+ char pipe_name[128]; /* Name of pipe. */
+ Uint calls;
+
+ /*
+     * If we shouldn't use named pipes, create anonymous pipes.
+ */
+
+ if (!use_named_pipes) {
+ int success;
+ HANDLE non_inherited; /* Non-inherited copy of handle. */
+
+ if (!CreatePipe(phRead, phWrite, &sa, 0)) {
+	    DEBUGF(("Error creating anonymous pipe: %s\n", last_error()));
+ return FALSE;
+ }
+
+ if (inheritRead) {
+ success = DuplicateHandle(GetCurrentProcess(), *phWrite,
+ GetCurrentProcess(), &non_inherited, 0,
+ FALSE, DUPLICATE_SAME_ACCESS);
+ CloseHandle(*phWrite);
+ *phWrite = non_inherited;
+ } else {
+ success = DuplicateHandle(GetCurrentProcess(), *phRead,
+ GetCurrentProcess(), &non_inherited, 0,
+ FALSE, DUPLICATE_SAME_ACCESS);
+ CloseHandle(*phRead);
+ *phRead = non_inherited;
+ }
+ return success;
+ }
+
+
+ /*
+ * Otherwise, create named pipes.
+ */
+
+ calls = (Uint) erts_smp_atomic_inctest(&pipe_creation_counter);
+ sprintf(pipe_name, "\\\\.\\pipe\\erlang44_%d_%d",
+ getpid(), calls);
+
+ DEBUGF(("Creating pipe %s\n", pipe_name));
+ sa.bInheritHandle = inheritRead;
+ if ((*phRead = CreateNamedPipe(pipe_name,
+ PIPE_ACCESS_INBOUND |
+ ((inheritRead && !overlapped_io) ? 0 : FILE_FLAG_OVERLAPPED),
+ PIPE_TYPE_BYTE | PIPE_READMODE_BYTE,
+ 1,
+ 0,
+ 0,
+ 2000,
+ &sa)) == NULL) {
+ DEBUGF(("Error creating pipe: %s\n", last_error()));
+ return FALSE;
+ }
+
+ sa.bInheritHandle = !inheritRead;
+ if ((*phWrite = CreateFile(pipe_name,
+ GENERIC_WRITE,
+ 0, /* No sharing */
+ &sa,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL |
+ ((inheritRead || overlapped_io) ? FILE_FLAG_OVERLAPPED : 0),
+ NULL)) == INVALID_HANDLE_VALUE) {
+ CloseHandle(*phRead);
+ DEBUGF(("Error opening other end of pipe: %s\n", last_error()));
+ return FALSE;
+ }
+ return TRUE;
+}
+
+
+
+
+static int ApplicationType
+(
+ const char *originalName, /* Name of the application to find. */
+ char fullPath[MAX_PATH], /* Filled with complete path to
+ * application. */
+ BOOL search_in_path, /* If we should search the system wide path */
+ BOOL handle_quotes, /* If we should handle quotes around executable */
+ int *error_return /* A place to put an error code */
+ )
+{
+ int applType, i;
+ HANDLE hFile;
+ char *ext, *rest;
+ char buf[2];
+ DWORD read;
+ IMAGE_DOS_HEADER header;
+ static char extensions[][5] = {"", ".com", ".exe", ".bat"};
+ int is_quoted;
+ int len;
+
+ /* Look for the program as an external program. First try the name
+ * as it is, then try adding .com, .exe, and .bat, in that order, to
+ * the name, looking for an executable.
+     * NOTE that we do not support execution of .com programs on Windows NT.
+ *
+ *
+ * Using the raw SearchPath() procedure doesn't do quite what is
+ * necessary. If the name of the executable already contains a '.'
+ * character, it will not try appending the specified extension when
+ * searching (in other words, SearchPath will not find the program
+ * "a.b.exe" if the arguments specified "a.b" and ".exe").
+     * So, first look for the file as it is named.  Then manually append
+     * the extensions, looking for a match.
+ */
+
+ len = strlen(originalName);
+ is_quoted = handle_quotes && len > 0 && originalName[0] == '"' &&
+ originalName[len-1] == '"';
+
+ applType = APPL_NONE;
+ *error_return = ENOENT;
+ for (i = 0; i < (int) (sizeof(extensions) / sizeof(extensions[0])); i++) {
+ if(is_quoted) {
+ lstrcpyn(fullPath, originalName+1, MAX_PATH - 7);
+ len = strlen(fullPath);
+ if(len > 0) {
+ fullPath[len-1] = '\0';
+ }
+ } else {
+ lstrcpyn(fullPath, originalName, MAX_PATH - 5);
+ }
+ lstrcat(fullPath, extensions[i]);
+ SearchPath((search_in_path) ? NULL : ".", fullPath, NULL, MAX_PATH, fullPath, &rest);
+
+ /*
+ * Ignore matches on directories or data files, return if identified
+ * a known type.
+ */
+
+ if (GetFileAttributes(fullPath) & FILE_ATTRIBUTE_DIRECTORY) {
+ continue;
+ }
+
+ ext = strrchr(fullPath, '.');
+ if ((ext != NULL) && (strcmpi(ext, ".bat") == 0)) {
+ *error_return = EACCES;
+ applType = APPL_DOS;
+ break;
+ }
+
+ hFile = CreateFile(fullPath, GENERIC_READ, FILE_SHARE_READ, NULL,
+ OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (hFile == INVALID_HANDLE_VALUE) {
+ continue;
+ }
+
+ *error_return = EACCES; /* If considered an error,
+ it's an access error */
+ header.e_magic = 0;
+ ReadFile(hFile, (void *) &header, sizeof(header), &read, NULL);
+ if (header.e_magic != IMAGE_DOS_SIGNATURE) {
+ /*
+ * Doesn't have the magic number for relocatable executables. If
+ * filename ends with .com, assume it's a DOS application anyhow.
+ * Note that we didn't make this assumption at first, because some
+ * supposed .com files are really 32-bit executables with all the
+ * magic numbers and everything.
+ */
+
+ CloseHandle(hFile);
+ if ((ext != NULL) && (strcmpi(ext, ".com") == 0)) {
+ applType = APPL_DOS;
+ break;
+ }
+ continue;
+ }
+ if (header.e_lfarlc != sizeof(header)) {
+ /*
+ * All Windows 3.X and Win32 and some DOS programs have this value
+ * set here. If it doesn't, assume that since it already had the
+ * other magic number it was a DOS application.
+ */
+
+ CloseHandle(hFile);
+ applType = APPL_DOS;
+ break;
+ }
+
+ /*
+ * The DWORD at header.e_lfanew points to yet another magic number.
+ */
+
+ buf[0] = '\0';
+ SetFilePointer(hFile, header.e_lfanew, NULL, FILE_BEGIN);
+ ReadFile(hFile, (void *) buf, 2, &read, NULL);
+ CloseHandle(hFile);
+
+ if ((buf[0] == 'L') && (buf[1] == 'E')) {
+ applType = APPL_DOS;
+ } else if ((buf[0] == 'N') && (buf[1] == 'E')) {
+ applType = APPL_WIN3X;
+ } else if ((buf[0] == 'P') && (buf[1] == 'E')) {
+ applType = APPL_WIN32;
+ } else {
+ continue;
+ }
+ break;
+ }
+
+ if (applType == APPL_NONE) {
+ return APPL_NONE;
+ }
+
+ if ((applType == APPL_DOS) || (applType == APPL_WIN3X)) {
+ /*
+ * Replace long path name of executable with short path name for
+ * 16-bit applications. Otherwise the application may not be able
+ * to correctly parse its own command line to separate off the
+ * application name from the arguments.
+ */
+
+ GetShortPathName(fullPath, fullPath, MAX_PATH);
+ }
+ if (is_quoted) {
+ /* restore quotes on quoted program name */
+ len = strlen(fullPath);
+ memmove(fullPath+1,fullPath,len);
+ fullPath[0]='"';
+ fullPath[len+1]='"';
+ fullPath[len+2]='\0';
+ }
+ return applType;
+}
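
The probing above boils down to two signature checks: IMAGE_DOS_SIGNATURE ("MZ") at the start of the file, and the two bytes found at the offset stored in e_lfanew ("PE", "NE" or "LE"). Below is a minimal standalone sketch of that classification; the helper name and its return values are invented for illustration and are not part of the driver.

#include <windows.h>

/* Illustrative sketch: classify an executable roughly the way the loop
 * above does: 0 = no "MZ" header, 1 = DOS, 2 = Win 3.x, 3 = Win32. */
static int probe_appl_type(const char *path)
{
    IMAGE_DOS_HEADER dos;
    char sig[2] = {0, 0};
    DWORD read = 0;
    HANDLE h = CreateFile(path, GENERIC_READ, FILE_SHARE_READ, NULL,
                          OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);

    if (h == INVALID_HANDLE_VALUE)
        return -1;                          /* cannot open the file */
    if (!ReadFile(h, &dos, sizeof(dos), &read, NULL) ||
        read != sizeof(dos) || dos.e_magic != IMAGE_DOS_SIGNATURE) {
        CloseHandle(h);
        return 0;                           /* no "MZ": not a PE/NE/LE image */
    }
    SetFilePointer(h, dos.e_lfanew, NULL, FILE_BEGIN);
    ReadFile(h, sig, 2, &read, NULL);
    CloseHandle(h);
    if (sig[0] == 'P' && sig[1] == 'E') return 3;   /* Win32 (APPL_WIN32) */
    if (sig[0] == 'N' && sig[1] == 'E') return 2;   /* Win 3.x (APPL_WIN3X) */
    return 1;                                       /* assume DOS (APPL_DOS) */
}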
+
+/*
+ * Thread function used to emulate overlapped reading.
+ */
+
+DWORD WINAPI
+threaded_reader(LPVOID param)
+{
+ AsyncIo* aio = (AsyncIo *) param;
+ HANDLE thread = GetCurrentThread();
+ char* buf;
+ DWORD numToRead;
+
+ for (;;) {
+ WaitForSingleObject(aio->ioAllowed, INFINITE);
+ if (aio->flags & DF_EXIT_THREAD)
+ break;
+ buf = OV_BUFFER_PTR(aio);
+ numToRead = OV_NUM_TO_READ(aio);
+ aio->pendingError = 0;
+ if (!ReadFile(aio->fd, buf, numToRead, &aio->bytesTransferred, NULL))
+ aio->pendingError = GetLastError();
+ else if (aio->flags & DF_XLAT_CR) {
+ char *s;
+ int n;
+
+ n = aio->bytesTransferred;
+ for (s = buf; s < buf+n; s++) {
+ if (*s == '\r') {
+ if (s < buf + n - 1 && s[1] == '\n') {
+ memmove(s, s+1, (buf+n - s - 1));
+ --n;
+ } else {
+ *s = '\n';
+ }
+ }
+ }
+ aio->bytesTransferred = n;
+ }
+ SetEvent(aio->ov.hEvent);
+ if ((aio->flags & DF_XLAT_CR) == 0 && aio->bytesTransferred == 0) {
+ break;
+ }
+ if (aio->pendingError != NO_ERROR) {
+ break;
+ }
+ if (aio->flags & DF_EXIT_THREAD)
+ break;
+ }
+ return 0;
+}
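
The DF_XLAT_CR branch above rewrites the buffer in place: "\r\n" collapses to "\n", a lone "\r" becomes "\n", and the byte count shrinks accordingly. The same transformation in isolation, as a small plain-C program (the function name is invented for the example):

#include <stdio.h>
#include <string.h>

/* In-place CR/LF translation, as done by the DF_XLAT_CR branch above.
 * Returns the new length of the buffer. */
static int xlat_cr(char *buf, int n)
{
    char *s;
    for (s = buf; s < buf + n; s++) {
        if (*s == '\r') {
            if (s < buf + n - 1 && s[1] == '\n') {
                memmove(s, s + 1, (buf + n) - s - 1);   /* drop the CR */
                --n;
            } else {
                *s = '\n';                              /* lone CR becomes LF */
            }
        }
    }
    return n;
}

int main(void)
{
    char buf[] = "a\r\nb\rc";
    int n = xlat_cr(buf, (int) strlen(buf));
    fwrite(buf, 1, n, stdout);          /* prints a, b and c on three lines */
    putchar('\n');
    return 0;
}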
+
+/*
+ * Thread function used to emulate overlapped writing
+ */
+
+DWORD WINAPI
+threaded_writer(LPVOID param)
+{
+ AsyncIo* aio = (AsyncIo *) param;
+ HANDLE thread = GetCurrentThread();
+ char* buf;
+ DWORD numToWrite;
+ int ok;
+
+ for (;;) {
+ WaitForSingleObject(aio->ioAllowed, INFINITE);
+ if (aio->flags & DF_EXIT_THREAD)
+ break;
+ buf = OV_BUFFER_PTR(aio);
+ numToWrite = OV_NUM_TO_READ(aio);
+ aio->pendingError = 0;
+ ok = WriteFile(aio->fd, buf, numToWrite, &aio->bytesTransferred, NULL);
+ if (!ok) {
+ aio->pendingError = GetLastError();
+ if (aio->pendingError == ERROR_INVALID_HANDLE &&
+ aio->flags & DF_DROP_IF_INVH) {
+		/* This is standard error and we've got an
+		   invalid standard error FD (non-inheritable) from the parent.
+		   Just drop the message and be happy. */
+ aio->pendingError = 0;
+ aio->bytesTransferred = numToWrite;
+ } else if (aio->pendingError == ERROR_NOT_ENOUGH_MEMORY) {
+		/* This could be a console, which limits output to 64 kbytes,
+		   which might translate to less on a Unicode system.
+		   Try 16 kbyte chunks and see if that works before giving up. */
+ int done = 0;
+ DWORD transferred;
+ aio->pendingError = 0;
+ aio->bytesTransferred = 0;
+ ok = 1;
+ while (ok && (numToWrite - done) > 0x4000) {
+ ok = WriteFile(aio->fd, buf + done, 0x4000, &transferred, NULL);
+ aio->bytesTransferred += transferred;
+ done += 0x4000;
+ }
+ if (ok && (numToWrite - done) > 0) {
+ ok = WriteFile(aio->fd, buf + done, (numToWrite - done),
+ &transferred, NULL);
+ aio->bytesTransferred += transferred;
+ }
+ if (!ok) {
+ aio->pendingError = GetLastError();
+ }
+ }
+ }
+ SetEvent(aio->ov.hEvent);
+ if (aio->pendingError != NO_ERROR || aio->bytesTransferred == 0)
+ break;
+ if (aio->flags & DF_EXIT_THREAD)
+ break;
+ }
+ CloseHandle(aio->fd);
+ aio->fd = INVALID_HANDLE_VALUE;
+ return 0;
+}
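
The ERROR_NOT_ENOUGH_MEMORY fallback above simply re-issues the write in 0x4000-byte pieces and accumulates what was actually transferred. The same chunking loop in generic form, with a callback standing in for WriteFile (all names here are illustrative, not part of the driver):

/* Write 'len' bytes in at most 'chunk'-byte pieces. 'write_fn' stands in
 * for WriteFile and returns the number of bytes written, or -1 on error.
 * Returns the total number of bytes written. */
static long write_chunked(int fd, const char *buf, long len, long chunk,
                          long (*write_fn)(int, const char *, long))
{
    long done = 0;

    while (done < len) {
        long n = len - done;
        long w;
        if (n > chunk)
            n = chunk;
        w = write_fn(fd, buf + done, n);
        if (w < 0)
            return done;        /* error: report what was written so far */
        done += w;
        if (w < n)
            break;              /* short write: stop, like the code above */
    }
    return done;
}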
+
+static HANDLE
+translate_fd(int fd)
+{
+ DWORD access;
+ HANDLE handle;
+
+ switch (fd) {
+ case 0:
+ access = GENERIC_READ;
+ handle = GetStdHandle(STD_INPUT_HANDLE);
+ break;
+ case 1:
+ access = GENERIC_WRITE;
+ handle = GetStdHandle(STD_OUTPUT_HANDLE);
+ break;
+ case 2:
+ access = GENERIC_WRITE;
+ handle = GetStdHandle(STD_ERROR_HANDLE);
+ break;
+ default:
+ return (HANDLE) fd;
+ }
+ DEBUGF(("translate_fd(%d) -> std(%d)\n", fd, handle));
+
+ if (handle == INVALID_HANDLE_VALUE || handle == 0) {
+ handle = CreateFile("nul", access, 0,
+ NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ }
+ DEBUGF(("translate_fd(%d) -> %d\n", fd, handle));
+ return handle;
+}
+
+static ErlDrvData
+fd_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
+{
+ DriverData* dp;
+ int is_std_error = (opts->ofd == 2);
+
+ opts->ifd = (int) translate_fd(opts->ifd);
+ opts->ofd = (int) translate_fd(opts->ofd);
+ if ((dp = new_driver_data(port_num, opts->packet_bytes, 2, TRUE)) == NULL)
+ return ERL_DRV_ERROR_GENERAL;
+
+ if (!create_file_thread(&dp->in, DO_READ)) {
+ dp->port_num = PORT_FREE;
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ if (!create_file_thread(&dp->out, DO_WRITE)) {
+ dp->port_num = PORT_FREE;
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ fd_driver_input = &(dp->in);
+ dp->in.flags = DF_XLAT_CR;
+ if (is_std_error) {
+	dp->out.flags |= DF_DROP_IF_INVH; /* Just drop messages if stderr
+					     is an invalid handle */
+ }
+ return set_driver_data(dp, opts->ifd, opts->ofd, opts->read_write, 0);
+}
+
+static void fd_stop(ErlDrvData d)
+{
+ int fd = (int)d;
+ /*
+ * I don't know a clean way to terminate the threads
+ * (TerminateThread() doesn't release the stack),
+     * so we'll let the threads live. Normally, the fd
+ * driver is only used to support the -oldshell option,
+ * so this shouldn't be a problem in practice.
+ *
+ * Since we will not attempt to terminate the threads,
+ * better not close the input or output files either.
+ */
+
+ driver_data[fd].in.thread = (HANDLE) -1;
+ driver_data[fd].out.thread = (HANDLE) -1;
+ driver_data[fd].in.fd = INVALID_HANDLE_VALUE;
+ driver_data[fd].out.fd = INVALID_HANDLE_VALUE;
+
+ /*return */ common_stop(fd);
+}
+
+static ErlDrvData
+vanilla_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
+{
+ HANDLE ofd,ifd;
+ DriverData* dp;
+ DWORD access; /* Access mode: GENERIC_READ, GENERIC_WRITE. */
+ DWORD crFlags;
+ HANDLE this_process = GetCurrentProcess();
+
+ access = 0;
+ if (opts->read_write == DO_READ)
+ access |= GENERIC_READ;
+ if (opts->read_write == DO_WRITE)
+ access |= GENERIC_WRITE;
+
+ if (opts->read_write == DO_READ)
+ crFlags = OPEN_EXISTING;
+ else if (opts->read_write == DO_WRITE)
+ crFlags = CREATE_ALWAYS;
+ else
+ crFlags = OPEN_ALWAYS;
+
+ if ((dp = new_driver_data(port_num, opts->packet_bytes, 2, FALSE)) == NULL)
+ return ERL_DRV_ERROR_GENERAL;
+ ofd = CreateFile(name, access, FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL, crFlags, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (!DuplicateHandle(this_process, (HANDLE) ofd,
+ this_process, &ifd, 0,
+ FALSE, DUPLICATE_SAME_ACCESS)) {
+ CloseHandle(ofd);
+ ofd = INVALID_HANDLE_VALUE;
+ }
+ if (ofd == INVALID_HANDLE_VALUE)
+ return ERL_DRV_ERROR_GENERAL;
+ return set_driver_data(dp, ifd, ofd, opts->read_write,0);
+}
+
+static void
+stop(ErlDrvData index)
+{
+ common_stop((int)index);
+}
+
+static void common_stop(int index)
+{
+ DriverData* dp = driver_data+index;
+
+ DEBUGF(("common_stop(%d)\n", index));
+
+ if (dp->in.ov.hEvent != NULL) {
+ (void) driver_select(dp->port_num,
+ (ErlDrvEvent)dp->in.ov.hEvent,
+ ERL_DRV_READ, 0);
+ }
+ if (dp->out.ov.hEvent != NULL) {
+ (void) driver_select(dp->port_num,
+ (ErlDrvEvent)dp->out.ov.hEvent,
+ ERL_DRV_WRITE, 0);
+ }
+
+ if (dp->out.thread == (HANDLE) -1 && dp->in.thread == (HANDLE) -1) {
+ release_driver_data(dp);
+ } else {
+ /*
+ * If there are read or write threads, start a thread which will
+ * wait for them to finish.
+ */
+ HANDLE thread;
+ DWORD tid;
+ dp->port_num = PORT_EXITING;
+	thread = (HANDLE) _beginthreadex(NULL, 0, threaded_exiter, dp, 0, &tid);
+ CloseHandle(thread);
+ }
+}
+
+DWORD WINAPI
+threaded_exiter(LPVOID param)
+{
+ DriverData* dp = (DriverData *) param;
+ HANDLE handles[2];
+ int i;
+
+ /*
+     * Ask the threads to terminate.
+     *
+     * Note that we can't reliably test the state of the ioAllowed event,
+ * because it is an auto reset event. Therefore, always set the
+ * exit flag and signal the event.
+ */
+
+ i = 0;
+ if (dp->out.thread != (HANDLE) -1) {
+ dp->out.flags = DF_EXIT_THREAD;
+ SetEvent(dp->out.ioAllowed);
+ handles[i++] = dp->out.thread;
+ }
+ if (dp->in.thread != (HANDLE) -1) {
+ dp->in.flags = DF_EXIT_THREAD;
+ SetEvent(dp->in.ioAllowed);
+ handles[i++] = dp->in.thread;
+ }
+
+ /*
+ * If we were lucky, the following happened above:
+ * 1) The output thread terminated (and closed the pipe).
+ * 2) As a consequence of that, the port program received
+ * EOF on its standard input.
+ * 3) Hopefully, because of (2), the port program terminated.
+ * 4) Because of (3), the input thread terminated.
+ *
+ * But this might need some time; therefore, we must wait for
+ * both threads to terminate.
+ */
+
+ if (i > 0) {
+ switch (WaitForMultipleObjects(i, handles, TRUE, 5000)) {
+ case WAIT_TIMEOUT:
+	    DEBUGF(("Timed out waiting for %d threads\n", i));
+ break;
+ case WAIT_FAILED:
+ DEBUGF(("Wait for %d threads failed: %s\n",
+ i, win32_errorstr(GetLastError())));
+ break;
+ default:
+ break;
+ }
+ }
+
+    /*
+     * Waiting for the threads to terminate didn't help. Now use some force.
+     * TerminateThread() is *not* a good idea, because it doesn't clean
+     * up the thread's stack.
+     *
+     * Instead we terminate the port program and wait for the
+     * threads to terminate themselves when they receive end of file.
+     */
+
+ if (dp->out.thread != (HANDLE) -1) {
+ int error;
+
+ if (WaitForSingleObject(dp->out.thread, 0) == WAIT_OBJECT_0) {
+ CloseHandle(dp->out.thread);
+ dp->out.thread = (HANDLE) -1;
+ } else if (dp->port_pid != INVALID_HANDLE_VALUE) {
+ DEBUGF(("Killing port process 0x%x (output thread)\n", dp->port_pid));
+ TerminateProcess(dp->port_pid, 0);
+ if (!CloseHandle(dp->port_pid))
+ DEBUGF(("Failed to close output handle!!!\n"));
+ dp->port_pid = INVALID_HANDLE_VALUE;
+ DEBUGF(("Waiting for output thread 0x%x to finish\n", dp->out.thread));
+ error = WaitForSingleObject(dp->out.thread, INFINITE);
+ }
+ }
+
+ if (dp->in.thread != (HANDLE) -1) {
+ if (WaitForSingleObject(dp->in.thread, 0) == WAIT_OBJECT_0) {
+ CloseHandle(dp->in.thread);
+ dp->in.thread = (HANDLE) -1;
+ } else if (dp->port_pid != INVALID_HANDLE_VALUE) {
+ DEBUGF(("Killing port process 0x%x (input thread)\n", dp->port_pid));
+ TerminateProcess(dp->port_pid, 0);
+ if (!CloseHandle(dp->port_pid))
+ DEBUGF(("Failed to close input handle!!!\n"));
+ dp->port_pid = INVALID_HANDLE_VALUE;
+
+ DEBUGF(("Waiting for input thread 0x%x to finish\n", dp->in.thread));
+ switch (WaitForSingleObject(dp->in.thread, INFINITE)) {
+ case WAIT_OBJECT_0:
+ CloseHandle(dp->in.thread);
+ dp->in.thread = (HANDLE) -1;
+ break;
+ default:
+ DEBUGF(("Wait for input thread to finish failed: %s\n",
+ win32_errorstr(GetLastError())));
+ break;
+ }
+ }
+ }
+
+ release_driver_data(dp);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------
+ * output --
+ * Outputs data from Erlang to the port program.
+ *
+ * Results:
+ * Returns the actual number of bytes written (including the
+ * packet header) or -1 if an error occurred.
+ * ----------------------------------------------------------------------
+ */
+
+static void
+output(ErlDrvData drv_data, char* buf, int len)
+/* long drv_data; /* The slot to use in the driver data table.
+ * For Windows NT, this is *NOT* a file handle.
+ * The handle is found in the driver data.
+ */
+/* char *buf; /* Pointer to data to write to the port program. */
+/* int len; /* Number of bytes to write. */
+{
+ DriverData* dp;
+ int pb; /* The header size for this port. */
+ int port_num; /* The actual port number (for diagnostics). */
+ char* current;
+
+ dp = driver_data + (int)drv_data;
+ if ((port_num = dp->port_num) == -1)
+ return ; /*-1;*/
+
+ pb = dp->packet_bytes;
+
+ if ((pb+len) == 0)
+ return ; /* 0; */
+
+ /*
+ * Check that the message can be sent with given header length.
+ */
+
+ if ((pb == 2 && len > 65535) || (pb == 1 && len > 255)) {
+ driver_failure_posix(port_num, EINVAL);
+ return ; /* -1; */
+ }
+
+ /*
+ * Allocate memory for both the message and the header.
+ */
+
+ ASSERT(dp->outbuf == NULL);
+ ASSERT(dp->outBufSize == 0);
+
+ ASSERT(!dp->outbuf);
+ dp->outbuf = DRV_BUF_ALLOC(pb+len);
+ if (!dp->outbuf) {
+ driver_failure_posix(port_num, ENOMEM);
+ return ; /* -1; */
+ }
+
+ dp->outBufSize = pb+len;
+ erts_smp_atomic_add(&sys_misc_mem_sz, dp->outBufSize);
+
+ /*
+ * Store header bytes (if any).
+ */
+
+ current = dp->outbuf;
+ switch (pb) {
+ case 4:
+ *current++ = (len >> 24) & 255;
+ *current++ = (len >> 16) & 255;
+ case 2:
+ *current++ = (len >> 8) & 255;
+ case 1:
+ *current++ = len & 255;
+ }
+
+ /*
+ * Start the write.
+ */
+
+ if (len)
+ memcpy(current, buf, len);
+
+ if (!async_write_file(&dp->out, dp->outbuf, pb+len)) {
+ set_busy_port(port_num, 1);
+ } else {
+ dp->out.ov.Offset += pb+len; /* For vanilla driver. */
+ /* XXX OffsetHigh should be changed too. */
+ ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->outBufSize);
+ erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->outBufSize);
+ DRV_BUF_FREE(dp->outbuf);
+ dp->outBufSize = 0;
+ dp->outbuf = NULL;
+ }
+ /*return 0;*/
+}
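
The header written above is just the payload length in big-endian byte order, 1, 2 or 4 bytes wide depending on dp->packet_bytes, and the length limits checked earlier follow directly from that width. A standalone sketch of the encoding (the helper name is made up):

/* Store 'len' as a 1-, 2- or 4-byte big-endian packet header in 'out'.
 * Returns the header size, 0 for pb == 0 (continuous stream, no header),
 * or -1 if 'len' does not fit in the header (same limits as above). */
static int put_packet_header(unsigned char *out, int pb, unsigned long len)
{
    if ((pb == 1 && len > 255) || (pb == 2 && len > 65535))
        return -1;
    switch (pb) {
    case 4:
        *out++ = (len >> 24) & 255;
        *out++ = (len >> 16) & 255;
        /* fall through */
    case 2:
        *out++ = (len >> 8) & 255;
        /* fall through */
    case 1:
        *out++ = len & 255;
        break;
    default:
        return 0;
    }
    return pb;
}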
+
+
+/* ----------------------------------------------------------------------
+ * ready_input --
+ * This function is called (indirectly) from check_io() when an
+ * event object has been signaled, indicating that there is
+ * something to read on the corresponding file handle.
+ *
+ * If the port is working in continuous stream mode (packet_bytes == 0),
+ * whatever data read will be sent straight to Erlang.
+ *
+ * Results:
+ * Always 0.
+ * ----------------------------------------------------------------------
+ */
+
+static void
+ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event)
+/* long drv_data; /* Driver data. */
+/* HANDLE ready_event; /* The handle for the ready event. */
+{
+ int error = 0; /* The error code (assume initially no errors). */
+ DWORD bytesRead; /* Number of bytes read. */
+ DriverData* dp;
+ int pb;
+
+ dp = driver_data+(int)drv_data;
+ pb = dp->packet_bytes;
+#ifdef ERTS_SMP
+ if(dp->in.thread == (HANDLE) -1) {
+ dp->in.async_io_active = 0;
+ }
+#endif
+ DEBUGF(("ready_input: dp %p, event 0x%x\n", dp, ready_event));
+
+ /*
+ * Evaluate the result of the overlapped read.
+ */
+
+#ifdef HARD_POLL_DEBUG
+ poll_debug_read_begin(dp->in.ov.hEvent);
+#endif
+
+ error = get_overlapped_result(&dp->in, &bytesRead, TRUE);
+
+#ifdef HARD_POLL_DEBUG
+ poll_debug_read_done(dp->in.ov.hEvent,bytesRead);
+#endif
+
+ if (error == NO_ERROR) {
+	if (pb == 0) { /* Continuous stream. */
+#ifdef DEBUG
+ DEBUGF(("ready_input: %d: ", bytesRead));
+ erl_bin_write(dp->inbuf, 16, bytesRead);
+ DEBUGF(("\n"));
+#endif
+ driver_output(dp->port_num, dp->inbuf, bytesRead);
+ } else { /* Packet mode */
+ dp->bytesInBuffer += bytesRead;
+
+ /*
+ * Loop until we've exhausted the data in the buffer.
+ */
+
+ for (;;) {
+
+ /*
+ * Check for completion of a header read.
+ */
+
+ if (dp->bytesInBuffer >= dp->totalNeeded &&
+ dp->totalNeeded == pb) {
+
+ /*
+ * We have successfully read the packet header
+ * (and perhaps even the packet). Get the packet size
+ * from the header and update dp->totalNeeded to include
+ * the packet size.
+ */
+
+ int packet_size = 0;
+ unsigned char *header = (unsigned char *) dp->inbuf;
+
+ switch (pb) {
+ case 4:
+ packet_size = (packet_size << 8) | *header++;
+ packet_size = (packet_size << 8) | *header++;
+ case 2:
+ packet_size = (packet_size << 8) | *header++;
+ case 1:
+ packet_size = (packet_size << 8) | *header++;
+ }
+
+ dp->totalNeeded += packet_size;
+
+ /*
+ * Make sure that the receive buffer is big enough.
+ */
+
+ if (dp->inBufSize < dp->totalNeeded) {
+ char* new_buf;
+
+ new_buf = DRV_BUF_REALLOC(dp->inbuf, dp->totalNeeded);
+ if (new_buf == NULL) {
+ error = ERROR_NOT_ENOUGH_MEMORY;
+ break; /* Break out of loop into error handler. */
+ }
+ ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->inBufSize);
+ erts_smp_atomic_add(&sys_misc_mem_sz,
+ dp->totalNeeded - dp->inBufSize);
+ dp->inBufSize = dp->totalNeeded;
+ dp->inbuf = new_buf;
+ }
+ }
+
+ /*
+ * Check for completion of a packet read.
+ */
+
+ if (dp->bytesInBuffer < dp->totalNeeded) {
+ /*
+ * Not enough bytes in the buffer. Break out of
+ * the loop and initiate a new read.
+ */
+
+ break;
+ } else {
+
+ /*
+ * We have successfully read a complete packet, which
+ * can be passed to Erlang.
+ */
+
+ driver_output(dp->port_num, dp->inbuf+pb, dp->totalNeeded-pb);
+
+ /*
+ * Update the number of bytes remaining in the buffer,
+ * and move the data remaining (if any) to the beginning
+ * of the buffer.
+ */
+
+ dp->bytesInBuffer -= dp->totalNeeded;
+ if (dp->bytesInBuffer > 0) {
+ memmove(dp->inbuf, dp->inbuf+dp->totalNeeded,
+ dp->bytesInBuffer);
+ }
+
+ /*
+ * Indicate that we need the size of a header, and
+ * go through the loop once more (to either process
+ * remaining bytes or initiate reading more).
+ */
+
+ dp->totalNeeded = pb;
+ }
+ }
+ }
+ }
+
+ /*
+ * Start a new overlapped read, or report the error.
+ */
+
+ if (error == NO_ERROR) {
+ async_read_file(&dp->in, dp->inbuf+dp->bytesInBuffer,
+ dp->inBufSize - dp->bytesInBuffer);
+ } else {
+ DEBUGF(("ready_input(): error: %s\n", win32_errorstr(error)));
+ if (error == ERROR_BROKEN_PIPE || error == ERROR_HANDLE_EOF) {
+ /* Maybe check exit status */
+ if (dp->report_exit) {
+ DWORD exitcode;
+ if (GetExitCodeProcess(dp->port_pid, &exitcode) &&
+ exitcode != STILL_ACTIVE) {
+ driver_report_exit(dp->port_num, exitcode);
+ }
+ }
+ driver_failure_eof(dp->port_num);
+ } else { /* Report real errors. */
+ int error = GetLastError();
+ (void) driver_select(dp->port_num, ready_event, ERL_DRV_READ, 0);
+ _dosmaperr(error);
+ driver_failure_posix(dp->port_num, errno);
+ }
+ }
+
+ /*return 0;*/
+}
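
Decoding mirrors the encoding in output(): the first pb bytes of the buffer are read as a big-endian length, which is then added to dp->totalNeeded so the loop knows when a complete packet has arrived. The header step on its own (helper name invented):

/* Read a 1-, 2- or 4-byte big-endian packet length, as the loop above
 * does with its fall-through switch. */
static int get_packet_size(const unsigned char *header, int pb)
{
    int size = 0;
    int i;

    for (i = 0; i < pb; i++)
        size = (size << 8) | header[i];
    return size;
}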
+
+static void
+ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
+{
+ DWORD bytesWritten;
+ DriverData* dp = driver_data + (int)drv_data;
+ int error;
+
+#ifdef ERTS_SMP
+ if(dp->out.thread == (HANDLE) -1) {
+ dp->out.async_io_active = 0;
+ }
+#endif
+ DEBUGF(("ready_output(%d, 0x%x)\n", drv_data, ready_event));
+ set_busy_port(dp->port_num, 0);
+ if (!(dp->outbuf)) {
+	/* Happens because the event sometimes gets signalled during a
+	   successful write... */
+ return;
+ }
+ ASSERT(erts_smp_atomic_read(&sys_misc_mem_sz) >= dp->outBufSize);
+ erts_smp_atomic_add(&sys_misc_mem_sz, -1*dp->outBufSize);
+ DRV_BUF_FREE(dp->outbuf);
+ dp->outBufSize = 0;
+ dp->outbuf = NULL;
+#ifdef HARD_POLL_DEBUG
+ poll_debug_write_begin(dp->out.ov.hEvent);
+#endif
+ error = get_overlapped_result(&dp->out, &bytesWritten, TRUE);
+#ifdef HARD_POLL_DEBUG
+ poll_debug_write_done(dp->out.ov.hEvent,bytesWritten);
+#endif
+
+ if (error == NO_ERROR) {
+ dp->out.ov.Offset += bytesWritten; /* For vanilla driver. */
+ return ; /* 0; */
+ }
+
+ (void) driver_select(dp->port_num, ready_event, ERL_DRV_WRITE, 0);
+ _dosmaperr(error);
+ driver_failure_posix(dp->port_num, errno);
+ /* return 0; */
+}
+
+static void stop_select(ErlDrvEvent e, void* _)
+{
+ CloseHandle((HANDLE)e);
+}
+
+/* Fills in the system's representation of the beam process identifier.
+** The Pid is put in STRING representation in the supplied buffer;
+** no interpretation of it should be done by the rest of the
+** emulator. The buffer should be at least 21 bytes long.
+*/
+void sys_get_pid(char *buffer){
+ DWORD p = GetCurrentProcessId();
+ /* The pid is scalar and is an unsigned long. */
+ sprintf(buffer,"%lu",(unsigned long) p);
+}
+
+void
+sys_init_io(void)
+{
+
+    /* Now here's an icky one... This is called before the drivers are, so we
+       can still change our view of the number of possible open files.
+       We estimate the number as twice the number of ports.
+       We really don't know on Windows, do we? */
+ max_files = 2*erts_max_ports;
+
+#ifdef USE_THREADS
+#ifdef ERTS_SMP
+ if (init_async(-1) < 0)
+ erl_exit(1, "Failed to initialize async-threads\n");
+#else
+ {
+	/* This is special stuff: starting a driver from the
+	 * system routines. Still, it is a nice way of handling things
+	 * the Erlang way.
+	 */
+ SysDriverOpts dopts;
+ int ret;
+
+ sys_memset((void*)&dopts, 0, sizeof(SysDriverOpts));
+ add_driver_entry(&async_driver_entry);
+ ret = erts_open_driver(NULL, NIL, "async", &dopts, NULL);
+ DEBUGF(("open_driver = %d\n", ret));
+ if (ret < 0)
+ erl_exit(1, "Failed to open async driver\n");
+ erts_port[ret].status |= ERTS_PORT_SFLG_IMMORTAL;
+ }
+#endif
+#endif
+}
+
+#ifdef ERTS_SMP
+void
+erts_sys_main_thread(void)
+{
+ HANDLE dummy;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_set_thread_name("parent_thread");
+#endif
+ dummy = CreateEvent(NULL, FALSE, FALSE, NULL);
+ for(;;) {
+ WaitForSingleObject(dummy, INFINITE);
+ }
+}
+#endif
+
+void erts_sys_alloc_init(void)
+{
+ elib_ensure_initialized();
+}
+
+void *erts_sys_alloc(ErtsAlcType_t t, void *x, Uint sz)
+{
+ return malloc((size_t) sz);
+}
+
+void *erts_sys_realloc(ErtsAlcType_t t, void *x, void *p, Uint sz)
+{
+ return realloc(p, (size_t) sz);
+}
+
+void erts_sys_free(ErtsAlcType_t t, void *x, void *p)
+{
+ free(p);
+}
+
+static Preload* preloaded = NULL;
+static unsigned* res_name = NULL;
+static int num_preloaded = 0;
+
+/* Return a pointer to a vector of names of preloaded modules */
+
+Preload* sys_preloaded(void)
+{
+ HRSRC hRes;
+ unsigned char* data;
+
+#define GETWORD(p) (0[p] | 1[p] << 8)
+#define GETDWORD(p) (GETWORD(p) | GETWORD(p+2) << 16)
+
+
+ if (preloaded == NULL) {
+ int i;
+ ASSERT(beam_module != NULL);
+ hRes = FindResource(beam_module, 0, "ERLANG_DICT");
+	/* We might have a resource compiler that lays out the 0 resource with
+	   "0" as a textual name instead... */
+ if (hRes == NULL) {
+ hRes = FindResource(beam_module, "0", "ERLANG_DICT");
+ }
+ if (hRes == NULL) {
+ DWORD n = GetLastError();
+ fprintf(stderr, "No ERLANG_DICT resource\n");
+ exit(1);
+ }
+ data = (unsigned char *) LoadResource(beam_module, hRes);
+
+ num_preloaded = GETWORD(data);
+ if (num_preloaded == 0) {
+ fprintf(stderr, "No preloaded modules\n");
+ exit(1);
+ }
+
+ data += 2;
+ preloaded = erts_alloc(ERTS_ALC_T_PRELOADED,
+ (num_preloaded+1)*sizeof(Preload));
+ res_name = erts_alloc(ERTS_ALC_T_PRELOADED,
+ (num_preloaded+1)*sizeof(unsigned));
+ erts_smp_atomic_add(&sys_misc_mem_sz,
+ (num_preloaded+1)*sizeof(Preload)
+ + (num_preloaded+1)*sizeof(unsigned));
+ for (i = 0; i < num_preloaded; i++) {
+ int n;
+
+ preloaded[i].size = GETDWORD(data);
+ data += 4;
+ res_name[i] = GETWORD(data);
+ data += 2;
+ n = GETWORD(data);
+ data += 2;
+ preloaded[i].name = erts_alloc(ERTS_ALC_T_PRELOADED, n+1);
+ erts_smp_atomic_add(&sys_misc_mem_sz, n+1);
+ sys_memcpy(preloaded[i].name, data, n);
+ preloaded[i].name[n] = '\0';
+ data += n;
+ DEBUGF(("name: %s; size: %d; resource: %p\n",
+ preloaded[i].name, preloaded[i].size, res_name[i]));
+ }
+ preloaded[i].name = NULL;
+ }
+
+#undef GETWORD
+#undef GETDWORD
+ return preloaded;
+}
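
As the loop above implies, the ERLANG_DICT resource is a little-endian blob: a 16-bit module count followed, per module, by a 32-bit code size, a 16-bit resource id, a 16-bit name length and the name bytes. A small sketch that walks such a blob, printing instead of allocating (the function is illustrative only):

#include <stdio.h>

#define GETWORD(p)  ((p)[0] | (p)[1] << 8)
#define GETDWORD(p) (GETWORD(p) | GETWORD((p)+2) << 16)

/* Dump an ERLANG_DICT-style blob: <count:16> then, per module,
 * <size:32> <resource:16> <namelen:16> <name bytes>, all little-endian. */
static void dump_dict(const unsigned char *data)
{
    unsigned n = GETWORD(data), i;
    data += 2;
    for (i = 0; i < n; i++) {
        unsigned size = GETDWORD(data);   data += 4;
        unsigned res  = GETWORD(data);    data += 2;
        unsigned len  = GETWORD(data);    data += 2;
        printf("%.*s: size %u, resource %u\n",
               (int) len, (const char *) data, size, res);
        data += len;
    }
}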
+
+/* Return a pointer to preloaded code for module "module" */
+unsigned char* sys_preload_begin(Preload* pp)
+{
+ HRSRC hRes;
+ unsigned resource;
+
+ ASSERT(beam_module != NULL);
+
+ resource = res_name[pp-preloaded];
+ DEBUGF(("Loading name: %s; size: %d; resource: %p\n",
+ pp->name, pp->size, resource));
+ hRes = FindResource(beam_module, (char *) resource, "ERLANG_CODE");
+ return pp->code = LoadResource(beam_module, hRes);
+}
+
+/* Clean up if allocated */
+void sys_preload_end(Preload* pp)
+{
+}
+
+/* Read a key from console */
+
+int
+sys_get_key(int fd)
+{
+ ASSERT(fd == 0);
+
+ if (win_console) {
+ return ConGetKey();
+ }
+
+ /*
+ * Black magic follows. (Code stolen from get_overlapped_result())
+ */
+
+ if (fd_driver_input != NULL && fd_driver_input->thread != (HANDLE)-1) {
+ DWORD error;
+ int key;
+
+ error = WaitForSingleObject(fd_driver_input->ov.hEvent, INFINITE);
+ if (error == WAIT_OBJECT_0) {
+ if (fd_driver_input->bytesTransferred > 0) {
+ int n;
+ int i;
+ char* buf = OV_BUFFER_PTR(fd_driver_input);
+
+ fd_driver_input->bytesTransferred--;
+ n = fd_driver_input->bytesTransferred;
+ key = buf[0];
+		for (i = 0; i < n; i++) {	/* shift remaining bytes down */
+		    buf[i] = buf[i+1];
+		}
+ return key;
+ }
+ }
+ }
+ return '*'; /* Error! */
+}
+
+/*
+ * Returns a human-readable description of the last error.
+ * The returned pointer will be valid only as long as last_error()
+ * isn't called again.
+ */
+
+char* win32_errorstr(int error)
+{
+#ifdef ERTS_SMP
+ LPTSTR lpBufPtr = erts_smp_tsd_get(win32_errstr_key);
+#else
+ static LPTSTR lpBufPtr = NULL;
+#endif
+ if (lpBufPtr) {
+ LocalFree(lpBufPtr);
+ }
+ FormatMessage(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ error,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPTSTR) &lpBufPtr,
+ 0,
+ NULL);
+ SetLastError(error);
+#ifdef ERTS_SMP
+ erts_smp_tsd_set(win32_errstr_key,lpBufPtr);
+#endif
+ return lpBufPtr;
+}
+
+char* last_error(void)
+{
+ return win32_errorstr(GetLastError());
+}
+
+static void* sys_func_memzero(void* s, size_t n)
+{
+ return sys_memzero(s, n);
+}
+
+#ifdef DEBUG
+static HANDLE hDebugWrite = INVALID_HANDLE_VALUE;
+
+void erl_debug(char *fmt,...)
+{
+ char sbuf[1024]; /* Temporary buffer. */
+ DWORD written; /* Actual number of chars written. */
+ va_list va;
+
+ if (hDebugWrite != INVALID_HANDLE_VALUE) {
+ va_start(va, fmt);
+ vsprintf(sbuf, fmt, va);
+ WriteFile(hDebugWrite, sbuf, strlen(sbuf), &written, NULL);
+ va_end(va);
+ }
+}
+
+static void debug_console(void)
+{
+ HANDLE hRead; /* Handle to read end of pipe. */
+ SECURITY_ATTRIBUTES sa;
+ PROCESS_INFORMATION procInfo;
+ STARTUPINFO startInfo;
+ BOOL ok;
+
+ /*
+ * Create a pipe for communicating with the sub process.
+ */
+
+ sa.nLength = sizeof(sa);
+ sa.lpSecurityDescriptor = NULL;
+ sa.bInheritHandle = TRUE;
+ if (!CreatePipe(&hRead, &hDebugWrite, &sa, 0)) {
+ fprintf(stderr, "Failed to create pipe: %d\n",
+ GetLastError());
+ exit(1);
+ }
+
+ startInfo.cb = sizeof(STARTUPINFO);
+ startInfo.lpTitle = "Erlang Debug Log";
+ startInfo.lpReserved = NULL;
+ startInfo.lpReserved2 = NULL;
+ startInfo.cbReserved2 = 0;
+ startInfo.lpDesktop = NULL;
+ startInfo.dwFlags = STARTF_USESTDHANDLES;
+ startInfo.hStdInput = hRead;
+
+ /* The following handles are not intended to be used. */
+ startInfo.hStdOutput = GetStdHandle(STD_OUTPUT_HANDLE);
+ startInfo.hStdError = GetStdHandle(STD_ERROR_HANDLE);
+
+ ok = CreateProcess(NULL,
+ "erl_log.exe", /* Application */
+ NULL, /* Process security attributes. */
+ NULL, /* Thread security attributes. */
+ TRUE, /* Handle inheritance flag. */
+ CREATE_NEW_CONSOLE, /* Flags. */
+ NULL, /* Environment. */
+ NULL, /* Current directory. */
+ &startInfo,/* Startup info. */
+ &procInfo /* Process information. */
+ );
+
+ CloseHandle(hRead);
+
+ if (ok) {
+ /*
+ * Since we don't use these, close them at once to avoid a resource
+ * leak.
+ */
+ CloseHandle(procInfo.hProcess);
+ CloseHandle(procInfo.hThread);
+ } else {
+ fprintf(stderr, "Create process failed: %s\n", last_error());
+ exit(1);
+ }
+}
+
+void
+erl_bin_write(unsigned char* buf, int sz, int max)
+{
+ int i, imax;
+ char comma[5] = ",";
+
+ if (hDebugWrite == INVALID_HANDLE_VALUE)
+ return;
+
+ if (!sz)
+ return;
+ if (sz > max)
+ imax = max;
+ else
+ imax = sz;
+
+ for (i=0; i<imax; i++) {
+ if (i == imax-1) {
+ if (sz > max)
+ strcpy(comma, ",...");
+ else
+ comma[0] = 0;
+ }
+ if (isdigit(buf[i]))
+ erl_debug("%u%s", (int)(buf[i]), comma);
+ else {
+ if (isalpha(buf[i])) {
+ erl_debug("%c%s", buf[i], comma);
+ }
+ else
+ erl_debug("%u%s", (int)(buf[i]), comma);
+ }
+ }
+}
+
+void
+erl_assert_error(char* expr, char* file, int line)
+{
+ char message[1024];
+
+ sprintf(message, "File %hs, line %d: %hs", file, line, expr);
+ MessageBox(GetActiveWindow(), message, "Assertion failed",
+ MB_OK | MB_ICONERROR);
+#if 0
+ erl_crash_dump(file, line, "Assertion failed: %hs\n", expr);
+#endif
+ DebugBreak();
+}
+
+#endif /* DEBUG */
+
+static void
+check_supported_os_version(void)
+{
+#if defined(_WIN32_WINNT)
+ {
+ DWORD major = (_WIN32_WINNT >> 8) & 0xff;
+ DWORD minor = _WIN32_WINNT & 0xff;
+
+ if (int_os_version.dwPlatformId != VER_PLATFORM_WIN32_NT
+ || int_os_version.dwMajorVersion < major
+ || (int_os_version.dwMajorVersion == major
+ && int_os_version.dwMinorVersion < minor))
+ erl_exit(-1,
+ "Windows version not supported "
+ "(min required: winnt %d.%d)\n",
+ major, minor);
+ }
+#else
+    erl_exit(-1, "Windows version not supported\n");
+#endif
+}
+
+#ifdef USE_THREADS
+static void *ethr_internal_alloc(size_t size)
+{
+ return erts_alloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, (Uint) size);
+}
+static void *ethr_internal_realloc(void *ptr, size_t size)
+{
+ return erts_realloc_fnf(ERTS_ALC_T_ETHR_INTERNAL, ptr, (Uint) size);
+}
+static void ethr_internal_free(void *ptr)
+{
+ erts_free(ERTS_ALC_T_ETHR_INTERNAL, ptr);
+}
+#endif
+
+void
+erts_sys_pre_init(void)
+{
+ int_os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&int_os_version);
+ check_supported_os_version();
+#ifdef USE_THREADS
+ {
+ erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
+ eid.alloc = ethr_internal_alloc;
+ eid.realloc = ethr_internal_realloc;
+ eid.free = ethr_internal_free;
+ erts_thr_init(&eid);
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_init();
+#endif
+ }
+#endif
+ erts_smp_atomic_init(&sys_misc_mem_sz, 0);
+ erts_sys_env_init();
+}
+
+/*
+ * The last two are only used for standalone Erlang;
+ * they should be used by sae_main in the beam DLL to
+ * enable standalone execution via the erl_api routines.
+ */
+
+void noinherit_std_handle(DWORD type)
+{
+ HANDLE h = GetStdHandle(type);
+ if (h != 0 && h != INVALID_HANDLE_VALUE) {
+ SetHandleInformation(h,HANDLE_FLAG_INHERIT,0);
+ }
+}
+
+
+void erl_sys_init(void)
+{
+ HANDLE handle;
+
+ noinherit_std_handle(STD_OUTPUT_HANDLE);
+ noinherit_std_handle(STD_INPUT_HANDLE);
+ noinherit_std_handle(STD_ERROR_HANDLE);
+
+
+ erts_smp_mtx_init(&sys_driver_data_lock, "sys_driver_data_lock");
+
+#ifdef ERTS_SMP
+ erts_smp_tsd_key_create(&win32_errstr_key);
+#endif
+ erts_smp_atomic_init(&pipe_creation_counter,0);
+ /*
+ * Test if we have named pipes or not.
+ */
+
+ switch (int_os_version.dwPlatformId) {
+ case VER_PLATFORM_WIN32_WINDOWS:
+ DEBUGF(("Running on Windows 95"));
+ use_named_pipes = FALSE;
+ break;
+ case VER_PLATFORM_WIN32_NT:
+ DEBUGF(("Running on Windows NT"));
+#ifdef DISABLE_NAMED_PIPES
+ use_named_pipes = FALSE;
+#else
+ use_named_pipes = TRUE;
+#endif
+ break;
+ default: /* Unsupported platform. */
+ exit(1);
+ }
+ DEBUGF((" %d.%d, build %d, %s\n",
+ int_os_version.dwMajorVersion, int_os_version.dwMinorVersion,
+ int_os_version.dwBuildNumber, int_os_version.szCSDVersion));
+
+ ASSERT(beam_module != NULL);
+ init_console();
+
+ /*
+ * The following makes sure that the current directory for the current drive
+ * is remembered (in the environment).
+ */
+
+ chdir(".");
+
+ /*
+ * Make sure that the standard error handle is valid.
+ */
+ handle = GetStdHandle(STD_ERROR_HANDLE);
+ if (handle == INVALID_HANDLE_VALUE || handle == 0) {
+ SetStdHandle(STD_ERROR_HANDLE, GetStdHandle(STD_OUTPUT_HANDLE));
+ }
+ erts_sys_init_float();
+ erts_init_check_io();
+
+    /* Suppress Windows error message popups */
+ SetErrorMode(SetErrorMode(0) |
+ SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX);
+}
+
+#ifdef ERTS_SMP
+void
+erts_sys_schedule_interrupt(int set)
+{
+ erts_check_io_interrupt(set);
+}
+
+void
+erts_sys_schedule_interrupt_timed(int set, long msec)
+{
+ erts_check_io_interrupt_timed(set, msec);
+}
+#endif
+
+/*
+ * Called from schedule() when it runs out of runnable processes,
+ * or when Erlang code has performed INPUT_REDUCTIONS reduction
+ * steps. runnable == 0 iff there are no runnable Erlang processes.
+ */
+void
+erl_sys_schedule(int runnable)
+{
+#ifdef ERTS_SMP
+ erts_check_io(!runnable);
+ ERTS_SMP_LC_ASSERT(!ERTS_LC_IS_BLOCKING);
+#else
+ erts_check_io_interrupt(0);
+ if (runnable) {
+ erts_check_io(0); /* Poll for I/O */
+ check_async_ready(); /* Check async completions */
+ } else {
+ erts_check_io(check_async_ready() ? 0 : 1);
+ }
+#endif
+}
+
+#if defined(USE_THREADS) && !defined(ERTS_SMP)
+/*
+ * Async operation support.
+ */
+
+static ErlDrvEvent async_drv_event;
+
+void
+sys_async_ready(int fd)
+{
+ SetEvent((HANDLE)async_drv_event);
+}
+
+static int
+async_drv_init(void)
+{
+ async_drv_event = (ErlDrvEvent) NULL;
+ return 0;
+}
+
+static ErlDrvData
+async_drv_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
+{
+ if (async_drv_event != (ErlDrvEvent) NULL) {
+ return ERL_DRV_ERROR_GENERAL;
+ }
+ if ((async_drv_event = (ErlDrvEvent)CreateAutoEvent(FALSE)) == (ErlDrvEvent) NULL) {
+ return ERL_DRV_ERROR_GENERAL;
+ }
+
+ driver_select(port_num, async_drv_event, ERL_DRV_READ|ERL_DRV_USE, 1);
+ if (init_async(async_drv_event) < 0) {
+ return ERL_DRV_ERROR_GENERAL;
+ }
+ return (ErlDrvData)port_num;
+}
+
+static void
+async_drv_stop(ErlDrvData port_num)
+{
+ exit_async();
+ driver_select((ErlDrvPort)port_num, async_drv_event, ERL_DRV_READ|ERL_DRV_USE, 0);
+ /*CloseHandle((HANDLE)async_drv_event);*/
+ async_drv_event = (ErlDrvEvent) NULL;
+}
+
+
+static void
+async_drv_input(ErlDrvData port_num, ErlDrvEvent e)
+{
+ check_async_ready();
+
+ /*
+ * Our event is auto-resetting.
+ */
+}
+
+#endif
+
diff --git a/erts/emulator/sys/win32/sys_env.c b/erts/emulator/sys/win32/sys_env.c
new file mode 100644
index 0000000000..ac4be3f316
--- /dev/null
+++ b/erts/emulator/sys/win32/sys_env.c
@@ -0,0 +1,261 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2002-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_sys_driver.h"
+#include "erl_alloc.h"
+
+static char* merge_environment(char *current, char *add);
+static char* arg_to_env(char **arg);
+static char** env_to_arg(char *env);
+static char** find_arg(char **arg, char *str);
+static int compare(const void *a, const void *b);
+
+static erts_smp_rwmtx_t environ_rwmtx;
+
+void
+erts_sys_env_init(void)
+{
+ erts_smp_rwmtx_init(&environ_rwmtx, "environ");
+}
+
+int
+erts_sys_putenv(char *key_value, int sep_ix)
+{
+ int res;
+ char sep = key_value[sep_ix];
+ ASSERT(sep == '=');
+ key_value[sep_ix] = '\0';
+ erts_smp_rwmtx_rwlock(&environ_rwmtx);
+ res = (SetEnvironmentVariable((LPCTSTR) key_value,
+ (LPCTSTR) &key_value[sep_ix+1]) ? 0 : 1);
+ erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+ key_value[sep_ix] = sep;
+ return res;
+}
+
+int
+erts_sys_getenv(char *key, char *value, size_t *size)
+{
+ size_t req_size = 0;
+ int res = 0;
+ DWORD new_size;
+
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ SetLastError(0);
+ new_size = GetEnvironmentVariable((LPCTSTR) key,
+ (LPTSTR) value,
+ (DWORD) *size);
+ res = !new_size && GetLastError() == ERROR_ENVVAR_NOT_FOUND ? -1 : 0;
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ if (res < 0)
+ return res;
+ res = new_size > *size ? 1 : 0;
+ *size = new_size;
+ return res;
+}
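
erts_sys_getenv() above follows a query-then-retry convention: -1 means the variable is unset, 1 means the supplied buffer was too small and *size now holds the required size, and 0 means success. A usage sketch built on that convention (plain malloc/realloc is used here purely for illustration):

#include <stdlib.h>

/* Fetch a copy of an environment variable via erts_sys_getenv(), growing
 * the buffer once if the first guess turns out to be too small. */
static char *getenv_copy(char *key)
{
    size_t size = 64;
    char *buf = malloc(size);
    int res = buf ? erts_sys_getenv(key, buf, &size) : -1;

    if (res == 1) {                 /* too small; *size holds the need */
        char *tmp = realloc(buf, size);
        if (tmp) {
            buf = tmp;
            res = erts_sys_getenv(key, buf, &size);
        }
    }
    if (res != 0) {                 /* unset, or still failing */
        free(buf);
        return NULL;
    }
    return buf;
}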
+
+struct win32_getenv_state {
+ char *env;
+ char *next;
+};
+
+
+void init_getenv_state(GETENV_STATE *state)
+{
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ state->environment_strings = (char *) GetEnvironmentStrings();
+ state->next_string = state->environment_strings;
+}
+
+char *getenv_string(GETENV_STATE *state)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
+ if (state->next_string[0] == '\0')
+ return NULL;
+ else {
+ char *res = state->next_string;
+ state->next_string += sys_strlen(res) + 1;
+ return res;
+ }
+}
+
+void fini_getenv_state(GETENV_STATE *state)
+{
+ FreeEnvironmentStrings(state->environment_strings);
+ state->environment_strings = state->next_string = NULL;
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+}
+
+char*
+win_build_environment(char* new_env)
+{
+ if (new_env == NULL) {
+ return NULL;
+ } else {
+ char *tmp, *merged;
+
+ erts_smp_rwmtx_rlock(&environ_rwmtx);
+ tmp = GetEnvironmentStrings();
+ merged = merge_environment(tmp, new_env);
+
+ FreeEnvironmentStrings(tmp);
+ erts_smp_rwmtx_runlock(&environ_rwmtx);
+ return merged;
+ }
+}
+
+static char*
+merge_environment(char *old, char *add)
+{
+ char **a_arg = env_to_arg(add);
+ char **c_arg = env_to_arg(old);
+ char *ret;
+ int i, j;
+
+ for(i = 0; c_arg[i] != NULL; ++i)
+ ;
+
+ for(j = 0; a_arg[j] != NULL; ++j)
+ ;
+
+ c_arg = erts_realloc(ERTS_ALC_T_TMP,
+ c_arg, (i+j+1) * sizeof(char *));
+
+ for(j = 0; a_arg[j] != NULL; ++j){
+ char **tmp;
+ char *current = a_arg[j];
+
+ if ((tmp = find_arg(c_arg, current)) != NULL) {
+ if (current[strlen(current)-1] != '=') {
+ *tmp = current;
+ } else {
+ *tmp = c_arg[--i];
+ c_arg[i] = NULL;
+ }
+ } else if (current[strlen(current)-1] != '=') {
+ c_arg[i++] = current;
+ c_arg[i] = NULL;
+ }
+ }
+ ret = arg_to_env(c_arg);
+ erts_free(ERTS_ALC_T_TMP, c_arg);
+ erts_free(ERTS_ALC_T_TMP, a_arg);
+ return ret;
+}
+
+static char**
+find_arg(char **arg, char *str)
+{
+ char *tmp;
+ int len;
+
+ if ((tmp = strchr(str, '=')) != NULL) {
+ tmp++;
+ len = tmp - str;
+ while (*arg != NULL){
+ if (_strnicmp(*arg, str, len) == 0){
+ return arg;
+ }
+ ++arg;
+ }
+ }
+ return NULL;
+}
+
+static int
+compare(const void *a, const void *b)
+{
+ char *s1 = *((char **) a);
+ char *s2 = *((char **) b);
+ char *e1 = strchr(s1,'=');
+ char *e2 = strchr(s2,'=');
+ int ret;
+ int len;
+
+ if(!e1)
+ e1 = s1 + strlen(s1);
+ if(!e2)
+ e2 = s2 + strlen(s2);
+
+ if((e1 - s1) > (e2 - s2))
+ len = (e2 - s2);
+ else
+ len = (e1 - s1);
+
+ ret = _strnicmp(s1,s2,len);
+ if (ret == 0)
+ return ((e1 - s1) - (e2 - s2));
+ else
+ return ret;
+}
+
+static char**
+env_to_arg(char *env)
+{
+ char **ret;
+ char *tmp;
+ int i;
+ int num_strings = 0;
+
+ for(tmp = env; *tmp != '\0'; tmp += strlen(tmp)+1) {
+ ++num_strings;
+ }
+ ret = erts_alloc(ERTS_ALC_T_TMP, sizeof(char *) * (num_strings + 1));
+ i = 0;
+ for(tmp = env; *tmp != '\0'; tmp += strlen(tmp)+1){
+ ret[i++] = tmp;
+ }
+ ret[i] = NULL;
+ return ret;
+}
+
+static char*
+arg_to_env(char **arg)
+{
+ char *block;
+ char *ptr;
+ int i;
+ int totlen = 1; /* extra '\0' */
+
+ for(i = 0; arg[i] != NULL; ++i) {
+ totlen += strlen(arg[i])+1;
+ }
+
+ /* sort the environment vector */
+ qsort(arg, i, sizeof(char *), &compare);
+
+ if (totlen == 1){
+ block = erts_alloc(ERTS_ALC_T_ENVIRONMENT, 2);
+ block[0] = block[1] = '\0';
+ } else {
+ block = erts_alloc(ERTS_ALC_T_ENVIRONMENT, totlen);
+ ptr = block;
+ for(i=0; arg[i] != NULL; ++i){
+ strcpy(ptr, arg[i]);
+ ptr += strlen(ptr)+1;
+ }
+ *ptr = '\0';
+ }
+ return block;
+}
diff --git a/erts/emulator/sys/win32/sys_float.c b/erts/emulator/sys/win32/sys_float.c
new file mode 100644
index 0000000000..9e67ca7f48
--- /dev/null
+++ b/erts/emulator/sys/win32/sys_float.c
@@ -0,0 +1,145 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/* Float conversions */
+
+#include "sys.h"
+#include "signal.h"
+
+/* global variable for floating point checks, (see sys.h) */
+/* Note! This is part of the interface Machine <---> sys.c */
+volatile int erl_fp_exception = 0;
+
+static void fpe_exception(int sig);
+
+void
+erts_sys_init_float(void)
+{
+}
+void erts_thread_init_float(void)
+{
+}
+void erts_thread_disable_fpe(void)
+{
+}
+
+/*
+ ** These two functions should maybe use localeconv() to pick up
+ ** the current radix character, but since it is uncertain how
+ ** expensive such a system call is, and since no-one has heard
+ ** of other radix characters than '.' and ',' an ad-hoc
+ ** low execution time solution is used instead.
+ */
+
+int
+sys_chars_to_double(char *buf, double *fp)
+{
+ char *s = buf, *t, *dp;
+
+ /* Robert says that something like this is what he really wanted:
+ * (The [.,] radix test is NOT what Robert wanted - it was added later)
+ *
+ * 7 == sscanf(Tbuf, "%[+-]%[0-9][.,]%[0-9]%[eE]%[+-]%[0-9]%s", ....);
+ * if (*s2 == 0 || *s3 == 0 || *s4 == 0 || *s6 == 0 || *s7)
+ * break;
+ */
+
+ /* Scan string to check syntax. */
+ if (*s == '+' || *s == '-') s++;
+ if (!isdigit(*s)) /* Leading digits. */
+ return -1;
+ while (isdigit(*s)) s++;
+ if (*s != '.' && *s != ',')/* Decimal part. */
+ return -1;
+ dp = s++; /* Remember decimal point pos just in case */
+ if (!isdigit(*s))
+ return -1;
+ while (isdigit(*s)) s++;
+ if (*s == 'e' || *s == 'E') {
+ /* There is an exponent. */
+ s++;
+ if (*s == '+' || *s == '-') s++;
+ if (!isdigit(*s))
+ return -1;
+ while (isdigit(*s)) s++;
+ }
+ if (*s) /* That should be it */
+ return -1;
+
+ errno = 0;
+ *fp = strtod(buf, &t);
+ if (t != s) { /* Whole string not scanned */
+ /* Try again with other radix char */
+ *dp = (*dp == '.') ? ',' : '.';
+ errno = 0;
+ *fp = strtod(buf, &t);
+ if (t != s) { /* Whole string not scanned */
+ return -1;
+ }
+ }
+ if (*fp < -1.0e-307 || 1.0e-307 < *fp) {
+ if (errno == ERANGE) {
+ return -1;
+ }
+ } else {
+ if (errno == ERANGE) {
+	    /* Special case: Windows (at least some versions) regards very
+	     * small, i.e. non-normalized, numbers as a range error for
+	     * strtod(), but not for atof().
+	     */
+ *fp = atof(buf);
+ }
+ }
+
+ return 0;
+}
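
The retry above flips the remembered radix character and calls strtod() a second time, so the same buffer parses whether it was written with '.' or ',', regardless of the C locale. The same idea reduced to a helper of its own, without the full syntax check performed above (name and structure are illustrative):

#include <stdlib.h>
#include <string.h>

/* Parse 'buf' as a double, accepting either '.' or ',' as the radix
 * character by retrying with the character swapped. Returns 0 on success. */
static int parse_double_any_radix(char *buf, double *fp)
{
    char *end;
    char *dp = strpbrk(buf, ".,");      /* first candidate radix character */

    *fp = strtod(buf, &end);
    if (end != buf && *end == '\0')
        return 0;
    if (dp == NULL)
        return -1;
    *dp = (*dp == '.') ? ',' : '.';     /* flip the radix character and retry */
    *fp = strtod(buf, &end);
    return (end != buf && *end == '\0') ? 0 : -1;
}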
+
+/*
+** Convert a double to ascii format 0.dddde[+|-]ddd
+** return number of characters converted
+*/
+
+int
+sys_double_to_chars(double fp, char *buf)
+{
+ char *s = buf;
+
+ (void) sprintf(buf, "%.20e", fp);
+    /* Search up to the decimal point */
+ if (*s == '+' || *s == '-') s++;
+ while (isdigit(*s)) s++;
+ if (*s == ',') *s++ = '.'; /* Replace ',' with '.' */
+ /* Scan to end of string */
+ while (*s) s++;
+ return s-buf; /* i.e strlen(buf) */
+}
+
+int
+matherr(struct _exception *exc)
+{
+ erl_fp_exception = 1;
+ DEBUGF(("FP exception (matherr) (0x%x) (%d)\n", exc->type, erl_fp_exception));
+ return 1;
+}
+
+static void
+fpe_exception(int sig)
+{
+ erl_fp_exception = 1;
+ DEBUGF(("FP exception\n"));
+}
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
new file mode 100644
index 0000000000..d2449a1bdb
--- /dev/null
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -0,0 +1,142 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * Purpose: Interrupt handling in windows.
+ */
+#include "sys.h"
+#include "erl_alloc.h"
+#include "erl_driver.h"
+#include "../../drivers/win32/win_con.h"
+
+#if defined(__GNUC__)
+# define WIN_SYS_INLINE __inline__
+#elif defined(__WIN32__)
+# define WIN_SYS_INLINE __forceinline
+#endif
+
+#ifdef ERTS_SMP
+erts_smp_atomic_t erts_break_requested;
+#define ERTS_SET_BREAK_REQUESTED \
+ erts_smp_atomic_set(&erts_break_requested, (long) 1)
+#define ERTS_UNSET_BREAK_REQUESTED \
+ erts_smp_atomic_set(&erts_break_requested, (long) 0)
+#else
+volatile int erts_break_requested = 0;
+#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
+#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0)
+#endif
+
+extern int nohup;
+HANDLE erts_sys_break_event = NULL;
+
+void erts_do_break_handling(void)
+{
+ /*
+ * Most functions that do_break() calls are intentionally not thread safe;
+ * therefore, make sure that all threads but this one are blocked before
+ * proceeding!
+ */
+ erts_smp_block_system(0);
+ /* call the break handling function, reset the flag */
+ do_break();
+
+ ResetEvent(erts_sys_break_event);
+ ERTS_UNSET_BREAK_REQUESTED;
+
+ erts_smp_release_system();
+}
+
+
+BOOL WINAPI ctrl_handler_ignore_break(DWORD dwCtrlType)
+{
+ switch (dwCtrlType) {
+ case CTRL_C_EVENT:
+ case CTRL_BREAK_EVENT:
+ return TRUE;
+ break;
+ case CTRL_LOGOFF_EVENT:
+ if (nohup)
+ return TRUE;
+	/* else fall through... */
+ case CTRL_CLOSE_EVENT:
+ case CTRL_SHUTDOWN_EVENT:
+ erl_exit(0, "");
+ break;
+ }
+ return TRUE;
+}
+
+void erts_set_ignore_break(void) {
+ ConSetCtrlHandler(ctrl_handler_ignore_break);
+ SetConsoleCtrlHandler(ctrl_handler_ignore_break, TRUE);
+}
+
+BOOL WINAPI ctrl_handler_replace_intr(DWORD dwCtrlType)
+{
+ switch (dwCtrlType) {
+ case CTRL_C_EVENT:
+ return FALSE;
+ case CTRL_BREAK_EVENT:
+ SetEvent(erts_sys_break_event);
+ break;
+ case CTRL_LOGOFF_EVENT:
+ if (nohup)
+ return TRUE;
+	/* else fall through... */
+ case CTRL_CLOSE_EVENT:
+ case CTRL_SHUTDOWN_EVENT:
+ erl_exit(0, "");
+ break;
+ }
+ return TRUE;
+}
+
+
+/* Don't use ctrl-c for break handler but let it be
+ used by the shell instead (see user_drv.erl) */
+void erts_replace_intr(void) {
+ ConSetCtrlHandler(ctrl_handler_replace_intr);
+ SetConsoleCtrlHandler(ctrl_handler_replace_intr, TRUE);
+}
+
+BOOL WINAPI ctrl_handler(DWORD dwCtrlType)
+{
+ switch (dwCtrlType) {
+ case CTRL_C_EVENT:
+ case CTRL_BREAK_EVENT:
+ SetEvent(erts_sys_break_event);
+ break;
+ case CTRL_LOGOFF_EVENT:
+ if (nohup)
+ return TRUE;
+	/* else fall through... */
+ case CTRL_CLOSE_EVENT:
+ case CTRL_SHUTDOWN_EVENT:
+ erl_exit(0, "");
+ break;
+ }
+ return TRUE;
+}
+
+void init_break_handler()
+{
+ ConSetCtrlHandler(ctrl_handler);
+ SetConsoleCtrlHandler(ctrl_handler, TRUE);
+}
+
diff --git a/erts/emulator/sys/win32/sys_time.c b/erts/emulator/sys/win32/sys_time.c
new file mode 100644
index 0000000000..50e43065b5
--- /dev/null
+++ b/erts/emulator/sys/win32/sys_time.c
@@ -0,0 +1,96 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 1997-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+/*
+ * Purpose: System-dependent time functions.
+ */
+
+#include "sys.h"
+#include "assert.h"
+
+#ifdef __GNUC__
+#define LL_LITERAL(X) X##LL
+#else
+#define LL_LITERAL(X) X##i64
+#endif
+
+/******************* Routines for time measurement *********************/
+
+#define EPOCH_JULIAN_DIFF LL_LITERAL(11644473600)
+
+static SysHrTime wrap = 0;
+static DWORD last_tick_count = 0;
+
+int
+sys_init_time(void)
+{
+ return 1;
+}
+
+void
+sys_gettimeofday(SysTimeval *tv)
+{
+ SYSTEMTIME t;
+ FILETIME ft;
+ LONGLONG lft;
+
+ GetSystemTime(&t);
+ SystemTimeToFileTime(&t, &ft);
+ memcpy(&lft, &ft, sizeof(lft));
+ tv->tv_usec = (long) ((lft / LL_LITERAL(10)) % LL_LITERAL(1000000));
+ tv->tv_sec = (long) ((lft / LL_LITERAL(10000000)) - EPOCH_JULIAN_DIFF);
+}
+
+SysHrTime
+sys_gethrtime(void)
+{
+ DWORD ticks = (SysHrTime) (GetTickCount() & 0x7FFFFFFF);
+ if (ticks < (SysHrTime) last_tick_count) {
+ wrap += LL_LITERAL(1) << 31;
+ }
+ last_tick_count = ticks;
+ return ((((LONGLONG) ticks) + wrap) * LL_LITERAL(1000000));
+}
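
sys_gethrtime() above masks GetTickCount() to 31 bits and adds 2^31 to a wrap accumulator whenever the masked value goes backwards, so the returned value keeps increasing across the roughly 25-day tick wrap. A pure-C sketch of that bookkeeping, fed with artificial tick values (names are made up; the real function also multiplies the result up to a finer time unit):

#include <stdio.h>

static long long wrap_acc = 0;
static unsigned long last_ticks = 0;

/* Same wrap handling as sys_gethrtime(), with the tick source passed in. */
static long long monotonic_ticks(unsigned long raw_ticks)
{
    unsigned long ticks = raw_ticks & 0x7FFFFFFFUL;

    if (ticks < last_ticks)
        wrap_acc += 1LL << 31;
    last_ticks = ticks;
    return (long long) ticks + wrap_acc;
}

int main(void)
{
    printf("%lld\n", monotonic_ticks(0x7FFFFFF0UL));  /* just before the wrap */
    printf("%lld\n", monotonic_ticks(0x00000010UL));  /* after it: still increasing */
    return 0;
}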
+
+clock_t
+sys_times(SysTimes *buffer) {
+ clock_t kernel_ticks = (GetTickCount() /
+ (1000 / SYS_CLK_TCK)) & 0x7FFFFFFF;
+ FILETIME dummy;
+ LONGLONG user;
+ LONGLONG system;
+
+ buffer->tms_utime = buffer->tms_stime = buffer->tms_cutime =
+ buffer->tms_cstime = 0;
+
+ if (GetProcessTimes(GetCurrentProcess(), &dummy, &dummy,
+ (FILETIME *) &system, (FILETIME *) &user) == 0)
+ return kernel_ticks;
+ system /= (LONGLONG)(10000000 / SYS_CLK_TCK);
+ user /= (LONGLONG)(10000000 / SYS_CLK_TCK);
+
+ buffer->tms_utime = (clock_t) (user & LL_LITERAL(0x7FFFFFFF));
+ buffer->tms_stime = (clock_t) (system & LL_LITERAL(0x7FFFFFFF));
+ return kernel_ticks;
+}
+
+
+
+
+
+