author     Sverker Eriksson <[email protected]>  2017-08-30 20:55:08 +0200
committer  Sverker Eriksson <[email protected]>  2017-08-30 20:55:08 +0200
commit     7c67bbddb53c364086f66260701bc54a61c9659c (patch)
tree       92ab0d4b91d5e2f6e7a3f9d61ea25089e8a71fe0 /erts/emulator/sys/common
parent     97dc5e7f396129222419811c173edc7fa767b0f8 (diff)
parent     3b7a6ffddc819bf305353a593904cea9e932e7dc (diff)
download   otp-7c67bbddb53c364086f66260701bc54a61c9659c.tar.gz
           otp-7c67bbddb53c364086f66260701bc54a61c9659c.tar.bz2
           otp-7c67bbddb53c364086f66260701bc54a61c9659c.zip
Merge tag 'OTP-19.0' into sverker/19/binary_to_atom-utf8-crash/ERL-474/OTP-14590
Diffstat (limited to 'erts/emulator/sys/common')
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c                    |  784
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h                    |   74
-rw-r--r--  erts/emulator/sys/common/erl_mmap.c                        | 2918
-rw-r--r--  erts/emulator/sys/common/erl_mmap.h                        |  181
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c                        | 1283
-rw-r--r--  erts/emulator/sys/common/erl_mseg.h                        |   82
-rw-r--r--  erts/emulator/sys/common/erl_mtrace_sys_wrap.c             |   23
-rw-r--r--  erts/emulator/sys/common/erl_os_monotonic_time_extender.c |   89
-rw-r--r--  erts/emulator/sys/common/erl_os_monotonic_time_extender.h |   66
-rw-r--r--  erts/emulator/sys/common/erl_poll.c                        |  503
-rw-r--r--  erts/emulator/sys/common/erl_poll.h                        |   46
-rw-r--r--  erts/emulator/sys/common/erl_sys_common_misc.c             |   33
-rw-r--r--  erts/emulator/sys/common/erl_util_queue.h                  |   21

13 files changed, 4780 insertions(+), 1323 deletions(-)
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index 7035dc77df..44a77f3ea5 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -1,18 +1,19 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2013. All Rights Reserved. + * Copyright Ericsson AB 2006-2016. All Rights Reserved. * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * %CopyrightEnd% */ @@ -34,12 +35,15 @@ #endif #include "sys.h" #include "global.h" +#include "erl_port.h" #include "erl_check_io.h" #include "erl_thr_progress.h" #include "dtrace-wrapper.h" +#include "lttng-wrapper.h" +#define ERTS_WANT_TIMER_WHEEL_API +#include "erl_time.h" -#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS -#else +#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS # include "safe_hash.h" # define DRV_EV_STATE_HTAB_SIZE 1024 #endif @@ -51,8 +55,17 @@ typedef char EventStateType; #define ERTS_EV_TYPE_STOP_USE ((EventStateType) 3) /* pending stop_select */ typedef char EventStateFlags; -#define ERTS_EV_FLAG_USED ((EventStateFlags) 1) /* ERL_DRV_USE has been turned on */ +#define ERTS_EV_FLAG_USED ((EventStateFlags) 1) /* ERL_DRV_USE has been turned on */ +#define ERTS_EV_FLAG_DEFER_IN_EV ((EventStateFlags) 2) +#define ERTS_EV_FLAG_DEFER_OUT_EV ((EventStateFlags) 4) + +#ifdef DEBUG +# define ERTS_ACTIVE_FD_INC 2 +#else +# define ERTS_ACTIVE_FD_INC 128 +#endif +#define ERTS_CHECK_IO_POLL_RES_LEN 512 #if defined(ERTS_KERNEL_POLL_VERSION) # define ERTS_CIO_EXPORT(FUNC) FUNC ## _kp @@ -66,6 +79,7 @@ typedef char EventStateFlags; (ERTS_POLL_USE_POLL && !ERTS_POLL_USE_KERNEL_POLL) #define ERTS_CIO_POLL_CTL ERTS_POLL_EXPORT(erts_poll_control) +#define ERTS_CIO_POLL_CTLV ERTS_POLL_EXPORT(erts_poll_controlv) #define ERTS_CIO_POLL_WAIT ERTS_POLL_EXPORT(erts_poll_wait) #ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT #define ERTS_CIO_POLL_AS_INTR ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt) @@ -78,10 +92,19 @@ typedef char EventStateFlags; #define ERTS_CIO_POLL_INIT ERTS_POLL_EXPORT(erts_poll_init) #define ERTS_CIO_POLL_INFO ERTS_POLL_EXPORT(erts_poll_info) +#define GET_FD(fd) fd + static struct pollset_info { ErtsPollSet ps; erts_smp_atomic_t in_poll_wait; /* set while doing poll */ + struct { + int six; /* start index */ + int eix; /* end index */ + erts_smp_atomic32_t no; + int size; + ErtsSysFdType *array; + } active_fd; #ifdef ERTS_SMP struct removed_fd* removed_list; /* list of deselected fd's*/ 
erts_smp_spinlock_t removed_list_lock; @@ -94,9 +117,11 @@ typedef struct { SafeHashBucket hb; #endif ErtsSysFdType fd; - union { - ErtsDrvEventDataState *event; /* ERTS_EV_TYPE_DRV_EV */ + struct { ErtsDrvSelectDataState *select; /* ERTS_EV_TYPE_DRV_SEL */ +#if ERTS_CIO_HAVE_DRV_EVENT + ErtsDrvEventDataState *event; /* ERTS_EV_TYPE_DRV_EV */ +#endif erts_driver_t* drv_ptr; /* ERTS_EV_TYPE_STOP_USE */ } driver; ErtsPollEvents events; @@ -166,6 +191,10 @@ static ERTS_INLINE ErtsDrvEventState* hash_new_drv_ev_state(ErtsSysFdType fd) ErtsDrvEventState tmpl; tmpl.fd = fd; tmpl.driver.select = NULL; +#if ERTS_CIO_HAVE_DRV_EVENT + tmpl.driver.event = NULL; +#endif + tmpl.driver.drv_ptr = NULL; tmpl.events = 0; tmpl.remove_cnt = 0; tmpl.type = ERTS_EV_TYPE_NONE; @@ -206,6 +235,69 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_F #endif static ERTS_INLINE void +init_iotask(ErtsIoTask *io_task) +{ + erts_port_task_handle_init(&io_task->task); + erts_smp_atomic_init_nob(&io_task->executed_time, ~((erts_aint_t) 0)); +} + +static ERTS_INLINE int +is_iotask_active(ErtsIoTask *io_task, erts_aint_t current_cio_time) +{ + if (erts_port_task_is_scheduled(&io_task->task)) + return 1; + if (erts_smp_atomic_read_nob(&io_task->executed_time) == current_cio_time) + return 1; + return 0; +} + +static ERTS_INLINE ErtsDrvSelectDataState * +alloc_drv_select_data(void) +{ + ErtsDrvSelectDataState *dsp = erts_alloc(ERTS_ALC_T_DRV_SEL_D_STATE, + sizeof(ErtsDrvSelectDataState)); + dsp->inport = NIL; + dsp->outport = NIL; + init_iotask(&dsp->iniotask); + init_iotask(&dsp->outiotask); + return dsp; +} + +static ERTS_INLINE void +free_drv_select_data(ErtsDrvSelectDataState *dsp) +{ + ASSERT(!erts_port_task_is_scheduled(&dsp->iniotask.task)); + ASSERT(!erts_port_task_is_scheduled(&dsp->outiotask.task)); + erts_free(ERTS_ALC_T_DRV_SEL_D_STATE, dsp); +} + +#if ERTS_CIO_HAVE_DRV_EVENT + +static ERTS_INLINE ErtsDrvEventDataState * +alloc_drv_event_data(void) +{ + ErtsDrvEventDataState *dep = erts_alloc(ERTS_ALC_T_DRV_EV_D_STATE, + sizeof(ErtsDrvEventDataState)); + dep->port = NIL; + dep->data = NULL; + dep->removed_events = 0; +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + dep->deferred_events = 0; +#endif + init_iotask(&dep->iotask); + return dep; +} + +static ERTS_INLINE void +free_drv_event_data(ErtsDrvEventDataState *dep) +{ + ASSERT(!erts_port_task_is_scheduled(&dep->iotask.task)); + erts_free(ERTS_ALC_T_DRV_EV_D_STATE, dep); +} + +#endif /* ERTS_CIO_HAVE_DRV_EVENT */ + +static ERTS_INLINE void remember_removed(ErtsDrvEventState *state, struct pollset_info* psi) { #ifdef ERTS_SMP @@ -285,7 +377,7 @@ forget_removed(struct pollset_info* psi) drv_ptr = state->driver.drv_ptr; ASSERT(drv_ptr); state->type = ERTS_EV_TYPE_NONE; - state->flags = 0; + state->flags &= ~ERTS_EV_FLAG_USED; state->driver.drv_ptr = NULL; /* Fall through */ case ERTS_EV_TYPE_NONE: @@ -304,6 +396,7 @@ forget_removed(struct pollset_info* psi) if (drv_ptr) { int was_unmasked = erts_block_fpe(); DTRACE1(driver_stop_select, drv_ptr->name); + LTTNG1(driver_stop_select, drv_ptr->name); (*drv_ptr->stop_select) ((ErlDrvEvent) fd, NULL); erts_unblock_fpe(was_unmasked); if (drv_ptr->handle) { @@ -322,14 +415,16 @@ static void grow_drv_ev_state(int min_ix) { int i; + int old_len; int new_len; - new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_ix + 1); - if (new_len > max_fds) - new_len = max_fds; - erts_smp_mtx_lock(&drv_ev_state_grow_lock); - if (erts_smp_atomic_read_nob(&drv_ev_state_len) <= min_ix) { + old_len = 
erts_smp_atomic_read_nob(&drv_ev_state_len); + if (min_ix >= old_len) { + new_len = erts_poll_new_table_len(old_len, min_ix + 1); + if (new_len > max_fds) + new_len = max_fds; + for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { /* lock all fd's */ erts_smp_mtx_lock(&drv_ev_state_locks[i].lck); } @@ -339,9 +434,13 @@ grow_drv_ev_state(int min_ix) sizeof(ErtsDrvEventState)*new_len) : erts_alloc(ERTS_ALC_T_DRV_EV_STATE, sizeof(ErtsDrvEventState)*new_len)); - for (i = erts_smp_atomic_read_nob(&drv_ev_state_len); i < new_len; i++) { + for (i = old_len; i < new_len; i++) { drv_ev_state[i].fd = (ErtsSysFdType) i; drv_ev_state[i].driver.select = NULL; +#if ERTS_CIO_HAVE_DRV_EVENT + drv_ev_state[i].driver.event = NULL; +#endif + drv_ev_state[i].driver.drv_ptr = NULL; drv_ev_state[i].events = 0; drv_ev_state[i].remove_cnt = 0; drv_ev_state[i].type = ERTS_EV_TYPE_NONE; @@ -362,11 +461,7 @@ grow_drv_ev_state(int min_ix) static ERTS_INLINE void abort_task(Eterm id, ErtsPortTaskHandle *pthp, EventStateType type) { - if (is_nil(id)) { - ASSERT(type == ERTS_EV_TYPE_NONE - || !erts_port_task_is_scheduled(pthp)); - } - else if (erts_port_task_is_scheduled(pthp)) { + if (is_not_nil(id) && erts_port_task_is_scheduled(pthp)) { erts_port_task_abort(pthp); ASSERT(erts_is_port_alive(id)); } @@ -381,7 +476,7 @@ abort_tasks(ErtsDrvEventState *state, int mode) #if ERTS_CIO_HAVE_DRV_EVENT case ERTS_EV_TYPE_DRV_EV: abort_task(state->driver.event->port, - &state->driver.event->task, + &state->driver.event->iotask.task, ERTS_EV_TYPE_DRV_EV); return; #endif @@ -395,14 +490,14 @@ abort_tasks(ErtsDrvEventState *state, int mode) case ERL_DRV_WRITE: ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL); abort_task(state->driver.select->outport, - &state->driver.select->outtask, + &state->driver.select->outiotask.task, state->type); if (mode == ERL_DRV_WRITE) break; case ERL_DRV_READ: ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL); abort_task(state->driver.select->inport, - &state->driver.select->intask, + &state->driver.select->iniotask.task, state->type); break; default: @@ -440,16 +535,14 @@ deselect(ErtsDrvEventState *state, int mode) if (!(state->events)) { switch (state->type) { case ERTS_EV_TYPE_DRV_SEL: - ASSERT(!erts_port_task_is_scheduled(&state->driver.select->intask)); - ASSERT(!erts_port_task_is_scheduled(&state->driver.select->outtask)); - erts_free(ERTS_ALC_T_DRV_SEL_D_STATE, - state->driver.select); + state->driver.select->inport = NIL; + state->driver.select->outport = NIL; break; #if ERTS_CIO_HAVE_DRV_EVENT case ERTS_EV_TYPE_DRV_EV: - ASSERT(!erts_port_task_is_scheduled(&state->driver.event->task)); - erts_free(ERTS_ALC_T_DRV_EV_D_STATE, - state->driver.event); + state->driver.event->port = NIL; + state->driver.event->data = NULL; + state->driver.event->removed_events = (ErtsPollEvents) 0; break; #endif case ERTS_EV_TYPE_NONE: @@ -459,20 +552,297 @@ deselect(ErtsDrvEventState *state, int mode) break; } - state->driver.select = NULL; state->type = ERTS_EV_TYPE_NONE; - state->flags = 0; + state->flags &= ~ERTS_EV_FLAG_USED; remember_removed(state, &pollset); } } - #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS # define IS_FD_UNKNOWN(state) ((state)->type == ERTS_EV_TYPE_NONE && (state)->remove_cnt == 0) #else # define IS_FD_UNKNOWN(state) ((state) == NULL) #endif +static ERTS_INLINE void +check_fd_cleanup(ErtsDrvEventState *state, +#if ERTS_CIO_HAVE_DRV_EVENT + ErtsDrvEventDataState **free_event, +#endif + ErtsDrvSelectDataState **free_select) +{ + erts_aint_t current_cio_time; + + 
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd))); + + current_cio_time = erts_smp_atomic_read_acqb(&erts_check_io_time); + *free_select = NULL; + if (state->driver.select + && (state->type != ERTS_EV_TYPE_DRV_SEL) + && !is_iotask_active(&state->driver.select->iniotask, current_cio_time) + && !is_iotask_active(&state->driver.select->outiotask, current_cio_time)) { + + *free_select = state->driver.select; + state->driver.select = NULL; + } + +#if ERTS_CIO_HAVE_DRV_EVENT + *free_event = NULL; + if (state->driver.event + && (state->type != ERTS_EV_TYPE_DRV_EV) + && !is_iotask_active(&state->driver.event->iotask, current_cio_time)) { + + *free_event = state->driver.event; + state->driver.event = NULL; + } +#endif + +#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS + if (((state->type != ERTS_EV_TYPE_NONE) + | state->remove_cnt +#if ERTS_CIO_HAVE_DRV_EVENT + | (state->driver.event != NULL) +#endif + | (state->driver.select != NULL)) == 0) { + + hash_erase_drv_ev_state(state); + + } +#endif +} + +static ERTS_INLINE int +check_cleanup_active_fd(ErtsSysFdType fd, +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + ErtsPollControlEntry *pce, + int *pce_ix, +#endif + erts_aint_t current_cio_time) +{ + ErtsDrvEventState *state; + int active = 0; + erts_smp_mtx_t *mtx = fd_mtx(fd); + void *free_select = NULL; +#if ERTS_CIO_HAVE_DRV_EVENT + void *free_event = NULL; +#endif +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + ErtsPollEvents evon = 0, evoff = 0; +#endif + + erts_smp_mtx_lock(mtx); + +#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS + state = &drv_ev_state[(int) fd]; +#else + state = hash_get_drv_ev_state(fd); /* may be NULL! */ + if (state) +#endif + { + if (state->driver.select) { +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + if (is_iotask_active(&state->driver.select->iniotask, current_cio_time)) { + active = 1; + if ((state->events & ERTS_POLL_EV_IN) + && !(state->flags & ERTS_EV_FLAG_DEFER_IN_EV)) { + evoff |= ERTS_POLL_EV_IN; + state->flags |= ERTS_EV_FLAG_DEFER_IN_EV; + } + } + else if (state->flags & ERTS_EV_FLAG_DEFER_IN_EV) { + if (state->events & ERTS_POLL_EV_IN) + evon |= ERTS_POLL_EV_IN; + state->flags &= ~ERTS_EV_FLAG_DEFER_IN_EV; + } + if (is_iotask_active(&state->driver.select->outiotask, current_cio_time)) { + active = 1; + if ((state->events & ERTS_POLL_EV_OUT) + && !(state->flags & ERTS_EV_FLAG_DEFER_OUT_EV)) { + evoff |= ERTS_POLL_EV_OUT; + state->flags |= ERTS_EV_FLAG_DEFER_OUT_EV; + } + } + else if (state->flags & ERTS_EV_FLAG_DEFER_OUT_EV) { + if (state->events & ERTS_POLL_EV_OUT) + evon |= ERTS_POLL_EV_OUT; + state->flags &= ~ERTS_EV_FLAG_DEFER_OUT_EV; + } + if (active) + (void) 0; + else +#else + if (is_iotask_active(&state->driver.select->iniotask, current_cio_time) + || is_iotask_active(&state->driver.select->outiotask, current_cio_time)) + active = 1; + else +#endif + if (state->type != ERTS_EV_TYPE_DRV_SEL) { + free_select = state->driver.select; + state->driver.select = NULL; + } + } + +#if ERTS_CIO_HAVE_DRV_EVENT + if (state->driver.event) { + if (is_iotask_active(&state->driver.event->iotask, current_cio_time)) { +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + ErtsPollEvents evs = state->events & ~state->driver.event->deferred_events; + if (evs) { + evoff |= evs; + state->driver.event->deferred_events |= evs; + } +#endif + active = 1; + } + else if (state->type != ERTS_EV_TYPE_DRV_EV) { + free_event = state->driver.event; + state->driver.event = NULL; + } +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + else { + ErtsPollEvents evs = state->events & state->driver.event->deferred_events; + if (evs) { + evon |= evs; + 
state->driver.event->deferred_events = 0; + } + } +#endif + + } +#endif + +#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS + if (((state->type != ERTS_EV_TYPE_NONE) | state->remove_cnt | active) == 0) + hash_erase_drv_ev_state(state); +#endif + + } + + erts_smp_mtx_unlock(mtx); + + if (free_select) + free_drv_select_data(free_select); +#if ERTS_CIO_HAVE_DRV_EVENT + if (free_event) + free_drv_event_data(free_event); +#endif + +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + if (evoff) { + ErtsPollControlEntry *pcep = &pce[(*pce_ix)++]; + pcep->fd = fd; + pcep->events = evoff; + pcep->on = 0; + } + if (evon) { + ErtsPollControlEntry *pcep = &pce[(*pce_ix)++]; + pcep->fd = fd; + pcep->events = evon; + pcep->on = 1; + } +#endif + + return active; +} + +static void +check_cleanup_active_fds(erts_aint_t current_cio_time) +{ + int six = pollset.active_fd.six; + int eix = pollset.active_fd.eix; + erts_aint32_t no = erts_smp_atomic32_read_dirty(&pollset.active_fd.no); + int size = pollset.active_fd.size; + int ix = six; +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + /* every fd might add two entries */ + Uint pce_sz = 2*sizeof(ErtsPollControlEntry)*no; + ErtsPollControlEntry *pctrl_entries = (pce_sz + ? erts_alloc(ERTS_ALC_T_TMP, pce_sz) + : NULL); + int pctrl_ix = 0; +#endif + + while (ix != eix) { + ErtsSysFdType fd = pollset.active_fd.array[ix]; + int nix = ix + 1; + if (nix >= size) + nix = 0; + ASSERT(fd != ERTS_SYS_FD_INVALID); + if (!check_cleanup_active_fd(fd, +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + pctrl_entries, + &pctrl_ix, +#endif + current_cio_time)) { + no--; + if (ix == six) { +#ifdef DEBUG + pollset.active_fd.array[ix] = ERTS_SYS_FD_INVALID; +#endif + six = nix; + } + else { + pollset.active_fd.array[ix] = pollset.active_fd.array[six]; +#ifdef DEBUG + pollset.active_fd.array[six] = ERTS_SYS_FD_INVALID; +#endif + six++; + if (six >= size) + six = 0; + } + } + ix = nix; + } + +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + ASSERT(pctrl_ix <= pce_sz/sizeof(ErtsPollControlEntry)); + if (pctrl_ix) + ERTS_CIO_POLL_CTLV(pollset.ps, pctrl_entries, pctrl_ix); + if (pctrl_entries) + erts_free(ERTS_ALC_T_TMP, pctrl_entries); +#endif + + pollset.active_fd.six = six; + pollset.active_fd.eix = eix; + erts_smp_atomic32_set_relb(&pollset.active_fd.no, no); +} + +static ERTS_INLINE void +add_active_fd(ErtsSysFdType fd) +{ + int eix = pollset.active_fd.eix; + int size = pollset.active_fd.size; + + + pollset.active_fd.array[eix] = fd; + + erts_smp_atomic32_set_relb(&pollset.active_fd.no, + (erts_smp_atomic32_read_dirty(&pollset.active_fd.no) + + 1)); + + eix++; + if (eix >= size) + eix = 0; + if (pollset.active_fd.six == eix) { + pollset.active_fd.six = 0; + eix = size; + size += ERTS_ACTIVE_FD_INC; + pollset.active_fd.array = erts_realloc(ERTS_ALC_T_ACTIVE_FD_ARR, + pollset.active_fd.array, + sizeof(ErtsSysFdType)*size); + pollset.active_fd.size = size; +#ifdef DEBUG + { + int i; + for (i = eix + 1; i < size; i++) + pollset.active_fd.array[i] = ERTS_SYS_FD_INVALID; + } +#endif + + } + + pollset.active_fd.eix = eix; +} int ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, @@ -489,6 +859,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, ErtsDrvEventState *state; int wake_poller; int ret; +#if ERTS_CIO_HAVE_DRV_EVENT + ErtsDrvEventDataState *free_event = NULL; +#endif + ErtsDrvSelectDataState *free_select = NULL; #ifdef USE_VM_PROBES DTRACE_CHARBUF(name, 64); #endif @@ -524,7 +898,8 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, /* fast track to stop_select callback */ stop_select_fn = prt->drv_ptr->stop_select; #ifdef USE_VM_PROBES - strncpy(name, 
prt->drv_ptr->name, sizeof(name)-1); + strncpy(name, prt->drv_ptr->name, + sizeof(DTRACE_CHARBUF_NAME(name))-1); name[sizeof(name)-1] = '\0'; #endif ret = 0; @@ -589,9 +964,9 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, if (new_events & (ERTS_POLL_EV_ERR|ERTS_POLL_EV_NVAL)) { if (state->type == ERTS_EV_TYPE_DRV_SEL && !state->events) { state->type = ERTS_EV_TYPE_NONE; - state->flags = 0; - erts_free(ERTS_ALC_T_DRV_SEL_D_STATE, state->driver.select); - state->driver.select = NULL; + state->flags &= ~ERTS_EV_FLAG_USED; + state->driver.select->inport = NIL; + state->driver.select->outport = NIL; } ret = -1; goto done; @@ -609,18 +984,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, state->events = new_events; if (ctl_events) { if (on) { - if (state->type == ERTS_EV_TYPE_NONE) { - ErtsDrvSelectDataState *dsdsp - = erts_alloc(ERTS_ALC_T_DRV_SEL_D_STATE, - sizeof(ErtsDrvSelectDataState)); - dsdsp->inport = NIL; - dsdsp->outport = NIL; - erts_port_task_handle_init(&dsdsp->intask); - erts_port_task_handle_init(&dsdsp->outtask); - ASSERT(state->driver.select == NULL); - state->driver.select = dsdsp; + if (!state->driver.select) + state->driver.select = alloc_drv_select_data(); + if (state->type == ERTS_EV_TYPE_NONE) state->type = ERTS_EV_TYPE_DRV_SEL; - } ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL); if (ctl_events & ERTS_POLL_EV_IN) state->driver.select->inport = id; @@ -641,17 +1008,12 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, state->driver.select->outport = NIL; } if (new_events == 0) { - ASSERT(!erts_port_task_is_scheduled(&state->driver.select->intask)); - ASSERT(!erts_port_task_is_scheduled(&state->driver.select->outtask)); if (old_events != 0) { remember_removed(state, &pollset); } if ((mode & ERL_DRV_USE) || !(state->flags & ERTS_EV_FLAG_USED)) { state->type = ERTS_EV_TYPE_NONE; - state->flags = 0; - erts_free(ERTS_ALC_T_DRV_SEL_D_STATE, - state->driver.select); - state->driver.select = NULL; + state->flags &= ~ERTS_EV_FLAG_USED; } /*else keep it, as fd will probably be selected upon again */ } @@ -682,20 +1044,29 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, ret = 0; -done:; -#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS - if (state->type == ERTS_EV_TYPE_NONE && state->remove_cnt == 0) { - hash_erase_drv_ev_state(state); - } +done: + + check_fd_cleanup(state, +#if ERTS_CIO_HAVE_DRV_EVENT + &free_event, #endif -done_unknown: + &free_select); + +done_unknown: erts_smp_mtx_unlock(fd_mtx(fd)); if (stop_select_fn) { int was_unmasked = erts_block_fpe(); DTRACE1(driver_stop_select, name); + LTTNG1(driver_stop_select, "unknown"); (*stop_select_fn)(e, NULL); erts_unblock_fpe(was_unmasked); } + if (free_select) + free_drv_select_data(free_select); +#if ERTS_CIO_HAVE_DRV_EVENT + if (free_event) + free_drv_event_data(free_event); +#endif return ret; } @@ -715,6 +1086,10 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix, ErtsDrvEventState *state; int do_wake = 0; int ret; +#if ERTS_CIO_HAVE_DRV_EVENT + ErtsDrvEventDataState *free_event; +#endif + ErtsDrvSelectDataState *free_select; Port *prt = erts_drvport2port(ix); if (prt == ERTS_INVALID_ERL_DRV_PORT) @@ -795,10 +1170,8 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix, state->driver.event->removed_events |= remove_events; } else { - state->driver.event - = erts_alloc(ERTS_ALC_T_DRV_EV_D_STATE, - sizeof(ErtsDrvEventDataState)); - erts_port_task_handle_init(&state->driver.event->task); + if (!state->driver.event) + state->driver.event = alloc_drv_event_data(); state->driver.event->port = id; state->driver.event->removed_events = (ErtsPollEvents) 0; 
state->type = ERTS_EV_TYPE_DRV_EV; @@ -808,10 +1181,10 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix, else { if (state->type == ERTS_EV_TYPE_DRV_EV) { abort_tasks(state, 0); - erts_free(ERTS_ALC_T_DRV_EV_D_STATE, - state->driver.event); + state->driver.event->port = NIL; + state->driver.event->data = NULL; + state->driver.event->removed_events = (ErtsPollEvents) 0; } - state->driver.select = NULL; state->type = ERTS_EV_TYPE_NONE; remember_removed(state, &pollset); } @@ -821,12 +1194,22 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix, ret = 0; done: -#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS - if (state->type == ERTS_EV_TYPE_NONE && state->remove_cnt == 0) { - hash_erase_drv_ev_state(state); - } + + check_fd_cleanup(state, +#if ERTS_CIO_HAVE_DRV_EVENT + &free_event, #endif + &free_select); + erts_smp_mtx_unlock(fd_mtx(fd)); + + if (free_select) + free_drv_select_data(free_select); +#if ERTS_CIO_HAVE_DRV_EVENT + if (free_event) + free_drv_event_data(free_event); +#endif + return ret; #endif } @@ -894,7 +1277,7 @@ print_driver_name(erts_dsprintf_buf_t *dsbufp, Eterm id) static void steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode) { - erts_dsprintf(dsbufp, "stealing control of fd=%d from ", (int) state->fd); + erts_dsprintf(dsbufp, "stealing control of fd=%d from ", (int) GET_FD(state->fd)); switch (state->type) { case ERTS_EV_TYPE_DRV_SEL: { int deselect_mode = 0; @@ -918,7 +1301,7 @@ steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode) if (deselect_mode) deselect(state, deselect_mode); else { - erts_dsprintf(dsbufp, "no one", (int) state->fd); + erts_dsprintf(dsbufp, "no one", (int) GET_FD(state->fd)); ASSERT(0); } erts_dsprintf(dsbufp, "\n"); @@ -946,7 +1329,7 @@ steal(erts_dsprintf_buf_t *dsbufp, ErtsDrvEventState *state, int mode) break; } default: - erts_dsprintf(dsbufp, "no one\n", (int) state->fd); + erts_dsprintf(dsbufp, "no one\n", (int) GET_FD(state->fd)); ASSERT(0); } } @@ -960,7 +1343,7 @@ print_select_op(erts_dsprintf_buf_t *dsbufp, "driver_select(%p, %d,%s%s%s%s, %d) " "by ", ix, - (int) fd, + (int) GET_FD(fd), mode & ERL_DRV_READ ? " ERL_DRV_READ" : "", mode & ERL_DRV_WRITE ? " ERL_DRV_WRITE" : "", mode & ERL_DRV_USE ? " ERL_DRV_USE" : "", @@ -1010,7 +1393,7 @@ steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix, ASSERT(state->type == ERTS_EV_TYPE_STOP_USE); erts_dsprintf(dsbufp, "failed: fd=%d (re)selected before stop_select " "was called for driver %s\n", - (int) state->fd, state->driver.drv_ptr->name); + (int) GET_FD(state->fd), state->driver.drv_ptr->name); erts_send_error_to_logger_nogl(dsbufp); if (on) { @@ -1019,7 +1402,7 @@ steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix, * In either case stop_select should not be called. 
*/ state->type = ERTS_EV_TYPE_NONE; - state->flags = 0; + state->flags &= ~ERTS_EV_FLAG_USED; if (state->driver.drv_ptr->handle) { erts_ddll_dereference_driver(state->driver.drv_ptr->handle); } @@ -1091,38 +1474,103 @@ event_large_fd_error(ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data #endif #endif +static ERTS_INLINE int +io_task_schedule_allowed(ErtsDrvEventState *state, + ErtsPortTaskType type, + erts_aint_t current_cio_time) +{ + ErtsIoTask *io_task; + + switch (type) { + case ERTS_PORT_TASK_INPUT: + if (!state->driver.select) + return 0; +#if ERTS_CIO_HAVE_DRV_EVENT + if (state->driver.event) + return 0; +#endif + io_task = &state->driver.select->iniotask; + break; + case ERTS_PORT_TASK_OUTPUT: + if (!state->driver.select) + return 0; +#if ERTS_CIO_HAVE_DRV_EVENT + if (state->driver.event) + return 0; +#endif + io_task = &state->driver.select->outiotask; + break; +#if ERTS_CIO_HAVE_DRV_EVENT + case ERTS_PORT_TASK_EVENT: + if (!state->driver.event) + return 0; + if (state->driver.select) + return 0; + io_task = &state->driver.event->iotask; + break; +#endif + default: + ERTS_INTERNAL_ERROR("Invalid I/O-task type"); + return 0; + } + + return !is_iotask_active(io_task, current_cio_time); +} + static ERTS_INLINE void -iready(Eterm id, ErtsDrvEventState *state) +iready(Eterm id, ErtsDrvEventState *state, erts_aint_t current_cio_time) { - if (erts_port_task_schedule(id, - &state->driver.select->intask, - ERTS_PORT_TASK_INPUT, - (ErlDrvEvent) state->fd) != 0) { - stale_drv_select(id, state, ERL_DRV_READ); + if (io_task_schedule_allowed(state, + ERTS_PORT_TASK_INPUT, + current_cio_time)) { + ErtsIoTask *iotask = &state->driver.select->iniotask; + erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time); + if (erts_port_task_schedule(id, + &iotask->task, + ERTS_PORT_TASK_INPUT, + (ErlDrvEvent) state->fd) != 0) { + stale_drv_select(id, state, ERL_DRV_READ); + } + add_active_fd(state->fd); } } static ERTS_INLINE void -oready(Eterm id, ErtsDrvEventState *state) +oready(Eterm id, ErtsDrvEventState *state, erts_aint_t current_cio_time) { - if (erts_port_task_schedule(id, - &state->driver.select->outtask, - ERTS_PORT_TASK_OUTPUT, - (ErlDrvEvent) state->fd) != 0) { - stale_drv_select(id, state, ERL_DRV_WRITE); + if (io_task_schedule_allowed(state, + ERTS_PORT_TASK_OUTPUT, + current_cio_time)) { + ErtsIoTask *iotask = &state->driver.select->outiotask; + erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time); + if (erts_port_task_schedule(id, + &iotask->task, + ERTS_PORT_TASK_OUTPUT, + (ErlDrvEvent) state->fd) != 0) { + stale_drv_select(id, state, ERL_DRV_WRITE); + } + add_active_fd(state->fd); } } #if ERTS_CIO_HAVE_DRV_EVENT static ERTS_INLINE void -eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data) +eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data, + erts_aint_t current_cio_time) { - if (erts_port_task_schedule(id, - &state->driver.event->task, - ERTS_PORT_TASK_EVENT, - (ErlDrvEvent) state->fd, - event_data) != 0) { - stale_drv_select(id, state, 0); + if (io_task_schedule_allowed(state, + ERTS_PORT_TASK_EVENT, + current_cio_time)) { + ErtsIoTask *iotask = &state->driver.event->iotask; + erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time); + if (erts_port_task_schedule(id, + &iotask->task, + ERTS_PORT_TASK_EVENT, + (ErlDrvEvent) state->fd, + event_data) != 0) { + stale_drv_select(id, state, 0); + } + add_active_fd(state->fd); } } #endif @@ -1145,18 +1593,22 @@ ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set) 
void ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set, - erts_short_time_t msec) + ErtsMonotonicTime timeout_time) { - ERTS_CIO_POLL_INTR_TMD(pollset.ps, set, msec); + ERTS_CIO_POLL_INTR_TMD(pollset.ps, set, timeout_time); } void ERTS_CIO_EXPORT(erts_check_io)(int do_wait) { - ErtsPollResFd pollres[256]; + ErtsPollResFd *pollres; int pollres_len; - SysTimeval wait_time; + ErtsMonotonicTime timeout_time; int poll_ret, i; + erts_aint_t current_cio_time; + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + + ASSERT(esdp); restart: @@ -1166,28 +1618,37 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) #endif /* Figure out timeout value */ - if (do_wait) { - erts_time_remaining(&wait_time); - } else { /* poll only */ - wait_time.tv_sec = 0; - wait_time.tv_usec = 0; - } + timeout_time = (do_wait + ? erts_check_next_timeout_time(esdp) + : ERTS_POLL_NO_TIMEOUT /* poll only */); + + /* + * No need for an atomic inc op when incrementing + * erts_check_io_time, since only one thread can + * check io at a time. + */ + current_cio_time = erts_smp_atomic_read_dirty(&erts_check_io_time); + current_cio_time++; + erts_smp_atomic_set_relb(&erts_check_io_time, current_cio_time); + + check_cleanup_active_fds(current_cio_time); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_check_exact(NULL, 0); /* No locks should be locked */ #endif - pollres_len = sizeof(pollres)/sizeof(ErtsPollResFd); + + pollres_len = erts_smp_atomic32_read_dirty(&pollset.active_fd.no) + ERTS_CHECK_IO_POLL_RES_LEN; + + pollres = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollResFd)*pollres_len); erts_smp_atomic_set_nob(&pollset.in_poll_wait, 1); - poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, &wait_time); + poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, timeout_time); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_check_exact(NULL, 0); /* No locks should be locked */ #endif - erts_deliver_time(); /* sync the machine's idea of time */ - #ifdef ERTS_BREAK_REQUESTED if (ERTS_BREAK_REQUESTED) erts_do_break_handling(); @@ -1196,6 +1657,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) if (poll_ret != 0) { erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0); forget_removed(&pollset); + erts_free(ERTS_ALC_T_TMP, pollres); if (poll_ret == EAGAIN) { goto restart; } @@ -1255,15 +1717,15 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) if ((revents & ERTS_POLL_EV_IN) || (!(revents & ERTS_POLL_EV_OUT) && state->events & ERTS_POLL_EV_IN)) { - iready(state->driver.select->inport, state); + iready(state->driver.select->inport, state, current_cio_time); } else if (state->events & ERTS_POLL_EV_OUT) { - oready(state->driver.select->outport, state); + oready(state->driver.select->outport, state, current_cio_time); } } else if (revents & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) { if (revents & ERTS_POLL_EV_OUT) { - oready(state->driver.select->outport, state); + oready(state->driver.select->outport, state, current_cio_time); } /* Someone might have deselected input since revents was read (true also on the non-smp emulator since @@ -1271,7 +1733,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) revents... 
*/ revents &= ~(~state->events & ERTS_POLL_EV_IN); if (revents & ERTS_POLL_EV_IN) { - iready(state->driver.select->inport, state); + iready(state->driver.select->inport, state, current_cio_time); } } else if (revents & ERTS_POLL_EV_NVAL) { @@ -1279,6 +1741,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) state->driver.select->inport, state->driver.select->outport, state->events); + add_active_fd(state->fd); } break; } @@ -1296,8 +1759,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) if (revents) { event_data->events = state->events; event_data->revents = revents; - - eready(state->driver.event->port, state, event_data); + eready(state->driver.event->port, state, event_data, current_cio_time); } break; } @@ -1315,6 +1777,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) (int) state->type); ASSERT(0); deselect(state, 0); + add_active_fd(state->fd); break; } } @@ -1326,6 +1789,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) } erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0); + erts_free(ERTS_ALC_T_TMP, pollres); forget_removed(&pollset); } @@ -1395,6 +1859,7 @@ stale_drv_select(Eterm id, ErtsDrvEventState *state, int mode) } #ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS + static SafeHashValue drv_ev_state_hash(void *des) { SafeHashValue val = (SafeHashValue) ((ErtsDrvEventState *) des)->fd; @@ -1440,10 +1905,27 @@ static void drv_ev_state_free(void *des) void ERTS_CIO_EXPORT(erts_init_check_io)(void) { + erts_smp_atomic_init_nob(&erts_check_io_time, 0); erts_smp_atomic_init_nob(&pollset.in_poll_wait, 0); + ERTS_CIO_POLL_INIT(); pollset.ps = ERTS_CIO_NEW_POLLSET(); + pollset.active_fd.six = 0; + pollset.active_fd.eix = 0; + erts_smp_atomic32_init_nob(&pollset.active_fd.no, 0); + pollset.active_fd.size = ERTS_ACTIVE_FD_INC; + pollset.active_fd.array = erts_alloc(ERTS_ALC_T_ACTIVE_FD_ARR, + sizeof(ErtsSysFdType)*ERTS_ACTIVE_FD_INC); +#ifdef DEBUG + { + int i; + for (i = 0; i < ERTS_ACTIVE_FD_INC; i++) + pollset.active_fd.array[i] = ERTS_SYS_FD_INVALID; + } +#endif + + #ifdef ERTS_SMP init_removed_fd_alloc(); pollset.removed_list = NULL; @@ -1519,12 +2001,27 @@ Eterm ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) { Process *p = (Process *) proc; - Eterm tags[15], values[15], res; + Eterm tags[16], values[16], res; Uint sz, *szp, *hp, **hpp, memory_size; Sint i; ErtsPollInfo pi; - - ERTS_CIO_POLL_INFO(pollset.ps, &pi); + erts_aint_t cio_time = erts_smp_atomic_read_acqb(&erts_check_io_time); + int active_fds = (int) erts_smp_atomic32_read_acqb(&pollset.active_fd.no); + + while (1) { + erts_aint_t post_cio_time; + int post_active_fds; + + ERTS_CIO_POLL_INFO(pollset.ps, &pi); + + post_cio_time = erts_smp_atomic_read_mb(&erts_check_io_time); + post_active_fds = (int) erts_smp_atomic32_read_acqb(&pollset.active_fd.no); + if (cio_time == post_cio_time && active_fds == post_active_fds) + break; + cio_time = post_cio_time; + active_fds = post_active_fds; + } + memory_size = pi.memory_size; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len); @@ -1588,6 +2085,9 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) tags[i] = erts_bld_atom(hpp, szp, "max_fds"); values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.max_fds); + tags[i] = erts_bld_atom(hpp, szp, "active_fds"); + values[i++] = erts_bld_uint(hpp, szp, (Uint) active_fds); + #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS tags[i] = erts_bld_atom(hpp, szp, "no_avoided_wakeups"); values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.no_avoided_wakeups); @@ -1642,6 +2142,8 @@ print_events(ErtsPollEvents ev) 
typedef struct { int used_fds; int num_errors; + int no_driver_select_structs; + int no_driver_event_structs; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS int internal_fds; ErtsPollEvents *epep; @@ -1664,6 +2166,13 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters) struct stat stat_buf; #endif + if (state->driver.select) + counters->no_driver_select_structs++; +#if ERTS_CIO_HAVE_DRV_EVENT + if (state->driver.event) + counters->no_driver_event_structs++; +#endif + #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS if (state->events || ep_events) { if (ep_events & ERTS_POLL_EV_NVAL) { @@ -1802,6 +2311,7 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters) } } } +#if ERTS_CIO_HAVE_DRV_EVENT else if (state->type == ERTS_EV_TYPE_DRV_EV) { Eterm id; erts_printf("driver_event "); @@ -1837,6 +2347,7 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters) erts_free_port_names(pnp); } } +#endif #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS else if (internal) { erts_printf("internal "); @@ -1876,18 +2387,24 @@ static void doit_erts_check_io_debug(void *vstate, void *vcounters) } int -ERTS_CIO_EXPORT(erts_check_io_debug)(void) +ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip) { #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS int fd, len; #endif IterDebugCounters counters; +#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS ErtsDrvEventState null_des; null_des.driver.select = NULL; +#if ERTS_CIO_HAVE_DRV_EVENT + null_des.driver.event = NULL; +#endif + null_des.driver.drv_ptr = NULL; null_des.events = 0; null_des.remove_cnt = 0; null_des.type = ERTS_EV_TYPE_NONE; +#endif erts_printf("--- fds in pollset --------------------------------------\n"); @@ -1904,6 +2421,8 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void) #endif counters.used_fds = 0; counters.num_errors = 0; + counters.no_driver_select_structs = 0; + counters.no_driver_event_structs = 0; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS len = erts_smp_atomic_read_nob(&drv_ev_state_len); @@ -1920,8 +2439,16 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void) erts_smp_thr_progress_unblock(); + ciodip->no_used_fds = counters.used_fds; + ciodip->no_driver_select_structs = counters.no_driver_select_structs; + ciodip->no_driver_event_structs = counters.no_driver_event_structs; + erts_printf("\n"); erts_printf("used fds=%d\n", counters.used_fds); + erts_printf("Number of driver_select() structures=%d\n", counters.no_driver_select_structs); +#if ERTS_CIO_HAVE_DRV_EVENT + erts_printf("Number of driver_event() structures=%d\n", counters.no_driver_event_structs); +#endif #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS erts_printf("internal fds=%d\n", counters.internal_fds); #endif @@ -1930,6 +2457,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void) #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS erts_free(ERTS_ALC_T_TMP, (void *) counters.epep); #endif + return counters.num_errors; } diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h index edab7947ba..14f1ea3f43 100644 --- a/erts/emulator/sys/common/erl_check_io.h +++ b/erts/emulator/sys/common/erl_check_io.h @@ -1,18 +1,19 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2011. All Rights Reserved. + * Copyright Ericsson AB 2006-2016. All Rights Reserved. * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. 
- * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * %CopyrightEnd% */ @@ -26,6 +27,7 @@ #ifndef ERL_CHECK_IO_H__ #define ERL_CHECK_IO_H__ +#include "sys.h" #include "erl_sys_driver.h" #ifdef ERTS_ENABLE_KERNEL_POLL @@ -46,14 +48,14 @@ void erts_check_io_async_sig_interrupt_nkp(void); #endif void erts_check_io_interrupt_kp(int); void erts_check_io_interrupt_nkp(int); -void erts_check_io_interrupt_timed_kp(int, erts_short_time_t); -void erts_check_io_interrupt_timed_nkp(int, erts_short_time_t); +void erts_check_io_interrupt_timed_kp(int, ErtsMonotonicTime); +void erts_check_io_interrupt_timed_nkp(int, ErtsMonotonicTime); void erts_check_io_kp(int); void erts_check_io_nkp(int); void erts_init_check_io_kp(void); void erts_init_check_io_nkp(void); -int erts_check_io_debug_kp(void); -int erts_check_io_debug_nkp(void); +int erts_check_io_debug_kp(ErtsCheckIoDebugInfo *); +int erts_check_io_debug_nkp(ErtsCheckIoDebugInfo *); #else /* !ERTS_ENABLE_KERNEL_POLL */ @@ -64,12 +66,33 @@ int erts_check_io_max_files(void); void erts_check_io_async_sig_interrupt(void); #endif void erts_check_io_interrupt(int); -void erts_check_io_interrupt_timed(int, erts_short_time_t); +void erts_check_io_interrupt_timed(int, ErtsMonotonicTime); void erts_check_io(int); void erts_init_check_io(void); #endif +extern erts_smp_atomic_t erts_check_io_time; + +typedef struct { + ErtsPortTaskHandle task; + erts_smp_atomic_t executed_time; +} ErtsIoTask; + +ERTS_GLB_INLINE void erts_io_notify_port_task_executed(ErtsPortTaskHandle *pthp); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE void +erts_io_notify_port_task_executed(ErtsPortTaskHandle *pthp) +{ + ErtsIoTask *itp = (ErtsIoTask *) (((char *) pthp) - offsetof(ErtsIoTask, task)); + erts_aint_t ci_time = erts_smp_atomic_read_acqb(&erts_check_io_time); + erts_smp_atomic_set_relb(&itp->executed_time, ci_time); +} + +#endif + #endif /* ERL_CHECK_IO_H__ */ #if !defined(ERL_CHECK_IO_C__) && !defined(ERTS_ALLOC_C__) @@ -81,6 +104,16 @@ void erts_init_check_io(void); #include "erl_poll.h" #include "erl_port_task.h" +#ifdef __WIN32__ +/* + * Current erts_poll implementation for Windows cannot handle + * active events in the set of events polled. + */ +# define ERTS_CIO_DEFER_ACTIVE_EVENTS 1 +#else +# define ERTS_CIO_DEFER_ACTIVE_EVENTS 0 +#endif + /* * ErtsDrvEventDataState is used by driver_event() which is almost never * used. 
We allocate ErtsDrvEventDataState separate since we dont wan't @@ -91,13 +124,16 @@ typedef struct { Eterm port; ErlDrvEventData data; ErtsPollEvents removed_events; - ErtsPortTaskHandle task; +#if ERTS_CIO_DEFER_ACTIVE_EVENTS + ErtsPollEvents deferred_events; +#endif + ErtsIoTask iotask; } ErtsDrvEventDataState; typedef struct { Eterm inport; Eterm outport; - ErtsPortTaskHandle intask; - ErtsPortTaskHandle outtask; + ErtsIoTask iniotask; + ErtsIoTask outiotask; } ErtsDrvSelectDataState; #endif /* #ifndef ERL_CHECK_IO_INTERNAL__ */ diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c new file mode 100644 index 0000000000..7bbb406f29 --- /dev/null +++ b/erts/emulator/sys/common/erl_mmap.c @@ -0,0 +1,2918 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2002-2016. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * %CopyrightEnd% + */ +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "sys.h" +#include "erl_process.h" +#include "erl_smp.h" +#include "atom.h" +#include "erl_mmap.h" +#include <stddef.h> + +#if HAVE_ERTS_MMAP + +/* #define ERTS_MMAP_OP_RINGBUF_SZ 100 */ + +#if defined(DEBUG) || 0 +# undef ERTS_MMAP_DEBUG +# define ERTS_MMAP_DEBUG +# ifndef ERTS_MMAP_OP_RINGBUF_SZ +# define ERTS_MMAP_OP_RINGBUF_SZ 100 +# endif +#endif + +#ifndef ERTS_MMAP_OP_RINGBUF_SZ +# define ERTS_MMAP_OP_RINGBUF_SZ 0 +#endif + +/* #define ERTS_MMAP_DEBUG_FILL_AREAS */ + +#ifdef ERTS_MMAP_DEBUG +# define ERTS_MMAP_ASSERT ERTS_ASSERT +#else +# define ERTS_MMAP_ASSERT(A) ((void) 1) +#endif + +/* + * `mm->sa.bot` and `mm->sua.top` are read only after + * initialization, but the other pointers are not; i.e., only + * ERTS_MMAP_IN_SUPERCARRIER() is allowed without the mutex held. 
+ */ +#define ERTS_MMAP_IN_SUPERCARRIER(PTR) \ + (((UWord) (PTR)) - ((UWord) mm->sa.bot) \ + < ((UWord) mm->sua.top) - ((UWord) mm->sa.bot)) +#define ERTS_MMAP_IN_SUPERALIGNED_AREA(PTR) \ + (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ + (((UWord) (PTR)) - ((UWord) mm->sa.bot) \ + < ((UWord) mm->sa.top) - ((UWord) mm->sa.bot))) +#define ERTS_MMAP_IN_SUPERUNALIGNED_AREA(PTR) \ + (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ + (((UWord) (PTR)) - ((UWord) mm->sua.bot) \ + < ((UWord) mm->sua.top) - ((UWord) mm->sua.bot))) + +UWord erts_page_inv_mask; + +#if defined(DEBUG) || defined(ERTS_MMAP_DEBUG) +# undef RBT_DEBUG +# define RBT_DEBUG +#endif +#ifdef RBT_DEBUG +# define RBT_ASSERT ERTS_ASSERT +# define IF_RBT_DEBUG(C) C +#else +# define RBT_ASSERT(x) +# define IF_RBT_DEBUG(C) +#endif + +typedef struct RBTNode_ RBTNode; +struct RBTNode_ { + UWord parent_and_color; /* color in bit 0 of parent ptr */ + RBTNode *left; + RBTNode *right; +}; + +#define RED_FLG (1) +#define IS_RED(N) ((N) && ((N)->parent_and_color & RED_FLG)) +#define IS_BLACK(N) (!IS_RED(N)) +#define SET_RED(N) ((N)->parent_and_color |= RED_FLG) +#define SET_BLACK(N) ((N)->parent_and_color &= ~RED_FLG) + +static ERTS_INLINE RBTNode* parent(RBTNode* node) +{ + return (RBTNode*) (node->parent_and_color & ~RED_FLG); +} + +static ERTS_INLINE void set_parent(RBTNode* node, RBTNode* parent) +{ + RBT_ASSERT(!((UWord)parent & RED_FLG)); + node->parent_and_color = ((UWord)parent) | (node->parent_and_color & RED_FLG); +} + +static ERTS_INLINE UWord parent_and_color(RBTNode* parent, int color) +{ + RBT_ASSERT(!((UWord)parent & RED_FLG)); + RBT_ASSERT(!(color & ~RED_FLG)); + return ((UWord)parent) | color; +} + + +enum SortOrder { + ADDR_ORDER, /* only address order */ + SA_SZ_ADDR_ORDER, /* first super-aligned size then address order */ + SZ_REVERSE_ADDR_ORDER /* first size then reverse address order */ +}; +#ifdef HARD_DEBUG +static const char* sort_order_names[] = {"Address","SuperAlignedSize-Address","Size-RevAddress"}; +#endif + +typedef struct { + RBTNode* root; + enum SortOrder order; +}RBTree; + +#ifdef HARD_DEBUG +# define HARD_CHECK_IS_MEMBER(ROOT,NODE) rbt_assert_is_member(ROOT,NODE) +# define HARD_CHECK_TREE(TREE,SZ) check_tree(TREE, SZ) +static int rbt_assert_is_member(RBTNode* root, RBTNode* node); +static RBTNode* check_tree(RBTree* tree, Uint); +#else +# define HARD_CHECK_IS_MEMBER(ROOT,NODE) +# define HARD_CHECK_TREE(TREE,SZ) +#endif + +#if ERTS_MMAP_OP_RINGBUF_SZ + +static int mmap_op_ix; + +typedef enum { + ERTS_OP_TYPE_NONE, + ERTS_OP_TYPE_MMAP, + ERTS_OP_TYPE_MUNMAP, + ERTS_OP_TYPE_MREMAP +} ErtsMMapOpType; + +typedef struct { + ErtsMMapOpType type; + void *result; + UWord in_size; + UWord out_size; + void *old_ptr; + UWord old_size; +} ErtsMMapOp; + +static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; + +#define ERTS_MMAP_OP_RINGBUF_INIT() \ + do { \ + int ix__; \ + for (ix__ = 0; ix__ < ERTS_MMAP_OP_RINGBUF_SZ; ix__++) {\ + mmap_ops[ix__].type = ERTS_OP_TYPE_NONE; \ + mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = 0; \ + mmap_ops[ix__].out_size = 0; \ + mmap_ops[ix__].old_ptr = NULL; \ + mmap_ops[ix__].old_size = 0; \ + } \ + mmap_op_ix = ERTS_MMAP_OP_RINGBUF_SZ-1; \ + } while (0) + +#define ERTS_MMAP_OP_START(SZ) \ + do { \ + int ix__; \ + if (++mmap_op_ix >= ERTS_MMAP_OP_RINGBUF_SZ) \ + mmap_op_ix = 0; \ + ix__ = mmap_op_ix; \ + mmap_ops[ix__].type = ERTS_OP_TYPE_MMAP; \ + mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = (SZ); \ + mmap_ops[ix__].out_size = 0; \ + 
mmap_ops[ix__].old_ptr = NULL; \ + mmap_ops[ix__].old_size = 0; \ + } while (0) + +#define ERTS_MMAP_OP_END(PTR, SZ) \ + do { \ + int ix__ = mmap_op_ix; \ + mmap_ops[ix__].result = (PTR); \ + mmap_ops[ix__].out_size = (SZ); \ + } while (0) + +#define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ) \ + do { \ + erts_smp_mtx_lock(&mm->mtx); \ + ERTS_MMAP_OP_START((IN_SZ)); \ + ERTS_MMAP_OP_END((RES), (OUT_SZ)); \ + erts_smp_mtx_unlock(&mm->mtx); \ + } while (0) + +#define ERTS_MUNMAP_OP(PTR, SZ) \ + do { \ + int ix__; \ + if (++mmap_op_ix >= ERTS_MMAP_OP_RINGBUF_SZ) \ + mmap_op_ix = 0; \ + ix__ = mmap_op_ix; \ + mmap_ops[ix__].type = ERTS_OP_TYPE_MUNMAP; \ + mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = 0; \ + mmap_ops[ix__].out_size = 0; \ + mmap_ops[ix__].old_ptr = (PTR); \ + mmap_ops[ix__].old_size = (SZ); \ + } while (0) + +#define ERTS_MUNMAP_OP_LCK(PTR, SZ) \ + do { \ + erts_smp_mtx_lock(&mm->mtx); \ + ERTS_MUNMAP_OP((PTR), (SZ)); \ + erts_smp_mtx_unlock(&mm->mtx); \ + } while (0) + +#define ERTS_MREMAP_OP_START(OLD_PTR, OLD_SZ, IN_SZ) \ + do { \ + int ix__; \ + if (++mmap_op_ix >= ERTS_MMAP_OP_RINGBUF_SZ) \ + mmap_op_ix = 0; \ + ix__ = mmap_op_ix; \ + mmap_ops[ix__].type = ERTS_OP_TYPE_MREMAP; \ + mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = (IN_SZ); \ + mmap_ops[ix__].out_size = (OLD_SZ); \ + mmap_ops[ix__].old_ptr = (OLD_PTR); \ + mmap_ops[ix__].old_size = (OLD_SZ); \ + } while (0) + +#define ERTS_MREMAP_OP_END(PTR, SZ) \ + do { \ + int ix__ = mmap_op_ix; \ + mmap_ops[ix__].result = (PTR); \ + mmap_ops[mmap_op_ix].out_size = (SZ); \ + } while (0) + +#define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ) \ + do { \ + erts_smp_mtx_lock(&mm->mtx); \ + ERTS_MREMAP_OP_START((OLD_PTR), (OLD_SZ), (IN_SZ)); \ + ERTS_MREMAP_OP_END((RES), (OUT_SZ)); \ + erts_smp_mtx_unlock(&mm->mtx); \ + } while (0) + +#define ERTS_MMAP_OP_ABORT() \ + do { \ + int ix__ = mmap_op_ix; \ + mmap_ops[ix__].type = ERTS_OP_TYPE_NONE; \ + mmap_ops[ix__].result = NULL; \ + mmap_ops[ix__].in_size = 0; \ + mmap_ops[ix__].out_size = 0; \ + mmap_ops[ix__].old_ptr = NULL; \ + mmap_ops[ix__].old_size = 0; \ + if (--mmap_op_ix < 0) \ + mmap_op_ix = ERTS_MMAP_OP_RINGBUF_SZ-1; \ + } while (0) + +#else + +#define ERTS_MMAP_OP_RINGBUF_INIT() +#define ERTS_MMAP_OP_START(SZ) +#define ERTS_MMAP_OP_END(PTR, SZ) +#define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ) +#define ERTS_MUNMAP_OP(PTR, SZ) +#define ERTS_MUNMAP_OP_LCK(PTR, SZ) +#define ERTS_MREMAP_OP_START(OLD_PTR, OLD_SZ, IN_SZ) +#define ERTS_MREMAP_OP_END(PTR, SZ) +#define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ) +#define ERTS_MMAP_OP_ABORT() + +#endif + +typedef struct { + RBTNode snode; /* node in 'stree' */ + RBTNode anode; /* node in 'atree' */ + char* start; + char* end; +}ErtsFreeSegDesc; + +typedef struct { + RBTree stree; /* size ordered tree */ + RBTree atree; /* address ordered tree */ + Uint nseg; +}ErtsFreeSegMap; + +struct ErtsMemMapper_ { + int (*reserve_physical)(char *, UWord, int exec); + void (*unreserve_physical)(char *, UWord); + int supercarrier; + int no_os_mmap; + int executable; /* is client a native code allocator? */ + /* + * Super unaligned area is located above super aligned + * area. That is, `sa.bot` is beginning of the super + * carrier, `sua.top` is the end of the super carrier, + * and sa.top and sua.bot moves towards eachother. 
+ */ + struct { + char *top; + char *bot; + ErtsFreeSegMap map; + } sua; + struct { + char *top; + char *bot; + ErtsFreeSegMap map; + } sa; +#if HAVE_MMAP && (!defined(MAP_ANON) && !defined(MAP_ANONYMOUS)) + int mmap_fd; +#endif + erts_smp_mtx_t mtx; + struct { + char *free_list; + char *unused_start; + char *unused_end; + char *new_area_hint; + Uint reserved; + } desc; + struct { + UWord free_seg_descs; + struct { + UWord curr; + UWord max; + } free_segs; + } no; + struct { + struct { + UWord total; + struct { + UWord total; + UWord sa; + UWord sua; + } used; + } supercarrier; + struct { + UWord used; + } os; + } size; +}; + +ErtsMemMapper erts_dflt_mmapper; + +#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) +ErtsMemMapper erts_literal_mmapper; +char* erts_literals_start; +UWord erts_literals_size; +#endif + +#ifdef ERTS_ALC_A_EXEC +ErtsMemMapper erts_exec_mmapper; +#endif + + + +#define ERTS_MMAP_SIZE_SC_SA_INC(SZ) \ + do { \ + mm->size.supercarrier.used.total += (SZ); \ + mm->size.supercarrier.used.sa += (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total \ + <= mm->size.supercarrier.total); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sa \ + <= mm->size.supercarrier.used.total); \ + } while (0) +#define ERTS_MMAP_SIZE_SC_SA_DEC(SZ) \ + do { \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total >= (SZ)); \ + mm->size.supercarrier.used.total -= (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sa >= (SZ)); \ + mm->size.supercarrier.used.sa -= (SZ); \ + } while (0) +#define ERTS_MMAP_SIZE_SC_SUA_INC(SZ) \ + do { \ + mm->size.supercarrier.used.total += (SZ); \ + mm->size.supercarrier.used.sua += (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total \ + <= mm->size.supercarrier.total); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sua \ + <= mm->size.supercarrier.used.total); \ + } while (0) +#define ERTS_MMAP_SIZE_SC_SUA_DEC(SZ) \ + do { \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total >= (SZ)); \ + mm->size.supercarrier.used.total -= (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sua >= (SZ)); \ + mm->size.supercarrier.used.sua -= (SZ); \ + } while (0) +#define ERTS_MMAP_SIZE_OS_INC(SZ) \ + do { \ + ERTS_MMAP_ASSERT(mm->size.os.used + (SZ) >= (SZ)); \ + mm->size.os.used += (SZ); \ + } while (0) +#define ERTS_MMAP_SIZE_OS_DEC(SZ) \ + do { \ + ERTS_MMAP_ASSERT(mm->size.os.used >= (SZ)); \ + mm->size.os.used -= (SZ); \ + } while (0) + + +static void +add_free_desc_area(ErtsMemMapper* mm, char *start, char *end) +{ + ERTS_MMAP_ASSERT(end == (void *) 0 || end > start); + if (sizeof(ErtsFreeSegDesc) <= ((UWord) end) - ((UWord) start)) { + UWord no; + ErtsFreeSegDesc *prev_desc, *desc; + char *desc_end; + + no = 1; + prev_desc = (ErtsFreeSegDesc *) start; + prev_desc->start = mm->desc.free_list; + desc = (ErtsFreeSegDesc *) (start + sizeof(ErtsFreeSegDesc)); + desc_end = start + 2*sizeof(ErtsFreeSegDesc); + + while (desc_end <= end) { + desc->start = (char *) prev_desc; + prev_desc = desc; + desc = (ErtsFreeSegDesc *) desc_end; + desc_end += sizeof(ErtsFreeSegDesc); + no++; + } + mm->desc.free_list = (char *) prev_desc; + mm->no.free_seg_descs += no; + } +} + +static ErtsFreeSegDesc * +add_unused_free_desc_area(ErtsMemMapper* mm) +{ + char *ptr; + if (!mm->desc.unused_start) + return NULL; + + ERTS_MMAP_ASSERT(mm->desc.unused_end); + ERTS_MMAP_ASSERT(ERTS_PAGEALIGNED_SIZE + <= mm->desc.unused_end - mm->desc.unused_start); + + ptr = mm->desc.unused_start + ERTS_PAGEALIGNED_SIZE; + add_free_desc_area(mm, mm->desc.unused_start, ptr); + + 
if ((mm->desc.unused_end - ptr) >= ERTS_PAGEALIGNED_SIZE) + mm->desc.unused_start = ptr; + else + mm->desc.unused_end = mm->desc.unused_start = NULL; + + ERTS_MMAP_ASSERT(mm->desc.free_list); + return (ErtsFreeSegDesc *) mm->desc.free_list; +} + +static ERTS_INLINE ErtsFreeSegDesc * +alloc_desc(ErtsMemMapper* mm) +{ + ErtsFreeSegDesc *res; + res = (ErtsFreeSegDesc *) mm->desc.free_list; + if (!res) { + res = add_unused_free_desc_area(mm); + if (!res) + return NULL; + } + mm->desc.free_list = res->start; + ASSERT(mm->no.free_segs.curr < mm->no.free_seg_descs); + mm->no.free_segs.curr++; + if (mm->no.free_segs.max < mm->no.free_segs.curr) + mm->no.free_segs.max = mm->no.free_segs.curr; + return res; +} + +static ERTS_INLINE void +free_desc(ErtsMemMapper* mm, ErtsFreeSegDesc *desc) +{ + desc->start = mm->desc.free_list; + mm->desc.free_list = (char *) desc; + ERTS_MMAP_ASSERT(mm->no.free_segs.curr > 0); + mm->no.free_segs.curr--; +} + +static ERTS_INLINE ErtsFreeSegDesc* anode_to_desc(RBTNode* anode) +{ + return (ErtsFreeSegDesc*) ((char*)anode - offsetof(ErtsFreeSegDesc, anode)); +} + +static ERTS_INLINE ErtsFreeSegDesc* snode_to_desc(RBTNode* snode) +{ + return (ErtsFreeSegDesc*) ((char*)snode - offsetof(ErtsFreeSegDesc, snode)); +} + +static ERTS_INLINE ErtsFreeSegDesc* node_to_desc(enum SortOrder order, RBTNode* node) +{ + return order==ADDR_ORDER ? anode_to_desc(node) : snode_to_desc(node); +} + +static ERTS_INLINE SWord usable_size(enum SortOrder order, + ErtsFreeSegDesc* desc) +{ + return ((order == SA_SZ_ADDR_ORDER) ? + ERTS_SUPERALIGNED_FLOOR(desc->end) - ERTS_SUPERALIGNED_CEILING(desc->start) + : desc->end - desc->start); +} + +#ifdef HARD_DEBUG +static ERTS_INLINE SWord cmp_nodes(enum SortOrder order, + RBTNode* lhs, RBTNode* rhs) +{ + ErtsFreeSegDesc* ldesc = node_to_desc(order, lhs); + ErtsFreeSegDesc* rdesc = node_to_desc(order, rhs); + RBT_ASSERT(lhs != rhs); + if (order != ADDR_ORDER) { + SWord diff = usable_size(order, ldesc) - usable_size(order, rdesc); + if (diff) return diff; + } + if (order != SZ_REVERSE_ADDR_ORDER) { + return (char*)ldesc->start - (char*)rdesc->start; + } + else { + return (char*)rdesc->start - (char*)ldesc->start; + } +} +#endif /* HARD_DEBUG */ + +static ERTS_INLINE SWord cmp_with_node(enum SortOrder order, + SWord sz, char* addr, RBTNode* rhs) +{ + ErtsFreeSegDesc* rdesc; + if (order != ADDR_ORDER) { + SWord diff; + rdesc = snode_to_desc(rhs); + diff = sz - usable_size(order, rdesc); + if (diff) return diff; + } + else + rdesc = anode_to_desc(rhs); + + if (order != SZ_REVERSE_ADDR_ORDER) + return addr - (char*)rdesc->start; + else + return (char*)rdesc->start - addr; +} + + +static ERTS_INLINE void +left_rotate(RBTNode **root, RBTNode *x) +{ + RBTNode *y = x->right; + x->right = y->left; + if (y->left) + set_parent(y->left, x); + set_parent(y, parent(x)); + if (!parent(y)) { + RBT_ASSERT(*root == x); + *root = y; + } + else if (x == parent(x)->left) + parent(x)->left = y; + else { + RBT_ASSERT(x == parent(x)->right); + parent(x)->right = y; + } + y->left = x; + set_parent(x, y); +} + +static ERTS_INLINE void +right_rotate(RBTNode **root, RBTNode *x) +{ + RBTNode *y = x->left; + x->left = y->right; + if (y->right) + set_parent(y->right, x); + set_parent(y, parent(x)); + if (!parent(y)) { + RBT_ASSERT(*root == x); + *root = y; + } + else if (x == parent(x)->right) + parent(x)->right = y; + else { + RBT_ASSERT(x == parent(x)->left); + parent(x)->left = y; + } + y->right = x; + set_parent(x, y); +} + +/* + * Replace node x with node y + * NOTE: segment 
descriptor of y is not changed + */ +static ERTS_INLINE void +replace(RBTNode **root, RBTNode *x, RBTNode *y) +{ + + if (!parent(x)) { + RBT_ASSERT(*root == x); + *root = y; + } + else if (x == parent(x)->left) + parent(x)->left = y; + else { + RBT_ASSERT(x == parent(x)->right); + parent(x)->right = y; + } + if (x->left) { + RBT_ASSERT(parent(x->left) == x); + set_parent(x->left, y); + } + if (x->right) { + RBT_ASSERT(parent(x->right) == x); + set_parent(x->right, y); + } + + y->parent_and_color = x->parent_and_color; + y->right = x->right; + y->left = x->left; +} + +static void +tree_insert_fixup(RBTNode** root, RBTNode *node) +{ + RBTNode *x = node, *y, *papa_x, *granpa_x; + + /* + * Rearrange the tree so that it satisfies the Red-Black Tree properties + */ + + papa_x = parent(x); + RBT_ASSERT(x != *root && IS_RED(papa_x)); + do { + + /* + * x and its parent are both red. Move the red pair up the tree + * until we get to the root or until we can separate them. + */ + + granpa_x = parent(papa_x); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_BLACK(granpa_x)); + RBT_ASSERT(granpa_x); + + if (papa_x == granpa_x->left) { + y = granpa_x->right; + if (IS_RED(y)) { + SET_BLACK(y); + SET_BLACK(papa_x); + SET_RED(granpa_x); + x = granpa_x; + } + else { + + if (x == papa_x->right) { + left_rotate(root, papa_x); + papa_x = x; + x = papa_x->left; + } + + RBT_ASSERT(x == granpa_x->left->left); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_RED(papa_x)); + RBT_ASSERT(IS_BLACK(granpa_x)); + RBT_ASSERT(IS_BLACK(y)); + + SET_BLACK(papa_x); + SET_RED(granpa_x); + right_rotate(root, granpa_x); + + RBT_ASSERT(x == parent(x)->left); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_RED(parent(x)->right)); + RBT_ASSERT(IS_BLACK(parent(x))); + break; + } + } + else { + RBT_ASSERT(papa_x == granpa_x->right); + y = granpa_x->left; + if (IS_RED(y)) { + SET_BLACK(y); + SET_BLACK(papa_x); + SET_RED(granpa_x); + x = granpa_x; + } + else { + + if (x == papa_x->left) { + right_rotate(root, papa_x); + papa_x = x; + x = papa_x->right; + } + + RBT_ASSERT(x == granpa_x->right->right); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_RED(papa_x)); + RBT_ASSERT(IS_BLACK(granpa_x)); + RBT_ASSERT(IS_BLACK(y)); + + SET_BLACK(papa_x); + SET_RED(granpa_x); + left_rotate(root, granpa_x); + + RBT_ASSERT(x == parent(x)->right); + RBT_ASSERT(IS_RED(x)); + RBT_ASSERT(IS_RED(parent(x)->left)); + RBT_ASSERT(IS_BLACK(parent(x))); + break; + } + } + } while (x != *root && (papa_x=parent(x), IS_RED(papa_x))); + + SET_BLACK(*root); +} + +static void +rbt_delete(RBTree* tree, RBTNode* del) +{ + Uint spliced_is_black; + RBTNode *x, *y, *z = del, *papa_y; + RBTNode null_x; /* null_x is used to get the fixup started when we + splice out a node without children. */ + + HARD_CHECK_IS_MEMBER(tree->root, del); + HARD_CHECK_TREE(tree, 0); + + null_x.parent_and_color = parent_and_color(NULL, !RED_FLG); + + /* Remove node from tree... */ + + /* Find node to splice out */ + if (!z->left || !z->right) + y = z; + else + /* Set y to z's successor */ + for(y = z->right; y->left; y = y->left); + /* splice out y */ + x = y->left ?
y->left : y->right; + spliced_is_black = IS_BLACK(y); + papa_y = parent(y); + if (x) { + set_parent(x, papa_y); + } + else if (spliced_is_black) { + x = &null_x; + x->right = x->left = NULL; + x->parent_and_color = parent_and_color(papa_y, !RED_FLG); + y->left = x; + } + + if (!papa_y) { + RBT_ASSERT(tree->root == y); + tree->root = x; + } + else { + if (y == papa_y->left) { + papa_y->left = x; + } + else { + RBT_ASSERT(y == papa_y->right); + papa_y->right = x; + } + } + if (y != z) { + /* We spliced out the successor of z; replace z by the successor */ + RBT_ASSERT(z != &null_x); + replace(&tree->root, z, y); + } + + if (spliced_is_black) { + RBTNode* papa_x; + /* We removed a black node which makes the resulting tree + violate the Red-Black Tree properties. Fixup tree... */ + + papa_x = parent(x); + while (IS_BLACK(x) && papa_x) { + + /* + * x has an "extra black" which we move up the tree + * until we reach the root or until we can get rid of it. + * + * y is the sibling of x + */ + + if (x == papa_x->left) { + y = papa_x->right; + RBT_ASSERT(y); + if (IS_RED(y)) { + RBT_ASSERT(y->right); + RBT_ASSERT(y->left); + SET_BLACK(y); + RBT_ASSERT(IS_BLACK(papa_x)); + SET_RED(papa_x); + left_rotate(&tree->root, papa_x); + RBT_ASSERT(papa_x == parent(x)); + y = papa_x->right; + } + RBT_ASSERT(y); + RBT_ASSERT(IS_BLACK(y)); + if (IS_BLACK(y->left) && IS_BLACK(y->right)) { + SET_RED(y); + } + else { + if (IS_BLACK(y->right)) { + SET_BLACK(y->left); + SET_RED(y); + right_rotate(&tree->root, y); + RBT_ASSERT(papa_x == parent(x)); + y = papa_x->right; + } + RBT_ASSERT(y); + if (IS_RED(papa_x)) { + + SET_BLACK(papa_x); + SET_RED(y); + } + RBT_ASSERT(y->right); + SET_BLACK(y->right); + left_rotate(&tree->root, papa_x); + x = tree->root; + break; + } + } + else { + RBT_ASSERT(x == papa_x->right); + y = papa_x->left; + RBT_ASSERT(y); + if (IS_RED(y)) { + RBT_ASSERT(y->right); + RBT_ASSERT(y->left); + SET_BLACK(y); + RBT_ASSERT(IS_BLACK(papa_x)); + SET_RED(papa_x); + right_rotate(&tree->root, papa_x); + RBT_ASSERT(papa_x == parent(x)); + y = papa_x->left; + } + RBT_ASSERT(y); + RBT_ASSERT(IS_BLACK(y)); + if (IS_BLACK(y->right) && IS_BLACK(y->left)) { + SET_RED(y); + } + else { + if (IS_BLACK(y->left)) { + SET_BLACK(y->right); + SET_RED(y); + left_rotate(&tree->root, y); + RBT_ASSERT(papa_x == parent(x)); + y = papa_x->left; + } + RBT_ASSERT(y); + if (IS_RED(papa_x)) { + SET_BLACK(papa_x); + SET_RED(y); + } + RBT_ASSERT(y->left); + SET_BLACK(y->left); + right_rotate(&tree->root, papa_x); + x = tree->root; + break; + } + } + x = papa_x; + papa_x = parent(x); + } + SET_BLACK(x); + + papa_x = parent(&null_x); + if (papa_x) { + if (papa_x->left == &null_x) + papa_x->left = NULL; + else { + RBT_ASSERT(papa_x->right == &null_x); + papa_x->right = NULL; + } + RBT_ASSERT(!null_x.left); + RBT_ASSERT(!null_x.right); + } + else if (tree->root == &null_x) { + tree->root = NULL; + RBT_ASSERT(!null_x.left); + RBT_ASSERT(!null_x.right); + } + } + HARD_CHECK_TREE(tree, 0); +} + + +static void +rbt_insert(RBTree* tree, RBTNode* node) +{ +#ifdef RBT_DEBUG + ErtsFreeSegDesc *dbg_under=NULL, *dbg_over=NULL; +#endif + ErtsFreeSegDesc* desc = node_to_desc(tree->order, node); + char* seg_addr = desc->start; + SWord seg_sz = desc->end - desc->start; + + HARD_CHECK_TREE(tree, 0); + + node->left = NULL; + node->right = NULL; + + if (!tree->root) { + node->parent_and_color = parent_and_color(NULL, !RED_FLG); + tree->root = node; + } + else { + RBTNode *x = tree->root; + while (1) { + SWord diff = cmp_with_node(tree->order, seg_sz,
seg_addr, x); + if (diff < 0) { + IF_RBT_DEBUG(dbg_over = node_to_desc(tree->order, x)); + if (!x->left) { + node->parent_and_color = parent_and_color(x, RED_FLG); + x->left = node; + break; + } + x = x->left; + } + else { + RBT_ASSERT(diff > 0); + IF_RBT_DEBUG(dbg_under = node_to_desc(tree->order, x)); + if (!x->right) { + node->parent_and_color = parent_and_color(x, RED_FLG); + x->right = node; + break; + } + x = x->right; + } + } + + RBT_ASSERT(parent(node)); +#ifdef RBT_DEBUG + if (tree->order == ADDR_ORDER) { + RBT_ASSERT(!dbg_under || dbg_under->end < desc->start); + RBT_ASSERT(!dbg_over || dbg_over->start > desc->end); + } +#endif + RBT_ASSERT(IS_RED(node)); + if (IS_RED(parent(node))) + tree_insert_fixup(&tree->root, node); + } + HARD_CHECK_TREE(tree, 0); +} + +/* + * Traverse tree in (reverse) sorting order + */ +static void +rbt_foreach_node(RBTree* tree, + void (*fn)(RBTNode*,void*), + void* arg, int reverse) +{ +#ifdef HARD_DEBUG + Sint blacks = -1; + Sint curr_blacks = 1; + Uint depth = 1; + Uint max_depth = 0; + Uint node_cnt = 0; +#endif + enum { RECURSE_LEFT, DO_NODE, RECURSE_RIGHT, RETURN_TO_PARENT }state; + RBTNode *x = tree->root; + + RBT_ASSERT(!x || !parent(x)); + + state = reverse ? RECURSE_RIGHT : RECURSE_LEFT; + while (x) { + switch (state) { + case RECURSE_LEFT: + if (x->left) { + RBT_ASSERT(parent(x->left) == x); + #ifdef HARD_DEBUG + ++depth; + if (IS_BLACK(x->left)) + curr_blacks++; + #endif + x = x->left; + state = reverse ? RECURSE_RIGHT : RECURSE_LEFT; + } + else { + #ifdef HARD_DEBUG + if (blacks < 0) + blacks = curr_blacks; + RBT_ASSERT(blacks == curr_blacks); + #endif + state = reverse ? RETURN_TO_PARENT : DO_NODE; + } + break; + + case DO_NODE: + #ifdef HARD_DEBUG + ++node_cnt; + if (depth > max_depth) + max_depth = depth; + #endif + (*fn) (x, arg); /* Do it! */ + state = reverse ? RECURSE_LEFT : RECURSE_RIGHT; + break; + + case RECURSE_RIGHT: + if (x->right) { + RBT_ASSERT(parent(x->right) == x); + #ifdef HARD_DEBUG + ++depth; + if (IS_BLACK(x->right)) + curr_blacks++; + #endif + x = x->right; + state = reverse ? RECURSE_RIGHT : RECURSE_LEFT; + } + else { + #ifdef HARD_DEBUG + if (blacks < 0) + blacks = curr_blacks; + RBT_ASSERT(blacks == curr_blacks); + #endif + state = reverse ? DO_NODE : RETURN_TO_PARENT; + } + break; + + case RETURN_TO_PARENT: + #ifdef HARD_DEBUG + if (IS_BLACK(x)) + curr_blacks--; + --depth; + #endif + if (parent(x)) { + if (x == parent(x)->left) { + state = reverse ? RETURN_TO_PARENT : DO_NODE; + } + else { + RBT_ASSERT(x == parent(x)->right); + state = reverse ? DO_NODE : RETURN_TO_PARENT; + } + } + x = parent(x); + break; + } + } +#ifdef HARD_DEBUG + RBT_ASSERT(depth == 0 || (!tree->root && depth==1)); + RBT_ASSERT(curr_blacks == 0); + RBT_ASSERT((1 << (max_depth/2)) <= node_cnt); +#endif +} + +#if defined(RBT_DEBUG) || defined(HARD_DEBUG_MSEG) +static RBTNode* rbt_prev_node(RBTNode* node) +{ + RBTNode* x; + if (node->left) { + for (x=node->left; x->right; x=x->right) + ; + return x; + } + for (x=node; parent(x); x=parent(x)) { + if (parent(x)->right == x) + return parent(x); + } + return NULL; +} +static RBTNode* rbt_next_node(RBTNode* node) +{ + RBTNode* x; + if (node->right) { + for (x=node->right; x->left; x=x->left) + ; + return x; + } + for (x=node; parent(x); x=parent(x)) { + if (parent(x)->left == x) + return parent(x); + } + return NULL; +} +#endif /* RBT_DEBUG || HARD_DEBUG_MSEG */ + + +/* The API to keep track of a bunch of separated (free) segments + (non-overlapping and non-adjacent). 
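+
+   A minimal usage sketch (hypothetical addresses; a real caller obtains
+   'desc' as a free ErtsFreeSegDesc from alloc_desc() and must handle a
+   NULL result):
+
+       ErtsFreeSegMap map;
+       ErtsFreeSegDesc *under, *over;
+       init_free_seg_map(&map, SZ_REVERSE_ADDR_ORDER);
+       insert_free_seg(&map, desc, (char*)0x11000, (char*)0x12000);
+       adjacent_free_seg(&map, (char*)0x12000, (char*)0x13000, &under, &over);
+
+   after which 'under' is the [0x11000,0x12000) segment and 'over' is NULL.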
+ */ +static void init_free_seg_map(ErtsFreeSegMap*, enum SortOrder); +static void adjacent_free_seg(ErtsFreeSegMap*, char* start, char* end, + ErtsFreeSegDesc** under, ErtsFreeSegDesc** over); +static void insert_free_seg(ErtsFreeSegMap*, ErtsFreeSegDesc*, char* start, char* end); +static void resize_free_seg(ErtsFreeSegMap*, ErtsFreeSegDesc*, char* start, char* end); +static void delete_free_seg(ErtsFreeSegMap*, ErtsFreeSegDesc*); +static ErtsFreeSegDesc* lookup_free_seg(ErtsFreeSegMap*, SWord sz); + + +static void init_free_seg_map(ErtsFreeSegMap* map, enum SortOrder order) +{ + map->atree.root = NULL; + map->atree.order = ADDR_ORDER; + map->stree.root = NULL; + map->stree.order = order; + map->nseg = 0; +} + +/* Lookup directly adjacent free segments to the given area [start->end]. + * The given area must not contain any free segments. + */ +static void adjacent_free_seg(ErtsFreeSegMap* map, char* start, char* end, + ErtsFreeSegDesc** under, ErtsFreeSegDesc** over) +{ + RBTNode* x = map->atree.root; + + *under = NULL; + *over = NULL; + while (x) { + if (start < anode_to_desc(x)->start) { + RBT_ASSERT(end <= anode_to_desc(x)->start); + if (end == anode_to_desc(x)->start) { + RBT_ASSERT(!*over); + *over = anode_to_desc(x); + } + x = x->left; + } + else { + RBT_ASSERT(start >= anode_to_desc(x)->end); + if (start == anode_to_desc(x)->end) { + RBT_ASSERT(!*under); + *under = anode_to_desc(x); + } + x = x->right; + } + } +} + +/* Initialize 'desc' and insert as new free segment [start->end]. + * The new segment must not contain or be adjacent to any free segment in 'map'. + */ +static void insert_free_seg(ErtsFreeSegMap* map, ErtsFreeSegDesc* desc, + char* start, char* end) +{ + desc->start = start; + desc->end = end; + rbt_insert(&map->atree, &desc->anode); + rbt_insert(&map->stree, &desc->snode); + map->nseg++; +} + +/* Resize existing free segment 'desc' to [start->end]. + * The new segment location must overlap the old location and + * it must not contain or be adjacent to any other free segment in 'map'. + */ +static void resize_free_seg(ErtsFreeSegMap* map, ErtsFreeSegDesc* desc, + char* start, char* end) +{ +#ifdef RBT_DEBUG + RBTNode* prev = rbt_prev_node(&desc->anode); + RBTNode* next = rbt_next_node(&desc->anode); + RBT_ASSERT(!prev || anode_to_desc(prev)->end < start); + RBT_ASSERT(!next || anode_to_desc(next)->start > end); +#endif + rbt_delete(&map->stree, &desc->snode); + desc->start = start; + desc->end = end; + rbt_insert(&map->stree, &desc->snode); +} + +/* Delete existing free segment 'desc' from 'map'. + */ +static void delete_free_seg(ErtsFreeSegMap* map, ErtsFreeSegDesc* desc) +{ + rbt_delete(&map->atree, &desc->anode); + rbt_delete(&map->stree, &desc->snode); + map->nseg--; +} + +/* Lookup a free segment in 'map' with a size of at least 'need_sz' usable bytes. 
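+ * The search is a best fit walk over the size tree: go right while the
+ * current segment is too small, otherwise remember it as a candidate and
+ * go left. The result is the first segment in the tree's sort order that
+ * provides 'need_sz' usable bytes, or NULL if no free segment is large
+ * enough.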
+ */ +static ErtsFreeSegDesc* lookup_free_seg(ErtsFreeSegMap* map, SWord need_sz) +{ + RBTNode* x = map->stree.root; + ErtsFreeSegDesc* best_desc = NULL; + const enum SortOrder order = map->stree.order; + + while (x) { + ErtsFreeSegDesc* desc = snode_to_desc(x); + SWord seg_sz = usable_size(order, desc); + + if (seg_sz < need_sz) { + x = x->right; + } + else { + best_desc = desc; + x = x->left; + } + } + return best_desc; +} + +struct build_arg_t +{ + Process* p; + Eterm* hp; + Eterm acc; +}; + +static void build_free_seg_tuple(RBTNode* node, void* arg) +{ + struct build_arg_t* a = (struct build_arg_t*)arg; + ErtsFreeSegDesc* desc = anode_to_desc(node); + Eterm start= erts_bld_uword(&a->hp, NULL, (UWord)desc->start); + Eterm end = erts_bld_uword(&a->hp, NULL, (UWord)desc->end); + Eterm tpl = TUPLE2(a->hp, start, end); + + a->hp += 3; + a->acc = CONS(a->hp, tpl, a->acc); + a->hp += 2; +} + +static +Eterm build_free_seg_list(Process* p, ErtsFreeSegMap* map) +{ + struct build_arg_t barg; + Eterm* hp_end; + const Uint may_need = map->nseg * (2 + 3 + 2*2); /* cons + tuple + bigs */ + + barg.p = p; + barg.hp = HAlloc(p, may_need); + hp_end = barg.hp + may_need; + barg.acc = NIL; + rbt_foreach_node(&map->atree, build_free_seg_tuple, &barg, 1); + + RBT_ASSERT(barg.hp <= hp_end); + HRelease(p, hp_end, barg.hp); + return barg.acc; +} + +#if ERTS_HAVE_OS_MMAP +/* Implementation of os_mmap()/os_munmap()/os_mremap()... */ + +#if HAVE_MMAP +# define ERTS_MMAP_PROT (PROT_READ|PROT_WRITE) +# define ERTS_MMAP_PROT_EXEC (PROT_READ|PROT_WRITE|PROT_EXEC) +# if defined(MAP_ANONYMOUS) +# define ERTS_MMAP_FLAGS (MAP_ANON|MAP_PRIVATE) +# define ERTS_MMAP_FD (-1) +# elif defined(MAP_ANON) +# define ERTS_MMAP_FLAGS (MAP_ANON|MAP_PRIVATE) +# define ERTS_MMAP_FD (-1) +# else +# define ERTS_MMAP_FLAGS (MAP_PRIVATE) +# define ERTS_MMAP_FD mm->mmap_fd +# endif +#endif + +static ERTS_INLINE void * +os_mmap(void *hint_ptr, UWord size, int try_superalign, int executable) +{ +#if HAVE_MMAP + const int prot = executable ? ERTS_MMAP_PROT_EXEC : ERTS_MMAP_PROT; + void *res; +#ifdef MAP_ALIGN + if (try_superalign) + res = mmap((void *) ERTS_SUPERALIGNED_SIZE, size, prot, + ERTS_MMAP_FLAGS|MAP_ALIGN, ERTS_MMAP_FD, 0); + else +#endif + res = mmap((void *) hint_ptr, size, prot, + ERTS_MMAP_FLAGS, ERTS_MMAP_FD, 0); + if (res == MAP_FAILED) + return NULL; + return res; +#elif HAVE_VIRTUALALLOC + const DWORD prot = executable ? 
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; + return (void *) VirtualAlloc(NULL, (SIZE_T) size, + MEM_COMMIT|MEM_RESERVE, prot); +#else +# error "missing mmap() or similar" +#endif +} + +static ERTS_INLINE void +os_munmap(void *ptr, UWord size) +{ +#if HAVE_MMAP +#ifdef ERTS_MMAP_DEBUG + int res = +#endif + munmap(ptr, size); + ERTS_MMAP_ASSERT(res == 0); +#elif HAVE_VIRTUALALLOC +#ifdef DEBUG + BOOL res = +#endif + VirtualFree((LPVOID) ptr, (SIZE_T) 0, MEM_RELEASE); + ERTS_MMAP_ASSERT(res != 0); +#else +# error "missing munmap() or similar" +#endif +} + +#ifdef ERTS_HAVE_OS_MREMAP +# if HAVE_MREMAP +# if defined(__NetBSD__) +# define ERTS_MREMAP_FLAGS (0) +# else +# define ERTS_MREMAP_FLAGS (MREMAP_MAYMOVE) +# endif +# endif +static ERTS_INLINE void * +os_mremap(void *ptr, UWord old_size, UWord new_size, int try_superalign) +{ + void *new_seg; +#if HAVE_MREMAP + new_seg = mremap(ptr, (size_t) old_size, +# if defined(__NetBSD__) + NULL, +# endif + (size_t) new_size, ERTS_MREMAP_FLAGS); + if (new_seg == (void *) MAP_FAILED) + return NULL; + return new_seg; +#else +# error "missing mremap() or similar" +#endif +} +#endif + +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION +#if HAVE_MMAP + +#define ERTS_MMAP_RESERVE_PROT (ERTS_MMAP_PROT) +#define ERTS_MMAP_RESERVE_PROT_EXEC (ERTS_MMAP_PROT_EXEC) +#define ERTS_MMAP_RESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_FIXED) +#define ERTS_MMAP_UNRESERVE_PROT (PROT_NONE) +#if defined(__FreeBSD__) +#define ERTS_MMAP_UNRESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_FIXED) +#else +#define ERTS_MMAP_UNRESERVE_FLAGS (ERTS_MMAP_FLAGS|MAP_NORESERVE|MAP_FIXED) +#endif /* __FreeBSD__ */ +#define ERTS_MMAP_VIRTUAL_PROT (PROT_NONE) +#if defined(__FreeBSD__) +#define ERTS_MMAP_VIRTUAL_FLAGS (ERTS_MMAP_FLAGS) +#else +#define ERTS_MMAP_VIRTUAL_FLAGS (ERTS_MMAP_FLAGS|MAP_NORESERVE) +#endif /* __FreeBSD__ */ + +static int +os_reserve_physical(char *ptr, UWord size, int exec) +{ + const int prot = exec ? ERTS_MMAP_RESERVE_PROT_EXEC : ERTS_MMAP_RESERVE_PROT; + void *res = mmap((void *) ptr, (size_t) size, prot, + ERTS_MMAP_RESERVE_FLAGS, ERTS_MMAP_FD, 0); + if (res == (void *) MAP_FAILED) + return 0; + return 1; +} + +static void +os_unreserve_physical(char *ptr, UWord size) +{ + void *res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_UNRESERVE_PROT, + ERTS_MMAP_UNRESERVE_FLAGS, ERTS_MMAP_FD, 0); + if (res == (void *) MAP_FAILED) + erts_exit(ERTS_ABORT_EXIT, "Failed to unreserve memory"); +} + +static void * +os_mmap_virtual(char *ptr, UWord size, int exec) +{ + int flags = ERTS_MMAP_VIRTUAL_FLAGS; + void* res; + +#ifdef ERTS_ALC_A_EXEC + if (exec) { + ASSERT(!ptr); + /* OTP-19.0: Nice hack below cut-and-pasted from hipe_amd64.c */ + +# ifdef MAP_32BIT + /* If we got MAP_32BIT (Linux), then use that to ask for low memory */ + flags |= MAP_32BIT; +# else + /* FreeBSD doesn't have MAP_32BIT, and it doesn't respect + a plain map_hint (returns high mappings even though the + hint refers to a free area), so we have to use both map_hint + and MAP_FIXED to get addresses below the 2GB boundary. + This is even worse than the Linux/ppc64 case. + Similarly, Solaris 10 doesn't have MAP_32BIT, + and it doesn't respect a plain map_hint. 
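+	     The fixed 0.5GB hint below is presumably chosen so that the
+	     executable mapping stays within the low 2GB of address space,
+	     where the 32-bit offsets assumed by HiPE's amd64 code model
+	     still reach.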
*/ + ptr = (char*)(512*1024*1024); /* 0.5GB */ + +# if defined(__FreeBSD__) || defined(__sun__) + flags |= MAP_FIXED; +# endif +# endif /* !MAP_32BIT */ + } +#else /* !ERTS_ALC_A_EXEC */ + ASSERT(!exec); +#endif + res = mmap((void *) ptr, (size_t) size, ERTS_MMAP_VIRTUAL_PROT, + flags, ERTS_MMAP_FD, 0); + if (res == (void *) MAP_FAILED) + return NULL; + return res; +} + +#else +#error "Missing reserve/unreserve physical memory implementation" +#endif +#endif /* ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION */ + +#endif /* ERTS_HAVE_OS_MMAP */ + +static int reserve_noop(char *ptr, UWord size, int exec) +{ +#ifdef ERTS_MMAP_DEBUG_FILL_AREAS + Uint32 *uip, *end = (Uint32 *) (ptr + size); + + for (uip = (Uint32 *) ptr; uip < end; uip++) + ERTS_MMAP_ASSERT(*uip == (Uint32) 0xdeadbeef); + for (uip = (Uint32 *) ptr; uip < end; uip++) + *uip = (Uint32) 0xfeedfeed; +#endif + return 1; +} + +static void unreserve_noop(char *ptr, UWord size) +{ +#ifdef ERTS_MMAP_DEBUG_FILL_AREAS + Uint32 *uip, *end = (Uint32 *) (ptr + size); + + for (uip = (Uint32 *) ptr; uip < end; uip++) + *uip = (Uint32) 0xdeadbeef; +#endif +} + +static UWord +alloc_desc_insert_free_seg(ErtsMemMapper* mm, + ErtsFreeSegMap *map, char* start, char* end) +{ + char *ptr; + ErtsFreeSegMap *da_map; + ErtsFreeSegDesc *desc = alloc_desc(mm); + if (desc) { + insert_free_seg(map, desc, start, end); + return 0; + } + + /* + * Ahh; ran out of free segment descriptors. + * + * First try to map a new page... + */ + +#if ERTS_HAVE_OS_MMAP + if (!mm->no_os_mmap) { + ptr = os_mmap(mm->desc.new_area_hint, ERTS_PAGEALIGNED_SIZE, 0, 0); + if (ptr) { + mm->desc.new_area_hint = ptr+ERTS_PAGEALIGNED_SIZE; + ERTS_MMAP_SIZE_OS_INC(ERTS_PAGEALIGNED_SIZE); + add_free_desc_area(mm, ptr, ptr+ERTS_PAGEALIGNED_SIZE); + desc = alloc_desc(mm); + ERTS_MMAP_ASSERT(desc); + insert_free_seg(map, desc, start, end); + return 0; + } + } +#endif + + /* + * ...then try to find a good place in the supercarrier... + */ + da_map = &mm->sua.map; + desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE); + if (desc) { + if (mm->reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE, 0)) + ERTS_MMAP_SIZE_SC_SUA_INC(ERTS_PAGEALIGNED_SIZE); + else + desc = NULL; + + } + else { + da_map = &mm->sa.map; + desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE); + if (desc) { + if (mm->reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE, 0)) + ERTS_MMAP_SIZE_SC_SA_INC(ERTS_PAGEALIGNED_SIZE); + else + desc = NULL; + } + } + if (desc) { + char *da_end = desc->start + ERTS_PAGEALIGNED_SIZE; + add_free_desc_area(mm, desc->start, da_end); + if (da_end != desc->end) + resize_free_seg(da_map, desc, da_end, desc->end); + else { + delete_free_seg(da_map, desc); + free_desc(mm, desc); + } + + desc = alloc_desc(mm); + ERTS_MMAP_ASSERT(desc); + insert_free_seg(map, desc, start, end); + return 0; + } + + /* + * ... and then as last resort use the first page of the + * free segment we are trying to insert for free descriptors.
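+ *
+ * The value returned is the number of bytes at the start of the inserted
+ * segment that were consumed for descriptors: 0 on the paths above, or
+ * ERTS_PAGEALIGNED_SIZE on this last resort path. The caller must treat
+ * that prefix as no longer free.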
+ */ + ptr = start + ERTS_PAGEALIGNED_SIZE; + ERTS_MMAP_ASSERT(ptr <= end); + + add_free_desc_area(mm, start, ptr); + + if (ptr != end) { + desc = alloc_desc(mm); + ERTS_MMAP_ASSERT(desc); + insert_free_seg(map, desc, ptr, end); + } + + return ERTS_PAGEALIGNED_SIZE; +} + +void * +erts_mmap(ErtsMemMapper* mm, Uint32 flags, UWord *sizep) +{ + char *seg; + UWord asize = ERTS_PAGEALIGNED_CEILING(*sizep); + + /* Map in premapped supercarrier */ + if (mm->supercarrier && !(ERTS_MMAPFLG_OS_ONLY & flags)) { + char *end; + ErtsFreeSegDesc *desc; + Uint32 superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); + + erts_smp_mtx_lock(&mm->mtx); + + ERTS_MMAP_OP_START(*sizep); + + if (!superaligned) { + desc = lookup_free_seg(&mm->sua.map, asize); + if (desc) { + seg = desc->start; + end = seg+asize; + if (!mm->reserve_physical(seg, asize, mm->executable)) + goto supercarrier_reserve_failure; + if (desc->end == end) { + delete_free_seg(&mm->sua.map, desc); + free_desc(mm, desc); + } + else { + ERTS_MMAP_ASSERT(end < desc->end); + resize_free_seg(&mm->sua.map, desc, end, desc->end); + } + ERTS_MMAP_SIZE_SC_SUA_INC(asize); + goto supercarrier_success; + } + + if (asize <= mm->sua.bot - mm->sa.top) { + if (!mm->reserve_physical(mm->sua.bot - asize, asize, + mm->executable)) + goto supercarrier_reserve_failure; + mm->sua.bot -= asize; + seg = mm->sua.bot; + ERTS_MMAP_SIZE_SC_SUA_INC(asize); + goto supercarrier_success; + } + } + + asize = ERTS_SUPERALIGNED_CEILING(asize); + + desc = lookup_free_seg(&mm->sa.map, asize); + if (desc) { + char *start = seg = desc->start; + seg = (char *) ERTS_SUPERALIGNED_CEILING(seg); + end = seg+asize; + if (!mm->reserve_physical(start, (UWord) (end - start), + mm->executable)) + goto supercarrier_reserve_failure; + ERTS_MMAP_SIZE_SC_SA_INC(asize); + if (desc->end == end) { + if (start != seg) + resize_free_seg(&mm->sa.map, desc, start, seg); + else { + delete_free_seg(&mm->sa.map, desc); + free_desc(mm, desc); + } + } + else { + ERTS_MMAP_ASSERT(end < desc->end); + resize_free_seg(&mm->sa.map, desc, end, desc->end); + if (start != seg) { + UWord ad_sz; + ad_sz = alloc_desc_insert_free_seg(mm, &mm->sua.map, + start, seg); + start += ad_sz; + if (start != seg) + mm->unreserve_physical(start, (UWord) (seg - start)); + } + } + goto supercarrier_success; + } + + if (superaligned) { + char *start = mm->sa.top; + seg = (char *) ERTS_SUPERALIGNED_CEILING(start); + + if (asize + (seg - start) <= mm->sua.bot - start) { + end = seg + asize; + if (!mm->reserve_physical(start, (UWord) (end - start), + mm->executable)) + goto supercarrier_reserve_failure; + mm->sa.top = end; + ERTS_MMAP_SIZE_SC_SA_INC(asize); + if (start != seg) { + UWord ad_sz; + ad_sz = alloc_desc_insert_free_seg(mm, &mm->sua.map, + start, seg); + start += ad_sz; + if (start != seg) + mm->unreserve_physical(start, (UWord) (seg - start)); + } + goto supercarrier_success; + } + + desc = lookup_free_seg(&mm->sua.map, asize + ERTS_SUPERALIGNED_SIZE); + if (desc) { + char *org_start = desc->start; + char *org_end = desc->end; + + seg = (char *) ERTS_SUPERALIGNED_CEILING(org_start); + end = seg + asize; + if (!mm->reserve_physical(seg, (UWord) (org_end - seg), + mm->executable)) + goto supercarrier_reserve_failure; + ERTS_MMAP_SIZE_SC_SUA_INC(asize); + if (org_start != seg) { + ERTS_MMAP_ASSERT(org_start < seg); + resize_free_seg(&mm->sua.map, desc, org_start, seg); + desc = NULL; + } + if (end != org_end) { + UWord ad_sz = 0; + ERTS_MMAP_ASSERT(end < org_end); + if (desc) + resize_free_seg(&mm->sua.map, desc, end, org_end); + 
else + ad_sz = alloc_desc_insert_free_seg(mm, &mm->sua.map, + end, org_end); + end += ad_sz; + if (end != org_end) + mm->unreserve_physical(end, + (UWord) (org_end - end)); + } + goto supercarrier_success; + } + } + + ERTS_MMAP_OP_ABORT(); + erts_smp_mtx_unlock(&mm->mtx); + } + +#if ERTS_HAVE_OS_MMAP + /* Map using OS primitives */ + if (!(ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) && !mm->no_os_mmap) { + if (!(ERTS_MMAPFLG_SUPERALIGNED & flags)) { + seg = os_mmap(NULL, asize, 0, mm->executable); + if (!seg) + goto failure; + } + else { + asize = ERTS_SUPERALIGNED_CEILING(*sizep); + seg = os_mmap(NULL, asize, 1, mm->executable); + if (!seg) + goto failure; + + if (!ERTS_IS_SUPERALIGNED(seg)) { + char *ptr; + UWord sz; + + os_munmap(seg, asize); + + ptr = os_mmap(NULL, asize + ERTS_SUPERALIGNED_SIZE, 1, + mm->executable); + if (!ptr) + goto failure; + + seg = (char *) ERTS_SUPERALIGNED_CEILING(ptr); + sz = (UWord) (seg - ptr); + ERTS_MMAP_ASSERT(sz <= ERTS_SUPERALIGNED_SIZE); + if (sz) + os_munmap(ptr, sz); + sz = ERTS_SUPERALIGNED_SIZE - sz; + if (sz) + os_munmap(seg+asize, sz); + } + } + + ERTS_MMAP_OP_LCK(seg, *sizep, asize); + ERTS_MMAP_SIZE_OS_INC(asize); + *sizep = asize; + return (void *) seg; + } +failure: +#endif + ERTS_MMAP_OP_LCK(NULL, *sizep, 0); + *sizep = 0; + return NULL; + +supercarrier_success: + +#ifdef ERTS_MMAP_DEBUG + if (ERTS_MMAPFLG_SUPERALIGNED & flags) { + ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(seg)); + ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(asize)); + } + else { + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(seg)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize)); + } +#endif + + ERTS_MMAP_OP_END(seg, asize); + erts_smp_mtx_unlock(&mm->mtx); + + *sizep = asize; + return (void *) seg; + +supercarrier_reserve_failure: + erts_smp_mtx_unlock(&mm->mtx); + *sizep = 0; + return NULL; +} + +void +erts_munmap(ErtsMemMapper* mm, Uint32 flags, void *ptr, UWord size) +{ + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(size)); + + if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) { + ERTS_MMAP_ASSERT(!mm->no_os_mmap); +#if ERTS_HAVE_OS_MMAP + ERTS_MUNMAP_OP_LCK(ptr, size); + ERTS_MMAP_SIZE_OS_DEC(size); + os_munmap(ptr, size); +#endif + } + else { + char *start, *end; + ErtsFreeSegMap *map; + ErtsFreeSegDesc *prev, *next, *desc; + UWord ad_sz = 0; + + ERTS_MMAP_ASSERT(mm->supercarrier); + + start = (char *) ptr; + end = start + size; + + erts_smp_mtx_lock(&mm->mtx); + + ERTS_MUNMAP_OP(ptr, size); + + if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) { + + map = &mm->sa.map; + adjacent_free_seg(map, start, end, &prev, &next); + + ERTS_MMAP_SIZE_SC_SA_DEC(size); + if (end == mm->sa.top) { + ERTS_MMAP_ASSERT(!next); + if (prev) { + start = prev->start; + delete_free_seg(map, prev); + free_desc(mm, prev); + } + mm->sa.top = start; + goto supercarrier_success; + } + } + else { + map = &mm->sua.map; + adjacent_free_seg(map, start, end, &prev, &next); + + ERTS_MMAP_SIZE_SC_SUA_DEC(size); + if (start == mm->sua.bot) { + ERTS_MMAP_ASSERT(!prev); + if (next) { + end = next->end; + delete_free_seg(map, next); + free_desc(mm, next); + } + mm->sua.bot = end; + goto supercarrier_success; + } + } + + desc = NULL; + + if (next) { + ERTS_MMAP_ASSERT(end < next->end); + end = next->end; + if (prev) { + delete_free_seg(map, next); + free_desc(mm, next); + goto save_prev; + } + desc = next; + } else if (prev) { + save_prev: + ERTS_MMAP_ASSERT(prev->start < start); + start = prev->start; + desc = prev; + } + + if (desc) + resize_free_seg(map, desc, start, end); + else + ad_sz = 
alloc_desc_insert_free_seg(mm, map, start, end); + + supercarrier_success: { + UWord unres_sz; + + ERTS_MMAP_ASSERT(size >= ad_sz); + unres_sz = size - ad_sz; + if (unres_sz) + mm->unreserve_physical(((char *) ptr) + ad_sz, unres_sz); + + erts_smp_mtx_unlock(&mm->mtx); + } + } +} + +static void * +remap_move(ErtsMemMapper* mm, + Uint32 flags, void *ptr, UWord old_size, UWord *sizep) +{ + UWord size = *sizep; + void *new_ptr = erts_mmap(mm, flags, &size); + if (!new_ptr) + return NULL; + *sizep = size; + if (old_size < size) + size = old_size; + sys_memcpy(new_ptr, ptr, (size_t) size); + erts_munmap(mm, flags, ptr, old_size); + return new_ptr; +} + +void * +erts_mremap(ErtsMemMapper* mm, + Uint32 flags, void *ptr, UWord old_size, UWord *sizep) +{ + void *new_ptr; + Uint32 superaligned; + UWord asize; + + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size)); + ERTS_MMAP_ASSERT(sizep && ERTS_IS_PAGEALIGNED(*sizep)); + + if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) { + + ERTS_MMAP_ASSERT(!mm->no_os_mmap); + + if (!(ERTS_MMAPFLG_OS_ONLY & flags) && mm->supercarrier) { + new_ptr = remap_move(mm, ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, + ptr, old_size, sizep); + if (new_ptr) + return new_ptr; + } + + if (ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) { + ERTS_MREMAP_OP_LCK(NULL, ptr, old_size, *sizep, old_size); + return NULL; + } + +#if ERTS_HAVE_OS_MREMAP || ERTS_HAVE_GENUINE_OS_MMAP + superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); + + if (superaligned) { + asize = ERTS_SUPERALIGNED_CEILING(*sizep); + if (asize == old_size && ERTS_IS_SUPERALIGNED(ptr)) { + ERTS_MREMAP_OP_LCK(ptr, ptr, old_size, *sizep, asize); + *sizep = asize; + return ptr; + } + } + else { + asize = ERTS_PAGEALIGNED_CEILING(*sizep); + if (asize == old_size) { + ERTS_MREMAP_OP_LCK(ptr, ptr, old_size, *sizep, asize); + *sizep = asize; + return ptr; + } + } + +#if ERTS_HAVE_GENUINE_OS_MMAP + if (asize < old_size + && (!superaligned + || ERTS_IS_SUPERALIGNED(ptr))) { + UWord um_sz; + new_ptr = ((char *) ptr) + asize; + ERTS_MMAP_ASSERT((((char *)ptr) + old_size) > (char *) new_ptr); + um_sz = (UWord) ((((char *) ptr) + old_size) - (char *) new_ptr); + ERTS_MMAP_SIZE_OS_DEC(um_sz); + os_munmap(new_ptr, um_sz); + ERTS_MREMAP_OP_LCK(ptr, ptr, old_size, *sizep, asize); + *sizep = asize; + return ptr; + } +#endif +#if ERTS_HAVE_OS_MREMAP + if (superaligned) + return remap_move(mm, flags, ptr, old_size, sizep); + else { + new_ptr = os_mremap(ptr, old_size, asize, 0); + if (!new_ptr) + return NULL; + if (asize > old_size) + ERTS_MMAP_SIZE_OS_INC(asize - old_size); + else + ERTS_MMAP_SIZE_OS_DEC(old_size - asize); + ERTS_MREMAP_OP_LCK(new_ptr, ptr, old_size, *sizep, asize); + *sizep = asize; + return new_ptr; + } +#endif +#endif + } + else { /* In super carrier */ + char *start, *end, *new_end; + ErtsFreeSegMap *map; + ErtsFreeSegDesc *prev, *next; + UWord ad_sz = 0; + + ERTS_MMAP_ASSERT(mm->supercarrier); + + if (ERTS_MMAPFLG_OS_ONLY & flags) + return remap_move(mm, flags, ptr, old_size, sizep); + + superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); + + asize = (superaligned + ? ERTS_SUPERALIGNED_CEILING(*sizep) + : ERTS_PAGEALIGNED_CEILING(*sizep)); + + erts_smp_mtx_lock(&mm->mtx); + + if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr) + ? (!superaligned && lookup_free_seg(&mm->sua.map, asize)) + : (superaligned && lookup_free_seg(&mm->sa.map, asize))) { + erts_smp_mtx_unlock(&mm->mtx); + /* + * Segment currently in wrong area (due to a previous memory + * shortage), move it to the right area.
+ * (remap_move() will succeed) + */ + return remap_move(mm, ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, + ptr, old_size, sizep); + } + + ERTS_MREMAP_OP_START(ptr, old_size, *sizep); + + if (asize == old_size) { + new_ptr = ptr; + goto supercarrier_resize_success; + } + + start = (char *) ptr; + end = start + old_size; + new_end = start+asize; + + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize)); + + if (asize < old_size) { + UWord unres_sz; + new_ptr = ptr; + if (!ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) { + map = &mm->sua.map; + ERTS_MMAP_SIZE_SC_SUA_DEC(old_size - asize); + } + else { + if (end == mm->sa.top) { + mm->sa.top = new_end; + mm->unreserve_physical(((char *) ptr) + asize, + old_size - asize); + goto supercarrier_resize_success; + } + ERTS_MMAP_SIZE_SC_SA_DEC(old_size - asize); + map = &mm->sa.map; + } + + adjacent_free_seg(map, start, end, &prev, &next); + + if (next) + resize_free_seg(map, next, new_end, next->end); + else + ad_sz = alloc_desc_insert_free_seg(mm, map, new_end, end); + ERTS_MMAP_ASSERT(old_size - asize >= ad_sz); + unres_sz = old_size - asize - ad_sz; + if (unres_sz) + mm->unreserve_physical(((char *) ptr) + asize + ad_sz, + unres_sz); + goto supercarrier_resize_success; + } + + if (!ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) { + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize)); + + adjacent_free_seg(&mm->sua.map, start, end, &prev, &next); + + if (next && new_end <= next->end) { + if (!mm->reserve_physical(((char *) ptr) + old_size, + asize - old_size, + mm->executable)) + goto supercarrier_reserve_failure; + if (new_end < next->end) + resize_free_seg(&mm->sua.map, next, new_end, next->end); + else { + delete_free_seg(&mm->sua.map, next); + free_desc(mm, next); + } + new_ptr = ptr; + ERTS_MMAP_SIZE_SC_SUA_INC(asize - old_size); + goto supercarrier_resize_success; + } + } + else { /* Superaligned area */ + + if (end == mm->sa.top) { + if (new_end <= mm->sua.bot) { + if (!mm->reserve_physical(((char *) ptr) + old_size, + asize - old_size, + mm->executable)) + goto supercarrier_reserve_failure; + mm->sa.top = new_end; + new_ptr = ptr; + ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size); + goto supercarrier_resize_success; + } + } + else { + adjacent_free_seg(&mm->sa.map, start, end, &prev, &next); + if (next && new_end <= next->end) { + if (!mm->reserve_physical(((char *) ptr) + old_size, + asize - old_size, + mm->executable)) + goto supercarrier_reserve_failure; + if (new_end < next->end) + resize_free_seg(&mm->sa.map, next, new_end, next->end); + else { + delete_free_seg(&mm->sa.map, next); + free_desc(mm, next); + } + new_ptr = ptr; + ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size); + goto supercarrier_resize_success; + } + } + } + + ERTS_MMAP_OP_ABORT(); + erts_smp_mtx_unlock(&mm->mtx); + + /* Failed to resize... 
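+	   in place; fall back to remap_move() below, which mmaps a new
+	   area, copies the contents and unmaps the old mapping.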
*/ + } + + return remap_move(mm, flags, ptr, old_size, sizep); + +supercarrier_resize_success: + +#ifdef ERTS_MMAP_DEBUG + if ((ERTS_MMAPFLG_SUPERALIGNED & flags) + || ERTS_MMAP_IN_SUPERALIGNED_AREA(new_ptr)) { + ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(new_ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_SUPERALIGNED(asize)); + } + else { + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(new_ptr)); + ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize)); + } +#endif + + ERTS_MREMAP_OP_END(new_ptr, asize); + erts_smp_mtx_unlock(&mm->mtx); + + *sizep = asize; + return new_ptr; + +supercarrier_reserve_failure: + ERTS_MREMAP_OP_END(NULL, old_size); + erts_smp_mtx_unlock(&mm->mtx); + *sizep = old_size; + return NULL; + +} + +int erts_mmap_in_supercarrier(ErtsMemMapper* mm, void *ptr) +{ + return ERTS_MMAP_IN_SUPERCARRIER(ptr); +} + +static struct { + Eterm options; + Eterm total; + Eterm total_sa; + Eterm total_sua; + Eterm used; + Eterm used_sa; + Eterm used_sua; + Eterm max; + Eterm allocated; + Eterm reserved; + Eterm sizes; + Eterm free_segs; + Eterm supercarrier; + Eterm os; + Eterm scs; + Eterm sco; + Eterm scrpm; + Eterm scrfsd; + + int is_initialized; + erts_mtx_t init_mutex; +}am; + +static void ERTS_INLINE atom_init(Eterm *atom, char *name) +{ + *atom = am_atom_put(name, strlen(name)); +} +#define AM_INIT(AM) atom_init(&am.AM, #AM) + +static void init_atoms(void) +{ + erts_mtx_lock(&am.init_mutex); + + if (!am.is_initialized) { + AM_INIT(options); + AM_INIT(total); + AM_INIT(total_sa); + AM_INIT(total_sua); + AM_INIT(used); + AM_INIT(used_sa); + AM_INIT(used_sua); + AM_INIT(max); + AM_INIT(allocated); + AM_INIT(reserved); + AM_INIT(sizes); + AM_INIT(free_segs); + AM_INIT(supercarrier); + AM_INIT(os); + AM_INIT(scs); + AM_INIT(sco); + AM_INIT(scrpm); + AM_INIT(scrfsd); + am.is_initialized = 1; + } + erts_mtx_unlock(&am.init_mutex); +} + + +#ifdef HARD_DEBUG_MSEG +static void hard_dbg_mseg_init(void); +#endif + +void +erts_mmap_init(ErtsMemMapper* mm, ErtsMMapInit *init, int executable) +{ + static int is_first_call = 1; + int virtual_map = 0; + char *start = NULL, *end = NULL; + UWord pagesize; +#if defined(__WIN32__) + SYSTEM_INFO sysinfo; + GetSystemInfo(&sysinfo); + pagesize = (UWord) sysinfo.dwPageSize; +#elif defined(_SC_PAGESIZE) + pagesize = (UWord) sysconf(_SC_PAGESIZE); +#elif defined(HAVE_GETPAGESIZE) + pagesize = (UWord) getpagesize(); +#else +# error "Do not know how to get page size" +#endif +#if defined(HARD_DEBUG) || 0 + erts_fprintf(stderr, "erts_mmap: scs = %bpu\n", init->scs); + erts_fprintf(stderr, "erts_mmap: sco = %i\n", init->sco); + erts_fprintf(stderr, "erts_mmap: scrfsd = %i\n", init->scrfsd); +#endif + erts_page_inv_mask = pagesize - 1; + if (pagesize & erts_page_inv_mask) + erts_exit(1, "erts_mmap: Invalid pagesize: %bpu\n", + pagesize); + + ERTS_MMAP_OP_RINGBUF_INIT(); + + mm->supercarrier = 0; + mm->reserve_physical = reserve_noop; + mm->unreserve_physical = unreserve_noop; + mm->executable = executable; + +#if HAVE_MMAP && !defined(MAP_ANON) && !defined(MAP_ANONYMOUS) + mm->mmap_fd = open("/dev/zero", O_RDWR); + if (mm->mmap_fd < 0) + erts_exit(1, "erts_mmap: Failed to open /dev/zero\n"); +#endif + + erts_smp_mtx_init(&mm->mtx, "erts_mmap"); + if (is_first_call) { + erts_mtx_init(&am.init_mutex, "mmap_init_atoms"); + } + +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION + if (init->virtual_range.start) { + char *ptr; + UWord sz; + ptr = (char *) ERTS_PAGEALIGNED_CEILING(init->virtual_range.start); + end = (char *) ERTS_PAGEALIGNED_FLOOR(init->virtual_range.end); + sz = end - ptr; + start = os_mmap_virtual(ptr, sz,
executable); + if (!start || start > ptr || start >= end) + erts_exit(1, + "erts_mmap: Failed to create virtual range for super carrier\n"); + sz = start - ptr; + if (sz) + os_munmap(end, sz); + mm->reserve_physical = os_reserve_physical; + mm->unreserve_physical = os_unreserve_physical; + virtual_map = 1; + } + else +#endif + if (init->predefined_area.start) { + start = init->predefined_area.start; + end = init->predefined_area.end; + if (end != (void *) 0 && end < start) + end = start; + } +#if ERTS_HAVE_OS_MMAP + else if (init->scs) { + UWord sz; + sz = ERTS_PAGEALIGNED_CEILING(init->scs); +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION + if (!init->scrpm) { + start = os_mmap_virtual(NULL, sz, executable); + mm->reserve_physical = os_reserve_physical; + mm->unreserve_physical = os_unreserve_physical; + virtual_map = 1; + } + else +#endif + { + /* + * The whole supercarrier will be physically + * reserved all the time. + */ + start = os_mmap(NULL, sz, 1, executable); + } + if (!start) + erts_exit(1, + "erts_mmap: Failed to create super carrier of size %bpu MB\n", + init->scs/1024/1024); + end = start + sz; +#ifdef ERTS_MMAP_DEBUG_FILL_AREAS + if (!virtual_map) { + Uint32 *uip; + + for (uip = (Uint32 *) start; uip < (Uint32 *) end; uip++) + *uip = (Uint32) 0xdeadbeef; + } +#endif + } +#endif + + mm->no.free_seg_descs = 0; + mm->no.free_segs.curr = 0; + mm->no.free_segs.max = 0; + + mm->size.supercarrier.total = 0; + mm->size.supercarrier.used.total = 0; + mm->size.supercarrier.used.sa = 0; + mm->size.supercarrier.used.sua = 0; + mm->size.os.used = 0; + + mm->desc.new_area_hint = NULL; + + if (!start) { + mm->sa.bot = NULL; + mm->sa.top = NULL; + mm->sua.bot = NULL; + mm->sua.top = NULL; + mm->no_os_mmap = 0; + mm->supercarrier = 0; + } + else { + size_t desc_size; + + mm->no_os_mmap = init->sco; + + desc_size = init->scrfsd; + if (desc_size < 100) + desc_size = 100; + desc_size *= sizeof(ErtsFreeSegDesc); + if ((desc_size + + ERTS_SUPERALIGNED_SIZE + + ERTS_PAGEALIGNED_SIZE) > end - start) + erts_exit(1, "erts_mmap: No space for segments in super carrier\n"); + + mm->sa.bot = start; + mm->sa.bot += desc_size; + mm->sa.bot = (char *) ERTS_SUPERALIGNED_CEILING(mm->sa.bot); + mm->sa.top = mm->sa.bot; + mm->sua.top = end; + mm->sua.bot = mm->sua.top; + + mm->size.supercarrier.used.total += (UWord) (mm->sa.bot - start); + + mm->desc.free_list = NULL; + mm->desc.reserved = 0; + + if (end == (void *) 0) { + /* + * Very unlikely, but we need a guarantee + * that `mm->sua.top` will always + * compare as larger than all segment pointers + * into the super carrier... + */ + mm->sua.top -= ERTS_PAGEALIGNED_SIZE; + mm->size.supercarrier.used.total += ERTS_PAGEALIGNED_SIZE; +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION + if (!virtual_map || os_reserve_physical(mm->sua.top, ERTS_PAGEALIGNED_SIZE, 0)) +#endif + add_free_desc_area(mm, mm->sua.top, end); + mm->desc.reserved += (end - mm->sua.top) / sizeof(ErtsFreeSegDesc); + } + + mm->size.supercarrier.total = (UWord) (mm->sua.top - start); + + /* + * Area before (and after) super carrier + * will be used for free segment descriptors.
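+ *
+ * In the normal case the layout set up above is:
+ *
+ *   start   .. sa.bot   free segment descriptor area
+ *   sa.bot  .. sa.top   superaligned segments (sa.top grows upwards)
+ *   sa.top  .. sua.bot  not yet used
+ *   sua.bot .. sua.top  superunaligned segments (sua.bot grows downwards)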
+ */ +#ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION + if (virtual_map && !os_reserve_physical(start, mm->sa.bot - start, 0)) + erts_exit(1, "erts_mmap: Failed to reserve physical memory for descriptors\n"); +#endif + mm->desc.unused_start = start; + mm->desc.unused_end = mm->sa.bot; + mm->desc.reserved += ((mm->desc.unused_end - start) + / sizeof(ErtsFreeSegDesc)); + + init_free_seg_map(&mm->sa.map, SA_SZ_ADDR_ORDER); + init_free_seg_map(&mm->sua.map, SZ_REVERSE_ADDR_ORDER); + + mm->supercarrier = 1; + + mm->desc.new_area_hint = end; + + } + +#if !ERTS_HAVE_OS_MMAP + mm->no_os_mmap = 1; +#endif + +#ifdef HARD_DEBUG_MSEG + hard_dbg_mseg_init(); +#endif + +#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) + if (mm == &erts_literal_mmapper) { + erts_literals_start = erts_literal_mmapper.sa.bot; + erts_literals_size = erts_literal_mmapper.sua.top - erts_literals_start; + } +#endif + is_first_call = 0; +} + + +static ERTS_INLINE void +add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2) +{ + *lp = erts_bld_cons(hpp, szp, erts_bld_tuple(hpp, szp, 2, el1, el2), *lp); +} + +Eterm erts_mmap_info(ErtsMemMapper* mm, + int *print_to_p, + void *print_to_arg, + Eterm** hpp, Uint* szp, + struct erts_mmap_info_struct* emis) +{ + Eterm size_tags[] = { am.total, am.total_sa, am.total_sua, am.used, am.used_sa, am.used_sua }; + Eterm seg_tags[] = { am.used, am.max, am.allocated, am.reserved, am.used_sa, am.used_sua }; + Eterm group[2]; + Eterm group_tags[] = { am.sizes, am.free_segs }; + Eterm list[3]; + Eterm list_tags[3]; /* { am.options, am.supercarrier, am.os } */ + int lix = 0; + Eterm res = THE_NON_VALUE; + + if (!hpp) { + erts_smp_mtx_lock(&mm->mtx); + emis->sizes[0] = mm->size.supercarrier.total; + emis->sizes[1] = mm->sa.top - mm->sa.bot; + emis->sizes[2] = mm->sua.top - mm->sua.bot; + emis->sizes[3] = mm->size.supercarrier.used.total; + emis->sizes[4] = mm->size.supercarrier.used.sa; + emis->sizes[5] = mm->size.supercarrier.used.sua; + + emis->segs[0] = mm->no.free_segs.curr; + emis->segs[1] = mm->no.free_segs.max; + emis->segs[2] = mm->no.free_seg_descs; + emis->segs[3] = mm->desc.reserved; + emis->segs[4] = mm->sa.map.nseg; + emis->segs[5] = mm->sua.map.nseg; + + emis->os_used = mm->size.os.used; + erts_smp_mtx_unlock(&mm->mtx); + } + + list[lix] = erts_mmap_info_options(mm, "option ", print_to_p, print_to_arg, + hpp, szp); + list_tags[lix] = am.options; + lix++; + + + if (print_to_p) { + int to = *print_to_p; + void *arg = print_to_arg; + if (mm->supercarrier) { + const char* prefix = "supercarrier "; + erts_print(to, arg, "%stotal size: %bpu\n", prefix, emis->sizes[0]); + erts_print(to, arg, "%stotal sa size: %bpu\n", prefix, emis->sizes[1]); + erts_print(to, arg, "%stotal sua size: %bpu\n", prefix, emis->sizes[2]); + erts_print(to, arg, "%sused size: %bpu\n", prefix, emis->sizes[3]); + erts_print(to, arg, "%sused sa size: %bpu\n", prefix, emis->sizes[4]); + erts_print(to, arg, "%sused sua size: %bpu\n", prefix, emis->sizes[5]); + erts_print(to, arg, "%sused free segs: %bpu\n", prefix, emis->segs[0]); + erts_print(to, arg, "%smax free segs: %bpu\n", prefix, emis->segs[1]); + erts_print(to, arg, "%sallocated free segs: %bpu\n", prefix, emis->segs[2]); + erts_print(to, arg, "%sreserved free segs: %bpu\n", prefix, emis->segs[3]); + erts_print(to, arg, "%ssa free segs: %bpu\n", prefix, emis->segs[4]); + erts_print(to, arg, "%ssua free segs: %bpu\n", prefix, emis->segs[5]); + } + if (!mm->no_os_mmap) { + erts_print(to, arg, "os mmap size used: %bpu\n", 
emis->os_used); + } + } + + + if (hpp || szp) { + if (!am.is_initialized) { + init_atoms(); + } + + if (mm->supercarrier) { + group[0] = erts_bld_atom_uword_2tup_list(hpp, szp, + sizeof(size_tags)/sizeof(Eterm), + size_tags, emis->sizes); + group[1] = erts_bld_atom_uword_2tup_list(hpp, szp, + sizeof(seg_tags)/sizeof(Eterm), + seg_tags, emis->segs); + list[lix] = erts_bld_2tup_list(hpp, szp, 2, group_tags, group); + list_tags[lix] = am.supercarrier; + lix++; + } + + if (!mm->no_os_mmap) { + group[0] = erts_bld_atom_uword_2tup_list(hpp, szp, + 1, &am.used, &emis->os_used); + list[lix] = erts_bld_2tup_list(hpp, szp, 1, group_tags, group); + list_tags[lix] = am.os; + lix++; + } + res = erts_bld_2tup_list(hpp, szp, lix, list_tags, list); + } + return res; +} + +Eterm erts_mmap_info_options(ErtsMemMapper* mm, + char *prefix, + int *print_to_p, + void *print_to_arg, + Uint **hpp, + Uint *szp) +{ + const UWord scs = mm->sua.top - mm->sa.bot; + const Eterm sco = mm->no_os_mmap ? am_true : am_false; + const Eterm scrpm = (mm->reserve_physical == reserve_noop) ? am_true : am_false; + Eterm res = THE_NON_VALUE; + + if (print_to_p) { + int to = *print_to_p; + void *arg = print_to_arg; + erts_print(to, arg, "%sscs: %bpu\n", prefix, scs); + if (mm->supercarrier) { + erts_print(to, arg, "%ssco: %T\n", prefix, sco); + erts_print(to, arg, "%sscrpm: %T\n", prefix, scrpm); + erts_print(to, arg, "%sscrfsd: %beu\n", prefix, mm->desc.reserved); + } + } + + if (hpp || szp) { + if (!am.is_initialized) { + init_atoms(); + } + + res = NIL; + if (mm->supercarrier) { + add_2tup(hpp, szp, &res, am.scrfsd, + erts_bld_uint(hpp,szp, mm->desc.reserved)); + add_2tup(hpp, szp, &res, am.scrpm, scrpm); + add_2tup(hpp, szp, &res, am.sco, sco); + } + add_2tup(hpp, szp, &res, am.scs, erts_bld_uword(hpp, szp, scs)); + } + return res; +} + +#endif /* HAVE_ERTS_MMAP */ + +Eterm erts_mmap_debug_info(Process* p) +{ +#if HAVE_ERTS_MMAP + ErtsMemMapper* mm = &erts_dflt_mmapper; + + if (mm->supercarrier) { + ERTS_DECL_AM(sabot); + ERTS_DECL_AM(satop); + ERTS_DECL_AM(suabot); + ERTS_DECL_AM(suatop); + Eterm sa_list, sua_list, list; + Eterm tags[] = { AM_sabot, AM_satop, AM_suabot, AM_suatop }; + UWord values[4]; + Eterm *hp, *hp_end; + Uint may_need; + + erts_smp_mtx_lock(&mm->mtx); + values[0] = (UWord)mm->sa.bot; + values[1] = (UWord)mm->sa.top; + values[2] = (UWord)mm->sua.bot; + values[3] = (UWord)mm->sua.top; + sa_list = build_free_seg_list(p, &mm->sa.map); + sua_list = build_free_seg_list(p, &mm->sua.map); + erts_smp_mtx_unlock(&mm->mtx); + + may_need = 4*(2+3+2) + 2*(2+3); + hp = HAlloc(p, may_need); + hp_end = hp + may_need; + + list = erts_bld_atom_uword_2tup_list(&hp, NULL, + sizeof(values)/sizeof(*values), + tags, values); + + sa_list = TUPLE2(hp, am_atom_put("sa_free_segs",12), sa_list); hp+=3; + sua_list = TUPLE2(hp, am_atom_put("sua_free_segs",13), sua_list); hp+=3; + list = CONS(hp, sua_list, list); hp+=2; + list = CONS(hp, sa_list, list); hp+=2; + + ASSERT(hp <= hp_end); + HRelease(p, hp_end, hp); + return list; + } +#endif + return am_undefined; +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\ + * Debug functions * +\* */ + + +#ifdef HARD_DEBUG + +static int rbt_assert_is_member(RBTNode* root, RBTNode* node) +{ + while (node != root) { + RBT_ASSERT(parent(node)); + RBT_ASSERT(parent(node)->left == node || parent(node)->right == node); + node = parent(node); + } + return 1; +} + + +#if 0 +# define PRINT_TREE +#else +# undef PRINT_TREE +#endif + +#ifdef PRINT_TREE +static void 
print_tree(enum SortOrder order, RBTNode*); +#endif + +/* + * Checks that the order between parent and children is correct, + * and that the Red-Black Tree properties are satisfied. If size > 0, + * check_tree() returns the node that satisfies "address order first fit". + * + * The Red-Black Tree properties are: + * 1. Every node is either red or black. + * 2. Every leaf (NIL) is black. + * 3. If a node is red, then both its children are black. + * 4. Every simple path from a node to a descendant leaf + * contains the same number of black nodes. + * + */ + +struct check_arg_t { + RBTree* tree; + ErtsFreeSegDesc* prev_seg; + Uint size; + RBTNode *res; +}; +static void check_node_callback(RBTNode* x, void* arg); + + +static RBTNode * +check_tree(RBTree* tree, Uint size) +{ + struct check_arg_t carg; + carg.tree = tree; + carg.prev_seg = NULL; + carg.size = size; + carg.res = NULL; + +#ifdef PRINT_TREE + print_tree(tree->order, tree->root); +#endif + + if (!tree->root) + return NULL; + + RBT_ASSERT(IS_BLACK(tree->root)); + RBT_ASSERT(!parent(tree->root)); + + rbt_foreach_node(tree, check_node_callback, &carg, 0); + + return carg.res; +} + +static void check_node_callback(RBTNode* x, void* arg) +{ + struct check_arg_t* a = (struct check_arg_t*) arg; + ErtsFreeSegDesc* seg; + + if (IS_RED(x)) { + RBT_ASSERT(IS_BLACK(x->right)); + RBT_ASSERT(IS_BLACK(x->left)); + } + + RBT_ASSERT(parent(x) || x == a->tree->root); + + if (x->left) { + RBT_ASSERT(cmp_nodes(a->tree->order, x->left, x) < 0); + } + if (x->right) { + RBT_ASSERT(cmp_nodes(a->tree->order, x->right, x) > 0); + } + + seg = node_to_desc(a->tree->order, x); + RBT_ASSERT(seg->start < seg->end); + if (a->size && (seg->end - seg->start) >= a->size) { + if (!a->res || cmp_nodes(a->tree->order, x, a->res) < 0) { + a->res = x; + } + } + if (a->tree->order == ADDR_ORDER) { + RBT_ASSERT(!a->prev_seg || a->prev_seg->end < seg->start); + a->prev_seg = seg; + } +} + +#endif /* HARD_DEBUG */ + + +#ifdef PRINT_TREE +#define INDENT_STEP 2 + +#include <stdio.h> + +static void +print_tree_aux(enum SortOrder order, RBTNode *x, int indent) +{ + int i; + + if (x) { + ErtsFreeSegDesc* desc = node_to_desc(order, x); + print_tree_aux(order, x->right, indent + INDENT_STEP); + for (i = 0; i < indent; i++) { + putc(' ', stderr); + } + fprintf(stderr, "%s: sz=%lx [%p - %p] desc=%p\r\n", + IS_BLACK(x) ?
"BLACK" : "RED", + desc->end - desc->start, desc->start, desc->end, desc); + print_tree_aux(order, x->left, indent + INDENT_STEP); + } +} + + +static void +print_tree(enum SortOrder order, RBTNode* root) +{ + fprintf(stderr, " --- %s ordered tree begin ---\r\n", sort_order_names[order]); + print_tree_aux(order, root, 0); + fprintf(stderr, " --- %s ordered tree end ---\r\n", sort_order_names[order]); +} + +#endif /* PRINT_TREE */ + + +#ifdef FREE_SEG_API_SMOKE_TEST + +void test_it(void) +{ + ErtsFreeSegMap map; + ErtsFreeSegDesc *desc, *under, *over, *d1, *d2; + const int i = 1; /* reverse addr order */ + + { + init_free_seg_map(&map, SZ_REVERSE_ADDR_ORDER); + + insert_free_seg(&map, alloc_desc(), (char*)0x11000, (char*)0x12000); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + insert_free_seg(&map, alloc_desc(), (char*)0x13000, (char*)0x14000); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + insert_free_seg(&map, alloc_desc(), (char*)0x15000, (char*)0x17000); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + insert_free_seg(&map, alloc_desc(), (char*)0x8000, (char*)0x10000); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + + desc = lookup_free_seg(&map, 0x500); + ERTS_ASSERT(desc->start == (char*)(i?0x13000L:0x11000L)); + + desc = lookup_free_seg(&map, 0x1500); + ERTS_ASSERT(desc->start == (char*)0x15000); + + adjacent_free_seg(&map, (char*)0x6666, (char*)0x7777, &under, &over); + ERTS_ASSERT(!under && !over); + + adjacent_free_seg(&map, (char*)0x6666, (char*)0x8000, &under, &over); + ERTS_ASSERT(!under && over->start == (char*)0x8000); + + adjacent_free_seg(&map, (char*)0x10000, (char*)0x10500, &under, &over); + ERTS_ASSERT(under->end == (char*)0x10000 && !over); + + adjacent_free_seg(&map, (char*)0x10100, (char*)0x10500, &under, &over); + ERTS_ASSERT(!under && !over); + + adjacent_free_seg(&map, (char*)0x10100, (char*)0x11000, &under, &over); + ERTS_ASSERT(!under && over && over->start == (char*)0x11000); + + adjacent_free_seg(&map, (char*)0x12000, (char*)0x12500, &under, &over); + ERTS_ASSERT(under && under->end == (char*)0x12000 && !over); + + adjacent_free_seg(&map, (char*)0x12000, (char*)0x13000, &under, &over); + ERTS_ASSERT(under && under->end == (char*)0x12000 && + over && over->start == (char*)0x13000); + + adjacent_free_seg(&map, (char*)0x12500, (char*)0x13000, &under, &over); + ERTS_ASSERT(!under && over && over->start == (char*)0x13000); + + d1 = lookup_free_seg(&map, 0x500); + ERTS_ASSERT(d1->start == (char*)(i?0x13000L:0x11000L)); + + resize_free_seg(&map, d1, d1->start - 0x800, (char*)d1->end); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + + d2 = lookup_free_seg(&map, 0x1200); + ERTS_ASSERT(d2 == d1); + + delete_free_seg(&map, d1); + HARD_CHECK_TREE(&map.atree, 0); HARD_CHECK_TREE(&map.stree, 0); + + d1 = lookup_free_seg(&map, 0x1200); + ERTS_ASSERT(d1->start == (char*)0x15000); + } +} + +#endif /* FREE_SEG_API_SMOKE_TEST */ + + +#ifdef HARD_DEBUG_MSEG + +/* + * Debug stuff used by erl_mseg to check that it does the right thing. + * The reason for keeping it here is that we (ab)use the rb-tree code + * for keeping track of *allocated* segments. 
+ */ + +typedef struct ErtsFreeSegDesc_fake_ { + /*RBTNode snode; Save memory by skipping unused size tree node */ + RBTNode anode; /* node in 'atree' */ + union { + char* start; + struct ErtsFreeSegDesc_fake_* next_free; + }u; + char* end; +}ErtsFreeSegDesc_fake; + +static ErtsFreeSegDesc_fake hard_dbg_mseg_desc_pool[10000]; +static ErtsFreeSegDesc_fake* hard_dbg_mseg_desc_first; +RBTree hard_dbg_mseg_tree; + +static erts_mtx_t hard_dbg_mseg_mtx; + +static void hard_dbg_mseg_init(void) +{ + ErtsFreeSegDesc_fake* p; + + erts_mtx_init(&hard_dbg_mseg_mtx, "hard_dbg_mseg"); + hard_dbg_mseg_tree.root = NULL; + hard_dbg_mseg_tree.order = ADDR_ORDER; + + p = &hard_dbg_mseg_desc_pool[(sizeof(hard_dbg_mseg_desc_pool) / + sizeof(*hard_dbg_mseg_desc_pool)) - 1]; + p->u.next_free = NULL; + while (--p >= hard_dbg_mseg_desc_pool) { + p->u.next_free = (p+1); + } + hard_dbg_mseg_desc_first = &hard_dbg_mseg_desc_pool[0]; +} + +static ErtsFreeSegDesc* hard_dbg_alloc_desc(void) +{ + ErtsFreeSegDesc_fake* p = hard_dbg_mseg_desc_first; + ERTS_ASSERT(p || !"HARD_DEBUG_MSEG: Out of mseg descriptors"); + hard_dbg_mseg_desc_first = p->u.next_free; + + /* Creative pointer arithmetic to return something that looks like + * a ErtsFreeSegDesc as long as we don't use the absent 'snode'. + */ + return (ErtsFreeSegDesc*) ((char*)p - offsetof(ErtsFreeSegDesc,anode)); +} + +static void hard_dbg_free_desc(ErtsFreeSegDesc* desc) +{ + ErtsFreeSegDesc_fake* p = (ErtsFreeSegDesc_fake*) &desc->anode; + memset(p, 0xfe, sizeof(*p)); + p->u.next_free = hard_dbg_mseg_desc_first; + hard_dbg_mseg_desc_first = p; +} + +static void check_seg_writable(void* seg, UWord sz) +{ + UWord* seg_end = (UWord*)((char*)seg + sz); + volatile UWord* p; + ERTS_ASSERT(ERTS_IS_PAGEALIGNED(seg)); + ERTS_ASSERT(ERTS_IS_PAGEALIGNED(sz)); + for (p=(UWord*)seg; p<seg_end; p += (ERTS_INV_PAGEALIGNED_MASK+1)/sizeof(UWord)) { + UWord write_back = *p; + *p = 0xfade2b1acc; + *p = write_back; + } +} + +void hard_dbg_insert_mseg(void* seg, UWord sz) +{ + check_seg_writable(seg, sz); + erts_mtx_lock(&hard_dbg_mseg_mtx); + { + ErtsFreeSegDesc *desc = hard_dbg_alloc_desc(); + RBTNode *prev, *next; + desc->start = (char*)seg; + desc->end = desc->start + sz - 1; /* -1 to allow adjacent segments in tree */ + rbt_insert(&hard_dbg_mseg_tree, &desc->anode); + prev = rbt_prev_node(&desc->anode); + next = rbt_next_node(&desc->anode); + ERTS_ASSERT(!prev || anode_to_desc(prev)->end < desc->start); + ERTS_ASSERT(!next || anode_to_desc(next)->start > desc->end); + } + erts_mtx_unlock(&hard_dbg_mseg_mtx); +} + +static ErtsFreeSegDesc* hard_dbg_lookup_seg_at(RBTree* tree, char* start) +{ + RBTNode* x = tree->root; + + while (x) { + ErtsFreeSegDesc* desc = anode_to_desc(x); + if (start < desc->start) { + x = x->left; + } + else if (start > desc->start) { + ERTS_ASSERT(start > desc->end); + x = x->right; + } + else + return desc; + } + return NULL; +} + +void hard_dbg_remove_mseg(void* seg, UWord sz) +{ + check_seg_writable(seg, sz); + erts_mtx_lock(&hard_dbg_mseg_mtx); + { + ErtsFreeSegDesc* desc = hard_dbg_lookup_seg_at(&hard_dbg_mseg_tree, (char*)seg); + ERTS_ASSERT(desc); + ERTS_ASSERT(desc->start == (char*)seg); + ERTS_ASSERT(desc->end == (char*)seg + sz - 1); + + rbt_delete(&hard_dbg_mseg_tree, &desc->anode); + hard_dbg_free_desc(desc); + } + erts_mtx_unlock(&hard_dbg_mseg_mtx); +} + +#endif /* HARD_DEBUG_MSEG */ diff --git a/erts/emulator/sys/common/erl_mmap.h b/erts/emulator/sys/common/erl_mmap.h new file mode 100644 index 0000000000..fa51b663fa --- /dev/null +++ 
b/erts/emulator/sys/common/erl_mmap.h
@@ -0,0 +1,181 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2013-2016. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_MMAP_H__
+#define ERL_MMAP_H__
+
+#include "sys.h"
+
+#define ERTS_MMAP_SUPERALIGNED_BITS (18)
+/* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
+
+#ifndef HAVE_MMAP
+#  define HAVE_MMAP 0
+#endif
+#ifndef HAVE_MREMAP
+#  define HAVE_MREMAP 0
+#endif
+#if HAVE_MMAP
+#  define ERTS_HAVE_OS_MMAP 1
+#  define ERTS_HAVE_GENUINE_OS_MMAP 1
+#  if HAVE_MREMAP
+#    define ERTS_HAVE_OS_MREMAP 1
+#  endif
+/*
+ * MAP_NORESERVE is undefined in FreeBSD 10.x and later.
+ * This is to enable 64bit HiPE experimentally on FreeBSD.
+ * Note that on FreeBSD MAP_NORESERVE was "never implemented"
+ * even before 11.x (and the flag does not exist in /usr/src/sys/vm/mmap.c
+ * of 10.3-STABLE r301478 either), and HiPE was working on OTP 18.3.3,
+ * so mandating MAP_NORESERVE on FreeBSD might not be needed.
+ * See the following message on how MAP_NORESERVE was treated on FreeBSD:
+ * <http://lists.llvm.org/pipermail/cfe-commits/Week-of-Mon-20150202/122958.html>
+ */
+#  if defined(MAP_FIXED) && (defined(MAP_NORESERVE) || defined(__FreeBSD__))
+#    define ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION 1
+#  endif
+#endif
+
+#ifndef HAVE_VIRTUALALLOC
+#  define HAVE_VIRTUALALLOC 0
+#endif
+#if HAVE_VIRTUALALLOC
+#  define ERTS_HAVE_OS_MMAP 1
+#endif
+
+#ifdef ERTS_HAVE_GENUINE_OS_MMAP
+#  define HAVE_ERTS_MMAP 1
+#else
+#  define HAVE_ERTS_MMAP 0
+#endif
+
+
+extern UWord erts_page_inv_mask;
+
+typedef struct {
+    struct {
+        char *start;
+        char *end;
+    } virtual_range;
+    struct {
+        char *start;
+        char *end;
+    } predefined_area;
+    UWord scs;     /* super carrier size */
+    int sco;       /* super carrier only? */
+    UWord scrfsd;  /* super carrier reserved free segment descriptors */
+    int scrpm;     /* super carrier reserve physical memory */
+}ErtsMMapInit;

+#define ERTS_MMAP_INIT_DEFAULT_INITER \
+    {{NULL, NULL}, {NULL, NULL}, 0, 1, (1 << 16), 1}
+
+#define ERTS_LITERAL_VIRTUAL_AREA_SIZE (UWORD_CONSTANT(1)*1024*1024*1024)
+
+#define ERTS_MMAP_INIT_LITERAL_INITER \
+    {{NULL, NULL}, {NULL, NULL}, ERTS_LITERAL_VIRTUAL_AREA_SIZE, 1, (1 << 10), 0}
+
+#define ERTS_HIPE_EXEC_VIRTUAL_AREA_SIZE (UWORD_CONSTANT(512)*1024*1024)
+
+#define ERTS_MMAP_INIT_HIPE_EXEC_INITER \
+    {{NULL, NULL}, {NULL, NULL}, ERTS_HIPE_EXEC_VIRTUAL_AREA_SIZE, 1, (1 << 10), 0}
+
+
+#define ERTS_SUPERALIGNED_SIZE \
+    (1 << ERTS_MMAP_SUPERALIGNED_BITS)
+#define ERTS_INV_SUPERALIGNED_MASK \
+    ((UWord) (ERTS_SUPERALIGNED_SIZE - 1))
+#define ERTS_SUPERALIGNED_MASK \
+    (~ERTS_INV_SUPERALIGNED_MASK)
+#define ERTS_SUPERALIGNED_FLOOR(X) \
+    (((UWord) (X)) & ERTS_SUPERALIGNED_MASK)
+#define ERTS_SUPERALIGNED_CEILING(X) \
+    ERTS_SUPERALIGNED_FLOOR((X) + ERTS_INV_SUPERALIGNED_MASK)
+#define ERTS_IS_SUPERALIGNED(X) \
+    (((UWord) (X) & ERTS_INV_SUPERALIGNED_MASK) == 0)
+
+#define ERTS_INV_PAGEALIGNED_MASK \
+    (erts_page_inv_mask)
+#define ERTS_PAGEALIGNED_MASK \
+    (~ERTS_INV_PAGEALIGNED_MASK)
+#define ERTS_PAGEALIGNED_FLOOR(X) \
+    (((UWord) (X)) & ERTS_PAGEALIGNED_MASK)
+#define ERTS_PAGEALIGNED_CEILING(X) \
+    ERTS_PAGEALIGNED_FLOOR((X) + ERTS_INV_PAGEALIGNED_MASK)
+#define ERTS_IS_PAGEALIGNED(X) \
+    (((UWord) (X) & ERTS_INV_PAGEALIGNED_MASK) == 0)
+#define ERTS_PAGEALIGNED_SIZE \
+    (ERTS_INV_PAGEALIGNED_MASK + 1)
+
+struct process;
+Eterm erts_mmap_debug_info(struct process*);
+
+#if HAVE_ERTS_MMAP
+
+typedef struct ErtsMemMapper_ ErtsMemMapper;
+
+#define ERTS_MMAPFLG_OS_ONLY           (((Uint32) 1) << 0)
+#define ERTS_MMAPFLG_SUPERCARRIER_ONLY (((Uint32) 1) << 1)
+#define ERTS_MMAPFLG_SUPERALIGNED      (((Uint32) 1) << 2)
+
+void *erts_mmap(ErtsMemMapper*, Uint32 flags, UWord *sizep);
+void erts_munmap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord size);
+void *erts_mremap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord old_size, UWord *sizep);
+int erts_mmap_in_supercarrier(ErtsMemMapper*, void *ptr);
+void erts_mmap_init(ErtsMemMapper*, ErtsMMapInit*, int executable);
+struct erts_mmap_info_struct
+{
+    UWord sizes[6];
+    UWord segs[6];
+    UWord os_used;
+};
+Eterm erts_mmap_info(ErtsMemMapper*, int *print_to_p, void *print_to_arg,
+                     Eterm** hpp, Uint* szp, struct erts_mmap_info_struct*);
+Eterm erts_mmap_info_options(ErtsMemMapper*,
+                             char *prefix, int *print_to_p, void *print_to_arg,
+                             Uint **hpp, Uint *szp);
+
+
+#ifdef ERTS_WANT_MEM_MAPPERS
+#  include "erl_alloc_types.h"
+
+extern ErtsMemMapper erts_dflt_mmapper;
+#  if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+extern ErtsMemMapper erts_literal_mmapper;
+#  endif
+#  ifdef ERTS_ALC_A_EXEC
+extern ErtsMemMapper erts_exec_mmapper;
+#  endif
+#endif /* ERTS_WANT_MEM_MAPPERS */
+
+/*#define HARD_DEBUG_MSEG*/
+#ifdef HARD_DEBUG_MSEG
+#  define HARD_DBG_INSERT_MSEG hard_dbg_insert_mseg
+#  define HARD_DBG_REMOVE_MSEG hard_dbg_remove_mseg
+void hard_dbg_insert_mseg(void* seg, UWord sz);
+void hard_dbg_remove_mseg(void* seg, UWord sz);
+#else
+#  define HARD_DBG_INSERT_MSEG(SEG,SZ)
+#  define HARD_DBG_REMOVE_MSEG(SEG,SZ)
+#endif
+
+#endif /* HAVE_ERTS_MMAP */
+
+#endif /* ERL_MMAP_H__ */
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 2748edba02..f3306a888c 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
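Before the erl_mseg.c hunks, it may help to see the header's size rounding in isolation. A minimal standalone sketch; the SA_* names are local stand-ins for the ERTS_SUPERALIGNED_* macros above with the 18-bit setting, not ERTS symbols:

    #include <assert.h>
    #include <stdio.h>

    /* Stand-ins for ERTS_SUPERALIGNED_{FLOOR,CEILING} with
     * ERTS_MMAP_SUPERALIGNED_BITS == 18, i.e. 256 KiB units. */
    #define SA_SIZE       (1ul << 18)
    #define SA_INV_MASK   (SA_SIZE - 1)
    #define SA_MASK       (~SA_INV_MASK)
    #define SA_FLOOR(X)   ((unsigned long)(X) & SA_MASK)
    #define SA_CEILING(X) SA_FLOOR((unsigned long)(X) + SA_INV_MASK)

    int main(void)
    {
        /* A 300 KiB request rounds up to 512 KiB (two 256 KiB units);
         * erts_mmap() reports the size actually mapped back via *sizep. */
        unsigned long size = SA_CEILING(300ul * 1024);
        assert(size == 512ul * 1024);
        assert(SA_FLOOR(size - 1) == 256ul * 1024);  /* floor rounds down */
        printf("%lu\n", size);
        return 0;
    }

In the hunks that follow, mseg_create() sets ERTS_MMAPFLG_SUPERALIGNED in the flags passed to erts_mmap() exactly when the segment is a power-of-two (2pow) carrier.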
@@ -1,18 +1,19 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2002-2013. All Rights Reserved. + * Copyright Ericsson AB 2002-2016. All Rights Reserved. * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * %CopyrightEnd% */ @@ -30,6 +31,7 @@ # include "config.h" #endif +#define ERTS_WANT_MEM_MAPPERS #include "sys.h" #include "erl_mseg.h" #include "global.h" @@ -98,56 +100,12 @@ static const int debruijn[32] = { static int atoms_initialized; -typedef struct mem_kind_t MemKind; - -#if HALFWORD_HEAP -static int initialize_pmmap(void); -static void *pmmap(size_t size); -static int pmunmap(void *p, size_t size); -static void *pmremap(void *old_address, size_t old_size, - size_t new_size); -#endif - -#if HAVE_MMAP -/* Mmap ... 
*/ - -#define MMAP_PROT (PROT_READ|PROT_WRITE) - - -#ifdef MAP_ANON -# define MMAP_FLAGS (MAP_ANON|MAP_PRIVATE) -# define MMAP_FD (-1) -#else -# define MMAP_FLAGS (MAP_PRIVATE) -# define MMAP_FD mmap_fd -static int mmap_fd; -#endif - -#if HAVE_MREMAP -# define HAVE_MSEG_RECREATE 1 -#else -# define HAVE_MSEG_RECREATE 0 -#endif - -#if HALFWORD_HEAP -#define CAN_PARTLY_DESTROY 0 -#else -#define CAN_PARTLY_DESTROY 1 -#endif -#else /* #if HAVE_MMAP */ -#define CAN_PARTLY_DESTROY 0 -#error "Not supported" -#endif /* #if HAVE_MMAP */ - const ErtsMsegOpt_t erts_mseg_default_opt = { 1, /* Use cache */ 1, /* Preserv data */ 0, /* Absolute shrink threshold */ 0, /* Relative shrink threshold */ 0 /* Scheduler specific */ -#if HALFWORD_HEAP - ,0 /* need low memory */ -#endif }; @@ -163,9 +121,7 @@ typedef struct { CallCounter create; CallCounter create_resize; CallCounter destroy; -#if HAVE_MSEG_RECREATE CallCounter recreate; -#endif CallCounter clear_cache; CallCounter check_cache; } ErtsMsegCalls; @@ -173,7 +129,7 @@ typedef struct { typedef struct cache_t_ cache_t; struct cache_t_ { - Uint size; + UWord size; void *seg; cache_t *next; cache_t *prev; @@ -182,7 +138,14 @@ struct cache_t_ { typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t; -struct mem_kind_t { +struct ErtsMsegAllctr_t_ { + int ix; + + int is_init_done; + int is_thread_safe; + erts_mtx_t mtx; + + int is_cache_check_scheduled; cache_t cache[MAX_CACHE_SIZE]; cache_t cache_unpowered_node; @@ -208,39 +171,11 @@ struct mem_kind_t { } max_ever; } segments; - ErtsMsegAllctr_t *ma; - const char* name; - MemKind* next; -};/*MemKind*/ - -struct ErtsMsegAllctr_t_ { - int ix; - - int is_init_done; - int is_thread_safe; - erts_mtx_t mtx; - - int is_cache_check_scheduled; - - MemKind* mk_list; - -#if HALFWORD_HEAP - MemKind low_mem; - MemKind hi_mem; -#else - MemKind the_mem; -#endif - Uint max_cache_size; Uint abs_max_cache_bad_fit; Uint rel_max_cache_bad_fit; ErtsMsegCalls calls; - -#if CAN_PARTLY_DESTROY - Uint min_seg_size; -#endif - }; typedef union { @@ -344,69 +279,26 @@ schedule_cache_check(ErtsMsegAllctr_t *ma) { } } -/* remove ErtsMsegAllctr_t from arguments? 
- * only used for statistics - */ -static ERTS_INLINE void * -mmap_align(ErtsMsegAllctr_t *ma, void *addr, size_t length, int prot, int flags, int fd, off_t offset) { - - char *p, *q; - UWord d; - - p = mmap(addr, length, prot, flags, fd, offset); - - if (MAP_IS_ALIGNED(p) || p == MAP_FAILED) - return p; - - if (ma) - INC_CC(ma, create_resize); - - munmap(p, length); - - if ((p = mmap(addr, length + MSEG_ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED) - return MAP_FAILED; - - q = (void *)ALIGNED_CEILING((char *)p); - d = (UWord)(q - p); - - if (d > 0) - munmap(p, d); - - if (MSEG_ALIGNED_SIZE - d > 0) - munmap((void *)(q + length), MSEG_ALIGNED_SIZE - d); - - return q; -} +/* #define ERTS_PRINT_ERTS_MMAP */ static ERTS_INLINE void * -mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) +mseg_create(ErtsMsegAllctr_t *ma, Uint flags, UWord *sizep) { - void *seg; - ASSERT(size % MSEG_ALIGNED_SIZE == 0); - -#if HALFWORD_HEAP - if (mk == &ma->low_mem) { - seg = pmmap(size); - if ((unsigned long) seg & CHECK_POINTER_MASK) { - erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg); - return NULL; - } - } else +#ifdef ERTS_PRINT_ERTS_MMAP + UWord req_size = *sizep; #endif - { -#if HAVE_MMAP - { - seg = (void *) mmap_align(ma, (void *) 0, (size_t) size, - MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0); - if (seg == (void *) MAP_FAILED) - seg = NULL; - - ASSERT(MAP_IS_ALIGNED(seg) || !seg); - } -#else -# error "Missing mseg_create() implementation" + void *seg; + Uint32 mmap_flags = 0; + if (MSEG_FLG_IS_2POW(flags)) + mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; + + seg = erts_mmap(&erts_dflt_mmapper, mmap_flags, sizep); + +#ifdef ERTS_PRINT_ERTS_MMAP + erts_fprintf(stderr, "%p = erts_mmap(%s, {%bpu, %bpu});\n", seg, + (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua", + req_size, *sizep); #endif - } INC_CC(ma, create); @@ -414,91 +306,45 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) } static ERTS_INLINE void -mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) { - ERTS_DECLARE_DUMMY(int res); - -#if HALFWORD_HEAP - if (mk == &ma->low_mem) { - res = pmunmap((void *) seg, size); - } - else -#endif - { -#ifdef HAVE_MMAP - res = munmap((void *) seg, size); -#else -# error "Missing mseg_destroy() implementation" +mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, void *seg_p, UWord size) { + + Uint32 mmap_flags = 0; + if (MSEG_FLG_IS_2POW(flags)) + mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; + + erts_munmap(&erts_dflt_mmapper, mmap_flags, seg_p, size); +#ifdef ERTS_PRINT_ERTS_MMAP + erts_fprintf(stderr, "erts_munmap(%s, %p, %bpu);\n", + (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua", + seg_p, *size); #endif - } - - ASSERT(size % MSEG_ALIGNED_SIZE == 0); - ASSERT(res == 0); - INC_CC(ma, destroy); } -#if HAVE_MSEG_RECREATE -#if defined(__NetBSD__) -#define MREMAP_FLAGS (0) -#else -#define MREMAP_FLAGS (MREMAP_MAYMOVE) -#endif - - -/* mseg_recreate - * May return *unaligned* segments as in address not aligned to MSEG_ALIGNMENT - * it is still page aligned - * - * This is fine for single block carriers as long as we don't cache misaligned - * segments (since multiblock carriers may use them) - * - * For multiblock carriers we *need* MSEG_ALIGNMENT but mbc's will never be - * reallocated. 
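 * (For illustration in the new erl_mmap.h vocabulary: such a segment
 *  satisfies ERTS_IS_PAGEALIGNED(seg) but not necessarily
 *  ERTS_IS_SUPERALIGNED(seg).)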
- * - * This should probably be fixed the following way: - * 1) Use an option to segment allocation - NEED_ALIGNMENT - * 2) Add mremap_align which takes care of aligning a new a mremaped area - * 3) Fix the cache to handle of aligned and unaligned segments - */ - static ERTS_INLINE void * -mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size) +mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, void *old_seg, UWord old_size, UWord *sizep) { +#ifdef ERTS_PRINT_ERTS_MMAP + UWord req_size = *sizep; +#endif void *new_seg; + Uint32 mmap_flags = 0; + if (MSEG_FLG_IS_2POW(flags)) + mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; - ASSERT(old_size % MSEG_ALIGNED_SIZE == 0); - ASSERT(new_size % MSEG_ALIGNED_SIZE == 0); + new_seg = erts_mremap(&erts_dflt_mmapper, mmap_flags, old_seg, old_size, sizep); -#if HALFWORD_HEAP - if (mk == &ma->low_mem) { - new_seg = (void *) pmremap((void *) old_seg, - (size_t) old_size, - (size_t) new_size); - } - else -#endif - { -#if HAVE_MREMAP -#if defined(__NetBSD__) - new_seg = mremap(old_seg, (size_t)old_size, NULL, new_size, MREMAP_FLAGS); -#else - new_seg = mremap(old_seg, (size_t)old_size, (size_t)new_size, MREMAP_FLAGS); +#ifdef ERTS_PRINT_ERTS_MMAP + erts_fprintf(stderr, "%p = erts_mremap(%s, %p, %bpu, {%bpu, %bpu});\n", + new_seg, (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? "sa" : "sua", + old_seg, old_size, req_size, *sizep); #endif - if (new_seg == (void *) MAP_FAILED) - new_seg = NULL; -#else -#error "Missing mseg_recreate() implementation" -#endif - } - INC_CC(ma, recreate); return new_seg; } -#endif /* #if HAVE_MSEG_RECREATE */ - #ifdef DEBUG #define ERTS_DBG_MA_CHK_THR_ACCESS(MA) \ do { \ @@ -511,11 +357,8 @@ do { \ || erts_smp_thr_progress_is_blocking() \ || ERTS_IS_CRASH_DUMPING); \ } while (0) -#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) \ - ERTS_DBG_MA_CHK_THR_ACCESS((MK)->ma) #else #define ERTS_DBG_MA_CHK_THR_ACCESS(MA) -#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) #endif /* Cache interface */ @@ -528,10 +371,10 @@ static ERTS_INLINE void mseg_cache_clear_node(cache_t *c) { c->prev = c; } -static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Uint flags) { +static ERTS_INLINE int cache_bless_segment(ErtsMsegAllctr_t *ma, void *seg, UWord size, Uint flags) { cache_t *c; - ERTS_DBG_MK_CHK_THR_ACCESS(mk); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); ASSERT(!MSEG_FLG_IS_2POW(flags) || (MSEG_FLG_IS_2POW(flags) && MAP_IS_ALIGNED(seg) && IS_2POW(size))); @@ -540,11 +383,11 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui * Large blocks has no such cache and it is up to mseg to cache them to speed things up. 
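 *
 * For illustration: a power-of-two segment of size (1 << (ix + MSEG_ALIGN_BITS))
 * lands in powered bucket ix, cf. the ASSERT just below. With MSEG_ALIGN_BITS
 * at 18 (its value via ERTS_MMAP_SUPERALIGNED_BITS), a 256 KiB segment goes
 * to bucket 0 and a 1 MiB segment to bucket 2.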
*/ - if (!erts_circleq_is_empty(&(mk->cache_free))) { + if (!erts_circleq_is_empty(&(ma->cache_free))) { /* We have free slots, use one to cache the segment */ - c = erts_circleq_head(&(mk->cache_free)); + c = erts_circleq_head(&(ma->cache_free)); erts_circleq_remove(c); c->seg = seg; @@ -556,30 +399,28 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui ASSERT(ix < CACHE_AREAS); ASSERT((1 << (ix + MSEG_ALIGN_BITS)) == size); - erts_circleq_push_head(&(mk->cache_powered_node[ix]), c); + erts_circleq_push_head(&(ma->cache_powered_node[ix]), c); } else - erts_circleq_push_head(&(mk->cache_unpowered_node), c); + erts_circleq_push_head(&(ma->cache_unpowered_node), c); - mk->cache_size++; - ASSERT(mk->cache_size <= mk->ma->max_cache_size); + ma->cache_size++; return 1; - } else if (!MSEG_FLG_IS_2POW(flags) && !erts_circleq_is_empty(&(mk->cache_unpowered_node))) { - + } else if (!MSEG_FLG_IS_2POW(flags) && !erts_circleq_is_empty(&(ma->cache_unpowered_node))) { /* No free slots. * Evict oldest slot from unpowered cache so we can cache an unpowered (sbc) segment */ - c = erts_circleq_tail(&(mk->cache_unpowered_node)); + c = erts_circleq_tail(&(ma->cache_unpowered_node)); erts_circleq_remove(c); - mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_destroy(ma, ERTS_MSEG_FLG_NONE, c->seg, c->size); mseg_cache_clear_node(c); c->seg = seg; c->size = size; - erts_circleq_push_head(&(mk->cache_unpowered_node), c); + erts_circleq_push_head(&(ma->cache_unpowered_node), c); return 1; } else if (!MSEG_FLG_IS_2POW(flags)) { @@ -593,19 +434,20 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui int i; for( i = 0; i < CACHE_AREAS; i++) { - if (erts_circleq_is_empty(&(mk->cache_powered_node[i]))) + if (erts_circleq_is_empty(&(ma->cache_powered_node[i]))) continue; - c = erts_circleq_tail(&(mk->cache_powered_node[i])); + c = erts_circleq_tail(&(ma->cache_powered_node[i])); erts_circleq_remove(c); - mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_destroy(ma, ERTS_MSEG_FLG_2POW, c->seg, c->size); + mseg_cache_clear_node(c); c->seg = seg; c->size = size; - erts_circleq_push_head(&(mk->cache_unpowered_node), c); + erts_circleq_push_head(&(ma->cache_unpowered_node), c); return 1; } @@ -614,61 +456,60 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size, Ui return 0; } -static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags) { +static ERTS_INLINE void *cache_get_segment(ErtsMsegAllctr_t *ma, UWord *size_p, Uint flags) { - Uint size = *size_p; + UWord size = *size_p; - ERTS_DBG_MK_CHK_THR_ACCESS(mk); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); if (MSEG_FLG_IS_2POW(flags)) { int i, ix = SIZE_TO_CACHE_AREA_IDX(size); - void *seg; + char *seg; cache_t *c; - Uint csize; + UWord csize; ASSERT(IS_2POW(size)); for( i = ix; i < CACHE_AREAS; i++) { - if (erts_circleq_is_empty(&(mk->cache_powered_node[i]))) + if (erts_circleq_is_empty(&(ma->cache_powered_node[i]))) continue; - c = erts_circleq_head(&(mk->cache_powered_node[i])); + c = erts_circleq_head(&(ma->cache_powered_node[i])); erts_circleq_remove(c); ASSERT(IS_2POW(c->size)); ASSERT(MAP_IS_ALIGNED(c->seg)); csize = c->size; - seg = c->seg; + seg = (char*) c->seg; - mk->cache_size--; - mk->cache_hits++; + ma->cache_size--; + ma->cache_hits++; /* link to free cache list */ mseg_cache_clear_node(c); - erts_circleq_push_head(&(mk->cache_free), c); + erts_circleq_push_head(&(ma->cache_free), c); - ASSERT(!(mk->cache_size < 0)); + ASSERT(!(ma->cache_size < 0)); - if (csize 
!= size) { - mseg_destroy(mk->ma, mk, (char *)seg + size, csize - size); - } + if (csize != size) + mseg_destroy(ma, ERTS_MSEG_FLG_2POW, seg + size, csize - size); return seg; } } - else if (!erts_circleq_is_empty(&(mk->cache_unpowered_node))) { + else if (!erts_circleq_is_empty(&(ma->cache_unpowered_node))) { void *seg; cache_t *c; cache_t *best = NULL; - Uint bdiff = 0; - Uint csize; - Uint bad_max_abs = mk->ma->abs_max_cache_bad_fit; - Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit; + UWord bdiff = 0; + UWord csize; + UWord bad_max_abs = ma->abs_max_cache_bad_fit; + UWord bad_max_rel = ma->rel_max_cache_bad_fit; - erts_circleq_foreach(c, &(mk->cache_unpowered_node)) { + erts_circleq_foreach(c, &(ma->cache_unpowered_node)) { csize = c->size; if (csize >= size) { if (((csize - size)*100 < bad_max_rel*size) && (csize - size) < bad_max_abs ) { @@ -677,11 +518,11 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags erts_circleq_remove(c); - mk->cache_size--; - mk->cache_hits++; + ma->cache_size--; + ma->cache_hits++; mseg_cache_clear_node(c); - erts_circleq_push_head(&(mk->cache_free), c); + erts_circleq_push_head(&(ma->cache_free), c); *size_p = csize; @@ -704,7 +545,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags ASSERT(best->seg); ASSERT(best->size > 0); - mk->cache_hits++; + ma->cache_hits++; /* Use current cache placement for remaining segment space */ @@ -728,7 +569,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p, Uint flags * using callbacks from aux-work in the scheduler. */ -static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t *head) { +static ERTS_INLINE Uint mseg_drop_one_cache_size(ErtsMsegAllctr_t *ma, Uint flags, cache_t *head) { cache_t *c = NULL; c = erts_circleq_tail(head); @@ -737,19 +578,19 @@ static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t *h if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg); - mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_destroy(ma, flags, c->seg, c->size); mseg_cache_clear_node(c); - erts_circleq_push_head(&(mk->cache_free), c); + erts_circleq_push_head(&(ma->cache_free), c); - mk->segments.current.watermark--; - mk->cache_size--; + ma->segments.current.watermark--; + ma->cache_size--; - ASSERT( mk->cache_size >= 0 ); + ASSERT(ma->cache_size >= 0); - return mk->cache_size; + return ma->cache_size; } -static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t *head) { +static ERTS_INLINE Uint mseg_drop_cache_size(ErtsMsegAllctr_t *ma, Uint flags, cache_t *head) { cache_t *c = NULL; while (!erts_circleq_is_empty(head)) { @@ -760,58 +601,52 @@ static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t *head) if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg); - mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_destroy(ma, flags, c->seg, c->size); mseg_cache_clear_node(c); - erts_circleq_push_head(&(mk->cache_free), c); - - mk->segments.current.watermark--; - mk->cache_size--; + erts_circleq_push_head(&(ma->cache_free), c); + ma->segments.current.watermark--; + ma->cache_size--; } - ASSERT( mk->cache_size >= 0 ); + ASSERT(ma->cache_size >= 0); - return mk->cache_size; + return ma->cache_size; } -/* mseg_check_memkind_cache - * - Check if we can empty some cached segments in this - * MemKind. 
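As an aside on the unpowered-cache lookup in cache_get_segment() above: a cached segment may be reused only when its overshoot passes both the relative and the absolute bad-fit test. A standalone sketch using the default thresholds from erl_mseg.h (amcbf = 4 MiB, rmcbf = 20); the function and variable names here are illustrative, not ERTS symbols:

    #include <stdio.h>

    /* The bad-fit test from cache_get_segment(): accept a cached segment of
     * csize bytes for a size-byte request only if the wasted tail stays under
     * both the relative (percent) and absolute thresholds. */
    static int cache_fit_ok(unsigned long csize, unsigned long size,
                            unsigned long bad_max_abs, unsigned long bad_max_rel)
    {
        return csize >= size
            && (csize - size) * 100 < bad_max_rel * size
            && (csize - size) < bad_max_abs;
    }

    int main(void)
    {
        unsigned long abs_th = 4ul * 1024 * 1024;   /* amcbf default */
        unsigned long rel_th = 20;                  /* rmcbf default */

        /* 1100 KiB cached vs 1024 KiB wanted: ~7% overshoot, reused. */
        printf("%d\n", cache_fit_ok(1100ul * 1024, 1024ul * 1024, abs_th, rel_th));
        /* 2048 KiB cached vs 1024 KiB wanted: 100% overshoot, rejected. */
        printf("%d\n", cache_fit_ok(2048ul * 1024, 1024ul * 1024, abs_th, rel_th));
        return 0;
    }

The absolute cap presumably keeps the relative rule from wasting megabytes of tail on very large requests.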
+/* mseg_check_cache + * - Check if we can empty some cached segments in this allocator */ -static Uint mseg_check_memkind_cache(MemKind *mk) { +static Uint mseg_check_cache(ErtsMsegAllctr_t *ma) { int i; - ERTS_DBG_MK_CHK_THR_ACCESS(mk); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); for (i = 0; i < CACHE_AREAS; i++) { - if (!erts_circleq_is_empty(&(mk->cache_powered_node[i]))) - return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_powered_node[i])); + if (!erts_circleq_is_empty(&(ma->cache_powered_node[i]))) + return mseg_drop_one_cache_size(ma, ERTS_MSEG_FLG_2POW, &(ma->cache_powered_node[i])); } - if (!erts_circleq_is_empty(&(mk->cache_unpowered_node))) - return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered_node)); + if (!erts_circleq_is_empty(&(ma->cache_unpowered_node))) + return mseg_drop_one_cache_size(ma, ERTS_MSEG_FLG_NONE, &(ma->cache_unpowered_node)); return 0; } /* mseg_cache_check * - Check if we have some cache we can purge - * in any of the memkinds. */ static void mseg_cache_check(ErtsMsegAllctr_t *ma) { - MemKind* mk; Uint empty_cache = 1; ERTS_MSEG_LOCK(ma); - for (mk = ma->mk_list; mk; mk = mk->next) { - if (mseg_check_memkind_cache(mk)) - empty_cache = 0; - } + if (mseg_check_cache(ma)) + empty_cache = 0; /* If all MemKinds caches are empty, * remove aux-work callback @@ -829,7 +664,7 @@ static void mseg_cache_check(ErtsMsegAllctr_t *ma) { /* erts_mseg_cache_check * - This is a callback that is scheduled as aux-work from * schedulers and is called at some interval if we have a cache - * on this mseg-allocator and memkind. + * on this mseg-allocator. * - Purpose: Empty cache slowly so we don't collect mapped areas * and bloat memory. */ @@ -839,42 +674,32 @@ void erts_mseg_cache_check(void) { } -/* *_mseg_clear_*_cache +/* mseg_clear_cache * Remove cached segments from the allocator completely */ -static void mseg_clear_memkind_cache(MemKind *mk) { + +static void mseg_clear_cache(ErtsMsegAllctr_t *ma) { int i; + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); /* drop pow2 caches */ for (i = 0; i < CACHE_AREAS; i++) { - if (erts_circleq_is_empty(&(mk->cache_powered_node[i]))) + if (erts_circleq_is_empty(&(ma->cache_powered_node[i]))) continue; - mseg_drop_memkind_cache_size(mk, &(mk->cache_powered_node[i])); - ASSERT(erts_circleq_is_empty(&(mk->cache_powered_node[i]))); + mseg_drop_cache_size(ma, ERTS_MSEG_FLG_2POW, &(ma->cache_powered_node[i])); + ASSERT(erts_circleq_is_empty(&(ma->cache_powered_node[i]))); } /* drop varied caches */ - if (!erts_circleq_is_empty(&(mk->cache_unpowered_node))) - mseg_drop_memkind_cache_size(mk, &(mk->cache_unpowered_node)); + if (!erts_circleq_is_empty(&(ma->cache_unpowered_node))) + mseg_drop_cache_size(ma, ERTS_MSEG_FLG_NONE, &(ma->cache_unpowered_node)); - ASSERT(erts_circleq_is_empty(&(mk->cache_unpowered_node))); - ASSERT(mk->cache_size == 0); -} - -static void mseg_clear_cache(ErtsMsegAllctr_t *ma) { - MemKind* mk; - - ERTS_MSEG_LOCK(ma); - ERTS_DBG_MA_CHK_THR_ACCESS(ma); - - - for (mk = ma->mk_list; mk; mk = mk->next) { - mseg_clear_memkind_cache(mk); - } + ASSERT(erts_circleq_is_empty(&(ma->cache_unpowered_node))); + ASSERT(ma->cache_size == 0); INC_CC(ma, clear_cache); - ERTS_MSEG_UNLOCK(ma); } @@ -883,53 +708,39 @@ void erts_mseg_clear_cache(void) { mseg_clear_cache(ERTS_MSEG_ALLCTR_IX(0)); } - - -static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma, - const ErtsMsegOpt_t *opt) -{ -#if HALFWORD_HEAP - return opt->low_mem ? 
&ma->low_mem : &ma->hi_mem; -#else - return &ma->the_mem; -#endif -} - static void * -mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p, +mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, UWord *size_p, Uint flags, const ErtsMsegOpt_t *opt) { - Uint size; + UWord size; void *seg; - MemKind* mk = memkind(ma, opt); INC_CC(ma, alloc); - /* Carrier align */ - size = ALIGNED_CEILING(*size_p); - - /* Cache optim (if applicable) */ - if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(size)) - size = ceil_2pow(size); + if (!MSEG_FLG_IS_2POW(flags)) + size = ERTS_PAGEALIGNED_CEILING(*size_p); + else { + size = ALIGNED_CEILING(*size_p); + if (!IS_2POW(size)) { + /* Cache optim (if applicable) */ + size = ceil_2pow(size); + } + } -#if CAN_PARTLY_DESTROY - if (size < ma->min_seg_size) - ma->min_seg_size = size; -#endif - - if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size, flags)) != NULL) + if (opt->cache && ma->cache_size > 0 && (seg = cache_get_segment(ma, &size, flags)) != NULL) goto done; - if ((seg = mseg_create(ma, mk, size)) == NULL) - size = 0; + seg = mseg_create(ma, flags, &size); + if (!seg) + *size_p = 0; + else { done: - *size_p = size; - if (seg) { + *size_p = size; if (erts_mtrace_enabled) erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size); - ERTS_MSEG_ALLOC_STAT(mk,size); + ERTS_MSEG_ALLOC_STAT(ma,size); } return seg; @@ -937,14 +748,12 @@ done: static void -mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size, +mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, UWord size, Uint flags, const ErtsMsegOpt_t *opt) { - MemKind* mk = memkind(ma, opt); - - ERTS_MSEG_DEALLOC_STAT(mk,size); + ERTS_MSEG_DEALLOC_STAT(ma,size); - if (opt->cache && cache_bless_segment(mk, seg, size, flags)) { + if (opt->cache && cache_bless_segment(ma, seg, size, flags)) { schedule_cache_check(ma); goto done; } @@ -952,7 +761,7 @@ mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size, if (erts_mtrace_enabled) erts_mtrace_crr_free(atype, SEGTYPE, seg); - mseg_destroy(ma, mk, seg, size); + mseg_destroy(ma, flags, seg, size); done: @@ -961,11 +770,10 @@ done: static void * mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p, Uint flags, const ErtsMsegOpt_t *opt) + UWord old_size, UWord *new_size_p, Uint flags, const ErtsMsegOpt_t *opt) { - MemKind* mk; void *new_seg; - Uint new_size; + UWord new_size; /* Just allocate a new segment if we didn't have one before */ if (!seg || !old_size) { @@ -982,99 +790,55 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, return NULL; } - mk = memkind(ma, opt); new_seg = seg; - /* Carrier align */ - new_size = ALIGNED_CEILING(*new_size_p); - - /* Cache optim (if applicable) */ - if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(new_size)) - new_size = ceil_2pow(new_size); - - if (new_size == old_size) - ; - else if (new_size < old_size) { - Uint shrink_sz = old_size - new_size; + if (!MSEG_FLG_IS_2POW(flags)) + new_size = ERTS_PAGEALIGNED_CEILING(*new_size_p); + else { + new_size = ALIGNED_CEILING(*new_size_p); + if (!IS_2POW(new_size)) { + /* Cache optim (if applicable) */ + new_size = ceil_2pow(new_size); + } + } -#if CAN_PARTLY_DESTROY - if (new_size < ma->min_seg_size) - ma->min_seg_size = new_size; -#endif - /* +M<S>rsbcst <ratio> */ - if (shrink_sz < opt->abs_shrink_th - && 100*shrink_sz < opt->rel_shrink_th*old_size) { - new_size = old_size; + if (new_size > old_size) { + if (opt->preserv) { + new_seg = 
mseg_recreate(ma, flags, (void *) seg, old_size, &new_size); + if (!new_seg) + new_size = old_size; } else { - -#if CAN_PARTLY_DESTROY - - if (erts_mtrace_enabled) - erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); - - mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz); - -#elif HAVE_MSEG_RECREATE - goto do_recreate; -#else + mseg_dealloc(ma, atype, seg, old_size, flags, opt); new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); - - ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); - if (!new_seg) - new_size = old_size; - else { - sys_memcpy(((char *) new_seg), - ((char *) seg), - MIN(new_size, old_size)); - mseg_dealloc(ma, atype, seg, old_size, flags, opt); - } -#endif + new_size = 0; } } - else { + else if (new_size < old_size) { + UWord shrink_sz = old_size - new_size; - if (!opt->preserv) { - mseg_dealloc(ma, atype, seg, old_size, flags, opt); - new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); - ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); + /* +M<S>rsbcst <ratio> */ + if (shrink_sz < opt->abs_shrink_th + && 100*shrink_sz < opt->rel_shrink_th*old_size) { + new_size = old_size; } else { -#if HAVE_MSEG_RECREATE -#if !CAN_PARTLY_DESTROY - do_recreate: -#endif - new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size); - /* ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); - * will not always be aligned and it ok for now - */ - - if (erts_mtrace_enabled) - erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); - if (!new_seg) - new_size = old_size; -#else - new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); - - ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); - + new_seg = mseg_recreate(ma, flags, (void *) seg, old_size, &new_size); if (!new_seg) new_size = old_size; - else { - sys_memcpy(((char *) new_seg), ((char *) seg), MIN(new_size, old_size)); - mseg_dealloc(ma, atype, seg, old_size, flags, opt); - } -#endif } } + if (erts_mtrace_enabled) + erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); + INC_CC(ma, realloc); ASSERT(!MSEG_FLG_IS_2POW(flags) || IS_2POW(new_size)); *new_size_p = new_size; - ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size); + ERTS_MSEG_REALLOC_STAT(ma, old_size, new_size); return new_seg; } @@ -1106,9 +870,7 @@ static struct { Eterm mseg_create; Eterm mseg_create_resize; Eterm mseg_destroy; -#if HAVE_MSEG_RECREATE Eterm mseg_recreate; -#endif Eterm mseg_clear_cache; Eterm mseg_check_cache; @@ -1129,8 +891,6 @@ init_atoms(ErtsMsegAllctr_t *ma) #ifdef DEBUG Eterm *atom; #endif - - ERTS_MSEG_UNLOCK(ma); erts_mtx_lock(&init_atoms_mutex); if (!atoms_initialized) { @@ -1163,9 +923,7 @@ init_atoms(ErtsMsegAllctr_t *ma) AM_INIT(mseg_create); AM_INIT(mseg_create_resize); AM_INIT(mseg_destroy); -#if HAVE_MSEG_RECREATE AM_INIT(mseg_recreate); -#endif AM_INIT(mseg_clear_cache); AM_INIT(mseg_check_cache); @@ -1176,7 +934,6 @@ init_atoms(ErtsMsegAllctr_t *ma) #endif } - ERTS_MSEG_LOCK(ma); atoms_initialized = 1; erts_mtx_unlock(&init_atoms_mutex); } @@ -1239,7 +996,7 @@ info_options(ErtsMsegAllctr_t *ma, Uint **hpp, Uint *szp) { - Eterm res = THE_NON_VALUE; + Eterm res = NIL; if (print_to_p) { int to = *print_to_p; @@ -1254,7 +1011,6 @@ info_options(ErtsMsegAllctr_t *ma, if (!atoms_initialized) init_atoms(ma); - res = NIL; add_2tup(hpp, szp, &res, am.mcs, bld_uint(hpp, szp, ma->max_cache_size)); @@ -1293,9 +1049,7 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp PRINT_CC(to, arg, create); PRINT_CC(to, arg, create_resize); PRINT_CC(to, arg, destroy); -#if HAVE_MSEG_RECREATE PRINT_CC(to, 
arg, recreate); -#endif PRINT_CC(to, arg, clear_cache); PRINT_CC(to, arg, check_cache); @@ -1316,12 +1070,10 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp bld_unstable_uint(hpp, szp, ma->calls.clear_cache.giga_no), bld_unstable_uint(hpp, szp, ma->calls.clear_cache.no)); -#if HAVE_MSEG_RECREATE add_3tup(hpp, szp, &res, am.mseg_recreate, bld_unstable_uint(hpp, szp, ma->calls.recreate.giga_no), bld_unstable_uint(hpp, szp, ma->calls.recreate.no)); -#endif add_3tup(hpp, szp, &res, am.mseg_destroy, bld_unstable_uint(hpp, szp, ma->calls.destroy.giga_no), @@ -1354,88 +1106,95 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp } static Eterm -info_status(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg, - int begin_new_max_period, Uint **hpp, Uint *szp) +info_status(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, + int begin_new_max_period, int only_sz, Uint **hpp, Uint *szp) { Eterm res = THE_NON_VALUE; - if (mk->segments.max_ever.no < mk->segments.max.no) - mk->segments.max_ever.no = mk->segments.max.no; - if (mk->segments.max_ever.sz < mk->segments.max.sz) - mk->segments.max_ever.sz = mk->segments.max.sz; + if (ma->segments.max_ever.no < ma->segments.max.no) + ma->segments.max_ever.no = ma->segments.max.no; + if (ma->segments.max_ever.sz < ma->segments.max.sz) + ma->segments.max_ever.sz = ma->segments.max.sz; if (print_to_p) { int to = *print_to_p; void *arg = print_to_arg; - erts_print(to, arg, "cached_segments: %beu\n", mk->cache_size); - erts_print(to, arg, "cache_hits: %beu\n", mk->cache_hits); - erts_print(to, arg, "segments: %beu %beu %beu\n", - mk->segments.current.no, mk->segments.max.no, mk->segments.max_ever.no); - erts_print(to, arg, "segments_size: %beu %beu %beu\n", - mk->segments.current.sz, mk->segments.max.sz, mk->segments.max_ever.sz); - erts_print(to, arg, "segments_watermark: %beu\n", - mk->segments.current.watermark); + if (!only_sz) { + erts_print(to, arg, "cached_segments: %beu\n", ma->cache_size); + erts_print(to, arg, "cache_hits: %beu\n", ma->cache_hits); + erts_print(to, arg, "segments: %beu %beu %beu\n", + ma->segments.current.no, ma->segments.max.no, ma->segments.max_ever.no); + erts_print(to, arg, "segments_watermark: %beu\n", + ma->segments.current.watermark); + } + erts_print(to, arg, "segments_size: %beu %beu %beu\n", + ma->segments.current.sz, ma->segments.max.sz, ma->segments.max_ever.sz); } if (hpp || szp) { res = NIL; - add_2tup(hpp, szp, &res, - am.segments_watermark, - bld_unstable_uint(hpp, szp, mk->segments.current.watermark)); - add_4tup(hpp, szp, &res, - am.segments_size, - bld_unstable_uint(hpp, szp, mk->segments.current.sz), - bld_unstable_uint(hpp, szp, mk->segments.max.sz), - bld_unstable_uint(hpp, szp, mk->segments.max_ever.sz)); - add_4tup(hpp, szp, &res, - am.segments, - bld_unstable_uint(hpp, szp, mk->segments.current.no), - bld_unstable_uint(hpp, szp, mk->segments.max.no), - bld_unstable_uint(hpp, szp, mk->segments.max_ever.no)); - add_2tup(hpp, szp, &res, - am.cache_hits, - bld_unstable_uint(hpp, szp, mk->cache_hits)); - add_2tup(hpp, szp, &res, - am.cached_segments, - bld_unstable_uint(hpp, szp, mk->cache_size)); - + add_4tup(hpp, szp, &res, + am.segments_size, + bld_unstable_uint(hpp, szp, ma->segments.current.sz), + bld_unstable_uint(hpp, szp, ma->segments.max.sz), + bld_unstable_uint(hpp, szp, ma->segments.max_ever.sz)); + if (!only_sz) { + add_2tup(hpp, szp, &res, + am.segments_watermark, + bld_unstable_uint(hpp, szp, 
ma->segments.current.watermark)); + add_4tup(hpp, szp, &res, + am.segments, + bld_unstable_uint(hpp, szp, ma->segments.current.no), + bld_unstable_uint(hpp, szp, ma->segments.max.no), + bld_unstable_uint(hpp, szp, ma->segments.max_ever.no)); + add_2tup(hpp, szp, &res, + am.cache_hits, + bld_unstable_uint(hpp, szp, ma->cache_hits)); + add_2tup(hpp, szp, &res, + am.cached_segments, + bld_unstable_uint(hpp, szp, ma->cache_size)); + } } if (begin_new_max_period) { - mk->segments.max.no = mk->segments.current.no; - mk->segments.max.sz = mk->segments.current.sz; + ma->segments.max.no = ma->segments.current.no; + ma->segments.max.sz = ma->segments.current.sz; } return res; } -static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg, - int begin_max_per, Uint **hpp, Uint *szp) +static Eterm info_memkind(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, + int begin_max_per, int only_sz, Uint **hpp, Uint *szp) { Eterm res = THE_NON_VALUE; Eterm atoms[3]; Eterm values[3]; - if (print_to_p) { - erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", mk->name); + if (!only_sz) { + if (print_to_p) { + erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", "all memory"); + } + if (hpp || szp) { + atoms[0] = am.name; + atoms[1] = am.status; + atoms[2] = am.calls; + values[0] = erts_bld_string(hpp, szp, "all memory"); + } } - if (hpp || szp) { - atoms[0] = am.name; - atoms[1] = am.status; - atoms[2] = am.calls; - values[0] = erts_bld_string(hpp, szp, mk->name); - } - values[1] = info_status(ma, mk, print_to_p, print_to_arg, begin_max_per, hpp, szp); - values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp); + res = info_status(ma, print_to_p, print_to_arg, begin_max_per, only_sz, hpp, szp); + if (!only_sz) { + values[1] = res; + values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp); - if (hpp || szp) - res = bld_2tup_list(hpp, szp, 3, atoms, values); + if (hpp || szp) + res = bld_2tup_list(hpp, szp, 3, atoms, values); + } return res; } - static Eterm info_version(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) { @@ -1465,14 +1224,8 @@ erts_mseg_info_options(int ix, ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix); Eterm res; - ERTS_MSEG_LOCK(ma); - - ERTS_DBG_MA_CHK_THR_ACCESS(ma); - res = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp); - ERTS_MSEG_UNLOCK(ma); - return res; } @@ -1481,6 +1234,7 @@ erts_mseg_info(int ix, int *print_to_p, void *print_to_arg, int begin_max_per, + int only_sz, Uint **hpp, Uint *szp) { @@ -1490,30 +1244,30 @@ erts_mseg_info(int ix, Eterm values[4]; Uint n = 0; - ERTS_MSEG_LOCK(ma); + if (hpp || szp) { + if (!atoms_initialized) + init_atoms(ma); + } + if (!only_sz) { + if (hpp || szp) { + atoms[0] = am.version; + atoms[1] = am.options; + atoms[2] = am.memkind; + } + values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp); + values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp); + } + ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); - if (hpp || szp) { - - if (!atoms_initialized) - init_atoms(ma); + res = info_memkind(ma, print_to_p, print_to_arg, begin_max_per, only_sz, hpp, szp); - atoms[0] = am.version; - atoms[1] = am.options; - atoms[2] = am.memkind; - atoms[3] = am.memkind; + if (!only_sz) { + values[n++] = res; + if (hpp || szp) + res = bld_2tup_list(hpp, szp, n, atoms, values); } - values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp); - values[n++] = info_options(ma, "option ", print_to_p, 
print_to_arg, hpp, szp); -#if HALFWORD_HEAP - values[n++] = info_memkind(ma, &ma->low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); - values[n++] = info_memkind(ma, &ma->hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); -#else - values[n++] = info_memkind(ma, &ma->the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); -#endif - if (hpp || szp) - res = bld_2tup_list(hpp, szp, n, atoms, values); ERTS_MSEG_UNLOCK(ma); @@ -1521,7 +1275,7 @@ erts_mseg_info(int ix, } void * -erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMsegOpt_t *opt) +erts_mseg_alloc_opt(ErtsAlcType_t atype, UWord *size_p, Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *seg; @@ -1529,20 +1283,23 @@ erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMse ERTS_DBG_MA_CHK_THR_ACCESS(ma); seg = mseg_alloc(ma, atype, size_p, flags, opt); ERTS_MSEG_UNLOCK(ma); + HARD_DBG_INSERT_MSEG(seg, *size_p); return seg; } void * -erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p, Uint flags) +erts_mseg_alloc(ErtsAlcType_t atype, UWord *size_p, Uint flags) { return erts_mseg_alloc_opt(atype, size_p, flags, &erts_mseg_default_opt); } void erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, - Uint size, Uint flags, const ErtsMsegOpt_t *opt) + UWord size, Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); + + HARD_DBG_REMOVE_MSEG(seg, size); ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); mseg_dealloc(ma, atype, seg, size, flags, opt); @@ -1550,29 +1307,32 @@ erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, } void -erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size, Uint flags) +erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, UWord size, Uint flags) { erts_mseg_dealloc_opt(atype, seg, size, flags, &erts_mseg_default_opt); } void * erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p, + UWord old_size, UWord *new_size_p, Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *new_seg; + + HARD_DBG_REMOVE_MSEG(seg, old_size); ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, flags, opt); ERTS_MSEG_UNLOCK(ma); + HARD_DBG_INSERT_MSEG(new_seg, *new_size_p); return new_seg; } void * erts_mseg_realloc(ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p, Uint flags) + UWord old_size, UWord *new_size_p, Uint flags) { return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, flags, &erts_mseg_default_opt); @@ -1582,13 +1342,10 @@ Uint erts_mseg_no(const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); - MemKind* mk; - Uint n = 0; + Uint n; ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); - for (mk=ma->mk_list; mk; mk=mk->next) { - n += mk->segments.current.no; - } + n = ma->segments.current.no; ERTS_MSEG_UNLOCK(ma); return n; } @@ -1600,16 +1357,16 @@ erts_mseg_unit_size(void) } -static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name) +static void mem_cache_init(ErtsMsegAllctr_t *ma) { int i; /* Clear all cache headers */ - mseg_cache_clear_node(&(mk->cache_free)); - mseg_cache_clear_node(&(mk->cache_unpowered_node)); + mseg_cache_clear_node(&(ma->cache_free)); + mseg_cache_clear_node(&(ma->cache_unpowered_node)); for (i = 0; i < CACHE_AREAS; i++) { - mseg_cache_clear_node(&(mk->cache_powered_node[i])); + mseg_cache_clear_node(&(ma->cache_powered_node[i])); } /* 
Populate cache free list */ @@ -1617,25 +1374,20 @@ static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name) ASSERT(ma->max_cache_size <= MAX_CACHE_SIZE); for (i = 0; i < ma->max_cache_size; i++) { - mseg_cache_clear_node(&(mk->cache[i])); - erts_circleq_push_head(&(mk->cache_free), &(mk->cache[i])); + mseg_cache_clear_node(&(ma->cache[i])); + erts_circleq_push_head(&(ma->cache_free), &(ma->cache[i])); } - mk->cache_size = 0; - mk->cache_hits = 0; - - mk->segments.current.watermark = 0; - mk->segments.current.no = 0; - mk->segments.current.sz = 0; - mk->segments.max.no = 0; - mk->segments.max.sz = 0; - mk->segments.max_ever.no = 0; - mk->segments.max_ever.sz = 0; - - mk->ma = ma; - mk->name = name; - mk->next = ma->mk_list; - ma->mk_list = mk; + ma->cache_size = 0; + ma->cache_hits = 0; + + ma->segments.current.watermark = 0; + ma->segments.current.no = 0; + ma->segments.current.sz = 0; + ma->segments.max.no = 0; + ma->segments.max.sz = 0; + ma->segments.max_ever.no = 0; + ma->segments.max_ever.sz = 0; } void @@ -1662,18 +1414,19 @@ erts_mseg_init(ErtsMsegInit_t *init) erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms"); -#if HAVE_MMAP && !defined(MAP_ANON) - mmap_fd = open("/dev/zero", O_RDWR); - if (mmap_fd < 0) - erl_exit(ERTS_ABORT_EXIT, "erts_mseg: unable to open /dev/zero\n"); +#ifdef ERTS_ALC_A_EXEC + /* Initialize erts_exec_mapper *FIRST*, to increase probability + * of getting low memory for HiPE AMD64's small code model. + */ + erts_mmap_init(&erts_exec_mmapper, &init->exec_mmap, 1); #endif - -#if HAVE_MMAP && HALFWORD_HEAP - initialize_pmmap(); + erts_mmap_init(&erts_dflt_mmapper, &init->dflt_mmap, 0); +#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) + erts_mmap_init(&erts_literal_mmapper, &init->literal_mmap, 0); #endif if (!IS_2POW(GET_PAGE_SIZE)) - erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE); + erts_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE); ASSERT((MSEG_ALIGNED_SIZE % GET_PAGE_SIZE) == 0); @@ -1702,33 +1455,17 @@ erts_mseg_init(ErtsMsegInit_t *init) if (ma->max_cache_size > MAX_CACHE_SIZE) ma->max_cache_size = MAX_CACHE_SIZE; - ma->mk_list = NULL; - -#if HALFWORD_HEAP - mem_kind_init(ma, &ma->low_mem, "low memory"); - mem_kind_init(ma, &ma->hi_mem, "high memory"); -#else - mem_kind_init(ma, &ma->the_mem, "all memory"); -#endif + mem_cache_init(ma); sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls)); - -#if CAN_PARTLY_DESTROY - ma->min_seg_size = ~((Uint) 0); -#endif } } static ERTS_INLINE Uint tot_cache_size(ErtsMsegAllctr_t *ma) { - MemKind* mk; - Uint sz = 0; ERTS_DBG_MA_CHK_THR_ACCESS(ma); - for (mk=ma->mk_list; mk; mk=mk->next) { - sz += mk->cache_size; - } - return sz; + return ma->cache_size; } /* @@ -1757,7 +1494,7 @@ erts_mseg_test(UWord op, UWord a1, UWord a2, UWord a3) case 0x400: /* Have erts_mseg */ return (UWord) 1; case 0x401: - return (UWord) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1, (Uint) 0); + return (UWord) erts_mseg_alloc(ERTS_ALC_A_INVALID, (UWord *) a1, (Uint) 0); case 0x402: erts_mseg_dealloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2, (Uint) 0); return (UWord) 0; @@ -1765,7 +1502,7 @@ erts_mseg_test(UWord op, UWord a1, UWord a2, UWord a3) return (UWord) erts_mseg_realloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2, - (Uint *) a3, + (UWord *) a3, (Uint) 0); case 0x404: erts_mseg_clear_cache(); @@ -1788,405 +1525,3 @@ erts_mseg_test(UWord op, UWord a1, UWord a2, UWord a3) } } - - -#if HALFWORD_HEAP -/* - * Very simple 
page oriented mmap replacer. Works in the lower - * 32 bit address range of a 64bit program. - * Implements anonymous mmap mremap and munmap with address order first fit. - * The free list is expected to be very short... - * To be used for compressed pointers in Erlang halfword emulator - * implementation. The MacOS X version is more of a toy, it's not really - * for production as the halfword erlang VM relies on Linux specific memory - * mapping tricks. - */ - -/* #define HARDDEBUG 1 */ - -#ifdef HARDDEBUG -static void dump_freelist(void) -{ - FreeBlock *p = first; - - while (p) { - fprintf(stderr, "p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n", - (void *) p, (unsigned long) p->num, (void *) p->next); - p = p->next; - } -} - -#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) \ - fprintf(stderr,"Mapping of address %p with size %ld " \ - "does not map complete pages (%s:%d)\r\n", \ - (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) - -#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) \ - fprintf(stderr,"Mapping of address %p with size %ld " \ - "is not page aligned (%s:%d)\r\n", \ - (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) - -#define HARDDEBUG_MAP_FAILED(PTR, SZ) \ - fprintf(stderr, "Could not actually map memory " \ - "at address %p with size %ld (%s:%d) ..\r\n", \ - (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) -#else -#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) do{}while(0) -#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) do{}while(0) -#define HARDDEBUG_MAP_FAILED(PTR, SZ) do{}while(0) -#endif - - -#ifdef __APPLE__ -#define MAP_ANONYMOUS MAP_ANON -#endif - -#define INIT_LOCK() do {erts_mtx_init(&pmmap_mutex, "pmmap");} while(0) - -#define TAKE_LOCK() do {erts_mtx_lock(&pmmap_mutex);} while(0) - -#define RELEASE_LOCK() do {erts_mtx_unlock(&pmmap_mutex);} while(0) - -static erts_mtx_t pmmap_mutex; /* Also needed when !USE_THREADS */ - -typedef struct _free_block { - unsigned long num; /*pages*/ - struct _free_block *next; -} FreeBlock; - -/* Protect with lock */ -static FreeBlock *first; - -static void *do_map(void *ptr, size_t sz) -{ - void *res; - - if (ALIGNED_CEILING(sz) != sz) { - HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz); - return NULL; - } - - if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) { - HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); - return NULL; - } - -#if HAVE_MMAP - res = mmap(ptr, sz, - PROT_READ | PROT_WRITE, MAP_PRIVATE | - MAP_ANONYMOUS | MAP_FIXED, - -1 , 0); -#else -# error "Missing mmap support" -#endif - - if (res == MAP_FAILED) { - HARDDEBUG_MAP_FAILED(ptr, sz); - return NULL; - } - - return res; -} - -static int do_unmap(void *ptr, size_t sz) -{ - void *res; - - if (ALIGNED_CEILING(sz) != sz) { - HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz); - return 1; - } - - if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) { - HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); - return 1; - } - - res = mmap(ptr, sz, - PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, - -1 , 0); - - if (res == MAP_FAILED) { - HARDDEBUG_MAP_FAILED(ptr, sz); - return 1; - } - - return 0; -} - -#ifdef __APPLE__ -/* - * The first 4 gig's are protected on Macos X for 64bit processes :( - * The range 0x1000000000 - 0x10FFFFFFFF is selected as an arbitrary - * value of a normally unused range... Real MMAP's will avoid - * it and all 32bit compressed pointers can be in that range... - * More expensive than on Linux where expansion of compressed - * poiters involves no masking (as they are in the first 4 gig's). 
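 * (The "address order first fit" mentioned at the top of this comment
 *  reduces to the short walk in pmmap() below -- as a sketch:
 *
 *      for (block = &first;
 *           *block != NULL && (*block)->num < num_pages;
 *           block = &((*block)->next))
 *          ;
 *
 *  then *block is used, with any surplus pages left behind as a new,
 *  smaller free block.)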
- * It's also very uncertain if the MAP_NORESERVE flag really has - * any effect in MacOS X. Swap space may always be allocated... - */ -#define SET_RANGE_MIN() /* nothing */ -#define RANGE_MIN 0x1000000000UL -#define RANGE_MAX 0x1100000000UL -#define RANGE_MASK (RANGE_MIN) -#define EXTRA_MAP_FLAGS (MAP_FIXED) -#else -static size_t range_min; -#define SET_RANGE_MIN() do { range_min = (size_t) sbrk(0); } while (0) -#define RANGE_MIN range_min -#define RANGE_MAX 0x100000000UL -#define RANGE_MASK 0UL -#define EXTRA_MAP_FLAGS (0) -#endif - -static int initialize_pmmap(void) -{ - char *p,*q,*rptr; - size_t rsz; - FreeBlock *initial; - - SET_RANGE_MIN(); - if (sizeof(void *) != 8) { - erl_exit(1,"Halfword emulator cannot be run in 32bit mode"); - } - - p = (char *) RANGE_MIN; - q = (char *) RANGE_MAX; - - rsz = ALIGNED_FLOOR(q - p); - - rptr = mmap_align(NULL, (void *) p, rsz, - PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | - MAP_NORESERVE | EXTRA_MAP_FLAGS, - -1 , 0); -#ifdef HARDDEBUG - printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n", - p, (unsigned long) rsz, (unsigned long) (rsz / MSEG_ALIGNED_SIZE), - (void *) rptr, (void*)(rptr + rsz)); -#endif - if ((UWord)(rptr + rsz) > RANGE_MAX) { - size_t rsz_trunc = RANGE_MAX - (UWord)rptr; -#ifdef HARDDEBUG - printf("Reducing mmap'ed memory from %lu to %lu Mb, reduced range = %p -> %p\r\n", - rsz/(1024*1024), rsz_trunc/(1024*1024), rptr, rptr+rsz_trunc); -#endif - munmap((void*)RANGE_MAX, rsz - rsz_trunc); - rsz = rsz_trunc; - } - if (!do_map(rptr, MSEG_ALIGNED_SIZE)) { - erl_exit(1,"Could not actually mmap first page for halfword emulator...\n"); - } - initial = (FreeBlock *) rptr; - initial->num = (rsz / MSEG_ALIGNED_SIZE); - initial->next = NULL; - first = initial; - INIT_LOCK(); - return 0; -} - -static void *pmmap(size_t size) -{ - size_t real_size = ALIGNED_CEILING(size); - size_t num_pages = real_size / MSEG_ALIGNED_SIZE; - FreeBlock **block; - FreeBlock *tail; - FreeBlock *res; - - TAKE_LOCK(); - - for (block = &first; - *block != NULL && (*block)->num < num_pages; - block = &((*block)->next)) - ; - if (!(*block)) { - RELEASE_LOCK(); - return NULL; - } - if ((*block)->num == num_pages) { - /* nice, perfect fit */ - res = *block; - *block = (*block)->next; - } else { - tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size); - if (!do_map(tail, MSEG_ALIGNED_SIZE)) { - HARDDEBUG_MAP_FAILED(tail, MSEG_ALIGNED_SIZE); - RELEASE_LOCK(); - return NULL; - } - tail->num = (*block)->num - num_pages; - tail->next = (*block)->next; - res = *block; - *block = tail; - } - - RELEASE_LOCK(); - - if (!do_map(res, real_size)) { - HARDDEBUG_MAP_FAILED(res, real_size); - return NULL; - } - - return (void *) res; -} - -static int pmunmap(void *p, size_t size) -{ - size_t real_size = ALIGNED_CEILING(size); - size_t num_pages = real_size / MSEG_ALIGNED_SIZE; - - FreeBlock *block; - FreeBlock *last; - FreeBlock *nb = (FreeBlock *) p; - - ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0); - - if (real_size > MSEG_ALIGNED_SIZE) { - if (do_unmap(((char *) p) + MSEG_ALIGNED_SIZE, real_size - MSEG_ALIGNED_SIZE)) { - return 1; - } - } - - TAKE_LOCK(); - - last = NULL; - block = first; - while(block != NULL && ((void *) block) < p) { - last = block; - block = block->next; - } - - if (block != NULL && - ((void *) block) == ((void *) (((char *) p) + real_size))) { - /* Merge new free block with following */ - nb->num = block->num + num_pages; - nb->next = block->next; - if (do_unmap(block, MSEG_ALIGNED_SIZE)) { - RELEASE_LOCK(); - return 1; - } - } 
else { - /* just link in */ - nb->num = num_pages; - nb->next = block; - } - if (last != NULL) { - if (p == ((void *) (((char *) last) + (last->num * MSEG_ALIGNED_SIZE)))) { - /* Merge with previous */ - last->num += nb->num; - last->next = nb->next; - if (do_unmap(nb, MSEG_ALIGNED_SIZE)) { - RELEASE_LOCK(); - return 1; - } - } else { - last->next = nb; - } - } else { - first = nb; - } - RELEASE_LOCK(); - return 0; -} - -static void *pmremap(void *old_address, size_t old_size, - size_t new_size) -{ - size_t new_real_size = ALIGNED_CEILING(new_size); - size_t new_num_pages = new_real_size / MSEG_ALIGNED_SIZE; - size_t old_real_size = ALIGNED_CEILING(old_size); - size_t old_num_pages = old_real_size / MSEG_ALIGNED_SIZE; - if (new_num_pages == old_num_pages) { - return old_address; - } else if (new_num_pages < old_num_pages) { /* Shrink */ - size_t nfb_pages = old_num_pages - new_num_pages; - size_t nfb_real_size = old_real_size - new_real_size; - void *vnfb = (void *) (((char *)old_address) + new_real_size); - FreeBlock *nfb = (FreeBlock *) vnfb; - FreeBlock **block; - TAKE_LOCK(); - for (block = &first; - *block != NULL && (*block) < nfb; - block = &((*block)->next)) - ; - if (!(*block) || - (*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) { - /* Normal link in */ - if (nfb_pages > 1) { - if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE), - (nfb_pages - 1)*MSEG_ALIGNED_SIZE)) { - return NULL; - } - } - nfb->next = (*block); - nfb->num = nfb_pages; - (*block) = nfb; - } else { /* block merge */ - nfb->next = (*block)->next; - nfb->num = nfb_pages + (*block)->num; - /* unmap also the first page of the next freeblock */ - (*block) = nfb; - if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE), - nfb_pages*MSEG_ALIGNED_SIZE)) { - return NULL; - } - } - RELEASE_LOCK(); - return old_address; - } else { /* Enlarge */ - FreeBlock **block; - void *old_end = (void *) (((char *)old_address) + old_real_size); - TAKE_LOCK(); - for (block = &first; - *block != NULL && (*block) < (FreeBlock *) old_address; - block = &((*block)->next)) - ; - if ((*block) == NULL || old_end > ((void *) RANGE_MAX) || - (*block) != old_end || - (*block)->num < (new_num_pages - old_num_pages)) { - /* cannot extend */ - void *result; - RELEASE_LOCK(); - result = pmmap(new_size); - if (result == NULL) { - return NULL; - } - memcpy(result,old_address,old_size); - if (pmunmap(old_address,old_size)) { - /* Oups... 
*/ -	pmunmap(result,new_size); - 	return NULL; - } - return result; - } else { /* extend */ - size_t remaining_pages = (*block)->num - - (new_num_pages - old_num_pages); - if (!remaining_pages) { - void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE); - void *n = (*block)->next; - size_t x = ((*block)->num - 1) * MSEG_ALIGNED_SIZE; - if (x > 0) { - if (do_map(p,x) == NULL) { - RELEASE_LOCK(); - return NULL; - } - } - (*block) = n; - } else { - FreeBlock *nfb = (FreeBlock *) ((void *) - (((char *) old_address) + - new_real_size)); - void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE); - if (do_map(p,new_real_size - old_real_size) == NULL) { - RELEASE_LOCK(); - return NULL; - } - nfb->num = remaining_pages; - nfb->next = (*block)->next; - (*block) = nfb; - } - RELEASE_LOCK(); - return old_address; - } - } -} -#endif /* HALFWORD_HEAP */ diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index a4f250ceab..a43b409e94 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -1,18 +1,19 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2002-2013. All Rights Reserved. + * Copyright Ericsson AB 2002-2016. All Rights Reserved. * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * %CopyrightEnd% */ @@ -22,35 +23,25 @@ #include "sys.h" #include "erl_alloc_types.h" +#include "erl_mmap.h" -#ifndef HAVE_MMAP -# define HAVE_MMAP 0 -#endif -#ifndef HAVE_MREMAP -# define HAVE_MREMAP 0 -#endif - -#if HAVE_MMAP +/* + * We currently only enable mseg_alloc if we have + * a genuine mmap()/munmap() primitive. It is possible + * to utilize erts_mmap() without mmap support, but + * alloc_util needs to be prepared before we can do + * that. + */ +#ifdef ERTS_HAVE_GENUINE_OS_MMAP # define HAVE_ERTS_MSEG 1 -# define HAVE_SUPER_ALIGNED_MB_CARRIERS 1 +# define ERTS_HAVE_MSEG_SUPER_ALIGNED 1 #else # define HAVE_ERTS_MSEG 0 -# define HAVE_SUPER_ALIGNED_MB_CARRIERS 0 +# define ERTS_HAVE_MSEG_SUPER_ALIGNED 0 #endif -#if HAVE_SUPER_ALIGNED_MB_CARRIERS -# define MSEG_ALIGN_BITS (18) - /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */ -#else -/* If we don't use super aligned multiblock carriers - * we will mmap with page size alignment (and thus use corresponding - * align bits). - * - * Current implementation needs this to be a constant and - * only uses this for user dev testing so setting page size - * to 4096 (12 bits) is fine. 
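The pmmap()/pmunmap() pair removed above is a classic address-ordered first-fit allocator: allocation walks a singly linked, address-sorted list of free page runs and splits the first run that is large enough, while free re-inserts the run and coalesces with both neighbours. A minimal, self-contained sketch of that scheme follows; block_t, PAGE, fit_alloc and fit_free are illustrative names, not the removed ERTS identifiers, and the real code additionally has to do_map() a header page before it may write into PROT_NONE-reserved memory.

    #include <stddef.h>

    #define PAGE 4096u                  /* stands in for MSEG_ALIGNED_SIZE */

    typedef struct block { size_t num; struct block *next; } block_t;
    static block_t *free_list;          /* kept sorted by address */

    static void *fit_alloc(size_t pages)
    {
        block_t **b, *tail;
        void *res;
        for (b = &free_list; *b && (*b)->num < pages; b = &(*b)->next)
            ;                           /* first fit */
        if (!*b)
            return NULL;
        res = *b;
        if ((*b)->num == pages) {       /* perfect fit: unlink the run */
            *b = (*b)->next;
        } else {                        /* split: the tail keeps the rest */
            tail = (block_t *)((char *)res + pages * PAGE);
            tail->num = (*b)->num - pages;
            tail->next = (*b)->next;
            *b = tail;
        }
        return res;
    }

    static void fit_free(void *p, size_t pages)
    {
        block_t *prev = NULL, *cur = free_list, *nb = p;
        while (cur && (void *)cur < p) {    /* find address-ordered slot */
            prev = cur;
            cur = cur->next;
        }
        nb->num = pages;
        nb->next = cur;
        if (cur && (char *)p + pages * PAGE == (char *)cur) {
            nb->num += cur->num;            /* coalesce with the run after */
            nb->next = cur->next;
        }
        if (prev && (char *)prev + prev->num * PAGE == (char *)p) {
            prev->num += nb->num;           /* coalesce with the run before */
            prev->next = nb->next;
        } else if (prev) {
            prev->next = nb;
        } else {
            free_list = nb;
        }
    }

pmremap() is the same machinery applied in place: shrinking carves a new free run off the tail, enlarging consumes an adjacent run when one happens to follow the mapping, and otherwise it falls back to allocate-copy-free.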
- */ -# define MSEG_ALIGN_BITS (12) +#if ERTS_HAVE_MSEG_SUPER_ALIGNED +# define MSEG_ALIGN_BITS ERTS_MMAP_SUPERALIGNED_BITS #endif #if HAVE_ERTS_MSEG @@ -68,6 +59,9 @@ typedef struct { Uint rmcbf; Uint mcs; Uint nos; + ErtsMMapInit dflt_mmap; + ErtsMMapInit literal_mmap; + ErtsMMapInit exec_mmap; } ErtsMsegInit_t; #define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \ @@ -75,7 +69,10 @@ typedef struct { 4*1024*1024, /* amcbf: Absolute max cache bad fit */ \ 20, /* rmcbf: Relative max cache bad fit */ \ 10, /* mcs: Max cache size */ \ - 1000 /* cci: Cache check interval */ \ + 1000, /* cci: Cache check interval */ \ + ERTS_MMAP_INIT_DEFAULT_INITER, \ + ERTS_MMAP_INIT_LITERAL_INITER, \ + ERTS_MMAP_INIT_HIPE_EXEC_INITER \ } typedef struct { @@ -84,19 +81,16 @@ typedef struct { UWord abs_shrink_th; UWord rel_shrink_th; int sched_spec; -#if HALFWORD_HEAP - int low_mem; -#endif } ErtsMsegOpt_t; extern const ErtsMsegOpt_t erts_mseg_default_opt; -void *erts_mseg_alloc(ErtsAlcType_t, Uint *, Uint); -void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, Uint, const ErtsMsegOpt_t *); -void erts_mseg_dealloc(ErtsAlcType_t, void *, Uint, Uint); -void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, Uint, Uint, const ErtsMsegOpt_t *); -void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *, Uint); -void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *, Uint, const ErtsMsegOpt_t *); +void *erts_mseg_alloc(ErtsAlcType_t, UWord *, Uint); +void *erts_mseg_alloc_opt(ErtsAlcType_t, UWord *, Uint, const ErtsMsegOpt_t *); +void erts_mseg_dealloc(ErtsAlcType_t, void *, UWord, Uint); +void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, UWord, Uint, const ErtsMsegOpt_t *); +void *erts_mseg_realloc(ErtsAlcType_t, void *, UWord, UWord *, Uint); +void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, UWord, UWord *, Uint, const ErtsMsegOpt_t *); void erts_mseg_clear_cache(void); void erts_mseg_cache_check(void); Uint erts_mseg_no( const ErtsMsegOpt_t *); @@ -105,7 +99,7 @@ void erts_mseg_init(ErtsMsegInit_t *init); void erts_mseg_late_init(void); /* Have to be called after all allocators, threads and timers have been initialized. */ Eterm erts_mseg_info_options(int, int *, void*, Uint **, Uint *); -Eterm erts_mseg_info(int, int *, void*, int, Uint **, Uint *); +Eterm erts_mseg_info(int, int *, void*, int, int, Uint **, Uint *); #endif /* #if HAVE_ERTS_MSEG */ diff --git a/erts/emulator/sys/common/erl_mtrace_sys_wrap.c b/erts/emulator/sys/common/erl_mtrace_sys_wrap.c index 408aa7e016..fc871f94f1 100644 --- a/erts/emulator/sys/common/erl_mtrace_sys_wrap.c +++ b/erts/emulator/sys/common/erl_mtrace_sys_wrap.c @@ -1,18 +1,19 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2004-2009. All Rights Reserved. + * Copyright Ericsson AB 2004-2016. All Rights Reserved. * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
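Back in erl_mseg.h: MSEG_ALIGN_BITS is now inherited from ERTS_MMAP_SUPERALIGNED_BITS in erl_mmap.h rather than being hard-coded here. The arithmetic behind superalignment is plain power-of-two masking; a small sketch, assuming the historical 18 alignment bits (256 KB carriers) from the old HAVE_SUPER_ALIGNED_MB_CARRIERS branch — the macro names below are illustrative, not the ERTS ones.

    #include <stdio.h>

    #define ALIGN_BITS 18u                        /* assumed, per old code */
    #define ALIGN_SZ   (1u << ALIGN_BITS)         /* 262144 (256 KB)       */
    #define SUPER_CEIL(sz)  (((sz) + ALIGN_SZ - 1) & ~(ALIGN_SZ - 1))
    #define SUPER_FLOOR(sz) ((sz) & ~(ALIGN_SZ - 1))

    int main(void)
    {
        printf("%u\n", SUPER_CEIL(300000u));   /* 524288: rounded up      */
        printf("%u\n", SUPER_CEIL(262144u));   /* 262144: already aligned */
        printf("%u\n", SUPER_FLOOR(300000u));  /* 262144                  */
        return 0;
    }

Since the hard limits for sbct and lmbcs documented in erts_alloc.xml derive from this constant, centralizing it in erl_mmap.h keeps mseg and the mmap layer from drifting apart.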
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * %CopyrightEnd% */ diff --git a/erts/emulator/sys/common/erl_os_monotonic_time_extender.c b/erts/emulator/sys/common/erl_os_monotonic_time_extender.c new file mode 100644 index 0000000000..d53190fdd5 --- /dev/null +++ b/erts/emulator/sys/common/erl_os_monotonic_time_extender.c @@ -0,0 +1,89 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2015-2016. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * %CopyrightEnd% + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif +#include "erl_os_monotonic_time_extender.h" + +#ifdef USE_THREADS + +static void *os_monotonic_time_extender(void *vstatep) +{ + ErtsOsMonotonicTimeExtendState *state = (ErtsOsMonotonicTimeExtendState *) vstatep; + long sleep_time = state->check_interval*1000; + Uint32 (*raw_os_mtime)(void) = state->raw_os_monotonic_time; + Uint32 last_msb = 0; + + while (1) { + Uint32 msb = (*raw_os_mtime)() & (((Uint32) 1) << 31); + + if (msb != last_msb) { + int ix = ((int) (last_msb >> 31)) & 1; + Uint32 xtnd = (Uint32) erts_atomic32_read_nob(&state->extend[ix]); + erts_atomic32_set_nob(&state->extend[ix], (erts_aint32_t) (xtnd + 1)); + last_msb = msb; + } + erts_milli_sleep(sleep_time); + } + + erts_exit(ERTS_ABORT_EXIT, "os_monotonic_time_extender thread terminating"); + return NULL; +} + +static erts_tid_t os_monotonic_extender_tid; +#endif + +void +erts_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep, + Uint32 (*raw_os_monotonic_time)(void), + int check_seconds) +{ +#ifdef USE_THREADS + statep->raw_os_monotonic_time = raw_os_monotonic_time; + erts_atomic32_init_nob(&statep->extend[0], (erts_aint32_t) 0); + erts_atomic32_init_nob(&statep->extend[1], (erts_aint32_t) 0); + statep->check_interval = check_seconds; + +#else + statep->extend[0] = (Uint32) 0; + statep->extend[1] = (Uint32) 0; + statep->last_msb = (ErtsMonotonicTime) 0; +#endif +} + +void +erts_late_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep) +{ +#ifdef USE_THREADS + erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER; + thr_opts.detached = 1; + thr_opts.suggested_stack_size = 4; + +#if 0 + thr_opts.name = "os_monotonic_time_extender"; +#endif + + erts_thr_create(&os_monotonic_extender_tid, + os_monotonic_time_extender, + (void*) statep, + &thr_opts); +#endif +} diff --git a/erts/emulator/sys/common/erl_os_monotonic_time_extender.h b/erts/emulator/sys/common/erl_os_monotonic_time_extender.h new file mode 100644 index 0000000000..8089c9aed9 --- /dev/null +++ b/erts/emulator/sys/common/erl_os_monotonic_time_extender.h @@ -0,0 +1,66 @@ +/* + * 
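The extender thread in erl_os_monotonic_time_extender.c above watches nothing but bit 31 of the raw 32-bit OS clock: each flip of that bit bumps an epoch counter for the half-range that was just left, so a reader can rebuild a 64-bit time from the raw value plus the counter matching the value's own half-range. The same trick, stripped of threads and atomics, in a self-contained demo (names are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extend[2];   /* one epoch counter per half-range */
    static uint32_t last_msb;

    static uint64_t extend_time(uint32_t raw)
    {
        uint32_t msb = raw & (UINT32_C(1) << 31);
        if (msb != last_msb) {                  /* clock crossed a half-range */
            extend[(last_msb >> 31) & 1]++;     /* pre-bump the range we left */
            last_msb = msb;
        }
        return ((uint64_t) extend[(raw >> 31) & 1] << 32) + raw;
    }

    int main(void)
    {
        uint32_t samples[] = { 1u, 0x7fffffffu, 0x80000001u, 0xffffffffu, 2u };
        int i;
        for (i = 0; i < 5; i++)
            printf("0x%llx\n", (unsigned long long) extend_time(samples[i]));
        return 0;   /* last line prints 0x100000002: one full wrap survived */
    }

Bumping the counter of the half-range just left is what makes this work: later values in the other half still get their old epoch, while the first value seen after the wrap picks up the incremented one. The thread merely has to observe the clock at least twice per wrap of the raw 32-bit value, which is what check_interval controls.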
%CopyrightBegin% + * + * Copyright Ericsson AB 2015. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * %CopyrightEnd% + */ + +#ifndef ERL_OS_MONOTONIC_TIME_EXTENDER_H__ +#define ERL_OS_MONOTONIC_TIME_EXTENDER_H__ + +#include "sys.h" +#include "erl_threads.h" + +typedef struct { +#ifdef USE_THREADS + Uint32 (*raw_os_monotonic_time)(void); + erts_atomic32_t extend[2]; + int check_interval; +#else + Uint32 extend[2]; + ErtsMonotonicTime last_msb; +#endif +} ErtsOsMonotonicTimeExtendState; + +#ifdef USE_THREADS +# define ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(S, RT) ((void) 1) +# define ERTS_EXTEND_OS_MONOTONIC_TIME(S, RT) \ + ((((ErtsMonotonicTime) \ + erts_atomic32_read_nob(&((S)->extend[((int) ((RT) >> 31)) & 1]))) \ + << 32) \ + + (RT)) +#else +# define ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(S, RT) \ + do { \ + Uint32 msb__ = (RT) & (((Uint32) 1) << 31); \ + if (msb__ != (S)->last_msb) { \ + int ix__ = ((int) ((S)->last_msb >> 31)) & 1; \ + (S)->extend[ix__]++; \ + (S)->last_msb = msb__; \ + } \ + } while (0) +# define ERTS_EXTEND_OS_MONOTONIC_TIME(S, RT) \ + ((((ErtsMonotonicTime) (S)->extend[((int) ((RT) >> 31)) & 1]) << 32) + (RT)) +#endif + +void +erts_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep, + Uint32 (*raw_os_monotonic_time)(void), + int check_seconds); +void +erts_late_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep); + +#endif diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 5861b30315..e394d84f73 100644 --- a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -1,18 +1,19 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2013. All Rights Reserved. + * Copyright Ericsson AB 2006-2016. All Rights Reserved. * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
* * %CopyrightEnd% */ @@ -73,6 +74,7 @@ #include "erl_thr_progress.h" #include "erl_driver.h" #include "erl_alloc.h" +#include "erl_msacc.h" #if !defined(ERTS_POLL_USE_EPOLL) \ && !defined(ERTS_POLL_USE_DEVPOLL) \ @@ -123,8 +125,8 @@ static ERTS_INLINE int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds, ERTS_fd_set *exceptfds, struct timeval *timeout) { - ASSERT(!readfds || readfds->sz >= nfds); - ASSERT(!writefds || writefds->sz >= nfds); + ASSERT(!readfds || readfds->sz >= ERTS_FD_SIZE(nfds)); + ASSERT(!writefds || writefds->sz >= ERTS_FD_SIZE(nfds)); ASSERT(!exceptfds); return select(nfds, (readfds ? readfds->ptr : NULL ), @@ -153,9 +155,6 @@ int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds, #define ERTS_POLL_COALESCE_KP_RES (ERTS_POLL_USE_KQUEUE || ERTS_POLL_USE_EPOLL) -#define ERTS_EV_TABLE_MIN_LENGTH 1024 -#define ERTS_EV_TABLE_EXP_THRESHOLD (2048*1024) - #ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT # define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 1 #else @@ -314,13 +313,16 @@ struct ErtsPollSet_ { #if ERTS_POLL_USE_WAKEUP_PIPE int wake_fds[2]; #endif +#if ERTS_POLL_USE_TIMERFD + int timer_fd; +#endif #if ERTS_POLL_USE_FALLBACK int fallback_used; #endif #if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT erts_atomic32_t wakeup_state; #endif - erts_smp_atomic32_t timeout; + erts_atomic64_t timeout_time; #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS erts_smp_atomic_t no_avoided_wakeups; erts_smp_atomic_t no_avoided_interrupts; @@ -384,6 +386,26 @@ static void check_poll_status(ErtsPollSet ps); static void print_misc_debug_info(void); #endif +static ERTS_INLINE void +init_timeout_time(ErtsPollSet ps) +{ + erts_atomic64_init_nob(&ps->timeout_time, + (erts_aint64_t) ERTS_MONOTONIC_TIME_MAX); +} + +static ERTS_INLINE void +set_timeout_time(ErtsPollSet ps, ErtsMonotonicTime time) +{ + erts_atomic64_set_relb(&ps->timeout_time, + (erts_aint64_t) time); +} + +static ERTS_INLINE ErtsMonotonicTime +get_timeout_time(ErtsPollSet ps) +{ + return (ErtsMonotonicTime) erts_atomic64_read_acqb(&ps->timeout_time); +} + #define ERTS_POLL_NOT_WOKEN 0 #define ERTS_POLL_WOKEN -1 #define ERTS_POLL_WOKEN_INTR 1 @@ -410,7 +432,7 @@ static ERTS_INLINE int is_interrupted_reset(ErtsPollSet ps) { #if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - return (erts_atomic32_xchg_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN) + return (erts_atomic32_xchg_acqb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN) == ERTS_POLL_WOKEN_INTR); #else return 0; @@ -421,7 +443,7 @@ static ERTS_INLINE void woke_up(ErtsPollSet ps) { #if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state); + erts_aint32_t wakeup_state = erts_atomic32_read_acqb(&ps->wakeup_state); if (wakeup_state == ERTS_POLL_NOT_WOKEN) (void) erts_atomic32_cmpxchg_nob(&ps->wakeup_state, ERTS_POLL_WOKEN, @@ -448,14 +470,9 @@ wake_poller(ErtsPollSet ps, int interrupted, int async_signal_safe) wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state, ERTS_POLL_WOKEN, ERTS_POLL_NOT_WOKEN); - else { - /* - * We might unnecessarily write to the pipe, however, - * that isn't problematic. 
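The three timeout_time helpers above replace the old 32-bit millisecond timeout field with a 64-bit monotonic deadline published via release/acquire atomics: the poller stores its deadline before blocking, and erts_poll_interrupt_timed() (further down) reads it to decide whether a wakeup is worth the cost. A compact sketch of the same pattern in C11 atomics, with hypothetical names:

    #include <stdatomic.h>
    #include <stdint.h>

    #define TIME_MAX INT64_MAX   /* sentinel: "no deadline pending" */

    static _Atomic int64_t timeout_time = TIME_MAX;

    static void set_timeout(int64_t deadline)   /* poller, before blocking */
    {
        atomic_store_explicit(&timeout_time, deadline, memory_order_release);
    }

    static int64_t get_timeout(void)            /* any thread */
    {
        return atomic_load_explicit(&timeout_time, memory_order_acquire);
    }

    /* Mirrors the erts_poll_interrupt_timed() test: wake the poller only
     * if it would otherwise sleep past the caller's own deadline. */
    static int should_wake(int64_t callers_deadline)
    {
        return get_timeout() > callers_deadline;
    }

ERTS_MONOTONIC_TIME_MAX doubles as the "no deadline pending" value (erts_poll_wait() restores it on the way out, as seen further down), so a poll set whose state is unknown always compares as "would sleep too long" and errs on the side of waking.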
- */ - wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state); - erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR); - } + else + wakeup_state = erts_atomic32_xchg_relb(&ps->wakeup_state, + ERTS_POLL_WOKEN_INTR); wake = wakeup_state == ERTS_POLL_NOT_WOKEN; } /* @@ -560,6 +577,75 @@ create_wakeup_pipe(ErtsPollSet ps) #endif /* ERTS_POLL_USE_WAKEUP_PIPE */ /* + * --- timer fd ----------------------------------------------------------- + */ + +#if ERTS_POLL_USE_TIMERFD + +/* We use the timerfd when using epoll_wait to get high accuracy + timeouts, i.e. we want to sleep with < ms accuracy. */ + +static void +create_timerfd(ErtsPollSet ps) +{ + int do_wake = 0; + int timer_fd; + timer_fd = timerfd_create(CLOCK_MONOTONIC,0); + ERTS_POLL_EXPORT(erts_poll_control)(ps, + timer_fd, + ERTS_POLL_EV_IN, + 1, &do_wake); +#if ERTS_POLL_USE_FALLBACK + /* We depend on the timer fd being handled by kernel poll */ + if (ps->fds_status[timer_fd].flags & ERTS_POLL_FD_FLG_INFLBCK) + fatal_error("%s:%d:create_timerfd(): Internal error\n", + __FILE__, __LINE__); +#endif + if (ps->internal_fd_limit <= timer_fd) + ps->internal_fd_limit = timer_fd + 1; + ps->timer_fd = timer_fd; +} + +static ERTS_INLINE void +timerfd_set(ErtsPollSet ps, struct itimerspec *its) +{ +#ifdef DEBUG + struct itimerspec old_its; + int res; + res = timerfd_settime(ps->timer_fd, 0, its, &old_its); + ASSERT(res == 0); + ASSERT(old_its.it_interval.tv_sec == 0 && + old_its.it_interval.tv_nsec == 0 && + old_its.it_value.tv_sec == 0 && + old_its.it_value.tv_nsec == 0); + +#else + timerfd_settime(ps->timer_fd, 0, its, NULL); +#endif +} + +static ERTS_INLINE int +timerfd_clear(ErtsPollSet ps, int res, int max_res) { + + struct itimerspec its; + /* we always have to clear the timer */ + its.it_interval.tv_sec = 0; + its.it_interval.tv_nsec = 0; + its.it_value.tv_sec = 0; + its.it_value.tv_nsec = 0; + timerfd_settime(ps->timer_fd, 0, &its, NULL); + + /* only timeout fd triggered */ + if (res == 1 && ps->res_events[0].data.fd == ps->timer_fd) + return 0; + + return res; +} + +#endif /* ERTS_POLL_USE_TIMERFD */ + + +/* * --- Poll set update requests ---------------------------------------------- */ #if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE @@ -611,27 +697,33 @@ free_update_requests_block(ErtsPollSet ps, * --- Growing poll set structures ------------------------------------------- */ -int -ERTS_POLL_EXPORT(erts_poll_get_table_len) (int new_len) +#ifndef ERTS_KERNEL_POLL_VERSION /* only one shared implementation */ + +#define ERTS_FD_TABLE_MIN_LENGTH 1024 +#define ERTS_FD_TABLE_EXP_THRESHOLD (2048*1024) + +int erts_poll_new_table_len (int old_len, int need_len) { - if (new_len < ERTS_EV_TABLE_MIN_LENGTH) { - new_len = ERTS_EV_TABLE_MIN_LENGTH; - } else if (new_len < ERTS_EV_TABLE_EXP_THRESHOLD) { - /* find next power of 2 */ - --new_len; - new_len |= new_len >> 1; - new_len |= new_len >> 2; - new_len |= new_len >> 4; - new_len |= new_len >> 8; - new_len |= new_len >> 16; - ++new_len; - } else { - /* grow incrementally */ - new_len += ERTS_EV_TABLE_EXP_THRESHOLD; + int new_len; + + ASSERT(need_len > old_len); + if (need_len < ERTS_FD_TABLE_MIN_LENGTH) { + new_len = ERTS_FD_TABLE_MIN_LENGTH; + } + else { + new_len = old_len; + do { + if (new_len < ERTS_FD_TABLE_EXP_THRESHOLD) + new_len *= 2; + else + new_len += ERTS_FD_TABLE_EXP_THRESHOLD; + + } while (new_len < need_len); } + ASSERT(new_len >= need_len); return new_len; } - +#endif #if ERTS_POLL_USE_KERNEL_POLL static void grow_res_events(ErtsPollSet ps, int 
new_len) #elif ERTS_POLL_USE_KQUEUE struct kevent #endif - ) * ERTS_POLL_EXPORT(erts_poll_get_table_len)(new_len); + ) * erts_poll_new_table_len(ps->res_events_len, new_len); /* We do not need to save previously stored data */ if (ps->res_events) erts_free(ERTS_ALC_T_POLL_RES_EVS, ps->res_events); @@ -659,7 +751,7 @@ static void grow_poll_fds(ErtsPollSet ps, int min_ix) { int i; - int new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_ix + 1); + int new_len = erts_poll_new_table_len(ps->poll_fds_len, min_ix + 1); if (new_len > max_fds) new_len = max_fds; ps->poll_fds = (ps->poll_fds_len @@ -681,7 +773,7 @@ grow_poll_fds(ErtsPollSet ps, int min_ix) static void grow_select_fds(int fd, ERTS_fd_set* fds) { - int new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(fd + 1); + int new_len = erts_poll_new_table_len(fds->sz, fd + 1); if (new_len > max_fds) new_len = max_fds; new_len = ERTS_FD_SIZE(new_len); @@ -708,7 +800,7 @@ static void grow_fds_status(ErtsPollSet ps, int min_fd) { int i; - int new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_fd + 1); + int new_len = erts_poll_new_table_len(ps->fds_status_len, min_fd + 1); ASSERT(min_fd < max_fds); if (new_len > max_fds) new_len = max_fds; @@ -1497,6 +1589,12 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake goto done; } #endif +#if ERTS_POLL_USE_TIMERFD + if (fd == ps->timer_fd) { + new_events = ERTS_POLL_EV_NVAL; + goto done; + } +#endif } if (fd >= ps->fds_status_len) @@ -1644,6 +1742,9 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res) #if ERTS_POLL_USE_WAKEUP_PIPE int wake_fd = ps->wake_fds[0]; #endif +#if ERTS_POLL_USE_TIMERFD + int timer_fd = ps->timer_fd; +#endif for (i = 0; i < n; i++) { @@ -1659,6 +1760,11 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res) continue; } #endif +#if ERTS_POLL_USE_TIMERFD + if (fd == timer_fd) { + continue; + } +#endif ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK)); /* epoll_wait() can repeat the same fd in result array... 
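erts_poll_new_table_len() above replaces the old round-to-power-of-two policy with growth relative to the current length: double below a 2M-entry threshold, then grow linearly in threshold-sized steps. A sketch with worked values (the constants mirror the ones above; the precondition old_len >= 1 holds once a table exists):

    #include <assert.h>

    #define MIN_LEN 1024
    #define EXP_TH  (2048 * 1024)

    static int new_table_len(int old_len, int need_len)
    {
        int new_len;
        assert(need_len > old_len);
        if (need_len < MIN_LEN)
            return MIN_LEN;
        new_len = old_len;
        do {                            /* double, then grow linearly */
            new_len = new_len < EXP_TH ? new_len * 2 : new_len + EXP_TH;
        } while (new_len < need_len);
        return new_len;
    }

    /* new_table_len(1024, 1025)              == 2048
     * new_table_len(1 << 20, (1 << 20) + 1)  == 2 << 20   (still doubling)
     * new_table_len(4 << 20, (4 << 20) + 1)  == 6 << 20   (linear phase)  */

Doubling keeps reallocation cost amortized-constant for small tables, while the linear phase caps the overshoot once the table is already huge.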
*/ ix = (int) ps->fds_status[fd].res_ev_ix; @@ -1733,6 +1839,11 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res) continue; } #endif +#if ERTS_POLL_USE_TIMERFD + if (fd == timer_fd) { + continue; + } +#endif revents = ERTS_POLL_EV_N2E(ps->res_events[i].events); pr[res].fd = fd; pr[res].events = revents; @@ -1993,44 +2104,192 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, } } +static ERTS_INLINE ErtsMonotonicTime +get_timeout(ErtsPollSet ps, + int resolution, + ErtsMonotonicTime timeout_time) +{ + ErtsMonotonicTime timeout, save_timeout_time; + + if (timeout_time == ERTS_POLL_NO_TIMEOUT) { + save_timeout_time = ERTS_MONOTONIC_TIME_MIN; + timeout = 0; + } + else { + ErtsMonotonicTime diff_time, current_time; + current_time = erts_get_monotonic_time(NULL); + diff_time = timeout_time - current_time; + if (diff_time <= 0) { + save_timeout_time = ERTS_MONOTONIC_TIME_MIN; + timeout = 0; + } + else { + save_timeout_time = current_time; + switch (resolution) { + case 1000: + /* Round up to nearest even milli second */ + timeout = ERTS_MONOTONIC_TO_MSEC(diff_time - 1) + 1; + if (timeout > (ErtsMonotonicTime) INT_MAX) + timeout = (ErtsMonotonicTime) INT_MAX; + save_timeout_time += ERTS_MSEC_TO_MONOTONIC(timeout); + break; + case 1000000: + /* Round up to nearest even micro second */ + timeout = ERTS_MONOTONIC_TO_USEC(diff_time - 1) + 1; + save_timeout_time += ERTS_USEC_TO_MONOTONIC(timeout); + break; + case 1000000000: + /* Round up to nearest even nano second */ + timeout = ERTS_MONOTONIC_TO_NSEC(diff_time - 1) + 1; + save_timeout_time += ERTS_NSEC_TO_MONOTONIC(timeout); + break; + default: + ERTS_INTERNAL_ERROR("Invalid resolution"); + timeout = 0; + save_timeout_time = 0; + break; + } + } + } + set_timeout_time(ps, save_timeout_time); + return timeout; +} + +#if ERTS_POLL_USE_SELECT + +static ERTS_INLINE int +get_timeout_timeval(ErtsPollSet ps, + SysTimeval *tvp, + ErtsMonotonicTime timeout_time) +{ + ErtsMonotonicTime timeout = get_timeout(ps, + 1000*1000, + timeout_time); + + if (!timeout) { + tvp->tv_sec = 0; + tvp->tv_usec = 0; + + return 0; + } + else { + ErtsMonotonicTime sec = timeout/(1000*1000); + tvp->tv_sec = sec; + tvp->tv_usec = timeout - sec*(1000*1000); + + ASSERT(tvp->tv_sec >= 0); + ASSERT(tvp->tv_usec >= 0); + ASSERT(tvp->tv_usec < 1000*1000); + + return !0; + } + +} + +#endif + +#if ERTS_POLL_USE_KQUEUE || (ERTS_POLL_USE_POLL && defined(HAVE_PPOLL)) || ERTS_POLL_USE_TIMERFD + +static ERTS_INLINE int +get_timeout_timespec(ErtsPollSet ps, + struct timespec *tsp, + ErtsMonotonicTime timeout_time) +{ + ErtsMonotonicTime timeout = get_timeout(ps, + 1000*1000*1000, + timeout_time); + + if (!timeout) { + tsp->tv_sec = 0; + tsp->tv_nsec = 0; + return 0; + } + else { + ErtsMonotonicTime sec = timeout/(1000*1000*1000); + tsp->tv_sec = sec; + tsp->tv_nsec = timeout - sec*(1000*1000*1000); + + ASSERT(tsp->tv_sec >= 0); + ASSERT(tsp->tv_nsec >= 0); + ASSERT(tsp->tv_nsec < 1000*1000*1000); + + return !0; + } +} + +#endif + +#if ERTS_POLL_USE_TIMERFD + static ERTS_INLINE int -check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res) +get_timeout_itimerspec(ErtsPollSet ps, + struct itimerspec *itsp, + ErtsMonotonicTime timeout_time) +{ + + itsp->it_interval.tv_sec = 0; + itsp->it_interval.tv_nsec = 0; + + return get_timeout_timespec(ps, &itsp->it_value, timeout_time); +} + +#endif + +static ERTS_INLINE int +check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) { int res; + ERTS_MSACC_PUSH_STATE_M(); if 
(erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0 - && tv->tv_usec == 0 && tv->tv_sec == 0) { + && timeout_time == ERTS_POLL_NO_TIMEOUT) { /* Nothing to poll and zero timeout; done... */ return 0; } else { - long timeout = tv->tv_sec*1000 + tv->tv_usec/1000; - if (timeout > ERTS_AINT32_T_MAX) - timeout = ERTS_AINT32_T_MAX; - ASSERT(timeout >= 0); - erts_smp_atomic32_set_relb(&ps->timeout, (erts_aint32_t) timeout); + int timeout; #if ERTS_POLL_USE_FALLBACK if (!(ps->fallback_used = ERTS_POLL_NEED_FALLBACK(ps))) { #if ERTS_POLL_USE_EPOLL /* --- epoll ------------------------------- */ - if (timeout > INT_MAX) - timeout = INT_MAX; if (max_res > ps->res_events_len) grow_res_events(ps, max_res); +#if ERTS_POLL_USE_TIMERFD + { + struct itimerspec its; + timeout = get_timeout_itimerspec(ps, &its, timeout_time); + if (timeout) { +#ifdef ERTS_SMP + erts_thr_progress_prepare_wait(NULL); +#endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + timerfd_set(ps, &its); + res = epoll_wait(ps->kp_fd, ps->res_events, max_res, -1); + res = timerfd_clear(ps, res, max_res); + } else { + res = epoll_wait(ps->kp_fd, ps->res_events, max_res, 0); + } + } +#else /* !ERTS_POLL_USE_TIMERFD */ + timeout = (int) get_timeout(ps, 1000, timeout_time); + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif - res = epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout); + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } + res = epoll_wait(ps->kp_fd, ps->res_events, max_res, timeout); +#endif /* !ERTS_POLL_USE_TIMERFD */ #elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */ struct timespec ts; if (max_res > ps->res_events_len) grow_res_events(ps, max_res); + timeout = get_timeout_timespec(ps, &ts, timeout_time); + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif - ts.tv_sec = tv->tv_sec; - ts.tv_nsec = tv->tv_usec*1000; + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts); #endif /* ----------------------------------------- */ } @@ -2049,44 +2308,62 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res) #if ERTS_POLL_USE_WAKEUP_PIPE nfds++; /* Wakeup pipe */ #endif - if (timeout > INT_MAX) - timeout = INT_MAX; + timeout = (int) get_timeout(ps, 1000, timeout_time); poll_res.dp_nfds = nfds < max_res ? 
nfds : max_res; if (poll_res.dp_nfds > ps->res_events_len) grow_res_events(ps, poll_res.dp_nfds); poll_res.dp_fds = ps->res_events; + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif - poll_res.dp_timeout = (int) timeout; + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } + poll_res.dp_timeout = timeout; res = ioctl(ps->kp_fd, DP_POLL, &poll_res); -#elif ERTS_POLL_USE_POLL /* --- poll -------------------------------- */ - if (timeout > INT_MAX) - timeout = INT_MAX; +#elif ERTS_POLL_USE_POLL && defined(HAVE_PPOLL) /* --- ppoll ---------------- */ + struct timespec ts; + timeout = get_timeout_timespec(ps, &ts, timeout_time); + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif - res = poll(ps->poll_fds, ps->no_poll_fds, (int) timeout); + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } + res = ppoll(ps->poll_fds, ps->no_poll_fds, &ts, NULL); +#elif ERTS_POLL_USE_POLL /* --- poll --------------------------------- */ + timeout = (int) get_timeout(ps, 1000, timeout_time); + + if (timeout) { +#ifdef ERTS_SMP + erts_thr_progress_prepare_wait(NULL); +#endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } + res = poll(ps->poll_fds, ps->no_poll_fds, timeout); #elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */ - SysTimeval to = *tv; + SysTimeval to; + timeout = get_timeout_timeval(ps, &to, timeout_time); ERTS_FD_COPY(&ps->input_fds, &ps->res_input_fds); ERTS_FD_COPY(&ps->output_fds, &ps->res_output_fds); - + + if (timeout) { #ifdef ERTS_SMP - if (to.tv_sec || to.tv_usec) erts_thr_progress_prepare_wait(NULL); #endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } res = ERTS_SELECT(ps->max_fd + 1, - &ps->res_input_fds, - &ps->res_output_fds, - NULL, - &to); + &ps->res_input_fds, + &ps->res_output_fds, + NULL, + &to); #ifdef ERTS_SMP - if (to.tv_sec || to.tv_usec) + if (timeout) { erts_thr_progress_finalize_wait(NULL); + ERTS_MSACC_POP_STATE_M(); + } if (res < 0 && errno == EBADF && ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) { @@ -2108,10 +2385,10 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res) handle_update_requests(ps); ERTS_POLLSET_UNLOCK(ps); res = ERTS_SELECT(ps->max_fd + 1, - &ps->res_input_fds, - &ps->res_output_fds, - NULL, - &to); + &ps->res_input_fds, + &ps->res_output_fds, + NULL, + &to); if (res == 0) { errno = EAGAIN; res = -1; @@ -2121,10 +2398,12 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res) return res; #endif /* ----------------------------------------- */ } + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_finalize_wait(NULL); #endif + ERTS_MSACC_POP_STATE_M(); + } return res; } } @@ -2133,15 +2412,14 @@ int ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, ErtsPollResFd pr[], int *len, - SysTimeval *utvp) + ErtsMonotonicTime timeout_time) { + ErtsMonotonicTime to; int res, no_fds; int ebadf = 0; #ifdef ERTS_SMP int ps_locked = 0; #endif - SysTimeval *tvp; - SysTimeval itv; no_fds = *len; #ifdef ERTS_POLL_MAX_RES @@ -2151,13 +2429,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, *len = 0; - ASSERT(utvp); - - tvp = utvp; - #ifdef ERTS_POLL_DEBUG_PRINT - erts_printf("Entering erts_poll_wait(), timeout=%d\n", - (int) tv->tv_sec*1000 + tv->tv_usec/1000); + erts_printf("Entering erts_poll_wait(), timeout_time=%bps\n", + timeout_time); #endif if (ERTS_POLLSET_SET_POLLED_CHK(ps)) { @@ -2166,12 +2440,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, goto done; } - if (is_woken(ps)) { - /* Use 
zero timeout */ - itv.tv_sec = 0; - itv.tv_usec = 0; - tvp = &itv; - } + to = (is_woken(ps) + ? ERTS_POLL_NO_TIMEOUT /* Use zero timeout */ + : timeout_time); #if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE if (ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) { @@ -2181,7 +2452,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, } #endif - res = check_fd_events(ps, tvp, no_fds); + res = check_fd_events(ps, to, no_fds); woke_up(ps); @@ -2224,7 +2495,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, #endif done: - erts_smp_atomic32_set_relb(&ps->timeout, ERTS_AINT32_T_MAX); + set_timeout_time(ps, ERTS_MONOTONIC_TIME_MAX); #ifdef ERTS_POLL_DEBUG_PRINT erts_printf("Leaving %s = erts_poll_wait()\n", res == 0 ? "0" : erl_errno_id(res)); @@ -2268,13 +2539,14 @@ ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet ps) void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps, int set, - erts_short_time_t msec) + ErtsMonotonicTime timeout_time) { #if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP) if (!set) reset_wakeup_state(ps); else { - if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec) + ErtsMonotonicTime max_wait_time = get_timeout_time(ps); + if (max_wait_time > timeout_time) wake_poller(ps, 1, 0); #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS else { @@ -2414,6 +2686,9 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void) #if ERTS_POLL_USE_WAKEUP_PIPE create_wakeup_pipe(ps); #endif +#if ERTS_POLL_USE_TIMERFD + create_timerfd(ps); +#endif #if ERTS_POLL_USE_FALLBACK if (kp_fd >= ps->fds_status_len) grow_fds_status(ps, kp_fd); @@ -2431,7 +2706,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void) ps->internal_fd_limit = kp_fd + 1; ps->kp_fd = kp_fd; #endif - erts_smp_atomic32_init_nob(&ps->timeout, ERTS_AINT32_T_MAX); + init_timeout_time(ps); #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS erts_smp_atomic_init_nob(&ps->no_avoided_wakeups, 0); erts_smp_atomic_init_nob(&ps->no_avoided_interrupts, 0); @@ -2504,13 +2779,18 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps) if (ps->wake_fds[1] >= 0) close(ps->wake_fds[1]); #endif +#if ERTS_POLL_USE_TIMERFD + if (ps->timer_fd >= 0) + close(ps->timer_fd); +#endif erts_smp_spin_lock(&pollsets_lock); if (ps == pollsets) pollsets = pollsets->next; else { ErtsPollSet prev_ps; - for (prev_ps = pollsets; ps != prev_ps->next; prev_ps = prev_ps->next); + for (prev_ps = pollsets; ps != prev_ps->next; prev_ps = prev_ps->next) + ; ASSERT(ps == prev_ps->next); prev_ps->next = ps->next; } @@ -2607,6 +2887,9 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip) #if ERTS_POLL_USE_WAKEUP_PIPE pip->poll_set_size++; /* Wakeup pipe */ #endif +#if ERTS_POLL_USE_TIMERFD + pip->poll_set_size++; /* timerfd */ +#endif pip->fallback_poll_set_size = #if !ERTS_POLL_USE_FALLBACK @@ -2735,14 +3018,18 @@ ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet ps, ev[fd] = 0; else { ev[fd] = ps->fds_status[fd].events; + if ( #if ERTS_POLL_USE_WAKEUP_PIPE - if (fd == ps->wake_fds[0] || fd == ps->wake_fds[1]) - ev[fd] |= ERTS_POLL_EV_NVAL; + fd == ps->wake_fds[0] || fd == ps->wake_fds[1] || +#endif +#if ERTS_POLL_USE_TIMERFD + fd == ps->timer_fd || #endif #if ERTS_POLL_USE_KERNEL_POLL - if (fd == ps->kp_fd) - ev[fd] |= ERTS_POLL_EV_NVAL; + fd == ps->kp_fd || #endif + 0) + ev[fd] |= ERTS_POLL_EV_NVAL; } } ERTS_POLLSET_UNLOCK(ps); diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h index 09ed9f41af..c16122610d 100644 --- a/erts/emulator/sys/common/erl_poll.h +++ b/erts/emulator/sys/common/erl_poll.h @@ -1,18 
+1,19 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2013. All Rights Reserved. + * Copyright Ericsson AB 2006-2016. All Rights Reserved. * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * %CopyrightEnd% */ @@ -29,6 +30,8 @@ #include "sys.h" +#define ERTS_POLL_NO_TIMEOUT ERTS_MONOTONIC_TIME_MIN + #if 0 #define ERTS_POLL_COUNT_AVOIDED_WAKEUPS #endif @@ -96,10 +99,12 @@ # endif #endif +#define ERTS_POLL_USE_TIMERFD 0 + typedef Uint32 ErtsPollEvents; #undef ERTS_POLL_EV_E2N -#if defined(__WIN32__) /* --- win32 ------------------------------- */ +#if defined(__WIN32__) /* --- win32 --------------------------------------- */ #define ERTS_POLL_EV_IN 1 #define ERTS_POLL_EV_OUT 2 @@ -110,8 +115,14 @@ typedef Uint32 ErtsPollEvents; #include <sys/epoll.h> +#ifdef HAVE_SYS_TIMERFD_H +#include <sys/timerfd.h> +#undef ERTS_POLL_USE_TIMERFD +#define ERTS_POLL_USE_TIMERFD 1 +#endif + #define ERTS_POLL_EV_E2N(EV) \ - ((__uint32_t) (EV)) + ((uint32_t) (EV)) #define ERTS_POLL_EV_N2E(EV) \ ((ErtsPollEvents) (EV)) @@ -223,19 +234,20 @@ void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet, int); void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet, int, - erts_short_time_t); + ErtsMonotonicTime); ErtsPollEvents ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet, ErtsSysFdType, ErtsPollEvents, int on, - int* wake_poller); + int* wake_poller + ); void ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet, ErtsPollControlEntry [], int on); int ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet, ErtsPollResFd [], int *, - SysTimeval *); + ErtsMonotonicTime); int ERTS_POLL_EXPORT(erts_poll_max_fds)(void); void ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet, ErtsPollInfo *); @@ -246,6 +258,6 @@ void ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet, ErtsPollEvents [], int); -int ERTS_POLL_EXPORT(erts_poll_get_table_len)(int); +int erts_poll_new_table_len(int old_len, int need_len); #endif /* #ifndef ERL_POLL_H__ */ diff --git a/erts/emulator/sys/common/erl_sys_common_misc.c b/erts/emulator/sys/common/erl_sys_common_misc.c index 31ad3b82d5..79f87eb3a9 100644 --- a/erts/emulator/sys/common/erl_sys_common_misc.c +++ b/erts/emulator/sys/common/erl_sys_common_misc.c @@ -1,18 +1,19 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2013. All Rights Reserved. + * Copyright Ericsson AB 2006-2016. All Rights Reserved. 
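erl_poll.h above turns ERTS_POLL_USE_TIMERFD on only when <sys/timerfd.h> is available, and check_fd_events() earlier uses it to sleep with sub-millisecond precision: arm a CLOCK_MONOTONIC timerfd with the nanosecond deadline, call epoll_wait() with an infinite timeout, and treat readability of the timerfd as the timeout. A standalone, Linux-only sketch of that pattern (error handling elided):

    #include <sys/timerfd.h>
    #include <sys/epoll.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
        int ep = epoll_create1(0);
        int tfd = timerfd_create(CLOCK_MONOTONIC, 0);
        struct epoll_event ev = { 0 };
        ev.events = EPOLLIN;
        ev.data.fd = tfd;
        epoll_ctl(ep, EPOLL_CTL_ADD, tfd, &ev);

        /* one-shot deadline of 1.5 ms: finer than epoll_wait's ms argument */
        struct itimerspec its = { {0, 0}, {0, 1500000} };
        timerfd_settime(tfd, 0, &its, NULL);

        struct epoll_event res[8];
        int n = epoll_wait(ep, res, 8, -1);      /* block without ms timeout */
        if (n == 1 && res[0].data.fd == tfd) {
            uint64_t expirations;
            read(tfd, &expirations, sizeof expirations); /* clear the count */
            puts("timed out with nanosecond-resolution deadline");
        }
        close(tfd);
        close(ep);
        return 0;
    }

erl_poll's timerfd_clear() instead disarms with a zero timerfd_settime() so a stale expiration can never leak into the next wait, and it filters the timerfd out of the result set so callers only ever see real fds.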
* - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * %CopyrightEnd% */ @@ -44,6 +45,14 @@ #endif #endif +/* + * erts_check_io_time is used by the erl_check_io implementation. The + * global erts_check_io_time variable is declared here since there + * (often) exist two versions of erl_check_io (kernel-poll and + * non-kernel-poll), and we don't want two versions of this variable. + */ +erts_smp_atomic_t erts_check_io_time; + /* Written once and only once */ static int filename_encoding = ERL_FILENAME_UNKNOWN; @@ -52,7 +61,7 @@ static int filename_warning = ERL_FILENAME_WARNING_WARNING; /* Default unicode on windows and MacOS X */ static int user_filename_encoding = ERL_FILENAME_UTF8; #else -static int user_filename_encoding = ERL_FILENAME_LATIN1; +static int user_filename_encoding = ERL_FILENAME_UNKNOWN; #endif /* This controls the heuristic in printing characters in shell and w/ io:format("~tp", ...) etc. */ diff --git a/erts/emulator/sys/common/erl_util_queue.h b/erts/emulator/sys/common/erl_util_queue.h index 47925e2264..73293e0225 100644 --- a/erts/emulator/sys/common/erl_util_queue.h +++ b/erts/emulator/sys/common/erl_util_queue.h @@ -1,18 +1,19 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2013. All Rights Reserved. + * Copyright Ericsson AB 2013-2016. All Rights Reserved. * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
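One more arithmetic detail worth pinning down: get_timeout() in erl_poll.c rounds the monotonic delta up with the (diff - 1)/unit + 1 idiom so a sleep is never shorter than requested. On plain integers (assuming the ERTS_MONOTONIC_TO_* conversions are integer divisions by a ticks-per-unit factor):

    #include <assert.h>

    static long ceil_div(long x, long unit)   /* requires x >= 1 */
    {
        return (x - 1) / unit + 1;
    }

    int main(void)
    {
        assert(ceil_div(1, 1000) == 1);     /* a 1-tick wait still sleeps  */
        assert(ceil_div(1000, 1000) == 1);  /* exact multiples stay exact  */
        assert(ceil_div(1001, 1000) == 2);  /* anything over rounds upward */
        return 0;
    }

get_timeout() also records where the deadline will actually land (save_timeout_time), so interrupt decisions compare against the rounded deadline, not the requested one.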
* * %CopyrightEnd% */