Diffstat (limited to 'erts/emulator/beam/erl_nif.c')
-rw-r--r-- erts/emulator/beam/erl_nif.c | 602
1 file changed, 427 insertions(+), 175 deletions(-)
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 848e116621..43441e0228 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -56,6 +56,7 @@
#include "erl_process.h"
#include "erl_bif_unique.h"
#include "erl_utils.h"
+#include "erl_io_queue.h"
#undef ERTS_WANT_NFUNC_SCHED_INTERNALS__
#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
#include "erl_nfunc_sched.h"
@@ -66,7 +67,6 @@
#include <limits.h>
#include <stddef.h> /* offsetof */
-
/* Information about a loaded nif library.
* Each successful call to erlang:load_nif will allocate an instance of
* erl_module_nif. Two calls opening the same library will thus have the same
@@ -138,7 +138,7 @@ execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
Process *c_p = env->proc;
if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) {
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
& ERTS_PROC_LOCK_MAIN);
}
else {
@@ -220,7 +220,7 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
ASSERT(esdp);
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
ASSERT(p->scheduler_data == esdp);
ASSERT((state & (ERTS_PSFLG_RUNNING
@@ -287,7 +287,7 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
else
dirty_shadow_proc = env->proc;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
ep = erts_nif_export_schedule(c_p, dirty_shadow_proc,
c_p->current,
@@ -304,7 +304,6 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
return (ERL_NIF_TERM) THE_NON_VALUE;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
@@ -320,7 +319,7 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
ErlNifEnv env;
ERL_NIF_TERM result;
#ifdef DEBUG
- erts_aint32_t state = erts_smp_atomic32_read_nob(&c_p->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
@@ -343,14 +342,14 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
- erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC));
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
ASSERT(env.proc->next == c_p);
@@ -394,24 +393,19 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
return exiting;
}
-#endif
static void full_flush_env(ErlNifEnv* env)
{
flush_env(env);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
/* Dirty nif call using shadow process struct */
erts_flush_dirty_shadow_proc(env->proc);
-#endif
}
static void full_cache_env(ErlNifEnv* env)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
erts_cache_dirty_shadow_proc(env->proc);
-#endif
cache_env(env);
}
@@ -564,7 +558,6 @@ void enif_clear_env(ErlNifEnv* env)
free_tmp_objs(env);
}
-#ifdef ERTS_SMP
#ifdef DEBUG
static int enif_send_delay = 0;
#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0)
@@ -588,7 +581,11 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
ErlTraceMessageQueue *msgq, **last_msgq;
int reds = 0;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+ /* Only one thread at a time is allowed to flush trace messages,
+ so we require the main lock to be held when doing the flush */
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
msgq = c_p->trace_msg_q;
@@ -607,7 +604,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
msgq->first = NULL;
msgq->last = &msgq->first;
msgq->len = 0;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
ASSERT(len != 0);
@@ -620,13 +617,13 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
if (rp->common.id == c_p->common.id)
rp_locks &= ~c_p_locks;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
reds += len;
} else {
erts_cleanup_messages(first);
}
reds += 1;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
msgq = msgq->next;
} while (msgq);
@@ -643,21 +640,18 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
}
error:
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
return reds;
}
-#endif
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
ErtsProcLocks rp_locks = 0;
-#ifdef ERTS_SMP
ErtsProcLocks lc_locks = 0;
-#endif
Process* rp;
Process* c_p;
ErtsMessage *mp;
@@ -666,13 +660,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
execution_state(env, &c_p, &scheduler);
-#ifndef ERTS_SMP
- if (!scheduler) {
- erts_exit(ERTS_ABORT_EXIT,
- "enif_send: called from non-scheduler thread on non-SMP VM");
- return 0;
- }
-#endif
if (scheduler > 0) { /* Normal scheduler */
rp = erts_proc_lookup(receiver);
@@ -686,7 +673,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
return 0;
if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
}
@@ -695,7 +682,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ERTS_P2P_FLG_INC_REFC);
if (!rp) {
if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
return 0;
}
}
@@ -730,7 +717,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
full_cache_env(env);
}
else {
- erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state);
+ erts_aint_t state = erts_atomic32_read_nob(&rp->state);
if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
mp = erts_alloc_message(sz, &hp);
ohp = sz == 0 ? NULL : &mp->hfrag.off_heap;
@@ -756,7 +743,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
full_cache_env(env);
}
}
-#ifdef ERTS_SMP
else {
/* This clause is taken when the nif is called in the context
of a traced process. We do not know which locks we have
@@ -767,7 +753,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
Process *t_p = env->tracee;
- erts_smp_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
msgq = t_p->trace_msg_q;
@@ -784,7 +770,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#endif
if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
rp_locks & ERTS_PROC_LOCK_MSGQ ||
- erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+ erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
if (!msgq) {
msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
@@ -798,36 +784,33 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
msgq->next = t_p->trace_msg_q;
t_p->trace_msg_q = msgq;
- erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
erts_schedule_flush_trace_messages(t_p, 0);
} else {
msgq->len++;
*msgq->last = mp;
msgq->last = &mp->next;
- erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
}
goto done;
} else {
- erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
rp_locks &= ~ERTS_PROC_LOCK_TRACE;
rp_locks |= ERTS_PROC_LOCK_MSGQ;
}
}
-#endif /* ERTS_SMP */
erts_queue_message(rp, rp_locks, mp, msg,
c_p ? c_p->common.id : am_undefined);
-#ifdef ERTS_SMP
done:
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks & ~lc_locks)
- erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
+ erts_proc_unlock(rp, rp_locks & ~lc_locks);
if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
if (scheduler <= 0)
erts_proc_dec_refc(rp);
@@ -857,15 +840,9 @@ enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
if (scheduler > 0)
prt = erts_port_lookup(to_port->port_id, iflags);
else {
-#ifdef ERTS_SMP
if (ERTS_PROC_IS_EXITING(c_p))
return 0;
prt = erts_thr_port_lookup(to_port->port_id, iflags);
-#else
- erts_exit(ERTS_ABORT_EXIT,
- "enif_port_command: called from non-scheduler "
- "thread on non-SMP VM");
-#endif
}
if (!prt)
@@ -889,26 +866,27 @@ static Eterm call_whereis(ErlNifEnv *env, Eterm name)
Process *c_p;
Eterm res;
int scheduler;
- int unlock;
execution_state(env, &c_p, &scheduler);
ASSERT((c_p && scheduler) || (!c_p && !scheduler));
- unlock = 0;
if (scheduler < 0) {
/* dirty scheduler */
if (ERTS_PROC_IS_EXITING(c_p))
return 0;
- if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
- unlock = 1;
- }
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ c_p = NULL; /* as we don't have main lock */
}
- res = erts_whereis_name_to_id(c_p, name);
- if (unlock)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ if (c_p) {
+ /* main lock may be released below and c_p->htop updated by others */
+ flush_env(env);
+ }
+ res = erts_whereis_name_to_id(c_p, name);
+ if (c_p)
+ cache_env(env);
return res;
}
@@ -1847,18 +1825,11 @@ int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
if (scheduler > 0)
return !!erts_proc_lookup(proc->pid);
else {
-#ifdef ERTS_SMP
Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
ERTS_P2P_FLG_INC_REFC);
if (rp)
erts_proc_dec_refc(rp);
return !!rp;
-#else
- erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: "
- "called from non-scheduler thread "
- "in non-smp emulator");
- return 0;
-#endif
}
}
@@ -1874,17 +1845,10 @@ int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
if (scheduler > 0)
return !!erts_port_lookup(port->port_id, iflags);
else {
-#ifdef ERTS_SMP
Port *prt = erts_thr_port_lookup(port->port_id, iflags);
if (prt)
erts_port_dec_refc(prt);
return !!prt;
-#else
- erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: "
- "called from non-scheduler thread "
- "in non-smp emulator");
- return 0;
-#endif
}
}
@@ -2092,7 +2056,7 @@ ErlNifResourceType* open_resource_type(ErlNifEnv* env,
ErlNifResourceFlags op = flags;
Eterm module_am, name_am;
- ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(erts_thr_progress_is_blocking());
module_am = make_atom(env->mod_nif->mod->module);
name_am = enif_make_atom(env, name_str);
@@ -2231,19 +2195,14 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context)
rp = erts_proc_lookup(mon->u.pid);
}
else {
-#ifdef ERTS_SMP
rp = erts_proc_lookup_inc_refc(mon->u.pid);
-#else
- ASSERT(!"nif monitor destruction in non-scheduler thread");
- rp = NULL;
-#endif
}
if (!rp) {
is_exiting = 1;
}
if (rp) {
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
if (ERTS_PROC_IS_EXITING(rp)) {
is_exiting = 1;
} else {
@@ -2251,11 +2210,9 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context)
ASSERT(rmon);
is_exiting = 0;
}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#ifdef ERTS_SMP
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (ctx->scheduler <= 0)
erts_proc_dec_refc(rp);
-#endif
}
if (is_exiting) {
ctx->resource->monitors->pending_failed_fire++;
@@ -2281,34 +2238,7 @@ static void destroy_all_monitors(ErtsMonitor* monitors, ErtsResource* resource)
}
-#ifdef ERTS_SMP
# define NIF_RESOURCE_DTOR &nif_resource_dtor
-#else
-# define NIF_RESOURCE_DTOR &nosmp_nif_resource_dtor_prologue
-
-/*
- * NO-SMP: Always run resource destructor on scheduler thread
- * as we may have to remove process monitors.
- */
-static int nif_resource_dtor(Binary*);
-
-static void nosmp_nif_resource_dtor_scheduled(void* vbin)
-{
- erts_bin_free((Binary*)vbin);
-}
-
-static int nosmp_nif_resource_dtor_prologue(Binary* bin)
-{
- if (is_scheduler()) {
- return nif_resource_dtor(bin);
- }
- else {
- erts_schedule_misc_aux_work(1, nosmp_nif_resource_dtor_scheduled, bin);
- return 0; /* do not free */
- }
-}
-
-#endif /* !ERTS_SMP */
static int nif_resource_dtor(Binary* bin)
{
@@ -2320,7 +2250,7 @@ static int nif_resource_dtor(Binary* bin)
ErtsResourceMonitors* rm = resource->monitors;
ASSERT(type->down);
- erts_smp_mtx_lock(&rm->lock);
+ erts_mtx_lock(&rm->lock);
ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0);
if (rm->root) {
ASSERT(!rm->is_dying);
@@ -2342,11 +2272,11 @@ static int nif_resource_dtor(Binary* bin)
*/
ASSERT(!rm->is_dying);
rm->is_dying = 1;
- erts_smp_mtx_unlock(&rm->lock);
+ erts_mtx_unlock(&rm->lock);
return 0;
}
- erts_smp_mtx_unlock(&rm->lock);
- erts_smp_mtx_destroy(&rm->lock);
+ erts_mtx_unlock(&rm->lock);
+ erts_mtx_destroy(&rm->lock);
}
if (type->dtor != NULL) {
@@ -2387,12 +2317,12 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref)
ASSERT(rmp);
ASSERT(resource->type->down);
- erts_smp_mtx_lock(&rmp->lock);
+ erts_mtx_lock(&rmp->lock);
rmon = erts_remove_monitor(&rmp->root, ref);
if (!rmon) {
int free_me = (--rmp->pending_failed_fire == 0) && rmp->is_dying;
ASSERT(rmp->pending_failed_fire >= 0);
- erts_smp_mtx_unlock(&rmp->lock);
+ erts_mtx_unlock(&rmp->lock);
if (free_me) {
ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) == 0);
@@ -2408,10 +2338,10 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref)
* we avoid calling 'down' and just silently remove the monitor.
* This can happen even for non smp as destructor calls may be scheduled.
*/
- erts_smp_mtx_unlock(&rmp->lock);
+ erts_mtx_unlock(&rmp->lock);
}
else {
- erts_smp_mtx_unlock(&rmp->lock);
+ erts_mtx_unlock(&rmp->lock);
ASSERT(rmon->u.pid == pid);
erts_ref_to_driver_monitor(ref, &nif_monitor);
@@ -2456,7 +2386,7 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
erts_refc_inc(&resource->type->refc, 2);
if (type->down) {
resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
- erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
+ erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
resource->monitors->root = NULL;
resource->monitors->pending_failed_fire = 0;
@@ -2655,7 +2585,6 @@ nif_export_restore(Process *c_p, NifExport *ep, Eterm res)
}
-#ifdef ERTS_DIRTY_SCHEDULERS
/*
* Finalize a dirty NIF call. This function is scheduled to cause the VM to
@@ -2725,7 +2654,7 @@ schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp,
execution_state(env, &proc, NULL);
- (void) erts_smp_atomic32_read_bset_nob(&proc->state,
+ (void) erts_atomic32_read_bset_nob(&proc->state,
(ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC),
(flags == ERL_NIF_DIRTY_JOB_CPU_BOUND
@@ -2763,7 +2692,7 @@ static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg,
ASSERT(is_atom(mod) && is_atom(func));
ASSERT(fp);
- (void) erts_smp_atomic32_read_bset_nob(&proc->state,
+ (void) erts_atomic32_read_bset_nob(&proc->state,
(ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC),
dirty_psflg);
@@ -2783,7 +2712,6 @@ static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv);
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
/*
* NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
@@ -2857,24 +2785,20 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
if (scheduler <= 0) {
if (scheduler == 0)
enif_make_badarg(env);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
}
if (flags == 0)
result = schedule(env, execute_nif, fp, proc->current->module,
fun_name_atom, argc, argv);
else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) {
-#ifdef ERTS_DIRTY_SCHEDULERS
result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv);
-#else
- result = enif_raise_exception(env, am_notsup);
-#endif
}
else
result = enif_make_badarg(env);
if (scheduler < 0)
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
return result;
}
@@ -2890,12 +2814,10 @@ enif_thread_type(void)
switch (esdp->type) {
case ERTS_SCHED_NORMAL:
return ERL_NIF_THR_NORMAL_SCHEDULER;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
return ERL_NIF_THR_DIRTY_CPU_SCHEDULER;
case ERTS_SCHED_DIRTY_IO:
return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
return -1;
@@ -3216,27 +3138,19 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
execution_state(env, NULL, &scheduler);
-#ifdef ERTS_SMP
if (scheduler > 0) /* Normal scheduler */
rp = erts_proc_lookup_raw(target_pid->pid);
else
rp = erts_proc_lookup_raw_inc_refc(target_pid->pid);
-#else
- if (scheduler <= 0) {
- erts_exit(ERTS_ABORT_EXIT, "enif_monitor_process: called from "
- "non-scheduler thread on non-SMP VM");
- }
- rp = erts_proc_lookup(target_pid->pid);
-#endif
if (!rp)
return 1;
ref = erts_make_ref_in_buffer(tmp);
- erts_smp_mtx_lock(&rsrc->monitors->lock);
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
- if (ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state)) {
+ erts_mtx_lock(&rsrc->monitors->lock);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state)) {
retval = 1;
}
else {
@@ -3244,13 +3158,11 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
erts_add_monitor(&ERTS_P_MONITORS(rp), MON_NIF_TARGET, ref, (UWord)rsrc, NIL);
retval = 0;
}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- erts_smp_mtx_unlock(&rsrc->monitors->lock);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_mtx_unlock(&rsrc->monitors->lock);
-#ifdef ERTS_SMP
if (scheduler <= 0)
erts_proc_dec_refc(rp);
-#endif
if (monitor)
erts_ref_to_driver_monitor(ref,monitor);
@@ -3278,35 +3190,27 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit
ref = erts_driver_monitor_to_ref(ref_heap, monitor);
- erts_smp_mtx_lock(&rsrc->monitors->lock);
+ erts_mtx_lock(&rsrc->monitors->lock);
mon = erts_remove_monitor(&rsrc->monitors->root, ref);
if (mon == NULL) {
- erts_smp_mtx_unlock(&rsrc->monitors->lock);
+ erts_mtx_unlock(&rsrc->monitors->lock);
return 1;
}
ASSERT(mon->type == MON_ORIGIN);
ASSERT(is_internal_pid(mon->u.pid));
-#ifdef ERTS_SMP
if (scheduler > 0) /* Normal scheduler */
rp = erts_proc_lookup(mon->u.pid);
else
rp = erts_proc_lookup_inc_refc(mon->u.pid);
-#else
- if (scheduler <= 0) {
- erts_exit(ERTS_ABORT_EXIT, "enif_demonitor_process: called from "
- "non-scheduler thread on non-SMP VM");
- }
- rp = erts_proc_lookup(mon->u.pid);
-#endif
if (!rp) {
is_exiting = 1;
}
else {
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
if (ERTS_PROC_IS_EXITING(rp)) {
is_exiting = 1;
} else {
@@ -3314,17 +3218,15 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit
ASSERT(rmon);
is_exiting = 0;
}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#ifdef ERTS_SMP
if (scheduler <= 0)
erts_proc_dec_refc(rp);
-#endif
}
if (is_exiting) {
rsrc->monitors->pending_failed_fire++;
}
- erts_smp_mtx_unlock(&rsrc->monitors->lock);
+ erts_mtx_unlock(&rsrc->monitors->lock);
if (rmon) {
ASSERT(rmon->type == MON_NIF_TARGET);
@@ -3343,6 +3245,363 @@ int enif_compare_monitors(const ErlNifMonitor *monitor1,
ERTS_REF_THING_SIZE*sizeof(Eterm));
}
+ErlNifIOQueue *enif_ioq_create(ErlNifIOQueueOpts opts)
+{
+ ErlNifIOQueue *q;
+
+ if (opts != ERL_NIF_IOQ_NORMAL)
+ return NULL;
+
+ q = enif_alloc(sizeof(ErlNifIOQueue));
+ if (!q) return NULL;
+ erts_ioq_init(q, ERTS_ALC_T_NIF, 0);
+
+ return q;
+}
+
+void enif_ioq_destroy(ErlNifIOQueue *q)
+{
+ erts_ioq_clear(q);
+ enif_free(q);
+}
+
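
As a usage sketch (not part of this patch, names hypothetical): a NIF would
typically tie one of these queues to a resource so that the queue is destroyed
together with its owner. Only the enif_ioq_* calls below come from this file.

    #include <erl_nif.h>

    typedef struct { ErlNifIOQueue *q; } port_state;

    /* Resource destructor: free the queue together with its owner. */
    static void port_state_dtor(ErlNifEnv *env, void *obj)
    {
        port_state *state = (port_state*)obj;
        (void)env;
        if (state->q)
            enif_ioq_destroy(state->q);
    }

    static int port_state_init(port_state *state)
    {
        /* ERL_NIF_IOQ_NORMAL is the only accepted option; anything else
         * makes enif_ioq_create() return NULL (see above). */
        state->q = enif_ioq_create(ERL_NIF_IOQ_NORMAL);
        return state->q != NULL;
    }
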
+/* If the iovec was preallocated (stack or otherwise) it needs to be marked as
+ * such to perform a proper free. */
+#define ERL_NIF_IOVEC_FLAGS_PREALLOC (1 << 0)
+
+void enif_free_iovec(ErlNifIOVec *iov)
+{
+ int i;
+ /* Decrement the refc of all the binaries */
+ for (i = 0; i < iov->iovcnt; i++) {
+ Binary *bptr = ((Binary**)iov->ref_bins)[i];
+ /* bptr can be null if enq_binary was used */
+ if (bptr && erts_refc_dectest(&bptr->intern.refc, 0) == 0) {
+ erts_bin_free(bptr);
+ }
+ }
+
+ if (!(iov->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
+ enif_free(iov);
+ }
+}
+
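
A hedged sketch of the standalone path that makes this free function
necessary, using the enif_inspect_iovec() added further down: when it is
called with a NULL env it takes explicit references on the backing binaries,
and the resulting iovec must later be released with enif_free_iovec(). The
helper name and the chunk limit 64 are assumptions.

    /* Hypothetical helper: detach an iovec from the calling NIF so it can
     * outlive the call, e.g. for use on another thread. */
    static ErlNifIOVec *detach_iovec(ERL_NIF_TERM binary_list)
    {
        ErlNifIOVec *iovec = NULL; /* NULL asks for a heap-allocated iovec */
        ERL_NIF_TERM tail;

        if (!enif_inspect_iovec(NULL, 64, binary_list, &tail, &iovec))
            return NULL;

        /* 'tail' holds any elements beyond the first 64; handling a partial
         * inspection is omitted here. Caller must enif_free_iovec(iovec). */
        return iovec;
    }
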
+typedef struct {
+ UWord sublist_length;
+ Eterm sublist_start;
+ Eterm sublist_end;
+
+ UWord offheap_size;
+ UWord onheap_size;
+
+ UWord iovec_len;
+} iovec_slice_t;
+
+static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *result) {
+ Eterm lookahead;
+
+ result->sublist_start = list;
+ result->sublist_length = 0;
+ result->offheap_size = 0;
+ result->onheap_size = 0;
+ result->iovec_len = 0;
+
+ lookahead = result->sublist_start;
+
+ while (is_list(lookahead)) {
+ Eterm *binary_header, binary;
+ Eterm *cell;
+ UWord size;
+
+ cell = list_val(lookahead);
+ binary = CAR(cell);
+
+ if (!is_binary(binary)) {
+ return 0;
+ }
+
+ size = binary_size(binary);
+ binary_header = binary_val(binary);
+
+ /* If we're a sub-binary we'll need to check our underlying binary to
+ * determine whether we're on-heap or not. */
+ if(thing_subtag(*binary_header) == SUB_BINARY_SUBTAG) {
+ ErlSubBin *sb = (ErlSubBin*)binary_header;
+
+ /* Reject bitstrings */
+ if((sb->bitoffs + sb->bitsize) > 0) {
+ return 0;
+ }
+
+ ASSERT(size <= binary_size(sb->orig));
+ binary_header = binary_val(sb->orig);
+ }
+
+ if(thing_subtag(*binary_header) == HEAP_BINARY_SUBTAG) {
+ ASSERT(size <= ERL_ONHEAP_BIN_LIMIT);
+
+ result->iovec_len += 1;
+ result->onheap_size += size;
+ } else {
+ ASSERT(thing_subtag(*binary_header) == REFC_BINARY_SUBTAG);
+
+ result->iovec_len += 1 + size / MAX_SYSIOVEC_IOVLEN;
+ result->offheap_size += size;
+ }
+
+ result->sublist_length += 1;
+ lookahead = CDR(cell);
+
+ if(result->sublist_length >= max_length) {
+ break;
+ }
+ }
+
+ if (!is_nil(lookahead) && !is_list(lookahead)) {
+ return 0;
+ }
+
+ result->sublist_end = lookahead;
+
+ return 1;
+}
+
+static void inspect_raw_binary_data(Eterm binary, ErlNifBinary *result) {
+ Eterm *parent_header;
+ Eterm parent_binary;
+
+ int bit_offset, bit_size;
+ Uint byte_offset;
+
+ ASSERT(is_binary(binary));
+
+ ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size);
+
+ parent_header = binary_val(parent_binary);
+
+ result->size = binary_size(binary);
+ result->bin_term = binary;
+
+ if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
+ ProcBin *pb = (ProcBin*)parent_header;
+
+ ASSERT(pb->val != NULL);
+ ASSERT(byte_offset < pb->size);
+ ASSERT(&pb->bytes[byte_offset] >= (byte*)(pb->val)->orig_bytes);
+
+ result->data = (unsigned char*)&pb->bytes[byte_offset];
+ result->ref_bin = (void*)pb->val;
+ } else {
+ ErlHeapBin *hb = (ErlHeapBin*)parent_header;
+
+ ASSERT(thing_subtag(*parent_header) == HEAP_BINARY_SUBTAG);
+
+ result->data = &((unsigned char*)&hb->data)[byte_offset];
+ result->ref_bin = NULL;
+ }
+}
+
+static int fill_iovec_with_slice(ErlNifEnv *env,
+ iovec_slice_t *slice,
+ ErlNifIOVec *iovec) {
+ UWord onheap_offset, iovec_idx;
+ ErlNifBinary onheap_data;
+ Eterm sublist_iterator;
+
+ /* Set up a common refc binary for all on-heap binaries. */
+ if (slice->onheap_size > 0) {
+ if (!enif_alloc_binary(slice->onheap_size, &onheap_data)) {
+ return 0;
+ }
+ }
+
+ sublist_iterator = slice->sublist_start;
+ onheap_offset = 0;
+ iovec_idx = 0;
+
+ while (sublist_iterator != slice->sublist_end) {
+ ErlNifBinary raw_data;
+ Eterm *cell;
+
+ cell = list_val(sublist_iterator);
+ inspect_raw_binary_data(CAR(cell), &raw_data);
+
+ /* If this isn't a refc binary, copy its contents to the onheap buffer
+ * and reference that instead. */
+ if (raw_data.ref_bin == NULL) {
+ ASSERT(onheap_offset < onheap_data.size);
+ ASSERT(slice->onheap_size > 0);
+
+ sys_memcpy(&onheap_data.data[onheap_offset],
+ raw_data.data, raw_data.size);
+
+ raw_data.data = &onheap_data.data[onheap_offset];
+ raw_data.ref_bin = onheap_data.ref_bin;
+ }
+
+ ASSERT(raw_data.ref_bin != NULL);
+
+ while (raw_data.size > 0) {
+ UWord chunk_len = MIN(raw_data.size, MAX_SYSIOVEC_IOVLEN);
+
+ ASSERT(iovec_idx < iovec->iovcnt);
+
+ iovec->iov[iovec_idx].iov_base = raw_data.data;
+ iovec->iov[iovec_idx].iov_len = chunk_len;
+
+ iovec->ref_bins[iovec_idx] = raw_data.ref_bin;
+
+ raw_data.data += chunk_len;
+ raw_data.size -= chunk_len;
+
+ iovec_idx += 1;
+ }
+
+ sublist_iterator = CDR(cell);
+ }
+
+ ASSERT(iovec_idx == iovec->iovcnt);
+
+ if (env == NULL) {
+ int i;
+ for (i = 0; i < iovec->iovcnt; i++) {
+ Binary *refc_binary = (Binary*)(iovec->ref_bins[i]);
+ erts_refc_inc(&refc_binary->intern.refc, 1);
+ }
+
+ if (slice->onheap_size > 0) {
+ /* Transfer ownership to the iovec; we've taken references to it in
+ * the above loop. */
+ enif_release_binary(&onheap_data);
+ }
+ } else {
+ if (slice->onheap_size > 0) {
+ /* Attach the binary to our environment and let the GC take care of
+ * it after returning. */
+ enif_make_binary(env, &onheap_data);
+ }
+ }
+
+ return 1;
+}
+
+static int create_iovec_from_slice(ErlNifEnv *env,
+ iovec_slice_t *slice,
+ ErlNifIOVec **result) {
+ ErlNifIOVec *iovec = *result;
+
+ if (iovec && slice->iovec_len < ERL_NIF_IOVEC_SIZE) {
+ iovec->iov = iovec->small_iov;
+ iovec->ref_bins = iovec->small_ref_bin;
+ iovec->flags = ERL_NIF_IOVEC_FLAGS_PREALLOC;
+ } else {
+ UWord iov_offset, binv_offset, alloc_size;
+ char *alloc_base;
+
+ iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlNifIOVec));
+ binv_offset = iov_offset;
+ binv_offset += ERTS_ALC_DATA_ALIGN_SIZE(slice->iovec_len * sizeof(SysIOVec));
+ alloc_size = binv_offset;
+ alloc_size += slice->iovec_len * sizeof(Binary*);
+
+ /* If we have an environment we'll attach the allocated data to it. The
+ * GC will take care of releasing it later on. */
+ if (env != NULL) {
+ ErlNifBinary gc_bin;
+
+ if (!enif_alloc_binary(alloc_size, &gc_bin)) {
+ return 0;
+ }
+
+ alloc_base = (char*)gc_bin.data;
+ enif_make_binary(env, &gc_bin);
+ } else {
+ alloc_base = enif_alloc(alloc_size);
+ }
+
+ iovec = (ErlNifIOVec*)alloc_base;
+ iovec->iov = (SysIOVec*)(alloc_base + iov_offset);
+ iovec->ref_bins = (void**)(alloc_base + binv_offset);
+ iovec->flags = 0;
+ }
+
+ iovec->size = slice->offheap_size + slice->onheap_size;
+ iovec->iovcnt = slice->iovec_len;
+
+ if(!fill_iovec_with_slice(env, slice, iovec)) {
+ if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
+ enif_free(iovec);
+ }
+
+ return 0;
+ }
+
+ *result = iovec;
+
+ return 1;
+}
+
+int enif_inspect_iovec(ErlNifEnv *env, size_t max_elements,
+ ERL_NIF_TERM list, ERL_NIF_TERM *tail,
+ ErlNifIOVec **iov) {
+ iovec_slice_t slice;
+
+ if(!examine_iovec_term(list, max_elements, &slice)) {
+ return 0;
+ } else if(!create_iovec_from_slice(env, &slice, iov)) {
+ return 0;
+ }
+
+ (*tail) = slice.sublist_end;
+
+ return 1;
+}
+
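
A hedged sketch of the common scheduler-side use, close to the example that
later appeared in the erl_nif documentation; the chunk size 64 and the
omitted write step are placeholders.

    static ERL_NIF_TERM writev_example(ErlNifEnv *env, int argc,
                                       const ERL_NIF_TERM argv[])
    {
        ErlNifIOVec vec, *iovec = &vec; /* small lists stay on the stack */
        ERL_NIF_TERM tail = argv[0];

        while (!enif_is_empty_list(env, tail)) {
            if (!enif_inspect_iovec(env, 64, tail, &tail, &iovec))
                return enif_make_badarg(env);

            /* iovec->iov and iovec->iovcnt are now ready for writev() or
             * similar. With a non-NULL env the backing memory is attached
             * to the environment, so no enif_free_iovec() is needed. */
        }

        return enif_make_atom(env, "ok");
    }
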
+/* Enqueue the iovec into 'q', skipping the first 'skip' bytes. Fails if
+ * 'skip' exceeds the iovec's total size. */
+int enif_ioq_enqv(ErlNifIOQueue *q, ErlNifIOVec *iov, size_t skip)
+{
+ if(skip <= iov->size) {
+ return !erts_ioq_enqv(q, (ErtsIOVec*)iov, skip);
+ }
+
+ return 0;
+}
+
+int enif_ioq_enq_binary(ErlNifIOQueue *q, ErlNifBinary *bin, size_t skip)
+{
+ ErlNifIOVec vec = {1, bin->size, NULL, NULL, ERL_NIF_IOVEC_FLAGS_PREALLOC };
+ Binary *ref_bin = (Binary*)bin->ref_bin;
+ int res;
+ vec.iov = vec.small_iov;
+ vec.ref_bins = vec.small_ref_bin;
+ vec.iov[0].iov_base = bin->data;
+ vec.iov[0].iov_len = bin->size;
+ ((Binary**)(vec.ref_bins))[0] = ref_bin;
+
+ res = enif_ioq_enqv(q, &vec, skip);
+ enif_release_binary(bin);
+ return res;
+}
+
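
Note the ownership transfer above: the binary is released unconditionally, so
the caller must treat it as consumed even when enqueueing fails. A minimal
sketch, with a hypothetical helper name:

    #include <string.h>

    /* Copy a raw buffer into a binary owned by the queue. */
    static int ioq_enq_copy(ErlNifIOQueue *q, const void *buf, size_t len)
    {
        ErlNifBinary bin;

        if (!enif_alloc_binary(len, &bin))
            return 0;
        memcpy(bin.data, buf, len);

        return enif_ioq_enq_binary(q, &bin, 0); /* releases 'bin' */
    }
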
+size_t enif_ioq_size(ErlNifIOQueue *q)
+{
+ return erts_ioq_size(q);
+}
+
+int enif_ioq_deq(ErlNifIOQueue *q, size_t elems, size_t *size)
+{
+ if (erts_ioq_deq(q, elems) == -1)
+ return 0;
+ if (size)
+ *size = erts_ioq_size(q);
+ return 1;
+}
+
+SysIOVec *enif_ioq_peek(ErlNifIOQueue *q, int *iovlen)
+{
+ return erts_ioq_peekq(q, iovlen);
+}
+
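
Finally, a sketch of the peek/dequeue cycle these last three functions enable,
with a hypothetical vectored-write callback standing in for the consumer.

    typedef long (*writev_fn)(SysIOVec *iov, int iovlen);

    /* Drain as much of the queue as the consumer accepts. Peeking exposes
     * the head of the queue as a SysIOVec array without copying; dequeueing
     * then discards the bytes that were actually consumed. */
    static int flush_ioq(ErlNifIOQueue *q, writev_fn consume)
    {
        while (enif_ioq_size(q) > 0) {
            int iovlen;
            SysIOVec *iov = enif_ioq_peek(q, &iovlen);
            long written = consume(iov, iovlen);

            if (written <= 0)
                return 0; /* error or would-block; try again later */
            if (!enif_ioq_deq(q, (size_t)written, NULL))
                return 0; /* dequeued more than was queued */
        }
        return 1;
    }
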
/***************************************************************************
** load_nif/2 **
***************************************************************************/
@@ -3550,8 +3809,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
/* Block system (is this the right place to do it?) */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
/* Find calling module */
ASSERT(BIF_P->current != NULL);
@@ -3656,14 +3915,9 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
* dirty scheduler support, treat a non-zero flags field as
* a load error.
*/
-#ifdef ERTS_DIRTY_SCHEDULERS
if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
f->flags, mod_atom, f->name, f->arity);
-#else
- ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.",
- mod_atom, f->name, f->arity);
-#endif
}
else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
< BEAM_NIF_MIN_FUNC_SZ)
@@ -3736,7 +3990,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
(BeamInstr) BeamOp(op_i_generic_breakpoint));
g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (f->flags) {
code_ptr[3] = (BeamInstr) f->fptr;
code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
@@ -3744,7 +3997,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
(BeamInstr) static_schedule_dirty_cpu_nif;
}
else
-#endif
code_ptr[1] = (BeamInstr) f->fptr;
code_ptr[2] = (BeamInstr) lib;
}
@@ -3762,8 +4014,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_sys_ddll_free_error(&errdesc);
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
erts_free(ERTS_ALC_T_TMP, lib_name);
@@ -3776,7 +4028,7 @@ erts_unload_nif(struct erl_module_nif* lib)
{
ErlNifResourceType* rt;
ErlNifResourceType* next;
- ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(erts_thr_progress_is_blocking());
ASSERT(lib != NULL);
ASSERT(lib->mod != NULL);
@@ -3848,8 +4100,8 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
break;
ASSERT(i < mod->entry.num_of_funcs);
if (p)
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
- || erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
+ || erts_thr_progress_is_blocking());
#endif
if (p) {
/* This is almost a normal nif call like in beam_emu,