Diffstat (limited to 'erts/emulator/beam/erl_nif.c')
-rw-r--r--  erts/emulator/beam/erl_nif.c  1073
1 file changed, 596 insertions(+), 477 deletions(-)
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 4e479c26ef..ebef485b04 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2017. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -57,6 +57,7 @@
#include "erl_bif_unique.h"
#include "erl_utils.h"
#include "erl_io_queue.h"
+#include "erl_proc_sig_queue.h"
#undef ERTS_WANT_NFUNC_SCHED_INTERNALS__
#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
#include "erl_nfunc_sched.h"
@@ -138,7 +139,7 @@ execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
Process *c_p = env->proc;
if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) {
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
& ERTS_PROC_LOCK_MAIN);
}
else {
@@ -220,7 +221,7 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
ASSERT(esdp);
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
ASSERT(p->scheduler_data == esdp);
ASSERT((state & (ERTS_PSFLG_RUNNING
@@ -237,9 +238,11 @@ static void cache_env(ErlNifEnv* env);
static void full_flush_env(ErlNifEnv *env);
static void flush_env(ErlNifEnv* env);
-/* Temporary object header, auto-deallocated when NIF returns
- * or when independent environment is cleared.
- */
+/* Temporary object header, auto-deallocated when NIF returns or when
+ * independent environment is cleared.
+ *
+ * The payload can be accessed with &tmp_obj_ptr[1] but keep in mind that its
+ * first element must not require greater alignment than `next`. */
struct enif_tmp_obj_t {
struct enif_tmp_obj_t* next;
void (*dtor)(struct enif_tmp_obj_t*);
@@ -256,6 +259,46 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
}
}
+/* Whether the given environment is bound to a process and will be cleaned up
+ * when the NIF returns. It's safe to use temp_alloc for objects in
+ * env->tmp_obj_list when this is true. */
+static ERTS_INLINE int is_proc_bound(ErlNifEnv *env)
+{
+ return env->mod_nif != NULL;
+}
+
+/* Allocates and attaches an object to the given environment, running its
+ * destructor when the environment is cleared. To avoid temporary variables the
+ * address of the allocated object is returned instead of the enif_tmp_obj_t.
+ *
+ * The destructor *must* call `erts_free(tmp_obj->allocator, tmp_obj)` to free
+ * the object. If the destructor needs to refer to the allocated object its
+ * address will be &tmp_obj[1]. */
+static ERTS_INLINE void *alloc_tmp_obj(ErlNifEnv *env, size_t size,
+ void (*dtor)(struct enif_tmp_obj_t*)) {
+ struct enif_tmp_obj_t *tmp_obj;
+ ErtsAlcType_t allocator;
+
+ allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
+
+ tmp_obj = erts_alloc(allocator, sizeof(struct enif_tmp_obj_t) + MAX(1, size));
+
+ tmp_obj->next = env->tmp_obj_list;
+ tmp_obj->allocator = allocator;
+ tmp_obj->dtor = dtor;
+
+ env->tmp_obj_list = tmp_obj;
+
+ return (void*)&tmp_obj[1];
+}
+
+/* Generic destructor for objects allocated through alloc_tmp_obj that don't
+ * care about their payload. */
+static void tmp_alloc_dtor(struct enif_tmp_obj_t *tmp_obj)
+{
+ erts_free(tmp_obj->allocator, tmp_obj);
+}
+
void erts_post_nif(ErlNifEnv* env)
{
erts_unblock_fpe(env->fpe_was_unmasked);
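For orientation, a minimal sketch (hypothetical, not part of this patch) of a destructor written against the alloc_tmp_obj contract above: the payload lives at &tmp_obj[1], and the destructor itself must free the header with the recorded allocator.

    /* Hypothetical payload; its first member must not need stricter
     * alignment than the `next` pointer in enif_tmp_obj_t. */
    struct my_payload {
        void *buffer;
    };

    static void my_payload_dtor(struct enif_tmp_obj_t *tmp_obj)
    {
        struct my_payload *payload = (struct my_payload*)&tmp_obj[1];

        /* Release what the payload owns, then do the mandatory free of
         * the header itself. */
        enif_free(payload->buffer);
        erts_free(tmp_obj->allocator, tmp_obj);
    }

Such a destructor would be registered with alloc_tmp_obj(env, sizeof(struct my_payload), &my_payload_dtor).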
@@ -287,12 +330,12 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
else
dirty_shadow_proc = env->proc;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
ep = erts_nif_export_schedule(c_p, dirty_shadow_proc,
c_p->current,
c_p->cp,
- (BeamInstr) em_call_nif,
+ BeamOpCodeAddr(op_call_nif),
direct_fp, indirect_fp,
mod, func_name,
argc, (const Eterm *) argv);
@@ -304,7 +347,6 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
return (ERL_NIF_TERM) THE_NON_VALUE;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
@@ -320,7 +362,7 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
ErlNifEnv env;
ERL_NIF_TERM result;
#ifdef DEBUG
- erts_aint32_t state = erts_smp_atomic32_read_nob(&c_p->state);
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
@@ -343,14 +385,20 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));
- erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC));
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ ASSERT(esdp->current_nif == NULL);
+ esdp->current_nif = &env;
+
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(esdp->current_nif == &env);
+ esdp->current_nif = NULL;
ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
ASSERT(env.proc->next == c_p);
@@ -394,24 +442,29 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
return exiting;
}
-#endif
static void full_flush_env(ErlNifEnv* env)
{
flush_env(env);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
/* Dirty nif call using shadow process struct */
erts_flush_dirty_shadow_proc(env->proc);
-#endif
}
static void full_cache_env(ErlNifEnv* env)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
erts_cache_dirty_shadow_proc(env->proc);
-#endif
+	/*
+	 * If the shadow proc had heap fragments when it was flushed,
+	 * those have now been moved to the real proc. Ensure the heap
+	 * pointers do not point into a heap fragment on the real proc...
+	 */
+ ASSERT(!env->proc->mbuf);
+ env->hp_end = HEAP_LIMIT(env->proc);
+ env->hp = HEAP_TOP(env->proc);
+ }
cache_env(env);
}
@@ -452,6 +505,7 @@ static void cache_env(ErlNifEnv* env)
env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
}
}
+
void* enif_priv_data(ErlNifEnv* env)
{
return env->mod_nif->priv_data;
@@ -492,7 +546,7 @@ setup_nif_env(struct enif_msg_environment_t* msg_env,
msg_env->env.tmp_obj_list = NULL;
msg_env->env.proc = &msg_env->phony_proc;
msg_env->env.exception_thrown = 0;
- memset(&msg_env->phony_proc, 0, sizeof(Process));
+ sys_memset(&msg_env->phony_proc, 0, sizeof(Process));
HEAP_START(&msg_env->phony_proc) = phony_heap;
HEAP_TOP(&msg_env->phony_proc) = phony_heap;
HEAP_LIMIT(&msg_env->phony_proc) = phony_heap;
@@ -566,10 +620,9 @@ void enif_clear_env(ErlNifEnv* env)
ASSERT(!is_offheap(&MSO(p)));
}
-#ifdef ERTS_SMP
#ifdef DEBUG
static int enif_send_delay = 0;
-#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0)
+#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 32 == 0)
#else
#ifdef ERTS_PROC_LOCK_OWN_IMPL
#define ERTS_FORCE_ENIF_SEND_DELAY() 0
@@ -592,9 +645,9 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
/* Only one thread at a time is allowed to flush trace messages,
so we require the main lock to be held when doing the flush */
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
msgq = c_p->trace_msg_q;
@@ -613,7 +666,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
msgq->first = NULL;
msgq->last = &msgq->first;
msgq->len = 0;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
ASSERT(len != 0);
@@ -622,17 +675,17 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
rp_locks = 0;
if (rp->common.id == c_p->common.id)
rp_locks = c_p_locks;
- erts_queue_messages(rp, rp_locks, first, last, len, c_p->common.id);
+ erts_queue_proc_messages(c_p, rp, rp_locks, first, last, len);
if (rp->common.id == c_p->common.id)
rp_locks &= ~c_p_locks;
if (rp_locks)
- erts_smp_proc_unlock(rp, rp_locks);
+ erts_proc_unlock(rp, rp_locks);
reds += len;
} else {
erts_cleanup_messages(first);
}
reds += 1;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
msgq = msgq->next;
} while (msgq);
@@ -649,36 +702,27 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
}
error:
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
return reds;
}
-#endif
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
ErtsProcLocks rp_locks = 0;
-#ifdef ERTS_SMP
ErtsProcLocks lc_locks = 0;
-#endif
Process* rp;
Process* c_p;
ErtsMessage *mp;
+ Eterm from;
Eterm receiver = to_pid->pid;
int scheduler;
execution_state(env, &c_p, &scheduler);
-#ifndef ERTS_SMP
- if (!scheduler) {
- erts_exit(ERTS_ABORT_EXIT,
- "enif_send: called from non-scheduler thread on non-SMP VM");
- return 0;
- }
-#endif
if (scheduler > 0) { /* Normal scheduler */
rp = erts_proc_lookup(receiver);
@@ -692,7 +736,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
return 0;
if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
}
@@ -701,7 +745,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ERTS_P2P_FLG_INC_REFC);
if (!rp) {
if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
return 0;
}
}
@@ -710,8 +754,58 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
rp_locks = ERTS_PROC_LOCK_MAIN;
if (menv) {
+ Eterm token = c_p ? SEQ_TRACE_TOKEN(c_p) : am_undefined;
+ if (token != NIL && token != am_undefined) {
+ /* This code is copied from erts_send_message */
+ Eterm stoken = SEQ_TRACE_TOKEN(c_p);
+#ifdef USE_VM_PROBES
+ DTRACE_CHARBUF(sender_name, 64);
+ DTRACE_CHARBUF(receiver_name, 64);
+ Sint tok_label = 0;
+ Sint tok_lastcnt = 0;
+ Sint tok_serial = 0;
+ Eterm utag = NIL;
+ *sender_name = *receiver_name = '\0';
+ if (DTRACE_ENABLED(message_send)) {
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)),
+ "%T", c_p->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)),
+ "%T", rp->common.id);
+ }
+#endif
+ if (have_seqtrace(stoken)) {
+ seq_trace_update_send(c_p);
+ seq_trace_output(stoken, msg, SEQ_TRACE_SEND,
+ rp->common.id, c_p);
+ }
+#ifdef USE_VM_PROBES
+ if (!(DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING)) {
+ stoken = NIL;
+ }
+#endif
+ token = enif_make_copy(msg_env, stoken);
+
+#ifdef USE_VM_PROBES
+ if (DT_UTAG_FLAGS(c_p) & DT_UTAG_SPREADING) {
+ if (is_immed(DT_UTAG(c_p)))
+ utag = DT_UTAG(c_p);
+ else
+ utag = enif_make_copy(msg_env, DT_UTAG(c_p));
+ }
+ if (DTRACE_ENABLED(message_send)) {
+ if (have_seqtrace(stoken)) {
+ tok_label = SEQ_TRACE_T_DTRACE_LABEL(stoken);
+ tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(stoken));
+ tok_serial = signed_val(SEQ_TRACE_T_SERIAL(stoken));
+ }
+ DTRACE6(message_send, sender_name, receiver_name,
+ size_object(msg), tok_label, tok_lastcnt, tok_serial);
+ }
+#endif
+ }
flush_env(msg_env);
mp = erts_alloc_message(0, NULL);
+ ERL_MESSAGE_TOKEN(mp) = token;
mp->data.heap_frag = menv->env.heap_frag;
ASSERT(mp->data.heap_frag == MBUF(&menv->phony_proc));
if (mp->data.heap_frag != NULL) {
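As a reminder of the call site this hunk changes: the token copying only applies to messages built in an independent environment. A minimal enif_send sketch (hypothetical NIF code, not from this diff):

    #include <erl_nif.h>

    /* Sends the atom 'hello' to the pid in argv[0]. With this patch a
     * seq_trace token on the caller is attached to the message as well. */
    static ERL_NIF_TERM send_hello(ErlNifEnv *env, int argc,
                                   const ERL_NIF_TERM argv[])
    {
        ErlNifPid to;
        ErlNifEnv *msg_env;
        int ok;

        if (argc != 1 || !enif_get_local_pid(env, argv[0], &to))
            return enif_make_badarg(env);

        msg_env = enif_alloc_env();  /* independent environment (menv) */
        ok = enif_send(env, &to, msg_env, enif_make_atom(msg_env, "hello"));
        enif_free_env(msg_env);

        return enif_make_atom(env, ok ? "true" : "false");
    }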
@@ -736,7 +830,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
full_cache_env(env);
}
else {
- erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state);
+ erts_aint_t state = erts_atomic32_read_nob(&rp->state);
if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
mp = erts_alloc_message(sz, &hp);
ohp = sz == 0 ? NULL : &mp->hfrag.off_heap;
@@ -749,10 +843,11 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ohp = &bp->off_heap;
}
}
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
msg = copy_struct_litopt(msg, sz, &hp, ohp, &litarea);
}
- ERL_MESSAGE_TERM(mp) = msg;
+ from = c_p ? c_p->common.id : am_undefined;
if (!env || !env->tracee) {
@@ -762,7 +857,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
full_cache_env(env);
}
}
-#ifdef ERTS_SMP
else {
/* This clause is taken when the nif is called in the context
of a traced process. We do not know which locks we have
@@ -772,8 +866,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlTraceMessageQueue *msgq;
Process *t_p = env->tracee;
-
- erts_smp_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
msgq = t_p->trace_msg_q;
@@ -790,7 +883,11 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#endif
if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
rp_locks & ERTS_PROC_LOCK_MSGQ ||
- erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+ erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+
+ ERL_MESSAGE_TERM(mp) = msg;
+ ERL_MESSAGE_FROM(mp) = from;
+ ERL_MESSAGE_TOKEN(mp) = am_undefined;
if (!msgq) {
msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
@@ -804,36 +901,35 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
msgq->next = t_p->trace_msg_q;
t_p->trace_msg_q = msgq;
- erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
erts_schedule_flush_trace_messages(t_p, 0);
} else {
msgq->len++;
*msgq->last = mp;
msgq->last = &mp->next;
- erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
}
goto done;
} else {
- erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+ erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
rp_locks &= ~ERTS_PROC_LOCK_TRACE;
rp_locks |= ERTS_PROC_LOCK_MSGQ;
}
}
-#endif /* ERTS_SMP */
- erts_queue_message(rp, rp_locks, mp, msg,
- c_p ? c_p->common.id : am_undefined);
+ if (c_p)
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
+ else
+ erts_queue_message(rp, rp_locks, mp, msg, from);
-#ifdef ERTS_SMP
done:
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks & ~lc_locks)
- erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
+ erts_proc_unlock(rp, rp_locks & ~lc_locks);
if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
if (scheduler <= 0)
erts_proc_dec_refc(rp);
@@ -863,15 +959,9 @@ enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
if (scheduler > 0)
prt = erts_port_lookup(to_port->port_id, iflags);
else {
-#ifdef ERTS_SMP
if (ERTS_PROC_IS_EXITING(c_p))
return 0;
prt = erts_thr_port_lookup(to_port->port_id, iflags);
-#else
- erts_exit(ERTS_ABORT_EXIT,
- "enif_port_command: called from non-scheduler "
- "thread on non-SMP VM");
-#endif
}
if (!prt)
@@ -1046,11 +1136,6 @@ int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)
return is_number(term);
}
-static ERTS_INLINE int is_proc_bound(ErlNifEnv* env)
-{
- return env->mod_nif != NULL;
-}
-
static void aligned_binary_dtor(struct enif_tmp_obj_t* obj)
{
erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator);
@@ -1085,22 +1170,14 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
u.tmp->dtor = &aligned_binary_dtor;
env->tmp_obj_list = u.tmp;
}
- bin->bin_term = bin_term;
bin->size = binary_size(bin_term);
bin->ref_bin = NULL;
ADD_READONLY_CHECK(env, bin->data, bin->size);
return 1;
}
-static void tmp_alloc_dtor(struct enif_tmp_obj_t* obj)
-{
- erts_free(obj->allocator, obj);
-}
-
int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
- struct enif_tmp_obj_t* tobj;
- ErtsAlcType_t allocator;
ErlDrvSizeT sz;
if (is_binary(term)) {
return enif_inspect_binary(env,term,bin);
@@ -1108,7 +1185,6 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
if (is_nil(term)) {
bin->data = (unsigned char*) &bin->data; /* dummy non-NULL */
bin->size = 0;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
return 1;
}
@@ -1116,16 +1192,8 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
return 0;
}
- allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
- tobj = erts_alloc(allocator, sz + sizeof(struct enif_tmp_obj_t));
- tobj->allocator = allocator;
- tobj->next = env->tmp_obj_list;
- tobj->dtor = &tmp_alloc_dtor;
- env->tmp_obj_list = tobj;
-
- bin->data = (unsigned char*) &tobj[1];
+ bin->data = alloc_tmp_obj(env, sz, &tmp_alloc_dtor);
bin->size = sz;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
erts_iolist_to_buf(term, (char*) bin->data, sz);
ADD_READONLY_CHECK(env, bin->data, bin->size);
@@ -1143,7 +1211,6 @@ int enif_alloc_binary(size_t size, ErlNifBinary* bin)
bin->size = size;
bin->data = (unsigned char*) refbin->orig_bytes;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = refbin;
return 1;
}
@@ -1177,12 +1244,10 @@ void enif_release_binary(ErlNifBinary* bin)
{
if (bin->ref_bin != NULL) {
Binary* refbin = bin->ref_bin;
- ASSERT(bin->bin_term == THE_NON_VALUE);
erts_bin_release(refbin);
}
#ifdef DEBUG
bin->data = NULL;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
#endif
}
@@ -1230,11 +1295,12 @@ size_t enif_binary_to_term(ErlNifEnv *dst_env,
Sint size;
ErtsHeapFactory factory;
byte *bp = (byte*) data;
+ Uint32 flags = 0;
- ERTS_CT_ASSERT(ERL_NIF_BIN2TERM_SAFE == ERTS_DIST_EXT_BTT_SAFE);
-
- if (opts & ~ERL_NIF_BIN2TERM_SAFE) {
- return 0;
+ switch ((Uint32)opts) {
+ case 0: break;
+ case ERL_NIF_BIN2TERM_SAFE: flags = ERTS_DIST_EXT_BTT_SAFE; break;
+ default: return 0;
}
if ((size = erts_decode_ext_size(bp, data_sz)) < 0)
return 0;
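Seen from the NIF side, the tightened option check means exactly 0 or ERL_NIF_BIN2TERM_SAFE is accepted; anything else now fails up front instead of being passed through to the decoder. A hypothetical caller:

    #include <erl_nif.h>

    /* Decodes an external-format binary, rejecting unsafe input such as
     * terms that would create new atoms. */
    static ERL_NIF_TERM decode_safe(ErlNifEnv *env, int argc,
                                    const ERL_NIF_TERM argv[])
    {
        ErlNifBinary bin;
        ERL_NIF_TERM term;

        if (argc != 1 || !enif_inspect_binary(env, argv[0], &bin))
            return enif_make_badarg(env);

        /* Returns the number of bytes read, or 0 on bad data/options. */
        if (!enif_binary_to_term(env, bin.data, bin.size, &term,
                                 ERL_NIF_BIN2TERM_SAFE))
            return enif_make_badarg(env);

        return term;
    }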
@@ -1246,7 +1312,7 @@ size_t enif_binary_to_term(ErlNifEnv *dst_env,
erts_factory_dummy_init(&factory);
}
- *term = erts_decode_ext(&factory, &bp, (Uint32)opts);
+ *term = erts_decode_ext(&factory, &bp, flags);
if (is_non_value(*term)) {
return 0;
@@ -1340,39 +1406,51 @@ int enif_get_string(ErlNifEnv *env, ERL_NIF_TERM list, char* buf, unsigned len,
Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
{
- if (bin->bin_term != THE_NON_VALUE) {
- return bin->bin_term;
- }
- else if (bin->ref_bin != NULL) {
- Binary* bptr = bin->ref_bin;
- ProcBin* pb;
- Eterm bin_term;
-
- /* !! Copy-paste from new_binary() !! */
- pb = (ProcBin *) alloc_heap(env, PROC_BIN_SIZE);
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = bptr->orig_size;
- pb->next = MSO(env->proc).first;
- MSO(env->proc).first = (struct erl_off_heap_header*) pb;
- pb->val = bptr;
- pb->bytes = (byte*) bptr->orig_bytes;
- pb->flags = 0;
-
- OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
- bin_term = make_binary(pb);
- if (erts_refc_read(&bptr->intern.refc, 1) == 1) {
- /* Total ownership transfer */
- bin->ref_bin = NULL;
- bin->bin_term = bin_term;
- }
- return bin_term;
- }
- else {
- flush_env(env);
- bin->bin_term = new_binary(env->proc, bin->data, bin->size);
- cache_env(env);
- return bin->bin_term;
+ Eterm bin_term;
+
+ if (bin->ref_bin != NULL) {
+ Binary* binary = bin->ref_bin;
+
+ /* If the binary is smaller than the heap binary limit we'll return a
+ * heap binary to reduce the number of small refc binaries in the
+ * system. We can't simply release the refc binary right away however;
+ * the documentation states that the binary should be considered
+ * read-only from this point on, which implies that it should still be
+ * readable.
+ *
+ * We could keep it alive until we return by adding it to the temporary
+ * object list, but that requires an off-heap allocation which is
+ * potentially quite slow, so we create a dummy ProcBin instead and
+ * rely on the next minor GC to get rid of it. */
+ if (bin->size <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+
+ hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(bin->size));
+ hb->thing_word = header_heap_bin(bin->size);
+ hb->size = bin->size;
+
+ sys_memcpy(hb->data, bin->data, bin->size);
+
+ erts_build_proc_bin(&MSO(env->proc),
+ alloc_heap(env, PROC_BIN_SIZE),
+ binary);
+
+ bin_term = make_binary(hb);
+ } else {
+ bin_term = erts_build_proc_bin(&MSO(env->proc),
+ alloc_heap(env, PROC_BIN_SIZE),
+ binary);
+ }
+
+ /* Our (possibly shared) ownership has been transferred to the term. */
+ bin->ref_bin = NULL;
+ } else {
+ flush_env(env);
+ bin_term = new_binary(env->proc, bin->data, bin->size);
+ cache_env(env);
}
+
+ return bin_term;
}
Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
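The caller-visible effect of the rewrite is that ownership is now always transferred to the returned term, and binaries at or below ERL_ONHEAP_BIN_LIMIT come back as heap binaries. A typical caller (sketch, not from this diff) therefore never releases after making the term:

    #include <erl_nif.h>
    #include <string.h>

    /* Hypothetical helper: wrap a C buffer in a binary term. After
     * enif_make_binary the ErlNifBinary is read-only and must not be
     * released; ownership went to the term. */
    static ERL_NIF_TERM make_blob(ErlNifEnv *env, const void *buf, size_t len)
    {
        ErlNifBinary bin;

        if (!enif_alloc_binary(len, &bin))
            return enif_make_badarg(env);

        memcpy(bin.data, buf, len);

        return enif_make_binary(env, &bin); /* no enif_release_binary */
    }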
@@ -1856,18 +1934,11 @@ int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
if (scheduler > 0)
return !!erts_proc_lookup(proc->pid);
else {
-#ifdef ERTS_SMP
Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
ERTS_P2P_FLG_INC_REFC);
if (rp)
erts_proc_dec_refc(rp);
return !!rp;
-#else
- erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: "
- "called from non-scheduler thread "
- "in non-smp emulator");
- return 0;
-#endif
}
}
@@ -1883,17 +1954,10 @@ int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
if (scheduler > 0)
return !!erts_port_lookup(port->port_id, iflags);
else {
-#ifdef ERTS_SMP
Port *prt = erts_thr_port_lookup(port->port_id, iflags);
if (prt)
erts_port_dec_refc(prt);
return !!prt;
-#else
- erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: "
- "called from non-scheduler thread "
- "in non-smp emulator");
- return 0;
-#endif
}
}
@@ -1975,6 +2039,12 @@ ErlNifTid enif_thread_self(void) { return erl_drv_thread_self(); }
int enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2) { return erl_drv_equal_tids(tid1,tid2); }
void enif_thread_exit(void *resp) { erl_drv_thread_exit(resp); }
int enif_thread_join(ErlNifTid tid, void **respp) { return erl_drv_thread_join(tid,respp); }
+
+char* enif_mutex_name(ErlNifMutex *mtx) { return erl_drv_mutex_name(mtx); }
+char* enif_cond_name(ErlNifCond *cnd) { return erl_drv_cond_name(cnd); }
+char* enif_rwlock_name(ErlNifRWLock* rwlck) { return erl_drv_rwlock_name(rwlck); }
+char* enif_thread_name(ErlNifTid tid) { return erl_drv_thread_name(tid); }
+
int enif_getenv(const char *key, char *value, size_t *value_size) { return erl_drv_getenv(key, value, value_size); }
ErlNifTime enif_monotonic_time(ErlNifTimeUnit time_unit)
@@ -1997,16 +2067,21 @@ enif_convert_time_unit(ErlNifTime val,
(int) to);
}
-int enif_fprintf(void* filep, const char* format, ...)
+int enif_fprintf(FILE* filep, const char* format, ...)
{
int ret;
va_list arglist;
va_start(arglist, format);
- ret = erts_vfprintf((FILE*)filep, format, arglist);
+ ret = erts_vfprintf(filep, format, arglist);
va_end(arglist);
return ret;
}
+int enif_vfprintf(FILE* filep, const char *format, va_list ap)
+{
+ return erts_vfprintf(filep, format, ap);
+}
+
int enif_snprintf(char *buffer, size_t size, const char* format, ...)
{
int ret;
@@ -2017,6 +2092,12 @@ int enif_snprintf(char *buffer, size_t size, const char* format, ...)
return ret;
}
+int enif_vsnprintf(char* buffer, size_t size, const char *format, va_list ap)
+{
+ return erts_vsnprintf(buffer, size, format, ap);
+}
+
+
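The new va_list entry points make printf-style wrappers possible in NIF code; a sketch (hypothetical helper, assuming the enif_vsnprintf added above):

    #include <erl_nif.h>
    #include <stdarg.h>

    /* Formats into buf, supporting the "%T" directive for Erlang terms. */
    static int format_to_buf(char *buf, size_t size, const char *fmt, ...)
    {
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = enif_vsnprintf(buf, size, fmt, ap);
        va_end(ap);

        return ret;
    }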
/***********************************************************
** Memory managed (GC'ed) "resource" objects **
***********************************************************/
@@ -2101,7 +2182,7 @@ ErlNifResourceType* open_resource_type(ErlNifEnv* env,
ErlNifResourceFlags op = flags;
Eterm module_am, name_am;
- ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(erts_thr_progress_is_blocking());
module_am = make_atom(env->mod_nif->mod->module);
name_am = enif_make_atom(env, name_str);
@@ -2218,106 +2299,69 @@ static void rollback_opened_resource_types(void)
}
}
-struct destroy_monitor_ctx
-{
- ErtsResource* resource;
- int exiting_procs;
- int scheduler;
-};
-
-static void destroy_one_monitor(ErtsMonitor* mon, void* context)
-{
- struct destroy_monitor_ctx* ctx = (struct destroy_monitor_ctx*) context;
- Process* rp;
- ErtsMonitor *rmon = NULL;
- int is_exiting;
-
- ASSERT(mon->type == MON_ORIGIN);
- ASSERT(is_internal_pid(mon->u.pid));
- ASSERT(is_internal_ref(mon->ref));
-
- if (ctx->scheduler > 0) { /* Normal scheduler */
- rp = erts_proc_lookup(mon->u.pid);
- }
- else {
-#ifdef ERTS_SMP
- rp = erts_proc_lookup_inc_refc(mon->u.pid);
+#ifdef ARCH_64
+# define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 63)
#else
- ASSERT(!"nif monitor destruction in non-scheduler thread");
- rp = NULL;
+# define ERTS_RESOURCE_DYING_FLAG (((Uint) 1) << 31)
#endif
- }
+#define ERTS_RESOURCE_REFC_MASK (~ERTS_RESOURCE_DYING_FLAG)
- if (!rp) {
- is_exiting = 1;
- }
- if (rp) {
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
- if (ERTS_PROC_IS_EXITING(rp)) {
- is_exiting = 1;
- } else {
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- ASSERT(rmon);
- is_exiting = 0;
- }
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#ifdef ERTS_SMP
- if (ctx->scheduler <= 0)
- erts_proc_dec_refc(rp);
-#endif
- }
- if (is_exiting) {
- ctx->resource->monitors->pending_failed_fire++;
- }
-
- /* ToDo: Delay destruction after monitor_locks */
- if (rmon) {
- ASSERT(rmon->type == MON_NIF_TARGET);
- ASSERT(rmon->u.resource == ctx->resource);
- erts_destroy_monitor(rmon);
- }
- erts_destroy_monitor(mon);
+static ERTS_INLINE void
+rmon_set_dying(ErtsResourceMonitors *rms)
+{
+ rms->refc |= ERTS_RESOURCE_DYING_FLAG;
}
-static void destroy_all_monitors(ErtsMonitor* monitors, ErtsResource* resource)
+static ERTS_INLINE int
+rmon_is_dying(ErtsResourceMonitors *rms)
{
- struct destroy_monitor_ctx ctx;
-
- execution_state(NULL, NULL, &ctx.scheduler);
+ return !!(rms->refc & ERTS_RESOURCE_DYING_FLAG);
+}
- ctx.resource = resource;
- erts_sweep_monitors(monitors, &destroy_one_monitor, &ctx);
+static ERTS_INLINE void
+rmon_refc_inc(ErtsResourceMonitors *rms)
+{
+ rms->refc++;
}
+static ERTS_INLINE Uint
+rmon_refc_dec_read(ErtsResourceMonitors *rms)
+{
+ Uint res;
+ ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0);
+ res = --rms->refc;
+ return res & ERTS_RESOURCE_REFC_MASK;
+}
-#ifdef ERTS_SMP
-# define NIF_RESOURCE_DTOR &nif_resource_dtor
-#else
-# define NIF_RESOURCE_DTOR &nosmp_nif_resource_dtor_prologue
+static ERTS_INLINE void
+rmon_refc_dec(ErtsResourceMonitors *rms)
+{
+ ASSERT((rms->refc & ERTS_RESOURCE_REFC_MASK) != 0);
+ --rms->refc;
+}
-/*
- * NO-SMP: Always run resource destructor on scheduler thread
- * as we may have to remove process monitors.
- */
-static int nif_resource_dtor(Binary*);
+static ERTS_INLINE Uint
+rmon_refc_read(ErtsResourceMonitors *rms)
+{
+ return rms->refc & ERTS_RESOURCE_REFC_MASK;
+}
-static void nosmp_nif_resource_dtor_scheduled(void* vbin)
+static void dtor_demonitor(ErtsMonitor* mon, void* context)
{
- erts_bin_free((Binary*)vbin);
+ ASSERT(erts_monitor_is_origin(mon));
+ ASSERT(is_internal_pid(mon->other.item));
+
+ erts_proc_sig_send_demonitor(mon);
}
-static int nosmp_nif_resource_dtor_prologue(Binary* bin)
+#ifdef DEBUG
+int erts_dbg_is_resource_dying(ErtsResource* resource)
{
- if (is_scheduler()) {
- return nif_resource_dtor(bin);
- }
- else {
- erts_schedule_misc_aux_work(1, nosmp_nif_resource_dtor_scheduled, bin);
- return 0; /* do not free */
- }
+ return resource->monitors && rmon_is_dying(resource->monitors);
}
+#endif
-#endif /* !ERTS_SMP */
+# define NIF_RESOURCE_DTOR &nif_resource_dtor
static int nif_resource_dtor(Binary* bin)
{
@@ -2327,35 +2371,37 @@ static int nif_resource_dtor(Binary* bin)
if (resource->monitors) {
ErtsResourceMonitors* rm = resource->monitors;
+ int kill;
+ ErtsMonitor *root;
+ Uint refc;
ASSERT(type->down);
- erts_smp_mtx_lock(&rm->lock);
+ erts_mtx_lock(&rm->lock);
ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0);
- if (rm->root) {
- ASSERT(!rm->is_dying);
- destroy_all_monitors(rm->root, resource);
+ kill = !rmon_is_dying(rm);
+ if (kill) {
+ rmon_set_dying(rm);
+ root = rm->root;
rm->root = NULL;
}
- if (rm->pending_failed_fire) {
- /*
- * Resource death struggle prolonged to serve exiting process(es).
- * Destructor will be called again when last exiting process
- * tries to fire its MON_NIF_TARGET monitor (and fails).
- *
- * This resource is doomed. It has no "real" references and
- * should get not get called upon to do anything except the
- * final destructor call.
- *
- * We keep refc at 0 and use a separate counter for exiting
- * processes to avoid resource getting revived by "dec_term".
- */
- ASSERT(!rm->is_dying);
- rm->is_dying = 1;
- erts_smp_mtx_unlock(&rm->lock);
- return 0;
- }
- erts_smp_mtx_unlock(&rm->lock);
- erts_smp_mtx_destroy(&rm->lock);
+ refc = rmon_refc_read(rm);
+ erts_mtx_unlock(&rm->lock);
+
+ if (kill)
+ erts_monitor_tree_foreach_delete(&root,
+ dtor_demonitor,
+ NULL);
+
+	    /*
+	     * If resource->monitors->refc != 0 there are
+	     * outstanding references to the resource from
+	     * monitors that have not been removed yet.
+	     * nif_resource_dtor() will be called again when
+	     * this reference count reaches zero.
+	     */
+ if (refc != 0)
+ return 0; /* we'll be back... */
+ erts_mtx_destroy(&rm->lock);
}
if (type->dtor != NULL) {
@@ -2384,54 +2430,82 @@ void erts_resource_stop(ErtsResource* resource, ErlNifEvent e,
post_nif_noproc(&msg_env);
}
-void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref)
+void erts_nif_demonitored(ErtsResource* resource)
{
- ErtsMonitor* rmon;
+ ErtsResourceMonitors* rmp = resource->monitors;
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ int free_me;
+
+ ASSERT(rmp);
+ ASSERT(resource->type->down);
+
+ erts_mtx_lock(&rmp->lock);
+ free_me = ((rmon_refc_dec_read(rmp) == 0) & !!rmon_is_dying(rmp));
+ erts_mtx_unlock(&rmp->lock);
+
+ if (free_me)
+ erts_bin_free(&bin->binary);
+}
+
+void erts_fire_nif_monitor(ErtsMonitor *tmon)
+{
+ ErtsResource* resource;
+ ErtsMonitorData *mdp;
+ ErtsMonitor *omon;
+ ErtsBinary* bin;
struct enif_msg_environment_t msg_env;
ErlNifPid nif_pid;
ErlNifMonitor nif_monitor;
- ErtsResourceMonitors* rmp = resource->monitors;
+ ErtsResourceMonitors* rmp;
+ Uint mrefc, brefc;
+ int active, is_dying;
+
+ ASSERT(tmon->type == ERTS_MON_TYPE_RESOURCE);
+ ASSERT(erts_monitor_is_target(tmon));
+
+ resource = tmon->other.ptr;
+ bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
+ rmp = resource->monitors;
+
+ mdp = erts_monitor_to_data(tmon);
+ omon = &mdp->origin;
ASSERT(rmp);
ASSERT(resource->type->down);
- erts_smp_mtx_lock(&rmp->lock);
- rmon = erts_remove_monitor(&rmp->root, ref);
- if (!rmon) {
- int free_me = (--rmp->pending_failed_fire == 0) && rmp->is_dying;
- ASSERT(rmp->pending_failed_fire >= 0);
- erts_smp_mtx_unlock(&rmp->lock);
+ erts_mtx_lock(&rmp->lock);
- if (free_me) {
- ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) == 0);
- erts_bin_free(&bin->binary);
- }
- return;
+ mrefc = rmon_refc_dec_read(rmp);
+ is_dying = rmon_is_dying(rmp);
+ active = !is_dying && erts_monitor_is_in_table(omon);
+
+ if (active) {
+ erts_monitor_tree_delete(&rmp->root, omon);
+ brefc = (Uint) erts_refc_inc_unless(&bin->binary.intern.refc, 0, 0);
}
- ASSERT(!rmp->is_dying);
- if (erts_refc_inc_unless(&bin->binary.intern.refc, 0, 0) == 0) {
- /*
- * Racing resource destruction.
- * To avoid a more complex refc-dance with destructing thread
- * we avoid calling 'down' and just silently remove the monitor.
- * This can happen even for non smp as destructor calls may be scheduled.
- */
- erts_smp_mtx_unlock(&rmp->lock);
+
+ erts_mtx_unlock(&rmp->lock);
+
+ if (!active) {
+ ASSERT(!is_dying || erts_refc_read(&bin->binary.intern.refc, 0) == 0);
+ if (is_dying && mrefc == 0)
+ erts_bin_free(&bin->binary);
+ erts_monitor_release(tmon);
}
else {
- erts_smp_mtx_unlock(&rmp->lock);
-
- ASSERT(rmon->u.pid == pid);
- erts_ref_to_driver_monitor(ref, &nif_monitor);
- nif_pid.pid = pid;
- pre_nif_noproc(&msg_env, resource->type->owner, NULL);
- resource->type->down(&msg_env.env, resource->data, &nif_pid, &nif_monitor);
- post_nif_noproc(&msg_env);
+ if (brefc > 0) {
+ ASSERT(is_internal_pid(omon->other.item));
+ erts_ref_to_driver_monitor(mdp->ref, &nif_monitor);
+ nif_pid.pid = omon->other.item;
+ pre_nif_noproc(&msg_env, resource->type->owner, NULL);
+ resource->type->down(&msg_env.env, resource->data, &nif_pid, &nif_monitor);
+ post_nif_noproc(&msg_env);
+
+ erts_bin_release(&bin->binary);
+ }
- erts_bin_release(&bin->binary);
+ erts_monitor_release_both(mdp);
}
- erts_destroy_monitor(rmon);
}
void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
@@ -2465,11 +2539,10 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz)
erts_refc_inc(&resource->type->refc, 2);
if (type->down) {
resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs);
- erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
+ erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL,
ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
resource->monitors->root = NULL;
- resource->monitors->pending_failed_fire = 0;
- resource->monitors->is_dying = 0;
+ resource->monitors->refc = 0;
resource->monitors->user_data_sz = data_sz;
}
else {
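The monitors block is only set up when the resource type has a down callback, which in NIF code means registering one through enif_open_resource_type_x. A hypothetical registration:

    #include <erl_nif.h>

    static ErlNifResourceType *conn_type;

    static void conn_dtor(ErlNifEnv *env, void *obj)
    {
        /* free whatever the resource owns */
    }

    static void conn_down(ErlNifEnv *env, void *obj,
                          ErlNifPid *pid, ErlNifMonitor *mon)
    {
        /* a process monitored via enif_monitor_process has died */
    }

    static int load(ErlNifEnv *env, void **priv, ERL_NIF_TERM info)
    {
        /* Providing `down` is what makes enif_alloc_resource initialize
         * the ErtsResourceMonitors block shown above. */
        ErlNifResourceTypeInit init = {conn_dtor, NULL /*stop*/, conn_down};

        conn_type = enif_open_resource_type_x(env, "connection", &init,
                                              ERL_NIF_RT_CREATE, NULL);
        return conn_type == NULL;
    }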
@@ -2484,7 +2557,7 @@ void enif_release_resource(void* obj)
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
- ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
#ifdef DEBUG
erts_refc_dec(&resource->nif_refc, 0);
#endif
@@ -2497,7 +2570,7 @@ void enif_keep_resource(void* obj)
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == NIF_RESOURCE_DTOR);
- ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
#ifdef DEBUG
erts_refc_inc(&resource->nif_refc, 1);
#endif
@@ -2507,7 +2580,7 @@ void enif_keep_resource(void* obj)
Eterm erts_bld_resource_ref(Eterm** hpp, ErlOffHeap* oh, ErtsResource* resource)
{
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
- ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
return erts_mk_magic_ref(hpp, oh, &bin->binary);
}
@@ -2516,7 +2589,7 @@ ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
ErtsResource* resource = DATA_TO_RESOURCE(obj);
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(resource);
Eterm* hp = alloc_heap(env, ERTS_MAGIC_REF_THING_SIZE);
- ASSERT(!(resource->monitors && resource->monitors->is_dying));
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
return erts_mk_magic_ref(&hp, &MSO(env->proc), &bin->binary);
}
@@ -2628,8 +2701,12 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
{
Process *proc;
Sint reds;
+ int sched;
- execution_state(env, &proc, NULL);
+ execution_state(env, &proc, &sched);
+
+ if (sched < 0)
+ return 0; /* no-op on dirty scheduler */
ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100);
if (percent < 1) percent = 1;
@@ -2664,7 +2741,6 @@ nif_export_restore(Process *c_p, NifExport *ep, Eterm res)
}
-#ifdef ERTS_DIRTY_SCHEDULERS
/*
* Finalize a dirty NIF call. This function is scheduled to cause the VM to
@@ -2734,7 +2810,7 @@ schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp,
execution_state(env, &proc, NULL);
- (void) erts_smp_atomic32_read_bset_nob(&proc->state,
+ (void) erts_atomic32_read_bset_nob(&proc->state,
(ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC),
(flags == ERL_NIF_DIRTY_JOB_CPU_BOUND
@@ -2772,7 +2848,7 @@ static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg,
ASSERT(is_atom(mod) && is_atom(func));
ASSERT(fp);
- (void) erts_smp_atomic32_read_bset_nob(&proc->state,
+ (void) erts_atomic32_read_bset_nob(&proc->state,
(ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC),
dirty_psflg);
@@ -2792,7 +2868,6 @@ static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv);
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
/*
* NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
@@ -2866,24 +2941,20 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
if (scheduler <= 0) {
if (scheduler == 0)
enif_make_badarg(env);
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
}
if (flags == 0)
result = schedule(env, execute_nif, fp, proc->current->module,
fun_name_atom, argc, argv);
else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) {
-#ifdef ERTS_DIRTY_SCHEDULERS
result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv);
-#else
- result = enif_raise_exception(env, am_notsup);
-#endif
}
else
result = enif_make_badarg(env);
if (scheduler < 0)
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
return result;
}
@@ -2899,12 +2970,10 @@ enif_thread_type(void)
switch (esdp->type) {
case ERTS_SCHED_NORMAL:
return ERL_NIF_THR_NORMAL_SCHEDULER;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
return ERL_NIF_THR_DIRTY_CPU_SCHEDULER;
case ERTS_SCHED_DIRTY_IO:
return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
return -1;
@@ -2949,6 +3018,44 @@ ERL_NIF_TERM enif_make_new_map(ErlNifEnv* env)
return make_flatmap(mp);
}
+int enif_make_map_from_arrays(ErlNifEnv *env,
+ ERL_NIF_TERM keys[],
+ ERL_NIF_TERM values[],
+ size_t cnt,
+ ERL_NIF_TERM *map_out)
+{
+ ErtsHeapFactory factory;
+ int succeeded;
+
+#ifdef ERTS_NIF_ASSERT_IN_ENV
+ size_t index = 0;
+
+ while (index < cnt) {
+ ASSERT_IN_ENV(env, keys[index], index, "key");
+ ASSERT_IN_ENV(env, values[index], index, "value");
+ index++;
+ }
+#endif
+
+ flush_env(env);
+
+ erts_factory_proc_prealloc_init(&factory, env->proc,
+ cnt * 2 + MAP_HEADER_FLATMAP_SZ + 1);
+
+ (*map_out) = erts_map_from_ks_and_vs(&factory, keys, values, cnt);
+ succeeded = (*map_out) != THE_NON_VALUE;
+
+ if (!succeeded) {
+ erts_factory_undo(&factory);
+ }
+
+ erts_factory_close(&factory);
+
+ cache_env(env);
+
+ return succeeded;
+}
+
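A minimal usage sketch of the enif_make_map_from_arrays function added above (hypothetical NIF code): the call fails, returning false, if the keys are not distinct.

    #include <erl_nif.h>

    /* Builds #{a => 1, b => 2} in one call. */
    static ERL_NIF_TERM make_small_map(ErlNifEnv *env)
    {
        ERL_NIF_TERM keys[2], values[2], map;

        keys[0] = enif_make_atom(env, "a");
        values[0] = enif_make_int(env, 1);
        keys[1] = enif_make_atom(env, "b");
        values[1] = enif_make_int(env, 2);

        if (!enif_make_map_from_arrays(env, keys, values, 2, &map))
            return enif_make_badarg(env); /* duplicate keys */

        return map;
    }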
int enif_make_map_put(ErlNifEnv* env,
Eterm map_in,
Eterm key,
@@ -3204,143 +3311,88 @@ int enif_map_iterator_get_pair(ErlNifEnv *env,
int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
ErlNifMonitor* monitor)
{
- int scheduler;
ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
- Process *rp;
Eterm tmp[ERTS_REF_THING_SIZE];
Eterm ref;
- int retval;
+ ErtsResourceMonitors *rm;
+ ErtsMonitorData *mdp;
ASSERT(ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->magic_binary.destructor
== NIF_RESOURCE_DTOR);
- ASSERT(!(rsrc->monitors && rsrc->monitors->is_dying));
+ ASSERT(erts_refc_read(&ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc)->binary.intern.refc, 0) != 0);
ASSERT(!rsrc->monitors == !rsrc->type->down);
-
- if (!rsrc->monitors) {
+ rm = rsrc->monitors;
+ if (!rm) {
ASSERT(!rsrc->type->down);
return -1;
}
ASSERT(rsrc->type->down);
- execution_state(env, NULL, &scheduler);
+ ref = erts_make_ref_in_buffer(tmp);
-#ifdef ERTS_SMP
- if (scheduler > 0) /* Normal scheduler */
- rp = erts_proc_lookup_raw(target_pid->pid);
- else
- rp = erts_proc_lookup_raw_inc_refc(target_pid->pid);
-#else
- if (scheduler <= 0) {
- erts_exit(ERTS_ABORT_EXIT, "enif_monitor_process: called from "
- "non-scheduler thread on non-SMP VM");
- }
- rp = erts_proc_lookup(target_pid->pid);
-#endif
+ mdp = erts_monitor_create(ERTS_MON_TYPE_RESOURCE, ref,
+ (Eterm) rsrc, target_pid->pid, NIL);
+ erts_mtx_lock(&rm->lock);
+ ASSERT(!rmon_is_dying(rm));
+ erts_monitor_tree_insert(&rm->root, &mdp->origin);
+ rmon_refc_inc(rm);
+ erts_mtx_unlock(&rm->lock);
- if (!rp)
- return 1;
+ if (!erts_proc_sig_send_monitor(&mdp->target, target_pid->pid)) {
+ /* Failed to send monitor signal; cleanup... */
+#ifdef DEBUG
+ ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
+#endif
- ref = erts_make_ref_in_buffer(tmp);
+ erts_mtx_lock(&rm->lock);
+ ASSERT(!rmon_is_dying(rm));
+ erts_monitor_tree_delete(&rm->root, &mdp->origin);
+ rmon_refc_dec(rm);
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 1) != 0);
+ erts_mtx_unlock(&rm->lock);
+ erts_monitor_release_both(mdp);
- erts_smp_mtx_lock(&rsrc->monitors->lock);
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
- if (ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state)) {
- retval = 1;
- }
- else {
- erts_add_monitor(&rsrc->monitors->root, MON_ORIGIN, ref, rp->common.id, NIL);
- erts_add_monitor(&ERTS_P_MONITORS(rp), MON_NIF_TARGET, ref, (UWord)rsrc, NIL);
- retval = 0;
+ return 1;
}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- erts_smp_mtx_unlock(&rsrc->monitors->lock);
-#ifdef ERTS_SMP
- if (scheduler <= 0)
- erts_proc_dec_refc(rp);
-#endif
if (monitor)
erts_ref_to_driver_monitor(ref,monitor);
- return retval;
+ return 0;
}
int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monitor)
{
- int scheduler;
ErtsResource* rsrc = DATA_TO_RESOURCE(obj);
#ifdef DEBUG
ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_UNALIGNED_DATA(rsrc);
#endif
- Process *rp;
+ ErtsResourceMonitors *rm;
ErtsMonitor *mon;
- ErtsMonitor *rmon = NULL;
Eterm ref_heap[ERTS_REF_THING_SIZE];
Eterm ref;
- int is_exiting;
ASSERT(bin->magic_binary.destructor == NIF_RESOURCE_DTOR);
- ASSERT(!(rsrc->monitors && rsrc->monitors->is_dying));
-
- execution_state(env, NULL, &scheduler);
+ ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) != 0);
ref = erts_driver_monitor_to_ref(ref_heap, monitor);
- erts_smp_mtx_lock(&rsrc->monitors->lock);
- mon = erts_remove_monitor(&rsrc->monitors->root, ref);
+ rm = rsrc->monitors;
+ erts_mtx_lock(&rm->lock);
+ ASSERT(!rmon_is_dying(rm));
+ mon = erts_monitor_tree_lookup(rm->root, ref);
+ if (mon)
+ erts_monitor_tree_delete(&rm->root, mon);
+ erts_mtx_unlock(&rm->lock);
- if (mon == NULL) {
- erts_smp_mtx_unlock(&rsrc->monitors->lock);
+ if (!mon)
return 1;
- }
-
- ASSERT(mon->type == MON_ORIGIN);
- ASSERT(is_internal_pid(mon->u.pid));
-
-#ifdef ERTS_SMP
- if (scheduler > 0) /* Normal scheduler */
- rp = erts_proc_lookup(mon->u.pid);
- else
- rp = erts_proc_lookup_inc_refc(mon->u.pid);
-#else
- if (scheduler <= 0) {
- erts_exit(ERTS_ABORT_EXIT, "enif_demonitor_process: called from "
- "non-scheduler thread on non-SMP VM");
- }
- rp = erts_proc_lookup(mon->u.pid);
-#endif
- if (!rp) {
- is_exiting = 1;
- }
- else {
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
- if (ERTS_PROC_IS_EXITING(rp)) {
- is_exiting = 1;
- } else {
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
- ASSERT(rmon);
- is_exiting = 0;
- }
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-
-#ifdef ERTS_SMP
- if (scheduler <= 0)
- erts_proc_dec_refc(rp);
-#endif
- }
- if (is_exiting) {
- rsrc->monitors->pending_failed_fire++;
- }
- erts_smp_mtx_unlock(&rsrc->monitors->lock);
+ ASSERT(erts_monitor_is_origin(mon));
+ ASSERT(is_internal_pid(mon->other.item));
- if (rmon) {
- ASSERT(rmon->type == MON_NIF_TARGET);
- ASSERT(rmon->u.resource == rsrc);
- erts_destroy_monitor(rmon);
- }
- erts_destroy_monitor(mon);
+ erts_proc_sig_send_demonitor(mon);
return 0;
}
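From the NIF author's side the monitor API is unchanged by this rework; a usage sketch (hypothetical code, assuming a resource type registered with a down callback):

    #include <erl_nif.h>

    /* Monitors the calling process from a resource. enif_monitor_process
     * returns 0 on success, <0 if the type has no down callback, and >0
     * if the target process is no longer alive. */
    static ERL_NIF_TERM watch_caller(ErlNifEnv *env, void *resource_obj)
    {
        ErlNifPid self;
        ErlNifMonitor mon;

        enif_self(env, &self); /* env must be process bound */

        if (enif_monitor_process(env, resource_obj, &self, &mon) != 0)
            return enif_make_atom(env, "error");

        return enif_make_atom(env, "ok");
    }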
@@ -3494,7 +3546,6 @@ static void marshal_iovec_binary(Eterm binary, ErlNifBinary *copy_buffer,
parent_header = binary_val(parent_binary);
result->size = binary_size(binary);
- result->bin_term = binary;
if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
ProcBin *pb = (ProcBin*)parent_header;
@@ -3522,6 +3573,7 @@ static void marshal_iovec_binary(Eterm binary, ErlNifBinary *copy_buffer,
* and reference that instead. */
if (result->ref_bin == NULL || bit_offset != 0) {
+ ASSERT(copy_buffer->ref_bin != NULL && copy_buffer->data != NULL);
ASSERT(result->size <= (copy_buffer->size - *copy_offset));
if (bit_offset == 0) {
@@ -3543,8 +3595,8 @@ static void marshal_iovec_binary(Eterm binary, ErlNifBinary *copy_buffer,
static int fill_iovec_with_slice(ErlNifEnv *env,
iovec_slice_t *slice,
ErlNifIOVec *iovec) {
+ ErlNifBinary copy_buffer = {0};
UWord copy_offset, iovec_idx;
- ErlNifBinary copy_buffer;
Eterm sublist_iterator;
/* Set up a common refc binary for all on-heap and unaligned binaries. */
@@ -3552,11 +3604,8 @@ static int fill_iovec_with_slice(ErlNifEnv *env,
if (!enif_alloc_binary(slice->copied_size, &copy_buffer)) {
return 0;
}
- } else {
-#ifdef DEBUG
- copy_buffer.data = NULL;
- copy_buffer.size = 0;
-#endif
+
+ ASSERT(copy_buffer.ref_bin != NULL);
}
sublist_iterator = slice->sublist_start;
@@ -3606,9 +3655,11 @@ static int fill_iovec_with_slice(ErlNifEnv *env,
}
} else {
if (slice->copied_size > 0) {
- /* Attach the binary to our environment and let the GC take care of
- * it after returning. */
- enif_make_binary(env, &copy_buffer);
+ /* Attach the binary to our environment and let the next minor GC
+ * get rid of it. This is slightly faster than using the tmp object
+ * list since it avoids off-heap allocations. */
+ erts_build_proc_bin(&MSO(env->proc),
+ alloc_heap(env, PROC_BIN_SIZE), copy_buffer.ref_bin);
}
}
@@ -3634,19 +3685,14 @@ static int create_iovec_from_slice(ErlNifEnv *env,
alloc_size = binv_offset;
alloc_size += slice->iovec_len * sizeof(Binary*);
- /* If we have an environment we'll attach the allocated data to it. The
- * GC will take care of releasing it later on. */
+ /* When the user passes an environment, we attach the iovec to it so
+ * the user won't have to bother managing it (similar to
+ * enif_inspect_binary). It'll disappear once the environment is
+ * cleaned up. */
if (env != NULL) {
- ErlNifBinary gc_bin;
-
- if (!enif_alloc_binary(alloc_size, &gc_bin)) {
- return 0;
- }
-
- alloc_base = (char*)gc_bin.data;
- enif_make_binary(env, &gc_bin);
+ alloc_base = alloc_tmp_obj(env, alloc_size, &tmp_alloc_dtor);
} else {
- alloc_base = enif_alloc(alloc_size);
+ alloc_base = erts_alloc(ERTS_ALC_T_NIF, alloc_size);
}
iovec = (ErlNifIOVec*)alloc_base;
@@ -3660,7 +3706,7 @@ static int create_iovec_from_slice(ErlNifEnv *env,
if(!fill_iovec_with_slice(env, slice, iovec)) {
if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
- enif_free(iovec);
+ erts_free(ERTS_ALC_T_NIF, iovec);
}
return 0;
@@ -3727,6 +3773,55 @@ int enif_ioq_deq(ErlNifIOQueue *q, size_t elems, size_t *size)
return 1;
}
+int enif_ioq_peek_head(ErlNifEnv *env, ErlNifIOQueue *q, size_t *size, ERL_NIF_TERM *bin_term) {
+ SysIOVec *iov_entry;
+ Binary *ref_bin;
+
+ if (q->size == 0) {
+ return 0;
+ }
+
+ ASSERT(q->b_head != q->b_tail && q->v_head != q->v_tail);
+
+ ref_bin = &q->b_head[0]->nif;
+ iov_entry = &q->v_head[0];
+
+ if (size != NULL) {
+ *size = iov_entry->iov_len;
+ }
+
+ if (iov_entry->iov_len > ERL_ONHEAP_BIN_LIMIT) {
+ ProcBin *pb = (ProcBin*)alloc_heap(env, PROC_BIN_SIZE);
+
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->next = MSO(env->proc).first;
+ pb->val = ref_bin;
+ pb->flags = 0;
+
+ ASSERT((byte*)iov_entry->iov_base >= (byte*)ref_bin->orig_bytes);
+ ASSERT(iov_entry->iov_len <= ref_bin->orig_size);
+
+ pb->bytes = (byte*)iov_entry->iov_base;
+ pb->size = iov_entry->iov_len;
+
+ MSO(env->proc).first = (struct erl_off_heap_header*) pb;
+ OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
+
+ erts_refc_inc(&ref_bin->intern.refc, 2);
+ *bin_term = make_binary(pb);
+ } else {
+ ErlHeapBin* hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(iov_entry->iov_len));
+
+ hb->thing_word = header_heap_bin(iov_entry->iov_len);
+ hb->size = iov_entry->iov_len;
+
+ sys_memcpy(hb->data, iov_entry->iov_base, iov_entry->iov_len);
+ *bin_term = make_binary(hb);
+ }
+
+ return 1;
+}
+
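A usage sketch of the new enif_ioq_peek_head (hypothetical NIF code): with this patch, chunks at or below ERL_ONHEAP_BIN_LIMIT come back as cheap heap binaries rather than ProcBins.

    #include <erl_nif.h>

    /* Returns {Size, Binary} for the first queued chunk without
     * dequeuing it, or the atom 'empty'. */
    static ERL_NIF_TERM peek_head(ErlNifEnv *env, ErlNifIOQueue *q)
    {
        ERL_NIF_TERM head;
        size_t size;

        if (!enif_ioq_peek_head(env, q, &size, &head))
            return enif_make_atom(env, "empty");

        return enif_make_tuple2(env, enif_make_uint64(env, size), head);
    }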
SysIOVec *enif_ioq_peek(ErlNifIOQueue *q, int *iovlen)
{
return erts_ioq_peekq(q, iovlen);
@@ -3743,7 +3838,7 @@ static ErtsCodeInfo** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsign
int j;
for (j = 0; j < n; ++j) {
ErtsCodeInfo* ci = mod_code->functions[j];
- ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI));
if (f_atom == ci->mfa.function
&& arity == ci->mfa.arity) {
return mod_code->functions+j;
@@ -3757,16 +3852,26 @@ static Eterm mkatom(const char *str)
return am_atom_put(str, sys_strlen(str));
}
-static struct tainted_module_t
+struct tainted_module_t
{
struct tainted_module_t* next;
Eterm module_atom;
-}*first_tainted_module = NULL;
+};
+
+erts_atomic_t first_taint; /* struct tainted_module_t* */
-static void add_taint(Eterm mod_atom)
+void erts_add_taint(Eterm mod_atom)
{
- struct tainted_module_t* t;
- for (t=first_tainted_module ; t!=NULL; t=t->next) {
+#ifdef ERTS_ENABLE_LOCK_CHECK
+    extern erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */
+#endif
+ struct tainted_module_t *first, *t;
+
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)
+ || erts_thr_progress_is_blocking());
+
+ first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
+ for (t=first ; t; t=t->next) {
if (t->module_atom == mod_atom) {
return;
}
@@ -3774,22 +3879,24 @@ static void add_taint(Eterm mod_atom)
t = erts_alloc_fnf(ERTS_ALC_T_TAINT, sizeof(*t));
if (t != NULL) {
t->module_atom = mod_atom;
- t->next = first_tainted_module;
- first_tainted_module = t;
+ t->next = first;
+ erts_atomic_set_nob(&first_taint, (erts_aint_t)t);
}
}
Eterm erts_nif_taints(Process* p)
{
- struct tainted_module_t* t;
+ struct tainted_module_t *first, *t;
unsigned cnt = 0;
Eterm list = NIL;
Eterm* hp;
- for (t=first_tainted_module ; t!=NULL; t=t->next) {
+
+ first = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
+ for (t=first ; t!=NULL; t=t->next) {
cnt++;
}
hp = HAlloc(p,cnt*2);
- for (t=first_tainted_module ; t!=NULL; t=t->next) {
+ for (t=first ; t!=NULL; t=t->next) {
list = CONS(hp, t->module_atom, list);
hp += 2;
}
@@ -3798,9 +3905,11 @@ Eterm erts_nif_taints(Process* p)
void erts_print_nif_taints(fmtfn_t to, void* to_arg)
{
- struct tainted_module_t* t;
+ struct tainted_module_t *t;
const char* delim = "";
- for (t=first_tainted_module ; t!=NULL; t=t->next) {
+
+ t = (struct tainted_module_t*) erts_atomic_read_nob(&first_taint);
+ for ( ; t; t = t->next) {
const Atom* atom = atom_tab(atom_val(t->module_atom));
erts_cbprintf(to,to_arg,"%s%.*s", delim, atom->len, atom->name);
delim = ",";
@@ -3887,6 +3996,11 @@ static struct erl_module_nif* create_lib(const ErlNifEntry* src)
} else {
dst->sizeof_ErlNifResourceTypeInit = 0;
}
+ if (AT_LEAST_VERSION(src, 2, 14)) {
+ dst->min_erts = src->min_erts;
+ } else {
+ dst->min_erts = "erts-?";
+ }
return lib;
};
@@ -3909,6 +4023,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
Eterm ret = am_ok;
int veto;
+ int taint = 1;
struct erl_module_nif* lib = NULL;
struct erl_module_instance* this_mi;
struct erl_module_instance* prev_mi;
@@ -3939,8 +4054,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
/* Block system (is this the right place to do it?) */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
/* Find calling module */
ASSERT(BIF_P->current != NULL);
@@ -3955,10 +4070,15 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ASSERT(module_p != NULL);
mod_atomp = atom_tab(atom_val(mod_atom));
- init_func = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len);
- if (init_func != NULL)
- handle = init_func;
-
+ {
+ ErtsStaticNifEntry* sne;
+ sne = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len);
+ if (sne != NULL) {
+ init_func = sne->nif_init;
+ handle = init_func;
+ taint = sne->taint;
+ }
+ }
this_mi = &module_p->curr;
prev_mi = &module_p->old;
if (in_area(caller, module_p->old.code_hdr, module_p->old.code_length)) {
@@ -3995,18 +4115,24 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
" function: '%s'", errdesc.str);
}
- else if ((add_taint(mod_atom),
+ else if ((taint ? erts_add_taint(mod_atom) : 0,
(entry = erts_sys_ddll_call_nif_init(init_func)) == NULL)) {
ret = load_nif_error(BIF_P, bad_lib, "Library init-call unsuccessful");
}
+ else if (entry->major > ERL_NIF_MAJOR_VERSION
+ || (entry->major == ERL_NIF_MAJOR_VERSION
+ && entry->minor > ERL_NIF_MINOR_VERSION)) {
+ char* fmt = "That '%T' NIF library needs %s or newer. Either try to"
+ " recompile the NIF lib or use a newer erts runtime.";
+ ret = load_nif_error(BIF_P, bad_lib, fmt, mod_atom, entry->min_erts);
+ }
else if (entry->major < ERL_NIF_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
- || (ERL_NIF_MAJOR_VERSION < entry->major
- || (ERL_NIF_MAJOR_VERSION == entry->major
- && ERL_NIF_MINOR_VERSION < entry->minor))
|| (entry->major==2 && entry->minor == 5)) { /* experimental maps */
- ret = load_nif_error(BIF_P, bad_lib, "Library version (%d.%d) not compatible (with %d.%d).",
- entry->major, entry->minor, ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
+        char* fmt = "That old NIF library (%d.%d) is not compatible with this "
+                    "erts runtime (%d.%d). Try recompiling the NIF lib.";
+ ret = load_nif_error(BIF_P, bad_lib, fmt, entry->major, entry->minor,
+ ERL_NIF_MAJOR_VERSION, ERL_NIF_MINOR_VERSION);
}
else if (AT_LEAST_VERSION(entry, 2, 1)
&& sys_strcmp(entry->vm_variant, ERL_NIF_VM_VARIANT) != 0) {
@@ -4045,14 +4171,9 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
* dirty scheduler support, treat a non-zero flags field as
* a load error.
*/
-#ifdef ERTS_DIRTY_SCHEDULERS
if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
f->flags, mod_atom, f->name, f->arity);
-#else
- ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.",
- mod_atom, f->name, f->arity);
-#endif
}
else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
< BEAM_NIF_MIN_FUNC_SZ)
@@ -4117,15 +4238,13 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
code_ptr = erts_codeinfo_to_code(ci);
if (ci->u.gen_bp == NULL) {
- code_ptr[0] = (BeamInstr) BeamOp(op_call_nif);
+ code_ptr[0] = BeamOpCodeAddr(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
GenericBp* g = ci->u.gen_bp;
- ASSERT(code_ptr[0] ==
- (BeamInstr) BeamOp(op_i_generic_breakpoint));
- g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ ASSERT(BeamIsOpCode(code_ptr[0], op_i_generic_breakpoint));
+ g->orig_instr = BeamOpCodeAddr(op_call_nif);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (f->flags) {
code_ptr[3] = (BeamInstr) f->fptr;
code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
@@ -4133,7 +4252,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
(BeamInstr) static_schedule_dirty_cpu_nif;
}
else
-#endif
code_ptr[1] = (BeamInstr) f->fptr;
code_ptr[2] = (BeamInstr) lib;
}
@@ -4151,8 +4269,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_sys_ddll_free_error(&errdesc);
}
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
erts_free(ERTS_ALC_T_TMP, lib_name);
@@ -4165,7 +4283,7 @@ erts_unload_nif(struct erl_module_nif* lib)
{
ErlNifResourceType* rt;
ErlNifResourceType* next;
- ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(erts_thr_progress_is_blocking());
ASSERT(lib != NULL);
ASSERT(lib->mod != NULL);
@@ -4224,6 +4342,10 @@ int erts_nif_get_funcs(struct erl_module_nif* mod,
return mod->entry.num_of_funcs;
}
+Module *erts_nif_get_module(struct erl_module_nif *nif_mod) {
+ return nif_mod->mod;
+}
+
Eterm erts_nif_call_function(Process *p, Process *tracee,
struct erl_module_nif* mod,
ErlNifFunc *fun, int argc, Eterm *argv)
@@ -4237,8 +4359,8 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
break;
ASSERT(i < mod->entry.num_of_funcs);
if (p)
- ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
- || erts_smp_thr_progress_is_blocking());
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN
+ || erts_thr_progress_is_blocking());
#endif
if (p) {
/* This is almost a normal nif call like in beam_emu,
@@ -4309,34 +4431,31 @@ static unsigned calc_checksum(unsigned char* ptr, unsigned size);
struct readonly_check_t
{
- struct enif_tmp_obj_t hdr;
unsigned char* ptr;
unsigned size;
unsigned checksum;
};
static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz)
{
- ErtsAlcType_t allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
- struct readonly_check_t* obj = erts_alloc(allocator,
- sizeof(struct readonly_check_t));
- obj->hdr.allocator = allocator;
- obj->hdr.next = env->tmp_obj_list;
- env->tmp_obj_list = &obj->hdr;
- obj->hdr.dtor = &readonly_check_dtor;
+ struct readonly_check_t* obj;
+
+ obj = alloc_tmp_obj(env, sizeof(struct readonly_check_t),
+ &readonly_check_dtor);
+
obj->ptr = ptr;
obj->size = sz;
- obj->checksum = calc_checksum(ptr, sz);
+ obj->checksum = calc_checksum(ptr, sz);
}
-static void readonly_check_dtor(struct enif_tmp_obj_t* o)
+static void readonly_check_dtor(struct enif_tmp_obj_t* tmp_obj)
{
- struct readonly_check_t* obj = (struct readonly_check_t*) o;
- unsigned chksum = calc_checksum(obj->ptr, obj->size);
- if (chksum != obj->checksum) {
+ struct readonly_check_t* ro_check = (struct readonly_check_t*)&tmp_obj[1];
+ unsigned chksum = calc_checksum(ro_check->ptr, ro_check->size);
+ if (chksum != ro_check->checksum) {
fprintf(stderr, "\r\nReadonly data written by NIF, checksums differ"
- " %x != %x\r\nABORTING\r\n", chksum, obj->checksum);
+ " %x != %x\r\nABORTING\r\n", chksum, ro_check->checksum);
abort();
}
- erts_free(obj->hdr.allocator, obj);
+ erts_free(tmp_obj->allocator, tmp_obj);
}
static unsigned calc_checksum(unsigned char* ptr, unsigned size)
{
@@ -4411,7 +4530,7 @@ static void get_string_maybe(ErlNifEnv *env, const ERL_NIF_TERM term,
str_bin.size > bufsiz) {
*ptr = NULL;
} else {
- memcpy(buf, (char *) str_bin.data, str_bin.size);
+ sys_memcpy(buf, (char *) str_bin.data, str_bin.size);
buf[str_bin.size] = '\0';
*ptr = buf;
}
@@ -4428,7 +4547,7 @@ ERL_NIF_TERM erl_nif_user_trace_s1(ErlNifEnv* env, int argc,
message_bin.size > MESSAGE_BUFSIZ) {
return am_badarg;
}
- memcpy(messagebuf, (char *) message_bin.data, message_bin.size);
+ sys_memcpy(messagebuf, (char *) message_bin.data, message_bin.size);
messagebuf[message_bin.size] = '\0';
DTRACE1(user_trace_s1, messagebuf);
return am_true;