Diffstat (limited to 'erts/emulator/beam/erl_nif.c')
-rw-r--r--  erts/emulator/beam/erl_nif.c | 173
1 file changed, 109 insertions(+), 64 deletions(-)
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 6bd27d11f4..f572577bde 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -120,8 +120,10 @@ execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
else {
Process *c_p = env->proc;
- if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC))
- ASSERT(is_scheduler() > 0);
+ if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) {
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+ }
else {
c_p = env->proc->next;
ASSERT(is_scheduler() < 0);
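
The hunk above loosens the sanity check in execution_state(): instead of insisting that the caller runs on a normal scheduler, it only requires that the caller holds the process main lock, which also holds for dirty-scheduler calls. A minimal sketch of that kind of debug-only lock-ownership assertion, using a hypothetical proc_t and plain pthreads in place of the ERTS lock checker (erts_proc_lc_my_proc_locks is the real API; everything below is invented for illustration):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for Process and its main lock; the real ERTS lock
 * checker tracks per-thread lock sets, this just records one owner. */
typedef struct {
    pthread_mutex_t main_lock;
    pthread_t owner;      /* valid only while locked */
    int locked;
} proc_t;

static void proc_lock_main(proc_t *p)
{
    pthread_mutex_lock(&p->main_lock);
    p->owner = pthread_self();
    p->locked = 1;
}

static void proc_unlock_main(proc_t *p)
{
    p->locked = 0;
    pthread_mutex_unlock(&p->main_lock);
}

/* Debug-only check: the calling thread must own p's main lock.  Unlike an
 * "is_scheduler() > 0" assertion this also holds on dirty schedulers. */
static void assert_holds_main_lock(proc_t *p)
{
    assert(p->locked && pthread_equal(p->owner, pthread_self()));
}

int main(void)
{
    proc_t p = { .main_lock = PTHREAD_MUTEX_INITIALIZER };
    proc_lock_main(&p);
    assert_holds_main_lock(&p);
    proc_unlock_main(&p);
    puts("main lock ownership verified");
    return 0;
}

The ERTS_SMP_LC_ASSERT used in the hunk is only active in lock-checking builds, so the new check is a debug aid rather than a runtime cost.
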
@@ -208,11 +210,16 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
#endif
}
+static void full_cache_env(ErlNifEnv *env);
+static void cache_env(ErlNifEnv* env);
+static void full_flush_env(ErlNifEnv *env);
+static void flush_env(ErlNifEnv* env);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
void erts_pre_dirty_nif(ErtsSchedulerData *esdp,
- ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
- Process* tracee)
+ ErlNifEnv* env, Process* p,
+ struct erl_module_nif* mod_nif)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
Process *sproc;
#ifdef DEBUG
erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
@@ -223,7 +230,7 @@ void erts_pre_dirty_nif(ErtsSchedulerData *esdp,
ASSERT(esdp);
#endif
- erts_pre_nif(env, p, mod_nif, tracee);
+ erts_pre_nif(env, p, mod_nif, NULL);
sproc = esdp->dirty_shadow_process;
ASSERT(sproc);
@@ -235,22 +242,10 @@ void erts_pre_dirty_nif(ErtsSchedulerData *esdp,
sproc->next = p;
sproc->common.id = p->common.id;
- sproc->htop = p->htop;
- sproc->stop = p->stop;
- sproc->hend = p->hend;
- sproc->heap = p->heap;
- sproc->abandoned_heap = p->abandoned_heap;
- sproc->heap_sz = p->heap_sz;
- sproc->high_water = p->high_water;
- sproc->old_hend = p->old_hend;
- sproc->old_htop = p->old_htop;
- sproc->old_heap = p->old_heap;
- sproc->mbuf = NULL;
- sproc->mbuf_sz = 0;
- ERTS_INIT_OFF_HEAP(&sproc->off_heap);
env->proc = sproc;
-#endif
+ full_cache_env(env);
}
+#endif
/* Temporary object header, auto-deallocated when NIF returns
* or when independent environment is cleared.
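
In the hunk above, erts_pre_dirty_nif() no longer copies the heap fields by hand; it links the shadow process to the real one, borrows its id, and delegates the copy to full_cache_env(), which later hunks reuse around enif_send(). A rough sketch of that shape with toy types (proc_t, cache_into_shadow and pre_dirty_call are invented names, not ERTS APIs):

#include <assert.h>
#include <stdio.h>

/* Toy real/shadow process pair; the field names echo erl_nif.c but the
 * layout is invented for illustration. */
typedef struct proc {
    unsigned long id;
    char *heap, *htop, *hend;   /* heap cursor the NIF allocates from */
    struct proc *next;          /* shadow -> real process back link */
    int is_shadow;
} proc_t;

/* Shared helper in the spirit of full_cache_env(): snapshot the real
 * process's heap cursor into the shadow. */
static void cache_into_shadow(proc_t *shadow)
{
    proc_t *real = shadow->next;
    assert(shadow->is_shadow && real != NULL);
    shadow->heap = real->heap;
    shadow->htop = real->htop;
    shadow->hend = real->hend;
}

/* "pre dirty nif" shape: link the shadow to the real process, borrow its
 * id, then let the helper do the field copy instead of open-coding it. */
static void pre_dirty_call(proc_t *shadow, proc_t *real)
{
    shadow->is_shadow = 1;
    shadow->next = real;
    shadow->id = real->id;
    cache_into_shadow(shadow);
}

int main(void)
{
    char mem[64];
    proc_t real = { 42, mem, mem, mem + sizeof mem, NULL, 0 };
    proc_t shadow = { 0 };
    pre_dirty_call(&shadow, &real);
    printf("shadow borrows id %lu with %ld free bytes\n",
           shadow.id, (long)(shadow.hend - shadow.htop));
    return 0;
}

Factoring the copy into one helper is what lets the later hunks re-prime the shadow after enif_send() has flushed it.
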
@@ -274,32 +269,37 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
void erts_post_nif(ErlNifEnv* env)
{
erts_unblock_fpe(env->fpe_was_unmasked);
+ full_flush_env(env);
+ free_tmp_objs(env);
+ env->exiting = ERTS_PROC_IS_EXITING(env->proc);
+}
#ifdef ERTS_DIRTY_SCHEDULERS
- if (!(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+void erts_post_dirty_nif(ErlNifEnv* env)
+{
+ Process *c_p;
+ ASSERT(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(env->proc->next);
+ erts_unblock_fpe(env->fpe_was_unmasked);
+ full_flush_env(env);
+ free_tmp_objs(env);
+ c_p = env->proc->next;
+ env->exiting = ERTS_PROC_IS_EXITING(c_p);
+ ERTS_VBUMP_ALL_REDS(c_p);
+}
#endif
- {
- ASSERT(is_scheduler() > 0);
- if (env->heap_frag == NULL) {
- ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
- ASSERT(env->hp >= HEAP_TOP(env->proc));
- ASSERT(env->hp <= HEAP_LIMIT(env->proc));
- HEAP_TOP(env->proc) = env->hp;
- }
- else {
- ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
- env->heap_frag->used_size = env->hp - env->heap_frag->mem;
- ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
- }
- env->exiting = ERTS_PROC_IS_EXITING(env->proc);
- }
+
+static void full_flush_env(ErlNifEnv* env)
+{
#ifdef ERTS_DIRTY_SCHEDULERS
- else { /* Dirty nif call using shadow process struct */
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ /* Dirty nif call using shadow process struct */
Process *c_p = env->proc->next;
ASSERT(is_scheduler() < 0);
ASSERT(env->proc->common.id == c_p->common.id);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
if (!env->heap_frag) {
ASSERT(env->hp_end == HEAP_LIMIT(c_p));
@@ -339,11 +339,48 @@ void erts_post_nif(ErlNifEnv* env)
}
c_p->off_heap.overhead += env->proc->off_heap.overhead;
- env->exiting = ERTS_PROC_IS_EXITING(c_p);
- BUMP_ALL_REDS(c_p);
+ return;
}
#endif
- free_tmp_objs(env);
+
+ flush_env(env);
+}
+
+static void full_cache_env(ErlNifEnv* env)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ /* Dirty nif call using shadow process struct */
+ Process *sproc = env->proc;
+ Process *c_p = sproc->next;
+ ASSERT(c_p);
+ ASSERT(is_scheduler() < 0);
+ ASSERT(env->proc->common.id == c_p->common.id);
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+ & ERTS_PROC_LOCK_MAIN);
+
+ sproc->htop = c_p->htop;
+ sproc->stop = c_p->stop;
+ sproc->hend = c_p->hend;
+ sproc->heap = c_p->heap;
+ sproc->abandoned_heap = c_p->abandoned_heap;
+ sproc->heap_sz = c_p->heap_sz;
+ sproc->high_water = c_p->high_water;
+ sproc->old_hend = c_p->old_hend;
+ sproc->old_htop = c_p->old_htop;
+ sproc->old_heap = c_p->old_heap;
+ sproc->mbuf = NULL;
+ sproc->mbuf_sz = 0;
+ ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+
+ env->hp_end = HEAP_LIMIT(c_p);
+ env->hp = HEAP_TOP(c_p);
+ env->heap_frag = NULL;
+ return;
+ }
+#endif
+
+ cache_env(env);
}
/* Flush out our cached heap pointers to allow an ordinary HAlloc
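
The new full_cache_env()/full_flush_env() pair forms a round trip: cache takes a private snapshot of the real process's heap cursor for the shadow, and flush writes the advanced cursor back so the owning process sees what the dirty NIF allocated. A minimal, self-contained sketch of that round trip over a toy bump allocator (heap_t, cached_env_t and friends are invented; heap fragments and off-heap bookkeeping are omitted):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Toy "process heap" with a bump cursor; not the ERTS layout. */
typedef struct {
    char *heap, *htop, *hend;
} heap_t;

/* Cached view of that heap, loosely in the spirit of an ErlNifEnv. */
typedef struct {
    heap_t *owner;
    char *htop, *hend;
} cached_env_t;

/* cache: take a private snapshot of the owner's cursor (full_cache_env shape). */
static void cache(cached_env_t *env, heap_t *owner)
{
    env->owner = owner;
    env->htop = owner->htop;
    env->hend = owner->hend;
}

/* flush: write the advanced cursor back so the owner sees the allocations
 * (full_flush_env shape). */
static void flush(cached_env_t *env)
{
    assert(env->htop <= env->hend);
    env->owner->htop = env->htop;
}

/* bump-allocate from the cached cursor */
static char *env_alloc(cached_env_t *env, size_t n)
{
    char *p = env->htop;
    assert((size_t)(env->hend - env->htop) >= n);
    env->htop += n;
    return p;
}

int main(void)
{
    char mem[128];
    heap_t real = { mem, mem, mem + sizeof mem };
    cached_env_t env;

    cache(&env, &real);                      /* before running the dirty NIF */
    memcpy(env_alloc(&env, 5), "dirty", 5);  /* NIF builds terms locally */
    flush(&env);                             /* after the NIF: publish them */

    printf("owner heap now has %ld bytes in use\n", (long)(real.htop - real.heap));
    return 0;
}

The invariant is that nothing else touches the real heap between cache() and flush(); in the real code that is what holding ERTS_PROC_LOCK_MAIN (and the lock-checker asserts in the hunk) is there to guarantee.
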
@@ -600,17 +637,32 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
if (scheduler > 0) { /* Normal scheduler */
rp = erts_proc_lookup(receiver);
- if (c_p == rp)
- rp_locks = ERTS_PROC_LOCK_MAIN;
+ if (!rp)
+ return 0;
}
else {
- if (c_p && ERTS_PROC_IS_EXITING(c_p))
- return 0;
- rp = erts_pid2proc_opt(c_p, 0, receiver, rp_locks,
+ if (c_p) {
+ ASSERT(scheduler < 0); /* Dirty scheduler */
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+
+ if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ }
+
+ rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ receiver, rp_locks,
ERTS_P2P_FLG_INC_REFC);
+ if (!rp) {
+ if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ return 0;
+ }
}
- if (rp == NULL)
- return 0;
+
+ if (c_p == rp)
+ rp_locks = ERTS_PROC_LOCK_MAIN;
if (menv) {
flush_env(msg_env);
@@ -634,9 +686,9 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
INITIALIZE_LITERAL_PURGE_AREA(litarea);
sz = size_object_litopt(msg, &litarea);
if (env && !env->tracee) {
- flush_env(env);
+ full_flush_env(env);
mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
- cache_env(env);
+ full_cache_env(env);
}
else {
erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state);
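
erts_alloc_message_heap() may garbage-collect or extend the receiver's heap, so any cached pointers in env would go stale across the call; the hunk therefore brackets it with full_flush_env()/full_cache_env() instead of the plain flush_env()/cache_env() pair, covering the shadow-process case as well. The bracketing pattern itself, sketched with the same kind of toy heap as in the earlier sketch (all names invented, not ERTS APIs):

#include <stdio.h>
#include <string.h>

/* Toy heap and cached cursor, as in the earlier sketch. */
typedef struct { char *heap, *htop, *hend; } heap_t;
typedef struct { heap_t *owner; char *htop, *hend; } cached_env_t;

static void cache(cached_env_t *e, heap_t *h) { e->owner = h; e->htop = h->htop; e->hend = h->hend; }
static void flush(cached_env_t *e) { e->owner->htop = e->htop; }

/* Stand-in for a routine like erts_alloc_message_heap() or trace_send():
 * it works on the real heap directly and may move its cursor. */
static void allocate_on_owner(heap_t *h, size_t n)
{
    memset(h->htop, 0, n);
    h->htop += n;
}

int main(void)
{
    char mem[64];
    heap_t real = { mem, mem, mem + sizeof mem };
    cached_env_t env;
    cache(&env, &real);

    /* The pattern from the hunk: publish our cached cursor before the call,
     * let the callee work on the real heap, then re-cache its new state. */
    flush(&env);
    allocate_on_owner(&real, 8);
    cache(&env, &real);

    printf("cached cursor is %ld bytes into the heap, in sync with the owner\n",
           (long)(env.htop - real.heap));
    return 0;
}
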
@@ -659,8 +711,11 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
if (!env || !env->tracee) {
- if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND))
+ if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND)) {
+ full_flush_env(env);
trace_send(c_p, receiver, msg);
+ full_cache_env(env);
+ }
}
#ifdef ERTS_SMP
else {
@@ -693,10 +748,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
if (!msgq) {
-#ifdef ERTS_SMP
- ErtsThrPrgrDelayHandle dhndl;
-#endif
-
msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
sizeof(ErlTraceMessageQueue));
msgq->receiver = receiver;
@@ -710,15 +761,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
-#ifdef ERTS_SMP
- if (!scheduler)
- dhndl = erts_thr_progress_unmanaged_delay();
-#endif
- erts_schedule_flush_trace_messages(t_p->common.id);
-#ifdef ERTS_SMP
- if (!scheduler)
- erts_thr_progress_unmanaged_continue(dhndl);
-#endif
+ erts_schedule_flush_trace_messages(t_p, 0);
} else {
msgq->len++;
*msgq->last = mp;
@@ -743,6 +786,8 @@ done:
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks & ~lc_locks)
erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
+ if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
#endif
if (scheduler <= 0)
erts_proc_dec_refc(rp);
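
When enif_send() runs on a dirty scheduler through a shadow process, the earlier hunk takes the real sender's main lock (erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN)) before the receiver lookup, and this final hunk releases it on the common exit path at done:. A compact sketch of that conditionally-taken lock discipline, with pthreads standing in for the proc locks (do_send and its flags are invented; in the real code the failed-lookup path unlocks and returns immediately, while the sketch funnels everything through one label for brevity):

#include <pthread.h>
#include <stdio.h>

/* Toy processes with one "main" lock each; pthreads stand in for the
 * ERTS proc locks. */
typedef struct { pthread_mutex_t main_lock; } proc_t;

/* Shape of the dirty-scheduler send path: the sender's main lock is taken
 * only in the shadow-process case, remembered, and dropped exactly once on
 * the way out, no matter which way the function bails. */
static int do_send(proc_t *sender, proc_t *receiver, int shadow_proc_call)
{
    int sender_locked = 0, receiver_locked = 0;
    int res = 0;

    if (shadow_proc_call) {
        pthread_mutex_lock(&sender->main_lock);
        sender_locked = 1;
    }

    if (!receiver)                 /* lookup failed: still runs the cleanup */
        goto done;

    pthread_mutex_lock(&receiver->main_lock);
    receiver_locked = 1;

    /* ... build and enqueue the message ... */
    res = 1;

done:
    if (receiver_locked)
        pthread_mutex_unlock(&receiver->main_lock);
    if (sender_locked)
        pthread_mutex_unlock(&sender->main_lock);
    return res;
}

int main(void)
{
    proc_t s = { PTHREAD_MUTEX_INITIALIZER };
    proc_t r = { PTHREAD_MUTEX_INITIALIZER };
    printf("dirty send: %d\n", do_send(&s, &r, 1));
    printf("dirty send, receiver gone: %d\n", do_send(&s, NULL, 1));
    return 0;
}
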