Diffstat (limited to 'erts/emulator/beam/erl_nif.c')
-rw-r--r--    erts/emulator/beam/erl_nif.c    561
1 file changed, 421 insertions(+), 140 deletions(-)
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index a695a028ba..8a3007d52a 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -20,6 +20,23 @@
 /* Erlang Native InterFace
  */
 
+/*
+ * Environment contains a pointer to the currently executing process.
+ * In the dirty case, however, this pointer does not point to the
+ * actual process structure of the executing process, but instead to
+ * a "shadow process structure". This is in order to be able to
+ * handle heap allocation without the need to acquire the main lock
+ * on the process.
+ *
+ * The dirty process is allowed to allocate on the heap without
+ * the main lock, i.e., incrementing htop, but is not allowed to
+ * modify mbuf, offheap, etc without the main lock. The dirty
+ * process moves the mbuf list and offheap list of the shadow
+ * process structure into the real structure when the dirty NIF
+ * call completes.
+ */
+
+
 #ifdef HAVE_CONFIG_H
 #  include "config.h"
 #endif
@@ -81,6 +98,43 @@ void dtrace_nifenv_str(ErlNifEnv *, char *);
 #define MIN_HEAP_FRAG_SZ 200
 static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp);
 
+static ERTS_INLINE int
+is_scheduler(void)
+{
+    ErtsSchedulerData *esdp = erts_get_scheduler_data();
+    if (!esdp)
+        return 0;
+    if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+        return -1;
+    return 1;
+}
+
+static ERTS_INLINE void
+execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
+{
+    if (schedp)
+        *schedp = is_scheduler();
+    if (c_pp) {
+        if (!env || env->proc->common.id == ERTS_INVALID_PID)
+            *c_pp = NULL;
+        else {
+            Process *c_p = env->proc;
+
+            if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+                ASSERT(is_scheduler() > 0);
+            else {
+                c_p = env->proc->next;
+                ASSERT(is_scheduler() < 0);
+                ASSERT(c_p && env->proc->common.id == c_p->common.id);
+            }
+
+            *c_pp = c_p;
+
+            ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC));
+        }
+    }
+}
+
 static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, unsigned need)
 {
     Eterm* hp = env->hp;
@@ -124,6 +178,9 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, unsigned may_need)
 void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
                   Process* tracee)
 {
+#ifdef ERTS_DIRTY_SCHEDULERS
+    ErtsSchedulerData *esdp;
+#endif
     env->mod_nif = mod_nif;
     env->proc = p;
     env->hp = HEAP_TOP(p);
@@ -133,6 +190,61 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
     env->tmp_obj_list = NULL;
     env->exception_thrown = 0;
     env->tracee = tracee;
+
+    ASSERT(p->common.id != ERTS_INVALID_PID);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+    esdp = erts_get_scheduler_data();
+    ASSERT(esdp);
+
+    if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+#ifdef DEBUG
+        erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+
+        ASSERT(p->scheduler_data == esdp);
+        ASSERT((state & (ERTS_PSFLG_RUNNING
+                         | ERTS_PSFLG_RUNNING_SYS))
+               && !(state & (ERTS_PSFLG_DIRTY_RUNNING
+                             | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+#endif
+
+    }
+    else {
+        Process *sproc;
+#ifdef DEBUG
+        erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+
+        ASSERT(!p->scheduler_data);
+        ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+               && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+#endif
+
+        sproc = esdp->dirty_shadow_process;
+        ASSERT(sproc);
+        ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+        ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
+               == (ERTS_PSFLG_ACTIVE
+                   | ERTS_PSFLG_DIRTY_RUNNING
+                   | ERTS_PSFLG_PROXY));
+
+        sproc->next = p;
+        sproc->common.id = p->common.id;
+        sproc->htop = p->htop;
+        sproc->stop = p->stop;
+        sproc->hend = p->hend;
+        sproc->heap = p->heap;
+        sproc->abandoned_heap = p->abandoned_heap;
+        sproc->heap_sz = p->heap_sz;
+        sproc->high_water = p->high_water;
+        sproc->old_hend = p->old_hend;
+        sproc->old_htop = p->old_htop;
+        sproc->old_heap = p->old_heap;
+        sproc->mbuf = NULL;
+        sproc->mbuf_sz = 0;
+        ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+        env->proc = sproc;
+    }
+#endif
 }
 
 /* Temporary object header, auto-deallocated when NIF returns
@@ -157,18 +269,75 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
 void erts_post_nif(ErlNifEnv* env)
 {
     erts_unblock_fpe(env->fpe_was_unmasked);
-    if (env->heap_frag == NULL) {
-        ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
-        ASSERT(env->hp >= HEAP_TOP(env->proc));
-        ASSERT(env->hp <= HEAP_LIMIT(env->proc));
-        HEAP_TOP(env->proc) = env->hp;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+    if (!(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+#endif
+    {
+        ASSERT(is_scheduler() > 0);
+        if (env->heap_frag == NULL) {
+            ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
+            ASSERT(env->hp >= HEAP_TOP(env->proc));
+            ASSERT(env->hp <= HEAP_LIMIT(env->proc));
+            HEAP_TOP(env->proc) = env->hp;
+        }
+        else {
+            ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
+            ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+            env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+            ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+        }
+        env->exiting = ERTS_PROC_IS_EXITING(env->proc);
     }
-    else {
-        ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
-        ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
-        env->heap_frag->used_size = env->hp - env->heap_frag->mem;
-        ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+#ifdef ERTS_DIRTY_SCHEDULERS
+    else { /* Dirty nif call using shadow process struct */
+        Process *c_p = env->proc->next;
+
+        ASSERT(is_scheduler() < 0);
+        ASSERT(env->proc->common.id == c_p->common.id);
+
+        if (!env->heap_frag) {
+            ASSERT(env->hp_end == HEAP_LIMIT(c_p));
+            ASSERT(env->hp >= HEAP_TOP(c_p));
+            ASSERT(env->hp <= HEAP_LIMIT(c_p));
+            HEAP_TOP(c_p) = env->hp;
+        }
+        else {
+            ASSERT(env->hp_end != HEAP_LIMIT(c_p));
+            ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+
+            HEAP_TOP(c_p) = HEAP_TOP(env->proc);
+            env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+
+            ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+
+            if (c_p->mbuf) {
+                ErlHeapFragment *bp;
+                for (bp = env->proc->mbuf; bp->next; bp = bp->next)
+                    ;
+                bp->next = c_p->mbuf;
+            }
+
+            c_p->mbuf = env->proc->mbuf;
+            c_p->mbuf_sz += env->proc->mbuf_sz;
+
+        }
+
+        if (!c_p->off_heap.first)
+            c_p->off_heap.first = env->proc->off_heap.first;
+        else if (env->proc->off_heap.first) {
+            struct erl_off_heap_header *ohhp;
+            for (ohhp = env->proc->off_heap.first; ohhp->next; ohhp = ohhp->next)
+                ;
+            ohhp->next = c_p->off_heap.first;
+            c_p->off_heap.first = env->proc->off_heap.first;
+        }
+        c_p->off_heap.overhead += env->proc->off_heap.overhead;
+
+        env->exiting = ERTS_PROC_IS_EXITING(c_p);
+        BUMP_ALL_REDS(c_p);
     }
+#endif
     free_tmp_objs(env);
 }
 
@@ -400,9 +569,8 @@ error:
 
 #endif
 
-int
-enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
-          ErlNifEnv* msg_env, ERL_NIF_TERM msg)
+int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
+              ErlNifEnv* msg_env, ERL_NIF_TERM msg)
 {
     struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
     ErtsProcLocks rp_locks = 0;
@@ -413,34 +581,32 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
     Process* c_p;
     ErtsMessage *mp;
     Eterm receiver = to_pid->pid;
-    int flush_me = 0;
-    ErtsSchedulerData *esdp = erts_get_scheduler_data();
-    int scheduler = esdp ? esdp->no : 0;
+    int scheduler;
 
-    if (env != NULL) {
-        c_p = env->proc;
-        if (receiver == c_p->common.id) {
+    execution_state(env, &c_p, &scheduler);
+
+#ifndef ERTS_SMP
+    if (!scheduler) {
+        erts_exit(ERTS_ABORT_EXIT,
+                  "enif_send: called from non-scheduler thread on non-SMP VM");
+        return 0;
+    }
+#endif
+
+    if (scheduler > 0) { /* Normal scheduler */
+        rp = erts_proc_lookup(receiver);
+        if (c_p == rp)
             rp_locks = ERTS_PROC_LOCK_MAIN;
-            flush_me = 1;
-        }
     }
     else {
-#ifdef ERTS_SMP
-        c_p = NULL;
-#else
-        erts_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
-#endif
+        if (c_p && ERTS_PROC_IS_EXITING(c_p))
+            return 0;
+        rp = erts_pid2proc_opt(c_p, 0, receiver, rp_locks,
+                               ERTS_P2P_FLG_INC_REFC);
     }
-
-    rp = (scheduler
-          ? erts_proc_lookup(receiver)
-          : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
-                              receiver, rp_locks, ERTS_P2P_FLG_INC_REFC));
-
-    if (rp == NULL) {
-        ASSERT(env == NULL || receiver != c_p->common.id);
+    if (rp == NULL)
         return 0;
-    }
+
     if (menv) {
         flush_env(msg_env);
         mp = erts_alloc_message(0, NULL);
@@ -465,10 +631,6 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
 
     ERL_MESSAGE_TERM(mp) = msg;
 
-    if (flush_me) {
-        flush_env(env); /* Needed for ERTS_HOLE_CHECK */
-    }
-
     if (!env || !env->tracee) {
 
         if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND))
@@ -546,11 +708,9 @@ done:
     if (rp_locks & ~lc_locks)
         erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
 #endif
-    if (!scheduler)
+    if (scheduler <= 0)
         erts_proc_dec_refc(rp);
-    if (flush_me) {
-        cache_env(env);
-    }
+
     return 1;
 }
 
@@ -558,26 +718,52 @@ int
 enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
                   ErlNifEnv *msg_env, ERL_NIF_TERM msg)
 {
-
-    ErtsSchedulerData *esdp = erts_get_scheduler_data();
-    int scheduler = esdp ? esdp->no : 0;
+    int iflags = (erts_port_synchronous_ops
+                  ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+                  : ERTS_PORT_SFLGS_INVALID_LOOKUP);
+    int scheduler;
+    Process *c_p;
     Port *prt;
+    int res;
 
-    if (scheduler == 0 || !env)
-        return 0;
+    if (!env)
+        erts_exit(ERTS_ABORT_EXIT, "enif_port_command: env == NULL");
+
+    execution_state(env, &c_p, &scheduler);
+
+    if (!c_p)
+        c_p = env->proc;
 
-    prt = erts_port_lookup(to_port->port_id,
-                           (erts_port_synchronous_ops
-                            ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
-                            : ERTS_PORT_SFLGS_INVALID_LOOKUP));
+    if (scheduler > 0)
+        prt = erts_port_lookup(to_port->port_id, iflags);
+#ifdef ERTS_DIRTY_SCHEDULERS
+    else if (scheduler < 0) {
+        if (ERTS_PROC_IS_EXITING(c_p))
+            return 0;
+        prt = erts_thr_port_lookup(to_port->port_id, iflags);
+    }
+#endif
+    else {
+        erts_exit(ERTS_ABORT_EXIT, "enif_port_command: "
+                  "called from non-scheduler thread");
+    }
 
     if (!prt)
-        return 0;
+        res = 0;
+    else {
+
+        if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+            trace_port_receive(prt, c_p->common.id, am_command, msg);
 
-    if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
-        trace_port_receive(prt, env->proc->common.id, am_command, msg);
+        res = erts_port_output_async(prt, c_p->common.id, msg);
+    }
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+    if (scheduler < 0)
+        erts_port_dec_refc(prt);
+#endif
 
-    return erts_port_output_async(prt, env->proc->common.id, msg);
+    return res;
 }
 
 ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
@@ -1039,15 +1225,21 @@ Eterm enif_make_badarg(ErlNifEnv* env)
 
 Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason)
 {
+    Process *c_p;
+
+    execution_state(env, &c_p, NULL);
+
     env->exception_thrown = 1;
-    env->proc->fvalue = reason;
-    BIF_ERROR(env->proc, EXC_ERROR);
+    c_p->fvalue = reason;
+    BIF_ERROR(c_p, EXC_ERROR);
 }
 
 int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason)
 {
     if (env->exception_thrown && reason != NULL) {
-        *reason = env->proc->fvalue;
+        Process *c_p;
+        execution_state(env, &c_p, NULL);
+        *reason = c_p->fvalue;
     }
     return env->exception_thrown;
 }
@@ -1441,56 +1633,71 @@ int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list
     return 1;
 }
 
+int enif_is_current_process_alive(ErlNifEnv* env)
+{
+    Process *c_p;
+    int scheduler;
+
+    execution_state(env, &c_p, &scheduler);
+
+    if (!c_p)
+        erts_exit(ERTS_ABORT_EXIT,
+                  "enif_is_current_process_alive: "
+                  "Invalid environment");
+
+    if (!scheduler)
+        erts_exit(ERTS_ABORT_EXIT, "enif_is_current_process_alive: "
+                  "called from non-scheduler thread");
+
+    return !ERTS_PROC_IS_EXITING(c_p);
+}
+
 int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
 {
-    ErtsProcLocks rp_locks = 0; /* We don't need any locks,
-                                   just to check if it is alive */
-    Eterm target = proc->pid;
-    Process* rp;
-    Process* c_p;
-    int scheduler = erts_get_scheduler_id() != 0;
+    int scheduler;
 
-    if (env != NULL) {
-        c_p = env->proc;
-        if (target == c_p->common.id) {
-            /* We are alive! */
-            return 1;
-        }
-    }
+    execution_state(env, NULL, &scheduler);
+
+    if (scheduler > 0)
+        return !!erts_proc_lookup(proc->pid);
     else {
 #ifdef ERTS_SMP
-        c_p = NULL;
+        Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
+                                        ERTS_P2P_FLG_INC_REFC);
+        if (rp)
+            erts_proc_dec_refc(rp);
+        return !!rp;
 #else
-        erts_exit(ERTS_ABORT_EXIT,"enif_is_process_alive: "
-                  "env==NULL on non-SMP VM");
-#endif
-    }
-
-    rp = (scheduler
-          ? erts_proc_lookup(target)
-          : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
-                              target, rp_locks, ERTS_P2P_FLG_INC_REFC));
-    if (rp == NULL) {
-        ASSERT(env == NULL || target != c_p->common.id);
+        erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: "
+                  "called from non-scheduler thread");
         return 0;
-    } else {
-        if (!scheduler)
-            erts_proc_dec_refc(rp);
-        return 1;
+#endif
     }
 }
 
 int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
 {
-    /* only allowed if called from scheduler */
-    if (erts_get_scheduler_id() == 0)
-        erts_exit(ERTS_ABORT_EXIT,"enif_is_port_alive: called from non-scheduler");
+    int scheduler;
+    Uint32 iflags = (erts_port_synchronous_ops
+                     ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+                     : ERTS_PORT_SFLGS_INVALID_LOOKUP);
 
-    return erts_port_lookup(
-        port->port_id,
-        (erts_port_synchronous_ops
-         ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
-         : ERTS_PORT_SFLGS_INVALID_LOOKUP)) != NULL;
+    execution_state(env, NULL, &scheduler);
+
+    if (scheduler > 0)
+        return !!erts_port_lookup(port->port_id, iflags);
+    else {
+#ifdef ERTS_SMP
+        Port *prt = erts_thr_port_lookup(port->port_id, iflags);
+        if (prt)
+            erts_port_dec_refc(prt);
+        return !!prt;
+#else
+        erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: "
+                  "called from non-scheduler thread");
+        return 0;
+#endif
+    }
 }
 
 ERL_NIF_TERM
@@ -1957,16 +2164,19 @@ void* enif_dlsym(void* handle, const char* symbol,
 
 int enif_consume_timeslice(ErlNifEnv* env, int percent)
 {
+    Process *proc;
     Sint reds;
 
+    execution_state(env, &proc, NULL);
+
     ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100);
     if (percent < 1) percent = 1;
     else if (percent > 100) percent = 100;
 
     reds = ((CONTEXT_REDS+99) / 100) * percent;
     ASSERT(reds > 0 && reds <= CONTEXT_REDS);
-    BUMP_REDS(env->proc, reds);
-    return ERTS_BIF_REDS_LEFT(env->proc) == 0;
+    BUMP_REDS(proc, reds);
+    return ERTS_BIF_REDS_LEFT(proc) == 0;
 }
 
 /*
@@ -2061,10 +2271,19 @@ static ERL_NIF_TERM
 init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
                     int need_save, int argc, const ERL_NIF_TERM argv[])
 {
-    Process* proc = env->proc;
-    Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+    Process* proc;
+    Eterm* reg;
     NifExport* ep;
-    int i;
+    int i, scheduler;
+
+    execution_state(env, &proc, &scheduler);
+
+    ASSERT(scheduler);
+
+    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
+                       & ERTS_PROC_LOCK_MAIN);
+
+    reg = erts_proc_sched_data(proc)->x_reg_array;
 
     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
     if (!ep)
@@ -2076,12 +2295,13 @@ init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirec
     }
     if (env->exception_thrown) {
         ep->exception_thrown = 1;
-        ep->rootset[0] = env->proc->fvalue;
+        ep->rootset[0] = proc->fvalue;
     } else {
         ep->exception_thrown = 0;
         ep->rootset[0] = NIL;
     }
-    ERTS_VBUMP_ALL_REDS(proc);
+    if (scheduler > 0)
+        ERTS_VBUMP_ALL_REDS(proc);
     for (i = 0; i < argc; i++) {
         if (need_save)
             ep->rootset[i+1] = reg[i];
@@ -2113,7 +2333,12 @@ static void
 restore_nif_mfa(Process* proc, NifExport* ep, int exception)
 {
     int i;
-    Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+    Eterm* reg = erts_proc_sched_data(proc)->x_reg_array;
+
+    ERTS_SMP_LC_ASSERT(!(proc->static_flags
+                         & ERTS_STC_FLG_SHADOW_PROC));
+    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
+                       & ERTS_PROC_LOCK_MAIN);
 
     proc->current[0] = ep->saved_mfa[0];
     proc->current[1] = ep->saved_mfa[1];
@@ -2138,11 +2363,13 @@ restore_nif_mfa(Process* proc, NifExport* ep, int exception)
 static ERL_NIF_TERM
 dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
 {
-    Process* proc = env->proc;
+    Process* proc;
     NifExport* ep;
 
+    execution_state(env, &proc, NULL);
+
     ASSERT(argc == 1);
-    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
     ASSERT(ep);
     ASSERT(!ep->exception_thrown);
@@ -2157,10 +2384,12 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
 static ERL_NIF_TERM
 dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
 {
-    Process* proc = env->proc;
+    Process* proc;
     NifExport* ep;
 
-    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+    execution_state(env, &proc, NULL);
+
+    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
     ASSERT(ep);
     ASSERT(ep->exception_thrown);
@@ -2177,23 +2406,32 @@ dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
 static ERL_NIF_TERM
 execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
 {
-    Process* proc = env->proc;
-    NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+    Process* proc;
+    NativeFunPtr fp;
     NifExport* ep;
     ERL_NIF_TERM result;
 
-    ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+    execution_state(env, &proc, NULL);
+
+    fp = (NativeFunPtr) proc->current[6];
+
+    ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
 
     /*
      * Set ep->fp to NULL before the native call so we know later whether
     * it scheduled another NIF for execution
      */
     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
-    ASSERT(ep);
+    ASSERT(ep && fp);
     ep->fp = NULL;
     erts_smp_atomic32_read_band_mb(&proc->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
                                                    | ERTS_PSFLG_DIRTY_IO_PROC));
+
+    erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+
     result = (*fp)(env, argc, argv);
+
+    erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+
     if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
         close_lib(env->mod_nif);
     /*
@@ -2230,29 +2468,49 @@ execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
 static ERTS_INLINE ERL_NIF_TERM
 schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
 {
-    erts_aint32_t state, n, a;
-    Process* proc = env->proc;
-    NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+    ERL_NIF_TERM result;
+    erts_aint32_t act, dirty_flag;
+    Process* proc;
+    NativeFunPtr fp;
     NifExport* ep;
-    int need_save;
+    int need_save, scheduler;
+
+    execution_state(env, &proc, &scheduler);
+    if (scheduler <= 0) {
+        ASSERT(scheduler < 0);
+        erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+    }
+
+    fp = (NativeFunPtr) proc->current[6];
+
+    ASSERT(fp);
 
     ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
 
-    a = erts_smp_atomic32_read_acqb(&proc->state);
-    while (1) {
-        n = state = a;
+    if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+        dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
+    else
+        dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
+
+    act = erts_smp_atomic32_read_bor_nob(&proc->state, dirty_flag);
+    if (!(act & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)))
+        erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+    else if ((act & (ERTS_PSFLG_DIRTY_CPU_PROC
+                     | ERTS_PSFLG_DIRTY_IO_PROC)) & ~dirty_flag) {
+        /* clear other flag... */
         if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
-            n |= ERTS_PSFLG_DIRTY_CPU_PROC;
+            dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
         else
-            n |= ERTS_PSFLG_DIRTY_IO_PROC;
-        a = erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state);
-        if (a == state)
-            break;
+            dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
+        erts_smp_atomic32_read_band_nob(&proc->state, ~dirty_flag);
     }
-    erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+
     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
     need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
-    return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+    result = init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+    if (scheduler <= 0)
+        erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+    return result;
 }
 
 static ERL_NIF_TERM
@@ -2277,11 +2535,14 @@ schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
 static ERL_NIF_TERM
 execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
 {
-    Process* proc = env->proc;
-    NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+    Process* proc;
+    NativeFunPtr fp;
     NifExport* ep;
     ERL_NIF_TERM result;
 
+    execution_state(env, &proc, NULL);
+    fp = (NativeFunPtr) proc->current[6];
+
     ASSERT(!env->exception_thrown);
     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
     ASSERT(ep);
@@ -2304,10 +2565,10 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
                   ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
                   int argc, const ERL_NIF_TERM argv[])
 {
-    Process* proc = env->proc;
+    Process* proc;
     NifExport* ep;
     ERL_NIF_TERM fun_name_atom, result;
-    int need_save;
+    int need_save, scheduler;
 
     if (argc > MAX_ARG)
         return enif_make_badarg(env);
@@ -2315,6 +2576,13 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
     if (enif_is_exception(env, fun_name_atom))
         return fun_name_atom;
 
+    execution_state(env, &proc, &scheduler);
+    if (scheduler <= 0) {
+        if (scheduler == 0)
+            enif_make_badarg(env);
+        erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+    }
+
     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
     need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
 
@@ -2326,12 +2594,15 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
             sched_fun = schedule_dirty_io_nif;
         else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
             sched_fun = schedule_dirty_cpu_nif;
-        else
-            return enif_make_badarg(env);
+        else {
+            result = enif_make_badarg(env);
+            goto done;
+        }
         result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
 #else
-        return enif_make_badarg(env);
+        result = enif_make_badarg(env);
 #endif
+        goto done;
     }
     else
         result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
@@ -2339,18 +2610,28 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
     ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
     ASSERT(ep);
     ep->exp.code[1] = (BeamInstr) fun_name_atom;
+
+done:
+    if (scheduler < 0)
+        erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+
     return result;
 }
 
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-
 int
 enif_is_on_dirty_scheduler(ErlNifEnv* env)
 {
-    return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data);
-}
+    int scheduler;
+    Process *c_p;
+
+    execution_state(env, &c_p, &scheduler);
 
-#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */
+    if (!c_p || !scheduler)
+        erts_exit(ERTS_ABORT_EXIT, "enif_is_on_dirty_scheduler: "
+                  "Invalid env");
+
+    return scheduler < 0;
+}
 
 /* Maps */
 
@@ -3051,16 +3332,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
                                          (BeamInstr) BeamOp(op_i_generic_breakpoint));
             g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
         }
+#ifdef ERTS_DIRTY_SCHEDULERS
             if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
                 && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
                 code_ptr[5+3] = (BeamInstr) f->fptr;
                 code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
                     (BeamInstr) schedule_dirty_io_nif :
                     (BeamInstr) schedule_dirty_cpu_nif;
-#endif
             }
             else
+#endif
                 code_ptr[5+1] = (BeamInstr) f->fptr;
             code_ptr[5+2] = (BeamInstr) lib;
             f = next_func(entry, &incr, f);
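
For context, the dirty-NIF machinery patched in above is what a NIF author
reaches through the public erl_nif API. The sketch below is not part of the
diff: the module name "slow_nif" and both function names are hypothetical,
but enif_schedule_nif(), the ERL_NIF_DIRTY_JOB_CPU_BOUND flag, and the
four-field ErlNifFunc (available since NIF API version 2.7, the version the
load_nif_2 hunk checks for) are the documented interface.

#include <erl_nif.h>

/* Runs on a dirty CPU scheduler, so it may block for a long time
 * without stalling the normal schedulers. */
static ERL_NIF_TERM do_heavy_work(ErlNifEnv* env, int argc,
                                  const ERL_NIF_TERM argv[])
{
    unsigned long n, i, acc = 0;
    if (argc != 1 || !enif_get_ulong(env, argv[0], &n))
        return enif_make_badarg(env);
    for (i = 0; i < n; i++)   /* stand-in for a long-running job */
        acc += i;
    return enif_make_ulong(env, acc);
}

/* Entry point on a normal scheduler; hands the real work over to a
 * dirty scheduler via the schedule_dirty_cpu_nif path added above. */
static ERL_NIF_TERM heavy_work(ErlNifEnv* env, int argc,
                               const ERL_NIF_TERM argv[])
{
    return enif_schedule_nif(env, "do_heavy_work",
                             ERL_NIF_DIRTY_JOB_CPU_BOUND,
                             do_heavy_work, argc, argv);
}

static ErlNifFunc nif_funcs[] = {
    /* A nonzero flags field would instead mark the NIF dirty statically,
     * which is the case the load_nif_2 hunk above wires up. */
    {"heavy_work", 1, heavy_work, 0}
};

ERL_NIF_INIT(slow_nif, nif_funcs, NULL, NULL, NULL, NULL)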