From 60557173f8a7bd0d4deafdb2b3e066899c586f56 Mon Sep 17 00:00:00 2001 From: Steve Vinoski Date: Wed, 23 Dec 2015 21:09:19 -0500 Subject: Add dirty_process_main function Dirty schedulers only execute NIFs, so having them execute the full process_main function isn't necessary. Add dirty_process_main for dirty schedulers to execute instead. Add erts_pre_dirty_nif(), called when preparing to execute a dirty nif. Add more dirty NIF tests to verify that activities requiring the process main lock can succeed when the process is executing a dirty NIF. --- erts/emulator/beam/beam_emu.c | 307 ++++++++++++++++++++- erts/emulator/beam/erl_nif.c | 81 +++--- erts/emulator/beam/erl_process.c | 4 +- erts/emulator/beam/global.h | 3 + erts/emulator/test/dirty_nif_SUITE.erl | 129 ++++++++- .../test/dirty_nif_SUITE_data/dirty_nif_SUITE.c | 20 ++ 6 files changed, 492 insertions(+), 52 deletions(-) diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index f8f2e29c95..25cc41323b 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -3559,12 +3559,8 @@ do { \ typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]); NifF* fp = vbf = (NifF*) I[1]; struct enif_environment_t env; -#ifdef ERTS_DIRTY_SCHEDULERS - if (!c_p->scheduler_data) - live_hf_end = ERTS_INVALID_HFRAG_PTR; /* On dirty scheduler */ - else -#endif - live_hf_end = c_p->mbuf; + ASSERT(c_p->scheduler_data); + live_hf_end = c_p->mbuf; erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL); nif_bif_result = (*fp)(&env, bif_nif_arity, reg); if (env.exception_thrown) @@ -3574,10 +3570,7 @@ do { \ PROCESS_MAIN_CHK_LOCKS(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); - if (env.exiting) { - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - goto do_schedule; - } + ASSERT(!env.exiting); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); } @@ -5162,6 +5155,300 @@ do { \ } } +/* + * dirty_process_main() is what dirty schedulers execute. Since they handle + * only NIF calls they do not need to be able to execute all BEAM + * instructions. + */ +void dirty_process_main(void) +{ + Process* c_p = NULL; + int reds_used; +#ifdef DEBUG + ERTS_DECLARE_DUMMY(Eterm pid); +#endif + BeamInstr *do_call_nif = OpCode(call_nif); + + /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC, + * in all other cases x0 is used. + */ + register Eterm* reg REG_xregs = NULL; + + /* + * Top of heap (next free location); grows upwards. + */ + register Eterm* HTOP REG_htop = NULL; + + /* Stack pointer. Grows downwards; points + * to last item pushed (normally a saved + * continuation pointer). + */ + register Eterm* E REG_stop = NULL; + + /* + * Pointer to next threaded instruction. + */ + register BeamInstr *I REG_I = NULL; + + /* Number of reductions left. This function + * returns to the scheduler when FCALLS reaches zero. + */ + register Sint FCALLS REG_fcalls = 0; + + /* + * X registers and floating point registers are located in + * scheduler specific data. + */ + register FloatDef *freg = NULL; + + /* + * For keeping the negative old value of 'reds' when call saving is active. 
+ */ + int neg_o_reds = 0; + + ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */ + + ERL_BITS_DECLARE_STATEP; /* Has to be last declaration */ + + c_p = NULL; + reds_used = 0; + + goto do_dirty_schedule1; + + context_switch: + c_p->arity = I[-1]; + c_p->current = I-3; /* Pointer to Mod, Func, Arity */ + + { + Eterm* argp; + int i; + + /* + * Make sure that there is enough room for the argument registers to be saved. + */ + if (c_p->arity > c_p->max_arg_reg) { + /* + * Yes, this is an expensive operation, but you only pay it the first + * time you call a function with more than 6 arguments which is + * scheduled out. This is better than paying for 26 words of wasted + * space for most processes which never call functions with more than + * 6 arguments. + */ + Uint size = c_p->arity * sizeof(c_p->arg_reg[0]); + if (c_p->arg_reg != c_p->def_arg_reg) { + c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG, + (void *) c_p->arg_reg, + size); + } else { + c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size); + } + c_p->max_arg_reg = c_p->arity; + } + + /* + * Since REDS_IN(c_p) is stored in the save area (c_p->arg_reg) we must read it + * now before saving registers. + */ + + ASSERT(c_p->debug_reds_in == REDS_IN(c_p)); + if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) + reds_used = REDS_IN(c_p) - FCALLS; + else + reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS); + ASSERT(reds_used >= 0); + + /* + * Save the argument registers and everything else. + */ + + argp = c_p->arg_reg; + for (i = c_p->arity - 1; i >= 0; i--) { + argp[i] = reg[i]; + } + SWAPOUT; + c_p->i = I; + goto do_dirty_schedule1; + } + + do_dirty_schedule: + ASSERT(c_p->debug_reds_in == REDS_IN(c_p)); + if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) + reds_used = REDS_IN(c_p) - FCALLS; + else + reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS); + ASSERT(reds_used >= 0); + do_dirty_schedule1: + + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + c_p = schedule(c_p, reds_used); + ASSERT(!(c_p->flags & F_HIPE_MODE)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); +#ifdef DEBUG + pid = c_p->common.id; /* Save for debugging purposes */ +#endif + ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + + ERTS_MSACC_UPDATE_CACHE_X(); + + reg = erts_proc_sched_data(c_p)->x_reg_array; + freg = erts_proc_sched_data(c_p)->f_reg_array; + ERL_BITS_RELOAD_STATEP(c_p); + { + int reds; + Eterm* argp; + int i; + + argp = c_p->arg_reg; + for (i = c_p->arity - 1; i >= 0; i--) { + reg[i] = argp[i]; + CHECK_TERM(reg[i]); + } + + /* + * We put the original reduction count in the process structure, to reduce + * the code size (referencing a field in a struct through a pointer stored + * in a register gives smaller code than referencing a global variable). 
+ */ + + I = c_p->i; + + REDS_IN(c_p) = reds = c_p->fcalls; +#ifdef DEBUG + c_p->debug_reds_in = reds; +#endif + + if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) { + neg_o_reds = -CONTEXT_REDS; + FCALLS = neg_o_reds + reds; + } else { + neg_o_reds = 0; + FCALLS = reds; + } + + ERTS_DBG_CHK_REDS(c_p, FCALLS); + + SWAPIN; + +#ifdef USE_VM_PROBES + if (DTRACE_ENABLED(process_scheduled)) { + DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE); + DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE); + dtrace_proc_str(c_p, process_buf); + + if (ERTS_PROC_IS_EXITING(c_p)) { + strcpy(fun_buf, ""); + } else { + BeamInstr *fptr = find_function_from_pc(c_p->i); + if (fptr) { + dtrace_fun_decode(c_p, (Eterm)fptr[0], + (Eterm)fptr[1], (Uint)fptr[2], + NULL, fun_buf); + } else { + erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)), + "", next); + } + } + + DTRACE2(process_scheduled, process_buf, fun_buf); + } +#endif + } + + { + Eterm nif_bif_result; + Eterm bif_nif_arity; + + OpCase(call_nif): + { + /* + * call_nif is always first instruction in function: + * + * I[-3]: Module + * I[-2]: Function + * I[-1]: Arity + * I[0]: &&call_nif + * I[1]: Function pointer to NIF function + * I[2]: Pointer to erl_module_nif + * I[3]: Function pointer to dirty NIF + */ + BifFunction vbf; + + if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the nif */ + goto context_switch; + } + + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF); + + DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]); + c_p->current = I-3; /* current and vbf set to please handle_error */ + SWAPOUT; + c_p->fcalls = FCALLS - 1; + PROCESS_MAIN_CHK_LOCKS(c_p); + bif_nif_arity = I[-1]; + ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + { + typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]); + NifF* fp = vbf = (NifF*) I[1]; + struct enif_environment_t env; + ASSERT(!c_p->scheduler_data); + erts_pre_dirty_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL); + nif_bif_result = (*fp)(&env, bif_nif_arity, reg); + if (env.exception_thrown) + nif_bif_result = THE_NON_VALUE; + erts_post_nif(&env); + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); + if (env.exiting) { + ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + goto do_dirty_schedule; + } + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + } + DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]); + ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_HOLE_CHECK(c_p); + if (ERTS_IS_GC_DESIRED(c_p)) { + nif_bif_result = erts_gc_after_bif_call(c_p, + nif_bif_result, + reg, bif_nif_arity); + } + SWAPIN; /* There might have been a garbage collection. 
*/ + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (is_value(nif_bif_result)) { + r(0) = nif_bif_result; + CHECK_TERM(r(0)); + I = c_p->cp; + c_p->cp = 0; + Goto(*I); + } else if (c_p->freason == TRAP) { + I = c_p->i; + if (c_p->flags & F_HIBERNATE_SCHED) { + c_p->flags &= ~F_HIBERNATE_SCHED; + goto do_dirty_schedule; + } + DispatchMacro(); + } + I = handle_error(c_p, c_p->cp, reg, vbf); + } + } + if (I == 0) { + goto do_dirty_schedule; + } else { + ASSERT(!is_value(r(0))); + SWAPIN; + Goto(do_call_nif); + } +} + static BifFunction translate_gc_bif(void* gcf) { diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index 2bbb8e3c91..4437adb57b 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -178,8 +178,10 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, size_t may_need) void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, Process* tracee) { +#ifdef DEBUG #ifdef ERTS_DIRTY_SCHEDULERS ErtsSchedulerData *esdp; +#endif #endif env->mod_nif = mod_nif; env->proc = p; @@ -193,12 +195,12 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, ASSERT(p->common.id != ERTS_INVALID_PID); +#ifdef DEBUG #ifdef ERTS_DIRTY_SCHEDULERS esdp = erts_get_scheduler_data(); ASSERT(esdp); if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { -#ifdef DEBUG erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); ASSERT(p->scheduler_data == esdp); @@ -206,44 +208,57 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, | ERTS_PSFLG_RUNNING_SYS)) && !(state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS))); + } #endif +#endif +} - } - else { - Process *sproc; +void erts_pre_dirty_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, + Process* tracee) +{ +#ifdef ERTS_DIRTY_SCHEDULERS #ifdef DEBUG - erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state; +#endif + Process *sproc; + ErtsSchedulerData *esdp; + esdp = erts_get_scheduler_data(); + ASSERT(esdp); + + erts_pre_nif(env, p, mod_nif, tracee); - ASSERT(!p->scheduler_data); - ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING) - && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))); +#ifdef DEBUG + state = erts_smp_atomic32_read_nob(&p->state); + + ASSERT(!p->scheduler_data); + ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING) + && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))); #endif - sproc = esdp->dirty_shadow_process; - ASSERT(sproc); - ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC); - ASSERT(erts_smp_atomic32_read_nob(&sproc->state) - == (ERTS_PSFLG_ACTIVE - | ERTS_PSFLG_DIRTY_RUNNING - | ERTS_PSFLG_PROXY)); - - sproc->next = p; - sproc->common.id = p->common.id; - sproc->htop = p->htop; - sproc->stop = p->stop; - sproc->hend = p->hend; - sproc->heap = p->heap; - sproc->abandoned_heap = p->abandoned_heap; - sproc->heap_sz = p->heap_sz; - sproc->high_water = p->high_water; - sproc->old_hend = p->old_hend; - sproc->old_htop = p->old_htop; - sproc->old_heap = p->old_heap; - sproc->mbuf = NULL; - sproc->mbuf_sz = 0; - ERTS_INIT_OFF_HEAP(&sproc->off_heap); - env->proc = sproc; - } + sproc = esdp->dirty_shadow_process; + ASSERT(sproc); + ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC); + ASSERT(erts_smp_atomic32_read_nob(&sproc->state) + == (ERTS_PSFLG_ACTIVE + | ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_PROXY)); + + sproc->next = p; + sproc->common.id = p->common.id; + sproc->htop = p->htop; + sproc->stop = p->stop; + sproc->hend = p->hend; + sproc->heap = p->heap; + 
sproc->abandoned_heap = p->abandoned_heap; + sproc->heap_sz = p->heap_sz; + sproc->high_water = p->high_water; + sproc->old_hend = p->old_hend; + sproc->old_htop = p->old_htop; + sproc->old_heap = p->old_heap; + sproc->mbuf = NULL; + sproc->mbuf_sz = 0; + ERTS_INIT_OFF_HEAP(&sproc->off_heap); + env->proc = sproc; #endif } diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index f8cbe60e76..e245c9e6bb 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -8197,7 +8197,7 @@ sched_dirty_cpu_thread_func(void *vesdp) #endif erts_thread_init_float(); - process_main(); + dirty_process_main(); /* No schedulers should *ever* terminate */ erts_exit(ERTS_ABORT_EXIT, "Dirty CPU scheduler thread number %beu terminated\n", @@ -8242,7 +8242,7 @@ sched_dirty_io_thread_func(void *vesdp) #endif erts_thread_init_float(); - process_main(); + dirty_process_main(); /* No schedulers should *ever* terminate */ erts_exit(ERTS_ABORT_EXIT, "Dirty I/O scheduler thread number %beu terminated\n", diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index b76b9cd874..15253bb53e 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -62,6 +62,8 @@ struct enif_environment_t /* ErlNifEnv */ extern void erts_pre_nif(struct enif_environment_t*, Process*, struct erl_module_nif*, Process* tracee); extern void erts_post_nif(struct enif_environment_t* env); +extern void erts_pre_dirty_nif(struct enif_environment_t*, Process*, + struct erl_module_nif*, Process* tracee); extern Eterm erts_nif_taints(Process* p); extern void erts_print_nif_taints(int to, void* to_arg); void erts_unload_nif(struct erl_module_nif* nif); @@ -1152,6 +1154,7 @@ void print_pass_through(int, byte*, int); int catchlevel(Process*); void init_emulator(void); void process_main(void); +void dirty_process_main(void); Eterm build_stacktrace(Process* c_p, Eterm exc); Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value); void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth); diff --git a/erts/emulator/test/dirty_nif_SUITE.erl b/erts/emulator/test/dirty_nif_SUITE.erl index c3afbc0803..83b098a704 100644 --- a/erts/emulator/test/dirty_nif_SUITE.erl +++ b/erts/emulator/test/dirty_nif_SUITE.erl @@ -32,19 +32,23 @@ dirty_nif/1, dirty_nif_send/1, dirty_nif_exception/1, call_dirty_nif_exception/1, dirty_scheduler_exit/1, dirty_call_while_terminated/1, - dirty_heap_access/1]). + dirty_heap_access/1, dirty_process_info/1, + dirty_process_register/1, dirty_process_trace/1]). -define(nif_stub,nif_stub_error(?LINE)). suite() -> [{ct_hooks,[ts_install_cth]}]. -all() -> +all() -> [dirty_nif, dirty_nif_send, dirty_nif_exception, dirty_scheduler_exit, dirty_call_while_terminated, - dirty_heap_access]. + dirty_heap_access, + dirty_process_info, + dirty_process_register, + dirty_process_trace]. init_per_suite(Config) -> try erlang:system_info(dirty_cpu_schedulers) of @@ -187,7 +191,7 @@ dirty_call_while_terminated(Config) when is_list(Config) -> blipp:blupp(Bin) end, [monitor,link]), - receive {dirty_alive, Pid} -> ok end, + receive {dirty_alive, _Pid} -> ok end, {value, {BinAddr, 4711, 2}} = lists:keysearch(4711, 2, element(2, process_info(self(), @@ -241,7 +245,7 @@ dirty_heap_access(Config) when is_list(Config) -> end), {N, R} = access_dirty_heap(Dirty, RGL, 0, 0), receive - {Pid, Res} -> + {_Pid, Res} -> 1000 = length(Res), lists:foreach(fun (X) -> Ref = X end, Res) end, @@ -269,12 +273,123 @@ access_dirty_heap(Dirty, RGL, N, R) -> end) end. 
+%% These tests verify that processes that access a process executing a +%% dirty NIF where the main lock is needed for that access do not get +%% blocked. Each test passes its pid to dirty_sleeper, which sends a +%% 'ready' message when it's running on a dirty scheduler and just before +%% it starts a 6 second sleep. When it receives the message, it verifies +%% that access to the dirty process is as it expects. After the dirty +%% process finishes its 6 second sleep but before it returns from the dirty +%% scheduler, it sends a 'done' message. If the tester already received +%% that message, the test fails because it means attempting to access the +%% dirty process waited for that process to return to a regular scheduler, +%% so verify that we haven't received that message, and also verify that +%% the dirty process is still alive immediately after accessing it. +dirty_process_info(Config) when is_list(Config) -> + access_dirty_process( + Config, + fun() -> ok end, + fun(NifPid) -> + PI = process_info(NifPid), + {current_function,{?MODULE,dirty_sleeper,1}} = + lists:keyfind(current_function, 1, PI), + ok + end, + fun(_) -> ok end). + +dirty_process_register(Config) when is_list(Config) -> + access_dirty_process( + Config, + fun() -> ok end, + fun(NifPid) -> + register(test_dirty_process_register, NifPid), + NifPid = whereis(test_dirty_process_register), + unregister(test_dirty_process_register), + false = lists:member(test_dirty_process_register, + registered()), + ok + end, + fun(_) -> ok end). + +dirty_process_trace(Config) when is_list(Config) -> + access_dirty_process( + Config, + fun() -> + erlang:trace_pattern({?MODULE,dirty_sleeper,1}, + [{'_',[],[{return_trace}]}], + [local,meta]), + ok + end, + fun(NifPid) -> + erlang:trace(NifPid, true, [call,timestamp]), + ok + end, + fun(NifPid) -> + receive + done -> + receive + {trace_ts,NifPid,call,{?MODULE,dirty_sleeper,_},_} -> + ok + after + 0 -> + error(missing_trace_call_message) + end, + receive + {trace_ts,NifPid,return_from,{?MODULE,dirty_sleeper,1}, + ok,_} -> + ok + after + 100 -> + error(missing_trace_return_message) + end + after + 6500 -> + error(missing_done_message) + end, + ok + end). + %% %% Internal... %% +access_dirty_process(Config, Start, Test, Finish) -> + {ok, Node} = start_node(Config, ""), + [ok] = mcall(Node, + [fun() -> + Path = ?config(data_dir, Config), + Lib = atom_to_list(?MODULE), + ok = erlang:load_nif(filename:join(Path,Lib), []), + ok = test_dirty_process_access(Start, Test, Finish) + end]), + stop_node(Node), + ok. + +test_dirty_process_access(Start, Test, Finish) -> + ok = Start(), + Self = self(), + NifPid = spawn_link(fun() -> + ok = dirty_sleeper(Self) + end), + ok = receive + ready -> + ok = Test(NifPid), + receive + done -> + error(dirty_process_info_blocked) + after + 0 -> + true = erlang:is_process_alive(NifPid), + ok + end + after + 3000 -> + error(timeout) + end, + ok = Finish(NifPid). + receive_any() -> - receive M -> M end. + receive M -> M end. start_node(Config) -> start_node(Config, ""). @@ -314,13 +429,13 @@ mcall(Node, Funs) -> %% The NIFs: lib_loaded() -> false. -call_nif_schedule(_,_) -> ?nif_stub. call_dirty_nif(_,_,_) -> ?nif_stub. send_from_dirty_nif(_) -> ?nif_stub. call_dirty_nif_exception(_) -> ?nif_stub. call_dirty_nif_zero_args() -> ?nif_stub. dirty_call_while_terminated_nif(_) -> ?nif_stub. dirty_sleeper() -> ?nif_stub. +dirty_sleeper(_) -> ?nif_stub. dirty_heap_access_nif(_) -> ?nif_stub. 
nif_stub_error(Line) -> diff --git a/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c b/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c index 2013c88167..1d2a099186 100644 --- a/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c +++ b/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c @@ -146,12 +146,31 @@ static ERL_NIF_TERM call_dirty_nif_zero_args(ErlNifEnv* env, int argc, const ERL static ERL_NIF_TERM dirty_sleeper(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { + ErlNifPid pid; + ErlNifEnv* msg_env = NULL; + assert(enif_is_on_dirty_scheduler(env)); + + /* If we get a pid argument, it indicates a process involved in the + test wants a message from us. Prior to the sleep we send a 'ready' + message, and then after the sleep, send a 'done' message. */ + if (argc == 1 && enif_get_local_pid(env, argv[0], &pid)) { + msg_env = enif_alloc_env(); + enif_send(env, &pid, msg_env, enif_make_atom(msg_env, "ready")); + } + #ifdef __WIN32__ Sleep(6000); #else sleep(6); #endif + + if (argc == 1) { + assert(msg_env != NULL); + enif_send(env, &pid, msg_env, enif_make_atom(msg_env, "done")); + enif_free_env(msg_env); + } + return enif_make_atom(env, "ok"); } @@ -216,6 +235,7 @@ static ErlNifFunc nif_funcs[] = {"call_dirty_nif_exception", 1, call_dirty_nif_exception, ERL_NIF_DIRTY_JOB_IO_BOUND}, {"call_dirty_nif_zero_args", 0, call_dirty_nif_zero_args, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"dirty_sleeper", 0, dirty_sleeper, ERL_NIF_DIRTY_JOB_IO_BOUND}, + {"dirty_sleeper", 1, dirty_sleeper, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"dirty_call_while_terminated_nif", 1, dirty_call_while_terminated_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"dirty_heap_access_nif", 1, dirty_heap_access_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND} }; -- cgit v1.2.3
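
For context, the dirty_process_main() loop added above only ever dispatches NIF calls that were flagged as dirty in their module's ErlNifFunc table, the same way dirty_nif_SUITE.c flags dirty_sleeper. Below is a minimal, self-contained sketch of such a module; the module and function names (my_dirty_module, my_dirty_nif) are illustrative only and not part of this patch, and nothing beyond the standard erl_nif.h API is assumed.

/* Minimal sketch of a NIF flagged to run on a dirty CPU scheduler --
 * the kind of call that is now dispatched through dirty_process_main()
 * instead of the full process_main() loop. Names are illustrative. */
#include "erl_nif.h"

static ERL_NIF_TERM my_dirty_nif(ErlNifEnv* env, int argc,
                                 const ERL_NIF_TERM argv[])
{
    /* Long-running work goes here; it executes on a dirty scheduler
     * thread, so blocking does not stall the normal schedulers. */
    return enif_make_atom(env, "ok");
}

static ErlNifFunc nif_funcs[] = {
    /* {name, arity, fptr, flags}: the flag routes the call to a dirty
     * CPU scheduler, mirroring the nif_funcs table in dirty_nif_SUITE.c. */
    {"my_dirty_nif", 0, my_dirty_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}
};

ERL_NIF_INIT(my_dirty_module, nif_funcs, NULL, NULL, NULL, NULL)

Calling my_dirty_module:my_dirty_nif/0 from Erlang would, after this patch, reach the OpCase(call_nif) block of dirty_process_main() on a dirty CPU scheduler thread, with erts_pre_dirty_nif() setting up the shadow process before the NIF body runs.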