author    Rickard Green <[email protected]>  2016-08-29 18:38:48 +0200
committer Rickard Green <[email protected]>  2016-08-29 18:38:48 +0200
commit    5006f1e0c45d4e9b888b2c0ca48130049d33074c (patch)
tree      0dba1e3ed85e04d22755461bcce40704e0d68b49 /erts/emulator
parent    928d74ffa09bd56652d9390b02fa51ef51d71d51 (diff)
parent    0e04e76df2ea71e2e2e116afef04c497d84b1024 (diff)
Merge branch 'rickard/ds-purge-module/OTP-13808' into maint
* rickard/ds-purge-module/OTP-13808:
  Perform check_process_code while process is executing dirty

Conflicts:
	erts/doc/src/erl_nif.xml
Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in              |   3
-rw-r--r--  erts/emulator/beam/atom.names          |   1
-rw-r--r--  erts/emulator/beam/beam_bif_load.c     |  40
-rw-r--r--  erts/emulator/beam/bif.tab             |   3
-rw-r--r--  erts/emulator/beam/erl_init.c          |  10
-rw-r--r--  erts/emulator/beam/erl_process.c       | 198
-rw-r--r--  erts/emulator/beam/global.h            |   3
-rw-r--r--  erts/emulator/test/dirty_nif_SUITE.erl |  75
8 files changed, 308 insertions(+), 25 deletions(-)
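
In user-visible terms, this merge makes check_process_code requests (and thereby code purging) work against processes that are currently executing on dirty schedulers, instead of waiting for the dirty work to return. A minimal sketch of the effect, modeled on the code_purge test case added at the end of this diff (old_mod and long_dirty_nif_call/0 are hypothetical stand-ins for the test's dirty_code_test module and its dirty_sleeper NIF):

    %% Sketch only: assumes old_mod is loaded and long_dirty_nif_call/0
    %% blocks for several seconds inside a dirty NIF.
    {Pid, Mon} = spawn_monitor(fun () ->
                                       old_mod:f(fun () -> long_dirty_nif_call() end)
                               end),
    true = erlang:delete_module(old_mod),
    %% Pid still references the now-old code, so a hard purge kills it.
    %% With this change the check is performed while Pid is still inside
    %% the dirty NIF, so purge_module/1 returns without waiting for it.
    true = erlang:purge_module(old_mod),
    receive {'DOWN', Mon, process, Pid, killed} -> ok end.
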
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 5b4e9c779b..e0260205e3 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -605,7 +605,8 @@ PRELOAD_BEAM = $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/erlang.beam \
$(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam \
$(ERL_TOP)/erts/preloaded/ebin/erl_tracer.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erts_literal_area_collector.beam
+ $(ERL_TOP)/erts/preloaded/ebin/erts_literal_area_collector.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erts_dirty_process_code_checker.beam
ifeq ($(TARGET),win32)
# On windows the preloaded objects are in a resource object.
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index ace3dc5230..89c42f4700 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -198,6 +198,7 @@ atom dgroup_leader
atom dictionary
atom dirty_cpu
atom dirty_cpu_schedulers_online
+atom dirty_execution
atom dirty_io
atom disable_trace
atom disabled
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 92340579ad..ad107b4861 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -58,6 +58,9 @@ static struct {
Process *erts_code_purger = NULL;
ErtsLiteralArea *erts_copy_literal_area = NULL;
+#ifdef ERTS_DIRTY_SCHEDULERS
+Process *erts_dirty_process_code_checker;
+#endif
#ifdef ERTS_NEW_PURGE_STRATEGY
Process *erts_literal_area_collector = NULL;
@@ -585,6 +588,43 @@ badarg:
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
+{
+#if !defined(ERTS_DIRTY_SCHEDULERS)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+#else
+ Process *rp;
+ int reds = 0;
+ Eterm res;
+
+ if (BIF_P != erts_dirty_process_code_checker)
+ BIF_ERROR(BIF_P, EXC_NOTSUP);
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+
+ if (is_not_atom(BIF_ARG_2))
+ BIF_ERROR(BIF_P, BADARG);
+
+ rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ if (!rp)
+ BIF_RET(am_false);
+
+ res = erts_check_process_code(rp, BIF_ARG_2, 0, &reds, BIF_P->fcalls);
+
+ if (BIF_P != rp)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+
+ ASSERT(is_value(res));
+
+ BIF_RET2(res, reds);
+#endif
+}
+
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
{
ErtsCodeIndex code_ix;
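
The new erts_internal:check_dirty_process_code/2 BIF above is reserved for the erts_dirty_process_code_checker system process: any other caller fails with notsup, and non-pid or non-atom arguments raise badarg. A small sketch of that guard as seen from Erlang (Pid and Module are assumed bound; erts_internal is an internal module, not a public API):

    %% Sketch: only the dirty-process-code-checker system process may use
    %% this BIF; from an ordinary process the call fails with notsup.
    try erts_internal:check_dirty_process_code(Pid, Module) of
        Res -> Res                  %% true | false when issued by the checker
    catch
        error:notsup -> not_permitted
    end.
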
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 90ac5e9ed8..80db4eb6ff 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -161,6 +161,7 @@ bif erts_internal:port_close/1
bif erts_internal:port_connect/2
bif erts_internal:request_system_task/3
+bif erts_internal:request_system_task/4
bif erts_internal:check_process_code/2
bif erts_internal:map_to_tuple_keys/1
@@ -644,6 +645,8 @@ bif erts_debug:map_info/1
# New in 19.0
#
+bif erts_internal:is_process_executing_dirty/1
+bif erts_internal:check_dirty_process_code/2
bif erts_internal:purge_module/2
bif binary:split/2
bif binary:split/3
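
Three erts_internal BIFs are added here: request_system_task/4 (a variant of request_system_task/3 that names the requester explicitly), is_process_executing_dirty/1, and check_dirty_process_code/2. The last is implemented in beam_bif_load.c above; the other two in erl_process.c below. is_process_executing_dirty/1 simply reports whether the target's state word has ERTS_PSFLG_DIRTY_RUNNING or ERTS_PSFLG_DIRTY_RUNNING_SYS set. A sketch of how it could be used to poll for a process leaving dirty execution (internal API, shown for illustration only):

    %% Sketch: busy-wait until Pid is no longer running on a dirty scheduler.
    wait_for_normal_execution(Pid) ->
        case erts_internal:is_process_executing_dirty(Pid) of
            true ->
                timer:sleep(10),
                wait_for_normal_execution(Pid);
            false ->
                ok
        end.
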
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 8afda42a71..781bf024dd 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -2274,6 +2274,16 @@ erl_start(int argc, char **argv)
erts_proc_inc_refc(erts_literal_area_collector);
#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+ pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker");
+ erts_dirty_process_code_checker
+ = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(pid));
+ ASSERT(erts_dirty_process_code_checker
+ && erts_dirty_process_code_checker->common.id == pid);
+ erts_proc_inc_refc(erts_dirty_process_code_checker);
+#endif
+
}
#ifdef ERTS_SMP
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index f833aadd91..c1699cb501 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -6674,14 +6674,24 @@ erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
}
static int
-schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
+schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
+ erts_aint32_t *fail_state_p)
{
int res;
int locked;
ErtsProcSysTaskQs *stqs, *free_stqs;
- erts_aint32_t state, a, n, enq_prio;
+ erts_aint32_t fail_state, state, a, n, enq_prio;
int enqueue; /* < 0 -> use proxy */
unsigned int prof_runnable_procs;
+ int strict_fail_state;
+
+ fail_state = *fail_state_p;
+ /*
+ * If the fail state is something other than just an exiting process,
+ * ensure that the task won't be scheduled when the
+ * receiver is in the failure state.
+ */
+ strict_fail_state = fail_state != ERTS_PSFLG_EXITING;
res = 1; /* prepare for success */
st->next = st->prev = st; /* Prep for empty prio queue */
@@ -6708,7 +6718,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
state = erts_smp_atomic32_read_nob(&p->state);
- if (state & ERTS_PSFLG_EXITING) {
+ if (state & fail_state) {
+ *fail_state_p = (state & fail_state);
free_stqs = stqs;
res = 0;
goto cleanup;
@@ -6762,7 +6773,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
/* Status lock prevents out of order "runnable proc" trace msgs */
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- if (!prof_runnable_procs) {
+ if (!prof_runnable_procs && !strict_fail_state) {
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
locked = 0;
}
@@ -6773,6 +6784,11 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
erts_aint32_t e;
n = e = a;
+ if (strict_fail_state && (a & fail_state)) {
+ *fail_state_p = (a & fail_state);
+ goto cleanup;
+ }
+
if (a & ERTS_PSFLG_FREE)
goto cleanup; /* We don't want to schedule free processes... */
@@ -9095,6 +9111,27 @@ resume_process_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE
+erts_internal_is_process_executing_dirty_1(BIF_ALIST_1)
+{
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else {
+ Process *rp = erts_proc_lookup(BIF_ARG_1);
+ if (rp) {
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ |ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ BIF_RET(am_true);
+ }
+ }
+ }
+#endif
+ BIF_RET(am_false);
+}
+
+
Uint
erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched)
{
@@ -9987,6 +10024,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (state & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_EXITING)) {
+ /*
+ * IMPORTANT! We need to take care of
+ * scheduled check-process-code requests
+ * before continuing with dirty execution!
+ */
/* Migrate to normal scheduler... */
goto sunlock_sched_out_proc;
}
@@ -10496,22 +10538,83 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
return reds;
}
-BIF_RETTYPE
-erts_internal_request_system_task_3(BIF_ALIST_3)
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static BIF_RETTYPE
+dispatch_system_task(Process *c_p, erts_aint_t fail_state,
+ ErtsProcSysTask *st, Eterm target,
+ Eterm prio, Eterm operation)
{
- Process *rp = erts_proc_lookup(BIF_ARG_1);
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+ Eterm *hp, msg;
+ Uint hsz, osz;
+ BIF_RETTYPE ret;
+
+ switch (st->type) {
+ case ERTS_PSTT_CPC:
+ rp = erts_dirty_process_code_checker;
+ ASSERT(fail_state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
+ if (c_p == rp) {
+ ERTS_BIF_PREP_RET(ret, am_dirty_execution);
+ return ret;
+ }
+ break;
+ default:
+ rp = NULL;
+ ERTS_INTERNAL_ERROR("Non-dispatchable system task");
+ break;
+ }
+
+ ERTS_BIF_PREP_RET(ret, am_ok);
+
+ /*
+ * Send a message of the form: {Requester, Target, Prio, Operation}
+ */
+
+ ASSERT(is_immed(st->requester));
+ ASSERT(is_immed(target));
+ ASSERT(is_immed(prio));
+
+ osz = size_object(operation);
+ hsz = 5 /* 4-tuple */ + osz;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, hsz, &hp, &ohp);
+
+ msg = copy_struct(operation, osz, &hp, ohp);
+ msg = TUPLE4(hp, st->requester, target, prio, msg);
+
+ erts_queue_message(rp, rp_locks, mp, msg, st->requester);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+
+ return ret;
+}
+
+#endif
+
+static BIF_RETTYPE
+request_system_task(Process *c_p, Eterm requester, Eterm target,
+ Eterm priority, Eterm operation)
+{
+ BIF_RETTYPE ret;
+ Process *rp = erts_proc_lookup(target);
ErtsProcSysTask *st = NULL;
- erts_aint32_t prio;
+ erts_aint32_t prio, fail_state = ERTS_PSFLG_EXITING;
Eterm noproc_res, req_type;
- if (!rp && !is_internal_pid(BIF_ARG_1)) {
- if (!is_external_pid(BIF_ARG_1))
+ if (!rp && !is_internal_pid(target)) {
+ if (!is_external_pid(target))
goto badarg;
- if (external_pid_dist_entry(BIF_ARG_1) != erts_this_dist_entry)
+ if (external_pid_dist_entry(target) != erts_this_dist_entry)
goto badarg;
}
- switch (BIF_ARG_2) {
+ switch (priority) {
case am_max: prio = PRIORITY_MAX; break;
case am_high: prio = PRIORITY_HIGH; break;
case am_normal: prio = PRIORITY_NORMAL; break;
@@ -10519,11 +10622,11 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
default: goto badarg;
}
- if (is_not_tuple(BIF_ARG_3))
+ if (is_not_tuple(operation))
goto badarg;
else {
int i;
- Eterm *tp = tuple_val(BIF_ARG_3);
+ Eterm *tp = tuple_val(operation);
Uint arity = arityval(*tp);
Eterm req_id;
Uint req_id_sz;
@@ -10561,7 +10664,7 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
ERTS_INIT_OFF_HEAP(&st->off_heap);
hp = &st->heap[0];
- st->requester = BIF_P->common.id;
+ st->requester = requester;
st->reply_tag = req_type;
st->req_id_sz = req_id_sz;
st->req_id = req_id_sz == 0 ? req_id : copy_struct(req_id,
@@ -10595,6 +10698,15 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
st->type = ERTS_PSTT_CPC;
if (!rp)
goto noproc;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ /*
+ * If the process should start executing dirty
+ * code it is important that this task is
+ * aborted. Therefore this strict fail state...
+ */
+ fail_state |= (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS);
+#endif
break;
#ifdef ERTS_NEW_PURGE_STRATEGY
@@ -10612,20 +10724,59 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
goto badarg;
}
- if (!schedule_process_sys_task(rp, prio, st)) {
- noproc:
- notify_sys_task_executed(BIF_P, st, noproc_res);
+ if (!schedule_process_sys_task(rp, prio, st, &fail_state)) {
+ Eterm failure;
+ if (fail_state & ERTS_PSFLG_EXITING) {
+ noproc:
+ failure = noproc_res;
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (fail_state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ ret = dispatch_system_task(c_p, fail_state, st,
+ target, priority, operation);
+ goto cleanup_return;
+ }
+#endif
+ else {
+ ERTS_INTERNAL_ERROR("Unknown failure schedule_process_sys_task()");
+ failure = am_internal_error;
+ }
+ notify_sys_task_executed(c_p, st, failure);
}
- BIF_RET(am_ok);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+
+ return ret;
badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+cleanup_return:
+#endif
+
if (st) {
erts_cleanup_offheap(&st->off_heap);
erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
}
- BIF_ERROR(BIF_P, BADARG);
+
+ return ret;
+}
+
+BIF_RETTYPE
+erts_internal_request_system_task_3(BIF_ALIST_3)
+{
+ return request_system_task(BIF_P, BIF_P->common.id,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+BIF_RETTYPE
+erts_internal_request_system_task_4(BIF_ALIST_4)
+{
+ return request_system_task(BIF_P, BIF_ARG_1,
+ BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
}
static void
@@ -10634,7 +10785,7 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
Process *rp = erts_proc_lookup(pid);
if (rp) {
ErtsProcSysTask *st;
- erts_aint32_t state;
+ erts_aint32_t state, fail_state;
int i;
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
@@ -10649,7 +10800,10 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
ERTS_INIT_OFF_HEAP(&st->off_heap);
state = erts_smp_atomic32_read_nob(&rp->state);
- if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state), st))
+ fail_state = ERTS_PSFLG_EXITING;
+
+ if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state),
+ st, &fail_state))
erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
}
}
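
Taken together, the erl_process.c changes work like this: schedule_process_sys_task() now takes a fail-state mask and reports back which failure flags the target was in. For check-process-code tasks the mask also contains ERTS_PSFLG_DIRTY_RUNNING and ERTS_PSFLG_DIRTY_RUNNING_SYS, so scheduling fails if the target is executing dirty; request_system_task() then hands the task over via dispatch_system_task(), which sends a {Requester, Target, Prio, Operation} message to the erts_dirty_process_code_checker system process (or returns dirty_execution when the checker itself is the caller, i.e. via request_system_task/4). A rough sketch of what that checker's receive loop might look like; the module itself is not part of this diff, and the inner shape of the operation tuple and of the reply message are assumptions, only the outer 4-tuple comes from dispatch_system_task():

    %% Hypothetical checker loop; operation and reply shapes are assumed.
    loop() ->
        receive
            {Requester, Target, Prio, {check_process_code, ReqId, Module} = Op} ->
                case erts_internal:request_system_task(Requester, Target, Prio, Op) of
                    ok ->
                        %% Target has left dirty execution; the system task was
                        %% scheduled on it and will deliver the reply itself.
                        ok;
                    dirty_execution ->
                        %% Still executing dirty: check it from here and reply
                        %% on behalf of the system task.
                        Res = erts_internal:check_dirty_process_code(Target, Module),
                        Requester ! {check_process_code, ReqId, Res}
                end;
            _Garbage ->
                ignore
        end,
        loop().
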
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index d844f9310b..7a00cd8f6e 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1017,6 +1017,9 @@ extern ErtsLiteralArea *erts_copy_literal_area;
#ifdef ERTS_NEW_PURGE_STRATEGY
extern Process *erts_literal_area_collector;
#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+extern Process *erts_dirty_process_code_checker;
+#endif
extern Process *erts_code_purger;
diff --git a/erts/emulator/test/dirty_nif_SUITE.erl b/erts/emulator/test/dirty_nif_SUITE.erl
index 83b098a704..c2d2e58957 100644
--- a/erts/emulator/test/dirty_nif_SUITE.erl
+++ b/erts/emulator/test/dirty_nif_SUITE.erl
@@ -33,7 +33,8 @@
dirty_nif_exception/1, call_dirty_nif_exception/1,
dirty_scheduler_exit/1, dirty_call_while_terminated/1,
dirty_heap_access/1, dirty_process_info/1,
- dirty_process_register/1, dirty_process_trace/1]).
+ dirty_process_register/1, dirty_process_trace/1,
+ code_purge/1]).
-define(nif_stub,nif_stub_error(?LINE)).
@@ -48,7 +49,8 @@ all() ->
dirty_heap_access,
dirty_process_info,
dirty_process_register,
- dirty_process_trace].
+ dirty_process_trace,
+ code_purge].
init_per_suite(Config) ->
try erlang:system_info(dirty_cpu_schedulers) of
@@ -349,6 +351,75 @@ dirty_process_trace(Config) when is_list(Config) ->
ok
end).
+dirty_code_test_code() ->
+ "
+-module(dirty_code_test).
+
+-export([func/1]).
+
+func(Fun) ->
+ Fun(),
+ blipp:blapp().
+
+".
+
+code_purge(Config) when is_list(Config) ->
+ Path = ?config(data_dir, Config),
+ File = filename:join(Path, "dirty_code_test.erl"),
+ ok = file:write_file(File, dirty_code_test_code()),
+ {ok, dirty_code_test, Bin} = compile:file(File, [binary]),
+ {module, dirty_code_test} = erlang:load_module(dirty_code_test, Bin),
+ Start = erlang:monotonic_time(),
+ {Pid1, Mon1} = spawn_monitor(fun () ->
+ dirty_code_test:func(fun () ->
+ %% Sleep for 6 seconds
+ %% in dirty nif...
+ dirty_sleeper()
+ end)
+ end),
+ {module, dirty_code_test} = erlang:load_module(dirty_code_test, Bin),
+ {Pid2, Mon2} = spawn_monitor(fun () ->
+ dirty_code_test:func(fun () ->
+ %% Sleep for 6 seconds
+ %% in dirty nif...
+ dirty_sleeper()
+ end)
+ end),
+ receive
+ {'DOWN', Mon1, process, Pid1, _} ->
+ ct:fail(premature_death)
+ after 100 ->
+ ok
+ end,
+ true = erlang:purge_module(dirty_code_test),
+ receive
+ {'DOWN', Mon1, process, Pid1, Reason} ->
+ killed = Reason
+ end,
+ receive
+ {'DOWN', Mon2, process, Pid2, _} ->
+ ct:fail(premature_death)
+ after 100 ->
+ ok
+ end,
+ true = erlang:delete_module(dirty_code_test),
+ receive
+ {'DOWN', Mon2, process, Pid2, _} ->
+ ct:fail(premature_death)
+ after 100 ->
+ ok
+ end,
+ true = erlang:purge_module(dirty_code_test),
+ receive
+ {'DOWN', Mon2, process, Pid2, Reason} ->
+ killed = Reason
+ end,
+ End = erlang:monotonic_time(),
+ Time = erlang:convert_time_unit(End-Start, native, milli_seconds),
+ io:format("Time=~p~n", [Time]),
+ true = Time =< 1000,
+ ok.
+
%%
%% Internal...
%%