Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names               |   8
-rw-r--r--  erts/emulator/beam/beam_bif_load.c          | 141
-rw-r--r--  erts/emulator/beam/beam_emu.c               | 293
-rw-r--r--  erts/emulator/beam/bif.c                    | 474
-rw-r--r--  erts/emulator/beam/bif.tab                  |   2
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c  |   2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c           | 105
-rw-r--r--  erts/emulator/beam/erl_bif_port.c           |   8
-rw-r--r--  erts/emulator/beam/erl_db_hash.c            |  34
-rw-r--r--  erts/emulator/beam/erl_db_hash.h            |   2
-rw-r--r--  erts/emulator/beam/erl_gc.c                 |  50
-rw-r--r--  erts/emulator/beam/erl_init.c               |   6
-rw-r--r--  erts/emulator/beam/erl_message.c            |  45
-rw-r--r--  erts/emulator/beam/erl_message.h            |  38
-rw-r--r--  erts/emulator/beam/erl_nif.c                | 154
-rw-r--r--  erts/emulator/beam/erl_nif.h                |  11
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h      |   4
-rw-r--r--  erts/emulator/beam/erl_port.h               |  58
-rw-r--r--  erts/emulator/beam/erl_process.c            | 147
-rw-r--r--  erts/emulator/beam/erl_process.h            |  28
-rw-r--r--  erts/emulator/beam/erl_process_dump.c       |   5
-rw-r--r--  erts/emulator/beam/erl_ptab.h               |   2
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c       |  20
-rw-r--r--  erts/emulator/beam/erl_trace.c              |  65
-rw-r--r--  erts/emulator/beam/erlang_lttng.h           |  48
-rw-r--r--  erts/emulator/beam/export.c                 |   5
-rw-r--r--  erts/emulator/beam/global.h                 |   4
-rw-r--r--  erts/emulator/beam/io.c                     | 449
-rw-r--r--  erts/emulator/beam/lttng-wrapper.h          |  12
-rw-r--r--  erts/emulator/beam/register.c               |  51
-rw-r--r--  erts/emulator/beam/register.h               |   2
-rw-r--r--  erts/emulator/beam/sys.h                    |   5
-rw-r--r--  erts/emulator/beam/utils.c                  |  26
33 files changed, 1549 insertions(+), 755 deletions(-)
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 8f65e71531..badd69856e 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -228,9 +228,6 @@ atom endian
atom env
atom eof
atom eol
-atom exception_from
-atom exception_trace
-atom extended
atom Eq='=:='
atom Eqeq='=='
atom erl_tracer
@@ -243,6 +240,8 @@ atom ets
atom ETS_TRANSFER='ETS-TRANSFER'
atom event
atom exact_reductions
+atom exception_from
+atom exception_trace
atom exclusive
atom exit_status
atom existing
@@ -251,7 +250,9 @@ atom existing_ports
atom existing
atom exiting
atom exports
+atom extended
atom external
+atom extra
atom false
atom fcgi
atom fd
@@ -389,7 +390,6 @@ atom min_heap_size
atom min_bin_vheap_size
atom minor_version
atom Minus='-'
-atom mixed
atom module
atom module_info
atom monitored_by
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 40d44dda4c..15e878ba65 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -762,6 +762,11 @@ check_mod_funs(Process *p, ErlOffHeap *off_heap, char *area, size_t area_size)
return 0;
}
+static Uint hfrag_literal_size(Eterm* start, Eterm* end,
+ char* lit_start, Uint lit_size);
+static void hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
+ Eterm *start, Eterm *end,
+ char *lit_start, Uint lit_size);
static Eterm
check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
@@ -842,9 +847,14 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
}
/*
- * Message queue can contains funs, but (at least currently) no
+ * Message queue can contain funs, and may contain
* literals. If we got references to this module from the message
- * queue, a GC cannot remove these...
+ * queue, they have to be handled here.
+ *
+ * If a literal is in the message queue we make an explicit copy of
+ * it and attach it to the heap fragment. Each message needs to be
+ * self-contained; we cannot save the literal in the old_heap or
+ * any heap other than the message itself.
*/
erts_smp_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
@@ -861,15 +871,31 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
hfrag = msgp->data.heap_frag;
else
continue;
- for (; hfrag; hfrag = hfrag->next) {
- if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size))
- return am_true;
- /* Should not contain any literals... */
- ASSERT(!any_heap_refs(&hfrag->mem[0],
- &hfrag->mem[hfrag->used_size],
- literals,
- lit_bsize));
- }
+ {
+ ErlHeapFragment *hf;
+            Uint lit_sz = 0;
+ for (hf=hfrag; hf; hf = hf->next) {
+ if (check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size))
+ return am_true;
+                lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ }
+ if (lit_sz > 0) {
+ ErlHeapFragment *bp = new_message_buffer(lit_sz);
+ Eterm *hp = bp->mem;
+
+ for (hf=hfrag; hf; hf = hf->next) {
+ hfrag_literal_copy(&hp, &bp->off_heap,
+ &hf->mem[0], &hf->mem[hf->used_size],
+ literals, lit_bsize);
+ hfrag=hf;
+ }
+ /* link new hfrag last */
+ ASSERT(hfrag->next == NULL);
+ hfrag->next = bp;
+ bp->next = NULL;
+ }
+ }
}
while (1) {
@@ -916,29 +942,26 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
goto try_literal_gc;
}
-#ifdef DEBUG
/*
- * Message buffer fragments should not have any references
- * to literals, and off heap lists should already have
- * been moved into process off heap structure.
+ * Message buffer fragments (matched messages)
+ * - off heap lists should already have been moved into
+ * process off heap structure.
+ * - Check for literals
*/
for (msgp = rp->msg_frag; msgp; msgp = msgp->next) {
- if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG)
- hfrag = &msgp->hfrag;
- else
- hfrag = msgp->data.heap_frag;
+ hfrag = erts_message_to_heap_frag(msgp);
for (; hfrag; hfrag = hfrag->next) {
Eterm *hp, *hp_end;
ASSERT(!check_mod_funs(rp, &hfrag->off_heap, mod_start, mod_size));
hp = &hfrag->mem[0];
hp_end = &hfrag->mem[hfrag->used_size];
- ASSERT(!any_heap_refs(hp, hp_end, literals, lit_bsize));
+
+ if (any_heap_refs(hp, hp_end, literals, lit_bsize))
+ goto try_literal_gc;
}
}
-#endif
-
return am_false;
try_literal_gc:
@@ -1038,6 +1061,80 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
return 0;
}
+static Uint
+hfrag_literal_size(Eterm* start, Eterm* end, char* lit_start, Uint lit_size)
+{
+ Eterm* p;
+ Eterm val;
+ Uint sz = 0;
+
+ for (p = start; p < end; p++) {
+ val = *p;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ if (ErtsInArea(val, lit_start, lit_size)) {
+ sz += size_object(val);
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (!header_is_transparent(val)) {
+ Eterm* new_p;
+ if (header_is_bin_matchstate(val)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) p;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ if (ErtsInArea(mb->orig, lit_start, lit_size)) {
+ sz += size_object(mb->orig);
+ }
+ }
+ new_p = p + thing_arityval(val);
+ ASSERT(start <= new_p && new_p < end);
+ p = new_p;
+ }
+ }
+ }
+ return sz;
+}
+
+static void
+hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
+ Eterm *start, Eterm *end,
+ char *lit_start, Uint lit_size) {
+ Eterm* p;
+ Eterm val;
+ Uint sz;
+
+ for (p = start; p < end; p++) {
+ val = *p;
+ switch (primary_tag(val)) {
+ case TAG_PRIMARY_BOXED:
+ case TAG_PRIMARY_LIST:
+ if (ErtsInArea(val, lit_start, lit_size)) {
+ sz = size_object(val);
+ val = copy_struct(val, sz, hpp, ohp);
+ *p = val;
+ }
+ break;
+ case TAG_PRIMARY_HEADER:
+ if (!header_is_transparent(val)) {
+ Eterm* new_p;
+ /* matchstate in message, not possible. */
+ if (header_is_bin_matchstate(val)) {
+ ErlBinMatchState *ms = (ErlBinMatchState*) p;
+ ErlBinMatchBuffer *mb = &(ms->mb);
+ if (ErtsInArea(mb->orig, lit_start, lit_size)) {
+ sz = size_object(mb->orig);
+ mb->orig = copy_struct(mb->orig, sz, hpp, ohp);
+ }
+ }
+ new_p = p + thing_arityval(val);
+ ASSERT(start <= new_p && new_p < end);
+ p = new_p;
+ }
+ }
+ }
+}
+
#undef in_area
#ifdef ERTS_SMP
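The check_process_code() hunk above follows a two-pass pattern: first measure how many heap words of module literals a message's heap-fragment chain references, then copy those literals into one extra fragment appended to the chain so the message stays self-contained. The following is a minimal sketch of that pattern only, built from the ERTS-internal helpers introduced above (hfrag_literal_size, hfrag_literal_copy, new_message_buffer); it is illustrative, not the committed code:

    /* Sketch: copy any literals referenced by a heap-fragment chain into a
     * new fragment linked last in the chain. */
    static void copy_literals_to_extra_frag(ErlHeapFragment *first,
                                            char *lit_start, Uint lit_bsize)
    {
        ErlHeapFragment *hf, *last = first;
        Uint lit_sz = 0;

        for (hf = first; hf; hf = hf->next) {      /* pass 1: measure */
            lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size],
                                         lit_start, lit_bsize);
            last = hf;
        }
        if (lit_sz > 0) {                          /* pass 2: copy */
            ErlHeapFragment *bp = new_message_buffer(lit_sz);
            Eterm *hp = bp->mem;
            for (hf = first; hf; hf = hf->next)
                hfrag_literal_copy(&hp, &bp->off_heap,
                                   &hf->mem[0], &hf->mem[hf->used_size],
                                   lit_start, lit_bsize);
            bp->next = NULL;
            last->next = bp;   /* the message now carries its own copies */
        }
    }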
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index f8f2e29c95..4716460a6b 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1323,11 +1323,7 @@ void process_main(void)
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
- if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
-#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
- && !ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))
-#endif
- ) {
+ if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
BeamInstr *inptr = find_function_from_pc(start_time_i);
BeamInstr *outptr = find_function_from_pc(c_p->i);
monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
@@ -1337,7 +1333,7 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- c_p = schedule(c_p, reds_used);
+ c_p = erts_schedule(NULL, c_p, reds_used);
ASSERT(!(c_p->flags & F_HIPE_MODE));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
start_time = 0;
@@ -3559,12 +3555,10 @@ do { \
typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
NifF* fp = vbf = (NifF*) I[1];
struct enif_environment_t env;
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!c_p->scheduler_data)
- live_hf_end = ERTS_INVALID_HFRAG_PTR; /* On dirty scheduler */
- else
+#ifdef ERTS_SMP
+ ASSERT(c_p->scheduler_data);
#endif
- live_hf_end = c_p->mbuf;
+ live_hf_end = c_p->mbuf;
erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
if (env.exception_thrown)
@@ -3574,10 +3568,7 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- if (env.exiting) {
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- goto do_schedule;
- }
+ ASSERT(!env.exiting);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
@@ -5162,6 +5153,278 @@ do { \
}
}
+/*
+ * erts_dirty_process_main() is what dirty schedulers execute. Since they
+ * handle only NIF calls, they do not need to be able to execute all BEAM
+ * instructions.
+ */
+void erts_dirty_process_main(ErtsSchedulerData *esdp)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ Process* c_p = NULL;
+ ErtsMonotonicTime start_time;
+#ifdef DEBUG
+ ERTS_DECLARE_DUMMY(Eterm pid);
+#endif
+
+ /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
+ * in all other cases x0 is used.
+ */
+ register Eterm* reg REG_xregs = NULL;
+
+ /*
+ * Top of heap (next free location); grows upwards.
+ */
+ register Eterm* HTOP REG_htop = NULL;
+
+ /* Stack pointer. Grows downwards; points
+ * to last item pushed (normally a saved
+ * continuation pointer).
+ */
+ register Eterm* E REG_stop = NULL;
+
+ /*
+ * Pointer to next threaded instruction.
+ */
+ register BeamInstr *I REG_I = NULL;
+
+ ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */
+
+ /*
+ * start_time always positive for dirty CPU schedulers,
+ * and negative for dirty I/O schedulers.
+ */
+
+ if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp)) {
+ start_time = erts_get_monotonic_time(NULL);
+ ASSERT(start_time >= 0);
+ }
+ else {
+ start_time = ERTS_SINT64_MIN;
+ ASSERT(start_time < 0);
+ }
+
+ goto do_dirty_schedule;
+
+ context_switch:
+ c_p->arity = I[-1];
+ c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+
+ {
+ int reds_used;
+ Eterm* argp;
+ int i;
+
+ /*
+ * Make sure that there is enough room for the argument registers to be saved.
+ */
+ if (c_p->arity > c_p->max_arg_reg) {
+ /*
+ * Yes, this is an expensive operation, but you only pay it the first
+ * time you call a function with more than 6 arguments which is
+ * scheduled out. This is better than paying for 26 words of wasted
+ * space for most processes which never call functions with more than
+ * 6 arguments.
+ */
+ Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
+ if (c_p->arg_reg != c_p->def_arg_reg) {
+ c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
+ (void *) c_p->arg_reg,
+ size);
+ } else {
+ c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
+ }
+ c_p->max_arg_reg = c_p->arity;
+ }
+
+ /*
+ * Save the argument registers and everything else.
+ */
+
+ argp = c_p->arg_reg;
+ for (i = c_p->arity - 1; i >= 0; i--) {
+ argp[i] = reg[i];
+ }
+ SWAPOUT;
+ c_p->i = I;
+
+ do_dirty_schedule:
+
+ if (start_time < 0) {
+ /*
+ * Dirty I/O scheduler:
+ * One reduction consumed regardless of
+ * time spent in the dirty NIF.
+ */
+ reds_used = esdp->virtual_reds + 1;
+ }
+ else {
+ /*
+ * Dirty CPU scheduler:
+ * Currently two reductions consumed per
+ * micro second spent in the dirty NIF.
+ */
+ ErtsMonotonicTime time;
+ time = erts_get_monotonic_time(esdp);
+ time -= start_time;
+ time = ERTS_MONOTONIC_TO_USEC(time);
+ time *= (CONTEXT_REDS-1)/1000 + 1;
+ ASSERT(time >= 0);
+ if (time == 0)
+ time = 1; /* At least one reduction */
+ time += esdp->virtual_reds;
+ reds_used = time > INT_MAX ? INT_MAX : (int) time;
+ }
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ c_p = erts_schedule(esdp, c_p, reds_used);
+
+ if (start_time >= 0) {
+ start_time = erts_get_monotonic_time(esdp);
+ ASSERT(start_time >= 0);
+ }
+ }
+
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+#ifdef DEBUG
+ pid = c_p->common.id; /* Save for debugging purposes */
+#endif
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ ASSERT(!(c_p->flags & F_HIPE_MODE));
+ ERTS_MSACC_UPDATE_CACHE_X();
+
+ reg = esdp->x_reg_array;
+ {
+ Eterm* argp;
+ int i;
+
+ argp = c_p->arg_reg;
+ for (i = c_p->arity - 1; i >= 0; i--) {
+ reg[i] = argp[i];
+ CHECK_TERM(reg[i]);
+ }
+
+ /*
+ * We put the original reduction count in the process structure, to reduce
+ * the code size (referencing a field in a struct through a pointer stored
+ * in a register gives smaller code than referencing a global variable).
+ */
+
+ I = c_p->i;
+
+ ASSERT(BeamOp(op_call_nif) == (BeamInstr *) *I);
+
+ /*
+ * Set fcalls even though we ignore it, so we don't
+ * confuse code accessing it...
+ */
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ c_p->fcalls = 0;
+ else
+ c_p->fcalls = CONTEXT_REDS;
+
+ SWAPIN;
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(process_scheduled)) {
+ DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
+ dtrace_proc_str(c_p, process_buf);
+
+ if (ERTS_PROC_IS_EXITING(c_p)) {
+ strcpy(fun_buf, "<exiting>");
+ } else {
+ BeamInstr *fptr = find_function_from_pc(c_p->i);
+ if (fptr) {
+ dtrace_fun_decode(c_p, (Eterm)fptr[0],
+ (Eterm)fptr[1], (Uint)fptr[2],
+ NULL, fun_buf);
+ } else {
+ erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
+ "<unknown/%p>", *I);
+ }
+ }
+
+ DTRACE2(process_scheduled, process_buf, fun_buf);
+ }
+#endif
+ }
+
+ {
+#ifdef DEBUG
+ Eterm result;
+#endif
+ Eterm arity;
+
+ {
+ /*
+ * call_nif is always first instruction in function:
+ *
+ * I[-3]: Module
+ * I[-2]: Function
+ * I[-1]: Arity
+ * I[0]: &&call_nif
+ * I[1]: Function pointer to NIF function
+ * I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
+ */
+ BifFunction vbf;
+
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
+
+ DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ c_p->current = I-3; /* current and vbf set to please handle_error */
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ arity = I[-1];
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ {
+ typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
+ NifF* fp = vbf = (NifF*) I[1];
+ struct enif_environment_t env;
+ ASSERT(!c_p->scheduler_data);
+
+ erts_pre_dirty_nif(esdp, &env, c_p,
+ (struct erl_module_nif*)I[2], NULL);
+
+#ifdef DEBUG
+ result =
+#else
+ (void)
+#endif
+ (*fp)(&env, arity, reg);
+
+ erts_post_nif(&env);
+
+ ASSERT(!is_value(result));
+ ASSERT(c_p->freason == TRAP);
+ ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (env.exiting)
+ goto do_dirty_schedule;
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
+
+ DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ ERTS_HOLE_CHECK(c_p);
+ SWAPIN;
+ I = c_p->i;
+ goto context_switch;
+ }
+ }
+#endif /* ERTS_DIRTY_SCHEDULERS */
+}
+
static BifFunction
translate_gc_bif(void* gcf)
{
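A quick worked example of the dirty CPU reduction accounting introduced above, assuming CONTEXT_REDS is 2000 (erl_vm.h), which matches the "two reductions per microsecond" comment in the hunk; the helper name is hypothetical:

    /* Sketch: how reds_used is derived for a dirty CPU scheduler. */
    static int dirty_cpu_reds(ErtsSchedulerData *esdp, ErtsMonotonicTime usec)
    {
        /* (2000-1)/1000 + 1 == 2 reductions per microsecond */
        ErtsMonotonicTime reds = usec * ((CONTEXT_REDS - 1) / 1000 + 1);
        if (reds == 0)
            reds = 1;                   /* charge at least one reduction */
        reds += esdp->virtual_reds;     /* plus work bumped onto this scheduler */
        return reds > INT_MAX ? INT_MAX : (int) reds;
        /* e.g. a NIF that runs 1500 us is charged 1500 * 2 = 3000 reductions */
    }

Dirty I/O schedulers skip this entirely and always charge one reduction plus virtual_reds, as shown in the do_dirty_schedule block.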
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 2a3bd4afe5..fc14061a44 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -282,20 +282,17 @@ res_no_proc: {
}
}
-#define ERTS_DEMONITOR_FALSE 2
-#define ERTS_DEMONITOR_TRUE 1
-#define ERTS_DEMONITOR_BADARG 0
-#define ERTS_DEMONITOR_YIELD_TRUE -1
-#define ERTS_DEMONITOR_INTERNAL_ERROR -2
-
-static int
+/* This function is allowed to return a range of values handled by demonitor/1-2,
+ * namely the atoms true, false, yield, internal_error, badarg, or THE_NON_VALUE.
+ */
+static Eterm
remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
{
ErtsDSigData dsd;
ErtsMonitor *dmon;
ErtsMonitor *mon;
int code;
- int res;
+ Eterm res = am_false;
#ifndef ERTS_SMP
int stale_mon = 0;
#endif
@@ -328,7 +325,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
- res = ERTS_DEMONITOR_TRUE;
+ res = am_true;
break;
case ERTS_DSIG_PREP_CONNECTED:
@@ -352,7 +349,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
* This is possible when smp support is enabled.
* 'DOWN' message just arrived.
*/
- res = ERTS_DEMONITOR_TRUE;
+ res = am_true;
}
else {
/*
@@ -367,16 +364,13 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
: mon->pid),
ref,
0);
- res = (code == ERTS_DSIG_SEND_YIELD
- ? ERTS_DEMONITOR_YIELD_TRUE
- : ERTS_DEMONITOR_TRUE);
+ res = (code == ERTS_DSIG_SEND_YIELD ? am_yield : am_true);
erts_destroy_monitor(dmon);
-
}
break;
default:
ASSERT(! "Invalid dsig prepare result");
- return ERTS_DEMONITOR_INTERNAL_ERROR;
+ return am_internal_error;
}
#ifndef ERTS_SMP
@@ -404,27 +398,96 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
return res;
}
-static int demonitor(Process *c_p, Eterm ref, Eterm *multip)
+static ERTS_INLINE void
+demonitor_local_process(Process *c_p, Eterm ref, Eterm to, Eterm *res)
+{
+ Process *rp = erts_pid2proc_opt(c_p,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ to,
+ ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X);
+ ErtsMonitor *mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
+
+#ifndef ERTS_SMP
+ ASSERT(mon);
+#else
+ if (!mon)
+ *res = am_false;
+ else
+#endif
+ {
+ *res = am_true;
+ erts_destroy_monitor(mon);
+ }
+ if (rp) {
+ ErtsMonitor *rmon;
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
+ if (rp != c_p)
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (rmon != NULL)
+ erts_destroy_monitor(rmon);
+ }
+ else {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p);
+ }
+}
+
+static ERTS_INLINE BIF_RETTYPE
+demonitor_local_port(Process *origin, Eterm ref, Eterm target)
{
- ErtsMonitor *mon = NULL; /* The monitor entry to delete */
- Process *rp; /* Local target process */
- Eterm to = NIL; /* Monitor link traget */
- DistEntry *dep = NULL; /* Target's distribution entry */
- int deref_de = 0;
- int res;
- int unlock_link = 1;
+ BIF_RETTYPE res = am_false;
+ Port *port = erts_port_lookup_raw(target);
+
+ if (!port) {
+ BIF_ERROR(origin, BADARG);
+ }
+ erts_smp_proc_unlock(origin, ERTS_PROC_LOCK_LINK);
+
+ if (port) {
+ Eterm trap_ref;
+ switch (erts_port_demonitor(origin, ERTS_PORT_DEMONITOR_NORMAL,
+ port, ref, &trap_ref)) {
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ break;
+ case ERTS_PORT_OP_SCHEDULED:
+ BIF_TRAP3(await_port_send_result_trap, origin, trap_ref,
+ am_busy_port, am_true);
+        /* the busy_port atom will never be returned, because it cannot be
+         * returned from erts_port_(de)monitor, but just in case the internal
+         * API changes in the future - you may see this atom */
+ default:
+ break;
+ }
+ }
+ else {
+ ERTS_SMP_ASSERT_IS_NOT_EXITING(origin);
+ }
+ BIF_RET(res);
+}
+/* Can return the atoms true, false, yield, internal_error, badarg, or
+ * THE_NON_VALUE if an error occurred or a trap has been set up.
+ */
+static
+BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip)
+{
+ ErtsMonitor *mon = NULL; /* The monitor entry to delete */
+    Eterm to = NIL;           /* Monitor link target */
+ DistEntry *dep = NULL; /* Target's distribution entry */
+ int deref_de = 0;
+ BIF_RETTYPE res = am_false;
+ int unlock_link = 1;
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_LINK);
if (is_not_internal_ref(ref)) {
- res = ERTS_DEMONITOR_BADARG;
+ res = am_badarg;
goto done; /* Cannot be this monitor's ref */
}
mon = erts_lookup_monitor(ERTS_P_MONITORS(c_p), ref);
if (!mon) {
- res = ERTS_DEMONITOR_FALSE;
goto done;
}
@@ -432,70 +495,50 @@ static int demonitor(Process *c_p, Eterm ref, Eterm *multip)
case MON_TIME_OFFSET:
*multip = am_true;
erts_demonitor_time_offset(ref);
- res = ERTS_DEMONITOR_TRUE;
+ res = am_true;
break;
case MON_ORIGIN:
to = mon->pid;
*multip = am_false;
if (is_atom(to)) {
- /* Monitoring a name at node to */
- ASSERT(is_node_name_atom(to));
- dep = erts_sysname_to_connected_dist_entry(to);
- ASSERT(dep != erts_this_dist_entry);
- if (dep)
- deref_de = 1;
+ /* Monitoring a name at node to */
+ ASSERT(is_node_name_atom(to));
+ dep = erts_sysname_to_connected_dist_entry(to);
+ ASSERT(dep != erts_this_dist_entry);
+ if (dep)
+ deref_de = 1;
+ } else if (is_port(to)) {
+ if (port_dist_entry(to) != erts_this_dist_entry) {
+ goto badarg;
+ }
+ res = demonitor_local_port(c_p, ref, to);
+ unlock_link = 0;
+ goto done;
} else {
- ASSERT(is_pid(to));
- dep = pid_dist_entry(to);
+ ASSERT(is_pid(to));
+ dep = pid_dist_entry(to);
}
if (dep != erts_this_dist_entry) {
- res = remote_demonitor(c_p, dep, ref, to);
- /* remote_demonitor() unlocks link lock on c_p */
- unlock_link = 0;
+ res = remote_demonitor(c_p, dep, ref, to);
+ /* remote_demonitor() unlocks link lock on c_p */
+ unlock_link = 0;
}
else { /* Local monitor */
- if (deref_de) {
- deref_de = 0;
- erts_deref_dist_entry(dep);
- }
- dep = NULL;
- rp = erts_pid2proc_opt(c_p,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
- to,
- ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
-#ifndef ERTS_SMP
- ASSERT(mon);
-#else
- if (!mon)
- res = ERTS_DEMONITOR_FALSE;
- else
-#endif
- {
- res = ERTS_DEMONITOR_TRUE;
- erts_destroy_monitor(mon);
- }
- if (rp) {
- ErtsMonitor *rmon;
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
- if (rp != c_p)
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon != NULL)
- erts_destroy_monitor(rmon);
- }
- else {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p);
- }
-
+ if (deref_de) {
+ deref_de = 0;
+ erts_deref_dist_entry(dep);
+ }
+ dep = NULL;
+ demonitor_local_process(c_p, ref, to, &res);
}
break;
- default:
- res = ERTS_DEMONITOR_BADARG;
+ default /* case */ :
+badarg:
+ res = am_badarg; /* will be converted to error by caller */
*multip = am_false;
break;
}
- done:
+done:
if (unlock_link)
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
@@ -506,21 +549,20 @@ static int demonitor(Process *c_p, Eterm ref, Eterm *multip)
}
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
- return res;
+ BIF_RET(res);
}
BIF_RETTYPE demonitor_1(BIF_ALIST_1)
{
Eterm multi;
switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
- case ERTS_DEMONITOR_FALSE:
- case ERTS_DEMONITOR_TRUE:
- BIF_RET(am_true);
- case ERTS_DEMONITOR_YIELD_TRUE:
- ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
- case ERTS_DEMONITOR_BADARG:
- BIF_ERROR(BIF_P, BADARG);
- case ERTS_DEMONITOR_INTERNAL_ERROR:
+ case am_false:
+ case am_true: BIF_RET(am_true);
+ case THE_NON_VALUE: BIF_RET(THE_NON_VALUE);
+ case am_yield: ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
+ case am_badarg: BIF_ERROR(BIF_P, BADARG);
+
+ case am_internal_error:
default:
ASSERT(! "demonitor(): internal error");
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
@@ -529,11 +571,11 @@ BIF_RETTYPE demonitor_1(BIF_ALIST_1)
BIF_RETTYPE demonitor_2(BIF_ALIST_2)
{
- Eterm res = am_true;
- Eterm multi = am_false;
- int info = 0;
- int flush = 0;
- Eterm list = BIF_ARG_2;
+ BIF_RETTYPE res = am_true;
+ Eterm multi = am_false;
+ int info = 0;
+ int flush = 0;
+ Eterm list = BIF_ARG_2;
while (is_list(list)) {
Eterm* consp = list_val(list);
@@ -554,24 +596,27 @@ BIF_RETTYPE demonitor_2(BIF_ALIST_2)
goto badarg;
switch (demonitor(BIF_P, BIF_ARG_1, &multi)) {
- case ERTS_DEMONITOR_FALSE:
+ case THE_NON_VALUE:
+        /* If another error occurred or a trap has been set up - pass it through */
+ BIF_RET(THE_NON_VALUE);
+ case am_false:
if (info)
res = am_false;
if (flush) {
- flush_messages:
+flush_messages:
BIF_TRAP3(flush_monitor_messages_trap, BIF_P,
BIF_ARG_1, multi, res);
}
- case ERTS_DEMONITOR_TRUE:
+ case am_true:
if (multi == am_true && flush)
goto flush_messages;
BIF_RET(res);
- case ERTS_DEMONITOR_YIELD_TRUE:
+ case am_yield:
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
- case ERTS_DEMONITOR_BADARG:
- badarg:
+ case am_badarg:
+badarg:
BIF_ERROR(BIF_P, BADARG);
- case ERTS_DEMONITOR_INTERNAL_ERROR:
+ case am_internal_error:
default:
ASSERT(! "demonitor(): internal error");
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
@@ -615,14 +660,13 @@ erts_queue_monitor_message(Process *p,
erts_queue_message(p, *p_locksp, msgp, tup, am_system);
}
-static BIF_RETTYPE
+static Eterm
local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean)
{
- BIF_RETTYPE ret;
- Process *rp;
+ Eterm ret = mon_ref;
+ Process *rp;
ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK;
- ERTS_BIF_PREP_RET(ret, mon_ref);
if (target == p->common.id) {
return ret;
}
@@ -658,40 +702,112 @@ local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean)
}
static BIF_RETTYPE
-local_name_monitor(Process *p, Eterm target_name)
+local_port_monitor(Process *origin, Eterm target)
{
- BIF_RETTYPE ret;
- Eterm mon_ref;
- ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK;
- Process *rp;
+ BIF_RETTYPE ref = erts_make_ref(origin);
+ Port *port = erts_sig_lookup_port(origin, target);
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN;
- mon_ref = erts_make_ref(p);
- ERTS_BIF_PREP_RET(ret, mon_ref);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
- rp = erts_whereis_process(p, p_locks, target_name, ERTS_PROC_LOCK_LINK,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (!rp) {
- DeclareTmpHeap(lhp,3,p);
+ if (!port) {
+res_no_proc:
+ /* Send the DOWN message immediately. Ref is made on the fly because
+ * caller has never seen it yet. */
+ erts_queue_monitor_message(origin, &p_locks, ref,
+ am_port, target, am_noproc);
+ }
+ else {
+ switch (erts_port_monitor(origin, port, target, &ref)) {
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ goto res_no_proc;
+ case ERTS_PORT_OP_SCHEDULED:
+ BIF_TRAP3(await_port_send_result_trap, origin, ref,
+ am_busy_port, ref);
+            /* the busy_port atom will never be returned, because it cannot be
+             * returned from erts_port_monitor, but just in case the internal
+             * API changes in the future - you may see this atom */
+ default:
+ break;
+ }
+ }
+ erts_smp_proc_unlock(origin, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ BIF_RET(ref);
+}
+
+/* Type = process | port :: atom(), 1st argument passed to erlang:monitor/2
+ */
+static BIF_RETTYPE
+local_name_monitor(Process *self, Eterm type, Eterm target_name)
+{
+ BIF_RETTYPE ret = erts_make_ref(self);
+
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK;
+ Process *proc = NULL;
+ Port *port = NULL;
+
+ erts_smp_proc_lock(self, ERTS_PROC_LOCK_LINK);
+
+ erts_whereis_name(self, p_locks, target_name,
+ &proc, ERTS_PROC_LOCK_LINK,
+ ERTS_P2P_FLG_ALLOW_OTHER_X,
+ &port, 0);
+
+ /* If the name is not registered,
+ * or if we asked for proc and got a port,
+ * or if we asked for port and got a proc,
+ * we just send the 'DOWN' message.
+ */
+ if ((!proc && !port) ||
+ (type == am_process && port) ||
+ (type == am_port && proc)) {
+ DeclareTmpHeap(lhp,3,self);
Eterm item;
- UseTmpHeap(3,p);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+ UseTmpHeap(3,self);
+
+ erts_smp_proc_unlock(self, ERTS_PROC_LOCK_LINK);
p_locks &= ~ERTS_PROC_LOCK_LINK;
+
item = TUPLE2(lhp, target_name, erts_this_dist_entry->sysname);
- erts_queue_monitor_message(p, &p_locks,
- mon_ref, am_process, item, am_noproc);
- UnUseTmpHeap(3,p);
- }
- else if (rp != p) {
- erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, rp->common.id,
- target_name);
- erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id,
- target_name);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ erts_queue_monitor_message(self, &p_locks,
+ ret,
+ type, /* = process|port :: atom() */
+ item, am_noproc);
+ UnUseTmpHeap(3,self);
+ }
+ else if (port) {
+ erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ p_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ switch (erts_port_monitor(self, port, target_name, &ret)) {
+ case ERTS_PORT_OP_DONE:
+ return ret;
+ case ERTS_PORT_OP_SCHEDULED: { /* Scheduled a signal */
+ ASSERT(is_internal_ref(ret));
+ BIF_TRAP3(await_port_send_result_trap, self,
+ ret, am_true, ret);
+ /* bif_trap returns */
+ } break;
+ default:
+ goto badarg;
+ }
+ }
+ else if (proc != self) {
+ erts_add_monitor(&ERTS_P_MONITORS(self), MON_ORIGIN, ret,
+ proc->common.id, target_name);
+ erts_add_monitor(&ERTS_P_MONITORS(proc), MON_TARGET, ret,
+ self->common.id, target_name);
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_LINK);
}
- erts_smp_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN);
-
- return ret;
+ if (p_locks) {
+ erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ }
+ BIF_RET(ret);
+badarg:
+ if (p_locks) {
+ erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN);
+ }
+ BIF_ERROR(self, BADARG);
}
static BIF_RETTYPE
@@ -758,7 +874,7 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2,
break;
}
- return ret;
+ BIF_RET(ret);
}
BIF_RETTYPE monitor_2(BIF_ALIST_2)
@@ -772,8 +888,9 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2)
switch (BIF_ARG_1) {
case am_time_offset: {
Eterm ref;
- if (BIF_ARG_2 != am_clock_service)
- goto error;
+ if (BIF_ARG_2 != am_clock_service) {
+ goto badarg;
+ }
ref = erts_make_ref(BIF_P);
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
erts_add_monitor(&ERTS_P_MONITORS(BIF_P), MON_TIME_OFFSET,
@@ -783,46 +900,57 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2)
BIF_RET(ref);
}
case am_process:
+ case am_port:
break;
default:
- goto error;
+ goto badarg;
}
- if (is_internal_pid(target)) {
- local_pid:
- ret = local_pid_monitor(BIF_P, target, erts_make_ref(BIF_P), 0);
- } else if (is_external_pid(target)) {
+ if (is_internal_pid(target) && BIF_ARG_1 == am_process) {
+local_pid:
+ ret = local_pid_monitor(BIF_P, target, erts_make_ref(BIF_P), 0);
+ } else if (is_external_pid(target) && BIF_ARG_1 == am_process) {
dep = external_pid_dist_entry(target);
if (dep == erts_this_dist_entry)
goto local_pid;
ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, target, 0);
+ } else if (is_internal_port(target) && BIF_ARG_1 == am_port) {
+local_port:
+ ret = local_port_monitor(BIF_P, target);
+ } else if (is_external_port(target) && BIF_ARG_1 == am_port) {
+ dep = external_port_dist_entry(target);
+ if (dep == erts_this_dist_entry) {
+ goto local_port;
+ }
+        goto badarg; /* Remote ports are not supported */
} else if (is_atom(target)) {
- ret = local_name_monitor(BIF_P, target);
+ ret = local_name_monitor(BIF_P, BIF_ARG_1, target);
} else if (is_tuple(target)) {
Eterm *tp = tuple_val(target);
Eterm remote_node;
Eterm name;
- if (arityval(*tp) != 2)
- goto error;
+ if (arityval(*tp) != 2) {
+ goto badarg;
+ }
remote_node = tp[2];
name = tp[1];
if (!is_atom(remote_node) || !is_atom(name)) {
- goto error;
+ goto badarg;
}
if (!erts_is_alive && remote_node != am_Noname) {
- goto error; /* Remote monitor from (this) undistributed node */
+ goto badarg; /* Remote monitor from (this) undistributed node */
}
dep = erts_sysname_to_connected_dist_entry(remote_node);
if (dep == erts_this_dist_entry) {
deref_de = 1;
- ret = local_name_monitor(BIF_P, name);
+ ret = local_name_monitor(BIF_P, BIF_ARG_1, name);
} else {
if (dep)
deref_de = 1;
ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, name, 1);
}
} else {
- error:
+badarg:
ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
}
if (deref_de) {
@@ -917,9 +1045,6 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
goto error;
} else if (arg == am_message_queue_data) {
switch (val) {
- case am_mixed:
- so.flags &= ~(SPO_OFF_HEAP_MSGQ|SPO_ON_HEAP_MSGQ);
- break;
case am_on_heap:
so.flags &= ~SPO_OFF_HEAP_MSGQ;
so.flags |= SPO_ON_HEAP_MSGQ;
@@ -3836,59 +3961,11 @@ BIF_RETTYPE display_nl_0(BIF_ALIST_0)
/**********************************************************************/
-/* stop the system */
-/* ARGSUSED */
-BIF_RETTYPE halt_0(BIF_ALIST_0)
-{
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt()\n"));
- erts_halt(0);
- ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
-}
-
-/**********************************************************************/
#define HALT_MSG_SIZE 200
-static char halt_msg[HALT_MSG_SIZE];
-
-/* stop the system with exit code */
-/* ARGSUSED */
-BIF_RETTYPE halt_1(BIF_ALIST_1)
-{
- Uint code;
-
- if (term_to_Uint_mask(BIF_ARG_1, &code)) {
- int pos_int_code = (int) (code & INT_MAX);
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
- erts_halt(pos_int_code);
- ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
- }
- else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_exit(ERTS_ABORT_EXIT, "");
- }
- else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
- Sint i;
-
- if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE-1)) < 0) {
- goto error;
- }
- halt_msg[i] = '\0';
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg);
- }
- else
- goto error;
- return NIL; /* Pedantic (lint does not know about erts_exit) */
- error:
- BIF_ERROR(BIF_P, BADARG);
-}
-
-/**********************************************************************/
+static char halt_msg[HALT_MSG_SIZE+1];
/* stop the system with exit code and flags */
-/* ARGSUSED */
BIF_RETTYPE halt_2(BIF_ALIST_2)
{
Uint code;
@@ -3924,7 +4001,7 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
if (flush) {
erts_halt(pos_int_code);
- ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
+ ERTS_BIF_YIELD2(bif_export[BIF_halt_2], BIF_P, am_undefined, am_undefined);
}
else {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
@@ -3940,9 +4017,12 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
Sint i;
- if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE-1)) < 0) {
- goto error;
- }
+ if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE)) == -1) {
+ goto error;
+ }
+ if (i == -2) /* truncated string */
+ i = HALT_MSG_SIZE;
+ ASSERT(i >= 0 && i <= HALT_MSG_SIZE);
halt_msg[i] = '\0';
VERBOSE(DEBUG_SYSTEM,
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
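The demonitor() refactoring above replaces the old ERTS_DEMONITOR_* integer codes with plain atoms, plus THE_NON_VALUE when a trap or error has already been prepared. A minimal sketch of that convention, with hypothetical names (do_release, release_1) standing in for the real helpers:

    /* Hypothetical helper: returns am_true/am_false/am_badarg, or THE_NON_VALUE
     * when the BIF result (a trap or an error) has already been set up. */
    static Eterm do_release(Process *c_p, Eterm ref);

    BIF_RETTYPE release_1(BIF_ALIST_1)
    {
        switch (do_release(BIF_P, BIF_ARG_1)) {
        case THE_NON_VALUE: BIF_RET(THE_NON_VALUE);  /* trap/error already prepared */
        case am_false:
        case am_true:       BIF_RET(am_true);
        case am_badarg:     BIF_ERROR(BIF_P, BADARG);
        default:            BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
        }
    }

ERTS atoms are compile-time constants, which is why both this sketch and demonitor_1/demonitor_2 above can switch on them directly.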
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 872f0f9b2a..065018514a 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -72,8 +72,6 @@ bif erlang:get/1
bif erlang:get_keys/1
bif erlang:group_leader/0
bif erlang:group_leader/2
-bif erlang:halt/0
-bif erlang:halt/1
bif erlang:halt/2
bif erlang:phash/2
bif erlang:phash2/1
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index fbe4724047..7e239d1f5d 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -123,7 +123,7 @@ struct AOFF_Carrier_t_ {
AOFF_RBTree_t rbt_node; /* My node in the carrier tree */
AOFF_RBTree_t* root; /* Root of my block tree */
};
-#define RBT_NODE_TO_MBC(PTR) ((AOFF_Carrier_t*)((char*)(PTR) - offsetof(AOFF_Carrier_t, rbt_node)))
+#define RBT_NODE_TO_MBC(PTR) ErtsContainerStruct((PTR), AOFF_Carrier_t, rbt_node)
/*
To support carrier migration we keep two kinds of rb-trees:
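The RBT_NODE_TO_MBC change above swaps a hand-rolled offsetof cast for ErtsContainerStruct, a container_of-style macro (presumably the one added to sys.h elsewhere in this diff). A sketch of what both forms compute:

    /* Sketch: recover the enclosing carrier from its embedded tree node. */
    static AOFF_Carrier_t *carrier_of(AOFF_RBTree_t *node)
    {
        return RBT_NODE_TO_MBC(node);
        /* i.e. (AOFF_Carrier_t*)((char*)node - offsetof(AOFF_Carrier_t, rbt_node)) */
    }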
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 2e195db0ee..3fb866733c 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -361,8 +361,13 @@ erts_print_system_version(int to, void *arg, Process *c_p)
}
typedef struct {
+ /* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
+ * {Entity,Node} = {monitor.Pid,NIL} for external/external by pid
+ * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name */
Eterm entity;
Eterm node;
+    /* pid is the actual target being monitored, whether by pid, port, or name */
+ Eterm pid;
} MonitorInfo;
typedef struct {
@@ -420,21 +425,27 @@ static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
EXTEND_MONITOR_INFOS(micp);
if (is_atom(mon->pid)) { /* external by name */
micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = mon->pid;
- micp->sz += 3; /* need one 2-tuple */
+ micp->mi[micp->mi_i].node = mon->pid;
+ micp->sz += 3; /* need one 2-tuple */
} else if (is_external_pid(mon->pid)) { /* external by pid */
micp->mi[micp->mi_i].entity = mon->pid;
- micp->mi[micp->mi_i].node = NIL;
- micp->sz += NC_HEAP_SIZE(mon->pid);
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += NC_HEAP_SIZE(mon->pid);
} else if (!is_nil(mon->name)) { /* internal by name */
micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
- micp->sz += 3; /* need one 2-tuple */
+ micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
+ micp->sz += 3; /* need one 2-tuple */
} else { /* internal by pid */
micp->mi[micp->mi_i].entity = mon->pid;
- micp->mi[micp->mi_i].node = NIL;
+ micp->mi[micp->mi_i].node = NIL;
/* no additional heap space needed */
}
+
+    /* Always keep the pid at hand, to assist with figuring out whether it is
+     * a port or a process when we monitored by name and process_info is
+     * requested. See: erl_bif_info.c:process_info_aux section for am_monitors */
+ micp->mi[micp->mi_i].pid = mon->pid;
+
micp->mi_i++;
micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
}
@@ -1190,37 +1201,49 @@ process_info_aux(Process *BIF_P,
case am_monitors: {
MonitorInfoCollection mic;
- int i;
+ int i;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_origin_monitor,&mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ erts_doforall_monitors(ERTS_P_MONITORS(rp),
+ &collect_one_origin_monitor, &mic);
+ hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
if (is_atom(mic.mi[i].entity)) {
/* Monitor by name.
- * Build {process, {Name, Node}} and cons it.
+ * Build {process|port, {Name, Node}} and cons it.
*/
Eterm t1, t2;
+ /* If pid is an atom, then it is a remote named monitor, which
+ has to be a process */
+ Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
+ ASSERT(is_pid(mic.mi[i].pid)
+ || is_port(mic.mi[i].pid)
+ || is_atom(mic.mi[i].pid));
t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
hp += 3;
- t2 = TUPLE2(hp, am_process, t1);
+ t2 = TUPLE2(hp, m_type, t1);
hp += 3;
res = CONS(hp, t2, res);
- hp += 2;
+ hp += 2;
}
else {
- /* Monitor by pid. Build {process, Pid} and cons it. */
+ /* Monitor by pid. Build {process|port, Pid} and cons it. */
Eterm t;
Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
- t = TUPLE2(hp, am_process, pid);
+
+ Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
+ ASSERT(is_pid(mic.mi[i].pid)
+ || is_port(mic.mi[i].pid));
+
+ t = TUPLE2(hp, m_type, pid);
hp += 3;
res = CONS(hp, t, res);
- hp += 2;
+ hp += 2;
}
}
- DESTROY_MONITOR_INFOS(mic);
+ DESTROY_MONITOR_INFOS(mic);
break;
}
@@ -1565,9 +1588,6 @@ process_info_aux(Process *BIF_P,
case F_ON_HEAP_MSGQ:
res = am_on_heap;
break;
- case 0:
- res = am_mixed;
- break;
default:
res = am_error;
ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
@@ -2809,8 +2829,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_off_heap);
case SPO_ON_HEAP_MSGQ:
BIF_RET(am_on_heap);
- case 0:
- BIF_RET(am_mixed);
default:
ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
BIF_RET(am_error);
@@ -2885,7 +2903,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
*/
Eterm
-erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm item)
+erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
+ Eterm item)
{
Eterm res = THE_NON_VALUE;
@@ -2933,8 +2952,8 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
Eterm item;
INIT_MONITOR_INFOS(mic);
-
- erts_doforall_monitors(ERTS_P_MONITORS(prt), &collect_one_origin_monitor, &mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(prt),
+ &collect_one_origin_monitor, &mic);
if (szp)
*szp += mic.sz;
@@ -2943,14 +2962,16 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
Eterm t;
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
- t = TUPLE2(*hpp, am_process, item);
+ Eterm m_type;
+
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ m_type = is_port(item) ? am_port : am_process;
+ t = TUPLE2(*hpp, m_type, item);
*hpp += 3;
res = CONS(*hpp, t, res);
*hpp += 2;
}
- }
-
+    } /* hpp */
DESTROY_MONITOR_INFOS(mic);
if (szp) {
@@ -2958,6 +2979,32 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm ite
goto done;
}
}
+ else if (item == am_monitored_by) {
+ MonitorInfoCollection mic;
+ int i;
+ Eterm item;
+
+ INIT_MONITOR_INFOS(mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(prt),
+ &collect_one_target_monitor, &mic);
+ if (szp)
+ *szp += mic.sz;
+
+ if (hpp) {
+ res = NIL;
+ for (i = 0; i < mic.mi_i; ++i) {
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ res = CONS(*hpp, item, res);
+ *hpp += 2;
+ }
+        } /* hpp */
+ DESTROY_MONITOR_INFOS(mic);
+
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
+ }
else if (item == am_name) {
int count = sys_strlen(prt->name);
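The monitors/monitored_by hunks above decide the reported tag from the saved pid field of MonitorInfo; condensed into one line (sketch only, mirroring the code above):

    /* mic.mi[i].pid is the real monitor target: a pid, a port id, or an atom
     * (remote monitor by name, which is always a process). Ports are reported
     * as {port, ...}, everything else as {process, ...}. */
    Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;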
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 37f4e1de49..90e78a9b0b 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -139,6 +139,12 @@ sig_lookup_port(Process *c_p, Eterm id_or_name)
return lookup_port(c_p, id_or_name, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
}
+/* Non-inline copy of sig_lookup_port to be exported */
+Port *erts_sig_lookup_port(Process *c_p, Eterm id_or_name)
+{
+ return lookup_port(c_p, id_or_name, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+}
+
static ERTS_INLINE Port *
data_lookup_port(Process *c_p, Eterm id_or_name)
{
@@ -1411,7 +1417,7 @@ BIF_RETTYPE decode_packet_3(BIF_ALIST_3)
trunc_len = val;
goto next_option;
case am_line_delimiter:
- if (type == TCP_PB_LINE_LF && val >= 0 && val <= 255) {
+ if (type == TCP_PB_LINE_LF && val <= 255) {
delimiter = (char)val;
goto next_option;
}
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 74979f984a..074ac6d64e 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -95,7 +95,8 @@
/*
* The following symbols can be manipulated to "tune" the linear hash array
*/
-#define CHAIN_LEN 6 /* Medium bucket chain len */
+#define GROW_LIMIT(NACTIVE) ((NACTIVE)*2)
+#define SHRINK_LIMIT(NACTIVE) ((NACTIVE) / 2)
/* Number of slots per segment */
#define SEGSZ_EXP 8
@@ -463,7 +464,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle);
static ERTS_INLINE void try_shrink(DbTableHash* tb)
{
int nactive = NACTIVE(tb);
- if (nactive > SEGSZ && NITEMS(tb) < (nactive * CHAIN_LEN)
+ if (nactive > SEGSZ && NITEMS(tb) < SHRINK_LIMIT(nactive)
&& !IS_FIXED(tb)) {
shrink(tb, nactive);
}
@@ -670,8 +671,8 @@ int db_create_hash(Process *p, DbTable *tbl)
tb->nsegs = NSEG_1;
tb->nslots = SEGSZ;
- erts_smp_atomic_init_nob(&tb->is_resizing, 0);
#ifdef ERTS_SMP
+ erts_smp_atomic_init_nob(&tb->is_resizing, 0);
if (tb->common.type & DB_FINE_LOCKED) {
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
int i;
@@ -862,7 +863,7 @@ Lnew:
WUNLOCK_HASH(lck);
{
int nactive = NACTIVE(tb);
- if (nitems > nactive * (CHAIN_LEN+1) && !IS_FIXED(tb)) {
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
grow(tb, nactive);
}
}
@@ -2250,12 +2251,12 @@ static int db_free_table_continue_hash(DbTable *tbl)
done /= 2;
while(tb->nslots != 0) {
- free_seg(tb, 1);
+ done += 1 + SEGSZ/64 + free_seg(tb, 1);
/*
* If we have done enough work, get out here.
*/
- if (++done >= (DELETE_RECORD_LIMIT / CHAIN_LEN / SEGSZ)) {
+ if (done >= DELETE_RECORD_LIMIT) {
return 0; /* Not done */
}
}
@@ -2604,23 +2605,22 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
static ERTS_INLINE int
begin_resizing(DbTableHash* tb)
{
+#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
- return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1);
- else {
- if (erts_smp_atomic_read_nob(&tb->is_resizing))
- return 0;
- erts_smp_atomic_set_nob(&tb->is_resizing, 1);
- return 1;
- }
+ return !erts_atomic_xchg_acqb(&tb->is_resizing, 1);
+ else
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+#endif
+ return 1;
}
static ERTS_INLINE void
done_resizing(DbTableHash* tb)
{
+#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
- erts_smp_atomic_set_relb(&tb->is_resizing, 0);
- else
- erts_smp_atomic_set_nob(&tb->is_resizing, 0);
+ erts_atomic_set_relb(&tb->is_resizing, 0);
+#endif
}
/* Grow table with one new bucket.
@@ -2871,7 +2871,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
int nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems);
int nactive = NACTIVE(tb);
- if (nitems > nactive * (CHAIN_LEN + 1) && !IS_FIXED(tb)) {
+ if (nitems > GROW_LIMIT(nactive) && !IS_FIXED(tb)) {
grow(tb, nactive);
}
}
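The new GROW_LIMIT/SHRINK_LIMIT macros replace the single CHAIN_LEN tuning constant; a quick worked example of the thresholds they produce, using the definitions in the hunk above:

    /* For a table with nactive == 256 active buckets:
     *   grow   once nitems > GROW_LIMIT(256)   == 512  (avg chain length > 2)
     *   shrink once nitems < SHRINK_LIMIT(256) == 128  (avg chain length < 0.5),
     *          and only if nactive > SEGSZ and the table is not fixated. */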
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index e654363cd5..081ff8fafc 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -60,8 +60,8 @@ typedef struct db_table_hash {
/* List of slots where elements have been deleted while table was fixed */
erts_smp_atomic_t fixdel; /* (FixedDeletion*) */
erts_smp_atomic_t nactive; /* Number of "active" slots */
- erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
#ifdef ERTS_SMP
+ erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
DbTableHashFineLocks* locks;
#endif
#ifdef VALGRIND
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index d740b2baec..d0d74bbf44 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1183,22 +1183,14 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end,
adjust_size = p->htop - p->heap;
}
- goto done;
}
+ else if (need_after > HEAP_SIZE(p)) {
+ grow_new_heap(p, next_heap_size(p, need_after, 0), objv, nobj);
+ adjust_size = p->htop - p->heap;
+ }
+ /*else: The heap size turned out to be just right. We are done. */
- if (HEAP_SIZE(p) >= need_after) {
- /*
- * The heap size turned out to be just right. We are done.
- */
- goto done;
- }
-
- grow_new_heap(p, next_heap_size(p, need_after, 0), objv, nobj);
- adjust_size = p->htop - p->heap;
-
- done:
ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
- ASSERT(MBUF(p) == NULL);
/* The heap usage during GC should be larger than what we end up
after a GC, even if we grow it. If this assertion is not true
@@ -1591,6 +1583,9 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
HIGH_WATER(p) = HEAP_TOP(p);
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p);
+#endif
remove_message_buffers(p);
if (p->flags & F_ON_HEAP_MSGQ)
@@ -1603,9 +1598,6 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
adjusted = adjust_after_fullsweep(p, need, objv, nobj);
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
ErtsGcQuickSanityCheck(p);
return gc_cost(size_after, adjusted ? size_after : 0);
@@ -2279,10 +2271,7 @@ move_msgq_to_heap(Process *p)
}
else {
- if (mp->data.attached == ERTS_MSG_COMBINED_HFRAG)
- bp = &mp->hfrag;
- else
- bp = mp->data.heap_frag;
+ bp = erts_message_to_heap_frag(mp);
if (bp->next)
erts_move_multi_frags(&factory.hp, factory.off_heap, bp,
@@ -2296,18 +2285,13 @@ move_msgq_to_heap(Process *p)
free_message_buffer(bp);
}
else {
- ErtsMessage *tmp = erts_alloc_message(0, NULL);
- sys_memcpy((void *) tmp->m, (void *) mp->m,
- sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
- tmp->next = mp->next;
- if (p->msg.save == &mp->next)
- p->msg.save = &tmp->next;
- if (p->msg.last == &mp->next)
- p->msg.last = &tmp->next;
- *mpp = tmp;
+ ErtsMessage *new_mp = erts_alloc_message(0, NULL);
+ sys_memcpy((void *) new_mp->m, (void *) mp->m,
+ sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
+ erts_msgq_replace_msg_ref(&p->msg, new_mp, mpp);
mp->next = NULL;
erts_cleanup_messages(mp);
- mp = tmp;
+ mp = new_mp;
}
}
@@ -3304,11 +3288,7 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
while (mp) {
- if (mp->data.attached == ERTS_MSG_COMBINED_HFRAG)
- bp = &mp->hfrag;
- else
- bp = mp->data.heap_frag;
-
+ bp = erts_message_to_heap_frag(mp);
mp = mp->next;
search_heap_frags:
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 0649fb68de..fbdafec4ef 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -585,7 +585,7 @@ void erts_usage(void)
erts_fprintf(stderr, "-hpds size initial process dictionary size (default %d)\n",
erts_pd_initial_size);
erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n");
- erts_fprintf(stderr, " valid values are: off_heap | on_heap | mixed\n");
+ erts_fprintf(stderr, " valid values are: off_heap | on_heap\n");
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
@@ -1526,9 +1526,7 @@ erl_start(int argc, char **argv)
erts_pd_initial_size));
} else if (has_prefix("mqd", sub_param)) {
arg = get_arg(sub_param+3, argv[i+1], &i);
- if (sys_strcmp(arg, "mixed") == 0)
- erts_default_spo_flags &= ~(SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ);
- else if (sys_strcmp(arg, "on_heap") == 0) {
+ if (sys_strcmp(arg, "on_heap") == 0) {
erts_default_spo_flags &= ~SPO_OFF_HEAP_MSGQ;
erts_default_spo_flags |= SPO_ON_HEAP_MSGQ;
}
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 579f6e427d..71ab92937d 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -1123,11 +1123,9 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
break;
case am_on_heap:
c_p->flags |= F_ON_HEAP_MSGQ;
+ c_p->flags &= ~F_OFF_HEAP_MSGQ;
erts_smp_atomic32_read_bor_nob(&c_p->state,
ERTS_PSFLG_ON_HEAP_MSGQ);
- /* fall through */
- case am_mixed:
- c_p->flags &= ~F_OFF_HEAP_MSGQ;
/*
* We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
* if a off heap change is ongoing. It will be adjusted
@@ -1151,11 +1149,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
switch (new_state) {
case am_on_heap:
break;
- case am_mixed:
- c_p->flags &= ~F_ON_HEAP_MSGQ;
- erts_smp_atomic32_read_band_nob(&c_p->state,
- ~ERTS_PSFLG_ON_HEAP_MSGQ);
- break;
case am_off_heap:
c_p->flags &= ~F_ON_HEAP_MSGQ;
erts_smp_atomic32_read_band_nob(&c_p->state,
@@ -1167,25 +1160,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
}
break;
- case 0:
- res = am_mixed;
-
- switch (new_state) {
- case am_mixed:
- break;
- case am_on_heap:
- c_p->flags |= F_ON_HEAP_MSGQ;
- erts_smp_atomic32_read_bor_nob(&c_p->state,
- ERTS_PSFLG_ON_HEAP_MSGQ);
- break;
- case am_off_heap:
- goto change_to_off_heap;
- default:
- res = THE_NON_VALUE; /* badarg */
- break;
- }
- break;
-
default:
res = am_error;
ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
@@ -1371,10 +1345,10 @@ erts_prep_msgq_for_inspection(Process *c_p, Process *rp,
mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;
- if (rp->msg.save == &bad_mp->next)
- rp->msg.save = mpp;
- if (rp->msg.last == &bad_mp->next)
- rp->msg.last = mpp;
+ ASSERT(*mpp == bad_mp);
+
+ erts_msgq_update_internal_pointers(&rp->msg, mpp, &bad_mp->next);
+
mp = mp->next;
*mpp = mp;
rp->msg.len--;
@@ -1411,12 +1385,7 @@ erts_prep_msgq_for_inspection(Process *c_p, Process *rp,
sys_memcpy((void *) tmp->m, (void *) mp->m,
sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;
- tmp->next = mp->next;
- if (rp->msg.save == &mp->next)
- rp->msg.save = &tmp->next;
- if (rp->msg.last == &mp->next)
- rp->msg.last = &tmp->next;
- *mpp = tmp;
+ erts_msgq_replace_msg_ref(&rp->msg, tmp, mpp);
erts_save_message_in_proc(rp, mp);
mp = tmp;
}
@@ -1756,7 +1725,7 @@ void erts_factory_trim_and_close(ErtsHeapFactory* factory,
case FACTORY_MESSAGE: {
ErtsMessage *mp = factory->message;
if (mp->data.attached == ERTS_MSG_COMBINED_HFRAG) {
- if (!mp->hfrag.next) {
+ if (!factory->heap_frags) {
Uint sz = factory->hp - factory->hp_start;
mp = erts_shrink_message(mp, sz, brefs, brefs_size);
factory->message = mp;
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 4493df1c1a..6df969367b 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -366,9 +366,19 @@ ERTS_GLB_FORCE_INLINE ErtsMessage *erts_shrink_message(ErtsMessage *mp, Uint sz,
ERTS_GLB_FORCE_INLINE void erts_free_message(ErtsMessage *mp);
ERTS_GLB_INLINE Uint erts_used_frag_sz(const ErlHeapFragment*);
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg);
+ERTS_GLB_INLINE void erts_msgq_update_internal_pointers(ErlMessageQueue *msgq,
+ ErtsMessage **newpp,
+ ErtsMessage **oldpp);
+ERTS_GLB_INLINE void erts_msgq_replace_msg_ref(ErlMessageQueue *msgq,
+ ErtsMessage *newp,
+ ErtsMessage **oldpp);
#define ERTS_MSG_COMBINED_HFRAG ((void *) 0x1)
+#define erts_message_to_heap_frag(MP) \
+ (((MP)->data.attached == ERTS_MSG_COMBINED_HFRAG) ? \
+ &(MP)->hfrag : (MP)->data.heap_frag)
+
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_FORCE_INLINE ErtsMessage *erts_alloc_message(Uint sz, Eterm **hpp)
@@ -449,10 +459,7 @@ ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg)
ASSERT(msg->data.attached);
if (is_value(ERL_MESSAGE_TERM(msg))) {
ErlHeapFragment *bp;
- if (msg->data.attached == ERTS_MSG_COMBINED_HFRAG)
- bp = &msg->hfrag;
- else
- bp = msg->data.heap_frag;
+ bp = erts_message_to_heap_frag(msg);
return erts_used_frag_sz(bp);
}
else if (msg->data.dist_ext->heap_size < 0)
@@ -467,6 +474,29 @@ ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg)
return sz;
}
}
+
+ERTS_GLB_INLINE void
+erts_msgq_update_internal_pointers(ErlMessageQueue *msgq,
+ ErtsMessage **newpp,
+ ErtsMessage **oldpp)
+{
+ if (msgq->save == oldpp)
+ msgq->save = newpp;
+ if (msgq->last == oldpp)
+ msgq->last = newpp;
+ if (msgq->saved_last == oldpp)
+ msgq->saved_last = newpp;
+}
+
+ERTS_GLB_INLINE void
+erts_msgq_replace_msg_ref(ErlMessageQueue *msgq, ErtsMessage *newp, ErtsMessage **oldpp)
+{
+ ErtsMessage *oldp = *oldpp;
+ newp->next = oldp->next;
+ erts_msgq_update_internal_pointers(msgq, &newp->next, &oldp->next);
+ *oldpp = newp;
+}
+
#endif
#endif
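erts_msgq_replace_msg_ref() above centralizes the save/last/saved_last bookkeeping that move_msgq_to_heap() and erts_prep_msgq_for_inspection() previously open-coded. A minimal usage sketch, mirroring the erl_gc.c hunk in this diff (p, mp and mpp are assumed to come from a walk over p->msg):

    /* Replace the message *mpp points at with a fresh copy of its reference
     * array, keeping the queue's internal pointers consistent. */
    ErtsMessage *new_mp = erts_alloc_message(0, NULL);
    sys_memcpy((void *) new_mp->m, (void *) mp->m,
               sizeof(Eterm) * ERL_MESSAGE_REF_ARRAY_SZ);
    erts_msgq_replace_msg_ref(&p->msg, new_mp, mpp); /* fixes save/last/saved_last */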
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 606b73c7b5..039f97ef43 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -178,9 +178,6 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, size_t may_need)
void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
Process* tracee)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsSchedulerData *esdp;
-#endif
env->mod_nif = mod_nif;
env->proc = p;
env->hp = HEAP_TOP(p);
@@ -193,57 +190,65 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
ASSERT(p->common.id != ERTS_INVALID_PID);
-#ifdef ERTS_DIRTY_SCHEDULERS
- esdp = erts_get_scheduler_data();
- ASSERT(esdp);
+#if defined(DEBUG) && defined(ERTS_DIRTY_SCHEDULERS)
+ {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
-#ifdef DEBUG
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
- ASSERT(p->scheduler_data == esdp);
- ASSERT((state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS))
- && !(state & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ ASSERT(p->scheduler_data == esdp);
+ ASSERT((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS))
+ && !(state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ }
+ }
#endif
+}
- }
- else {
- Process *sproc;
+void erts_pre_dirty_nif(ErtsSchedulerData *esdp,
+ ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
+ Process* tracee)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ Process *sproc;
#ifdef DEBUG
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
- ASSERT(!p->scheduler_data);
- ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
- && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(!p->scheduler_data);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
#endif
- sproc = esdp->dirty_shadow_process;
- ASSERT(sproc);
- ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
- ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
- == (ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_PROXY));
-
- sproc->next = p;
- sproc->common.id = p->common.id;
- sproc->htop = p->htop;
- sproc->stop = p->stop;
- sproc->hend = p->hend;
- sproc->heap = p->heap;
- sproc->abandoned_heap = p->abandoned_heap;
- sproc->heap_sz = p->heap_sz;
- sproc->high_water = p->high_water;
- sproc->old_hend = p->old_hend;
- sproc->old_htop = p->old_htop;
- sproc->old_heap = p->old_heap;
- sproc->mbuf = NULL;
- sproc->mbuf_sz = 0;
- ERTS_INIT_OFF_HEAP(&sproc->off_heap);
- env->proc = sproc;
- }
+ erts_pre_nif(env, p, mod_nif, tracee);
+
+ sproc = esdp->dirty_shadow_process;
+ ASSERT(sproc);
+ ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
+ == (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+
+ sproc->next = p;
+ sproc->common.id = p->common.id;
+ sproc->htop = p->htop;
+ sproc->stop = p->stop;
+ sproc->hend = p->hend;
+ sproc->heap = p->heap;
+ sproc->abandoned_heap = p->abandoned_heap;
+ sproc->heap_sz = p->heap_sz;
+ sproc->high_water = p->high_water;
+ sproc->old_hend = p->old_hend;
+ sproc->old_htop = p->old_htop;
+ sproc->old_heap = p->old_heap;
+ sproc->mbuf = NULL;
+ sproc->mbuf_sz = 0;
+ ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+ env->proc = sproc;
#endif
}
@@ -623,10 +628,28 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
}
} else {
Uint sz = size_object(msg);
+ ErlOffHeap *ohp;
Eterm *hp;
- mp = erts_alloc_message(sz, &hp);
- msg = copy_struct(msg, sz, &hp, &mp->hfrag.off_heap);
- ASSERT(hp == mp->hfrag.mem+mp->hfrag.used_size);
+ if (env && !env->tracee) {
+ flush_env(env);
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ cache_env(env);
+ }
+ else {
+ erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state);
+ if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
+ mp = erts_alloc_message(sz, &hp);
+ ohp = sz == 0 ? NULL : &mp->hfrag.off_heap;
+ }
+ else {
+ ErlHeapFragment *bp = new_message_buffer(sz);
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = bp;
+ hp = bp->mem;
+ ohp = &bp->off_heap;
+ }
+ }
+ msg = copy_struct(msg, sz, &hp, ohp);
}
ERL_MESSAGE_TERM(mp) = msg;
@@ -780,19 +803,13 @@ ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
Uint sz;
Eterm* hp;
-#ifdef SHCOPY
- erts_shcopy_t info;
- INITIALIZE_SHCOPY(info);
- sz = copy_shared_calculate(src_term, &info);
- hp = alloc_heap(dst_env, sz);
- src_term = copy_shared_perform(src_term, sz, &info, &hp, &MSO(dst_env->proc));
- DESTROY_SHCOPY(info);
- return src_term;
-#else
+ /*
+ * No preserved sharing allowed as long as literals are also preserved.
+     * A process independent environment cannot be reached by purge.
+ */
sz = size_object(src_term);
hp = alloc_heap(dst_env, sz);
return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
-#endif
}
@@ -2641,18 +2658,21 @@ done:
}
int
-enif_is_on_dirty_scheduler(ErlNifEnv* env)
+enif_thread_type(void)
{
- int scheduler;
- Process *c_p;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
- execution_state(env, &c_p, &scheduler);
+ if (!esdp)
+ return ERL_NIF_THR_UNDEFINED;
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp))
+ return ERL_NIF_THR_NORMAL_SCHEDULER;
- if (!c_p || !scheduler)
- erts_exit(ERTS_ABORT_EXIT, "enif_is_on_dirty_scheduler: "
- "Invalid env");
+ if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp))
+ return ERL_NIF_THR_DIRTY_CPU_SCHEDULER;
- return scheduler < 0;
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(esdp));
+ return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
}
/* Maps */
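The replacement of enif_is_on_dirty_scheduler() with enif_thread_type() above makes the calling thread's type directly queryable. A hedged usage sketch for a NIF (hypothetical function name; it only uses enif_thread_type() and enif_make_atom()):

    static ERL_NIF_TERM where_am_i(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        (void)argc; (void)argv;
        switch (enif_thread_type()) {
        case ERL_NIF_THR_NORMAL_SCHEDULER:    return enif_make_atom(env, "normal");
        case ERL_NIF_THR_DIRTY_CPU_SCHEDULER: return enif_make_atom(env, "dirty_cpu");
        case ERL_NIF_THR_DIRTY_IO_SCHEDULER:  return enif_make_atom(env, "dirty_io");
        default:                              return enif_make_atom(env, "other");
        }
    }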
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index da7a754757..494971e118 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -209,6 +209,17 @@ typedef enum {
ERL_NIF_BIN2TERM_SAFE = 0x20000000
} ErlNifBinaryToTerm;
+/*
+ * Return values from enif_thread_type(). Negative values
+ * reserved for specific types of non-scheduler threads.
+ * Positive values reserved for scheduler thread types.
+ */
+
+#define ERL_NIF_THR_UNDEFINED 0
+#define ERL_NIF_THR_NORMAL_SCHEDULER 1
+#define ERL_NIF_THR_DIRTY_CPU_SCHEDULER 2
+#define ERL_NIF_THR_DIRTY_IO_SCHEDULER 3
+
#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
# define ERL_NIF_API_FUNC_DECL(RET_TYPE, NAME, ARGS) RET_TYPE (*NAME) ARGS
typedef struct {
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index b211ab4b16..9a8f216773 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -173,7 +173,7 @@ ERL_NIF_API_FUNC_DECL(int, enif_get_local_port, (ErlNifEnv* env, ERL_NIF_TERM, E
ERL_NIF_API_FUNC_DECL(int, enif_term_to_binary, (ErlNifEnv *env, ERL_NIF_TERM term, ErlNifBinary *bin));
ERL_NIF_API_FUNC_DECL(size_t, enif_binary_to_term, (ErlNifEnv *env, const unsigned char* data, size_t sz, ERL_NIF_TERM *term, unsigned int opts));
ERL_NIF_API_FUNC_DECL(int, enif_port_command, (ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg));
-ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
+ERL_NIF_API_FUNC_DECL(int,enif_thread_type,(void));
ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char *format, ...));
/*
@@ -330,7 +330,7 @@ ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char
# define enif_term_to_binary ERL_NIF_API_FUNC_MACRO(enif_term_to_binary)
# define enif_binary_to_term ERL_NIF_API_FUNC_MACRO(enif_binary_to_term)
# define enif_port_command ERL_NIF_API_FUNC_MACRO(enif_port_command)
-# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
+# define enif_thread_type ERL_NIF_API_FUNC_MACRO(enif_thread_type)
# define enif_snprintf ERL_NIF_API_FUNC_MACRO(enif_snprintf)
/*
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index f0075ca2b9..f90844ccc8 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -361,6 +361,8 @@ Eterm erts_request_io_bytes(Process *c_p);
#define ERTS_PORT_REDS_CONNECT (CONTEXT_REDS/200)
#define ERTS_PORT_REDS_UNLINK (CONTEXT_REDS/200)
#define ERTS_PORT_REDS_LINK (CONTEXT_REDS/200)
+#define ERTS_PORT_REDS_MONITOR (CONTEXT_REDS/200)
+#define ERTS_PORT_REDS_DEMONITOR (CONTEXT_REDS/200)
#define ERTS_PORT_REDS_BADSIG (CONTEXT_REDS/200)
#define ERTS_PORT_REDS_CONTROL (CONTEXT_REDS/100)
#define ERTS_PORT_REDS_CALL (CONTEXT_REDS/50)
@@ -850,16 +852,20 @@ void erts_port_resume_procs(Port *);
struct binary;
-#define ERTS_P2P_SIG_TYPE_BAD 0
-#define ERTS_P2P_SIG_TYPE_OUTPUT 1
-#define ERTS_P2P_SIG_TYPE_OUTPUTV 2
-#define ERTS_P2P_SIG_TYPE_CONNECT 3
-#define ERTS_P2P_SIG_TYPE_EXIT 4
-#define ERTS_P2P_SIG_TYPE_CONTROL 5
-#define ERTS_P2P_SIG_TYPE_CALL 6
-#define ERTS_P2P_SIG_TYPE_INFO 7
-#define ERTS_P2P_SIG_TYPE_LINK 8
-#define ERTS_P2P_SIG_TYPE_UNLINK 9
+enum {
+ ERTS_P2P_SIG_TYPE_BAD = 0,
+ ERTS_P2P_SIG_TYPE_OUTPUT = 1,
+ ERTS_P2P_SIG_TYPE_OUTPUTV = 2,
+ ERTS_P2P_SIG_TYPE_CONNECT = 3,
+ ERTS_P2P_SIG_TYPE_EXIT = 4,
+ ERTS_P2P_SIG_TYPE_CONTROL = 5,
+ ERTS_P2P_SIG_TYPE_CALL = 6,
+ ERTS_P2P_SIG_TYPE_INFO = 7,
+ ERTS_P2P_SIG_TYPE_LINK = 8,
+ ERTS_P2P_SIG_TYPE_UNLINK = 9,
+ ERTS_P2P_SIG_TYPE_MONITOR = 10,
+ ERTS_P2P_SIG_TYPE_DEMONITOR = 11
+};
#define ERTS_P2P_SIG_TYPE_BITS 4
#define ERTS_P2P_SIG_TYPE_MASK \
@@ -921,6 +927,15 @@ struct ErtsProc2PortSigData_ {
struct {
Eterm from;
} unlink;
+ struct {
+ Eterm origin; /* who receives monitor event, pid */
+ Eterm name; /* either name for named monitor, or port id */
+ } monitor;
+ struct {
+ Eterm origin; /* who is at the other end of the monitor, pid */
+ Eterm name; /* port id */
+ Uint32 ref[ERTS_MAX_REF_NUMBERS]; /* box contents of a ref */
+ } demonitor;
} u;
} ;
@@ -1017,6 +1032,29 @@ ErtsPortOpResult erts_port_control(Process *, Port *, unsigned int, Eterm, Eterm
ErtsPortOpResult erts_port_call(Process *, Port *, unsigned int, Eterm, Eterm *);
ErtsPortOpResult erts_port_info(Process *, Port *, Eterm, Eterm *);
+/* Creates a monitor between Origin and Target. Ref must be initialized to
+ * a reference (ref may be rewritten so that it can additionally serve as a
+ * signal id). Name is an atom if the user monitors the port by name, or NIL */
+ErtsPortOpResult erts_port_monitor(Process *origin, Port *target, Eterm name,
+ Eterm *ref);
+
+typedef enum {
+ /* Normal demonitor rules apply with locking and reductions bump */
+ ERTS_PORT_DEMONITOR_NORMAL = 1,
+ /* Relaxed demonitor rules when process is about to die, which means that
+ * pid lookup won't work, locks won't work, no reductions bump. */
+ ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED = 2,
+} ErtsDemonitorMode;
+
+/* Removes the monitor between origin and target, identified by ref.
+ * Mode is either ERTS_PORT_DEMONITOR_NORMAL (normal locking rules and a
+ * reductions bump apply) or ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED (origin
+ * locking is avoided because the origin process is dying) */
+ErtsPortOpResult erts_port_demonitor(Process *origin, ErtsDemonitorMode mode,
+ Port *target, Eterm ref,
+ Eterm *trap_ref);
+/* defined in erl_bif_port.c */
+Port *erts_sig_lookup_port(Process *c_p, Eterm id_or_name);
+
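The declarations above define the new process-to-port monitor API. A hypothetical caller-side sketch (not the actual BIF code), assuming `ref` already holds a freshly created internal reference:

    /* Sketch only: monitor a port by id; erts_port_monitor may rewrite ref so
     * that it can double as the signal id of a scheduled operation. */
    static ErtsPortOpResult
    monitor_port_sketch(Process *c_p, Eterm port_id, Eterm ref)
    {
        Port *prt = erts_sig_lookup_port(c_p, port_id);
        if (!prt)
            return ERTS_PORT_OP_BADARG;
        return erts_port_monitor(c_p, prt, port_id, &ref);
    }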
int erts_port_output_async(Port *, Eterm, Eterm);
/*
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index a853ec585b..66f22979ad 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -149,7 +149,7 @@ extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = 0;
+int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = SPO_ON_HEAP_MSGQ;
int ERTS_WRITE_UNLIKELY(erts_eager_check_io) = 1;
int ERTS_WRITE_UNLIKELY(erts_sched_compact_load);
int ERTS_WRITE_UNLIKELY(erts_sched_balance_util) = 0;
@@ -3509,7 +3509,7 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one)
#endif
#define ERTS_NO_USED_RUNQS_SHIFT 16
-#define ERTS_NO_RUNQS_MASK 0xffff
+#define ERTS_NO_RUNQS_MASK 0xffffU
#if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_NO_RUNQS_MASK
# error "Too large amount of schedulers allowed"
@@ -8197,7 +8197,7 @@ sched_dirty_cpu_thread_func(void *vesdp)
#endif
erts_thread_init_float();
- process_main();
+ erts_dirty_process_main(esdp);
/* No schedulers should *ever* terminate */
erts_exit(ERTS_ABORT_EXIT,
"Dirty CPU scheduler thread number %beu terminated\n",
@@ -8242,7 +8242,7 @@ sched_dirty_io_thread_func(void *vesdp)
#endif
erts_thread_init_float();
- process_main();
+ erts_dirty_process_main(esdp);
/* No schedulers should *ever* terminate */
erts_exit(ERTS_ABORT_EXIT,
"Dirty I/O scheduler thread number %beu terminated\n",
@@ -9377,77 +9377,6 @@ scheduler_gc_proc(Process *c_p, int reds_left)
return reds;
}
-static ERTS_INLINE void
-clean_dirty_start(Process *p)
-{
-#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ARCH_64)
- void *ptr = ERTS_PROC_SET_DIRTY_CPU_START(p, NULL);
- if (ptr)
- erts_free(ERTS_ALC_T_DIRTY_START, ptr);
-#endif
-}
-
-static ERTS_INLINE void
-save_dirty_start(ErtsSchedulerData *esdp, Process *c_p)
-{
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
- ErtsMonotonicTime time = erts_get_monotonic_time(esdp);
-#ifdef ARCH_64
- ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) time);
-#else
- ErtsMonotonicTime *stimep;
-
- stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
- if (!stimep) {
- stimep = erts_alloc(ERTS_ALC_T_DIRTY_START,
- sizeof(ErtsMonotonicTime));
- ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) stimep);
- }
- *stimep = time;
-#endif
- }
-#endif
-}
-
-static ERTS_INLINE int
-get_dirty_reds(ErtsSchedulerData *esdp, Process *c_p)
-{
-
-#ifndef ERTS_DIRTY_SCHEDULERS
- return -1;
-#else
- ErtsMonotonicTime stime, time;
-
- if (!ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue))
- return 1;
-
-#ifdef ARCH_64
- stime = (ErtsMonotonicTime) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
-#else
- {
- ErtsMonotonicTime *stimep;
- stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
- ASSERT(stimep);
- stime = *stimep;
- }
-#endif
-
- time = erts_get_monotonic_time(esdp);
-
- ASSERT(stime && stime < time);
-
- time -= stime;
- time = ERTS_MONOTONIC_TO_USEC(time);
- time *= 2;
-
- if (time > INT_MAX)
- return INT_MAX;
- return (int) time;
-#endif
-
-}
-
/*
* schedule() is called from BEAM (process_main()) or HiPE
* (hipe_mode_switch()) when the current process is to be
@@ -9466,11 +9395,10 @@ get_dirty_reds(ErtsSchedulerData *esdp, Process *c_p)
* so that normal processes get to run more frequently.
*/
-Process *schedule(Process *p, int calls)
+Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
{
Process *proxy_p = NULL;
ErtsRunQueue *rq;
- ErtsSchedulerData *esdp;
int context_reds;
int fcalls;
int input_reductions;
@@ -9507,8 +9435,19 @@ Process *schedule(Process *p, int calls)
* Clean up after the process being scheduled out.
*/
if (!p) { /* NULL in the very first schedule() call */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ is_normal_sched = !esdp;
+ if (is_normal_sched) {
+ esdp = erts_get_scheduler_data();
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+ else {
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+#else
esdp = erts_get_scheduler_data();
- is_normal_sched = !ERTS_SCHEDULER_IS_DIRTY(esdp);
+ is_normal_sched = 1;
+#endif
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls);
@@ -9517,12 +9456,12 @@ Process *schedule(Process *p, int calls)
} else {
#ifdef ERTS_SMP
#ifdef ERTS_DIRTY_SCHEDULERS
- esdp = p->scheduler_data;
- is_normal_sched = esdp != NULL;
- if (is_normal_sched)
+ is_normal_sched = !esdp;
+ if (is_normal_sched) {
+ esdp = p->scheduler_data;
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
else {
- esdp = erts_get_scheduler_data();
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
}
#else
@@ -9541,10 +9480,7 @@ Process *schedule(Process *p, int calls)
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- if (is_normal_sched)
- reds = actual_reds = calls - esdp->virtual_reds;
- else
- reds = actual_reds = get_dirty_reds(esdp, p);
+ reds = actual_reds = calls - esdp->virtual_reds;
ASSERT(actual_reds >= 0);
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
@@ -9994,17 +9930,10 @@ Process *schedule(Process *p, int calls)
calls = 0;
reds = context_reds;
-#ifdef ERTS_SMP
-
erts_smp_runq_unlock(rq);
-#endif /* ERTS_SMP */
-
}
- if (!is_normal_sched)
- save_dirty_start(esdp, p);
-
#ifdef ERTS_SMP
if (flags & ERTS_RUNQ_FLG_PROTECTED)
@@ -10153,7 +10082,7 @@ Process *schedule(Process *p, int calls)
}
}
- if (ERTS_IS_GC_DESIRED(p)) {
+ if (ERTS_IS_GC_DESIRED(p) && !ERTS_SCHEDULER_IS_DIRTY_IO(esdp)) {
if (!(state & ERTS_PSFLG_EXITING) && !(p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
int cost = scheduler_gc_proc(p, reds);
calls += cost;
@@ -11206,6 +11135,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
flags |= F_ON_HEAP_MSGQ;
}
+ ASSERT((flags & F_ON_HEAP_MSGQ) || (flags & F_OFF_HEAP_MSGQ));
+
if (!rq)
rq = erts_get_runq_proc(parent);
@@ -11218,6 +11149,11 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
goto error;
}
+ ASSERT((erts_smp_atomic32_read_nob(&p->state)
+ & ERTS_PSFLG_ON_HEAP_MSGQ)
+ || (erts_smp_atomic32_read_nob(&p->state)
+ & ERTS_PSFLG_OFF_HEAP_MSGQ));
+
#ifdef BM_COUNTERS
processes_busy++;
#endif
@@ -11738,8 +11674,6 @@ delete_process(Process* p)
if (nif_export)
erts_destroy_nif_export(nif_export);
- clean_dirty_start(p);
-
/* Cleanup psd */
psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd);
@@ -12305,7 +12239,6 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ExitMonitorContext *pcontext = vpcontext;
DistEntry *dep;
ErtsMonitor *rmon;
- Process *rp;
switch (mon->type) {
case MON_ORIGIN:
@@ -12334,9 +12267,10 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
erts_deref_dist_entry(dep);
}
} else {
- ASSERT(is_pid(mon->pid));
- if (is_internal_pid(mon->pid)) { /* local by pid or name */
- rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
+ ASSERT(is_pid(mon->pid) || is_port(mon->pid));
+            /* local by pid or name */
+ if (is_internal_pid(mon->pid)) {
+ Process *rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
if (!rp) {
goto done;
}
@@ -12346,7 +12280,17 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
goto done;
}
erts_destroy_monitor(rmon);
- } else { /* remote by pid */
+ } else if (is_internal_port(mon->pid)) {
+ /* Is a local port */
+ Port *prt = erts_port_lookup_raw(mon->pid);
+ if (!prt) {
+ goto done;
+ }
+ erts_port_demonitor(pcontext->p,
+ ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED,
+ prt, mon->ref, NULL);
+ return; /* let erts_port_demonitor do the deletion */
+ } else { /* remote by pid */
ASSERT(is_external_pid(mon->pid));
dep = external_pid_dist_entry(mon->pid);
ASSERT(dep != NULL);
@@ -12384,6 +12328,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
erts_port_release(prt);
} else if (is_internal_pid(mon->pid)) {/* local by name or pid */
Eterm watched;
+ Process *rp;
DeclareTmpHeapNoproc(lhp,3);
ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
| ERTS_PROC_LOCKS_MSG_SEND);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index b44ac442aa..7c98b60647 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -810,25 +810,13 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_DELAYED_GC_TASK_QS 4
#define ERTS_PSD_NIF_TRAP_EXPORT 5
#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 6
-#define ERTS_PSD_DIRTY_CPU_START 7
-#define ERTS_PSD_SIZE 8
+#define ERTS_PSD_SIZE 7
-#if !defined(HIPE) && !defined(ERTS_DIRTY_SCHEDULERS)
+#if !defined(HIPE)
# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
-# undef ERTS_PSD_DIRTY_CPU_START
# undef ERTS_PSD_SIZE
# define ERTS_PSD_SIZE 6
-#elif !defined(HIPE)
-# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
-# undef ERTS_PSD_DIRTY_CPU_START
-# undef ERTS_PSD_SIZE
-# define ERTS_PSD_DIRTY_CPU_START 6
-# define ERTS_PSD_SIZE 7
-#elif !defined(ERTS_DIRTY_SCHEDULERS)
-# undef ERTS_PSD_DIRTY_CPU_START
-# undef ERTS_PSD_SIZE
-# define ERTS_PSD_SIZE 7
#endif
typedef struct {
@@ -1179,6 +1167,9 @@ void erts_check_for_holes(Process* p);
* USR_PRIO -> User prio. i.e., prio the user has set.
* PRQ_PRIO -> Prio queue prio, i.e., prio queue currently
* enqueued in.
+ *
+ * Update etp-proc-state-int in $ERL_TOP/erts/etc/unix/etp-commands.in
+ * when changing ERTS_PSFLG_*.
*/
#define ERTS_PSFLGS_ACT_PRIO_MASK \
(ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_ACT_PRIO_OFFSET)
@@ -1831,7 +1822,7 @@ Eterm erts_get_schedulers_binds(Process *c_p);
Eterm erts_set_cpu_topology(Process *c_p, Eterm term);
Eterm erts_bind_schedulers(Process *c_p, Eterm how);
ErtsRunQueue *erts_schedid2runq(Uint);
-Process *schedule(Process*, int);
+Process *erts_schedule(ErtsSchedulerData *, Process*, int);
void erts_schedule_misc_op(void (*)(void *), void *);
Eterm erl_create_process(Process*, Eterm, Eterm, Eterm, ErlSpawnOpts*);
void erts_do_exit_process(Process*, Eterm);
@@ -2061,13 +2052,6 @@ erts_psd_set(Process *p, int ix, void *data)
((struct saved_calls *) erts_psd_set((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF, (void *) (SCB)))
#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PROC_GET_DIRTY_CPU_START(P) \
- ((void *) erts_psd_get((P), ERTS_PSD_DIRTY_CPU_START))
-#define ERTS_PROC_SET_DIRTY_CPU_START(P, DCS) \
- ((void *) erts_psd_set((P), ERTS_PSD_DIRTY_CPU_START, (void *) (DCS)))
-#endif
-
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p, Eterm handler);
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index eeaa9a569c..a70dfb8e73 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -560,6 +560,11 @@ dump_externally(int to, void *to_arg, Eterm term)
}
}
+    /* Maps are not handled; dump them as 'undefined' */
+ if (is_map(term)) {
+ term = am_undefined;
+ }
+
s = p = sbuf;
erts_encode_ext(term, &p);
erts_print(to, to_arg, "E%X:", p-s);
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index a5931ffc25..fecfd96ab0 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -168,7 +168,7 @@ typedef struct {
#define ERTS_PTAB_INVALID_ID(TAG) \
((Eterm) \
- ((((1 << ERTS_PTAB_ID_DATA_SIZE) - 1) << ERTS_PTAB_ID_DATA_SHIFT) \
+ ((((1U << ERTS_PTAB_ID_DATA_SIZE) - 1) << ERTS_PTAB_ID_DATA_SHIFT) \
| (TAG)))
#define erts_ptab_is_valid_id(ID) \
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 542541165b..21938e7684 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -95,9 +95,9 @@
#define ERTS_THR_PRGR_FTL_ERR_BLCK_POLL_INTERVAL 100
-#define ERTS_THR_PRGR_LFLG_BLOCK (((erts_aint32_t) 1) << 31)
-#define ERTS_THR_PRGR_LFLG_NO_LEADER (((erts_aint32_t) 1) << 30)
-#define ERTS_THR_PRGR_LFLG_WAITING_UM (((erts_aint32_t) 1) << 29)
+#define ERTS_THR_PRGR_LFLG_BLOCK ((erts_aint32_t) (1U << 31))
+#define ERTS_THR_PRGR_LFLG_NO_LEADER ((erts_aint32_t) (1U << 30))
+#define ERTS_THR_PRGR_LFLG_WAITING_UM ((erts_aint32_t) (1U << 29))
#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \
| ERTS_THR_PRGR_LFLG_BLOCK \
| ERTS_THR_PRGR_LFLG_WAITING_UM))
@@ -142,8 +142,8 @@ init_nob(ERTS_THR_PRGR_ATOMIC *atmc, ErtsThrPrgrVal val)
#warning "Thread progress state debug is on"
#endif
-#define ERTS_THR_PROGRESS_STATE_DEBUG_LEADER (((erts_aint32_t) 1) << 0)
-#define ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE (((erts_aint32_t) 1) << 1)
+#define ERTS_THR_PROGRESS_STATE_DEBUG_LEADER ((erts_aint32_t) (1U << 0))
+#define ERTS_THR_PROGRESS_STATE_DEBUG_ACTIVE ((erts_aint32_t) (1U << 1))
#define ERTS_THR_PROGRESS_STATE_DEBUG_INIT(ID) \
erts_atomic32_init_nob(&intrnl->thr[(ID)].data.state_debug, \
@@ -179,10 +179,10 @@ do { \
#endif /* ERTS_THR_PROGRESS_STATE_DEBUG */
-#define ERTS_THR_PRGR_BLCKR_INVALID (~((erts_aint32_t) 0))
-#define ERTS_THR_PRGR_BLCKR_UNMANAGED (((erts_aint32_t) 1) << 31)
+#define ERTS_THR_PRGR_BLCKR_INVALID ((erts_aint32_t) (~0U))
+#define ERTS_THR_PRGR_BLCKR_UNMANAGED ((erts_aint32_t) (1U << 31))
-#define ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING (((erts_aint32_t) 1) << 31)
+#define ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING ((erts_aint32_t) (1U << 31))
#define ERTS_THR_PRGR_BM_BITS 32
#define ERTS_THR_PRGR_BM_SHIFT 5
@@ -1186,7 +1186,7 @@ wakeup_unmanaged_threads(ErtsThrPrgrUnmanagedWakeupData *umwd)
int hbase = hix << ERTS_THR_PRGR_BM_SHIFT;
int hbit;
for (hbit = 0; hbit < ERTS_THR_PRGR_BM_BITS; hbit++) {
- if (hmask & (1 << hbit)) {
+ if (hmask & (1U << hbit)) {
erts_aint_t lmask;
int lix = hbase + hbit;
ASSERT(0 <= lix && lix < umwd->low_sz);
@@ -1195,7 +1195,7 @@ wakeup_unmanaged_threads(ErtsThrPrgrUnmanagedWakeupData *umwd)
int lbase = lix << ERTS_THR_PRGR_BM_SHIFT;
int lbit;
for (lbit = 0; lbit < ERTS_THR_PRGR_BM_BITS; lbit++) {
- if (lmask & (1 << lbit)) {
+ if (lmask & (1U << lbit)) {
int id = lbase + lbit;
wakeup_unmanaged(id);
}
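The changes above (and in the flag macros earlier in this file's hunks) shift an unsigned 1 before converting to erts_aint32_t, rather than shifting a signed value into its sign bit. A standalone illustration of the difference (plain C, not emulator code):

    #include <stdint.h>

    /* (1U << 31) is a well-defined unsigned shift; the subsequent conversion to
     * a signed 32-bit type is implementation-defined but not undefined behaviour.
     * ((int32_t)1) << 31 would shift into the sign bit, which is undefined. */
    static int32_t top_bit(void)
    {
        return (int32_t)(1U << 31);
    }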
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index ca001fc156..3dca58d60b 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2625,7 +2625,7 @@ static void init_tracer_template(ErtsTracerNif *tnif) {
/* default tracer functions */
tnif->tracers[TRACE_FUN_DEFAULT].name = "trace";
- tnif->tracers[TRACE_FUN_DEFAULT].arity = 6;
+ tnif->tracers[TRACE_FUN_DEFAULT].arity = 5;
tnif->tracers[TRACE_FUN_DEFAULT].cb = NULL;
tnif->tracers[TRACE_FUN_ENABLED].name = "enabled";
@@ -2634,35 +2634,35 @@ static void init_tracer_template(ErtsTracerNif *tnif) {
/* specific tracer functions */
tnif->tracers[TRACE_FUN_T_SEND].name = "trace_send";
- tnif->tracers[TRACE_FUN_T_SEND].arity = 6;
+ tnif->tracers[TRACE_FUN_T_SEND].arity = 5;
tnif->tracers[TRACE_FUN_T_SEND].cb = NULL;
tnif->tracers[TRACE_FUN_T_RECEIVE].name = "trace_receive";
- tnif->tracers[TRACE_FUN_T_RECEIVE].arity = 6;
+ tnif->tracers[TRACE_FUN_T_RECEIVE].arity = 5;
tnif->tracers[TRACE_FUN_T_RECEIVE].cb = NULL;
tnif->tracers[TRACE_FUN_T_CALL].name = "trace_call";
- tnif->tracers[TRACE_FUN_T_CALL].arity = 6;
+ tnif->tracers[TRACE_FUN_T_CALL].arity = 5;
tnif->tracers[TRACE_FUN_T_CALL].cb = NULL;
tnif->tracers[TRACE_FUN_T_SCHED_PROC].name = "trace_running_procs";
- tnif->tracers[TRACE_FUN_T_SCHED_PROC].arity = 6;
+ tnif->tracers[TRACE_FUN_T_SCHED_PROC].arity = 5;
tnif->tracers[TRACE_FUN_T_SCHED_PROC].cb = NULL;
tnif->tracers[TRACE_FUN_T_SCHED_PORT].name = "trace_running_ports";
- tnif->tracers[TRACE_FUN_T_SCHED_PORT].arity = 6;
+ tnif->tracers[TRACE_FUN_T_SCHED_PORT].arity = 5;
tnif->tracers[TRACE_FUN_T_SCHED_PORT].cb = NULL;
tnif->tracers[TRACE_FUN_T_GC].name = "trace_garbage_collection";
- tnif->tracers[TRACE_FUN_T_GC].arity = 6;
+ tnif->tracers[TRACE_FUN_T_GC].arity = 5;
tnif->tracers[TRACE_FUN_T_GC].cb = NULL;
tnif->tracers[TRACE_FUN_T_PROCS].name = "trace_procs";
- tnif->tracers[TRACE_FUN_T_PROCS].arity = 6;
+ tnif->tracers[TRACE_FUN_T_PROCS].arity = 5;
tnif->tracers[TRACE_FUN_T_PROCS].cb = NULL;
tnif->tracers[TRACE_FUN_T_PORTS].name = "trace_ports";
- tnif->tracers[TRACE_FUN_T_PORTS].arity = 6;
+ tnif->tracers[TRACE_FUN_T_PORTS].arity = 5;
tnif->tracers[TRACE_FUN_T_PORTS].cb = NULL;
/* specific enabled functions */
@@ -2834,10 +2834,12 @@ send_to_tracer_nif_raw(Process *c_p, Process *tracee,
Eterm tag, Eterm msg, Eterm extra, Eterm pam_result)
{
if (tnif || (tnif = lookup_tracer_nif(tracer)) != NULL) {
-#define MAP_SIZE 3
- Eterm argv[6], local_heap[3+MAP_SIZE /* values */ + (MAP_SIZE+1 /* keys */)];
+#define MAP_SIZE 4
+ Eterm argv[5], local_heap[3+MAP_SIZE /* values */ + (MAP_SIZE+1 /* keys */)];
flatmap_t *map = (flatmap_t*)(local_heap+(MAP_SIZE+1));
Eterm *map_values = flatmap_get_values(map);
+ Eterm *map_keys = local_heap + 1;
+ Uint map_elem_count = 0;
topt = (tnif->tracers[topt].cb) ? topt : TRACE_FUN_DEFAULT;
ASSERT(topt < NIF_TRACER_TYPES);
@@ -2846,31 +2848,40 @@ send_to_tracer_nif_raw(Process *c_p, Process *tracee,
argv[1] = ERTS_TRACER_STATE(tracer);
argv[2] = t_p_id;
argv[3] = msg;
- argv[4] = extra == THE_NON_VALUE ? am_undefined : extra;
- argv[5] = make_flatmap(map);
+ argv[4] = make_flatmap(map);
map->thing_word = MAP_HEADER_FLATMAP;
- map->size = MAP_SIZE;
- map->keys = TUPLE3(local_heap, am_match_spec_result, am_scheduler_id, am_timestamp);
-
- *map_values++ = pam_result;
- if (tracee_flags & F_TRACE_SCHED_NO)
- *map_values++ = make_small(erts_get_scheduler_id());
- else
- *map_values++ = am_undefined;
+
+ if (extra != THE_NON_VALUE) {
+ map_keys[map_elem_count] = am_extra;
+ map_values[map_elem_count++] = extra;
+ }
+
+ if (pam_result != am_true) {
+ map_keys[map_elem_count] = am_match_spec_result;
+ map_values[map_elem_count++] = pam_result;
+ }
+
+ if (tracee_flags & F_TRACE_SCHED_NO) {
+ map_keys[map_elem_count] = am_scheduler_id;
+ map_values[map_elem_count++] = make_small(erts_get_scheduler_id());
+ }
+ map_keys[map_elem_count] = am_timestamp;
if (tracee_flags & F_NOW_TS)
#ifdef HAVE_ERTS_NOW_CPU
if (erts_cpu_timestamp)
- *map_values++ = am_cpu_timestamp;
+ map_values[map_elem_count++] = am_cpu_timestamp;
else
#endif
- *map_values++ = am_timestamp;
+ map_values[map_elem_count++] = am_timestamp;
else if (tracee_flags & F_STRICT_MON_TS)
- *map_values++ = am_strict_monotonic;
+ map_values[map_elem_count++] = am_strict_monotonic;
else if (tracee_flags & F_MON_TS)
- *map_values++ = am_monotonic;
- else
- *map_values++ = am_undefined;
+ map_values[map_elem_count++] = am_monotonic;
+
+ map->size = map_elem_count;
+ map->keys = make_tuple(local_heap);
+ local_heap[0] = make_arityval(map_elem_count);
#undef MAP_SIZE
erts_nif_call_function(c_p, tracee ? tracee : c_p,
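The hunks above drop the fixed sixth argument and shrink the tracer callbacks to arity 5, passing only the metadata keys that are actually set (extra, match_spec_result, scheduler_id, timestamp) in the final map argument. A hedged sketch of an erl_tracer NIF callback consuming that map (hypothetical NIF, using only documented enif calls):

    static ERL_NIF_TERM trace_5(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        /* argv: TraceTag, TracerState, Tracee, TraceTerm, metadata map */
        ERL_NIF_TERM extra, pam_result;
        (void)argc;
        if (enif_get_map_value(env, argv[4], enif_make_atom(env, "extra"), &extra)) {
            /* present only when the trace event carried an extra term */
        }
        if (enif_get_map_value(env, argv[4],
                               enif_make_atom(env, "match_spec_result"), &pam_result)) {
            /* present only when a match spec returned something other than true */
        }
        return enif_make_atom(env, "ok");
    }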
diff --git a/erts/emulator/beam/erlang_lttng.h b/erts/emulator/beam/erlang_lttng.h
index 12f68e477b..4e869671f7 100644
--- a/erts/emulator/beam/erlang_lttng.h
+++ b/erts/emulator/beam/erlang_lttng.h
@@ -20,7 +20,7 @@
#ifdef USE_LTTNG
#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER com_ericsson_otp
+#define TRACEPOINT_PROVIDER org_erlang_otp
#undef TRACEPOINT_INCLUDE
#define TRACEPOINT_INCLUDE "erlang_lttng.h"
@@ -33,7 +33,7 @@
/* Schedulers */
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
scheduler_poll,
TP_ARGS(
int, id,
@@ -62,7 +62,7 @@ typedef struct {
/* Port and Driver Scheduling */
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_start,
TP_ARGS(
char*, pid,
@@ -77,7 +77,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_init,
TP_ARGS(
char*, driver,
@@ -94,7 +94,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_outputv,
TP_ARGS(
char*, pid,
@@ -111,7 +111,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_output,
TP_ARGS(
char*, pid,
@@ -128,7 +128,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_ready_input,
TP_ARGS(
char*, pid,
@@ -143,7 +143,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_ready_output,
TP_ARGS(
char*, pid,
@@ -158,7 +158,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_event,
TP_ARGS(
char*, pid,
@@ -173,7 +173,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_timeout,
TP_ARGS(
char*, pid,
@@ -188,7 +188,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_stop_select,
TP_ARGS(
char*, driver
@@ -199,7 +199,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_flush,
TP_ARGS(
char*, pid,
@@ -214,7 +214,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_stop,
TP_ARGS(
char*, pid,
@@ -229,7 +229,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_process_exit,
TP_ARGS(
char*, pid,
@@ -244,7 +244,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_ready_async,
TP_ARGS(
char*, pid,
@@ -259,7 +259,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_finish,
TP_ARGS(
char*, driver
@@ -270,7 +270,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_call,
TP_ARGS(
char*, pid,
@@ -289,7 +289,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
driver_control,
TP_ARGS(
char*, pid,
@@ -310,7 +310,7 @@ TRACEPOINT_EVENT(
/* Async pool */
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
aio_pool_get,
TP_ARGS(
char*, port,
@@ -323,7 +323,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
aio_pool_put,
TP_ARGS(
char*, port,
@@ -339,7 +339,7 @@ TRACEPOINT_EVENT(
/* Memory Allocator */
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
carrier_create,
TP_ARGS(
const char*, type,
@@ -365,7 +365,7 @@ TRACEPOINT_EVENT(
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
carrier_destroy,
TP_ARGS(
const char*, type,
@@ -390,7 +390,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
carrier_pool_put,
TP_ARGS(
const char*, name,
@@ -405,7 +405,7 @@ TRACEPOINT_EVENT(
)
TRACEPOINT_EVENT(
- com_ericsson_otp,
+ org_erlang_otp,
carrier_pool_get,
TP_ARGS(
const char*, name,
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 02c24557c1..2a19211987 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -31,7 +31,7 @@
#define EXPORT_INITIAL_SIZE 4000
#define EXPORT_LIMIT (512*1024)
-#define EXPORT_HASH(m,f,a) ((m)*(f)+(a))
+#define EXPORT_HASH(m,f,a) ((atom_val(m) * atom_val(f)) ^ (a))
#ifdef DEBUG
# define IF_DEBUG(x) x
@@ -79,8 +79,7 @@ struct export_templ
static struct export_blob* entry_to_blob(struct export_entry* ee)
{
- return (struct export_blob*)
- ((char*)ee->ep - offsetof(struct export_blob,exp));
+ return ErtsContainerStruct(ee->ep, struct export_blob, exp);
}
void
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index b76b9cd874..f3d4ac56cd 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -62,6 +62,9 @@ struct enif_environment_t /* ErlNifEnv */
extern void erts_pre_nif(struct enif_environment_t*, Process*,
struct erl_module_nif*, Process* tracee);
extern void erts_post_nif(struct enif_environment_t* env);
+extern void erts_pre_dirty_nif(ErtsSchedulerData *,
+ struct enif_environment_t*, Process*,
+ struct erl_module_nif*, Process* tracee);
extern Eterm erts_nif_taints(Process* p);
extern void erts_print_nif_taints(int to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
@@ -1152,6 +1155,7 @@ void print_pass_through(int, byte*, int);
int catchlevel(Process*);
void init_emulator(void);
void process_main(void);
+void erts_dirty_process_main(ErtsSchedulerData *);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth);
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 0377f6cb5e..cb8792dffa 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -938,18 +938,32 @@ int erts_port_handle_xports(Port *prt)
** -2 on type error
*/
-#define SET_VEC(iov, bv, bin, ptr, len, vlen) do { \
- (iov)->iov_base = (ptr); \
- (iov)->iov_len = (len); \
- if (sizeof((iov)->iov_len) < sizeof(len) \
- /* Check if (len) overflowed (iov)->iov_len */ \
- && (iov)->iov_len != (len)) { \
- goto L_overflow; \
- } \
- *(bv)++ = (bin); \
- (iov)++; \
- (vlen)++; \
-} while(0)
+#ifdef DEBUG
+#define MAX_SYSIOVEC_IOVLEN (1ull << (32 - 1))
+#else
+#define MAX_SYSIOVEC_IOVLEN (1ull << (sizeof(((SysIOVec*)0)->iov_len) * 8 - 1))
+#endif
+
+static ERTS_INLINE void
+io_list_to_vec_set_vec(SysIOVec **iov, ErlDrvBinary ***binv,
+ ErlDrvBinary *bin, byte *ptr, Uint len,
+ int *vlen)
+{
+ while (len > MAX_SYSIOVEC_IOVLEN) {
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = MAX_SYSIOVEC_IOVLEN;
+ ptr += MAX_SYSIOVEC_IOVLEN;
+ len -= MAX_SYSIOVEC_IOVLEN;
+ (*iov)++;
+ (*vlen)++;
+ *(*binv)++ = bin;
+ }
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = len;
+ *(*binv)++ = bin;
+ (*iov)++;
+ (*vlen)++;
+}
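Unlike the old SET_VEC macro, the helper above splits one binary across several SysIOVec entries when its length exceeds what iov_len can hold. A quick sanity check of the slot reservation used further down (`v_size += _size / MAX_SYSIOVEC_IOVLEN`), written as a standalone helper for illustration only:

    /* Upper bound on iovec entries reserved for one binary of `size` bytes:
     * the base entry plus one per full MAX_SYSIOVEC_IOVLEN chunk. E.g. with a
     * 2 GB limit, a 5 GB binary reserves 1 + 2 = 3 entries (two full chunks
     * plus the remainder). */
    static Uint iovec_slots_needed(Uint size)
    {
        return 1 + size / MAX_SYSIOVEC_IOVLEN;
    }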
static int
io_list_to_vec(Eterm obj, /* io-list */
@@ -960,11 +974,11 @@ io_list_to_vec(Eterm obj, /* io-list */
{
DECLARE_ESTACK(s);
Eterm* objp;
- char *buf = cbin->orig_bytes;
+ byte *buf = (byte*)cbin->orig_bytes;
Uint len = cbin->orig_size;
Uint csize = 0;
int vlen = 0;
- char* cptr = buf;
+ byte* cptr = buf;
goto L_jump_start; /* avoid push */
@@ -1032,15 +1046,17 @@ io_list_to_vec(Eterm obj, /* io-list */
len -= size;
} else {
if (csize != 0) {
- SET_VEC(iov, binv, cbin, cptr, csize, vlen);
+ io_list_to_vec_set_vec(&iov, &binv, cbin,
+ cptr, csize, &vlen);
cptr = buf;
csize = 0;
}
if (pb->flags) {
erts_emasculate_writable_binary(pb);
}
- SET_VEC(iov, binv, Binary2ErlDrvBinary(pb->val),
- pb->bytes+offset, size, vlen);
+ io_list_to_vec_set_vec(
+ &iov, &binv, Binary2ErlDrvBinary(pb->val),
+ pb->bytes+offset, size, &vlen);
}
} else {
ErlHeapBin* hb = (ErlHeapBin *) bptr;
@@ -1060,7 +1076,7 @@ io_list_to_vec(Eterm obj, /* io-list */
}
if (csize != 0) {
- SET_VEC(iov, binv, cbin, cptr, csize, vlen);
+ io_list_to_vec_set_vec(&iov, &binv, cbin, cptr, csize, &vlen);
}
DESTROY_ESTACK(s);
@@ -1086,10 +1102,13 @@ do { \
if (_bitsize != 0) goto L_type_error; \
if (thing_subtag(*binary_val(_real)) == REFC_BINARY_SUBTAG && \
_bitoffs == 0) { \
- b_size += _size; \
+ b_size += _size; \
if (b_size < _size) goto L_overflow_error; \
in_clist = 0; \
- v_size++; \
+ v_size++; \
+           /* If iov_len is smaller than Uint we split the binary into */ \
+           /* multiple smaller (2GB) elements in the iolist. */           \
+ v_size += _size / MAX_SYSIOVEC_IOVLEN; \
if (_size >= ERL_SMALL_IO_BIN_LIMIT) { \
p_in_clist = 0; \
p_v_size++; \
@@ -1241,7 +1260,7 @@ typedef struct {
/*
* Try doing an immediate driver callback call from a process. If
* this fail, the operation should be scheduled in the normal case...
- *
+ * Returns: ok to do the call, or error (lock busy, does not exist, etc)
*/
static ERTS_INLINE ErtsTryImmDrvCallResult
try_imm_drv_call(ErtsTryImmDrvCallState *sp)
@@ -3055,6 +3074,250 @@ erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp)
}
static void
+port_monitor_failure(Eterm port_id, Eterm origin, Eterm ref_DOWN)
+{
+ Process *origin_p;
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
+ ASSERT(is_internal_pid(origin));
+
+ origin_p = erts_pid2proc(NULL, 0, origin, p_locks);
+ if (! origin_p) { return; }
+
+    /* Send the DOWN message immediately. Ref is made on the fly because
+     * the caller has not seen it yet. */
+ erts_queue_monitor_message(origin_p, &p_locks, ref_DOWN,
+ am_port, port_id, am_noproc);
+ erts_smp_proc_unlock(origin_p, p_locks);
+}
+
+/* Origin wants to monitor port Prt. State contains a possible error that
+ * happened just before. Name is either NIL or an atom, if the user monitors
+ * the port by name. Ref is a premade reference returned to the user */
+static void
+port_monitor(Port *prt, erts_aint32_t state, Eterm origin,
+ Eterm name, Eterm ref)
+{
+ Eterm name_or_nil = is_atom(name) ? name : NIL;
+
+ ASSERT(is_pid(origin));
+ ASSERT(is_atom(name) || is_port(name) || name == NIL);
+ ASSERT(is_internal_ref(ref));
+
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
+
+ Process *origin_p = erts_pid2proc(NULL, 0, origin, p_locks);
+ if (! origin_p) {
+ goto failure;
+ }
+ erts_add_monitor(&ERTS_P_MONITORS(origin_p), MON_ORIGIN, ref,
+ prt->common.id, name_or_nil);
+ erts_add_monitor(&ERTS_P_MONITORS(prt), MON_TARGET, ref,
+ origin, name_or_nil);
+
+ erts_smp_proc_unlock(origin_p, p_locks);
+ } else {
+failure:
+ port_monitor_failure(prt->common.id, origin, ref);
+ }
+}
+
+static int
+port_sig_monitor(Port *prt, erts_aint32_t state, int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ Eterm hp[REF_THING_SIZE];
+ Eterm ref = make_internal_ref(&hp);
+ write_ref_thing(hp, sigdp->ref[0], sigdp->ref[1], sigdp->ref[2]);
+
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ /* erts_add_monitor call inside port_monitor will copy ref from hp */
+ port_monitor(prt, state,
+ sigdp->u.monitor.origin,
+ sigdp->u.monitor.name,
+ ref);
+ } else {
+ port_monitor_failure(sigdp->u.monitor.name,
+ sigdp->u.monitor.origin,
+ ref);
+ }
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) {
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
+ }
+ return ERTS_PORT_REDS_MONITOR;
+}
+
+/* Creates a monitor between Origin and Target. Ref must be initialized to
+ * a reference (ref may be rewritten so that it can additionally serve as a
+ * signal id). Name is an atom if the user monitors the port by name, or NIL */
+ErtsPortOpResult
+erts_port_monitor(Process *origin, Port *port, Eterm name, Eterm *refp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ origin, port, ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ 0, /* trap_ref is always set so !trap_ref always is false */
+ am_monitor);
+
+ ASSERT(origin);
+ ASSERT(port);
+ ASSERT(is_atom(name) || is_port(name));
+ ASSERT(refp);
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ port_monitor(port, try_call_state.state, origin->common.id, name, *refp);
+ finalize_imm_drv_call(&try_call_state);
+ BUMP_REDS(origin, ERTS_PORT_REDS_MONITOR);
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_BADARG;
+ default:
+ break; /* Schedule call instead... */
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_MONITOR;
+ sigdp->u.monitor.origin = origin->common.id;
+ sigdp->u.monitor.name = name; /* either named monitor, or port id */
+
+ /* Ref contents will be initialized here */
+ return erts_schedule_proc2port_signal(origin, port, origin->common.id,
+ refp, sigdp, 0, NULL,
+ port_sig_monitor);
+}
+
+static void
+port_demonitor_failure(Eterm port_id, Eterm origin, Eterm ref)
+{
+ Process *origin_p;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK;
+ ErtsMonitor *mon1;
+ ASSERT(is_internal_pid(origin));
+
+ origin_p = erts_pid2proc(NULL, 0, origin, rp_locks);
+ if (! origin_p) { return; }
+
+ /* do not send any DOWN messages, drop monitors on process */
+ mon1 = erts_remove_monitor(&ERTS_P_MONITORS(origin_p), ref);
+ if (mon1 != NULL) {
+ erts_destroy_monitor(mon1);
+ }
+
+ erts_smp_proc_unlock(origin_p, rp_locks);
+}
+
+/* Origin wants to demonitor port Prt. State contains a possible error that
+ * happened just before. Ref is the reference to the monitor */
+static void
+port_demonitor(Port *port, erts_aint32_t state, Eterm origin, Eterm ref)
+{
+ ASSERT(port);
+ ASSERT(is_pid(origin));
+ ASSERT(is_internal_ref(ref));
+
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
+ ErtsProcLocks p_locks = ERTS_PROC_LOCK_LINK;
+ Process *origin_p = erts_pid2proc(NULL, 0, origin, p_locks);
+ if (origin_p) {
+ ErtsMonitor *mon1 = erts_remove_monitor(&ERTS_P_MONITORS(origin_p),
+ ref);
+ if (mon1 != NULL) {
+ erts_destroy_monitor(mon1);
+ }
+ }
+ if (1) {
+ ErtsMonitor *mon2 = erts_remove_monitor(&ERTS_P_MONITORS(port),
+ ref);
+ if (mon2 != NULL) {
+ erts_destroy_monitor(mon2);
+ }
+ }
+ if (origin_p) { /* when origin is dying, it won't be found */
+ erts_smp_proc_unlock(origin_p, p_locks);
+ }
+ } else {
+ port_demonitor_failure(port->common.id, origin, ref);
+ }
+}
+
+static int
+port_sig_demonitor(Port *prt, erts_aint32_t state, int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ Eterm hp[REF_THING_SIZE];
+ Eterm ref = make_internal_ref(&hp);
+ write_ref_thing(hp, sigdp->u.demonitor.ref[0],
+ sigdp->u.demonitor.ref[1],
+ sigdp->u.demonitor.ref[2]);
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ port_demonitor(prt, state, sigdp->u.demonitor.origin, ref);
+ } else {
+ port_demonitor_failure(sigdp->u.demonitor.name,
+ sigdp->u.demonitor.origin,
+ ref);
+ }
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) {
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
+ }
+ return ERTS_PORT_REDS_DEMONITOR;
+}
+
+/* Removes the monitor between origin and target, identified by ref.
+ * Mode selects normal or relaxed demonitor rules (relaxed is used when the
+ * origin process is dying) */
+ErtsPortOpResult erts_port_demonitor(Process *origin, ErtsDemonitorMode mode,
+ Port *target, Eterm ref,
+ Eterm *trap_ref)
+{
+ Process *c_p = mode == ERTS_PORT_DEMONITOR_NORMAL ? origin : NULL;
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ target, ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !trap_ref,
+ am_demonitor);
+
+ ASSERT(origin);
+ ASSERT(target);
+ ASSERT(is_internal_ref(ref));
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ port_demonitor(target, try_call_state.state, origin->common.id, ref);
+ finalize_imm_drv_call(&try_call_state);
+ if (mode == ERTS_PORT_DEMONITOR_NORMAL) {
+ BUMP_REDS(origin, ERTS_PORT_REDS_DEMONITOR);
+ }
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_BADARG;
+ default:
+ break; /* Schedule call instead... */
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_DEMONITOR;
+ sigdp->u.demonitor.origin = origin->common.id;
+ sigdp->u.demonitor.name = target->common.id;
+ {
+ RefThing *reft = ref_thing_ptr(ref);
+        /* Copy the ref numbers, skipping the ref thing's arity word */
+ sys_memcpy(sigdp->u.demonitor.ref,
+ internal_thing_ref_numbers(reft),
+ sizeof(sigdp->u.demonitor.ref));
+ }
+
+ /* Ref contents will be initialized here */
+ return erts_schedule_proc2port_signal(c_p, target, origin->common.id,
+ trap_ref, sigdp, 0, NULL,
+ port_sig_demonitor);
+}
+
+static void
init_ack_send_reply(Port *port, Eterm resp)
{
@@ -3923,23 +4186,30 @@ erts_terminate_port(Port *pp)
terminate_port(pp);
}
+static void port_fire_one_monitor(ErtsMonitor *mon, void *ctx0);
static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
{
- ErtsMonitor *rmon;
- Process *rp;
+ switch (mon->type) {
+ case MON_ORIGIN: {
+ ErtsMonitor *rmon;
+ Process *rp;
- ASSERT(mon->type == MON_ORIGIN);
- ASSERT(is_internal_pid(mon->pid));
- rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- goto done;
- }
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon == NULL) {
- goto done;
+ ASSERT(is_internal_pid(mon->pid));
+ rp = erts_pid2proc(NULL, 0, mon->pid, ERTS_PROC_LOCK_LINK);
+ if (!rp) {
+ goto done;
+ }
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ if (rmon == NULL) {
+ goto done;
+ }
+ erts_destroy_monitor(rmon);
+ } break;
+ case MON_TARGET: {
+ port_fire_one_monitor(mon, vpsc); /* forward call */
+ } break;
}
- erts_destroy_monitor(rmon);
done:
erts_destroy_monitor(mon);
}
@@ -4020,6 +4290,43 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
erts_destroy_link(lnk);
}
+static void
+port_fire_one_monitor(ErtsMonitor *mon, void *ctx0)
+{
+ Process *origin;
+ ErtsProcLocks origin_locks;
+
+ if (mon->type != MON_TARGET || ! is_pid(mon->pid)) {
+ return;
+ }
+ /*
+     * Proceed only if someone monitors us: we (the port) are the target and
+     * the origin is a process
+ */
+ origin_locks = ERTS_PROC_LOCKS_MSG_SEND | ERTS_PROC_LOCK_LINK;
+
+ origin = erts_pid2proc(NULL, 0, mon->pid, origin_locks);
+ if (origin) {
+ DeclareTmpHeapNoproc(lhp,3);
+ SweepContext *ctx = (SweepContext *)ctx0;
+ ErtsMonitor *rmon;
+ Eterm watched = (is_atom(mon->name)
+ ? TUPLE2(lhp, mon->name, erts_this_dist_entry->sysname)
+ : ctx->port->common.id);
+
+ erts_queue_monitor_message(origin, &origin_locks, mon->ref, am_port,
+ watched, ctx->reason);
+ UnUseTmpHeapNoproc(3);
+
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(origin), mon->ref);
+ erts_smp_proc_unlock(origin, origin_locks);
+
+ if (rmon) {
+ erts_destroy_monitor(rmon);
+ }
+ }
+}
+
/* 'from' is sending 'this_port' an exit signal, (this_port must be internal).
* If reason is normal we don't do anything, *unless* from is our connected
* process in which case we close the port. Any other reason kills the port.
@@ -4031,39 +4338,40 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
*/
int
-erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed,
+erts_deliver_port_exit(Port *prt, Eterm from, Eterm reason, int send_closed,
int drop_normal)
{
ErtsLink *lnk;
- Eterm rreason;
+ Eterm modified_reason;
erts_aint32_t state, set_state_flags;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- rreason = (reason == am_kill) ? am_killed : reason;
+ modified_reason = (reason == am_kill) ? am_killed : reason;
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_exit)) {
DTRACE_CHARBUF(from_str, DTRACE_TERM_BUF_SIZE);
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(rreason_str, 64);
+ DTRACE_CHARBUF(reason_str, 64);
erts_snprintf(from_str, sizeof(DTRACE_CHARBUF_NAME(from_str)), "%T", from);
- dtrace_port_str(p, port_str);
- erts_snprintf(rreason_str, sizeof(DTRACE_CHARBUF_NAME(rreason_str)), "%T", rreason);
- DTRACE4(port_exit, from_str, port_str, p->name, rreason_str);
+ dtrace_port_str(prt, port_str);
+ erts_snprintf(reason_str, sizeof(DTRACE_CHARBUF_NAME(reason_str)), "%T",
+ modified_reason);
+ DTRACE4(port_exit, from_str, port_str, prt->name, reason_str);
}
#endif
- state = erts_atomic32_read_nob(&p->state);
+ state = erts_atomic32_read_nob(&prt->state);
if (state & (ERTS_PORT_SFLGS_DEAD
| ERTS_PORT_SFLG_EXITING
| ERTS_PORT_SFLG_CLOSING))
return 0;
- if (reason == am_normal && from != ERTS_PORT_GET_CONNECTED(p)
- && from != p->common.id && drop_normal) {
+ if (reason == am_normal && from != ERTS_PORT_GET_CONNECTED(prt)
+ && from != prt->common.id && drop_normal) {
return 0;
}
@@ -4071,53 +4379,54 @@ erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed,
if (send_closed)
set_state_flags |= ERTS_PORT_SFLG_SEND_CLOSED;
- erts_port_task_sched_enter_exiting_state(&p->sched);
+ erts_port_task_sched_enter_exiting_state(&prt->sched);
- state = erts_atomic32_read_bor_mb(&p->state, set_state_flags);
+ state = erts_atomic32_read_bor_mb(&prt->state, set_state_flags);
state |= set_state_flags;
- if (IS_TRACED_FL(p, F_TRACE_PORTS))
- trace_port(p, am_closed, reason);
+ if (IS_TRACED_FL(prt, F_TRACE_PORTS))
+ trace_port(prt, am_closed, reason);
- erts_trace_check_exiting(p->common.id);
+ erts_trace_check_exiting(prt->common.id);
- set_busy_port(ERTS_Port2ErlDrvPort(p), 0);
+ set_busy_port(ERTS_Port2ErlDrvPort(prt), 0);
- if (p->common.u.alive.reg != NULL)
- (void) erts_unregister_name(NULL, 0, p, p->common.u.alive.reg->name);
+ if (prt->common.u.alive.reg != NULL)
+ (void) erts_unregister_name(NULL, 0, prt, prt->common.u.alive.reg->name);
{
- SweepContext sc = {p, rreason};
- lnk = ERTS_P_LINKS(p);
- ERTS_P_LINKS(p) = NULL;
+ SweepContext sc = {prt, modified_reason};
+ lnk = ERTS_P_LINKS(prt);
+ ERTS_P_LINKS(prt) = NULL;
erts_sweep_links(lnk, &sweep_one_link, &sc);
}
- DRV_MONITOR_LOCK_PDL(p);
+ DRV_MONITOR_LOCK_PDL(prt);
{
- ErtsMonitor *moni = ERTS_P_MONITORS(p);
- ERTS_P_MONITORS(p) = NULL;
- erts_sweep_monitors(moni, &sweep_one_monitor, NULL);
+ SweepContext ctx = {prt, modified_reason};
+ ErtsMonitor *moni = ERTS_P_MONITORS(prt);
+ ERTS_P_MONITORS(prt) = NULL;
+ erts_sweep_monitors(moni, &sweep_one_monitor, &ctx);
}
- DRV_MONITOR_UNLOCK_PDL(p);
+ DRV_MONITOR_UNLOCK_PDL(prt);
- if ((state & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) {
- erts_do_net_exits(p->dist_entry, rreason);
- erts_deref_dist_entry(p->dist_entry);
- p->dist_entry = NULL;
- erts_atomic32_read_band_relb(&p->state,
+ if ((state & ERTS_PORT_SFLG_DISTRIBUTION) && prt->dist_entry) {
+ erts_do_net_exits(prt->dist_entry, modified_reason);
+ erts_deref_dist_entry(prt->dist_entry);
+ prt->dist_entry = NULL;
+ erts_atomic32_read_band_relb(&prt->state,
~ERTS_PORT_SFLG_DISTRIBUTION);
}
- if ((reason != am_kill) && !is_port_ioq_empty(p)) {
+ if ((reason != am_kill) && !is_port_ioq_empty(prt)) {
/* must turn exiting flag off */
- erts_atomic32_read_bset_relb(&p->state,
+ erts_atomic32_read_bset_relb(&prt->state,
(ERTS_PORT_SFLG_EXITING
| ERTS_PORT_SFLG_CLOSING),
ERTS_PORT_SFLG_CLOSING);
- flush_port(p);
+ flush_port(prt);
}
else {
- terminate_port(p);
+ terminate_port(prt);
}
return 1;
diff --git a/erts/emulator/beam/lttng-wrapper.h b/erts/emulator/beam/lttng-wrapper.h
index 294872c365..0bc75c1552 100644
--- a/erts/emulator/beam/lttng-wrapper.h
+++ b/erts/emulator/beam/lttng-wrapper.h
@@ -77,23 +77,23 @@
(RQ)->scheduler->no
#define LTTNG_ENABLED(Name) \
- tracepoint_enabled(com_ericsson_otp, Name)
+ tracepoint_enabled(org_erlang_otp, Name)
/* include a special LTTNG_DO for do_tracepoint ? */
#define LTTNG1(Name, Arg1) \
- tracepoint(com_ericsson_otp, Name, (Arg1))
+ tracepoint(org_erlang_otp, Name, (Arg1))
#define LTTNG2(Name, Arg1, Arg2) \
- tracepoint(com_ericsson_otp, Name, (Arg1), (Arg2))
+ tracepoint(org_erlang_otp, Name, (Arg1), (Arg2))
#define LTTNG3(Name, Arg1, Arg2, Arg3) \
- tracepoint(com_ericsson_otp, Name, (Arg1), (Arg2), (Arg3))
+ tracepoint(org_erlang_otp, Name, (Arg1), (Arg2), (Arg3))
#define LTTNG4(Name, Arg1, Arg2, Arg3, Arg4) \
- tracepoint(com_ericsson_otp, Name, (Arg1), (Arg2), (Arg3), (Arg4))
+ tracepoint(org_erlang_otp, Name, (Arg1), (Arg2), (Arg3), (Arg4))
#define LTTNG5(Name, Arg1, Arg2, Arg3, Arg4, Arg5) \
- tracepoint(com_ericsson_otp, Name, (Arg1), (Arg2), (Arg3), (Arg4), (Arg5))
+ tracepoint(org_erlang_otp, Name, (Arg1), (Arg2), (Arg3), (Arg4), (Arg5))
#else /* USE_LTTNG */
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 77f79fcea4..ac7096745e 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -323,7 +323,8 @@ erts_whereis_name(Process *c_p,
Process** proc,
ErtsProcLocks need_locks,
int flags,
- Port** port)
+ Port** port,
+ int lock_port)
{
RegProc* rp = NULL;
HashValue hval;
@@ -406,31 +407,33 @@ erts_whereis_name(Process *c_p,
*port = NULL;
else {
#ifdef ERTS_SMP
- if (pending_port == rp->pt)
- pending_port = NULL;
- else {
- if (pending_port) {
- /* Ahh! Registered port changed while reg lock
- was unlocked... */
- erts_port_release(pending_port);
- pending_port = NULL;
- }
+ if (lock_port) {
+ if (pending_port == rp->pt)
+ pending_port = NULL;
+ else {
+ if (pending_port) {
+ /* Ahh! Registered port changed while reg lock
+ was unlocked... */
+ erts_port_release(pending_port);
+ pending_port = NULL;
+ }
- if (erts_smp_port_trylock(rp->pt) == EBUSY) {
- Eterm id = rp->pt->common.id; /* id read only... */
- /* Unlock all locks, acquire port lock, and restart... */
- if (current_c_p_locks) {
- erts_smp_proc_unlock(c_p, current_c_p_locks);
- current_c_p_locks = 0;
- }
- reg_read_unlock();
- pending_port = erts_id2port(id);
- goto restart;
- }
- }
+ if (erts_smp_port_trylock(rp->pt) == EBUSY) {
+ Eterm id = rp->pt->common.id; /* id read only... */
+ /* Unlock all locks, acquire port lock, and restart... */
+ if (current_c_p_locks) {
+ erts_smp_proc_unlock(c_p, current_c_p_locks);
+ current_c_p_locks = 0;
+ }
+ reg_read_unlock();
+ pending_port = erts_id2port(id);
+ goto restart;
+ }
+ }
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(rp->pt));
+ }
#endif
*port = rp->pt;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(*port));
}
}
@@ -452,7 +455,7 @@ erts_whereis_process(Process *c_p,
int flags)
{
Process *proc;
- erts_whereis_name(c_p, c_p_locks, name, &proc, need_locks, flags, NULL);
+ erts_whereis_name(c_p, c_p_locks, name, &proc, need_locks, flags, NULL, 0);
return proc;
}
diff --git a/erts/emulator/beam/register.h b/erts/emulator/beam/register.h
index 88ab7b7bf1..d839f55d6b 100644
--- a/erts/emulator/beam/register.h
+++ b/erts/emulator/beam/register.h
@@ -49,7 +49,7 @@ int erts_register_name(Process *, Eterm, Eterm);
Eterm erts_whereis_name_to_id(Process *, Eterm);
void erts_whereis_name(Process *, ErtsProcLocks,
Eterm, Process**, ErtsProcLocks, int,
- Port**);
+ Port**, int);
Process *erts_whereis_process(Process *,
ErtsProcLocks,
Eterm,
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index f303d4f167..dfe82cab44 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -97,7 +97,7 @@
((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
#define ErtsContainerStruct(ptr, type, member) \
- (type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member))
+ ((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)))
#if defined (__WIN32__)
# include "erl_win_sys.h"
@@ -154,8 +154,9 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# define ERTS_WRITE_UNLIKELY(X) X
#endif
+/* clang may report a too low __GNUC__ version but can still handle this */
#ifdef __GNUC__
-# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 5)
+# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 5) || defined(__clang__)
# define ERTS_DECLARE_DUMMY(X) X __attribute__ ((unused))
# else
# define ERTS_DECLARE_DUMMY(X) X
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index cedc88e5fe..675fafa726 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -2210,7 +2210,9 @@ do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
#ifndef ERTS_SMP
#ifdef USE_THREADS
- if (erts_get_scheduler_data()) /* Must be scheduler thread */
+ if (!erts_get_scheduler_data()) /* Must be scheduler thread */
+ *p = NULL;
+ else
#endif
{
*p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
@@ -2226,18 +2228,10 @@ do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
}
/* So we have an error logger, lets build the message */
- if (sz <= HeapWordsLeft(*p)) {
- *ohp = &MSO(*p);
- *hp = HEAP_TOP(*p);
- HEAP_TOP(*p) += sz;
- } else {
-#endif
- *bp = new_message_buffer(sz);
- *ohp = &(*bp)->off_heap;
- *hp = (*bp)->mem;
-#ifndef ERTS_SMP
- }
#endif
+ *bp = new_message_buffer(sz);
+ *ohp = &(*bp)->off_heap;
+ *hp = (*bp)->mem;
return (is_nil(gleader)
? am_noproc
@@ -3893,8 +3887,10 @@ void bin_write(int to, void *to_arg, byte* buf, size_t sz)
}
/* Fill buf with the contents of bytelist list
- return number of chars in list or -1 for error */
-
+ * return number of chars in list
+ * or -1 for type error
+ * or -2 for not enough buffer space (buffer contains truncated result)
+ */
Sint
intlist_to_buf(Eterm list, char *buf, Sint len)
{
@@ -3917,7 +3913,7 @@ intlist_to_buf(Eterm list, char *buf, Sint len)
return -1;
listptr = list_val(*(listptr + 1));
}
- return -1; /* not enough space */
+ return -2; /* not enough space */
}
/*