Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/atom.names              |   8
-rw-r--r--  erts/emulator/beam/beam_emu.c              | 298
-rw-r--r--  erts/emulator/beam/bif.c                   |  64
-rw-r--r--  erts/emulator/beam/bif.tab                 |   2
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c |   2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c          |   5
-rw-r--r--  erts/emulator/beam/erl_gc.c                |  39
-rw-r--r--  erts/emulator/beam/erl_init.c              |   6
-rw-r--r--  erts/emulator/beam/erl_message.c           |  45
-rw-r--r--  erts/emulator/beam/erl_message.h           |  29
-rw-r--r--  erts/emulator/beam/erl_nif.c               | 121
-rw-r--r--  erts/emulator/beam/erl_process.c           | 122
-rw-r--r--  erts/emulator/beam/erl_process.h           |  28
-rw-r--r--  erts/emulator/beam/erl_trace.c             |  65
-rw-r--r--  erts/emulator/beam/export.c                |   5
-rw-r--r--  erts/emulator/beam/global.h                |   4
-rw-r--r--  erts/emulator/beam/io.c                    |  59
-rw-r--r--  erts/emulator/beam/sys.h                   |   2
-rw-r--r--  erts/emulator/beam/utils.c                 |  26
19 files changed, 548 insertions(+), 382 deletions(-)
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 8f65e71531..badd69856e 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -228,9 +228,6 @@ atom endian
atom env
atom eof
atom eol
-atom exception_from
-atom exception_trace
-atom extended
atom Eq='=:='
atom Eqeq='=='
atom erl_tracer
@@ -243,6 +240,8 @@ atom ets
atom ETS_TRANSFER='ETS-TRANSFER'
atom event
atom exact_reductions
+atom exception_from
+atom exception_trace
atom exclusive
atom exit_status
atom existing
@@ -251,7 +250,9 @@ atom existing_ports
atom existing
atom exiting
atom exports
+atom extended
atom external
+atom extra
atom false
atom fcgi
atom fd
@@ -389,7 +390,6 @@ atom min_heap_size
atom min_bin_vheap_size
atom minor_version
atom Minus='-'
-atom mixed
atom module
atom module_info
atom monitored_by
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index f8f2e29c95..0a59f8785c 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1323,11 +1323,7 @@ void process_main(void)
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
- if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
-#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
- && !ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))
-#endif
- ) {
+ if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule) {
BeamInstr *inptr = find_function_from_pc(start_time_i);
BeamInstr *outptr = find_function_from_pc(c_p->i);
monitor_long_schedule_proc(c_p,inptr,outptr,(Uint) diff);
@@ -1337,7 +1333,7 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- c_p = schedule(c_p, reds_used);
+ c_p = erts_schedule(NULL, c_p, reds_used);
ASSERT(!(c_p->flags & F_HIPE_MODE));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
start_time = 0;
@@ -3559,12 +3555,10 @@ do { \
typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
NifF* fp = vbf = (NifF*) I[1];
struct enif_environment_t env;
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!c_p->scheduler_data)
- live_hf_end = ERTS_INVALID_HFRAG_PTR; /* On dirty scheduler */
- else
+#ifdef ERTS_SMP
+ ASSERT(c_p->scheduler_data);
#endif
- live_hf_end = c_p->mbuf;
+ live_hf_end = c_p->mbuf;
erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
if (env.exception_thrown)
@@ -3574,10 +3568,7 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
- if (env.exiting) {
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- goto do_schedule;
- }
+ ASSERT(!env.exiting);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
@@ -5162,6 +5153,283 @@ do { \
}
}
+/*
+ * erts_dirty_process_main() is what dirty schedulers execute. Since they handle
+ * only NIF calls they do not need to be able to execute all BEAM
+ * instructions.
+ */
+void erts_dirty_process_main(ErtsSchedulerData *esdp)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ Process* c_p = NULL;
+ ErtsMonotonicTime start_time;
+#ifdef DEBUG
+ ERTS_DECLARE_DUMMY(Eterm pid);
+#endif
+
+ /* Pointer to X registers: x(1)..x(N); reg[0] is used when doing GC,
+ * in all other cases x0 is used.
+ */
+ register Eterm* reg REG_xregs = NULL;
+
+ /*
+ * Top of heap (next free location); grows upwards.
+ */
+ register Eterm* HTOP REG_htop = NULL;
+
+ /* Stack pointer. Grows downwards; points
+ * to last item pushed (normally a saved
+ * continuation pointer).
+ */
+ register Eterm* E REG_stop = NULL;
+
+ /*
+ * Pointer to next threaded instruction.
+ */
+ register BeamInstr *I REG_I = NULL;
+
+ ERTS_MSACC_DECLARE_CACHE_X() /* a cached value of the tsd pointer for msacc */
+
+ /*
+ * start_time always positive for dirty CPU schedulers,
+ * and negative for dirty I/O schedulers.
+ */
+
+ if (ERTS_SCHEDULER_IS_DIRTY_CPU(esdp)) {
+ start_time = erts_get_monotonic_time(NULL);
+ ASSERT(start_time >= 0);
+ }
+ else {
+ start_time = ERTS_SINT64_MIN;
+ ASSERT(start_time < 0);
+ }
+
+ goto do_dirty_schedule;
+
+ context_switch:
+ c_p->arity = I[-1];
+ c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+
+ {
+ int reds_used;
+ Eterm* argp;
+ int i;
+
+ /*
+ * Make sure that there is enough room for the argument registers to be saved.
+ */
+ if (c_p->arity > c_p->max_arg_reg) {
+ /*
+ * Yes, this is an expensive operation, but you only pay it the first
+ * time you call a function with more than 6 arguments which is
+ * scheduled out. This is better than paying for 26 words of wasted
+ * space for most processes which never call functions with more than
+ * 6 arguments.
+ */
+ Uint size = c_p->arity * sizeof(c_p->arg_reg[0]);
+ if (c_p->arg_reg != c_p->def_arg_reg) {
+ c_p->arg_reg = (Eterm *) erts_realloc(ERTS_ALC_T_ARG_REG,
+ (void *) c_p->arg_reg,
+ size);
+ } else {
+ c_p->arg_reg = (Eterm *) erts_alloc(ERTS_ALC_T_ARG_REG, size);
+ }
+ c_p->max_arg_reg = c_p->arity;
+ }
+
+ /*
+ * Save the argument registers and everything else.
+ */
+
+ argp = c_p->arg_reg;
+ for (i = c_p->arity - 1; i >= 0; i--) {
+ argp[i] = reg[i];
+ }
+ SWAPOUT;
+ c_p->i = I;
+
+ do_dirty_schedule:
+
+ if (start_time < 0) {
+ /*
+ * Dirty I/O scheduler:
+ * One reduction consumed regardless of
+ * time spent in the dirty NIF.
+ */
+ reds_used = esdp->virtual_reds + 1;
+ }
+ else {
+ /*
+ * Dirty CPU scheduler:
+ * Currently two reductions consumed per
+ * micro second spent in the dirty NIF.
+ */
+ ErtsMonotonicTime time;
+ time = erts_get_monotonic_time(esdp);
+ time -= start_time;
+ time = ERTS_MONOTONIC_TO_USEC(time);
+ time *= (CONTEXT_REDS-1)/1000 + 1;
+ ASSERT(time >= 0);
+ if (time == 0)
+ time = 1; /* At least one reduction */
+ time += esdp->virtual_reds;
+ reds_used = time > INT_MAX ? INT_MAX : (int) time;
+ }
+
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ c_p = erts_schedule(esdp, c_p, reds_used);
+
+ if (start_time >= 0) {
+ start_time = erts_get_monotonic_time(esdp);
+ ASSERT(start_time >= 0);
+ }
+ }
+
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+#ifdef DEBUG
+ pid = c_p->common.id; /* Save for debugging purposes */
+#endif
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+
+ ASSERT(!(c_p->flags & F_HIPE_MODE));
+ ERTS_MSACC_UPDATE_CACHE_X();
+
+ reg = esdp->x_reg_array;
+ {
+ Eterm* argp;
+ int i;
+
+ argp = c_p->arg_reg;
+ for (i = c_p->arity - 1; i >= 0; i--) {
+ reg[i] = argp[i];
+ CHECK_TERM(reg[i]);
+ }
+
+ /*
+ * We put the original reduction count in the process structure, to reduce
+ * the code size (referencing a field in a struct through a pointer stored
+ * in a register gives smaller code than referencing a global variable).
+ */
+
+ I = c_p->i;
+
+ ASSERT(BeamOp(op_call_nif) == (BeamInstr *) *I);
+
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
+ && (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE) == 0) {
+ c_p->fcalls = REDS_IN(c_p) = 0;
+ }
+
+ SWAPIN;
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(process_scheduled)) {
+ DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(fun_buf, DTRACE_TERM_BUF_SIZE);
+ dtrace_proc_str(c_p, process_buf);
+
+ if (ERTS_PROC_IS_EXITING(c_p)) {
+ strcpy(fun_buf, "<exiting>");
+ } else {
+ BeamInstr *fptr = find_function_from_pc(c_p->i);
+ if (fptr) {
+ dtrace_fun_decode(c_p, (Eterm)fptr[0],
+ (Eterm)fptr[1], (Uint)fptr[2],
+ NULL, fun_buf);
+ } else {
+ erts_snprintf(fun_buf, sizeof(DTRACE_CHARBUF_NAME(fun_buf)),
+ "<unknown/%p>", next);
+ }
+ }
+
+ DTRACE2(process_scheduled, process_buf, fun_buf);
+ }
+#endif
+ }
+
+ {
+ Eterm nif_bif_result;
+ Eterm bif_nif_arity;
+
+ {
+ /*
+ * call_nif is always first instruction in function:
+ *
+ * I[-3]: Module
+ * I[-2]: Function
+ * I[-1]: Arity
+ * I[0]: &&call_nif
+ * I[1]: Function pointer to NIF function
+ * I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
+ */
+ BifFunction vbf;
+
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
+
+ DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ c_p->current = I-3; /* current and vbf set to please handle_error */
+ SWAPOUT;
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ bif_nif_arity = I[-1];
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ {
+ typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
+ NifF* fp = vbf = (NifF*) I[1];
+ struct enif_environment_t env;
+ ASSERT(!c_p->scheduler_data);
+ erts_pre_dirty_nif(esdp, &env, c_p, (struct erl_module_nif*)I[2], NULL);
+ nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
+ if (env.exception_thrown)
+ nif_bif_result = THE_NON_VALUE;
+ erts_post_nif(&env);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (env.exiting) {
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ goto do_dirty_schedule;
+ }
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
+ DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ ERTS_HOLE_CHECK(c_p);
+ if (ERTS_IS_GC_DESIRED(c_p)) {
+ nif_bif_result = erts_gc_after_bif_call(c_p,
+ nif_bif_result,
+ reg, bif_nif_arity);
+ }
+ SWAPIN; /* There might have been a garbage collection. */
+ if (is_value(nif_bif_result)) {
+ r(0) = nif_bif_result;
+ CHECK_TERM(r(0));
+ I = c_p->cp;
+ c_p->cp = 0;
+ Goto(*I);
+ } else if (c_p->freason == TRAP) {
+ I = c_p->i;
+ ASSERT(!(c_p->flags & F_HIBERNATE_SCHED));
+ goto context_switch;
+ }
+ I = handle_error(c_p, c_p->cp, reg, vbf);
+ }
+ }
+ if (I == 0) {
+ goto do_dirty_schedule;
+ } else {
+ ASSERT(!is_value(r(0)));
+ SWAPIN;
+ goto context_switch;
+ }
+#endif /* ERTS_DIRTY_SCHEDULERS */
+}
+
static BifFunction
translate_gc_bif(void* gcf)
{
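
The reduction accounting in do_dirty_schedule() above can be checked in isolation: a dirty I/O scheduler always charges one reduction plus virtual reductions, while a dirty CPU scheduler charges about (CONTEXT_REDS-1)/1000 + 1 reductions per microsecond spent in the NIF. The standalone sketch below only mirrors that arithmetic; the CONTEXT_REDS value and the helper name are assumptions, not emulator code.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define CONTEXT_REDS 2000   /* assumed full time slice in reductions */

/* Mirrors the dirty-CPU branch of do_dirty_schedule(): at least one
 * reduction, roughly two per microsecond with the value above, plus
 * any virtual reductions, capped at INT_MAX. */
static int dirty_cpu_reds_used(int64_t usec_in_nif, int virtual_reds)
{
    int64_t reds = usec_in_nif * ((CONTEXT_REDS - 1) / 1000 + 1);
    if (reds == 0)
        reds = 1;               /* at least one reduction */
    reds += virtual_reds;
    return reds > INT_MAX ? INT_MAX : (int) reds;
}

int main(void)
{
    /* A NIF that ran for 2500 microseconds costs about 5000 reductions. */
    printf("%d\n", dirty_cpu_reds_used(2500, 0));
    return 0;
}
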
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 2a3bd4afe5..b18910e2c7 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -917,9 +917,6 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
goto error;
} else if (arg == am_message_queue_data) {
switch (val) {
- case am_mixed:
- so.flags &= ~(SPO_OFF_HEAP_MSGQ|SPO_ON_HEAP_MSGQ);
- break;
case am_on_heap:
so.flags &= ~SPO_OFF_HEAP_MSGQ;
so.flags |= SPO_ON_HEAP_MSGQ;
@@ -3836,59 +3833,11 @@ BIF_RETTYPE display_nl_0(BIF_ALIST_0)
/**********************************************************************/
-/* stop the system */
-/* ARGSUSED */
-BIF_RETTYPE halt_0(BIF_ALIST_0)
-{
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt()\n"));
- erts_halt(0);
- ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
-}
-
-/**********************************************************************/
#define HALT_MSG_SIZE 200
-static char halt_msg[HALT_MSG_SIZE];
-
-/* stop the system with exit code */
-/* ARGSUSED */
-BIF_RETTYPE halt_1(BIF_ALIST_1)
-{
- Uint code;
-
- if (term_to_Uint_mask(BIF_ARG_1, &code)) {
- int pos_int_code = (int) (code & INT_MAX);
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
- erts_halt(pos_int_code);
- ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
- }
- else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) {
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_exit(ERTS_ABORT_EXIT, "");
- }
- else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
- Sint i;
-
- if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE-1)) < 0) {
- goto error;
- }
- halt_msg[i] = '\0';
- VERBOSE(DEBUG_SYSTEM,("System halted by BIF halt(%T)\n", BIF_ARG_1));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg);
- }
- else
- goto error;
- return NIL; /* Pedantic (lint does not know about erts_exit) */
- error:
- BIF_ERROR(BIF_P, BADARG);
-}
-
-/**********************************************************************/
+static char halt_msg[HALT_MSG_SIZE+1];
/* stop the system with exit code and flags */
-/* ARGSUSED */
BIF_RETTYPE halt_2(BIF_ALIST_2)
{
Uint code;
@@ -3924,7 +3873,7 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
if (flush) {
erts_halt(pos_int_code);
- ERTS_BIF_YIELD1(bif_export[BIF_halt_1], BIF_P, am_undefined);
+ ERTS_BIF_YIELD2(bif_export[BIF_halt_2], BIF_P, am_undefined, am_undefined);
}
else {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
@@ -3940,9 +3889,12 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) {
Sint i;
- if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE-1)) < 0) {
- goto error;
- }
+ if ((i = intlist_to_buf(BIF_ARG_1, halt_msg, HALT_MSG_SIZE)) == -1) {
+ goto error;
+ }
+ if (i == -2) /* truncated string */
+ i = HALT_MSG_SIZE;
+ ASSERT(i >= 0 && i <= HALT_MSG_SIZE);
halt_msg[i] = '\0';
VERBOSE(DEBUG_SYSTEM,
("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2));
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 872f0f9b2a..065018514a 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -72,8 +72,6 @@ bif erlang:get/1
bif erlang:get_keys/1
bif erlang:group_leader/0
bif erlang:group_leader/2
-bif erlang:halt/0
-bif erlang:halt/1
bif erlang:halt/2
bif erlang:phash/2
bif erlang:phash2/1
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index fbe4724047..7e239d1f5d 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -123,7 +123,7 @@ struct AOFF_Carrier_t_ {
AOFF_RBTree_t rbt_node; /* My node in the carrier tree */
AOFF_RBTree_t* root; /* Root of my block tree */
};
-#define RBT_NODE_TO_MBC(PTR) ((AOFF_Carrier_t*)((char*)(PTR) - offsetof(AOFF_Carrier_t, rbt_node)))
+#define RBT_NODE_TO_MBC(PTR) ErtsContainerStruct((PTR), AOFF_Carrier_t, rbt_node)
/*
To support carrier migration we keep two kinds of rb-trees:
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 2e195db0ee..b410578d37 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1565,9 +1565,6 @@ process_info_aux(Process *BIF_P,
case F_ON_HEAP_MSGQ:
res = am_on_heap;
break;
- case 0:
- res = am_mixed;
- break;
default:
res = am_error;
ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
@@ -2809,8 +2806,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_off_heap);
case SPO_ON_HEAP_MSGQ:
BIF_RET(am_on_heap);
- case 0:
- BIF_RET(am_mixed);
default:
ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
BIF_RET(am_error);
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 374da9407c..d0d74bbf44 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1183,22 +1183,14 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end,
adjust_size = p->htop - p->heap;
}
- goto done;
}
+ else if (need_after > HEAP_SIZE(p)) {
+ grow_new_heap(p, next_heap_size(p, need_after, 0), objv, nobj);
+ adjust_size = p->htop - p->heap;
+ }
+ /*else: The heap size turned out to be just right. We are done. */
- if (HEAP_SIZE(p) >= need_after) {
- /*
- * The heap size turned out to be just right. We are done.
- */
- goto done;
- }
-
- grow_new_heap(p, next_heap_size(p, need_after, 0), objv, nobj);
- adjust_size = p->htop - p->heap;
-
- done:
ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
- ASSERT(MBUF(p) == NULL);
/* The heap usage during GC should be larger than what we end up
after a GC, even if we grow it. If this assertion is not true
@@ -1591,6 +1583,9 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
HIGH_WATER(p) = HEAP_TOP(p);
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p);
+#endif
remove_message_buffers(p);
if (p->flags & F_ON_HEAP_MSGQ)
@@ -1603,9 +1598,6 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
adjusted = adjust_after_fullsweep(p, need, objv, nobj);
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
ErtsGcQuickSanityCheck(p);
return gc_cost(size_after, adjusted ? size_after : 0);
@@ -2293,18 +2285,13 @@ move_msgq_to_heap(Process *p)
free_message_buffer(bp);
}
else {
- ErtsMessage *tmp = erts_alloc_message(0, NULL);
- sys_memcpy((void *) tmp->m, (void *) mp->m,
- sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
- tmp->next = mp->next;
- if (p->msg.save == &mp->next)
- p->msg.save = &tmp->next;
- if (p->msg.last == &mp->next)
- p->msg.last = &tmp->next;
- *mpp = tmp;
+ ErtsMessage *new_mp = erts_alloc_message(0, NULL);
+ sys_memcpy((void *) new_mp->m, (void *) mp->m,
+ sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
+ erts_msgq_replace_msg_ref(&p->msg, new_mp, mpp);
mp->next = NULL;
erts_cleanup_messages(mp);
- mp = tmp;
+ mp = new_mp;
}
}
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 0649fb68de..fbdafec4ef 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -585,7 +585,7 @@ void erts_usage(void)
erts_fprintf(stderr, "-hpds size initial process dictionary size (default %d)\n",
erts_pd_initial_size);
erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n");
- erts_fprintf(stderr, " valid values are: off_heap | on_heap | mixed\n");
+ erts_fprintf(stderr, " valid values are: off_heap | on_heap\n");
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
@@ -1526,9 +1526,7 @@ erl_start(int argc, char **argv)
erts_pd_initial_size));
} else if (has_prefix("mqd", sub_param)) {
arg = get_arg(sub_param+3, argv[i+1], &i);
- if (sys_strcmp(arg, "mixed") == 0)
- erts_default_spo_flags &= ~(SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ);
- else if (sys_strcmp(arg, "on_heap") == 0) {
+ if (sys_strcmp(arg, "on_heap") == 0) {
erts_default_spo_flags &= ~SPO_OFF_HEAP_MSGQ;
erts_default_spo_flags |= SPO_ON_HEAP_MSGQ;
}
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 579f6e427d..71ab92937d 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -1123,11 +1123,9 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
break;
case am_on_heap:
c_p->flags |= F_ON_HEAP_MSGQ;
+ c_p->flags &= ~F_OFF_HEAP_MSGQ;
erts_smp_atomic32_read_bor_nob(&c_p->state,
ERTS_PSFLG_ON_HEAP_MSGQ);
- /* fall through */
- case am_mixed:
- c_p->flags &= ~F_OFF_HEAP_MSGQ;
/*
* We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
* if a off heap change is ongoing. It will be adjusted
@@ -1151,11 +1149,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
switch (new_state) {
case am_on_heap:
break;
- case am_mixed:
- c_p->flags &= ~F_ON_HEAP_MSGQ;
- erts_smp_atomic32_read_band_nob(&c_p->state,
- ~ERTS_PSFLG_ON_HEAP_MSGQ);
- break;
case am_off_heap:
c_p->flags &= ~F_ON_HEAP_MSGQ;
erts_smp_atomic32_read_band_nob(&c_p->state,
@@ -1167,25 +1160,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
}
break;
- case 0:
- res = am_mixed;
-
- switch (new_state) {
- case am_mixed:
- break;
- case am_on_heap:
- c_p->flags |= F_ON_HEAP_MSGQ;
- erts_smp_atomic32_read_bor_nob(&c_p->state,
- ERTS_PSFLG_ON_HEAP_MSGQ);
- break;
- case am_off_heap:
- goto change_to_off_heap;
- default:
- res = THE_NON_VALUE; /* badarg */
- break;
- }
- break;
-
default:
res = am_error;
ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
@@ -1371,10 +1345,10 @@ erts_prep_msgq_for_inspection(Process *c_p, Process *rp,
mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;
- if (rp->msg.save == &bad_mp->next)
- rp->msg.save = mpp;
- if (rp->msg.last == &bad_mp->next)
- rp->msg.last = mpp;
+ ASSERT(*mpp == bad_mp);
+
+ erts_msgq_update_internal_pointers(&rp->msg, mpp, &bad_mp->next);
+
mp = mp->next;
*mpp = mp;
rp->msg.len--;
@@ -1411,12 +1385,7 @@ erts_prep_msgq_for_inspection(Process *c_p, Process *rp,
sys_memcpy((void *) tmp->m, (void *) mp->m,
sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
mpp = i == 0 ? &rp->msg.first : &mip[i-1].msgp->next;
- tmp->next = mp->next;
- if (rp->msg.save == &mp->next)
- rp->msg.save = &tmp->next;
- if (rp->msg.last == &mp->next)
- rp->msg.last = &tmp->next;
- *mpp = tmp;
+ erts_msgq_replace_msg_ref(&rp->msg, tmp, mpp);
erts_save_message_in_proc(rp, mp);
mp = tmp;
}
@@ -1756,7 +1725,7 @@ void erts_factory_trim_and_close(ErtsHeapFactory* factory,
case FACTORY_MESSAGE: {
ErtsMessage *mp = factory->message;
if (mp->data.attached == ERTS_MSG_COMBINED_HFRAG) {
- if (!mp->hfrag.next) {
+ if (!factory->heap_frags) {
Uint sz = factory->hp - factory->hp_start;
mp = erts_shrink_message(mp, sz, brefs, brefs_size);
factory->message = mp;
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 46063ea0c2..6df969367b 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -366,6 +366,12 @@ ERTS_GLB_FORCE_INLINE ErtsMessage *erts_shrink_message(ErtsMessage *mp, Uint sz,
ERTS_GLB_FORCE_INLINE void erts_free_message(ErtsMessage *mp);
ERTS_GLB_INLINE Uint erts_used_frag_sz(const ErlHeapFragment*);
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg);
+ERTS_GLB_INLINE void erts_msgq_update_internal_pointers(ErlMessageQueue *msgq,
+ ErtsMessage **newpp,
+ ErtsMessage **oldpp);
+ERTS_GLB_INLINE void erts_msgq_replace_msg_ref(ErlMessageQueue *msgq,
+ ErtsMessage *newp,
+ ErtsMessage **oldpp);
#define ERTS_MSG_COMBINED_HFRAG ((void *) 0x1)
@@ -468,6 +474,29 @@ ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErtsMessage *msg)
return sz;
}
}
+
+ERTS_GLB_INLINE void
+erts_msgq_update_internal_pointers(ErlMessageQueue *msgq,
+ ErtsMessage **newpp,
+ ErtsMessage **oldpp)
+{
+ if (msgq->save == oldpp)
+ msgq->save = newpp;
+ if (msgq->last == oldpp)
+ msgq->last = newpp;
+ if (msgq->saved_last == oldpp)
+ msgq->saved_last = newpp;
+}
+
+ERTS_GLB_INLINE void
+erts_msgq_replace_msg_ref(ErlMessageQueue *msgq, ErtsMessage *newp, ErtsMessage **oldpp)
+{
+ ErtsMessage *oldp = *oldpp;
+ newp->next = oldp->next;
+ erts_msgq_update_internal_pointers(msgq, &newp->next, &oldp->next);
+ *oldpp = newp;
+}
+
#endif
#endif
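
The two inline helpers above centralize the pointer fix-up that erl_gc.c and erl_message.c previously open-coded when swapping one message struct for another. A self-contained illustration of the same pattern, using simplified stand-ins for ErtsMessage and ErlMessageQueue (the real queue also tracks saved_last):

#include <assert.h>
#include <stddef.h>

typedef struct Msg { struct Msg *next; int term; } Msg;          /* stand-in */
typedef struct { Msg *first; Msg **last; Msg **save; } Queue;    /* stand-in */

/* Like erts_msgq_update_internal_pointers(): any internal pointer that
 * referenced the old 'next' field must now reference the new one. */
static void update_internal_pointers(Queue *q, Msg **newpp, Msg **oldpp)
{
    if (q->save == oldpp) q->save = newpp;
    if (q->last == oldpp) q->last = newpp;
}

/* Like erts_msgq_replace_msg_ref(): splice newp in where *oldpp sits. */
static void replace_msg_ref(Queue *q, Msg *newp, Msg **oldpp)
{
    Msg *oldp = *oldpp;
    newp->next = oldp->next;
    update_internal_pointers(q, &newp->next, &oldp->next);
    *oldpp = newp;
}

int main(void)
{
    Msg a = { NULL, 1 }, b = { NULL, 2 };
    Queue q = { &a, &a.next, &a.next };
    replace_msg_ref(&q, &b, &q.first);   /* replace the only message */
    assert(q.first == &b && q.last == &b.next && q.save == &b.next);
    return 0;
}
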
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 606b73c7b5..f1c35cd5a5 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -178,9 +178,6 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, size_t may_need)
void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
Process* tracee)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
- ErtsSchedulerData *esdp;
-#endif
env->mod_nif = mod_nif;
env->proc = p;
env->hp = HEAP_TOP(p);
@@ -193,57 +190,65 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
ASSERT(p->common.id != ERTS_INVALID_PID);
-#ifdef ERTS_DIRTY_SCHEDULERS
- esdp = erts_get_scheduler_data();
- ASSERT(esdp);
+#if defined(DEBUG) && defined(ERTS_DIRTY_SCHEDULERS)
+ {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
-#ifdef DEBUG
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
- ASSERT(p->scheduler_data == esdp);
- ASSERT((state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS))
- && !(state & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ ASSERT(p->scheduler_data == esdp);
+ ASSERT((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS))
+ && !(state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ }
+ }
#endif
+}
- }
- else {
- Process *sproc;
+void erts_pre_dirty_nif(ErtsSchedulerData *esdp,
+ ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
+ Process* tracee)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ Process *sproc;
#ifdef DEBUG
- erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
- ASSERT(!p->scheduler_data);
- ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
- && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(!p->scheduler_data);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+ ASSERT(esdp);
#endif
- sproc = esdp->dirty_shadow_process;
- ASSERT(sproc);
- ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
- ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
- == (ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_PROXY));
-
- sproc->next = p;
- sproc->common.id = p->common.id;
- sproc->htop = p->htop;
- sproc->stop = p->stop;
- sproc->hend = p->hend;
- sproc->heap = p->heap;
- sproc->abandoned_heap = p->abandoned_heap;
- sproc->heap_sz = p->heap_sz;
- sproc->high_water = p->high_water;
- sproc->old_hend = p->old_hend;
- sproc->old_htop = p->old_htop;
- sproc->old_heap = p->old_heap;
- sproc->mbuf = NULL;
- sproc->mbuf_sz = 0;
- ERTS_INIT_OFF_HEAP(&sproc->off_heap);
- env->proc = sproc;
- }
+ erts_pre_nif(env, p, mod_nif, tracee);
+
+ sproc = esdp->dirty_shadow_process;
+ ASSERT(sproc);
+ ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
+ == (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+
+ sproc->next = p;
+ sproc->common.id = p->common.id;
+ sproc->htop = p->htop;
+ sproc->stop = p->stop;
+ sproc->hend = p->hend;
+ sproc->heap = p->heap;
+ sproc->abandoned_heap = p->abandoned_heap;
+ sproc->heap_sz = p->heap_sz;
+ sproc->high_water = p->high_water;
+ sproc->old_hend = p->old_hend;
+ sproc->old_htop = p->old_htop;
+ sproc->old_heap = p->old_heap;
+ sproc->mbuf = NULL;
+ sproc->mbuf_sz = 0;
+ ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+ env->proc = sproc;
#endif
}
@@ -623,10 +628,28 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
}
} else {
Uint sz = size_object(msg);
+ ErlOffHeap *ohp;
Eterm *hp;
- mp = erts_alloc_message(sz, &hp);
- msg = copy_struct(msg, sz, &hp, &mp->hfrag.off_heap);
- ASSERT(hp == mp->hfrag.mem+mp->hfrag.used_size);
+ if (env && !env->tracee) {
+ flush_env(env);
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
+ cache_env(env);
+ }
+ else {
+ erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state);
+ if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
+ mp = erts_alloc_message(sz, &hp);
+ ohp = sz == 0 ? NULL : &mp->hfrag.off_heap;
+ }
+ else {
+ ErlHeapFragment *bp = new_message_buffer(sz);
+ mp = erts_alloc_message(0, NULL);
+ mp->data.heap_frag = bp;
+ hp = bp->mem;
+ ohp = &bp->off_heap;
+ }
+ }
+ msg = copy_struct(msg, sz, &hp, ohp);
}
ERL_MESSAGE_TERM(mp) = msg;
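
The new branch in enif_send() above picks where the copied message term is allocated: straight onto the receiver's heap via erts_alloc_message_heap() when a process-bound, non-tracing environment is available, a combined message/heap fragment when the receiver uses an off-heap message queue, and a separate heap fragment otherwise. A compact decision sketch with hypothetical flags (not the real API):

/* Hypothetical illustration of the allocation choice; the enum and the
 * boolean parameters are stand-ins for the checks made in enif_send(). */
enum msg_alloc { RECEIVER_HEAP, COMBINED_MSG_HFRAG, SEPARATE_HFRAG };

static enum msg_alloc choose_msg_alloc(int proc_bound_env, int tracee_env,
                                       int receiver_off_heap_msgq)
{
    if (proc_bound_env && !tracee_env)
        return RECEIVER_HEAP;       /* erts_alloc_message_heap() (may still
                                       fall back to a fragment internally) */
    if (receiver_off_heap_msgq)
        return COMBINED_MSG_HFRAG;  /* erts_alloc_message(sz, &hp) */
    return SEPARATE_HFRAG;          /* new_message_buffer(sz) attached to mp */
}

int main(void)
{
    return choose_msg_alloc(1, 0, 0) == RECEIVER_HEAP ? 0 : 1;
}
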
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index a853ec585b..c0b1d7246c 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -149,7 +149,7 @@ extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = 0;
+int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = SPO_ON_HEAP_MSGQ;
int ERTS_WRITE_UNLIKELY(erts_eager_check_io) = 1;
int ERTS_WRITE_UNLIKELY(erts_sched_compact_load);
int ERTS_WRITE_UNLIKELY(erts_sched_balance_util) = 0;
@@ -8197,7 +8197,7 @@ sched_dirty_cpu_thread_func(void *vesdp)
#endif
erts_thread_init_float();
- process_main();
+ erts_dirty_process_main(esdp);
/* No schedulers should *ever* terminate */
erts_exit(ERTS_ABORT_EXIT,
"Dirty CPU scheduler thread number %beu terminated\n",
@@ -8242,7 +8242,7 @@ sched_dirty_io_thread_func(void *vesdp)
#endif
erts_thread_init_float();
- process_main();
+ erts_dirty_process_main(esdp);
/* No schedulers should *ever* terminate */
erts_exit(ERTS_ABORT_EXIT,
"Dirty I/O scheduler thread number %beu terminated\n",
@@ -9377,77 +9377,6 @@ scheduler_gc_proc(Process *c_p, int reds_left)
return reds;
}
-static ERTS_INLINE void
-clean_dirty_start(Process *p)
-{
-#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ARCH_64)
- void *ptr = ERTS_PROC_SET_DIRTY_CPU_START(p, NULL);
- if (ptr)
- erts_free(ERTS_ALC_T_DIRTY_START, ptr);
-#endif
-}
-
-static ERTS_INLINE void
-save_dirty_start(ErtsSchedulerData *esdp, Process *c_p)
-{
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
- ErtsMonotonicTime time = erts_get_monotonic_time(esdp);
-#ifdef ARCH_64
- ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) time);
-#else
- ErtsMonotonicTime *stimep;
-
- stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
- if (!stimep) {
- stimep = erts_alloc(ERTS_ALC_T_DIRTY_START,
- sizeof(ErtsMonotonicTime));
- ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) stimep);
- }
- *stimep = time;
-#endif
- }
-#endif
-}
-
-static ERTS_INLINE int
-get_dirty_reds(ErtsSchedulerData *esdp, Process *c_p)
-{
-
-#ifndef ERTS_DIRTY_SCHEDULERS
- return -1;
-#else
- ErtsMonotonicTime stime, time;
-
- if (!ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue))
- return 1;
-
-#ifdef ARCH_64
- stime = (ErtsMonotonicTime) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
-#else
- {
- ErtsMonotonicTime *stimep;
- stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
- ASSERT(stimep);
- stime = *stimep;
- }
-#endif
-
- time = erts_get_monotonic_time(esdp);
-
- ASSERT(stime && stime < time);
-
- time -= stime;
- time = ERTS_MONOTONIC_TO_USEC(time);
- time *= 2;
-
- if (time > INT_MAX)
- return INT_MAX;
- return (int) time;
-#endif
-
-}
-
/*
* schedule() is called from BEAM (process_main()) or HiPE
* (hipe_mode_switch()) when the current process is to be
@@ -9466,11 +9395,10 @@ get_dirty_reds(ErtsSchedulerData *esdp, Process *c_p)
* so that normal processes get to run more frequently.
*/
-Process *schedule(Process *p, int calls)
+Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
{
Process *proxy_p = NULL;
ErtsRunQueue *rq;
- ErtsSchedulerData *esdp;
int context_reds;
int fcalls;
int input_reductions;
@@ -9507,8 +9435,19 @@ Process *schedule(Process *p, int calls)
* Clean up after the process being scheduled out.
*/
if (!p) { /* NULL in the very first schedule() call */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ is_normal_sched = !esdp;
+ if (is_normal_sched) {
+ esdp = erts_get_scheduler_data();
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+ else {
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+#else
esdp = erts_get_scheduler_data();
- is_normal_sched = !ERTS_SCHEDULER_IS_DIRTY(esdp);
+ is_normal_sched = 1;
+#endif
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls);
@@ -9517,12 +9456,12 @@ Process *schedule(Process *p, int calls)
} else {
#ifdef ERTS_SMP
#ifdef ERTS_DIRTY_SCHEDULERS
- esdp = p->scheduler_data;
- is_normal_sched = esdp != NULL;
- if (is_normal_sched)
+ is_normal_sched = !esdp;
+ if (is_normal_sched) {
+ esdp = p->scheduler_data;
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
else {
- esdp = erts_get_scheduler_data();
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
}
#else
@@ -9541,10 +9480,7 @@ Process *schedule(Process *p, int calls)
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- if (is_normal_sched)
- reds = actual_reds = calls - esdp->virtual_reds;
- else
- reds = actual_reds = get_dirty_reds(esdp, p);
+ reds = actual_reds = calls - esdp->virtual_reds;
ASSERT(actual_reds >= 0);
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
@@ -9994,17 +9930,10 @@ Process *schedule(Process *p, int calls)
calls = 0;
reds = context_reds;
-#ifdef ERTS_SMP
-
erts_smp_runq_unlock(rq);
-#endif /* ERTS_SMP */
-
}
- if (!is_normal_sched)
- save_dirty_start(esdp, p);
-
#ifdef ERTS_SMP
if (flags & ERTS_RUNQ_FLG_PROTECTED)
@@ -11206,6 +11135,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
flags |= F_ON_HEAP_MSGQ;
}
+ ASSERT((flags & F_ON_HEAP_MSGQ) || (flags & F_OFF_HEAP_MSGQ));
+
if (!rq)
rq = erts_get_runq_proc(parent);
@@ -11218,6 +11149,11 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
goto error;
}
+ ASSERT((erts_smp_atomic32_read_nob(&p->state)
+ & ERTS_PSFLG_ON_HEAP_MSGQ)
+ || (erts_smp_atomic32_read_nob(&p->state)
+ & ERTS_PSFLG_OFF_HEAP_MSGQ));
+
#ifdef BM_COUNTERS
processes_busy++;
#endif
@@ -11738,8 +11674,6 @@ delete_process(Process* p)
if (nif_export)
erts_destroy_nif_export(nif_export);
- clean_dirty_start(p);
-
/* Cleanup psd */
psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd);
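
With schedule() renamed to erts_schedule(), the scheduler data is now passed explicitly: a normal scheduler passes NULL (the esdp is then taken from p->scheduler_data or the thread-specific data), while a dirty scheduler must hand over its own esdp, as erts_dirty_process_main() does. A caller-side sketch of the two conventions; only the prototype comes from erl_process.h, the wrappers are illustrative:

/* Stand-in forward declarations; the real types live in erl_process.h. */
typedef struct Process Process;
typedef struct ErtsSchedulerData ErtsSchedulerData;

Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int reds_used);

static Process *normal_sched_yield(Process *c_p, int reds_used)
{
    /* Normal scheduler: esdp is deduced inside erts_schedule(). */
    return erts_schedule(NULL, c_p, reds_used);
}

static Process *dirty_sched_yield(ErtsSchedulerData *esdp, Process *c_p,
                                  int reds_used)
{
    /* Dirty scheduler: the caller supplies its own scheduler data. */
    return erts_schedule(esdp, c_p, reds_used);
}
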
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index b44ac442aa..7c98b60647 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -810,25 +810,13 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_DELAYED_GC_TASK_QS 4
#define ERTS_PSD_NIF_TRAP_EXPORT 5
#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 6
-#define ERTS_PSD_DIRTY_CPU_START 7
-#define ERTS_PSD_SIZE 8
+#define ERTS_PSD_SIZE 7
-#if !defined(HIPE) && !defined(ERTS_DIRTY_SCHEDULERS)
+#if !defined(HIPE)
# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
-# undef ERTS_PSD_DIRTY_CPU_START
# undef ERTS_PSD_SIZE
# define ERTS_PSD_SIZE 6
-#elif !defined(HIPE)
-# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
-# undef ERTS_PSD_DIRTY_CPU_START
-# undef ERTS_PSD_SIZE
-# define ERTS_PSD_DIRTY_CPU_START 6
-# define ERTS_PSD_SIZE 7
-#elif !defined(ERTS_DIRTY_SCHEDULERS)
-# undef ERTS_PSD_DIRTY_CPU_START
-# undef ERTS_PSD_SIZE
-# define ERTS_PSD_SIZE 7
#endif
typedef struct {
@@ -1179,6 +1167,9 @@ void erts_check_for_holes(Process* p);
* USR_PRIO -> User prio. i.e., prio the user has set.
* PRQ_PRIO -> Prio queue prio, i.e., prio queue currently
* enqueued in.
+ *
+ * Update etp-proc-state-int in $ERL_TOP/erts/etc/unix/etp-commands.in
+ * when changing ERTS_PSFLG_*.
*/
#define ERTS_PSFLGS_ACT_PRIO_MASK \
(ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_ACT_PRIO_OFFSET)
@@ -1831,7 +1822,7 @@ Eterm erts_get_schedulers_binds(Process *c_p);
Eterm erts_set_cpu_topology(Process *c_p, Eterm term);
Eterm erts_bind_schedulers(Process *c_p, Eterm how);
ErtsRunQueue *erts_schedid2runq(Uint);
-Process *schedule(Process*, int);
+Process *erts_schedule(ErtsSchedulerData *, Process*, int);
void erts_schedule_misc_op(void (*)(void *), void *);
Eterm erl_create_process(Process*, Eterm, Eterm, Eterm, ErlSpawnOpts*);
void erts_do_exit_process(Process*, Eterm);
@@ -2061,13 +2052,6 @@ erts_psd_set(Process *p, int ix, void *data)
((struct saved_calls *) erts_psd_set((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF, (void *) (SCB)))
#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PROC_GET_DIRTY_CPU_START(P) \
- ((void *) erts_psd_get((P), ERTS_PSD_DIRTY_CPU_START))
-#define ERTS_PROC_SET_DIRTY_CPU_START(P, DCS) \
- ((void *) erts_psd_set((P), ERTS_PSD_DIRTY_CPU_START, (void *) (DCS)))
-#endif
-
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p, Eterm handler);
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index ca001fc156..3dca58d60b 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2625,7 +2625,7 @@ static void init_tracer_template(ErtsTracerNif *tnif) {
/* default tracer functions */
tnif->tracers[TRACE_FUN_DEFAULT].name = "trace";
- tnif->tracers[TRACE_FUN_DEFAULT].arity = 6;
+ tnif->tracers[TRACE_FUN_DEFAULT].arity = 5;
tnif->tracers[TRACE_FUN_DEFAULT].cb = NULL;
tnif->tracers[TRACE_FUN_ENABLED].name = "enabled";
@@ -2634,35 +2634,35 @@ static void init_tracer_template(ErtsTracerNif *tnif) {
/* specific tracer functions */
tnif->tracers[TRACE_FUN_T_SEND].name = "trace_send";
- tnif->tracers[TRACE_FUN_T_SEND].arity = 6;
+ tnif->tracers[TRACE_FUN_T_SEND].arity = 5;
tnif->tracers[TRACE_FUN_T_SEND].cb = NULL;
tnif->tracers[TRACE_FUN_T_RECEIVE].name = "trace_receive";
- tnif->tracers[TRACE_FUN_T_RECEIVE].arity = 6;
+ tnif->tracers[TRACE_FUN_T_RECEIVE].arity = 5;
tnif->tracers[TRACE_FUN_T_RECEIVE].cb = NULL;
tnif->tracers[TRACE_FUN_T_CALL].name = "trace_call";
- tnif->tracers[TRACE_FUN_T_CALL].arity = 6;
+ tnif->tracers[TRACE_FUN_T_CALL].arity = 5;
tnif->tracers[TRACE_FUN_T_CALL].cb = NULL;
tnif->tracers[TRACE_FUN_T_SCHED_PROC].name = "trace_running_procs";
- tnif->tracers[TRACE_FUN_T_SCHED_PROC].arity = 6;
+ tnif->tracers[TRACE_FUN_T_SCHED_PROC].arity = 5;
tnif->tracers[TRACE_FUN_T_SCHED_PROC].cb = NULL;
tnif->tracers[TRACE_FUN_T_SCHED_PORT].name = "trace_running_ports";
- tnif->tracers[TRACE_FUN_T_SCHED_PORT].arity = 6;
+ tnif->tracers[TRACE_FUN_T_SCHED_PORT].arity = 5;
tnif->tracers[TRACE_FUN_T_SCHED_PORT].cb = NULL;
tnif->tracers[TRACE_FUN_T_GC].name = "trace_garbage_collection";
- tnif->tracers[TRACE_FUN_T_GC].arity = 6;
+ tnif->tracers[TRACE_FUN_T_GC].arity = 5;
tnif->tracers[TRACE_FUN_T_GC].cb = NULL;
tnif->tracers[TRACE_FUN_T_PROCS].name = "trace_procs";
- tnif->tracers[TRACE_FUN_T_PROCS].arity = 6;
+ tnif->tracers[TRACE_FUN_T_PROCS].arity = 5;
tnif->tracers[TRACE_FUN_T_PROCS].cb = NULL;
tnif->tracers[TRACE_FUN_T_PORTS].name = "trace_ports";
- tnif->tracers[TRACE_FUN_T_PORTS].arity = 6;
+ tnif->tracers[TRACE_FUN_T_PORTS].arity = 5;
tnif->tracers[TRACE_FUN_T_PORTS].cb = NULL;
/* specific enabled functions */
@@ -2834,10 +2834,12 @@ send_to_tracer_nif_raw(Process *c_p, Process *tracee,
Eterm tag, Eterm msg, Eterm extra, Eterm pam_result)
{
if (tnif || (tnif = lookup_tracer_nif(tracer)) != NULL) {
-#define MAP_SIZE 3
- Eterm argv[6], local_heap[3+MAP_SIZE /* values */ + (MAP_SIZE+1 /* keys */)];
+#define MAP_SIZE 4
+ Eterm argv[5], local_heap[3+MAP_SIZE /* values */ + (MAP_SIZE+1 /* keys */)];
flatmap_t *map = (flatmap_t*)(local_heap+(MAP_SIZE+1));
Eterm *map_values = flatmap_get_values(map);
+ Eterm *map_keys = local_heap + 1;
+ Uint map_elem_count = 0;
topt = (tnif->tracers[topt].cb) ? topt : TRACE_FUN_DEFAULT;
ASSERT(topt < NIF_TRACER_TYPES);
@@ -2846,31 +2848,40 @@ send_to_tracer_nif_raw(Process *c_p, Process *tracee,
argv[1] = ERTS_TRACER_STATE(tracer);
argv[2] = t_p_id;
argv[3] = msg;
- argv[4] = extra == THE_NON_VALUE ? am_undefined : extra;
- argv[5] = make_flatmap(map);
+ argv[4] = make_flatmap(map);
map->thing_word = MAP_HEADER_FLATMAP;
- map->size = MAP_SIZE;
- map->keys = TUPLE3(local_heap, am_match_spec_result, am_scheduler_id, am_timestamp);
-
- *map_values++ = pam_result;
- if (tracee_flags & F_TRACE_SCHED_NO)
- *map_values++ = make_small(erts_get_scheduler_id());
- else
- *map_values++ = am_undefined;
+
+ if (extra != THE_NON_VALUE) {
+ map_keys[map_elem_count] = am_extra;
+ map_values[map_elem_count++] = extra;
+ }
+
+ if (pam_result != am_true) {
+ map_keys[map_elem_count] = am_match_spec_result;
+ map_values[map_elem_count++] = pam_result;
+ }
+
+ if (tracee_flags & F_TRACE_SCHED_NO) {
+ map_keys[map_elem_count] = am_scheduler_id;
+ map_values[map_elem_count++] = make_small(erts_get_scheduler_id());
+ }
+ map_keys[map_elem_count] = am_timestamp;
if (tracee_flags & F_NOW_TS)
#ifdef HAVE_ERTS_NOW_CPU
if (erts_cpu_timestamp)
- *map_values++ = am_cpu_timestamp;
+ map_values[map_elem_count++] = am_cpu_timestamp;
else
#endif
- *map_values++ = am_timestamp;
+ map_values[map_elem_count++] = am_timestamp;
else if (tracee_flags & F_STRICT_MON_TS)
- *map_values++ = am_strict_monotonic;
+ map_values[map_elem_count++] = am_strict_monotonic;
else if (tracee_flags & F_MON_TS)
- *map_values++ = am_monotonic;
- else
- *map_values++ = am_undefined;
+ map_values[map_elem_count++] = am_monotonic;
+
+ map->size = map_elem_count;
+ map->keys = make_tuple(local_heap);
+ local_heap[0] = make_arityval(map_elem_count);
#undef MAP_SIZE
erts_nif_call_function(c_p, tracee ? tracee : c_p,
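
After this change the tracer callbacks take five arguments and the trailing map only carries the keys that actually apply: extra, match_spec_result, scheduler_id and a timestamp flag. A simplified model of how the key/value arrays are filled (plain strings standing in for the atoms and terms of the real flatmap):

#include <stdio.h>

#define MAX_META_KEYS 4

/* Returns the number of populated slots, i.e. what becomes map->size. */
static int build_trace_meta(const char *keys[MAX_META_KEYS],
                            const char *vals[MAX_META_KEYS],
                            int have_extra, int pam_result_not_true,
                            int sched_id_tracing, const char *ts_flag)
{
    int n = 0;
    if (have_extra)          { keys[n] = "extra";             vals[n++] = "<extra>"; }
    if (pam_result_not_true) { keys[n] = "match_spec_result"; vals[n++] = "<pam>"; }
    if (sched_id_tracing)    { keys[n] = "scheduler_id";      vals[n++] = "<id>"; }
    if (ts_flag)             { keys[n] = "timestamp";         vals[n++] = ts_flag; }
    return n;
}

int main(void)
{
    const char *k[MAX_META_KEYS], *v[MAX_META_KEYS];
    int i, n = build_trace_meta(k, v, 0, 0, 1, "monotonic");
    for (i = 0; i < n; i++)
        printf("%s => %s\n", k[i], v[i]);
    return 0;
}
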
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 02c24557c1..2a19211987 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -31,7 +31,7 @@
#define EXPORT_INITIAL_SIZE 4000
#define EXPORT_LIMIT (512*1024)
-#define EXPORT_HASH(m,f,a) ((m)*(f)+(a))
+#define EXPORT_HASH(m,f,a) ((atom_val(m) * atom_val(f)) ^ (a))
#ifdef DEBUG
# define IF_DEBUG(x) x
@@ -79,8 +79,7 @@ struct export_templ
static struct export_blob* entry_to_blob(struct export_entry* ee)
{
- return (struct export_blob*)
- ((char*)ee->ep - offsetof(struct export_blob,exp));
+ return ErtsContainerStruct(ee->ep, struct export_blob, exp);
}
void
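
The new export hash mixes the atom-table indices of module and function and xors in the arity, rather than combining the tagged atom terms directly. A standalone rendering of the same formula, with atom_val() replaced by plain index arguments (purely illustrative):

#include <stdio.h>

typedef unsigned long HashValue;   /* stand-in for the emulator's hash type */

/* Same shape as the new EXPORT_HASH(m,f,a), with atom table indices
 * passed in directly instead of extracted via atom_val(). */
static HashValue export_hash(HashValue m_ix, HashValue f_ix, HashValue arity)
{
    return (m_ix * f_ix) ^ arity;
}

int main(void)
{
    /* e.g. some module:function/2 with made-up atom indices 17 and 4711 */
    printf("%lu\n", export_hash(17, 4711, 2));
    return 0;
}
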
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index b76b9cd874..f3d4ac56cd 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -62,6 +62,9 @@ struct enif_environment_t /* ErlNifEnv */
extern void erts_pre_nif(struct enif_environment_t*, Process*,
struct erl_module_nif*, Process* tracee);
extern void erts_post_nif(struct enif_environment_t* env);
+extern void erts_pre_dirty_nif(ErtsSchedulerData *,
+ struct enif_environment_t*, Process*,
+ struct erl_module_nif*, Process* tracee);
extern Eterm erts_nif_taints(Process* p);
extern void erts_print_nif_taints(int to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
@@ -1152,6 +1155,7 @@ void print_pass_through(int, byte*, int);
int catchlevel(Process*);
void init_emulator(void);
void process_main(void);
+void erts_dirty_process_main(ErtsSchedulerData *);
Eterm build_stacktrace(Process* c_p, Eterm exc);
Eterm expand_error_value(Process* c_p, Uint freason, Eterm Value);
void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth);
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 0377f6cb5e..01df5476db 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -938,18 +938,32 @@ int erts_port_handle_xports(Port *prt)
** -2 on type error
*/
-#define SET_VEC(iov, bv, bin, ptr, len, vlen) do { \
- (iov)->iov_base = (ptr); \
- (iov)->iov_len = (len); \
- if (sizeof((iov)->iov_len) < sizeof(len) \
- /* Check if (len) overflowed (iov)->iov_len */ \
- && (iov)->iov_len != (len)) { \
- goto L_overflow; \
- } \
- *(bv)++ = (bin); \
- (iov)++; \
- (vlen)++; \
-} while(0)
+#ifdef DEBUG
+#define MAX_SYSIOVEC_IOVLEN (1ull << (32 - 1))
+#else
+#define MAX_SYSIOVEC_IOVLEN (1ull << (sizeof(((SysIOVec*)0)->iov_len) * 8 - 1))
+#endif
+
+static ERTS_INLINE void
+io_list_to_vec_set_vec(SysIOVec **iov, ErlDrvBinary ***binv,
+ ErlDrvBinary *bin, byte *ptr, Uint len,
+ int *vlen)
+{
+ while (len > MAX_SYSIOVEC_IOVLEN) {
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = MAX_SYSIOVEC_IOVLEN;
+ ptr += MAX_SYSIOVEC_IOVLEN;
+ len -= MAX_SYSIOVEC_IOVLEN;
+ (*iov)++;
+ (*vlen)++;
+ *(*binv)++ = bin;
+ }
+ (*iov)->iov_base = ptr;
+ (*iov)->iov_len = len;
+ *(*binv)++ = bin;
+ (*iov)++;
+ (*vlen)++;
+}
static int
io_list_to_vec(Eterm obj, /* io-list */
@@ -960,11 +974,11 @@ io_list_to_vec(Eterm obj, /* io-list */
{
DECLARE_ESTACK(s);
Eterm* objp;
- char *buf = cbin->orig_bytes;
+ byte *buf = (byte*)cbin->orig_bytes;
Uint len = cbin->orig_size;
Uint csize = 0;
int vlen = 0;
- char* cptr = buf;
+ byte* cptr = buf;
goto L_jump_start; /* avoid push */
@@ -1032,15 +1046,17 @@ io_list_to_vec(Eterm obj, /* io-list */
len -= size;
} else {
if (csize != 0) {
- SET_VEC(iov, binv, cbin, cptr, csize, vlen);
+ io_list_to_vec_set_vec(&iov, &binv, cbin,
+ cptr, csize, &vlen);
cptr = buf;
csize = 0;
}
if (pb->flags) {
erts_emasculate_writable_binary(pb);
}
- SET_VEC(iov, binv, Binary2ErlDrvBinary(pb->val),
- pb->bytes+offset, size, vlen);
+ io_list_to_vec_set_vec(
+ &iov, &binv, Binary2ErlDrvBinary(pb->val),
+ pb->bytes+offset, size, &vlen);
}
} else {
ErlHeapBin* hb = (ErlHeapBin *) bptr;
@@ -1060,7 +1076,7 @@ io_list_to_vec(Eterm obj, /* io-list */
}
if (csize != 0) {
- SET_VEC(iov, binv, cbin, cptr, csize, vlen);
+ io_list_to_vec_set_vec(&iov, &binv, cbin, cptr, csize, &vlen);
}
DESTROY_ESTACK(s);
@@ -1086,10 +1102,13 @@ do { \
if (_bitsize != 0) goto L_type_error; \
if (thing_subtag(*binary_val(_real)) == REFC_BINARY_SUBTAG && \
_bitoffs == 0) { \
- b_size += _size; \
+ b_size += _size; \
if (b_size < _size) goto L_overflow_error; \
in_clist = 0; \
- v_size++; \
+ v_size++; \
+ /* If iov_len is smaller than Uint we split the binary into*/ \
+ /* multiple smaller (2GB) elements in the iolist.*/ \
+ v_size += _size / MAX_SYSIOVEC_IOVLEN; \
if (_size >= ERL_SMALL_IO_BIN_LIMIT) { \
p_in_clist = 0; \
p_v_size++; \
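
Because iov_len may be narrower than Uint, a single binary larger than MAX_SYSIOVEC_IOVLEN is now emitted as several iovec entries, and the size estimate above reserves one extra slot per MAX_SYSIOVEC_IOVLEN of binary data. A small check of that reservation arithmetic, using the 32-bit (2 GB) limit from the DEBUG definition as an assumption:

#include <stdint.h>
#include <stdio.h>

#define MAX_SYSIOVEC_IOVLEN (1ull << (32 - 1))   /* assumed 2 GB limit */

/* Slots reserved for one binary of 'size' bytes: an upper bound on the
 * entries the splitting loop in io_list_to_vec_set_vec() will emit. */
static uint64_t iovec_slots(uint64_t size)
{
    return 1 + size / MAX_SYSIOVEC_IOVLEN;
}

int main(void)
{
    /* A 5 GB binary is split into three entries; three slots are reserved. */
    printf("%llu\n", (unsigned long long) iovec_slots(5ull << 30));
    return 0;
}
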
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index f303d4f167..9a205d50d3 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -97,7 +97,7 @@
((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
#define ErtsContainerStruct(ptr, type, member) \
- (type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member))
+ ((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)))
#if defined (__WIN32__)
# include "erl_win_sys.h"
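
ErtsContainerStruct() is the usual container_of idiom, now wrapped in an extra pair of parentheses so it composes safely inside larger expressions, as RBT_NODE_TO_MBC() and entry_to_blob() above rely on. A minimal standalone demonstration with throwaway struct types:

#include <assert.h>
#include <stddef.h>

#define ErtsContainerStruct(ptr, type, member) \
    ((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)))

struct node    { int key; };                      /* throwaway types */
struct carrier { int id; struct node rbt_node; };

int main(void)
{
    struct carrier c = { 42, { 7 } };
    struct node *n = &c.rbt_node;
    /* Recover the enclosing carrier from a pointer to its embedded node. */
    assert(ErtsContainerStruct(n, struct carrier, rbt_node) == &c);
    return 0;
}
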
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index cedc88e5fe..675fafa726 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -2210,7 +2210,9 @@ do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
#ifndef ERTS_SMP
#ifdef USE_THREADS
- if (erts_get_scheduler_data()) /* Must be scheduler thread */
+ if (!erts_get_scheduler_data()) /* Must be scheduler thread */
+ *p = NULL;
+ else
#endif
{
*p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
@@ -2226,18 +2228,10 @@ do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
}
/* So we have an error logger, lets build the message */
- if (sz <= HeapWordsLeft(*p)) {
- *ohp = &MSO(*p);
- *hp = HEAP_TOP(*p);
- HEAP_TOP(*p) += sz;
- } else {
-#endif
- *bp = new_message_buffer(sz);
- *ohp = &(*bp)->off_heap;
- *hp = (*bp)->mem;
-#ifndef ERTS_SMP
- }
#endif
+ *bp = new_message_buffer(sz);
+ *ohp = &(*bp)->off_heap;
+ *hp = (*bp)->mem;
return (is_nil(gleader)
? am_noproc
@@ -3893,8 +3887,10 @@ void bin_write(int to, void *to_arg, byte* buf, size_t sz)
}
/* Fill buf with the contents of bytelist list
- return number of chars in list or -1 for error */
-
+ * return number of chars in list
+ * or -1 for type error
+ * or -2 for not enough buffer space (buffer contains truncated result)
+ */
Sint
intlist_to_buf(Eterm list, char *buf, Sint len)
{
@@ -3917,7 +3913,7 @@ intlist_to_buf(Eterm list, char *buf, Sint len)
return -1;
listptr = list_val(*(listptr + 1));
}
- return -1; /* not enough space */
+ return -2; /* not enough space */
}
/*