Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/beam/atom.names                |   1
-rw-r--r--  erts/emulator/beam/bif.c                     |  15
-rw-r--r--  erts/emulator/beam/bif.tab                   |   4
-rw-r--r--  erts/emulator/beam/binary.c                  |   5
-rw-r--r--  erts/emulator/beam/break.c                   |  21
-rw-r--r--  erts/emulator/beam/bs_instrs.tab             |   3
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c          | 191
-rw-r--r--  erts/emulator/beam/erl_bif_port.c            |  21
-rw-r--r--  erts/emulator/beam/erl_db.c                  |  28
-rw-r--r--  erts/emulator/beam/erl_gc.c                  |  48
-rw-r--r--  erts/emulator/beam/erl_init.c                |   1
-rw-r--r--  erts/emulator/beam/erl_io_queue.c            |  36
-rw-r--r--  erts/emulator/beam/erl_message.c             |   6
-rw-r--r--  erts/emulator/beam/erl_nif.c                 | 317
-rw-r--r--  erts/emulator/beam/erl_nif.h                 |   3
-rw-r--r--  erts/emulator/beam/erl_port.h                |  39
-rw-r--r--  erts/emulator/beam/erl_port_task.c           |  25
-rw-r--r--  erts/emulator/beam/erl_process.c             | 236
-rw-r--r--  erts/emulator/beam/erl_process.h             | 154
-rw-r--r--  erts/emulator/beam/erl_process_dump.c        |  76
-rw-r--r--  erts/emulator/beam/erl_trace.c               |  30
-rw-r--r--  erts/emulator/beam/io.c                      | 117
-rw-r--r--  erts/emulator/beam/utils.c                   |   3
-rw-r--r--  erts/emulator/nifs/unix/unix_prim_file.c     |  19
-rw-r--r--  erts/emulator/test/binary_SUITE.erl          |   1
-rw-r--r--  erts/emulator/test/efile_SUITE.erl           |  21
-rw-r--r--  erts/emulator/test/iovec_SUITE.erl           |  46
-rw-r--r--  erts/emulator/test/nif_SUITE.erl             |  30
28 files changed, 874 insertions, 623 deletions
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index b6ec3e7ed2..42a368cdd8 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -124,7 +124,6 @@ atom big
atom bif_return_trap
atom bif_timer_server
atom binary
-atom binary_bin_to_list_trap
atom binary_copy_trap
atom binary_find_trap
atom binary_longest_prefix_trap
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index d68ccc3028..bfd572335f 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1744,7 +1744,6 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
else if (BIF_ARG_1 == am_scheduler) {
ErtsRunQueue *old, *new, *curr;
Sint sched;
- erts_aint32_t state;
if (!is_small(BIF_ARG_2))
goto error;
@@ -1753,23 +1752,23 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
goto error;
if (sched == 0) {
+ old = erts_bind_runq_proc(BIF_P, 0);
new = NULL;
- state = erts_atomic32_read_band_mb(&BIF_P->state,
- ~ERTS_PSFLG_BOUND);
}
else {
+ int bound = !0;
new = erts_schedid2runq(sched);
- erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new);
- state = erts_atomic32_read_bor_mb(&BIF_P->state,
- ERTS_PSFLG_BOUND);
+ old = erts_set_runq_proc(BIF_P, new, &bound);
+ if (!bound)
+ old = NULL;
}
+ old_value = old ? make_small(old->ix+1) : make_small(0);
+
curr = erts_proc_sched_data(BIF_P)->run_queue;
- old = (ERTS_PSFLG_BOUND & state) ? curr : NULL;
ASSERT(!old || old == curr);
- old_value = old ? make_small(old->ix+1) : make_small(0);
if (new && new != curr)
ERTS_BIF_YIELD_RETURN_X(BIF_P, old_value, am_scheduler);
else
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index c0d5e8ce74..0d1166f6ed 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -551,9 +551,6 @@ bif binary:last/1
bif binary:at/2
bif binary:part/2 binary_binary_part_2
bif binary:part/3 binary_binary_part_3
-bif binary:bin_to_list/1
-bif binary:bin_to_list/2
-bif binary:bin_to_list/3
bif binary:list_to_bin/1
bif binary:copy/1
bif binary:copy/2
@@ -696,3 +693,4 @@ bif erlang:iolist_to_iovec/1
bif erts_internal:new_connection/1
bif erts_internal:abort_connection/2
bif erts_internal:map_next/3
+bif ets:whereis/1
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index d4db1a4188..95d324d2c1 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -953,7 +953,10 @@ HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_binary, 1)
BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1)
{
if (is_binary(BIF_ARG_1)) {
- BIF_RET(BIF_ARG_1);
+ if (binary_bitsize(BIF_ARG_1) == 0) {
+ BIF_RET(BIF_ARG_1);
+ }
+ BIF_ERROR(BIF_P, BADARG);
}
return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_iolist_to_binary_1]);
}
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index e7acea0c5f..b9b70cd8ef 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -336,6 +336,12 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
erts_print(to, to_arg, "Heap unused: %bpu\n", (p->hend - p->htop));
erts_print(to, to_arg, "OldHeap unused: %bpu\n",
(OLD_HEAP(p) == NULL) ? 0 : (OLD_HEND(p) - OLD_HTOP(p)) );
+ erts_print(to, to_arg, "BinVHeap: %b64u\n", p->off_heap.overhead);
+ erts_print(to, to_arg, "OldBinVHeap: %b64u\n", BIN_OLD_VHEAP(p));
+ erts_print(to, to_arg, "BinVHeap unused: %b64u\n",
+ BIN_VHEAP_SZ(p) - p->off_heap.overhead);
+ erts_print(to, to_arg, "OldBinVHeap unused: %b64u\n",
+ BIN_OLD_VHEAP_SZ(p) - BIN_OLD_VHEAP(p));
erts_print(to, to_arg, "Memory: %beu\n", erts_process_memory(p, !0));
if (garbing) {
@@ -891,6 +897,21 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_print_scheduler_info(to, to_arg, ERTS_SCHEDULER_IX(i)),
erts_cbprintf(to, to_arg, "** crashed **\n"));
}
+ for (i = 0; i < erts_no_dirty_cpu_schedulers; i++) {
+ ERTS_SYS_TRY_CATCH(
+ erts_print_scheduler_info(to, to_arg, ERTS_DIRTY_CPU_SCHEDULER_IX(i)),
+ erts_cbprintf(to, to_arg, "** crashed **\n"));
+ }
+ erts_cbprintf(to, to_arg, "=dirty_cpu_run_queue\n");
+ erts_print_run_queue_info(to, to_arg, ERTS_DIRTY_CPU_RUNQ);
+
+ for (i = 0; i < erts_no_dirty_io_schedulers; i++) {
+ ERTS_SYS_TRY_CATCH(
+ erts_print_scheduler_info(to, to_arg, ERTS_DIRTY_IO_SCHEDULER_IX(i)),
+ erts_cbprintf(to, to_arg, "** crashed **\n"));
+ }
+ erts_cbprintf(to, to_arg, "=dirty_io_run_queue\n");
+ erts_print_run_queue_info(to, to_arg, ERTS_DIRTY_IO_RUNQ);
#endif
diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab
index b11903a47b..94e0000c8b 100644
--- a/erts/emulator/beam/bs_instrs.tab
+++ b/erts/emulator/beam/bs_instrs.tab
@@ -990,6 +990,9 @@ ctx_to_bin.execute() {
Uint hole_size;
Uint orig = mb->orig;
ErlSubBin* sb = (ErlSubBin *) boxed_val(context);
+ /* Since we're going to overwrite the match state with the result, an
+ * ErlBinMatchState must be at least as large as an ErlSubBin. */
+ ERTS_CT_ASSERT(sizeof(ErlSubBin) <= sizeof(ErlBinMatchState));
hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE;
sb->thing_word = HEADER_SUB_BIN;
sb->size = BYTE_OFFSET(size);
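
The ERTS_CT_ASSERT added above records the invariant that makes this in-place conversion safe. Purely as an illustration (not part of this diff), the same check written as a standalone C11 static assertion would be:

    /* Illustrative C11 equivalent of the ERTS_CT_ASSERT above: the match
     * state is overwritten with a sub binary, so it must be large enough
     * to hold one. */
    _Static_assert(sizeof(ErlSubBin) <= sizeof(ErlBinMatchState),
                   "ErlBinMatchState must be able to hold an ErlSubBin");
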
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 41c2ae08d3..33bc189182 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -61,8 +61,6 @@ static Export binary_longest_prefix_trap_export;
static BIF_RETTYPE binary_longest_prefix_trap(BIF_ALIST_3);
static Export binary_longest_suffix_trap_export;
static BIF_RETTYPE binary_longest_suffix_trap(BIF_ALIST_3);
-static Export binary_bin_to_list_trap_export;
-static BIF_RETTYPE binary_bin_to_list_trap(BIF_ALIST_3);
static Export binary_copy_trap_export;
static BIF_RETTYPE binary_copy_trap(BIF_ALIST_2);
static Uint max_loop_limit;
@@ -86,10 +84,6 @@ void erts_init_bif_binary(void)
am_erlang, am_binary_longest_suffix_trap, 3,
&binary_longest_suffix_trap);
- erts_init_trap_export(&binary_bin_to_list_trap_export,
- am_erlang, am_binary_bin_to_list_trap, 3,
- &binary_bin_to_list_trap);
-
erts_init_trap_export(&binary_copy_trap_export,
am_erlang, am_binary_copy_trap, 2,
&binary_copy_trap);
@@ -2440,191 +2434,6 @@ BIF_RETTYPE binary_at_2(BIF_ALIST_2)
BIF_ERROR(BIF_P,BADARG);
}
-#define BIN_TO_LIST_OK 0
-#define BIN_TO_LIST_TRAP 1
-/* No badarg, checked before call */
-
-#define BIN_TO_LIST_LOOP_FACTOR 10
-
-static int do_bin_to_list(Process *p, byte *bytes, Uint bit_offs,
- Uint start, Sint *lenp, Eterm *termp)
-{
- Uint reds = get_reds(p, BIN_TO_LIST_LOOP_FACTOR); /* reds can never be 0 */
- Uint len = *lenp;
- Uint loops;
- Eterm *hp;
- Eterm term = *termp;
- Uint n;
-
- ASSERT(reds > 0);
-
- loops = MIN(reds,len);
-
- BUMP_REDS(p, loops / BIN_TO_LIST_LOOP_FACTOR);
-
- hp = HAlloc(p,2*loops);
- while (loops--) {
- --len;
- if (bit_offs) {
- n = ((((Uint) bytes[start+len]) << bit_offs) |
- (((Uint) bytes[start+len+1]) >> (8-bit_offs))) & 0xFF;
- } else {
- n = bytes[start+len];
- }
-
- term = CONS(hp,make_small(n),term);
- hp +=2;
- }
- *termp = term;
- *lenp = len;
- if (len) {
- BUMP_ALL_REDS(p);
- return BIN_TO_LIST_TRAP;
- }
- return BIN_TO_LIST_OK;
-}
-
-
-static BIF_RETTYPE do_trap_bin_to_list(Process *p, Eterm binary,
- Uint start, Sint len, Eterm sofar)
-{
- Eterm *hp;
- Eterm blob;
-
- hp = HAlloc(p,3);
- hp[0] = make_pos_bignum_header(2);
- hp[1] = start;
- hp[2] = (Uint) len;
- blob = make_big(hp);
- BIF_TRAP3(&binary_bin_to_list_trap_export, p, binary, blob, sofar);
-}
-
-static BIF_RETTYPE binary_bin_to_list_trap(BIF_ALIST_3)
-{
- Eterm *ptr;
- Uint start;
- Sint len;
- byte *bytes;
- Uint bit_offs;
- Uint bit_size;
- Eterm res = BIF_ARG_3;
-
- ptr = big_val(BIF_ARG_2);
- start = ptr[1];
- len = (Sint) ptr[2];
-
- ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size);
- if (do_bin_to_list(BIF_P, bytes, bit_offs, start, &len, &res) ==
- BIN_TO_LIST_OK) {
- BIF_RET(res);
- }
- return do_trap_bin_to_list(BIF_P,BIF_ARG_1,start,len,res);
-}
-
-static BIF_RETTYPE binary_bin_to_list_common(Process *p,
- Eterm bin,
- Eterm epos,
- Eterm elen)
-{
- Uint pos;
- Sint len;
- size_t sz;
- byte *bytes;
- Uint bit_offs;
- Uint bit_size;
- Eterm res = NIL;
-
- if (is_not_binary(bin)) {
- goto badarg;
- }
- if (!term_to_Uint(epos, &pos)) {
- goto badarg;
- }
- if (!term_to_Sint(elen, &len)) {
- goto badarg;
- }
- if (len < 0) {
- Uint lentmp = -(Uint)len;
- /* overflow */
- if ((Sint)lentmp < 0) {
- goto badarg;
- }
- len = lentmp;
- if (len > pos) {
- goto badarg;
- }
- pos -= len;
- }
- /* overflow */
- if ((pos + len) < pos || (len > 0 && (pos + len) == pos)) {
- goto badarg;
- }
- sz = binary_size(bin);
-
- if (pos+len > sz) {
- goto badarg;
- }
- ERTS_GET_BINARY_BYTES(bin,bytes,bit_offs,bit_size);
- if (bit_size != 0) {
- goto badarg;
- }
- if(do_bin_to_list(p, bytes, bit_offs, pos, &len, &res) ==
- BIN_TO_LIST_OK) {
- BIF_RET(res);
- }
- return do_trap_bin_to_list(p,bin,pos,len,res);
-
- badarg:
- BIF_ERROR(p,BADARG);
-}
-
-BIF_RETTYPE binary_bin_to_list_3(BIF_ALIST_3)
-{
- return binary_bin_to_list_common(BIF_P,BIF_ARG_1,BIF_ARG_2,BIF_ARG_3);
-}
-
-BIF_RETTYPE binary_bin_to_list_2(BIF_ALIST_2)
-{
- Eterm *tp;
-
- if (is_not_tuple(BIF_ARG_2)) {
- goto badarg;
- }
- tp = tuple_val(BIF_ARG_2);
- if (arityval(*tp) != 2) {
- goto badarg;
- }
- return binary_bin_to_list_common(BIF_P,BIF_ARG_1,tp[1],tp[2]);
- badarg:
- BIF_ERROR(BIF_P,BADARG);
-}
-
-BIF_RETTYPE binary_bin_to_list_1(BIF_ALIST_1)
-{
- Uint pos = 0;
- Sint len;
- byte *bytes;
- Uint bit_offs;
- Uint bit_size;
- Eterm res = NIL;
-
- if (is_not_binary(BIF_ARG_1)) {
- goto badarg;
- }
- len = binary_size(BIF_ARG_1);
- ERTS_GET_BINARY_BYTES(BIF_ARG_1,bytes,bit_offs,bit_size);
- if (bit_size != 0) {
- goto badarg;
- }
- if(do_bin_to_list(BIF_P, bytes, bit_offs, pos, &len, &res) ==
- BIN_TO_LIST_OK) {
- BIF_RET(res);
- }
- return do_trap_bin_to_list(BIF_P,BIF_ARG_1,pos,len,res);
- badarg:
- BIF_ERROR(BIF_P,BADARG);
-}
-
HIPE_WRAPPER_BIF_DISABLE_GC(binary_list_to_bin, 1)
BIF_RETTYPE binary_list_to_bin_1(BIF_ALIST_1)
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index 9f0c90ff7b..b184adedee 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -639,6 +639,27 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
BIF_RET(res);
}
+Eterm erts_port_data_read(Port* prt)
+{
+ Eterm res;
+ erts_aint_t data;
+
+ data = erts_atomic_read_ddrb(&prt->data);
+ if (data == (erts_aint_t)NULL)
+ return am_undefined; /* Port terminated by racing thread */
+
+ if ((data & 0x3) != 0) {
+ res = (Eterm) (UWord) data;
+ ASSERT(is_immed(res));
+ }
+ else {
+ ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
+ res = pdhp->data;
+ }
+ return res;
+}
+
+
/*
* Open a port. Most of the work is not done here but rather in
* the file io.c.
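
The new erts_port_data_read() above provides a lock-free snapshot of a port's data word. A hypothetical caller sketch (not part of this diff; prt is assumed to be a valid Port pointer):

    /* Sketch only: the function returns an immediate, a term kept alive by
     * the port's data heap, or am_undefined if the port was terminated by
     * a racing thread. */
    Eterm data = erts_port_data_read(prt);
    if (data == am_undefined) {
        /* port already terminated; treat as "no data" */
    }
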
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 3ba0886464..1ab1c4a363 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1753,6 +1753,28 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_RET(ret);
}
+/*
+** Retrieves the tid() of a named ets table.
+*/
+BIF_RETTYPE ets_whereis_1(BIF_ALIST_1)
+{
+ DbTable* tb;
+ Eterm res;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
+ BIF_RET(am_undefined);
+ }
+
+ res = make_tid(BIF_P, tb);
+ db_unlock(tb, LCK_READ);
+
+ BIF_RET(res);
+}
+
/*
** The lookup BIF
*/
@@ -3126,7 +3148,8 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
static Eterm fields[] = {am_protection, am_keypos, am_type, am_named_table,
am_node, am_size, am_name, am_heir, am_owner, am_memory, am_compressed,
am_write_concurrency,
- am_read_concurrency};
+ am_read_concurrency,
+ am_id};
Eterm results[sizeof(fields)/sizeof(Eterm)];
DbTable* tb;
Eterm res;
@@ -4016,7 +4039,10 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = is_table_named(tb) ? am_true : am_false;
} else if (What == am_compressed) {
ret = tb->common.compress ? am_true : am_false;
+ } else if (What == am_id) {
+ ret = make_tid(p, tb);
}
+
/*
* For debugging purposes
*/
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index a5a59c8e74..1c64644efc 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -116,6 +116,7 @@ typedef struct {
static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
static void cleanup_rootset(Rootset *rootset);
static Eterm *full_sweep_heaps(Process *p,
+ ErlHeapFragment *live_hf_end,
int hibernate,
Eterm *n_heap, Eterm* n_htop,
char *oh, Uint oh_size,
@@ -142,7 +143,7 @@ static Eterm* sweep_literal_area(Eterm* n_hp, Eterm* n_htop,
static Eterm* sweep_literals_to_old_heap(Eterm* heap_ptr, Eterm* heap_end, Eterm* htop,
char* src, Uint src_size);
static Eterm* collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end,
- Eterm* heap, Eterm* htop, Eterm* objv, int nobj);
+ Eterm* htop);
static int adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj);
static void shrink_new_heap(Process *p, Uint new_sz, Eterm *objv, int nobj);
static void grow_new_heap(Process *p, Uint new_sz, Eterm* objv, int nobj);
@@ -917,6 +918,7 @@ garbage_collect_hibernate(Process* p, int check_long_gc)
htop = heap;
htop = full_sweep_heaps(p,
+ ERTS_INVALID_HFRAG_PTR,
1,
heap,
htop,
@@ -1474,18 +1476,22 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
n_htop = n_heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
sizeof(Eterm)*new_sz);
+ n = setup_rootset(p, objv, nobj, &rootset);
+ roots = rootset.roots;
+
+ /*
+ * All allocations done. Start defiling the heap with move markers.
+ * A crash dump due to allocation failure above will see a healthy heap.
+ */
+
if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
/*
* Move heap frags that we know are completely live
* directly into the new young heap generation.
*/
- n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop,
- objv, nobj);
+ n_htop = collect_live_heap_frags(p, live_hf_end, n_htop);
}
- n = setup_rootset(p, objv, nobj, &rootset);
- roots = rootset.roots;
-
GENSWEEP_NSTACK(p, old_htop, n_htop);
while (n--) {
Eterm* g_ptr = roots->v;
@@ -1722,16 +1728,8 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
n_htop = n_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
sizeof(Eterm)*new_sz);
- if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
- /*
- * Move heap frags that we know are completely live
- * directly into the heap.
- */
- n_htop = collect_live_heap_frags(p, live_hf_end, n_heap, n_htop,
- objv, nobj);
- }
-
- n_htop = full_sweep_heaps(p, 0, n_heap, n_htop, oh, oh_size, objv, nobj);
+ n_htop = full_sweep_heaps(p, live_hf_end, 0, n_heap, n_htop, oh, oh_size,
+ objv, nobj);
/* Move the stack to the end of the heap */
stk_sz = HEAP_END(p) - p->stop;
@@ -1778,6 +1776,7 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
static Eterm *
full_sweep_heaps(Process *p,
+ ErlHeapFragment *live_hf_end,
int hibernate,
Eterm *n_heap, Eterm* n_htop,
char *oh, Uint oh_size,
@@ -1794,6 +1793,19 @@ full_sweep_heaps(Process *p,
n = setup_rootset(p, objv, nobj, &rootset);
+ /*
+ * All allocations done. Start defiling the heap with move markers.
+ * A crash dump due to allocation failure above will see a healthy heap.
+ */
+
+ if (live_hf_end != ERTS_INVALID_HFRAG_PTR) {
+ /*
+ * Move heap frags that we know are completely live
+ * directly into the heap.
+ */
+ n_htop = collect_live_heap_frags(p, live_hf_end, n_htop);
+ }
+
#ifdef HIPE
if (hibernate)
hipe_empty_nstack(p);
@@ -2296,9 +2308,7 @@ move_one_area(Eterm* n_htop, char* src, Uint src_size)
*/
static Eterm*
-collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end,
- Eterm* n_hstart, Eterm* n_htop,
- Eterm* objv, int nobj)
+collect_live_heap_frags(Process* p, ErlHeapFragment *live_hf_end, Eterm* n_htop)
{
ErlHeapFragment* qb;
char* frag_begin;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 4846ccd2d3..e8048cfdfc 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -1746,6 +1746,7 @@ erl_start(int argc, char **argv)
}
else if (has_prefix("ecio", sub_param)) {
/* ignore argument, eager check io no longer used */
+ arg = get_arg(sub_param+4, argv[i+1], &i);
}
else if (has_prefix("pp", sub_param)) {
arg = get_arg(sub_param+2, argv[i+1], &i);
diff --git a/erts/emulator/beam/erl_io_queue.c b/erts/emulator/beam/erl_io_queue.c
index c93b5248d9..d779d1031a 100644
--- a/erts/emulator/beam/erl_io_queue.c
+++ b/erts/emulator/beam/erl_io_queue.c
@@ -801,12 +801,11 @@ static Eterm iol2v_make_sub_bin(iol2v_state_t *state, Eterm bin_term,
ERTS_GET_REAL_BIN(bin_term, orig_pb_term,
byte_offset, bit_offset, bit_size);
- (void)bit_offset;
- (void)bit_size;
+ ASSERT(bit_size == 0);
sb->thing_word = HEADER_SUB_BIN;
+ sb->bitoffs = bit_offset;
sb->bitsize = 0;
- sb->bitoffs = 0;
sb->orig = orig_pb_term;
sb->is_writable = 0;
@@ -975,7 +974,7 @@ static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) {
parent_header = binary_val(parent_binary);
binary_size = binary_size(bin_term);
- if (bit_offset != 0 || bit_size != 0) {
+ if (bit_size != 0) {
return 0;
} else if (binary_size == 0) {
state->bytereds_spent += 1;
@@ -1017,8 +1016,16 @@ static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) {
* then just copy it into the accumulator. */
iol2v_expand_acc(state, binary_size);
- sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
- binary_data, binary_size);
+ if (ERTS_LIKELY(bit_offset == 0)) {
+ sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
+ binary_data, binary_size);
+ } else {
+ ASSERT(binary_size <= ERTS_UWORD_MAX / 8);
+
+ erts_copy_bits(binary_data, bit_offset, 1,
+ (byte*)&(state->acc)->orig_bytes[state->acc_size], 0, 1,
+ binary_size * 8);
+ }
state->acc_size += binary_size;
} else {
@@ -1029,8 +1036,16 @@ static int iol2v_append_binary(iol2v_state_t *state, Eterm bin_term) {
iol2v_expand_acc(state, spill);
- sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
- binary_data, spill);
+ if (ERTS_LIKELY(bit_offset == 0)) {
+ sys_memcpy(&(state->acc)->orig_bytes[state->acc_size],
+ binary_data, spill);
+ } else {
+ ASSERT(binary_size <= ERTS_UWORD_MAX / 8);
+
+ erts_copy_bits(binary_data, bit_offset, 1,
+ (byte*)&(state->acc)->orig_bytes[state->acc_size], 0, 1,
+ spill * 8);
+ }
state->acc_size += spill;
@@ -1179,7 +1194,10 @@ BIF_RETTYPE iolist_to_iovec_1(BIF_ALIST_1) {
if (is_nil(BIF_ARG_1)) {
BIF_RET(NIL);
} else if (is_binary(BIF_ARG_1)) {
- if (binary_size(BIF_ARG_1) != 0) {
+ if (binary_bitsize(BIF_ARG_1) != 0) {
+ ASSERT(!(BIF_P->flags & F_DISABLE_GC));
+ BIF_ERROR(BIF_P, BADARG);
+ } else if (binary_size(BIF_ARG_1) != 0) {
Eterm *hp = HAlloc(BIF_P, 2);
BIF_RET(CONS(hp, BIF_ARG_1, NIL));
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index abf194cf94..6f7c71ef98 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -616,7 +616,7 @@ erts_try_alloc_message_on_heap(Process *pp,
}
else {
in_message_fragment:
- if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) {
+ if ((*psp) & ERTS_PSFLG_OFF_HEAP_MSGQ) {
mp = erts_alloc_message(sz, hpp);
*ohpp = sz == 0 ? NULL : &mp->hfrag.off_heap;
}
@@ -1079,8 +1079,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
case am_on_heap:
c_p->flags |= F_ON_HEAP_MSGQ;
c_p->flags &= ~F_OFF_HEAP_MSGQ;
- erts_atomic32_read_bor_nob(&c_p->state,
- ERTS_PSFLG_ON_HEAP_MSGQ);
/*
* We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
* if a off heap change is ongoing. It will be adjusted
@@ -1106,8 +1104,6 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
break;
case am_off_heap:
c_p->flags &= ~F_ON_HEAP_MSGQ;
- erts_atomic32_read_band_nob(&c_p->state,
- ~ERTS_PSFLG_ON_HEAP_MSGQ);
goto change_to_off_heap;
default:
res = THE_NON_VALUE; /* badarg */
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 94ebf88b56..d2000aa71e 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -237,9 +237,11 @@ static void cache_env(ErlNifEnv* env);
static void full_flush_env(ErlNifEnv *env);
static void flush_env(ErlNifEnv* env);
-/* Temporary object header, auto-deallocated when NIF returns
- * or when independent environment is cleared.
- */
+/* Temporary object header, auto-deallocated when NIF returns or when
+ * independent environment is cleared.
+ *
+ * The payload can be accessed with &tmp_obj_ptr[1] but keep in mind that its
+ * first element must not require greater alignment than `next`. */
struct enif_tmp_obj_t {
struct enif_tmp_obj_t* next;
void (*dtor)(struct enif_tmp_obj_t*);
@@ -256,6 +258,46 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
}
}
+/* Whether the given environment is bound to a process and will be cleaned up
+ * when the NIF returns. It's safe to use temp_alloc for objects in
+ * env->tmp_obj_list when this is true. */
+static ERTS_INLINE int is_proc_bound(ErlNifEnv *env)
+{
+ return env->mod_nif != NULL;
+}
+
+/* Allocates and attaches an object to the given environment, running its
+ * destructor when the environment is cleared. To avoid temporary variables the
+ * address of the allocated object is returned instead of the enif_tmp_obj_t.
+ *
+ * The destructor *must* call `erts_free(tmp_obj->allocator, tmp_obj)` to free
+ * the object. If the destructor needs to refer to the allocated object its
+ * address will be &tmp_obj[1]. */
+static ERTS_INLINE void *alloc_tmp_obj(ErlNifEnv *env, size_t size,
+ void (*dtor)(struct enif_tmp_obj_t*)) {
+ struct enif_tmp_obj_t *tmp_obj;
+ ErtsAlcType_t allocator;
+
+ allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
+
+ tmp_obj = erts_alloc(allocator, sizeof(struct enif_tmp_obj_t) + MAX(1, size));
+
+ tmp_obj->next = env->tmp_obj_list;
+ tmp_obj->allocator = allocator;
+ tmp_obj->dtor = dtor;
+
+ env->tmp_obj_list = tmp_obj;
+
+ return (void*)&tmp_obj[1];
+}
+
+/* Generic destructor for objects allocated through alloc_tmp_obj that don't
+ * care about their payload. */
+static void tmp_alloc_dtor(struct enif_tmp_obj_t *tmp_obj)
+{
+ erts_free(tmp_obj->allocator, tmp_obj);
+}
+
void erts_post_nif(ErlNifEnv* env)
{
erts_unblock_fpe(env->fpe_was_unmasked);
@@ -446,6 +488,7 @@ static void cache_env(ErlNifEnv* env)
env->hp_end = env->heap_frag->mem + env->heap_frag->alloc_size;
}
}
+
void* enif_priv_data(ErlNifEnv* env)
{
return env->mod_nif->priv_data;
@@ -1019,11 +1062,6 @@ int enif_is_number(ErlNifEnv* env, ERL_NIF_TERM term)
return is_number(term);
}
-static ERTS_INLINE int is_proc_bound(ErlNifEnv* env)
-{
- return env->mod_nif != NULL;
-}
-
static void aligned_binary_dtor(struct enif_tmp_obj_t* obj)
{
erts_free_aligned_binary_bytes_extra((byte*)obj, obj->allocator);
@@ -1058,22 +1096,14 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
u.tmp->dtor = &aligned_binary_dtor;
env->tmp_obj_list = u.tmp;
}
- bin->bin_term = bin_term;
bin->size = binary_size(bin_term);
bin->ref_bin = NULL;
ADD_READONLY_CHECK(env, bin->data, bin->size);
return 1;
}
-static void tmp_alloc_dtor(struct enif_tmp_obj_t* obj)
-{
- erts_free(obj->allocator, obj);
-}
-
int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
- struct enif_tmp_obj_t* tobj;
- ErtsAlcType_t allocator;
ErlDrvSizeT sz;
if (is_binary(term)) {
return enif_inspect_binary(env,term,bin);
@@ -1081,7 +1111,6 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
if (is_nil(term)) {
bin->data = (unsigned char*) &bin->data; /* dummy non-NULL */
bin->size = 0;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
return 1;
}
@@ -1089,16 +1118,8 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
return 0;
}
- allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
- tobj = erts_alloc(allocator, sz + sizeof(struct enif_tmp_obj_t));
- tobj->allocator = allocator;
- tobj->next = env->tmp_obj_list;
- tobj->dtor = &tmp_alloc_dtor;
- env->tmp_obj_list = tobj;
-
- bin->data = (unsigned char*) &tobj[1];
+ bin->data = alloc_tmp_obj(env, sz, &tmp_alloc_dtor);
bin->size = sz;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
erts_iolist_to_buf(term, (char*) bin->data, sz);
ADD_READONLY_CHECK(env, bin->data, bin->size);
@@ -1116,7 +1137,6 @@ int enif_alloc_binary(size_t size, ErlNifBinary* bin)
bin->size = size;
bin->data = (unsigned char*) refbin->orig_bytes;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = refbin;
return 1;
}
@@ -1150,12 +1170,10 @@ void enif_release_binary(ErlNifBinary* bin)
{
if (bin->ref_bin != NULL) {
Binary* refbin = bin->ref_bin;
- ASSERT(bin->bin_term == THE_NON_VALUE);
erts_bin_release(refbin);
}
#ifdef DEBUG
bin->data = NULL;
- bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
#endif
}
@@ -1312,29 +1330,51 @@ int enif_get_string(ErlNifEnv *env, ERL_NIF_TERM list, char* buf, unsigned len,
Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
{
- if (bin->bin_term != THE_NON_VALUE) {
- return bin->bin_term;
- }
- else if (bin->ref_bin != NULL) {
- Binary* bptr = bin->ref_bin;
- Eterm bin_term;
-
- bin_term = erts_build_proc_bin(&MSO(env->proc),
- alloc_heap(env, PROC_BIN_SIZE),
- bptr);
- if (erts_refc_read(&bptr->intern.refc, 1) == 1) {
- /* Total ownership transfer */
- bin->ref_bin = NULL;
- bin->bin_term = bin_term;
- }
- return bin_term;
- }
- else {
- flush_env(env);
- bin->bin_term = new_binary(env->proc, bin->data, bin->size);
- cache_env(env);
- return bin->bin_term;
+ Eterm bin_term;
+
+ if (bin->ref_bin != NULL) {
+ Binary* binary = bin->ref_bin;
+
+ /* If the binary is smaller than the heap binary limit we'll return a
+ * heap binary to reduce the number of small refc binaries in the
+ * system. We can't simply release the refc binary right away however;
+ * the documentation states that the binary should be considered
+ * read-only from this point on, which implies that it should still be
+ * readable.
+ *
+ * We could keep it alive until we return by adding it to the temporary
+ * object list, but that requires an off-heap allocation which is
+ * potentially quite slow, so we create a dummy ProcBin instead and
+ * rely on the next minor GC to get rid of it. */
+ if (bin->size <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+
+ hb = (ErlHeapBin*)alloc_heap(env, heap_bin_size(bin->size));
+ hb->thing_word = header_heap_bin(bin->size);
+ hb->size = bin->size;
+
+ sys_memcpy(hb->data, bin->data, bin->size);
+
+ erts_build_proc_bin(&MSO(env->proc),
+ alloc_heap(env, PROC_BIN_SIZE),
+ binary);
+
+ bin_term = make_binary(hb);
+ } else {
+ bin_term = erts_build_proc_bin(&MSO(env->proc),
+ alloc_heap(env, PROC_BIN_SIZE),
+ binary);
+ }
+
+ /* Our (possibly shared) ownership has been transferred to the term. */
+ bin->ref_bin = NULL;
+ } else {
+ flush_env(env);
+ bin_term = new_binary(env->proc, bin->data, bin->size);
+ cache_env(env);
}
+
+ return bin_term;
}
Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
@@ -3284,8 +3324,8 @@ typedef struct {
Eterm sublist_start;
Eterm sublist_end;
- UWord offheap_size;
- UWord onheap_size;
+ UWord referenced_size;
+ UWord copied_size;
UWord iovec_len;
} iovec_slice_t;
@@ -3295,16 +3335,16 @@ static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *resul
result->sublist_start = list;
result->sublist_length = 0;
- result->offheap_size = 0;
- result->onheap_size = 0;
+ result->referenced_size = 0;
+ result->copied_size = 0;
result->iovec_len = 0;
lookahead = result->sublist_start;
while (is_list(lookahead)) {
- Eterm *binary_header, binary;
+ UWord byte_size;
+ Eterm binary;
Eterm *cell;
- UWord size;
cell = list_val(lookahead);
binary = CAR(cell);
@@ -3313,35 +3353,36 @@ static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *resul
return 0;
}
- size = binary_size(binary);
- binary_header = binary_val(binary);
+ byte_size = binary_size(binary);
+
+ if (byte_size > 0) {
+ int bit_offset, bit_size;
+ Eterm parent_binary;
+ UWord byte_offset;
- if (size > 0) {
- /* If we're a sub-binary we'll need to check our underlying binary
- * to determine whether we're on-heap or not. */
- if (thing_subtag(*binary_header) == SUB_BINARY_SUBTAG) {
- ErlSubBin *sb = (ErlSubBin*)binary_header;
+ int requires_copying;
- /* Reject bitstrings */
- if((sb->bitoffs + sb->bitsize) > 0) {
- return 0;
- }
+ ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset,
+ bit_offset, bit_size);
- ASSERT(size <= binary_size(sb->orig));
- binary_header = binary_val(sb->orig);
+ (void)byte_offset;
+
+ if (bit_size != 0) {
+ return 0;
}
- if (thing_subtag(*binary_header) == HEAP_BINARY_SUBTAG) {
- ASSERT(size <= ERL_ONHEAP_BIN_LIMIT);
+ /* If we're unaligned or an on-heap binary we'll need to copy
+ * ourselves over to a temporary buffer. */
+ requires_copying = (bit_offset != 0) ||
+ thing_subtag(*binary_val(parent_binary)) == HEAP_BINARY_SUBTAG;
- result->iovec_len += 1;
- result->onheap_size += size;
+ if (requires_copying) {
+ result->copied_size += byte_size;
} else {
- ASSERT(thing_subtag(*binary_header) == REFC_BINARY_SUBTAG);
-
- result->iovec_len += 1 + size / MAX_SYSIOVEC_IOVLEN;
- result->offheap_size += size;
+ result->referenced_size += byte_size;
}
+
+ result->iovec_len += 1 + byte_size / MAX_SYSIOVEC_IOVLEN;
}
result->sublist_length += 1;
@@ -3361,7 +3402,9 @@ static int examine_iovec_term(Eterm list, UWord max_length, iovec_slice_t *resul
return 1;
}
-static void inspect_raw_binary_data(Eterm binary, ErlNifBinary *result) {
+static void marshal_iovec_binary(Eterm binary, ErlNifBinary *copy_buffer,
+ UWord *copy_offset, ErlNifBinary *result) {
+
Eterm *parent_header;
Eterm parent_binary;
@@ -3372,10 +3415,11 @@ static void inspect_raw_binary_data(Eterm binary, ErlNifBinary *result) {
ERTS_GET_REAL_BIN(binary, parent_binary, byte_offset, bit_offset, bit_size);
+ ASSERT(bit_size == 0);
+
parent_header = binary_val(parent_binary);
result->size = binary_size(binary);
- result->bin_term = binary;
if (thing_subtag(*parent_header) == REFC_BINARY_SUBTAG) {
ProcBin *pb = (ProcBin*)parent_header;
@@ -3398,24 +3442,48 @@ static void inspect_raw_binary_data(Eterm binary, ErlNifBinary *result) {
result->data = &((unsigned char*)&hb->data)[byte_offset];
result->ref_bin = NULL;
}
+
+ /* If this isn't an *aligned* refc binary, copy its contents to the buffer
+ * and reference that instead. */
+
+ if (result->ref_bin == NULL || bit_offset != 0) {
+ ASSERT(copy_buffer->ref_bin != NULL && copy_buffer->data != NULL);
+ ASSERT(result->size <= (copy_buffer->size - *copy_offset));
+
+ if (bit_offset == 0) {
+ sys_memcpy(&copy_buffer->data[*copy_offset],
+ result->data, result->size);
+ } else {
+ erts_copy_bits(result->data, bit_offset, 1,
+ (byte*)&copy_buffer->data[*copy_offset], 0, 1,
+ result->size * 8);
+ }
+
+ result->data = &copy_buffer->data[*copy_offset];
+ result->ref_bin = copy_buffer->ref_bin;
+
+ *copy_offset += result->size;
+ }
}
static int fill_iovec_with_slice(ErlNifEnv *env,
iovec_slice_t *slice,
ErlNifIOVec *iovec) {
- UWord onheap_offset, iovec_idx;
- ErlNifBinary onheap_data;
+ ErlNifBinary copy_buffer = {0};
+ UWord copy_offset, iovec_idx;
Eterm sublist_iterator;
- /* Set up a common refc binary for all on-heap binaries. */
- if (slice->onheap_size > 0) {
- if (!enif_alloc_binary(slice->onheap_size, &onheap_data)) {
+ /* Set up a common refc binary for all on-heap and unaligned binaries. */
+ if (slice->copied_size > 0) {
+ if (!enif_alloc_binary(slice->copied_size, &copy_buffer)) {
return 0;
}
+
+ ASSERT(copy_buffer.ref_bin != NULL);
}
sublist_iterator = slice->sublist_start;
- onheap_offset = 0;
+ copy_offset = 0;
iovec_idx = 0;
while (sublist_iterator != slice->sublist_end) {
@@ -3423,20 +3491,7 @@ static int fill_iovec_with_slice(ErlNifEnv *env,
Eterm *cell;
cell = list_val(sublist_iterator);
- inspect_raw_binary_data(CAR(cell), &raw_data);
-
- /* If this isn't a refc binary, copy its contents to the onheap buffer
- * and reference that instead. */
- if (raw_data.size > 0 && raw_data.ref_bin == NULL) {
- ASSERT(onheap_offset < onheap_data.size);
- ASSERT(slice->onheap_size > 0);
-
- sys_memcpy(&onheap_data.data[onheap_offset],
- raw_data.data, raw_data.size);
-
- raw_data.data = &onheap_data.data[onheap_offset];
- raw_data.ref_bin = onheap_data.ref_bin;
- }
+ marshal_iovec_binary(CAR(cell), &copy_buffer, &copy_offset, &raw_data);
while (raw_data.size > 0) {
UWord chunk_len = MIN(raw_data.size, MAX_SYSIOVEC_IOVLEN);
@@ -3467,16 +3522,18 @@ static int fill_iovec_with_slice(ErlNifEnv *env,
erts_refc_inc(&refc_binary->intern.refc, 1);
}
- if (slice->onheap_size > 0) {
+ if (slice->copied_size > 0) {
/* Transfer ownership to the iovec; we've taken references to it in
* the above loop. */
- enif_release_binary(&onheap_data);
+ enif_release_binary(&copy_buffer);
}
} else {
- if (slice->onheap_size > 0) {
- /* Attach the binary to our environment and let the GC take care of
- * it after returning. */
- enif_make_binary(env, &onheap_data);
+ if (slice->copied_size > 0) {
+ /* Attach the binary to our environment and let the next minor GC
+ * get rid of it. This is slightly faster than using the tmp object
+ * list since it avoids off-heap allocations. */
+ erts_build_proc_bin(&MSO(env->proc),
+ alloc_heap(env, PROC_BIN_SIZE), copy_buffer.ref_bin);
}
}
@@ -3502,19 +3559,14 @@ static int create_iovec_from_slice(ErlNifEnv *env,
alloc_size = binv_offset;
alloc_size += slice->iovec_len * sizeof(Binary*);
- /* If we have an environment we'll attach the allocated data to it. The
- * GC will take care of releasing it later on. */
+ /* When the user passes an environment, we attach the iovec to it so
+ * the user won't have to bother managing it (similar to
+ * enif_inspect_binary). It'll disappear once the environment is
+ * cleaned up. */
if (env != NULL) {
- ErlNifBinary gc_bin;
-
- if (!enif_alloc_binary(alloc_size, &gc_bin)) {
- return 0;
- }
-
- alloc_base = (char*)gc_bin.data;
- enif_make_binary(env, &gc_bin);
+ alloc_base = alloc_tmp_obj(env, alloc_size, &tmp_alloc_dtor);
} else {
- alloc_base = enif_alloc(alloc_size);
+ alloc_base = erts_alloc(ERTS_ALC_T_NIF, alloc_size);
}
iovec = (ErlNifIOVec*)alloc_base;
@@ -3523,12 +3575,12 @@ static int create_iovec_from_slice(ErlNifEnv *env,
iovec->flags = 0;
}
- iovec->size = slice->offheap_size + slice->onheap_size;
+ iovec->size = slice->referenced_size + slice->copied_size;
iovec->iovcnt = slice->iovec_len;
if(!fill_iovec_with_slice(env, slice, iovec)) {
if (env == NULL && !(iovec->flags & ERL_NIF_IOVEC_FLAGS_PREALLOC)) {
- enif_free(iovec);
+ erts_free(ERTS_ALC_T_NIF, iovec);
}
return 0;
@@ -4218,34 +4270,31 @@ static unsigned calc_checksum(unsigned char* ptr, unsigned size);
struct readonly_check_t
{
- struct enif_tmp_obj_t hdr;
unsigned char* ptr;
unsigned size;
unsigned checksum;
};
static void add_readonly_check(ErlNifEnv* env, unsigned char* ptr, unsigned sz)
{
- ErtsAlcType_t allocator = is_proc_bound(env) ? ERTS_ALC_T_TMP : ERTS_ALC_T_NIF;
- struct readonly_check_t* obj = erts_alloc(allocator,
- sizeof(struct readonly_check_t));
- obj->hdr.allocator = allocator;
- obj->hdr.next = env->tmp_obj_list;
- env->tmp_obj_list = &obj->hdr;
- obj->hdr.dtor = &readonly_check_dtor;
+ struct readonly_check_t* obj;
+
+ obj = alloc_tmp_obj(env, sizeof(struct readonly_check_t),
+ &readonly_check_dtor);
+
obj->ptr = ptr;
obj->size = sz;
- obj->checksum = calc_checksum(ptr, sz);
+ obj->checksum = calc_checksum(ptr, sz);
}
-static void readonly_check_dtor(struct enif_tmp_obj_t* o)
+static void readonly_check_dtor(struct enif_tmp_obj_t* tmp_obj)
{
- struct readonly_check_t* obj = (struct readonly_check_t*) o;
- unsigned chksum = calc_checksum(obj->ptr, obj->size);
- if (chksum != obj->checksum) {
+ struct readonly_check_t* ro_check = (struct readonly_check_t*)&tmp_obj[1];
+ unsigned chksum = calc_checksum(ro_check->ptr, ro_check->size);
+ if (chksum != ro_check->checksum) {
fprintf(stderr, "\r\nReadonly data written by NIF, checksums differ"
- " %x != %x\r\nABORTING\r\n", chksum, obj->checksum);
+ " %x != %x\r\nABORTING\r\n", chksum, ro_check->checksum);
abort();
}
- erts_free(obj->hdr.allocator, obj);
+ erts_free(tmp_obj->allocator, tmp_obj);
}
static unsigned calc_checksum(unsigned char* ptr, unsigned size)
{
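
The alloc_tmp_obj()/tmp_alloc_dtor() pair introduced above replaces the hand-rolled temp-object setup that was previously duplicated in enif_inspect_iolist_as_binary() and add_readonly_check(). A hedged sketch of the intended usage inside erl_nif.c; the helper name below is hypothetical:

    /* Hypothetical helper: the returned buffer is freed automatically when
     * the NIF returns (process-bound env) or when an independent env is
     * cleared, because tmp_alloc_dtor() runs from free_tmp_objs(). */
    static byte *alloc_scratch_buffer(ErlNifEnv *env, size_t size)
    {
        return (byte*) alloc_tmp_obj(env, size, &tmp_alloc_dtor);
    }
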
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 053f7673c4..a99b4db705 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -137,8 +137,9 @@ typedef struct
unsigned char* data;
/* Internals (avert your eyes) */
- ERL_NIF_TERM bin_term;
void* ref_bin;
+ /* for future additions to be ABI compatible (same struct size) */
+ void* __spare__[2];
}ErlNifBinary;
#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
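
With bin_term removed from ErlNifBinary, enif_make_binary() always transfers ownership of the data to the returned term, and binaries at or below ERL_ONHEAP_BIN_LIMIT come back as heap binaries. A minimal, self-contained NIF sketch of that documented pattern (function name made up for illustration):

    #include <erl_nif.h>
    #include <string.h>

    /* After enif_make_binary() the data is owned by the returned term
     * (ref_bin is cleared), so the ErlNifBinary must not be released or
     * written to afterwards. */
    static ERL_NIF_TERM make_greeting(ErlNifEnv* env, int argc,
                                      const ERL_NIF_TERM argv[])
    {
        ErlNifBinary bin;
        (void)argc; (void)argv;

        if (!enif_alloc_binary(5, &bin))
            return enif_make_badarg(env);
        memcpy(bin.data, "hello", 5);
        return enif_make_binary(env, &bin);
    }
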
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 9117eb1f72..0d148ee048 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -180,6 +180,7 @@ void erts_init_port_data(Port *);
void erts_cleanup_port_data(Port *);
Uint erts_port_data_size(Port *);
ErlOffHeap *erts_port_data_offheap(Port *);
+Eterm erts_port_data_read(Port* prt);
#define ERTS_PORT_GET_CONNECTED(PRT) \
((Eterm) erts_atomic_read_nob(&(PRT)->connected))
@@ -195,26 +196,52 @@ struct erl_drv_port_data_lock {
Port *prt;
};
+ERTS_GLB_INLINE void erts_init_runq_port(Port *prt, ErtsRunQueue *runq);
+ERTS_GLB_INLINE void erts_set_runq_port(Port *prt, ErtsRunQueue *runq);
+ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_port(Port *prt);
ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void
+erts_init_runq_port(Port *prt, ErtsRunQueue *runq)
+{
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ erts_atomic_init_nob(&prt->run_queue, (erts_aint_t) runq);
+}
+
+ERTS_GLB_INLINE void
+erts_set_runq_port(Port *prt, ErtsRunQueue *runq)
+{
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ erts_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
+}
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_get_runq_port(Port *prt)
+{
+ ErtsRunQueue *runq;
+ runq = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue);
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ return runq;
+}
+
+
ERTS_GLB_INLINE ErtsRunQueue *
erts_port_runq(Port *prt)
{
ErtsRunQueue *rq1, *rq2;
- rq1 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue);
- if (!rq1)
- return NULL;
+ rq1 = erts_get_runq_port(prt);
while (1) {
erts_runq_lock(rq1);
- rq2 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue);
+ rq2 = erts_get_runq_port(prt);
if (rq1 == rq2)
return rq1;
erts_runq_unlock(rq1);
rq1 = rq2;
- if (!rq1)
- return NULL;
}
}
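
Since erts_port_runq() can no longer return NULL, its callers (see erl_port_task.c and erl_process.c below) drop their NULL handling. A minimal caller sketch under that assumption:

    /* The run queue is returned locked; the caller unlocks it when done.
     * No NULL check is needed: a port always has a run queue now. */
    ErtsRunQueue *runq = erts_port_runq(prt);
    /* ... operate on the port while holding its run-queue lock ... */
    erts_runq_unlock(runq);
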
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index a588477320..4a3671df0c 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -84,11 +84,10 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
#define LTTNG_DRIVER(TRACEPOINT, PP) do {} while(0)
#endif
-#define ERTS_LC_VERIFY_RQ(RQ, PP) \
- do { \
+#define ERTS_LC_VERIFY_RQ(RQ, PP) \
+ do { \
ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); \
- ERTS_LC_ASSERT((RQ) == ((ErtsRunQueue *) \
- erts_atomic_read_nob(&(PP)->run_queue))); \
+ ERTS_LC_ASSERT((RQ) == erts_get_runq_port((PP))); \
} while (0)
#define ERTS_PT_STATE_SCHEDULED 0
@@ -1520,19 +1519,15 @@ erts_port_task_schedule(Eterm id,
/* Enqueue port on run-queue */
runq = erts_port_runq(pp);
- if (!runq)
- ERTS_INTERNAL_ERROR("Missing run-queue");
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
ERTS_LC_ASSERT(runq != xrunq);
ERTS_LC_VERIFY_RQ(runq, pp);
if (xrunq) {
/* Emigrate port ... */
- erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
+ erts_set_runq_port(pp, xrunq);
erts_runq_unlock(runq);
runq = erts_port_runq(pp);
- if (!runq)
- ERTS_INTERNAL_ERROR("Missing run-queue");
}
enqueue_port(runq, pp);
@@ -1593,8 +1588,6 @@ erts_port_task_free_port(Port *pp)
ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
runq = erts_port_runq(pp);
- if (!runq)
- ERTS_INTERNAL_ERROR("Missing run-queue");
erts_port_task_sched_lock(&pp->sched);
flags = erts_atomic32_read_bor_relb(&pp->sched.flags,
ERTS_PTS_FLG_EXIT);
@@ -1805,7 +1798,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
ERTS_MSACC_POP_STATE_M();
- ASSERT(runq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue));
+ ASSERT(runq == erts_get_runq_port(pp));
active = finalize_exec(pp, &execq, processing_busy_q);
@@ -1831,11 +1824,10 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
}
else {
/* Emigrate port... */
- erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
+ erts_set_runq_port(pp, xrunq);
erts_runq_unlock(runq);
xrunq = erts_port_runq(pp);
- ASSERT(xrunq);
enqueue_port(xrunq, pp);
erts_runq_unlock(xrunq);
erts_notify_inc_runq(xrunq);
@@ -2069,7 +2061,7 @@ void
erts_enqueue_port(ErtsRunQueue *rq, Port *pp)
{
ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
- ASSERT(rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue));
+ ASSERT(rq == erts_get_runq_port(pp));
ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ);
enqueue_port(rq, pp);
}
@@ -2080,8 +2072,7 @@ erts_dequeue_port(ErtsRunQueue *rq)
Port *pp;
ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
pp = pop_port(rq);
- ASSERT(!pp
- || rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue));
+ ASSERT(!pp || rq == erts_get_runq_port(pp));
ASSERT(!pp || (erts_atomic32_read_nob(&pp->sched.flags)
& ERTS_PTS_FLG_IN_RUNQ));
return pp;
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index a807d60ec7..b19659f496 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -137,36 +137,6 @@ runq_got_work_to_execute(ErtsRunQueue *rq)
return runq_got_work_to_execute_flags(ERTS_RUNQ_FLGS_GET_NOB(rq));
}
-#undef RUNQ_READ_RQ
-#undef RUNQ_SET_RQ
-#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_atomic_read_nob((X)))
-#define RUNQ_SET_RQ(X, RQ) erts_atomic_set_nob((X), (erts_aint_t) (RQ))
-
-#ifdef DEBUG
-# if defined(ARCH_64)
-# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
- (RUNQ_SET_RQ((RQP), (0xdeadbeefdead0003LL | ((N) << 4)))
-# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
-do { \
- ASSERT((RQP) != NULL); \
- ASSERT(((((Uint) (RQP)) & ((Uint) 0x3))) == ((Uint) 0)); \
- ASSERT((((Uint) (RQP)) & ~((Uint) 0xffff)) != ((Uint) 0xdeadbeefdead0000LL));\
-} while (0)
-# else
-# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
- (RUNQ_SET_RQ((RQP), (0xdead0003 | ((N) << 4))))
-# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
-do { \
- ASSERT((RQP) != NULL); \
- ASSERT(((((UWord) (RQP)) & ((UWord) 1))) == ((UWord) 0)); \
- ASSERT((((UWord) (RQP)) & ~((UWord) 0xffff)) != ((UWord) 0xdead0000)); \
-} while (0)
-# endif
-#else
-# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N)
-# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP)
-#endif
-
const Process erts_invalid_process = {{ERTS_INVALID_PID}};
extern BeamInstr beam_apply[];
@@ -1538,6 +1508,20 @@ erts_proclist_destroy(ErtsProcList *plp)
proclist_destroy(plp);
}
+void
+erts_proclist_dump(fmtfn_t to, void *to_arg, ErtsProcList *plp)
+{
+ ErtsProcList *first = plp;
+
+ while (plp) {
+ erts_print(to, to_arg, "%T", plp->pid);
+ plp = plp->next;
+ if (plp == first)
+ break;
+ }
+ erts_print(to, to_arg, "\n");
+}
+
void *
erts_psd_set_init(Process *p, int ix, void *data)
{
@@ -3926,21 +3910,16 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
Port *prt;
prt = erts_dequeue_port(rq);
if (prt)
- RUNQ_SET_RQ(&prt->run_queue, c_rq);
+ erts_set_runq_port(prt, c_rq);
erts_runq_unlock(rq);
if (prt) {
- /* port might terminate while we have no lock... */
rq = erts_port_runq(prt);
- if (rq) {
- if (rq != c_rq)
- erts_exit(ERTS_ABORT_EXIT,
- "%s:%d:%s(): Internal error",
- __FILE__, __LINE__, __func__);
- erts_enqueue_port(c_rq, prt);
- if (!iflag)
- return; /* done */
- erts_runq_unlock(c_rq);
- }
+ if (rq != c_rq)
+ ERTS_INTERNAL_ERROR("Unexpected run-queue");
+ erts_enqueue_port(c_rq, prt);
+ if (!iflag)
+ return; /* done */
+ erts_runq_unlock(c_rq);
}
}
else {
@@ -3954,12 +3933,11 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
while (proc) {
erts_aint32_t state;
state = erts_atomic32_read_acqb(&proc->state);
- if (!(ERTS_PSFLG_BOUND & state)
- && (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) {
+ if (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state)
+ && erts_try_change_runq_proc(proc, c_rq)) {
ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio];
unqueue_process(rq, rpq, rqi, prio, prev_proc, proc);
erts_runq_unlock(rq);
- RUNQ_SET_RQ(&proc->run_queue, c_rq);
rq_locked = 0;
erts_runq_lock(c_rq);
@@ -4140,21 +4118,13 @@ evacuate_run_queue(ErtsRunQueue *rq,
while (prt) {
ErtsRunQueue *prt_rq;
prt = erts_dequeue_port(rq);
- RUNQ_SET_RQ(&prt->run_queue, to_rq);
+ erts_set_runq_port(prt, to_rq);
erts_runq_unlock(rq);
- /*
- * The port might terminate while
- * we have no lock on it...
- */
prt_rq = erts_port_runq(prt);
- if (prt_rq) {
- if (prt_rq != to_rq)
- erts_exit(ERTS_ABORT_EXIT,
- "%s:%d:%s() internal error\n",
- __FILE__, __LINE__, __func__);
- erts_enqueue_port(to_rq, prt);
- erts_runq_unlock(to_rq);
- }
+ if (prt_rq != to_rq)
+ ERTS_INTERNAL_ERROR("Unexpected run-queue");
+ erts_enqueue_port(to_rq, prt);
+ erts_runq_unlock(to_rq);
erts_runq_lock(rq);
prt = rq->ports.start;
}
@@ -4177,14 +4147,13 @@ evacuate_run_queue(ErtsRunQueue *rq,
while (proc) {
Process *real_proc;
int prio;
- erts_aint32_t max_qbit, qbit, real_state;
+ erts_aint32_t max_qbit, qbit;
prio = ERTS_PSFLGS_GET_PRQ_PRIO(state);
qbit = ((erts_aint32_t) 1) << prio;
if (!(state & ERTS_PSFLG_PROXY)) {
real_proc = proc;
- real_state = state;
}
else {
real_proc = erts_proc_lookup_raw(proc->common.id);
@@ -4192,7 +4161,6 @@ evacuate_run_queue(ErtsRunQueue *rq,
free_proxy_proc(proc);
goto handle_next_proc;
}
- real_state = erts_atomic32_read_acqb(&real_proc->state);
}
max_qbit = (state >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET);
@@ -4227,7 +4195,7 @@ evacuate_run_queue(ErtsRunQueue *rq,
goto handle_next_proc;
}
- if (ERTS_PSFLG_BOUND & real_state) {
+ if (!erts_try_change_runq_proc(proc, to_rq)) {
/* Bound processes get stuck here... */
proc->next = NULL;
if (sbpp->last)
@@ -4241,7 +4209,6 @@ evacuate_run_queue(ErtsRunQueue *rq,
erts_runq_unlock(rq);
to_rq = mp->prio[prio].runq;
- RUNQ_SET_RQ(&proc->run_queue, to_rq);
erts_runq_lock(to_rq);
enqueue_process(to_rq, prio, proc);
@@ -4309,14 +4276,13 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
proc = rpq->first;
while (proc) {
- erts_aint32_t state = erts_atomic32_read_acqb(&proc->state);
- if (!(ERTS_PSFLG_BOUND & state)) {
+ if (erts_try_change_runq_proc(proc, rq)) {
+ erts_aint32_t state = erts_atomic32_read_acqb(&proc->state);
/* Steal process */
int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio];
unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc);
erts_runq_unlock(vrq);
- RUNQ_SET_RQ(&proc->run_queue, rq);
erts_runq_lock(rq);
*rq_lockedp = 1;
@@ -4341,26 +4307,14 @@ no_procs:
if (vrq->ports.start) {
ErtsRunQueue *prt_rq;
Port *prt = erts_dequeue_port(vrq);
- RUNQ_SET_RQ(&prt->run_queue, rq);
+ erts_set_runq_port(prt, rq);
erts_runq_unlock(vrq);
-
- /*
- * The port might terminate while
- * we have no lock on it...
- */
-
prt_rq = erts_port_runq(prt);
- if (!prt_rq)
- return 0;
- else {
- if (prt_rq != rq)
- erts_exit(ERTS_ABORT_EXIT,
- "%s:%d:%s() internal error\n",
- __FILE__, __LINE__, __func__);
- *rq_lockedp = 1;
- erts_enqueue_port(rq, prt);
- return !0;
- }
+ if (prt_rq != rq)
+ ERTS_INTERNAL_ERROR("Unexpected run-queue");
+ *rq_lockedp = 1;
+ erts_enqueue_port(rq, prt);
+ return !0;
}
erts_runq_unlock(vrq);
@@ -6116,7 +6070,8 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
{
erts_aint32_t state;
Process *proxy;
- ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue);
+ int bound;
+ ErtsRunQueue *rq = erts_get_runq_proc(proc, &bound);
state = (ERTS_PSFLG_PROXY
| ERTS_PSFLG_IN_RUNQ
@@ -6129,7 +6084,7 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
proxy = prev_proxy;
ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
erts_atomic32_set_nob(&proxy->state, state);
- RUNQ_SET_RQ(&proc->run_queue, rq);
+ (void) erts_set_runq_proc(proc, rq, &bound);
}
else {
proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process));
@@ -6142,8 +6097,7 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
}
#endif
erts_atomic32_init_nob(&proxy->state, state);
- erts_atomic_init_nob(&proxy->run_queue,
- erts_atomic_read_nob(&proc->run_queue));
+ erts_init_runq_proc(proxy, rq, bound);
}
proxy->common.id = proc->common.id;
@@ -6334,18 +6288,21 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
default: {
ErtsRunQueue* runq;
+ int bound;
ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
|| enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
- runq = erts_get_runq_proc(p);
+ runq = erts_get_runq_proc(p, &bound);
- if (!(ERTS_PSFLG_BOUND & state)) {
+ if (!bound) {
ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
- if (new_runq) {
- RUNQ_SET_RQ(&p->run_queue, new_runq);
- runq = new_runq;
- }
+ if (new_runq) {
+ if (erts_try_change_runq_proc(p, new_runq))
+ runq = new_runq;
+ else
+ runq = erts_get_runq_proc(p, NULL);
+ }
}
ASSERT(runq);
@@ -11462,6 +11419,7 @@ typedef struct {
Process *proc;
erts_aint32_t state;
ErtsRunQueue *run_queue;
+ int bound;
} ErtsEarlyProcInit;
static void early_init_process_struct(void *varg, Eterm data)
@@ -11472,10 +11430,9 @@ static void early_init_process_struct(void *varg, Eterm data)
proc->common.id = make_internal_pid(data);
erts_atomic32_init_nob(&proc->dirty_state, 0);
proc->dirty_sys_tasks = NULL;
+ erts_init_runq_proc(proc, arg->run_queue, arg->bound);
erts_atomic32_init_relb(&proc->state, arg->state);
- RUNQ_SET_RQ(&proc->run_queue, arg->run_queue);
-
erts_proc_lock_init(proc); /* All locks locked */
}
@@ -11484,7 +11441,7 @@ static void early_init_process_struct(void *varg, Eterm data)
** Allocate process and find out where to place next process.
*/
static Process*
-alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
+alloc_process(ErtsRunQueue *rq, int bound, erts_aint32_t state)
{
ErtsEarlyProcInit init_arg;
Process *p;
@@ -11493,9 +11450,12 @@ alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
if (!p)
return NULL;
+ ASSERT(rq);
+
init_arg.proc = (Process *) p;
- init_arg.run_queue = rq;
init_arg.state = state;
+ init_arg.run_queue = rq;
+ init_arg.bound = bound;
ERTS_CT_ASSERT(offsetof(Process,common) == 0);
@@ -11530,6 +11490,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
+ int bound = 0;
Uint flags = 0;
ErtsRunQueue *rq = NULL;
Process *p;
@@ -11566,7 +11527,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
ASSERT(0 <= ix && ix < erts_no_run_queues);
rq = ERTS_RUNQ_IX(ix);
/* Unsupported feature... */
- state |= ERTS_PSFLG_BOUND;
+ bound = !0;
}
prio = (erts_aint32_t) so->priority;
}
@@ -11579,17 +11540,16 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
flags |= F_OFF_HEAP_MSGQ;
}
else if (so->flags & SPO_ON_HEAP_MSGQ) {
- state |= ERTS_PSFLG_ON_HEAP_MSGQ;
flags |= F_ON_HEAP_MSGQ;
}
ASSERT((flags & F_ON_HEAP_MSGQ) || (flags & F_OFF_HEAP_MSGQ));
if (!rq)
- rq = erts_get_runq_proc(parent);
+ rq = erts_get_runq_proc(parent, NULL);
- p = alloc_process(rq, state); /* All proc locks are locked by this thread
- on success */
+ p = alloc_process(rq, bound, state); /* All proc locks are locked by this thread
+ on success */
if (!p) {
erts_send_error_to_logger_str(parent->group_leader,
"Too many processes\n");
@@ -11597,11 +11557,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
goto error;
}
- ASSERT((erts_atomic32_read_nob(&p->state)
- & ERTS_PSFLG_ON_HEAP_MSGQ)
- || (erts_atomic32_read_nob(&p->state)
- & ERTS_PSFLG_OFF_HEAP_MSGQ));
-
#ifdef SHCOPY_SPAWN
arg_size = copy_shared_calculate(args, &info);
#else
@@ -11976,7 +11931,7 @@ void erts_init_empty_process(Process *p)
p->pending_exit.bp = NULL;
erts_proc_lock_init(p);
erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
- RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0));
+ erts_init_runq_proc(p, ERTS_RUNQ_IX(0), 0);
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
@@ -12298,7 +12253,7 @@ save_pending_exiter(Process *p, ErtsProcList *plp)
ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- rq = RUNQ_READ_RQ(&p->run_queue);
+ rq = erts_get_runq_proc(p, NULL);
ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
if (!plp)
@@ -13398,16 +13353,33 @@ stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg)
return yreg;
}
+static void print_current_process_info(fmtfn_t, void *to_arg, ErtsSchedulerData*);
+
/*
* Print scheduler information
*/
void
-erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
+erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp)
+{
int i;
erts_aint32_t flg;
- Process *p;
- erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+ switch (esdp->type) {
+ case ERTS_SCHED_NORMAL:
+ erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
+ break;
+ case ERTS_SCHED_DIRTY_CPU:
+ erts_print(to, to_arg, "=dirty_cpu_scheduler:%u\n",
+ (esdp->dirty_no + erts_no_schedulers));
+ break;
+ case ERTS_SCHED_DIRTY_IO:
+ erts_print(to, to_arg, "=dirty_io_scheduler:%u\n",
+ (esdp->dirty_no + erts_no_schedulers + erts_no_dirty_cpu_schedulers));
+ break;
+ default:
+ erts_print(to, to_arg, "=unknown_scheduler_type:%u\n", esdp->type);
+ break;
+ }
flg = erts_atomic32_read_dirty(&esdp->ssi->flags);
erts_print(to, to_arg, "Scheduler Sleep Info Flags: ");
@@ -13453,10 +13425,24 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
}
erts_print(to, to_arg, "\n");
- erts_print(to, to_arg, "Current Port: ");
- if (esdp->current_port)
- erts_print(to, to_arg, "%T", esdp->current_port->common.id);
- erts_print(to, to_arg, "\n");
+ if (esdp->type == ERTS_SCHED_NORMAL) {
+ erts_print(to, to_arg, "Current Port: ");
+ if (esdp->current_port)
+ erts_print(to, to_arg, "%T", esdp->current_port->common.id);
+ erts_print(to, to_arg, "\n");
+
+ erts_print_run_queue_info(to, to_arg, esdp->run_queue);
+ }
+
+ /* This *MUST* be the last information in the scheduler block */
+ print_current_process_info(to, to_arg, esdp);
+}
+
+void erts_print_run_queue_info(fmtfn_t to, void *to_arg,
+ ErtsRunQueue *run_queue)
+{
+ erts_aint32_t flg;
+ int i;
for (i = 0; i < ERTS_NO_PROC_PRIO_LEVELS; i++) {
erts_print(to, to_arg, "Run Queue ");
@@ -13478,12 +13464,12 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
break;
}
erts_print(to, to_arg, "Length: %d\n",
- erts_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len));
+ erts_atomic32_read_dirty(&run_queue->procs.prio_info[i].len));
}
erts_print(to, to_arg, "Run Queue Port Length: %d\n",
- erts_atomic32_read_dirty(&esdp->run_queue->ports.info.len));
+ erts_atomic32_read_dirty(&run_queue->ports.info.len));
- flg = erts_atomic32_read_dirty(&esdp->run_queue->flags);
+ flg = erts_atomic32_read_dirty(&run_queue->flags);
erts_print(to, to_arg, "Run Queue Flags: ");
for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) {
erts_aint32_t chk = (1 << i);
@@ -13550,9 +13536,15 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
}
}
erts_print(to, to_arg, "\n");
+}
+
+
+static void print_current_process_info(fmtfn_t to, void *to_arg,
+ ErtsSchedulerData* esdp)
+{
+ Process *p = esdp->current_process;
+ erts_aint32_t flg;
- /* This *MUST* to be the last information in scheduler block */
- p = esdp->current_process;
erts_print(to, to_arg, "Current Process: ");
if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) {
flg = erts_atomic32_read_dirty(&p->state);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 55c020d47b..e585fabb51 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -244,6 +244,9 @@ extern int erts_dio_sched_thread_suggested_stack_size;
(erts_aint32_t) (MSK), \
(erts_aint32_t) (FLGS)))
+#define ERTS_RUNQ_POINTER_MASK (~((erts_aint_t) 3))
+#define ERTS_RUNQ_BOUND_FLAG ((erts_aint_t) 1)
+
typedef enum {
ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED,
ERTS_SCHDLR_SSPND_DONE_NMSCHED_BLOCKED,
@@ -1168,14 +1171,14 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(9)
#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(10)
#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(11)
-#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(12)
+/* #define ERTS_PSFLG_ ERTS_PSFLG_BIT(12) */
#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(13)
#define ERTS_PSFLG_ACTIVE_SYS ERTS_PSFLG_BIT(14)
#define ERTS_PSFLG_RUNNING_SYS ERTS_PSFLG_BIT(15)
#define ERTS_PSFLG_PROXY ERTS_PSFLG_BIT(16)
#define ERTS_PSFLG_DELAYED_SYS ERTS_PSFLG_BIT(17)
#define ERTS_PSFLG_OFF_HEAP_MSGQ ERTS_PSFLG_BIT(18)
-#define ERTS_PSFLG_ON_HEAP_MSGQ ERTS_PSFLG_BIT(19)
+/* #define ERTS_PSFLG_ ERTS_PSFLG_BIT(19) */
#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(20)
#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(21)
#define ERTS_PSFLG_DIRTY_ACTIVE_SYS ERTS_PSFLG_BIT(22)
@@ -1546,6 +1549,7 @@ Uint64 erts_step_proc_interval(void);
ErtsProcList *erts_proclist_create(Process *);
ErtsProcList *erts_proclist_copy(ErtsProcList *);
void erts_proclist_destroy(ErtsProcList *);
+void erts_proclist_dump(fmtfn_t to, void *to_arg, ErtsProcList*);
ERTS_GLB_INLINE int erts_proclist_same(ErtsProcList *, Process *);
ERTS_GLB_INLINE void erts_proclist_store_first(ErtsProcList **, ErtsProcList *);
@@ -1792,6 +1796,7 @@ void erts_stack_dump(fmtfn_t to, void *to_arg, Process *);
void erts_limited_stack_trace(fmtfn_t to, void *to_arg, Process *);
void erts_program_counter_info(fmtfn_t to, void *to_arg, Process *);
void erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp);
+void erts_print_run_queue_info(fmtfn_t, void *to_arg, ErtsRunQueue*);
void erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
void erts_dump_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
@@ -2167,7 +2172,12 @@ ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp);
ERTS_GLB_INLINE Process *erts_get_current_process(void);
ERTS_GLB_INLINE Eterm erts_get_current_pid(void);
ERTS_GLB_INLINE Uint erts_get_scheduler_id(void);
-ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p);
+ERTS_GLB_INLINE void erts_init_runq_proc(Process *p, ErtsRunQueue *rq, int bnd);
+ERTS_GLB_INLINE ErtsRunQueue *erts_set_runq_proc(Process *p, ErtsRunQueue *rq, int *boundp);
+ERTS_GLB_INLINE int erts_try_change_runq_proc(Process *p, ErtsRunQueue *rq);
+ERTS_GLB_INLINE ErtsRunQueue *erts_bind_runq_proc(Process *p, int bind);
+ERTS_GLB_INLINE int erts_proc_runq_is_bound(Process *p);
+ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p, int *boundp);
ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp);
ERTS_GLB_INLINE void erts_runq_lock(ErtsRunQueue *rq);
ERTS_GLB_INLINE int erts_runq_trylock(ErtsRunQueue *rq);
@@ -2247,11 +2257,143 @@ Uint erts_get_scheduler_id(void)
return esdp ? esdp->no : (Uint) 0;
}
+/**
+ * Init run-queue of process.
+ *
+ * @param p[in,out] Process
+ * @param rq[in] Run-queue that process will be assigned to
+ * @param bnd[in] Bind process to the run-queue if non-zero.
+ */
+
+ERTS_GLB_INLINE void
+erts_init_runq_proc(Process *p, ErtsRunQueue *rq, int bnd)
+{
+ erts_aint_t rqint = (erts_aint_t) rq;
+ if (bnd)
+ rqint |= ERTS_RUNQ_BOUND_FLAG;
+ erts_atomic_init_nob(&p->run_queue, rqint);
+}
+
+/**
+ * Forcibly set run-queue of process.
+ *
+ * @param p[in,out] Process
+ * @param rq[in] Run-queue that process will be assigned to
+ * @param bndp[in,out] Pointer to integer. On input non-zero
+ * value causes the process to be bound to
+ * the run-queue. On output, it indicates
+ * whether the process was previously
+ * bound or not.
+ * @return Previous run-queue.
+ */
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_set_runq_proc(Process *p, ErtsRunQueue *rq, int *bndp)
+{
+ erts_aint_t rqint = (erts_aint_t) rq;
+ ASSERT(bndp);
+ if (*bndp)
+ rqint |= ERTS_RUNQ_BOUND_FLAG;
+ rqint = erts_atomic_xchg_nob(&p->run_queue, rqint);
+ *bndp = (int) (rqint & ERTS_RUNQ_BOUND_FLAG);
+ return (ErtsRunQueue *) (rqint & ERTS_RUNQ_POINTER_MASK);
+}
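ERTS_RUNQ_BOUND_FLAG and ERTS_RUNQ_POINTER_MASK rely on run-queue structs being word-aligned, so the low bit of p->run_queue can carry the bound flag while the rest of the word remains a valid pointer. A minimal C11 sketch of the same encode-and-swap step, using invented names rather than the erts atomics API:

    #include <stdatomic.h>
    #include <stdint.h>

    #define BOUND_FLAG   ((uintptr_t) 1)
    #define POINTER_MASK (~(uintptr_t) 3)

    struct run_queue;                   /* placeholder for ErtsRunQueue */
    static _Atomic uintptr_t proc_runq; /* stand-in for p->run_queue    */

    /* Pack pointer and bound flag into one word, swap it in atomically,
     * and report the previous assignment back to the caller. */
    static struct run_queue *set_runq(struct run_queue *rq, int *boundp)
    {
        uintptr_t word = (uintptr_t) rq;
        if (*boundp)
            word |= BOUND_FLAG;
        word = atomic_exchange(&proc_runq, word);
        *boundp = (int) (word & BOUND_FLAG);        /* was it bound before? */
        return (struct run_queue *) (word & POINTER_MASK);
    }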
+
+/**
+ * Try to change run-queue assignment of a process.
+ *
+ * @param p[in,out] Process
+ * @param rq[in] Run-queue that process will be assigned to
+ * @return Non-zero if the run-queue assignment was
+ * successfully changed.
+ */
+
+ERTS_GLB_INLINE int
+erts_try_change_runq_proc(Process *p, ErtsRunQueue *rq)
+{
+ erts_aint_t old_rqint, new_rqint;
+
+ new_rqint = (erts_aint_t) rq;
+ old_rqint = (erts_aint_t) erts_atomic_read_nob(&p->run_queue);
+ while (1) {
+ erts_aint_t act_rqint;
+
+ if (old_rqint & ERTS_RUNQ_BOUND_FLAG)
+ return 0;
+
+ act_rqint = erts_atomic_cmpxchg_nob(&p->run_queue,
+ new_rqint,
+ old_rqint);
+ if (act_rqint == old_rqint)
+ return !0;
+ old_rqint = act_rqint;
+ }
+}
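The loop above only migrates an unbound process: a set bound bit aborts the attempt, and losing a race against another thread means re-reading the word and retrying. A hedged standalone version of the same retry loop with C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    #define BOUND_FLAG ((uintptr_t) 1)

    /* Returns non-zero if the run-queue word was changed, zero if the
     * process turned out to be bound. */
    static int try_change_runq(_Atomic uintptr_t *runq_word, void *new_rq)
    {
        uintptr_t expected = atomic_load_explicit(runq_word, memory_order_relaxed);
        for (;;) {
            if (expected & BOUND_FLAG)
                return 0;               /* bound: leave the assignment alone */
            /* On failure, compare_exchange_weak reloads 'expected' with the
             * current value, so the next iteration retries against it. */
            if (atomic_compare_exchange_weak_explicit(runq_word, &expected,
                                                      (uintptr_t) new_rq,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                return 1;
        }
    }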
+
+/**
+ * Bind or unbind process to/from the currently used run-queue.
+ *
+ * @param p Process
+ * @param bind Bind if non-zero; otherwise unbind
+ * @return Pointer to previously bound run-queue,
+ * or NULL if previously unbound
+ */
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_bind_runq_proc(Process *p, int bind)
+{
+ erts_aint_t rqint;
+ if (bind)
+ rqint = erts_atomic_read_bor_nob(&p->run_queue,
+ ERTS_RUNQ_BOUND_FLAG);
+ else
+ rqint = erts_atomic_read_band_nob(&p->run_queue,
+ ~ERTS_RUNQ_BOUND_FLAG);
+ if (rqint & ERTS_RUNQ_BOUND_FLAG)
+ return (ErtsRunQueue *) (rqint & ERTS_RUNQ_POINTER_MASK);
+ else
+ return NULL;
+}
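Binding and unbinding touch only the flag bit: an atomic fetch-OR or fetch-AND flips it, and the old word returned by that operation tells the caller which run queue the process had been bound to, if any. Under the same assumptions as the sketches above:

    #include <stdatomic.h>
    #include <stdint.h>

    #define BOUND_FLAG   ((uintptr_t) 1)
    #define POINTER_MASK (~(uintptr_t) 3)

    struct run_queue;

    /* Returns the run queue the process was bound to before the call,
     * or NULL if it was not bound. */
    static struct run_queue *bind_runq(_Atomic uintptr_t *runq_word, int bind)
    {
        uintptr_t old = bind
            ? atomic_fetch_or(runq_word, BOUND_FLAG)
            : atomic_fetch_and(runq_word, ~BOUND_FLAG);
        return (old & BOUND_FLAG)
            ? (struct run_queue *) (old & POINTER_MASK)
            : NULL;
    }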
+
+/**
+ * Determine whether a process is bound to a run-queue or not.
+ *
+ * @return Returns a non-zero value if bound,
+ * and zero if not bound.
+ */
+
+ERTS_GLB_INLINE int
+erts_proc_runq_is_bound(Process *p)
+{
+ erts_aint_t rqint = erts_atomic_read_nob(&p->run_queue);
+ return (int) (rqint & ERTS_RUNQ_BOUND_FLAG);
+}
+
+/**
+ * Get run-queue of process.
+ *
+ * @param p[in,out] Process
+ * @param bndp[out] Pointer to integer. If non-NULL pointer,
+ * the integer will be set to a non-zero
+ * value if the process is bound to the
+ * run-queue.
+ * @return Pointer to the normal run-queue that
+ * the process currently is assigned to.
+ * A process is always assigned to a
+ * normal run-queue.
+ */
+
ERTS_GLB_INLINE ErtsRunQueue *
-erts_get_runq_proc(Process *p)
+erts_get_runq_proc(Process *p, int *bndp)
{
- ASSERT(ERTS_AINT_NULL != erts_atomic_read_nob(&p->run_queue));
- return (ErtsRunQueue *) erts_atomic_read_nob(&p->run_queue);
+ erts_aint_t rqint = erts_atomic_read_nob(&p->run_queue);
+ ErtsRunQueue *rq;
+ if (bndp)
+ *bndp = (int) (rqint & ERTS_RUNQ_BOUND_FLAG);
+ rqint &= ERTS_RUNQ_POINTER_MASK;
+ rq = (ErtsRunQueue *) rqint;
+ ASSERT(rq);
+ return rq;
}
ERTS_GLB_INLINE ErtsRunQueue *
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index f562fc961b..05e7bcdea2 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -78,8 +78,17 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg)
Process *p = erts_pix2proc(i);
if (p && p->i != ENULL) {
erts_aint32_t state = erts_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC)))
- dump_process_info(to, to_arg, p);
+ if (state & ERTS_PSFLG_EXITING)
+ continue;
+ if (state & ERTS_PSFLG_GC) {
+ ErtsSchedulerData *sdp = erts_get_scheduler_data();
+ if (!sdp || p != sdp->current_process)
+ continue;
+
+ /* We want to dump the garbing process that caused the dump */
+ }
+
+ dump_process_info(to, to_arg, p);
}
}
@@ -135,9 +144,12 @@ dump_process_info(fmtfn_t to, void *to_arg, Process *p)
ErtsMessage* mp;
int yreg = -1;
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE)
+ return;
+
ERTS_MSGQ_MV_INQ2PRIVQ(p);
- if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) {
+ if (p->msg.first) {
erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id);
for (mp = p->msg.first; mp != NULL; mp = mp->next) {
Eterm mesg = ERL_MESSAGE_TERM(mp);
@@ -152,38 +164,34 @@ dump_process_info(fmtfn_t to, void *to_arg, Process *p)
}
}
- if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
- if (p->dictionary) {
- erts_print(to, to_arg, "=proc_dictionary:%T\n", p->common.id);
- erts_deep_dictionary_dump(to, to_arg,
- p->dictionary, dump_element_nl);
- }
+ if (p->dictionary) {
+ erts_print(to, to_arg, "=proc_dictionary:%T\n", p->common.id);
+ erts_deep_dictionary_dump(to, to_arg,
+ p->dictionary, dump_element_nl);
}
- if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
- erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id);
- for (sp = p->stop; sp < STACK_START(p); sp++) {
- yreg = stack_element_dump(to, to_arg, sp, yreg);
- }
+ erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id);
+ for (sp = p->stop; sp < STACK_START(p); sp++) {
+ yreg = stack_element_dump(to, to_arg, sp, yreg);
+ }
- erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id);
- for (sp = p->stop; sp < STACK_START(p); sp++) {
- Eterm term = *sp;
-
- if (!is_catch(term) && !is_CP(term)) {
- heap_dump(to, to_arg, term);
- }
- }
- for (mp = p->msg.first; mp != NULL; mp = mp->next) {
- Eterm mesg = ERL_MESSAGE_TERM(mp);
- if (is_value(mesg))
- heap_dump(to, to_arg, mesg);
- mesg = ERL_MESSAGE_TOKEN(mp);
- heap_dump(to, to_arg, mesg);
- }
- if (p->dictionary) {
- erts_deep_dictionary_dump(to, to_arg, p->dictionary, heap_dump);
- }
+ erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id);
+ for (sp = p->stop; sp < STACK_START(p); sp++) {
+ Eterm term = *sp;
+
+ if (!is_catch(term) && !is_CP(term)) {
+ heap_dump(to, to_arg, term);
+ }
+ }
+ for (mp = p->msg.first; mp != NULL; mp = mp->next) {
+ Eterm mesg = ERL_MESSAGE_TERM(mp);
+ if (is_value(mesg))
+ heap_dump(to, to_arg, mesg);
+ mesg = ERL_MESSAGE_TOKEN(mp);
+ heap_dump(to, to_arg, mesg);
+ }
+ if (p->dictionary) {
+ erts_deep_dictionary_dump(to, to_arg, p->dictionary, heap_dump);
}
}
@@ -1001,8 +1009,6 @@ erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg)
erts_print(to, to_arg, "SUSPENDED"); break;
case ERTS_PSFLG_GC:
erts_print(to, to_arg, "GC"); break;
- case ERTS_PSFLG_BOUND:
- erts_print(to, to_arg, "BOUND"); break;
case ERTS_PSFLG_TRAP_EXIT:
erts_print(to, to_arg, "TRAP_EXIT"); break;
case ERTS_PSFLG_ACTIVE_SYS:
@@ -1015,8 +1021,6 @@ erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg)
erts_print(to, to_arg, "DELAYED_SYS"); break;
case ERTS_PSFLG_OFF_HEAP_MSGQ:
erts_print(to, to_arg, "OFF_HEAP_MSGQ"); break;
- case ERTS_PSFLG_ON_HEAP_MSGQ:
- erts_print(to, to_arg, "ON_HEAP_MSGQ"); break;
case ERTS_PSFLG_DIRTY_CPU_PROC:
erts_print(to, to_arg, "DIRTY_CPU_PROC"); break;
case ERTS_PSFLG_DIRTY_IO_PROC:
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 4b996d8fc2..d2495479ab 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2783,24 +2783,28 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
ASSERT(0);
}
- /* Only remove tracer on self() and ports */
+ /* Only remove the tracer from self() or ports, and only on a normal scheduler */
if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsProcLocks c_p_xlocks = 0;
- if (is_internal_pid(t_p->id)) {
- ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
- if (c_p_locks != ERTS_PROC_LOCKS_ALL) {
- c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL;
- if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) {
- erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN);
- erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (is_internal_pid(t_p->id)) {
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN);
+ if (c_p_locks != ERTS_PROC_LOCKS_ALL) {
+ c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL;
+ if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) {
+ erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
}
}
- }
- erts_tracer_replace(t_p, erts_tracer_nil);
- t_p->trace_flags &= ~TRACEE_FLAGS;
- if (c_p_xlocks)
- erts_proc_unlock(c_p, c_p_xlocks);
+ erts_tracer_replace(t_p, erts_tracer_nil);
+ t_p->trace_flags &= ~TRACEE_FLAGS;
+
+ if (c_p_xlocks)
+ erts_proc_unlock(c_p, c_p_xlocks);
+ }
}
return 0;
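The restructured branch keeps the old lock-upgrade idiom: while holding the main lock, try to take the remaining process locks without blocking, and only if that fails drop the extra locks already held and re-acquire the missing ones in the canonical lock order. A rough pthread illustration of the pattern (the three mutexes and their names are invented, not the erts process-lock set):

    #include <pthread.h>

    /* Fixed lock order: lock_main -> lock_status -> lock_msgq. */
    static pthread_mutex_t lock_main   = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lock_status = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lock_msgq   = PTHREAD_MUTEX_INITIALIZER;

    /* Caller holds lock_main and lock_msgq and also needs lock_status. */
    static void upgrade_to_all_locks(void)
    {
        if (pthread_mutex_trylock(&lock_status) == 0)
            return;                         /* got it without blocking */

        /* Blocking on lock_status while holding lock_msgq would violate the
         * lock order, so drop the later lock (keeping lock_main) and take
         * the missing ones back in order. */
        pthread_mutex_unlock(&lock_msgq);
        pthread_mutex_lock(&lock_status);
        pthread_mutex_lock(&lock_msgq);
    }

Keeping the first lock in the order held throughout is what makes the blocking fallback deadlock-free.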
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 2c1b7871c4..3e8f6263bb 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -298,7 +298,6 @@ static Port *create_port(char *name,
erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED;
erts_aint32_t x_pts_flgs = 0;
- ErtsRunQueue *runq;
if (!driver_lock) {
/* Align size for mutex following port struct */
port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
@@ -347,11 +346,16 @@ static Port *create_port(char *name,
p += sizeof(erts_mtx_t);
state |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
}
- if (erts_get_scheduler_data())
- runq = erts_get_runq_current(NULL);
- else
- runq = ERTS_RUNQ_IX(0);
- erts_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
+
+ {
+ ErtsRunQueue *runq;
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (esdp)
+ runq = erts_get_runq_current(esdp);
+ else
+ runq = ERTS_RUNQ_IX(0);
+ erts_init_runq_port(prt, runq);
+ }
prt->xports = NULL;
@@ -5075,6 +5079,93 @@ static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
erts_print(prtd->to, prtd->arg, "%T", lnk->pid);
}
+static void dump_port_state(fmtfn_t to, void *arg, erts_aint32_t state)
+{
+ erts_aint32_t rest;
+ int unknown = 0;
+ char delim = ' ';
+
+ erts_print(to, arg, "State:");
+
+ rest = state;
+ while (rest) {
+ erts_aint32_t chk = (rest ^ (rest-1)) & rest; /* lowest set bit */
+ char* s;
+
+ rest &= ~chk;
+ switch (chk) {
+ case ERTS_PORT_SFLG_CONNECTED: s = "CONNECTED"; break;
+ case ERTS_PORT_SFLG_EXITING: s = "EXITING"; break;
+ case ERTS_PORT_SFLG_DISTRIBUTION: s = "DISTR"; break;
+ case ERTS_PORT_SFLG_BINARY_IO: s = "BINARY_IO"; break;
+ case ERTS_PORT_SFLG_SOFT_EOF: s = "SOFT_EOF"; break;
+ case ERTS_PORT_SFLG_CLOSING: s = "CLOSING"; break;
+ case ERTS_PORT_SFLG_SEND_CLOSED: s = "SEND_CLOSED"; break;
+ case ERTS_PORT_SFLG_LINEBUF_IO: s = "LINEBUF_IO"; break;
+ case ERTS_PORT_SFLG_FREE: s = "FREE"; break;
+ case ERTS_PORT_SFLG_INITIALIZING: s = "INITIALIZING"; break;
+ case ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK: s = "PORT_LOCK"; break;
+ case ERTS_PORT_SFLG_INVALID: s = "INVALID"; break;
+ case ERTS_PORT_SFLG_HALT: s = "HALT"; break;
+#ifdef DEBUG
+ case ERTS_PORT_SFLG_PORT_DEBUG: s = "DEBUG"; break;
+#endif
+ default:
+ unknown = 1;
+ continue;
+ }
+ erts_print(to, arg, "%c%s", delim, s);
+ delim = '|';
+ }
+ if (unknown || !state)
+ erts_print(to, arg, "%c0x%x\n", delim, state);
+ else
+ erts_print(to, arg, "\n");
+}
+
+static void dump_port_task_flags(fmtfn_t to, void *arg, Port* p)
+{
+ erts_aint32_t flags = erts_atomic32_read_nob(&p->sched.flags);
+ erts_aint32_t unknown = 0;
+ char delim = ' ';
+
+ if (!flags)
+ return;
+
+ erts_print(to, arg, "Task Flags:");
+
+ while (flags) {
+ erts_aint32_t chk = (flags ^ (flags-1)) & flags; /* lowest set bit */
+ char* s;
+
+ flags &= ~chk;
+ switch (chk) {
+ case ERTS_PTS_FLG_IN_RUNQ: s = "IN_RUNQ"; break;
+ case ERTS_PTS_FLG_EXEC: s = "EXEC"; break;
+ case ERTS_PTS_FLG_HAVE_TASKS: s = "HAVE_TASKS"; break;
+ case ERTS_PTS_FLG_EXIT: s = "EXIT"; break;
+ case ERTS_PTS_FLG_BUSY_PORT: s = "BUSY_PORT"; break;
+ case ERTS_PTS_FLG_BUSY_PORT_Q: s = "BUSY_Q"; break;
+ case ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q: s = "CHK_UNSET_BUSY_Q"; break;
+ case ERTS_PTS_FLG_HAVE_BUSY_TASKS: s = "BUSY_TASKS"; break;
+ case ERTS_PTS_FLG_HAVE_NS_TASKS: s = "NS_TASKS"; break;
+ case ERTS_PTS_FLG_PARALLELISM: s = "PARALLELISM"; break;
+ case ERTS_PTS_FLG_FORCE_SCHED: s = "FORCE_SCHED"; break;
+ case ERTS_PTS_FLG_EXITING: s = "EXITING"; break;
+ case ERTS_PTS_FLG_EXEC_IMM: s = "EXEC_IMM"; break;
+ default:
+ unknown |= chk;
+ continue;
+ }
+ erts_print(to, arg, "%c%s", delim, s);
+ delim = '|';
+ }
+ if (unknown)
+ erts_print(to, arg, "%cUNKNOWN(0x%x)\n", delim, unknown);
+ else
+ erts_print(to, arg, "\n");
+}
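Both dump helpers above walk a flag word by repeatedly isolating the lowest set bit with (x ^ (x - 1)) & x, clearing it, and collecting any bit they cannot name into an "unknown" bucket. A self-contained sketch of the same loop with arbitrary flag names:

    #include <stdint.h>
    #include <stdio.h>

    #define FLG_CONNECTED (1u << 0)
    #define FLG_EXITING   (1u << 1)
    #define FLG_CLOSING   (1u << 2)

    static void print_flags(uint32_t flags)
    {
        uint32_t unknown = 0;
        char delim = ' ';

        printf("Flags:");
        while (flags) {
            uint32_t bit = (flags ^ (flags - 1)) & flags;  /* lowest set bit */
            const char *name;

            flags &= ~bit;                                 /* clear it */
            switch (bit) {
            case FLG_CONNECTED: name = "CONNECTED"; break;
            case FLG_EXITING:   name = "EXITING";   break;
            case FLG_CLOSING:   name = "CLOSING";   break;
            default: unknown |= bit; continue;
            }
            printf("%c%s", delim, name);
            delim = '|';
        }
        if (unknown)
            printf("%cUNKNOWN(0x%x)", delim, unknown);
        printf("\n");
    }

    int main(void)
    {
        /* Prints: Flags: CONNECTED|CLOSING|UNKNOWN(0x80) */
        print_flags(FLG_CONNECTED | FLG_CLOSING | (1u << 7));
        return 0;
    }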
+
void
print_port_info(Port *p, fmtfn_t to, void *arg)
{
@@ -5084,6 +5175,8 @@ print_port_info(Port *p, fmtfn_t to, void *arg)
return;
erts_print(to, arg, "=port:%T\n", p->common.id);
+ dump_port_state(to, arg, state);
+ dump_port_task_flags(to, arg, p);
erts_print(to, arg, "Slot: %d\n", internal_port_index(p->common.id));
if (state & ERTS_PORT_SFLG_CONNECTED) {
erts_print(to, arg, "Connected: %T", ERTS_PORT_GET_CONNECTED(p));
@@ -5106,6 +5199,10 @@ print_port_info(Port *p, fmtfn_t to, void *arg)
erts_doforall_monitors(ERTS_P_MONITORS(p), &prt_one_monitor, &prtd);
erts_print(to, arg, "\n");
}
+ if (p->suspended) {
+ erts_print(to, arg, "Suspended: ");
+ erts_proclist_dump(to, arg, p->suspended);
+ }
if (p->common.u.alive.reg != NULL)
erts_print(to, arg, "Registered as: %T\n", p->common.u.alive.reg->name);
@@ -5123,6 +5220,14 @@ print_port_info(Port *p, fmtfn_t to, void *arg)
} else {
erts_print(to, arg, "Port controls linked-in driver: %s\n",p->name);
}
+ erts_print(to, arg, "Input: %beu\n", p->bytes_in);
+ erts_print(to, arg, "Output: %beu\n", p->bytes_out);
+ erts_print(to, arg, "Queue: %beu\n", erts_ioq_size(&p->ioq));
+ {
+ Eterm port_data = erts_port_data_read(p);
+ if (port_data != am_undefined)
+ erts_print(to, arg, "Port Data: %T\n", port_data);
+ }
}
void
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index fe9f1c7606..4bf60619ba 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -3152,6 +3152,9 @@ tailrecur_ne:
int cmp;
byte* a_ptr;
byte* b_ptr;
+ if (eq_only && a_size != b_size) {
+ RETURN_NEQ(a_size - b_size);
+ }
ERTS_GET_BINARY_BYTES(a, a_ptr, a_bitoffs, a_bitsize);
ERTS_GET_BINARY_BYTES(b, b_ptr, b_bitoffs, b_bitsize);
if ((a_bitsize | b_bitsize | a_bitoffs | b_bitoffs) == 0) {
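The added lines are an equality-only shortcut: when the comparison is made purely to test equality, binaries of different sizes can never be equal, so the byte-wise walk can be skipped entirely. A hedged sketch of the idea on plain buffers rather than Eterm binaries:

    #include <stddef.h>
    #include <string.h>

    /* Returns <0, 0 or >0 like memcmp. When eq_only is set the caller only
     * cares about equality, so a size mismatch settles the answer before
     * any payload bytes are touched. */
    static int compare_bufs(const unsigned char *a, size_t a_size,
                            const unsigned char *b, size_t b_size,
                            int eq_only)
    {
        size_t min = a_size < b_size ? a_size : b_size;
        int cmp;

        if (eq_only && a_size != b_size)
            return a_size < b_size ? -1 : 1;

        cmp = memcmp(a, b, min);
        if (cmp != 0 || a_size == b_size)
            return cmp;
        return a_size < b_size ? -1 : 1;
    }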
diff --git a/erts/emulator/nifs/unix/unix_prim_file.c b/erts/emulator/nifs/unix/unix_prim_file.c
index 57c8ef62e1..4a6c476882 100644
--- a/erts/emulator/nifs/unix/unix_prim_file.c
+++ b/erts/emulator/nifs/unix/unix_prim_file.c
@@ -125,24 +125,11 @@ static int open_file_type_check(const efile_path_t *path, int fd) {
* immediately in a read within the call, but the new implementation
* never does that. */
return 1;
- } else {
- /* The old driver tolerated opening /dev/null despite the "no devices"
- * limitation. It provided no explanation for this but we still need
- * to match the behavior. We're checking through stat(2) instead of
- * comparing the name to account for links. */
- struct stat null_device_info;
- int is_dev_null;
-
- is_dev_null = (stat("/dev/null", &null_device_info) == 0);
- is_dev_null &= (file_info.st_ino == null_device_info.st_ino);
- is_dev_null &= (file_info.st_dev == null_device_info.st_dev);
-
- if(is_dev_null) {
- return 1;
- }
}
- if(!S_ISREG(file_info.st_mode)) {
+ /* Allow everything that isn't a directory, and error out on the next call
+ * if it's unsupported. */
+ if(S_ISDIR(file_info.st_mode)) {
return 0;
}
diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl
index 4bc1838139..a3c3daac15 100644
--- a/erts/emulator/test/binary_SUITE.erl
+++ b/erts/emulator/test/binary_SUITE.erl
@@ -262,6 +262,7 @@ test_deep_bitstr(List) ->
{Bin,bitstring_to_list(Bin)}.
bad_list_to_binary(Config) when is_list(Config) ->
+ test_bad_bin(<<1:1>>),
test_bad_bin(atom),
test_bad_bin(42),
test_bad_bin([1|2]),
diff --git a/erts/emulator/test/efile_SUITE.erl b/erts/emulator/test/efile_SUITE.erl
index 16d581a567..821381bf0d 100644
--- a/erts/emulator/test/efile_SUITE.erl
+++ b/erts/emulator/test/efile_SUITE.erl
@@ -46,14 +46,14 @@ iter_max_files_1(Config) ->
DataDir = proplists:get_value(data_dir,Config),
TestFile = filename:join(DataDir, "existing_file"),
N = 10,
- %% Run on a different node in order to set the max ports
+ %% Run on a different node in order to make the test more stable.
Dir = filename:dirname(code:which(?MODULE)),
{ok,Node} = test_server:start_node(test_iter_max_files,slave,
- [{args,"+Q 1524 -pa " ++ Dir}]),
+ [{args,"-pa " ++ Dir}]),
L = rpc:call(Node,?MODULE,do_iter_max_files,[N, TestFile]),
test_server:stop_node(Node),
io:format("Number of files opened in each test:~n~w\n", [L]),
- all_equal(L),
+ verify_max_files(L),
Head = hd(L),
if Head >= 2 -> ok;
true -> ct:fail(too_few_files)
@@ -65,12 +65,15 @@ do_iter_max_files(N, Name) when N > 0 ->
do_iter_max_files(_, _) ->
[].
-all_equal([E, E| T]) ->
- all_equal([E| T]);
-all_equal([_]) ->
- ok;
-all_equal([]) ->
- ok.
+%% The attempts shouldn't vary too much; we used to require that they were all
+%% exactly equal, but after we reimplemented the file driver as a NIF we
+%% noticed that the only reason it was stable on Darwin was because the port
+%% limit was hit before ulimit.
+verify_max_files(Attempts) ->
+ N = length(Attempts),
+ Mean = lists:sum(Attempts) / N,
+ Variance = lists:sum([(X - Mean) * (X - Mean) || X <- Attempts]) / N,
+ true = math:sqrt(Variance) =< 1 + (Mean / 1000).
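verify_max_files/1 replaces the old all-equal requirement with a tolerance: the standard deviation of the per-iteration counts must not exceed 1 plus 0.1% of their mean. The same criterion as a standalone C sketch, with made-up sample counts:

    #include <math.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Returns 1 if the attempts are stable enough: stddev <= 1 + mean/1000. */
    static int verify_max_files(const int *attempts, size_t n)
    {
        double mean = 0.0, var = 0.0;
        size_t i;

        for (i = 0; i < n; i++)
            mean += attempts[i];
        mean /= (double) n;

        for (i = 0; i < n; i++)
            var += (attempts[i] - mean) * (attempts[i] - mean);
        var /= (double) n;

        return sqrt(var) <= 1.0 + mean / 1000.0;
    }

    int main(void)
    {
        int counts[] = { 1017, 1018, 1017, 1016, 1018 };
        printf("%s\n", verify_max_files(counts, 5) ? "stable" : "unstable");
        return 0;
    }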
max_files(Name) ->
Fds = open_files(Name),
diff --git a/erts/emulator/test/iovec_SUITE.erl b/erts/emulator/test/iovec_SUITE.erl
index 49dc64b0d2..963b7e2501 100644
--- a/erts/emulator/test/iovec_SUITE.erl
+++ b/erts/emulator/test/iovec_SUITE.erl
@@ -25,7 +25,8 @@
-export([integer_lists/1, binary_lists/1, empty_lists/1, empty_binary_lists/1,
mixed_lists/1, improper_lists/1, illegal_lists/1, cons_bomb/1,
sub_binary_lists/1, iolist_to_iovec_idempotence/1,
- iolist_to_iovec_correctness/1]).
+ iolist_to_iovec_correctness/1, unaligned_sub_binaries/1,
+ direct_binary_arg/1]).
-include_lib("common_test/include/ct.hrl").
@@ -36,7 +37,8 @@ suite() ->
all() ->
[integer_lists, binary_lists, empty_lists, empty_binary_lists, mixed_lists,
sub_binary_lists, illegal_lists, improper_lists, cons_bomb,
- iolist_to_iovec_idempotence, iolist_to_iovec_correctness].
+ iolist_to_iovec_idempotence, iolist_to_iovec_correctness,
+ unaligned_sub_binaries, direct_binary_arg].
init_per_suite(Config) ->
Config.
@@ -78,7 +80,7 @@ illegal_lists(Config) when is_list(Config) ->
BitStrs = gen_variations(["gurka", <<1:1>>, "gaffel"]),
BadInts = gen_variations(["gurka", 890, "gaffel"]),
Atoms = gen_variations([gurka, "gaffel"]),
- BadTails = [["test" | 0], ["gurka", gaffel]],
+ BadTails = [["test" | 0], ["gurka" | gaffel], ["gaffel" | <<1:1>>]],
Variations =
BitStrs ++ BadInts ++ Atoms ++ BadTails,
@@ -98,14 +100,7 @@ cons_bomb(Config) when is_list(Config) ->
BinBase = gen_variations([<<I:8>> || I <- lists:seq(1, 255)]),
MixBase = gen_variations([<<12, 45, 78>>, lists:seq(1, 255)]),
- Rounds =
- case system_mem_size() of
- Mem when Mem >= (16 bsl 30) -> 32;
- Mem when Mem >= (3 bsl 30) -> 28;
- _ -> 20
- end,
-
- Variations = gen_variations([IntBase, BinBase, MixBase], Rounds),
+ Variations = gen_variations([IntBase, BinBase, MixBase], 16),
equivalence_test(fun erlang:iolist_to_iovec/1, Variations).
iolist_to_iovec_idempotence(Config) when is_list(Config) ->
@@ -130,6 +125,21 @@ iolist_to_iovec_correctness(Config) when is_list(Config) ->
true = is_iolist_equal(Optimized, Variations),
ok.
+unaligned_sub_binaries(Config) when is_list(Config) ->
+ UnalignedBins = [gen_unaligned_binary(I) || I <- lists:seq(32, 4 bsl 10, 512)],
+ UnalignedVariations = gen_variations(UnalignedBins),
+
+ Optimized = erlang:iolist_to_iovec(UnalignedVariations),
+
+ true = is_iolist_equal(Optimized, UnalignedVariations),
+ ok.
+
+direct_binary_arg(Config) when is_list(Config) ->
+ {'EXIT',{badarg, _}} = (catch erlang:iolist_to_iovec(<<1:1>>)),
+ [<<1>>] = erlang:iolist_to_iovec(<<1>>),
+ [] = erlang:iolist_to_iovec(<<>>),
+ ok.
+
illegality_test(Fun, Variations) ->
[{'EXIT',{badarg, _}} = (catch Fun(Variation)) || Variation <- Variations],
ok.
@@ -145,11 +155,18 @@ equivalence_test(Fun, [Head | _] = Variations) ->
is_iolist_equal(A, B) ->
iolist_to_binary(A) =:= iolist_to_binary(B).
+gen_unaligned_binary(Size) ->
+ Bin0 = << <<I>> || I <- lists:seq(1, Size) >>,
+ <<0:3,Bin:Size/binary,31:5>> = id(<<0:3,Bin0/binary,31:5>>),
+ Bin.
+
+id(I) -> I.
+
%% Generates a bunch of lists whose contents will be equal to Base repeated a
%% few times. The lists only differ by their structure, so their reduction to
%% a simpler format should yield the same result.
gen_variations(Base) ->
- gen_variations(Base, 16).
+ gen_variations(Base, 12).
gen_variations(Base, N) ->
[gen_flat_list(Base, N),
gen_nested_list(Base, N),
@@ -169,8 +186,3 @@ gen_nasty_list_1([Head | Base], Result) when is_list(Head) ->
gen_nasty_list_1(Base, [[Result], [gen_nasty_list_1(Head, [])]]);
gen_nasty_list_1([Head | Base], Result) ->
gen_nasty_list_1(Base, [[Result], [Head]]).
-
-system_mem_size() ->
- application:ensure_all_started(os_mon),
- {Tot,_Used,_} = memsup:get_memory_data(),
- Tot.
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index 85c302e310..b6e15ababb 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -3049,8 +3049,24 @@ nif_ioq(Config) ->
{peek_head, a},
{peek, a},
- %% Test to enqueue a bunch of refc binaries
+ %% Ensure that enqueued refc binaries are intact after a roundtrip.
+ %%
+ %% This test and the ones immediately following it do not go through
+ %% erlang:iolist_to_iovec/1
{enqv, a, [nif_ioq_payload(refcbin) || _ <- lists:seq(1,20)], 0},
+ {peek, a},
+
+ %% ... heap binaries
+ {enqv, a, [nif_ioq_payload(heapbin) || _ <- lists:seq(1,20)], 0},
+ {peek, a},
+
+ %% ... plain sub-binaries
+ {enqv, a, [nif_ioq_payload(subbin) || _ <- lists:seq(1,20)], 0},
+ {peek, a},
+
+ %% ... unaligned binaries
+ {enqv, a, [nif_ioq_payload(unaligned_bin) || _ <- lists:seq(1,20)], 0},
+ {peek, a},
%% Enq stuff to destroy with data in queue
{enqv, a, 2, 100},
@@ -3206,13 +3222,16 @@ nif_ioq_payload(N) when is_integer(N) ->
Tail = if N > 3 -> nif_ioq_payload(N-3); true -> [] end,
Head = element(1, lists:split(N,[nif_ioq_payload(subbin),
nif_ioq_payload(heapbin),
- nif_ioq_payload(refcbin) | Tail])),
+ nif_ioq_payload(refcbin),
+ nif_ioq_payload(unaligned_bin) | Tail])),
erlang:iolist_to_iovec(Head);
nif_ioq_payload(subbin) ->
Bin = nif_ioq_payload(refcbin),
Sz = size(Bin) - 1,
<<_:8,SubBin:Sz/binary,_/bits>> = Bin,
SubBin;
+nif_ioq_payload(unaligned_bin) ->
+ make_unaligned_binary(<< <<I>> || I <- lists:seq(1, 255) >>);
nif_ioq_payload(heapbin) ->
<<"a literal heap binary">>;
nif_ioq_payload(refcbin) ->
@@ -3220,6 +3239,13 @@ nif_ioq_payload(refcbin) ->
nif_ioq_payload(Else) ->
Else.
+make_unaligned_binary(Bin0) ->
+ Size = byte_size(Bin0),
+ <<0:3,Bin:Size/binary,31:5>> = id(<<0:3,Bin0/binary,31:5>>),
+ Bin.
+
+id(I) -> I.
+
%% The NIFs:
lib_version() -> undefined.
call_history() -> ?nif_stub.