path: root/erts/emulator
Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in | 9
-rw-r--r--  erts/emulator/beam/atom.names | 9
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 3
-rw-r--r--  erts/emulator/beam/beam_bp.c | 18
-rw-r--r--  erts/emulator/beam/beam_emu.c | 28
-rw-r--r--  erts/emulator/beam/bif.c | 602
-rw-r--r--  erts/emulator/beam/bif.h | 21
-rw-r--r--  erts/emulator/beam/bif.tab | 32
-rw-r--r--  erts/emulator/beam/binary.c | 6
-rw-r--r--  erts/emulator/beam/break.c | 37
-rw-r--r--  erts/emulator/beam/copy.c | 2
-rw-r--r--  erts/emulator/beam/dist.c | 305
-rw-r--r--  erts/emulator/beam/dist.h | 14
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 9
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 2
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 20
-rw-r--r--  erts/emulator/beam/erl_async.c | 13
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 194
-rwxr-xr-x  erts/emulator/beam/erl_bif_info.c | 402
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 867
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 16
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c | 18
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 172
-rw-r--r--  erts/emulator/beam/erl_db.c | 89
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 51
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 4
-rw-r--r--  erts/emulator/beam/erl_debug.c | 28
-rw-r--r--  erts/emulator/beam/erl_driver.h | 42
-rw-r--r--  erts/emulator/beam/erl_gc.c | 8
-rw-r--r--  erts/emulator/beam/erl_init.c | 142
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 23
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 1
-rw-r--r--  erts/emulator/beam/erl_message.c | 12
-rw-r--r--  erts/emulator/beam/erl_message.h | 23
-rw-r--r--  erts/emulator/beam/erl_monitors.c | 13
-rw-r--r--  erts/emulator/beam/erl_monitors.h | 2
-rw-r--r--  erts/emulator/beam/erl_nif.c | 16
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h | 102
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 65
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 9
-rw-r--r--  erts/emulator/beam/erl_port.h | 942
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 2036
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 160
-rw-r--r--  erts/emulator/beam/erl_process.c | 2076
-rw-r--r--  erts/emulator/beam/erl_process.h | 270
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 6
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 18
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 143
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 56
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 1566
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 472
-rw-r--r--  erts/emulator/beam/erl_smp.h | 28
-rw-r--r--  erts/emulator/beam/erl_sys_driver.h | 1
-rw-r--r--  erts/emulator/beam/erl_term.c | 4
-rw-r--r--  erts/emulator/beam/erl_term.h | 36
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 288
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 63
-rw-r--r--  erts/emulator/beam/erl_threads.h | 298
-rw-r--r--  erts/emulator/beam/erl_trace.c | 376
-rw-r--r--  erts/emulator/beam/erl_trace.h | 141
-rw-r--r--  erts/emulator/beam/erl_utils.h | 215
-rwxr-xr-x  erts/emulator/beam/global.h | 924
-rw-r--r--  erts/emulator/beam/io.c | 4455
-rw-r--r--  erts/emulator/beam/register.c | 53
-rw-r--r--  erts/emulator/beam/register.h | 19
-rw-r--r--  erts/emulator/beam/sys.h | 40
-rw-r--r--  erts/emulator/beam/utils.c | 18
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 165
-rw-r--r--  erts/emulator/hipe/hipe_bif_list.m4 | 1
-rw-r--r--  erts/emulator/hipe/hipe_debug.c | 11
-rw-r--r--  erts/emulator/hipe/hipe_mkliterals.c | 2
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 53
-rw-r--r--  erts/emulator/sys/unix/sys.c | 49
-rw-r--r--  erts/emulator/sys/win32/erl_win_dyn_driver.h | 8
-rwxr-xr-x  erts/emulator/sys/win32/sys.c | 155
-rw-r--r--  erts/emulator/test/busy_port_SUITE.erl | 30
-rw-r--r--  erts/emulator/test/ddll_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/driver_SUITE.erl | 67
-rw-r--r--  erts/emulator/test/driver_SUITE_data/Makefile.src | 3
-rw-r--r--  erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c | 178
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 88
-rw-r--r--  erts/emulator/test/process_SUITE.erl | 46
-rw-r--r--  erts/emulator/test/tuple_SUITE.erl | 260
83 files changed, 12268 insertions, 6983 deletions
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 7e966c81bb..67320697ef 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -560,7 +560,8 @@ $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/zlib.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \
$(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erlang.beam
+ $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam
LANG=C $(PERL) utils/make_preload $(MAKE_PRELOAD_EXTRA) -rc $^ > $@
else
PRELOAD_OBJ = $(OBJDIR)/preload.o
@@ -572,7 +573,8 @@ $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/zlib.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \
$(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erlang.beam
+ $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam
LANG=C $(PERL) utils/make_preload -old $^ > $@
endif
@@ -735,7 +737,8 @@ RUN_OBJS = \
$(OBJDIR)/packet_parser.o $(OBJDIR)/safe_hash.o \
$(OBJDIR)/erl_zlib.o $(OBJDIR)/erl_nif.o \
$(OBJDIR)/erl_bif_binary.o $(OBJDIR)/erl_ao_firstfit_alloc.o \
- $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o
+ $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o \
+ $(OBJDIR)/erl_ptab.o
ifeq ($(TARGET),win32)
DRV_OBJS = \
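The Makefile change above adds erts_internal.beam to the preload list; that newly preloaded module is where trap targets registered later in this diff, such as erts_internal:await_port_send_result/3 in bif.c, are expected to live. As a hedged sketch only (the actual erts_internal.erl is not part of this diff), such a trap function could simply wait for the reply message of a scheduled port signal:

%% Hypothetical sketch -- not the real preloaded erts_internal.erl from this
%% commit. bif.c traps here with a reference plus the terms to return for the
%% "busy/nosuspend" and "ok" outcomes of an asynchronously scheduled port op.
-module(erts_internal).
-export([await_port_send_result/3]).

await_port_send_result(Ref, Busy, Ok) ->
    receive
        {Ref, false} -> Busy;   %% signal rejected (e.g. nosuspend on busy port)
        {Ref, _}     -> Ok      %% signal delivered or dropped; treat as done
    end.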
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index afcbd732df..c47a608215 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -94,6 +94,7 @@ atom asynchronous
atom atom
atom atom_used
atom attributes
+atom await_port_send_result
atom await_proc_exit
atom await_sched_wall_time_modifications
atom awaiting_load
@@ -152,6 +153,7 @@ atom connection_closed
atom cons
atom const
atom context_switches
+atom control
atom copy
atom cpu
atom cpu_timestamp
@@ -204,6 +206,7 @@ atom erlang
atom ERROR='ERROR'
atom error_handler
atom error_logger
+atom erts_internal
atom ets
atom ETS_TRANSFER='ETS-TRANSFER'
atom event
@@ -237,6 +240,7 @@ atom gc_end
atom gc_start
atom Ge='>='
atom generational
+atom get_data
atom get_seq_token
atom get_tcw
atom getenv
@@ -408,6 +412,7 @@ atom overlapped_io
atom owner
atom packet
atom packet_size
+atom parallelism
atom Plus='+'
atom pause
atom pending
@@ -419,12 +424,12 @@ atom pid
atom port
atom ports
atom port_count
+atom port_limit
atom print
atom priority
atom private
atom process
atom processes
-atom processes_trap
atom processes_used
atom process_count
atom process_display
@@ -434,6 +439,7 @@ atom procs
atom profile
atom protected
atom protection
+atom ptab_list_continue
atom public
atom purify
atom quantify
@@ -481,6 +487,7 @@ atom sequential_trace_token
atom serial
atom set
atom set_cpu_topology
+atom set_data
atom set_on_first_link
atom set_on_first_spawn
atom set_on_link
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 9e4add823d..e0a4f86d2d 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -438,8 +438,7 @@ check_process_code_2(BIF_ALIST_2)
if (is_internal_pid(BIF_ARG_1)) {
Eterm res;
ErtsCodeIndex code_ix;
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- goto error;
+
code_ix = erts_active_code_ix();
modp = erts_get_module(BIF_ARG_2, code_ix);
if (modp == NULL) { /* Doesn't exist. */
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 58e0090a76..2168d410b5 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -714,7 +714,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
IS_TRACED_FL(p, F_TRACE_CALLS)) {
int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
flags = erts_call_trace(p, ep->code, bp->local_ms, args,
- local, &p->tracer_proc);
+ local, &ERTS_TRACER_PROC(p));
}
if (bp_flags & ERTS_BPF_META_TRACE) {
Eterm tpid1, tpid2;
@@ -800,7 +800,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
if (flags & MATCH_SET_EXCEPTION_TRACE) {
erts_trace_exception(p, ep->code, class, value,
- &p->tracer_proc);
+ &ERTS_TRACER_PROC(p));
}
if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
/* can only happen if(local)*/
@@ -825,7 +825,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
UnUseTmpHeapNoproc(3);
if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- p->trace_flags |= F_EXCEPTION_TRACE;
+ ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
@@ -835,7 +835,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
/* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &p->tracer_proc);
+ erts_trace_return(p, ep->code, result, &ERTS_TRACER_PROC(p));
}
if (flags & MATCH_SET_RETURN_TO_TRACE) {
/* can only happen if(local)*/
@@ -935,7 +935,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ?
beam_exception_trace : beam_return_trace;
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
+ ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
c_p->stop = E;
@@ -974,7 +974,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
ASSERT(pbt->pc);
/* add time to previous code */
bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = c_p->id;
+ sitem.pid = c_p->common.id;
sitem.count = 0;
/* previous breakpoint */
@@ -997,7 +997,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
}
/* Add count to this code */
- sitem.pid = c_p->id;
+ sitem.pid = c_p->common.id;
sitem.count = 1;
sitem.s_time = 0;
sitem.us_time = 0;
@@ -1055,7 +1055,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
ASSERT(pbt->pc);
bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->id;
+ sitem.pid = p->common.id;
sitem.count = 0;
/* previous breakpoint */
@@ -1386,7 +1386,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
if (pbdt) {
get_sys_now(&ms,&s,&us);
bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->id;
+ sitem.pid = p->common.id;
sitem.count = 0;
h = &(pbdt->hash[bp_sched2ix_proc(p)]);
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 7b1ae624ce..0e9d140908 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -516,7 +516,7 @@ extern int count_instructions;
# define Dispatchfun() DispatchMacroFun()
#endif
-#define Self(R) R = c_p->id
+#define Self(R) R = c_p->common.id
#define Node(R) R = erts_this_node->sysname
#define Arg(N) I[(N)+1]
@@ -1074,11 +1074,11 @@ init_emulator(void)
void
dtrace_drvport_str(ErlDrvPort drvport, char *port_buf)
{
- Port *port = erts_drvport2port(drvport);
+ Port *port = erts_drvport2port(drvport, NULL);
erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
- port_channel_no(port->id),
- port_number(port->id));
+ port_channel_no(port->common.id),
+ port_number(port->common.id));
}
#endif
/*
@@ -1195,7 +1195,7 @@ void process_main(void)
c_p = schedule(c_p, reds_used);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
#ifdef DEBUG
- pid = c_p->id; /* Save for debugging purpouses */
+ pid = c_p->common.id; /* Save for debugging purpouses */
#endif
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -1227,7 +1227,7 @@ void process_main(void)
reds = c_p->fcalls;
if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
- && (c_p->trace_flags & F_SENSITIVE) == 0) {
+ && (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE) == 0) {
neg_o_reds = -reds;
FCALLS = REDS_IN(c_p) = 0;
} else {
@@ -1591,6 +1591,7 @@ void process_main(void)
reg[0] = r(0);
result = erl_send(c_p, r(0), x(1));
PreFetch(0, next);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
@@ -1866,14 +1867,14 @@ void process_main(void)
erts_fprintf(stderr,
"Dtrace -> (%T) stop spreading "
"tag %T with message %T\r\n",
- c_p->id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
+ c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
#endif
} else {
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) kill tag %T with "
"message %T\r\n",
- c_p->id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
+ c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
#endif
DT_UTAG(c_p) = NIL;
SEQ_TRACE_TOKEN(c_p) = NIL;
@@ -1898,7 +1899,7 @@ void process_main(void)
erts_fprintf(stderr,
"Dtrace -> (%T) receive tag (%T) "
"with message %T\r\n",
- c_p->id, DT_UTAG(c_p), ERL_MESSAGE_TERM(msgp));
+ c_p->common.id, DT_UTAG(c_p), ERL_MESSAGE_TERM(msgp));
#endif
} else {
#endif
@@ -1914,7 +1915,7 @@ void process_main(void)
}
msg = ERL_MESSAGE_TERM(msgp);
seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
- c_p->id, c_p);
+ c_p->common.id, c_p);
#ifdef USE_VM_PROBES
}
#endif
@@ -2567,6 +2568,7 @@ void process_main(void)
reg[0] = r(0);
result = (*bf)(c_p, reg, I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_HOLE_CHECK(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -3301,7 +3303,6 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
bif_nif_arity = I[-1];
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
{
@@ -3346,7 +3347,6 @@ void process_main(void)
bif_nif_arity = I[-1];
ASSERT(bif_nif_arity <= 3);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
reg[0] = r(0);
{
Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf;
@@ -5257,7 +5257,7 @@ terminate_proc(Process* c_p, Eterm Value)
/* EXF_LOG is a primary exception flag */
if (c_p->freason & EXF_LOG) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Error in process %T ", c_p->id);
+ erts_dsprintf(dsbufp, "Error in process %T ", c_p->common.id);
if (erts_is_alive)
erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
erts_dsprintf(dsbufp,"with exit value: %0.*T\n", display_items, Value);
@@ -6186,7 +6186,7 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
MSO(p).first = (struct erl_off_heap_header*) funp;
funp->fe = fe;
funp->num_free = num_free;
- funp->creator = p->id;
+ funp->creator = p->common.id;
#ifdef HIPE
funp->native_address = fe->native_address;
#endif
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 1cdce49eef..c4ff4fe982 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -37,10 +37,13 @@
#include "erl_db_util.h"
#include "register.h"
#include "erl_thr_progress.h"
+#define ERTS_PTAB_WANT_BIF_IMPL__
+#include "erl_ptab.h"
static Export* flush_monitor_message_trap = NULL;
static Export* set_cpu_topology_trap = NULL;
static Export* await_proc_exit_trap = NULL;
+static Export* await_port_send_result_trap = NULL;
Export* erts_format_cpu_topology_trap = NULL;
static Export *await_sched_wall_time_mod_trap;
@@ -83,8 +86,10 @@ static int insert_internal_link(Process* p, Eterm rpid)
ASSERT(is_internal_pid(rpid));
#ifdef ERTS_SMP
- if (IS_TRACED(p) && (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)))
+ if (IS_TRACED(p)
+ && (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1))) {
rp_locks = ERTS_PROC_LOCKS_ALL;
+ }
erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
#endif
@@ -100,27 +105,27 @@ static int insert_internal_link(Process* p, Eterm rpid)
}
if (p != rp) {
- erts_add_link(&(p->nlinks), LINK_PID, rp->id);
- erts_add_link(&(rp->nlinks), LINK_PID, p->id);
+ erts_add_link(&ERTS_P_LINKS(p), LINK_PID, rp->common.id);
+ erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, p->common.id);
- ASSERT(is_nil(p->tracer_proc)
- || is_internal_pid(p->tracer_proc)
- || is_internal_port(p->tracer_proc));
+ ASSERT(is_nil(ERTS_TRACER_PROC(p))
+ || is_internal_pid(ERTS_TRACER_PROC(p))
+ || is_internal_port(ERTS_TRACER_PROC(p)));
if (IS_TRACED(p)) {
- if (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) {
- rp->trace_flags |= (p->trace_flags & TRACEE_FLAGS);
- rp->tracer_proc = p->tracer_proc; /* maybe steal */
+ if (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ ERTS_TRACE_FLAGS(rp) |= (ERTS_TRACE_FLAGS(p) & TRACEE_FLAGS);
+ ERTS_TRACER_PROC(rp) = ERTS_TRACER_PROC(p); /* maybe steal */
- if (p->trace_flags & F_TRACE_SOL1) { /* maybe override */
- rp->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- p->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ if (ERTS_TRACE_FLAGS(p) & F_TRACE_SOL1) { /* maybe override */
+ ERTS_TRACE_FLAGS(rp) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
}
}
}
}
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(p, rp, am_getting_linked, p->id);
+ trace_proc(p, rp, am_getting_linked, p->common.id);
if (p == rp)
erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN);
@@ -144,10 +149,6 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
/* check that the pid or port which is our argument is OK */
if (is_internal_pid(BIF_ARG_1)) {
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
if (insert_internal_link(BIF_P, BIF_ARG_1)) {
BIF_RET(am_true);
}
@@ -157,19 +158,37 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
}
if (is_internal_port(BIF_ARG_1)) {
- Port *pt = erts_id2port(BIF_ARG_1, BIF_P, ERTS_PROC_LOCK_MAIN);
- if (!pt) {
+ int send_link_signal = 0;
+ Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (!prt) {
goto res_no_proc;
}
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
- if (erts_add_link(&(BIF_P->nlinks), LINK_PID, BIF_ARG_1) >= 0)
- erts_add_link(&(pt->nlinks), LINK_PID, BIF_P->id);
+ if (erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1) >= 0)
+ send_link_signal = 1;
/* else: already linked */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- erts_smp_port_unlock(pt);
+
+ if (send_link_signal) {
+ Eterm ref;
+ Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
+
+ switch (erts_port_link(BIF_P, prt, BIF_P->common.id, refp)) {
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ goto res_no_proc;
+ case ERTS_PORT_OP_SCHEDULED:
+ if (refp) {
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
+ }
+ default:
+ break;
+ }
+ }
BIF_RET(am_true);
}
else if (is_external_port(BIF_ARG_1)
@@ -182,7 +201,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
/* We may earn time by checking first that we're not linked already */
- if (erts_lookup_link(BIF_P->nlinks, BIF_ARG_1) != NULL) {
+ if (erts_lookup_link(ERTS_P_LINKS(BIF_P), BIF_ARG_1) != NULL) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
}
@@ -209,10 +228,10 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
erts_smp_de_links_lock(dep);
- erts_add_link(&(BIF_P->nlinks), LINK_PID, BIF_ARG_1);
+ erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1);
lnk = erts_add_or_lookup_link(&(dep->nlinks),
LINK_PID,
- BIF_P->id);
+ BIF_P->common.id);
ASSERT(lnk != NULL);
erts_add_link(&ERTS_LINK_ROOT(lnk), LINK_PID, BIF_ARG_1);
@@ -220,7 +239,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
erts_smp_de_runlock(dep);
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- code = erts_dsig_send_link(&dsd, BIF_P->id, BIF_ARG_1);
+ code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
BIF_RET(am_true);
@@ -289,7 +308,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
if (dmon)
erts_destroy_monitor(dmon);
}
- mon = erts_remove_monitor(&c_p->monitors, ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
res = ERTS_DEMONITOR_TRUE;
@@ -298,7 +317,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
case ERTS_DSIG_PREP_CONNECTED:
erts_smp_de_links_lock(dep);
- mon = erts_remove_monitor(&c_p->monitors, ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
dmon = erts_remove_monitor(&dep->monitors, ref);
erts_smp_de_links_unlock(dep);
erts_smp_de_runlock(dep);
@@ -325,7 +344,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
* the atom is stored there. Yield if necessary.
*/
code = erts_dsig_send_demonitor(&dsd,
- c_p->id,
+ c_p->common.id,
(mon->name != NIL
? mon->name
: mon->pid),
@@ -387,7 +406,7 @@ static int demonitor(Process *c_p, Eterm ref)
goto done; /* Cannot be this monitor's ref */
}
- mon = erts_lookup_monitor(c_p->monitors, ref);
+ mon = erts_lookup_monitor(ERTS_P_MONITORS(c_p), ref);
if (!mon) {
res = ERTS_DEMONITOR_FALSE;
goto done;
@@ -426,7 +445,7 @@ static int demonitor(Process *c_p, Eterm ref)
to,
ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
- mon = erts_remove_monitor(&c_p->monitors, ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
#ifndef ERTS_SMP
ASSERT(mon);
#else
@@ -440,7 +459,7 @@ static int demonitor(Process *c_p, Eterm ref)
}
if (rp) {
ErtsMonitor *rmon;
- rmon = erts_remove_monitor(&(rp->monitors), ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
if (rp != c_p)
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon != NULL)
@@ -582,7 +601,7 @@ local_pid_monitor(Process *p, Eterm target)
mon_ref = erts_make_ref(p);
ERTS_BIF_PREP_RET(ret, mon_ref);
- if (target == p->id) {
+ if (target == p->common.id) {
return ret;
}
@@ -599,8 +618,8 @@ local_pid_monitor(Process *p, Eterm target)
else {
ASSERT(rp != p);
- erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, target, NIL);
- erts_add_monitor(&(rp->monitors), MON_TARGET, mon_ref, p->id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, target, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, NIL);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
@@ -635,9 +654,9 @@ local_name_monitor(Process *p, Eterm target_name)
UnUseTmpHeap(3,p);
}
else if (rp != p) {
- erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, rp->id,
+ erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, rp->common.id,
target_name);
- erts_add_monitor(&(rp->monitors), MON_TARGET, mon_ref, p->id,
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id,
target_name);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
@@ -689,16 +708,16 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2,
erts_smp_de_links_lock(dep);
- erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, p_trgt,
+ erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, p_trgt,
p_name);
- erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->id,
+ erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->common.id,
d_name);
erts_smp_de_links_unlock(dep);
erts_smp_de_runlock(dep);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- code = erts_dsig_send_monitor(&dsd, p->id, target, mon_ref);
+ code = erts_dsig_send_monitor(&dsd, p->common.id, target, mon_ref);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_PREP_YIELD_RETURN(ret, p, mon_ref);
else
@@ -941,36 +960,39 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
}
if (is_internal_port(BIF_ARG_1)) {
- Port *pt = erts_id2port_sflgs(BIF_ARG_1,
- BIF_P,
- ERTS_PROC_LOCK_MAIN,
- ERTS_PORT_SFLGS_DEAD);
-
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
#ifdef ERTS_SMP
- if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
- if (pt)
- erts_smp_port_unlock(pt);
+ if (ERTS_PROC_PENDING_EXIT(BIF_P))
goto handle_pending_exit;
- }
#endif
- l = erts_remove_link(&BIF_P->nlinks, BIF_ARG_1);
-
- ASSERT(pt || !l);
-
- if (pt) {
- rl = erts_remove_link(&pt->nlinks, BIF_P->id);
- erts_smp_port_unlock(pt);
- if (rl)
- erts_destroy_link(rl);
- }
+ l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
- if (l)
+ if (l) {
+ Port *prt;
+
erts_destroy_link(l);
+ /* Send unlink signal */
+ prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_DEAD);
+ if (prt) {
+ ErtsPortOpResult res;
+ Eterm ref;
+ Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
+#ifdef DEBUG
+ ref = NIL;
+#endif
+ res = erts_port_unlink(BIF_P, prt, BIF_P->common.id, refp);
+
+ if (refp && res == ERTS_PORT_OP_SCHEDULED) {
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
+ }
+ }
+ }
+
BIF_RET(am_true);
}
else if (is_external_port(BIF_ARG_1)
@@ -993,7 +1015,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
if (ERTS_PROC_PENDING_EXIT(BIF_P))
goto handle_pending_exit;
#endif
- l = erts_remove_link(&BIF_P->nlinks,BIF_ARG_1);
+ l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
erts_smp_proc_unlock(BIF_P,
ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
@@ -1022,8 +1044,8 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
#endif
case ERTS_DSIG_PREP_CONNECTED:
- erts_remove_dist_link(&dld, BIF_P->id, BIF_ARG_1, dep);
- code = erts_dsig_send_unlink(&dsd, BIF_P->id, BIF_ARG_1);
+ erts_remove_dist_link(&dld, BIF_P->common.id, BIF_ARG_1, dep);
+ code = erts_dsig_send_unlink(&dsd, BIF_P->common.id, BIF_ARG_1);
erts_destroy_dist_link(&dld);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
@@ -1037,10 +1059,6 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
/* Internal pid... */
- /* process ok ? */
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- BIF_ERROR(BIF_P, BADARG);
-
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
/* get process struct */
@@ -1059,7 +1077,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
#endif
/* unlink and ignore errors */
- l = erts_remove_link(&BIF_P->nlinks,BIF_ARG_1);
+ l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
if (l != NULL)
erts_destroy_link(l);
@@ -1067,12 +1085,12 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
}
else {
- rl = erts_remove_link(&(rp->nlinks),BIF_P->id);
+ rl = erts_remove_link(&ERTS_P_LINKS(rp), BIF_P->common.id);
if (rl != NULL)
erts_destroy_link(rl);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) {
- trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->id);
+ trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->common.id);
}
if (rp != BIF_P)
@@ -1345,15 +1363,28 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
*/
if (is_internal_port(BIF_ARG_1)) {
- Port *prt;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- prt = erts_id2port(BIF_ARG_1, NULL, 0);
+ Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
if (prt) {
- erts_do_exit_port(prt, BIF_P->id, BIF_ARG_2);
- erts_port_release(prt);
+ Eterm ref;
+ Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
+ ErtsPortOpResult res;
+
+#ifdef DEBUG
+ ref = NIL;
+#endif
+
+ res = erts_port_exit(BIF_P, 0, prt, BIF_P->common.id, BIF_ARG_2, refp);
+
+ ERTS_BIF_CHK_EXITED(BIF_P);
+
+ if (refp && res == ERTS_PORT_OP_SCHEDULED) {
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
+ }
+
}
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- ERTS_BIF_CHK_EXITED(BIF_P);
+
BIF_RET(am_true);
}
else if(is_external_port(BIF_ARG_1)
@@ -1379,7 +1410,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
case ERTS_DSIG_PREP_NOT_CONNECTED:
BIF_TRAP2(dexit_trap, BIF_P, BIF_ARG_1, BIF_ARG_2);
case ERTS_DSIG_PREP_CONNECTED:
- code = erts_dsig_send_exit2(&dsd, BIF_P->id, BIF_ARG_1, BIF_ARG_2);
+ code = erts_dsig_send_exit2(&dsd, BIF_P->common.id, BIF_ARG_1, BIF_ARG_2);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
BIF_RET(am_true);
@@ -1397,9 +1428,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
*/
ErtsProcLocks rp_locks;
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- BIF_ERROR(BIF_P, BADARG);
- if (BIF_ARG_1 == BIF_P->id) {
+ if (BIF_ARG_1 == BIF_P->common.id) {
rp_locks = ERTS_PROC_LOCKS_ALL;
rp = BIF_P;
erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -1417,7 +1446,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
* Send an exit signal.
*/
erts_send_exit_signal(BIF_P,
- BIF_P->id,
+ BIF_P->common.id,
rp,
&rp_locks,
BIF_ARG_2,
@@ -1519,22 +1548,24 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
}
/*
* NOTE: It is important that we check for pending exit signals
- * and handle them before flag trap_exit is set to true.
- * For more info, see implementation of erts_send_exit_signal().
+ * and handle them before returning if trap_exit is set to
+ * true. For more info, see implementation of
+ * erts_send_exit_signal().
*/
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
- ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)
- & erts_proc_lc_my_proc_locks(BIF_P));
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
if (trap_exit)
- state = erts_smp_atomic32_read_bor_nob(&BIF_P->state,
- ERTS_PSFLG_TRAP_EXIT);
+ state = erts_smp_atomic32_read_bor_mb(&BIF_P->state,
+ ERTS_PSFLG_TRAP_EXIT);
else
- state = erts_smp_atomic32_read_band_nob(&BIF_P->state,
- ~ERTS_PSFLG_TRAP_EXIT);
+ state = erts_smp_atomic32_read_band_mb(&BIF_P->state,
+ ~ERTS_PSFLG_TRAP_EXIT);
+#ifdef ERTS_SMP
+ if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
+ erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
+ ERTS_BIF_EXITED(BIF_P);
+ }
+#endif
+
old_value = (state & ERTS_PSFLG_TRAP_EXIT) ? am_true : am_false;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
BIF_RET(old_value);
}
else if (BIF_ARG_1 == am_scheduler) {
@@ -1617,11 +1648,13 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
goto error;
}
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
- old_value = BIF_P->trace_flags & F_SENSITIVE ? am_true : am_false;
+ old_value = (ERTS_TRACE_FLAGS(BIF_P) & F_SENSITIVE
+ ? am_true
+ : am_false);
if (is_sensitive) {
- BIF_P->trace_flags |= F_SENSITIVE;
+ ERTS_TRACE_FLAGS(BIF_P) |= F_SENSITIVE;
} else {
- BIF_P->trace_flags &= ~F_SENSITIVE;
+ ERTS_TRACE_FLAGS(BIF_P) &= ~F_SENSITIVE;
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
BIF_RET(old_value);
@@ -1747,8 +1780,9 @@ ebif_bang_2(BIF_ALIST_2)
#define SEND_BADARG (-4)
#define SEND_USER_ERROR (-5)
#define SEND_INTERNAL_ERROR (-6)
+#define SEND_AWAIT_RESULT (-7)
-Sint do_send(Process *p, Eterm to, Eterm msg, int suspend);
+Sint do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp);
static Sint remote_send(Process *p, DistEntry *dep,
Eterm to, Eterm full_to, Eterm msg, int suspend)
@@ -1802,7 +1836,7 @@ static Sint remote_send(Process *p, DistEntry *dep,
}
Sint
-do_send(Process *p, Eterm to, Eterm msg, int suspend) {
+do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
Eterm portid;
Port *pt;
Process* rp;
@@ -1814,16 +1848,10 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
-
- if (internal_pid_index(to) >= erts_max_processes)
- return SEND_BADARG;
- rp = erts_proc_lookup_raw(to);
-
- if (!rp) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
+ rp = erts_proc_lookup_raw(to);
+ if (!rp)
return 0;
- }
} else if (is_external_pid(to)) {
dep = external_pid_dist_entry(to);
if(dep == erts_this_dist_entry) {
@@ -1832,7 +1860,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
"Discarding message %T from %T to %T in an old "
"incarnation (%d) of this node (%d)\n",
msg,
- p->id,
+ p->common.id,
to,
external_pid_creation(to),
erts_this_node->creation);
@@ -1841,45 +1869,24 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
}
return remote_send(p, dep, to, to, msg, suspend);
} else if (is_atom(to)) {
-
- /* Need to virtual schedule out sending process
- * because of lock wait. This is only necessary
- * for internal port calling but the lock is bundled
- * with name lookup.
- */
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_out);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_inactive);
- }
- erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
- to,
- &rp, 0, 0,
- &pt);
+ Eterm id = erts_whereis_name_to_id(p, to);
+
+ rp = erts_proc_lookup(id);
+ if (rp)
+ goto send_message;
+ pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
if (pt) {
- portid = pt->id;
+ portid = id;
goto port_common;
}
-
- /* Not a port virtually schedule the process back in */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_active);
- }
if (IS_TRACED(p))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
- if (!rp) {
- return SEND_BADARG;
- }
+ return SEND_BADARG;
} else if (is_external_port(to)
&& (external_port_dist_entry(to)
== erts_this_dist_entry)) {
@@ -1888,50 +1895,56 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
"Discarding message %T from %T to %T in an old "
"incarnation (%d) of this node (%d)\n",
msg,
- p->id,
+ p->common.id,
to,
external_port_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
return 0;
} else if (is_internal_port(to)) {
+ int ret_val;
portid = to;
- /* schedule out calling process, waiting for lock*/
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_out);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_inactive);
- }
- pt = erts_id2port(to, p, ERTS_PROC_LOCK_MAIN);
+
+ pt = erts_port_lookup(portid, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
port_common:
- ERTS_SMP_LC_ASSERT(!pt || erts_lc_is_port_locked(pt));
+ ret_val = 0;
- /* We have waited for locks, trace schedule ports */
- if (pt && IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(pt, am_in, am_command);
- }
- if (pt && erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) {
- profile_runnable_port(pt, am_active);
- }
-
- /* XXX let port_command handle the busy stuff !!! */
- if (pt && (pt->status & ERTS_PORT_SFLG_PORT_BUSY)) {
- if (suspend) {
- erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
- if (erts_system_monitor_flags.busy_port) {
- monitor_generic(p, am_busy_port, portid);
+ if (pt) {
+ int ps_flags = suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND;
+ *refp = NIL;
+
+ switch (erts_port_command(p, ps_flags, pt, msg, refp)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ /* We are exiting... */
+ return SEND_USER_ERROR;
+ case ERTS_PORT_OP_BUSY:
+ /* Nothing has been sent */
+ if (suspend)
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
+ return SEND_YIELD;
+ case ERTS_PORT_OP_BUSY_SCHEDULED:
+ /* Message was sent */
+ if (suspend) {
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
+ ret_val = SEND_YIELD_RETURN;
+ break;
}
+ /* Fall through */
+ case ERTS_PORT_OP_SCHEDULED:
+ if (is_not_nil(*refp)) {
+ ASSERT(is_internal_ref(*refp));
+ ret_val = SEND_AWAIT_RESULT;
+ }
+ break;
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DONE:
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_command() result");
+ break;
}
- /* Virtually schedule out the port before releasing */
- if (IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(pt, am_out, am_command);
- }
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) {
- profile_runnable_port(pt, am_inactive);
- }
- erts_port_release(pt);
- return SEND_YIELD;
}
if (IS_TRACED(p)) /* trace once only !! */
@@ -1949,30 +1962,11 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
SEQ_TRACE_SEND, portid, p);
}
- /* XXX NO GC in port command */
- erts_port_command(p, p->id, pt, msg);
- if (pt) {
- /* Virtually schedule out the port before releasing */
- if (IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(pt, am_out, am_command);
- }
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) {
- profile_runnable_port(pt, am_inactive);
- }
- erts_port_release(pt);
- }
- /* Virtually schedule in process */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_active);
- }
if (ERTS_PROC_IS_EXITING(p)) {
KILL_CATCHES(p); /* Must exit */
return SEND_USER_ERROR;
}
- return 0;
+ return ret_val;
} else if (is_tuple(to)) { /* Remote send */
int ret;
tp = tuple_val(to);
@@ -1988,47 +1982,24 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
dep = erts_sysname_to_connected_dist_entry(tp[2]);
if (dep == erts_this_dist_entry) {
+ Eterm id;
erts_deref_dist_entry(dep);
if (IS_TRACED(p))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
-
- /* Need to virtual schedule out sending process
- * because of lock wait. This is only necessary
- * for internal port calling but the lock is bundled.
- */
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_out);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_inactive);
- }
- erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
- tp[1],
- &rp, 0, 0,
- &pt);
+ id = erts_whereis_name_to_id(p, tp[1]);
+
+ rp = erts_proc_lookup_raw(id);
+ if (rp)
+ goto send_message;
+ pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
if (pt) {
- portid = pt->id;
+ portid = id;
goto port_common;
}
- /* Port lookup failed, virtually schedule the process
- * back in.
- */
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_active);
- }
-
- if (!rp) {
- return 0;
- }
- goto send_message;
+ return 0;
}
ret = remote_send(p, dep, tp[1], to, msg, suspend);
@@ -2067,6 +2038,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
BIF_RETTYPE send_3(BIF_ALIST_3)
{
+ Eterm ref;
Process *p = BIF_P;
Eterm to = BIF_ARG_1;
Eterm msg = BIF_ARG_2;
@@ -2090,12 +2062,18 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
if(!is_nil(l)) {
BIF_ERROR(p, BADARG);
}
-
- result = do_send(p, to, msg, suspend);
+
+#ifdef DEBUG
+ ref = NIL;
+#endif
+
+ result = do_send(p, to, msg, suspend, &ref);
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
BIF_RET(am_ok);
- } else switch (result) {
+ }
+
+ switch (result) {
case 0:
BIF_RET(am_ok);
break;
@@ -2118,6 +2096,9 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
ERTS_BIF_YIELD_RETURN(p, am_ok);
else
BIF_RET(am_nosuspend);
+ case SEND_AWAIT_RESULT:
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, p, ref, am_nosuspend, am_ok);
case SEND_BADARG:
BIF_ERROR(p, BADARG);
break;
@@ -2142,12 +2123,21 @@ BIF_RETTYPE send_2(BIF_ALIST_2)
Eterm erl_send(Process *p, Eterm to, Eterm msg)
{
- Sint result = do_send(p, to, msg, !0);
+ Eterm ref;
+ Sint result;
+
+#ifdef DEBUG
+ ref = NIL;
+#endif
+
+ result = do_send(p, to, msg, !0, &ref);
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
BIF_RET(msg);
- } else switch (result) {
+ }
+
+ switch (result) {
case 0:
BIF_RET(msg);
break;
@@ -2159,6 +2149,9 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
break;
case SEND_YIELD_RETURN:
ERTS_BIF_YIELD_RETURN(p, msg);
+ case SEND_AWAIT_RESULT:
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, p, ref, msg, msg);
case SEND_BADARG:
BIF_ERROR(p, BADARG);
break;
@@ -2428,9 +2421,7 @@ BIF_RETTYPE setelement_3(BIF_ALIST_3)
/* copy the tuple */
resp = hp;
- while (size--) { /* XXX use memcpy? */
- *hp++ = *ptr++;
- }
+ sys_memcpy(hp, ptr, sizeof(Eterm)*size);
resp[ix] = BIF_ARG_3;
BIF_RET(make_tuple(resp));
}
@@ -2443,7 +2434,7 @@ BIF_RETTYPE make_tuple_2(BIF_ALIST_2)
Eterm* hp;
Eterm res;
- if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0) {
+ if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0 || n > ERTS_MAX_TUPLE_SIZE) {
BIF_ERROR(BIF_P, BADARG);
}
hp = HAlloc(BIF_P, n+1);
@@ -2464,7 +2455,7 @@ BIF_RETTYPE make_tuple_3(BIF_ALIST_3)
Eterm list = BIF_ARG_3;
Eterm* tup;
- if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0) {
+ if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0 || n > ERTS_MAX_TUPLE_SIZE) {
error:
BIF_ERROR(BIF_P, BADARG);
}
@@ -2516,11 +2507,16 @@ BIF_RETTYPE append_element_2(BIF_ALIST_2)
Eterm res;
if (is_not_tuple(BIF_ARG_1)) {
+ error:
BIF_ERROR(BIF_P, BADARG);
}
- ptr = tuple_val(BIF_ARG_1);
+ ptr = tuple_val(BIF_ARG_1);
arity = arityval(*ptr);
- hp = HAlloc(BIF_P, arity + 2);
+
+ if (arity + 1 > ERTS_MAX_TUPLE_SIZE)
+ goto error;
+
+ hp = HAlloc(BIF_P, arity + 2);
res = make_tuple(hp);
*hp = make_arityval(arity+1);
while (arity--) {
@@ -3104,7 +3100,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1)
Eterm* hp;
int len;
- if ((len = list_length(list)) < 0) {
+ if ((len = list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -3126,7 +3122,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1)
BIF_RETTYPE self_0(BIF_ALIST_0)
{
- BIF_RET(BIF_P->id);
+ BIF_RET(BIF_P->common.id);
}
/**********************************************************************/
@@ -3163,11 +3159,9 @@ static erts_smp_spinlock_t make_ref_lock;
static erts_smp_mtx_t ports_snapshot_mtx;
erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
+void
+erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
{
- Eterm* hp = buffer;
- Uint32 ref0, ref1, ref2;
-
erts_smp_spin_lock(&make_ref_lock);
reference0++;
@@ -3179,24 +3173,36 @@ Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
}
}
- ref0 = reference0;
- ref1 = reference1;
- ref2 = reference2;
+ ref[0] = reference0;
+ ref[1] = reference1;
+ ref[2] = reference2;
erts_smp_spin_unlock(&make_ref_lock);
+}
- write_ref_thing(hp, ref0, ref1, ref2);
+Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
+{
+ Eterm* hp = buffer;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
+
+ erts_make_ref_in_array(ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
return make_internal_ref(hp);
}
Eterm erts_make_ref(Process *p)
{
Eterm* hp;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
hp = HAlloc(p, REF_THING_SIZE);
- return erts_make_ref_in_buffer(hp);
+
+ erts_make_ref_in_array(ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+
+ return make_internal_ref(hp);
}
BIF_RETTYPE make_ref_0(BIF_ALIST_0)
@@ -3460,7 +3466,7 @@ BIF_RETTYPE garbage_collect_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
- if (BIF_P->id == BIF_ARG_1)
+ if (BIF_P->common.id == BIF_ARG_1)
rp = BIF_P;
else {
#ifdef ERTS_SMP
@@ -3500,71 +3506,23 @@ BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
}
/**********************************************************************/
-/* Return a list of active ports */
+/*
+ * The erlang:processes/0 BIF.
+ */
-BIF_RETTYPE ports_0(BIF_ALIST_0)
+BIF_RETTYPE processes_0(BIF_ALIST_0)
{
- Eterm res = NIL;
- Eterm* port_buf = erts_alloc(ERTS_ALC_T_TMP,
- sizeof(Eterm)*erts_max_ports);
- Eterm* pp = port_buf;
- Eterm* dead_ports;
- int alive, dead;
- Uint32 next_ss;
- int i;
-
- /* To get a consistent snapshot...
- * We add alive ports from start of the buffer
- * while dying ports are added from the other end by the killing threads.
- */
-
- erts_smp_mtx_lock(&ports_snapshot_mtx); /* One snapshot at a time */
-
- erts_smp_atomic_set_nob(&erts_dead_ports_ptr,
- (erts_aint_t) (port_buf + erts_max_ports));
-
- next_ss = erts_smp_atomic32_inc_read_relb(&erts_ports_snapshot);
-
- for (i = erts_max_ports-1; i >= 0; i--) {
- Port* prt = &erts_port[i];
- erts_smp_port_state_lock(prt);
- if (!(prt->status & ERTS_PORT_SFLGS_DEAD)
- && prt->snapshot != next_ss) {
- ASSERT(prt->snapshot == next_ss - 1);
- *pp++ = prt->id;
- prt->snapshot = next_ss; /* Consumed by this snapshot */
- }
- erts_smp_port_state_unlock(prt);
- }
-
- dead_ports = (Eterm*)erts_smp_atomic_xchg_nob(&erts_dead_ports_ptr,
- (erts_aint_t) NULL);
- erts_smp_mtx_unlock(&ports_snapshot_mtx);
-
- ASSERT(pp <= dead_ports);
-
- alive = pp - port_buf;
- dead = port_buf + erts_max_ports - dead_ports;
-
- ASSERT((alive+dead) <= erts_max_ports);
-
- if (alive+dead > 0) {
- erts_aint_t i;
- Eterm *hp = HAlloc(BIF_P, (alive+dead)*2);
-
- for (i = 0; i < alive; i++) {
- res = CONS(hp, port_buf[i], res);
- hp += 2;
- }
- for (i = 0; i < dead; i++) {
- res = CONS(hp, dead_ports[i], res);
- hp += 2;
- }
- }
+ return erts_ptab_list(BIF_P, &erts_proc);
+}
- erts_free(ERTS_ALC_T_TMP, port_buf);
+/**********************************************************************/
+/*
+ * The erlang:ports/0 BIF.
+ */
- BIF_RET(res);
+BIF_RETTYPE ports_0(BIF_ALIST_0)
+{
+ return erts_ptab_list(BIF_P, &erts_port);
}
/**********************************************************************/
@@ -4194,12 +4152,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_RET(old_value);
}
} else if (BIF_ARG_1 == make_small(1)) {
- Uint i;
+ int i, max;
ErlMessage* mp;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
- for (i = 0; i < erts_max_processes; i++) {
+ max = erts_ptab_max(&erts_proc);
+ for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p) {
#ifdef USE_VM_PROBES
@@ -4546,6 +4505,8 @@ void erts_init_bif(void)
am_format_cpu_topology,
1);
await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3);
+ await_port_send_result_trap
+ = erts_export_put(am_erts_internal, am_await_port_send_result, 3);
await_sched_wall_time_mod_trap
= erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2);
erts_smp_atomic32_init_nob(&sched_wall_time, 0);
@@ -4561,19 +4522,18 @@ bif erlang:send_to_logger/2
BIF_RETTYPE send_to_logger_2(BIF_ALIST_2)
{
byte *buf;
- int len;
+ ErlDrvSizeT len;
if (!is_atom(BIF_ARG_1) || !(is_list(BIF_ARG_2) ||
is_nil(BIF_ARG_1))) {
BIF_ERROR(BIF_P,BADARG);
}
- len = io_list_len(BIF_ARG_2);
- if (len < 0)
+ if (erts_iolist_size(BIF_ARG_2, &len) != 0)
BIF_ERROR(BIF_P,BADARG);
else if (len == 0)
buf = "";
else {
#ifdef DEBUG
- int len2;
+ ErlDrvSizeT len2;
#endif
buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, len+1);
#ifdef DEBUG
@@ -4581,7 +4541,7 @@ BIF_RETTYPE send_to_logger_2(BIF_ALIST_2)
#else
(void)
#endif
- io_list_to_buf(BIF_ARG_2, buf, len);
+ erts_iolist_to_buf(BIF_ARG_2, buf, len);
ASSERT(len2 == len);
buf[len] = '\0';
switch (BIF_ARG_1) {
@@ -4675,7 +4635,6 @@ BIF_RETTYPE dt_prepend_vm_tag_data_1(BIF_ALIST_1)
#ifdef USE_VM_PROBES
Eterm b;
Eterm *hp;
- hp = HAlloc(BIF_P,2);
if (is_binary((DT_UTAG(BIF_P)))) {
Uint sz = binary_size(DT_UTAG(BIF_P));
int i;
@@ -4692,6 +4651,7 @@ BIF_RETTYPE dt_prepend_vm_tag_data_1(BIF_ALIST_1)
} else {
b = new_binary(BIF_P,(byte *)"\0",1);
}
+ hp = HAlloc(BIF_P,2);
BIF_RET(CONS(hp,b,BIF_ARG_1));
#else
BIF_RET(BIF_ARG_1);
@@ -4702,7 +4662,6 @@ BIF_RETTYPE dt_append_vm_tag_data_1(BIF_ALIST_1)
#ifdef USE_VM_PROBES
Eterm b;
Eterm *hp;
- hp = HAlloc(BIF_P,2);
if (is_binary((DT_UTAG(BIF_P)))) {
Uint sz = binary_size(DT_UTAG(BIF_P));
int i;
@@ -4719,6 +4678,7 @@ BIF_RETTYPE dt_append_vm_tag_data_1(BIF_ALIST_1)
} else {
b = new_binary(BIF_P,(byte *)"\0",1);
}
+ hp = HAlloc(BIF_P,2);
BIF_RET(CONS(hp,BIF_ARG_1,b));
#else
BIF_RET(BIF_ARG_1);
@@ -4742,14 +4702,14 @@ BIF_RETTYPE dt_spread_tag_1(BIF_ALIST_1)
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) start spreading tag %T\r\n",
- BIF_P->id,DT_UTAG(BIF_P));
+ BIF_P->common.id,DT_UTAG(BIF_P));
#endif
} else {
DT_UTAG_FLAGS(BIF_P) &= ~DT_UTAG_SPREADING;
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) stop spreading tag %T\r\n",
- BIF_P->id,DT_UTAG(BIF_P));
+ BIF_P->common.id,DT_UTAG(BIF_P));
#endif
}
}
@@ -4775,7 +4735,7 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1)
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) restore Killing tag!\r\n",
- BIF_P->id);
+ BIF_P->common.id);
#endif
}
DT_UTAG(BIF_P) = NIL;
@@ -4792,12 +4752,12 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1)
erts_fprintf(stderr,
"Dtrace -> (%T) restore stop spreading "
"tag %T\r\n",
- BIF_P->id, tpl[2]);
+ BIF_P->common.id, tpl[2]);
} else if ((x & DT_UTAG_SPREADING) &&
!(DT_UTAG_FLAGS(BIF_P) & DT_UTAG_SPREADING)) {
erts_fprintf(stderr,
"Dtrace -> (%T) restore start spreading "
- "tag %T\r\n",BIF_P->id,tpl[2]);
+ "tag %T\r\n",BIF_P->common.id,tpl[2]);
}
#endif
DT_UTAG_FLAGS(BIF_P) = x;
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 7cb2c78815..71f232035d 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -322,27 +322,6 @@ do { \
ERTS_BIF_EXITED((PROC)); \
} while (0)
-#ifdef ERTS_SMP
-#define ERTS_SMP_BIF_CHK_PENDING_EXIT(P, L) \
-do { \
- ERTS_SMP_LC_ASSERT((L) == erts_proc_lc_my_proc_locks((P))); \
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & (L)); \
- if (!((L) & ERTS_PROC_LOCK_STATUS)) \
- erts_smp_proc_lock((P), ERTS_PROC_LOCK_STATUS); \
- if (ERTS_PROC_PENDING_EXIT((P))) { \
- erts_handle_pending_exit((P), (L)|ERTS_PROC_LOCK_STATUS); \
- erts_smp_proc_unlock((P), \
- (((L)|ERTS_PROC_LOCK_STATUS) \
- & ~ERTS_PROC_LOCK_MAIN)); \
- ERTS_BIF_EXITED((P)); \
- } \
- if (!((L) & ERTS_PROC_LOCK_STATUS)) \
- erts_smp_proc_unlock((P), ERTS_PROC_LOCK_STATUS); \
-} while (0)
-#else
-#define ERTS_SMP_BIF_CHK_PENDING_EXIT(P, L)
-#endif
-
/*
* The ERTS_BIF_*_AWAIT_X_*_TRAP makros either exits the caller, or
* sets up a trap to erlang:await_proc_exit/3.
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index f7dad2767f..9a1a25031c 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -180,10 +180,6 @@ bif 'erl.lang.port':open/2 ebif_open_port_2 open_port_2
bif erlang:pid_to_list/1
bif 'erl.lang.proc':pid_to_string/1 ebif_pid_to_string_1 pid_to_list_1
-bif erlang:port_info/1
-bif 'erl.lang.port':info/1 ebif_port_info_1
-bif erlang:port_info/2
-bif 'erl.lang.port':info/2 ebif_port_info_2
bif erlang:ports/0
bif 'erl.lang.node':ports/0 ebif_ports_0
bif erlang:pre_loaded/0
@@ -252,24 +248,16 @@ bif erlang:setnode/2
bif erlang:setnode/3
bif erlang:dist_exit/3
-bif erlang:port_call/2
-bif 'erl.lang.port':call/2 ebif_port_call_2
-bif erlang:port_call/3
-bif 'erl.lang.port':call/3 ebif_port_call_3
-bif erlang:port_command/2
-bif 'erl.lang.port':command/2 ebif_port_command_2
-bif erlang:port_command/3
-bif 'erl.lang.port':command/3 ebif_port_command_3
-bif erlang:port_control/3
-bif 'erl.lang.port':control/3 ebif_port_control_3
-bif erlang:port_close/1
-bif 'erl.lang.port':close/1 ebif_port_close_1
-bif erlang:port_connect/2
-bif 'erl.lang.port':connect/2 ebif_port_connect_2
-bif erlang:port_set_data/2
-bif 'erl.lang.port':set_data/2 ebif_port_set_data_2
-bif erlang:port_get_data/1
-bif 'erl.lang.port':get_data/1 ebif_port_get_data_1
+# Static native functions in erts_internal
+bif erts_internal:port_info/1
+bif erts_internal:port_info/2
+bif erts_internal:port_call/3
+bif erts_internal:port_command/3
+bif erts_internal:port_control/3
+bif erts_internal:port_close/1
+bif erts_internal:port_connect/2
+bif erts_internal:port_set_data/2
+bif erts_internal:port_get_data/1
# Tracing & debugging.
bif erlang:trace_pattern/2
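With the erlang:port_* BIFs removed from bif.tab and replaced by erts_internal counterparts, the erlang module presumably wraps the new BIFs and waits for the result of a scheduled signal itself. A hedged sketch (the updated preloaded erlang.erl is not shown in this diff) of what such a wrapper could look like, using the reference-reply convention seen in the bif.c changes above:

%% Hypothetical wrapper sketch -- not the actual erlang.erl from this commit.
%% erts_internal:port_command/3 either completes synchronously or returns a
%% reference whose reply message carries the result of the scheduled signal.
port_command(Port, Data) ->
    case erts_internal:port_command(Port, Data, []) of
        Ref when is_reference(Ref) ->
            receive {Ref, Result} -> Result end;
        Result ->
            Result
    end.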
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 3d2725e239..4441dab181 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -355,10 +355,10 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
{
Eterm bin;
- Uint size;
+ ErlDrvSizeT size;
byte* bytes;
#ifdef DEBUG
- int offset;
+ ErlDrvSizeT offset;
#endif
if (is_nil(arg)) {
@@ -377,7 +377,7 @@ BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
#ifdef DEBUG
offset =
#endif
- io_list_to_buf(arg, (char*) bytes, size);
+ erts_iolist_to_buf(arg, (char*) bytes, size);
ASSERT(offset == 0);
BIF_RET(bin);
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index c1e9fe536d..9aa1e5f30d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -61,16 +61,19 @@ extern char* erts_system_version[];
static void
port_info(int to, void *to_arg)
{
- int i;
- for (i = 0; i < erts_max_ports; i++)
- print_port_info(to, to_arg, i);
+ int i, max = erts_ptab_max(&erts_port);
+ for (i = 0; i < max; i++) {
+ Port *p = erts_pix2port(i);
+ if (p)
+ print_port_info(p, to, to_arg);
+ }
}
void
process_info(int to, void *to_arg)
{
- int i;
- for (i = 0; i < erts_max_processes; i++) {
+ int i, max = erts_ptab_max(&erts_proc);
+ for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p && p->i != ENULL) {
if (!ERTS_PROC_IS_EXITING(p))
@@ -84,12 +87,12 @@ process_info(int to, void *to_arg)
static void
process_killer(void)
{
- int i, j;
+ int i, j, max = erts_ptab_max(&erts_proc);
Process* rp;
erts_printf("\n\nProcess Information\n\n");
erts_printf("--------------------------------------------------\n");
- for (i = erts_max_processes-1; i >= 0; i--) {
+ for (i = max-1; i >= 0; i--) {
rp = erts_pix2proc(i);
if (rp && rp->i != ENULL) {
int br;
@@ -196,7 +199,7 @@ print_process_info(int to, void *to_arg, Process *p)
erts_aint32_t state;
/* display the PID */
- erts_print(to, to_arg, "=proc:%T\n", p->id);
+ erts_print(to, to_arg, "=proc:%T\n", p->common.id);
/* Display the state */
erts_print(to, to_arg, "State: ");
@@ -226,8 +229,8 @@ print_process_info(int to, void *to_arg, Process *p)
* If the process is registered as a global process, display the
* registered name
*/
- if (p->reg != NULL)
- erts_print(to, to_arg, "Name: %T\n", p->reg->name);
+ if (p->common.u.alive.reg)
+ erts_print(to, to_arg, "Name: %T\n", p->common.u.alive.reg->name);
/*
* Display the initial function name
@@ -301,11 +304,11 @@ print_process_info(int to, void *to_arg, Process *p)
}
/* display the links only if there are any*/
- if (p->nlinks != NULL || p->monitors != NULL) {
+ if (ERTS_P_LINKS(p) || ERTS_P_MONITORS(p)) {
PrintMonitorContext context = {1,to};
erts_print(to, to_arg,"Link list: [");
- erts_doforall_links(p->nlinks, &doit_print_link, &context);
- erts_doforall_monitors(p->monitors, &doit_print_monitor, &context);
+ erts_doforall_links(ERTS_P_LINKS(p), &doit_print_link, &context);
+ erts_doforall_monitors(ERTS_P_MONITORS(p), &doit_print_monitor, &context);
erts_print(to, to_arg,"]\n");
}
@@ -625,9 +628,9 @@ bin_check(void)
{
Process *rp;
struct erl_off_heap_header* hdr;
- int i, printed = 0;
+ int i, printed = 0, max = erts_ptab_max(&erts_proc);
- for (i=0; i < erts_max_processes; i++) {
+ for (i=0; i < max; i++) {
rp = erts_pix2proc(i);
if (!rp)
continue;
@@ -635,7 +638,7 @@ bin_check(void)
if (hdr->thing_word == HEADER_PROC_BIN) {
ProcBin *bp = (ProcBin*) hdr;
if (!printed) {
- erts_printf("Process %T holding binary data \n", rp->id);
+ erts_printf("Process %T holding binary data \n", rp->common.id);
printed = 1;
}
erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
@@ -766,7 +769,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_print_nif_taints(fd, NULL);
erts_fdprintf(fd, "Atoms: %d\n", atom_table_size());
info(fd, NULL); /* General system info */
- if (erts_proc.tab)
+ if (erts_ptab_initialized(&erts_proc))
process_info(fd, NULL); /* Info about each process and port */
db_info(fd, NULL, 0);
erts_print_bif_timer_info(fd, NULL);
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 36eda04de2..23c0fca6aa 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -47,7 +47,7 @@ copy_object(Eterm obj, Process* to)
if (DTRACE_ENABLED(copy_object)) {
DTRACE_CHARBUF(proc_name, 64);
- erts_snprintf(proc_name, sizeof(proc_name), "%T", to->id);
+ erts_snprintf(proc_name, sizeof(proc_name), "%T", to->common.id);
DTRACE2(copy_object, proc_name, size);
}
#endif
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 28c4621ff2..f169a1a34a 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -124,6 +124,13 @@ static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm);
static void init_nodes_monitors(void);
static erts_smp_atomic_t no_caches;
+static erts_smp_atomic_t no_nodes;
+
+struct {
+ Eterm reason;
+ ErlHeapFragment *bp;
+} nodedown;
+
static void
delete_cache(ErtsAtomCache *cache)
@@ -144,7 +151,7 @@ create_cache(DistEntry *dep)
ERTS_SMP_LC_ASSERT(
is_internal_port(dep->cid)
- && erts_lc_is_port_locked(&erts_port[internal_port_index(dep->cid)]));
+ && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
ASSERT(!dep->cache);
dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE,
@@ -171,11 +178,10 @@ get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs)
return NULL;
}
else {
- ErtsProcList *plp;
- plp = dep->suspended.first;
- dep->suspended.first = NULL;
- dep->suspended.last = NULL;
- return plp;
+ ErtsProcList *suspended = dep->suspended;
+ dep->suspended = NULL;
+ erts_proclist_fetch(&suspended, NULL);
+ return suspended;
}
}
@@ -252,7 +258,7 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
if (mon->type == MON_ORIGIN) {
/* local pid is beeing monitored */
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); nope, can happen during process exit */
if (rmon != NULL) {
erts_destroy_monitor(rmon);
@@ -262,7 +268,7 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
Eterm watched;
UseTmpHeapNoproc(3);
ASSERT(mon->type == MON_TARGET);
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); can happen during process exit */
if (rmon != NULL) {
ASSERT(is_atom(rmon->name) || is_nil(rmon->name));
@@ -311,7 +317,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp)
goto done;
}
- rlnk = erts_remove_link(&(rp->nlinks), sublnk->pid);
+ rlnk = erts_remove_link(&ERTS_P_LINKS(rp), sublnk->pid);
xres = erts_send_exit_signal(NULL,
sublnk->pid,
rp,
@@ -370,7 +376,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
if (!rp) {
goto done;
}
- rlnk = erts_remove_link(&(rp->nlinks), name);
+ rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name);
if (rlnk != NULL) {
ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE));
erts_destroy_link(rlnk);
@@ -394,6 +400,47 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
erts_destroy_link(lnk);
}
+static void
+set_node_not_alive(void *unused)
+{
+ ErlHeapFragment *bp;
+ Eterm nodename = erts_this_dist_entry->sysname;
+
+ ASSERT(erts_smp_atomic_read_nob(&no_nodes) == 0);
+
+ erts_smp_thr_progress_block();
+ erts_set_this_node(am_Noname, 0);
+ erts_is_alive = 0;
+ send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nodedown.reason);
+ nodedown.reason = NIL;
+ bp = nodedown.bp;
+ nodedown.bp = NULL;
+ erts_smp_thr_progress_unblock();
+ if (bp)
+ free_message_buffer(bp);
+}
+
+static ERTS_INLINE void
+dec_no_nodes(void)
+{
+ erts_aint_t no = erts_smp_atomic_dec_read_mb(&no_nodes);
+ ASSERT(no >= 0);
+ ASSERT(erts_get_scheduler_id()); /* Need to be a scheduler */
+ if (no == 0)
+ erts_schedule_misc_aux_work(erts_get_scheduler_id(),
+ set_node_not_alive,
+ NULL);
+}
+
+static ERTS_INLINE void
+inc_no_nodes(void)
+{
+#ifdef DEBUG
+ erts_aint_t no = erts_smp_atomic_read_nob(&no_nodes);
+ ASSERT(erts_is_alive ? no > 0 : no == 0);
+#endif
+ erts_smp_atomic_inc_mb(&no_nodes);
+}
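
The two inline helpers above keep a connection count and defer the actual nodedown handling to a scheduler once the count reaches zero. A minimal standalone sketch of that count-to-zero pattern, written against C11 <stdatomic.h> rather than the ERTS atomic API (the helper names are illustrative, and sched_misc_work() runs the callback inline here, whereas ERTS defers it to a scheduler thread):

    #include <assert.h>
    #include <stdatomic.h>

    static atomic_long node_count;          /* counterpart of no_nodes */

    static void node_teardown(void *arg) { (void)arg; /* nodedown work */ }

    /* Stand-in for the deferred scheduling; runs the callback directly. */
    static void sched_misc_work(void (*fn)(void *), void *arg) { fn(arg); }

    static void inc_node_count(void)
    {
        atomic_fetch_add_explicit(&node_count, 1, memory_order_acq_rel);
    }

    static void dec_node_count(void)
    {
        long no = atomic_fetch_sub_explicit(&node_count, 1,
                                            memory_order_acq_rel) - 1;
        assert(no >= 0);
        if (no == 0)                        /* last connection just went down */
            sched_misc_work(node_teardown, NULL);
    }
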
/*
* proc is currently running or exiting process.
@@ -403,47 +450,76 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
Eterm nodename;
if (dep == erts_this_dist_entry) { /* Net kernel has died (clean up!!) */
+ DistEntry *tdep;
+ int no_dist_port = 0;
Eterm nd_reason = (reason == am_no_network
? am_no_network
: am_net_kernel_terminated);
erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next)
+ no_dist_port++;
+ for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next)
+ no_dist_port++;
+
/* KILL all port controllers */
- while(erts_visible_dist_entries || erts_hidden_dist_entries) {
- DistEntry *tdep;
- Eterm prt_id;
- Port *prt;
- if(erts_hidden_dist_entries)
- tdep = erts_hidden_dist_entries;
- else
- tdep = erts_visible_dist_entries;
- prt_id = tdep->cid;
- ASSERT(is_internal_port(prt_id));
+ if (no_dist_port == 0)
erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ else {
+ Eterm def_buf[128];
+ int i = 0;
+ Eterm *dist_port;
- prt = erts_id2port(prt_id, NULL, 0);
- if (prt) {
- ASSERT(prt->status & ERTS_PORT_SFLG_DISTRIBUTION);
- ASSERT(prt->dist_entry);
- /* will call do_net_exists !!! */
- erts_do_exit_port(prt, prt_id, nd_reason);
- erts_port_release(prt);
+ if (no_dist_port <= sizeof(def_buf)/sizeof(def_buf[0]))
+ dist_port = &def_buf[0];
+ else
+ dist_port = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(Eterm)*no_dist_port);
+ for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) {
+ ASSERT(is_internal_port(tdep->cid));
+ dist_port[i++] = tdep->cid;
+ }
+ for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) {
+ ASSERT(is_internal_port(tdep->cid));
+ dist_port[i++] = tdep->cid;
}
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
- }
+ for (i = 0; i < no_dist_port; i++) {
+ Port *prt = erts_port_lookup(dist_port[i],
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (!prt)
+ continue;
+ ASSERT(erts_atomic32_read_nob(&prt->state)
+ & ERTS_PORT_SFLG_DISTRIBUTION);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED,
+ prt, dist_port[i], nd_reason, NULL);
+ }
- nodename = erts_this_dist_entry->sysname;
- erts_smp_thr_progress_block();
- erts_set_this_node(am_Noname, 0);
- erts_is_alive = 0;
- send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nd_reason);
- erts_smp_thr_progress_unblock();
+ if (dist_port != &def_buf[0])
+ erts_free(ERTS_ALC_T_TMP, dist_port);
+ }
+ /*
+	 * When the last dist port exits, the node will be taken
+	 * from alive to not alive.
+ */
+ ASSERT(is_nil(nodedown.reason) && !nodedown.bp);
+ if (is_immed(nd_reason))
+ nodedown.reason = nd_reason;
+ else {
+ Eterm *hp;
+ Uint sz = size_object(nd_reason);
+ nodedown.bp = new_message_buffer(sz);
+ hp = nodedown.bp->mem;
+ nodedown.reason = copy_struct(nd_reason,
+ sz,
+ &hp,
+ &nodedown.bp->off_heap);
+ }
}
- else { /* recursive call via erts_do_exit_port() will end up here */
+ else { /* Call from distribution port */
NetExitsContext nec = {dep};
ErtsLink *nlinks;
ErtsLink *node_links;
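
The def_buf/dist_port handling in the hunk above snapshots the port ids while the dist table lock is held, falls back to a heap buffer only when the stack buffer is too small, and delivers the exits after the lock is dropped. A generic sketch of that pattern (types, lock/exit callbacks and the copy step are illustrative assumptions, not ERTS code):

    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long Id;               /* stand-in for Eterm port ids */

    void exit_all(const Id *ids_locked, int n, void (*unlock)(void),
                  void (*exit_port)(Id))
    {
        Id def_buf[128];
        Id *buf = def_buf;
        int i;

        if (n > (int)(sizeof(def_buf)/sizeof(def_buf[0])))
            buf = malloc(sizeof(Id) * n);          /* too many for the stack */
        memcpy(buf, ids_locked, sizeof(Id) * n);   /* snapshot under the lock */
        unlock();                                  /* never exit ports locked */
        for (i = 0; i < n; i++)
            exit_port(buf[i]);
        if (buf != def_buf)
            free(buf);
    }
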
@@ -454,10 +530,10 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
erts_smp_de_rwlock(dep);
ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid)
- && erts_lc_is_port_locked(&erts_port[internal_port_index(dep->cid)]));
+ && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
if (erts_port_task_is_scheduled(&dep->dist_cmd))
- erts_port_task_abort(dep->cid, &dep->dist_cmd);
+ erts_port_task_abort(&dep->dist_cmd);
if (dep->status & ERTS_DE_SFLG_EXITING) {
#ifdef DEBUG
@@ -503,6 +579,9 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
clear_dist_entry(dep);
}
+
+ dec_no_nodes();
+
return 1;
}
@@ -516,6 +595,10 @@ void init_dist(void)
{
init_nodes_monitors();
+ nodedown.reason = NIL;
+ nodedown.bp = NULL;
+
+ erts_smp_atomic_init_nob(&no_nodes, 0);
erts_smp_atomic_init_nob(&no_caches, 0);
/* Lookup/Install all references to trap functions */
@@ -769,7 +852,7 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(receiver_name), "%T", remote);
msize = size_object(message);
if (token != NIL && token != am_have_dt_utag) {
@@ -826,7 +909,7 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(receiver_name),
"{%T,%s}", remote_name, node_name);
msize = size_object(message);
@@ -840,10 +923,10 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
if (token != NIL)
ctl = TUPLE5(&ctl_heap[0], make_small(DOP_REG_SEND_TT),
- sender->id, am_Cookie, remote_name, token);
+ sender->common.id, am_Cookie, remote_name, token);
else
ctl = TUPLE4(&ctl_heap[0], make_small(DOP_REG_SEND),
- sender->id, am_Cookie, remote_name);
+ sender->common.id, am_Cookie, remote_name);
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
@@ -889,7 +972,7 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
*node_name = *sender_name = *remote_name = '\0';
if (DTRACE_ENABLED(process_exit_signal_remote)) {
erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
erts_snprintf(remote_name, sizeof(remote_name),
"{%T,%s}", remote, node_name);
erts_snprintf(reason_str, sizeof(reason), "%T", reason);
@@ -1141,7 +1224,7 @@ int erts_net_message(Port *prt,
}
erts_smp_de_links_lock(dep);
- res = erts_add_link(&(rp->nlinks), LINK_PID, from);
+ res = erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, from);
if (res < 0) {
/* It was already there! Lets skip the rest... */
@@ -1149,7 +1232,7 @@ int erts_net_message(Port *prt,
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
break;
}
- lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->id);
+ lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->common.id);
erts_add_link(&(ERTS_LINK_ROOT(lnk)), LINK_PID, from);
erts_smp_de_links_unlock(dep);
@@ -1176,7 +1259,7 @@ int erts_net_message(Port *prt,
if (!rp)
break;
- lnk = erts_remove_link(&(rp->nlinks), from);
+ lnk = erts_remove_link(&ERTS_P_LINKS(rp), from);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && lnk != NULL) {
trace_proc(NULL, rp, am_getting_unlinked, from);
@@ -1233,10 +1316,10 @@ int erts_net_message(Port *prt,
}
else {
if (is_atom(watched))
- watched = rp->id;
+ watched = rp->common.id;
erts_smp_de_links_lock(dep);
erts_add_monitor(&(dep->monitors), MON_ORIGIN, ref, watched, name);
- erts_add_monitor(&(rp->monitors), MON_TARGET, ref, watcher, name);
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, watcher, name);
erts_smp_de_links_unlock(dep);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
@@ -1275,7 +1358,7 @@ int erts_net_message(Port *prt,
if (!rp) {
break;
}
- mon = erts_remove_monitor(&(rp->monitors),ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
ASSERT(mon != NULL);
if (mon == NULL) {
@@ -1432,7 +1515,7 @@ int erts_net_message(Port *prt,
erts_destroy_monitor(mon);
- mon = erts_remove_monitor(&(rp->monitors),ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
if (mon == NULL) {
erts_smp_proc_unlock(rp, rp_locks);
@@ -1483,7 +1566,7 @@ int erts_net_message(Port *prt,
if (!rp)
lnk = NULL;
else {
- lnk = erts_remove_link(&(rp->nlinks), from);
+ lnk = erts_remove_link(&ERTS_P_LINKS(rp), from);
/* If lnk == NULL, we have unlinked on this side, i.e.
* ignore exit.
@@ -1597,7 +1680,7 @@ int erts_net_message(Port *prt,
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- erts_do_exit_port(prt, dep->cid, am_killed);
+ erts_deliver_port_exit(prt, dep->cid, am_killed, 0);
ERTS_SMP_CHK_NO_PROC_LOCKS;
return -1;
}
@@ -1693,7 +1776,6 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
erts_smp_mtx_unlock(&dep->qlock);
plp = erts_proclist_create(c_p);
- plp->next = NULL;
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
suspended = 1;
erts_smp_mtx_lock(&dep->qlock);
@@ -1726,11 +1808,7 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
else {
/* Enqueue suspended process on dist entry */
ASSERT(plp);
- if (dep->suspended.last)
- dep->suspended.last->next = plp;
- else
- dep->suspended.first = plp;
- dep->suspended.last = plp;
+ erts_proclist_store_last(&dep->suspended, plp);
}
}
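
erts_proclist_store_last() replaces the open-coded first/last bookkeeping removed above. The shape of that tail append, shown as a minimal standalone helper (this only illustrates the pattern being hidden; the actual erts_proclist representation is not shown in this diff):

    struct plist  { struct plist *next; };
    struct pqueue { struct plist *first, *last; };

    static void store_last(struct pqueue *q, struct plist *p)
    {
        p->next = NULL;
        if (q->last)                /* non-empty queue: link after the tail */
            q->last->next = p;
        else                        /* empty queue: p becomes the head */
            q->first = p;
        q->last = p;
    }
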
@@ -1779,7 +1857,7 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
erts_snprintf(port_str, sizeof(port_str), "%T", cid);
erts_snprintf(remote_str, sizeof(remote_str), "%T", dep->sysname);
- erts_snprintf(pid_str, sizeof(pid_str), "%T", c_p->id);
+ erts_snprintf(pid_str, sizeof(pid_str), "%T", c_p->common.id);
DTRACE4(dist_port_busy, erts_this_node_sysname,
port_str, remote_str, pid_str);
}
@@ -1812,7 +1890,7 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
erts_snprintf(remote_str, sizeof(remote_str),
"%T", prt->dist_entry->sysname);
DTRACE4(dist_output, erts_this_node_sysname, port_str,
@@ -1866,7 +1944,7 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
erts_snprintf(remote_str, sizeof(remote_str),
"%T", prt->dist_entry->sysname);
DTRACE4(dist_outputv, erts_this_node_sysname, port_str,
@@ -1903,13 +1981,13 @@ int
erts_dist_command(Port *prt, int reds_limit)
{
Sint reds = ERTS_PORT_REDS_DIST_CMD_START;
- int prt_busy;
Uint32 status;
Uint32 flags;
Sint obufsize = 0;
ErtsDistOutputQueue oq, foq;
DistEntry *dep = prt->dist_entry;
Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
+ erts_aint32_t sched_flags;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
@@ -1925,7 +2003,7 @@ erts_dist_command(Port *prt, int reds_limit)
erts_smp_de_runlock(dep);
if (status & ERTS_DE_SFLG_EXITING) {
- erts_do_exit_port(prt, prt->id, am_killed);
+ erts_deliver_port_exit(prt, prt->common.id, am_killed, 0);
erts_deref_dist_entry(dep);
return reds + ERTS_PORT_REDS_DIST_CMD_EXIT;
}
@@ -1952,12 +2030,12 @@ erts_dist_command(Port *prt, int reds_limit)
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+
if (reds > reds_limit)
goto preempted;
- prt_busy = (int) (prt->status & ERTS_PORT_SFLG_PORT_BUSY);
-
- if (!prt_busy && foq.first) {
+ if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) && foq.first) {
int preempt = 0;
do {
Uint size;
@@ -1974,11 +2052,10 @@ erts_dist_command(Port *prt, int reds_limit)
obufsize += size_obuf(fob);
foq.first = foq.first->next;
free_dist_obuf(fob);
- preempt = reds > reds_limit || (prt->status & ERTS_PORT_SFLGS_DEAD);
- if (prt->status & ERTS_PORT_SFLG_PORT_BUSY) {
- prt_busy = 1;
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT);
+ if (sched_flags & ERTS_PTS_FLG_BUSY_PORT)
break;
- }
} while (foq.first && !preempt);
if (!foq.first)
foq.last = NULL;
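
The rewritten loop above re-reads prt->sched.flags after each driver callback and stops on an exceeded reduction budget, an exiting port, or a busy port. A stripped-down loop of the same shape (flag values and send_one() are illustrative placeholders):

    #define FLG_BUSY 0x1
    #define FLG_EXIT 0x2

    struct obuf { struct obuf *next; };

    /* send_one() sends one buffer and returns the port's current flags. */
    static int drain(struct obuf **first, struct obuf **last,
                     int *reds, int reds_limit,
                     int (*send_one)(struct obuf *))
    {
        int preempt = 0;
        while (*first && !preempt) {
            struct obuf *ob = *first;
            int flags = send_one(ob);          /* may make the port busy */
            *first = ob->next;
            (*reds)++;                         /* account for work done */
            preempt = *reds > reds_limit || (flags & FLG_EXIT);
            if (flags & FLG_BUSY)
                break;                         /* keep the rest queued */
        }
        if (!*first)
            *last = NULL;
        return preempt;
    }
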
@@ -1986,7 +2063,7 @@ erts_dist_command(Port *prt, int reds_limit)
goto preempted;
}
- if (prt_busy) {
+ if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) {
if (oq.first) {
ErtsDistOutputBuf *ob;
int preempt;
@@ -2058,12 +2135,10 @@ erts_dist_command(Port *prt, int reds_limit)
obufsize += size_obuf(fob);
oq.first = oq.first->next;
free_dist_obuf(fob);
- preempt = reds > reds_limit || (prt->status & ERTS_PORT_SFLGS_DEAD);
- if (prt->status & ERTS_PORT_SFLG_PORT_BUSY) {
- prt_busy = 1;
- if (oq.first && !preempt)
- goto finalize_only;
- }
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT);
+ if ((sched_flags & ERTS_PTS_FLG_BUSY_PORT) && oq.first && !preempt)
+ goto finalize_only;
}
ASSERT(!oq.first || preempt);
@@ -2091,7 +2166,7 @@ erts_dist_command(Port *prt, int reds_limit)
ASSERT(dep->qsize >= obufsize);
dep->qsize -= obufsize;
obufsize = 0;
- if (!prt_busy
+ if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)
&& (dep->qflgs & ERTS_DE_QFLG_BUSY)
&& dep->qsize < erts_dist_buf_busy_limit) {
ErtsProcList *suspendees;
@@ -2137,11 +2212,15 @@ erts_dist_command(Port *prt, int reds_limit)
return reds;
preempted:
+ /*
+ * Here we assume that state has been read
+ * since last call to driver.
+ */
ASSERT(oq.first || !oq.last);
ASSERT(!oq.first || oq.last);
- if (prt->status & ERTS_PORT_SFLGS_DEAD) {
+ if (sched_flags & ERTS_PTS_FLG_EXIT) {
/*
* Port died during port command; clean up 'oq'
* and 'foq'. Things buffered in dist entry after
@@ -2199,7 +2278,7 @@ erts_dist_port_not_busy(Port *prt)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
erts_snprintf(remote_str, sizeof(remote_str),
"%T", prt->dist_entry->sysname);
DTRACE3(dist_port_not_busy, erts_this_node_sysname,
@@ -2241,7 +2320,7 @@ static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp)
Process *rp;
ErtsMonitor *rmon;
rp = erts_proc_lookup(mon->pid);
- if (!rp || (rmon = erts_lookup_monitor(rp->monitors, mon->ref)) == NULL) {
+ if (!rp || (rmon = erts_lookup_monitor(ERTS_P_MONITORS(rp), mon->ref)) == NULL) {
erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->pid);
} else if (mon->type == MON_ORIGIN) {
/* Local pid is being monitored */
@@ -2486,6 +2565,7 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
+ inc_no_nodes();
erts_set_this_node(BIF_ARG_1, (Uint32) creation);
erts_is_alive = 1;
send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL);
@@ -2554,9 +2634,9 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
/* DFLAG_EXTENDED_REFERENCES is compulsory from R9 and forward */
if (!(DFLAG_EXTENDED_REFERENCES & flags)) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "%T", BIF_P->id);
- if (BIF_P->reg)
- erts_dsprintf(dsbufp, " (%T)", BIF_P->reg->name);
+ erts_dsprintf(dsbufp, "%T", BIF_P->common.id);
+ if (BIF_P->common.u.alive.reg)
+ erts_dsprintf(dsbufp, " (%T)", BIF_P->common.u.alive.reg->name);
erts_dsprintf(dsbufp,
" attempted to enable connection to node %T "
"which is not able to handle extended references.\n",
@@ -2576,10 +2656,14 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
else if (!dep)
goto system_limit; /* Should never happen!!! */
- pp = erts_id2port(BIF_ARG_2, BIF_P, ERTS_PROC_LOCK_MAIN);
+ pp = erts_id2port_sflgs(BIF_ARG_2,
+ BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
erts_smp_de_rwlock(dep);
- if (!pp || (pp->status & ERTS_PORT_SFLG_EXITING))
+ if (!pp || (erts_atomic32_read_nob(&pp->state)
+ & ERTS_PORT_SFLG_EXITING))
goto badarg;
if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
@@ -2594,11 +2678,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
plp->next = NULL;
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
erts_smp_mtx_lock(&dep->qlock);
- if (dep->suspended.last)
- dep->suspended.last->next = plp;
- else
- dep->suspended.first = plp;
- dep->suspended.last = plp;
+ erts_proclist_store_last(&dep->suspended, plp);
erts_smp_mtx_unlock(&dep->qlock);
goto yield;
}
@@ -2608,7 +2688,16 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
if (pp->dist_entry || is_not_nil(dep->cid))
goto badarg;
- erts_port_status_bor_set(pp, ERTS_PORT_SFLG_DISTRIBUTION);
+ erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
+
+ /*
+ * Dist-ports do not use the "busy port message queue" functionality, but
+ * instead use "busy dist entry" functionality.
+ */
+ {
+ ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
+ erl_drv_busy_msgq_limits((ErlDrvPort) pp, &disable, NULL);
+ }
pp->dist_entry = dep;
@@ -2640,6 +2729,8 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
erts_smp_de_rwunlock(dep);
dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+ inc_no_nodes();
+
send_nodes_mon_msgs(BIF_P,
am_nodeup,
BIF_ARG_1,
@@ -2653,7 +2744,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
}
if (pp)
- erts_smp_port_unlock(pp);
+ erts_port_release(pp);
return ret;
@@ -2697,7 +2788,7 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
if (is_internal_pid(local)) {
Process *lp;
ErtsProcLocks lp_locks;
- if (BIF_P->id == local) {
+ if (BIF_P->common.id == local) {
lp_locks = ERTS_PROC_LOCKS_ALL;
lp = BIF_P;
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -2725,11 +2816,17 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
#endif
erts_smp_proc_unlock(lp, lp_locks);
if (lp == BIF_P) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&BIF_P->state);
/*
* We may have exited current process and may have to take action.
*/
- ERTS_BIF_CHK_EXITED(BIF_P);
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
+#ifdef ERTS_SMP
+ if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
+#endif
+ ERTS_BIF_EXITED(BIF_P);
+ }
}
}
else if (is_external_pid(local)
@@ -2927,23 +3024,23 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options)
if (Bool == am_true) {
ASSERT(dep->cid != NIL);
lnk = erts_add_or_lookup_link(&(dep->node_links), LINK_NODE,
- p->id);
+ p->common.id);
++ERTS_LINK_REFC(lnk);
- lnk = erts_add_or_lookup_link(&(p->nlinks), LINK_NODE, Node);
+ lnk = erts_add_or_lookup_link(&ERTS_P_LINKS(p), LINK_NODE, Node);
++ERTS_LINK_REFC(lnk);
}
else {
- lnk = erts_lookup_link(dep->node_links, p->id);
+ lnk = erts_lookup_link(dep->node_links, p->common.id);
if (lnk != NULL) {
if ((--ERTS_LINK_REFC(lnk)) == 0) {
erts_destroy_link(erts_remove_link(&(dep->node_links),
- p->id));
+ p->common.id));
}
}
- lnk = erts_lookup_link(p->nlinks, Node);
+ lnk = erts_lookup_link(ERTS_P_LINKS(p), Node);
if (lnk != NULL) {
if ((--ERTS_LINK_REFC(lnk)) == 0) {
- erts_destroy_link(erts_remove_link(&(p->nlinks),
+ erts_destroy_link(erts_remove_link(&ERTS_P_LINKS(p),
Node));
}
}
@@ -3505,7 +3602,7 @@ erts_processes_monitoring_nodes(Process *c_p)
olist = erts_bld_cons(hpp, szp, am_nodedown_reason, olist);
res = erts_bld_cons(hpp, szp,
erts_bld_tuple(hpp, szp, 2,
- nmp->proc->id,
+ nmp->proc->common.id,
olist),
res);
}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index 845151c895..2bc3d9c881 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -187,11 +187,12 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
if (prt) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ASSERT((erts_port_status_get(prt) & ERTS_PORT_SFLGS_DEAD) == 0);
+ ASSERT((erts_atomic32_read_nob(&prt->state)
+ & ERTS_PORT_SFLGS_DEAD) == 0);
ASSERT(prt->dist_entry);
dep = prt->dist_entry;
- id = prt->id;
+ id = prt->common.id;
}
else {
ASSERT(dist_entry);
@@ -203,13 +204,8 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
id = dep->cid;
}
- if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) {
- (void) erts_port_task_schedule(id,
- &dep->dist_cmd,
- ERTS_PORT_TASK_DIST_CMD,
- (ErlDrvEvent) -1,
- NULL);
- }
+ if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1))
+ erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD);
}
#endif
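
The simplified scheduling call above still relies on the same at-most-once guard: only the caller that flips dist_cmd_scheduled from 0 to 1 enqueues the port task. A standalone sketch of that guard with C11 atomics (schedule_task() is a placeholder; the reset that re-arms the guard happens elsewhere in the real code):

    #include <stdatomic.h>

    static atomic_int dist_cmd_scheduled;

    static void schedule_task(void) { /* enqueue the dist command task */ }

    static void schedule_dist_cmd_once(void)
    {
        /* atomic_exchange returns the previous value; only the first
         * caller sees 0 and actually schedules. */
        if (!atomic_exchange(&dist_cmd_scheduled, 1))
            schedule_task();
    }
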
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 3eee53eba3..04d807b780 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -2128,6 +2128,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (want_tot_or_sys || want.processes || want.processes_used) {
+ int max_processes = erts_ptab_max(&erts_proc);
UWord tmp;
if (ERTS_MEM_NEED_ALL_ALCU)
@@ -2137,7 +2138,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
fi, ERTS_ALC_NO_FIXED_SIZES);
tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
}
- tmp += erts_max_processes*sizeof(Process*);
+ tmp += max_processes*sizeof(erts_smp_atomic_t);
tmp += erts_bif_timer_memory_size();
tmp += erts_tot_link_lh_size();
@@ -2268,6 +2269,8 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
Eterm res = THE_NON_VALUE;
int i, length;
Uint reserved_atom_space, atom_space;
+ int max_processes = erts_ptab_max(&erts_proc);
+ int max_ports = erts_ptab_max(&erts_port);
if (proc) {
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
@@ -2299,7 +2302,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "static";
values[i].ui[0] =
- erts_max_ports*sizeof(Port) /* Port table */
+ max_ports*sizeof(erts_smp_atomic_t) /* Port table */
+ erts_timer_wheel_memory_size(); /* Timer wheel */
i++;
@@ -2378,7 +2381,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "process_table";
- values[i].ui[0] = erts_max_processes*sizeof(Process*);
+ values[i].ui[0] = max_processes*sizeof(Process*);
i++;
values[i].arity = 2;
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index e475f9d8a2..ba5ec9c367 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -267,6 +267,8 @@ typedef void (*erts_alloc_verify_func_t)(Allctr_t *);
erts_alloc_verify_func_t
erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr);
+#define ERTS_ALC_DATA_ALIGN_SIZE(SZ) \
+ (((((SZ) - 1) / 8) + 1) * 8)
#define ERTS_ALC_CACHE_LINE_ALIGN_SIZE(SZ) \
(((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE)
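
The new ERTS_ALC_DATA_ALIGN_SIZE macro rounds a size up to the next multiple of 8, mirroring the existing cache-line variant. A quick standalone check of the arithmetic:

    #include <stdio.h>

    #define DATA_ALIGN_SIZE(SZ) (((((SZ) - 1) / 8) + 1) * 8)

    int main(void)
    {
        /* 1..8 -> 8, 9..16 -> 16, 13 -> 16, 24 -> 24 */
        printf("%d %d %d %d\n", DATA_ALIGN_SIZE(1), DATA_ALIGN_SIZE(8),
               DATA_ALIGN_SIZE(13), DATA_ALIGN_SIZE(24));
        return 0;
    }
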
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 0a4407f009..d4de0d076a 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -146,6 +146,7 @@ class SYSTEM system_data
type SBMBC SBMBC SYSTEM small_block_mbc
type PROC FIXED_SIZE PROCESSES proc
+type PORT DRIVER SYSTEM port
type ATOM LONG_LIVED ATOM atom_entry
type MODULE LONG_LIVED CODE module_entry
type REG_PROC STANDARD PROCESSES reg_proc
@@ -189,7 +190,10 @@ type PORT_TABLE LONG_LIVED SYSTEM port_tab
type TIMER_WHEEL LONG_LIVED SYSTEM timer_wheel
type DRV DRIVER SYSTEM drv_internal
type DRV_BINARY BINARY BINARIES drv_binary
-type DRIVER STANDARD SYSTEM driver
+type DRIVER DRIVER SYSTEM driver
+type DRV_CMD_DATA DRIVER SYSTEM driver_command_data
+type DRV_CTRL_DATA DRIVER SYSTEM driver_control_data
+type DRV_CALL_DATA DRIVER SYSTEM driver_call_data
type NIF DRIVER SYSTEM nif_internal
type BINARY BINARY BINARIES binary
type NBIF_TABLE SYSTEM SYSTEM nbif_tab
@@ -197,14 +201,12 @@ type ARG_REG STANDARD PROCESSES arg_reg
type PROC_DICT STANDARD PROCESSES proc_dict
type CALLS_BUF STANDARD PROCESSES calls_buf
type BPD STANDARD SYSTEM bpd
-type PORT_NAME STANDARD SYSTEM port_name
type LINEBUF STANDARD SYSTEM line_buf
type IOQ STANDARD SYSTEM io_queue
type BITS_BUF STANDARD SYSTEM bits_buf
type TMP_DIST_BUF TEMPORARY SYSTEM tmp_dist_buf
type ASYNC_DATA LONG_LIVED SYSTEM internal_async_data
type ESTACK TEMPORARY SYSTEM estack
-type PORT_CALL_BUF TEMPORARY SYSTEM port_call_buf
type DB_TABLE ETS ETS db_tab
type DB_FIXATION SHORT_LIVED ETS db_fixation
type DB_FIX_DEL SHORT_LIVED ETS fixed_del
@@ -234,14 +236,14 @@ type DDLL_HANDLE STANDARD SYSTEM ddll_handle
type DDLL_ERRCODES LONG_LIVED SYSTEM ddll_errcodes
type DDLL_TMP_BUF TEMPORARY SYSTEM ddll_tmp_buf
type PORT_TASK SHORT_LIVED SYSTEM port_task
-type PORT_TASKQ SHORT_LIVED SYSTEM port_task_queue
+type PT_HNDL_LIST SHORT_LIVED SYSTEM port_task_handle_list
type MISC_OP_LIST SHORT_LIVED SYSTEM misc_op_list
type PORT_NAMES SHORT_LIVED SYSTEM port_names
-type PORT_DATA_LOCK STANDARD SYSTEM port_data_lock
+type PORT_DATA_LOCK DRIVER SYSTEM port_data_lock
type NODES_MON STANDARD PROCESSES nodes_monitor
-type PROCS_TPROC_EL SHORT_LIVED PROCESSES processes_term_proc_el
-type PROCS_CNKINF SHORT_LIVED PROCESSES processes_chunk_info
-type PROCS_PIDS SHORT_LIVED PROCESSES processes_pids
+type PTAB_LIST_DEL SHORT_LIVED PROCESSES ptab_list_deleted_el
+type PTAB_LIST_CNKI SHORT_LIVED PROCESSES ptab_list_chunk_info
+type PTAB_LIST_PIDS SHORT_LIVED PROCESSES ptab_list_pids
type RE_TMP_BUF TEMPORARY SYSTEM re_tmp_buf
type RE_SUBJECT SHORT_LIVED SYSTEM re_subject
type RE_HEAP STANDARD SYSTEM re_heap
@@ -266,6 +268,8 @@ type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts
type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q
type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
+type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
+type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index c5f432bea1..f2ca193ace 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -382,10 +382,15 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
static ERTS_INLINE void call_async_ready(ErtsAsync *a)
{
+#if ERTS_USE_ASYNC_READY_Q
Port *p = erts_id2port_sflgs(a->port,
NULL,
0,
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+#else
+ Port *p = erts_thr_id2port_sflgs(a->port,
+ ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+#endif
if (!p) {
if (a->async_free)
a->async_free(a->async_data);
@@ -395,7 +400,11 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a)
if (a->async_free)
a->async_free(a->async_data);
}
+#if ERTS_USE_ASYNC_READY_Q
erts_port_release(p);
+#else
+ erts_thr_port_release(p);
+#endif
}
if (a->pdl)
driver_pdl_dec_refc(a->pdl);
@@ -603,7 +612,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
sched_id = 1;
#endif
- prt = erts_drvport2port(ix);
+ prt = erts_drvport2port(ix, NULL);
if (!prt)
return -1;
@@ -615,7 +624,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
a->sched_id = sched_id;
#endif
a->hndl = (DE_Handle*)prt->drv_ptr->handle;
- a->port = prt->id;
+ a->port = prt->common.id;
a->pdl = NULL;
a->async_data = async_data;
a->async_invoke = async_invoke;
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 7f7c975e78..7cbea55eac 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -104,16 +104,49 @@ static void dereference_all_processes(DE_Handle *dh);
static void restore_process_references(DE_Handle *dh);
static void ddll_no_more_references(void *vdh);
-#define lock_drv_list() erts_smp_mtx_lock(&erts_driver_list_lock)
-#define unlock_drv_list() erts_smp_mtx_unlock(&erts_driver_list_lock)
+#define lock_drv_list() erts_smp_rwmtx_rwlock(&erts_driver_list_lock)
+#define unlock_drv_list() erts_smp_rwmtx_rwunlock(&erts_driver_list_lock)
#define assert_drv_list_locked() \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&erts_driver_list_lock))
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
+ || erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
+#define assert_drv_list_rwlocked() \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock))
+#define assert_drv_list_rlocked() \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define assert_drv_list_not_locked() \
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_mtx_is_locked(&erts_driver_list_lock))
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
+ && !erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define FREE_PORT_FLAGS (ERTS_PORT_SFLGS_DEAD & (~ERTS_PORT_SFLG_INITIALIZING))
+static void
+kill_ports_driver_unloaded(DE_Handle *dh)
+{
+ int ix, max = erts_ptab_max(&erts_port);
+
+ for (ix = 0; ix < max; ix++) {
+ erts_aint32_t state;
+ Port* prt = erts_pix2port(ix);
+ if (!prt)
+ continue;
+
+ ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & FREE_PORT_FLAGS)
+ continue;
+
+ erts_smp_port_lock(prt);
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if (!(state & ERTS_PORT_SFLGS_DEAD) && prt->drv_ptr->handle == dh)
+ driver_failure_atom((ErlDrvPort) prt, "driver_unloaded");
+
+ erts_port_release(prt);
+ }
+}
+
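The new kill_ports_driver_unloaded() replaces three copies of the same port-table scan: read the cheap atomic state word, skip free or dead entries without locking, and re-check the state after taking the per-port lock. A generic version of that check-lock-recheck scan, using C11 atomics and pthreads instead of the ERTS primitives (entry layout, flag value and callbacks are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>

    struct entry {
        atomic_int      state;              /* bit flags, e.g. DEAD */
        pthread_mutex_t lock;
    };

    #define DEAD 0x1

    static void scan(struct entry *tab, int max,
                     int (*matches)(struct entry *),
                     void (*act)(struct entry *))
    {
        int ix;
        for (ix = 0; ix < max; ix++) {
            struct entry *e = &tab[ix];
            if (atomic_load_explicit(&e->state, memory_order_acquire) & DEAD)
                continue;                   /* cheap filter, no lock taken */
            pthread_mutex_lock(&e->lock);
            /* state may have changed while we were taking the lock */
            if (!(atomic_load(&e->state) & DEAD) && matches(e))
                act(e);
            pthread_mutex_unlock(&e->lock);
        }
    }
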
/*
* try_load(Path, Name, OptionList) -> {ok,Status} |
* {ok, PendingStatus, Ref} |
@@ -149,7 +182,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
Eterm name_term = BIF_ARG_2;
Eterm options = BIF_ARG_3;
char *path = NULL;
- Uint path_len;
+ ErlDrvSizeT path_len;
char *name = NULL;
DE_Handle *dh;
erts_driver_t *drv;
@@ -228,7 +261,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
goto error;
}
path = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, path_len + 1 /* might need path separator */ + sys_strlen(name) + 1);
- if (io_list_to_buf(path_term, path, path_len) != 0) {
+ if (erts_iolist_to_buf(path_term, path, path_len) != 0) {
goto error;
}
while (path_len > 0 && (path[path_len-1] == '\\' || path[path_len-1] == '/')) {
@@ -356,40 +389,16 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
ok_term = mkatom("loaded");
}
}
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
if (kill_ports) {
- int j;
- /* Avoid closing the driver by referencing it */
+ /* Avoid closing the driver by referencing it */
erts_ddll_reference_driver(dh);
ASSERT(dh->status == ERL_DE_RELOAD);
dh->status = ERL_DE_FORCE_RELOAD;
#if DDLL_SMP
unlock_drv_list();
#endif
- for (j = 0; j < erts_max_ports; j++) {
- Port* prt = &erts_port[j];
- erts_smp_port_state_lock(prt);
- if (!(prt->status & FREE_PORT_FLAGS) &&
- prt->drv_ptr->handle == dh) {
- erts_smp_atomic_inc_nob(&prt->refc);
-#if DDLL_SMP
- /* Extremely rare spinlock */
- while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
- erts_smp_port_state_unlock(prt);
- erts_smp_port_state_lock(prt);
- }
- erts_smp_port_state_unlock(prt);
- erts_smp_mtx_lock(prt->lock);
- if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) {
- driver_failure_atom(j, "driver_unloaded");
- }
-#else
- driver_failure_atom(j, "driver_unloaded");
-#endif
- erts_port_release(prt);
- }
- else erts_smp_port_state_unlock(prt);
- }
+ kill_ports_driver_unloaded(dh);
/* Dereference, eventually causing driver destruction */
#if DDLL_SMP
lock_drv_list();
@@ -579,45 +588,21 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
dh->reload_full_path = dh->reload_driver_name = NULL;
dh->reload_flags = 0;
}
- if (dh->port_count > 0) {
+ if (erts_smp_atomic32_read_nob(&dh->port_count) > 0) {
++kill_ports;
}
dh->status = ERL_DE_UNLOAD;
ok_term = am_pending_driver;
done:
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
if (kill_ports > 1) {
- int j;
/* Avoid closing the driver by referencing it */
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
#if DDLL_SMP
unlock_drv_list();
#endif
- for (j = 0; j < erts_max_ports; j++) {
- Port* prt = &erts_port[j];
- erts_smp_port_state_lock(prt);
- if (!(prt->status & FREE_PORT_FLAGS)
- && prt->drv_ptr->handle == dh) {
- erts_smp_atomic_inc_nob(&prt->refc);
-#if DDLL_SMP
- /* Extremely rare spinlock */
- while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
- erts_smp_port_state_unlock(prt);
- erts_smp_port_state_lock(prt);
- }
- erts_smp_port_state_unlock(prt);
- erts_smp_mtx_lock(prt->lock);
- if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) {
- driver_failure_atom(j, "driver_unloaded");
- }
-#else
- driver_failure_atom(j, "driver_unloaded");
-#endif
- erts_port_release(prt);
- }
- else erts_smp_port_state_unlock(prt);
- }
+ kill_ports_driver_unloaded(dh);
#if DDLL_SMP
lock_drv_list();
#endif
@@ -787,7 +772,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
} else if (drv->handle->status == ERL_DE_PERMANENT) {
res = am_permanent;
} else {
- res = make_small(drv->handle->port_count);
+ res = make_small(erts_smp_atomic32_read_nob(&drv->handle->port_count));
}
goto done;
case am_linked_in_driver:
@@ -1045,38 +1030,16 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
}
dh->status = ERL_DE_UNLOAD;
}
- if (!left && drv->handle->port_count > 0) {
+ if (!left
+ && erts_smp_atomic32_read_nob(&drv->handle->port_count) > 0) {
if (kill_ports) {
- int j;
DE_Handle *dh = drv->handle;
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
#if DDLL_SMP
unlock_drv_list();
#endif
- for (j = 0; j < erts_max_ports; j++) {
- Port* prt = &erts_port[j];
- erts_smp_port_state_lock(prt);
- if (!(prt->status & FREE_PORT_FLAGS) &&
- prt->drv_ptr->handle == dh) {
- erts_smp_atomic_inc_nob(&prt->refc);
-#if DDLL_SMP
- while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
- erts_smp_port_state_unlock(prt);
- erts_smp_port_state_lock(prt);
- }
- erts_smp_port_state_unlock(prt);
- erts_smp_mtx_lock(prt->lock);
- if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) {
- driver_failure_atom(j, "driver_unloaded");
- }
-#else
- driver_failure_atom(j, "driver_unloaded");
-#endif
- erts_port_release(prt);
- }
- else erts_smp_port_state_unlock(prt);
- }
+ kill_ports_driver_unloaded(dh);
#if DDLL_SMP
lock_drv_list(); /* Needed for future list operations */
#endif
@@ -1098,7 +1061,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
void erts_ddll_lock_driver(DE_Handle *dh, char *name)
{
DE_ProcEntry *p,*q;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
notify_all(dh, name,
ERL_DE_PROC_AWAIT_LOAD, am_UP, am_permanent);
notify_all(dh, name,
@@ -1121,19 +1084,22 @@ void erts_ddll_lock_driver(DE_Handle *dh, char *name)
void erts_ddll_increment_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
- dh->port_count++;
+ erts_smp_atomic32_inc_nob(&dh->port_count);
}
void erts_ddll_decrement_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
- ASSERT(dh->port_count > 0);
- dh->port_count--;
+#if DEBUG
+ ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0);
+#else
+ erts_smp_atomic32_dec_nob(&dh->port_count);
+#endif
}
static void first_ddll_reference(DE_Handle *dh)
{
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
erts_refc_init(&(dh->refc),1);
}
@@ -1161,7 +1127,7 @@ void erts_ddll_dereference_driver(DE_Handle *dh)
static void dereference_all_processes(DE_Handle *dh)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
for(p = dh->procs;p != NULL; p = p->next) {
if (p->awaiting_status == ERL_DE_PROC_LOADED) {
ASSERT(!(p->flags & ERL_DE_FL_DEREFERENCED));
@@ -1174,7 +1140,7 @@ static void dereference_all_processes(DE_Handle *dh)
static void restore_process_references(DE_Handle *dh)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
ASSERT(erts_refc_read(&(dh->refc),0) == 0);
for(p = dh->procs;p != NULL; p = p->next) {
if (p->awaiting_status == ERL_DE_PROC_LOADED) {
@@ -1402,7 +1368,7 @@ static int is_last_user(DE_Handle *dh, Process *proc) {
DE_ProcEntry *p = dh->procs;
int found = 0;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->proc == proc && p->awaiting_status == ERL_DE_PROC_LOADED) {
@@ -1423,7 +1389,7 @@ static DE_ProcEntry *find_proc_entry(DE_Handle *dh, Process *proc, Uint status)
{
DE_ProcEntry *p = dh->procs;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->proc == proc && p->awaiting_status == status) {
@@ -1450,7 +1416,7 @@ static int num_procs(DE_Handle *dh, Uint status) {
DE_ProcEntry *p = dh->procs;
int i = 0;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->awaiting_status == status) {
@@ -1465,7 +1431,7 @@ static int num_entries(DE_Handle *dh, Process *proc, Uint status) {
DE_ProcEntry *p = dh->procs;
int i = 0;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->awaiting_status == status && p->proc == proc) {
++i;
@@ -1478,7 +1444,7 @@ static int num_entries(DE_Handle *dh, Process *proc, Uint status) {
static void add_proc_loaded(DE_Handle *dh, Process *proc)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
p->proc = proc;
p->flags = 0;
@@ -1490,7 +1456,7 @@ static void add_proc_loaded(DE_Handle *dh, Process *proc)
static void add_proc_loaded_deref(DE_Handle *dh, Process *proc)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
p->proc = proc;
p->awaiting_status = ERL_DE_PROC_LOADED;
@@ -1510,7 +1476,7 @@ static void add_proc_waiting(DE_Handle *dh, Process *proc,
Uint status, Eterm ref)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
p->proc = proc;
p->flags = 0;
@@ -1524,7 +1490,7 @@ static Eterm add_monitor(Process *p, DE_Handle *dh, Uint status)
{
Eterm r;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
r = erts_make_ref(p);
add_proc_waiting(dh, p, status, r);
return r;
@@ -1535,7 +1501,7 @@ static void set_driver_reloading(DE_Handle *dh, Process *proc, char *path, char
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
p->proc = proc;
p->awaiting_status = ERL_DE_OK;
@@ -1556,7 +1522,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
int res;
ErlDrvEntry *dp;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
if ((res = erts_sys_ddll_open(path, &(dh->handle))) != ERL_DE_NO_ERROR) {
return res;
@@ -1594,7 +1560,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
goto error;
}
erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0);
- dh->port_count = 0;
+ erts_smp_atomic32_init_nob(&dh->port_count, 0);
dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
sys_strcpy(dh->full_path, path);
dh->flags = 0;
@@ -1620,7 +1586,7 @@ static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
{
erts_driver_t *q, *p = driver_list;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->handle == dh) {
@@ -1660,11 +1626,11 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
int res;
DE_Handle *dh = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sizeof(DE_Handle));
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
dh->handle = NULL;
dh->procs = NULL;
- dh->port_count = 0;
+ erts_smp_atomic32_init_nob(&dh->port_count, 0);
erts_refc_init(&(dh->refc), (erts_aint_t) 0);
dh->status = -1;
dh->reload_full_path = NULL;
@@ -1698,7 +1664,7 @@ static int reload_driver_entry(DE_Handle *dh)
int loadres;
Uint flags = dh->reload_flags;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
dh->reload_full_path = NULL;
dh->reload_driver_name = NULL;
@@ -1736,7 +1702,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
ErtsProcLocks rp_locks = 0;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
if (errcode != 0) {
int need = load_error_need(errcode);
Eterm e;
@@ -1769,7 +1735,7 @@ static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Ete
{
DE_ProcEntry **p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = &(dh->procs);
while (*p != NULL) {
@@ -1875,7 +1841,7 @@ static Eterm mkatom(char *str)
static char *pick_list_or_atom(Eterm name_term)
{
char *name = NULL;
- Uint name_len;
+ ErlDrvSizeT name_len;
if (is_atom(name_term)) {
Atom *ap = atom_tab(atom_val(name_term));
if (ap->len == 0) {
@@ -1891,7 +1857,7 @@ static char *pick_list_or_atom(Eterm name_term)
goto error;
}
name = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, name_len + 1);
- if (io_list_to_buf(name_term, name, name_len) != 0) {
+ if (erts_iolist_to_buf(name_term, name, name_len) != 0) {
goto error;
}
name[name_len] = '\0';
@@ -1912,10 +1878,10 @@ static int build_proc_info(DE_Handle *dh, ProcEntryInfo **out_pei, Uint filter)
int i;
DE_ProcEntry *pe;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
for (pe = dh->procs; pe != NULL; pe = pe->next) {
- Eterm id = pe->proc->id;
+ Eterm id = pe->proc->common.id;
Uint stat = pe->awaiting_status;
if (stat == ERL_DE_PROC_AWAIT_UNLOAD_ONLY) {
stat = ERL_DE_PROC_AWAIT_UNLOAD;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index a3811ccdb0..fabddffc68 100755
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -40,6 +40,8 @@
#include "erl_cpu_topology.h"
#include "erl_async.h"
#include "erl_thr_progress.h"
+#define ERTS_PTAB_WANT_DEBUG_FUNCS__
+#include "erl_ptab.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -128,8 +130,6 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
static Eterm os_type_tuple;
static Eterm os_version_tuple;
-static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item);
-
static Eterm
current_function(Process* p, Process* rp, Eterm** hpp, int full_info);
static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp);
@@ -873,8 +873,7 @@ BIF_RETTYPE process_info_1(BIF_ALIST_1)
&& external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
BIF_RET(am_undefined);
- if (is_not_internal_pid(BIF_ARG_1)
- || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ if (is_not_internal_pid(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -909,8 +908,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2)
&& external_pid_dist_entry(pid) == erts_this_dist_entry)
BIF_RET(am_undefined);
- if (is_not_internal_pid(pid)
- || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ if (is_not_internal_pid(pid)) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -1002,9 +1000,9 @@ process_info_aux(Process *BIF_P,
switch (item) {
case am_registered_name:
- if (rp->reg != NULL) {
+ if (rp->common.u.alive.reg) {
hp = HAlloc(BIF_P, 3);
- res = rp->reg->name;
+ res = rp->common.u.alive.reg->name;
} else {
if (always_wrap) {
hp = HAlloc(BIF_P, 3);
@@ -1050,7 +1048,7 @@ process_info_aux(Process *BIF_P,
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
n = rp->msg.len;
- if (n == 0 || rp->trace_flags & F_SENSITIVE) {
+ if (n == 0 || ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
hp = HAlloc(BIF_P, 3);
} else {
int remove_bad_messages = 0;
@@ -1209,7 +1207,7 @@ process_info_aux(Process *BIF_P,
INIT_MONITOR_INFOS(mic);
- erts_doforall_links(rp->nlinks,&collect_one_link,&mic);
+ erts_doforall_links(ERTS_P_LINKS(rp),&collect_one_link,&mic);
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
@@ -1227,7 +1225,7 @@ process_info_aux(Process *BIF_P,
int i;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(rp->monitors,&collect_one_origin_monitor,&mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_origin_monitor,&mic);
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
@@ -1264,7 +1262,7 @@ process_info_aux(Process *BIF_P,
Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(rp->monitors,&collect_one_target_monitor,&mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_target_monitor,&mic);
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
@@ -1330,7 +1328,7 @@ process_info_aux(Process *BIF_P,
}
case am_dictionary:
- if (rp->trace_flags & F_SENSITIVE) {
+ if (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
res = NIL;
} else {
res = erts_dictionary_copy(BIF_P, rp->dictionary);
@@ -1426,8 +1424,8 @@ process_info_aux(Process *BIF_P,
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- erts_doforall_links(rp->nlinks, &one_link_size, &size);
- erts_doforall_monitors(rp->monitors, &one_mon_size, &size);
+ erts_doforall_links(ERTS_P_LINKS(rp), &one_link_size, &size);
+ erts_doforall_monitors(ERTS_P_MONITORS(rp), &one_mon_size, &size);
size += (rp->heap_sz + rp->mbuf_sz) * sizeof(Eterm);
if (rp->old_hend && rp->old_heap)
size += (rp->old_hend - rp->old_heap) * sizeof(Eterm);
@@ -1500,7 +1498,7 @@ process_info_aux(Process *BIF_P,
case am_trace:
hp = HAlloc(BIF_P, 3);
- res = make_small(rp->trace_flags & TRACEE_FLAGS);
+ res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
break;
case am_binary: {
@@ -1605,7 +1603,7 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
}
}
- if (BIF_P->id == rp->id) {
+ if (BIF_P == rp) {
FunctionInfo fi2;
/*
@@ -1837,17 +1835,17 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
# define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF_XML
# endif
#endif
- Uint buf_size = 8*1024; /* Try with 8KB first */
+ ErlDrvSizeT buf_size = 8*1024; /* Try with 8KB first */
char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
- int r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
- if (r < 0) {
+ ErlDrvSizeT r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
+ if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
if (erts_iolist_size(*tp, &buf_size)) {
goto badarg;
}
buf_size++;
buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
- r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
+ r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
ASSERT(r == buf_size - 1);
}
buf[buf_size - 1 - r] = '\0';
@@ -2159,9 +2157,13 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
BIF_RET(res);
} else if (BIF_ARG_1 == am_process_count) {
- BIF_RET(make_small(erts_process_count()));
+ BIF_RET(make_small(erts_ptab_count(&erts_proc)));
} else if (BIF_ARG_1 == am_process_limit) {
- BIF_RET(make_small(erts_max_processes));
+ BIF_RET(make_small(erts_ptab_max(&erts_proc)));
+ } else if (BIF_ARG_1 == am_port_count) {
+ BIF_RET(make_small(erts_ptab_count(&erts_port)));
+ } else if (BIF_ARG_1 == am_port_limit) {
+ BIF_RET(make_small(erts_ptab_max(&erts_port)));
} else if (BIF_ARG_1 == am_info
|| BIF_ARG_1 == am_procs
|| BIF_ARG_1 == am_loaded
@@ -2534,6 +2536,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
res = make_small(erts_no_run_queues);
BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("port_parallelism", BIF_ARG_1)) {
+ res = erts_port_parallelism ? am_true : am_false;
+ BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) {
Eterm *hp = NULL;
Uint sz = 0;
@@ -2701,66 +2706,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE
-port_info_1(BIF_ALIST_1)
-{
- Process* p = BIF_P;
- Eterm pid = BIF_ARG_1;
- static Eterm keys[] = {
- am_name,
- am_links,
- am_id,
- am_connected,
- am_input,
- am_output,
- am_os_pid
- };
- Eterm items[ASIZE(keys)];
- Eterm result = NIL;
- Eterm reg_name;
- Eterm* hp;
- Uint need;
- int i;
-
- /*
- * Collect all information about the port.
- */
-
- for (i = 0; i < ASIZE(keys); i++) {
- Eterm item;
-
- item = port_info(p, pid, keys[i]);
- if (is_non_value(item)) {
- return THE_NON_VALUE;
- }
- if (item == am_undefined) {
- return am_undefined;
- }
- items[i] = item;
- }
- reg_name = port_info(p, pid, am_registered_name);
-
- /*
- * Build the resulting list.
- */
-
- need = 2*ASIZE(keys);
- if (is_tuple(reg_name)) {
- need += 2;
- }
- hp = HAlloc(p, need);
- for (i = ASIZE(keys) - 1; i >= 0; i--) {
- result = CONS(hp, items[i], result);
- hp += 2;
- }
- if (is_tuple(reg_name)) {
- result = CONS(hp, reg_name, result);
- }
-
- return result;
-}
-
-
/**********************************************************************/
/* Return information on ports */
/* Info:
@@ -2773,38 +2718,20 @@ port_info_1(BIF_ALIST_1)
** os_pid The child's process ID
*/
-BIF_RETTYPE port_info_2(BIF_ALIST_2)
-{
- return port_info(BIF_P, BIF_ARG_1, BIF_ARG_2);
-}
-
-static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
+Eterm
+erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm item)
{
- BIF_RETTYPE ret;
- Port *prt;
- Eterm res;
- Eterm* hp;
- int count;
-
- if (is_internal_port(portid))
- prt = erts_id2port(portid, p, ERTS_PROC_LOCK_MAIN);
- else if (is_atom(portid))
- erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
- portid, NULL, 0, 0, &prt);
- else if (is_external_port(portid)
- && external_port_dist_entry(portid) == erts_this_dist_entry)
- BIF_RET(am_undefined);
- else {
- BIF_ERROR(p, BADARG);
- }
+ Eterm res = THE_NON_VALUE;
- if (!prt) {
- BIF_RET(am_undefined);
- }
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if (item == am_id) {
- hp = HAlloc(p, 3);
- res = make_small(internal_port_number(portid));
+ if (hpp)
+ res = make_small(internal_port_index(prt->common.id));
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_links) {
MonitorInfoCollection mic;
@@ -2813,17 +2740,26 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
INIT_MONITOR_INFOS(mic);
- erts_doforall_links(prt->nlinks, &collect_one_link, &mic);
+ erts_doforall_links(ERTS_P_LINKS(prt), &collect_one_link, &mic);
- hp = HAlloc(p, 3 + mic.sz);
- res = NIL;
- for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
- res = CONS(hp, item, res);
- hp += 2;
+ if (szp)
+ *szp += mic.sz;
+
+ if (hpp) {
+ res = NIL;
+ for (i = 0; i < mic.mi_i; i++) {
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ res = CONS(*hpp, item, res);
+ *hpp += 2;
+ }
}
+
DESTROY_MONITOR_INFOS(mic);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_monitors) {
MonitorInfoCollection mic;
@@ -2832,79 +2768,96 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(prt->monitors, &collect_one_origin_monitor, &mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(prt), &collect_one_origin_monitor, &mic);
- hp = HAlloc(p, 3 + mic.sz);
- res = NIL;
- for (i = 0; i < mic.mi_i; i++) {
- Eterm t;
- item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
- t = TUPLE2(hp, am_process, item);
- hp += 3;
- res = CONS(hp, t, res);
- hp += 2;
+ if (szp)
+ *szp += mic.sz;
+
+ if (hpp) {
+ res = NIL;
+ for (i = 0; i < mic.mi_i; i++) {
+ Eterm t;
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ t = TUPLE2(*hpp, am_process, item);
+ *hpp += 3;
+ res = CONS(*hpp, t, res);
+ *hpp += 2;
+ }
}
+
DESTROY_MONITOR_INFOS(mic);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_name) {
- count = sys_strlen(prt->name);
+ int count = sys_strlen(prt->name);
+
+ if (hpp)
+ res = buf_to_intlist(hpp, prt->name, count, NIL);
- hp = HAlloc(p, 3 + 2*count);
- res = buf_to_intlist(&hp, prt->name, count, NIL);
+ if (szp) {
+ *szp += 2*count;
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_connected) {
- hp = HAlloc(p, 3);
- res = prt->connected; /* internal pid */
+ if (hpp)
+ res = ERTS_PORT_GET_CONNECTED(prt); /* internal pid */
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_input) {
- Uint hsz = 3;
- Uint n = prt->bytes_in;
- (void) erts_bld_uint(NULL, &hsz, n);
- hp = HAlloc(p, hsz);
- res = erts_bld_uint(&hp, NULL, n);
+ res = erts_bld_uint(hpp, szp, prt->bytes_in);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_output) {
- Uint hsz = 3;
- Uint n = prt->bytes_out;
- (void) erts_bld_uint(NULL, &hsz, n);
- hp = HAlloc(p, hsz);
- res = erts_bld_uint(&hp, NULL, n);
+ res = erts_bld_uint(hpp, szp, prt->bytes_out);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_os_pid) {
- if (prt->os_pid >= 0) {
- Uint hsz = 3;
- UWord n = prt->os_pid;
- (void) erts_bld_uword(NULL, &hsz, n);
- hp = HAlloc(p, hsz);
- res = erts_bld_uword(&hp, NULL, n);
- } else {
- hp = HAlloc(p, 3);
- res = am_undefined;
- }
+ res = (prt->os_pid < 0
+ ? am_undefined
+ : erts_bld_uword(hpp, szp, (UWord) prt->os_pid));
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_registered_name) {
- RegProc *reg;
- reg = prt->reg;
- if (reg == NULL) {
- ERTS_BIF_PREP_RET(ret, NIL);
- goto done;
- } else {
- hp = HAlloc(p, 3);
+ RegProc *reg = prt->common.u.alive.reg;
+ if (reg) {
res = reg->name;
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
+ }
+ else {
+ if (szp)
+ return am_undefined;
+ return NIL;
}
}
else if (item == am_memory) {
/* All memory consumed in bytes (the Port struct should not be
included though).
*/
- Uint hsz = 3;
Uint size = 0;
ErlHeapFragment* bp;
- hp = HAlloc(p, 3);
-
- erts_doforall_links(prt->nlinks, &one_link_size, &size);
+ erts_doforall_links(ERTS_P_LINKS(prt), &one_link_size, &size);
for (bp = prt->bp; bp; bp = bp->next)
size += sizeof(ErlHeapFragment) + (bp->alloc_size - 1)*sizeof(Eterm);
@@ -2918,51 +2871,72 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
/* All memory allocated by the driver should be included, but it is
hard to retrieve... */
- (void) erts_bld_uint(NULL, &hsz, size);
- hp = HAlloc(p, hsz);
- res = erts_bld_uint(&hp, NULL, size);
+ res = erts_bld_uint(hpp, szp, size);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_queue_size) {
Uint ioq_size = erts_port_ioq_size(prt);
- Uint hsz = 3;
- (void) erts_bld_uint(NULL, &hsz, ioq_size);
- hp = HAlloc(p, hsz);
- res = erts_bld_uint(&hp, NULL, ioq_size);
+ res = erts_bld_uint(hpp, szp, ioq_size);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (ERTS_IS_ATOM_STR("locking", item)) {
- hp = HAlloc(p, 3);
+ if (hpp) {
#ifndef ERTS_SMP
- res = am_false;
+ res = am_false;
#else
- if (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
- DECL_AM(port_level);
- ASSERT(prt->drv_ptr->flags
- & ERL_DRV_FLAG_USE_PORT_LOCKING);
- res = AM_port_level;
+ if (erts_atomic32_read_nob(&prt->state)
+ & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ DECL_AM(port_level);
+ ASSERT(prt->drv_ptr->flags
+ & ERL_DRV_FLAG_USE_PORT_LOCKING);
+ res = AM_port_level;
+ }
+ else {
+ DECL_AM(driver_level);
+ ASSERT(!(prt->drv_ptr->flags
+ & ERL_DRV_FLAG_USE_PORT_LOCKING));
+ res = AM_driver_level;
+ }
+#endif
}
- else {
- DECL_AM(driver_level);
- ASSERT(!(prt->drv_ptr->flags
- & ERL_DRV_FLAG_USE_PORT_LOCKING));
- res = AM_driver_level;
+ if (szp) {
+ res = am_true;
+ goto done;
}
-#endif
+ }
+ else if (item == am_parallelism) {
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
+ res = ((ERTS_PTS_FLG_PARALLELISM &
+ erts_smp_atomic32_read_nob(&prt->sched.flags))
+ ? am_true
+ : am_false);
}
else {
- ERTS_BIF_PREP_ERROR(ret, p, BADARG);
- goto done;
+ if (szp)
+ return am_false;
+ return THE_NON_VALUE;
}
- ERTS_BIF_PREP_RET(ret, TUPLE2(hp, item, res));
-
- done:
-
- erts_smp_port_unlock(prt);
+done:
+ if (szp)
+ *szp += 3;
+ if (hpp) {
+ res = TUPLE2(*hpp, item, res);
+ *hpp += 3;
+ }
- return ret;
+ return res;
}
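
erts_bld_port_info() now follows the usual two-pass ERTS term-building convention: it is called once with szp to accumulate the number of heap words an item needs (returning am_true as a placeholder), then once with hpp to write the term into heap the caller allocated from that size. A much-simplified illustration of the calling convention only; the term layout below is fake:

    #include <stdint.h>

    typedef unsigned long Term;

    /* First pass: build_pair(NULL, &sz, a, b) accumulates the size.
     * Second pass: build_pair(&hp, NULL, a, b) writes into hp. */
    static Term build_pair(Term **hpp, unsigned long *szp, Term a, Term b)
    {
        Term res = 0;
        if (szp)
            *szp += 3;                  /* a 2-tuple costs three heap words */
        if (hpp) {
            (*hpp)[0] = 2;              /* arity word (simplified) */
            (*hpp)[1] = a;
            (*hpp)[2] = b;
            res = (Term)(uintptr_t)*hpp;    /* "boxed" pointer (simplified) */
            *hpp += 3;
        }
        return res;
    }
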
-
BIF_RETTYPE
fun_info_2(BIF_ALIST_2)
{
@@ -3094,12 +3068,9 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
if(is_internal_pid(BIF_ARG_1)) {
Process *rp;
- if (BIF_ARG_1 == BIF_P->id)
+ if (BIF_ARG_1 == BIF_P->common.id)
BIF_RET(am_true);
- if(internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- BIF_ERROR(BIF_P, BADARG);
-
rp = erts_proc_lookup(BIF_ARG_1);
if (!rp) {
BIF_RET(am_false);
@@ -3317,10 +3288,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
/* Used by node_container_SUITE (emulator) */
Eterm res;
if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
- res = erts_test_next_pid(0, 0);
- else {
- res = erts_test_next_port(0, 0);
- }
+ res = erts_ptab_test_next_id(&erts_proc, 0, 0);
+ else
+ res = erts_ptab_test_next_id(&erts_port, 0, 0);
if (res < 0)
BIF_RET(am_false);
BIF_RET(erts_make_integer(res, BIF_P));
@@ -3356,11 +3326,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
/* Used by process_SUITE (emulator) */
- BIF_RET(erts_debug_processes(BIF_P));
+ BIF_RET(erts_debug_ptab_list(BIF_P, &erts_proc));
}
else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
/* Used by process_SUITE (emulator) */
- BIF_RET(erts_debug_processes_bif_info(BIF_P));
+ BIF_RET(erts_debug_ptab_list_bif_info(BIF_P, &erts_proc));
}
else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
/* Used by distribution_SUITE (emulator) */
@@ -3421,17 +3391,20 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
- res = make_link_list(BIF_P, p->nlinks, NIL);
+ res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
BIF_RET(res);
}
else if(is_internal_port(tp[2])) {
Eterm res;
- Port *p = erts_id2port(tp[2], BIF_P, ERTS_PROC_LOCK_MAIN);
+ Port *p = erts_id2port_sflgs(tp[2],
+ BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
if(!p)
BIF_RET(am_undefined);
- res = make_link_list(BIF_P, p->nlinks, NIL);
- erts_smp_port_unlock(p);
+ res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL);
+ erts_port_release(p);
BIF_RET(res);
}
else if(is_node_name_atom(tp[2])) {
@@ -3463,7 +3436,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
- res = make_monitor_list(BIF_P, p->monitors);
+ res = make_monitor_list(BIF_P, ERTS_P_MONITORS(p));
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
BIF_RET(res);
} else if(is_node_name_atom(tp[2])) {
@@ -3606,7 +3579,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on);
if (on) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Process %T ", BIF_P->id);
+ erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id);
if (erts_is_alive)
erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
erts_dsprintf(dsbufp,
@@ -3674,10 +3647,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Eterm res;
if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
- res = erts_test_next_pid(1, next);
- else {
- res = erts_test_next_port(1, next);
- }
+ res = erts_ptab_test_next_id(&erts_proc, 1, next);
+ else
+ res = erts_ptab_test_next_id(&erts_port, 1, next);
if (res < 0)
BIF_RET(am_false);
BIF_RET(erts_make_integer(res, BIF_P));
@@ -3948,8 +3920,8 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
} else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
/* use registered names as id's for process locks if available */
proc = erts_proc_lookup(lock->id);
- if (proc && proc->reg) {
- id = proc->reg->name;
+ if (proc && proc->common.u.alive.reg) {
+ id = proc->common.u.alive.reg->name;
} else {
/* otherwise use process id */
id = lock->id;
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index f9009166c0..81146e38d7 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -42,28 +42,26 @@
#include "erl_bits.h"
#include "dtrace-wrapper.h"
-static int open_port(Process* p, Eterm name, Eterm settings, int *err_nump);
+static Port *open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump);
static byte* convert_environment(Process* p, Eterm env);
static char **convert_args(Eterm);
static void free_args(char **);
char *erts_default_arg0 = "default";
-static BIF_RETTYPE
-port_call(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
-
BIF_RETTYPE open_port_2(BIF_ALIST_2)
{
- int port_num;
- Eterm port_val;
+ Port *port;
+ Eterm port_id;
char *str;
- int err_num;
+ int err_type, err_num;
- if ((port_num = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_num)) < 0) {
- if (port_num == -3) {
+ port = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_type, &err_num);
+ if (!port) {
+ if (err_type == -3) {
ASSERT(err_num == BADARG || err_num == SYSTEM_LIMIT);
BIF_ERROR(BIF_P, err_num);
- } else if (port_num == -2) {
+ } else if (err_type == -2) {
str = erl_errno_id(err_num);
} else {
str = "einval";
@@ -74,546 +72,408 @@ BIF_RETTYPE open_port_2(BIF_ALIST_2)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
- port_val = erts_port[port_num].id;
- erts_add_link(&(erts_port[port_num].nlinks), LINK_PID, BIF_P->id);
- erts_add_link(&(BIF_P->nlinks), LINK_PID, port_val);
+ port_id = port->common.id;
+ erts_add_link(&ERTS_P_LINKS(port), LINK_PID, BIF_P->common.id);
+ erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, port_id);
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- erts_port_release(&erts_port[port_num]);
+ erts_port_release(port);
- BIF_RET(port_val);
+ BIF_RET(port_id);
}
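
Note: open_port() now hands back a Port pointer and reports failures through the separate err_typep/err_nump out-parameters (error class vs. detail, e.g. -2 with an errno value) instead of encoding both in one negative port number. A small self-contained C illustration of that out-parameter convention; the resource/open_resource names below are made up for the example and are not ERTS APIs:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Sketch of the "error type + error number" out-parameter convention:
     * the pointer result says whether it worked, *err_typep says which
     * class of failure it was, *err_nump carries the detail. */
    struct resource { int fd; };

    static struct resource *open_resource(const char *path,
                                          int *err_typep, int *err_nump)
    {
        static struct resource r;
        FILE *f = fopen(path, "r");
        if (!f) {
            if (err_typep) *err_typep = -2;   /* "system error" class */
            if (err_nump)  *err_nump  = errno;
            return NULL;
        }
        fclose(f);
        r.fd = 0;
        return &r;
    }

    int main(void)
    {
        int err_type = 0, err_num = 0;
        if (!open_resource("/no/such/file", &err_type, &err_num))
            printf("failed: type=%d, %s\n", err_type, strerror(err_num));
        return 0;
    }
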
-/****************************************************************************
-
- PORT BIFS:
-
- port_command/2 -- replace Port ! {..., {command, Data}}
- port_command(Port, Data) -> true
- when port(Port), io-list(Data)
-
- port_control/3 -- new port_control(Port, Ctl, Data) -> Reply
- port_control(Port, Ctl, Data) -> Reply
- where integer(Ctl), io-list(Data), io-list(Reply)
-
- port_close/1 -- replace Port ! {..., close}
- port_close(Port) -> true
- when port(Port)
-
- port_connect/2 -- replace Port ! {..., {connect, Pid}}
- port_connect(Port, Pid)
- when port(Port), pid(Pid)
-
- ***************************************************************************/
-
-static Port*
-id_or_name2port(Process *c_p, Eterm id)
+static ERTS_INLINE Port *
+lookup_port(Process *c_p, Eterm id_or_name)
{
- Port *port;
- if (is_not_atom(id))
- port = erts_id2port(id, c_p, ERTS_PROC_LOCK_MAIN);
+ /* TODO: Implement nicer lookup in register... */
+ Eterm id;
+ if (is_atom(id_or_name))
+ id = erts_whereis_name_to_id(c_p, id_or_name);
else
- erts_whereis_name(c_p, ERTS_PROC_LOCK_MAIN, id, NULL, 0, 0, &port);
- return port;
+ id = id_or_name;
+ return erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
}
-#define ERTS_PORT_COMMAND_FLAG_FORCE (((Uint32) 1) << 0)
-#define ERTS_PORT_COMMAND_FLAG_NOSUSPEND (((Uint32) 1) << 1)
+/*
+ * erts_internal:port_command/3 is used by the
+ * erlang:port_command/2 and erlang:port_command/3
+ * BIFs.
+ */
-static BIF_RETTYPE
-do_port_command(Process *BIF_P, Eterm arg1, Eterm arg2, Eterm arg3,
- Uint32 flags)
+BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
{
BIF_RETTYPE res;
- Port *p;
-
- /* Trace sched out before lock check wait */
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_out);
- }
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_inactive);
- }
-
- p = id_or_name2port(BIF_P, arg1);
- if (!p) {
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
+ Port *prt;
+ int flags = 0;
+ Eterm ref;
+
+ if (is_not_nil(BIF_ARG_3)) {
+ Eterm l = BIF_ARG_3;
+ while (is_list(l)) {
+ Eterm* cons = list_val(l);
+ Eterm car = CAR(cons);
+ if (car == am_force)
+ flags |= ERTS_PORT_SIG_FLG_FORCE;
+ else if (car == am_nosuspend)
+ flags |= ERTS_PORT_SIG_FLG_NOSUSPEND;
+ else
+ BIF_RET(am_badarg);
+ l = CDR(cons);
}
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
- }
- BIF_ERROR(BIF_P, BADARG);
+ if (!is_nil(l))
+ BIF_RET(am_badarg);
}
-
- /* Trace port in, id_or_name2port causes wait */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_in, am_command);
- }
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_active);
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ if (flags & ERTS_PORT_SIG_FLG_FORCE) {
+ if (!(prt->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY))
+ BIF_RET(am_notsup);
}
- ERTS_BIF_PREP_RET(res, am_true);
+#ifdef DEBUG
+ ref = NIL;
+#endif
- if ((flags & ERTS_PORT_COMMAND_FLAG_FORCE)
- && !(p->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY)) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_NOTSUP);
- }
- else if (!(flags & ERTS_PORT_COMMAND_FLAG_FORCE)
- && p->status & ERTS_PORT_SFLG_PORT_BUSY) {
- if (flags & ERTS_PORT_COMMAND_FLAG_NOSUSPEND) {
+ switch (erts_port_output(BIF_P, flags, prt, prt->common.id, BIF_ARG_2, &ref)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ ERTS_BIF_PREP_RET(res, am_badarg);
+ break;
+ case ERTS_PORT_OP_BUSY:
+ ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
+ if (flags & ERTS_PORT_SIG_FLG_NOSUSPEND)
ERTS_BIF_PREP_RET(res, am_false);
- }
else {
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, p);
- if (erts_system_monitor_flags.busy_port) {
- monitor_generic(BIF_P, am_busy_port, p->id);
- }
- ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_port_command_3], BIF_P,
- arg1, arg2, arg3);
- }
- } else {
- int wres;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- wres = erts_write_to_port(BIF_P->id, p, arg2);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- if (wres != 0) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, prt);
+ ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_erts_internal_port_command_3],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
- }
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_out, am_command);
- }
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_inactive);
+ break;
+ case ERTS_PORT_OP_BUSY_SCHEDULED:
+ ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
+ /* Fall through... */
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(ref));
+ ERTS_BIF_PREP_RET(res, ref);
+ break;
+ case ERTS_PORT_OP_DONE:
+ ERTS_BIF_PREP_RET(res, am_true);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_output() result");
+ break;
}
- erts_port_release(p);
- /* Trace sched in after port release */
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
- }
-
if (ERTS_PROC_IS_EXITING(BIF_P)) {
KILL_CATCHES(BIF_P); /* Must exit */
ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR);
}
- return res;
-}
-BIF_RETTYPE port_command_2(BIF_ALIST_2)
-{
- return do_port_command(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL, 0);
+ return res;
}
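
Note: the command path is now signal-based, so the BIF mostly dispatches on the ERTS_PORT_OP_* result code: done maps to true, scheduled maps to the reference the caller will later be notified with, busy maps to false under nosuspend or suspend-and-yield otherwise, and the failure codes collapse to badarg. A tiny standalone C sketch of dispatching on such a result code; the enum and strings below are illustrative stand-ins, not the ERTS definitions:

    #include <stdio.h>

    /* Illustrative result codes modelled on the ERTS_PORT_OP_* outcomes
     * switched on above. */
    enum op_result { OP_BADARG, OP_BUSY, OP_SCHEDULED, OP_DONE };

    static const char *handle(enum op_result res, int nosuspend)
    {
        switch (res) {
        case OP_BADARG:    return "badarg";
        case OP_BUSY:      return nosuspend ? "false" : "suspend and retry";
        case OP_SCHEDULED: return "reference (reply arrives later)";
        case OP_DONE:      return "true";
        default:           return "internal error";
        }
    }

    int main(void)
    {
        printf("%s\n", handle(OP_BUSY, 1));
        printf("%s\n", handle(OP_SCHEDULED, 0));
        return 0;
    }
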
-BIF_RETTYPE port_command_3(BIF_ALIST_3)
+BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
{
- Eterm l = BIF_ARG_3;
- Uint32 flags = 0;
- while (is_list(l)) {
- Eterm* cons = list_val(l);
- Eterm car = CAR(cons);
- if (car == am_force) {
- flags |= ERTS_PORT_COMMAND_FLAG_FORCE;
- } else if (car == am_nosuspend) {
- flags |= ERTS_PORT_COMMAND_FLAG_NOSUSPEND;
- } else {
- BIF_ERROR(BIF_P, BADARG);
- }
- l = CDR(cons);
- }
- if(!is_nil(l)) {
- BIF_ERROR(BIF_P, BADARG);
+ Port* prt;
+ Eterm retval;
+ Uint uint_op;
+ unsigned int op;
+ erts_aint32_t state;
+
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ if (!term_to_Uint(BIF_ARG_2, &uint_op))
+ BIF_RET(am_badarg);
+
+ if (uint_op > (Uint) UINT_MAX)
+ BIF_RET(am_badarg);
+
+ op = (unsigned int) uint_op;
+
+ switch (erts_port_call(BIF_P, prt, op, BIF_ARG_3, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ retval = am_badarg;
+ break;
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ break;
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_call() result");
+ retval = am_internal_error;
+ break;
}
- return do_port_command(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, flags);
-}
-BIF_RETTYPE port_call_2(BIF_ALIST_2)
-{
- return port_call(BIF_P,BIF_ARG_1, make_small(0), BIF_ARG_2);
-}
+ state = erts_smp_atomic32_read_acqb(&BIF_P->state);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
+#ifdef ERTS_SMP
+ if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
+#endif
+ ERTS_BIF_EXITED(BIF_P);
+ }
-BIF_RETTYPE port_call_3(BIF_ALIST_3)
-{
- return port_call(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ BIF_RET(retval);
}
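
Note: both port_call and port_control validate the operation number by converting it to a Uint and rejecting anything above UINT_MAX before narrowing it to unsigned int, rather than truncating silently. A standalone sketch of that checked narrowing in plain C:

    #include <limits.h>
    #include <stdio.h>

    /* Sketch of the range check used before narrowing the operation number:
     * reject anything that does not fit in an unsigned int. */
    static int narrow_op(unsigned long wide, unsigned int *out)
    {
        if (wide > (unsigned long) UINT_MAX)
            return 0;                  /* caller treats this as badarg */
        *out = (unsigned int) wide;
        return 1;
    }

    int main(void)
    {
        unsigned int op;
        printf("%d\n", narrow_op(42UL, &op));                            /* 1 */
        printf("%d\n", narrow_op((unsigned long) UINT_MAX + 1UL, &op));  /* 0 on LP64 */
        return 0;
    }
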
-static BIF_RETTYPE
-port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3)
+BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
{
- Uint op;
- Port *p;
- Uint size;
- byte *bytes;
- byte *endp;
- ErlDrvSizeT real_size;
- erts_driver_t *drv;
- byte port_input[256]; /* Default input buffer to encode in */
- byte port_result[256]; /* Buffer for result from port. */
- byte* port_resp; /* Pointer to result buffer. */
- char *prc;
- ErlDrvSSizeT ret;
- Eterm res;
- Sint result_size;
- Eterm *hp;
- Eterm *hp_end;
- unsigned ret_flags = 0U;
- int fpe_was_unmasked;
-
- bytes = &port_input[0];
- port_resp = port_result;
- /* trace of port scheduling with virtual process descheduling
- * lock wait
- */
- if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(c_p, am_out);
- }
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(c_p, am_inactive);
+ Port* prt;
+ Eterm retval;
+ Uint uint_op;
+ unsigned int op;
+ erts_aint32_t state;
+
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ if (!term_to_Uint(BIF_ARG_2, &uint_op))
+ BIF_RET(am_badarg);
+
+ if (uint_op > (Uint) UINT_MAX)
+ BIF_RET(am_badarg);
+
+ op = (unsigned int) uint_op;
+
+ switch (erts_port_control(BIF_P, prt, op, BIF_ARG_3, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ retval = am_badarg;
+ break;
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ break;
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_control() result");
+ retval = am_internal_error;
+ break;
}
- p = id_or_name2port(c_p, arg1);
- if (!p) {
- error:
- if (port_resp != port_result &&
- !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
- driver_free(port_resp);
- }
- if (bytes != &port_input[0])
- erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
- /* Need to virtual schedule in the process if there
- * was an error.
- */
- if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(c_p, am_in);
- }
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(c_p, am_active);
- }
-
- if (p)
- erts_port_release(p);
+ state = erts_smp_atomic32_read_acqb(&BIF_P->state);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
-#else
- ERTS_BIF_CHK_EXITED(c_p);
+ if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
#endif
- BIF_ERROR(c_p, BADARG);
- }
-
- if ((drv = p->drv_ptr) == NULL) {
- goto error;
- }
- if (drv->call == NULL) {
- goto error;
- }
- if (!term_to_Uint(arg2, &op)) {
- goto error;
- }
- p->caller = c_p->id;
-
- /* Lock taken, virtual schedule of port */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_in, am_call);
- }
-
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_active);
+ ERTS_BIF_EXITED(BIF_P);
}
- size = erts_encode_ext_size(arg3);
- if (size > sizeof(port_input))
- bytes = erts_alloc(ERTS_ALC_T_PORT_CALL_BUF, size);
- endp = bytes;
- erts_encode_ext(arg3, &endp);
+ BIF_RET(retval);
+}
- real_size = endp - bytes;
- if (real_size > size) {
- erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
- __FILE__, __LINE__, endp - (bytes + size));
- }
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(driver_call)) {
- DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+/*
+ * erts_internal:port_close/1 is used by the
+ * erlang:port_close/1 BIF.
+ */
+BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1)
+{
+ Eterm ref;
+ Port *prt;
- dtrace_pid_str(p->connected, process_str);
- dtrace_port_str(p, port_str);
- DTRACE5(driver_call, process_str, port_str, p->name, op, real_size);
- }
-#endif
- prc = (char *) port_resp;
- fpe_was_unmasked = erts_block_fpe();
- ret = drv->call((ErlDrvData)p->drv_data,
- (unsigned) op,
- (char *) bytes,
- (int) real_size,
- &prc,
- (int) sizeof(port_result),
- &ret_flags);
- erts_unblock_fpe(fpe_was_unmasked);
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_out, am_call);
- }
-
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_inactive);
- }
-
- port_resp = (byte *) prc;
- p->caller = NIL;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
-#ifdef HARDDEBUG
- {
- ErlDrvSizeT z;
- printf("real_size = %ld,%d, ret = %ld,%d\r\n", (unsigned long) real_size,
- (int) real_size, (unsigned long)ret, (int) ret);
- printf("[");
- for(z = 0; z < real_size; ++z) {
- printf("%d, ",(int) bytes[z]);
- }
- printf("]\r\n");
- printf("[");
- for(z = 0; z < ret; ++z) {
- printf("%d, ",(int) port_resp[z]);
- }
- printf("]\r\n");
- }
-#endif
- if (ret <= 0 || port_resp[0] != VERSION_MAGIC) {
- /* Error or a binary without magic/ with wrong magic */
- goto error;
- }
- result_size = erts_decode_ext_size(port_resp, ret);
- if (result_size < 0) {
- goto error;
- }
- hp = HAlloc(c_p, result_size);
- hp_end = hp + result_size;
- endp = port_resp;
- res = erts_decode_ext(&hp, &MSO(c_p), &endp);
- if (res == THE_NON_VALUE) {
- goto error;
- }
- HRelease(c_p, hp_end, hp);
- if (port_resp != port_result && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
- driver_free(port_resp);
- }
- if (bytes != &port_input[0])
- erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
- if (p)
- erts_port_release(p);
-#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
-#else
- ERTS_BIF_CHK_EXITED(c_p);
+#ifdef DEBUG
+ ref = NIL;
#endif
- if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(c_p, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(c_p, am_active);
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+
+ switch (erts_port_exit(BIF_P, 0, prt, prt->common.id, am_normal, &ref)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(ref));
+ BIF_RET(ref);
+ case ERTS_PORT_OP_DONE:
+ BIF_RET(am_true);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_exit() result");
+ BIF_RET(am_internal_error);
}
-
- return res;
}
-
-BIF_RETTYPE port_control_3(BIF_ALIST_3)
-{
- Port* p;
- Uint op;
- Eterm res = THE_NON_VALUE;
-
- /* Virtual schedule out calling process before lock wait */
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_out);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_inactive);
- }
+/*
+ * erts_internal:port_connect/2 is used by the
+ * erlang:port_connect/2 BIF.
+ */
+BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2)
+{
+ Eterm ref;
+ Port* prt;
- p = id_or_name2port(BIF_P, BIF_ARG_1);
- if (!p) {
- /* Schedule the process before exiting */
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
- }
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
- }
-
- BIF_ERROR(BIF_P, BADARG);
- }
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
- /* Trace the port for scheduling in */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_in, am_control);
- }
-
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_active);
- }
+#ifdef DEBUG
+ ref = NIL;
+#endif
- if (term_to_Uint(BIF_ARG_2, &op))
- res = erts_port_control(BIF_P, p, op, BIF_ARG_3);
-
- /* Trace the port for scheduling out */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_out, am_control);
+ switch (erts_port_connect(BIF_P, 0, prt, prt->common.id, BIF_ARG_2, &ref)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(ref));
+ BIF_RET(ref);
+ break;
+ case ERTS_PORT_OP_DONE:
+ BIF_RET(am_true);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_connect() result");
+ BIF_RET(am_internal_error);
}
+}
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_inactive);
- }
+BIF_RETTYPE erts_internal_port_info_1(BIF_ALIST_1)
+{
+ Eterm retval;
+ Port* prt;
- erts_port_release(p);
-#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
-#else
- ERTS_BIF_CHK_EXITED(BIF_P);
-#endif
-
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
+ if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) {
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_undefined);
}
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
+ else if (is_external_port(BIF_ARG_1)) {
+ if (external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ BIF_RET(am_undefined);
+ else
+ BIF_RET(am_badarg);
}
-
- if (is_non_value(res)) {
- BIF_ERROR(BIF_P, BADARG);
+ else {
+ BIF_RET(am_badarg);
+ }
+
+ switch (erts_port_info(BIF_P, prt, THE_NON_VALUE, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_undefined);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ BIF_RET(retval);
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ BIF_RET(retval);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_info() result");
+ BIF_RET(am_internal_error);
}
- BIF_RET(res);
}
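
Note: erts_internal_port_info_1 (and its two-argument sibling below) first classifies the argument: internal port ids and registered names are looked up, and a failed lookup yields undefined; external port ids carrying this node's dist entry are necessarily dead and also yield undefined; everything else is badarg. A compact C illustration of that branch structure; the enum and return strings are stand-ins, not ERTS values:

    #include <stdio.h>

    /* Illustrative classification mirroring the branches above. */
    enum kind { INTERNAL_ID, LOCAL_EXTERNAL_ID, REMOTE_ID, OTHER };

    static const char *classify(enum kind k, int found)
    {
        switch (k) {
        case INTERNAL_ID:       return found ? "info tuple" : "undefined";
        case LOCAL_EXTERNAL_ID: return "undefined";   /* dead, but created here */
        case REMOTE_ID:         return "badarg";      /* other node's port      */
        default:                return "badarg";
        }
    }

    int main(void)
    {
        printf("%s\n", classify(INTERNAL_ID, 0));
        printf("%s\n", classify(LOCAL_EXTERNAL_ID, 1));
        return 0;
    }
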
-BIF_RETTYPE port_close_1(BIF_ALIST_1)
-{
- Port* p;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- p = id_or_name2port(NULL, BIF_ARG_1);
- if (!p) {
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_ERROR(BIF_P, BADARG);
- }
- erts_do_exit_port(p, p->connected, am_normal);
- /* if !ERTS_SMP: since we terminate port with reason normal
- we SHOULD never get an exit signal ourselves
- */
- erts_port_release(p);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(am_true);
-}
-BIF_RETTYPE port_connect_2(BIF_ALIST_2)
+BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
{
+ Eterm retval;
Port* prt;
- Process* rp;
- Eterm pid = BIF_ARG_2;
- if (is_not_internal_pid(pid)) {
- error:
- BIF_ERROR(BIF_P, BADARG);
+ if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) {
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_undefined);
}
- prt = id_or_name2port(BIF_P, BIF_ARG_1);
- if (!prt) {
- goto error;
+ else if (is_external_port(BIF_ARG_1)) {
+ if (external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ BIF_RET(am_undefined);
+ else
+ BIF_RET(am_badarg);
}
-
- rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- erts_smp_port_unlock(prt);
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
- goto error;
- }
-
- erts_add_link(&(rp->nlinks), LINK_PID, prt->id);
- erts_add_link(&(prt->nlinks), LINK_PID, pid);
-
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-
- prt->connected = pid; /* internal pid */
- erts_smp_port_unlock(prt);
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(port_connect)) {
- DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);
-
- dtrace_pid_str(prt->connected, process_str);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
- dtrace_proc_str(rp, newprocess_str);
- DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
+ else {
+ BIF_RET(am_badarg);
+ }
+
+ switch (erts_port_info(BIF_P, prt, BIF_ARG_2, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_undefined);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ BIF_RET(retval);
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ BIF_RET(retval);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_info() result");
+ BIF_RET(am_internal_error);
}
-#endif
- BIF_RET(am_true);
}
-BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
+
+BIF_RETTYPE erts_internal_port_set_data_2(BIF_ALIST_2)
{
+ Eterm ref;
Port* prt;
- Eterm portid = BIF_ARG_1;
- Eterm data = BIF_ARG_2;
- prt = id_or_name2port(BIF_P, portid);
- if (!prt) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (prt->bp != NULL) {
- free_message_buffer(prt->bp);
- prt->bp = NULL;
- }
- if (IS_CONST(data)) {
- prt->data = data;
- } else {
- Uint size;
- ErlHeapFragment* bp;
- Eterm* hp;
-
- size = size_object(data);
- prt->bp = bp = new_message_buffer(size);
- hp = bp->mem;
- prt->data = copy_struct(data, size, &hp, &bp->off_heap);
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ switch (erts_port_set_data(BIF_P, prt, BIF_ARG_2, &ref)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(ref));
+ BIF_RET(ref);
+ case ERTS_PORT_OP_DONE:
+ BIF_RET(am_true);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_set_data() result");
+ BIF_RET(am_internal_error);
}
- erts_smp_port_unlock(prt);
- BIF_RET(am_true);
}
-BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
+BIF_RETTYPE erts_internal_port_get_data_1(BIF_ALIST_1)
{
- BIF_RETTYPE res;
+ Eterm retval;
Port* prt;
- Eterm portid = BIF_ARG_1;
- prt = id_or_name2port(BIF_P, portid);
- if (!prt) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (prt->bp == NULL) { /* MUST be CONST! */
- res = prt->data;
- } else {
- Eterm* hp = HAlloc(BIF_P, prt->bp->used_size);
- res = copy_struct(prt->data, prt->bp->used_size, &hp, &MSO(BIF_P));
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ switch (erts_port_get_data(BIF_P, prt, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ BIF_RET(retval);
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ BIF_RET(retval);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_get_data() result");
+ BIF_RET(am_internal_error);
}
- erts_smp_port_unlock(prt);
- BIF_RET(res);
}
/*
@@ -625,11 +485,10 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
* either BADARG or SYSTEM_LIMIT).
*/
-static int
-open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
+static Port *
+open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
{
-#define OPEN_PORT_ERROR(VAL) do { port_num = (VAL); goto do_return; } while (0)
- int i, port_num;
+ int i;
Eterm option;
Uint arity;
Eterm* tp;
@@ -637,11 +496,11 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
erts_driver_t* driver;
char* name_buf = NULL;
SysDriverOpts opts;
- int binary_io;
- int soft_eof;
Sint linebuf;
Eterm edir = NIL;
byte dir[MAXPATHLEN];
+ erts_aint32_t sflgs = 0;
+ Port *port;
/* These are the defaults */
opts.packet_bytes = 0;
@@ -655,8 +514,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
opts.overlapped_io = 0;
opts.spawn_type = ERTS_SPAWN_ANY;
opts.argv = NULL;
- binary_io = 0;
- soft_eof = 0;
+ opts.parallelism = erts_port_parallelism;
linebuf = 0;
*err_nump = 0;
@@ -734,6 +592,13 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
}
} else if (option == am_cd) {
edir = *tp;
+ } else if (option == am_parallelism) {
+ if (*tp == am_true)
+ opts.parallelism = 1;
+ else if (*tp == am_false)
+ opts.parallelism = 0;
+ else
+ goto badarg;
} else {
goto badarg;
}
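
Note: the new parallelism option simply flips opts.parallelism between 0 and 1, the default coming from erts_port_parallelism, and any non-boolean value is badarg. A minimal C sketch of parsing such a boolean option against a global default; the names below are hypothetical:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative parser for a {parallelism, true|false} style option. */
    static int default_parallelism = 0;

    static int parse_parallelism(const char *val, int *out)
    {
        if (strcmp(val, "true") == 0)  { *out = 1; return 1; }
        if (strcmp(val, "false") == 0) { *out = 0; return 1; }
        return 0;                      /* anything else is badarg */
    }

    int main(void)
    {
        int parallelism = default_parallelism;   /* global default */
        if (!parse_parallelism("true", &parallelism))
            printf("badarg\n");
        printf("parallelism=%d\n", parallelism);
        return 0;
    }
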
@@ -748,13 +613,13 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
} else if (*nargs == am_nouse_stdio) {
opts.use_stdio = 0;
} else if (*nargs == am_binary) {
- binary_io = 1;
+ sflgs |= ERTS_PORT_SFLG_BINARY_IO;
} else if (*nargs == am_in) {
opts.read_write |= DO_READ;
} else if (*nargs == am_out) {
opts.read_write |= DO_WRITE;
} else if (*nargs == am_eof) {
- soft_eof = 1;
+ sflgs |= ERTS_PORT_SFLG_SOFT_EOF;
} else if (*nargs == am_hide) {
opts.hide_window = 1;
} else if (*nargs == am_exit_status) {
@@ -902,9 +767,9 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
heap[2] = make_small(0);
heap[3] = NIL;
iolist = make_list(heap);
- r = io_list_to_buf(iolist, (char*) dir, MAXPATHLEN);
+ r = erts_iolist_to_buf(iolist, (char*) dir, MAXPATHLEN);
UnUseTmpHeap(4,p);
- if (r < 0) {
+ if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
goto badarg;
}
opts.wd = (char *) dir;
@@ -926,44 +791,40 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- port_num = erts_open_driver(driver, p->id, name_buf, &opts, err_nump);
+ port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump);
#ifdef USE_VM_PROBES
- if (port_num >= 0 && DTRACE_ENABLED(port_open)) {
+ if (port && DTRACE_ENABLED(port_open)) {
DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
dtrace_proc_str(p, process_str);
- erts_snprintf(port_str, sizeof(port_str), "%T", erts_port[port_num].id);
+ erts_snprintf(port_str, sizeof(port_str), "%T", port->common.id);
DTRACE3(port_open, process_str, name_buf, port_str);
}
#endif
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- if (port_num < 0) {
- DEBUGF(("open_driver returned %d(%d)\n", port_num, *err_nump));
+ if (!port) {
+ DEBUGF(("open_driver returned (%d:%d)\n",
+ err_typep ? *err_typep : 4711,
+ err_nump ? *err_nump : 4711));
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
trace_virtual_sched(p, am_in);
}
- OPEN_PORT_ERROR(port_num);
+ goto do_return;
}
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
trace_virtual_sched(p, am_in);
}
- if (binary_io) {
- erts_port_status_bor_set(&erts_port[port_num],
- ERTS_PORT_SFLG_BINARY_IO);
- }
- if (soft_eof) {
- erts_port_status_bor_set(&erts_port[port_num],
- ERTS_PORT_SFLG_SOFT_EOF);
- }
- if (linebuf && erts_port[port_num].linebuf == NULL){
- erts_port[port_num].linebuf = allocate_linebuf(linebuf);
- erts_port_status_bor_set(&erts_port[port_num],
- ERTS_PORT_SFLG_LINEBUF_IO);
+ if (linebuf && port->linebuf == NULL){
+ port->linebuf = allocate_linebuf(linebuf);
+ sflgs |= ERTS_PORT_SFLG_LINEBUF_IO;
}
+
+ if (sflgs)
+ erts_atomic32_read_bor_relb(&port->state, sflgs);
do_return:
if (name_buf)
@@ -974,13 +835,15 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
if (opts.wd && opts.wd != ((char *)dir)) {
erts_free(ERTS_ALC_T_TMP, (void *) opts.wd);
}
- return port_num;
+ return port;
badarg:
- *err_nump = BADARG;
- OPEN_PORT_ERROR(-3);
+ if (err_typep)
+ *err_typep = -3;
+ if (err_nump)
+ *err_nump = BADARG;
+ port = NULL;
goto do_return;
-#undef OPEN_PORT_ERROR
}
/* Arguments can be given in unicode and as raw binaries; convert filename is used to convert */
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index b036c5ef5c..88f980d19f 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -413,7 +413,7 @@ build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, con
static BIF_RETTYPE
re_compile(Process* p, Eterm arg1, Eterm arg2)
{
- Uint slen;
+ ErlDrvSizeT slen;
char *expr;
pcre *result;
int errcode = 0;
@@ -444,7 +444,7 @@ re_compile(Process* p, Eterm arg1, Eterm arg2)
BIF_ERROR(p,BADARG);
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (io_list_to_buf(arg1, expr, slen) != 0) {
+ if (erts_iolist_to_buf(arg1, expr, slen) != 0) {
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
BIF_ERROR(p,BADARG);
}
@@ -797,7 +797,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
memcpy(tmpb,ap->name,ap->len);
tmpb[ap->len] = '\0';
} else {
- Uint slen;
+ ErlDrvSizeT slen;
if (erts_iolist_size(val, &slen)) {
goto error;
}
@@ -809,7 +809,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
(tmpbsiz = slen + 1));
}
}
- if (io_list_to_buf(val, tmpb, slen) != 0) {
+ if (erts_iolist_to_buf(val, tmpb, slen) != 0) {
goto error;
}
tmpb[slen] = '\0';
@@ -853,7 +853,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
const pcre *code_tmp;
RestartContext restart;
byte *temp_alloc = NULL;
- Uint slength;
+ ErlDrvSizeT slength;
int startoffset = 0;
int options = 0, comp_options = 0;
int ovsize;
@@ -877,7 +877,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
if (is_not_tuple(arg2) || (arityval(*tuple_val(arg2)) != 4)) {
if (is_binary(arg2) || is_list(arg2) || is_nil(arg2)) {
/* Compile from textual RE */
- Uint slen;
+ ErlDrvSizeT slen;
char *expr;
pcre *result;
int errcode = 0;
@@ -896,7 +896,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (io_list_to_buf(arg2, expr, slen) != 0) {
+ if (erts_iolist_to_buf(arg2, expr, slen) != 0) {
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
BIF_ERROR(p,BADARG);
}
@@ -1039,7 +1039,7 @@ handle_iolist:
}
restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength);
- if (io_list_to_buf(arg1, restart.subject, slength) != 0) {
+ if (erts_iolist_to_buf(arg1, restart.subject, slength) != 0) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.subject);
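
Note: throughout erl_bif_re.c the iolist sizes move from Uint to ErlDrvSizeT and io_list_to_buf() is replaced by erts_iolist_to_buf(), while keeping the usual size-then-flatten sequence: erts_iolist_size() first, then one allocation and one copy. A self-contained C sketch of that pattern over a hypothetical chunk list (not an ERTS structure):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Minimal stand-in for the size-then-copy pattern: first walk the list
     * to get the total length, then flatten into one allocation. */
    struct chunk { const char *data; size_t len; struct chunk *next; };

    static size_t chain_size(const struct chunk *c)
    {
        size_t n = 0;
        for (; c; c = c->next)
            n += c->len;
        return n;
    }

    static void chain_to_buf(const struct chunk *c, char *buf)
    {
        for (; c; c = c->next) {
            memcpy(buf, c->data, c->len);
            buf += c->len;
        }
    }

    int main(void)
    {
        struct chunk b = { "world", 5, NULL };
        struct chunk a = { "hello ", 6, &b };
        size_t len = chain_size(&a);
        char *buf = malloc(len + 1);
        if (!buf)
            return 1;
        chain_to_buf(&a, buf);
        buf[len] = '\0';
        printf("%s (%zu bytes)\n", buf, len);
        free(buf);
        return 0;
    }
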
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index 525b11f61c..d67695e533 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -265,10 +265,10 @@ link_proc(Process *p, ErtsBifTimer* btm)
{
btm->receiver.proc.ess = p;
btm->receiver.proc.prev = NULL;
- btm->receiver.proc.next = p->bif_timers;
- if (p->bif_timers)
- p->bif_timers->receiver.proc.prev = btm;
- p->bif_timers = btm;
+ btm->receiver.proc.next = p->u.bif_timers;
+ if (p->u.bif_timers)
+ p->u.bif_timers->receiver.proc.prev = btm;
+ p->u.bif_timers = btm;
}
static ERTS_INLINE void
@@ -277,7 +277,7 @@ unlink_proc(ErtsBifTimer* btm)
if (btm->receiver.proc.prev)
btm->receiver.proc.prev->receiver.proc.next = btm->receiver.proc.next;
else
- btm->receiver.proc.ess->bif_timers = btm->receiver.proc.next;
+ btm->receiver.proc.ess->u.bif_timers = btm->receiver.proc.next;
if (btm->receiver.proc.next)
btm->receiver.proc.next->receiver.proc.prev = btm->receiver.proc.prev;
}
@@ -613,7 +613,7 @@ erts_print_bif_timer_info(int to, void *to_arg)
for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
Eterm receiver = (btm->flags & BTM_FLG_BYNAME
? btm->receiver.name
- : btm->receiver.proc.ess->id);
+ : btm->receiver.proc.ess->common.id);
erts_print(to, to_arg, "=timer:%T\n", receiver);
erts_print(to, to_arg, "Message: %T\n", btm->message);
erts_print(to, to_arg, "Time left: %u ms\n",
@@ -637,7 +637,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
erts_smp_proc_lock(p, plocks);
}
- btm = p->bif_timers;
+ btm = p->u.bif_timers;
while (btm) {
ErtsBifTimer *tmp_btm;
ASSERT(!(btm->flags & BTM_FLG_CANCELED));
@@ -647,7 +647,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
erts_cancel_timer(&tmp_btm->tm);
}
- p->bif_timers = NULL;
+ p->u.bif_timers = NULL;
erts_smp_btm_rwunlock();
}
@@ -696,7 +696,7 @@ erts_bif_timer_foreach(void (*func)(Eterm, Eterm, ErlHeapFragment *, void *),
for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
(*func)((btm->flags & BTM_FLG_BYNAME
? btm->receiver.name
- : btm->receiver.proc.ess->id),
+ : btm->receiver.proc.ess->common.id),
btm->message,
btm->bp,
arg);
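
Note: a process's BIF timers now live under p->u.bif_timers, and link_proc()/unlink_proc() keep them on an intrusive doubly-linked list with O(1) insertion at the head and O(1) unlink. A standalone C sketch of the same list shape; the struct names are stand-ins for the ERTS ones:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative intrusive doubly-linked list matching the shape of
     * link_proc()/unlink_proc(): new timers are pushed on the front of the
     * owner's list and can be unlinked without walking it. */
    struct timer { int id; struct timer *prev, *next; };
    struct owner { struct timer *timers; };

    static void link_timer(struct owner *o, struct timer *t)
    {
        t->prev = NULL;
        t->next = o->timers;
        if (o->timers)
            o->timers->prev = t;
        o->timers = t;
    }

    static void unlink_timer(struct owner *o, struct timer *t)
    {
        if (t->prev)
            t->prev->next = t->next;
        else
            o->timers = t->next;
        if (t->next)
            t->next->prev = t->prev;
    }

    int main(void)
    {
        struct owner o = { NULL };
        struct timer t1 = { 1, NULL, NULL }, t2 = { 2, NULL, NULL };
        link_timer(&o, &t1);
        link_timer(&o, &t2);
        unlink_timer(&o, &t1);
        printf("head=%d next=%p\n", o.timers->id, (void *) o.timers->next);
        return 0;
    }
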
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 805d788177..99a4394666 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -124,7 +124,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
struct trace_pattern_flags flags = erts_trace_pattern_flags_off;
int is_global;
Process *meta_tracer_proc = p;
- Eterm meta_tracer_pid = p->id;
+ Eterm meta_tracer_pid = p->common.id;
if (!erts_try_seize_code_write_permission(p)) {
ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist);
@@ -171,14 +171,12 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
} else if (is_internal_port(meta_tracer_pid)) {
Port *meta_tracer_port;
- meta_tracer_proc = NULL;
- if (internal_port_index(meta_tracer_pid) >= erts_max_ports)
+ meta_tracer_proc = NULL;
+ meta_tracer_port = (erts_port_lookup(
+ meta_tracer_pid,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP));
+ if (!meta_tracer_port)
goto error;
- meta_tracer_port =
- &erts_port[internal_port_index(meta_tracer_pid)];
- if (INVALID_TRACER_PORT(meta_tracer_port, meta_tracer_pid)) {
- goto error;
- }
} else {
goto error;
}
@@ -254,7 +252,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetRef(erts_default_meta_match_spec);
erts_default_meta_tracer_pid = meta_tracer_pid;
if (meta_tracer_proc) {
- meta_tracer_proc->trace_flags |= F_TRACER;
+ ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
}
} else if (! flags.breakpoint) {
MatchSetUnref(erts_default_meta_match_spec);
@@ -342,7 +340,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
if (meta_tracer_proc) {
- meta_tracer_proc->trace_flags |= F_TRACER;
+ ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
}
matches = erts_set_trace_pattern(p, mfa, specified,
@@ -526,24 +524,24 @@ Eterm trace_3(BIF_ALIST_3)
if (is_nil(tracer) || is_internal_pid(tracer)) {
Process *tracer_proc = erts_pid2proc(p,
ERTS_PROC_LOCK_MAIN,
- is_nil(tracer) ? p->id : tracer,
+ is_nil(tracer) ? p->common.id : tracer,
ERTS_PROC_LOCKS_ALL);
if (!tracer_proc)
goto error;
- tracer_proc->trace_flags |= F_TRACER;
+ ERTS_TRACE_FLAGS(tracer_proc) |= F_TRACER;
erts_smp_proc_unlock(tracer_proc,
(tracer_proc == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
} else if (is_internal_port(tracer)) {
- Port *tracer_port = erts_id2port(tracer, p, ERTS_PROC_LOCK_MAIN);
- if (!erts_is_valid_tracer_port(tracer)) {
- if (tracer_port)
- erts_smp_port_unlock(tracer_port);
+ Port *tracer_port = erts_id2port_sflgs(tracer,
+ p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
+ if (!tracer_port)
goto error;
- }
- tracer_port->trace_flags |= F_TRACER;
- erts_smp_port_unlock(tracer_port);
+ ERTS_TRACE_FLAGS(tracer_port) |= F_TRACER;
+ erts_port_release(tracer_port);
} else
goto error;
@@ -554,7 +552,7 @@ Eterm trace_3(BIF_ALIST_3)
case am_true:
on = 1;
if (is_nil(tracer))
- tracer = p->id;
+ tracer = p->common.id;
break;
default:
goto error;
@@ -576,26 +574,29 @@ Eterm trace_3(BIF_ALIST_3)
if (pid_spec == tracer)
goto error;
- tracee_port = erts_id2port(pid_spec, p, ERTS_PROC_LOCK_MAIN);
+ tracee_port = erts_id2port_sflgs(pid_spec,
+ p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
if (!tracee_port)
goto error;
if (tracer != NIL && port_already_traced(p, tracee_port, tracer)) {
- erts_smp_port_unlock(tracee_port);
+ erts_port_release(tracee_port);
goto already_traced;
}
if (on)
- tracee_port->trace_flags |= mask;
+ ERTS_TRACE_FLAGS(tracee_port) |= mask;
else
- tracee_port->trace_flags &= ~mask;
+ ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
- if (!tracee_port->trace_flags)
- tracee_port->tracer_proc = NIL;
+ if (!ERTS_TRACE_FLAGS(tracee_port))
+ ERTS_TRACER_PROC(tracee_port) = NIL;
else if (tracer != NIL)
- tracee_port->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_port) = tracer;
- erts_smp_port_unlock(tracee_port);
+ erts_port_release(tracee_port);
matches = 1;
} else if (is_pid(pid_spec)) {
@@ -627,14 +628,14 @@ Eterm trace_3(BIF_ALIST_3)
}
if (on)
- tracee_p->trace_flags |= mask;
+ ERTS_TRACE_FLAGS(tracee_p) |= mask;
else
- tracee_p->trace_flags &= ~mask;
+ ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
- if ((tracee_p->trace_flags & TRACEE_FLAGS) == 0)
- tracee_p->tracer_proc = NIL;
+ if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS) == 0)
+ ERTS_TRACER_PROC(tracee_p) = NIL;
else if (tracer != NIL)
- tracee_p->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_p) = tracer;
erts_smp_proc_unlock(tracee_p,
(tracee_p == p
@@ -708,47 +709,56 @@ Eterm trace_3(BIF_ALIST_3)
ok = 1;
if (procs || mods) {
+ int max = erts_ptab_max(&erts_proc);
/* tracing of processes */
- for (i = 0; i < erts_max_processes; i++) {
+ for (i = 0; i < max; i++) {
Process* tracee_p = erts_pix2proc(i);
if (! tracee_p)
continue;
if (tracer != NIL) {
- if (tracee_p->id == tracer)
+ if (tracee_p->common.id == tracer)
continue;
if (already_traced(NULL, tracee_p, tracer))
continue;
}
if (on) {
- tracee_p->trace_flags |= mask;
+ ERTS_TRACE_FLAGS(tracee_p) |= mask;
} else {
- tracee_p->trace_flags &= ~mask;
+ ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
}
- if(!(tracee_p->trace_flags & TRACEE_FLAGS)) {
- tracee_p->tracer_proc = NIL;
+ if(!(ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)) {
+ ERTS_TRACER_PROC(tracee_p) = NIL;
} else if (tracer != NIL) {
- tracee_p->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_p) = tracer;
}
matches++;
}
}
if (ports || mods) {
+ int max = erts_ptab_max(&erts_port);
/* tracing of ports */
- for (i = 0; i < erts_max_ports; i++) {
- Port *tracee_port = &erts_port[i];
- if (tracee_port->status & ERTS_PORT_SFLGS_DEAD) continue;
+ for (i = 0; i < max; i++) {
+ erts_aint32_t state;
+ Port *tracee_port = erts_pix2port(i);
+ if (!tracee_port)
+ continue;
+ state = erts_atomic32_read_nob(&tracee_port->state);
+ if (state & ERTS_PORT_SFLGS_DEAD)
+ continue;
if (tracer != NIL) {
- if (tracee_port->id == tracer) continue;
- if (port_already_traced(NULL, tracee_port, tracer)) continue;
+ if (tracee_port->common.id == tracer)
+ continue;
+ if (port_already_traced(NULL, tracee_port, tracer))
+ continue;
}
- if (on) tracee_port->trace_flags |= mask;
- else tracee_port->trace_flags &= ~mask;
+ if (on) ERTS_TRACE_FLAGS(tracee_port) |= mask;
+ else ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
- if (!(tracee_port->trace_flags & TRACEE_FLAGS)) {
- tracee_port->tracer_proc = NIL;
+ if (!(ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)) {
+ ERTS_TRACER_PROC(tracee_port) = NIL;
} else if (tracer != NIL) {
- tracee_port->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_port) = tracer;
}
/* matches are not counted for ports since it would violate compatibility */
/* This could be a reason to modify this function or make a new one. */
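
Note: the port-tracing loop now walks the port table by index up to erts_ptab_max(&erts_port), skipping empty slots and ports whose atomically-read state has an ERTS_PORT_SFLGS_DEAD bit set. A small C sketch of that walk over a hypothetical fixed-size table (the table, flag and callback below are illustrative only):

    #include <stdio.h>

    #define DEAD_FLAG 0x1

    struct entry { int used; unsigned state; int id; };

    /* Visit every live entry: skip empty slots and dead/dying entries. */
    static int for_each_live(struct entry *tab, int max,
                             void (*fn)(struct entry *))
    {
        int i, n = 0;
        for (i = 0; i < max; i++) {
            if (!tab[i].used)
                continue;                   /* empty slot */
            if (tab[i].state & DEAD_FLAG)
                continue;                   /* dying/dead entry */
            fn(&tab[i]);
            n++;
        }
        return n;
    }

    static void visit(struct entry *e) { printf("entry %d\n", e->id); }

    int main(void)
    {
        struct entry tab[3] = {
            { 1, 0, 10 }, { 0, 0, 0 }, { 1, DEAD_FLAG, 30 },
        };
        printf("visited %d\n", for_each_live(tab, 3, visit));
        return 0;
    }
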
@@ -817,20 +827,20 @@ static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
* * main lock is held on c_p
* * all locks are held on port tracee_p
*/
- if ((tracee_port->trace_flags & TRACEE_FLAGS)
- && tracee_port->tracer_proc != tracer) {
+ if ((ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)
+ && ERTS_TRACER_PROC(tracee_port) != tracer) {
/* This tracee is already being traced, and not by the
* tracer to be */
- if (is_internal_port(tracee_port->tracer_proc)) {
- if (!erts_is_valid_tracer_port(tracee_port->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(tracee_port))) {
+ if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_port))) {
/* Current trace port now invalid
* - discard it and approve the new. */
goto remove_tracer;
} else
return 1;
}
- else if(is_internal_pid(tracee_port->tracer_proc)) {
- Process *tracer_p = erts_proc_lookup(tracee_port->tracer_proc);
+ else if(is_internal_pid(ERTS_TRACER_PROC(tracee_port))) {
+ Process *tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_port));
if (!tracer_p) {
/* Current trace process now invalid
* - discard it and approve the new. */
@@ -840,8 +850,8 @@ static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
}
else {
remove_tracer:
- tracee_port->trace_flags &= ~TRACEE_FLAGS;
- tracee_port->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(tracee_port) &= ~TRACEE_FLAGS;
+ ERTS_TRACER_PROC(tracee_port) = NIL;
}
}
return 0;
@@ -857,20 +867,22 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
* * main lock is held on c_p
* * all locks multiple are held on tracee_p
*/
- if ((tracee_p->trace_flags & TRACEE_FLAGS)
- && tracee_p->tracer_proc != tracer) {
+ if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)
+ && ERTS_TRACER_PROC(tracee_p) != tracer) {
/* This tracee is already being traced, and not by the
* tracer to be */
- if (is_internal_port(tracee_p->tracer_proc)) {
- if (!erts_is_valid_tracer_port(tracee_p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(tracee_p))) {
+ if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_p))) {
/* Current trace port now invalid
* - discard it and approve the new. */
goto remove_tracer;
} else
return 1;
}
- else if(is_internal_pid(tracee_p->tracer_proc)) {
- Process *tracer_p = erts_proc_lookup(tracee_p->tracer_proc);
+ else if(is_internal_pid(ERTS_TRACER_PROC(tracee_p))) {
+ Process *tracer_p;
+
+ tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_p));
if (!tracer_p) {
/* Current trace process now invalid
* - discard it and approve the new. */
@@ -880,8 +892,8 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
}
else {
remove_tracer:
- tracee_p->trace_flags &= ~TRACEE_FLAGS;
- tracee_p->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(tracee_p) &= ~TRACEE_FLAGS;
+ ERTS_TRACER_PROC(tracee_p) = NIL;
}
}
return 0;
@@ -925,8 +937,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
if (pid_spec == am_new) {
erts_get_default_tracing(&trace_flags, &tracer);
- } else if (is_internal_pid(pid_spec)
- && internal_pid_index(pid_spec) < erts_max_processes) {
+ } else if (is_internal_pid(pid_spec)) {
Process *tracee;
tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
pid_spec, ERTS_PROC_LOCKS_ALL);
@@ -934,16 +945,16 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
if (!tracee) {
return am_undefined;
} else {
- tracer = tracee->tracer_proc;
- trace_flags = tracee->trace_flags;
+ tracer = ERTS_TRACER_PROC(tracee);
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
}
if (is_internal_pid(tracer)) {
if (!erts_proc_lookup(tracer)) {
reset_tracer:
- tracee->trace_flags &= ~TRACEE_FLAGS;
- trace_flags = tracee->trace_flags;
- tracer = tracee->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(tracee) &= ~TRACEE_FLAGS;
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
+ tracer = ERTS_TRACER_PROC(tracee) = NIL;
}
}
else if (is_internal_port(tracer)) {
@@ -1877,7 +1888,7 @@ new_seq_trace_token(Process* p)
SEQ_TRACE_TOKEN(p) = TUPLE5(hp, make_small(0), /* Flags */
make_small(0), /* Label */
make_small(0), /* Serial */
- p->id, /* Internal pid */ /* From */
+ p->common.id, /* Internal pid */ /* From */
make_small(p->seq_trace_lastcnt));
}
}
@@ -2247,9 +2258,11 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
if (!profiler_p)
goto error;
} else if (is_internal_port(profiler)) {
- if (internal_port_index(profiler) >= erts_max_ports) goto error;
- profiler_port = &erts_port[internal_port_index(profiler)];
- if (INVALID_TRACER_PORT(profiler_port, profiler)) goto error;
+ profiler_port = (erts_port_lookup(
+ profiler,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP));
+ if (!profiler_port)
+ goto error;
} else {
goto error;
}
@@ -2313,8 +2326,7 @@ trace_delivered_1(BIF_ALIST_1)
p = NULL;
} else if (! (p = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
BIF_ARG_1, ERTS_PROC_LOCKS_ALL))) {
- if (is_not_internal_pid(BIF_ARG_1)
- || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ if (is_not_internal_pid(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
}
@@ -2333,7 +2345,7 @@ trace_delivered_1(BIF_ALIST_1)
msg = TUPLE3(hp, AM_trace_delivered, BIF_ARG_1, msg_ref);
#ifdef ERTS_SMP
- erts_send_sys_msg_proc(BIF_P->id, BIF_P->id, msg, bp);
+ erts_send_sys_msg_proc(BIF_P->common.id, BIF_P->common.id, msg, bp);
if (p)
erts_smp_proc_unlock(p,
(BIF_P == p
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 1ba1048afa..48a95cdf32 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -425,7 +425,8 @@ DbTable* db_get_table_aux(Process *p,
if (tb) {
db_lock(tb, kind);
if (tb->common.id != id
- || ((tb->common.status & what) == 0 && p->id != tb->common.owner)) {
+ || ((tb->common.status & what) == 0
+ && p->common.id != tb->common.owner)) {
db_unlock(tb, kind);
tb = NULL;
}
@@ -615,7 +616,7 @@ BIF_RETTYPE ets_safe_fixtable_2(BIF_ALIST_2)
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:safe_fixtable(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
- BIF_ARG_1, BIF_ARG_2, BIF_P->id,
+ BIF_ARG_1, BIF_ARG_2, BIF_P->common.id,
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
kind = (BIF_ARG_2 == am_true) ? LCK_READ : LCK_WRITE_REC;
@@ -1194,7 +1195,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:rename(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
- BIF_ARG_1, BIF_ARG_2, BIF_P->id,
+ BIF_ARG_1, BIF_ARG_2, BIF_P->common.id,
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
@@ -1437,7 +1438,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
"db_tab", "db_tab_fix");
tb->common.keypos = keypos;
- tb->common.owner = BIF_P->id;
+ tb->common.owner = BIF_P->common.id;
set_heir(BIF_P, tb, heir, heir_data);
erts_smp_atomic_init_nob(&tb->common.nitems, 0);
@@ -1506,7 +1507,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n",
- BIF_ARG_1, BIF_ARG_2, ret, BIF_P->id,
+ BIF_ARG_1, BIF_ARG_2, ret, BIF_P->common.id,
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n",
erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
@@ -1518,7 +1519,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
if (db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple, BIF_P->id, make_small(slot)),
+ TUPLE2(meta_tuple,
+ BIF_P->common.id,
+ make_small(slot)),
0) != DB_ERROR_NONE) {
erl_exit(1,"Could not update ets metadata.");
}
@@ -1637,7 +1640,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:delete(%T); Process: %T, initial: %T:%T/%bpu\n",
- BIF_ARG_1, BIF_P->id,
+ BIF_ARG_1, BIF_P->common.id,
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
@@ -1654,7 +1657,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
tb->common.status |= DB_DELETE;
- if (tb->common.owner != BIF_P->id) {
+ if (tb->common.owner != BIF_P->common.id) {
DeclareTmpHeap(meta_tuple,3,BIF_P);
/*
@@ -1669,10 +1672,12 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
make_small(tb->common.slot));
BIF_P->flags |= F_USING_DB;
- tb->common.owner = BIF_P->id;
+ tb->common.owner = BIF_P->common.id;
db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,BIF_P->id,make_small(tb->common.slot)),
+ TUPLE2(meta_tuple,
+ BIF_P->common.id,
+ make_small(tb->common.slot)),
0);
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
UnUseTmpHeap(3,BIF_P);
@@ -1748,7 +1753,7 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
}
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->id) {
+ || tb->common.owner != BIF_P->common.id) {
goto badarg;
}
from_pid = tb->common.owner;
@@ -1771,7 +1776,10 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
db_unlock(tb,LCK_WRITE);
erts_send_message(BIF_P, to_proc, &to_locks,
- TUPLE4(buf, am_ETS_TRANSFER, tb->common.id, from_pid, BIF_ARG_3),
+ TUPLE4(buf, am_ETS_TRANSFER,
+ tb->common.id,
+ from_pid,
+ BIF_ARG_3),
0);
erts_smp_proc_unlock(to_proc, to_locks);
UnUseTmpHeap(5,BIF_P);
@@ -1833,7 +1841,7 @@ BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
if (tail != NIL
|| (tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->id) {
+ || tb->common.owner != BIF_P->common.id) {
goto badarg;
}
@@ -2647,7 +2655,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
*/
/* If/when we implement lockless private tables:
- if ((tb->common.status & DB_PRIVATE) && owner != BIF_P->id) {
+ if ((tb->common.status & DB_PRIVATE) && owner != BIF_P->common.id) {
db_unlock(tb, LCK_READ);
rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
owner, ERTS_PROC_LOCK_MAIN);
@@ -3039,9 +3047,9 @@ static int give_away_to_heir(Process* p, DbTable* tb)
Eterm to_pid;
UWord heir_data;
- ASSERT(tb->common.owner == p->id);
+ ASSERT(tb->common.owner == p->common.id);
ASSERT(is_internal_pid(tb->common.heir));
- ASSERT(tb->common.heir != p->id);
+ ASSERT(tb->common.heir != p->common.id);
retry:
to_pid = tb->common.heir;
to_proc = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN,
@@ -3054,7 +3062,7 @@ retry:
db_lock(tb,LCK_WRITE);
ASSERT(tb != NULL);
- if (tb->common.owner != p->id) {
+ if (tb->common.owner != p->common.id) {
if (to_proc != NULL ) {
erts_smp_proc_unlock(to_proc, to_locks);
}
@@ -3065,7 +3073,7 @@ retry:
if (to_proc != NULL ) {
erts_smp_proc_unlock(to_proc, to_locks);
}
- if (to_pid == p->id || to_pid == am_none) {
+ if (to_pid == p->common.id || to_pid == am_none) {
return 0; /* no real heir, table still mine */
}
goto retry;
@@ -3074,7 +3082,8 @@ retry:
if (to_proc == NULL) {
return 0; /* heir not alive, table still mine */
}
- if (to_proc->started_interval != tb->common.heir_started_interval) {
+ if (to_proc->common.u.alive.started_interval
+ != tb->common.heir_started_interval) {
erts_smp_proc_unlock(to_proc, to_locks);
return 0; /* heir dead and pid reused, table still mine */
}
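
Note: give_away_to_heir() now compares the heir's common.u.alive.started_interval with the value recorded in the table, so a process that merely reuses the old heir's pid is not mistaken for the intended heir. A tiny standalone C sketch of that incarnation check; the fields are stand-ins for the ERTS ones:

    #include <stdio.h>

    /* Besides matching the identifier, the stored "started interval" must
     * match the live process's value; otherwise the id has been recycled
     * by a different process. */
    struct proc { int id; unsigned long started_interval; };

    static int same_incarnation(const struct proc *live,
                                int heir_id, unsigned long heir_started)
    {
        return live && live->id == heir_id
            && live->started_interval == heir_started;
    }

    int main(void)
    {
        struct proc reused = { 42, 7 };      /* same id, later incarnation */
        printf("%d\n", same_incarnation(&reused, 42, 3));  /* 0: pid reused */
        printf("%d\n", same_incarnation(&reused, 42, 7));  /* 1: same proc  */
        return 0;
    }
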
@@ -3099,7 +3108,11 @@ retry:
heir_data = tpv[1];
}
erts_send_message(p, to_proc, &to_locks,
- TUPLE4(buf, am_ETS_TRANSFER, tb->common.id, p->id, heir_data),
+ TUPLE4(buf,
+ am_ETS_TRANSFER,
+ tb->common.id,
+ p->common.id,
+ heir_data),
0);
erts_smp_proc_unlock(to_proc, to_locks);
return !0;
@@ -3108,7 +3121,7 @@ retry:
/*
* erts_db_process_exiting() is called when a process terminates.
* It returns 0 when completely done, and !0 when it wants to
- * yield. c_p->u.exit_data can hold a pointer to a state while
+ * yield. c_p->u.terminate can hold a pointer to a state while
* yielding.
*/
#define ERTS_DB_INTERNAL_ERROR(LSTR) \
@@ -3118,8 +3131,8 @@ retry:
int
erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
{
- ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.exit_data;
- Eterm pid = c_p->id;
+ ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.terminate;
+ Eterm pid = c_p->common.id;
ErtsDbProcCleanupState default_state;
int ret;
@@ -3300,7 +3313,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
if (state != &default_state)
erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
- c_p->u.exit_data = NULL;
+ c_p->u.terminate = NULL;
return 0;
default:
@@ -3321,13 +3334,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
break;
}
- ASSERT(c_p->u.exit_data == (void *) state
+ ASSERT(c_p->u.terminate == (void *) state
|| state == &default_state);
if (state == &default_state) {
- c_p->u.exit_data = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
+ c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
sizeof(ErtsDbProcCleanupState));
- sys_memcpy(c_p->u.exit_data,
+ sys_memcpy(c_p->u.terminate,
(void*) state,
sizeof(ErtsDbProcCleanupState));
}
@@ -3353,7 +3366,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
}
else {
for (; fix != NULL; fix = fix->next) {
- if (fix->pid == p->id) {
+ if (fix->pid == p->common.id) {
++(fix->counter);
#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
@@ -3365,7 +3378,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
fix = (DbFixation *) erts_db_alloc(ERTS_ALC_T_DB_FIXATION,
tb, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation));
- fix->pid = p->id;
+ fix->pid = p->common.id;
fix->counter = 1;
fix->next = tb->common.fixations;
tb->common.fixations = fix;
@@ -3376,7 +3389,9 @@ static void fix_table_locked(Process* p, DbTable* tb)
UseTmpHeap(3,p);
db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
if (db_put_hash(meta_pid_to_fixed_tab,
- TUPLE2(meta_tuple, p->id, make_small(tb->common.slot)),
+ TUPLE2(meta_tuple,
+ p->common.id,
+ make_small(tb->common.slot)),
0) != DB_ERROR_NONE) {
UnUseTmpHeap(3,p);
erl_exit(1,"Could not insert ets metadata in safe_fixtable.");
@@ -3396,7 +3411,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) {
- if ((*pp)->pid == p->id) {
+ if ((*pp)->pid == p->common.id) {
DbFixation* fix = *pp;
erts_refc_dec(&tb->common.ref,0);
--(fix->counter);
@@ -3410,7 +3425,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
#endif
db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
db_erase_bag_exact2(meta_pid_to_fixed_tab,
- p->id, make_small(tb->common.slot));
+ p->common.id, make_small(tb->common.slot));
db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
erts_db_free(ERTS_ALC_T_DB_FIXATION,
tb, (void *) fix, sizeof(DbFixation));
@@ -3469,15 +3484,15 @@ static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data)
if (heir == am_none) {
return;
}
- if (heir == me->id) {
- erts_ensure_later_proc_interval(me->started_interval);
- tb->common.heir_started_interval = me->started_interval;
+ if (heir == me->common.id) {
+ erts_ensure_later_proc_interval(me->common.u.alive.started_interval);
+ tb->common.heir_started_interval = me->common.u.alive.started_interval;
}
else {
Process* heir_proc= erts_proc_lookup(heir);
if (heir_proc != NULL) {
- erts_ensure_later_proc_interval(heir_proc->started_interval);
- tb->common.heir_started_interval = heir_proc->started_interval;
+ erts_ensure_later_proc_interval(heir_proc->common.u.alive.started_interval);
+ tb->common.heir_started_interval = heir_proc->common.u.alive.started_interval;
} else {
tb->common.heir = am_none;
}
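
The erl_db.c hunks above describe how erts_db_process_exiting() yields: it returns non-zero when it runs out of work budget and parks its progress in a state record hung off the exiting process (now c_p->u.terminate). A minimal sketch of that yield-with-saved-state pattern, with all names invented for illustration and no claim to match the real ErtsDbProcCleanupState layout:

#include <stdlib.h>

typedef struct { int next_slot; } SketchCleanupState;

/* Returns 0 when completely done, !0 when it wants to yield; *statep
 * plays the role of c_p->u.terminate in the code above. */
static int sketch_cleanup(void **statep, int nslots, int budget)
{
    SketchCleanupState *st = (SketchCleanupState *) *statep;
    if (!st) {                              /* first slice: allocate state */
        st = malloc(sizeof(SketchCleanupState));
        st->next_slot = 0;
        *statep = st;
    }
    while (st->next_slot < nslots) {
        if (budget-- <= 0)
            return 1;                       /* out of reductions: yield */
        /* ... clean up table slot st->next_slot ... */
        st->next_slot++;
    }
    free(st);
    *statep = NULL;                         /* done; drop the saved state */
    return 0;
}
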
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 0c9ca83ce4..bcdb630140 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -138,21 +138,23 @@ set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
Uint flags;
if (tracer == NIL) {
- flags = tracee_p->trace_flags & ~TRACEE_FLAGS;
+ flags = ERTS_TRACE_FLAGS(tracee_p) & ~TRACEE_FLAGS;
} else {
- flags = ((tracee_p->trace_flags & ~d_flags) | e_flags);
+ flags = ((ERTS_TRACE_FLAGS(tracee_p) & ~d_flags) | e_flags);
if (! flags) tracer = NIL;
}
- ret = tracee_p->tracer_proc != tracer || tracee_p->trace_flags != flags
- ? am_true : am_false;
- tracee_p->tracer_proc = tracer;
- tracee_p->trace_flags = flags;
+ ret = ((ERTS_TRACER_PROC(tracee_p) != tracer
+ || ERTS_TRACE_FLAGS(tracee_p) != flags)
+ ? am_true
+ : am_false);
+ ERTS_TRACER_PROC(tracee_p) = tracer;
+ ERTS_TRACE_FLAGS(tracee_p) = flags;
return ret;
}
/*
** Assuming all locks on tracee_p on entry
**
-** Changes tracee_p->trace_flags and tracee_p->tracer_proc
+** Changes ERTS_TRACE_FLAGS(tracee_p) and ERTS_TRACER_PROC(tracee_p)
** according to input disable/enable flags and tracer.
**
** Returns am_true|am_false on success, am_true if value changed,
@@ -173,17 +175,20 @@ set_match_trace(Process *tracee_p, Eterm fail_term, Eterm tracer,
tracer, ERTS_PROC_LOCKS_ALL))) {
if (tracee_p != tracer_p) {
ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- tracer_p->trace_flags |= tracee_p->trace_flags ? F_TRACER : 0;
+ ERTS_TRACE_FLAGS(tracer_p) |= (ERTS_TRACE_FLAGS(tracee_p)
+ ? F_TRACER
+ : 0);
erts_smp_proc_unlock(tracer_p, ERTS_PROC_LOCKS_ALL);
}
} else if (is_internal_port(tracer)) {
Port *tracer_port =
- erts_id2port(tracer, tracee_p, ERTS_PROC_LOCKS_ALL);
+ erts_id2port_sflgs(tracer,
+ tracee_p,
+ ERTS_PROC_LOCKS_ALL,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
if (tracer_port) {
- if (! INVALID_TRACER_PORT(tracer_port, tracer)) {
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- }
- erts_smp_port_unlock(tracer_port);
+ ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
+ erts_port_release(tracer_port);
}
} else {
ASSERT(is_nil(tracer));
@@ -2174,7 +2179,7 @@ restart:
pc += n;
break;
case matchSelf:
- *esp++ = c_p->id;
+ *esp++ = c_p->common.id;
break;
case matchWaste:
--esp;
@@ -2261,7 +2266,7 @@ restart:
case matchEnableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, c_p->tracer_proc, 0, n);
+ set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), 0, n);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2274,7 +2279,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, c_p->tracer_proc, 0, n);
+ set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), 0, n);
esp[-1] = am_true;
}
}
@@ -2282,7 +2287,7 @@ restart:
case matchDisableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, c_p->tracer_proc, n, 0);
+ set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), n, 0);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2295,7 +2300,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, c_p->tracer_proc, n, 0);
+ set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), n, 0);
esp[-1] = am_true;
}
}
@@ -2316,12 +2321,12 @@ restart:
--esp;
if (*esp == am_true) {
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_TRACE_SILENT;
+ ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
else if (*esp == am_false) {
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags &= ~F_TRACE_SILENT;
+ ERTS_TRACE_FLAGS(c_p) &= ~F_TRACE_SILENT;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
break;
@@ -2329,11 +2334,11 @@ restart:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = c_p->tracer_proc;
+ Eterm tracer = ERTS_TRACER_PROC(c_p);
/* XXX Atomicity note: Not fully atomic. Default tracer
* is sampled from current process but applied to
* tracee and tracer later after releasing main
- * locks on current process, so c_p->tracer_proc
+ * locks on current process, so ERTS_TRACER_PROC(c_p)
* may actually have changed when tracee and tracer
 * get updated. I do not think anybody will notice.
* It is just the default value that is not fully atomic.
@@ -2358,7 +2363,7 @@ restart:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = c_p->tracer_proc;
+ Eterm tracer = ERTS_TRACER_PROC(c_p);
/* XXX Atomicity note. Not fully atomic. See above.
* Above it could possibly be solved, but not here.
*/
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index dcecc4251a..d8f6e40d2e 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -320,10 +320,10 @@ ERTS_GLB_INLINE int db_eq(DbTableCommon* tb, Eterm a, DbTerm* b)
#define DB_INFO (DB_PROTECTED|DB_PUBLIC|DB_PRIVATE)
#define ONLY_WRITER(P,T) (((T)->common.status & (DB_PRIVATE|DB_PROTECTED)) \
- && (T)->common.owner == (P)->id)
+ && (T)->common.owner == (P)->common.id)
#define ONLY_READER(P,T) (((T)->common.status & DB_PRIVATE) && \
-(T)->common.owner == (P)->id)
+(T)->common.owner == (P)->common.id)
/* Function prototypes */
BIF_RETTYPE db_get_trace_control_word(Process* p);
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index 22e873afc6..b90d00f236 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -252,16 +252,16 @@ void erts_check_stack(Process *p)
if (p->stop > stack_start)
erl_exit(1,
"<%lu.%lu.%lu>: Stack underflow\n",
- internal_pid_channel_no(p->id),
- internal_pid_number(p->id),
- internal_pid_serial(p->id));
+ internal_pid_channel_no(p->common.id),
+ internal_pid_number(p->common.id),
+ internal_pid_serial(p->common.id));
if (p->stop < stack_end)
erl_exit(1,
"<%lu.%lu.%lu>: Stack overflow\n",
- internal_pid_channel_no(p->id),
- internal_pid_number(p->id),
- internal_pid_serial(p->id));
+ internal_pid_channel_no(p->common.id),
+ internal_pid_number(p->common.id),
+ internal_pid_serial(p->common.id));
for (elemp = p->stop; elemp < stack_start; elemp++) {
int in_mbuf = 0;
@@ -284,9 +284,9 @@ void erts_check_stack(Process *p)
erl_exit(1,
"<%lu.%lu.%lu>: Wild stack pointer\n",
- internal_pid_channel_no(p->id),
- internal_pid_number(p->id),
- internal_pid_serial(p->id));
+ internal_pid_channel_no(p->common.id),
+ internal_pid_number(p->common.id),
+ internal_pid_serial(p->common.id));
}
}
@@ -387,16 +387,16 @@ void verify_process(Process *p)
#define VERIFY_AREA(name,ptr,sz) { \
int n = (sz); \
while (n--) if(!verify_eterm(p,*(ptr+n))) \
- erl_exit(1,"Wild pointer found in " name " of %T!\n",p->id); }
+ erl_exit(1,"Wild pointer found in " name " of %T!\n",p->common.id); }
#define VERIFY_ETERM(name,eterm) { \
if(!verify_eterm(p,eterm)) \
- erl_exit(1,"Wild pointer found in " name " of %T!\n",p->id); }
+ erl_exit(1,"Wild pointer found in " name " of %T!\n",p->common.id); }
ErlMessage* mp = p->msg.first;
- VERBOSE(DEBUG_MEMORY,("Verify process: %T...\n",p->id));
+ VERBOSE(DEBUG_MEMORY,("Verify process: %T...\n",p->common.id));
while (mp != NULL) {
VERIFY_ETERM("message term",ERL_MESSAGE_TERM(mp));
@@ -516,7 +516,7 @@ static void print_process_memory(Process *p)
ErlHeapFragment* bp = MBUF(p);
erts_printf("==============================\n");
- erts_printf("|| Memory info for %T ||\n",p->id);
+ erts_printf("|| Memory info for %T ||\n",p->common.id);
erts_printf("==============================\n");
erts_printf("-- %-*s ---%s-%s-%s-%s--\n",
@@ -601,7 +601,7 @@ void print_memory_info(Process *p)
{
if (p != NULL) {
erts_printf("======================================\n");
- erts_printf("|| Memory info for %-12T ||\n",p->id);
+ erts_printf("|| Memory info for %-12T ||\n",p->common.id);
erts_printf("======================================\n");
erts_printf("+- local heap ----%s-%s-%s-%s-+\n",
dashes,dashes,dashes,dashes);
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 771ee46d2b..046b46513f 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -133,7 +133,7 @@ typedef struct {
#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
#define ERL_DRV_EXTENDED_MAJOR_VERSION 2
-#define ERL_DRV_EXTENDED_MINOR_VERSION 0
+#define ERL_DRV_EXTENDED_MINOR_VERSION 1
/*
* The emulator will refuse to load a driver with different major
@@ -154,6 +154,7 @@ typedef struct {
#define ERL_DRV_FLAG_USE_PORT_LOCKING (1 << 0)
#define ERL_DRV_FLAG_SOFT_BUSY (1 << 1)
+#define ERL_DRV_FLAG_NO_BUSY_MSGQ (1 << 2)
/*
* Integer types
@@ -207,8 +208,8 @@ typedef struct erl_drv_binary {
typedef struct _erl_drv_data* ErlDrvData; /* Data to be used by the driver itself. */
#ifndef ERL_SYS_DRV
typedef struct _erl_drv_event* ErlDrvEvent; /* An event to be selected on. */
-typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */
#endif
+typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */
typedef struct _erl_drv_port* ErlDrvThreadData; /* Thread data. */
#if !defined(__WIN32__) && !defined(_WIN32) && !defined(_WIN32_) && !defined(USE_SELECT)
@@ -377,9 +378,18 @@ typedef struct erl_drv_entry {
ErlDrvEntry* driver_init(void)
#endif
+#define ERL_DRV_BUSY_MSGQ_DISABLED (~((ErlDrvSizeT) 0))
+#define ERL_DRV_BUSY_MSGQ_READ_ONLY ((ErlDrvSizeT) 0)
+#define ERL_DRV_BUSY_MSGQ_LIM_MAX (ERL_DRV_BUSY_MSGQ_DISABLED - 1)
+#define ERL_DRV_BUSY_MSGQ_LIM_MIN ((ErlDrvSizeT) 1)
+
/*
* These are the functions available for driver writers.
*/
+EXTERN void erl_drv_busy_msgq_limits(ErlDrvPort port,
+ ErlDrvSizeT *low,
+ ErlDrvSizeT *high);
+
EXTERN int driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on);
EXTERN int driver_event(ErlDrvPort port, ErlDrvEvent event,
ErlDrvEventData event_data);
@@ -594,11 +604,33 @@ EXTERN ErlDrvPort driver_create_port(ErlDrvPort creator_port,
ErlDrvData drv_data);
+/*
+ * driver_output_term() is deprecated, and scheduled for removal in
+ * OTP-R17. Use erl_drv_output_term() instead. For more information
+ * see the erl_driver(3) documentation.
+ */
+EXTERN int driver_output_term(ErlDrvPort ix,
+ ErlDrvTermData* data,
+ int len) ERL_DRV_DEPRECATED_FUNC;
+/*
+ * driver_send_term() is deprecated, and scheduled for removal in
+ * OTP-R17. Use erl_drv_send_term() instead. For more information
+ * see the erl_driver(3) documentation.
+ */
+EXTERN int driver_send_term(ErlDrvPort ix,
+ ErlDrvTermData to,
+ ErlDrvTermData* data,
+ int len) ERL_DRV_DEPRECATED_FUNC;
+
/* output term data to the port owner */
-EXTERN int driver_output_term(ErlDrvPort ix, ErlDrvTermData* data, int len);
+EXTERN int erl_drv_output_term(ErlDrvTermData port,
+ ErlDrvTermData* data,
+ int len);
/* output term data to a specific process */
-EXTERN int driver_send_term(ErlDrvPort ix, ErlDrvTermData to,
- ErlDrvTermData* data, int len);
+EXTERN int erl_drv_send_term(ErlDrvTermData port,
+ ErlDrvTermData to,
+ ErlDrvTermData* data,
+ int len);
/* Async IO functions */
EXTERN long driver_async(ErlDrvPort ix,
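
The erl_driver.h hunk above deprecates driver_output_term()/driver_send_term() in favour of erl_drv_output_term()/erl_drv_send_term(), which take the term-level port id (ErlDrvTermData) rather than the ErlDrvPort handle, and bumps the extended minor version for the new erl_drv_busy_msgq_limits() call. A small hedged sketch of a driver helper using the replacement API; reply_ok() is an invented name and not part of any driver in this diff:

#include "erl_driver.h"

static void reply_ok(ErlDrvPort port)
{
    ErlDrvTermData port_id = driver_mk_port(port);   /* term-level id */
    ErlDrvTermData spec[] = {
        ERL_DRV_PORT, port_id,
        ERL_DRV_ATOM, driver_mk_atom("ok"),
        ERL_DRV_TUPLE, 2
    };
    /* erl_drv_output_term() takes the term-level id, unlike the
     * deprecated driver_output_term() which took the ErlDrvPort. */
    erl_drv_output_term(port_id, spec, sizeof(spec) / sizeof(spec[0]));
}
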
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 5ae4b9254b..d377ba8f31 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -59,7 +59,7 @@ static Uint reclaimed; /* no of words reclaimed in GCs */
erts_fprintf(stderr, "htop=%p\n", (p)->htop); \
erts_fprintf(stderr, "heap=%p\n", (p)->heap); \
erl_exit(ERTS_ABORT_EXIT, "%s, line %d: %T: Overrun stack and heap\n", \
- __FILE__,__LINE__,(P)->id); \
+ __FILE__,__LINE__,(P)->common.id); \
}
#ifdef DEBUG
@@ -1946,9 +1946,9 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
n++;
}
#endif
- ASSERT(is_nil(p->tracer_proc) ||
- is_internal_pid(p->tracer_proc) ||
- is_internal_port(p->tracer_proc));
+ ASSERT(is_nil(ERTS_TRACER_PROC(p)) ||
+ is_internal_pid(ERTS_TRACER_PROC(p)) ||
+ is_internal_port(ERTS_TRACER_PROC(p)));
ASSERT(is_pid(follow_moved(p->group_leader)));
if (is_not_immed(p->group_leader)) {
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 4b90e5394a..175f04b15b 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -44,6 +44,7 @@
#include "erl_thr_progress.h"
#include "erl_thr_queue.h"
#include "erl_async.h"
+#include "erl_ptab.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -109,6 +110,11 @@ const int etp_lock_check = 1;
#else
const int etp_lock_check = 0;
#endif
+#ifdef WORDS_BIGENDIAN
+const int etp_big_endian = 1;
+#else
+const int etp_big_endian = 0;
+#endif
/*
* Note about VxWorks: All variables must be initialized by executable code,
* not by an initializer. Otherwise a new instance of the emulator will
@@ -121,9 +127,10 @@ extern void ConNormalExit(void);
extern void ConWaitForExit(void);
#endif
-static void erl_init(int ncpu);
-
-#define ERTS_MIN_COMPAT_REL 7
+static void erl_init(int ncpu,
+ int proc_tab_sz,
+ int port_tab_sz,
+ int port_tab_sz_ignore_files);
static erts_atomic_t exiting;
@@ -206,8 +213,6 @@ ErtsModifiedTimings erts_modified_timings[] = {
Export *erts_delay_trap = NULL;
-int erts_use_r9_pids_ports;
-
int ignore_break;
int replace_intr;
@@ -271,12 +276,18 @@ void
erts_short_init(void)
{
int ncpu = early_init(NULL, NULL);
- erl_init(ncpu);
+ erl_init(ncpu,
+ ERTS_DEFAULT_MAX_PROCESSES,
+ ERTS_DEFAULT_MAX_PORTS,
+ 0);
erts_initialized = 1;
}
static void
-erl_init(int ncpu)
+erl_init(int ncpu,
+ int proc_tab_sz,
+ int port_tab_sz,
+ int port_tab_sz_ignore_files)
{
init_benchmarking();
@@ -284,7 +295,7 @@ erl_init(int ncpu)
erts_init_gc();
erts_init_time();
erts_init_sys_common_misc();
- erts_init_process(ncpu);
+ erts_init_process(ncpu, proc_tab_sz);
erts_init_scheduling(no_schedulers,
no_schedulers_online);
erts_init_cpu_topology(); /* Must be after init_scheduling */
@@ -306,6 +317,7 @@ erl_init(int ncpu)
erts_bif_info_init();
erts_ddll_init();
init_emulator();
+ erts_ptab_init(); /* Must be after init_emulator() */
erts_bp_init();
init_db(); /* Must be after init_emulator */
erts_bif_timer_init();
@@ -313,7 +325,7 @@ erl_init(int ncpu)
init_dist();
erl_drv_thr_init();
erts_init_async();
- init_io();
+ erts_init_io(port_tab_sz, port_tab_sz_ignore_files);
init_load();
erts_init_bif();
erts_init_bif_chksum();
@@ -457,6 +469,7 @@ load_preloaded(void)
/* be helpful (or maybe downright rude:-) */
void erts_usage(void)
{
+ int this_rel = this_rel_num();
erts_fprintf(stderr, "Usage: %s [flags] [ -- [init_args] ]\n", progname(program));
erts_fprintf(stderr, "The flags are:\n\n");
@@ -490,16 +503,20 @@ void erts_usage(void)
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
-
+ erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n");
+ erts_fprintf(stderr, " Note that this flag is deprecated!\n");
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n");
erts_fprintf(stderr, "-P number set maximum number of processes on this node,\n");
erts_fprintf(stderr, " valid range is [%d-%d]\n",
- ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES);
+ ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES);
+ erts_fprintf(stderr, "-Q number set maximum number of ports on this node,\n");
+ erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ ERTS_MIN_PORTS, ERTS_MAX_PORTS);
erts_fprintf(stderr, "-R number set compatibility release number,\n");
erts_fprintf(stderr, " valid range [%d-%d]\n",
- ERTS_MIN_COMPAT_REL, this_rel_num());
+ this_rel-2, this_rel);
erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
erts_fprintf(stderr, "-rg amount set reader groups limit\n");
@@ -519,6 +536,7 @@ void erts_usage(void)
erts_fprintf(stderr, " valid range is [%d-%d]\n",
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
ERTS_SCHED_THREAD_MAX_STACK_SIZE);
+ erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
erts_fprintf(stderr, " schedulers online (n2), valid range for both\n");
erts_fprintf(stderr, " numbers are [1-%d]\n",
@@ -612,7 +630,6 @@ early_init(int *argc, char **argv) /*
erts_printf_eterm_func = erts_printf_term;
erts_disable_tolerant_timeofday = 0;
display_items = 200;
- erts_proc.max = ERTS_DEFAULT_MAX_PROCESSES;
erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
erts_async_max_threads = 0;
erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
@@ -641,8 +658,6 @@ early_init(int *argc, char **argv) /*
erts_compat_rel = this_rel_num();
- erts_use_r9_pids_ports = 0;
-
erts_sys_pre_init();
erts_atomic_init_nob(&exiting, 0);
#ifdef ERTS_SMP
@@ -897,11 +912,13 @@ erl_start(int argc, char **argv)
{
int i = 1;
char* arg=NULL;
- char* Parg = NULL;
int have_break_handler = 1;
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
int ncpu = early_init(&argc, argv);
+ int proc_tab_sz = ERTS_DEFAULT_MAX_PROCESSES;
+ int port_tab_sz = ERTS_DEFAULT_MAX_PORTS;
+ int port_tab_sz_ignore_files = 0;
envbufsz = sizeof(envbuf);
if (erts_sys_getenv_raw(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
@@ -1152,12 +1169,53 @@ erl_start(int argc, char **argv)
arg);
break;
- case 'P':
- /* set maximum number of processes */
- Parg = get_arg(argv[i]+2, argv[i+1], &i);
- erts_proc.max = atoi(Parg);
- /* Check of result is delayed until later. This is because +R
- may be given after +P. */
+ case 'n':
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ switch (arg[0]) {
+ case 's': /* synchronous */
+ erts_port_synchronous_ops = 1;
+ erts_port_schedule_all_ops = 0;
+ break;
+ case 'a': /* asynchronous */
+ erts_port_synchronous_ops = 0;
+ erts_port_schedule_all_ops = 1;
+ break;
+ case 'd': /* Default - schedule on conflict (asynchronous) */
+ erts_port_synchronous_ops = 0;
+ erts_port_schedule_all_ops = 0;
+ break;
+ default:
+ bad_n_option:
+ erts_fprintf(stderr, "bad -n option %s\n", arg);
+ erts_usage();
+ }
+ if (arg[1] != '\0')
+ goto bad_n_option;
+ break;
+
+ case 'P': /* set maximum number of processes */
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ errno = 0;
+ proc_tab_sz = strtol(arg, NULL, 10);
+ if (errno != 0
+ || proc_tab_sz < ERTS_MIN_PROCESSES
+ || ERTS_MAX_PROCESSES < proc_tab_sz) {
+ erts_fprintf(stderr, "bad number of processes %s\n", arg);
+ erts_usage();
+ }
+ break;
+
+ case 'Q': /* set maximum number of ports */
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ errno = 0;
+ port_tab_sz = strtol(arg, NULL, 10);
+ if (errno != 0
+ || port_tab_sz < ERTS_MIN_PORTS
+ || ERTS_MAX_PORTS < port_tab_sz) {
+ erts_fprintf(stderr, "bad number of ports %s\n", arg);
+ erts_usage();
+ }
+ port_tab_sz_ignore_files = 1;
break;
case 'S' : /* Was handled in early_init() just read past it */
@@ -1259,6 +1317,19 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("pp", sub_param)) {
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ if (sys_strcmp(arg, "true") == 0)
+ erts_port_parallelism = 1;
+ else if (sys_strcmp(arg, "false") == 0)
+ erts_port_parallelism = 0;
+ else {
+ erts_fprintf(stderr,
+ "bad port parallelism scheduling hint %s\n",
+ arg);
+ erts_usage();
+ }
+ }
else if (sys_strcmp("nsp", sub_param) == 0)
erts_use_sender_punish = 0;
else if (sys_strcmp("wt", sub_param) == 0) {
@@ -1340,22 +1411,19 @@ erl_start(int argc, char **argv)
case 'R': {
/* set compatibility release */
+ int this_rel;
arg = get_arg(argv[i]+2, argv[i+1], &i);
erts_compat_rel = atoi(arg);
- if (erts_compat_rel < ERTS_MIN_COMPAT_REL
- || erts_compat_rel > this_rel_num()) {
+ this_rel = this_rel_num();
+ if (erts_compat_rel < this_rel - 2 || this_rel < erts_compat_rel) {
erts_fprintf(stderr, "bad compatibility release number %s\n", arg);
erts_usage();
}
- ASSERT(ERTS_MIN_COMPAT_REL >= 7);
switch (erts_compat_rel) {
- case 7:
- case 8:
- case 9:
- erts_use_r9_pids_ports = 1;
+ /* Currently no compat features... */
default:
break;
}
@@ -1397,8 +1465,6 @@ erl_start(int argc, char **argv)
}
break;
}
- case 'n': /* XXX obsolete */
- break;
case 'c':
if (argv[i][2] == 0) { /* -c: documented option */
erts_disable_tolerant_timeofday = 1;
@@ -1453,15 +1519,6 @@ erl_start(int argc, char **argv)
i++;
}
- /* Delayed check of +P flag */
- if (erts_proc.max < ERTS_MIN_PROCESSES
- || erts_proc.max > ERTS_MAX_PROCESSES
- || (erts_use_r9_pids_ports
- && erts_proc.max > ERTS_MAX_R9_PROCESSES)) {
- erts_fprintf(stderr, "bad number of processes %s\n", Parg);
- erts_usage();
- }
-
/* Restart will not reinstall the break handler */
#ifdef __WIN32__
if (ignore_break)
@@ -1482,7 +1539,10 @@ erl_start(int argc, char **argv)
boot_argc = argc - i; /* Number of arguments to init */
boot_argv = &argv[i];
- erl_init(ncpu);
+ erl_init(ncpu,
+ proc_tab_sz,
+ port_tab_sz,
+ port_tab_sz_ignore_files);
load_preloaded();
erts_end_staging_code_ix();
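
The erl_init.c hunks above replace the delayed +P check with immediate strtol()-based validation and add the matching +Q flag for the port table size. A condensed sketch of that parse-and-range-check pattern; parse_table_size() is an invented helper used only to illustrate the idiom:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_table_size(const char *arg, long min, long max, long *out)
{
    long val;
    errno = 0;                         /* strtol() reports overflow via errno */
    val = strtol(arg, NULL, 10);
    if (errno != 0 || val < min || max < val) {
        fprintf(stderr, "bad table size %s\n", arg);
        return -1;                     /* caller would then call erts_usage() */
    }
    *out = val;
    return 0;
}
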
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 314d2f6a9c..69bb4be717 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -95,7 +95,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "dist_entry_links", "address" },
{ "code_write_permission", NULL },
{ "proc_status", "pid" },
- { "proc_tab", NULL },
{ "ports_snapshot", NULL },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
@@ -115,9 +114,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#if defined(ENABLE_CHILD_WAITER_THREAD) || defined(ERTS_SMP)
{ "child_status", NULL },
#endif
-#ifdef __WIN32__
- { "sys_driver_data_lock", NULL },
-#endif
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
@@ -127,6 +123,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "schdlr_sspnd", NULL },
{ "migration_info_update", NULL },
{ "run_queue", "address" },
+ { "process_table", NULL },
{ "cpu_info", NULL },
{ "pollset", "address" },
#ifdef __WIN32__
@@ -157,12 +154,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pmmap", NULL },
#endif
#ifdef ERTS_SMP
+ { "port_sched_lock", "port_id" },
{ "port_task_pre_alloc_lock", "address" },
- { "port_taskq_pre_alloc_lock", "address" },
{ "proclist_pre_alloc_lock", "address" },
- { "port_tasks_lock", NULL },
- { "get_free_port", NULL },
- { "port_state", "address" },
+ { "port_table", NULL },
{ "xports_list_pre_alloc_lock", "address" },
{ "inet_buffer_stack_lock", NULL },
{ "gc_info", NULL },
@@ -247,6 +242,7 @@ typedef struct {
typedef struct erts_lc_locked_locks_t_ erts_lc_locked_locks_t;
struct erts_lc_locked_locks_t_ {
char *thread_name;
+ int emu_thread;
erts_tid_t tid;
erts_lc_locked_locks_t *next;
erts_lc_locked_locks_t *prev;
@@ -364,6 +360,7 @@ create_locked_locks(char *thread_name)
if (!l_lcks->thread_name)
lc_abort();
+ l_lcks->emu_thread = 0;
l_lcks->tid = erts_thr_self();
l_lcks->required.first = NULL;
l_lcks->required.last = NULL;
@@ -671,7 +668,7 @@ erts_lc_set_thread_name(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
if (!l_lcks)
- (void) create_locked_locks(thread_name);
+ l_lcks = create_locked_locks(thread_name);
else {
ASSERT(l_lcks->thread_name);
free((void *) l_lcks->thread_name);
@@ -679,6 +676,14 @@ erts_lc_set_thread_name(char *thread_name)
if (!l_lcks->thread_name)
lc_abort();
}
+ l_lcks->emu_thread = 1;
+}
+
+int
+erts_lc_is_emu_thr(void)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ return l_lcks->emu_thread;
}
int
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index df7b3758e1..068340abe7 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -102,6 +102,7 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_require_lock(erts_lc_lock_t *lck);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
+int erts_lc_is_emu_thr(void);
#define ERTS_LC_ASSERT(A) \
((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index d5b7d01048..325d77e911 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -495,7 +495,7 @@ queue_message(Process *c_p,
#ifndef ERTS_SMP
res = receiver->msg.len;
#else
- res = receiver->u.alive.msg_inq.len;
+ res = receiver->msg_inq.len;
if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
/*
* We move 'in queue' to 'private queue' and place
@@ -894,8 +894,8 @@ erts_send_message(Process* sender,
#ifdef USE_VM_PROBES
*sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send)) {
- erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->id);
- erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->id);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->common.id);
}
#endif
if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
@@ -917,7 +917,7 @@ erts_send_message(Process* sender,
seq_trace_update_send(sender);
seq_trace_output(stoken, message, SEQ_TRACE_SEND,
- receiver->id, sender);
+ receiver->common.id, sender);
seq_trace_size = 6; /* TUPLE5 */
#ifdef USE_VM_PROBES
}
@@ -948,7 +948,7 @@ erts_send_message(Process* sender,
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) Spreading tag (%T) with "
- "message %T!\r\n",sender->id, utag, message);
+ "message %T!\r\n",sender->common.id, utag, message);
#endif
}
#endif
@@ -1136,7 +1136,7 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
save = TUPLE3(hp, am_EXIT, from_copy, mess);
hp += 4;
/* the trace token must in this case be updated by the caller */
- seq_trace_output(token, save, SEQ_TRACE_SEND, to->id, NULL);
+ seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL);
temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap);
erts_queue_message(to, to_locksp, bp, save, temptoken
#ifdef USE_VM_PROBES
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 2ae94965b1..771eba431f 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -127,21 +127,21 @@ typedef struct {
/* Move in message queue to end of private message queue */
#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \
do { \
- if ((P)->u.alive.msg_inq.first) { \
- *(P)->msg.last = (P)->u.alive.msg_inq.first; \
- (P)->msg.last = (P)->u.alive.msg_inq.last; \
- (P)->msg.len += (P)->u.alive.msg_inq.len; \
- (P)->u.alive.msg_inq.first = NULL; \
- (P)->u.alive.msg_inq.last = &(P)->u.alive.msg_inq.first; \
- (P)->u.alive.msg_inq.len = 0; \
+ if ((P)->msg_inq.first) { \
+ *(P)->msg.last = (P)->msg_inq.first; \
+ (P)->msg.last = (P)->msg_inq.last; \
+ (P)->msg.len += (P)->msg_inq.len; \
+ (P)->msg_inq.first = NULL; \
+ (P)->msg_inq.last = &(P)->msg_inq.first; \
+ (P)->msg_inq.len = 0; \
} \
} while (0)
/* Add message last in message queue */
#define LINK_MESSAGE(p, mp) do { \
- *(p)->u.alive.msg_inq.last = (mp); \
- (p)->u.alive.msg_inq.last = &(mp)->next; \
- (p)->u.alive.msg_inq.len++; \
+ *(p)->msg_inq.last = (mp); \
+ (p)->msg_inq.last = &(mp)->next; \
+ (p)->msg_inq.len++; \
} while(0)
#else
@@ -245,6 +245,9 @@ void erts_move_msg_attached_data_to_heap(Eterm **, ErlOffHeap *, ErlMessage *);
Eterm erts_msg_distext2heap(Process *, ErtsProcLocks *, ErlHeapFragment **,
Eterm *, ErtsDistExternal *);
+void erts_cleanup_offheap(ErlOffHeap *offheap);
+
+
ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg);
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg);
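
The erl_message.h macros above splice the public in-queue (now p->msg_inq) onto the tail of the private queue (p->msg) in O(1) by moving the first/last/len triple. A simplified, self-contained sketch of that splice; the Sketch* types are invented and do not reflect the real Process fields:

typedef struct sketch_msg { struct sketch_msg *next; } SketchMsg;
typedef struct { SketchMsg *first; SketchMsg **last; int len; } SketchQueue;

static void sketch_mv_inq2privq(SketchQueue *privq, SketchQueue *inq)
{
    if (inq->first) {
        *privq->last = inq->first;     /* hook in-queue onto private tail */
        privq->last = inq->last;
        privq->len += inq->len;
        inq->first = NULL;             /* reset in-queue to empty */
        inq->last = &inq->first;
        inq->len = 0;
    }
}
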
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index 1a84950120..63175c44d6 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -971,7 +971,7 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1)
}
} else {
erts_printf("Dumping pid monitors--------------------\n");
- erts_dump_monitors(rp->monitors,0);
+ erts_dump_monitors(ERTS_P_MONITORS(rp),0);
erts_printf("Monitors dumped-------------------------\n");
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
@@ -985,12 +985,15 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1)
Process *rp;
DistEntry *dep;
if (is_internal_port(pid)) {
- Port *rport = erts_id2port(pid, p, ERTS_PROC_LOCK_MAIN);
+ Port *rport = erts_id2port_sflgs(pid,
+ p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
if (rport) {
erts_printf("Dumping port links----------------------\n");
- erts_dump_links(rport->nlinks,0);
+ erts_dump_links(ERTS_P_LINKS(rport), 0);
erts_printf("Links dumped----------------------------\n");
- erts_smp_port_unlock(rport);
+ erts_port_release(rport);
BIF_RET(am_true);
} else {
BIF_ERROR(p,BADARG);
@@ -1014,7 +1017,7 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1)
} else {
erts_printf("Dumping pid links-----------------------\n");
- erts_dump_links(rp->nlinks,0);
+ erts_dump_links(ERTS_P_LINKS(rp), 0);
erts_printf("Links dumped----------------------------\n");
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h
index d3f6d410dd..a7fa4e0145 100644
--- a/erts/emulator/beam/erl_monitors.h
+++ b/erts/emulator/beam/erl_monitors.h
@@ -137,8 +137,6 @@ typedef struct erts_suspend_monitor {
#define ERTS_LINK_ROOT(Linkp) ((Linkp)->shared.root)
#define ERTS_LINK_REFC(Linkp) ((Linkp)->shared.refc)
-#define ERTS_LINK_ROOT_AS_UINT(Linkp) (*((Uint *) &((Linkp)->root)))
-
Uint erts_tot_link_lh_size(void);
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 632d756481..1bd2d933b2 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -263,7 +263,7 @@ ErlNifEnv* enif_alloc_env(void)
HEAP_LIMIT(&msg_env->phony_proc) = phony_heap;
HEAP_END(&msg_env->phony_proc) = phony_heap;
MBUF(&msg_env->phony_proc) = NULL;
- msg_env->phony_proc.id = ERTS_INVALID_PID;
+ msg_env->phony_proc.common.id = ERTS_INVALID_PID;
#ifdef FORCE_HEAP_FRAGS
msg_env->phony_proc.space_verified = 0;
msg_env->phony_proc.space_verified_from = NULL;
@@ -287,7 +287,7 @@ void enif_clear_env(ErlNifEnv* env)
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env;
Process* p = &menv->phony_proc;
ASSERT(p == menv->env.proc);
- ASSERT(p->id == ERTS_INVALID_PID);
+ ASSERT(p->common.id == ERTS_INVALID_PID);
ASSERT(MBUF(p) == menv->env.heap_frag);
if (MBUF(p) != NULL) {
erts_cleanup_offheap(&MSO(p));
@@ -319,7 +319,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
if (env != NULL) {
c_p = env->proc;
- if (receiver == c_p->id) {
+ if (receiver == c_p->common.id) {
rp_locks = ERTS_PROC_LOCK_MAIN;
flush_me = 1;
}
@@ -341,7 +341,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
: erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC));
if (rp == NULL) {
- ASSERT(env == NULL || receiver != c_p->id);
+ ASSERT(env == NULL || receiver != c_p->common.id);
return 0;
}
flush_env(msg_env);
@@ -397,7 +397,7 @@ static int is_offheap(const ErlOffHeap* oh)
ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)
{
- pid->pid = caller_env->proc->id;
+ pid->pid = caller_env->proc->common.id;
return pid;
}
int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)
@@ -505,7 +505,7 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
struct enif_tmp_obj_t* tobj;
ErtsAlcType_t allocator;
- Uint sz;
+ ErlDrvSizeT sz;
if (is_binary(term)) {
return enif_inspect_binary(env,term,bin);
}
@@ -531,7 +531,7 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
bin->size = sz;
bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
- io_list_to_buf(term, (char*) bin->data, sz);
+ erts_iolist_to_buf(term, (char*) bin->data, sz);
ADD_READONLY_CHECK(env, bin->data, bin->size);
return 1;
}
@@ -1844,7 +1844,7 @@ void erl_nif_init()
#ifdef USE_VM_PROBES
void dtrace_nifenv_str(ErlNifEnv *env, char *process_buf)
{
- dtrace_pid_str(env->proc->id, process_buf);
+ dtrace_pid_str(env->proc->common.id, process_buf);
}
#endif
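
The erl_nif.c hunks above adjust enif_self() and enif_send() to read the caller id from proc->common.id. For context, a minimal hedged sketch of how a NIF drives those two calls; notify_self_nif() is an invented example function, not part of this diff:

#include "erl_nif.h"

static ERL_NIF_TERM notify_self_nif(ErlNifEnv *env, int argc,
                                    const ERL_NIF_TERM argv[])
{
    ErlNifPid self;
    ErlNifEnv *msg_env = enif_alloc_env();
    ERL_NIF_TERM msg = enif_make_atom(msg_env, "hello");

    enif_self(env, &self);               /* pid of the calling process */
    enif_send(env, &self, msg_env, msg); /* queue the message */
    enif_free_env(msg_env);
    return enif_make_atom(env, "ok");
}
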
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 7b4cb7b042..667bda255b 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -20,7 +20,7 @@
#ifndef ERL_NODE_CONTAINER_UTILS_H__
#define ERL_NODE_CONTAINER_UTILS_H__
-#include "erl_term.h"
+#include "erl_ptab.h"
/*
* Note regarding node containers:
@@ -29,9 +29,6 @@
* the emulator) for the Erlang data types that contain a reference
* to a node, i.e. pids, ports, and references.
*
- * Observe! The layouts of the node container data types have been
- * changed in R9.
- *
* Node containers are divided into internal and external node containers.
 * An internal node container refers to the current incarnation of the
 * node which it resides on. An external node container refers to
@@ -52,13 +49,6 @@
 * reference is a boxed data type. An internal node container has an
* implicit reference to the 'erts_this_node' element in the node table.
*
- * Due to the R9 changes in layouts of node containers there are room to
- * store more data than previously. Today (R9) this extra space is unused,
- * but it is planned to be used in the future. For example only 18 bits
- * are used for data in a pid but there is room for 28 bits of data (on a
- * 32-bit machine). Some preparations have been made in the emulator for
- * usage of this extra space.
- *
 * OBSERVE! Pids don't use fixed size 'serial' and 'number' fields any
* more. Previously the 15 bit 'number' field of a pid was used as index
* into the process table, and the 3 bit 'serial' field was used as a
@@ -104,8 +94,6 @@
#define internal_dist_entry(x) (erts_this_node->dist_entry)
#define external_dist_entry(x) (external_node((x))->dist_entry)
-extern int erts_use_r9_pids_ports;
-
/*
* For this node (and previous incarnations of this node), 0 is used as
* channel no. For other nodes, the atom index of the atom corresponding
@@ -128,47 +116,20 @@ extern int erts_use_r9_pids_ports;
* Pids *
\* */
-#define erts_max_processes erts_proc.max
-
-typedef struct {
- erts_smp_atomic_t *tab;
- int max;
- int tab_cache_lines;
- int pix_per_cache_line;
- int pix_cl_mask;
- int pix_cl_shift;
- int pix_cli_mask;
- int pix_cli_shift;
-} ErtsProcTab;
-
-extern ErtsProcTab erts_proc;
-
-ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data)
-{
- int n, pix;
-
- n = (int) pid_data;
- if (erts_proc.pix_cl_mask) {
- pix = ((n & erts_proc.pix_cl_mask) << erts_proc.pix_cl_shift);
- pix += ((n >> erts_proc.pix_cli_shift) & erts_proc.pix_cli_mask);
- }
- else {
- n %= erts_proc.max;
- pix = n % erts_proc.tab_cache_lines;
- pix *= erts_proc.pix_per_cache_line;
- pix += n / erts_proc.tab_cache_lines;
- }
- ASSERT(0 <= pix && pix < erts_proc.max);
- return pix;
-}
+extern ErtsPTab erts_proc;
-#endif
+#define make_internal_pid(D) erts_ptab_make_id(&erts_proc, \
+ (D), \
+ _TAG_IMMED1_PID)
-#define internal_pid_index(x) erts_pid_data2ix(internal_pid_data((x)))
+#define internal_pid_index(PID) (ASSERT_EXPR(is_internal_pid((PID))), \
+ erts_ptab_id2pix(&erts_proc, (PID)))
+
+#define internal_pid_data(PID) (ASSERT_EXPR(is_internal_pid((PID))), \
+ erts_ptab_id2data(&erts_proc, (PID)))
+
+#define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x)))
+#define internal_pid_serial(x) _GET_PID_SER(internal_pid_data((x)))
#define internal_pid_node_name(x) (internal_pid_node((x))->sysname)
#define external_pid_node_name(x) (external_pid_node((x))->sysname)
@@ -208,34 +169,37 @@ ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data)
|| is_external_pid((x)))
#define is_not_pid(x) (!is_pid(x))
-#define ERTS_MAX_R9_PROCESSES (1 << ERTS_R9_PROC_BITS)
-
/*
* Maximum number of processes. We want the number to fit in a SMALL on
* 32-bit CPU.
*/
-#define ERTS_MAX_PROCESSES ((SWORD_CONSTANT(1) << 27)-1)
-#if (ERTS_MAX_PROCESSES > MAX_SMALL)
-# error "The maximum number of processes must fit in a SMALL."
-#endif
-
+#define ERTS_MAX_PROCESSES (ERTS_PTAB_MAX_SIZE-1)
#define ERTS_MAX_PID_DATA ((1 << _PID_DATA_SIZE) - 1)
#define ERTS_MAX_PID_NUMBER ((1 << _PID_NUM_SIZE) - 1)
#define ERTS_MAX_PID_SERIAL ((1 << _PID_SER_SIZE) - 1)
-#define ERTS_MAX_PID_R9_SERIAL ((1 << _PID_R9_SER_SIZE) - 1)
-#define ERTS_R9_PROC_BITS (_PID_R9_SER_SIZE + _PID_NUM_SIZE)
#define ERTS_PROC_BITS (_PID_SER_SIZE + _PID_NUM_SIZE)
-#define ERTS_INVALID_PID make_internal_pid(ERTS_MAX_PID_DATA)
+#define ERTS_INVALID_PID ERTS_PTAB_INVALID_ID(_TAG_IMMED1_PID)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Ports *
\* */
-#define internal_port_index(x) (internal_port_data((x)) \
- & erts_port_tab_index_mask)
+extern ErtsPTab erts_port;
+
+#define make_internal_port(D) erts_ptab_make_id(&erts_port, \
+ (D), \
+ _TAG_IMMED1_PORT)
+
+#define internal_port_index(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \
+ erts_ptab_id2pix(&erts_port, (PRT)))
+
+#define internal_port_data(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \
+ erts_ptab_id2data(&erts_port, (PRT)))
+
+#define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x)))
#define internal_port_node_name(x) (internal_port_node((x))->sysname)
#define external_port_node_name(x) (external_port_node((x))->sysname)
@@ -274,18 +238,18 @@ ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data)
#define is_not_port(x) (!is_port(x))
/* Highest port-ID part in a term of type Port
- Not necessarily the same as the variable erts_max_ports
+ Not necessarily the same as current maximum port table size
which defines the maximum number of simultaneous Ports
in the Erlang node. ERTS_MAX_PORTS is a hard upper limit.
*/
-#define ERTS_MAX_R9_PORTS (1 << ERTS_R9_PORTS_BITS)
-#define ERTS_MAX_PORTS (1 << ERTS_PORTS_BITS)
-
+#define ERTS_MAX_PORTS (ERTS_PTAB_MAX_SIZE-1)
#define ERTS_MAX_PORT_DATA ((1 << _PORT_DATA_SIZE) - 1)
#define ERTS_MAX_PORT_NUMBER ((1 << _PORT_NUM_SIZE) - 1)
-#define ERTS_R9_PORTS_BITS (_PORT_R9_NUM_SIZE)
#define ERTS_PORTS_BITS (_PORT_NUM_SIZE)
+
+#define ERTS_INVALID_PORT ERTS_PTAB_INVALID_ID(_TAG_IMMED1_PORT)
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Refs *
\* */
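
With the erl_node_container_utils.h changes above, pid and port ids are produced and decomposed through the shared ptab layer (erts_ptab_make_id(), erts_ptab_id2pix(), erts_ptab_id2data()) instead of the old erts_proc-specific inline. A hedged sketch of walking the process table with the new interface; count_live_processes() is invented for illustration:

static int count_live_processes(void)
{
    int i, n = 0;
    int max = erts_ptab_max(&erts_proc);   /* current table size */
    for (i = 0; i < max; i++) {
        Process *proc = erts_pix2proc(i);  /* NULL for an empty slot */
        if (proc)
            n++;
    }
    return n;
}
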
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 40837d3817..ebfba065d1 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -116,8 +116,7 @@ dist_table_alloc(void *dep_tmpl)
dep->qsize = 0;
dep->out_queue.first = NULL;
dep->out_queue.last = NULL;
- dep->suspended.first = NULL;
- dep->suspended.last = NULL;
+ dep->suspended = NULL;
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
@@ -769,8 +768,7 @@ void erts_init_node_tables(void)
erts_this_dist_entry->qsize = 0;
erts_this_dist_entry->out_queue.first = NULL;
erts_this_dist_entry->out_queue.last = NULL;
- erts_this_dist_entry->suspended.first = NULL;
- erts_this_dist_entry->suspended.last = NULL;
+ erts_this_dist_entry->suspended = NULL;
erts_this_dist_entry->finalized_out_queue.first = NULL;
erts_this_dist_entry->finalized_out_queue.last = NULL;
@@ -1268,7 +1266,7 @@ setup_reference_table(void)
ErlHeapFragment *hfp;
DistEntry *dep;
HashInfo hi;
- int i;
+ int i, max;
DeclareTmpHeapNoproc(heap,3);
inserted_bins = NULL;
@@ -1297,8 +1295,9 @@ setup_reference_table(void)
UnUseTmpHeapNoproc(3);
+ max = erts_ptab_max(&erts_proc);
/* Insert all processes */
- for (i = 0; i < erts_max_processes; i++) {
+ for (i = 0; i < max; i++) {
Process *proc = erts_pix2proc(i);
if (proc) {
ErlMessage *msg;
@@ -1306,12 +1305,12 @@ setup_reference_table(void)
/* Insert Heap */
insert_offheap(&(proc->off_heap),
HEAP_REF,
- proc->id);
+ proc->common.id);
/* Insert message buffers */
for(hfp = proc->mbuf; hfp; hfp = hfp->next)
insert_offheap(&(hfp->off_heap),
HEAP_REF,
- proc->id);
+ proc->common.id);
/* Insert msg msg buffers */
for (msg = proc->msg.first; msg; msg = msg->next) {
ErlHeapFragment *heap_frag = NULL;
@@ -1321,7 +1320,7 @@ setup_reference_table(void)
else {
if (msg->data.dist_ext->dep)
insert_dist_entry(msg->data.dist_ext->dep,
- HEAP_REF, proc->id, 0);
+ HEAP_REF, proc->common.id, 0);
if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
}
@@ -1329,10 +1328,10 @@ setup_reference_table(void)
if (heap_frag)
insert_offheap(&(heap_frag->off_heap),
HEAP_REF,
- proc->id);
+ proc->common.id);
}
#ifdef ERTS_SMP
- for (msg = proc->u.alive.msg_inq.first; msg; msg = msg->next) {
+ for (msg = proc->msg_inq.first; msg; msg = msg->next) {
ErlHeapFragment *heap_frag = NULL;
if (msg->data.attached) {
if (is_value(ERL_MESSAGE_TERM(msg)))
@@ -1340,7 +1339,7 @@ setup_reference_table(void)
else {
if (msg->data.dist_ext->dep)
insert_dist_entry(msg->data.dist_ext->dep,
- HEAP_REF, proc->id, 0);
+ HEAP_REF, proc->common.id, 0);
if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
}
@@ -1348,19 +1347,19 @@ setup_reference_table(void)
if (heap_frag)
insert_offheap(&(heap_frag->off_heap),
HEAP_REF,
- proc->id);
+ proc->common.id);
}
#endif
/* Insert links */
- if(proc->nlinks)
- insert_links(proc->nlinks, proc->id);
- if(proc->monitors)
- insert_monitors(proc->monitors, proc->id);
+ if (ERTS_P_LINKS(proc))
+ insert_links(ERTS_P_LINKS(proc), proc->common.id);
+ if (ERTS_P_MONITORS(proc))
+ insert_monitors(ERTS_P_MONITORS(proc), proc->common.id);
/* Insert controller */
{
DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
if (dep)
- insert_dist_entry(dep, CTRL_REF, proc->id, 0);
+ insert_dist_entry(dep, CTRL_REF, proc->common.id, 0);
}
}
}
@@ -1370,21 +1369,33 @@ setup_reference_table(void)
#endif
/* Insert all ports */
- for (i = 0; i < erts_max_ports; i++) {
- if (erts_port[i].status & ERTS_PORT_SFLGS_DEAD)
+ max = erts_ptab_max(&erts_port);
+ for (i = 0; i < max; i++) {
+ erts_aint32_t state;
+ Port *prt;
+
+ prt = erts_pix2port(i);
+ if (!prt)
+ continue;
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_DEAD)
continue;
/* Insert links */
- if(erts_port[i].nlinks)
- insert_links(erts_port[i].nlinks, erts_port[i].id);
+ if (ERTS_P_LINKS(prt))
+ insert_links(ERTS_P_LINKS(prt), prt->common.id);
+ /* Insert monitors */
+ if (ERTS_P_MONITORS(prt))
+ insert_monitors(ERTS_P_MONITORS(prt), prt->common.id);
/* Insert port data */
- for(hfp = erts_port[i].bp; hfp; hfp = hfp->next)
- insert_offheap(&(hfp->off_heap), HEAP_REF, erts_port[i].id);
+ for(hfp = prt->bp; hfp; hfp = hfp->next)
+ insert_offheap(&(hfp->off_heap), HEAP_REF, prt->common.id);
/* Insert controller */
- if (erts_port[i].dist_entry)
- insert_dist_entry(erts_port[i].dist_entry,
+ if (prt->dist_entry)
+ insert_dist_entry(prt->dist_entry,
CTRL_REF,
- erts_port[i].id,
+ prt->common.id,
0);
}
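
The setup_reference_table() hunk above shows the new way to walk the port table: fetch the slot with erts_pix2port(), skip empty slots, and read the atomic state word before using the port. The same idiom in isolation, hedged; count_live_ports() is invented for illustration:

static int count_live_ports(void)
{
    int i, n = 0;
    int max = erts_ptab_max(&erts_port);
    for (i = 0; i < max; i++) {
        Port *prt = erts_pix2port(i);
        if (!prt)
            continue;                    /* empty or busy slot */
        if (erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_DEAD)
            continue;                    /* FREE or INITIALIZING */
        n++;
    }
    return n;
}
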
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index 4a015bdef9..af60071ea5 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -84,10 +84,6 @@ typedef struct {
} ErtsDistOutputQueue;
struct ErtsProcList_;
-typedef struct {
- struct ErtsProcList_ *first;
- struct ErtsProcList_ *last;
-} ErtsDistSuspended;
/*
* Lock order:
@@ -100,7 +96,6 @@ typedef struct {
*/
struct erl_link;
-struct port;
typedef struct dist_entry_ {
HashBucket hash_bucket; /* Hash bucket */
@@ -135,13 +130,13 @@ typedef struct dist_entry_ {
Uint32 qflgs;
Sint qsize;
ErtsDistOutputQueue out_queue;
- ErtsDistSuspended suspended;
+ struct ErtsProcList_ *suspended;
ErtsDistOutputQueue finalized_out_queue;
erts_smp_atomic_t dist_cmd_scheduled;
ErtsPortTaskHandle dist_cmd;
- Uint (*send)(struct port *prt, ErtsDistOutputBuf *obuf);
+ Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
struct cache* cache; /* The atom cache */
} DistEntry;
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
new file mode 100644
index 0000000000..f4d73e716a
--- /dev/null
+++ b/erts/emulator/beam/erl_port.h
@@ -0,0 +1,942 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_PORT_TYPE__
+#define ERL_PORT_TYPE__
+typedef struct _erl_drv_port Port;
+typedef struct ErtsProc2PortSigData_ ErtsProc2PortSigData;
+#endif
+
+#if !defined(ERL_PORT_H__) && !defined(ERL_PORT_GET_PORT_TYPE_ONLY__)
+#define ERL_PORT_H__
+
+#include "erl_port_task.h"
+#include "erl_ptab.h"
+#include "erl_thr_progress.h"
+#include "erl_trace.h"
+
+#define ERTS_DEFAULT_MAX_PORTS (1 << 16)
+#define ERTS_MIN_PORTS 1024
+
+extern int erts_port_synchronous_ops;
+extern int erts_port_schedule_all_ops;
+extern int erts_port_parallelism;
+
+typedef struct erts_driver_t_ erts_driver_t;
+
+#define ERTS_INVALID_ERL_DRV_PORT ((ErlDrvPort) (SWord) -1)
+#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
+
+typedef struct {
+ ErlDrvSizeT size; /* total size in bytes */
+
+ SysIOVec* v_start;
+ SysIOVec* v_end;
+ SysIOVec* v_head;
+ SysIOVec* v_tail;
+ SysIOVec v_small[SMALL_IO_QUEUE];
+
+ ErlDrvBinary** b_start;
+ ErlDrvBinary** b_end;
+ ErlDrvBinary** b_head;
+ ErlDrvBinary** b_tail;
+ ErlDrvBinary* b_small[SMALL_IO_QUEUE];
+} ErlIOQueue;
+
+typedef struct line_buf { /* Buffer used in line oriented I/O */
+ ErlDrvSizeT bufsiz; /* Size of character buffer */
+ ErlDrvSizeT ovlen; /* Length of overflow data */
+ ErlDrvSizeT ovsiz; /* Actual size of overflow buffer */
+ char data[1]; /* Starting point of buffer data,
+ data[0] is a flag indicating an unprocessed CR,
+ The rest is the overflow buffer. */
+} LineBuf;
+
+/*
+ * Items part of erlang:port_info/1 result. Note am_registered_name
+ * *needs* to be first.
+ */
+
+#define ERTS_PORT_INFO_1_ITEMS \
+ { am_registered_name, /* Needs to be first */ \
+ am_name, \
+ am_links, \
+ am_id, \
+ am_connected, \
+ am_input, \
+ am_output, \
+ am_os_pid }
+
+/*
+ * Port Specific Data.
+ *
+ * Only use PrtSD for very rarely used data.
+ */
+
+#define ERTS_PRTSD_SCHED_ID 0
+
+#define ERTS_PRTSD_SIZE 1
+
+typedef struct {
+ void *data[ERTS_PRTSD_SIZE];
+} ErtsPrtSD;
+
+#ifdef ERTS_SMP
+typedef struct ErtsXPortsList_ ErtsXPortsList;
+#endif
+
+/*
+ * Port locking:
+ *
+ * Locking is done either driver specific or port specific. When
+ * driver specific locking is used, all instances of the driver,
+ * i.e. ports running the driver, share the same lock. When port
+ * specific locking is used, each instance has its own lock.
+ *
+ * Most fields in the Port structure are protected by the lock
+ * referred to by the 'lock' field. This lock is shared between
+ * all ports running the same driver when driver specific locking
+ * is used.
+ *
+ * The 'sched' field is protected by the run queue lock that the
+ * port currently is assigned to.
+ *
+ */
+
+struct _erl_drv_port {
+ ErtsPTabElementCommon common; /* *Need* to be first in struct */
+
+ ErtsPortTaskSched sched;
+ ErtsPortTaskHandle timeout_task;
+#ifdef ERTS_SMP
+ erts_mtx_t *lock;
+ ErtsXPortsList *xports;
+ erts_smp_atomic_t run_queue;
+#else
+ erts_atomic32_t refc;
+ int cleanup;
+#endif
+ erts_atomic_t connected; /* A connected process */
+ Eterm caller; /* Current caller. */
+ Eterm data; /* Data associated with port. */
+ ErlHeapFragment* bp; /* Heap fragment holding data (NULL if imm data). */
+ Uint bytes_in; /* Number of bytes read */
+ Uint bytes_out; /* Number of bytes written */
+
+ ErlIOQueue ioq; /* driver accessible i/o queue */
+ DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
+ char *name; /* String used in the open */
+ erts_driver_t* drv_ptr;
+ UWord drv_data;
+ SWord os_pid; /* Child process ID */
+ ErtsProcList *suspended; /* List of suspended processes. */
+ LineBuf *linebuf; /* Buffer to hold data not ready for
+ process to get (line oriented I/O)*/
+ erts_atomic32_t state; /* Status and type flags */
+ int control_flags; /* Flags for port_control() */
+ ErlDrvPDL port_data_lock;
+
+ ErtsPrtSD *psd; /* Port specific data */
+};
+
+#define ERTS_PORT_GET_CONNECTED(PRT) \
+ ((Eterm) erts_atomic_read_nob(&(PRT)->connected))
+#define ERTS_PORT_SET_CONNECTED(PRT, PID) \
+ erts_atomic_set_relb(&(PRT)->connected, (erts_aint_t) (PID))
+#define ERTS_PORT_INIT_CONNECTED(PRT, PID) \
+ erts_atomic_init_nob(&(PRT)->connected, (erts_aint_t) (PID))
+
+
+struct erl_drv_port_data_lock {
+ erts_mtx_t mtx;
+ erts_atomic_t refc;
+ Port *prt;
+};
+
+ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_port_runq(Port *prt)
+{
+#ifdef ERTS_SMP
+ ErtsRunQueue *rq1, *rq2;
+ rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
+ if (!rq1)
+ return NULL;
+ while (1) {
+ erts_smp_runq_lock(rq1);
+ rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
+ if (rq1 == rq2)
+ return rq1;
+ erts_smp_runq_unlock(rq1);
+ rq1 = rq2;
+ if (!rq1)
+ return NULL;
+ }
+#else
+ return ERTS_RUNQ_IX(0);
+#endif
+}
+
+#endif
+
+
+ERTS_GLB_INLINE void *erts_prtsd_get(Port *p, int ix);
+ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void *
+erts_prtsd_get(Port *prt, int ix)
+{
+ return prt->psd ? prt->psd->data[ix] : NULL;
+}
+
+ERTS_GLB_INLINE void *
+erts_prtsd_set(Port *prt, int ix, void *data)
+{
+ if (prt->psd) {
+ void *old = prt->psd->data[ix];
+ prt->psd->data[ix] = data;
+ return old;
+ }
+ else {
+ prt->psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD));
+ prt->psd->data[ix] = data;
+ return NULL;
+ }
+}
+
+#endif
+
+extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */
+extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
+
+
+/* port status flags */
+
+#define ERTS_PORT_SFLG_CONNECTED ((Uint32) (1 << 0))
+/* Port have begun exiting */
+#define ERTS_PORT_SFLG_EXITING ((Uint32) (1 << 1))
+/* Distribution port */
+#define ERTS_PORT_SFLG_DISTRIBUTION ((Uint32) (1 << 2))
+#define ERTS_PORT_SFLG_BINARY_IO ((Uint32) (1 << 3))
+#define ERTS_PORT_SFLG_SOFT_EOF ((Uint32) (1 << 4))
+/* Flow control */
+/* Port is closing (no i/o accepted) */
+#define ERTS_PORT_SFLG_CLOSING ((Uint32) (1 << 5))
+/* Send a closed message when terminating */
+#define ERTS_PORT_SFLG_SEND_CLOSED ((Uint32) (1 << 6))
+/* Line oriented io on port */
+#define ERTS_PORT_SFLG_LINEBUF_IO ((Uint32) (1 << 7))
+/* Port structure is free (not in use) */
+#define ERTS_PORT_SFLG_FREE ((Uint32) (1 << 8))
+#define ERTS_PORT_SFLG_INITIALIZING ((Uint32) (1 << 9))
+/* Port uses port specific locking (opposed to driver specific locking) */
+#define ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK ((Uint32) (1 << 10))
+#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 11))
+/* Last port to terminate halts the emulator */
+#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 12))
+#ifdef DEBUG
+/* Only debug: make sure all flags aren't cleared unintentionally */
+#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31))
+#endif
+
+/* Combinations of port status flags */
+#define ERTS_PORT_SFLGS_DEAD \
+ (ERTS_PORT_SFLG_FREE | ERTS_PORT_SFLG_INITIALIZING)
+#define ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
+ (ERTS_PORT_SFLGS_DEAD | ERTS_PORT_SFLG_INVALID)
+#define ERTS_PORT_SFLGS_INVALID_LOOKUP \
+ (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
+ | ERTS_PORT_SFLG_EXITING \
+ | ERTS_PORT_SFLG_CLOSING)
+#define ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP \
+ (ERTS_PORT_SFLGS_INVALID_LOOKUP \
+ | ERTS_PORT_SFLG_DISTRIBUTION)
+
+
+/*
+ * Costs in reductions for some port operations.
+ */
+#define ERTS_PORT_REDS_EXECUTE 10
+#define ERTS_PORT_REDS_FREE 100
+#define ERTS_PORT_REDS_TIMEOUT 400
+#define ERTS_PORT_REDS_INPUT 400
+#define ERTS_PORT_REDS_OUTPUT 400
+#define ERTS_PORT_REDS_EVENT 400
+#define ERTS_PORT_REDS_CMD_OUTPUTV 400
+#define ERTS_PORT_REDS_CMD_OUTPUT 400
+#define ERTS_PORT_REDS_EXIT 300
+#define ERTS_PORT_REDS_CONNECT 40
+#define ERTS_PORT_REDS_UNLINK 40
+#define ERTS_PORT_REDS_LINK 40
+#define ERTS_PORT_REDS_BADSIG 40
+#define ERTS_PORT_REDS_CONTROL 400
+#define ERTS_PORT_REDS_CALL 400
+#define ERTS_PORT_REDS_INFO 100
+#define ERTS_PORT_REDS_SET_DATA 40
+#define ERTS_PORT_REDS_GET_DATA 40
+#define ERTS_PORT_REDS_TERMINATE 200
+
+void print_port_info(Port *, int, void *);
+void erts_port_free(Port *);
+#ifndef ERTS_SMP
+void erts_port_cleanup(Port *);
+#endif
+void erts_fire_port_monitor(Port *prt, Eterm ref);
+#ifdef ERTS_SMP
+int erts_port_handle_xports(Port *);
+#endif
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int erts_lc_is_port_locked(Port *);
+#endif
+
+ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt);
+ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt);
+ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc);
+
+ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt);
+ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt);
+ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt)
+{
+#ifdef ERTS_SMP
+ erts_ptab_inc_refc(&prt->common);
+#else
+ erts_atomic32_inc_nob(&prt->refc);
+#endif
+}
+
+ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt)
+{
+#ifdef ERTS_SMP
+ int referred = erts_ptab_dec_test_refc(&prt->common);
+ if (!referred)
+ erts_port_free(prt);
+#else
+ int refc = erts_atomic32_dec_read_nob(&prt->refc);
+ if (refc == 0)
+ erts_port_free(prt);
+#endif
+}
+
+ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc)
+{
+#ifdef ERTS_SMP
+ int referred = erts_ptab_add_test_refc(&prt->common, add_refc);
+ if (!referred)
+ erts_port_free(prt);
+#else
+ int refc = erts_atomic32_add_read_nob(&prt->refc, add_refc);
+ if (refc == 0)
+ erts_port_free(prt);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_port_trylock(Port *prt)
+{
+#ifdef ERTS_SMP
+ /* *Need* to be a managed thread */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ return erts_mtx_trylock(prt->lock);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_port_lock(Port *prt)
+{
+#ifdef ERTS_SMP
+ /* *Need* to be a managed thread */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ erts_mtx_lock(prt->lock);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_port_unlock(Port *prt)
+{
+#ifdef ERTS_SMP
+ /* *Need* to be a managed thread */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ erts_mtx_unlock(prt->lock);
+#endif
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+#define ERTS_INVALID_PORT_OPT(PP, ID, FLGS) \
+ (!(PP) \
+ || (erts_atomic32_read_nob(&(PP)->state) & (FLGS)) \
+ || (PP)->common.id != (ID))
+
+/* port lookup */
+
+#define INVALID_PORT(PP, ID) \
+ ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_LOOKUP)
+
+/* Invalidate a tracer port if anything is suspicious, for instance
+ * if the port is a distribution port or is busy.
+ */
+#define INVALID_TRACER_PORT(PP, ID) \
+ ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)
+
+#define ERTS_PORT_SCHED_ID(P, ID) \
+ ((Uint) (UWord) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (UWord) (ID)))
+
+extern const Port erts_invalid_port;
+#define ERTS_PORT_LOCK_BUSY ((Port *) &erts_invalid_port)
+
+int erts_is_port_ioq_empty(Port *);
+void erts_terminate_port(Port *);
+
+#ifdef ERTS_SMP
+Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
+#endif
+
+ERTS_GLB_INLINE Port *erts_pix2port(int);
+ERTS_GLB_INLINE Port *erts_port_lookup_raw(Eterm);
+ERTS_GLB_INLINE Port *erts_port_lookup(Eterm, Uint32);
+ERTS_GLB_INLINE Port*erts_id2port(Eterm id);
+ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
+ERTS_GLB_INLINE void erts_port_release(Port *);
+#ifdef ERTS_SMP
+ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs);
+ERTS_GLB_INLINE void erts_thr_port_release(Port *prt);
+#endif
+ERTS_GLB_INLINE Port *erts_thr_drvport2port_raw(ErlDrvPort);
+ERTS_GLB_INLINE Port *erts_drvport2port_raw(ErlDrvPort drvport);
+ERTS_GLB_INLINE Port *erts_drvport2port(ErlDrvPort, erts_aint32_t *);
+ERTS_GLB_INLINE Port *erts_drvportid2port(Eterm);
+ERTS_GLB_INLINE Eterm erts_drvport2id(ErlDrvPort);
+ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm);
+ERTS_GLB_INLINE int erts_is_port_alive(Eterm);
+ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm);
+ERTS_GLB_INLINE int erts_port_driver_callback_epilogue(Port *, erts_aint32_t *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Port *erts_pix2port(int ix)
+{
+ Port *prt;
+ ASSERT(0 <= ix && ix < erts_ptab_max(&erts_port));
+ prt = (Port *) erts_ptab_pix2intptr_nob(&erts_port, ix);
+ return prt == ERTS_PORT_LOCK_BUSY ? NULL : prt;
+}
+
+ERTS_GLB_INLINE Port *
+erts_port_lookup_raw(Eterm id)
+{
+ Port *prt;
+
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+ return prt && prt->common.id == id ? prt : NULL;
+}
+
+ERTS_GLB_INLINE Port *
+erts_port_lookup(Eterm id, Uint32 invalid_sflgs)
+{
+ Port *prt = erts_port_lookup_raw(id);
+ return (!prt
+ ? NULL
+ : ((invalid_sflgs & erts_atomic32_read_nob(&prt->state))
+ ? NULL
+ : prt));
+}
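A rough usage sketch of the two lookup flavours (not part of the patch; port_id and c_p are assumed to come from the caller, and ERTS_PROC_LOCK_MAIN stands for whatever process locks the caller holds):

    /* Lock-free lookup: valid only while thread progress is delayed,
     * e.g. on a scheduler thread; no port lock is taken. */
    Port *prt = erts_port_lookup(port_id, ERTS_PORT_SFLGS_INVALID_LOOKUP);

    /* Locking lookup: returns the port with its lock held, temporarily
     * releasing the caller's process locks if the trylock fails. */
    Port *locked_prt = erts_id2port_sflgs(port_id, c_p, ERTS_PROC_LOCK_MAIN,
                                          ERTS_PORT_SFLGS_INVALID_LOOKUP);
    if (locked_prt)
        erts_port_release(locked_prt);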
+
+
+ERTS_GLB_INLINE Port*
+erts_id2port(Eterm id)
+{
+ erts_aint32_t state;
+ Port *prt;
+
+ /* Only allowed to be called from managed threads */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+
+ if (!prt || prt->common.id != id)
+ return NULL;
+
+ erts_smp_port_lock(prt);
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) {
+ erts_smp_port_unlock(prt);
+ return NULL;
+ }
+
+ return prt;
+}
+
+
+ERTS_GLB_INLINE Port*
+erts_id2port_sflgs(Eterm id,
+ Process *c_p, ErtsProcLocks c_p_locks,
+ Uint32 invalid_sflgs)
+{
+#ifdef ERTS_SMP
+ int no_proc_locks = !c_p || !c_p_locks;
+#endif
+ erts_aint32_t state;
+ Port *prt;
+
+ /* Only allowed to be called from managed threads */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+
+ if (!prt || prt->common.id != id)
+ return NULL;
+
+#ifdef ERTS_SMP
+ if (no_proc_locks)
+ erts_smp_port_lock(prt);
+ else if (erts_smp_port_trylock(prt) == EBUSY) {
+ /* Unlock process locks, and acquire locks in lock order... */
+ erts_smp_proc_unlock(c_p, c_p_locks);
+ erts_smp_port_lock(prt);
+ erts_smp_proc_lock(c_p, c_p_locks);
+ }
+#endif
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & invalid_sflgs) {
+#ifdef ERTS_SMP
+ erts_smp_port_unlock(prt);
+#endif
+ return NULL;
+ }
+
+ return prt;
+}
+
+ERTS_GLB_INLINE void
+erts_port_release(Port *prt)
+{
+ /* Only allowed to be called from managed threads */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+#ifdef ERTS_SMP
+ erts_smp_port_unlock(prt);
+#else
+ if (prt->cleanup) {
+ prt->cleanup = 0;
+ erts_port_cleanup(prt);
+ }
+#endif
+}
+
+#ifdef ERTS_SMP
+
+/*
+ * erts_thr_id2port_sflgs() and erts_thr_port_release() can
+ * be used by unmanaged threads in the SMP case.
+ */
+ERTS_GLB_INLINE Port *
+erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs)
+{
+ Port *prt;
+ ErtsThrPrgrDelayHandle dhndl;
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+
+ if (!prt || prt->common.id != id) {
+ erts_thr_progress_unmanaged_continue(dhndl);
+ prt = NULL;
+ }
+ else {
+ erts_aint32_t state;
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_port_inc_refc(prt);
+ erts_thr_progress_unmanaged_continue(dhndl);
+ }
+
+ erts_mtx_lock(prt->lock);
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & invalid_sflgs) {
+ erts_mtx_unlock(prt->lock);
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_port_dec_refc(prt);
+ prt = NULL;
+ }
+ }
+
+ return prt;
+}
+
+ERTS_GLB_INLINE void
+erts_thr_port_release(Port *prt)
+{
+ erts_mtx_unlock(prt->lock);
+#ifdef ERTS_SMP
+ if (!erts_thr_progress_is_managed_thread())
+ erts_port_dec_refc(prt);
+#endif
+}
+
+#endif
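A minimal sketch of the intended pattern for an unmanaged (non-scheduler) thread, assuming port_id is obtained elsewhere:

    Port *prt = erts_thr_id2port_sflgs(port_id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
    if (prt) {
        /* The port lock is held here; for unmanaged threads an extra
         * reference also keeps the port alive. */
        /* ... use the port ... */
        erts_thr_port_release(prt);  /* unlocks and drops that reference */
    }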
+
+ERTS_GLB_INLINE Port*
+erts_thr_drvport2port_raw(ErlDrvPort drvport)
+{
+#if ERTS_ENABLE_LOCK_CHECK
+ int emu_thread = erts_lc_is_emu_thr();
+#endif
+ if (drvport == ERTS_INVALID_ERL_DRV_PORT)
+ return NULL;
+ else {
+ Port *prt = (Port *) drvport;
+#if ERTS_ENABLE_LOCK_CHECK
+ if (!ERTS_IS_CRASH_DUMPING) {
+ if (emu_thread) {
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(!prt->port_data_lock
+ || erts_lc_mtx_is_locked(&prt->port_data_lock->mtx));
+ }
+ else {
+ ERTS_LC_ASSERT(prt->port_data_lock);
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&prt->port_data_lock->mtx));
+ }
+ }
+#endif
+ return prt;
+ }
+}
+
+ERTS_GLB_INLINE Port*
+erts_drvport2port_raw(ErlDrvPort drvport)
+{
+ ERTS_LC_ASSERT(erts_lc_is_emu_thr());
+ if (drvport == ERTS_INVALID_ERL_DRV_PORT)
+ return NULL;
+ else {
+ Port *prt = (Port *) drvport;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
+ return prt;
+ }
+}
+
+ERTS_GLB_INLINE Port*
+erts_drvport2port(ErlDrvPort drvport, erts_aint32_t *statep)
+{
+ Port *prt = erts_drvport2port_raw(drvport);
+ erts_aint32_t state;
+ if (!prt)
+ return NULL;
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ return NULL;
+ if (statep)
+ *statep = state;
+ return prt;
+}
+
+ERTS_GLB_INLINE Port*
+erts_drvportid2port(Eterm id)
+{
+ Port *prt;
+ erts_aint32_t state;
+ if (is_not_internal_port(id))
+ return NULL;
+ prt = (Port *) erts_ptab_pix2intptr_nob(&erts_port,
+ internal_port_index(id));
+ if (!prt)
+ return NULL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
+ if (prt->common.id != id)
+ return NULL;
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ return NULL;
+ return prt;
+}
+
+ERTS_GLB_INLINE Eterm
+erts_drvport2id(ErlDrvPort drvport)
+{
+ Port *prt = erts_drvport2port_raw(drvport);
+ if (!prt)
+ return am_undefined;
+ else
+ return prt->common.id;
+}
+
+ERTS_GLB_INLINE Uint32
+erts_portid2status(Eterm id)
+{
+ Port *prt = erts_port_lookup_raw(id);
+ if (prt)
+ return (Uint32) erts_atomic32_read_acqb(&prt->state);
+ else
+ return ERTS_PORT_SFLG_INVALID;
+}
+
+ERTS_GLB_INLINE int
+erts_is_port_alive(Eterm id)
+{
+ return !(erts_portid2status(id) & (ERTS_PORT_SFLG_INVALID
+ | ERTS_PORT_SFLGS_DEAD));
+}
+
+ERTS_GLB_INLINE int
+erts_is_valid_tracer_port(Eterm id)
+{
+ return !(erts_portid2status(id) & ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
+}
+
+ERTS_GLB_INLINE int
+erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep)
+{
+ int reds = 0;
+ erts_aint32_t state;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(prt)) {
+ reds += ERTS_PORT_REDS_TERMINATE;
+ erts_terminate_port(prt);
+ state = erts_atomic32_read_nob(&prt->state);
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ }
+
+#ifdef ERTS_SMP
+ if (prt->xports) {
+ reds += erts_port_handle_xports(prt);
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ASSERT(!prt->xports);
+ }
+#endif
+
+ if (statep)
+ *statep = state;
+
+ return reds;
+}
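A rough sketch of where the epilogue slots in after running a driver callback (reduction accounting only; the callback invocation on prt is elided, and the base cost shown is an assumption):

    erts_aint32_t state;
    int reds = ERTS_PORT_REDS_OUTPUT;   /* base cost of the callback itself */
    /* ... invoke the driver's output callback on prt here ... */
    reds += erts_port_driver_callback_epilogue(prt, &state);
    /* 'reds' now also includes any termination/xports cost, and 'state'
     * holds the port state as read after the epilogue ran. */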
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+void erts_port_resume_procs(Port *);
+
+struct binary;
+
+#define ERTS_P2P_SIG_TYPE_BAD 0
+#define ERTS_P2P_SIG_TYPE_OUTPUT 1
+#define ERTS_P2P_SIG_TYPE_OUTPUTV 2
+#define ERTS_P2P_SIG_TYPE_CONNECT 3
+#define ERTS_P2P_SIG_TYPE_EXIT 4
+#define ERTS_P2P_SIG_TYPE_CONTROL 5
+#define ERTS_P2P_SIG_TYPE_CALL 6
+#define ERTS_P2P_SIG_TYPE_INFO 7
+#define ERTS_P2P_SIG_TYPE_LINK 8
+#define ERTS_P2P_SIG_TYPE_UNLINK 9
+#define ERTS_P2P_SIG_TYPE_SET_DATA 10
+#define ERTS_P2P_SIG_TYPE_GET_DATA 11
+
+#define ERTS_P2P_SIG_TYPE_BITS 4
+#define ERTS_P2P_SIG_TYPE_MASK \
+ ((1 << ERTS_P2P_SIG_TYPE_BITS) - 1)
+
+#define ERTS_P2P_SIG_DATA_FLG(N) \
+ (1 << (ERTS_P2P_SIG_TYPE_BITS + (N)))
+#define ERTS_P2P_SIG_DATA_FLG_BANG_OP ERTS_P2P_SIG_DATA_FLG(0)
+#define ERTS_P2P_SIG_DATA_FLG_REPLY ERTS_P2P_SIG_DATA_FLG(1)
+#define ERTS_P2P_SIG_DATA_FLG_NOSUSPEND ERTS_P2P_SIG_DATA_FLG(2)
+#define ERTS_P2P_SIG_DATA_FLG_FORCE ERTS_P2P_SIG_DATA_FLG(3)
+#define ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT ERTS_P2P_SIG_DATA_FLG(4)
+#define ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK ERTS_P2P_SIG_DATA_FLG(5)
+#define ERTS_P2P_SIG_DATA_FLG_SCHED ERTS_P2P_SIG_DATA_FLG(6)
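The encoding follows directly from the macros above: the low ERTS_P2P_SIG_TYPE_BITS bits of the flag word hold the signal type and the remaining bits hold the flags. For example:

    int flags = ERTS_P2P_SIG_TYPE_OUTPUT
                | ERTS_P2P_SIG_DATA_FLG_BANG_OP
                | ERTS_P2P_SIG_DATA_FLG_NOSUSPEND;
    ASSERT((flags & ERTS_P2P_SIG_TYPE_MASK) == ERTS_P2P_SIG_TYPE_OUTPUT);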
+
+struct ErtsProc2PortSigData_ {
+ int flags;
+ Eterm caller;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ union {
+ struct {
+ Eterm from;
+ ErlIOVec *evp;
+ ErlDrvBinary *cbinp;
+ } outputv;
+ struct {
+ Eterm from;
+ char *bufp;
+ ErlDrvSizeT size;
+ } output;
+ struct {
+ Eterm from;
+ Eterm connected;
+ } connect;
+ struct {
+ Eterm from;
+ Eterm reason;
+ ErlHeapFragment *bp;
+ } exit;
+ struct {
+ struct binary *binp;
+ unsigned int command;
+ char *bufp;
+ ErlDrvSizeT size;
+ } control;
+ struct {
+ unsigned int command;
+ char *bufp;
+ ErlDrvSizeT size;
+ } call;
+ struct {
+ Eterm item;
+ } info;
+ struct {
+ Eterm port;
+ Eterm to;
+ } link;
+ struct {
+ Eterm from;
+ } unlink;
+ struct {
+ ErlHeapFragment *bp;
+ Eterm data;
+ } set_data;
+ } u;
+};
+
+ERTS_GLB_INLINE int
+erts_proc2port_sig_is_command_op(ErtsProc2PortSigData *sigdp);
+ERTS_GLB_INLINE ErlDrvSizeT
+erts_proc2port_sig_command_data_size(ErtsProc2PortSigData *sigdp);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_proc2port_sig_is_command_op(ErtsProc2PortSigData *sigdp)
+{
+ switch (sigdp->flags & ERTS_P2P_SIG_TYPE_MASK) {
+ case ERTS_P2P_SIG_TYPE_OUTPUT: return !0;
+ case ERTS_P2P_SIG_TYPE_OUTPUTV: return !0;
+ default: return 0;
+ }
+}
+
+ERTS_GLB_INLINE ErlDrvSizeT
+erts_proc2port_sig_command_data_size(ErtsProc2PortSigData *sigdp)
+{
+ switch (sigdp->flags & ERTS_P2P_SIG_TYPE_MASK) {
+ case ERTS_P2P_SIG_TYPE_OUTPUT: return sigdp->u.output.size;
+ case ERTS_P2P_SIG_TYPE_OUTPUTV: return sigdp->u.outputv.evp->size;
+ default: return (ErlDrvSizeT) 0;
+ }
+}
+
+#endif
+
+#define ERTS_PROC2PORT_SIG_EXEC 0
+#define ERTS_PROC2PORT_SIG_ABORT 1
+#define ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND 2
+#define ERTS_PROC2PORT_SIG_ABORT_CLOSED 3
+
+typedef int (*ErtsProc2PortSigCallback)(Port *,
+ erts_aint32_t,
+ int,
+ ErtsProc2PortSigData *);
+
+typedef enum {
+ ERTS_PORT_OP_BADARG,
+ ERTS_PORT_OP_CALLER_EXIT,
+ ERTS_PORT_OP_BUSY,
+ ERTS_PORT_OP_BUSY_SCHEDULED,
+ ERTS_PORT_OP_SCHEDULED,
+ ERTS_PORT_OP_DROPPED,
+ ERTS_PORT_OP_DONE
+} ErtsPortOpResult;
+
+ErtsPortOpResult
+erts_schedule_proc2port_signal(Process *,
+ Port *,
+ Eterm,
+ Eterm *,
+ ErtsProc2PortSigData *,
+ int,
+ ErtsProc2PortSigCallback);
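A hedged skeleton of a proc-to-port signal callback (the name example_sig_cb is hypothetical; the real callbacks live in io.c):

    static int
    example_sig_cb(Port *prt, erts_aint32_t state, int op,
                   ErtsProc2PortSigData *sigdp)
    {
        if (op != ERTS_PROC2PORT_SIG_EXEC) {
            /* One of the ERTS_PROC2PORT_SIG_ABORT* cases: the port died or
             * a nosuspend request hit a busy port; release anything
             * referenced by sigdp without touching prt (which may be NULL). */
            return 0;
        }
        /* ERTS_PROC2PORT_SIG_EXEC: called from the port task execution
         * machinery with the port lock held; perform the operation on prt
         * and reply to sigdp->caller if a reply was requested. */
        return 0;
    }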
+
+int erts_deliver_port_exit(Port *, Eterm, Eterm, int);
+
+/*
+ * Port signal flags
+ */
+#define ERTS_PORT_SIG_FLG_BANG_OP ERTS_P2P_SIG_DATA_FLG_BANG_OP
+#define ERTS_PORT_SIG_FLG_NOSUSPEND ERTS_P2P_SIG_DATA_FLG_NOSUSPEND
+#define ERTS_PORT_SIG_FLG_FORCE ERTS_P2P_SIG_DATA_FLG_FORCE
+#define ERTS_PORT_SIG_FLG_BROKEN_LINK ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK
+#define ERTS_PORT_SIG_FLG_BAD_OUTPUT ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT
+#define ERTS_PORT_SIG_FLG_FORCE_SCHED ERTS_P2P_SIG_DATA_FLG_SCHED
+/* ERTS_PORT_SIG_FLG_FORCE_IMM_CALL is only used when crash dumping... */
+#define ERTS_PORT_SIG_FLG_FORCE_IMM_CALL ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT
+
+/*
+ * Port ! {Owner, {command, Data}}
+ * Port ! {Owner, {connect, NewOwner}}
+ * Port ! {Owner, close}
+ */
+ErtsPortOpResult erts_port_command(Process *, int, Port *, Eterm, Eterm *);
+
+/*
+ * Signals from processes to ports.
+ */
+ErtsPortOpResult erts_port_output(Process *, int, Port *, Eterm, Eterm, Eterm *);
+ErtsPortOpResult erts_port_exit(Process *, int, Port *, Eterm, Eterm, Eterm *);
+ErtsPortOpResult erts_port_connect(Process *, int, Port *, Eterm, Eterm, Eterm *);
+ErtsPortOpResult erts_port_link(Process *, Port *, Eterm, Eterm *);
+ErtsPortOpResult erts_port_unlink(Process *, Port *, Eterm, Eterm *);
+ErtsPortOpResult erts_port_control(Process *, Port *, unsigned int, Eterm, Eterm *);
+ErtsPortOpResult erts_port_call(Process *, Port *, unsigned int, Eterm, Eterm *);
+ErtsPortOpResult erts_port_info(Process *, Port *, Eterm, Eterm *);
+ErtsPortOpResult erts_port_set_data(Process *, Port *, Eterm, Eterm *);
+ErtsPortOpResult erts_port_get_data(Process *, Port *, Eterm *);
+
+#endif
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index b6bc59a1c3..b661c26036 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -33,36 +33,29 @@
#include "erl_port_task.h"
#include "dist.h"
#include "dtrace-wrapper.h"
+#include <stdarg.h>
#if defined(DEBUG) && 0
-#define HARD_DEBUG
+#define ERTS_HARD_DEBUG_TASK_QUEUES
+#else
+#undef ERTS_HARD_DEBUG_TASK_QUEUES
#endif
-/*
- * Costs in reductions for some port operations.
- */
-#define ERTS_PORT_REDS_EXECUTE 0
-#define ERTS_PORT_REDS_FREE 50
-#define ERTS_PORT_REDS_TIMEOUT 200
-#define ERTS_PORT_REDS_INPUT 200
-#define ERTS_PORT_REDS_OUTPUT 200
-#define ERTS_PORT_REDS_EVENT 200
-#define ERTS_PORT_REDS_TERMINATE 100
-
-
-#define ERTS_PORT_TASK_INVALID_PORT(P, ID) \
- ((erts_port_status_get((P)) & ERTS_PORT_SFLGS_DEAD) || (P)->id != (ID))
-
-#define ERTS_PORT_IS_IN_RUNQ(RQ, P) \
- ((P)->sched.next || (P)->sched.prev || (RQ)->ports.start == (P))
+#ifdef ERTS_HARD_DEBUG_TASK_QUEUES
+static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue);
+#define ERTS_PT_DBG_CHK_TASK_QS(PP, EQ, PBQ) \
+ chk_task_queues((PP), (EQ), (PBQ))
+#else
+#define ERTS_PT_DBG_CHK_TASK_QS(PP, EQ, PBQ)
+#endif
#ifdef USE_VM_PROBES
#define DTRACE_DRIVER(PROBE_NAME, PP) \
- if (DTRACE_ENABLED(driver_ready_input)) { \
+ if (DTRACE_ENABLED(PROBE_NAME)) { \
DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); \
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); \
\
- dtrace_pid_str(PP->connected, process_str); \
+ dtrace_pid_str(ERTS_PORT_GET_CONNECTED(PP), process_str); \
dtrace_port_str(PP, port_str); \
DTRACE3(PROBE_NAME, process_str, port_str, PP->name); \
}
@@ -72,83 +65,766 @@
erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
-struct ErtsPortTaskQueue_ {
- ErtsPortTask *first;
- ErtsPortTask *last;
- Port *port;
-};
+#define ERTS_PT_STATE_SCHEDULED 0
+#define ERTS_PT_STATE_ABORTED 1
+#define ERTS_PT_STATE_EXECUTING 2
+
+typedef union {
+ struct { /* I/O tasks */
+ ErlDrvEvent event;
+ ErlDrvEventData event_data;
+ } io;
+ struct {
+ ErtsProc2PortSigCallback callback;
+ ErtsProc2PortSigData data;
+ } psig;
+} ErtsPortTaskTypeData;
struct ErtsPortTask_ {
- ErtsPortTask *prev;
- ErtsPortTask *next;
- ErtsPortTaskQueue *queue;
- ErtsPortTaskHandle *handle;
+ erts_smp_atomic32_t state;
ErtsPortTaskType type;
- ErlDrvEvent event;
- ErlDrvEventData event_data;
+ union {
+ struct {
+ ErtsPortTask *next;
+ ErtsPortTaskHandle *handle;
+ int flags;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ ErtsPortTaskTypeData td;
+ } alive;
+ ErtsThrPrgrLaterOp release;
+ } u;
};
-#ifdef HARD_DEBUG
-#define ERTS_PT_CHK_PORTQ(RQ) check_port_queue((RQ), NULL, 0)
-#define ERTS_PT_CHK_PRES_PORTQ(RQ, PP) check_port_queue((RQ), (PP), -1)
-#define ERTS_PT_CHK_IN_PORTQ(RQ, PP) check_port_queue((RQ), (PP), 1)
-#define ERTS_PT_CHK_NOT_IN_PORTQ(RQ, PP) check_port_queue((RQ), (PP), 0)
-#define ERTS_PT_CHK_TASKQ(Q) check_task_queue((Q), NULL, 0)
-#define ERTS_PT_CHK_IN_TASKQ(Q, T) check_task_queue((Q), (T), 1)
-#define ERTS_PT_CHK_NOT_IN_TASKQ(Q, T) check_task_queue((Q), (T), 0)
-static void
-check_port_queue(Port *chk_pp, int inq);
-static void
-check_task_queue(ErtsPortTaskQueue *ptqp,
- ErtsPortTask *chk_ptp,
- int inq);
-#else
-#define ERTS_PT_CHK_PORTQ(RQ)
-#define ERTS_PT_CHK_PRES_PORTQ(RQ, PP)
-#define ERTS_PT_CHK_IN_PORTQ(RQ, PP)
-#define ERTS_PT_CHK_NOT_IN_PORTQ(RQ, PP)
-#define ERTS_PT_CHK_TASKQ(Q)
-#define ERTS_PT_CHK_IN_TASKQ(Q, T)
-#define ERTS_PT_CHK_NOT_IN_TASKQ(Q, T)
+struct ErtsPortTaskHandleList_ {
+ ErtsPortTaskHandle handle;
+ union {
+ ErtsPortTaskHandleList *next;
+#ifdef ERTS_SMP
+ ErtsThrPrgrLaterOp release;
#endif
+ } u;
+};
+
+typedef struct ErtsPortTaskBusyCaller_ ErtsPortTaskBusyCaller;
+struct ErtsPortTaskBusyCaller_ {
+ ErtsPortTaskBusyCaller *next;
+ Eterm caller;
+ SWord count;
+ ErtsPortTask *last;
+};
+
+#define ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS 17
+struct ErtsPortTaskBusyCallerTable_ {
+ ErtsPortTaskBusyCaller *bucket[ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS];
+ ErtsPortTaskBusyCaller pre_alloc_busy_caller;
+};
+
-static void handle_remaining_tasks(ErtsRunQueue *runq, Port *pp);
+static void begin_port_cleanup(Port *pp, ErtsPortTask **execq);
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_task,
ErtsPortTask,
- 200,
+ 1000,
ERTS_ALC_T_PORT_TASK)
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_taskq,
- ErtsPortTaskQueue,
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(busy_caller_table,
+ ErtsPortTaskBusyCallerTable,
50,
- ERTS_ALC_T_PORT_TASKQ)
+ ERTS_ALC_T_BUSY_CALLER_TAB)
+
+#ifdef ERTS_SMP
+static void
+call_port_task_free(void *vptp)
+{
+ port_task_free((ErtsPortTask *) vptp);
+}
+#endif
+
+static ERTS_INLINE void
+schedule_port_task_free(ErtsPortTask *ptp)
+{
+#ifdef ERTS_SMP
+ erts_schedule_thr_prgr_later_op(call_port_task_free,
+ (void *) ptp,
+ &ptp->u.release);
+#else
+ port_task_free(ptp);
+#endif
+}
+
+static ERTS_INLINE ErtsPortTask *
+p2p_sig_data_to_task(ErtsProc2PortSigData *sigdp)
+{
+ ErtsPortTask *ptp;
+ char *ptr = (char *) sigdp;
+ ptr -= offsetof(ErtsPortTask, u.alive.td.psig.data);
+ ptp = (ErtsPortTask *) ptr;
+ ASSERT(ptp->type == ERTS_PORT_TASK_PROC_SIG);
+ return ptp;
+}
+
+ErtsProc2PortSigData *
+erts_port_task_alloc_p2p_sig_data(void)
+{
+ ErtsPortTask *ptp = port_task_alloc();
+
+ ptp->type = ERTS_PORT_TASK_PROC_SIG;
+ ptp->u.alive.flags = ERTS_PT_FLG_SIG_DEP;
+ erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
+
+ ASSERT(ptp == p2p_sig_data_to_task(&ptp->u.alive.td.psig.data));
+
+ return &ptp->u.alive.td.psig.data;
+}
+
+static ERTS_INLINE Eterm
+task_caller(ErtsPortTask *ptp)
+{
+ Eterm caller;
+
+ ASSERT(ptp->type == ERTS_PORT_TASK_PROC_SIG);
+
+ caller = ptp->u.alive.td.psig.data.caller;
+
+ ASSERT(is_internal_pid(caller) || is_internal_port(caller));
+
+ return caller;
+}
+
+/*
+ * Busy queue management
+ */
+
+static ERTS_INLINE int
+caller2bix(Eterm caller)
+{
+ ASSERT(is_internal_pid(caller) || is_internal_port(caller));
+ return (int) (_GET_PID_DATA(caller) % ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS);
+}
+
+
+static void
+popped_from_busy_queue(Port *pp, ErtsPortTask *ptp, int last)
+{
+ ErtsPortTaskBusyCaller **prev_bcpp = NULL, *bcp;
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ Eterm caller = task_caller(ptp);
+ int bix = caller2bix(caller);
+
+ ASSERT(is_internal_pid(caller));
+
+ ASSERT(tabp);
+ bcp = tabp->bucket[bix];
+ prev_bcpp = &tabp->bucket[bix];
+ ASSERT(bcp);
+ while (bcp->caller != caller) {
+ prev_bcpp = &bcp->next;
+ bcp = bcp->next;
+ ASSERT(bcp);
+ }
+ ASSERT(bcp->count > 0);
+ if (--bcp->count != 0) {
+ ASSERT(!last);
+ }
+ else {
+ *prev_bcpp = bcp->next;
+ if (bcp == &tabp->pre_alloc_busy_caller)
+ bcp->caller = am_undefined;
+ else
+ erts_free(ERTS_ALC_T_BUSY_CALLER, bcp);
+ if (last) {
+#ifdef DEBUG
+ erts_aint32_t flags =
+#endif
+ erts_smp_atomic32_read_band_nob(
+ &pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+#ifdef DEBUG
+ for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) {
+ ASSERT(!tabp->bucket[bix]);
+ }
+#endif
+ busy_caller_table_free(tabp);
+ pp->sched.taskq.local.busy.first = NULL;
+ pp->sched.taskq.local.busy.last = NULL;
+ pp->sched.taskq.local.busy.table = NULL;
+ }
+ }
+}
+
+static void
+busy_wait_move_to_busy_queue(Port *pp, ErtsPortTask *ptp)
+{
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ Eterm caller = task_caller(ptp);
+ ErtsPortTaskBusyCaller *bcp;
+ int bix;
+
+ ASSERT(is_internal_pid(caller));
+ /*
+ * Port is busy and this task type needs to wait until not busy.
+ */
+
+ ASSERT(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY);
+
+ ptp->u.alive.next = NULL;
+ if (pp->sched.taskq.local.busy.last) {
+ ASSERT(pp->sched.taskq.local.busy.first);
+ pp->sched.taskq.local.busy.last->u.alive.next = ptp;
+ }
+ else {
+ int i;
+ erts_aint32_t flags;
+
+ pp->sched.taskq.local.busy.first = ptp;
+ flags = erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+ ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(!(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
+
+ ASSERT(!tabp);
+
+ tabp = busy_caller_table_alloc();
+ pp->sched.taskq.local.busy.table = tabp;
+ for (i = 0; i < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; i++)
+ tabp->bucket[i] = NULL;
+ tabp->pre_alloc_busy_caller.caller = am_undefined;
+ }
+ pp->sched.taskq.local.busy.last = ptp;
+
+ bix = caller2bix(caller);
+ ASSERT(tabp);
+ bcp = tabp->bucket[bix];
+
+ while (bcp && bcp->caller != caller)
+ bcp = bcp->next;
+
+ if (bcp)
+ bcp->count++;
+ else {
+ if (tabp->pre_alloc_busy_caller.caller == am_undefined)
+ bcp = &tabp->pre_alloc_busy_caller;
+ else
+ bcp = erts_alloc(ERTS_ALC_T_BUSY_CALLER,
+ sizeof(ErtsPortTaskBusyCaller));
+ bcp->caller = caller;
+ bcp->count = 1;
+ bcp->next = tabp->bucket[bix];
+ tabp->bucket[bix] = bcp;
+ }
+
+ bcp->last = ptp;
+}
+
+static ERTS_INLINE int
+check_sig_dep_move_to_busy_queue(Port *pp, ErtsPortTask *ptp)
+{
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ ErtsPortTask *last_ptp;
+ ErtsPortTaskBusyCaller *bcp;
+ int bix;
+ Eterm caller;
+
+ ASSERT(ptp->u.alive.flags & ERTS_PT_FLG_SIG_DEP);
+ ASSERT(pp->sched.taskq.local.busy.last);
+ ASSERT(tabp);
+
+
+ /*
+     * Either the port is not busy, or the task does not imply waiting on a
+     * busy port. However, due to the signal ordering requirements, the task
+     * might depend on other tasks in the busy queue.
+ */
+
+ caller = task_caller(ptp);
+ bix = caller2bix(caller);
+ bcp = tabp->bucket[bix];
+ while (bcp && bcp->caller != caller)
+ bcp = bcp->next;
+
+ if (!bcp)
+ return 0;
+
+ /*
+ * There are other tasks that we depend on in the busy queue;
+ * move into busy queue.
+ */
+
+ bcp->count++;
+ last_ptp = bcp->last;
+ ptp->u.alive.next = last_ptp->u.alive.next;
+ if (!ptp->u.alive.next) {
+ ASSERT(pp->sched.taskq.local.busy.last == last_ptp);
+ pp->sched.taskq.local.busy.last = ptp;
+ }
+ last_ptp->u.alive.next = ptp;
+ bcp->last = ptp;
+
+ return 1;
+}
+
+static void
+no_sig_dep_move_from_busyq(Port *pp)
+{
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ ErtsPortTask *first_ptp, *last_ptp, *ptp;
+ ErtsPortTaskBusyCaller **prev_bcpp = NULL, *bcp = NULL;
+
+ /*
+     * Move tasks at the head of the busy queue that no longer depend on
+     * any busy-wait tasks into the ordinary queue.
+ */
+
+ first_ptp = ptp = pp->sched.taskq.local.busy.first;
+
+ ASSERT(ptp && !(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY));
+ ASSERT(tabp);
+
+ do {
+ Eterm caller = task_caller(ptp);
+
+ if (!bcp || bcp->caller != caller) {
+ int bix = caller2bix(caller);
+
+ prev_bcpp = &tabp->bucket[bix];
+ bcp = tabp->bucket[bix];
+ ASSERT(bcp);
+ while (bcp->caller != caller) {
+ ASSERT(bcp);
+ prev_bcpp = &bcp->next;
+ bcp = bcp->next;
+ }
+ }
+
+ ASSERT(bcp->caller == caller);
+ ASSERT(bcp->count > 0);
+
+ if (--bcp->count == 0) {
+ *prev_bcpp = bcp->next;
+ if (bcp == &tabp->pre_alloc_busy_caller)
+ bcp->caller = am_undefined;
+ else
+ erts_free(ERTS_ALC_T_BUSY_CALLER, bcp);
+ }
+
+ last_ptp = ptp;
+ ptp = ptp->u.alive.next;
+ } while (ptp && !(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY));
+
+ pp->sched.taskq.local.busy.first = last_ptp->u.alive.next;
+ if (!pp->sched.taskq.local.busy.first) {
+#ifdef DEBUG
+ int bix;
+ erts_aint32_t flags =
+#endif
+ erts_smp_atomic32_read_band_nob(
+ &pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+#ifdef DEBUG
+ for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) {
+ ASSERT(!tabp->bucket[bix]);
+ }
+#endif
+ busy_caller_table_free(tabp);
+ pp->sched.taskq.local.busy.last = NULL;
+ pp->sched.taskq.local.busy.table = NULL;
+ }
+ last_ptp->u.alive.next = pp->sched.taskq.local.first;
+ pp->sched.taskq.local.first = first_ptp;
+}
+
+#ifdef ERTS_HARD_DEBUG_TASK_QUEUES
+
+static void
+chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue)
+{
+ Sint tot_count, tot_table_count;
+ int bix;
+ ErtsPortTask *ptp, *last;
+ ErtsPortTask *first = processing_busy_queue ? execq : pp->sched.taskq.local.busy.first;
+ ErtsPortTask *nb_task_queue = processing_busy_queue ? pp->sched.taskq.local.first : execq;
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ ErtsPortTaskBusyCaller *bcp;
+
+ if (!first) {
+ ASSERT(!tabp);
+ ASSERT(!pp->sched.taskq.local.busy.last);
+ ASSERT(!(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
+ return;
+ }
+
+ ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(tabp);
+
+ tot_count = 0;
+ ptp = first;
+ while (ptp) {
+ Sint count = 0;
+ Eterm caller = task_caller(ptp);
+ int bix = caller2bix(caller);
+ for (bcp = tabp->bucket[bix]; bcp; bcp = bcp->next)
+ if (bcp->caller == caller)
+ break;
+ ASSERT(bcp && bcp->caller == caller);
+
+ ASSERT(bcp->last);
+ while (1) {
+ ErtsPortTask *ptp2;
+
+ ASSERT(caller == task_caller(ptp));
+ count++;
+ tot_count++;
+ last = ptp;
+
+ for (ptp2 = nb_task_queue; ptp2; ptp2 = ptp2->u.alive.next) {
+ ASSERT(ptp != ptp2);
+ }
+
+ if (ptp == bcp->last)
+ break;
+ ptp = ptp->u.alive.next;
+ }
+
+ ASSERT(count == bcp->count);
+ ptp = ptp->u.alive.next;
+ }
+
+ tot_table_count = 0;
+ for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) {
+ for (bcp = tabp->bucket[bix]; bcp; bcp = bcp->next)
+ tot_table_count += bcp->count;
+ }
+
+ ASSERT(tot_count == tot_table_count);
+
+ ASSERT(last == pp->sched.taskq.local.busy.last);
+}
+
+#endif /* ERTS_HARD_DEBUG_TASK_QUEUES */
/*
* Task handle manipulation.
*/
+static ERTS_INLINE void
+reset_port_task_handle(ErtsPortTaskHandle *pthp)
+{
+ erts_smp_atomic_set_relb(pthp, (erts_aint_t) NULL);
+}
+
static ERTS_INLINE ErtsPortTask *
handle2task(ErtsPortTaskHandle *pthp)
{
- return (ErtsPortTask *) erts_smp_atomic_read_nob(pthp);
+ return (ErtsPortTask *) erts_smp_atomic_read_acqb(pthp);
}
static ERTS_INLINE void
reset_handle(ErtsPortTask *ptp)
{
- if (ptp->handle) {
- ASSERT(ptp == handle2task(ptp->handle));
- erts_smp_atomic_set_nob(ptp->handle, (erts_aint_t) NULL);
+ if (ptp->u.alive.handle) {
+ ASSERT(ptp == handle2task(ptp->u.alive.handle));
+ reset_port_task_handle(ptp->u.alive.handle);
}
}
static ERTS_INLINE void
set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
- ptp->handle = pthp;
+ ptp->u.alive.handle = pthp;
if (pthp) {
- erts_smp_atomic_set_nob(pthp, (erts_aint_t) ptp);
- ASSERT(ptp == handle2task(ptp->handle));
+ erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp);
+ ASSERT(ptp == handle2task(ptp->u.alive.handle));
+ }
+}
+
+
+/*
+ * Busy port queue management
+ */
+
+static erts_aint32_t
+check_unset_busy_port_q(Port *pp,
+ erts_aint32_t flags,
+ ErtsPortTaskBusyPortQ *bpq)
+{
+ ErlDrvSizeT qsize, low;
+ int resume_procs = 0;
+
+ ASSERT(bpq);
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+
+ erts_port_task_sched_lock(&pp->sched);
+ qsize = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
+ low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+ if (qsize < low) {
+ erts_aint32_t mask = ~(ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
+ | ERTS_PTS_FLG_BUSY_PORT_Q);
+ flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, mask);
+ if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
+ resume_procs = 1;
+ }
+ else if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) {
+ flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags,
+ ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
+ flags &= ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
+ }
+ erts_port_task_sched_unlock(&pp->sched);
+ if (resume_procs)
+ erts_port_resume_procs(pp);
+
+ return flags;
+}
+
+static ERTS_INLINE void
+aborted_proc2port_data(Port *pp, ErlDrvSizeT size)
+{
+ ErtsPortTaskBusyPortQ *bpq;
+ erts_aint32_t flags;
+ ErlDrvSizeT qsz;
+
+ ASSERT(pp->sched.taskq.bpq);
+
+ if (size == 0)
+ return;
+
+ bpq = pp->sched.taskq.bpq;
+
+ qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+ (erts_aint_t) -size);
+ ASSERT(qsz + size > qsz);
+ flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ ASSERT(pp->sched.taskq.bpq);
+ if ((flags & (ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
+ | ERTS_PTS_FLG_BUSY_PORT_Q)) != ERTS_PTS_FLG_BUSY_PORT_Q)
+ return;
+ if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low))
+ erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+ ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
+}
+
+static ERTS_INLINE void
+dequeued_proc2port_data(Port *pp, ErlDrvSizeT size)
+{
+ ErtsPortTaskBusyPortQ *bpq;
+ erts_aint32_t flags;
+ ErlDrvSizeT qsz;
+
+ ASSERT(pp->sched.taskq.bpq);
+
+ if (size == 0)
+ return;
+
+ bpq = pp->sched.taskq.bpq;
+
+ qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+ (erts_aint_t) -size);
+ ASSERT(qsz + size > qsz);
+ flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q))
+ return;
+ if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->low))
+ check_unset_busy_port_q(pp, flags, bpq);
+}
+
+static ERTS_INLINE erts_aint32_t
+enqueue_proc2port_data(Port *pp,
+ ErtsProc2PortSigData *sigdp,
+ erts_aint32_t flags)
+{
+ ErtsPortTaskBusyPortQ *bpq = pp->sched.taskq.bpq;
+ if (sigdp && bpq) {
+ ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp);
+ if (size) {
+ erts_aint_t asize = erts_smp_atomic_add_read_acqb(&bpq->size,
+ (erts_aint_t) size);
+ ErlDrvSizeT qsz = (ErlDrvSizeT) asize;
+
+ ASSERT(qsz - size < qsz);
+
+ if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q) && qsz > bpq->high) {
+ flags = erts_smp_atomic32_read_bor_acqb(&pp->sched.flags,
+ ERTS_PTS_FLG_BUSY_PORT_Q);
+ flags |= ERTS_PTS_FLG_BUSY_PORT_Q;
+ qsz = (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->size);
+ if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) {
+ flags = (erts_smp_atomic32_read_bor_relb(
+ &pp->sched.flags,
+ ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q));
+ flags |= ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
+ }
+ }
+ ASSERT(!(flags & ERTS_PTS_FLG_EXIT));
+ }
+ }
+ return flags;
+}
+
+/*
+ * erl_drv_busy_msgq_limits() is called by drivers either to read or
+ * to write the limits.
+ *
+ * A limit of zero is interpreted as a read-only request (a limit of
+ * zero would not be useful anyway). Other values are interpreted as
+ * a write request followed by a read-back of the resulting value.
+ */
+
+void
+erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp)
+{
+ Port *pp = erts_drvport2port(dport, NULL);
+    ErtsPortTaskBusyPortQ *bpq = pp ? pp->sched.taskq.bpq : NULL; /* guard invalid drvport */
+ int written = 0, resume_procs = 0;
+ ErlDrvSizeT low, high;
+
+ if (!pp || !bpq) {
+ if (lowp)
+ *lowp = ERL_DRV_BUSY_MSGQ_DISABLED;
+ if (highp)
+ *highp = ERL_DRV_BUSY_MSGQ_DISABLED;
+ return;
+ }
+
+ low = lowp ? *lowp : 0;
+ high = highp ? *highp : 0;
+
+ erts_port_task_sched_lock(&pp->sched);
+
+ if (low == ERL_DRV_BUSY_MSGQ_DISABLED
+ || high == ERL_DRV_BUSY_MSGQ_DISABLED) {
+ /* Disable busy msgq feature */
+ erts_aint32_t flags;
+ pp->sched.taskq.bpq = NULL;
+ flags = ~(ERTS_PTS_FLG_BUSY_PORT_Q|ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
+ flags = erts_smp_atomic32_read_band_acqb(&pp->sched.flags, flags);
+ if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
+ resume_procs = 1;
+ }
+ else {
+
+ if (!low)
+ low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+ else {
+ if (bpq->high < low)
+ bpq->high = low;
+ erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+ written = 1;
+ }
+
+ if (!high)
+ high = bpq->high;
+ else {
+ if (low > high) {
+ low = high;
+ erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+ }
+ bpq->high = high;
+ written = 1;
+ }
+
+ if (written) {
+ ErlDrvSizeT size = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
+ if (size > high)
+ erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ ERTS_PTS_FLG_BUSY_PORT_Q);
+ else if (size < low)
+ erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
+ }
+ }
+
+ erts_port_task_sched_unlock(&pp->sched);
+
+ if (resume_procs)
+ erts_port_resume_procs(pp);
+ if (lowp)
+ *lowp = low;
+ if (highp)
+ *highp = high;
+}
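A small driver-side sketch of the read/write convention described above (port being the driver's ErlDrvPort; the chosen high-water mark is an arbitrary assumption):

    ErlDrvSizeT low  = 0;           /* zero: only read the current low limit */
    ErlDrvSizeT high = 256 * 1024;  /* non-zero: write a new high limit */
    erl_drv_busy_msgq_limits(port, &low, &high);
    /* On return both variables hold the resulting limits, or
     * ERL_DRV_BUSY_MSGQ_DISABLED if the feature is off for this port. */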
+
+/*
+ * No-suspend handles.
+ */
+
+#ifdef ERTS_SMP
+static void
+free_port_task_handle_list(void *vpthlp)
+{
+ erts_free(ERTS_ALC_T_PT_HNDL_LIST, vpthlp);
+}
+#endif
+
+static void
+schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
+{
+#ifdef ERTS_SMP
+ erts_schedule_thr_prgr_later_op(free_port_task_handle_list,
+ (void *) pthlp,
+ &pthlp->u.release);
+#else
+ erts_free(ERTS_ALC_T_PT_HNDL_LIST, pthlp);
+#endif
+}
+
+static ERTS_INLINE void
+abort_nosuspend_task(Port *pp,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp)
+{
+
+ ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
+
+ if (!pp->sched.taskq.bpq)
+ tdp->psig.callback(NULL,
+ ERTS_PORT_SFLG_INVALID,
+ ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ &tdp->psig.data);
+ else {
+ ErlDrvSizeT size = erts_proc2port_sig_command_data_size(&tdp->psig.data);
+ tdp->psig.callback(NULL,
+ ERTS_PORT_SFLG_INVALID,
+ ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ &tdp->psig.data);
+ aborted_proc2port_data(pp, size);
+ }
+}
+
+static ErtsPortTaskHandleList *
+get_free_nosuspend_handles(Port *pp)
+{
+ ErtsPortTaskHandleList *nshp, *last_nshp = NULL;
+
+ ERTS_SMP_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched));
+
+ nshp = pp->sched.taskq.local.busy.nosuspend;
+
+ while (nshp && !erts_port_task_is_scheduled(&nshp->handle)) {
+ last_nshp = nshp;
+ nshp = nshp->u.next;
+ }
+
+ if (!last_nshp)
+ nshp = NULL;
+ else {
+ nshp = pp->sched.taskq.local.busy.nosuspend;
+ pp->sched.taskq.local.busy.nosuspend = last_nshp->u.next;
+ last_nshp->u.next = NULL;
+ if (!pp->sched.taskq.local.busy.nosuspend)
+ erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_NS_TASKS);
+ }
+ return nshp;
+}
+
+static void
+free_nosuspend_handles(ErtsPortTaskHandleList *free_nshp)
+{
+ while (free_nshp) {
+ ErtsPortTaskHandleList *nshp = free_nshp;
+ free_nshp = free_nshp->u.next;
+ schedule_port_task_handle_list_free(nshp);
}
}
@@ -161,7 +837,6 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
pp->sched.next = NULL;
- pp->sched.in_runq = 1;
if (runq->ports.end) {
ASSERT(runq->ports.start);
runq->ports.end->sched.next = pp;
@@ -199,285 +874,423 @@ pop_port(ErtsRunQueue *runq)
return pp;
}
+/*
+ * Task queue operations
+ */
-#ifdef HARD_DEBUG
+static ERTS_INLINE int
+enqueue_task(Port *pp,
+ ErtsPortTask *ptp,
+ ErtsProc2PortSigData *sigdp,
+ ErtsPortTaskHandleList *ns_pthlp,
+ erts_aint32_t *flagsp)
-static void
-check_port_queue(ErtsRunQueue *runq, Port *chk_pp, int inq)
{
- Port *pp;
- Port *last_pp;
- Port *first_pp = runq->ports.start;
- int no_forward = 0, no_backward = 0;
- int found_forward = 0, found_backward = 0;
- if (!first_pp) {
- ASSERT(!runq->ports.end);
- }
+ int res;
+ erts_aint32_t fail_flags = ERTS_PTS_FLG_EXIT;
+ erts_aint32_t flags;
+ ptp->u.alive.next = NULL;
+ if (ns_pthlp)
+ fail_flags |= ERTS_PTS_FLG_BUSY_PORT;
+ erts_port_task_sched_lock(&pp->sched);
+ flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ if (flags & fail_flags)
+ res = 0;
else {
- ASSERT(!first_pp->sched.prev);
- for (pp = first_pp; pp; pp = pp->sched.next) {
- ASSERT(pp->sched.taskq);
- if (pp->sched.taskq->first)
- no_forward++;
- if (chk_pp == pp)
- found_forward = 1;
- if (!pp->sched.prev) {
- ASSERT(first_pp == pp);
- }
- if (!pp->sched.next) {
- ASSERT(runq->ports.end == pp);
- last_pp = pp;
- }
- }
- for (pp = last_pp; pp; pp = pp->sched.prev) {
- ASSERT(pp->sched.taskq);
- if (pp->sched.taskq->last)
- no_backward++;
- if (chk_pp == pp)
- found_backward = 1;
- if (!pp->sched.prev) {
- ASSERT(first_pp == pp);
- }
- if (!pp->sched.next) {
- ASSERT(runq->ports.end == pp);
- }
- check_task_queue(pp->sched.taskq, NULL, 0);
+ if (ns_pthlp) {
+ ns_pthlp->u.next = pp->sched.taskq.local.busy.nosuspend;
+ pp->sched.taskq.local.busy.nosuspend = ns_pthlp;
}
- ASSERT(no_forward == no_backward);
- }
- ASSERT(no_forward == RUNQ_READ_LEN(&runq->ports.info.len));
- if (chk_pp) {
- if (chk_pp->sched.taskq || chk_pp->sched.exe_taskq) {
- ASSERT(chk_pp->sched.taskq != chk_pp->sched.exe_taskq);
- }
- ASSERT(!chk_pp->sched.taskq || chk_pp->sched.taskq->first);
- if (inq < 0)
- inq = chk_pp->sched.taskq && !chk_pp->sched.exe_taskq;
- if (inq) {
- ASSERT(found_forward && found_backward);
+ if (pp->sched.taskq.in.last) {
+ ASSERT(pp->sched.taskq.in.first);
+ ASSERT(!pp->sched.taskq.in.last->u.alive.next);
+
+ pp->sched.taskq.in.last->u.alive.next = ptp;
}
else {
- ASSERT(!found_forward && !found_backward);
- }
- }
-}
-
-#endif
+ ASSERT(!pp->sched.taskq.in.first);
-/*
- * Task queue operations
- */
-
-static ERTS_INLINE ErtsPortTaskQueue *
-port_taskq_init(ErtsPortTaskQueue *ptqp, Port *pp)
-{
- if (ptqp) {
- ptqp->first = NULL;
- ptqp->last = NULL;
- ptqp->port = pp;
+ pp->sched.taskq.in.first = ptp;
+ }
+ pp->sched.taskq.in.last = ptp;
+ flags = enqueue_proc2port_data(pp, sigdp, flags);
+ res = 1;
}
- return ptqp;
+ erts_port_task_sched_unlock(&pp->sched);
+ *flagsp = flags;
+ return res;
}
static ERTS_INLINE void
-enqueue_task(ErtsPortTaskQueue *ptqp, ErtsPortTask *ptp)
+prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
{
- ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp);
- ptp->next = NULL;
- ptp->prev = ptqp->last;
- ptp->queue = ptqp;
- if (ptqp->last) {
- ASSERT(ptqp->first);
- ptqp->last->next = ptp;
+ erts_aint32_t act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+
+ if (!pp->sched.taskq.local.busy.first || (act & ERTS_PTS_FLG_BUSY_PORT)) {
+ *execqp = pp->sched.taskq.local.first;
+ *processing_busy_q_p = 0;
}
else {
- ASSERT(!ptqp->first);
- ptqp->first = ptp;
+ *execqp = pp->sched.taskq.local.busy.first;
+ *processing_busy_q_p = 1;
+ }
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
+ while (1) {
+ erts_aint32_t new, exp;
+
+ new = exp = act;
+
+ new &= ~ERTS_PTS_FLG_IN_RUNQ;
+ new |= ERTS_PTS_FLG_EXEC;
+
+ act = erts_smp_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp);
+
+ ASSERT(act & ERTS_PTS_FLG_IN_RUNQ);
+
+ if (exp == act)
+ break;
}
- ptqp->last = ptp;
- ERTS_PT_CHK_IN_TASKQ(ptqp, ptp);
}
-static ERTS_INLINE void
-push_task(ErtsPortTaskQueue *ptqp, ErtsPortTask *ptp)
+/* finalize_exec() returns a non-zero value if the port should remain active */
+static ERTS_INLINE int
+finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
{
- ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp);
- ptp->next = ptqp->first;
- ptp->prev = NULL;
- ptp->queue = ptqp;
- if (ptqp->first) {
- ASSERT(ptqp->last);
- ptqp->first->prev = ptp;
- }
+ erts_aint32_t act;
+
+ if (!processing_busy_q)
+ pp->sched.taskq.local.first = *execq;
else {
- ASSERT(!ptqp->last);
- ptqp->last = ptp;
+ pp->sched.taskq.local.busy.first = *execq;
+ ASSERT(*execq);
+ }
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execq, processing_busy_q);
+
+ *execq = NULL;
+
+ act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
+ act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
+
+ while (1) {
+ erts_aint32_t new, exp;
+
+ new = exp = act;
+
+ new &= ~ERTS_PTS_FLG_EXEC;
+ if (act & ERTS_PTS_FLG_HAVE_TASKS)
+ new |= ERTS_PTS_FLG_IN_RUNQ;
+
+ act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
+
+ ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+
+ if (exp == act)
+ break;
}
- ptqp->first = ptp;
- ERTS_PT_CHK_IN_TASKQ(ptqp, ptp);
+
+ return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0;
}
-static ERTS_INLINE void
-dequeue_task(ErtsPortTask *ptp)
+static ERTS_INLINE erts_aint32_t
+select_queue_for_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
{
- ASSERT(ptp);
- ASSERT(ptp->queue);
- ERTS_PT_CHK_IN_TASKQ(ptp->queue, ptp);
- if (ptp->next)
- ptp->next->prev = ptp->prev;
- else {
- ASSERT(ptp->queue->last == ptp);
- ptp->queue->last = ptp->prev;
+ erts_aint32_t flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+
+ if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
+ flags = check_unset_busy_port_q(pp, flags, pp->sched.taskq.bpq);
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
+ if (flags & ERTS_PTS_FLG_BUSY_PORT) {
+ if (*processing_busy_q_p) {
+ ErtsPortTask *ptp;
+
+ ptp = pp->sched.taskq.local.busy.first = *execqp;
+ if (!ptp)
+ pp->sched.taskq.local.busy.last = NULL;
+ else if (!(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY))
+ no_sig_dep_move_from_busyq(pp);
+
+ *execqp = pp->sched.taskq.local.first;
+ *processing_busy_q_p = 0;
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+ }
+
+ return flags;
}
- if (ptp->prev)
- ptp->prev->next = ptp->next;
- else {
- ASSERT(ptp->queue->first == ptp);
- ptp->queue->first = ptp->next;
+
+ /* Not busy */
+
+ if (!*processing_busy_q_p && pp->sched.taskq.local.busy.first) {
+ pp->sched.taskq.local.first = *execqp;
+ *execqp = pp->sched.taskq.local.busy.first;
+ *processing_busy_q_p = 1;
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
}
- ASSERT(ptp->queue->first || !ptp->queue->last);
- ASSERT(ptp->queue->last || !ptp->queue->first);
- ERTS_PT_CHK_NOT_IN_TASKQ(ptp->queue, ptp);
+ return flags;
}
-static ERTS_INLINE ErtsPortTask *
-pop_task(ErtsPortTaskQueue *ptqp)
+/*
+ * check_task_for_exec() returns a non-zero value if the task
+ * is ok to execute; otherwise 0.
+ */
+static ERTS_INLINE int
+check_task_for_exec(Port *pp,
+ erts_aint32_t flags,
+ ErtsPortTask **execqp,
+ int *processing_busy_q_p,
+ ErtsPortTask *ptp)
{
- ErtsPortTask *ptp = ptqp->first;
- if (!ptp) {
- ASSERT(!ptqp->last);
+
+ if (!*processing_busy_q_p) {
+ /* Processing normal queue */
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, ptp, *processing_busy_q_p);
+
+ if ((flags & ERTS_PTS_FLG_BUSY_PORT)
+ && (ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY)) {
+
+ busy_wait_move_to_busy_queue(pp, ptp);
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
+ return 0;
+ }
+
+ if (pp->sched.taskq.local.busy.last
+ && (ptp->u.alive.flags & ERTS_PT_FLG_SIG_DEP)) {
+
+ int res = !check_sig_dep_move_to_busy_queue(pp, ptp);
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
+ return res;
+ }
+
}
else {
- ERTS_PT_CHK_IN_TASKQ(ptqp, ptp);
- ASSERT(!ptp->prev);
- ptqp->first = ptp->next;
- if (ptqp->first)
- ptqp->first->prev = NULL;
- else {
- ASSERT(ptqp->last == ptp);
- ptqp->last = NULL;
+ /* Processing busy queue */
+
+ ASSERT(!(flags & ERTS_PTS_FLG_BUSY_PORT));
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, ptp, *processing_busy_q_p);
+
+ popped_from_busy_queue(pp, ptp, !*execqp);
+
+ if (!*execqp) {
+ *execqp = pp->sched.taskq.local.first;
+ *processing_busy_q_p = 0;
}
- ASSERT(ptp->queue->first || !ptp->queue->last);
- ASSERT(ptp->queue->last || !ptp->queue->first);
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
}
- ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp);
- return ptp;
+
+ return 1;
}
-#ifdef HARD_DEBUG
+static ErtsPortTask *
+fetch_in_queue(Port *pp, ErtsPortTask **execqp)
+{
+ ErtsPortTask *ptp;
+ ErtsPortTaskHandleList *free_nshp = NULL;
-static void
-check_task_queue(ErtsPortTaskQueue *ptqp,
- ErtsPortTask *chk_ptp,
- int inq)
+ erts_port_task_sched_lock(&pp->sched);
+
+ ptp = pp->sched.taskq.in.first;
+ pp->sched.taskq.in.first = NULL;
+ pp->sched.taskq.in.last = NULL;
+ if (ptp)
+ *execqp = ptp->u.alive.next;
+ else
+ erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_TASKS);
+
+
+ if (pp->sched.taskq.local.busy.nosuspend)
+ free_nshp = get_free_nosuspend_handles(pp);
+
+ erts_port_task_sched_unlock(&pp->sched);
+
+ if (free_nshp)
+ free_nosuspend_handles(free_nshp);
+
+ return ptp;
+}
+
+static ERTS_INLINE ErtsPortTask *
+select_task_for_exec(Port *pp,
+ ErtsPortTask **execqp,
+ int *processing_busy_q_p)
{
ErtsPortTask *ptp;
- ErtsPortTask *last_ptp;
- ErtsPortTask *first_ptp = ptqp->first;
- int found_forward = 0, found_backward = 0;
- if (!first_ptp) {
- ASSERT(!ptqp->last);
- }
- else {
- ASSERT(!first_ptp->prev);
- for (ptp = first_ptp; ptp; ptp = ptp->next) {
- ASSERT(ptp->queue == ptqp);
- if (chk_ptp == ptp)
- found_forward = 1;
- if (!ptp->prev) {
- ASSERT(first_ptp == ptp);
- }
- if (!ptp->next) {
- ASSERT(ptqp->last == ptp);
- last_ptp = ptp;
- }
- }
- for (ptp = last_ptp; ptp; ptp = ptp->prev) {
- ASSERT(ptp->queue == ptqp);
- if (chk_ptp == ptp)
- found_backward = 1;
- if (!ptp->prev) {
- ASSERT(first_ptp == ptp);
- }
- if (!ptp->next) {
- ASSERT(ptqp->last == ptp);
- }
- }
- }
- if (chk_ptp) {
- if (inq) {
- ASSERT(found_forward && found_backward);
- }
+ erts_aint32_t flags;
+
+ flags = select_queue_for_exec(pp, execqp, processing_busy_q_p);
+
+ while (1) {
+ ptp = *execqp;
+ if (ptp)
+ *execqp = ptp->u.alive.next;
else {
- ASSERT(!found_forward && !found_backward);
+ ptp = fetch_in_queue(pp, execqp);
+ if (!ptp)
+ return NULL;
}
+ if (check_task_for_exec(pp, flags, execqp, processing_busy_q_p, ptp))
+ return ptp;
}
}
-#endif
/*
* Abort a scheduled task.
*/
int
-erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp)
+erts_port_task_abort(ErtsPortTaskHandle *pthp)
{
- ErtsRunQueue *runq;
- ErtsPortTaskQueue *ptqp;
+ int res;
ErtsPortTask *ptp;
- Port *pp;
-
- pp = &erts_port[internal_port_index(id)];
- runq = erts_port_runq(pp);
- if (!runq)
- return 1;
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
+#endif
ptp = handle2task(pthp);
+ if (!ptp)
+ res = -1;
+ else {
+ erts_aint32_t old_state;
+
+#ifdef DEBUG
+ ErtsPortTaskHandle *saved_pthp = ptp->u.alive.handle;
+ ERTS_SMP_READ_MEMORY_BARRIER;
+ old_state = erts_smp_atomic32_read_nob(&ptp->state);
+ if (old_state == ERTS_PT_STATE_SCHEDULED) {
+ ASSERT(saved_pthp == pthp);
+ }
+#endif
- if (!ptp) {
- erts_smp_runq_unlock(runq);
- return 1;
+ old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ ERTS_PT_STATE_ABORTED,
+ ERTS_PT_STATE_SCHEDULED);
+ if (old_state != ERTS_PT_STATE_SCHEDULED)
+	    res = -1; /* Task already aborted, executing, or executed */
+ else {
+
+ reset_port_task_handle(pthp);
+
+ switch (ptp->type) {
+ case ERTS_PORT_TASK_INPUT:
+ case ERTS_PORT_TASK_OUTPUT:
+ case ERTS_PORT_TASK_EVENT:
+ ASSERT(erts_smp_atomic_read_nob(
+ &erts_port_task_outstanding_io_tasks) > 0);
+ erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
+ break;
+ case ERTS_PORT_TASK_PROC_SIG:
+ ERTS_INTERNAL_ERROR("Aborted process to port signal");
+ break;
+ default:
+ break;
+ }
+
+ res = 0;
+ }
}
- ASSERT(ptp->handle == pthp);
- ptqp = ptp->queue;
- ASSERT(pp == ptqp->port);
+#ifdef ERTS_SMP
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- ASSERT(ptqp);
- ASSERT(ptqp->first);
+ return res;
+}
- dequeue_task(ptp);
- reset_handle(ptp);
+void
+erts_port_task_abort_nosuspend_tasks(Port *pp)
+{
+ erts_aint32_t flags;
+ ErtsPortTaskHandleList *abort_list;
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl = ERTS_THR_PRGR_DHANDLE_INVALID;
+#endif
- switch (ptp->type) {
- case ERTS_PORT_TASK_INPUT:
- case ERTS_PORT_TASK_OUTPUT:
- case ERTS_PORT_TASK_EVENT:
- ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) > 0);
- erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
- break;
- default:
- break;
- }
+ erts_port_task_sched_lock(&pp->sched);
+ flags = erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_NS_TASKS);
+ abort_list = pp->sched.taskq.local.busy.nosuspend;
+ pp->sched.taskq.local.busy.nosuspend = NULL;
+ erts_port_task_sched_unlock(&pp->sched);
- ASSERT(ptqp == pp->sched.taskq || ptqp == pp->sched.exe_taskq);
+ while (abort_list) {
+#ifdef DEBUG
+ ErtsPortTaskHandle *saved_pthp;
+#endif
+ ErtsPortTaskType type;
+ ErtsPortTaskTypeData td;
+ ErtsPortTaskHandle *pthp;
+ ErtsPortTask *ptp;
+ ErtsPortTaskHandleList *pthlp;
+ erts_aint32_t old_state;
- if (ptqp->first || pp->sched.taskq != ptqp)
- ptqp = NULL;
- else
- pp->sched.taskq = NULL;
+ pthlp = abort_list;
+ abort_list = pthlp->u.next;
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ dhndl = erts_thr_progress_unmanaged_delay();
+#endif
- erts_smp_runq_unlock(runq);
+ pthp = &pthlp->handle;
+ ptp = handle2task(pthp);
+ if (!ptp) {
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
+ schedule_port_task_handle_list_free(pthlp);
+ continue;
+ }
- port_task_free(ptp);
- if (ptqp)
- port_taskq_free(ptqp);
+#ifdef DEBUG
+ saved_pthp = ptp->u.alive.handle;
+ ERTS_SMP_READ_MEMORY_BARRIER;
+ old_state = erts_smp_atomic32_read_nob(&ptp->state);
+ if (old_state == ERTS_PT_STATE_SCHEDULED) {
+ ASSERT(saved_pthp == pthp);
+ }
+#endif
- return 0;
+ old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ ERTS_PT_STATE_ABORTED,
+ ERTS_PT_STATE_SCHEDULED);
+ if (old_state != ERTS_PT_STATE_SCHEDULED) {
+ /* Task already aborted, executing, or executed */
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
+ schedule_port_task_handle_list_free(pthlp);
+ continue;
+ }
+
+ reset_port_task_handle(pthp);
+
+ type = ptp->type;
+ td = ptp->u.alive.td;
+
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
+ schedule_port_task_handle_list_free(pthlp);
+
+ abort_nosuspend_task(pp, type, &td);
+ }
}
/*
@@ -488,243 +1301,264 @@ int
erts_port_task_schedule(Eterm id,
ErtsPortTaskHandle *pthp,
ErtsPortTaskType type,
- ErlDrvEvent event,
- ErlDrvEventData event_data)
+ ...)
{
+ ErtsProc2PortSigData *sigdp = NULL;
+ ErtsPortTaskHandleList *ns_pthlp = NULL;
+#ifdef ERTS_SMP
+ ErtsRunQueue *xrunq;
+ ErtsThrPrgrDelayHandle dhndl;
+#endif
ErtsRunQueue *runq;
Port *pp;
- ErtsPortTask *ptp;
- int enq_port = 0;
-
- /*
- * NOTE: We might not have the port lock here. We are only
- * allowed to access the 'sched', 'tab_status',
- * and 'id' fields of the port struct while
- * tasks_lock is held.
- */
+ ErtsPortTask *ptp = NULL;
+ erts_aint32_t act, add_flags;
if (pthp && erts_port_task_is_scheduled(pthp)) {
ASSERT(0);
- erts_port_task_abort(id, pthp);
+ erts_port_task_abort(pthp);
}
- ptp = port_task_alloc();
-
ASSERT(is_internal_port(id));
- pp = &erts_port[internal_port_index(id)];
- runq = erts_port_runq(pp);
- if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) {
- if (runq)
- erts_smp_runq_unlock(runq);
- return -1;
- }
-
- ASSERT(!erts_port_task_is_scheduled(pthp));
+#ifdef ERTS_SMP
+ dhndl = erts_thr_progress_unmanaged_delay();
+#endif
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+ pp = erts_port_lookup_raw(id);
- if (!pp->sched.taskq && !pp->sched.in_runq && !pp->sched.exe_taskq) {
#ifdef ERTS_SMP
- ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
- if (xrunq) {
- /* Port emigrated ... */
- erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
- erts_smp_runq_unlock(runq);
- runq = erts_port_runq(pp);
- if (!runq)
- return -1;
- }
- enq_port = !pp->sched.taskq && !pp->sched.in_runq && !pp->sched.exe_taskq;
-#else
- enq_port = 1;
-#endif
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ if (pp)
+ erts_port_inc_refc(pp);
+ erts_thr_progress_unmanaged_continue(dhndl);
}
+#endif
- ASSERT(!enq_port
- || !(ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_SUSPENDED));
+ if (!pp)
+ goto fail;
- if (!pp->sched.taskq)
- pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp);
+ if (type != ERTS_PORT_TASK_PROC_SIG) {
+ ptp = port_task_alloc();
- ASSERT(ptp);
+ ptp->type = type;
+ ptp->u.alive.flags = 0;
- ptp->type = type;
- ptp->event = event;
- ptp->event_data = event_data;
+ erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
- set_handle(ptp, pthp);
+ set_handle(ptp, pthp);
+ }
switch (type) {
- case ERTS_PORT_TASK_FREE:
- erl_exit(ERTS_ABORT_EXIT,
- "erts_port_task_schedule(): Cannot schedule free task\n");
- break;
case ERTS_PORT_TASK_INPUT:
- case ERTS_PORT_TASK_OUTPUT:
- case ERTS_PORT_TASK_EVENT:
+ case ERTS_PORT_TASK_OUTPUT: {
+ va_list argp;
+ va_start(argp, type);
+ ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
+ va_end(argp);
erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
- /* Fall through... */
+ break;
+ }
+ case ERTS_PORT_TASK_EVENT: {
+ va_list argp;
+ va_start(argp, type);
+ ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
+ ptp->u.alive.td.io.event_data = va_arg(argp, ErlDrvEventData);
+ va_end(argp);
+ erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
+ break;
+ }
+ case ERTS_PORT_TASK_PROC_SIG: {
+ va_list argp;
+ ASSERT(!pthp);
+ va_start(argp, type);
+ sigdp = va_arg(argp, ErtsProc2PortSigData *);
+ ptp = p2p_sig_data_to_task(sigdp);
+ ptp->u.alive.td.psig.callback = va_arg(argp, ErtsProc2PortSigCallback);
+ ptp->u.alive.flags |= va_arg(argp, int);
+ va_end(argp);
+ if (!(ptp->u.alive.flags & ERTS_PT_FLG_NOSUSPEND))
+ set_handle(ptp, pthp);
+ else {
+ ns_pthlp = erts_alloc(ERTS_ALC_T_PT_HNDL_LIST,
+ sizeof(ErtsPortTaskHandleList));
+ set_handle(ptp, &ns_pthlp->handle);
+ }
+ break;
+ }
default:
- enqueue_task(pp->sched.taskq, ptp);
break;
}
-#ifndef ERTS_SMP
- /*
- * When (!enq_port && !pp->sched.exe_taskq) is true in the smp case,
- * the port might not be in the run queue. If this is the case, another
- * thread is in the process of enqueueing the port. This very seldom
- * occur, but do occur and is a valid scenario. Debug info showing this
- * enqueue in progress must be introduced before we can enable (modified
- * versions of these) assertions in the smp case again.
- */
-#if defined(HARD_DEBUG)
- if (pp->sched.exe_taskq || enq_port)
- ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp);
- else
- ERTS_PT_CHK_IN_PORTQ(runq, pp);
-#elif defined(DEBUG)
- if (!enq_port && !pp->sched.exe_taskq) {
- /* We should be in port run q */
- ASSERT(pp->sched.in_runq);
+ if (!enqueue_task(pp, ptp, sigdp, ns_pthlp, &act)) {
+ reset_handle(ptp);
+ if (ns_pthlp && !(act & ERTS_PTS_FLG_EXIT))
+ goto abort_nosuspend;
+ else
+ goto fail;
}
-#endif
-#endif
- if (!enq_port) {
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- erts_smp_runq_unlock(runq);
- }
- else {
- enqueue_port(runq, pp);
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
-
- if (erts_system_profile_flags.runnable_ports) {
- profile_runnable_port(pp, am_active);
+ add_flags = ERTS_PTS_FLG_HAVE_TASKS;
+ if (ns_pthlp)
+ add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS;
+
+ while (1) {
+ erts_aint32_t new, exp;
+
+ if ((act & add_flags) == add_flags
+ && (act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ goto done; /* Done */
+
+ new = exp = act;
+ new |= add_flags;
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ new |= ERTS_PTS_FLG_IN_RUNQ;
+
+ act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
+
+ if (exp == act) {
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ break; /* Need to enqueue port */
+ goto done; /* Done */
}
+ if (act & ERTS_PTS_FLG_EXIT)
+ goto done; /* Died after our task insert... */
+ }
+
+ /* Enqueue port on run-queue */
+
+ runq = erts_port_runq(pp);
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+
+#ifdef ERTS_SMP
+ xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ if (xrunq) {
+ /* Port emigrated ... */
+ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
+ runq = erts_port_runq(pp);
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ }
+#endif
- erts_smp_notify_inc_runq(runq);
+ enqueue_port(runq, pp);
+
+ if (erts_system_profile_flags.runnable_ports) {
+ profile_runnable_port(pp, am_active);
}
+
+ erts_smp_runq_unlock(runq);
+
+ erts_smp_notify_inc_runq(runq);
+
+done:
+
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_port_dec_refc(pp);
+#endif
+
+ return 0;
+
+abort_nosuspend:
+
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_port_dec_refc(pp);
+#endif
+
+ abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td);
+
+ ASSERT(ns_pthlp);
+ erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
+ if (ptp)
+ port_task_free(ptp);
+
return 0;
+
+fail:
+
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_port_dec_refc(pp);
+#endif
+
+ if (ns_pthlp)
+ erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
+
+ if (ptp)
+ port_task_free(ptp);
+
+ return -1;
}
void
erts_port_task_free_port(Port *pp)
{
+ ErtsProcList *suspended;
+ erts_aint32_t flags;
ErtsRunQueue *runq;
- ErtsPortTaskQueue *ptqp;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
- ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
+ ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
+
runq = erts_port_runq(pp);
- ASSERT(runq);
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- ptqp = pp->sched.exe_taskq;
- if (ptqp) {
- /* I (this thread) am currently executing this port, free it
- when scheduled out... */
- ErtsPortTask *ptp;
- enqueue_free:
- ptp = port_task_alloc();
- erts_smp_port_state_lock(pp);
- pp->status &= ~ERTS_PORT_SFLG_CLOSING;
- pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
- erts_may_save_closed_port(pp);
- erts_smp_port_state_unlock(pp);
- ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 1);
- ptp->type = ERTS_PORT_TASK_FREE;
- ptp->event = (ErlDrvEvent) -1;
- ptp->event_data = NULL;
- set_handle(ptp, NULL);
- push_task(ptqp, ptp);
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- erts_smp_runq_unlock(runq);
- }
- else {
- if (pp->sched.in_runq) {
- ptqp = pp->sched.taskq;
- if (!ptqp)
- pp->sched.taskq = ptqp = port_taskq_init(port_taskq_alloc(), pp);
- goto enqueue_free;
- }
- ASSERT(!pp->sched.taskq);
- erts_smp_port_state_lock(pp);
- pp->status &= ~ERTS_PORT_SFLG_CLOSING;
- pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
- erts_may_save_closed_port(pp);
- erts_smp_port_state_unlock(pp);
- erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
- ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
- handle_remaining_tasks(runq, pp); /* May release runq lock */
- ASSERT(!pp->sched.exe_taskq && (!ptqp || !ptqp->first));
- pp->sched.taskq = NULL;
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- erts_smp_runq_unlock(runq);
- }
-}
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ erts_port_task_sched_lock(&pp->sched);
+ flags = erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ ERTS_PTS_FLG_EXIT);
+ suspended = pp->suspended;
+ pp->suspended = NULL;
+ erts_port_task_sched_unlock(&pp->sched);
+ erts_atomic32_read_bset_relb(&pp->state,
+ (ERTS_PORT_SFLG_CLOSING
+ | ERTS_PORT_SFLG_FREE),
+ ERTS_PORT_SFLG_FREE);
-typedef struct {
- ErtsRunQueue *runq;
- int *resp;
-} ErtsPortTaskExeBlockData;
+ erts_smp_runq_unlock(runq);
+
+ if (erts_proclist_fetch(&suspended, NULL))
+ erts_resume_processes(suspended);
+
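+    /*
+     * If the port is neither in the run-queue nor currently being
+     * executed, no scheduler will clean it up for us; handle the
+     * remaining tasks here instead.
+     */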
+ if (!(flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ begin_port_cleanup(pp, NULL);
+}
/*
- * Run all scheduled tasks for the first port in run queue. If
- * new tasks appear while running reschedule port (free task is
- * an exception; it is always handled instantly).
+ * Execute scheduled tasks of a port.
*
* erts_port_task_execute() is called by scheduler threads between
- * scheduleing of processes. Sched lock should be held by caller.
+ * scheduling of processes. Run-queue lock should be held by caller.
*/
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
Port *pp;
- ErtsPortTaskQueue *ptqp;
- ErtsPortTask *ptp;
+ ErtsPortTask *execq;
+ int processing_busy_q;
int res = 0;
int reds = ERTS_PORT_REDS_EXECUTE;
erts_aint_t io_tasks_executed = 0;
int fpe_was_unmasked;
+ erts_aint32_t state;
+ int active;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- ERTS_PT_CHK_PORTQ(runq);
-
pp = pop_port(runq);
if (!pp) {
res = 0;
goto done;
}
- ASSERT(pp->sched.in_runq);
- pp->sched.in_runq = 0;
- if (!pp->sched.taskq) {
- if (erts_system_profile_flags.runnable_ports)
- profile_runnable_port(pp, am_inactive);
- res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
- != (erts_aint_t) 0);
- goto done;
- }
+ erts_smp_runq_unlock(runq);
*curr_port_pp = pp;
-
- ASSERT(pp->sched.taskq->first);
- ptqp = pp->sched.taskq;
- pp->sched.taskq = NULL;
-
- ASSERT(!pp->sched.exe_taskq);
- pp->sched.exe_taskq = ptqp;
-
- if (erts_smp_port_trylock(pp) == EBUSY) {
- erts_smp_runq_unlock(runq);
- erts_smp_port_lock(pp);
- erts_smp_runq_lock(runq);
- }
if (erts_sched_stat.enabled) {
ErtsSchedulerData *esdp = erts_get_scheduler_data();
@@ -741,77 +1575,94 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_spin_unlock(&erts_sched_stat.lock);
}
+ prepare_exec(pp, &execq, &processing_busy_q);
+
+ erts_smp_port_lock(pp);
+
/* trace port scheduling, in */
if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
trace_sched_ports(pp, am_in);
}
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ fpe_was_unmasked = erts_block_fpe();
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- ptp = pop_task(ptqp);
+ state = erts_atomic32_read_nob(&pp->state);
+ goto begin_handle_tasks;
- fpe_was_unmasked = erts_block_fpe();
+ while (1) {
+ erts_aint32_t task_state;
+ ErtsPortTask *ptp;
- while (ptp) {
- ASSERT(pp->sched.taskq != pp->sched.exe_taskq);
+ ptp = select_task_for_exec(pp, &execq, &processing_busy_q);
+ if (!ptp)
+ break;
+
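+	/*
+	 * Mark the task as executing. If it has already been aborted,
+	 * just free it and continue with the next task.
+	 */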
+ task_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ ERTS_PT_STATE_EXECUTING,
+ ERTS_PT_STATE_SCHEDULED);
+ if (task_state != ERTS_PT_STATE_SCHEDULED) {
+ ASSERT(task_state == ERTS_PT_STATE_ABORTED);
+ goto aborted_port_task;
+ }
reset_handle(ptp);
- erts_smp_runq_unlock(runq);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
ERTS_SMP_CHK_NO_PROC_LOCKS;
ASSERT(pp->drv_ptr);
switch (ptp->type) {
- case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
- reds += ERTS_PORT_REDS_FREE;
- erts_smp_runq_lock(runq);
-
- erts_unblock_fpe(fpe_was_unmasked);
- ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
- if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
- handle_remaining_tasks(runq, pp);
- ASSERT(!ptqp->first
- && (!pp->sched.taskq || !pp->sched.taskq->first));
- erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
- ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
-
- port_task_free(ptp);
- if (pp->sched.taskq)
- port_taskq_free(pp->sched.taskq);
- pp->sched.taskq = NULL;
-
- goto tasks_done;
case ERTS_PORT_TASK_TIMEOUT:
reds += ERTS_PORT_REDS_TIMEOUT;
- if (!(pp->status & ERTS_PORT_SFLGS_DEAD)) {
+ if (!(state & ERTS_PORT_SFLGS_DEAD)) {
DTRACE_DRIVER(driver_timeout, pp);
(*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
}
break;
case ERTS_PORT_TASK_INPUT:
reds += ERTS_PORT_REDS_INPUT;
- ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_ready_input, pp);
/* NOTE some windows drivers use ->ready_input for input and output */
- (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event);
+ (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
+ ptp->u.alive.td.io.event);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_OUTPUT:
reds += ERTS_PORT_REDS_OUTPUT;
- ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_ready_output, pp);
- (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event);
+ (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
+ ptp->u.alive.td.io.event);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_EVENT:
reds += ERTS_PORT_REDS_EVENT;
- ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_event, pp);
- (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data);
+ (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
+ ptp->u.alive.td.io.event,
+ ptp->u.alive.td.io.event_data);
io_tasks_executed++;
break;
+ case ERTS_PORT_TASK_PROC_SIG: {
+ ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
+ ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
+ if (!pp->sched.taskq.bpq)
+ reds += ptp->u.alive.td.psig.callback(pp,
+ state,
+ ERTS_PROC2PORT_SIG_EXEC,
+ sigdp);
+ else {
+ ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp);
+ reds += ptp->u.alive.td.psig.callback(pp,
+ state,
+ ERTS_PROC2PORT_SIG_EXEC,
+ sigdp);
+ dequeued_proc2port_data(pp, size);
+ }
+ break;
+ }
case ERTS_PORT_TASK_DIST_CMD:
reds += erts_dist_command(pp, CONTEXT_REDS-reds);
break;
@@ -822,33 +1673,31 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
break;
}
- if ((pp->status & ERTS_PORT_SFLG_CLOSING)
- && erts_is_port_ioq_empty(pp)) {
- reds += ERTS_PORT_REDS_TERMINATE;
- erts_terminate_port(pp);
- }
+ reds += erts_port_driver_callback_epilogue(pp, &state);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ aborted_port_task:
+ schedule_port_task_free(ptp);
-#ifdef ERTS_SMP
- if (pp->xports)
- erts_smp_xports_unlock(pp);
- ASSERT(!pp->xports);
-#endif
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ begin_handle_tasks:
+ if (state & ERTS_PORT_SFLG_FREE) {
+ reds += ERTS_PORT_REDS_FREE;
- port_task_free(ptp);
+ begin_port_cleanup(pp, &execq);
- erts_smp_runq_lock(runq);
+ break;
+ }
- ptp = pop_task(ptqp);
+ if (reds >= CONTEXT_REDS)
+ break;
}
- tasks_done:
-
erts_unblock_fpe(fpe_was_unmasked);
+ /* trace port scheduling, out */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports(pp, am_out);
+ }
+
if (io_tasks_executed) {
ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
>= io_tasks_executed);
@@ -856,15 +1705,19 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
-1*io_tasks_executed);
}
- *curr_port_pp = NULL;
-
#ifdef ERTS_SMP
ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif
- if (!pp->sched.taskq) {
- ASSERT(pp->sched.exe_taskq);
- pp->sched.exe_taskq = NULL;
+ active = finalize_exec(pp, &execq, processing_busy_q);
+
+ erts_port_release(pp);
+
+ *curr_port_pp = NULL;
+
+ erts_smp_runq_lock(runq);
+
+ if (!active) {
if (erts_system_profile_flags.runnable_ports)
profile_runnable_port(pp, am_inactive);
}
@@ -873,16 +1726,13 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
ErtsRunQueue *xrunq;
#endif
- ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
- ASSERT(pp->sched.taskq->first);
+ ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
if (!xrunq) {
#endif
enqueue_port(runq, pp);
- ASSERT(pp->sched.exe_taskq);
- pp->sched.exe_taskq = NULL;
/* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
}
@@ -892,49 +1742,20 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_unlock(runq);
xrunq = erts_port_runq(pp);
- if (xrunq) {
- enqueue_port(xrunq, pp);
- ASSERT(pp->sched.exe_taskq);
- pp->sched.exe_taskq = NULL;
- erts_smp_runq_unlock(xrunq);
- erts_smp_notify_inc_runq(xrunq);
- }
+ ASSERT(xrunq);
+ enqueue_port(xrunq, pp);
+ erts_smp_runq_unlock(xrunq);
+ erts_smp_notify_inc_runq(xrunq);
erts_smp_runq_lock(runq);
}
#endif
}
+ done:
res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
!= (erts_aint_t) 0);
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
-
- port_taskq_free(ptqp);
-
- /* trace port scheduling, out */
- if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports(pp, am_out);
- }
-#ifndef ERTS_SMP
- erts_port_release(pp);
-#else
- {
- erts_aint_t refc;
- erts_smp_mtx_unlock(pp->lock);
- refc = erts_smp_atomic_dec_read_nob(&pp->refc);
- ASSERT(refc >= 0);
- if (refc == 0) {
- erts_smp_runq_unlock(runq);
- erts_port_cleanup(pp); /* Might aquire runq lock */
- erts_smp_runq_lock(runq);
- res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
- != (erts_aint_t) 0);
- }
- }
-#endif
-
- done:
runq->scheduler->reductions += reds;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
@@ -943,78 +1764,146 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
return res;
}
-/*
- * Handle remaining tasks after a free task.
- */
+#ifdef ERTS_SMP
+static void
+release_port(void *vport)
+{
+ erts_port_dec_refc((Port *) vport);
+}
+#endif
static void
-handle_remaining_tasks(ErtsRunQueue *runq, Port *pp)
+begin_port_cleanup(Port *pp, ErtsPortTask **execqp)
{
- int i;
- ErtsPortTask *ptp;
- ErtsPortTaskQueue *ptqps[] = {pp->sched.exe_taskq, pp->sched.taskq};
+ int i, max;
+ ErtsPortTask *qs[2];
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
- for (i = 0; i < sizeof(ptqps)/sizeof(ErtsPortTaskQueue *); i++) {
- if (!ptqps[i])
- continue;
- ptp = pop_task(ptqps[i]);
- while (ptp) {
+ /*
+ * Handle remaining tasks...
+ */
+
+ max = 0;
+ if (execqp && *execqp) {
+ qs[max++] = *execqp;
+ *execqp = NULL;
+ }
+
+ erts_port_task_sched_lock(&pp->sched);
+ qs[max] = pp->sched.taskq.in.first;
+ pp->sched.taskq.in.first = NULL;
+ pp->sched.taskq.in.last = NULL;
+ erts_port_task_sched_unlock(&pp->sched);
+ if (qs[max])
+ max++;
+
+ for (i = 0; i < max; i++) {
+ while (1) {
+ erts_aint32_t state;
+ ErtsPortTask *ptp = qs[i];
+ if (!ptp)
+ break;
+
+ qs[i] = ptp->u.alive.next;
+
+ /* Normal case here is aborted tasks... */
+ state = erts_smp_atomic32_read_nob(&ptp->state);
+ if (state == ERTS_PT_STATE_ABORTED)
+ goto aborted_port_task;
+
+ state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ ERTS_PT_STATE_EXECUTING,
+ ERTS_PT_STATE_SCHEDULED);
+ if (state != ERTS_PT_STATE_SCHEDULED) {
+ ASSERT(state == ERTS_PT_STATE_ABORTED);
+ goto aborted_port_task;
+ }
+
reset_handle(ptp);
- erts_smp_runq_unlock(runq);
switch (ptp->type) {
- case ERTS_PORT_TASK_FREE:
case ERTS_PORT_TASK_TIMEOUT:
break;
case ERTS_PORT_TASK_INPUT:
- erts_stale_drv_select(pp->id, ptp->event, DO_READ, 1);
+ erts_stale_drv_select(pp->common.id,
+ ptp->u.alive.td.io.event,
+ DO_READ,
+ 1);
break;
case ERTS_PORT_TASK_OUTPUT:
- erts_stale_drv_select(pp->id, ptp->event, DO_WRITE, 1);
+ erts_stale_drv_select(pp->common.id,
+ ptp->u.alive.td.io.event,
+ DO_WRITE,
+ 1);
break;
case ERTS_PORT_TASK_EVENT:
- erts_stale_drv_select(pp->id, ptp->event, 0, 1);
+ erts_stale_drv_select(pp->common.id,
+ ptp->u.alive.td.io.event,
+ 0,
+ 1);
break;
case ERTS_PORT_TASK_DIST_CMD:
break;
+ case ERTS_PORT_TASK_PROC_SIG: {
+ ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
+ if (!pp->sched.taskq.bpq)
+ ptp->u.alive.td.psig.callback(NULL,
+ ERTS_PORT_SFLG_INVALID,
+ ERTS_PROC2PORT_SIG_ABORT_CLOSED,
+ sigdp);
+ else {
+ ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp);
+ ptp->u.alive.td.psig.callback(NULL,
+ ERTS_PORT_SFLG_INVALID,
+ ERTS_PROC2PORT_SIG_ABORT_CLOSED,
+ sigdp);
+ aborted_proc2port_data(pp, size);
+ }
+ break;
+ }
default:
erl_exit(ERTS_ABORT_EXIT,
"Invalid port task type: %d\n",
(int) ptp->type);
}
- port_task_free(ptp);
-
- erts_smp_runq_lock(runq);
- ptp = pop_task(ptqps[i]);
+ aborted_port_task:
+ schedule_port_task_free(ptp);
}
}
- ASSERT(!pp->sched.taskq || !pp->sched.taskq->first);
+ erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_TASKS);
+
+ /*
+ * Schedule cleanup of port structure...
+ */
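+    /*
+     * In the SMP case the final reference count decrement is deferred
+     * with a thread progress later op, so the port structure is not
+     * released while other managed threads may still be referencing it.
+     */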
+#ifdef ERTS_SMP
+ erts_schedule_thr_prgr_later_op(release_port,
+ (void *) pp,
+ &pp->common.u.release);
+#else
+ pp->cleanup = 1;
+#endif
}
int
erts_port_is_scheduled(Port *pp)
{
- int res;
- ErtsRunQueue *runq = erts_port_runq(pp);
- if (!runq)
- return 0;
- res = pp->sched.taskq || pp->sched.exe_taskq;
- erts_smp_runq_unlock(runq);
- return res;
+ erts_aint32_t flags = erts_smp_atomic32_read_acqb(&pp->sched.flags);
+ return (flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)) != 0;
}
#ifdef ERTS_SMP
+
void
erts_enqueue_port(ErtsRunQueue *rq, Port *pp)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
ASSERT(rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
- ASSERT(pp->sched.in_runq);
+ ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ);
enqueue_port(rq, pp);
}
@@ -1026,7 +1915,8 @@ erts_dequeue_port(ErtsRunQueue *rq)
pp = pop_port(rq);
ASSERT(!pp
|| rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
- ASSERT(!pp || pp->sched.in_runq);
+ ASSERT(!pp || (erts_smp_atomic32_read_nob(&pp->sched.flags)
+ & ERTS_PTS_FLG_IN_RUNQ));
return pp;
}
@@ -1041,5 +1931,5 @@ erts_port_task_init(void)
erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks,
(erts_aint_t) 0);
init_port_task_alloc();
- init_port_taskq_alloc();
+ init_busy_caller_table_alloc();
}
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index fd88b1c1ff..ae6cd69ae2 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -27,6 +27,9 @@
#define ERTS_PORT_TASK_H_BASIC_TYPES__
#include "erl_sys_driver.h"
#include "erl_smp.h"
+#define ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_port.h"
+#undef ERL_PORT_GET_PORT_TYPE_ONLY__
typedef erts_smp_atomic_t ErtsPortTaskHandle;
#endif
@@ -43,13 +46,19 @@ typedef erts_smp_atomic_t ErtsPortTaskHandle;
#define ERTS_INCLUDE_SCHEDULER_INTERNALS
#endif
+#define ERTS_PT_FLG_WAIT_BUSY (1 << 0)
+#define ERTS_PT_FLG_SIG_DEP (1 << 1)
+#define ERTS_PT_FLG_NOSUSPEND (1 << 2)
+#define ERTS_PT_FLG_REF (1 << 3)
+#define ERTS_PT_FLG_BAD_OUTPUT (1 << 4)
+
typedef enum {
- ERTS_PORT_TASK_FREE,
ERTS_PORT_TASK_INPUT,
ERTS_PORT_TASK_OUTPUT,
ERTS_PORT_TASK_EVENT,
ERTS_PORT_TASK_TIMEOUT,
- ERTS_PORT_TASK_DIST_CMD
+ ERTS_PORT_TASK_DIST_CMD,
+ ERTS_PORT_TASK_PROC_SIG
} ErtsPortTaskType;
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
@@ -57,19 +66,76 @@ typedef enum {
extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#endif
+#define ERTS_PTS_FLG_IN_RUNQ (((erts_aint32_t) 1) << 0)
+#define ERTS_PTS_FLG_EXEC (((erts_aint32_t) 1) << 1)
+#define ERTS_PTS_FLG_HAVE_TASKS (((erts_aint32_t) 1) << 2)
+#define ERTS_PTS_FLG_EXIT (((erts_aint32_t) 1) << 3)
+#define ERTS_PTS_FLG_BUSY_PORT (((erts_aint32_t) 1) << 4)
+#define ERTS_PTS_FLG_BUSY_PORT_Q (((erts_aint32_t) 1) << 5)
+#define ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q (((erts_aint32_t) 1) << 6)
+#define ERTS_PTS_FLG_HAVE_BUSY_TASKS (((erts_aint32_t) 1) << 7)
+#define ERTS_PTS_FLG_HAVE_NS_TASKS (((erts_aint32_t) 1) << 8)
+#define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9)
+#define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10)
+
+#define ERTS_PTS_FLGS_BUSY \
+ (ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q)
+
+#define ERTS_PTS_FLGS_FORCE_SCHEDULE_OP \
+ (ERTS_PTS_FLG_EXIT \
+ | ERTS_PTS_FLG_HAVE_BUSY_TASKS \
+ | ERTS_PTS_FLG_HAVE_TASKS \
+ | ERTS_PTS_FLG_EXEC \
+ | ERTS_PTS_FLG_FORCE_SCHED)
+
+#define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH 8192
+#define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW 4096
+
+typedef struct {
+ ErlDrvSizeT high;
+ erts_smp_atomic_t low;
+ erts_smp_atomic_t size;
+} ErtsPortTaskBusyPortQ;
+
typedef struct ErtsPortTask_ ErtsPortTask;
-typedef struct ErtsPortTaskQueue_ ErtsPortTaskQueue;
+typedef struct ErtsPortTaskBusyCallerTable_ ErtsPortTaskBusyCallerTable;
+typedef struct ErtsPortTaskHandleList_ ErtsPortTaskHandleList;
typedef struct {
Port *next;
- int in_runq;
- ErtsPortTaskQueue *taskq;
- ErtsPortTaskQueue *exe_taskq;
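+    /*
+     * Hedged note: tasks are enqueued on the 'in' queue (protected by
+     * the port task scheduler lock) and later moved to the 'local'
+     * queue by the scheduler executing the port; 'busy' appears to hold
+     * tasks held back by a busy port.
+     */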
+ struct {
+ struct {
+ struct {
+ ErtsPortTask *first;
+ ErtsPortTask *last;
+ ErtsPortTaskBusyCallerTable *table;
+ ErtsPortTaskHandleList *nosuspend;
+ } busy;
+ ErtsPortTask *first;
+ } local;
+ struct {
+ ErtsPortTask *first;
+ ErtsPortTask *last;
+ } in;
+ ErtsPortTaskBusyPortQ *bpq;
+ } taskq;
+ erts_smp_atomic32_t flags;
+#ifdef ERTS_SMP
+ erts_mtx_t mtx;
+#endif
} ErtsPortTaskSched;
ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp);
ERTS_GLB_INLINE int erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp);
-ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp);
+ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
+ ErtsPortTaskBusyPortQ *bpq);
+ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp,
+ Eterm id);
+ERTS_GLB_INLINE void erts_port_task_fini_sched(ErtsPortTaskSched *ptsp);
+ERTS_GLB_INLINE void erts_port_task_sched_lock(ErtsPortTaskSched *ptsp);
+ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp);
+ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp);
+
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
#endif
@@ -88,13 +154,75 @@ erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp)
return ((void *) erts_smp_atomic_read_nob(pthp)) != NULL;
}
+ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
+ ErtsPortTaskBusyPortQ *bpq)
+{
+ if (bpq) {
+ erts_aint_t low = (erts_aint_t) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW;
+ erts_smp_atomic_init_nob(&bpq->low, low);
+ bpq->high = (ErlDrvSizeT) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH;
+ erts_smp_atomic_init_nob(&bpq->size, (erts_aint_t) 0);
+ }
+ ptsp->taskq.bpq = bpq;
+}
+
ERTS_GLB_INLINE void
-erts_port_task_init_sched(ErtsPortTaskSched *ptsp)
+erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
{
+#ifdef ERTS_SMP
+ char *lock_str = "port_sched_lock";
+#endif
ptsp->next = NULL;
- ptsp->in_runq = 0;
- ptsp->taskq = NULL;
- ptsp->exe_taskq = NULL;
+ ptsp->taskq.local.busy.first = NULL;
+ ptsp->taskq.local.busy.last = NULL;
+ ptsp->taskq.local.busy.table = NULL;
+ ptsp->taskq.local.busy.nosuspend = NULL;
+ ptsp->taskq.local.first = NULL;
+ ptsp->taskq.in.first = NULL;
+ ptsp->taskq.in.last = NULL;
+ erts_smp_atomic32_init_nob(&ptsp->flags, 0);
+#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
+ lock_str = NULL;
+#endif
+ erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_port_task_sched_lock(ErtsPortTaskSched *ptsp)
+{
+#ifdef ERTS_SMP
+ erts_mtx_lock(&ptsp->mtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp)
+{
+#ifdef ERTS_SMP
+ erts_mtx_unlock(&ptsp->mtx);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ return erts_lc_mtx_is_locked(&ptsp->mtx);
+#else
+ return 0;
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+erts_port_task_fini_sched(ErtsPortTaskSched *ptsp)
+{
+#ifdef ERTS_SMP
+ erts_mtx_destroy(&ptsp->mtx);
+#endif
}
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
@@ -115,14 +243,16 @@ int erts_port_task_execute(ErtsRunQueue *, Port **);
void erts_port_task_init(void);
#endif
-int erts_port_task_abort(Eterm id, ErtsPortTaskHandle *);
+int erts_port_task_abort(ErtsPortTaskHandle *);
+void erts_port_task_abort_nosuspend_tasks(Port *);
+
int erts_port_task_schedule(Eterm,
ErtsPortTaskHandle *,
ErtsPortTaskType,
- ErlDrvEvent,
- ErlDrvEventData);
+ ...);
void erts_port_task_free_port(Port *);
int erts_port_is_scheduled(Port *);
+ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data(void);
#ifdef ERTS_SMP
void erts_enqueue_port(ErtsRunQueue *rq, Port *pp);
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 61554780c4..aaca4b5f59 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -42,6 +42,7 @@
#include "erl_thr_queue.h"
#include "erl_async.h"
#include "dtrace-wrapper.h"
+#include "erl_ptab.h"
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
#define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
@@ -136,155 +137,16 @@ do { \
#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
(RUNQ_READ_LEN(&(RQ)->ports.info.len) == 0 && (RQ)->misc.start == NULL)
+const Process erts_invalid_process = {{ERTS_INVALID_PID}};
+
extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-#ifdef ARCH_32
-
-union {
- erts_smp_dw_atomic_t pid_data;
- char align[ERTS_CACHE_LINE_SIZE];
-} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-
-
-static ERTS_INLINE Uint64
-dw_aint_to_uint64(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-static void
-unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
- dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
-}
-
-static ERTS_INLINE void
-last_pid_data_init_nob(Uint64 val)
-{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_init_nob(&last.pid_data, &dw);
-}
-
-static ERTS_INLINE void
-last_pid_data_set_relb(Uint64 val)
-{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_set_relb(&last.pid_data, &dw);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_read_nob(void)
-{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_nob(&last.pid_data, &dw);
- return dw_aint_to_uint64(&dw);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_read_acqb(void)
-{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_acqb(&last.pid_data, &dw);
- return dw_aint_to_uint64(&dw);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_cmpxchg_relb(Uint64 new, Uint64 exp)
-{
- erts_dw_aint_t dw_new, dw_xchg;
-
- unint64_to_dw_aint(&dw_new, new);
- unint64_to_dw_aint(&dw_xchg, exp);
-
- if (erts_smp_dw_atomic_cmpxchg_relb(&last.pid_data, &dw_new, &dw_xchg))
- return exp;
- else
- return dw_aint_to_uint64(&dw_xchg);
-}
-
-#elif defined(ARCH_64)
-
-union {
- erts_smp_atomic_t pid_data;
- char align[ERTS_CACHE_LINE_SIZE];
-} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-
-static ERTS_INLINE void
-last_pid_data_init_nob(Uint64 val)
-{
- erts_smp_atomic_init_nob(&last.pid_data, (erts_aint_t) val);
-}
-
-static ERTS_INLINE void
-last_pid_data_set_relb(Uint64 val)
-{
- erts_smp_atomic_set_relb(&last.pid_data, (erts_aint_t) val);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_read_nob(void)
-{
- return (Uint64) erts_smp_atomic_read_nob(&last.pid_data);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_read_acqb(void)
-{
- return (Uint64) erts_smp_atomic_read_acqb(&last.pid_data);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_cmpxchg_relb(Uint64 new, Uint64 exp)
-{
- return (Uint64) erts_smp_atomic_cmpxchg_relb(&last.pid_data,
- (erts_aint_t) new,
- (erts_aint_t) exp);
-}
-
-#else
-# error "Not 64-bit, nor 32-bit architecture..."
-#endif
-
-static ERTS_INLINE int
-last_pid_data_cmp(Uint64 lpd1, Uint64 lpd2)
-{
- Uint64 lpd1_wrap;
-
- if (lpd1 == lpd2)
- return 0;
-
- lpd1_wrap = lpd1 + (((Uint64) 1) << 63);
-
- if (lpd1 < lpd1_wrap)
- return (lpd1 < lpd2 && lpd2 < lpd1_wrap) ? -1 : 1;
- else
- return (lpd1_wrap <= lpd2 && lpd2 < lpd1) ? 1 : -1;
-}
-
-
-#define ERTS_PID_DATA_MASK__ ((1 << _PID_DATA_SIZE) - 1)
-
int erts_sched_compact_load;
Uint erts_no_schedulers;
-ErtsProcTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
int erts_sched_thread_suggested_stack_size = -1;
@@ -371,8 +233,6 @@ erts_sched_stat_t erts_sched_stat;
static erts_tsd_key_t sched_data_key;
#endif
-erts_smp_rwmtx_t erts_proc_tab_rwmtx;
-
static erts_smp_atomic32_t function_calls;
#ifdef ERTS_SMP
@@ -409,29 +269,6 @@ struct erts_system_profile_flags_t erts_system_profile_flags;
#if ERTS_MAX_PROCESSES > 0x7fffffff
#error "Need to store process_count in another type"
#endif
-static erts_smp_atomic32_t process_count;
-
-typedef struct ErtsTermProcElement_ ErtsTermProcElement;
-struct ErtsTermProcElement_ {
- ErtsTermProcElement *next;
- ErtsTermProcElement *prev;
- int ix;
- union {
- struct {
- Eterm pid;
- Uint64 spawned;
- Uint64 exited;
- } process;
- struct {
- Uint64 interval;
- } bif_invocation;
- } u;
-};
-
-static struct {
- ErtsTermProcElement *start;
- ErtsTermProcElement *end;
-} saved_term_procs;
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_op_list,
ErtsMiscOpList,
@@ -493,8 +330,6 @@ do { \
* Local functions.
*/
-static void init_processes_bif(void);
-static void save_terminating_process(Process *p);
static void exec_misc_ops(ErtsRunQueue *);
static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
@@ -562,39 +397,17 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq)
}
#endif
-static erts_interval_t *proc_interval;
-
-static void
-proc_interval_init(void)
-{
- proc_interval = erts_alloc_permanent_cache_aligned(
- ERTS_ALC_T_PROC_INTERVAL,
- sizeof(erts_interval_t));
- erts_smp_interval_init(proc_interval);
-}
-
-static ERTS_INLINE Uint64
-get_proc_interval(void)
-{
- return erts_smp_current_interval_nob(proc_interval);
-}
static ERTS_INLINE Uint64
ensure_later_proc_interval(Uint64 interval)
{
- return erts_smp_ensure_later_interval_nob(proc_interval, interval);
-}
-
-static ERTS_INLINE Uint64
-step_proc_interval(void)
-{
- return erts_smp_step_interval_nob(proc_interval);
+ return erts_smp_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval);
}
Uint64
erts_get_proc_interval(void)
{
- return get_proc_interval();
+ return erts_smp_current_interval_nob(erts_ptab_interval(&erts_proc));
}
Uint64
@@ -606,7 +419,7 @@ erts_ensure_later_proc_interval(Uint64 interval)
Uint64
erts_step_proc_interval(void)
{
- return step_proc_interval();
+ return erts_smp_step_interval_nob(erts_ptab_interval(&erts_proc));
}
void
@@ -654,20 +467,18 @@ erts_pre_init_process(void)
#endif
}
+#ifdef ERTS_SMP
+static void
+release_process(void *vproc)
+{
+ erts_smp_proc_dec_refc((Process *) vproc);
+}
+#endif
+
/* initialize the scheduler */
void
-erts_init_process(int ncpu)
+erts_init_process(int ncpu, int proc_tab_size)
{
- int proc_tab_sz;
- int max_proc_bits;
- int proc_bits = ERTS_PROC_BITS;
- erts_smp_atomic_t *proc_entry;
- char *proc_tab_end;
- erts_smp_rwmtx_opt_t proc_tab_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- proc_tab_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- proc_tab_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
-
- proc_interval_init();
#ifdef ERTS_SMP
erts_disable_proc_not_running_opt = 0;
@@ -676,52 +487,16 @@ erts_init_process(int ncpu)
init_proclist_alloc();
- erts_smp_atomic32_init_nob(&process_count, 0);
-
- if (erts_use_r9_pids_ports)
- proc_bits = ERTS_R9_PROC_BITS;
-
- if (erts_proc.max > (1 << proc_bits))
- erts_proc.max = 1 << proc_bits;
-
- proc_tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(erts_proc.max
- * sizeof(erts_smp_atomic_t));
- erts_proc.tab = erts_alloc(ERTS_ALC_T_PROC_TABLE, proc_tab_sz);
- proc_tab_end = ((char *) erts_proc.tab) + proc_tab_sz;
- proc_entry = erts_proc.tab;
- while (proc_tab_end > ((char *) proc_entry)) {
- erts_smp_atomic_init_nob(proc_entry, ERTS_AINT_NULL);
- proc_entry++;
- }
-
- erts_smp_rwmtx_init_opt(&erts_proc_tab_rwmtx,
- &proc_tab_rwmtx_opts,
- "proc_tab");
- last_pid_data_init_nob(~((Uint64) 0));
-
- max_proc_bits = erts_fit_in_bits_int32((Sint32) erts_proc.max - 1);
-
- erts_proc.tab_cache_lines = proc_tab_sz/ERTS_CACHE_LINE_SIZE;
- erts_proc.pix_per_cache_line = ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t);
- if ((erts_proc.max & (erts_proc.max - 1))
- | (erts_proc.pix_per_cache_line & (erts_proc.pix_per_cache_line - 1))) {
- /*
- * erts_proc.max or erts_proc.pix_per_cache_line
- * not a power of 2 :(
- */
- erts_proc.pix_cl_mask = 0;
- erts_proc.pix_cl_shift = 0;
- erts_proc.pix_cli_mask = 0;
- erts_proc.pix_cli_shift = 0;
- }
- else {
- ASSERT((erts_proc.tab_cache_lines
- & (erts_proc.tab_cache_lines - 1)) == 0);
- erts_proc.pix_cl_mask = erts_proc.tab_cache_lines-1;
- erts_proc.pix_cl_shift = erts_fit_in_bits_int32(erts_proc.pix_per_cache_line-1);
- erts_proc.pix_cli_shift = erts_fit_in_bits_int32(erts_proc.pix_cl_mask);
- erts_proc.pix_cli_mask = (1 << (max_proc_bits - erts_proc.pix_cli_shift)) - 1;
- }
+ erts_ptab_init_table(&erts_proc,
+ ERTS_ALC_T_PROC_TABLE,
+#ifdef ERTS_SMP
+ release_process,
+#else
+ NULL,
+#endif
+ (ErtsPTabElementCommon *) &erts_invalid_process.common,
+ proc_tab_size,
+ "process_table");
last_reductions = 0;
last_exact_reductions = 0;
@@ -732,7 +507,6 @@ void
erts_late_init_process(void)
{
int ix;
- init_processes_bif();
erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
@@ -980,9 +754,9 @@ static ERTS_INLINE ErtsProcList *
proclist_create(Process *p)
{
ErtsProcList *plp = proclist_alloc();
- ensure_later_proc_interval(p->started_interval);
- plp->pid = p->id;
- plp->started_interval = p->started_interval;
+ ensure_later_proc_interval(p->common.u.alive.started_interval);
+ plp->pid = p->common.id;
+ plp->started_interval = p->common.u.alive.started_interval;
return plp;
}
@@ -992,12 +766,6 @@ proclist_destroy(ErtsProcList *plp)
proclist_free(plp);
}
-static ERTS_INLINE int
-proclist_same(ErtsProcList *plp, Process *p)
-{
- return plp->pid == p->id && plp->started_interval == p->started_interval;
-}
-
ErtsProcList *
erts_proclist_create(Process *p)
{
@@ -1010,12 +778,6 @@ erts_proclist_destroy(ErtsProcList *plp)
proclist_destroy(plp);
}
-int
-erts_proclist_same(ErtsProcList *plp, Process *p)
-{
- return proclist_same(plp, p);
-}
-
void *
erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
{
@@ -1776,37 +1538,32 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS);
awdp->esdp->run_queue->halt_in_progress = 1;
if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) {
- int i;
+ int i, max = erts_ptab_max(&erts_port);
erts_smp_atomic32_set_nob(&erts_halt_progress, 1);
- for (i = 0; i < erts_max_ports; i++) {
- Port *prt = &erts_port[i];
- erts_smp_port_state_lock(prt);
- if ((prt->status & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
- | ERTS_PORT_SFLG_HALT))) {
- erts_smp_port_state_unlock(prt);
- continue;
- }
- /* We need to set the halt flag - get the port lock */
-#ifdef ERTS_SMP
- erts_smp_atomic_inc_nob(&prt->refc);
-#endif
- erts_smp_port_state_unlock(prt);
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(prt->lock);
-#endif
- if ((prt->status & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
- | ERTS_PORT_SFLG_HALT))) {
- erts_port_release(prt);
+ for (i = 0; i < max; i++) {
+ erts_aint32_t state;
+ Port *prt = erts_pix2port(i);
+ if (!prt)
continue;
- }
- erts_port_status_bor_set(prt, ERTS_PORT_SFLG_HALT);
- erts_smp_atomic32_inc_nob(&erts_halt_progress);
- if (prt->status & (ERTS_PORT_SFLG_EXITING
- | ERTS_PORT_SFLG_CLOSING)) {
- erts_port_release(prt);
+ state = erts_atomic32_read_acqb(&prt->state);
+ if (state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ | ERTS_PORT_SFLG_HALT))
continue;
+
+ /* We need to set the halt flag - get the port lock */
+
+ erts_smp_port_lock(prt);
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ | ERTS_PORT_SFLG_HALT))) {
+ state = erts_atomic32_read_bor_relb(&prt->state,
+ ERTS_PORT_SFLG_HALT);
+ erts_smp_atomic32_inc_nob(&erts_halt_progress);
+ if (!(state & (ERTS_PORT_SFLG_EXITING|ERTS_PORT_SFLG_CLOSING)))
+ erts_deliver_port_exit(prt, prt->common.id, am_killed, 0);
}
- erts_do_exit_port(prt, prt->id, am_killed);
+
erts_port_release(prt);
}
if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) {
@@ -3183,12 +2940,12 @@ resume_run_queue(ErtsRunQueue *rq)
erts_smp_runq_lock(rq);
- (void) ERTS_RUNQ_FLGS_MASK_SET(rq,
- (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK
- | ERTS_RUNQ_FLG_SUSPENDED),
- (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
+ (void) ERTS_RUNQ_FLGS_READ_BSET(rq,
+ (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_SUSPENDED),
+ (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
@@ -4210,7 +3967,7 @@ erts_fprintf(stderr, "--------------------------------\n");
ERTS_DBG_CHK_FULL_REDS_HISTORY(rq);
rq->out_of_work_count = 0;
- (void) ERTS_RUNQ_FLGS_MASK_SET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
+ (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
rq->max_len = rq->len;
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
@@ -4896,6 +4653,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
erts_smp_atomic32_init_relb(&erts_halt_progress, -1);
erts_halt_code = 0;
+
+#if !defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ erts_lc_set_thread_name("scheduler 1");
+#endif
+
}
ErtsRunQueue *
@@ -5573,8 +5335,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
else if (on) { /* ------ BLOCK ------ */
if (schdlr_sspnd.msb.procs) {
plp = proclist_create(p);
- plp->next = schdlr_sspnd.msb.procs;
- schdlr_sspnd.msb.procs = plp;
+ erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp);
p->flags |= F_HAVE_BLCKD_MSCHED;
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
@@ -5659,8 +5420,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
- plp->next = schdlr_sspnd.msb.procs;
- schdlr_sspnd.msb.procs = plp;
+ erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp);
ASSERT(p->scheduler_data);
}
}
@@ -5671,20 +5431,16 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
}
else { /* ------ UNBLOCK ------ */
if (p->flags & F_HAVE_BLCKD_MSCHED) {
- ErtsProcList **plpp = &schdlr_sspnd.msb.procs;
- plp = schdlr_sspnd.msb.procs;
+ ErtsProcList *plp = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
while (plp) {
- if (!proclist_same(plp, p)){
- plpp = &plp->next;
- plp = plp->next;
- }
- else {
- *plpp = plp->next;
- proclist_destroy(plp);
+ ErtsProcList *tmp_plp = plp;
+ plp = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp);
+ if (erts_proclist_same(tmp_plp, p)) {
+ erts_proclist_remove(&schdlr_sspnd.msb.procs, tmp_plp);
+ proclist_destroy(tmp_plp);
if (!all)
break;
- plp = *plpp;
}
}
}
@@ -5753,23 +5509,25 @@ erts_multi_scheduling_blockers(Process *p)
Eterm res = NIL;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (schdlr_sspnd.msb.procs) {
+ if (!erts_proclist_is_empty(schdlr_sspnd.msb.procs)) {
Eterm *hp, *hp_end;
ErtsProcList *plp1, *plp2;
- Uint max_size;
- ASSERT(schdlr_sspnd.msb.procs);
- for (max_size = 0, plp1 = schdlr_sspnd.msb.procs;
+ Uint max_size = 0;
+
+ for (plp1 = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
plp1;
- plp1 = plp1->next) {
+ plp1 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp1)) {
max_size += 2;
}
ASSERT(max_size);
hp = HAlloc(p, max_size);
hp_end = hp + max_size;
- for (plp1 = schdlr_sspnd.msb.procs; plp1; plp1 = plp1->next) {
- for (plp2 = schdlr_sspnd.msb.procs;
+ for (plp1 = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
+ plp1;
+ plp1 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp1)) {
+ for (plp2 = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
plp2->pid != plp1->pid;
- plp2 = plp2->next);
+ plp2 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp2));
if (plp2 == plp1) {
res = CONS(hp, plp1->pid, res);
hp += 2;
@@ -6021,7 +5779,7 @@ handle_pend_sync_suspend(Process *suspendee,
ASSERT(is_nil(suspender->suspendee));
if (suspendee_alive) {
erts_suspend(suspendee, suspendee_locks, NULL);
- suspender->suspendee = suspendee->id;
+ suspender->suspendee = suspendee->common.id;
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
@@ -6042,7 +5800,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ERTS_SMP_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN);
ERTS_SMP_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS));
- if (c_p->id == pid)
+ if (c_p->common.id == pid)
return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
if (c_p_locks & ERTS_PROC_LOCK_STATUS)
@@ -6094,7 +5852,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
*/
if (!c_p->pending_suspenders) {
/* Mark rp pending for suspend by c_p */
- add_pend_suspend(rp, c_p->id, handle_pend_sync_suspend);
+ add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
ASSERT(is_nil(c_p->suspendee));
/* Suspend c_p; when rp is suspended c_p will be resumed. */
@@ -6200,20 +5958,20 @@ handle_pend_bif_sync_suspend(Process *suspendee,
ASSERT(is_nil(suspender->suspendee));
if (!suspendee_alive)
erts_delete_suspend_monitor(&suspender->suspend_monitors,
- suspendee->id);
+ suspendee->common.id);
else {
#ifdef DEBUG
int res;
#endif
ErtsSuspendMonitor *smon;
smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
- suspendee->id);
+ suspendee->common.id);
#ifdef DEBUG
res =
#endif
do_bif_suspend_process(suspendee, smon, suspendee);
ASSERT(!smon || res != 0);
- suspender->suspendee = suspendee->id;
+ suspender->suspendee = suspendee->common.id;
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
@@ -6242,14 +6000,14 @@ handle_pend_bif_async_suspend(Process *suspendee,
ASSERT(is_nil(suspender->suspendee));
if (!suspendee_alive)
erts_delete_suspend_monitor(&suspender->suspend_monitors,
- suspendee->id);
+ suspendee->common.id);
else {
#ifdef DEBUG
int res;
#endif
ErtsSuspendMonitor *smon;
smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
- suspendee->id);
+ suspendee->common.id);
#ifdef DEBUG
res =
#endif
@@ -6294,7 +6052,7 @@ suspend_process_2(BIF_ALIST_2)
int unless_suspending = 0;
- if (BIF_P->id == BIF_ARG_1)
+ if (BIF_P->common.id == BIF_ARG_1)
goto badarg; /* We are not allowed to suspend ourselves */
if (is_not_nil(BIF_ARG_2)) {
@@ -6390,7 +6148,7 @@ suspend_process_2(BIF_ALIST_2)
if (!do_bif_suspend_process(BIF_P, smon, suspendee))
add_pend_suspend(suspendee,
- BIF_P->id,
+ BIF_P->common.id,
handle_pend_bif_async_suspend);
res = am_true;
@@ -6453,7 +6211,7 @@ suspend_process_2(BIF_ALIST_2)
else {
/* Mark suspendee pending for suspend by BIF_P */
add_pend_suspend(suspendee,
- BIF_P->id,
+ BIF_P->common.id,
handle_pend_bif_sync_suspend);
ASSERT(is_nil(BIF_P->suspendee));
@@ -6527,7 +6285,7 @@ resume_process_1(BIF_ALIST_1)
Process *suspendee;
int is_active;
- if (BIF_P->id == BIF_ARG_1)
+ if (BIF_P->common.id == BIF_ARG_1)
BIF_ERROR(BIF_P, BADARG);
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
@@ -6652,7 +6410,8 @@ erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
for (i = 0; i < erts_no_schedulers; i++) {
esdp = ERTS_SCHEDULER_IX(i);
erts_smp_runq_lock(esdp->run_queue);
- if (esdp->free_process && esdp->free_process->id == rpid) {
+ if (esdp->free_process
+ && esdp->free_process->common.id == rpid) {
res = am_free;
erts_smp_runq_unlock(esdp->run_queue);
break;
@@ -6673,27 +6432,31 @@ erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
void
erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
{
-#ifdef DEBUG
- int res;
-#endif
+ int suspend;
+
ASSERT(c_p == erts_get_current_process());
ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+ if (busy_port)
+ suspend = erts_save_suspend_process_on_port(busy_port, c_p);
+ else
+ suspend = 1;
+
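+    /*
+     * Suspend only if the process actually was recorded as suspended
+     * on the busy port; the port may, for instance, already be
+     * terminating.
+     */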
+ if (suspend) {
#ifdef DEBUG
- res =
+ int res =
#endif
- suspend_process(c_p, c_p);
-
- ASSERT(res);
-
- if (busy_port)
- erts_wake_process_later(busy_port, c_p);
+ suspend_process(c_p, c_p);
+ ASSERT(res);
+ }
if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ if (suspend && busy_port && erts_system_monitor_flags.busy_port)
+ monitor_generic(c_p, am_busy_port, busy_port->common.id);
}
void
@@ -6708,16 +6471,19 @@ erts_resume(Process* process, ErtsProcLocks process_locks)
}
int
-erts_resume_processes(ErtsProcList *plp)
+erts_resume_processes(ErtsProcList *list)
{
+ /* 'list' is expected to have been fetched (i.e. not a ring anymore) */
int nresumed = 0;
+ ErtsProcList *plp = list;
+
while (plp) {
Process *proc;
ErtsProcList *fplp;
ASSERT(is_internal_pid(plp->pid));
proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
if (proc) {
- if (proclist_same(plp, proc)) {
+ if (erts_proclist_same(plp, proc)) {
resume_process(proc);
nresumed++;
}
@@ -6870,9 +6636,8 @@ Process *schedule(Process *p, int calls)
state = erts_smp_atomic32_read_acqb(&p->state);
if (IS_TRACED(p)) {
- if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE)) {
+ if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
- }
if (state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
trace_sched(p, ((state & ERTS_PSFLG_FREE)
@@ -6885,7 +6650,7 @@ Process *schedule(Process *p, int calls)
else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(p, am_out);
}
- }
+ }
#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
@@ -6949,9 +6714,8 @@ Process *schedule(Process *p, int calls)
#ifdef ERTS_SMP
{
ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
- rq->procs.pending_exiters = NULL;
-
- if (pnd_xtrs) {
+ if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
+ rq->procs.pending_exiters = NULL;
erts_smp_runq_unlock(rq);
handle_pending_exiters(pnd_xtrs);
erts_smp_runq_lock(rq);
@@ -7247,7 +7011,7 @@ Process *schedule(Process *p, int calls)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#ifdef ERTS_SMP
- if (is_not_nil(p->tracer_proc))
+ if (is_not_nil(ERTS_TRACER_PROC(p)))
erts_check_my_tracer_proc(p);
#endif
@@ -7450,70 +7214,6 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-/*
- * erts_test_next_pid() is only used for testing.
- */
-Sint
-erts_test_next_pid(int set, Uint next)
-{
- Uint64 lpd;
- Sint res;
- Eterm pid_data;
- int first_pix = -1;
-
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
-
- if (!set)
- lpd = last_pid_data_read_nob();
- else {
-
- lpd = (Uint64) next;
- pid_data = (Eterm) (lpd & ERTS_PID_DATA_MASK__);
- if (ERTS_INVALID_PID == make_internal_pid(pid_data)) {
- lpd += erts_proc.max;
- ASSERT(erts_pid_data2ix(pid_data)
- == erts_pid_data2ix(lpd & ERTS_PID_DATA_MASK__));
- }
- last_pid_data_set_relb(lpd);
- }
-
- while (1) {
- int pix;
- lpd++;
- pix = (int) (lpd % erts_proc.max);
- if (first_pix < 0)
- first_pix = pix;
- else if (pix == first_pix) {
- res = -1;
- break;
- }
- if (ERTS_AINT_NULL == erts_smp_atomic_read_nob(&erts_proc.tab[pix])) {
- pid_data = (Eterm) (lpd & ERTS_PID_DATA_MASK__);
- if (ERTS_INVALID_PID == make_internal_pid(pid_data)) {
- lpd += erts_proc.max;
- ASSERT(erts_pid_data2ix(pid_data)
- == erts_pid_data2ix(lpd & ERTS_PID_DATA_MASK__));
- }
- res = lpd & ERTS_PID_DATA_MASK__;
- break;
- }
- }
-
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
-
- return res;
-
-}
-
-Uint erts_process_count(void)
-{
- erts_aint32_t res = erts_smp_atomic32_read_nob(&process_count);
- if (res > erts_proc.max)
- return erts_proc.max;
- ASSERT(res >= 0);
- return (Uint) res;
-}
-
void
erts_free_proc(Process *p)
{
@@ -7523,132 +7223,66 @@ erts_free_proc(Process *p)
erts_free(ERTS_ALC_T_PROC, (void *) p);
}
+typedef struct {
+ Process *proc;
+ erts_aint32_t state;
+ ErtsRunQueue *run_queue;
+} ErtsEarlyProcInit;
+
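+/*
+ * Invoked via erts_ptab_new_element(); 'data' is the pid data
+ * assigned to the new process.
+ */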
+static void early_init_process_struct(void *varg, Eterm data)
+{
+ ErtsEarlyProcInit *arg = (ErtsEarlyProcInit *) varg;
+ Process *proc = arg->proc;
+
+ proc->common.id = make_internal_pid(data);
+ erts_smp_atomic32_init_relb(&proc->state, arg->state);
+
+#ifdef ERTS_SMP
+ RUNQ_SET_RQ(&proc->run_queue, arg->run_queue);
+
+ erts_proc_lock_init(proc); /* All locks locked */
+#endif
+
+}
+
/*
** Allocate process and find out where to place next process.
*/
static Process*
alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
{
- int pix;
- Process* p;
- Uint64 lpd, exp_lpd;
- Eterm pid_data;
- erts_aint32_t proc_count;
-#ifdef DEBUG
- Eterm pid;
-#endif
-
- erts_smp_rwmtx_rlock(&erts_proc_tab_rwmtx);
-
- proc_count = erts_smp_atomic32_inc_read_acqb(&process_count);
- if (proc_count > erts_proc.max) {
- while (1) {
- erts_aint32_t act_proc_count;
-
- act_proc_count = erts_smp_atomic32_cmpxchg_relb(&process_count,
- proc_count-1,
- proc_count);
- if (act_proc_count == proc_count)
- goto system_limit;
- proc_count = act_proc_count;
- if (proc_count <= erts_proc.max)
- break;
- }
- }
+ ErtsEarlyProcInit init_arg;
+ Process *p;
- p = (Process*) erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process));
+ p = erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process));
if (!p)
- goto enomem;
-
- p->approx_started = erts_get_approx_time();
- p->started_interval = get_proc_interval();
-
- lpd = last_pid_data_read_acqb();
-
- /* Reserve slot */
- while (1) {
- lpd++;
- pix = erts_pid_data2ix((Eterm) (lpd & ERTS_PID_DATA_MASK__));
- if (erts_smp_atomic_read_nob(&erts_proc.tab[pix]) == ERTS_AINT_NULL) {
- erts_aint_t val;
- val = erts_smp_atomic_cmpxchg_relb(&erts_proc.tab[pix],
- ((erts_aint_t)
- ERTS_PROC_LOCK_BUSY),
- ERTS_AINT_NULL);
-
- if (ERTS_AINT_NULL == val)
- break;
- }
- }
-
- pid_data = (Eterm) lpd & ERTS_PID_DATA_MASK__;
+ return NULL;
- p->id = make_internal_pid(pid_data);
- if (p->id == ERTS_INVALID_PID) {
- /* Do not use the invalid pid; change serial */
- lpd += erts_proc.max;
- ASSERT(pix == erts_pid_data2ix((Eterm) (lpd & ERTS_PID_DATA_MASK__)));
- pid_data = (Eterm) lpd & ERTS_PID_DATA_MASK__;
- p->id = make_internal_pid(pid_data);
- ASSERT(p->id != ERTS_INVALID_PID);
- }
+ init_arg.proc = (Process *) p;
+ init_arg.run_queue = rq;
+ init_arg.state = state;
- exp_lpd = last_pid_data_read_nob();
+ ASSERT(((char *) p) == ((char *) &p->common));
- /* Move last pid data forward */
- while (1) {
- Uint64 act_lpd;
- if (last_pid_data_cmp(lpd, exp_lpd) < 0)
- break;
- act_lpd = last_pid_data_cmpxchg_relb(lpd, exp_lpd);
- if (act_lpd == exp_lpd)
- break;
- exp_lpd = act_lpd;
+ if (!erts_ptab_new_element(&erts_proc,
+ &p->common,
+ (void *) &init_arg,
+ early_init_process_struct)) {
+ erts_free(ERTS_ALC_T_PROC, p);
+ return NULL;
}
-#ifdef ERTS_SMP
- RUNQ_SET_RQ(&p->run_queue, rq);
-#endif
-
- erts_smp_atomic32_init_relb(&p->state, state);
-
-#ifdef DEBUG
- pid = p->id;
-#endif
-
-#ifdef ERTS_SMP
- erts_proc_lock_init(p); /* All locks locked */
-#endif
-
- /* Move into slot reserved */
-#ifdef DEBUG
- ASSERT(ERTS_PROC_LOCK_BUSY
- == (Process *) erts_smp_atomic_xchg_relb(&erts_proc.tab[pix],
- (erts_aint_t) p));
-#else
- erts_smp_atomic_set_relb(&erts_proc.tab[pix], (erts_aint_t) p);
-#endif
-
- ASSERT(internal_pid_serial(p->id) <= (erts_use_r9_pids_ports
- ? ERTS_MAX_PID_R9_SERIAL
- : ERTS_MAX_PID_SERIAL));
-
- erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx);
-
+ ASSERT(internal_pid_serial(p->common.id) <= ERTS_MAX_PID_SERIAL);
+
+ p->approx_started = erts_get_approx_time();
p->rcount = 0;
- ASSERT(p == (Process *)
- erts_smp_atomic_read_nob(
- &erts_proc.tab[internal_pid_index(pid)]));
-
- return p;
-enomem:
-system_limit:
-
- erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx);
- return NULL;
+ ASSERT(p == (Process *) (erts_ptab_pix2intptr_nob(
+ &erts_proc,
+ internal_pid_index(p->common.id))));
+ return p;
}
Eterm
@@ -7795,21 +7429,21 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->reds = 0;
#ifdef ERTS_SMP
- p->u.alive.ptimer = NULL;
+ p->common.u.alive.ptimer = NULL;
#else
- sys_memset(&p->u.alive.tm, 0, sizeof(ErlTimer));
+ sys_memset(&p->common.u.alive.tm, 0, sizeof(ErlTimer));
#endif
- p->reg = NULL;
- p->nlinks = NULL;
- p->monitors = NULL;
+ p->common.u.alive.reg = NULL;
+ ERTS_P_LINKS(p) = NULL;
+ ERTS_P_MONITORS(p) = NULL;
p->nodes_monitors = NULL;
p->suspend_monitors = NULL;
ASSERT(is_pid(parent->group_leader));
if (parent->group_leader == ERTS_INVALID_PID)
- p->group_leader = p->id;
+ p->group_leader = p->common.id;
else {
/* Needs to be done after the heap has been set up */
p->group_leader =
@@ -7818,18 +7452,18 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
: STORE_NC(&p->htop, &p->off_heap, parent->group_leader);
}
- erts_get_default_tracing(&p->trace_flags, &p->tracer_proc);
+ erts_get_default_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER_PROC(p));
p->msg.first = NULL;
p->msg.last = &p->msg.first;
p->msg.save = &p->msg.first;
p->msg.len = 0;
#ifdef ERTS_SMP
- p->u.alive.msg_inq.first = NULL;
- p->u.alive.msg_inq.last = &p->u.alive.msg_inq.first;
- p->u.alive.msg_inq.len = 0;
+ p->msg_inq.first = NULL;
+ p->msg_inq.last = &p->msg_inq.first;
+ p->msg_inq.len = 0;
#endif
- p->bif_timers = NULL;
+ p->u.bif_timers = NULL;
p->mbuf = NULL;
p->mbuf_sz = 0;
p->psd = NULL;
@@ -7841,7 +7475,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
DT_UTAG(p) = NIL;
DT_UTAG_FLAGS(p) = 0;
#endif
- p->parent = parent->id == ERTS_INVALID_PID ? NIL : parent->id;
+ p->parent = (parent->common.id == ERTS_INVALID_PID
+ ? NIL
+ : parent->common.id);
INIT_HOLE_CHECK(p);
#ifdef DEBUG
@@ -7849,18 +7485,19 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
if (IS_TRACED(parent)) {
- if (parent->trace_flags & F_TRACE_SOS) {
- p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
- p->tracer_proc = parent->tracer_proc;
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS) {
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
+ ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
}
if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
- trace_proc_spawn(parent, p->id, mod, func, args);
+ trace_proc_spawn(parent, p->common.id, mod, func, args);
}
- if (parent->trace_flags & F_TRACE_SOS1) { /* Overrides TRACE_CHILDREN */
- p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
- p->tracer_proc = parent->tracer_proc;
- p->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
- parent->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) {
+ /* Overrides TRACE_CHILDREN */
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
+ ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
+ ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
}
}
@@ -7873,27 +7510,27 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
int ret;
#endif
if (IS_TRACED_FL(parent, F_TRACE_PROCS)) {
- trace_proc(parent, parent, am_link, p->id);
+ trace_proc(parent, parent, am_link, p->common.id);
}
#ifdef DEBUG
- ret = erts_add_link(&(parent->nlinks), LINK_PID, p->id);
+ ret = erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
ASSERT(ret == 0);
- ret = erts_add_link(&(p->nlinks), LINK_PID, parent->id);
+ ret = erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
ASSERT(ret == 0);
#else
- erts_add_link(&(parent->nlinks), LINK_PID, p->id);
- erts_add_link(&(p->nlinks), LINK_PID, parent->id);
+ erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
+ erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
#endif
if (IS_TRACED(parent)) {
- if (parent->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) {
- p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
- p->tracer_proc = parent->tracer_proc; /* maybe steal */
+ if (ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS);
+ ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent); /*maybe steal*/
- if (parent->trace_flags & F_TRACE_SOL1) { /* maybe override */
- p ->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- parent->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
}
}
}
@@ -7906,8 +7543,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm mref;
mref = erts_make_ref(parent);
- erts_add_monitor(&(parent->monitors), MON_ORIGIN, mref, p->id, NIL);
- erts_add_monitor(&(p->monitors), MON_TARGET, mref, parent->id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(parent), MON_ORIGIN, mref, p->common.id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(p), MON_TARGET, mref, parent->common.id, NIL);
so->mref = mref;
}
@@ -7915,8 +7552,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->scheduler_data = NULL;
p->suspendee = NIL;
p->pending_suspenders = NULL;
- p->u.alive.pending_exit.reason = THE_NON_VALUE;
- p->u.alive.pending_exit.bp = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ p->pending_exit.bp = NULL;
#endif
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
@@ -7925,7 +7562,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
- res = p->id;
+ res = p->common.id;
/*
* Schedule process for execution.
@@ -7933,7 +7570,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
schedule_process(p, state, 0);
- VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->id));
+ VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id));
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_spawn)) {
@@ -7968,10 +7605,10 @@ void erts_init_empty_process(Process *p)
p->min_heap_size = 0;
p->min_vheap_size = 0;
p->rcount = 0;
- p->id = ERTS_INVALID_PID;
+ p->common.id = ERTS_INVALID_PID;
p->reds = 0;
- p->tracer_proc = NIL;
- p->trace_flags = F_INITIAL_TRACE_FLAGS;
+ ERTS_TRACER_PROC(p) = NIL;
+ ERTS_TRACE_FLAGS(p) = F_INITIAL_TRACE_FLAGS;
p->group_leader = ERTS_INVALID_PID;
p->flags = 0;
p->fvalue = NIL;
@@ -7984,14 +7621,14 @@ void erts_init_empty_process(Process *p)
p->bin_old_vheap = 0;
p->bin_vheap_mature = 0;
#ifdef ERTS_SMP
- p->u.alive.ptimer = NULL;
+ p->common.u.alive.ptimer = NULL;
#else
- memset(&(p->u.alive.tm), 0, sizeof(ErlTimer));
+ memset(&(p->common.u.alive.tm), 0, sizeof(ErlTimer));
#endif
p->next = NULL;
p->off_heap.first = NULL;
p->off_heap.overhead = 0;
- p->reg = NULL;
+ p->common.u.alive.reg = NULL;
p->heap_sz = 0;
p->high_water = NULL;
p->old_hend = NULL;
@@ -8000,15 +7637,15 @@ void erts_init_empty_process(Process *p)
p->mbuf = NULL;
p->mbuf_sz = 0;
p->psd = NULL;
- p->monitors = NULL;
- p->nlinks = NULL; /* List of links */
+ ERTS_P_MONITORS(p) = NULL;
+ ERTS_P_LINKS(p) = NULL; /* List of links */
p->nodes_monitors = NULL;
p->suspend_monitors = NULL;
p->msg.first = NULL;
p->msg.last = &p->msg.first;
p->msg.save = &p->msg.first;
p->msg.len = 0;
- p->bif_timers = NULL;
+ p->u.bif_timers = NULL;
p->dictionary = NULL;
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
@@ -8036,7 +7673,7 @@ void erts_init_empty_process(Process *p)
p->parent = NIL;
p->approx_started = 0;
- p->started_interval = 0;
+ p->common.u.alive.started_interval = 0;
#ifdef HIPE
hipe_init_process(&p->hipe);
@@ -8054,13 +7691,13 @@ void erts_init_empty_process(Process *p)
#ifdef ERTS_SMP
p->scheduler_data = NULL;
- p->u.alive.msg_inq.first = NULL;
- p->u.alive.msg_inq.last = &p->u.alive.msg_inq.first;
- p->u.alive.msg_inq.len = 0;
+ p->msg_inq.first = NULL;
+ p->msg_inq.last = &p->msg_inq.first;
+ p->msg_inq.len = 0;
p->suspendee = NIL;
p->pending_suspenders = NULL;
- p->u.alive.pending_exit.reason = THE_NON_VALUE;
- p->u.alive.pending_exit.bp = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ p->pending_exit.bp = NULL;
erts_proc_lock_init(p);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0));
@@ -8082,25 +7719,25 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->stop == NULL);
ASSERT(p->hend == NULL);
ASSERT(p->heap == NULL);
- ASSERT(p->id == ERTS_INVALID_PID);
- ASSERT(p->tracer_proc == NIL);
- ASSERT(p->trace_flags == F_INITIAL_TRACE_FLAGS);
+ ASSERT(p->common.id == ERTS_INVALID_PID);
+ ASSERT(ERTS_TRACER_PROC(p) == NIL);
+ ASSERT(ERTS_TRACE_FLAGS(p) == F_INITIAL_TRACE_FLAGS);
ASSERT(p->group_leader == ERTS_INVALID_PID);
ASSERT(p->next == NULL);
- ASSERT(p->reg == NULL);
+ ASSERT(p->common.u.alive.reg == NULL);
ASSERT(p->heap_sz == 0);
ASSERT(p->high_water == NULL);
ASSERT(p->old_hend == NULL);
ASSERT(p->old_htop == NULL);
ASSERT(p->old_heap == NULL);
- ASSERT(p->monitors == NULL);
- ASSERT(p->nlinks == NULL);
+ ASSERT(ERTS_P_MONITORS(p) == NULL);
+ ASSERT(ERTS_P_LINKS(p) == NULL);
ASSERT(p->nodes_monitors == NULL);
ASSERT(p->suspend_monitors == NULL);
ASSERT(p->msg.first == NULL);
ASSERT(p->msg.len == 0);
- ASSERT(p->bif_timers == NULL);
+ ASSERT(p->u.bif_timers == NULL);
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
ASSERT(p->cp == NULL);
@@ -8110,12 +7747,12 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->parent == NIL);
#ifdef ERTS_SMP
- ASSERT(p->u.alive.msg_inq.first == NULL);
- ASSERT(p->u.alive.msg_inq.len == 0);
+ ASSERT(p->msg_inq.first == NULL);
+ ASSERT(p->msg_inq.len == 0);
ASSERT(p->suspendee == NIL);
ASSERT(p->pending_suspenders == NULL);
- ASSERT(p->u.alive.pending_exit.reason == THE_NON_VALUE);
- ASSERT(p->u.alive.pending_exit.bp == NULL);
+ ASSERT(p->pending_exit.reason == THE_NON_VALUE);
+ ASSERT(p->pending_exit.bp == NULL);
#endif
/* Thing that erts_cleanup_empty_process() cleans up */
@@ -8157,7 +7794,7 @@ delete_process(Process* p)
{
ErlMessage* mp;
- VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->id));
+ VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id));
/* Cleanup psd */
@@ -8231,8 +7868,6 @@ delete_process(Process* p)
mp = next_mp;
}
- ASSERT(!p->monitors);
- ASSERT(!p->nlinks);
ASSERT(!p->nodes_monitors);
ASSERT(!p->suspend_monitors);
@@ -8296,7 +7931,7 @@ void
erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
{
ErtsProcLocks xlocks;
- ASSERT(is_value(c_p->u.alive.pending_exit.reason));
+ ASSERT(is_value(c_p->pending_exit.reason));
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks);
ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN);
ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)
@@ -8315,10 +7950,10 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
set_proc_exiting(c_p,
erts_smp_atomic32_read_acqb(&c_p->state),
- c_p->u.alive.pending_exit.reason,
- c_p->u.alive.pending_exit.bp);
- c_p->u.alive.pending_exit.reason = THE_NON_VALUE;
- c_p->u.alive.pending_exit.bp = NULL;
+ c_p->pending_exit.reason,
+ c_p->pending_exit.bp);
+ c_p->pending_exit.reason = THE_NON_VALUE;
+ c_p->pending_exit.bp = NULL;
if (xlocks)
erts_smp_proc_unlock(c_p, xlocks);
@@ -8327,12 +7962,14 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
static void
handle_pending_exiters(ErtsProcList *pnd_xtrs)
{
+ /* 'list' is expected to have been fetched (i.e. not a ring anymore) */
ErtsProcList *plp = pnd_xtrs;
- ErtsProcList *free_plp;
+
while (plp) {
+ ErtsProcList *free_plp;
Process *p = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCKS_ALL);
if (p) {
- if (proclist_same(plp, p)) {
+ if (erts_proclist_same(plp, p)) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
if (!(state & ERTS_PSFLG_RUNNING)) {
ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
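/*
 * Hedged sketch, not part of the patch: the comment above says the list
 * handed to handle_pending_exiters() must already have been "fetched",
 * i.e. detached from its circular form into a NULL-terminated chain.
 * Using the erts_proclist API introduced by this commit, a caller would do
 * roughly the following; the function name is illustrative only, and the
 * real call site in the scheduler may differ:
 */
static void example_flush_pending_exiters(ErtsRunQueue *rq)
{
    ErtsProcList *pnd_xtrs;

    erts_smp_runq_lock(rq);
    pnd_xtrs = rq->procs.pending_exiters;
    rq->procs.pending_exiters = NULL;
    /* Break the ring open; afterwards following 'next' until NULL visits
     * every element exactly once. */
    erts_proclist_fetch(&pnd_xtrs, NULL);
    erts_smp_runq_unlock(rq);

    if (pnd_xtrs)
        handle_pending_exiters(pnd_xtrs);
}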
@@ -8361,8 +7998,7 @@ save_pending_exiter(Process *p)
erts_smp_runq_lock(rq);
- plp->next = rq->procs.pending_exiters;
- rq->procs.pending_exiters = plp;
+ erts_proclist_store_last(&rq->procs.pending_exiters, plp);
erts_smp_runq_unlock(rq);
wake_scheduler(rq, 1);
@@ -8409,7 +8045,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
hp = bp->mem;
mess = copy_struct(exit_term, term_size, &hp, &bp->off_heap);
/* the trace token must in this case be updated by the caller */
- seq_trace_output(token, mess, SEQ_TRACE_SEND, to->id, NULL);
+ seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, NULL);
temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap);
erts_queue_message(to, to_locksp, bp, mess, temp_token
#ifdef USE_VM_PROBES
@@ -8539,7 +8175,7 @@ send_exit_signal(Process *c_p, /* current process if and only
else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) {
#ifdef ERTS_SMP
if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) {
- ASSERT(!rp->u.alive.pending_exit.bp);
+ ASSERT(!rp->pending_exit.bp);
if (rp == c_p && (*rp_locks & ERTS_PROC_LOCK_MAIN)) {
/* Ensure that all locks on c_p are locked before
@@ -8589,7 +8225,7 @@ send_exit_signal(Process *c_p, /* current process if and only
set_pending_exit:
if (is_immed(rsn)) {
- rp->u.alive.pending_exit.reason = rsn;
+ rp->pending_exit.reason = rsn;
}
else {
Eterm *hp;
@@ -8597,11 +8233,11 @@ send_exit_signal(Process *c_p, /* current process if and only
ErlHeapFragment *bp = new_message_buffer(sz);
hp = &bp->mem[0];
- rp->u.alive.pending_exit.reason = copy_struct(rsn,
- sz,
- &hp,
- &bp->off_heap);
- rp->u.alive.pending_exit.bp = bp;
+ rp->pending_exit.reason = copy_struct(rsn,
+ sz,
+ &hp,
+ &bp->off_heap);
+ rp->pending_exit.bp = bp;
}
erts_smp_atomic32_read_bor_relb(&rp->state,
ERTS_PSFLG_PENDING_EXIT);
@@ -8700,7 +8336,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
if (!rp) {
goto done;
}
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon == NULL) {
goto done;
@@ -8735,7 +8371,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ASSERT(mon->type == MON_TARGET);
ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid));
if (is_internal_port(mon->pid)) {
- Port *prt = erts_id2port(mon->pid, NULL, 0);
+ Port *prt = erts_id2port(mon->pid);
if (prt == NULL) {
goto done;
}
@@ -8751,13 +8387,13 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
goto done;
}
UseTmpHeapNoproc(3);
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
if (rmon) {
erts_destroy_monitor(rmon);
watched = (is_atom(mon->name)
? TUPLE2(lhp, mon->name,
erts_this_dist_entry->sysname)
- : pcontext->p->id);
+ : pcontext->p->common.id);
erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
watched, pcontext->reason);
}
@@ -8822,21 +8458,22 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
switch(lnk->type) {
case LINK_PID:
if(is_internal_port(item)) {
- Port *prt = erts_id2port(item, NULL, 0);
- if (prt) {
- rlnk = erts_remove_link(&prt->nlinks, p->id);
- if (rlnk)
- erts_destroy_link(rlnk);
- erts_do_exit_port(prt, p->id, reason);
- erts_port_release(prt);
- }
+ Port *prt = erts_port_lookup(item, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (prt)
+ erts_port_exit(NULL,
+ (ERTS_PORT_SIG_FLG_FORCE_SCHED
+ | ERTS_PORT_SIG_FLG_BROKEN_LINK),
+ prt,
+ p->common.id,
+ reason,
+ NULL);
}
else if(is_external_port(item)) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Erroneous link between %T and external port %T "
"found\n",
- p->id,
+ p->common.id,
item);
erts_send_error_to_logger_nogl(dsbufp);
ASSERT(0); /* It isn't possible to setup such a link... */
@@ -8846,14 +8483,14 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
| ERTS_PROC_LOCKS_XSIG_SEND);
rp = erts_pid2proc(NULL, 0, item, rp_locks);
if (rp) {
- rlnk = erts_remove_link(&(rp->nlinks), p->id);
+ rlnk = erts_remove_link(&ERTS_P_LINKS(rp), p->common.id);
/* If rlnk == NULL, we got unlinked while exiting,
i.e., do nothing... */
if (rlnk) {
int xres;
erts_destroy_link(rlnk);
xres = send_exit_signal(NULL,
- p->id,
+ p->common.id,
rp,
&rp_locks,
reason,
@@ -8865,7 +8502,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- trace_proc(p, rp, am_getting_unlinked, p->id);
+ trace_proc(p, rp, am_getting_unlinked, p->common.id);
}
}
}
@@ -8879,12 +8516,12 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
ErtsDSigData dsd;
int code;
ErtsDistLinkData dld;
- erts_remove_dist_link(&dld, p->id, item, dep);
+ erts_remove_dist_link(&dld, p->common.id, item, dep);
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_exit_tt(&dsd, p->id, item, reason,
- SEQ_TRACE_TOKEN(p));
+ code = erts_dsig_send_exit_tt(&dsd, p->common.id, item,
+ reason, SEQ_TRACE_TOKEN(p));
ASSERT(code == ERTS_DSIG_SEND_OK);
}
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
@@ -8899,7 +8536,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
/* dist entries have node links in a separate structure to
avoid confusion */
erts_smp_de_links_lock(dep);
- rlnk = erts_remove_link(&(dep->node_links), p->id);
+ rlnk = erts_remove_link(&(dep->node_links), p->common.id);
erts_smp_de_links_unlock(dep);
if (rlnk)
erts_destroy_link(rlnk);
@@ -8927,15 +8564,6 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
erts_destroy_suspend_monitor(smon);
}
-#ifdef ERTS_SMP
-static void
-proc_dec_refc(void *vproc)
-{
- erts_smp_proc_dec_refc((Process *) vproc);
-}
-#endif
-
-
 /* this function finishes a process and propagates exit messages - called
by process_main when a process dies */
void
@@ -8973,10 +8601,10 @@ erts_do_exit_process(Process* p, Eterm reason)
state = set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state));
if (state & ERTS_PSFLG_PENDING_EXIT) {
/* Process exited before pending exit was received... */
- p->u.alive.pending_exit.reason = THE_NON_VALUE;
- if (p->u.alive.pending_exit.bp) {
- free_message_buffer(p->u.alive.pending_exit.bp);
- p->u.alive.pending_exit.bp = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ if (p->pending_exit.bp) {
+ free_message_buffer(p->pending_exit.bp);
+ p->pending_exit.bp = NULL;
}
}
@@ -8993,23 +8621,25 @@ erts_do_exit_process(Process* p, Eterm reason)
trace_proc(p, p, am_exit, reason);
}
- erts_trace_check_exiting(p->id);
+ erts_trace_check_exiting(p->common.id);
- ASSERT((p->trace_flags & F_INITIAL_TRACE_FLAGS) == F_INITIAL_TRACE_FLAGS);
+ ASSERT((ERTS_TRACE_FLAGS(p) & F_INITIAL_TRACE_FLAGS)
+ == F_INITIAL_TRACE_FLAGS);
cancel_timer(p); /* Always cancel timer just in case */
- /*
- * The timer of this process can *not* be used anymore. The field used
- * for the timer is now used for misc exiting data.
- */
- p->u.exit_data = NULL;
-
- if (p->bif_timers)
+ if (p->u.bif_timers)
erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ /*
+ * The p->u.bif_timers of this process can *not* be used anymore;
+ * will be overwritten by misc termination data.
+ */
+ p->u.terminate = NULL;
+
+
erts_continue_exit_process(p);
}
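/*
 * Hedged sketch, not part of the patch: p->u is now a union (see the
 * struct process change in erl_process.h below) that overlays the BIF
 * timer list, valid only while the process is alive, with a misc
 * termination-data pointer used from this point on. The discipline the
 * exit path above follows is essentially:
 */
static void example_retire_bif_timers(Process *p)  /* illustrative name */
{
    if (p->u.bif_timers)
        erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL);
    /* From here on p->u.bif_timers must not be read again; the same
     * storage now carries misc termination data. */
    p->u.terminate = NULL;
}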
@@ -9081,9 +8711,9 @@ erts_continue_exit_process(Process *p)
* The registered name *should* be the last "erlang resource" to
* cleanup.
*/
- if (p->reg) {
+ if (p->common.u.alive.reg) {
(void) erts_unregister_name(p, ERTS_PROC_LOCK_MAIN, NULL, THE_NON_VALUE);
- ASSERT(!p->reg);
+ ASSERT(!p->common.u.alive.reg);
}
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -9097,22 +8727,18 @@ erts_continue_exit_process(Process *p)
yield_allowed = 0;
#endif
+ /*
+ * Note! The monitor and link fields will be overwritten
+ * by erts_ptab_delete_element() below.
+ */
+ mon = ERTS_P_MONITORS(p);
+ lnk = ERTS_P_LINKS(p);
+
{
- int maybe_save;
- int pix;
/* Do *not* use erts_get_runq_proc() */
ErtsRunQueue *rq;
rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p));
- pix = internal_pid_index(p->id);
-
- erts_smp_rwmtx_rlock(&erts_proc_tab_rwmtx);
- maybe_save = saved_term_procs.end != NULL;
- if (maybe_save) {
- erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx);
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
- }
-
erts_smp_runq_lock(rq);
#ifdef ERTS_SMP
@@ -9123,22 +8749,11 @@ erts_continue_exit_process(Process *p)
p->scheduler_data->current_process = NULL;
p->scheduler_data->free_process = p;
#endif
- /* Time of death! */
- erts_smp_atomic_set_relb(&erts_proc.tab[pix], ERTS_AINT_NULL);
- ASSERT(erts_smp_atomic32_read_nob(&process_count) > 0);
- erts_smp_atomic32_dec_relb(&process_count);
+ /* Time of death! */
+ erts_ptab_delete_element(&erts_proc, &p->common);
erts_smp_runq_unlock(rq);
-
- if (!maybe_save)
- erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx);
- else {
- if (saved_term_procs.end)
- save_terminating_process(p);
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
- }
-
}
/*
@@ -9148,12 +8763,6 @@ erts_continue_exit_process(Process *p)
* when the monitors and/or links hit.
*/
- mon = p->monitors;
- p->monitors = NULL; /* to avoid recursive deletion during traversal */
-
- lnk = p->nlinks;
- p->nlinks = NULL;
-
{
/* Inactivate and notify free */
erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state);
@@ -9197,7 +8806,7 @@ erts_continue_exit_process(Process *p)
UseTmpHeap(4,p);
hp = &tmp_heap[0];
- exit_tuple = TUPLE3(hp, am_EXIT, p->id, reason);
+ exit_tuple = TUPLE3(hp, am_EXIT, p->common.id, reason);
exit_tuple_sz = size_object(exit_tuple);
@@ -9223,9 +8832,6 @@ erts_continue_exit_process(Process *p)
delete_process(p);
#ifdef ERTS_SMP
- erts_schedule_thr_prgr_later_op(proc_dec_refc,
- (void *) p,
- &p->u.release_data);
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
#endif
@@ -9277,9 +8883,9 @@ cancel_timer(Process* p)
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
p->flags &= ~(F_INSLPQUEUE|F_TIMO);
#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(p->u.alive.ptimer);
+ erts_cancel_smp_ptimer(p->common.u.alive.ptimer);
#else
- erts_cancel_timer(&p->u.alive.tm);
+ erts_cancel_timer(&p->common.u.alive.tm);
#endif
}
@@ -9300,12 +8906,12 @@ set_timer(Process* p, Uint timeout)
p->flags &= ~F_TIMO;
#ifdef ERTS_SMP
- erts_create_smp_ptimer(&p->u.alive.ptimer,
- p->id,
+ erts_create_smp_ptimer(&p->common.u.alive.ptimer,
+ p->common.id,
(ErlTimeoutProc) timeout_proc,
timeout);
#else
- erts_set_timer(&p->u.alive.tm,
+ erts_set_timer(&p->common.u.alive.tm,
(ErlTimeoutProc) timeout_proc,
NULL,
(void*) p,
@@ -9323,7 +8929,7 @@ erts_stack_dump(int to, void *to_arg, Process *p)
Eterm* sp;
int yreg = -1;
- if (p->trace_flags & F_SENSITIVE) {
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
return;
}
erts_program_counter_info(to, to_arg, p);
@@ -9411,1068 +9017,6 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
return yreg;
}
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * The processes/0 BIF implementation. *
-\* */
-
-
-#define ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED 25
-#define ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE 1000
-#define ERTS_PROCESSES_BIF_MIN_START_REDS \
- (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \
- / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED)
-
-#define ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS 1
-
-#define ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED 10
-
-#define ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS \
- (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \
- / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED)
-
-
-#define ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED 75
-
-#define ERTS_PROCS_DBG_DO_TRACE 0
-
-#ifdef DEBUG
-# define ERTS_PROCESSES_BIF_DEBUGLEVEL 100
-#else
-# define ERTS_PROCESSES_BIF_DEBUGLEVEL 0
-#endif
-
-#define ERTS_PROCS_DBGLVL_CHK_HALLOC 1
-#define ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS 5
-#define ERTS_PROCS_DBGLVL_CHK_PIDS 10
-#define ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST 20
-#define ERTS_PROCS_DBGLVL_CHK_RESLIST 20
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0
-# define ERTS_PROCS_ASSERT(EXP)
-#else
-# define ERTS_PROCS_ASSERT(EXP) \
- ((void) ((EXP) \
- ? 1 \
- : (debug_processes_assert_error(#EXP, __FILE__, __LINE__), 0)))
-#endif
-
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC
-# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ) \
-do { \
- ERTS_PROCS_ASSERT(!(PBDP)->debug.heap); \
- ERTS_PROCS_ASSERT(!(PBDP)->debug.heap_size); \
- (PBDP)->debug.heap = (HP); \
- (PBDP)->debug.heap_size = (SZ); \
-} while (0)
-# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP) \
-do { \
- ERTS_PROCS_ASSERT((PBDP)->debug.heap); \
- ERTS_PROCS_ASSERT((PBDP)->debug.heap_size); \
- ERTS_PROCS_ASSERT((PBDP)->debug.heap + (PBDP)->debug.heap_size == (HP));\
- (PBDP)->debug.heap = NULL; \
- (PBDP)->debug.heap_size = 0; \
-} while (0)
-# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP) \
-do { \
- (PBDP)->debug.heap = NULL; \
- (PBDP)->debug.heap_size = 0; \
-} while (0)
-#else
-# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ)
-# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP)
-# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
-# define ERTS_PROCS_DBG_CHK_RESLIST(R) debug_processes_check_res_list((R))
-#else
-# define ERTS_PROCS_DBG_CHK_RESLIST(R)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
-# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP) debug_processes_save_all_pids((PBDP))
-# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP) \
-do { \
- if (!(PBDP)->debug.correct_pids_verified) \
- debug_processes_verify_all_pids((PBDP)); \
-} while (0)
-# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP) \
-do { \
- if ((PBDP)->debug.correct_pids) { \
- erts_free(ERTS_ALC_T_PROCS_PIDS, \
- (PBDP)->debug.correct_pids); \
- (PBDP)->debug.correct_pids = NULL; \
- } \
-} while(0)
-# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP) \
-do { \
- (PBDP)->debug.correct_pids_verified = 0; \
- (PBDP)->debug.correct_pids = NULL; \
-} while (0)
-#else
-# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP)
-# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP)
-# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP)
-# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
-# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, IC) \
- debug_processes_check_found_pid((PBDP), (PID), (IC), 1)
-# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, IC) \
- debug_processes_check_found_pid((PBDP), (PID), (IC), 0)
-#else
-# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, IC)
-# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, IC)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
-# define ERTS_PROCS_DBG_CHK_TPLIST() \
- debug_processes_check_term_proc_list()
-# define ERTS_PROCS_DBG_CHK_FREELIST(FL) \
- debug_processes_check_term_proc_free_list(FL)
-#else
-# define ERTS_PROCS_DBG_CHK_TPLIST()
-# define ERTS_PROCS_DBG_CHK_FREELIST(FL)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0
-#if ERTS_PROCS_DBG_DO_TRACE
-# define ERTS_PROCS_DBG_INIT(P, PBDP) (PBDP)->debug.caller = (P)->id
-# else
-# define ERTS_PROCS_DBG_INIT(P, PBDP)
-# endif
-# define ERTS_PROCS_DBG_CLEANUP(PBDP)
-#else
-# define ERTS_PROCS_DBG_INIT(P, PBDP) \
-do { \
- (PBDP)->debug.caller = (P)->id; \
- ERTS_PROCS_DBG_HEAP_ALLOC_INIT((PBDP)); \
- ERTS_PROCS_DBG_CHK_PIDS_INIT((PBDP)); \
-} while (0)
-# define ERTS_PROCS_DBG_CLEANUP(PBDP) \
-do { \
- ERTS_PROCS_DBG_CLEANUP_CHK_PIDS((PBDP)); \
-} while (0)
-#endif
-
-#if ERTS_PROCS_DBG_DO_TRACE
-# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT) \
- erts_fprintf(stderr, "%T %s:%d:%s(): %s\n", \
- (PID), __FILE__, __LINE__, #FUNC, #WHAT)
-#else
-# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT)
-#endif
-
-static Uint processes_bif_tab_chunks;
-static Export processes_trap_export;
-
-typedef struct {
- Uint64 interval;
-} ErtsProcessesBifChunkInfo;
-
-typedef enum {
- INITIALIZING,
- INSPECTING_TABLE,
- INSPECTING_TERMINATED_PROCESSES,
- BUILDING_RESULT,
- RETURN_RESULT
-} ErtsProcessesBifState;
-
-typedef struct {
- ErtsProcessesBifState state;
- Eterm caller;
- ErtsProcessesBifChunkInfo *chunk;
- int tix;
- int pid_ix;
- int pid_sz;
- Eterm *pid;
- ErtsTermProcElement *bif_invocation; /* Only used when > 1 chunk */
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0 || ERTS_PROCS_DBG_DO_TRACE
- struct {
- Eterm caller;
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- Uint64 *pid_started;
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC
- Eterm *heap;
- Uint heap_size;
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
- int correct_pids_verified;
- Eterm *correct_pids;
-#endif
- } debug;
-#endif
-
-} ErtsProcessesBifData;
-
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0
-static void debug_processes_assert_error(char* expr, char* file, int line);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
-static void debug_processes_check_res_list(Eterm list);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
-static void debug_processes_save_all_pids(ErtsProcessesBifData *pbdp);
-static void debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
-static void debug_processes_check_found_pid(ErtsProcessesBifData *pbdp,
- Eterm pid,
- Uint64 ic,
- int pid_should_be_found);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
-static void debug_processes_check_term_proc_list(void);
-static void debug_processes_check_term_proc_free_list(ErtsTermProcElement *tpep);
-#endif
-
-static void
-save_terminating_process(Process *p)
-{
- ErtsTermProcElement *tpep = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL,
- sizeof(ErtsTermProcElement));
- ERTS_PROCS_ASSERT(saved_term_procs.start && saved_term_procs.end);
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx));
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- tpep->prev = saved_term_procs.end;
- tpep->next = NULL;
- tpep->ix = internal_pid_index(p->id);
- tpep->u.process.pid = p->id;
- tpep->u.process.spawned = p->started_interval;
- tpep->u.process.exited = get_proc_interval();
-
- saved_term_procs.end->next = tpep;
- saved_term_procs.end = tpep;
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- ERTS_PROCS_ASSERT(tpep->prev->ix >= 0
- ? (tpep->u.process.exited
- >= tpep->prev->u.process.exited)
- : (tpep->u.process.exited
- >= tpep->prev->u.bif_invocation.interval));
-}
-
-static void
-cleanup_processes_bif_data(Binary *bp)
-{
- ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(bp);
-
- ERTS_PROCS_DBG_TRACE(pbdp->debug.caller, cleanup_processes_bif_data, call);
-
- if (pbdp->state != INITIALIZING) {
-
- if (pbdp->chunk) {
- erts_free(ERTS_ALC_T_PROCS_CNKINF, pbdp->chunk);
- pbdp->chunk = NULL;
- }
- if (pbdp->pid) {
- erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->pid);
- pbdp->pid = NULL;
- }
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- if (pbdp->debug.pid_started) {
- erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.pid_started);
- pbdp->debug.pid_started = NULL;
- }
-#endif
-
- if (pbdp->bif_invocation) {
- ErtsTermProcElement *tpep;
-
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
-
- ERTS_PROCS_DBG_TRACE(pbdp->debug.caller,
- cleanup_processes_bif_data,
- term_proc_cleanup);
-
- tpep = pbdp->bif_invocation;
- pbdp->bif_invocation = NULL;
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- if (tpep->prev) {
- /*
-	     * Only remove this bif invocation when we
-	     * have preceding invocations.
- */
- tpep->prev->next = tpep->next;
- if (tpep->next)
- tpep->next->prev = tpep->prev;
- else {
- /*
- * At the time of writing this branch cannot be
- * reached. I don't want to remove this code though
- * since it may be possible to reach this line
- * in the future if the cleanup order in
- * erts_do_exit_process() is changed. The ASSERT(0)
- * is only here to make us aware that the reorder
- * has happened. /rickard
- */
- ASSERT(0);
- saved_term_procs.end = tpep->prev;
- }
- erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep);
- }
- else {
- /*
-	     * Free all elements until the next bif invocation
-	     * is found.
- */
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- do {
- ErtsTermProcElement *ftpep = tpep;
- tpep = tpep->next;
- erts_free(ERTS_ALC_T_PROCS_TPROC_EL, ftpep);
- } while (tpep && tpep->ix >= 0);
- saved_term_procs.start = tpep;
- if (tpep)
- tpep->prev = NULL;
- else
- saved_term_procs.end = NULL;
- }
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
-
- }
- }
-
- ERTS_PROCS_DBG_TRACE(pbdp->debug.caller,
- cleanup_processes_bif_data,
- return);
- ERTS_PROCS_DBG_CLEANUP(pbdp);
-}
-
-static int
-processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp)
-{
- ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp);
- int have_reds;
- int reds;
- int locked = 0;
-
- do {
- switch (pbdp->state) {
- case INITIALIZING:
- pbdp->chunk = erts_alloc(ERTS_ALC_T_PROCS_CNKINF,
- (sizeof(ErtsProcessesBifChunkInfo)
- * processes_bif_tab_chunks));
- pbdp->tix = 0;
- pbdp->pid_ix = 0;
-
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
- locked = 1;
-
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, init);
-
- pbdp->pid_sz = erts_process_count();
- pbdp->pid = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
- sizeof(Eterm)*pbdp->pid_sz);
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
- sizeof(Uint64)*pbdp->pid_sz);
-#endif
-
- ERTS_PROCS_DBG_SAVE_PIDS(pbdp);
-
- if (processes_bif_tab_chunks == 1)
- pbdp->bif_invocation = NULL;
- else {
- /*
- * We will have to access the table multiple times
- * releasing the table lock in between chunks.
- */
- pbdp->bif_invocation = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL,
- sizeof(ErtsTermProcElement));
- pbdp->bif_invocation->ix = -1;
- pbdp->bif_invocation->u.bif_invocation.interval
- = step_proc_interval();
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- pbdp->bif_invocation->next = NULL;
- if (saved_term_procs.end) {
- pbdp->bif_invocation->prev = saved_term_procs.end;
- saved_term_procs.end->next = pbdp->bif_invocation;
- ERTS_PROCS_ASSERT(saved_term_procs.start);
- }
- else {
- pbdp->bif_invocation->prev = NULL;
- saved_term_procs.start = pbdp->bif_invocation;
- }
- saved_term_procs.end = pbdp->bif_invocation;
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- }
-
- pbdp->state = INSPECTING_TABLE;
- /* Fall through */
-
- case INSPECTING_TABLE: {
- int ix = pbdp->tix;
- int indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- int cix = ix / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- int end_ix = ix + indices;
- Uint64 *invocation_interval_p;
-
- invocation_interval_p
- = (pbdp->bif_invocation
- ? &pbdp->bif_invocation->u.bif_invocation.interval
- : NULL);
-
- ERTS_PROCS_ASSERT(is_nil(*res_accp));
- if (!locked) {
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
- locked = 1;
- }
-
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx));
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_table);
-
- if (cix != 0)
- pbdp->chunk[cix].interval = step_proc_interval();
- else if (pbdp->bif_invocation)
- pbdp->chunk[0].interval = *invocation_interval_p;
- /* else: interval is irrelevant */
-
- if (end_ix >= erts_proc.max) {
- ERTS_PROCS_ASSERT(cix+1 == processes_bif_tab_chunks);
- end_ix = erts_proc.max;
- indices = end_ix - ix;
- /* What to do when done with this chunk */
- pbdp->state = (processes_bif_tab_chunks == 1
- ? BUILDING_RESULT
- : INSPECTING_TERMINATED_PROCESSES);
- }
-
- for (; ix < end_ix; ix++) {
- Process *rp = erts_pix2proc(ix);
- if (rp
- && (!invocation_interval_p
- || rp->started_interval < *invocation_interval_p)) {
- ERTS_PROCS_ASSERT(is_internal_pid(rp->id));
- pbdp->pid[pbdp->pid_ix] = rp->id;
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started[pbdp->pid_ix] = rp->started_interval;
-#endif
-
- pbdp->pid_ix++;
- ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz);
- }
- }
-
- pbdp->tix = end_ix;
-
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
- locked = 0;
-
- reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED;
- BUMP_REDS(p, reds);
-
- have_reds = ERTS_BIF_REDS_LEFT(p);
-
- if (have_reds && pbdp->state == INSPECTING_TABLE) {
- ix = pbdp->tix;
- indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- end_ix = ix + indices;
- if (end_ix > erts_proc.max) {
- end_ix = erts_proc.max;
- indices = end_ix - ix;
- }
-
- reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED;
-
- /* Pretend we have no reds left if we haven't got enough
- reductions to complete next chunk */
- if (reds > have_reds)
- have_reds = 0;
- }
-
- break;
- }
-
- case INSPECTING_TERMINATED_PROCESSES: {
- int i;
- int max_reds;
- int free_term_procs = 0;
- Uint64 invocation_interval;
- ErtsTermProcElement *tpep;
- ErtsTermProcElement *free_list = NULL;
-
- tpep = pbdp->bif_invocation;
- ERTS_PROCS_ASSERT(tpep);
- invocation_interval = tpep->u.bif_invocation.interval;
-
- max_reds = have_reds = ERTS_BIF_REDS_LEFT(p);
- if (max_reds > ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS)
- max_reds = ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS;
-
- reds = 0;
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_term_procs);
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- if (tpep->prev)
- tpep->prev->next = tpep->next;
- else {
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- saved_term_procs.start = tpep->next;
-
- if (saved_term_procs.start && saved_term_procs.start->ix >= 0) {
- free_list = saved_term_procs.start;
- free_term_procs = 1;
- }
- }
-
- if (tpep->next)
- tpep->next->prev = tpep->prev;
- else
- saved_term_procs.end = tpep->prev;
-
- tpep = tpep->next;
-
- i = 0;
- while (reds < max_reds && tpep) {
- if (tpep->ix < 0) {
- if (free_term_procs) {
- ERTS_PROCS_ASSERT(free_list);
- ERTS_PROCS_ASSERT(tpep->prev);
-
- tpep->prev->next = NULL; /* end of free_list */
- saved_term_procs.start = tpep;
- tpep->prev = NULL;
- free_term_procs = 0;
- }
- }
- else {
- int cix = tpep->ix/ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- Uint64 chunk_interval = pbdp->chunk[cix].interval;
- Eterm pid = tpep->u.process.pid;
- ERTS_PROCS_ASSERT(is_internal_pid(pid));
-
- if (tpep->u.process.spawned < invocation_interval) {
- if (tpep->u.process.exited < chunk_interval) {
- ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp,
- pid,
- tpep->u.process.spawned);
- pbdp->pid[pbdp->pid_ix] = pid;
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started[pbdp->pid_ix]
- = tpep->u.process.spawned;
-#endif
- pbdp->pid_ix++;
- ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz);
- }
- else {
- ERTS_PROCS_DBG_CHK_PID_FOUND(pbdp,
- pid,
- tpep->u.process.spawned);
- }
- }
- else {
- ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp,
- pid,
- tpep->u.process.spawned);
- }
-
- i++;
- if (i == ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED) {
- reds++;
- i = 0;
- }
- if (free_term_procs)
- reds += ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS;
- }
- tpep = tpep->next;
- }
-
- if (free_term_procs) {
- ERTS_PROCS_ASSERT(free_list);
- saved_term_procs.start = tpep;
- if (!tpep)
- saved_term_procs.end = NULL;
- else {
- ERTS_PROCS_ASSERT(tpep->prev);
- tpep->prev->next = NULL; /* end of free_list */
- tpep->prev = NULL;
- }
- }
-
- if (!tpep) {
- /* Done */
- ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz);
- pbdp->state = BUILDING_RESULT;
- pbdp->bif_invocation->next = free_list;
- free_list = pbdp->bif_invocation;
- pbdp->bif_invocation = NULL;
- }
- else {
- /* Link in bif_invocation again where we left off */
- pbdp->bif_invocation->prev = tpep->prev;
- pbdp->bif_invocation->next = tpep;
- tpep->prev = pbdp->bif_invocation;
- if (pbdp->bif_invocation->prev)
- pbdp->bif_invocation->prev->next = pbdp->bif_invocation;
- else {
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- saved_term_procs.start = pbdp->bif_invocation;
- }
- }
-
- ERTS_PROCS_DBG_CHK_TPLIST();
- ERTS_PROCS_DBG_CHK_FREELIST(free_list);
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
-
- /*
- * We do the actual free of term proc structures now when we
- * have released the table lock instead of when we encountered
-	     * them. This is because free() isn't for free and we don't want to
- * unnecessarily block other schedulers.
- */
- while (free_list) {
- tpep = free_list;
- free_list = tpep->next;
- erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep);
- }
-
- have_reds -= reds;
- if (have_reds < 0)
- have_reds = 0;
- BUMP_REDS(p, reds);
- break;
- }
-
- case BUILDING_RESULT: {
- int conses, ix, min_ix;
- Eterm *hp;
- Eterm res = *res_accp;
-
- ERTS_PROCS_DBG_VERIFY_PIDS(pbdp);
- ERTS_PROCS_DBG_CHK_RESLIST(res);
-
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, begin_build_res);
-
- have_reds = ERTS_BIF_REDS_LEFT(p);
- conses = ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED*have_reds;
- min_ix = pbdp->pid_ix - conses;
- if (min_ix < 0) {
- min_ix = 0;
- conses = pbdp->pid_ix;
- }
-
- hp = HAlloc(p, conses*2);
- ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, conses*2);
-
- for (ix = pbdp->pid_ix - 1; ix >= min_ix; ix--) {
- ERTS_PROCS_ASSERT(is_internal_pid(pbdp->pid[ix]));
- res = CONS(hp, pbdp->pid[ix], res);
- hp += 2;
- }
-
- ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp);
-
- pbdp->pid_ix = min_ix;
- if (min_ix == 0)
- pbdp->state = RETURN_RESULT;
- else {
- pbdp->pid_sz = min_ix;
- pbdp->pid = erts_realloc(ERTS_ALC_T_PROCS_PIDS,
- pbdp->pid,
- sizeof(Eterm)*pbdp->pid_sz);
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started = erts_realloc(ERTS_ALC_T_PROCS_PIDS,
- pbdp->debug.pid_started,
- (sizeof(Uint64)
- * pbdp->pid_sz));
-#endif
- }
- reds = conses/ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED;
- BUMP_REDS(p, reds);
- have_reds -= reds;
-
- ERTS_PROCS_DBG_CHK_RESLIST(res);
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, end_build_res);
- *res_accp = res;
- break;
- }
- case RETURN_RESULT:
- cleanup_processes_bif_data(mbp);
- return 1;
-
- default:
- erl_exit(ERTS_ABORT_EXIT,
- "erlang:processes/0: Invalid state: %d\n",
- (int) pbdp->state);
- }
-
-
- } while (have_reds || pbdp->state == RETURN_RESULT);
-
- return 0;
-}
-
-/*
- * processes_trap/2 is a hidden BIF that processes/0 traps to.
- */
-
-static BIF_RETTYPE processes_trap(BIF_ALIST_2)
-{
- Eterm res_acc;
- Binary *mbp;
-
- /*
- * This bif cannot be called from erlang code. It can only be
- * trapped to from processes/0; therefore, a bad argument
- * is a processes/0 internal error.
- */
-
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, call);
- ERTS_PROCS_ASSERT(is_nil(BIF_ARG_1) || is_list(BIF_ARG_1));
-
- res_acc = BIF_ARG_1;
-
- ERTS_PROCS_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
-
- ERTS_PROCS_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
- == cleanup_processes_bif_data);
- ERTS_PROCS_ASSERT(
- ((ErtsProcessesBifData *) ERTS_MAGIC_BIN_DATA(mbp))->debug.caller
- == BIF_P->id);
-
- if (processes_bif_engine(BIF_P, &res_acc, mbp)) {
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, return);
- BIF_RET(res_acc);
- }
- else {
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, trap);
- ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, BIF_ARG_2);
- }
-}
-
-
-
-/*
- * The actual processes/0 BIF.
- */
-
-BIF_RETTYPE processes_0(BIF_ALIST_0)
-{
- /*
- * A requirement: The list of pids returned should be a consistent
- * snapshot of all processes existing at some point
- * in time during the execution of processes/0. Since
- * processes might terminate while processes/0 is
- * executing, we have to keep track of terminated
- * processes and add them to the result. We also
- * ignore processes created after processes/0 has
- * begun executing.
- */
- Eterm res_acc = NIL;
- Binary *mbp = erts_create_magic_binary(sizeof(ErtsProcessesBifData),
- cleanup_processes_bif_data);
- ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp);
-
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, call);
- pbdp->state = INITIALIZING;
- ERTS_PROCS_DBG_INIT(BIF_P, pbdp);
-
- if (ERTS_BIF_REDS_LEFT(BIF_P) >= ERTS_PROCESSES_BIF_MIN_START_REDS
- && processes_bif_engine(BIF_P, &res_acc, mbp)) {
- erts_bin_free(mbp);
- ERTS_PROCS_DBG_CHK_RESLIST(res_acc);
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, return);
- BIF_RET(res_acc);
- }
- else {
- Eterm *hp;
- Eterm magic_bin;
- ERTS_PROCS_DBG_CHK_RESLIST(res_acc);
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mbp);
- ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp);
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, trap);
- ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, magic_bin);
- }
-}
-
-static void
-init_processes_bif(void)
-{
- saved_term_procs.start = NULL;
- saved_term_procs.end = NULL;
- processes_bif_tab_chunks = (((erts_proc.max - 1)
- / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE)
- + 1);
-
- /* processes_trap/2 is a hidden BIF that the processes/0 BIF traps to. */
- erts_init_trap_export(&processes_trap_export, am_erlang, am_processes_trap, 2,
- &processes_trap);
-
-}
-
-/*
- * Debug stuff
- */
-
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-int
-erts_dbg_check_halloc_lock(Process *p)
-{
- if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
- return 1;
- if (p->id == ERTS_INVALID_PID)
- return 1;
- if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
- return 1;
- if (erts_thr_progress_is_blocking())
- return 1;
- return 0;
-}
-#endif
-
-Eterm
-erts_debug_processes(Process *c_p)
-{
- /* This is the old processes/0 BIF. */
- int i;
- Uint need;
- Eterm res;
- Eterm* hp;
- Process *p;
- Eterm *hp_end;
-
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
-
- res = NIL;
- need = erts_process_count() * 2;
- hp = HAlloc(c_p, need); /* we need two heap words for each pid */
- hp_end = hp + need;
-
-    /* make the list by scanning backward */
-
-
- for (i = erts_proc.max-1; i >= 0; i--) {
- p = erts_pix2proc(i);
- if (p) {
- res = CONS(hp, p->id, res);
- hp += 2;
- }
- }
-
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
-
- HRelease(c_p, hp_end, hp);
-
- return res;
-}
-
-Eterm
-erts_debug_processes_bif_info(Process *c_p)
-{
- ERTS_DECL_AM(processes_bif_info);
- Eterm elements[] = {
- AM_processes_bif_info,
- make_small((Uint) ERTS_PROCESSES_BIF_MIN_START_REDS),
- make_small((Uint) processes_bif_tab_chunks),
- make_small((Uint) ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE),
- make_small((Uint) ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED),
- make_small((Uint) ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS),
- make_small((Uint) ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED),
- make_small((Uint) ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS),
- make_small((Uint) ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED),
- make_small((Uint) ERTS_PROCESSES_BIF_DEBUGLEVEL)
- };
- Uint sz = 0;
- Eterm *hp;
- (void) erts_bld_tuplev(NULL, &sz, sizeof(elements)/sizeof(Eterm), elements);
- hp = HAlloc(c_p, sz);
- return erts_bld_tuplev(&hp, NULL, sizeof(elements)/sizeof(Eterm), elements);
-}
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
-static void
-debug_processes_check_found_pid(ErtsProcessesBifData *pbdp,
- Eterm pid,
- Uint64 ic,
- int pid_should_be_found)
-{
- int i;
- for (i = 0; i < pbdp->pid_ix; i++) {
- if (pbdp->pid[i] == pid && pbdp->debug.pid_started[i] == ic) {
- ERTS_PROCS_ASSERT(pid_should_be_found);
- return;
- }
- }
- ERTS_PROCS_ASSERT(!pid_should_be_found);
-}
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
-static void
-debug_processes_check_res_list(Eterm list)
-{
- while (is_list(list)) {
- Eterm* consp = list_val(list);
- Eterm hd = CAR(consp);
- ERTS_PROCS_ASSERT(is_internal_pid(hd));
- list = CDR(consp);
- }
-
- ERTS_PROCS_ASSERT(is_nil(list));
-}
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
-
-static void
-debug_processes_save_all_pids(ErtsProcessesBifData *pbdp)
-{
- int ix, tix, cpix;
- pbdp->debug.correct_pids_verified = 0;
- pbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
- sizeof(Eterm)*pbdp->pid_sz);
-
- for (tix = 0, cpix = 0; tix < erts_proc.max; tix++) {
- Process *rp = erts_pix2proc(tix);
- if (rp) {
- ERTS_PROCS_ASSERT(is_internal_pid(rp->id));
- pbdp->debug.correct_pids[cpix++] = rp->id;
- ERTS_PROCS_ASSERT(cpix <= pbdp->pid_sz);
- }
- }
- ERTS_PROCS_ASSERT(cpix == pbdp->pid_sz);
-
- for (ix = 0; ix < pbdp->pid_sz; ix++)
- pbdp->pid[ix] = make_small(ix);
-}
-
-static void
-debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp)
-{
- int ix, cpix;
-
- ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz);
-
- for (ix = 0; ix < pbdp->pid_sz; ix++) {
- int found = 0;
- Eterm pid = pbdp->pid[ix];
- ERTS_PROCS_ASSERT(is_internal_pid(pid));
- for (cpix = ix; cpix < pbdp->pid_sz; cpix++) {
- if (pbdp->debug.correct_pids[cpix] == pid) {
- pbdp->debug.correct_pids[cpix] = NIL;
- found = 1;
- break;
- }
- }
- if (!found) {
- for (cpix = 0; cpix < ix; cpix++) {
- if (pbdp->debug.correct_pids[cpix] == pid) {
- pbdp->debug.correct_pids[cpix] = NIL;
- found = 1;
- break;
- }
- }
- }
- ERTS_PROCS_ASSERT(found);
- }
- pbdp->debug.correct_pids_verified = 1;
-
- erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.correct_pids);
- pbdp->debug.correct_pids = NULL;
-}
-#endif /* ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS */
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
-static void
-debug_processes_check_term_proc_list(void)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx));
- if (!saved_term_procs.start)
- ERTS_PROCS_ASSERT(!saved_term_procs.end);
- else {
- Uint64 curr_interval = get_proc_interval();
- Uint64 *prev_x_interval_p = NULL;
- ErtsTermProcElement *tpep;
-
- for (tpep = saved_term_procs.start; tpep; tpep = tpep->next) {
- if (!tpep->prev)
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- else
- ERTS_PROCS_ASSERT(tpep->prev->next == tpep);
- if (!tpep->next)
- ERTS_PROCS_ASSERT(saved_term_procs.end == tpep);
- else
- ERTS_PROCS_ASSERT(tpep->next->prev == tpep);
- if (tpep->ix < 0) {
- Uint64 interval = tpep->u.bif_invocation.interval;
- ERTS_PROCS_ASSERT(interval <= curr_interval);
- }
- else {
- Uint64 s_interval = tpep->u.process.spawned;
- Uint64 x_interval = tpep->u.process.exited;
-
- ERTS_PROCS_ASSERT(s_interval <= x_interval);
- if (prev_x_interval_p)
- ERTS_PROCS_ASSERT(*prev_x_interval_p <= x_interval);
- prev_x_interval_p = &tpep->u.process.exited;
- ERTS_PROCS_ASSERT(is_internal_pid(tpep->u.process.pid));
- ERTS_PROCS_ASSERT(tpep->ix
- == internal_pid_index(tpep->u.process.pid));
- }
- }
-
- }
-}
-
-static void
-debug_processes_check_term_proc_free_list(ErtsTermProcElement *free_list)
-{
- if (saved_term_procs.start) {
- ErtsTermProcElement *ftpep;
- ErtsTermProcElement *tpep;
-
- for (ftpep = free_list; ftpep; ftpep = ftpep->next) {
- for (tpep = saved_term_procs.start; tpep; tpep = tpep->next)
- ERTS_PROCS_ASSERT(ftpep != tpep);
- }
- }
-}
-
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0
-
-static void
-debug_processes_assert_error(char* expr, char* file, int line)
-{
- fflush(stdout);
- erts_fprintf(stderr, "%s:%d: Assertion failed: %s\n", file, line, expr);
- fflush(stderr);
- abort();
-}
-
-#endif
-
-/* *\
- * End of the processes/0 BIF implementation. *
-\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
/*
 * A nice system halt closing all open ports goes as follows:
* 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
@@ -10499,3 +9043,19 @@ void erl_halt(int code)
notify_reap_ports_relb();
}
}
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int
+erts_dbg_check_halloc_lock(Process *p)
+{
+ if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
+ return 1;
+ if (p->common.id == ERTS_INVALID_PID)
+ return 1;
+ if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
+ return 1;
+ if (erts_thr_progress_is_blocking())
+ return 1;
+ return 0;
+}
+#endif
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index e789c873fb..6d1032c292 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -42,6 +42,9 @@ typedef struct process Process;
#include "erl_process_lock.h" /* Only pull out important types... */
#undef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
+#define ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_port.h"
+#undef ERL_PORT_GET_PORT_TYPE_ONLY__
#include "erl_vm.h"
#include "erl_smp.h"
#include "erl_message.h"
@@ -66,11 +69,10 @@ typedef struct process Process;
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
struct ErtsNodesMonitor_;
-struct port;
#define ERTS_MAX_NO_OF_SCHEDULERS 1024
-#define ERTS_DEFAULT_MAX_PROCESSES (1 << 15)
+#define ERTS_DEFAULT_MAX_PROCESSES (1 << 18)
#define ERTS_HEAP_ALLOC(Type, Size) \
erts_alloc((Type), (Size))
@@ -205,32 +207,10 @@ extern int erts_sched_thread_suggested_stack_size;
((Uint32) erts_smp_atomic32_read_nob(&(RQ)->flags))
#define ERTS_RUNQ_FLGS_GET_MB(RQ) \
((Uint32) erts_smp_atomic32_read_mb(&(RQ)->flags))
-#define ERTS_RUNQ_FLGS_MASK_SET(RQ, MSK, FLGS) \
- ((Uint32) erts_smp_atomic32_mask_set_relb(&(RQ)->flags, \
- (erts_aint32_t) (MSK), \
- (erts_aint32_t) (FLGS)))
-
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_mask_set_relb(erts_smp_atomic32_t *a32p,
- erts_aint32_t mask,
- erts_aint32_t set);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE erts_aint32_t
-erts_smp_atomic32_mask_set_relb(erts_smp_atomic32_t *a32p,
- erts_aint32_t mask,
- erts_aint32_t set)
-{
- erts_aint32_t act = erts_smp_atomic32_read_nob(a32p);
- while (1) {
- erts_aint32_t exp = act;
- erts_aint32_t new = exp & ~mask;
- new |= (mask & set);
- act = erts_smp_atomic32_cmpxchg_relb(a32p, new, exp);
- if (act == exp)
- return act;
- }
-}
-#endif
+#define ERTS_RUNQ_FLGS_READ_BSET(RQ, MSK, FLGS) \
+ ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \
+ (erts_aint32_t) (MSK), \
+ (erts_aint32_t) (FLGS)))
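/*
 * Hedged sketch, not part of the patch: ERTS_RUNQ_FLGS_READ_BSET maps onto
 * erts_smp_atomic32_read_bset_relb(), i.e. "atomically replace the bits
 * selected by MSK with the corresponding bits of FLGS and return the
 * previous value". The helper removed above open-coded the same pattern as
 * a compare-and-exchange loop; a self-contained C11 analogue (without the
 * ERTS release-barrier naming) looks like this:
 */
#include <stdatomic.h>
#include <stdint.h>

static uint32_t read_bset_32(_Atomic uint32_t *a, uint32_t mask, uint32_t set)
{
    uint32_t old = atomic_load_explicit(a, memory_order_relaxed);
    for (;;) {
        uint32_t nval = (old & ~mask) | (set & mask);
        /* On failure 'old' is refreshed with the current value. */
        if (atomic_compare_exchange_weak_explicit(a, &old, nval,
                                                  memory_order_release,
                                                  memory_order_relaxed))
            return old;
    }
}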
typedef enum {
ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED,
@@ -311,6 +291,7 @@ struct ErtsProcList_ {
Eterm pid;
Uint64 started_interval;
ErtsProcList* next;
+ ErtsProcList* prev;
};
typedef struct ErtsMiscOpList_ ErtsMiscOpList;
@@ -401,8 +382,8 @@ struct ErtsRunQueue_ {
struct {
ErtsRunQueueInfo info;
- struct port *start;
- struct port *end;
+ Port *start;
+ Port *end;
} ports;
};
@@ -512,7 +493,7 @@ struct ErtsSchedulerData_ {
ErtsSchedulerSleepInfo *ssi;
Process *current_process;
Uint no; /* Scheduler number */
- struct port *current_port;
+ Port *current_port;
ErtsRunQueue *run_queue;
int virtual_reds;
int cpu_id; /* >= 0 when bound */
@@ -737,8 +718,8 @@ struct ErtsPendingSuspend_ {
# define BIN_OLD_VHEAP(p) (p)->bin_old_vheap
struct process {
- Eterm id; /* The pid of this process
- (need to be first in struct) */
+ ErtsPTabElementCommon common; /* *Need* to be first in struct */
+
/* All fields in the PCB that differs between different heap
* architectures, have been moved to the end of this struct to
* make sure that as few offsets as possible differ. Different
@@ -782,12 +763,8 @@ struct process {
* Only valid for the current process.
*/
Uint32 rcount; /* suspend count */
- int prio; /* Priority of process */
int schedule_count; /* Times left to reschedule a low prio process */
Uint reds; /* No of reductions for this process */
- Eterm tracer_proc; /* If proc is traced, this is the tracer
- (can NOT be boxed) */
- Uint trace_flags; /* Trace flags (used to be in flags) */
Eterm group_leader; /* Pid in charge
(can be boxed) */
Uint flags; /* Trap exit, etc (no trace flags anymore) */
@@ -797,10 +774,6 @@ struct process {
Process *next; /* Pointer to next process in run queue */
- struct reg_proc *reg; /* NULL iff not registered */
- ErtsLink *nlinks;
- ErtsMonitor *monitors; /* The process monitors, both ends */
-
struct ErtsNodesMonitor_ *nodes_monitors;
ErtsSuspendMonitor *suspend_monitors; /* Processes suspended by
@@ -809,7 +782,10 @@ struct process {
ErlMessageQueue msg; /* Message queue */
- ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
+ union {
+ ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
+ void *terminate;
+ } u;
ProcDict *dictionary; /* Process dictionary, may be NULL */
@@ -834,7 +810,6 @@ struct process {
*/
Eterm parent; /* Pid of process that created this process. */
erts_approx_time_t approx_started; /* Time when started. */
- Uint64 started_interval;
/* This is the place, where all fields that differs between memory
* architectures, have gone to.
@@ -856,25 +831,11 @@ struct process {
Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
- union {
- struct {
-#ifdef ERTS_SMP
- ErtsSmpPTimer *ptimer;
- ErlMessageInQueue msg_inq;
- ErtsPendExit pending_exit;
-#else
- ErlTimer tm; /* Timer entry */
-#endif
- } alive; /* when process is alive */
-#ifdef ERTS_SMP
- ErtsThrPrgrLaterOp release_data; /* when releasing process struct */
-#endif
- void *exit_data; /* Misc data referred during termination */
- } u;
-
erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
#ifdef ERTS_SMP
+ ErlMessageInQueue msg_inq;
+ ErtsPendExit pending_exit;
erts_proc_lock_t lock;
ErtsSchedulerData *scheduler_data;
Eterm suspendee;
@@ -904,6 +865,8 @@ struct process {
#endif
};
+extern const Process erts_invalid_process;
+
#ifdef CHECK_FOR_HOLES
# define INIT_HOLE_CHECK(p) \
do { \
@@ -1028,8 +991,6 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra);
Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz);
#endif
-extern erts_smp_rwmtx_t erts_proc_tab_rwmtx;
-extern erts_smp_atomic_t *erts_proc_tab;
extern Uint erts_default_process_flags;
extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
/* If any of the erts_system_monitor_* variables are set (enabled),
@@ -1057,10 +1018,6 @@ struct erts_system_profile_flags_t {
unsigned int exclusive : 1;
};
extern struct erts_system_profile_flags_t erts_system_profile_flags;
-
-#define IS_TRACED(p) ( (p)->tracer_proc != NIL )
-#define ARE_TRACE_FLAGS_ON(p,tf) ( ((p)->trace_flags & (tf|F_SENSITIVE)) == (tf) )
-#define IS_TRACED_FL(p,tf) ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) )
/* process flags */
#define F_HIBERNATE_SCHED (1 << 0) /* Schedule out after hibernate op */
@@ -1171,7 +1128,172 @@ Uint64 erts_step_proc_interval(void);
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
-int erts_proclist_same(ErtsProcList *, Process *);
+
+ERTS_GLB_INLINE int erts_proclist_same(ErtsProcList *, Process *);
+ERTS_GLB_INLINE void erts_proclist_store_first(ErtsProcList **, ErtsProcList *);
+ERTS_GLB_INLINE void erts_proclist_store_last(ErtsProcList **, ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_first(ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_last(ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_next(ErtsProcList *, ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_prev(ErtsProcList *, ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_last(ErtsProcList **);
+ERTS_GLB_INLINE int erts_proclist_fetch(ErtsProcList **, ErtsProcList **);
+ERTS_GLB_INLINE void erts_proclist_remove(ErtsProcList **, ErtsProcList *);
+ERTS_GLB_INLINE int erts_proclist_is_empty(ErtsProcList *);
+ERTS_GLB_INLINE int erts_proclist_is_first(ErtsProcList *, ErtsProcList *);
+ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *, ErtsProcList *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_proclist_same(ErtsProcList *plp, Process *p)
+{
+ return (plp->pid == p->common.id
+ && (plp->started_interval
+ == p->common.u.alive.started_interval));
+}
+
+ERTS_GLB_INLINE void erts_proclist_store_first(ErtsProcList **list,
+ ErtsProcList *element)
+{
+ if (!*list)
+ element->next = element->prev = element;
+ else {
+ element->prev = (*list)->prev;
+ element->next = *list;
+ element->prev->next = element;
+ element->next->prev = element;
+ }
+ *list = element;
+}
+
+ERTS_GLB_INLINE void erts_proclist_store_last(ErtsProcList **list,
+ ErtsProcList *element)
+{
+ if (!*list) {
+ element->next = element->prev = element;
+ *list = element;
+ }
+ else {
+ element->prev = (*list)->prev;
+ element->next = *list;
+ element->prev->next = element;
+ element->next->prev = element;
+ }
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_first(ErtsProcList *list)
+{
+ return list;
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_last(ErtsProcList *list)
+{
+ if (!list)
+ return NULL;
+ else
+ return list->prev;
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_next(ErtsProcList *list,
+ ErtsProcList *element)
+{
+ ErtsProcList *next;
+ ASSERT(list && element);
+ next = element->next;
+ return list == next ? NULL : next;
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_prev(ErtsProcList *list,
+ ErtsProcList *element)
+{
+ ErtsProcList *prev;
+ ASSERT(list && element);
+ prev = element->prev;
+ return list == element ? NULL : prev;
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
+{
+ if (!*list)
+ return NULL;
+ else {
+ ErtsProcList *res = *list;
+        if (res->next == res)
+ *list = NULL;
+ else
+ *list = res->next;
+ res->next->prev = res->prev;
+ res->prev->next = res->next;
+ return res;
+ }
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_last(ErtsProcList **list)
+{
+ if (!*list)
+ return NULL;
+ else {
+ ErtsProcList *res = (*list)->prev;
+ if (res == *list)
+ *list = NULL;
+ res->next->prev = res->prev;
+ res->prev->next = res->next;
+ return res;
+ }
+}
+
+ERTS_GLB_INLINE int erts_proclist_fetch(ErtsProcList **list_first,
+ ErtsProcList **list_last)
+{
+ if (!*list_first) {
+ if (list_last)
+ *list_last = NULL;
+ return 0;
+ }
+ else {
+ if (list_last)
+ *list_last = (*list_first)->prev;
+ (*list_first)->prev->next = NULL;
+ (*list_first)->prev = NULL;
+ return !0;
+ }
+}
+
+ERTS_GLB_INLINE void erts_proclist_remove(ErtsProcList **list,
+ ErtsProcList *element)
+{
+ ASSERT(list && *list);
+ if (*list == element) {
+ *list = element->next;
+ if (*list == element)
+ *list = NULL;
+ }
+ element->next->prev = element->prev;
+ element->prev->next = element->next;
+}
+
+ERTS_GLB_INLINE int erts_proclist_is_empty(ErtsProcList *list)
+{
+ return list == NULL;
+}
+
+ERTS_GLB_INLINE int erts_proclist_is_first(ErtsProcList *list,
+ ErtsProcList *element)
+{
+ ASSERT(list && element);
+ return list == element;
+}
+
+ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *list,
+ ErtsProcList *element)
+{
+ ASSERT(list && element);
+ return list->prev == element;
+}
+
+#endif
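
The erts_proclist functions above implement a circular, doubly linked list whose head pointer is the first element (an empty list is simply a NULL head). Below is a minimal standalone sketch of the same store-first/fetch-first discipline, using a hypothetical `node` type rather than the ERTS structures, so it can be compiled and run on its own:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for ErtsProcList: circular, doubly linked. */
    struct node {
        struct node *next;
        struct node *prev;
        int id;
    };

    /* Same discipline as erts_proclist_store_first(). */
    static void store_first(struct node **list, struct node *el)
    {
        if (!*list)
            el->next = el->prev = el;
        else {
            el->prev = (*list)->prev;
            el->next = *list;
            el->prev->next = el;
            el->next->prev = el;
        }
        *list = el;
    }

    /* Same discipline as erts_proclist_fetch_first(). */
    static struct node *fetch_first(struct node **list)
    {
        struct node *res = *list;
        if (!res)
            return NULL;
        *list = (res->next == res) ? NULL : res->next;
        res->next->prev = res->prev;
        res->prev->next = res->next;
        return res;
    }

    int main(void)
    {
        struct node a = {0}, b = {0};
        struct node *list = NULL;
        a.id = 1; b.id = 2;
        store_first(&list, &a);                  /* list: a    */
        store_first(&list, &b);                  /* list: b, a */
        printf("%d\n", fetch_first(&list)->id);  /* prints 2   */
        printf("%d\n", fetch_first(&list)->id);  /* prints 1   */
        return 0;
    }

Note how fetch_first only clears the head when the node links back to itself, i.e. when it was the only element in the ring.
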
int erts_sched_set_wakeup_other_thresold(char *str);
int erts_sched_set_wakeup_other_type(char *str);
@@ -1221,7 +1343,7 @@ void erts_schedule_multi_misc_aux_work(int ignore_self,
erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
-void erts_init_process(int);
+void erts_init_process(int, int);
Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm);
Uint erts_run_queues_len(Uint *);
void erts_add_to_runq(Process *);
@@ -1240,7 +1362,6 @@ void set_timer(Process*, Uint);
void cancel_timer(Process*);
/* Begin System profile */
Uint erts_runnable_process_count(void);
-Uint erts_process_count(void);
/* End System profile */
void erts_init_empty_process(Process *p);
void erts_cleanup_empty_process(Process* p);
@@ -1264,7 +1385,7 @@ Eterm erts_sched_stat_term(Process *p, int total);
void erts_free_proc(Process *);
-void erts_suspend(Process*, ErtsProcLocks, struct port*);
+void erts_suspend(Process*, ErtsProcLocks, Port*);
void erts_resume(Process*, ErtsProcLocks);
int erts_resume_processes(ErtsProcList *);
@@ -1289,9 +1410,6 @@ void erts_deep_process_dump(int, void *);
Eterm erts_get_reader_groups_map(Process *c_p);
Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
-Sint erts_test_next_pid(int, Uint);
-Eterm erts_debug_processes(Process *c_p);
-Eterm erts_debug_processes_bif_info(Process *c_p);
Uint erts_debug_nbalance(void);
int erts_debug_wait_deallocations(Process *c_p);
@@ -1603,7 +1721,7 @@ ERTS_GLB_INLINE
Eterm erts_get_current_pid(void)
{
Process *proc = erts_get_current_process();
- return proc ? proc->id : THE_NON_VALUE;
+ return proc ? proc->common.id : THE_NON_VALUE;
}
ERTS_GLB_INLINE
@@ -1812,10 +1930,10 @@ extern int erts_disable_proc_not_running_opt;
/* Minimum NUMBER of processes for a small system to start */
-#ifdef ERTS_SMP
+#define ERTS_MIN_PROCESSES 1024
+#if defined(ERTS_SMP) && ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS
+#undef ERTS_MIN_PROCESSES
#define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS
-#else
-#define ERTS_MIN_PROCESSES 16
#endif
void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index 93466da3aa..bf384c66e1 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -360,7 +360,7 @@ static void pd_hash_erase(Process *p, Eterm id, Eterm *ret)
erts_fprintf(stderr,
"Process dictionary for process %T is broken, trying to "
"display term found in line %d:\n"
- "%T\n", p->id, __LINE__, old);
+ "%T\n", p->common.id, __LINE__, old);
#endif
erl_exit(1, "Damaged process dictionary found during erase/1.");
}
@@ -405,7 +405,7 @@ Eterm erts_pd_hash_get(Process *p, Eterm id)
erts_fprintf(stderr,
"Process dictionary for process %T is broken, trying to "
"display term found in line %d:\n"
- "%T\n", p->id, __LINE__, tmp);
+ "%T\n", p->common.id, __LINE__, tmp);
#endif
erl_exit(1, "Damaged process dictionary found during get/1.");
}
@@ -614,7 +614,7 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
erts_fprintf(stderr,
"Process dictionary for process %T is broken, trying to "
"display term found in line %d:\n"
- "%T\n", p->id, __LINE__, old);
+ "%T\n", p->common.id, __LINE__, old);
#endif
erl_exit(1, "Damaged process dictionary found during put/2.");
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 542c5ed0d9..ba74dfd6a1 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -60,11 +60,11 @@ extern BeamInstr beam_continue_exit[];
void
erts_deep_process_dump(int to, void *to_arg)
{
- int i;
+ int i, max = erts_ptab_max(&erts_proc);
all_binaries = NULL;
- for (i = 0; i < erts_max_processes; i++) {
+ for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p && p->i != ENULL) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
@@ -85,8 +85,8 @@ dump_process_info(int to, void *to_arg, Process *p)
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
- if ((p->trace_flags & F_SENSITIVE) == 0 && p->msg.first) {
- erts_print(to, to_arg, "=proc_messages:%T\n", p->id);
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) {
+ erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id);
for (mp = p->msg.first; mp != NULL; mp = mp->next) {
Eterm mesg = ERL_MESSAGE_TERM(mp);
if (is_value(mesg))
@@ -100,21 +100,21 @@ dump_process_info(int to, void *to_arg, Process *p)
}
}
- if ((p->trace_flags & F_SENSITIVE) == 0) {
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
if (p->dictionary) {
- erts_print(to, to_arg, "=proc_dictionary:%T\n", p->id);
+ erts_print(to, to_arg, "=proc_dictionary:%T\n", p->common.id);
erts_deep_dictionary_dump(to, to_arg,
p->dictionary, dump_element_nl);
}
}
- if ((p->trace_flags & F_SENSITIVE) == 0) {
- erts_print(to, to_arg, "=proc_stack:%T\n", p->id);
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
+ erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id);
for (sp = p->stop; sp < STACK_START(p); sp++) {
yreg = stack_element_dump(to, to_arg, p, sp, yreg);
}
- erts_print(to, to_arg, "=proc_heap:%T\n", p->id);
+ erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id);
for (sp = p->stop; sp < STACK_START(p); sp++) {
Eterm term = *sp;
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 84a8270d06..2db5df06b4 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -66,8 +66,7 @@
#endif
#include "erl_process.h"
-
-const Process erts_proc_lock_busy = {ERTS_INVALID_PID};
+#include "erl_thr_progress.h"
#ifdef ERTS_SMP
@@ -399,7 +398,7 @@ wait_for_locks(Process *p,
ErtsProcLocks need_locks,
ErtsProcLocks olflgs)
{
- erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
erts_tse_t *wtr;
/* Acquire a waiter object on which this thread can wait. */
@@ -553,7 +552,7 @@ erts_proc_unlock_failed(Process *p,
erts_pix_lock_t *pixlck,
ErtsProcLocks wait_locks)
{
- erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lock);
@@ -580,7 +579,7 @@ erts_proc_lock_prepare_proc_lock_waiter(void)
*/
static void
-proc_safelock(int is_sched,
+proc_safelock(int is_managed,
Process *a_proc,
ErtsProcLocks a_have_locks,
ErtsProcLocks a_need_locks,
@@ -603,40 +602,40 @@ proc_safelock(int is_sched,
* Locks with the same lock order should be locked on p1 before p2.
*/
if (a_proc) {
- if (a_proc->id < b_proc->id) {
+ if (a_proc->common.id < b_proc->common.id) {
p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = a_proc->id;
+ pid1 = a_proc->common.id;
#endif
need_locks1 = a_need_locks;
have_locks1 = a_have_locks;
p2 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid2 = b_proc->id;
+ pid2 = b_proc->common.id;
#endif
need_locks2 = b_need_locks;
have_locks2 = b_have_locks;
}
- else if (a_proc->id > b_proc->id) {
+ else if (a_proc->common.id > b_proc->common.id) {
p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = b_proc->id;
+ pid1 = b_proc->common.id;
#endif
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
p2 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid2 = a_proc->id;
+ pid2 = a_proc->common.id;
#endif
need_locks2 = a_need_locks;
have_locks2 = a_have_locks;
}
else {
ERTS_LC_ASSERT(a_proc == b_proc);
- ERTS_LC_ASSERT(a_proc->id == b_proc->id);
+ ERTS_LC_ASSERT(a_proc->common.id == b_proc->common.id);
p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = a_proc->id;
+ pid1 = a_proc->common.id;
#endif
need_locks1 = a_need_locks | b_need_locks;
have_locks1 = a_have_locks | b_have_locks;
@@ -651,7 +650,7 @@ proc_safelock(int is_sched,
else {
p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = b_proc->id;
+ pid1 = b_proc->common.id;
#endif
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
@@ -706,7 +705,7 @@ proc_safelock(int is_sched,
if (unlock_locks) {
have_locks1 &= ~unlock_locks;
need_locks1 |= unlock_locks;
- if (!is_sched && !have_locks1) {
+ if (!is_managed && !have_locks1) {
refc1 = 1;
erts_smp_proc_inc_refc(p1);
}
@@ -716,7 +715,7 @@ proc_safelock(int is_sched,
if (unlock_locks) {
have_locks2 &= ~unlock_locks;
need_locks2 |= unlock_locks;
- if (!is_sched && !have_locks2) {
+ if (!is_managed && !have_locks2) {
refc2 = 1;
erts_smp_proc_inc_refc(p2);
}
@@ -797,7 +796,7 @@ proc_safelock(int is_sched,
}
#endif
- if (!is_sched) {
+ if (!is_managed) {
if (refc1)
erts_smp_proc_dec_refc(p1);
if (refc2)
@@ -830,7 +829,7 @@ erts_pid2proc_opt(Process *c_p,
int flags)
{
Process *dec_refc_proc = NULL;
- int need_ptl;
+ ErtsThrPrgrDelayHandle dhndl;
ErtsProcLocks need_locks;
Uint pix;
Process *proc;
@@ -853,8 +852,8 @@ erts_pid2proc_opt(Process *c_p,
ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
need_locks = pid_need_locks;
- if (c_p && c_p->id == pid) {
- ASSERT(c_p->id != ERTS_INVALID_PID);
+ if (c_p && c_p->common.id == pid) {
+ ASSERT(c_p->common.id != ERTS_INVALID_PID);
ASSERT(c_p == erts_pix2proc(pix));
if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
@@ -868,15 +867,12 @@ erts_pid2proc_opt(Process *c_p,
}
}
- need_ptl = !erts_get_scheduler_id();
-
- if (need_ptl)
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
+ dhndl = erts_thr_progress_unmanaged_delay();
- proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);
+ proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);
if (proc) {
- if (proc->id != pid)
+ if (proc->common.id != pid)
proc = NULL;
else if (!need_locks) {
if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
@@ -935,6 +931,7 @@ erts_pid2proc_opt(Process *c_p,
if (flags & ERTS_P2P_FLG_TRY_LOCK)
proc = ERTS_PROC_LOCK_BUSY;
else {
+ int managed;
if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
erts_smp_proc_inc_refc(proc);
@@ -942,14 +939,21 @@ erts_pid2proc_opt(Process *c_p,
erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif
- if (need_ptl) {
+ managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
+ if (!managed) {
erts_smp_proc_inc_refc(proc);
+ erts_thr_progress_unmanaged_continue(dhndl);
dec_refc_proc = proc;
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
- need_ptl = 0;
+
+ /*
+ * We don't want to call
+ * erts_thr_progress_unmanaged_continue()
+ * again.
+ */
+ dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
}
- proc_safelock(!need_ptl,
+ proc_safelock(managed,
c_p,
c_p_have_locks,
c_p_have_locks,
@@ -961,8 +965,8 @@ erts_pid2proc_opt(Process *c_p,
}
}
- if (need_ptl)
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
if (need_locks
&& proc
@@ -970,7 +974,7 @@ erts_pid2proc_opt(Process *c_p,
&& (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
? ERTS_PROC_IS_EXITING(proc)
: (proc
- != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) {
+ != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {
erts_smp_proc_unlock(proc, need_locks);
@@ -1012,22 +1016,22 @@ erts_proc_lock_init(Process *p)
erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_mtx_init_x(&p->lock.main, "proc_main", p->id);
+ erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->id);
+ erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->id);
+ erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->id);
+ erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
@@ -1064,11 +1068,11 @@ erts_proc_lock_fin(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_proc_lock_init(Process *p) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
- if (p->id != ERTS_INVALID_PID) {
- erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->id);
+ if (p->common.id != ERTS_INVALID_PID) {
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->common.id);
} else {
erts_lcnt_init_lock(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK);
erts_lcnt_init_lock(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK);
@@ -1176,10 +1180,11 @@ void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res
}
-void erts_lcnt_enable_proc_lock_count(int enable) {
- int i;
+void erts_lcnt_enable_proc_lock_count(int enable)
+{
+ int i, max = erts_ptab_max(&erts_proc);
- for (i = 0; i < erts_max_processes; ++i) {
+ for (i = 0; i < max; ++i) {
Process* p = erts_pix2proc(i);
if (p) {
if (enable) {
@@ -1208,7 +1213,7 @@ void
erts_proc_lc_lock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
@@ -1232,7 +1237,7 @@ void
erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
@@ -1256,7 +1261,7 @@ void
erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
@@ -1283,7 +1288,7 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
@@ -1318,7 +1323,7 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
@@ -1353,7 +1358,7 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
@@ -1390,7 +1395,7 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
{
if (locks & ERTS_PROC_LOCKS_ALL) {
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN)
@@ -1415,7 +1420,7 @@ void erts_proc_lc_chk_only_proc_main(Process *p)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
erts_lc_check_exact(&proc_main, 1);
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
@@ -1439,19 +1444,19 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
ERTS_PROC_LC_EMPTY_LOCK_INIT};
if (locks & ERTS_PROC_LOCK_MAIN) {
have_locks[have_locks_len].id = lc_id.proc_lock_main;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_LINK) {
have_locks[have_locks_len].id = lc_id.proc_lock_link;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_lc_lock_t have_locks[4];
@@ -1484,35 +1489,35 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCK_MAIN) {
have_locks[have_locks_len].id = lc_id.proc_lock_main;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_main;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_LINK) {
have_locks[have_locks_len].id = lc_id.proc_lock_link;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_link;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_msgq;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_lc_lock_t have_locks[4];
@@ -1547,16 +1552,16 @@ erts_proc_lc_my_proc_locks(Process *p)
ErtsProcLocks res = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t locks[4] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_lc_lock_t locks[4] = {p->lock.main.lc,
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 4aec19c8c3..9dd503f3cb 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -176,8 +176,8 @@ typedef struct erts_proc_lock_t_ {
* on multiple processes, locks on processes with low process ids
* have to be locked before locks on processes with high process
* ids. E.g., if the main and the message queue locks are to be
- * locked on processes p1 and p2 and p1->id < p2->id, then locks
- * should be locked in the following order:
+ * locked on processes p1 and p2 and p1->common.id < p2->common.id,
+ * then locks should be locked in the following order:
* 1. main lock on p1
* 2. main lock on p2
* 3. message queue lock on p1
@@ -203,7 +203,7 @@ typedef struct erts_proc_lock_t_ {
& ~ERTS_PROC_LOCK_MAIN)
-#define ERTS_PIX_LOCKS_BITS 8
+#define ERTS_PIX_LOCKS_BITS 10
#define ERTS_NO_OF_PIX_LOCKS (1 << ERTS_PIX_LOCKS_BITS)
@@ -767,7 +767,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks, file, line);
#elif defined(ERTS_SMP)
@@ -775,7 +775,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks);
#endif /*ERTS_SMP*/
@@ -789,7 +789,7 @@ erts_smp_proc_unlock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
#endif
@@ -805,7 +805,7 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
#endif
@@ -814,21 +814,15 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *p)
{
#ifdef ERTS_SMP
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_aint32_t refc = erts_atomic32_inc_read_nob(&p->lock.refc);
- ERTS_SMP_LC_ASSERT(refc > 1);
-#else
- erts_atomic32_inc_nob(&p->lock.refc);
-#endif
+ erts_ptab_inc_refc(&p->common);
#endif
}
ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
{
#ifdef ERTS_SMP
- erts_aint32_t refc = erts_atomic32_dec_read_nob(&p->lock.refc);
- ERTS_SMP_LC_ASSERT(refc >= 0);
- if (refc == 0)
+ int referred = erts_ptab_dec_test_refc(&p->common);
+ if (!referred)
erts_free_proc(p);
#endif
}
@@ -836,10 +830,8 @@ ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 add_refc)
{
#ifdef ERTS_SMP
- erts_aint32_t refc = erts_atomic32_add_read_nob(&p->lock.refc,
- (erts_aint32_t) add_refc);
- ERTS_SMP_LC_ASSERT(refc >= 0);
- if (refc == 0)
+ int referred = erts_ptab_add_test_refc(&p->common, add_refc);
+ if (!referred)
erts_free_proc(p);
#endif
}
@@ -875,8 +867,7 @@ void erts_proc_safelock(Process *a_proc,
#define ERTS_P2P_FLG_TRY_LOCK (1 << 1)
#define ERTS_P2P_FLG_SMP_INC_REFC (1 << 2)
-#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_proc_lock_busy)
-extern const Process erts_proc_lock_busy;
+#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_invalid_process)
#define erts_pid2proc(PROC, HL, PID, NL) \
erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0)
@@ -896,33 +887,24 @@ Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int);
ERTS_GLB_INLINE Process *erts_pix2proc(int ix)
{
Process *proc;
- ASSERT(0 <= ix && ix < erts_proc.max);
- proc = (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[ix]);
+ ASSERT(0 <= ix && ix < erts_ptab_max(&erts_proc));
+ proc = (Process *) erts_ptab_pix2intptr_nob(&erts_proc, ix);
return proc == ERTS_PROC_LOCK_BUSY ? NULL : proc;
}
ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid)
{
Process *proc;
- int pix;
- /*
- * In SMP case: Only scheduler threads are allowed
- * to use this function. Other threads need to
- * atomicaly increment refc at lookup, i.e., use
- * erts_pid2proc_opt() with ERTS_P2P_FLG_SMP_INC_REFC.
- */
- ERTS_SMP_LC_ASSERT(erts_get_scheduler_id());
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
if (is_not_internal_pid(pid))
return NULL;
- pix = internal_pid_index(pid);
- proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);
-
- if (proc && proc->id != pid)
+ proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(pid));
+ if (proc && proc->common.id != pid)
return NULL;
-
return proc;
}
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
new file mode 100644
index 0000000000..87beeafa1a
--- /dev/null
+++ b/erts/emulator/beam/erl_ptab.c
@@ -0,0 +1,1566 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Process/Port table implementation.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#define ERTS_PTAB_WANT_BIF_IMPL__
+#define ERTS_PTAB_WANT_DEBUG_FUNCS__
+#include "erl_ptab.h"
+#include "global.h"
+#include "erl_binary.h"
+
+typedef struct ErtsPTabListBifData_ ErtsPTabListBifData;
+
+#define ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED 25
+#define ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE 1000
+#define ERTS_PTAB_LIST_BIF_MIN_START_REDS \
+ (ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE \
+ / ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED)
+
+#define ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS 1
+
+#define ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED 10
+
+#define ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS \
+ (ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE \
+ / ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED)
+
+
+#define ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED 75
+
+#define ERTS_PTAB_LIST_DBG_DO_TRACE 0
+
+#ifdef DEBUG
+# define ERTS_PTAB_LIST_BIF_DEBUGLEVEL 100
+#else
+# define ERTS_PTAB_LIST_BIF_DEBUGLEVEL 0
+#endif
+
+#define ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC 1
+#define ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS 5
+#define ERTS_PTAB_LIST_DBGLVL_CHK_PIDS 10
+#define ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST 20
+#define ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST 20
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL == 0
+# define ERTS_PTAB_LIST_ASSERT(EXP)
+#else
+# define ERTS_PTAB_LIST_ASSERT(EXP) \
+ ((void) ((EXP) \
+ ? 1 \
+ : (debug_ptab_list_assert_error(#EXP, \
+ __FILE__, \
+ __LINE__, \
+ __func__), \
+ 0)))
+#endif
+
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC
+# define ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(PTLBDP, HP, SZ) \
+do { \
+ ERTS_PTAB_LIST_ASSERT(!(PTLBDP)->debug.heap); \
+ ERTS_PTAB_LIST_ASSERT(!(PTLBDP)->debug.heap_size); \
+ (PTLBDP)->debug.heap = (HP); \
+ (PTLBDP)->debug.heap_size = (SZ); \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(PTLBDP, HP) \
+do { \
+ ERTS_PTAB_LIST_ASSERT((PTLBDP)->debug.heap); \
+ ERTS_PTAB_LIST_ASSERT((PTLBDP)->debug.heap_size); \
+ ERTS_PTAB_LIST_ASSERT(((PTLBDP)->debug.heap \
+ + (PTLBDP)->debug.heap_size) \
+ == (HP)); \
+ (PTLBDP)->debug.heap = NULL; \
+ (PTLBDP)->debug.heap_size = 0; \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT(PTLBDP) \
+do { \
+ (PTLBDP)->debug.heap = NULL; \
+ (PTLBDP)->debug.heap_size = 0; \
+} while (0)
+#else
+# define ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(PTLBDP, HP, SZ)
+# define ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(PTLBDP, HP)
+# define ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT(PTLBDP)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST
+# define ERTS_PTAB_LIST_DBG_CHK_RESLIST(R) \
+ debug_ptab_list_check_res_list((R))
+#else
+# define ERTS_PTAB_LIST_DBG_CHK_RESLIST(R)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+# define ERTS_PTAB_LIST_DBG_SAVE_PIDS(PTLBDP) \
+ debug_ptab_list_save_all_pids((PTLBDP))
+# define ERTS_PTAB_LIST_DBG_VERIFY_PIDS(PTLBDP) \
+do { \
+ if (!(PTLBDP)->debug.correct_pids_verified) \
+ debug_ptab_list_verify_all_pids((PTLBDP)); \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS(PTLBDP) \
+do { \
+ if ((PTLBDP)->debug.correct_pids) { \
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, \
+ (PTLBDP)->debug.correct_pids); \
+ (PTLBDP)->debug.correct_pids = NULL; \
+ } \
+} while(0)
+# define ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT(PTLBDP) \
+do { \
+ (PTLBDP)->debug.correct_pids_verified = 0; \
+ (PTLBDP)->debug.correct_pids = NULL; \
+} while (0)
+#else
+# define ERTS_PTAB_LIST_DBG_SAVE_PIDS(PTLBDP)
+# define ERTS_PTAB_LIST_DBG_VERIFY_PIDS(PTLBDP)
+# define ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS(PTLBDP)
+# define ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT(PTLBDP)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+# define ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(PTLBDP, PID, IC) \
+ debug_ptab_list_check_found_pid((PTLBDP), (PID), (IC), 1)
+# define ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(PTLBDP, PID, IC) \
+ debug_ptab_list_check_found_pid((PTLBDP), (PID), (IC), 0)
+#else
+# define ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(PTLBDP, PID, IC)
+# define ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(PTLBDP, PID, IC)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST
+# define ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(PTab) \
+ debug_ptab_list_check_del_list((PTab))
+# define ERTS_PTAB_LIST_DBG_CHK_FREELIST(PTab, FL) \
+ debug_ptab_list_check_del_free_list((PTab), (FL))
+#else
+# define ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(PTab)
+# define ERTS_PTAB_LIST_DBG_CHK_FREELIST(PTab, FL)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL == 0
+#if ERTS_PTAB_LIST_DBG_DO_TRACE
+# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP) \
+ (PTLBDP)->debug.caller = (P)->common.id
+# else
+# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP)
+# endif
+# define ERTS_PTAB_LIST_DBG_CLEANUP(PTLBDP)
+#else
+# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP) \
+do { \
+ (PTLBDP)->debug.caller = (P)->common.id; \
+ ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT((PTLBDP)); \
+ ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT((PTLBDP)); \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_CLEANUP(PTLBDP) \
+do { \
+ ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS((PTLBDP)); \
+} while (0)
+#endif
+
+#if ERTS_PTAB_LIST_DBG_DO_TRACE
+# define ERTS_PTAB_LIST_DBG_TRACE(PID, WHAT) \
+ erts_fprintf(stderr, "%T %s:%d:%s(): %s\n", \
+ (PID), __FILE__, __LINE__, __func__, #WHAT)
+#else
+# define ERTS_PTAB_LIST_DBG_TRACE(PID, WHAT)
+#endif
+
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0
+static void debug_ptab_list_assert_error(char* expr,
+ const char* file,
+ int line,
+ const char *func);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST
+static void debug_ptab_list_check_res_list(Eterm list);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+static void debug_ptab_list_save_all_pids(ErtsPTabListBifData *ptlbdp);
+static void debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+static void debug_ptab_list_check_found_pid(ErtsPTabListBifData *ptlbdp,
+ Eterm pid,
+ Uint64 ic,
+ int pid_should_be_found);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST
+static void debug_ptab_list_check_del_list(ErtsPTab *ptab);
+static void debug_ptab_list_check_del_free_list(ErtsPTab *ptab,
+ ErtsPTabDeletedElement *ptdep);
+#endif
+
+struct ErtsPTabDeletedElement_ {
+ ErtsPTabDeletedElement *next;
+ ErtsPTabDeletedElement *prev;
+ int ix;
+ union {
+ struct {
+ Eterm id;
+ Uint64 inserted;
+ Uint64 deleted;
+ } element;
+ struct {
+ Uint64 interval;
+ } bif_invocation;
+ } u;
+};
+
+static Export ptab_list_continue_export;
+
+typedef struct {
+ Uint64 interval;
+} ErtsPTabListBifChunkInfo;
+
+typedef enum {
+ INITIALIZING,
+ INSPECTING_TABLE,
+ INSPECTING_DELETED,
+ BUILDING_RESULT,
+ RETURN_RESULT
+} ErtsPTabListBifState;
+
+struct ErtsPTabListBifData_ {
+ ErtsPTab *ptab;
+ ErtsPTabListBifState state;
+ Eterm caller;
+ ErtsPTabListBifChunkInfo *chunk;
+ int tix;
+ int pid_ix;
+ int pid_sz;
+ Eterm *pid;
+ ErtsPTabDeletedElement *bif_invocation; /* Only used when > 1 chunk */
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0 || ERTS_PTAB_LIST_DBG_DO_TRACE
+ struct {
+ Eterm caller;
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ Uint64 *pid_started;
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC
+ Eterm *heap;
+ Uint heap_size;
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+ int correct_pids_verified;
+ Eterm *correct_pids;
+#endif
+ } debug;
+#endif
+
+};
+
+#ifdef ARCH_32
+
+static ERTS_INLINE Uint64
+dw_aint_to_uint64(erts_dw_aint_t *dw)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (Uint64) dw->dw_sint;
+#else
+ Uint64 res;
+ res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
+ return res;
+#endif
+}
+
+static void
+unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
+#else
+ dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
+ dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
+#endif
+}
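
On 32-bit builds the 64-bit last_data counter lives in a double-word atomic, so it has to be packed into and unpacked from two 32-bit halves, as dw_aint_to_uint64()/unint64_to_dw_aint() do above. A tiny self-contained round-trip of that packing with plain uint32_t/uint64_t (not the ERTS double-word atomic types) looks like this:

    #include <assert.h>
    #include <stdint.h>

    /* Split a 64-bit value into two 32-bit halves and back, mirroring
     * the low/high word handling in the non-ETHR_SU_DW_NAINT_T__ case. */
    static void split64(uint64_t val, uint32_t *lo, uint32_t *hi)
    {
        *lo = (uint32_t) (val & 0xffffffffu);
        *hi = (uint32_t) ((val >> 32) & 0xffffffffu);
    }

    static uint64_t join64(uint32_t lo, uint32_t hi)
    {
        return (((uint64_t) hi) << 32) | (uint64_t) lo;
    }

    int main(void)
    {
        uint32_t lo, hi;
        split64(UINT64_C(0x1122334455667788), &lo, &hi);
        assert(lo == 0x55667788u && hi == 0x11223344u);
        assert(join64(lo, hi) == UINT64_C(0x1122334455667788));
        return 0;
    }
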
+
+static ERTS_INLINE void
+last_data_init_nob(ErtsPTab *ptab, Uint64 val)
+{
+ erts_dw_aint_t dw;
+ unint64_to_dw_aint(&dw, val);
+ erts_smp_dw_atomic_init_nob(&ptab->vola.tile.last_data, &dw);
+}
+
+static ERTS_INLINE void
+last_data_set_relb(ErtsPTab *ptab, Uint64 val)
+{
+ erts_dw_aint_t dw;
+ unint64_to_dw_aint(&dw, val);
+ erts_smp_dw_atomic_set_relb(&ptab->vola.tile.last_data, &dw);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_nob(ErtsPTab *ptab)
+{
+ erts_dw_aint_t dw;
+ erts_smp_dw_atomic_read_nob(&ptab->vola.tile.last_data, &dw);
+ return dw_aint_to_uint64(&dw);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_acqb(ErtsPTab *ptab)
+{
+ erts_dw_aint_t dw;
+ erts_smp_dw_atomic_read_acqb(&ptab->vola.tile.last_data, &dw);
+ return dw_aint_to_uint64(&dw);
+}
+
+static ERTS_INLINE Uint64
+last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
+{
+ erts_dw_aint_t dw_new, dw_xchg;
+
+ unint64_to_dw_aint(&dw_new, new);
+ unint64_to_dw_aint(&dw_xchg, exp);
+
+ if (erts_smp_dw_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
+ &dw_new,
+ &dw_xchg))
+ return exp;
+ else
+ return dw_aint_to_uint64(&dw_xchg);
+}
+
+#elif defined(ARCH_64)
+
+union {
+ erts_smp_atomic_t pid_data;
+ char align[ERTS_CACHE_LINE_SIZE];
+} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static ERTS_INLINE void
+last_data_init_nob(ErtsPTab *ptab, Uint64 val)
+{
+ erts_smp_atomic_init_nob(&ptab->vola.tile.last_data, (erts_aint_t) val);
+}
+
+static ERTS_INLINE void
+last_data_set_relb(ErtsPTab *ptab, Uint64 val)
+{
+ erts_smp_atomic_set_relb(&ptab->vola.tile.last_data, (erts_aint_t) val);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_nob(ErtsPTab *ptab)
+{
+ return (Uint64) erts_smp_atomic_read_nob(&ptab->vola.tile.last_data);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_acqb(ErtsPTab *ptab)
+{
+ return (Uint64) erts_smp_atomic_read_acqb(&ptab->vola.tile.last_data);
+}
+
+static ERTS_INLINE Uint64
+last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
+{
+ return (Uint64) erts_smp_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
+ (erts_aint_t) new,
+ (erts_aint_t) exp);
+}
+
+#else
+# error "Not 64-bit, nor 32-bit architecture..."
+#endif
+
+static ERTS_INLINE int
+last_data_cmp(Uint64 ld1, Uint64 ld2)
+{
+ Uint64 ld1_wrap;
+
+ if (ld1 == ld2)
+ return 0;
+
+ ld1_wrap = ld1 + (((Uint64) 1) << 63);
+
+ if (ld1 < ld1_wrap)
+ return (ld1 < ld2 && ld2 < ld1_wrap) ? -1 : 1;
+ else
+ return (ld1_wrap <= ld2 && ld2 < ld1) ? 1 : -1;
+}
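
last_data_cmp() above gives a total order on the 64-bit counter modulo wraparound: ld2 counts as newer than ld1 exactly when it lies in the half-range ahead of ld1. A small standalone check of the same comparison with plain uint64_t (independent of the ERTS types) is:

    #include <assert.h>
    #include <stdint.h>

    /* Same wraparound ordering as last_data_cmp(): returns <0 if ld1 is
     * older than ld2, 0 if equal, >0 otherwise. */
    static int wrap_cmp(uint64_t ld1, uint64_t ld2)
    {
        uint64_t ld1_wrap;
        if (ld1 == ld2)
            return 0;
        ld1_wrap = ld1 + (((uint64_t) 1) << 63);
        if (ld1 < ld1_wrap)
            return (ld1 < ld2 && ld2 < ld1_wrap) ? -1 : 1;
        else
            return (ld1_wrap <= ld2 && ld2 < ld1) ? 1 : -1;
    }

    int main(void)
    {
        assert(wrap_cmp(1, 2) < 0);              /* plain ordering          */
        assert(wrap_cmp(2, 1) > 0);
        assert(wrap_cmp(UINT64_MAX, 0) < 0);     /* 0 is "after" UINT64_MAX */
        assert(wrap_cmp(0, UINT64_MAX) > 0);
        return 0;
    }

The half-range ahead of ld1 is treated as newer, which is why 0 compares as newer than UINT64_MAX once the counter wraps.
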
+
+#define ERTS_PTAB_LastData2EtermData(LD) \
+ ((Eterm) ((LD) & ~(~((Uint64) 0) << ERTS_PTAB_ID_DATA_SIZE)))
+
+void
+erts_ptab_init_table(ErtsPTab *ptab,
+ ErtsAlcType_t atype,
+ void (*release_element)(void *),
+ ErtsPTabElementCommon *invalid_element,
+ int size,
+ char *name)
+{
+ size_t tab_sz;
+ int bits;
+ char *tab_end;
+ erts_smp_atomic_t *tab_entry;
+ erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name);
+ erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0);
+ last_data_init_nob(ptab, ~((Uint64) 0));
+
+ /* A size that is a power of 2 is preferable for performance */
+ bits = erts_fit_in_bits_int32(size-1);
+ size = 1 << bits;
+ if (size > ERTS_PTAB_MAX_SIZE) {
+ size = ERTS_PTAB_MAX_SIZE;
+ bits = erts_fit_in_bits_int32((Sint32) size - 1);
+ }
+
+ ptab->r.o.max = size;
+
+ tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t));
+ ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, tab_sz);
+ tab_end = ((char *) ptab->r.o.tab) + tab_sz;
+ tab_entry = ptab->r.o.tab;
+ while (tab_end > ((char *) tab_entry)) {
+ erts_smp_atomic_init_nob(tab_entry, ERTS_AINT_NULL);
+ tab_entry++;
+ }
+
+ ptab->r.o.tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
+ ptab->r.o.pix_per_cache_line = (ERTS_CACHE_LINE_SIZE
+ / sizeof(erts_smp_atomic_t));
+ ASSERT((ptab->r.o.max & (ptab->r.o.max - 1)) == 0); /* power of 2 */
+ ASSERT((ptab->r.o.pix_per_cache_line
+ & (ptab->r.o.pix_per_cache_line - 1)) == 0); /* power of 2 */
+ ASSERT((ptab->r.o.tab_cache_lines
+ & (ptab->r.o.tab_cache_lines - 1)) == 0); /* power of 2 */
+
+ ptab->r.o.pix_mask
+ = (1 << bits) - 1;
+ ptab->r.o.pix_cl_mask
+ = ptab->r.o.tab_cache_lines-1;
+ ptab->r.o.pix_cl_shift
+ = erts_fit_in_bits_int32(ptab->r.o.pix_per_cache_line-1);
+ ptab->r.o.pix_cli_shift
+ = erts_fit_in_bits_int32(ptab->r.o.pix_cl_mask);
+ ptab->r.o.pix_cli_mask
+ = (1 << (bits - ptab->r.o.pix_cli_shift)) - 1;
+
+ ASSERT(ptab->r.o.pix_cl_shift + ptab->r.o.pix_cli_shift == bits);
+
+ ptab->r.o.invalid_element = invalid_element;
+ ptab->r.o.invalid_data = erts_ptab_id2data(ptab, invalid_element->id);
+ ptab->r.o.release_element = release_element;
+
+ erts_smp_interval_init(&ptab->list.data.interval);
+ ptab->list.data.deleted.start = NULL;
+ ptab->list.data.deleted.end = NULL;
+ ptab->list.data.chunks = (((ptab->r.o.max - 1)
+ / ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE)
+ + 1);
+
+ if (size == ERTS_PTAB_MAX_SIZE) {
+ int pix;
+ /*
+ * We want a table size of a power of 2 which ERTS_PTAB_MAX_SIZE
+ * is. We only have ERTS_PTAB_MAX_SIZE-1 unique identifiers and
+ * we don't want to shrink the size to ERTS_PTAB_MAX_SIZE/2.
+ *
+ * In order to fix this, we insert a pointer from the table
+ * to the invalid_element, which will be interpreted as a
+ * slot currently being modified. This way we will be able to
+ * have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while
+ * still having a table size that is a power of 2.
+ */
+ erts_smp_atomic32_inc_nob(&ptab->vola.tile.count);
+ pix = erts_ptab_data2pix(ptab, ptab->r.o.invalid_data);
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix],
+ (erts_aint_t) ptab->r.o.invalid_element);
+ }
+
+}
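
erts_ptab_init_table() rounds the requested table size up to a power of two so that index arithmetic reduces to masking and shifting. Assuming a helper equivalent to erts_fit_in_bits_int32() (the number of bits needed to represent a value), the rounding step can be sketched standalone as:

    #include <stdio.h>

    /* Smallest number of bits needed to represent v (0 -> 0 bits);
     * assumed equivalent of erts_fit_in_bits_int32(). */
    static int fit_in_bits(unsigned int v)
    {
        int bits = 0;
        while (v) {
            bits++;
            v >>= 1;
        }
        return bits;
    }

    int main(void)
    {
        unsigned int requested = 262145;          /* e.g. a +P 262145 request */
        int bits = fit_in_bits(requested - 1);    /* 19                       */
        unsigned int size = 1u << bits;           /* rounded up to 524288     */
        printf("bits=%d size=%u mask=0x%x\n", bits, size, size - 1);
        return 0;
    }

The real code additionally clamps the result to ERTS_PTAB_MAX_SIZE and recomputes the bit count, as shown above.
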
+
+int
+erts_ptab_initialized(ErtsPTab *ptab)
+{
+ return ptab->r.o.tab != NULL;
+}
+
+int
+erts_ptab_new_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el,
+ void *init_arg,
+ void (*init_ptab_el)(void *, Eterm))
+{
+ int pix;
+ Uint64 ld, exp_ld;
+ Eterm data;
+ erts_aint32_t count;
+ erts_aint_t invalid = (erts_aint_t) ptab->r.o.invalid_element;
+
+ erts_ptab_rlock(ptab);
+
+ count = erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.count);
+ if (count > ptab->r.o.max) {
+ while (1) {
+ erts_aint32_t act_count;
+
+ act_count = erts_smp_atomic32_cmpxchg_relb(&ptab->vola.tile.count,
+ count-1,
+ count);
+ if (act_count == count) {
+ erts_ptab_runlock(ptab);
+ return 0;
+ }
+ count = act_count;
+ if (count <= ptab->r.o.max)
+ break;
+ }
+ }
+
+ ptab_el->u.alive.started_interval
+ = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+
+ ld = last_data_read_acqb(ptab);
+
+ /* Reserve slot */
+ while (1) {
+ ld++;
+ pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld));
+ if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix]) == ERTS_AINT_NULL) {
+ erts_aint_t val;
+ val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix],
+ invalid,
+ ERTS_AINT_NULL);
+
+ if (ERTS_AINT_NULL == val)
+ break;
+ }
+ }
+
+ data = ERTS_PTAB_LastData2EtermData(ld);
+
+ if (data == ptab->r.o.invalid_data) {
+ /* Do not use invalid data; fix it... */
+ ld += ptab->r.o.max;
+ ASSERT(pix == erts_ptab_data2pix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ ASSERT(data != ptab->r.o.invalid_data);
+ }
+
+ exp_ld = last_data_read_nob(ptab);
+
+ /* Move last data forward */
+ while (1) {
+ Uint64 act_ld;
+ if (last_data_cmp(ld, exp_ld) < 0)
+ break;
+ act_ld = last_data_cmpxchg_relb(ptab, ld, exp_ld);
+ if (act_ld == exp_ld)
+ break;
+ exp_ld = act_ld;
+ }
+
+ init_ptab_el(init_arg, data);
+
+#ifdef ERTS_SMP
+ erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
+#endif
+
+ /* Move into the reserved slot */
+#ifdef DEBUG
+ ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
+ (erts_aint_t) ptab_el));
+#else
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
+#endif
+
+ erts_ptab_runlock(ptab);
+
+ return 1;
+}
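
erts_ptab_new_element() reserves a free slot by atomically swinging a NULL entry to the table's invalid-element sentinel, and only publishes the real element once it is fully initialized. A standalone sketch of that reserve-then-publish pattern, written with C11 atomics rather than the ERTS atomics API, might look like:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    #define TAB_SIZE 8

    static _Atomic(void *) tab[TAB_SIZE];        /* NULL == free slot     */
    static int sentinel;                         /* stands in for the     */
    #define RESERVED ((void *) &sentinel)        /* "invalid element"     */

    /* Walk the table from a starting hint, reserve the first free slot by
     * CAS(NULL -> RESERVED), then publish the element with a release
     * store. Returns the slot index, or -1 if the table is full. */
    static int insert(void *element, unsigned hint)
    {
        unsigned i;
        for (i = 0; i < TAB_SIZE; i++) {
            unsigned ix = (hint + i) % TAB_SIZE;
            void *expected = NULL;
            if (atomic_compare_exchange_strong(&tab[ix], &expected, RESERVED)) {
                /* Slot is ours; now make the element visible to readers. */
                atomic_store_explicit(&tab[ix], element, memory_order_release);
                return (int) ix;
            }
        }
        return -1;
    }

    int main(void)
    {
        int x = 42;
        printf("stored at %d\n", insert(&x, 3));
        return 0;
    }

The real code derives candidate indices from the last_data counter instead of probing linearly, but the CAS(NULL -> sentinel) reservation followed by a release store has the same shape.
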
+
+static void
+save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el)
+{
+ ErtsPTabDeletedElement *ptdep = erts_alloc(ERTS_ALC_T_PTAB_LIST_DEL,
+ sizeof(ErtsPTabDeletedElement));
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start
+ && ptab->list.data.deleted.end);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ ptdep->prev = ptab->list.data.deleted.end;
+ ptdep->next = NULL;
+ ptdep->ix = erts_ptab_id2pix(ptab, ptab_el->id);
+ ptdep->u.element.id = ptab_el->id;
+ ptdep->u.element.inserted = ptab_el->u.alive.started_interval;
+ ptdep->u.element.deleted =
+ erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+
+ ptab->list.data.deleted.end->next = ptdep;
+ ptab->list.data.deleted.end = ptdep;
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev->ix >= 0
+ ? (ptdep->u.element.deleted
+ >= ptdep->prev->u.element.deleted)
+ : (ptdep->u.element.deleted
+ >= ptdep->prev->u.bif_invocation.interval));
+}
+
+void
+erts_ptab_delete_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el)
+{
+ int maybe_save;
+ int pix = erts_ptab_id2pix(ptab, ptab_el->id);
+
+ ASSERT(erts_get_scheduler_id()); /* *Need* to be a scheduler */
+
+ erts_ptab_rlock(ptab);
+ maybe_save = ptab->list.data.deleted.end != NULL;
+ if (maybe_save) {
+ erts_ptab_runlock(ptab);
+ erts_ptab_rwlock(ptab);
+ }
+
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL);
+
+ ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0);
+ erts_smp_atomic32_dec_relb(&ptab->vola.tile.count);
+
+ if (!maybe_save)
+ erts_ptab_runlock(ptab);
+ else {
+ if (ptab->list.data.deleted.end)
+ save_deleted_element(ptab, ptab_el);
+ erts_ptab_rwunlock(ptab);
+ }
+
+ if (ptab->r.o.release_element)
+ erts_schedule_thr_prgr_later_op(ptab->r.o.release_element,
+ (void *) ptab_el,
+ &ptab_el->u.release);
+}
+
+/*
+ * erts_ptab_list() implements BIFs listing the content of the table,
+ * e.g. erlang:processes/0.
+ */
+static void cleanup_ptab_list_bif_data(Binary *bp);
+static int ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp);
+
+
+BIF_RETTYPE
+erts_ptab_list(Process *c_p, ErtsPTab *ptab)
+{
+ /*
+ * A requirement: The list of identifiers returned should be a
+ * consistent snapshot of all elements existing
+ * in the table at some point in time during the
+ * execution of the BIF calling this function.
+ * Since elements might be deleted while the BIF
+ * is executing, we have to keep track of all
+ * deleted elements and add them to the result.
+ * We also ignore elements created after the BIF
+ * has begun executing.
+ */
+ BIF_RETTYPE ret_val;
+ Eterm res_acc = NIL;
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsPTabListBifData),
+ cleanup_ptab_list_bif_data);
+ ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(mbp);
+
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, call);
+ ptlbdp->ptab = ptab;
+ ptlbdp->state = INITIALIZING;
+ ERTS_PTAB_LIST_DBG_INIT(c_p, ptlbdp);
+
+ if (ERTS_BIF_REDS_LEFT(c_p) >= ERTS_PTAB_LIST_BIF_MIN_START_REDS
+ && ptab_list_bif_engine(c_p, &res_acc, mbp)) {
+ erts_bin_free(mbp);
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, return);
+ ERTS_BIF_PREP_RET(ret_val, res_acc);
+ }
+ else {
+ Eterm *hp;
+ Eterm magic_bin;
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
+ hp = HAlloc(c_p, PROC_BIN_SIZE);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, PROC_BIN_SIZE);
+ magic_bin = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, trap);
+ ERTS_BIF_PREP_YIELD2(ret_val,
+ &ptab_list_continue_export,
+ c_p,
+ res_acc,
+ magic_bin);
+ }
+ return ret_val;
+}
+
+static void
+cleanup_ptab_list_bif_data(Binary *bp)
+{
+ ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(bp);
+ ErtsPTab *ptab = ptlbdp->ptab;
+
+ ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, call);
+
+ if (ptlbdp->state != INITIALIZING) {
+
+ if (ptlbdp->chunk) {
+ erts_free(ERTS_ALC_T_PTAB_LIST_CNKI, ptlbdp->chunk);
+ ptlbdp->chunk = NULL;
+ }
+ if (ptlbdp->pid) {
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->pid);
+ ptlbdp->pid = NULL;
+ }
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ if (ptlbdp->debug.pid_started) {
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->debug.pid_started);
+ ptlbdp->debug.pid_started = NULL;
+ }
+#endif
+
+ if (ptlbdp->bif_invocation) {
+ ErtsPTabDeletedElement *ptdep;
+
+ erts_ptab_rwlock(ptab);
+
+ ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, deleted_cleanup);
+
+ ptdep = ptlbdp->bif_invocation;
+ ptlbdp->bif_invocation = NULL;
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ if (ptdep->prev) {
+ /*
+ * Only remove this bif invocation when we
+ * have preceding invocations.
+ */
+ ptdep->prev->next = ptdep->next;
+ if (ptdep->next)
+ ptdep->next->prev = ptdep->prev;
+ else {
+ /*
+ * At the time of writing this branch cannot be
+ * reached. I don't want to remove this code though
+ * since it may be possible to reach this line
+ * in the future if the cleanup order in
+ * erts_do_exit_process() is changed. The ASSERT(0)
+ * is only here to make us aware that the reorder
+ * has happened. /rickard
+ */
+ ASSERT(0);
+ ptab->list.data.deleted.end = ptdep->prev;
+ }
+ erts_free(ERTS_ALC_T_PTAB_LIST_DEL, ptdep);
+ }
+ else {
+ /*
+ * Free all elements until the next bif invocation
+ * is found.
+ */
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep);
+ do {
+ ErtsPTabDeletedElement *fptdep = ptdep;
+ ptdep = ptdep->next;
+ erts_free(ERTS_ALC_T_PTAB_LIST_DEL, fptdep);
+ } while (ptdep && ptdep->ix >= 0);
+ ptab->list.data.deleted.start = ptdep;
+ if (ptdep)
+ ptdep->prev = NULL;
+ else
+ ptab->list.data.deleted.end = NULL;
+ }
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ erts_ptab_rwunlock(ptab);
+
+ }
+ }
+
+ ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, return);
+ ERTS_PTAB_LIST_DBG_CLEANUP(ptlbdp);
+}
+
+static int
+ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp)
+{
+ ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(mbp);
+ ErtsPTab *ptab = ptlbdp->ptab;
+ int have_reds;
+ int reds;
+ int locked = 0;
+
+ do {
+ switch (ptlbdp->state) {
+ case INITIALIZING:
+ ptlbdp->chunk = erts_alloc(ERTS_ALC_T_PTAB_LIST_CNKI,
+ (sizeof(ErtsPTabListBifChunkInfo)
+ * ptab->list.data.chunks));
+ ptlbdp->tix = 0;
+ ptlbdp->pid_ix = 0;
+
+ erts_ptab_rwlock(ptab);
+ locked = 1;
+
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, init);
+
+ ptlbdp->pid_sz = erts_ptab_count(ptab);
+ ptlbdp->pid = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ sizeof(Eterm)*ptlbdp->pid_sz);
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started
+ = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ sizeof(Uint64)*ptlbdp->pid_sz);
+#endif
+
+ ERTS_PTAB_LIST_DBG_SAVE_PIDS(ptlbdp);
+
+ if (ptab->list.data.chunks == 1)
+ ptlbdp->bif_invocation = NULL;
+ else {
+ /*
+ * We will have to access the table multiple times,
+ * releasing the table lock in between chunks.
+ */
+ ptlbdp->bif_invocation
+ = erts_alloc(ERTS_ALC_T_PTAB_LIST_DEL,
+ sizeof(ErtsPTabDeletedElement));
+ ptlbdp->bif_invocation->ix = -1;
+ ptlbdp->bif_invocation->u.bif_invocation.interval
+ = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ ptlbdp->bif_invocation->next = NULL;
+ if (ptab->list.data.deleted.end) {
+ ptlbdp->bif_invocation->prev = ptab->list.data.deleted.end;
+ ptab->list.data.deleted.end->next = ptlbdp->bif_invocation;
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start);
+ }
+ else {
+ ptlbdp->bif_invocation->prev = NULL;
+ ptab->list.data.deleted.start = ptlbdp->bif_invocation;
+ }
+ ptab->list.data.deleted.end = ptlbdp->bif_invocation;
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ }
+
+ ptlbdp->state = INSPECTING_TABLE;
+ /* Fall through */
+
+ case INSPECTING_TABLE: {
+ int ix = ptlbdp->tix;
+ int indices = ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ int cix = ix / ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ int end_ix = ix + indices;
+ Uint64 *invocation_interval_p;
+ ErtsPTabElementCommon *invalid_element;
+
+ invocation_interval_p
+ = (ptlbdp->bif_invocation
+ ? &ptlbdp->bif_invocation->u.bif_invocation.interval
+ : NULL);
+
+ ERTS_PTAB_LIST_ASSERT(is_nil(*res_accp));
+ if (!locked) {
+ erts_ptab_rwlock(ptab);
+ locked = 1;
+ }
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, insp_table);
+
+ if (cix != 0)
+ ptlbdp->chunk[cix].interval
+ = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ else if (ptlbdp->bif_invocation)
+ ptlbdp->chunk[0].interval = *invocation_interval_p;
+ /* else: interval is irrelevant */
+
+ if (end_ix >= ptab->r.o.max) {
+ ERTS_PTAB_LIST_ASSERT(cix+1 == ptab->list.data.chunks);
+ end_ix = ptab->r.o.max;
+ indices = end_ix - ix;
+ /* What to do when done with this chunk */
+ ptlbdp->state = (ptab->list.data.chunks == 1
+ ? BUILDING_RESULT
+ : INSPECTING_DELETED);
+ }
+
+ invalid_element = ptab->r.o.invalid_element;
+ for (; ix < end_ix; ix++) {
+ ErtsPTabElementCommon *el;
+ el = (ErtsPTabElementCommon *) erts_ptab_pix2intptr_nob(ptab,
+ ix);
+ if (el
+ && el != invalid_element
+ && (!invocation_interval_p
+ || el->u.alive.started_interval < *invocation_interval_p)) {
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(el->id));
+ ptlbdp->pid[ptlbdp->pid_ix] = el->id;
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started[ptlbdp->pid_ix]
+ = el->u.alive.started_interval;
+#endif
+
+ ptlbdp->pid_ix++;
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix <= ptlbdp->pid_sz);
+ }
+ }
+
+ ptlbdp->tix = end_ix;
+
+ erts_ptab_rwunlock(ptab);
+ locked = 0;
+
+ reds = indices/ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED;
+ BUMP_REDS(c_p, reds);
+
+ have_reds = ERTS_BIF_REDS_LEFT(c_p);
+
+ if (have_reds && ptlbdp->state == INSPECTING_TABLE) {
+ ix = ptlbdp->tix;
+ indices = ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ end_ix = ix + indices;
+ if (end_ix > ptab->r.o.max) {
+ end_ix = ptab->r.o.max;
+ indices = end_ix - ix;
+ }
+
+ reds = indices/ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED;
+
+ /* Pretend we have no reds left if we haven't got enough
+ reductions to complete the next chunk */
+ if (reds > have_reds)
+ have_reds = 0;
+ }
+
+ break;
+ }
+
+ case INSPECTING_DELETED: {
+ int i;
+ int max_reds;
+ int free_deleted = 0;
+ Uint64 invocation_interval;
+ ErtsPTabDeletedElement *ptdep;
+ ErtsPTabDeletedElement *free_list = NULL;
+
+ ptdep = ptlbdp->bif_invocation;
+ ERTS_PTAB_LIST_ASSERT(ptdep);
+ invocation_interval = ptdep->u.bif_invocation.interval;
+
+ max_reds = have_reds = ERTS_BIF_REDS_LEFT(c_p);
+ if (max_reds > ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS)
+ max_reds = ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS;
+
+ reds = 0;
+ erts_ptab_rwlock(ptab);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, insp_term_procs);
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ if (ptdep->prev)
+ ptdep->prev->next = ptdep->next;
+ else {
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep);
+ ptab->list.data.deleted.start = ptdep->next;
+
+ if (ptab->list.data.deleted.start
+ && ptab->list.data.deleted.start->ix >= 0) {
+ free_list = ptab->list.data.deleted.start;
+ free_deleted = 1;
+ }
+ }
+
+ if (ptdep->next)
+ ptdep->next->prev = ptdep->prev;
+ else
+ ptab->list.data.deleted.end = ptdep->prev;
+
+ ptdep = ptdep->next;
+
+ i = 0;
+ while (reds < max_reds && ptdep) {
+ if (ptdep->ix < 0) {
+ if (free_deleted) {
+ ERTS_PTAB_LIST_ASSERT(free_list);
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev);
+
+ ptdep->prev->next = NULL; /* end of free_list */
+ ptab->list.data.deleted.start = ptdep;
+ ptdep->prev = NULL;
+ free_deleted = 0;
+ }
+ }
+ else {
+ int cix = ptdep->ix/ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ Uint64 chunk_interval = ptlbdp->chunk[cix].interval;
+ Eterm pid = ptdep->u.element.id;
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(pid));
+
+ if (ptdep->u.element.inserted < invocation_interval) {
+ if (ptdep->u.element.deleted < chunk_interval) {
+ ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(
+ ptlbdp,
+ pid,
+ ptdep->u.element.inserted);
+ ptlbdp->pid[ptlbdp->pid_ix] = pid;
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started[ptlbdp->pid_ix]
+ = ptdep->u.element.inserted;
+#endif
+ ptlbdp->pid_ix++;
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix
+ <= ptlbdp->pid_sz);
+ }
+ else {
+ ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(
+ ptlbdp,
+ pid,
+ ptdep->u.element.inserted);
+ }
+ }
+ else {
+ ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(
+ ptlbdp,
+ pid,
+ ptdep->u.element.inserted);
+ }
+
+ i++;
+ if (i == ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED) {
+ reds++;
+ i = 0;
+ }
+ if (free_deleted)
+ reds += ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS;
+ }
+ ptdep = ptdep->next;
+ }
+
+ if (free_deleted) {
+ ERTS_PTAB_LIST_ASSERT(free_list);
+ ptab->list.data.deleted.start = ptdep;
+ if (!ptdep)
+ ptab->list.data.deleted.end = NULL;
+ else {
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev);
+ ptdep->prev->next = NULL; /* end of free_list */
+ ptdep->prev = NULL;
+ }
+ }
+
+ if (!ptdep) {
+ /* Done */
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix == ptlbdp->pid_sz);
+ ptlbdp->state = BUILDING_RESULT;
+ ptlbdp->bif_invocation->next = free_list;
+ free_list = ptlbdp->bif_invocation;
+ ptlbdp->bif_invocation = NULL;
+ }
+ else {
+ /* Link in bif_invocation again where we left off */
+ ptlbdp->bif_invocation->prev = ptdep->prev;
+ ptlbdp->bif_invocation->next = ptdep;
+ ptdep->prev = ptlbdp->bif_invocation;
+ if (ptlbdp->bif_invocation->prev)
+ ptlbdp->bif_invocation->prev->next = ptlbdp->bif_invocation;
+ else {
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start
+ == ptdep);
+ ptab->list.data.deleted.start = ptlbdp->bif_invocation;
+ }
+ }
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+ ERTS_PTAB_LIST_DBG_CHK_FREELIST(ptab, free_list);
+ erts_ptab_rwunlock(ptab);
+
+ /*
+ * We do the actual free of deleted structures now, after we
+ * have released the table lock, instead of when we encountered
+ * them. This is because free() is not free and we don't want to
+ * unnecessarily block other schedulers.
+ */
+ while (free_list) {
+ ptdep = free_list;
+ free_list = ptdep->next;
+ erts_free(ERTS_ALC_T_PTAB_LIST_DEL, ptdep);
+ }
+
+ have_reds -= reds;
+ if (have_reds < 0)
+ have_reds = 0;
+ BUMP_REDS(c_p, reds);
+ break;
+ }
+
+ case BUILDING_RESULT: {
+ int conses, ix, min_ix;
+ Eterm *hp;
+ Eterm res = *res_accp;
+
+ ERTS_PTAB_LIST_DBG_VERIFY_PIDS(ptlbdp);
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res);
+
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, begin_build_res);
+
+ have_reds = ERTS_BIF_REDS_LEFT(c_p);
+ conses = ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED*have_reds;
+ min_ix = ptlbdp->pid_ix - conses;
+ if (min_ix < 0) {
+ min_ix = 0;
+ conses = ptlbdp->pid_ix;
+ }
+
+ if (conses) {
+ hp = HAlloc(c_p, conses*2);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, conses*2);
+
+ for (ix = ptlbdp->pid_ix - 1; ix >= min_ix; ix--) {
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(ptlbdp->pid[ix]));
+ res = CONS(hp, ptlbdp->pid[ix], res);
+ hp += 2;
+ }
+
+ ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
+ }
+
+ ptlbdp->pid_ix = min_ix;
+ if (min_ix == 0)
+ ptlbdp->state = RETURN_RESULT;
+ else {
+ ptlbdp->pid_sz = min_ix;
+ ptlbdp->pid = erts_realloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ ptlbdp->pid,
+ sizeof(Eterm)*ptlbdp->pid_sz);
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started
+ = erts_realloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ ptlbdp->debug.pid_started,
+ sizeof(Uint64) * ptlbdp->pid_sz);
+#endif
+ }
+ reds = conses/ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED;
+ BUMP_REDS(c_p, reds);
+ have_reds -= reds;
+
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, end_build_res);
+ *res_accp = res;
+ break;
+ }
+ case RETURN_RESULT:
+ cleanup_ptab_list_bif_data(mbp);
+ return 1;
+
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:ptab_list_bif_engine(): Invalid state: %d\n",
+ __FILE__, __LINE__, (int) ptlbdp->state);
+ }
+
+
+ } while (have_reds || ptlbdp->state == RETURN_RESULT);
+
+ return 0;
+}
+
+/*
+ * ptab_list_continue/2 is a hidden BIF that the original BIF traps to
+ * if there is too much work to do in one go.
+ */
+
+static BIF_RETTYPE ptab_list_continue(BIF_ALIST_2)
+{
+ Eterm res_acc;
+ Binary *mbp;
+
+ /*
+ * This BIF cannot be called from Erlang code. It can only be
+ * trapped to from other BIFs; therefore, a bad argument
+ * is an internal error and should never occur...
+ */
+
+ ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, call);
+ ERTS_PTAB_LIST_ASSERT(is_nil(BIF_ARG_1) || is_list(BIF_ARG_1));
+
+ res_acc = BIF_ARG_1;
+
+ ERTS_PTAB_LIST_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
+
+ mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+
+ ERTS_PTAB_LIST_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
+ == cleanup_ptab_list_bif_data);
+ ERTS_PTAB_LIST_ASSERT(
+ ((ErtsPTabListBifData *) ERTS_MAGIC_BIN_DATA(mbp))->debug.caller
+ == BIF_P->common.id);
+
+ if (ptab_list_bif_engine(BIF_P, &res_acc, mbp)) {
+ ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, return);
+ BIF_RET(res_acc);
+ }
+ else {
+ ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, trap);
+ ERTS_BIF_YIELD2(&ptab_list_continue_export, BIF_P, res_acc, BIF_ARG_2);
+ }
+}
+
+void
+erts_ptab_init(void)
+{
+ /* ptab_list_continue/2 is a hidden BIF that the original BIF traps to. */
+ erts_init_trap_export(&ptab_list_continue_export,
+ am_erlang, am_ptab_list_continue, 2,
+ &ptab_list_continue);
+
+}
+
+/*
+ * Debug stuff
+ */
+
+Sint
+erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next)
+{
+ Uint64 ld;
+ Sint res;
+ Eterm data;
+ int first_pix = -1;
+
+ erts_ptab_rwlock(ptab);
+
+ if (!set)
+ ld = last_data_read_nob(ptab);
+ else {
+
+ ld = (Uint64) next;
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ if (ptab->r.o.invalid_data == data) {
+ ld += ptab->r.o.max;
+ ASSERT(erts_ptab_data2pix(ptab, data)
+ == erts_ptab_data2pix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ }
+ last_data_set_relb(ptab, ld);
+ }
+
+ while (1) {
+ int pix;
+ ld++;
+ pix = (int) (ld % ptab->r.o.max);
+ if (first_pix < 0)
+ first_pix = pix;
+ else if (pix == first_pix) {
+ res = -1;
+ break;
+ }
+ if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) {
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ if (ptab->r.o.invalid_data == data) {
+ ld += ptab->r.o.max;
+ ASSERT(erts_ptab_data2pix(ptab, data)
+ == erts_ptab_data2pix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ }
+ res = data;
+ break;
+ }
+ }
+
+ erts_ptab_rwunlock(ptab);
+
+ return res;
+}
+
+static ERTS_INLINE ErtsPTabElementCommon *
+ptab_pix2el(ErtsPTab *ptab, int ix)
+{
+ ErtsPTabElementCommon *ptab_el;
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ ptab_el = (ErtsPTabElementCommon *) erts_ptab_pix2intptr_nob(ptab, ix);
+ if (ptab_el == ptab->r.o.invalid_element)
+ return NULL;
+ else
+ return ptab_el;
+}
+
+Eterm
+erts_debug_ptab_list(Process *c_p, ErtsPTab *ptab)
+{
+ int i;
+ Uint need;
+ Eterm res;
+ Eterm* hp;
+ Eterm *hp_end;
+
+ erts_ptab_rwlock(ptab);
+
+ res = NIL;
+ need = erts_ptab_count(ptab) * 2;
+ hp = HAlloc(c_p, need); /* we need two heap words for each id */
+ hp_end = hp + need;
+
+ /* make the list by scanning backwards */
+
+ for (i = ptab->r.o.max-1; i >= 0; i--) {
+ ErtsPTabElementCommon *el = ptab_pix2el(ptab, i);
+ if (el) {
+ res = CONS(hp, el->id, res);
+ hp += 2;
+ }
+ }
+
+ erts_ptab_rwunlock(ptab);
+
+ HRelease(c_p, hp_end, hp);
+
+ return res;
+}
+
+Eterm
+erts_debug_ptab_list_bif_info(Process *c_p, ErtsPTab *ptab)
+{
+ ERTS_DECL_AM(ptab_list_bif_info);
+ Eterm elements[] = {
+ AM_ptab_list_bif_info,
+ make_small((Uint) ERTS_PTAB_LIST_BIF_MIN_START_REDS),
+ make_small((Uint) ptab->list.data.chunks),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED),
+ make_small((Uint) ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_DEBUGLEVEL)
+ };
+ Uint sz = 0;
+ Eterm *hp;
+ (void) erts_bld_tuplev(NULL, &sz, sizeof(elements)/sizeof(Eterm), elements);
+ hp = HAlloc(c_p, sz);
+ return erts_bld_tuplev(&hp, NULL, sizeof(elements)/sizeof(Eterm), elements);
+}
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+static void
+debug_ptab_list_check_found_pid(ErtsPTabListBifData *ptlbdp,
+ Eterm pid,
+ Uint64 ic,
+ int pid_should_be_found)
+{
+ int i;
+ for (i = 0; i < ptlbdp->pid_ix; i++) {
+ if (ptlbdp->pid[i] == pid && ptlbdp->debug.pid_started[i] == ic) {
+ ERTS_PTAB_LIST_ASSERT(pid_should_be_found);
+ return;
+ }
+ }
+ ERTS_PTAB_LIST_ASSERT(!pid_should_be_found);
+}
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST
+static void
+debug_ptab_list_check_res_list(Eterm list)
+{
+ while (is_list(list)) {
+ Eterm* consp = list_val(list);
+ Eterm hd = CAR(consp);
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(hd));
+ list = CDR(consp);
+ }
+
+ ERTS_PTAB_LIST_ASSERT(is_nil(list));
+}
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+
+static void
+debug_ptab_list_save_all_pids(ErtsPTabListBifData *ptlbdp)
+{
+ int ix, tix, cpix;
+ ErtsPTab *ptab = ptlbdp->ptab;
+ ptlbdp->debug.correct_pids_verified = 0;
+ ptlbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ sizeof(Eterm)*ptlbdp->pid_sz);
+
+ for (tix = 0, cpix = 0; tix < ptab->r.o.max; tix++) {
+ ErtsPTabElementCommon *el = ptab_pix2el(ptab, tix);
+ if (el) {
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(el->id));
+ ptlbdp->debug.correct_pids[cpix++] = el->id;
+ ERTS_PTAB_LIST_ASSERT(cpix <= ptlbdp->pid_sz);
+ }
+ }
+ ERTS_PTAB_LIST_ASSERT(cpix == ptlbdp->pid_sz);
+
+ for (ix = 0; ix < ptlbdp->pid_sz; ix++)
+ ptlbdp->pid[ix] = make_small(ix);
+}
+
+static void
+debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp)
+{
+ int ix, cpix;
+
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix == ptlbdp->pid_sz);
+
+ for (ix = 0; ix < ptlbdp->pid_sz; ix++) {
+ int found = 0;
+ Eterm pid = ptlbdp->pid[ix];
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(pid));
+ for (cpix = ix; cpix < ptlbdp->pid_sz; cpix++) {
+ if (ptlbdp->debug.correct_pids[cpix] == pid) {
+ ptlbdp->debug.correct_pids[cpix] = NIL;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ for (cpix = 0; cpix < ix; cpix++) {
+ if (ptlbdp->debug.correct_pids[cpix] == pid) {
+ ptlbdp->debug.correct_pids[cpix] = NIL;
+ found = 1;
+ break;
+ }
+ }
+ }
+ ERTS_PTAB_LIST_ASSERT(found);
+ }
+ ptlbdp->debug.correct_pids_verified = 1;
+
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->debug.correct_pids);
+ ptlbdp->debug.correct_pids = NULL;
+}
+#endif /* ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS */
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST
+static void
+debug_ptab_list_check_del_list(ErtsPTab *ptab)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ if (!ptab->list.data.deleted.start)
+ ERTS_PTAB_LIST_ASSERT(!ptab->list.data.deleted.end);
+ else {
+ Uint64 curr_interval = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ Uint64 *prev_x_interval_p = NULL;
+ ErtsPTabDeletedElement *ptdep;
+
+ for (ptdep = ptab->list.data.deleted.start;
+ ptdep;
+ ptdep = ptdep->next) {
+ if (!ptdep->prev)
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep);
+ else
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev->next == ptdep);
+ if (!ptdep->next)
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.end == ptdep);
+ else
+ ERTS_PTAB_LIST_ASSERT(ptdep->next->prev == ptdep);
+ if (ptdep->ix < 0) {
+ Uint64 interval = ptdep->u.bif_invocation.interval;
+ ERTS_PTAB_LIST_ASSERT(interval <= curr_interval);
+ }
+ else {
+ Uint64 s_interval = ptdep->u.element.inserted;
+ Uint64 x_interval = ptdep->u.element.deleted;
+
+ ERTS_PTAB_LIST_ASSERT(s_interval <= x_interval);
+ if (prev_x_interval_p)
+ ERTS_PTAB_LIST_ASSERT(*prev_x_interval_p <= x_interval);
+ prev_x_interval_p = &ptdep->u.element.deleted;
+ ERTS_PTAB_LIST_ASSERT(
+ erts_ptab_is_valid_id(ptdep->u.element.id));
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_id2pix(ptab,
+ ptdep->u.element.id)
+ == ptdep->ix);
+
+ }
+ }
+
+ }
+}
+
+static void
+debug_ptab_list_check_del_free_list(ErtsPTab *ptab,
+ ErtsPTabDeletedElement *free_list)
+{
+ if (ptab->list.data.deleted.start) {
+ ErtsPTabDeletedElement *fptdep;
+ ErtsPTabDeletedElement *ptdep;
+
+ for (fptdep = free_list; fptdep; fptdep = fptdep->next) {
+ for (ptdep = ptab->list.data.deleted.start;
+ ptdep;
+ ptdep = ptdep->next) {
+ ERTS_PTAB_LIST_ASSERT(fptdep != ptdep);
+ }
+ }
+ }
+}
+
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0
+
+static void
+debug_ptab_list_assert_error(char* expr, const char* file, int line, const char *func)
+{
+ fflush(stdout);
+ erts_fprintf(stderr, "%s:%d:%s(): Assertion failed: %s\n",
+ (char *) file, line, (char *) func, expr);
+ fflush(stderr);
+ abort();
+}
+
+#endif
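The engine above runs against a reduction budget: each invocation inspects a bounded number of table chunks or deleted elements, records where it stopped in the magic-binary state, and either finishes or traps to the hidden ptab_list_continue/2 BIF. A minimal standalone sketch of that "do a bounded slice, then yield" control flow, with made-up names and none of the ERTS scheduler machinery:

    #include <stdio.h>

    /* Each call consumes at most `budget` units of work and reports
     * whether it is done, mirroring how ptab_list_bif_engine() returns
     * 0 (trap and continue later) or 1 (result complete). */
    struct work_state { int next_ix; int total; };

    static int run_slice(struct work_state *st, int budget)
    {
        while (budget-- > 0 && st->next_ix < st->total)
            st->next_ix++;                /* one unit of work */
        return st->next_ix == st->total;  /* 1 = done, 0 = yield */
    }

    int main(void)
    {
        struct work_state st = {0, 10};
        int calls = 1;
        while (!run_slice(&st, 3))        /* caller re-invokes, like a trap */
            calls++;
        printf("finished after %d calls\n", calls);
        return 0;
    }
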
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
new file mode 100644
index 0000000000..8a130f42a3
--- /dev/null
+++ b/erts/emulator/beam/erl_ptab.h
@@ -0,0 +1,472 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Process/Port table implementation.
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_PTAB_H__
+#define ERL_PTAB_H__
+
+#include "sys.h"
+#include "erl_term.h"
+#include "erl_time.h"
+#include "erl_utils.h"
+#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_thr_progress.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_alloc.h"
+#include "erl_monitors.h"
+
+#define ERTS_TRACER_PROC(P) ((P)->common.tracer_proc)
+#define ERTS_TRACE_FLAGS(P) ((P)->common.trace_flags)
+
+#define ERTS_P_LINKS(P) ((P)->common.u.alive.links)
+#define ERTS_P_MONITORS(P) ((P)->common.u.alive.monitors)
+
+#define IS_TRACED(p) \
+ (ERTS_TRACER_PROC((p)) != NIL)
+#define ARE_TRACE_FLAGS_ON(p,tf) \
+ ((ERTS_TRACE_FLAGS((p)) & (tf|F_SENSITIVE)) == (tf))
+#define IS_TRACED_FL(p,tf) \
+ ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) )
+
+typedef struct {
+ Eterm id;
+#ifdef ERTS_SMP
+ erts_atomic32_t refc;
+#endif
+ Eterm tracer_proc;
+ Uint trace_flags;
+ union {
+ /* --- While being alive --- */
+ struct {
+ Uint64 started_interval;
+ struct reg_proc *reg;
+ ErtsLink *links;
+ ErtsMonitor *monitors;
+#ifdef ERTS_SMP
+ ErtsSmpPTimer *ptimer;
+#else
+ ErlTimer tm;
+#endif
+ } alive;
+
+ /* --- While being released --- */
+ ErtsThrPrgrLaterOp release;
+ } u;
+} ErtsPTabElementCommon;
+
+typedef struct ErtsPTabDeletedElement_ ErtsPTabDeletedElement;
+
+typedef struct {
+ erts_smp_rwmtx_t rwmtx;
+ erts_interval_t interval;
+ struct {
+ ErtsPTabDeletedElement *start;
+ ErtsPTabDeletedElement *end;
+ } deleted;
+ int chunks;
+} ErtsPTabListData;
+
+typedef struct {
+#ifdef ARCH_32
+ erts_smp_dw_atomic_t last_data;
+#else
+ erts_smp_atomic_t last_data;
+#endif
+ erts_smp_atomic32_t count;
+} ErtsPTabVolatileData;
+
+typedef struct {
+ erts_smp_atomic_t *tab;
+ Uint32 max;
+ Uint32 tab_cache_lines;
+ Uint32 pix_per_cache_line;
+ Uint32 pix_mask;
+ Uint32 pix_cl_mask;
+ Uint32 pix_cl_shift;
+ Uint32 pix_cli_mask;
+ Uint32 pix_cli_shift;
+ ErtsPTabElementCommon *invalid_element;
+ Eterm invalid_data;
+ void (*release_element)(void *);
+} ErtsPTabReadOnlyData;
+
+typedef struct {
+ /*
+ * Data mainly modified when someone is listing
+ * the content of the table.
+ */
+ union {
+ ErtsPTabListData data;
+ char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabListData))];
+ } list;
+
+ /*
+ * Frequently modified data.
+ */
+ union {
+ ErtsPTabVolatileData tile;
+ char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabVolatileData))];
+ } vola;
+
+ /*
+ * Read only data.
+ */
+ union {
+ ErtsPTabReadOnlyData o;
+ char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabReadOnlyData))];
+ } r;
+} ErtsPTab;
+
+#define ERTS_PTAB_ID_DATA_SIZE 28
+#define ERTS_PTAB_ID_DATA_SHIFT (_TAG_IMMED1_SIZE)
+/* ERTS_PTAB_MAX_SIZE must be a power of 2 */
+#define ERTS_PTAB_MAX_SIZE (SWORD_CONSTANT(1) << 27)
+#if (ERTS_PTAB_MAX_SIZE-1) > MAX_SMALL
+# error "The maximum number of processes/ports must fit in a SMALL."
+#endif
+
+
+/*
+ * Currently pids and ports are allowed.
+ */
+#if _PID_DATA_SIZE != ERTS_PTAB_ID_DATA_SIZE
+# error "Unexpected pid data size"
+#endif
+#if _PID_DATA_SHIFT != ERTS_PTAB_ID_DATA_SHIFT
+# error "Unexpected pid tag size"
+#endif
+#if _PORT_DATA_SIZE != ERTS_PTAB_ID_DATA_SIZE
+# error "Unexpected port data size"
+#endif
+#if _PORT_DATA_SHIFT != ERTS_PTAB_ID_DATA_SHIFT
+# error "Unexpected port tag size"
+#endif
+
+#define ERTS_PTAB_INVALID_ID(TAG) \
+ ((Eterm) \
+ ((((1 << ERTS_PTAB_ID_DATA_SIZE) - 1) << ERTS_PTAB_ID_DATA_SHIFT) \
+ | (TAG)))
+
+#define erts_ptab_is_valid_id(ID) \
+ (is_internal_pid((ID)) || is_internal_port((ID)))
+
+void erts_ptab_init(void);
+void erts_ptab_init_table(ErtsPTab *ptab,
+ ErtsAlcType_t atype,
+ void (*release_element)(void *),
+ ErtsPTabElementCommon *invalid_element,
+ int size,
+ char *name);
+int erts_ptab_new_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el,
+ void *init_arg,
+ void (*init_ptab_el)(void *, Eterm));
+void erts_ptab_delete_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el);
+int erts_ptab_initialized(ErtsPTab *ptab);
+
+ERTS_GLB_INLINE erts_interval_t *erts_ptab_interval(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_max(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_count(ErtsPTab *ptab);
+ERTS_GLB_INLINE Uint erts_ptab_pixdata2data(ErtsPTab *ptab, Eterm pixdata);
+ERTS_GLB_INLINE Uint32 erts_ptab_pixdata2pix(ErtsPTab *ptab, Eterm pixdata);
+ERTS_GLB_INLINE Uint32 erts_ptab_data2pix(ErtsPTab *ptab, Eterm data);
+ERTS_GLB_INLINE Uint erts_ptab_data2pixdata(ErtsPTab *ptab, Eterm data);
+ERTS_GLB_INLINE Eterm erts_ptab_make_id(ErtsPTab *ptab, Eterm data, Eterm tag);
+ERTS_GLB_INLINE int erts_ptab_id2pix(ErtsPTab *ptab, Eterm id);
+ERTS_GLB_INLINE Uint erts_ptab_id2data(ErtsPTab *ptab, Eterm id);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint32 add_refc);
+ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE erts_interval_t *
+erts_ptab_interval(ErtsPTab *ptab)
+{
+ return &ptab->list.data.interval;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_max(ErtsPTab *ptab)
+{
+ int max = ptab->r.o.max;
+ return max == ERTS_PTAB_MAX_SIZE ? max - 1 : max;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_count(ErtsPTab *ptab)
+{
+ int max = ptab->r.o.max;
+ erts_aint32_t res = erts_smp_atomic32_read_nob(&ptab->vola.tile.count);
+ if (max == ERTS_PTAB_MAX_SIZE) {
+ max--;
+ res--;
+ }
+ if (res > max)
+ return max;
+ ASSERT(res >= 0);
+ return (int) res;
+
+}
+
+ERTS_GLB_INLINE Uint erts_ptab_pixdata2data(ErtsPTab *ptab, Eterm pixdata)
+{
+ Uint32 data = ((Uint32) pixdata) & ~ptab->r.o.pix_mask;
+ data |= (pixdata >> ptab->r.o.pix_cl_shift) & ptab->r.o.pix_cl_mask;
+ data |= (pixdata & ptab->r.o.pix_cli_mask) << ptab->r.o.pix_cli_shift;
+ return data;
+}
+
+ERTS_GLB_INLINE Uint32 erts_ptab_pixdata2pix(ErtsPTab *ptab, Eterm pixdata)
+{
+ return ((Uint32) pixdata) & ptab->r.o.pix_mask;
+}
+
+ERTS_GLB_INLINE Uint32 erts_ptab_data2pix(ErtsPTab *ptab, Eterm data)
+{
+ Uint32 n, pix;
+ n = (Uint32) data;
+ pix = ((n & ptab->r.o.pix_cl_mask) << ptab->r.o.pix_cl_shift);
+ pix += ((n >> ptab->r.o.pix_cli_shift) & ptab->r.o.pix_cli_mask);
+ ASSERT(0 <= pix && pix < ptab->r.o.max);
+ return pix;
+}
+
+ERTS_GLB_INLINE Uint erts_ptab_data2pixdata(ErtsPTab *ptab, Eterm data)
+{
+ Uint pixdata = data & ~((Uint) ptab->r.o.pix_mask);
+ pixdata |= (Uint) erts_ptab_data2pix(ptab, data);
+ ASSERT(data == erts_ptab_pixdata2data(ptab, pixdata));
+ return pixdata;
+}
+
+#if ERTS_SIZEOF_TERM == 8
+
+ERTS_GLB_INLINE Eterm
+erts_ptab_make_id(ErtsPTab *ptab, Eterm data, Eterm tag)
+{
+ HUint huint;
+ Uint32 low_data = (Uint32) data;
+ low_data &= (1 << ERTS_PTAB_ID_DATA_SIZE) - 1;
+ low_data <<= ERTS_PTAB_ID_DATA_SHIFT;
+ huint.hval[ERTS_HUINT_HVAL_HIGH] = erts_ptab_data2pix(ptab, data);
+ huint.hval[ERTS_HUINT_HVAL_LOW] = low_data | ((Uint32) tag);
+ return (Eterm) huint.val;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_id2pix(ErtsPTab *ptab, Eterm id)
+{
+ HUint huint;
+ huint.val = id;
+ return (int) huint.hval[ERTS_HUINT_HVAL_HIGH];
+}
+
+ERTS_GLB_INLINE Uint
+erts_ptab_id2data(ErtsPTab *ptab, Eterm id)
+{
+ HUint huint;
+ huint.val = id;
+ return (Uint) (huint.hval[ERTS_HUINT_HVAL_LOW] >> ERTS_PTAB_ID_DATA_SHIFT);
+}
+
+#elif ERTS_SIZEOF_TERM == 4
+
+ERTS_GLB_INLINE Eterm
+erts_ptab_make_id(ErtsPTab *ptab, Eterm data, Eterm tag)
+{
+ Eterm id;
+ data &= ((1 << ERTS_PTAB_ID_DATA_SIZE) - 1);
+ id = (Eterm) erts_ptab_data2pixdata(ptab, data);
+ return (id << ERTS_PTAB_ID_DATA_SHIFT) | tag;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_id2pix(ErtsPTab *ptab, Eterm id)
+{
+ Uint pixdata = (Uint) id;
+ pixdata >>= ERTS_PTAB_ID_DATA_SHIFT;
+ return (int) erts_ptab_pixdata2pix(ptab, pixdata);
+}
+
+ERTS_GLB_INLINE Uint
+erts_ptab_id2data(ErtsPTab *ptab, Eterm id)
+{
+ Uint pixdata = (Uint) id;
+ pixdata >>= ERTS_PTAB_ID_DATA_SHIFT;
+ return erts_ptab_pixdata2data(ptab, pixdata);
+}
+
+#else
+#error "Unsupported size of term"
+#endif
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_ddrb(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_rb(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el)
+{
+#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_aint32_t refc = erts_atomic32_inc_read_nob(&ptab_el->refc);
+ ERTS_SMP_LC_ASSERT(refc > 1);
+#else
+ erts_atomic32_inc_nob(&ptab_el->refc);
+#endif
+#endif
+}
+
+ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el)
+{
+#ifdef ERTS_SMP
+ erts_aint32_t refc = erts_atomic32_dec_read_nob(&ptab_el->refc);
+ ERTS_SMP_LC_ASSERT(refc >= 0);
+ return (int) refc;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint32 add_refc)
+{
+#ifdef ERTS_SMP
+ erts_aint32_t refc;
+
+#ifndef ERTS_ENABLE_LOCK_CHECK
+ if (add_refc >= 0) {
+ erts_atomic32_add_nob(&ptab_el->refc,
+ (erts_aint32_t) add_refc);
+ return 1;
+ }
+#endif
+
+ refc = erts_atomic32_add_read_nob(&ptab_el->refc,
+ (erts_aint32_t) add_refc);
+ ERTS_SMP_LC_ASSERT(refc >= 0);
+ return (int) refc;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_rlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab)
+{
+ return erts_smp_rwmtx_tryrlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_runlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_rwlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab)
+{
+ return erts_smp_rwmtx_tryrwlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_rwunlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab)
+{
+ return erts_smp_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab)
+{
+ return erts_smp_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx);
+}
+
+#endif
+
+#endif
+
+#if defined(ERTS_PTAB_WANT_BIF_IMPL__) && !defined(ERTS_PTAB_LIST__)
+#define ERTS_PTAB_LIST__
+
+#include "erl_process.h"
+#include "bif.h"
+
+BIF_RETTYPE erts_ptab_list(struct process *c_p, ErtsPTab *ptab);
+
+#endif
+
+#if defined(ERTS_PTAB_WANT_DEBUG_FUNCS__) && !defined(ERTS_PTAB_DEBUG_FUNCS__)
+#define ERTS_PTAB_DEBUG_FUNCS__
+#include "erl_process.h"
+
+/* Debug functions */
+Sint erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next);
+Eterm erts_debug_ptab_list(Process *c_p, ErtsPTab *ptab);
+Eterm erts_debug_ptab_list_bif_info(Process *c_p, ErtsPTab *ptab);
+
+#endif
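erts_ptab_data2pix() above does more than take the data modulo the table size: it swaps the bit field selecting a slot within a cache line with the bit field selecting the cache line itself, so ids handed out in sequence land in different cache lines and do not contend on the same one. A standalone sketch of that swizzling with hypothetical dimensions (256 slots, 8 slots per cache line; the real masks and shifts live in the read-only part of the table and are presumably filled in when the table is initialized):

    #include <stdio.h>
    #include <stdint.h>

    #define SLOTS        256u
    #define SLOTS_PER_CL 8u                       /* slots per cache line */
    #define CL_COUNT     (SLOTS / SLOTS_PER_CL)   /* 32 cache lines       */
    #define CL_MASK      (CL_COUNT - 1u)          /* selects a cache line */
    #define CLI_MASK     (SLOTS_PER_CL - 1u)      /* selects slot in line */

    /* Low bits of the data pick the cache line, the next bits pick the
     * slot within it; compare with the mask/shift dance in
     * erts_ptab_data2pix(). */
    static uint32_t data2pix(uint32_t data)
    {
        uint32_t cl  = data & CL_MASK;
        uint32_t cli = (data / CL_COUNT) & CLI_MASK;
        return cl * SLOTS_PER_CL + cli;
    }

    int main(void)
    {
        uint32_t d;
        for (d = 0; d < 4; d++)
            printf("data %u -> pix %u\n", d, data2pix(d));
        /* 0, 8, 16, 24: neighbouring ids end up in different cache lines */
        return 0;
    }
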
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index a32e9d9d7c..34c90c0bda 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -274,6 +274,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob
#define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob
#define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob
+#define erts_smp_atomic_read_bset_nob erts_atomic_read_bset_nob
#define erts_smp_atomic_init_mb erts_atomic_init_mb
#define erts_smp_atomic_set_mb erts_atomic_set_mb
@@ -288,6 +289,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb
#define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb
#define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb
+#define erts_smp_atomic_read_bset_mb erts_atomic_read_bset_mb
#define erts_smp_atomic_init_acqb erts_atomic_init_acqb
#define erts_smp_atomic_set_acqb erts_atomic_set_acqb
@@ -302,6 +304,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb
#define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb
#define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb
+#define erts_smp_atomic_read_bset_acqb erts_atomic_read_bset_acqb
#define erts_smp_atomic_init_relb erts_atomic_init_relb
#define erts_smp_atomic_set_relb erts_atomic_set_relb
@@ -316,6 +319,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb
#define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb
#define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb
+#define erts_smp_atomic_read_bset_relb erts_atomic_read_bset_relb
#define erts_smp_atomic_init_ddrb erts_atomic_init_ddrb
#define erts_smp_atomic_set_ddrb erts_atomic_set_ddrb
@@ -330,6 +334,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_ddrb erts_atomic_read_band_ddrb
#define erts_smp_atomic_xchg_ddrb erts_atomic_xchg_ddrb
#define erts_smp_atomic_cmpxchg_ddrb erts_atomic_cmpxchg_ddrb
+#define erts_smp_atomic_read_bset_ddrb erts_atomic_read_bset_ddrb
#define erts_smp_atomic_init_rb erts_atomic_init_rb
#define erts_smp_atomic_set_rb erts_atomic_set_rb
@@ -344,6 +349,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb
#define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb
#define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb
+#define erts_smp_atomic_read_bset_rb erts_atomic_read_bset_rb
#define erts_smp_atomic_init_wb erts_atomic_init_wb
#define erts_smp_atomic_set_wb erts_atomic_set_wb
@@ -358,6 +364,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb
#define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb
#define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb
+#define erts_smp_atomic_read_bset_wb erts_atomic_read_bset_wb
/* 32-bit atomics */
@@ -374,6 +381,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob
#define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob
#define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob
+#define erts_smp_atomic32_read_bset_nob erts_atomic32_read_bset_nob
#define erts_smp_atomic32_init_mb erts_atomic32_init_mb
#define erts_smp_atomic32_set_mb erts_atomic32_set_mb
@@ -388,6 +396,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb
#define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb
#define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb
+#define erts_smp_atomic32_read_bset_mb erts_atomic32_read_bset_mb
#define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb
#define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb
@@ -402,6 +411,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb
#define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb
#define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb
+#define erts_smp_atomic32_read_bset_acqb erts_atomic32_read_bset_acqb
#define erts_smp_atomic32_init_relb erts_atomic32_init_relb
#define erts_smp_atomic32_set_relb erts_atomic32_set_relb
@@ -416,6 +426,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb
#define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb
#define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb
+#define erts_smp_atomic32_read_bset_relb erts_atomic32_read_bset_relb
#define erts_smp_atomic32_init_ddrb erts_atomic32_init_ddrb
#define erts_smp_atomic32_set_ddrb erts_atomic32_set_ddrb
@@ -430,6 +441,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_ddrb erts_atomic32_read_band_ddrb
#define erts_smp_atomic32_xchg_ddrb erts_atomic32_xchg_ddrb
#define erts_smp_atomic32_cmpxchg_ddrb erts_atomic32_cmpxchg_ddrb
+#define erts_smp_atomic32_read_bset_ddrb erts_atomic32_read_bset_ddrb
#define erts_smp_atomic32_init_rb erts_atomic32_init_rb
#define erts_smp_atomic32_set_rb erts_atomic32_set_rb
@@ -444,6 +456,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb
#define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb
#define erts_smp_atomic32_cmpxchg_rb erts_atomic32_cmpxchg_rb
+#define erts_smp_atomic32_read_bset_rb erts_atomic32_read_bset_rb
#define erts_smp_atomic32_init_wb erts_atomic32_init_wb
#define erts_smp_atomic32_set_wb erts_atomic32_set_wb
@@ -458,6 +471,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb
#define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb
#define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb
+#define erts_smp_atomic32_read_bset_wb erts_atomic32_read_bset_wb
#else /* !ERTS_SMP */
@@ -513,6 +527,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_nob erts_no_atomic_read_band
#define erts_smp_atomic_xchg_nob erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_nob erts_no_atomic_read_bset
#define erts_smp_atomic_init_mb erts_no_atomic_set
#define erts_smp_atomic_set_mb erts_no_atomic_set
@@ -527,6 +542,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_mb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_mb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_mb erts_no_atomic_read_bset
#define erts_smp_atomic_init_acqb erts_no_atomic_set
#define erts_smp_atomic_set_acqb erts_no_atomic_set
@@ -541,6 +557,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_acqb erts_no_atomic_read_bset
#define erts_smp_atomic_init_relb erts_no_atomic_set
#define erts_smp_atomic_set_relb erts_no_atomic_set
@@ -555,6 +572,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_relb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_relb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_relb erts_no_atomic_read_bset
#define erts_smp_atomic_init_ddrb erts_no_atomic_set
#define erts_smp_atomic_set_ddrb erts_no_atomic_set
@@ -569,6 +587,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_ddrb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_ddrb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_ddrb erts_no_atomic_read_bset
#define erts_smp_atomic_init_rb erts_no_atomic_set
#define erts_smp_atomic_set_rb erts_no_atomic_set
@@ -583,6 +602,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_rb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_rb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_rb erts_no_atomic_read_bset
#define erts_smp_atomic_init_wb erts_no_atomic_set
#define erts_smp_atomic_set_wb erts_no_atomic_set
@@ -597,6 +617,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_wb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_wb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_wb erts_no_atomic_read_bset
/* 32-bit atomics */
@@ -613,6 +634,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_nob erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_mb erts_no_atomic32_set
#define erts_smp_atomic32_set_mb erts_no_atomic32_set
@@ -627,6 +649,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_mb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_acqb erts_no_atomic32_set
#define erts_smp_atomic32_set_acqb erts_no_atomic32_set
@@ -641,6 +664,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_acqb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_relb erts_no_atomic32_set
#define erts_smp_atomic32_set_relb erts_no_atomic32_set
@@ -655,6 +679,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_relb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_ddrb erts_no_atomic32_set
#define erts_smp_atomic32_set_ddrb erts_no_atomic32_set
@@ -669,6 +694,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_ddrb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_ddrb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_rb erts_no_atomic32_set
#define erts_smp_atomic32_set_rb erts_no_atomic32_set
@@ -683,6 +709,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_rb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_wb erts_no_atomic32_set
#define erts_smp_atomic32_set_wb erts_no_atomic32_set
@@ -697,6 +724,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_wb erts_no_atomic32_read_bset
#endif /* !ERTS_SMP */
diff --git a/erts/emulator/beam/erl_sys_driver.h b/erts/emulator/beam/erl_sys_driver.h
index d429d0ce96..b991a2840c 100644
--- a/erts/emulator/beam/erl_sys_driver.h
+++ b/erts/emulator/beam/erl_sys_driver.h
@@ -31,7 +31,6 @@
#define ERL_SYS_DRV
typedef long ErlDrvEvent; /* An event to be selected on. */
-typedef long ErlDrvPort; /* A port descriptor. */
/* typedef struct _SysDriverOpts SysDriverOpts; defined in sys.h */
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index bf7774f882..4587cd84d1 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -133,7 +133,7 @@ ET_DEFINE_CHECKED(Uint,unsigned_val,Eterm,is_small);
ET_DEFINE_CHECKED(Sint,signed_val,Eterm,is_small);
ET_DEFINE_CHECKED(Uint,atom_val,Eterm,is_atom);
ET_DEFINE_CHECKED(Uint,header_arity,Eterm,is_header);
-ET_DEFINE_CHECKED(Uint,arityval,Eterm,is_arity_value);
+ET_DEFINE_CHECKED(Uint,arityval,Eterm,is_sane_arity_value);
ET_DEFINE_CHECKED(Uint,thing_arityval,Eterm,is_thing);
ET_DEFINE_CHECKED(Uint,thing_subtag,Eterm,is_thing);
ET_DEFINE_CHECKED(Eterm*,binary_val,Wterm,is_binary);
@@ -144,9 +144,7 @@ ET_DEFINE_CHECKED(Uint,bignum_header_arity,Eterm,_is_bignum_header);
ET_DEFINE_CHECKED(Eterm*,big_val,Wterm,is_big);
ET_DEFINE_CHECKED(Eterm*,float_val,Wterm,is_float);
ET_DEFINE_CHECKED(Eterm*,tuple_val,Wterm,is_tuple);
-ET_DEFINE_CHECKED(Uint,internal_pid_data,Eterm,is_internal_pid);
ET_DEFINE_CHECKED(struct erl_node_*,internal_pid_node,Eterm,is_internal_pid);
-ET_DEFINE_CHECKED(Uint,internal_port_data,Eterm,is_internal_port);
ET_DEFINE_CHECKED(struct erl_node_*,internal_port_node,Eterm,is_internal_port);
ET_DEFINE_CHECKED(Eterm*,internal_ref_val,Wterm,is_internal_ref);
ET_DEFINE_CHECKED(Uint,internal_ref_data_words,Wterm,is_internal_ref);
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index c270d13365..fb3ef9cd6c 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -300,8 +300,17 @@ _ET_DECLARE_CHECKED(Uint,header_arity,Eterm)
#define header_arity(x) _ET_APPLY(header_arity,(x))
/* arityval access methods */
+/* The Erlang spec (4.7.3) defines the max arity as 65535.
+ * We will, however, enforce a max arity of 16777215 (24 bits)
+ * (checked in BIFs and asserted in debug builds).
+ */
+#define MAX_ARITYVAL ((((Uint)1) << 24) - 1)
+#define ERTS_MAX_TUPLE_SIZE MAX_ARITYVAL
+
#define make_arityval(sz) _make_header((sz),_TAG_HEADER_ARITYVAL)
#define is_arity_value(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL)
+#define is_sane_arity_value(x) ((((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL) && \
+ (((x) >> _HEADER_ARITY_OFFS) <= MAX_ARITYVAL))
#define is_not_arity_value(x) (!is_arity_value((x)))
#define _unchecked_arityval(x) _unchecked_header_arity((x))
_ET_DECLARE_CHECKED(Uint,arityval,Eterm)
@@ -542,12 +551,6 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm)
#define _GETBITS(X,Pos,Size) (((X) >> (Pos)) & ~(~((Uint) 0) << (Size)))
/*
- * Observe! New layout for pids, ports and references in R9 (see also note
- * in erl_node_container_utils.h).
- */
-
-
-/*
* Creation in node specific data (pids, ports, refs)
*/
@@ -584,7 +587,6 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm)
*
*/
-#define _PID_R9_SER_SIZE 3
#define _PID_SER_SIZE (_PID_DATA_SIZE - _PID_NUM_SIZE)
#define _PID_NUM_SIZE 15
@@ -598,23 +600,13 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm)
#define make_pid_data(Ser, Num) \
((Uint) ((Ser) << _PID_NUM_SIZE | (Num)))
-#define make_internal_pid(X) \
- ((Eterm) (((X) << _PID_DATA_SHIFT) | _TAG_IMMED1_PID))
-
#define is_internal_pid(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_PID)
#define is_not_internal_pid(x) (!is_internal_pid((x)))
-#define _unchecked_internal_pid_data(x) _GET_PID_DATA((x))
-_ET_DECLARE_CHECKED(Uint,internal_pid_data,Eterm)
-#define internal_pid_data(x) _ET_APPLY(internal_pid_data,(x))
-
#define _unchecked_internal_pid_node(x) erts_this_node
_ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm)
#define internal_pid_node(x) _ET_APPLY(internal_pid_node,(x))
-#define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x)))
-#define internal_pid_serial(x) _GET_PID_SER(internal_pid_data((x)))
-
#define internal_pid_data_words(x) (1)
/*
@@ -644,7 +636,6 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm)
* N : node number
*
*/
-#define _PORT_R9_NUM_SIZE 18
#define _PORT_NUM_SIZE _PORT_DATA_SIZE
#define _PORT_DATA_SIZE 28
@@ -654,18 +645,9 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm)
#define _GET_PORT_NUM(X) _GETBITS((X), 0, _PORT_NUM_SIZE)
-#define make_internal_port(X) \
- ((Eterm) (((X) << _PORT_DATA_SHIFT) | _TAG_IMMED1_PORT))
-
#define is_internal_port(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_PORT)
#define is_not_internal_port(x) (!is_internal_port(x))
-#define _unchecked_internal_port_data(x) _GET_PORT_DATA((x))
-_ET_DECLARE_CHECKED(Uint,internal_port_data,Eterm)
-#define internal_port_data(x) _ET_APPLY(internal_port_data,(x))
-
-#define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x)))
-
#define _unchecked_internal_port_node(x) erts_this_node
_ET_DECLARE_CHECKED(struct erl_node_*,internal_port_node,Eterm)
#define internal_port_node(x) _ET_APPLY(internal_port_node,(x))
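The is_sane_arity_value() test added above tightens the old tag-only check with an upper bound on the encoded arity, so a corrupted header word cannot pass for an absurdly large tuple. A standalone illustration of that "tag matches and payload within bound" shape (the 2-bit tag layout below is invented for the example and is not the real Eterm header encoding):

    #include <stdio.h>
    #include <stdint.h>

    #define TAG_BITS     2u
    #define TAG_MASK     ((1u << TAG_BITS) - 1u)
    #define TAG_ARITYVAL 0x0u
    #define MAX_ARITY    ((1u << 24) - 1u)        /* 16777215 */

    /* Accept a header only if it carries the arityval tag and the
     * payload above the tag does not exceed the enforced maximum. */
    static int is_sane_arity(uint32_t header)
    {
        return (header & TAG_MASK) == TAG_ARITYVAL
            && (header >> TAG_BITS) <= MAX_ARITY;
    }

    int main(void)
    {
        printf("%d %d\n",
               is_sane_arity(3u << TAG_BITS),                /* ok       */
               is_sane_arity((MAX_ARITY + 1u) << TAG_BITS)); /* rejected */
        return 0;
    }
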
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 88524bdd4c..9678d7e08b 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -96,17 +96,14 @@
#define ERTS_THR_PRGR_LFLG_BLOCK (((erts_aint32_t) 1) << 31)
#define ERTS_THR_PRGR_LFLG_NO_LEADER (((erts_aint32_t) 1) << 30)
-#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \
- | ERTS_THR_PRGR_LFLG_BLOCK))
+#define ERTS_THR_PRGR_LFLG_WAITING_UM (((erts_aint32_t) 1) << 29)
+#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \
+ | ERTS_THR_PRGR_LFLG_BLOCK \
+ | ERTS_THR_PRGR_LFLG_WAITING_UM))
-#define ERTS_THR_PRGR_LFLGS_ACTIVE(LFLGS) \
+#define ERTS_THR_PRGR_LFLGS_ACTIVE(LFLGS) \
((LFLGS) & ERTS_THR_PRGR_LFLG_ACTIVE_MASK)
-#define ERTS_THR_PRGR_LFLGS_ALL_WAITING(LFLGS) \
- (((LFLGS) & (ERTS_THR_PRGR_LFLG_NO_LEADER \
- |ERTS_THR_PRGR_LFLG_ACTIVE_MASK)) \
- == ERTS_THR_PRGR_LFLG_NO_LEADER)
-
/*
* We use a 64-bit value for thread progress. By this wrapping of
* the thread progress will more or less never occur.
@@ -262,6 +259,11 @@ typedef struct {
erts_atomic32_t managed_count;
erts_atomic32_t managed_id;
erts_atomic32_t unmanaged_id;
+ int chk_next_ix;
+ struct {
+ int waiting;
+ erts_atomic32_t current;
+ } umrefc_ix;
} ErtsThrPrgrMiscData;
typedef struct {
@@ -276,12 +278,18 @@ typedef union {
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrPrgrElement))];
} ErtsThrPrgrArray;
+typedef union {
+ erts_atomic_t refc;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic_t))];
+} ErtsThrPrgrUnmanagedRefc;
+
typedef struct {
union {
ErtsThrPrgrMiscData data;
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
sizeof(ErtsThrPrgrMiscData))];
} misc;
+ ErtsThrPrgrUnmanagedRefc umrefc[2];
ErtsThrPrgrArray *thr;
struct {
int no;
@@ -346,7 +354,9 @@ init_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
tpd->is_managed = 0;
tpd->is_blocking = 0;
tpd->is_temporary = 1;
-
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ tpd->is_delaying = 0;
+#endif
erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
}
@@ -461,6 +471,12 @@ erts_thr_progress_init(int no_schedulers, int managed, int unmanaged)
erts_atomic32_init_nob(&intrnl->misc.data.managed_count, 0);
erts_atomic32_init_nob(&intrnl->misc.data.managed_id, no_schedulers);
erts_atomic32_init_nob(&intrnl->misc.data.unmanaged_id, -1);
+ intrnl->misc.data.chk_next_ix = 0;
+ intrnl->misc.data.umrefc_ix.waiting = -1;
+ erts_atomic32_init_nob(&intrnl->misc.data.umrefc_ix.current, 0);
+
+ erts_atomic_init_nob(&intrnl->umrefc[0].refc, (erts_aint_t) 0);
+ erts_atomic_init_nob(&intrnl->umrefc[1].refc, (erts_aint_t) 0);
intrnl->thr = (ErtsThrPrgrArray *) ptr;
ptr += thr_arr_sz;
@@ -547,6 +563,9 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
tpd->is_managed = 0;
tpd->is_blocking = is_blocking;
tpd->is_temporary = 0;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ tpd->is_delaying = 0;
+#endif
ASSERT(tpd->id >= 0);
if (tpd->id >= intrnl->unmanaged.no)
erl_exit(ERTS_ABORT_EXIT,
@@ -600,6 +619,9 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
tpd->is_managed = 1;
tpd->is_blocking = is_blocking;
tpd->is_temporary = 0;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ tpd->is_delaying = 1;
+#endif
init_wakeup_request_array(&tpd->wakeup_request[0]);
@@ -607,8 +629,8 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
tpd->leader = 0;
tpd->active = 1;
- tpd->previous.local = 0;
- tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING;
+ tpd->confirmed = 0;
+ tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
erts_atomic32_inc_nob(&intrnl->misc.data.lflgs);
@@ -651,60 +673,113 @@ leader_update(ErtsThrPrgrData *tpd)
block_thread(tpd);
}
else {
+ ErtsThrPrgrVal current;
+ int ix, chk_next_ix, umrefc_ix, my_ix, no_managed, waiting_unmanaged;
erts_aint32_t lflgs;
ErtsThrPrgrVal next;
- int ix, sz, make_progress;
+ erts_aint_t refc;
- if (tpd->previous.current == ERTS_THR_PRGR_VAL_WAITING) {
- /* Took over as leader from another thread */
- tpd->previous.current = read_acqb(&erts_thr_prgr__.current);
- tpd->previous.next = tpd->previous.current;
- tpd->previous.next++;
- if (tpd->previous.next == ERTS_THR_PRGR_VAL_WAITING)
- tpd->previous.next = 0;
- }
+ my_ix = tpd->id;
- if (tpd->previous.local == tpd->previous.current) {
- ErtsThrPrgrVal val = tpd->previous.current + 1;
- if (val == ERTS_THR_PRGR_VAL_WAITING)
- val = 0;
- tpd->previous.local = val;
- set_mb(&intrnl->thr[tpd->id].data.current, val);
+ if (tpd->leader_state.current == ERTS_THR_PRGR_VAL_WAITING) {
+ /* Took over as leader from another thread */
+ tpd->leader_state.current = read_nob(&erts_thr_prgr__.current);
+ tpd->leader_state.next = tpd->leader_state.current;
+ tpd->leader_state.next++;
+ if (tpd->leader_state.next == ERTS_THR_PRGR_VAL_WAITING)
+ tpd->leader_state.next = 0;
+ tpd->leader_state.chk_next_ix = intrnl->misc.data.chk_next_ix;
+ tpd->leader_state.umrefc_ix.waiting = intrnl->misc.data.umrefc_ix.waiting;
+ tpd->leader_state.umrefc_ix.current =
+ (int) erts_atomic32_read_nob(&intrnl->misc.data.umrefc_ix.current);
+
+ if (tpd->confirmed == tpd->leader_state.current) {
+ ErtsThrPrgrVal val = tpd->leader_state.current + 1;
+ if (val == ERTS_THR_PRGR_VAL_WAITING)
+ val = 0;
+ tpd->confirmed = val;
+ set_mb(&intrnl->thr[my_ix].data.current, val);
+ }
}
- next = tpd->previous.next;
- make_progress = 1;
- sz = intrnl->managed.no;
- for (ix = 0; ix < sz; ix++) {
- ErtsThrPrgrVal tmp;
- tmp = read_nob(&intrnl->thr[ix].data.current);
- if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
- make_progress = 0;
- ASSERT(erts_thr_progress_has_passed__(next, tmp));
- break;
+ next = tpd->leader_state.next;
+
+ waiting_unmanaged = 0;
+ umrefc_ix = -1; /* Shut up annoying warning */
+
+ chk_next_ix = tpd->leader_state.chk_next_ix;
+ no_managed = intrnl->managed.no;
+ ASSERT(0 <= chk_next_ix && chk_next_ix <= no_managed);
+ /* Check managed threads */
+ if (chk_next_ix < no_managed) {
+ for (ix = chk_next_ix; ix < no_managed; ix++) {
+ ErtsThrPrgrVal tmp;
+ if (ix == my_ix)
+ continue;
+ tmp = read_nob(&intrnl->thr[ix].data.current);
+ if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
+ tpd->leader_state.chk_next_ix = ix;
+ ASSERT(erts_thr_progress_has_passed__(next, tmp));
+ goto done;
+ }
}
}
- if (make_progress) {
- ErtsThrPrgrVal current = next;
+ /* Check unmanaged threads */
+ waiting_unmanaged = tpd->leader_state.umrefc_ix.waiting != -1;
+ umrefc_ix = (waiting_unmanaged
+ ? tpd->leader_state.umrefc_ix.waiting
+ : tpd->leader_state.umrefc_ix.current);
+ refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
+ ASSERT(refc >= 0);
+ if (refc != 0) {
+ int new_umrefc_ix;
+
+ if (waiting_unmanaged)
+ goto done;
+
+ new_umrefc_ix = (umrefc_ix + 1) & 0x1;
+ tpd->leader_state.umrefc_ix.waiting = umrefc_ix;
+ tpd->leader_state.chk_next_ix = no_managed;
+ erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
+ (erts_aint32_t) new_umrefc_ix);
+ ETHR_MEMBAR(ETHR_StoreLoad);
+ refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
+ ASSERT(refc >= 0);
+ waiting_unmanaged = 1;
+ if (refc != 0)
+ goto done;
+ }
- next++;
- if (next == ERTS_THR_PRGR_VAL_WAITING)
- next = 0;
+ /* Make progress */
+ current = next;
- set_nob(&intrnl->thr[tpd->id].data.current, next);
- set_mb(&erts_thr_prgr__.current, current);
- tpd->previous.local = next;
- tpd->previous.next = next;
- tpd->previous.current = current;
+ next++;
+ if (next == ERTS_THR_PRGR_VAL_WAITING)
+ next = 0;
+
+ set_nob(&intrnl->thr[my_ix].data.current, next);
+ set_mb(&erts_thr_prgr__.current, current);
+ tpd->confirmed = next;
+ tpd->leader_state.next = next;
+ tpd->leader_state.current = current;
#if ERTS_THR_PRGR_PRINT_VAL
- if (current % 1000 == 0)
- erts_fprintf(stderr, "%b64u\n", current);
+ if (current % 1000 == 0)
+ erts_fprintf(stderr, "%b64u\n", current);
#endif
- handle_wakeup_requests(current);
+ handle_wakeup_requests(current);
+
+ if (waiting_unmanaged) {
+ waiting_unmanaged = 0;
+ tpd->leader_state.umrefc_ix.waiting = -1;
+ erts_atomic32_read_band_nob(&intrnl->misc.data.lflgs,
+ ~ERTS_THR_PRGR_LFLG_WAITING_UM);
}
+ tpd->leader_state.chk_next_ix = 0;
+
+ done:
if (tpd->active) {
lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
@@ -712,20 +787,44 @@ leader_update(ErtsThrPrgrData *tpd)
(void) block_thread(tpd);
}
else {
+ int force_wakeup_check = 0;
+ erts_aint32_t set_flags = ERTS_THR_PRGR_LFLG_NO_LEADER;
tpd->leader = 0;
- tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING;
+ tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
#if ERTS_THR_PRGR_PRINT_LEADER
erts_fprintf(stderr, "L <- %d\n", tpd->id);
#endif
ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);
+ if (waiting_unmanaged)
+ set_flags |= ERTS_THR_PRGR_LFLG_WAITING_UM;
+
lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
- ERTS_THR_PRGR_LFLG_NO_LEADER);
+ set_flags);
+ lflgs |= set_flags;
if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
lflgs = block_thread(tpd);
- if (ERTS_THR_PRGR_LFLGS_ACTIVE(lflgs) == 0 && got_sched_wakeups())
+
+ if (waiting_unmanaged) {
+ /* Need to check umrefc again */
+ ETHR_MEMBAR(ETHR_StoreLoad);
+ refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
+ if (refc == 0) {
+ /* Need to force wakeup check */
+ force_wakeup_check = 1;
+ }
+ }
+
+ if ((force_wakeup_check
+ || ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
+ | ERTS_THR_PRGR_LFLG_WAITING_UM
+ | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
+ == ERTS_THR_PRGR_LFLG_NO_LEADER))
+ && got_sched_wakeups()) {
+ /* Someone needs to make progress */
wakeup_managed(0);
+ }
}
}
@@ -744,11 +843,11 @@ update(ErtsThrPrgrData *tpd)
erts_aint32_t lflgs;
res = 0;
val = read_acqb(&erts_thr_prgr__.current);
- if (tpd->previous.local == val) {
+ if (tpd->confirmed == val) {
val++;
if (val == ERTS_THR_PRGR_VAL_WAITING)
val = 0;
- tpd->previous.local = val;
+ tpd->confirmed = val;
set_mb(&intrnl->thr[tpd->id].data.current, val);
}
@@ -801,12 +900,19 @@ erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
block_count_dec();
- tpd->previous.local = ERTS_THR_PRGR_VAL_WAITING;
+ tpd->confirmed = ERTS_THR_PRGR_VAL_WAITING;
set_mb(&intrnl->thr[tpd->id].data.current, ERTS_THR_PRGR_VAL_WAITING);
lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
- if (ERTS_THR_PRGR_LFLGS_ALL_WAITING(lflgs) && got_sched_wakeups())
- wakeup_managed(0); /* Someone need to make progress */
+
+ if ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
+ | ERTS_THR_PRGR_LFLG_WAITING_UM
+ | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
+ == ERTS_THR_PRGR_LFLG_NO_LEADER
+ && got_sched_wakeups()) {
+ /* Someone needs to make progress */
+ wakeup_managed(0);
+ }
}
void
@@ -828,7 +934,7 @@ erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
val++;
if (val == ERTS_THR_PRGR_VAL_WAITING)
val = 0;
- tpd->previous.local = val;
+ tpd->confirmed = val;
set_mb(&intrnl->thr[tpd->id].data.current, val);
val = read_acqb(&erts_thr_prgr__.current);
if (current == val)
@@ -875,6 +981,68 @@ erts_thr_progress_active(ErtsSchedulerData *esdp, int on)
}
+static ERTS_INLINE void
+unmanaged_continue(ErtsThrPrgrDelayHandle handle)
+{
+ int umrefc_ix = (int) handle;
+ erts_aint_t refc;
+
+ ASSERT(umrefc_ix == 0 || umrefc_ix == 1);
+ refc = erts_atomic_dec_read_relb(&intrnl->umrefc[umrefc_ix].refc);
+ ASSERT(refc >= 0);
+ if (refc == 0) {
+ erts_aint_t lflgs;
+ ERTS_THR_READ_MEMORY_BARRIER;
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
+ | ERTS_THR_PRGR_LFLG_WAITING_UM
+ | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
+ == (ERTS_THR_PRGR_LFLG_NO_LEADER|ERTS_THR_PRGR_LFLG_WAITING_UM)
+ && got_sched_wakeups()) {
+ /* Others are waiting for us... */
+ wakeup_managed(0);
+ }
+ }
+}
+
+void
+erts_thr_progress_unmanaged_continue__(ErtsThrPrgrDelayHandle handle)
+{
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ ERTS_LC_ASSERT(tpd && tpd->is_delaying);
+ tpd->is_delaying = 0;
+ return_tmp_thr_prgr_data(tpd);
+#endif
+ ASSERT(!erts_thr_progress_is_managed_thread());
+
+ unmanaged_continue(handle);
+}
+
+ErtsThrPrgrDelayHandle
+erts_thr_progress_unmanaged_delay__(void)
+{
+ int umrefc_ix;
+ ASSERT(!erts_thr_progress_is_managed_thread());
+ umrefc_ix = (int) erts_atomic32_read_acqb(&intrnl->misc.data.umrefc_ix.current);
+ while (1) {
+ int tmp_ix;
+ erts_atomic_inc_acqb(&intrnl->umrefc[umrefc_ix].refc);
+ tmp_ix = (int) erts_atomic32_read_acqb(&intrnl->misc.data.umrefc_ix.current);
+ if (tmp_ix == umrefc_ix)
+ break;
+ unmanaged_continue(umrefc_ix);
+ umrefc_ix = tmp_ix;
+ }
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ ErtsThrPrgrData *tpd = tmp_thr_prgr_data(NULL);
+ tpd->is_delaying = 1;
+ }
+#endif
+ return (ErtsThrPrgrDelayHandle) umrefc_ix;
+}
+
static ERTS_INLINE int
has_reached_wakeup(ErtsThrPrgrVal wakeup)
{
@@ -931,7 +1099,7 @@ request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
*/
ASSERT(tpd->is_managed);
- ASSERT(tpd->previous.local != ERTS_THR_PRGR_VAL_WAITING);
+ ASSERT(tpd->confirmed != ERTS_THR_PRGR_VAL_WAITING);
if (has_reached_wakeup(value)) {
wakeup_managed(tpd->id);
@@ -946,7 +1114,7 @@ request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
tpd->wakeup_request[wix]));
- if (tpd->previous.local == value) {
+ if (tpd->confirmed == value) {
/*
* We have already confirmed this value. We need to request
* wakeup for a value later than our latest confirmed value in
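The umrefc[2] counters introduced above form a two-epoch reference scheme: an unmanaged thread bumps the counter selected by umrefc_ix.current before it reads data protected by thread progress, and the leader flips the index and refuses to advance until the counter for the old epoch has drained to zero. A standalone sketch of that idea using C11 atomics (greatly simplified: no wakeups, no memory-order tuning, invented names):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refc[2];        /* cf. intrnl->umrefc[2].refc       */
    static atomic_int current_ix;     /* cf. misc.data.umrefc_ix.current  */

    static int reader_enter(void)     /* cf. unmanaged_delay */
    {
        for (;;) {
            int ix = atomic_load(&current_ix);
            atomic_fetch_add(&refc[ix], 1);
            if (atomic_load(&current_ix) == ix)
                return ix;                     /* still the current epoch  */
            atomic_fetch_sub(&refc[ix], 1);    /* raced with a flip; retry */
        }
    }

    static void reader_leave(int ix)  /* cf. unmanaged_continue */
    {
        atomic_fetch_sub(&refc[ix], 1);
    }

    static void leader_wait_for_old_epoch(void)
    {
        int old = atomic_load(&current_ix);
        atomic_store(&current_ix, old ^ 1);    /* new readers use the other slot */
        while (atomic_load(&refc[old]) != 0)
            ;                                  /* "waiting_unmanaged" above */
    }

    int main(void)
    {
        int ix = reader_enter();
        reader_leave(ix);
        leader_wait_for_old_epoch();
        printf("old epoch drained\n");
        return 0;
    }
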
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index e72321cf48..1416aa6166 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -53,9 +53,22 @@ typedef Uint64 ErtsThrPrgrVal;
#define ERTS_THR_PRGR_WAKEUP_DATA_SIZE 4 /* Need to be an even power of 2. */
typedef struct {
+ ErtsThrPrgrVal next;
+ ErtsThrPrgrVal current;
+ int chk_next_ix;
+ struct {
+ int current;
+ int waiting;
+ } umrefc_ix;
+} ErtsThrPrgrLeaderState;
+
+typedef struct {
int id;
int is_managed;
int is_blocking;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ int is_delaying; /* managed threads are always delaying */
+#endif
int is_temporary;
/* --- Part below only for registered threads --- */
@@ -66,11 +79,8 @@ typedef struct {
int leader; /* Needs to be first in the managed threads part */
int active;
- struct {
- ErtsThrPrgrVal local;
- ErtsThrPrgrVal next;
- ErtsThrPrgrVal current;
- } previous;
+ ErtsThrPrgrVal confirmed;
+ ErtsThrPrgrLeaderState leader_state;
} ErtsThrPrgrData;
void erts_thr_progress_fatal_error_block(SWord timeout,
@@ -121,6 +131,11 @@ typedef struct {
ERTS_THR_PRGR_ATOMIC current;
} ErtsThrPrgr;
+typedef int ErtsThrPrgrDelayHandle;
+#define ERTS_THR_PRGR_DHANDLE_MANAGED ((ErtsThrPrgrDelayHandle) -1)
+/* ERTS_THR_PRGR_DHANDLE_MANAGED implies managed thread */
+#define ERTS_THR_PRGR_DHANDLE_INVALID ((ErtsThrPrgrDelayHandle) -2)
+
extern ErtsThrPrgr erts_thr_prgr__;
void erts_thr_progress_pre_init(void);
@@ -136,6 +151,8 @@ int erts_thr_progress_update(ErtsSchedulerData *esdp);
int erts_thr_progress_leader_update(ErtsSchedulerData *esdp);
void erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp);
void erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp);
+ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay__(void);
+void erts_thr_progress_unmanaged_continue__(int umrefc_ix);
void erts_thr_progress_dbg_print_state(void);
@@ -148,6 +165,11 @@ ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *a
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE int erts_thr_progress_is_managed_thread(void);
+ERTS_GLB_INLINE ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay(void);
+ERTS_GLB_INLINE void erts_thr_progress_unmanaged_continue(ErtsThrPrgrDelayHandle handle);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ERTS_GLB_INLINE int erts_thr_progress_lc_is_delaying(void);
+#endif
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current_to_later__(ErtsThrPrgrVal val);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later(ErtsSchedulerData *);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current(void);
@@ -229,6 +251,35 @@ erts_thr_progress_is_managed_thread(void)
return tpd && tpd->is_managed;
}
+ERTS_GLB_INLINE ErtsThrPrgrDelayHandle
+erts_thr_progress_unmanaged_delay(void)
+{
+ if (erts_thr_progress_is_managed_thread())
+ return ERTS_THR_PRGR_DHANDLE_MANAGED; /* Nothing to do */
+ else
+ return erts_thr_progress_unmanaged_delay__();
+}
+
+ERTS_GLB_INLINE void
+erts_thr_progress_unmanaged_continue(ErtsThrPrgrDelayHandle handle)
+{
+ ASSERT(handle != ERTS_THR_PRGR_DHANDLE_MANAGED
+ || erts_thr_progress_is_managed_thread());
+ if (handle != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue__(handle);
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+
+ERTS_GLB_INLINE int
+erts_thr_progress_lc_is_delaying(void)
+{
+ ErtsThrPrgrData *tpd = erts_tsd_get(erts_thr_prgr_data_key__);
+ return tpd && tpd->is_delaying;
+}
+
+#endif
+
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_progress_current_to_later__(ErtsThrPrgrVal val)
{
@@ -248,7 +299,7 @@ erts_thr_progress_later(ErtsSchedulerData *esdp)
if (esdp) {
tpd = &esdp->thr_progress_data;
managed_thread:
- val = tpd->previous.local;
+ val = tpd->confirmed;
ERTS_THR_MEMORY_BARRIER;
}
else {
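Note on the delay-handle API added above: it lets a possibly unmanaged thread briefly hold back thread progress around a short critical read. A minimal usage sketch, with hypothetical names (my_tab_lookup and MyEntry are placeholders, not identifiers from this diff):

    /* Sketch only. erts_thr_progress_unmanaged_delay() returns
     * ERTS_THR_PRGR_DHANDLE_MANAGED for managed threads, which are
     * always delaying anyway (cf. the is_delaying comment above);
     * for unmanaged threads it goes through
     * erts_thr_progress_unmanaged_delay__(), which the umrefc_ix
     * bookkeeping in ErtsThrPrgrLeaderState appears to support. */
    static MyEntry *
    my_tab_lookup_delayed(Eterm id)
    {
        MyEntry *ep;
        ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
        ep = my_tab_lookup(id);     /* safe to dereference while delayed */
        erts_thr_progress_unmanaged_continue(dhndl);
        return ep;
    }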
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 17628286bc..1dc3ffeb3c 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -533,6 +533,9 @@ ERTS_GLB_INLINE erts_aint_t erts_no_atomic_xchg(erts_no_atomic_t *xchgp,
ERTS_GLB_INLINE erts_aint_t erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
erts_aint_t new,
erts_aint_t expected);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bset(erts_no_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
ERTS_GLB_INLINE void erts_no_atomic32_set(erts_no_atomic32_t *var,
erts_aint32_t i);
ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read(erts_no_atomic32_t *var);
@@ -553,6 +556,9 @@ ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp,
ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
erts_aint32_t new,
erts_aint32_t expected);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
char *name,
@@ -612,6 +618,78 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#ifdef USE_THREADS
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_nob(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_ddrb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_rb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_wb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_acqb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_relb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_mb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_nob(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_ddrb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_rb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_wb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_acqb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_relb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_mb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+#define ERTS_ATOMIC_BSET_IMPL__(Type, ReadOp, CmpxchgOp, VarP, Mask, Set) \
+do { \
+ Type act = ReadOp((VarP)); \
+ while (1) { \
+ Type exp = act; \
+ Type new = exp & ~(Mask); \
+ new |= ((Mask) & (Set)); \
+ act = CmpxchgOp((VarP), new, exp); \
+ if (act == exp) \
+ return act; \
+ } \
+} while (0)
+#endif
+
/*
* See "Documentation of atomics and memory barriers" at the top
* of this file for info on atomics.
@@ -670,6 +748,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_nob ethr_atomic_xchg
#define erts_atomic_cmpxchg_nob ethr_atomic_cmpxchg
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_nob(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_mb ethr_atomic_init_mb
#define erts_atomic_set_mb ethr_atomic_set_mb
#define erts_atomic_read_mb ethr_atomic_read_mb
@@ -684,6 +775,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_mb ethr_atomic_xchg_mb
#define erts_atomic_cmpxchg_mb ethr_atomic_cmpxchg_mb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_mb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_mb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_acqb ethr_atomic_init_acqb
#define erts_atomic_set_acqb ethr_atomic_set_acqb
#define erts_atomic_read_acqb ethr_atomic_read_acqb
@@ -698,6 +802,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_acqb ethr_atomic_xchg_acqb
#define erts_atomic_cmpxchg_acqb ethr_atomic_cmpxchg_acqb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_acqb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_acqb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_relb ethr_atomic_init_relb
#define erts_atomic_set_relb ethr_atomic_set_relb
#define erts_atomic_read_relb ethr_atomic_read_relb
@@ -712,6 +829,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_relb ethr_atomic_xchg_relb
#define erts_atomic_cmpxchg_relb ethr_atomic_cmpxchg_relb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_relb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_relb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_ddrb ethr_atomic_init_ddrb
#define erts_atomic_set_ddrb ethr_atomic_set_ddrb
#define erts_atomic_read_ddrb ethr_atomic_read_ddrb
@@ -726,6 +856,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_ddrb ethr_atomic_xchg_ddrb
#define erts_atomic_cmpxchg_ddrb ethr_atomic_cmpxchg_ddrb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_ddrb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_ddrb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_rb ethr_atomic_init_rb
#define erts_atomic_set_rb ethr_atomic_set_rb
#define erts_atomic_read_rb ethr_atomic_read_rb
@@ -740,6 +883,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_rb ethr_atomic_xchg_rb
#define erts_atomic_cmpxchg_rb ethr_atomic_cmpxchg_rb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_rb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_rb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_wb ethr_atomic_init_wb
#define erts_atomic_set_wb ethr_atomic_set_wb
#define erts_atomic_read_wb ethr_atomic_read_wb
@@ -754,6 +910,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_wb ethr_atomic_xchg_wb
#define erts_atomic_cmpxchg_wb ethr_atomic_cmpxchg_wb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_wb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_wb,
+ var, mask, set);
+}
+#endif
+
/* 32-bit atomics */
#define erts_atomic32_init_nob ethr_atomic32_init
@@ -770,6 +939,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_nob ethr_atomic32_xchg
#define erts_atomic32_cmpxchg_nob ethr_atomic32_cmpxchg
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_nob(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_mb ethr_atomic32_init_mb
#define erts_atomic32_set_mb ethr_atomic32_set_mb
#define erts_atomic32_read_mb ethr_atomic32_read_mb
@@ -784,6 +966,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_mb ethr_atomic32_xchg_mb
#define erts_atomic32_cmpxchg_mb ethr_atomic32_cmpxchg_mb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_mb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_mb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_acqb ethr_atomic32_init_acqb
#define erts_atomic32_set_acqb ethr_atomic32_set_acqb
#define erts_atomic32_read_acqb ethr_atomic32_read_acqb
@@ -798,6 +993,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_acqb ethr_atomic32_xchg_acqb
#define erts_atomic32_cmpxchg_acqb ethr_atomic32_cmpxchg_acqb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_acqb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_acqb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_relb ethr_atomic32_init_relb
#define erts_atomic32_set_relb ethr_atomic32_set_relb
#define erts_atomic32_read_relb ethr_atomic32_read_relb
@@ -812,6 +1020,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_relb ethr_atomic32_xchg_relb
#define erts_atomic32_cmpxchg_relb ethr_atomic32_cmpxchg_relb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_relb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_relb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_ddrb ethr_atomic32_init_ddrb
#define erts_atomic32_set_ddrb ethr_atomic32_set_ddrb
#define erts_atomic32_read_ddrb ethr_atomic32_read_ddrb
@@ -826,6 +1047,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_ddrb ethr_atomic32_xchg_ddrb
#define erts_atomic32_cmpxchg_ddrb ethr_atomic32_cmpxchg_ddrb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_ddrb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_ddrb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_rb ethr_atomic32_init_rb
#define erts_atomic32_set_rb ethr_atomic32_set_rb
#define erts_atomic32_read_rb ethr_atomic32_read_rb
@@ -840,6 +1074,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_rb ethr_atomic32_xchg_rb
#define erts_atomic32_cmpxchg_rb ethr_atomic32_cmpxchg_rb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_rb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_rb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_wb ethr_atomic32_init_wb
#define erts_atomic32_set_wb ethr_atomic32_set_wb
#define erts_atomic32_read_wb ethr_atomic32_read_wb
@@ -854,6 +1101,21 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_wb ethr_atomic32_xchg_wb
#define erts_atomic32_cmpxchg_wb ethr_atomic32_cmpxchg_wb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_wb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_wb,
+ var, mask, set);
+}
+#endif
+
+#undef ERTS_ATOMIC_BSET_IMPL__
+
#else /* !USE_THREADS */
/* Double word size atomics */
@@ -908,6 +1170,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_nob erts_no_atomic_read_band
#define erts_atomic_xchg_nob erts_no_atomic_xchg
#define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_nob erts_no_atomic_read_bset
#define erts_atomic_init_mb erts_no_atomic_set
#define erts_atomic_set_mb erts_no_atomic_set
@@ -922,6 +1185,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_mb erts_no_atomic_read_band
#define erts_atomic_xchg_mb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_mb erts_no_atomic_read_bset
#define erts_atomic_init_acqb erts_no_atomic_set
#define erts_atomic_set_acqb erts_no_atomic_set
@@ -936,6 +1200,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_acqb erts_no_atomic_read_band
#define erts_atomic_xchg_acqb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_acqb erts_no_atomic_read_bset
#define erts_atomic_init_relb erts_no_atomic_set
#define erts_atomic_set_relb erts_no_atomic_set
@@ -950,6 +1215,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_relb erts_no_atomic_read_band
#define erts_atomic_xchg_relb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_relb erts_no_atomic_read_bset
#define erts_atomic_init_ddrb erts_no_atomic_set
#define erts_atomic_set_ddrb erts_no_atomic_set
@@ -964,6 +1230,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_ddrb erts_no_atomic_read_band
#define erts_atomic_xchg_ddrb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_ddrb erts_no_atomic_read_bset
#define erts_atomic_init_rb erts_no_atomic_set
#define erts_atomic_set_rb erts_no_atomic_set
@@ -978,6 +1245,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_rb erts_no_atomic_read_band
#define erts_atomic_xchg_rb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_rb erts_no_atomic_read_bset
#define erts_atomic_init_wb erts_no_atomic_set
#define erts_atomic_set_wb erts_no_atomic_set
@@ -992,6 +1260,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_wb erts_no_atomic_read_band
#define erts_atomic_xchg_wb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_wb erts_no_atomic_read_bset
/* 32-bit atomics */
@@ -1008,6 +1277,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_nob erts_no_atomic32_read_band
#define erts_atomic32_xchg_nob erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_nob erts_no_atomic32_read_bset
#define erts_atomic32_init_mb erts_no_atomic32_set
#define erts_atomic32_set_mb erts_no_atomic32_set
@@ -1022,6 +1292,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_mb erts_no_atomic32_read_band
#define erts_atomic32_xchg_mb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_mb erts_no_atomic32_read_bset
#define erts_atomic32_init_acqb erts_no_atomic32_set
#define erts_atomic32_set_acqb erts_no_atomic32_set
@@ -1036,6 +1307,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_acqb erts_no_atomic32_read_band
#define erts_atomic32_xchg_acqb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_acqb erts_no_atomic32_read_bset
#define erts_atomic32_init_relb erts_no_atomic32_set
#define erts_atomic32_set_relb erts_no_atomic32_set
@@ -1050,6 +1322,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_relb erts_no_atomic32_read_band
#define erts_atomic32_xchg_relb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_relb erts_no_atomic32_read_bset
#define erts_atomic32_init_ddrb erts_no_atomic32_set
#define erts_atomic32_set_ddrb erts_no_atomic32_set
@@ -1064,6 +1337,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_ddrb erts_no_atomic32_read_band
#define erts_atomic32_xchg_ddrb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
#define erts_atomic32_init_rb erts_no_atomic32_set
#define erts_atomic32_set_rb erts_no_atomic32_set
@@ -1078,6 +1352,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_rb erts_no_atomic32_read_band
#define erts_atomic32_xchg_rb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_rb erts_no_atomic32_read_bset
#define erts_atomic32_init_wb erts_no_atomic32_set
#define erts_atomic32_set_wb erts_no_atomic32_set
@@ -1092,6 +1367,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_wb erts_no_atomic32_read_band
#define erts_atomic32_xchg_wb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_wb erts_no_atomic32_read_bset
#endif /* !USE_THREADS */
@@ -1856,6 +2132,17 @@ erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
return old;
}
+ERTS_GLB_INLINE erts_aint_t
+erts_no_atomic_read_bset(erts_no_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ erts_aint_t old = *var;
+ *var &= ~mask;
+ *var |= (mask & set);
+ return old;
+}
+
/* atomic32 */
ERTS_GLB_INLINE void
@@ -1943,6 +2230,17 @@ erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
return old;
}
+ERTS_GLB_INLINE erts_aint32_t
+erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ erts_aint32_t old = *var;
+ *var &= ~mask;
+ *var |= (mask & set);
+ return old;
+}
+
/* spinlock */
ERTS_GLB_INLINE void
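For reference, every read_bset variant added above has the same semantics: atomically replace the bits selected by mask with the corresponding bits of set, leave all other bits alone, and return the previous value (see ERTS_ATOMIC_BSET_IMPL__ and the erts_no_atomic*_read_bset fallbacks). A stand-alone illustration of that CAS loop in plain C11 atomics rather than the ethr API, so it can be compiled and run on its own:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Same logic as ERTS_ATOMIC_BSET_IMPL__: bits in 'mask' are replaced
     * by the corresponding bits of 'set'; the old value is returned. */
    static long
    read_bset(atomic_long *var, long mask, long set)
    {
        long exp = atomic_load(var);
        for (;;) {
            long new = (exp & ~mask) | (mask & set);
            /* on failure, 'exp' is refreshed with the current value */
            if (atomic_compare_exchange_weak(var, &exp, new))
                return exp;
        }
    }

    int main(void)
    {
        atomic_long flags = 0xf0f0;
        long old = read_bset(&flags, 0x00ff, 0x0055); /* touch low byte only */
        printf("old=%#lx new=%#lx\n", old, (long) atomic_load(&flags));
        /* prints: old=0xf0f0 new=0xf055 */
        return 0;
    }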
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index d04a91f18c..41f33efa79 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -156,8 +156,8 @@ do { (RES) = (TPID); } while(0)
#define ERTS_TRACER_REF_TYPE Process *
#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
do { \
- (RES) = erts_proc_lookup((TPID)); \
- if (!(RES) || !((RES)->trace_flags & F_TRACER)) { \
+ (RES) = erts_proc_lookup((TPID)); \
+ if (!(RES) || !(ERTS_TRACE_FLAGS((RES)) & F_TRACER)) { \
(TPID) = NIL; \
(TRACEE_FLGS) &= ~TRACEE_FLAGS; \
return; \
@@ -409,7 +409,7 @@ WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
}
#ifndef ERTS_SMP
- if (!INVALID_TRACER_PORT(trace_port, trace_port->id))
+ if (!INVALID_TRACER_PORT(trace_port, trace_port->common.id))
#endif
erts_raw_port_command(trace_port, buffer, ptr-buffer);
@@ -441,7 +441,7 @@ do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
message = TUPLE5(hp, am_trace_ts, pid, am_out, mfarity, timestamp);
/* Note, hp is deliberately NOT incremented since it will be reused */
- do_send_to_port(trace_port->id,
+ do_send_to_port(trace_port->common.id,
trace_port,
pid,
SYS_MSG_TYPE_UNDEFINED,
@@ -451,7 +451,7 @@ do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
hp += 5;
hp = patch_ts(message, hp);
- do_send_to_port(trace_port->id,
+ do_send_to_port(trace_port->common.id,
trace_port,
pid,
SYS_MSG_TYPE_UNDEFINED,
@@ -508,7 +508,7 @@ send_to_port(Process *c_p, Eterm message,
#endif
do_send_to_port(*tracer_pid,
trace_port,
- c_p ? c_p->id : NIL,
+ c_p ? c_p->common.id : NIL,
SYS_MSG_TYPE_TRACE,
message);
#ifndef ERTS_SMP
@@ -543,7 +543,7 @@ send_to_port(Process *c_p, Eterm message,
trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
do_send_to_port(*tracer_pid,
trace_port,
- c_p ? c_p->id : NIL,
+ c_p ? c_p->common.id : NIL,
SYS_MSG_TYPE_TRACE,
message);
@@ -557,7 +557,7 @@ send_to_port(Process *c_p, Eterm message,
* just after writing the real trace message, and now gets scheduled
* in again.
*/
- do_send_schedfix_to_port(trace_port, c_p->id, ts);
+ do_send_schedfix_to_port(trace_port, c_p->common.id, ts);
}
erts_port_release(trace_port);
@@ -599,15 +599,14 @@ profile_send(Eterm from, Eterm message) {
if (profiler_port) {
do_send_to_port(profiler,
profiler_port,
- NIL, /* or current process->id */
+ NIL, /* or current process->common.id */
SYS_MSG_TYPE_SYSPROF,
message);
erts_port_release(profiler_port);
}
} else {
- ASSERT(is_internal_pid(profiler)
- && internal_pid_index(profiler) < erts_max_processes);
+ ASSERT(is_internal_pid(profiler));
profile_p = erts_proc_lookup(profiler);
@@ -673,7 +672,7 @@ seq_trace_send_to_port(Process *c_p,
#endif
do_send_to_port(seq_tracer,
trace_port,
- c_p ? c_p->id : NIL,
+ c_p ? c_p->common.id : NIL,
SYS_MSG_TYPE_SEQTRACE,
message);
@@ -704,7 +703,7 @@ seq_trace_send_to_port(Process *c_p,
trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
do_send_to_port(seq_tracer,
trace_port,
- c_p ? c_p->id : NIL,
+ c_p ? c_p->common.id : NIL,
SYS_MSG_TYPE_SEQTRACE,
message);
@@ -718,7 +717,7 @@ seq_trace_send_to_port(Process *c_p,
* just after writing the real trace message, and now gets scheduled
* in again.
*/
- do_send_schedfix_to_port(trace_port, c_p->id, ts);
+ do_send_schedfix_to_port(trace_port, c_p->common.id, ts);
}
erts_port_release(trace_port);
@@ -729,7 +728,9 @@ seq_trace_send_to_port(Process *c_p,
}
#define TS_HEAP_WORDS 5
-#define TS_SIZE(p) (((p)->trace_flags & F_TIMESTAMP) ? TS_HEAP_WORDS : 0)
+#define TS_SIZE(p) ((ERTS_TRACE_FLAGS((p)) & F_TIMESTAMP) \
+ ? TS_HEAP_WORDS \
+ : 0)
/*
* Patch a timestamp into a tuple. The tuple must be the last thing
@@ -764,17 +765,17 @@ send_to_tracer(Process *tracee,
erts_smp_mtx_lock(&smq_mtx);
- if (tracee->trace_flags & F_TIMESTAMP)
+ if (ERTS_TRACE_FLAGS(tracee) & F_TIMESTAMP)
*hpp = patch_ts(msg, *hpp);
- if (is_internal_pid(tracee->tracer_proc))
- ERTS_ENQ_TRACE_MSG(tracee->id, tracer_ref, msg, bp);
+ if (is_internal_pid(ERTS_TRACER_PROC(tracee)))
+ ERTS_ENQ_TRACE_MSG(tracee->common.id, tracer_ref, msg, bp);
else {
- ASSERT(is_internal_port(tracee->tracer_proc));
+ ASSERT(is_internal_port(ERTS_TRACER_PROC(tracee)));
send_to_port(no_fake_sched ? NULL : tracee,
msg,
- &tracee->tracer_proc,
- &tracee->trace_flags);
+ &ERTS_TRACER_PROC(tracee),
+ &ERTS_TRACE_FLAGS(tracee));
}
erts_smp_mtx_unlock(&smq_mtx);
@@ -792,7 +793,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF;
int sched_no, curr_func, to_port, no_fake_sched;
- if (is_nil(p->tracer_proc))
+ if (is_nil(ERTS_TRACER_PROC(p)))
return;
no_fake_sched = never_fake_sched;
@@ -812,13 +813,14 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
}
sched_no = IS_TRACED_FL(p, F_TRACE_SCHED_NO);
- to_port = is_internal_port(p->tracer_proc);
+ to_port = is_internal_port(ERTS_TRACER_PROC(p));
if (!to_port) {
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
}
if (ERTS_PROC_IS_EXITING(p))
@@ -851,7 +853,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
}
if (!sched_no) {
- mess = TUPLE4(hp, am_trace, p->id, what, tmp);
+ mess = TUPLE4(hp, am_trace, p->common.id, what, tmp);
hp += 5;
}
else {
@@ -860,7 +862,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
#else
Eterm sched_id = make_small(1);
#endif
- mess = TUPLE5(hp, am_trace, p->id, what, sched_id, tmp);
+ mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, tmp);
hp += 6;
}
@@ -912,19 +914,19 @@ trace_send(Process *p, Eterm to, Eterm msg)
operation = am_atom_put(s, sys_strlen(s));
}
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (11)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE5(hp, am_trace, p->id, operation, msg, to);
+ mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -934,10 +936,11 @@ trace_send(Process *p, Eterm to, Eterm msg)
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
sz_msg = size_object(msg);
sz_to = size_object(to);
@@ -953,16 +956,16 @@ trace_send(Process *p, Eterm to, Eterm msg)
sz_msg,
&hp,
off_heap);
- mess = TUPLE5(hp, am_trace, p->id/* Local pid */, operation, msg, to);
+ mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -977,19 +980,19 @@ trace_receive(Process *rp, Eterm msg)
size_t sz_msg;
Eterm* hp;
- if (is_internal_port(rp->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(rp))) {
#define LOCAL_HEAP_SIZE (10)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE4(hp, am_trace, rp->id, am_receive, msg);
+ mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (rp->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- send_to_port(rp, mess, &rp->tracer_proc, &rp->trace_flags);
+ send_to_port(rp, mess, &ERTS_TRACER_PROC(rp), &ERTS_TRACE_FLAGS(rp));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -999,10 +1002,11 @@ trace_receive(Process *rp, Eterm msg)
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(rp->tracer_proc)
- && internal_pid_index(rp->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(rp)));
- ERTS_GET_TRACER_REF(tracer_ref, rp->tracer_proc, rp->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(rp),
+ ERTS_TRACE_FLAGS(rp));
sz_msg = size_object(msg);
@@ -1011,16 +1015,16 @@ trace_receive(Process *rp, Eterm msg)
hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, tracer_ref);
msg = copy_struct(msg, sz_msg, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, rp->id/* Local pid */, am_receive, msg);
+ mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (rp->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(rp->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(rp->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -1030,14 +1034,14 @@ seq_trace_update_send(Process *p)
{
Eterm seq_tracer = erts_get_system_seq_tracer();
ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
- if ( (p->id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL)
+ if ( (p->common.id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL)
#ifdef USE_VM_PROBES
|| (SEQ_TRACE_TOKEN(p) == am_have_dt_utag)
#endif
) {
return 0;
}
- SEQ_TRACE_TOKEN_SENDER(p) = p->id; /* Internal pid */
+ SEQ_TRACE_TOKEN_SENDER(p) = p->common.id;
SEQ_TRACE_TOKEN_SERIAL(p) =
make_small(++(p -> seq_trace_clock));
SEQ_TRACE_TOKEN_LASTCNT(p) =
@@ -1074,7 +1078,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
ASSERT(is_tuple(token) || is_nil(token));
if (SEQ_TRACE_T_SENDER(token) == seq_tracer || token == NIL ||
- (process && process->trace_flags & F_SENSITIVE)) {
+ (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE)) {
return;
}
@@ -1138,8 +1142,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
Uint sz_label, sz_lastcnt_serial, sz_msg, sz_ts, sz_sender,
sz_exitfrom, sz_receiver;
- ASSERT(is_internal_pid(seq_tracer)
- && internal_pid_index(seq_tracer) < erts_max_processes);
+ ASSERT(is_internal_pid(seq_tracer));
#ifndef ERTS_SMP
@@ -1253,17 +1256,17 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
hp += 4;
}
- mess = TUPLE4(hp, am_trace, p->id, am_return_to, mfa);
+ mess = TUPLE4(hp, am_trace, p->common.id, am_return_to, mfa);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- if (is_internal_port(p->tracer_proc)) {
- send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
+ send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
} else {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
@@ -1273,10 +1276,11 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
/*
* Find the tracer.
*/
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
size = size_object(mess);
@@ -1286,7 +1290,7 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
* Copy the trace message into the buffer and enqueue it.
*/
mess = copy_struct(mess, size, &hp, off_heap);
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
}
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -1315,25 +1319,25 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &p->tracer_proc;
+ tracer_pid = &ERTS_TRACER_PROC(p);
}
if (is_nil(*tracer_pid)) {
/* Trace disabled */
return;
}
ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->id) {
+ if (*tracer_pid == p->common.id) {
/* Do not generate trace messages to oneself */
return;
}
- if (tracer_pid == &p->tracer_proc) {
+ if (tracer_pid == &ERTS_TRACER_PROC(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
- tracee_flags = &p->trace_flags;
+ tracee_flags = &ERTS_TRACE_FLAGS(p);
#ifdef ERTS_SMP
- tracee = p->id;
+ tracee = p->common.id;
#endif
} else {
/* Tracer not specified in process structure =>
@@ -1362,7 +1366,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
hp = local_heap;
mfa = TUPLE3(hp, mod, name, make_small(arity));
hp += 4;
- mess = TUPLE5(hp, am_trace, p->id, am_return_from, mfa, retval);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
if (*tracee_flags & F_TIMESTAMP) {
@@ -1382,8 +1386,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
Eterm* limit;
#endif
- ASSERT(is_internal_pid(*tracer_pid)
- && internal_pid_index(*tracer_pid) < erts_max_processes);
+ ASSERT(is_internal_pid(*tracer_pid));
ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
@@ -1405,7 +1408,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
mfa = TUPLE3(hp, mod, name, make_small(arity));
hp += 4;
retval = copy_struct(retval, retval_size, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->id/* Local pid */, am_return_from, mfa, retval);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
@@ -1446,25 +1449,25 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &p->tracer_proc;
+ tracer_pid = &ERTS_TRACER_PROC(p);
}
if (is_nil(*tracer_pid)) {
/* Trace disabled */
return;
}
ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->id) {
+ if (*tracer_pid == p->common.id) {
/* Do not generate trace messages to oneself */
return;
}
- if (tracer_pid == &p->tracer_proc) {
+ if (tracer_pid == &ERTS_TRACER_PROC(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
- tracee_flags = &p->trace_flags;
+ tracee_flags = &ERTS_TRACE_FLAGS(p);
#ifdef ERTS_SMP
- tracee = p->id;
+ tracee = p->common.id;
#endif
if (! (*tracee_flags & F_TRACE_CALLS)) {
return;
@@ -1492,7 +1495,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
hp += 4;
cv = TUPLE2(hp, class, value);
hp += 3;
- mess = TUPLE5(hp, am_trace, p->id, am_exception_from, mfa_tuple, cv);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_exception_from, mfa_tuple, cv);
hp += 6;
ASSERT((hp - local_heap) <= LOCAL_HEAP_SIZE);
erts_smp_mtx_lock(&smq_mtx);
@@ -1514,8 +1517,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
Eterm* limit;
#endif
- ASSERT(is_internal_pid(*tracer_pid)
- && internal_pid_index(*tracer_pid) < erts_max_processes);
+ ASSERT(is_internal_pid(*tracer_pid));
ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
@@ -1539,7 +1541,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
value = copy_struct(value, value_size, &hp, off_heap);
cv = TUPLE2(hp, class, value);
hp += 3;
- mess = TUPLE5(hp, am_trace, p->id/* Local pid */,
+ mess = TUPLE5(hp, am_trace, p->common.id,
am_exception_from, mfa_tuple, cv);
hp += 6;
@@ -1593,25 +1595,25 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &p->tracer_proc;
+ tracer_pid = &ERTS_TRACER_PROC(p);
}
if (is_nil(*tracer_pid)) {
/* Trace disabled */
return 0;
}
ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->id) {
+ if (*tracer_pid == p->common.id) {
/* Do not generate trace messages to oneself */
return 0;
}
- if (tracer_pid == &p->tracer_proc) {
+ if (tracer_pid == &ERTS_TRACER_PROC(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
- tracee_flags = &p->trace_flags;
+ tracee_flags = &ERTS_TRACE_FLAGS(p);
#ifdef ERTS_SMP
- tracee = p->id;
+ tracee = p->common.id;
#endif
} else {
/* Tracer not specified in process structure =>
@@ -1619,7 +1621,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* meta trace =>
* use fixed flag set instead of process flags
*/
- if (p->trace_flags & F_SENSITIVE) {
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
/* No trace messages for sensitive processes. */
return 0;
}
@@ -1677,7 +1679,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
if (!erts_is_valid_tracer_port(*tracer_pid)) {
#ifdef ERTS_SMP
- ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc);
+ ASSERT(is_nil(tracee) || tracer_pid == &ERTS_TRACER_PROC(p));
if (is_not_nil(tracee))
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
@@ -1779,7 +1781,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* Build the trace tuple and send it to the port.
*/
- mess = TUPLE4(hp, am_trace, p->id, am_call, mfa_tuple);
+ mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
hp += 5;
if (pam_result != am_true) {
hp[-5] = make_arityval(5);
@@ -1814,21 +1816,21 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
Eterm* limit;
#endif
- ASSERT(is_internal_pid(*tracer_pid)
- && internal_pid_index(*tracer_pid) < erts_max_processes);
+ ASSERT(is_internal_pid(*tracer_pid));
tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
*tracer_pid, ERTS_PROC_LOCK_STATUS);
if (!tracer)
invalid_tracer = 1;
else {
- invalid_tracer = (tracer->trace_flags & F_TRACER) == 0;
+ invalid_tracer = !(ERTS_TRACE_FLAGS(tracer) & F_TRACER);
erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
}
if (invalid_tracer) {
#ifdef ERTS_SMP
- ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc);
+ ASSERT(is_nil(tracee)
+ || tracer_pid == &ERTS_TRACER_PROC(p));
if (is_not_nil(tracee))
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
@@ -1952,7 +1954,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* Build the trace tuple and enqueue it.
*/
- mess = TUPLE4(hp, am_trace, p->id/* Local pid */, am_call, mfa_tuple);
+ mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
hp += 5;
if (pam_result != am_true) {
hp[-5] = make_arityval(5);
@@ -1990,17 +1992,17 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0)
|| erts_thr_progress_is_blocking());
- if (is_internal_port(t_p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->id, what, data);
+ mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
send_to_port(
@@ -2011,7 +2013,9 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
/* Fake schedule out and in are never sent when smp enabled */
c_p,
#endif
- mess, &t_p->tracer_proc, &t_p->trace_flags);
+ mess,
+ &ERTS_TRACER_PROC(t_p),
+ &ERTS_TRACE_FLAGS(t_p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2022,10 +2026,11 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
ERTS_TRACER_REF_TYPE tracer_ref;
size_t sz_data;
- ASSERT(is_internal_pid(t_p->tracer_proc)
- && internal_pid_index(t_p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
- ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(t_p),
+ ERTS_TRACE_FLAGS(t_p));
sz_data = size_object(data);
@@ -2034,16 +2039,16 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
tmp = copy_struct(data, sz_data, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, t_p->id/* Local pid */, what, tmp);
+ mess = TUPLE4(hp, am_trace, t_p->common.id, what, tmp);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(t_p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -2064,7 +2069,7 @@ trace_proc_spawn(Process *p, Eterm pid,
Eterm mess;
Eterm* hp;
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (4+6+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2072,13 +2077,13 @@ trace_proc_spawn(Process *p, Eterm pid,
hp = local_heap;
mfa = TUPLE3(hp, mod, func, args);
hp += 4;
- mess = TUPLE5(hp, am_trace, p->id, am_spawn, pid, mfa);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, pid, mfa);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2090,10 +2095,11 @@ trace_proc_spawn(Process *p, Eterm pid,
size_t sz_args, sz_pid;
Uint need;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
sz_args = size_object(args);
sz_pid = size_object(pid);
@@ -2105,16 +2111,16 @@ trace_proc_spawn(Process *p, Eterm pid,
mfa = TUPLE3(hp, mod, func, tmp);
hp += 4;
tmp = copy_struct(pid, sz_pid, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->id, am_spawn, tmp, mfa);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, tmp, mfa);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -2204,7 +2210,7 @@ trace_gc(Process *p, Eterm what)
UseTmpHeap(LOCAL_HEAP_SIZE,p);
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
hp = local_heap;
#ifdef DEBUG
size = 0;
@@ -2216,10 +2222,11 @@ trace_gc(Process *p, Eterm what)
size += 5/*4-tuple*/ + TS_SIZE(p);
#endif
} else {
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
size = 0;
(void) erts_bld_atom_uint_2tup_list(NULL,
@@ -2243,19 +2250,19 @@ trace_gc(Process *p, Eterm what)
tags,
values);
- msg = TUPLE4(hp, am_trace, p->id/* Local pid */, what, msg);
+ msg = TUPLE4(hp, am_trace, p->common.id, what, msg);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(msg, hp);
}
ASSERT(hp == limit);
- if (is_internal_port(p->tracer_proc))
- send_to_port(p, msg, &p->tracer_proc, &p->trace_flags);
+ if (is_internal_port(ERTS_TRACER_PROC(p)))
+ send_to_port(p, msg, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
else
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, msg, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, msg, bp);
erts_smp_mtx_unlock(&smq_mtx);
UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
#undef LOCAL_HEAP_SIZE
@@ -2295,8 +2302,7 @@ monitor_long_gc(Process *p, Uint time) {
#endif
#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor)
- && internal_pid_index(system_monitor) < erts_max_processes);
+ ASSERT(is_internal_pid(system_monitor));
monitor_p = erts_proc_lookup(system_monitor);
if (!monitor_p || p == monitor_p)
return;
@@ -2321,7 +2327,7 @@ monitor_long_gc(Process *p, Uint time) {
sizeof(values)/sizeof(Uint),
tags,
values);
- msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, am_long_gc, list);
+ msg = TUPLE4(hp, am_monitor, p->common.id, am_long_gc, list);
#ifdef DEBUG
hp += 5 /* 4-tuple */;
@@ -2329,7 +2335,7 @@ monitor_long_gc(Process *p, Uint time) {
#endif
#ifdef ERTS_SMP
- enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
#else
erts_queue_message(monitor_p, NULL, bp, msg, NIL
#ifdef USE_VM_PROBES
@@ -2370,8 +2376,7 @@ monitor_large_heap(Process *p) {
#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor)
- && internal_pid_index(system_monitor) < erts_max_processes);
+ ASSERT(is_internal_pid(system_monitor));
monitor_p = erts_proc_lookup(system_monitor);
if (!monitor_p || p == monitor_p) {
return;
@@ -2397,7 +2402,7 @@ monitor_large_heap(Process *p) {
sizeof(values)/sizeof(Uint),
tags,
values);
- msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, am_large_heap, list);
+ msg = TUPLE4(hp, am_monitor, p->common.id, am_large_heap, list);
#ifdef DEBUG
hp += 5 /* 4-tuple */;
@@ -2405,7 +2410,7 @@ monitor_large_heap(Process *p) {
#endif
#ifdef ERTS_SMP
- enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
#else
erts_queue_message(monitor_p, NULL, bp, msg, NIL
#ifdef USE_VM_PROBES
@@ -2425,8 +2430,7 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
Eterm *hp, msg;
#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor)
- && internal_pid_index(system_monitor) < erts_max_processes);
+ ASSERT(is_internal_pid(system_monitor));
monitor_p = erts_proc_lookup(system_monitor);
if (!monitor_p || p == monitor_p)
return;
@@ -2434,11 +2438,11 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p);
- msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, type, spec);
+ msg = TUPLE4(hp, am_monitor, p->common.id, type, spec);
hp += 5;
#ifdef ERTS_SMP
- enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
#else
erts_queue_message(monitor_p, NULL, bp, msg, NIL
#ifdef USE_VM_PROBES
@@ -2560,21 +2564,21 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
Eterm mess;
Eterm* hp;
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (5+6)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name);
+ mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
/* No fake schedule */
- send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2584,25 +2588,26 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
size_t sz_data;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
sz_data = 6 + TS_SIZE(p);
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name);
+ mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -2623,20 +2628,20 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
- if (is_internal_port(t_p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->id, what, data);
+ mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
/* No fake schedule */
- send_to_port(NULL, mess, &t_p->tracer_proc, &t_p->trace_flags);
+ send_to_port(NULL,mess,&ERTS_TRACER_PROC(t_p),&ERTS_TRACE_FLAGS(t_p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2646,25 +2651,26 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
size_t sz_data;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(t_p->tracer_proc)
- && internal_pid_index(t_p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
sz_data = 5 + TS_SIZE(t_p);
- ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(t_p),
+ ERTS_TRACE_FLAGS(t_p));
hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
- mess = TUPLE4(hp, am_trace, t_p->id, what, data);
+ mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(t_p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -2689,7 +2695,7 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
int ws = 5;
Eterm sched_id = am_undefined;
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (5+6)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2704,21 +2710,21 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
#else
sched_id = make_small(1);
#endif
- mess = TUPLE5(hp, am_trace, p->id, what, sched_id, where);
+ mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
ws = 6;
} else {
- mess = TUPLE4(hp, am_trace, p->id, what, where);
+ mess = TUPLE4(hp, am_trace, p->common.id, what, where);
ws = 5;
}
hp += ws;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
/* No fake scheduling */
- send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2727,12 +2733,13 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) ws = 6; /* Make place for scheduler id */
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
hp = ERTS_ALLOC_SYSMSG_HEAP(ws+TS_SIZE(p), &bp, &off_heap, tracer_ref);
@@ -2744,19 +2751,19 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
#else
sched_id = make_small(1);
#endif
- mess = TUPLE5(hp, am_trace, p->id, what, sched_id, where);
+ mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
} else {
- mess = TUPLE4(hp, am_trace, p->id, what, where);
+ mess = TUPLE4(hp, am_trace, p->common.id, what, where);
}
hp += ws;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -2792,14 +2799,14 @@ profile_runnable_port(Port *p, Eterm status) {
GET_NOW(&Ms, &s, &us);
timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE5(hp, am_profile, p->id, status, count, timestamp); hp += 6;
+ msg = TUPLE5(hp, am_profile, p->common.id, status, count, timestamp); hp += 6;
#ifndef ERTS_SMP
- profile_send(p->id, msg);
+ profile_send(p->common.id, msg);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#else
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->id, NIL, msg, bp);
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
#endif
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -2846,13 +2853,13 @@ profile_runnable_proc(Process *p, Eterm status){
GET_NOW(&Ms, &s, &us);
timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE5(hp, am_profile, p->id, status, where, timestamp); hp += 6;
+ msg = TUPLE5(hp, am_profile, p->common.id, status, where, timestamp); hp += 6;
#ifndef ERTS_SMP
- profile_send(p->id, msg);
+ profile_send(p->common.id, msg);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#else
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->id, NIL, msg, bp);
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
#endif
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -2865,16 +2872,19 @@ profile_runnable_proc(Process *p, Eterm status){
void
erts_check_my_tracer_proc(Process *p)
{
- if (is_internal_pid(p->tracer_proc)) {
- Process *tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- p->tracer_proc, ERTS_PROC_LOCK_STATUS);
- int invalid_tracer = !tracer || !(tracer->trace_flags & F_TRACER);
+ if (is_internal_pid(ERTS_TRACER_PROC(p))) {
+ Process *tracer = erts_pid2proc(p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_TRACER_PROC(p),
+ ERTS_PROC_LOCK_STATUS);
+ int invalid_tracer = (!tracer
+ || !(ERTS_TRACE_FLAGS(tracer) & F_TRACER));
if (tracer)
erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
if (invalid_tracer) {
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- p->trace_flags &= ~TRACEE_FLAGS;
- p->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(p) &= ~TRACEE_FLAGS;
+ ERTS_TRACER_PROC(p) = NIL;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
@@ -3218,7 +3228,7 @@ sys_msg_dispatcher_func(void *unused)
proc = erts_pid2proc(NULL, 0, receiver, proc_locks);
if (!proc
|| (smqp->type == SYS_MSG_TYPE_TRACE
- && !(proc->trace_flags & F_TRACER))) {
+ && !(ERTS_TRACE_FLAGS(proc) & F_TRACER))) {
/* Bad tracer */
#ifdef DEBUG_PRINTOUTS
if (smqp->type == SYS_MSG_TYPE_TRACE && proc)
@@ -3245,16 +3255,14 @@ sys_msg_dispatcher_func(void *unused)
proc = erts_whereis_process(NULL,0,receiver,proc_locks,0);
if (!proc)
goto failure;
- else if (smqp->from == proc->id)
+ else if (smqp->from == proc->common.id)
goto drop_sys_msg;
else
goto queue_proc_msg;
}
else if (is_internal_port(receiver)) {
- port = erts_id2port_sflgs(receiver,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
+ port = erts_thr_id2port_sflgs(receiver,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
if (!port)
goto failure;
else {
@@ -3268,7 +3276,7 @@ sys_msg_dispatcher_func(void *unused)
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "delivered\n");
#endif
- erts_port_release(port);
+ erts_thr_port_release(port);
if (smqp->bp)
free_message_buffer(smqp->bp);
}
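The bulk of the erl_trace.c changes above are mechanical: direct Process/Port fields (p->id, p->trace_flags, p->tracer_proc) are routed through the shared common.id member and the ERTS_TRACE_FLAGS()/ERTS_TRACER_PROC() accessors, so the same tracing code serves both element types. The real declarations live in headers outside this excerpt; the sketch below only illustrates the pattern, with made-up names (MyCommon, MY_TRACE_FLAGS), not the actual ERTS layout:

    /* Illustration only -- not the actual ERTS structures. */
    typedef unsigned long MyEterm;   /* stand-in for Eterm */
    typedef unsigned long MyUint;    /* stand-in for Uint  */

    typedef struct {
        MyEterm id;            /* process or port identifier */
        MyUint  trace_flags;   /* F_TIMESTAMP, F_TRACER, ... */
        MyEterm tracer;        /* tracer pid/port, or NIL    */
    } MyCommon;

    typedef struct { MyCommon common; /* + process-specific fields */ } MyProc;
    typedef struct { MyCommon common; /* + port-specific fields    */ } MyPort;

    /* Accessors in the spirit of ERTS_TRACE_FLAGS()/ERTS_TRACER_PROC():
     * one macro serves both types because each embeds the shared part
     * as a member named "common". */
    #define MY_TRACE_FLAGS(P)   ((P)->common.trace_flags)
    #define MY_TRACER_PROC(P)   ((P)->common.tracer)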
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
new file mode 100644
index 0000000000..50fb27aab0
--- /dev/null
+++ b/erts/emulator/beam/erl_trace.h
@@ -0,0 +1,141 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+#ifndef ERL_TRACE_H__
+#define ERL_TRACE_H__
+
+struct binary;
+
+/* erl_bif_trace.c */
+Eterm erl_seq_trace_info(Process *p, Eterm arg1);
+void erts_system_monitor_clear(Process *c_p);
+void erts_system_profile_clear(Process *c_p);
+
+/* erl_trace.c */
+void erts_init_trace(void);
+void erts_trace_check_exiting(Eterm exiting);
+Eterm erts_set_system_seq_tracer(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ Eterm new);
+Eterm erts_get_system_seq_tracer(void);
+void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
+void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
+void erts_set_system_monitor(Eterm monitor);
+Eterm erts_get_system_monitor(void);
+
+#ifdef ERTS_SMP
+void erts_check_my_tracer_proc(Process *);
+void erts_block_sys_msg_dispatcher(void);
+void erts_release_sys_msg_dispatcher(void);
+void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
+ Eterm,
+ Eterm,
+ ErlHeapFragment *));
+void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
+#endif
+
+void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
+void trace_send(Process*, Eterm, Eterm);
+void trace_receive(Process*, Eterm);
+Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec, Eterm* args,
+ int local, Eterm *tracer_pid);
+void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid);
+void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
+ Eterm *tracer);
+void erts_trace_return_to(Process *p, BeamInstr *pc);
+void trace_sched(Process*, Eterm);
+void trace_proc(Process*, Process*, Eterm, Eterm);
+void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args);
+void save_calls(Process *p, Export *);
+void trace_gc(Process *p, Eterm what);
+/* port tracing */
+void trace_virtual_sched(Process*, Eterm);
+void trace_sched_ports(Port *pp, Eterm);
+void trace_sched_ports_where(Port *pp, Eterm, Eterm);
+void trace_port(Port *, Eterm what, Eterm data);
+void trace_port_open(Port *, Eterm calling_pid, Eterm drv_name);
+
+/* system_profile */
+void erts_set_system_profile(Eterm profile);
+Eterm erts_get_system_profile(void);
+void profile_scheduler(Eterm scheduler_id, Eterm);
+void profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint Ms, Uint s, Uint us);
+void profile_runnable_proc(Process* p, Eterm status);
+void profile_runnable_port(Port* p, Eterm status);
+void erts_system_profile_setup_active_schedulers(void);
+
+/* system_monitor */
+void monitor_long_gc(Process *p, Uint time);
+void monitor_large_heap(Process *p);
+void monitor_generic(Process *p, Eterm type, Eterm spec);
+Uint erts_trace_flag2bit(Eterm flag);
+int erts_trace_flags(Eterm List,
+ Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
+Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
+
+#ifdef ERTS_SMP
+void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
+#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \
+do { \
+ if ((ESDP)->pending_trace_msgs) \
+ erts_send_pending_trace_msgs((ESDP)); \
+} while (0)
+#else
+#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP)
+#endif
+
+#define seq_trace_output(token, msg, type, receiver, process) \
+seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
+#define seq_trace_output_exit(token, msg, type, receiver, exitfrom) \
+seq_trace_output_generic((token), (msg), (type), (receiver), NULL, (exitfrom))
+void seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
+ Eterm receiver, Process *process, Eterm exitfrom);
+
+int seq_trace_update_send(Process *process);
+
+Eterm erts_seq_trace(Process *process,
+ Eterm atom_type, Eterm atom_true_or_false,
+ int build_result);
+
+struct trace_pattern_flags {
+ unsigned int breakpoint : 1; /* Set if any other is set */
+ unsigned int local : 1; /* Local call trace breakpoint */
+ unsigned int meta : 1; /* Metadata trace breakpoint */
+ unsigned int call_count : 1; /* Fast call count breakpoint */
+ unsigned int call_time : 1; /* Fast call time breakpoint */
+};
+extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
+extern int erts_call_time_breakpoint_tracing;
+int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+ struct binary* match_prog_set,
+ struct binary *meta_match_prog_set,
+ int on, struct trace_pattern_flags,
+ Eterm meta_tracer_pid, int is_blocking);
+void
+erts_get_default_trace_pattern(int *trace_pattern_is_on,
+ struct binary **match_spec,
+ struct binary **meta_match_spec,
+ struct trace_pattern_flags *trace_pattern_flags,
+ Eterm *meta_tracer_pid);
+int erts_is_default_trace_enabled(void);
+void erts_bif_trace_init(void);
+int erts_finish_breakpointing(void);
+
+#endif /* ERL_TRACE_H__ */
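The header above declares ERTS_SMP_CHK_PEND_TRACE_MSGS() as a cheap check-then-flush of per-scheduler trace messages. A hedged sketch of a call site follows; the surrounding function is an assumption for illustration only.

/* Hypothetical call site: flush trace messages buffered by this
 * scheduler. Per the definition above, the macro is a no-op unless
 * esdp->pending_trace_msgs is set. */
static void flush_pending_trace_msgs(ErtsSchedulerData *esdp)
{
    ERTS_SMP_CHK_PEND_TRACE_MSGS(esdp);
}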
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
new file mode 100644
index 0000000000..a2064bd8a3
--- /dev/null
+++ b/erts/emulator/beam/erl_utils.h
@@ -0,0 +1,215 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_UTILS_H__
+#define ERL_UTILS_H__
+
+#include "sys.h"
+#include "erl_smp.h"
+#include "erl_printf.h"
+
+typedef struct {
+#ifdef DEBUG
+ int smp_api;
+#endif
+ union {
+ Uint64 not_atomic;
+#ifdef ARCH_64
+ erts_atomic_t atomic;
+#else
+ erts_dw_atomic_t atomic;
+#endif
+ } counter;
+} erts_interval_t;
+
+void erts_interval_init(erts_interval_t *);
+void erts_smp_interval_init(erts_interval_t *);
+Uint64 erts_step_interval_nob(erts_interval_t *);
+Uint64 erts_step_interval_relb(erts_interval_t *);
+Uint64 erts_smp_step_interval_nob(erts_interval_t *);
+Uint64 erts_smp_step_interval_relb(erts_interval_t *);
+Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64);
+Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64);
+Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64);
+Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64);
+#ifdef ARCH_32
+ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *);
+#endif
+ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ARCH_32
+
+ERTS_GLB_INLINE Uint64
+erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (Uint64) dw->dw_sint;
+#else
+ Uint64 res;
+ res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
+ return res;
+#endif
+}
+
+#endif
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_nob__(erts_interval_t *icp)
+{
+#ifdef ARCH_64
+ return (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
+#else
+ erts_dw_aint_t dw;
+ erts_dw_atomic_read_nob(&icp->counter.atomic, &dw);
+ return erts_interval_dw_aint_to_val__(&dw);
+#endif
+}
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_acqb__(erts_interval_t *icp)
+{
+#ifdef ARCH_64
+ return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
+#else
+ erts_dw_aint_t dw;
+ erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw);
+ return erts_interval_dw_aint_to_val__(&dw);
+#endif
+}
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_nob(erts_interval_t *icp)
+{
+ ASSERT(!icp->smp_api);
+ return erts_current_interval_nob__(icp);
+}
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_acqb(erts_interval_t *icp)
+{
+ ASSERT(!icp->smp_api);
+ return erts_current_interval_acqb__(icp);
+}
+
+ERTS_GLB_INLINE Uint64
+erts_smp_current_interval_nob(erts_interval_t *icp)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return erts_current_interval_nob__(icp);
+#else
+ return icp->counter.not_atomic;
+#endif
+}
+
+ERTS_GLB_INLINE Uint64
+erts_smp_current_interval_acqb(erts_interval_t *icp)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return erts_current_interval_acqb__(icp);
+#else
+ return icp->counter.not_atomic;
+#endif
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/*
+ * To be used to silence unused result warnings, but do not abuse it.
+ */
+void erts_silence_warn_unused_result(long unused);
+
+
+int erts_fit_in_bits_int64(Sint64);
+int erts_fit_in_bits_int32(Sint32);
+int list_length(Eterm);
+int erts_is_builtin(Eterm, Eterm, int);
+Uint32 make_broken_hash(Eterm);
+Uint32 block_hash(byte *, unsigned, Uint32);
+Uint32 make_hash2(Eterm);
+Uint32 make_hash(Eterm);
+
+
+Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str);
+Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
+Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
+Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
+Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
+Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
+Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
+Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
+Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
+#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
+Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
+Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm terms1[], Uint terms2[]);
+Eterm
+erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm atoms[], Uint uints[]);
+Eterm
+erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
+ Eterm atoms[], Uint uints1[], Uint uints2[]);
+
+void erts_init_utils(void);
+void erts_init_utils_mem(void);
+
+erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint);
+void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *);
+
+#if HALFWORD_HEAP
+int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base);
+# define eq(A,B) eq_rel(A,NULL,B,NULL)
+#else
+int eq(Eterm, Eterm);
+# define eq_rel(A,A_BASE,B,B_BASE) eq(A,B)
+#endif
+
+#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
+
+#if HALFWORD_HEAP
+Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*);
+#define CMP(A,B) cmp_rel(A,NULL,B,NULL)
+#else
+Sint cmp(Eterm, Eterm);
+#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B)
+#define CMP(A,B) cmp(A,B)
+#endif
+#define cmp_lt(a,b) (CMP((a),(b)) < 0)
+#define cmp_le(a,b) (CMP((a),(b)) <= 0)
+#define cmp_eq(a,b) (CMP((a),(b)) == 0)
+#define cmp_ne(a,b) (CMP((a),(b)) != 0)
+#define cmp_ge(a,b) (CMP((a),(b)) >= 0)
+#define cmp_gt(a,b) (CMP((a),(b)) > 0)
+
+#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
+#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
+#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
+#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
+
+#endif
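The erts_bld_* builders declared in erl_utils.h above are typically driven in two passes: a sizing pass with a NULL heap pointer, then a build pass after allocating the counted words. The sketch below assumes that convention; build_pair() is a hypothetical helper.

/* Sketch, assuming the usual two-pass erts_bld_* convention:
 * pass 1 (hpp == NULL) only accumulates the required heap size in sz;
 * pass 2 (szp == NULL) writes the term into the allocated heap. */
static Eterm build_pair(Process *c_p, Uint a, Uint b)
{
    Eterm res = THE_NON_VALUE;
    Uint sz = 0;
    Uint *szp = &sz;
    Uint *hp = NULL;
    Uint **hpp = NULL;

    while (1) {
        res = erts_bld_tuple(hpp, szp, 2,
                             erts_bld_uint(hpp, szp, a),
                             erts_bld_uint(hpp, szp, b));
        if (hpp)
            break;                      /* build pass done */
        hp = (Uint *) HAlloc(c_p, sz);  /* allocate what pass 1 counted */
        hpp = &hp;
        szp = NULL;
    }
    return res;
}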
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 4c0d3421c8..298241618f 100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -39,38 +39,8 @@
#include "erl_sys_driver.h"
#include "erl_debug.h"
#include "error.h"
-
-typedef struct port Port;
-#include "erl_port_task.h"
-
-typedef struct erts_driver_t_ erts_driver_t;
-
-#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
-
-typedef struct {
- ErlDrvSizeT size; /* total size in bytes */
-
- SysIOVec* v_start;
- SysIOVec* v_end;
- SysIOVec* v_head;
- SysIOVec* v_tail;
- SysIOVec v_small[SMALL_IO_QUEUE];
-
- ErlDrvBinary** b_start;
- ErlDrvBinary** b_end;
- ErlDrvBinary** b_head;
- ErlDrvBinary** b_tail;
- ErlDrvBinary* b_small[SMALL_IO_QUEUE];
-} ErlIOQueue;
-
-typedef struct line_buf { /* Buffer used in line oriented I/O */
- ErlDrvSizeT bufsiz; /* Size of character buffer */
- ErlDrvSizeT ovlen; /* Length of overflow data */
- ErlDrvSizeT ovsiz; /* Actual size of overflow buffer */
- char data[1]; /* Starting point of buffer data,
- data[0] is a flag indicating an unprocess CR,
- The rest is the overflow buffer. */
-} LineBuf;
+#include "erl_utils.h"
+#include "erl_port.h"
struct enif_environment_t /* ErlNifEnv */
{
@@ -90,162 +60,6 @@ extern void erts_print_nif_taints(int to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
extern void erl_nif_init(void);
-/*
- * Port Specific Data.
- *
- * Only use PrtSD for very rarely used data.
- */
-
-#define ERTS_PRTSD_SCHED_ID 0
-
-#define ERTS_PRTSD_SIZE 1
-
-typedef struct {
- void *data[ERTS_PRTSD_SIZE];
-} ErtsPrtSD;
-
-#ifdef ERTS_SMP
-typedef struct ErtsXPortsList_ ErtsXPortsList;
-#endif
-
-/*
- * Port locking:
- *
- * Locking is done either driver specific or port specific. When
- * driver specific locking is used, all instances of the driver,
- * i.e. ports running the driver, share the same lock. When port
- * specific locking is used each instance have its own lock.
- *
- * Most fields in the Port structure are protected by the lock
- * referred to by the lock field. I'v called it the port lock.
- * This lock is shared between all ports running the same driver
- * when driver specific locking is used.
- *
- * The 'sched' field is protected by the port tasks lock
- * (see erl_port_tasks.c)
- *
- * The 'status' field is protected by a combination of the port lock,
- * the port tasks lock, and the state_lck. It may be read if
- * the state_lck, or the port lock is held. It may only be
- * modified if both the port lock and the state_lck is held
- * (with one exception; see below). When changeing status from alive
- * to dead or vice versa, also the port task lock has to be held.
- * This in order to guarantee that tasks are scheduled only for
- * ports that are alive.
- *
- * The status field may be modified with only the state_lck
- * held when status is changed from dead to alive. This since no
- * threads can have any references to the port other than via the
- * port table.
- *
- * /rickard
- */
-
-struct port {
- ErtsPortTaskSched sched;
- ErtsPortTaskHandle timeout_task;
- erts_smp_atomic_t refc;
-#ifdef ERTS_SMP
- erts_smp_mtx_t *lock;
- ErtsXPortsList *xports;
- erts_smp_atomic_t run_queue;
- erts_smp_spinlock_t state_lck; /* protects: id, status, snapshot */
-#endif
- Eterm id; /* The Port id of this port */
- Eterm connected; /* A connected process */
- Eterm caller; /* Current caller. */
- Eterm data; /* Data associated with port. */
- ErlHeapFragment* bp; /* Heap fragment holding data (NULL if imm data). */
- ErtsLink *nlinks;
- ErtsMonitor *monitors; /* Only MON_ORIGIN monitors of pid's */
- Uint bytes_in; /* Number of bytes read */
- Uint bytes_out; /* Number of bytes written */
-#ifdef ERTS_SMP
- ErtsSmpPTimer *ptimer;
-#else
- ErlTimer tm; /* Timer entry */
-#endif
-
- Eterm tracer_proc; /* If the port is traced, this is the tracer */
- Uint trace_flags; /* Trace flags */
-
- ErlIOQueue ioq; /* driver accessible i/o queue */
- DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
- char *name; /* String used in the open */
- erts_driver_t* drv_ptr;
- UWord drv_data;
- SWord os_pid; /* Child process ID */
- ErtsProcList *suspended; /* List of suspended processes. */
- LineBuf *linebuf; /* Buffer to hold data not ready for
- process to get (line oriented I/O)*/
- Uint32 status; /* Status and type flags */
- int control_flags; /* Flags for port_control() */
- erts_aint32_t snapshot; /* Next snapshot that port should be part of */
- struct reg_proc *reg;
- ErlDrvPDL port_data_lock;
-
- ErtsPrtSD *psd; /* Port specific data */
-};
-
-
-ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE ErtsRunQueue *
-erts_port_runq(Port *prt)
-{
-#ifdef ERTS_SMP
- ErtsRunQueue *rq1, *rq2;
- rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
- if (!rq1)
- return NULL;
- while (1) {
- erts_smp_runq_lock(rq1);
- rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
- if (rq1 == rq2)
- return rq1;
- erts_smp_runq_unlock(rq1);
- rq1 = rq2;
- if (!rq1)
- return NULL;
- }
-#else
- return ERTS_RUNQ_IX(0);
-#endif
-}
-
-#endif
-
-
-ERTS_GLB_INLINE void *erts_prtsd_get(Port *p, int ix);
-ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void *
-erts_prtsd_get(Port *prt, int ix)
-{
- return prt->psd ? prt->psd->data[ix] : NULL;
-}
-
-ERTS_GLB_INLINE void *
-erts_prtsd_set(Port *prt, int ix, void *data)
-{
- if (prt->psd) {
- void *old = prt->psd->data[ix];
- prt->psd->data[ix] = data;
- return old;
- }
- else {
- prt->psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD));
- prt->psd->data[ix] = data;
- return NULL;
- }
-}
-
-#endif
-
/* Driver handle (wrapper for old plain handle) */
#define ERL_DE_OK 0
#define ERL_DE_UNLOAD 1
@@ -297,7 +111,7 @@ typedef struct {
or that wait for it to change state */
erts_refc_t refc; /* Number of ports/processes having
references to the driver */
- Uint port_count; /* Number of ports using the driver */
+ erts_smp_atomic32_t port_count; /* Number of ports using the driver */
Uint flags; /* ERL_DE_FL_KILL_PORTS */
int status; /* ERL_DE_xxx */
char *full_path; /* Full path of the driver */
@@ -349,7 +163,7 @@ struct erts_driver_t_ {
};
extern erts_driver_t *driver_list;
-extern erts_smp_mtx_t erts_driver_list_lock;
+extern erts_smp_rwmtx_t erts_driver_list_lock;
extern void erts_ddll_init(void);
extern void erts_ddll_lock_driver(DE_Handle *dh, char *name);
@@ -529,40 +343,9 @@ union erl_off_heap_ptr {
void* voidp;
};
-/* arrays that get malloced at startup */
-extern Port* erts_port;
-
-extern Uint erts_max_ports;
-extern Uint erts_port_tab_index_mask;
-extern erts_smp_atomic32_t erts_ports_snapshot;
-extern erts_smp_atomic_t erts_dead_ports_ptr;
-
-ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
-{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_spinlock_is_locked(&prt->state_lck));
- if (prt->snapshot != erts_smp_atomic32_read_acqb(&erts_ports_snapshot)) {
- /* Dead ports are added from the end of the snapshot buffer */
- Eterm* tombstone;
- tombstone = (Eterm*) erts_smp_atomic_add_read_nob(&erts_dead_ports_ptr,
- -(erts_aint_t)sizeof(Eterm));
- ASSERT(tombstone+1 != NULL);
- ASSERT(prt->snapshot == erts_smp_atomic32_read_nob(&erts_ports_snapshot) - 1);
- *tombstone = prt->id;
- }
- /*else no ongoing snapshot or port was already included or created after snapshot */
-}
-
-#endif
-
/* controls warning mapping in error_logger */
extern Eterm node_cookie;
-extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */
-extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
extern Uint display_items; /* no of items to display in traces etc */
extern int erts_backtrace_depth;
@@ -700,54 +483,6 @@ do { \
#define WSTACK_ISEMPTY(s) (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_start))
#define WSTACK_POP(s) (*(--WSTK_CONCAT(s,_sp)))
-
-/* port status flags */
-
-#define ERTS_PORT_SFLG_CONNECTED ((Uint32) (1 << 0))
-/* Port have begun exiting */
-#define ERTS_PORT_SFLG_EXITING ((Uint32) (1 << 1))
-/* Distribution port */
-#define ERTS_PORT_SFLG_DISTRIBUTION ((Uint32) (1 << 2))
-#define ERTS_PORT_SFLG_BINARY_IO ((Uint32) (1 << 3))
-#define ERTS_PORT_SFLG_SOFT_EOF ((Uint32) (1 << 4))
-/* Flow control */
-#define ERTS_PORT_SFLG_PORT_BUSY ((Uint32) (1 << 5))
-/* Port is closing (no i/o accepted) */
-#define ERTS_PORT_SFLG_CLOSING ((Uint32) (1 << 6))
-/* Send a closed message when terminating */
-#define ERTS_PORT_SFLG_SEND_CLOSED ((Uint32) (1 << 7))
-/* Line orinted io on port */
-#define ERTS_PORT_SFLG_LINEBUF_IO ((Uint32) (1 << 8))
-/* Immortal port (only certain system ports) */
-#define ERTS_PORT_SFLG_IMMORTAL ((Uint32) (1 << 9))
-#define ERTS_PORT_SFLG_FREE ((Uint32) (1 << 10))
-#define ERTS_PORT_SFLG_FREE_SCHEDULED ((Uint32) (1 << 11))
-#define ERTS_PORT_SFLG_INITIALIZING ((Uint32) (1 << 12))
-/* Port uses port specific locking (opposed to driver specific locking) */
-#define ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK ((Uint32) (1 << 13))
-#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 14))
-/* Last port to terminate halts the emulator */
-#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 15))
-#ifdef DEBUG
-/* Only debug: make sure all flags aren't cleared unintentionally */
-#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31))
-#endif
-
-/* Combinations of port status flags */
-#define ERTS_PORT_SFLGS_DEAD \
- (ERTS_PORT_SFLG_FREE \
- | ERTS_PORT_SFLG_FREE_SCHEDULED \
- | ERTS_PORT_SFLG_INITIALIZING)
-#define ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
- (ERTS_PORT_SFLGS_DEAD | ERTS_PORT_SFLG_INVALID)
-#define ERTS_PORT_SFLGS_INVALID_LOOKUP \
- (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
- | ERTS_PORT_SFLG_CLOSING)
-#define ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP \
- (ERTS_PORT_SFLGS_INVALID_LOOKUP \
- | ERTS_PORT_SFLG_PORT_BUSY \
- | ERTS_PORT_SFLG_DISTRIBUTION)
-
/* binary.c */
void erts_emasculate_writable_binary(ProcBin* pb);
@@ -758,11 +493,35 @@ Eterm erts_realloc_binary(Eterm bin, size_t size);
/* erl_bif_info.c */
+Eterm
+erts_bld_port_info(Eterm **hpp,
+ ErlOffHeap *ohp,
+ Uint *szp,
+ Port *prt,
+ Eterm item);
+
void erts_bif_info_init(void);
/* bif.c */
Eterm erts_make_ref(Process *);
Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]);
+void erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ Eterm *hp = HAlloc(c_p, REF_THING_SIZE);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+#endif
+
void erts_queue_monitor_message(Process *,
ErtsProcLocks*,
Eterm,
@@ -778,13 +537,6 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg);
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
-/* erl_bif_port.c */
-
-/* erl_bif_trace.c */
-Eterm erl_seq_trace_info(Process *p, Eterm arg1);
-void erts_system_monitor_clear(Process *c_p);
-void erts_system_profile_clear(Process *c_p);
-
/* beam_load.c */
typedef struct {
BeamInstr* current; /* Pointer to: Mod, Name, Arity */
@@ -960,11 +712,6 @@ void erts_free_heap_frags(Process* p);
/* io.c */
-struct erl_drv_port_data_lock {
- erts_mtx_t mtx;
- erts_atomic_t refc;
-};
-
typedef struct {
char *name;
char *driver_name;
@@ -973,477 +720,33 @@ typedef struct {
#define ERTS_SPAWN_DRIVER 1
#define ERTS_SPAWN_EXECUTABLE 2
#define ERTS_SPAWN_ANY (ERTS_SPAWN_DRIVER | ERTS_SPAWN_EXECUTABLE)
-
int erts_add_driver_entry(ErlDrvEntry *drv, DE_Handle *handle, int driver_list_locked);
void erts_destroy_driver(erts_driver_t *drv);
-void erts_wake_process_later(Port*, Process*);
-int erts_open_driver(erts_driver_t*, Eterm, char*, SysDriverOpts*, int *);
-int erts_is_port_ioq_empty(Port *);
-void erts_terminate_port(Port *);
-void close_port(Eterm);
-void init_io(void);
-void cleanup_io(void);
-void erts_do_exit_port(Port *, Eterm, Eterm);
-void erts_port_command(Process *, Eterm, Port *, Eterm);
-Eterm erts_port_control(Process*, Port*, Uint, Eterm);
-int erts_write_to_port(Eterm caller_id, Port *p, Eterm list);
-void print_port_info(int, void *, int);
+int erts_save_suspend_process_on_port(Port*, Process*);
+Port *erts_open_driver(erts_driver_t*, Eterm, char*, SysDriverOpts*, int *, int *);
+void erts_init_io(int, int);
void erts_raw_port_command(Port*, byte*, Uint);
-void driver_report_exit(int, int);
+void driver_report_exit(ErlDrvPort, int);
LineBuf* allocate_linebuf(int);
int async_ready(Port *, void*);
-Sint erts_test_next_port(int, Uint);
ErtsPortNames *erts_get_port_names(Eterm);
void erts_free_port_names(ErtsPortNames *);
Uint erts_port_ioq_size(Port *pp);
void erts_stale_drv_select(Eterm, ErlDrvEvent, int, int);
-void erts_port_cleanup(Port *);
-void erts_fire_port_monitor(Port *prt, Eterm ref);
Port *erts_get_heart_port(void);
-#ifdef ERTS_SMP
-void erts_smp_xports_unlock(Port *);
-#endif
-
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_enable_io_lock_count(int enable);
#endif
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-int erts_lc_is_port_locked(Port *);
-#endif
-
-ERTS_GLB_INLINE void erts_smp_port_state_lock(Port*);
-ERTS_GLB_INLINE void erts_smp_port_state_unlock(Port*);
-
-ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void
-erts_smp_port_state_lock(Port* prt)
-{
-#ifdef ERTS_SMP
- erts_smp_spin_lock(&prt->state_lck);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_port_state_unlock(Port *prt)
-{
-#ifdef ERTS_SMP
- erts_smp_spin_unlock(&prt->state_lck);
-#endif
-}
-
-
-ERTS_GLB_INLINE int
-erts_smp_port_trylock(Port *prt)
-{
- int res;
-
- ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0);
- erts_smp_atomic_inc_nob(&prt->refc);
-
-#ifdef ERTS_SMP
- res = erts_smp_mtx_trylock(prt->lock);
- if (res == EBUSY) {
- erts_smp_atomic_dec_nob(&prt->refc);
- }
-#else
- res = 0;
-#endif
-
- return res;
-}
-
-ERTS_GLB_INLINE void
-erts_smp_port_lock(Port *prt)
-{
- ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0);
- erts_smp_atomic_inc_nob(&prt->refc);
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(prt->lock);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_port_unlock(Port *prt)
-{
- erts_aint_t refc;
-#ifdef ERTS_SMP
- erts_smp_mtx_unlock(prt->lock);
-#endif
- refc = erts_smp_atomic_dec_read_nob(&prt->refc);
- ASSERT(refc >= 0);
- if (refc == 0)
- erts_port_cleanup(prt);
-}
-
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
-
-#define ERTS_INVALID_PORT_OPT(PP, ID, FLGS) \
- (!(PP) || ((PP)->status & (FLGS)) || (PP)->id != (ID))
-
-/* port lookup */
-
-#define INVALID_PORT(PP, ID) \
- ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_LOOKUP)
-
-/* Invalidate trace port if anything suspicious, for instance
- * that the port is a distribution port or it is busy.
- */
-#define INVALID_TRACER_PORT(PP, ID) \
- ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)
-
-#define ERTS_PORT_SCHED_ID(P, ID) \
- ((Uint) (UWord) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (UWord) (ID)))
-
-#ifdef ERTS_SMP
-Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
-#endif
-
-#define erts_id2port(ID, P, PL) \
- erts_id2port_sflgs((ID), (P), (PL), ERTS_PORT_SFLGS_INVALID_LOOKUP)
-
-ERTS_GLB_INLINE Port*erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
-ERTS_GLB_INLINE void erts_port_release(Port *);
-ERTS_GLB_INLINE Port*erts_drvport2port(ErlDrvPort);
-ERTS_GLB_INLINE Port*erts_drvportid2port(Eterm);
-ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm id);
-ERTS_GLB_INLINE int erts_is_port_alive(Eterm id);
-ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm id);
-ERTS_GLB_INLINE void erts_port_status_bandor_set(Port *, Uint32, Uint32);
-ERTS_GLB_INLINE void erts_port_status_band_set(Port *, Uint32);
-ERTS_GLB_INLINE void erts_port_status_bor_set(Port *, Uint32);
-ERTS_GLB_INLINE void erts_port_status_set(Port *, Uint32);
-ERTS_GLB_INLINE Uint32 erts_port_status_get(Port *);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Port*
-erts_id2port_sflgs(Eterm id, Process *c_p, ErtsProcLocks c_p_locks, Uint32 sflgs)
-{
-#ifdef ERTS_SMP
- int no_proc_locks = !c_p || !c_p_locks;
-#endif
- Port *prt;
-
- if (is_not_internal_port(id))
- return NULL;
-
- prt = &erts_port[internal_port_index(id)];
-
- erts_smp_port_state_lock(prt);
- if (ERTS_INVALID_PORT_OPT(prt, id, sflgs)) {
- erts_smp_port_state_unlock(prt);
- prt = NULL;
- }
- else {
- erts_smp_atomic_inc_nob(&prt->refc);
- erts_smp_port_state_unlock(prt);
-
-#ifdef ERTS_SMP
- if (no_proc_locks)
- erts_smp_mtx_lock(prt->lock);
- else if (erts_smp_mtx_trylock(prt->lock) == EBUSY) {
- /* Unlock process locks, and acquire locks in lock order... */
- erts_smp_proc_unlock(c_p, c_p_locks);
- erts_smp_mtx_lock(prt->lock);
- erts_smp_proc_lock(c_p, c_p_locks);
- }
-
- /* The id may not have changed... */
- ERTS_SMP_LC_ASSERT(prt->id == id);
- /* ... but status may have... */
- if (prt->status & sflgs) {
- erts_smp_port_unlock(prt); /* Also decrements refc... */
- prt = NULL;
- }
-#endif
-
- }
-
- return prt;
-}
-
-ERTS_GLB_INLINE void
-erts_port_release(Port *prt)
-{
- erts_smp_port_unlock(prt);
-}
-
-ERTS_GLB_INLINE Port*
-erts_drvport2port(ErlDrvPort drvport)
-{
- int ix = (int) drvport;
- if (ix < 0 || erts_max_ports <= ix)
- return NULL;
- if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- return NULL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix]));
- return &erts_port[ix];
-}
-
-ERTS_GLB_INLINE Port*
-erts_drvportid2port(Eterm id)
-{
- int ix;
- if (is_not_internal_port(id))
- return NULL;
- ix = (int) internal_port_index(id);
- if (erts_max_ports <= ix)
- return NULL;
- if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- return NULL;
- if (erts_port[ix].id != id)
- return NULL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix]));
- return &erts_port[ix];
-}
-
-ERTS_GLB_INLINE Uint32
-erts_portid2status(Eterm id)
-{
- if (is_not_internal_port(id))
- return ERTS_PORT_SFLG_INVALID;
- else {
- Uint32 status;
- int ix = internal_port_index(id);
- if (erts_max_ports <= ix)
- return ERTS_PORT_SFLG_INVALID;
- erts_smp_port_state_lock(&erts_port[ix]);
- if (erts_port[ix].id == id)
- status = erts_port[ix].status;
- else
- status = ERTS_PORT_SFLG_INVALID;
- erts_smp_port_state_unlock(&erts_port[ix]);
- return status;
- }
-}
-
-ERTS_GLB_INLINE int
-erts_is_port_alive(Eterm id)
-{
- return !(erts_portid2status(id) & (ERTS_PORT_SFLG_INVALID
- | ERTS_PORT_SFLGS_DEAD));
-}
-
-ERTS_GLB_INLINE int
-erts_is_valid_tracer_port(Eterm id)
-{
- return !(erts_portid2status(id) & ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
-}
-
-ERTS_GLB_INLINE void erts_port_status_bandor_set(Port *prt,
- Uint32 band_status,
- Uint32 bor_status)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_port_state_lock(prt);
- prt->status &= band_status;
- prt->status |= bor_status;
- erts_smp_port_state_unlock(prt);
-}
-
-ERTS_GLB_INLINE void erts_port_status_band_set(Port *prt, Uint32 status)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_port_state_lock(prt);
- prt->status &= status;
- erts_smp_port_state_unlock(prt);
-}
-
-ERTS_GLB_INLINE void erts_port_status_bor_set(Port *prt, Uint32 status)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_port_state_lock(prt);
- prt->status |= status;
- erts_smp_port_state_unlock(prt);
-}
-
-ERTS_GLB_INLINE void erts_port_status_set(Port *prt, Uint32 status)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_port_state_lock(prt);
- prt->status = status;
- erts_smp_port_state_unlock(prt);
-}
-
-ERTS_GLB_INLINE Uint32 erts_port_status_get(Port *prt)
-{
- Uint32 res;
- erts_smp_port_state_lock(prt);
- res = prt->status;
- erts_smp_port_state_unlock(prt);
- return res;
-}
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
/* erl_drv_thread.c */
void erl_drv_thr_init(void);
-/* time.c */
-
/* utils.c */
-
-typedef struct {
-#ifdef DEBUG
- int smp_api;
-#endif
- union {
- Uint64 not_atomic;
-#ifdef ARCH_64
- erts_atomic_t atomic;
-#else
- erts_dw_atomic_t atomic;
-#endif
- } counter;
-} erts_interval_t;
-
-void erts_interval_init(erts_interval_t *);
-void erts_smp_interval_init(erts_interval_t *);
-Uint64 erts_step_interval_nob(erts_interval_t *);
-Uint64 erts_step_interval_relb(erts_interval_t *);
-Uint64 erts_smp_step_interval_nob(erts_interval_t *);
-Uint64 erts_smp_step_interval_relb(erts_interval_t *);
-Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64);
-Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64);
-Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-#ifdef ARCH_32
-ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *);
-#endif
-ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-#ifdef ARCH_32
-
-ERTS_GLB_INLINE Uint64
-erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-#endif
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_nob__(erts_interval_t *icp)
-{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_nob(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
-}
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_acqb__(erts_interval_t *icp)
-{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
-}
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_nob(erts_interval_t *icp)
-{
- ASSERT(!icp->smp_api);
- return erts_current_interval_nob__(icp);
-}
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_acqb(erts_interval_t *icp)
-{
- ASSERT(!icp->smp_api);
- return erts_current_interval_acqb__(icp);
-}
-
-ERTS_GLB_INLINE Uint64
-erts_smp_current_interval_nob(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return erts_current_interval_nob__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
-}
-
-ERTS_GLB_INLINE Uint64
-erts_smp_current_interval_acqb(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return erts_current_interval_acqb__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
-}
-
-#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
-/*
- * To be used to silence unused result warnings, but do not abuse it.
- */
-void erts_silence_warn_unused_result(long unused);
-
void erts_cleanup_offheap(ErlOffHeap *offheap);
-int erts_fit_in_bits_int64(Sint64);
-int erts_fit_in_bits_int32(Sint32);
-int list_length(Eterm);
Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex);
-int erts_is_builtin(Eterm, Eterm, int);
-Uint32 make_broken_hash(Eterm);
-Uint32 block_hash(byte *, unsigned, Uint32);
-Uint32 make_hash2(Eterm);
-Uint32 make_hash(Eterm);
-
-
-Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str);
-Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
-Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
-Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
-Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
-Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
-Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
-Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
-Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
-#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
-Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
-Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm terms1[], Uint terms2[]);
-Eterm
-erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm atoms[], Uint uints[]);
-Eterm
-erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
- Eterm atoms[], Uint uints1[], Uint uints2[]);
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
@@ -1458,42 +761,6 @@ Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
(ASSERT_EXPR(is_node_container((NC))), \
IS_CONST((NC)) ? (NC) : store_external_or_ref_in_proc_((Pp), (NC)))
-void erts_init_utils(void);
-void erts_init_utils_mem(void);
-
-erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint);
-void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *);
-
-#if HALFWORD_HEAP
-int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base);
-# define eq(A,B) eq_rel(A,NULL,B,NULL)
-#else
-int eq(Eterm, Eterm);
-# define eq_rel(A,A_BASE,B,B_BASE) eq(A,B)
-#endif
-
-#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
-
-#if HALFWORD_HEAP
-Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*);
-#define CMP(A,B) cmp_rel(A,NULL,B,NULL)
-#else
-Sint cmp(Eterm, Eterm);
-#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B)
-#define CMP(A,B) cmp(A,B)
-#endif
-#define cmp_lt(a,b) (CMP((a),(b)) < 0)
-#define cmp_le(a,b) (CMP((a),(b)) <= 0)
-#define cmp_eq(a,b) (CMP((a),(b)) == 0)
-#define cmp_ne(a,b) (CMP((a),(b)) != 0)
-#define cmp_ge(a,b) (CMP((a),(b)) >= 0)
-#define cmp_gt(a,b) (CMP((a),(b)) > 0)
-
-#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
-#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
-#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
-#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
-
/* duplicates from big.h */
int term_to_Uint(Eterm term, Uint *up);
int term_to_UWord(Eterm, UWord*);
@@ -1530,79 +797,6 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes);
#define ERTS_UTF8_ERROR 2
#define ERTS_UTF8_ANALYZE_MORE 3
-/* erl_trace.c */
-void erts_init_trace(void);
-void erts_trace_check_exiting(Eterm exiting);
-Eterm erts_set_system_seq_tracer(Process *c_p,
- ErtsProcLocks c_p_locks,
- Eterm new);
-Eterm erts_get_system_seq_tracer(void);
-void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
-void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
-void erts_set_system_monitor(Eterm monitor);
-Eterm erts_get_system_monitor(void);
-
-#ifdef ERTS_SMP
-void erts_check_my_tracer_proc(Process *);
-void erts_block_sys_msg_dispatcher(void);
-void erts_release_sys_msg_dispatcher(void);
-void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
- Eterm,
- Eterm,
- ErlHeapFragment *));
-void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
-#endif
-
-void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
-void trace_send(Process*, Eterm, Eterm);
-void trace_receive(Process*, Eterm);
-Uint32 erts_call_trace(Process *p, BeamInstr mfa[], Binary *match_spec, Eterm* args,
- int local, Eterm *tracer_pid);
-void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid);
-void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
- Eterm *tracer);
-void erts_trace_return_to(Process *p, BeamInstr *pc);
-void trace_sched(Process*, Eterm);
-void trace_proc(Process*, Process*, Eterm, Eterm);
-void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args);
-void save_calls(Process *p, Export *);
-void trace_gc(Process *p, Eterm what);
-/* port tracing */
-void trace_virtual_sched(Process*, Eterm);
-void trace_sched_ports(Port *pp, Eterm);
-void trace_sched_ports_where(Port *pp, Eterm, Eterm);
-void trace_port(Port *, Eterm what, Eterm data);
-void trace_port_open(Port *, Eterm calling_pid, Eterm drv_name);
-
-/* system_profile */
-void erts_set_system_profile(Eterm profile);
-Eterm erts_get_system_profile(void);
-void profile_scheduler(Eterm scheduler_id, Eterm);
-void profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint Ms, Uint s, Uint us);
-void profile_runnable_proc(Process* p, Eterm status);
-void profile_runnable_port(Port* p, Eterm status);
-void erts_system_profile_setup_active_schedulers(void);
-
-/* system_monitor */
-void monitor_long_gc(Process *p, Uint time);
-void monitor_large_heap(Process *p);
-void monitor_generic(Process *p, Eterm type, Eterm spec);
-Uint erts_trace_flag2bit(Eterm flag);
-int erts_trace_flags(Eterm List,
- Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
-Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
-
-#ifdef ERTS_SMP
-void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
-#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \
-do { \
- if ((ESDP)->pending_trace_msgs) \
- erts_send_pending_trace_msgs((ESDP)); \
-} while (0)
-#else
-#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP)
-#endif
-
void bin_write(int, void*, byte*, size_t);
int intlist_to_buf(Eterm, char*, int); /* most callers pass plain char*'s */
@@ -1620,9 +814,16 @@ char* Sint_to_buf(Sint, struct Sint_buf*);
#define ERTS_IOLIST_TYPE 2
Eterm buf_to_intlist(Eterm**, char*, size_t, Eterm); /* most callers pass plain char*'s */
-int io_list_to_buf(Eterm, char*, int);
-int io_list_to_buf2(Eterm, char*, int);
-int erts_iolist_size(Eterm, Uint *);
+
+#define ERTS_IOLIST_TO_BUF_OVERFLOW (~((ErlDrvSizeT) 0))
+#define ERTS_IOLIST_TO_BUF_TYPE_ERROR (~((ErlDrvSizeT) 1))
+#define ERTS_IOLIST_TO_BUF_FAILED(R) \
+ (((R) & (~((ErlDrvSizeT) 1))) == (~((ErlDrvSizeT) 1)))
+#define ERTS_IOLIST_TO_BUF_SUCCEEDED(R) \
+ (!ERTS_IOLIST_TO_BUF_FAILED((R)))
+
+ErlDrvSizeT erts_iolist_to_buf(Eterm, char*, ErlDrvSizeT);
+int erts_iolist_size(Eterm, ErlDrvSizeT *);
int is_string(Eterm);
void erl_at_exit(void (*) (void*), void*);
Eterm collect_memory(Process *);
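erts_iolist_to_buf() now reports failure through its ErlDrvSizeT return value, using the ERTS_IOLIST_TO_BUF_* encodings declared above. A hedged sketch of checking the result follows; the wrapper is hypothetical, and nothing beyond "failed vs. not failed" is assumed about a successful return value.

/* Hypothetical wrapper: distinguish the two failure encodings declared
 * above from a successful conversion. */
static int copy_iolist(Eterm iolist, char *buf, ErlDrvSizeT buf_len)
{
    ErlDrvSizeT res = erts_iolist_to_buf(iolist, buf, buf_len);
    if (ERTS_IOLIST_TO_BUF_FAILED(res)) {
        if (res == ERTS_IOLIST_TO_BUF_OVERFLOW)
            return -1;  /* buffer too small */
        return -2;      /* ERTS_IOLIST_TO_BUF_TYPE_ERROR: not an iolist */
    }
    return 0;           /* ERTS_IOLIST_TO_BUF_SUCCEEDED(res) holds */
}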
@@ -1667,41 +868,6 @@ Uint erts_current_reductions(Process* current, Process *p);
int erts_print_system_version(int to, void *arg, Process *c_p);
int erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg);
-#define seq_trace_output(token, msg, type, receiver, process) \
-seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
-#define seq_trace_output_exit(token, msg, type, receiver, exitfrom) \
-seq_trace_output_generic((token), (msg), (type), (receiver), NULL, (exitfrom))
-void seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
- Eterm receiver, Process *process, Eterm exitfrom);
-
-int seq_trace_update_send(Process *process);
-
-Eterm erts_seq_trace(Process *process,
- Eterm atom_type, Eterm atom_true_or_false,
- int build_result);
-
-struct trace_pattern_flags {
- unsigned int breakpoint : 1; /* Set if any other is set */
- unsigned int local : 1; /* Local call trace breakpoint */
- unsigned int meta : 1; /* Metadata trace breakpoint */
- unsigned int call_count : 1; /* Fast call count breakpoint */
- unsigned int call_time : 1; /* Fast call time breakpoint */
-};
-extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
-extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
- Binary* match_prog_set, Binary *meta_match_prog_set,
- int on, struct trace_pattern_flags,
- Eterm meta_tracer_pid, int is_blocking);
-void
-erts_get_default_trace_pattern(int *trace_pattern_is_on,
- Binary **match_spec,
- Binary **meta_match_spec,
- struct trace_pattern_flags *trace_pattern_flags,
- Eterm *meta_tracer_pid);
-int erts_is_default_trace_enabled(void);
-void erts_bif_trace_init(void);
-int erts_finish_breakpointing(void);
/*
** Call_trace uses this API for the parameter matching functions
@@ -1943,15 +1109,15 @@ dtrace_pid_str(Eterm pid, char *process_buf)
ERTS_GLB_INLINE void
dtrace_proc_str(Process *process, char *process_buf)
{
- dtrace_pid_str(process->id, process_buf);
+ dtrace_pid_str(process->common.id, process_buf);
}
ERTS_GLB_INLINE void
dtrace_port_str(Port *port, char *port_buf)
{
erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
- port_channel_no(port->id),
- port_number(port->id));
+ port_channel_no(port->common.id),
+ port_number(port->common.id));
}
ERTS_GLB_INLINE void
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 60b9238d38..be094862d4 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -43,6 +43,8 @@
#include "erl_version.h"
#include "error.h"
#include "erl_async.h"
+#define ERTS_WANT_EXTERNAL_TAGS
+#include "external.h"
#include "dtrace-wrapper.h"
extern ErlDrvEntry fd_driver_entry;
@@ -51,25 +53,29 @@ extern ErlDrvEntry spawn_driver_entry;
extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */
erts_driver_t *driver_list; /* List of all drivers, static and dynamic. */
-erts_smp_mtx_t erts_driver_list_lock; /* Mutex for driver list */
+erts_smp_rwmtx_t erts_driver_list_lock; /* rwlock protecting the driver list */
static erts_smp_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling
driver init */
static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a
per thread basis (for BC interfaces) */
-Port* erts_port; /* The port table */
+ErtsPTab erts_port erts_align_attribute(ERTS_CACHE_LINE_SIZE); /* The port table */
erts_smp_atomic_t erts_bytes_out; /* No bytes sent out of the system */
erts_smp_atomic_t erts_bytes_in; /* No bytes gotten into the system */
-Uint erts_max_ports;
-Uint erts_port_tab_index_mask;
-
const ErlDrvTermData driver_term_nil = (ErlDrvTermData)NIL;
+const Port erts_invalid_port = {{ERTS_INVALID_PORT}};
+
erts_driver_t vanilla_driver;
erts_driver_t spawn_driver;
erts_driver_t fd_driver;
+int erts_port_synchronous_ops = 0;
+int erts_port_schedule_all_ops = 0;
+int erts_port_parallelism = 0;
+
+static void deliver_result(Eterm sender, Eterm pid, Eterm res);
static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *);
static void terminate_port(Port *p);
static void pdl_init(void);
@@ -89,36 +95,12 @@ static void driver_monitor_unlock_pdl(Port *p);
static ERTS_INLINE ErlIOQueue*
drvport2ioq(ErlDrvPort drvport)
{
- int ix = (int) drvport;
- Uint32 status;
-
- if (ix < 0 || erts_max_ports <= ix)
+ Port *prt = erts_thr_drvport2port_raw(drvport);
+ erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
return NULL;
-
- if (erts_get_scheduler_data()) {
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix]));
- ERTS_LC_ASSERT(!erts_port[ix].port_data_lock
- || erts_lc_mtx_is_locked(
- &erts_port[ix].port_data_lock->mtx));
-
- status = erts_port[ix].status;
- }
- else {
- erts_smp_port_state_lock(&erts_port[ix]);
- status = erts_port[ix].status;
- erts_smp_port_state_unlock(&erts_port[ix]);
-
- ERTS_LC_ASSERT((status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- || erts_port[ix].port_data_lock);
- ERTS_LC_ASSERT(!erts_port[ix].port_data_lock
- || erts_lc_mtx_is_locked(
- &erts_port[ix].port_data_lock->mtx));
-
- }
-
- return ((status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- ? NULL
- : &erts_port[ix].ioq);
+ else
+ return &prt->ioq;
}
static ERTS_INLINE int
@@ -196,27 +178,13 @@ typedef struct line_buf_context {
dtrace_port_str((PORT), port_str);
#endif
-/* The 'number' field in a port now has two parts: the lowest bits
- contain the index in the port table, and the higher bits are a counter
- which is incremented each time we look for a free port and start from
- the beginning of the table. erts_max_ports is the number of file descriptors,
- rounded up to a power of 2.
- To get the index from a port, use the macro 'internal_port_index';
- 'port_number' returns the whole number field.
-*/
-
-static erts_smp_spinlock_t get_free_port_lck;
-static Uint last_port_num;
-static Uint port_num_mask;
-erts_smp_atomic32_t erts_ports_snapshot; /* Identifies the _next_ snapshot (not the ongoing) */
-
-
static ERTS_INLINE void
kill_port(Port *pp)
{
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */
erts_port_task_free_port(pp);
- ASSERT(pp->status & ERTS_PORT_SFLGS_DEAD);
+    /* In the non-smp case the port structure may have been deallocated by now */
}
#ifdef ERTS_SMP
@@ -227,146 +195,280 @@ erts_lc_is_port_locked(Port *prt)
{
if (!prt)
return 0;
+ ERTS_SMP_LC_ASSERT(prt->lock);
return erts_smp_lc_mtx_is_locked(prt->lock);
}
#endif
#endif /* #ifdef ERTS_SMP */
-static int
-get_free_port(void)
-{
- Uint num;
- Uint tries = erts_max_ports;
- Port* port;
+static void initq(Port* prt);
- erts_smp_spin_lock(&get_free_port_lck);
- num = last_port_num + 1;
- for (;; ++num) {
- port = &erts_port[num & erts_port_tab_index_mask];
+#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
+#define ERTS_PORT_INIT_INSTR_NEED_ID 1
+#else
+#define ERTS_PORT_INIT_INSTR_NEED_ID 0
+#endif
- erts_smp_port_state_lock(port);
- if (port->status & ERTS_PORT_SFLG_FREE) {
- last_port_num = num;
- erts_smp_spin_unlock(&get_free_port_lck);
- break;
- }
- erts_smp_port_state_unlock(port);
+static ERTS_INLINE void port_init_instr(Port *prt
+#if ERTS_PORT_INIT_INSTR_NEED_ID
+ , Eterm id
+#endif
+ )
+{
+#if !ERTS_PORT_INIT_INSTR_NEED_ID
+ Eterm id = NIL; /* Not used */
+#endif
- if (--tries == 0) {
- erts_smp_spin_unlock(&get_free_port_lck);
- return -1;
- }
+ /*
+     * Stuff that needs to be initialized with the port id
+ * in the instrumented case, but not in the normal case.
+ */
+#ifdef ERTS_SMP
+ ASSERT(prt->drv_ptr && prt->lock);
+ if (!prt->drv_ptr->lock) {
+ char *lock_str = "port_lock";
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
+ lock_str = NULL;
+#endif
+ erts_mtx_init_locked_x(prt->lock, lock_str, id);
}
- port->status = ERTS_PORT_SFLG_INITIALIZING;
- ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 0);
- erts_smp_atomic_set_nob(&port->refc, 2); /* Port alive + lock */
- erts_smp_port_state_unlock(port);
- return num & port_num_mask;
+#endif
+ erts_port_task_init_sched(&prt->sched, id);
}
-/*
- * erts_test_next_port() is only used for testing.
- */
-Sint
-erts_test_next_port(int set, Uint next)
+#if !ERTS_PORT_INIT_INSTR_NEED_ID
+static ERTS_INLINE void port_init_instr_abort(Port *prt)
{
- Uint i, num;
- Sint res = -1;
-
- erts_smp_spin_lock(&get_free_port_lck);
- if (set) {
- last_port_num = (next - 1) & port_num_mask;
+#ifdef ERTS_SMP
+ ASSERT(prt->drv_ptr && prt->lock);
+ if (!prt->drv_ptr->lock) {
+ erts_mtx_unlock(prt->lock);
+ erts_mtx_destroy(prt->lock);
}
- num = last_port_num + 1;
+#endif
+ erts_port_task_fini_sched(&prt->sched);
+}
+#endif
- for (i=0; i < erts_max_ports && res<0; ++i, ++num) {
-
- Port* port = &erts_port[num & erts_port_tab_index_mask];
+static void insert_port_struct(void *vprt, Eterm data)
+{
+ Port *prt = (Port *) vprt;
+ Eterm id = make_internal_port(data);
+#if ERTS_PORT_INIT_INSTR_NEED_ID
+ /*
+ * This cannot be done earlier in the instrumented
+     * case since we don't know 'id' until now.
+ */
+ port_init_instr(prt, id);
+#endif
+ prt->common.id = id;
+ erts_atomic32_init_relb(&prt->state, ERTS_PORT_SFLG_INITIALIZING);
+}
- erts_smp_port_state_lock(port);
+#define ERTS_CREATE_PORT_FLAG_PARALLELISM (1 << 0)
- if (port->status & ERTS_PORT_SFLG_FREE) {
- last_port_num = num - 1;
- res = num & port_num_mask;
- }
- erts_smp_port_state_unlock(port);
+static Port *create_port(char *name,
+ erts_driver_t *driver,
+ erts_mtx_t *driver_lock,
+ int create_flags,
+ Eterm pid,
+ int *enop)
+{
+ ErtsPortTaskBusyPortQ *busy_port_queue;
+ Port *prt;
+ char *p;
+ size_t port_size, busy_port_queue_size, size;
+ erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED;
+ erts_aint32_t x_pts_flgs = 0;
+#ifdef DEBUG
+    /* Make sure the debug flags survive until the port is freed */
+ state |= ERTS_PORT_SFLG_PORT_DEBUG;
+#endif
+
+#ifdef ERTS_SMP
+ if (!driver_lock) {
+ /* Align size for mutex following port struct */
+ port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
+ size += sizeof(erts_mtx_t);
}
- erts_smp_spin_unlock(&get_free_port_lck);
- return res;
-}
+ else
+#endif
+ port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
+ busy_port_queue_size
+ = ((driver->flags & ERL_DRV_FLAG_NO_BUSY_MSGQ)
+ ? 0
+ : ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ)));
+ size += busy_port_queue_size;
-static void port_cleanup(Port *prt);
+ size += sys_strlen(name) + 1;
-#ifdef ERTS_SMP
+ p = erts_alloc_fnf(ERTS_ALC_T_PORT, size);
+ if (!p) {
+ if (enop)
+ *enop = ENOMEM;
+ return NULL;
+ }
-static void
-sched_port_cleanup(void *vprt)
-{
- Port *prt = (Port *) vprt;
- erts_smp_mtx_lock(prt->lock);
- port_cleanup(prt);
-}
+ prt = (Port *) p;
+ p += port_size;
-#endif
+ if (!busy_port_queue_size)
+ busy_port_queue = NULL;
+ else {
+ busy_port_queue = (ErtsPortTaskBusyPortQ *) p;
+ p += busy_port_queue_size;
+ }
-void
-erts_port_cleanup(Port *prt)
-{
#ifdef ERTS_SMP
- if (erts_smp_mtx_trylock(prt->lock) == EBUSY)
- erts_schedule_misc_op(sched_port_cleanup, (void *) prt);
- else
+ if (driver_lock) {
+ prt->lock = driver_lock;
+ erts_mtx_lock(driver_lock);
+ }
+ else {
+ prt->lock = (erts_mtx_t *) p;
+ p += sizeof(erts_mtx_t);
+ state |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
+ }
+ erts_smp_atomic_set_nob(&prt->run_queue,
+ (erts_aint_t) erts_get_runq_current(NULL));
+ prt->xports = NULL;
+#else
+ erts_atomic32_init_nob(&prt->refc, 1);
+ prt->cleanup = 0;
#endif
- port_cleanup(prt);
-}
+
+ erts_port_task_pre_init_sched(&prt->sched, busy_port_queue);
-void
-port_cleanup(Port *prt)
-{
+ prt->name = p;
+ sys_strcpy(p, name);
+ prt->drv_ptr = driver;
+ ERTS_P_LINKS(prt) = NULL;
+ ERTS_P_MONITORS(prt) = NULL;
+ prt->linebuf = NULL;
+ prt->bp = NULL;
+ prt->suspended = NULL;
+ prt->data = am_undefined;
+ prt->port_data_lock = NULL;
+ prt->control_flags = 0;
+ prt->bytes_in = 0;
+ prt->bytes_out = 0;
+ prt->dist_entry = NULL;
+ ERTS_PORT_INIT_CONNECTED(prt, pid);
+ prt->common.u.alive.reg = NULL;
#ifdef ERTS_SMP
- Uint32 port_specific;
- erts_smp_mtx_t *mtx;
+ prt->common.u.alive.ptimer = NULL;
+#else
+ sys_memset(&prt->common.u.alive.tm, 0, sizeof(ErlTimer));
#endif
- erts_driver_t *driver;
+ erts_port_task_handle_init(&prt->timeout_task);
+ prt->psd = NULL;
+ prt->drv_data = (SWord) 0;
+ prt->os_pid = -1;
- erts_smp_port_state_lock(prt);
+ /* Set default tracing */
+ erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt));
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- driver = prt->drv_ptr;
- prt->drv_ptr = NULL;
- ASSERT(driver);
+ ASSERT(((char *) prt) == ((char *) &prt->common));
- ASSERT(prt->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
- ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&prt->refc) == 0);
+#if !ERTS_PORT_INIT_INSTR_NEED_ID
+ /*
+ * When 'id' isn't needed (the normal case), it is better to
+     * do the initialization here, avoiding unnecessary contention
+     * on the table...
+ */
+ port_init_instr(prt);
+#endif
- ASSERT(prt->status & ERTS_PORT_SFLG_PORT_DEBUG);
- ASSERT(!(prt->status & ERTS_PORT_SFLG_FREE));
- prt->status = ERTS_PORT_SFLG_FREE;
+ if (!erts_ptab_new_element(&erts_port,
+ &prt->common,
+ (void *) prt,
+ insert_port_struct)) {
+#if !ERTS_PORT_INIT_INSTR_NEED_ID
+ port_init_instr_abort(prt);
+#endif
#ifdef ERTS_SMP
+ if (driver_lock)
+ erts_mtx_unlock(driver_lock);
+#endif
+ if (enop)
+ *enop = 0;
+ return NULL;
+ }
- port_specific = (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK);
+ ASSERT(prt == (Port *) (erts_ptab_pix2intptr_nob(
+ &erts_port,
+ internal_port_index(prt->common.id))));
- mtx = prt->lock;
- ASSERT(mtx);
+ initq(prt);
- prt->lock = NULL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_port_state_unlock(prt);
- erts_smp_mtx_unlock(mtx);
+ if (erts_port_schedule_all_ops)
+ x_pts_flgs |= ERTS_PTS_FLG_FORCE_SCHED;
- if (port_specific) {
- erts_smp_mtx_destroy(mtx);
- erts_free(ERTS_ALC_T_PORT_LOCK, mtx);
- }
-#endif
+ if (create_flags & ERTS_CREATE_PORT_FLAG_PARALLELISM)
+ x_pts_flgs |= ERTS_PTS_FLG_PARALLELISM;
- if (driver->handle)
- erts_ddll_dereference_driver(driver->handle);
+ if (x_pts_flgs)
+ erts_smp_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs);
+
+ erts_atomic32_set_relb(&prt->state, state);
+ return prt;
+}
+
+#ifndef ERTS_SMP
+void
+erts_port_cleanup(Port *prt)
+{
+ if (prt->drv_ptr && prt->drv_ptr->handle)
+ erts_ddll_dereference_driver(prt->drv_ptr->handle);
+ prt->drv_ptr = NULL;
+ erts_port_dec_refc(prt);
}
+#endif
+void
+erts_port_free(Port *prt)
+{
+#if defined(ERTS_SMP) || defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
+ erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
+#endif
+ ERTS_LC_ASSERT(state & (ERTS_PORT_SFLG_INITIALIZING
+ | ERTS_PORT_SFLG_FREE));
+ ASSERT(state & ERTS_PORT_SFLG_PORT_DEBUG);
+
+#ifdef ERTS_SMP
+ ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->common.refc) == 0);
+#else
+ ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->refc) == 0);
+#endif
+
+ erts_port_task_fini_sched(&prt->sched);
+
+#ifdef ERTS_SMP
+ ASSERT(prt->lock);
+ if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
+ erts_mtx_destroy(prt->lock);
+
+ /*
+     * We cannot dereference a driver that uses driver
+     * locking until here in the smp case; otherwise,
+     * the driver lock may still be in use by others.
+     *
+     * In the non-smp case we cannot do it here, since
+     * this function may be called by non-scheduler
+     * threads; it is done in erts_port_cleanup()
+     * instead.
+     */
+ if (prt->drv_ptr->handle)
+ erts_ddll_dereference_driver(prt->drv_ptr->handle);
+#endif
+ erts_free(ERTS_ALC_T_PORT, prt);
+}
/*
** Initialize v_start to point to the small fixed vector.
@@ -414,94 +516,21 @@ static void stopq(Port* prt)
if (prt->port_data_lock) {
driver_pdl_unlock(prt->port_data_lock);
driver_pdl_dec_refc(prt->port_data_lock);
- prt->port_data_lock = NULL;
- }
-}
-
-
-
-static void
-setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
- ErlDrvData drv_data, char *name, Uint32 xstatus)
-{
- ErtsRunQueue *runq = erts_get_runq_current(NULL);
- char *new_name, *old_name;
-#ifdef DEBUG
- /* Make sure the debug flags survives until port is freed */
- xstatus |= ERTS_PORT_SFLG_PORT_DEBUG;
-#endif
- ASSERT(runq);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
-
- new_name = (char*) erts_alloc(ERTS_ALC_T_PORT_NAME, sys_strlen(name)+1);
- sys_strcpy(new_name, name);
- erts_smp_runq_lock(runq);
- erts_smp_port_state_lock(prt);
- prt->os_pid = -1;
- prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus;
- prt->snapshot = erts_smp_atomic32_read_nob(&erts_ports_snapshot);
- old_name = prt->name;
- prt->name = new_name;
-#ifdef ERTS_SMP
- erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
-#endif
- ASSERT(!prt->drv_ptr);
- prt->drv_ptr = driver;
- erts_smp_port_state_unlock(prt);
- erts_smp_runq_unlock(runq);
-#ifdef ERTS_SMP
- ASSERT(!prt->xports);
-#endif
- if (old_name) {
- erts_free(ERTS_ALC_T_PORT_NAME, (void *) old_name);
}
-
- prt->control_flags = 0;
- prt->connected = pid;
- prt->drv_data = (SWord) drv_data;
- prt->bytes_in = 0;
- prt->bytes_out = 0;
- prt->dist_entry = NULL;
- prt->reg = NULL;
-#ifdef ERTS_SMP
- prt->ptimer = NULL;
-#else
- sys_memset(&prt->tm, 0, sizeof(ErlTimer));
-#endif
- erts_port_task_handle_init(&prt->timeout_task);
- prt->suspended = NULL;
- sys_strcpy(prt->name, name);
- prt->nlinks = NULL;
- prt->monitors = NULL;
- prt->linebuf = NULL;
- prt->bp = NULL;
- prt->data = am_undefined;
- /* Set default tracing */
- erts_get_default_tracing(&(prt->trace_flags), &(prt->tracer_proc));
-
- prt->psd = NULL;
-
- initq(prt);
}
-void
-erts_wake_process_later(Port *prt, Process *process)
+int
+erts_save_suspend_process_on_port(Port *prt, Process *process)
{
- ErtsProcList** p;
- ErtsProcList* new_p;
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- if (prt->status & ERTS_PORT_SFLGS_DEAD)
- return;
-
- for (p = &(prt->suspended); *p != NULL; p = &((*p)->next))
- /* Empty loop body */;
-
- new_p = erts_proclist_create(process);
- new_p->next = NULL;
- *p = new_p;
+ int saved;
+ erts_aint32_t flags;
+ erts_port_task_sched_lock(&prt->sched);
+ flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ saved = (flags & ERTS_PTS_FLGS_BUSY) && !(flags & ERTS_PTS_FLG_EXIT);
+ if (saved)
+ erts_proclist_store_last(&prt->suspended, erts_proclist_create(process));
+ erts_port_task_sched_unlock(&prt->sched);
+ return saved;
}
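+
+/*
+ * A minimal usage sketch (illustration only, not part of this change):
+ * the return value of erts_save_suspend_process_on_port() tells the
+ * caller whether the process actually was enqueued on the port's
+ * suspended list, i.e. whether the caller should go ahead and suspend
+ * itself. The suspend_caller() helper below is a hypothetical
+ * placeholder, not an ERTS API.
+ */
+#if 0
+static void
+wait_until_port_not_busy(Port *prt, Process *c_p)
+{
+    if (erts_save_suspend_process_on_port(prt, c_p)) {
+        /* Port was busy and not exiting; the process is now queued on
+         * prt->suspended and should suspend until the port resumes it. */
+        suspend_caller(c_p); /* hypothetical helper */
+    }
+    /* else: the port is no longer busy (or is exiting); just continue. */
+}
+#endif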
/*
@@ -513,47 +542,44 @@ erts_wake_process_later(Port *prt, Process *process)
(*error_number_ptr must contain either BADARG or SYSTEM_LIMIT).
The driver start function must obey the same conventions.
*/
-int
+Port *
erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
Eterm pid, /* Current process. */
char* name, /* Driver name. */
SysDriverOpts* opts, /* Options. */
- int *error_number_ptr) /* errno in case -2 is returned */
+ int *error_type_ptr, /* error type */
+ int *error_number_ptr) /* errno in case of error type -2 */
{
- int port_num;
- int port_ix;
+
+#undef ERTS_OPEN_DRIVER_RET
+#define ERTS_OPEN_DRIVER_RET(Prt, EType, ENo) \
+ do { \
+ if (error_type_ptr) \
+ *error_type_ptr = (EType); \
+ if (error_number_ptr) \
+ *error_number_ptr = (ENo); \
+ return (Prt); \
+ } while (0)
+
ErlDrvData drv_data = 0;
- Uint32 xstatus = 0;
Port *port;
int fpe_was_unmasked;
-
- if (error_number_ptr)
- *error_number_ptr = 0;
+ int error_type, error_number;
+ int port_errno = 0;
+ erts_mtx_t *driver_lock = NULL;
+ int cprt_flgs = 0;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- if ((port_num = get_free_port()) < 0) {
- if (error_number_ptr) {
- *error_number_ptr = SYSTEM_LIMIT;
- }
- return -3;
- }
-
- port_ix = port_num & erts_port_tab_index_mask;
- port = &erts_port[port_ix];
- port->id = make_internal_port(port_num);
-
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
if (!driver) {
for (driver = driver_list; driver; driver = driver->next) {
if (sys_strcmp(driver->name, name) == 0)
break;
}
if (!driver) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- if (error_number_ptr)
- *error_number_ptr = BADARG;
- return -3;
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG);
}
}
if (driver == &spawn_driver) {
@@ -597,59 +623,49 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
}
if (driver == NULL || (driver != &spawn_driver && opts->exit_status)) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- if (error_number_ptr) {
- *error_number_ptr = BADARG;
- }
- /* Need to mark the port as free again */
- erts_smp_port_state_lock(port);
- port->status = ERTS_PORT_SFLG_FREE;
- ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 2);
- erts_smp_atomic_set_nob(&port->refc, 0);
- erts_smp_port_state_unlock(port);
- return -3;
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG);
}
- /*
- * We'll set up the port before calling the start function,
- * to allow message sending and setting timers in the start function.
- */
-
#ifdef ERTS_SMP
- ASSERT(!port->lock);
- port->lock = driver->lock;
- if (!port->lock) {
- port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK,
- sizeof(erts_smp_mtx_t));
- erts_smp_mtx_init_x(port->lock,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_lock" : NULL,
-#else
- "port_lock",
-#endif
- port->id);
- xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
- }
+ driver_lock = driver->lock;
#endif
if (driver->handle != NULL) {
erts_ddll_increment_port_count(driver->handle);
erts_ddll_reference_driver(driver->handle);
}
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(port->lock);
-#endif
+ /*
+ * We'll set up the port before calling the start function,
+ * to allow message sending and setting timers in the start function.
+ */
+
+ if (opts->parallelism)
+ cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM;
- setup_port(port, pid, driver, drv_data, name, xstatus);
+ port = create_port(name, driver, driver_lock, cprt_flgs, pid, &port_errno);
+ if (!port) {
+ if (driver->handle) {
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_ddll_decrement_port_count(driver->handle);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_ddll_dereference_driver(driver->handle);
+ }
+ if (port_errno)
+ ERTS_OPEN_DRIVER_RET(NULL, -2, port_errno);
+ else
+ ERTS_OPEN_DRIVER_RET(NULL, -3, SYSTEM_LIMIT);
+ }
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
trace_port_open(port,
pid,
am_atom_put(port->name, strlen(port->name)));
}
-
+
+ error_number = error_type = 0;
if (driver->start) {
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(port, am_in, am_start);
@@ -662,56 +678,63 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
}
#endif
fpe_was_unmasked = erts_block_fpe();
- drv_data = (*driver->start)((ErlDrvPort)(port_ix),
- name, opts);
+ drv_data = (*driver->start)((ErlDrvPort) port, name, opts);
+ if (((SWord) drv_data) == -1)
+ error_type = -1;
+ else if (((SWord) drv_data) == -2) {
+ /*
+ * We need to save errno promptly after the
+ * call to the 'start' callback, before
+ * something else modifies it.
+ */
+ error_type = -2;
+ error_number = errno;
+ }
+ else if (((SWord) drv_data) == -3) {
+ error_type = -3;
+ error_number = BADARG;
+ }
+
erts_unblock_fpe(fpe_was_unmasked);
port->caller = NIL;
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(port, am_out, am_start);
}
- if (error_number_ptr && ((SWord) drv_data) == (SWord) -2)
- *error_number_ptr = errno;
#ifdef ERTS_SMP
if (port->xports)
- erts_smp_xports_unlock(port);
+ erts_port_handle_xports(port);
ASSERT(!port->xports);
#endif
}
- if (((SWord)drv_data) == -1 ||
- ((SWord)drv_data) == -2 ||
- ((SWord)drv_data) == -3) {
- int res = (int) ((SWord) drv_data);
-
- if (res == -3 && error_number_ptr) {
- *error_number_ptr = BADARG;
- }
-
+ if (error_type) {
/*
* Must clean up the port.
*/
#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(port->ptimer);
+ erts_cancel_smp_ptimer(port->common.u.alive.ptimer);
#else
- erts_cancel_timer(&(port->tm));
+ erts_cancel_timer(&(port->common.u.alive.tm));
#endif
stopq(port);
- kill_port(port);
if (port->linebuf != NULL) {
erts_free(ERTS_ALC_T_LINEBUF,
(void *) port->linebuf);
port->linebuf = NULL;
}
if (driver->handle != NULL) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
}
+ kill_port(port);
erts_port_release(port);
- return res;
+ ERTS_OPEN_DRIVER_RET(NULL, error_type, error_number);
}
- port->drv_data = (SWord) drv_data;
- return port_ix;
+ port->drv_data = (UWord) drv_data;
+ ERTS_OPEN_DRIVER_RET(port, 0, 0);
+
+#undef ERTS_OPEN_DRIVER_RET
}
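+
+/*
+ * Sketch of how a caller might interpret the new return convention of
+ * erts_open_driver() (illustration only; port_open_result_to_term() and
+ * the atom mapping are assumptions, not taken from this change):
+ *   non-NULL port        -> success
+ *   NULL, error type -3  -> *error_number_ptr is BADARG or SYSTEM_LIMIT
+ *   NULL, error type -2  -> *error_number_ptr is the errno saved from the
+ *                           driver's start callback
+ *   NULL, error type -1  -> plain failure reported by the start callback
+ */
+#if 0
+static Eterm
+port_open_result_to_term(Port *prt, int err_type, int err_num)
+{
+    if (prt)
+        return prt->common.id;      /* success: the new port identifier */
+    switch (err_type) {
+    case -3:                        /* err_num is BADARG or SYSTEM_LIMIT */
+        return err_num == SYSTEM_LIMIT ? am_system_limit : am_badarg;
+    case -2:                        /* err_num holds errno; a real caller
+                                     * would map it to a posix error atom */
+        return am_badarg;
+    default:                        /* -1: the start callback just failed */
+        return am_badarg;
+    }
+}
+#endif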
#ifdef ERTS_SMP
@@ -736,102 +759,122 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
char* name, /* Driver name */
ErlDrvData drv_data) /* Driver data */
{
+ int cprt_flgs = 0;
Port *creator_port;
Port* port;
erts_driver_t *driver;
Process *rp;
- int port_num;
- Eterm port_id;
- Uint32 xstatus = 0;
+ erts_mtx_t *driver_lock = NULL;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- creator_port = erts_drvport2port(creator_port_ix);
+ /* Must be called from a scheduler thread */
+ if (!erts_get_scheduler_id())
+ return ERTS_INVALID_ERL_DRV_PORT;
+
+ creator_port = erts_drvport2port(creator_port_ix, NULL);
if (!creator_port)
- return (ErlDrvTermData) -1;
+ return ERTS_INVALID_ERL_DRV_PORT;
+
+ rp = erts_proc_lookup(pid);
+ if (!rp)
+ return ERTS_INVALID_ERL_DRV_PORT;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(creator_port));
driver = creator_port->drv_ptr;
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
if (!erts_ddll_driver_ok(driver->handle)) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- return (ErlDrvTermData) -1;
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ return ERTS_INVALID_ERL_DRV_PORT;
}
- rp = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- return (ErlDrvTermData) -1; /* pid does not exist */
+ if (driver->handle != NULL) {
+ erts_ddll_increment_port_count(driver->handle);
+ erts_ddll_reference_referenced_driver(driver->handle);
+ }
+
+#ifdef ERTS_SMP
+ driver_lock = driver->lock;
+#endif
+
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+
+ /* Inherit parallelism flag from parent */
+ if (ERTS_PTS_FLG_PARALLELISM &
+ erts_smp_atomic32_read_nob(&creator_port->sched.flags))
+ cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM;
+ port = create_port(name, driver, driver_lock, cprt_flgs, pid, NULL);
+ if (!port) {
+ if (driver->handle) {
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_ddll_decrement_port_count(driver->handle);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_ddll_dereference_driver(driver->handle);
+ }
+ return ERTS_INVALID_ERL_DRV_PORT;
}
- if ((port_num = get_free_port()) < 0) {
- errno = SYSTEM_LIMIT;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- return (ErlDrvTermData) -1;
+ if (driver->handle) {
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_ddll_decrement_port_count(driver->handle);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ }
+ kill_port(port);
+ erts_port_release(port);
+ return ERTS_INVALID_ERL_DRV_PORT;
}
- port_id = make_internal_port(port_num);
- port = &erts_port[port_num & erts_port_tab_index_mask];
+ erts_add_link(&ERTS_P_LINKS(port), LINK_PID, pid);
+ erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, port->common.id);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
#ifdef ERTS_SMP
- ASSERT(!port->lock);
- port->lock = driver->lock;
- if (!port->lock) {
+ if (!driver_lock) {
ErtsXPortsList *xplp = xports_list_alloc();
xplp->port = port;
xplp->next = creator_port->xports;
creator_port->xports = xplp;
- port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK,
- sizeof(erts_smp_mtx_t));
- erts_smp_mtx_init_locked_x(port->lock,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_lock" : NULL,
-#else
- "port_lock",
-#endif
- port_id);
- xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
}
-
#endif
- if (driver->handle != NULL) {
- erts_ddll_increment_port_count(driver->handle);
- erts_ddll_reference_referenced_driver(driver->handle);
- }
- erts_smp_mtx_unlock(&erts_driver_list_lock);
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+ port->drv_data = (UWord) drv_data;
- setup_port(port, pid, driver, drv_data, name, xstatus);
- port->id = port_id;
-
- erts_add_link(&(port->nlinks), LINK_PID, pid);
- erts_add_link(&(rp->nlinks), LINK_PID, port_id);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- return port_num & erts_port_tab_index_mask;
+ return (ErlDrvPort) port;
}
#ifdef ERTS_SMP
-void
-erts_smp_xports_unlock(Port *prt)
+int erts_port_handle_xports(Port *prt)
{
+ int reds = 0;
ErtsXPortsList *xplp;
ASSERT(prt);
xplp = prt->xports;
ASSERT(xplp);
while (xplp) {
+ Port *rprt = xplp->port;
ErtsXPortsList *free_xplp;
- if (xplp->port->xports)
- erts_smp_xports_unlock(xplp->port);
- erts_port_release(xplp->port);
+ erts_aint32_t state;
+ if (rprt->xports)
+ reds += erts_port_handle_xports(rprt);
+ state = erts_atomic32_read_nob(&rprt->state);
+ if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(rprt)) {
+ terminate_port(rprt);
+ reds += ERTS_PORT_REDS_TERMINATE;
+ }
+ erts_port_release(rprt);
free_xplp = xplp;
xplp = xplp->next;
xports_list_free(free_xplp);
+ reds++;
}
prt->xports = NULL;
+ return reds;
}
#endif
@@ -866,8 +909,8 @@ io_list_to_vec(Eterm obj, /* io-list */
DECLARE_ESTACK(s);
Eterm* objp;
char *buf = cbin->orig_bytes;
- ErlDrvSizeT len = cbin->orig_size;
- ErlDrvSizeT csize = 0;
+ Uint len = cbin->orig_size;
+ Uint csize = 0;
int vlen = 0;
char* cptr = buf;
@@ -982,7 +1025,7 @@ io_list_to_vec(Eterm obj, /* io-list */
#define IO_LIST_VEC_COUNT(obj) \
do { \
- ErlDrvSizeT _size = binary_size(obj); \
+ Uint _size = binary_size(obj); \
Eterm _real; \
ERTS_DECLARE_DUMMY(Uint _offset); \
int _bitoffs; \
@@ -1033,8 +1076,9 @@ do { \
*/
static int
-io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize,
- Uint* pvsize, Uint* pcsize, Uint* total_size)
+io_list_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ ErlDrvSizeT* total_size)
{
DECLARE_ESTACK(s);
Eterm* objp;
@@ -1045,7 +1089,7 @@ io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize,
Uint p_v_size = 0;
Uint p_c_size = 0;
Uint p_in_clist = 0;
- Uint total;
+ Uint total; /* Uint due to halfword emulator */
goto L_jump_start; /* avoid a push */
@@ -1105,7 +1149,7 @@ io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize,
if (total < c_size) {
goto L_overflow_error;
}
- *total_size = total;
+ *total_size = (ErlDrvSizeT) total;
DESTROY_ESTACK(s);
*vsize = v_size;
@@ -1120,56 +1164,724 @@ io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize,
return 1;
}
-/* write data to a port */
-int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
-{
- char *buf;
- erts_driver_t *drv = p->drv_ptr;
- Uint size;
+typedef enum {
+ ERTS_TRY_IMM_DRV_CALL_OK,
+ ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK,
+ ERTS_TRY_IMM_DRV_CALL_INVALID_PORT,
+ ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS
+} ErtsTryImmDrvCallResult;
+
+typedef struct {
+ Process *c_p; /* Currently executing process (unlocked) */
+ Port *port; /* Port to operate on */
+ Eterm port_op; /* port operation as an atom */
+ erts_aint32_t state; /* in: invalid state; out: read state (if read) */
+ erts_aint32_t sched_flags; /* in: invalid flags; out: read flags (if read) */
+ int async; /* Asynchronous operation */
+ int pre_chk_sched_flags; /* Check sched flags before lock? */
int fpe_was_unmasked;
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p) || ERTS_IS_CRASH_DUMPING);
+} ErtsTryImmDrvCallState;
+
+#define ERTS_INIT_TRY_IMM_DRV_CALL_STATE(C_P, PRT, SFLGS, PTS_FLGS, A, PRT_OP) \
+ {(C_P), (PRT), (PRT_OP), (SFLGS), (PTS_FLGS), (A), 1, 0}
+
+/*
+ * Try doing an immediate driver callback call from a process. If
+ * this fails, the operation should be scheduled in the normal case...
+ *
+ */
+static ERTS_INLINE ErtsTryImmDrvCallResult
+try_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ ErtsTryImmDrvCallResult res;
+ erts_aint32_t invalid_state, invalid_sched_flags;
+ Port *prt = sp->port;
+ Process *c_p = sp->c_p;
+
+ ASSERT(is_atom(sp->port_op));
+
+ invalid_sched_flags = ERTS_PTS_FLGS_FORCE_SCHEDULE_OP;
+ invalid_sched_flags |= sp->sched_flags;
+ if (sp->async)
+ invalid_sched_flags |= ERTS_PTS_FLG_PARALLELISM;
+
+ if (sp->pre_chk_sched_flags) {
+ sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sp->sched_flags & invalid_sched_flags)
+ return ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ }
+
+ if (erts_smp_port_trylock(prt) == EBUSY)
+ return ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK;
+
+ invalid_state = sp->state;
+ sp->state = erts_atomic32_read_nob(&prt->state);
+ if (sp->state & invalid_state) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_PORT;
+ goto locked_fail;
+ }
+
+ sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sp->sched_flags & invalid_sched_flags) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ goto locked_fail;
+ }
+
+ if (c_p) {
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
+ trace_virtual_sched(c_p, am_out);
+ if (erts_system_profile_flags.runnable_procs
+ && erts_system_profile_flags.exclusive)
+ profile_runnable_proc(c_p, am_inactive);
+
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+
ERTS_SMP_CHK_NO_PROC_LOCKS;
- p->caller = caller_id;
- if (drv->outputv != NULL) {
- Uint vsize;
- Uint csize;
- Uint pvsize;
- Uint pcsize;
- ErlDrvSizeT blimit;
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_in, sp->port_op);
+ if (erts_system_profile_flags.runnable_ports
+ && !erts_port_is_scheduled(prt))
+ profile_runnable_port(prt, am_active);
+
+ sp->fpe_was_unmasked = erts_block_fpe();
+
+ return ERTS_TRY_IMM_DRV_CALL_OK;
+
+locked_fail:
+ erts_port_release(prt);
+ return res;
+}
+
+static ERTS_INLINE void
+finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ Port *prt = sp->port;
+ Process *c_p = sp->c_p;
+
+ erts_port_driver_callback_epilogue(prt, NULL);
+
+ erts_unblock_fpe(sp->fpe_was_unmasked);
+
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_out, sp->port_op);
+ if (erts_system_profile_flags.runnable_ports
+ && !erts_port_is_scheduled(prt))
+ profile_runnable_port(prt, am_inactive);
+
+ erts_port_release(prt);
+
+ if (c_p) {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
+ trace_virtual_sched(c_p, am_in);
+ if (erts_system_profile_flags.runnable_procs
+ && erts_system_profile_flags.exclusive)
+ profile_runnable_proc(c_p, am_active);
+ }
+}
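+
+/*
+ * try_imm_drv_call()/finalize_imm_drv_call() are meant to be used as a
+ * pair, as the signal implementations later in this file do (see e.g.
+ * bad_port_signal()). A minimal sketch of the calling pattern follows;
+ * do_driver_call() is a hypothetical stand-in for the actual driver
+ * callback, and a real caller would build signal data and call
+ * erts_schedule_proc2port_signal() in the fallback branch.
+ */
+#if 0
+static ErtsPortOpResult
+imm_call_pattern(Process *c_p, Port *prt)
+{
+    ErtsTryImmDrvCallState try_call_state
+        = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
+                                           prt,
+                                           ERTS_PORT_SFLGS_INVALID_LOOKUP,
+                                           0,  /* no extra sched flags */
+                                           1,  /* async: no reply wanted */
+                                           am_command);
+    switch (try_imm_drv_call(&try_call_state)) {
+    case ERTS_TRY_IMM_DRV_CALL_OK:
+        do_driver_call(prt);                  /* hypothetical */
+        finalize_imm_drv_call(&try_call_state);
+        return ERTS_PORT_OP_DONE;
+    case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+        return ERTS_PORT_OP_DROPPED;
+    default:
+        /* busy lock or invalid sched flags: schedule the operation */
+        return ERTS_PORT_OP_SCHEDULED;
+    }
+}
+#endif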
+
+/*
+ * force_imm_drv_call()/finalize_force_imm_drv_call() should *only*
+ * be used while crash dumping...
+ */
+static ErtsTryImmDrvCallResult
+force_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ erts_aint32_t invalid_state;
+ Port *prt = sp->port;
+
+ ASSERT(ERTS_IS_CRASH_DUMPING);
+ ASSERT(is_atom(sp->port_op));
+
+ invalid_state = sp->state;
+ sp->state = erts_atomic32_read_nob(&prt->state);
+ if (sp->state & invalid_state)
+ return ERTS_TRY_IMM_DRV_CALL_INVALID_PORT;
+
+ sp->fpe_was_unmasked = erts_block_fpe();
+
+ return ERTS_TRY_IMM_DRV_CALL_OK;
+}
+
+static void
+finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ erts_unblock_fpe(sp->fpe_was_unmasked);
+}
+
+#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (REF_THING_SIZE + 3)
+
+static ERTS_INLINE void
+queue_port_sched_op_reply(Process *rp,
+ ErtsProcLocks *rp_locksp,
+ Eterm *hp_start,
+ Eterm *hp,
+ Uint h_size,
+ ErlHeapFragment* bp,
+ Uint32 *ref_num,
+ Eterm msg)
+{
+ Eterm ref = make_internal_ref(hp);
+ write_ref_thing(hp, ref_num[0], ref_num[1], ref_num[2]);
+ hp += REF_THING_SIZE;
+
+ msg = TUPLE2(hp, ref, msg);
+ hp += 3;
+
+ if (!bp) {
+ HRelease(rp, hp_start + h_size, hp);
+ }
+ else {
+ Uint used_h_size = hp - hp_start;
+ ASSERT(h_size >= used_h_size);
+ if (h_size > used_h_size)
+ bp = erts_resize_message_buffer(bp, used_h_size, &msg, 1);
+ }
+
+ erts_queue_message(rp,
+ rp_locksp,
+ bp,
+ msg,
+ NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+}
+
+static void
+port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg)
+{
+ Process *rp = erts_proc_lookup_raw(to);
+ if (rp) {
+ ErlOffHeap *ohp;
+ ErlHeapFragment* bp;
+ Eterm msg_copy;
+ Uint hsz, msg_sz;
+ Eterm *hp, *hp_start;
+ ErtsProcLocks rp_locks = 0;
+
+ hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+ if (is_immed(msg))
+ msg_sz = 0;
+ else {
+ msg_sz = size_object(msg);
+ hsz += msg_sz;
+ }
+
+ hp_start = hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+ if (is_immed(msg))
+ msg_copy = msg;
+ else
+ msg_copy = copy_struct(msg, msg_sz, &hp, ohp);
+
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ ref_num,
+ msg_copy);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+}
+
+
+ErtsPortOpResult
+erts_schedule_proc2port_signal(Process *c_p,
+ Port *prt,
+ Eterm caller,
+ Eterm *refp,
+ ErtsProc2PortSigData *sigdp,
+ int task_flags,
+ ErtsProc2PortSigCallback callback)
+{
+ int sched_res;
+ if (!refp) {
+ if (c_p)
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ else {
+ ASSERT(c_p);
+ sigdp->flags |= ERTS_P2P_SIG_DATA_FLG_REPLY;
+ erts_make_ref_in_array(sigdp->ref);
+ *refp = erts_proc_store_ref(c_p, sigdp->ref);
+
+ /*
+ * Caller needs to wait for a message containing
+ * the ref that we just created. No such message
+ * can exist in the caller's message queue at this time.
+ * We therefore move the save pointer of the
+ * caller's message queue to the end of the queue.
+ *
+ * NOTE: It is of vital importance that the caller
+ * immediately does a receive that unconditionally
+ * waits for the message with the reference;
+ * otherwise, the next receive will *not* work
+ * as expected!
+ */
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+
+ if (ERTS_PROC_PENDING_EXIT(c_p)) {
+ /* need to exit caller instead */
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ KILL_CATCHES(c_p);
+ c_p->freason = EXC_EXIT;
+ return ERTS_PORT_OP_CALLER_EXIT;
+ }
+
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ c_p->msg.save = c_p->msg.last;
+
+ erts_smp_proc_unlock(c_p,
+ (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCKS_MSG_RECEIVE));
+ }
+
+
+ sigdp->caller = caller;
+
+ /* Schedule the signal for later execution... */
+ sched_res = erts_port_task_schedule(prt->common.id,
+ NULL,
+ ERTS_PORT_TASK_PROC_SIG,
+ sigdp,
+ callback,
+ task_flags);
+
+ if (c_p)
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ if (sched_res != 0) {
+ if (refp)
+ *refp = NIL;
+ return ERTS_PORT_OP_DROPPED;
+ }
+ return ERTS_PORT_OP_SCHEDULED;
+}
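+
+/*
+ * Sketch of the callback/reply side of erts_schedule_proc2port_signal()
+ * (illustration only): a scheduled callback is expected to answer via
+ * port_sched_op_reply() when ERTS_P2P_SIG_DATA_FLG_REPLY is set, so that
+ * the process that got the reference in *refp receives a {Ref, Result}
+ * message. The do_the_work() helper is a hypothetical placeholder.
+ */
+#if 0
+static int
+port_sig_example(Port *prt, erts_aint32_t state, int op,
+                 ErtsProc2PortSigData *sigdp)
+{
+    Eterm result = am_badarg;
+    if (op == ERTS_PROC2PORT_SIG_EXEC
+        && !(state & ERTS_PORT_SFLGS_INVALID_LOOKUP))
+        result = do_the_work(prt, sigdp) ? am_true : am_false; /* hypothetical */
+    if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+        port_sched_op_reply(sigdp->caller, sigdp->ref, result);
+    return ERTS_PORT_REDS_BADSIG; /* a real callback returns its own
+                                   * ERTS_PORT_REDS_* reduction cost */
+}
+#endif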
+
+static ERTS_INLINE void
+send_badsig(Port *prt)
+{
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ Process* rp;
+ Eterm connected = ERTS_PORT_GET_CONNECTED(prt);
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_get_scheduler_id());
+
+ ASSERT(is_internal_pid(connected));
+
+ rp = erts_proc_lookup_raw(connected);
+ if (rp) {
+ erts_smp_proc_lock(rp, rp_locks);
+ if (!ERTS_PROC_IS_EXITING(rp))
+ (void) erts_send_exit_signal(NULL,
+ prt->common.id,
+ rp,
+ &rp_locks,
+ am_badsig,
+ NIL,
+ NULL,
+ 0);
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+}
+
+static void
+badsig_received(int bang_op,
+ Port *prt,
+ erts_aint32_t state,
+ int bad_output_value)
+{
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! Something" operation
+ * else
+ * we are part of a call to a port BIF
+ * behave accordingly...
+ */
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
+ if (bad_output_value) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Bad value on output port '%s'\n", prt->name);
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
+ if (bang_op)
+ send_badsig(prt);
+ }
+}
+
+static int
+port_badsig(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ if (op == ERTS_PROC2PORT_SIG_EXEC)
+ badsig_received(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP,
+ prt,
+ state,
+ sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT);
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ return ERTS_PORT_REDS_BADSIG;
+}
+
+
+/*
+ * bad_port_signal() will
+ * - preserve the signal order.
+ * - send a 'badsig' exit signal to the connected process if 'from' is an
+ * internal pid and the port is alive when the bad signal reaches
+ * it.
+ */
+static ErtsPortOpResult
+bad_port_signal(Process *c_p,
+ int flags,
+ Port *prt,
+ Eterm from,
+ Eterm *refp,
+ Eterm port_op)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ port_op);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ badsig_received(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ prt,
+ try_call_state.state,
+ flags & ERTS_PORT_SIG_FLG_BAD_OUTPUT);
+ finalize_imm_drv_call(&try_call_state);
+ return ERTS_PORT_OP_BADARG;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule badsig() call instead... */
+ break;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = (flags & ~ERTS_P2P_SIG_TYPE_MASK) | ERTS_P2P_SIG_TYPE_BAD;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ refp,
+ sigdp,
+ 0,
+ port_badsig);
+}
+
+
+/*
+ * Driver outputv() callback
+ */
+
+static ERTS_INLINE void
+call_driver_outputv(int bang_op,
+ Eterm caller,
+ Eterm from,
+ Port *prt,
+ erts_driver_t *drv,
+ ErlIOVec *evp)
+{
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! {From, {command, Data}}" operation
+ * else
+ * we are part of a call to port_command/[2,3]
+ * behave accordingly...
+ */
+ if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt))
+ send_badsig(prt);
+ else {
+ ErlDrvSizeT size = evp->size;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_outputv)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt);
+ DTRACE4(driver_outputv, process_str, port_str, prt->name, size);
+ }
+#endif
+
+ prt->caller = caller;
+ (*drv->outputv)((ErlDrvData) prt->drv_data, evp);
+ prt->caller = NIL;
+
+ prt->bytes_out += size;
+ erts_smp_atomic_add_nob(&erts_bytes_out, size);
+ }
+}
+
+static ERTS_INLINE void
+cleanup_scheduled_outputv(ErlIOVec *ev, ErlDrvBinary *cbinp)
+{
+ int i;
+ /* Need to free all binaries */
+ for (i = 1; i < ev->vsize; i++)
+ if (ev->binv[i])
+ driver_free_binary(ev->binv[i]);
+ if (cbinp)
+ driver_free_binary(cbinp);
+ erts_free(ERTS_ALC_T_DRV_CMD_DATA, ev);
+}
+
+static int
+port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ Eterm reply;
+
+ switch (op) {
+ case ERTS_PROC2PORT_SIG_EXEC:
+ /* Execution of a scheduled outputv() call */
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ reply = am_badarg;
+ else {
+ call_driver_outputv(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP,
+ sigdp->caller,
+ sigdp->u.outputv.from,
+ prt,
+ prt->drv_ptr,
+ sigdp->u.outputv.evp);
+ reply = am_true;
+ }
+ break;
+ case ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND:
+ reply = am_false;
+ break;
+ default:
+ reply = am_badarg;
+ break;
+ }
+
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, reply);
+
+ cleanup_scheduled_outputv(sigdp->u.outputv.evp,
+ sigdp->u.outputv.cbinp);
+
+ return ERTS_PORT_REDS_CMD_OUTPUTV;
+}
+
+/*
+ * Driver output() callback
+ */
+
+static ERTS_INLINE void
+call_driver_output(int bang_op,
+ Eterm caller,
+ Eterm from,
+ Port *prt,
+ erts_driver_t *drv,
+ char *bufp,
+ ErlDrvSizeT size)
+{
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! {From, {command, Data}}" operation
+ * else
+ * we are part of a call to port_command/[2,3]
+ * behave accordingly...
+ */
+ if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt))
+ send_badsig(prt);
+ else {
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_output)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt);
+ DTRACE4(driver_output, process_str, port_str, prt->name, size);
+ }
+#endif
+
+ prt->caller = caller;
+ (*drv->output)((ErlDrvData) prt->drv_data, bufp, size);
+ prt->caller = NIL;
+
+ prt->bytes_out += size;
+ erts_smp_atomic_add_nob(&erts_bytes_out, size);
+ }
+}
+
+static ERTS_INLINE void
+cleanup_scheduled_output(char *bufp)
+{
+ erts_free(ERTS_ALC_T_DRV_CMD_DATA, bufp);
+}
+
+static int
+port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ Eterm reply;
+
+ switch (op) {
+ case ERTS_PROC2PORT_SIG_EXEC:
+ /* Execution of a scheduled output() call */
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ reply = am_badarg;
+ else {
+ call_driver_output(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP,
+ sigdp->caller,
+ sigdp->u.output.from,
+ prt,
+ prt->drv_ptr,
+ sigdp->u.output.bufp,
+ sigdp->u.output.size);
+ reply = am_true;
+ }
+ break;
+ case ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND:
+ reply = am_false;
+ break;
+ default:
+ reply = am_badarg;
+ break;
+ }
+
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, reply);
+
+ cleanup_scheduled_output(sigdp->u.output.bufp);
+
+ return ERTS_PORT_REDS_CMD_OUTPUT;
+}
+
+ErtsPortOpResult
+erts_port_output(Process *c_p,
+ int flags,
+ Port *prt,
+ Eterm from,
+ Eterm list,
+ Eterm *refp)
+{
+ ErtsPortOpResult res;
+ ErtsProc2PortSigData *sigdp;
+ erts_driver_t *drv = prt->drv_ptr;
+ size_t size;
+ int try_call;
+ erts_aint32_t sched_flags, busy_flgs, invalid_flags;
+ int task_flags;
+ ErtsProc2PortSigCallback port_sig_callback;
+ ErlDrvBinary *cbin = NULL;
+ ErlIOVec *evp = NULL;
+ char *buf = NULL;
+ int force_immediate_call = (flags & ERTS_PORT_SIG_FLG_FORCE_IMM_CALL);
+
+ ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP
+ | ERTS_PORT_SIG_FLG_NOSUSPEND
+ | ERTS_PORT_SIG_FLG_FORCE
+ | ERTS_PORT_SIG_FLG_FORCE_IMM_CALL)) == 0);
+
+ busy_flgs = ((flags & ERTS_PORT_SIG_FLG_FORCE)
+ ? ((erts_aint32_t) 0)
+ : ERTS_PTS_FLGS_BUSY);
+ invalid_flags = busy_flgs;
+ if (!refp)
+ invalid_flags |= ERTS_PTS_FLG_PARALLELISM;
+
+ /*
+ * Assumes the caller has checked that the port is valid...
+ */
+
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT))
+ return ((sched_flags & ERTS_PTS_FLG_EXIT)
+ ? ERTS_PORT_OP_DROPPED
+ : ERTS_PORT_OP_BUSY);
+
+ try_call = (force_immediate_call /* crash dumping */
+ || !(sched_flags & (invalid_flags
+ | ERTS_PTS_FLGS_FORCE_SCHEDULE_OP)));
+
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(port_command)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(c_p ? c_p->common.id : ERTS_INVALID_PID, prt);
+ DTRACE4(port_command, process_str, port_str, prt->name, "command");
+ }
+#endif
+
+ if (drv->outputv) {
+ ErlIOVec ev;
SysIOVec iv[SMALL_WRITE_VEC];
ErlDrvBinary* bv[SMALL_WRITE_VEC];
SysIOVec* ivp;
ErlDrvBinary** bvp;
- ErlDrvBinary* cbin;
- ErlIOVec ev;
+ int vsize;
+ Uint csize;
+ Uint pvsize;
+ Uint pcsize;
+ Uint blimit;
+ size_t iov_offset, binv_offset, alloc_size;
- if (io_list_vec_len(list, &vsize, &csize,
- &pvsize, &pcsize, &size)) {
+ if (io_list_vec_len(list, &vsize, &csize, &pvsize, &pcsize, &size))
goto bad_value;
+
+ iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlIOVec));
+ binv_offset = iov_offset;
+ binv_offset += ERTS_ALC_DATA_ALIGN_SIZE((vsize+1)*sizeof(SysIOVec));
+ alloc_size = binv_offset;
+ alloc_size += (vsize+1)*sizeof(ErlDrvBinary *);
+
+ if (try_call && vsize < SMALL_WRITE_VEC) {
+ ivp = ev.iov = iv;
+ bvp = ev.binv = bv;
+ evp = &ev;
+ }
+ else {
+ char *ptr = erts_alloc((try_call
+ ? ERTS_ALC_T_TMP
+ : ERTS_ALC_T_DRV_CMD_DATA), alloc_size);
+
+ evp = (ErlIOVec *) ptr;
+ ivp = evp->iov = (SysIOVec *) (ptr + iov_offset);
+ bvp = evp->binv = (ErlDrvBinary **) (ptr + binv_offset);
}
+
/* To pack or not to pack (small binaries) ...? */
- vsize++;
- if (vsize <= SMALL_WRITE_VEC) {
+ if (vsize < SMALL_WRITE_VEC) {
/* Do NOT pack */
blimit = 0;
- } else {
+ }
+ else {
/* Do pack */
vsize = pvsize + 1;
csize = pcsize;
blimit = ERL_SMALL_IO_BIN_LIMIT;
}
/* Use vsize and csize from now on */
- if (vsize <= SMALL_WRITE_VEC) {
- ivp = iv;
- bvp = bv;
- } else {
- ivp = (SysIOVec *) erts_alloc(ERTS_ALC_T_TMP,
- vsize * sizeof(SysIOVec));
- bvp = (ErlDrvBinary**) erts_alloc(ERTS_ALC_T_TMP,
- vsize * sizeof(ErlDrvBinary*));
- }
+
cbin = driver_alloc_binary(csize);
if (!cbin)
erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
@@ -1178,210 +1890,759 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
ivp[0].iov_base = NULL;
ivp[0].iov_len = 0;
bvp[0] = NULL;
- ev.vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
- if (ev.vsize < 0) {
- if (ivp != iv) {
- erts_free(ERTS_ALC_T_TMP, (void *) ivp);
- }
- if (bvp != bv) {
- erts_free(ERTS_ALC_T_TMP, (void *) bvp);
- }
+ evp->vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
+ if (evp->vsize < 0) {
+ if (evp != &ev)
+ erts_free(try_call ? ERTS_ALC_T_TMP : ERTS_ALC_T_DRV_CMD_DATA,
+ evp);
driver_free_binary(cbin);
goto bad_value;
}
- ev.vsize++;
#if 0
/* This assertion may say something useful, but it can
be falsified during the emulator test suites. */
- ASSERT(ev.vsize == vsize);
+ ASSERT(evp->vsize == vsize);
#endif
- ev.size = size; /* total size */
- ev.iov = ivp;
- ev.binv = bvp;
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(driver_outputv)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
- DTRACE4(driver_outputv, process_str, port_str, p->name, size);
- }
-#endif
- fpe_was_unmasked = erts_block_fpe();
- (*drv->outputv)((ErlDrvData)p->drv_data, &ev);
- erts_unblock_fpe(fpe_was_unmasked);
- if (ivp != iv) {
- erts_free(ERTS_ALC_T_TMP, (void *) ivp);
- }
- if (bvp != bv) {
- erts_free(ERTS_ALC_T_TMP, (void *) bvp);
+ evp->vsize++;
+ evp->size = size; /* total size */
+
+ if (!try_call) {
+ int i;
+ /* Need to increase refc on all binaries */
+ for (i = 1; i < evp->vsize; i++)
+ if (bvp[i])
+ driver_binary_inc_refc(bvp[i]);
}
- driver_free_binary(cbin);
- } else {
- int r;
-
- /* Try with an 8KB buffer first (will often be enough I guess). */
- size = 8*1024;
- /* See below why the extra byte is added. */
- buf = erts_alloc(ERTS_ALC_T_TMP, size+1);
- r = io_list_to_buf(list, buf, size);
+ else {
+ int i;
+ ErlIOVec *new_evp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ invalid_flags,
+ !refp,
+ am_command);
+
+ try_call_state.pre_chk_sched_flags = 0; /* already checked */
+ if (force_immediate_call)
+ try_call_res = force_imm_drv_call(&try_call_state);
+ else
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ call_driver_outputv(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ c_p ? c_p->common.id : ERTS_INVALID_PID,
+ from,
+ prt,
+ drv,
+ evp);
+ if (force_immediate_call)
+ finalize_force_imm_drv_call(&try_call_state);
+ else
+ finalize_imm_drv_call(&try_call_state);
+ /* Fall through... */
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ driver_free_binary(cbin);
+ if (evp != &ev)
+ erts_free(ERTS_ALC_T_TMP, evp);
+ if (try_call_res == ERTS_TRY_IMM_DRV_CALL_OK)
+ return ERTS_PORT_OP_DONE;
+ else
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ sched_flags = try_call_state.sched_flags;
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule outputv() call instead... */
+ break;
+ }
-#ifdef USE_VM_PROBES
- if(DTRACE_ENABLED(port_command)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
- DTRACE4(port_command, process_str, port_str, p->name, "command");
- }
+ /* Need to increase refc on all binaries */
+ for (i = 1; i < evp->vsize; i++)
+ if (bvp[i])
+ driver_binary_inc_refc(bvp[i]);
+
+ new_evp = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, alloc_size);
+
+ if (evp != &ev) {
+ sys_memcpy((void *) new_evp, (void *) evp, alloc_size);
+ new_evp->iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ bvp = new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
+ + binv_offset);
+
+#ifdef DEBUG
+ ASSERT(new_evp->vsize == evp->vsize);
+ ASSERT(new_evp->size == evp->size);
+ for (i = 0; i < evp->vsize; i++) {
+ ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
+ ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
+ ASSERT(new_evp->binv[i] == evp->binv[i]);
+ }
#endif
- if (r >= 0) {
- size -= r;
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(driver_output)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
- DTRACE4(driver_output, process_str, port_str, p->name, size);
- }
+ erts_free(ERTS_ALC_T_TMP, evp);
+ }
+ else { /* from stack allocated structure; offsets may differ */
+
+ sys_memcpy((void *) new_evp, (void *) evp, sizeof(ErlIOVec));
+ new_evp->iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ sys_memcpy((void *) new_evp->iov,
+ (void *) evp->iov,
+ evp->vsize * sizeof(SysIOVec));
+ new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
+ + binv_offset);
+ sys_memcpy((void *) new_evp->binv,
+ (void *) evp->binv,
+ evp->vsize * sizeof(ErlDrvBinary *));
+
+#ifdef DEBUG
+ ASSERT(new_evp->vsize == evp->vsize);
+ ASSERT(new_evp->size == evp->size);
+ for (i = 0; i < evp->vsize; i++) {
+ ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
+ ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
+ ASSERT(new_evp->binv[i] == evp->binv[i]);
+ }
#endif
- fpe_was_unmasked = erts_block_fpe();
- (*drv->output)((ErlDrvData)p->drv_data, buf, size);
- erts_unblock_fpe(fpe_was_unmasked);
- erts_free(ERTS_ALC_T_TMP, buf);
+
+ }
+
+ evp = new_evp;
}
- else if (r == -2) {
- erts_free(ERTS_ALC_T_TMP, buf);
- goto bad_value;
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUTV;
+ sigdp->u.outputv.from = from;
+ sigdp->u.outputv.evp = evp;
+ sigdp->u.outputv.cbinp = cbin;
+ port_sig_callback = port_sig_outputv;
+ }
+ else {
+ ErlDrvSizeT r;
+
+ /*
+ * Apparently there exists code that writes 1 byte too
+ * much to the buffer. Where it resides I don't know, but
+ * we can live with one extra byte allocated...
+ */
+
+ if (!try_call) {
+ if (erts_iolist_size(list, &size))
+ goto bad_value;
+
+ buf = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, size + 1);
+
+ r = erts_iolist_to_buf(list, buf, size);
+ ASSERT(ERTS_IOLIST_TO_BUF_SUCCEEDED(r));
}
else {
- ASSERT(r == -1); /* Overflow */
- erts_free(ERTS_ALC_T_TMP, buf);
- if (erts_iolist_size(list, &size)) {
- goto bad_value;
+ char *new_buf;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ invalid_flags,
+ !refp,
+ am_command);
+
+ /* Try with an 8KB buffer first (will often be enough I guess). */
+ size = 8*1024;
+
+ buf = erts_alloc(ERTS_ALC_T_TMP, size + 1);
+ r = erts_iolist_to_buf(list, buf, size);
+
+ if (ERTS_IOLIST_TO_BUF_SUCCEEDED(r)) {
+ ASSERT(r <= size);
+ size -= r;
+ }
+ else {
+ erts_free(ERTS_ALC_T_TMP, buf);
+ if (r == ERTS_IOLIST_TO_BUF_TYPE_ERROR)
+ goto bad_value;
+ ASSERT(r == ERTS_IOLIST_TO_BUF_OVERFLOW);
+ if (erts_iolist_size(list, &size))
+ goto bad_value;
+ buf = erts_alloc(ERTS_ALC_T_TMP, size + 1);
+ r = erts_iolist_to_buf(list, buf, size);
+ ASSERT(ERTS_IOLIST_TO_BUF_SUCCEEDED(r));
}
- /*
- * I know drivers that pad space with '\0' this is clearly
- * incorrect but I don't feel like fixing them now, insted
- * add ONE extra byte.
- */
- buf = erts_alloc(ERTS_ALC_T_TMP, size+1);
- r = io_list_to_buf(list, buf, size);
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(driver_output)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
- DTRACE4(driver_output, process_str, port_str, p->name, size);
- }
-#endif
- fpe_was_unmasked = erts_block_fpe();
- (*drv->output)((ErlDrvData)p->drv_data, buf, size);
- erts_unblock_fpe(fpe_was_unmasked);
+ try_call_state.pre_chk_sched_flags = 0; /* already checked */
+ if (force_immediate_call)
+ try_call_res = force_imm_drv_call(&try_call_state);
+ else
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ call_driver_output(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ c_p ? c_p->common.id : ERTS_INVALID_PID,
+ from,
+ prt,
+ drv,
+ buf,
+ size);
+ if (force_immediate_call)
+ finalize_force_imm_drv_call(&try_call_state);
+ else
+ finalize_imm_drv_call(&try_call_state);
+ /* Fall through... */
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ erts_free(ERTS_ALC_T_TMP, buf);
+ if (try_call_res == ERTS_TRY_IMM_DRV_CALL_OK)
+ return ERTS_PORT_OP_DONE;
+ else
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ sched_flags = try_call_state.sched_flags;
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule output() call instead... */
+ break;
+ }
+
+ new_buf = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, size + 1);
+ sys_memcpy(new_buf, buf, size);
erts_free(ERTS_ALC_T_TMP, buf);
+ buf = new_buf;
}
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUT;
+ sigdp->u.output.from = from;
+ sigdp->u.output.bufp = buf;
+ sigdp->u.output.size = size;
+ port_sig_callback = port_sig_output;
+ }
+
+ task_flags = ERTS_PT_FLG_WAIT_BUSY;
+ sigdp->flags |= flags;
+ if (flags & (ERTS_P2P_SIG_DATA_FLG_FORCE|ERTS_P2P_SIG_DATA_FLG_NOSUSPEND)) {
+ task_flags = 0;
+ if (flags & ERTS_P2P_SIG_DATA_FLG_FORCE)
+ sigdp->flags &= ~ERTS_P2P_SIG_DATA_FLG_NOSUSPEND;
+ else if (flags & ERTS_P2P_SIG_DATA_FLG_NOSUSPEND)
+ task_flags = ERTS_PT_FLG_NOSUSPEND;
+ }
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : ERTS_INVALID_PID,
+ refp,
+ sigdp,
+ task_flags,
+ port_sig_callback);
+
+ if (res != ERTS_PORT_OP_SCHEDULED) {
+ if (drv->outputv)
+ cleanup_scheduled_outputv(evp, cbin);
+ else
+ cleanup_scheduled_output(buf);
+ return res;
}
- p->bytes_out += size;
- erts_smp_atomic_add_nob(&erts_bytes_out, size);
-#ifdef ERTS_SMP
- if (p->xports)
- erts_smp_xports_unlock(p);
- ASSERT(!p->xports);
+ if (!(sched_flags & ERTS_PTS_FLG_EXIT) && (sched_flags & busy_flgs))
+ return ERTS_PORT_OP_BUSY_SCHEDULED;
+
+ return res;
+
+bad_value:
+
+ flags |= ERTS_PORT_SIG_FLG_BAD_OUTPUT;
+ return bad_port_signal(c_p, flags, prt, from, refp, am_command);
+}
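+
+/*
+ * Sketch of how a caller (e.g. a port_command() style BIF) might map the
+ * ErtsPortOpResult returned by erts_port_output(); illustration only, the
+ * mapping below is an assumption about the caller, not something this
+ * change prescribes.
+ */
+#if 0
+static Eterm
+output_result_to_term(ErtsPortOpResult res)
+{
+    switch (res) {
+    case ERTS_PORT_OP_DONE:           /* driver callback already ran */
+    case ERTS_PORT_OP_SCHEDULED:      /* queued; reply arrives as {Ref, _} */
+    case ERTS_PORT_OP_BUSY_SCHEDULED: /* queued although the port was busy */
+        return am_true;
+    case ERTS_PORT_OP_BUSY:           /* e.g. a nosuspend caller backs off */
+        return am_false;
+    case ERTS_PORT_OP_DROPPED:        /* port is dead or dying */
+    case ERTS_PORT_OP_BADARG:         /* bad iolist or similar */
+    default:
+        return am_badarg;
+    }
+}
+#endif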
+
+static ERTS_INLINE ErtsPortOpResult
+call_deliver_port_exit(int bang_op,
+ Eterm from,
+ Port *prt,
+ erts_aint32_t state,
+ Eterm reason,
+ int broken_link)
+{
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! {From, close}" operation
+ * else
+ * we are part of a call to port_close(Port)
+ * behave accordingly...
+ */
+
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ return ERTS_PORT_OP_DROPPED;
+
+ if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt)) {
+ send_badsig(prt);
+ return ERTS_PORT_OP_DROPPED;
+ }
+
+ if (broken_link) {
+ ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from);
+ if (lnk)
+ erts_destroy_link(lnk);
+ else
+ return ERTS_PORT_OP_DROPPED;
+ }
+
+ if (!erts_deliver_port_exit(prt, from, reason, bang_op))
+ return ERTS_PORT_OP_DROPPED;
+
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(port_command) && bang_op) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(from, prt);
+ DTRACE4(port_command, process_str, port_str, prt->name, "close");
+ }
#endif
- p->caller = NIL;
- return 0;
- bad_value:
- p->caller = NIL;
- {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Bad value on output port '%s'\n", p->name);
- erts_send_error_to_logger_nogl(dsbufp);
- return 1;
+ return ERTS_PORT_OP_DONE;
+}
+
+static int
+port_sig_exit(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ Eterm msg = am_badarg;
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ ErtsPortOpResult res;
+ int bang_op = sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP;
+ int broken_link = sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK;
+ res = call_deliver_port_exit(bang_op,
+ sigdp->u.exit.from,
+ prt,
+ state,
+ sigdp->u.exit.reason,
+ broken_link);
+
+ if (res == ERTS_PORT_OP_DONE)
+ msg = am_true;
+ }
+ if (sigdp->u.exit.bp)
+ free_message_buffer(sigdp->u.exit.bp);
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, msg);
+
+ return ERTS_PORT_REDS_EXIT;
+}
+
+ErtsPortOpResult
+erts_port_exit(Process *c_p,
+ int flags,
+ Port *prt,
+ Eterm from,
+ Eterm reason,
+ Eterm *refp)
+{
+ ErtsPortOpResult res;
+ ErtsProc2PortSigData *sigdp;
+ ErlHeapFragment *bp = NULL;
+
+ ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP
+ | ERTS_PORT_SIG_FLG_BROKEN_LINK
+ | ERTS_PORT_SIG_FLG_FORCE_SCHED)) == 0);
+
+ if (!(flags & ERTS_PORT_SIG_FLG_FORCE_SCHED)) {
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ am_exit);
+
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ res = call_deliver_port_exit(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ from,
+ prt,
+ try_call_state.state,
+ reason,
+ flags & ERTS_PORT_SIG_FLG_BROKEN_LINK);
+ finalize_imm_drv_call(&try_call_state);
+ return res;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ default:
+ /* Schedule call instead... */
+ break;
+ }
}
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_EXIT | flags;
+ sigdp->u.exit.from = from;
+
+ if (is_immed(reason)) {
+ sigdp->u.exit.reason = reason;
+ sigdp->u.exit.bp = NULL;
+ }
+ else {
+ Eterm *hp;
+ Uint hsz = size_object(reason);
+ bp = new_message_buffer(hsz);
+ sigdp->u.exit.bp = bp;
+ hp = bp->mem;
+ sigdp->u.exit.reason = copy_struct(reason,
+ hsz,
+ &hp,
+ &bp->off_heap);
+ }
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : from,
+ refp,
+ sigdp,
+ 0,
+ port_sig_exit);
+
+ if (res == ERTS_PORT_OP_DROPPED) {
+ if (bp)
+ free_message_buffer(bp);
+ }
+
+ return res;
}
-/* initialize the port array */
-void init_io(void)
+static ErtsPortOpResult
+set_port_connected(int bang_op,
+ Eterm from,
+ Port *prt,
+ erts_aint32_t state,
+ Eterm connect)
{
- int i;
- ErlDrvEntry** dp;
- char maxports[21]; /* enough for any 64-bit integer */
- size_t maxportssize = sizeof(maxports);
- Uint ports_bits = ERTS_PORTS_BITS;
- Sint port_extra_shift;
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! {From, {connect, Connect}}" operation
+ * else
+ * we are part of a call to port_connect(Port, Connect)
+ * behave accordingly...
+ */
-#ifdef ERTS_SMP
- init_xports_list_alloc();
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ return ERTS_PORT_OP_DROPPED;
+
+ if (bang_op) { /* Bang operation */
+ if (is_not_internal_pid(connect) || ERTS_PORT_GET_CONNECTED(prt) != from) {
+ send_badsig(prt);
+ return ERTS_PORT_OP_DROPPED;
+ }
+
+ ERTS_PORT_SET_CONNECTED(prt, connect);
+ deliver_result(prt->common.id, from, am_connected);
+
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(port_command)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(from, prt);
+ DTRACE4(port_command, process_str, port_str, prt->name, "connect");
+ }
#endif
+ }
+ else { /* Port BIF operation */
+ Process *rp = erts_proc_lookup_raw(connect);
+ if (!rp)
+ return ERTS_PORT_OP_DROPPED;
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ return ERTS_PORT_OP_DROPPED;
+ }
- pdl_init();
+ erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, prt->common.id);
+ erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, connect);
+
+ ERTS_PORT_SET_CONNECTED(prt, connect);
+
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_connect)) {
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_pid_str(connect, process_str);
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
+ dtrace_proc_str(rp, newprocess_str);
+ DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
+ }
+#endif
+ }
+
+ return ERTS_PORT_OP_DONE;
+}
- if (erts_sys_getenv_raw("ERL_MAX_PORTS", maxports, &maxportssize) == 0)
- erts_max_ports = atoi(maxports);
+static int
+port_sig_connect(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ Eterm msg = am_badarg;
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ ErtsPortOpResult res;
+ res = set_port_connected(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP,
+ sigdp->u.connect.from,
+ prt,
+ state,
+ sigdp->u.connect.connected);
+ if (res == ERTS_PORT_OP_DONE)
+ msg = am_true;
+ }
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, msg);
+ return ERTS_PORT_REDS_CONNECT;
+}
+
+ErtsPortOpResult
+erts_port_connect(Process *c_p,
+ int flags,
+ Port *prt,
+ Eterm from,
+ Eterm connect,
+ Eterm *refp)
+{
+ ErtsProc2PortSigData *sigdp;
+ Eterm connect_id;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ am_connect);
+
+ ASSERT((flags & ~ERTS_PORT_SIG_FLG_BANG_OP) == 0);
+
+ if (is_not_internal_pid(connect))
+ connect_id = NIL; /* Fail in op (for signal order) */
else
- erts_max_ports = sys_max_files();
+ connect_id = connect;
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ ErtsPortOpResult res;
+ res = set_port_connected(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ from,
+ prt,
+ try_call_state.state,
+ connect_id);
+ finalize_imm_drv_call(&try_call_state);
+ return res;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ default:
+ /* Schedule call instead... */
+ break;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_CONNECT | flags;
+
+ sigdp->u.connect.from = from;
+ sigdp->u.connect.connected = connect_id;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ refp,
+ sigdp,
+ 0,
+ port_sig_connect);
+}
- if (erts_max_ports > ERTS_MAX_PORTS)
- erts_max_ports = ERTS_MAX_PORTS;
- if (erts_max_ports < 1024)
- erts_max_ports = 1024;
+static void
+port_unlink(Port *prt, Eterm from)
+{
+ ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from);
+ if (lnk)
+ erts_destroy_link(lnk);
+}
- if (erts_use_r9_pids_ports) {
- ports_bits = ERTS_R9_PORTS_BITS;
- if (erts_max_ports > ERTS_MAX_R9_PORTS)
- erts_max_ports = ERTS_MAX_R9_PORTS;
+static int
+port_sig_unlink(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ if (op == ERTS_PROC2PORT_SIG_EXEC)
+ port_unlink(prt, sigdp->u.unlink.from);
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ return ERTS_PORT_REDS_UNLINK;
+}
+
+ErtsPortOpResult
+erts_port_unlink(Process *c_p, Port *prt, Eterm from, Eterm *refp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
+ prt,
+ ERTS_PORT_SFLGS_DEAD,
+ 0,
+ !refp,
+ am_unlink);
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ port_unlink(prt, from);
+ finalize_imm_drv_call(&try_call_state);
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ default:
+ /* Schedule call instead... */
+ break;
}
- port_extra_shift = erts_fit_in_bits_int32(erts_max_ports - 1);
- port_num_mask = (1 << ports_bits) - 1;
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_UNLINK;
+ sigdp->u.unlink.from = from;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : from,
+ refp,
+ sigdp,
+ 0,
+ port_sig_unlink);
+}
- erts_port_tab_index_mask = ~(~((Uint) 0) << port_extra_shift);
- erts_max_ports = 1 << port_extra_shift;
+static void
+port_link_failure(Eterm port_id, Eterm linker)
+{
+ Process *rp;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
+ ASSERT(is_internal_pid(linker));
+ rp = erts_pid2proc(NULL, 0, linker, rp_locks);
+ if (rp) {
+ ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), port_id);
+ if (rlnk) {
+ int xres = erts_send_exit_signal(NULL,
+ port_id,
+ rp,
+ &rp_locks,
+ am_noproc,
+ NIL,
+ NULL,
+ 0);
+ if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+ /* We didn't exit the process and it is traced */
+ trace_proc(NULL, rp, am_getting_unlinked, port_id);
+ }
+ }
+ }
+}
- erts_smp_mtx_init(&erts_driver_list_lock,"driver_list");
- driver_list = NULL;
- erts_smp_tsd_key_create(&driver_list_lock_status_key);
- erts_smp_tsd_key_create(&driver_list_last_error_key);
+static void
+port_link(Port *prt, erts_aint32_t state, Eterm to)
+{
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP))
+ erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, to);
+ else
+ port_link_failure(prt->common.id, to);
+}
- if (erts_max_ports * sizeof(Port) <= erts_max_ports) {
- /* More memory needed than the whole address space. */
- erts_alloc_enomem(ERTS_ALC_T_PORT_TABLE, ~((Uint) 0));
+static int
+port_sig_link(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ if (op == ERTS_PROC2PORT_SIG_EXEC)
+ port_link(prt, state, sigdp->u.link.to);
+ else
+ port_link_failure(sigdp->u.link.port, sigdp->u.link.to);
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ return ERTS_PORT_REDS_LINK;
+}
+
+ErtsPortOpResult
+erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ am_link);
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ port_link(prt, try_call_state.state, to);
+ finalize_imm_drv_call(&try_call_state);
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_BADARG;
+ default:
+ /* Schedule call instead... */
+ break;
}
- erts_port = (Port *) erts_alloc(ERTS_ALC_T_PORT_TABLE,
- erts_max_ports * sizeof(Port));
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_LINK;
+ sigdp->u.link.port = prt->common.id;
+ sigdp->u.link.to = to;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : to,
+ refp,
+ sigdp,
+ 0,
+ port_sig_link);
+}
- erts_smp_atomic_init_nob(&erts_bytes_out, 0);
- erts_smp_atomic_init_nob(&erts_bytes_in, 0);
+void erts_init_io(int port_tab_size,
+ int port_tab_size_ignore_files)
+{
+ ErlDrvEntry** dp;
+ erts_smp_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- for (i = 0; i < erts_max_ports; i++) {
- erts_port_task_init_sched(&erts_port[i].sched);
- erts_smp_atomic_init_nob(&erts_port[i].refc, 0);
#ifdef ERTS_SMP
- erts_port[i].lock = NULL;
- erts_port[i].xports = NULL;
- erts_smp_spinlock_init_x(&erts_port[i].state_lck,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_state" : NULL,
-#else
- "port_state",
-#endif
- make_small(0));
+ init_xports_list_alloc();
#endif
- erts_port[i].tracer_proc = NIL;
- erts_port[i].trace_flags = 0;
- erts_port[i].drv_ptr = NULL;
- erts_port[i].status = ERTS_PORT_SFLG_FREE;
- erts_port[i].name = NULL;
- erts_port[i].nlinks = NULL;
- erts_port[i].monitors = NULL;
- erts_port[i].linebuf = NULL;
- erts_port[i].port_data_lock = NULL;
+ pdl_init();
+
+ if (!port_tab_size_ignore_files) {
+ int max_files = sys_max_files();
+ if (port_tab_size < max_files)
+ port_tab_size = max_files;
}
- erts_smp_atomic32_init_nob(&erts_ports_snapshot, (erts_aint32_t) 0);
- last_port_num = 0;
- erts_smp_spinlock_init(&get_free_port_lck, "get_free_port");
+ if (port_tab_size > ERTS_MAX_PORTS)
+ port_tab_size = ERTS_MAX_PORTS;
+ else if (port_tab_size < ERTS_MIN_PORTS)
+ port_tab_size = ERTS_MIN_PORTS;
+
+ erts_smp_rwmtx_init_opt(&erts_driver_list_lock,
+ &drv_list_rwmtx_opts,
+ "driver_list");
+ driver_list = NULL;
+ erts_smp_tsd_key_create(&driver_list_lock_status_key);
+ erts_smp_tsd_key_create(&driver_list_last_error_key);
+
+ erts_ptab_init_table(&erts_port,
+ ERTS_ALC_T_PORT_TABLE,
+ NULL,
+ (ErtsPTabElementCommon *) &erts_invalid_port.common,
+ port_tab_size,
+ "port_table");
+
+ erts_smp_atomic_init_nob(&erts_bytes_out, 0);
+ erts_smp_atomic_init_nob(&erts_bytes_in, 0);
sys_init_io();
erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1);
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
init_driver(&fd_driver, &fd_driver_entry, NULL);
init_driver(&vanilla_driver, &vanilla_driver_entry, NULL);
@@ -1390,27 +2651,63 @@ void init_io(void)
erts_add_driver_entry(*dp, NULL, 1);
erts_smp_tsd_set(driver_list_lock_status_key, NULL);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
-void erts_lcnt_enable_io_lock_count(int enable) {
- int i;
- for (i = 0; i < erts_max_ports; i++) {
- Port* p = &erts_port[i];
- if (enable) {
- erts_lcnt_init_lock_x(&p->state_lck.lcnt, "port_state", ERTS_LCNT_LT_SPINLOCK, make_small(i));
- if (p->lock) {
- erts_lcnt_init_lock_x(&p->lock->lcnt, "port_lock", ERTS_LCNT_LT_MUTEX, make_small(i));
- }
- } else {
- erts_lcnt_destroy_lock(&p->state_lck.lcnt);
- if (p->lock) {
- erts_lcnt_destroy_lock(&p->lock->lcnt);
- }
- }
+static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable)
+{
+ if (dp->lock) {
+ if (enable)
+ erts_lcnt_init_lock_x(&dp->lock->lcnt,
+ "driver_lock",
+ ERTS_LCNT_LT_MUTEX,
+ am_atom_put(dp->name,
+ sys_strlen(dp->name)));
+ else
+ erts_lcnt_destroy_lock(&dp->lock->lcnt);
+
+ }
+}
+
+static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable)
+{
+ erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
+ if (!enable) {
+ erts_lcnt_destroy_lock(&prt->sched.mtx.lcnt);
+ if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
+ erts_lcnt_destroy_lock(&prt->lock->lcnt);
}
+ else {
+ erts_lcnt_init_lock_x(&prt->sched.mtx.lcnt,
+ "port_sched_lock",
+ ERTS_LCNT_LT_MUTEX,
+ prt->common.id);
+ if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
+ erts_lcnt_init_lock_x(&prt->lock->lcnt,
+ "port_lock",
+ ERTS_LCNT_LT_MUTEX,
+ prt->common.id);
+ }
+}
+
+void erts_lcnt_enable_io_lock_count(int enable)
+{
+ erts_driver_t *dp;
+ int i, max = erts_ptab_max(&erts_port);
+
+ for (i = 0; i < max; i++) {
+ Port *prt = erts_pix2port(i);
+ if (prt)
+ lcnt_enable_port_lock_count(prt, enable);
+ }
+
+ lcnt_enable_drv_lock_count(&vanilla_driver, enable);
+ lcnt_enable_drv_lock_count(&spawn_driver, enable);
+ lcnt_enable_drv_lock_count(&fd_driver, enable);
+ for (dp = driver_list; dp; dp = dp->next)
+ lcnt_enable_drv_lock_count(dp, enable);
}
#endif
@@ -1594,9 +2891,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- ASSERT(is_internal_port(sender)
- && is_internal_pid(pid)
- && internal_pid_index(pid) < erts_max_processes);
+ ASSERT(is_internal_port(sender) && is_internal_pid(pid));
rp = (scheduler
? erts_proc_lookup(pid)
@@ -1608,16 +2903,19 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
ErlOffHeap *ohp;
Eterm* hp;
Uint sz_res;
- sz_res = size_object(res);
- hp = erts_alloc_message_heap(sz_res + 3, &bp, &ohp, rp, &rp_locks);
- res = copy_struct(res, sz_res, &hp, ohp);
- tuple = TUPLE2(hp, sender, res);
+
+ sz_res = size_object(res);
+ hp = erts_alloc_message_heap(sz_res + 3, &bp, &ohp, rp, &rp_locks);
+ res = copy_struct(res, sz_res, &hp, ohp);
+ tuple = TUPLE2(hp, sender, res);
erts_queue_message(rp, &rp_locks, bp, tuple, NIL
#ifdef USE_VM_PROBES
, NIL
#endif
);
- erts_smp_proc_unlock(rp, rp_locks);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
erts_smp_proc_dec_refc(rp);
@@ -1633,7 +2931,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
* len -- length of data
*/
-static void deliver_read_message(Port* prt, Eterm to,
+static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
char *hbuf, ErlDrvSizeT hlen,
char *buf, ErlDrvSizeT len, int eol)
{
@@ -1651,10 +2949,11 @@ static void deliver_read_message(Port* prt, Eterm to,
ERTS_SMP_CHK_NO_PROC_LOCKS;
need = 3 + 3 + 2*hlen;
- if (prt->status & ERTS_PORT_SFLG_LINEBUF_IO) {
+
+ if (state & ERTS_PORT_SFLG_LINEBUF_IO) {
need += 3;
}
- if (prt->status & ERTS_PORT_SFLG_BINARY_IO && buf != NULL) {
+ if ((state & ERTS_PORT_SFLG_BINARY_IO) && buf != NULL) {
need += PROC_BIN_SIZE;
} else {
need += 2*len;
@@ -1670,7 +2969,7 @@ static void deliver_read_message(Port* prt, Eterm to,
hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks);
listp = NIL;
- if ((prt->status & ERTS_PORT_SFLG_BINARY_IO) == 0) {
+ if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) {
listp = buf_to_intlist(&hp, buf, len, listp);
} else if (buf != NULL) {
ProcBin* pb;
@@ -1701,14 +3000,14 @@ static void deliver_read_message(Port* prt, Eterm to,
listp = buf_to_intlist(&hp, hbuf, hlen, listp);
}
- if (prt->status & ERTS_PORT_SFLG_LINEBUF_IO){
+ if (state & ERTS_PORT_SFLG_LINEBUF_IO){
listp = TUPLE2(hp, (eol) ? am_eol : am_noeol, listp);
hp += 3;
}
tuple = TUPLE2(hp, am_data, listp);
hp += 3;
- tuple = TUPLE2(hp, prt->id, tuple);
+ tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
@@ -1726,7 +3025,8 @@ static void deliver_read_message(Port* prt, Eterm to,
* Deliver all lines in a line buffer, repeats calls to
* deliver_read_message, and takes the same parameters.
*/
-static void deliver_linebuf_message(Port* prt, Eterm to,
+static void deliver_linebuf_message(Port* prt, erts_aint_t state,
+ Eterm to,
char* hbuf, ErlDrvSizeT hlen,
char *buf, ErlDrvSizeT len)
{
@@ -1735,7 +3035,7 @@ static void deliver_linebuf_message(Port* prt, Eterm to,
if(init_linebuf_context(&lc,&(prt->linebuf), buf, len) < 0)
return;
while((ret = read_linebuf(&lc)) > LINEBUF_EMPTY)
- deliver_read_message(prt, to, hbuf, hlen, LINEBUF_DATA(lc),
+ deliver_read_message(prt, state, to, hbuf, hlen, LINEBUF_DATA(lc),
LINEBUF_DATALEN(lc), (ret == LINEBUF_EOL));
}
@@ -1746,20 +3046,25 @@ static void deliver_linebuf_message(Port* prt, Eterm to,
* Parameters:
* prt - Pointer to a Port structure for this port.
*/
-static void flush_linebuf_messages(Port *prt)
+static void flush_linebuf_messages(Port *prt, erts_aint32_t state)
{
LineBufContext lc;
int ret;
ERTS_SMP_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
- if(prt == NULL || !(prt->status & ERTS_PORT_SFLG_LINEBUF_IO))
+
+ if (!prt)
+ return;
+
+ if (!(state & ERTS_PORT_SFLG_LINEBUF_IO))
return;
if(init_linebuf_context(&lc,&(prt->linebuf), NULL, 0) < 0)
return;
while((ret = flush_linebuf(&lc)) > LINEBUF_EMPTY)
deliver_read_message(prt,
- prt->connected,
+ state,
+ ERTS_PORT_GET_CONNECTED(prt),
NULL,
0,
LINEBUF_DATA(lc),
@@ -1787,6 +3092,7 @@ deliver_vec_message(Port* prt, /* Port */
ErlOffHeap *ohp;
ErtsProcLocks rp_locks = 0;
int scheduler = erts_get_scheduler_id() != 0;
+ erts_aint32_t state;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -1802,12 +3108,13 @@ deliver_vec_message(Port* prt, /* Port */
if (!rp)
return;
+ state = erts_atomic32_read_nob(&prt->state);
/*
* Calculate the exact number of heap words needed.
*/
need = 3 + 3; /* Heap space for two tuples */
- if (prt->status & ERTS_PORT_SFLG_BINARY_IO) {
+ if (state & ERTS_PORT_SFLG_BINARY_IO) {
need += (2+PROC_BIN_SIZE)*vsize - 2 + hlen*2;
} else {
need += (hlen+csize)*2;
@@ -1818,7 +3125,7 @@ deliver_vec_message(Port* prt, /* Port */
listp = NIL;
iov += vsize;
- if ((prt->status & ERTS_PORT_SFLG_BINARY_IO) == 0) {
+ if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) {
Eterm* thp = hp;
while (vsize--) {
iov--;
@@ -1871,7 +3178,7 @@ deliver_vec_message(Port* prt, /* Port */
tuple = TUPLE2(hp, am_data, listp);
hp += 3;
- tuple = TUPLE2(hp, prt->id, tuple);
+ tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
@@ -1904,7 +3211,7 @@ static void deliver_bin_message(Port* prt, /* port */
/*
* Note.
*
- * The test for (p->status & ERTS_PORT_SFLGS_DEAD) == 0 is important since the
+ * The test for ERTS_PORT_SFLGS_DEAD is important since the
* driver's flush function might call driver_async, which when using no
* threads and being short circuited will notice that the io queue is empty
* (after calling the driver's async_ready) and recursively call
@@ -1920,7 +3227,7 @@ static void flush_port(Port *p)
if (p->drv_ptr->flush != NULL) {
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_flush)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(p->connected, p)
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p)
DTRACE3(driver_flush, process_str, port_str, p->name);
}
#endif
@@ -1935,11 +3242,12 @@ static void flush_port(Port *p)
}
#ifdef ERTS_SMP
if (p->xports)
- erts_smp_xports_unlock(p);
+ erts_port_handle_xports(p);
ASSERT(!p->xports);
#endif
}
- if ((p->status & ERTS_PORT_SFLGS_DEAD) == 0 && is_port_ioq_empty(p)) {
+ if ((erts_atomic32_read_nob(&p->state) & ERTS_PORT_SFLGS_DEAD) == 0
+ && is_port_ioq_empty(p)) {
terminate_port(p);
}
}
@@ -1951,29 +3259,29 @@ terminate_port(Port *prt)
Eterm send_closed_port_id;
Eterm connected_id = NIL /* Initialize to silence compiler */;
erts_driver_t *drv;
- int halt;
+ erts_aint32_t state;
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ASSERT(!prt->nlinks);
- ASSERT(!prt->monitors);
+ ASSERT(!ERTS_P_LINKS(prt));
+ ASSERT(!ERTS_P_MONITORS(prt));
- /* prt->status may be altered by kill_port()below */
- halt = (prt->status & ERTS_PORT_SFLG_HALT) != 0;
- if (prt->status & ERTS_PORT_SFLG_SEND_CLOSED) {
- erts_port_status_band_set(prt, ~ERTS_PORT_SFLG_SEND_CLOSED);
- send_closed_port_id = prt->id;
- connected_id = prt->connected;
+ /* state may be altered by kill_port() below */
+ state = erts_atomic32_read_band_nob(&prt->state,
+ ~ERTS_PORT_SFLG_SEND_CLOSED);
+ if (state & ERTS_PORT_SFLG_SEND_CLOSED) {
+ send_closed_port_id = prt->common.id;
+ connected_id = ERTS_PORT_GET_CONNECTED(prt);
}
else {
send_closed_port_id = NIL;
}
#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(prt->ptimer);
+ erts_cancel_smp_ptimer(prt->common.u.alive.ptimer);
#else
- erts_cancel_timer(&prt->tm);
+ erts_cancel_timer(&prt->common.u.alive.tm);
#endif
drv = prt->drv_ptr;
@@ -1981,7 +3289,7 @@ terminate_port(Port *prt)
int fpe_was_unmasked = erts_block_fpe();
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_stop)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(prt->connected, prt)
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(connected_id, prt)
DTRACE3(driver_stop, process_str, drv->name, port_str);
}
#endif
@@ -1989,14 +3297,14 @@ terminate_port(Port *prt)
erts_unblock_fpe(fpe_was_unmasked);
#ifdef ERTS_SMP
if (prt->xports)
- erts_smp_xports_unlock(prt);
+ erts_port_handle_xports(prt);
ASSERT(!prt->xports);
#endif
}
if(drv->handle != NULL) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(drv->handle);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
}
stopq(prt); /* clear queue memory */
if(prt->linebuf != NULL){
@@ -2012,20 +3320,21 @@ terminate_port(Port *prt)
if (prt->psd)
erts_free(ERTS_ALC_T_PRTSD, prt->psd);
+ ASSERT(prt->dist_entry == NULL);
+
kill_port(prt);
/*
* We don't want to send the closed message until after the
* port has been removed from the port table (in kill_port()).
*/
- if (halt && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) {
- erts_smp_port_unlock(prt); /* We will exit and never return */
+ if ((state & ERTS_PORT_SFLG_HALT)
+ && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) {
+ erts_port_release(prt); /* We will exit and never return */
erl_exit_flush_async(erts_halt_code, "");
}
if (is_internal_port(send_closed_port_id))
deliver_result(send_closed_port_id, connected_id, am_closed);
-
- ASSERT(prt->dist_entry == NULL);
}
void
@@ -2045,7 +3354,7 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
if (!rp) {
goto done;
}
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon == NULL) {
goto done;
@@ -2099,7 +3408,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
ASSERT(is_internal_pid(lnk->pid));
rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
if (rp) {
- ErtsLink *rlnk = erts_remove_link(&(rp->nlinks), psc->port);
+ ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), psc->port);
if (rlnk) {
int xres = erts_send_exit_signal(NULL,
@@ -2135,11 +3444,13 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
 * that is to kill a port with reason kill. Then the port is stopped.
*
*/
-void
-erts_do_exit_port(Port *p, Eterm from, Eterm reason)
+
+int
+erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
{
ErtsLink *lnk;
Eterm rreason;
+ erts_aint32_t state;
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
@@ -2159,66 +3470,76 @@ erts_do_exit_port(Port *p, Eterm from, Eterm reason)
}
#endif
- if ((p->status & (ERTS_PORT_SFLGS_DEAD
- | ERTS_PORT_SFLG_EXITING
- | ERTS_PORT_SFLG_IMMORTAL))
- || ((reason == am_normal) &&
- ((from != p->connected) && (from != p->id)))) {
- return;
- }
+ state = erts_atomic32_read_nob(&p->state);
+ if (state & (ERTS_PORT_SFLGS_DEAD
+ | ERTS_PORT_SFLG_EXITING
+ | ERTS_PORT_SFLG_CLOSING))
+ return 0;
+
+ if (reason == am_normal && from != ERTS_PORT_GET_CONNECTED(p) && from != p->common.id)
+ return 0;
+
+ if (send_closed)
+ erts_atomic32_read_bor_relb(&p->state,
+ ERTS_PORT_SFLG_SEND_CLOSED);
if (IS_TRACED_FL(p, F_TRACE_PORTS)) {
trace_port(p, am_closed, reason);
}
- erts_trace_check_exiting(p->id);
+ erts_trace_check_exiting(p->common.id);
/*
* Setting the port to not busy here, frees the list of pending
* processes and makes them runnable.
*/
- set_busy_port((ErlDrvPort)internal_port_index(p->id), 0);
+ set_busy_port((ErlDrvPort) p, 0);
- if (p->reg != NULL)
- (void) erts_unregister_name(NULL, 0, p, p->reg->name);
+ if (p->common.u.alive.reg != NULL)
+ (void) erts_unregister_name(NULL, 0, p, p->common.u.alive.reg->name);
- erts_port_status_bor_set(p, ERTS_PORT_SFLG_EXITING);
+ state = erts_atomic32_read_bor_relb(&p->state, ERTS_PORT_SFLG_EXITING);
{
- SweepContext sc = {p->id, rreason};
- lnk = p->nlinks;
- p->nlinks = NULL;
+ SweepContext sc = {p->common.id, rreason};
+ lnk = ERTS_P_LINKS(p);
+ ERTS_P_LINKS(p) = NULL;
erts_sweep_links(lnk, &sweep_one_link, &sc);
}
DRV_MONITOR_LOCK_PDL(p);
{
- ErtsMonitor *moni = p->monitors;
- p->monitors = NULL;
+ ErtsMonitor *moni = ERTS_P_MONITORS(p);
+ ERTS_P_MONITORS(p) = NULL;
erts_sweep_monitors(moni, &sweep_one_monitor, NULL);
}
DRV_MONITOR_UNLOCK_PDL(p);
- if ((p->status & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) {
+ if ((state & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) {
erts_do_net_exits(p->dist_entry, rreason);
erts_deref_dist_entry(p->dist_entry);
- p->dist_entry = NULL;
- erts_port_status_band_set(p, ~ERTS_PORT_SFLG_DISTRIBUTION);
+ p->dist_entry = NULL;
+ erts_atomic32_read_band_relb(&p->state,
+ ~ERTS_PORT_SFLG_DISTRIBUTION);
}
if ((reason != am_kill) && !is_port_ioq_empty(p)) {
- erts_port_status_bandor_set(p,
- ~ERTS_PORT_SFLG_EXITING, /* must turn it off */
- ERTS_PORT_SFLG_CLOSING);
+ /* must turn exiting flag off */
+ erts_atomic32_read_bset_relb(&p->state,
+ (ERTS_PORT_SFLG_EXITING
+ | ERTS_PORT_SFLG_CLOSING),
+ ERTS_PORT_SFLG_CLOSING);
flush_port(p);
}
else {
terminate_port(p);
}
+
+ return 1;
}
/* About the states ERTS_PORT_SFLG_EXITING and ERTS_PORT_SFLG_CLOSING used above.
**
-** ERTS_PORT_SFLG_EXITING is a recursion protection for erts_do_exit_port().
+** ERTS_PORT_SFLG_EXITING is a recursion protection for erts_deliver_port_exit().
** It is unclear whether this state is necessary or not, it might be possible
** to merge it with ERTS_PORT_SFLG_CLOSING. ERTS_PORT_SFLG_EXITING only persists
** over a section of sequential (but highly recursive) code.
@@ -2234,232 +3555,1108 @@ erts_do_exit_port(Port *p, Eterm from, Eterm reason)
** {PID, close}
** {PID, {command, io-list}}
** {PID, {connect, New_PID}}
-**
-**
*/
-void erts_port_command(Process *proc,
- Eterm caller_id,
- Port *port,
- Eterm command)
+ErtsPortOpResult
+erts_port_command(Process *c_p,
+ int flags,
+ Port *port,
+ Eterm command,
+ Eterm *refp)
{
Eterm *tp;
- Eterm pid;
- if (!port)
- return;
+ ASSERT(port);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ASSERT(!INVALID_PORT(port, port->id));
+ flags |= ERTS_PORT_SIG_FLG_BANG_OP;
if (is_tuple_arity(command, 2)) {
+ Eterm cntd;
tp = tuple_val(command);
- if ((pid = port->connected) == tp[1]) {
- /* PID must be connected */
+ cntd = tp[1];
+ if (is_internal_pid(cntd)) {
if (tp[2] == am_close) {
- erts_port_status_bor_set(port, ERTS_PORT_SFLG_SEND_CLOSED);
- erts_do_exit_port(port, pid, am_normal);
-
-#ifdef USE_VM_PROBES
- if(DTRACE_ENABLED(port_command)) {
- DTRACE_FORMAT_COMMON_PROC_AND_PORT(proc, port)
- DTRACE4(port_command, process_str, port_str, port->name, "close");
- }
-#endif
- goto done;
+ if (!erts_port_synchronous_ops)
+ refp = NULL;
+ flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND;
+ return erts_port_exit(c_p, flags, port, cntd, am_normal, refp);
} else if (is_tuple_arity(tp[2], 2)) {
tp = tuple_val(tp[2]);
if (tp[1] == am_command) {
- if (erts_write_to_port(caller_id, port, tp[2]) == 0)
- goto done;
- } else if ((tp[1] == am_connect) && is_internal_pid(tp[2])) {
-#ifdef USE_VM_PROBES
- if(DTRACE_ENABLED(port_command)) {
- DTRACE_FORMAT_COMMON_PROC_AND_PORT(proc, port)
- DTRACE4(port_command, process_str, port_str, port->name, "connect");
- }
-#endif
- port->connected = tp[2];
- deliver_result(port->id, pid, am_connected);
- goto done;
+ if (!(flags & ERTS_PORT_SIG_FLG_NOSUSPEND)
+ && !erts_port_synchronous_ops)
+ refp = NULL;
+ return erts_port_output(c_p, flags, port, cntd, tp[2], refp);
+ }
+ else if (tp[1] == am_connect) {
+ if (!erts_port_synchronous_ops)
+ refp = NULL;
+ flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND;
+ return erts_port_connect(c_p, flags, port, cntd, tp[2], refp);
}
}
}
}
- {
- ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- Process* rp = erts_pid2proc(NULL, 0,
- port->connected, rp_locks);
- if (rp) {
- (void) erts_send_exit_signal(NULL,
- port->id,
- rp,
- &rp_locks,
- am_badsig,
- NIL,
- NULL,
- 0);
- erts_smp_proc_unlock(rp, rp_locks);
- }
+ /* badsig */
+ if (!erts_port_synchronous_ops)
+ refp = NULL;
+ flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND;
+ return bad_port_signal(c_p, flags, port, c_p->common.id, refp, am_command);
+}
+
+static ERTS_INLINE ErtsPortOpResult
+call_driver_control(Eterm caller,
+ Port *prt,
+ unsigned int command,
+ char *bufp,
+ ErlDrvSizeT size,
+ char **resp_bufp,
+ ErlDrvSizeT *from_size)
+{
+ ErlDrvSSizeT cres;
+
+ if (!prt->drv_ptr->control)
+ return ERTS_PORT_OP_BADARG;
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_control) || DTRACE_ENABLED(driver_control)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt);
+ DTRACE4(port_control, process_str, port_str, prt->name, command);
+ DTRACE5(driver_control, process_str, port_str, prt->name,
+ command, size);
}
- done:
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+#endif
+
+ prt->caller = caller;
+ cres = prt->drv_ptr->control((ErlDrvData) prt->drv_data,
+ command,
+ bufp,
+ size,
+ resp_bufp,
+ *from_size);
+ prt->caller = NIL;
+
+ if (cres < 0)
+ return ERTS_PORT_OP_BADARG;
+
+ *from_size = (ErlDrvSizeT) cres;
+
+ return ERTS_PORT_OP_DONE;
}
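
Editor's note: call_driver_control() leans on the standard driver control callback contract: a negative return value is turned into ERTS_PORT_OP_BADARG, otherwise the returned byte count and the (possibly replaced) *resp_bufp become the response that port_control_result_size()/write_port_control_result() below turn into a term. A minimal driver-side sketch of that contract, assuming the driver set PORT_CONTROL_FLAG_BINARY in its start callback; the command number and function name are made up:

    #include <string.h>
    #include "erl_driver.h"

    static ErlDrvSSizeT
    echo_control(ErlDrvData d, unsigned int command,
                 char *buf, ErlDrvSizeT len,
                 char **rbuf, ErlDrvSizeT rlen)
    {
        ErlDrvBinary *bin;
        if (command != 1)
            return (ErlDrvSSizeT) -1;      /* mapped to ERTS_PORT_OP_BADARG */
        bin = driver_alloc_binary(len);    /* reply as a binary (binary mode) */
        if (!bin)
            return (ErlDrvSSizeT) -1;
        memcpy(bin->orig_bytes, buf, len); /* echo the request back */
        *rbuf = (char *) bin;              /* consumed by write_port_control_result() */
        return (ErlDrvSSizeT) len;
    }
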
-/*
- * Control a port synchronously.
- * Returns either a list or a binary.
- */
-Eterm
-erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist)
-{
- byte* to_port = NULL; /* Buffer to write to port. */
- /* Initialization is for shutting up
- warning about use before set. */
- Uint to_len = 0; /* Length of buffer. */
- int must_free = 0; /* True if the buffer should be freed. */
- char port_result[ERL_ONHEAP_BIN_LIMIT]; /* Default buffer for result from port. */
- char* port_resp; /* Pointer to result buffer. */
- ErlDrvSSizeT n;
- ErlDrvSSizeT (*control)
- (ErlDrvData, unsigned, char*, ErlDrvSizeT, char**, ErlDrvSizeT);
- int fpe_was_unmasked;
+static void
+cleanup_scheduled_control(Binary *binp, char *bufp)
+{
+ if (binp) {
+ if (erts_refc_dectest(&binp->refc, 0) == 0)
+ erts_bin_free(binp);
+ }
+ else {
+ if (bufp)
+ erts_free(ERTS_ALC_T_DRV_CTRL_DATA, bufp);
+ }
+}
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if ((control = prt->drv_ptr->control) == NULL) {
- return THE_NON_VALUE;
+static ERTS_INLINE Uint
+port_control_result_size(int control_flags,
+ char *resp_bufp,
+ ErlDrvSizeT *resp_size,
+ char *pre_alloc_buf)
+{
+ if (!resp_bufp)
+ return (Uint) 0;
+
+ if (control_flags & PORT_CONTROL_FLAG_BINARY) {
+ if (resp_bufp != pre_alloc_buf) {
+ ErlDrvBinary *dbin = (ErlDrvBinary *) resp_bufp;
+ *resp_size = dbin->orig_size;
+ if (*resp_size > ERL_ONHEAP_BIN_LIMIT)
+ return PROC_BIN_SIZE;
+ }
+ ASSERT(*resp_size <= ERL_ONHEAP_BIN_LIMIT);
+ return (Uint) heap_bin_size((*resp_size));
}
- /*
- * Convert the iolist to a buffer, pointed to by to_port,
- * and with its length in to_len.
- */
- if (is_binary(iolist) && binary_bitoffset(iolist) == 0) {
+ return (Uint) 2*(*resp_size);
+}
+
+static ERTS_INLINE Eterm
+write_port_control_result(int control_flags,
+ char *resp_bufp,
+ ErlDrvSizeT resp_size,
+ char *pre_alloc_buf,
+ Eterm **hpp,
+ ErlHeapFragment *bp,
+ ErlOffHeap *ohp)
+{
+ Eterm res;
+ if (!resp_bufp)
+ return NIL;
+ if (control_flags & PORT_CONTROL_FLAG_BINARY) {
+ /* Binary result */
+ ErlDrvBinary *dbin;
+ ErlHeapBin *hbin;
+
+ if (resp_bufp == pre_alloc_buf)
+ dbin = NULL;
+ else {
+ dbin = (ErlDrvBinary *) resp_bufp;
+ if (dbin->orig_size > ERL_ONHEAP_BIN_LIMIT) {
+ ProcBin* pb = (ProcBin *) *hpp;
+ *hpp += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = dbin->orig_size;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header *) pb;
+ pb->val = ErlDrvBinary2Binary(dbin);
+ pb->bytes = (byte*) dbin->orig_bytes;
+ pb->flags = 0;
+ OH_OVERHEAD(ohp, dbin->orig_size / sizeof(Eterm));
+ return make_binary(pb);
+ }
+ resp_bufp = dbin->orig_bytes;
+ resp_size = dbin->orig_size;
+ }
+
+ hbin = (ErlHeapBin *) *hpp;
+ *hpp += heap_bin_size(resp_size);
+ ASSERT(resp_size <= ERL_ONHEAP_BIN_LIMIT);
+ hbin->thing_word = header_heap_bin(resp_size);
+ hbin->size = resp_size;
+ sys_memcpy(hbin->data, resp_bufp, resp_size);
+ if (dbin)
+ driver_free_binary(dbin);
+ return make_binary(hbin);
+ }
+
+ /* List result */
+ res = buf_to_intlist(hpp, resp_bufp, resp_size, NIL);
+ if (resp_bufp != pre_alloc_buf)
+ driver_free(resp_bufp);
+ return res;
+}
+
+static int
+port_sig_control(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ char resp_buf[ERL_ONHEAP_BIN_LIMIT];
+ ErlDrvSizeT resp_size = sizeof(resp_buf);
+ char *resp_bufp = &resp_buf[0];
+ ErtsPortOpResult res;
+
+ res = call_driver_control(sigdp->caller,
+ prt,
+ sigdp->u.control.command,
+ sigdp->u.control.bufp,
+ sigdp->u.control.size,
+ &resp_bufp,
+ &resp_size);
+
+ if (res == ERTS_PORT_OP_DONE) {
+ Eterm msg;
+ Eterm *hp, *hp_start;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+ Uint hsz;
+ int control_flags;
+
+ rp = erts_proc_lookup_raw(sigdp->caller);
+ if (!rp)
+ goto done;
+
+ control_flags = prt->control_flags;
+
+ hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+ hsz += port_control_result_size(control_flags,
+ resp_bufp,
+ &resp_size,
+ &resp_buf[0]);
+
+ hp_start = hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+
+ msg = write_port_control_result(control_flags,
+ resp_bufp,
+ resp_size,
+ &resp_buf[0],
+ &hp,
+ bp,
+ ohp);
+
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ sigdp->ref,
+ msg);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ goto done;
+ }
+ }
+
+ /* failure */
+
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+
+done:
+
+ cleanup_scheduled_control(sigdp->u.control.binp,
+ sigdp->u.control.bufp);
+
+ return ERTS_PORT_REDS_CONTROL;
+}
+
+
+ErtsPortOpResult
+erts_port_control(Process* c_p,
+ Port *prt,
+ unsigned int command,
+ Eterm data,
+ Eterm *retvalp)
+{
+ ErtsPortOpResult res;
+ char *bufp = NULL;
+ ErlDrvSizeT size = 0;
+ int try_call;
+ int tmp_alloced = 0;
+ erts_aint32_t sched_flags;
+ Binary *binp;
+ int copy;
+ ErtsProc2PortSigData *sigdp;
+
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sched_flags & ERTS_PTS_FLG_EXIT)
+ return ERTS_PORT_OP_BADARG;
+
+ try_call = !(sched_flags & ERTS_PTS_FLGS_FORCE_SCHEDULE_OP);
+
+ if (is_binary(data) && binary_bitoffset(data) == 0) {
+ byte *bytep;
ERTS_DECLARE_DUMMY(Uint bitoffs);
ERTS_DECLARE_DUMMY(Uint bitsize);
- ERTS_GET_BINARY_BYTES(iolist, to_port, bitoffs, bitsize);
- to_len = binary_size(iolist);
+ ERTS_GET_BINARY_BYTES(data, bytep, bitoffs, bitsize);
+ bufp = (char *) bytep;
+ size = binary_size(data);
} else {
int r;
- /* Try with an 8KB buffer first (will often be enough I guess). */
- to_len = 8*1024;
- to_port = erts_alloc(ERTS_ALC_T_TMP, to_len);
- must_free = 1;
+ if (!try_call) {
+ if (erts_iolist_size(data, &size))
+ return ERTS_PORT_OP_BADARG;
+ bufp = erts_alloc(ERTS_ALC_T_DRV_CTRL_DATA, size);
+ r = erts_iolist_to_buf(data, bufp, size);
+ ASSERT(r == 0);
+ }
+ else {
+ /* Try with an 8KB buffer first (will often be enough I guess). */
+ size = 8*1024;
+ bufp = erts_alloc(ERTS_ALC_T_TMP, size);
+ tmp_alloced = 1;
+
+ r = erts_iolist_to_buf(data, bufp, size);
+ if (ERTS_IOLIST_TO_BUF_SUCCEEDED(r)) {
+ size -= r;
+ } else {
+ if (r == ERTS_IOLIST_TO_BUF_TYPE_ERROR) { /* Type error */
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ return ERTS_PORT_OP_BADARG;
+ }
+ else {
+ ASSERT(r == ERTS_IOLIST_TO_BUF_OVERFLOW); /* Overflow */
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ if (erts_iolist_size(data, &size))
+ return ERTS_PORT_OP_BADARG; /* Type error */
+ }
+ bufp = erts_alloc(ERTS_ALC_T_TMP, size);
+ r = erts_iolist_to_buf(data, bufp, size);
+ ASSERT(r == 0);
+ }
+ }
+ }
- /*
- * In versions before R10B, we used to reserve random
- * amounts of extra memory. From R10B, we allocate the
- * exact amount.
- */
- r = io_list_to_buf(iolist, (char*) to_port, to_len);
- if (r >= 0) {
- to_len -= r;
- } else if (r == -2) { /* Type error */
- erts_free(ERTS_ALC_T_TMP, (void *) to_port);
- return THE_NON_VALUE;
- } else {
- ASSERT(r == -1); /* Overflow */
- erts_free(ERTS_ALC_T_TMP, (void *) to_port);
- if (erts_iolist_size(iolist, &to_len)) { /* Type error */
- return THE_NON_VALUE;
+ if (try_call) {
+ char resp_buf[ERL_ONHEAP_BIN_LIMIT];
+ char* resp_bufp = &resp_buf[0];
+ ErlDrvSizeT resp_size = sizeof(resp_buf);
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ 0,
+ am_control);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ Eterm *hp;
+ Uint hsz;
+ int control_flags;
+
+ res = call_driver_control(c_p->common.id,
+ prt,
+ command,
+ bufp,
+ size,
+ &resp_bufp,
+ &resp_size);
+ finalize_imm_drv_call(&try_call_state);
+ if (tmp_alloced)
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ if (res == ERTS_PORT_OP_BADARG) {
+ return ERTS_PORT_OP_BADARG;
}
- must_free = 1;
- to_port = erts_alloc(ERTS_ALC_T_TMP, to_len);
- r = io_list_to_buf(iolist, (char*) to_port, to_len);
- ASSERT(r == 0);
+
+ control_flags = prt->control_flags;
+
+ hsz = port_control_result_size(control_flags,
+ resp_bufp,
+ &resp_size,
+ &resp_buf[0]);
+ hp = HAlloc(c_p, hsz);
+ *retvalp = write_port_control_result(control_flags,
+ resp_bufp,
+ resp_size,
+ &resp_buf[0],
+ &hp,
+ NULL,
+ &c_p->off_heap);
+ return ERTS_PORT_OP_DONE;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ if (tmp_alloced)
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ return ERTS_PORT_OP_BADARG;
+ default:
+ /* Schedule control() call instead... */
+ break;
}
}
- prt->caller = p->id; /* Internal pid */
+ /* Convert data into something that can be scheduled */
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ copy = tmp_alloced;
+
+ binp = NULL;
+
+ if (is_binary(data) && binary_bitoffset(data) == 0) {
+ Eterm *ebinp = binary_val_rel(data, NULL);
+ ASSERT(!tmp_alloced);
+ if (*ebinp == HEADER_SUB_BIN)
+ ebinp = binary_val_rel(((ErlSubBin *) ebinp)->orig, NULL);
+ if (*ebinp != HEADER_PROC_BIN)
+ copy = 1;
+ else {
+ binp = ((ProcBin *) ebinp)->val;
+ ASSERT(bufp < bufp + size);
+ ASSERT(binp->orig_bytes <= bufp
+ && bufp + size <= binp->orig_bytes + binp->orig_size);
+ erts_refc_inc(&binp->refc, 1);
+ }
+ }
+
+ if (copy) {
+ char *old_bufp = bufp;
+ bufp = erts_alloc(ERTS_ALC_T_DRV_CTRL_DATA, size);
+ sys_memcpy(bufp, old_bufp, size);
+ if (tmp_alloced)
+ erts_free(ERTS_ALC_T_TMP, old_bufp);
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_CONTROL;
+ sigdp->u.control.binp = binp;
+ sigdp->u.control.command = command;
+ sigdp->u.control.bufp = bufp;
+ sigdp->u.control.size = size;
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ retvalp,
+ sigdp,
+ 0,
+ port_sig_control);
+ if (res != ERTS_PORT_OP_SCHEDULED) {
+ cleanup_scheduled_control(binp, bufp);
+ return ERTS_PORT_OP_BADARG;
+ }
+ return res;
+}
+
+static ERTS_INLINE ErtsPortOpResult
+call_driver_call(Eterm caller,
+ Port *prt,
+ unsigned int command,
+ char *bufp,
+ ErlDrvSizeT size,
+ char **resp_bufp,
+ ErlDrvSizeT *from_size,
+ unsigned *ret_flagsp)
+{
+ ErlDrvSSizeT cres;
+
+ if (!prt->drv_ptr->call)
+ return ERTS_PORT_OP_BADARG;
#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(port_control) || DTRACE_ENABLED(driver_control)) {
- DTRACE_FORMAT_COMMON_PROC_AND_PORT(p, prt);
- DTRACE4(port_control, process_str, port_str, prt->name, command);
- DTRACE5(driver_control, process_str, port_str, prt->name,
- command, to_len);
+ if (DTRACE_ENABLED(driver_call)) {
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_pid_str(caller, process_str);
+ dtrace_port_str(prt, port_str);
+ DTRACE5(driver_call, process_str, port_str, prt->name, command, size);
}
#endif
- /*
- * Call the port's control routine.
- */
+ prt->caller = caller;
+ cres = prt->drv_ptr->call((ErlDrvData) prt->drv_data,
+ command,
+ bufp,
+ size,
+ resp_bufp,
+ *from_size,
+ ret_flagsp);
+ prt->caller = NIL;
- port_resp = port_result;
- fpe_was_unmasked = erts_block_fpe();
- n = control((ErlDrvData)prt->drv_data, command, (char*)to_port, to_len,
- &port_resp, sizeof(port_result));
- erts_unblock_fpe(fpe_was_unmasked);
- if (must_free) {
- erts_free(ERTS_ALC_T_TMP, (void *) to_port);
+ if (cres <= 0
+ || ((byte) (*resp_bufp)[0]) != VERSION_MAGIC)
+ return ERTS_PORT_OP_BADARG;
+
+ *from_size = (ErlDrvSizeT) cres;
+
+ return ERTS_PORT_OP_DONE;
+}
+
+
+static
+void cleanup_scheduled_call(char *bufp)
+{
+ if (bufp)
+ erts_free(ERTS_ALC_T_DRV_CALL_DATA, bufp);
+}
+
+static int
+port_sig_call(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ char resp_buf[256];
+ ErlDrvSizeT resp_size = sizeof(resp_buf);
+ char *resp_bufp = &resp_buf[0];
+ unsigned ret_flags = 0U;
+
+
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ ErtsPortOpResult res;
+
+ res = call_driver_call(sigdp->caller,
+ prt,
+ sigdp->u.call.command,
+ sigdp->u.call.bufp,
+ sigdp->u.call.size,
+ &resp_bufp,
+ &resp_size,
+ &ret_flags);
+
+ if (res == ERTS_PORT_OP_DONE) {
+ Eterm msg;
+ Eterm *hp;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+ Uint hsz;
+
+ rp = erts_proc_lookup_raw(sigdp->caller);
+ if (!rp)
+ goto done;
+
+ hsz = erts_decode_ext_size((byte *) resp_bufp, resp_size);
+ if (hsz >= 0) {
+ Eterm *hp_start;
+ byte *endp;
+
+ hsz += 3; /* ok tuple */
+ hsz += ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+
+ hp_start = hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+ endp = (byte *) resp_bufp;
+ msg = erts_decode_ext(&hp, ohp, &endp);
+ if (is_value(msg)) {
+ msg = TUPLE2(hp, am_ok, msg);
+ hp += 3;
+
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ sigdp->ref,
+ msg);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ goto done;
+ }
+ if (bp)
+ free_message_buffer(bp);
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ }
}
- prt->caller = NIL;
-#ifdef ERTS_SMP
- if (prt->xports)
- erts_smp_xports_unlock(prt);
- ASSERT(!prt->xports);
-#endif
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- /*
- * Handle the result.
- */
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+
+done:
- if (n < 0) {
- return THE_NON_VALUE;
+ if (resp_bufp != &resp_buf[0] && !(ret_flags & DRIVER_CALL_KEEP_BUFFER))
+ driver_free(resp_bufp);
+
+ cleanup_scheduled_call(sigdp->u.call.bufp);
+
+ return ERTS_PORT_REDS_CALL;
+}
+
+
+ErtsPortOpResult
+erts_port_call(Process* c_p,
+ Port *prt,
+ unsigned int command,
+ Eterm data,
+ Eterm *retvalp)
+{
+ ErtsPortOpResult res;
+ char input_buf[256];
+ char *bufp;
+ byte *endp;
+ ErlDrvSizeT size;
+ int try_call;
+ erts_aint32_t sched_flags;
+ ErtsProc2PortSigData *sigdp;
+
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sched_flags & ERTS_PTS_FLG_EXIT) {
+ return ERTS_PORT_OP_BADARG;
}
- if ((prt->control_flags & PORT_CONTROL_FLAG_BINARY) == 0) { /* List result */
- Eterm ret;
- Eterm* hp = HAlloc(p, 2*n);
- ret = buf_to_intlist(&hp, port_resp, n, NIL);
- if (port_resp != port_result) {
- driver_free(port_resp);
+ try_call = !(sched_flags & ERTS_PTS_FLGS_FORCE_SCHEDULE_OP);
+
+ size = erts_encode_ext_size(data);
+
+ if (!try_call)
+ bufp = erts_alloc(ERTS_ALC_T_DRV_CALL_DATA, size);
+ else if (size <= sizeof(input_buf))
+ bufp = &input_buf[0];
+ else
+ bufp = erts_alloc(ERTS_ALC_T_TMP, size);
+
+ endp = (byte *) bufp;
+ erts_encode_ext(data, &endp);
+
+ if (endp - (byte *) bufp > size)
+ ERTS_INTERNAL_ERROR("erts_internal:port_call() - Buffer overflow");
+
+ size = endp - (byte *) bufp;
+
+ if (try_call) {
+ char resp_buf[255];
+ char* resp_bufp = &resp_buf[0];
+ ErlDrvSizeT resp_size = sizeof(resp_buf);
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ 0,
+ am_call);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ Eterm *hp, *hp_end;
+ Uint hsz;
+ unsigned ret_flags = 0U;
+ Eterm term;
+
+ res = call_driver_call(c_p->common.id,
+ prt,
+ command,
+ bufp,
+ size,
+ &resp_bufp,
+ &resp_size,
+ &ret_flags);
+
+ finalize_imm_drv_call(&try_call_state);
+ if (bufp != &input_buf[0])
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ if (res == ERTS_PORT_OP_BADARG)
+ return ERTS_PORT_OP_BADARG;
+ hsz = erts_decode_ext_size((byte *) resp_bufp, resp_size);
+ if (hsz < 0)
+ return ERTS_PORT_OP_BADARG;
+ hsz += 3;
+ hp = HAlloc(c_p, hsz);
+ hp_end = hp + hsz;
+ endp = (byte *) resp_bufp;
+ term = erts_decode_ext(&hp, &MSO(c_p), &endp);
+ if (term == THE_NON_VALUE)
+ return ERTS_PORT_OP_BADARG;
+ *retvalp = TUPLE2(hp, am_ok, term);
+ hp += 3;
+ HRelease(c_p, hp_end, hp);
+ if (resp_bufp != &resp_buf[0]
+ && !(ret_flags & DRIVER_CALL_KEEP_BUFFER))
+ driver_free(resp_bufp);
+ return ERTS_PORT_OP_DONE;
}
- return ret;
- }
- else if (port_resp == NULL) {
- return NIL;
- }
- else { /* Binary result */
- ErlDrvBinary *dbin;
- ErlHeapBin *hbin;
- if (port_resp != port_result) {
- dbin = (ErlDrvBinary *) port_resp;
- if (dbin->orig_size > ERL_ONHEAP_BIN_LIMIT) {
- ProcBin* pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = dbin->orig_size;
- pb->next = MSO(p).first;
- MSO(p).first = (struct erl_off_heap_header*)pb;
- pb->val = ErlDrvBinary2Binary(dbin);
- pb->bytes = (byte*) dbin->orig_bytes;
- pb->flags = 0;
- OH_OVERHEAD(&(MSO(p)), dbin->orig_size / sizeof(Eterm));
- return make_binary(pb);
- }
- port_resp = dbin->orig_bytes;
- n = dbin->orig_size;
- } else {
- dbin = NULL;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ if (bufp != &input_buf[0])
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ return ERTS_PORT_OP_BADARG;
+ default:
+ /* Schedule call() call instead... */
+ break;
}
- hbin = (ErlHeapBin*) HAlloc(p, heap_bin_size(n));
- ASSERT(n <= ERL_ONHEAP_BIN_LIMIT);
- hbin->thing_word = header_heap_bin(n);
- hbin->size = n;
- sys_memcpy(hbin->data, port_resp, n);
- if (dbin != NULL) {
- driver_free_binary(dbin);
+ }
+
+ /* Convert data into something that can be scheduled */
+
+ if (bufp == &input_buf[0] || try_call) {
+ char *new_bufp = erts_alloc(ERTS_ALC_T_DRV_CALL_DATA, size);
+ sys_memcpy(new_bufp, bufp, size);
+ if (bufp != &input_buf[0])
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ bufp = new_bufp;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_CALL;
+ sigdp->u.call.command = command;
+ sigdp->u.call.bufp = bufp;
+ sigdp->u.call.size = size;
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ retvalp,
+ sigdp,
+ 0,
+ port_sig_call);
+ if (res != ERTS_PORT_OP_SCHEDULED) {
+ cleanup_scheduled_call(bufp);
+ return ERTS_PORT_OP_BADARG;
+ }
+ return res;
+}
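
Editor's note: erts_port_call() encodes its argument with erts_encode_ext() and, per the VERSION_MAGIC check in call_driver_call(), requires the driver's call callback to answer in the external term format as well. A hedged driver-side sketch using the ei library (the command handling and function name are invented for illustration):

    #include <string.h>
    #include "erl_driver.h"
    #include "ei.h"

    /* Answers every erlang:port_call/3 with the atom 'ok', encoded in the
     * external term format (leading VERSION_MAGIC) that call_driver_call()
     * above insists on. */
    static ErlDrvSSizeT
    echo_call(ErlDrvData d, unsigned int command,
              char *buf, ErlDrvSizeT len,
              char **rbuf, ErlDrvSizeT rlen, unsigned int *flags)
    {
        ei_x_buff x;
        ErlDrvSSizeT n;
        if (ei_x_new_with_version(&x) < 0 || ei_x_encode_atom(&x, "ok") < 0)
            return (ErlDrvSSizeT) -1;              /* treated as badarg */
        n = (ErlDrvSSizeT) x.index;
        if ((ErlDrvSizeT) n > rlen) {
            *rbuf = driver_alloc(n);               /* freed by the emulator,
                                                      DRIVER_CALL_KEEP_BUFFER not set */
            if (!*rbuf) {
                ei_x_free(&x);
                return (ErlDrvSSizeT) -1;
            }
        }
        memcpy(*rbuf, x.buff, n);
        ei_x_free(&x);
        return n;
    }
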
+
+static Eterm
+make_port_info_term(Eterm **hpp_start,
+ Eterm **hpp,
+ Uint *hszp,
+ ErlHeapFragment **bpp,
+ Port *prt,
+ Eterm item)
+{
+ ErlOffHeap *ohp;
+
+ if (is_value(item)) {
+ if (erts_bld_port_info(NULL, NULL, hszp, prt, item) == am_false)
+ return THE_NON_VALUE;
+ if (*hszp) {
+ *bpp = new_message_buffer(*hszp);
+ *hpp_start = *hpp = (*bpp)->mem;
+ ohp = &(*bpp)->off_heap;
}
- return make_binary(hbin);
+ else {
+ *bpp = NULL;
+ *hpp_start = *hpp = NULL;
+ ohp = NULL;
+ }
+ return erts_bld_port_info(hpp, ohp, NULL, prt, item);
+ }
+ else {
+ int i;
+ int len;
+ int start;
+ static Eterm item[] = ERTS_PORT_INFO_1_ITEMS;
+ static Eterm value[sizeof(item)/sizeof(item[0])];
+
+ start = 0;
+ len = sizeof(item)/sizeof(item[0]);
+
+ for (i = start; i < sizeof(item)/sizeof(item[0]); i++) {
+ ASSERT(is_atom(item[i]));
+ value[i] = erts_bld_port_info(NULL, NULL, hszp, prt, item[i]);
+ }
+
+ if (value[0] == am_undefined) {
+ start++;
+ len--;
+ }
+
+ erts_bld_list(NULL, hszp, len, &value[start]);
+
+ *bpp = new_message_buffer(*hszp);
+ *hpp_start = *hpp = (*bpp)->mem;
+ ohp = &(*bpp)->off_heap;
+
+ for (i = start; i < sizeof(item)/sizeof(item[0]); i++)
+ value[i] = erts_bld_port_info(hpp, ohp, NULL, prt, item[i]);
+
+ return erts_bld_list(hpp, NULL, len, &value[start]);
+ }
+}
+
+static int
+port_sig_info(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+ if (op != ERTS_PROC2PORT_SIG_EXEC)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_undefined);
+ else {
+ Eterm *hp, *hp_start;
+ Uint hsz;
+ ErlHeapFragment *bp;
+ Eterm value;
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+
+ rp = erts_proc_lookup_raw(sigdp->caller);
+ if (!rp)
+ return ERTS_PORT_REDS_INFO;
+
+ hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+ value = make_port_info_term(&hp_start,
+ &hp,
+ &hsz,
+ &bp,
+ prt,
+ sigdp->u.info.item);
+ if (is_value(value)) {
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ sigdp->ref,
+ value);
+ }
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ return ERTS_PORT_REDS_INFO;
+}
+
+ErtsPortOpResult
+erts_port_info(Process* c_p,
+ Port *prt,
+ Eterm item,
+ Eterm *retvalp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ 0,
+ am_info);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ Eterm *hp, *hp_start;
+ ErlHeapFragment *bp;
+ Uint hsz = 0;
+ Eterm value = make_port_info_term(&hp_start, &hp, &hsz, &bp, prt, item);
+ finalize_imm_drv_call(&try_call_state);
+ if (is_non_value(value))
+ return ERTS_PORT_OP_BADARG;
+ else if (is_immed(value))
+ *retvalp = value;
+ else {
+ Uint used_h_size = hp - hp_start;
+ hp = HAlloc(c_p, used_h_size);
+ *retvalp = copy_struct(value, used_h_size, &hp, &MSO(c_p));
+ free_message_buffer(bp);
+ }
+ return ERTS_PORT_OP_DONE;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule call instead... */
+ break;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_INFO;
+ sigdp->u.info.item = item;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ retvalp,
+ sigdp,
+ 0,
+ port_sig_info);
+}
+
+static int
+port_sig_set_data(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ if (prt->bp)
+ free_message_buffer(prt->bp);
+ prt->bp = sigdp->u.set_data.bp;
+ prt->data = sigdp->u.set_data.data;
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ }
+ else {
+ if (sigdp->u.set_data.bp)
+ free_message_buffer(sigdp->u.set_data.bp);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ }
+ return ERTS_PORT_REDS_SET_DATA;
+}
+
+ErtsPortOpResult
+erts_port_set_data(Process* c_p,
+ Port *prt,
+ Eterm data,
+ Eterm *refp)
+{
+ ErtsPortOpResult res;
+ Eterm set_data;
+ ErlHeapFragment *bp;
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ am_set_data);
+
+ if (is_immed(data)) {
+ set_data = data;
+ bp = NULL;
+ }
+ else {
+ Eterm *hp;
+ Uint sz = size_object(data);
+ bp = new_message_buffer(sz);
+ hp = bp->mem;
+ set_data = copy_struct(data, sz, &hp, &bp->off_heap);
+ }
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ if (prt->bp)
+ free_message_buffer(prt->bp);
+ prt->bp = bp;
+ prt->data = set_data;
+ finalize_imm_drv_call(&try_call_state);
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule call instead... */
+ break;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_SET_DATA;
+ sigdp->u.set_data.data = set_data;
+ sigdp->u.set_data.bp = bp;
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ refp,
+ sigdp,
+ 0,
+ port_sig_set_data);
+ if (res != ERTS_PORT_OP_SCHEDULED && bp)
+ free_message_buffer(bp);
+ return res;
+}
+
+static int
+port_sig_get_data(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+ if (op != ERTS_PROC2PORT_SIG_EXEC)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ else {
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+
+ rp = erts_proc_lookup_raw(sigdp->caller);
+ if (rp) {
+ Uint hsz;
+ Eterm *hp, *hp_start;
+ Eterm data, msg;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+
+ hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+ hsz += 3;
+ if (prt->bp)
+ hsz += prt->bp->used_size;
+
+ hp_start = hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+
+ if (is_immed(prt->data))
+ data = prt->data;
+ else
+ data = copy_struct(prt->data,
+ prt->bp->used_size,
+ &hp,
+ &bp->off_heap);
+
+ msg = TUPLE2(hp, am_ok, data);
+ hp += 3;
+
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ sigdp->ref,
+ msg);
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ }
+ return ERTS_PORT_REDS_GET_DATA;
+}
+
+ErtsPortOpResult
+erts_port_get_data(Process* c_p,
+ Port *prt,
+ Eterm *retvalp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ 0,
+ am_get_data);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ Eterm *hp;
+ Eterm data;
+ ErlHeapFragment *bp;
+ Uint sz;
+ if (is_immed(prt->data)) {
+ bp = NULL;
+ data = prt->data;
+ }
+ else {
+ bp = new_message_buffer(prt->bp->used_size);
+ data = copy_struct(prt->data,
+ prt->bp->used_size,
+ &hp,
+ &bp->off_heap);
+ }
+ finalize_imm_drv_call(&try_call_state);
+ if (is_immed(data))
+ sz = 0;
+ else
+ sz = bp->used_size;
+
+ hp = HAlloc(c_p, sz + 3);
+ if (is_not_immed(data)) {
+ data = copy_struct(data, bp->used_size, &hp, &MSO(c_p));
+ free_message_buffer(bp);
+ }
+ *retvalp = TUPLE2(hp, am_ok, data);
+ return ERTS_PORT_OP_DONE;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule call instead... */
+ break;
}
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_GET_DATA;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ retvalp,
+ sigdp,
+ 0,
+ port_sig_get_data);
}
typedef struct {
@@ -2480,39 +4677,39 @@ static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
}
void
-print_port_info(int to, void *arg, int i)
+print_port_info(Port *p, int to, void *arg)
{
- Port* p = &erts_port[i];
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
- if (p->status & ERTS_PORT_SFLGS_DEAD)
+ if (state & ERTS_PORT_SFLGS_DEAD)
return;
- erts_print(to, arg, "=port:%T\n", p->id);
- erts_print(to, arg, "Slot: %d\n", i);
- if (p->status & ERTS_PORT_SFLG_CONNECTED) {
- erts_print(to, arg, "Connected: %T", p->connected);
+ erts_print(to, arg, "=port:%T\n", p->common.id);
+ erts_print(to, arg, "Slot: %d\n", internal_port_index(p->common.id));
+ if (state & ERTS_PORT_SFLG_CONNECTED) {
+ erts_print(to, arg, "Connected: %T", ERTS_PORT_GET_CONNECTED(p));
erts_print(to, arg, "\n");
}
- if (p->nlinks != NULL) {
+ if (ERTS_P_LINKS(p)) {
prt_one_lnk_data prtd;
prtd.to = to;
prtd.arg = arg;
erts_print(to, arg, "Links: ");
- erts_doforall_links(p->nlinks, &prt_one_lnk, &prtd);
+ erts_doforall_links(ERTS_P_LINKS(p), &prt_one_lnk, &prtd);
erts_print(to, arg, "\n");
}
- if (p->monitors != NULL) {
+ if (ERTS_P_MONITORS(p)) {
prt_one_lnk_data prtd;
prtd.to = to;
prtd.arg = arg;
erts_print(to, arg, "Monitors: ");
- erts_doforall_monitors(p->monitors, &prt_one_monitor, &prtd);
+ erts_doforall_monitors(ERTS_P_MONITORS(p), &prt_one_monitor, &prtd);
erts_print(to, arg, "\n");
}
- if (p->reg != NULL)
- erts_print(to, arg, "Registered as: %T\n", p->reg->name);
+ if (p->common.u.alive.reg != NULL)
+ erts_print(to, arg, "Registered as: %T\n", p->common.u.alive.reg->name);
if (p->drv_ptr == &fd_driver) {
erts_print(to, arg, "Port is UNIX fd not opened by emulator: %s\n", p->name);
@@ -2526,109 +4723,143 @@ print_port_info(int to, void *arg, int i)
}
void
-set_busy_port(ErlDrvPort port_num, int on)
+set_busy_port(ErlDrvPort dprt, int on)
{
+ Port *prt;
+ erts_aint32_t flags;
+
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(port_str, 16);
#endif
ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[port_num]));
+ prt = erts_drvport2port_raw(dprt);
+ if (!prt)
+ return;
if (on) {
- erts_port_status_bor_set(&erts_port[port_num],
- ERTS_PORT_SFLG_PORT_BUSY);
+ flags = erts_smp_atomic32_read_bor_acqb(&prt->sched.flags,
+ ERTS_PTS_FLG_BUSY_PORT);
+ if (flags & ERTS_PTS_FLG_BUSY_PORT)
+ return; /* Already busy */
+
+ if (flags & ERTS_PTS_FLG_HAVE_NS_TASKS)
+ erts_port_task_abort_nosuspend_tasks(prt);
+
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_busy)) {
erts_snprintf(port_str, sizeof(port_str),
- "%T", erts_port[port_num].id);
+ "%T", prt->common.id);
DTRACE1(port_busy, port_str);
}
#endif
} else {
- ErtsProcList* plp = erts_port[port_num].suspended;
- erts_port_status_band_set(&erts_port[port_num],
- ~ERTS_PORT_SFLG_PORT_BUSY);
- erts_port[port_num].suspended = NULL;
+ flags = erts_smp_atomic32_read_band_acqb(&prt->sched.flags,
+ ~ERTS_PTS_FLG_BUSY_PORT);
+ if (!(flags & ERTS_PTS_FLG_BUSY_PORT))
+ return; /* Already non-busy */
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_not_busy)) {
erts_snprintf(port_str, sizeof(port_str),
- "%T", erts_port[port_num].id);
+ "%T", prt->common.id);
DTRACE1(port_not_busy, port_str);
}
#endif
- if (erts_port[port_num].dist_entry) {
+ if (prt->dist_entry) {
/*
* Processes suspended on distribution ports are
* normally queued on the dist entry.
*/
- erts_dist_port_not_busy(&erts_port[port_num]);
+ erts_dist_port_not_busy(prt);
}
- /*
- * Resume, in a round-robin fashion, all processes waiting on the port.
- *
- * This version submitted by Tony Rogvall. The earlier version used
- * to resume the processes in order, which caused starvation of all but
- * the first process.
- */
+ if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q))
+ erts_port_resume_procs(prt);
+ }
+}
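
Editor's note: set_busy_port() now drives the ERTS_PTS_FLG_BUSY_PORT flag on the port's task scheduler instead of a port status bit, but the driver-facing usage is unchanged: raise the flag when the driver queue grows past a threshold, drop it (which ends up in erts_port_resume_procs() below) once the queue drains. A hedged sketch of that watermark pattern; the thresholds, callback names and the assumption that start() returned the ErlDrvPort as driver data are all made up:

    #include "erl_driver.h"

    #define HIGH_WATERMARK (128 * 1024)   /* invented thresholds */
    #define LOW_WATERMARK  (16 * 1024)

    static void my_output(ErlDrvData d, char *buf, ErlDrvSizeT len)
    {
        ErlDrvPort port = (ErlDrvPort) d;  /* assumes start() stored the port */
        driver_enq(port, buf, len);        /* queue the data for ready_output */
        if (driver_sizeq(port) >= HIGH_WATERMARK)
            set_busy_port(port, 1);        /* suspend senders */
    }

    static void my_ready_output(ErlDrvData d, ErlDrvEvent ev)
    {
        ErlDrvPort port = (ErlDrvPort) d;
        /* ... write queued data to 'ev' and driver_deq() what was sent ... */
        if (driver_sizeq(port) <= LOW_WATERMARK)
            set_busy_port(port, 0);        /* resumes suspended processes */
    }
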
+
+void
+erts_port_resume_procs(Port *prt)
+{
+ /*
+ * Resume, in a round-robin fashion, all processes waiting on the port.
+ *
+ * This version submitted by Tony Rogvall. The earlier version used
+ * to resume the processes in order, which caused starvation of all but
+ * the first process.
+ */
+ ErtsProcList *plp;
+
+ erts_port_task_sched_lock(&prt->sched);
+ plp = prt->suspended;
+ prt->suspended = NULL;
+ erts_port_task_sched_unlock(&prt->sched);
+
+ if (erts_proclist_fetch(&plp, NULL)) {
- if (plp) {
#ifdef USE_VM_PROBES
- /*
- * Hrm, for blocked dist ports, plp always seems to be NULL.
- * That's not so fun.
- * Well, another way to get the same info is using a D
- * script to correlate an earlier process-port_blocked+pid
- * event with a later process-scheduled event. That's
- * subject to the multi-CPU races with how events are
- * handled, but hey, that way works most of the time.
- */
- if (DTRACE_ENABLED(process_port_unblocked)) {
- DTRACE_CHARBUF(pid_str, 16);
- ErtsProcList* plp2 = plp;
-
- erts_snprintf(port_str, sizeof(port_str),
- "%T", erts_port[port_num]);
- while (plp2 != NULL) {
- erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid);
- DTRACE2(process_port_unblocked, pid_str, port_str);
- }
- }
-#endif
- /* First proc should be resumed last */
- if (plp->next) {
- erts_resume_processes(plp->next);
- plp->next = NULL;
+ /*
+ * Hrm, for blocked dist ports, plp always seems to be NULL.
+ * That's not so fun.
+ * Well, another way to get the same info is using a D
+ * script to correlate an earlier process-port_blocked+pid
+ * event with a later process-scheduled event. That's
+ * subject to the multi-CPU races with how events are
+ * handled, but hey, that way works most of the time.
+ */
+ if (DTRACE_ENABLED(process_port_unblocked)) {
+ DTRACE_CHARBUF(port_str, 16);
+ DTRACE_CHARBUF(pid_str, 16);
+ ErtsProcList* plp2 = plp;
+
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
+ while (plp2 != NULL) {
+ erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid);
+ DTRACE2(process_port_unblocked, pid_str, port_str);
}
- erts_resume_processes(plp);
- }
+ }
+#endif
+
+ /* First proc should be resumed last */
+ if (plp->next) {
+ plp->next->prev = NULL;
+ erts_resume_processes(plp->next);
+ plp->next = NULL;
+ }
+ erts_resume_processes(plp);
}
}
void set_port_control_flags(ErlDrvPort port_num, int flags)
{
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[port_num]));
-
- erts_port[port_num].control_flags = flags;
+ Port *prt = erts_drvport2port_raw(port_num);
+ if (prt)
+ prt->control_flags = flags;
}
-int get_port_flags(ErlDrvPort ix) {
- Port* prt = erts_drvport2port(ix);
+int get_port_flags(ErlDrvPort ix)
+{
+ int flags;
+ Port *prt;
+ erts_aint32_t state;
+
+ prt = erts_drvport2port(ix, &state);
+ if (!prt)
+ return 0;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (prt == NULL)
- return 0;
+ flags = 0;
+ if (state & ERTS_PORT_SFLG_BINARY_IO)
+ flags |= PORT_FLAG_BINARY;
+ if (state & ERTS_PORT_SFLG_LINEBUF_IO)
+ flags |= PORT_FLAG_LINE;
- return (prt->status & ERTS_PORT_SFLG_BINARY_IO ? PORT_FLAG_BINARY : 0)
- | (prt->status & ERTS_PORT_SFLG_LINEBUF_IO ? PORT_FLAG_LINE : 0);
+ return flags;
}
-
void erts_raw_port_command(Port* p, byte* buf, Uint len)
{
int fpe_was_unmasked;
@@ -2665,25 +4896,18 @@ int async_ready(Port *p, void* data)
if (p) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
- ASSERT(!(p->status & ERTS_PORT_SFLGS_DEAD));
if (p->drv_ptr->ready_async != NULL) {
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_ready_async)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(p->connected, p)
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p)
DTRACE3(driver_ready_async, process_str, port_str, p->name);
}
#endif
(*p->drv_ptr->ready_async)((ErlDrvData)p->drv_data, data);
need_free = 0;
-#ifdef ERTS_SMP
- if (p->xports)
- erts_smp_xports_unlock(p);
- ASSERT(!p->xports);
-#endif
- }
- if ((p->status & ERTS_PORT_SFLG_CLOSING) && is_port_ioq_empty(p)) {
- terminate_port(p);
+
}
+ erts_port_driver_callback_epilogue(p, NULL);
}
return need_free;
}
@@ -2691,12 +4915,12 @@ int async_ready(Port *p, void* data)
static void
report_missing_drv_callback(Port *p, char *drv_type, char *callback)
{
- ErtsPortNames *pnp = erts_get_port_names(p->id);
+ ErtsPortNames *pnp = erts_get_port_names(p->common.id);
char *unknown = "<unknown>";
char *drv_name = pnp->driver_name ? pnp->driver_name : unknown;
char *prt_name = pnp->name ? pnp->name : unknown;
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "%T: %s driver '%s' ", p->id, drv_type, drv_name);
+ erts_dsprintf(dsbufp, "%T: %s driver '%s' ", p->common.id, drv_type, drv_name);
if (sys_strcmp(drv_name, prt_name) != 0)
erts_dsprintf(dsbufp, "(%s) ", prt_name);
erts_dsprintf(dsbufp, "does not implement the %s callback!\n", callback);
@@ -2711,7 +4935,7 @@ erts_stale_drv_select(Eterm port,
int deselect)
{
char *type;
- ErlDrvPort drv_port = internal_port_index(port);
+ ErlDrvPort drv_port = (ErlDrvPort) erts_port_lookup_raw(port);
ErtsPortNames *pnp = erts_get_port_names(port);
erts_dsprintf_buf_t *dsbufp;
@@ -2751,16 +4975,16 @@ erts_stale_drv_select(Eterm port,
ErtsPortNames *
erts_get_port_names(Eterm id)
{
+ Port *prt = erts_port_lookup_raw(id);
ErtsPortNames *pnp;
ASSERT(is_nil(id) || is_internal_port(id));
-
- if (is_not_internal_port(id)) {
+
+ if (!prt) {
pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, sizeof(ErtsPortNames));
pnp->name = NULL;
pnp->driver_name = NULL;
}
else {
- Port* prt = &erts_port[internal_port_index(id)];
int do_realloc = 1;
int len = -1;
size_t pnp_len = sizeof(ErtsPortNames);
@@ -2776,17 +5000,10 @@ erts_get_port_names(Eterm id)
pnp_len = sizeof(ErtsPortNames) + len;
pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, pnp_len);
}
- erts_smp_port_state_lock(prt);
- if (id != prt->id) {
- len = nlen = 0;
- name = driver_name = NULL;
- }
- else {
- name = prt->name;
- len = nlen = name ? sys_strlen(name) + 1 : 0;
- driver_name = (prt->drv_ptr ? prt->drv_ptr->name : NULL);
- len += driver_name ? sys_strlen(driver_name) + 1 : 0;
- }
+ name = prt->name;
+ len = nlen = name ? sys_strlen(name) + 1 : 0;
+ driver_name = (prt->drv_ptr ? prt->drv_ptr->name : NULL);
+ len += driver_name ? sys_strlen(driver_name) + 1 : 0;
if (len <= pnp_len - sizeof(ErtsPortNames)) {
if (!name)
pnp->name = NULL;
@@ -2804,7 +5021,6 @@ erts_get_port_names(Eterm id)
}
do_realloc = 0;
}
- erts_smp_port_state_unlock(prt);
} while (do_realloc);
}
return pnp;
@@ -2829,11 +5045,9 @@ static void schedule_port_timeout(Port *p)
* /Rickard
*/
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
- (void) erts_port_task_schedule(p->id,
- &p->timeout_task,
- ERTS_PORT_TASK_TIMEOUT,
- (ErlDrvEvent) -1,
- NULL);
+ erts_port_task_schedule(p->common.id,
+ &p->timeout_task,
+ ERTS_PORT_TASK_TIMEOUT);
}
ErlDrvTermData driver_mk_term_nil(void)
@@ -2841,9 +5055,9 @@ ErlDrvTermData driver_mk_term_nil(void)
return driver_term_nil;
}
-void driver_report_exit(int ix, int status)
+void driver_report_exit(ErlDrvPort ix, int status)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
Eterm* hp;
Eterm tuple;
Process *rp;
@@ -2856,7 +5070,7 @@ void driver_report_exit(int ix, int status)
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- pid = prt->connected;
+ pid = ERTS_PORT_GET_CONNECTED(prt);
ASSERT(is_internal_pid(pid));
rp = (scheduler
@@ -2869,7 +5083,7 @@ void driver_report_exit(int ix, int status)
tuple = TUPLE2(hp, am_exit_status, make_small(status));
hp += 3;
- tuple = TUPLE2(hp, prt->id, tuple);
+ tuple = TUPLE2(hp, prt->common.id, tuple);
erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
#ifdef USE_VM_PROBES
@@ -2882,28 +5096,6 @@ void driver_report_exit(int ix, int status)
erts_smp_proc_dec_refc(rp);
}
-
-static ERTS_INLINE int
-deliver_term_check_port(ErlDrvPort drvport)
-{
- int res;
- int ix = (int) drvport;
- if (ix < 0 || erts_max_ports <= ix)
- res = -1; /* invalid */
- else {
- Port* prt = &erts_port[ix];
- erts_smp_port_state_lock(prt);
- if (!(prt->status & ERTS_PORT_SFLGS_INVALID_LOOKUP))
- res = 1; /* ok */
- else if (prt->status & ERTS_PORT_SFLG_CLOSING)
- res = 0; /* closing */
- else
- res = -1; /* invalid (dead) */
- erts_smp_port_state_unlock(prt);
- }
- return res;
-}
-
#define ERTS_B2T_STATES_DEF_STATES_SZ 5
#define ERTS_B2T_STATES_DEF_STATES_INC 100
@@ -2991,10 +5183,7 @@ cleanup_b2t_states(struct b2t_states__ *b2tsp)
*/
static int
-driver_deliver_term(ErlDrvPort port,
- Eterm to,
- ErlDrvTermData* data,
- int len)
+driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
{
#define ERTS_DDT_FAIL do { res = -1; goto done; } while (0)
Uint need = 0;
@@ -3196,11 +5385,8 @@ driver_deliver_term(ErlDrvPort port,
b2t.ix = 0;
/*
- * The term is OK. Go ahead and validate the port and process.
+ * The term is OK. Go ahead and validate the process.
*/
- res = deliver_term_check_port(port);
- if (res <= 0)
- goto done;
/*
* Increase refc on proc if done from a non-scheduler thread.
@@ -3470,25 +5656,115 @@ driver_deliver_term(ErlDrvPort port,
#undef ERTS_DDT_FAIL
}
+static ERTS_INLINE int
+deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p)
+{
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
+#endif
+ Port *prt = erts_port_lookup_raw((Eterm) port_id);
+ erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
+ if (connected_p) {
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ *connected_p = ERTS_PORT_GET_CONNECTED(prt);
+ }
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(dhndl);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+ }
+#endif
+ ERTS_SMP_LC_ASSERT(dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED
+ ? erts_lc_is_port_locked(prt)
+ : !erts_lc_is_port_locked(prt));
+ return ((state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ ? -1
+ : ((state & ERTS_PORT_SFLG_CLOSING) ? 0 : 1));
+}
+
+int erl_drv_output_term(ErlDrvTermData port_id, ErlDrvTermData* data, int len)
+{
+ /* May be called from an arbitrary thread */
+ Eterm connected;
+ int res = deliver_term_check_port(port_id, &connected);
+ if (res <= 0)
+ return res;
+ return driver_deliver_term(connected, data, len);
+}
+/*
+ * driver_output_term() is deprecated, and has been scheduled for
+ * removal in OTP-R17. It is replaced by erl_drv_output_term()
+ * above.
+ */
int
-driver_output_term(ErlDrvPort ix, ErlDrvTermData* data, int len)
+driver_output_term(ErlDrvPort drvport, ErlDrvTermData* data, int len)
{
- Port* prt = erts_drvport2port(ix);
+ erts_aint32_t state;
+ Port* prt;
ERTS_SMP_CHK_NO_PROC_LOCKS;
+ /* NOTE! It is *not* safe to access 'drvport' from unmanaged threads. */
+ prt = erts_drvport2port(drvport, &state);
+ if (!prt)
+ return -1; /* invalid (dead) */
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- if (prt == NULL)
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
return -1;
- return driver_deliver_term(ix, prt->connected, data, len);
+ else if (state & ERTS_PORT_SFLG_CLOSING)
+ return 0;
+
+ return driver_deliver_term(ERTS_PORT_GET_CONNECTED(prt), data, len);
}
+int erl_drv_send_term(ErlDrvTermData port_id,
+ ErlDrvTermData to,
+ ErlDrvTermData* data,
+ int len)
+{
+ /* May be called from an arbitrary thread */
+ int res = deliver_term_check_port(port_id, NULL);
+ if (res <= 0)
+ return res;
+ return driver_deliver_term(to, data, len);
+}
+/*
+ * driver_send_term() is deprecated, and has been scheduled for
+ * removal in OTP-R17. It is replaced by erl_drv_send_term() above.
+ */
int
-driver_send_term(ErlDrvPort ix, ErlDrvTermData to, ErlDrvTermData* data, int len)
+driver_send_term(ErlDrvPort drvport,
+ ErlDrvTermData to,
+ ErlDrvTermData* data,
+ int len)
{
- return driver_deliver_term(ix, to, data, len);
+ /*
+ * NOTE! It is *not* safe to access the 'drvport' parameter
+ * from unmanaged threads. Also note that it is impossible
+ * to make this access safe without using a less efficient
+ * internal data representation for ErlDrvPort.
+ */
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+#ifdef ERTS_SMP
+ if (erts_thr_progress_is_managed_thread())
+#endif
+ {
+ erts_aint32_t state;
+ Port* prt = erts_drvport2port(drvport, &state);
+ if (!prt)
+ return -1; /* invalid (dead) */
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ return -1;
+ else if (state & ERTS_PORT_SFLG_CLOSING)
+ return 0;
+ }
+ return driver_deliver_term(to, data, len);
}
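As the comments above state, driver_output_term() and driver_send_term() are deprecated in favour of erl_drv_output_term() and erl_drv_send_term(). The new functions identify the port by an ErlDrvTermData (the value returned by driver_mk_port()) rather than by the ErlDrvPort handle, which is what makes them callable from non-scheduler threads. A minimal migration sketch, assuming a hypothetical driver state struct that caches the result of driver_mk_port():

    /* Hypothetical driver state; 'dport' is cached in the start callback. */
    typedef struct {
        ErlDrvPort port;
        ErlDrvTermData dport;     /* dport = driver_mk_port(port) */
    } my_drv_data;

    static int notify_owner(my_drv_data *dd, ErlDrvTermData owner)
    {
        ErlDrvTermData spec[] = {
            ERL_DRV_ATOM, driver_mk_atom("my_event"),
            ERL_DRV_PORT, dd->dport,
            ERL_DRV_TUPLE, 2
        };
        /* Was: driver_send_term(dd->port, owner, spec, 6); */
        return erl_drv_send_term(dd->dport, owner, spec,
                                 sizeof(spec)/sizeof(spec[0]));
    }

erl_drv_output_term(dd->dport, spec, len) replaces driver_output_term() in the same way, sending to the port's connected process; the inet_drv.c changes below follow exactly this pattern with the existing desc->dport field.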
@@ -3500,26 +5776,27 @@ driver_send_term(ErlDrvPort ix, ErlDrvTermData to, ErlDrvTermData* data, int len
int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
ErlDrvBinary* bin, ErlDrvSizeT offs, ErlDrvSizeT len)
{
- Port* prt = erts_drvport2port(ix);
+ erts_aint32_t state;
+ Port* prt = erts_drvport2port(ix, &state);
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
prt->bytes_in += (hlen + len);
erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
- if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
+ if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
return erts_net_message(prt,
prt->dist_entry,
(byte*) hbuf, hlen,
(byte*) (bin->orig_bytes+offs), len);
}
else
- deliver_bin_message(prt, prt->connected,
+ deliver_bin_message(prt, ERTS_PORT_GET_CONNECTED(prt),
hbuf, hlen, bin, offs, len);
return 0;
}
@@ -3534,7 +5811,8 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
char* buf, ErlDrvSizeT len)
{
- Port* prt = erts_drvport2port(ix);
+ erts_aint32_t state;
+ Port* prt = erts_drvport2port(ix, &state);
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -3543,12 +5821,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
prt->bytes_in += (hlen + len);
erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
- if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
+ if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
if (len == 0)
return erts_net_message(prt,
prt->dist_entry,
@@ -3560,10 +5838,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
(byte*) hbuf, hlen,
(byte*) buf, len);
}
- else if(prt->status & ERTS_PORT_SFLG_LINEBUF_IO)
- deliver_linebuf_message(prt, prt->connected, hbuf, hlen, buf, len);
+ else if (state & ERTS_PORT_SFLG_LINEBUF_IO)
+ deliver_linebuf_message(prt, state, ERTS_PORT_GET_CONNECTED(prt),
+ hbuf, hlen, buf, len);
else
- deliver_read_message(prt, prt->connected, hbuf, hlen, buf, len, 0);
+ deliver_read_message(prt, state, ERTS_PORT_GET_CONNECTED(prt),
+ hbuf, hlen, buf, len, 0);
return 0;
}
@@ -3584,6 +5864,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
SysIOVec* iov;
ErlDrvBinary** binv;
Port* prt;
+ erts_aint32_t state;
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -3596,13 +5877,13 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
if (hlen < 0)
hlen = 0;
- prt = erts_drvport2port(ix);
+ prt = erts_drvport2port(ix, &state);
if (prt == NULL)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
/* size > 0 ! */
@@ -3627,7 +5908,8 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
/* XXX handle distribution !!! */
prt->bytes_in += (hlen + size);
erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + size));
- deliver_vec_message(prt, prt->connected, hbuf, hlen, binv, iov, n, size);
+ deliver_vec_message(prt, ERTS_PORT_GET_CONNECTED(prt), hbuf, hlen,
+ binv, iov, n, size);
return 0;
}
@@ -3738,8 +6020,7 @@ ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
}
-void driver_free_binary(dbin)
-ErlDrvBinary* dbin;
+void driver_free_binary(ErlDrvBinary* dbin)
{
Binary *bin;
if (!dbin) {
@@ -3835,6 +6116,7 @@ static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl)
{
ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) == 0);
erts_mtx_destroy(&pdl->mtx);
+ erts_port_dec_refc(pdl->prt);
erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl);
}
@@ -3872,16 +6154,18 @@ ErlDrvPDL
driver_pdl_create(ErlDrvPort dp)
{
ErlDrvPDL pdl;
- Port *pp = erts_drvport2port(dp);
+ Port *pp = erts_drvport2port(dp, NULL);
if (!pp || pp->port_data_lock)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
erts_mtx_init(&pdl->mtx, "port_data_lock");
pdl_init_refc(pdl);
+ erts_port_inc_refc(pp);
+ pdl->prt = pp;
pp->port_data_lock = pdl;
#ifdef HARDDEBUG
- erts_fprintf(stderr, "driver_pdl_create(%T) -> 0x%08X\r\n",pp->id,(unsigned) pdl);
+ erts_fprintf(stderr, "driver_pdl_create(%T) -> 0x%08X\r\n",pp->common.id,(unsigned) pdl);
#endif
return pdl;
}
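driver_pdl_create() now takes a reference on the port (erts_port_inc_refc()) that is released in pdl_destroy(), so the Port structure stays valid for as long as any thread still holds the port data lock. The driver-facing API is unchanged; a brief usage sketch, with the queue operations shown only as illustration:

    /* In the driver's start callback (port lock held): */
    ErlDrvPDL pdl = driver_pdl_create(port);  /* NULL if a PDL already exists */

    /* From an async or other non-scheduler thread: */
    driver_pdl_lock(pdl);
    /* ... manipulate the driver queue, e.g. driver_enq()/driver_deq() ... */
    driver_pdl_unlock(pdl);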
@@ -4319,33 +6603,33 @@ static ERTS_INLINE void
drv_cancel_timer(Port *prt)
{
#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(prt->ptimer);
+ erts_cancel_smp_ptimer(prt->common.u.alive.ptimer);
#else
- erts_cancel_timer(&prt->tm);
+ erts_cancel_timer(&prt->common.u.alive.tm);
#endif
if (erts_port_task_is_scheduled(&prt->timeout_task))
- erts_port_task_abort(prt->id, &prt->timeout_task);
+ erts_port_task_abort(&prt->timeout_task);
}
int driver_set_timer(ErlDrvPort ix, unsigned long t)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
if (prt->drv_ptr->timeout == NULL)
return -1;
drv_cancel_timer(prt);
#ifdef ERTS_SMP
- erts_create_smp_ptimer(&prt->ptimer,
- prt->id,
+ erts_create_smp_ptimer(&prt->common.u.alive.ptimer,
+ prt->common.id,
(ErlTimeoutProc) schedule_port_timeout,
t);
#else
- erts_set_timer(&prt->tm,
+ erts_set_timer(&prt->common.u.alive.tm,
(ErlTimeoutProc) schedule_port_timeout,
NULL,
prt,
@@ -4356,7 +6640,7 @@ int driver_set_timer(ErlDrvPort ix, unsigned long t)
int driver_cancel_timer(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
if (prt == NULL)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
@@ -4368,7 +6652,7 @@ int driver_cancel_timer(ErlDrvPort ix)
int
driver_read_timer(ErlDrvPort ix, unsigned long* t)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -4376,9 +6660,11 @@ driver_read_timer(ErlDrvPort ix, unsigned long* t)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
#ifdef ERTS_SMP
- *t = prt->ptimer ? erts_time_left(&prt->ptimer->timer.tm) : 0;
+ *t = (prt->common.u.alive.ptimer
+ ? erts_time_left(&prt->common.u.alive.ptimer->timer.tm)
+ : 0);
#else
- *t = erts_time_left(&prt->tm);
+ *t = erts_time_left(&prt->common.u.alive.tm);
#endif
return 0;
}
@@ -4429,8 +6715,8 @@ static int do_driver_monitor_process(Port *prt,
}
ref = erts_make_ref_in_buffer(buf);
- erts_add_monitor(&(prt->monitors), MON_ORIGIN, ref, rp->id, NIL);
- erts_add_monitor(&(rp->monitors), MON_TARGET, ref, prt->id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(prt), MON_ORIGIN, ref, rp->common.id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
ref_to_driver_monitor(ref,monitor);
@@ -4440,31 +6726,24 @@ static int do_driver_monitor_process(Port *prt,
/*
 * This can be called from a non-scheduler thread iff a port_data_lock exists
*/
-int driver_monitor_process(ErlDrvPort port,
+int driver_monitor_process(ErlDrvPort drvport,
ErlDrvTermData process,
ErlDrvMonitor *monitor)
{
Port *prt;
int ret;
- Uint32 status;
+ erts_aint32_t state;
+#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK))
ErtsSchedulerData *sched = erts_get_scheduler_data();
- int ix = (int) port;
- if (ix < 0 || erts_max_ports <= ix) {
- return -1;
- }
- prt = &erts_port[ix];
+#endif
+
+ prt = erts_thr_drvport2port_raw(drvport);
DRV_MONITOR_LOCK_PDL(prt);
- if (sched) {
- status = erts_port[ix].status;
- } else {
- erts_smp_port_state_lock(prt);
- status = erts_port[ix].status;
- erts_smp_port_state_unlock(prt);
- }
+ state = erts_atomic32_read_nob(&prt->state);
- if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
DRV_MONITOR_UNLOCK_PDL(prt);
return -1;
}
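driver_monitor_process() and its demonitor/lookup companions now read the port state atomically via erts_thr_drvport2port_raw() instead of taking the old port state lock. Usage from a driver is unchanged; a short sketch, assuming the monitored pid comes from driver_caller():

    ErlDrvMonitor mon;
    ErlDrvTermData pid = driver_caller(port);

    if (driver_monitor_process(port, pid, &mon) == 0) {
        /* On exit of 'pid', the driver's process_exit callback is
         * invoked with a monitor equal to 'mon'. */
    }
    /* ... later, if the monitor is no longer needed: */
    driver_demonitor_process(port, &mon);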
@@ -4502,7 +6781,7 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf,
memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
ref = make_internal_ref(buf);
- mon = erts_lookup_monitor(prt->monitors, ref);
+ mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return 1;
}
@@ -4514,13 +6793,13 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf,
to,
ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
- mon = erts_remove_monitor(&(prt->monitors), ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref);
if (mon) {
erts_destroy_monitor(mon);
}
if (rp) {
ErtsMonitor *rmon;
- rmon = erts_remove_monitor(&(rp->monitors), ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon != NULL) {
erts_destroy_monitor(rmon);
@@ -4529,30 +6808,23 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf,
return 0;
}
-int driver_demonitor_process(ErlDrvPort port,
+int driver_demonitor_process(ErlDrvPort drvport,
const ErlDrvMonitor *monitor)
{
Port *prt;
int ret;
- Uint32 status;
+ erts_aint32_t state;
+#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK))
ErtsSchedulerData *sched = erts_get_scheduler_data();
- int ix = (int) port;
- if (ix < 0 || erts_max_ports <= ix) {
- return -1;
- }
- prt = &erts_port[ix];
+#endif
+
+ prt = erts_thr_drvport2port_raw(drvport);
DRV_MONITOR_LOCK_PDL(prt);
- if (sched) {
- status = erts_port[ix].status;
- } else {
- erts_smp_port_state_lock(prt);
- status = erts_port[ix].status;
- erts_smp_port_state_unlock(prt);
- }
+ state = erts_atomic32_read_nob(&prt->state);
- if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
DRV_MONITOR_UNLOCK_PDL(prt);
return -1;
}
@@ -4588,7 +6860,7 @@ static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf,
memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
ref = make_internal_ref(buf);
- mon = erts_lookup_monitor(prt->monitors, ref);
+ mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return driver_term_nil;
}
@@ -4599,30 +6871,22 @@ static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf,
}
-ErlDrvTermData driver_get_monitored_process(ErlDrvPort port,
+ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport,
const ErlDrvMonitor *monitor)
{
Port *prt;
ErlDrvTermData ret;
- Uint32 status;
+ erts_aint32_t state;
+#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK))
ErtsSchedulerData *sched = erts_get_scheduler_data();
- int ix = (int) port;
- if (ix < 0 || erts_max_ports <= ix) {
- return driver_term_nil;
- }
- prt = &erts_port[ix];
+#endif
- DRV_MONITOR_LOCK_PDL(prt);
+ prt = erts_thr_drvport2port_raw(drvport);
- if (sched) {
- status = erts_port[ix].status;
- } else {
- erts_smp_port_state_lock(prt);
- status = erts_port[ix].status;
- erts_smp_port_state_unlock(prt);
- }
+ DRV_MONITOR_LOCK_PDL(prt);
- if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
DRV_MONITOR_UNLOCK_PDL(prt);
return driver_term_nil;
}
@@ -4667,7 +6931,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(prt->drv_ptr != NULL);
DRV_MONITOR_LOCK_PDL(prt);
- if (erts_lookup_monitor(prt->monitors,ref) == NULL) {
+ if (erts_lookup_monitor(ERTS_P_MONITORS(prt), ref) == NULL) {
DRV_MONITOR_UNLOCK_PDL(prt);
return;
}
@@ -4677,7 +6941,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
DRV_MONITOR_UNLOCK_PDL(prt);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_process_exit)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(prt->connected, prt)
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(prt), prt)
DTRACE3(driver_process_exit, process_str, port_str, prt->name);
}
#endif
@@ -4686,7 +6950,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
erts_unblock_fpe(fpe_was_unmasked);
DRV_MONITOR_LOCK_PDL(prt);
/* remove monitor *after* callback */
- rmon = erts_remove_monitor(&(prt->monitors),ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref);
DRV_MONITOR_UNLOCK_PDL(prt);
if (rmon) {
erts_destroy_monitor(rmon);
@@ -4697,7 +6961,8 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
static int
driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
{
- Port* prt = erts_drvport2port(ix);
+ erts_aint32_t state;
+ Port* prt = erts_drvport2port(ix, &state);
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -4705,19 +6970,19 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if (eof)
- flush_linebuf_messages(prt);
- if (prt->status & ERTS_PORT_SFLG_CLOSING) {
+ flush_linebuf_messages(prt, state);
+ if (state & ERTS_PORT_SFLG_CLOSING) {
terminate_port(prt);
- } else if (eof && (prt->status & ERTS_PORT_SFLG_SOFT_EOF)) {
- deliver_result(prt->id, prt->connected, am_eof);
+ } else if (eof && (state & ERTS_PORT_SFLG_SOFT_EOF)) {
+ deliver_result(prt->common.id, ERTS_PORT_GET_CONNECTED(prt), am_eof);
} else {
- /* XXX UGLY WORK AROUND, Let do_exit_port terminate the port */
+ /* XXX UGLY WORK AROUND, Let erts_deliver_port_exit() terminate the port */
if (prt->port_data_lock)
driver_pdl_lock(prt->port_data_lock);
prt->ioq.size = 0;
if (prt->port_data_lock)
driver_pdl_unlock(prt->port_data_lock);
- erts_do_exit_port(prt, prt->id, eof ? am_normal : term);
+ erts_deliver_port_exit(prt, prt->common.id, eof ? am_normal : term, 0);
}
return 0;
}
@@ -4730,23 +6995,23 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
*/
int driver_exit(ErlDrvPort ix, int err)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
Process* rp;
ErtsLink *lnk, *rlnk = NULL;
+ Eterm connected;
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- rp = erts_pid2proc(NULL, 0, prt->connected, ERTS_PROC_LOCK_LINK);
+ connected = ERTS_PORT_GET_CONNECTED(prt);
+ rp = erts_pid2proc(NULL, 0, connected, ERTS_PROC_LOCK_LINK);
if (rp) {
- rlnk = erts_remove_link(&(rp->nlinks),prt->id);
+ rlnk = erts_remove_link(&ERTS_P_LINKS(rp),prt->common.id);
}
- lnk = erts_remove_link(&(prt->nlinks),prt->connected);
+ lnk = erts_remove_link(&ERTS_P_LINKS(prt), connected);
#ifdef ERTS_SMP
if (rp)
@@ -4803,24 +7068,24 @@ ErlDrvTermData driver_mk_atom(char* string)
ErlDrvTermData driver_mk_port(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- return (ErlDrvTermData) prt->id;
+ return (ErlDrvTermData) prt->common.id;
}
ErlDrvTermData driver_connected(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return NIL;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- return prt->connected;
+ return ERTS_PORT_GET_CONNECTED(prt);
}
ErlDrvTermData driver_caller(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return NIL;
@@ -4830,25 +7095,25 @@ ErlDrvTermData driver_caller(ErlDrvPort ix)
int driver_lock_driver(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
DE_Handle* dh;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
if (prt == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
return -1;
}
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
return -1;
}
erts_ddll_lock_driver(dh, prt->drv_ptr->name);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
return 0;
}
@@ -4858,7 +7123,7 @@ static int maybe_lock_driver_list(void)
void *rec_lock;
rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
if (rec_lock == 0) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
return 1;
}
return 0;
@@ -4866,7 +7131,7 @@ static int maybe_lock_driver_list(void)
static void maybe_unlock_driver_list(int doit)
{
if (doit) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
}
/*
@@ -5035,7 +7300,7 @@ no_event_callback(ErlDrvData drv_data, ErlDrvEvent event, ErlDrvEventData event_
{
Port *prt = get_current_port();
report_missing_drv_callback(prt, "Event", "event()");
- driver_event((ErlDrvPort) internal_port_index(prt->id), event, NULL);
+ driver_event((ErlDrvPort) prt, event, NULL);
}
static void
@@ -5043,7 +7308,7 @@ no_ready_input_callback(ErlDrvData drv_data, ErlDrvEvent event)
{
Port *prt = get_current_port();
report_missing_drv_callback(prt, "Input", "ready_input()");
- driver_select((ErlDrvPort) internal_port_index(prt->id), event,
+ driver_select((ErlDrvPort) prt, event,
(ERL_DRV_READ | ERL_DRV_USE_NO_CALLBACK), 0);
}
@@ -5052,7 +7317,7 @@ no_ready_output_callback(ErlDrvData drv_data, ErlDrvEvent event)
{
Port *prt = get_current_port();
report_missing_drv_callback(prt, "Output", "ready_output()");
- driver_select((ErlDrvPort) internal_port_index(prt->id), event,
+ driver_select((ErlDrvPort) prt, event,
(ERL_DRV_WRITE | ERL_DRV_USE_NO_CALLBACK), 0);
}
@@ -5087,13 +7352,13 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->lock = NULL;
else {
drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK,
- sizeof(erts_smp_mtx_t));
- erts_smp_mtx_init_x(drv->lock,
- "driver_lock",
+ sizeof(erts_mtx_t));
+ erts_mtx_init_x(drv->lock,
+ "driver_lock",
#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
- am_atom_put(drv->name, sys_strlen(drv->name))
+ am_atom_put(drv->name, sys_strlen(drv->name))
#else
- NIL
+ NIL
#endif
);
}
@@ -5165,7 +7430,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo
int res;
if (!driver_list_locked) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
}
dp->next = driver_list;
@@ -5194,7 +7459,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo
if (!driver_list_locked) {
erts_smp_tsd_set(driver_list_lock_status_key, NULL);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
return res;
}
@@ -5207,7 +7472,7 @@ int remove_driver_entry(ErlDrvEntry *drv)
rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
if (rec_lock == NULL) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
}
dp = driver_list;
while (dp && dp->entry != drv)
@@ -5215,7 +7480,7 @@ int remove_driver_entry(ErlDrvEntry *drv)
if (dp) {
if (dp->handle) {
if (rec_lock == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
return -1;
}
@@ -5229,12 +7494,12 @@ int remove_driver_entry(ErlDrvEntry *drv)
}
erts_destroy_driver(dp);
if (rec_lock == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
return 1;
}
if (rec_lock == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
return 0;
}
@@ -5264,18 +7529,22 @@ erl_drv_getenv(char *key, char *value, size_t *value_size)
* - uses the fact that heart_port is registered when starting heart
*/
-Port *erts_get_heart_port() {
+Port *erts_get_heart_port(void)
+{
+ int ix, max = erts_ptab_max(&erts_port);
- Port* port;
- Uint ix;
+ for (ix = 0; ix < max; ix++) {
+ struct reg_proc *reg;
+ Port *port = erts_pix2port(ix);
- for(ix = 0; ix < erts_max_ports; ix++) {
- port = &erts_port[ix];
+ if (!port)
+ continue;
/* only examine undead or alive ports */
- if (port->status & ERTS_PORT_SFLGS_DEAD)
+ if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_DEAD)
continue;
/* immediate atom compare */
- if (port->reg && port->reg->name == am_heart_port) {
+ reg = port->common.u.alive.reg;
+ if (reg && reg->name == am_heart_port) {
return port;
}
}
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index c02872ef80..757e2800e6 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -175,14 +175,14 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
if (is_not_atom(name) || name == am_undefined)
return res;
- if (c_p->id == id) /* A very common case I think... */
+ if (c_p->common.id == id) /* A very common case I think... */
proc = c_p;
else {
if (is_not_internal_pid(id) && is_not_internal_port(id))
return res;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
if (is_internal_port(id)) {
- port = erts_id2port(id, NULL, 0);
+ port = erts_id2port(id);
if (!port)
goto done;
}
@@ -204,7 +204,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
r.p = proc;
if (!proc)
goto done;
- if (proc->reg)
+ if (proc->common.u.alive.reg)
goto done;
r.pt = NULL;
}
@@ -212,7 +212,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
ASSERT(!INVALID_PORT(port, id));
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
r.pt = port;
- if (r.pt->reg)
+ if (r.pt->common.u.alive.reg)
goto done;
r.p = NULL;
}
@@ -224,23 +224,24 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
trace_proc(c_p, proc, am_register, name);
}
- proc->reg = rp;
+ proc->common.u.alive.reg = rp;
}
else if (port && rp->pt == port) {
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
trace_port(port, am_register, name);
}
- port->reg = rp;
+ port->common.u.alive.reg = rp;
}
- if ((rp->p && rp->p->id == id) || (rp->pt && rp->pt->id == id)) {
+ if ((rp->p && rp->p->common.id == id)
+ || (rp->pt && rp->pt->common.id == id)) {
res = 1;
}
done:
reg_write_unlock();
if (port)
- erts_smp_port_unlock(port);
+ erts_port_release(port);
if (c_p != proc) {
if (proc)
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
@@ -291,9 +292,9 @@ erts_whereis_name_to_id(Process *c_p, Eterm name)
* is read only.
*/
if (rp->p)
- res = rp->p->id;
+ res = rp->p->common.id;
else if (rp->pt)
- res = rp->pt->id;
+ res = rp->pt->common.id;
break;
}
b = b->next;
@@ -396,28 +397,26 @@ erts_whereis_name(Process *c_p,
if (!rp || !rp->pt)
*port = NULL;
else {
-#ifndef ERTS_SMP
- erts_smp_atomic_inc_nob(&rp->pt->refc);
-#else
+#ifdef ERTS_SMP
if (pending_port == rp->pt)
pending_port = NULL;
else {
if (pending_port) {
/* Ahh! Registered port changed while reg lock
was unlocked... */
- erts_smp_port_unlock(pending_port);
+ erts_port_release(pending_port);
pending_port = NULL;
}
if (erts_smp_port_trylock(rp->pt) == EBUSY) {
- Eterm id = rp->pt->id; /* id read only... */
+ Eterm id = rp->pt->common.id; /* id read only... */
/* Unlock all locks, acquire port lock, and restart... */
if (current_c_p_locks) {
erts_smp_proc_unlock(c_p, current_c_p_locks);
current_c_p_locks = 0;
}
reg_read_unlock();
- pending_port = erts_id2port(id, NULL, 0);
+ pending_port = erts_id2port(id);
goto restart;
}
}
@@ -431,7 +430,7 @@ erts_whereis_name(Process *c_p,
if (c_p && !current_c_p_locks)
erts_smp_proc_lock(c_p, c_p_locks);
if (pending_port)
- erts_smp_port_unlock(pending_port);
+ erts_port_release(pending_port);
#endif
reg_read_unlock();
@@ -493,8 +492,8 @@ int erts_unregister_name(Process *c_p,
current_c_p_locks = c_p_locks;
}
#endif
- if (c_p->reg) {
- r.name = c_p->reg->name;
+ if (c_p->common.u.alive.reg) {
+ r.name = c_p->common.u.alive.reg->name;
} else {
/* Name got unregistered while main lock was released */
res = 0;
@@ -505,24 +504,22 @@ int erts_unregister_name(Process *c_p,
if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) {
if (rp->pt) {
if (port != rp->pt) {
-#ifndef ERTS_SMP
- erts_smp_atomic_inc_nob(&rp->pt->refc);
-#else
+#ifdef ERTS_SMP
if (port) {
ASSERT(port != c_prt);
- erts_smp_port_unlock(port);
+ erts_port_release(port);
port = NULL;
}
if (erts_smp_port_trylock(rp->pt) == EBUSY) {
- Eterm id = rp->pt->id; /* id read only... */
+ Eterm id = rp->pt->common.id; /* id read only... */
/* Unlock all locks, acquire port lock, and restart... */
if (current_c_p_locks) {
erts_smp_proc_unlock(c_p, current_c_p_locks);
current_c_p_locks = 0;
}
reg_write_unlock();
- port = erts_id2port(id, NULL, 0);
+ port = erts_id2port(id);
goto restart;
}
#endif
@@ -532,7 +529,7 @@ int erts_unregister_name(Process *c_p,
ASSERT(rp->pt == port);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
- rp->pt->reg = NULL;
+ rp->pt->common.u.alive.reg = NULL;
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
trace_port(port, am_unregister, r.name);
@@ -549,7 +546,7 @@ int erts_unregister_name(Process *c_p,
ERTS_PROC_LOCK_MAIN);
current_c_p_locks = c_p_locks;
#endif
- rp->p->reg = NULL;
+ rp->p->common.u.alive.reg = NULL;
if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
trace_proc(c_p, rp->p, am_unregister, r.name);
}
@@ -568,7 +565,7 @@ int erts_unregister_name(Process *c_p,
reg_write_unlock();
if (c_prt != port) {
if (port) {
- erts_smp_port_unlock(port);
+ erts_port_release(port);
}
if (c_prt) {
erts_smp_port_lock(c_prt);
diff --git a/erts/emulator/beam/register.h b/erts/emulator/beam/register.h
index 38e8cfbf28..7170463375 100644
--- a/erts/emulator/beam/register.h
+++ b/erts/emulator/beam/register.h
@@ -24,26 +24,19 @@
#ifndef __REGPROC_H__
#define __REGPROC_H__
-#ifndef __SYS_H__
#include "sys.h"
-#endif
-
-#ifndef __HASH_H__
#include "hash.h"
-#endif
-
-#ifndef __PROCESS_H__
#include "erl_process.h"
-#endif
-
-struct port;
+#define ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_port.h"
+#undef ERL_PORT_GET_PORT_TYPE_ONLY__
typedef struct reg_proc
{
HashBucket bucket; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
Process *p; /* The process registered (only one of this and
'pt' is non-NULL) */
- struct port *pt; /* The port registered */
+ Port *pt; /* The port registered */
Eterm name; /* Atom name */
} RegProc;
@@ -55,12 +48,12 @@ int erts_register_name(Process *, Eterm, Eterm);
Eterm erts_whereis_name_to_id(Process *, Eterm);
void erts_whereis_name(Process *, ErtsProcLocks,
Eterm, Process**, ErtsProcLocks, int,
- struct port**);
+ Port**);
Process *erts_whereis_process(Process *,
ErtsProcLocks,
Eterm,
ErtsProcLocks,
int);
-int erts_unregister_name(Process *, ErtsProcLocks, struct port *, Eterm);
+int erts_unregister_name(Process *, ErtsProcLocks, Port *, Eterm);
#endif
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index f5f10bb616..898a30b010 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -116,6 +116,16 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# define ERTS_DECLARE_DUMMY(X) X
#endif
+#if !defined(__func__)
+# if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L
+# if !defined(__GNUC__) || __GNUC__ < 2
+# define __func__ "[unknown_function]"
+# else
+# define __func__ __FUNCTION__
+# endif
+# endif
+#endif
+
#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
# undef ERTS_CAN_INLINE
# define ERTS_CAN_INLINE 0
@@ -231,9 +241,11 @@ void erl_assert_error(char* expr, char* file, int line);
#if SIZEOF_VOID_P == 8
#undef ARCH_32
#define ARCH_64
+#define ERTS_SIZEOF_TERM 8
#elif SIZEOF_VOID_P == 4
#define ARCH_32
#undef ARCH_64
+#define ERTS_SIZEOF_TERM 4
#else
#error Neither 32 nor 64 bit architecture
#endif
@@ -241,6 +253,8 @@ void erl_assert_error(char* expr, char* file, int line);
# define HALFWORD_HEAP 1
# define HALFWORD_ASSERT 0
# define ASSERT_HALFWORD(COND) ASSERT(COND)
+# undef ERTS_SIZEOF_TERM
+# define ERTS_SIZEOF_TERM 4
#else
# define HALFWORD_HEAP 0
# define HALFWORD_ASSERT 0
@@ -367,6 +381,27 @@ typedef unsigned char byte;
#error 64-bit architecture, but no appropriate type to use for Uint64 and Sint64 found
#endif
+#ifdef WORDS_BIGENDIAN
+# define ERTS_HUINT_HVAL_HIGH 0
+# define ERTS_HUINT_HVAL_LOW 1
+#else
+# define ERTS_HUINT_HVAL_HIGH 1
+# define ERTS_HUINT_HVAL_LOW 0
+#endif
+#if ERTS_SIZEOF_TERM == 8
+typedef union {
+ Uint val;
+ Uint32 hval[2];
+} HUint;
+#elif ERTS_SIZEOF_TERM == 4
+typedef union {
+ Uint val;
+ Uint16 hval[2];
+} HUint;
+#else
+#error "Unsupported size of term"
+#endif
+
# define ERTS_EXTRA_DATA_ALIGN_SZ(X) \
(((size_t) 8) - (((size_t) (X)) & ((size_t) 7)))
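The HUint union added above gives access to the high and low halves of a term-sized word without explicit shifting, with ERTS_HUINT_HVAL_HIGH/LOW hiding the byte-order difference. A purely illustrative sketch of the intended use ('some_term_sized_value' is a placeholder):

    HUint h;
    h.val = some_term_sized_value;
    /* hval[] is Uint32[2] for 8-byte terms and Uint16[2] for 4-byte terms */
    if (h.hval[ERTS_HUINT_HVAL_HIGH] == 0) {
        /* the value fits entirely in the low half */
    }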
@@ -505,6 +540,10 @@ __decl_noreturn void __noreturn erl_exit(int n, char*, ...);
#define ERTS_ABORT_EXIT (INT_MIN + 1) /* no crash dump; only abort() */
#define ERTS_DUMP_EXIT (INT_MIN + 2) /* crash dump; then exit() */
+#define ERTS_INTERNAL_ERROR(What) \
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Internal error: %s\n", \
+ __FILE__, __LINE__, __func__, What)
+
Eterm erts_check_io_info(void *p);
/* Size of misc memory allocated from system dependent code */
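ERTS_INTERNAL_ERROR() combines the new __func__ fallback above with erl_exit(ERTS_ABORT_EXIT, ...), producing a file/line/function-tagged abort without a crash dump. Typical use is in branches that should be unreachable; a sketch with placeholder switch labels:

    switch (type) {
    case EXPECTED_CASE:
        /* ... normal handling ... */
        break;
    default:
        ERTS_INTERNAL_ERROR("Invalid type");
    }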
@@ -579,6 +618,7 @@ typedef struct _SysDriverOpts {
char *wd; /* Working directory. */
unsigned spawn_type; /* Bitfield of ERTS_SPAWN_DRIVER |
ERTS_SPAWN_EXTERNAL | both*/
+ int parallelism; /* Optimize for parallelism */
} SysDriverOpts;
extern char *erts_default_arg0;
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 1969fc762c..5261effef9 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -46,6 +46,7 @@
#include "erl_thr_queue.h"
#include "erl_sched_spec_pre_alloc.h"
#include "beam_bp.h"
+#include "erl_ptab.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -3016,12 +3017,13 @@ buf_to_intlist(Eterm** hpp, char *buf, size_t len, Eterm tail)
** ;
**
** Return remaining bytes in buffer on success
-** -1 on overflow
-** -2 on type error (including that result would not be a whole number of bytes)
+** ERTS_IOLIST_TO_BUF_OVERFLOW on overflow
+** ERTS_IOLIST_TO_BUF_TYPE_ERROR on type error (including that result would not be a whole number of bytes)
*/
-int io_list_to_buf(Eterm obj, char* buf, int len)
+ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
{
+ ErlDrvSizeT len = (ErlDrvSizeT) alloced_len;
Eterm* objp;
DECLARE_ESTACK(s);
goto L_again;
@@ -3114,20 +3116,20 @@ int io_list_to_buf(Eterm obj, char* buf, int len)
L_type_error:
DESTROY_ESTACK(s);
- return -2;
+ return ERTS_IOLIST_TO_BUF_TYPE_ERROR;
L_overflow:
DESTROY_ESTACK(s);
- return -1;
+ return ERTS_IOLIST_TO_BUF_OVERFLOW;
}
/*
* Return 0 if successful, and non-zero if unsuccessful.
*/
-int erts_iolist_size(Eterm obj, Uint* sizep)
+int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
{
Eterm* objp;
- Uint size = 0;
+ Uint size = 0; /* Intentionally Uint due to halfword heap */
DECLARE_ESTACK(s);
goto L_again;
@@ -3179,7 +3181,7 @@ int erts_iolist_size(Eterm obj, Uint* sizep)
#undef SAFE_ADD
DESTROY_ESTACK(s);
- *sizep = size;
+ *sizep = (ErlDrvSizeT) size;
return ERTS_IOLIST_OK;
L_overflow_error:
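With the changes above, erts_iolist_to_buf() reports failure through the named constants instead of bare -1/-2, and both it and erts_iolist_size() work in ErlDrvSizeT. A hedged sketch of the calling pattern, assuming the constants and ERTS_IOLIST_OK are defined in global.h (not part of this hunk) and with a hypothetical badarg() error path:

    ErlDrvSizeT size, left;
    char *buf;

    if (erts_iolist_size(iolist, &size) != ERTS_IOLIST_OK)
        return badarg();                      /* hypothetical error path */
    buf = erts_alloc(ERTS_ALC_T_TMP, size);
    left = erts_iolist_to_buf(iolist, buf, size);
    if (left == ERTS_IOLIST_TO_BUF_TYPE_ERROR
        || left == ERTS_IOLIST_TO_BUF_OVERFLOW) {
        erts_free(ERTS_ALC_T_TMP, buf);
        return badarg();
    }
    /* 'size - left' bytes of 'buf' have been filled */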
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 3210ffa92a..236b8710fb 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -678,6 +678,8 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_LOPT_UDP_READ_PACKETS 33 /* Number of packets to read */
#define INET_OPT_RAW 34 /* Raw socket options */
#define INET_LOPT_TCP_SEND_TIMEOUT_CLOSE 35 /* auto-close on send timeout or not */
+#define INET_LOPT_TCP_MSGQ_HIWTRMRK 36 /* set local high watermark */
+#define INET_LOPT_TCP_MSGQ_LOWTRMRK 37 /* set local low watermark */
/* SCTP options: a separate range, from 100: */
#define SCTP_OPT_RTOINFO 100
#define SCTP_OPT_ASSOCINFO 101
@@ -788,6 +790,8 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_HIGH_WATERMARK (1024*8) /* 8k pending high => busy */
#define INET_LOW_WATERMARK (1024*4) /* 4k pending => allow more */
+#define INET_HIGH_MSGQ_WATERMARK (1024*8) /* 8k pending high => busy */
+#define INET_LOW_MSGQ_WATERMARK (1024*4) /* 4k pending => allow more */
#define INET_INFINITY 0xffffffff /* infinity value */
@@ -879,7 +883,7 @@ typedef struct subs_list_ {
#define NO_PROCESS 0
#define NO_SUBSCRIBERS(SLP) ((SLP)->subscriber == NO_PROCESS)
-static void send_to_subscribers(ErlDrvPort, subs_list *, int,
+static void send_to_subscribers(ErlDrvTermData, subs_list *, int,
ErlDrvTermData [], int);
static void free_subscribers(subs_list*);
static int save_subscriber(subs_list *, ErlDrvTermData);
@@ -1873,8 +1877,7 @@ static int deq_async(inet_descriptor* desc, int* ap, ErlDrvTermData* cp, int* rp
** {inet_async, Port, Ref, ok}
*/
static int
-send_async_ok(ErlDrvPort port, ErlDrvTermData Port, int Ref,
- ErlDrvTermData recipient)
+send_async_ok(ErlDrvTermData Port, int Ref,ErlDrvTermData recipient)
{
ErlDrvTermData spec[2*LOAD_ATOM_CNT + LOAD_PORT_CNT +
LOAD_INT_CNT + LOAD_TUPLE_CNT];
@@ -1888,14 +1891,14 @@ send_async_ok(ErlDrvPort port, ErlDrvTermData Port, int Ref,
ASSERT(i == sizeof(spec)/sizeof(*spec));
- return driver_send_term(port, recipient, spec, i);
+ return erl_drv_send_term(Port, recipient, spec, i);
}
/* send message:
** {inet_async, Port, Ref, {ok,Port2}}
*/
static int
-send_async_ok_port(ErlDrvPort port, ErlDrvTermData Port, int Ref,
+send_async_ok_port(ErlDrvTermData Port, int Ref,
ErlDrvTermData recipient, ErlDrvTermData Port2)
{
ErlDrvTermData spec[2*LOAD_ATOM_CNT + 2*LOAD_PORT_CNT +
@@ -1914,14 +1917,14 @@ send_async_ok_port(ErlDrvPort port, ErlDrvTermData Port, int Ref,
ASSERT(i == sizeof(spec)/sizeof(*spec));
- return driver_send_term(port, recipient, spec, i);
+ return erl_drv_send_term(Port, recipient, spec, i);
}
/* send message:
** {inet_async, Port, Ref, {error,Reason}}
*/
static int
-send_async_error(ErlDrvPort port, ErlDrvTermData Port, int Ref,
+send_async_error(ErlDrvTermData Port, int Ref,
ErlDrvTermData recipient, ErlDrvTermData Reason)
{
ErlDrvTermData spec[3*LOAD_ATOM_CNT + LOAD_PORT_CNT +
@@ -1939,7 +1942,7 @@ send_async_error(ErlDrvPort port, ErlDrvTermData Port, int Ref,
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i == sizeof(spec)/sizeof(*spec));
DEBUGF(("send_async_error %ld %ld\r\n", recipient, Reason));
- return driver_send_term(port, recipient, spec, i);
+ return erl_drv_send_term(Port, recipient, spec, i);
}
@@ -1951,7 +1954,7 @@ static int async_ok(inet_descriptor* desc)
if (deq_async(desc, &aid, &caller, &req) < 0)
return -1;
- return send_async_ok(desc->port, desc->dport, aid, caller);
+ return send_async_ok(desc->dport, aid, caller);
}
static int async_ok_port(inet_descriptor* desc, ErlDrvTermData Port2)
@@ -1962,7 +1965,7 @@ static int async_ok_port(inet_descriptor* desc, ErlDrvTermData Port2)
if (deq_async(desc, &aid, &caller, &req) < 0)
return -1;
- return send_async_ok_port(desc->port, desc->dport, aid, caller, Port2);
+ return send_async_ok_port(desc->dport, aid, caller, Port2);
}
static int async_error_am(inet_descriptor* desc, ErlDrvTermData reason)
@@ -1973,8 +1976,7 @@ static int async_error_am(inet_descriptor* desc, ErlDrvTermData reason)
if (deq_async(desc, &aid, &caller, &req) < 0)
return -1;
- return send_async_error(desc->port, desc->dport, aid, caller,
- reason);
+ return send_async_error(desc->dport, aid, caller, reason);
}
/* dequeue all operations */
@@ -1985,8 +1987,7 @@ static int async_error_am_all(inet_descriptor* desc, ErlDrvTermData reason)
ErlDrvTermData caller;
while (deq_async(desc, &aid, &caller, &req) == 0) {
- send_async_error(desc->port, desc->dport, aid, caller,
- reason);
+ send_async_error(desc->dport, aid, caller, reason);
}
return 0;
}
@@ -2014,7 +2015,7 @@ static int inet_reply_ok(inet_descriptor* desc)
ASSERT(i == sizeof(spec)/sizeof(*spec));
desc->caller = 0;
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
#ifdef HAVE_SCTP
@@ -2033,7 +2034,7 @@ static int inet_reply_ok_port(inet_descriptor* desc, ErlDrvTermData dport)
ASSERT(i == sizeof(spec)/sizeof(*spec));
desc->caller = 0;
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
#endif
@@ -2056,7 +2057,7 @@ static int inet_reply_error_am(inet_descriptor* desc, ErlDrvTermData reason)
desc->caller = 0;
DEBUGF(("inet_reply_error_am %ld %ld\r\n", caller, reason));
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
/* send:
@@ -2165,12 +2166,12 @@ static int http_response_inetdrv(void *arg, int major, int minor,
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i<=27);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i<=27);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2262,12 +2263,12 @@ http_request_inetdrv(void* arg, const http_atom_t* meth, const char* meth_ptr,
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 43);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 43);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2316,12 +2317,12 @@ http_header_inetdrv(void* arg, const http_atom_t* name, const char* name_ptr,
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 26);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 26);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2347,7 +2348,7 @@ static int http_eoh_inetdrv(void* arg)
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 14);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
/* {http, S, http_eoh} */
@@ -2356,7 +2357,7 @@ static int http_eoh_inetdrv(void* arg)
i = LOAD_ATOM(spec, i, am_http_eoh);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 14);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2384,7 +2385,7 @@ static int http_error_inetdrv(void* arg, const char* buf, int len)
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 19);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
/* {http, S, {http_error,Line} */
@@ -2395,7 +2396,7 @@ static int http_error_inetdrv(void* arg, const char* buf, int len)
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 19);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2448,11 +2449,11 @@ int ssl_tls_inetdrv(void* arg, unsigned type, unsigned major, unsigned minor,
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 28);
- ret = driver_send_term(desc->inet.port, caller, spec, i);
+ ret = erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
ASSERT(i <= 28);
- ret = driver_output_term(desc->inet.port, spec, i);
+ ret = erl_drv_output_term(desc->inet.dport, spec, i);
}
done:
driver_free_binary(bin);
@@ -2502,7 +2503,7 @@ static int inet_async_data(inet_descriptor* desc, const char* buf, int len)
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i == 15);
desc->caller = 0;
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
else {
/* INET_MODE_BINARY => [H1,H2,...HSz | Binary] */
@@ -2516,7 +2517,7 @@ static int inet_async_data(inet_descriptor* desc, const char* buf, int len)
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 20);
desc->caller = 0;
- code = driver_send_term(desc->port, caller, spec, i);
+ code = erl_drv_send_term(desc->dport, caller, spec, i);
return code;
}
}
@@ -3109,7 +3110,7 @@ inet_async_binary_data
ASSERT(i <= PACKET_ERL_DRV_TERM_DATA_LEN);
desc->caller = 0;
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
/*
@@ -3132,7 +3133,7 @@ static int tcp_message(inet_descriptor* desc, const char* buf, int len)
i = LOAD_STRING(spec, i, buf, len); /* => [H1,H2,...Hn] */
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 20);
- return driver_output_term(desc->port, spec, i);
+ return erl_drv_output_term(desc->dport, spec, i);
}
else {
/* INET_MODE_BINARY => [H1,H2,...HSz | Binary] */
@@ -3144,7 +3145,7 @@ static int tcp_message(inet_descriptor* desc, const char* buf, int len)
i = LOAD_STRING_CONS(spec, i, buf, hsz);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 20);
- code = driver_output_term(desc->port, spec, i);
+ code = erl_drv_output_term(desc->dport, spec, i);
return code;
}
}
@@ -3179,7 +3180,7 @@ tcp_binary_message(inet_descriptor* desc, ErlDrvBinary* bin, int offs, int len)
}
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 20);
- return driver_output_term(desc->port, spec, i);
+ return erl_drv_output_term(desc->dport, spec, i);
}
/*
@@ -3198,7 +3199,7 @@ static int tcp_closed_message(tcp_descriptor* desc)
i = LOAD_PORT(spec, i, desc->inet.dport);
i = LOAD_TUPLE(spec, i, 2);
ASSERT(i <= 6);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
return 0;
}
@@ -3219,7 +3220,7 @@ static int tcp_error_message(tcp_descriptor* desc, int err)
i = LOAD_ATOM(spec, i, am_err);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 8);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
/*
@@ -3310,7 +3311,7 @@ static int packet_binary_message
/* Close up the outer 5-tuple: */
i = LOAD_TUPLE(spec, i, 5);
ASSERT(i <= PACKET_ERL_DRV_TERM_DATA_LEN);
- return driver_output_term(desc->port, spec, i);
+ return erl_drv_output_term(desc->dport, spec, i);
}
/*
@@ -3337,7 +3338,7 @@ static int packet_error_message(udp_descriptor* udesc, int err)
i = LOAD_ATOM(spec, i, am_err);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i == sizeof(spec)/sizeof(*spec));
- return driver_output_term(desc->port, spec, i);
+ return erl_drv_output_term(desc->dport, spec, i);
}
@@ -5465,6 +5466,28 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
}
continue;
+ case INET_LOPT_TCP_MSGQ_HIWTRMRK:
+ if (desc->stype == SOCK_STREAM) {
+ ErlDrvSizeT high;
+ if (ival < ERL_DRV_BUSY_MSGQ_LIM_MIN
+ || ERL_DRV_BUSY_MSGQ_LIM_MAX < ival)
+ return -1;
+ high = (ErlDrvSizeT) ival;
+ erl_drv_busy_msgq_limits(desc->port, NULL, &high);
+ }
+ continue;
+
+ case INET_LOPT_TCP_MSGQ_LOWTRMRK:
+ if (desc->stype == SOCK_STREAM) {
+ ErlDrvSizeT low;
+ if (ival < ERL_DRV_BUSY_MSGQ_LIM_MIN
+ || ERL_DRV_BUSY_MSGQ_LIM_MAX < ival)
+ return -1;
+ low = (ErlDrvSizeT) ival;
+ erl_drv_busy_msgq_limits(desc->port, &low, NULL);
+ }
+ continue;
+
case INET_LOPT_TCP_SEND_TIMEOUT:
if (desc->stype == SOCK_STREAM) {
tcp_descriptor* tdesc = (tcp_descriptor*) desc;
@@ -6365,6 +6388,32 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
}
continue;
+ case INET_LOPT_TCP_MSGQ_HIWTRMRK:
+ if (desc->stype == SOCK_STREAM) {
+ ErlDrvSizeT high = ERL_DRV_BUSY_MSGQ_READ_ONLY;
+ *ptr++ = opt;
+ erl_drv_busy_msgq_limits(desc->port, NULL, &high);
+ ival = high > INT_MAX ? INT_MAX : (int) high;
+ put_int32(ival, ptr);
+ }
+ else {
+ TRUNCATE_TO(0,ptr);
+ }
+ continue;
+
+ case INET_LOPT_TCP_MSGQ_LOWTRMRK:
+ if (desc->stype == SOCK_STREAM) {
+ ErlDrvSizeT low = ERL_DRV_BUSY_MSGQ_READ_ONLY;
+ *ptr++ = opt;
+ erl_drv_busy_msgq_limits(desc->port, &low, NULL);
+ ival = low > INT_MAX ? INT_MAX : (int) low;
+ put_int32(ival, ptr);
+ }
+ else {
+ TRUNCATE_TO(0,ptr);
+ }
+ continue;
+
case INET_LOPT_TCP_SEND_TIMEOUT:
if (desc->stype == SOCK_STREAM) {
*ptr++ = opt;
@@ -7278,7 +7327,7 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
i = LOAD_TUPLE(spec, i, 3);
/* Now, convert "spec" into the returnable term: */
- driver_send_term(desc->port, driver_caller(desc->port), spec, i);
+ erl_drv_send_term(desc->dport, driver_caller(desc->port), spec, i);
FREE(spec);
(*dest)[0] = INET_REP;
@@ -7360,7 +7409,7 @@ send_empty_out_q_msgs(inet_descriptor* desc)
ASSERT(msg_len == sizeof(msg)/sizeof(*msg));
- send_to_subscribers(desc->port,
+ send_to_subscribers(desc->dport,
&desc->empty_out_q_subs,
1,
msg,
@@ -8007,6 +8056,7 @@ static int tcp_inet_init(void)
static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args)
{
+ ErlDrvSizeT q_low, q_high;
tcp_descriptor* desc;
DEBUGF(("tcp_inet_start(%ld) {\r\n", (long)port));
@@ -8016,6 +8066,17 @@ static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args)
return ERL_DRV_ERROR_ERRNO;
desc->high = INET_HIGH_WATERMARK;
desc->low = INET_LOW_WATERMARK;
+ q_high = INET_HIGH_MSGQ_WATERMARK;
+ q_low = INET_LOW_MSGQ_WATERMARK;
+ if (q_low < ERL_DRV_BUSY_MSGQ_LIM_MIN)
+ q_low = ERL_DRV_BUSY_MSGQ_LIM_MIN;
+ else if (q_low > ERL_DRV_BUSY_MSGQ_LIM_MAX)
+ q_low = ERL_DRV_BUSY_MSGQ_LIM_MAX;
+ if (q_high < ERL_DRV_BUSY_MSGQ_LIM_MIN)
+ q_high = ERL_DRV_BUSY_MSGQ_LIM_MIN;
+ else if (q_high > ERL_DRV_BUSY_MSGQ_LIM_MAX)
+ q_high = ERL_DRV_BUSY_MSGQ_LIM_MAX;
+ erl_drv_busy_msgq_limits(port, &q_low, &q_high);
desc->send_timeout = INET_INFINITY;
desc->send_timeout_close = 0;
desc->busy_on_send = 0;
@@ -8039,6 +8100,7 @@ static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args)
static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
ErlDrvTermData owner, int* err)
{
+ ErlDrvSizeT q_low, q_high;
ErlDrvPort port = desc->inet.port;
tcp_descriptor* copy_desc;
@@ -8076,6 +8138,13 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
FREE(copy_desc);
return NULL;
}
+
+ /* Read busy msgq limits of parent */
+ q_low = q_high = ERL_DRV_BUSY_MSGQ_READ_ONLY;
+ erl_drv_busy_msgq_limits(desc->inet.port, &q_low, &q_high);
+ /* Write same busy msgq limits to child */
+ erl_drv_busy_msgq_limits(port, &q_low, &q_high);
+
copy_desc->inet.port = port;
copy_desc->inet.dport = driver_mk_port(port);
*err = 0;
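The two comments above describe the new erl_drv_busy_msgq_limits() call: passing ERL_DRV_BUSY_MSGQ_READ_ONLY in a limit reads the current value, while any other in-range value (clamped to ERL_DRV_BUSY_MSGQ_LIM_MIN/MAX, as in tcp_inet_start() above) sets it. Condensed, the read-from-parent/apply-to-child pattern looks like this (port variable names are placeholders):

    ErlDrvSizeT q_low  = ERL_DRV_BUSY_MSGQ_READ_ONLY;
    ErlDrvSizeT q_high = ERL_DRV_BUSY_MSGQ_READ_ONLY;

    erl_drv_busy_msgq_limits(parent_port, &q_low, &q_high); /* read parent */
    erl_drv_busy_msgq_limits(child_port, &q_low, &q_high);  /* apply to child */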
@@ -8108,7 +8177,7 @@ static void tcp_close_check(tcp_descriptor* desc)
desc->inet.state = INET_STATE_LISTENING;
while (deq_multi_op(desc,&id,&req,&caller,NULL,&monitor) == 0) {
driver_demonitor_process(desc->inet.port, &monitor);
- send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_closed);
+ send_async_error(desc->inet.dport, id, caller, am_closed);
}
clean_multi_timers(&(desc->mtd), desc->inet.port);
}
@@ -8532,7 +8601,7 @@ static void tcp_inet_multi_timeout(ErlDrvData e, ErlDrvTermData caller)
sock_select(INETP(desc),FD_ACCEPT,0);
desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
- send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_timeout);
+ send_async_error(desc->inet.dport, id, caller, am_timeout);
}
@@ -9273,7 +9342,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
if (s == INVALID_SOCKET) { /* Not ERRNO_BLOCK, that's handled right away */
- ret = send_async_error(desc->inet.port, desc->inet.dport,
+ ret = send_async_error(desc->inet.dport,
id, caller, error_atom(sock_errno()));
goto done;
}
@@ -9283,7 +9352,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
if ((accept_desc = tcp_inet_copy(desc,s,caller,&err)) == NULL) {
sock_close(s);
- ret = send_async_error(desc->inet.port, desc->inet.dport,
+ ret = send_async_error(desc->inet.dport,
id, caller, error_atom(err));
goto done;
}
@@ -9294,7 +9363,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
ERL_DRV_READ, 1);
#endif
accept_desc->inet.state = INET_STATE_CONNECTED;
- ret = send_async_ok_port(desc->inet.port, desc->inet.dport,
+ ret = send_async_ok_port(desc->inet.dport,
id, caller, accept_desc->inet.dport);
}
}
@@ -10936,7 +11005,7 @@ subs_list *subs;
static void send_to_subscribers
(
- ErlDrvPort port,
+ ErlDrvTermData port,
subs_list *subs,
int free_subs,
ErlDrvTermData msg[],
@@ -10953,7 +11022,7 @@ static void send_to_subscribers
this = subs;
while(this) {
- (void) driver_send_term(port, this->subscriber, msg, msg_len);
+ (void) erl_drv_send_term(port, this->subscriber, msg, msg_len);
if(free_subs && !first) {
next = this->next;
diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4
index ab078b9583..764b8d180c 100644
--- a/erts/emulator/hipe/hipe_bif_list.m4
+++ b/erts/emulator/hipe/hipe_bif_list.m4
@@ -145,6 +145,7 @@
* Zero-arity BIFs that can fail.
*/
standard_bif_interface_0(nbif_processes_0, processes_0)
+standard_bif_interface_0(nbif_ports_0, ports_0)
/*
* BIFs and primops that may do a GC (change heap limit and walk the native stack).
diff --git a/erts/emulator/hipe/hipe_debug.c b/erts/emulator/hipe/hipe_debug.c
index 37615bf718..f2e9d03607 100644
--- a/erts/emulator/hipe/hipe_debug.c
+++ b/erts/emulator/hipe/hipe_debug.c
@@ -189,11 +189,10 @@ void hipe_print_pcb(Process *p)
U("old_head ", old_heap);
U("min_heap_..", min_heap_size);
U("rcount ", rcount);
- U("id ", id);
- U("prio ", prio);
+ U("id ", common.id);
U("reds ", reds);
- U("tracer_pr..", tracer_proc);
- U("trace_fla..", trace_flags);
+ U("tracer_pr..", common.tracer_proc);
+ U("trace_fla..", common.trace_flags);
U("group_lea..", group_leader);
U("flags ", flags);
U("fvalue ", fvalue);
@@ -202,8 +201,8 @@ void hipe_print_pcb(Process *p)
/*XXX: ErlTimer tm; */
U("next ", next);
/*XXX: ErlOffHeap off_heap; */
- U("reg ", reg);
- U("nlinks ", nlinks);
+ U("reg ", common.u.alive.reg);
+ U("nlinks ", common.u.alive.links);
/*XXX: ErlMessageQueue msg; */
U("mbuf ", mbuf);
U("mbuf_sz ", mbuf_sz);
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index cbbf1db2e5..5911d76ad9 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -267,7 +267,7 @@ static const struct literal {
{ "P_HP_LIMIT", offsetof(struct process, stop) },
{ "P_OFF_HEAP_FIRST", offsetof(struct process, off_heap.first) },
{ "P_MBUF", offsetof(struct process, mbuf) },
- { "P_ID", offsetof(struct process, id) },
+ { "P_ID", offsetof(struct process, common.id) },
{ "P_FLAGS", offsetof(struct process, flags) },
{ "P_FVALUE", offsetof(struct process, fvalue) },
{ "P_FREASON", offsetof(struct process, freason) },
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index ce014c19c2..474408ae7c 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -200,17 +200,6 @@ static void event_large_fd_error(ErlDrvPort, ErtsSysFdType, ErlDrvEventData);
#endif
static void steal_pending_stop_select(erts_dsprintf_buf_t*, ErlDrvPort,
ErtsDrvEventState*, int mode, int on);
-static ERTS_INLINE Eterm
-drvport2id(ErlDrvPort dp)
-{
- Port *pp = erts_drvport2port(dp);
- if (pp)
- return pp->id;
- else {
- ASSERT(0);
- return am_undefined;
- }
-}
#ifdef ERTS_SMP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_FD_LIST)
@@ -378,7 +367,7 @@ abort_task(Eterm id, ErtsPortTaskHandle *pthp, EventStateType type)
|| !erts_port_task_is_scheduled(pthp));
}
else if (erts_port_task_is_scheduled(pthp)) {
- erts_port_task_abort(id, pthp);
+ erts_port_task_abort(pthp);
ASSERT(erts_is_port_alive(id));
}
}
@@ -492,7 +481,7 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
int on)
{
void (*stop_select_fn)(ErlDrvEvent, void*) = NULL;
- Eterm id = drvport2id(ix);
+ Eterm id = erts_drvport2id(ix);
ErtsSysFdType fd = (ErtsSysFdType) e;
ErtsPollEvents ctl_events = (ErtsPollEvents) 0;
ErtsPollEvents new_events, old_events;
@@ -503,8 +492,8 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
DTRACE_CHARBUF(name, 64);
#endif
- ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
- && erts_lc_is_port_locked(erts_drvport2port(ix)));
+ ERTS_SMP_LC_ASSERT(erts_drvport2port(ix, NULL)
+ && erts_lc_is_port_locked(erts_drvport2port(ix, NULL)));
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
@@ -530,9 +519,9 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
if (!on && (mode&ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
if (IS_FD_UNKNOWN(state)) {
/* fast track to stop_select callback */
- stop_select_fn = erts_drvport2port(ix)->drv_ptr->stop_select;
+ stop_select_fn = erts_drvport2port(ix, NULL)->drv_ptr->stop_select;
#ifdef USE_VM_PROBES
- strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1);
+ strncpy(name, erts_drvport2port(ix, NULL)->drv_ptr->name, sizeof(name)-1);
name[sizeof(name)-1] = '\0';
#endif
ret = 0;
@@ -665,14 +654,14 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
}
}
if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
- erts_driver_t* drv_ptr = erts_drvport2port(ix)->drv_ptr;
+ erts_driver_t* drv_ptr = erts_drvport2port(ix, NULL)->drv_ptr;
ASSERT(new_events==0);
if (state->remove_cnt == 0 || !wake_poller) {
/* Safe to close fd now as it is not in pollset
or there was no need to eject fd (kernel poll) */
stop_select_fn = drv_ptr->stop_select;
#ifdef USE_VM_PROBES
- strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1);
+ strncpy(name, erts_drvport2port(ix, NULL)->drv_ptr->name, sizeof(name)-1);
name[sizeof(name)-1] = '\0';
#endif
}
@@ -719,13 +708,13 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
ErtsPollEvents events;
ErtsPollEvents add_events;
ErtsPollEvents remove_events;
- Eterm id = drvport2id(ix);
+ Eterm id = erts_drvport2id(ix);
ErtsDrvEventState *state;
int do_wake = 0;
int ret;
- ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
- && erts_lc_is_port_locked(erts_drvport2port(ix)));
+ ERTS_SMP_LC_ASSERT(erts_drvport2port(ix, NULL)
+ && erts_lc_is_port_locked(erts_drvport2port(ix, NULL)));
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
@@ -960,7 +949,7 @@ static void
print_select_op(erts_dsprintf_buf_t *dsbufp,
ErlDrvPort ix, ErtsSysFdType fd, int mode, int on)
{
- Port *pp = erts_drvport2port(ix);
+ Port *pp = erts_drvport2port(ix, NULL);
erts_dsprintf(dsbufp,
"driver_select(%p, %d,%s%s%s%s, %d) "
"by ",
@@ -971,8 +960,8 @@ print_select_op(erts_dsprintf_buf_t *dsbufp,
mode & ERL_DRV_USE ? " ERL_DRV_USE" : "",
mode & (ERL_DRV_USE_NO_CALLBACK & ~ERL_DRV_USE) ? "_NO_CALLBACK" : "",
on);
- print_driver_name(dsbufp, pp->id);
- erts_dsprintf(dsbufp, "driver %T ", pp ? pp->id : NIL);
+ print_driver_name(dsbufp, pp->common.id);
+ erts_dsprintf(dsbufp, "driver %T ", pp ? pp->common.id : NIL);
}
static void
@@ -1031,7 +1020,7 @@ steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix,
state->driver.drv_ptr = NULL;
}
else if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
- erts_driver_t* drv_ptr = erts_drvport2port(ix)->drv_ptr;
+ erts_driver_t* drv_ptr = erts_drvport2port(ix, NULL)->drv_ptr;
if (drv_ptr != state->driver.drv_ptr) {
/* Some other driver wants the stop_select callback */
if (state->driver.drv_ptr->handle) {
@@ -1053,7 +1042,7 @@ static void
print_event_op(erts_dsprintf_buf_t *dsbufp,
ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data)
{
- Port *pp = erts_drvport2port(ix);
+ Port *pp = erts_drvport2port(ix, NULL);
erts_dsprintf(dsbufp, "driver_event(%p, %d, ", ix, (int) fd);
if (!event_data)
erts_dsprintf(dsbufp, "NULL");
@@ -1062,8 +1051,8 @@ print_event_op(erts_dsprintf_buf_t *dsbufp,
(unsigned int) event_data->events,
(unsigned int) event_data->revents);
erts_dsprintf(dsbufp, ") by ");
- print_driver_name(dsbufp, pp->id);
- erts_dsprintf(dsbufp, "driver %T ", pp ? pp->id : NIL);
+ print_driver_name(dsbufp, pp->common.id);
+ erts_dsprintf(dsbufp, "driver %T ", pp ? pp->common.id : NIL);
}
static void
@@ -1100,8 +1089,7 @@ iready(Eterm id, ErtsDrvEventState *state)
if (erts_port_task_schedule(id,
&state->driver.select->intask,
ERTS_PORT_TASK_INPUT,
- (ErlDrvEvent) state->fd,
- NULL) != 0) {
+ (ErlDrvEvent) state->fd) != 0) {
stale_drv_select(id, state, ERL_DRV_READ);
}
}
@@ -1112,8 +1100,7 @@ oready(Eterm id, ErtsDrvEventState *state)
if (erts_port_task_schedule(id,
&state->driver.select->outtask,
ERTS_PORT_TASK_OUTPUT,
- (ErlDrvEvent) state->fd,
- NULL) != 0) {
+ (ErlDrvEvent) state->fd) != 0) {
stale_drv_select(id, state, ERL_DRV_WRITE);
}
}
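The hunks above drop the file-local drvport2id() helper in favour of the internal erts_drvport2id(), add a second (NULL) argument to erts_drvport2port(), and remove the extra arguments from erts_port_task_abort()/erts_port_task_schedule(). A minimal sketch of the resulting lookup pattern, with a hypothetical example_lookup() helper; it only compiles inside the emulator source tree where these internal declarations live, and the NULL second argument is copied verbatim from the calls above:

static void example_lookup(ErlDrvPort ix)
{
    Eterm id = erts_drvport2id(ix);           /* replaces drvport2id(ix) */
    Port *prt = erts_drvport2port(ix, NULL);  /* NULL second arg as in the hunks above */

    if (prt) {
        /* the port identifier now lives in the embedded common part */
        ASSERT(prt->common.id == id);
    }
}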
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 9e7cbc017f..0b96eded76 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -123,7 +123,8 @@ struct ErtsSysReportExit_ {
/* This data is shared by these drivers - initialized by spawn_init() */
static struct driver_data {
- int port_num, ofd, packet_bytes;
+ ErlDrvPort port_num;
+ int ofd, packet_bytes;
ErtsSysReportExit *report_exit;
int pid;
int alive;
@@ -731,7 +732,8 @@ prepare_crash_dump(int secs)
list = CONS(hp, make_small(8), list); hp += 2;
/* send to heart port, CMD = 8, i.e. prepare crash dump =o */
- erts_write_to_port(ERTS_INVALID_PID, heart_port, list);
+ erts_port_output(NULL, ERTS_PORT_SIG_FLG_FORCE_IMM_CALL, heart_port,
+ heart_port->common.id, list, NULL);
}
/* Make sure we unregister at epmd (unknown fd) and get at least
@@ -1182,7 +1184,7 @@ static RETSIGTYPE onchld(int signum)
#endif
}
-static int set_driver_data(int port_num,
+static int set_driver_data(ErlDrvPort port_num,
int ifd,
int ofd,
int packet_bytes,
@@ -1190,6 +1192,7 @@ static int set_driver_data(int port_num,
int exit_status,
int pid)
{
+ Port *prt;
ErtsSysReportExit *report_exit;
if (!exit_status)
@@ -1198,7 +1201,7 @@ static int set_driver_data(int port_num,
report_exit = erts_alloc(ERTS_ALC_T_PRT_REP_EXIT,
sizeof(ErtsSysReportExit));
report_exit->next = report_exit_list;
- report_exit->port = erts_port[port_num].id;
+ report_exit->port = erts_drvport2id(port_num);
report_exit->pid = pid;
report_exit->ifd = read_write & DO_READ ? ifd : -1;
report_exit->ofd = read_write & DO_WRITE ? ofd : -1;
@@ -1208,7 +1211,9 @@ static int set_driver_data(int port_num,
report_exit_list = report_exit;
}
- erts_port[port_num].os_pid = pid;
+ prt = erts_drvport2port(port_num, NULL);
+ if (prt)
+ prt->os_pid = pid;
if (read_write & DO_READ) {
driver_data[ifd].packet_bytes = packet_bytes;
@@ -1281,7 +1286,7 @@ static void close_pipes(int ifd[2], int ofd[2], int read_write)
}
}
-static void init_fd_data(int fd, int prt)
+static void init_fd_data(int fd, ErlDrvPort port_num)
{
fd_data[fd].buf = NULL;
fd_data[fd].cpos = NULL;
@@ -1971,7 +1976,7 @@ static void clear_fd_data(int fd)
fd_data[fd].psz = 0;
}
-static void nbio_stop_fd(int prt, int fd)
+static void nbio_stop_fd(ErlDrvPort prt, int fd)
{
driver_select(prt,fd,DO_READ|DO_WRITE,0);
clear_fd_data(fd);
@@ -2019,7 +2024,8 @@ static ErlDrvData vanilla_start(ErlDrvPort port_num, char* name,
static void stop(ErlDrvData fd)
{
- int prt, ofd;
+ ErlDrvPort prt;
+ int ofd;
prt = driver_data[(int)(long)fd].port_num;
nbio_stop_fd(prt, (int)(long)fd);
@@ -2032,7 +2038,7 @@ static void stop(ErlDrvData fd)
CHLD_STAT_LOCK;
- /* Mark as unused. Maybe resetting the 'port_num' slot is better? */
+ /* Mark as unused. */
driver_data[(int)(long)fd].pid = -1;
CHLD_STAT_UNLOCK;
@@ -2048,7 +2054,7 @@ static void stop(ErlDrvData fd)
static void outputv(ErlDrvData e, ErlIOVec* ev)
{
int fd = (int)(long)e;
- int ix = driver_data[fd].port_num;
+ ErlDrvPort ix = driver_data[fd].port_num;
int pb = driver_data[fd].packet_bytes;
int ofd = driver_data[fd].ofd;
ssize_t n;
@@ -2098,7 +2104,7 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
static void output(ErlDrvData e, char* buf, ErlDrvSizeT len)
{
int fd = (int)(long)e;
- int ix = driver_data[fd].port_num;
+ ErlDrvPort ix = driver_data[fd].port_num;
int pb = driver_data[fd].packet_bytes;
int ofd = driver_data[fd].ofd;
ssize_t n;
@@ -2149,7 +2155,7 @@ static void output(ErlDrvData e, char* buf, ErlDrvSizeT len)
return; /* 0; */
}
-static int port_inp_failure(int port_num, int ready_fd, int res)
+static int port_inp_failure(ErlDrvPort port_num, int ready_fd, int res)
/* Result: 0 (eof) or -1 (error) */
{
int err = errno;
@@ -2199,7 +2205,7 @@ static int port_inp_failure(int port_num, int ready_fd, int res)
static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
{
int fd = (int)(long)e;
- int port_num;
+ ErlDrvPort port_num;
int packet_bytes;
int res;
Uint h;
@@ -2322,7 +2328,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
static void ready_output(ErlDrvData e, ErlDrvEvent ready_fd)
{
int fd = (int)(long)e;
- int ix = driver_data[fd].port_num;
+ ErlDrvPort ix = driver_data[fd].port_num;
int n;
struct iovec* iv;
int vsize;
@@ -2631,19 +2637,20 @@ report_exit_status(ErtsSysReportExit *rep, int status)
Port *pp;
#ifdef ERTS_SMP
CHLD_STAT_UNLOCK;
-#endif
+ pp = erts_thr_id2port_sflgs(rep->port,
+ ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+ CHLD_STAT_LOCK;
+#else
pp = erts_id2port_sflgs(rep->port,
NULL,
0,
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
-#ifdef ERTS_SMP
- CHLD_STAT_LOCK;
#endif
if (pp) {
if (rep->ifd >= 0) {
driver_data[rep->ifd].alive = 0;
driver_data[rep->ifd].status = status;
- (void) driver_select((ErlDrvPort) internal_port_index(pp->id),
+ (void) driver_select((ErlDrvPort) pp,
rep->ifd,
(ERL_DRV_READ|ERL_DRV_USE),
1);
@@ -2651,12 +2658,16 @@ report_exit_status(ErtsSysReportExit *rep, int status)
if (rep->ofd >= 0) {
driver_data[rep->ofd].alive = 0;
driver_data[rep->ofd].status = status;
- (void) driver_select((ErlDrvPort) internal_port_index(pp->id),
+ (void) driver_select((ErlDrvPort) pp,
rep->ofd,
(ERL_DRV_WRITE|ERL_DRV_USE),
1);
}
+#ifdef ERTS_SMP
+ erts_thr_port_release(pp);
+#else
erts_port_release(pp);
+#endif
}
erts_free(ERTS_ALC_T_PRT_REP_EXIT, rep);
}
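For reference, the lookup/release pairing that report_exit_status() switches to above, written out as a stand-alone sketch (internal emulator API only; with_port() is a hypothetical helper): under SMP the thread-safe erts_thr_id2port_sflgs() lookup must be balanced by erts_thr_port_release(), while the non-SMP build keeps the old erts_id2port_sflgs()/erts_port_release() pair.

static void with_port(Eterm id)
{
#ifdef ERTS_SMP
    Port *pp = erts_thr_id2port_sflgs(id,
                                      ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
#else
    Port *pp = erts_id2port_sflgs(id, NULL, 0,
                                  ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
#endif
    if (!pp)
        return;
    /* ... use pp ... */
#ifdef ERTS_SMP
    erts_thr_port_release(pp);
#else
    erts_port_release(pp);
#endif
}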
diff --git a/erts/emulator/sys/win32/erl_win_dyn_driver.h b/erts/emulator/sys/win32/erl_win_dyn_driver.h
index ec5141838a..8b6be2b2f1 100644
--- a/erts/emulator/sys/win32/erl_win_dyn_driver.h
+++ b/erts/emulator/sys/win32/erl_win_dyn_driver.h
@@ -74,7 +74,9 @@ WDD_TYPEDEF(ErlDrvTermData, driver_mk_port,(ErlDrvPort));
WDD_TYPEDEF(ErlDrvTermData, driver_connected,(ErlDrvPort));
WDD_TYPEDEF(ErlDrvTermData, driver_caller,(ErlDrvPort));
WDD_TYPEDEF(ErlDrvTermData, driver_mk_term_nil,(void));
+WDD_TYPEDEF(int, erl_drv_output_term, (ErlDrvTermData, ErlDrvTermData*, int));
WDD_TYPEDEF(int, driver_output_term, (ErlDrvPort, ErlDrvTermData*, int));
+WDD_TYPEDEF(int, erl_drv_send_term, (ErlDrvTermData, ErlDrvTermData, ErlDrvTermData*, int));
WDD_TYPEDEF(int, driver_send_term, (ErlDrvPort, ErlDrvTermData, ErlDrvTermData*, int));
WDD_TYPEDEF(long, driver_async, (ErlDrvPort,unsigned int*,void (*)(void*),void*,void (*)(void*)));
WDD_TYPEDEF(int, driver_async_cancel, (unsigned int));
@@ -187,7 +189,9 @@ typedef struct {
WDD_FTYPE(driver_connected) *driver_connected;
WDD_FTYPE(driver_caller) *driver_caller;
WDD_FTYPE(driver_mk_term_nil) *driver_mk_term_nil;
+ WDD_FTYPE(erl_drv_output_term) *erl_drv_output_term;
WDD_FTYPE(driver_output_term) *driver_output_term;
+ WDD_FTYPE(erl_drv_send_term) *erl_drv_send_term;
WDD_FTYPE(driver_send_term) *driver_send_term;
WDD_FTYPE(driver_async) *driver_async;
WDD_FTYPE(driver_async_cancel) *driver_async_cancel;
@@ -294,7 +298,9 @@ extern TWinDynDriverCallbacks WinDynDriverCallbacks;
#define driver_connected (WinDynDriverCallbacks.driver_connected)
#define driver_caller (WinDynDriverCallbacks.driver_caller)
#define driver_mk_term_nil (WinDynDriverCallbacks.driver_mk_term_nil)
+#define erl_drv_output_term (WinDynDriverCallbacks.erl_drv_output_term)
#define driver_output_term (WinDynDriverCallbacks.driver_output_term)
+#define erl_drv_send_term (WinDynDriverCallbacks.erl_drv_send_term)
#define driver_send_term (WinDynDriverCallbacks.driver_send_term)
#define driver_async (WinDynDriverCallbacks.driver_async)
#define driver_async_cancel (WinDynDriverCallbacks.driver_async_cancel)
@@ -425,7 +431,9 @@ do { \
((W).driver_connected) = driver_connected; \
((W).driver_caller) = driver_caller; \
((W).driver_mk_term_nil) = driver_mk_term_nil; \
+((W).erl_drv_output_term) = erl_drv_output_term; \
((W).driver_output_term) = driver_output_term; \
+((W).erl_drv_send_term) = erl_drv_send_term; \
((W).driver_send_term) = driver_send_term; \
((W).driver_async) = driver_async; \
((W).driver_async_cancel) = driver_async_cancel; \
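The two entries added to the dynamic-driver call table above are the ErlDrvTermData-based variants of driver_output_term()/driver_send_term(). A minimal usage sketch matching the WDD_TYPEDEF signatures, with a hypothetical send_hi() helper inside an ordinary driver:

#include "erl_driver.h"

static void send_hi(ErlDrvPort port)
{
    ErlDrvTermData spec[] = {
        ERL_DRV_PORT, driver_mk_port(port),
        ERL_DRV_ATOM, driver_mk_atom("hi"),
        ERL_DRV_TUPLE, 2
    };
    /* the port is identified by its ErlDrvTermData, not by the ErlDrvPort handle */
    erl_drv_output_term(driver_mk_port(port),
                        spec, sizeof(spec)/sizeof(spec[0]));
    /* same term, but sent to an explicit receiver such as the current caller */
    erl_drv_send_term(driver_mk_port(port), driver_caller(port),
                      spec, sizeof(spec)/sizeof(spec[0]));
}

The thr_msg_blast_drv.c test driver added later in this diff builds the same kind of {Port, hi} spec and pushes it through driver_send_term() from plain driver threads.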
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index f19f4ebd8c..1cd9072cea 100755
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -87,9 +87,6 @@ static erts_smp_tsd_key_t win32_errstr_key;
static erts_smp_atomic_t pipe_creation_counter;
-static erts_smp_mtx_t sys_driver_data_lock;
-
-
/* Results from application_type(_w) is one of */
#define APPL_NONE 0
#define APPL_DOS 1
@@ -97,7 +94,6 @@ static erts_smp_mtx_t sys_driver_data_lock;
#define APPL_WIN32 3
static int driver_write(long, HANDLE, byte*, int);
-static void common_stop(int);
static int create_file_thread(struct async_io* aio, int mode);
#ifdef ERTS_SMP
static void close_active_handle(ErlDrvPort, HANDLE handle);
@@ -115,9 +111,6 @@ BOOL WINAPI ctrl_handler(DWORD dwCtrlType);
#define PORT_BUFSIZ 4096
-#define PORT_FREE (-1)
-#define PORT_EXITING (-2)
-
#define DRV_BUF_ALLOC(SZ) \
erts_alloc_fnf(ERTS_ALC_T_DRV_DATA_BUF, (SZ))
#define DRV_BUF_REALLOC(P, SZ) \
@@ -269,7 +262,8 @@ int erts_sys_prepare_crash_dump(int secs)
list = CONS(hp, make_small(8), list); hp += 2;
/* send to heart port, CMD = 8, i.e. prepare crash dump =o */
- erts_write_to_port(NIL, heart_port, list);
+ erts_port_output(NULL, ERTS_PORT_SIG_FLG_FORCE_IMM_CALL, heart_port,
+ heart_port->common.id, list, NULL);
return 1;
}
@@ -474,7 +468,7 @@ typedef struct driver_data {
byte *inbuf; /* Buffer to use for overlapped read. */
int outBufSize; /* Size of output buffer. */
byte *outbuf; /* Buffer to use for overlapped write. */
- ErlDrvPort port_num; /* The port number. */
+ ErlDrvPort port_num; /* The port handle. */
int packet_bytes; /* 0: continous stream, 1, 2, or 4: the number
* of bytes in the packet header.
*/
@@ -484,8 +478,6 @@ typedef struct driver_data {
int report_exit; /* Do report exit status for the port */
} DriverData;
-static DriverData* driver_data; /* Pointer to array of driver data. */
-
/* Driver interfaces */
static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*);
static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
@@ -597,67 +589,53 @@ struct erl_drv_entry vanilla_driver_entry = {
*/
static DriverData*
-new_driver_data(int port_num, int packet_bytes, int wait_objs_required, int use_threads)
+new_driver_data(ErlDrvPort port_num, int packet_bytes, int wait_objs_required, int use_threads)
{
DriverData* dp;
-
- erts_smp_mtx_lock(&sys_driver_data_lock);
- DEBUGF(("new_driver_data(port_num %d, pb %d)\n",
- port_num, packet_bytes));
+ DEBUGF(("new_driver_data(%p, pb %d)\n", port_num, packet_bytes));
+ dp = driver_alloc(sizeof(DriverData));
+ if (!dp)
+ return NULL;
/*
* We used to test first at all that there is enough room in the
* array used by WaitForMultipleObjects(), but that is not necessary
* any more, since driver_select() can't fail.
*/
- /*
- * Search for a free slot.
- */
+ dp->bytesInBuffer = 0;
+ dp->totalNeeded = packet_bytes;
+ dp->inBufSize = PORT_BUFSIZ;
+ dp->inbuf = DRV_BUF_ALLOC(dp->inBufSize);
+ if (dp->inbuf == NULL)
+ goto buf_alloc_error;
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize);
+ dp->outBufSize = 0;
+ dp->outbuf = NULL;
+ dp->port_num = port_num;
+ dp->packet_bytes = packet_bytes;
+ dp->port_pid = INVALID_HANDLE_VALUE;
+ if (init_async_io(&dp->in, use_threads) == -1)
+ goto async_io_error1;
+ if (init_async_io(&dp->out, use_threads) == -1)
+ goto async_io_error2;
- for (dp = driver_data; dp < driver_data+max_files; dp++) {
- if (dp->port_num == PORT_FREE) {
- dp->bytesInBuffer = 0;
- dp->totalNeeded = packet_bytes;
- dp->inBufSize = PORT_BUFSIZ;
- dp->inbuf = DRV_BUF_ALLOC(dp->inBufSize);
- if (dp->inbuf == NULL) {
- erts_smp_mtx_unlock(&sys_driver_data_lock);
- return NULL;
- }
- erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize);
- dp->outBufSize = 0;
- dp->outbuf = NULL;
- dp->port_num = port_num;
- dp->packet_bytes = packet_bytes;
- dp->port_pid = INVALID_HANDLE_VALUE;
- if (init_async_io(&dp->in, use_threads) == -1)
- break;
- if (init_async_io(&dp->out, use_threads) == -1)
- break;
- erts_smp_mtx_unlock(&sys_driver_data_lock);
- return dp;
- }
- }
+ return dp;
- /*
- * Error or no free driver data.
- */
+async_io_error2:
+ release_async_io(&dp->in, dp->port_num);
+async_io_error1:
+ release_async_io(&dp->out, dp->port_num);
- if (dp < driver_data+max_files) {
- release_async_io(&dp->in, dp->port_num);
- release_async_io(&dp->out, dp->port_num);
- }
- erts_smp_mtx_unlock(&sys_driver_data_lock);
+buf_alloc_error:
+ driver_free(dp);
return NULL;
}
static void
release_driver_data(DriverData* dp)
{
- erts_smp_mtx_lock(&sys_driver_data_lock);
-
#ifdef ERTS_SMP
#ifdef USE_CANCELIOEX
if (fpCancelIoEx != NULL) {
@@ -741,8 +719,7 @@ release_driver_data(DriverData* dp)
* the exit thread.
*/
- dp->port_num = PORT_FREE;
- erts_smp_mtx_unlock(&sys_driver_data_lock);
+ driver_free(dp);
}
#ifdef ERTS_SMP
@@ -837,7 +814,6 @@ threaded_handle_closer(LPVOID param)
static ErlDrvData
set_driver_data(DriverData* dp, HANDLE ifd, HANDLE ofd, int read_write, int report_exit)
{
- int index = dp - driver_data;
int result;
dp->in.fd = ifd;
@@ -856,13 +832,12 @@ set_driver_data(DriverData* dp, HANDLE ifd, HANDLE ofd, int read_write, int repo
ERL_DRV_WRITE|ERL_DRV_USE, 1);
ASSERT(result != -1);
}
- return (ErlDrvData)index;
+ return (ErlDrvData) dp;
}
static ErlDrvData
reuse_driver_data(DriverData *dp, HANDLE ifd, HANDLE ofd, int read_write, ErlDrvPort port_num)
{
- int index = dp - driver_data;
int result;
dp->port_num = port_num;
@@ -881,7 +856,7 @@ reuse_driver_data(DriverData *dp, HANDLE ifd, HANDLE ofd, int read_write, ErlDrv
ERL_DRV_WRITE|ERL_DRV_USE, 1);
ASSERT(result != -1);
}
- return (ErlDrvData)index;
+ return (ErlDrvData) dp;
}
/*
@@ -1154,12 +1129,6 @@ spawn_init(void)
((module != NULL) ? GetProcAddress(module,"CancelIoEx") : NULL);
DEBUGF(("fpCancelIoEx = %p\r\n", fpCancelIoEx));
#endif
- driver_data = (struct driver_data *)
- erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
- erts_smp_atomic_add_nob(&sys_misc_mem_sz,
- max_files*sizeof(struct driver_data));
- for (i = 0; i < max_files; i++)
- driver_data[i].port_num = PORT_FREE;
return 0;
}
@@ -1290,9 +1259,12 @@ spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
#endif
retval = set_driver_data(dp, hFromChild, hToChild, opts->read_write,
opts->exit_status);
- if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO)
- /* We assume that this cannot generate a negative number */
- erts_port[port_num].os_pid = (SWord) pid;
+ if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO) {
+ Port *prt = erts_drvport2port_raw(port_num);
+ /* We assume that this cannot generate a negative number */
+ ASSERT(prt);
+ prt->os_pid = (SWord) pid;
+ }
}
if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO)
@@ -2281,12 +2253,10 @@ fd_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
**/
if (!create_file_thread(&dp->in, DO_READ)) {
- dp->port_num = PORT_FREE;
return ERL_DRV_ERROR_GENERAL;
}
if (!create_file_thread(&dp->out, DO_WRITE)) {
- dp->port_num = PORT_FREE;
return ERL_DRV_ERROR_GENERAL;
}
@@ -2306,10 +2276,9 @@ fd_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
}
}
-static void fd_stop(ErlDrvData d)
+static void fd_stop(ErlDrvData data)
{
- int fd = (int)d;
- DriverData* dp = driver_data+fd;
+ DriverData * dp = (DriverData *) data;
/*
* There's no way we can terminate an fd port in a consistent way.
* Instead we let it live until it's opened again (which it is,
@@ -2372,16 +2341,10 @@ vanilla_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
}
static void
-stop(ErlDrvData index)
-{
- common_stop((int)index);
-}
-
-static void common_stop(int index)
+stop(ErlDrvData data)
{
- DriverData* dp = driver_data+index;
-
- DEBUGF(("common_stop(%d)\n", index));
+ DriverData *dp = (DriverData *) data;
+ DEBUGF(("stop(%p)\n", dp));
if (dp->in.ov.hEvent != NULL) {
(void) driver_select(dp->port_num,
@@ -2403,7 +2366,6 @@ static void common_stop(int index)
*/
HANDLE thread;
DWORD tid;
- dp->port_num = PORT_EXITING;
thread = (HANDLE *) _beginthreadex(NULL, 0, threaded_exiter, dp, 0, &tid);
CloseHandle(thread);
}
@@ -2528,22 +2490,17 @@ threaded_exiter(LPVOID param)
static void
output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
-/* long drv_data; /* The slot to use in the driver data table.
+/* ErlDrvData drv_data; /* The slot to use in the driver data table.
* For Windows NT, this is *NOT* a file handle.
* The handle is found in the driver data.
*/
/* char *buf; /* Pointer to data to write to the port program. */
/* ErlDrvSizeT len; /* Number of bytes to write. */
{
- DriverData* dp;
+ DriverData* dp = (DriverData *) drv_data;
int pb; /* The header size for this port. */
- int port_num; /* The actual port number (for diagnostics). */
char* current;
- dp = driver_data + (int)drv_data;
- if ((port_num = dp->port_num) == -1)
- return ; /*-1;*/
-
pb = dp->packet_bytes;
if ((pb+len) == 0)
@@ -2554,7 +2511,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
*/
if ((pb == 2 && len > 65535) || (pb == 1 && len > 255)) {
- driver_failure_posix(port_num, EINVAL);
+ driver_failure_posix(dp->port_num, EINVAL);
return ; /* -1; */
}
@@ -2568,7 +2525,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
ASSERT(!dp->outbuf);
dp->outbuf = DRV_BUF_ALLOC(pb+len);
if (!dp->outbuf) {
- driver_failure_posix(port_num, ENOMEM);
+ driver_failure_posix(dp->port_num, ENOMEM);
return ; /* -1; */
}
@@ -2598,7 +2555,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
memcpy(current, buf, len);
if (!async_write_file(&dp->out, dp->outbuf, pb+len)) {
- set_busy_port(port_num, 1);
+ set_busy_port(dp->port_num, 1);
} else {
dp->out.ov.Offset += pb+len; /* For vanilla driver. */
/* XXX OffsetHigh should be changed too. */
@@ -2633,10 +2590,9 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event)
{
int error = 0; /* The error code (assume initially no errors). */
DWORD bytesRead; /* Number of bytes read. */
- DriverData* dp;
+ DriverData* dp = (DriverData *) drv_data;
int pb;
- dp = driver_data+(int)drv_data;
pb = dp->packet_bytes;
#ifdef ERTS_SMP
if(dp->in.thread == (HANDLE) -1) {
@@ -2804,7 +2760,7 @@ static void
ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
{
DWORD bytesWritten;
- DriverData* dp = driver_data + (int)drv_data;
+ DriverData *dp = (DriverData *) drv_data;
int error;
#ifdef ERTS_SMP
@@ -2812,7 +2768,7 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
dp->out.async_io_active = 0;
}
#endif
- DEBUGF(("ready_output(%d, 0x%x)\n", drv_data, ready_event));
+ DEBUGF(("ready_output(%p, 0x%x)\n", drv_data, ready_event));
set_busy_port(dp->port_num, 0);
if (!(dp->outbuf)) {
/* Happens because event sometimes get signalled during a successful
@@ -2867,7 +2823,7 @@ sys_init_io(void)
can change our view of the number of open files possible.
We estimate the number to twice the amount of ports.
We really dont know on windows, do we? */
- max_files = 2*erts_max_ports;
+ max_files = 2*erts_ptab_max(&erts_port);
}
#ifdef ERTS_SMP
@@ -3322,9 +3278,6 @@ void erl_sys_init(void)
noinherit_std_handle(STD_INPUT_HANDLE);
noinherit_std_handle(STD_ERROR_HANDLE);
-
- erts_smp_mtx_init(&sys_driver_data_lock, "sys_driver_data_lock");
-
#ifdef ERTS_SMP
erts_smp_tsd_key_create(&win32_errstr_key);
InitializeCriticalSection(&htbc_lock);
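With the fixed driver_data array and the PORT_FREE/PORT_EXITING slot states gone, the Win32 callbacks above treat the heap-allocated DriverData pointer itself as the ErlDrvData handle. A sketch of that convention using the new_driver_data()/release_driver_data() helpers shown above (example_start()/example_stop() are hypothetical, and the 0/2/0 arguments are illustrative only):

static ErlDrvData example_start(ErlDrvPort port_num, char *name)
{
    DriverData *dp = new_driver_data(port_num, 0, 2, 0);
    if (dp == NULL)
        return ERL_DRV_ERROR_GENERAL;
    return (ErlDrvData) dp;                    /* the pointer is the handle */
}

static void example_stop(ErlDrvData drv_data)
{
    DriverData *dp = (DriverData *) drv_data;  /* cast back; no table lookup */
    release_driver_data(dp);                   /* ends with driver_free(dp) */
}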
diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl
index 3a29fd4d68..32e907ca69 100644
--- a/erts/emulator/test/busy_port_SUITE.erl
+++ b/erts/emulator/test/busy_port_SUITE.erl
@@ -148,9 +148,9 @@ message_order(Config) when is_list(Config) ->
send_to_busy_1(Parent) ->
{Owner, Slave} = get_slave(),
- Slave ! {Owner, {command, "set_me_busy"}},
- Slave ! {Owner, {command, "hello"}},
- Slave ! {Owner, {command, "hello again"}},
+ (catch port_command(Slave, "set_me_busy")),
+ (catch port_command(Slave, "hello")),
+ (catch port_command(Slave, "hello again")),
receive
Message ->
Parent ! {self(), Message}
@@ -193,10 +193,10 @@ system_monitor(Config) when is_list(Config) ->
?line Busy =
spawn_link(
fun() ->
- Slave ! {Owner,{command,"set busy"}},
+ (catch port_command(Slave, "set busy")),
receive {Parent,alpha} -> ok end,
- Slave ! {Owner,{command,"busy"}},
- Slave ! {Owner,{command,"free"}},
+ (catch port_command(Slave, "busy")),
+ (catch port_command(Slave, "free")),
Parent ! {self(),alpha},
command(lock),
receive {Parent,beta} -> ok end,
@@ -212,7 +212,7 @@ system_monitor(Config) when is_list(Config) ->
?line Void = rec(Void),
?line Busy ! {self(), beta},
?line {monitor,Owner,busy_port,Slave} = rec(Void),
- ?line Master ! {Owner, {command, "u"}},
+ ?line port_command(Master, "u"),
?line {Busy,beta} = rec(Void),
?line Void = rec(Void),
?line _NewMonitor = erlang:system_monitor(OldMonitor),
@@ -296,9 +296,9 @@ no_trap_exit_process(ResultTo, Link, Config) ->
linked -> ok;
unlink -> unlink(Slave)
end,
- ?line Slave ! {self(), {command, "lock port"}},
+ ?line (catch port_command(Slave, "lock port")),
?line ResultTo ! {self(), port_created, Slave},
- ?line Slave ! {self(), {command, "suspend me"}},
+ ?line (catch port_command(Slave, "suspend me")),
ok.
%% Assuming the following scenario,
@@ -339,9 +339,9 @@ busy_port_exit_process(ResultTo, Config) ->
?line load_busy_driver(Config),
?line _Master = open_port({spawn, "busy_drv master"}, [eof]),
?line Slave = open_port({spawn, "busy_drv slave"}, [eof]),
- ?line Slave ! {self(), {command, "lock port"}},
+ ?line (catch port_command(Slave, "lock port")),
?line ResultTo ! {self(), port_created, Slave},
- ?line Slave ! {self(), {command, "suspend me"}},
+ ?line (catch port_command(Slave, "suspend me")),
receive
{'EXIT', Slave, die} ->
ResultTo ! {self(), ok};
@@ -383,8 +383,8 @@ multiple_writers(Config) when is_list(Config) ->
quick_writer() ->
{Owner, Port} = get_slave(),
- Port ! {Owner, {command, "port to busy"}},
- Port ! {Owner, {command, "lock me"}},
+ (catch port_command(Port, "port to busy")),
+ (catch port_command(Port, "lock me")),
ok.
hard_busy_driver(Config) when is_list(Config) ->
@@ -644,11 +644,11 @@ loop(Master, Slave) ->
Pid ! {busy_drv_reply, {self(), Slave}},
loop(Master, Slave);
{Pid, unlock} ->
- Master ! {self(), {command, "u"}},
+ port_command(Master, "u"),
Pid ! {busy_drv_reply, ok},
loop(Master, Slave);
{Pid, lock} ->
- Master ! {self(), {command, "l"}},
+ port_command(Master, "l"),
Pid ! {busy_drv_reply, ok},
loop(Master, Slave);
{Pid, {port_command,Data}} ->
diff --git a/erts/emulator/test/ddll_SUITE.erl b/erts/emulator/test/ddll_SUITE.erl
index 6e15c228cd..4675cab15c 100644
--- a/erts/emulator/test/ddll_SUITE.erl
+++ b/erts/emulator/test/ddll_SUITE.erl
@@ -136,8 +136,8 @@ delayed_unload_with_ports(Config) when is_list(Config) ->
?line {ok,pending_driver,Ref} = erl_ddll:try_unload(echo_drv,[{monitor, pending_driver}]),
?line ok = receive _ -> false after 0 -> ok end,
?line Port ! {self(), close},
- ?line 1 = erl_ddll:info(echo_drv, port_count),
?line ok = receive {Port,closed} -> ok after 1000 -> false end,
+ ?line 1 = erl_ddll:info(echo_drv, port_count),
?line Port2 ! {self(), close},
?line ok = receive {Port2,closed} -> ok after 1000 -> false end,
?line ok = receive {'DOWN', Ref, driver, echo_drv, unloaded} -> ok after 1000 -> false end,
diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl
index 643357263c..13f18b4563 100644
--- a/erts/emulator/test/driver_SUITE.erl
+++ b/erts/emulator/test/driver_SUITE.erl
@@ -77,7 +77,8 @@
thread_mseg_alloc_cache_clean/1,
otp_9302/1,
thr_free_drv/1,
- async_blast/1]).
+ async_blast/1,
+ thr_msg_blast/1]).
-export([bin_prefix/2]).
@@ -147,7 +148,8 @@ all() ->
thread_mseg_alloc_cache_clean,
otp_9302,
thr_free_drv,
- async_blast].
+ async_blast,
+ thr_msg_blast].
groups() ->
[{timer, [],
@@ -1136,7 +1138,9 @@ check_driver_system_info_result(Result) ->
{{1, 1}, _} ->
?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
-- ?EXPECTED_SYSTEM_INFO_NAMES2),
- ?line ExpNs = lists:sort(Ns)
+ ?line ExpNs = lists:sort(Ns);
+ {{2, 0}, _} ->
+ ?line [] = Ns
end.
chk_sis(SIs, Ns) ->
@@ -2010,7 +2014,64 @@ async_blast(Config) when is_list(Config) ->
?line erlang:display({async_blast_time, AsyncBlastTime}),
?line ok.
+thr_msg_blast_receiver(_Port, N, N) ->
+ ok;
+thr_msg_blast_receiver(Port, N, Max) ->
+ receive
+ {Port, hi} ->
+ thr_msg_blast_receiver(Port, N+1, Max)
+ end.
+
+thr_msg_blast_receiver_proc(Port, Max, Parent, Done) ->
+ case port_control(Port, 0, "") of
+ "receiver" ->
+ spawn(fun () ->
+ thr_msg_blast_receiver_proc(Port, Max+1, Parent, Done)
+ end),
+ thr_msg_blast_receiver(Port, 0, Max);
+ "done" ->
+ Parent ! Done
+ end.
+thr_msg_blast(Config) when is_list(Config) ->
+ case erlang:system_info(smp_support) of
+ false ->
+ {skipped, "Non-SMP emulator; nothing to test..."};
+ true ->
+ Path = ?config(data_dir, Config),
+ erl_ddll:start(),
+ ok = load_driver(Path, thr_msg_blast_drv),
+ MemBefore = driver_alloc_size(),
+ Start = os:timestamp(),
+ Port = open_port({spawn, thr_msg_blast_drv}, []),
+ true = is_port(Port),
+ Done = make_ref(),
+ Me = self(),
+ spawn(fun () ->
+ thr_msg_blast_receiver_proc(Port, 1, Me, Done)
+ end),
+ receive
+ Done -> ok
+ end,
+ ok = thr_msg_blast_receiver(Port, 0, 32*10000),
+ port_close(Port),
+ End = os:timestamp(),
+ receive
+ Garbage ->
+ ?t:fail({received_garbage, Port, Garbage})
+ after 2000 ->
+ ok
+ end,
+ MemAfter = driver_alloc_size(),
+ io:format("MemBefore=~p, MemAfter=~p~n",
+ [MemBefore, MemAfter]),
+ ThrMsgBlastTime = timer:now_diff(End,Start)/1000000,
+ io:format("ThrMsgBlastTime=~p~n", [ThrMsgBlastTime]),
+ MemBefore = MemAfter,
+ Res = {thr_msg_blast_time, ThrMsgBlastTime},
+ erlang:display(Res),
+ Res
+ end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Utilities
diff --git a/erts/emulator/test/driver_SUITE_data/Makefile.src b/erts/emulator/test/driver_SUITE_data/Makefile.src
index 9cc107cc66..b667dff6b6 100644
--- a/erts/emulator/test/driver_SUITE_data/Makefile.src
+++ b/erts/emulator/test/driver_SUITE_data/Makefile.src
@@ -14,7 +14,8 @@ MISC_DRVS = outputv_drv@dll@ \
thr_alloc_drv@dll@ \
otp_9302_drv@dll@ \
thr_free_drv@dll@ \
- async_blast_drv@dll@
+ async_blast_drv@dll@ \
+ thr_msg_blast_drv@dll@
SYS_INFO_DRVS = sys_info_base_drv@dll@ \
sys_info_prev_drv@dll@ \
diff --git a/erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c b/erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c
new file mode 100644
index 0000000000..1070678d7b
--- /dev/null
+++ b/erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c
@@ -0,0 +1,178 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#include "erl_driver.h"
+
+#define THR_MSG_BLAST_NO_PROCS 10
+#define THR_MSG_BLAST_NO_SENDS_PER_PROC 10000
+
+#define THR_MSG_BLAST_THREADS 32
+
+static void stop(ErlDrvData drv_data);
+static ErlDrvData start(ErlDrvPort port,
+ char *command);
+static ErlDrvSSizeT control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen);
+
+static ErlDrvEntry thr_msg_blast_drv_entry = {
+ NULL /* init */,
+ start,
+ stop,
+ NULL /* output */,
+ NULL /* ready_input */,
+ NULL /* ready_output */,
+ "thr_msg_blast_drv",
+ NULL /* finish */,
+ NULL /* handle */,
+ control,
+ NULL /* timeout */,
+ NULL /* outputv */,
+ NULL /* ready_async */,
+ NULL /* flush */,
+ NULL /* call */,
+ NULL /* event */,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL /* handle2 */,
+ NULL /* handle_monitor */
+};
+
+typedef struct {
+ ErlDrvPort port;
+ ErlDrvTermData td_port;
+ ErlDrvTermData hi;
+ ErlDrvTid tid[THR_MSG_BLAST_THREADS];
+ int no_thrs;
+ ErlDrvTermData proc[THR_MSG_BLAST_NO_PROCS];
+ int no_procs;
+} thr_msg_blast_data_t;
+
+
+DRIVER_INIT(thr_msg_blast_drv)
+{
+ return &thr_msg_blast_drv_entry;
+}
+
+static void stop(ErlDrvData drv_data)
+{
+ int i;
+ thr_msg_blast_data_t *tmbd = (thr_msg_blast_data_t *) drv_data;
+ for (i = 0; i < tmbd->no_thrs; i++)
+ erl_drv_thread_join(tmbd->tid[i], NULL);
+ driver_free((void *) tmbd);
+}
+
+static ErlDrvData start(ErlDrvPort port,
+ char *command)
+{
+ thr_msg_blast_data_t *tmbd;
+
+ tmbd = driver_alloc(sizeof(thr_msg_blast_data_t));
+ if (!tmbd)
+ return ERL_DRV_ERROR_GENERAL;
+
+ tmbd->port = port;
+ tmbd->td_port = driver_mk_port(port);
+ tmbd->hi = driver_mk_atom("hi");
+ tmbd->no_thrs = 0;
+ tmbd->no_procs = 1;
+ tmbd->proc[0] = driver_caller(port);
+
+ return (ErlDrvData) tmbd;
+}
+
+static void *thread(void *);
+
+static ErlDrvSSizeT control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
+{
+ thr_msg_blast_data_t *tmbd = (thr_msg_blast_data_t *) drv_data;
+ char *res_str = "error";
+
+ if (tmbd->no_procs >= THR_MSG_BLAST_NO_PROCS) {
+ int i;
+ for (i = 0; i < tmbd->no_thrs; i++)
+ erl_drv_thread_join(tmbd->tid[i], NULL);
+ tmbd->no_thrs = 0;
+ res_str = "done";
+ }
+ else {
+
+ tmbd->proc[tmbd->no_procs++] = driver_caller(tmbd->port);
+
+ if (tmbd->no_procs == THR_MSG_BLAST_NO_PROCS) {
+ for (tmbd->no_thrs = 0;
+ tmbd->no_thrs < THR_MSG_BLAST_THREADS;
+ tmbd->no_thrs++) {
+ int res = erl_drv_thread_create("test",
+ &tmbd->tid[tmbd->no_thrs],
+ thread,
+ tmbd,
+ NULL);
+ if (res != 0) {
+ driver_failure_posix(tmbd->port, res);
+ goto done;
+ }
+ }
+ }
+
+ res_str = "receiver";
+ }
+
+ done: {
+ ErlDrvSSizeT res_len = strlen(res_str);
+ if (res_len > rlen) {
+ char *abuf = driver_alloc(sizeof(char)*res_len);
+ if (!abuf)
+ return 0;
+ *rbuf = abuf;
+ }
+
+ memcpy((void *) *rbuf, (void *) res_str, res_len);
+
+ return res_len;
+ }
+}
+
+static void *thread(void *varg)
+{
+ int s, p;
+ thr_msg_blast_data_t *tmbd = (thr_msg_blast_data_t *) varg;
+ ErlDrvTermData spec[] = {
+ ERL_DRV_PORT, tmbd->td_port,
+ ERL_DRV_ATOM, tmbd->hi,
+ ERL_DRV_TUPLE, 2
+ };
+
+ for (s = 0; s < THR_MSG_BLAST_NO_SENDS_PER_PROC; s++) {
+ for (p = 0; p < THR_MSG_BLAST_NO_PROCS; p++) {
+ int res = driver_send_term(tmbd->port, tmbd->proc[p],
+ spec, sizeof(spec)/sizeof(spec[0]));
+ if (p == 0 && res <= 0)
+ abort(); /* Could not send to creator */
+ }
+ }
+ return NULL;
+}
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 873601ddd1..13aa0f4c00 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -90,7 +90,7 @@
mix_up_ports/1, otp_5112/1, otp_5119/1, otp_6224/1,
exit_status_multi_scheduling_block/1, ports/1,
spawn_driver/1, spawn_executable/1, close_deaf_port/1,
- unregister_name/1]).
+ unregister_name/1, parallelism_option/1]).
-export([]).
@@ -114,7 +114,8 @@ all() ->
stderr_to_stdout, otp_3906, otp_4389, win_massive,
mix_up_ports, otp_5112, otp_5119,
exit_status_multi_scheduling_block, ports, spawn_driver,
- spawn_executable, close_deaf_port, unregister_name].
+ spawn_executable, close_deaf_port, unregister_name,
+ parallelism_option].
groups() ->
[{stream, [], [stream_small, stream_big]},
@@ -159,11 +160,11 @@ win_massive(Config) when is_list(Config) ->
do_win_massive() ->
Dog = test_server:timetrap(test_server:seconds(360)),
SuiteDir = filename:dirname(code:which(?MODULE)),
- Env = " -env ERL_MAX_PORTS 8192",
+ Ports = " +Q 8192",
{ok, Node} =
test_server:start_node(win_massive,
slave,
- [{args, " -pa " ++ SuiteDir ++ Env}]),
+ [{args, " -pa " ++ SuiteDir ++ Ports}]),
ok = rpc:call(Node,?MODULE,win_massive_client,[3000]),
test_server:stop_node(Node),
test_server:timetrap_cancel(Dog),
@@ -1298,6 +1299,43 @@ spawn_driver(Config) when is_list(Config) ->
test_server:timetrap_cancel(Dog),
ok.
+parallelism_option(suite) ->
+ [];
+parallelism_option(doc) ->
+ ["Test parallelism option of open_port"];
+parallelism_option(Config) when is_list(Config) ->
+ ?line Dog = test_server:timetrap(test_server:seconds(10)),
+ ?line Path = ?config(data_dir, Config),
+ ?line ok = load_driver(Path, "echo_drv"),
+ ?line Port = erlang:open_port({spawn_driver, "echo_drv"},
+ [{parallelism, true}]),
+ ?line {parallelism, true} = erlang:port_info(Port, parallelism),
+ ?line Port ! {self(), {command, "Hello port!"}},
+ ?line receive
+ {Port, {data, "Hello port!"}} = Msg1 ->
+ io:format("~p~n", [Msg1]),
+ ok;
+ Other ->
+ test_server:fail({unexpected, Other})
+ end,
+ ?line Port ! {self(), close},
+ ?line receive {Port, closed} -> ok end,
+
+ ?line Port2 = erlang:open_port({spawn_driver, "echo_drv -Hello port?"},
+ [{parallelism, false}]),
+ ?line {parallelism, false} = erlang:port_info(Port2, parallelism),
+ ?line receive
+ {Port2, {data, "Hello port?"}} = Msg2 ->
+ io:format("~p~n", [Msg2]),
+ ok;
+ Other2 ->
+ test_server:fail({unexpected2, Other2})
+ end,
+ ?line Port2 ! {self(), close},
+ ?line receive {Port2, closed} -> ok end,
+ ?line test_server:timetrap_cancel(Dog),
+ ok.
+
spawn_executable(suite) ->
[];
spawn_executable(doc) ->
@@ -1566,6 +1604,7 @@ otp_5112(Config) when is_list(Config) ->
?t:format("Links1: ~p~n",[Links1]),
true = lists:member(Port, Links1),
Port ! {self(), {command, ""}},
+ ?line wait_until(fun () -> lists:member(Port, erlang:ports()) == false end),
{links, Links2} = process_info(self(),links),
?t:format("Links2: ~p~n",[Links2]),
false = lists:member(Port, Links2), %% This used to fail
@@ -1636,38 +1675,8 @@ otp_5119_fill_empty_port_tab(Ports) ->
LastPort
end.
--define(DEF_MAX_PORTS, 1024).
-
-max_ports_env() ->
- case os:getenv("ERL_MAX_PORTS") of
- EMP when is_list(EMP) ->
- case catch list_to_integer(EMP) of
- Int when is_integer(Int) -> Int;
- _ -> false
- end;
- _ -> false
- end.
-
max_ports() ->
- PreMaxPorts
- = case max_ports_env() of
- Env when is_integer(Env) -> Env;
- _ ->
- case os:type() of
- {unix, _} ->
- UlimStr = string:strip(os:cmd("ulimit -n")
- -- "\n"),
- case catch list_to_integer(UlimStr) of
- Ulim when is_integer(Ulim) -> Ulim;
- _ -> ?DEF_MAX_PORTS
- end;
- _ -> ?DEF_MAX_PORTS
- end
- end,
- case PreMaxPorts > ?DEF_MAX_PORTS of
- true -> PreMaxPorts;
- false -> ?DEF_MAX_PORTS
- end.
+ erlang:system_info(port_limit).
port_ix(Port) when is_port(Port) ->
["#Port",_,PortIxStr] = string:tokens(erlang:port_to_list(Port),
@@ -2270,5 +2279,12 @@ close_deaf_port_1(N, Cmd) ->
_:eagain ->
{comment, "Could not spawn more than " ++ integer_to_list(N) ++ " OS processes."}
end.
-
+wait_until(Fun) ->
+ case catch Fun() of
+ true ->
+ ok;
+ _ ->
+ receive after 100 -> ok end,
+ wait_until(Fun)
+ end.
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index 6509871a7d..898535693f 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -1384,7 +1384,7 @@ otp_6237_select_loop() ->
-define(NoTestProcs, 10000).
--record(processes_bif_info, {min_start_reds,
+-record(ptab_list_bif_info, {min_start_reds,
tab_chunks,
tab_chunks_size,
tab_indices_per_red,
@@ -1419,11 +1419,11 @@ processes_large_tab(Config) when is_list(Config) ->
%% the connection times out; therefore, shrink the test on
%% high debug levels.
?line DbgLvl = case erts_debug:get_internal_state(processes_bif_info) of
- #processes_bif_info{debug_level = Lvl} when Lvl > MaxDbgLvl ->
+ #ptab_list_bif_info{debug_level = Lvl} when Lvl > MaxDbgLvl ->
20;
- #processes_bif_info{debug_level = Lvl} when Lvl < 0 ->
+ #ptab_list_bif_info{debug_level = Lvl} when Lvl < 0 ->
?line ?t:fail({debug_level, Lvl});
- #processes_bif_info{debug_level = Lvl} ->
+ #ptab_list_bif_info{debug_level = Lvl} ->
Lvl
end,
?line ProcTabSize3 = ProcTabSize2 - (1300000 * DbgLvl div MaxDbgLvl),
@@ -1438,7 +1438,7 @@ processes_large_tab(Config) when is_list(Config) ->
erts_debug,
get_internal_state,
[processes_bif_info]) of
- #processes_bif_info{tab_chunks = Chunks} when is_integer(Chunks),
+ #ptab_list_bif_info{tab_chunks = Chunks} when is_integer(Chunks),
Chunks > 1 -> ok;
PBInfo -> ?t:fail(PBInfo)
end,
@@ -1460,14 +1460,14 @@ processes_small_tab(doc) ->
processes_small_tab(suite) ->
[];
processes_small_tab(Config) when is_list(Config) ->
- ?line {ok, SmallNode} = start_node(Config, "+P 500"),
+ ?line {ok, SmallNode} = start_node(Config, "+P 1024"),
?line Res = rpc:call(SmallNode, ?MODULE, processes_bif_test, []),
?line PBInfo = rpc:call(SmallNode,
erts_debug,
get_internal_state,
[processes_bif_info]),
?line stop_node(SmallNode),
- ?line 1 = PBInfo#processes_bif_info.tab_chunks,
+ ?line true = PBInfo#ptab_list_bif_info.tab_chunks < 10,
?line chk_processes_bif_test_res(Res).
processes_this_tab(doc) ->
@@ -1481,7 +1481,7 @@ chk_processes_bif_test_res(ok) -> ok;
chk_processes_bif_test_res({comment, _} = Comment) -> Comment;
chk_processes_bif_test_res(Failure) -> ?t:fail(Failure).
-print_processes_bif_info(#processes_bif_info{min_start_reds = MinStartReds,
+print_processes_bif_info(#ptab_list_bif_info{min_start_reds = MinStartReds,
tab_chunks = TabChunks,
tab_chunks_size = TabChunksSize,
tab_indices_per_red = TabIndPerRed,
@@ -1579,11 +1579,11 @@ processes_bif_test() ->
?line enable_internal_state(),
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10,
+ ?line WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
?line WillTrap = case PBInfo of
- #processes_bif_info{tab_chunks = 1} ->
- false;
- #processes_bif_info{tab_chunks = Chunks,
+ #ptab_list_bif_info{tab_chunks = Chunks} when Chunks < 10 ->
+ false; %% Skip for small tables
+ #ptab_list_bif_info{tab_chunks = Chunks,
tab_chunks_size = ChunksSize,
tab_indices_per_red = IndiciesPerRed
} ->
@@ -1618,7 +1618,7 @@ processes_bif_test() ->
?line erlang:system_flag(multi_scheduling, unblock),
?line [{status,suspended},
- {current_function,{erlang,processes_trap,2}}]
+ {current_function,{erlang,ptab_list_continue,2}}]
= process_info(Suspendee, [status, current_function]),
?line ok = do_processes_bif_test(WantReds, WillTrap, Processes),
@@ -1785,7 +1785,7 @@ do_processes_bif_die_test(N, Processes) ->
catch
throw:{kill_in_trap, R} when N > 0 ->
?t:format("Failed to kill in trap: ~p~n", [R]),
- ?t:format("Trying again~p~n", []),
+ ?t:format("Trying again~n", []),
do_processes_bif_die_test(N-1, Processes)
end.
@@ -1848,7 +1848,7 @@ processes_last_call_trap(Config) when is_list(Config) ->
?line Processes = fun () -> processes() end,
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = case PBInfo#processes_bif_info.min_start_reds of
+ ?line WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
R when R > 10 -> R - 1;
_R -> 9
end,
@@ -1873,7 +1873,7 @@ processes_apply_trap(Config) when is_list(Config) ->
?line enable_internal_state(),
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = case PBInfo#processes_bif_info.min_start_reds of
+ ?line WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
R when R > 10 -> R - 1;
_R -> 9
end,
@@ -1893,7 +1893,7 @@ processes_gc_trap(Config) when is_list(Config) ->
?line enable_internal_state(),
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10,
+ ?line WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
?line Processes = fun () ->
erts_debug:set_internal_state(reds_left,WantReds),
processes()
@@ -1911,7 +1911,7 @@ processes_gc_trap(Config) when is_list(Config) ->
?line erlang:suspend_process(Suspendee),
?line erlang:system_flag(multi_scheduling, unblock),
- ?line [{status,suspended}, {current_function,{erlang,processes_trap,2}}]
+ ?line [{status,suspended}, {current_function,{erlang,ptab_list_continue,2}}]
= process_info(Suspendee, [status, current_function]),
?line erlang:garbage_collect(Suspendee),
@@ -1982,8 +1982,8 @@ chk_term_proc_list(Line, MustChk, ExpectBlks) ->
not_enabled;
{_, MS} ->
{value,
- {processes_term_proc_el,
- DL}} = lists:keysearch(processes_term_proc_el, 1, MS),
+ {ptab_list_deleted_el,
+ DL}} = lists:keysearch(ptab_list_deleted_el, 1, MS),
case lists:keysearch(blocks, 1, DL) of
{value, {blocks, ExpectBlks, _, _}} ->
ok;
@@ -2001,8 +2001,8 @@ processes_term_proc_list_test(MustChk) ->
?line enable_internal_state(),
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10,
- ?line #processes_bif_info{tab_chunks = Chunks,
+ ?line WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
+ ?line #ptab_list_bif_info{tab_chunks = Chunks,
tab_chunks_size = ChunksSize,
tab_indices_per_red = IndiciesPerRed
} = PBInfo,
@@ -2038,7 +2038,7 @@ processes_term_proc_list_test(MustChk) ->
erlang:suspend_process(P),
erlang:system_flag(multi_scheduling, unblock),
[{status,suspended},
- {current_function,{erlang,processes_trap,2}}]
+ {current_function,{erlang,ptab_list_continue,2}}]
= process_info(P, [status, current_function]),
P
end,
diff --git a/erts/emulator/test/tuple_SUITE.erl b/erts/emulator/test/tuple_SUITE.erl
index bfc3910742..978d96a110 100644
--- a/erts/emulator/test/tuple_SUITE.erl
+++ b/erts/emulator/test/tuple_SUITE.erl
@@ -60,40 +60,40 @@ end_per_group(_GroupName, Config) ->
build_and_match(Config) when is_list(Config) ->
- ?line {} = id({}),
- ?line {1} = id({1}),
- ?line {1, 2} = id({1, 2}),
- ?line {1, 2, 3} = id({1, 2, 3}),
- ?line {1, 2, 3, 4} = id({1, 2, 3, 4}),
- ?line {1, 2, 3, 4, 5} = id({1, 2, 3, 4, 5}),
- ?line {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}),
- ?line {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}),
- ?line {1, 2, 3, 4, 5, 6, 7} = id({1, 2, 3, 4, 5, 6, 7}),
- ?line {1, 2, 3, 4, 5, 6, 7, 8} = id({1, 2, 3, 4, 5, 6, 7, 8}),
+ {} = id({}),
+ {1} = id({1}),
+ {1, 2} = id({1, 2}),
+ {1, 2, 3} = id({1, 2, 3}),
+ {1, 2, 3, 4} = id({1, 2, 3, 4}),
+ {1, 2, 3, 4, 5} = id({1, 2, 3, 4, 5}),
+ {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}),
+ {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}),
+ {1, 2, 3, 4, 5, 6, 7} = id({1, 2, 3, 4, 5, 6, 7}),
+ {1, 2, 3, 4, 5, 6, 7, 8} = id({1, 2, 3, 4, 5, 6, 7, 8}),
ok.
%% Tests size(Tuple).
t_size(Config) when is_list(Config) ->
- ?line 0 = size({}),
- ?line 1 = size({a}),
- ?line 1 = size({{a}}),
- ?line 2 = size({{a}, {b}}),
- ?line 3 = size({1, 2, 3}),
+ 0 = size({}),
+ 1 = size({a}),
+ 1 = size({{a}}),
+ 2 = size({{a}, {b}}),
+ 3 = size({1, 2, 3}),
ok.
t_tuple_size(Config) when is_list(Config) ->
- ?line 0 = tuple_size(id({})),
- ?line 1 = tuple_size(id({a})),
- ?line 1 = tuple_size(id({{a}})),
- ?line 2 = tuple_size(id({{a},{b}})),
- ?line 3 = tuple_size(id({1,2,3})),
+ 0 = tuple_size(id({})),
+ 1 = tuple_size(id({a})),
+ 1 = tuple_size(id({{a}})),
+ 2 = tuple_size(id({{a},{b}})),
+ 3 = tuple_size(id({1,2,3})),
%% Error cases.
- ?line {'EXIT',{badarg,_}} = (catch tuple_size([])),
- ?line {'EXIT',{badarg,_}} = (catch tuple_size(<<1,2,3>>)),
- ?line error = ludicrous_tuple_size({a,b,c}),
- ?line error = ludicrous_tuple_size([a,b,c]),
+ {'EXIT',{badarg,_}} = (catch tuple_size([])),
+ {'EXIT',{badarg,_}} = (catch tuple_size(<<1,2,3>>)),
+ error = ludicrous_tuple_size({a,b,c}),
+ error = ludicrous_tuple_size([a,b,c]),
ok.
@@ -104,44 +104,44 @@ ludicrous_tuple_size(_) -> error.
%% Tests element/2.
t_element(Config) when is_list(Config) ->
- ?line a = element(1, {a}),
- ?line a = element(1, {a, b}),
+ a = element(1, {a}),
+ a = element(1, {a, b}),
- ?line List = lists:seq(1, 4096),
- ?line Tuple = list_to_tuple(lists:seq(1, 4096)),
- ?line get_elements(List, Tuple, 1),
+ List = lists:seq(1, 4096),
+ Tuple = list_to_tuple(lists:seq(1, 4096)),
+ get_elements(List, Tuple, 1),
- ?line {'EXIT', {badarg, _}} = (catch element(0, id({a,b}))),
- ?line {'EXIT', {badarg, _}} = (catch element(3, id({a,b}))),
- ?line {'EXIT', {badarg, _}} = (catch element(1, id({}))),
- ?line {'EXIT', {badarg, _}} = (catch element(1, id([a,b]))),
- ?line {'EXIT', {badarg, _}} = (catch element(1, id(42))),
- ?line {'EXIT', {badarg, _}} = (catch element(id(1.5), id({a,b}))),
+ {'EXIT', {badarg, _}} = (catch element(0, id({a,b}))),
+ {'EXIT', {badarg, _}} = (catch element(3, id({a,b}))),
+ {'EXIT', {badarg, _}} = (catch element(1, id({}))),
+ {'EXIT', {badarg, _}} = (catch element(1, id([a,b]))),
+ {'EXIT', {badarg, _}} = (catch element(1, id(42))),
+ {'EXIT', {badarg, _}} = (catch element(id(1.5), id({a,b}))),
ok.
get_elements([Element|Rest], Tuple, Pos) ->
- ?line Element = element(Pos, Tuple),
- ?line get_elements(Rest, Tuple, Pos+1);
+ Element = element(Pos, Tuple),
+ get_elements(Rest, Tuple, Pos+1);
get_elements([], _Tuple, _Pos) ->
ok.
%% Tests set_element/3.
t_setelement(Config) when is_list(Config) ->
- ?line {x} = setelement(1, id({1}), x),
- ?line {x,2} = setelement(1, id({1,2}), x),
- ?line {1,x} = setelement(2, id({1,2}), x),
+ {x} = setelement(1, id({1}), x),
+ {x,2} = setelement(1, id({1,2}), x),
+ {1,x} = setelement(2, id({1,2}), x),
- ?line Tuple = list_to_tuple(lists:duplicate(2048, x)),
- ?line NewTuple = set_all_elements(Tuple, 1),
- ?line NewTuple = list_to_tuple(lists:seq(1+7, 2048+7)),
+ Tuple = list_to_tuple(lists:duplicate(2048, x)),
+ NewTuple = set_all_elements(Tuple, 1),
+ NewTuple = list_to_tuple(lists:seq(1+7, 2048+7)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(0, {a, b}, x)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(3, {a, b}, x)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(1, {}, x)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(1, [a, b], x)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(1.5, {a, b}, x)),
+ {'EXIT', {badarg, _}} = (catch setelement(0, {a, b}, x)),
+ {'EXIT', {badarg, _}} = (catch setelement(3, {a, b}, x)),
+ {'EXIT', {badarg, _}} = (catch setelement(1, {}, x)),
+ {'EXIT', {badarg, _}} = (catch setelement(1, [a, b], x)),
+ {'EXIT', {badarg, _}} = (catch setelement(1.5, {a, b}, x)),
%% Nested setelement with literals.
AnotherTuple = id({0,0,a,b,c}),
@@ -159,52 +159,68 @@ set_all_elements(Tuple, Pos) when Pos > size(Tuple) ->
%% Tests list_to_tuple/1.
t_list_to_tuple(Config) when is_list(Config) ->
- ?line {} = list_to_tuple([]),
- ?line {a} = list_to_tuple([a]),
- ?line {a, b} = list_to_tuple([a, b]),
- ?line {a, b, c} = list_to_tuple([a, b, c]),
- ?line {a, b, c, d} = list_to_tuple([a, b, c, d]),
- ?line {a, b, c, d, e} = list_to_tuple([a, b, c, d, e]),
-
- ?line Size = 4096,
- ?line Tuple = list_to_tuple(lists:seq(1, Size)),
- ?line Size = size(Tuple),
-
- ?line {'EXIT', {badarg, _}} = (catch list_to_tuple(id({a,b}))),
- ?line {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
- ?line {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
-
+ {} = list_to_tuple([]),
+ {a} = list_to_tuple([a]),
+ {a, b} = list_to_tuple([a, b]),
+ {a, b, c} = list_to_tuple([a, b, c]),
+ {a, b, c, d} = list_to_tuple([a, b, c, d]),
+ {a, b, c, d, e} = list_to_tuple([a, b, c, d, e]),
+
+ Size = 4096,
+ Tuple = list_to_tuple(lists:seq(1, Size)),
+ Size = size(Tuple),
+
+ {'EXIT', {badarg, _}} = (catch list_to_tuple(id({a,b}))),
+ {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
+ {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
+
+ % test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ MaxSize = size(MaxTuple),
+
+ {'EXIT', {badarg,_}} = (catch list_to_tuple(lists:seq(1, 1 bsl 24))),
ok.
%% Tests tuple_to_list/1.
t_tuple_to_list(Config) when is_list(Config) ->
- ?line [] = tuple_to_list({}),
- ?line [a] = tuple_to_list({a}),
- ?line [a, b] = tuple_to_list({a, b}),
- ?line [a, b, c] = tuple_to_list({a, b, c}),
- ?line [a, b, c, d] = tuple_to_list({a, b, c, d}),
- ?line [a, b, c, d] = tuple_to_list({a, b, c, d}),
-
- ?line Size = 4096,
- ?line List = lists:seq(1, Size),
- ?line Tuple = list_to_tuple(List),
- ?line Size = size(Tuple),
- ?line List = tuple_to_list(Tuple),
-
- ?line {'EXIT', {badarg,_}} = (catch tuple_to_list(id(a))),
- ?line {'EXIT', {badarg,_}} = (catch tuple_to_list(id(42))),
+ [] = tuple_to_list({}),
+ [a] = tuple_to_list({a}),
+ [a, b] = tuple_to_list({a, b}),
+ [a, b, c] = tuple_to_list({a, b, c}),
+ [a, b, c, d] = tuple_to_list({a, b, c, d}),
+ [a, b, c, d] = tuple_to_list({a, b, c, d}),
+
+ Size = 4096,
+ List = lists:seq(1, Size),
+ Tuple = list_to_tuple(List),
+ Size = size(Tuple),
+ List = tuple_to_list(Tuple),
+
+ {'EXIT', {badarg,_}} = (catch tuple_to_list(id(a))),
+ {'EXIT', {badarg,_}} = (catch tuple_to_list(id(42))),
ok.
%% Tests the make_tuple/2 BIF.
t_make_tuple_2(Config) when is_list(Config) ->
- ?line t_make_tuple1([]),
- ?line t_make_tuple1(42),
- ?line t_make_tuple1(a),
- ?line t_make_tuple1({}),
- ?line t_make_tuple1({a}),
- ?line t_make_tuple1(erlang:make_tuple(400, [])),
+ t_make_tuple1([]),
+ t_make_tuple1(42),
+ t_make_tuple1(a),
+ t_make_tuple1({}),
+ t_make_tuple1({a}),
+ t_make_tuple1(erlang:make_tuple(400, [])),
+
+ % test upper boundary, 16777215 elements
+ t_make_tuple(1 bsl 24 - 1, a),
+ {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 24, a)),
+
+ {'EXIT', {badarg,_}} = (catch erlang:make_tuple(-1, a)),
+ % 26 bits is the total header arity room (for now)
+ {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 26 + 3, a)),
+ % bignum
+ {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 65 + 3, a)),
ok.
t_make_tuple1(Element) ->
@@ -222,29 +238,39 @@ t_make_tuple(Size, Element) ->
%% Tests the erlang:make_tuple/3 BIF.
t_make_tuple_3(Config) when is_list(Config) ->
- ?line {} = erlang:make_tuple(0, def, []),
- ?line {def} = erlang:make_tuple(1, def, []),
- ?line {a} = erlang:make_tuple(1, def, [{1,a}]),
- ?line {a,def,c,def,e} = erlang:make_tuple(5, def, [{5,e},{1,a},{3,c}]),
- ?line {a,def,c,def,e} = erlang:make_tuple(5, def,
- [{1,blurf},{5,e},{3,blurf},
- {1,a},{3,c}]),
+ {} = erlang:make_tuple(0, def, []),
+ {def} = erlang:make_tuple(1, def, []),
+ {a} = erlang:make_tuple(1, def, [{1,a}]),
+
+ {a,def,c,def,e} = erlang:make_tuple(5, def, [{5,e},{1,a},{3,c}]),
+ {a,def,c,def,e} = erlang:make_tuple(5, def, [{1,blurf},{5,e},{3,blurf},{1,a},{3,c}]),
+ MaxSize = 1 bsl 16 - 1,
+ MaxTuple = erlang:make_tuple(MaxSize, def, [{1,blurf},{5,e},{3,blurf},{1,a},{3,c}]),
+ MaxSize = size(MaxTuple),
%% Error cases.
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(0, def, [{1,a}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{-1,a}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{0,a}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{6,z}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(a, def, [{6,z}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{1,a}|b])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [42])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [[a,b,c]])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, non_list)),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(0, def, [{1,a}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{-1,a}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{0,a}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{6,z}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(a, def, [{6,z}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{1,a}|b])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [42])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [[a,b,c]])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, non_list)),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(1 bsl 24, def, [{5,e},{1,a},{3,c}])),
+
ok.
%% Tests the append_element/2 BIF.
t_append_element(Config) when is_list(Config) ->
- t_append_element({}, 2048, 2048).
+ ok = t_append_element({}, 2048, 2048),
+
+ % test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)),
+ ok.
t_append_element(_Tuple, 0, _High) -> ok;
t_append_element(Tuple, N, High) ->
@@ -261,7 +287,7 @@ verify_seq([High|T], High, Lower) ->
%% (This is known to crash earlier versions of BEAM.)
tuple_with_case(Config) when is_list(Config) ->
- ?line {reply, true} = tuple_with_case(),
+ {reply, true} = tuple_with_case(),
ok.
tuple_with_case() ->
@@ -280,21 +306,21 @@ foo() -> ignored.
%% Test to build a tuple in a guard.
tuple_in_guard(Config) when is_list(Config) ->
- ?line Tuple1 = id({a,b}),
- ?line Tuple2 = id({a,b,c}),
- ?line if
- Tuple1 == {element(1, Tuple2),element(2, Tuple2)} ->
- ok;
- true ->
- ?line test_server:fail()
- end,
- ?line if
- Tuple2 == {element(1, Tuple2),element(2, Tuple2),
- element(3, Tuple2)} ->
- ok;
- true ->
- ?line test_server:fail()
- end,
+ Tuple1 = id({a,b}),
+ Tuple2 = id({a,b,c}),
+ if
+ Tuple1 == {element(1, Tuple2),element(2, Tuple2)} ->
+ ok;
+ true ->
+ test_server:fail()
+ end,
+ if
+ Tuple2 == {element(1, Tuple2),element(2, Tuple2),
+ element(3, Tuple2)} ->
+ ok;
+ true ->
+ test_server:fail()
+ end,
ok.
%% Use this function to avoid compile-time evaluation of an expression.