-rw-r--r--  erts/emulator/Makefile.in                          3
-rw-r--r--  erts/emulator/beam/atom.names                      2
-rw-r--r--  erts/emulator/beam/beam_bif_load.c                 2
-rw-r--r--  erts/emulator/beam/beam_emu.c                     12
-rw-r--r--  erts/emulator/beam/bif.c                          48
-rw-r--r--  erts/emulator/beam/break.c                        18
-rw-r--r--  erts/emulator/beam/dist.c                          4
-rw-r--r--  erts/emulator/beam/erl_alloc.c                     8
-rw-r--r--  erts/emulator/beam/erl_alloc.types                 7
-rw-r--r--  erts/emulator/beam/erl_bif_info.c                 37
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c                14
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c               101
-rw-r--r--  erts/emulator/beam/erl_db.c                       22
-rw-r--r--  erts/emulator/beam/erl_db_util.c                  38
-rw-r--r--  erts/emulator/beam/erl_gc.c                        6
-rw-r--r--  erts/emulator/beam/erl_init.c                     22
-rw-r--r--  erts/emulator/beam/erl_lock_check.c                2
-rw-r--r--  erts/emulator/beam/erl_message.c                   2
-rw-r--r--  erts/emulator/beam/erl_message.h                  23
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h     45
-rw-r--r--  erts/emulator/beam/erl_node_tables.c               7
-rw-r--r--  erts/emulator/beam/erl_process.c                1784
-rw-r--r--  erts/emulator/beam/erl_process.h                  49
-rw-r--r--  erts/emulator/beam/erl_process_dump.c             10
-rw-r--r--  erts/emulator/beam/erl_process_lock.c            100
-rw-r--r--  erts/emulator/beam/erl_process_lock.h             54
-rw-r--r--  erts/emulator/beam/erl_ptab.c                   1560
-rw-r--r--  erts/emulator/beam/erl_ptab.h                    376
-rw-r--r--  erts/emulator/beam/erl_trace.c                   242
-rw-r--r--  erts/emulator/beam/erl_utils.h                   214
-rw-r--r--  erts/emulator/beam/export.h                        1
-rw-r--r--  erts/emulator/beam/global.h                      203
-rw-r--r--  erts/emulator/beam/io.c                           10
-rw-r--r--  erts/emulator/beam/register.c                     10
-rw-r--r--  erts/emulator/beam/utils.c                         1
-rw-r--r--  erts/emulator/hipe/hipe_debug.c                    6
-rw-r--r--  erts/emulator/test/process_SUITE.erl              40
-rw-r--r--  erts/etc/unix/etp-commands                         6
38 files changed, 2782 insertions, 2307 deletions
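
The bulk of this commit replaces direct accesses to per-process fields (p->trace_flags, p->tracer_proc, p->reg, and the erts_max_processes global) with accessors over a "common" element shared by the new generic process/port table in erl_ptab.c. For orientation only, a minimal sketch of what such accessors could look like follows. The macro names ERTS_TRACE_FLAGS and ERTS_TRACER_PROC and the common.u.alive path are taken from the hunks below; the exact field layout is an assumption, not copied from erl_ptab.h (whose contents are not part of the hunks shown here).

/* Hypothetical layout, for orientation only; NOT the actual
 * erl_ptab.h definitions introduced by this commit. */
typedef unsigned long Eterm;           /* stand-in for the ERTS term type */
typedef unsigned long Uint;            /* stand-in for the ERTS Uint type */

typedef struct {
    Eterm id;                          /* pid or port id                  */
    struct {
        struct {
            Eterm tracer_proc;         /* tracer pid/port, or NIL         */
            Uint  trace_flags;         /* F_TRACE_* flag bits             */
        } alive;                       /* valid while the element lives   */
    } u;
} ErtsPTabElementCommon;

typedef struct {
    ErtsPTabElementCommon common;      /* must be the first field, cf. the
                                        * ASSERT on &p->common below      */
    /* ... rest of the Process struct ... */
} Process;

/* Accessor macros with the names used throughout the diff; the field
 * paths behind them are assumed here for illustration. */
#define ERTS_TRACER_PROC(P)  ((P)->common.u.alive.tracer_proc)
#define ERTS_TRACE_FLAGS(P)  ((P)->common.u.alive.trace_flags)

With accessors like these, call sites read and update trace state the same way for processes and ports, e.g. ERTS_TRACE_FLAGS(p) |= F_SENSITIVE; instead of the old p->trace_flags |= F_SENSITIVE;.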
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 2efbe2d57e..47089f7dd0 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -759,7 +759,8 @@ RUN_OBJS = \
$(OBJDIR)/packet_parser.o $(OBJDIR)/safe_hash.o \
$(OBJDIR)/erl_zlib.o $(OBJDIR)/erl_nif.o \
$(OBJDIR)/erl_bif_binary.o $(OBJDIR)/erl_ao_firstfit_alloc.o \
- $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o
+ $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o \
+ $(OBJDIR)/erl_ptab.o
ifeq ($(TARGET),win32)
DRV_OBJS = \
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 02735d4b68..201719507f 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -424,7 +424,6 @@ atom priority
atom private
atom process
atom processes
-atom processes_trap
atom processes_used
atom process_count
atom process_display
@@ -434,6 +433,7 @@ atom procs
atom profile
atom protected
atom protection
+atom ptab_list_continue
atom public
atom purify
atom quantify
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 78a9d76a20..29367c057a 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -180,8 +180,6 @@ check_process_code_2(BIF_ALIST_2)
}
if (is_internal_pid(BIF_ARG_1)) {
Eterm res;
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- goto error;
modp = erts_get_module(BIF_ARG_2);
if (modp == NULL) { /* Doesn't exist. */
return am_false;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index b831147295..fadbbfc0e3 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1296,7 +1296,7 @@ void process_main(void)
reds = c_p->fcalls;
if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
- && (c_p->trace_flags & F_SENSITIVE) == 0) {
+ && (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE) == 0) {
neg_o_reds = -reds;
FCALLS = REDS_IN(c_p) = 0;
} else {
@@ -4645,7 +4645,7 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg,
- 0, &c_p->tracer_proc);
+ 0, &ERTS_TRACER_PROC(c_p));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -4666,15 +4666,15 @@ void process_main(void)
E -= 3;
ASSERT(c_p->htop <= E && E <= c_p->hend);
ASSERT(is_CP((BeamInstr)(ep->code)));
- ASSERT(is_internal_pid(c_p->tracer_proc) ||
- is_internal_port(c_p->tracer_proc));
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(c_p)) ||
+ is_internal_port(ERTS_TRACER_PROC(c_p)));
E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */
E[1] = am_true; /* Process tracer */
E[0] = make_cp(ep->code);
c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE)
? beam_exception_trace : beam_return_trace;
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
+ ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
@@ -4879,7 +4879,7 @@ void process_main(void)
(flags & MATCH_SET_EXCEPTION_TRACE)
? beam_exception_trace : beam_return_trace;
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
+ ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
Goto(real_I);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 20ac0637e5..89a8f839db 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -83,8 +83,10 @@ static int insert_internal_link(Process* p, Eterm rpid)
ASSERT(is_internal_pid(rpid));
#ifdef ERTS_SMP
- if (IS_TRACED(p) && (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)))
+ if (IS_TRACED(p)
+ && (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1))) {
rp_locks = ERTS_PROC_LOCKS_ALL;
+ }
erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
#endif
@@ -103,18 +105,18 @@ static int insert_internal_link(Process* p, Eterm rpid)
erts_add_link(&(p->nlinks), LINK_PID, rp->id);
erts_add_link(&(rp->nlinks), LINK_PID, p->id);
- ASSERT(is_nil(p->tracer_proc)
- || is_internal_pid(p->tracer_proc)
- || is_internal_port(p->tracer_proc));
+ ASSERT(is_nil(ERTS_TRACER_PROC(p))
+ || is_internal_pid(ERTS_TRACER_PROC(p))
+ || is_internal_port(ERTS_TRACER_PROC(p)));
if (IS_TRACED(p)) {
- if (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) {
- rp->trace_flags |= (p->trace_flags & TRACEE_FLAGS);
- rp->tracer_proc = p->tracer_proc; /* maybe steal */
+ if (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ ERTS_TRACE_FLAGS(rp) |= (ERTS_TRACE_FLAGS(p) & TRACEE_FLAGS);
+ ERTS_TRACER_PROC(rp) = ERTS_TRACER_PROC(p); /* maybe steal */
- if (p->trace_flags & F_TRACE_SOL1) { /* maybe override */
- rp->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- p->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ if (ERTS_TRACE_FLAGS(p) & F_TRACE_SOL1) { /* maybe override */
+ ERTS_TRACE_FLAGS(rp) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
}
}
}
@@ -144,10 +146,6 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
/* check that the pid or port which is our argument is OK */
if (is_internal_pid(BIF_ARG_1)) {
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
if (insert_internal_link(BIF_P, BIF_ARG_1)) {
BIF_RET(am_true);
}
@@ -1037,10 +1035,6 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
/* Internal pid... */
- /* process ok ? */
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- BIF_ERROR(BIF_P, BADARG);
-
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
/* get process struct */
@@ -1397,8 +1391,6 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
*/
ErtsProcLocks rp_locks;
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- BIF_ERROR(BIF_P, BADARG);
if (BIF_ARG_1 == BIF_P->id) {
rp_locks = ERTS_PROC_LOCKS_ALL;
rp = BIF_P;
@@ -1617,11 +1609,13 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
goto error;
}
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
- old_value = BIF_P->trace_flags & F_SENSITIVE ? am_true : am_false;
+ old_value = (ERTS_TRACE_FLAGS(BIF_P) & F_SENSITIVE
+ ? am_true
+ : am_false);
if (is_sensitive) {
- BIF_P->trace_flags |= F_SENSITIVE;
+ ERTS_TRACE_FLAGS(BIF_P) |= F_SENSITIVE;
} else {
- BIF_P->trace_flags &= ~F_SENSITIVE;
+ ERTS_TRACE_FLAGS(BIF_P) &= ~F_SENSITIVE;
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
BIF_RET(old_value);
@@ -1815,9 +1809,6 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
- if (internal_pid_index(to) >= erts_max_processes)
- return SEND_BADARG;
-
rp = erts_proc_lookup_raw(to);
if (!rp) {
@@ -4209,12 +4200,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_RET(old_value);
}
} else if (BIF_ARG_1 == make_small(1)) {
- Uint i;
+ int i, max;
ErlMessage* mp;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
- for (i = 0; i < erts_max_processes; i++) {
+ max = erts_ptab_max(&erts_proc);
+ for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p) {
#ifdef USE_VM_PROBES
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 9cb5f2cc16..bcc8700bc8 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -69,8 +69,8 @@ port_info(int to, void *to_arg)
void
process_info(int to, void *to_arg)
{
- int i;
- for (i = 0; i < erts_max_processes; i++) {
+ int i, max = erts_ptab_max(&erts_proc);
+ for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p && p->i != ENULL) {
if (!ERTS_PROC_IS_EXITING(p))
@@ -84,12 +84,12 @@ process_info(int to, void *to_arg)
static void
process_killer(void)
{
- int i, j;
+ int i, j, max = erts_ptab_max(&erts_proc);
Process* rp;
erts_printf("\n\nProcess Information\n\n");
erts_printf("--------------------------------------------------\n");
- for (i = erts_max_processes-1; i >= 0; i--) {
+ for (i = max-1; i >= 0; i--) {
rp = erts_pix2proc(i);
if (rp && rp->i != ENULL) {
int br;
@@ -226,8 +226,8 @@ print_process_info(int to, void *to_arg, Process *p)
* If the process is registered as a global process, display the
* registered name
*/
- if (p->reg != NULL)
- erts_print(to, to_arg, "Name: %T\n", p->reg->name);
+ if (p->common.u.alive.reg)
+ erts_print(to, to_arg, "Name: %T\n", p->common.u.alive.reg->name);
/*
* Display the initial function name
@@ -618,9 +618,9 @@ bin_check(void)
{
Process *rp;
struct erl_off_heap_header* hdr;
- int i, printed = 0;
+ int i, printed = 0, max = erts_ptab_max(&erts_proc);
- for (i=0; i < erts_max_processes; i++) {
+ for (i=0; i < max; i++) {
rp = erts_pix2proc(i);
if (!rp)
continue;
@@ -710,7 +710,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_print_nif_taints(fd, NULL);
erts_fdprintf(fd, "Atoms: %d\n", atom_table_size());
info(fd, NULL); /* General system info */
- if (erts_proc.tab)
+ if (erts_ptab_initialized(&erts_proc))
process_info(fd, NULL); /* Info about each process and port */
db_info(fd, NULL, 0);
erts_print_bif_timer_info(fd, NULL);
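
The break.c hunks above show the recurring conversion from the removed erts_max_processes global to a table size read once via erts_ptab_max(&erts_proc). A minimal sketch of that iteration pattern is given below; erts_ptab_max() and erts_pix2proc() are the calls used by the hunks, while the wrapper function itself is made up for illustration.

/* Sketch of the loop shape the diff converts process-table walks to.
 * The wrapper is hypothetical; the two calls are those used above. */
static void for_each_live_process(void (*fn)(Process *))
{
    int ix, max = erts_ptab_max(&erts_proc);   /* read the table size once */

    for (ix = 0; ix < max; ix++) {
        Process *p = erts_pix2proc(ix);        /* NULL for unused slots    */
        if (p)
            fn(p);
    }
}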
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index a7f11740f5..5de8236a7e 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -2557,8 +2557,8 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
if (!(DFLAG_EXTENDED_REFERENCES & flags)) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "%T", BIF_P->id);
- if (BIF_P->reg)
- erts_dsprintf(dsbufp, " (%T)", BIF_P->reg->name);
+ if (BIF_P->common.u.alive.reg)
+ erts_dsprintf(dsbufp, " (%T)", BIF_P->common.u.alive.reg->name);
erts_dsprintf(dsbufp,
" attempted to enable connection to node %T "
"which is not able to handle extended references.\n",
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 8130d5c576..d0c0a13400 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -2128,6 +2128,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (want_tot_or_sys || want.processes || want.processes_used) {
+ int max_processes = erts_ptab_max(&erts_proc);
UWord tmp;
if (ERTS_MEM_NEED_ALL_ALCU)
@@ -2137,9 +2138,9 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
fi, ERTS_ALC_NO_FIXED_SIZES);
tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
}
- tmp += erts_max_processes*sizeof(Process*);
+ tmp += max_processes*sizeof(Process*);
#ifdef HYBRID
- tmp += erts_max_processes*sizeof(Process*);
+ tmp += max_processes*sizeof(Process*);
#endif
tmp += erts_bif_timer_memory_size();
tmp += erts_tot_link_lh_size();
@@ -2271,6 +2272,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
Eterm res = THE_NON_VALUE;
int i, length;
Uint reserved_atom_space, atom_space;
+ int max_processes = erts_ptab_max(&erts_proc);
if (proc) {
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
@@ -2385,7 +2387,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "process_table";
- values[i].ui[0] = erts_max_processes*sizeof(Process*);
+ values[i].ui[0] = max_processes*sizeof(Process*);
i++;
values[i].arity = 2;
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index d1e3b4b0ef..4ad0c41b50 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -238,9 +238,9 @@ type MISC_OP_LIST SHORT_LIVED SYSTEM misc_op_list
type PORT_NAMES SHORT_LIVED SYSTEM port_names
type PORT_DATA_LOCK STANDARD SYSTEM port_data_lock
type NODES_MON STANDARD PROCESSES nodes_monitor
-type PROCS_TPROC_EL SHORT_LIVED PROCESSES processes_term_proc_el
-type PROCS_CNKINF SHORT_LIVED PROCESSES processes_chunk_info
-type PROCS_PIDS SHORT_LIVED PROCESSES processes_pids
+type PTAB_LIST_DEL SHORT_LIVED PROCESSES ptab_list_deleted_el
+type PTAB_LIST_CNKI SHORT_LIVED PROCESSES ptab_list_chunk_info
+type PTAB_LIST_PIDS SHORT_LIVED PROCESSES ptab_list_pids
type RE_TMP_BUF TEMPORARY SYSTEM re_tmp_buf
type RE_SUBJECT SHORT_LIVED SYSTEM re_subject
type RE_HEAP STANDARD SYSTEM re_heap
@@ -263,7 +263,6 @@ type ZLIB STANDARD SYSTEM zlib
type CPU_GRPS_MAP LONG_LIVED SYSTEM cpu_groups_map
type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts
type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
-type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 90ddaa4fac..874d1606fd 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -41,6 +41,8 @@
#include "erl_cpu_topology.h"
#include "erl_async.h"
#include "erl_thr_progress.h"
+#define ERTS_PTAB_WANT_DEBUG_FUNCS__
+#include "erl_ptab.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -886,8 +888,7 @@ BIF_RETTYPE process_info_1(BIF_ALIST_1)
&& external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
BIF_RET(am_undefined);
- if (is_not_internal_pid(BIF_ARG_1)
- || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ if (is_not_internal_pid(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -922,8 +923,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2)
&& external_pid_dist_entry(pid) == erts_this_dist_entry)
BIF_RET(am_undefined);
- if (is_not_internal_pid(pid)
- || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ if (is_not_internal_pid(pid)) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -1015,9 +1015,9 @@ process_info_aux(Process *BIF_P,
switch (item) {
case am_registered_name:
- if (rp->reg != NULL) {
+ if (rp->common.u.alive.reg) {
hp = HAlloc(BIF_P, 3);
- res = rp->reg->name;
+ res = rp->common.u.alive.reg->name;
} else {
if (always_wrap) {
hp = HAlloc(BIF_P, 3);
@@ -1063,7 +1063,7 @@ process_info_aux(Process *BIF_P,
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
n = rp->msg.len;
- if (n == 0 || rp->trace_flags & F_SENSITIVE) {
+ if (n == 0 || ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
hp = HAlloc(BIF_P, 3);
} else {
int remove_bad_messages = 0;
@@ -1347,7 +1347,7 @@ process_info_aux(Process *BIF_P,
}
case am_dictionary:
- if (rp->trace_flags & F_SENSITIVE) {
+ if (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
res = NIL;
} else {
res = erts_dictionary_copy(BIF_P, rp->dictionary);
@@ -1517,7 +1517,7 @@ process_info_aux(Process *BIF_P,
case am_trace:
hp = HAlloc(BIF_P, 3);
- res = make_small(rp->trace_flags & TRACEE_FLAGS);
+ res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
break;
case am_binary: {
@@ -2186,9 +2186,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
BIF_RET(res);
} else if (BIF_ARG_1 == am_process_count) {
- BIF_RET(make_small(erts_process_count()));
+ BIF_RET(make_small(erts_ptab_count(&erts_proc)));
} else if (BIF_ARG_1 == am_process_limit) {
- BIF_RET(make_small(erts_max_processes));
+ BIF_RET(make_small(erts_ptab_max(&erts_proc)));
} else if (BIF_ARG_1 == am_info
|| BIF_ARG_1 == am_procs
|| BIF_ARG_1 == am_loaded
@@ -3138,9 +3138,6 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
if (BIF_ARG_1 == BIF_P->id)
BIF_RET(am_true);
- if(internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- BIF_ERROR(BIF_P, BADARG);
-
rp = erts_proc_lookup(BIF_ARG_1);
if (!rp) {
BIF_RET(am_false);
@@ -3358,7 +3355,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
/* Used by node_container_SUITE (emulator) */
Eterm res;
if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
- res = erts_test_next_pid(0, 0);
+ res = erts_ptab_test_next_id(&erts_proc, 0, 0);
else {
res = erts_test_next_port(0, 0);
}
@@ -3397,11 +3394,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
/* Used by process_SUITE (emulator) */
- BIF_RET(erts_debug_processes(BIF_P));
+ BIF_RET(erts_debug_ptab_list(BIF_P, &erts_proc));
}
else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
/* Used by process_SUITE (emulator) */
- BIF_RET(erts_debug_processes_bif_info(BIF_P));
+ BIF_RET(erts_debug_ptab_list_bif_info(BIF_P, &erts_proc));
}
else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
/* Used by distribution_SUITE (emulator) */
@@ -3715,7 +3712,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Eterm res;
if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
- res = erts_test_next_pid(1, next);
+ res = erts_ptab_test_next_id(&erts_proc, 1, next);
else {
res = erts_test_next_port(1, next);
}
@@ -3989,8 +3986,8 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
} else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
/* use registered names as id's for process locks if available */
proc = erts_proc_lookup(lock->id);
- if (proc && proc->reg) {
- id = proc->reg->name;
+ if (proc && proc->common.u.alive.reg) {
+ id = proc->common.u.alive.reg->name;
} else {
/* otherwise use process id */
id = lock->id;
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index 525b11f61c..cf58e6cb2b 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -265,10 +265,10 @@ link_proc(Process *p, ErtsBifTimer* btm)
{
btm->receiver.proc.ess = p;
btm->receiver.proc.prev = NULL;
- btm->receiver.proc.next = p->bif_timers;
- if (p->bif_timers)
- p->bif_timers->receiver.proc.prev = btm;
- p->bif_timers = btm;
+ btm->receiver.proc.next = p->u.bif_timers;
+ if (p->u.bif_timers)
+ p->u.bif_timers->receiver.proc.prev = btm;
+ p->u.bif_timers = btm;
}
static ERTS_INLINE void
@@ -277,7 +277,7 @@ unlink_proc(ErtsBifTimer* btm)
if (btm->receiver.proc.prev)
btm->receiver.proc.prev->receiver.proc.next = btm->receiver.proc.next;
else
- btm->receiver.proc.ess->bif_timers = btm->receiver.proc.next;
+ btm->receiver.proc.ess->u.bif_timers = btm->receiver.proc.next;
if (btm->receiver.proc.next)
btm->receiver.proc.next->receiver.proc.prev = btm->receiver.proc.prev;
}
@@ -637,7 +637,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
erts_smp_proc_lock(p, plocks);
}
- btm = p->bif_timers;
+ btm = p->u.bif_timers;
while (btm) {
ErtsBifTimer *tmp_btm;
ASSERT(!(btm->flags & BTM_FLG_CANCELED));
@@ -647,7 +647,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
erts_cancel_timer(&tmp_btm->tm);
}
- p->bif_timers = NULL;
+ p->u.bif_timers = NULL;
erts_smp_btm_rwunlock();
}
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 8fc8e363b1..3baac1572a 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -234,7 +234,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetRef(erts_default_meta_match_spec);
erts_default_meta_tracer_pid = meta_tracer_pid;
if (meta_tracer_proc) {
- meta_tracer_proc->trace_flags |= F_TRACER;
+ ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
}
} else if (! flags.breakpoint) {
MatchSetUnref(erts_default_meta_match_spec);
@@ -327,7 +327,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
if (meta_tracer_proc) {
- meta_tracer_proc->trace_flags |= F_TRACER;
+ ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
}
@@ -473,7 +473,7 @@ Eterm trace_3(BIF_ALIST_3)
ERTS_PROC_LOCKS_ALL);
if (!tracer_proc)
goto error;
- tracer_proc->trace_flags |= F_TRACER;
+ ERTS_TRACE_FLAGS(tracer_proc) |= F_TRACER;
erts_smp_proc_unlock(tracer_proc,
(tracer_proc == p
? ERTS_PROC_LOCKS_ALL_MINOR
@@ -485,7 +485,7 @@ Eterm trace_3(BIF_ALIST_3)
erts_smp_port_unlock(tracer_port);
goto error;
}
- tracer_port->trace_flags |= F_TRACER;
+ ERTS_TRACE_FLAGS(tracer_port) |= F_TRACER;
erts_smp_port_unlock(tracer_port);
} else
goto error;
@@ -529,14 +529,14 @@ Eterm trace_3(BIF_ALIST_3)
}
if (on)
- tracee_port->trace_flags |= mask;
+ ERTS_TRACE_FLAGS(tracee_port) |= mask;
else
- tracee_port->trace_flags &= ~mask;
+ ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
- if (!tracee_port->trace_flags)
- tracee_port->tracer_proc = NIL;
+ if (!ERTS_TRACE_FLAGS(tracee_port))
+ ERTS_TRACER_PROC(tracee_port) = NIL;
else if (tracer != NIL)
- tracee_port->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_port) = tracer;
erts_smp_port_unlock(tracee_port);
@@ -570,14 +570,14 @@ Eterm trace_3(BIF_ALIST_3)
}
if (on)
- tracee_p->trace_flags |= mask;
+ ERTS_TRACE_FLAGS(tracee_p) |= mask;
else
- tracee_p->trace_flags &= ~mask;
+ ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
- if ((tracee_p->trace_flags & TRACEE_FLAGS) == 0)
- tracee_p->tracer_proc = NIL;
+ if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS) == 0)
+ ERTS_TRACER_PROC(tracee_p) = NIL;
else if (tracer != NIL)
- tracee_p->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_p) = tracer;
erts_smp_proc_unlock(tracee_p,
(tracee_p == p
@@ -651,8 +651,9 @@ Eterm trace_3(BIF_ALIST_3)
ok = 1;
if (procs || mods) {
+ int max = erts_ptab_max(&erts_proc);
/* tracing of processes */
- for (i = 0; i < erts_max_processes; i++) {
+ for (i = 0; i < max; i++) {
Process* tracee_p = erts_pix2proc(i);
if (! tracee_p)
continue;
@@ -663,14 +664,14 @@ Eterm trace_3(BIF_ALIST_3)
continue;
}
if (on) {
- tracee_p->trace_flags |= mask;
+ ERTS_TRACE_FLAGS(tracee_p) |= mask;
} else {
- tracee_p->trace_flags &= ~mask;
+ ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
}
- if(!(tracee_p->trace_flags & TRACEE_FLAGS)) {
- tracee_p->tracer_proc = NIL;
+ if(!(ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)) {
+ ERTS_TRACER_PROC(tracee_p) = NIL;
} else if (tracer != NIL) {
- tracee_p->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_p) = tracer;
}
matches++;
}
@@ -685,13 +686,13 @@ Eterm trace_3(BIF_ALIST_3)
if (port_already_traced(NULL, tracee_port, tracer)) continue;
}
- if (on) tracee_port->trace_flags |= mask;
- else tracee_port->trace_flags &= ~mask;
+ if (on) ERTS_TRACE_FLAGS(tracee_port) |= mask;
+ else ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
- if (!(tracee_port->trace_flags & TRACEE_FLAGS)) {
- tracee_port->tracer_proc = NIL;
+ if (!(ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)) {
+ ERTS_TRACER_PROC(tracee_port) = NIL;
} else if (tracer != NIL) {
- tracee_port->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_port) = tracer;
}
/* matches are not counted for ports since it would violate compatibility */
/* This could be a reason to modify this function or make a new one. */
@@ -758,20 +759,20 @@ static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
* * main lock is held on c_p
* * all locks are held on port tracee_p
*/
- if ((tracee_port->trace_flags & TRACEE_FLAGS)
- && tracee_port->tracer_proc != tracer) {
+ if ((ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)
+ && ERTS_TRACER_PROC(tracee_port) != tracer) {
/* This tracee is already being traced, and not by the
* tracer to be */
- if (is_internal_port(tracee_port->tracer_proc)) {
- if (!erts_is_valid_tracer_port(tracee_port->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(tracee_port))) {
+ if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_port))) {
/* Current trace port now invalid
* - discard it and approve the new. */
goto remove_tracer;
} else
return 1;
}
- else if(is_internal_pid(tracee_port->tracer_proc)) {
- Process *tracer_p = erts_proc_lookup(tracee_port->tracer_proc);
+ else if(is_internal_pid(ERTS_TRACER_PROC(tracee_port))) {
+ Process *tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_port));
if (!tracer_p) {
/* Current trace process now invalid
* - discard it and approve the new. */
@@ -781,8 +782,8 @@ static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
}
else {
remove_tracer:
- tracee_port->trace_flags &= ~TRACEE_FLAGS;
- tracee_port->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(tracee_port) &= ~TRACEE_FLAGS;
+ ERTS_TRACER_PROC(tracee_port) = NIL;
}
}
return 0;
@@ -798,20 +799,22 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
* * main lock is held on c_p
* * all locks multiple are held on tracee_p
*/
- if ((tracee_p->trace_flags & TRACEE_FLAGS)
- && tracee_p->tracer_proc != tracer) {
+ if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)
+ && ERTS_TRACER_PROC(tracee_p) != tracer) {
/* This tracee is already being traced, and not by the
* tracer to be */
- if (is_internal_port(tracee_p->tracer_proc)) {
- if (!erts_is_valid_tracer_port(tracee_p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(tracee_p))) {
+ if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_p))) {
/* Current trace port now invalid
* - discard it and approve the new. */
goto remove_tracer;
} else
return 1;
}
- else if(is_internal_pid(tracee_p->tracer_proc)) {
- Process *tracer_p = erts_proc_lookup(tracee_p->tracer_proc);
+ else if(is_internal_pid(ERTS_TRACER_PROC(tracee_p))) {
+ Process *tracer_p;
+
+ tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_p));
if (!tracer_p) {
/* Current trace process now invalid
* - discard it and approve the new. */
@@ -821,8 +824,8 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
}
else {
remove_tracer:
- tracee_p->trace_flags &= ~TRACEE_FLAGS;
- tracee_p->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(tracee_p) &= ~TRACEE_FLAGS;
+ ERTS_TRACER_PROC(tracee_p) = NIL;
}
}
return 0;
@@ -859,8 +862,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
if (pid_spec == am_new) {
erts_get_default_tracing(&trace_flags, &tracer);
- } else if (is_internal_pid(pid_spec)
- && internal_pid_index(pid_spec) < erts_max_processes) {
+ } else if (is_internal_pid(pid_spec)) {
Process *tracee;
tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
pid_spec, ERTS_PROC_LOCKS_ALL);
@@ -868,16 +870,16 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
if (!tracee) {
return am_undefined;
} else {
- tracer = tracee->tracer_proc;
- trace_flags = tracee->trace_flags;
+ tracer = ERTS_TRACER_PROC(tracee);
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
}
if (is_internal_pid(tracer)) {
if (!erts_proc_lookup(tracer)) {
reset_tracer:
- tracee->trace_flags &= ~TRACEE_FLAGS;
- trace_flags = tracee->trace_flags;
- tracer = tracee->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(tracee) &= ~TRACEE_FLAGS;
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
+ tracer = ERTS_TRACER_PROC(tracee) = NIL;
}
}
else if (is_internal_port(tracer)) {
@@ -2209,8 +2211,7 @@ trace_delivered_1(BIF_ALIST_1)
p = NULL;
} else if (! (p = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
BIF_ARG_1, ERTS_PROC_LOCKS_ALL))) {
- if (is_not_internal_pid(BIF_ARG_1)
- || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ if (is_not_internal_pid(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
}
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 3b1c4ff5ac..92687336a2 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -3104,7 +3104,7 @@ retry:
if (to_proc == NULL) {
return 0; /* heir not alive, table still mine */
}
- if (to_proc->started_interval != tb->common.heir_started_interval) {
+ if (to_proc->common.u.alive.started_interval != tb->common.heir_started_interval) {
erts_smp_proc_unlock(to_proc, to_locks);
return 0; /* heir dead and pid reused, table still mine */
}
@@ -3138,7 +3138,7 @@ retry:
/*
* erts_db_process_exiting() is called when a process terminates.
* It returns 0 when completely done, and !0 when it wants to
- * yield. c_p->u.exit_data can hold a pointer to a state while
+ * yield. c_p->u.terminate can hold a pointer to a state while
* yielding.
*/
#define ERTS_DB_INTERNAL_ERROR(LSTR) \
@@ -3148,7 +3148,7 @@ retry:
int
erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
{
- ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.exit_data;
+ ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.terminate;
Eterm pid = c_p->id;
ErtsDbProcCleanupState default_state;
int ret;
@@ -3330,7 +3330,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
if (state != &default_state)
erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
- c_p->u.exit_data = NULL;
+ c_p->u.terminate = NULL;
return 0;
default:
@@ -3351,13 +3351,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
break;
}
- ASSERT(c_p->u.exit_data == (void *) state
+ ASSERT(c_p->u.terminate == (void *) state
|| state == &default_state);
if (state == &default_state) {
- c_p->u.exit_data = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
+ c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
sizeof(ErtsDbProcCleanupState));
- sys_memcpy(c_p->u.exit_data,
+ sys_memcpy(c_p->u.terminate,
(void*) state,
sizeof(ErtsDbProcCleanupState));
}
@@ -3500,14 +3500,14 @@ static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data)
return;
}
if (heir == me->id) {
- erts_ensure_later_proc_interval(me->started_interval);
- tb->common.heir_started_interval = me->started_interval;
+ erts_ensure_later_proc_interval(me->common.u.alive.started_interval);
+ tb->common.heir_started_interval = me->common.u.alive.started_interval;
}
else {
Process* heir_proc= erts_proc_lookup(heir);
if (heir_proc != NULL) {
- erts_ensure_later_proc_interval(heir_proc->started_interval);
- tb->common.heir_started_interval = heir_proc->started_interval;
+ erts_ensure_later_proc_interval(heir_proc->common.u.alive.started_interval);
+ tb->common.heir_started_interval = heir_proc->common.u.alive.started_interval;
} else {
tb->common.heir = am_none;
}
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index c2f6cfa933..25d22f713b 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -138,21 +138,23 @@ set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
Uint flags;
if (tracer == NIL) {
- flags = tracee_p->trace_flags & ~TRACEE_FLAGS;
+ flags = ERTS_TRACE_FLAGS(tracee_p) & ~TRACEE_FLAGS;
} else {
- flags = ((tracee_p->trace_flags & ~d_flags) | e_flags);
+ flags = ((ERTS_TRACE_FLAGS(tracee_p) & ~d_flags) | e_flags);
if (! flags) tracer = NIL;
}
- ret = tracee_p->tracer_proc != tracer || tracee_p->trace_flags != flags
- ? am_true : am_false;
- tracee_p->tracer_proc = tracer;
- tracee_p->trace_flags = flags;
+ ret = ((ERTS_TRACER_PROC(tracee_p) != tracer
+ || ERTS_TRACE_FLAGS(tracee_p) != flags)
+ ? am_true
+ : am_false);
+ ERTS_TRACER_PROC(tracee_p) = tracer;
+ ERTS_TRACE_FLAGS(tracee_p) = flags;
return ret;
}
/*
** Assuming all locks on tracee_p on entry
**
-** Changes tracee_p->trace_flags and tracee_p->tracer_proc
+** Changes ERTS_TRACE_FLAGS(tracee_p) and ERTS_TRACER_PROC(tracee_p)
** according to input disable/enable flags and tracer.
**
** Returns am_true|am_false on success, am_true if value changed,
@@ -173,7 +175,9 @@ set_match_trace(Process *tracee_p, Eterm fail_term, Eterm tracer,
tracer, ERTS_PROC_LOCKS_ALL))) {
if (tracee_p != tracer_p) {
ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- tracer_p->trace_flags |= tracee_p->trace_flags ? F_TRACER : 0;
+ ERTS_TRACE_FLAGS(tracer_p) |= (ERTS_TRACE_FLAGS(tracee_p)
+ ? F_TRACER
+ : 0);
erts_smp_proc_unlock(tracer_p, ERTS_PROC_LOCKS_ALL);
}
} else if (is_internal_port(tracer)) {
@@ -2261,7 +2265,7 @@ restart:
case matchEnableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, c_p->tracer_proc, 0, n);
+ set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), 0, n);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2274,7 +2278,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, c_p->tracer_proc, 0, n);
+ set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), 0, n);
esp[-1] = am_true;
}
}
@@ -2282,7 +2286,7 @@ restart:
case matchDisableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, c_p->tracer_proc, n, 0);
+ set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), n, 0);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2295,7 +2299,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, c_p->tracer_proc, n, 0);
+ set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), n, 0);
esp[-1] = am_true;
}
}
@@ -2316,12 +2320,12 @@ restart:
--esp;
if (*esp == am_true) {
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_TRACE_SILENT;
+ ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
else if (*esp == am_false) {
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags &= ~F_TRACE_SILENT;
+ ERTS_TRACE_FLAGS(c_p) &= ~F_TRACE_SILENT;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
break;
@@ -2329,11 +2333,11 @@ restart:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = c_p->tracer_proc;
+ Eterm tracer = ERTS_TRACER_PROC(c_p);
/* XXX Atomicity note: Not fully atomic. Default tracer
* is sampled from current process but applied to
* tracee and tracer later after releasing main
- * locks on current process, so c_p->tracer_proc
+ * locks on current process, so ERTS_TRACER_PROC(c_p)
* may actually have changed when tracee and tracer
* gets updated. I do not think nobody will notice.
* It is just the default value that is not fully atomic.
@@ -2358,7 +2362,7 @@ restart:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = c_p->tracer_proc;
+ Eterm tracer = ERTS_TRACER_PROC(c_p);
/* XXX Atomicity note. Not fully atomic. See above.
* Above it could possibly be solved, but not here.
*/
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 6075a527c3..d694ce954e 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -1932,9 +1932,9 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
n++;
}
#endif
- ASSERT(is_nil(p->tracer_proc) ||
- is_internal_pid(p->tracer_proc) ||
- is_internal_port(p->tracer_proc));
+ ASSERT(is_nil(ERTS_TRACER_PROC(p)) ||
+ is_internal_pid(ERTS_TRACER_PROC(p)) ||
+ is_internal_port(ERTS_TRACER_PROC(p)));
ASSERT(is_pid(follow_moved(p->group_leader)));
if (is_not_immed(p->group_leader)) {
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 945cc2565c..d4004361af 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -45,6 +45,7 @@
#include "erl_thr_progress.h"
#include "erl_thr_queue.h"
#include "erl_async.h"
+#include "erl_ptab.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -122,7 +123,7 @@ extern void ConNormalExit(void);
extern void ConWaitForExit(void);
#endif
-static void erl_init(int ncpu);
+static void erl_init(int ncpu, int proc_tab_sz);
#define ERTS_MIN_COMPAT_REL 7
@@ -294,12 +295,12 @@ void
erts_short_init(void)
{
int ncpu = early_init(NULL, NULL);
- erl_init(ncpu);
+ erl_init(ncpu, ERTS_DEFAULT_MAX_PROCESSES);
erts_initialized = 1;
}
static void
-erl_init(int ncpu)
+erl_init(int ncpu, int proc_tab_sz)
{
init_benchmarking();
@@ -307,7 +308,7 @@ erl_init(int ncpu)
erts_init_gc();
erts_init_time();
erts_init_sys_common_misc();
- erts_init_process(ncpu);
+ erts_init_process(ncpu, proc_tab_sz);
erts_init_scheduling(no_schedulers,
no_schedulers_online);
erts_init_cpu_topology(); /* Must be after init_scheduling */
@@ -328,6 +329,7 @@ erl_init(int ncpu)
erts_bif_info_init();
erts_ddll_init();
init_emulator();
+ erts_ptab_init(); /* Must be after init_emulator() */
erts_bp_init();
init_db(); /* Must be after init_emulator */
erts_bif_timer_init();
@@ -669,7 +671,6 @@ early_init(int *argc, char **argv) /*
erts_printf_eterm_func = erts_printf_term;
erts_disable_tolerant_timeofday = 0;
display_items = 200;
- erts_proc.max = ERTS_DEFAULT_MAX_PROCESSES;
erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE;
erts_async_max_threads = 0;
erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
@@ -955,6 +956,7 @@ erl_start(int argc, char **argv)
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
int ncpu = early_init(&argc, argv);
+ int proc_tab_sz = ERTS_DEFAULT_MAX_PROCESSES;
envbufsz = sizeof(envbuf);
if (erts_sys_getenv(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
@@ -1216,7 +1218,7 @@ erl_start(int argc, char **argv)
case 'P':
/* set maximum number of processes */
Parg = get_arg(argv[i]+2, argv[i+1], &i);
- erts_proc.max = atoi(Parg);
+ proc_tab_sz = atoi(Parg);
/* Check of result is delayed until later. This is because +R
may be given after +P. */
break;
@@ -1495,10 +1497,10 @@ erl_start(int argc, char **argv)
}
/* Delayed check of +P flag */
- if (erts_proc.max < ERTS_MIN_PROCESSES
- || erts_proc.max > ERTS_MAX_PROCESSES
+ if (proc_tab_sz < ERTS_MIN_PROCESSES
+ || proc_tab_sz > ERTS_MAX_PROCESSES
|| (erts_use_r9_pids_ports
- && erts_proc.max > ERTS_MAX_R9_PROCESSES)) {
+ && proc_tab_sz > ERTS_MAX_R9_PROCESSES)) {
erts_fprintf(stderr, "bad number of processes %s\n", Parg);
erts_usage();
}
@@ -1523,7 +1525,7 @@ erl_start(int argc, char **argv)
boot_argc = argc - i; /* Number of arguments to init */
boot_argv = &argv[i];
- erl_init(ncpu);
+ erl_init(ncpu, proc_tab_sz);
init_shared_memory(boot_argc, boot_argv);
load_preloaded();
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 175695e856..1db7b27412 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -93,7 +93,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "dist_entry", "address" },
{ "dist_entry_links", "address" },
{ "proc_status", "pid" },
- { "proc_tab", NULL },
{ "ports_snapshot", NULL },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
@@ -125,6 +124,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "schdlr_sspnd", NULL },
{ "migration_info_update", NULL },
{ "run_queue", "address" },
+ { "process_table", NULL },
{ "cpu_info", NULL },
{ "pollset", "address" },
#ifdef __WIN32__
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 59e14bc0ed..64065bafb8 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -496,7 +496,7 @@ queue_message(Process *c_p,
#ifndef ERTS_SMP
res = receiver->msg.len;
#else
- res = receiver->u.alive.msg_inq.len;
+ res = receiver->msg_inq.len;
if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
/*
* We move 'in queue' to 'private queue' and place
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 2ae94965b1..771eba431f 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -127,21 +127,21 @@ typedef struct {
/* Move in message queue to end of private message queue */
#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \
do { \
- if ((P)->u.alive.msg_inq.first) { \
- *(P)->msg.last = (P)->u.alive.msg_inq.first; \
- (P)->msg.last = (P)->u.alive.msg_inq.last; \
- (P)->msg.len += (P)->u.alive.msg_inq.len; \
- (P)->u.alive.msg_inq.first = NULL; \
- (P)->u.alive.msg_inq.last = &(P)->u.alive.msg_inq.first; \
- (P)->u.alive.msg_inq.len = 0; \
+ if ((P)->msg_inq.first) { \
+ *(P)->msg.last = (P)->msg_inq.first; \
+ (P)->msg.last = (P)->msg_inq.last; \
+ (P)->msg.len += (P)->msg_inq.len; \
+ (P)->msg_inq.first = NULL; \
+ (P)->msg_inq.last = &(P)->msg_inq.first; \
+ (P)->msg_inq.len = 0; \
} \
} while (0)
/* Add message last in message queue */
#define LINK_MESSAGE(p, mp) do { \
- *(p)->u.alive.msg_inq.last = (mp); \
- (p)->u.alive.msg_inq.last = &(mp)->next; \
- (p)->u.alive.msg_inq.len++; \
+ *(p)->msg_inq.last = (mp); \
+ (p)->msg_inq.last = &(mp)->next; \
+ (p)->msg_inq.len++; \
} while(0)
#else
@@ -245,6 +245,9 @@ void erts_move_msg_attached_data_to_heap(Eterm **, ErlOffHeap *, ErlMessage *);
Eterm erts_msg_distext2heap(Process *, ErtsProcLocks *, ErlHeapFragment **,
Eterm *, ErtsDistExternal *);
+void erts_cleanup_offheap(ErlOffHeap *offheap);
+
+
ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg);
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg);
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 7b4cb7b042..62fb277eef 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -20,7 +20,7 @@
#ifndef ERL_NODE_CONTAINER_UTILS_H__
#define ERL_NODE_CONTAINER_UTILS_H__
-#include "erl_term.h"
+#include "erl_ptab.h"
/*
* Note regarding node containers:
@@ -128,47 +128,10 @@ extern int erts_use_r9_pids_ports;
* Pids *
\* */
-#define erts_max_processes erts_proc.max
-
-typedef struct {
- erts_smp_atomic_t *tab;
- int max;
- int tab_cache_lines;
- int pix_per_cache_line;
- int pix_cl_mask;
- int pix_cl_shift;
- int pix_cli_mask;
- int pix_cli_shift;
-} ErtsProcTab;
-
-extern ErtsProcTab erts_proc;
-
-ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data)
-{
- int n, pix;
-
- n = (int) pid_data;
- if (erts_proc.pix_cl_mask) {
- pix = ((n & erts_proc.pix_cl_mask) << erts_proc.pix_cl_shift);
- pix += ((n >> erts_proc.pix_cli_shift) & erts_proc.pix_cli_mask);
- }
- else {
- n %= erts_proc.max;
- pix = n % erts_proc.tab_cache_lines;
- pix *= erts_proc.pix_per_cache_line;
- pix += n / erts_proc.tab_cache_lines;
- }
- ASSERT(0 <= pix && pix < erts_proc.max);
- return pix;
-}
+extern ErtsPTab erts_proc;
-#endif
-
-#define internal_pid_index(x) erts_pid_data2ix(internal_pid_data((x)))
+#define internal_pid_index(x) erts_ptab_data2ix(&erts_proc, \
+ internal_pid_data((x)))
#define internal_pid_node_name(x) (internal_pid_node((x))->sysname)
#define external_pid_node_name(x) (external_pid_node((x))->sysname)
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 9bf8bf6580..93122b8317 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -1274,7 +1274,7 @@ setup_reference_table(void)
ErlHeapFragment *hfp;
DistEntry *dep;
HashInfo hi;
- int i;
+ int i, max_processes;
DeclareTmpHeapNoproc(heap,3);
inserted_bins = NULL;
@@ -1309,8 +1309,9 @@ setup_reference_table(void)
#endif
UnUseTmpHeapNoproc(3);
+ max_processes = erts_ptab_max(&erts_proc);
/* Insert all processes */
- for (i = 0; i < erts_max_processes; i++) {
+ for (i = 0; i < max_processes; i++) {
Process *proc = erts_pix2proc(i);
if (proc) {
ErlMessage *msg;
@@ -1344,7 +1345,7 @@ setup_reference_table(void)
proc->id);
}
#ifdef ERTS_SMP
- for (msg = proc->u.alive.msg_inq.first; msg; msg = msg->next) {
+ for (msg = proc->msg_inq.first; msg; msg = msg->next) {
ErlHeapFragment *heap_frag = NULL;
if (msg->data.attached) {
if (is_value(ERL_MESSAGE_TERM(msg)))
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index da2e931d43..79614c85db 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -43,6 +43,8 @@
#include "erl_thr_queue.h"
#include "erl_async.h"
#include "dtrace-wrapper.h"
+#define ERTS_PTAB_WANT_BIF_IMPL__
+#include "erl_ptab.h"
#define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
#define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
@@ -133,155 +135,16 @@ do { \
#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
(RUNQ_READ_LEN(&(RQ)->ports.info.len) == 0 && (RQ)->misc.start == NULL)
+const Process erts_invalid_process = {{ERTS_INVALID_PID}};
+
extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-#ifdef ARCH_32
-
-union {
- erts_smp_dw_atomic_t pid_data;
- char align[ERTS_CACHE_LINE_SIZE];
-} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-
-
-static ERTS_INLINE Uint64
-dw_aint_to_uint64(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-static void
-unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
-#else
- dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
- dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
-#endif
-}
-
-static ERTS_INLINE void
-last_pid_data_init_nob(Uint64 val)
-{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_init_nob(&last.pid_data, &dw);
-}
-
-static ERTS_INLINE void
-last_pid_data_set_relb(Uint64 val)
-{
- erts_dw_aint_t dw;
- unint64_to_dw_aint(&dw, val);
- erts_smp_dw_atomic_set_relb(&last.pid_data, &dw);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_read_nob(void)
-{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_nob(&last.pid_data, &dw);
- return dw_aint_to_uint64(&dw);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_read_acqb(void)
-{
- erts_dw_aint_t dw;
- erts_smp_dw_atomic_read_acqb(&last.pid_data, &dw);
- return dw_aint_to_uint64(&dw);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_cmpxchg_relb(Uint64 new, Uint64 exp)
-{
- erts_dw_aint_t dw_new, dw_xchg;
-
- unint64_to_dw_aint(&dw_new, new);
- unint64_to_dw_aint(&dw_xchg, exp);
-
- if (erts_smp_dw_atomic_cmpxchg_relb(&last.pid_data, &dw_new, &dw_xchg))
- return exp;
- else
- return dw_aint_to_uint64(&dw_xchg);
-}
-
-#elif defined(ARCH_64)
-
-union {
- erts_smp_atomic_t pid_data;
- char align[ERTS_CACHE_LINE_SIZE];
-} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-
-static ERTS_INLINE void
-last_pid_data_init_nob(Uint64 val)
-{
- erts_smp_atomic_init_nob(&last.pid_data, (erts_aint_t) val);
-}
-
-static ERTS_INLINE void
-last_pid_data_set_relb(Uint64 val)
-{
- erts_smp_atomic_set_relb(&last.pid_data, (erts_aint_t) val);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_read_nob(void)
-{
- return (Uint64) erts_smp_atomic_read_nob(&last.pid_data);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_read_acqb(void)
-{
- return (Uint64) erts_smp_atomic_read_acqb(&last.pid_data);
-}
-
-static ERTS_INLINE Uint64
-last_pid_data_cmpxchg_relb(Uint64 new, Uint64 exp)
-{
- return (Uint64) erts_smp_atomic_cmpxchg_relb(&last.pid_data,
- (erts_aint_t) new,
- (erts_aint_t) exp);
-}
-
-#else
-# error "Not 64-bit, nor 32-bit architecture..."
-#endif
-
-static ERTS_INLINE int
-last_pid_data_cmp(Uint64 lpd1, Uint64 lpd2)
-{
- Uint64 lpd1_wrap;
-
- if (lpd1 == lpd2)
- return 0;
-
- lpd1_wrap = lpd1 + (((Uint64) 1) << 63);
-
- if (lpd1 < lpd1_wrap)
- return (lpd1 < lpd2 && lpd2 < lpd1_wrap) ? -1 : 1;
- else
- return (lpd1_wrap <= lpd2 && lpd2 < lpd1) ? 1 : -1;
-}
-
-
-#define ERTS_PID_DATA_MASK__ ((1 << _PID_DATA_SIZE) - 1)
-
int erts_sched_compact_load;
Uint erts_no_schedulers;
-ErtsProcTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
static int wakeup_other_limit;
@@ -364,8 +227,6 @@ erts_sched_stat_t erts_sched_stat;
static erts_tsd_key_t sched_data_key;
#endif
-erts_smp_rwmtx_t erts_proc_tab_rwmtx;
-
static erts_smp_atomic32_t function_calls;
#ifdef ERTS_SMP
@@ -407,29 +268,6 @@ Process** erts_active_procs;
#if ERTS_MAX_PROCESSES > 0x7fffffff
#error "Need to store process_count in another type"
#endif
-static erts_smp_atomic32_t process_count;
-
-typedef struct ErtsTermProcElement_ ErtsTermProcElement;
-struct ErtsTermProcElement_ {
- ErtsTermProcElement *next;
- ErtsTermProcElement *prev;
- int ix;
- union {
- struct {
- Eterm pid;
- Uint64 spawned;
- Uint64 exited;
- } process;
- struct {
- Uint64 interval;
- } bif_invocation;
- } u;
-};
-
-static struct {
- ErtsTermProcElement *start;
- ErtsTermProcElement *end;
-} saved_term_procs;
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_op_list,
ErtsMiscOpList,
@@ -491,8 +329,6 @@ do { \
* Local functions.
*/
-static void init_processes_bif(void);
-static void save_terminating_process(Process *p);
static void exec_misc_ops(ErtsRunQueue *);
static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
@@ -559,39 +395,17 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq)
}
#endif
-static erts_interval_t *proc_interval;
-
-static void
-proc_interval_init(void)
-{
- proc_interval = erts_alloc_permanent_cache_aligned(
- ERTS_ALC_T_PROC_INTERVAL,
- sizeof(erts_interval_t));
- erts_smp_interval_init(proc_interval);
-}
-
-static ERTS_INLINE Uint64
-get_proc_interval(void)
-{
- return erts_smp_current_interval_nob(proc_interval);
-}
static ERTS_INLINE Uint64
ensure_later_proc_interval(Uint64 interval)
{
- return erts_smp_ensure_later_interval_nob(proc_interval, interval);
-}
-
-static ERTS_INLINE Uint64
-step_proc_interval(void)
-{
- return erts_smp_step_interval_nob(proc_interval);
+ return erts_smp_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval);
}
Uint64
erts_get_proc_interval(void)
{
- return get_proc_interval();
+ return erts_smp_current_interval_nob(erts_ptab_interval(&erts_proc));
}
Uint64
@@ -603,7 +417,7 @@ erts_ensure_later_proc_interval(Uint64 interval)
Uint64
erts_step_proc_interval(void)
{
- return step_proc_interval();
+ return erts_smp_step_interval_nob(erts_ptab_interval(&erts_proc));
}
void
@@ -651,20 +465,18 @@ erts_pre_init_process(void)
#endif
}
+#ifdef ERTS_SMP
+static void
+release_process(void *vproc)
+{
+ erts_smp_proc_dec_refc((Process *) vproc);
+}
+#endif
+
/* initialize the scheduler */
void
-erts_init_process(int ncpu)
+erts_init_process(int ncpu, int proc_tab_size)
{
- int proc_tab_sz;
- int max_proc_bits;
- int proc_bits = ERTS_PROC_BITS;
- erts_smp_atomic_t *proc_entry;
- char *proc_tab_end;
- erts_smp_rwmtx_opt_t proc_tab_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- proc_tab_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
- proc_tab_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
-
- proc_interval_init();
#ifdef ERTS_SMP
erts_disable_proc_not_running_opt = 0;
@@ -673,23 +485,17 @@ erts_init_process(int ncpu)
init_proclist_alloc();
- erts_smp_atomic32_init_nob(&process_count, 0);
-
- if (erts_use_r9_pids_ports)
- proc_bits = ERTS_R9_PROC_BITS;
-
- if (erts_proc.max > (1 << proc_bits))
- erts_proc.max = 1 << proc_bits;
+ erts_ptab_init_table(&erts_proc,
+ ERTS_ALC_T_PROC_TABLE,
+#ifdef ERTS_SMP
+ release_process,
+#else
+ NULL,
+#endif
+ (ErtsPTabElementCommon *) &erts_invalid_process.common,
+ proc_tab_size,
+ "process_table");
- proc_tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(erts_proc.max
- * sizeof(erts_smp_atomic_t));
- erts_proc.tab = erts_alloc(ERTS_ALC_T_PROC_TABLE, proc_tab_sz);
- proc_tab_end = ((char *) erts_proc.tab) + proc_tab_sz;
- proc_entry = erts_proc.tab;
- while (proc_tab_end > ((char *) proc_entry)) {
- erts_smp_atomic_init_nob(proc_entry, ERTS_AINT_NULL);
- proc_entry++;
- }
#ifdef HYBRID
erts_active_procs = (Process**)
erts_alloc(ERTS_ALC_T_ACTIVE_PROCS,
@@ -697,35 +503,6 @@ erts_init_process(int ncpu)
erts_num_active_procs = 0;
#endif
- erts_smp_rwmtx_init_opt(&erts_proc_tab_rwmtx,
- &proc_tab_rwmtx_opts,
- "proc_tab");
- last_pid_data_init_nob(~((Uint64) 0));
-
- max_proc_bits = erts_fit_in_bits_int32((Sint32) erts_proc.max - 1);
-
- erts_proc.tab_cache_lines = proc_tab_sz/ERTS_CACHE_LINE_SIZE;
- erts_proc.pix_per_cache_line = ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t);
- if ((erts_proc.max & (erts_proc.max - 1))
- | (erts_proc.pix_per_cache_line & (erts_proc.pix_per_cache_line - 1))) {
- /*
- * erts_proc.max or erts_proc.pix_per_cache_line
- * not a power of 2 :(
- */
- erts_proc.pix_cl_mask = 0;
- erts_proc.pix_cl_shift = 0;
- erts_proc.pix_cli_mask = 0;
- erts_proc.pix_cli_shift = 0;
- }
- else {
- ASSERT((erts_proc.tab_cache_lines
- & (erts_proc.tab_cache_lines - 1)) == 0);
- erts_proc.pix_cl_mask = erts_proc.tab_cache_lines-1;
- erts_proc.pix_cl_shift = erts_fit_in_bits_int32(erts_proc.pix_per_cache_line-1);
- erts_proc.pix_cli_shift = erts_fit_in_bits_int32(erts_proc.pix_cl_mask);
- erts_proc.pix_cli_mask = (1 << (max_proc_bits - erts_proc.pix_cli_shift)) - 1;
- }
-
last_reductions = 0;
last_exact_reductions = 0;
erts_default_process_flags = 0;
@@ -735,7 +512,6 @@ void
erts_late_init_process(void)
{
int ix;
- init_processes_bif();
erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
@@ -983,9 +759,9 @@ static ERTS_INLINE ErtsProcList *
proclist_create(Process *p)
{
ErtsProcList *plp = proclist_alloc();
- ensure_later_proc_interval(p->started_interval);
+ ensure_later_proc_interval(p->common.u.alive.started_interval);
plp->pid = p->id;
- plp->started_interval = p->started_interval;
+ plp->started_interval = p->common.u.alive.started_interval;
return plp;
}
@@ -998,7 +774,9 @@ proclist_destroy(ErtsProcList *plp)
static ERTS_INLINE int
proclist_same(ErtsProcList *plp, Process *p)
{
- return plp->pid == p->id && plp->started_interval == p->started_interval;
+ return (plp->pid == p->id
+ && (plp->started_interval
+ == p->common.u.alive.started_interval));
}
ErtsProcList *
@@ -6500,9 +6278,8 @@ Process *schedule(Process *p, int calls)
state = erts_smp_atomic32_read_acqb(&p->state);
if (IS_TRACED(p)) {
- if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE)) {
+ if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
- }
if (state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
trace_sched(p, ((state & ERTS_PSFLG_FREE)
@@ -6515,7 +6292,7 @@ Process *schedule(Process *p, int calls)
else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(p, am_out);
}
- }
+ }
#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
@@ -6898,7 +6675,7 @@ Process *schedule(Process *p, int calls)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#ifdef ERTS_SMP
- if (is_not_nil(p->tracer_proc))
+ if (is_not_nil(ERTS_TRACER_PROC(p)))
erts_check_my_tracer_proc(p);
#endif
@@ -7102,70 +6879,6 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-/*
- * erts_test_next_pid() is only used for testing.
- */
-Sint
-erts_test_next_pid(int set, Uint next)
-{
- Uint64 lpd;
- Sint res;
- Eterm pid_data;
- int first_pix = -1;
-
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
-
- if (!set)
- lpd = last_pid_data_read_nob();
- else {
-
- lpd = (Uint64) next;
- pid_data = (Eterm) (lpd & ERTS_PID_DATA_MASK__);
- if (ERTS_INVALID_PID == make_internal_pid(pid_data)) {
- lpd += erts_proc.max;
- ASSERT(erts_pid_data2ix(pid_data)
- == erts_pid_data2ix(lpd & ERTS_PID_DATA_MASK__));
- }
- last_pid_data_set_relb(lpd);
- }
-
- while (1) {
- int pix;
- lpd++;
- pix = (int) (lpd % erts_proc.max);
- if (first_pix < 0)
- first_pix = pix;
- else if (pix == first_pix) {
- res = -1;
- break;
- }
- if (ERTS_AINT_NULL == erts_smp_atomic_read_nob(&erts_proc.tab[pix])) {
- pid_data = (Eterm) (lpd & ERTS_PID_DATA_MASK__);
- if (ERTS_INVALID_PID == make_internal_pid(pid_data)) {
- lpd += erts_proc.max;
- ASSERT(erts_pid_data2ix(pid_data)
- == erts_pid_data2ix(lpd & ERTS_PID_DATA_MASK__));
- }
- res = lpd & ERTS_PID_DATA_MASK__;
- break;
- }
- }
-
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
-
- return res;
-
-}
-
-Uint erts_process_count(void)
-{
- erts_aint32_t res = erts_smp_atomic32_read_nob(&process_count);
- if (res > erts_proc.max)
- return erts_proc.max;
- ASSERT(res >= 0);
- return (Uint) res;
-}
-
void
erts_free_proc(Process *p)
{
@@ -7175,132 +6888,69 @@ erts_free_proc(Process *p)
erts_free(ERTS_ALC_T_PROC, (void *) p);
}
+typedef struct {
+ Process *proc;
+ erts_aint32_t state;
+ ErtsRunQueue *run_queue;
+} ErtsEarlyProcInit;
+
+static void early_init_process_struct(void *varg, Eterm data)
+{
+ ErtsEarlyProcInit *arg = (ErtsEarlyProcInit *) varg;
+ Process *proc = arg->proc;
+
+ proc->common.id = make_internal_pid(data);
+ proc->id = proc->common.id; /* to be removed */
+ erts_smp_atomic32_init_relb(&proc->state, arg->state);
+
+#ifdef ERTS_SMP
+ RUNQ_SET_RQ(&proc->run_queue, arg->run_queue);
+
+ erts_proc_lock_init(proc); /* All locks locked */
+#endif
+
+}
+
/*
** Allocate process and find out where to place next process.
*/
static Process*
alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
{
- int pix;
- Process* p;
- Uint64 lpd, exp_lpd;
- Eterm pid_data;
- erts_aint32_t proc_count;
-#ifdef DEBUG
- Eterm pid;
-#endif
-
- erts_smp_rwmtx_rlock(&erts_proc_tab_rwmtx);
-
- proc_count = erts_smp_atomic32_inc_read_acqb(&process_count);
- if (proc_count > erts_proc.max) {
- while (1) {
- erts_aint32_t act_proc_count;
-
- act_proc_count = erts_smp_atomic32_cmpxchg_relb(&process_count,
- proc_count-1,
- proc_count);
- if (act_proc_count == proc_count)
- goto system_limit;
- proc_count = act_proc_count;
- if (proc_count <= erts_proc.max)
- break;
- }
- }
+ ErtsEarlyProcInit init_arg;
+ Process *p;
- p = (Process*) erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process));
+ p = erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process));
if (!p)
- goto enomem;
-
- p->approx_started = erts_get_approx_time();
- p->started_interval = get_proc_interval();
-
- lpd = last_pid_data_read_acqb();
-
- /* Reserve slot */
- while (1) {
- lpd++;
- pix = erts_pid_data2ix((Eterm) (lpd & ERTS_PID_DATA_MASK__));
- if (erts_smp_atomic_read_nob(&erts_proc.tab[pix]) == ERTS_AINT_NULL) {
- erts_aint_t val;
- val = erts_smp_atomic_cmpxchg_relb(&erts_proc.tab[pix],
- ((erts_aint_t)
- ERTS_PROC_LOCK_BUSY),
- ERTS_AINT_NULL);
-
- if (ERTS_AINT_NULL == val)
- break;
- }
- }
-
- pid_data = (Eterm) lpd & ERTS_PID_DATA_MASK__;
+ return NULL;
- p->id = make_internal_pid(pid_data);
- if (p->id == ERTS_INVALID_PID) {
- /* Do not use the invalid pid; change serial */
- lpd += erts_proc.max;
- ASSERT(pix == erts_pid_data2ix((Eterm) (lpd & ERTS_PID_DATA_MASK__)));
- pid_data = (Eterm) lpd & ERTS_PID_DATA_MASK__;
- p->id = make_internal_pid(pid_data);
- ASSERT(p->id != ERTS_INVALID_PID);
- }
+ init_arg.proc = (Process *) p;
+ init_arg.run_queue = rq;
+ init_arg.state = state;
- exp_lpd = last_pid_data_read_nob();
+ ASSERT(((char *) p) == ((char *) &p->common));
- /* Move last pid data forward */
- while (1) {
- Uint64 act_lpd;
- if (last_pid_data_cmp(lpd, exp_lpd) < 0)
- break;
- act_lpd = last_pid_data_cmpxchg_relb(lpd, exp_lpd);
- if (act_lpd == exp_lpd)
- break;
- exp_lpd = act_lpd;
+ if (!erts_ptab_new_element(&erts_proc,
+ &p->common,
+ (void *) &init_arg,
+ early_init_process_struct)) {
+ erts_free(ERTS_ALC_T_PROC, p);
+ return NULL;
}
-#ifdef ERTS_SMP
- RUNQ_SET_RQ(&p->run_queue, rq);
-#endif
-
- erts_smp_atomic32_init_relb(&p->state, state);
-
-#ifdef DEBUG
- pid = p->id;
-#endif
-
-#ifdef ERTS_SMP
- erts_proc_lock_init(p); /* All locks locked */
-#endif
-
- /* Move into slot reserved */
-#ifdef DEBUG
- ASSERT(ERTS_PROC_LOCK_BUSY
- == (Process *) erts_smp_atomic_xchg_relb(&erts_proc.tab[pix],
- (erts_aint_t) p));
-#else
- erts_smp_atomic_set_relb(&erts_proc.tab[pix], (erts_aint_t) p);
-#endif
-
- ASSERT(internal_pid_serial(p->id) <= (erts_use_r9_pids_ports
- ? ERTS_MAX_PID_R9_SERIAL
- : ERTS_MAX_PID_SERIAL));
-
- erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx);
-
+ ASSERT(internal_pid_serial(p->common.id) <= (erts_use_r9_pids_ports
+ ? ERTS_MAX_PID_R9_SERIAL
+ : ERTS_MAX_PID_SERIAL));
+
+ p->approx_started = erts_get_approx_time();
p->rcount = 0;
- ASSERT(p == (Process *)
- erts_smp_atomic_read_nob(
- &erts_proc.tab[internal_pid_index(pid)]));
-
- return p;
-
-enomem:
-system_limit:
- erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx);
- return NULL;
+ ASSERT(p == (Process *) (erts_ptab_pix2intptr_nob(
+ &erts_proc,
+ internal_pid_index(p->common.id))));
+ return p;
}
Eterm
@@ -7330,7 +6980,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
/*
* Copy the arguments to the global heap
* Since global GC might occur we want to do this before adding the
- * new process to the erts_proc.tab.
+ * new process to the erts_proc.r.o.tab.
*/
BM_SWAP_TIMER(system,copy);
LAZY_COPY(parent,args);
@@ -7474,12 +7124,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->reds = 0;
#ifdef ERTS_SMP
- p->u.alive.ptimer = NULL;
+ p->common.u.alive.ptimer = NULL;
#else
- sys_memset(&p->u.alive.tm, 0, sizeof(ErlTimer));
+ sys_memset(&p->common.u.alive.tm, 0, sizeof(ErlTimer));
#endif
- p->reg = NULL;
+ p->common.u.alive.reg = NULL;
p->nlinks = NULL;
p->monitors = NULL;
p->nodes_monitors = NULL;
@@ -7497,18 +7147,18 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
: STORE_NC(&p->htop, &p->off_heap, parent->group_leader);
}
- erts_get_default_tracing(&p->trace_flags, &p->tracer_proc);
+ erts_get_default_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER_PROC(p));
p->msg.first = NULL;
p->msg.last = &p->msg.first;
p->msg.save = &p->msg.first;
p->msg.len = 0;
#ifdef ERTS_SMP
- p->u.alive.msg_inq.first = NULL;
- p->u.alive.msg_inq.last = &p->u.alive.msg_inq.first;
- p->u.alive.msg_inq.len = 0;
+ p->msg_inq.first = NULL;
+ p->msg_inq.last = &p->msg_inq.first;
+ p->msg_inq.len = 0;
#endif
- p->bif_timers = NULL;
+ p->u.bif_timers = NULL;
p->mbuf = NULL;
p->mbuf_sz = 0;
p->psd = NULL;
@@ -7535,18 +7185,19 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
if (IS_TRACED(parent)) {
- if (parent->trace_flags & F_TRACE_SOS) {
- p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
- p->tracer_proc = parent->tracer_proc;
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS) {
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
+ ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
}
if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
trace_proc_spawn(parent, p->id, mod, func, args);
}
- if (parent->trace_flags & F_TRACE_SOS1) { /* Overrides TRACE_CHILDREN */
- p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
- p->tracer_proc = parent->tracer_proc;
- p->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
- parent->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) {
+ /* Overrides TRACE_CHILDREN */
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
+ ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
+ ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
}
}
@@ -7573,13 +7224,13 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
if (IS_TRACED(parent)) {
- if (parent->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) {
- p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
- p->tracer_proc = parent->tracer_proc; /* maybe steal */
+ if (ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS);
+ ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent); /*maybe steal*/
- if (parent->trace_flags & F_TRACE_SOL1) { /* maybe override */
- p ->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- parent->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
}
}
}
@@ -7610,8 +7261,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->scheduler_data = NULL;
p->suspendee = NIL;
p->pending_suspenders = NULL;
- p->u.alive.pending_exit.reason = THE_NON_VALUE;
- p->u.alive.pending_exit.bp = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ p->pending_exit.bp = NULL;
#endif
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
@@ -7665,8 +7316,8 @@ void erts_init_empty_process(Process *p)
p->rcount = 0;
p->id = ERTS_INVALID_PID;
p->reds = 0;
- p->tracer_proc = NIL;
- p->trace_flags = F_INITIAL_TRACE_FLAGS;
+ ERTS_TRACER_PROC(p) = NIL;
+ ERTS_TRACE_FLAGS(p) = F_INITIAL_TRACE_FLAGS;
p->group_leader = ERTS_INVALID_PID;
p->flags = 0;
p->fvalue = NIL;
@@ -7679,14 +7330,14 @@ void erts_init_empty_process(Process *p)
p->bin_old_vheap = 0;
p->bin_vheap_mature = 0;
#ifdef ERTS_SMP
- p->u.alive.ptimer = NULL;
+ p->common.u.alive.ptimer = NULL;
#else
- memset(&(p->u.alive.tm), 0, sizeof(ErlTimer));
+ memset(&(p->common.u.alive.tm), 0, sizeof(ErlTimer));
#endif
p->next = NULL;
p->off_heap.first = NULL;
p->off_heap.overhead = 0;
- p->reg = NULL;
+ p->common.u.alive.reg = NULL;
p->heap_sz = 0;
p->high_water = NULL;
#ifdef INCREMENTAL
@@ -7706,7 +7357,7 @@ void erts_init_empty_process(Process *p)
p->msg.last = &p->msg.first;
p->msg.save = &p->msg.first;
p->msg.len = 0;
- p->bif_timers = NULL;
+ p->u.bif_timers = NULL;
p->dictionary = NULL;
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
@@ -7734,7 +7385,7 @@ void erts_init_empty_process(Process *p)
p->parent = NIL;
p->approx_started = 0;
- p->started_interval = 0;
+ p->common.u.alive.started_interval = 0;
#ifdef HIPE
hipe_init_process(&p->hipe);
@@ -7760,13 +7411,13 @@ void erts_init_empty_process(Process *p)
#ifdef ERTS_SMP
p->scheduler_data = NULL;
- p->u.alive.msg_inq.first = NULL;
- p->u.alive.msg_inq.last = &p->u.alive.msg_inq.first;
- p->u.alive.msg_inq.len = 0;
+ p->msg_inq.first = NULL;
+ p->msg_inq.last = &p->msg_inq.first;
+ p->msg_inq.len = 0;
p->suspendee = NIL;
p->pending_suspenders = NULL;
- p->u.alive.pending_exit.reason = THE_NON_VALUE;
- p->u.alive.pending_exit.bp = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ p->pending_exit.bp = NULL;
erts_proc_lock_init(p);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0));
@@ -7789,11 +7440,11 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->hend == NULL);
ASSERT(p->heap == NULL);
ASSERT(p->id == ERTS_INVALID_PID);
- ASSERT(p->tracer_proc == NIL);
- ASSERT(p->trace_flags == F_INITIAL_TRACE_FLAGS);
+ ASSERT(ERTS_TRACER_PROC(p) == NIL);
+ ASSERT(ERTS_TRACE_FLAGS(p) == F_INITIAL_TRACE_FLAGS);
ASSERT(p->group_leader == ERTS_INVALID_PID);
ASSERT(p->next == NULL);
- ASSERT(p->reg == NULL);
+ ASSERT(p->common.u.alive.reg == NULL);
ASSERT(p->heap_sz == 0);
ASSERT(p->high_water == NULL);
#ifdef INCREMENTAL
@@ -7809,7 +7460,7 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->suspend_monitors == NULL);
ASSERT(p->msg.first == NULL);
ASSERT(p->msg.len == 0);
- ASSERT(p->bif_timers == NULL);
+ ASSERT(p->u.bif_timers == NULL);
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
ASSERT(p->cp == NULL);
@@ -7819,12 +7470,12 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->parent == NIL);
#ifdef ERTS_SMP
- ASSERT(p->u.alive.msg_inq.first == NULL);
- ASSERT(p->u.alive.msg_inq.len == 0);
+ ASSERT(p->msg_inq.first == NULL);
+ ASSERT(p->msg_inq.len == 0);
ASSERT(p->suspendee == NIL);
ASSERT(p->pending_suspenders == NULL);
- ASSERT(p->u.alive.pending_exit.reason == THE_NON_VALUE);
- ASSERT(p->u.alive.pending_exit.bp == NULL);
+ ASSERT(p->pending_exit.reason == THE_NON_VALUE);
+ ASSERT(p->pending_exit.bp == NULL);
#endif
/* Thing that erts_cleanup_empty_process() cleans up */
@@ -8021,7 +7672,7 @@ void
erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
{
ErtsProcLocks xlocks;
- ASSERT(is_value(c_p->u.alive.pending_exit.reason));
+ ASSERT(is_value(c_p->pending_exit.reason));
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks);
ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN);
ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)
@@ -8040,10 +7691,10 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
set_proc_exiting(c_p,
erts_smp_atomic32_read_acqb(&c_p->state),
- c_p->u.alive.pending_exit.reason,
- c_p->u.alive.pending_exit.bp);
- c_p->u.alive.pending_exit.reason = THE_NON_VALUE;
- c_p->u.alive.pending_exit.bp = NULL;
+ c_p->pending_exit.reason,
+ c_p->pending_exit.bp);
+ c_p->pending_exit.reason = THE_NON_VALUE;
+ c_p->pending_exit.bp = NULL;
if (xlocks)
erts_smp_proc_unlock(c_p, xlocks);
@@ -8264,7 +7915,7 @@ send_exit_signal(Process *c_p, /* current process if and only
else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) {
#ifdef ERTS_SMP
if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) {
- ASSERT(!rp->u.alive.pending_exit.bp);
+ ASSERT(!rp->pending_exit.bp);
if (rp == c_p && (*rp_locks & ERTS_PROC_LOCK_MAIN)) {
/* Ensure that all locks on c_p are locked before
@@ -8314,7 +7965,7 @@ send_exit_signal(Process *c_p, /* current process if and only
set_pending_exit:
if (is_immed(rsn)) {
- rp->u.alive.pending_exit.reason = rsn;
+ rp->pending_exit.reason = rsn;
}
else {
Eterm *hp;
@@ -8322,11 +7973,11 @@ send_exit_signal(Process *c_p, /* current process if and only
ErlHeapFragment *bp = new_message_buffer(sz);
hp = &bp->mem[0];
- rp->u.alive.pending_exit.reason = copy_struct(rsn,
- sz,
- &hp,
- &bp->off_heap);
- rp->u.alive.pending_exit.bp = bp;
+ rp->pending_exit.reason = copy_struct(rsn,
+ sz,
+ &hp,
+ &bp->off_heap);
+ rp->pending_exit.bp = bp;
}
erts_smp_atomic32_read_bor_relb(&rp->state,
ERTS_PSFLG_PENDING_EXIT);
@@ -8653,15 +8304,6 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
erts_destroy_suspend_monitor(smon);
}
-#ifdef ERTS_SMP
-static void
-proc_dec_refc(void *vproc)
-{
- erts_smp_proc_dec_refc((Process *) vproc);
-}
-#endif
-
-
 /* this function finishes a process and propagates exit messages - called
by process_main when a process dies */
void
@@ -8699,10 +8341,10 @@ erts_do_exit_process(Process* p, Eterm reason)
state = set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state));
if (state & ERTS_PSFLG_PENDING_EXIT) {
/* Process exited before pending exit was received... */
- p->u.alive.pending_exit.reason = THE_NON_VALUE;
- if (p->u.alive.pending_exit.bp) {
- free_message_buffer(p->u.alive.pending_exit.bp);
- p->u.alive.pending_exit.bp = NULL;
+ p->pending_exit.reason = THE_NON_VALUE;
+ if (p->pending_exit.bp) {
+ free_message_buffer(p->pending_exit.bp);
+ p->pending_exit.bp = NULL;
}
}
@@ -8721,21 +8363,23 @@ erts_do_exit_process(Process* p, Eterm reason)
erts_trace_check_exiting(p->id);
- ASSERT((p->trace_flags & F_INITIAL_TRACE_FLAGS) == F_INITIAL_TRACE_FLAGS);
+ ASSERT((ERTS_TRACE_FLAGS(p) & F_INITIAL_TRACE_FLAGS)
+ == F_INITIAL_TRACE_FLAGS);
cancel_timer(p); /* Always cancel timer just in case */
- /*
- * The timer of this process can *not* be used anymore. The field used
- * for the timer is now used for misc exiting data.
- */
- p->u.exit_data = NULL;
-
- if (p->bif_timers)
+ if (p->u.bif_timers)
erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ /*
+ * The p->u.bif_timers of this process can *not* be used anymore;
+ * will be overwritten by misc termination data.
+ */
+ p->u.terminate = NULL;
+
+
erts_continue_exit_process(p);
}
@@ -8807,9 +8451,9 @@ erts_continue_exit_process(Process *p)
* The registered name *should* be the last "erlang resource" to
* cleanup.
*/
- if (p->reg) {
+ if (p->common.u.alive.reg) {
(void) erts_unregister_name(p, ERTS_PROC_LOCK_MAIN, NULL, THE_NON_VALUE);
- ASSERT(!p->reg);
+ ASSERT(!p->common.u.alive.reg);
}
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -8824,21 +8468,10 @@ erts_continue_exit_process(Process *p)
#endif
{
- int maybe_save;
- int pix;
/* Do *not* use erts_get_runq_proc() */
ErtsRunQueue *rq;
rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p));
- pix = internal_pid_index(p->id);
-
- erts_smp_rwmtx_rlock(&erts_proc_tab_rwmtx);
- maybe_save = saved_term_procs.end != NULL;
- if (maybe_save) {
- erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx);
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
- }
-
erts_smp_runq_lock(rq);
#ifdef ERTS_SMP
@@ -8849,22 +8482,11 @@ erts_continue_exit_process(Process *p)
p->scheduler_data->current_process = NULL;
p->scheduler_data->free_process = p;
#endif
- /* Time of death! */
- erts_smp_atomic_set_relb(&erts_proc.tab[pix], ERTS_AINT_NULL);
- ASSERT(erts_smp_atomic32_read_nob(&process_count) > 0);
- erts_smp_atomic32_dec_relb(&process_count);
+ /* Time of death! */
+ erts_ptab_delete_element(&erts_proc, &p->common);
erts_smp_runq_unlock(rq);
-
- if (!maybe_save)
- erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx);
- else {
- if (saved_term_procs.end)
- save_terminating_process(p);
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
- }
-
}
/*
@@ -8949,9 +8571,6 @@ erts_continue_exit_process(Process *p)
delete_process(p);
#ifdef ERTS_SMP
- erts_schedule_thr_prgr_later_op(proc_dec_refc,
- (void *) p,
- &p->u.release_data);
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
#endif
@@ -9003,9 +8622,9 @@ cancel_timer(Process* p)
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
p->flags &= ~(F_INSLPQUEUE|F_TIMO);
#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(p->u.alive.ptimer);
+ erts_cancel_smp_ptimer(p->common.u.alive.ptimer);
#else
- erts_cancel_timer(&p->u.alive.tm);
+ erts_cancel_timer(&p->common.u.alive.tm);
#endif
}
@@ -9026,12 +8645,12 @@ set_timer(Process* p, Uint timeout)
p->flags &= ~F_TIMO;
#ifdef ERTS_SMP
- erts_create_smp_ptimer(&p->u.alive.ptimer,
+ erts_create_smp_ptimer(&p->common.u.alive.ptimer,
p->id,
(ErlTimeoutProc) timeout_proc,
timeout);
#else
- erts_set_timer(&p->u.alive.tm,
+ erts_set_timer(&p->common.u.alive.tm,
(ErlTimeoutProc) timeout_proc,
NULL,
(void*) p,
@@ -9049,7 +8668,7 @@ erts_stack_dump(int to, void *to_arg, Process *p)
Eterm* sp;
int yreg = -1;
- if (p->trace_flags & F_SENSITIVE) {
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
return;
}
erts_program_counter_info(to, to_arg, p);
@@ -9137,1072 +8756,13 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
return yreg;
}
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * The processes/0 BIF implementation. *
-\* */
-
-
-#define ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED 25
-#define ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE 1000
-#define ERTS_PROCESSES_BIF_MIN_START_REDS \
- (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \
- / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED)
-
-#define ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS 1
-
-#define ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED 10
-
-#define ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS \
- (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \
- / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED)
-
-
-#define ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED 75
-
-#define ERTS_PROCS_DBG_DO_TRACE 0
-
-#ifdef DEBUG
-# define ERTS_PROCESSES_BIF_DEBUGLEVEL 100
-#else
-# define ERTS_PROCESSES_BIF_DEBUGLEVEL 0
-#endif
-
-#define ERTS_PROCS_DBGLVL_CHK_HALLOC 1
-#define ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS 5
-#define ERTS_PROCS_DBGLVL_CHK_PIDS 10
-#define ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST 20
-#define ERTS_PROCS_DBGLVL_CHK_RESLIST 20
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0
-# define ERTS_PROCS_ASSERT(EXP)
-#else
-# define ERTS_PROCS_ASSERT(EXP) \
- ((void) ((EXP) \
- ? 1 \
- : (debug_processes_assert_error(#EXP, __FILE__, __LINE__), 0)))
-#endif
-
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC
-# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ) \
-do { \
- ERTS_PROCS_ASSERT(!(PBDP)->debug.heap); \
- ERTS_PROCS_ASSERT(!(PBDP)->debug.heap_size); \
- (PBDP)->debug.heap = (HP); \
- (PBDP)->debug.heap_size = (SZ); \
-} while (0)
-# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP) \
-do { \
- ERTS_PROCS_ASSERT((PBDP)->debug.heap); \
- ERTS_PROCS_ASSERT((PBDP)->debug.heap_size); \
- ERTS_PROCS_ASSERT((PBDP)->debug.heap + (PBDP)->debug.heap_size == (HP));\
- (PBDP)->debug.heap = NULL; \
- (PBDP)->debug.heap_size = 0; \
-} while (0)
-# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP) \
-do { \
- (PBDP)->debug.heap = NULL; \
- (PBDP)->debug.heap_size = 0; \
-} while (0)
-#else
-# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ)
-# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP)
-# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
-# define ERTS_PROCS_DBG_CHK_RESLIST(R) debug_processes_check_res_list((R))
-#else
-# define ERTS_PROCS_DBG_CHK_RESLIST(R)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
-# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP) debug_processes_save_all_pids((PBDP))
-# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP) \
-do { \
- if (!(PBDP)->debug.correct_pids_verified) \
- debug_processes_verify_all_pids((PBDP)); \
-} while (0)
-# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP) \
-do { \
- if ((PBDP)->debug.correct_pids) { \
- erts_free(ERTS_ALC_T_PROCS_PIDS, \
- (PBDP)->debug.correct_pids); \
- (PBDP)->debug.correct_pids = NULL; \
- } \
-} while(0)
-# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP) \
-do { \
- (PBDP)->debug.correct_pids_verified = 0; \
- (PBDP)->debug.correct_pids = NULL; \
-} while (0)
-#else
-# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP)
-# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP)
-# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP)
-# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
-# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, IC) \
- debug_processes_check_found_pid((PBDP), (PID), (IC), 1)
-# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, IC) \
- debug_processes_check_found_pid((PBDP), (PID), (IC), 0)
-#else
-# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, IC)
-# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, IC)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
-# define ERTS_PROCS_DBG_CHK_TPLIST() \
- debug_processes_check_term_proc_list()
-# define ERTS_PROCS_DBG_CHK_FREELIST(FL) \
- debug_processes_check_term_proc_free_list(FL)
-#else
-# define ERTS_PROCS_DBG_CHK_TPLIST()
-# define ERTS_PROCS_DBG_CHK_FREELIST(FL)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0
-#if ERTS_PROCS_DBG_DO_TRACE
-# define ERTS_PROCS_DBG_INIT(P, PBDP) (PBDP)->debug.caller = (P)->id
-# else
-# define ERTS_PROCS_DBG_INIT(P, PBDP)
-# endif
-# define ERTS_PROCS_DBG_CLEANUP(PBDP)
-#else
-# define ERTS_PROCS_DBG_INIT(P, PBDP) \
-do { \
- (PBDP)->debug.caller = (P)->id; \
- ERTS_PROCS_DBG_HEAP_ALLOC_INIT((PBDP)); \
- ERTS_PROCS_DBG_CHK_PIDS_INIT((PBDP)); \
-} while (0)
-# define ERTS_PROCS_DBG_CLEANUP(PBDP) \
-do { \
- ERTS_PROCS_DBG_CLEANUP_CHK_PIDS((PBDP)); \
-} while (0)
-#endif
-
-#if ERTS_PROCS_DBG_DO_TRACE
-# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT) \
- erts_fprintf(stderr, "%T %s:%d:%s(): %s\n", \
- (PID), __FILE__, __LINE__, #FUNC, #WHAT)
-#else
-# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT)
-#endif
-
-static Uint processes_bif_tab_chunks;
-static Export processes_trap_export;
-
-typedef struct {
- Uint64 interval;
-} ErtsProcessesBifChunkInfo;
-
-typedef enum {
- INITIALIZING,
- INSPECTING_TABLE,
- INSPECTING_TERMINATED_PROCESSES,
- BUILDING_RESULT,
- RETURN_RESULT
-} ErtsProcessesBifState;
-
-typedef struct {
- ErtsProcessesBifState state;
- Eterm caller;
- ErtsProcessesBifChunkInfo *chunk;
- int tix;
- int pid_ix;
- int pid_sz;
- Eterm *pid;
- ErtsTermProcElement *bif_invocation; /* Only used when > 1 chunk */
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0 || ERTS_PROCS_DBG_DO_TRACE
- struct {
- Eterm caller;
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- Uint64 *pid_started;
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC
- Eterm *heap;
- Uint heap_size;
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
- int correct_pids_verified;
- Eterm *correct_pids;
-#endif
- } debug;
-#endif
-
-} ErtsProcessesBifData;
-
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0
-static void debug_processes_assert_error(char* expr, char* file, int line);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
-static void debug_processes_check_res_list(Eterm list);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
-static void debug_processes_save_all_pids(ErtsProcessesBifData *pbdp);
-static void debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
-static void debug_processes_check_found_pid(ErtsProcessesBifData *pbdp,
- Eterm pid,
- Uint64 ic,
- int pid_should_be_found);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
-static void debug_processes_check_term_proc_list(void);
-static void debug_processes_check_term_proc_free_list(ErtsTermProcElement *tpep);
-#endif
-
-static void
-save_terminating_process(Process *p)
-{
- ErtsTermProcElement *tpep = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL,
- sizeof(ErtsTermProcElement));
- ERTS_PROCS_ASSERT(saved_term_procs.start && saved_term_procs.end);
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx));
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- tpep->prev = saved_term_procs.end;
- tpep->next = NULL;
- tpep->ix = internal_pid_index(p->id);
- tpep->u.process.pid = p->id;
- tpep->u.process.spawned = p->started_interval;
- tpep->u.process.exited = get_proc_interval();
-
- saved_term_procs.end->next = tpep;
- saved_term_procs.end = tpep;
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- ERTS_PROCS_ASSERT(tpep->prev->ix >= 0
- ? (tpep->u.process.exited
- >= tpep->prev->u.process.exited)
- : (tpep->u.process.exited
- >= tpep->prev->u.bif_invocation.interval));
-}
-
-static void
-cleanup_processes_bif_data(Binary *bp)
-{
- ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(bp);
-
- ERTS_PROCS_DBG_TRACE(pbdp->debug.caller, cleanup_processes_bif_data, call);
-
- if (pbdp->state != INITIALIZING) {
-
- if (pbdp->chunk) {
- erts_free(ERTS_ALC_T_PROCS_CNKINF, pbdp->chunk);
- pbdp->chunk = NULL;
- }
- if (pbdp->pid) {
- erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->pid);
- pbdp->pid = NULL;
- }
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- if (pbdp->debug.pid_started) {
- erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.pid_started);
- pbdp->debug.pid_started = NULL;
- }
-#endif
-
- if (pbdp->bif_invocation) {
- ErtsTermProcElement *tpep;
-
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
-
- ERTS_PROCS_DBG_TRACE(pbdp->debug.caller,
- cleanup_processes_bif_data,
- term_proc_cleanup);
-
- tpep = pbdp->bif_invocation;
- pbdp->bif_invocation = NULL;
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- if (tpep->prev) {
- /*
- * Only remove this bif invokation when we
- * have preceding invokations.
- */
- tpep->prev->next = tpep->next;
- if (tpep->next)
- tpep->next->prev = tpep->prev;
- else {
- /*
- * At the time of writing this branch cannot be
- * reached. I don't want to remove this code though
- * since it may be possible to reach this line
- * in the future if the cleanup order in
- * erts_do_exit_process() is changed. The ASSERT(0)
- * is only here to make us aware that the reorder
- * has happened. /rickard
- */
- ASSERT(0);
- saved_term_procs.end = tpep->prev;
- }
- erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep);
- }
- else {
- /*
- * Free all elements until next bif invokation
- * is found.
- */
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- do {
- ErtsTermProcElement *ftpep = tpep;
- tpep = tpep->next;
- erts_free(ERTS_ALC_T_PROCS_TPROC_EL, ftpep);
- } while (tpep && tpep->ix >= 0);
- saved_term_procs.start = tpep;
- if (tpep)
- tpep->prev = NULL;
- else
- saved_term_procs.end = NULL;
- }
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
-
- }
- }
-
- ERTS_PROCS_DBG_TRACE(pbdp->debug.caller,
- cleanup_processes_bif_data,
- return);
- ERTS_PROCS_DBG_CLEANUP(pbdp);
-}
-
-static int
-processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp)
-{
- ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp);
- int have_reds;
- int reds;
- int locked = 0;
-
- do {
- switch (pbdp->state) {
- case INITIALIZING:
- pbdp->chunk = erts_alloc(ERTS_ALC_T_PROCS_CNKINF,
- (sizeof(ErtsProcessesBifChunkInfo)
- * processes_bif_tab_chunks));
- pbdp->tix = 0;
- pbdp->pid_ix = 0;
-
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
- locked = 1;
-
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, init);
-
- pbdp->pid_sz = erts_process_count();
- pbdp->pid = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
- sizeof(Eterm)*pbdp->pid_sz);
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
- sizeof(Uint64)*pbdp->pid_sz);
-#endif
-
- ERTS_PROCS_DBG_SAVE_PIDS(pbdp);
-
- if (processes_bif_tab_chunks == 1)
- pbdp->bif_invocation = NULL;
- else {
- /*
- * We will have to access the table multiple times
- * releasing the table lock in between chunks.
- */
- pbdp->bif_invocation = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL,
- sizeof(ErtsTermProcElement));
- pbdp->bif_invocation->ix = -1;
- pbdp->bif_invocation->u.bif_invocation.interval
- = step_proc_interval();
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- pbdp->bif_invocation->next = NULL;
- if (saved_term_procs.end) {
- pbdp->bif_invocation->prev = saved_term_procs.end;
- saved_term_procs.end->next = pbdp->bif_invocation;
- ERTS_PROCS_ASSERT(saved_term_procs.start);
- }
- else {
- pbdp->bif_invocation->prev = NULL;
- saved_term_procs.start = pbdp->bif_invocation;
- }
- saved_term_procs.end = pbdp->bif_invocation;
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- }
-
- pbdp->state = INSPECTING_TABLE;
- /* Fall through */
-
- case INSPECTING_TABLE: {
- int ix = pbdp->tix;
- int indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- int cix = ix / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- int end_ix = ix + indices;
- Uint64 *invocation_interval_p;
-
- invocation_interval_p
- = (pbdp->bif_invocation
- ? &pbdp->bif_invocation->u.bif_invocation.interval
- : NULL);
-
- ERTS_PROCS_ASSERT(is_nil(*res_accp));
- if (!locked) {
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
- locked = 1;
- }
-
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx));
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_table);
-
- if (cix != 0)
- pbdp->chunk[cix].interval = step_proc_interval();
- else if (pbdp->bif_invocation)
- pbdp->chunk[0].interval = *invocation_interval_p;
- /* else: interval is irrelevant */
-
- if (end_ix >= erts_proc.max) {
- ERTS_PROCS_ASSERT(cix+1 == processes_bif_tab_chunks);
- end_ix = erts_proc.max;
- indices = end_ix - ix;
- /* What to do when done with this chunk */
- pbdp->state = (processes_bif_tab_chunks == 1
- ? BUILDING_RESULT
- : INSPECTING_TERMINATED_PROCESSES);
- }
-
- for (; ix < end_ix; ix++) {
- Process *rp = erts_pix2proc(ix);
- if (rp
- && (!invocation_interval_p
- || rp->started_interval < *invocation_interval_p)) {
- ERTS_PROCS_ASSERT(is_internal_pid(rp->id));
- pbdp->pid[pbdp->pid_ix] = rp->id;
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started[pbdp->pid_ix] = rp->started_interval;
-#endif
-
- pbdp->pid_ix++;
- ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz);
- }
- }
-
- pbdp->tix = end_ix;
-
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
- locked = 0;
-
- reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED;
- BUMP_REDS(p, reds);
-
- have_reds = ERTS_BIF_REDS_LEFT(p);
-
- if (have_reds && pbdp->state == INSPECTING_TABLE) {
- ix = pbdp->tix;
- indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- end_ix = ix + indices;
- if (end_ix > erts_proc.max) {
- end_ix = erts_proc.max;
- indices = end_ix - ix;
- }
-
- reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED;
-
- /* Pretend we have no reds left if we haven't got enough
- reductions to complete next chunk */
- if (reds > have_reds)
- have_reds = 0;
- }
-
- break;
- }
-
- case INSPECTING_TERMINATED_PROCESSES: {
- int i;
- int max_reds;
- int free_term_procs = 0;
- Uint64 invocation_interval;
- ErtsTermProcElement *tpep;
- ErtsTermProcElement *free_list = NULL;
-
- tpep = pbdp->bif_invocation;
- ERTS_PROCS_ASSERT(tpep);
- invocation_interval = tpep->u.bif_invocation.interval;
-
- max_reds = have_reds = ERTS_BIF_REDS_LEFT(p);
- if (max_reds > ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS)
- max_reds = ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS;
-
- reds = 0;
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_term_procs);
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- if (tpep->prev)
- tpep->prev->next = tpep->next;
- else {
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- saved_term_procs.start = tpep->next;
-
- if (saved_term_procs.start && saved_term_procs.start->ix >= 0) {
- free_list = saved_term_procs.start;
- free_term_procs = 1;
- }
- }
-
- if (tpep->next)
- tpep->next->prev = tpep->prev;
- else
- saved_term_procs.end = tpep->prev;
-
- tpep = tpep->next;
-
- i = 0;
- while (reds < max_reds && tpep) {
- if (tpep->ix < 0) {
- if (free_term_procs) {
- ERTS_PROCS_ASSERT(free_list);
- ERTS_PROCS_ASSERT(tpep->prev);
-
- tpep->prev->next = NULL; /* end of free_list */
- saved_term_procs.start = tpep;
- tpep->prev = NULL;
- free_term_procs = 0;
- }
- }
- else {
- int cix = tpep->ix/ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- Uint64 chunk_interval = pbdp->chunk[cix].interval;
- Eterm pid = tpep->u.process.pid;
- ERTS_PROCS_ASSERT(is_internal_pid(pid));
-
- if (tpep->u.process.spawned < invocation_interval) {
- if (tpep->u.process.exited < chunk_interval) {
- ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp,
- pid,
- tpep->u.process.spawned);
- pbdp->pid[pbdp->pid_ix] = pid;
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started[pbdp->pid_ix]
- = tpep->u.process.spawned;
-#endif
- pbdp->pid_ix++;
- ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz);
- }
- else {
- ERTS_PROCS_DBG_CHK_PID_FOUND(pbdp,
- pid,
- tpep->u.process.spawned);
- }
- }
- else {
- ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp,
- pid,
- tpep->u.process.spawned);
- }
-
- i++;
- if (i == ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED) {
- reds++;
- i = 0;
- }
- if (free_term_procs)
- reds += ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS;
- }
- tpep = tpep->next;
- }
-
- if (free_term_procs) {
- ERTS_PROCS_ASSERT(free_list);
- saved_term_procs.start = tpep;
- if (!tpep)
- saved_term_procs.end = NULL;
- else {
- ERTS_PROCS_ASSERT(tpep->prev);
- tpep->prev->next = NULL; /* end of free_list */
- tpep->prev = NULL;
- }
- }
-
- if (!tpep) {
- /* Done */
- ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz);
- pbdp->state = BUILDING_RESULT;
- pbdp->bif_invocation->next = free_list;
- free_list = pbdp->bif_invocation;
- pbdp->bif_invocation = NULL;
- }
- else {
- /* Link in bif_invocation again where we left off */
- pbdp->bif_invocation->prev = tpep->prev;
- pbdp->bif_invocation->next = tpep;
- tpep->prev = pbdp->bif_invocation;
- if (pbdp->bif_invocation->prev)
- pbdp->bif_invocation->prev->next = pbdp->bif_invocation;
- else {
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- saved_term_procs.start = pbdp->bif_invocation;
- }
- }
-
- ERTS_PROCS_DBG_CHK_TPLIST();
- ERTS_PROCS_DBG_CHK_FREELIST(free_list);
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
-
- /*
- * We do the actual free of term proc structures now when we
- * have released the table lock instead of when we encountered
- * them. This since free() isn't for free and we don't want to
- * unnecessarily block other schedulers.
- */
- while (free_list) {
- tpep = free_list;
- free_list = tpep->next;
- erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep);
- }
-
- have_reds -= reds;
- if (have_reds < 0)
- have_reds = 0;
- BUMP_REDS(p, reds);
- break;
- }
-
- case BUILDING_RESULT: {
- int conses, ix, min_ix;
- Eterm *hp;
- Eterm res = *res_accp;
-
- ERTS_PROCS_DBG_VERIFY_PIDS(pbdp);
- ERTS_PROCS_DBG_CHK_RESLIST(res);
-
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, begin_build_res);
-
- have_reds = ERTS_BIF_REDS_LEFT(p);
- conses = ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED*have_reds;
- min_ix = pbdp->pid_ix - conses;
- if (min_ix < 0) {
- min_ix = 0;
- conses = pbdp->pid_ix;
- }
-
- hp = HAlloc(p, conses*2);
- ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, conses*2);
-
- for (ix = pbdp->pid_ix - 1; ix >= min_ix; ix--) {
- ERTS_PROCS_ASSERT(is_internal_pid(pbdp->pid[ix]));
- res = CONS(hp, pbdp->pid[ix], res);
- hp += 2;
- }
-
- ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp);
-
- pbdp->pid_ix = min_ix;
- if (min_ix == 0)
- pbdp->state = RETURN_RESULT;
- else {
- pbdp->pid_sz = min_ix;
- pbdp->pid = erts_realloc(ERTS_ALC_T_PROCS_PIDS,
- pbdp->pid,
- sizeof(Eterm)*pbdp->pid_sz);
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started = erts_realloc(ERTS_ALC_T_PROCS_PIDS,
- pbdp->debug.pid_started,
- (sizeof(Uint64)
- * pbdp->pid_sz));
-#endif
- }
- reds = conses/ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED;
- BUMP_REDS(p, reds);
- have_reds -= reds;
-
- ERTS_PROCS_DBG_CHK_RESLIST(res);
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, end_build_res);
- *res_accp = res;
- break;
- }
- case RETURN_RESULT:
- cleanup_processes_bif_data(mbp);
- return 1;
-
- default:
- erl_exit(ERTS_ABORT_EXIT,
- "erlang:processes/0: Invalid state: %d\n",
- (int) pbdp->state);
- }
-
-
- } while (have_reds || pbdp->state == RETURN_RESULT);
-
- return 0;
-}
-
-/*
- * processes_trap/2 is a hidden BIF that processes/0 traps to.
- */
-
-static BIF_RETTYPE processes_trap(BIF_ALIST_2)
-{
- Eterm res_acc;
- Binary *mbp;
-
- /*
- * This bif cannot be called from erlang code. It can only be
- * trapped to from processes/0; therefore, a bad argument
- * is a processes/0 internal error.
- */
-
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, call);
- ERTS_PROCS_ASSERT(is_nil(BIF_ARG_1) || is_list(BIF_ARG_1));
-
- res_acc = BIF_ARG_1;
-
- ERTS_PROCS_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
-
- ERTS_PROCS_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
- == cleanup_processes_bif_data);
- ERTS_PROCS_ASSERT(
- ((ErtsProcessesBifData *) ERTS_MAGIC_BIN_DATA(mbp))->debug.caller
- == BIF_P->id);
-
- if (processes_bif_engine(BIF_P, &res_acc, mbp)) {
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, return);
- BIF_RET(res_acc);
- }
- else {
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, trap);
- ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, BIF_ARG_2);
- }
-}
-
-
-
/*
- * The actual processes/0 BIF.
+ * The processes/0 BIF.
*/
-
BIF_RETTYPE processes_0(BIF_ALIST_0)
{
- /*
- * A requirement: The list of pids returned should be a consistent
- * snapshot of all processes existing at some point
- * in time during the execution of processes/0. Since
- * processes might terminate while processes/0 is
- * executing, we have to keep track of terminated
- * processes and add them to the result. We also
- * ignore processes created after processes/0 has
- * begun executing.
- */
- Eterm res_acc = NIL;
- Binary *mbp = erts_create_magic_binary(sizeof(ErtsProcessesBifData),
- cleanup_processes_bif_data);
- ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp);
-
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, call);
- pbdp->state = INITIALIZING;
- ERTS_PROCS_DBG_INIT(BIF_P, pbdp);
-
- if (ERTS_BIF_REDS_LEFT(BIF_P) >= ERTS_PROCESSES_BIF_MIN_START_REDS
- && processes_bif_engine(BIF_P, &res_acc, mbp)) {
- erts_bin_free(mbp);
- ERTS_PROCS_DBG_CHK_RESLIST(res_acc);
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, return);
- BIF_RET(res_acc);
- }
- else {
- Eterm *hp;
- Eterm magic_bin;
- ERTS_PROCS_DBG_CHK_RESLIST(res_acc);
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mbp);
- ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp);
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, trap);
- ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, magic_bin);
- }
-}
-
-static void
-init_processes_bif(void)
-{
- saved_term_procs.start = NULL;
- saved_term_procs.end = NULL;
- processes_bif_tab_chunks = (((erts_proc.max - 1)
- / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE)
- + 1);
-
- /* processes_trap/2 is a hidden BIF that the processes/0 BIF traps to. */
- sys_memset((void *) &processes_trap_export, 0, sizeof(Export));
- processes_trap_export.address = &processes_trap_export.code[3];
- processes_trap_export.code[0] = am_erlang;
- processes_trap_export.code[1] = am_processes_trap;
- processes_trap_export.code[2] = 2;
- processes_trap_export.code[3] = (BeamInstr) em_apply_bif;
- processes_trap_export.code[4] = (BeamInstr) &processes_trap;
-
-}
-
-/*
- * Debug stuff
- */
-
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-int
-erts_dbg_check_halloc_lock(Process *p)
-{
- if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
- return 1;
- if (p->id == ERTS_INVALID_PID)
- return 1;
- if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
- return 1;
- if (erts_thr_progress_is_blocking())
- return 1;
- return 0;
-}
-#endif
-
-Eterm
-erts_debug_processes(Process *c_p)
-{
- /* This is the old processes/0 BIF. */
- int i;
- Uint need;
- Eterm res;
- Eterm* hp;
- Process *p;
- Eterm *hp_end;
-
- erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);
-
- res = NIL;
- need = erts_process_count() * 2;
- hp = HAlloc(c_p, need); /* we need two heap words for each pid */
- hp_end = hp + need;
-
- /* make the list by scanning bakward */
-
-
- for (i = erts_proc.max-1; i >= 0; i--) {
- p = erts_pix2proc(i);
- if (p) {
- res = CONS(hp, p->id, res);
- hp += 2;
- }
- }
-
- erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
-
- HRelease(c_p, hp_end, hp);
-
- return res;
-}
-
-Eterm
-erts_debug_processes_bif_info(Process *c_p)
-{
- ERTS_DECL_AM(processes_bif_info);
- Eterm elements[] = {
- AM_processes_bif_info,
- make_small((Uint) ERTS_PROCESSES_BIF_MIN_START_REDS),
- make_small((Uint) processes_bif_tab_chunks),
- make_small((Uint) ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE),
- make_small((Uint) ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED),
- make_small((Uint) ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS),
- make_small((Uint) ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED),
- make_small((Uint) ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS),
- make_small((Uint) ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED),
- make_small((Uint) ERTS_PROCESSES_BIF_DEBUGLEVEL)
- };
- Uint sz = 0;
- Eterm *hp;
- (void) erts_bld_tuplev(NULL, &sz, sizeof(elements)/sizeof(Eterm), elements);
- hp = HAlloc(c_p, sz);
- return erts_bld_tuplev(&hp, NULL, sizeof(elements)/sizeof(Eterm), elements);
-}
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
-static void
-debug_processes_check_found_pid(ErtsProcessesBifData *pbdp,
- Eterm pid,
- Uint64 ic,
- int pid_should_be_found)
-{
- int i;
- for (i = 0; i < pbdp->pid_ix; i++) {
- if (pbdp->pid[i] == pid && pbdp->debug.pid_started[i] == ic) {
- ERTS_PROCS_ASSERT(pid_should_be_found);
- return;
- }
- }
- ERTS_PROCS_ASSERT(!pid_should_be_found);
-}
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
-static void
-debug_processes_check_res_list(Eterm list)
-{
- while (is_list(list)) {
- Eterm* consp = list_val(list);
- Eterm hd = CAR(consp);
- ERTS_PROCS_ASSERT(is_internal_pid(hd));
- list = CDR(consp);
- }
-
- ERTS_PROCS_ASSERT(is_nil(list));
-}
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
-
-static void
-debug_processes_save_all_pids(ErtsProcessesBifData *pbdp)
-{
- int ix, tix, cpix;
- pbdp->debug.correct_pids_verified = 0;
- pbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
- sizeof(Eterm)*pbdp->pid_sz);
-
- for (tix = 0, cpix = 0; tix < erts_proc.max; tix++) {
- Process *rp = erts_pix2proc(tix);
- if (rp) {
- ERTS_PROCS_ASSERT(is_internal_pid(rp->id));
- pbdp->debug.correct_pids[cpix++] = rp->id;
- ERTS_PROCS_ASSERT(cpix <= pbdp->pid_sz);
- }
- }
- ERTS_PROCS_ASSERT(cpix == pbdp->pid_sz);
-
- for (ix = 0; ix < pbdp->pid_sz; ix++)
- pbdp->pid[ix] = make_small(ix);
-}
-
-static void
-debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp)
-{
- int ix, cpix;
-
- ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz);
-
- for (ix = 0; ix < pbdp->pid_sz; ix++) {
- int found = 0;
- Eterm pid = pbdp->pid[ix];
- ERTS_PROCS_ASSERT(is_internal_pid(pid));
- for (cpix = ix; cpix < pbdp->pid_sz; cpix++) {
- if (pbdp->debug.correct_pids[cpix] == pid) {
- pbdp->debug.correct_pids[cpix] = NIL;
- found = 1;
- break;
- }
- }
- if (!found) {
- for (cpix = 0; cpix < ix; cpix++) {
- if (pbdp->debug.correct_pids[cpix] == pid) {
- pbdp->debug.correct_pids[cpix] = NIL;
- found = 1;
- break;
- }
- }
- }
- ERTS_PROCS_ASSERT(found);
- }
- pbdp->debug.correct_pids_verified = 1;
-
- erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.correct_pids);
- pbdp->debug.correct_pids = NULL;
+ return erts_ptab_list(BIF_P, &erts_proc);
}
-#endif /* ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS */
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
-static void
-debug_processes_check_term_proc_list(void)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx));
- if (!saved_term_procs.start)
- ERTS_PROCS_ASSERT(!saved_term_procs.end);
- else {
- Uint64 curr_interval = get_proc_interval();
- Uint64 *prev_x_interval_p = NULL;
- ErtsTermProcElement *tpep;
-
- for (tpep = saved_term_procs.start; tpep; tpep = tpep->next) {
- if (!tpep->prev)
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- else
- ERTS_PROCS_ASSERT(tpep->prev->next == tpep);
- if (!tpep->next)
- ERTS_PROCS_ASSERT(saved_term_procs.end == tpep);
- else
- ERTS_PROCS_ASSERT(tpep->next->prev == tpep);
- if (tpep->ix < 0) {
- Uint64 interval = tpep->u.bif_invocation.interval;
- ERTS_PROCS_ASSERT(interval <= curr_interval);
- }
- else {
- Uint64 s_interval = tpep->u.process.spawned;
- Uint64 x_interval = tpep->u.process.exited;
-
- ERTS_PROCS_ASSERT(s_interval <= x_interval);
- if (prev_x_interval_p)
- ERTS_PROCS_ASSERT(*prev_x_interval_p <= x_interval);
- prev_x_interval_p = &tpep->u.process.exited;
- ERTS_PROCS_ASSERT(is_internal_pid(tpep->u.process.pid));
- ERTS_PROCS_ASSERT(tpep->ix
- == internal_pid_index(tpep->u.process.pid));
- }
- }
-
- }
-}
-
-static void
-debug_processes_check_term_proc_free_list(ErtsTermProcElement *free_list)
-{
- if (saved_term_procs.start) {
- ErtsTermProcElement *ftpep;
- ErtsTermProcElement *tpep;
-
- for (ftpep = free_list; ftpep; ftpep = ftpep->next) {
- for (tpep = saved_term_procs.start; tpep; tpep = tpep->next)
- ERTS_PROCS_ASSERT(ftpep != tpep);
- }
- }
-}
-
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0
-
-static void
-debug_processes_assert_error(char* expr, char* file, int line)
-{
- fflush(stdout);
- erts_fprintf(stderr, "%s:%d: Assertion failed: %s\n", file, line, expr);
- fflush(stderr);
- abort();
-}
-
-#endif
-
-/* *\
- * End of the processes/0 BIF implementation. *
-\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
  * A nice system halt closing all open ports goes as follows:
@@ -10230,3 +8790,19 @@ void erl_halt(int code)
notify_reap_ports_relb();
}
}
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int
+erts_dbg_check_halloc_lock(Process *p)
+{
+ if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
+ return 1;
+ if (p->id == ERTS_INVALID_PID)
+ return 1;
+ if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
+ return 1;
+ if (erts_thr_progress_is_blocking())
+ return 1;
+ return 0;
+}
+#endif
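
For reference, the erl_process.c hunks above replace the open-coded pid table
(slot reservation, last-pid bookkeeping, and the thousand-odd lines of the
processes/0 trap machinery) with the generic table in erl_ptab.c. Below is a
minimal sketch of the resulting process life cycle, restricted to the calls
that actually appear in the hunks; the wrapper names example_spawn() and
example_exit(), and the include, are illustrative assumptions rather than part
of the patch.

#include "global.h"    /* Process, erts_proc, erts_alloc_fnf(), ... */

/* Creation: allocate the struct first, then ask the table for a slot.
 * The early-init callback runs while the slot is being reserved and is
 * handed the pid data chosen for it (see early_init_process_struct()). */
static Process *example_spawn(ErtsRunQueue *rq, erts_aint32_t state)
{
    ErtsEarlyProcInit init_arg;
    Process *p = erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process));

    if (!p)
        return NULL;                        /* out of memory */

    init_arg.proc = p;
    init_arg.run_queue = rq;
    init_arg.state = state;

    if (!erts_ptab_new_element(&erts_proc, &p->common,
                               (void *) &init_arg,
                               early_init_process_struct)) {
        erts_free(ERTS_ALC_T_PROC, p);      /* table full: system_limit */
        return NULL;
    }
    return p;
}

/* Termination: the element is unhooked at "time of death"; the
 * terminated-process bookkeeping that processes/0 needs is now kept
 * inside erl_ptab.c instead of save_terminating_process(). */
static void example_exit(Process *p)
{
    erts_ptab_delete_element(&erts_proc, &p->common);
}
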
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index b3d44743c6..17d1ff0bd6 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -723,8 +723,9 @@ struct ErtsPendingSuspend_ {
# define BIN_OLD_VHEAP(p) (p)->bin_old_vheap
struct process {
- Eterm id; /* The pid of this process
- (need to be first in struct) */
+ ErtsPTabElementCommon common; /* *Need* to be first in struct */
+
+ Eterm id; /* Duplicate (to be removed) */
/* All fields in the PCB that differs between different heap
* architectures, have been moved to the end of this struct to
* make sure that as few offsets as possible differ. Different
@@ -771,9 +772,6 @@ struct process {
int prio; /* Priority of process */
int schedule_count; /* Times left to reschedule a low prio process */
Uint reds; /* No of reductions for this process */
- Eterm tracer_proc; /* If proc is traced, this is the tracer
- (can NOT be boxed) */
- Uint trace_flags; /* Trace flags (used to be in flags) */
Eterm group_leader; /* Pid in charge
(can be boxed) */
Uint flags; /* Trap exit, etc (no trace flags anymore) */
@@ -783,7 +781,6 @@ struct process {
Process *next; /* Pointer to next process in run queue */
- struct reg_proc *reg; /* NULL iff not registered */
ErtsLink *nlinks;
ErtsMonitor *monitors; /* The process monitors, both ends */
@@ -795,7 +792,10 @@ struct process {
ErlMessageQueue msg; /* Message queue */
- ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
+ union {
+ ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
+ void *terminate;
+ } u;
ProcDict *dictionary; /* Process dictionary, may be NULL */
@@ -820,7 +820,6 @@ struct process {
*/
Eterm parent; /* Pid of process that created this process. */
erts_approx_time_t approx_started; /* Time when started. */
- Uint64 started_interval;
/* This is the place, where all fields that differs between memory
* architectures, have gone to.
@@ -842,25 +841,11 @@ struct process {
Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
- union {
- struct {
-#ifdef ERTS_SMP
- ErtsSmpPTimer *ptimer;
- ErlMessageInQueue msg_inq;
- ErtsPendExit pending_exit;
-#else
- ErlTimer tm; /* Timer entry */
-#endif
- } alive; /* when process is alive */
-#ifdef ERTS_SMP
- ErtsThrPrgrLaterOp release_data; /* when releasing process struct */
-#endif
- void *exit_data; /* Misc data referred during termination */
- } u;
-
erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
#ifdef ERTS_SMP
+ ErlMessageInQueue msg_inq;
+ ErtsPendExit pending_exit;
erts_proc_lock_t lock;
ErtsSchedulerData *scheduler_data;
Eterm suspendee;
@@ -908,6 +893,8 @@ struct process {
#endif
};
+extern const Process erts_invalid_process;
+
#ifdef CHECK_FOR_HOLES
# define INIT_HOLE_CHECK(p) \
do { \
@@ -1032,8 +1019,6 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra);
Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz);
#endif
-extern erts_smp_rwmtx_t erts_proc_tab_rwmtx;
-extern erts_smp_atomic_t *erts_proc_tab;
#ifdef HYBRID
extern Uint erts_num_active_procs;
extern Process** erts_active_procs;
@@ -1065,10 +1050,6 @@ struct erts_system_profile_flags_t {
unsigned int exclusive : 1;
};
extern struct erts_system_profile_flags_t erts_system_profile_flags;
-
-#define IS_TRACED(p) ( (p)->tracer_proc != NIL )
-#define ARE_TRACE_FLAGS_ON(p,tf) ( ((p)->trace_flags & (tf|F_SENSITIVE)) == (tf) )
-#define IS_TRACED_FL(p,tf) ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) )
/* process flags */
#define F_HIBERNATE_SCHED (1 << 0) /* Schedule out after hibernate op */
@@ -1223,7 +1204,7 @@ void erts_schedule_multi_misc_aux_work(int ignore_self,
erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
-void erts_init_process(int);
+void erts_init_process(int, int);
Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm);
Uint erts_run_queues_len(Uint *);
void erts_add_to_runq(Process *);
@@ -1242,7 +1223,6 @@ void set_timer(Process*, Uint);
void cancel_timer(Process*);
/* Begin System profile */
Uint erts_runnable_process_count(void);
-Uint erts_process_count(void);
/* End System profile */
void erts_init_empty_process(Process *p);
void erts_cleanup_empty_process(Process* p);
@@ -1291,9 +1271,6 @@ void erts_deep_process_dump(int, void *);
Eterm erts_get_reader_groups_map(Process *c_p);
Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
-Sint erts_test_next_pid(int, Uint);
-Eterm erts_debug_processes(Process *c_p);
-Eterm erts_debug_processes_bif_info(Process *c_p);
Uint erts_debug_nbalance(void);
int erts_debug_wait_deallocations(Process *c_p);
@@ -1601,7 +1578,7 @@ ERTS_GLB_INLINE
Eterm erts_get_current_pid(void)
{
Process *proc = erts_get_current_process();
- return proc ? proc->id : THE_NON_VALUE;
+ return proc ? proc->common.id : THE_NON_VALUE;
}
ERTS_GLB_INLINE
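
The key structural change in erl_process.h is that struct process now begins
with an ErtsPTabElementCommon, which takes over the former id, reg,
started_interval and timer fields (and, judging by the new ERTS_TRACER_PROC()
and ERTS_TRACE_FLAGS() accessors, the trace fields as well), while the new u
union lets the bif_timers pointer be reused for termination data once the
timers have been cancelled. A short sketch of why the common part *needs* to
come first, assuming only the accessors used in the hunks above (do_lookup()
is a hypothetical wrapper):

/* The table stores each element by address, and process code casts that
 * address straight back to Process *, so 'common' must sit at offset
 * zero; the ASSERT in alloc_process() checks exactly this. */
static Process *do_lookup(Eterm pid)
{
    int pix = internal_pid_index(pid);
    Process *p = (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix);

    if (!p || p->common.id != pid)
        return NULL;        /* slot empty, or already reused by another pid */

    ASSERT(((char *) p) == ((char *) &p->common));
    return p;
}
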
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 964dc1ae3e..5c0512b6d9 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -60,11 +60,11 @@ extern BeamInstr beam_continue_exit[];
void
erts_deep_process_dump(int to, void *to_arg)
{
- int i;
+ int i, max = erts_ptab_max(&erts_proc);
all_binaries = NULL;
- for (i = 0; i < erts_max_processes; i++) {
+ for (i = 0; i < max; i++) {
Process *p = erts_pix2proc(i);
if (p && p->i != ENULL) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
@@ -85,7 +85,7 @@ dump_process_info(int to, void *to_arg, Process *p)
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
- if ((p->trace_flags & F_SENSITIVE) == 0 && p->msg.first) {
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) {
erts_print(to, to_arg, "=proc_messages:%T\n", p->id);
for (mp = p->msg.first; mp != NULL; mp = mp->next) {
Eterm mesg = ERL_MESSAGE_TERM(mp);
@@ -100,7 +100,7 @@ dump_process_info(int to, void *to_arg, Process *p)
}
}
- if ((p->trace_flags & F_SENSITIVE) == 0) {
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
if (p->dictionary) {
erts_print(to, to_arg, "=proc_dictionary:%T\n", p->id);
erts_deep_dictionary_dump(to, to_arg,
@@ -108,7 +108,7 @@ dump_process_info(int to, void *to_arg, Process *p)
}
}
- if ((p->trace_flags & F_SENSITIVE) == 0) {
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
erts_print(to, to_arg, "=proc_stack:%T\n", p->id);
for (sp = p->stop; sp < STACK_START(p); sp++) {
yreg = stack_element_dump(to, to_arg, p, sp, yreg);
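
In erl_process_dump.c the fixed erts_max_processes bound is replaced by asking
the table itself for its size. The same pattern recurs in the other converted
call sites; a minimal sketch, mirroring the loop in erts_deep_process_dump()
above (visit() is a hypothetical per-process callback):

static void for_each_process(void (*visit)(Process *))
{
    int ix, max = erts_ptab_max(&erts_proc);

    for (ix = 0; ix < max; ix++) {
        Process *p = erts_pix2proc(ix);
        if (p && p->i != ENULL)   /* same guard as erts_deep_process_dump() */
            visit(p);
    }
}
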
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index fa935af315..2984665cbd 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -68,8 +68,6 @@
#include "erl_process.h"
#include "erl_thr_progress.h"
-const Process erts_proc_lock_busy = {ERTS_INVALID_PID};
-
#ifdef ERTS_SMP
#if ERTS_PROC_LOCK_OWN_IMPL
@@ -464,7 +462,7 @@ wait_for_locks(Process *p,
ErtsProcLocks need_locks,
ErtsProcLocks olflgs)
{
- erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
erts_tse_t *wtr;
erts_proc_lock_queues_t *qs;
@@ -645,7 +643,7 @@ erts_proc_unlock_failed(Process *p,
erts_pix_lock_t *pixlck,
ErtsProcLocks wait_locks)
{
- erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lock);
@@ -695,40 +693,40 @@ proc_safelock(int is_managed,
* Locks with the same lock order should be locked on p1 before p2.
*/
if (a_proc) {
- if (a_proc->id < b_proc->id) {
+ if (a_proc->common.id < b_proc->common.id) {
p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = a_proc->id;
+ pid1 = a_proc->common.id;
#endif
need_locks1 = a_need_locks;
have_locks1 = a_have_locks;
p2 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid2 = b_proc->id;
+ pid2 = b_proc->common.id;
#endif
need_locks2 = b_need_locks;
have_locks2 = b_have_locks;
}
- else if (a_proc->id > b_proc->id) {
+ else if (a_proc->common.id > b_proc->common.id) {
p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = b_proc->id;
+ pid1 = b_proc->common.id;
#endif
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
p2 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid2 = a_proc->id;
+ pid2 = a_proc->common.id;
#endif
need_locks2 = a_need_locks;
have_locks2 = a_have_locks;
}
else {
ERTS_LC_ASSERT(a_proc == b_proc);
- ERTS_LC_ASSERT(a_proc->id == b_proc->id);
+ ERTS_LC_ASSERT(a_proc->common.id == b_proc->common.id);
p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = a_proc->id;
+ pid1 = a_proc->common.id;
#endif
need_locks1 = a_need_locks | b_need_locks;
have_locks1 = a_have_locks | b_have_locks;
@@ -743,7 +741,7 @@ proc_safelock(int is_managed,
else {
p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = b_proc->id;
+ pid1 = b_proc->common.id;
#endif
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
@@ -945,8 +943,8 @@ erts_pid2proc_opt(Process *c_p,
ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
need_locks = pid_need_locks;
- if (c_p && c_p->id == pid) {
- ASSERT(c_p->id != ERTS_INVALID_PID);
+ if (c_p && c_p->common.id == pid) {
+ ASSERT(c_p->common.id != ERTS_INVALID_PID);
ASSERT(c_p == erts_pix2proc(pix));
if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
@@ -962,10 +960,10 @@ erts_pid2proc_opt(Process *c_p,
dhndl = erts_thr_progress_unmanaged_delay();
- proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);
+ proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);
if (proc) {
- if (proc->id != pid)
+ if (proc->common.id != pid)
proc = NULL;
else if (!need_locks) {
if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
@@ -1067,7 +1065,7 @@ erts_pid2proc_opt(Process *c_p,
&& (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
? ERTS_PROC_IS_EXITING(proc)
: (proc
- != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) {
+ != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {
erts_smp_proc_unlock(proc, need_locks);
@@ -1107,22 +1105,22 @@ erts_proc_lock_init(Process *p)
erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
- erts_mtx_init_x(&p->lock.main, "proc_main", p->id);
+ erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id);
ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.main.lc);
#endif
- erts_mtx_init_x(&p->lock.link, "proc_link", p->id);
+ erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id);
ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.link.lc);
#endif
- erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->id);
+ erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id);
ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
- erts_mtx_init_x(&p->lock.status, "proc_status", p->id);
+ erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id);
ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_trylock(1, &p->lock.status.lc);
@@ -1162,11 +1160,11 @@ erts_proc_lock_fin(Process *p)
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_proc_lock_init(Process *p) {
- if (p->id != ERTS_INVALID_PID) {
- erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->id);
+ if (p->common.id != ERTS_INVALID_PID) {
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->common.id);
} else {
erts_lcnt_init_lock(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK);
erts_lcnt_init_lock(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK);
@@ -1280,7 +1278,7 @@ void
erts_proc_lc_lock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
@@ -1304,7 +1302,7 @@ void
erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
@@ -1328,7 +1326,7 @@ void
erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
@@ -1355,7 +1353,7 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
@@ -1390,7 +1388,7 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
@@ -1425,7 +1423,7 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
@@ -1462,7 +1460,7 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
{
if (locks & ERTS_PROC_LOCKS_ALL) {
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN)
@@ -1487,7 +1485,7 @@ void erts_proc_lc_chk_only_proc_main(Process *p)
{
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
erts_lc_check_exact(&proc_main, 1);
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
@@ -1511,19 +1509,19 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
ERTS_PROC_LC_EMPTY_LOCK_INIT};
if (locks & ERTS_PROC_LOCK_MAIN) {
have_locks[have_locks_len].id = lc_id.proc_lock_main;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_LINK) {
have_locks[have_locks_len].id = lc_id.proc_lock_link;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_lc_lock_t have_locks[4];
@@ -1556,35 +1554,35 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCK_MAIN) {
have_locks[have_locks_len].id = lc_id.proc_lock_main;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_main;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_LINK) {
have_locks[have_locks_len].id = lc_id.proc_lock_link;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_link;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_msgq;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_lc_lock_t have_locks[4];
@@ -1619,16 +1617,16 @@ erts_proc_lc_my_proc_locks(Process *p)
ErtsProcLocks res = 0;
#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t locks[4] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK)};
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
erts_lc_lock_t locks[4] = {p->lock.main.lc,
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 015fda583e..7ca52303da 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -178,8 +178,8 @@ typedef struct erts_proc_lock_t_ {
* on multiple processes, locks on processes with low process ids
* have to be locked before locks on processes with high process
* ids. E.g., if the main and the message queue locks are to be
- * locked on processes p1 and p2 and p1->id < p2->id, then locks
- * should be locked in the following order:
+ * locked on processes p1 and p2 and p1->common.id < p2->common.id,
+ * then locks should be locked in the following order:
* 1. main lock on p1
* 2. main lock on p2
* 3. message queue lock on p1
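A minimal sketch of honouring this ordering when taking the main lock on two processes; the helper name is hypothetical, while erts_smp_proc_lock() and ERTS_PROC_LOCK_MAIN are the macros used throughout ERTS (proc_safelock() in the erl_process_lock.c hunk above performs the same comparison on common.id):

    /* Hypothetical helper, for illustration only: take the main locks of
     * two processes in the required order, lower common.id first. */
    static void lock_both_main(Process *a, Process *b)
    {
        Process *lo = (a->common.id < b->common.id) ? a : b;
        Process *hi = (lo == a) ? b : a;
        erts_smp_proc_lock(lo, ERTS_PROC_LOCK_MAIN);
        if (hi != lo)
            erts_smp_proc_lock(hi, ERTS_PROC_LOCK_MAIN);
    }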
@@ -767,7 +767,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks, file, line);
#elif defined(ERTS_SMP)
@@ -775,7 +775,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks);
#endif /*ERTS_SMP*/
@@ -789,7 +789,7 @@ erts_smp_proc_unlock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
#endif
@@ -805,7 +805,7 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
#endif
@@ -814,21 +814,15 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *p)
{
#ifdef ERTS_SMP
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_aint32_t refc = erts_atomic32_inc_read_nob(&p->lock.refc);
- ERTS_SMP_LC_ASSERT(refc > 1);
-#else
- erts_atomic32_inc_nob(&p->lock.refc);
-#endif
+ erts_ptab_inc_refc(&p->common);
#endif
}
ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
{
#ifdef ERTS_SMP
- erts_aint32_t refc = erts_atomic32_dec_read_nob(&p->lock.refc);
- ERTS_SMP_LC_ASSERT(refc >= 0);
- if (refc == 0)
+ int referred = erts_ptab_dec_test_refc(&p->common);
+ if (!referred)
erts_free_proc(p);
#endif
}
@@ -836,10 +830,8 @@ ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 add_refc)
{
#ifdef ERTS_SMP
- erts_aint32_t refc = erts_atomic32_add_read_nob(&p->lock.refc,
- (erts_aint32_t) add_refc);
- ERTS_SMP_LC_ASSERT(refc >= 0);
- if (refc == 0)
+ int referred = erts_ptab_add_test_refc(&p->common, add_refc);
+ if (!referred)
erts_free_proc(p);
#endif
}
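These three wrappers now delegate reference counting to the generic table element; a hedged sketch of the intended pin/unpin pattern (illustrative only):

    /* Pin a process across a window where no process lock is held, then
     * unpin it.  The struct is freed only when the last reference goes:
     * erts_ptab_dec_test_refc() returning 0 triggers erts_free_proc(). */
    void inspect_without_lock(Process *p)
    {
        erts_smp_proc_inc_refc(p);   /* -> erts_ptab_inc_refc(&p->common) */
        /* ... read fields of *p ... */
        erts_smp_proc_dec_refc(p);   /* may free p if last reference      */
    }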
@@ -875,8 +867,7 @@ void erts_proc_safelock(Process *a_proc,
#define ERTS_P2P_FLG_TRY_LOCK (1 << 1)
#define ERTS_P2P_FLG_SMP_INC_REFC (1 << 2)
-#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_proc_lock_busy)
-extern const Process erts_proc_lock_busy;
+#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_invalid_process)
#define erts_pid2proc(PROC, HL, PID, NL) \
erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0)
@@ -896,33 +887,24 @@ Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int);
ERTS_GLB_INLINE Process *erts_pix2proc(int ix)
{
Process *proc;
- ASSERT(0 <= ix && ix < erts_proc.max);
- proc = (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[ix]);
+ ASSERT(0 <= ix && ix < erts_ptab_max(&erts_proc));
+ proc = (Process *) erts_ptab_pix2intptr_nob(&erts_proc, ix);
return proc == ERTS_PROC_LOCK_BUSY ? NULL : proc;
}
ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid)
{
Process *proc;
- int pix;
- /*
- * In SMP case: Only scheduler threads are allowed
- * to use this function. Other threads need to
- * atomicaly increment refc at lookup, i.e., use
- * erts_pid2proc_opt() with ERTS_P2P_FLG_SMP_INC_REFC.
- */
- ERTS_SMP_LC_ASSERT(erts_get_scheduler_id());
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
if (is_not_internal_pid(pid))
return NULL;
- pix = internal_pid_index(pid);
- proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);
-
- if (proc && proc->id != pid)
+ proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(pid));
+ if (proc && proc->common.id != pid)
return NULL;
-
return proc;
}
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
new file mode 100644
index 0000000000..8195a350fb
--- /dev/null
+++ b/erts/emulator/beam/erl_ptab.c
@@ -0,0 +1,1560 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Process/Port table implementation.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#define ERTS_PTAB_WANT_BIF_IMPL__
+#define ERTS_PTAB_WANT_DEBUG_FUNCS__
+#include "erl_ptab.h"
+#include "global.h"
+#include "erl_binary.h"
+
+typedef struct ErtsPTabListBifData_ ErtsPTabListBifData;
+
+#define ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED 25
+#define ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE 1000
+#define ERTS_PTAB_LIST_BIF_MIN_START_REDS \
+ (ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE \
+ / ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED)
+
+#define ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS 1
+
+#define ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED 10
+
+#define ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS \
+ (ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE \
+ / ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED)
+
+
+#define ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED 75
+
+#define ERTS_PTAB_LIST_DBG_DO_TRACE 0
+
+#ifdef DEBUG
+# define ERTS_PTAB_LIST_BIF_DEBUGLEVEL 100
+#else
+# define ERTS_PTAB_LIST_BIF_DEBUGLEVEL 0
+#endif
+
+#define ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC 1
+#define ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS 5
+#define ERTS_PTAB_LIST_DBGLVL_CHK_PIDS 10
+#define ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST 20
+#define ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST 20
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL == 0
+# define ERTS_PTAB_LIST_ASSERT(EXP)
+#else
+# define ERTS_PTAB_LIST_ASSERT(EXP) \
+ ((void) ((EXP) \
+ ? 1 \
+ : (debug_ptab_list_assert_error(#EXP, \
+ __FILE__, \
+ __LINE__, \
+ __func__), \
+ 0)))
+#endif
+
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC
+# define ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(PTLBDP, HP, SZ) \
+do { \
+ ERTS_PTAB_LIST_ASSERT(!(PTLBDP)->debug.heap); \
+ ERTS_PTAB_LIST_ASSERT(!(PTLBDP)->debug.heap_size); \
+ (PTLBDP)->debug.heap = (HP); \
+ (PTLBDP)->debug.heap_size = (SZ); \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(PTLBDP, HP) \
+do { \
+ ERTS_PTAB_LIST_ASSERT((PTLBDP)->debug.heap); \
+ ERTS_PTAB_LIST_ASSERT((PTLBDP)->debug.heap_size); \
+ ERTS_PTAB_LIST_ASSERT(((PTLBDP)->debug.heap \
+ + (PTLBDP)->debug.heap_size) \
+ == (HP)); \
+ (PTLBDP)->debug.heap = NULL; \
+ (PTLBDP)->debug.heap_size = 0; \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT(PTLBDP) \
+do { \
+ (PTLBDP)->debug.heap = NULL; \
+ (PTLBDP)->debug.heap_size = 0; \
+} while (0)
+#else
+# define ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(PTLBDP, HP, SZ)
+# define ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(PTLBDP, HP)
+# define ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT(PTLBDP)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST
+# define ERTS_PTAB_LIST_DBG_CHK_RESLIST(R) \
+ debug_ptab_list_check_res_list((R))
+#else
+# define ERTS_PTAB_LIST_DBG_CHK_RESLIST(R)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+# define ERTS_PTAB_LIST_DBG_SAVE_PIDS(PTLBDP) \
+ debug_ptab_list_save_all_pids((PTLBDP))
+# define ERTS_PTAB_LIST_DBG_VERIFY_PIDS(PTLBDP) \
+do { \
+ if (!(PTLBDP)->debug.correct_pids_verified) \
+ debug_ptab_list_verify_all_pids((PTLBDP)); \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS(PTLBDP) \
+do { \
+ if ((PTLBDP)->debug.correct_pids) { \
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, \
+ (PTLBDP)->debug.correct_pids); \
+ (PTLBDP)->debug.correct_pids = NULL; \
+ } \
+} while(0)
+# define ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT(PTLBDP) \
+do { \
+ (PTLBDP)->debug.correct_pids_verified = 0; \
+ (PTLBDP)->debug.correct_pids = NULL; \
+} while (0)
+#else
+# define ERTS_PTAB_LIST_DBG_SAVE_PIDS(PTLBDP)
+# define ERTS_PTAB_LIST_DBG_VERIFY_PIDS(PTLBDP)
+# define ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS(PTLBDP)
+# define ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT(PTLBDP)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+# define ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(PTLBDP, PID, IC) \
+ debug_ptab_list_check_found_pid((PTLBDP), (PID), (IC), 1)
+# define ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(PTLBDP, PID, IC) \
+ debug_ptab_list_check_found_pid((PTLBDP), (PID), (IC), 0)
+#else
+# define ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(PTLBDP, PID, IC)
+# define ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(PTLBDP, PID, IC)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST
+# define ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(PTab) \
+ debug_ptab_list_check_del_list((PTab))
+# define ERTS_PTAB_LIST_DBG_CHK_FREELIST(PTab, FL) \
+ debug_ptab_list_check_del_free_list((PTab), (FL))
+#else
+# define ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(PTab)
+# define ERTS_PTAB_LIST_DBG_CHK_FREELIST(PTab, FL)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL == 0
+#if ERTS_PTAB_LIST_DBG_DO_TRACE
+# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP) \
+ (PTLBDP)->debug.caller = (P)->common.id
+# else
+# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP)
+# endif
+# define ERTS_PTAB_LIST_DBG_CLEANUP(PTLBDP)
+#else
+# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP) \
+do { \
+ (PTLBDP)->debug.caller = (P)->common.id; \
+ ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT((PTLBDP)); \
+ ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT((PTLBDP)); \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_CLEANUP(PTLBDP) \
+do { \
+ ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS((PTLBDP)); \
+} while (0)
+#endif
+
+#if ERTS_PTAB_LIST_DBG_DO_TRACE
+# define ERTS_PTAB_LIST_DBG_TRACE(PID, WHAT) \
+ erts_fprintf(stderr, "%T %s:%d:%s(): %s\n", \
+ (PID), __FILE__, __LINE__, __func__, #WHAT)
+#else
+# define ERTS_PTAB_LIST_DBG_TRACE(PID, WHAT)
+#endif
+
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0
+static void debug_ptab_list_assert_error(char* expr,
+ const char* file,
+ int line,
+ const char *func);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST
+static void debug_ptab_list_check_res_list(Eterm list);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+static void debug_ptab_list_save_all_pids(ErtsPTabListBifData *ptlbdp);
+static void debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+static void debug_ptab_list_check_found_pid(ErtsPTabListBifData *ptlbdp,
+ Eterm pid,
+ Uint64 ic,
+ int pid_should_be_found);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST
+static void debug_ptab_list_check_del_list(ErtsPTab *ptab);
+static void debug_ptab_list_check_del_free_list(ErtsPTab *ptab,
+ ErtsPTabDeletedElement *ptdep);
+#endif
+
+struct ErtsPTabDeletedElement_ {
+ ErtsPTabDeletedElement *next;
+ ErtsPTabDeletedElement *prev;
+ int ix;
+ union {
+ struct {
+ Eterm id;
+ Uint64 inserted;
+ Uint64 deleted;
+ } element;
+ struct {
+ Uint64 interval;
+ } bif_invocation;
+ } u;
+};
+
+static Export ptab_list_continue_export;
+
+typedef struct {
+ Uint64 interval;
+} ErtsPTabListBifChunkInfo;
+
+typedef enum {
+ INITIALIZING,
+ INSPECTING_TABLE,
+ INSPECTING_DELETED,
+ BUILDING_RESULT,
+ RETURN_RESULT
+} ErtsPTabListBifState;
+
+struct ErtsPTabListBifData_ {
+ ErtsPTab *ptab;
+ ErtsPTabListBifState state;
+ Eterm caller;
+ ErtsPTabListBifChunkInfo *chunk;
+ int tix;
+ int pid_ix;
+ int pid_sz;
+ Eterm *pid;
+ ErtsPTabDeletedElement *bif_invocation; /* Only used when > 1 chunk */
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0 || ERTS_PTAB_LIST_DBG_DO_TRACE
+ struct {
+ Eterm caller;
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ Uint64 *pid_started;
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC
+ Eterm *heap;
+ Uint heap_size;
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+ int correct_pids_verified;
+ Eterm *correct_pids;
+#endif
+ } debug;
+#endif
+
+};
+
+#ifdef ARCH_32
+
+static ERTS_INLINE Uint64
+dw_aint_to_uint64(erts_dw_aint_t *dw)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (Uint64) dw->dw_sint;
+#else
+ Uint64 res;
+ res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
+ return res;
+#endif
+}
+
+static void
+unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
+#else
+ dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
+ dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
+#endif
+}
+
+static ERTS_INLINE void
+last_data_init_nob(ErtsPTab *ptab, Uint64 val)
+{
+ erts_dw_aint_t dw;
+ unint64_to_dw_aint(&dw, val);
+ erts_smp_dw_atomic_init_nob(&ptab->vola.tile.last_data, &dw);
+}
+
+static ERTS_INLINE void
+last_data_set_relb(ErtsPTab *ptab, Uint64 val)
+{
+ erts_dw_aint_t dw;
+ unint64_to_dw_aint(&dw, val);
+ erts_smp_dw_atomic_set_relb(&ptab->vola.tile.last_data, &dw);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_nob(ErtsPTab *ptab)
+{
+ erts_dw_aint_t dw;
+ erts_smp_dw_atomic_read_nob(&ptab->vola.tile.last_data, &dw);
+ return dw_aint_to_uint64(&dw);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_acqb(ErtsPTab *ptab)
+{
+ erts_dw_aint_t dw;
+ erts_smp_dw_atomic_read_acqb(&ptab->vola.tile.last_data, &dw);
+ return dw_aint_to_uint64(&dw);
+}
+
+static ERTS_INLINE Uint64
+last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
+{
+ erts_dw_aint_t dw_new, dw_xchg;
+
+ unint64_to_dw_aint(&dw_new, new);
+ unint64_to_dw_aint(&dw_xchg, exp);
+
+ if (erts_smp_dw_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
+ &dw_new,
+ &dw_xchg))
+ return exp;
+ else
+ return dw_aint_to_uint64(&dw_xchg);
+}
+
+#elif defined(ARCH_64)
+
+union {
+ erts_smp_atomic_t pid_data;
+ char align[ERTS_CACHE_LINE_SIZE];
+} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static ERTS_INLINE void
+last_data_init_nob(ErtsPTab *ptab, Uint64 val)
+{
+ erts_smp_atomic_init_nob(&ptab->vola.tile.last_data, (erts_aint_t) val);
+}
+
+static ERTS_INLINE void
+last_data_set_relb(ErtsPTab *ptab, Uint64 val)
+{
+ erts_smp_atomic_set_relb(&ptab->vola.tile.last_data, (erts_aint_t) val);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_nob(ErtsPTab *ptab)
+{
+ return (Uint64) erts_smp_atomic_read_nob(&ptab->vola.tile.last_data);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_acqb(ErtsPTab *ptab)
+{
+ return (Uint64) erts_smp_atomic_read_acqb(&ptab->vola.tile.last_data);
+}
+
+static ERTS_INLINE Uint64
+last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
+{
+ return (Uint64) erts_smp_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
+ (erts_aint_t) new,
+ (erts_aint_t) exp);
+}
+
+#else
+# error "Not 64-bit, nor 32-bit architecture..."
+#endif
+
+static ERTS_INLINE int
+last_data_cmp(Uint64 ld1, Uint64 ld2)
+{
+ Uint64 ld1_wrap;
+
+ if (ld1 == ld2)
+ return 0;
+
+ ld1_wrap = ld1 + (((Uint64) 1) << 63);
+
+ if (ld1 < ld1_wrap)
+ return (ld1 < ld2 && ld2 < ld1_wrap) ? -1 : 1;
+ else
+ return (ld1_wrap <= ld2 && ld2 < ld1) ? 1 : -1;
+}
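A few illustrative expectations for this wrapping comparison (editorial, not in the source); adding 2^63 makes every value in the half-range ahead of ld1 count as newer, even after the 64-bit counter wraps:

    /* Illustrative only */
    ASSERT(last_data_cmp(5, 10) < 0);             /* 10 is newer than 5           */
    ASSERT(last_data_cmp(~((Uint64) 0), 3) < 0);  /* 3 is newer: counter wrapped  */
    ASSERT(last_data_cmp(7, 7) == 0);             /* equal                        */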
+
+#define ERTS_PTAB_LastData2EtermData(LD) \
+ ((Eterm) ((LD) & ~(~((Uint64) 0) << ERTS_PTAB_ID_DATA_SIZE)))
+
+static void noop(void *unused)
+{
+
+}
+
+void
+erts_ptab_init_table(ErtsPTab *ptab,
+ ErtsAlcType_t atype,
+ void (*release_element)(void *),
+ ErtsPTabElementCommon *invalid_element,
+ int size,
+ char *name)
+{
+ size_t tab_sz;
+ int max_data_bits;
+ char *tab_end;
+ erts_smp_atomic_t *tab_entry;
+ int proc_bits = ERTS_PROC_BITS;
+ erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name);
+ erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0);
+ last_data_init_nob(ptab, ~((Uint64) 0));
+
+ if (erts_use_r9_pids_ports)
+ proc_bits = ERTS_R9_PROC_BITS;
+
+ if (size > (1 << proc_bits))
+ size = 1 << proc_bits;
+
+ ptab->r.o.max = size;
+
+ tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t));
+ ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, tab_sz);
+ tab_end = ((char *) ptab->r.o.tab) + tab_sz;
+ tab_entry = ptab->r.o.tab;
+ while (tab_end > ((char *) tab_entry)) {
+ erts_smp_atomic_init_nob(tab_entry, ERTS_AINT_NULL);
+ tab_entry++;
+ }
+
+ max_data_bits = erts_fit_in_bits_int32((Sint32) ptab->r.o.max - 1);
+
+ ptab->r.o.tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
+ ptab->r.o.pix_per_cache_line = (ERTS_CACHE_LINE_SIZE
+ / sizeof(erts_smp_atomic_t));
+ if ((ptab->r.o.max & (ptab->r.o.max - 1))
+ | (ptab->r.o.pix_per_cache_line & (ptab->r.o.pix_per_cache_line - 1))) {
+ /* ptab->r.o.max or ptab->r.o.pix_per_cache_line not a power of 2 :-( */
+ ptab->r.o.pix_cl_mask = 0;
+ ptab->r.o.pix_cl_shift = 0;
+ ptab->r.o.pix_cli_mask = 0;
+ ptab->r.o.pix_cli_shift = 0;
+ }
+ else {
+ ASSERT((ptab->r.o.tab_cache_lines
+ & (ptab->r.o.tab_cache_lines - 1)) == 0);
+ ptab->r.o.pix_cl_mask
+ = ptab->r.o.tab_cache_lines-1;
+ ptab->r.o.pix_cl_shift
+ = erts_fit_in_bits_int32(ptab->r.o.pix_per_cache_line-1);
+ ptab->r.o.pix_cli_shift
+ = erts_fit_in_bits_int32(ptab->r.o.pix_cl_mask);
+ ptab->r.o.pix_cli_mask
+ = (1 << (max_data_bits - ptab->r.o.pix_cli_shift)) - 1;
+ }
+
+ ptab->r.o.invalid_element = invalid_element;
+ ptab->r.o.invalid_data = ERTS_PTAB_ID2DATA(invalid_element->id);
+ if (release_element)
+ ptab->r.o.release_element = release_element;
+ else
+ ptab->r.o.release_element = noop;
+
+ erts_smp_interval_init(&ptab->list.data.interval);
+ ptab->list.data.deleted.start = NULL;
+ ptab->list.data.deleted.end = NULL;
+ ptab->list.data.chunks = (((ptab->r.o.max - 1)
+ / ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE)
+ + 1);
+}
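The pix_cl_*/pix_cli_* fields computed above drive the data-to-index mapping in erts_ptab_data2ix() (its definition is cut off at the end of this diff). The intent, sketched below with made-up constants, is that consecutive identifier data values land on different cache lines: the low bits select the cache line, the higher bits the slot within it.

    /* Editorial sketch with illustrative constants: 4 cache lines of
     * 8 entries each (max = 32).  Data 0,1,2,3 map to indices 0,8,16,24,
     * one per cache line, rather than to 0,1,2,3 on the same line. */
    static int sketch_data2ix(int data)
    {
        int cl_mask   = 4 - 1;  /* pix_cl_mask:   tab_cache_lines - 1       */
        int cl_shift  = 3;      /* pix_cl_shift:  log2(pix_per_cache_line)  */
        int cli_shift = 2;      /* pix_cli_shift: log2(tab_cache_lines)     */
        int cli_mask  = 8 - 1;  /* pix_cli_mask:  pix_per_cache_line - 1    */
        return ((data & cl_mask) << cl_shift)
             + ((data >> cli_shift) & cli_mask);
    }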
+
+int
+erts_ptab_initialized(ErtsPTab *ptab)
+{
+ return ptab->r.o.tab != NULL;
+}
+
+int
+erts_ptab_new_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el,
+ void *init_arg,
+ void (*init_ptab_el)(void *, Eterm))
+{
+ int pix;
+ Uint64 ld, exp_ld;
+ Eterm data;
+ erts_aint32_t count;
+ erts_aint_t invalid = (erts_aint_t) ptab->r.o.invalid_element;
+
+ erts_ptab_rlock(ptab);
+
+ count = erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.count);
+ if (count > ptab->r.o.max) {
+ while (1) {
+ erts_aint32_t act_count;
+
+ act_count = erts_smp_atomic32_cmpxchg_relb(&ptab->vola.tile.count,
+ count-1,
+ count);
+ if (act_count == count) {
+ erts_ptab_runlock(ptab);
+ return 0;
+ }
+ count = act_count;
+ if (count <= ptab->r.o.max)
+ break;
+ }
+ }
+
+ ptab_el->u.alive.started_interval
+ = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+
+ ld = last_data_read_acqb(ptab);
+
+ /* Reserve slot */
+ while (1) {
+ ld++;
+ pix = erts_ptab_data2ix(ptab, ERTS_PTAB_LastData2EtermData(ld));
+ if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix]) == ERTS_AINT_NULL) {
+ erts_aint_t val;
+ val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix],
+ invalid,
+ ERTS_AINT_NULL);
+
+ if (ERTS_AINT_NULL == val)
+ break;
+ }
+ }
+
+ data = ERTS_PTAB_LastData2EtermData(ld);
+
+ if (data == ptab->r.o.invalid_data) {
+ /* Do not use invalid data; fix it... */
+ ld += ptab->r.o.max;
+ ASSERT(pix == erts_ptab_data2ix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ ASSERT(data != ptab->r.o.invalid_data);
+ }
+
+ exp_ld = last_data_read_nob(ptab);
+
+ /* Move last data forward */
+ while (1) {
+ Uint64 act_ld;
+ if (last_data_cmp(ld, exp_ld) < 0)
+ break;
+ act_ld = last_data_cmpxchg_relb(ptab, ld, exp_ld);
+ if (act_ld == exp_ld)
+ break;
+ exp_ld = act_ld;
+ }
+
+ init_ptab_el(init_arg, data);
+
+#ifdef ERTS_SMP
+ erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
+#endif
+
+ /* Move into the reserved slot */
+#ifdef DEBUG
+ ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
+ (erts_aint_t) ptab_el));
+#else
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
+#endif
+
+ erts_ptab_runlock(ptab);
+
+ return 1;
+}
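A hedged sketch of how a table user is expected to call erts_ptab_new_element(); MyElement and make_my_id() are hypothetical stand-ins, the real process and port callers live in other files of this patch:

    /* Illustrative only: the callback receives the data part picked by the
     * table and must install the element's id before the slot is published. */
    static void init_my_element(void *arg, Eterm data)
    {
        MyElement *el = (MyElement *) arg;   /* hypothetical element type    */
        el->common.id = make_my_id(data);    /* e.g. tag data as a pid term  */
    }

    static int create_my_element(ErtsPTab *ptab, MyElement *el)
    {
        return erts_ptab_new_element(ptab, &el->common, el, init_my_element);
        /* a return value of 0 means the table is full */
    }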
+
+static void
+save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el)
+{
+ ErtsPTabDeletedElement *ptdep = erts_alloc(ERTS_ALC_T_PTAB_LIST_DEL,
+ sizeof(ErtsPTabDeletedElement));
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start
+ && ptab->list.data.deleted.end);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ ptdep->prev = ptab->list.data.deleted.end;
+ ptdep->next = NULL;
+ ptdep->ix = erts_ptab_data2ix(ptab, ERTS_PTAB_ID2DATA(ptab_el->id));
+ ptdep->u.element.id = ptab_el->id;
+ ptdep->u.element.inserted = ptab_el->u.alive.started_interval;
+ ptdep->u.element.deleted =
+ erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+
+ ptab->list.data.deleted.end->next = ptdep;
+ ptab->list.data.deleted.end = ptdep;
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev->ix >= 0
+ ? (ptdep->u.element.deleted
+ >= ptdep->prev->u.element.deleted)
+ : (ptdep->u.element.deleted
+ >= ptdep->prev->u.bif_invocation.interval));
+}
+
+void
+erts_ptab_delete_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el)
+{
+ int maybe_save;
+ int pix = erts_ptab_data2ix(ptab, ERTS_PTAB_ID2DATA(ptab_el->id));
+
+ ASSERT(erts_get_scheduler_id()); /* *Need* to be a scheduler */
+
+ erts_ptab_rlock(ptab);
+ maybe_save = ptab->list.data.deleted.end != NULL;
+ if (maybe_save) {
+ erts_ptab_runlock(ptab);
+ erts_ptab_rwlock(ptab);
+ }
+
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL);
+
+ ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0);
+ erts_smp_atomic32_dec_relb(&ptab->vola.tile.count);
+
+ if (!maybe_save)
+ erts_ptab_runlock(ptab);
+ else {
+ if (ptab->list.data.deleted.end)
+ save_deleted_element(ptab, ptab_el);
+ erts_ptab_rwunlock(ptab);
+ }
+
+#ifdef ERTS_SMP
+ erts_schedule_thr_prgr_later_op(ptab->r.o.release_element,
+ (void *) ptab_el,
+ &ptab_el->u.release);
+#endif
+}
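In the SMP case the element is handed to a thread-progress "later op", so the release callback given to erts_ptab_init_table() runs only once no managed thread can still hold a pointer obtained from the table. A hedged sketch of such a callback (hypothetical names):

    /* Illustrative only: drop the table's reference and free the element
     * when it was the last one.  my_free_element() is hypothetical. */
    static void release_my_element(void *vel)
    {
        MyElement *el = (MyElement *) vel;
        if (!erts_ptab_dec_test_refc(&el->common))
            my_free_element(el);
    }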
+
+/*
+ * erts_ptab_list() implements BIFs listing the content of the table,
+ * e.g. erlang:processes/0.
+ */
+static void cleanup_ptab_list_bif_data(Binary *bp);
+static int ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp);
+
+
+BIF_RETTYPE
+erts_ptab_list(Process *c_p, ErtsPTab *ptab)
+{
+ /*
+ * A requirement: The list of identifiers returned should be a
+ * consistent snapshot of all elements existing
+ * in the table at some point in time during the
+ * execution of the BIF calling this function.
+ * Since elements might be deleted while the BIF
+ * is executing, we have to keep track of all
+ * deleted elements and add them to the result.
+ * We also ignore elements created after the BIF
+ * has begun executing.
+ */
+ BIF_RETTYPE ret_val;
+ Eterm res_acc = NIL;
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsPTabListBifData),
+ cleanup_ptab_list_bif_data);
+ ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(mbp);
+
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, call);
+ ptlbdp->ptab = ptab;
+ ptlbdp->state = INITIALIZING;
+ ERTS_PTAB_LIST_DBG_INIT(c_p, ptlbdp);
+
+ if (ERTS_BIF_REDS_LEFT(c_p) >= ERTS_PTAB_LIST_BIF_MIN_START_REDS
+ && ptab_list_bif_engine(c_p, &res_acc, mbp)) {
+ erts_bin_free(mbp);
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, return);
+ ERTS_BIF_PREP_RET(ret_val, res_acc);
+ }
+ else {
+ Eterm *hp;
+ Eterm magic_bin;
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
+ hp = HAlloc(c_p, PROC_BIN_SIZE);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, PROC_BIN_SIZE);
+ magic_bin = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, trap);
+ ERTS_BIF_PREP_YIELD2(ret_val,
+ &ptab_list_continue_export,
+ c_p,
+ res_acc,
+ magic_bin);
+ }
+ return ret_val;
+}
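A listing BIF is then expected to be little more than a wrapper around this function; a hedged sketch (the real wrapper for erlang:processes/0 is defined elsewhere in this patch and may differ in detail):

    /* Illustrative wrapper; erts_proc is the global process table. */
    BIF_RETTYPE processes_0(BIF_ALIST_0)
    {
        return erts_ptab_list(BIF_P, &erts_proc);
    }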
+
+static void
+cleanup_ptab_list_bif_data(Binary *bp)
+{
+ ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(bp);
+ ErtsPTab *ptab = ptlbdp->ptab;
+
+ ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, call);
+
+ if (ptlbdp->state != INITIALIZING) {
+
+ if (ptlbdp->chunk) {
+ erts_free(ERTS_ALC_T_PTAB_LIST_CNKI, ptlbdp->chunk);
+ ptlbdp->chunk = NULL;
+ }
+ if (ptlbdp->pid) {
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->pid);
+ ptlbdp->pid = NULL;
+ }
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ if (ptlbdp->debug.pid_started) {
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->debug.pid_started);
+ ptlbdp->debug.pid_started = NULL;
+ }
+#endif
+
+ if (ptlbdp->bif_invocation) {
+ ErtsPTabDeletedElement *ptdep;
+
+ erts_ptab_rwlock(ptab);
+
+ ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, deleted_cleanup);
+
+ ptdep = ptlbdp->bif_invocation;
+ ptlbdp->bif_invocation = NULL;
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ if (ptdep->prev) {
+ /*
+ * Only remove this bif invocation when we
+ * have preceding invocations.
+ */
+ ptdep->prev->next = ptdep->next;
+ if (ptdep->next)
+ ptdep->next->prev = ptdep->prev;
+ else {
+ /*
+ * At the time of writing this branch cannot be
+ * reached. I don't want to remove this code though
+ * since it may be possible to reach this line
+ * in the future if the cleanup order in
+ * erts_do_exit_process() is changed. The ASSERT(0)
+ * is only here to make us aware that the reorder
+ * has happened. /rickard
+ */
+ ASSERT(0);
+ ptab->list.data.deleted.end = ptdep->prev;
+ }
+ erts_free(ERTS_ALC_T_PTAB_LIST_DEL, ptdep);
+ }
+ else {
+ /*
+ * Free all elements until the next bif invocation
+ * is found.
+ */
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep);
+ do {
+ ErtsPTabDeletedElement *fptdep = ptdep;
+ ptdep = ptdep->next;
+ erts_free(ERTS_ALC_T_PTAB_LIST_DEL, fptdep);
+ } while (ptdep && ptdep->ix >= 0);
+ ptab->list.data.deleted.start = ptdep;
+ if (ptdep)
+ ptdep->prev = NULL;
+ else
+ ptab->list.data.deleted.end = NULL;
+ }
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ erts_ptab_rwunlock(ptab);
+
+ }
+ }
+
+ ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, return);
+ ERTS_PTAB_LIST_DBG_CLEANUP(ptlbdp);
+}
+
+static int
+ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp)
+{
+ ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(mbp);
+ ErtsPTab *ptab = ptlbdp->ptab;
+ int have_reds;
+ int reds;
+ int locked = 0;
+
+ do {
+ switch (ptlbdp->state) {
+ case INITIALIZING:
+ ptlbdp->chunk = erts_alloc(ERTS_ALC_T_PTAB_LIST_CNKI,
+ (sizeof(ErtsPTabListBifChunkInfo)
+ * ptab->list.data.chunks));
+ ptlbdp->tix = 0;
+ ptlbdp->pid_ix = 0;
+
+ erts_ptab_rwlock(ptab);
+ locked = 1;
+
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, init);
+
+ ptlbdp->pid_sz = erts_ptab_count(ptab);
+ ptlbdp->pid = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ sizeof(Eterm)*ptlbdp->pid_sz);
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started
+ = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ sizeof(Uint64)*ptlbdp->pid_sz);
+#endif
+
+ ERTS_PTAB_LIST_DBG_SAVE_PIDS(ptlbdp);
+
+ if (ptab->list.data.chunks == 1)
+ ptlbdp->bif_invocation = NULL;
+ else {
+ /*
+ * We will have to access the table multiple times,
+ * releasing the table lock in between chunks.
+ */
+ ptlbdp->bif_invocation
+ = erts_alloc(ERTS_ALC_T_PTAB_LIST_DEL,
+ sizeof(ErtsPTabDeletedElement));
+ ptlbdp->bif_invocation->ix = -1;
+ ptlbdp->bif_invocation->u.bif_invocation.interval
+ = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ ptlbdp->bif_invocation->next = NULL;
+ if (ptab->list.data.deleted.end) {
+ ptlbdp->bif_invocation->prev = ptab->list.data.deleted.end;
+ ptab->list.data.deleted.end->next = ptlbdp->bif_invocation;
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start);
+ }
+ else {
+ ptlbdp->bif_invocation->prev = NULL;
+ ptab->list.data.deleted.start = ptlbdp->bif_invocation;
+ }
+ ptab->list.data.deleted.end = ptlbdp->bif_invocation;
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ }
+
+ ptlbdp->state = INSPECTING_TABLE;
+ /* Fall through */
+
+ case INSPECTING_TABLE: {
+ int ix = ptlbdp->tix;
+ int indices = ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ int cix = ix / ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ int end_ix = ix + indices;
+ Uint64 *invocation_interval_p;
+ ErtsPTabElementCommon *invalid_element;
+
+ invocation_interval_p
+ = (ptlbdp->bif_invocation
+ ? &ptlbdp->bif_invocation->u.bif_invocation.interval
+ : NULL);
+
+ ERTS_PTAB_LIST_ASSERT(is_nil(*res_accp));
+ if (!locked) {
+ erts_ptab_rwlock(ptab);
+ locked = 1;
+ }
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, insp_table);
+
+ if (cix != 0)
+ ptlbdp->chunk[cix].interval
+ = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ else if (ptlbdp->bif_invocation)
+ ptlbdp->chunk[0].interval = *invocation_interval_p;
+ /* else: interval is irrelevant */
+
+ if (end_ix >= ptab->r.o.max) {
+ ERTS_PTAB_LIST_ASSERT(cix+1 == ptab->list.data.chunks);
+ end_ix = ptab->r.o.max;
+ indices = end_ix - ix;
+ /* What to do when done with this chunk */
+ ptlbdp->state = (ptab->list.data.chunks == 1
+ ? BUILDING_RESULT
+ : INSPECTING_DELETED);
+ }
+
+ invalid_element = ptab->r.o.invalid_element;
+ for (; ix < end_ix; ix++) {
+ ErtsPTabElementCommon *el;
+ el = (ErtsPTabElementCommon *) erts_ptab_pix2intptr_nob(ptab,
+ ix);
+ if (el
+ && el != invalid_element
+ && (!invocation_interval_p
+ || el->u.alive.started_interval < *invocation_interval_p)) {
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(el->id));
+ ptlbdp->pid[ptlbdp->pid_ix] = el->id;
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started[ptlbdp->pid_ix]
+ = el->u.alive.started_interval;
+#endif
+
+ ptlbdp->pid_ix++;
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix <= ptlbdp->pid_sz);
+ }
+ }
+
+ ptlbdp->tix = end_ix;
+
+ erts_ptab_rwunlock(ptab);
+ locked = 0;
+
+ reds = indices/ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED;
+ BUMP_REDS(c_p, reds);
+
+ have_reds = ERTS_BIF_REDS_LEFT(c_p);
+
+ if (have_reds && ptlbdp->state == INSPECTING_TABLE) {
+ ix = ptlbdp->tix;
+ indices = ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ end_ix = ix + indices;
+ if (end_ix > ptab->r.o.max) {
+ end_ix = ptab->r.o.max;
+ indices = end_ix - ix;
+ }
+
+ reds = indices/ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED;
+
+ /* Pretend we have no reds left if we haven't got enough
+ reductions to complete next chunk */
+ if (reds > have_reds)
+ have_reds = 0;
+ }
+
+ break;
+ }
+
+ case INSPECTING_DELETED: {
+ int i;
+ int max_reds;
+ int free_deleted = 0;
+ Uint64 invocation_interval;
+ ErtsPTabDeletedElement *ptdep;
+ ErtsPTabDeletedElement *free_list = NULL;
+
+ ptdep = ptlbdp->bif_invocation;
+ ERTS_PTAB_LIST_ASSERT(ptdep);
+ invocation_interval = ptdep->u.bif_invocation.interval;
+
+ max_reds = have_reds = ERTS_BIF_REDS_LEFT(c_p);
+ if (max_reds > ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS)
+ max_reds = ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS;
+
+ reds = 0;
+ erts_ptab_rwlock(ptab);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, insp_term_procs);
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ if (ptdep->prev)
+ ptdep->prev->next = ptdep->next;
+ else {
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep);
+ ptab->list.data.deleted.start = ptdep->next;
+
+ if (ptab->list.data.deleted.start
+ && ptab->list.data.deleted.start->ix >= 0) {
+ free_list = ptab->list.data.deleted.start;
+ free_deleted = 1;
+ }
+ }
+
+ if (ptdep->next)
+ ptdep->next->prev = ptdep->prev;
+ else
+ ptab->list.data.deleted.end = ptdep->prev;
+
+ ptdep = ptdep->next;
+
+ i = 0;
+ while (reds < max_reds && ptdep) {
+ if (ptdep->ix < 0) {
+ if (free_deleted) {
+ ERTS_PTAB_LIST_ASSERT(free_list);
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev);
+
+ ptdep->prev->next = NULL; /* end of free_list */
+ ptab->list.data.deleted.start = ptdep;
+ ptdep->prev = NULL;
+ free_deleted = 0;
+ }
+ }
+ else {
+ int cix = ptdep->ix/ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ Uint64 chunk_interval = ptlbdp->chunk[cix].interval;
+ Eterm pid = ptdep->u.element.id;
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(pid));
+
+ if (ptdep->u.element.inserted < invocation_interval) {
+ if (ptdep->u.element.deleted < chunk_interval) {
+ ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(
+ ptlbdp,
+ pid,
+ ptdep->u.element.inserted);
+ ptlbdp->pid[ptlbdp->pid_ix] = pid;
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started[ptlbdp->pid_ix]
+ = ptdep->u.element.inserted;
+#endif
+ ptlbdp->pid_ix++;
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix
+ <= ptlbdp->pid_sz);
+ }
+ else {
+ ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(
+ ptlbdp,
+ pid,
+ ptdep->u.element.inserted);
+ }
+ }
+ else {
+ ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(
+ ptlbdp,
+ pid,
+ ptdep->u.element.inserted);
+ }
+
+ i++;
+ if (i == ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED) {
+ reds++;
+ i = 0;
+ }
+ if (free_deleted)
+ reds += ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS;
+ }
+ ptdep = ptdep->next;
+ }
+
+ if (free_deleted) {
+ ERTS_PTAB_LIST_ASSERT(free_list);
+ ptab->list.data.deleted.start = ptdep;
+ if (!ptdep)
+ ptab->list.data.deleted.end = NULL;
+ else {
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev);
+ ptdep->prev->next = NULL; /* end of free_list */
+ ptdep->prev = NULL;
+ }
+ }
+
+ if (!ptdep) {
+ /* Done */
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix == ptlbdp->pid_sz);
+ ptlbdp->state = BUILDING_RESULT;
+ ptlbdp->bif_invocation->next = free_list;
+ free_list = ptlbdp->bif_invocation;
+ ptlbdp->bif_invocation = NULL;
+ }
+ else {
+ /* Link in bif_invocation again where we left off */
+ ptlbdp->bif_invocation->prev = ptdep->prev;
+ ptlbdp->bif_invocation->next = ptdep;
+ ptdep->prev = ptlbdp->bif_invocation;
+ if (ptlbdp->bif_invocation->prev)
+ ptlbdp->bif_invocation->prev->next = ptlbdp->bif_invocation;
+ else {
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start
+ == ptdep);
+ ptab->list.data.deleted.start = ptlbdp->bif_invocation;
+ }
+ }
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+ ERTS_PTAB_LIST_DBG_CHK_FREELIST(ptab, free_list);
+ erts_ptab_rwunlock(ptab);
+
+ /*
+ * We do the actual free of deleted structures now when we
+ * have released the table lock instead of when we encountered
+ * them. We do this because free() isn't for free and we don't want to
+ * unnecessarily block other schedulers.
+ */
+ while (free_list) {
+ ptdep = free_list;
+ free_list = ptdep->next;
+ erts_free(ERTS_ALC_T_PTAB_LIST_DEL, ptdep);
+ }
+
+ have_reds -= reds;
+ if (have_reds < 0)
+ have_reds = 0;
+ BUMP_REDS(c_p, reds);
+ break;
+ }
+
+ case BUILDING_RESULT: {
+ int conses, ix, min_ix;
+ Eterm *hp;
+ Eterm res = *res_accp;
+
+ ERTS_PTAB_LIST_DBG_VERIFY_PIDS(ptlbdp);
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res);
+
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, begin_build_res);
+
+ have_reds = ERTS_BIF_REDS_LEFT(c_p);
+ conses = ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED*have_reds;
+ min_ix = ptlbdp->pid_ix - conses;
+ if (min_ix < 0) {
+ min_ix = 0;
+ conses = ptlbdp->pid_ix;
+ }
+
+ hp = HAlloc(c_p, conses*2);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, conses*2);
+
+ for (ix = ptlbdp->pid_ix - 1; ix >= min_ix; ix--) {
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(ptlbdp->pid[ix]));
+ res = CONS(hp, ptlbdp->pid[ix], res);
+ hp += 2;
+ }
+
+ ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
+
+ ptlbdp->pid_ix = min_ix;
+ if (min_ix == 0)
+ ptlbdp->state = RETURN_RESULT;
+ else {
+ ptlbdp->pid_sz = min_ix;
+ ptlbdp->pid = erts_realloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ ptlbdp->pid,
+ sizeof(Eterm)*ptlbdp->pid_sz);
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started
+ = erts_realloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ ptlbdp->debug.pid_started,
+ sizeof(Uint64) * ptlbdp->pid_sz);
+#endif
+ }
+ reds = conses/ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED;
+ BUMP_REDS(c_p, reds);
+ have_reds -= reds;
+
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, end_build_res);
+ *res_accp = res;
+ break;
+ }
+ case RETURN_RESULT:
+ cleanup_ptab_list_bif_data(mbp);
+ return 1;
+
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:ptab_list_bif_engine(): Invalid state: %d\n",
+ __FILE__, __LINE__, (int) ptlbdp->state);
+ }
+
+
+ } while (have_reds || ptlbdp->state == RETURN_RESULT);
+
+ return 0;
+}
+
+/*
+ * ptab_list_continue/2 is a hidden BIF that the original BIF traps to
+ * if there is too much work to do in one go.
+ */
+
+static BIF_RETTYPE ptab_list_continue(BIF_ALIST_2)
+{
+ Eterm res_acc;
+ Binary *mbp;
+
+ /*
+ * This bif cannot be called from erlang code. It can only be
+ * trapped to from other BIFs; therefore, a bad argument
+ * is an internal error and should never occur...
+ */
+
+ ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, call);
+ ERTS_PTAB_LIST_ASSERT(is_nil(BIF_ARG_1) || is_list(BIF_ARG_1));
+
+ res_acc = BIF_ARG_1;
+
+ ERTS_PTAB_LIST_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
+
+ mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+
+ ERTS_PTAB_LIST_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
+ == cleanup_ptab_list_bif_data);
+ ERTS_PTAB_LIST_ASSERT(
+ ((ErtsPTabListBifData *) ERTS_MAGIC_BIN_DATA(mbp))->debug.caller
+ == BIF_P->common.id);
+
+ if (ptab_list_bif_engine(BIF_P, &res_acc, mbp)) {
+ ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, return);
+ BIF_RET(res_acc);
+ }
+ else {
+ ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, trap);
+ ERTS_BIF_YIELD2(&ptab_list_continue_export, BIF_P, res_acc, BIF_ARG_2);
+ }
+}
+
+void
+erts_ptab_init(void)
+{
+ /* ptab_list_continue/2 is a hidden BIF that the original BIF traps to. */
+ sys_memset((void *) &ptab_list_continue_export, 0, sizeof(Export));
+ ptab_list_continue_export.address = &ptab_list_continue_export.code[3];
+ ptab_list_continue_export.code[0] = am_erlang;
+ ptab_list_continue_export.code[1] = am_ptab_list_continue;
+ ptab_list_continue_export.code[2] = 2;
+ ptab_list_continue_export.code[3] = (BeamInstr) em_apply_bif;
+ ptab_list_continue_export.code[4] = (BeamInstr) &ptab_list_continue;
+}
+
+/*
+ * Debug stuff
+ */
+
+Sint
+erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next)
+{
+ Uint64 ld;
+ Sint res;
+ Eterm data;
+ int first_pix = -1;
+
+ erts_ptab_rwlock(ptab);
+
+ if (!set)
+ ld = last_data_read_nob(ptab);
+ else {
+
+ ld = (Uint64) next;
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ if (ptab->r.o.invalid_data == data) {
+ ld += ptab->r.o.max;
+ ASSERT(erts_ptab_data2ix(ptab, data)
+ == erts_ptab_data2ix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ }
+ last_data_set_relb(ptab, ld);
+ }
+
+ while (1) {
+ int pix;
+ ld++;
+ pix = (int) (ld % ptab->r.o.max);
+ if (first_pix < 0)
+ first_pix = pix;
+ else if (pix == first_pix) {
+ res = -1;
+ break;
+ }
+ if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) {
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ if (ptab->r.o.invalid_data == data) {
+ ld += ptab->r.o.max;
+ ASSERT(erts_ptab_data2ix(ptab, data)
+ == erts_ptab_data2ix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ }
+ res = data;
+ break;
+ }
+ }
+
+ erts_ptab_rwunlock(ptab);
+
+ return res;
+}
+
+static ERTS_INLINE ErtsPTabElementCommon *
+ptab_pix2el(ErtsPTab *ptab, int ix)
+{
+ ErtsPTabElementCommon *ptab_el;
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ ptab_el = (ErtsPTabElementCommon *) erts_ptab_pix2intptr_nob(ptab, ix);
+ if (ptab_el == ptab->r.o.invalid_element)
+ return NULL;
+ else
+ return ptab_el;
+}
+
+Eterm
+erts_debug_ptab_list(Process *c_p, ErtsPTab *ptab)
+{
+ int i;
+ Uint need;
+ Eterm res;
+ Eterm* hp;
+ Eterm *hp_end;
+
+ erts_ptab_rwlock(ptab);
+
+ res = NIL;
+ need = erts_ptab_count(ptab) * 2;
+ hp = HAlloc(c_p, need); /* we need two heap words for each id */
+ hp_end = hp + need;
+
+ /* make the list by scanning backward */
+
+
+ for (i = ptab->r.o.max-1; i >= 0; i--) {
+ ErtsPTabElementCommon *el = ptab_pix2el(ptab, i);
+ if (el) {
+ res = CONS(hp, el->id, res);
+ hp += 2;
+ }
+ }
+
+ erts_ptab_rwunlock(ptab);
+
+ HRelease(c_p, hp_end, hp);
+
+ return res;
+}
+
+Eterm
+erts_debug_ptab_list_bif_info(Process *c_p, ErtsPTab *ptab)
+{
+ ERTS_DECL_AM(ptab_list_bif_info);
+ Eterm elements[] = {
+ AM_ptab_list_bif_info,
+ make_small((Uint) ERTS_PTAB_LIST_BIF_MIN_START_REDS),
+ make_small((Uint) ptab->list.data.chunks),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED),
+ make_small((Uint) ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_DEBUGLEVEL)
+ };
+ Uint sz = 0;
+ Eterm *hp;
+ (void) erts_bld_tuplev(NULL, &sz, sizeof(elements)/sizeof(Eterm), elements);
+ hp = HAlloc(c_p, sz);
+ return erts_bld_tuplev(&hp, NULL, sizeof(elements)/sizeof(Eterm), elements);
+}
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+static void
+debug_ptab_list_check_found_pid(ErtsPTabListBifData *ptlbdp,
+ Eterm pid,
+ Uint64 ic,
+ int pid_should_be_found)
+{
+ int i;
+ for (i = 0; i < ptlbdp->pid_ix; i++) {
+ if (ptlbdp->pid[i] == pid && ptlbdp->debug.pid_started[i] == ic) {
+ ERTS_PTAB_LIST_ASSERT(pid_should_be_found);
+ return;
+ }
+ }
+ ERTS_PTAB_LIST_ASSERT(!pid_should_be_found);
+}
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST
+static void
+debug_ptab_list_check_res_list(Eterm list)
+{
+ while (is_list(list)) {
+ Eterm* consp = list_val(list);
+ Eterm hd = CAR(consp);
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(hd));
+ list = CDR(consp);
+ }
+
+ ERTS_PTAB_LIST_ASSERT(is_nil(list));
+}
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+
+static void
+debug_ptab_list_save_all_pids(ErtsPTabListBifData *ptlbdp)
+{
+ int ix, tix, cpix;
+ ErtsPTab *ptab = ptlbdp->ptab;
+ ptlbdp->debug.correct_pids_verified = 0;
+ ptlbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ sizeof(Eterm)*ptlbdp->pid_sz);
+
+ for (tix = 0, cpix = 0; tix < ptab->r.o.max; tix++) {
+ ErtsPTabElementCommon *el = ptab_pix2el(ptab, tix);
+ if (el) {
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(el->id));
+ ptlbdp->debug.correct_pids[cpix++] = el->id;
+ ERTS_PTAB_LIST_ASSERT(cpix <= ptlbdp->pid_sz);
+ }
+ }
+ ERTS_PTAB_LIST_ASSERT(cpix == ptlbdp->pid_sz);
+
+ for (ix = 0; ix < ptlbdp->pid_sz; ix++)
+ ptlbdp->pid[ix] = make_small(ix);
+}
+
+static void
+debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp)
+{
+ int ix, cpix;
+
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix == ptlbdp->pid_sz);
+
+ for (ix = 0; ix < ptlbdp->pid_sz; ix++) {
+ int found = 0;
+ Eterm pid = ptlbdp->pid[ix];
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(pid));
+ for (cpix = ix; cpix < ptlbdp->pid_sz; cpix++) {
+ if (ptlbdp->debug.correct_pids[cpix] == pid) {
+ ptlbdp->debug.correct_pids[cpix] = NIL;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ for (cpix = 0; cpix < ix; cpix++) {
+ if (ptlbdp->debug.correct_pids[cpix] == pid) {
+ ptlbdp->debug.correct_pids[cpix] = NIL;
+ found = 1;
+ break;
+ }
+ }
+ }
+ ERTS_PTAB_LIST_ASSERT(found);
+ }
+ ptlbdp->debug.correct_pids_verified = 1;
+
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->debug.correct_pids);
+ ptlbdp->debug.correct_pids = NULL;
+}
+#endif /* ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS */
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST
+static void
+debug_ptab_list_check_del_list(ErtsPTab *ptab)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ if (!ptab->list.data.deleted.start)
+ ERTS_PTAB_LIST_ASSERT(!ptab->list.data.deleted.end);
+ else {
+ Uint64 curr_interval = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ Uint64 *prev_x_interval_p = NULL;
+ ErtsPTabDeletedElement *ptdep;
+
+ for (ptdep = ptab->list.data.deleted.start;
+ ptdep;
+ ptdep = ptdep->next) {
+ if (!ptdep->prev)
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep);
+ else
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev->next == ptdep);
+ if (!ptdep->next)
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.end == ptdep);
+ else
+ ERTS_PTAB_LIST_ASSERT(ptdep->next->prev == ptdep);
+ if (ptdep->ix < 0) {
+ Uint64 interval = ptdep->u.bif_invocation.interval;
+ ERTS_PTAB_LIST_ASSERT(interval <= curr_interval);
+ }
+ else {
+ Uint64 s_interval = ptdep->u.element.inserted;
+ Uint64 x_interval = ptdep->u.element.deleted;
+
+ ERTS_PTAB_LIST_ASSERT(s_interval <= x_interval);
+ if (prev_x_interval_p)
+ ERTS_PTAB_LIST_ASSERT(*prev_x_interval_p <= x_interval);
+ prev_x_interval_p = &ptdep->u.element.deleted;
+ ERTS_PTAB_LIST_ASSERT(
+ erts_ptab_is_valid_id(ptdep->u.element.id));
+ ERTS_PTAB_LIST_ASSERT(
+ erts_ptab_data2ix(ptab,
+ ERTS_PTAB_ID2DATA(ptdep->u.element.id))
+ == ptdep->ix);
+
+ }
+ }
+
+ }
+}
+
+static void
+debug_ptab_list_check_del_free_list(ErtsPTab *ptab,
+ ErtsPTabDeletedElement *free_list)
+{
+ if (ptab->list.data.deleted.start) {
+ ErtsPTabDeletedElement *fptdep;
+ ErtsPTabDeletedElement *ptdep;
+
+ for (fptdep = free_list; fptdep; fptdep = fptdep->next) {
+ for (ptdep = ptab->list.data.deleted.start;
+ ptdep;
+ ptdep = ptdep->next) {
+ ERTS_PTAB_LIST_ASSERT(fptdep != ptdep);
+ }
+ }
+ }
+}
+
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0
+
+static void
+debug_ptab_list_assert_error(char* expr, const char* file, int line, const char *func)
+{
+ fflush(stdout);
+ erts_fprintf(stderr, "%s:%d:%s(): Assertion failed: %s\n",
+ (char *) file, line, (char *) func, expr);
+ fflush(stderr);
+ abort();
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
new file mode 100644
index 0000000000..6e94130df9
--- /dev/null
+++ b/erts/emulator/beam/erl_ptab.h
@@ -0,0 +1,376 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Process/Port table implementation.
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_PTAB_H__
+#define ERL_PTAB_H__
+
+#include "sys.h"
+#include "erl_term.h"
+#include "erl_time.h"
+#include "erl_utils.h"
+#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_thr_progress.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_alloc.h"
+
+#define ERTS_TRACER_PROC(P) (P)->common.tracer_proc
+#define ERTS_TRACE_FLAGS(P) (P)->common.trace_flags
+
+#define IS_TRACED(p) \
+ (ERTS_TRACER_PROC((p)) != NIL)
+#define ARE_TRACE_FLAGS_ON(p,tf) \
+ ((ERTS_TRACE_FLAGS((p)) & (tf|F_SENSITIVE)) == (tf))
+#define IS_TRACED_FL(p,tf) \
+ ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) )
+
+typedef struct {
+ Eterm id;
+#ifdef ERTS_SMP
+ erts_atomic32_t refc;
+#endif
+ Eterm tracer_proc;
+ Uint trace_flags;
+ union {
+ /* --- While being alive --- */
+ struct {
+ Uint64 started_interval;
+ struct reg_proc *reg;
+#ifdef ERTS_SMP
+ ErtsSmpPTimer *ptimer;
+#else
+ ErlTimer tm;
+#endif
+ } alive;
+
+ /* --- While being released --- */
+#ifdef ERTS_SMP
+ ErtsThrPrgrLaterOp release;
+#endif
+ } u;
+} ErtsPTabElementCommon;
+
+typedef struct ErtsPTabDeletedElement_ ErtsPTabDeletedElement;
+
+typedef struct {
+ erts_smp_rwmtx_t rwmtx;
+ erts_interval_t interval;
+ struct {
+ ErtsPTabDeletedElement *start;
+ ErtsPTabDeletedElement *end;
+ } deleted;
+ int chunks;
+} ErtsPTabListData;
+
+typedef struct {
+#ifdef ARCH_32
+ erts_smp_dw_atomic_t last_data;
+#else
+ erts_smp_atomic_t last_data;
+#endif
+ erts_smp_atomic32_t count;
+} ErtsPTabVolatileData;
+
+typedef struct {
+ erts_smp_atomic_t *tab;
+ int max;
+ int tab_cache_lines;
+ int pix_per_cache_line;
+ int pix_cl_mask;
+ int pix_cl_shift;
+ int pix_cli_mask;
+ int pix_cli_shift;
+ ErtsPTabElementCommon *invalid_element;
+ Eterm invalid_data;
+ void (*release_element)(void *);
+} ErtsPTabReadOnlyData;
+
+typedef struct {
+ /*
+ * Data mainly modified when someone is listing
+ * the content of the table.
+ */
+ union {
+ ErtsPTabListData data;
+ char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabListData))];
+ } list;
+
+ /*
+ * Frequently modified data.
+ */
+ union {
+ ErtsPTabVolatileData tile;
+ char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabVolatileData))];
+ } vola;
+
+ /*
+ * Read only data.
+ */
+ union {
+ ErtsPTabReadOnlyData o;
+ char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabReadOnlyData))];
+ } r;
+} ErtsPTab;
+
+#define ERTS_PTAB_ID_DATA_SIZE 28
+#define ERTS_PTAB_ID_DATA_SHIFT (_TAG_IMMED1_SIZE)
+
+/*
+ * Currently pids and ports are allowed.
+ */
+#if _PID_DATA_SIZE != ERTS_PTAB_ID_DATA_SIZE
+# error "Unexpected pid data size"
+#endif
+#if _PID_DATA_SHIFT != ERTS_PTAB_ID_DATA_SHIFT
+# error "Unexpected pid tag size"
+#endif
+#if _PORT_DATA_SIZE != ERTS_PTAB_ID_DATA_SIZE
+# error "Unexpected port data size"
+#endif
+#if _PORT_DATA_SHIFT != ERTS_PTAB_ID_DATA_SHIFT
+# error "Unexpected port tag size"
+#endif
+
+#define erts_ptab_is_valid_id(ID) \
+ (is_internal_pid((ID)) || is_internal_port((ID)))
+
+#define ERTS_PTAB_ID2DATA(ID) \
+ (ASSERT_EXPR(erts_ptab_is_valid_id((ID))), \
+ (((ID) >> ERTS_PTAB_ID_DATA_SHIFT) \
+ & ~(~((Uint) 0) << ERTS_PTAB_ID_DATA_SIZE)))
+
+void erts_ptab_init(void);
+void erts_ptab_init_table(ErtsPTab *ptab,
+ ErtsAlcType_t atype,
+ void (*release_element)(void *),
+ ErtsPTabElementCommon *invalid_element,
+ int size,
+ char *name);
+int erts_ptab_new_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el,
+ void *init_arg,
+ void (*init_ptab_el)(void *, Eterm));
+void erts_ptab_delete_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el);
+int erts_ptab_initialized(ErtsPTab *ptab);
+
+ERTS_GLB_INLINE erts_interval_t *erts_ptab_interval(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_max(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_count(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_data2ix(ErtsPTab *ptab, Eterm data);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint32 add_refc);
+ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE erts_interval_t *
+erts_ptab_interval(ErtsPTab *ptab)
+{
+ return &ptab->list.data.interval;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_max(ErtsPTab *ptab)
+{
+ return ptab->r.o.max;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_count(ErtsPTab *ptab)
+{
+ erts_aint32_t res = erts_smp_atomic32_read_nob(&ptab->vola.tile.count);
+ if (res > ptab->r.o.max)
+ return ptab->r.o.max;
+ ASSERT(res >= 0);
+ return (int) res;
+
+}
+
+ERTS_GLB_INLINE int erts_ptab_data2ix(ErtsPTab *ptab, Eterm data)
+{
+ int n, pix;
+
+ n = (int) data;
+ if (ptab->r.o.pix_cl_mask) {
+ pix = ((n & ptab->r.o.pix_cl_mask) << ptab->r.o.pix_cl_shift);
+ pix += ((n >> ptab->r.o.pix_cli_shift) & ptab->r.o.pix_cli_mask);
+ }
+ else {
+ n %= ptab->r.o.max;
+ pix = n % ptab->r.o.tab_cache_lines;
+ pix *= ptab->r.o.pix_per_cache_line;
+ pix += n / ptab->r.o.tab_cache_lines;
+ }
+ ASSERT(0 <= pix && pix < ptab->r.o.max);
+ return pix;
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_ddrb(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_rb(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el)
+{
+#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_aint32_t refc = erts_atomic32_inc_read_nob(&ptab_el->refc);
+ ERTS_SMP_LC_ASSERT(refc > 1);
+#else
+ erts_atomic32_inc_nob(&ptab_el->refc);
+#endif
+#endif
+}
+
+ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el)
+{
+#ifdef ERTS_SMP
+ erts_aint32_t refc = erts_atomic32_dec_read_nob(&ptab_el->refc);
+ ERTS_SMP_LC_ASSERT(refc >= 0);
+ return (int) refc;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint32 add_refc)
+{
+#ifdef ERTS_SMP
+ erts_aint32_t refc;
+
+#ifndef ERTS_ENABLE_LOCK_CHECK
+ if (add_refc >= 0) {
+ erts_atomic32_add_nob(&ptab_el->refc,
+ (erts_aint32_t) add_refc);
+ return 1;
+ }
+#endif
+
+ refc = erts_atomic32_add_read_nob(&ptab_el->refc,
+ (erts_aint32_t) add_refc);
+ ERTS_SMP_LC_ASSERT(refc >= 0);
+ return (int) refc;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_rlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab)
+{
+ return erts_smp_rwmtx_tryrlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_runlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_rwlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab)
+{
+ return erts_smp_rwmtx_tryrwlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_rwunlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab)
+{
+ return erts_smp_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab)
+{
+ return erts_smp_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx);
+}
+
+#endif
+
+#endif
+
+#if defined(ERTS_PTAB_WANT_BIF_IMPL__) && !defined(ERTS_PTAB_LIST__)
+#define ERTS_PTAB_LIST__
+
+#include "erl_process.h"
+#include "bif.h"
+
+BIF_RETTYPE erts_ptab_list(struct process *c_p, ErtsPTab *ptab);
+
+#endif
+
+#if defined(ERTS_PTAB_WANT_DEBUG_FUNCS__) && !defined(ERTS_PTAB_DEBUG_FUNCS__)
+#define ERTS_PTAB_DEBUG_FUNCS__
+#include "erl_process.h"
+
+/* Debug functions */
+Sint erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next);
+Eterm erts_debug_ptab_list(Process *c_p, ErtsPTab *ptab);
+Eterm erts_debug_ptab_list_bif_info(Process *c_p, ErtsPTab *ptab);
+
+#endif
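The erts_ptab_data2ix() inline above maps the data part of a pid or port id to a table index so that consecutively allocated ids land on different cache lines, reducing false sharing when many schedulers create processes or ports at once. The stand-alone sketch below is not ERTS code: the 64-entry table with 8 slots per cache line is an assumed geometry chosen only for illustration, and it verifies that the mask/shift fast path and the modulo fallback from the header compute the same interleaving.

/*
 * Hedged sketch of the cache-line interleaving in erts_ptab_data2ix().
 * DEMO_MAX, DEMO_CACHE_LINES and DEMO_PIX_PER_CACHE_LINE are assumptions
 * for demonstration, not values taken from the emulator.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_MAX                64  /* assumed table size (power of two)  */
#define DEMO_CACHE_LINES        8   /* DEMO_MAX / DEMO_PIX_PER_CACHE_LINE */
#define DEMO_PIX_PER_CACHE_LINE 8   /* assumed table slots per cache line */

/* Fast path: pure mask/shift, as in the header when sizes are powers of two. */
static int demo_data2ix_fast(int n)
{
    int pix_cl_mask   = DEMO_CACHE_LINES - 1;        /* 0x7 */
    int pix_cl_shift  = 3;  /* log2(DEMO_PIX_PER_CACHE_LINE) */
    int pix_cli_mask  = DEMO_PIX_PER_CACHE_LINE - 1; /* 0x7 */
    int pix_cli_shift = 3;  /* log2(DEMO_CACHE_LINES) */
    return ((n & pix_cl_mask) << pix_cl_shift)
        + ((n >> pix_cli_shift) & pix_cli_mask);
}

/* Slow path: the modulo variant used when the sizes are not powers of two. */
static int demo_data2ix_slow(int n)
{
    n %= DEMO_MAX;
    return (n % DEMO_CACHE_LINES) * DEMO_PIX_PER_CACHE_LINE
        + n / DEMO_CACHE_LINES;
}

int main(void)
{
    int n;
    for (n = 0; n < DEMO_MAX; n++)
        assert(demo_data2ix_fast(n) == demo_data2ix_slow(n));
    /* Consecutive data values 0..3 map to indexes 0, 8, 16, 24,
     * i.e. the first slot of four different cache lines. */
    for (n = 0; n < 4; n++)
        printf("data %d -> index %d\n", n, demo_data2ix_fast(n));
    return 0;
}

With this layout the first eight data values each claim the first slot of a distinct cache line before any line's second slot is touched, which is the point of the interleaving.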
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index bc988cd61b..739a0ee912 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -156,8 +156,8 @@ do { (RES) = (TPID); } while(0)
#define ERTS_TRACER_REF_TYPE Process *
#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
do { \
- (RES) = erts_proc_lookup((TPID)); \
- if (!(RES) || !((RES)->trace_flags & F_TRACER)) { \
+ (RES) = erts_proc_lookup((TPID)); \
+ if (!(RES) || !(ERTS_TRACE_FLAGS((RES)) & F_TRACER)) { \
(TPID) = NIL; \
(TRACEE_FLGS) &= ~TRACEE_FLAGS; \
return; \
@@ -606,8 +606,7 @@ profile_send(Eterm from, Eterm message) {
}
} else {
- ASSERT(is_internal_pid(profiler)
- && internal_pid_index(profiler) < erts_max_processes);
+ ASSERT(is_internal_pid(profiler));
profile_p = erts_proc_lookup(profiler);
@@ -729,7 +728,9 @@ seq_trace_send_to_port(Process *c_p,
}
#define TS_HEAP_WORDS 5
-#define TS_SIZE(p) (((p)->trace_flags & F_TIMESTAMP) ? TS_HEAP_WORDS : 0)
+#define TS_SIZE(p) ((ERTS_TRACE_FLAGS((p)) & F_TIMESTAMP) \
+ ? TS_HEAP_WORDS \
+ : 0)
/*
* Patch a timestamp into a tuple. The tuple must be the last thing
@@ -764,17 +765,17 @@ send_to_tracer(Process *tracee,
erts_smp_mtx_lock(&smq_mtx);
- if (tracee->trace_flags & F_TIMESTAMP)
+ if (ERTS_TRACE_FLAGS(tracee) & F_TIMESTAMP)
*hpp = patch_ts(msg, *hpp);
- if (is_internal_pid(tracee->tracer_proc))
+ if (is_internal_pid(ERTS_TRACER_PROC(tracee)))
ERTS_ENQ_TRACE_MSG(tracee->id, tracer_ref, msg, bp);
else {
- ASSERT(is_internal_port(tracee->tracer_proc));
+ ASSERT(is_internal_port(ERTS_TRACER_PROC(tracee)));
send_to_port(no_fake_sched ? NULL : tracee,
msg,
- &tracee->tracer_proc,
- &tracee->trace_flags);
+ &ERTS_TRACER_PROC(tracee),
+ &ERTS_TRACE_FLAGS(tracee));
}
erts_smp_mtx_unlock(&smq_mtx);
@@ -792,7 +793,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF;
int sched_no, curr_func, to_port, no_fake_sched;
- if (is_nil(p->tracer_proc))
+ if (is_nil(ERTS_TRACER_PROC(p)))
return;
no_fake_sched = never_fake_sched;
@@ -812,13 +813,14 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
}
sched_no = IS_TRACED_FL(p, F_TRACE_SCHED_NO);
- to_port = is_internal_port(p->tracer_proc);
+ to_port = is_internal_port(ERTS_TRACER_PROC(p));
if (!to_port) {
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
}
if (ERTS_PROC_IS_EXITING(p))
@@ -912,7 +914,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
operation = am_atom_put(s, sys_strlen(s));
}
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (11)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -921,10 +923,10 @@ trace_send(Process *p, Eterm to, Eterm msg)
mess = TUPLE5(hp, am_trace, p->id, operation, msg, to);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -934,10 +936,11 @@ trace_send(Process *p, Eterm to, Eterm msg)
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
sz_msg = size_object(msg);
sz_to = size_object(to);
@@ -958,7 +961,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
patch_ts(mess, hp);
}
@@ -977,7 +980,7 @@ trace_receive(Process *rp, Eterm msg)
size_t sz_msg;
Eterm* hp;
- if (is_internal_port(rp->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(rp))) {
#define LOCAL_HEAP_SIZE (10)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -986,10 +989,10 @@ trace_receive(Process *rp, Eterm msg)
mess = TUPLE4(hp, am_trace, rp->id, am_receive, msg);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (rp->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- send_to_port(rp, mess, &rp->tracer_proc, &rp->trace_flags);
+ send_to_port(rp, mess, &ERTS_TRACER_PROC(rp), &ERTS_TRACE_FLAGS(rp));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -999,10 +1002,11 @@ trace_receive(Process *rp, Eterm msg)
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(rp->tracer_proc)
- && internal_pid_index(rp->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(rp)));
- ERTS_GET_TRACER_REF(tracer_ref, rp->tracer_proc, rp->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(rp),
+ ERTS_TRACE_FLAGS(rp));
sz_msg = size_object(msg);
@@ -1016,7 +1020,7 @@ trace_receive(Process *rp, Eterm msg)
erts_smp_mtx_lock(&smq_mtx);
- if (rp->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
patch_ts(mess, hp);
}
@@ -1074,7 +1078,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
ASSERT(is_tuple(token) || is_nil(token));
if (SEQ_TRACE_T_SENDER(token) == seq_tracer || token == NIL ||
- (process && process->trace_flags & F_SENSITIVE)) {
+ (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE)) {
return;
}
@@ -1138,8 +1142,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
Uint sz_label, sz_lastcnt_serial, sz_msg, sz_ts, sz_sender,
sz_exitfrom, sz_receiver;
- ASSERT(is_internal_pid(seq_tracer)
- && internal_pid_index(seq_tracer) < erts_max_processes);
+ ASSERT(is_internal_pid(seq_tracer));
#ifndef ERTS_SMP
@@ -1258,12 +1261,12 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- if (is_internal_port(p->tracer_proc)) {
- send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
+ send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
} else {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
@@ -1273,10 +1276,11 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
/*
* Find the tracer.
*/
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
size = size_object(mess);
@@ -1315,7 +1319,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &p->tracer_proc;
+ tracer_pid = &ERTS_TRACER_PROC(p);
}
if (is_nil(*tracer_pid)) {
/* Trace disabled */
@@ -1326,12 +1330,12 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
/* Do not generate trace messages to oneself */
return;
}
- if (tracer_pid == &p->tracer_proc) {
+ if (tracer_pid == &ERTS_TRACER_PROC(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
- tracee_flags = &p->trace_flags;
+ tracee_flags = &ERTS_TRACE_FLAGS(p);
#ifdef ERTS_SMP
tracee = p->id;
#endif
@@ -1382,8 +1386,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
Eterm* limit;
#endif
- ASSERT(is_internal_pid(*tracer_pid)
- && internal_pid_index(*tracer_pid) < erts_max_processes);
+ ASSERT(is_internal_pid(*tracer_pid));
ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
@@ -1446,7 +1449,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &p->tracer_proc;
+ tracer_pid = &ERTS_TRACER_PROC(p);
}
if (is_nil(*tracer_pid)) {
/* Trace disabled */
@@ -1457,12 +1460,12 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
/* Do not generate trace messages to oneself */
return;
}
- if (tracer_pid == &p->tracer_proc) {
+ if (tracer_pid == &ERTS_TRACER_PROC(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
- tracee_flags = &p->trace_flags;
+ tracee_flags = &ERTS_TRACE_FLAGS(p);
#ifdef ERTS_SMP
tracee = p->id;
#endif
@@ -1514,8 +1517,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
Eterm* limit;
#endif
- ASSERT(is_internal_pid(*tracer_pid)
- && internal_pid_index(*tracer_pid) < erts_max_processes);
+ ASSERT(is_internal_pid(*tracer_pid));
ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
@@ -1593,7 +1595,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &p->tracer_proc;
+ tracer_pid = &ERTS_TRACER_PROC(p);
}
if (is_nil(*tracer_pid)) {
/* Trace disabled */
@@ -1604,12 +1606,12 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
/* Do not generate trace messages to oneself */
return 0;
}
- if (tracer_pid == &p->tracer_proc) {
+ if (tracer_pid == &ERTS_TRACER_PROC(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
- tracee_flags = &p->trace_flags;
+ tracee_flags = &ERTS_TRACE_FLAGS(p);
#ifdef ERTS_SMP
tracee = p->id;
#endif
@@ -1619,7 +1621,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* meta trace =>
* use fixed flag set instead of process flags
*/
- if (p->trace_flags & F_SENSITIVE) {
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
/* No trace messages for sensitive processes. */
return 0;
}
@@ -1677,7 +1679,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
if (!erts_is_valid_tracer_port(*tracer_pid)) {
#ifdef ERTS_SMP
- ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc);
+ ASSERT(is_nil(tracee) || tracer_pid == &ERTS_TRACER_PROC(p));
if (is_not_nil(tracee))
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
@@ -1814,21 +1816,21 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
Eterm* limit;
#endif
- ASSERT(is_internal_pid(*tracer_pid)
- && internal_pid_index(*tracer_pid) < erts_max_processes);
+ ASSERT(is_internal_pid(*tracer_pid));
tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
*tracer_pid, ERTS_PROC_LOCK_STATUS);
if (!tracer)
invalid_tracer = 1;
else {
- invalid_tracer = (tracer->trace_flags & F_TRACER) == 0;
+ invalid_tracer = !(ERTS_TRACE_FLAGS(tracer) & F_TRACER);
erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
}
if (invalid_tracer) {
#ifdef ERTS_SMP
- ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc);
+ ASSERT(is_nil(tracee)
+ || tracer_pid == &ERTS_TRACER_PROC(p));
if (is_not_nil(tracee))
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
@@ -1990,7 +1992,7 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0)
|| erts_thr_progress_is_blocking());
- if (is_internal_port(t_p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2000,7 +2002,7 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
mess = TUPLE4(hp, am_trace, t_p->id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
send_to_port(
@@ -2011,7 +2013,9 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
/* Fake schedule out and in are never sent when smp enabled */
c_p,
#endif
- mess, &t_p->tracer_proc, &t_p->trace_flags);
+ mess,
+ &ERTS_TRACER_PROC(t_p),
+ &ERTS_TRACE_FLAGS(t_p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2022,10 +2026,11 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
ERTS_TRACER_REF_TYPE tracer_ref;
size_t sz_data;
- ASSERT(is_internal_pid(t_p->tracer_proc)
- && internal_pid_index(t_p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
- ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(t_p),
+ ERTS_TRACE_FLAGS(t_p));
sz_data = size_object(data);
@@ -2039,7 +2044,7 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
@@ -2064,7 +2069,7 @@ trace_proc_spawn(Process *p, Eterm pid,
Eterm mess;
Eterm* hp;
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (4+6+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2075,10 +2080,10 @@ trace_proc_spawn(Process *p, Eterm pid,
mess = TUPLE5(hp, am_trace, p->id, am_spawn, pid, mfa);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2090,10 +2095,11 @@ trace_proc_spawn(Process *p, Eterm pid,
size_t sz_args, sz_pid;
Uint need;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
sz_args = size_object(args);
sz_pid = size_object(pid);
@@ -2110,7 +2116,7 @@ trace_proc_spawn(Process *p, Eterm pid,
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
@@ -2179,7 +2185,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
if (global || local) {
flags = erts_call_trace(p, ep->code, ep->match_prog_set, args,
- local, &p->tracer_proc);
+ local, &ERTS_TRACER_PROC(p));
}
if (meta) {
flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
@@ -2262,7 +2268,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
if (flags & MATCH_SET_EXCEPTION_TRACE) {
erts_trace_exception(p, ep->code, class, value,
- &p->tracer_proc);
+ &ERTS_TRACER_PROC(p));
}
if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
/* can only happen if(local)*/
@@ -2287,7 +2293,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
UnUseTmpHeapNoproc(3);
if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- p->trace_flags |= F_EXCEPTION_TRACE;
+ ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
@@ -2297,7 +2303,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
/* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &p->tracer_proc);
+ erts_trace_return(p, ep->code, result, &ERTS_TRACER_PROC(p));
}
if (flags & MATCH_SET_RETURN_TO_TRACE) {
/* can only happen if(local)*/
@@ -2385,7 +2391,7 @@ trace_gc(Process *p, Eterm what)
UseTmpHeap(LOCAL_HEAP_SIZE,p);
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
hp = local_heap;
#ifdef DEBUG
size = 0;
@@ -2397,10 +2403,11 @@ trace_gc(Process *p, Eterm what)
size += 5/*4-tuple*/ + TS_SIZE(p);
#endif
} else {
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
size = 0;
(void) erts_bld_atom_uint_2tup_list(NULL,
@@ -2429,12 +2436,12 @@ trace_gc(Process *p, Eterm what)
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(msg, hp);
}
ASSERT(hp == limit);
- if (is_internal_port(p->tracer_proc))
- send_to_port(p, msg, &p->tracer_proc, &p->trace_flags);
+ if (is_internal_port(ERTS_TRACER_PROC(p)))
+ send_to_port(p, msg, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
else
ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, msg, bp);
erts_smp_mtx_unlock(&smq_mtx);
@@ -2476,8 +2483,7 @@ monitor_long_gc(Process *p, Uint time) {
#endif
#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor)
- && internal_pid_index(system_monitor) < erts_max_processes);
+ ASSERT(is_internal_pid(system_monitor));
monitor_p = erts_proc_lookup(system_monitor);
if (!monitor_p || p == monitor_p)
return;
@@ -2551,8 +2557,7 @@ monitor_large_heap(Process *p) {
#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor)
- && internal_pid_index(system_monitor) < erts_max_processes);
+ ASSERT(is_internal_pid(system_monitor));
monitor_p = erts_proc_lookup(system_monitor);
if (monitor_p || p == monitor_p) {
return;
@@ -2606,8 +2611,7 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
Eterm *hp, msg;
#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor)
- && internal_pid_index(system_monitor) < erts_max_processes);
+ ASSERT(is_internal_pid(system_monitor));
monitor_p = erts_proc_lookup(system_monitor);
if (!monitor_p || p == monitor_p)
return;
@@ -2741,7 +2745,7 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
Eterm mess;
Eterm* hp;
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (5+6)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2751,11 +2755,11 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
/* No fake schedule */
- send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2765,12 +2769,13 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
size_t sz_data;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
sz_data = 6 + TS_SIZE(p);
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
@@ -2779,7 +2784,7 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
@@ -2804,7 +2809,7 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
- if (is_internal_port(t_p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2813,11 +2818,11 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
mess = TUPLE4(hp, am_trace, t_p->id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
/* No fake schedule */
- send_to_port(NULL, mess, &t_p->tracer_proc, &t_p->trace_flags);
+ send_to_port(NULL,mess,&ERTS_TRACER_PROC(t_p),&ERTS_TRACE_FLAGS(t_p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2827,12 +2832,13 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
size_t sz_data;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(t_p->tracer_proc)
- && internal_pid_index(t_p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
sz_data = 5 + TS_SIZE(t_p);
- ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(t_p),
+ ERTS_TRACE_FLAGS(t_p));
hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
@@ -2841,7 +2847,7 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
@@ -2870,7 +2876,7 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
int ws = 5;
Eterm sched_id = am_undefined;
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (5+6)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2894,12 +2900,12 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
hp += ws;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
/* No fake scheduling */
- send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2908,12 +2914,13 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) ws = 6; /* Make place for scheduler id */
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
hp = ERTS_ALLOC_SYSMSG_HEAP(ws+TS_SIZE(p), &bp, &off_heap, tracer_ref);
@@ -2933,7 +2940,7 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
@@ -3046,16 +3053,19 @@ profile_runnable_proc(Process *p, Eterm status){
void
erts_check_my_tracer_proc(Process *p)
{
- if (is_internal_pid(p->tracer_proc)) {
- Process *tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- p->tracer_proc, ERTS_PROC_LOCK_STATUS);
- int invalid_tracer = !tracer || !(tracer->trace_flags & F_TRACER);
+ if (is_internal_pid(ERTS_TRACER_PROC(p))) {
+ Process *tracer = erts_pid2proc(p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_TRACER_PROC(p),
+ ERTS_PROC_LOCK_STATUS);
+ int invalid_tracer = (!tracer
+ || !(ERTS_TRACE_FLAGS(tracer) & F_TRACER));
if (tracer)
erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
if (invalid_tracer) {
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- p->trace_flags &= ~TRACEE_FLAGS;
- p->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(p) &= ~TRACEE_FLAGS;
+ ERTS_TRACER_PROC(p) = NIL;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
@@ -3399,7 +3409,7 @@ sys_msg_dispatcher_func(void *unused)
proc = erts_pid2proc(NULL, 0, receiver, proc_locks);
if (!proc
|| (smqp->type == SYS_MSG_TYPE_TRACE
- && !(proc->trace_flags & F_TRACER))) {
+ && !(ERTS_TRACE_FLAGS(proc) & F_TRACER))) {
/* Bad tracer */
#ifdef DEBUG_PRINTOUTS
if (smqp->type == SYS_MSG_TYPE_TRACE && proc)
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
new file mode 100644
index 0000000000..8425534df5
--- /dev/null
+++ b/erts/emulator/beam/erl_utils.h
@@ -0,0 +1,214 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_UTILS_H__
+#define ERL_UTILS_H__
+
+#include "sys.h"
+#include "erl_smp.h"
+#include "erl_printf.h"
+
+typedef struct {
+#ifdef DEBUG
+ int smp_api;
+#endif
+ union {
+ Uint64 not_atomic;
+#ifdef ARCH_64
+ erts_atomic_t atomic;
+#else
+ erts_dw_atomic_t atomic;
+#endif
+ } counter;
+} erts_interval_t;
+
+void erts_interval_init(erts_interval_t *);
+void erts_smp_interval_init(erts_interval_t *);
+Uint64 erts_step_interval_nob(erts_interval_t *);
+Uint64 erts_step_interval_relb(erts_interval_t *);
+Uint64 erts_smp_step_interval_nob(erts_interval_t *);
+Uint64 erts_smp_step_interval_relb(erts_interval_t *);
+Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64);
+Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64);
+Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64);
+Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64);
+#ifdef ARCH_32
+ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *);
+#endif
+ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ARCH_32
+
+ERTS_GLB_INLINE Uint64
+erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (Uint64) dw->dw_sint;
+#else
+ Uint64 res;
+ res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
+ return res;
+#endif
+}
+
+#endif
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_nob__(erts_interval_t *icp)
+{
+#ifdef ARCH_64
+ return (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
+#else
+ erts_dw_aint_t dw;
+ erts_dw_atomic_read_nob(&icp->counter.atomic, &dw);
+ return erts_interval_dw_aint_to_val__(&dw);
+#endif
+}
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_acqb__(erts_interval_t *icp)
+{
+#ifdef ARCH_64
+ return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
+#else
+ erts_dw_aint_t dw;
+ erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw);
+ return erts_interval_dw_aint_to_val__(&dw);
+#endif
+}
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_nob(erts_interval_t *icp)
+{
+ ASSERT(!icp->smp_api);
+ return erts_current_interval_nob__(icp);
+}
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_acqb(erts_interval_t *icp)
+{
+ ASSERT(!icp->smp_api);
+ return erts_current_interval_acqb__(icp);
+}
+
+ERTS_GLB_INLINE Uint64
+erts_smp_current_interval_nob(erts_interval_t *icp)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return erts_current_interval_nob__(icp);
+#else
+ return icp->counter.not_atomic;
+#endif
+}
+
+ERTS_GLB_INLINE Uint64
+erts_smp_current_interval_acqb(erts_interval_t *icp)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return erts_current_interval_acqb__(icp);
+#else
+ return icp->counter.not_atomic;
+#endif
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/*
+ * To be used to silence unused result warnings, but do not abuse it.
+ */
+void erts_silence_warn_unused_result(long unused);
+
+int erts_fit_in_bits_int64(Sint64);
+int erts_fit_in_bits_int32(Sint32);
+int list_length(Eterm);
+int erts_is_builtin(Eterm, Eterm, int);
+Uint32 make_broken_hash(Eterm);
+Uint32 block_hash(byte *, unsigned, Uint32);
+Uint32 make_hash2(Eterm);
+Uint32 make_hash(Eterm);
+
+
+Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str);
+Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
+Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
+Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
+Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
+Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
+Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
+Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
+Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
+#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
+Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
+Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm terms1[], Uint terms2[]);
+Eterm
+erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm atoms[], Uint uints[]);
+Eterm
+erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
+ Eterm atoms[], Uint uints1[], Uint uints2[]);
+
+void erts_init_utils(void);
+void erts_init_utils_mem(void);
+
+erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint);
+void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *);
+
+#if HALFWORD_HEAP
+int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base);
+# define eq(A,B) eq_rel(A,NULL,B,NULL)
+#else
+int eq(Eterm, Eterm);
+# define eq_rel(A,A_BASE,B,B_BASE) eq(A,B)
+#endif
+
+#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
+
+#if HALFWORD_HEAP
+Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*);
+#define CMP(A,B) cmp_rel(A,NULL,B,NULL)
+#else
+Sint cmp(Eterm, Eterm);
+#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B)
+#define CMP(A,B) cmp(A,B)
+#endif
+#define cmp_lt(a,b) (CMP((a),(b)) < 0)
+#define cmp_le(a,b) (CMP((a),(b)) <= 0)
+#define cmp_eq(a,b) (CMP((a),(b)) == 0)
+#define cmp_ne(a,b) (CMP((a),(b)) != 0)
+#define cmp_ge(a,b) (CMP((a),(b)) >= 0)
+#define cmp_gt(a,b) (CMP((a),(b)) > 0)
+
+#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
+#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
+#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
+#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
+
+#endif
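On 32-bit builds the interval counter introduced in erl_utils.h lives in a double-word atomic, and erts_interval_dw_aint_to_val__() reassembles the 64-bit value from its two 32-bit halves. The minimal sketch below is not ERTS code: it repeats that composition with plain stdint types, and the high/low word order is an assumption mirroring ERTS_DW_AINT_HIGH_WORD and ERTS_DW_AINT_LOW_WORD.

/*
 * Hedged sketch of the ARCH_32 branch of erts_interval_dw_aint_to_val__():
 * shift the assumed high word into place, then OR in the low word.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t demo_dw_to_u64(uint32_t high_word, uint32_t low_word)
{
    uint64_t res = (uint64_t) high_word; /* high 32 bits first ...  */
    res <<= 32;                          /* ... shifted into place  */
    res |= (uint64_t) low_word;          /* then OR in the low half */
    return res;
}

int main(void)
{
    /* 0x00000002 / 0x00000001 recombine to 0x0000000200000001. */
    assert(demo_dw_to_u64(2u, 1u) == (((uint64_t) 2 << 32) | 1));
    /* A saturated low word only affects the low 32 bits. */
    assert(demo_dw_to_u64(0u, UINT32_MAX) == 0xffffffffull);
    return 0;
}

The same composition read through an acquire-barrier variant is what erts_current_interval_acqb__() relies on to observe a consistent 64-bit counter on 32-bit hardware.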
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index c604fdf7c3..86d85e05ee 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -70,6 +70,7 @@ Export *export_list(int);
int export_list_size(void);
int export_table_sz(void);
Export *export_get(Export*);
+Export* erts_find_function(Eterm, Eterm, unsigned int);
#include "beam_load.h" /* For em_* extern declarations */
#define ExportIsBuiltIn(EntryPtr) \
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index e9dbba3d4a..86f8e3890b 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -38,6 +38,7 @@
#include "erl_sys_driver.h"
#include "erl_debug.h"
#include "error.h"
+#include "erl_utils.h"
typedef struct port Port;
#include "erl_port_task.h"
@@ -165,8 +166,14 @@ struct port {
ErlTimer tm; /* Timer entry */
#endif
- Eterm tracer_proc; /* If the port is traced, this is the tracer */
- Uint trace_flags; /* Trace flags */
+ struct {
+ union {
+ struct {
+ Eterm tracer_proc; /* If the port is traced, this is the tracer */
+ Uint trace_flags; /* Trace flags */
+ } alive;
+ } u;
+ } common;
ErlIOQueue ioq; /* driver accessible i/o queue */
DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
@@ -1468,164 +1475,8 @@ ERTS_GLB_INLINE Uint32 erts_port_status_get(Port *prt)
/* erl_drv_thread.c */
void erl_drv_thr_init(void);
-/* time.c */
-
/* utils.c */
-typedef struct {
-#ifdef DEBUG
- int smp_api;
-#endif
- union {
- Uint64 not_atomic;
-#ifdef ARCH_64
- erts_atomic_t atomic;
-#else
- erts_dw_atomic_t atomic;
-#endif
- } counter;
-} erts_interval_t;
-
-void erts_interval_init(erts_interval_t *);
-void erts_smp_interval_init(erts_interval_t *);
-Uint64 erts_step_interval_nob(erts_interval_t *);
-Uint64 erts_step_interval_relb(erts_interval_t *);
-Uint64 erts_smp_step_interval_nob(erts_interval_t *);
-Uint64 erts_smp_step_interval_relb(erts_interval_t *);
-Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64);
-Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64);
-Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64);
-#ifdef ARCH_32
-ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *);
-#endif
-ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *);
-ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-#ifdef ARCH_32
-
-ERTS_GLB_INLINE Uint64
-erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw)
-{
-#ifdef ETHR_SU_DW_NAINT_T__
- return (Uint64) dw->dw_sint;
-#else
- Uint64 res;
- res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
- res <<= 32;
- res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
- return res;
-#endif
-}
-
-#endif
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_nob__(erts_interval_t *icp)
-{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_nob(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
-}
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_acqb__(erts_interval_t *icp)
-{
-#ifdef ARCH_64
- return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
-#else
- erts_dw_aint_t dw;
- erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw);
- return erts_interval_dw_aint_to_val__(&dw);
-#endif
-}
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_nob(erts_interval_t *icp)
-{
- ASSERT(!icp->smp_api);
- return erts_current_interval_nob__(icp);
-}
-
-ERTS_GLB_INLINE Uint64
-erts_current_interval_acqb(erts_interval_t *icp)
-{
- ASSERT(!icp->smp_api);
- return erts_current_interval_acqb__(icp);
-}
-
-ERTS_GLB_INLINE Uint64
-erts_smp_current_interval_nob(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return erts_current_interval_nob__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
-}
-
-ERTS_GLB_INLINE Uint64
-erts_smp_current_interval_acqb(erts_interval_t *icp)
-{
- ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
- return erts_current_interval_acqb__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
-}
-
-#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
-/*
- * To be used to silence unused result warnings, but do not abuse it.
- */
-void erts_silence_warn_unused_result(long unused);
-
-void erts_cleanup_offheap(ErlOffHeap *offheap);
-
-int erts_fit_in_bits_int64(Sint64);
-int erts_fit_in_bits_int32(Sint32);
-int list_length(Eterm);
-Export* erts_find_function(Eterm, Eterm, unsigned int);
-int erts_is_builtin(Eterm, Eterm, int);
-Uint32 make_broken_hash(Eterm);
-Uint32 block_hash(byte *, unsigned, Uint32);
-Uint32 make_hash2(Eterm);
-Uint32 make_hash(Eterm);
-
-
-Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str);
-Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
-Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
-Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
-Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
-Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
-Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
-Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
-Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
-#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
-Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
-Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm terms1[], Uint terms2[]);
-Eterm
-erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm atoms[], Uint uints[]);
-Eterm
-erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
- Eterm atoms[], Uint uints1[], Uint uints2[]);
-
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
@@ -1639,42 +1490,6 @@ Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
(ASSERT_EXPR(is_node_container((NC))), \
IS_CONST((NC)) ? (NC) : store_external_or_ref_in_proc_((Pp), (NC)))
-void erts_init_utils(void);
-void erts_init_utils_mem(void);
-
-erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint);
-void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *);
-
-#if HALFWORD_HEAP
-int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base);
-# define eq(A,B) eq_rel(A,NULL,B,NULL)
-#else
-int eq(Eterm, Eterm);
-# define eq_rel(A,A_BASE,B,B_BASE) eq(A,B)
-#endif
-
-#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
-
-#if HALFWORD_HEAP
-Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*);
-#define CMP(A,B) cmp_rel(A,NULL,B,NULL)
-#else
-Sint cmp(Eterm, Eterm);
-#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B)
-#define CMP(A,B) cmp(A,B)
-#endif
-#define cmp_lt(a,b) (CMP((a),(b)) < 0)
-#define cmp_le(a,b) (CMP((a),(b)) <= 0)
-#define cmp_eq(a,b) (CMP((a),(b)) == 0)
-#define cmp_ne(a,b) (CMP((a),(b)) != 0)
-#define cmp_ge(a,b) (CMP((a),(b)) >= 0)
-#define cmp_gt(a,b) (CMP((a),(b)) > 0)
-
-#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
-#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
-#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
-#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
-
/* duplicates from big.h */
int term_to_Uint(Eterm term, Uint *up);
int term_to_UWord(Eterm, UWord*);
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 4dd60b4d23..f9e1b62f14 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -477,7 +477,7 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
prt->bp = NULL;
prt->data = am_undefined;
/* Set default tracing */
- erts_get_default_tracing(&(prt->trace_flags), &(prt->tracer_proc));
+ erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt));
prt->psd = NULL;
@@ -1345,8 +1345,8 @@ void init_io(void)
erts_port[i].xports = NULL;
erts_smp_spinlock_init_x(&erts_port[i].state_lck, "port_state", make_small(i));
#endif
- erts_port[i].tracer_proc = NIL;
- erts_port[i].trace_flags = 0;
+ ERTS_TRACER_PROC(&erts_port[i]) = NIL;
+ ERTS_TRACE_FLAGS(&erts_port[i]) = 0;
erts_port[i].drv_ptr = NULL;
erts_port[i].status = ERTS_PORT_SFLG_FREE;
@@ -1556,9 +1556,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- ASSERT(is_internal_port(sender)
- && is_internal_pid(pid)
- && internal_pid_index(pid) < erts_max_processes);
+ ASSERT(is_internal_port(sender) && is_internal_pid(pid));
rp = (scheduler
? erts_proc_lookup(pid)
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index c02872ef80..9011eb0c10 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -204,7 +204,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
r.p = proc;
if (!proc)
goto done;
- if (proc->reg)
+ if (proc->common.u.alive.reg)
goto done;
r.pt = NULL;
}
@@ -224,7 +224,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
trace_proc(c_p, proc, am_register, name);
}
- proc->reg = rp;
+ proc->common.u.alive.reg = rp;
}
else if (port && rp->pt == port) {
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
@@ -493,8 +493,8 @@ int erts_unregister_name(Process *c_p,
current_c_p_locks = c_p_locks;
}
#endif
- if (c_p->reg) {
- r.name = c_p->reg->name;
+ if (c_p->common.u.alive.reg) {
+ r.name = c_p->common.u.alive.reg->name;
} else {
/* Name got unregistered while main lock was released */
res = 0;
@@ -549,7 +549,7 @@ int erts_unregister_name(Process *c_p,
ERTS_PROC_LOCK_MAIN);
current_c_p_locks = c_p_locks;
#endif
- rp->p->reg = NULL;
+ rp->p->common.u.alive.reg = NULL;
if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
trace_proc(c_p, rp->p, am_unregister, r.name);
}
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index db6597dc7c..fb203c6076 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -46,6 +46,7 @@
#include "erl_thr_queue.h"
#include "erl_sched_spec_pre_alloc.h"
#include "beam_bp.h"
+#include "erl_ptab.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
diff --git a/erts/emulator/hipe/hipe_debug.c b/erts/emulator/hipe/hipe_debug.c
index 37615bf718..8c3a14a201 100644
--- a/erts/emulator/hipe/hipe_debug.c
+++ b/erts/emulator/hipe/hipe_debug.c
@@ -189,11 +189,13 @@ void hipe_print_pcb(Process *p)
U("old_head ", old_heap);
U("min_heap_..", min_heap_size);
U("rcount ", rcount);
- U("id ", id);
+ U("id ", common.id);
U("prio ", prio);
U("reds ", reds);
+#if 0
U("tracer_pr..", tracer_proc);
U("trace_fla..", trace_flags);
+#endif
U("group_lea..", group_leader);
U("flags ", flags);
U("fvalue ", fvalue);
@@ -202,7 +204,9 @@ void hipe_print_pcb(Process *p)
/*XXX: ErlTimer tm; */
U("next ", next);
/*XXX: ErlOffHeap off_heap; */
+#if 0
U("reg ", reg);
+#endif
U("nlinks ", nlinks);
/*XXX: ErlMessageQueue msg; */
U("mbuf ", mbuf);
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index fdc55a4cc5..157589d1a4 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -1392,7 +1392,7 @@ otp_6237_select_loop() ->
-define(NoTestProcs, 10000).
--record(processes_bif_info, {min_start_reds,
+-record(ptab_list_bif_info, {min_start_reds,
tab_chunks,
tab_chunks_size,
tab_indices_per_red,
@@ -1427,11 +1427,11 @@ processes_large_tab(Config) when is_list(Config) ->
%% the connection times out; therefore, shrink the test on
%% high debug levels.
?line DbgLvl = case erts_debug:get_internal_state(processes_bif_info) of
- #processes_bif_info{debug_level = Lvl} when Lvl > MaxDbgLvl ->
+ #ptab_list_bif_info{debug_level = Lvl} when Lvl > MaxDbgLvl ->
20;
- #processes_bif_info{debug_level = Lvl} when Lvl < 0 ->
+ #ptab_list_bif_info{debug_level = Lvl} when Lvl < 0 ->
?line ?t:fail({debug_level, Lvl});
- #processes_bif_info{debug_level = Lvl} ->
+ #ptab_list_bif_info{debug_level = Lvl} ->
Lvl
end,
?line ProcTabSize3 = ProcTabSize2 - (1300000 * DbgLvl div MaxDbgLvl),
@@ -1446,7 +1446,7 @@ processes_large_tab(Config) when is_list(Config) ->
erts_debug,
get_internal_state,
[processes_bif_info]) of
- #processes_bif_info{tab_chunks = Chunks} when is_integer(Chunks),
+ #ptab_list_bif_info{tab_chunks = Chunks} when is_integer(Chunks),
Chunks > 1 -> ok;
PBInfo -> ?t:fail(PBInfo)
end,
@@ -1475,7 +1475,7 @@ processes_small_tab(Config) when is_list(Config) ->
get_internal_state,
[processes_bif_info]),
?line stop_node(SmallNode),
- ?line 1 = PBInfo#processes_bif_info.tab_chunks,
+ ?line 1 = PBInfo#ptab_list_bif_info.tab_chunks,
?line chk_processes_bif_test_res(Res).
processes_this_tab(doc) ->
@@ -1489,7 +1489,7 @@ chk_processes_bif_test_res(ok) -> ok;
chk_processes_bif_test_res({comment, _} = Comment) -> Comment;
chk_processes_bif_test_res(Failure) -> ?t:fail(Failure).
-print_processes_bif_info(#processes_bif_info{min_start_reds = MinStartReds,
+print_processes_bif_info(#ptab_list_bif_info{min_start_reds = MinStartReds,
tab_chunks = TabChunks,
tab_chunks_size = TabChunksSize,
tab_indices_per_red = TabIndPerRed,
@@ -1587,11 +1587,11 @@ processes_bif_test() ->
?line enable_internal_state(),
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10,
+ ?line WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
?line WillTrap = case PBInfo of
- #processes_bif_info{tab_chunks = 1} ->
+ #ptab_list_bif_info{tab_chunks = 1} ->
false;
- #processes_bif_info{tab_chunks = Chunks,
+ #ptab_list_bif_info{tab_chunks = Chunks,
tab_chunks_size = ChunksSize,
tab_indices_per_red = IndiciesPerRed
} ->
@@ -1626,7 +1626,7 @@ processes_bif_test() ->
?line erlang:system_flag(multi_scheduling, unblock),
?line [{status,suspended},
- {current_function,{erlang,processes_trap,2}}]
+ {current_function,{erlang,ptab_list_continue,2}}]
= process_info(Suspendee, [status, current_function]),
?line ok = do_processes_bif_test(WantReds, WillTrap, Processes),
@@ -1856,7 +1856,7 @@ processes_last_call_trap(Config) when is_list(Config) ->
?line Processes = fun () -> processes() end,
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = case PBInfo#processes_bif_info.min_start_reds of
+ ?line WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
R when R > 10 -> R - 1;
_R -> 9
end,
@@ -1881,7 +1881,7 @@ processes_apply_trap(Config) when is_list(Config) ->
?line enable_internal_state(),
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = case PBInfo#processes_bif_info.min_start_reds of
+ ?line WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
R when R > 10 -> R - 1;
_R -> 9
end,
@@ -1901,7 +1901,7 @@ processes_gc_trap(Config) when is_list(Config) ->
?line enable_internal_state(),
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10,
+ ?line WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
?line Processes = fun () ->
erts_debug:set_internal_state(reds_left,WantReds),
processes()
@@ -1919,7 +1919,7 @@ processes_gc_trap(Config) when is_list(Config) ->
?line erlang:suspend_process(Suspendee),
?line erlang:system_flag(multi_scheduling, unblock),
- ?line [{status,suspended}, {current_function,{erlang,processes_trap,2}}]
+ ?line [{status,suspended}, {current_function,{erlang,ptab_list_continue,2}}]
= process_info(Suspendee, [status, current_function]),
?line erlang:garbage_collect(Suspendee),
@@ -1990,8 +1990,8 @@ chk_term_proc_list(Line, MustChk, ExpectBlks) ->
not_enabled;
{_, MS} ->
{value,
- {processes_term_proc_el,
- DL}} = lists:keysearch(processes_term_proc_el, 1, MS),
+ {ptab_list_deleted_el,
+ DL}} = lists:keysearch(ptab_list_deleted_el, 1, MS),
case lists:keysearch(blocks, 1, DL) of
{value, {blocks, ExpectBlks, _, _}} ->
ok;
@@ -2009,8 +2009,8 @@ processes_term_proc_list_test(MustChk) ->
?line enable_internal_state(),
?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10,
- ?line #processes_bif_info{tab_chunks = Chunks,
+ ?line WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
+ ?line #ptab_list_bif_info{tab_chunks = Chunks,
tab_chunks_size = ChunksSize,
tab_indices_per_red = IndiciesPerRed
} = PBInfo,
@@ -2046,7 +2046,7 @@ processes_term_proc_list_test(MustChk) ->
erlang:suspend_process(P),
erlang:system_flag(multi_scheduling, unblock),
[{status,suspended},
- {current_function,{erlang,processes_trap,2}}]
+ {current_function,{erlang,ptab_list_continue,2}}]
= process_info(P, [status, current_function]),
P
end,
diff --git a/erts/etc/unix/etp-commands b/erts/etc/unix/etp-commands
index 1f2af4a291..41a37cc1fa 100644
--- a/erts/etc/unix/etp-commands
+++ b/erts/etc/unix/etp-commands
@@ -1371,9 +1371,9 @@ define etp-process-info
etp-1 $arg0->id
printf "\n State: "
etp-proc-state $arg0
- if ($arg0->reg)
+ if ($arg0->common.u.alive.reg)
printf " Registered name: "
- etp-1 $arg0->reg->name
+ etp-1 $arg0->common.u.alive.reg->name
printf "\n"
end
if ($arg0->current)
@@ -1399,7 +1399,7 @@ define etp-process-info
end
printf " Mbuf size: %ld\n", $arg0->mbuf_sz
if (etp_smp_compiled)
- printf " Msgq len: %ld (inner=%ld, outer=%ld)\n", ($arg0->msg.len + $arg0->u.alive.msg_inq.len), $arg0->msg.len, $arg0->u.alive.msg_inq.len
+ printf " Msgq len: %ld (inner=%ld, outer=%ld)\n", ($arg0->msg.len + $arg0->msg_inq.len), $arg0->msg.len, $arg0->msg_inq.len
else
printf " Msgq len: %d\n", $arg0->msg.len
end