Diffstat (limited to 'erts/emulator/beam/erl_bif_info.c')
 -rw-r--r--  erts/emulator/beam/erl_bif_info.c | 3297
 1 file changed, 1938 insertions(+), 1359 deletions(-)
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 29ba12dfdb..7fada0d548 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1999-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1999-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,7 +38,7 @@
#include "erl_message.h"
#include "erl_binary.h"
#include "erl_db.h"
-#include "erl_instrument.h"
+#include "erl_mtrace.h"
#include "dist.h"
#include "erl_gc.h"
#include "erl_cpu_topology.h"
@@ -46,8 +46,12 @@
#include "erl_thr_progress.h"
#include "erl_bif_unique.h"
#include "erl_map.h"
+#include "erl_check_io.h"
#define ERTS_PTAB_WANT_DEBUG_FUNCS__
#include "erl_ptab.h"
+#include "erl_time.h"
+#include "erl_proc_sig_queue.h"
+#include "erl_alloc_util.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -70,6 +74,9 @@ static Export *gather_msacc_res_trap;
static Export *gather_gc_info_res_trap;
static Export *gather_system_check_res_trap;
+static Export *is_process_alive_trap;
+
+
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
static char otp_version[] = ERLANG_OTP_VERSION;
@@ -87,21 +94,15 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#ifdef ARCH_64
" [64-bit]"
#endif
-#ifdef ERTS_SMP
" [smp:%beu:%beu]"
-#endif
-#ifdef USE_THREADS
-#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
" [ds:%beu:%beu:%beu]"
+#if defined(ERTS_DIRTY_SCHEDULERS_TEST)
+ " [dirty-schedulers-TEST]"
#endif
" [async-threads:%d]"
-#endif
#ifdef HIPE
" [hipe]"
#endif
-#ifdef ERTS_ENABLE_KERNEL_POLL
- " [kernel-poll:%s]"
-#endif
#ifdef ET_DEBUG
#if ET_DEBUG
" [type-assertions]"
@@ -155,8 +156,10 @@ static Eterm os_type_tuple;
static Eterm os_version_tuple;
static Eterm
-current_function(Process* p, Process* rp, Eterm** hpp, int full_info);
-static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp);
+current_function(Process* p, ErtsHeapFactory *hfact, Process* rp,
+ int full_info, Uint reserve_size, int flags);
+static Eterm current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
+ Uint reserve_size);
static Eterm
bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
@@ -174,7 +177,33 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
if (szp)
*szp += 4+2;
if (hpp) {
- Uint refc = (Uint) erts_smp_atomic_read_nob(&pb->val->refc);
+ Uint refc = (Uint) erts_refc_read(&pb->val->intern.refc, 1);
+ tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
+ res = CONS(*hpp + 4, tuple, res);
+ *hpp += 4+2;
+ }
+ }
+ }
+ return res;
+}
+
+static Eterm
+bld_magic_ref_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
+{
+ struct erl_off_heap_header* ohh;
+ Eterm res = NIL;
+ Eterm tuple;
+
+ for (ohh = oh->first; ohh; ohh = ohh->next) {
+ if (is_ref_thing_header((*((Eterm *) ohh)))) {
+ ErtsMRefThing *mrtp = (ErtsMRefThing *) ohh;
+ Eterm val = erts_bld_uword(hpp, szp, (UWord) mrtp->mb);
+ Eterm orig_size = erts_bld_uint(hpp, szp, mrtp->mb->orig_size);
+
+ if (szp)
+ *szp += 4+2;
+ if (hpp) {
+ Uint refc = (Uint) erts_refc_read(&mrtp->mb->intern.refc, 1);
tuple = TUPLE3(*hpp, val, orig_size, make_small(refc));
res = CONS(*hpp + 4, tuple, res);
*hpp += 4+2;
@@ -189,19 +218,27 @@ bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
make_monitor_list:
returns a list of records..
-record(erl_monitor, {
- type, % MON_ORIGIN or MON_TARGET (1 or 3)
- ref,
+ type, % process | port | time_offset | dist_process | resource
+ % | node | nodes | suspend
+ dir, % origin | target
+ ref, % reference or []
pid, % Process or nodename
- name % registered name or []
+ extra % registered name, integer or []
}).
*/
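For illustration only (the concrete ref, pid and name values below are made up): with the new record layout described above, one element of the list built by do_make_one_mon_element's TUPLE6 call would be an Erlang term of the form

    {erl_monitor, process, origin, #Ref<0.1.2.3>, <0.85.0>, []}

where the last field is the registered name, a node-monitor refcount, or [].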
static void do_calc_mon_size(ErtsMonitor *mon, void *vpsz)
{
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
Uint *psz = vpsz;
- *psz += IS_CONST(mon->ref) ? 0 : NC_HEAP_SIZE(mon->ref);
- *psz += IS_CONST(mon->pid) ? 0 : NC_HEAP_SIZE(mon->pid);
- *psz += 8; /* CONS + 5-tuple */
+ *psz += is_immed(mdp->ref) ? 0 : NC_HEAP_SIZE(mdp->ref);
+
+ if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
+ *psz += erts_resource_ref_size(mon->other.ptr);
+ else
+ *psz += is_immed(mon->other.item) ? 0 : NC_HEAP_SIZE(mon->other.item);
+
+ *psz += 9; /* CONS + 6-tuple */
}
typedef struct {
@@ -213,36 +250,97 @@ typedef struct {
static void do_make_one_mon_element(ErtsMonitor *mon, void * vpmlc)
{
+ ErtsMonitorData *mdp = erts_monitor_to_data(mon);
MonListContext *pmlc = vpmlc;
- Eterm tup;
- Eterm r = (IS_CONST(mon->ref)
- ? mon->ref
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->ref));
- Eterm p = (IS_CONST(mon->pid)
- ? mon->pid
- : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->pid));
- tup = TUPLE5(pmlc->hp, pmlc->tag, make_small(mon->type), r, p, mon->name);
- pmlc->hp += 6;
+ Eterm tup, t, d, r, p, x;
+
+ r = is_immed(mdp->ref) ? mdp->ref : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mdp->ref);
+ if (mon->type == ERTS_MON_TYPE_RESOURCE && erts_monitor_is_target(mon))
+ p = erts_bld_resource_ref(&(pmlc->hp), &MSO(pmlc->p), mon->other.ptr);
+ else
+ p = (is_immed(mon->other.item)
+ ? mon->other.item
+ : STORE_NC(&(pmlc->hp), &MSO(pmlc->p), mon->other.item));
+
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ x = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ else if (erts_monitor_is_target(mon))
+ x = NIL;
+ else if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES)
+ x = make_small(((ErtsMonitorDataExtended *) mdp)->u.refc);
+ else
+ x = NIL;
+
+ switch (mon->type) {
+ case ERTS_MON_TYPE_PROC:
+ t = am_process;
+ break;
+ case ERTS_MON_TYPE_PORT:
+ t = am_port;
+ break;
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ t = am_time_offset;
+ break;
+ case ERTS_MON_TYPE_DIST_PROC: {
+ ERTS_DECL_AM(dist_process);
+ t = AM_dist_process;
+ break;
+ }
+ case ERTS_MON_TYPE_RESOURCE: {
+ ERTS_DECL_AM(resource);
+ t = AM_resource;
+ break;
+ }
+ case ERTS_MON_TYPE_NODE:
+ t = am_node;
+ break;
+ case ERTS_MON_TYPE_NODES: {
+ ERTS_DECL_AM(nodes);
+ t = AM_nodes;
+ break;
+ }
+ case ERTS_MON_TYPE_SUSPEND:
+ t = am_suspend;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unknown monitor type");
+ t = am_error;
+ break;
+ }
+ if (erts_monitor_is_target(mon)) {
+ ERTS_DECL_AM(target);
+ d = AM_target;
+ }
+ else {
+ ERTS_DECL_AM(origin);
+ d = AM_origin;
+ }
+ tup = TUPLE6(pmlc->hp, pmlc->tag, t, d, r, p, x);
+ pmlc->hp += 7;
pmlc->res = CONS(pmlc->hp, tup, pmlc->res);
pmlc->hp += 2;
}
static Eterm
-make_monitor_list(Process *p, ErtsMonitor *root)
+make_monitor_list(Process *p, int tree, ErtsMonitor *root, Eterm tail)
{
DECL_AM(erl_monitor);
Uint sz = 0;
MonListContext mlc;
+ void (*foreach)(ErtsMonitor *,
+ void (*)(ErtsMonitor *, void *),
+ void *);
- erts_doforall_monitors(root, &do_calc_mon_size, &sz);
- if (sz == 0) {
- return NIL;
- }
+ foreach = tree ? erts_monitor_tree_foreach : erts_monitor_list_foreach;
+
+ (*foreach)(root, do_calc_mon_size, &sz);
+ if (sz == 0)
+ return tail;
mlc.p = p;
mlc.hp = HAlloc(p,sz);
- mlc.res = NIL;
+ mlc.res = tail;
mlc.tag = AM_erl_monitor;
- erts_doforall_monitors(root, &do_make_one_mon_element, &mlc);
+ (*foreach)(root, do_make_one_mon_element, &mlc);
return mlc.res;
}
@@ -250,20 +348,22 @@ make_monitor_list(Process *p, ErtsMonitor *root)
make_link_list:
returns a list of records..
-record(erl_link, {
- type, % LINK_NODE or LINK_PID (1 or 3)
- pid, % Process or nodename
- targets % List of erl_link's or nil
+ type, % process | port | dist_process
+ pid, % Process or port
+ id % (address)
}).
*/
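For illustration only (pid and id values made up): an element of the list built below via TUPLE4 is of the form {Tag, Type, Pid, Id}, where Id is the ErtsLinkData address rendered as an integer, e.g.

    {erl_link, process, <0.90.0>, 140234711224384}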
-static void do_calc_lnk_size(ErtsLink *lnk, void *vpsz)
+static void calc_lnk_size(ErtsLink *lnk, void *vpsz)
{
Uint *psz = vpsz;
- *psz += IS_CONST(lnk->pid) ? 0 : NC_HEAP_SIZE(lnk->pid);
- if (lnk->type != LINK_NODE && ERTS_LINK_ROOT(lnk) != NULL) {
- /* Node links use this pointer as ref counter... */
- erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_calc_lnk_size,vpsz);
- }
+ Uint sz = 0;
+ ErtsLinkData *ldp = erts_link_to_data(lnk);
+
+ (void) erts_bld_uword(NULL, &sz, (UWord) ldp);
+
+ *psz += sz;
+ *psz += is_immed(lnk->other.item) ? 0 : size_object(lnk->other.item);
*psz += 7; /* CONS + 4-tuple */
}
@@ -274,37 +374,58 @@ typedef struct {
Eterm tag;
} LnkListContext;
-static void do_make_one_lnk_element(ErtsLink *lnk, void * vpllc)
+static void make_one_lnk_element(ErtsLink *lnk, void * vpllc)
{
LnkListContext *pllc = vpllc;
- Eterm tup;
- Eterm old_res, targets = NIL;
- Eterm p = (IS_CONST(lnk->pid)
- ? lnk->pid
- : STORE_NC(&(pllc->hp), &MSO(pllc->p), lnk->pid));
- if (lnk->type == LINK_NODE) {
- targets = make_small(ERTS_LINK_REFC(lnk));
- } else if (ERTS_LINK_ROOT(lnk) != NULL) {
- old_res = pllc->res;
- pllc->res = NIL;
- erts_doforall_links(ERTS_LINK_ROOT(lnk),&do_make_one_lnk_element, vpllc);
- targets = pllc->res;
- pllc->res = old_res;
- }
- tup = TUPLE4(pllc->hp, pllc->tag, make_small(lnk->type), p, targets);
+ Eterm tup, t, pid, id;
+ ErtsLinkData *ldp = erts_link_to_data(lnk);
+
+ id = erts_bld_uword(&pllc->hp, NULL, (UWord) ldp);
+
+ if (is_immed(lnk->other.item))
+ pid = lnk->other.item;
+ else {
+ Uint sz = size_object(lnk->other.item);
+ pid = copy_struct(lnk->other.item, sz, &(pllc->hp), &MSO(pllc->p));
+ }
+
+ switch (lnk->type) {
+ case ERTS_LNK_TYPE_PROC:
+ t = am_process;
+ break;
+ case ERTS_LNK_TYPE_PORT:
+ t = am_port;
+ break;
+ case ERTS_LNK_TYPE_DIST_PROC: {
+ ERTS_DECL_AM(dist_process);
+ t = AM_dist_process;
+ break;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Unknown link type");
+ t = am_undefined;
+ break;
+ }
+
+ tup = TUPLE4(pllc->hp, pllc->tag, t, pid, id);
pllc->hp += 5;
pllc->res = CONS(pllc->hp, tup, pllc->res);
pllc->hp += 2;
}
static Eterm
-make_link_list(Process *p, ErtsLink *root, Eterm tail)
+make_link_list(Process *p, int tree, ErtsLink *root, Eterm tail)
{
DECL_AM(erl_link);
Uint sz = 0;
LnkListContext llc;
+ void (*foreach)(ErtsLink *,
+ void (*)(ErtsLink *, void *),
+ void *);
+
+ foreach = tree ? erts_link_tree_foreach : erts_link_list_foreach;
- erts_doforall_links(root, &do_calc_lnk_size, &sz);
+ (*foreach)(root, calc_lnk_size, (void *) &sz);
if (sz == 0) {
return tail;
}
@@ -312,25 +433,23 @@ make_link_list(Process *p, ErtsLink *root, Eterm tail)
llc.hp = HAlloc(p,sz);
llc.res = tail;
llc.tag = AM_erl_link;
- erts_doforall_links(root, &do_make_one_lnk_element, &llc);
+ (*foreach)(root, make_one_lnk_element, (void *) &llc);
return llc.res;
}
int
-erts_print_system_version(int to, void *arg, Process *c_p)
+erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
{
int i, rc = -1;
char *rc_str = "";
char rc_buf[100];
char *ov = otp_version;
-#ifdef ERTS_SMP
Uint total, online, active;
Uint dirty_cpu, dirty_cpu_onln, dirty_io;
erts_schedulers_state(&total, &online, &active,
&dirty_cpu, &dirty_cpu_onln, NULL,
&dirty_io, NULL);
-#endif
for (i = 0; i < sizeof(otp_version)-4; i++) {
if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c')
rc = atoi(&ov[i+3]);
@@ -345,26 +464,23 @@ erts_print_system_version(int to, void *arg, Process *c_p)
}
return erts_print(to, arg, erts_system_version,
rc_str
-#ifdef ERTS_SMP
, total, online
-#ifdef ERTS_DIRTY_SCHEDULERS
, dirty_cpu, dirty_cpu_onln, dirty_io
-#endif
-#endif
-#ifdef USE_THREADS
, erts_async_max_threads
-#endif
-#ifdef ERTS_ENABLE_KERNEL_POLL
- , erts_use_kernel_poll ? "true" : "false"
-#endif
);
}
typedef struct {
/* {Entity,Node} = {monitor.Name,monitor.Pid} for external by name
* {Entity,Node} = {monitor.Pid,NIL} for external/external by pid
- * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name */
- Eterm entity;
+ * {Entity,Node} = {monitor.Name,erlang:node()} for internal by name
+ * {Entity,Node} = {monitor.resource,MON_NIF_TARGET} */
+ union {
+ Eterm term;
+ ErtsResource* resource;
+ } entity;
+ int named;
+ Uint16 type;
Eterm node;
/* pid is actual target being monitored, no matter pid/port or name */
Eterm pid;
@@ -407,77 +523,107 @@ static void collect_one_link(ErtsLink *lnk, void *vmicp)
{
MonitorInfoCollection *micp = vmicp;
EXTEND_MONITOR_INFOS(micp);
- if (!(lnk->type == LINK_PID)) {
- return;
- }
- micp->mi[micp->mi_i].entity = lnk->pid;
- micp->sz += 2 + NC_HEAP_SIZE(lnk->pid);
+ micp->mi[micp->mi_i].entity.term = lnk->other.item;
+ micp->sz += 2 + NC_HEAP_SIZE(lnk->other.item);
micp->mi_i++;
}
static void collect_one_origin_monitor(ErtsMonitor *mon, void *vmicp)
{
- MonitorInfoCollection *micp = vmicp;
+ if (erts_monitor_is_origin(mon)) {
+ MonitorInfoCollection *micp = vmicp;
- if (mon->type != MON_ORIGIN) {
- return;
- }
- EXTEND_MONITOR_INFOS(micp);
- if (is_atom(mon->pid)) { /* external by name */
- micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = mon->pid;
- micp->sz += 3; /* need one 2-tuple */
- } else if (is_external_pid(mon->pid)) { /* external by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->mi[micp->mi_i].node = NIL;
- micp->sz += NC_HEAP_SIZE(mon->pid);
- } else if (!is_nil(mon->name)) { /* internal by name */
- micp->mi[micp->mi_i].entity = mon->name;
- micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
- micp->sz += 3; /* need one 2-tuple */
- } else { /* internal by pid */
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->mi[micp->mi_i].node = NIL;
- /* no additional heap space needed */
- }
-
- /* have always pid at hand, to assist with figuring out if its a port or
- * a process, when we monitored by name and process_info is requested.
- * See: erl_bif_info.c:process_info_aux section for am_monitors */
- micp->mi[micp->mi_i].pid = mon->pid;
+ EXTEND_MONITOR_INFOS(micp);
+
+ micp->mi[micp->mi_i].type = mon->type;
+
+ switch (mon->type) {
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_DIST_PROC:
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ if (!(mon->flags & ERTS_ML_FLG_NAME)) {
+ micp->mi[micp->mi_i].named = 0;
+ micp->mi[micp->mi_i].entity.term = mon->other.item;
+ micp->mi[micp->mi_i].node = NIL;
+ if (is_not_atom(mon->other.item))
+ micp->sz += NC_HEAP_SIZE(mon->other.item);
+ }
+ else {
+ ErtsMonitorDataExtended *mdep;
+ micp->mi[micp->mi_i].named = !0;
+ mdep = (ErtsMonitorDataExtended *) erts_monitor_to_data(mon);
+ micp->mi[micp->mi_i].entity.term = mdep->u.name;
+ if (mdep->dist)
+ micp->mi[micp->mi_i].node = mdep->dist->nodename;
+ else
+ micp->mi[micp->mi_i].node = erts_this_dist_entry->sysname;
+ micp->sz += 3; /* need one 2-tuple */
+ }
- micp->mi_i++;
- micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
+ /* Always keep the pid at hand, to assist with figuring out if it's a port
+ * or a process when we monitored by name and process_info is requested.
+ * See: erl_bif_info.c:process_info_aux section for am_monitors */
+ micp->mi[micp->mi_i].pid = mon->other.item;
+
+ micp->mi_i++;
+ micp->sz += 2 + 3; /* For a cons cell and a 2-tuple */
+ break;
+ default:
+ break;
+ }
+ }
}
static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
{
MonitorInfoCollection *micp = vmicp;
- if (mon->type != MON_TARGET) {
- return;
- }
+ if (erts_monitor_is_target(mon)) {
- EXTEND_MONITOR_INFOS(micp);
+ EXTEND_MONITOR_INFOS(micp);
- micp->mi[micp->mi_i].node = NIL;
- micp->mi[micp->mi_i].entity = mon->pid;
- micp->sz += (NC_HEAP_SIZE(mon->pid) + 2 /* cons */);
- micp->mi_i++;
+ micp->mi[micp->mi_i].type = mon->type;
+ micp->mi[micp->mi_i].named = !!(mon->flags & ERTS_ML_FLG_NAME);
+ switch (mon->type) {
+
+ case ERTS_MON_TYPE_PROC:
+ case ERTS_MON_TYPE_PORT:
+ case ERTS_MON_TYPE_DIST_PROC:
+
+ micp->mi[micp->mi_i].entity.term = mon->other.item;
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += NC_HEAP_SIZE(mon->other.item);
+
+ micp->sz += 2; /* cons */
+ micp->mi_i++;
+ break;
+
+ case ERTS_MON_TYPE_RESOURCE:
+
+ micp->mi[micp->mi_i].entity.resource = mon->other.ptr;
+ micp->mi[micp->mi_i].node = NIL;
+ micp->sz += erts_resource_ref_size(mon->other.ptr);
+
+ micp->sz += 2; /* cons */
+ micp->mi_i++;
+ break;
+
+ default:
+ break;
+ }
+
+ }
}
typedef struct {
- Process *c_p;
- ErtsProcLocks c_p_locks;
- ErtsSuspendMonitor **smi;
+ ErtsMonitorSuspend **smi;
Uint smi_i;
Uint smi_max;
- int sz;
+ Uint sz;
} ErtsSuspendMonitorInfoCollection;
-#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do { \
- (SMIC).c_p = (CP); \
- (SMIC).c_p_locks = (CPL); \
+#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC) do { \
(SMIC).smi = NULL; \
(SMIC).smi_i = (SMIC).smi_max = 0; \
(SMIC).sz = 0; \
@@ -492,10 +638,10 @@ do { \
(SMICP)->smi, \
((SMICP)->smi_max \
+ ERTS_SMI_INC) \
- * sizeof(ErtsSuspendMonitor *)) \
+ * sizeof(ErtsMonitorSuspend *)) \
: erts_alloc(ERTS_ALC_T_TMP, \
ERTS_SMI_INC \
- * sizeof(ErtsSuspendMonitor *))); \
+ * sizeof(ErtsMonitorSuspend *))); \
(SMICP)->smi_max += ERTS_SMI_INC; \
} \
} while (0)
@@ -508,35 +654,28 @@ do { \
} while (0)
static void
-collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
+collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp)
{
- ErtsSuspendMonitorInfoCollection *smicp = vsmicp;
- Process *suspendee = erts_pid2proc(smicp->c_p,
- smicp->c_p_locks,
- smon->pid,
- 0);
- if (suspendee) { /* suspendee is alive */
- Sint a, p;
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- }
+ if (mon->type == ERTS_MON_TYPE_SUSPEND) {
+ Sint count;
+ erts_aint_t mstate;
+ ErtsMonitorSuspend *msp;
+ ErtsSuspendMonitorInfoCollection *smicp;
- ASSERT((smon->active && !smon->pending)
- || (smon->pending && !smon->active));
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+ smicp = vsmicp;
ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);
- smicp->smi[smicp->smi_i] = smon;
+ smicp->smi[smicp->smi_i] = msp;
smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;
- a = (Sint) smon->active; /* quiet compiler warnings */
- p = (Sint) smon->pending; /* on 64-bit machines */
+ mstate = erts_atomic_read_nob(&msp->state);
- if (!IS_SSMALL(a))
- smicp->sz += BIG_UINT_HEAP_SIZE;
- if (!IS_SSMALL(p))
+ count = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
+ if (!IS_SSMALL(count))
smicp->sz += BIG_UINT_HEAP_SIZE;
+
smicp->smi_i++;
}
}
@@ -545,123 +684,222 @@ collect_one_suspend_monitor(ErtsSuspendMonitor *smon, void *vsmicp)
* process_info/[1,2]
*/
-#define ERTS_PI_FAIL_TYPE_BADARG 0
-#define ERTS_PI_FAIL_TYPE_YIELD 1
-#define ERTS_PI_FAIL_TYPE_AWAIT_EXIT 2
-
-static ERTS_INLINE ErtsProcLocks
-pi_locks(Eterm info)
-{
- switch (info) {
- case am_status:
- case am_priority:
- case am_trap_exit:
- return ERTS_PROC_LOCK_STATUS;
- case am_links:
- case am_monitors:
- case am_monitored_by:
- case am_suspending:
- return ERTS_PROC_LOCK_LINK;
- case am_messages:
- case am_message_queue_len:
- case am_total_heap_size:
- return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
- case am_memory:
- return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_MSGQ;
- default:
- return ERTS_PROC_LOCK_MAIN;
- }
-}
-
/*
* All valid process_info arguments.
*/
-static Eterm pi_args[] = {
- am_registered_name,
- am_current_function,
- am_initial_call,
- am_status,
- am_messages,
- am_message_queue_len,
- am_links,
- am_monitors,
- am_monitored_by,
- am_dictionary,
- am_trap_exit,
- am_error_handler,
- am_heap_size,
- am_stack_size,
- am_memory,
- am_garbage_collection,
- am_group_leader,
- am_reductions,
- am_priority,
- am_trace,
- am_binary,
- am_sequential_trace_token,
- am_catchlevel,
- am_backtrace,
- am_last_calls,
- am_total_heap_size,
- am_suspending,
- am_min_heap_size,
- am_min_bin_vheap_size,
- am_max_heap_size,
- am_current_location,
- am_current_stacktrace,
- am_message_queue_data,
- am_garbage_collection_info
+
+#define ERTS_PI_IX_REGISTERED_NAME 0
+#define ERTS_PI_IX_CURRENT_FUNCTION 1
+#define ERTS_PI_IX_INITIAL_CALL 2
+#define ERTS_PI_IX_STATUS 3
+#define ERTS_PI_IX_MESSAGES 4
+#define ERTS_PI_IX_MESSAGE_QUEUE_LEN 5
+#define ERTS_PI_IX_LINKS 6
+#define ERTS_PI_IX_MONITORS 7
+#define ERTS_PI_IX_MONITORED_BY 8
+#define ERTS_PI_IX_DICTIONARY 9
+#define ERTS_PI_IX_TRAP_EXIT 10
+#define ERTS_PI_IX_ERROR_HANDLER 11
+#define ERTS_PI_IX_HEAP_SIZE 12
+#define ERTS_PI_IX_STACK_SIZE 13
+#define ERTS_PI_IX_MEMORY 14
+#define ERTS_PI_IX_GARBAGE_COLLECTION 15
+#define ERTS_PI_IX_GROUP_LEADER 16
+#define ERTS_PI_IX_REDUCTIONS 17
+#define ERTS_PI_IX_PRIORITY 18
+#define ERTS_PI_IX_TRACE 19
+#define ERTS_PI_IX_BINARY 20
+#define ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN 21
+#define ERTS_PI_IX_CATCHLEVEL 22
+#define ERTS_PI_IX_BACKTRACE 23
+#define ERTS_PI_IX_LAST_CALLS 24
+#define ERTS_PI_IX_TOTAL_HEAP_SIZE 25
+#define ERTS_PI_IX_SUSPENDING 26
+#define ERTS_PI_IX_MIN_HEAP_SIZE 27
+#define ERTS_PI_IX_MIN_BIN_VHEAP_SIZE 28
+#define ERTS_PI_IX_MAX_HEAP_SIZE 29
+#define ERTS_PI_IX_CURRENT_LOCATION 30
+#define ERTS_PI_IX_CURRENT_STACKTRACE 31
+#define ERTS_PI_IX_MESSAGE_QUEUE_DATA 32
+#define ERTS_PI_IX_GARBAGE_COLLECTION_INFO 33
+#define ERTS_PI_IX_MAGIC_REF 34
+#define ERTS_PI_IX_FULLSWEEP_AFTER 35
+
+#define ERTS_PI_FLAG_SINGELTON (1 << 0)
+#define ERTS_PI_FLAG_ALWAYS_WRAP (1 << 1)
+#define ERTS_PI_FLAG_WANT_MSGS (1 << 2)
+#define ERTS_PI_FLAG_NEED_MSGQ_LEN (1 << 3)
+#define ERTS_PI_FLAG_FORCE_SIG_SEND (1 << 4)
+#define ERTS_PI_FLAG_REQUEST_FOR_OTHER (1 << 5)
+
+#define ERTS_PI_UNRESERVE(RS, SZ) \
+ (ASSERT((RS) >= (SZ)), (RS) -= (SZ))
+
+
+typedef struct {
+ Eterm name;
+ Uint reserve_size;
+ int flags;
+ ErtsProcLocks locks;
+} ErtsProcessInfoArgs;
+
+static ErtsProcessInfoArgs pi_args[] = {
+ {am_registered_name, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_current_function, 4, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_initial_call, 4, 0, ERTS_PROC_LOCK_MAIN},
+ {am_status, 0, 0, 0},
+ {am_messages, 0, ERTS_PI_FLAG_WANT_MSGS|ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_message_queue_len, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN, ERTS_PROC_LOCK_MAIN},
+ {am_links, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_monitors, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_monitored_by, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_dictionary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_trap_exit, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_error_handler, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_stack_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_memory, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_garbage_collection, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + ERTS_MAX_HEAP_SIZE_MAP_SZ, 0, ERTS_PROC_LOCK_MAIN},
+ {am_group_leader, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_reductions, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_priority, 0, 0, 0},
+ {am_trace, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_sequential_trace_token, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_catchlevel, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_backtrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_last_calls, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_total_heap_size, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_suspending, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, 0},
+ {am_min_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_min_bin_vheap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_max_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_current_location, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_current_stacktrace, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_message_queue_data, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_garbage_collection_info, ERTS_PROCESS_GC_INFO_MAX_SIZE, 0, ERTS_PROC_LOCK_MAIN},
+ {am_magic_ref, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_fullsweep_after, 0, 0, ERTS_PROC_LOCK_MAIN}
};
-#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
+#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(pi_args[0])))
+
+#ifdef DEBUG
+# define ERTS_PI_DEF_ARR_SZ 2
+#else
+# define ERTS_PI_DEF_ARR_SZ ERTS_PI_ARGS
+#endif
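At the Erlang level, the table above only defines which process_info/2 items are accepted and which locks/flags are needed to serve each of them; an item that is not in the table is still rejected with badarg. A shell sketch (the value is node-dependent and shown purely for illustration):

    1> process_info(self(), fullsweep_after).
    {fullsweep_after,65535}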
static ERTS_INLINE Eterm
pi_ix2arg(int ix)
{
if (ix < 0 || ERTS_PI_ARGS <= ix)
return am_undefined;
- return pi_args[ix];
+ return pi_args[ix].name;
+}
+
+static ERTS_INLINE int
+pi_ix2flags(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return 0;
+ return pi_args[ix].flags;
+}
+
+static ERTS_INLINE Uint
+pi_ix2rsz(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return 0;
+ return pi_args[ix].reserve_size;
+}
+
+static ERTS_INLINE ErtsProcLocks
+pi_ix2locks(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return 0;
+ return pi_args[ix].locks;
}
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
switch (arg) {
- case am_registered_name: return 0;
- case am_current_function: return 1;
- case am_initial_call: return 2;
- case am_status: return 3;
- case am_messages: return 4;
- case am_message_queue_len: return 5;
- case am_links: return 6;
- case am_monitors: return 7;
- case am_monitored_by: return 8;
- case am_dictionary: return 9;
- case am_trap_exit: return 10;
- case am_error_handler: return 11;
- case am_heap_size: return 12;
- case am_stack_size: return 13;
- case am_memory: return 14;
- case am_garbage_collection: return 15;
- case am_group_leader: return 16;
- case am_reductions: return 17;
- case am_priority: return 18;
- case am_trace: return 19;
- case am_binary: return 20;
- case am_sequential_trace_token: return 21;
- case am_catchlevel: return 22;
- case am_backtrace: return 23;
- case am_last_calls: return 24;
- case am_total_heap_size: return 25;
- case am_suspending: return 26;
- case am_min_heap_size: return 27;
- case am_min_bin_vheap_size: return 28;
- case am_max_heap_size: return 29;
- case am_current_location: return 30;
- case am_current_stacktrace: return 31;
- case am_message_queue_data: return 32;
- case am_garbage_collection_info: return 33;
- default: return -1;
+ case am_registered_name:
+ return ERTS_PI_IX_REGISTERED_NAME;
+ case am_current_function:
+ return ERTS_PI_IX_CURRENT_FUNCTION;
+ case am_initial_call:
+ return ERTS_PI_IX_INITIAL_CALL;
+ case am_status:
+ return ERTS_PI_IX_STATUS;
+ case am_messages:
+ return ERTS_PI_IX_MESSAGES;
+ case am_message_queue_len:
+ return ERTS_PI_IX_MESSAGE_QUEUE_LEN;
+ case am_links:
+ return ERTS_PI_IX_LINKS;
+ case am_monitors:
+ return ERTS_PI_IX_MONITORS;
+ case am_monitored_by:
+ return ERTS_PI_IX_MONITORED_BY;
+ case am_dictionary:
+ return ERTS_PI_IX_DICTIONARY;
+ case am_trap_exit:
+ return ERTS_PI_IX_TRAP_EXIT;
+ case am_error_handler:
+ return ERTS_PI_IX_ERROR_HANDLER;
+ case am_heap_size:
+ return ERTS_PI_IX_HEAP_SIZE;
+ case am_stack_size:
+ return ERTS_PI_IX_STACK_SIZE;
+ case am_memory:
+ return ERTS_PI_IX_MEMORY;
+ case am_garbage_collection:
+ return ERTS_PI_IX_GARBAGE_COLLECTION;
+ case am_group_leader:
+ return ERTS_PI_IX_GROUP_LEADER;
+ case am_reductions:
+ return ERTS_PI_IX_REDUCTIONS;
+ case am_priority:
+ return ERTS_PI_IX_PRIORITY;
+ case am_trace:
+ return ERTS_PI_IX_TRACE;
+ case am_binary:
+ return ERTS_PI_IX_BINARY;
+ case am_sequential_trace_token:
+ return ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN;
+ case am_catchlevel:
+ return ERTS_PI_IX_CATCHLEVEL;
+ case am_backtrace:
+ return ERTS_PI_IX_BACKTRACE;
+ case am_last_calls:
+ return ERTS_PI_IX_LAST_CALLS;
+ case am_total_heap_size:
+ return ERTS_PI_IX_TOTAL_HEAP_SIZE;
+ case am_suspending:
+ return ERTS_PI_IX_SUSPENDING;
+ case am_min_heap_size:
+ return ERTS_PI_IX_MIN_HEAP_SIZE;
+ case am_min_bin_vheap_size:
+ return ERTS_PI_IX_MIN_BIN_VHEAP_SIZE;
+ case am_max_heap_size:
+ return ERTS_PI_IX_MAX_HEAP_SIZE;
+ case am_current_location:
+ return ERTS_PI_IX_CURRENT_LOCATION;
+ case am_current_stacktrace:
+ return ERTS_PI_IX_CURRENT_STACKTRACE;
+ case am_message_queue_data:
+ return ERTS_PI_IX_MESSAGE_QUEUE_DATA;
+ case am_garbage_collection_info:
+ return ERTS_PI_IX_GARBAGE_COLLECTION_INFO;
+ case am_magic_ref:
+ return ERTS_PI_IX_MAGIC_REF;
+ case am_fullsweep_after:
+ return ERTS_PI_IX_FULLSWEEP_AFTER;
+ default:
+ return -1;
}
}
@@ -671,7 +909,6 @@ static Eterm pi_1_keys[] = {
am_initial_call,
am_status,
am_message_queue_len,
- am_messages,
am_links,
am_dictionary,
am_trap_exit,
@@ -715,155 +952,41 @@ process_info_init(void)
}
-static ERTS_INLINE Process *
-pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
-{
-#ifdef ERTS_SMP
- /*
- * If the main lock is needed, we use erts_pid2proc_not_running()
- * instead of erts_pid2proc() for two reasons:
- * * Current function of pid and possibly other information will
- * have been updated so that process_info() is consistent with an
- * info-request/info-response signal model.
- * * We avoid blocking the whole scheduler executing the
- * process that is calling process_info() for a long time
- * which will happen if pid is currently running.
- * The caller of process_info() may have to yield if pid
- * is currently running.
- */
-
- if (info_locks & ERTS_PROC_LOCK_MAIN)
- return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
- pid, info_locks);
- else
-#endif
- return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
- pid, info_locks);
-}
-
-
-
static BIF_RETTYPE
-process_info_aux(Process *BIF_P,
+process_info_aux(Process *c_p,
+ ErtsHeapFactory *hfact,
Process *rp,
ErtsProcLocks rp_locks,
- Eterm rpid,
- Eterm item,
- int always_wrap);
-
-#define ERTS_PI_RES_ELEM_IX_BUF_INC 1024
-#define ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ ERTS_PI_ARGS
+ int item_ix,
+ int flags,
+ Uint *reserve_sizep,
+ Uint *reds);
-static Eterm
-process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
- int *fail_type)
+Eterm
+erts_process_info(Process *c_p,
+ ErtsHeapFactory *hfact,
+ Process *rp,
+ ErtsProcLocks rp_locks,
+ int *item_ix,
+ int item_ix_len,
+ int flags,
+ Uint reserve_size,
+ Uint *reds)
{
- int want_messages = 0;
- int def_res_elem_ix_buf[ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ];
- int *res_elem_ix = &def_res_elem_ix_buf[0];
- int res_elem_ix_ix = -1;
- int res_elem_ix_sz = ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ;
+ Eterm res;
Eterm part_res[ERTS_PI_ARGS];
- Eterm res, arg;
- Uint *hp, *hp_end;
- ErtsProcLocks locks = (ErtsProcLocks) 0;
- int res_len, ix;
- Process *rp = NULL;
+ int item_ix_ix, ix;
- *fail_type = ERTS_PI_FAIL_TYPE_BADARG;
+ if (ERTS_PI_FLAG_SINGELTON & flags) {
+ ASSERT(item_ix_len == 1);
+ res = process_info_aux(c_p, hfact, rp, rp_locks, item_ix[0],
+ flags, &reserve_size, reds);
+ return res;
+ }
for (ix = 0; ix < ERTS_PI_ARGS; ix++)
part_res[ix] = THE_NON_VALUE;
- ASSERT(is_list(list));
-
- while (is_list(list)) {
- Eterm* consp = list_val(list);
-
- arg = CAR(consp);
- ix = pi_arg2ix(arg);
- if (ix < 0) {
- res = THE_NON_VALUE;
- goto done;
- }
- if (arg == am_messages)
- want_messages = 1;
- locks |= pi_locks(arg);
- res_elem_ix_ix++;
- if (res_elem_ix_ix >= res_elem_ix_sz) {
- if (res_elem_ix != &def_res_elem_ix_buf[0])
- res_elem_ix =
- erts_realloc(ERTS_ALC_T_TMP,
- res_elem_ix,
- sizeof(int)*(res_elem_ix_sz
- += ERTS_PI_RES_ELEM_IX_BUF_INC));
- else {
- int new_res_elem_ix_sz = ERTS_PI_RES_ELEM_IX_BUF_INC;
- int *new_res_elem_ix = erts_alloc(ERTS_ALC_T_TMP,
- sizeof(int)*new_res_elem_ix_sz);
- sys_memcpy((void *) new_res_elem_ix,
- (void *) res_elem_ix,
- sizeof(int)*res_elem_ix_sz);
- res_elem_ix = new_res_elem_ix;
- res_elem_ix_sz = new_res_elem_ix_sz;
- }
- }
- res_elem_ix[res_elem_ix_ix] = ix;
- list = CDR(consp);
- }
- if (is_not_nil(list)) {
- res = THE_NON_VALUE;
- goto done;
- }
-
- res_len = res_elem_ix_ix+1;
-
- ASSERT(res_len > 0);
-
- rp = pi_pid2proc(c_p, pid, locks|ERTS_PROC_LOCK_STATUS);
- if (!rp) {
- res = am_undefined;
- goto done;
- }
- else if (rp == ERTS_PROC_LOCK_BUSY) {
- rp = NULL;
- res = THE_NON_VALUE;
- *fail_type = ERTS_PI_FAIL_TYPE_YIELD;
- goto done;
- }
- else if (c_p != rp && ERTS_PROC_PENDING_EXIT(rp)) {
- locks |= ERTS_PROC_LOCK_STATUS;
- res = THE_NON_VALUE;
- *fail_type = ERTS_PI_FAIL_TYPE_AWAIT_EXIT;
- goto done;
- }
- else {
- ErtsProcLocks unlock_locks = 0;
-
- if (c_p == rp)
- locks |= ERTS_PROC_LOCK_MAIN;
-
- if (!(locks & ERTS_PROC_LOCK_STATUS))
- unlock_locks |= ERTS_PROC_LOCK_STATUS;
-
- if (locks & ERTS_PROC_LOCK_MSGQ) {
- /*
- * Move in queue into private queue and
- * release msgq lock, enabling others to
- * send messages to the process while it
- * is being inspected...
- */
- ASSERT(locks & ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- locks &= ~ERTS_PROC_LOCK_MSGQ;
- unlock_locks |= ERTS_PROC_LOCK_MSGQ;
- }
-
- if (unlock_locks)
- erts_smp_proc_unlock(rp, unlock_locks);
-
- }
-
/*
* We always handle 'messages' first if it should be part
* of the result. This since if both 'messages' and
@@ -871,28 +994,31 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
* change the result of 'message_queue_len' (in case
* the queue contain bad distribution messages).
*/
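At the Erlang level, the ordering rule described above is what keeps a combined request self-consistent: the reported queue length matches the message list actually returned, even if corrupt distribution messages were dropped while collecting the messages. Output purely illustrative:

    1> process_info(Pid, [message_queue_len, messages]).
    [{message_queue_len,2},{messages,[hello,world]}]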
- if (want_messages) {
+ if (flags & ERTS_PI_FLAG_WANT_MSGS) {
ix = pi_arg2ix(am_messages);
ASSERT(part_res[ix] == THE_NON_VALUE);
- part_res[ix] = process_info_aux(c_p, rp, locks, pid, am_messages, always_wrap);
- ASSERT(part_res[ix] != THE_NON_VALUE);
+ res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
+ flags, &reserve_size, reds);
+ ASSERT(res != am_undefined);
+ ASSERT(res != THE_NON_VALUE);
+ part_res[ix] = res;
}
- for (; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
- ix = res_elem_ix[res_elem_ix_ix];
+ for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
+ ix = item_ix[item_ix_ix];
if (part_res[ix] == THE_NON_VALUE) {
- arg = pi_ix2arg(ix);
- part_res[ix] = process_info_aux(c_p, rp, locks, pid, arg, always_wrap);
- ASSERT(part_res[ix] != THE_NON_VALUE);
+ res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
+ flags, &reserve_size, reds);
+ ASSERT(res != am_undefined);
+ ASSERT(res != THE_NON_VALUE);
+ part_res[ix] = res;
}
}
- hp = HAlloc(c_p, res_len*2);
- hp_end = hp + res_len*2;
res = NIL;
- for (res_elem_ix_ix = res_len - 1; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
- ix = res_elem_ix[res_elem_ix_ix];
+ for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
+ ix = item_ix[item_ix_ix];
ASSERT(part_res[ix] != THE_NON_VALUE);
/*
* If we should ignore the value of registered_name,
@@ -900,173 +1026,317 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
* beginning of process_info_aux().
*/
if (is_nil(part_res[ix])) {
- ASSERT(!always_wrap);
+ ASSERT(!(flags & ERTS_PI_FLAG_ALWAYS_WRAP));
ASSERT(pi_ix2arg(ix) == am_registered_name);
}
else {
+ Eterm *hp;
+ ERTS_PI_UNRESERVE(reserve_size, 2);
+ hp = erts_produce_heap(hfact, 2, reserve_size);
res = CONS(hp, part_res[ix], res);
- hp += 2;
}
}
- if (!always_wrap) {
- HRelease(c_p, hp_end, hp);
- }
-
- done:
-
- if (c_p == rp)
- locks &= ~ERTS_PROC_LOCK_MAIN;
- if (locks && rp)
- erts_smp_proc_unlock(rp, locks);
-
- if (res_elem_ix != &def_res_elem_ix_buf[0])
- erts_free(ERTS_ALC_T_TMP, res_elem_ix);
-
return res;
}
-BIF_RETTYPE process_info_1(BIF_ALIST_1)
+static void
+pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix);
+
+static BIF_RETTYPE
+process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
{
+ ErtsHeapFactory hfact;
+ int def_arr[ERTS_PI_DEF_ARR_SZ];
+ int *item_ix = &def_arr[0];
+ Process *rp = NULL;
+ erts_aint32_t state;
+ BIF_RETTYPE ret;
+ Uint reds = 0;
+ ErtsProcLocks locks = 0;
+ int flags;
+ Uint reserve_size;
+ int len;
Eterm res;
- int fail_type;
- if (is_external_pid(BIF_ARG_1)
- && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
- BIF_RET(am_undefined);
-
- if (is_not_internal_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
- res = process_info_list(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, &fail_type);
- if (is_non_value(res)) {
- switch (fail_type) {
- case ERTS_PI_FAIL_TYPE_BADARG:
- BIF_ERROR(BIF_P, BADARG);
- case ERTS_PI_FAIL_TYPE_YIELD:
- ERTS_BIF_YIELD1(bif_export[BIF_process_info_1], BIF_P, BIF_ARG_1);
- case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
- ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
- }
+ ERTS_CT_ASSERT(ERTS_PI_DEF_ARR_SZ > 0);
+
+ if (c_p->common.id == pid) {
+ int local_only = c_p->flags & F_LOCAL_SIGS_ONLY;
+ int sres, sreds, reds_left;
+
+ reds_left = ERTS_BIF_REDS_LEFT(c_p);
+ sreds = reds_left;
+
+ if (!local_only) {
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ }
+
+ sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0);
+
+ BUMP_REDS(c_p, (int) sreds);
+ reds_left -= sreds;
+
+ if (state & ERTS_PSFLG_EXITING) {
+ c_p->flags &= ~F_LOCAL_SIGS_ONLY;
+ goto exited;
+ }
+ if (!sres | (reds_left <= 0)) {
+ /*
+ * More signals to handle or out of reds; need
+ * to yield and continue. Prevent fetching of
+ * more signals by setting local-sigs-only flag.
+ */
+ c_p->flags |= F_LOCAL_SIGS_ONLY;
+ goto yield;
+ }
+
+ c_p->flags &= ~F_LOCAL_SIGS_ONLY;
}
- ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
- BIF_RET(res);
-}
+ if (is_atom(opt)) {
+ int ix = pi_arg2ix(opt);
+ item_ix[0] = ix;
+ len = 1;
+ locks = pi_ix2locks(ix);
+ reserve_size = 3 + pi_ix2rsz(ix);
+ flags = ERTS_PI_FLAG_SINGELTON;
+ flags |= pi_ix2flags(ix);
+ if (ix < 0)
+ goto badarg;
+ }
+ else {
+ Eterm list = opt;
+ Uint size = ERTS_PI_DEF_ARR_SZ;
+ len = 0;
+ reserve_size = 0;
+ locks = 0;
+ flags = 0;
-BIF_RETTYPE process_info_2(BIF_ALIST_2)
-{
- Eterm res;
- Process *rp;
- Eterm pid = BIF_ARG_1;
- ErtsProcLocks info_locks;
- int fail_type;
+ while (is_list(list)) {
+ Eterm *consp = list_val(list);
+ Eterm arg = CAR(consp);
+ int ix = pi_arg2ix(arg);
+ if (ix < 0)
+ goto badarg;
- if (is_external_pid(pid)
- && external_pid_dist_entry(pid) == erts_this_dist_entry)
- BIF_RET(am_undefined);
-
- if (is_not_internal_pid(pid)) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
- if (is_nil(BIF_ARG_2))
- BIF_RET(NIL);
-
- if (is_list(BIF_ARG_2)) {
- res = process_info_list(BIF_P, BIF_ARG_1, BIF_ARG_2, 1, &fail_type);
- if (is_non_value(res)) {
- switch (fail_type) {
- case ERTS_PI_FAIL_TYPE_BADARG:
- BIF_ERROR(BIF_P, BADARG);
- case ERTS_PI_FAIL_TYPE_YIELD:
- ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- case ERTS_PI_FAIL_TYPE_AWAIT_EXIT:
- ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
- __FILE__, __LINE__);
- }
- }
- ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
- BIF_RET(res);
+ if (len >= size)
+ pi_setup_grow(&item_ix, def_arr, &size, len);
+
+ item_ix[len++] = ix;
+
+ locks |= pi_ix2locks(ix);
+ flags |= pi_ix2flags(ix);
+ reserve_size += pi_ix2rsz(ix);
+ reserve_size += 3; /* 2-tuple */
+ reserve_size += 2; /* cons */
+
+ list = CDR(consp);
+ }
+
+ if (is_not_nil(list))
+ goto badarg;
}
- if (pi_arg2ix(BIF_ARG_2) < 0)
- BIF_ERROR(BIF_P, BADARG);
+ if (is_not_internal_pid(pid)) {
+ if (is_external_pid(pid)
+ && external_pid_dist_entry(pid) == erts_this_dist_entry)
+ goto undefined;
+ goto badarg;
+ }
- info_locks = pi_locks(BIF_ARG_2);
+ if (always_wrap)
+ flags |= ERTS_PI_FLAG_ALWAYS_WRAP;
- rp = pi_pid2proc(BIF_P, pid, info_locks|ERTS_PROC_LOCK_STATUS);
- if (!rp)
- res = am_undefined;
- else if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
- erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS);
- ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined);
+ if (c_p->common.id == pid) {
+ rp = c_p;
+ if (locks & ~ERTS_PROC_LOCK_MAIN)
+ erts_proc_lock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
+ locks |= ERTS_PROC_LOCK_MAIN;
}
else {
- ErtsProcLocks unlock_locks = 0;
+ if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND)
+ goto send_signal;
+ state = ERTS_PSFLG_RUNNING; /* fail state... */
+ rp = erts_try_lock_sig_free_proc(pid, locks, &state);
+ if (!rp)
+ goto undefined;
+ if (rp == ERTS_PROC_LOCK_BUSY) {
+ rp = NULL;
+ goto send_signal;
+ }
+ if (state & ERTS_PSFLG_EXITING) {
+ if (locks)
+ erts_proc_unlock(rp, locks);
+ locks = 0;
+ /* wait for it to terminate properly... */
+ goto send_signal;
+ }
+ if (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN) {
+ ASSERT(locks & ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(rp);
+ if (c_p->sig_qs.cont) {
+ erts_proc_unlock(rp, locks|ERTS_PROC_LOCK_MSGQ);
+ locks = 0;
+ goto send_signal;
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
+ }
+ }
- if (BIF_P == rp)
- info_locks |= ERTS_PROC_LOCK_MAIN;
+ erts_factory_proc_init(&hfact, c_p);
- if (!(info_locks & ERTS_PROC_LOCK_STATUS))
- unlock_locks |= ERTS_PROC_LOCK_STATUS;
+ res = erts_process_info(c_p, &hfact, rp, locks, item_ix, len,
+ flags, reserve_size, &reds);
- if (info_locks & ERTS_PROC_LOCK_MSGQ) {
- /*
- * Move in queue into private queue and
- * release msgq lock, enabling others to
- * send messages to the process while it
- * is being inspected...
- */
- ASSERT(info_locks & ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- info_locks &= ~ERTS_PROC_LOCK_MSGQ;
- unlock_locks |= ERTS_PROC_LOCK_MSGQ;
- }
+ erts_factory_close(&hfact);
- if (unlock_locks)
- erts_smp_proc_unlock(rp, unlock_locks);
+ if (reds > INT_MAX/2)
+ reds = INT_MAX/2;
+ BUMP_REDS(c_p, (int) reds);
- res = process_info_aux(BIF_P, rp, info_locks, pid, BIF_ARG_2, 0);
+ state = erts_atomic32_read_acqb(&rp->state);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)) {
+ if (state & ERTS_PSFLG_FREE) {
+ ASSERT(!locks);
+ goto undefined;
+ }
+ if (locks)
+ erts_proc_unlock(rp, locks);
+ locks = 0;
+ /* wait for it to terminate properly... */
+ goto send_signal;
}
- ASSERT(is_value(res));
-#ifdef ERTS_SMP
- if (BIF_P == rp)
- info_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (rp && info_locks)
- erts_smp_proc_unlock(rp, info_locks);
-#endif
+ ERTS_BIF_PREP_RET(ret, res);
- ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
- BIF_RET(res);
+done:
+
+ if (c_p == rp)
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (locks && rp)
+ erts_proc_unlock(rp, locks);
+
+ if (item_ix != def_arr)
+ erts_free(ERTS_ALC_T_TMP, item_ix);
+
+ return ret;
+
+badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ goto done;
+
+undefined:
+ ERTS_BIF_PREP_RET(ret, am_undefined);
+ goto done;
+
+exited:
+ ERTS_BIF_PREP_EXITED(ret, c_p);
+ goto done;
+
+yield:
+ if (pi2)
+ ERTS_BIF_PREP_YIELD2(ret, bif_export[BIF_process_info_2], c_p, pid, opt);
+ else
+ ERTS_BIF_PREP_YIELD1(ret, bif_export[BIF_process_info_1], c_p, pid);
+ goto done;
+
+send_signal: {
+ Eterm ref = erts_make_ref(c_p);
+ int enqueued, need_msgq_len;
+ flags |= ERTS_PI_FLAG_REQUEST_FOR_OTHER;
+ need_msgq_len = (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ /*
+ * Set receive mark so we won't have to scan the whole
+ * message queue for the result. Note that the caller
+ * unconditionally has to enter a receive matching only
+ * messages containing 'ref', or restore the save pointer.
+ */
+ ERTS_RECV_MARK_SAVE(c_p);
+ ERTS_RECV_MARK_SET(c_p);
+ enqueued = erts_proc_sig_send_process_info_request(c_p, pid, item_ix,
+ len, need_msgq_len,
+ flags, reserve_size,
+ ref);
+ if (!enqueued) {
+ /* Restore save pointer... */
+ JOIN_MESSAGE(c_p);
+ goto undefined;
+ }
+ ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, ref);
+ goto done;
+ }
+}
+
+static void
+pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix)
+{
+ *sz = (ix+1) + ERTS_PI_DEF_ARR_SZ;
+ if (*arr != def_arr)
+ *arr = erts_realloc(ERTS_ALC_T_TMP, *arr, (*sz)*sizeof(int));
+ else {
+ int *new_arr = erts_alloc(ERTS_ALC_T_TMP, (*sz)*sizeof(int));
+ sys_memcpy((void *) new_arr, (void *) def_arr,
+ sizeof(int)*ERTS_PI_DEF_ARR_SZ);
+ *arr = new_arr;
+ }
+}
+
+
+BIF_RETTYPE process_info_2(BIF_ALIST_2)
+{
+ return process_info_bif(BIF_P, BIF_ARG_1, BIF_ARG_2, !is_atom(BIF_ARG_2), !0);
+}
+
+BIF_RETTYPE process_info_1(BIF_ALIST_1)
+{
+ return process_info_bif(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, 0);
}
Eterm
-process_info_aux(Process *BIF_P,
+process_info_aux(Process *c_p,
+ ErtsHeapFactory *hfact,
Process *rp,
ErtsProcLocks rp_locks,
- Eterm rpid,
- Eterm item,
- int always_wrap)
+ int item_ix,
+ int flags,
+ Uint *reserve_sizep,
+ Uint *reds)
{
Eterm *hp;
Eterm res = NIL;
+ Uint reserved;
+ Uint reserve_size = *reserve_sizep;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ ErtsProcLocks locks = erts_proc_lc_my_proc_locks(rp);
+
+ switch (item_ix) {
+ case ERTS_PI_IX_STATUS:
+ case ERTS_PI_IX_PRIORITY:
+ case ERTS_PI_IX_SUSPENDING:
+ ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCK_MAIN) == 0);
+ break;
+ default:
+ ERTS_LC_ASSERT(locks == ERTS_PROC_LOCK_MAIN);
+ break;
+ }
+#endif
+
+ reserved = pi_ix2rsz(item_ix);
+ ERTS_PI_UNRESERVE(reserve_size, reserved);
+
+ (*reds)++;
ASSERT(rp);
/*
- * Q: Why this always_wrap argument?
+ * Q: Why this ERTS_PI_FLAG_ALWAYS_WRAP flag?
*
* A: registered_name is strange. If process has no registered name,
* process_info(Pid, registered_name) returns [], and
@@ -1078,138 +1348,170 @@ process_info_aux(Process *BIF_P,
* registered_name behaves as it should, i.e. a
* {registered_name, []} will appear in the resulting list.
*
- * If always_wrap != 0, process_info_aux() always wrap the result
- * in a key two tuple.
+ * If ERTS_PI_FLAG_ALWAYS_WRAP is set, process_info_aux() always
+ * wraps the result in a key-value 2-tuple.
*/
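The asymmetry described above, seen from the shell (Pid is an unregistered process; output illustrative):

    1> process_info(Pid, registered_name).
    []
    2> process_info(Pid, [registered_name]).
    [{registered_name,[]}]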
- switch (item) {
+ switch (item_ix) {
- case am_registered_name:
- if (rp->common.u.alive.reg) {
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_REGISTERED_NAME:
+ if (rp->common.u.alive.reg)
res = rp->common.u.alive.reg->name;
- } else {
- if (always_wrap) {
- hp = HAlloc(BIF_P, 3);
+ else {
+ if (flags & ERTS_PI_FLAG_ALWAYS_WRAP)
res = NIL;
- }
- else {
+ else
return NIL;
- }
}
break;
- case am_current_function:
- res = current_function(BIF_P, rp, &hp, 0);
+ case ERTS_PI_IX_CURRENT_FUNCTION:
+ res = current_function(c_p, hfact, rp, 0,
+ reserve_size, flags);
break;
- case am_current_location:
- res = current_function(BIF_P, rp, &hp, 1);
+ case ERTS_PI_IX_CURRENT_LOCATION:
+ res = current_function(c_p, hfact, rp, 1,
+ reserve_size, flags);
break;
- case am_current_stacktrace:
- res = current_stacktrace(BIF_P, rp, &hp);
+ case ERTS_PI_IX_CURRENT_STACKTRACE:
+ res = current_stacktrace(hfact, rp, reserve_size);
break;
- case am_initial_call:
- hp = HAlloc(BIF_P, 3+4);
+ case ERTS_PI_IX_INITIAL_CALL:
+ hp = erts_produce_heap(hfact, 4, reserve_size);
res = TUPLE3(hp,
- rp->u.initial[INITIAL_MOD],
- rp->u.initial[INITIAL_FUN],
- make_small(rp->u.initial[INITIAL_ARI]));
+ rp->u.initial.module,
+ rp->u.initial.function,
+ make_small(rp->u.initial.arity));
hp += 4;
break;
- case am_status:
- res = erts_process_status(rp, rpid);
- ASSERT(res != am_undefined);
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_STATUS: {
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
+ res = erts_process_state2status(state);
+ if (res == am_running && (state & ERTS_PSFLG_RUNNING_SYS)) {
+ ASSERT(c_p == rp);
+ ASSERT(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
+ if (!(state & (ERTS_PSFLG_SYS_TASKS
+ | ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_SIG_Q
+ | ERTS_PSFLG_SIG_IN_Q))) {
+ /*
+ * We are servicing a process-info request from
+ * another process. If that other process could
+ * have inspected our state itself, we would have
+ * been in the 'waiting' state.
+ */
+ res = am_waiting;
+ }
+ }
break;
+ }
- case am_messages: {
-
- if (rp->msg.len == 0 || ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
- hp = HAlloc(BIF_P, 3);
- } else {
+ case ERTS_PI_IX_MESSAGES: {
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ if (rp->sig_qs.len == 0 || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE))
+ res = NIL;
+ else {
+ int info_on_self = !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
ErtsMessageInfo *mip;
- Sint i;
+ Sint i, len;
Uint heap_need;
-#ifdef DEBUG
- Eterm *hp_end;
-#endif
mip = erts_alloc(ERTS_ALC_T_TMP,
- rp->msg.len*sizeof(ErtsMessageInfo));
+ rp->sig_qs.len*sizeof(ErtsMessageInfo));
/*
* Note that message queue may shrink when calling
- * erts_prep_msgq_for_inspection() since it removes
+ * erts_proc_sig_prep_msgq_for_inspection() since it removes
* corrupt distribution messages.
*/
- heap_need = erts_prep_msgq_for_inspection(BIF_P, rp, rp_locks, mip);
- heap_need += 3; /* top 2-tuple */
- heap_need += rp->msg.len*2; /* Cons cells */
+ heap_need = erts_proc_sig_prep_msgq_for_inspection(c_p, rp,
+ rp_locks,
+ info_on_self,
+ mip);
+ len = rp->sig_qs.len;
- hp = HAlloc(BIF_P, heap_need); /* heap_need is exact */
-#ifdef DEBUG
- hp_end = hp + heap_need;
-#endif
+ heap_need += len*2; /* Cons cells */
+
+ reserve_size += heap_need;
/* Build list of messages... */
- for (i = rp->msg.len - 1, res = NIL; i >= 0; i--) {
+ for (i = len - 1, res = NIL; i >= 0; i--) {
Eterm msg = ERL_MESSAGE_TERM(mip[i].msgp);
Uint sz = mip[i].size;
+ ERTS_PI_UNRESERVE(reserve_size, sz+2);
+ hp = erts_produce_heap(hfact, sz+2, reserve_size);
+
if (sz != 0)
- msg = copy_struct(msg, sz, &hp, &BIF_P->off_heap);
+ msg = copy_struct(msg, sz, &hp, hfact->off_heap);
res = CONS(hp, msg, res);
hp += 2;
}
- ASSERT(hp_end == hp + 3);
+ *reds += (Uint) len / 4;
erts_free(ERTS_ALC_T_TMP, mip);
}
break;
}
- case am_message_queue_len:
- hp = HAlloc(BIF_P, 3);
- res = make_small(rp->msg.len);
+ case ERTS_PI_IX_MESSAGE_QUEUE_LEN: {
+ Sint len = rp->sig_qs.len;
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ ASSERT(len >= 0);
+ if (len <= MAX_SMALL)
+ res = make_small(len);
+ else {
+ hp = erts_produce_heap(hfact, BIG_UINT_HEAP_SIZE, reserve_size);
+ res = uint_to_big((Uint) len, hp);
+ }
break;
+ }
- case am_links: {
+ case ERTS_PI_IX_LINKS: {
MonitorInfoCollection mic;
int i;
Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_links(ERTS_P_LINKS(rp),&collect_one_link,&mic);
+ erts_link_tree_foreach(ERTS_P_LINKS(rp), collect_one_link, (void *) &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ reserve_size += mic.sz;
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ Eterm item_src = mic.mi[i].entity.term;
+ Uint sz = NC_HEAP_SIZE(item_src) + 2;
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+ item = STORE_NC(&hp, hfact->off_heap, item_src);
res = CONS(hp, item, res);
- hp += 2;
}
+
+ *reds += (Uint) mic.mi_i / 4;
+
DESTROY_MONITOR_INFOS(mic);
break;
}
- case am_monitors: {
+ case ERTS_PI_IX_MONITORS: {
MonitorInfoCollection mic;
int i;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(rp),
- &collect_one_origin_monitor, &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
+ collect_one_origin_monitor,
+ (void *) &mic);
+
+ reserve_size += mic.sz;
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- if (is_atom(mic.mi[i].entity)) {
+ if (mic.mi[i].named) {
/* Monitor by name.
* Build {process|port, {Name, Node}} and cons it.
*/
@@ -1221,175 +1523,228 @@ process_info_aux(Process *BIF_P,
|| is_port(mic.mi[i].pid)
|| is_atom(mic.mi[i].pid));
- t1 = TUPLE2(hp, mic.mi[i].entity, mic.mi[i].node);
+ ERTS_PI_UNRESERVE(reserve_size, 3+3+2);
+ hp = erts_produce_heap(hfact, 3+3+2, reserve_size);
+
+ t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
hp += 3;
t2 = TUPLE2(hp, m_type, t1);
hp += 3;
res = CONS(hp, t2, res);
- hp += 2;
}
else {
- /* Monitor by pid. Build {process|port, Pid} and cons it. */
+ /* Build {process|port|time_offset, Pid|clock_service} and cons it. */
Eterm t;
- Eterm pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ Eterm pid;
+ Eterm m_type;
+ Eterm pid_src = mic.mi[i].entity.term;
+ Uint sz = is_atom(pid_src) ? 0 : NC_HEAP_SIZE(pid_src);
+ sz += 3 + 2;
+
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ pid = (is_atom(pid_src)
+ ? pid_src
+ : STORE_NC(&hp, hfact->off_heap, pid_src));
+
+ switch (mic.mi[i].type) {
+ case ERTS_MON_TYPE_PORT:
+ m_type = am_port;
+ break;
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ m_type = am_time_offset;
+ break;
+ default:
+ m_type = am_process;
+ break;
+ }
- Eterm m_type = is_port(mic.mi[i].pid) ? am_port : am_process;
ASSERT(is_pid(mic.mi[i].pid)
|| is_port(mic.mi[i].pid));
t = TUPLE2(hp, m_type, pid);
hp += 3;
res = CONS(hp, t, res);
- hp += 2;
}
}
+
+ *reds += (Uint) mic.mi_i / 4;
+
DESTROY_MONITOR_INFOS(mic);
break;
}
- case am_monitored_by: {
+ case ERTS_PI_IX_MONITORED_BY: {
MonitorInfoCollection mic;
int i;
Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_target_monitor,&mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(rp),
+ collect_one_target_monitor,
+ (void *) &mic);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
+ collect_one_target_monitor,
+ (void *) &mic);
+
+ reserve_size += mic.sz;
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity);
+ Uint sz = 2;
+
+ if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
+ sz += erts_resource_ref_size(mic.mi[i].entity.resource);
+ else
+ sz += NC_HEAP_SIZE(mic.mi[i].entity.term);
+
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
+ item = erts_bld_resource_ref(&hp,
+ hfact->off_heap,
+ mic.mi[i].entity.resource);
+ else
+ item = STORE_NC(&hp,
+ hfact->off_heap,
+ mic.mi[i].entity.term);
res = CONS(hp, item, res);
- hp += 2;
}
+
+ *reds += (Uint) mic.mi_i / 4;
+
DESTROY_MONITOR_INFOS(mic);
break;
}
- case am_suspending: {
+ case ERTS_PI_IX_SUSPENDING: {
ErtsSuspendMonitorInfoCollection smic;
int i;
- Eterm item;
-#ifdef DEBUG
- Eterm *hp_end;
-#endif
- ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
- BIF_P,
- (BIF_P == rp
- ? ERTS_PROC_LOCK_MAIN
- : 0) | ERTS_PROC_LOCK_LINK);
+ ERTS_INIT_SUSPEND_MONITOR_INFOS(smic);
+
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
+ collect_one_suspend_monitor,
+ (void *) &smic);
+
+ reserve_size += smic.sz;
- erts_doforall_suspend_monitors(rp->suspend_monitors,
- &collect_one_suspend_monitor,
- &smic);
- hp = HAlloc(BIF_P, 3 + smic.sz);
-#ifdef DEBUG
- hp_end = hp + smic.sz;
-#endif
-
res = NIL;
for (i = 0; i < smic.smi_i; i++) {
- Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
- Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
- Eterm active;
- Eterm pending;
- if (IS_SSMALL(a))
- active = make_small(a);
- else {
- active = small_to_big(a, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- if (IS_SSMALL(p))
- pending = make_small(p);
- else {
- pending = small_to_big(p, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- item = TUPLE3(hp, smic.smi[i]->pid, active, pending);
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
+ Sint ci;
+ Eterm ct, active, pending, item;
+ Uint sz = 4 + 2;
+
+ msp = smic.smi[i];
+ mstate = erts_atomic_read_nob(&msp->state);
+
+ ci = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
+ if (!IS_SSMALL(ci))
+ sz += BIG_UINT_HEAP_SIZE;
+
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ if (IS_SSMALL(ci))
+ ct = make_small(ci);
+ else {
+ ct = small_to_big(ci, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+
+ if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE) {
+ active = ct;
+ pending = make_small(0);
+ }
+ else {
+ active = make_small(0);
+ pending = ct;
+ }
+
+ ASSERT(is_internal_pid(msp->md.origin.other.item));
+
+ item = TUPLE3(hp, msp->md.origin.other.item, active, pending);
hp += 4;
res = CONS(hp, item, res);
- hp += 2;
}
+ *reds += (Uint) smic.smi_i / 4;
+
ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
- ASSERT(hp == hp_end);
break;
}
- case am_dictionary:
- if (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
+ case ERTS_PI_IX_DICTIONARY:
+ if (!rp->dictionary || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE)) {
res = NIL;
} else {
- res = erts_dictionary_copy(BIF_P, rp->dictionary);
+ Uint num = rp->dictionary->numElements;
+ res = erts_dictionary_copy(hfact, rp->dictionary, reserve_size);
+ *reds += (Uint) num / 4;
}
- hp = HAlloc(BIF_P, 3);
+
break;
- case am_trap_exit: {
- erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
- hp = HAlloc(BIF_P, 3);
- if (state & ERTS_PSFLG_TRAP_EXIT)
- res = am_true;
- else
- res = am_false;
+ case ERTS_PI_IX_TRAP_EXIT:
+ res = (rp->flags & F_TRAP_EXIT) ? am_true : am_false;
break;
- }
- case am_error_handler:
- hp = HAlloc(BIF_P, 3);
- res = erts_proc_get_error_handler(BIF_P);
+ case ERTS_PI_IX_ERROR_HANDLER:
+ res = erts_proc_get_error_handler(rp);
break;
- case am_heap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_HEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
break;
}
- case am_fullsweep_after: {
- Uint hsz = 3;
+ case ERTS_PI_IX_FULLSWEEP_AFTER: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
break;
}
- case am_min_heap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_MIN_HEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
break;
}
- case am_min_bin_vheap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_MIN_BIN_VHEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
break;
}
- case am_max_heap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_MAX_HEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
MAX_HEAP_SIZE_FLAGS_GET(rp),
NULL, &hsz);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
MAX_HEAP_SIZE_FLAGS_GET(rp),
&hp, NULL);
break;
}
- case am_total_heap_size: {
- ErtsMessage *mp;
+ case ERTS_PI_IX_TOTAL_HEAP_SIZE: {
Uint total_heap_size;
- Uint hsz = 3;
+ Uint hsz = 0;
total_heap_size = rp->heap_sz;
if (rp->old_hend && rp->old_heap)
@@ -1397,44 +1752,53 @@ process_info_aux(Process *BIF_P,
total_heap_size += rp->mbuf_sz;
- if (rp->flags & F_ON_HEAP_MSGQ)
- for (mp = rp->msg.first; mp; mp = mp->next)
+ if (rp->flags & F_ON_HEAP_MSGQ) {
+ ErtsMessage *mp;
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ for (mp = rp->sig_qs.first; mp; mp = mp->next) {
+ ASSERT(ERTS_SIG_IS_MSG(mp));
if (mp->data.attached)
total_heap_size += erts_msg_attached_data_size(mp);
+ }
+ *reds += (Uint) rp->sig_qs.len / 4;
+ }
(void) erts_bld_uint(NULL, &hsz, total_heap_size);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, total_heap_size);
break;
}
- case am_stack_size: {
+ case ERTS_PI_IX_STACK_SIZE: {
Uint stack_size = STACK_START(rp) - rp->stop;
- Uint hsz = 3;
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, stack_size);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, stack_size);
break;
}
- case am_memory: { /* Memory consumed in bytes */
- Uint hsz = 3;
+ case ERTS_PI_IX_MEMORY: { /* Memory consumed in bytes */
+ Uint hsz = 0;
Uint size = erts_process_memory(rp, 0);
(void) erts_bld_uint(NULL, &hsz, size);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, size);
+
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ *reds += (Uint) rp->sig_qs.len / 4;
+
break;
}
- case am_garbage_collection: {
+ case ERTS_PI_IX_GARBAGE_COLLECTION: {
DECL_AM(minor_gcs);
Eterm t;
Uint map_sz = 0;
erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz);
- hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz + 3);
- /* last "3" is for outside tuple */
+ hp = erts_produce_heap(hfact, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz, reserve_size);
t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
res = CONS(hp, t, NIL); hp += 2;
@@ -1453,89 +1817,76 @@ process_info_aux(Process *BIF_P,
break;
}
- case am_garbage_collection_info: {
+ case ERTS_PI_IX_GARBAGE_COLLECTION_INFO: {
Uint sz = 0, actual_sz = 0;
- if (rp == BIF_P) {
- sz += ERTS_PROCESS_GC_INFO_MAX_SIZE;
- } else {
- erts_process_gc_info(rp, &sz, NULL, 0, 0);
- sz += 3;
- }
+ erts_process_gc_info(rp, &sz, NULL, 0, 0);
- hp = HAlloc(BIF_P, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0);
- /* We may have some extra space, fill with 0 tuples */
- if (actual_sz <= sz - 3) {
- for (; actual_sz < sz - 3; hp++, actual_sz++)
- hp[0] = make_arityval(0);
- } else {
- for (; actual_sz < sz; hp++, actual_sz++)
- hp[0] = make_arityval(0);
- hp = HAlloc(BIF_P, 3);
- }
-
break;
}
- case am_group_leader: {
+ case ERTS_PI_IX_GROUP_LEADER: {
int sz = NC_HEAP_SIZE(rp->group_leader);
- hp = HAlloc(BIF_P, 3 + sz);
- res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+ res = STORE_NC(&hp, hfact->off_heap, rp->group_leader);
break;
}
- case am_reductions: {
- Uint reds = rp->reds + erts_current_reductions(BIF_P, rp);
- Uint hsz = 3;
+ case ERTS_PI_IX_REDUCTIONS: {
+ Uint reds = rp->reds + erts_current_reductions(c_p, rp);
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, reds);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, reds);
break;
}
- case am_priority:
- hp = HAlloc(BIF_P, 3);
- res = erts_get_process_priority(rp);
+ case ERTS_PI_IX_PRIORITY: {
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
+ if (ERTS_PSFLG_EXITING & state)
+ return am_undefined;
+ res = erts_get_process_priority(state);
break;
+ }
- case am_trace:
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_TRACE:
res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
break;
- case am_binary: {
- Uint sz = 3;
+ case ERTS_PI_IX_BINARY: {
+ Uint sz = 0;
(void) bld_bin_list(NULL, &sz, &MSO(rp));
- hp = HAlloc(BIF_P, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
res = bld_bin_list(&hp, NULL, &MSO(rp));
break;
}
- case am_sequential_trace_token:
- res = copy_object(rp->seq_trace_token, BIF_P);
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN: {
+ Uint sz = size_object(rp->seq_trace_token);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+ res = copy_struct(rp->seq_trace_token, sz, &hp, hfact->off_heap);
break;
+ }
- case am_catchlevel:
- hp = HAlloc(BIF_P, 3);
- res = make_small(catchlevel(BIF_P));
+ case ERTS_PI_IX_CATCHLEVEL:
+ res = make_small(catchlevel(rp));
break;
- case am_backtrace: {
+ case ERTS_PI_IX_BACKTRACE: {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
- res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
+ res = erts_heap_factory_new_binary(hfact, (byte *) dsbufp->str,
+ dsbufp->str_len, reserve_size);
erts_destroy_tmp_dsbuf(dsbufp);
- hp = HAlloc(BIF_P, 3);
break;
}
- case am_last_calls: {
+ case ERTS_PI_IX_LAST_CALLS: {
struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
if (!scb) {
- hp = HAlloc(BIF_P, 3);
res = am_false;
} else {
/*
@@ -1543,44 +1894,55 @@ process_info_aux(Process *BIF_P,
* Might be less than that, if there are sends, receives or timeouts,
* so we must do a HRelease() to avoid creating holes.
*/
- Uint needed = scb->n*(2+4) + 3;
- Eterm* limit;
+ Sint needed = scb->n*(2+4);
Eterm term, list;
int i, j;
+ Export *exp;
+
+ reserve_size += needed;
- hp = HAlloc(BIF_P, needed);
- limit = hp + needed;
list = NIL;
for (i = 0; i < scb->n; i++) {
+ Uint sz;
j = scb->cur - i - 1;
if (j < 0)
j += scb->len;
- if (scb->ct[j] == &exp_send)
+
+ sz = 2;
+ exp = scb->ct[j];
+ if (exp != &exp_send && exp != &exp_receive && exp != &exp_timeout)
+ sz += 4;
+
+ needed -= sz;
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ if (exp == &exp_send)
term = am_send;
- else if (scb->ct[j] == &exp_receive)
+ else if (exp == &exp_receive)
term = am_receive;
- else if (scb->ct[j] == &exp_timeout)
+ else if (exp == &exp_timeout)
term = am_timeout;
else {
term = TUPLE3(hp,
- scb->ct[j]->code[0],
- scb->ct[j]->code[1],
- make_small(scb->ct[j]->code[2]));
+ scb->ct[j]->info.mfa.module,
+ scb->ct[j]->info.mfa.function,
+ make_small(scb->ct[j]->info.mfa.arity));
hp += 4;
}
list = CONS(hp, term, list);
- hp += 2;
}
+
+ ASSERT(needed >= 0);
+ if (needed > 0)
+ reserve_size -= needed;
+
res = list;
- res = TUPLE2(hp, item, res);
- hp += 3;
- HRelease(BIF_P,limit,hp);
- return res;
}
break;
}
- case am_message_queue_data:
+ case ERTS_PI_IX_MESSAGE_QUEUE_DATA:
switch (rp->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {
case F_OFF_HEAP_MSGQ:
res = am_off_heap;
@@ -1593,20 +1955,34 @@ process_info_aux(Process *BIF_P,
ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
break;
}
- hp = HAlloc(BIF_P, 3);
break;
+ case ERTS_PI_IX_MAGIC_REF: {
+ Uint sz = 0;
+ (void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
+ hp = erts_produce_heap(hfact, sz, 0);
+ res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));
+
+ *reds += (Uint) 10;
+ break;
+ }
+
default:
return THE_NON_VALUE; /* will produce badarg */
}
- return TUPLE2(hp, item, res);
+ ERTS_PI_UNRESERVE(reserve_size, 3);
+ *reserve_sizep = reserve_size;
+ hp = erts_produce_heap(hfact, 3, reserve_size);
+
+ return TUPLE2(hp, pi_ix2arg(item_ix), res);
}
#undef MI_INC
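
On the ERTS_PI_UNRESERVE()/erts_produce_heap() pairing used throughout process_info_aux() above: the reservation tracks everything still to be built, and each item hands back its own share just before asking the factory for heap, so the heap can be produced in small chunks. A minimal standalone sketch of that bookkeeping, using hypothetical toy_* names rather than the real ERTS heap factory (error handling omitted):

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    long *mem;          /* produced words */
    size_t used;
    size_t capacity;
} toy_factory;

/* Hand out `need` words while making sure the still-outstanding
 * reservation would also fit; mirrors the role of
 * erts_produce_heap(hfact, sz, reserve_size) in spirit only. */
static long *toy_produce(toy_factory *f, size_t need, size_t reserved)
{
    if (f->used + need + reserved > f->capacity) {
        f->capacity = 2 * (f->used + need + reserved);
        f->mem = realloc(f->mem, f->capacity * sizeof(long));
    }
    long *hp = &f->mem[f->used];
    f->used += need;
    return hp;
}

static void build_pairs(toy_factory *f, const long *vals, size_t n)
{
    size_t reserved = 2 * n;           /* worst-case estimate for all items */
    for (size_t i = 0; i < n; i++) {
        size_t sz = 2;                 /* this item's exact size */
        reserved -= sz;                /* ERTS_PI_UNRESERVE analogue */
        long *hp = toy_produce(f, sz, reserved);
        hp[0] = vals[i];
        hp[1] = 0;
    }
}

int main(void)
{
    toy_factory f = { NULL, 0, 0 };
    long vals[] = { 1, 2, 3 };
    build_pairs(&f, vals, 3);
    printf("produced %zu words\n", f.used);   /* produced 6 words */
    free(f.mem);
    return 0;
}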
static Eterm
-current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
+current_function(Process *c_p, ErtsHeapFactory *hfact, Process* rp,
+ int full_info, Uint reserve_size, int flags)
{
Eterm* hp;
Eterm res;
@@ -1614,16 +1990,16 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
if (rp->current == NULL) {
erts_lookup_function_info(&fi, rp->i, full_info);
- rp->current = fi.current;
+ rp->current = fi.mfa;
} else if (full_info) {
erts_lookup_function_info(&fi, rp->i, full_info);
- if (fi.current == NULL) {
+ if (fi.mfa == NULL) {
/* Use the current function without location info */
erts_set_current_function(&fi, rp->current);
}
}
- if (BIF_P == rp) {
+ if (c_p == rp && !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) {
FunctionInfo fi2;
/*
@@ -1633,9 +2009,9 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
* instead if it can be looked up.
*/
erts_lookup_function_info(&fi2, rp->cp, full_info);
- if (fi2.current) {
+ if (fi2.mfa) {
fi = fi2;
- rp->current = fi2.current;
+ rp->current = fi2.mfa;
}
}
@@ -1643,23 +2019,22 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
* Return the result.
*/
if (rp->current == NULL) {
- hp = HAlloc(BIF_P, 3);
res = am_undefined;
} else if (full_info) {
- hp = HAlloc(BIF_P, 3+fi.needed);
- hp = erts_build_mfa_item(&fi, hp, am_true, &res);
+ hp = erts_produce_heap(hfact, fi.needed, reserve_size);
+ erts_build_mfa_item(&fi, hp, am_true, &res);
} else {
- hp = HAlloc(BIF_P, 3+4);
- res = TUPLE3(hp, rp->current[0],
- rp->current[1], make_small(rp->current[2]));
- hp += 4;
+ hp = erts_produce_heap(hfact, 4, reserve_size);
+ res = TUPLE3(hp, rp->current->module,
+ rp->current->function,
+ make_small(rp->current->arity));
}
- *hpp = hp;
return res;
}
static Eterm
-current_stacktrace(Process* p, Process* rp, Eterm** hpp)
+current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
+ Uint reserve_size)
{
Uint sz;
struct StackTrace* s;
@@ -1668,15 +2043,15 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
FunctionInfo* stkp;
Uint heap_size;
int i;
- Eterm* hp = *hpp;
+ Eterm* hp;
Eterm mfa;
Eterm res = NIL;
- depth = 8;
+ depth = erts_backtrace_depth;
sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
s->depth = 0;
- if (rp->i) {
+ if (depth > 0 && rp->i) {
s->trace[s->depth++] = rp->i;
depth--;
}
@@ -1692,37 +2067,32 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
heap_size = 3;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
- if (stkp->current) {
+ if (stkp->mfa) {
heap_size += stkp->needed + 2;
stkp++;
}
}
- hp = HAlloc(p, heap_size);
+ reserve_size += heap_size;
+
+ /*
+ * We intentionally produce heap in small chunks
+ * (for more info see process_info_aux()).
+ */
while (stkp > stk) {
stkp--;
+ sz = stkp->needed + 2;
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
res = CONS(hp, mfa, res);
- hp += 2;
}
erts_free(ERTS_ALC_T_TMP, stk);
erts_free(ERTS_ALC_T_TMP, s);
- *hpp = hp;
return res;
}
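
The shape of current_stacktrace() above in miniature: capture at most `depth` raw frames, then resolve and cons them back-to-front so the head of the resulting list is the innermost frame, skipping frames with no MFA information. The toy_* names and the string frames are invented stand-ins, not ERTS types:

#include <stdio.h>
#include <stdlib.h>

typedef struct toy_node { const char *mfa; struct toy_node *next; } toy_node;

static toy_node *toy_cons(const char *mfa, toy_node *tail)
{
    toy_node *n = malloc(sizeof(toy_node));
    n->mfa = mfa;
    n->next = tail;
    return n;
}

int main(void)
{
    /* Captured innermost-first; NULL marks a frame without MFA info. */
    const char *frames[] = { "inner/0", NULL, "outer/2" };
    int depth = 3;
    toy_node *res = NULL;

    for (int i = depth - 1; i >= 0; i--)     /* walk captured frames backwards */
        if (frames[i])                       /* like the stkp->mfa check above */
            res = toy_cons(frames[i], res);

    for (toy_node *p = res; p; p = p->next)
        printf("%s\n", p->mfa);              /* inner/0, then outer/2 */

    while (res) {                            /* free the sketch's list */
        toy_node *next = res->next;
        free(res);
        res = next;
    }
    return 0;
}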
-#if defined(VALGRIND)
-static int check_if_xml(void)
-{
- char buf[1];
- size_t bufsz = sizeof(buf);
- return erts_sys_getenv_raw("VALGRIND_LOG_XML", buf, &bufsz) >= 0;
-}
-#else
-#define check_if_xml() 0
-#endif
-
/*
* This function takes care of calls to erlang:system_info/1 when the argument
* is a tuple.
@@ -1766,45 +2136,6 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
return make_small(sizeof(UWord));
}
goto badarg;
- } else if (sel == am_allocated) {
- if (arity == 2) {
- Eterm res = THE_NON_VALUE;
- char *buf;
- Sint len = is_string(*tp);
- if (len <= 0)
- return res;
- buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
- if (intlist_to_buf(*tp, buf, len) != len)
- erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
- buf[len] = '\0';
- res = erts_instr_dump_memory_map(buf) ? am_true : am_false;
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- if (is_non_value(res))
- goto badarg;
- return res;
- }
- else if (arity == 3 && tp[0] == am_status) {
- if (is_atom(tp[1]))
- return erts_instr_get_stat(BIF_P, tp[1], 1);
- else {
- Eterm res = THE_NON_VALUE;
- char *buf;
- Sint len = is_string(tp[1]);
- if (len <= 0)
- return res;
- buf = (char *) erts_alloc(ERTS_ALC_T_TMP, len+1);
- if (intlist_to_buf(tp[1], buf, len) != len)
- erts_exit(ERTS_ERROR_EXIT, "%s:%d: Internal error\n", __FILE__, __LINE__);
- buf[len] = '\0';
- res = erts_instr_dump_stat(buf, 1) ? am_true : am_false;
- erts_free(ERTS_ALC_T_TMP, (void *) buf);
- if (is_non_value(res))
- goto badarg;
- return res;
- }
- }
- else
- goto badarg;
} else if (sel == am_allocator) {
switch (arity) {
case 2:
@@ -1858,15 +2189,9 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
#endif
} else if (is_list(*tp)) {
#if defined(PURIFY)
-#define ERTS_ERROR_CHECKER_PRINTF purify_printf
-#define ERTS_ERROR_CHECKER_PRINTF_XML purify_printf
+# define ERTS_ERROR_CHECKER_PRINTF purify_printf
#elif defined(VALGRIND)
-#define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
-# ifndef HAVE_VALGRIND_PRINTF_XML
-# define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF
-# else
-# define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF_XML
-# endif
+# define ERTS_ERROR_CHECKER_PRINTF VALGRIND_PRINTF
#endif
ErlDrvSizeT buf_size = 8*1024; /* Try with 8KB first */
char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
@@ -1882,12 +2207,7 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
ASSERT(r == buf_size - 1);
}
buf[buf_size - 1 - r] = '\0';
- if (check_if_xml()) {
- ERTS_ERROR_CHECKER_PRINTF_XML("<erlang_info_log>"
- "%s</erlang_info_log>\n", buf);
- } else {
- ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
- }
+ ERTS_ERROR_CHECKER_PRINTF("%s\n", buf);
erts_free(ERTS_ALC_T_TMP, (void *) buf);
BIF_RET(am_true);
#undef ERTS_ERROR_CHECKER_PRINTF
@@ -2085,14 +2405,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
ASSERT(erts_compat_rel > 0);
BIF_RET(make_small(erts_compat_rel));
} else if (BIF_ARG_1 == am_multi_scheduling) {
-#ifndef ERTS_SMP
- BIF_RET(am_disabled);
-#else
-#ifndef ERTS_DIRTY_SCHEDULERS
- if (erts_no_schedulers == 1)
- BIF_RET(am_disabled);
- else
-#endif
{
int msb = erts_is_multi_scheduling_blocked();
BIF_RET(!msb
@@ -2101,7 +2413,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
? am_blocked
: am_blocked_normal));
}
-#endif
} else if (BIF_ARG_1 == am_build_type) {
#if defined(DEBUG)
ERTS_DECL_AM(debug);
@@ -2181,8 +2492,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (BIF_ARG_1 == am_allocated_areas) {
res = erts_allocated_areas(NULL, NULL, BIF_P);
BIF_RET(res);
- } else if (BIF_ARG_1 == am_allocated) {
- BIF_RET(erts_instr_get_memory_map(BIF_P));
} else if (BIF_ARG_1 == am_hipe_architecture) {
#if defined(HIPE)
BIF_RET(hipe_arch_name);
@@ -2211,7 +2520,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = TUPLE2(hp, am_sequential_tracer, val);
BIF_RET(res);
} else if (BIF_ARG_1 == am_garbage_collection){
- Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
Eterm tup;
hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2);
@@ -2229,7 +2538,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
} else if (BIF_ARG_1 == am_fullsweep_after){
- Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
+ Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs);
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_fullsweep_after, make_small(val));
BIF_RET(res);
@@ -2262,8 +2571,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0);
/* Need to be the only thread running... */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
if (BIF_ARG_1 == am_info)
info(ERTS_PRINT_DSBUF, (void *) dsbufp);
@@ -2274,8 +2583,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else
distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
ASSERT(dsbufp && dsbufp->str);
res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
@@ -2284,7 +2593,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) {
DistEntry *dep;
i = 0;
- erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_rlock(&erts_dist_table_rwmtx);
for (dep = erts_visible_dist_entries; dep; dep = dep->next)
++i;
for (dep = erts_hidden_dist_entries; dep; dep = dep->next)
@@ -2307,7 +2616,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = CONS(hp, tpl, res);
hp += 2;
}
- erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+ erts_rwmtx_runlock(&erts_dist_table_rwmtx);
BIF_RET(res);
} else if (BIF_ARG_1 == am_system_version) {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
@@ -2323,9 +2632,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
sizeof(ERLANG_ARCHITECTURE)-1,
NIL));
}
- else if (BIF_ARG_1 == am_memory_types) {
- return erts_instr_get_type_info(BIF_P);
- }
else if (BIF_ARG_1 == am_os_type) {
BIF_RET(os_type_tuple);
}
@@ -2333,16 +2639,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_allocator_options((void *) BIF_P));
}
else if (BIF_ARG_1 == am_thread_pool_size) {
-#ifdef USE_THREADS
extern int erts_async_max_threads;
-#endif
int n;
-#ifdef USE_THREADS
n = erts_async_max_threads;
-#else
- n = 0;
-#endif
BIF_RET(make_small(n));
}
else if (BIF_ARG_1 == am_alloc_util_allocators) {
@@ -2356,12 +2656,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(os_version_tuple);
}
else if (BIF_ARG_1 == am_version) {
- int n = strlen(ERLANG_VERSION);
+ int n = sys_strlen(ERLANG_VERSION);
hp = HAlloc(BIF_P, ((sizeof ERLANG_VERSION)-1) * 2);
BIF_RET(buf_to_intlist(&hp, ERLANG_VERSION, n, NIL));
}
else if (BIF_ARG_1 == am_machine) {
- int n = strlen(EMULATOR);
+ int n = sys_strlen(EMULATOR);
hp = HAlloc(BIF_P, n*2);
BIF_RET(buf_to_intlist(&hp, EMULATOR, n, NIL));
}
@@ -2387,11 +2687,11 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = erts_bld_cons(hpp, hszp,
erts_bld_tuple(hpp, hszp, 2,
erts_atom_put((byte *)opc[i].name,
- strlen(opc[i].name),
+ sys_strlen(opc[i].name),
ERTS_ATOM_ENC_LATIN1,
1),
erts_bld_uint(hpp, hszp,
- opc[i].count)),
+ erts_instr_count[i])),
res);
}
@@ -2410,7 +2710,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
#endif
BIF_RET(res);
-#endif /* #ifndef ERTS_SMP */
+#endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */
} else if (BIF_ARG_1 == am_wordsize) {
return make_small(sizeof(Eterm));
} else if (BIF_ARG_1 == am_endian) {
@@ -2490,11 +2790,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
#endif
} else if (BIF_ARG_1 == am_threads) {
-#ifdef USE_THREADS
return am_true;
-#else
- return am_false;
-#endif
} else if (BIF_ARG_1 == am_creation) {
return make_small(erts_this_node->creation);
} else if (BIF_ARG_1 == am_break_ignored) {
@@ -2553,11 +2849,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp = HAlloc(BIF_P, 2*n);
BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
} else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
-#ifdef ERTS_SMP
BIF_RET(am_true);
-#else
- BIF_RET(am_false);
-#endif
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
BIF_RET(erts_bound_schedulers_term(BIF_P));
} else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
@@ -2569,11 +2861,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = make_small(erts_no_schedulers);
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 4);
- res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
- BIF_RET(res);
-#else
Eterm *hp;
Uint total, online, active;
erts_schedulers_state(&total, &online, &active,
@@ -2584,13 +2871,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
make_small(online),
make_small(active));
BIF_RET(res);
-#endif
} else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 4);
- res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
- BIF_RET(res);
-#else
Eterm *hp;
Uint total, online, active;
erts_schedulers_state(&total, &online, &active,
@@ -2601,19 +2882,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
make_small(online),
make_small(active));
BIF_RET(res);
-#endif
} else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 2+5);
- res = CONS(hp+5,
- TUPLE4(hp,
- am_normal,
- make_small(1),
- make_small(1),
- make_small(1)),
- NIL);
- BIF_RET(res);
-#else
Eterm *hp, tpl;
Uint sz, total, online, active,
dirty_cpu_total, dirty_cpu_online, dirty_cpu_active,
@@ -2659,24 +2928,14 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp += 5;
res = CONS(hp, tpl, res);
BIF_RET(res);
-#endif
} else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(make_small(1));
-#else
Uint online;
erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL);
BIF_RET(make_small(online));
-#endif
} else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(make_small(1));
-#else
Uint active;
erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
BIF_RET(make_small(active));
-#endif
-#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
Uint dirty_cpu;
erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
@@ -2689,7 +2948,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
Uint dirty_io;
erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
BIF_RET(make_small(dirty_io));
-#endif
} else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
res = make_small(erts_no_run_queues);
BIF_RET(res);
@@ -2710,7 +2968,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(make_small(CONTEXT_REDS));
} else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) {
#ifdef ERTS_ENABLE_KERNEL_POLL
- BIF_RET(erts_use_kernel_poll ? am_true : am_false);
+ BIF_RET(am_true);
#else
BIF_RET(am_false);
#endif
@@ -2735,23 +2993,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
BIF_RET(erts_check_io_info(BIF_P));
} else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(NIL);
-#else
if (erts_no_schedulers == 1)
BIF_RET(NIL);
else
BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0));
-#endif
} else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(NIL);
-#else
if (erts_no_schedulers == 1)
BIF_RET(NIL);
else
BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1));
-#endif
} else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
BIF_RET(ERTS_USE_MODIFIED_TIMING()
? make_small(erts_modified_timing_level)
@@ -2814,12 +3064,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_false);
#endif
}
-#ifdef ERTS_SMP
else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
erts_thr_progress_dbg_print_state();
BIF_RET(am_true);
}
-#endif
else if (BIF_ARG_1 == am_message_queue_data) {
switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) {
case SPO_OFF_HEAP_MSGQ:
@@ -2835,21 +3083,21 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
Uint sz;
Eterm res = NIL, tup, text;
Eterm *hp = HAlloc(BIF_P, 3*(2 + 3) + /* three 2-tuples and three cons */
- 2*(strlen(erts_build_flags_CONFIG_H) +
- strlen(erts_build_flags_CFLAGS) +
- strlen(erts_build_flags_LDFLAGS)));
+ 2*(sys_strlen(erts_build_flags_CONFIG_H) +
+ sys_strlen(erts_build_flags_CFLAGS) +
+ sys_strlen(erts_build_flags_LDFLAGS)));
- sz = strlen(erts_build_flags_CONFIG_H);
+ sz = sys_strlen(erts_build_flags_CONFIG_H);
text = buf_to_intlist(&hp, erts_build_flags_CONFIG_H, sz, NIL);
tup = TUPLE2(hp, am_config_h, text); hp += 3;
res = CONS(hp, tup, res); hp += 2;
- sz = strlen(erts_build_flags_CFLAGS);
+ sz = sys_strlen(erts_build_flags_CFLAGS);
text = buf_to_intlist(&hp, erts_build_flags_CFLAGS, sz, NIL);
tup = TUPLE2(hp, am_cflags, text); hp += 3;
res = CONS(hp, tup, res); hp += 2;
- sz = strlen(erts_build_flags_LDFLAGS);
+ sz = sys_strlen(erts_build_flags_LDFLAGS);
text = buf_to_intlist(&hp, erts_build_flags_LDFLAGS, sz, NIL);
tup = TUPLE2(hp, am_ldflags, text); hp += 3;
res = CONS(hp, tup, res); hp += 2;
@@ -2859,6 +3107,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
BIF_RET(make_small(erts_db_get_max_tabs()));
}
+ else if (ERTS_IS_ATOM_STR("ets_count",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_ets_table_count()));
+ }
+ else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_get_atom_limit()));
+ }
+ else if (ERTS_IS_ATOM_STR("atom_count",BIF_ARG_1)) {
+ BIF_RET(make_small(atom_table_size()));
+ }
else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) {
if (erts_has_time_correction()
&& erts_time_offset_state() == ERTS_TIME_OFFSET_FINAL) {
@@ -2867,7 +3124,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_disabled);
}
else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) {
- BIF_RET(erts_eager_check_io ? am_true : am_false);
+ BIF_RET(am_true);
}
else if (ERTS_IS_ATOM_STR("literal_test",BIF_ARG_1)) {
#ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE
@@ -2883,31 +3140,20 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(AM_tag);
#endif
}
- else if (ERTS_IS_ATOM_STR("check_process_code",BIF_ARG_1)) {
- Eterm terms[3];
- Sint length = 1;
- Uint sz = 0;
- Eterm *hp, res;
- DECL_AM(direct_references);
-
- terms[0] = AM_direct_references;
-#if !defined(ERTS_NEW_PURGE_STRATEGY)
- {
- DECL_AM(indirect_references);
- terms[1] = AM_indirect_references;
- terms[2] = am_copy_literals;
- length = 3;
- }
-#endif
- erts_bld_list(NULL, &sz, length, terms);
- hp = HAlloc(BIF_P, sz);
- res = erts_bld_list(&hp, NULL, length, terms);
- BIF_RET(res);
- }
BIF_ERROR(BIF_P, BADARG);
}
+static void monitor_size(ErtsMonitor *mon, void *vsz)
+{
+ *((Uint *) vsz) += erts_monitor_size(mon);
+}
+
+static void link_size(ErtsLink *lnk, void *vsz)
+{
+ *((Uint *) vsz) += erts_link_size(lnk);
+}
+
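The monitor_size()/link_size() helpers above are invoked once per tree node by the foreach routines, so they must add to the caller's counter rather than assign it. A standalone sketch of that accumulate-through-a-callback shape (item and list_foreach are invented stand-ins, not ERTS types):

#include <stdio.h>

typedef struct item { unsigned size; struct item *next; } item;

static void item_size(item *it, void *vsz)
{
    *((unsigned *) vsz) += it->size;   /* '=' here would keep only the last item */
}

static void list_foreach(item *head, void (*fn)(item *, void *), void *arg)
{
    for (item *it = head; it; it = it->next)
        fn(it, arg);
}

int main(void)
{
    item c = { 8, NULL }, b = { 16, &c }, a = { 32, &b };
    unsigned total = 0;
    list_foreach(&a, item_size, &total);
    printf("%u\n", total);             /* 56 */
    return 0;
}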
/**********************************************************************/
/* Return information on ports */
/* Info:
@@ -2926,7 +3172,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
{
Eterm res = THE_NON_VALUE;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
if (item == am_id) {
if (hpp)
@@ -2943,7 +3189,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
INIT_MONITOR_INFOS(mic);
- erts_doforall_links(ERTS_P_LINKS(prt), &collect_one_link, &mic);
+ erts_link_tree_foreach(ERTS_P_LINKS(prt), collect_one_link, (void *) &mic);
if (szp)
*szp += mic.sz;
@@ -2951,7 +3197,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
if (hpp) {
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
res = CONS(*hpp, item, res);
*hpp += 2;
}
@@ -2967,11 +3213,11 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
else if (item == am_monitors) {
MonitorInfoCollection mic;
int i;
- Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(prt),
- &collect_one_origin_monitor, &mic);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
+ collect_one_origin_monitor,
+ (void *) &mic);
if (szp)
*szp += mic.sz;
@@ -2980,11 +3226,10 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
Eterm t;
- Eterm m_type;
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
- m_type = is_port(item) ? am_port : am_process;
- t = TUPLE2(*hpp, m_type, item);
+ ASSERT(mic.mi[i].type == ERTS_MON_TYPE_PORT);
+ ASSERT(is_internal_pid(mic.mi[i].entity.term));
+ t = TUPLE2(*hpp, am_process, mic.mi[i].entity.term);
*hpp += 3;
res = CONS(*hpp, t, res);
*hpp += 2;
@@ -3003,15 +3248,20 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(ERTS_P_MONITORS(prt),
- &collect_one_target_monitor, &mic);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
+ collect_one_target_monitor,
+ (void *) &mic);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
+ collect_one_target_monitor,
+ (void *) &mic);
if (szp)
*szp += mic.sz;
if (hpp) {
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
- item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ ASSERT(mic.mi[i].type != ERTS_MON_TYPE_RESOURCE);
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity.term);
res = CONS(*hpp, item, res);
*hpp += 2;
}
@@ -3087,7 +3337,12 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
*/
Uint size = 0;
- erts_doforall_links(ERTS_P_LINKS(prt), &erts_one_link_size, &size);
+ erts_link_tree_foreach(ERTS_P_LINKS(prt),
+ link_size, (void *) &size);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(prt),
+ monitor_size, (void *) &size);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(prt),
+ monitor_size, (void *) &size);
size += erts_port_data_size(prt);
@@ -3116,9 +3371,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
}
else if (ERTS_IS_ATOM_STR("locking", item)) {
if (hpp) {
-#ifndef ERTS_SMP
- res = am_false;
-#else
if (erts_atomic32_read_nob(&prt->state)
& ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
DECL_AM(port_level);
@@ -3132,7 +3384,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
& ERL_DRV_FLAG_USE_PORT_LOCKING));
res = AM_driver_level;
}
-#endif
}
if (szp) {
res = am_true;
@@ -3145,7 +3396,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
goto done;
}
res = ((ERTS_PTS_FLG_PARALLELISM &
- erts_smp_atomic32_read_nob(&prt->sched.flags))
+ erts_atomic32_read_nob(&prt->sched.flags))
? am_true
: am_false);
}
@@ -3221,7 +3472,7 @@ fun_info_2(BIF_ALIST_2)
}
break;
case am_refc:
- val = erts_make_integer(erts_smp_atomic_read_nob(&funp->fe->refc), p);
+ val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p);
hp = HAlloc(p, 3);
break;
case am_arity:
@@ -3248,7 +3499,7 @@ fun_info_2(BIF_ALIST_2)
break;
case am_module:
hp = HAlloc(p, 3);
- val = exp->code[0];
+ val = exp->info.mfa.module;
break;
case am_new_index:
hp = HAlloc(p, 3);
@@ -3276,11 +3527,11 @@ fun_info_2(BIF_ALIST_2)
break;
case am_arity:
hp = HAlloc(p, 3);
- val = make_small(exp->code[2]);
+ val = make_small(exp->info.mfa.arity);
break;
case am_name:
hp = HAlloc(p, 3);
- val = exp->code[1];
+ val = exp->info.mfa.function;
break;
default:
goto error;
@@ -3306,74 +3557,102 @@ fun_info_mfa_1(BIF_ALIST_1)
} else if (is_export(fun)) {
Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
hp = HAlloc(p, 4);
- BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2])));
+ BIF_RET(TUPLE3(hp,exp->info.mfa.module,
+ exp->info.mfa.function,
+ make_small(exp->info.mfa.arity)));
}
BIF_ERROR(p, BADARG);
}
+BIF_RETTYPE erts_internal_is_process_alive_2(BIF_ALIST_2)
+{
+ if (!is_internal_pid(BIF_ARG_1) || !is_internal_ordinary_ref(BIF_ARG_2))
+ BIF_ERROR(BIF_P, BADARG);
+ erts_proc_sig_send_is_alive_request(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ BIF_RET(am_ok);
+}
+
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
- if(is_internal_pid(BIF_ARG_1)) {
- Process *rp;
-
- if (BIF_ARG_1 == BIF_P->common.id)
- BIF_RET(am_true);
-
- rp = erts_proc_lookup_raw(BIF_ARG_1);
- if (!rp) {
- BIF_RET(am_false);
- }
- else {
- if (erts_smp_atomic32_read_acqb(&rp->state)
- & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING))
- ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false);
- else
- BIF_RET(am_true);
- }
- }
- else if(is_external_pid(BIF_ARG_1)) {
- if(external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ if (is_internal_pid(BIF_ARG_1)) {
+ erts_aint32_t state;
+ Process *rp;
+
+ if (BIF_ARG_1 == BIF_P->common.id)
+ BIF_RET(am_true);
+
+ rp = erts_proc_lookup_raw(BIF_ARG_1);
+ if (!rp)
+ BIF_RET(am_false);
+
+ state = erts_atomic32_read_acqb(&rp->state);
+ if (state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_SIG_Q
+ | ERTS_PSFLG_SIG_IN_Q)) {
+ /*
+ * If in exiting state, trap out and send 'is alive'
+ * request and wait for it to complete termination.
+ *
+ * If process has signals enqueued, we need to
+ * send it an 'is alive' request via its signal
+ * queue in order to ensure that signal order is
+ * preserved (we may earlier have sent it an
+ * exit signal that has not been processed yet).
+ */
+ BIF_TRAP1(is_process_alive_trap, BIF_P, BIF_ARG_1);
+ }
+
+ BIF_RET(am_true);
+ }
+
+ if (is_external_pid(BIF_ARG_1)) {
+ if (external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
BIF_RET(am_false); /* A pid from an old incarnation of this node */
- else
- BIF_ERROR(BIF_P, BADARG);
- }
- else {
- BIF_ERROR(BIF_P, BADARG);
}
+
+ BIF_ERROR(BIF_P, BADARG);
}
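
A compressed sketch of the decision made in is_process_alive_1() above: answer from cheap state flags when possible, and fall back to the signal-ordered request only when the target is exiting or has unhandled signals queued. Everything here (toy_table, the FLG_* values, alive_via_signal_request) is invented for illustration, not ERTS API:

#include <stdbool.h>
#include <stdio.h>

enum { FLG_EXITING = 1 << 0, FLG_SIG_IN_Q = 1 << 1 };

/* Toy process table: index = pid, value = state flags, (unsigned)-1 = none. */
static unsigned toy_table[] = { 0, FLG_SIG_IN_Q, (unsigned)-1 };

/* Stand-in for the order-preserving signal round trip. */
static bool alive_via_signal_request(int pid)
{
    return !(toy_table[pid] & FLG_EXITING);
}

static bool toy_is_process_alive(int self, int pid)
{
    unsigned state;
    if (pid == self)
        return true;                          /* caller is trivially alive */
    state = toy_table[pid];
    if (state == (unsigned)-1)
        return false;                         /* no such process */
    if (state & (FLG_EXITING | FLG_SIG_IN_Q))
        return alive_via_signal_request(pid); /* slow, order-preserving path */
    return true;                              /* fast path */
}

int main(void)
{
    printf("%d %d %d\n",
           toy_is_process_alive(0, 0),   /* 1: self */
           toy_is_process_alive(0, 1),   /* 1: via ordered request */
           toy_is_process_alive(0, 2));  /* 0: unknown pid */
    return 0;
}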
-BIF_RETTYPE process_display_2(BIF_ALIST_2)
+static Eterm
+process_display(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
- Process *rp;
+ if (redsp)
+ *redsp = 1;
- if (BIF_ARG_2 != am_backtrace)
- BIF_ERROR(BIF_P, BADARG);
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return am_badarg;
- rp = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCKS_ALL);
- if(!rp) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_process_display_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) {
- Eterm args[2] = {BIF_ARG_1, BIF_ARG_2};
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_ALL);
- ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P,
- BIF_ARG_1,
- am_erlang,
- am_process_display,
- args,
- 2);
- }
- erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp);
-#ifdef ERTS_SMP
- erts_smp_proc_unlock(rp, (BIF_P == rp
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
-#endif
- BIF_RET(am_true);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_stack_dump(ERTS_PRINT_STDERR, NULL, c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+
+ return am_true;
+}
+
+
+BIF_RETTYPE erts_internal_process_display_2(BIF_ALIST_2)
+{
+ Eterm res;
+
+ if (BIF_ARG_2 != am_backtrace)
+ BIF_RET(am_badarg);
+
+ if (BIF_P->common.id == BIF_ARG_1) {
+ res = process_display(BIF_P, NULL, NULL, NULL);
+ BIF_RET(res);
+ }
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_RET(am_badarg);
+
+ res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
+ !0,
+ process_display,
+ NULL);
+ if (is_non_value(res))
+ BIF_RET(am_badarg);
+
+ BIF_RET(res);
}
/* this is a general call which return some possibly useful information */
@@ -3384,13 +3663,24 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
Eterm* hp;
if (BIF_ARG_1 == am_scheduler_wall_time) {
- res = erts_sched_wall_time_request(BIF_P, 0, 0);
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 0);
if (is_non_value(res))
BIF_RET(am_undefined);
BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
- } else if (BIF_ARG_1 == am_total_active_tasks
- || BIF_ARG_1 == am_total_run_queue_lengths) {
- Uint no = erts_run_queues_len(NULL, 0, BIF_ARG_1 == am_total_active_tasks);
+ } else if (BIF_ARG_1 == am_scheduler_wall_time_all) {
+ res = erts_sched_wall_time_request(BIF_P, 0, 0, 1, 1);
+ if (is_non_value(res))
+ BIF_RET(am_undefined);
+ BIF_TRAP1(gather_sched_wall_time_res_trap, BIF_P, res);
+ } else if ((BIF_ARG_1 == am_total_active_tasks)
+ | (BIF_ARG_1 == am_total_run_queue_lengths)
+ | (BIF_ARG_1 == am_total_active_tasks_all)
+ | (BIF_ARG_1 == am_total_run_queue_lengths_all)) {
+ Uint no = erts_run_queues_len(NULL, 0,
+ ((BIF_ARG_1 == am_total_active_tasks)
+ | (BIF_ARG_1 == am_total_active_tasks_all)),
+ ((BIF_ARG_1 == am_total_active_tasks_all)
+ | (BIF_ARG_1 == am_total_run_queue_lengths_all)));
if (IS_USMALL(0, no))
res = make_small(no);
else {
@@ -3398,13 +3688,21 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = uint_to_big(no, hp);
}
BIF_RET(res);
- } else if (BIF_ARG_1 == am_active_tasks
- || BIF_ARG_1 == am_run_queue_lengths) {
+ } else if ((BIF_ARG_1 == am_active_tasks)
+ | (BIF_ARG_1 == am_run_queue_lengths)
+ | (BIF_ARG_1 == am_active_tasks_all)
+ | (BIF_ARG_1 == am_run_queue_lengths_all)) {
Eterm res, *hp, **hpp;
Uint sz, *szp;
- int no_qs = erts_no_run_queues;
+ int incl_dirty_io = ((BIF_ARG_1 == am_active_tasks_all)
+ | (BIF_ARG_1 == am_run_queue_lengths_all));
+ int no_qs = (erts_no_run_queues + ERTS_NUM_DIRTY_CPU_RUNQS +
+ (incl_dirty_io ? ERTS_NUM_DIRTY_IO_RUNQS : 0));
Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
- (void) erts_run_queues_len(qszs, 0, BIF_ARG_1 == am_active_tasks);
+ (void) erts_run_queues_len(qszs, 0,
+ ((BIF_ARG_1 == am_active_tasks)
+ | (BIF_ARG_1 == am_active_tasks_all)),
+ incl_dirty_io);
sz = 0;
szp = &sz;
hpp = NULL;
@@ -3468,24 +3766,32 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_runtime) {
- UWord u1, u2, dummy;
+ ErtsMonotonicTime u1, u2;
Eterm b1, b2;
- elapsed_time_both(&u1,&dummy,&u2,&dummy);
- b1 = erts_make_integer(u1,BIF_P);
- b2 = erts_make_integer(u2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ erts_runtime_elapsed_both(&u1, NULL, &u2, NULL);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, u1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, u2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, u1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, u2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_run_queue) {
- res = erts_run_queues_len(NULL, 1, 0);
+ res = erts_run_queues_len(NULL, 1, 0, 0);
BIF_RET(make_small(res));
} else if (BIF_ARG_1 == am_wall_clock) {
- UWord w1, w2;
+ ErtsMonotonicTime w1, w2;
Eterm b1, b2;
- wall_clock_elapsed_time_both(&w1, &w2);
- b1 = erts_make_integer((Uint) w1,BIF_P);
- b2 = erts_make_integer((Uint) w2,BIF_P);
- hp = HAlloc(BIF_P,3);
+ Uint hsz;
+ erts_wall_clock_elapsed_both(&w1, &w2);
+ hsz = 3; /* 2-tuple */
+ (void) erts_bld_monotonic_time(NULL, &hsz, w1);
+ (void) erts_bld_monotonic_time(NULL, &hsz, w2);
+ hp = HAlloc(BIF_P, hsz);
+ b1 = erts_bld_monotonic_time(&hp, NULL, w1);
+ b2 = erts_bld_monotonic_time(&hp, NULL, w2);
res = TUPLE2(hp, b1, b2);
BIF_RET(res);
} else if (BIF_ARG_1 == am_io) {
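The runtime and wall_clock branches above follow the erts_bld_* two-pass convention: the same builder is first called with only a size accumulator, and then called again with a heap pointer once a block of exactly that size has been allocated. A minimal standalone sketch of that convention (bld_pair is a made-up example builder, not an ERTS function):

#include <stdio.h>
#include <stdlib.h>

/* Builds a pair into *bufp if given, otherwise just counts words in *szp. */
static size_t bld_pair(long **bufp, size_t *szp, long a, long b)
{
    if (szp)
        *szp += 2;
    if (bufp) {
        (*bufp)[0] = a;
        (*bufp)[1] = b;
        *bufp += 2;
    }
    return 2;
}

int main(void)
{
    size_t sz = 0;
    (void) bld_pair(NULL, &sz, 3, 4);       /* sizing pass */
    long *heap = malloc(sz * sizeof(long));
    long *hp = heap;
    (void) bld_pair(&hp, NULL, 3, 4);       /* building pass */
    printf("%zu words: %ld %ld\n", sz, heap[0], heap[1]);
    free(heap);
    return 0;
}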
@@ -3495,9 +3801,9 @@ BIF_RETTYPE statistics_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
Eterm res, *hp, **hpp;
Uint sz, *szp;
- int no_qs = erts_no_run_queues;
+ int no_qs = erts_no_run_queues + ERTS_NUM_DIRTY_RUNQS;
Uint *qszs = erts_alloc(ERTS_ALC_T_TMP,sizeof(Uint)*no_qs*2);
- (void) erts_run_queues_len(qszs, 0, 0);
+ (void) erts_run_queues_len(qszs, 0, 0, 1);
sz = 0;
szp = &sz;
hpp = NULL;
@@ -3523,7 +3829,12 @@ BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0)
BIF_RET(erts_error_logger_warnings);
}
-static erts_smp_atomic_t available_internal_state;
+static erts_atomic_t available_internal_state;
+
+static int empty_magic_ref_destructor(Binary *bin)
+{
+ return 1;
+}
BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
{
@@ -3531,7 +3842,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
* NOTE: Only supposed to be used for testing, and debugging.
*/
- if (!erts_smp_atomic_read_nob(&available_internal_state)) {
+ if (!erts_atomic_read_nob(&available_internal_state)) {
BIF_ERROR(BIF_P, EXC_UNDEF);
}
@@ -3563,7 +3874,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("DbTable_words", BIF_ARG_1)) {
/* Used by ets_SUITE (stdlib) */
size_t words = (sizeof(DbTable) + sizeof(Uint) - 1)/sizeof(Uint);
- BIF_RET(make_small((Uint) words));
+ Eterm* hp = HAlloc(BIF_P ,3);
+ BIF_RET(TUPLE2(hp, make_small((Uint) words),
+ erts_ets_hash_sizeof_ext_segtab()));
}
else if (ERTS_IS_ATOM_STR("check_io_debug", BIF_ARG_1)) {
/* Used by driver_SUITE (emulator) */
@@ -3572,9 +3885,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
int no_errors;
ErtsCheckIoDebugInfo ciodi = {0};
#ifdef HAVE_ERTS_CHECK_IO_DEBUG
- erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN);
no_errors = erts_check_io_debug(&ciodi);
- erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN);
#else
no_errors = 0;
#endif
@@ -3589,8 +3902,8 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
(Uint) ciodi.no_used_fds),
erts_bld_uint(hpp, szp,
(Uint) ciodi.no_driver_select_structs),
- erts_bld_uint(hpp, szp,
- (Uint) ciodi.no_driver_event_structs));
+ erts_bld_uint(hpp, szp,
+ (Uint) ciodi.no_enif_select_structs));
if (hpp)
break;
hp = HAlloc(BIF_P, sz);
@@ -3605,7 +3918,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
Eterm res = NIL;
Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
- res = CONS(hp, pi_args[i], res);
+ res = CONS(hp, pi_args[i].name, res);
hp += 2;
}
BIF_RET(res);
@@ -3624,9 +3937,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) {
Uint n;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
n = erts_debug_nbalance();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(erts_make_integer(n, BIF_P));
}
else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) {
@@ -3641,11 +3954,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) {
Eterm res;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_block();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE);
- erts_smp_thr_progress_unblock();
+ erts_thr_progress_unblock();
BIF_RET(res);
}
else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
@@ -3674,6 +3987,36 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_sint64_to_big(value, &hp));
}
}
+ else if (ERTS_IS_ATOM_STR("stack_check", BIF_ARG_1)) {
+ UWord size;
+ char c;
+ if (erts_is_above_stack_limit(&c))
+ size = erts_check_stack_recursion_downwards(&c);
+ else
+ size = erts_check_stack_recursion_upwards(&c);
+ if (IS_SSMALL(size))
+ BIF_RET(make_small(size));
+ else {
+ Uint hsz = BIG_UWORD_HEAP_SIZE(size);
+ Eterm *hp = HAlloc(BIF_P, hsz);
+ BIF_RET(uword_to_big(size, hp));
+ }
+ } else if (ERTS_IS_ATOM_STR("scheduler_dump", BIF_ARG_1)) {
+#if defined(ERTS_HAVE_TRY_CATCH) && defined(ERTS_SYS_SUSPEND_SIGNAL)
+ BIF_RET(am_true);
+#else
+ BIF_RET(am_false);
+#endif
+ }
+ else if (ERTS_IS_ATOM_STR("lc_graph", BIF_ARG_1)) {
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ Eterm res = erts_lc_dump_graph();
+ BIF_RET(res);
+#else
+ BIF_RET(am_notsup);
+#endif
+ }
+
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
@@ -3686,22 +4029,59 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_process_status(NULL, tp[2]));
}
}
+ else if (ERTS_IS_ATOM_STR("connection_id", tp[1])) {
+ DistEntry *dep;
+ Eterm *hp, res;
+ Uint con_id, hsz = 0;
+ if (!is_atom(tp[2]))
+ BIF_ERROR(BIF_P, BADARG);
+ dep = erts_sysname_to_connected_dist_entry(tp[2]);
+ if (!dep)
+ BIF_ERROR(BIF_P, BADARG);
+ erts_de_rlock(dep);
+ con_id = (Uint) dep->connection_id;
+ erts_de_runlock(dep);
+ (void) erts_bld_uint(NULL, &hsz, con_id);
+ hp = hsz ? HAlloc(BIF_P, hsz) : NULL;
+ res = erts_bld_uint(&hp, NULL, con_id);
+ BIF_RET(res);
+ }
else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
/* Used by erl_link_SUITE (emulator) */
if(is_internal_pid(tp[2])) {
+ erts_aint32_t state;
Eterm res;
Process *p;
+ int sigs_done, local_only;
p = erts_pid2proc(BIF_P,
ERTS_PROC_LOCK_MAIN,
tp[2],
- ERTS_PROC_LOCK_LINK);
+ ERTS_PROC_LOCK_MAIN);
if (!p) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
- res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+
+ local_only = 0;
+ do {
+ int reds = CONTEXT_REDS;
+ sigs_done = erts_proc_sig_handle_incoming(p,
+ &state,
+ &reds,
+ CONTEXT_REDS,
+ local_only);
+ local_only = !0;
+ } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
+
+ if (!(state & ERTS_PSFLG_EXITING))
+ res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
+ else if (BIF_P == p)
+ ERTS_BIF_EXITED(BIF_P);
+ else
+ res = am_undefined;
+ if (BIF_P != p)
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
}
else if(is_internal_port(tp[2])) {
@@ -3712,20 +4092,20 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
ERTS_PORT_SFLGS_INVALID_LOOKUP);
if(!p)
BIF_RET(am_undefined);
- res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL);
+ res = make_link_list(BIF_P, 1, ERTS_P_LINKS(p), NIL);
erts_port_release(p);
BIF_RET(res);
}
else if(is_node_name_atom(tp[2])) {
DistEntry *dep = erts_find_dist_entry(tp[2]);
if(dep) {
- Eterm subres;
- erts_smp_de_links_lock(dep);
- subres = make_link_list(BIF_P, dep->nlinks, NIL);
- subres = make_link_list(BIF_P, dep->node_links, subres);
- erts_smp_de_links_unlock(dep);
- erts_deref_dist_entry(dep);
- BIF_RET(subres);
+ Eterm res = NIL;
+ if (dep->mld) {
+ erts_mtx_lock(&dep->mld->mtx);
+ res = make_link_list(BIF_P, 0, dep->mld->links, NIL);
+ erts_mtx_unlock(&dep->mld->mtx);
+ }
+ BIF_RET(res);
} else {
BIF_RET(am_undefined);
}
@@ -3734,28 +4114,54 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("monitor_list", tp[1])) {
/* Used by erl_link_SUITE (emulator) */
if(is_internal_pid(tp[2])) {
+ erts_aint32_t state;
Process *p;
Eterm res;
+ int sigs_done, local_only;
p = erts_pid2proc(BIF_P,
ERTS_PROC_LOCK_MAIN,
tp[2],
- ERTS_PROC_LOCK_LINK);
+ ERTS_PROC_LOCK_MAIN);
if (!p) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
+ ERTS_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
- res = make_monitor_list(BIF_P, ERTS_P_MONITORS(p));
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
+
+ local_only = 0;
+ do {
+ int reds = CONTEXT_REDS;
+ sigs_done = erts_proc_sig_handle_incoming(p,
+ &state,
+ &reds,
+ CONTEXT_REDS,
+ local_only);
+ local_only = !0;
+ } while (!sigs_done && !(state & ERTS_PSFLG_EXITING));
+
+ if (!(state & ERTS_PSFLG_EXITING)) {
+ res = make_monitor_list(BIF_P, 1, ERTS_P_MONITORS(p), NIL);
+ res = make_monitor_list(BIF_P, 0, ERTS_P_LT_MONITORS(p), res);
+ }
+ else {
+ if (BIF_P == p)
+ ERTS_BIF_EXITED(BIF_P);
+ else
+ res = am_undefined;
+ }
+ if (BIF_P != p)
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
BIF_RET(res);
} else if(is_node_name_atom(tp[2])) {
DistEntry *dep = erts_find_dist_entry(tp[2]);
if(dep) {
- Eterm ml;
- erts_smp_de_links_lock(dep);
- ml = make_monitor_list(BIF_P, dep->monitors);
- erts_smp_de_links_unlock(dep);
- erts_deref_dist_entry(dep);
+ Eterm ml = NIL;
+ if (dep->mld) {
+ erts_mtx_lock(&dep->mld->mtx);
+ ml = make_monitor_list(BIF_P, 1, dep->mld->orig_name_monitors, NIL);
+ ml = make_monitor_list(BIF_P, 0, dep->mld->monitors, ml);
+ erts_mtx_unlock(&dep->mld->mtx);
+ }
BIF_RET(ml);
} else {
BIF_RET(am_undefined);
@@ -3770,22 +4176,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
else {
Uint cno = dist_entry_channel_no(dep);
res = make_small(cno);
- erts_deref_dist_entry(dep);
}
BIF_RET(res);
}
- else if (ERTS_IS_ATOM_STR("have_pending_exit", tp[1])) {
- Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- tp[2], ERTS_PROC_LOCK_STATUS);
- if (!rp) {
- BIF_RET(am_undefined);
- }
- else {
- Eterm res = ERTS_PROC_PENDING_EXIT(rp) ? am_true : am_false;
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- BIF_RET(res);
- }
- }
else if (ERTS_IS_ATOM_STR("binary_info", tp[1])) {
Eterm bin = tp[2];
if (is_binary(bin)) {
@@ -3826,21 +4219,20 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(res);
}
}
- else if (ERTS_IS_ATOM_STR("term_to_binary_no_funs", tp[1])) {
- Uint dflags = (DFLAG_EXTENDED_REFERENCES |
- DFLAG_EXTENDED_PIDS_PORTS |
- DFLAG_BIT_BINARIES);
+ else if (ERTS_IS_ATOM_STR("term_to_binary_tuple_fallbacks", tp[1])) {
+ Uint dflags = (TERM_TO_BINARY_DFLAGS
+ & ~DFLAG_EXPORT_PTR_TAG
+ & ~DFLAG_BIT_BINARIES);
BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags));
}
- else if (ERTS_IS_ATOM_STR("dist_port", tp[1])) {
+ else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) {
Eterm res = am_undefined;
DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]);
if (dep) {
- erts_smp_de_rlock(dep);
- if (is_internal_port(dep->cid))
+ erts_de_rlock(dep);
+ if (is_internal_port(dep->cid) || is_internal_pid(dep->cid))
res = dep->cid;
- erts_smp_de_runlock(dep);
- erts_deref_dist_entry(dep);
+ erts_de_runlock(dep);
}
BIF_RET(res);
}
@@ -3866,7 +4258,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(erts_debug_reader_groups_map(BIF_P, (int) groups));
}
else if (ERTS_IS_ATOM_STR("internal_hash", tp[1])) {
- Uint hash = (Uint) make_internal_hash(tp[2]);
+ Uint hash = (Uint) make_internal_hash(tp[2], 0);
Uint hsz = 0;
Eterm* hp;
erts_bld_uint(NULL, &hsz, hash);
@@ -3880,10 +4272,38 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
while (ix >= atom_table_size()) {
char tmp[20];
erts_snprintf(tmp, sizeof(tmp), "am%x", atom_table_size());
- erts_atom_put((byte *) tmp, strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
+ erts_atom_put((byte *) tmp, sys_strlen(tmp), ERTS_ATOM_ENC_LATIN1, 1);
}
return make_atom(ix);
}
+ else if (ERTS_IS_ATOM_STR("magic_ref", tp[1])) {
+ Binary *bin;
+ UWord bin_addr, refc;
+ Eterm bin_addr_term, refc_term, test_type;
+ Uint sz;
+ Eterm *hp;
+ if (!is_internal_magic_ref(tp[2])) {
+ if (is_internal_ordinary_ref(tp[2])) {
+ ErtsORefThing *rtp;
+ rtp = (ErtsORefThing *) internal_ref_val(tp[2]);
+ if (erts_is_ref_numbers_magic(rtp->num))
+ BIF_RET(am_true);
+ }
+ BIF_RET(am_false);
+ }
+ bin = erts_magic_ref2bin(tp[2]);
+ refc = erts_refc_read(&bin->intern.refc, 1);
+ bin_addr = (UWord) bin;
+ sz = 4;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ erts_bld_uword(NULL, &sz, refc);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ refc_term = erts_bld_uword(&hp, NULL, refc);
+ test_type = (ERTS_MAGIC_BIN_DESTRUCTOR(bin) == empty_magic_ref_destructor
+ ? am_true : am_false);
+ BIF_RET(TUPLE3(hp, bin_addr_term, refc_term, test_type));
+ }
break;
}
@@ -3956,7 +4376,7 @@ BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-static erts_smp_atomic_t hipe_test_reschedule_flag;
+static erts_atomic_t hipe_test_reschedule_flag;
#if defined(VALGRIND) && defined(__GNUC__)
/* Force noinline for valgrind suppression */
@@ -3972,7 +4392,6 @@ static void broken_halt_test(Eterm bif_arg_2)
erts_exit(ERTS_DUMP_EXIT, "%T", bif_arg_2);
}
-
BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
{
/*
@@ -3981,7 +4400,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)
&& (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) {
erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true);
- erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on);
+ erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on);
if (on) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id);
@@ -3997,7 +4416,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(prev_on ? am_true : am_false);
}
- if (!erts_smp_atomic_read_nob(&available_internal_state)) {
+ if (!erts_atomic_read_nob(&available_internal_state)) {
BIF_ERROR(BIF_P, EXC_UNDEF);
}
@@ -4021,13 +4440,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Sint ms;
if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
if (ms > 0) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
if (block)
- erts_smp_thr_progress_block();
+ erts_thr_progress_block();
while (erts_milli_sleep((long) ms) != 0);
if (block)
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_thr_progress_unblock();
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
}
@@ -4036,9 +4455,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Sint ms;
if (term_to_Sint(BIF_ARG_2, &ms) != 0) {
if (ms > 0) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
while (erts_milli_sleep((long) ms) != 0);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
}
BIF_RET(am_true);
}
@@ -4086,61 +4505,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
erts_set_gc_state(BIF_P, enable);
BIF_RET(res);
}
- else if (ERTS_IS_ATOM_STR("send_fake_exit_signal", BIF_ARG_1)) {
- /* Used by signal_SUITE (emulator) */
-
- /* Testcases depend on the exit being received via
- a pending exit when the receiver is the same as
- the caller. */
- if (is_tuple(BIF_ARG_2)) {
- Eterm* tp = tuple_val(BIF_ARG_2);
- if (arityval(tp[0]) == 3
- && (is_pid(tp[1]) || is_port(tp[1]))
- && is_internal_pid(tp[2])) {
- int xres;
- ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- tp[2], rp_locks);
- if (!rp) {
- DECL_AM(dead);
- BIF_RET(AM_dead);
- }
-
-#ifdef ERTS_SMP
- if (BIF_P == rp)
- rp_locks |= ERTS_PROC_LOCK_MAIN;
-#endif
- xres = erts_send_exit_signal(NULL, /* NULL in order to
- force a pending exit
- when we send to our
- selves. */
- tp[1],
- rp,
- &rp_locks,
- tp[3],
- NIL,
- NULL,
- 0);
-#ifdef ERTS_SMP
- if (BIF_P == rp)
- rp_locks &= ~ERTS_PROC_LOCK_MAIN;
-#endif
- erts_smp_proc_unlock(rp, rp_locks);
- if (xres > 1) {
- DECL_AM(message);
- BIF_RET(AM_message);
- }
- else if (xres == 0) {
- DECL_AM(unaffected);
- BIF_RET(AM_unaffected);
- }
- else {
- DECL_AM(exit);
- BIF_RET(AM_exit);
- }
- }
- }
- }
else if (ERTS_IS_ATOM_STR("colliding_names", BIF_ARG_1)) {
/* Used by ets_SUITE (stdlib) */
if (is_tuple(BIF_ARG_2)) {
@@ -4187,14 +4551,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) {
/* Used by hipe test suites */
- erts_aint_t flag = erts_smp_atomic_read_nob(&hipe_test_reschedule_flag);
+ erts_aint_t flag = erts_atomic_read_nob(&hipe_test_reschedule_flag);
if (!flag && BIF_ARG_2 != am_false) {
- erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, 1);
+ erts_atomic_set_nob(&hipe_test_reschedule_flag, 1);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
}
- erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
+ erts_atomic_set_nob(&hipe_test_reschedule_flag, !flag);
BIF_RET(NIL);
}
else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) {
@@ -4205,7 +4569,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
if (rp) {
erts_resume(rp, ERTS_PROC_LOCK_STATUS);
res = am_true;
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
}
BIF_RET(res);
}
@@ -4222,39 +4586,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_false);
else {
Uint32 con_id;
- erts_smp_de_rlock(dep);
+ erts_de_rlock(dep);
con_id = dep->connection_id;
- erts_smp_de_runlock(dep);
+ erts_de_runlock(dep);
erts_kill_dist_connection(dep, con_id);
- erts_deref_dist_entry(dep);
BIF_RET(am_true);
}
}
- else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) {
-#ifdef ERTS_SMP
- int old_use_opt, use_opt;
- switch (BIF_ARG_2) {
- case am_true:
- use_opt = 1;
- break;
- case am_false:
- use_opt = 0;
- break;
- default:
- BIF_ERROR(BIF_P, BADARG);
- }
-
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- old_use_opt = !erts_disable_proc_not_running_opt;
- erts_disable_proc_not_running_opt = !use_opt;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(old_use_opt ? am_true : am_false);
-#else
- BIF_ERROR(BIF_P, EXC_NOTSUP);
-#endif
- }
else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
@@ -4281,9 +4619,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Sint64 msecs;
if (term_to_Sint64(BIF_ARG_2, &msecs)) {
/* Negative value restore original value... */
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_debug_test_node_tab_delayed_delete(msecs);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(am_ok);
}
}
@@ -4298,54 +4636,210 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
BIF_RET(am_ok);
}
+ else if (ERTS_IS_ATOM_STR("make", BIF_ARG_1)) {
+ if (ERTS_IS_ATOM_STR("magic_ref", BIF_ARG_2)) {
+ Binary *bin = erts_create_magic_binary(0, empty_magic_ref_destructor);
+ UWord bin_addr = (UWord) bin;
+ Eterm bin_addr_term, magic_ref, res;
+ Eterm *hp;
+ Uint sz = ERTS_MAGIC_REF_THING_SIZE + 3;
+ erts_bld_uword(NULL, &sz, bin_addr);
+ hp = HAlloc(BIF_P, sz);
+ bin_addr_term = erts_bld_uword(&hp, NULL, bin_addr);
+ magic_ref = erts_mk_magic_ref(&hp, &BIF_P->off_heap, bin);
+ res = TUPLE2(hp, magic_ref, bin_addr_term);
+ BIF_RET(res);
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("binary", BIF_ARG_1)) {
+ Sint64 size;
+ if (term_to_Sint64(BIF_ARG_2, &size)) {
+ Binary* refbin = erts_bin_drv_alloc_fnf(size);
+ if (!refbin)
+ BIF_RET(am_false);
+ sys_memset(refbin->orig_bytes, 0, size);
+ BIF_RET(erts_build_proc_bin(&MSO(BIF_P),
+ HAlloc(BIF_P, PROC_BIN_SIZE),
+ refbin));
+ }
+ }
+ else if (ERTS_IS_ATOM_STR("ets_force_trap", BIF_ARG_1)) {
+#ifdef ETS_DBG_FORCE_TRAP
+ erts_ets_dbg_force_trap = (BIF_ARG_2 == am_true) ? 1 : 0;
+ BIF_RET(am_ok);
+#else
+ BIF_RET(am_notsup);
+#endif
+ }
}
BIF_ERROR(BIF_P, BADARG);
}
+static BIF_RETTYPE
+gather_histograms_helper(Process * c_p, Eterm arg_tuple,
+ int gather(Process *, int, int, int, UWord, Eterm))
+{
+ SWord hist_start, hist_width, sched_id;
+ int msg_count, alloc_num;
+ Eterm *args;
+
+ /* This is an internal BIF, so the error checking is mostly left to erlang
+ * code. */
+
+ ASSERT(is_tuple_arity(arg_tuple, 5));
+ args = tuple_val(arg_tuple);
+
+ for (alloc_num = ERTS_ALC_A_MIN; alloc_num <= ERTS_ALC_A_MAX; alloc_num++) {
+ if(erts_is_atom_str(ERTS_ALC_A2AD(alloc_num), args[1], 0)) {
+ break;
+ }
+ }
+
+ if (alloc_num > ERTS_ALC_A_MAX) {
+ BIF_ERROR(c_p, BADARG);
+ }
+
+ sched_id = signed_val(args[2]);
+ hist_width = signed_val(args[3]);
+ hist_start = signed_val(args[4]);
+
+ if (sched_id < 0 || sched_id > erts_no_schedulers) {
+ BIF_ERROR(c_p, BADARG);
+ }
+
+ msg_count = gather(c_p, alloc_num, sched_id, hist_width, hist_start, args[5]);
+
+ BIF_RET(make_small(msg_count));
+}
+
+BIF_RETTYPE erts_internal_gather_alloc_histograms_1(BIF_ALIST_1)
+{
+ return gather_histograms_helper(BIF_P, BIF_ARG_1,
+ erts_alcu_gather_alloc_histograms);
+}
+
+BIF_RETTYPE erts_internal_gather_carrier_info_1(BIF_ALIST_1)
+{
+ return gather_histograms_helper(BIF_P, BIF_ARG_1,
+ erts_alcu_gather_carrier_info);
+}
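+
Aside for readers skimming the hunk above: the argument handling in gather_histograms_helper() comes down to two checks, resolving the allocator atom by scanning the allocator table and range-checking the scheduler id. The standalone C sketch below mirrors only that logic; it is not ERTS code, and allocator_names, lookup_allocator and no_schedulers are made-up stand-ins (error handling beyond the two checks is omitted).

#include <stdio.h>
#include <string.h>

/* Made-up allocator table standing in for ERTS_ALC_A_MIN..ERTS_ALC_A_MAX. */
static const char *allocator_names[] = { "binary_alloc", "eheap_alloc", "fix_alloc" };
#define NUM_ALLOCATORS (sizeof(allocator_names) / sizeof(allocator_names[0]))

/* Scan the table by name; returning -1 corresponds to the loop in the BIF
 * running past the last allocator without a match. */
static int lookup_allocator(const char *name)
{
    size_t i;
    for (i = 0; i < NUM_ALLOCATORS; i++) {
        if (strcmp(allocator_names[i], name) == 0)
            return (int) i;
    }
    return -1;
}

int main(void)
{
    long no_schedulers = 8;   /* stand-in for erts_no_schedulers */
    long sched_id = 3;        /* scheduler id taken from the argument tuple */
    int alloc = lookup_allocator("eheap_alloc");

    if (alloc < 0 || sched_id < 0 || sched_id > no_schedulers) {
        fprintf(stderr, "badarg\n");   /* the BIF raises BADARG at this point */
        return 1;
    }
    printf("allocator #%d, scheduler %ld\n", alloc, sched_id);
    return 0;
}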
+
#ifdef ERTS_ENABLE_LOCK_COUNT
+
+typedef struct {
+ /* info->location_count may increase between size calculation and term
+     * building, so we cap it at the value sampled in lcnt_build_sample_vector.
+ *
+ * Shrinking is safe though. */
+ int max_location_count;
+ erts_lcnt_lock_info_t *info;
+} lcnt_sample_t;
+
+typedef struct lcnt_sample_vector_ {
+ lcnt_sample_t *elements;
+ size_t size;
+} lcnt_sample_vector_t;
+
+static lcnt_sample_vector_t lcnt_build_sample_vector(erts_lcnt_lock_info_list_t *list) {
+ erts_lcnt_lock_info_t *iterator;
+ lcnt_sample_vector_t result;
+ size_t allocated_entries;
+
+ allocated_entries = 64;
+ result.size = 0;
+
+ result.elements = erts_alloc(ERTS_ALC_T_LCNT_VECTOR,
+ allocated_entries * sizeof(lcnt_sample_t));
+
+ iterator = NULL;
+ while(erts_lcnt_iterate_list(list, &iterator)) {
+ erts_lcnt_retain_lock_info(iterator);
+
+ result.elements[result.size].max_location_count = iterator->location_count;
+ result.elements[result.size].info = iterator;
+
+ result.size++;
+
+ if(result.size >= allocated_entries) {
+ allocated_entries *= 2;
+
+ result.elements = erts_realloc(ERTS_ALC_T_LCNT_VECTOR, result.elements,
+ allocated_entries * sizeof(lcnt_sample_t));
+ }
+ }
+
+ return result;
+}
+
+static void lcnt_destroy_sample_vector(lcnt_sample_vector_t *vector) {
+ size_t i;
+
+ for(i = 0; i < vector->size; i++) {
+ erts_lcnt_release_lock_info(vector->elements[i].info);
+ }
+
+ erts_free(ERTS_ALC_T_LCNT_VECTOR, vector->elements);
+}
+
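Aside: lcnt_build_sample_vector() above takes a snapshot of a lock list that may keep changing, retaining each entry and growing its backing array by doubling. The standalone sketch below shows only the doubling-growth part, with malloc/realloc standing in for erts_alloc/erts_realloc; the retain/release bookkeeping and allocation-failure handling are omitted, and all names are made up.

#include <stdio.h>
#include <stdlib.h>

typedef struct { int *elements; size_t size; } int_vector_t;

/* Copy 'count' integers into a freshly allocated vector, doubling the
 * capacity whenever it fills up (the BIF starts at 64 entries). */
static int_vector_t snapshot(const int *src, size_t count)
{
    size_t allocated = 4;
    int_vector_t result;
    size_t i;

    result.size = 0;
    result.elements = malloc(allocated * sizeof(int));

    for (i = 0; i < count; i++) {
        result.elements[result.size] = src[i];
        result.size++;

        if (result.size >= allocated) {
            allocated *= 2;
            result.elements = realloc(result.elements,
                                      allocated * sizeof(int));
        }
    }
    return result;
}

int main(void)
{
    int data[] = { 1, 2, 3, 4, 5, 6, 7 };
    int_vector_t v = snapshot(data, sizeof(data) / sizeof(data[0]));

    printf("copied %zu elements, last = %d\n", v.size, v.elements[v.size - 1]);
    free(v.elements);
    return 0;
}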
+/* The size of an integer is not guaranteed to be constant since we're walking
+ * over live data, and may cross over into bignum territory between size calc
+ * and the actual build. This takes care of that by always assuming the
+ * worst, but needs to be fixed up with HRelease once the final term has been
+ * built. */
+static ERTS_INLINE Eterm bld_unstable_uint64(Uint **hpp, Uint *szp, Uint64 ui) {
+ Eterm res = THE_NON_VALUE;
+
+ if(szp) {
+ *szp += ERTS_UINT64_HEAP_SIZE(~((Uint64) 0));
+ }
+
+ if(hpp) {
+ if (IS_USMALL(0, ui)) {
+ res = make_small(ui);
+ } else {
+ res = erts_uint64_to_big(ui, hpp);
+ }
+ }
+
+ return res;
+}
+
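Aside: bld_unstable_uint64() only works because the caller first runs a size pass that reserves the worst case and then a build pass that may use less, releasing the unused tail afterwards (erts_debug_lcnt_collect_0 further down pairs HAlloc with HRelease for exactly this). The standalone sketch below illustrates the same reserve-worst-case-then-shrink idea with an ordinary character buffer; it is not ERTS code, the constants are made up, and allocation-failure handling is omitted.

#include <stdio.h>
#include <stdlib.h>

/* Worst case for one value: 20 decimal digits plus a separating space. */
#define WORST_CASE_PER_VALUE 21

int main(void)
{
    unsigned long long values[] = { 7, 123456789ULL, 18446744073709551615ULL };
    size_t n = sizeof(values) / sizeof(values[0]);
    size_t reserved = n * WORST_CASE_PER_VALUE + 1; /* "size" pass: assume the worst */
    char *buf = malloc(reserved);
    size_t used = 0;
    size_t i;

    for (i = 0; i < n; i++)                         /* "build" pass: may use less   */
        used += (size_t) sprintf(buf + used, "%llu ", values[i]);

    buf = realloc(buf, used + 1);                   /* give back the unused tail,   */
                                                    /* roughly what HRelease does   */
    printf("reserved %zu bytes, used %zu: %s\n", reserved, used, buf);
    free(buf);
    return 0;
}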
static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_stats_t *stats, Eterm res) {
- Uint tries = 0, colls = 0;
- unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
- unsigned int line = 0;
unsigned int i;
-
+ const char *file;
+
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
-
+
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}},
- * { .. histogram .. }]
- */
+ * [{{file, line},
+     *     {tries, colls, {seconds, nanoseconds, n_blocks}},
+ * { .. histogram .. }] */
- tries = (Uint) ethr_atomic_read(&stats->tries);
- colls = (Uint) ethr_atomic_read(&stats->colls);
-
- line = stats->line;
- timer_s = stats->timer.s;
- timer_ns = stats->timer.ns;
- timer_n = stats->timer_n;
-
- af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
- uil = erts_bld_uint( hpp, szp, line);
+ file = stats->file ? stats->file : "undefined";
+
+ af = erts_atom_put((byte *)file, sys_strlen(file), ERTS_ATOM_ENC_LATIN1, 1);
+ uil = erts_bld_uint( hpp, szp, stats->line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
-
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
- uits = erts_bld_uint( hpp, szp, timer_s);
- uitns = erts_bld_uint( hpp, szp, timer_ns);
- uitn = erts_bld_uint( hpp, szp, timer_n);
+ uit = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->attempts));
+ uic = bld_unstable_uint64(hpp, szp, (Uint)ethr_atomic_read(&stats->collisions));
+
+ uits = bld_unstable_uint64(hpp, szp, stats->total_time_waited.s);
+ uitns = bld_unstable_uint64(hpp, szp, stats->total_time_waited.ns);
+ uitn = bld_unstable_uint64(hpp, szp, stats->times_waited);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
- vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]);
+ vhist[i] = bld_unstable_uint64(hpp, szp, stats->wait_time_histogram.ns[i]);
}
+
thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
@@ -4354,198 +4848,279 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
return res;
}
-static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock, Eterm res) {
+static Eterm lcnt_pretty_print_lock_id(erts_lcnt_lock_info_t *info) {
+ Eterm id = info->id;
+
+ if((info->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == ERTS_LOCK_FLAGS_TYPE_PROCLOCK) {
+ /* Use registered names as id's for process locks if available. Thread
+ * progress is delayed since we may be running on a dirty scheduler. */
+ ErtsThrPrgrDelayHandle delay_handle;
+ Process *process;
+
+ delay_handle = erts_thr_progress_unmanaged_delay();
+
+ process = erts_proc_lookup(info->id);
+ if (process && process->common.u.alive.reg) {
+ id = process->common.u.alive.reg->name;
+ }
+
+ erts_thr_progress_unmanaged_continue(delay_handle);
+ } else if(info->flags & ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR) {
+ if(is_small(id) && !sys_strcmp(info->name, "alcu_allocator")) {
+ const char *name = (const char*)ERTS_ALC_A2AD(signed_val(id));
+ id = erts_atom_put((byte*)name, sys_strlen(name), ERTS_ATOM_ENC_LATIN1, 1);
+ }
+ }
+
+ return id;
+}
+
+static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, lcnt_sample_t *sample, Eterm res) {
+ erts_lcnt_lock_info_t *info = sample->info;
+
Eterm name, type, id, stats = NIL, t;
- Process *proc = NULL;
- char *ltype;
+ const char *lock_desc;
int i;
+
+ /* term: [{name, id, type, stats()}] */
+
+ ASSERT(info->name);
- /* term:
- * [{name, id, type, stats()}]
- */
-
- ASSERT(lock->name);
-
- ltype = erts_lcnt_lock_type(lock->flag);
-
- ASSERT(ltype);
-
- type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
-
- if (lock->flag & ERTS_LCNT_LT_ALLOC) {
- /* use allocator types names as id's for allocator locks */
- ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
- /* use registered names as id's for process locks if available */
- proc = erts_proc_lookup(lock->id);
- if (proc && proc->common.u.alive.reg) {
- id = proc->common.u.alive.reg->name;
- } else {
- /* otherwise use process id */
- id = lock->id;
- }
+ lock_desc = erts_lock_flags_get_type_name(info->flags);
+
+ type = erts_atom_put((byte*)lock_desc, sys_strlen(lock_desc), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte*)info->name, sys_strlen(info->name), ERTS_ATOM_ENC_LATIN1, 1);
+
+ /* Only attempt to resolve ids when actually emitting the term. This ought
+ * to be safe since all immediates are the same size. */
+ if(hpp != NULL) {
+ id = lcnt_pretty_print_lock_id(info);
} else {
- id = lock->id;
+ id = NIL;
}
- for (i = 0; i < lock->n_stats; i++) {
- stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
+ for(i = 0; i < MIN(info->location_count, sample->max_location_count); i++) {
+ stats = lcnt_build_lock_stats_term(hpp, szp, &(info->location_stats[i]), stats);
}
t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
- res = erts_bld_cons( hpp, szp, t, res);
+ res = erts_bld_cons(hpp, szp, t, res);
return res;
}
-static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *data, Eterm res) {
+static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_time_t *duration,
+ lcnt_sample_vector_t *current_locks,
+ lcnt_sample_vector_t *deleted_locks, Eterm res) {
+ const char *str_duration = "duration";
+ const char *str_locks = "locks";
+
Eterm dts, dtns, tdt, adur, tdur, aloc, lloc = NIL, tloc;
- erts_lcnt_lock_t *lock = NULL;
- char *str_duration = "duration";
- char *str_locks = "locks";
-
- /* term:
- * [{'duration', {seconds, nanoseconds}}, {'locks', locks()}]
- */
-
+ size_t i;
+
+ /* term: [{'duration', {seconds, nanoseconds}}, {'locks', locks()}] */
+
/* duration tuple */
- dts = erts_bld_uint( hpp, szp, data->duration.s);
- dtns = erts_bld_uint( hpp, szp, data->duration.ns);
+ dts = bld_unstable_uint64(hpp, szp, duration->s);
+ dtns = bld_unstable_uint64(hpp, szp, duration->ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
-
- adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
+
+ adur = erts_atom_put((byte *)str_duration, sys_strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
-
- aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
-
- for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+ aloc = erts_atom_put((byte *)str_locks, sys_strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
+
+ for(i = 0; i < current_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &current_locks->elements[i], lloc);
}
-
- for (lock = data->deleted_locks->head; lock != NULL ; lock = lock->next ) {
- lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
+
+ for(i = 0; i < deleted_locks->size; i++) {
+ lloc = lcnt_build_lock_term(hpp, szp, &deleted_locks->elements[i], lloc);
}
-
+
tloc = erts_bld_tuple(hpp, szp, 2, aloc, lloc);
-
- res = erts_bld_cons( hpp, szp, tloc, res);
- res = erts_bld_cons( hpp, szp, tdur, res);
+
+ res = erts_bld_cons(hpp, szp, tloc, res);
+ res = erts_bld_cons(hpp, szp, tdur, res);
return res;
-}
+}
+
+static struct {
+ const char *name;
+ erts_lock_flags_t flag;
+} lcnt_category_map[] = {
+ {"allocator", ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR},
+ {"db", ERTS_LOCK_FLAGS_CATEGORY_DB},
+ {"debug", ERTS_LOCK_FLAGS_CATEGORY_DEBUG},
+ {"distribution", ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION},
+ {"generic", ERTS_LOCK_FLAGS_CATEGORY_GENERIC},
+ {"io", ERTS_LOCK_FLAGS_CATEGORY_IO},
+ {"process", ERTS_LOCK_FLAGS_CATEGORY_PROCESS},
+ {"scheduler", ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER},
+ {NULL, 0}
+ };
+
+static erts_lock_flags_t lcnt_atom_to_lock_category(Eterm atom) {
+ int i = 0;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(erts_is_atom_str(lcnt_category_map[i].name, atom, 0)) {
+ return lcnt_category_map[i].flag;
+ }
+ }
+
+ return 0;
+}
+
+static Eterm lcnt_build_category_list(Eterm **hpp, Uint *szp, erts_lock_flags_t mask) {
+ Eterm res;
+ int i;
+
+ res = NIL;
+
+ for(i = 0; lcnt_category_map[i].name != NULL; i++) {
+ if(mask & lcnt_category_map[i].flag) {
+ Eterm category = erts_atom_put((byte*)lcnt_category_map[i].name,
+ sys_strlen(lcnt_category_map[i].name),
+ ERTS_ATOM_ENC_UTF8, 0);
+
+ res = erts_bld_cons(hpp, szp, category, res);
+ }
+ }
+
+ return res;
+}
+
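Aside: lcnt_category_map, lcnt_atom_to_lock_category() and lcnt_build_category_list() above convert between category names and a bit mask in both directions. The standalone sketch below shows the same round trip with plain strings and made-up flag values; it does not use the ERTS atom or lock-flag APIs.

#include <stdio.h>
#include <string.h>

typedef unsigned int lock_flags_t;

/* Made-up categories and flag values; the real table is lcnt_category_map. */
static const struct { const char *name; lock_flags_t flag; } category_map[] = {
    { "allocator", 1u << 0 },
    { "process",   1u << 1 },
    { "io",        1u << 2 },
    { NULL, 0 }
};

/* Name -> flag, returning 0 for unknown names so the caller can reject them. */
static lock_flags_t name_to_flag(const char *name)
{
    int i;
    for (i = 0; category_map[i].name != NULL; i++) {
        if (strcmp(category_map[i].name, name) == 0)
            return category_map[i].flag;
    }
    return 0;
}

int main(void)
{
    const char *wanted[] = { "process", "io" };
    lock_flags_t mask = 0;
    int i;

    for (i = 0; i < 2; i++)               /* names -> mask, as in control/2 */
        mask |= name_to_flag(wanted[i]);

    for (i = 0; category_map[i].name != NULL; i++)
        if (mask & category_map[i].flag)  /* mask -> names, as in the list builder */
            printf("%s\n", category_map[i].name);

    return 0;
}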
#endif
-BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1)
+BIF_RETTYPE erts_debug_lcnt_clear_0(BIF_ALIST_0)
{
-#ifdef ERTS_ENABLE_LOCK_COUNT
- Eterm res = NIL;
-#endif
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
+#else
+ erts_lcnt_clear_counters();
+ BIF_RET(am_ok);
+#endif
+}
- if (BIF_ARG_1 == am_enabled) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- BIF_RET(am_true);
+BIF_RETTYPE erts_debug_lcnt_collect_0(BIF_ALIST_0)
+{
+#ifndef ERTS_ENABLE_LOCK_COUNT
+ BIF_RET(am_error);
#else
- BIF_RET(am_false);
+ lcnt_sample_vector_t current_locks, deleted_locks;
+ erts_lcnt_data_t data;
+
+ Eterm *term_heap_start, *term_heap_end;
+ Uint term_heap_size = 0;
+ Eterm result;
+
+ data = erts_lcnt_get_data();
+
+ current_locks = lcnt_build_sample_vector(data.current_locks);
+ deleted_locks = lcnt_build_sample_vector(data.deleted_locks);
+
+ lcnt_build_result_term(NULL, &term_heap_size, &data.duration,
+ &current_locks, &deleted_locks, NIL);
+
+ term_heap_start = HAlloc(BIF_P, term_heap_size);
+ term_heap_end = term_heap_start;
+
+ result = lcnt_build_result_term(&term_heap_end, NULL,
+ &data.duration, &current_locks, &deleted_locks, NIL);
+
+ HRelease(BIF_P, term_heap_start + term_heap_size, term_heap_end);
+
+ lcnt_destroy_sample_vector(&current_locks);
+ lcnt_destroy_sample_vector(&deleted_locks);
+
+ BIF_RET(result);
#endif
- }
+}
+
+BIF_RETTYPE erts_debug_lcnt_control_1(BIF_ALIST_1)
+{
#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t mask;
+ Eterm *term_heap_block;
+ Uint term_heap_size;
- else if (BIF_ARG_1 == am_info) {
- erts_lcnt_data_t *data;
- Uint hsize = 0;
- Uint *szp;
- Eterm* hp;
+ mask = erts_lcnt_get_category_mask();
+ term_heap_size = 0;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ lcnt_build_category_list(NULL, &term_heap_size, mask);
- erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_SUSPEND);
- data = erts_lcnt_get_data();
+ term_heap_block = HAlloc(BIF_P, term_heap_size);
- /* calculate size */
+ BIF_RET(lcnt_build_category_list(&term_heap_block, NULL, mask));
+ } else if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ if(erts_lcnt_get_preserve_info()) {
+ BIF_RET(am_true);
+ }
- szp = &hsize;
- lcnt_build_result_term(NULL, szp, data, NIL);
+ BIF_RET(am_false);
+ }
+#endif
+ BIF_ERROR(BIF_P, BADARG);
+}
- /* alloc and build */
+BIF_RETTYPE erts_debug_lcnt_control_2(BIF_ALIST_2)
+{
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if(ERTS_IS_ATOM_STR("mask", BIF_ARG_1)) {
+ erts_lock_flags_t category_mask = 0;
+ Eterm categories = BIF_ARG_2;
- hp = HAlloc(BIF_P, hsize);
+ if(!(is_list(categories) || is_nil(categories))) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
- res = lcnt_build_result_term(&hp, NULL, data, res);
-
- erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_SUSPEND);
+ while(is_list(categories)) {
+ Eterm *cell = list_val(categories);
+ erts_lock_flags_t category;
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- BIF_RET(res);
- } else if (BIF_ARG_1 == am_clear) {
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
-
- erts_lcnt_clear_counters();
-
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- BIF_RET(am_ok);
- } else if (is_tuple(BIF_ARG_1)) {
- Eterm* ptr = tuple_val(BIF_ARG_1);
-
- if ((arityval(ptr[0]) == 2) && (ptr[2] == am_false || ptr[2] == am_true)) {
- int lock_opt = 0, enable = (ptr[2] == am_true) ? 1 : 0;
- if (ERTS_IS_ATOM_STR("copy_save", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_COPYSAVE;
- } else if (ERTS_IS_ATOM_STR("process_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PROCLOCK;
- } else if (ERTS_IS_ATOM_STR("port_locks", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_PORTLOCK;
- } else if (ERTS_IS_ATOM_STR("suspend", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_SUSPEND;
- } else if (ERTS_IS_ATOM_STR("location", ptr[1])) {
- lock_opt = ERTS_LCNT_OPT_LOCATION;
- } else {
- BIF_ERROR(BIF_P, BADARG);
- }
+ category = lcnt_atom_to_lock_category(CAR(cell));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
-
- if (enable) res = erts_lcnt_set_rt_opt(lock_opt) ? am_true : am_false;
- else res = erts_lcnt_clear_rt_opt(lock_opt) ? am_true : am_false;
-
-#ifdef ERTS_SMP
- if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PORTLOCK) {
- erts_lcnt_enable_io_lock_count(enable);
- } else if (res != ptr[2] && lock_opt == ERTS_LCNT_OPT_PROCLOCK) {
- erts_lcnt_enable_proc_lock_count(enable);
+ if(!category) {
+ Eterm *hp = HAlloc(BIF_P, 4);
+
+ BIF_RET(TUPLE3(hp, am_error, am_badarg, CAR(cell)));
}
-#endif
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
+
+ category_mask |= category;
+ categories = CDR(cell);
}
- }
-#endif
+ erts_lcnt_set_category_mask(category_mask);
+
+ BIF_RET(am_ok);
+ } else if(BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) {
+ int enabled = (BIF_ARG_2 == am_true);
+
+ if(ERTS_IS_ATOM_STR("copy_save", BIF_ARG_1)) {
+ erts_lcnt_set_preserve_info(enabled);
+
+ BIF_RET(am_ok);
+ }
+ }
+#endif
BIF_ERROR(BIF_P, BADARG);
}
static void os_info_init(void)
{
- Eterm type = erts_atom_put((byte *) os_type, strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1);
+ Eterm type = erts_atom_put((byte *) os_type, sys_strlen(os_type), ERTS_ATOM_ENC_LATIN1, 1);
Eterm flav;
int major, minor, build;
char* buf = erts_alloc(ERTS_ALC_T_TMP, 1024); /* More than enough */
Eterm* hp;
os_flavor(buf, 1024);
- flav = erts_atom_put((byte *) buf, strlen(buf), ERTS_ATOM_ENC_LATIN1, 1);
+ flav = erts_atom_put((byte *) buf, sys_strlen(buf), ERTS_ATOM_ENC_LATIN1, 1);
erts_free(ERTS_ALC_T_TMP, (void *) buf);
hp = erts_alloc(ERTS_ALC_T_LITERAL, (3+4)*sizeof(Eterm));
os_type_tuple = TUPLE2(hp, type, flav);
@@ -4563,13 +5138,13 @@ static void os_info_init(void)
void
erts_bif_info_init(void)
{
- erts_smp_atomic_init_nob(&available_internal_state, 0);
- erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0);
+ erts_atomic_init_nob(&available_internal_state, 0);
+ erts_atomic_init_nob(&hipe_test_reschedule_flag, 0);
alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1);
alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1);
gather_sched_wall_time_res_trap
- = erts_export_put(am_erlang, am_gather_sched_wall_time_result, 1);
+ = erts_export_put(am_erts_internal, am_gather_sched_wall_time_result, 1);
gather_gc_info_res_trap
= erts_export_put(am_erlang, am_gather_gc_info_result, 1);
gather_io_bytes_trap
@@ -4578,6 +5153,10 @@ erts_bif_info_init(void)
= erts_export_put(am_erts_internal, am_gather_microstate_accounting_result, 2);
gather_system_check_res_trap
= erts_export_put(am_erts_internal, am_gather_system_check_result, 1);
+
+ is_process_alive_trap = erts_export_put(am_erts_internal, am_is_process_alive, 1);
+
+
process_info_init();
os_info_init();
}