path: root/erts/emulator/beam
author    Erlang/OTP <[email protected]>  2010-06-04 14:01:56 +0000
committer Erlang/OTP <[email protected]>  2010-06-04 14:01:56 +0000
commit    5e354ca66996ee03d615dc36c284e94ccf176e89 (patch)
tree      cce028987420702d213a751d03f1cd3683c4bed5 /erts/emulator/beam
parent    fa66eb731b85726970cc4c581d32ebdc2ebb965e (diff)
parent    63751e0554dfb35a63d572aaca2f1fc37e1c12fe (diff)
Merge branch 'egil/call-time-trace-patterns' into dev
* egil/call-time-trace-patterns:
  Fix ops.tab to include i_return_time_trace
  Let trace_call_time_SUITE scheduling test be more relaxed
  Add documentation for call time breakpoints
  Fix bp_hash_put to do hash value after rehash
  Extend trace_call_time_SUITE with bif-tests
  Fix assertion in trace_pattern for bifs
  Teach erlang:system_info/1 to list snifs
  Add bif tests to call time trace tests
  Teach call time trace patterns to include bifs
  Fix BeamInstr code array in export.h
  Add trace call_time tests for nifs and bifs
  Teach call count tracing to use atomics
  Add first phase of trace call time test suite
  Add a scheduler array for BpData at BeamInstr[-4]
  Add search keys in breakpoints and a searchfunction
  Fix BeamInstr for call_time
  Teach call_time trace to use intruction pointers
  Add return_time trace and PSD deletions
  Remove trailing character in beam_bif_load
  Fix set_function_break to use correct breakpoint
  ...

OTP-8677 egil/call-time-trace-patterns
Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--   erts/emulator/beam/atom.names       |   1
-rw-r--r--   erts/emulator/beam/beam_bif_load.c  |   1
-rw-r--r--   erts/emulator/beam/beam_bp.c        | 798
-rw-r--r--   erts/emulator/beam/beam_bp.h        | 171
-rw-r--r--   erts/emulator/beam/beam_emu.c       | 137
-rw-r--r--   erts/emulator/beam/erl_bif_info.c   |  40
-rw-r--r--   erts/emulator/beam/erl_bif_trace.c  | 111
-rw-r--r--   erts/emulator/beam/erl_lock_check.c |   1
-rw-r--r--   erts/emulator/beam/erl_nif.c        |   3
-rw-r--r--   erts/emulator/beam/erl_process.c    |  30
-rw-r--r--   erts/emulator/beam/erl_process.h    |  12
-rw-r--r--   erts/emulator/beam/erl_trace.c      |  26
-rw-r--r--   erts/emulator/beam/export.h         |   6
-rw-r--r--   erts/emulator/beam/global.h         |  20
-rw-r--r--   erts/emulator/beam/ops.tab          |   2
15 files changed, 1138 insertions, 221 deletions
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 1138c0c871..28f69b9460 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -123,6 +123,7 @@ atom busy_dist_port
atom busy_port
atom call
atom call_count
+atom call_time
atom caller
atom capture
atom case_clause
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 8462f1c7fd..596ad9a010 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -368,7 +368,6 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
BIF_RET(am_true);
}
-
static void
set_default_trace_pattern(Eterm module)
{
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 6278ff6bad..682f31b83f 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -30,6 +30,7 @@
#include "error.h"
#include "erl_binary.h"
#include "beam_bp.h"
+#include "erl_term.h"
/* *************************************************************************
** Macros
@@ -100,6 +101,11 @@ do { \
(b)->prev = (a); \
} while (0)
+
+#define BREAK_IS_BIF (1)
+#define BREAK_IS_ERL (0)
+
+
/* *************************************************************************
** Local prototypes
*/
@@ -114,7 +120,7 @@ static int set_break(Eterm mfa[3], int specified,
static int set_module_break(Module *modp, Eterm mfa[3], int specified,
Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid);
-static int set_function_break(Module *modp, BeamInstr *pc,
+static int set_function_break(Module *modp, BeamInstr *pc, int bif,
Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid);
@@ -122,11 +128,29 @@ static int clear_break(Eterm mfa[3], int specified,
BeamInstr break_op);
static int clear_module_break(Module *modp, Eterm mfa[3], int specified,
BeamInstr break_op);
-static int clear_function_break(Module *modp, BeamInstr *pc,
+static int clear_function_break(Module *modp, BeamInstr *pc, int bif,
BeamInstr break_op);
static BpData *is_break(BeamInstr *pc, BeamInstr break_op);
-
+static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op);
+
+/* bp_hash */
+#define BP_TIME_ADD(pi0, pi1) \
+ do { \
+ Uint r; \
+ (pi0)->count += (pi1)->count; \
+ (pi0)->s_time += (pi1)->s_time; \
+ (pi0)->us_time += (pi1)->us_time; \
+ r = (pi0)->us_time / 1000000; \
+ (pi0)->s_time += r; \
+ (pi0)->us_time = (pi0)->us_time % 1000000; \
+ } while(0)
+
+static void bp_hash_init(bp_time_hash_t *hash, Uint n);
+static void bp_hash_rehash(bp_time_hash_t *hash, Uint n);
+static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
+static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
+static void bp_hash_delete(bp_time_hash_t *hash);
/* *************************************************************************
@@ -156,25 +180,20 @@ erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
(BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
}
+/* set breakpoint data on an exported bif entry */
+
void
erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) {
- BpDataTrace *bdt;
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+}
- bdt = (BpDataTrace *) pc[-4];
- if (bdt) {
- MatchSetUnref(bdt->match_spec);
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- } else {
- bdt = Alloc(sizeof(BpDataTrace));
- BpInit((BpData *) bdt, 0);
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- pc[-4] = (BeamInstr) bdt;
- }
+void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) {
+ set_function_break(NULL, pc, BREAK_IS_BIF, NULL, (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+}
+
+void erts_clear_time_trace_bif(BeamInstr *pc) {
+ clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_time_breakpoint));
}
int
@@ -191,7 +210,12 @@ erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
(BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL);
}
-
+int
+erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return set_break(mfa, specified, NULL,
+ (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+}
int
erts_clear_trace_break(Eterm mfa[3], int specified) {
@@ -209,17 +233,7 @@ erts_clear_mtrace_break(Eterm mfa[3], int specified) {
void
erts_clear_mtrace_bif(BeamInstr *pc) {
- BpDataTrace *bdt;
- ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
-
- bdt = (BpDataTrace *) pc[-4];
- if (bdt) {
- if (bdt->match_spec) {
- MatchSetUnref(bdt->match_spec);
- }
- Free(bdt);
- }
- pc[-4] = (BeamInstr) NULL;
+ clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
}
int
@@ -237,6 +251,13 @@ erts_clear_count_break(Eterm mfa[3], int specified) {
}
int
+erts_clear_time_break(Eterm mfa[3], int specified) {
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+ return clear_break(mfa, specified,
+ (BeamInstr) BeamOp(op_i_time_breakpoint));
+}
+
+int
erts_clear_break(Eterm mfa[3], int specified) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
return clear_break(mfa, specified, 0);
@@ -253,7 +274,7 @@ int
erts_clear_function_break(Module *modp, BeamInstr *pc) {
ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
ASSERT(modp);
- return clear_function_break(modp, pc, 0);
+ return clear_function_break(modp, pc, BREAK_IS_ERL, 0);
}
@@ -265,9 +286,12 @@ BeamInstr
erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32 *ret_flags, Eterm *tracer_pid) {
Eterm tpid1, tpid2;
- BpDataTrace *bdt = (BpDataTrace *) pc[-4];
+ BpData **bds = (BpData **) (pc)[-4];
+ BpDataTrace *bdt = NULL;
+ ASSERT(bds);
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ bdt = (BpDataTrace *) bds[bp_sched2ix_proc(p)];
ASSERT(bdt);
bdt = (BpDataTrace *) bdt->next;
ASSERT(bdt);
@@ -286,7 +310,7 @@ erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
bdt->tracer_pid = tpid2;
ErtsSmpBPUnlock(bdt);
}
- pc[-4] = (BeamInstr) bdt;
+ bds[bp_sched2ix_proc(p)] = (BpData *) bdt;
return bdt->orig_instr;
}
@@ -298,12 +322,15 @@ erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32
erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local,
Eterm *tracer_pid) {
- BpDataTrace *bdt = (BpDataTrace *) pc[-4];
+ BpData **bds = (BpData **) (pc)[-4];
+ BpDataTrace *bdt = NULL;
+
ASSERT(tracer_pid);
- if (bdt) {
+ if (bds) {
Eterm tpid1, tpid2;
Uint32 flags;
+ bdt = (BpDataTrace *)bds[bp_sched2ix_proc(p)];
ErtsSmpBPLock(bdt);
tpid1 = tpid2 = bdt->tracer_pid;
@@ -364,24 +391,6 @@ erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_r
}
int
-erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt = (BpDataTrace *) pc[-4];
-
- if (bdt) {
- if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
- }
- if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
- }
- return !0;
- }
- return 0;
-}
-
-int
erts_is_native_break(BeamInstr *pc) {
#ifdef HIPE
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
@@ -399,15 +408,69 @@ erts_is_count_break(BeamInstr *pc, Sint *count_ret) {
if (bdc) {
if (count_ret) {
- ErtsSmpBPLock(bdc);
- *count_ret = bdc->count;
- ErtsSmpBPUnlock(bdc);
+ *count_ret = (Sint) erts_smp_atomic_read(&bdc->acount);
}
return !0;
}
return 0;
}
+int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
+ Uint i, ix;
+ bp_time_hash_t hash;
+ Uint size;
+ Eterm *hp, t;
+ bp_data_time_item_t *item = NULL;
+ BpDataTime *bdt = (BpDataTime *) is_break(pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+
+ if (bdt) {
+ if (retval) {
+ /* collect all hashes to one hash */
+ bp_hash_init(&hash, 64);
+ /* foreach threadspecific hash */
+ for (i = 0; i < bdt->n; i++) {
+ bp_data_time_item_t *sitem;
+
+ /* foreach hash bucket not NIL*/
+ for(ix = 0; ix < bdt->hash[i].n; ix++) {
+ item = &(bdt->hash[i].item[ix]);
+ if (item->pid != NIL) {
+ sitem = bp_hash_get(&hash, item);
+ if (sitem) {
+ BP_TIME_ADD(sitem, item);
+ } else {
+ bp_hash_put(&hash, item);
+ }
+ }
+ }
+ }
+ /* *retval should be NIL or term from previous bif in export entry */
+
+ if (hash.used > 0) {
+ size = (5 + 2)*hash.used;
+ hp = HAlloc(p, size);
+
+ for(ix = 0; ix < hash.n; ix++) {
+ item = &(hash.item[ix]);
+ if (item->pid != NIL) {
+ t = TUPLE4(hp, item->pid,
+ make_small(item->count),
+ make_small(item->s_time),
+ make_small(item->us_time));
+ hp += 5;
+ *retval = CONS(hp, t, *retval); hp += 2;
+ }
+ }
+ }
+ bp_hash_delete(&hash);
+ }
+ return !0;
+ }
+
+ return 0;
+}
+
+
BeamInstr *
erts_find_local_func(Eterm mfa[3]) {
Module *modp;
@@ -432,6 +495,351 @@ erts_find_local_func(Eterm mfa[3]) {
return NULL;
}
+/* bp_hash */
+ERTS_INLINE Uint bp_sched2ix() {
+#ifdef ERTS_SMP
+ ErtsSchedulerData *esdp;
+ esdp = erts_get_scheduler_data();
+ return esdp->no - 1;
+#else
+ return 0;
+#endif
+}
+static void bp_hash_init(bp_time_hash_t *hash, Uint n) {
+ Uint size = sizeof(bp_data_time_item_t)*n;
+ Uint i;
+
+ hash->n = n;
+ hash->used = 0;
+
+ hash->item = (bp_data_time_item_t *)Alloc(size);
+ sys_memzero(hash->item, size);
+
+ for(i = 0; i < n; ++i) {
+ hash->item[i].pid = NIL;
+ }
+}
+
+static void bp_hash_rehash(bp_time_hash_t *hash, Uint n) {
+ bp_data_time_item_t *item = NULL;
+ Uint size = sizeof(bp_data_time_item_t)*n;
+ Uint ix;
+ Uint hval;
+
+ item = (bp_data_time_item_t *)Alloc(size);
+ sys_memzero(item, size);
+
+ for( ix = 0; ix < n; ++ix) {
+ item[ix].pid = NIL;
+ }
+
+ /* rehash, old hash -> new hash */
+
+ for( ix = 0; ix < hash->n; ix++) {
+ if (hash->item[ix].pid != NIL) {
+
+ hval = ((hash->item[ix].pid) >> 4) % n; /* new n */
+
+ while (item[hval].pid != NIL) {
+ hval = (hval + 1) % n;
+ }
+ item[hval].pid = hash->item[ix].pid;
+ item[hval].count = hash->item[ix].count;
+ item[hval].s_time = hash->item[ix].s_time;
+ item[hval].us_time = hash->item[ix].us_time;
+ }
+ }
+
+ Free(hash->item);
+ hash->n = n;
+ hash->item = item;
+}
+static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_data_time_item_t *sitem) {
+ Eterm pid = sitem->pid;
+ Uint hval = (pid >> 4) % hash->n;
+ bp_data_time_item_t *item = NULL;
+
+ item = hash->item;
+
+ while (item[hval].pid != pid) {
+ if (item[hval].pid == NIL) return NULL;
+ hval = (hval + 1) % hash->n;
+ }
+
+ return &(item[hval]);
+}
+
+static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t* sitem) {
+ Uint hval;
+ float r = 0.0;
+ bp_data_time_item_t *item;
+
+ /* make sure that the hash is not saturated */
+ /* if saturated, rehash it */
+
+ r = hash->used / (float) hash->n;
+
+ if (r > 0.7f) {
+ bp_hash_rehash(hash, hash->n * 2);
+ }
+ /* Do hval after rehash */
+ hval = (sitem->pid >> 4) % hash->n;
+
+ /* find free slot */
+ item = hash->item;
+
+ while (item[hval].pid != NIL) {
+ hval = (hval + 1) % hash->n;
+ }
+ item = &(hash->item[hval]);
+
+ item->pid = sitem->pid;
+ item->s_time = sitem->s_time;
+ item->us_time = sitem->us_time;
+ item->count = sitem->count;
+ hash->used++;
+
+ return item;
+}
+
+static void bp_hash_delete(bp_time_hash_t *hash) {
+ hash->n = 0;
+ hash->used = 0;
+ Free(hash->item);
+ hash->item = NULL;
+}
+
+static void bp_time_diff(bp_data_time_item_t *item, /* out */
+ process_breakpoint_time_t *pbt, /* in */
+ Uint ms, Uint s, Uint us) {
+ int dms,ds,dus;
+
+ dms = ms - pbt->ms;
+ ds = s - pbt->s;
+ dus = us - pbt->us;
+
+ /* get_sys_now may return zero difftime,
+ * this is ok.
+ */
+
+ ASSERT(dms >= 0 || ds >= 0 || dus >= 0);
+
+ if (dus < 0) {
+ dus += 1000000;
+ ds -= 1;
+ }
+ if (ds < 0) {
+ ds += 1000000;
+ }
+
+ item->s_time = ds;
+ item->us_time = dus;
+}
+
+void erts_schedule_time_break(Process *p, Uint schedule) {
+ Uint ms, s, us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+
+ if (pbt) {
+
+ switch(schedule) {
+ case ERTS_BP_CALL_TIME_SCHEDULE_EXITING :
+ break;
+ case ERTS_BP_CALL_TIME_SCHEDULE_OUT :
+ /* When a process is scheduled _out_,
+ * timestamp it and add its delta to
+ * the previous breakpoint.
+ */
+
+ pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ if (pbdt) {
+ get_sys_now(&ms,&s,&us);
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+ break;
+ case ERTS_BP_CALL_TIME_SCHEDULE_IN :
+ /* When a process is scheduled _in_,
+ * timestamp it and remove the previous
+ * timestamp in the psd.
+ */
+ get_sys_now(&ms,&s,&us);
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ break;
+ default :
+ ASSERT(0);
+ /* will never happen */
+ break;
+ }
+ } /* pbt */
+}
+
+/* call_time breakpoint
+ * Accumulated times are added to the previous bp,
+ * not the current one. The current one is saved
+ * for future reference.
+ * The previous breakpoint is stored in the process itself, in the psd.
+ * We do not need to store in a stack frame.
+ * There is no need for locking, each thread has its own
+ * area in each bp to save data.
+ * Since we need to differentiate between processes for each bp,
+ * every bp has a hash (per thread) to process-bp statistics.
+ * - egil
+ */
+
+void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type) {
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+ ASSERT(p->status == P_RUNNING);
+
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+ get_sys_now(&ms,&s,&us);
+
+ switch(type) {
+ /* get pbt
+ * timestamp = t0
+ * lookup bdt from code
+ * set ts0 to pbt
+ * add call count here?
+ */
+ case ERTS_BP_CALL_TIME_CALL:
+ case ERTS_BP_CALL_TIME_TAIL_CALL:
+
+ if (pbt) {
+ ASSERT(pbt->pc);
+ /* add time to previous code */
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+
+ /* if null then the breakpoint was removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ } else {
+ /* first call of process to instrumented function */
+ pbt = Alloc(sizeof(process_breakpoint_time_t));
+ (void *) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt);
+ }
+ /* add count to this code */
+ sitem.pid = p->id;
+ sitem.count = 1;
+ sitem.s_time = 0;
+ sitem.us_time = 0;
+
+ /* this breakpoint */
+ ASSERT(bdt);
+ h = &(bdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ break;
+
+ case ERTS_BP_CALL_TIME_RETURN:
+ /* get pbt
+ * lookup bdt from code
+ * timestamp = t1
+ * get ts0 from pbt
+ * get item from bdt->hash[bp_hash(p->id)]
+ * ack diff (t1, t0) to item
+ */
+
+ if(pbt) {
+ /* might have been removed due to
+ * trace_pattern(false)
+ */
+ ASSERT(pbt->pc);
+
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+
+ /* beware, the trace_pattern might have been removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ }
+ break;
+ default :
+ ASSERT(0);
+ /* will never happen */
+ break;
+ }
+}
/* *************************************************************************
@@ -489,26 +897,35 @@ static int set_module_break(Module *modp, Eterm mfa[3], int specified,
ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
(specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr+5;
+ BeamInstr *pc = code_ptr+5;
num_processed +=
- set_function_break(modp, pc, match_spec,
+ set_function_break(modp, pc, BREAK_IS_ERL, match_spec,
break_op, count_op, tracer_pid);
}
}
return num_processed;
}
-static int set_function_break(Module *modp, BeamInstr *pc,
+static int set_function_break(Module *modp, BeamInstr *pc, int bif,
Binary *match_spec, BeamInstr break_op,
enum erts_break_op count_op, Eterm tracer_pid) {
- BpData *bd, **r;
+
+ BeamInstr **code_base = NULL;
+ BpData *bd, **r, ***rs;
size_t size;
- BeamInstr **code_base = (BeamInstr **)modp->code;
+ Uint ix = 0;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (modp->code_length/sizeof(BeamInstr *)));
+ if (bif == BREAK_IS_ERL) {
+ code_base = (BeamInstr **)modp->code;
+ ASSERT(code_base);
+ ASSERT(code_base <= (BeamInstr **)pc);
+ ASSERT((BeamInstr **)pc < code_base + (modp->code_length/sizeof(BeamInstr *)));
+ } else {
+ ASSERT(*pc == (BeamInstr) em_apply_bif);
+ ASSERT(modp == NULL);
+ }
+
/*
* Currently no trace support for native code.
*/
@@ -519,6 +936,7 @@ static int set_function_break(Module *modp, BeamInstr *pc,
if ( (bd = is_break(pc, break_op))) {
if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint)
|| break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
+
BpDataTrace *bdt = (BpDataTrace *) bd;
Binary *old_match_spec;
@@ -531,20 +949,44 @@ static int set_function_break(Module *modp, BeamInstr *pc,
ErtsSmpBPUnlock(bdt);
MatchSetUnref(old_match_spec);
} else {
+ BpDataCount *bdc = (BpDataCount *) bd;
+ long count = 0;
+ long res = 0;
+
ASSERT(! match_spec);
ASSERT(is_nil(tracer_pid));
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- BpDataCount *bdc = (BpDataCount *) bd;
- ErtsSmpBPLock(bdc);
+ if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
if (count_op == erts_break_stop) {
- if (bdc->count >= 0) {
- bdc->count = -bdc->count-1; /* Stop call counter */
+ count = erts_smp_atomic_read(&bdc->acount);
+ if (count >= 0) {
+ while(1) {
+ res = erts_smp_atomic_cmpxchg(&bdc->acount, -count - 1, count);
+ if ((res == count) || count < 0) break;
+ count = res;
+ }
}
} else {
- bdc->count = 0; /* Reset call counter */
+ /* Reset call counter */
+ erts_smp_atomic_set(&bdc->acount, 0);
}
- ErtsSmpBPUnlock(bdc);
+
+ } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ BpDataTime *bdt = (BpDataTime *) bd;
+ Uint i = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_is_system_blocked(0));
+
+ if (count_op == erts_break_stop) {
+ bdt->pause = 1;
+ } else {
+ bdt->pause = 0;
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_delete(&(bdt->hash[i]));
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
+ }
+
} else {
ASSERT (! count_op);
}
@@ -558,43 +1000,63 @@ static int set_function_break(Module *modp, BeamInstr *pc,
ASSERT(! match_spec);
ASSERT(is_nil(tracer_pid));
if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_reset
- || count_op == erts_break_stop) {
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
/* Do not insert a new breakpoint */
return 1;
}
size = sizeof(BpDataCount);
+ } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
+ /* Do not insert a new breakpoint */
+ return 1;
+ }
+ size = sizeof(BpDataTime);
} else {
ASSERT(! count_op);
ASSERT(break_op == (BeamInstr) BeamOp(op_i_debug_breakpoint));
size = sizeof(BpDataDebug);
}
}
- r = (BpData **) (pc-4);
+ rs = (BpData ***) (pc-4);
+ if (! *rs) {
+ size_t ssize = sizeof(BeamInstr) * erts_no_schedulers;
+ *rs = (BpData **) Alloc(ssize);
+ sys_memzero(*rs, ssize);
+ }
+
+ r = &((*rs)[0]);
+
if (! *r) {
ASSERT(*pc != (BeamInstr) BeamOp(op_i_trace_breakpoint));
ASSERT(*pc != (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
ASSERT(*pc != (BeamInstr) BeamOp(op_i_debug_breakpoint));
ASSERT(*pc != (BeamInstr) BeamOp(op_i_count_breakpoint));
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_time_breakpoint));
/* First breakpoint; create singleton ring */
bd = Alloc(size);
BpInit(bd, *pc);
- *pc = break_op;
*r = bd;
+ if (bif == BREAK_IS_ERL) {
+ *pc = break_op;
+ }
} else {
ASSERT(*pc == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
*pc == (BeamInstr) BeamOp(op_i_mtrace_breakpoint) ||
*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_count_breakpoint));
+ *pc == (BeamInstr) BeamOp(op_i_time_breakpoint) ||
+ *pc == (BeamInstr) BeamOp(op_i_count_breakpoint) ||
+ *pc == (BeamInstr) em_apply_bif);
if (*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
/* Debug bp must be last, so if it is also first;
* it must be singleton. */
- ASSERT(BpSingleton(*r));
+ ASSERT(BpSingleton(*r));
/* Insert new bp first in the ring, i.e second to last. */
bd = Alloc(size);
BpInitAndSpliceNext(bd, *pc, *r);
- *pc = break_op;
- } else if ((*r)->prev->orig_instr
+ if (bif == BREAK_IS_ERL) {
+ *pc = break_op;
+ }
+ } else if ((*r)->prev->orig_instr
== (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
/* Debug bp last in the ring; insert new second to last. */
bd = Alloc(size);
@@ -608,6 +1070,11 @@ static int set_function_break(Module *modp, BeamInstr *pc,
*r = bd;
}
}
+ for (ix = 1; ix < erts_no_schedulers; ++ix) {
+ (*rs)[ix] = (*rs)[0];
+ }
+
+ bd->this_instr = break_op;
/* Init the bp type specific data */
if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
@@ -617,12 +1084,25 @@ static int set_function_break(Module *modp, BeamInstr *pc,
MatchSetRef(match_spec);
bdt->match_spec = match_spec;
bdt->tracer_pid = tracer_pid;
+ } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ BpDataTime *bdt = (BpDataTime *) bd;
+ Uint i = 0;
+
+ bdt->pause = 0;
+ bdt->n = erts_no_schedulers;
+ bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
+
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
} else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
BpDataCount *bdc = (BpDataCount *) bd;
+ erts_smp_atomic_init(&bdc->acount, 0);
+ }
- bdc->count = 0;
+ if (bif == BREAK_IS_ERL) {
+ ++(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
}
- ++(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
return 1;
}
@@ -672,33 +1152,51 @@ static int clear_module_break(Module *m, Eterm mfa[3], int specified,
BeamInstr *pc = code_ptr + 5;
num_processed +=
- clear_function_break(m, pc, break_op);
+ clear_function_break(m, pc, BREAK_IS_ERL, break_op);
}
}
return num_processed;
}
-static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
+static int clear_function_break(Module *m, BeamInstr *pc, int bif, BeamInstr break_op) {
BpData *bd;
- BeamInstr **code_base = (BeamInstr **)m->code;
-
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (m->code_length/sizeof(BeamInstr *)));
+ Uint ix = 0;
+ BeamInstr **code_base = NULL;
+
+ if (bif == BREAK_IS_ERL) {
+ code_base = (BeamInstr **)m->code;
+ ASSERT(code_base);
+ ASSERT(code_base <= (BeamInstr **)pc);
+ ASSERT((BeamInstr **)pc < code_base + (m->code_length/sizeof(BeamInstr *)));
+ } else {
+ ASSERT(*pc == (BeamInstr) em_apply_bif);
+ ASSERT(m == NULL);
+ }
+
/*
* Currently no trace support for native code.
*/
if (erts_is_native_break(pc)) {
return 0;
}
+
while ( (bd = is_break(pc, break_op))) {
/* Remove all breakpoints of this type.
* There should be only one of each type,
* but break_op may be 0 which matches any type.
*/
BeamInstr op;
- BpData **r = (BpData **) (pc-4);
+ BpData ***rs = (BpData ***) (pc - 4);
+ BpData **r = NULL;
+
+#ifdef DEBUG
+ for (ix = 1; ix < erts_no_schedulers; ++ix) {
+ ASSERT((*rs)[ix] == (*rs)[0]);
+ }
+#endif
+ r = &((*rs)[0]);
+
ASSERT(*r);
/* Find opcode for this breakpoint */
if (break_op) {
@@ -714,8 +1212,11 @@ static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
if (BpSingleton(bd)) {
ASSERT(*r == bd);
/* Only one breakpoint to remove */
- *r = NULL;
- *pc = bd->orig_instr;
+ if (bif == BREAK_IS_ERL) {
+ *pc = bd->orig_instr;
+ }
+ Free(*rs);
+ *rs = NULL;
} else {
BpData *bd_prev = bd->prev;
@@ -727,7 +1228,9 @@ static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
bd_prev->orig_instr = bd->orig_instr;
} else if (bd_prev == *r) {
/* We removed the first breakpoint in the ring */
- *pc = bd->orig_instr;
+ if (bif == BREAK_IS_ERL) {
+ *pc = bd->orig_instr;
+ }
} else {
bd_prev->orig_instr = bd->orig_instr;
}
@@ -736,13 +1239,53 @@ static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
BpDataTrace *bdt = (BpDataTrace *) bd;
-
MatchSetUnref(bdt->match_spec);
}
+ if (op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
+ BpDataTime *bdt = (BpDataTime *) bd;
+ Uint i = 0;
+ Uint j = 0;
+ Process *h_p = NULL;
+ bp_data_time_item_t *item = NULL;
+ process_breakpoint_time_t *pbt = NULL;
+
+ /* remove all psd associated with the hash
+ * and then delete the hash.
+ * ... sigh ...
+ */
+
+ for( i = 0; i < bdt->n; ++i) {
+ if (bdt->hash[i].used) {
+ for (j = 0; j < bdt->hash[i].n; ++j) {
+ item = &(bdt->hash[i].item[j]);
+ if (item->pid != NIL) {
+ h_p = process_tab[internal_pid_index(item->pid)];
+ if (h_p) {
+ pbt = ERTS_PROC_SET_CALL_TIME(h_p, ERTS_PROC_LOCK_MAIN, NULL);
+ if (pbt) {
+ Free(pbt);
+ }
+ }
+ }
+ }
+ }
+ bp_hash_delete(&(bdt->hash[i]));
+ }
+ Free(bdt->hash);
+ bdt->hash = NULL;
+ bdt->n = 0;
+ }
Free(bd);
- ASSERT(((BeamInstr) code_base[MI_NUM_BREAKPOINTS]) > 0);
- --(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
- }
+ if (bif == BREAK_IS_ERL) {
+ ASSERT(((BeamInstr) code_base[MI_NUM_BREAKPOINTS]) > 0);
+ --(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
+ }
+ if (*rs) {
+ for (ix = 1; ix < erts_no_schedulers; ++ix) {
+ (*rs)[ix] = (*rs)[0];
+ }
+ }
+ } /* while bd != NULL */
return 1;
}
@@ -755,32 +1298,63 @@ static int clear_function_break(Module *m, BeamInstr *pc, BeamInstr break_op) {
** returned. The program counter must point to the first executable
** (breakpoint) instruction of the function.
*/
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
+
+BpData *erts_get_time_break(Process *p, BeamInstr *pc) {
+ return get_break(p, pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+}
+
+static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) {
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
if (! erts_is_native_break(pc)) {
- BpData *bd = (BpData *) pc[-4];
-
- if (break_op == 0) {
- return bd;
- }
- if (*pc == break_op) {
- ASSERT(bd);
- return bd->next;
- }
- if (! bd){
+ BpData **rs = (BpData **) pc[-4];
+ BpData *bd = NULL, *ebd = NULL;
+
+ if (! rs) {
return NULL;
}
+
+ bd = ebd = rs[bp_sched2ix_proc(p)];
+ ASSERT(bd);
+ if (bd->this_instr == break_op) {
+ return bd;
+ }
+
bd = bd->next;
- while (bd != (BpData *) pc[-4]) {
+ while (bd != ebd) {
ASSERT(bd);
- if (bd->orig_instr == break_op) {
- bd = bd->next;
+ if (bd->this_instr == break_op) {
ASSERT(bd);
return bd;
- } else {
- bd = bd->next;
}
+ bd = bd->next;
}
}
return NULL;
}
+
+static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
+ BpData **rs = (BpData **) pc[-4];
+ BpData *bd = NULL, *ebd = NULL;
+ ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+
+ if (! rs) {
+ return NULL;
+ }
+
+ bd = ebd = rs[bp_sched2ix()];
+ ASSERT(bd);
+ if ( (break_op == 0) || (bd->this_instr == break_op)) {
+ return bd;
+ }
+
+ bd = bd->next;
+ while (bd != ebd) {
+ ASSERT(bd);
+ if (bd->this_instr == break_op) {
+ ASSERT(bd);
+ return bd;
+ }
+ bd = bd->next;
+ }
+ return NULL;
+}
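
The per-scheduler call-time statistics above live in small open-addressing hashes keyed on the calling process id, using linear probing and a rehash once the load factor passes 0.7. One of the merged commits ("Fix bp_hash_put to do hash value after rehash") moves the slot computation after the possible rehash, so the index is taken modulo the new table size. Below is a simplified, standalone sketch of that insert path; the struct and function names are illustrative, not the emulator's.

#include <stdlib.h>

/* Simplified stand-ins for bp_data_time_item_t / bp_time_hash_t. */
typedef struct { unsigned long pid; long count; unsigned long s_time, us_time; } item_t;
typedef struct { size_t n, used; item_t *slots; } hash_t;

#define NIL_PID 0UL   /* marks an empty slot, like pid == NIL in the emulator */

static void hash_init(hash_t *h, size_t n) {
    h->n = n; h->used = 0;
    h->slots = calloc(n, sizeof(item_t));
    for (size_t i = 0; i < n; i++) h->slots[i].pid = NIL_PID;
}

static void hash_rehash(hash_t *h, size_t n) {
    item_t *old = h->slots; size_t old_n = h->n;
    h->slots = calloc(n, sizeof(item_t));
    h->n = n;
    for (size_t i = 0; i < n; i++) h->slots[i].pid = NIL_PID;
    for (size_t i = 0; i < old_n; i++) {
        if (old[i].pid == NIL_PID) continue;
        size_t hv = (old[i].pid >> 4) % n;            /* hash against the new size */
        while (h->slots[hv].pid != NIL_PID) hv = (hv + 1) % n;
        h->slots[hv] = old[i];
    }
    free(old);
}

static item_t *hash_put(hash_t *h, const item_t *in) {
    /* Grow first if the table is too full ... */
    if ((double)h->used / (double)h->n > 0.7) hash_rehash(h, h->n * 2);
    /* ... and only then compute the slot, modulo the (possibly new) size. */
    size_t hv = (in->pid >> 4) % h->n;
    while (h->slots[hv].pid != NIL_PID) hv = (hv + 1) % h->n;  /* linear probe */
    h->slots[hv] = *in;
    h->used++;
    return &h->slots[hv];
}

If the slot were computed before the rehash, the index would be taken modulo the old table size; the entry would land in a bucket that later lookups (which hash against the new size) may never probe, which is the bug the merged fix addresses.
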
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 786cbbe9d9..b5d5b3c203 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -27,28 +27,46 @@
-/*
-** Common struct to all bp_data_*
-**
-** Two gotchas:
-**
-** 1) The type of bp_data structure in the ring is deduced from the
-** orig_instr field of the structure _before_ in the ring, except for
-** the first structure in the ring that has its instruction in
-** pc[0] of the code to execute.
-**
-** 2) pc[-4] points to the _last_ structure in the ring before the
-** breakpoints are being executed.
-**
-** So, as an example, when a breakpointed function starts to execute,
-** the first instruction that is a breakpoint instruction at pc[0] finds
-** its data at ((BpData *) pc[-4])->next and has to cast that pointer
-** to the correct bp_data type.
+/* A couple of gotchas:
+ *
+ * The breakpoint structure from BeamInstr,
+ * In beam_emu where the instruction counter pointer, I (or pc),
+ * points to the *current* instruction. At that time, if the instruction
+ * is a breakpoint instruction the pc looks like the following,
+ *
+ * I[-5] | op_i_func_info_IaaI | scheduler specific entries
+ * I[-4] | BpData** bpa | --> | BpData * bdas1 | ... | BpData * bdasN |
+ * I[-3] | Tagged Module | | |
+ * I[-2] | Tagged Function | V V
+ * I[-1] | Arity | BpData -> BpData -> BpData -> BpData
+ * I[0] | The bp instruction | ^ * the bp wheel * |
+ * |------------------------------
+ *
+ * Common struct to all bp_data_*
+ *
+ * 1) The type of bp_data structure in the ring is deduced from the
+ * orig_instr field of the structure _before_ in the ring, except for
+ * the first structure in the ring that has its instruction in
+ * pc[0] of the code to execute.
+ * This is valid as long as you don't search for the function while it is
+ * being executed by something else. Or is in the middle of its rotation for
+ * any other reason.
+ * A key, the bp beam instruction, is included for this reason.
+ *
+ * 2) pc[-4][sched_id - 1] points to the _last_ structure in the ring before the
+ * breakpoints are being executed.
+ *
+ * So, as an example, when a breakpointed function starts to execute,
+ * the first instruction that is a breakpoint instruction at pc[0] finds
+ * its data at ((BpData **) pc[-4][sched_id - 1])->next and has to cast that pointer
+ * to the correct bp_data type.
*/
+
typedef struct bp_data {
struct bp_data *next; /* Doubly linked ring pointers */
struct bp_data *prev; /* -"- */
BeamInstr orig_instr; /* The original instruction to execute */
+ BeamInstr this_instr; /* key */
} BpData;
/*
** All the following bp_data_.. structs must begin the same way
@@ -57,26 +75,67 @@ typedef struct bp_data {
typedef struct bp_data_trace {
struct bp_data *next;
struct bp_data *prev;
- BeamInstr orig_instr;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
Binary *match_spec;
- Eterm tracer_pid;
+ Eterm tracer_pid;
} BpDataTrace;
typedef struct bp_data_debug {
struct bp_data *next;
struct bp_data *prev;
- BeamInstr orig_instr;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
} BpDataDebug;
-typedef struct bp_data_count { /* Call count */
+typedef struct bp_data_count { /* Call count */
struct bp_data *next;
struct bp_data *prev;
BeamInstr orig_instr;
- Sint count;
+ BeamInstr this_instr; /* key */
+ erts_smp_atomic_t acount;
} BpDataCount;
+typedef struct {
+ Eterm pid;
+ Sint count;
+ Uint s_time;
+ Uint us_time;
+} bp_data_time_item_t;
+
+typedef struct {
+ Uint n;
+ Uint used;
+ bp_data_time_item_t *item;
+} bp_time_hash_t;
+
+typedef struct bp_data_time { /* Call time */
+ struct bp_data *next;
+ struct bp_data *prev;
+ BeamInstr orig_instr;
+ BeamInstr this_instr; /* key */
+ Uint pause;
+ Uint n;
+ bp_time_hash_t *hash;
+} BpDataTime;
+
+typedef struct {
+ Uint ms;
+ Uint s;
+ Uint us;
+ Uint *pc;
+} process_breakpoint_time_t; /* used within psd */
+
extern erts_smp_spinlock_t erts_bp_lock;
+#define ERTS_BP_CALL_TIME_SCHEDULE_IN (0)
+#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
+#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
+
+#define ERTS_BP_CALL_TIME_CALL (0)
+#define ERTS_BP_CALL_TIME_RETURN (1)
+#define ERTS_BP_CALL_TIME_TAIL_CALL (2)
+
#ifdef ERTS_SMP
#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock)
#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock)
@@ -85,31 +144,46 @@ extern erts_smp_spinlock_t erts_bp_lock;
#define ErtsSmpBPUnlock(BDC)
#endif
-#define ErtsCountBreak(pc,instr_result) \
-do { \
- BpDataCount *bdc = (BpDataCount *) (pc)[-4]; \
- \
+ERTS_INLINE Uint bp_sched2ix(void);
+
+#ifdef ERTS_SMP
+#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
+#else
+#define bp_sched2ix_proc(p) (0)
+#endif
+
+#define ErtsCountBreak(p, pc,instr_result) \
+do { \
+ BpData **bds = (BpData **) (pc)[-4]; \
+ BpDataCount *bdc = NULL; \
+ Uint ix = bp_sched2ix_proc( (p) ); \
+ long count = 0; \
+ \
ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bdc); \
- bdc = (BpDataCount *) bdc->next; \
- ASSERT(bdc); \
- (pc)[-4] = (BeamInstr) bdc; \
- ErtsSmpBPLock(bdc); \
- if (bdc->count >= 0) bdc->count++; \
- ErtsSmpBPUnlock(bdc); \
- *(instr_result) = bdc->orig_instr; \
+ ASSERT(bds); \
+ bdc = (BpDataCount *) bds[ix]; \
+ bdc = (BpDataCount *) bdc->next; \
+ ASSERT(bdc); \
+ bds[ix] = (BpData *) bdc; \
+ count = erts_smp_atomic_read(&bdc->acount); \
+ if (count >= 0) erts_smp_atomic_inc(&bdc->acount); \
+ *(instr_result) = bdc->orig_instr; \
} while (0)
-#define ErtsBreakSkip(pc,instr_result) \
-do { \
- BpData *bd = (BpData *) (pc)[-4]; \
- \
+#define ErtsBreakSkip(p, pc,instr_result) \
+do { \
+ BpData **bds = (BpData **) (pc)[-4]; \
+ BpData *bd = NULL; \
+ Uint ix = bp_sched2ix_proc( (p) ); \
+ \
ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bd); \
- bd = bd->next; \
- ASSERT(bd); \
- (pc)[-4] = (BeamInstr) bd; \
- *(instr_result) = bd->orig_instr; \
+ ASSERT(bds); \
+ bd = bds[ix]; \
+ ASSERT(bd); \
+ bd = bd->next; \
+ ASSERT(bd); \
+ bds[ix] = bd; \
+ *(instr_result) = bd->orig_instr; \
} while (0)
enum erts_break_op{
@@ -159,6 +233,17 @@ int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_ret);
int erts_is_native_break(BeamInstr *pc);
int erts_is_count_break(BeamInstr *pc, Sint *count_ret);
+int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
+
+void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type);
+void erts_schedule_time_break(Process *p, Uint out);
+int erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op);
+int erts_clear_time_break(Eterm mfa[3], int specified);
+
+int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
+void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
+void erts_clear_time_trace_bif(BeamInstr *pc);
+BpData *erts_get_time_break(Process *p, BeamInstr *pc);
BeamInstr *erts_find_local_func(Eterm mfa[3]);
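
BpDataCount now keeps its counter in an erts_smp_atomic_t (acount) rather than a plain Sint guarded by the breakpoint spinlock. The convention used by set_function_break and ErtsCountBreak above is: a non-negative value means counting is active, and pausing freezes the counter by swinging it to -count-1 with a compare-and-swap loop, so a concurrent increment is not lost. The following is a small sketch of the same scheme, using C11 atomics as a stand-in for the erts_smp_atomic_* wrappers; it is an illustration, not the emulator's code.

#include <stdatomic.h>

/* count >= 0 : counting is active
 * count <  0 : paused; the frozen value is -(count) - 1 */

static void count_call(atomic_long *acount) {
    /* mirrors ErtsCountBreak: only bump while the counter is active */
    if (atomic_load(acount) >= 0)
        atomic_fetch_add(acount, 1);
}

static void count_stop(atomic_long *acount) {
    long cur = atomic_load(acount);
    while (cur >= 0) {
        /* freeze exactly the value we read; on failure 'cur' is
         * refreshed with the current value and we retry */
        if (atomic_compare_exchange_weak(acount, &cur, -cur - 1))
            break;
    }
}

static long count_read(atomic_long *acount) {
    long c = atomic_load(acount);
    return c < 0 ? -c - 1 : c;   /* decode a paused counter, as trace_info does */
}

static void count_reset(atomic_long *acount) {
    atomic_store(acount, 0);
}
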
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index d42e74ccc9..c0680086aa 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -228,9 +228,10 @@ BeamInstr* em_call_traced_function;
** for the referring variable (one of these), and rogue references
** will most likely cause chaos.
*/
-BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
-BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
+BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
+BeamInstr beam_exception_trace[1]; /* UGLY also OpCode(i_return_trace) */
+BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/*
* All Beam instructions in numerical order.
@@ -4403,35 +4404,109 @@ apply_bif_or_nif_epilogue:
OpCase(i_count_breakpoint): {
BeamInstr real_I;
- ErtsCountBreak((BeamInstr *) I, &real_I);
+ ErtsCountBreak(c_p, (BeamInstr *) I, &real_I);
ASSERT(VALID_INSTR(real_I));
Goto(real_I);
}
+ /* need to send mfa instead of bdt pointer
+ * the pointer might be deallocated.
+ */
+
+ OpCase(i_time_breakpoint): {
+ BeamInstr real_I;
+ BpData **bds = (BpData **) (I)[-4];
+ BpDataTime *bdt = NULL;
+ Uint ix = 0;
+#ifdef ERTS_SMP
+ ix = c_p->scheduler_data->no - 1;
+#else
+ ix = 0;
+#endif
+ bdt = (BpDataTime *)bds[ix];
+
+ ASSERT((I)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ ASSERT(bdt);
+ bdt = (BpDataTime *) bdt->next;
+ ASSERT(bdt);
+ bds[ix] = (BpData *) bdt;
+ real_I = bdt->orig_instr;
+ ASSERT(VALID_INSTR(real_I));
+
+ if (IS_TRACED_FL(c_p, F_TRACE_CALLS) && !(bdt->pause)) {
+ if ( (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) ||
+ (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) ||
+ (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace))) {
+ /* This _IS_ a tail recursive call */
+ SWAPOUT;
+ erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_TAIL_CALL);
+ SWAPIN;
+ } else {
+ SWAPOUT;
+ erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_CALL);
+
+ /* r register needs to be copied to the array
+ * for the garbage collector
+ */
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - 2 < HTOP) {
+ reg[0] = r(0);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ r(0) = reg[0];
+ }
+ SWAPIN;
+
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+
+ E -= 2;
+ E[0] = make_cp(I);
+ E[1] = make_cp(c_p->cp); /* original return address */
+ c_p->cp = (BeamInstr *) make_cp(beam_return_time_trace);
+ }
+ }
+
+ Goto(real_I);
+ }
+
+ OpCase(i_return_time_trace): {
+ BeamInstr *pc = (BeamInstr *) (UWord) E[0];
+ SWAPOUT;
+ erts_trace_time_break(c_p, pc, NULL, ERTS_BP_CALL_TIME_RETURN);
+ SWAPIN;
+ c_p->cp = NULL;
+ SET_I((BeamInstr *) cp_val(E[1]));
+ E += 2;
+ Goto(*I);
+ }
+
OpCase(i_trace_breakpoint):
if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
BeamInstr real_I;
- ErtsBreakSkip((BeamInstr *) I, &real_I);
+ ErtsBreakSkip(c_p, (BeamInstr *) I, &real_I);
Goto(real_I);
}
/* Fall through to next case */
OpCase(i_mtrace_breakpoint): {
- Uint real_I;
+ BeamInstr real_I;
Uint32 flags;
Eterm tracer_pid;
- Uint *cpp;
+ BeamInstr *cpp;
int return_to_trace = 0, need = 0;
flags = 0;
SWAPOUT;
reg[0] = r(0);
if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) {
- cpp = (Uint*)&E[2];
- } else if (*(c_p->cp)
- == (BeamInstr) OpCode(i_return_to_trace)) {
+ cpp = (BeamInstr*)&E[2];
+ } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) {
return_to_trace = !0;
- cpp = (Uint*)&E[0];
+ cpp = (BeamInstr*)&E[0];
+ } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) {
+ return_to_trace = !0;
+ cpp = (BeamInstr*)&E[0];
} else {
cpp = NULL;
}
@@ -4448,6 +4523,8 @@ apply_bif_or_nif_epilogue:
} else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
return_to_trace = !0;
cpp += 1;
+ } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_time_trace)) {
+ cpp += 2;
} else
break;
}
@@ -4957,13 +5034,15 @@ apply_bif_or_nif_epilogue:
em_call_error_handler = OpCode(call_error_handler);
em_call_traced_function = OpCode(call_traced_function);
em_apply_bif = OpCode(apply_bif);
- beam_apply[0] = (BeamInstr) OpCode(i_apply);
- beam_apply[1] = (BeamInstr) OpCode(normal_exit);
- beam_exit[0] = (BeamInstr) OpCode(error_action_code);
- beam_continue_exit[0] = (BeamInstr) OpCode(continue_exit);
- beam_return_to_trace[0] = (BeamInstr) OpCode(i_return_to_trace);
- beam_return_trace[0] = (BeamInstr) OpCode(return_trace);
- beam_exception_trace[0] = (BeamInstr) OpCode(return_trace); /* UGLY */
+
+ beam_apply[0] = (BeamInstr) OpCode(i_apply);
+ beam_apply[1] = (BeamInstr) OpCode(normal_exit);
+ beam_exit[0] = (BeamInstr) OpCode(error_action_code);
+ beam_continue_exit[0] = (BeamInstr) OpCode(continue_exit);
+ beam_return_to_trace[0] = (BeamInstr) OpCode(i_return_to_trace);
+ beam_return_trace[0] = (BeamInstr) OpCode(return_trace);
+ beam_exception_trace[0] = (BeamInstr) OpCode(return_trace); /* UGLY */
+ beam_return_time_trace[0] = (BeamInstr) OpCode(i_return_time_trace);
/*
* Enter all BIFs into the export table.
@@ -4975,6 +5054,8 @@ apply_bif_or_nif_epilogue:
bif_export[i] = ep;
ep->code[3] = (BeamInstr) OpCode(apply_bif);
ep->code[4] = (BeamInstr) bif_table[i].f;
+ /* XXX: set func info for bifs */
+ ((BeamInstr*)ep->code + 3)[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
}
return;
@@ -5167,14 +5248,18 @@ next_catch(Process* c_p, Eterm *reg) {
int active_catches = c_p->catches > 0;
int have_return_to_trace = 0;
Eterm *ptr, *prev, *return_to_trace_ptr = NULL;
- BeamInstr i_return_trace = beam_return_trace[0];
- BeamInstr i_return_to_trace = beam_return_to_trace[0];
+
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
+
ptr = prev = c_p->stop;
ASSERT(is_CP(*ptr));
ASSERT(ptr <= STACK_START(c_p));
if (ptr == STACK_START(c_p)) return NULL;
if ((is_not_CP(*ptr) || (*cp_val(*ptr) != i_return_trace &&
- *cp_val(*ptr) != i_return_to_trace))
+ *cp_val(*ptr) != i_return_to_trace &&
+ *cp_val(*ptr) != i_return_time_trace ))
&& c_p->cp) {
/* Can not follow cp here - code may be unloaded */
BeamInstr *cpp = c_p->cp;
@@ -5186,6 +5271,9 @@ next_catch(Process* c_p, Eterm *reg) {
} else if (cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
+ } else if (cpp == beam_return_time_trace) {
+ /* Skip return_trace parameters */
+ ptr += 1;
} else if (cpp == beam_return_to_trace) {
have_return_to_trace = !0; /* Record next cp */
}
@@ -5215,6 +5303,13 @@ next_catch(Process* c_p, Eterm *reg) {
}
have_return_to_trace = !0; /* Record next cp */
return_to_trace_ptr = NULL;
+ } else if (*cp_val(*prev) == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (++ptr, ptr < STACK_START(c_p) && is_not_CP(*ptr)) {
+ if (is_catch(*ptr) && active_catches) goto found_catch;
+ }
+ /* Skip return_trace parameters */
+ ptr += 1;
} else {
if (have_return_to_trace) {
/* Record this cp as possible return_to trace cp */
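
The i_time_breakpoint handler above times returns by rewriting the return path: unless the call is recognized as a tail call (the caller's continuation already points at a return-trace style instruction), it reserves two stack slots, stores the breakpoint address and the original continuation there, and redirects c_p->cp to beam_return_time_trace so that i_return_time_trace runs on the way out and can attribute the elapsed time. Below is a toy, plain-C analogue of that "return through a timing trampoline" idea, using an explicit continuation argument where BEAM uses the cp register and a stack frame; names and structure are illustrative only.

#include <stdio.h>
#include <sys/time.h>

typedef void (*cont_t)(void);

/* one saved "frame": the real return address and the call's start time */
static struct { cont_t saved_cp; struct timeval t0; } frame;

static void real_return(void) { puts("back in the caller"); }

static void return_time_trampoline(void) {
    struct timeval t1;
    long us;
    gettimeofday(&t1, NULL);
    us = (t1.tv_sec - frame.t0.tv_sec) * 1000000L
       + (t1.tv_usec - frame.t0.tv_usec);
    printf("traced call took ~%ld us\n", us);
    frame.saved_cp();               /* resume at the original return point */
}

static void traced_function(cont_t cp) {
    /* breakpoint prologue: remember the real continuation and the start
     * time, then arrange for the "return" to go through the trampoline */
    frame.saved_cp = cp;
    gettimeofday(&frame.t0, NULL);

    /* ... body of the traced function ... */

    return_time_trampoline();       /* returning runs the trampoline first */
}

int main(void) { traced_function(real_return); return 0; }
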
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 70d0e91a2c..48cda52612 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1892,6 +1892,37 @@ c_compiler_used(Eterm **hpp, Uint *szp)
}
+static int is_snif_term(Eterm module_atom) {
+ int i;
+ Atom *a = atom_tab(atom_val(module_atom));
+ char *aname = (char *) a->name;
+
+ /* if a->name has a '.' then the bif (snif) is bogus i.e a package */
+ for (i = 0; i < a->len; i++) {
+ if (aname[i] == '.')
+ return 0;
+ }
+
+ return 1;
+}
+
+static Eterm build_snif_term(Eterm **hpp, Uint *szp, int ix, Eterm res) {
+ Eterm tup;
+ tup = erts_bld_tuple(hpp, szp, 3, bif_table[ix].module, bif_table[ix].name, make_small(bif_table[ix].arity));
+ res = erts_bld_cons( hpp, szp, tup, res);
+ return res;
+}
+
+static Eterm build_snifs_term(Eterm **hpp, Uint *szp, Eterm res) {
+ int i;
+ for (i = 0; i < BIF_SIZE; i++) {
+ if (is_snif_term(bif_table[i].module)) {
+ res = build_snif_term(hpp, szp, i, res);
+ }
+ }
+ return res;
+}
+
BIF_RETTYPE system_info_1(BIF_ALIST_1)
{
Eterm res;
@@ -1940,6 +1971,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(db_get_trace_control_word_0(BIF_P));
} else if (ERTS_IS_ATOM_STR("ets_realloc_moves", BIF_ARG_1)) {
BIF_RET((erts_ets_realloc_always_moves) ? am_true : am_false);
+ } else if (ERTS_IS_ATOM_STR("snifs", BIF_ARG_1)) {
+ Uint size = 0;
+ Uint *szp;
+
+ szp = &size;
+ build_snifs_term(NULL, szp, NIL);
+ hp = HAlloc(BIF_P, size);
+ res = build_snifs_term(&hp, NULL, NIL);
+ BIF_RET(res);
} else if (BIF_ARG_1 == am_sequential_tracer) {
val = erts_get_system_seq_tracer();
ASSERT(is_internal_pid(val) || is_internal_port(val) || val==am_false)
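
build_snifs_term above is invoked twice, following the erts_bld_* idiom used throughout this file: a first pass with a NULL heap pointer only accumulates the required number of heap words into szp, HAlloc then reserves exactly that much, and a second pass with szp == NULL actually writes the tuples and cons cells. Here is a stripped-down, standalone analogue of the two-pass pattern; the integer "terms" are purely illustrative.

#include <stdio.h>
#include <stdlib.h>

/* Two-pass builder: pass 1 (bufp == NULL) only adds to *szp,
 * pass 2 (szp == NULL) writes into *bufp and advances it. */
static void bld_entry(int **bufp, size_t *szp, int value) {
    if (szp) { *szp += 1; return; }     /* sizing pass   */
    *(*bufp)++ = value;                 /* building pass */
}

static void bld_all(int **bufp, size_t *szp, const int *src, size_t n) {
    for (size_t i = 0; i < n; i++)
        bld_entry(bufp, szp, src[i] * src[i]);
}

int main(void) {
    const int src[] = {1, 2, 3, 4};
    size_t need = 0;

    bld_all(NULL, &need, src, 4);              /* pass 1: measure      */
    int *buf = malloc(need * sizeof *buf);     /* one exact allocation */
    int *hp = buf;
    bld_all(&hp, NULL, src, 4);                /* pass 2: build        */

    for (size_t i = 0; i < need; i++) printf("%d ", buf[i]);
    printf("\n");
    free(buf);
    return 0;
}
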
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 7fe4410e0d..443cac9033 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -40,8 +40,7 @@
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
-static erts_smp_mtx_t trace_pattern_mutex;
-const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0};
+const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
@@ -65,7 +64,6 @@ static void clear_trace_bif(int bif_index);
void
erts_bif_trace_init(void)
{
- erts_smp_mtx_init(&trace_pattern_mutex, "trace_pattern");
erts_default_trace_pattern_is_on = 0;
erts_default_match_spec = NULL;
erts_default_meta_match_spec = NULL;
@@ -186,6 +184,14 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
flags.breakpoint = 1;
flags.call_count = 1;
break;
+ case am_call_time:
+ if (is_global) {
+ goto error;
+ }
+ flags.breakpoint = 1;
+ flags.call_time = 1;
+ break;
+
default:
goto error;
}
@@ -195,8 +201,8 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
goto error;
}
- if (match_prog_set && !flags.local && !flags.meta && flags.call_count) {
- /* A match prog is not allowed with just call_count */
+ if (match_prog_set && !flags.local && !flags.meta && (flags.call_count || flags.call_time)) {
+ /* A match prog is not allowed with just call_count or call_time*/
goto error;
}
@@ -235,6 +241,8 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
|= flags.meta;
erts_default_trace_pattern_flags.call_count
|= (on == 1) ? flags.call_count : 0;
+ erts_default_trace_pattern_flags.call_time
+ |= (on == 1) ? flags.call_time : 0;
} else {
erts_default_trace_pattern_flags.local
&= ~flags.local;
@@ -242,10 +250,13 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
&= ~flags.meta;
erts_default_trace_pattern_flags.call_count
&= ~flags.call_count;
+ erts_default_trace_pattern_flags.call_time
+ &= ~flags.call_time;
if (! (erts_default_trace_pattern_flags.breakpoint =
erts_default_trace_pattern_flags.local |
erts_default_trace_pattern_flags.meta |
- erts_default_trace_pattern_flags.call_count)) {
+ erts_default_trace_pattern_flags.call_count |
+ erts_default_trace_pattern_flags.call_time)) {
erts_default_trace_pattern_is_on = !!on; /* i.e off */
}
}
@@ -267,8 +278,9 @@ trace_pattern_3(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
if (on) {
if (on != 1) {
flags.call_count = 0;
+ flags.call_time = 0;
}
- flags.breakpoint = flags.local | flags.meta | flags.call_count;
+ flags.breakpoint = flags.local | flags.meta | flags.call_count | flags.call_time;
erts_default_trace_pattern_flags = flags; /* Struct copy */
erts_default_trace_pattern_is_on = !!flags.breakpoint;
}
@@ -336,7 +348,6 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
Eterm *meta_tracer_pid)
{
- erts_smp_mtx_lock(&trace_pattern_mutex);
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -347,12 +358,10 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
*trace_pattern_flags = erts_default_trace_pattern_flags;
if (meta_tracer_pid)
*meta_tracer_pid = erts_default_meta_tracer_pid;
- erts_smp_mtx_unlock(&trace_pattern_mutex);
}
-
Uint
erts_trace_flag2bit(Eterm flag)
{
@@ -380,7 +389,7 @@ erts_trace_flag2bit(Eterm flag)
default: return 0;
}
}
-
+
/* Scan the argument list and sort out the trace flags.
**
** Returns !0 on success, 0 on failure.
@@ -931,6 +940,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
#define FUNC_TRACE_LOCAL_TRACE (1<<2)
#define FUNC_TRACE_META_TRACE (1<<3)
#define FUNC_TRACE_COUNT_TRACE (1<<4)
+#define FUNC_TRACE_TIME_TRACE (1<<5)
/*
* Returns either FUNC_TRACE_NOEXIST, FUNC_TRACE_UNTRACED,
* FUNC_TRACE_GLOBAL_TRACE, or,
@@ -945,11 +955,13 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
*
* If the return value contains FUNC_TRACE_COUNT_TRACE, *count is set.
*/
-static int function_is_traced(Eterm mfa[3],
- Binary **ms, /* out */
- Binary **ms_meta, /* out */
+static int function_is_traced(Process *p,
+ Eterm mfa[3],
+ Binary **ms, /* out */
+ Binary **ms_meta, /* out */
Eterm *tracer_pid_meta, /* out */
- Sint *count) /* out */
+ Sint *count, /* out */
+ Eterm *call_time) /* out */
{
Export e;
Export* ep;
@@ -980,10 +992,13 @@ static int function_is_traced(Eterm mfa[3],
r |= FUNC_TRACE_LOCAL_TRACE;
*ms = ep->match_prog_set;
}
- if (erts_is_mtrace_bif(ep->code+3, ms_meta,
+ if (erts_is_mtrace_break(ep->code+3, ms_meta,
tracer_pid_meta)) {
r |= FUNC_TRACE_META_TRACE;
}
+ if (erts_is_time_break(p, ep->code+3, call_time)) {
+ r |= FUNC_TRACE_TIME_TRACE;
+ }
}
return r ? r : FUNC_TRACE_UNTRACED;
}
@@ -1001,7 +1016,9 @@ static int function_is_traced(Eterm mfa[3],
| (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
| (erts_is_count_break(code, count)
- ? FUNC_TRACE_COUNT_TRACE : 0);
+ ? FUNC_TRACE_COUNT_TRACE : 0)
+ | (erts_is_time_break(p, code, call_time)
+ ? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
}
@@ -1020,6 +1037,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
Eterm match_spec = am_false;
Eterm retval = am_false;
Eterm meta = am_false;
+ Eterm call_time = NIL;
int r;
@@ -1039,7 +1057,22 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
mfa[1] = tp[2];
mfa[2] = signed_val(tp[3]);
- r = function_is_traced(mfa, &ms, &ms_meta, &meta, &count);
+#ifdef ERTS_SMP
+ if ( (key == am_call_time) || (key == am_all)) {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
+ erts_smp_block_system(0);
+ }
+#endif
+
+ r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
+
+#ifdef ERTS_SMP
+ if ( (key == am_call_time) || (key == am_all)) {
+ erts_smp_release_system();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+ }
+#endif
+
switch (r) {
case FUNC_TRACE_NOEXIST:
UnUseTmpHeap(3,p);
@@ -1092,8 +1125,13 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_make_integer(count, p);
}
break;
+ case am_call_time:
+ if (r & FUNC_TRACE_TIME_TRACE) {
+ retval = call_time;
+ }
+ break;
case am_all: {
- Eterm match_spec_meta = am_false, c = am_false, t;
+ Eterm match_spec_meta = am_false, c = am_false, t, ct = am_false;
if (ms) {
match_spec = MatchSetGetSource(ms);
@@ -1111,10 +1149,15 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_make_integer(-count-1, p) :
erts_make_integer(count, p);
}
- hp = HAlloc(p, (3+2)*5);
+ if (r & FUNC_TRACE_TIME_TRACE) {
+ ct = call_time;
+ }
+ hp = HAlloc(p, (3+2)*6);
retval = NIL;
t = TUPLE2(hp, am_call_count, c); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
+ t = TUPLE2(hp, am_call_time, ct); hp += 3;
+ retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_meta_match_spec, match_spec_meta); hp += 3;
retval = CONS(hp, t, retval); hp += 2;
t = TUPLE2(hp, am_meta, meta); hp += 3;
@@ -1210,6 +1253,13 @@ trace_info_on_load(Process* p, Eterm key)
} else {
return TUPLE2(hp, key, am_false);
}
+ case am_call_time:
+ hp = HAlloc(p, 3);
+ if (erts_default_trace_pattern_flags.call_time) {
+ return TUPLE2(hp, key, am_true);
+ } else {
+ return TUPLE2(hp, key, am_false);
+ }
case am_all:
{
Eterm match_spec = am_false, meta_match_spec = am_false, r = NIL, t;
@@ -1284,6 +1334,7 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
/* Empty loop body */
}
+
if (j == specified) {
if (on) {
if (! flags.breakpoint)
@@ -1356,6 +1407,12 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
m = 1;
}
+ if (flags.call_time) {
+ erts_set_time_trace_bif(bif_export[i]->code + 3, on);
+ /* I don't want to remove any other tracers */
+ erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME;
+ m = 1;
+ }
if (erts_bif_trace_flags[i]) {
setup_bif_trace(i);
}
@@ -1375,6 +1432,11 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
}
m = 1;
}
+ if (flags.call_time) {
+ erts_clear_time_trace_bif(bif_export[i]->code + 3);
+ erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME;
+ m = 1;
+ }
if (! erts_bif_trace_flags[i]) {
reset_bif_trace(i);
}
@@ -1392,6 +1454,7 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
erts_clear_trace_break(mfa, specified);
erts_clear_mtrace_break(mfa, specified);
erts_clear_count_break(mfa, specified);
+ erts_clear_time_break(mfa, specified);
} else {
int m = 0;
if (flags.local) {
@@ -1405,6 +1468,9 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (flags.call_count) {
m = erts_set_count_break(mfa, specified, on);
}
+ if (flags.call_time) {
+ m = erts_set_time_break(mfa, specified, on);
+ }
/* All assignments to 'm' above should give the same value,
* so just use the last */
matches += m;
@@ -1420,6 +1486,9 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (flags.call_count) {
m = erts_clear_count_break(mfa, specified);
}
+ if (flags.call_time) {
+ m = erts_clear_time_break(mfa, specified);
+ }
/* All assignments to 'm' above should give the same value,
* so just use the last */
matches += m;
@@ -1536,7 +1605,7 @@ static void reset_bif_trace(int bif_index) {
ASSERT(ExportIsBuiltIn(ep));
ASSERT(ep->code[4]);
ASSERT(! ep->match_prog_set);
- ASSERT(! erts_is_mtrace_bif((BeamInstr *)ep->code+3, NULL, NULL));
+ ASSERT(! erts_is_mtrace_break((BeamInstr *)ep->code+3, NULL, NULL));
ep->code[4] = (BeamInstr) bif_table[bif_index].f;
}
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 6ff5c1b9da..cee470ae37 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -105,7 +105,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "node_table", NULL },
{ "dist_table", NULL },
{ "sys_tracers", NULL },
- { "trace_pattern", NULL },
{ "module_tab", NULL },
{ "export_tab", NULL },
{ "fun_tab", NULL },
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 75de00d4c1..3d63fa1caf 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1546,7 +1546,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- BpData* bp = (BpData*) code_ptr[1];
+ BpData** bps = (BpData**) code_ptr[1];
+ BpData* bp = (BpData*) bps[bp_sched2ix()];
bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
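
This erl_nif.c hunk is the visible side of the per-scheduler breakpoint storage mentioned in the commit list ("Add a scheduler array for BpData at BeamInstr[-4]"): the word that used to hold a single BpData pointer now holds a pointer to an array with one BpData entry per scheduler, and a running scheduler picks its own slot with bp_sched2ix(). A hedged sketch of the lookup, using only names that appear in this diff:

    BpData **bps = (BpData **) code_ptr[1];            /* array of per-scheduler entries */
    BpData  *bp  = bps[bp_sched2ix()];                 /* this scheduler's BpData        */
    bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);  /* patch the saved original op    */

Splitting the data per scheduler (together with the atomics mentioned for call count tracing) is presumably what lets the breakpoint bookkeeping run without a shared lock on SMP schedulers.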
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 055733ac49..f4c889889d 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -38,6 +38,7 @@
#include "erl_instrument.h"
#include "erl_threads.h"
#include "erl_binary.h"
+#include "beam_bp.h"
#define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
#define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
@@ -392,7 +393,12 @@ erts_pre_init_process(void)
erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks
= ERTS_PSD_DIST_ENTRY_GET_LOCKS;
erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
- = ERTS_PSD_DIST_ENTRY_GET_LOCKS;
+ = ERTS_PSD_DIST_ENTRY_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].get_locks
+ = ERTS_PSD_CALL_TIME_BP_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_CALL_TIME_BP].set_locks
+ = ERTS_PSD_CALL_TIME_BP_SET_LOCKS;
/* Check that we have locks for all entries */
for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
@@ -5872,6 +5878,9 @@ Process *schedule(Process *p, int calls)
}
if (IS_TRACED(p)) {
+ if (IS_TRACED_FL(p, F_TRACE_CALLS) && p->status != P_FREE) {
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
+ }
switch (p->status) {
case P_EXITING:
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
@@ -6308,7 +6317,11 @@ Process *schedule(Process *p, int calls)
trace_virtual_sched(p, am_in);
break;
}
+ if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
+ }
}
+
if (p->status != P_EXITING)
p->status = P_RUNNING;
@@ -8051,8 +8064,13 @@ erts_do_exit_process(Process* p, Eterm reason)
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
#endif
- if (IS_TRACED_FL(p,F_TRACE_PROCS))
- trace_proc(p, p, am_exit, reason);
+ if (IS_TRACED(p)) {
+ if (IS_TRACED_FL(p, F_TRACE_CALLS))
+ erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
+
+ if (IS_TRACED_FL(p,F_TRACE_PROCS))
+ trace_proc(p, p, am_exit, reason);
+ }
erts_trace_check_exiting(p->id);
@@ -8101,6 +8119,8 @@ continue_exit_process(Process *p
Eterm reason = p->fvalue;
DistEntry *dep;
struct saved_calls *scb;
+ process_breakpoint_time_t *pbt;
+
#ifdef DEBUG
int yield_allowed = 1;
#endif
@@ -8240,6 +8260,7 @@ continue_exit_process(Process *p
? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL)
: NULL);
scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
+ pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
processes_busy--;
@@ -8282,6 +8303,9 @@ continue_exit_process(Process *p
if (scb)
erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
+ if (pbt)
+ erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+
delete_process(p);
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
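
The erl_process.c hunks give the time breakpoints the three lifecycle events they need and tie the per-process accumulator to process cleanup: a call-traced process reports ERTS_BP_CALL_TIME_SCHEDULE_OUT when it leaves a scheduler, ERTS_BP_CALL_TIME_SCHEDULE_IN when it is picked up again, and ERTS_BP_CALL_TIME_SCHEDULE_EXITING when it exits; the PSD-attached record is then released with the ERTS_ALC_T_BPD allocator once the process is torn down. Compressed into one place, assuming nothing beyond what the hunks show:

    /* in schedule(), when descheduling or rescheduling a traced process */
    if (IS_TRACED_FL(p, F_TRACE_CALLS))
        erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);   /* or ..._SCHEDULE_IN */

    /* in erts_do_exit_process() / continue_exit_process() */
    if (IS_TRACED_FL(p, F_TRACE_CALLS))
        erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
    pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);       /* detach PSD entry  */
    if (pbt)
        erts_free(ERTS_ALC_T_BPD, (void *) pbt);                       /* free accumulator  */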
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index e2cb523cf5..8f9f7f004e 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -397,8 +397,9 @@ extern ErtsSchedulerData *erts_scheduler_data;
#define ERTS_PSD_SAVED_CALLS_BUF 1
#define ERTS_PSD_SCHED_ID 2
#define ERTS_PSD_DIST_ENTRY 3
+#define ERTS_PSD_CALL_TIME_BP 4
-#define ERTS_PSD_SIZE 4
+#define ERTS_PSD_SIZE 5
typedef struct {
void *data[ERTS_PSD_SIZE];
@@ -419,6 +420,9 @@ typedef struct {
#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_CALL_TIME_BP_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_CALL_TIME_BP_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
typedef struct {
ErtsProcLocks get_locks;
ErtsProcLocks set_locks;
@@ -1208,6 +1212,12 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#define ERTS_PROC_SET_SAVED_CALLS_BUF(P, L, SCB) \
((struct saved_calls *) erts_psd_set((P), (L), ERTS_PSD_SAVED_CALLS_BUF, (void *) (SCB)))
+#define ERTS_PROC_GET_CALL_TIME(P) \
+ ((process_breakpoint_time_t *) erts_psd_get((P), ERTS_PSD_CALL_TIME_BP))
+#define ERTS_PROC_SET_CALL_TIME(P, L, PBT) \
+ ((process_breakpoint_time_t *) erts_psd_set((P), (L), ERTS_PSD_CALL_TIME_BP, (void *) (PBT)))
+
+
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p,
ErtsProcLocks plocks,
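
erl_process.h adds a fifth process-specific-data slot, ERTS_PSD_CALL_TIME_BP, requiring the main process lock for both get and set, plus the ERTS_PROC_GET_CALL_TIME / ERTS_PROC_SET_CALL_TIME accessors. A hedged sketch of how a breakpoint could lazily attach its per-process timing record through that slot; the layout of process_breakpoint_time_t is not part of this diff, so the allocation below is illustrative only:

    process_breakpoint_time_t *pbt = ERTS_PROC_GET_CALL_TIME(p);
    if (!pbt) {
        /* first time-traced call in this process: allocate and attach */
        pbt = erts_alloc(ERTS_ALC_T_BPD, sizeof(process_breakpoint_time_t));
        (void) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt);
    }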
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 381a182e39..3043bb1e8c 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -43,8 +43,9 @@
#undef DEBUG_PRINTOUTS
#endif
-extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/* Pseudo export entries. Never filled in with data, only used to
yield unique pointers of the correct type. */
@@ -2116,6 +2117,7 @@ erts_bif_trace(int bif_index, Process* p,
Uint32 flags = 0, flags_meta = 0;
int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
+ int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME);
Eterm meta_tracer_pid = NIL;
int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
* is actually in the
@@ -2139,6 +2141,17 @@ erts_bif_trace(int bif_index, Process* p,
flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
&meta_tracer_pid);
}
+ if (time) {
+ BpDataTime *bdt = NULL;
+ BeamInstr *pc = (BeamInstr *)ep->code+3;
+
+ bdt = (BpDataTime *) erts_get_time_break(p, pc);
+ ASSERT(bdt);
+
+ if (!bdt->pause) {
+ erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL);
+ }
+ }
/* Restore original continuation pointer (if changed). */
p->cp = cp;
@@ -2147,8 +2160,9 @@ erts_bif_trace(int bif_index, Process* p,
result = func(p, arg1, arg2, arg3, I);
if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
- Uint i_return_trace = beam_return_trace[0];
- Uint i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
Eterm *cpp;
/* Maybe advance cp to skip trace stack frames */
for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
@@ -2156,6 +2170,10 @@ erts_bif_trace(int bif_index, Process* p,
/* Skip stack frame variables */
while (is_not_CP(*cpp)) cpp++;
cpp += 2; /* Skip return_trace parameters */
+ } else if (*cp == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 1; /* Skip return_time_trace parameters */
} else if (*cp == i_return_to_trace) {
/* A return_to trace message is going to be generated
* by normal means, so we do not have to.
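
In erts_bif_trace() the new BIF_TRACE_AS_CALL_TIME bit makes the wrapper take a time sample before the real BIF runs, and the return-address fix-up loop learns to step over the one-word i_return_time_trace frames just as it already steps over the two-word return_trace frames. A condensed sketch of the call-side path, using only names that appear in these hunks:

    if (erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME) {
        BeamInstr *pc = (BeamInstr *) ep->code + 3;               /* breakpoint address of the BIF */
        BpDataTime *bdt = (BpDataTime *) erts_get_time_break(p, pc);
        ASSERT(bdt);
        if (!bdt->pause)                                          /* honour a paused breakpoint    */
            erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL);
    }
    result = func(p, arg1, arg2, arg3, I);                        /* then run the real BIF         */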
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index 87c1d483f4..c604fdf7c3 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -37,7 +37,7 @@ typedef struct export
void* address; /* Pointer to code for function. */
struct binary* match_prog_set; /* Match program for tracing. */
- Eterm fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
+ BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
/*
* code[0]: Tagged atom for module.
* code[1]: Tagged atom for function.
@@ -52,11 +52,7 @@ typedef struct export
* on_load function that has not been run yet.
* Otherwise: 0.
*/
-#if HALFWORD_HEAP
BeamInstr code[5];
-#else
- Eterm code[5];
-#endif
} Export;
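
export.h now declares both fake_op_func_info_for_hipe[] and code[] as BeamInstr on every build, dropping the HALFWORD_HEAP special case, so the words the tracing code indexes have one type everywhere. The two slots the call-time hunks above actually touch:

    BeamInstr *pc = ep->code + 3;                         /* address handed to erts_*_time_break()       */
    ep->code[4]   = (BeamInstr) bif_table[bif_index].f;   /* restored in reset_bif_trace() when untraced */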
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 19dd3d6f97..b4a7a22082 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1741,8 +1741,10 @@ struct trace_pattern_flags {
unsigned int local : 1; /* Local call trace breakpoint */
unsigned int meta : 1; /* Metadata trace breakpoint */
unsigned int call_count : 1; /* Fast call count breakpoint */
+ unsigned int call_time : 1; /* Fast call time breakpoint */
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
+extern int erts_call_time_breakpoint_tracing;
int erts_set_trace_pattern(Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
@@ -1786,18 +1788,20 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
void (*)(ErlOffHeap *, void *),
void *);
-#define MATCH_SET_RETURN_TRACE 0x1 /* return trace requested */
-#define MATCH_SET_RETURN_TO_TRACE 0x2 /* Misleading name, it is not actually
- set by the match program, but by the
- breakpoint functions */
-#define MATCH_SET_EXCEPTION_TRACE 0x4 /* exception trace requested */
+#define MATCH_SET_RETURN_TRACE (0x1) /* return trace requested */
+#define MATCH_SET_RETURN_TO_TRACE (0x2) /* Misleading name, it is not actually
+ set by the match program, but by the
+ breakpoint functions */
+#define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */
#define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
/*
* Flag values when tracing bif
+ * Future note: flag field is 8 bits
*/
-#define BIF_TRACE_AS_LOCAL 0x1
-#define BIF_TRACE_AS_GLOBAL 0x2
-#define BIF_TRACE_AS_META 0x4
+#define BIF_TRACE_AS_LOCAL (0x1)
+#define BIF_TRACE_AS_GLOBAL (0x2)
+#define BIF_TRACE_AS_META (0x4)
+#define BIF_TRACE_AS_CALL_TIME (0x8)
extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
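
global.h grows the call_time bit in struct trace_pattern_flags, the erts_call_time_breakpoint_tracing global, and a fourth per-BIF trace mode, BIF_TRACE_AS_CALL_TIME, next to LOCAL, GLOBAL and META (with a note that the field holding them is 8 bits). Because the modes are independent bits, call-time tracing of a BIF can be toggled without disturbing other tracers, which is how the beam_bp.c hunks use it; a minimal sketch:

    erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME;    /* enable: leave other tracer bits alone  */
    if (erts_bif_trace_flags[i])
        setup_bif_trace(i);                               /* keep the BIF redirected to the wrapper */

    erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME;   /* disable only call_time                 */
    if (!erts_bif_trace_flags[i])
        reset_bif_trace(i);                               /* last tracer gone: restore the BIF      */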
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 42c84989c6..a2439d5582 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -84,6 +84,8 @@ i_trace_breakpoint
i_mtrace_breakpoint
i_debug_breakpoint
i_count_breakpoint
+i_time_breakpoint
+i_return_time_trace
i_return_to_trace
i_yield
i_global_cons