author    Björn Gustavsson <[email protected]>    2012-02-29 17:36:20 +0100
committer Björn Gustavsson <[email protected]>    2012-06-25 14:53:33 +0200
commit    bf5bf8b93dccca86c4775b21ee0db49eb6a3b133
tree      6d3288eb9e8afeae2c6d071d7976fc8478189464 /erts/emulator
parent    df41b1ab56ad0186d1233b141861fbaa794d69aa
Don't go to single-scheduler mode when managing breakpoints
Calls to erlang:trace_pattern/3 will no longer block all other schedulers.

We will still go to single-scheduler mode when new code is loaded for a module that is traced, or when loading code while a default trace pattern is set. That is not impossible to fix, but it would require much closer cooperation between the tracing BIFs and the loader BIFs.
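The change replaces the global scheduler block with a sequence of breakpoint-update stages separated by thread-progress waits (see erts_finish_breakpointing() in erl_bif_trace.c and handle_finish_bp() in erl_process.c below). As a rough orientation, the following standalone C sketch mimics that shape with hypothetical names and printouts; it illustrates the staging idea only and is not the actual ERTS code.

#include <stdio.h>

/*
 * Standalone sketch (hypothetical names, illustrative printouts) of the
 * staged approach used by the new erts_finish_breakpointing(): each call
 * performs one stage and returns non-zero while stages remain.  In the
 * emulator a thread-progress wait separates the stages; a comment marks
 * where it would go.
 */
static int stage = 0;

static int finish_breakpointing(void)
{
    switch (stage++) {
    case 0:
        puts("stage 0: write new (still disabled) breakpoint instructions");
        return 1;
    case 1:
        puts("stage 1: commit staged breakpoint data (enable/disable)");
        return 1;
    case 2:
        puts("stage 2: remove breakpoint instructions that are now unused");
        return 1;
    case 3:
        puts("stage 3: consolidate breakpoint data, free match programs");
        return 0;                       /* done */
    default:
        return 0;
    }
}

int main(void)
{
    /* The blocking fallback in erts_set_trace_pattern() simply loops like
     * this; the SMP path instead re-arms an aux-work job so that every
     * scheduler passes a thread-progress point between calls. */
    while (finish_breakpointing()) {
        /* (thread-progress wait between stages would go here) */
    }
    return 0;
}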
Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/beam/beam_bif_load.c |  12
-rw-r--r--  erts/emulator/beam/beam_bp.c       | 381
-rw-r--r--  erts/emulator/beam/beam_bp.h       |  15
-rw-r--r--  erts/emulator/beam/beam_debug.c    |   2
-rw-r--r--  erts/emulator/beam/beam_emu.c      |  60
-rw-r--r--  erts/emulator/beam/beam_load.h     |   1
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 546
-rw-r--r--  erts/emulator/beam/erl_process.c   |  55
-rw-r--r--  erts/emulator/beam/erl_process.h   |   8
-rw-r--r--  erts/emulator/beam/erl_trace.c     | 179
-rw-r--r--  erts/emulator/beam/export.c        |   6
-rw-r--r--  erts/emulator/beam/export.h        |  11
-rwxr-xr-x  erts/emulator/beam/global.h        |  13
-rw-r--r--  erts/emulator/beam/ops.tab         |   1
-rwxr-xr-x  erts/emulator/utils/make_tables    |   2
15 files changed, 693 insertions, 599 deletions
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 9c65769b86..94f8edf165 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -672,11 +672,11 @@ set_default_trace_pattern(Eterm module)
if (trace_pattern_is_on) {
Eterm mfa[1];
mfa[0] = module;
- (void) erts_set_trace_pattern(mfa, 1,
+ (void) erts_set_trace_pattern(0, mfa, 1,
match_spec,
meta_match_spec,
1, trace_pattern_flags,
- meta_tracer_pid);
+ meta_tracer_pid, 1);
}
}
@@ -1006,12 +1006,11 @@ delete_code(Module* modp)
if (ep->code[3] == (BeamInstr) em_apply_bif) {
continue;
}
- else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
+ else if (ep->code[3] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp->curr.num_traced_exports > 0);
- --modp->curr.num_traced_exports;
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
+ erts_clear_export_break(modp, ep->code+3);
}
else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
|| !erts_initialized);
@@ -1019,7 +1018,6 @@ delete_code(Module* modp)
ep->addressv[code_ix] = ep->code+3;
ep->code[3] = (BeamInstr) em_call_error_handler;
ep->code[4] = 0;
- ASSERT(ep->match_prog_set == NULL);
}
}
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index ebd7e1372f..50d18b0347 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -61,8 +61,9 @@
#define ERTS_BPF_DEBUG 0x10
#define ERTS_BPF_TIME_TRACE 0x20
#define ERTS_BPF_TIME_TRACE_ACTIVE 0x40
+#define ERTS_BPF_GLOBAL_TRACE 0x80
-#define ERTS_BPF_ALL 0x7F
+#define ERTS_BPF_ALL 0xFF
extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
@@ -80,7 +81,7 @@ erts_smp_atomic32_t erts_staging_bp_index;
** Helpers
*/
static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
- Binary* ms, Eterm tracer_pid);
+ int local, Binary* ms, Eterm tracer_pid);
static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
enum erts_break_op count_op, Eterm tracer_pid);
static void set_function_break(BeamInstr *pc,
@@ -101,7 +102,7 @@ static void bp_time_diff(bp_data_time_item_t *item,
static void bp_meta_unref(BpMetaPid* bmp);
static void bp_count_unref(BpCount* bcp);
static void bp_time_unref(BpDataTime* bdt);
-static void consolidate_bp_data(Module* modp, BeamInstr* pc);
+static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
static void uninstall_breakpoint(BeamInstr* pc);
/* bp_hash */
@@ -201,30 +202,79 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
}
void
+erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
+{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ int i;
+ int num_exps = export_list_size(code_ix);
+ int ne;
+
+ f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction));
+ ne = 0;
+ for (i = 0; i < num_exps; i++) {
+ Export* ep = export_list(i, code_ix);
+ BeamInstr* pc;
+ int j;
+
+ for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
+ /* Empty loop body */
+ }
+ if (j < specified) {
+ continue;
+ }
+ pc = ep->code+3;
+ if (ep->addressv[code_ix] == pc) {
+ if ((*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) em_call_error_handler)) {
+ continue;
+ }
+ ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ } else if (erts_is_native_break(ep->addressv[code_ix])) {
+ continue;
+ }
+
+ f->matching[ne].pc = pc;
+ f->matching[ne].mod = erts_get_module(ep->code[0], code_ix);
+ ne++;
+
+ }
+ f->matched = ne;
+}
+
+void
erts_bp_free_matched_functions(BpFunctions* f)
{
Free(f->matching);
}
void
-erts_consolidate_bp_data(BpFunctions* f)
+erts_consolidate_bp_data(BpFunctions* f, int local)
{
BpFunction* fs = f->matching;
Uint i;
Uint n = f->matched;
- for (i = 0; i < BIF_SIZE; i++) {
- Export *ep = bif_export[i];
- consolidate_bp_data(0, ep->code+3);
- }
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
for (i = 0; i < n; i++) {
- consolidate_bp_data(fs[i].mod, fs[i].pc);
+ consolidate_bp_data(fs[i].mod, fs[i].pc, local);
+ }
+}
+
+void
+erts_consolidate_bif_bp_data(void)
+{
+ int i;
+
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+ for (i = 0; i < BIF_SIZE; i++) {
+ Export *ep = bif_export[i];
+ consolidate_bp_data(0, ep->code+3, 0);
}
}
static void
-consolidate_bp_data(Module* modp, BeamInstr* pc)
+consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
{
GenericBp* g = (GenericBp *) pc[-4];
GenericBpData* src;
@@ -244,7 +294,7 @@ consolidate_bp_data(Module* modp, BeamInstr* pc)
*/
flags = dst->flags;
- if (flags & ERTS_BPF_LOCAL_TRACE) {
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
MatchSetUnref(dst->local_ms);
}
if (flags & ERTS_BPF_META_TRACE) {
@@ -264,13 +314,18 @@ consolidate_bp_data(Module* modp, BeamInstr* pc)
flags = dst->flags = src->flags;
if (flags == 0) {
- pc[-4] = 0;
- Free(g);
if (modp) {
- modp->curr.num_breakpoints--;
+ if (local) {
+ modp->curr.num_breakpoints--;
+ } else {
+ modp->curr.num_traced_exports--;
+ }
ASSERT(modp->curr.num_breakpoints >= 0);
+ ASSERT(modp->curr.num_traced_exports >= 0);
ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
}
+ pc[-4] = 0;
+ Free(g);
return;
}
@@ -279,7 +334,7 @@ consolidate_bp_data(Module* modp, BeamInstr* pc)
* for the next time it will be used).
*/
- if (flags & ERTS_BPF_LOCAL_TRACE) {
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
dst->local_ms = src->local_ms;
MatchSetRef(dst->local_ms);
}
@@ -377,21 +432,26 @@ uninstall_breakpoint(BeamInstr* pc)
void
erts_set_trace_break(BpFunctions* f, Binary *match_spec)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true);
}
void
erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
void
+erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
+{
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+
+ set_function_break(pc, match_spec, flags, 0, NIL);
+}
+
+void
erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
@@ -410,14 +470,12 @@ erts_clear_time_trace_bif(BeamInstr *pc) {
void
erts_set_debug_break(BpFunctions* f) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL);
}
void
erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
count_op, NIL);
}
@@ -425,7 +483,6 @@ erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
void
erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
count_op, NIL);
}
@@ -433,14 +490,25 @@ erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
void
erts_clear_trace_break(BpFunctions* f)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
clear_break(f, ERTS_BPF_LOCAL_TRACE);
}
void
+erts_clear_call_trace_bif(BeamInstr *pc, int local)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
+
+ if (g) {
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ if (g->data[erts_staging_bp_ix()].flags & flags) {
+ clear_function_break(pc, flags);
+ }
+ }
+}
+
+void
erts_clear_mtrace_break(BpFunctions* f)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
clear_break(f, ERTS_BPF_META_TRACE);
}
@@ -460,21 +528,18 @@ erts_clear_debug_break(BpFunctions* f)
void
erts_clear_count_break(BpFunctions* f)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
clear_break(f, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE);
}
void
erts_clear_time_break(BpFunctions* f)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
clear_break(f, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
}
void
erts_clear_all_breaks(BpFunctions* f)
{
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
clear_break(f, ERTS_BPF_ALL);
}
@@ -511,12 +576,24 @@ erts_clear_module_break(Module *modp) {
continue;
}
uninstall_breakpoint(pc);
- consolidate_bp_data(modp, pc);
+ consolidate_bp_data(modp, pc, 1);
ASSERT(pc[-4] == 0);
}
return n;
}
+void
+erts_clear_export_break(Module* modp, BeamInstr* pc)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+
+ clear_function_break(pc, ERTS_BPF_ALL);
+ erts_commit_staged_bp();
+ *pc = (BeamInstr) 0;
+ consolidate_bp_data(modp, pc, 0);
+ ASSERT(pc[-4] == 0);
+}
+
BeamInstr
erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
{
@@ -529,9 +606,12 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
bp = &g->data[ix];
bp_flags = bp->flags;
ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
- if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE) &&
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
+ ERTS_BPF_TIME_TRACE_ACTIVE) &&
!IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
ERTS_BPF_TIME_TRACE|
ERTS_BPF_TIME_TRACE_ACTIVE);
if (bp_flags == 0) { /* Quick exit */
@@ -540,7 +620,10 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
}
if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
- (void) do_call_trace(c_p, I, reg, bp->local_ms, am_true);
+ ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
+ (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true);
+ } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
+ (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true);
}
if (bp_flags & ERTS_BPF_META_TRACE) {
@@ -548,7 +631,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
Eterm new_pid;
old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
- new_pid = do_call_trace(c_p, I, reg, bp->meta_ms, old_pid);
+ new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid);
if (new_pid != old_pid) {
erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid);
}
@@ -590,9 +673,188 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
}
}
+/*
+ * Entry point called by the trace wrap functions in erl_bif_wrap.c
+ *
+ * The trace wrap functions are themselves called through the export
+ * entries instead of the original BIF functions.
+ */
+Eterm
+erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
+{
+ Eterm result;
+ Eterm (*func)(Process*, Eterm*, BeamInstr*);
+ Export* ep = bif_export[bif_index];
+ Uint32 flags = 0, flags_meta = 0;
+ Eterm meta_tracer_pid = NIL;
+ int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
+ BeamInstr *cp = p->cp;
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint bp_flags = 0;
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ bp = &g->data[erts_active_bp_ix()];
+ bp_flags = bp->flags;
+ }
+
+ /*
+ * Make continuation pointer OK, it is not during direct BIF calls,
+ * but it is correct during apply of bif.
+ */
+ if (!applying) {
+ p->cp = I;
+ }
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
+ flags = erts_call_trace(p, ep->code, bp->local_ms, args,
+ local, &p->tracer_proc);
+ }
+ if (bp_flags & ERTS_BPF_META_TRACE) {
+ Eterm tpid1, tpid2;
+
+ tpid1 = tpid2 =
+ (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
+ 0, &tpid2);
+ meta_tracer_pid = tpid2;
+ if (tpid1 != tpid2) {
+ erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
+ }
+ }
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ BeamInstr *pc = (BeamInstr *)ep->code+3;
+ erts_trace_time_call(p, pc, bp->time);
+ }
+
+ /* Restore original continuation pointer (if changed). */
+ p->cp = cp;
+
+ func = bif_table[bif_index].f;
+
+ result = func(p, args, I);
+
+ if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
+ Eterm *cpp;
+ /* Maybe advance cp to skip trace stack frames */
+ for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
+ if (*cp == i_return_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 2; /* Skip return_trace parameters */
+ } else if (*cp == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 1; /* Skip return_time_trace parameters */
+ } else if (*cp == i_return_to_trace) {
+ /* A return_to trace message is going to be generated
+ * by normal means, so we do not have to.
+ */
+ cp = NULL;
+ break;
+ } else break;
+ }
+ }
+
+ /* Try to get these in the order
+ * they usually appear in normal code... */
+ if (is_non_value(result)) {
+ Uint reason = p->freason;
+ if (reason != TRAP) {
+ Eterm class;
+ Eterm value = p->fvalue;
+ DeclareTmpHeapNoproc(nocatch,3);
+ UseTmpHeapNoproc(3);
+ /* Expand error value like in handle_error() */
+ if (reason & EXF_ARGLIST) {
+ Eterm *tp;
+ ASSERT(is_tuple(value));
+ tp = tuple_val(value);
+ value = tp[1];
+ }
+ if ((reason & EXF_THROWN) && (p->catches <= 0)) {
+ value = TUPLE2(nocatch, am_nocatch, value);
+ reason = EXC_ERROR;
+ }
+ /* Note: expand_error_value() could theoretically
+ * allocate on the heap, but not for any error
+ * returned by a BIF, and it would do no harm,
+ * just be annoying.
+ */
+ value = expand_error_value(p, reason, value);
+ class = exception_tag[GET_EXC_CLASS(reason)];
+
+ if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &meta_tracer_pid);
+ }
+ if (flags & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &p->tracer_proc);
+ }
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
+ /* can only happen if(local)*/
+ Eterm *ptr = p->stop;
+ ASSERT(is_CP(*ptr));
+ ASSERT(ptr <= STACK_START(p));
+ /* Search the nearest stack frame for a catch */
+ while (++ptr < STACK_START(p)) {
+ if (is_CP(*ptr)) break;
+ if (is_catch(*ptr)) {
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into
+ * calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ }
+ UnUseTmpHeapNoproc(3);
+ if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ p->trace_flags |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ } else {
+ if (flags_meta & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &meta_tracer_pid);
+ }
+ /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
+ if (flags & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &p->tracer_proc);
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE) {
+ /* can only happen if(local)*/
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ return result;
+}
+
static Eterm
do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
- Binary* ms, Eterm tracer_pid)
+ int local, Binary* ms, Eterm tracer_pid)
{
Eterm* cpp;
int return_to_trace = 0;
@@ -632,7 +894,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
ASSERT(is_CP(*cpp));
}
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, I-3, ms, reg, 1, &tracer_pid);
+ flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
if (cpp) {
c_p->cp = cp_save;
@@ -821,44 +1083,11 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
}
}
-/*
- * SMP NOTE: Process p may have become exiting on return!
- */
-Uint32
-erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local,
- Eterm *tracer_pid)
-{
- GenericBp* g;
-
- ASSERT(tracer_pid);
- g = (GenericBp *) pc[-4];
- if (g) {
- Eterm tpid1, tpid2;
- Uint32 flags;
- GenericBpData* bp;
- ErtsBpIndex ix = erts_active_bp_ix();
-
- bp = &g->data[ix];
- tpid1 = tpid2 =
- (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
- flags = erts_call_trace(p, pc-3/*mfa*/, bp->meta_ms, args,
- local, &tpid2);
- *tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
- }
- return flags;
- }
- *tracer_pid = NIL;
- return 0;
-}
-
-
-
int
-erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret)
+erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
{
- GenericBpData* bp = check_break(pc, ERTS_BPF_LOCAL_TRACE);
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ GenericBpData* bp = check_break(pc, flags);
if (bp) {
if (match_spec_ret) {
@@ -1220,6 +1449,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
Uint common;
ErtsBpIndex ix = erts_staging_bp_ix();
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
g = (GenericBp *) pc[-4];
if (g == 0) {
int i;
@@ -1241,7 +1471,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
*/
common = break_flags & bp->flags;
- if (common & ERTS_BPF_LOCAL_TRACE) {
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
MatchSetUnref(bp->local_ms);
} else if (common & ERTS_BPF_META_TRACE) {
MatchSetUnref(bp->meta_ms);
@@ -1276,7 +1506,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
* Initialize the new breakpoint data.
*/
- if (break_flags & ERTS_BPF_LOCAL_TRACE) {
+ if (break_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
MatchSetRef(match_spec);
bp->local_ms = match_spec;
} else if (break_flags & ERTS_BPF_META_TRACE) {
@@ -1335,6 +1565,8 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
Uint common;
ErtsBpIndex ix = erts_staging_bp_ix();
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+
if ((g = (GenericBp *) pc[-4]) == 0) {
return 1;
}
@@ -1343,7 +1575,7 @@ clear_function_break(BeamInstr *pc, Uint break_flags)
ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
common = bp->flags & break_flags;
bp->flags &= ~break_flags;
- if (common & ERTS_BPF_LOCAL_TRACE) {
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
MatchSetUnref(bp->local_ms);
}
if (common & ERTS_BPF_META_TRACE) {
@@ -1426,13 +1658,6 @@ get_time_break(BeamInstr *pc)
return bp ? bp->time : 0;
}
-BpDataTime*
-erts_get_active_time_break(BeamInstr *pc)
-{
- GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE_ACTIVE);
- return bp ? bp->time : 0;
-}
-
static GenericBpData*
check_break(BeamInstr *pc, Uint break_flags)
{
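The breakpoint code above keeps two copies of each breakpoint's data, indexed by erts_active_bp_ix() and erts_staging_bp_ix(): tracing BIFs write only the staging copy, erts_commit_staged_bp() flips the index, and consolidate_bp_data() later makes the two copies equal again. A minimal standalone sketch of that double-buffering idea follows; the struct and plain ints are stand-ins for the real GenericBpData, atomics, and reference-counted match specs.

#include <stdio.h>

struct bp_data {
    unsigned flags;              /* e.g. local/global trace bits */
};

struct generic_bp {
    struct bp_data data[2];      /* one copy active, one being staged */
};

static int active_ix = 0;
#define STAGING_IX() (active_ix ^ 1)

static void set_break(struct generic_bp *g, unsigned flag)
{
    g->data[STAGING_IX()].flags |= flag;   /* only the staged copy is touched */
}

static void commit_staged_bp(void)
{
    active_ix ^= 1;                        /* staged data becomes active */
}

static void consolidate_bp_data(struct generic_bp *g)
{
    /* Make the (new) staging copy equal to the active one so the next
     * update starts from the current state. */
    g->data[STAGING_IX()] = g->data[active_ix];
}

int main(void)
{
    struct generic_bp g = { { {0}, {0} } };

    set_break(&g, 0x1u);                   /* stage e.g. a local-trace flag */
    commit_staged_bp();                    /* in ERTS, an atomic index flip */
    consolidate_bp_data(&g);

    printf("active flags: %#x, staged flags: %#x\n",
           g.data[active_ix].flags, g.data[STAGING_IX()].flags);
    return 0;
}

The index flip is what lets schedulers that are already executing keep reading a consistent copy while the tracing BIF prepares the next one.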
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index e854df2e19..28aaaa462a 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -119,20 +119,27 @@ ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void);
ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void);
void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified);
void erts_bp_free_matched_functions(BpFunctions* f);
void erts_install_breakpoints(BpFunctions* f);
void erts_uninstall_breakpoints(BpFunctions* f);
-void erts_consolidate_bp_data(BpFunctions* f);
+void erts_consolidate_bp_data(BpFunctions* f, int local);
+void erts_consolidate_bif_bp_data(void);
void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
void erts_clear_trace_break(BpFunctions *f);
+
+void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
+void erts_clear_call_trace_bif(BeamInstr *pc, int local);
+
void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
Eterm tracer_pid);
void erts_clear_mtrace_break(BpFunctions *f);
void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
Eterm tracer_pid);
void erts_clear_mtrace_bif(BeamInstr *pc);
+
void erts_set_debug_break(BpFunctions *f);
void erts_clear_debug_break(BpFunctions *f);
void erts_set_count_break(BpFunctions *f, enum erts_break_op);
@@ -141,14 +148,13 @@ void erts_clear_count_break(BpFunctions *f);
void erts_clear_all_breaks(BpFunctions* f);
int erts_clear_module_break(Module *modp);
+void erts_clear_export_break(Module *modp, BeamInstr* pc);
BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32 *ret_flags, Eterm *tracer_pid);
-Uint32 erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args,
- int local, Eterm *tracer_pid);
-int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret);
+int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_rte);
int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
@@ -166,7 +172,6 @@ void erts_clear_time_break(BpFunctions *f);
int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
void erts_clear_time_trace_bif(BeamInstr *pc);
-BpDataTime* erts_get_active_time_break(BeamInstr *pc);
BeamInstr *erts_find_local_func(Eterm mfa[3]);
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 8d72627b38..a609ed8c71 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -132,7 +132,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
erts_commit_staged_bp();
erts_uninstall_breakpoints(&f);
}
- erts_consolidate_bp_data(&f);
+ erts_consolidate_bp_data(&f, 1);
res = make_small(f.matched);
erts_bp_free_matched_functions(&f);
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index b5282830af..efce070061 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -217,7 +217,6 @@ BeamInstr beam_continue_exit[1];
BeamInstr* em_call_error_handler;
BeamInstr* em_apply_bif;
-BeamInstr* em_call_traced_function;
/* NOTE These should be the only variables containing trace instructions.
@@ -4571,64 +4570,6 @@ void process_main(void)
* Trace and debugging support.
*/
- /*
- * At this point, I points to the code[3] in the export entry for
- * a trace-enabled function.
- *
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&call_traced_function
- * code[4]: Address of function.
- */
- OpCase(call_traced_function): {
- if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- unsigned offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
- Export* ep = (Export *) (((char *)I)-offset);
- Uint32 flags;
-
- SWAPOUT;
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg,
- 0, &c_p->tracer_proc);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- SWAPIN;
-
- if (flags & MATCH_SET_RX_TRACE) {
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - 3 < HTOP) {
- /* SWAPOUT, SWAPIN was done and r(0) was saved above */
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- }
- E -= 3;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((BeamInstr)(ep->code)));
- ASSERT(is_internal_pid(c_p->tracer_proc) ||
- is_internal_port(c_p->tracer_proc));
- E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */
- E[1] = am_true; /* Process tracer */
- E[0] = make_cp(ep->code);
- c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE)
- ? beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- SET_I((BeamInstr *)Arg(0));
- Dispatch();
- }
-
OpCase(return_trace): {
BeamInstr* code = (BeamInstr *) (UWord) E[0];
@@ -5015,7 +4956,6 @@ void process_main(void)
#endif /* NO_JUMP_TABLE */
em_call_error_handler = OpCode(call_error_handler);
- em_call_traced_function = OpCode(call_traced_function);
em_apply_bif = OpCode(apply_bif);
beam_apply[0] = (BeamInstr) OpCode(i_apply);
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index f1506a8684..1048f258a5 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -49,7 +49,6 @@ extern void** beam_ops;
extern BeamInstr beam_debug_apply[];
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_apply_bif;
-extern BeamInstr* em_call_traced_function;
/*
* The following variables keep a sorted list of address ranges for
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 971e5ed650..e88fb8c9f4 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -42,12 +42,24 @@
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
+
+/*
+ * The following variables are protected by code write permission.
+ */
static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
static Eterm erts_default_meta_tracer_pid;
+static struct { /* Protected by code write permission */
+ int current;
+ int install;
+ int local;
+ BpFunctions f; /* Local functions */
+ BpFunctions e; /* Export entries */
+} finish_bp;
+
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
static BIF_RETTYPE
@@ -60,12 +72,11 @@ static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
-static int setup_func_trace(Export* ep, void* match_prog, ErtsCodeIndex);
-static int reset_func_trace(Export* ep, ErtsCodeIndex);
-static void reset_bif_trace(int bif_index);
-static void setup_bif_trace(int bif_index);
-static void set_trace_bif(int bif_index, void* match_prog);
-static void clear_trace_bif(int bif_index);
+static void reset_bif_trace(void);
+static void setup_bif_trace(void);
+static void install_exp_breakpoints(BpFunctions* f);
+static void uninstall_exp_breakpoints(BpFunctions* f);
+static void clean_export_entries(BpFunctions* f);
void
erts_bif_trace_init(void)
@@ -107,12 +118,12 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
int is_global;
Process *meta_tracer_proc = p;
Eterm meta_tracer_pid = p->id;
+ int is_blocking = 0;
if (!erts_try_seize_code_write_permission(p)) {
ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ finish_bp.current = -1;
UseTmpHeap(3,p);
/*
@@ -328,16 +339,24 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
meta_tracer_proc->trace_flags |= F_TRACER;
}
- matches = erts_set_trace_pattern(mfa, specified,
+ matches = erts_set_trace_pattern(p, mfa, specified,
match_prog_set, match_prog_set,
- on, flags, meta_tracer_pid);
+ on, flags, meta_tracer_pid, 0);
}
error:
MatchSetUnref(match_prog_set);
UnUseTmpHeap(3,p);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+
+#ifdef ERTS_SMP
+ if (finish_bp.current >= 0) {
+ ASSERT(matches >= 0);
+ erts_notify_finish_breakpointing(p);
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(p, make_small(matches));
+ }
+#endif
+
erts_release_code_write_permission();
if (matches >= 0) {
@@ -355,6 +374,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
Eterm *meta_tracer_pid)
{
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked() ||
+ erts_smp_thr_progress_is_blocking());
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -369,6 +390,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
int erts_is_default_trace_enabled(void)
{
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked() ||
+ erts_smp_thr_progress_is_blocking());
return erts_default_trace_pattern_is_on;
}
@@ -842,6 +865,11 @@ Eterm trace_info_2(BIF_ALIST_2)
Eterm What = BIF_ARG_1;
Eterm Key = BIF_ARG_2;
Eterm res;
+
+ if (!erts_try_seize_code_write_permission(p)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, What, Key);
+ }
+
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
} else if (is_atom(What) || is_pid(What)) {
@@ -849,8 +877,10 @@ Eterm trace_info_2(BIF_ALIST_2)
} else if (is_tuple(What)) {
res = trace_info_func(p, What, Key);
} else {
+ erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
}
+ erts_release_code_write_permission();
BIF_RET(res);
}
@@ -983,59 +1013,49 @@ static int function_is_traced(Process *p,
{
Export e;
Export* ep;
- int i;
- BeamInstr *code;
+ BeamInstr* pc;
/* First look for an export entry */
e.code[0] = mfa[0];
e.code[1] = mfa[1];
e.code[2] = mfa[2];
if ((ep = export_get(&e)) != NULL) {
- if (ep->addressv[erts_active_code_ix()] == ep->code+3 &&
- ep->code[3] != (BeamInstr) em_call_error_handler) {
- if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- *ms = ep->match_prog_set;
+ pc = ep->code+3;
+ if (ep->addressv[erts_active_code_ix()] == pc &&
+ *pc != (BeamInstr) em_call_error_handler) {
+
+ int r = 0;
+
+ ASSERT(*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+
+ if (erts_is_trace_break(pc, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
- for (i = 0; i < BIF_SIZE; ++i) {
- if (bif_export[i] == ep) {
- int r = 0;
-
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- *ms = ep->match_prog_set;
- return FUNC_TRACE_GLOBAL_TRACE;
- } else {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- r |= FUNC_TRACE_LOCAL_TRACE;
- *ms = ep->match_prog_set;
- }
- if (erts_is_mtrace_break(ep->code+3, ms_meta,
- tracer_pid_meta)) {
- r |= FUNC_TRACE_META_TRACE;
- }
- if (erts_is_time_break(p, ep->code+3, call_time)) {
- r |= FUNC_TRACE_TIME_TRACE;
- }
- }
- return r ? r : FUNC_TRACE_UNTRACED;
- }
- }
- erl_exit(1,"Impossible ghost bif encountered in trace_info.");
+
+ if (erts_is_trace_break(pc, ms, 1)) {
+ r |= FUNC_TRACE_LOCAL_TRACE;
+ }
+ if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) {
+ r |= FUNC_TRACE_META_TRACE;
}
+ if (erts_is_time_break(p, pc, call_time)) {
+ r |= FUNC_TRACE_TIME_TRACE;
+ }
+ return r ? r : FUNC_TRACE_UNTRACED;
}
}
/* OK, now look for breakpoint tracing */
- if ((code = erts_find_local_func(mfa)) != NULL) {
+ if ((pc = erts_find_local_func(mfa)) != NULL) {
int r =
- (erts_is_trace_break(code, ms)
+ (erts_is_trace_break(pc, ms, 1)
? FUNC_TRACE_LOCAL_TRACE : 0)
- | (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta)
+ | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
- | (erts_is_count_break(code, count)
+ | (erts_is_count_break(pc, count)
? FUNC_TRACE_COUNT_TRACE : 0)
- | (erts_is_time_break(p, code, call_time)
+ | (erts_is_time_break(p, pc, call_time)
? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1327,40 +1347,46 @@ trace_info_on_load(Process* p, Eterm key)
#undef FUNC_TRACE_LOCAL_TRACE
int
-erts_set_trace_pattern(Eterm* mfa, int specified,
+erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
- Eterm meta_tracer_pid)
+ Eterm meta_tracer_pid, int is_blocking)
{
const ErtsCodeIndex code_ix = erts_active_code_ix();
- BpFunctions f;
int matches = 0;
int i;
+ int n;
+ BpFunction* fp;
/*
* First work on normal functions (not real BIFs).
*/
-
- for (i = 0; i < export_list_size(code_ix); i++) {
- Export* ep = export_list(i, code_ix);
- int j;
-
- if (ExportIsBuiltIn(ep)) {
- continue;
- }
-
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j == specified) {
- if (on) {
- if (! flags.breakpoint)
- matches += setup_func_trace(ep, match_prog_set, code_ix);
- else
- reset_func_trace(ep, code_ix);
- } else if (! flags.breakpoint) {
- matches += reset_func_trace(ep, code_ix);
+ erts_bp_match_export(&finish_bp.e, mfa, specified);
+ fp = finish_bp.e.matching;
+ n = finish_bp.e.matched;
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code));
+
+ if (!on || flags.breakpoint) {
+ erts_clear_call_trace_bif(pc, 0);
+ if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ }
+ } else {
+ if (ep->addressv[code_ix] != pc) {
+ fp[i].mod->curr.num_traced_exports++;
+#ifdef DEBUG
+ pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+#endif
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ pc[1] = (BeamInstr) ep->addressv[code_ix];
+ }
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
+ if (ep->addressv[code_ix] != pc) {
+ pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
}
}
}
@@ -1385,26 +1411,15 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/* Empty loop body */
}
if (j == specified) {
+ BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3;
+
if (! flags.breakpoint) { /* Export entry call trace */
if (on) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- ASSERT(ExportIsBuiltIn(bif_export[i]));
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_GLOBAL;
- setup_bif_trace(i);
+ erts_clear_call_trace_bif(pc, 1);
+ erts_clear_mtrace_bif(pc);
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
} else { /* off */
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
- }
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
+ erts_clear_call_trace_bif(pc, 0);
}
matches++;
} else { /* Breakpoint call trace */
@@ -1412,52 +1427,33 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (on) {
if (flags.local) {
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_clear_call_trace_bif(pc, 0);
+ erts_set_call_trace_bif(pc, match_prog_set, 1);
m = 1;
}
if (flags.meta) {
- erts_set_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3,
- meta_match_prog_set, meta_tracer_pid);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_META;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_set_mtrace_bif(pc, meta_match_prog_set,
+ meta_tracer_pid);
m = 1;
}
if (flags.call_time) {
- erts_set_time_trace_bif(bif_export[i]->code + 3, on);
+ erts_set_time_trace_bif(pc, on);
/* I don't want to remove any other tracers */
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME;
m = 1;
}
- if (erts_bif_trace_flags[i]) {
- setup_bif_trace(i);
- }
} else { /* off */
if (flags.local) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- }
+ erts_clear_call_trace_bif(pc, 1);
m = 1;
}
if (flags.meta) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
+ erts_clear_mtrace_bif(pc);
m = 1;
}
if (flags.call_time) {
- erts_clear_time_trace_bif(bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME;
+ erts_clear_time_trace_bif(pc);
m = 1;
}
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
}
matches += m;
}
@@ -1467,184 +1463,242 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/*
** So, now for breakpoint tracing
*/
- erts_bp_match_functions(&f, mfa, specified);
+ erts_bp_match_functions(&finish_bp.f, mfa, specified);
if (on) {
if (! flags.breakpoint) {
- erts_clear_all_breaks(&f);
- erts_commit_staged_bp();
- erts_uninstall_breakpoints(&f);
+ erts_clear_all_breaks(&finish_bp.f);
} else {
if (flags.local) {
- erts_set_trace_break(&f, match_prog_set);
+ erts_set_trace_break(&finish_bp.f, match_prog_set);
}
if (flags.meta) {
- erts_set_mtrace_break(&f, meta_match_prog_set,
+ erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
meta_tracer_pid);
}
if (flags.call_count) {
- erts_set_count_break(&f, on);
+ erts_set_count_break(&finish_bp.f, on);
}
if (flags.call_time) {
- erts_set_time_break(&f, on);
+ erts_set_time_break(&finish_bp.f, on);
}
- erts_install_breakpoints(&f);
- erts_commit_staged_bp();
}
} else {
if (flags.local) {
- erts_clear_trace_break(&f);
+ erts_clear_trace_break(&finish_bp.f);
}
if (flags.meta) {
- erts_clear_mtrace_break(&f);
+ erts_clear_mtrace_break(&finish_bp.f);
}
if (flags.call_count) {
- erts_clear_count_break(&f);
+ erts_clear_count_break(&finish_bp.f);
}
if (flags.call_time) {
- erts_clear_time_break(&f);
+ erts_clear_time_break(&finish_bp.f);
}
- erts_commit_staged_bp();
- erts_uninstall_breakpoints(&f);
}
- erts_consolidate_bp_data(&f);
+
+ finish_bp.current = 0;
+ finish_bp.install = on;
+ finish_bp.local = flags.breakpoint;
+
+#ifdef ERTS_SMP
+ if (is_blocking) {
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+#endif
+ while (erts_finish_breakpointing()) {
+ /* Empty loop body */
+ }
+#ifdef ERTS_SMP
+ finish_bp.current = -1;
+ }
+#endif
+
if (flags.breakpoint) {
- matches += f.matched;
+ matches += finish_bp.f.matched;
+ } else {
+ matches += finish_bp.e.matched;
}
- erts_bp_free_matched_functions(&f);
return matches;
}
-/*
- * Setup function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
-
-static int
-setup_func_trace(Export* ep, void* match_prog, ErtsCodeIndex code_ix)
+int
+erts_finish_breakpointing(void)
{
- Module* modp;
-
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
- }
- }
-
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+
/*
- * Currently no trace support for native code.
+ * Memory barriers will be issued for all processes *before*
+ * each of the stages below. (Unless the other schedulers
+ * are blocked, in which case memory barriers will be issued
+ * when they are awakened.)
*/
- if (erts_is_native_break(ep->addressv[code_ix])) {
+
+ switch (finish_bp.current++) {
+ case 0:
+ /*
+ * At this point, in all functions that are to be breakpointed,
+ * a pointer to a GenericBp struct has already been added,
+ *
+ * Insert the new breakpoints (if any) into the
+ * code. Different schedulers may see breakpoint instruction
+ * at different times, but it does not matter since the newly
+ * added breakpoints are disabled.
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ erts_install_breakpoints(&finish_bp.f);
+ } else {
+ install_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ setup_bif_trace();
+ return 1;
+ case 1:
+ /*
+ * Switch index for the breakpoint data, activating the staged
+ * data. (Depending on the changes in the breakpoint data,
+ * that could either activate breakpoints or disable
+ * breakpoints.)
+ */
+ erts_commit_staged_bp();
+ return 1;
+ case 2:
+ /*
+ * Remove breakpoints instructions for disabled breakpoints
+ * (if any).
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ } else {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ }
+ } else {
+ if (finish_bp.local) {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ } else {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ reset_bif_trace();
+ return 1;
+ case 3:
+ /*
+ * Now all breakpoints have either been inserted or removed.
+ * For all updated breakpoints, copy the active breakpoint
+ * data to the staged breakpoint data to make them equal
+ * (simplifying for the next time breakpoints are to be
+ * updated). If any breakpoints have been totally disabled,
+ * deallocate the GenericBp structs for them.
+ */
+ erts_consolidate_bif_bp_data();
+ clean_export_entries(&finish_bp.e);
+ erts_consolidate_bp_data(&finish_bp.e, 0);
+ erts_consolidate_bp_data(&finish_bp.f, 1);
+ erts_bp_free_matched_functions(&finish_bp.e);
+ erts_bp_free_matched_functions(&finish_bp.f);
return 0;
+ default:
+ ASSERT(0);
}
+ return 0;
+}
- ep->code[3] = (BeamInstr) em_call_traced_function;
- ep->code[4] = (BeamInstr) ep->addressv[code_ix];
- ep->addressv[code_ix] = ep->code+3;
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
+static void
+install_exp_breakpoints(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
- modp = erts_get_module(ep->code[0], code_ix);
- ASSERT(modp);
- modp->curr.num_traced_exports++;
- return 1;
-}
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
-static void setup_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[bif_index].traced;
+ ep->addressv[code_ix] = pc;
+ }
}
-static void set_trace_bif(int bif_index, void* match_prog) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "set_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
-}
+static void
+uninstall_exp_breakpoints(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
-/*
- * Reset function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
-static int
-reset_func_trace(Export* ep, ErtsCodeIndex code_ix)
-{
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- Module* modp = erts_get_module(ep->code[0], code_ix);
- ASSERT(modp);
- modp->curr.num_traced_exports--;
-
- ep->addressv[code_ix] = (Uint *) ep->code[4];
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
+ if (ep->addressv[code_ix] != pc) {
+ continue;
}
+ ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f));
+ ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
}
-
- /*
- * Currently no trace support for native code.
- */
- if (erts_is_native_break(ep->addressv[code_ix])) {
- return 0;
- }
-
- /*
- * Nothing to do, but the export entry matches.
- */
+}
+
+static void
+clean_export_entries(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
+
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
- return 1;
+ if (ep->addressv[code_ix] == pc) {
+ continue;
+ }
+ if (*pc == (BeamInstr) BeamOp(op_jump_f)) {
+ ep->code[3] = (BeamInstr) 0;
+ ep->code[4] = (BeamInstr) 0;
+ }
+ }
}
-static void reset_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ASSERT(! ep->match_prog_set);
- ep->code[4] = (BeamInstr) bif_table[bif_index].f;
+static void
+setup_bif_trace(void)
+{
+ int i;
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].traced;
+ }
+ }
+ }
}
-static void clear_trace_bif(int bif_index) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "clear_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
+static void
+reset_bif_trace(void)
+{
+ int i;
+ ErtsBpIndex active = erts_active_bp_ix();
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ BeamInstr* pc = ep->code+3;
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (g && g->data[active].flags == 0) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].f;
+ }
+ }
+ }
}
/*
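install_exp_breakpoints(), uninstall_exp_breakpoints(), and clean_export_entries() above all recover the Export entry from a pc that points at code[3] by subtracting offsetof(Export, code) + 3*sizeof(BeamInstr); erts_set_trace_pattern() and erts_bif_trace() use the same trick. The idiom is ordinary container-of arithmetic, shown here as a self-contained sketch with a hypothetical struct:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Self-contained illustration of the pointer arithmetic used above to get
 * from a pc pointing at entry->code[3] back to the enclosing entry.
 * The struct is hypothetical; the real Export has more fields. */

typedef uintptr_t BeamInstr;

struct entry {
    void *addressv;
    BeamInstr code[5];
};

static struct entry *entry_of_pc(BeamInstr *pc)
{
    size_t offset = offsetof(struct entry, code) + 3*sizeof(BeamInstr);
    return (struct entry *) (((char *) pc) - offset);
}

int main(void)
{
    struct entry e;
    BeamInstr *pc = &e.code[3];     /* what the breakpoint code sees */

    printf("recovered the right entry: %s\n",
           entry_of_pc(pc) == &e ? "yes" : "no");
    return 0;
}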
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 3d978f4994..e07c9ae2b0 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -530,6 +530,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#endif
#ifdef ERTS_SMP
valid |= ERTS_SSI_AUX_WORK_CODE_IX_ACTIVATION;
+ valid |= ERTS_SSI_AUX_WORK_FINISH_BP;
#endif
#ifdef ERTS_SSI_AUX_WORK_REAP_PORTS
valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
@@ -1446,6 +1447,55 @@ handle_code_ix_activation(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
}
#endif /* ERTS_SMP */
+#ifdef ERTS_SMP
+void
+erts_notify_finish_breakpointing(Process* p)
+{
+ ErtsAuxWorkData* awdp = &p->scheduler_data->aux_work_data;
+
+ ASSERT(awdp->bp_ix_activation.stager == NULL);
+ awdp->bp_ix_activation.stager = p;
+ awdp->bp_ix_activation.thr_prgr = erts_thr_progress_later(awdp->esdp);
+ erts_thr_progress_wakeup(awdp->esdp, awdp->bp_ix_activation.thr_prgr);
+ erts_smp_proc_inc_refc(p);
+ set_aux_work_flags_wakeup_relb(p->scheduler_data->ssi,
+ ERTS_SSI_AUX_WORK_FINISH_BP);
+}
+
+static erts_aint32_t
+handle_finish_bp(ErtsAuxWorkData* awdp, erts_aint32_t aux_work)
+{
+ ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+
+ if (!erts_thr_progress_has_reached_this(current,
+ awdp->bp_ix_activation.thr_prgr)) {
+ return aux_work & ~ERTS_SSI_AUX_WORK_FINISH_BP;
+ }
+ if (erts_finish_breakpointing()) { /* Not done */
+ /* Arrange for being called again */
+ awdp->bp_ix_activation.thr_prgr =
+ erts_thr_progress_later(awdp->esdp);
+ erts_thr_progress_wakeup(awdp->esdp, awdp->bp_ix_activation.thr_prgr);
+ } else { /* Done */
+ Process* p;
+
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_FINISH_BP);
+ p = awdp->bp_ix_activation.stager;
+#ifdef DEBUG
+ awdp->bp_ix_activation.stager = NULL;
+#endif
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_dec_refc(p);
+ erts_release_code_write_permission();
+ }
+ return aux_work & ~ERTS_SSI_AUX_WORK_FINISH_BP;
+}
+#endif /* ERTS_SMP */
+
static ERTS_INLINE erts_aint32_t
handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
@@ -1817,6 +1867,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS,
handle_reap_ports);
+#ifdef ERTS_SMP
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_FINISH_BP,
+ handle_finish_bp);
+#endif
+
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
#ifdef ERTS_SMP
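handle_finish_bp() above is an aux-work handler that does nothing until thread progress has passed the value recorded when the work was requested, runs one breakpointing stage, re-arms itself while erts_finish_breakpointing() reports more work, and finally resumes the process suspended in trace_pattern/3. Below is a standalone sketch of that control flow, with a plain counter standing in for thread progress and hypothetical names.

#include <stdio.h>

static unsigned long current_progress = 0;   /* fake thread progress */
static unsigned long wait_until = 0;         /* progress value we wait for */
static int stages_left = 4;                  /* fake multi-stage job */

static int handle_finish_bp(void)
{
    if (current_progress < wait_until)
        return 0;                            /* not yet safe to continue */
    if (--stages_left > 0) {
        wait_until = current_progress + 1;   /* re-arm for the next stage */
        printf("ran a stage, %d left\n", stages_left);
        return 0;
    }
    puts("all stages done: resume suspended caller, release write permission");
    return 1;
}

int main(void)
{
    /* Each loop iteration stands in for all schedulers passing a
     * thread-progress point, after which the handler runs again. */
    while (!handle_finish_bp())
        current_progress++;
    return 0;
}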
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index e9e5a3365f..93e71681da 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -283,6 +283,7 @@ typedef enum {
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 10)
#define ERTS_SSI_AUX_WORK_CODE_IX_ACTIVATION (((erts_aint32_t) 1) << 11)
#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 12)
+#define ERTS_SSI_AUX_WORK_FINISH_BP (((erts_aint32_t) 1) << 13)
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
@@ -472,6 +473,12 @@ typedef struct {
ErtsThrPrgrVal thr_prgr;
} code_ix_activation;
#endif
+#ifdef ERTS_SMP
+ struct {
+ Process* stager;
+ ErtsThrPrgrVal thr_prgr;
+ } bp_ix_activation;
+#endif
} ErtsAuxWorkData;
struct ErtsSchedulerData_ {
@@ -1188,6 +1195,7 @@ void erts_notify_check_async_ready_queue(void *);
#endif
#ifdef ERTS_SMP
void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later);
+void erts_notify_finish_breakpointing(Process* p);
#endif
void erts_schedule_misc_aux_work(int sched_id,
void (*func)(void *),
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index c64c9d8eee..d04a91f18c 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2134,185 +2134,6 @@ void save_calls(Process *p, Export *e)
}
}
-/*
- * Entry point called by the trace wrap functions in erl_bif_wrap.c
- *
- * The trace wrap functions are themselves called through the export
- * entries instead of the original BIF functions.
- */
-Eterm
-erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
-{
- Eterm result;
- int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);
-
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-
- if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_CALLS) && (! meta)) {
- /* Warning! This is an Optimization.
- *
- * If neither meta trace is active nor process trace flags then
- * no tracing will occur. Doing the whole else branch will
- * also do nothing, only slower.
- */
- Eterm (*func)(Process*, Eterm*, BeamInstr*) = bif_table[bif_index].f;
- result = func(p, args, I);
- } else {
- Eterm (*func)(Process*, Eterm*, BeamInstr*);
- Export* ep = bif_export[bif_index];
- Uint32 flags = 0, flags_meta = 0;
- int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
- int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
- int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME);
- Eterm meta_tracer_pid = NIL;
- int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
- BeamInstr *cp = p->cp;
-
- /*
- * Make continuation pointer OK, it is not during direct BIF calls,
- * but it is correct during apply of bif.
- */
- if (!applying) {
- p->cp = I;
- }
- if (global || local) {
- flags = erts_call_trace(p, ep->code, ep->match_prog_set, args,
- local, &p->tracer_proc);
- }
- if (meta) {
- flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
- &meta_tracer_pid);
- }
- if (time) {
- BpDataTime *bdt;
- BeamInstr *pc = (BeamInstr *)ep->code+3;
-
- bdt = erts_get_active_time_break(pc);
- if (bdt) {
- erts_trace_time_call(p, pc, bdt);
- }
- }
- /* Restore original continuation pointer (if changed). */
- p->cp = cp;
-
- func = bif_table[bif_index].f;
-
- result = func(p, args, I);
-
- if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
- BeamInstr i_return_trace = beam_return_trace[0];
- BeamInstr i_return_to_trace = beam_return_to_trace[0];
- BeamInstr i_return_time_trace = beam_return_time_trace[0];
- Eterm *cpp;
- /* Maybe advance cp to skip trace stack frames */
- for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
- if (*cp == i_return_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 2; /* Skip return_trace parameters */
- } else if (*cp == i_return_time_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 1; /* Skip return_time_trace parameters */
- } else if (*cp == i_return_to_trace) {
- /* A return_to trace message is going to be generated
- * by normal means, so we do not have to.
- */
- cp = NULL;
- break;
- } else break;
- }
- }
-
- /* Try to get these in the order
- * they usually appear in normal code... */
- if (is_non_value(result)) {
- Uint reason = p->freason;
- if (reason != TRAP) {
- Eterm class;
- Eterm value = p->fvalue;
- DeclareTmpHeapNoproc(nocatch,3);
- UseTmpHeapNoproc(3);
- /* Expand error value like in handle_error() */
- if (reason & EXF_ARGLIST) {
- Eterm *tp;
- ASSERT(is_tuple(value));
- tp = tuple_val(value);
- value = tp[1];
- }
- if ((reason & EXF_THROWN) && (p->catches <= 0)) {
- value = TUPLE2(nocatch, am_nocatch, value);
- reason = EXC_ERROR;
- }
- /* Note: expand_error_value() could theoretically
- * allocate on the heap, but not for any error
- * returned by a BIF, and it would do no harm,
- * just be annoying.
- */
- value = expand_error_value(p, reason, value);
- class = exception_tag[GET_EXC_CLASS(reason)];
-
- if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &meta_tracer_pid);
- }
- if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &p->tracer_proc);
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
- /* can only happen if(local)*/
- Eterm *ptr = p->stop;
- ASSERT(is_CP(*ptr));
- ASSERT(ptr <= STACK_START(p));
- /* Search the nearest stack frame for a catch */
- while (++ptr < STACK_START(p)) {
- if (is_CP(*ptr)) break;
- if (is_catch(*ptr)) {
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into
- * calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- UnUseTmpHeapNoproc(3);
- if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- } else {
- if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer_pid);
- }
- /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
- if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &p->tracer_proc);
- }
- if (flags & MATCH_SET_RETURN_TO_TRACE) {
- /* can only happen if(local)*/
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- return result;
-}
-
/* Sends trace message:
* {trace_ts, Pid, What, Msg, Timestamp}
* or {trace, Pid, What, Msg}
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 229641cb32..6b5121f917 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -137,7 +137,6 @@ export_alloc(struct export_entry* tmpl_e)
obj->code[2] = tmpl->code[2];
obj->code[3] = (BeamInstr) em_call_error_handler;
obj->code[4] = 0;
- obj->match_prog_set = NULL;
for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
obj->addressv[ix] = obj->code+3;
@@ -260,8 +259,9 @@ erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
struct export_entry* ee;
ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
- if (ee == NULL || (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
- ee->ep->code[3] != (BeamInstr) em_call_traced_function)) {
+ if (ee == NULL ||
+ (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
+ ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
return NULL;
}
return ee->ep;
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index ec9fcb26f2..ee06e69aff 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -37,7 +37,6 @@
typedef struct export
{
void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
- struct binary* match_prog_set; /* Match program for tracing. */
BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
/*
@@ -46,12 +45,12 @@ typedef struct export
* code[2]: Arity (untagged integer).
* code[3]: This entry is 0 unless the 'address' field points to it.
* Threaded code instruction to load function
- * (em_call_error_handler), execute BIF (em_apply_bif,
- * em_apply_apply), or call a traced function
- * (em_call_traced_function).
- * code[4]: Function pointer to BIF function (for BIFs only)
+ * (em_call_error_handler), execute BIF (em_apply_bif),
+ * or a breakpoint instruction (op_i_generic_breakpoint).
+ * code[4]: Function pointer to BIF function (for BIFs only),
* or pointer to threaded code if the module has an
- * on_load function that has not been run yet.
+ * on_load function that has not been run yet, or pointer
+ * to code if code[3] is a breakpoint instruction.
* Otherwise: 0.
*/
BeamInstr code[5];
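The comment above describes the redirection used for a traced export entry: addressv is made to point at code[3], which then holds a breakpoint (or jump) instruction word, while code[4] keeps the original destination. The sketch below models that indirection with ordinary C function pointers and a hypothetical entry layout; the real entry stores threaded-code instruction words, and the casts here are purely illustrative.

#include <stdio.h>
#include <stdint.h>

typedef void (*func_t)(void);

enum { OP_BREAKPOINT = 1 };      /* stands in for op_i_generic_breakpoint */

struct export_entry {
    uintptr_t address;           /* where a call through the entry goes */
    uintptr_t code[5];           /* [0..2] MFA, [3] instruction, [4] saved target */
};

static void callee(void) { puts("callee runs"); }

static void call_through(struct export_entry *ep)
{
    if (ep->address == (uintptr_t) &ep->code[3] &&
        ep->code[3] == OP_BREAKPOINT) {
        puts("breakpoint: emit call-trace message first");
        ((func_t) ep->code[4])();          /* then continue at saved target */
    } else {
        ((func_t) ep->address)();          /* untraced fast path */
    }
}

int main(void)
{
    struct export_entry ep = {0};

    ep.address = (uintptr_t) callee;       /* untraced: straight to the code */
    call_through(&ep);

    ep.code[3] = OP_BREAKPOINT;            /* traced: redirect through code[3] */
    ep.code[4] = (uintptr_t) callee;
    ep.address = (uintptr_t) &ep.code[3];
    call_through(&ep);
    return 0;
}

clean_export_entries() above does the reverse: once addressv no longer points into the entry, code[3] and code[4] are zeroed again.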
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 9e387c550f..c9be20322d 100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1682,10 +1682,10 @@ struct trace_pattern_flags {
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Eterm* mfa, int specified,
+int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
- Eterm meta_tracer_pid);
+ Eterm meta_tracer_pid, int is_blocking);
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
Binary **match_spec,
@@ -1694,6 +1694,7 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
Eterm *meta_tracer_pid);
int erts_is_default_trace_enabled(void);
void erts_bif_trace_init(void);
+int erts_finish_breakpointing(void);
/*
** Call_trace uses this API for the parameter matching functions
@@ -1739,14 +1740,6 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
breakpoint functions */
#define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */
#define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
-/*
- * Flag values when tracing bif
- * Future note: flag field is 8 bits
- */
-#define BIF_TRACE_AS_LOCAL (0x1)
-#define BIF_TRACE_AS_GLOBAL (0x2)
-#define BIF_TRACE_AS_META (0x4)
-#define BIF_TRACE_AS_CALL_TIME (0x8)
extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 8cb57a9c60..6764e88c81 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -519,7 +519,6 @@ apply_bif
call_nif
call_error_handler
error_action_code
-call_traced_function
return_trace
#
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index 91efb4c023..a841f26d6a 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -167,7 +167,6 @@ typedef struct bif_entry {
extern BifEntry bif_table[];
extern Export* bif_export[];
-extern unsigned char erts_bif_trace_flags[];
#define BIF_SIZE $bif_size
@@ -197,7 +196,6 @@ includes("export.h", "sys.h", "erl_vm.h", "erl_process.h", "bif.h",
"erl_bif_table.h", "erl_atom_table.h");
print "\nExport* bif_export[BIF_SIZE];\n";
-print "unsigned char erts_bif_trace_flags[BIF_SIZE];\n\n";
print "BifEntry bif_table[] = {\n";
for ($i = 0; $i < @bif; $i++) {