author     Björn Gustavsson <[email protected]>  2012-06-28 11:33:10 +0200
committer  Björn Gustavsson <[email protected]>  2012-06-28 11:33:10 +0200
commit     65c23210b49d348954a6f7f6c70d02f79676ffeb (patch)
tree       b580ad9099457f495f1ffaadac94e9190f87eb4d
parent     2af66d1761c54e0bc2811a2ad2950f62d9ceccdc (diff)
parent     00d769bf3d35df706dbbb9155052183db3631475 (diff)
download   otp-65c23210b49d348954a6f7f6c70d02f79676ffeb.tar.gz
           otp-65c23210b49d348954a6f7f6c70d02f79676ffeb.tar.bz2
           otp-65c23210b49d348954a6f7f6c70d02f79676ffeb.zip
Merge branch 'bjorn/erts/breakpoints/OTP-10122'
* bjorn/erts/breakpoints/OTP-10122:
  Strengthen trace test cases
  Don't go to single-scheduler mode when managing breakpoints
  Refactor the code for managing breakpoints
  Change the data structures for breakpoints
  Add trace_local_SUITE:concurrency/1
  Make the lock check in erts_is_code_ix_locked() stronger
-rw-r--r--  erts/emulator/beam/beam_bif_load.c        |   12
-rw-r--r--  erts/emulator/beam/beam_bp.c              | 1997
-rw-r--r--  erts/emulator/beam/beam_bp.h              |  233
-rw-r--r--  erts/emulator/beam/beam_debug.c           |   13
-rw-r--r--  erts/emulator/beam/beam_emu.c             |  242
-rw-r--r--  erts/emulator/beam/beam_load.h            |    1
-rw-r--r--  erts/emulator/beam/code_ix.c              |   16
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c        |  559
-rw-r--r--  erts/emulator/beam/erl_nif.c              |    7
-rw-r--r--  erts/emulator/beam/erl_process.c          |   55
-rw-r--r--  erts/emulator/beam/erl_process.h          |    8
-rw-r--r--  erts/emulator/beam/erl_trace.c            |  181
-rw-r--r--  erts/emulator/beam/export.c               |    6
-rw-r--r--  erts/emulator/beam/export.h               |   11
-rwxr-xr-x  erts/emulator/beam/global.h               |   13
-rw-r--r--  erts/emulator/beam/ops.tab                |    6
-rw-r--r--  erts/emulator/test/call_trace_SUITE.erl   |   13
-rw-r--r--  erts/emulator/test/trace_local_SUITE.erl  |   53
-rwxr-xr-x  erts/emulator/utils/make_tables           |    2
19 files changed, 1729 insertions, 1699 deletions
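
The heart of this merge is the beam_bp.c rewrite below: the old per-scheduler ring of BpData records hanging off each breakpointed function is replaced by a single GenericBp record holding two generations of breakpoint data, selected through the global erts_active_bp_index/erts_staging_bp_index atomics. As an editorial aid (not part of the patch), here is the shape of that structure as inferred from this diff; the authoritative definitions live in erts/emulator/beam/beam_bp.h, whose diff is truncated at the end of this page:

    /* Reader's sketch, inferred from the beam_bp.c diff below. */
    typedef struct {
        Uint        flags;     /* ERTS_BPF_* bits set in this generation */
        Binary*     local_ms;  /* match spec for local/global call trace */
        Binary*     meta_ms;   /* match spec for meta trace */
        BpMetaPid*  meta_pid;  /* refcounted meta tracer pid */
        BpCount*    count;     /* refcounted call counter */
        BpDataTime* time;      /* refcounted call_time accounting */
    } GenericBpData_sketch;

    typedef struct {
        BeamInstr orig_instr;                      /* displaced original instruction */
        GenericBpData_sketch data[ERTS_NUM_BP_IX]; /* active + staging generations */
    } GenericBp_sketch;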
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 9c65769b86..94f8edf165 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -672,11 +672,11 @@ set_default_trace_pattern(Eterm module)
if (trace_pattern_is_on) {
Eterm mfa[1];
mfa[0] = module;
- (void) erts_set_trace_pattern(mfa, 1,
+ (void) erts_set_trace_pattern(0, mfa, 1,
match_spec,
meta_match_spec,
1, trace_pattern_flags,
- meta_tracer_pid);
+ meta_tracer_pid, 1);
}
}
@@ -1006,12 +1006,11 @@ delete_code(Module* modp)
if (ep->code[3] == (BeamInstr) em_apply_bif) {
continue;
}
- else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
+ else if (ep->code[3] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp->curr.num_traced_exports > 0);
- --modp->curr.num_traced_exports;
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
+ erts_clear_export_break(modp, ep->code+3);
}
else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
|| !erts_initialized);
@@ -1019,7 +1018,6 @@ delete_code(Module* modp)
ep->addressv[code_ix] = ep->code+3;
ep->code[3] = (BeamInstr) em_call_error_handler;
ep->code[4] = 0;
- ASSERT(ep->match_prog_set == NULL);
}
}
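
The new branch on ep->code[3] above distinguishes the three possible states of an export entry's code slot; a hypothetical restatement (not code from the patch):

    BeamInstr instr = ep->code[3];
    if (instr == (BeamInstr) em_apply_bif) {
        /* BIF stub: left alone */
    } else if (instr == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
        /* traced export: cleanup is delegated to erts_clear_export_break() */
    } else {
        /* em_call_error_handler (or the emulator is not yet initialized) */
    }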
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 26dadfbbc0..50d18b0347 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -44,67 +44,34 @@
#define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SIZ))
#define Free(P) erts_free(ERTS_ALC_T_BPD, (P))
-/*
-** Doubly linked ring macros
-*/
-
-#define BpInit(a,i) \
-do { \
- (a)->orig_instr = (i); \
- (a)->next = (a); \
- (a)->prev = (a); \
-} while (0)
-
-#define BpSpliceNext(a,b) \
-do { \
- register BpData *c = (a), *d = (b), *e; \
- e = c->next->prev; \
- c->next->prev = d->next->prev; \
- d->next->prev = e; \
- e = c->next; \
- c->next = d->next; \
- d->next = e; \
-} while (0)
-
-#define BpSplicePrev(a,b) \
-do { \
- register BpData *c = (a), *d = (b), *e; \
- e = c->prev->next; \
- c->prev->next = d->prev->next; \
- d->prev->next = e; \
- e = c->prev; \
- c->prev = d->prev; \
- d->prev = e; \
-} while (0)
-
-#ifdef DEBUG
-# define BpSingleton(a) ((a)->next == (a) && (a)->prev == (a))
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
-# define BpSingleton(a) ((a)->next == (a))
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
#endif
-#define BpInitAndSpliceNext(a,i,b) \
-do { \
- (a)->orig_instr = (i); \
- (a)->prev = (b); \
- (b)->next->prev = (a); \
- (a)->next = (b)->next; \
- (b)->next = (a); \
-} while (0)
+#define ERTS_BPF_LOCAL_TRACE 0x01
+#define ERTS_BPF_META_TRACE 0x02
+#define ERTS_BPF_COUNT 0x04
+#define ERTS_BPF_COUNT_ACTIVE 0x08
+#define ERTS_BPF_DEBUG 0x10
+#define ERTS_BPF_TIME_TRACE 0x20
+#define ERTS_BPF_TIME_TRACE_ACTIVE 0x40
+#define ERTS_BPF_GLOBAL_TRACE 0x80
-#define BpInitAndSplicePrev(a,i,b) \
-do { \
- (a)->orig_instr = (i); \
- (a)->next = (b); \
- (b)->prev->next = (a); \
- (a)->prev = (b)->prev; \
- (b)->prev = (a); \
-} while (0)
+#define ERTS_BPF_ALL 0xFF
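/* Editorial note (not part of the patch): the *_ACTIVE bits allow a
 * breakpoint to be paused without discarding its data.  A function
 * traced for both call count and call time would carry
 *
 *     flags = ERTS_BPF_COUNT | ERTS_BPF_COUNT_ACTIVE
 *           | ERTS_BPF_TIME_TRACE | ERTS_BPF_TIME_TRACE_ACTIVE;
 *
 * and, as set_function_break() below shows, erts_break_stop merely
 * clears the ACTIVE bit:
 *
 *     flags &= ~ERTS_BPF_COUNT_ACTIVE;
 */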
+extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern Eterm beam_exception_trace[1]; /* OpCode(i_exception_trace) */
+extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
-#define BREAK_IS_BIF (1)
-#define BREAK_IS_ERL (0)
-
+erts_smp_atomic32_t erts_active_bp_index;
+erts_smp_atomic32_t erts_staging_bp_index;
/* *************************************************************************
** Local prototypes
@@ -113,26 +80,30 @@ do { \
/*
** Helpers
*/
-
-static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-static int set_function_break(Module *modp, BeamInstr *pc, int bif,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-
-static int clear_break(Eterm mfa[3], int specified,
- BeamInstr break_op);
-static int clear_module_break(Module *modp, Eterm mfa[3], int specified,
- BeamInstr break_op);
-static int clear_function_break(Module *modp, BeamInstr *pc, int bif,
- BeamInstr break_op);
-
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op);
-static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op);
+static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, Eterm tracer_pid);
+static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid);
+static void set_function_break(BeamInstr *pc,
+ Binary *match_spec,
+ Uint break_flags,
+ enum erts_break_op count_op,
+ Eterm tracer_pid);
+
+static void clear_break(BpFunctions* f, Uint break_flags);
+static int clear_function_break(BeamInstr *pc, Uint break_flags);
+
+static BpDataTime* get_time_break(BeamInstr *pc);
+static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
+static void bp_time_diff(bp_data_time_item_t *item,
+ process_breakpoint_time_t *pbt,
+ Uint ms, Uint s, Uint us);
+
+static void bp_meta_unref(BpMetaPid* bmp);
+static void bp_count_unref(BpCount* bcp);
+static void bp_time_unref(BpDataTime* bdt);
+static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
+static void uninstall_breakpoint(BeamInstr* pc);
/* bp_hash */
#define BP_TIME_ADD(pi0, pi1) \
@@ -152,240 +123,996 @@ static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_da
static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
static void bp_hash_delete(bp_time_hash_t *hash);
-
/* *************************************************************************
** External interfaces
*/
-erts_smp_spinlock_t erts_bp_lock;
-
void
erts_bp_init(void) {
- erts_smp_spinlock_init(&erts_bp_lock, "breakpoints");
+ erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
+ erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
}
-int
-erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, match_spec,
- (BeamInstr) BeamOp(op_i_trace_breakpoint), 0, tracer_pid);
+
+void
+erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
+{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ Uint max_funcs = 0;
+ int current;
+ int max_modules = module_code_size(code_ix);
+ int num_modules = 0;
+ Module* modp;
+ Module** module;
+ Uint i;
+
+ module = (Module **) Alloc(max_modules*sizeof(Module *));
+ num_modules = 0;
+ for (current = 0; current < max_modules; current++) {
+ modp = module_code(current, code_ix);
+ if (modp->curr.code) {
+ max_funcs += modp->curr.code[MI_NUM_FUNCTIONS];
+ module[num_modules++] = modp;
+ }
+ }
+
+ f->matching = (BpFunction *) Alloc(max_funcs*sizeof(BpFunction));
+ i = 0;
+ for (current = 0; current < num_modules; current++) {
+ BeamInstr** code_base = (BeamInstr **) module[current]->curr.code;
+ BeamInstr* code;
+ Uint num_functions = (Uint) code_base[MI_NUM_FUNCTIONS];
+ Uint fi;
+
+ if (specified > 0) {
+ if (mfa[0] != make_atom(module[current]->module)) {
+ /* Wrong module name */
+ continue;
+ }
+ }
+
+ for (fi = 0; fi < num_functions; fi++) {
+ Eterm* pc;
+ int wi;
+
+ code = code_base[MI_FUNCTIONS+fi];
+ ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ pc = code+5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ if (is_nil(code[3])) { /* Ignore BIF stub */
+ continue;
+ }
+ for (wi = 0;
+ wi < specified && (Eterm) code[2+wi] == mfa[wi];
+ wi++) {
+ /* Empty loop body */
+ }
+ if (wi == specified) {
+ /* Store match */
+ f->matching[i].pc = pc;
+ f->matching[i].mod = module[current];
+ i++;
+ }
+ }
+ }
+ f->matched = i;
+ Free(module);
}
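/* Editorial sketch (not part of the patch): `specified` is the number
 * of leading elements of {Module,Function,Arity} in mfa[] that must
 * match.  With a hypothetical module atom am_foo:
 *
 *     BpFunctions f;
 *     Eterm mfa[3];
 *     mfa[0] = am_foo;
 *     erts_bp_match_functions(&f, mfa, 1);  -- every function in foo
 *     erts_bp_match_functions(&f, mfa, 0);  -- every loaded function
 *     ...
 *     erts_bp_free_matched_functions(&f);
 */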
-int
-erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, match_spec,
- (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+void
+erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
+{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ int i;
+ int num_exps = export_list_size(code_ix);
+ int ne;
+
+ f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction));
+ ne = 0;
+ for (i = 0; i < num_exps; i++) {
+ Export* ep = export_list(i, code_ix);
+ BeamInstr* pc;
+ int j;
+
+ for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
+ /* Empty loop body */
+ }
+ if (j < specified) {
+ continue;
+ }
+ pc = ep->code+3;
+ if (ep->addressv[code_ix] == pc) {
+ if ((*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) em_call_error_handler)) {
+ continue;
+ }
+ ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ } else if (erts_is_native_break(ep->addressv[code_ix])) {
+ continue;
+ }
+
+ f->matching[ne].pc = pc;
+ f->matching[ne].mod = erts_get_module(ep->code[0], code_ix);
+ ne++;
+
+ }
+ f->matched = ne;
}
-/* set breakpoint data for on exported bif entry */
+void
+erts_bp_free_matched_functions(BpFunctions* f)
+{
+ Free(f->matching);
+}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+erts_consolidate_bp_data(BpFunctions* f, int local)
+{
+ BpFunction* fs = f->matching;
+ Uint i;
+ Uint n = f->matched;
+
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+
+ for (i = 0; i < n; i++) {
+ consolidate_bp_data(fs[i].mod, fs[i].pc, local);
+ }
}
-void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) {
- set_function_break(NULL, pc, BREAK_IS_BIF, NULL, (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+void
+erts_consolidate_bif_bp_data(void)
+{
+ int i;
+
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+ for (i = 0; i < BIF_SIZE; i++) {
+ Export *ep = bif_export[i];
+ consolidate_bp_data(0, ep->code+3, 0);
+ }
}
-void erts_clear_time_trace_bif(BeamInstr *pc) {
- clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_time_breakpoint));
+static void
+consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
+ GenericBpData* src;
+ GenericBpData* dst;
+ Uint flags;
+
+ if (g == 0) {
+ return;
+ }
+
+ src = &g->data[erts_active_bp_ix()];
+ dst = &g->data[erts_staging_bp_ix()];
+
+ /*
+ * The contents of the staging area may be out of date.
+ * Decrement all reference pointers.
+ */
+
+ flags = dst->flags;
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(dst->local_ms);
+ }
+ if (flags & ERTS_BPF_META_TRACE) {
+ bp_meta_unref(dst->meta_pid);
+ MatchSetUnref(dst->meta_ms);
+ }
+ if (flags & ERTS_BPF_COUNT) {
+ bp_count_unref(dst->count);
+ }
+ if (flags & ERTS_BPF_TIME_TRACE) {
+ bp_time_unref(dst->time);
+ }
+
+ /*
+ * If all flags are zero, deallocate all breakpoint data.
+ */
+
+ flags = dst->flags = src->flags;
+ if (flags == 0) {
+ if (modp) {
+ if (local) {
+ modp->curr.num_breakpoints--;
+ } else {
+ modp->curr.num_traced_exports--;
+ }
+ ASSERT(modp->curr.num_breakpoints >= 0);
+ ASSERT(modp->curr.num_traced_exports >= 0);
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ }
+ pc[-4] = 0;
+ Free(g);
+ return;
+ }
+
+ /*
+ * Copy the active data to the staging area (making it ready
+ * for the next time it will be used).
+ */
+
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ dst->local_ms = src->local_ms;
+ MatchSetRef(dst->local_ms);
+ }
+ if (flags & ERTS_BPF_META_TRACE) {
+ dst->meta_pid = src->meta_pid;
+ erts_refc_inc(&dst->meta_pid->refc, 1);
+ dst->meta_ms = src->meta_ms;
+ MatchSetRef(dst->meta_ms);
+ }
+ if (flags & ERTS_BPF_COUNT) {
+ dst->count = src->count;
+ erts_refc_inc(&dst->count->refc, 1);
+ }
+ if (flags & ERTS_BPF_TIME_TRACE) {
+ dst->time = src->time;
+ erts_refc_inc(&dst->time->refc, 1);
+ ASSERT(dst->time->hash);
+ }
}
-int
-erts_set_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_debug_breakpoint), 0, NIL);
+void
+erts_commit_staged_bp(void)
+{
+ ErtsBpIndex staging = erts_staging_bp_ix();
+ ErtsBpIndex active = erts_active_bp_ix();
+
+ erts_smp_atomic32_set_nob(&erts_active_bp_index, staging);
+ erts_smp_atomic32_set_nob(&erts_staging_bp_index, active);
}
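/* Editorial sketch (not part of the patch): the intended life cycle of
 * a staged breakpoint update as driven from erl_bif_trace.c, with mfa,
 * specified and match_spec as placeholders and the thread-progress
 * synchronization between the steps omitted:
 *
 *     erts_bp_match_functions(&f, mfa, specified); -- collect targets
 *     erts_set_trace_break(&f, match_spec);        -- edit the staging generation
 *     erts_install_breakpoints(&f);                -- plant op_i_generic_breakpoint
 *     erts_commit_staged_bp();                     -- flip active <-> staging
 *     erts_consolidate_bp_data(&f, 1);             -- re-sync the new staging copy
 *     erts_bp_free_matched_functions(&f);
 */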
-int
-erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL);
+void
+erts_install_breakpoints(BpFunctions* f)
+{
+ Uint i;
+ Uint n = f->matched;
+ BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (*pc != br && g) {
+ Module* modp = f->matching[i].mod;
+
+ /*
+ * The breakpoint must be disabled in the active data
+ * (it will be enabled later by switching bp indices),
+ * and enabled in the staging data.
+ */
+ ASSERT(g->data[erts_active_bp_ix()].flags == 0);
+ ASSERT(g->data[erts_staging_bp_ix()].flags != 0);
+
+ /*
+ * The following write is not protected by any lock. We
+ * assume that the hardware guarantees that an aligned word-size
+ * (or half-word) write is atomic
+ * (i.e. that other processes executing this code will not
+ * see a half pointer).
+ */
+ *pc = br;
+ modp->curr.num_breakpoints++;
+ }
+ }
}
-int
-erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+void
+erts_uninstall_breakpoints(BpFunctions* f)
+{
+ Uint i;
+ Uint n = f->matched;
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ uninstall_breakpoint(pc);
+ }
}
-int
-erts_clear_trace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_trace_breakpoint));
+static void
+uninstall_breakpoint(BeamInstr* pc)
+{
+ if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (g->data[erts_active_bp_ix()].flags == 0) {
+ /*
+ * The following write is not protected by any lock. We
+ * assume that the hardware guarantees that a write of an
+ * aligned word-size (or half-word) writes is atomic
+ * (i.e. that other processes executing this code will not
+ * see a half pointer).
+ */
+ *pc = g->orig_instr;
+ }
+ }
}
-int
-erts_clear_mtrace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+void
+erts_set_trace_break(BpFunctions* f, Binary *match_spec)
+{
+ set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true);
}
void
-erts_clear_mtrace_bif(BeamInstr *pc) {
- clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid)
+{
+ set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
-int
-erts_clear_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_debug_breakpoint));
+void
+erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
+{
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+
+ set_function_break(pc, match_spec, flags, 0, NIL);
}
-int
-erts_clear_count_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_count_breakpoint));
+void
+erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid)
+{
+ set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
-int
-erts_clear_time_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_time_breakpoint));
+void
+erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
+{
+ set_function_break(pc, NULL,
+ ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
+ count_op, NIL);
}
-int
-erts_clear_break(Eterm mfa[3], int specified) {
+void
+erts_clear_time_trace_bif(BeamInstr *pc) {
+ clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+}
+
+void
+erts_set_debug_break(BpFunctions* f) {
+ set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL);
+}
+
+void
+erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
+{
+ set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
+ count_op, NIL);
+}
+
+void
+erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
+{
+ set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
+ count_op, NIL);
+}
+
+void
+erts_clear_trace_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_LOCAL_TRACE);
+}
+
+void
+erts_clear_call_trace_bif(BeamInstr *pc, int local)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
+
+ if (g) {
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ if (g->data[erts_staging_bp_ix()].flags & flags) {
+ clear_function_break(pc, flags);
+ }
+ }
+}
+
+void
+erts_clear_mtrace_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_META_TRACE);
+}
+
+void
+erts_clear_mtrace_bif(BeamInstr *pc)
+{
+ clear_function_break(pc, ERTS_BPF_META_TRACE);
+}
+
+void
+erts_clear_debug_break(BpFunctions* f)
+{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified, 0);
+ clear_break(f, ERTS_BPF_DEBUG);
+}
+
+void
+erts_clear_count_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE);
+}
+
+void
+erts_clear_time_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+}
+
+void
+erts_clear_all_breaks(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_ALL);
}
int
erts_clear_module_break(Module *modp) {
+ BeamInstr** code_base;
+ Uint n;
+ Uint i;
+
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp);
- return clear_module_break(modp, NULL, 0, 0);
+ code_base = (BeamInstr **) modp->curr.code;
+ if (code_base == NULL) {
+ return 0;
+ }
+ n = (Uint) code_base[MI_NUM_FUNCTIONS];
+ for (i = 0; i < n; ++i) {
+ BeamInstr* pc;
+
+ pc = code_base[MI_FUNCTIONS+i] + 5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ clear_function_break(pc, ERTS_BPF_ALL);
+ }
+
+ erts_commit_staged_bp();
+
+ for (i = 0; i < n; ++i) {
+ BeamInstr* pc;
+
+ pc = code_base[MI_FUNCTIONS+i] + 5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ uninstall_breakpoint(pc);
+ consolidate_bp_data(modp, pc, 1);
+ ASSERT(pc[-4] == 0);
+ }
+ return n;
}
-int
-erts_clear_function_break(Module *modp, BeamInstr *pc) {
+void
+erts_clear_export_break(Module* modp, BeamInstr* pc)
+{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- ASSERT(modp);
- return clear_function_break(modp, pc, BREAK_IS_ERL, 0);
+
+ clear_function_break(pc, ERTS_BPF_ALL);
+ erts_commit_staged_bp();
+ *pc = (BeamInstr) 0;
+ consolidate_bp_data(modp, pc, 0);
+ ASSERT(pc[-4] == 0);
}
+BeamInstr
+erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
+{
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint bp_flags;
+ ErtsBpIndex ix = erts_active_bp_ix();
+
+ g = (GenericBp *) I[-4];
+ bp = &g->data[ix];
+ bp_flags = bp->flags;
+ ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
+ ERTS_BPF_TIME_TRACE_ACTIVE) &&
+ !IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
+ bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
+ ERTS_BPF_TIME_TRACE|
+ ERTS_BPF_TIME_TRACE_ACTIVE);
+ if (bp_flags == 0) { /* Quick exit */
+ return g->orig_instr;
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
+ ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
+ (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true);
+ } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
+ (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true);
+ }
+
+ if (bp_flags & ERTS_BPF_META_TRACE) {
+ Eterm old_pid;
+ Eterm new_pid;
+
+ old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid);
+ if (new_pid != old_pid) {
+ erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid);
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
+ erts_smp_atomic_inc_nob(&bp->count->acount);
+ }
+
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
+ Eterm w;
+ erts_trace_time_call(c_p, I, bp->time);
+ w = (BeamInstr) *c_p->cp;
+ if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
+ w == (BeamInstr) BeamOp(op_return_trace) ||
+ w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) {
+ Eterm* E = c_p->stop;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - 2 < c_p->htop) {
+ (void) erts_garbage_collect(c_p, 2, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ }
+ E = c_p->stop;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+
+ E -= 2;
+ E[0] = make_cp(I);
+ E[1] = make_cp(c_p->cp); /* original return address */
+ c_p->cp = beam_return_time_trace;
+ c_p->stop = E;
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_DEBUG) {
+ return (BeamInstr) BeamOp(op_i_debug_breakpoint);
+ } else {
+ return g->orig_instr;
+ }
+}
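/* Editorial sketch (not part of the patch): beam_emu.c is expected to
 * dispatch op_i_generic_breakpoint roughly as below, with the register
 * swap-out/swap-in around the call omitted:
 *
 *     BeamInstr real_I = erts_generic_breakpoint(c_p, I, reg);
 *     Goto(real_I);    -- resume with the displaced instruction
 */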
/*
- * SMP NOTE: Process p may have become exiting on return!
+ * Entry point called by the trace wrap functions in erl_bif_wrap.c
+ *
+ * The trace wrap functions are themselves called through the export
+ * entries instead of the original BIF functions.
*/
-BeamInstr
-erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
- Uint32 *ret_flags, Eterm *tracer_pid) {
- Eterm tpid1, tpid2;
- BpData **bds = (BpData **) (pc)[-4];
- BpDataTrace *bdt = NULL;
+Eterm
+erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
+{
+ Eterm result;
+ Eterm (*func)(Process*, Eterm*, BeamInstr*);
+ Export* ep = bif_export[bif_index];
+ Uint32 flags = 0, flags_meta = 0;
+ Eterm meta_tracer_pid = NIL;
+ int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
+ BeamInstr *cp = p->cp;
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint bp_flags = 0;
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ bp = &g->data[erts_active_bp_ix()];
+ bp_flags = bp->flags;
+ }
- ASSERT(bds);
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- bdt = (BpDataTrace *) bds[bp_sched2ix_proc(p)];
- ASSERT(bdt);
- bdt = (BpDataTrace *) bdt->next;
- ASSERT(bdt);
- ASSERT(ret_flags);
- ASSERT(tracer_pid);
-
- ErtsSmpBPLock(bdt);
- tpid1 = tpid2 = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
-
- *ret_flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
- 1, &tpid2);
- *tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- ErtsSmpBPLock(bdt);
- bdt->tracer_pid = tpid2;
- ErtsSmpBPUnlock(bdt);
- }
- bds[bp_sched2ix_proc(p)] = (BpData *) bdt;
- return bdt->orig_instr;
+ /*
+ * Make the continuation pointer valid; it is not during direct
+ * BIF calls, but it is correct during apply of a BIF.
+ */
+ if (!applying) {
+ p->cp = I;
+ }
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
+ flags = erts_call_trace(p, ep->code, bp->local_ms, args,
+ local, &p->tracer_proc);
+ }
+ if (bp_flags & ERTS_BPF_META_TRACE) {
+ Eterm tpid1, tpid2;
+
+ tpid1 = tpid2 =
+ (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
+ 0, &tpid2);
+ meta_tracer_pid = tpid2;
+ if (tpid1 != tpid2) {
+ erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
+ }
+ }
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ BeamInstr *pc = (BeamInstr *)ep->code+3;
+ erts_trace_time_call(p, pc, bp->time);
+ }
+
+ /* Restore original continuation pointer (if changed). */
+ p->cp = cp;
+
+ func = bif_table[bif_index].f;
+
+ result = func(p, args, I);
+
+ if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
+ Eterm *cpp;
+ /* Maybe advance cp to skip trace stack frames */
+ for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
+ if (*cp == i_return_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 2; /* Skip return_trace parameters */
+ } else if (*cp == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 1; /* Skip return_time_trace parameters */
+ } else if (*cp == i_return_to_trace) {
+ /* A return_to trace message is going to be generated
+ * by normal means, so we do not have to.
+ */
+ cp = NULL;
+ break;
+ } else break;
+ }
+ }
+
+ /* Try to get these in the order
+ * they usually appear in normal code... */
+ if (is_non_value(result)) {
+ Uint reason = p->freason;
+ if (reason != TRAP) {
+ Eterm class;
+ Eterm value = p->fvalue;
+ DeclareTmpHeapNoproc(nocatch,3);
+ UseTmpHeapNoproc(3);
+ /* Expand error value like in handle_error() */
+ if (reason & EXF_ARGLIST) {
+ Eterm *tp;
+ ASSERT(is_tuple(value));
+ tp = tuple_val(value);
+ value = tp[1];
+ }
+ if ((reason & EXF_THROWN) && (p->catches <= 0)) {
+ value = TUPLE2(nocatch, am_nocatch, value);
+ reason = EXC_ERROR;
+ }
+ /* Note: expand_error_value() could theoretically
+ * allocate on the heap, but not for any error
+ * returned by a BIF, and it would do no harm,
+ * just be annoying.
+ */
+ value = expand_error_value(p, reason, value);
+ class = exception_tag[GET_EXC_CLASS(reason)];
+
+ if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &meta_tracer_pid);
+ }
+ if (flags & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &p->tracer_proc);
+ }
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
+ /* can only happen if(local)*/
+ Eterm *ptr = p->stop;
+ ASSERT(is_CP(*ptr));
+ ASSERT(ptr <= STACK_START(p));
+ /* Search the nearest stack frame for a catch */
+ while (++ptr < STACK_START(p)) {
+ if (is_CP(*ptr)) break;
+ if (is_catch(*ptr)) {
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into
+ * calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ }
+ UnUseTmpHeapNoproc(3);
+ if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ p->trace_flags |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ } else {
+ if (flags_meta & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &meta_tracer_pid);
+ }
+ /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
+ if (flags & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &p->tracer_proc);
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE) {
+ /* can only happen if(local)*/
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ return result;
+}
+
+static Eterm
+do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, Eterm tracer_pid)
+{
+ Eterm* cpp;
+ int return_to_trace = 0;
+ BeamInstr w;
+ BeamInstr *cp_save;
+ Uint32 flags;
+ Uint need = 0;
+ Eterm* E = c_p->stop;
+
+ w = *c_p->cp;
+ if (w == (BeamInstr) BeamOp(op_return_trace)) {
+ cpp = &E[2];
+ } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
+ return_to_trace = 1;
+ cpp = &E[0];
+ } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
+ cpp = &E[0];
+ } else {
+ cpp = NULL;
+ }
+ if (cpp) {
+ for (;;) {
+ BeamInstr w = *cp_val(*cpp);
+ if (w == (BeamInstr) BeamOp(op_return_trace)) {
+ cpp += 3;
+ } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
+ return_to_trace = 1;
+ cpp += 1;
+ } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
+ cpp += 2;
+ } else {
+ break;
+ }
+ }
+ cp_save = c_p->cp;
+ c_p->cp = (BeamInstr *) cp_val(*cpp);
+ ASSERT(is_CP(*cpp));
+ }
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ if (cpp) {
+ c_p->cp = cp_save;
+ }
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
+ need += 1;
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ need += 3;
+ }
+ if (need) {
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - need < c_p->htop) {
+ (void) erts_garbage_collect(c_p, need, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ E = c_p->stop;
+ }
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE && !return_to_trace) {
+ E -= 1;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ E[0] = make_cp(c_p->cp);
+ c_p->cp = (BeamInstr *) beam_return_to_trace;
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ E -= 3;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ ASSERT(is_CP((Eterm) (UWord) (I - 3)));
+ ASSERT(am_true == tracer_pid ||
+ is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
+ E[2] = make_cp(c_p->cp);
+ E[1] = tracer_pid;
+ E[0] = make_cp(I - 3); /* We ARE at the beginning of an
+ instruction,
the funcinfo is above I. */
+ c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ?
+ beam_exception_trace : beam_return_trace;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ c_p->trace_flags |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ c_p->stop = E;
+ return tracer_pid;
}
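/* Editorial note (not part of the patch): the three-word stack frame
 * pushed above when MATCH_SET_RX_TRACE is set:
 *
 *     E[0] = make_cp(I - 3)    -- func_info of the traced function
 *     E[1] = tracer_pid        -- receiver of the return_trace message
 *     E[2] = make_cp(c_p->cp)  -- the real return address
 *
 * c_p->cp is then redirected to beam_return_trace (or
 * beam_exception_trace), whose instruction consumes this frame when
 * the traced function returns or raises.
 */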
+void
+erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
+{
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+ ASSERT(c_p);
+ ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING);
-/*
- * SMP NOTE: Process p may have become exiting on return!
- */
-Uint32
-erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local,
- Eterm *tracer_pid) {
- BpData **bds = (BpData **) (pc)[-4];
- BpDataTrace *bdt = NULL;
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+ pbt = ERTS_PROC_GET_CALL_TIME(c_p);
+ get_sys_now(&ms, &s, &us);
- ASSERT(tracer_pid);
- if (bds) {
- Eterm tpid1, tpid2;
- Uint32 flags;
- bdt = (BpDataTrace *)bds[bp_sched2ix_proc(p)];
+ /* get pbt
+ * timestamp = t0
+ * lookup bdt from code
+ * set ts0 to pbt
+ * add call count here?
+ */
+ if (pbt == 0) {
+ /* First call of process to instrumented function */
+ pbt = Alloc(sizeof(process_breakpoint_time_t));
+ (void *) ERTS_PROC_SET_CALL_TIME(c_p, ERTS_PROC_LOCK_MAIN, pbt);
+ } else {
+ ASSERT(pbt->pc);
+ /* add time to previous code */
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = c_p->id;
+ sitem.count = 0;
- ErtsSmpBPLock(bdt);
- tpid1 = tpid2 = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ /* previous breakpoint */
+ pbdt = get_time_break(pbt->pc);
- flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
- local, &tpid2);
- *tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- ErtsSmpBPLock(bdt);
- bdt->tracer_pid = tpid2;
- ErtsSmpBPUnlock(bdt);
+ /* if null then the breakpoint was removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(c_p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
}
- return flags;
}
- *tracer_pid = NIL;
- return 0;
+
+ /* Add count to this code */
+ sitem.pid = c_p->id;
+ sitem.count = 1;
+ sitem.s_time = 0;
+ sitem.us_time = 0;
+
+ /* this breakpoint */
+ ASSERT(bdt);
+ h = &(bdt->hash[bp_sched2ix_proc(c_p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+
+ pbt->pc = I;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
}
+void
+erts_trace_time_return(Process *p, BeamInstr *pc)
+{
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+ ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING);
+
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+ get_sys_now(&ms,&s,&us);
+
+ /* get pbt
+ * lookup bdt from code
+ * timestamp = t1
+ * get ts0 from pbt
+ * get item from bdt->hash[bp_hash(p->id)]
+ * add diff (t1, t0) to item
+ */
+
+ if (pbt) {
+ /* might have been removed due to
+ * trace_pattern(false)
+ */
+ ASSERT(pbt->pc);
+
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = get_time_break(pbt->pc);
+
+ /* beware, the trace_pattern might have been removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ }
+}
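/* Editorial sketch (not part of the patch) of the accounting above.
 * Suppose process P enters traced f/0 at t=0 and f calls traced g/0
 * at t=5us:
 *
 *     enter f: pbt == NULL   -> allocate pbt, stamp t=0, count f once
 *     enter g: pbt->pc == f  -> credit 5us to f via bp_time_diff(),
 *                               count g once, stamp t=5, pbt->pc = g
 *     g returns              -> erts_trace_time_return() credits the
 *                               time spent in g and re-stamps pbt
 */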
int
-erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_trace_breakpoint));
-
- if (bdt) {
+erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
+{
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ GenericBpData* bp = check_break(pc, flags);
+
+ if (bp) {
if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
- }
- if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ *match_spec_ret = bp->local_ms;
}
- return !0;
+ return 1;
}
return 0;
}
int
-erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+ Eterm *tracer_pid_ret)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
- if (bdt) {
+ if (bp) {
if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
+ *match_spec_ret = bp->meta_ms;
}
if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ *tracer_pid_ret =
+ (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
}
- return !0;
+ return 1;
}
return 0;
}
@@ -402,15 +1129,15 @@ erts_is_native_break(BeamInstr *pc) {
}
int
-erts_is_count_break(BeamInstr *pc, Sint *count_ret) {
- BpDataCount *bdc =
- (BpDataCount *) is_break(pc, (BeamInstr) BeamOp(op_i_count_breakpoint));
+erts_is_count_break(BeamInstr *pc, Uint *count_ret)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT);
- if (bdc) {
+ if (bp) {
if (count_ret) {
- *count_ret = (Sint) erts_smp_atomic_read_nob(&bdc->acount);
+ *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount);
}
- return !0;
+ return 1;
}
return 0;
}
@@ -421,7 +1148,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
Uint size;
Eterm *hp, t;
bp_data_time_item_t *item = NULL;
- BpDataTime *bdt = (BpDataTime *) is_break(pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ BpDataTime *bdt = get_time_break(pc);
if (bdt) {
if (retval) {
@@ -464,7 +1191,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
}
bp_hash_delete(&hash);
}
- return !0;
+ return 1;
}
return 0;
@@ -655,7 +1382,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
* the previous breakpoint.
*/
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ pbdt = get_time_break(pbt->pc);
if (pbdt) {
get_sys_now(&ms,&s,&us);
bp_time_diff(&sitem, pbt, ms, s, us);
@@ -693,671 +1420,259 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
} /* pbt */
}
-/* call_time breakpoint
- * Accumulated times are added to the previous bp,
- * not the current one. The current one is saved
- * for future reference.
- * The previous breakpoint is stored in the process it self, the psd.
- * We do not need to store in a stack frame.
- * There is no need for locking, each thread has its own
- * area in each bp to save data.
- * Since we need to differentiate between processes for each bp,
- * every bp has a hash (per thread) to process-bp statistics.
- * - egil
- */
-
-void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type) {
- Uint ms,s,us;
- process_breakpoint_time_t *pbt = NULL;
- bp_data_time_item_t sitem, *item = NULL;
- bp_time_hash_t *h = NULL;
- BpDataTime *pbdt = NULL;
-
- ASSERT(p);
- ASSERT(ERTS_PSFLG_RUNNING & erts_smp_atomic32_read_acqb(&p->state));
-
- /* get previous timestamp and breakpoint
- * from the process psd */
-
- pbt = ERTS_PROC_GET_CALL_TIME(p);
- get_sys_now(&ms,&s,&us);
-
- switch(type) {
- /* get pbt
- * timestamp = t0
- * lookup bdt from code
- * set ts0 to pbt
- * add call count here?
- */
- case ERTS_BP_CALL_TIME_CALL:
- case ERTS_BP_CALL_TIME_TAIL_CALL:
-
- if (pbt) {
- ASSERT(pbt->pc);
- /* add time to previous code */
- bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->id;
- sitem.count = 0;
-
- /* previous breakpoint */
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
-
- /* if null then the breakpoint was removed */
- if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
- }
-
- } else {
- /* first call of process to instrumented function */
- pbt = Alloc(sizeof(process_breakpoint_time_t));
- (void *) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt);
- }
- /* add count to this code */
- sitem.pid = p->id;
- sitem.count = 1;
- sitem.s_time = 0;
- sitem.us_time = 0;
-
- /* this breakpoint */
- ASSERT(bdt);
- h = &(bdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
-
- pbt->pc = pc;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
- break;
-
- case ERTS_BP_CALL_TIME_RETURN:
- /* get pbt
- * lookup bdt from code
- * timestamp = t1
- * get ts0 from pbt
- * get item from bdt->hash[bp_hash(p->id)]
- * add diff (t1, t0) to item
- */
-
- if(pbt) {
- /* might have been removed due to
- * trace_pattern(false)
- */
- ASSERT(pbt->pc);
-
- bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->id;
- sitem.count = 0;
-
- /* previous breakpoint */
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
-
- /* beware, the trace_pattern might have been removed */
- if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
- }
-
- pbt->pc = pc;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
- }
- break;
- default :
- ASSERT(0);
- /* will never happen */
- break;
- }
-}
-
-
/* *************************************************************************
** Local helpers
*/
-static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid)
+static void
+set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid)
{
- Module *modp;
- int num_processed = 0;
- ErtsCodeIndex code_ix = erts_active_code_ix();
- if (!specified) {
- /* Find and process all modules in the system... */
- int current;
- int last = module_code_size(code_ix);
- for (current = 0; current < last; current++) {
- modp = module_code(current, code_ix);
- ASSERT(modp != NULL);
- num_processed +=
- set_module_break(modp, mfa, specified,
- match_spec, break_op, count_op,
- tracer_pid);
- }
- } else {
- /* Process a single module */
- if ((modp = erts_get_module(mfa[0], code_ix)) != NULL) {
- num_processed +=
- set_module_break(modp, mfa, specified,
- match_spec, break_op, count_op,
- tracer_pid);
- }
- }
- return num_processed;
-}
-
-static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid) {
- BeamInstr** code_base;
- BeamInstr* code_ptr;
- int num_processed = 0;
- Uint i,n;
+ Uint i;
+ Uint n;
- ASSERT(break_op);
- ASSERT(modp);
- code_base = (BeamInstr **) modp->curr.code;
- if (code_base == NULL) {
- return 0;
+ n = f->matched;
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ set_function_break(pc, match_spec, break_flags,
+ count_op, tracer_pid);
}
- n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
- for (i = 0; i < n; ++i) {
- code_ptr = code_base[MI_FUNCTIONS+i];
- ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
- (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr+5;
-
- num_processed +=
- set_function_break(modp, pc, BREAK_IS_ERL, match_spec,
- break_op, count_op, tracer_pid);
- }
- }
- return num_processed;
}
-static int set_function_break(Module *modp, BeamInstr *pc, int bif,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid) {
-
- BeamInstr **code_base = NULL;
- BpData *bd, **r, ***rs;
- size_t size;
- Uint ix = 0;
-
- if (bif == BREAK_IS_ERL) {
- code_base = (BeamInstr **)modp->curr.code;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (modp->curr.code_length/sizeof(BeamInstr *)));
- } else {
- ASSERT(*pc == (BeamInstr) em_apply_bif);
- ASSERT(modp == NULL);
+static void
+set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid)
+{
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint common;
+ ErtsBpIndex ix = erts_staging_bp_ix();
+
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+ g = (GenericBp *) pc[-4];
+ if (g == 0) {
+ int i;
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
+ /* Do not insert a new breakpoint */
+ return;
+ }
+ g = Alloc(sizeof(GenericBp));
+ g->orig_instr = *pc;
+ for (i = 0; i < ERTS_NUM_BP_IX; i++) {
+ g->data[i].flags = 0;
+ }
+ pc[-4] = (BeamInstr) g;
}
+ bp = &g->data[ix];
/*
- * Currently no trace support for native code.
+ * If we are changing an existing breakpoint, clean up old data.
*/
- if (erts_is_native_break(pc)) {
- return 0;
- }
- /* Do not allow two breakpoints of the same kind */
- if ( (bd = is_break(pc, break_op))) {
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint)
- || break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
- Binary *old_match_spec;
-
- /* Update match spec and tracer */
- MatchSetRef(match_spec);
- ErtsSmpBPLock(bdt);
- old_match_spec = bdt->match_spec;
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- ErtsSmpBPUnlock(bdt);
- MatchSetUnref(old_match_spec);
- } else {
- BpDataCount *bdc = (BpDataCount *) bd;
- erts_aint_t count = 0;
- erts_aint_t res = 0;
-
- ASSERT(! match_spec);
- ASSERT(is_nil(tracer_pid));
-
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_stop) {
- count = erts_smp_atomic_read_nob(&bdc->acount);
- if (count >= 0) {
- while(1) {
- res = erts_smp_atomic_cmpxchg_nob(&bdc->acount, -count - 1, count);
- if ((res == count) || count < 0) break;
- count = res;
- }
- }
- } else {
- /* Reset call counter */
- erts_smp_atomic_set_nob(&bdc->acount, 0);
- }
-
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
-
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
-
- if (count_op == erts_break_stop) {
- bdt->pause = 1;
- } else {
- bdt->pause = 0;
- for (i = 0; i < bdt->n; i++) {
- bp_hash_delete(&(bdt->hash[i]));
- bp_hash_init(&(bdt->hash[i]), 32);
- }
- }
- } else {
- ASSERT (! count_op);
- }
- }
- return 1;
- }
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
- size = sizeof(BpDataTrace);
- } else {
- ASSERT(! match_spec);
- ASSERT(is_nil(tracer_pid));
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
- /* Do not insert a new breakpoint */
- return 1;
- }
- size = sizeof(BpDataCount);
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
- /* Do not insert a new breakpoint */
- return 1;
- }
- size = sizeof(BpDataTime);
+ common = break_flags & bp->flags;
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(bp->local_ms);
+ } else if (common & ERTS_BPF_META_TRACE) {
+ MatchSetUnref(bp->meta_ms);
+ bp_meta_unref(bp->meta_pid);
+ } else if (common & ERTS_BPF_COUNT) {
+ if (count_op == erts_break_stop) {
+ bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
} else {
- ASSERT(! count_op);
- ASSERT(break_op == (BeamInstr) BeamOp(op_i_debug_breakpoint));
- size = sizeof(BpDataDebug);
+ bp->flags |= ERTS_BPF_COUNT_ACTIVE;
+ erts_smp_atomic_set_nob(&bp->count->acount, 0);
}
- }
- rs = (BpData ***) (pc-4);
- if (! *rs) {
- size_t ssize = sizeof(BeamInstr) * erts_no_schedulers;
- *rs = (BpData **) Alloc(ssize);
- sys_memzero(*rs, ssize);
- }
-
- r = &((*rs)[0]);
-
- if (! *r) {
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_trace_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_debug_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_count_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_time_breakpoint));
- /* First breakpoint; create singleton ring */
- bd = Alloc(size);
- BpInit(bd, *pc);
- *r = bd;
- if (bif == BREAK_IS_ERL) {
- *pc = break_op;
- }
- } else {
- ASSERT(*pc == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_mtrace_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_debug_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_time_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_count_breakpoint) ||
- *pc == (BeamInstr) em_apply_bif);
- if (*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
- /* Debug bp must be last, so if it is also first;
- * it must be singleton. */
- ASSERT(BpSingleton(*r));
- /* Insert new bp first in the ring, i.e second to last. */
- bd = Alloc(size);
- BpInitAndSpliceNext(bd, *pc, *r);
- if (bif == BREAK_IS_ERL) {
- *pc = break_op;
- }
- } else if ((*r)->prev->orig_instr
- == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
- /* Debug bp last in the ring; insert new second to last. */
- bd = Alloc(size);
- BpInitAndSplicePrev(bd, (*r)->prev->orig_instr, *r);
- (*r)->prev->orig_instr = break_op;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return;
+ } else if (common & ERTS_BPF_TIME_TRACE) {
+ BpDataTime* bdt = bp->time;
+ Uint i = 0;
+
+ if (count_op == erts_break_stop) {
+ bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE;
} else {
- /* Just insert last in the ring */
- bd = Alloc(size);
- BpInitAndSpliceNext(bd, (*r)->orig_instr, *r);
- (*r)->orig_instr = break_op;
- *r = bd;
+ bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE;
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_delete(&(bdt->hash[i]));
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
}
- }
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- (*rs)[ix] = (*rs)[0];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return;
}
- bd->this_instr = break_op;
- /* Init the bp type specific data */
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
-
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
-
- bdt->pause = 0;
- bdt->n = erts_no_schedulers;
- bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
+ /*
+ * Initialize the new breakpoint data.
+ */
+ if (break_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetRef(match_spec);
+ bp->local_ms = match_spec;
+ } else if (break_flags & ERTS_BPF_META_TRACE) {
+ BpMetaPid* bmp;
+ MatchSetRef(match_spec);
+ bp->meta_ms = match_spec;
+ bmp = Alloc(sizeof(BpMetaPid));
+ erts_refc_init(&bmp->refc, 1);
+ erts_smp_atomic_init_nob(&bmp->pid, tracer_pid);
+ bp->meta_pid = bmp;
+ } else if (break_flags & ERTS_BPF_COUNT) {
+ BpCount* bcp;
+
+ ASSERT((bp->flags & ERTS_BPF_COUNT) == 0);
+ bcp = Alloc(sizeof(BpCount));
+ erts_refc_init(&bcp->refc, 1);
+ erts_smp_atomic_init_nob(&bcp->acount, 0);
+ bp->count = bcp;
+ } else if (break_flags & ERTS_BPF_TIME_TRACE) {
+ BpDataTime* bdt;
+ int i;
+
+ ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
+ bdt = Alloc(sizeof(BpDataTime));
+ erts_refc_init(&bdt->refc, 1);
+ bdt->n = erts_no_schedulers;
+ bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
}
- } else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- BpDataCount *bdc = (BpDataCount *) bd;
- erts_smp_atomic_init_nob(&bdc->acount, 0);
+ bp->time = bdt;
}
- if (bif == BREAK_IS_ERL) {
- ++modp->curr.num_breakpoints;
+ bp->flags |= break_flags;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+}
+
+static void
+clear_break(BpFunctions* f, Uint break_flags)
+{
+ Uint i;
+ Uint n;
+
+ n = f->matched;
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ clear_function_break(pc, break_flags);
}
- return 1;
}
-static int clear_break(Eterm mfa[3], int specified, BeamInstr break_op)
+static int
+clear_function_break(BeamInstr *pc, Uint break_flags)
{
- ErtsCodeIndex code_ix = erts_active_code_ix();
- int num_processed = 0;
- Module *modp;
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint common;
+ ErtsBpIndex ix = erts_staging_bp_ix();
- if (!specified) {
- /* Iterate over all modules */
- int current;
- int last = module_code_size(code_ix);
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
- for (current = 0; current < last; current++) {
- modp = module_code(current, code_ix);
- ASSERT(modp != NULL);
- num_processed += clear_module_break(modp, mfa, specified, break_op);
- }
- } else {
- /* Process a single module */
- if ((modp = erts_get_module(mfa[0], code_ix)) != NULL) {
- num_processed +=
- clear_module_break(modp, mfa, specified, break_op);
- }
+ if ((g = (GenericBp *) pc[-4]) == 0) {
+ return 1;
}
- return num_processed;
-}
-static int clear_module_break(Module *m, Eterm mfa[3], int specified,
- BeamInstr break_op) {
- BeamInstr** code_base;
- BeamInstr* code_ptr;
- int num_processed = 0;
- Uint i;
- BeamInstr n;
-
- ASSERT(m);
- code_base = (BeamInstr **) m->curr.code;
- if (code_base == NULL) {
- return 0;
+ bp = &g->data[ix];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ common = bp->flags & break_flags;
+ bp->flags &= ~break_flags;
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(bp->local_ms);
}
- n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
- for (i = 0; i < n; ++i) {
- code_ptr = code_base[MI_FUNCTIONS+i];
- if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
- (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr + 5;
-
- num_processed +=
- clear_function_break(m, pc, BREAK_IS_ERL, break_op);
- }
+ if (common & ERTS_BPF_META_TRACE) {
+ MatchSetUnref(bp->meta_ms);
+ }
+ if (common & ERTS_BPF_COUNT) {
+ ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
+ bp_count_unref(bp->count);
+ }
+ if (common & ERTS_BPF_TIME_TRACE) {
+ ASSERT((bp->flags & ERTS_BPF_TIME_TRACE_ACTIVE) == 0);
+ bp_time_unref(bp->time);
}
- return num_processed;
-}
-static int clear_function_break(Module *m, BeamInstr *pc, int bif, BeamInstr break_op) {
- BpData *bd;
- Uint ix = 0;
- BeamInstr **code_base = NULL;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return 1;
+}
- if (bif == BREAK_IS_ERL) {
- code_base = (BeamInstr **)m->curr.code;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (m->curr.code_length/sizeof(BeamInstr *)));
- } else {
- ASSERT(*pc == (BeamInstr) em_apply_bif);
- ASSERT(m == NULL);
+static void
+bp_meta_unref(BpMetaPid* bmp)
+{
+ if (erts_refc_dectest(&bmp->refc, 0) <= 0) {
+ Free(bmp);
}
+}
- /*
- * Currently no trace support for native code.
- */
- if (erts_is_native_break(pc)) {
- return 0;
+static void
+bp_count_unref(BpCount* bcp)
+{
+ if (erts_refc_dectest(&bcp->refc, 0) <= 0) {
+ Free(bcp);
}
+}
- while ( (bd = is_break(pc, break_op))) {
- /* Remove all breakpoints of this type.
- * There should be only one of each type,
- * but break_op may be 0 which matches any type.
+static void
+bp_time_unref(BpDataTime* bdt)
+{
+ if (erts_refc_dectest(&bdt->refc, 0) <= 0) {
+ Uint i = 0;
+ Uint j = 0;
+ Process *h_p = NULL;
+ bp_data_time_item_t* item = NULL;
+ process_breakpoint_time_t* pbt = NULL;
+
+ /* remove all psd associated with the hash
+ * and then delete the hash.
+ * ... sigh ...
*/
- BeamInstr op;
- BpData ***rs = (BpData ***) (pc - 4);
- BpData **r = NULL;
-#ifdef DEBUG
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- ASSERT((*rs)[ix] == (*rs)[0]);
- }
-#endif
-
- r = &((*rs)[0]);
-
- ASSERT(*r);
- /* Find opcode for this breakpoint */
- if (break_op) {
- op = break_op;
- } else {
- if (bd == (*r)->next) {
- /* First breakpoint in ring */
- op = *pc;
- } else {
- op = bd->prev->orig_instr;
- }
- }
- if (BpSingleton(bd)) {
- ASSERT(*r == bd);
- /* Only one breakpoint to remove */
- if (bif == BREAK_IS_ERL) {
- *pc = bd->orig_instr;
- }
- Free(*rs);
- *rs = NULL;
- } else {
- BpData *bd_prev = bd->prev;
-
- BpSpliceNext(bd, bd_prev);
- ASSERT(BpSingleton(bd));
- if (bd == *r) {
- /* We removed the last breakpoint in the ring */
- *r = bd_prev;
- bd_prev->orig_instr = bd->orig_instr;
- } else if (bd_prev == *r) {
- /* We removed the first breakpoint in the ring */
- if (bif == BREAK_IS_ERL) {
- *pc = bd->orig_instr;
- }
- } else {
- bd_prev->orig_instr = bd->orig_instr;
- }
- }
- if (op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
- MatchSetUnref(bdt->match_spec);
- }
- if (op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
- Uint j = 0;
- Process *h_p = NULL;
- bp_data_time_item_t *item = NULL;
- process_breakpoint_time_t *pbt = NULL;
-
- /* remove all psd associated with the hash
- * and then delete the hash.
- * ... sigh ...
- */
-
- for( i = 0; i < bdt->n; ++i) {
- if (bdt->hash[i].used) {
- for (j = 0; j < bdt->hash[i].n; ++j) {
- item = &(bdt->hash[i].item[j]);
- if (item->pid != NIL) {
- h_p = erts_proc_lookup(item->pid);
- if (h_p) {
- pbt = ERTS_PROC_SET_CALL_TIME(h_p, ERTS_PROC_LOCK_MAIN, NULL);
- if (pbt) {
- Free(pbt);
- }
+ for (i = 0; i < bdt->n; ++i) {
+ if (bdt->hash[i].used) {
+ for (j = 0; j < bdt->hash[i].n; ++j) {
+ item = &(bdt->hash[i].item[j]);
+ if (item->pid != NIL) {
+ h_p = erts_pid2proc(NULL, 0, item->pid,
+ ERTS_PROC_LOCK_MAIN);
+ if (h_p) {
+ pbt = ERTS_PROC_SET_CALL_TIME(h_p,
+ ERTS_PROC_LOCK_MAIN,
+ NULL);
+ if (pbt) {
+ Free(pbt);
}
+ erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN);
}
}
}
- bp_hash_delete(&(bdt->hash[i]));
}
- Free(bdt->hash);
- bdt->hash = NULL;
- bdt->n = 0;
+ bp_hash_delete(&(bdt->hash[i]));
}
- Free(bd);
- if (bif == BREAK_IS_ERL) {
- ASSERT(m->curr.num_breakpoints > 0);
- --m->curr.num_breakpoints;
- }
- if (*rs) {
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- (*rs)[ix] = (*rs)[0];
- }
- }
- } /* while bd != NULL */
- return 1;
+ Free(bdt->hash);
+ Free(bdt);
+ }
}
-
-
-/*
-** Searches (linear forward) the breakpoint ring for a specified opcode
-** and returns a pointer to the breakpoint data structure or NULL if
-** not found. If the specified opcode is 0, the last breakpoint is
-** returned. The program counter must point to the first executable
-** (breakpoint) instruction of the function.
-*/
-
-BpData *erts_get_time_break(Process *p, BeamInstr *pc) {
- return get_break(p, pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+static BpDataTime*
+get_time_break(BeamInstr *pc)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE);
+ return bp ? bp->time : 0;
}
-static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) {
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (! erts_is_native_break(pc)) {
- BpData **rs = (BpData **) pc[-4];
- BpData *bd = NULL, *ebd = NULL;
-
- if (! rs) {
- return NULL;
- }
-
- bd = ebd = rs[bp_sched2ix_proc(p)];
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- return bd;
- }
-
- bd = bd->next;
- while (bd != ebd) {
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- ASSERT(bd);
- return bd;
- }
- bd = bd->next;
- }
- }
- return NULL;
-}
+static GenericBpData*
+check_break(BeamInstr *pc, Uint break_flags)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
- BpData **rs;
- BpData *bd = NULL, *ebd = NULL;
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
-
if (erts_is_native_break(pc)) {
- return NULL;
- }
- rs = (BpData **) pc[-4];
- if (! rs) {
- return NULL;
- }
-
- bd = ebd = rs[erts_bp_sched2ix()];
- ASSERT(bd);
- if ( (break_op == 0) || (bd->this_instr == break_op)) {
- return bd;
+ return 0;
}
-
- bd = bd->next;
- while (bd != ebd) {
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- ASSERT(bd);
- return bd;
+ if (g) {
+ GenericBpData* bp = &g->data[erts_active_bp_ix()];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ if (bp->flags & break_flags) {
+ return bp;
}
- bd = bd->next;
}
- return NULL;
+ return 0;
}
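
[Editor's note: the refactored beam_bp.c above shares breakpoint payloads between the two GenericBpData slots via reference counts; bp_meta_unref(), bp_count_unref() and bp_time_unref() free the payload only when the last reference is dropped. The following is a minimal standalone sketch of that dec-and-test pattern using C11 atomics; SketchBpCount and the function names are hypothetical stand-ins, not the erts_refc API.]

    #include <stdatomic.h>
    #include <stdlib.h>

    /* Trimmed stand-in for the refcounted BpCount in beam_bp.c. */
    typedef struct {
        atomic_long acount;   /* the call counter itself           */
        atomic_long refc;     /* GenericBpData slots pointing here */
    } SketchBpCount;

    static SketchBpCount *sketch_count_new(void)
    {
        SketchBpCount *bcp = malloc(sizeof(*bcp));
        atomic_init(&bcp->acount, 0);
        atomic_init(&bcp->refc, 1);       /* creator holds one reference */
        return bcp;
    }

    static void sketch_count_ref(SketchBpCount *bcp)
    {
        atomic_fetch_add(&bcp->refc, 1);  /* another slot shares the data */
    }

    static void sketch_count_unref(SketchBpCount *bcp)
    {
        /* Free only when the last slot lets go, as bp_count_unref() does. */
        if (atomic_fetch_sub(&bcp->refc, 1) == 1)
            free(bcp);
    }
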
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 167069552f..28aaaa462a 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -25,77 +25,6 @@
#include "erl_vm.h"
#include "global.h"
-
-
-/* A couple of gotchas:
- *
- * The breakpoint structure from BeamInstr,
- * In beam_emu where the instruction counter pointer, I (or pc),
- * points to the *current* instruction. At that time, if the instruction
- * is a breakpoint instruction the pc looks like the following,
- *
- * I[-5] | op_i_func_info_IaaI | scheduler specific entries
- * I[-4] | BpData** bpa | --> | BpData * bdas1 | ... | BpData * bdasN |
- * I[-3] | Tagged Module | | |
- * I[-2] | Tagged Function | V V
- * I[-1] | Arity | BpData -> BpData -> BpData -> BpData
- * I[0] | The bp instruction | ^ * the bp wheel * |
- * |------------------------------
- *
- * Common struct to all bp_data_*
- *
- * 1) The type of bp_data structure in the ring is deduced from the
- * orig_instr field of the structure _before_ in the ring, except for
- * the first structure in the ring that has its instruction in
- * pc[0] of the code to execute.
- * This is valid as long as you don't search for the function while it is
- * being executed by something else. Or is in the middle of its rotation for
- * any other reason.
- * A key, the bp beam instruction, is included for this reason.
- *
- * 2) pc[-4][sched_id - 1] points to the _last_ structure in the ring before the
- * breakpoints are being executed.
- *
- * So, as an example, when a breakpointed function starts to execute,
- * the first instruction that is a breakpoint instruction at pc[0] finds
- * its data at ((BpData **) pc[-4][sched_id - 1])->next and has to cast that pointer
- * to the correct bp_data type.
-*/
-
-typedef struct bp_data {
- struct bp_data *next; /* Doubly linked ring pointers */
- struct bp_data *prev; /* -"- */
- BeamInstr orig_instr; /* The original instruction to execute */
- BeamInstr this_instr; /* key */
-} BpData;
-/*
-** All the following bp_data_.. structs must begin the same way
-*/
-
-typedef struct bp_data_trace {
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- Binary *match_spec;
- Eterm tracer_pid;
-} BpDataTrace;
-
-typedef struct bp_data_debug {
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
-} BpDataDebug;
-
-typedef struct bp_data_count { /* Call count */
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- erts_smp_atomic_t acount;
-} BpDataCount;
-
typedef struct {
Eterm pid;
Sint count;
@@ -110,13 +39,9 @@ typedef struct {
} bp_time_hash_t;
typedef struct bp_data_time { /* Call time */
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- Uint pause;
- Uint n;
- bp_time_hash_t *hash;
+ Uint n;
+ bp_time_hash_t *hash;
+ erts_refc_t refc;
} BpDataTime;
typedef struct {
@@ -126,64 +51,42 @@ typedef struct {
BeamInstr *pc;
} process_breakpoint_time_t; /* used within psd */
-extern erts_smp_spinlock_t erts_bp_lock;
+typedef struct {
+ erts_smp_atomic_t acount;
+ erts_refc_t refc;
+} BpCount;
+
+typedef struct {
+ erts_smp_atomic_t pid;
+ erts_refc_t refc;
+} BpMetaPid;
+
+typedef struct generic_bp_data {
+ Uint flags;
+ Binary* local_ms; /* Match spec for local call trace */
+ Binary* meta_ms; /* Match spec for meta trace */
+ BpMetaPid* meta_pid; /* Meta trace pid */
+ BpCount* count; /* For call count */
+ BpDataTime* time; /* For time trace */
+} GenericBpData;
+
+#define ERTS_NUM_BP_IX 2
+
+typedef struct generic_bp {
+ BeamInstr orig_instr;
+ GenericBpData data[ERTS_NUM_BP_IX];
+} GenericBp;
#define ERTS_BP_CALL_TIME_SCHEDULE_IN (0)
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#define ERTS_BP_CALL_TIME_CALL (0)
-#define ERTS_BP_CALL_TIME_RETURN (1)
-#define ERTS_BP_CALL_TIME_TAIL_CALL (2)
-
-#ifdef ERTS_SMP
-#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock)
-#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock)
-#else
-#define ErtsSmpBPLock(BDC)
-#define ErtsSmpBPUnlock(BDC)
-#endif
-
#ifdef ERTS_SMP
#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
#else
#define bp_sched2ix_proc(p) (0)
#endif
-#define ErtsCountBreak(p, pc,instr_result) \
-do { \
- BpData **bds = (BpData **) (pc)[-4]; \
- BpDataCount *bdc = NULL; \
- Uint ix = bp_sched2ix_proc( (p) ); \
- erts_aint_t count = 0; \
- \
- ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bds); \
- bdc = (BpDataCount *) bds[ix]; \
- bdc = (BpDataCount *) bdc->next; \
- ASSERT(bdc); \
- bds[ix] = (BpData *) bdc; \
- count = erts_smp_atomic_read_nob(&bdc->acount); \
- if (count >= 0) erts_smp_atomic_inc_nob(&bdc->acount); \
- *(instr_result) = bdc->orig_instr; \
-} while (0)
-
-#define ErtsBreakSkip(p, pc,instr_result) \
-do { \
- BpData **bds = (BpData **) (pc)[-4]; \
- BpData *bd = NULL; \
- Uint ix = bp_sched2ix_proc( (p) ); \
- \
- ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bds); \
- bd = bds[ix]; \
- ASSERT(bd); \
- bd = bd->next; \
- ASSERT(bd); \
- bds[ix] = bd; \
- *(instr_result) = bd->orig_instr; \
-} while (0)
-
enum erts_break_op{
erts_break_nop = 0, /* Must be false */
erts_break_set = !0, /* Must be true */
@@ -191,7 +94,17 @@ enum erts_break_op{
erts_break_stop
};
+typedef Uint32 ErtsBpIndex;
+typedef struct {
+ BeamInstr* pc;
+ Module* mod;
+} BpFunction;
+
+typedef struct {
+ Uint matched; /* Number matched */
+ BpFunction* matching; /* Matching functions */
+} BpFunctions;
/*
** Function interface exported from beam_bp.c
@@ -199,49 +112,66 @@ enum erts_break_op{
void erts_bp_init(void);
-int erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid);
-int erts_clear_trace_break(Eterm mfa[3], int specified);
-int erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
+void erts_prepare_bp_staging(void);
+void erts_commit_staged_bp(void);
+
+ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void);
+ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void);
+
+void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_free_matched_functions(BpFunctions* f);
+
+void erts_install_breakpoints(BpFunctions* f);
+void erts_uninstall_breakpoints(BpFunctions* f);
+void erts_consolidate_bp_data(BpFunctions* f, int local);
+void erts_consolidate_bif_bp_data(void);
+
+void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
+void erts_clear_trace_break(BpFunctions *f);
+
+void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
+void erts_clear_call_trace_bif(BeamInstr *pc, int local);
+
+void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
Eterm tracer_pid);
-int erts_clear_mtrace_break(Eterm mfa[3], int specified);
+void erts_clear_mtrace_break(BpFunctions *f);
void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
Eterm tracer_pid);
void erts_clear_mtrace_bif(BeamInstr *pc);
-int erts_set_debug_break(Eterm mfa[3], int specified);
-int erts_clear_debug_break(Eterm mfa[3], int specified);
-int erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op);
-int erts_clear_count_break(Eterm mfa[3], int specified);
+void erts_set_debug_break(BpFunctions *f);
+void erts_clear_debug_break(BpFunctions *f);
+void erts_set_count_break(BpFunctions *f, enum erts_break_op);
+void erts_clear_count_break(BpFunctions *f);
-int erts_clear_break(Eterm mfa[3], int specified);
+
+void erts_clear_all_breaks(BpFunctions* f);
int erts_clear_module_break(Module *modp);
-int erts_clear_function_break(Module *modp, BeamInstr *pc);
+void erts_clear_export_break(Module *modp, BeamInstr* pc);
+BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32 *ret_flags, Eterm *tracer_pid);
-Uint32 erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args,
- int local, Eterm *tracer_pid);
-int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret);
+int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_rte);
int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_ret);
int erts_is_native_break(BeamInstr *pc);
-int erts_is_count_break(BeamInstr *pc, Sint *count_ret);
+int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
-void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type);
+void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt);
+void erts_trace_time_return(Process* c_p, BeamInstr* pc);
void erts_schedule_time_break(Process *p, Uint out);
-int erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op);
-int erts_clear_time_break(Eterm mfa[3], int specified);
+void erts_set_time_break(BpFunctions *f, enum erts_break_op);
+void erts_clear_time_break(BpFunctions *f);
int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
void erts_clear_time_trace_bif(BeamInstr *pc);
-BpData *erts_get_time_break(Process *p, BeamInstr *pc);
BeamInstr *erts_find_local_func(Eterm mfa[3]);
@@ -258,6 +188,19 @@ ERTS_GLB_INLINE Uint erts_bp_sched2ix(void)
return 0;
#endif
}
+
+extern erts_smp_atomic32_t erts_active_bp_index;
+extern erts_smp_atomic32_t erts_staging_bp_index;
+
+ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&erts_active_bp_index);
+}
+
+ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&erts_staging_bp_index);
+}
#endif
#endif /* _BEAM_BP_H */
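
[Editor's note: the new GenericBp keeps two GenericBpData slots (ERTS_NUM_BP_IX == 2). Running code reads the slot named by erts_active_bp_ix(), updates are staged into the other slot, and erts_commit_staged_bp() flips the indices. Below is a minimal double-buffer sketch of that scheme with hypothetical names; it uses portable C11 atomics with acquire/release ordering, whereas the emulator uses the erts_smp_atomic32 _nob reads and relies on thread progress for ordering.]

    #include <stdatomic.h>

    #define NUM_IX 2                    /* mirrors ERTS_NUM_BP_IX */

    static atomic_int active_ix;        /* slot read by running code          */
    static atomic_int staging_ix = 1;   /* slot edited under write permission */

    typedef struct { unsigned flags; } SketchBpData;
    typedef struct { SketchBpData data[NUM_IX]; } SketchBp;

    /* Executing code only ever reads the active slot... */
    static SketchBpData *sketch_active(SketchBp *g)
    {
        return &g->data[atomic_load_explicit(&active_ix, memory_order_acquire)];
    }

    /* ...updates go into the staging slot... */
    static SketchBpData *sketch_staging(SketchBp *g)
    {
        return &g->data[atomic_load(&staging_ix)];
    }

    /* ...and the commit flips the roles, like erts_commit_staged_bp(). */
    static void sketch_commit(void)
    {
        int ix = atomic_load(&staging_ix);
        atomic_store_explicit(&active_ix, ix, memory_order_release);
        atomic_store(&staging_ix, (ix + 1) % NUM_IX);
    }
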
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index e69cbc3048..a609ed8c71 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -84,6 +84,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
int i;
int specified = 0;
Eterm res;
+ BpFunctions f;
if (bool != am_true && bool != am_false)
goto error;
@@ -121,11 +122,19 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
+ erts_bp_match_functions(&f, mfa, specified);
if (bool == am_true) {
- res = make_small(erts_set_debug_break(mfa, specified));
+ erts_set_debug_break(&f);
+ erts_install_breakpoints(&f);
+ erts_commit_staged_bp();
} else {
- res = make_small(erts_clear_debug_break(mfa, specified));
+ erts_clear_debug_break(&f);
+ erts_commit_staged_bp();
+ erts_uninstall_breakpoints(&f);
}
+ erts_consolidate_bp_data(&f, 1);
+ res = make_small(f.matched);
+ erts_bp_free_matched_functions(&f);
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
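
[Editor's note: note the asymmetric ordering in erts_debug_breakpoint_2 above. When setting, the still-disabled breakpoint instructions are installed before erts_commit_staged_bp() enables them; when clearing, the commit disables them before the instructions are removed. A stub-only sketch of that ordering follows; every function here is a local no-op stand-in, not the ERTS call it is named after.]

    /* Local no-op stubs standing in for the ERTS calls above. */
    static void stage_set(void)      {}   /* erts_set_debug_break()       */
    static void stage_clear(void)    {}   /* erts_clear_debug_break()     */
    static void install_bps(void)    {}   /* erts_install_breakpoints()   */
    static void uninstall_bps(void)  {}   /* erts_uninstall_breakpoints() */
    static void commit_staged(void)  {}   /* erts_commit_staged_bp()      */

    static void update_breakpoints(int set)
    {
        if (set) {
            stage_set();       /* prepare staging data                 */
            install_bps();     /* new instructions start out disabled  */
            commit_staged();   /* flip the index: breakpoints go live  */
        } else {
            stage_clear();
            commit_staged();   /* disable first...                     */
            uninstall_bps();   /* ...then removing the instruction
                                * words is safe                        */
        }
    }
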
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 15a937289d..efce070061 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -217,7 +217,6 @@ BeamInstr beam_continue_exit[1];
BeamInstr* em_call_error_handler;
BeamInstr* em_apply_bif;
-BeamInstr* em_call_traced_function;
/* NOTE These should be the only variables containing trace instructions.
@@ -4571,64 +4570,6 @@ void process_main(void)
* Trace and debugging support.
*/
- /*
- * At this point, I points to the code[3] in the export entry for
- * a trace-enabled function.
- *
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&call_traced_function
- * code[4]: Address of function.
- */
- OpCase(call_traced_function): {
- if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- unsigned offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
- Export* ep = (Export *) (((char *)I)-offset);
- Uint32 flags;
-
- SWAPOUT;
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg,
- 0, &c_p->tracer_proc);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- SWAPIN;
-
- if (flags & MATCH_SET_RX_TRACE) {
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - 3 < HTOP) {
- /* SWAPOUT, SWAPIN was done and r(0) was saved above */
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- }
- E -= 3;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((BeamInstr)(ep->code)));
- ASSERT(is_internal_pid(c_p->tracer_proc) ||
- is_internal_port(c_p->tracer_proc));
- E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */
- E[1] = am_true; /* Process tracer */
- E[0] = make_cp(ep->code);
- c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE)
- ? beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- SET_I((BeamInstr *)Arg(0));
- Dispatch();
- }
-
OpCase(return_trace): {
BeamInstr* code = (BeamInstr *) (UWord) E[0];
@@ -4643,80 +4584,22 @@ void process_main(void)
Goto(*I);
}
- OpCase(i_count_breakpoint): {
+ OpCase(i_generic_breakpoint): {
BeamInstr real_I;
-
- ErtsCountBreak(c_p, (BeamInstr *) I, &real_I);
- ASSERT(VALID_INSTR(real_I));
- Goto(real_I);
- }
-
- /* need to send mfa instead of bdt pointer
- * the pointer might be deallocated.
- */
-
- OpCase(i_time_breakpoint): {
- BeamInstr real_I;
- BpData **bds = (BpData **) (I)[-4];
- BpDataTime *bdt = NULL;
- Uint ix = 0;
-#ifdef ERTS_SMP
- ix = c_p->scheduler_data->no - 1;
-#else
- ix = 0;
-#endif
- bdt = (BpDataTime *)bds[ix];
-
- ASSERT((I)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- ASSERT(bdt);
- bdt = (BpDataTime *) bdt->next;
- ASSERT(bdt);
- bds[ix] = (BpData *) bdt;
- real_I = bdt->orig_instr;
+ ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ SWAPOUT;
+ reg[0] = r(0);
+ real_I = erts_generic_breakpoint(c_p, I, reg);
+ r(0) = reg[0];
+ SWAPIN;
ASSERT(VALID_INSTR(real_I));
-
- if (IS_TRACED_FL(c_p, F_TRACE_CALLS) && !(bdt->pause)) {
- if ( (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) ||
- (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) ||
- (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace))) {
- /* This _IS_ a tail recursive call */
- SWAPOUT;
- erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_TAIL_CALL);
- SWAPIN;
- } else {
- SWAPOUT;
- erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_CALL);
-
- /* r register needs to be copied to the array
- * for the garbage collector
- */
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - 2 < HTOP) {
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- }
- SWAPIN;
-
- ASSERT(c_p->htop <= E && E <= c_p->hend);
-
- E -= 2;
- E[0] = make_cp(I);
- E[1] = make_cp(c_p->cp); /* original return address */
- c_p->cp = beam_return_time_trace;
- }
- }
-
Goto(real_I);
}
OpCase(i_return_time_trace): {
BeamInstr *pc = (BeamInstr *) (UWord) E[0];
SWAPOUT;
- erts_trace_time_break(c_p, pc, NULL, ERTS_BP_CALL_TIME_RETURN);
+ erts_trace_time_return(c_p, pc);
SWAPIN;
c_p->cp = NULL;
SET_I((BeamInstr *) cp_val(E[1]));
@@ -4724,114 +4607,6 @@ void process_main(void)
Goto(*I);
}
- OpCase(i_trace_breakpoint):
- if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- BeamInstr real_I;
-
- ErtsBreakSkip(c_p, (BeamInstr *) I, &real_I);
- Goto(real_I);
- }
- /* Fall through to next case */
- OpCase(i_mtrace_breakpoint): {
- BeamInstr real_I;
- Uint32 flags;
- Eterm tracer_pid;
- Uint* cpp;
- int return_to_trace = 0, need = 0;
- flags = 0;
- SWAPOUT;
- reg[0] = r(0);
-
- if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) {
- cpp = &E[2];
- } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) {
- return_to_trace = !0;
- cpp = &E[0];
- } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) {
- return_to_trace = !0;
- cpp = &E[0];
- } else {
- cpp = NULL;
- }
- if (cpp) {
- /* This _IS_ a tail recursive call, if there are
- * return_trace and/or i_return_to_trace stackframes
- * on the stack, they are not intermixed with y registers
- */
- BeamInstr *cp_save = c_p->cp;
- for (;;) {
- ASSERT(is_CP(*cpp));
- if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) {
- cpp += 3;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
- return_to_trace = !0;
- cpp += 1;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_time_trace)) {
- cpp += 2;
- } else
- break;
- }
- c_p->cp = (BeamInstr *) cp_val(*cpp);
- ASSERT(is_CP(*cpp));
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN; /* Needed by shared heap. */
- c_p->cp = cp_save;
- } else {
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN; /* Needed by shared heap. */
- }
-
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
-
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
- need += 1;
- }
- if (flags & MATCH_SET_RX_TRACE) {
- need += 3;
- }
- if (need) {
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - need < HTOP) {
- /* SWAPOUT was done and r(0) was saved above */
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, need, reg, I[-1]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- }
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
- E -= 1;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- E[0] = make_cp(c_p->cp);
- c_p->cp = (BeamInstr *) beam_return_to_trace;
- }
- if (flags & MATCH_SET_RX_TRACE) {
- E -= 3;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm) (UWord) (I - 3)));
- ASSERT(am_true == tracer_pid ||
- is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
- E[2] = make_cp(c_p->cp);
- E[1] = tracer_pid;
- E[0] = make_cp(I - 3); /* We ARE at the beginning of an
- instruction,
- the funcinfo is above i. */
- c_p->cp =
- (flags & MATCH_SET_EXCEPTION_TRACE)
- ? beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- Goto(real_I);
- }
-
OpCase(i_return_to_trace): {
if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
Uint *cpp = (Uint*) E;
@@ -5181,7 +4956,6 @@ void process_main(void)
#endif /* NO_JUMP_TABLE */
em_call_error_handler = OpCode(call_error_handler);
- em_call_traced_function = OpCode(call_traced_function);
em_apply_bif = OpCode(apply_bif);
beam_apply[0] = (BeamInstr) OpCode(i_apply);
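
[Editor's note: the specialized breakpoint instructions above collapse into a single i_generic_breakpoint, which saves the x registers, lets erts_generic_breakpoint() perform whatever tracing is enabled, and then dispatches the instruction word it returns. The toy interpreter loop below shows just that displaced-instruction dispatch; the opcodes and types are hypothetical and none of this is the BEAM dispatch machinery.]

    #include <stdio.h>

    enum { OP_PRINT, OP_HALT, OP_BREAKPOINT };

    typedef struct { int orig_instr; } SketchBp;  /* GenericBp.orig_instr */

    /* Stand-in for erts_generic_breakpoint(): do the trace work, then
     * hand back the displaced instruction so execution is unchanged. */
    static int sketch_breakpoint(SketchBp *bp)
    {
        puts("breakpoint hit");
        return bp->orig_instr;
    }

    int main(void)
    {
        SketchBp bp = { OP_PRINT };              /* displaced instruction */
        int code[] = { OP_BREAKPOINT, OP_HALT };
        for (int pc = 0; ; pc++) {
            int instr = code[pc];
            if (instr == OP_BREAKPOINT)
                instr = sketch_breakpoint(&bp);  /* real_I in beam_emu.c */
            switch (instr) {
            case OP_PRINT: puts("original instruction runs"); break;
            case OP_HALT:  return 0;
            }
        }
    }
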
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index f1506a8684..1048f258a5 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -49,7 +49,6 @@ extern void** beam_ops;
extern BeamInstr beam_debug_apply[];
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_apply_bif;
-extern BeamInstr* em_call_traced_function;
/*
* The following variables keep a sorted list of address ranges for
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index ae4cca1e58..8025058ee0 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -44,6 +44,10 @@ struct code_ix_queue_item {
static struct code_ix_queue_item* the_code_ix_queue = NULL;
static erts_smp_mtx_t the_code_ix_queue_lock;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+static erts_tsd_key_t has_code_write_permission;
+#endif
+
void erts_code_ix_init(void)
{
/* We start emulator by initializing preloaded modules
@@ -53,6 +57,9 @@ void erts_code_ix_init(void)
erts_smp_atomic32_init_nob(&the_active_code_index, 0);
erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
erts_smp_mtx_init(&the_code_ix_queue_lock, "code_ix_queue");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_key_create(&has_code_write_permission);
+#endif
CIX_TRACE("init");
}
@@ -112,6 +119,9 @@ int erts_try_seize_code_write_permission(Process* c_p)
success = !the_code_ix_lock;
if (success) {
the_code_ix_lock = 1;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_set(has_code_write_permission, (void *) 1);
+#endif
}
else { /* Already locked */
struct code_ix_queue_item* qitem;
@@ -128,6 +138,7 @@ int erts_try_seize_code_write_permission(Process* c_p)
void erts_release_code_write_permission(void)
{
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
erts_smp_mtx_lock(&the_code_ix_queue_lock);
while (the_code_ix_queue != NULL) { /* unleash the entire herd */
struct code_ix_queue_item* qitem = the_code_ix_queue;
@@ -141,12 +152,15 @@ void erts_release_code_write_permission(void)
erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
}
the_code_ix_lock = 0;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_set(has_code_write_permission, (void *) 0);
+#endif
erts_smp_mtx_unlock(&the_code_ix_queue_lock);
}
#ifdef ERTS_ENABLE_LOCK_CHECK
int erts_is_code_ix_locked(void)
{
- return the_code_ix_lock;
+ return the_code_ix_lock && erts_tsd_get(has_code_write_permission);
}
#endif
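
[Editor's note: the code_ix.c change pairs the global lock flag with thread-specific data, so erts_is_code_ix_locked() now verifies that the current thread, not merely some thread, holds write permission. A minimal pthreads sketch of the same idea follows; names are assumed, and the real code uses erts_tsd_* and is compiled only under ERTS_ENABLE_LOCK_CHECK.]

    #include <pthread.h>
    #include <stddef.h>

    static pthread_key_t has_write_permission;
    static int code_write_locked;     /* stand-in for the_code_ix_lock */

    static void sketch_init(void)
    {
        pthread_key_create(&has_write_permission, NULL);
    }

    static void sketch_seize(void)
    {
        code_write_locked = 1;
        pthread_setspecific(has_write_permission, (void *) 1);
    }

    static void sketch_release(void)
    {
        pthread_setspecific(has_write_permission, (void *) 0);
        code_write_locked = 0;
    }

    /* The stronger check: not just "somebody holds the lock", but
     * "this very thread holds it", as in erts_is_code_ix_locked(). */
    static int sketch_is_locked_by_me(void)
    {
        return code_write_locked &&
               pthread_getspecific(has_write_permission) != NULL;
    }
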
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 7f1b02b9b4..e88fb8c9f4 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -42,12 +42,24 @@
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
+
+/*
+ * The following variables are protected by code write permission.
+ */
static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
static Eterm erts_default_meta_tracer_pid;
+static struct { /* Protected by code write permission */
+ int current;
+ int install;
+ int local;
+ BpFunctions f; /* Local functions */
+ BpFunctions e; /* Export entries */
+} finish_bp;
+
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
static BIF_RETTYPE
@@ -60,12 +72,11 @@ static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
-static int setup_func_trace(Export* ep, void* match_prog, ErtsCodeIndex);
-static int reset_func_trace(Export* ep, ErtsCodeIndex);
-static void reset_bif_trace(int bif_index);
-static void setup_bif_trace(int bif_index);
-static void set_trace_bif(int bif_index, void* match_prog);
-static void clear_trace_bif(int bif_index);
+static void reset_bif_trace(void);
+static void setup_bif_trace(void);
+static void install_exp_breakpoints(BpFunctions* f);
+static void uninstall_exp_breakpoints(BpFunctions* f);
+static void clean_export_entries(BpFunctions* f);
void
erts_bif_trace_init(void)
@@ -107,12 +118,12 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
int is_global;
Process *meta_tracer_proc = p;
Eterm meta_tracer_pid = p->id;
+ int is_blocking = 0;
if (!erts_try_seize_code_write_permission(p)) {
ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ finish_bp.current = -1;
UseTmpHeap(3,p);
/*
@@ -328,16 +339,24 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
meta_tracer_proc->trace_flags |= F_TRACER;
}
- matches = erts_set_trace_pattern(mfa, specified,
+ matches = erts_set_trace_pattern(p, mfa, specified,
match_prog_set, match_prog_set,
- on, flags, meta_tracer_pid);
+ on, flags, meta_tracer_pid, 0);
}
error:
MatchSetUnref(match_prog_set);
UnUseTmpHeap(3,p);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
+
+#ifdef ERTS_SMP
+ if (finish_bp.current >= 0) {
+ ASSERT(matches >= 0);
+ erts_notify_finish_breakpointing(p);
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(p, make_small(matches));
+ }
+#endif
+
erts_release_code_write_permission();
if (matches >= 0) {
@@ -355,6 +374,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
Eterm *meta_tracer_pid)
{
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked() ||
+ erts_smp_thr_progress_is_blocking());
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -369,6 +390,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
int erts_is_default_trace_enabled(void)
{
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked() ||
+ erts_smp_thr_progress_is_blocking());
return erts_default_trace_pattern_is_on;
}
@@ -842,6 +865,11 @@ Eterm trace_info_2(BIF_ALIST_2)
Eterm What = BIF_ARG_1;
Eterm Key = BIF_ARG_2;
Eterm res;
+
+ if (!erts_try_seize_code_write_permission(p)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, What, Key);
+ }
+
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
} else if (is_atom(What) || is_pid(What)) {
@@ -849,8 +877,10 @@ Eterm trace_info_2(BIF_ALIST_2)
} else if (is_tuple(What)) {
res = trace_info_func(p, What, Key);
} else {
+ erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
}
+ erts_release_code_write_permission();
BIF_RET(res);
}
@@ -978,64 +1008,54 @@ static int function_is_traced(Process *p,
Binary **ms, /* out */
Binary **ms_meta, /* out */
Eterm *tracer_pid_meta, /* out */
- Sint *count, /* out */
+ Uint *count, /* out */
Eterm *call_time) /* out */
{
Export e;
Export* ep;
- int i;
- BeamInstr *code;
+ BeamInstr* pc;
/* First look for an export entry */
e.code[0] = mfa[0];
e.code[1] = mfa[1];
e.code[2] = mfa[2];
if ((ep = export_get(&e)) != NULL) {
- if (ep->addressv[erts_active_code_ix()] == ep->code+3 &&
- ep->code[3] != (BeamInstr) em_call_error_handler) {
- if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- *ms = ep->match_prog_set;
+ pc = ep->code+3;
+ if (ep->addressv[erts_active_code_ix()] == pc &&
+ *pc != (BeamInstr) em_call_error_handler) {
+
+ int r = 0;
+
+ ASSERT(*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+
+ if (erts_is_trace_break(pc, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
- for (i = 0; i < BIF_SIZE; ++i) {
- if (bif_export[i] == ep) {
- int r = 0;
-
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- *ms = ep->match_prog_set;
- return FUNC_TRACE_GLOBAL_TRACE;
- } else {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- r |= FUNC_TRACE_LOCAL_TRACE;
- *ms = ep->match_prog_set;
- }
- if (erts_is_mtrace_break(ep->code+3, ms_meta,
- tracer_pid_meta)) {
- r |= FUNC_TRACE_META_TRACE;
- }
- if (erts_is_time_break(p, ep->code+3, call_time)) {
- r |= FUNC_TRACE_TIME_TRACE;
- }
- }
- return r ? r : FUNC_TRACE_UNTRACED;
- }
- }
- erl_exit(1,"Impossible ghost bif encountered in trace_info.");
+
+ if (erts_is_trace_break(pc, ms, 1)) {
+ r |= FUNC_TRACE_LOCAL_TRACE;
+ }
+ if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) {
+ r |= FUNC_TRACE_META_TRACE;
}
+ if (erts_is_time_break(p, pc, call_time)) {
+ r |= FUNC_TRACE_TIME_TRACE;
+ }
+ return r ? r : FUNC_TRACE_UNTRACED;
}
}
/* OK, now look for breakpoint tracing */
- if ((code = erts_find_local_func(mfa)) != NULL) {
+ if ((pc = erts_find_local_func(mfa)) != NULL) {
int r =
- (erts_is_trace_break(code, ms, NULL)
+ (erts_is_trace_break(pc, ms, 1)
? FUNC_TRACE_LOCAL_TRACE : 0)
- | (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta)
+ | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
- | (erts_is_count_break(code, count)
+ | (erts_is_count_break(pc, count)
? FUNC_TRACE_COUNT_TRACE : 0)
- | (erts_is_time_break(p, code, call_time)
+ | (erts_is_time_break(p, pc, call_time)
? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1050,7 +1070,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
Eterm* hp;
DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
Binary *ms = NULL, *ms_meta = NULL;
- Sint count = 0;
+ Uint count = 0;
Eterm traced = am_false;
Eterm match_spec = am_false;
Eterm retval = am_false;
@@ -1138,9 +1158,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
break;
case am_call_count:
if (r & FUNC_TRACE_COUNT_TRACE) {
- retval = count < 0 ?
- erts_make_integer(-count-1, p) :
- erts_make_integer(count, p);
+ retval = erts_make_integer(count, p);
}
break;
case am_call_time:
@@ -1329,39 +1347,46 @@ trace_info_on_load(Process* p, Eterm key)
#undef FUNC_TRACE_LOCAL_TRACE
int
-erts_set_trace_pattern(Eterm* mfa, int specified,
+erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
- Eterm meta_tracer_pid)
+ Eterm meta_tracer_pid, int is_blocking)
{
const ErtsCodeIndex code_ix = erts_active_code_ix();
int matches = 0;
int i;
+ int n;
+ BpFunction* fp;
/*
* First work on normal functions (not real BIFs).
*/
-
- for (i = 0; i < export_list_size(code_ix); i++) {
- Export* ep = export_list(i, code_ix);
- int j;
-
- if (ExportIsBuiltIn(ep)) {
- continue;
- }
-
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j == specified) {
- if (on) {
- if (! flags.breakpoint)
- matches += setup_func_trace(ep, match_prog_set, code_ix);
- else
- reset_func_trace(ep, code_ix);
- } else if (! flags.breakpoint) {
- matches += reset_func_trace(ep, code_ix);
+ erts_bp_match_export(&finish_bp.e, mfa, specified);
+ fp = finish_bp.e.matching;
+ n = finish_bp.e.matched;
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code));
+
+ if (!on || flags.breakpoint) {
+ erts_clear_call_trace_bif(pc, 0);
+ if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ }
+ } else {
+ if (ep->addressv[code_ix] != pc) {
+ fp[i].mod->curr.num_traced_exports++;
+#ifdef DEBUG
+ pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+#endif
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ pc[1] = (BeamInstr) ep->addressv[code_ix];
+ }
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
+ if (ep->addressv[code_ix] != pc) {
+ pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
}
}
}
@@ -1386,26 +1411,15 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/* Empty loop body */
}
if (j == specified) {
+ BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3;
+
if (! flags.breakpoint) { /* Export entry call trace */
if (on) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- ASSERT(ExportIsBuiltIn(bif_export[i]));
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_GLOBAL;
- setup_bif_trace(i);
+ erts_clear_call_trace_bif(pc, 1);
+ erts_clear_mtrace_bif(pc);
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
} else { /* off */
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
- }
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
+ erts_clear_call_trace_bif(pc, 0);
}
matches++;
} else { /* Breakpoint call trace */
@@ -1413,52 +1427,33 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (on) {
if (flags.local) {
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_clear_call_trace_bif(pc, 0);
+ erts_set_call_trace_bif(pc, match_prog_set, 1);
m = 1;
}
if (flags.meta) {
- erts_set_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3,
- meta_match_prog_set, meta_tracer_pid);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_META;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_set_mtrace_bif(pc, meta_match_prog_set,
+ meta_tracer_pid);
m = 1;
}
if (flags.call_time) {
- erts_set_time_trace_bif(bif_export[i]->code + 3, on);
+ erts_set_time_trace_bif(pc, on);
/* I don't want to remove any other tracers */
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME;
m = 1;
}
- if (erts_bif_trace_flags[i]) {
- setup_bif_trace(i);
- }
} else { /* off */
if (flags.local) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- }
+ erts_clear_call_trace_bif(pc, 1);
m = 1;
}
if (flags.meta) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
+ erts_clear_mtrace_bif(pc);
m = 1;
}
if (flags.call_time) {
- erts_clear_time_trace_bif(bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME;
+ erts_clear_time_trace_bif(pc);
m = 1;
}
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
}
matches += m;
}
@@ -1468,186 +1463,242 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/*
** So, now for breakpoint tracing
*/
+ erts_bp_match_functions(&finish_bp.f, mfa, specified);
if (on) {
if (! flags.breakpoint) {
- erts_clear_trace_break(mfa, specified);
- erts_clear_mtrace_break(mfa, specified);
- erts_clear_count_break(mfa, specified);
- erts_clear_time_break(mfa, specified);
+ erts_clear_all_breaks(&finish_bp.f);
} else {
- int m = 0;
if (flags.local) {
- m = erts_set_trace_break(mfa, specified, match_prog_set,
- am_true);
+ erts_set_trace_break(&finish_bp.f, match_prog_set);
}
if (flags.meta) {
- m = erts_set_mtrace_break(mfa, specified, meta_match_prog_set,
- meta_tracer_pid);
+ erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
+ meta_tracer_pid);
}
if (flags.call_count) {
- m = erts_set_count_break(mfa, specified, on);
+ erts_set_count_break(&finish_bp.f, on);
}
if (flags.call_time) {
- m = erts_set_time_break(mfa, specified, on);
+ erts_set_time_break(&finish_bp.f, on);
}
- /* All assignments to 'm' above should give the same value,
- * so just use the last */
- matches += m;
}
} else {
- int m = 0;
if (flags.local) {
- m = erts_clear_trace_break(mfa, specified);
+ erts_clear_trace_break(&finish_bp.f);
}
if (flags.meta) {
- m = erts_clear_mtrace_break(mfa, specified);
+ erts_clear_mtrace_break(&finish_bp.f);
}
if (flags.call_count) {
- m = erts_clear_count_break(mfa, specified);
+ erts_clear_count_break(&finish_bp.f);
}
if (flags.call_time) {
- m = erts_clear_time_break(mfa, specified);
+ erts_clear_time_break(&finish_bp.f);
}
- /* All assignments to 'm' above should give the same value,
- * so just use the last */
- matches += m;
}
+ finish_bp.current = 0;
+ finish_bp.install = on;
+ finish_bp.local = flags.breakpoint;
+
+#ifdef ERTS_SMP
+ if (is_blocking) {
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+#endif
+ while (erts_finish_breakpointing()) {
+ /* Empty loop body */
+ }
+#ifdef ERTS_SMP
+ finish_bp.current = -1;
+ }
+#endif
+
+ if (flags.breakpoint) {
+ matches += finish_bp.f.matched;
+ } else {
+ matches += finish_bp.e.matched;
+ }
return matches;
}
-/*
- * Setup function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
-
-static int
-setup_func_trace(Export* ep, void* match_prog, ErtsCodeIndex code_ix)
+int
+erts_finish_breakpointing(void)
{
- Module* modp;
-
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
- }
- }
-
+ ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+
/*
- * Currently no trace support for native code.
+ * Memory barriers will be issued for all processes *before*
+ * each of the stages below. (Unless the other schedulers
+ * are blocked, in which case memory barriers will be issued
+ * when they are awakened.)
*/
- if (erts_is_native_break(ep->addressv[code_ix])) {
+
+ switch (finish_bp.current++) {
+ case 0:
+ /*
+ * At this point, in all functions that are to be breakpointed,
+ * a pointer to a GenericBp struct has already been added.
+ *
+ * Insert the new breakpoints (if any) into the
+ * code. Different schedulers may see the breakpoint instruction
+ * at different times, but it does not matter since the newly
+ * added breakpoints are disabled.
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ erts_install_breakpoints(&finish_bp.f);
+ } else {
+ install_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ setup_bif_trace();
+ return 1;
+ case 1:
+ /*
+ * Switch index for the breakpoint data, activating the staged
+ * data. (Depending on the changes in the breakpoint data,
+ * that could either activate breakpoints or disable
+ * breakpoints.)
+ */
+ erts_commit_staged_bp();
+ return 1;
+ case 2:
+ /*
+ * Remove breakpoint instructions for disabled breakpoints
+ * (if any).
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ } else {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ }
+ } else {
+ if (finish_bp.local) {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ } else {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ reset_bif_trace();
+ return 1;
+ case 3:
+ /*
+ * Now all breakpoints have either been inserted or removed.
+ * For all updated breakpoints, copy the active breakpoint
+ * data to the staged breakpoint data to make them equal
+ * (simplifying for the next time breakpoints are to be
+ * updated). If any breakpoints have been totally disabled,
+ * deallocate the GenericBp structs for them.
+ */
+ erts_consolidate_bif_bp_data();
+ clean_export_entries(&finish_bp.e);
+ erts_consolidate_bp_data(&finish_bp.e, 0);
+ erts_consolidate_bp_data(&finish_bp.f, 1);
+ erts_bp_free_matched_functions(&finish_bp.e);
+ erts_bp_free_matched_functions(&finish_bp.f);
return 0;
+ default:
+ ASSERT(0);
}
+ return 0;
+}
- ep->code[3] = (BeamInstr) em_call_traced_function;
- ep->code[4] = (BeamInstr) ep->addressv[code_ix];
- ep->addressv[code_ix] = ep->code+3;
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
+static void
+install_exp_breakpoints(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
- modp = erts_get_module(ep->code[0], code_ix);
- ASSERT(modp);
- modp->curr.num_traced_exports++;
- return 1;
-}
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
-static void setup_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[bif_index].traced;
+ ep->addressv[code_ix] = pc;
+ }
}
-static void set_trace_bif(int bif_index, void* match_prog) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "set_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
-}
+static void
+uninstall_exp_breakpoints(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
-/*
- * Reset function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
-static int
-reset_func_trace(Export* ep, ErtsCodeIndex code_ix)
-{
- if (ep->addressv[code_ix] == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- Module* modp = erts_get_module(ep->code[0], code_ix);
- ASSERT(modp);
- modp->curr.num_traced_exports--;
-
- ep->addressv[code_ix] = (Uint *) ep->code[4];
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
+ if (ep->addressv[code_ix] != pc) {
+ continue;
}
+ ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f));
+ ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
}
-
- /*
- * Currently no trace support for native code.
- */
- if (erts_is_native_break(ep->addressv[code_ix])) {
- return 0;
- }
-
- /*
- * Nothing to do, but the export entry matches.
- */
+}
+
+static void
+clean_export_entries(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
+
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
- return 1;
+ if (ep->addressv[code_ix] == pc) {
+ continue;
+ }
+ if (*pc == (BeamInstr) BeamOp(op_jump_f)) {
+ ep->code[3] = (BeamInstr) 0;
+ ep->code[4] = (BeamInstr) 0;
+ }
+ }
}
-static void reset_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ASSERT(! ep->match_prog_set);
- ASSERT(! erts_is_mtrace_break((BeamInstr *)ep->code+3, NULL, NULL));
- ep->code[4] = (BeamInstr) bif_table[bif_index].f;
+static void
+setup_bif_trace(void)
+{
+ int i;
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].traced;
+ }
+ }
+ }
}
-static void clear_trace_bif(int bif_index) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "clear_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
+static void
+reset_bif_trace(void)
+{
+ int i;
+ ErtsBpIndex active = erts_active_bp_ix();
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ BeamInstr* pc = ep->code+3;
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (g && g->data[active].flags == 0) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].f;
+ }
+ }
+ }
}
/*
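
[Editor's note: erts_finish_breakpointing() above is written as a resumable state machine: each call completes one stage and returns nonzero while stages remain, and the caller guarantees that every scheduler passes a memory barrier between calls. A condensed model follows, with the stage bodies reduced to comments; it is a sketch of the control flow only, not the ERTS implementation.]

    static int stage;   /* finish_bp.current in erl_bif_trace.c */

    static int sketch_finish(void)
    {
        switch (stage++) {
        case 0: /* write the new (still disabled) breakpoint instructions */
            return 1;
        case 1: /* flip the active/staging index: staged data goes live   */
            return 1;
        case 2: /* remove instructions whose breakpoint data is now empty */
            return 1;
        case 3: /* consolidate slots and free per-update bookkeeping      */
        default:
            return 0;
        }
    }

    /* With is_blocking set (all other schedulers stopped), the stages
     * can simply run back to back, as erts_set_trace_pattern() does: */
    static void sketch_finish_blocking(void)
    {
        while (sketch_finish())
            ;
    }
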
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index da4376fd0a..594c51a5db 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1711,9 +1711,10 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- BpData** bps = (BpData**) code_ptr[1];
- BpData* bp = (BpData*) bps[erts_bp_sched2ix()];
- bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ GenericBp* g = (GenericBp *) code_ptr[1];
+ ASSERT(code_ptr[5+0] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
code_ptr[5+2] = (BeamInstr) lib;
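
[Editor's note: because the live instruction word of a traced function now holds op_i_generic_breakpoint, the NIF loader must patch the displaced copy in GenericBp.orig_instr rather than the code itself. A small sketch of that patch-through rule, with hypothetical types:]

    typedef unsigned long Instr;

    typedef struct { Instr orig_instr; } SketchBp;

    /* Patch an instruction word, honouring an installed breakpoint:
     * the live word stays a breakpoint; the displaced copy changes. */
    static void sketch_patch(Instr *pc, SketchBp *bp,
                             Instr new_instr, Instr breakpoint_op)
    {
        if (*pc == breakpoint_op)
            bp->orig_instr = new_instr;   /* as load_nif_2 now does  */
        else
            *pc = new_instr;              /* untraced: patch directly */
    }
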
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 3d978f4994..e07c9ae2b0 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -530,6 +530,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#endif
#ifdef ERTS_SMP
valid |= ERTS_SSI_AUX_WORK_CODE_IX_ACTIVATION;
+ valid |= ERTS_SSI_AUX_WORK_FINISH_BP;
#endif
#ifdef ERTS_SSI_AUX_WORK_REAP_PORTS
valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
@@ -1446,6 +1447,55 @@ handle_code_ix_activation(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
}
#endif /* ERTS_SMP */
+#ifdef ERTS_SMP
+void
+erts_notify_finish_breakpointing(Process* p)
+{
+ ErtsAuxWorkData* awdp = &p->scheduler_data->aux_work_data;
+
+ ASSERT(awdp->bp_ix_activation.stager == NULL);
+ awdp->bp_ix_activation.stager = p;
+ awdp->bp_ix_activation.thr_prgr = erts_thr_progress_later(awdp->esdp);
+ erts_thr_progress_wakeup(awdp->esdp, awdp->bp_ix_activation.thr_prgr);
+ erts_smp_proc_inc_refc(p);
+ set_aux_work_flags_wakeup_relb(p->scheduler_data->ssi,
+ ERTS_SSI_AUX_WORK_FINISH_BP);
+}
+
+static erts_aint32_t
+handle_finish_bp(ErtsAuxWorkData* awdp, erts_aint32_t aux_work)
+{
+ ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+
+ if (!erts_thr_progress_has_reached_this(current,
+ awdp->bp_ix_activation.thr_prgr)) {
+ return aux_work & ~ERTS_SSI_AUX_WORK_FINISH_BP;
+ }
+ if (erts_finish_breakpointing()) { /* Not done */
+ /* Arrange for being called again */
+ awdp->bp_ix_activation.thr_prgr =
+ erts_thr_progress_later(awdp->esdp);
+ erts_thr_progress_wakeup(awdp->esdp, awdp->bp_ix_activation.thr_prgr);
+ } else { /* Done */
+ Process* p;
+
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_FINISH_BP);
+ p = awdp->bp_ix_activation.stager;
+#ifdef DEBUG
+ awdp->bp_ix_activation.stager = NULL;
+#endif
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_dec_refc(p);
+ erts_release_code_write_permission();
+ }
+ return aux_work & ~ERTS_SSI_AUX_WORK_FINISH_BP;
+}
+#endif /* ERTS_SMP */
+
static ERTS_INLINE erts_aint32_t
handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
{
@@ -1817,6 +1867,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS,
handle_reap_ports);
+#ifdef ERTS_SMP
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_FINISH_BP,
+ handle_finish_bp);
+#endif
+
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
#ifdef ERTS_SMP
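
[Editor's note: handle_finish_bp() above shows the aux-work pattern that replaces blocking the schedulers: do nothing until thread progress reaches the recorded value, re-arm while erts_finish_breakpointing() reports more stages, and finally resume the suspended caller and release write permission. Below is a toy model with a fake progress clock; every name is hypothetical.]

    static long progress_now;         /* fake thread-progress clock    */
    static int  stages_left = 4;      /* pretend four stages remain    */

    static long sketch_current_progress(void) { return progress_now; }
    static int  sketch_finish_stage(void) { return --stages_left > 0; }

    typedef struct {
        long thr_prgr;           /* progress value to wait for         */
        int  stager_suspended;   /* models the suspended BIF caller    */
    } SketchAux;

    static void sketch_handle_finish_bp(SketchAux *aux)
    {
        if (sketch_current_progress() < aux->thr_prgr)
            return;                      /* barrier not passed; retry later */
        if (sketch_finish_stage())
            aux->thr_prgr = sketch_current_progress() + 1;  /* re-arm  */
        else
            aux->stager_suspended = 0;   /* done: resume the caller    */
    }
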
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index e9e5a3365f..93e71681da 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -283,6 +283,7 @@ typedef enum {
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 10)
#define ERTS_SSI_AUX_WORK_CODE_IX_ACTIVATION (((erts_aint32_t) 1) << 11)
#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 12)
+#define ERTS_SSI_AUX_WORK_FINISH_BP (((erts_aint32_t) 1) << 13)
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
@@ -472,6 +473,12 @@ typedef struct {
ErtsThrPrgrVal thr_prgr;
} code_ix_activation;
#endif
+#ifdef ERTS_SMP
+ struct {
+ Process* stager;
+ ErtsThrPrgrVal thr_prgr;
+ } bp_ix_activation;
+#endif
} ErtsAuxWorkData;
struct ErtsSchedulerData_ {
@@ -1188,6 +1195,7 @@ void erts_notify_check_async_ready_queue(void *);
#endif
#ifdef ERTS_SMP
void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later);
+void erts_notify_finish_breakpointing(Process* p);
#endif
void erts_schedule_misc_aux_work(int sched_id,
void (*func)(void *),
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index bc988cd61b..d04a91f18c 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2134,187 +2134,6 @@ void save_calls(Process *p, Export *e)
}
}
-/*
- * Entry point called by the trace wrap functions in erl_bif_wrap.c
- *
- * The trace wrap functions are themselves called through the export
- * entries instead of the original BIF functions.
- */
-Eterm
-erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
-{
- Eterm result;
- int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);
-
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-
- if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_CALLS) && (! meta)) {
- /* Warning! This is an Optimization.
- *
- * If neither meta trace is active nor process trace flags then
- * no tracing will occur. Doing the whole else branch will
- * also do nothing, only slower.
- */
- Eterm (*func)(Process*, Eterm*, BeamInstr*) = bif_table[bif_index].f;
- result = func(p, args, I);
- } else {
- Eterm (*func)(Process*, Eterm*, BeamInstr*);
- Export* ep = bif_export[bif_index];
- Uint32 flags = 0, flags_meta = 0;
- int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
- int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
- int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME);
- Eterm meta_tracer_pid = NIL;
- int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
- BeamInstr *cp = p->cp;
-
- /*
- * Make continuation pointer OK, it is not during direct BIF calls,
- * but it is correct during apply of bif.
- */
- if (!applying) {
- p->cp = I;
- }
- if (global || local) {
- flags = erts_call_trace(p, ep->code, ep->match_prog_set, args,
- local, &p->tracer_proc);
- }
- if (meta) {
- flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
- &meta_tracer_pid);
- }
- if (time) {
- BpDataTime *bdt = NULL;
- BeamInstr *pc = (BeamInstr *)ep->code+3;
-
- bdt = (BpDataTime *) erts_get_time_break(p, pc);
- ASSERT(bdt);
-
- if (!bdt->pause) {
- erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL);
- }
- }
- /* Restore original continuation pointer (if changed). */
- p->cp = cp;
-
- func = bif_table[bif_index].f;
-
- result = func(p, args, I);
-
- if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
- BeamInstr i_return_trace = beam_return_trace[0];
- BeamInstr i_return_to_trace = beam_return_to_trace[0];
- BeamInstr i_return_time_trace = beam_return_time_trace[0];
- Eterm *cpp;
- /* Maybe advance cp to skip trace stack frames */
- for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
- if (*cp == i_return_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 2; /* Skip return_trace parameters */
- } else if (*cp == i_return_time_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 1; /* Skip return_time_trace parameters */
- } else if (*cp == i_return_to_trace) {
- /* A return_to trace message is going to be generated
- * by normal means, so we do not have to.
- */
- cp = NULL;
- break;
- } else break;
- }
- }
-
- /* Try to get these in the order
- * they usually appear in normal code... */
- if (is_non_value(result)) {
- Uint reason = p->freason;
- if (reason != TRAP) {
- Eterm class;
- Eterm value = p->fvalue;
- DeclareTmpHeapNoproc(nocatch,3);
- UseTmpHeapNoproc(3);
- /* Expand error value like in handle_error() */
- if (reason & EXF_ARGLIST) {
- Eterm *tp;
- ASSERT(is_tuple(value));
- tp = tuple_val(value);
- value = tp[1];
- }
- if ((reason & EXF_THROWN) && (p->catches <= 0)) {
- value = TUPLE2(nocatch, am_nocatch, value);
- reason = EXC_ERROR;
- }
- /* Note: expand_error_value() could theoretically
- * allocate on the heap, but not for any error
- * returned by a BIF, and it would do no harm,
- * just be annoying.
- */
- value = expand_error_value(p, reason, value);
- class = exception_tag[GET_EXC_CLASS(reason)];
-
- if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &meta_tracer_pid);
- }
- if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &p->tracer_proc);
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
- /* can only happen if(local)*/
- Eterm *ptr = p->stop;
- ASSERT(is_CP(*ptr));
- ASSERT(ptr <= STACK_START(p));
- /* Search the nearest stack frame for a catch */
- while (++ptr < STACK_START(p)) {
- if (is_CP(*ptr)) break;
- if (is_catch(*ptr)) {
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into
- * calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- UnUseTmpHeapNoproc(3);
- if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- } else {
- if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer_pid);
- }
- /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
- if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &p->tracer_proc);
- }
- if (flags & MATCH_SET_RETURN_TO_TRACE) {
- /* can only happen if(local)*/
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- return result;
-}
-
/* Sends trace message:
* {trace_ts, Pid, What, Msg, Timestamp}
* or {trace, Pid, What, Msg}
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 229641cb32..6b5121f917 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -137,7 +137,6 @@ export_alloc(struct export_entry* tmpl_e)
obj->code[2] = tmpl->code[2];
obj->code[3] = (BeamInstr) em_call_error_handler;
obj->code[4] = 0;
- obj->match_prog_set = NULL;
for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
obj->addressv[ix] = obj->code+3;
@@ -260,8 +259,9 @@ erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
struct export_entry* ee;
ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
- if (ee == NULL || (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
- ee->ep->code[3] != (BeamInstr) em_call_traced_function)) {
+ if (ee == NULL ||
+ (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
+ ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
return NULL;
}
return ee->ep;
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index ec9fcb26f2..ee06e69aff 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -37,7 +37,6 @@
typedef struct export
{
void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
- struct binary* match_prog_set; /* Match program for tracing. */
BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
/*
@@ -46,12 +45,12 @@ typedef struct export
* code[2]: Arity (untagged integer).
* code[3]: This entry is 0 unless the 'address' field points to it.
* Threaded code instruction to load function
- * (em_call_error_handler), execute BIF (em_apply_bif,
- * em_apply_apply), or call a traced function
- * (em_call_traced_function).
- * code[4]: Function pointer to BIF function (for BIFs only)
+ * (em_call_error_handler), execute BIF (em_apply_bif),
+ * or a breakpoint instruction (op_i_generic_breakpoint).
+ * code[4]: Function pointer to BIF function (for BIFs only),
* or pointer to threaded code if the module has an
- * on_load function that has not been run yet.
+ * on_load function that has not been run yet, or pointer
+ * to code for the function if code[3] is a breakpoint instruction.
* Otherwise: 0.
*/
BeamInstr code[5];
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 9e387c550f..c9be20322d 100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -1682,10 +1682,10 @@ struct trace_pattern_flags {
};
extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Eterm* mfa, int specified,
+int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags,
- Eterm meta_tracer_pid);
+ Eterm meta_tracer_pid, int is_blocking);
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
Binary **match_spec,
@@ -1694,6 +1694,7 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
Eterm *meta_tracer_pid);
int erts_is_default_trace_enabled(void);
void erts_bif_trace_init(void);
+int erts_finish_breakpointing(void);
/*
** Call_trace uses this API for the parameter matching functions
@@ -1739,14 +1740,6 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
breakpoint functions */
#define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */
#define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
-/*
- * Flag values when tracing bif
- * Future note: flag field is 8 bits
- */
-#define BIF_TRACE_AS_LOCAL (0x1)
-#define BIF_TRACE_AS_GLOBAL (0x2)
-#define BIF_TRACE_AS_META (0x4)
-#define BIF_TRACE_AS_CALL_TIME (0x8)
extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
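The MATCH_SET_RETURN_TRACE, MATCH_SET_EXCEPTION_TRACE, and MATCH_SET_RETURN_TO_TRACE flags kept in the hunk above are driven from Erlang by match specification actions and trace flags. A minimal sketch, assuming an already-existing process Pid; the choice of lists:reverse/1 and lists:seq/2 is illustrative only:

    %% {return_trace} and {exception_trace} are match-spec actions
    %% (MATCH_SET_RETURN_TRACE / MATCH_SET_EXCEPTION_TRACE); return_to
    %% is an ordinary trace flag (MATCH_SET_RETURN_TO_TRACE).
    setup(Pid) ->
        erlang:trace(Pid, true, [call, return_to]),
        erlang:trace_pattern({lists, reverse, 1},
                             [{'_', [], [{return_trace}]}], [local]),
        erlang:trace_pattern({lists, seq, 2},
                             [{'_', [], [{exception_trace}]}], [local]).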
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index c58b36231c..6764e88c81 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -62,11 +62,8 @@ label L
i_func_info I a a I
int_code_end
-i_trace_breakpoint
-i_mtrace_breakpoint
+i_generic_breakpoint
i_debug_breakpoint
-i_count_breakpoint
-i_time_breakpoint
i_return_time_trace
i_return_to_trace
i_yield
@@ -522,7 +519,6 @@ apply_bif
call_nif
call_error_handler
error_action_code
-call_traced_function
return_trace
#
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index a642c3a63a..eaecd32f95 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -78,7 +78,13 @@ init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
end_per_testcase(_Func, Config) ->
Dog = ?config(watchdog, Config),
- ?t:timetrap_cancel(Dog).
+ ?t:timetrap_cancel(Dog),
+
+    %% Reloading the module will clear all trace patterns and, in a
+    %% debug-compiled emulator, trigger assertions on the counters
+    %% for the number of traced exported functions in this module.
+
+ c:l(?MODULE).
hipe(Config) when is_list(Config) ->
?line 0 = erlang:trace_pattern({?MODULE,worker_foo,1}, true),
@@ -187,7 +193,12 @@ basic() ->
%% Trace some functions...
?line trace_func({lists,'_','_'}, []),
+
+ %% Make sure that tracing the same functions more than once
+ %% does not cause any problems.
?line 3 = trace_func({?MODULE,foo,'_'}, true),
+ ?line 3 = trace_func({?MODULE,foo,'_'}, true),
+ ?line 1 = trace_func({?MODULE,bar,0}, true),
?line 1 = trace_func({?MODULE,bar,0}, true),
?line {traced,global} = trace_info({?MODULE,bar,0}, traced),
?line 1 = trace_func({erlang,list_to_integer,1}, true),
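The doubled trace_func/2 calls added above check that erlang:trace_pattern/3 is idempotent: re-applying a pattern returns the same match count and leaves a single breakpoint in place. A minimal sketch of the same check, assuming a loaded module m that exports bar/0 (both names are hypothetical):

    %% Setting the same pattern twice returns the same count and
    %% still reports exactly one globally traced function.
    1 = erlang:trace_pattern({m, bar, 0}, true, [global]),
    1 = erlang:trace_pattern({m, bar, 0}, true, [global]),
    {traced, global} = erlang:trace_info({m, bar, 0}, traced).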
diff --git a/erts/emulator/test/trace_local_SUITE.erl b/erts/emulator/test/trace_local_SUITE.erl
index 32e2a98e3c..b238d5c630 100644
--- a/erts/emulator/test/trace_local_SUITE.erl
+++ b/erts/emulator/test/trace_local_SUITE.erl
@@ -80,6 +80,7 @@ config(priv_dir,_) ->
exception_meta_nocatch/1, exception_meta_nocatch_apply/1,
exception_meta_nocatch_function/1,
exception_meta_nocatch_apply_function/1,
+ concurrency/1,
init_per_testcase/2, end_per_testcase/2]).
init_per_testcase(_Case, Config) ->
?line Dog=test_server:timetrap(test_server:minutes(2)),
@@ -89,7 +90,15 @@ end_per_testcase(_Case, Config) ->
shutdown(),
Dog=?config(watchdog, Config),
test_server:timetrap_cancel(Dog),
- ok.
+
+    %% Reloading the module will clear all trace patterns and, in a
+    %% debug-compiled emulator, trigger assertions on the counters
+    %% for the number of functions with breakpoints.
+
+ c:l(?MODULE).
+
+
+
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
@@ -106,7 +115,8 @@ all() ->
exception_meta_apply_function, exception_meta_nocatch,
exception_meta_nocatch_apply,
exception_meta_nocatch_function,
- exception_meta_nocatch_apply_function]
+ exception_meta_nocatch_apply_function,
+ concurrency]
end.
groups() ->
@@ -350,7 +360,8 @@ same(A, B) ->
basic_test() ->
?line setup([call]),
- ?line erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
+ NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
+ NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
?line erlang:trace_pattern({?MODULE,slave,'_'},false,[local]),
?line [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
?line ?CT(?MODULE,exported_wrap,[1]),
@@ -813,6 +824,42 @@ clean_location({crash,{Reason,Stk0}}) ->
{crash,{Reason,Stk}};
clean_location(Term) -> Term.
+concurrency(_Config) ->
+ N = erlang:system_info(schedulers),
+
+ %% Spawn 2*N processes that spin in a tight infinite loop,
+ %% and one process that will turn on and off local call
+ %% trace on the infinite_loop/0 function. We expect the
+ %% emulator to crash if there is a memory barrier bug or
+ %% if an aligned word-sized write is not atomic.
+
+ Ps0 = [spawn_monitor(fun() -> infinite_loop() end) ||
+ _ <- lists:seq(1, 2*N)],
+ OnAndOff = fun() -> concurrency_on_and_off() end,
+ Ps1 = [spawn_monitor(OnAndOff)|Ps0],
+ ?t:sleep(1000),
+
+    %% Now spawn off N more processes that turn on and off
+ %% a local trace pattern.
+ Ps = [spawn_monitor(OnAndOff) || _ <- lists:seq(1, N)] ++ Ps1,
+ ?t:sleep(1000),
+
+ %% Clean up.
+ [exit(Pid, kill) || {Pid,_} <- Ps],
+ [receive
+ {'DOWN',Ref,process,Pid,killed} -> ok
+ end || {Pid,Ref} <- Ps],
+ erlang:trace_pattern({?MODULE,infinite_loop,0}, false, [local]),
+ ok.
+
+concurrency_on_and_off() ->
+ 1 = erlang:trace_pattern({?MODULE,infinite_loop,0}, true, [local]),
+ 1 = erlang:trace_pattern({?MODULE,infinite_loop,0}, false, [local]),
+ concurrency_on_and_off().
+
+infinite_loop() ->
+ infinite_loop().
+
%%% Tracee target functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
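Both test suites now rely on module reloading to clear breakpoints in end_per_testcase/2 (the c:l(?MODULE) calls above). A minimal sketch of that behaviour, assuming a loaded module m that exports f/0 (hypothetical names):

    1 = erlang:trace_pattern({m, f, 0}, true, [local]),
    {traced, local} = erlang:trace_info({m, f, 0}, traced),
    c:l(m),                           %% reloading clears the breakpoint
    {traced, false} = erlang:trace_info({m, f, 0}, traced).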
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index 91efb4c023..a841f26d6a 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -167,7 +167,6 @@ typedef struct bif_entry {
extern BifEntry bif_table[];
extern Export* bif_export[];
-extern unsigned char erts_bif_trace_flags[];
#define BIF_SIZE $bif_size
@@ -197,7 +196,6 @@ includes("export.h", "sys.h", "erl_vm.h", "erl_process.h", "bif.h",
"erl_bif_table.h", "erl_atom_table.h");
print "\nExport* bif_export[BIF_SIZE];\n";
-print "unsigned char erts_bif_trace_flags[BIF_SIZE];\n\n";
print "BifEntry bif_table[] = {\n";
for ($i = 0; $i < @bif; $i++) {