Diffstat (limited to 'erts/emulator/beam')
48 files changed, 1794 insertions, 5100 deletions
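The bulk of the churn below comes from one design change: beam_bp.c drops the per-opcode breakpoint rings (op_i_trace_breakpoint, op_i_count_breakpoint, ...) in favour of a single op_i_generic_breakpoint whose data is kept in two generations. Trace BIFs mutate the staging generation and publish it by swapping two atomic indices in erts_commit_staged_bp(), so running code never sees a half-updated breakpoint. A minimal sketch of that pattern, with illustrative names rather than the real ERTS declarations:

    #include <stdatomic.h>

    /* Two generations of breakpoint settings: executing code reads the
     * active one, trace BIFs mutate the staging one. */
    typedef struct {
        unsigned flags;               /* which trace features are on */
    } BpGenData;

    typedef struct {
        unsigned long orig_instr;     /* instruction the breakpoint displaced */
        BpGenData data[2];            /* active and staging generations */
    } BpGeneric;

    static atomic_int active_ix;      /* staging index is active_ix ^ 1 */

    static int staging_ix(void)
    {
        return atomic_load(&active_ix) ^ 1;
    }

    static void commit_staged(void)
    {
        /* Publish the staged settings: the staging generation becomes
         * active. Readers that already loaded the old index keep seeing
         * consistent (old) data until their next load. */
        atomic_store(&active_ix, staging_ix());
    }

(beam_bp.c keeps two separate atomic indices, erts_active_bp_index and erts_staging_bp_index, and swaps them, which amounts to the same thing.)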
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index 78c566ed38..106fad030b 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -244,7 +244,6 @@ atom gather_sched_wall_time_result atom getting_linked atom getting_unlinked atom global -atom global_heaps_size atom Gt='>' atom grun atom group_leader @@ -259,7 +258,6 @@ atom hide atom high atom hipe_architecture atom http httph https http_response http_request http_header http_eoh http_error http_bin httph_bin -atom hybrid atom id atom if_clause atom imports diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 768c38dae1..94f8edf165 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -361,7 +361,7 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking, * without any memory barriers at all. */ - later = erts_thr_progress_later(); + later = erts_thr_progress_later(c_p->scheduler_data); erts_thr_progress_wakeup(c_p->scheduler_data, later); erts_notify_code_ix_activation(c_p, later); erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); @@ -672,11 +672,11 @@ set_default_trace_pattern(Eterm module) if (trace_pattern_is_on) { Eterm mfa[1]; mfa[0] = module; - (void) erts_set_trace_pattern(mfa, 1, + (void) erts_set_trace_pattern(0, mfa, 1, match_spec, meta_match_spec, 1, trace_pattern_flags, - meta_tracer_pid); + meta_tracer_pid, 1); } } @@ -688,10 +688,8 @@ check_process_code(Process* rp, Module* modp) Uint mod_size; BeamInstr* end; Eterm* sp; -#ifndef HYBRID /* FIND ME! */ struct erl_off_heap_header* oh; int done_gc = 0; -#endif #define INSIDE(a) (start <= (a) && (a) < end) @@ -750,7 +748,6 @@ check_process_code(Process* rp, Module* modp) * See if there are funs that refer to the old version of the module. */ -#ifndef HYBRID /* FIND ME! */ rescan: for (oh = MSO(rp).first; oh; oh = oh->next) { if (thing_subtag(oh->thing_word) == FUN_SUBTAG) { @@ -776,7 +773,6 @@ check_process_code(Process* rp, Module* modp) } } } -#endif /* * See if there are constants inside the module referenced by the process. 
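The rescan loop kept in check_process_code() above is the heart of the purge check: it walks the process's off-heap list and asks whether any fun still points into the address range of the old code. A simplified sketch of that traversal, with stand-in types and a hypothetical subtag test in place of the ERTS ones:

    /* Stand-ins for the ERTS off-heap structures. */
    struct erl_off_heap_header {
        unsigned long thing_word;
        struct erl_off_heap_header* next;
    };

    struct fun_thing {
        struct erl_off_heap_header oh;
        char* code_address;           /* where the fun's code lives */
    };

    /* Hypothetical stand-in for thing_subtag(w) == FUN_SUBTAG. */
    #define IS_FUN_THING(oh) (((oh)->thing_word & 0x3fUL) == 0x14UL)

    /* Nonzero if some fun on the list points into [start, end). */
    static int refers_to_old_code(struct erl_off_heap_header* first,
                                  char* start, char* end)
    {
        struct erl_off_heap_header* oh;
        for (oh = first; oh; oh = oh->next) {
            if (IS_FUN_THING(oh)) {
                struct fun_thing* funp = (struct fun_thing*) oh;
                if (start <= funp->code_address && funp->code_address < end)
                    return 1;
            }
        }
        return 0;
    }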
@@ -1010,12 +1006,11 @@ delete_code(Module* modp) if (ep->code[3] == (BeamInstr) em_apply_bif) { continue; } - else if (ep->code[3] == (BeamInstr) em_call_traced_function) { + else if (ep->code[3] == + (BeamInstr) BeamOp(op_i_generic_breakpoint)) { ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); ASSERT(modp->curr.num_traced_exports > 0); - --modp->curr.num_traced_exports; - MatchSetUnref(ep->match_prog_set); - ep->match_prog_set = NULL; + erts_clear_export_break(modp, ep->code+3); } else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler || !erts_initialized); @@ -1023,7 +1018,6 @@ delete_code(Module* modp) ep->addressv[code_ix] = ep->code+3; ep->code[3] = (BeamInstr) em_call_error_handler; ep->code[4] = 0; - ASSERT(ep->match_prog_set == NULL); } } diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index 26dadfbbc0..50d18b0347 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -44,67 +44,34 @@ #define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SIZ)) #define Free(P) erts_free(ERTS_ALC_T_BPD, (P)) -/* -** Doubly linked ring macros -*/ - -#define BpInit(a,i) \ -do { \ - (a)->orig_instr = (i); \ - (a)->next = (a); \ - (a)->prev = (a); \ -} while (0) - -#define BpSpliceNext(a,b) \ -do { \ - register BpData *c = (a), *d = (b), *e; \ - e = c->next->prev; \ - c->next->prev = d->next->prev; \ - d->next->prev = e; \ - e = c->next; \ - c->next = d->next; \ - d->next = e; \ -} while (0) - -#define BpSplicePrev(a,b) \ -do { \ - register BpData *c = (a), *d = (b), *e; \ - e = c->prev->next; \ - c->prev->next = d->prev->next; \ - d->prev->next = e; \ - e = c->prev; \ - c->prev = d->prev; \ - d->prev = e; \ -} while (0) - -#ifdef DEBUG -# define BpSingleton(a) ((a)->next == (a) && (a)->prev == (a)) +#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ + if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN) +# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ + if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) #else -# define BpSingleton(a) ((a)->next == (a)) +# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) +# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) #endif -#define BpInitAndSpliceNext(a,i,b) \ -do { \ - (a)->orig_instr = (i); \ - (a)->prev = (b); \ - (b)->next->prev = (a); \ - (a)->next = (b)->next; \ - (b)->next = (a); \ -} while (0) +#define ERTS_BPF_LOCAL_TRACE 0x01 +#define ERTS_BPF_META_TRACE 0x02 +#define ERTS_BPF_COUNT 0x04 +#define ERTS_BPF_COUNT_ACTIVE 0x08 +#define ERTS_BPF_DEBUG 0x10 +#define ERTS_BPF_TIME_TRACE 0x20 +#define ERTS_BPF_TIME_TRACE_ACTIVE 0x40 +#define ERTS_BPF_GLOBAL_TRACE 0x80 -#define BpInitAndSplicePrev(a,i,b) \ -do { \ - (a)->orig_instr = (i); \ - (a)->next = (b); \ - (b)->prev->next = (a); \ - (a)->prev = (b)->prev; \ - (b)->prev = (a); \ -} while (0) +#define ERTS_BPF_ALL 0xFF +extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */ +extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */ +extern Eterm beam_exception_trace[1]; /* OpCode(i_exception_trace) */ +extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ -#define BREAK_IS_BIF (1) -#define BREAK_IS_ERL (0) - +erts_smp_atomic32_t erts_active_bp_index; +erts_smp_atomic32_t erts_staging_bp_index; /* ************************************************************************* ** Local prototypes @@ -113,26 +80,30 @@ do { \ /* ** Helpers */ - -static int set_break(Eterm mfa[3], int specified, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op,
Eterm tracer_pid); -static int set_module_break(Module *modp, Eterm mfa[3], int specified, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid); -static int set_function_break(Module *modp, BeamInstr *pc, int bif, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid); - -static int clear_break(Eterm mfa[3], int specified, - BeamInstr break_op); -static int clear_module_break(Module *modp, Eterm mfa[3], int specified, - BeamInstr break_op); -static int clear_function_break(Module *modp, BeamInstr *pc, int bif, - BeamInstr break_op); - -static BpData *is_break(BeamInstr *pc, BeamInstr break_op); -static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op); +static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg, + int local, Binary* ms, Eterm tracer_pid); +static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags, + enum erts_break_op count_op, Eterm tracer_pid); +static void set_function_break(BeamInstr *pc, + Binary *match_spec, + Uint break_flags, + enum erts_break_op count_op, + Eterm tracer_pid); + +static void clear_break(BpFunctions* f, Uint break_flags); +static int clear_function_break(BeamInstr *pc, Uint break_flags); + +static BpDataTime* get_time_break(BeamInstr *pc); +static GenericBpData* check_break(BeamInstr *pc, Uint break_flags); +static void bp_time_diff(bp_data_time_item_t *item, + process_breakpoint_time_t *pbt, + Uint ms, Uint s, Uint us); + +static void bp_meta_unref(BpMetaPid* bmp); +static void bp_count_unref(BpCount* bcp); +static void bp_time_unref(BpDataTime* bdt); +static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local); +static void uninstall_breakpoint(BeamInstr* pc); /* bp_hash */ #define BP_TIME_ADD(pi0, pi1) \ @@ -152,240 +123,996 @@ static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_da static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem); static void bp_hash_delete(bp_time_hash_t *hash); - /* ************************************************************************* ** External interfaces */ -erts_smp_spinlock_t erts_bp_lock; - void erts_bp_init(void) { - erts_smp_spinlock_init(&erts_bp_lock, "breakpoints"); + erts_smp_atomic32_init_nob(&erts_active_bp_index, 0); + erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1); } -int -erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec, - Eterm tracer_pid) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, match_spec, - (BeamInstr) BeamOp(op_i_trace_breakpoint), 0, tracer_pid); + +void +erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified) +{ + ErtsCodeIndex code_ix = erts_active_code_ix(); + Uint max_funcs = 0; + int current; + int max_modules = module_code_size(code_ix); + int num_modules = 0; + Module* modp; + Module** module; + Uint i; + + module = (Module **) Alloc(max_modules*sizeof(Module *)); + num_modules = 0; + for (current = 0; current < max_modules; current++) { + modp = module_code(current, code_ix); + if (modp->curr.code) { + max_funcs += modp->curr.code[MI_NUM_FUNCTIONS]; + module[num_modules++] = modp; + } + } + + f->matching = (BpFunction *) Alloc(max_funcs*sizeof(BpFunction)); + i = 0; + for (current = 0; current < num_modules; current++) { + BeamInstr** code_base = (BeamInstr **) module[current]->curr.code; + BeamInstr* code; + Uint num_functions = (Uint) code_base[MI_NUM_FUNCTIONS]; + Uint fi; + + if (specified > 0) 
{ + if (mfa[0] != make_atom(module[current]->module)) { + /* Wrong module name */ + continue; + } + } + + for (fi = 0; fi < num_functions; fi++) { + Eterm* pc; + int wi; + + code = code_base[MI_FUNCTIONS+fi]; + ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); + pc = code+5; + if (erts_is_native_break(pc)) { + continue; + } + if (is_nil(code[3])) { /* Ignore BIF stub */ + continue; + } + for (wi = 0; + wi < specified && (Eterm) code[2+wi] == mfa[wi]; + wi++) { + /* Empty loop body */ + } + if (wi == specified) { + /* Store match */ + f->matching[i].pc = pc; + f->matching[i].mod = module[current]; + i++; + } + } + } + f->matched = i; + Free(module); } -int -erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec, - Eterm tracer_pid) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, match_spec, - (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid); +void +erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified) +{ + ErtsCodeIndex code_ix = erts_active_code_ix(); + int i; + int num_exps = export_list_size(code_ix); + int ne; + + f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction)); + ne = 0; + for (i = 0; i < num_exps; i++) { + Export* ep = export_list(i, code_ix); + BeamInstr* pc; + int j; + + for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) { + /* Empty loop body */ + } + if (j < specified) { + continue; + } + pc = ep->code+3; + if (ep->addressv[code_ix] == pc) { + if ((*pc == (BeamInstr) em_apply_bif || + *pc == (BeamInstr) em_call_error_handler)) { + continue; + } + ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)); + } else if (erts_is_native_break(ep->addressv[code_ix])) { + continue; + } + + f->matching[ne].pc = pc; + f->matching[ne].mod = erts_get_module(ep->code[0], code_ix); + ne++; + + } + f->matched = ne; } -/* set breakpoint data for on exported bif entry */ +void +erts_bp_free_matched_functions(BpFunctions* f) +{ + Free(f->matching); +} void -erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid); +erts_consolidate_bp_data(BpFunctions* f, int local) +{ + BpFunction* fs = f->matching; + Uint i; + Uint n = f->matched; + + ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked()); + + for (i = 0; i < n; i++) { + consolidate_bp_data(fs[i].mod, fs[i].pc, local); + } } -void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) { - set_function_break(NULL, pc, BREAK_IS_BIF, NULL, (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL); +void +erts_consolidate_bif_bp_data(void) +{ + int i; + + ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked()); + for (i = 0; i < BIF_SIZE; i++) { + Export *ep = bif_export[i]; + consolidate_bp_data(0, ep->code+3, 0); + } } -void erts_clear_time_trace_bif(BeamInstr *pc) { - clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_time_breakpoint)); +static void +consolidate_bp_data(Module* modp, BeamInstr* pc, int local) +{ + GenericBp* g = (GenericBp *) pc[-4]; + GenericBpData* src; + GenericBpData* dst; + Uint flags; + + if (g == 0) { + return; + } + + src = &g->data[erts_active_bp_ix()]; + dst = &g->data[erts_staging_bp_ix()]; + + /* + * The contents of the staging area may be out of date. + * Decrement all reference pointers. 
+ */ + + flags = dst->flags; + if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + MatchSetUnref(dst->local_ms); + } + if (flags & ERTS_BPF_META_TRACE) { + bp_meta_unref(dst->meta_pid); + MatchSetUnref(dst->meta_ms); + } + if (flags & ERTS_BPF_COUNT) { + bp_count_unref(dst->count); + } + if (flags & ERTS_BPF_TIME_TRACE) { + bp_time_unref(dst->time); + } + + /* + * If all flags are zero, deallocate all breakpoint data. + */ + + flags = dst->flags = src->flags; + if (flags == 0) { + if (modp) { + if (local) { + modp->curr.num_breakpoints--; + } else { + modp->curr.num_traced_exports--; + } + ASSERT(modp->curr.num_breakpoints >= 0); + ASSERT(modp->curr.num_traced_exports >= 0); + ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint)); + } + pc[-4] = 0; + Free(g); + return; + } + + /* + * Copy the active data to the staging area (making it ready + * for the next time it will be used). + */ + + if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + dst->local_ms = src->local_ms; + MatchSetRef(dst->local_ms); + } + if (flags & ERTS_BPF_META_TRACE) { + dst->meta_pid = src->meta_pid; + erts_refc_inc(&dst->meta_pid->refc, 1); + dst->meta_ms = src->meta_ms; + MatchSetRef(dst->meta_ms); + } + if (flags & ERTS_BPF_COUNT) { + dst->count = src->count; + erts_refc_inc(&dst->count->refc, 1); + } + if (flags & ERTS_BPF_TIME_TRACE) { + dst->time = src->time; + erts_refc_inc(&dst->time->refc, 1); + ASSERT(dst->time->hash); + } } -int -erts_set_debug_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, NULL, - (BeamInstr) BeamOp(op_i_debug_breakpoint), 0, NIL); +void +erts_commit_staged_bp(void) +{ + ErtsBpIndex staging = erts_staging_bp_ix(); + ErtsBpIndex active = erts_active_bp_ix(); + + erts_smp_atomic32_set_nob(&erts_active_bp_index, staging); + erts_smp_atomic32_set_nob(&erts_staging_bp_index, active); } -int -erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, NULL, - (BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL); +void +erts_install_breakpoints(BpFunctions* f) +{ + Uint i; + Uint n = f->matched; + BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint); + + for (i = 0; i < n; i++) { + BeamInstr* pc = f->matching[i].pc; + GenericBp* g = (GenericBp *) pc[-4]; + if (*pc != br && g) { + Module* modp = f->matching[i].mod; + + /* + * The breakpoint must be disabled in the active data + * (it will be enabled later by switching bp indices), + * and enabled in the staging data. + */ + ASSERT(g->data[erts_active_bp_ix()].flags == 0); + ASSERT(g->data[erts_staging_bp_ix()].flags != 0); + + /* + * The following write is not protected by any lock. We + * assume that the hardware guarantees that a write of an + * aligned word-size (or half-word) value is atomic + * (i.e. that other processes executing this code will not + * see a half pointer).
+ */ + *pc = br; + modp->curr.num_breakpoints++; + } + } } -int -erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, NULL, - (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL); +void +erts_uninstall_breakpoints(BpFunctions* f) +{ + Uint i; + Uint n = f->matched; + + for (i = 0; i < n; i++) { + BeamInstr* pc = f->matching[i].pc; + uninstall_breakpoint(pc); + } } -int -erts_clear_trace_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_trace_breakpoint)); +static void +uninstall_breakpoint(BeamInstr* pc) +{ + if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { + GenericBp* g = (GenericBp *) pc[-4]; + if (g->data[erts_active_bp_ix()].flags == 0) { + /* + * The following write is not protected by any lock. We + * assume that the hardware guarantees that a write of an + * aligned word-size (or half-word) value is atomic + * (i.e. that other processes executing this code will not + * see a half pointer). + */ + *pc = g->orig_instr; + } + } } -int -erts_clear_mtrace_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_mtrace_breakpoint)); +void +erts_set_trace_break(BpFunctions* f, Binary *match_spec) +{ + set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true); } void -erts_clear_mtrace_bif(BeamInstr *pc) { - clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_mtrace_breakpoint)); +erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid) +{ + set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid); } -int -erts_clear_debug_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_debug_breakpoint)); +void +erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local) +{ + Uint flags = local ?
ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE; + + set_function_break(pc, match_spec, flags, 0, NIL); } -int -erts_clear_count_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_count_breakpoint)); +void +erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) +{ + set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid); } -int -erts_clear_time_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_time_breakpoint)); +void +erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) +{ + set_function_break(pc, NULL, + ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE, + count_op, NIL); } -int -erts_clear_break(Eterm mfa[3], int specified) { +void +erts_clear_time_trace_bif(BeamInstr *pc) { + clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE); +} + +void +erts_set_debug_break(BpFunctions* f) { + set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL); +} + +void +erts_set_count_break(BpFunctions* f, enum erts_break_op count_op) +{ + set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE, + count_op, NIL); +} + +void +erts_set_time_break(BpFunctions* f, enum erts_break_op count_op) +{ + set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE, + count_op, NIL); +} + +void +erts_clear_trace_break(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_LOCAL_TRACE); +} + +void +erts_clear_call_trace_bif(BeamInstr *pc, int local) +{ + GenericBp* g = (GenericBp *) pc[-4]; + + if (g) { + Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE; + if (g->data[erts_staging_bp_ix()].flags & flags) { + clear_function_break(pc, flags); + } + } +} + +void +erts_clear_mtrace_break(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_META_TRACE); +} + +void +erts_clear_mtrace_bif(BeamInstr *pc) +{ + clear_function_break(pc, ERTS_BPF_META_TRACE); +} + +void +erts_clear_debug_break(BpFunctions* f) +{ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, 0); + clear_break(f, ERTS_BPF_DEBUG); +} + +void +erts_clear_count_break(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE); +} + +void +erts_clear_time_break(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE); +} + +void +erts_clear_all_breaks(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_ALL); } int erts_clear_module_break(Module *modp) { + BeamInstr** code_base; + Uint n; + Uint i; + ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); ASSERT(modp); - return clear_module_break(modp, NULL, 0, 0); + code_base = (BeamInstr **) modp->curr.code; + if (code_base == NULL) { + return 0; + } + n = (Uint) code_base[MI_NUM_FUNCTIONS]; + for (i = 0; i < n; ++i) { + BeamInstr* pc; + + pc = code_base[MI_FUNCTIONS+i] + 5; + if (erts_is_native_break(pc)) { + continue; + } + clear_function_break(pc, ERTS_BPF_ALL); + } + + erts_commit_staged_bp(); + + for (i = 0; i < n; ++i) { + BeamInstr* pc; + + pc = code_base[MI_FUNCTIONS+i] + 5; + if (erts_is_native_break(pc)) { + continue; + } + uninstall_breakpoint(pc); + consolidate_bp_data(modp, pc, 1); + ASSERT(pc[-4] == 0); + } + return n; } -int -erts_clear_function_break(Module *modp, BeamInstr *pc) { +void +erts_clear_export_break(Module* modp, BeamInstr* pc) +{ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - ASSERT(modp); - return clear_function_break(modp, pc, 
BREAK_IS_ERL, 0); + + clear_function_break(pc, ERTS_BPF_ALL); + erts_commit_staged_bp(); + *pc = (BeamInstr) 0; + consolidate_bp_data(modp, pc, 0); + ASSERT(pc[-4] == 0); } +BeamInstr +erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg) +{ + GenericBp* g; + GenericBpData* bp; + Uint bp_flags; + ErtsBpIndex ix = erts_active_bp_ix(); + + g = (GenericBp *) I[-4]; + bp = &g->data[ix]; + bp_flags = bp->flags; + ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0); + if (bp_flags & (ERTS_BPF_LOCAL_TRACE| + ERTS_BPF_GLOBAL_TRACE| + ERTS_BPF_TIME_TRACE_ACTIVE) && + !IS_TRACED_FL(c_p, F_TRACE_CALLS)) { + bp_flags &= ~(ERTS_BPF_LOCAL_TRACE| + ERTS_BPF_GLOBAL_TRACE| + ERTS_BPF_TIME_TRACE| + ERTS_BPF_TIME_TRACE_ACTIVE); + if (bp_flags == 0) { /* Quick exit */ + return g->orig_instr; + } + } + + if (bp_flags & ERTS_BPF_LOCAL_TRACE) { + ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0); + (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true); + } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) { + (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true); + } + + if (bp_flags & ERTS_BPF_META_TRACE) { + Eterm old_pid; + Eterm new_pid; + + old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid); + new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid); + if (new_pid != old_pid) { + erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid); + } + } + + if (bp_flags & ERTS_BPF_COUNT_ACTIVE) { + erts_smp_atomic_inc_nob(&bp->count->acount); + } + + if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) { + Eterm w; + erts_trace_time_call(c_p, I, bp->time); + w = (BeamInstr) *c_p->cp; + if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) || + w == (BeamInstr) BeamOp(op_return_trace) || + w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) { + Eterm* E = c_p->stop; + ASSERT(c_p->htop <= E && E <= c_p->hend); + if (E - 2 < c_p->htop) { + (void) erts_garbage_collect(c_p, 2, reg, I[-1]); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + } + E = c_p->stop; + ASSERT(c_p->htop <= E && E <= c_p->hend); + + E -= 2; + E[0] = make_cp(I); + E[1] = make_cp(c_p->cp); /* original return address */ + c_p->cp = beam_return_time_trace; + c_p->stop = E; + } + } + + if (bp_flags & ERTS_BPF_DEBUG) { + return (BeamInstr) BeamOp(op_i_debug_breakpoint); + } else { + return g->orig_instr; + } +} /* - * SMP NOTE: Process p may have become exiting on return! + * Entry point called by the trace wrap functions in erl_bif_wrap.c + * + * The trace wrap functions are themselves called through the export + * entries instead of the original BIF functions. 
*/ -BeamInstr -erts_trace_break(Process *p, BeamInstr *pc, Eterm *args, - Uint32 *ret_flags, Eterm *tracer_pid) { - Eterm tpid1, tpid2; - BpData **bds = (BpData **) (pc)[-4]; - BpDataTrace *bdt = NULL; +Eterm +erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) +{ + Eterm result; + Eterm (*func)(Process*, Eterm*, BeamInstr*); + Export* ep = bif_export[bif_index]; + Uint32 flags = 0, flags_meta = 0; + Eterm meta_tracer_pid = NIL; + int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif + * is actually in the + * export entry */ + BeamInstr *cp = p->cp; + GenericBp* g; + GenericBpData* bp; + Uint bp_flags = 0; + + ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + + g = (GenericBp *) ep->fake_op_func_info_for_hipe[1]; + if (g) { + bp = &g->data[erts_active_bp_ix()]; + bp_flags = bp->flags; + } - ASSERT(bds); - ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - bdt = (BpDataTrace *) bds[bp_sched2ix_proc(p)]; - ASSERT(bdt); - bdt = (BpDataTrace *) bdt->next; - ASSERT(bdt); - ASSERT(ret_flags); - ASSERT(tracer_pid); - - ErtsSmpBPLock(bdt); - tpid1 = tpid2 = bdt->tracer_pid; - ErtsSmpBPUnlock(bdt); - - *ret_flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args, - 1, &tpid2); - *tracer_pid = tpid2; - if (tpid1 != tpid2) { - ErtsSmpBPLock(bdt); - bdt->tracer_pid = tpid2; - ErtsSmpBPUnlock(bdt); - } - bds[bp_sched2ix_proc(p)] = (BpData *) bdt; - return bdt->orig_instr; + /* + * Make continuation pointer OK, it is not during direct BIF calls, + * but it is correct during apply of bif. + */ + if (!applying) { + p->cp = I; + } + if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) && + IS_TRACED_FL(p, F_TRACE_CALLS)) { + int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE); + flags = erts_call_trace(p, ep->code, bp->local_ms, args, + local, &p->tracer_proc); + } + if (bp_flags & ERTS_BPF_META_TRACE) { + Eterm tpid1, tpid2; + + tpid1 = tpid2 = + (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid); + flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args, + 0, &tpid2); + meta_tracer_pid = tpid2; + if (tpid1 != tpid2) { + erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2); + } + } + if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE && + IS_TRACED_FL(p, F_TRACE_CALLS)) { + BeamInstr *pc = (BeamInstr *)ep->code+3; + erts_trace_time_call(p, pc, bp->time); + } + + /* Restore original continuation pointer (if changed). */ + p->cp = cp; + + func = bif_table[bif_index].f; + + result = func(p, args, I); + + if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) { + BeamInstr i_return_trace = beam_return_trace[0]; + BeamInstr i_return_to_trace = beam_return_to_trace[0]; + BeamInstr i_return_time_trace = beam_return_time_trace[0]; + Eterm *cpp; + /* Maybe advance cp to skip trace stack frames */ + for (cpp = p->stop; ; cp = cp_val(*cpp++)) { + if (*cp == i_return_trace) { + /* Skip stack frame variables */ + while (is_not_CP(*cpp)) cpp++; + cpp += 2; /* Skip return_trace parameters */ + } else if (*cp == i_return_time_trace) { + /* Skip stack frame variables */ + while (is_not_CP(*cpp)) cpp++; + cpp += 1; /* Skip return_time_trace parameters */ + } else if (*cp == i_return_to_trace) { + /* A return_to trace message is going to be generated + * by normal means, so we do not have to. + */ + cp = NULL; + break; + } else break; + } + } + + /* Try to get these in the order + * they usually appear in normal code... 
*/ + if (is_non_value(result)) { + Uint reason = p->freason; + if (reason != TRAP) { + Eterm class; + Eterm value = p->fvalue; + DeclareTmpHeapNoproc(nocatch,3); + UseTmpHeapNoproc(3); + /* Expand error value like in handle_error() */ + if (reason & EXF_ARGLIST) { + Eterm *tp; + ASSERT(is_tuple(value)); + tp = tuple_val(value); + value = tp[1]; + } + if ((reason & EXF_THROWN) && (p->catches <= 0)) { + value = TUPLE2(nocatch, am_nocatch, value); + reason = EXC_ERROR; + } + /* Note: expand_error_value() could theoretically + * allocate on the heap, but not for any error + * returned by a BIF, and it would do no harm, + * just be annoying. + */ + value = expand_error_value(p, reason, value); + class = exception_tag[GET_EXC_CLASS(reason)]; + + if (flags_meta & MATCH_SET_EXCEPTION_TRACE) { + erts_trace_exception(p, ep->code, class, value, + &meta_tracer_pid); + } + if (flags & MATCH_SET_EXCEPTION_TRACE) { + erts_trace_exception(p, ep->code, class, value, + &p->tracer_proc); + } + if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) { + /* can only happen if(local)*/ + Eterm *ptr = p->stop; + ASSERT(is_CP(*ptr)); + ASSERT(ptr <= STACK_START(p)); + /* Search the nearest stack frame for a catch */ + while (++ptr < STACK_START(p)) { + if (is_CP(*ptr)) break; + if (is_catch(*ptr)) { + if (applying) { + /* Apply of BIF, cp is in calling function */ + if (cp) erts_trace_return_to(p, cp); + } else { + /* Direct bif call, I points into + * calling function */ + erts_trace_return_to(p, I); + } + } + } + } + UnUseTmpHeapNoproc(3); + if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) { + erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + p->trace_flags |= F_EXCEPTION_TRACE; + erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); + } + } + } else { + if (flags_meta & MATCH_SET_RX_TRACE) { + erts_trace_return(p, ep->code, result, &meta_tracer_pid); + } + /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */ + if (flags & MATCH_SET_RX_TRACE) { + erts_trace_return(p, ep->code, result, &p->tracer_proc); + } + if (flags & MATCH_SET_RETURN_TO_TRACE) { + /* can only happen if(local)*/ + if (applying) { + /* Apply of BIF, cp is in calling function */ + if (cp) erts_trace_return_to(p, cp); + } else { + /* Direct bif call, I points into calling function */ + erts_trace_return_to(p, I); + } + } + } + ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + return result; +} + +static Eterm +do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg, + int local, Binary* ms, Eterm tracer_pid) +{ + Eterm* cpp; + int return_to_trace = 0; + BeamInstr w; + BeamInstr *cp_save; + Uint32 flags; + Uint need = 0; + Eterm* E = c_p->stop; + + w = *c_p->cp; + if (w == (BeamInstr) BeamOp(op_return_trace)) { + cpp = &E[2]; + } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) { + return_to_trace = 1; + cpp = &E[0]; + } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) { + cpp = &E[0]; + } else { + cpp = NULL; + } + if (cpp) { + for (;;) { + BeamInstr w = *cp_val(*cpp); + if (w == (BeamInstr) BeamOp(op_return_trace)) { + cpp += 3; + } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) { + return_to_trace = 1; + cpp += 1; + } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) { + cpp += 2; + } else { + break; + } + } + cp_save = c_p->cp; + c_p->cp = (BeamInstr *) cp_val(*cpp); + ASSERT(is_CP(*cpp)); + } + ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid); + ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + if (cpp) { + c_p->cp = cp_save; + } + + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + if 
((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) { + need += 1; + } + if (flags & MATCH_SET_RX_TRACE) { + need += 3; + } + if (need) { + ASSERT(c_p->htop <= E && E <= c_p->hend); + if (E - need < c_p->htop) { + (void) erts_garbage_collect(c_p, need, reg, I[-1]); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + E = c_p->stop; + } + } + if (flags & MATCH_SET_RETURN_TO_TRACE && !return_to_trace) { + E -= 1; + ASSERT(c_p->htop <= E && E <= c_p->hend); + E[0] = make_cp(c_p->cp); + c_p->cp = (BeamInstr *) beam_return_to_trace; + } + if (flags & MATCH_SET_RX_TRACE) { + E -= 3; + ASSERT(c_p->htop <= E && E <= c_p->hend); + ASSERT(is_CP((Eterm) (UWord) (I - 3))); + ASSERT(am_true == tracer_pid || + is_internal_pid(tracer_pid) || is_internal_port(tracer_pid)); + E[2] = make_cp(c_p->cp); + E[1] = tracer_pid; + E[0] = make_cp(I - 3); /* We ARE at the beginning of an + instruction, + the funcinfo is above i. */ + c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ? + beam_exception_trace : beam_return_trace; + erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + c_p->trace_flags |= F_EXCEPTION_TRACE; + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + } + c_p->stop = E; + return tracer_pid; } +void +erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt) +{ + Uint ms,s,us; + process_breakpoint_time_t *pbt = NULL; + bp_data_time_item_t sitem, *item = NULL; + bp_time_hash_t *h = NULL; + BpDataTime *pbdt = NULL; + ASSERT(c_p); + ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING); -/* - * SMP NOTE: Process p may have become exiting on return! - */ -Uint32 -erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local, - Eterm *tracer_pid) { - BpData **bds = (BpData **) (pc)[-4]; - BpDataTrace *bdt = NULL; + /* get previous timestamp and breakpoint + * from the process psd */ + pbt = ERTS_PROC_GET_CALL_TIME(c_p); + get_sys_now(&ms, &s, &us); - ASSERT(tracer_pid); - if (bds) { - Eterm tpid1, tpid2; - Uint32 flags; - bdt = (BpDataTrace *)bds[bp_sched2ix_proc(p)]; + /* get pbt + * timestamp = t0 + * lookup bdt from code + * set ts0 to pbt + * add call count here? 
+ */ + if (pbt == 0) { + /* First call of process to instrumented function */ + pbt = Alloc(sizeof(process_breakpoint_time_t)); + (void *) ERTS_PROC_SET_CALL_TIME(c_p, ERTS_PROC_LOCK_MAIN, pbt); + } else { + ASSERT(pbt->pc); + /* add time to previous code */ + bp_time_diff(&sitem, pbt, ms, s, us); + sitem.pid = c_p->id; + sitem.count = 0; - ErtsSmpBPLock(bdt); - tpid1 = tpid2 = bdt->tracer_pid; - ErtsSmpBPUnlock(bdt); + /* previous breakpoint */ + pbdt = get_time_break(pbt->pc); - flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args, - local, &tpid2); - *tracer_pid = tpid2; - if (tpid1 != tpid2) { - ErtsSmpBPLock(bdt); - bdt->tracer_pid = tpid2; - ErtsSmpBPUnlock(bdt); + /* if null then the breakpoint was removed */ + if (pbdt) { + h = &(pbdt->hash[bp_sched2ix_proc(c_p)]); + + ASSERT(h); + ASSERT(h->item); + + item = bp_hash_get(h, &sitem); + if (!item) { + item = bp_hash_put(h, &sitem); + } else { + BP_TIME_ADD(item, &sitem); + } } - return flags; } - *tracer_pid = NIL; - return 0; + + /* Add count to this code */ + sitem.pid = c_p->id; + sitem.count = 1; + sitem.s_time = 0; + sitem.us_time = 0; + + /* this breakpoint */ + ASSERT(bdt); + h = &(bdt->hash[bp_sched2ix_proc(c_p)]); + + ASSERT(h); + ASSERT(h->item); + + item = bp_hash_get(h, &sitem); + if (!item) { + item = bp_hash_put(h, &sitem); + } else { + BP_TIME_ADD(item, &sitem); + } + + pbt->pc = I; + pbt->ms = ms; + pbt->s = s; + pbt->us = us; } +void +erts_trace_time_return(Process *p, BeamInstr *pc) +{ + Uint ms,s,us; + process_breakpoint_time_t *pbt = NULL; + bp_data_time_item_t sitem, *item = NULL; + bp_time_hash_t *h = NULL; + BpDataTime *pbdt = NULL; + + ASSERT(p); + ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING); + + /* get previous timestamp and breakpoint + * from the process psd */ + + pbt = ERTS_PROC_GET_CALL_TIME(p); + get_sys_now(&ms,&s,&us); + + /* get pbt + * lookup bdt from code + * timestamp = t1 + * get ts0 from pbt + * get item from bdt->hash[bp_hash(p->id)] + * ack diff (t1, t0) to item + */ + + if (pbt) { + /* might have been removed due to + * trace_pattern(false) + */ + ASSERT(pbt->pc); + + bp_time_diff(&sitem, pbt, ms, s, us); + sitem.pid = p->id; + sitem.count = 0; + + /* previous breakpoint */ + pbdt = get_time_break(pbt->pc); + + /* beware, the trace_pattern might have been removed */ + if (pbdt) { + h = &(pbdt->hash[bp_sched2ix_proc(p)]); + ASSERT(h); + ASSERT(h->item); + + item = bp_hash_get(h, &sitem); + if (!item) { + item = bp_hash_put(h, &sitem); + } else { + BP_TIME_ADD(item, &sitem); + } + } + + pbt->pc = pc; + pbt->ms = ms; + pbt->s = s; + pbt->us = us; + } +} int -erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) { - BpDataTrace *bdt = - (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_trace_breakpoint)); - - if (bdt) { +erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local) +{ + Uint flags = local ? 
ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE; + GenericBpData* bp = check_break(pc, flags); + + if (bp) { if (match_spec_ret) { - *match_spec_ret = bdt->match_spec; - } - if (tracer_pid_ret) { - ErtsSmpBPLock(bdt); - *tracer_pid_ret = bdt->tracer_pid; - ErtsSmpBPUnlock(bdt); + *match_spec_ret = bp->local_ms; } - return !0; + return 1; } return 0; } int -erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) { - BpDataTrace *bdt = - (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_mtrace_breakpoint)); +erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, + Eterm *tracer_pid_ret) +{ + GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE); - if (bdt) { + if (bp) { if (match_spec_ret) { - *match_spec_ret = bdt->match_spec; + *match_spec_ret = bp->meta_ms; } if (tracer_pid_ret) { - ErtsSmpBPLock(bdt); - *tracer_pid_ret = bdt->tracer_pid; - ErtsSmpBPUnlock(bdt); + *tracer_pid_ret = + (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid); } - return !0; + return 1; } return 0; } @@ -402,15 +1129,15 @@ erts_is_native_break(BeamInstr *pc) { } int -erts_is_count_break(BeamInstr *pc, Sint *count_ret) { - BpDataCount *bdc = - (BpDataCount *) is_break(pc, (BeamInstr) BeamOp(op_i_count_breakpoint)); +erts_is_count_break(BeamInstr *pc, Uint *count_ret) +{ + GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT); - if (bdc) { + if (bp) { if (count_ret) { - *count_ret = (Sint) erts_smp_atomic_read_nob(&bdc->acount); + *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount); } - return !0; + return 1; } return 0; } @@ -421,7 +1148,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) { Uint size; Eterm *hp, t; bp_data_time_item_t *item = NULL; - BpDataTime *bdt = (BpDataTime *) is_break(pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); + BpDataTime *bdt = get_time_break(pc); if (bdt) { if (retval) { @@ -464,7 +1191,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) { } bp_hash_delete(&hash); } - return !0; + return 1; } return 0; @@ -655,7 +1382,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) { * the previous breakpoint. */ - pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); + pbdt = get_time_break(pbt->pc); if (pbdt) { get_sys_now(&ms,&s,&us); bp_time_diff(&sitem, pbt, ms, s, us); @@ -693,671 +1420,259 @@ void erts_schedule_time_break(Process *p, Uint schedule) { } /* pbt */ } -/* call_time breakpoint - * Accumulated times are added to the previous bp, - * not the current one. The current one is saved - * for future reference. - * The previous breakpoint is stored in the process it self, the psd. - * We do not need to store in a stack frame. - * There is no need for locking, each thread has its own - * area in each bp to save data. - * Since we need to diffrentiate between processes for each bp, - * every bp has a hash (per thread) to process-bp statistics. - * - egil - */ - -void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type) { - Uint ms,s,us; - process_breakpoint_time_t *pbt = NULL; - bp_data_time_item_t sitem, *item = NULL; - bp_time_hash_t *h = NULL; - BpDataTime *pbdt = NULL; - - ASSERT(p); - ASSERT(ERTS_PSFLG_RUNNING & erts_smp_atomic32_read_acqb(&p->state)); - - /* get previous timestamp and breakpoint - * from the process psd */ - - pbt = ERTS_PROC_GET_CALL_TIME(p); - get_sys_now(&ms,&s,&us); - - switch(type) { - /* get pbt - * timestamp = t0 - * lookup bdt from code - * set ts0 to pbt - * add call count here? 
- */ - case ERTS_BP_CALL_TIME_CALL: - case ERTS_BP_CALL_TIME_TAIL_CALL: - - if (pbt) { - ASSERT(pbt->pc); - /* add time to previous code */ - bp_time_diff(&sitem, pbt, ms, s, us); - sitem.pid = p->id; - sitem.count = 0; - - /* previous breakpoint */ - pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); - - /* if null then the breakpoint was removed */ - if (pbdt) { - h = &(pbdt->hash[bp_sched2ix_proc(p)]); - - ASSERT(h); - ASSERT(h->item); - - item = bp_hash_get(h, &sitem); - if (!item) { - item = bp_hash_put(h, &sitem); - } else { - BP_TIME_ADD(item, &sitem); - } - } - - } else { - /* first call of process to instrumented function */ - pbt = Alloc(sizeof(process_breakpoint_time_t)); - (void *) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt); - } - /* add count to this code */ - sitem.pid = p->id; - sitem.count = 1; - sitem.s_time = 0; - sitem.us_time = 0; - - /* this breakpoint */ - ASSERT(bdt); - h = &(bdt->hash[bp_sched2ix_proc(p)]); - - ASSERT(h); - ASSERT(h->item); - - item = bp_hash_get(h, &sitem); - if (!item) { - item = bp_hash_put(h, &sitem); - } else { - BP_TIME_ADD(item, &sitem); - } - - pbt->pc = pc; - pbt->ms = ms; - pbt->s = s; - pbt->us = us; - break; - - case ERTS_BP_CALL_TIME_RETURN: - /* get pbt - * lookup bdt from code - * timestamp = t1 - * get ts0 from pbt - * get item from bdt->hash[bp_hash(p->id)] - * ack diff (t1, t0) to item - */ - - if(pbt) { - /* might have been removed due to - * trace_pattern(false) - */ - ASSERT(pbt->pc); - - bp_time_diff(&sitem, pbt, ms, s, us); - sitem.pid = p->id; - sitem.count = 0; - - /* previous breakpoint */ - pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); - - /* beware, the trace_pattern might have been removed */ - if (pbdt) { - h = &(pbdt->hash[bp_sched2ix_proc(p)]); - - ASSERT(h); - ASSERT(h->item); - - item = bp_hash_get(h, &sitem); - if (!item) { - item = bp_hash_put(h, &sitem); - } else { - BP_TIME_ADD(item, &sitem); - } - } - - pbt->pc = pc; - pbt->ms = ms; - pbt->s = s; - pbt->us = us; - } - break; - default : - ASSERT(0); - /* will never happen */ - break; - } -} - - /* ************************************************************************* ** Local helpers */ -static int set_break(Eterm mfa[3], int specified, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid) +static void +set_break(BpFunctions* f, Binary *match_spec, Uint break_flags, + enum erts_break_op count_op, Eterm tracer_pid) { - Module *modp; - int num_processed = 0; - ErtsCodeIndex code_ix = erts_active_code_ix(); - if (!specified) { - /* Find and process all modules in the system... 
*/ - int current; - int last = module_code_size(code_ix); - for (current = 0; current < last; current++) { - modp = module_code(current, code_ix); - ASSERT(modp != NULL); - num_processed += - set_module_break(modp, mfa, specified, - match_spec, break_op, count_op, - tracer_pid); - } - } else { - /* Process a single module */ - if ((modp = erts_get_module(mfa[0], code_ix)) != NULL) { - num_processed += - set_module_break(modp, mfa, specified, - match_spec, break_op, count_op, - tracer_pid); - } - } - return num_processed; -} - -static int set_module_break(Module *modp, Eterm mfa[3], int specified, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid) { - BeamInstr** code_base; - BeamInstr* code_ptr; - int num_processed = 0; - Uint i,n; + Uint i; + Uint n; - ASSERT(break_op); - ASSERT(modp); - code_base = (BeamInstr **) modp->curr.code; - if (code_base == NULL) { - return 0; + n = f->matched; + for (i = 0; i < n; i++) { + BeamInstr* pc = f->matching[i].pc; + set_function_break(pc, match_spec, break_flags, + count_op, tracer_pid); } - n = (BeamInstr) code_base[MI_NUM_FUNCTIONS]; - for (i = 0; i < n; ++i) { - code_ptr = code_base[MI_FUNCTIONS+i]; - ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) && - (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) { - BeamInstr *pc = code_ptr+5; - - num_processed += - set_function_break(modp, pc, BREAK_IS_ERL, match_spec, - break_op, count_op, tracer_pid); - } - } - return num_processed; } -static int set_function_break(Module *modp, BeamInstr *pc, int bif, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid) { - - BeamInstr **code_base = NULL; - BpData *bd, **r, ***rs; - size_t size; - Uint ix = 0; - - if (bif == BREAK_IS_ERL) { - code_base = (BeamInstr **)modp->curr.code; - ASSERT(code_base); - ASSERT(code_base <= (BeamInstr **)pc); - ASSERT((BeamInstr **)pc < code_base + (modp->curr.code_length/sizeof(BeamInstr *))); - } else { - ASSERT(*pc == (BeamInstr) em_apply_bif); - ASSERT(modp == NULL); +static void +set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags, + enum erts_break_op count_op, Eterm tracer_pid) +{ + GenericBp* g; + GenericBpData* bp; + Uint common; + ErtsBpIndex ix = erts_staging_bp_ix(); + + ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked()); + g = (GenericBp *) pc[-4]; + if (g == 0) { + int i; + if (count_op == erts_break_reset || count_op == erts_break_stop) { + /* Do not insert a new breakpoint */ + return; + } + g = Alloc(sizeof(GenericBp)); + g->orig_instr = *pc; + for (i = 0; i < ERTS_NUM_BP_IX; i++) { + g->data[i].flags = 0; + } + pc[-4] = (BeamInstr) g; } + bp = &g->data[ix]; /* - * Currently no trace support for native code. + * If we are changing an existing breakpoint, clean up old data. 
*/ - if (erts_is_native_break(pc)) { - return 0; - } - /* Do not allow two breakpoints of the same kind */ - if ( (bd = is_break(pc, break_op))) { - if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) - || break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) { - - BpDataTrace *bdt = (BpDataTrace *) bd; - Binary *old_match_spec; - - /* Update match spec and tracer */ - MatchSetRef(match_spec); - ErtsSmpBPLock(bdt); - old_match_spec = bdt->match_spec; - bdt->match_spec = match_spec; - bdt->tracer_pid = tracer_pid; - ErtsSmpBPUnlock(bdt); - MatchSetUnref(old_match_spec); - } else { - BpDataCount *bdc = (BpDataCount *) bd; - erts_aint_t count = 0; - erts_aint_t res = 0; - - ASSERT(! match_spec); - ASSERT(is_nil(tracer_pid)); - - if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) { - if (count_op == erts_break_stop) { - count = erts_smp_atomic_read_nob(&bdc->acount); - if (count >= 0) { - while(1) { - res = erts_smp_atomic_cmpxchg_nob(&bdc->acount, -count - 1, count); - if ((res == count) || count < 0) break; - count = res; - } - } - } else { - /* Reset call counter */ - erts_smp_atomic_set_nob(&bdc->acount, 0); - } - - } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) { - BpDataTime *bdt = (BpDataTime *) bd; - Uint i = 0; - - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - - if (count_op == erts_break_stop) { - bdt->pause = 1; - } else { - bdt->pause = 0; - for (i = 0; i < bdt->n; i++) { - bp_hash_delete(&(bdt->hash[i])); - bp_hash_init(&(bdt->hash[i]), 32); - } - } - } else { - ASSERT (! count_op); - } - } - return 1; - } - if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) || - break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) { - size = sizeof(BpDataTrace); - } else { - ASSERT(! match_spec); - ASSERT(is_nil(tracer_pid)); - if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) { - if (count_op == erts_break_reset || count_op == erts_break_stop) { - /* Do not insert a new breakpoint */ - return 1; - } - size = sizeof(BpDataCount); - } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) { - if (count_op == erts_break_reset || count_op == erts_break_stop) { - /* Do not insert a new breakpoint */ - return 1; - } - size = sizeof(BpDataTime); + common = break_flags & bp->flags; + if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + MatchSetUnref(bp->local_ms); + } else if (common & ERTS_BPF_META_TRACE) { + MatchSetUnref(bp->meta_ms); + bp_meta_unref(bp->meta_pid); + } else if (common & ERTS_BPF_COUNT) { + if (count_op == erts_break_stop) { + bp->flags &= ~ERTS_BPF_COUNT_ACTIVE; } else { - ASSERT(! count_op); - ASSERT(break_op == (BeamInstr) BeamOp(op_i_debug_breakpoint)); - size = sizeof(BpDataDebug); + bp->flags |= ERTS_BPF_COUNT_ACTIVE; + erts_smp_atomic_set_nob(&bp->count->acount, 0); } - } - rs = (BpData ***) (pc-4); - if (! *rs) { - size_t ssize = sizeof(BeamInstr) * erts_no_schedulers; - *rs = (BpData **) Alloc(ssize); - sys_memzero(*rs, ssize); - } - - r = &((*rs)[0]); - - if (! 
*r) { - ASSERT(*pc != (BeamInstr) BeamOp(op_i_trace_breakpoint)); - ASSERT(*pc != (BeamInstr) BeamOp(op_i_mtrace_breakpoint)); - ASSERT(*pc != (BeamInstr) BeamOp(op_i_debug_breakpoint)); - ASSERT(*pc != (BeamInstr) BeamOp(op_i_count_breakpoint)); - ASSERT(*pc != (BeamInstr) BeamOp(op_i_time_breakpoint)); - /* First breakpoint; create singleton ring */ - bd = Alloc(size); - BpInit(bd, *pc); - *r = bd; - if (bif == BREAK_IS_ERL) { - *pc = break_op; - } - } else { - ASSERT(*pc == (BeamInstr) BeamOp(op_i_trace_breakpoint) || - *pc == (BeamInstr) BeamOp(op_i_mtrace_breakpoint) || - *pc == (BeamInstr) BeamOp(op_i_debug_breakpoint) || - *pc == (BeamInstr) BeamOp(op_i_time_breakpoint) || - *pc == (BeamInstr) BeamOp(op_i_count_breakpoint) || - *pc == (BeamInstr) em_apply_bif); - if (*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint)) { - /* Debug bp must be last, so if it is also first; - * it must be singleton. */ - ASSERT(BpSingleton(*r)); - /* Insert new bp first in the ring, i.e second to last. */ - bd = Alloc(size); - BpInitAndSpliceNext(bd, *pc, *r); - if (bif == BREAK_IS_ERL) { - *pc = break_op; - } - } else if ((*r)->prev->orig_instr - == (BeamInstr) BeamOp(op_i_debug_breakpoint)) { - /* Debug bp last in the ring; insert new second to last. */ - bd = Alloc(size); - BpInitAndSplicePrev(bd, (*r)->prev->orig_instr, *r); - (*r)->prev->orig_instr = break_op; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + return; + } else if (common & ERTS_BPF_TIME_TRACE) { + BpDataTime* bdt = bp->time; + Uint i = 0; + + if (count_op == erts_break_stop) { + bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE; } else { - /* Just insert last in the ring */ - bd = Alloc(size); - BpInitAndSpliceNext(bd, (*r)->orig_instr, *r); - (*r)->orig_instr = break_op; - *r = bd; + bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE; + for (i = 0; i < bdt->n; i++) { + bp_hash_delete(&(bdt->hash[i])); + bp_hash_init(&(bdt->hash[i]), 32); + } } - } - for (ix = 1; ix < erts_no_schedulers; ++ix) { - (*rs)[ix] = (*rs)[0]; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + return; } - bd->this_instr = break_op; - /* Init the bp type specific data */ - if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) || - break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) { - - BpDataTrace *bdt = (BpDataTrace *) bd; - - MatchSetRef(match_spec); - bdt->match_spec = match_spec; - bdt->tracer_pid = tracer_pid; - } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) { - BpDataTime *bdt = (BpDataTime *) bd; - Uint i = 0; - - bdt->pause = 0; - bdt->n = erts_no_schedulers; - bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n)); + /* + * Initialize the new breakpoint data. 
+ */ + if (break_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + MatchSetRef(match_spec); + bp->local_ms = match_spec; + } else if (break_flags & ERTS_BPF_META_TRACE) { + BpMetaPid* bmp; + MatchSetRef(match_spec); + bp->meta_ms = match_spec; + bmp = Alloc(sizeof(BpMetaPid)); + erts_refc_init(&bmp->refc, 1); + erts_smp_atomic_init_nob(&bmp->pid, tracer_pid); + bp->meta_pid = bmp; + } else if (break_flags & ERTS_BPF_COUNT) { + BpCount* bcp; + + ASSERT((bp->flags & ERTS_BPF_COUNT) == 0); + bcp = Alloc(sizeof(BpCount)); + erts_refc_init(&bcp->refc, 1); + erts_smp_atomic_init_nob(&bcp->acount, 0); + bp->count = bcp; + } else if (break_flags & ERTS_BPF_TIME_TRACE) { + BpDataTime* bdt; + int i; + + ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0); + bdt = Alloc(sizeof(BpDataTime)); + erts_refc_init(&bdt->refc, 1); + bdt->n = erts_no_schedulers; + bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n)); for (i = 0; i < bdt->n; i++) { bp_hash_init(&(bdt->hash[i]), 32); } - } else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) { - BpDataCount *bdc = (BpDataCount *) bd; - erts_smp_atomic_init_nob(&bdc->acount, 0); + bp->time = bdt; } - if (bif == BREAK_IS_ERL) { - ++modp->curr.num_breakpoints; + bp->flags |= break_flags; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); +} + +static void +clear_break(BpFunctions* f, Uint break_flags) +{ + Uint i; + Uint n; + + n = f->matched; + for (i = 0; i < n; i++) { + BeamInstr* pc = f->matching[i].pc; + clear_function_break(pc, break_flags); } - return 1; } -static int clear_break(Eterm mfa[3], int specified, BeamInstr break_op) +static int +clear_function_break(BeamInstr *pc, Uint break_flags) { - ErtsCodeIndex code_ix = erts_active_code_ix(); - int num_processed = 0; - Module *modp; + GenericBp* g; + GenericBpData* bp; + Uint common; + ErtsBpIndex ix = erts_staging_bp_ix(); - if (!specified) { - /* Iterate over all modules */ - int current; - int last = module_code_size(code_ix); + ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked()); - for (current = 0; current < last; current++) { - modp = module_code(current, code_ix); - ASSERT(modp != NULL); - num_processed += clear_module_break(modp, mfa, specified, break_op); - } - } else { - /* Process a single module */ - if ((modp = erts_get_module(mfa[0], code_ix)) != NULL) { - num_processed += - clear_module_break(modp, mfa, specified, break_op); - } + if ((g = (GenericBp *) pc[-4]) == 0) { + return 1; } - return num_processed; -} -static int clear_module_break(Module *m, Eterm mfa[3], int specified, - BeamInstr break_op) { - BeamInstr** code_base; - BeamInstr* code_ptr; - int num_processed = 0; - Uint i; - BeamInstr n; - - ASSERT(m); - code_base = (BeamInstr **) m->curr.code; - if (code_base == NULL) { - return 0; + bp = &g->data[ix]; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + common = bp->flags & break_flags; + bp->flags &= ~break_flags; + if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + MatchSetUnref(bp->local_ms); } - n = (BeamInstr) code_base[MI_NUM_FUNCTIONS]; - for (i = 0; i < n; ++i) { - code_ptr = code_base[MI_FUNCTIONS+i]; - if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) && - (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) { - BeamInstr *pc = code_ptr + 5; - - num_processed += - clear_function_break(m, pc, BREAK_IS_ERL, break_op); - } + if (common & ERTS_BPF_META_TRACE) { + MatchSetUnref(bp->meta_ms); + } + if (common & ERTS_BPF_COUNT) { + ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0); + bp_count_unref(bp->count); + } + if (common & ERTS_BPF_TIME_TRACE) { + 
ASSERT((bp->flags & ERTS_BPF_TIME_TRACE_ACTIVE) == 0); + bp_time_unref(bp->time); } - return num_processed; -} -static int clear_function_break(Module *m, BeamInstr *pc, int bif, BeamInstr break_op) { - BpData *bd; - Uint ix = 0; - BeamInstr **code_base = NULL; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + return 1; +} - if (bif == BREAK_IS_ERL) { - code_base = (BeamInstr **)m->curr.code; - ASSERT(code_base); - ASSERT(code_base <= (BeamInstr **)pc); - ASSERT((BeamInstr **)pc < code_base + (m->curr.code_length/sizeof(BeamInstr *))); - } else { - ASSERT(*pc == (BeamInstr) em_apply_bif); - ASSERT(m == NULL); +static void +bp_meta_unref(BpMetaPid* bmp) +{ + if (erts_refc_dectest(&bmp->refc, 0) <= 0) { + Free(bmp); } +} - /* - * Currently no trace support for native code. - */ - if (erts_is_native_break(pc)) { - return 0; +static void +bp_count_unref(BpCount* bcp) +{ + if (erts_refc_dectest(&bcp->refc, 0) <= 0) { + Free(bcp); } +} - while ( (bd = is_break(pc, break_op))) { - /* Remove all breakpoints of this type. - * There should be only one of each type, - * but break_op may be 0 which matches any type. +static void +bp_time_unref(BpDataTime* bdt) +{ + if (erts_refc_dectest(&bdt->refc, 0) <= 0) { + Uint i = 0; + Uint j = 0; + Process *h_p = NULL; + bp_data_time_item_t* item = NULL; + process_breakpoint_time_t* pbt = NULL; + + /* remove all psd associated with the hash + * and then delete the hash. + * ... sigh ... */ - BeamInstr op; - BpData ***rs = (BpData ***) (pc - 4); - BpData **r = NULL; -#ifdef DEBUG - for (ix = 1; ix < erts_no_schedulers; ++ix) { - ASSERT((*rs)[ix] == (*rs)[0]); - } -#endif - - r = &((*rs)[0]); - - ASSERT(*r); - /* Find opcode for this breakpoint */ - if (break_op) { - op = break_op; - } else { - if (bd == (*r)->next) { - /* First breakpoint in ring */ - op = *pc; - } else { - op = bd->prev->orig_instr; - } - } - if (BpSingleton(bd)) { - ASSERT(*r == bd); - /* Only one breakpoint to remove */ - if (bif == BREAK_IS_ERL) { - *pc = bd->orig_instr; - } - Free(*rs); - *rs = NULL; - } else { - BpData *bd_prev = bd->prev; - - BpSpliceNext(bd, bd_prev); - ASSERT(BpSingleton(bd)); - if (bd == *r) { - /* We removed the last breakpoint in the ring */ - *r = bd_prev; - bd_prev->orig_instr = bd->orig_instr; - } else if (bd_prev == *r) { - /* We removed the first breakpoint in the ring */ - if (bif == BREAK_IS_ERL) { - *pc = bd->orig_instr; - } - } else { - bd_prev->orig_instr = bd->orig_instr; - } - } - if (op == (BeamInstr) BeamOp(op_i_trace_breakpoint) || - op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) { - - BpDataTrace *bdt = (BpDataTrace *) bd; - MatchSetUnref(bdt->match_spec); - } - if (op == (BeamInstr) BeamOp(op_i_time_breakpoint)) { - BpDataTime *bdt = (BpDataTime *) bd; - Uint i = 0; - Uint j = 0; - Process *h_p = NULL; - bp_data_time_item_t *item = NULL; - process_breakpoint_time_t *pbt = NULL; - - /* remove all psd associated with the hash - * and then delete the hash. - * ... sigh ... 
- */ - - for( i = 0; i < bdt->n; ++i) { - if (bdt->hash[i].used) { - for (j = 0; j < bdt->hash[i].n; ++j) { - item = &(bdt->hash[i].item[j]); - if (item->pid != NIL) { - h_p = erts_proc_lookup(item->pid); - if (h_p) { - pbt = ERTS_PROC_SET_CALL_TIME(h_p, ERTS_PROC_LOCK_MAIN, NULL); - if (pbt) { - Free(pbt); - } + for (i = 0; i < bdt->n; ++i) { + if (bdt->hash[i].used) { + for (j = 0; j < bdt->hash[i].n; ++j) { + item = &(bdt->hash[i].item[j]); + if (item->pid != NIL) { + h_p = erts_pid2proc(NULL, 0, item->pid, + ERTS_PROC_LOCK_MAIN); + if (h_p) { + pbt = ERTS_PROC_SET_CALL_TIME(h_p, + ERTS_PROC_LOCK_MAIN, + NULL); + if (pbt) { + Free(pbt); } + erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN); } } } - bp_hash_delete(&(bdt->hash[i])); } - Free(bdt->hash); - bdt->hash = NULL; - bdt->n = 0; + bp_hash_delete(&(bdt->hash[i])); } - Free(bd); - if (bif == BREAK_IS_ERL) { - ASSERT(m->curr.num_breakpoints > 0); - --m->curr.num_breakpoints; - } - if (*rs) { - for (ix = 1; ix < erts_no_schedulers; ++ix) { - (*rs)[ix] = (*rs)[0]; - } - } - } /* while bd != NULL */ - return 1; + Free(bdt->hash); + Free(bdt); + } } - - -/* -** Searches (linear forward) the breakpoint ring for a specified opcode -** and returns a pointer to the breakpoint data structure or NULL if -** not found. If the specified opcode is 0, the last breakpoint is -** returned. The program counter must point to the first executable -** (breakpoint) instruction of the function. -*/ - -BpData *erts_get_time_break(Process *p, BeamInstr *pc) { - return get_break(p, pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); +static BpDataTime* +get_time_break(BeamInstr *pc) +{ + GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE); + return bp ? bp->time : 0; } -static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) { - ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - if (! erts_is_native_break(pc)) { - BpData **rs = (BpData **) pc[-4]; - BpData *bd = NULL, *ebd = NULL; - - if (! rs) { - return NULL; - } - - bd = ebd = rs[bp_sched2ix_proc(p)]; - ASSERT(bd); - if (bd->this_instr == break_op) { - return bd; - } - - bd = bd->next; - while (bd != ebd) { - ASSERT(bd); - if (bd->this_instr == break_op) { - ASSERT(bd); - return bd; - } - bd = bd->next; - } - } - return NULL; -} +static GenericBpData* +check_break(BeamInstr *pc, Uint break_flags) +{ + GenericBp* g = (GenericBp *) pc[-4]; -static BpData *is_break(BeamInstr *pc, BeamInstr break_op) { - BpData **rs; - BpData *bd = NULL, *ebd = NULL; ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - if (erts_is_native_break(pc)) { - return NULL; - } - rs = (BpData **) pc[-4]; - if (! rs) { - return NULL; - } - - bd = ebd = rs[erts_bp_sched2ix()]; - ASSERT(bd); - if ( (break_op == 0) || (bd->this_instr == break_op)) { - return bd; + return 0; } - - bd = bd->next; - while (bd != ebd) { - ASSERT(bd); - if (bd->this_instr == break_op) { - ASSERT(bd); - return bd; + if (g) { + GenericBpData* bp = &g->data[erts_active_bp_ix()]; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + if (bp->flags & break_flags) { + return bp; } - bd = bd->next; } - return NULL; + return 0; } diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h index 167069552f..28aaaa462a 100644 --- a/erts/emulator/beam/beam_bp.h +++ b/erts/emulator/beam/beam_bp.h @@ -25,77 +25,6 @@ #include "erl_vm.h" #include "global.h" - - -/* A couple of gotchas: - * - * The breakpoint structure from BeamInstr, - * In beam_emu where the instruction counter pointer, I (or pc), - * points to the *current* instruction. 
At that time, if the instruction - * is a breakpoint instruction the pc looks like the following, - * - * I[-5] | op_i_func_info_IaaI | scheduler specific entries - * I[-4] | BpData** bpa | --> | BpData * bdas1 | ... | BpData * bdasN | - * I[-3] | Tagged Module | | | - * I[-2] | Tagged Function | V V - * I[-1] | Arity | BpData -> BpData -> BpData -> BpData - * I[0] | The bp instruction | ^ * the bp wheel * | - * |------------------------------ - * - * Common struct to all bp_data_* - * - * 1) The type of bp_data structure in the ring is deduced from the - * orig_instr field of the structure _before_ in the ring, except for - * the first structure in the ring that has its instruction in - * pc[0] of the code to execute. - * This is valid as long as you don't search for the function while it is - * being executed by something else. Or is in the middle of its rotation for - * any other reason. - * A key, the bp beam instruction, is included for this reason. - * - * 2) pc[-4][sched_id - 1] points to the _last_ structure in the ring before the - * breakpoints are being executed. - * - * So, as an example, when a breakpointed function starts to execute, - * the first instruction that is a breakpoint instruction at pc[0] finds - * its data at ((BpData **) pc[-4][sched_id - 1])->next and has to cast that pointer - * to the correct bp_data type. -*/ - -typedef struct bp_data { - struct bp_data *next; /* Doubly linked ring pointers */ - struct bp_data *prev; /* -"- */ - BeamInstr orig_instr; /* The original instruction to execute */ - BeamInstr this_instr; /* key */ -} BpData; -/* -** All the following bp_data_.. structs must begin the same way -*/ - -typedef struct bp_data_trace { - struct bp_data *next; - struct bp_data *prev; - BeamInstr orig_instr; - BeamInstr this_instr; /* key */ - Binary *match_spec; - Eterm tracer_pid; -} BpDataTrace; - -typedef struct bp_data_debug { - struct bp_data *next; - struct bp_data *prev; - BeamInstr orig_instr; - BeamInstr this_instr; /* key */ -} BpDataDebug; - -typedef struct bp_data_count { /* Call count */ - struct bp_data *next; - struct bp_data *prev; - BeamInstr orig_instr; - BeamInstr this_instr; /* key */ - erts_smp_atomic_t acount; -} BpDataCount; - typedef struct { Eterm pid; Sint count; @@ -110,13 +39,9 @@ typedef struct { } bp_time_hash_t; typedef struct bp_data_time { /* Call time */ - struct bp_data *next; - struct bp_data *prev; - BeamInstr orig_instr; - BeamInstr this_instr; /* key */ - Uint pause; - Uint n; - bp_time_hash_t *hash; + Uint n; + bp_time_hash_t *hash; + erts_refc_t refc; } BpDataTime; typedef struct { @@ -126,64 +51,42 @@ typedef struct { BeamInstr *pc; } process_breakpoint_time_t; /* used within psd */ -extern erts_smp_spinlock_t erts_bp_lock; +typedef struct { + erts_smp_atomic_t acount; + erts_refc_t refc; +} BpCount; + +typedef struct { + erts_smp_atomic_t pid; + erts_refc_t refc; +} BpMetaPid; + +typedef struct generic_bp_data { + Uint flags; + Binary* local_ms; /* Match spec for local call trace */ + Binary* meta_ms; /* Match spec for meta trace */ + BpMetaPid* meta_pid; /* Meta trace pid */ + BpCount* count; /* For call count */ + BpDataTime* time; /* For time trace */ +} GenericBpData; + +#define ERTS_NUM_BP_IX 2 + +typedef struct generic_bp { + BeamInstr orig_instr; + GenericBpData data[ERTS_NUM_BP_IX]; +} GenericBp; #define ERTS_BP_CALL_TIME_SCHEDULE_IN (0) #define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1) #define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2) -#define ERTS_BP_CALL_TIME_CALL (0) -#define ERTS_BP_CALL_TIME_RETURN (1) 
-#define ERTS_BP_CALL_TIME_TAIL_CALL (2) - -#ifdef ERTS_SMP -#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock) -#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock) -#else -#define ErtsSmpBPLock(BDC) -#define ErtsSmpBPUnlock(BDC) -#endif - #ifdef ERTS_SMP #define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1) #else #define bp_sched2ix_proc(p) (0) #endif -#define ErtsCountBreak(p, pc,instr_result) \ -do { \ - BpData **bds = (BpData **) (pc)[-4]; \ - BpDataCount *bdc = NULL; \ - Uint ix = bp_sched2ix_proc( (p) ); \ - erts_aint_t count = 0; \ - \ - ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \ - ASSERT(bds); \ - bdc = (BpDataCount *) bds[ix]; \ - bdc = (BpDataCount *) bdc->next; \ - ASSERT(bdc); \ - bds[ix] = (BpData *) bdc; \ - count = erts_smp_atomic_read_nob(&bdc->acount); \ - if (count >= 0) erts_smp_atomic_inc_nob(&bdc->acount); \ - *(instr_result) = bdc->orig_instr; \ -} while (0) - -#define ErtsBreakSkip(p, pc,instr_result) \ -do { \ - BpData **bds = (BpData **) (pc)[-4]; \ - BpData *bd = NULL; \ - Uint ix = bp_sched2ix_proc( (p) ); \ - \ - ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \ - ASSERT(bds); \ - bd = bds[ix]; \ - ASSERT(bd); \ - bd = bd->next; \ - ASSERT(bd); \ - bds[ix] = bd; \ - *(instr_result) = bd->orig_instr; \ -} while (0) - enum erts_break_op{ erts_break_nop = 0, /* Must be false */ erts_break_set = !0, /* Must be true */ @@ -191,7 +94,17 @@ enum erts_break_op{ erts_break_stop }; +typedef Uint32 ErtsBpIndex; +typedef struct { + BeamInstr* pc; + Module* mod; +} BpFunction; + +typedef struct { + Uint matched; /* Number matched */ + BpFunction* matching; /* Matching functions */ +} BpFunctions; /* ** Function interface exported from beam_bp.c @@ -199,49 +112,66 @@ enum erts_break_op{ void erts_bp_init(void); -int erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec, - Eterm tracer_pid); -int erts_clear_trace_break(Eterm mfa[3], int specified); -int erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec, +void erts_prepare_bp_staging(void); +void erts_commit_staged_bp(void); + +ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void); +ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void); + +void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified); +void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified); +void erts_bp_free_matched_functions(BpFunctions* f); + +void erts_install_breakpoints(BpFunctions* f); +void erts_uninstall_breakpoints(BpFunctions* f); +void erts_consolidate_bp_data(BpFunctions* f, int local); +void erts_consolidate_bif_bp_data(void); + +void erts_set_trace_break(BpFunctions *f, Binary *match_spec); +void erts_clear_trace_break(BpFunctions *f); + +void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local); +void erts_clear_call_trace_bif(BeamInstr *pc, int local); + +void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec, Eterm tracer_pid); -int erts_clear_mtrace_break(Eterm mfa[3], int specified); +void erts_clear_mtrace_break(BpFunctions *f); void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid); void erts_clear_mtrace_bif(BeamInstr *pc); -int erts_set_debug_break(Eterm mfa[3], int specified); -int erts_clear_debug_break(Eterm mfa[3], int specified); -int erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op); -int erts_clear_count_break(Eterm mfa[3], int specified); +void erts_set_debug_break(BpFunctions *f); +void erts_clear_debug_break(BpFunctions *f); +void 
erts_set_count_break(BpFunctions *f, enum erts_break_op); +void erts_clear_count_break(BpFunctions *f); -int erts_clear_break(Eterm mfa[3], int specified); + +void erts_clear_all_breaks(BpFunctions* f); int erts_clear_module_break(Module *modp); -int erts_clear_function_break(Module *modp, BeamInstr *pc); +void erts_clear_export_break(Module *modp, BeamInstr* pc); +BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg); BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args, Uint32 *ret_flags, Eterm *tracer_pid); -Uint32 erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, - int local, Eterm *tracer_pid); -int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, - Eterm *tracer_pid_ret); +int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local); int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_rte); int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret); int erts_is_native_break(BeamInstr *pc); -int erts_is_count_break(BeamInstr *pc, Sint *count_ret); +int erts_is_count_break(BeamInstr *pc, Uint *count_ret); int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time); -void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type); +void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt); +void erts_trace_time_return(Process* c_p, BeamInstr* pc); void erts_schedule_time_break(Process *p, Uint out); -int erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op); -int erts_clear_time_break(Eterm mfa[3], int specified); +void erts_set_time_break(BpFunctions *f, enum erts_break_op); +void erts_clear_time_break(BpFunctions *f); int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time); void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op); void erts_clear_time_trace_bif(BeamInstr *pc); -BpData *erts_get_time_break(Process *p, BeamInstr *pc); BeamInstr *erts_find_local_func(Eterm mfa[3]); @@ -258,6 +188,19 @@ ERTS_GLB_INLINE Uint erts_bp_sched2ix(void) return 0; #endif } + +extern erts_smp_atomic32_t erts_active_bp_index; +extern erts_smp_atomic32_t erts_staging_bp_index; + +ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void) +{ + return erts_smp_atomic32_read_nob(&erts_active_bp_index); +} + +ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void) +{ + return erts_smp_atomic32_read_nob(&erts_staging_bp_index); +} #endif #endif /* _BEAM_BP_H */ diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index e69cbc3048..a609ed8c71 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -84,6 +84,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2) int i; int specified = 0; Eterm res; + BpFunctions f; if (bool != am_true && bool != am_false) goto error; @@ -121,11 +122,19 @@ erts_debug_breakpoint_2(BIF_ALIST_2) erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); erts_smp_thr_progress_block(); + erts_bp_match_functions(&f, mfa, specified); if (bool == am_true) { - res = make_small(erts_set_debug_break(mfa, specified)); + erts_set_debug_break(&f); + erts_install_breakpoints(&f); + erts_commit_staged_bp(); } else { - res = make_small(erts_clear_debug_break(mfa, specified)); + erts_clear_debug_break(&f); + erts_commit_staged_bp(); + erts_uninstall_breakpoints(&f); } + erts_consolidate_bp_data(&f, 1); + res = make_small(f.matched); + erts_bp_free_matched_functions(&f); erts_smp_thr_progress_unblock(); erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); diff --git 
a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 3973d1d378..5dc5aa1e03 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -26,7 +26,6 @@ #include "erl_vm.h" #include "global.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "error.h" #include "bif.h" #include "big.h" @@ -218,7 +217,6 @@ BeamInstr beam_continue_exit[1]; BeamInstr* em_call_error_handler; BeamInstr* em_apply_bif; -BeamInstr* em_call_traced_function; /* NOTE These should be the only variables containing trace instructions. @@ -249,20 +247,6 @@ void** beam_ops; extern int count_instructions; #endif -#if defined(HYBRID) -#define SWAPIN \ - g_htop = global_htop; \ - g_hend = global_hend; \ - HTOP = HEAP_TOP(c_p); \ - E = c_p->stop - -#define SWAPOUT \ - global_htop = g_htop; \ - global_hend = g_hend; \ - HEAP_TOP(c_p) = HTOP; \ - c_p->stop = E - -#else #define SWAPIN \ HTOP = HEAP_TOP(c_p); \ E = c_p->stop @@ -290,8 +274,6 @@ extern int count_instructions; #define LIGHT_SWAPIN HTOP = HEAP_TOP(c_p) -#endif - #ifdef FORCE_HEAP_FRAGS # define HEAP_SPACE_VERIFIED(Words) do { \ c_p->space_verified = (Words); \ @@ -453,36 +435,6 @@ extern int count_instructions; CHECK_TERM(r(0)); \ } while (0) -#ifdef HYBRID -#ifdef INCREMENTAL -#define TestGlobalHeap(Nh, Live, hp) \ - do { \ - unsigned need = (Nh); \ - ASSERT(global_heap <= g_htop && g_htop <= global_hend); \ - SWAPOUT; \ - reg[0] = r(0); \ - FCALLS -= need; \ - (hp) = IncAlloc(c_p,need,reg,(Live)); \ - r(0) = reg[0]; \ - SWAPIN; \ - } while (0) -#else -#define TestGlobalHeap(Nh, Live, hp) \ - do { \ - unsigned need = (Nh); \ - ASSERT(global_heap <= g_htop && g_htop <= global_hend); \ - if (g_hend - g_htop < need) { \ - SWAPOUT; \ - reg[0] = r(0); \ - FCALLS -= erts_global_garbage_collect(c_p, need, reg, (Live)); \ - r(0) = reg[0]; \ - SWAPIN; \ - } \ - (hp) = global_htop; \ - } while (0) -#endif -#endif /* HYBRID */ - #define Init(N) make_blank(yb(N)) #define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0) @@ -1008,16 +960,9 @@ static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, static struct StackTrace * get_trace_from_exc(Eterm exc); static Eterm make_arglist(Process* c_p, Eterm* reg, int a); -#if defined(VXWORKS) -static int init_done; -#endif - void init_emulator(void) { -#if defined(VXWORKS) - init_done = 0; -#endif erts_smp_atomic_init_nob(&warned_for_tuple_funs, (erts_aint_t) 0); process_main(); } @@ -1150,9 +1095,7 @@ dtrace_drvport_str(ErlDrvPort drvport, char *port_buf) */ void process_main(void) { -#if !defined(VXWORKS) static int init_done = 0; -#endif Process* c_p = NULL; int reds_used; #ifdef DEBUG @@ -1174,12 +1117,6 @@ void process_main(void) */ register Eterm* HTOP REG_htop = NULL; - -#ifdef HYBRID - Eterm *g_htop; - Eterm *g_hend; -#endif - /* Stack pointer. Grows downwards; points * to last item pushed (normally a saved * continuation pointer). @@ -4624,64 +4561,6 @@ void process_main(void) * Trace and debugging support. */ - /* - * At this point, I points to the code[3] in the export entry for - * a trace-enabled function. - * - * code[0]: Module - * code[1]: Function - * code[2]: Arity - * code[3]: &&call_traced_function - * code[4]: Address of function. 
- */ - OpCase(call_traced_function): { - if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) { - unsigned offset = offsetof(Export, code) + 3*sizeof(BeamInstr); - Export* ep = (Export *) (((char *)I)-offset); - Uint32 flags; - - SWAPOUT; - reg[0] = r(0); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg, - 0, &c_p->tracer_proc); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - SWAPIN; - - if (flags & MATCH_SET_RX_TRACE) { - ASSERT(c_p->htop <= E && E <= c_p->hend); - if (E - 3 < HTOP) { - /* SWAPOUT, SWAPIN was done and r(0) was saved above */ - PROCESS_MAIN_CHK_LOCKS(c_p); - FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - r(0) = reg[0]; - SWAPIN; - } - E -= 3; - ASSERT(c_p->htop <= E && E <= c_p->hend); - ASSERT(is_CP((BeamInstr)(ep->code))); - ASSERT(is_internal_pid(c_p->tracer_proc) || - is_internal_port(c_p->tracer_proc)); - E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */ - E[1] = am_true; /* Process tracer */ - E[0] = make_cp(ep->code); - c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) - ? beam_exception_trace : beam_return_trace; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - c_p->trace_flags |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - } - } - SET_I((BeamInstr *)Arg(0)); - Dispatch(); - } - OpCase(return_trace): { BeamInstr* code = (BeamInstr *) (UWord) E[0]; @@ -4696,80 +4575,22 @@ void process_main(void) Goto(*I); } - OpCase(i_count_breakpoint): { - BeamInstr real_I; - - ErtsCountBreak(c_p, (BeamInstr *) I, &real_I); - ASSERT(VALID_INSTR(real_I)); - Goto(real_I); - } - - /* need to send mfa instead of bdt pointer - * the pointer might be deallocated. 
- */ - - OpCase(i_time_breakpoint): { + OpCase(i_generic_breakpoint): { BeamInstr real_I; - BpData **bds = (BpData **) (I)[-4]; - BpDataTime *bdt = NULL; - Uint ix = 0; -#ifdef ERTS_SMP - ix = c_p->scheduler_data->no - 1; -#else - ix = 0; -#endif - bdt = (BpDataTime *)bds[ix]; - - ASSERT((I)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - ASSERT(bdt); - bdt = (BpDataTime *) bdt->next; - ASSERT(bdt); - bds[ix] = (BpData *) bdt; - real_I = bdt->orig_instr; + ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); + SWAPOUT; + reg[0] = r(0); + real_I = erts_generic_breakpoint(c_p, I, reg); + r(0) = reg[0]; + SWAPIN; ASSERT(VALID_INSTR(real_I)); - - if (IS_TRACED_FL(c_p, F_TRACE_CALLS) && !(bdt->pause)) { - if ( (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) || - (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) || - (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace))) { - /* This _IS_ a tail recursive call */ - SWAPOUT; - erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_TAIL_CALL); - SWAPIN; - } else { - SWAPOUT; - erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_CALL); - - /* r register needs to be copied to the array - * for the garbage collector - */ - ASSERT(c_p->htop <= E && E <= c_p->hend); - if (E - 2 < HTOP) { - reg[0] = r(0); - PROCESS_MAIN_CHK_LOCKS(c_p); - FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - r(0) = reg[0]; - } - SWAPIN; - - ASSERT(c_p->htop <= E && E <= c_p->hend); - - E -= 2; - E[0] = make_cp(I); - E[1] = make_cp(c_p->cp); /* original return address */ - c_p->cp = beam_return_time_trace; - } - } - Goto(real_I); } OpCase(i_return_time_trace): { BeamInstr *pc = (BeamInstr *) (UWord) E[0]; SWAPOUT; - erts_trace_time_break(c_p, pc, NULL, ERTS_BP_CALL_TIME_RETURN); + erts_trace_time_return(c_p, pc); SWAPIN; c_p->cp = NULL; SET_I((BeamInstr *) cp_val(E[1])); @@ -4777,114 +4598,6 @@ void process_main(void) Goto(*I); } - OpCase(i_trace_breakpoint): - if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) { - BeamInstr real_I; - - ErtsBreakSkip(c_p, (BeamInstr *) I, &real_I); - Goto(real_I); - } - /* Fall through to next case */ - OpCase(i_mtrace_breakpoint): { - BeamInstr real_I; - Uint32 flags; - Eterm tracer_pid; - Uint* cpp; - int return_to_trace = 0, need = 0; - flags = 0; - SWAPOUT; - reg[0] = r(0); - - if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) { - cpp = &E[2]; - } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) { - return_to_trace = !0; - cpp = &E[0]; - } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) { - return_to_trace = !0; - cpp = &E[0]; - } else { - cpp = NULL; - } - if (cpp) { - /* This _IS_ a tail recursive call, if there are - * return_trace and/or i_return_to_trace stackframes - * on the stack, they are not intermixed with y registers - */ - BeamInstr *cp_save = c_p->cp; - for (;;) { - ASSERT(is_CP(*cpp)); - if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) { - cpp += 3; - } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) { - return_to_trace = !0; - cpp += 1; - } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_time_trace)) { - cpp += 2; - } else - break; - } - c_p->cp = (BeamInstr *) cp_val(*cpp); - ASSERT(is_CP(*cpp)); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - SWAPIN; /* Needed by shared heap. 
*/ - c_p->cp = cp_save; - } else { - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - SWAPIN; /* Needed by shared heap. */ - } - - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - - if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) { - need += 1; - } - if (flags & MATCH_SET_RX_TRACE) { - need += 3; - } - if (need) { - ASSERT(c_p->htop <= E && E <= c_p->hend); - if (E - need < HTOP) { - /* SWAPOUT was done and r(0) was saved above */ - PROCESS_MAIN_CHK_LOCKS(c_p); - FCALLS -= erts_garbage_collect(c_p, need, reg, I[-1]); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - r(0) = reg[0]; - SWAPIN; - } - } - if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) { - E -= 1; - ASSERT(c_p->htop <= E && E <= c_p->hend); - E[0] = make_cp(c_p->cp); - c_p->cp = (BeamInstr *) beam_return_to_trace; - } - if (flags & MATCH_SET_RX_TRACE) { - E -= 3; - ASSERT(c_p->htop <= E && E <= c_p->hend); - ASSERT(is_CP((Eterm) (UWord) (I - 3))); - ASSERT(am_true == tracer_pid || - is_internal_pid(tracer_pid) || is_internal_port(tracer_pid)); - E[2] = make_cp(c_p->cp); - E[1] = tracer_pid; - E[0] = make_cp(I - 3); /* We ARE at the beginning of an - instruction, - the funcinfo is above i. */ - c_p->cp = - (flags & MATCH_SET_EXCEPTION_TRACE) - ? beam_exception_trace : beam_return_trace; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - c_p->trace_flags |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - } - Goto(real_I); - } - OpCase(i_return_to_trace): { if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) { Uint *cpp = (Uint*) E; @@ -5234,7 +4947,6 @@ void process_main(void) #endif /* NO_JUMP_TABLE */ em_call_error_handler = OpCode(call_error_handler); - em_call_traced_function = OpCode(call_traced_function); em_apply_bif = OpCode(apply_bif); beam_apply[0] = (BeamInstr) OpCode(i_apply); @@ -6534,10 +6246,8 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) hp = funp->env; erts_refc_inc(&fe->refc, 2); funp->thing_word = HEADER_FUN; -#ifndef HYBRID /* FIND ME! 
*/ funp->next = MSO(p).first; MSO(p).first = (struct erl_off_heap_header*) funp; -#endif funp->fe = fe; funp->num_free = num_free; funp->creator = p->id; diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h index f1506a8684..1048f258a5 100644 --- a/erts/emulator/beam/beam_load.h +++ b/erts/emulator/beam/beam_load.h @@ -49,7 +49,6 @@ extern void** beam_ops; extern BeamInstr beam_debug_apply[]; extern BeamInstr* em_call_error_handler; extern BeamInstr* em_apply_bif; -extern BeamInstr* em_call_traced_function; /* * The following variables keep a sorted list of address ranges for diff --git a/erts/emulator/beam/benchmark.c b/erts/emulator/beam/benchmark.c index 7fbf44a03c..7ac14b8e8b 100644 --- a/erts/emulator/beam/benchmark.c +++ b/erts/emulator/beam/benchmark.c @@ -33,17 +33,6 @@ unsigned long long messages_copied; unsigned long long messages_ego; unsigned long long minor_gc; unsigned long long major_gc; -#ifdef HYBRID -unsigned long long minor_global_gc; -unsigned long long major_global_gc; -unsigned long long gc_in_copy; -#ifdef INCREMENTAL -unsigned long long minor_gc_cycles; -unsigned long long major_gc_cycles; -unsigned long long minor_gc_stages; -unsigned long long major_gc_stages; -#endif -#endif #endif /* BM_COUNTERS */ #ifdef BM_TIMERS @@ -191,17 +180,6 @@ void init_benchmarking() messages_ego = 0; minor_gc = 0; major_gc = 0; -#ifdef HYBRID - minor_global_gc = 0; - major_global_gc = 0; - gc_in_copy = 0; -#ifdef INCREMENTAL - minor_gc_cycles = 0; - major_gc_cycles = 0; - minor_gc_stages = 0; - major_gc_stages = 0; -#endif -#endif #endif /* BM_COUNTERS */ #ifdef BM_HEAP_SIZES @@ -243,16 +221,6 @@ void save_statistics() erts_fprintf(file,"Number of processes spawned: %lld\n",processes_spawned); erts_fprintf(file,"Number of local minor GCs: %lld\n",minor_gc); erts_fprintf(file,"Number of local major GCs: %lld\n",major_gc); -#ifdef HYBRID - erts_fprintf(file,"Number of global minor GCs: %lld\n",minor_global_gc); - erts_fprintf(file,"Number of global major GCs: %lld\n",major_global_gc); -#ifdef INCREMENTAL - erts_fprintf(file,"Number of minor GC-cycles: %lld\n",minor_gc_cycles); - erts_fprintf(file,"Number of major GC-cycles: %lld\n",major_gc_cycles); - erts_fprintf(file,"Number of minor GC-stages: %lld\n",minor_gc_stages); - erts_fprintf(file,"Number of major GC-stages: %lld\n",major_gc_stages); -#endif -#endif erts_fprintf(file,"Number of messages sent: %lld\n",messages_sent); erts_fprintf(file,"Number of messages copied: %lld\n",messages_copied); erts_fprintf(file,"Number of messages sent to self: %lld\n",messages_ego); diff --git a/erts/emulator/beam/benchmark.h b/erts/emulator/beam/benchmark.h index eedb06a1b6..003e821bce 100644 --- a/erts/emulator/beam/benchmark.h +++ b/erts/emulator/beam/benchmark.h @@ -99,17 +99,6 @@ extern unsigned long long messages_copied; extern unsigned long long messages_ego; extern unsigned long long minor_gc; extern unsigned long long major_gc; -#ifdef HYBRID -extern unsigned long long minor_global_gc; -extern unsigned long long major_global_gc; -extern unsigned long long gc_in_copy; -#ifdef INCREMENTAL -extern unsigned long long minor_gc_cycles; -extern unsigned long long major_gc_cycles; -extern unsigned long long minor_gc_stages; -extern unsigned long long major_gc_stages; -#endif -#endif #define BM_COUNT(var) (var)++; diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index fcb130655a..6943c8852c 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -3507,22 +3507,6 @@ BIF_RETTYPE 
garbage_collect_0(BIF_ALIST_0) } /**********************************************************************/ -/* Perform garbage collection of the message area */ - -BIF_RETTYPE garbage_collect_message_area_0(BIF_ALIST_0) -{ -#if defined(HYBRID) && !defined(INCREMENTAL) - int reds = 0; - - FLAGS(BIF_P) |= F_NEED_FULLSWEEP; - reds = erts_global_garbage_collect(BIF_P, 0, NULL, 0); - BIF_RET2(am_true, reds); -#else - BIF_RET(am_false); -#endif -} - -/**********************************************************************/ /* Return a list of active ports */ BIF_RETTYPE ports_0(BIF_ALIST_0) diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 698bbf0098..f7dad2767f 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -99,8 +99,6 @@ bif erlang:garbage_collect/0 bif 'erl.system':garbage_collect/0 ebif_garbage_collect_0 bif erlang:garbage_collect/1 bif 'erl.system':garbage_collect/1 ebif_garbage_collect_1 -bif erlang:garbage_collect_message_area/0 -bif 'erl.system':garbage_collect_message_area/0 ebif_garbage_collect_message_area_0 bif erlang:get/0 bif 'erl.lang.proc.pdict':get/0 ebif_get_0 bif erlang:get/1 diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c index ae4cca1e58..8025058ee0 100644 --- a/erts/emulator/beam/code_ix.c +++ b/erts/emulator/beam/code_ix.c @@ -44,6 +44,10 @@ struct code_ix_queue_item { static struct code_ix_queue_item* the_code_ix_queue = NULL; static erts_smp_mtx_t the_code_ix_queue_lock; +#ifdef ERTS_ENABLE_LOCK_CHECK +static erts_tsd_key_t has_code_write_permission; +#endif + void erts_code_ix_init(void) { /* We start emulator by initializing preloaded modules @@ -53,6 +57,9 @@ void erts_code_ix_init(void) erts_smp_atomic32_init_nob(&the_active_code_index, 0); erts_smp_atomic32_init_nob(&the_staging_code_index, 0); erts_smp_mtx_init(&the_code_ix_queue_lock, "code_ix_queue"); +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_tsd_key_create(&has_code_write_permission); +#endif CIX_TRACE("init"); } @@ -112,6 +119,9 @@ int erts_try_seize_code_write_permission(Process* c_p) success = !the_code_ix_lock; if (success) { the_code_ix_lock = 1; +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_tsd_set(has_code_write_permission, (void *) 1); +#endif } else { /* Already locked */ struct code_ix_queue_item* qitem; @@ -128,6 +138,7 @@ int erts_try_seize_code_write_permission(Process* c_p) void erts_release_code_write_permission(void) { + ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked()); erts_smp_mtx_lock(&the_code_ix_queue_lock); while (the_code_ix_queue != NULL) { /* unleash the entire herd */ struct code_ix_queue_item* qitem = the_code_ix_queue; @@ -141,12 +152,15 @@ void erts_release_code_write_permission(void) erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem); } the_code_ix_lock = 0; +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_tsd_set(has_code_write_permission, (void *) 0); +#endif erts_smp_mtx_unlock(&the_code_ix_queue_lock); } #ifdef ERTS_ENABLE_LOCK_CHECK int erts_is_code_ix_locked(void) { - return the_code_ix_lock; + return the_code_ix_lock && erts_tsd_get(has_code_write_permission); } #endif diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index d7345c2f54..36eda04de2 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -26,30 +26,13 @@ #include "global.h" #include "erl_process.h" #include "erl_gc.h" -#include "erl_nmgc.h" #include "big.h" #include "erl_binary.h" #include "erl_bits.h" #include "dtrace-wrapper.h" -#ifdef HYBRID -MA_STACK_DECLARE(src); -MA_STACK_DECLARE(dst); -MA_STACK_DECLARE(offset); -#endif - static 
void move_one_frag(Eterm** hpp, Eterm* src, Uint src_sz, ErlOffHeap*); -void -init_copy(void) -{ -#ifdef HYBRID - MA_STACK_ALLOC(src); - MA_STACK_ALLOC(dst); - MA_STACK_ALLOC(offset); -#endif -} - /* * Copy object "obj" to process p. */ @@ -432,12 +415,10 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) while (i--) { *htop++ = *objp++; } -#ifndef HYBRID /* FIND ME! */ funp = (ErlFunThing *) tp; funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*) funp; erts_refc_inc(&funp->fe->refc, 2); -#endif *argp = make_fun_rel(tp, dst_base); } break; @@ -500,420 +481,6 @@ Eterm copy_struct(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) return res; } -#ifdef HYBRID - -#ifdef BM_MESSAGE_SIZES -# define BM_ADD(var,val) (var) += (val); -#else -# define BM_ADD(var,val) -#endif - -#ifdef DEBUG -# define CLEARMEM(PTR,SIZE) memset(PTR,0,SIZE*sizeof(Eterm)) -#else -# define CLEARMEM(PTR,SIZE) -#endif - -#ifdef INCREMENTAL -#define GlobalAlloc(p, need, hp) \ -do { \ - Uint n = (need); \ - BM_ADD(words_copied,n); \ - BM_SWAP_TIMER(copy,system); \ - /* If a new collection cycle is started during copy, the message * \ - * will end up in the old generation and all allocations * \ - * thereafter must go directly into the old generation. */ \ - if (alloc_old) { \ - erts_incremental_gc((p),n,&dest,1); \ - (hp) = erts_inc_alloc(n); \ - } else { \ - (hp) = IncAlloc((p),n,&dest,1); \ - if (ma_gc_flags & GC_CYCLE_START) { \ - alloc_old = 1; \ - global_htop = global_heap; \ - (hp) = erts_inc_alloc(n); \ - } \ - } \ - CLEARMEM((hp),(n)); \ - BM_SWAP_TIMER(system,copy); \ -} while(0) - -#else /* no INCREMENTAL */ - -#define GlobalAlloc(p, need, hp) \ -do { \ - Uint n = (need); \ - total_need += n; \ - if (total_need >= global_heap_sz) \ - erl_exit(ERTS_ABORT_EXIT, "Copying a message (%d words) larger than the nursery simply won't work...\n", total_need); \ - if (global_hend - n < global_htop) { \ - BM_SWAP_TIMER(copy,system); \ - erts_global_garbage_collect((p),total_need,NULL,0); \ - BM_SWAP_TIMER(system,copy); \ - total_need = 0; \ - ma_src_top = 0; \ - ma_dst_top = 0; \ - ma_offset_top = 0; \ - goto copy_start; \ - } \ - (hp) = global_htop; \ - global_htop += n; \ - BM_ADD(words_copied,n); \ -} while(0) -#endif /* INCREMENTAL */ - -/* Copy a message to the message area. */ -Eterm copy_struct_lazy(Process *from, Eterm orig, Uint offs) -{ - Eterm obj; - Eterm dest; -#ifdef INCREMENTAL - int alloc_old = 0; -#else - int total_need = 0; -#endif - - VERBOSE(DEBUG_MESSAGES, - ("COPY START; %T is sending a message @ 0x%016x\n%T\n", - from->id, orig, orig)); - -#ifndef INCREMENTAL - copy_start: -#endif - MA_STACK_PUSH(src,orig); - MA_STACK_PUSH(dst,&dest); - MA_STACK_PUSH(offset,offs); - - while (ma_src_top > 0) { - obj = MA_STACK_POP(src); - - /* copy_struct_lazy should never be called with something that - * do not need to be copied. Within the loop, nothing that do - * not need copying should be placed in the src-stack. - */ - ASSERT(!NO_COPY(obj)); - - switch (primary_tag(obj)) { - case TAG_PRIMARY_LIST: { - Eterm *hp; - Eterm *objp; - - GlobalAlloc(from,2,hp); - objp = list_val(obj); - - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_list(hp)); - MA_STACK_POP(dst); - - /* TODO: Change the order below so that the CDR is pushed first.
*/ - - if (NO_COPY(*objp)) { - hp[0] = *objp; -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,2); -#endif - } else { - MA_STACK_PUSH(src,*objp); - MA_STACK_PUSH(dst,hp); - MA_STACK_PUSH(offset,0); - } - - objp++; - - if (NO_COPY(*objp)) { - hp[1] = *objp; -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,2); -#endif - } - else { - MA_STACK_PUSH(src,*objp); - MA_STACK_PUSH(dst,hp); - MA_STACK_PUSH(offset,1); - } - continue; - } - - case TAG_PRIMARY_BOXED: { - Eterm *objp = boxed_val(obj); - - switch (*objp & _TAG_HEADER_MASK) { - case ARITYVAL_SUBTAG: { - Uint ari = arityval(*objp); - Uint i; - Eterm *hp; - GlobalAlloc(from,ari + 1,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp)); - MA_STACK_POP(dst); - *hp = *objp++; - for (i = 1; i <= ari; i++) { - switch (primary_tag(*objp)) { - case TAG_PRIMARY_LIST: - case TAG_PRIMARY_BOXED: - if (NO_COPY(*objp)) { - hp[i] = *objp; -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp), - inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,BOXED_NEED(hp,*hp)); -#endif - objp++; - } else { - MA_STACK_PUSH(src,*objp++); - MA_STACK_PUSH(dst,hp); - MA_STACK_PUSH(offset,i); - } - break; - default: - hp[i] = *objp++; - } - } - continue; - } - - case REFC_BINARY_SUBTAG: { - ProcBin *pb; - Uint i = thing_arityval(*objp) + 1; - Eterm *hp; - GlobalAlloc(from,i,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_binary(hp)); - MA_STACK_POP(dst); - pb = (ProcBin*) hp; - while (i--) { - *hp++ = *objp++; - } - erts_refc_inc(&pb->val->refc, 2); - pb->next = erts_global_offheap.first; - erts_global_offheap.first = pb; - OH_OVERHEAD(off_heap, pb->size / sizeof(Eterm)); - continue; - } - - case FUN_SUBTAG: { - ErlFunThing *funp = (ErlFunThing*) objp; - Uint i = thing_arityval(*objp) + 1; - Uint j = i + 1 + funp->num_free; - Uint k = i; - Eterm *hp, *hp_start; - GlobalAlloc(from,j,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - hp_start = hp; - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_fun(hp)); - MA_STACK_POP(dst); - funp = (ErlFunThing*) hp; - while (i--) { - *hp++ = *objp++; - } -#ifndef HYBRID /* FIND ME! 
*/ - funp->next = erts_global_offheap.first; - erts_global_offheap.first = funp; - erts_refc_inc(&funp->fe->refc, 2); -#endif - for (i = k; i < j; i++) { - switch (primary_tag(*objp)) { - case TAG_PRIMARY_LIST: - case TAG_PRIMARY_BOXED: - if (NO_COPY(*objp)) { -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp), - inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,BOXED_NEED(hp,*hp)); -#endif - *hp++ = *objp++; - } else { - MA_STACK_PUSH(src,*objp++); - MA_STACK_PUSH(dst,hp_start); - MA_STACK_PUSH(offset,i); - hp++; - } - break; - default: - *hp++ = *objp++; - } - } - continue; - } - - case EXTERNAL_PID_SUBTAG: - case EXTERNAL_PORT_SUBTAG: - case EXTERNAL_REF_SUBTAG: { - ExternalThing *etp; - Uint i = thing_arityval(*objp) + 1; - Eterm *hp; - GlobalAlloc(from,i,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_external(hp)); - MA_STACK_POP(dst); - etp = (ExternalThing*) hp; - while (i--) { - *hp++ = *objp++; - } - - etp->next = erts_global_offheap.first; - erts_global_offheap.first = etp; - erts_refc_inc(&etp->node->refc, 2); - continue; - } - - case SUB_BINARY_SUBTAG: { - ErlSubBin *sb = (ErlSubBin *) objp; - Eterm *hp; - Eterm res_binary; - Eterm real_bin = sb->orig; - Uint bit_offset = sb->bitoffs; - Uint bit_size = sb -> bitsize; - Uint sub_offset = sb->offs; - size_t size = sb->size; - Uint extra_bytes; - Uint real_size; - Uint sub_binary_heapneed; - if ((bit_size + bit_offset) > 8) { - extra_bytes = 2; - sub_binary_heapneed = ERL_SUB_BIN_SIZE; - } else if ((bit_size + bit_offset) > 0) { - extra_bytes = 1; - sub_binary_heapneed = ERL_SUB_BIN_SIZE; - } else { - extra_bytes = 0; - sub_binary_heapneed = 0; - } - - real_size = size+extra_bytes; - objp = binary_val(real_bin); - if (thing_subtag(*objp) == HEAP_BINARY_SUBTAG) { - ErlHeapBin *from_bin; - ErlHeapBin *to_bin; - Uint i = heap_bin_size(real_size); - GlobalAlloc(from,i+sub_binary_heapneed,hp); - from_bin = (ErlHeapBin *) objp; - to_bin = (ErlHeapBin *) hp; - to_bin->thing_word = header_heap_bin(real_size); - to_bin->size = real_size; - sys_memcpy(to_bin->data, ((byte *)from_bin->data) + - sub_offset, real_size); - res_binary = make_binary(to_bin); - hp += i; - } else { - ProcBin *from_bin; - ProcBin *to_bin; - - ASSERT(thing_subtag(*objp) == REFC_BINARY_SUBTAG); - from_bin = (ProcBin *) objp; - erts_refc_inc(&from_bin->val->refc, 2); - GlobalAlloc(from,PROC_BIN_SIZE+sub_binary_heapneed,hp); - to_bin = (ProcBin *) hp; - to_bin->thing_word = HEADER_PROC_BIN; - to_bin->size = real_size; - to_bin->val = from_bin->val; - to_bin->bytes = from_bin->bytes + sub_offset; - to_bin->next = erts_global_offheap.first; - erts_global_offheap.first = to_bin; - OH_OVERHEAD(&erts_global_offheap, to_bin->size / sizeof(Eterm)); - res_binary=make_binary(to_bin); - hp += PROC_BIN_SIZE; - } - if (extra_bytes != 0) { - ErlSubBin* res; - res = (ErlSubBin *) hp; - res->thing_word = HEADER_SUB_BIN; - res->size = size; - res->bitsize = bit_size; - res->bitoffs = bit_offset; - res->offs = 0; - res->is_writable = 0; - res->orig = res_binary; - res_binary = make_binary(hp); - } - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),res_binary); - MA_STACK_POP(dst); - continue; - } - - case BIN_MATCHSTATE_SUBTAG: - erl_exit(ERTS_ABORT_EXIT, - "copy_struct_lazy: matchstate term not allowed"); - - default: { - Uint size = thing_arityval(*objp) + 1; - Eterm *hp; - GlobalAlloc(from,size,hp); - /* A GC above might invalidate the value of objp */ - objp = boxed_val(obj); - 
MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_boxed(hp)); - MA_STACK_POP(dst); - while (size--) { - *hp++ = *objp++; - } - continue; - } - } - continue; - } - - case TAG_PRIMARY_HEADER: - ASSERT((obj & _TAG_HEADER_MASK) == ARITYVAL_SUBTAG); - { - Eterm *objp = &obj; - Uint ari = arityval(obj); - Uint i; - Eterm *hp; - GlobalAlloc(from,ari + 1,hp); - MA_STACK_UPDATE(dst,MA_STACK_POP(offset),make_tuple(hp)); - MA_STACK_POP(dst); - *hp = *objp++; - for (i = 1; i <= ari; i++) { - switch (primary_tag(*objp)) { - case TAG_PRIMARY_LIST: - case TAG_PRIMARY_BOXED: - if (NO_COPY(*objp)) { -#ifdef INCREMENTAL - if (ptr_within(ptr_val(*objp),inc_fromspc,inc_fromend)) - INC_STORE(gray,hp,ari + 1); -#endif - hp[i] = *objp++; - } else { - MA_STACK_PUSH(src,*objp++); - MA_STACK_PUSH(dst,hp); - MA_STACK_PUSH(offset,i); - } - break; - default: - hp[i] = *objp++; - } - } - continue; - } - - default: - erl_exit(ERTS_ABORT_EXIT, - "%s, line %d: Internal error in copy_struct_lazy: 0x%08x\n", - __FILE__, __LINE__,obj); - } - } - - VERBOSE(DEBUG_MESSAGES, - ("Copy allocated @ 0x%08lx:\n%T\n", - (unsigned long)ptr_val(dest),dest)); - - ma_gc_flags &= ~GC_CYCLE_START; - - ASSERT(eq(orig, dest)); - ASSERT(ma_src_top == 0); - ASSERT(ma_dst_top == 0); - ASSERT(ma_offset_top == 0); - return dest; -} - -#undef NO_COPY -#endif /* HYBRID */ - /* * Copy a term that is guaranteed to be contained in a single * heap block. The heap block is copied word by word, and any diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 4b85909828..5db68f6d45 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -1578,11 +1578,9 @@ int erts_net_message(Port *prt, } erts_cleanup_offheap(&off_heap); -#ifndef HYBRID /* FIND ME! */ if (ctl != ctl_default) { erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl); } -#endif UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); ERTS_SMP_CHK_NO_PROC_LOCKS; return 0; @@ -1595,11 +1593,9 @@ int erts_net_message(Port *prt, data_error: PURIFY_MSG("data error"); erts_cleanup_offheap(&off_heap); -#ifndef HYBRID /* FIND ME! 
*/ if (ctl != ctl_default) { erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl); } -#endif UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); erts_do_exit_port(prt, dep->cid, am_killed); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -1973,6 +1969,7 @@ erts_dist_command(Port *prt, int reds_limit) bw(foq.first->extp, size); #endif reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); + erts_smp_atomic_add_nob(&erts_bytes_out, size); fob = foq.first; obufsize += size_obuf(fob); foq.first = foq.first->next; @@ -2056,6 +2053,7 @@ erts_dist_command(Port *prt, int reds_limit) bw(oq.first->extp, size); #endif reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); + erts_smp_atomic_add_nob(&erts_bytes_out, size); fob = oq.first; obufsize += size_obuf(fob); oq.first = oq.first->next; diff --git a/erts/emulator/beam/dtrace-wrapper.h b/erts/emulator/beam/dtrace-wrapper.h index 1aeb7f9221..6ec0c91e21 100644 --- a/erts/emulator/beam/dtrace-wrapper.h +++ b/erts/emulator/beam/dtrace-wrapper.h @@ -42,6 +42,8 @@ #define DTRACE_CHARBUF(name, size) \ char name##_BUFFER[size], *name = name##_BUFFER +#define DTRACE_CHARBUF_NAME(name) name##_BUFFER + #if defined(USE_DYNAMIC_TRACE) && defined(USE_VM_PROBES) #include "erlang_dtrace.h" diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index c501b79b1f..ba73ca6da7 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -2138,9 +2138,6 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg) tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0); } tmp += erts_max_processes*sizeof(Process*); -#ifdef HYBRID - tmp += erts_max_processes*sizeof(Process*); -#endif tmp += erts_bif_timer_memory_size(); tmp += erts_tot_link_lh_size(); @@ -2303,11 +2300,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc) values[i].name = "static"; values[i].ui[0] = erts_max_ports*sizeof(Port) /* Port table */ - + erts_timer_wheel_memory_size() /* Timer wheel */ -#ifdef SYS_TMP_BUF_SIZE - + SYS_TMP_BUF_SIZE /* tmp_buf in sys on vxworks & ose */ -#endif - ; + + erts_timer_wheel_memory_size(); /* Timer wheel */ i++; erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space); diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 4c1424350f..7b27b3dda9 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -320,19 +320,6 @@ type ACTIVE_PROCS STANDARD PROCESSES active_procs +endif -+if hybrid - -type ACTIVE_PROCS STANDARD PROCESSES active_procs - -# Used for all memory involved in incremental gc of the message area -# that is, young (x2) and old generation, forwarding pointers and blackmap -type MESSAGE_AREA LONG_LIVED PROCESSES message_area - -# Used in MA_STACK (global.h) and INC_STORAGE (erl_nmgc.h) -type OBJECT_STACK STANDARD PROCESSES object_stack - -+endif - +if smp type SL_PTIMER SHORT_LIVED SYSTEM ptimer_sl type LL_PTIMER STANDARD SYSTEM ptimer_ll @@ -433,28 +420,4 @@ type CON_VPRINTF_BUF TEMPORARY SYSTEM con_vprintf_buf +endif -+if vxworks - -type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf -type PEND_DATA SYSTEM SYSTEM pending_data -type FD_TAB LONG_LIVED SYSTEM fd_tab -type FD_ENTRY_BUF SYSTEM SYSTEM fd_entry_buf - -+endif - -+if ose - -type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf -type PUTENV_STR SYSTEM SYSTEM putenv_string -type GETENV_STR SYSTEM SYSTEM getenv_string -type GETENV_STATE SYSTEM SYSTEM getenv_state -type SIG_ENTRY SYSTEM SYSTEM sig_entry -type DRIVER_DATA SYSTEM SYSTEM driver_data -type PGM_TAB SYSTEM SYSTEM pgm_tab -type PGM_ENTRY SYSTEM SYSTEM pgm_entry 
-type PRT_TAB SYSTEM SYSTEM prt_tab -type PRT_ENTRY SYSTEM SYSTEM prt_entry - -+endif - # ---------------------------------------------------------------------------- diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index 62225d3572..9a011e2adc 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -952,7 +952,7 @@ ddq_check_incoming(ErtsAllctrDDQueue_t *ddq) ERTS_THR_MEMORY_BARRIER; else { ddq->head.next.unref_end = (ErtsAllctrDDBlock_t *) ilast; - ddq->head.next.thr_progress = erts_thr_progress_later(); + ddq->head.next.thr_progress = erts_thr_progress_later(NULL); erts_atomic32_set_relb(&ddq->tail.data.um_refc_ix, um_refc_ix); ddq->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0; diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index eb98d2f6dd..e2f7c8673f 100755 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -25,7 +25,6 @@ #include "erl_vm.h" #include "global.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "error.h" #include "erl_driver.h" #include "bif.h" @@ -89,12 +88,6 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE #ifdef ERTS_ENABLE_KERNEL_POLL " [kernel-poll:%s]" #endif -#ifdef HYBRID - " [hybrid heap]" -#endif -#ifdef INCREMENTAL - " [incremental GC]" -#endif #ifdef ET_DEBUG #if ET_DEBUG " [type-assertions]" @@ -576,9 +569,6 @@ static Eterm pi_args[] = { am_min_bin_vheap_size, am_current_location, am_current_stacktrace, -#ifdef HYBRID - am_message_binary -#endif }; #define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm))) @@ -626,9 +616,6 @@ pi_arg2ix(Eterm arg) case am_min_bin_vheap_size: return 28; case am_current_location: return 29; case am_current_stacktrace: return 30; -#ifdef HYBRID - case am_message_binary: return 31; -#endif default: return -1; } } @@ -1081,12 +1068,8 @@ process_info_aux(Process *BIF_P, if (rp != BIF_P) { Eterm msg = ERL_MESSAGE_TERM(mq[i].msgp); if (is_value(msg)) { - mq[i].copy_struct_size = (is_immed(msg) -#ifdef HYBRID - || NO_COPY(msg) -#endif - ? 0 - : size_object(msg)); + mq[i].copy_struct_size = (is_immed(msg)? 0 : + size_object(msg)); } else if (mq[i].msgp->data.attached) { mq[i].copy_struct_size @@ -1528,16 +1511,6 @@ process_info_aux(Process *BIF_P, break; } -#ifdef HYBRID - case am_message_binary: { - Uint sz = 3; - (void) bld_bin_list(NULL, &sz, erts_global_offheap.mso); - hp = HAlloc(BIF_P, sz); - res = bld_bin_list(&hp, NULL, erts_global_offheap.mso); - break; - } -#endif - case am_sequential_trace_token: res = copy_object(rp->seq_trace_token, BIF_P); hp = HAlloc(BIF_P, 3); @@ -2356,36 +2329,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) #endif } else if (BIF_ARG_1 == am_heap_sizes) { return erts_heap_sizes(BIF_P); - } else if (BIF_ARG_1 == am_global_heaps_size) { -#ifdef HYBRID - Uint hsz = 0; - Uint sz = 0; - - sz += global_heap_sz; -#ifdef INCREMENTAL - /* The size of the old generation is a bit hard to define here... - * The amount of live data in the last collection perhaps..? */ - sz = 0; -#else - if (global_old_hend && global_old_heap) - sz += global_old_hend - global_old_heap; -#endif - - sz *= sizeof(Eterm); - - (void) erts_bld_uint(NULL, &hsz, sz); - hp = hsz ? 
HAlloc(BIF_P, hsz) : NULL; - res = erts_bld_uint(&hp, NULL, sz); -#else - res = make_small(0); -#endif - return res; } else if (BIF_ARG_1 == am_heap_type) { -#if defined(HYBRID) - return am_hybrid; -#else return am_private; -#endif } else if (ERTS_IS_ATOM_STR("cpu_topology", BIF_ARG_1)) { res = erts_get_cpu_topology_term(BIF_P, am_used); BIF_TRAP1(erts_format_cpu_topology_trap, BIF_P, res); diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c index 5525426824..2a1b01b107 100644 --- a/erts/emulator/beam/erl_bif_port.c +++ b/erts/emulator/beam/erl_bif_port.c @@ -265,7 +265,7 @@ port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3) Eterm res; Sint result_size; Eterm *hp; - Eterm *hp_end; /* To satisfy hybrid heap architecture */ + Eterm *hp_end; unsigned ret_flags = 0U; int fpe_was_unmasked; diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c index 7f1b02b9b4..e88fb8c9f4 100644 --- a/erts/emulator/beam/erl_bif_trace.c +++ b/erts/emulator/beam/erl_bif_trace.c @@ -42,12 +42,24 @@ #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0}; + +/* + * The following variables are protected by code write permission. + */ static int erts_default_trace_pattern_is_on; static Binary *erts_default_match_spec; static Binary *erts_default_meta_match_spec; static struct trace_pattern_flags erts_default_trace_pattern_flags; static Eterm erts_default_meta_tracer_pid; +static struct { /* Protected by code write permission */ + int current; + int install; + int local; + BpFunctions f; /* Local functions */ + BpFunctions e; /* Export entries */ +} finish_bp; + static Eterm trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist); static BIF_RETTYPE @@ -60,12 +72,11 @@ static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key); static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key); static Eterm trace_info_on_load(Process* p, Eterm key); -static int setup_func_trace(Export* ep, void* match_prog, ErtsCodeIndex); -static int reset_func_trace(Export* ep, ErtsCodeIndex); -static void reset_bif_trace(int bif_index); -static void setup_bif_trace(int bif_index); -static void set_trace_bif(int bif_index, void* match_prog); -static void clear_trace_bif(int bif_index); +static void reset_bif_trace(void); +static void setup_bif_trace(void); +static void install_exp_breakpoints(BpFunctions* f); +static void uninstall_exp_breakpoints(BpFunctions* f); +static void clean_export_entries(BpFunctions* f); void erts_bif_trace_init(void) @@ -107,12 +118,12 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) int is_global; Process *meta_tracer_proc = p; Eterm meta_tracer_pid = p->id; + int is_blocking = 0; if (!erts_try_seize_code_write_permission(p)) { ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + finish_bp.current = -1; UseTmpHeap(3,p); /* @@ -328,16 +339,24 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) meta_tracer_proc->trace_flags |= F_TRACER; } - matches = erts_set_trace_pattern(mfa, specified, + matches = erts_set_trace_pattern(p, mfa, specified, match_prog_set, match_prog_set, - on, flags, meta_tracer_pid); + on, flags, meta_tracer_pid, 0); } error: MatchSetUnref(match_prog_set); UnUseTmpHeap(3,p); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + 
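/*
 * A minimal, stand-alone model of the staged breakpoint scheme used in the
 * hunk above (illustrative only; the model_ and Model names are invented
 * and are not ERTS APIs). Writers holding the code write permission fill
 * in the staging generation, erts_staging_bp_ix(); committing atomically
 * flips the active index, so schedulers reading erts_active_bp_ix() never
 * observe a half-built generation. C11 atomics stand in for the
 * erts_smp_atomic32 wrappers:
 */
#include <stdatomic.h>
#include <stdio.h>

#define MODEL_NUM_BP_IX 2                         /* mirrors ERTS_NUM_BP_IX */

typedef struct { unsigned flags; } ModelBpData;   /* cf. GenericBpData */
typedef struct { ModelBpData data[MODEL_NUM_BP_IX]; } ModelBp;  /* cf. GenericBp */

static _Atomic int model_active_ix  = 0;          /* cf. erts_active_bp_index */
static _Atomic int model_staging_ix = 1;          /* cf. erts_staging_bp_index */

/* Writer side: only ever touches the staging generation. */
static void model_set_break(ModelBp *bp, unsigned flags)
{
    bp->data[atomic_load(&model_staging_ix)].flags |= flags;
}

/* Commit: publish staging as active. The retired slot becomes the next
 * staging slot, but is only cleaned up and reused later (the real code
 * defers that to erts_consolidate_bp_data() once the system has moved on). */
static void model_commit_staged_bp(void)
{
    int ix = atomic_load(&model_staging_ix);
    atomic_store_explicit(&model_active_ix, ix, memory_order_release);
    atomic_store(&model_staging_ix, ix == 0 ? 1 : 0);
}

int main(void)
{
    ModelBp bp = {{{0}}};
    model_set_break(&bp, 0x01);                   /* e.g. ERTS_BPF_LOCAL_TRACE */
    printf("active before commit: %#x\n", bp.data[atomic_load(&model_active_ix)].flags);
    model_commit_staged_bp();
    printf("active after commit:  %#x\n", bp.data[atomic_load(&model_active_ix)].flags);
    return 0;
}

/*
 * This two-generation flip is what lets trace_pattern above hand off to
 * erts_notify_finish_breakpointing() and yield, instead of stopping the
 * world with erts_smp_thr_progress_block() as the deleted lines did.
 */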
+#ifdef ERTS_SMP + if (finish_bp.current >= 0) { + ASSERT(matches >= 0); + erts_notify_finish_breakpointing(p); + erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL); + ERTS_BIF_YIELD_RETURN(p, make_small(matches)); + } +#endif + erts_release_code_write_permission(); if (matches >= 0) { @@ -355,6 +374,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, struct trace_pattern_flags *trace_pattern_flags, Eterm *meta_tracer_pid) { + ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked() || + erts_smp_thr_progress_is_blocking()); if (trace_pattern_is_on) *trace_pattern_is_on = erts_default_trace_pattern_is_on; if (match_spec) @@ -369,6 +390,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, int erts_is_default_trace_enabled(void) { + ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked() || + erts_smp_thr_progress_is_blocking()); return erts_default_trace_pattern_is_on; } @@ -842,6 +865,11 @@ Eterm trace_info_2(BIF_ALIST_2) Eterm What = BIF_ARG_1; Eterm Key = BIF_ARG_2; Eterm res; + + if (!erts_try_seize_code_write_permission(p)) { + ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, What, Key); + } + if (What == am_on_load) { res = trace_info_on_load(p, Key); } else if (is_atom(What) || is_pid(What)) { @@ -849,8 +877,10 @@ Eterm trace_info_2(BIF_ALIST_2) } else if (is_tuple(What)) { res = trace_info_func(p, What, Key); } else { + erts_release_code_write_permission(); BIF_ERROR(p, BADARG); } + erts_release_code_write_permission(); BIF_RET(res); } @@ -978,64 +1008,54 @@ static int function_is_traced(Process *p, Binary **ms, /* out */ Binary **ms_meta, /* out */ Eterm *tracer_pid_meta, /* out */ - Sint *count, /* out */ + Uint *count, /* out */ Eterm *call_time) /* out */ { Export e; Export* ep; - int i; - BeamInstr *code; + BeamInstr* pc; /* First look for an export entry */ e.code[0] = mfa[0]; e.code[1] = mfa[1]; e.code[2] = mfa[2]; if ((ep = export_get(&e)) != NULL) { - if (ep->addressv[erts_active_code_ix()] == ep->code+3 && - ep->code[3] != (BeamInstr) em_call_error_handler) { - if (ep->code[3] == (BeamInstr) em_call_traced_function) { - *ms = ep->match_prog_set; + pc = ep->code+3; + if (ep->addressv[erts_active_code_ix()] == pc && + *pc != (BeamInstr) em_call_error_handler) { + + int r = 0; + + ASSERT(*pc == (BeamInstr) em_apply_bif || + *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)); + + if (erts_is_trace_break(pc, ms, 0)) { return FUNC_TRACE_GLOBAL_TRACE; } - if (ep->code[3] == (BeamInstr) em_apply_bif) { - for (i = 0; i < BIF_SIZE; ++i) { - if (bif_export[i] == ep) { - int r = 0; - - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) { - *ms = ep->match_prog_set; - return FUNC_TRACE_GLOBAL_TRACE; - } else { - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) { - r |= FUNC_TRACE_LOCAL_TRACE; - *ms = ep->match_prog_set; - } - if (erts_is_mtrace_break(ep->code+3, ms_meta, - tracer_pid_meta)) { - r |= FUNC_TRACE_META_TRACE; - } - if (erts_is_time_break(p, ep->code+3, call_time)) { - r |= FUNC_TRACE_TIME_TRACE; - } - } - return r ? r : FUNC_TRACE_UNTRACED; - } - } - erl_exit(1,"Impossible ghost bif encountered in trace_info."); + + if (erts_is_trace_break(pc, ms, 1)) { + r |= FUNC_TRACE_LOCAL_TRACE; + } + if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) { + r |= FUNC_TRACE_META_TRACE; } + if (erts_is_time_break(p, pc, call_time)) { + r |= FUNC_TRACE_TIME_TRACE; + } + return r ? 
r : FUNC_TRACE_UNTRACED; } } /* OK, now look for breakpoint tracing */ - if ((code = erts_find_local_func(mfa)) != NULL) { + if ((pc = erts_find_local_func(mfa)) != NULL) { int r = - (erts_is_trace_break(code, ms, NULL) + (erts_is_trace_break(pc, ms, 1) ? FUNC_TRACE_LOCAL_TRACE : 0) - | (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta) + | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta) ? FUNC_TRACE_META_TRACE : 0) - | (erts_is_count_break(code, count) + | (erts_is_count_break(pc, count) ? FUNC_TRACE_COUNT_TRACE : 0) - | (erts_is_time_break(p, code, call_time) + | (erts_is_time_break(p, pc, call_time) ? FUNC_TRACE_TIME_TRACE : 0); return r ? r : FUNC_TRACE_UNTRACED; @@ -1050,7 +1070,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key) Eterm* hp; DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */ Binary *ms = NULL, *ms_meta = NULL; - Sint count = 0; + Uint count = 0; Eterm traced = am_false; Eterm match_spec = am_false; Eterm retval = am_false; @@ -1138,9 +1158,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key) break; case am_call_count: if (r & FUNC_TRACE_COUNT_TRACE) { - retval = count < 0 ? - erts_make_integer(-count-1, p) : - erts_make_integer(count, p); + retval = erts_make_integer(count, p); } break; case am_call_time: @@ -1329,39 +1347,46 @@ trace_info_on_load(Process* p, Eterm key) #undef FUNC_TRACE_LOCAL_TRACE int -erts_set_trace_pattern(Eterm* mfa, int specified, +erts_set_trace_pattern(Process*p, Eterm* mfa, int specified, Binary* match_prog_set, Binary *meta_match_prog_set, int on, struct trace_pattern_flags flags, - Eterm meta_tracer_pid) + Eterm meta_tracer_pid, int is_blocking) { const ErtsCodeIndex code_ix = erts_active_code_ix(); int matches = 0; int i; + int n; + BpFunction* fp; /* * First work on normal functions (not real BIFs). */ - - for (i = 0; i < export_list_size(code_ix); i++) { - Export* ep = export_list(i, code_ix); - int j; - - if (ExportIsBuiltIn(ep)) { - continue; - } - - for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) { - /* Empty loop body */ - } - if (j == specified) { - if (on) { - if (! flags.breakpoint) - matches += setup_func_trace(ep, match_prog_set, code_ix); - else - reset_func_trace(ep, code_ix); - } else if (! flags.breakpoint) { - matches += reset_func_trace(ep, code_ix); + erts_bp_match_export(&finish_bp.e, mfa, specified); + fp = finish_bp.e.matching; + n = finish_bp.e.matched; + + for (i = 0; i < n; i++) { + BeamInstr* pc = fp[i].pc; + Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code)); + + if (!on || flags.breakpoint) { + erts_clear_call_trace_bif(pc, 0); + if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { + pc[0] = (BeamInstr) BeamOp(op_jump_f); + } + } else { + if (ep->addressv[code_ix] != pc) { + fp[i].mod->curr.num_traced_exports++; +#ifdef DEBUG + pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI); +#endif + pc[0] = (BeamInstr) BeamOp(op_jump_f); + pc[1] = (BeamInstr) ep->addressv[code_ix]; + } + erts_set_call_trace_bif(pc, match_prog_set, 0); + if (ep->addressv[code_ix] != pc) { + pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint); } } } @@ -1386,26 +1411,15 @@ erts_set_trace_pattern(Eterm* mfa, int specified, /* Empty loop body */ } if (j == specified) { + BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3; + if (! 
flags.breakpoint) { /* Export entry call trace */ if (on) { - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) { - ASSERT(ExportIsBuiltIn(bif_export[i])); - erts_clear_mtrace_bif - ((BeamInstr *)bif_export[i]->code + 3); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META; - } - set_trace_bif(i, match_prog_set); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL; - erts_bif_trace_flags[i] |= BIF_TRACE_AS_GLOBAL; - setup_bif_trace(i); + erts_clear_call_trace_bif(pc, 1); + erts_clear_mtrace_bif(pc); + erts_set_call_trace_bif(pc, match_prog_set, 0); } else { /* off */ - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) { - clear_trace_bif(i); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL; - } - if (! erts_bif_trace_flags[i]) { - reset_bif_trace(i); - } + erts_clear_call_trace_bif(pc, 0); } matches++; } else { /* Breakpoint call trace */ @@ -1413,52 +1427,33 @@ erts_set_trace_pattern(Eterm* mfa, int specified, if (on) { if (flags.local) { - set_trace_bif(i, match_prog_set); - erts_bif_trace_flags[i] |= BIF_TRACE_AS_LOCAL; - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL; + erts_clear_call_trace_bif(pc, 0); + erts_set_call_trace_bif(pc, match_prog_set, 1); m = 1; } if (flags.meta) { - erts_set_mtrace_bif - ((BeamInstr *)bif_export[i]->code + 3, - meta_match_prog_set, meta_tracer_pid); - erts_bif_trace_flags[i] |= BIF_TRACE_AS_META; - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL; + erts_set_mtrace_bif(pc, meta_match_prog_set, + meta_tracer_pid); m = 1; } if (flags.call_time) { - erts_set_time_trace_bif(bif_export[i]->code + 3, on); + erts_set_time_trace_bif(pc, on); /* I don't want to remove any other tracers */ - erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME; m = 1; } - if (erts_bif_trace_flags[i]) { - setup_bif_trace(i); - } } else { /* off */ if (flags.local) { - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) { - clear_trace_bif(i); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL; - } + erts_clear_call_trace_bif(pc, 1); m = 1; } if (flags.meta) { - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) { - erts_clear_mtrace_bif - ((BeamInstr *)bif_export[i]->code + 3); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META; - } + erts_clear_mtrace_bif(pc); m = 1; } if (flags.call_time) { - erts_clear_time_trace_bif(bif_export[i]->code + 3); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME; + erts_clear_time_trace_bif(pc); m = 1; } - if (! erts_bif_trace_flags[i]) { - reset_bif_trace(i); - } } matches += m; } @@ -1468,186 +1463,242 @@ erts_set_trace_pattern(Eterm* mfa, int specified, /* ** So, now for breakpoint tracing */ + erts_bp_match_functions(&finish_bp.f, mfa, specified); if (on) { if (! 
flags.breakpoint) {
-	    erts_clear_trace_break(mfa, specified);
-	    erts_clear_mtrace_break(mfa, specified);
-	    erts_clear_count_break(mfa, specified);
-	    erts_clear_time_break(mfa, specified);
+	    erts_clear_all_breaks(&finish_bp.f);
 	} else {
-	    int m = 0;
 	    if (flags.local) {
-		m = erts_set_trace_break(mfa, specified, match_prog_set,
-					 am_true);
+		erts_set_trace_break(&finish_bp.f, match_prog_set);
 	    }
 	    if (flags.meta) {
-		m = erts_set_mtrace_break(mfa, specified, meta_match_prog_set,
-					  meta_tracer_pid);
+		erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
+				      meta_tracer_pid);
 	    }
 	    if (flags.call_count) {
-		m = erts_set_count_break(mfa, specified, on);
+		erts_set_count_break(&finish_bp.f, on);
 	    }
 	    if (flags.call_time) {
-		m = erts_set_time_break(mfa, specified, on);
+		erts_set_time_break(&finish_bp.f, on);
 	    }
-	    /* All assignments to 'm' above should give the same value,
-	     * so just use the last */
-	    matches += m;
 	}
     } else {
-	int m = 0;
 	if (flags.local) {
-	    m = erts_clear_trace_break(mfa, specified);
+	    erts_clear_trace_break(&finish_bp.f);
 	}
 	if (flags.meta) {
-	    m = erts_clear_mtrace_break(mfa, specified);
+	    erts_clear_mtrace_break(&finish_bp.f);
 	}
 	if (flags.call_count) {
-	    m = erts_clear_count_break(mfa, specified);
+	    erts_clear_count_break(&finish_bp.f);
 	}
 	if (flags.call_time) {
-	    m = erts_clear_time_break(mfa, specified);
+	    erts_clear_time_break(&finish_bp.f);
 	}
-	/* All assignments to 'm' above should give the same value,
-	 * so just use the last */
-	matches += m;
     }
+    finish_bp.current = 0;
+    finish_bp.install = on;
+    finish_bp.local = flags.breakpoint;
+
+#ifdef ERTS_SMP
+    if (is_blocking) {
+	ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+#endif
+	while (erts_finish_breakpointing()) {
+	    /* Empty loop body */
+	}
+#ifdef ERTS_SMP
+	finish_bp.current = -1;
+    }
+#endif
+
+    if (flags.breakpoint) {
+	matches += finish_bp.f.matched;
+    } else {
+	matches += finish_bp.e.matched;
+    }
     return matches;
 }
 
-/*
- * Setup function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- *               0 if the entry refers to a function not loaded.
- */
-
-static int
-setup_func_trace(Export* ep, void* match_prog, ErtsCodeIndex code_ix)
+int
+erts_finish_breakpointing(void)
 {
-    Module* modp;
-
-    if (ep->addressv[code_ix] == ep->code+3) {
-	if (ep->code[3] == (BeamInstr) em_call_error_handler) {
-	    return 0;
-	} else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
-	    MatchSetUnref(ep->match_prog_set);
-	    ep->match_prog_set = match_prog;
-	    MatchSetRef(ep->match_prog_set);
-	    return 1;
-	} else {
-	    /*
-	     * We ignore apply/3 and anything else.
-	     */
-	    return 0;
-	}
-    }
-
+    ERTS_SMP_LC_ASSERT(erts_is_code_ix_locked());
+
     /*
-     * Currently no trace support for native code.
+     * Memory barriers will be issued for all processes *before*
+     * each of the stages below. (Unless the other schedulers
+     * are blocked, in which case memory barriers will be issued
+     * when they are awakened.)
      */
-    if (erts_is_native_break(ep->addressv[code_ix])) {
+
+    switch (finish_bp.current++) {
+    case 0:
+	/*
+	 * At this point, in all functions that are to be breakpointed,
+	 * a pointer to a GenericBp struct has already been added.
+	 *
+	 * Insert the new breakpoints (if any) into the
+	 * code. Different schedulers may see the breakpoint
+	 * instruction at different times, but it does not matter
+	 * since the newly added breakpoints are disabled.
+	 */
+	if (finish_bp.install) {
+	    if (finish_bp.local) {
+		erts_install_breakpoints(&finish_bp.f);
+	    } else {
+		install_exp_breakpoints(&finish_bp.e);
+	    }
+	}
+	setup_bif_trace();
+	return 1;
+    case 1:
+	/*
+	 * Switch index for the breakpoint data, activating the staged
+	 * data. (Depending on the changes in the breakpoint data,
+	 * that could either activate breakpoints or disable
+	 * breakpoints.)
+	 */
+	erts_commit_staged_bp();
+	return 1;
+    case 2:
+	/*
+	 * Remove breakpoint instructions for disabled breakpoints
+	 * (if any).
+	 */
+	if (finish_bp.install) {
+	    if (finish_bp.local) {
+		uninstall_exp_breakpoints(&finish_bp.e);
+	    } else {
+		erts_uninstall_breakpoints(&finish_bp.f);
+	    }
+	} else {
+	    if (finish_bp.local) {
+		erts_uninstall_breakpoints(&finish_bp.f);
+	    } else {
+		uninstall_exp_breakpoints(&finish_bp.e);
+	    }
+	}
+	reset_bif_trace();
+	return 1;
+    case 3:
+	/*
+	 * Now all breakpoints have either been inserted or removed.
+	 * For all updated breakpoints, copy the active breakpoint
+	 * data to the staged breakpoint data to make them equal
+	 * (simplifying for the next time breakpoints are to be
+	 * updated). If any breakpoints have been totally disabled,
+	 * deallocate the GenericBp structs for them.
+	 */
+	erts_consolidate_bif_bp_data();
+	clean_export_entries(&finish_bp.e);
+	erts_consolidate_bp_data(&finish_bp.e, 0);
+	erts_consolidate_bp_data(&finish_bp.f, 1);
+	erts_bp_free_matched_functions(&finish_bp.e);
+	erts_bp_free_matched_functions(&finish_bp.f);
 	return 0;
+    default:
+	ASSERT(0);
     }
     return 0;
+}
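
erts_finish_breakpointing() above relies on the breakpoint data being double-buffered: writers fill in a staging copy, and erts_commit_staged_bp() publishes it by flipping an index, with a thread-progress wait between the stages so no scheduler can read a half-updated copy. A generic sketch of that staging idea, with hypothetical names (not the VM's actual types or functions):

    #include <stdatomic.h>

    /* Two copies of the breakpoint settings; readers use the active
     * copy, writers prepare the staging copy. All names hypothetical. */
    typedef struct { unsigned flags; /* ERTS_BPF_*-style bits */ } Settings;

    static Settings data[2];
    static atomic_uint active_ix;   /* 0 or 1 */

    static Settings *staging(void) { return &data[atomic_load(&active_ix) ^ 1]; }
    static Settings *active(void)  { return &data[atomic_load(&active_ix)]; }

    static void commit_staged(void)
    {
        /* Publish the staging copy with one atomic flip. Readers that
         * loaded the old index still see consistent old data; the old
         * copy may only be reused after every scheduler has passed a
         * memory barrier (the thread-progress waits between stages). */
        atomic_store(&active_ix, atomic_load(&active_ix) ^ 1);
    }
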
-
-    ep->code[3] = (BeamInstr) em_call_traced_function;
-    ep->code[4] = (BeamInstr) ep->addressv[code_ix];
-    ep->addressv[code_ix] = ep->code+3;
-    ep->match_prog_set = match_prog;
-    MatchSetRef(ep->match_prog_set);
+static void
+install_exp_breakpoints(BpFunctions* f)
+{
+    const ErtsCodeIndex code_ix = erts_active_code_ix();
+    BpFunction* fp = f->matching;
+    Uint ne = f->matched;
+    Uint i;
+    Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
 
-    modp = erts_get_module(ep->code[0], code_ix);
-    ASSERT(modp);
-    modp->curr.num_traced_exports++;
-    return 1;
-}
+    for (i = 0; i < ne; i++) {
+	BeamInstr* pc = fp[i].pc;
+	Export* ep = (Export *) (((char *)pc)-offset);
 
-static void setup_bif_trace(int bif_index) {
-    Export *ep = bif_export[bif_index];
-
-    ASSERT(ExportIsBuiltIn(ep));
-    ASSERT(ep->code[4]);
-    ep->code[4] = (BeamInstr) bif_table[bif_index].traced;
+	ep->addressv[code_ix] = pc;
+    }
 }
 
-static void set_trace_bif(int bif_index, void* match_prog) {
-    Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
-    erts_fprintf(stderr, "set_trace_bif: %T:%T/%bpu\n",
-		 ep->code[0], ep->code[1], ep->code[2]);
-#endif
-    ASSERT(ExportIsBuiltIn(ep));
-    MatchSetUnref(ep->match_prog_set);
-    ep->match_prog_set = match_prog;
-    MatchSetRef(ep->match_prog_set);
-}
+static void
+uninstall_exp_breakpoints(BpFunctions* f)
+{
+    const ErtsCodeIndex code_ix = erts_active_code_ix();
+    BpFunction* fp = f->matching;
+    Uint ne = f->matched;
+    Uint i;
+    Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
 
-/*
- * Reset function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- *               0 if the entry refers to a function not loaded.
- */ + for (i = 0; i < ne; i++) { + BeamInstr* pc = fp[i].pc; + Export* ep = (Export *) (((char *)pc)-offset); -static int -reset_func_trace(Export* ep, ErtsCodeIndex code_ix) -{ - if (ep->addressv[code_ix] == ep->code+3) { - if (ep->code[3] == (BeamInstr) em_call_error_handler) { - return 0; - } else if (ep->code[3] == (BeamInstr) em_call_traced_function) { - Module* modp = erts_get_module(ep->code[0], code_ix); - ASSERT(modp); - modp->curr.num_traced_exports--; - - ep->addressv[code_ix] = (Uint *) ep->code[4]; - MatchSetUnref(ep->match_prog_set); - ep->match_prog_set = NULL; - return 1; - } else { - /* - * We ignore apply/3 and anything else. - */ - return 0; + if (ep->addressv[code_ix] != pc) { + continue; } + ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f)); + ep->addressv[code_ix] = (BeamInstr *) ep->code[4]; } - - /* - * Currently no trace support for native code. - */ - if (erts_is_native_break(ep->addressv[code_ix])) { - return 0; - } - - /* - * Nothing to do, but the export entry matches. - */ +} + +static void +clean_export_entries(BpFunctions* f) +{ + const ErtsCodeIndex code_ix = erts_active_code_ix(); + BpFunction* fp = f->matching; + Uint ne = f->matched; + Uint i; + Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr); + + for (i = 0; i < ne; i++) { + BeamInstr* pc = fp[i].pc; + Export* ep = (Export *) (((char *)pc)-offset); - return 1; + if (ep->addressv[code_ix] == pc) { + continue; + } + if (*pc == (BeamInstr) BeamOp(op_jump_f)) { + ep->code[3] = (BeamInstr) 0; + ep->code[4] = (BeamInstr) 0; + } + } } -static void reset_bif_trace(int bif_index) { - Export *ep = bif_export[bif_index]; - - ASSERT(ExportIsBuiltIn(ep)); - ASSERT(ep->code[4]); - ASSERT(! ep->match_prog_set); - ASSERT(! erts_is_mtrace_break((BeamInstr *)ep->code+3, NULL, NULL)); - ep->code[4] = (BeamInstr) bif_table[bif_index].f; +static void +setup_bif_trace(void) +{ + int i; + + for (i = 0; i < BIF_SIZE; ++i) { + Export *ep = bif_export[i]; + GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1]; + if (g) { + if (ExportIsBuiltIn(ep)) { + ASSERT(ep->code[4]); + ep->code[4] = (BeamInstr) bif_table[i].traced; + } + } + } } -static void clear_trace_bif(int bif_index) { - Export *ep = bif_export[bif_index]; - -#ifdef HARDDEBUG - erts_fprintf(stderr, "clear_trace_bif: %T:%T/%bpu\n", - ep->code[0], ep->code[1], ep->code[2]); -#endif - ASSERT(ExportIsBuiltIn(ep)); - MatchSetUnref(ep->match_prog_set); - ep->match_prog_set = NULL; +static void +reset_bif_trace(void) +{ + int i; + ErtsBpIndex active = erts_active_bp_ix(); + + for (i = 0; i < BIF_SIZE; ++i) { + Export *ep = bif_export[i]; + BeamInstr* pc = ep->code+3; + GenericBp* g = (GenericBp *) pc[-4]; + if (g && g->data[active].flags == 0) { + if (ExportIsBuiltIn(ep)) { + ASSERT(ep->code[4]); + ep->code[4] = (BeamInstr) bif_table[i].f; + } + } + } } /* diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c index d7d6fcf0a2..2121f72fd2 100644 --- a/erts/emulator/beam/erl_debug.c +++ b/erts/emulator/beam/erl_debug.c @@ -25,7 +25,6 @@ #include "erl_vm.h" #include "global.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "big.h" #include "bif.h" #include "beam_catches.h" @@ -33,34 +32,9 @@ #define WITHIN(ptr, x, y) ((x) <= (ptr) && (ptr) < (y)) -#if defined(HYBRID) -#if defined(INCREMENTAL) -/* Hybrid + Incremental */ -#define IN_HEAP(p, ptr) \ - (WITHIN((ptr), p->heap, p->hend) || \ - (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))) || \ - WITHIN((ptr), global_heap, global_hend) || \ - (inc_fromspc && WITHIN((ptr), 
inc_fromspc, inc_fromend)) || \ - WITHIN((ptr), global_old_heap, global_old_hend)) - -#define IN_MA(ptr) \ - (WITHIN((ptr), global_heap, global_hend) || \ - (inc_fromspc && WITHIN((ptr), inc_fromspc, inc_fromend)) || \ - WITHIN((ptr), global_old_heap, global_old_hend)) -#else -/* Hybrid */ -#define IN_HEAP(p, ptr) \ - (WITHIN((ptr), p->heap, p->hend) || \ - (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p))) || \ - WITHIN((ptr), global_heap, global_hend) || \ - (global_old_heap && WITHIN((ptr),global_old_heap,global_old_hend))) -#endif -#else -/* Private */ #define IN_HEAP(p, ptr) \ (WITHIN((ptr), p->heap, p->hend) || \ (OLD_HEAP(p) && WITHIN((ptr), OLD_HEAP(p), OLD_HEND(p)))) -#endif #ifdef __GNUC__ @@ -266,13 +240,6 @@ static int verify_eterm(Process *p,Eterm element) } } } -#ifdef INCREMENTAL - else { - if (IN_MA(ptr)) - return 1; - } -#endif - return 0; } @@ -447,51 +414,12 @@ void verify_process(Process *p) VERIFY_ETERM("fvalue",p->fvalue); VERIFY_ETERM("ftrace",p->ftrace); -#ifdef HYBRID - VERIFY_AREA("rrma",p->rrma,p->nrr); -#endif - VERBOSE(DEBUG_MEMORY,("...done\n")); #undef VERIFY_AREA #undef VERIFY_ETERM } -void verify_everything() -{ -#ifdef HYBRID - Uint i; - Uint n = erts_num_active_procs; - -#ifdef INCREMENTAL_FREE_SIZES_NEEDS_TO_BE_TAGGED_AS_HEADERS_WITH_ARITY - INC_Page *page = inc_used_mem; -#endif - - for (i = 0; i < n; i++) { - verify_process(erts_active_procs[i]); - } - - erts_check_memory(NULL,global_heap,global_htop); - -#ifdef INCREMENTAL_FREE_SIZES_NEEDS_TO_BE_TAGGED_AS_HEADERS_WITH_ARITY - while (page) - { - Eterm *end = page + INC_PAGE_SIZE; - Eterm *pos = page->start; - - while( pos < end) { - Eterm val = *pos++; - if(is_header(val)) - pos += thing_arityval(val); - else - verify_eterm(NULL,val); - } - page = page->next; - } -#endif -#endif /* HYBRID */ -} - /* * print_untagged_memory will print the contents of given memory area. 
*/ @@ -582,83 +510,6 @@ void print_tagged_memory(Eterm *pos, Eterm *end) erts_printf("+-%s-+-%s-+\n",dashes,dashes); } -#ifdef HYBRID -void print_ma_info(void) -{ - erts_printf("Message Area (start - top - end): " - "0x%0*lx - 0x%0*lx - 0x%0*lx\n", - PTR_SIZE, (unsigned long)global_heap, - PTR_SIZE, (unsigned long)global_htop, - PTR_SIZE, (unsigned long)global_hend); -#ifndef INCREMENTAL - erts_printf(" High water: 0x%0*lx " - "Old gen: 0x%0*lx - 0x%0*lx - 0x%0*lx\n", - PTR_SIZE, (unsigned long)global_high_water, - PTR_SIZE, (unsigned long)global_old_heap, - PTR_SIZE, (unsigned long)global_old_htop, - PTR_SIZE, (unsigned long)global_old_hend); -#endif -} - -void print_message_area(void) -{ - Eterm *pos = global_heap; - Eterm *end = global_htop; - - erts_printf("From: 0x%0*lx to 0x%0*lx\n", - PTR_SIZE,(unsigned long)pos,PTR_SIZE,(unsigned long)end); - erts_printf("(Old generation: 0x%0*lx to 0x%0*lx\n", - PTR_SIZE, (unsigned long)global_old_heap, - PTR_SIZE, (unsigned long)global_old_hend); - erts_printf("| %-*s | %-*s |\n",PTR_SIZE,"Address",PTR_SIZE,"Contents"); - erts_printf("|-%s-|-%s-|\n",dashes,dashes); - while( pos < end ) { - Eterm val = pos[0]; - erts_printf("| 0x%0*lx | 0x%0*lx | ", - PTR_SIZE,(unsigned long)pos,PTR_SIZE,(unsigned long)val); - ++pos; - if( is_arity_value(val) ) { - erts_printf("Arity(%lu)", arityval(val)); - } else if( is_thing(val) ) { - unsigned int ari = thing_arityval(val); - erts_printf("Thing Arity(%u) Tag(%lu)", ari, thing_subtag(val)); - while( ari ) { - erts_printf("\n| 0x%0*lx | 0x%0*lx | THING", - PTR_SIZE, (unsigned long)pos, - PTR_SIZE, (unsigned long)*pos); - ++pos; - --ari; - } - } else - erts_printf("%.30T", val); - erts_printf("\n"); - } - erts_printf("+-%s-+-%s-+\n",dashes,dashes); -} - -void check_message_area() -{ - Eterm *pos = global_heap; - Eterm *end = global_htop; - - while( pos < end ) { - Eterm val = *pos++; - if(is_header(val)) - pos += thing_arityval(val); - else if(!is_immed(val)) - if ((ptr_val(val) < global_heap || ptr_val(val) >= global_htop) && - (ptr_val(val) < global_old_heap || - ptr_val(val) >= global_old_hend)) - { - erts_printf("check_message_area: Stray pointer found\n"); - print_message_area(); - erts_printf("Crashing to make it look real...\n"); - pos = 0; - } - } -} -#endif /* HYBRID */ - static void print_process_memory(Process *p); static void print_process_memory(Process *p) { @@ -703,19 +554,6 @@ static void print_process_memory(Process *p) erts_printf(" Fvalue: 0x%0*lx\n",PTR_SIZE,p->fvalue); erts_printf(" Ftrace: 0x%0*lx\n",PTR_SIZE,p->ftrace); -#ifdef HYBRID - if (p->nrr > 0) { - int i; - erts_printf(" Remembered Roots:\n"); - for (i = 0; i < p->nrr; i++) - if (p->rrsrc[i] != NULL) - erts_printf("0x%0*lx -> 0x%0*lx\n", - PTR_SIZE, (unsigned long)p->rrsrc[i], - PTR_SIZE, (unsigned long)p->rrma[i]); - erts_printf("\n"); - } -#endif - erts_printf("+- %-*s -+ 0x%0*lx 0x%0*lx %s-%s-+\n", PTR_SIZE, "Stack", PTR_SIZE, (unsigned long)STACK_TOP(p), @@ -757,92 +595,6 @@ void print_memory(Process *p) if (p != NULL) { print_process_memory(p); } -#ifdef HYBRID - else { - Uint i; - Uint n = erts_num_active_procs; - - for (i = 0; i < n; i++) { - Process *p = erts_active_procs[i]; - print_process_memory(p); - } - - erts_printf("==================\n"); - erts_printf("|| Message area ||\n"); - erts_printf("==================\n"); - erts_printf("+-%s-+-%s-%s-%s-%s-+\n", - dashes,dashes,dashes,dashes,dashes); - erts_printf("| %-*s | 0x%0*lx - 0x%0*lx - 0x%0*lx%*s|\n", - PTR_SIZE, "Young", - PTR_SIZE, (unsigned long)global_heap, - 
PTR_SIZE, (unsigned long)global_htop,
-		    PTR_SIZE, (unsigned long)global_hend,
-		    PTR_SIZE, "");
-	erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
-		    dashes,dashes,dashes,dashes,dashes);
-
-	print_untagged_memory(global_heap,global_htop);
-
-
-	erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
-		    dashes,dashes,dashes,dashes,dashes);
-	erts_printf("| %-*s | 0x%0*lx - 0x%0*lx %*s |\n",
-		    PTR_SIZE, "Old",
-		    PTR_SIZE, (unsigned long)global_old_heap,
-		    PTR_SIZE, (unsigned long)global_old_hend,
-		    2 * PTR_SIZE, "");
-	erts_printf("+-%s-+-%s-%s-%s-%s-+\n",
-		    dashes,dashes,dashes,dashes,dashes);
-
-#ifdef INCREMENTAL
-	{
-	    INC_Page *page = inc_used_mem;
-	    /* By walking the free-list first we could mark the areas
-	       that are not allocated and print only those that are
-	       live.
-	       char markarea[INC_PAGESIZE];
-	    */
-
-	    while (page) {
-		Eterm *ptr = (Eterm*)page->start;
-		Eterm *end = (Eterm*)page->start + INC_PAGESIZE;
-
-		erts_printf("| %*s | This: 0x%0*lx Next: 0x%0*lx %*s|\n",
-			    PTR_SIZE, "",
-			    PTR_SIZE, (unsigned long)page,
-			    PTR_SIZE, (unsigned long)page->next,
-			    2 * PTR_SIZE - 8, "");
-		print_untagged_memory(ptr,end);
-		page = page->next;
-	    }
-	}
-
-	{
-	    INC_MemBlock *this = inc_free_list;
-
-	    erts_printf("-- %-*s --%s-%s-%s-%s-\n",PTR_SIZE+2,"Free list",
-			dashes,dashes,dashes,dashes);
-	    while (this) {
-		erts_printf("Block @ 0x%0*lx sz: %8d prev: 0x%0*lx next: 0x%0*lx\n",
-			    PTR_SIZE, (unsigned long)this,this->size,
-			    PTR_SIZE, (unsigned long)this->prev,
-			    PTR_SIZE, (unsigned long)this->next);
-		this = this->next;
-	    }
-	    erts_printf("--%s---%s-%s-%s-%s--\n",
-			dashes,dashes,dashes,dashes,dashes);
-	}
-
-	if (inc_fromspc != NULL) {
-	    erts_printf("-- fromspace - 0x%0*lx 0x%0*lx "
-			"------------------------------\n",
-			PTR_SIZE, (unsigned long)inc_fromspc,
-			PTR_SIZE, (unsigned long)inc_fromend);
-	    print_untagged_memory(inc_fromspc,inc_fromend);
-	}
-#endif /* INCREMENTAL */
-    }
-#endif /* HYBRID */
 }
 
 void print_memory_info(Process *p)
@@ -869,26 +621,6 @@
 	erts_printf("|| Memory info ||\n");
 	erts_printf("=================\n");
     }
-#ifdef HYBRID
-    erts_printf("|- message area --%s-%s-%s-%s-|\n",
-		dashes,dashes,dashes,dashes);
-    erts_printf("| Young | 0x%0*lx - 0x%0*lx - 0x%0*lx %*s |\n",
-		PTR_SIZE, (unsigned long)global_heap,
-		PTR_SIZE, (unsigned long)global_htop,
-		PTR_SIZE, (unsigned long)global_hend,
-		PTR_SIZE, "");
-    erts_printf("| Old   | 0x%0*lx - 0x%0*lx %*s |\n",
-		PTR_SIZE, (unsigned long)global_old_heap,
-		PTR_SIZE, (unsigned long)global_old_hend,
-		2 * PTR_SIZE, "");
-#endif
-#ifdef INCREMENTAL
-    if (inc_fromspc != NULL)
-	erts_printf("| Frmsp | 0x%0*lx - 0x%0*lx %*s |\n",
-		    PTR_SIZE, (unsigned long)inc_fromspc,
-		    PTR_SIZE, (unsigned long)inc_fromend,
-		    2 * PTR_SIZE, "");
-#endif
     erts_printf("+-----------------%s-%s-%s-%s-+\n",dashes,dashes,dashes,dashes);
 }
 
 #if !HEAP_ON_C_STACK && defined(DEBUG)
diff --git a/erts/emulator/beam/erl_debug.h b/erts/emulator/beam/erl_debug.h
index c49354a2b3..a028a95fef 100644
--- a/erts/emulator/beam/erl_debug.h
+++ b/erts/emulator/beam/erl_debug.h
@@ -42,12 +42,11 @@
 #define DEBUG_DEFAULT 0x0000 /* No flags are set per default */
 #define DEBUG_SYSTEM 0x0001 /* Misc system info at startup and end */
 #define DEBUG_PRIVATE_GC 0x0002 /* GC of private heaps */
-#define DEBUG_HYBRID_GC 0x0004 /* GC of the message area */
-#define DEBUG_ALLOCATION 0x0008 /* HAlloc.
To find holes in the heap */ -#define DEBUG_MESSAGES 0x0010 /* Message passing */ -#define DEBUG_THREADS 0x0020 /* Thread-related stuff */ -#define DEBUG_PROCESSES 0x0040 /* Process creation and removal */ -#define DEBUG_MEMORY 0x0080 /* Display results of memory checks */ +#define DEBUG_ALLOCATION 0x0004 /* HAlloc. To find holes in the heap */ +#define DEBUG_MESSAGES 0x0008 /* Message passing */ +#define DEBUG_THREADS 0x0010 /* Thread-related stuff */ +#define DEBUG_PROCESSES 0x0020 /* Process creation and removal */ +#define DEBUG_MEMORY 0x0040 /* Display results of memory checks */ extern Uint32 verbose; @@ -88,7 +87,6 @@ extern void erts_check_stack(Process *p); extern void erts_check_heap(Process *p); extern void erts_check_memory(Process *p, Eterm *start, Eterm *end); extern void verify_process(Process *p); -extern void verify_everything(void); extern void print_tagged_memory(Eterm *start, Eterm *end); extern void print_untagged_memory(Eterm *start, Eterm *end); extern void print_memory(Process *p); @@ -99,10 +97,4 @@ extern void erts_debug_use_tmp_heap(int, Process *); extern void erts_debug_unuse_tmp_heap(int, Process *); #endif -#ifdef HYBRID -extern void print_ma_info(void); -extern void print_message_area(void); -extern void check_message_area(void); -#endif - #endif /* _ERL_DEBUG_H_ */ diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h index 1ae9a211d7..771ee46d2b 100644 --- a/erts/emulator/beam/erl_driver.h +++ b/erts/emulator/beam/erl_driver.h @@ -87,10 +87,7 @@ #include <stdlib.h> #include <string.h> /* ssize_t on Mac OS X */ -#if defined(VXWORKS) -# include <ioLib.h> -typedef struct iovec SysIOVec; -#elif defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_) +#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_) #ifndef STATIC_ERLANG_DRIVER /* Windows dynamic drivers, everything is different... */ #define ERL_DRIVER_TYPES_ONLY @@ -370,11 +367,7 @@ typedef struct erl_drv_entry { /* For windows dynamic drivers */ #ifndef ERL_DRIVER_TYPES_ONLY -#if defined(VXWORKS) -# define DRIVER_INIT(DRIVER_NAME) \ - ErlDrvEntry* DRIVER_NAME ## _init(void); \ - ErlDrvEntry* DRIVER_NAME ## _init(void) -#elif defined(__WIN32__) +#if defined(__WIN32__) # define DRIVER_INIT(DRIVER_NAME) \ __declspec(dllexport) ErlDrvEntry* driver_init(void); \ __declspec(dllexport) ErlDrvEntry* driver_init(void) diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h index 6023fa0448..217066ab11 100644 --- a/erts/emulator/beam/erl_fun.h +++ b/erts/emulator/beam/erl_fun.h @@ -54,9 +54,7 @@ typedef struct erl_fun_entry { typedef struct erl_fun_thing { Eterm thing_word; /* Subtag FUN_SUBTAG. */ ErlFunEntry* fe; /* Pointer to fun entry. */ -#ifndef HYBRID /* FIND ME! */ struct erl_off_heap_header* next; -#endif #ifdef HIPE UWord* native_address; /* Native code for the fun. */ #endif @@ -81,9 +79,7 @@ ErlFunEntry* erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index, byte* uniq, int index, int arity); void erts_erase_fun_entry(ErlFunEntry* fe); -#ifndef HYBRID /* FIND ME! 
*/ void erts_cleanup_funs(ErlFunThing* funp); -#endif void erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end); void erts_dump_fun_entries(int, void *); diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index f88d485448..d382e421e6 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -34,7 +34,6 @@ #include "erl_binary.h" #include "dist.h" #include "erl_mseg.h" -#include "erl_nmgc.h" #include "erl_threads.h" #include "erl_bif_timer.h" #include "erl_instrument.h" @@ -209,28 +208,6 @@ Export *erts_delay_trap = NULL; int erts_use_r9_pids_ports; -#ifdef HYBRID -Eterm *global_heap; -Eterm *global_hend; -Eterm *global_htop; -Eterm *global_saved_htop; -Eterm *global_old_heap; -Eterm *global_old_hend; -ErlOffHeap erts_global_offheap; -Uint global_heap_sz = SH_DEFAULT_SIZE; - -#ifndef INCREMENTAL -Eterm *global_high_water; -Eterm *global_old_htop; -#endif - -Uint16 global_gen_gcs; -Uint16 global_max_gen_gcs; -Uint global_gc_flags; - -Uint global_heap_min_sz = SH_DEFAULT_SIZE; -#endif - int ignore_break; int replace_intr; @@ -337,7 +314,6 @@ erl_init(int ncpu) erl_drv_thr_init(); erts_init_async(); init_io(); - init_copy(); init_load(); erts_init_bif(); erts_init_bif_chksum(); @@ -358,45 +334,6 @@ erl_init(int ncpu) } static void -init_shared_memory(int argc, char **argv) -{ -#ifdef HYBRID - int arg_size = 0; - - global_heap_sz = erts_next_heap_size(global_heap_sz,0); - - /* Make sure arguments will fit on the heap, no one else will check! */ - while (argc--) - arg_size += 2 + strlen(argv[argc]); - if (global_heap_sz < arg_size) - global_heap_sz = erts_next_heap_size(arg_size,1); - -#ifndef INCREMENTAL - global_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, - sizeof(Eterm) * global_heap_sz); - global_hend = global_heap + global_heap_sz; - global_htop = global_heap; - global_high_water = global_heap; - global_old_hend = global_old_htop = global_old_heap = NULL; -#endif - - global_gen_gcs = 0; - global_max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); - global_gc_flags = erts_default_process_flags; - - erts_global_offheap.mso = NULL; -#ifndef HYBRID /* FIND ME! 
*/ - erts_global_offheap.funs = NULL; -#endif - erts_global_offheap.overhead = 0; -#endif - -#ifdef INCREMENTAL - erts_init_incgc(); -#endif -} - -static void erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** argv) { int i; @@ -1057,7 +994,6 @@ erl_start(int argc, char **argv) switch (*ch) { case 's': verbose |= DEBUG_SYSTEM; break; case 'g': verbose |= DEBUG_PRIVATE_GC; break; - case 'h': verbose |= DEBUG_HYBRID_GC; break; case 'M': verbose |= DEBUG_MEMORY; break; case 'a': verbose |= DEBUG_ALLOCATION; break; case 't': verbose |= DEBUG_THREADS; break; @@ -1070,7 +1006,6 @@ erl_start(int argc, char **argv) erts_printf("Verbose level: "); if (verbose & DEBUG_SYSTEM) erts_printf("SYSTEM "); if (verbose & DEBUG_PRIVATE_GC) erts_printf("PRIVATE_GC "); - if (verbose & DEBUG_HYBRID_GC) erts_printf("HYBRID_GC "); if (verbose & DEBUG_MEMORY) erts_printf("PARANOID_MEMORY "); if (verbose & DEBUG_ALLOCATION) erts_printf("ALLOCATION "); if (verbose & DEBUG_THREADS) erts_printf("THREADS "); @@ -1098,12 +1033,6 @@ erl_start(int argc, char **argv) #ifdef HIPE strcat(tmp, ",HIPE"); #endif -#ifdef INCREMENTAL - strcat(tmp, ",INCREMENTAL_GC"); -#endif -#ifdef HYBRID - strcat(tmp, ",HYBRID"); -#endif erts_fprintf(stderr, "Erlang "); if (tmp[1]) { erts_fprintf(stderr, "(%s) ", tmp+1); @@ -1555,7 +1484,6 @@ erl_start(int argc, char **argv) erl_init(ncpu); - init_shared_memory(boot_argc, boot_argv); load_preloaded(); erts_end_staging_code_ix(); erts_commit_staging_code_ix(); @@ -1644,32 +1572,6 @@ system_cleanup(int flush_async) #endif #endif -#ifdef HYBRID - if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, - (void *)ma_src_stack); - if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, - (void *)ma_dst_stack); - if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, - (void *)ma_offset_stack); - ma_src_stack = NULL; - ma_dst_stack = NULL; - ma_offset_stack = NULL; - erts_cleanup_offheap(&erts_global_offheap); -#endif - -#if defined(HYBRID) && !defined(INCREMENTAL) - if (global_heap) { - ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, - (void*) global_heap, - sizeof(Eterm) * global_heap_sz); - } - global_heap = NULL; -#endif - -#ifdef INCREMENTAL - erts_cleanup_incgc(); -#endif - erts_exit_flush_async(); } diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index b10964da52..c98ca09f3f 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -29,7 +29,6 @@ #include "global.h" #include "erl_message.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "erl_binary.h" #include "dtrace-wrapper.h" @@ -597,9 +596,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg) #endif #ifdef HARD_DEBUG - ProcBin *dbg_mso_start = off_heap->mso; - ErlFunThing *dbg_fun_start = off_heap->funs; - ExternalThing *dbg_external_start = off_heap->externals; + struct erl_off_heap_header* dbg_oh_start = off_heap->first; Eterm dbg_term, dbg_token; ErlHeapFragment *dbg_bp; Uint *dbg_hp, *dbg_thp_start; @@ -773,48 +770,16 @@ copy_done: int i, j; ErlHeapFragment* frag; { - ProcBin *mso = off_heap->mso; + struct erl_off_heap_header* dbg_oh = off_heap->first; i = j = 0; - while (mso != dbg_mso_start) { - mso = mso->next; + while (dbg_oh != dbg_oh_start) { + dbg_oh = dbg_oh->next; i++; } for (frag=bp; frag; frag=frag->next) { - mso = frag->off_heap.mso; - while (mso) { - mso = mso->next; - j++; - } - } - ASSERT(i == j); - } - { - ErlFunThing *fun = off_heap->funs; - i = j = 0; - while (fun != dbg_fun_start) { - fun = fun->next; - i++; 
-	}
-	for (frag=bp; frag; frag=frag->next) {
-	    fun = frag->off_heap.funs;
-	    while (fun) {
-		fun = fun->next;
-		j++;
-	    }
-	}
-	ASSERT(i == j);
-    }
-    {
-	ExternalThing *external = off_heap->externals;
-	i = j = 0;
-	while (external != dbg_external_start) {
-	    external = external->next;
-	    i++;
-	}
-	for (frag=bp; frag; frag=frag->next) {
-	    external = frag->off_heap.externals;
-	    while (external) {
-		external = external->next;
+	    dbg_oh = frag->off_heap.first;
+	    while (dbg_oh) {
+		dbg_oh = dbg_oh->next;
 		j++;
 	    }
 	}
@@ -923,8 +888,8 @@ erts_send_message(Process* sender,
 #ifdef USE_VM_PROBES
     *sender_name = *receiver_name = '\0';
     if (DTRACE_ENABLED(message_send)) {
-        erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
-        erts_snprintf(receiver_name, sizeof(receiver_name), "%T", receiver->id);
+        erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->id);
+        erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->id);
     }
 #endif
     if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
@@ -1005,53 +970,6 @@
 #endif
 	    );
         BM_SWAP_TIMER(send,system);
-#ifdef HYBRID
-    } else {
-	ErlMessage* mp = message_alloc();
-	BM_SWAP_TIMER(send,copy);
-#ifdef INCREMENTAL
-	/* TODO: During GC, activate processes if the message resides in
-	 * the fromspace and the sender is active. During major
-	 * collections, add the message to the gray stack if it resides
-	 * in the old generation and the sender is active and the
-	 * receiver is inactive.
-
-	if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) &&
-	    (ptr_val(message) >= inc_fromspc &&
-	     ptr_val(message) < inc_fromend) && INC_IS_ACTIVE(sender))
-	    INC_ACTIVATE(receiver);
-	else if (!IS_CONST(message) && (ma_gc_flags & GC_CYCLE) &&
-		 (ptr_val(message) >= global_old_heap &&
-		  ptr_val(message) < global_old_hend) &&
-		 INC_IS_ACTIVE(sender) && !INC_IS_ACTIVE(receiver))
-	    Mark message in blackmap and add it to the gray stack
-	*/
-
-	if (!IS_CONST(message))
-	    INC_ACTIVATE(receiver);
-#endif
-	LAZY_COPY(sender,message);
-	BM_SWAP_TIMER(copy,send);
-	DTRACE6(message_send, sender_name, receiver_name,
-		size_object(message), tok_label, tok_lastcnt, tok_serial);
-	ERL_MESSAGE_TERM(mp) = message;
-	ERL_MESSAGE_TOKEN(mp) = NIL;
-#ifdef USE_VM_PROBES
-	ERL_MESSAGE_DT_UTAG(mp) = NIL;
-#endif
-	mp->next = NULL;
-	LINK_MESSAGE(receiver, mp);
-	ACTIVATE(receiver);
-
-	erts_proc_notify_new_message(receiver);
-
-	if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
-	    trace_receive(receiver, message);
-	}
-
-	BM_SWAP_TIMER(send,system);
-	return;
-#else
     } else if (sender == receiver) {
 	/* Drop message if receiver has a pending exit ...
*/ #ifdef ERTS_SMP @@ -1169,7 +1087,7 @@ erts_send_message(Process* sender, } BM_SWAP_TIMER(send,system); #endif /* #ifndef ERTS_SMP */ -#endif /* HYBRID */ + return; } } diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index da4376fd0a..594c51a5db 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -1711,9 +1711,10 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif); } else { /* Function traced, patch the original instruction word */ - BpData** bps = (BpData**) code_ptr[1]; - BpData* bp = (BpData*) bps[erts_bp_sched2ix()]; - bp->orig_instr = (BeamInstr) BeamOp(op_call_nif); + GenericBp* g = (GenericBp *) code_ptr[1]; + ASSERT(code_ptr[5+0] == + (BeamInstr) BeamOp(op_i_generic_breakpoint)); + g->orig_instr = (BeamInstr) BeamOp(op_call_nif); } code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr; code_ptr[5+2] = (BeamInstr) lib; diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h index e5d99dc4f1..3fc7b2c9c2 100644 --- a/erts/emulator/beam/erl_nif.h +++ b/erts/emulator/beam/erl_nif.h @@ -187,11 +187,7 @@ extern TWinDynNifCallbacks WinDynNifCallbacks; #else # define ERL_NIF_INIT_GLOB # define ERL_NIF_INIT_BODY -# if defined(VXWORKS) -# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _init(void) -# else -# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void) -# endif +# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void) #endif diff --git a/erts/emulator/beam/erl_nmgc.c b/erts/emulator/beam/erl_nmgc.c deleted file mode 100644 index 2a8c819360..0000000000 --- a/erts/emulator/beam/erl_nmgc.c +++ /dev/null @@ -1,1401 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2004-2011. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. 
- * - * %CopyrightEnd% - */ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif -#include "global.h" -#include "erl_gc.h" -#include "erl_binary.h" -#include "erl_nmgc.h" -#include "erl_debug.h" -#if HIPE -#include "hipe_stack.h" -#endif - - -#ifdef INCREMENTAL -/*************************************************************************** - * * - * Incremental Garbage Collector for the Message Area * - * * - ***************************************************************************/ - -/* - * The heap pointers are declared in erl_init.c - * global_heap is the nursery - * global_old_heap is the old generation - */ -unsigned char *blackmap = NULL; -INC_Page *inc_used_mem = NULL; -INC_MemBlock *inc_free_list = NULL; -Eterm *inc_fromspc; -Eterm *inc_fromend; -Eterm *inc_nursery_scn_ptr; -Eterm **fwdptrs; -Eterm *inc_alloc_limit; -Process *inc_active_proc; -Process *inc_active_last; -int inc_words_to_go; - -static Eterm *inc_last_nursery; -static int inc_pages = INC_NoPAGES; -static INC_Page *inc_bibop = NULL; -static int inc_used_pages; - -/* Used when growing the old generation */ -/* -#define INC_ROOTSAVE 16384 -static Eterm *root_save[INC_ROOTSAVE]; -static int roots_saved = 0; -*/ - -INC_STORAGE_DECLARATION(,gray); - -static void inc_minor_gc(Process *p, int need, Eterm* objv, int nobj); -static void inc_major_gc(Process *p, int need, Eterm* objv, int nobj); - -#ifdef INC_TIME_BASED -#if USE_PERFCTR - -/* - * This uses the Linux perfctr extension to virtualise the - * time-stamp counter. - */ -#include "libperfctr.h" -static struct vperfctr *vperfctr; -static double cpu_khz; -static double tsc_to_cpu_mult; - -static void inc_start_hrvtime(void) -{ - struct perfctr_info info; - struct vperfctr_control control; - - if( vperfctr != NULL ) - return; - vperfctr = vperfctr_open(); - if( vperfctr == NULL ) - return; - if( vperfctr_info(vperfctr, &info) >= 0 ) { - cpu_khz = (double)info.cpu_khz; - tsc_to_cpu_mult = (double)(info.tsc_to_cpu_mult ? 
: 1);
-	if( info.cpu_features & PERFCTR_FEATURE_RDTSC ) {
-	    memset(&control, 0, sizeof control);
-	    control.cpu_control.tsc_on = 1;
-	    if( vperfctr_control(vperfctr, &control) >= 0 )
-		return;
-	}
-    }
-    vperfctr_close(vperfctr);
-    vperfctr = NULL;
-}
-
-#define inc_get_hrvtime() (((double)vperfctr_read_tsc(vperfctr) * tsc_to_cpu_mult) / cpu_khz)
-
-#endif /* USE_PERFCTR */
-#endif /* INC_TIME_BASED */
-
-#ifdef INC_TIME_BASED
-# define timeslice 1 /* milliseconds */
-# define WORK_MORE (inc_get_hrvtime() < start_time + timeslice)
-#else
-//# define inc_min_work 100 /* words */
-# define inc_min_work global_heap_sz + inc_pages * INC_FULLPAGE /* words */
-# define WORK_MORE (inc_words_to_go > 0)
-#endif
-
-void erts_init_incgc(void)
-{
-    int i;
-    int size = inc_pages * INC_FULLPAGE;
-
-    /* Young generation */
-    global_heap = (Eterm *)erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
-				      sizeof(Eterm) * global_heap_sz);
-    global_hend = global_heap + global_heap_sz;
-    global_htop = global_heap;
-    inc_alloc_limit = global_hend;
-
-    /* Fromspace */
-    inc_last_nursery = (Eterm *) erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
-					    global_heap_sz * sizeof(Eterm));
-    inc_fromspc = inc_fromend = NULL;
-
-    /* Forward-pointers */
-    fwdptrs = erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
-			 global_heap_sz * sizeof(Eterm*));
-    /* Old generation */
-    global_old_heap = (Eterm *)erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
-					  size * sizeof(Eterm));
-    global_old_hend = global_old_heap + size;
-
-    /* Pages in the BiBOP */
-    for (i = 0; i < inc_pages; i++)
-    {
-	INC_Page *this = (INC_Page*)(global_old_heap + i * INC_FULLPAGE);
-	this->next = (INC_Page*)((Eterm*)this + INC_FULLPAGE);
-    }
-
-    inc_bibop = (INC_Page*)global_old_heap;
-    ((INC_Page*)(global_old_heap + (inc_pages - 1) * INC_FULLPAGE))->next =
-	NULL;
-
-    inc_used_mem = inc_bibop;
-    inc_bibop = inc_bibop->next;
-    inc_used_mem->next = NULL;
-    inc_used_pages = 1;
-
-    /* Free-list */
-    inc_free_list = (INC_MemBlock*)inc_used_mem->start;
-    inc_free_list->size = INC_PAGESIZE;
-    inc_free_list->prev = NULL;
-    inc_free_list->next = NULL;
-
-    /* Blackmap */
-    blackmap = (unsigned char*)erts_alloc(ERTS_ALC_T_MESSAGE_AREA,
-					  INC_FULLPAGE * inc_pages);
-    /* Gray stack */
-    INC_STORAGE_INIT(gray);
-
-    inc_active_proc = NULL;
-    inc_active_last = NULL;
-
-#ifdef INC_TIME_BASED
-    inc_start_hrvtime();
-#endif
-}
-
-void erts_cleanup_incgc(void)
-{
-    INC_STORAGE_ERASE(gray);
-
-    if (inc_fromspc)
-	inc_last_nursery = inc_fromspc;
-
-    erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_heap);
-    erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)inc_last_nursery);
-    erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_old_heap);
-    erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)blackmap);
-    erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)fwdptrs);
-}
-
-void erts_incremental_gc(Process* p, int need, Eterm* objv, int nobj)
-{
-    int repeat_minor;
-#ifdef INC_TIME_BASED
-    double start_time = inc_get_hrvtime();
-    int work_left_before = inc_words_to_go;
-#endif
-    /* Used when growing the fromspace */
-    static char inc_growing_nurs = 0;
-
-    BM_STOP_TIMER(system);
-    //BM_MMU_READ();
-    BM_RESET_TIMER(gc);
-    BM_START_TIMER(gc);
-
-    VERBOSE(DEBUG_HYBRID_GC,
-	    ("INCGC: Incremental GC START Caused by: %T Need: %d\n",
-	     p->id,need));
-
-    ma_gc_flags |= GC_GLOBAL;
-    ma_gc_flags &= ~GC_CYCLE_START;
-
-#ifndef INC_TIME_BASED
-    /* Decide how much work to do this GC stage. The work is measured
-     * in the number of words copied from the young generation to the
-     * old, plus the number of words marked in the old generation.
-     */
-    if (ma_gc_flags & GC_MAJOR) {
-	int wm = (need > inc_min_work) ? need : inc_min_work;
-	inc_words_to_go = (int)((wm * (((inc_used_pages * INC_PAGESIZE) /
-					(double)global_heap_sz) + 1)) + 0.5);
-    }
-    else
-	inc_words_to_go = (need > inc_min_work) ? need : inc_min_work;
-#endif
-
-    do {
-	if (ma_gc_flags & GC_MAJOR) {
-	    /* This is a major collection cycle. */
-	    inc_major_gc(p,need,objv,nobj);
-	} else if (ma_gc_flags & GC_CYCLE) {
-	    /* This is a minor collection cycle. */
-	    inc_minor_gc(p,need,objv,nobj);
-	} else {
-	    VERBOSE(DEBUG_HYBRID_GC,("INCGC: Collection cycle START\n"));
-	    ma_gc_flags |= (GC_CYCLE | GC_CYCLE_START);
-	    inc_fromspc = global_heap;
-	    inc_fromend = global_htop;
-	    global_heap = global_htop = inc_last_nursery;
-	    global_hend = global_heap + global_heap_sz;
-	    inc_nursery_scn_ptr = global_heap;
-#ifdef INC_TIME_BASED
-	    work_left_before = inc_words_to_go = global_heap_sz;
-#endif
-#ifdef DEBUG
-	    inc_last_nursery = NULL;
-#endif
-	    memset(fwdptrs,0,global_heap_sz * sizeof(Eterm));
-
-	    {
-		/* TODO: Probably not all processes should be
-		 * activated here... */
-		int i;
-		for (i = 0; i < erts_num_active_procs; i++) {
-		    Process *cp = erts_active_procs[i];
-		    INC_ACTIVATE(cp);
-		    cp->scan_top = cp->high_water;
-		}
-	    }
-
-	    if (ma_gc_flags & GC_NEED_MAJOR) {
-		/* The previous collection cycle caused the old generation to
-		 * overflow. This collection cycle will therefore be a major
-		 * one.
-		 */
-		BM_COUNT(major_gc_cycles);
-		VERBOSE(DEBUG_HYBRID_GC,("INCGC: MAJOR cycle\n"));
-		inc_major_gc(p,need,objv,nobj);
-	    } else {
-		BM_COUNT(minor_gc_cycles);
-		VERBOSE(DEBUG_HYBRID_GC,("INCGC: MINOR cycle\n"));
-		inc_minor_gc(p,need,objv,nobj);
-	    }
-	}
-
-	repeat_minor = 0;
-	if (!(ma_gc_flags & GC_CYCLE)) {
-	    inc_alloc_limit = global_hend;
-	    inc_last_nursery = inc_fromspc;
-	    inc_fromspc = inc_fromend = NULL;
-	    ASSERT(INC_STORAGE_EMPTY(gray));
-
-	    if (inc_growing_nurs) {
-		/*
-		 * The previous collection cycle caused the nursery to
-		 * grow, now we have to grow the from-space as well.
-		 */
-		inc_last_nursery =
-		    (Eterm*) erts_realloc(ERTS_ALC_T_MESSAGE_AREA,
-					  (void*)inc_last_nursery,
-					  sizeof(Eterm) * global_heap_sz);
-		inc_growing_nurs = 0;
-	    }
-
-	    if (global_hend - global_htop <= need) {
-		/*
-		 * Initiate a new GC cycle immediately and, if necessary,
-		 * enlarge the nursery.
-		 */
-		if (global_heap_sz <= need) {
-		    VERBOSE(DEBUG_HYBRID_GC,
-			    ("INCGC: Allocating a larger nursery\n"));
-		    global_heap_sz = erts_next_heap_size(need * 1.5,0);
-		    inc_last_nursery =
-			(Eterm*) erts_realloc(ERTS_ALC_T_MESSAGE_AREA,
-					      (void*)inc_last_nursery,
-					      sizeof(Eterm) * global_heap_sz);
-		    fwdptrs = erts_realloc(ERTS_ALC_T_MESSAGE_AREA,fwdptrs,
-					   global_heap_sz * sizeof(Eterm*));
-		    inc_growing_nurs = 1;
-		}
-		repeat_minor = 1;
-	    }
-
-#ifdef DEBUG
-	    /* Fill the from-space with bad things */
-	    memset(inc_last_nursery,DEBUG_BAD_BYTE,
-		   global_heap_sz * sizeof(Eterm));
-#endif
-	}
-    } while (repeat_minor);
-
-
-    /* Clean up after garbage collection ********************************/
-
-    if (inc_alloc_limit != global_hend) {
-
-#ifdef INC_TIME_BASED
-	if ((work_left_before - inc_words_to_go) == 0) {
-	    inc_alloc_limit = global_htop + need;
-	} else {
-	    inc_alloc_limit = (global_hend - global_htop) /
-		(inc_words_to_go / (work_left_before - inc_words_to_go)) +
-		global_htop;
-	    if (inc_alloc_limit > global_hend)
-		inc_alloc_limit = global_hend;
-	}
-#else
-	inc_alloc_limit = (Eterm*)(global_htop + ((need > inc_min_work) ?
-						  need : inc_min_work));
-	if (inc_alloc_limit > global_hend)
-	    inc_alloc_limit = global_hend;
-#endif
-    }
-
-    ma_gc_flags &= ~GC_GLOBAL;
-
-    /* INC_TIME_BASED: If this fails we have to increase the timeslice! */
-    ASSERT(inc_alloc_limit - global_htop > need);
-
-    BM_STOP_TIMER(gc);
-#ifdef BM_TIMERS
-    minor_global_gc_time += gc_time;
-    if (gc_time > max_global_minor_time)
-	max_global_minor_time = gc_time;
-
-    pause_times[(((gc_time * 1000) < MAX_PAUSE_TIME) ?
-		 (int)(gc_time * 1000) :
-		 MAX_PAUSE_TIME - 1)]++;
-#endif
-    //BM_MMU_INIT();
-    { static long long verif = 0;
-      //erts_printf("before verify: %d\n",++verif);
-      if (verif==168) print_memory(NULL);
-      verify_everything();
-      //erts_printf("after verify: %d\n",verif);
-    }
-    BM_START_TIMER(system);
-    VERBOSE(DEBUG_HYBRID_GC,("INCGC: Incremental GC END\n"));
-}
-
-
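
The inc_alloc_limit logic at the end of erts_incremental_gc above paces the collector: the nursery allocation limit is set so that a new GC increment triggers before allocation outruns collection. Roughly, and with hypothetical names, the idea is:

    typedef unsigned long Eterm;    /* stand-in for the VM's term word */

    /* Hypothetical pacing helper: choose the next allocation limit so
     * that the mutator may allocate in proportion to the collection
     * work already done. */
    static Eterm *next_alloc_limit(Eterm *htop, Eterm *hend,
                                   long work_done, long work_left)
    {
        if (work_done <= 0)
            return htop;            /* no progress: collect again at once */
        {
            long room = (long)(hend - htop) * work_done
                        / (work_done + work_left);
            return (htop + room < hend) ? htop + room : hend;
        }
    }
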
-/***************************************************************************
- *                                                                         *
- *     Minor collection - Copy live data from young generation to old      *
- *                                                                         *
- ***************************************************************************/
-
-#define MINOR_SCAN(PTR,END) do {                                        \
-    ASSERT(PTR <= END);                                                 \
-    while (WORK_MORE && PTR < END) {                                    \
-        Eterm val = *PTR;                                               \
-        Eterm *obj_ptr = ptr_val(val);                                  \
-        switch (primary_tag(val)) {                                     \
-        case TAG_PRIMARY_LIST:                                          \
-            if (ptr_within(obj_ptr,inc_fromspc,inc_fromend)) {          \
-                if (INC_IS_FORWARDED(obj_ptr)) {                        \
-                    *PTR = make_list(INC_FORWARD_VALUE(obj_ptr));       \
-                }                                                       \
-                else {                                                  \
-                    Eterm *hp = erts_inc_alloc(2);                      \
-                    INC_STORE(gray,hp,2);                               \
-                    INC_COPY_CONS(obj_ptr,hp,PTR);                      \
-                }                                                       \
-            }                                                           \
-            break;                                                      \
-        case TAG_PRIMARY_BOXED:                                         \
-            if (ptr_within(obj_ptr,inc_fromspc,inc_fromend)) {          \
-                if (INC_IS_FORWARDED(obj_ptr)) {                        \
-                    *PTR = make_boxed(INC_FORWARD_VALUE(obj_ptr));      \
-                }                                                       \
-                else {                                                  \
-                    Eterm *hp = erts_inc_alloc(BOXED_NEED(obj_ptr,*obj_ptr)); \
-                    INC_STORE(gray,hp,BOXED_NEED(obj_ptr,*obj_ptr));    \
-                    INC_COPY_BOXED(obj_ptr,hp,PTR);                     \
-                }                                                       \
-            }                                                           \
-            break;                                                      \
-        case TAG_PRIMARY_HEADER:                                        \
-            switch (val & _TAG_HEADER_MASK) {                           \
-            case ARITYVAL_SUBTAG: break;                                \
-            default: PTR += thing_arityval(val); break;                 \
-            }                                                           \
-            break;                                                      \
-        }                                                               \
-        PTR++;                                                          \
-    }                                                                   \
-} while(0)
-
-
-/* Returns: TRUE (1) if the need is greater than the available space
- * and the garbage collector needs to be restarted immediately. FALSE
- * (0) otherwise.
- */
-static void inc_minor_gc(Process* p, int need, Eterm* objv, int nobj)
-{
-    BM_COUNT(minor_gc_stages);
-
-    /* Start with looking at gray objects found in earlier collection
-     * stages.
-     */
-    VERBOSE(DEBUG_HYBRID_GC,("INCGC: Rescue gray found from nursery\n"));
-    {
-	INC_Object *obj = NULL;
-	Eterm *ptr;
-
-	while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
-	    obj = INC_STORAGE_GET(gray);
-	    if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
-		ptr = obj->this + thing_arityval(*obj->this) + 1;
-	    } else {
-		ptr = obj->this;
-	    }
-	    MINOR_SCAN(ptr,obj->this + obj->size);
-	}
-	/* TODO: See the previous update of gray objects */
-	if (!WORK_MORE && obj != NULL)
-	    INC_STORE(gray,obj->this,obj->size);
-    }
-
-    VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan root-set\n"));
-    while (WORK_MORE && inc_active_proc) {
-	Rootset rootset;
-	Process *cp = inc_active_proc;
-
-	ASSERT(INC_IS_ACTIVE(cp));
-
-	/* TODO: How expensive is it to build a new rootset every time? */
-
-	/* TODO: Think about the order! Rootset, heap, old heap... */
-
-	/* TODO: Scan the stack from p->send to p->stop! [Brooks84] */
-	/* Note: We do not GC the youngest objects - those allocated
-	   during the GC cycle. This gives the young objects a chance
-	   to die before the GC starts copying them.
-	   [StefanovicMcKinleyMoss@OOPSLA99] */
-
-	/* TODO: Once the rootset has been scanned, the process should
-	   no longer be active. It should be activated in schedule;
-	   only if a process has run do we need to scan its rootset
-	   again. */
-
-	/* MT: In a multithreaded system the process cp needs to be
-	 * locked here.
-	 */
-
-	if (cp == p)
-	    rootset.n = setup_rootset(cp, objv, nobj, &rootset);
-	else
-	    rootset.n = setup_rootset(cp, cp->arg_reg, cp->arity, &rootset);
-
-	//MA_GENSWEEP_NSTACK(cp, old_htop, n_htop, objv, nobj);
-
-	while (WORK_MORE && rootset.n--) {
-	    Eterm *g_ptr = rootset.v[rootset.n];
-	    Uint g_sz = rootset.sz[rootset.n];
-
-	    while (WORK_MORE && g_sz--) {
-		Eterm gval = *g_ptr;
-		switch (primary_tag(gval)) {
-		case TAG_PRIMARY_LIST: {
-		    Eterm *ptr = list_val(gval);
-		    if (ptr_within(ptr,inc_fromspc,inc_fromend)) {
-			if (INC_IS_FORWARDED(ptr)) {
-			    *g_ptr++ = make_list(INC_FORWARD_VALUE(ptr));
-			}
-			else {
-			    Eterm *hp = erts_inc_alloc(2);
-			    INC_STORE(gray,hp,2);
-			    INC_COPY_CONS(ptr,hp,g_ptr++);
-			}
-		    }
-		    else
-			++g_ptr;
-		    continue;
-		}
-
-		case TAG_PRIMARY_BOXED: {
-		    Eterm *ptr = boxed_val(gval);
-		    if (ptr_within(ptr,inc_fromspc,inc_fromend)) {
-			if (INC_IS_FORWARDED(ptr)) {
-			    *g_ptr++ = make_boxed(INC_FORWARD_VALUE(ptr));
-			}
-			else {
-			    Eterm *hp = erts_inc_alloc(BOXED_NEED(ptr,*ptr));
-			    INC_STORE(gray,hp,BOXED_NEED(ptr,*ptr));
-			    INC_COPY_BOXED(ptr,hp,g_ptr++);
-			}
-		    }
-		    else
-			++g_ptr;
-		    continue;
-		}
-
-		default:
-		    g_ptr++;
-		    continue;
-		}
-	    }
-	}
-
-	restore_one_rootset(cp, &rootset);
-
-	/* MT: cp can be unlocked now. */
-
-	/* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan private nursery\n")); */
-	if (cp->scan_top != HEAP_TOP(cp)) {
-	    Eterm *ptr = cp->scan_top;
-	    MINOR_SCAN(ptr,HEAP_TOP(cp));
-	    /* TODO: To save scan_top here, all message-area pointers
-	     * that are found must be added to cp->rrma.
-	     */
-	    //cp->scan_top = ptr;
-	}
-
-	/* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan heap fragments\n")); */
-	{
-	    ErlHeapFragment* bp = MBUF(cp);
-
-	    while (WORK_MORE && bp) {
-		Eterm *ptr = bp->mem;
-		if ((ARITH_HEAP(cp) >= bp->mem) &&
-		    (ARITH_HEAP(cp) < bp->mem + bp->size)) {
-		    MINOR_SCAN(ptr,ARITH_HEAP(cp));
-		} else {
-		    MINOR_SCAN(ptr,bp->mem + bp->size);
-		}
-		bp = bp->next;
-	    }
-	}
-
-	/* VERBOSE(DEBUG_HYBRID_GC,("INCGC: Scan gray\n")); */
-	{
-	    INC_Object *obj = NULL;
-	    Eterm *ptr;
-	    while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
-		obj = INC_STORAGE_GET(gray);
-		if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
-		    ptr = obj->this + thing_arityval(*obj->this) + 1;
-		} else {
-		    ptr = obj->this;
-		}
-		MINOR_SCAN(ptr,obj->this + obj->size);
-	    }
-	    /* TODO: INC_STORE(gray,ptr,obj->size-(ptr-obj->this));
-	     * Something like that.. */
-	    if (!WORK_MORE && obj != NULL)
-		INC_STORE(gray,obj->this,obj->size);
-	}
-
-	if (WORK_MORE) {
-	    //printf("Rootset after:\r\n");
-	    //print_one_rootset(&rootset);
-	    INC_DEACTIVATE(cp);
-	}
-    }
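
Each scan above repeats the same evacuation step: follow a forwarding pointer if the object has already been copied out of from-space; otherwise copy it, record it for later scanning, and install the forwarding pointer (kept in the separate fwdptrs table). Stripped of term tagging, and with hypothetical names, the step is roughly:

    typedef unsigned long Eterm;    /* stand-in for the VM's term word */

    /* Hypothetical evacuation step: fwd[] parallels from-space and
     * holds NULL or the address of an object's new copy. */
    static Eterm *evacuate(Eterm *obj, unsigned nwords, Eterm **fwd,
                           Eterm *from_start, Eterm **allocp)
    {
        Eterm *copy = fwd[obj - from_start];
        if (copy == NULL) {              /* not copied yet */
            unsigned i;
            copy = *allocp;
            for (i = 0; i < nwords; i++)
                copy[i] = obj[i];        /* move object to old generation */
            *allocp += nwords;
            fwd[obj - from_start] = copy; /* install forwarding pointer */
            /* the caller would also push `copy' on the gray stack */
        }
        return copy;                     /* rewrite the scanned reference */
    }
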
-
-    /* Update new pointers in the nursery to new copies in the old
-     * generation.
-     */
-    VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update nursery\n"));
-    {
-	Eterm *ptr = inc_nursery_scn_ptr;
-	MINOR_SCAN(ptr,global_htop);
-	inc_nursery_scn_ptr = ptr;
-    }
-
-    VERBOSE(DEBUG_HYBRID_GC,("INCGC: Rescue gray found from nursery\n"));
-    {
-	INC_Object *obj = NULL;
-	Eterm *ptr;
-
-	while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) {
-	    obj = INC_STORAGE_GET(gray);
-	    if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) {
-		ptr = obj->this + thing_arityval(*obj->this) + 1;
-	    } else {
-		ptr = obj->this;
-	    }
-	    MINOR_SCAN(ptr,obj->this + obj->size);
-	}
-	/* TODO: See the previous update of gray objects */
-	if (!WORK_MORE && obj != NULL)
-	    INC_STORE(gray,obj->this,obj->size);
-    }
-
-    /* Atomic phase */
-    VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update copy stack\n"));
-    {
-	Uint i;
-	for (i = 0; i < ma_dst_top; i++) {
-	    if (ptr_within(ma_dst_stack[i],inc_fromspc,inc_fromend)) {
-		if (INC_IS_FORWARDED(ma_dst_stack[i]))
-		    ma_dst_stack[i] = INC_FORWARD_VALUE(ma_dst_stack[i]);
-	    }
-	}
-    }
-
-    if (WORK_MORE) {
-	VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update offheap-lists\n"));
-	{
-	    ExternalThing **prev = &erts_global_offheap.externals;
-	    ExternalThing *ptr = erts_global_offheap.externals;
-
-	    /* Atomic phase */
-	    VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep proc externals\n"));
-	    while (ptr) {
-		Eterm *ppt = (Eterm*) ptr;
-
-		if (ptr_within(ppt,global_old_heap,global_old_hend)) {
-		    prev = &ptr->next;
-		    ptr = ptr->next;
-		} else if (ptr_within(ppt, inc_fromspc, inc_fromend) &&
-			   INC_IS_FORWARDED(ppt)) {
-		    ExternalThing *ro = (ExternalThing*)INC_FORWARD_VALUE(ppt);
-		    *prev = ro;		/* Patch to moved pos */
-		    prev = &ro->next;
-		    ptr = ro->next;
-		} else {
-		    erts_deref_node_entry(ptr->node);
-		    *prev = ptr = ptr->next;
-		}
-	    }
-	    ASSERT(*prev == NULL);
-	}
-
-	{
-	    ProcBin **prev = &erts_global_offheap.mso;
-	    ProcBin *ptr = erts_global_offheap.mso;
-
-	    /* Atomic phase */
-	    VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep proc bins\n"));
-	    while (ptr) {
-		Eterm *ppt = (Eterm*)ptr;
-
-		if (ptr_within(ppt,global_old_heap,global_old_hend)) {
-		    prev = &ptr->next;
-		    ptr = ptr->next;
-		} else if (ptr_within(ppt, inc_fromspc, inc_fromend) &&
-			   INC_IS_FORWARDED(ppt)) {
-		    ProcBin *ro = (ProcBin*)INC_FORWARD_VALUE(ppt);
-		    *prev = ro;		/* Patch to moved pos */
-		    prev = &ro->next;
-		    ptr = ro->next;
-		} else {
-		    Binary *bptr;
-		    *prev = ptr->next;
-		    bptr = ptr->val;
-		    if (erts_refc_dectest(&bptr->refc, 0) == 0)
-			erts_bin_free(bptr);
-		    ptr = *prev;
-		}
-	    }
-	    ASSERT(*prev == NULL);
-	}
-
-	VERBOSE(DEBUG_HYBRID_GC,("INCGC: Minor collection cycle END\n"));
-	ma_gc_flags &= ~GC_CYCLE;
-    }
-}
-
-
-
-
-/***************************************************************************
- *                                                                         *
- *    Major collection - CopyMark - Copy young to old, Mark-Sweep old      *
- *                                                                         *
- ***************************************************************************/
-
-#define COPYMARK(PTR,END) do {                                          \
-    ASSERT(PTR <= END);                                                 \
-    while (WORK_MORE && PTR < END) {                                    \
-        Eterm val = *PTR;                                               \
-        Eterm *obj_ptr = ptr_val(val);                                  \
-        switch (primary_tag(val)) {                                     \
-        case TAG_PRIMARY_LIST:                                          \
-            COPYMARK_CONS(obj_ptr,aging_htop,PTR,aging_end); break;     \
-        case TAG_PRIMARY_BOXED:                                         \
-            COPYMARK_BOXED(obj_ptr,aging_htop,PTR,aging_end); break;    \
-        case TAG_PRIMARY_HEADER:                                        \
-            switch (val & _TAG_HEADER_MASK) {                           \
-            case ARITYVAL_SUBTAG: break;                                \
-            default:                                                    \
-                PTR += thing_arityval(val);                             \
-                break;                                                  \
-            }                                                           \
-            break;                                                      \
-        default: break;                                                 \
-        }                                                               \
-        PTR++;                                                          \
-    }                                                                   \
-} while(0);
-/* TODO:
-   if (aging_htop + 10 > aging + INC_FULLPAGE) {
-       aging->next = inc_used_mem;
-       inc_used_mem = aging;
-   }
-*/
-
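
inc_major_gc() below is a copy-mark collector: survivors of the young area are copied to an "aging" page, while reachable old-generation data is only marked in the blackmap byte array and swept later. A schematic of the marking half, with a hypothetical helper:

    typedef unsigned long Eterm;    /* stand-in for the VM's term word */

    /* Hypothetical marking helper: one byte in the map per heap word. */
    static int mark_black(unsigned char *map, Eterm *old_start, Eterm *obj)
    {
        unsigned long ix = (unsigned long)(obj - old_start);
        if (map[ix])
            return 0;               /* already marked */
        map[ix] = 1;
        return 1;                   /* newly marked: scan its children */
    }
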
inc_major_gc(Process *p, int need, Eterm* objv, int nobj) -{ - Eterm *free_start = NULL; - Uint live = 0; - Uint old_gen_sz = 0; - static INC_Page *aging; - static Eterm *aging_htop; - static Eterm *aging_end; - BM_NEW_TIMER(old_gc); - - BM_SWAP_TIMER(gc,old_gc); - BM_COUNT(major_gc_stages); - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Major collection START\n")); - - ma_gc_flags |= GC_INCLUDE_ALL; - - if (ma_gc_flags & GC_NEED_MAJOR) - { - INC_Page *page = inc_used_mem; - - ma_gc_flags |= GC_MAJOR; - ma_gc_flags &= ~GC_NEED_MAJOR; - - while (page) - { - memset(blackmap + - ((void*)page - (void*)global_old_heap) / sizeof(void*), - 0, INC_FULLPAGE); - page = page->next; - } - - if (inc_bibop) { - aging = inc_bibop; - inc_bibop = inc_bibop->next; - aging->next = NULL; - memset(blackmap + - ((void*)aging - (void*)global_old_heap) / sizeof(void*), - 1, INC_FULLPAGE); - aging_htop = aging->start; - aging_end = aging->start + INC_PAGESIZE; - } - else { - /* There are no free pages.. Either fragmentation is a - * problem or we are simply out of memory. Allocation in - * the old generation will be done through the free-list - * this GC cycle. - */ - aging = NULL; - aging_htop = aging_end = NULL; - } - } - - /* Start with looking at gray objects found in earlier collection - * stages. - */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray\n")); - { - INC_Object *obj = NULL; - - while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) { - Eterm *ptr; - - obj = INC_STORAGE_GET(gray); - if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) { - ptr = obj->this + thing_arityval(*obj->this) + 1; - } else { - ptr = obj->this; - } - COPYMARK(ptr,obj->this + obj->size); - } - /* TODO: Look at the corresponding code in minor. */ - if (!WORK_MORE && obj != NULL) - INC_STORE(gray,obj->this,obj->size); - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark roots\n")); - while (WORK_MORE && inc_active_proc) - { - /* For each process: Scan all areas containing pointers to the - * message area. When a process is done here, all its - * message-pointers should be to the old generation. - */ - Rootset rootset; - Process *cp = inc_active_proc; - - ASSERT(INC_IS_ACTIVE(cp)); - - /* MT: In a multithreaded system the process cp needs to be - * locked here. - */ - if (cp == p) - rootset.n = setup_rootset(cp, objv, nobj, &rootset); - else - rootset.n = setup_rootset(cp, cp->arg_reg, cp->arity, &rootset); - - while (WORK_MORE && rootset.n--) - { - Eterm *ptr = rootset.v[rootset.n]; - Eterm *end = ptr + rootset.sz[rootset.n]; - - while (WORK_MORE && ptr < end) { - Eterm val = *ptr; - Eterm *obj_ptr = ptr_val(val); - - switch (primary_tag(val)) { - case TAG_PRIMARY_LIST: - { - COPYMARK_CONS(obj_ptr,aging_htop,ptr,aging_end); - break; - } - - case TAG_PRIMARY_BOXED: - { - COPYMARK_BOXED(obj_ptr,aging_htop,ptr,aging_end); - break; - } - } - ptr++; - } - } - -#ifdef HIPE - /* Atomic phase */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Native stack scan: %T\n",cp->id)); - aging_htop = ma_fullsweep_nstack(cp,aging_htop,aging_end); -#endif - restore_one_rootset(cp, &rootset); - - /* MT: cp can be unlocked now. But beware!! The message queue - * might be updated with new pointers to the fromspace while - * we work below. The send operation can not assume that all - * active processes will look through their message queue - * before deactivating as is the case in non-MT incremental - * collection. - */
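Each of the surrounding phases drains the gray storage under the collector's work budget (WORK_MORE); when the budget runs out mid-object, the object last fetched is pushed back so the next increment can resume it, and the TODOs in the minor pass note that storing only the unscanned remainder would be tighter still. A compact sketch of this bounded-work drain, with gray_pop, gray_push and scan_object as hypothetical stand-ins for the INC_STORAGE_* machinery:

    #include <stddef.h>

    typedef struct { void *obj; size_t size; } GrayRef;

    int  gray_pop(GrayRef *out);     /* returns 0 when the storage is empty */
    void gray_push(void *obj, size_t size);
    /* Charges scanned words against *budget; may return early at 0. */
    void scan_object(void *obj, size_t size, long *budget);

    static void drain_gray(long *budget)
    {
        GrayRef g = { NULL, 0 };

        while (*budget > 0 && gray_pop(&g))
            scan_object(g.obj, g.size, budget);
        if (*budget <= 0 && g.obj != NULL)
            gray_push(g.obj, g.size); /* requeue for the next increment */
    }

This mirrors the repeated "if (!WORK_MORE && obj != NULL) INC_STORE(...)" pattern: requeueing is conservative, since an object may be pushed back even when its scan just completed, but correctness only requires that no partially scanned object is lost.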
- - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark process heap\n")); - { - Eterm *ptr = cp->scan_top; - COPYMARK(ptr,cp->htop); - //cp->scan_top = ptr; - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark heap fragments\n")); - { - ErlHeapFragment* bp = MBUF(cp); - - while (WORK_MORE && bp) { - Eterm *ptr = bp->mem; - Eterm *end; - - if ((ARITH_HEAP(cp) >= bp->mem) && - (ARITH_HEAP(cp) < bp->mem + bp->size)) { - end = ARITH_HEAP(cp); - } else { - end = bp->mem + bp->size; - } - - COPYMARK(ptr,end); - bp = bp->next; - } - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray stack\n")); - { - INC_Object *obj = NULL; - - while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) { - Eterm *ptr; - - obj = INC_STORAGE_GET(gray); - if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) { - ptr = obj->this + thing_arityval(*obj->this) + 1; - } else { - ptr = obj->this; - } - COPYMARK(ptr,obj->this + obj->size); - } - /* TODO: Look at the corresponding code in minor. */ - if (!WORK_MORE && obj != NULL) - INC_STORE(gray,obj->this,obj->size); - } - - if (WORK_MORE) { - INC_DEACTIVATE(cp); - } - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark nursery\n")); - { - Eterm *ptr = inc_nursery_scn_ptr; - COPYMARK(ptr,global_htop); - inc_nursery_scn_ptr = ptr; - } - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Copy-Mark gray found in nursery\n")); - { - INC_Object *obj = NULL; - - while (WORK_MORE && !INC_STORAGE_EMPTY(gray)) { - Eterm *ptr; - - obj = INC_STORAGE_GET(gray); - if ((*obj->this & _TAG_HEADER_MASK) == FUN_SUBTAG) { - ptr = obj->this + thing_arityval(*obj->this) + 1; - } else { - ptr = obj->this; - } - COPYMARK(ptr,obj->this + obj->size); - } - /* TODO: Look at the corresponding code in minor. */ - if (!WORK_MORE && obj != NULL) - INC_STORE(gray,obj->this,obj->size); - } - - - /**********************************************************************/ - if (WORK_MORE) { - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep phase\n")); - - /* Atomic phase */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep externals in old generation\n")); - { - ExternalThing** prev = &erts_global_offheap.externals; - ExternalThing* ptr = erts_global_offheap.externals; - - while (ptr) { - Eterm* ppt = (Eterm *) ptr; - - if ((ptr_within(ppt, global_old_heap, global_old_hend) && - blackmap[ppt - global_old_heap] == 0) || - (ptr_within(ppt, inc_fromspc, inc_fromend) && - !INC_IS_FORWARDED(ppt))) - { - erts_deref_node_entry(ptr->node); - *prev = ptr = ptr->next; - } else if (ptr_within(ppt, inc_fromspc, inc_fromend)) { - ExternalThing* ro = (ExternalThing*)INC_FORWARD_VALUE(ppt); - *prev = ro; /* Patch to moved pos */ - prev = &ro->next; - ptr = ro->next; - } else { - prev = &ptr->next; - ptr = ptr->next; - } - } - ASSERT(*prev == NULL); - } - - /* Atomic phase */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep refc bins in old generation\n")); - { - ProcBin** prev = &erts_global_offheap.mso; - ProcBin* ptr = erts_global_offheap.mso; - - while (ptr) { - Eterm *ppt = (Eterm*)ptr; - - if ((ptr_within(ppt, global_old_heap, global_old_hend) && - blackmap[ppt - global_old_heap] == 0) || - (ptr_within(ppt, inc_fromspc, inc_fromend) && - !INC_IS_FORWARDED(ppt))) - { - Binary* bptr; - *prev = ptr->next; - bptr = ptr->val; - if (erts_refc_dectest(&bptr->refc, 0) == 0) - erts_bin_free(bptr); - ptr = *prev; - } else if (ptr_within(ppt, inc_fromspc, inc_fromend)) { - ProcBin* ro = (ProcBin*)INC_FORWARD_VALUE(ppt); - *prev = ro; /* Patch to moved pos */ - prev = &ro->next; - ptr = ro->next; - } else { - prev = &ptr->next; - ptr = ptr->next; - } - } - ASSERT(*prev == NULL); - } - - /* TODO: Currently atomic
phase - Can not be later of course. */ - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Sweep old generation\n")); - { - INC_Page *page = inc_used_mem; - INC_Page *prev = NULL; - inc_free_list = NULL; - - while (page) { - int scavenging = 0; - int n = page->start - global_old_heap; - int stop = n + INC_PAGESIZE; - - old_gen_sz += INC_PAGESIZE; - while (n < stop) { - if (blackmap[n] != 0) { - if (scavenging) { - Eterm *ptr = global_old_heap + n; - scavenging = 0; - if ((ptr - free_start) * sizeof(Eterm) >= - sizeof(INC_MemBlock)) - { - INC_MemBlock *new = (INC_MemBlock*)free_start; - new->size = ptr - free_start; - new->prev = NULL; - new->next = inc_free_list; - if (inc_free_list) - inc_free_list->prev = new; - inc_free_list = new; - } - } - if (blackmap[n] == 255) { - unsigned int size = - *(unsigned int*)(((long)&blackmap[n]+4) & ~3); - live += size; - n += size; - } - else { - live += blackmap[n]; - n += blackmap[n]; - } - } - else if (!scavenging) { - free_start = global_old_heap + n; - scavenging = 1; - n++; - } - else { - n++; - } - } - - if (scavenging) { - if ((global_old_heap + n - free_start) * sizeof(Eterm) > - sizeof(INC_MemBlock)) - { - INC_MemBlock *new = (INC_MemBlock*)free_start; - new->size = global_old_heap + n - free_start; - new->prev = NULL; - new->next = inc_free_list; - if (inc_free_list) - inc_free_list->prev = new; - inc_free_list = new; - } - else if (free_start == page->start) { - INC_Page *next = page->next; - - if (prev) - prev->next = page->next; - else - inc_used_mem = page->next; - - page->next = inc_bibop; - inc_bibop = page; - inc_used_pages--; - page = next; - continue; - } - } - prev = page; - page = page->next; - } - } - } - - ASSERT(inc_bibop); - /* - This code is not expected to work right now. - if (!inc_bibop) { - int i; - int new_pages = inc_pages * 2; - int size = sizeof(Eterm) * new_pages * INC_FULLPAGE; - Eterm *new_heap = erts_alloc(ERTS_ALC_T_MESSAGE_AREA,size); - Eterm *new_hend = new_heap + size; - Eterm *new_htop; - Eterm *last_page_end; - INC_Page *new_used_mem; - INC_Page *page; - - erts_printf("The last page has been allocated..\n"); - erts_printf("We need to copy things!\n"); - - / * Create new, bigger bag of pages * / - for (i = 0; i < new_pages; i++) - { - INC_Page *this = - (INC_Page*)(new_heap + i * INC_FULLPAGE); - this->next = (INC_Page*)((Eterm*)this + INC_FULLPAGE); - } - inc_bibop = (INC_Page*)new_heap; - ((INC_Page*)(new_heap + (new_pages - 1) * - INC_FULLPAGE))->next = NULL; - - new_used_mem = inc_bibop; - inc_bibop = inc_bibop->next; - new_used_mem->next = NULL; - - / * Move stuff from old bag to new * / - inc_free_list = NULL; - new_htop = new_used_mem->start; - last_page_end = new_htop + INC_PAGESIZE; - page = inc_used_mem; - while (page) - { - Eterm *ptr = page->start; - Eterm *page_end = ptr + INC_PAGESIZE; - int n = offsetof(INC_Page,start) / sizeof(void*) + - ((Eterm*)page - global_old_heap); - while (ptr < page_end) - { - if (blackmap[n] > 0) - { - if (last_page_end - new_htop < blackmap[n]) - { - INC_Page *new_page = inc_bibop; - inc_bibop = inc_bibop->next; - new_page->next = new_used_mem; - new_used_mem = new_page; - new_htop = new_page->start; - last_page_end = new_htop + INC_PAGESIZE; - } - - memcpy(new_htop,ptr,blackmap[n] * sizeof(Eterm)); - for (i = 0; i < blackmap[n]; i++) - { - *ptr++ = (Eterm)new_htop++; - } - //new_htop += blackmap[n]; - //ptr += blackmap[n]; - / * - if (blackmap[n] == 255) Do the right thing... 
- * / - n += blackmap[n]; - } - else - { - n++; ptr++; - } - } - page = page->next; - } - - page = inc_used_mem; - while (page) - { - Eterm *ptr = page->start; - Eterm *page_end = ptr + INC_PAGESIZE; - - / * TODO: If inc_used_mem is sorted in address order, this - * pass can be done at the same time as copying. * / - while (ptr < page_end) - { - if (ptr_within(ptr_val(*ptr),global_old_heap,global_old_hend)) - { - *ptr = *((Eterm*)ptr_val(*ptr)); - } - ptr++; - } - page = page->next; - } - - printf("Restore rootset after heap move. Roots: %d\r\n",roots_saved); - while (roots_saved--) - { - Eterm *ptr = root_save[roots_saved]; - *ptr = *((Eterm*)ptr_val(*ptr)); - } - - erts_free(ERTS_ALC_T_MESSAGE_AREA,(void*)global_old_heap); - - global_old_heap = new_heap; - global_old_hend = new_hend; - inc_used_mem = new_used_mem; - inc_pages = new_pages; - - if ((last_page_end - new_htop) * sizeof(Eterm) >= - sizeof(INC_MemBlock)) - { - inc_free_list = (INC_MemBlock*)(new_htop); - inc_free_list->size = last_page_end - new_htop; - inc_free_list->prev = NULL; - inc_free_list->next = NULL; - } - } - */ - - /* In which situations might we want to grab an extra page.. ( < 25% left?) - if () - { - INC_Page *new_page = inc_bibop; - INC_MemBlock *new_free = - (INC_MemBlock*)new_page->start; - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Fetching new page\n")); - inc_bibop = inc_bibop->next; - - new_page->next = inc_used_mem; - if (inc_used_mem) - inc_used_mem->prev = new_page; - inc_used_mem = new_page; - - // check this with normal page size! old_gen_sz += INC_PAGESIZE; - //BM_SWAP_TIMER(gc,misc1); - memset(blackmap + - ((void*)new_page - (void*)global_old_heap) / sizeof(void*), - 0, INC_FULLPAGE); - //BM_SWAP_TIMER(misc1,gc); - - new_free->prev = NULL; - new_free->next = inc_free_list; - new_free->size = INC_PAGESIZE; - if (inc_free_list) - inc_free_list->prev = new_free; - inc_free_list = new_free; - //printf("Snatched a new page @ 0x%08x\r\n",(int)new_page); - //print_free_list(); - found = new_free; - } - */ - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Update copy stack\n")); - { - Uint i; - for (i = 0; i < ma_dst_top; i++) { - if (ptr_within(ma_dst_stack[i],inc_fromspc,inc_fromend)) { - if (INC_IS_FORWARDED(ma_dst_stack[i])) - ma_dst_stack[i] = INC_FORWARD_VALUE(ma_dst_stack[i]); - } - } - } - - if (WORK_MORE) - { - int size_left = INC_PAGESIZE - (aging_htop - aging->start); - - if (size_left > sizeof(INC_MemBlock)) - { - ((INC_MemBlock*)aging_htop)->size = size_left; - ((INC_MemBlock*)aging_htop)->prev = NULL; - ((INC_MemBlock*)aging_htop)->next = inc_free_list; - if (inc_free_list) - inc_free_list->prev = (INC_MemBlock*)aging_htop; - inc_free_list = (INC_MemBlock*)aging_htop; - } - aging->next = inc_used_mem; - inc_used_mem = aging; - inc_used_pages++; - - ma_gc_flags &= ~GC_MAJOR; - ma_gc_flags &= ~GC_CYCLE; - - VERBOSE(DEBUG_HYBRID_GC,("INCGC: Major collection cycle END\n")); - } - - ma_gc_flags &= ~GC_INCLUDE_ALL; - - BM_STOP_TIMER(old_gc); -#ifdef BM_TIMER - major_global_gc_time += old_gc_time; - if (old_gc_time > max_global_major_time) - max_global_major_time = old_gc_time; - - if ((old_gc_time * 1000) < MAX_PAUSE_TIME) - pause_times_old[(int)(old_gc_time * 1000)]++; - else - pause_times_old[MAX_PAUSE_TIME - 1]++; -#endif - BM_START_TIMER(gc); -} - - - -/*************************************************************************** - * * - * Allocation in the old generation. Used in minor collection and when * - * copying the rest of a message after a GC.
* - * * - ***************************************************************************/ - - -Eterm *erts_inc_alloc(int need) -{ - INC_MemBlock *this = inc_free_list; - - ASSERT(need < INC_PAGESIZE); - while (this && (this->size) < need) - { - this = this->next; - } - - if (!this) - { - /* If a free block large enough is not found, a new page is - * allocated. GC_NEED_MAJOR is set so that the next garbage - * collection cycle will be a major one, that is, both - * generations will be garbage collected. - */ - INC_Page *new_page = inc_bibop; - INC_MemBlock *new_free = (INC_MemBlock*)new_page->start; - - if (new_page) - { - VERBOSE(DEBUG_HYBRID_GC, - ("INCGC: Allocation grabs a new page\n")); - inc_bibop = inc_bibop->next; - new_page->next = inc_used_mem; - inc_used_mem = new_page; - inc_used_pages++; - - new_free->prev = NULL; - new_free->next = inc_free_list; - new_free->size = INC_PAGESIZE; - if (inc_free_list) - inc_free_list->prev = new_free; - inc_free_list = new_free; - - this = new_free; - if (!(ma_gc_flags & GC_MAJOR)) - ma_gc_flags |= GC_NEED_MAJOR; - } - else - { - erl_exit(-1, "inc_alloc ran out of pages!\n"); - } - } - - if (((this->size) - need) * sizeof(Eterm) >= sizeof(INC_MemBlock)) - { - INC_MemBlock *rest = (INC_MemBlock*)((Eterm*)this + need); - - /* The order here IS important! */ - rest->next = this->next; - - if (rest->next) - rest->next->prev = rest; - - rest->prev = this->prev; - - if (rest->prev) - rest->prev->next = rest; - else - inc_free_list = rest; - - rest->size = this->size - need; - } - else - { - if (this->prev) - this->prev->next = this->next; - else - inc_free_list = this->next; - - if (this->next) - this->next->prev = this->prev; - } - - if (ma_gc_flags & GC_MAJOR) { - if (need > 254) { - blackmap[(Eterm*)this - global_old_heap] = 255; - *(int*)((UWord)(&blackmap[(Eterm*)this - global_old_heap]+4) & ~3) = - need; - } else - blackmap[(Eterm*)this - global_old_heap] = need; - } - return (Eterm*)this; -} -#endif /* INCREMENTAL */ diff --git a/erts/emulator/beam/erl_nmgc.h b/erts/emulator/beam/erl_nmgc.h deleted file mode 100644 index b207dd37fa..0000000000 --- a/erts/emulator/beam/erl_nmgc.h +++ /dev/null @@ -1,364 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2004-2009. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. - * - * %CopyrightEnd% - */ - -#ifndef __ERL_NMGC_H__ -#define __ERL_NMGC_H__ - -#ifdef INCREMENTAL -#include <stddef.h> /* offsetof() */ -#include "erl_process.h" - -#define INC_FULLPAGE (INC_PAGESIZE + offsetof(INC_Page,start) / sizeof(void*)) - -#define BOXED_NEED(PTR,HDR) \ - (((HDR) & _HEADER_SUBTAG_MASK) == SUB_BINARY_SUBTAG ? \ - header_arity(HDR) + 2 : \ - ((HDR) & _HEADER_SUBTAG_MASK) == FUN_SUBTAG ? 
\ - header_arity(HDR) + ((ErlFunThing*)(PTR))->num_free + 2 : \ - header_arity(HDR) + 1) - - -#define INC_DECREASE_WORK(n) inc_words_to_go -= (n); - -#define INC_COPY_CONS(FROM,TO,PTR) \ -do { \ - TO[0] = FROM[0]; \ - TO[1] = FROM[1]; \ - INC_MARK_FORWARD(FROM,TO); \ - *(PTR) = make_list(TO); \ - INC_DECREASE_WORK(2); \ - (TO) += 2; \ -} while(0) - -#define INC_COPY_BOXED(FROM,TO,PTR) \ -do { \ - Sint nelts; \ - Eterm hdr = *(FROM); \ - \ - ASSERT(is_header(hdr)); \ - INC_MARK_FORWARD(FROM,TO); \ - *(PTR) = make_boxed(TO); \ - *(TO)++ = *(FROM)++; \ - nelts = header_arity(hdr); \ - switch ((hdr) & _HEADER_SUBTAG_MASK) { \ - case SUB_BINARY_SUBTAG: nelts++; break; \ - case FUN_SUBTAG: nelts+=((ErlFunThing*)(FROM-1))->num_free+1; break;\ - } \ - INC_DECREASE_WORK(nelts + 1); \ - while (nelts--) \ - *(TO)++ = *(FROM)++; \ -} while(0) - - -/* Things copied to the old generation are not marked in the blackmap. - * This is ok since the page they are copied to (aging) is not part of - * the sweep. - */ -#define COPYMARK_CONS(FROM,TO,PTR,LIMIT) \ -do { \ - if (ptr_within(FROM,inc_fromspc,inc_fromend)) { \ - if (INC_IS_FORWARDED(FROM)) { \ - *PTR = make_list(INC_FORWARD_VALUE(FROM)); \ - } else if (TO + 2 <= LIMIT) { \ - INC_STORE(gray,TO,2); \ - INC_COPY_CONS(FROM,TO,PTR); \ - } else { \ - Eterm *hp = erts_inc_alloc(2); \ - INC_STORE(gray,hp,2); \ - INC_COPY_CONS(FROM,hp,PTR); \ - } \ - } else if (ptr_within(FROM,global_old_heap,global_old_hend) && \ - (blackmap[FROM - global_old_heap] == 0)) { \ - blackmap[FROM - global_old_heap] = 2; \ - INC_DECREASE_WORK(2); \ - INC_STORE(gray,FROM,2); \ - } \ -} while(0) - -#define COPYMARK_BOXED(FROM,TO,PTR,LIMIT) \ -do { \ - if (ptr_within(FROM,inc_fromspc,inc_fromend)) { \ - int size = BOXED_NEED(FROM,*FROM); \ - if (INC_IS_FORWARDED(FROM)) { \ - *PTR = make_boxed(INC_FORWARD_VALUE(FROM)); \ - } else if (TO + size <= LIMIT) { \ - INC_STORE(gray,TO,size); \ - INC_COPY_BOXED(FROM,TO,PTR); \ - } else { \ - Eterm *hp = erts_inc_alloc(size); \ - INC_STORE(gray,hp,size); \ - INC_COPY_BOXED(FROM,hp,PTR); \ - } \ - } else if (ptr_within(FROM,global_old_heap,global_old_hend) && \ - (blackmap[FROM - global_old_heap] == 0)) { \ - int size = BOXED_NEED(FROM,*FROM); \ - if (size > 254) { \ - blackmap[FROM - global_old_heap] = 255; \ - *(int*)((long)(&blackmap[FROM - \ - global_old_heap] + 4) & ~3) = size; \ - } else \ - blackmap[FROM - global_old_heap] = size; \ - INC_DECREASE_WORK(size); \ - INC_STORE(gray,FROM,size); \ - } \ -} while(0) - -#define INC_MARK_FORWARD(ptr,dst) fwdptrs[(ptr) - inc_fromspc] = (dst); -#define INC_IS_FORWARDED(ptr) (fwdptrs[(ptr) - inc_fromspc] != 0) -#define INC_FORWARD_VALUE(ptr) fwdptrs[(ptr) - inc_fromspc] - -/* Note for BM_TIMER: Active timer should always be 'system' when IncAlloc - * is called! - */ -#define IncAlloc(p, sz, objv, nobj) \ - (ASSERT_EXPR((sz) >= 0), \ - (((inc_alloc_limit - global_htop) <= (sz)) ? \ - erts_incremental_gc((p),(sz),(objv),(nobj)) : 0), \ - ASSERT_EXPR(global_hend - global_htop > (sz)), \ - global_htop += (sz), global_htop - (sz)) - - -/************************************************************************ - * INC_STORAGE, a dynamic circular storage for objects (INC_Object). * - * Use INC_STORE to add objects to the storage. The storage can then * - * be used either as a queue, using INC_STORAGE_GET to retrieve * - * values, or as a stack, using INC_STORAGE_POP. It is OK to mix calls * - * to GET and POP if that is desired.
* - * An iterator can be declared to traverse the storage without removing * - * any elements, and INC_STORAGE_STEP will then return each element in * - * turn, oldest first. * - ***********************************************************************/ - -/* Declare a new storage; must be in the beginning of a block. Give - * the storage a name that is used in all later calls to the storage. - * If this is an external declaration of the storage, pass the keyword - * external as the first argument, otherwise leave it empty. - */ -#define INC_STORAGE_DECLARATION(ext,name) \ - ext INC_Storage *name##head; \ - ext INC_Storage *name##tail; \ - ext INC_Object *name##free; \ - ext INC_Object *name##last_free; \ - ext int name##size; - - -/* Initialize the storage. Note that memory allocation is involved - - * don't forget to erase the storage when you are done. - */ -#define INC_STORAGE_INIT(name) do { \ - name##head = (INC_Storage*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \ - sizeof(INC_Storage)); \ - name##head->next = name##head; \ - name##head->prev = name##head; \ - name##tail = name##head; \ - name##free = name##head->data; \ - name##last_free = name##free + INC_STORAGE_SIZE - 1; \ - name##size = 0; \ -} while(0) - - -/* -#define INC_STORAGE_SWAP(s1,s2) do { \ - INC_Storage *tmphead = s1##head; \ - INC_Storage *tmptail = s1##tail; \ - INC_Object *tmpfree = s1##free; \ - INC_Object *tmplast = s1##last_free; \ - int tmpsize = s1##size; \ - s1##head = s2##head; \ - s1##tail = s2##tail; \ - s1##free = s2##free; \ - s1##last_free = s2##last_free; \ - s1##size = s2##size; \ - s2##head = tmphead; \ - s2##tail = tmptail; \ - s2##free = tmpfree; \ - s2##last_free = tmplast; \ - s2##size = tmpsize; \ -} while(0) -*/ - - -/* Return and remove the youngest element - treat the storage as a - * stack. Always check that there are elements in the queue before - * using INC_STORAGE_POP! - */ -#define INC_STORAGE_POP(name) (ASSERT_EXPR(name##size != 0), \ - name##size--, \ - (--name##free != name##head->data - 1) ? \ - name##free : (name##head = name##head->prev, \ - name##free = name##head->data + INC_STORAGE_SIZE - 1)) - - -/* Return and remove the oldest element - treat the storage as a - * queue. Always check that there are elements in the queue before - * using INC_STORAGE_GET! - */ -#define INC_STORAGE_GET(name) (ASSERT_EXPR(name##size != 0), \ - name##size--, \ - (++name##last_free != name##tail->data + INC_STORAGE_SIZE) ? \ - name##last_free : (name##tail = name##tail->next, \ - name##last_free = name##tail->data)) - - -/* Advance the head to the next free location. If the storage is full, - * a new storage is allocated and linked into the list. - */ -#define INC_STORAGE_NEXT(name) do { \ - if (name##free == name##last_free) { \ - name##tail = (INC_Storage*)erts_alloc(ERTS_ALC_T_OBJECT_STACK, \ - sizeof(INC_Storage)); \ - memcpy(name##tail->data,name##head->data, \ - INC_STORAGE_SIZE * sizeof(INC_Object)); \ - name##tail->next = name##head->next; \ - name##head->next = name##tail; \ - name##tail->prev = name##tail->next->prev; \ - name##tail->next->prev = name##tail; \ - name##last_free = ((void*)name##tail + \ - ((void*)name##last_free - (void*)name##head)); \ - } \ - name##free++; \ - name##size++; \ - if (name##free == name##head->data + INC_STORAGE_SIZE) { \ - name##head = name##head->next; \ - name##free = name##head->data; \ - } \ -} while(0) - - -/* The head of this storage is the next free location. This is where - * the next element will be stored. 
- */ -#define INC_STORAGE_HEAD(name) (name##free) - - -/* Return the top - the youngest element in the storage. */ -/* #define INC_STORAGE_TOP(name) (name##free - 1 with some magic..) */ - - -/* True if the storage is empty, false otherwise */ -#define INC_STORAGE_EMPTY(name) (name##size == 0) - - -/* Store a new element in the head of the storage and advance the head - * to the next free location. - */ -#define INC_STORE(name,ptr,sz) do { \ - INC_STORAGE_HEAD(name)->this = ptr; \ - INC_STORAGE_HEAD(name)->size = sz; \ - INC_STORAGE_NEXT(name); \ -} while(0) - - -/* An iterator. Use it together with INC_STORAGE_STEP to browse through - * the storage. Please note that it is not possible to remove an entry - * in the middle of the storage, use GET or POP to remove entries. - */ -#define INC_STORAGE_ITERATOR(name) \ - INC_Storage *name##iterator_head = name##tail; \ - INC_Object *name##iterator_current = name##last_free; \ - int name##iterator_left = name##size; - - -/* Return the next element in the storage (sorted by age, oldest - * first) or NULL if the storage is empty or the last element has been - * returned already. - */ -#define INC_STORAGE_STEP(name) (name##iterator_left == 0 ? NULL : \ - (name##iterator_left--, \ - (++name##iterator_current != name##iterator_head->data + \ - INC_STORAGE_SIZE) ? name##iterator_current : \ - (name##iterator_head = name##iterator_head->next, \ - name##iterator_current = name##iterator_head->data))) - - -/* Erase the storage. */ -#define INC_STORAGE_ERASE(name)do { \ - name##head->prev->next = NULL; \ - while (name##head != NULL) { \ - name##tail = name##head; \ - name##head = name##head->next; \ - erts_free(ERTS_ALC_T_OBJECT_STACK,(void*)name##tail); \ - } \ - name##tail = NULL; \ - name##free = NULL; \ - name##last_free = NULL; \ - name##size = 0; \ -} while(0) - -/* - * Structures used by the non-moving memory manager - */ - -typedef struct -{ - Eterm *this; - unsigned long size; -} INC_Object; - -typedef struct inc_storage { - struct inc_storage *next; - struct inc_storage *prev; - INC_Object data[INC_STORAGE_SIZE]; -} INC_Storage; - -typedef struct inc_mem_block -{ - unsigned long size; - struct inc_mem_block *prev; - struct inc_mem_block *next; -} INC_MemBlock; - -typedef struct inc_page -{ - struct inc_page *next; - Eterm start[1]; /* Has to be last in struct, this is where the data starts */ -} INC_Page; - - -/* - * Heap pointers for the non-moving memory area.
- */ -extern INC_Page *inc_used_mem; -extern INC_MemBlock *inc_free_list; -extern unsigned char *blackmap; - -extern Eterm **fwdptrs; -extern Eterm *inc_fromspc; -extern Eterm *inc_fromend; -extern Process *inc_active_proc; -extern Process *inc_active_last; -extern Eterm *inc_alloc_limit; -extern int inc_words_to_go; - -INC_STORAGE_DECLARATION(extern,gray); -INC_STORAGE_DECLARATION(extern,root); - -void erts_init_incgc(void); -void erts_cleanup_incgc(void); -void erts_incremental_gc(Process *p, int sz, Eterm* objv, int nobj); -Eterm *erts_inc_alloc(int need); - -#else -# define INC_STORE(lst,ptr,sz) -# define INC_MARK_FORWARD(ptr) -# define INC_IS_FORWARDED(ptr) -# define INC_FORWARD_VALUE(ptr) -#endif /* INCREMENTAL */ - -#endif /* _ERL_NMGC_H_ */ diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index 16367c305d..649be3f454 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -849,9 +849,6 @@ static Eterm AM_dist_references; static Eterm AM_node_references; static Eterm AM_system; static Eterm AM_timer; -#ifdef HYBRID -static Eterm AM_processes; -#endif static void setup_reference_table(void); static Eterm reference_table_term(Uint **hpp, Uint *szp); @@ -936,9 +933,6 @@ erts_get_node_and_dist_references(struct process *proc) INIT_AM(node_references); INIT_AM(timer); INIT_AM(system); -#ifdef HYBRID - INIT_AM(processes); -#endif references_atoms_need_init = 0; } @@ -1301,12 +1295,6 @@ setup_reference_table(void) SYSTEM_REF, TUPLE2(&heap[0], AM_system, am_undefined)); -#ifdef HYBRID - /* Insert Heap */ - insert_offheap(&erts_global_offheap, - HEAP_REF, - TUPLE2(&heap[0], AM_processes, am_undefined)); -#endif UnUseTmpHeapNoproc(3); /* Insert all processes */ diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 005015c4ee..e62556ce72 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -28,7 +28,6 @@ #include "erl_vm.h" #include "global.h" #include "erl_process.h" -#include "erl_nmgc.h" #include "error.h" #include "bif.h" #include "erl_db.h" @@ -404,11 +403,6 @@ struct erts_system_monitor_flags_t erts_system_monitor_flags; Eterm erts_system_profile; struct erts_system_profile_flags_t erts_system_profile_flags; -#ifdef HYBRID -Uint erts_num_active_procs; -Process** erts_active_procs; -#endif - #if ERTS_MAX_PROCESSES > 0x7fffffff #error "Need to store process_count in another type" #endif @@ -536,6 +530,7 @@ dbg_chk_aux_work_val(erts_aint32_t value) #endif #ifdef ERTS_SMP valid |= ERTS_SSI_AUX_WORK_CODE_IX_ACTIVATION; + valid |= ERTS_SSI_AUX_WORK_FINISH_BP; #endif #ifdef ERTS_SSI_AUX_WORK_REAP_PORTS valid |= ERTS_SSI_AUX_WORK_REAP_PORTS; @@ -697,12 +692,6 @@ erts_init_process(int ncpu) erts_smp_atomic_init_nob(proc_entry, ERTS_AINT_NULL); proc_entry++; } -#ifdef HYBRID - erts_active_procs = (Process**) - erts_alloc(ERTS_ALC_T_ACTIVE_PROCS, - erts_proc.max * sizeof(Process*)); - erts_num_active_procs = 0; -#endif erts_smp_rwmtx_init_opt(&erts_proc_tab_rwmtx, &proc_tab_rwmtx_opts, @@ -1157,13 +1146,13 @@ unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs) #ifdef ERTS_SMP static ERTS_INLINE void -thr_prgr_current_reset(ErtsAuxWorkData *awdp) +haw_thr_prgr_current_reset(ErtsAuxWorkData *awdp) { awdp->current_thr_prgr = ERTS_THR_PRGR_INVALID; } static ERTS_INLINE ErtsThrPrgrVal -thr_prgr_current(ErtsAuxWorkData *awdp) +haw_thr_prgr_current(ErtsAuxWorkData *awdp) { ErtsThrPrgrVal current = awdp->current_thr_prgr; if (current == 
ERTS_THR_PRGR_INVALID) { @@ -1173,6 +1162,21 @@ thr_prgr_current(ErtsAuxWorkData *awdp) return current; } +static ERTS_INLINE void +haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp) +{ + ErtsThrPrgrVal current = awdp->current_thr_prgr; + if (current != ERTS_THR_PRGR_INVALID + && !erts_thr_progress_equal(current, erts_thr_progress_current())) { + /* + * We have used a previously read current value that isn't the + * latest; need to poke ourselves in order to guarantee no loss + * of wakeups. + */ + erts_sched_poke(awdp->ssi); + } +} + #endif typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t; @@ -1271,7 +1275,7 @@ static ERTS_INLINE erts_aint32_t handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) { - if (!erts_thr_progress_has_reached_this(thr_prgr_current(awdp), + if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), awdp->misc.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR; @@ -1376,7 +1380,7 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp, #ifdef ERTS_SMP if (awdp->async_ready.need_thr_prgr - && !erts_thr_progress_has_reached_this(thr_prgr_current(awdp), + && !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), awdp->async_ready.thr_prgr)) { return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN; } @@ -1443,6 +1447,55 @@ handle_code_ix_activation(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) } #endif /* ERTS_SMP */ +#ifdef ERTS_SMP +void +erts_notify_finish_breakpointing(Process* p) +{ + ErtsAuxWorkData* awdp = &p->scheduler_data->aux_work_data; + + ASSERT(awdp->bp_ix_activation.stager == NULL); + awdp->bp_ix_activation.stager = p; + awdp->bp_ix_activation.thr_prgr = erts_thr_progress_later(awdp->esdp); + erts_thr_progress_wakeup(awdp->esdp, awdp->bp_ix_activation.thr_prgr); + erts_smp_proc_inc_refc(p); + set_aux_work_flags_wakeup_relb(p->scheduler_data->ssi, + ERTS_SSI_AUX_WORK_FINISH_BP); +} + +static erts_aint32_t +handle_finish_bp(ErtsAuxWorkData* awdp, erts_aint32_t aux_work) +{ + ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); + + if (!erts_thr_progress_has_reached_this(current, + awdp->bp_ix_activation.thr_prgr)) { + return aux_work & ~ERTS_SSI_AUX_WORK_FINISH_BP; + } + if (erts_finish_breakpointing()) { /* Not done */ + /* Arrange for being called again */ + awdp->bp_ix_activation.thr_prgr = + erts_thr_progress_later(awdp->esdp); + erts_thr_progress_wakeup(awdp->esdp, awdp->bp_ix_activation.thr_prgr); + } else { /* Done */ + Process* p; + + unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_FINISH_BP); + p = awdp->bp_ix_activation.stager; +#ifdef DEBUG + awdp->bp_ix_activation.stager = NULL; +#endif + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + if (!ERTS_PROC_IS_EXITING(p)) { + erts_resume(p, ERTS_PROC_LOCK_STATUS); + } + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_smp_proc_dec_refc(p); + erts_release_code_write_permission(); + } + return aux_work & ~ERTS_SSI_AUX_WORK_FINISH_BP; +} +#endif /* ERTS_SMP */ + static ERTS_INLINE erts_aint32_t handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) { @@ -1495,7 +1548,7 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) if (need_thr_progress) { if (wakeup == ERTS_THR_PRGR_INVALID) - wakeup = erts_thr_progress_later_than(thr_prgr_current(awdp)); + wakeup = erts_thr_progress_later(awdp->esdp); awdp->dd.thr_prgr = wakeup; set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD_THR_PRGR); awdp->dd.thr_prgr = wakeup; @@ -1516,7 +1569,7 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) int
need_thr_progress; int more_work; ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; - ErtsThrPrgrVal current = thr_prgr_current(awdp); + ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR; @@ -1538,7 +1591,7 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) if (need_thr_progress) { if (wakeup == ERTS_THR_PRGR_INVALID) - wakeup = erts_thr_progress_later_than(current); + wakeup = erts_thr_progress_later(awdp->esdp); awdp->dd.thr_prgr = wakeup; erts_thr_progress_wakeup(awdp->esdp, wakeup); } @@ -1727,7 +1780,7 @@ handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) } static erts_aint32_t -handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work) +handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) { #undef HANDLE_AUX_WORK #define HANDLE_AUX_WORK(FLG, HNDLR) \ @@ -1745,7 +1798,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work) erts_aint32_t ignore = 0; #ifdef ERTS_SMP - thr_prgr_current_reset(awdp); + haw_thr_prgr_current_reset(awdp); #endif ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); @@ -1814,8 +1867,18 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work) HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_REAP_PORTS, handle_reap_ports); +#ifdef ERTS_SMP + HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_FINISH_BP, + handle_finish_bp); +#endif + ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); +#ifdef ERTS_SMP + if (waiting && !aux_work) + haw_thr_prgr_current_check_progress(awdp); +#endif + return aux_work; #undef HANDLE_AUX_WORK @@ -2274,7 +2337,7 @@ aux_thread(void *unused) if (aux_work) { if (!thr_prgr_active) erts_thr_progress_active(NULL, thr_prgr_active = 1); - aux_work = handle_aux_work(awdp, aux_work); + aux_work = handle_aux_work(awdp, aux_work, 1); if (aux_work && erts_thr_progress_update(NULL)) erts_thr_progress_leader_update(NULL); } @@ -2353,7 +2416,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erts_thr_progress_active(esdp, thr_prgr_active = 1); sched_wall_time_change(esdp, 1); } - aux_work = handle_aux_work(&esdp->aux_work_data, aux_work); + aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1); if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); } @@ -2457,7 +2520,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (!thr_prgr_active) erts_thr_progress_active(esdp, thr_prgr_active = 1); #endif - aux_work = handle_aux_work(&esdp->aux_work_data, aux_work); + aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1); #ifdef ERTS_SMP if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); @@ -3582,7 +3645,7 @@ retire_mpaths(ErtsMigrationPaths *mps) if (!mpaths.retired.first) mpaths.retired.last = NULL; - mps->thr_prgr = erts_thr_progress_later_than(current); + mps->thr_prgr = erts_thr_progress_later(NULL); mps->next = NULL; if (mpaths.retired.last) @@ -5179,7 +5242,9 @@ suspend_scheduler(ErtsSchedulerData *esdp) sched_wall_time_change(esdp, 1); } if (aux_work) - aux_work = handle_aux_work(&esdp->aux_work_data, aux_work); + aux_work = handle_aux_work(&esdp->aux_work_data, + aux_work, + 1); if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); if (qmask) { @@ -6851,7 +6916,7 @@ Process *schedule(Process *p, int calls) if (leader_update) erts_thr_progress_leader_update(esdp); if (aux_work) - 
handle_aux_work(&esdp->aux_work_data, aux_work); + handle_aux_work(&esdp->aux_work_data, aux_work, 0); erts_smp_runq_lock(rq); } } @@ -6864,7 +6929,7 @@ Process *schedule(Process *p, int calls) erts_aint32_t aux_work; aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); if (aux_work) - handle_aux_work(&esdp->aux_work_data, aux_work); + handle_aux_work(&esdp->aux_work_data, aux_work, 0); } #endif /* ERTS_SMP */ @@ -7078,7 +7143,6 @@ Process *schedule(Process *p, int calls) /* Never run a suspended process */ ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state))); - ACTIVATE(p); reds = context_reds; if (IS_TRACED(p)) { @@ -7114,7 +7178,6 @@ Process *schedule(Process *p, int calls) } p->fcalls = reds; - ASSERT(IS_ACTIVE(p)); ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); return p; } @@ -7515,9 +7578,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). ErtsRunQueue *rq = NULL; Process *p; Sint arity; /* Number of arguments. */ -#ifndef HYBRID Uint arg_size; /* Size of arguments. */ -#endif Uint sz; /* Needed words on heap. */ Uint heap_need; /* Size needed on heap. */ Eterm res = THE_NON_VALUE; @@ -7528,17 +7589,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); #endif -#ifdef HYBRID - /* - * Copy the arguments to the global heap - * Since global GC might occur we want to do this before adding the - * new process to the erts_proc.tab. - */ - BM_SWAP_TIMER(system,copy); - LAZY_COPY(parent,args); - BM_SWAP_TIMER(copy,system); - heap_need = 0; -#endif /* HYBRID */ /* * Check for errors. */ @@ -7577,12 +7627,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #endif BM_COUNT(processes_spawned); -#ifndef HYBRID BM_SWAP_TIMER(system,size); arg_size = size_object(args); BM_SWAP_TIMER(size,system); heap_need = arg_size; -#endif p->flags = erts_default_process_flags; @@ -7627,9 +7675,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz); p->old_hend = p->old_htop = p->old_heap = NULL; p->high_water = p->heap; -#ifdef INCREMENTAL - p->scan_top = p->high_water; -#endif p->gen_gcs = 0; p->stop = p->hend = p->heap + sz; p->htop = p->heap; @@ -7655,19 +7700,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). BM_STOP_TIMER(system); BM_MESSAGE(args,p,parent); BM_START_TIMER(system); -#ifdef HYBRID - p->arg_reg[2] = args; -#ifdef INCREMENTAL - p->active = 0; - if (ptr_val(args) >= inc_fromspc && ptr_val(args) < inc_fromend) - INC_ACTIVATE(p); -#endif -#else BM_SWAP_TIMER(system,copy); p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap); BM_MESSAGE_COPIED(arg_size); BM_SWAP_TIMER(copy,system); -#endif p->arity = 3; p->fvalue = NIL; @@ -7724,13 +7760,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #endif p->parent = parent->id == ERTS_INVALID_PID ? NIL : parent->id; -#ifdef HYBRID - p->rrma = NULL; - p->rrsrc = NULL; - p->nrr = 0; - p->rrsz = 0; -#endif - INIT_HOLE_CHECK(p); #ifdef DEBUG p->last_old_htop = NULL; @@ -7799,15 +7828,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). so->mref = mref; } -#ifdef HYBRID - /* - * Add process to the array of active processes. 
- */ - ACTIVATE(p); - p->active_index = erts_num_active_procs++; - erts_active_procs[p->active_index] = p; -#endif - #ifdef ERTS_SMP p->scheduler_data = NULL; p->suspendee = NIL; @@ -7891,9 +7911,6 @@ void erts_init_empty_process(Process *p) p->reg = NULL; p->heap_sz = 0; p->high_water = NULL; -#ifdef INCREMENTAL - p->scan_top = NULL; -#endif p->old_hend = NULL; p->old_htop = NULL; p->old_heap = NULL; @@ -7945,14 +7962,6 @@ void erts_init_empty_process(Process *p) #endif #endif - ACTIVATE(p); - -#ifdef HYBRID - p->rrma = NULL; - p->rrsrc = NULL; - p->nrr = 0; - p->rrsz = 0; -#endif INIT_HOLE_CHECK(p); #ifdef DEBUG p->last_old_htop = NULL; @@ -7998,9 +8007,6 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->reg == NULL); ASSERT(p->heap_sz == 0); ASSERT(p->high_water == NULL); -#ifdef INCREMENTAL - ASSERT(p->scan_top == NULL); -#endif ASSERT(p->old_hend == NULL); ASSERT(p->old_htop == NULL); ASSERT(p->old_heap == NULL); @@ -8148,22 +8154,6 @@ delete_process(Process* p) ASSERT(!p->suspend_monitors); p->fvalue = NIL; - -#ifdef HYBRID - erts_active_procs[p->active_index] = - erts_active_procs[--erts_num_active_procs]; - erts_active_procs[p->active_index]->active_index = p->active_index; -#ifdef INCREMENTAL - if (INC_IS_ACTIVE(p)) - INC_DEACTIVATE(p); -#endif - - if (p->rrma != NULL) { - erts_free(ERTS_ALC_T_ROOTSET,p->rrma); - erts_free(ERTS_ALC_T_ROOTSET,p->rrsrc); - } -#endif - } static ERTS_INLINE erts_aint32_t @@ -8552,7 +8542,6 @@ send_exit_signal(Process *c_p, /* current process if and only ? rsn : copy_object(rsn, rp)), NULL); - ACTIVATE(rp); } #endif return -1; /* Receiver will exit */ diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 0798697350..93e71681da 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -283,6 +283,7 @@ typedef enum { #define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 10) #define ERTS_SSI_AUX_WORK_CODE_IX_ACTIVATION (((erts_aint32_t) 1) << 11) #define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 12) +#define ERTS_SSI_AUX_WORK_FINISH_BP (((erts_aint32_t) 1) << 13) typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo; @@ -472,6 +473,12 @@ typedef struct { ErtsThrPrgrVal thr_prgr; } code_ix_activation; #endif +#ifdef ERTS_SMP + struct { + Process* stager; + ErtsThrPrgrVal thr_prgr; + } bp_ix_activation; +#endif } ErtsAuxWorkData; struct ErtsSchedulerData_ { @@ -868,24 +875,6 @@ struct process { #endif #endif -#ifdef HYBRID - Eterm *rrma; /* Remembered roots to Message Area */ - Eterm **rrsrc; /* The source of the root */ - Uint nrr; /* Number of remembered roots */ - Uint rrsz; /* Size of root array */ -#endif - -#ifdef HYBRID - Uint active; /* Active since last major collection? */ - Uint active_index; /* Index in the active process array */ -#endif - -#ifdef INCREMENTAL - Process *active_next; /* Active processes to scan for roots */ - Process *active_prev; /* in collection of the message area */ - Eterm *scan_top; -#endif - #ifdef CHECK_FOR_HOLES Eterm* last_htop; /* No need to scan the heap below this point. */ ErlHeapFragment* last_mbuf; /* No need to scan beyond this mbuf. 
*/ @@ -1031,10 +1020,6 @@ Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz); extern erts_smp_rwmtx_t erts_proc_tab_rwmtx; extern erts_smp_atomic_t *erts_proc_tab; -#ifdef HYBRID -extern Uint erts_num_active_procs; -extern Process** erts_active_procs; -#endif extern Uint erts_default_process_flags; extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx; /* If any of the erts_system_monitor_* variables are set (enabled), @@ -1210,6 +1195,7 @@ void erts_notify_check_async_ready_queue(void *); #endif #ifdef ERTS_SMP void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later); +void erts_notify_finish_breakpointing(Process* p); #endif void erts_schedule_misc_aux_work(int sched_id, void (*func)(void *), diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index bae2b383a4..83272aab42 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -1272,7 +1272,7 @@ void erts_lcnt_enable_proc_lock_count(int enable) { int i; for (i = 0; i < erts_max_processes; ++i) { - Process* p = process_tab[i]; + Process* p = erts_pix2proc(i); if (p) { if (enable) { if (!ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) { diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c index bff9d246a3..37b186abd9 100644 --- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c +++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c @@ -227,7 +227,7 @@ fetch_remote(erts_sspa_chunk_header_t *chdr, int max) ERTS_THR_MEMORY_BARRIER; else { chdr->head.next.unref_end = (erts_sspa_blk_t *) ilast; - chdr->head.next.thr_progress = erts_thr_progress_later(); + chdr->head.next.thr_progress = erts_thr_progress_later(NULL); erts_atomic32_set_relb(&chdr->tail.data.um_refc_ix, um_refc_ix); chdr->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0; diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c index 9ef83746c5..88524bdd4c 100644 --- a/erts/emulator/beam/erl_thr_progress.c +++ b/erts/emulator/beam/erl_thr_progress.c @@ -891,16 +891,16 @@ has_reached_wakeup(ErtsThrPrgrVal wakeup) ErtsThrPrgrVal limit; /* * erts_thr_progress_later() returns values which are - * equal to 'current + 2'. That is, users should never - * get a hold of values larger than that. + * equal to 'current + 2', or 'current + 3'. That is, users + * should never get a hold of values larger than that. * - * That is, valid values are values less than 'current + 3'. + * That is, valid values are values less than 'current + 4'. * * Values larger than this won't work with the wakeup * algorithm. 
*/ - limit = current + 3; + limit = current + 4; if (limit == ERTS_THR_PRGR_VAL_WAITING) limit = 0; else if (limit < current) /* Wrapped */ diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h index a71724b813..89486b065b 100644 --- a/erts/emulator/beam/erl_thr_progress.h +++ b/erts/emulator/beam/erl_thr_progress.h @@ -139,11 +139,12 @@ ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atm ERTS_GLB_INLINE int erts_thr_progress_is_managed_thread(void); ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current_to_later__(ErtsThrPrgrVal val); -ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later_than(ErtsThrPrgrVal val); -ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later(void); +ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later(ErtsSchedulerData *); ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current(void); ERTS_GLB_INLINE int erts_thr_progress_has_passed__(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2); ERTS_GLB_INLINE int erts_thr_progress_has_reached_this(ErtsThrPrgrVal this, ErtsThrPrgrVal val); +ERTS_GLB_INLINE int erts_thr_progress_equal(ErtsThrPrgrVal val1, + ErtsThrPrgrVal val2); ERTS_GLB_INLINE int erts_thr_progress_cmp(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2); ERTS_GLB_INLINE int erts_thr_progress_has_reached(ErtsThrPrgrVal val); @@ -230,16 +231,23 @@ erts_thr_progress_current_to_later__(ErtsThrPrgrVal val) } ERTS_GLB_INLINE ErtsThrPrgrVal -erts_thr_progress_later_than(ErtsThrPrgrVal val) +erts_thr_progress_later(ErtsSchedulerData *esdp) { - ERTS_THR_MEMORY_BARRIER; - return erts_thr_progress_current_to_later__(val); -} - -ERTS_GLB_INLINE ErtsThrPrgrVal -erts_thr_progress_later(void) -{ - ErtsThrPrgrVal val = erts_thr_prgr_read_mb__(&erts_thr_prgr__.current); + ErtsThrPrgrData *tpd; + ErtsThrPrgrVal val; + if (esdp) { + tpd = &esdp->thr_progress_data; + managed_thread: + val = tpd->previous.local; + ERTS_THR_MEMORY_BARRIER; + } + else { + tpd = erts_tsd_get(erts_thr_prgr_data_key__); + if (tpd && tpd->is_managed) + goto managed_thread; + val = erts_thr_prgr_read_mb__(&erts_thr_prgr__.current); + } + ASSERT(val != ERTS_THR_PRGR_VAL_WAITING); return erts_thr_progress_current_to_later__(val); } @@ -279,6 +287,12 @@ erts_thr_progress_has_reached_this(ErtsThrPrgrVal this, ErtsThrPrgrVal val) } ERTS_GLB_INLINE int +erts_thr_progress_equal(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2) +{ + return val1 == val2 && val1 != ERTS_THR_PRGR_INVALID; +} + +ERTS_GLB_INLINE int erts_thr_progress_cmp(ErtsThrPrgrVal val1, ErtsThrPrgrVal val2) { if (val1 == val2) diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c index 70949ece76..f07964a265 100644 --- a/erts/emulator/beam/erl_thr_queue.c +++ b/erts/emulator/beam/erl_thr_queue.c @@ -422,7 +422,7 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify) else { q->head.next.unref_end = (ErtsThrQElement_t *) ilast; #ifdef ERTS_SMP - q->head.next.thr_progress = erts_thr_progress_later(); + q->head.next.thr_progress = erts_thr_progress_later(NULL); #endif erts_atomic32_set_relb(&q->tail.data.um_refc_ix, um_refc_ix); diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index bc988cd61b..d04a91f18c 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -2134,187 +2134,6 @@ void save_calls(Process *p, Export *e) } } -/* - * Entry point called by the trace wrap functions in erl_bif_wrap.c - * - * The trace wrap functions are themselves called through the export - * entries instead of the original BIF 
functions. - */ -Eterm -erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) -{ - Eterm result; - int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META); - - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); - - if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_CALLS) && (! meta)) { - /* Warning! This is an Optimization. - * - * If neither meta trace is active nor process trace flags then - * no tracing will occur. Doing the whole else branch will - * also do nothing, only slower. - */ - Eterm (*func)(Process*, Eterm*, BeamInstr*) = bif_table[bif_index].f; - result = func(p, args, I); - } else { - Eterm (*func)(Process*, Eterm*, BeamInstr*); - Export* ep = bif_export[bif_index]; - Uint32 flags = 0, flags_meta = 0; - int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL); - int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL); - int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME); - Eterm meta_tracer_pid = NIL; - int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif - * is actually in the - * export entry */ - BeamInstr *cp = p->cp; - - /* - * Make continuation pointer OK, it is not during direct BIF calls, - * but it is correct during apply of bif. - */ - if (!applying) { - p->cp = I; - } - if (global || local) { - flags = erts_call_trace(p, ep->code, ep->match_prog_set, args, - local, &p->tracer_proc); - } - if (meta) { - flags_meta = erts_bif_mtrace(p, ep->code+3, args, local, - &meta_tracer_pid); - } - if (time) { - BpDataTime *bdt = NULL; - BeamInstr *pc = (BeamInstr *)ep->code+3; - - bdt = (BpDataTime *) erts_get_time_break(p, pc); - ASSERT(bdt); - - if (!bdt->pause) { - erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL); - } - } - /* Restore original continuation pointer (if changed). */ - p->cp = cp; - - func = bif_table[bif_index].f; - - result = func(p, args, I); - - if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) { - BeamInstr i_return_trace = beam_return_trace[0]; - BeamInstr i_return_to_trace = beam_return_to_trace[0]; - BeamInstr i_return_time_trace = beam_return_time_trace[0]; - Eterm *cpp; - /* Maybe advance cp to skip trace stack frames */ - for (cpp = p->stop; ; cp = cp_val(*cpp++)) { - if (*cp == i_return_trace) { - /* Skip stack frame variables */ - while (is_not_CP(*cpp)) cpp++; - cpp += 2; /* Skip return_trace parameters */ - } else if (*cp == i_return_time_trace) { - /* Skip stack frame variables */ - while (is_not_CP(*cpp)) cpp++; - cpp += 1; /* Skip return_time_trace parameters */ - } else if (*cp == i_return_to_trace) { - /* A return_to trace message is going to be generated - * by normal means, so we do not have to. - */ - cp = NULL; - break; - } else break; - } - } - - /* Try to get these in the order - * they usually appear in normal code... */ - if (is_non_value(result)) { - Uint reason = p->freason; - if (reason != TRAP) { - Eterm class; - Eterm value = p->fvalue; - DeclareTmpHeapNoproc(nocatch,3); - UseTmpHeapNoproc(3); - /* Expand error value like in handle_error() */ - if (reason & EXF_ARGLIST) { - Eterm *tp; - ASSERT(is_tuple(value)); - tp = tuple_val(value); - value = tp[1]; - } - if ((reason & EXF_THROWN) && (p->catches <= 0)) { - value = TUPLE2(nocatch, am_nocatch, value); - reason = EXC_ERROR; - } - /* Note: expand_error_value() could theoretically - * allocate on the heap, but not for any error - * returned by a BIF, and it would do no harm, - * just be annoying. 
- */ - value = expand_error_value(p, reason, value); - class = exception_tag[GET_EXC_CLASS(reason)]; - - if (flags_meta & MATCH_SET_EXCEPTION_TRACE) { - erts_trace_exception(p, ep->code, class, value, - &meta_tracer_pid); - } - if (flags & MATCH_SET_EXCEPTION_TRACE) { - erts_trace_exception(p, ep->code, class, value, - &p->tracer_proc); - } - if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) { - /* can only happen if(local)*/ - Eterm *ptr = p->stop; - ASSERT(is_CP(*ptr)); - ASSERT(ptr <= STACK_START(p)); - /* Search the nearest stack frame for a catch */ - while (++ptr < STACK_START(p)) { - if (is_CP(*ptr)) break; - if (is_catch(*ptr)) { - if (applying) { - /* Apply of BIF, cp is in calling function */ - if (cp) erts_trace_return_to(p, cp); - } else { - /* Direct bif call, I points into - * calling function */ - erts_trace_return_to(p, I); - } - } - } - } - UnUseTmpHeapNoproc(3); - if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) { - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); - p->trace_flags |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); - } - } - } else { - if (flags_meta & MATCH_SET_RX_TRACE) { - erts_trace_return(p, ep->code, result, &meta_tracer_pid); - } - /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */ - if (flags & MATCH_SET_RX_TRACE) { - erts_trace_return(p, ep->code, result, &p->tracer_proc); - } - if (flags & MATCH_SET_RETURN_TO_TRACE) { - /* can only happen if(local)*/ - if (applying) { - /* Apply of BIF, cp is in calling function */ - if (cp) erts_trace_return_to(p, cp); - } else { - /* Direct bif call, I points into calling function */ - erts_trace_return_to(p, I); - } - } - } - } - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); - return result; -} - /* Sends trace message: * {trace_ts, Pid, What, Msg, Timestamp} * or {trace, Pid, What, Msg} diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index 5dc307e383..a0e12e57f2 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -29,19 +29,10 @@ /* #define FORCE_HEAP_FRAGS */ -#if defined(HYBRID) -/* # define CHECK_FOR_HOLES */ -#endif - #if defined(DEBUG) && !defined(CHECK_FOR_HOLES) && !defined(__WIN32__) # define CHECK_FOR_HOLES #endif -#if defined(HYBRID) -/* # define INCREMENTAL 1 */ /* Incremental garbage collection */ -/* # define INC_TIME_BASED 1 */ /* Time-based incremental GC (vs Work-based) */ -#endif - #define BEAM 1 #define EMULATOR "BEAM" #define SEQ_TRACE 1 @@ -70,16 +61,6 @@ #define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */ #define VH_DEFAULT_SIZE 32768 /* default virtual (bin) heap min size (words) */ -#ifdef HYBRID -# define SH_DEFAULT_SIZE 2629425 /* default message area min size */ -#endif - -#ifdef INCREMENTAL -# define INC_NoPAGES 256 /* Number of pages in the old generation */ -# define INC_PAGESIZE 32768 /* The size of each page */ -# define INC_STORAGE_SIZE 1024 /* The size of gray stack and similar */ -#endif - #define CP_SIZE 1 #define ErtsHAllocLockCheck(P) \ diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d index c1024dafc4..ac9b0052e5 100644 --- a/erts/emulator/beam/erlang_dtrace.d +++ b/erts/emulator/beam/erlang_dtrace.d @@ -639,9 +639,9 @@ provider erlang { * Entry into the efile_drv.c file I/O driver * * For a list of command numbers used by this driver, see the section - * "Guide to probe arguments" in ../../../README.md. 
That section - * also contains explanation of the various integer and string - * arguments that may be present when any particular probe fires. + * "Guide to efile_drv.c probe arguments" in ../../../HOWTO/DTRACE.md. + * That section also contains explanation of the various integer and + * string arguments that may be present when any particular probe fires. * * NOTE: Not all Linux platforms (using SystemTap) can support * arguments beyond arg9. diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c index 229641cb32..6b5121f917 100644 --- a/erts/emulator/beam/export.c +++ b/erts/emulator/beam/export.c @@ -137,7 +137,6 @@ export_alloc(struct export_entry* tmpl_e) obj->code[2] = tmpl->code[2]; obj->code[3] = (BeamInstr) em_call_error_handler; obj->code[4] = 0; - obj->match_prog_set = NULL; for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) { obj->addressv[ix] = obj->code+3; @@ -260,8 +259,9 @@ erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix) struct export_entry* ee; ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a)); - if (ee == NULL || (ee->ep->addressv[code_ix] == ee->ep->code+3 && - ee->ep->code[3] != (BeamInstr) em_call_traced_function)) { + if (ee == NULL || + (ee->ep->addressv[code_ix] == ee->ep->code+3 && + ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) { return NULL; } return ee->ep; diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h index ec9fcb26f2..ee06e69aff 100644 --- a/erts/emulator/beam/export.h +++ b/erts/emulator/beam/export.h @@ -37,7 +37,6 @@ typedef struct export { void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */ - struct binary* match_prog_set; /* Match program for tracing. */ BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */ /* @@ -46,12 +45,12 @@ typedef struct export * code[2]: Arity (untagged integer). * code[3]: This entry is 0 unless the 'address' field points to it. * Threaded code instruction to load function - * (em_call_error_handler), execute BIF (em_apply_bif, - * em_apply_apply), or call a traced function - * (em_call_traced_function). - * code[4]: Function pointer to BIF function (for BIFs only) + * (em_call_error_handler), execute BIF (em_apply_bif), + * or a breakpoint instruction (op_i_generic_breakpoint). + * code[4]: Function pointer to BIF function (for BIFs only), * or pointer to threaded code if the module has an - * on_load function that has not been run yet. + * on_load function that has not been run yet, or pointer + * to code for the function if code[3] is a breakpoint instruction. * Otherwise: 0. */ BeamInstr code[5]; diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index 261a480ebf..feb1740649 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -2626,14 +2626,12 @@ dec_term_atom_common: } old_uniq = unsigned_val(temp); -#ifndef HYBRID /* FIND ME! */ /* * It is safe to link the fun into the fun list only when * no more validity tests can fail. */ funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*)funp; -#endif funp->fe = erts_put_fun_entry2(module, old_uniq, old_index, uniq, index, arity); @@ -2704,14 +2702,12 @@ dec_term_atom_common: goto error; } -#ifndef HYBRID /* FIND ME! */ /* * It is safe to link the fun into the fun list only when * no more validity tests can fail.
             */
            funp->next = off_heap->first;
            off_heap->first = (struct erl_off_heap_header*)funp;
-#endif
            old_uniq = unsigned_val(temp);
 
            funp->fe = erts_put_fun_entry(module, old_uniq, old_index);
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 12fea779da..c9be20322d 100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -570,92 +570,6 @@ extern erts_smp_atomic32_t erts_max_gen_gcs;
 
 extern int erts_disable_tolerant_timeofday;
 
-#ifdef HYBRID
-
-/* Message Area heap pointers */
-extern Eterm *global_heap;             /* Heap start */
-extern Eterm *global_hend;             /* Heap end */
-extern Eterm *global_htop;             /* Heap top (heap pointer) */
-extern Eterm *global_saved_htop;       /* Saved heap top (heap pointer) */
-extern Uint   global_heap_sz;          /* Heap size, in words */
-extern Eterm *global_old_heap;         /* Old generation */
-extern Eterm *global_old_hend;
-extern ErlOffHeap erts_global_offheap; /* Global MSO (OffHeap) list */
-
-extern Uint16 global_gen_gcs;
-extern Uint16 global_max_gen_gcs;
-extern Uint   global_gc_flags;
-
-#ifdef INCREMENTAL
-#define ACTIVATE(p)
-#define DEACTIVATE(p)
-#define IS_ACTIVE(p) 1
-
-#define INC_ACTIVATE(p) do {                                      \
-    if ((p)->active) {                                            \
-        if ((p)->active_next != NULL) {                           \
-            (p)->active_next->active_prev = (p)->active_prev;     \
-            if ((p)->active_prev) {                               \
-                (p)->active_prev->active_next = (p)->active_next; \
-            } else {                                              \
-                inc_active_proc = (p)->active_next;               \
-            }                                                     \
-            inc_active_last->active_next = (p);                   \
-            (p)->active_next = NULL;                              \
-            (p)->active_prev = inc_active_last;                   \
-            inc_active_last = (p);                                \
-        }                                                         \
-    } else {                                                      \
-        (p)->active_next = NULL;                                  \
-        (p)->active_prev = inc_active_last;                       \
-        if (inc_active_last) {                                    \
-            inc_active_last->active_next = (p);                   \
-        } else {                                                  \
-            inc_active_proc = (p);                                \
-        }                                                         \
-        inc_active_last = (p);                                    \
-        (p)->active = 1;                                          \
-    }                                                             \
-} while(0);
-
-#define INC_DEACTIVATE(p) do {                                    \
-    ASSERT((p)->active == 1);                                     \
-    if ((p)->active_next == NULL) {                               \
-        inc_active_last = (p)->active_prev;                       \
-    } else {                                                      \
-        (p)->active_next->active_prev = (p)->active_prev;         \
-    }                                                             \
-    if ((p)->active_prev == NULL) {                               \
-        inc_active_proc = (p)->active_next;                       \
-    } else {                                                      \
-        (p)->active_prev->active_next = (p)->active_next;         \
-    }                                                             \
-    (p)->active = 0;                                              \
-} while(0);
-
-#define INC_IS_ACTIVE(p) ((p)->active != 0)
-
-#else
-extern Eterm *global_old_htop;
-extern Eterm *global_high_water;
-#define ACTIVATE(p)   (p)->active = 1;
-#define DEACTIVATE(p) (p)->active = 0;
-#define IS_ACTIVE(p)  ((p)->active != 0)
-#define INC_ACTIVATE(p)
-#define INC_IS_ACTIVE(p) 1
-#endif /* INCREMENTAL */
-
-#else
-# define ACTIVATE(p)
-# define DEACTIVATE(p)
-# define IS_ACTIVE(p) 1
-# define INC_ACTIVATE(p)
-#endif /* HYBRID */
-
-#ifdef HYBRID
-extern Uint global_heap_min_sz;
-#endif
-
 extern int bif_reductions;      /* reductions + fcalls (when doing call_bif) */
 extern int stackdump_on_exit;
 
@@ -922,7 +836,6 @@ __decl_noreturn void __noreturn erl_exit_flush_async(int n, char*, ...);
 void erl_error(char*, va_list);
 
 /* copy.c */
-void init_copy(void);
 Eterm copy_object(Eterm, Process*);
 
 #if HALFWORD_HEAP
@@ -952,116 +865,6 @@ Eterm copy_shallow(Eterm*, Uint, Eterm**, ErlOffHeap*);
 
 void move_multi_frags(Eterm** hpp, ErlOffHeap*, ErlHeapFragment* first,
                       Eterm* refs, unsigned nrefs);
 
-#ifdef HYBRID
-#define RRMA_DEFAULT_SIZE 256
-#define RRMA_STORE(p,ptr,src) do {                              \
-    ASSERT((p)->rrma != NULL);                                  \
-    ASSERT((p)->rrsrc != NULL);                                 \
-    (p)->rrma[(p)->nrr] = (ptr);                                \
-    (p)->rrsrc[(p)->nrr++] = (src);                             \
-    if ((p)->nrr == (p)->rrsz)                                  \
-    {                                                           \
-        (p)->rrsz *= 2;                                         \
-        (p)->rrma = (Eterm *) erts_realloc(ERTS_ALC_T_ROOTSET,  \
-                                           (void*)(p)->rrma,           \
-                                           sizeof(Eterm) * (p)->rrsz); \
-        (p)->rrsrc = (Eterm **) erts_realloc(ERTS_ALC_T_ROOTSET,       \
-                                             (void*)(p)->rrsrc,        \
-                                             sizeof(Eterm) * (p)->rrsz); \
-    }                                                           \
-} while(0)
-
-/* Note that RRMA_REMOVE decreases the given index after deletion.
- * This is done so that a loop with an increasing index can call
- * remove without having to decrease the index to see the element
- * placed in the hole after the deleted element.
- */
-#define RRMA_REMOVE(p,index) do {             \
-        p->rrsrc[index] = p->rrsrc[--p->nrr]; \
-        p->rrma[index--] = p->rrma[p->nrr];   \
-    } while(0);
-
-
-/* The MessageArea STACKs are used while copying messages to the
- * message area.
- */
-#define MA_STACK_EXTERNAL_DECLARE(type,_s_)   \
-    typedef type ma_##_s_##_type;             \
-    extern ma_##_s_##_type *ma_##_s_##_stack; \
-    extern Uint ma_##_s_##_top;               \
-    extern Uint ma_##_s_##_size;
-
-#define MA_STACK_DECLARE(_s_) \
-    ma_##_s_##_type *ma_##_s_##_stack; Uint ma_##_s_##_top; Uint ma_##_s_##_size;
-
-#define MA_STACK_ALLOC(_s_) do {                                              \
-    ma_##_s_##_top = 0;                                                       \
-    ma_##_s_##_size = 512;                                                    \
-    ma_##_s_##_stack = (ma_##_s_##_type*)erts_alloc(ERTS_ALC_T_OBJECT_STACK,  \
-                           sizeof(ma_##_s_##_type) * ma_##_s_##_size);        \
-} while(0)
-
-
-#define MA_STACK_PUSH(_s_,val) do {                                      \
-    ma_##_s_##_stack[ma_##_s_##_top++] = (val);                          \
-    if (ma_##_s_##_top == ma_##_s_##_size)                               \
-    {                                                                    \
-        ma_##_s_##_size *= 2;                                            \
-        ma_##_s_##_stack =                                               \
-            (ma_##_s_##_type*) erts_realloc(ERTS_ALC_T_OBJECT_STACK,     \
-                                            (void*)ma_##_s_##_stack,     \
-                                            sizeof(ma_##_s_##_type) * ma_##_s_##_size); \
-    }                                                                    \
-} while(0)
-
-#define MA_STACK_POP(_s_) (ma_##_s_##_top != 0 ? ma_##_s_##_stack[--ma_##_s_##_top] : 0)
-#define MA_STACK_TOP(_s_) (ma_##_s_##_stack[ma_##_s_##_top - 1])
-#define MA_STACK_UPDATE(_s_,offset,value) \
-    *(ma_##_s_##_stack[ma_##_s_##_top - 1] + (offset)) = (value)
-#define MA_STACK_SIZE(_s_) (ma_##_s_##_top)
-#define MA_STACK_ELM(_s_,i) ma_##_s_##_stack[i]
-
-MA_STACK_EXTERNAL_DECLARE(Eterm,src);
-MA_STACK_EXTERNAL_DECLARE(Eterm*,dst);
-MA_STACK_EXTERNAL_DECLARE(Uint,offset);
-
-
-#ifdef INCREMENTAL
-extern Eterm *ma_pending_stack;
-extern Uint ma_pending_top;
-extern Uint ma_pending_size;
-
-#define NO_COPY(obj) (IS_CONST(obj) ||                       \
-                      (((ptr_val(obj) >= global_heap) &&     \
-                        (ptr_val(obj) < global_htop)) ||     \
-                       ((ptr_val(obj) >= inc_fromspc) &&     \
-                        (ptr_val(obj) < inc_fromend)) ||     \
-                       ((ptr_val(obj) >= global_old_heap) && \
-                        (ptr_val(obj) < global_old_hend))))
-
-#else
-
-#define NO_COPY(obj) (IS_CONST(obj) ||                       \
-                      (((ptr_val(obj) >= global_heap) &&     \
-                        (ptr_val(obj) < global_htop)) ||     \
-                       ((ptr_val(obj) >= global_old_heap) && \
-                        (ptr_val(obj) < global_old_hend))))
-
-#endif /* INCREMENTAL */
-
-#define LAZY_COPY(from,obj) do {            \
-    if (!NO_COPY(obj)) {                    \
-        BM_LAZY_COPY_START;                 \
-        BM_COUNT(messages_copied);          \
-        obj = copy_struct_lazy(from,obj,0); \
-        BM_LAZY_COPY_STOP;                  \
-    }                                       \
-} while(0)
-
-Eterm copy_struct_lazy(Process*, Eterm, Uint);
-
-#endif /* HYBRID */
-
 /* Utilities */
 extern void erts_delete_nodes_monitors(Process *, ErtsProcLocks);
 extern Eterm erts_monitor_nodes(Process *, Eterm, Eterm);
@@ -1155,10 +958,6 @@ void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
 void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
 void erts_free_heap_frags(Process* p);
 
-#ifdef HYBRID
-int erts_global_garbage_collect(Process*, int, Eterm*, int);
-#endif
-
 /* io.c */
 
 struct erl_drv_port_data_lock {
@@ -1883,10 +1682,10 @@ struct trace_pattern_flags {
 };
 extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
 extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Eterm* mfa, int specified,
+int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
                           Binary* match_prog_set,
                           Binary *meta_match_prog_set,
                           int on, struct trace_pattern_flags,
-                          Eterm meta_tracer_pid);
+                          Eterm meta_tracer_pid, int is_blocking);
 void erts_get_default_trace_pattern(int *trace_pattern_is_on,
                                     Binary **match_spec,
@@ -1895,6 +1694,7 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
                                     Eterm *meta_tracer_pid);
 int erts_is_default_trace_enabled(void);
 void erts_bif_trace_init(void);
+int erts_finish_breakpointing(void);
 
 /*
 ** Call_trace uses this API for the parameter matching functions
@@ -1940,14 +1740,6 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
                                             breakpoint functions */
 #define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */
 #define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
-/*
- * Flag values when tracing bif
- * Future note: flag field is 8 bits
- */
-#define BIF_TRACE_AS_LOCAL     (0x1)
-#define BIF_TRACE_AS_GLOBAL    (0x2)
-#define BIF_TRACE_AS_META      (0x4)
-#define BIF_TRACE_AS_CALL_TIME (0x8)
 
 extern erts_driver_t vanilla_driver;
 extern erts_driver_t spawn_driver;
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index c58b36231c..6764e88c81 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -62,11 +62,8 @@ label L
 i_func_info I a a I
 int_code_end
 
-i_trace_breakpoint
-i_mtrace_breakpoint
+i_generic_breakpoint
 i_debug_breakpoint
-i_count_breakpoint
-i_time_breakpoint
 i_return_time_trace
 i_return_to_trace
 i_yield
@@ -522,7 +519,6 @@ apply_bif
 call_nif
 call_error_handler
 error_action_code
-call_traced_function
 return_trace
 
 #
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 0d3b910278..1dc6f1d233 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -25,11 +25,6 @@
 # define NO_FPE_SIGNALS
 #endif
 
-/* xxxP __VXWORKS__ */
-#ifdef VXWORKS
-#include <vxWorks.h>
-#endif
-
 #ifdef DISABLE_CHILD_WAITER_THREAD
 #undef ENABLE_CHILD_WAITER_THREAD
 #endif
@@ -43,8 +38,6 @@
 
 #if defined (__WIN32__)
 #  include "erl_win_sys.h"
-#elif defined (VXWORKS)
-#  include "erl_vxworks_sys.h"
 #else
 #  include "erl_unix_sys.h"
 #ifndef UNIX
@@ -182,12 +175,6 @@ void erl_assert_error(char* expr, char* file, int line);
 #  define const
 #endif
 
-#ifdef VXWORKS
-/* Replace VxWorks' printf with a real one that does fprintf(stdout, ...) */
-int real_printf(const char *fmt, ...);
-#  define printf real_printf
-#endif
-
 #undef __deprecated
 #if ERTS_AT_LEAST_GCC_VSN__(3, 0, 0)
 #  define __deprecated __attribute__((deprecated))
@@ -486,38 +473,28 @@ static unsigned long zero_value = 0, one_value = 1;
 #    define SET_NONBLOCKING(fd) ioctlsocket((fd), FIONBIO, &one_value)
 
 #  else
-#    ifdef VXWORKS
-#      include <fcntl.h> /* xxxP added for O_WRONLY etc ... macro:s ... */
-#      include <ioLib.h>
-static const int zero_value = 0, one_value = 1;
-#      define SET_BLOCKING(fd)    ioctl((fd), FIONBIO, (int)&zero_value)
-#      define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, (int)&one_value)
-#      define ERRNO_BLOCK EWOULDBLOCK
-
-#    else
-#      ifdef NB_FIONBIO       /* Old BSD */
-#        include <sys/ioctl.h>
+#    ifdef NB_FIONBIO         /* Old BSD */
+#      include <sys/ioctl.h>
 static const int zero_value = 0, one_value = 1;
-#        define SET_BLOCKING(fd)    ioctl((fd), FIONBIO, &zero_value)
-#        define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, &one_value)
-#        define ERRNO_BLOCK EWOULDBLOCK
-#      else /* !NB_FIONBIO */
-#        include <fcntl.h>
-#        ifdef NB_O_NDELAY    /* Nothing needs this? */
-#          define NB_FLAG O_NDELAY
-#          ifndef ERRNO_BLOCK /* allow override (e.g. EAGAIN) via Makefile */
-#            define ERRNO_BLOCK EWOULDBLOCK
-#          endif
-#        else /* !NB_O_NDELAY */ /* The True Way - POSIX!:-) */
-#          define NB_FLAG O_NONBLOCK
-#          define ERRNO_BLOCK EAGAIN
-#        endif /* !NB_O_NDELAY */
-#        define SET_BLOCKING(fd)    fcntl((fd), F_SETFL, \
-                                          fcntl((fd), F_GETFL, 0) & ~NB_FLAG)
-#        define SET_NONBLOCKING(fd) fcntl((fd), F_SETFL, \
-                                          fcntl((fd), F_GETFL, 0) | NB_FLAG)
-#      endif /* !NB_FIONBIO */
-#    endif /* _WXWORKS_ */
+#      define SET_BLOCKING(fd)    ioctl((fd), FIONBIO, &zero_value)
+#      define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, &one_value)
+#      define ERRNO_BLOCK EWOULDBLOCK
+#    else /* !NB_FIONBIO */
+#      include <fcntl.h>
+#      ifdef NB_O_NDELAY      /* Nothing needs this? */
+#        define NB_FLAG O_NDELAY
+#        ifndef ERRNO_BLOCK /* allow override (e.g. EAGAIN) via Makefile */
+#          define ERRNO_BLOCK EWOULDBLOCK
+#        endif
+#      else /* !NB_O_NDELAY */ /* The True Way - POSIX!:-) */
+#        define NB_FLAG O_NONBLOCK
+#        define ERRNO_BLOCK EAGAIN
+#      endif /* !NB_O_NDELAY */
+#      define SET_BLOCKING(fd)    fcntl((fd), F_SETFL, \
+                                        fcntl((fd), F_GETFL, 0) & ~NB_FLAG)
+#      define SET_NONBLOCKING(fd) fcntl((fd), F_SETFL, \
+                                        fcntl((fd), F_GETFL, 0) | NB_FLAG)
+#    endif /* !NB_FIONBIO */
 #  endif /* !__WIN32__ */
 
 #endif /* WANT_NONBLOCKING */
@@ -870,13 +847,6 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
 extern int erts_use_kernel_poll;
 #endif
 
-#if defined(VXWORKS)
-/* NOTE! sys_calloc2 does not exist on other
-   platforms than VxWorks and OSE */
-void* sys_calloc2(Uint, Uint);
-#endif /* VXWORKS || OSE */
-
-
 #define sys_memcpy(s1,s2,n)  memcpy(s1,s2,n)
 #define sys_memmove(s1,s2,n) memmove(s1,s2,n)
 #define sys_memcmp(s1,s2,n)  memcmp(s1,s2,n)
@@ -983,43 +953,6 @@ void erl_bin_write(unsigned char *, int, int);
 #  define DEBUGF(x)
 #endif
 
-
-#ifdef VXWORKS
-/* This includes redefines of malloc etc
-   this should be done after sys_alloc, etc, above */
-#  include "reclaim.h"
-/*********************Malloc and friends************************
- * There is a problem with the naming of malloc and friends,
- * malloc is used throughout sys.c and the resolver to mean save_alloc,
- * but it should actually mean either sys_alloc or sys_alloc2,
- * so the definitions from reclaim_master.h are not any
- * good, i redefine the malloc family here, although it's quite
- * ugly, actually it would be preferrable to use the
- * names sys_alloc and so on throughout the offending code, but
- * that will be saved as an later exercise...
- * I also add an own calloc, to make the BSD resolver source happy.
- ***************************************************************/ -/* Undefine malloc and friends */ -# ifdef malloc -# undef malloc -# endif -# ifdef calloc -# undef calloc -# endif -# ifdef realloc -# undef realloc -# endif -# ifdef free -# undef free -# endif -/* Redefine malloc and friends */ -# define malloc sys_alloc -# define calloc sys_calloc -# define realloc sys_realloc -# define free sys_free - -#endif - #ifdef __WIN32__ #ifdef ARCH_64 #define ERTS_ALLOC_ALIGN_BYTES 16 @@ -1035,23 +968,20 @@ void erl_bin_write(unsigned char *, int, int); #ifdef __WIN32__ - void call_break_handler(void); char* last_error(void); char* win32_errorstr(int); - - #endif /************************************************************************ * Find out the native filename encoding of the process (look at locale of * Unix processes and just do UTF16 on windows ************************************************************************/ -#define ERL_FILENAME_UNKNOWN 0 -#define ERL_FILENAME_LATIN1 1 -#define ERL_FILENAME_UTF8 2 -#define ERL_FILENAME_UTF8_MAC 3 -#define ERL_FILENAME_WIN_WCHAR 4 +#define ERL_FILENAME_UNKNOWN (0) +#define ERL_FILENAME_LATIN1 (1) +#define ERL_FILENAME_UTF8 (2) +#define ERL_FILENAME_UTF8_MAC (3) +#define ERL_FILENAME_WIN_WCHAR (4) int erts_get_native_filename_encoding(void); /* The set function is only to be used by erl_init! */ @@ -1061,4 +991,3 @@ int erts_get_user_requested_filename_encoding(void); void erts_init_sys_common_misc(void); #endif - |