Diffstat (limited to 'erts/emulator')
78 files changed, 3149 insertions, 2336 deletions
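The changes below stage module purging into explicit prepare, abort and complete phases: erts_internal_purge_module_2 may only be called by the erts_code_purger process, the prepare phase marks the old code's fun entries with pending-purge markers (suspending any process that tries to call such a fun until the purge is resolved), and, when ERTS_NEW_PURGE_STRATEGY is enabled, a completed purge hands its literal area to the erts_literal_area_collector process instead of freeing it on the spot. As rough orientation before the hunks, here is a minimal stand-alone C sketch of that three-phase flow; every name in it (purge_module, mark_pending_purge, any_direct_refs and so on) is hypothetical and only mirrors the control flow of the real BIF.

#include <stdbool.h>
#include <stdio.h>

static void mark_pending_purge(const char *mod)
{
    /* am_prepare: mark fun entries of the old code and wait for thread
     * progress so every scheduler sees the markers. */
    printf("prepare %s\n", mod);
}

static bool any_direct_refs(const char *mod)
{
    /* Stand-in for walking all processes with check_process_code()
     * (plus the erts_internal_check_dirty_process_code_2 BIF for
     * processes running on dirty schedulers). */
    (void) mod;
    return false;
}

static void restore_fun_entries(const char *mod)
{
    /* am_abort: restore fun addresses in two stages and resume the
     * processes that were suspended on pending-purge funs. */
    printf("abort %s\n", mod);
}

static void free_old_code(const char *mod)
{
    /* am_complete: free the code and either queue the literal area for
     * the collector (new strategy) or release it directly (old one). */
    printf("complete %s\n", mod);
}

static bool purge_module(const char *mod)
{
    mark_pending_purge(mod);      /* callers of marked funs get suspended */
    if (any_direct_refs(mod)) {   /* soft purge found live references */
        restore_fun_entries(mod);
        return false;
    }
    free_old_code(mod);
    return true;
}

int main(void)
{
    return purge_module("some_module") ? 0 : 1;
}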
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in index 2212aed5e0..e0260205e3 100644 --- a/erts/emulator/Makefile.in +++ b/erts/emulator/Makefile.in @@ -51,6 +51,7 @@ ARFLAGS=rc OMIT_OMIT_FP=no DIRTY_SCHEDULER_SUPPORT=@DIRTY_SCHEDULER_SUPPORT@ +NEW_PURGE_STRATEGY=@NEW_PURGE_STRATEGY@ ifeq ($(TYPE),debug) PURIFY = @@ -591,11 +592,8 @@ GENERATE += $(TTF_DIR)/driver_tab.c # # This list must be consistent with PRE_LOADED_MODULES in # erts/preloaded/src/Makefile. -ifeq ($(TARGET),win32) -# On windows the preloaded objects are in a resource object. -PRELOAD_OBJ = $(OBJDIR)/beams.$(RES_EXT) -PRELOAD_SRC = $(TARGET)/beams.rc -$(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \ + +PRELOAD_BEAM = $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \ $(ERL_TOP)/erts/preloaded/ebin/erts_code_purger.beam \ $(ERL_TOP)/erts/preloaded/ebin/init.beam \ $(ERL_TOP)/erts/preloaded/ebin/prim_eval.beam \ @@ -606,23 +604,20 @@ $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \ $(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \ $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \ $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam \ - $(ERL_TOP)/erts/preloaded/ebin/erl_tracer.beam + $(ERL_TOP)/erts/preloaded/ebin/erl_tracer.beam \ + $(ERL_TOP)/erts/preloaded/ebin/erts_literal_area_collector.beam \ + $(ERL_TOP)/erts/preloaded/ebin/erts_dirty_process_code_checker.beam + +ifeq ($(TARGET),win32) +# On windows the preloaded objects are in a resource object. +PRELOAD_OBJ = $(OBJDIR)/beams.$(RES_EXT) +PRELOAD_SRC = $(TARGET)/beams.rc +$(PRELOAD_SRC): $(PRELOAD_BEAM) $(gen_verbose)LANG=C $(PERL) utils/make_preload $(MAKE_PRELOAD_EXTRA) -rc $^ > $@ else PRELOAD_OBJ = $(OBJDIR)/preload.o PRELOAD_SRC = $(TARGET)/preload.c -$(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \ - $(ERL_TOP)/erts/preloaded/ebin/erts_code_purger.beam \ - $(ERL_TOP)/erts/preloaded/ebin/init.beam \ - $(ERL_TOP)/erts/preloaded/ebin/prim_eval.beam \ - $(ERL_TOP)/erts/preloaded/ebin/prim_inet.beam \ - $(ERL_TOP)/erts/preloaded/ebin/prim_file.beam \ - $(ERL_TOP)/erts/preloaded/ebin/zlib.beam \ - $(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \ - $(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \ - $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \ - $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam \ - $(ERL_TOP)/erts/preloaded/ebin/erl_tracer.beam +$(PRELOAD_SRC): $(PRELOAD_BEAM) $(gen_verbose)LANG=C $(PERL) utils/make_preload -old $^ > $@ endif @@ -744,7 +739,7 @@ EMU_OBJS = \ $(OBJDIR)/beam_ranges.o RUN_OBJS = \ - $(OBJDIR)/erl_pbifs.o $(OBJDIR)/benchmark.o \ + $(OBJDIR)/erl_pbifs.o \ $(OBJDIR)/erl_alloc.o $(OBJDIR)/erl_mtrace.o \ $(OBJDIR)/erl_alloc_util.o $(OBJDIR)/erl_goodfit_alloc.o \ $(OBJDIR)/erl_bestfit_alloc.o $(OBJDIR)/erl_afit_alloc.o \ diff --git a/erts/emulator/beam/atom.h b/erts/emulator/beam/atom.h index fbd0528009..ae60904785 100644 --- a/erts/emulator/beam/atom.h +++ b/erts/emulator/beam/atom.h @@ -21,10 +21,7 @@ #ifndef __ATOM_H__ #define __ATOM_H__ -#ifndef __INDEX_H__ #include "index.h" -#endif - #include "erl_atom_table.h" #define MAX_ATOM_CHARACTERS 255 diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index badd69856e..89c42f4700 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -65,6 +65,7 @@ atom undefined_lambda atom DOWN='DOWN' atom UP='UP' atom EXIT='EXIT' +atom abort atom aborted atom abs_path atom absoluteURI @@ -165,6 +166,7 @@ atom commandv atom compact atom compat_rel atom compile +atom complete atom compressed atom config_h 
atom convert_time_unit @@ -176,6 +178,7 @@ atom const atom context_switches atom control atom copy +atom copy_literals atom counters atom cpu atom cpu_timestamp @@ -195,6 +198,7 @@ atom dgroup_leader atom dictionary atom dirty_cpu atom dirty_cpu_schedulers_online +atom dirty_execution atom dirty_io atom disable_trace atom disabled @@ -235,6 +239,7 @@ atom erlang atom ERROR='ERROR' atom error_handler atom error_logger +atom erts_code_purger atom erts_internal atom ets atom ETS_TRANSFER='ETS-TRANSFER' @@ -407,6 +412,7 @@ atom named_table atom namelist atom native atom native_addresses +atom need_gc atom Neq='=/=' atom Neqeq='/=' atom net_kernel @@ -485,6 +491,7 @@ atom pause atom pending atom pending_driver atom pending_process +atom pending_purge_lambda atom pending_reload atom permanent atom pid @@ -494,6 +501,7 @@ atom port_count atom port_limit atom port_op atom positive +atom prepare atom print atom priority atom private diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 15e878ba65..ad107b4861 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -37,13 +37,85 @@ #include "erl_bits.h" #include "erl_thr_progress.h" +#ifdef HIPE +# include "hipe_stack.h" +#endif + +static struct { + Eterm module; + erts_smp_mtx_t mtx; + Export *pending_purge_lambda; + Eterm *sprocs; + Eterm def_sprocs[10]; + Uint sp_size; + Uint sp_ix; + ErlFunEntry **funs; + ErlFunEntry *def_funs[10]; + Uint fe_size; + Uint fe_ix; +} purge_state; + +Process *erts_code_purger = NULL; + +ErtsLiteralArea *erts_copy_literal_area = NULL; +#ifdef ERTS_DIRTY_SCHEDULERS +Process *erts_dirty_process_code_checker; +#endif +#ifdef ERTS_NEW_PURGE_STRATEGY +Process *erts_literal_area_collector = NULL; + +typedef struct ErtsLiteralAreaRef_ ErtsLiteralAreaRef; +struct ErtsLiteralAreaRef_ { + ErtsLiteralAreaRef *next; + ErtsLiteralArea *literal_area; +}; + +struct { + erts_smp_mtx_t mtx; + ErtsLiteralAreaRef *first; + ErtsLiteralAreaRef *last; +} release_literal_areas; +#endif + static void set_default_trace_pattern(Eterm module); static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls); static void delete_code(Module* modp); -static void decrement_refc(BeamCodeHeader*); static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size); static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size); +static void +init_purge_state(void) +{ + purge_state.module = THE_NON_VALUE; + + erts_smp_mtx_init(&purge_state.mtx, "purge_state"); + + purge_state.pending_purge_lambda = + erts_export_put(am_erts_code_purger, am_pending_purge_lambda, 3); + + purge_state.sprocs = &purge_state.def_sprocs[0]; + purge_state.sp_size = sizeof(purge_state.def_sprocs); + purge_state.sp_size /= sizeof(purge_state.def_sprocs[0]); + purge_state.sp_ix = 0; + + purge_state.funs = &purge_state.def_funs[0]; + purge_state.fe_size = sizeof(purge_state.def_funs); + purge_state.fe_size /= sizeof(purge_state.def_funs[0]); + purge_state.fe_ix = 0; +} + +void +erts_beam_bif_load_init(void) +{ +#ifdef ERTS_NEW_PURGE_STRATEGY + erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas"); + release_literal_areas.first = NULL; + release_literal_areas.last = NULL; +#endif + + init_purge_state(); +} + BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1) { Module* modp; @@ -516,6 +588,43 @@ badarg: BIF_ERROR(BIF_P, BADARG); } +BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2) +{ +#if 
!defined(ERTS_DIRTY_SCHEDULERS) + BIF_ERROR(BIF_P, EXC_NOTSUP); +#else + Process *rp; + int reds = 0; + Eterm res; + + if (BIF_P != erts_dirty_process_code_checker) + BIF_ERROR(BIF_P, EXC_NOTSUP); + + if (is_not_internal_pid(BIF_ARG_1)) + BIF_ERROR(BIF_P, BADARG); + + if (is_not_atom(BIF_ARG_2)) + BIF_ERROR(BIF_P, BADARG); + + rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN, + BIF_ARG_1, ERTS_PROC_LOCK_MAIN); + if (rp == ERTS_PROC_LOCK_BUSY) + ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2], + BIF_P, BIF_ARG_1, BIF_ARG_2); + if (!rp) + BIF_RET(am_false); + + res = erts_check_process_code(rp, BIF_ARG_2, 0, &reds, BIF_P->fcalls); + + if (BIF_P != rp) + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + + ASSERT(is_value(res)); + + BIF_RET2(res, reds); +#endif +} + BIF_RETTYPE delete_module_1(BIF_ALIST_1) { ErtsCodeIndex code_ix; @@ -748,6 +857,8 @@ set_default_trace_pattern(Eterm module) } } +#ifndef ERTS_NEW_PURGE_STRATEGY + static ERTS_INLINE int check_mod_funs(Process *p, ErlOffHeap *off_heap, char *area, size_t area_size) { @@ -762,12 +873,244 @@ check_mod_funs(Process *p, ErlOffHeap *off_heap, char *area, size_t area_size) return 0; } +#endif + static Uint hfrag_literal_size(Eterm* start, Eterm* end, char* lit_start, Uint lit_size); static void hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp, Eterm *start, Eterm *end, char *lit_start, Uint lit_size); +#ifdef ERTS_NEW_PURGE_STRATEGY + +Eterm +erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed) +{ + ErtsLiteralArea *la; + ErtsMessage *msgp; + struct erl_off_heap_header* oh; + char *literals; + Uint lit_bsize; + ErlHeapFragment *hfrag; + + la = erts_copy_literal_area; + if (!la) + return am_ok; + + oh = la->off_heap; + literals = (char *) &la->start[0]; + lit_bsize = (char *) la->end - literals; + + /* + * If a literal is in the message queue we make an explicit copy of + * it and attach it to the heap fragment. Each message needs to be + * self contained, we cannot save the literal in the old_heap or + * any other heap than the message it self. + */ + + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); + ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); + + for (msgp = c_p->msg.first; msgp; msgp = msgp->next) { + ErlHeapFragment *hf; + Uint lit_sz = 0; + + *redsp += 1; + + if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG) + hfrag = &msgp->hfrag; + else if (is_value(ERL_MESSAGE_TERM(msgp)) && msgp->data.heap_frag) + hfrag = msgp->data.heap_frag; + else + continue; /* Content on heap or in external term format... */ + + for (hf = hfrag; hf; hf = hf->next) { + lit_sz += hfrag_literal_size(&hf->mem[0], &hf->mem[hf->used_size], + literals, lit_bsize); + *redsp += 1; + } + + *redsp += lit_sz / 16; /* Better value needed... */ + if (lit_sz > 0) { + ErlHeapFragment *bp = new_message_buffer(lit_sz); + Eterm *hp = bp->mem; + + for (hf = hfrag; hf; hf = hf->next) { + hfrag_literal_copy(&hp, &bp->off_heap, + &hf->mem[0], &hf->mem[hf->used_size], + literals, lit_bsize); + hfrag = hf; + } + + /* link new hfrag last */ + ASSERT(hfrag->next == NULL); + hfrag->next = bp; + bp->next = NULL; + } + } + + if (gc_allowed) { + /* + * Current implementation first tests without + * allowing GC, and then restarts the operation + * allowing GC if it is needed. It is therfore + * very likely that we will need the GC (although + * this is not completely certain). We go for + * the GC directly instead of scanning everything + * one more time... 
+ */ + goto literal_gc; + } + + *redsp += 2; + if (any_heap_ref_ptrs(&c_p->fvalue, &c_p->fvalue+1, literals, lit_bsize)) { + c_p->freason = EXC_NULL; + c_p->fvalue = NIL; + c_p->ftrace = NIL; + } + + if (any_heap_ref_ptrs(c_p->stop, c_p->hend, literals, lit_bsize)) + goto literal_gc; + *redsp += 1; + if (any_heap_refs(c_p->heap, c_p->htop, literals, lit_bsize)) + goto literal_gc; + *redsp += 1; + if (any_heap_refs(c_p->old_heap, c_p->old_htop, literals, lit_bsize)) + goto literal_gc; + + /* Check dictionary */ + *redsp += 1; + if (c_p->dictionary) { + Eterm* start = ERTS_PD_START(c_p->dictionary); + Eterm* end = start + ERTS_PD_SIZE(c_p->dictionary); + + if (any_heap_ref_ptrs(start, end, literals, lit_bsize)) + goto literal_gc; + } + + /* Check heap fragments */ + for (hfrag = c_p->mbuf; hfrag; hfrag = hfrag->next) { + Eterm *hp, *hp_end; + + *redsp += 1; + + hp = &hfrag->mem[0]; + hp_end = &hfrag->mem[hfrag->used_size]; + if (any_heap_refs(hp, hp_end, literals, lit_bsize)) + goto literal_gc; + } + + /* + * Message buffer fragments (matched messages) + * - off heap lists should already have been moved into + * process off heap structure. + * - Check for literals + */ + for (msgp = c_p->msg_frag; msgp; msgp = msgp->next) { + hfrag = erts_message_to_heap_frag(msgp); + for (; hfrag; hfrag = hfrag->next) { + Eterm *hp, *hp_end; + + *redsp += 1; + + hp = &hfrag->mem[0]; + hp_end = &hfrag->mem[hfrag->used_size]; + + if (any_heap_refs(hp, hp_end, literals, lit_bsize)) + goto literal_gc; + } + } + + return am_ok; + +literal_gc: + + if (!gc_allowed) + return am_need_gc; + + if (c_p->flags & F_DISABLE_GC) + return THE_NON_VALUE; + + FLAGS(c_p) |= F_NEED_FULLSWEEP; + + *redsp += erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg, c_p->arity, fcalls); + + erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize, oh); + + *redsp += lit_bsize / 64; /* Need, better value... */ + + return am_ok; +} + +static Eterm +check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls) +{ + BeamInstr* start; + char* mod_start; + Uint mod_size; + Eterm* sp; + + *redsp += 1; + + /* + * Pick up limits for the module. + */ + start = (BeamInstr*) modp->old.code_hdr; + mod_start = (char *) start; + mod_size = modp->old.code_length; + + /* + * Check if current instruction or continuation pointer points into module. + */ + if (ErtsInArea(rp->i, mod_start, mod_size) + || ErtsInArea(rp->cp, mod_start, mod_size)) { + return am_true; + } + + *redsp += (STACK_START(rp) - rp->stop) / 32; + + /* + * Check all continuation pointers stored on the stack. + */ + for (sp = rp->stop; sp < STACK_START(rp); sp++) { + if (is_CP(*sp) && ErtsInArea(cp_val(*sp), mod_start, mod_size)) { + return am_true; + } + } + + /* + * Check all continuation pointers stored in stackdump + * and clear exception stackdump if there is a pointer + * to the module. + */ + if (rp->ftrace != NIL) { + struct StackTrace *s; + ASSERT(is_list(rp->ftrace)); + s = (struct StackTrace *) big_val(CDR(list_val(rp->ftrace))); + if ((s->pc && ErtsInArea(s->pc, mod_start, mod_size)) || + (s->current && ErtsInArea(s->current, mod_start, mod_size))) { + rp->freason = EXC_NULL; + rp->fvalue = NIL; + rp->ftrace = NIL; + } else { + int i; + for (i = 0; i < s->depth; i++) { + if (ErtsInArea(s->trace[i], mod_start, mod_size)) { + rp->freason = EXC_NULL; + rp->fvalue = NIL; + rp->ftrace = NIL; + break; + } + } + } + } + + return am_false; +} + +#else /* !ERTS_NEW_PURGE_STRATEGY, i.e, old style purge... 
*/ + static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls) { @@ -861,8 +1204,14 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ); - literals = (char*) modp->old.code_hdr->literals_start; - lit_bsize = (char*) modp->old.code_hdr->literals_end - literals; + if (modp->old.code_hdr->literal_area) { + literals = (char*) modp->old.code_hdr->literal_area->start; + lit_bsize = (char*) modp->old.code_hdr->literal_area->end - literals; + } + else { + literals = NULL; + lit_bsize = 0; + } for (msgp = rp->msg.first; msgp; msgp = msgp->next) { if (msgp->data.attached == ERTS_MSG_COMBINED_HFRAG) @@ -903,12 +1252,6 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls /* Check heap, stack etc... */ if (check_mod_funs(rp, &rp->off_heap, mod_start, mod_size)) goto try_gc; - if (!(flags & ERTS_CPC_COPY_LITERALS)) { - /* Process ok. May contain old literals but we will be called - * again before module is purged. - */ - return am_false; - } if (any_heap_ref_ptrs(&rp->fvalue, &rp->fvalue+1, literals, lit_bsize)) { rp->freason = EXC_NULL; rp->fvalue = NIL; @@ -916,6 +1259,10 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls } if (any_heap_ref_ptrs(rp->stop, rp->hend, literals, lit_bsize)) goto try_literal_gc; +#ifdef HIPE + if (nstack_any_heap_ref_ptrs(rp, literals, lit_bsize)) + goto try_literal_gc; +#endif if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize)) goto try_literal_gc; if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize)) @@ -994,7 +1341,7 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls } if (need_gc & ERTS_LITERAL_GC__) { struct erl_off_heap_header* oh; - oh = modp->old.code_hdr->literals_off_heap; + oh = modp->old.code_hdr->literal_area->off_heap; *redsp += lit_bsize / 64; /* Need, better value... */ erts_garbage_collect_literals(rp, (Eterm*)literals, lit_bsize, oh); done_gc |= ERTS_LITERAL_GC__; @@ -1007,6 +1354,8 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls } +#endif /* !ERTS_NEW_PURGE_STRATEGY */ + static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size) { @@ -1135,200 +1484,438 @@ hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp, } } -#undef in_area +#ifdef ERTS_NEW_PURGE_STRATEGY + +ErtsThrPrgrLaterOp later_literal_area_switch; #ifdef ERTS_SMP -static void copy_literals_commit(void*); +static void +complete_literal_area_switch(void *unused) +{ + Process *p = erts_literal_area_collector; + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_resume(p, ERTS_PROC_LOCK_STATUS); + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); +} #endif -copy_literals_t erts_clrange = {NULL, 0, THE_NON_VALUE}; +#endif /* ERTS_NEW_PURGE_STRATEGY */ -/* copy literals - * - * copy_literals.ptr = LitPtr - * copy_literals.sz = LitSz - * ------ THR PROG COMMIT ----- - * - * - check process code - * - check process code - * ... - * copy_literals.ptr = NULL - * copy_literals.sz = 0 - * ------ THR PROG COMMIT ----- - * ... 
- */ +BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) +{ +#ifndef ERTS_NEW_PURGE_STRATEGY + BIF_ERROR(BIF_P, EXC_NOTSUP); +#else + ErtsLiteralAreaRef *la_ref; + if (BIF_P != erts_literal_area_collector) + BIF_ERROR(BIF_P, EXC_NOTSUP); -BIF_RETTYPE erts_internal_copy_literals_2(BIF_ALIST_2) -{ - ErtsCodeIndex code_ix; - Eterm res = am_true; + erts_smp_mtx_lock(&release_literal_areas.mtx); - if (is_not_atom(BIF_ARG_1) || (am_true != BIF_ARG_2 && am_false != BIF_ARG_2)) { - BIF_ERROR(BIF_P, BADARG); + la_ref = release_literal_areas.first; + if (la_ref) { + release_literal_areas.first = la_ref->next; + if (!release_literal_areas.first) + release_literal_areas.last = NULL; } - if (!erts_try_seize_code_write_permission(BIF_P)) { - ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_copy_literals_2], - BIF_P, BIF_ARG_1, BIF_ARG_2); - } + erts_smp_mtx_unlock(&release_literal_areas.mtx); - code_ix = erts_active_code_ix(); + if (erts_copy_literal_area) + erts_release_literal_area(erts_copy_literal_area); - if (BIF_ARG_2 == am_true) { - Module* modp = erts_get_module(BIF_ARG_1, code_ix); - if (!modp || !modp->old.code_hdr) { - res = am_false; - goto done; - } - if (erts_clrange.ptr != NULL - && !(BIF_P->static_flags & ERTS_STC_FLG_SYSTEM_PROC)) { - res = am_aborted; - goto done; - } - erts_clrange.ptr = modp->old.code_hdr->literals_start; - erts_clrange.sz = modp->old.code_hdr->literals_end - erts_clrange.ptr; - erts_clrange.pid = BIF_P->common.id; - } else if (BIF_ARG_2 == am_false) { - if (erts_clrange.pid != BIF_P->common.id) { - res = am_false; - goto done; - } - erts_clrange.ptr = NULL; - erts_clrange.sz = 0; - erts_clrange.pid = THE_NON_VALUE; + if (!la_ref) { + erts_copy_literal_area = NULL; + BIF_RET(am_false); } -#ifdef ERTS_SMP - ASSERT(committer_state.stager == NULL); - committer_state.stager = BIF_P; - erts_schedule_thr_prgr_later_op(copy_literals_commit, NULL, &committer_state.lop); - erts_proc_inc_refc(BIF_P); + erts_copy_literal_area = la_ref->literal_area; + + erts_free(ERTS_ALC_T_LITERAL_REF, la_ref); + +#ifndef ERTS_SMP + BIF_RET(am_true); +#else + erts_schedule_thr_prgr_later_op(complete_literal_area_switch, + NULL, + &later_literal_area_switch); erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD_RETURN(BIF_P, am_true); #endif -done: - erts_release_code_write_permission(); - BIF_RET(res); + +#endif /* ERTS_NEW_PURGE_STRATEGY */ +} + +void +erts_purge_state_add_fun(ErlFunEntry *fe) +{ + ASSERT(is_value(purge_state.module)); + if (purge_state.fe_ix >= purge_state.fe_size) { + ErlFunEntry **funs; + purge_state.fe_size += 100; + funs = erts_alloc(ERTS_ALC_T_PURGE_DATA, + sizeof(ErlFunEntry *)*purge_state.fe_size); + sys_memcpy((void *) funs, + (void *) purge_state.funs, + purge_state.fe_ix*sizeof(ErlFunEntry *)); + if (purge_state.funs != &purge_state.def_funs[0]) + erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.funs); + purge_state.funs = funs; + } + purge_state.funs[purge_state.fe_ix++] = fe; +} + +Export * +erts_suspend_process_on_pending_purge_lambda(Process *c_p) +{ + erts_smp_mtx_lock(&purge_state.mtx); + if (is_value(purge_state.module)) { + /* + * The process c_p is about to call a fun in the code + * that we are trying to purge. Suspend it and call + * erts_code_purger:pending_purge_lambda/3. The process + * will be resumed when the purge completes or aborts, + * and will then try to do the call again. 
+ */ + if (purge_state.sp_ix >= purge_state.sp_size) { + Eterm *sprocs; + purge_state.sp_size += 100; + sprocs = erts_alloc(ERTS_ALC_T_PURGE_DATA, + (sizeof(ErlFunEntry *) + * purge_state.sp_size)); + sys_memcpy((void *) sprocs, + (void *) purge_state.sprocs, + purge_state.sp_ix*sizeof(ErlFunEntry *)); + if (purge_state.sprocs != &purge_state.def_sprocs[0]) + erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs); + purge_state.sprocs = sprocs; + } + purge_state.sprocs[purge_state.sp_ix++] = c_p->common.id; + erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); + ERTS_VBUMP_ALL_REDS(c_p); + } + erts_smp_mtx_unlock(&purge_state.mtx); + return purge_state.pending_purge_lambda; +} + +static void +finalize_purge_operation(Process *c_p, int succeded) +{ + Uint ix; + + if (c_p) + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + + erts_smp_mtx_lock(&purge_state.mtx); + + ASSERT(purge_state.module != THE_NON_VALUE); + + purge_state.module = THE_NON_VALUE; + + /* + * Resume all processes that have tried to call + * funs in this code. + */ + for (ix = 0; ix < purge_state.sp_ix; ix++) { + Process *rp = erts_pid2proc(NULL, 0, + purge_state.sprocs[ix], + ERTS_PROC_LOCK_STATUS); + if (rp) { + erts_resume(rp, ERTS_PROC_LOCK_STATUS); + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + } + } + + erts_smp_mtx_unlock(&purge_state.mtx); + + if (c_p) + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + + if (purge_state.sprocs != &purge_state.def_sprocs[0]) { + erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs); + purge_state.sprocs = &purge_state.def_sprocs[0]; + purge_state.sp_size = sizeof(purge_state.def_sprocs); + purge_state.sp_size /= sizeof(purge_state.def_sprocs[0]); + } + purge_state.sp_ix = 0; + + if (purge_state.funs != &purge_state.def_funs[0]) { + erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.funs); + purge_state.funs = &purge_state.def_funs[0]; + purge_state.fe_size = sizeof(purge_state.def_funs); + purge_state.fe_size /= sizeof(purge_state.def_funs[0]); + } + purge_state.fe_ix = 0; } #ifdef ERTS_SMP -static void copy_literals_commit(void* null) { - Process* p = committer_state.stager; -#ifdef DEBUG - committer_state.stager = NULL; -#endif - erts_release_code_write_permission(); + +static ErtsThrPrgrLaterOp purger_lop_data; + +static void +resume_purger(void *unused) +{ + Process *p = erts_code_purger; erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - if (!ERTS_PROC_IS_EXITING(p)) { - erts_resume(p, ERTS_PROC_LOCK_STATUS); - } + erts_resume(p, ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); - erts_proc_dec_refc(p); } -#endif /* ERTS_SMP */ +static void +finalize_purge_abort(void *unused) +{ + erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix); + + finalize_purge_operation(NULL, 0); -/* Do the actualy module purging and return: - * true for success - * false if no such old module - * BADARG if not an atom - */ -BIF_RETTYPE erts_internal_purge_module_1(BIF_ALIST_1) + resume_purger(NULL); +} + +#endif /* ERTS_SMP */ + +BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) { - ErtsCodeIndex code_ix; - BeamInstr* code; - BeamInstr* end; - Module* modp; - int is_blocking = 0; - Eterm ret; + if (BIF_P != erts_code_purger) + BIF_ERROR(BIF_P, EXC_NOTSUP); - if (is_not_atom(BIF_ARG_1)) { + if (is_not_atom(BIF_ARG_1)) BIF_ERROR(BIF_P, BADARG); - } - if (!erts_try_seize_code_write_permission(BIF_P)) { - ERTS_BIF_YIELD1(bif_export[BIF_erts_internal_purge_module_1], - BIF_P, BIF_ARG_1); - } + switch (BIF_ARG_2) { - code_ix = erts_active_code_ix(); + case am_prepare: { + /* + * 
Prepare for purge by marking all fun + * entries referring to the code to purge + * with "pending purge" markers. + */ + ErtsCodeIndex code_ix; + Module* modp; + Eterm res; - /* - * Correct module? - */ + if (is_value(purge_state.module)) + BIF_ERROR(BIF_P, BADARG); - if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) { - ERTS_BIF_PREP_RET(ret, am_false); - } - else { - erts_rwlock_old_code(code_ix); + code_ix = erts_active_code_ix(); /* - * Any code to purge? + * Correct module? */ - if (!modp->old.code_hdr) { - ERTS_BIF_PREP_RET(ret, am_false); - } + modp = erts_get_module(BIF_ARG_1, code_ix); + if (!modp) + res = am_false; else { /* - * Unload any NIF library + * Any code to purge? */ - if (modp->old.nif != NULL) { - /* ToDo: Do unload nif without blocking */ - erts_rwunlock_old_code(code_ix); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - is_blocking = 1; - erts_rwlock_old_code(code_ix); - erts_unload_nif(modp->old.nif); - modp->old.nif = NULL; + erts_rlock_old_code(code_ix); + if (!modp->old.code_hdr) + res = am_false; + else { + BeamInstr* code; + BeamInstr* end; + erts_smp_mtx_lock(&purge_state.mtx); + purge_state.module = BIF_ARG_1; + erts_smp_mtx_unlock(&purge_state.mtx); + res = am_true; + code = (BeamInstr*) modp->old.code_hdr; + end = (BeamInstr *)((char *)code + modp->old.code_length); + erts_fun_purge_prepare(code, end); +#if !defined(ERTS_NEW_PURGE_STRATEGY) + ASSERT(!erts_copy_literal_area); + erts_copy_literal_area = modp->old.code_hdr->literal_area; +#endif } - + erts_runlock_old_code(code_ix); + } + +#ifndef ERTS_SMP + BIF_RET(res); +#else + if (res != am_true) + BIF_RET(res); + else { /* - * Remove the old code. + * We'll be resumed when all schedulers are guaranteed + * to see the "pending purge" markers that we've made on + * all fun entries of the code that we are about to purge. + * Processes trying to call these funs will be suspended + * before calling the funs. That is we are guaranteed not + * to get any more direct references into the code while + * checking for such references... */ - ASSERT(erts_total_code_size >= modp->old.code_length); - erts_total_code_size -= modp->old.code_length; - code = (BeamInstr*) modp->old.code_hdr; - end = (BeamInstr *)((char *)code + modp->old.code_length); - erts_cleanup_funs_on_purge(code, end); - beam_catches_delmod(modp->old.catches, code, modp->old.code_length, - code_ix); - decrement_refc(modp->old.code_hdr); - if (modp->old.code_hdr->literals_start) { - erts_free(ERTS_ALC_T_LITERAL, modp->old.code_hdr->literals_start); - } - erts_free(ERTS_ALC_T_CODE, (void *) code); - modp->old.code_hdr = NULL; - modp->old.code_length = 0; - modp->old.catches = BEAM_CATCHES_NIL; - erts_remove_from_ranges(code); - ERTS_BIF_PREP_RET(ret, am_true); + erts_schedule_thr_prgr_later_op(resume_purger, + NULL, + &purger_lop_data); + erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); + ERTS_BIF_YIELD_RETURN(BIF_P, am_true); } - erts_rwunlock_old_code(code_ix); +#endif } - if (is_blocking) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + + case am_abort: { + /* + * Soft purge that detected direct references into the code + * we set out to purge. Abort the purge. 
+ */ + + if (purge_state.module != BIF_ARG_1) + BIF_ERROR(BIF_P, BADARG); + + erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix); + +#if !defined(ERTS_NEW_PURGE_STRATEGY) + ASSERT(erts_copy_literal_area); + erts_copy_literal_area = NULL; +#endif +#ifndef ERTS_SMP + erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix); + finalize_purge_operation(BIF_P, 0); + BIF_RET(am_false); +#else + /* + * We need to restore the code addresses of the funs in + * two stages in order to ensure that we do not get any + * stale suspended processes due to the purge abort. + * Restore address pointer (erts_fun_purge_abort_prepare); + * wait for thread progress; clear pending purge address + * pointer (erts_fun_purge_abort_finalize), and then + * resume processes that got suspended + * (finalize_purge_operation). + */ + erts_schedule_thr_prgr_later_op(finalize_purge_abort, + NULL, + &purger_lop_data); + erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); + ERTS_BIF_YIELD_RETURN(BIF_P, am_false); +#endif } - erts_release_code_write_permission(); - return ret; -} -static void -decrement_refc(BeamCodeHeader* code_hdr) -{ - struct erl_off_heap_header* oh = code_hdr->literals_off_heap; - - while (oh) { - Binary* bptr; - ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG); - bptr = ((ProcBin*)oh)->val; - if (erts_refc_dectest(&bptr->refc, 0) == 0) { - erts_bin_free(bptr); + case am_complete: { + ErtsCodeIndex code_ix; + BeamInstr* code; + Module* modp; + int is_blocking = 0; + Eterm ret; + ErtsLiteralArea *literals = NULL; + + + /* + * We have no direct references into the code. + * Complete to purge. + */ + + if (purge_state.module != BIF_ARG_1) + BIF_ERROR(BIF_P, BADARG); + + if (!erts_try_seize_code_write_permission(BIF_P)) { + ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_purge_module_2], + BIF_P, BIF_ARG_1, BIF_ARG_2); + } + + code_ix = erts_active_code_ix(); + + /* + * Correct module? + */ + + if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) { + ERTS_BIF_PREP_RET(ret, am_false); } - oh = oh->next; + else { + + erts_rwlock_old_code(code_ix); + + /* + * Any code to purge? + */ + if (!modp->old.code_hdr) { + ERTS_BIF_PREP_RET(ret, am_false); + } + else { + /* + * Unload any NIF library + */ + if (modp->old.nif != NULL) { + /* ToDo: Do unload nif without blocking */ + erts_rwunlock_old_code(code_ix); + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_thr_progress_block(); + is_blocking = 1; + erts_rwlock_old_code(code_ix); + erts_unload_nif(modp->old.nif); + modp->old.nif = NULL; + } + + /* + * Remove the old code. 
+ */ + ASSERT(erts_total_code_size >= modp->old.code_length); + erts_total_code_size -= modp->old.code_length; + code = (BeamInstr*) modp->old.code_hdr; + erts_fun_purge_complete(purge_state.funs, purge_state.fe_ix); + beam_catches_delmod(modp->old.catches, code, modp->old.code_length, + code_ix); + literals = modp->old.code_hdr->literal_area; + modp->old.code_hdr->literal_area = NULL; + erts_free(ERTS_ALC_T_CODE, (void *) code); + modp->old.code_hdr = NULL; + modp->old.code_length = 0; + modp->old.catches = BEAM_CATCHES_NIL; + erts_remove_from_ranges(code); + ERTS_BIF_PREP_RET(ret, am_true); + } + erts_rwunlock_old_code(code_ix); + } + if (is_blocking) { + erts_smp_thr_progress_unblock(); + erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + } + + erts_release_code_write_permission(); + + finalize_purge_operation(BIF_P, ret == am_true); + +#if !defined(ERTS_NEW_PURGE_STRATEGY) + + ASSERT(erts_copy_literal_area == literals); + erts_copy_literal_area = NULL; + erts_release_literal_area(literals); + +#else /* ERTS_NEW_PURGE_STRATEGY */ + + if (literals) { + ErtsLiteralAreaRef *ref; + ref = erts_alloc(ERTS_ALC_T_LITERAL_REF, + sizeof(ErtsLiteralAreaRef)); + ref->literal_area = literals; + ref->next = NULL; + erts_smp_mtx_lock(&release_literal_areas.mtx); + if (release_literal_areas.last) { + release_literal_areas.last->next = ref; + release_literal_areas.last = ref; + } + else { + release_literal_areas.first = ref; + release_literal_areas.last = ref; + } + erts_smp_mtx_unlock(&release_literal_areas.mtx); + erts_queue_message(erts_literal_area_collector, + 0, + erts_alloc_message(0, NULL), + am_copy_literals, + BIF_P->common.id); + } + +#endif /* ERTS_NEW_PURGE_STRATEGY */ + + return ret; + } + + default: + BIF_ERROR(BIF_P, BADARG); + } } diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index 8489897d3a..920c8b1ed0 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -858,7 +858,8 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) if (flags & MATCH_SET_RX_TRACE) { erts_trace_return(p, ep->code, result, &ERTS_TRACER(p)); } - if (flags & MATCH_SET_RETURN_TO_TRACE) { + if (flags & MATCH_SET_RETURN_TO_TRACE && + IS_TRACED_FL(p, F_TRACE_RETURN_TO)) { /* can only happen if(local)*/ if (applying) { /* Apply of BIF, cp is in calling function */ diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 4716460a6b..073b7e39b6 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -2161,7 +2161,10 @@ void process_main(void) c_p->i = (BeamInstr *) Arg(0); /* L1 */ SWAPOUT; c_p->arity = 0; - erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); + + if (!ERTS_PTMR_IS_TIMED_OUT(c_p)) + erts_smp_atomic32_read_band_relb(&c_p->state, + ~ERTS_PSFLG_ACTIVE); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); c_p->current = NULL; @@ -2826,13 +2829,7 @@ do { \ goto context_switch3; } - if (ERTS_MSACC_IS_ENABLED_CACHED_X()) { - if (GET_BIF_MODULE(Arg(0)) == am_ets) { - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS); - } else { - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_BIF); - } - } + ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(Arg(0)), GET_BIF_ADDRESS(Arg(0))); bf = GET_BIF_ADDRESS(Arg(0)); @@ -3032,6 +3029,7 @@ do { \ if (i == 0) { StoreBifResult(4, Op1); } + ires = big_size(Op1); goto big_shift; } } else if (is_big(Op2)) { @@ -3047,7 +3045,6 @@ do { \ OpCase(i_bsl_jIssd): GetArg2(2, Op1, Op2); - do_bsl: if (is_small(Op2)) 
{ i = signed_val(Op2); @@ -3073,16 +3070,12 @@ do { \ StoreBifResult(4, Op1); } } - Op1 = small_to_big(ires, tmp_big); -#ifdef TAG_LITERAL_PTR - Op1 |= TAG_LITERAL_PTR; -#endif + ires = 1; /* big_size(small_to_big(Op1)) */ big_shift: if (i > 0) { /* Left shift. */ - ires = big_size(Op1) + (i / D_EXP); + ires += (i / D_EXP); } else { /* Right shift. */ - ires = big_size(Op1); if (ires <= (-i / D_EXP)) ires = 3; /* ??? */ else @@ -3101,6 +3094,9 @@ do { \ goto lb_Cl_error; } TestHeapPreserve(ires+1, Arg(1), Op1); + if (is_small(Op1)) { + Op1 = small_to_big(signed_val(Op1), tmp_big); + } bigp = HTOP; Op1 = big_lshift(Op1, i, bigp); if (is_big(Op1)) { @@ -3123,6 +3119,7 @@ do { \ if (i == 0) { StoreBifResult(4, Op1); } + ires = big_size(Op1); goto big_shift; } } else if (is_big(Op2)) { @@ -3593,13 +3590,7 @@ do { \ goto context_switch; } - if (ERTS_MSACC_IS_ENABLED_CACHED_X()) { - if ((Eterm)I[-3] == am_ets) { - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS); - } else { - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_BIF); - } - } + ERTS_MSACC_SET_BIF_STATE_CACHED_X((Eterm)I[-3], (BifFunction)Arg(0)); c_p->current = I-3; /* In case we apply process_info/1,2 or load_nif/1 */ c_p->i = I; /* In case we apply check_process_code/2. */ @@ -5391,7 +5382,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) ASSERT(!c_p->scheduler_data); erts_pre_dirty_nif(esdp, &env, c_p, - (struct erl_module_nif*)I[2], NULL); + (struct erl_module_nif*)I[2]); #ifdef DEBUG result = @@ -5400,7 +5391,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) #endif (*fp)(&env, arity, reg); - erts_post_nif(&env); + erts_post_dirty_nif(&env); ASSERT(!is_value(result)); ASSERT(c_p->freason == TRAP); @@ -6535,34 +6526,49 @@ call_fun(Process* p, /* Current process. */ * representation (the module has never been loaded), * or the module defining the fun has been unloaded. */ + module = fe->module; - if ((modp = erts_get_module(module, code_ix)) != NULL - && modp->curr.code_hdr != NULL) { + + ERTS_SMP_READ_MEMORY_BARRIER; + if (fe->pend_purge_address) { /* - * There is a module loaded, but obviously the fun is not - * defined in it. We must not call the error_handler - * (or we will get into an infinite loop). + * The system is currently trying to purge the + * module containing this fun. Suspend the process + * and let it try again when the purge operation is + * done (may succeed or not). */ - goto badfun; + ep = erts_suspend_process_on_pending_purge_lambda(p); + ASSERT(ep); } + else { + if ((modp = erts_get_module(module, code_ix)) != NULL + && modp->curr.code_hdr != NULL) { + /* + * There is a module loaded, but obviously the fun is not + * defined in it. We must not call the error_handler + * (or we will get into an infinite loop). + */ + goto badfun; + } - /* - * No current code for this module. Call the error_handler module - * to attempt loading the module. - */ + /* + * No current code for this module. Call the error_handler module + * to attempt loading the module. 
+ */ - ep = erts_find_function(erts_proc_get_error_handler(p), - am_undefined_lambda, 3, code_ix); - if (ep == NULL) { /* No error handler */ - p->current = NULL; - p->freason = EXC_UNDEF; - return NULL; + ep = erts_find_function(erts_proc_get_error_handler(p), + am_undefined_lambda, 3, code_ix); + if (ep == NULL) { /* No error handler */ + p->current = NULL; + p->freason = EXC_UNDEF; + return NULL; + } } reg[0] = module; reg[1] = fun; reg[2] = args; reg[3] = NIL; - return ep->addressv[erts_active_code_ix()]; + return ep->addressv[code_ix]; } } } else if (is_export_header(hdr)) { @@ -7017,7 +7023,11 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) /* The expensive case, need to build a hashmap */ if (n > MAP_SMALL_MAP_LIMIT) { - res = erts_hashmap_from_ks_and_vs(p,flatmap_get_keys(mp),flatmap_get_values(mp),n); + ErtsHeapFactory factory; + erts_factory_proc_init(&factory, p); + res = erts_hashmap_from_ks_and_vs(&factory,flatmap_get_keys(mp), + flatmap_get_values(mp),n); + erts_factory_close(&factory); } return res; } diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 0c2743beb2..b7e802775d 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -105,7 +105,7 @@ typedef struct { */ typedef struct genop { - int op; /* Opcode. */ + unsigned int op; /* Opcode. */ int arity; /* Number of arguments. */ GenOpArg def_args[MAX_OPARGS]; /* Default buffer for arguments. */ GenOpArg* a; /* The arguments. */ @@ -283,8 +283,8 @@ typedef struct LoaderState { byte* code_start; /* Start of code file. */ unsigned code_size; /* Size of code file. */ int specific_op; /* Specific opcode (-1 if not found). */ - int num_functions; /* Number of functions in module. */ - int num_labels; /* Number of labels. */ + unsigned int num_functions; /* Number of functions in module. */ + unsigned int num_labels; /* Number of labels. */ BeamCodeHeader* hdr; /* Loaded code header */ BeamInstr* codev; /* Loaded code buffer */ int codev_size; /* Size of code buffer in words. */ @@ -303,13 +303,13 @@ typedef struct LoaderState { * Atom table. */ - int num_atoms; /* Number of atoms in atom table. */ + unsigned int num_atoms; /* Number of atoms in atom table. */ Eterm* atom; /* Atom table. */ - int num_exps; /* Number of exports. */ + unsigned int num_exps; /* Number of exports. */ ExportEntry* export; /* Pointer to export table. */ - int num_imports; /* Number of imports. */ + unsigned int num_imports; /* Number of imports. */ ImportEntry* import; /* Import entry (translated information). */ /* @@ -323,8 +323,8 @@ typedef struct LoaderState { * Lambda table. */ - int num_lambdas; /* Number of lambdas in table. */ - int lambdas_allocated; /* Size of allocated lambda table. */ + unsigned int num_lambdas; /* Number of lambdas in table. */ + unsigned int lambdas_allocated; /* Size of allocated lambda table. */ Lambda* lambdas; /* Pointer to lambdas. */ Lambda def_lambdas[16]; /* Default storage for lambda table. */ char* lambda_error; /* Delayed missing 'FunT' error. */ @@ -333,8 +333,8 @@ typedef struct LoaderState { * Literals (constant pool). */ - int num_literals; /* Number of literals in table. */ - int allocated_literals; /* Number of literal entries allocated. */ + unsigned int num_literals; /* Number of literals in table. */ + unsigned int allocated_literals; /* Number of literal entries allocated. */ Literal* literals; /* Array of literals. */ LiteralPatch* literal_patches; /* Operands that need to be patched. 
*/ Uint total_literal_size; /* Total heap size for all literals. */ @@ -343,13 +343,13 @@ typedef struct LoaderState { * Line table. */ BeamInstr* line_item; /* Line items from the BEAM file. */ - int num_line_items; /* Number of line items. */ + unsigned int num_line_items;/* Number of line items. */ LineInstr* line_instr; /* Line instructions */ - int num_line_instrs; /* Maximum number of line instructions */ - int current_li; /* Current line instruction */ - int* func_line; /* Mapping from function to first line instr */ + unsigned int num_line_instrs; /* Maximum number of line instructions */ + unsigned int current_li; /* Current line instruction */ + unsigned int* func_line; /* Mapping from function to first line instr */ Eterm* fname; /* List of file names */ - int num_fnames; /* Number of filenames in fname table */ + unsigned int num_fnames; /* Number of filenames in fname table */ int loc_size; /* Size of location info in bytes (2/4) */ } LoaderState; @@ -663,7 +663,7 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader, stp->hdr->compile_ptr = NULL; stp->hdr->compile_size = 0; stp->hdr->compile_size_on_heap = 0; - stp->hdr->literals_start = NULL; + stp->hdr->literal_area = NULL; stp->hdr->md5_ptr = NULL; /* @@ -1005,8 +1005,9 @@ loader_state_dtor(Binary* magic) stp->bin = 0; } if (stp->hdr != 0) { - if (stp->hdr->literals_start) { - erts_free(ERTS_ALC_T_LITERAL, stp->hdr->literals_start); + if (stp->hdr->literal_area) { + erts_release_literal_area(stp->hdr->literal_area); + stp->hdr->literal_area = NULL; } erts_free(ERTS_ALC_T_CODE, stp->hdr); stp->hdr = 0; @@ -1330,7 +1331,7 @@ verify_chunks(LoaderState* stp) static int load_atom_table(LoaderState* stp) { - int i; + unsigned int i; GetInt(stp, 4, stp->num_atoms); stp->num_atoms++; @@ -1375,13 +1376,13 @@ load_atom_table(LoaderState* stp) static int load_import_table(LoaderState* stp) { - int i; + unsigned int i; GetInt(stp, 4, stp->num_imports); stp->import = erts_alloc(ERTS_ALC_T_PREPARED_CODE, stp->num_imports * sizeof(ImportEntry)); for (i = 0; i < stp->num_imports; i++) { - int n; + unsigned int n; Eterm mod; Eterm func; Uint arity; @@ -1389,17 +1390,17 @@ load_import_table(LoaderState* stp) GetInt(stp, 4, n); if (n >= stp->num_atoms) { - LoadError2(stp, "import entry %d: invalid atom number %d", i, n); + LoadError2(stp, "import entry %u: invalid atom number %u", i, n); } mod = stp->import[i].module = stp->atom[n]; GetInt(stp, 4, n); if (n >= stp->num_atoms) { - LoadError2(stp, "import entry %d: invalid atom number %d", i, n); + LoadError2(stp, "import entry %u: invalid atom number %u", i, n); } func = stp->import[i].function = stp->atom[n]; GetInt(stp, 4, arity); if (arity > MAX_REG) { - LoadError2(stp, "import entry %d: invalid arity %d", i, arity); + LoadError2(stp, "import entry %u: invalid arity %d", i, arity); } stp->import[i].arity = arity; stp->import[i].patches = 0; @@ -1427,12 +1428,12 @@ load_import_table(LoaderState* stp) static int read_export_table(LoaderState* stp) { - int i; + unsigned int i; BeamInstr* address; GetInt(stp, 4, stp->num_exps); if (stp->num_exps > stp->num_functions) { - LoadError2(stp, "%d functions exported; only %d functions defined", + LoadError2(stp, "%u functions exported; only %u functions defined", stp->num_exps, stp->num_functions); } stp->export @@ -1450,16 +1451,16 @@ read_export_table(LoaderState* stp) stp->export[i].function = func; GetInt(stp, 4, arity); if (arity > MAX_REG) { - LoadError2(stp, "export table entry %d: absurdly high arity %d", i, arity); + 
LoadError2(stp, "export table entry %u: absurdly high arity %u", i, arity); } stp->export[i].arity = arity; GetInt(stp, 4, n); if (n >= stp->num_labels) { - LoadError3(stp, "export table entry %d: invalid label %d (highest defined label is %d)", i, n, stp->num_labels); + LoadError3(stp, "export table entry %u: invalid label %u (highest defined label is %u)", i, n, stp->num_labels); } value = stp->labels[n].value; if (value == 0) { - LoadError2(stp, "export table entry %d: label %d not resolved", i, n); + LoadError2(stp, "export table entry %u: label %u not resolved", i, n); } stp->export[i].address = address = stp->codev + value; @@ -1520,7 +1521,7 @@ is_bif(Eterm mod, Eterm func, unsigned arity) static int read_lambda_table(LoaderState* stp) { - int i; + unsigned int i; GetInt(stp, 4, stp->num_lambdas); if (stp->num_lambdas > stp->lambdas_allocated) { @@ -1540,12 +1541,12 @@ read_lambda_table(LoaderState* stp) GetAtom(stp, n, stp->lambdas[i].function); GetInt(stp, 4, arity); if (arity > MAX_REG) { - LoadError2(stp, "lambda entry %d: absurdly high arity %d", i, arity); + LoadError2(stp, "lambda entry %u: absurdly high arity %u", i, arity); } stp->lambdas[i].arity = arity; GetInt(stp, 4, n); if (n >= stp->num_labels) { - LoadError3(stp, "lambda entry %d: invalid label %d (highest defined label is %d)", + LoadError3(stp, "lambda entry %u: invalid label %u (highest defined label is %u)", i, n, stp->num_labels); } stp->lambdas[i].label = n; @@ -1566,7 +1567,7 @@ read_lambda_table(LoaderState* stp) static int read_literal_table(LoaderState* stp) { - int i; + unsigned int i; uLongf uncompressed_sz; byte* uncompressed = 0; @@ -1588,7 +1589,7 @@ read_literal_table(LoaderState* stp) } for (i = 0; i < stp->num_literals; i++) { - int sz; + Uint sz; Sint heap_size; byte* p; Eterm val; @@ -1597,7 +1598,7 @@ read_literal_table(LoaderState* stp) GetInt(stp, 4, sz); /* Size of external term format. 
*/ GetString(stp, p, sz); if ((heap_size = erts_decode_ext_size(p, sz)) < 0) { - LoadError1(stp, "literal %d: bad external format", i); + LoadError1(stp, "literal %u: bad external format", i); } if (heap_size > 0) { @@ -1607,7 +1608,7 @@ read_literal_table(LoaderState* stp) val = erts_decode_ext(&factory, &p, 0); if (is_non_value(val)) { - LoadError1(stp, "literal %d: bad external format", i); + LoadError1(stp, "literal %u: bad external format", i); } erts_factory_close(&factory); stp->literals[i].heap_frags = factory.heap_frags; @@ -1617,7 +1618,7 @@ read_literal_table(LoaderState* stp) erts_factory_dummy_init(&factory); val = erts_decode_ext(&factory, &p, 0); if (is_non_value(val)) { - LoadError1(stp, "literal %d: bad external format", i); + LoadError1(stp, "literal %u: bad external format", i); } ASSERT(is_immed(val)); stp->literals[i].heap_frags = NULL; @@ -1640,9 +1641,9 @@ read_line_table(LoaderState* stp) { unsigned version; ERTS_DECLARE_DUMMY(unsigned flags); - int num_line_items; + unsigned int num_line_items; BeamInstr* lp; - int i; + unsigned int i; BeamInstr fname_index; BeamInstr tag; @@ -1721,7 +1722,7 @@ read_line_table(LoaderState* stp) } } else if (tag == TAG_a) { if (val > stp->num_fnames) { - LoadError2(stp, "file index overflow (%d/%d)", + LoadError2(stp, "file index overflow (%u/%u)", val, stp->num_fnames); } fname_index = val; @@ -1757,9 +1758,9 @@ read_line_table(LoaderState* stp) stp->num_line_instrs * sizeof(LineInstr)); stp->current_li = 0; - stp->func_line = (int *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, - stp->num_functions * - sizeof(int)); + stp->func_line = (unsigned int *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, + stp->num_functions * + sizeof(int)); return 1; @@ -1783,6 +1784,10 @@ read_code_header(LoaderState* stp) */ GetInt(stp, 4, head_size); + if (head_size > stp->file_left) { + LoadError2(stp, "invalid code header size %u; bytes left %u", + head_size, stp->file_left); + } stp->code_start = stp->file_p + head_size; stp->code_size = stp->file_left - head_size; stp->file_left = head_size; @@ -1887,7 +1892,7 @@ load_code(LoaderState* stp) ci = stp->ci; for (;;) { - int new_op; + unsigned int new_op; GenOp* tmp_op; ASSERT(ci <= codev_size); @@ -1895,10 +1900,10 @@ load_code(LoaderState* stp) get_next_instr: GetByte(stp, new_op); if (new_op >= NUM_GENERIC_OPS) { - LoadError1(stp, "invalid opcode %d", new_op); + LoadError1(stp, "invalid opcode %u", new_op); } if (gen_opc[new_op].name[0] == '\0') { - LoadError1(stp, "invalid opcode %d", new_op); + LoadError1(stp, "invalid opcode %u", new_op); } @@ -2368,7 +2373,7 @@ load_code(LoaderState* stp) VerifyTag(stp, tag, TAG_u); last_label = tmp_op->a[arg].val; if (!(0 < last_label && last_label < stp->num_labels)) { - LoadError2(stp, "invalid label num %d (0 < label < %d)", + LoadError2(stp, "invalid label num %u (0 < label < %u)", tmp_op->a[arg].val, stp->num_labels); } if (stp->labels[last_label].value != 0) { @@ -2512,7 +2517,7 @@ load_code(LoaderState* stp) { Sint offset; if (function_number >= stp->num_functions) { - LoadError1(stp, "too many functions in module (header said %d)", + LoadError1(stp, "too many functions in module (header said %u)", stp->num_functions); } @@ -2591,14 +2596,14 @@ load_code(LoaderState* stp) if (stp->line_item) { BeamInstr item = code[ci-1]; BeamInstr loc; - int li; + unsigned int li; if (item >= stp->num_line_items) { - LoadError2(stp, "line instruction index overflow (%d/%d)", + LoadError2(stp, "line instruction index overflow (%u/%u)", item, stp->num_line_items); } li = 
stp->current_li; if (li >= stp->num_line_instrs) { - LoadError2(stp, "line instruction table overflow (%d/%d)", + LoadError2(stp, "line instruction table overflow (%u/%u)", li, stp->num_line_instrs); } loc = stp->line_item[item]; @@ -4560,13 +4565,16 @@ freeze_code(LoaderState* stp) Eterm* ptr; LiteralPatch* lp; ErlOffHeap code_off_heap; + ErtsLiteralArea *literal_area; + Uint lit_asize; ERTS_INIT_OFF_HEAP(&code_off_heap); - ptr = (Eterm*)erts_alloc(ERTS_ALC_T_LITERAL, - stp->total_literal_size*sizeof(Eterm)); - code_hdr->literals_start = ptr; - code_hdr->literals_end = ptr + stp->total_literal_size; + lit_asize = ERTS_LITERAL_AREA_ALLOC_SIZE(stp->total_literal_size); + literal_area = erts_alloc(ERTS_ALC_T_LITERAL, lit_asize); + ptr = &literal_area->start[0]; + literal_area->end = ptr + stp->total_literal_size; + for (i = 0; i < stp->num_literals; i++) { if (is_not_immed(stp->literals[i].term)) { erts_move_multi_frags(&ptr, &code_off_heap, @@ -4576,7 +4584,7 @@ freeze_code(LoaderState* stp) ptr_val(stp->literals[i].term))); } } - code_hdr->literals_off_heap = code_off_heap.first; + literal_area->off_heap = code_off_heap.first; lp = stp->literal_patches; while (lp != 0) { BeamInstr* op_ptr; @@ -4587,6 +4595,7 @@ freeze_code(LoaderState* stp) op_ptr[0] = lit->term; lp = lp->next; } + code_hdr->literal_area = literal_area; } CHKBLK(ERTS_ALC_T_CODE,code); @@ -4598,8 +4607,8 @@ freeze_code(LoaderState* stp) str_table = (byte *) (codev + stp->ci); } else { BeamCodeLineTab* const line_tab = (BeamCodeLineTab *) (codev+stp->ci); - const int ftab_size = stp->num_functions; - const int num_instrs = stp->current_li; + const unsigned int ftab_size = stp->num_functions; + const unsigned int num_instrs = stp->current_li; const BeamInstr** const line_items = (const BeamInstr**) &line_tab->func_tab[ftab_size + 1]; @@ -4759,7 +4768,7 @@ freeze_code(LoaderState* stp) static void final_touch(LoaderState* stp, struct erl_module_instance* inst_p) { - int i; + unsigned int i; int on_load = stp->on_load; unsigned catches; Uint index; @@ -5431,7 +5440,7 @@ new_genop(LoaderState* stp) static int new_label(LoaderState* stp) { - int num = stp->num_labels; + unsigned int num = stp->num_labels; stp->num_labels++; stp->labels = (Label *) erts_realloc(ERTS_ALC_T_PREPARED_CODE, @@ -5642,6 +5651,28 @@ has_native(BeamCodeHeader *code_hdr) return result; } +void +erts_release_literal_area(ErtsLiteralArea* literal_area) +{ + struct erl_off_heap_header* oh; + + if (!literal_area) + return; + + oh = literal_area->off_heap; + + while (oh) { + Binary* bptr; + ASSERT(thing_subtag(oh->thing_word) == REFC_BINARY_SUBTAG); + bptr = ((ProcBin*)oh)->val; + if (erts_refc_dectest(&bptr->refc, 0) == 0) { + erts_bin_free(bptr); + } + oh = oh->next; + } + erts_free(ERTS_ALC_T_LITERAL, literal_area); +} + int erts_is_module_native(BeamCodeHeader* code_hdr) { @@ -6029,11 +6060,11 @@ stub_copy_info(LoaderState* stp, static int stub_read_export_table(LoaderState* stp) { - int i; + unsigned int i; GetInt(stp, 4, stp->num_exps); if (stp->num_exps > stp->num_functions) { - LoadError2(stp, "%d functions exported; only %d functions defined", + LoadError2(stp, "%u functions exported; only %u functions defined", stp->num_exps, stp->num_functions); } stp->export @@ -6047,7 +6078,7 @@ stub_read_export_table(LoaderState* stp) GetAtom(stp, n, stp->export[i].function); GetInt(stp, 4, n); if (n > MAX_REG) { - LoadError2(stp, "export table entry %d: absurdly high arity %d", i, n); + LoadError2(stp, "export table entry %u: absurdly high arity %u", i, n); } 
stp->export[i].arity = n; GetInt(stp, 4, n); /* Ignore label */ @@ -6061,8 +6092,8 @@ stub_read_export_table(LoaderState* stp) static void stub_final_touch(LoaderState* stp, BeamInstr* fp) { - int i; - int n = stp->num_exps; + unsigned int i; + unsigned int n = stp->num_exps; Eterm mod = fp[2]; Eterm function = fp[3]; int arity = fp[4]; @@ -6373,9 +6404,7 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info) code_hdr->compile_ptr = NULL; code_hdr->compile_size = 0; code_hdr->compile_size_on_heap = 0; - code_hdr->literals_start = NULL; - code_hdr->literals_end = NULL; - code_hdr->literals_off_heap = 0; + code_hdr->literal_area = NULL; code_hdr->on_load_function_ptr = NULL; code_hdr->line_table = NULL; code_hdr->md5_ptr = NULL; diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h index fd2dd97fee..1200bb9c6f 100644 --- a/erts/emulator/beam/beam_load.h +++ b/erts/emulator/beam/beam_load.h @@ -50,6 +50,7 @@ extern BeamInstr* em_call_error_handler; extern BeamInstr* em_apply_bif; extern BeamInstr* em_call_nif; +struct ErtsLiteralArea_; /* * The following variables keep a sorted list of address ranges for @@ -89,9 +90,7 @@ typedef struct beam_code_header { /* * Literal area (constant pool). */ - Eterm* literals_start; - Eterm* literals_end; - struct erl_off_heap_header* literals_off_heap; + struct ErtsLiteralArea_ *literal_area; /* * Pointer to the on_load function (or NULL if none). @@ -120,7 +119,12 @@ typedef struct beam_code_header { }BeamCodeHeader; +void erts_release_literal_area(struct ErtsLiteralArea_* literal_area); int erts_is_module_native(BeamCodeHeader* code); +void erts_beam_bif_load_init(void); +struct erl_fun_entry; +void erts_purge_state_add_fun(struct erl_fun_entry *fe); +Export *erts_suspend_process_on_pending_purge_lambda(Process *c_p); /* * Layout of the line table. diff --git a/erts/emulator/beam/benchmark.c b/erts/emulator/beam/benchmark.c deleted file mode 100644 index c8409784ef..0000000000 --- a/erts/emulator/beam/benchmark.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2002-2016. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * %CopyrightEnd% - */ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "sys.h" -#include "global.h" -#include "benchmark.h" - -#ifdef BM_COUNTERS -unsigned long long processes_busy; -unsigned long long processes_spawned; -unsigned long long messages_sent; -unsigned long long messages_copied; -unsigned long long messages_ego; -unsigned long long minor_gc; -unsigned long long major_gc; -#endif /* BM_COUNTERS */ - -#ifdef BM_TIMERS - -/* assuming Solaris */ -#include <time.h> -BM_TIMER_T system_clock; - -unsigned long local_pause_times[MAX_PAUSE_TIME]; -unsigned long pause_times[MAX_PAUSE_TIME]; -unsigned long pause_times_old[MAX_PAUSE_TIME]; - -BM_TIMER_T mmu; -BM_TIMER_T mmu_counter; - -BM_NEW_TIMER(timer); -BM_NEW_TIMER(system); -BM_NEW_TIMER(gc); -BM_NEW_TIMER(minor_gc); -BM_NEW_TIMER(major_gc); -BM_NEW_TIMER(minor_global_gc); -BM_NEW_TIMER(major_global_gc); -BM_NEW_TIMER(send); -BM_NEW_TIMER(copy); -BM_NEW_TIMER(size); -BM_NEW_TIMER(max_minor); -BM_NEW_TIMER(max_major); -BM_NEW_TIMER(max_global_minor); -BM_NEW_TIMER(max_global_major); -BM_NEW_TIMER(misc0); -BM_NEW_TIMER(misc1); -BM_NEW_TIMER(misc2); -#endif /* BM_TIMERS */ - -#ifdef BM_HEAP_SIZES -unsigned long long max_used_heap; -unsigned long long max_allocated_heap; -unsigned long long max_used_global_heap; -unsigned long long max_allocated_global_heap; -#endif /* BM_HEAP_SIZES */ - -#ifdef BM_MESSAGE_SIZES -unsigned long long words_sent; -unsigned long long words_copied; -unsigned long long words_prealloc; -unsigned long long message_sizes[1000]; -#endif /* BM_MESSAGE_SIZES */ - -/***** - * The following functions have to be defined, but they only have contents - * if certain keywords are defined. - */ - -void init_benchmarking() -{ -#ifdef BM_TIMERS - int i; - for (i = 0; i < 1000; i++) - { - BM_START_TIMER(system); - BM_STOP_TIMER(system); - } - timer_time = system_time / 1000; - - for (i = 0; i < MAX_PAUSE_TIME; i++) { - local_pause_times[i] = 0; - pause_times[i] = 0; - pause_times_old[i] = 0; - } - - mmu = 0; - mmu_counter = 0; - - BM_MMU_INIT(); -#endif /* BM_TIMERS */ - -#ifdef BM_COUNTERS - processes_busy = 0; - processes_spawned = 0; - messages_sent = 0; - messages_copied = 0; - messages_ego = 0; - minor_gc = 0; - major_gc = 0; -#endif /* BM_COUNTERS */ - -#ifdef BM_HEAP_SIZES - max_used_heap = 0; - max_allocated_heap = 0; - max_used_global_heap = 0; - max_allocated_global_heap = 0; -#endif /* BM_HEAP_SIZES */ - -#ifdef BM_MESSAGE_SIZES - words_sent = 0; - words_copied = 0; - words_prealloc = 0; - { - int i; - for (i = 0; i < 1000; i++) - message_sizes[i] = 0; - } -#endif /* BM_MESSAGE_SIZES */ -} - -void save_statistics() -{ -#ifdef BM_STATISTICS - FILE *file = fopen(BM_STATISTICS_FILE,"a"); - long i = 0; - - if (file) - { - erts_fprintf(file,"-------------------------------------------------------------------------\n"); - erts_fprintf(file,"The counters are reset at system start and are sums over the entire node.\n"); - erts_fprintf(file,"You may reset them manually using the BIFs in the module hipe_bifs.\n"); - erts_fprintf(file,"All times are given in milliseconds.\n"); - erts_fprintf(file,"-------------------------------------------------------------------------\n"); - - erts_fprintf(file,"Node: %T\n",erts_this_node->sysname); - -#ifdef BM_COUNTERS - erts_fprintf(file,"Number of processes spawned: %lld\n",processes_spawned); - erts_fprintf(file,"Number of local minor GCs: %lld\n",minor_gc); - erts_fprintf(file,"Number of local major GCs: %lld\n",major_gc); - erts_fprintf(file,"Number of messages 
sent: %lld\n",messages_sent); - erts_fprintf(file,"Number of messages copied: %lld\n",messages_copied); - erts_fprintf(file,"Number of messages sent to self: %lld\n",messages_ego); -#endif /* BM_COUNTERS */ - -#ifdef BM_MESSAGE_SIZES - erts_fprintf(file,"Number of words sent: %lld\n",words_sent); - erts_fprintf(file,"Number of words copied: %lld\n",words_copied); - erts_fprintf(file,"Number of words preallocated: %lld\n",words_prealloc); -#endif /* BM_MESSAGE_SIZES */ - -#ifdef BM_HEAP_SIZES - erts_fprintf(file,"Biggest local heap used (in words): %lld\n",max_used_heap); - erts_fprintf(file,"Biggest local heap allocated (in words): %lld\n",max_allocated_heap); - erts_fprintf(file,"Biggest global heap used (in words): %lld\n",max_used_global_heap); - erts_fprintf(file,"Biggest global heap allocated (in words): %lld\n",max_allocated_global_heap); -#endif /* BM_HEAP_SIZES */ - -#ifdef BM_TIMERS - erts_fprintf(file,"--- The total active system time is the sum of all times below ---\n"); - BM_TIME_PRINTER("Mutator time",system_time); - BM_TIME_PRINTER("Time spent in send (excluding size & copy)",send_time); - BM_TIME_PRINTER("Time spent in size",size_time); - BM_TIME_PRINTER("Time spent in copy",copy_time); - BM_TIME_PRINTER("Time spent in local minor GC",minor_gc_time); - BM_TIME_PRINTER("Time spent in local major GC",major_gc_time); - BM_TIME_PRINTER("Time spent in global minor GC",minor_global_gc_time); - BM_TIME_PRINTER("Time spent in global major GC",major_global_gc_time); - erts_fprintf(file,"---\n"); - BM_TIME_PRINTER("Maximum time spent in one separate local minor GC",max_minor_time); - BM_TIME_PRINTER("Maximum time spent in one separate local major GC",max_major_time); - BM_TIME_PRINTER("Maximum time spent in one separate global minor GC",max_global_minor_time); - BM_TIME_PRINTER("Maximum time spent in one separate global major GC",max_global_major_time); -#endif /* BM_TIMERS */ - -#if 0 - /* Save a log file for import into excel */ - - long long total_time, n; - long left, right, mid; - -#ifdef BM_COUNTERS - erts_fprintf(file,"Spawns\tLocalGC\tMAGC\tMessages\tMutator_t\tLocalGC_t\tMAGC_t\tLocMaxP\tLocMeanP\tLocGeoMP\tMAMaxP\tMAMeanP\tMAGeoMP\t\tCMAGC\tCMAGC_t\n"); - erts_fprintf(file,"%lld\t%lld\t%lld\t%lld\t", - processes_spawned, - minor_garbage_cols + major_garbage_cols, - minor_global_garbage_cols + major_global_garbage_cols, - messages_sent); -#endif /* BM_COUNTERS */ - -#ifdef BM_TIMERS - erts_fprintf(file,"%lld\t%lld\t%lld\t", - (long long)(system_time + send_time + size_time + copy_time), - (long long)(minor_gc_time + major_gc_time), - (long long)(minor_global_gc_time + major_global_gc_time)); - - total_time = 0; n = 0; - left = 0; right = 0; mid = 0; - for (i = 0; i < MAX_PAUSE_TIME; i++) { - total_time += local_pause_times[i] * i; - n += local_pause_times[i]; - if (i > mid) - right += local_pause_times[i]; - while(right > left) { - left += local_pause_times[mid++]; - right -= local_pause_times[mid]; - } - } - erts_fprintf(file,"%lld\t%lld\t%ld\t", - (long long)((max_minor_time > max_major_time ? 
- max_minor_time : - max_major_time)*1000), - total_time / n, - mid); - - total_time = 0; n = 0; - left = 0; right = 0; mid = 0; - for (i = 0; i < MAX_PAUSE_TIME; i++) { - if (pause_times[i] > 0) { - total_time += pause_times[i] * i; - n += pause_times[i]; - if (i > mid) - right += pause_times[i]; - while(right > left) { - left += pause_times[mid++]; - right -= pause_times[mid]; - } - } - } - erts_fprintf(file,"%lld\t%lld\t%ld\t", - (long long)((max_global_minor_time > max_global_major_time ? - max_global_minor_time : - max_global_major_time)*1000), - (n > 0 ? total_time / n : 0), - mid); - - erts_fprintf(file,"\t%lld\t%lld\n",n,total_time); - - erts_fprintf(file,"\nMinor:\n"); - for (i = 0; i < MAX_PAUSE_TIME; i++) { - if (i < 1000 || pause_times[i] > 0) { - erts_fprintf(file,"%d\t%ld\n",i,pause_times[i]); - } - } - - fprintf(file,"Major:\n"); - for (i = 0; i < MAX_PAUSE_TIME; i++) { - if (pause_times_old[i] > 0) { - fprintf(file,"%d\t%ld\n",i,pause_times_old[i]); - } - } -#endif /* BM_TIMERS */ - -#ifdef BM_TIMERS - total_time = 0; n = 0; - left = 0; right = 0; mid = 0; - fprintf(file,"\nLocal:\n"); - for (i = 0; i < MAX_PAUSE_TIME; i++) { - if (local_pause_times[i] > 0) { - erts_fprintf(file,"%d\t%ld\n",i,local_pause_times[i]); - total_time += local_pause_times[i] * i; - n += local_pause_times[i]; - if (i > mid) - right += local_pause_times[i]; - while(right > left) { - left += local_pause_times[mid++]; - right -= local_pause_times[mid]; - } - } - } - erts_fprintf(file,"Mid: %ld Mean: %ld\n",(long)mid, - (long)(n > 0 ? total_time / n : 0)); -#endif -#endif /* 0 */ - fclose(file); - } - else - fprintf(stderr,"Sorry... Can not write to %s!\n\r",BM_STATISTICS_FILE); -#endif /* BM_STATISTICS */ -} diff --git a/erts/emulator/beam/benchmark.h b/erts/emulator/beam/benchmark.h deleted file mode 100644 index 0272896f4f..0000000000 --- a/erts/emulator/beam/benchmark.h +++ /dev/null @@ -1,295 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2002-2016. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * %CopyrightEnd% - */ - -#ifndef __BENCHMARK_H__ -#define __BENCHMARK_H__ - -/* The define __BENCHMARK__ is the master switch to turn on and off - * benchmarking. This will enable the benchmark-BIFs in hipe_bif1.c. - * Documentation for the BIFs is in hipe_bif1.c, and that is where you - * will find the information about how to accually get some data out - * from these timers and counters. - */ -/* #define __BENCHMARK__ */ - -#ifdef __BENCHMARK__ -/* - * The defines below enables different parts of the benchmaring. - * Counters and timers that are disabled, always report zero in - * the BIFs. - */ - -/* BM_TIMERS keeps track of the time spent in diferent parts of the - * system. It only measures accual active time, not time spent in idle - * mode. Currently, the Solaris hrtime_t will be used. - * To add new timers look below. - */ -#define BM_TIMERS - -/* BM_COUNTERS count all kinds of events that occurs in the system. 
- * Among other things it counts the number of messages, then number of - * garbage collections, the number of processes spawned etc. - * To add new counters look below. - */ -#define BM_COUNTERS - -/* BM_MESSAGE_SIZES keeps a log of the size of all messages sent in - * the system. This introduce an overhead in time for the shared heap - * system since all message sizes have to be calculated at send. - */ -/* #define BM_MESSAGE_SIZES */ - -/* BM_HEAP_SIZES goes through all processes at garbage collection time - * to sum their allocated and used heap sizes. In anything else than a - * shared heap system, this will cost. - */ -/* #define BM_HEAP_SIZES */ - -/* BM_STATISTICS saves an entry in the file BM_STATISTICS_FILE. This - * is done for each erlang node at exit time. - */ -/* #define BM_STATISTICS */ - -#endif /* __BENCHMARK__ */ - - -#ifdef BM_STATISTICS -# define BM_STATISTICS_FILE "/tmp/erlang_statistics.joppe.log" -#endif /* BM_STATISTICS */ - - -/************ There are no more settings below this line *************/ - -/* - * Maintenance and how to add new stuff is documented by the code - * below ;-) - */ - -#ifdef BM_COUNTERS -/********************************************************************* - * To add new counters: - * - * Add the variable here AND in benchmark.c. Use the macro - * BM_COUNT(var) in the code where you want to increase it. - * - */ -extern unsigned long long processes_busy; -extern unsigned long long processes_spawned; -extern unsigned long long messages_sent; -extern unsigned long long messages_copied; -extern unsigned long long messages_ego; -extern unsigned long long minor_gc; -extern unsigned long long major_gc; - -#define BM_COUNT(var) (var)++; - -#define BM_EGO_COUNT(send,rec) { \ - if ((send) == (rec)) \ - BM_COUNT(messages_ego); } - -#define BM_LAZY_COPY_START long long gcs = minor_global_gc + major_global_gc; -#define BM_LAZY_COPY_STOP { gcs = (minor_global_gc + major_global_gc) - gcs; \ - if (gcs > gc_in_copy) gc_in_copy = gcs; } - -#else /* !BM_COUNTERS */ -# define BM_COUNT(var) -# define BM_EGO_COUNT(send,rec) -# define BM_LAZY_COPY_START -# define BM_LAZY_COPY_STOP -#endif /* BM_COUNTERS */ - - -#ifdef BM_TIMERS -/********************************************************************* - * To add new timers: - * - * Add the variable below using the form extern BM_TIMER_T blah_time. - * Also add them in benchmark.c using the macro NEW_TIMER(blah). Use - * the macro BM_SWAP_TIMER(from,blah) ... BM_SWAP_TIMER(blah,to) to - * start and stop the new timer. Note, that you have to know what - * timer is running at the place where you want to insert your new - * timer to be able to stop and start (from,to) it. - * - * You can use the macros BM_STOP_TIMER(blah) and BM_START_TIMER(blah) - * around code that should not be timed at all. As above, you have to - * know what timer to start and stop. The system timer is running at - * most places in the emulator. Only the garbage collector and the - * message sending has its own timers at the moment. - * - * The timer_time used when stopping timers is the time it takes to - * start and stop the timers, calculated in init_benchmarking(). If it - * is not there, the time it takes to do this will accually be - * substantial compared to some small times in the system we want to - * meassure (send time in shared heap for instance). 
- */ - -/* (Assuming Solaris) */ - -#define BM_TIMER_T ErtsMonotonicTime -#define BM_START_TIMER(t) system_clock = ERTS_MONOTONIC_TO_NSEC(erts_os_monotonic_time()) -#define BM_STOP_TIMER(t) do { \ - BM_TIMER_T tmp = (ERTS_MONOTONIC_TO_NSEC(erts_os_monotonic_time()) - system_clock) - timer_time; \ - t##_time += (tmp > 0 ? tmp : 0); \ -} while(0) - -#define BM_TIME_PRINTER(str,time) do { \ - int min,sec,milli,micro; \ - BM_TIMER_T tmp; \ - tmp = (time) / 1000; \ - micro = tmp % 1000; \ - tmp /= 1000; \ - milli = tmp % 1000; \ - tmp /= 1000; \ - sec = tmp % 60; \ - min = tmp / 60; \ - erts_fprintf(file,str": %d:%02d.%03d %03d\n",min,sec,milli,micro); \ -} while(0) - -extern BM_TIMER_T system_clock; - -extern BM_TIMER_T timer_time; -extern BM_TIMER_T system_time; -extern BM_TIMER_T gc_time; -extern BM_TIMER_T minor_gc_time; -extern BM_TIMER_T major_gc_time; -extern BM_TIMER_T minor_global_gc_time; -extern BM_TIMER_T major_global_gc_time; -extern BM_TIMER_T send_time; -extern BM_TIMER_T copy_time; -extern BM_TIMER_T size_time; -extern BM_TIMER_T max_minor_time; -extern BM_TIMER_T max_major_time; -extern BM_TIMER_T max_global_minor_time; -extern BM_TIMER_T max_global_major_time; -extern BM_TIMER_T misc0_time; -extern BM_TIMER_T misc1_time; -extern BM_TIMER_T misc2_time; - -#define MAX_PAUSE_TIME 500000 -extern unsigned long local_pause_times[MAX_PAUSE_TIME]; -extern unsigned long pause_times[MAX_PAUSE_TIME]; -extern unsigned long pause_times_old[MAX_PAUSE_TIME]; - -#define MMU_INTERVAL 5 /* milli seconds */ -extern BM_TIMER_T mmu_counter; -extern BM_TIMER_T mmu; - -#define BM_NEW_TIMER(t) BM_TIMER_T t##_time = 0; -#define BM_RESET_TIMER(t) t##_time = 0; -#define BM_SWAP_TIMER(t1,t2) do { BM_STOP_TIMER(t1); BM_START_TIMER(t2); } while(0) -#define BM_MMU_INIT() do { \ - BM_TIMER_T gc = gc_time; \ - while (gc > 0) { \ - if (gc > MMU_INTERVAL) { \ - gc -= MMU_INTERVAL - mmu_counter; \ - erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \ - mmu_counter = 0; mmu = 0; \ - } else { \ - mmu_counter += gc; \ - if (mmu_counter >= MMU_INTERVAL) { \ - mmu_counter -= MMU_INTERVAL; \ - erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \ - mmu = 0; \ - } \ - gc = 0; \ - } \ - } \ - BM_RESET_TIMER(system); \ - BM_RESET_TIMER(send); \ - BM_RESET_TIMER(copy); \ - BM_RESET_TIMER(size); \ -} while(0) - -#define BM_MMU_READ() do { \ - BM_TIMER_T mut = system_time + send_time + copy_time + size_time; \ - while (mut > 0) { \ - if (mut > MMU_INTERVAL) { \ - BM_TIMER_T tmp = MMU_INTERVAL - mmu_counter; \ - mmu += tmp; mut -= tmp; \ - erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \ - mmu_counter = 0; mmu = 0; \ - } else { \ - mmu_counter += mut; mmu += mut; \ - if (mmu_counter >= MMU_INTERVAL) { \ - mmu_counter -= MMU_INTERVAL; \ - mmu -= mmu_counter; \ - erts_printf("%d\n",(int)((mmu / MMU_INTERVAL) * 100)); \ - mmu = mmu_counter; \ - } \ - mut = 0; \ - } \ - } \ -} while(0) - -#else /* !BM_TIMERS */ -# define BM_NEW_TIMER(t) -# define BM_START_TIMER(t) -# define BM_STOP_TIMER(t) -# define BM_RESET_TIMER(t) -# define BM_SWAP_TIMER(t1,t2) -# define BM_TIME_PRINTER(str,time) -# define BM_MMU_INIT() -# define BM_MMU_READ() -#endif /* BM_TIMERS */ - -#ifdef BM_HEAP_SIZES -extern unsigned long long max_used_heap; -extern unsigned long long max_allocated_heap; -extern unsigned long long max_used_global_heap; -extern unsigned long long max_allocated_global_heap; -#endif /* BM_HEAP_SIZES */ - -#ifdef BM_MESSAGE_SIZES -extern unsigned long long words_sent; -extern unsigned long long words_copied; -extern 
unsigned long long words_prealloc; -extern unsigned long long message_sizes[1000]; - -#define BM_MESSAGE_COPIED(size) { \ - words_copied += size; \ - BM_COUNT(messages_copied); } - -#define BM_PREALLOC_DATA(size) { \ - words_prealloc += size; } - -#define BM_MESSAGE(mess,send,rec) { \ - Uint msize = size_object(mess); \ - words_sent += msize; \ - if (msize < 1000) \ - message_sizes[msize]++; \ - else \ - message_sizes[999]++; \ - BM_EGO_COUNT(send,rec); \ - BM_COUNT(messages_sent); } - -#else /* !BM_MESSAGE_SIZES */ - -#define BM_MESSAGE_COPIED(size) BM_COUNT(messages_copied); -#define BM_PREALLOC_DATA(size) -#define BM_MESSAGE(mess,send,rec) { \ - BM_EGO_COUNT(send,rec); \ - BM_COUNT(messages_sent); } - -#endif /* BM_MESSAGE_SIZES */ - -void init_benchmarking(void); -void save_statistics(void); - -#endif /* _BENCHMARK_H_ */ diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index fc14061a44..d9048065c8 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -4305,8 +4305,9 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) else { locks &= ~ERTS_PROC_LOCK_STATUS; erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS); - if (erts_smp_atomic32_read_nob(&new_member->state) - & !(ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + if (new_member == BIF_P + || !(erts_smp_atomic32_read_nob(&new_member->state) + & (ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS))) { new_member->group_leader = STORE_NC_IN_PROC(new_member, BIF_ARG_1); } @@ -4326,6 +4327,7 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) BIF_ARG_1); bp->next = new_member->mbuf; new_member->mbuf = bp; + new_member->mbuf_sz += bp->used_size; } } } diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 065018514a..80db4eb6ff 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -161,6 +161,7 @@ bif erts_internal:port_close/1 bif erts_internal:port_connect/2 bif erts_internal:request_system_task/3 +bif erts_internal:request_system_task/4 bif erts_internal:check_process_code/2 bif erts_internal:map_to_tuple_keys/1 @@ -174,6 +175,8 @@ bif erts_internal:is_system_process/1 bif erts_internal:system_check/1 +bif erts_internal:release_literal_area_switch/0 + # inet_db support bif erlang:port_set_data/2 bif erlang:port_get_data/1 @@ -642,8 +645,9 @@ bif erts_debug:map_info/1 # New in 19.0 # -bif erts_internal:copy_literals/2 -bif erts_internal:purge_module/1 +bif erts_internal:is_process_executing_dirty/1 +bif erts_internal:check_dirty_process_code/2 +bif erts_internal:purge_module/2 bif binary:split/2 bif binary:split/3 bif erts_debug:size_shared/1 diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h index 464acd67f6..4a96d971c3 100644 --- a/erts/emulator/beam/big.h +++ b/erts/emulator/beam/big.h @@ -21,17 +21,8 @@ #ifndef __BIG_H__ #define __BIG_H__ -#ifndef __SYS_H__ #include "sys.h" -#endif - -#ifndef __CONFIG_H__ -#include "erl_vm.h" -#endif - -#ifndef __GLOBAL_H__ #include "global.h" -#endif typedef Uint ErtsDigit; diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 227fedfb69..971052b947 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -279,6 +279,7 @@ type TRACER_NIF LONG_LIVED SYSTEM tracer_nif type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls type DIRTY_START STANDARD PROCESSES dirty_start +type DIRTY_SL SHORT_LIVED SYSTEM dirty_short_lived +if threads_no_smp # Need thread safe allocs, but std_alloc and 
fix_alloc are not; @@ -367,6 +368,8 @@ type MONITOR_LH STANDARD PROCESSES monitor_lh type NLINK_LH STANDARD PROCESSES nlink_lh type CODE LONG_LIVED CODE code type LITERAL LITERAL CODE literal +type LITERAL_REF SHORT_LIVED CODE literal_area_ref +type PURGE_DATA SHORT_LIVED CODE purge_data type DB_HEIR_DATA STANDARD ETS db_heir_data type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 3fb866733c..29ba12dfdb 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -2284,9 +2284,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) { DistEntry *dep; i = 0; - /* Need to be the only thread running... */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); for (dep = erts_visible_dist_entries; dep; dep = dep->next) ++i; for (dep = erts_hidden_dist_entries; dep; dep = dep->next) @@ -2309,8 +2307,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = CONS(hp, tpl, res); hp += 2; } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); BIF_RET(res); } else if (BIF_ARG_1 == am_system_version) { erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0); @@ -2886,6 +2883,27 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(AM_tag); #endif } + else if (ERTS_IS_ATOM_STR("check_process_code",BIF_ARG_1)) { + Eterm terms[3]; + Sint length = 1; + Uint sz = 0; + Eterm *hp, res; + DECL_AM(direct_references); + + terms[0] = AM_direct_references; +#if !defined(ERTS_NEW_PURGE_STRATEGY) + { + DECL_AM(indirect_references); + terms[1] = AM_indirect_references; + terms[2] = am_copy_literals; + length = 3; + } +#endif + erts_bld_list(NULL, &sz, length, terms); + hp = HAlloc(BIF_P, sz); + res = erts_bld_list(&hp, NULL, length, terms); + BIF_RET(res); + } BIF_ERROR(BIF_P, BADARG); } diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index bad34211a5..128a7b3865 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -1094,7 +1094,7 @@ BIF_RETTYPE ets_insert_2(BIF_ALIST_2) CHECK_TABLES(); - /* Write lock table if more than one object to keep atomicy */ + /* Write lock table if more than one object to keep atomicity */ kind = ((is_list(BIF_ARG_2) && CDR(list_val(BIF_ARG_2)) != NIL) ? LCK_WRITE : LCK_WRITE_REC); @@ -1164,7 +1164,7 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2) Eterm lookup_ret; DbTableMethod* meth; - /* More than one object, use LCK_WRITE to keep atomicy */ + /* More than one object, use LCK_WRITE to keep atomicity */ kind = LCK_WRITE; tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind); if (tb == NULL) { diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 74979f984a..5e6fe4f460 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -40,7 +40,7 @@ ** DB_FINE_LOCKED set. The table variable is_thread_safe will then indicate ** if operations need to obtain fine grained locks or not. Some operations ** will for example always use exclusive table lock to guarantee -** a higher level of atomicy. +** a higher level of atomicity. 
*/ /* FIXATION: @@ -2866,15 +2866,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj, q->hvalue = hval; q->next = NULL; *bp = b = q; - - { - int nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems); - int nactive = NACTIVE(tb); - - if (nitems > nactive * (CHAIN_LEN + 1) && !IS_FIXED(tb)) { - grow(tb, nactive); - } - } + flags |= DB_INC_TRY_GROW; } else { HashDbTerm *q, *next = b->next; @@ -2910,6 +2902,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) HashDbTerm **bp = (HashDbTerm **) handle->bp; HashDbTerm *b = *bp; erts_smp_rwmtx_t* lck = (erts_smp_rwmtx_t*) handle->lck; + HashDbTerm* free_me = NULL; ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */ @@ -2921,21 +2914,34 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) b->hvalue = INVALID_HASH; } else { *bp = b->next; - free_term(tb, b); + free_me = b; } WUNLOCK_HASH(lck); erts_smp_atomic_dec_nob(&tb->common.nitems); try_shrink(tb); - } else if (handle->flags & DB_MUST_RESIZE) { - db_finalize_resize(handle, offsetof(HashDbTerm,dbterm)); - WUNLOCK_HASH(lck); - - free_term(tb, b); - } - else { - WUNLOCK_HASH(lck); + } else { + if (handle->flags & DB_MUST_RESIZE) { + db_finalize_resize(handle, offsetof(HashDbTerm,dbterm)); + free_me = b; + } + if (handle->flags & DB_INC_TRY_GROW) { + int nactive; + int nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems); + WUNLOCK_HASH(lck); + nactive = NACTIVE(tb); + + if (nitems > nactive * (CHAIN_LEN + 1) && !IS_FIXED(tb)) { + grow(tb, nactive); + } + } else { + WUNLOCK_HASH(lck); + } } + + if (free_me) + free_term(tb, free_me); + #ifdef DEBUG handle->dbterm = 0; #endif diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h index 60f7067d70..4acedbfed0 100644 --- a/erts/emulator/beam/erl_db_util.h +++ b/erts/emulator/beam/erl_db_util.h @@ -79,6 +79,7 @@ typedef union db_table DbTable; #define DB_MUST_RESIZE 1 #define DB_NEW_OBJECT 2 +#define DB_INC_TRY_GROW 4 /* Info about a database entry while it's being updated * (by update_counter or update_element) diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c index 6ce1376c81..c639ba623f 100644 --- a/erts/emulator/beam/erl_fun.c +++ b/erts/emulator/beam/erl_fun.c @@ -199,14 +199,13 @@ erts_erase_fun_entry(ErlFunEntry* fe) } void -erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end) +erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end) { int limit; HashBucket** bucket; - ErlFunEntry* to_delete = NULL; int i; - erts_fun_write_lock(); + erts_fun_read_lock(); limit = erts_fun_table.size; bucket = erts_fun_table.bucket; for (i = 0; i < limit; i++) { @@ -217,22 +216,51 @@ erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end) BeamInstr* addr = fe->address; if (start <= addr && addr < end) { + fe->pend_purge_address = addr; + ERTS_SMP_WRITE_MEMORY_BARRIER; fe->address = unloaded_fun; - if (erts_refc_dectest(&fe->refc, 0) == 0) { - fe->address = (void *) to_delete; - to_delete = fe; - } + erts_purge_state_add_fun(fe); } b = b->next; } } + erts_fun_read_unlock(); +} + +void +erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no) +{ + Uint ix; - while (to_delete != NULL) { - ErlFunEntry* next = (ErlFunEntry *) to_delete->address; - erts_erase_fun_entry_unlocked(to_delete); - to_delete = next; + for (ix = 0; ix < no; ix++) { + ErlFunEntry *fe = funs[ix]; + if (fe->address == unloaded_fun) + fe->address = fe->pend_purge_address; + fe->pend_purge_address = NULL; } - erts_fun_write_unlock(); +} + +void 
+erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no) +{ + Uint ix; + + for (ix = 0; ix < no; ix++) + funs[ix]->pend_purge_address = NULL; +} + +void +erts_fun_purge_complete(ErlFunEntry **funs, Uint no) +{ + Uint ix; + + for (ix = 0; ix < no; ix++) { + ErlFunEntry *fe = funs[ix]; + fe->pend_purge_address = NULL; + if (erts_refc_dectest(&fe->refc, 0) == 0) + erts_erase_fun_entry(fe); + } + ERTS_SMP_WRITE_MEMORY_BARRIER; } void @@ -294,6 +322,7 @@ fun_alloc(ErlFunEntry* template) obj->module = template->module; erts_refc_init(&obj->refc, -1); obj->address = unloaded_fun; + obj->pend_purge_address = NULL; #ifdef HIPE obj->native_address = NULL; #endif diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h index 8c4deea7a0..73c3e19c1c 100644 --- a/erts/emulator/beam/erl_fun.h +++ b/erts/emulator/beam/erl_fun.h @@ -44,6 +44,7 @@ typedef struct erl_fun_entry { Eterm module; /* Tagged atom for module. */ erts_refc_t refc; /* Reference count: One for code + one for each fun object in each process. */ + BeamInstr *pend_purge_address; /* address stored during a pending purge */ } ErlFunEntry; /* @@ -81,7 +82,10 @@ ErlFunEntry* erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index, void erts_erase_fun_entry(ErlFunEntry* fe); void erts_cleanup_funs(ErlFunThing* funp); -void erts_cleanup_funs_on_purge(BeamInstr* start, BeamInstr* end); +void erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end); +void erts_fun_purge_abort_prepare(ErlFunEntry **funs, Uint no); +void erts_fun_purge_abort_finalize(ErlFunEntry **funs, Uint no); +void erts_fun_purge_complete(ErlFunEntry **funs, Uint no); void erts_dump_fun_entries(int, void *); #endif diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index d0d74bbf44..8b0dd9a5a6 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -672,6 +672,7 @@ do_major_collection: killed before a GC could be done. */ if (reds == -2) { ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL; + int res; erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); erts_send_exit_signal(p, p->common.id, p, &locks, @@ -683,7 +684,9 @@ do_major_collection: erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); /* We have to make sure that we have space for need on the heap */ - return delay_garbage_collection(p, live_hf_end, need, fcalls); + res = delay_garbage_collection(p, live_hf_end, need, fcalls); + ERTS_MSACC_POP_STATE_M(); + return res; } erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); @@ -882,6 +885,58 @@ erts_garbage_collect_hibernate(Process* p) } +/* + * HiPE native code stack scanning procedures: + * - fullsweep_nstack() + * - gensweep_nstack() + * - offset_nstack() + * - sweep_literals_nstack() + */ +#if defined(HIPE) + +#define GENSWEEP_NSTACK(p,old_htop,n_htop) \ + do { \ + Eterm *tmp_old_htop = old_htop; \ + Eterm *tmp_n_htop = n_htop; \ + gensweep_nstack((p), &tmp_old_htop, &tmp_n_htop); \ + old_htop = tmp_old_htop; \ + n_htop = tmp_n_htop; \ + } while(0) + +/* + * offset_nstack() can ignore the descriptor-based traversal the other + * nstack procedures use and simply call offset_heap_ptr() instead. + * This relies on two facts: + * 1. The only live non-Erlang terms on an nstack are return addresses, + * and they will be skipped thanks to the low/high range check. + * 2. Dead values, even if mistaken for pointers into the low/high area, + * can be offset safely since they won't be dereferenced. 
+ * + * XXX: WARNING: If HiPE starts storing other non-Erlang values on the + * nstack, such as floats, then this will have to be changed. + */ +static ERTS_INLINE void offset_nstack(Process* p, Sint offs, + char* area, Uint area_size) +{ + if (p->hipe.nstack) { + ASSERT(p->hipe.nsp && p->hipe.nstend); + offset_heap_ptr(hipe_nstack_start(p), hipe_nstack_used(p), + offs, area, area_size); + } + else { + ASSERT(!p->hipe.nsp && !p->hipe.nstend); + } +} + +#else /* !HIPE */ + +#define fullsweep_nstack(p,n_htop) (n_htop) +#define GENSWEEP_NSTACK(p,old_htop,n_htop) do{}while(0) +#define offset_nstack(p,offs,area,area_size) do{}while(0) +#define sweep_literals_nstack(p,old_htop,area,area_size) (old_htop) + +#endif /* HIPE */ + void erts_garbage_collect_literals(Process* p, Eterm* literals, Uint byte_lit_size, @@ -944,7 +999,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, area_size = byte_lit_size; n = setup_rootset(p, p->arg_reg, p->arity, &rootset); roots = rootset.roots; - old_htop = p->old_htop; + old_htop = sweep_literals_nstack(p, p->old_htop, area, area_size); while (n--) { Eterm* g_ptr = roots->v; Uint g_sz = roots->sz; @@ -1211,56 +1266,6 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end, return -1; } -/* - * HiPE native code stack scanning procedures: - * - fullsweep_nstack() - * - gensweep_nstack() - * - offset_nstack() - */ -#if defined(HIPE) - -#define GENSWEEP_NSTACK(p,old_htop,n_htop) \ - do { \ - Eterm *tmp_old_htop = old_htop; \ - Eterm *tmp_n_htop = n_htop; \ - gensweep_nstack((p), &tmp_old_htop, &tmp_n_htop); \ - old_htop = tmp_old_htop; \ - n_htop = tmp_n_htop; \ - } while(0) - -/* - * offset_nstack() can ignore the descriptor-based traversal the other - * nstack procedures use and simply call offset_heap_ptr() instead. - * This relies on two facts: - * 1. The only live non-Erlang terms on an nstack are return addresses, - * and they will be skipped thanks to the low/high range check. - * 2. Dead values, even if mistaken for pointers into the low/high area, - * can be offset safely since they won't be dereferenced. - * - * XXX: WARNING: If HiPE starts storing other non-Erlang values on the - * nstack, such as floats, then this will have to be changed. 
- */ -static ERTS_INLINE void offset_nstack(Process* p, Sint offs, - char* area, Uint area_size) -{ - if (p->hipe.nstack) { - ASSERT(p->hipe.nsp && p->hipe.nstend); - offset_heap_ptr(hipe_nstack_start(p), hipe_nstack_used(p), - offs, area, area_size); - } - else { - ASSERT(!p->hipe.nsp && !p->hipe.nstend); - } -} - -#else /* !HIPE */ - -#define fullsweep_nstack(p,n_htop) (n_htop) -#define GENSWEEP_NSTACK(p,old_htop,n_htop) do{}while(0) -#define offset_nstack(p,offs,area,area_size) do{}while(0) - -#endif /* HIPE */ - static void do_minor(Process *p, ErlHeapFragment *live_hf_end, char *mature, Uint mature_size, diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c index ebeff51aac..f1bef28186 100644 --- a/erts/emulator/beam/erl_hl_timer.c +++ b/erts/emulator/beam/erl_hl_timer.c @@ -735,7 +735,10 @@ proc_timeout_common(Process *proc, void *tmr) if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&proc->common.timer, ERTS_PTMR_TIMEDOUT, (erts_aint_t) tmr)) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); + erts_aint32_t state; + erts_smp_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); + state = erts_smp_atomic32_read_acqb(&proc->state); + erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); if (!(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_EXITING))) erts_schedule_process(proc, state, 0); return 1; diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index fbdafec4ef..781bf024dd 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -332,8 +332,6 @@ erl_init(int ncpu, int node_tab_delete_delay, ErtsDbSpinCount db_spin_count) { - init_benchmarking(); - erts_bif_unique_init(); erts_init_monitors(); erts_init_time(time_correction, time_warp_mode); @@ -384,6 +382,7 @@ erl_init(int ncpu, erts_init_unicode(); /* after RE to get access to PCRE unicode */ erts_init_external(); erts_init_map(); + erts_beam_bif_load_init(); erts_delay_trap = erts_export_put(am_erlang, am_delay_trap, 2); erts_late_init_process(); #if HAVE_ERTS_MSEG @@ -2250,7 +2249,42 @@ erl_start(int argc, char **argv) otp_ring0_pid = erl_first_process_otp("otp_ring0", NULL, 0, boot_argc, boot_argv); - (void) erl_system_process_otp(otp_ring0_pid, "erts_code_purger"); + { + /* + * The erts_code_purger and the erts_literal_area_collector + * system processes are *always* alive. If they terminate + * they bring the whole VM down. 
+ */ + Eterm pid; + + pid = erl_system_process_otp(otp_ring0_pid, "erts_code_purger"); + erts_code_purger + = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, + internal_pid_index(pid)); + ASSERT(erts_code_purger && erts_code_purger->common.id == pid); + erts_proc_inc_refc(erts_code_purger); + +#ifdef ERTS_NEW_PURGE_STRATEGY + pid = erl_system_process_otp(otp_ring0_pid, "erts_literal_area_collector"); + erts_literal_area_collector + = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, + internal_pid_index(pid)); + ASSERT(erts_literal_area_collector + && erts_literal_area_collector->common.id == pid); + erts_proc_inc_refc(erts_literal_area_collector); +#endif + +#ifdef ERTS_DIRTY_SCHEDULERS + pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker"); + erts_dirty_process_code_checker + = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, + internal_pid_index(pid)); + ASSERT(erts_dirty_process_code_checker + && erts_dirty_process_code_checker->common.id == pid); + erts_proc_inc_refc(erts_dirty_process_code_checker); +#endif + + } #ifdef ERTS_SMP erts_start_schedulers(); @@ -2345,19 +2379,18 @@ erts_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2) { system_cleanup(flush_async); - save_statistics(); - if (erts_mtrace_enabled) erts_mtrace_exit((Uint32) n); + if (fmt != NULL && *fmt != '\0') + erl_error(fmt, args2); /* Print error message. */ + /* Produce an Erlang core dump if error */ if (((n == ERTS_ERROR_EXIT && erts_no_crash_dump == 0) || n == ERTS_DUMP_EXIT) && erts_initialized) { erl_crash_dump_v((char*) NULL, 0, fmt, args1); } - if (fmt != NULL && *fmt != '\0') - erl_error(fmt, args2); /* Print error message. */ sys_tty_reset(n); if (n == ERTS_INTR_EXIT) diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index 39c0617143..06266363b5 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -96,6 +96,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "dist_entry", "address" }, { "dist_entry_links", "address" }, { "code_write_permission", NULL }, + { "purge_state", NULL }, { "proc_status", "pid" }, { "proc_trace", "pid" }, { "ports_snapshot", NULL }, @@ -112,6 +113,9 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "export_tab", NULL }, { "fun_tab", NULL }, { "environ", NULL }, +#ifdef ERTS_NEW_PURGE_STRATEGY + { "release_literal_areas", NULL }, +#endif #endif { "efile_drv", "address" }, { "drv_ev_state_grow", NULL, }, diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c index 8efc983f04..979a0040b0 100644 --- a/erts/emulator/beam/erl_map.c +++ b/erts/emulator/beam/erl_map.c @@ -497,18 +497,50 @@ Eterm erts_hashmap_from_array(ErtsHeapFactory* factory, Eterm *leafs, Uint n, return res; } +Eterm erts_map_from_ks_and_vs(ErtsHeapFactory *factory, Eterm *ks0, Eterm *vs0, Uint n) +{ + if (n < MAP_SMALL_MAP_LIMIT) { + Eterm *ks, *vs, *hp; + flatmap_t *mp; + Eterm keys; -Eterm erts_hashmap_from_ks_and_vs_extra(Process *p, Eterm *ks, Eterm *vs, Uint n, + hp = erts_produce_heap(factory, 3 + 1 + (2 * n), 0); + keys = make_tuple(hp); + *hp++ = make_arityval(n); + ks = hp; + hp += n; + mp = (flatmap_t*)hp; + hp += MAP_HEADER_FLATMAP_SZ; + vs = hp; + + mp->thing_word = MAP_HEADER_FLATMAP; + mp->size = n; + mp->keys = keys; + + sys_memcpy(ks, ks0, n * sizeof(Eterm)); + sys_memcpy(vs, vs0, n * sizeof(Eterm)); + + erts_validate_and_sort_flatmap(mp); + + return make_flatmap(mp); + } else { + return erts_hashmap_from_ks_and_vs(factory, ks0, vs0, n); + } + return 
THE_NON_VALUE; +} + + +Eterm erts_hashmap_from_ks_and_vs_extra(ErtsHeapFactory *factory, + Eterm *ks, Eterm *vs, Uint n, Eterm key, Eterm value) { Uint32 sw, hx; Uint i,sz; hxnode_t *hxns; - ErtsHeapFactory factory; Eterm *hp, res; sz = (key == THE_NON_VALUE) ? n : (n + 1); ASSERT(sz > MAP_SMALL_MAP_LIMIT); - hp = HAlloc(p, 2 * sz); + hp = erts_produce_heap(factory, 2 * sz, 0); /* create tmp hx values and leaf ptrs */ hxns = (hxnode_t *)erts_alloc(ERTS_ALC_T_TMP, sz * sizeof(hxnode_t)); @@ -531,12 +563,9 @@ Eterm erts_hashmap_from_ks_and_vs_extra(Process *p, Eterm *ks, Eterm *vs, Uint n hxns[i].i = i; } - erts_factory_proc_init(&factory, p); - res = hashmap_from_unsorted_array(&factory, hxns, sz, 0); - erts_factory_close(&factory); + res = hashmap_from_unsorted_array(factory, hxns, sz, 0); erts_free(ERTS_ALC_T_TMP, (void *) hxns); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(p); return res; } @@ -1780,11 +1809,14 @@ Eterm erts_maps_put(Process *p, Eterm key, Eterm value, Eterm map) { /* the map will grow */ if (n >= MAP_SMALL_MAP_LIMIT) { + ErtsHeapFactory factory; HRelease(p, shp + MAP_HEADER_FLATMAP_SZ + n, shp); ks = flatmap_get_keys(mp); vs = flatmap_get_values(mp); - res = erts_hashmap_from_ks_and_vs_extra(p,ks,vs,n,key,value); + erts_factory_proc_init(&factory, p); + res = erts_hashmap_from_ks_and_vs_extra(&factory,ks,vs,n,key,value); + erts_factory_close(&factory); return res; } diff --git a/erts/emulator/beam/erl_map.h b/erts/emulator/beam/erl_map.h index 8b5c9582ba..61a841f7f0 100644 --- a/erts/emulator/beam/erl_map.h +++ b/erts/emulator/beam/erl_map.h @@ -98,10 +98,12 @@ Eterm* hashmap_iterator_prev(struct ErtsWStack_* s); int hashmap_key_hash_cmp(Eterm* ap, Eterm* bp); Eterm erts_hashmap_from_array(ErtsHeapFactory*, Eterm *leafs, Uint n, int reject_dupkeys); -#define erts_hashmap_from_ks_and_vs(P, KS, VS, N) \ - erts_hashmap_from_ks_and_vs_extra((P), (KS), (VS), (N), THE_NON_VALUE, THE_NON_VALUE); +#define erts_hashmap_from_ks_and_vs(F, KS, VS, N) \ + erts_hashmap_from_ks_and_vs_extra((F), (KS), (VS), (N), THE_NON_VALUE, THE_NON_VALUE); -Eterm erts_hashmap_from_ks_and_vs_extra(Process *p, Eterm *ks, Eterm *vs, Uint n, +Eterm erts_map_from_ks_and_vs(ErtsHeapFactory *factory, Eterm *ks, Eterm *vs, Uint n); +Eterm erts_hashmap_from_ks_and_vs_extra(ErtsHeapFactory *factory, + Eterm *ks, Eterm *vs, Uint n, Eterm k, Eterm v); const Eterm *erts_maps_get(Eterm key, Eterm map); diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 71ab92937d..91e06cde2d 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -697,9 +697,6 @@ erts_send_message(Process* sender, #ifdef SHCOPY_SEND erts_shcopy_t info; #endif - BM_STOP_TIMER(system); - BM_MESSAGE(message,sender,receiver); - BM_START_TIMER(send); #ifdef USE_VM_PROBES *sender_name = *receiver_name = '\0'; @@ -720,7 +717,6 @@ erts_send_message(Process* sender, #ifdef USE_VM_PROBES Uint dt_utag_size = 0; #endif - BM_SWAP_TIMER(send,size); /* SHCOPY corrupts the heap between * copy_shared_calculate, and @@ -747,8 +743,6 @@ erts_send_message(Process* sender, #else msize = size_object(message); #endif - BM_SWAP_TIMER(size,send); - mp = erts_alloc_message_heap_state(receiver, &receiver_state, receiver_locks, @@ -760,8 +754,6 @@ erts_send_message(Process* sender, &hp, &ohp); - BM_SWAP_TIMER(send,copy); - #ifdef SHCOPY_SEND if (is_not_immed(message)) message = copy_shared_perform(message, msize, &info, &hp, ohp); @@ -792,9 +784,6 @@ erts_send_message(Process* sender, msize, tok_label, tok_lastcnt, 
tok_serial); } #endif - BM_MESSAGE_COPIED(msize); - BM_SWAP_TIMER(copy,send); - } else { Eterm *hp; @@ -803,22 +792,18 @@ erts_send_message(Process* sender, msize = 0; } else { - BM_SWAP_TIMER(send,size); #ifdef SHCOPY_SEND INITIALIZE_SHCOPY(info); msize = copy_shared_calculate(message, &info); #else msize = size_object(message); #endif - BM_SWAP_TIMER(size,send); - mp = erts_alloc_message_heap_state(receiver, &receiver_state, receiver_locks, msize, &hp, &ohp); - BM_SWAP_TIMER(send,copy); #ifdef SHCOPY_SEND if (is_not_immed(message)) message = copy_shared_perform(message, msize, &info, &hp, ohp); @@ -827,8 +812,6 @@ erts_send_message(Process* sender, if (is_not_immed(message)) message = copy_struct(message, msize, &hp, ohp); #endif - BM_MESSAGE_COPIED(msz); - BM_SWAP_TIMER(copy,send); } #ifdef USE_VM_PROBES DTRACE6(message_send, sender_name, receiver_name, @@ -846,8 +829,6 @@ erts_send_message(Process* sender, mp, message, sender->common.id); - BM_SWAP_TIMER(send,system); - return res; } diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c index 544bc8b983..421445fbad 100644 --- a/erts/emulator/beam/erl_msacc.c +++ b/erts/emulator/beam/erl_msacc.c @@ -40,10 +40,11 @@ #include "erl_bif_unique.h" #include "erl_map.h" #include "erl_msacc.h" +#include "erl_bif_table.h" #if ERTS_ENABLE_MSACC -static Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, Eterm **hpp, Uint *szp); +static Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory); static void erts_msacc_reset(ErtsMsAcc *msacc); static ErtsMsAcc* get_msacc(void); @@ -52,7 +53,9 @@ erts_tsd_key_t ERTS_WRITE_UNLIKELY(erts_msacc_key); #else ErtsMsAcc *ERTS_WRITE_UNLIKELY(erts_msacc) = NULL; #endif +#ifndef ERTS_MSACC_ALWAYS_ON int ERTS_WRITE_UNLIKELY(erts_msacc_enabled); +#endif static Eterm *erts_msacc_state_atoms = NULL; static erts_rwmtx_t msacc_mutex; @@ -62,6 +65,12 @@ static ErtsMsAcc *msacc_unmanaged = NULL; static Uint msacc_unmanaged_count = 0; #endif +#if ERTS_MSACC_STATE_COUNT < MAP_SMALL_MAP_LIMIT +#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + REF_THING_SIZE) +#else +#define DEFAULT_MSACC_MSG_SIZE (3 + ERTS_MSACC_STATE_COUNT * 3 + 3 + REF_THING_SIZE) +#endif + /* we have to split initiation as atoms are not inited in early init */ void erts_msacc_early_init(void) { #ifndef ERTS_MSACC_ALWAYS_ON @@ -88,7 +97,8 @@ void erts_msacc_init(void) { void erts_msacc_init_thread(char *type, int id, int managed) { ErtsMsAcc *msacc; - msacc = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMsAcc)); + msacc = erts_alloc(ERTS_ALC_T_MSACC, sizeof(ErtsMsAcc) + + sizeof(ErtsMsAccPerfCntr) * ERTS_MSACC_STATE_COUNT); msacc->type = strdup(type); msacc->id = make_small(id); @@ -122,79 +132,80 @@ void erts_msacc_init_thread(char *type, int id, int managed) { #endif } +#ifdef ERTS_MSACC_EXTENDED_STATES + +void erts_msacc_set_bif_state(ErtsMsAcc *__erts_msacc_cache, Eterm mod, void *fn) { + +#ifdef ERTS_MSACC_EXTENDED_BIFS +#define BIF_LIST(Mod,Func,Arity,FuncAddr,Num) \ + if (fn == &FuncAddr) { \ + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATIC_STATE_COUNT + Num); \ + } else +#include "erl_bif_list.h" +#undef BIF_LIST + { /* The last else in the macro expansion, + this happens for internal bifs, i.e. 
traps etc */ + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_BIF); + } +#else + if (mod == am_ets) { + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS); + } else { + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_BIF); + } +#endif +} + +#endif + /* * Creates a structure looking like this * #{ type => scheduler, id => 1, counters => #{ State1 => Counter1 ... StateN => CounterN}} */ static -Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, Eterm **hpp, Uint *szp) { +Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory) { + Uint sz = 0; + Eterm *hp, cvs[ERTS_MSACC_STATE_COUNT]; + Eterm key, state_map; int i; - Eterm *hp; - Eterm key, state_key, state_map; - Eterm res = THE_NON_VALUE; - flatmap_t *map; - - if (szp) { - *szp += MAP_HEADER_FLATMAP_SZ + 1 + 2*(3); - *szp += MAP_HEADER_FLATMAP_SZ + 1 + 2*(ERTS_MSACC_STATE_COUNT); - for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) { - (void)erts_bld_sint64(NULL,szp,(Sint64)msacc->perf_counters[i]); + flatmap_t *map; + + hp = erts_produce_heap(factory, 4, 0); + key = TUPLE3(hp,am_counters,am_id,am_type); + + for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) { + cvs[i] = erts_bld_sint64(NULL, &sz,(Sint64)msacc->counters[i].pc); #ifdef ERTS_MSACC_STATE_COUNTERS - (void)erts_bld_uint64(NULL,szp,msacc->state_counters[i]); - *szp += 3; /* tuple to put state+perf counter in */ + erts_bld_uint64(NULL,&sz,msacc->counters[i].sc); + sz += 3; #endif - } } - if (hpp) { - Eterm counters[ERTS_MSACC_STATE_COUNT]; - hp = *hpp; - for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) { - Eterm counter = erts_bld_sint64(&hp,NULL,(Sint64)msacc->perf_counters[i]); + hp = erts_produce_heap(factory, sz, 0); + + for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) { + cvs[i] = erts_bld_sint64(&hp,NULL,(Sint64)msacc->counters[i].pc); #ifdef ERTS_MSACC_STATE_COUNTERS - Eterm counter__ = erts_bld_uint64(&hp,NULL,msacc->state_counters[i]); - counters[i] = TUPLE2(hp,counter,counter__); - hp += 3; -#else - counters[i] = counter; + Eterm counter__ = erts_bld_uint64(&hp,NULL,msacc->counters[i].sc); + cvs[i] = TUPLE2(hp,cvs[i],counter__); + hp += 3; #endif - } - - key = TUPLE3(hp,am_counters,am_id,am_type); - hp += 4; - - state_key = make_tuple(hp); - hp[0] = make_arityval(ERTS_MSACC_STATE_COUNT); - - for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) - hp[1+i] = erts_msacc_state_atoms[i]; - hp += 1 + ERTS_MSACC_STATE_COUNT; - - map = (flatmap_t*)hp; - hp += MAP_HEADER_FLATMAP_SZ; - map->thing_word = MAP_HEADER_FLATMAP; - map->size = ERTS_MSACC_STATE_COUNT; - map->keys = state_key; - for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) - hp[i] = counters[i]; - hp += ERTS_MSACC_STATE_COUNT; - state_map = make_flatmap(map); - - map = (flatmap_t*)hp; - hp += MAP_HEADER_FLATMAP_SZ; - map->thing_word = MAP_HEADER_FLATMAP; - map->size = 3; - map->keys = key; - hp[0] = state_map; - hp[1] = msacc->id; - hp[2] = am_atom_put(msacc->type,strlen(msacc->type)); - hp += 3; - - *hpp = hp; - res = make_flatmap(map); } - return res; + state_map = erts_map_from_ks_and_vs(factory, erts_msacc_state_atoms, cvs, + ERTS_MSACC_STATE_COUNT); + + hp = erts_produce_heap(factory, MAP_HEADER_FLATMAP_SZ + 3, 0); + map = (flatmap_t*)hp; + hp += MAP_HEADER_FLATMAP_SZ; + map->thing_word = MAP_HEADER_FLATMAP; + map->size = 3; + map->keys = key; + hp[0] = state_map; + hp[1] = msacc->id; + hp[2] = am_atom_put(msacc->type,strlen(msacc->type)); + + return make_flatmap(map); } typedef struct { @@ -222,40 +233,31 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) { ErtsSchedulerData *esdp = erts_get_scheduler_data(); 
Process *rp = msaccrp->proc; ErtsMessage *msgp = NULL; - Eterm **hpp, *hp; + Eterm *hp; Eterm ref_copy = NIL, msg; - Uint sz, *szp; - ErlOffHeap *ohp = NULL; ErtsProcLocks rp_locks = (esdp && msaccrp->req_sched == esdp->no ? ERTS_PROC_LOCK_MAIN : 0); + ErtsHeapFactory factory; - sz = 0; - hpp = NULL; - szp = &sz; - - if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx); - - while (1) { - if (hpp) - ref_copy = STORE_NC(hpp, ohp, msaccrp->ref); - else - *szp += REF_THING_SIZE; - - if (msaccrp->action != ERTS_MSACC_GATHER) - msg = ref_copy; - else { - msg = erts_msacc_gather_stats(msacc, hpp, szp); - msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg); - } - if (hpp) - break; - - msgp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp); - hpp = &hp; - szp = NULL; - } + if (msaccrp->action == ERTS_MSACC_GATHER) { + + msgp = erts_factory_message_create(&factory, rp, &rp_locks, DEFAULT_MSACC_MSG_SIZE); + + if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx); - if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx); + hp = erts_produce_heap(&factory, REF_THING_SIZE + 3 /* tuple */, 0); + ref_copy = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref); + msg = erts_msacc_gather_stats(msacc, &factory); + msg = TUPLE2(hp, ref_copy, msg); + + if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx); + + erts_factory_close(&factory); + } else { + ErlOffHeap *ohp = NULL; + msgp = erts_alloc_message_heap(rp, &rp_locks, REF_THING_SIZE, &hp, &ohp); + msg = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref); + } erts_queue_message(rp, rp_locks, msgp, msg, am_system); @@ -308,9 +310,9 @@ static void erts_msacc_reset(ErtsMsAcc *msacc) { if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx); for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) { - msacc->perf_counters[i] = 0; + msacc->counters[i].pc = 0; #ifdef ERTS_MSACC_STATE_COUNTERS - msacc->state_counters[i] = 0; + msacc->counters[i].sc = 0; #endif } @@ -415,7 +417,7 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads) ErtsSysPerfCounter perf_counter; /* if enabled update stats */ perf_counter = erts_sys_perf_counter(); - unmanaged[i]->perf_counters[unmanaged[i]->state] += + unmanaged[i]->counters[unmanaged[i]->state].pc += perf_counter - unmanaged[i]->perf_counter; unmanaged[i]->perf_counter = perf_counter; } @@ -454,7 +456,7 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads) for (msacc = msacc_unmanaged; msacc != NULL; msacc = msacc->next) { erts_mtx_lock(&msacc->mtx); perf_counter = erts_sys_perf_counter(); - msacc->perf_counters[msacc->state] += perf_counter - msacc->perf_counter; + msacc->counters[msacc->state].pc += perf_counter - msacc->perf_counter; msacc->perf_counter = 0; erts_mtx_unlock(&msacc->mtx); } diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h index 284388f7aa..ad7c8c5eee 100644 --- a/erts/emulator/beam/erl_msacc.h +++ b/erts/emulator/beam/erl_msacc.h @@ -35,6 +35,10 @@ this reduces overhead a little bit when profiling */ /* #define ERTS_MSACC_ALWAYS_ON 1 */ +/* Uncomment this to keep individual stats for all + of the bifs when extended states is enabled */ +/* #define ERTS_MSACC_EXTENDED_BIFS 1 */ + #define ERTS_MSACC_DISABLE 0 #define ERTS_MSACC_ENABLE 1 #define ERTS_MSACC_RESET 2 @@ -92,7 +96,13 @@ static char *erts_msacc_states[] = { #define ERTS_MSACC_STATE_SLEEP 13 #define ERTS_MSACC_STATE_TIMERS 14 -#define ERTS_MSACC_STATE_COUNT 15 +#define ERTS_MSACC_STATIC_STATE_COUNT 15 + +#ifdef ERTS_MSACC_EXTENDED_BIFS +#define ERTS_MSACC_STATE_COUNT (ERTS_MSACC_STATIC_STATE_COUNT + BIF_SIZE) +#else +#define 
ERTS_MSACC_STATE_COUNT ERTS_MSACC_STATIC_STATE_COUNT +#endif #if ERTS_MSACC_STATE_STRINGS static char *erts_msacc_states[] = { @@ -111,22 +121,26 @@ static char *erts_msacc_states[] = { "send", "sleep", "timers" +#ifdef ERTS_MSACC_EXTENDED_BIFS +#define BIF_LIST(Mod,Func,Arity,FuncAddr,Num) \ + ,"bif_" #Mod "_" #Func "_" #Arity +#include "erl_bif_list.h" +#undef BIF_LIST +#endif }; #endif #endif typedef struct erl_msacc_t_ ErtsMsAcc; - -struct erl_msacc_t_ { - - /* the the values below are protected by mtx iff unmanaged = 1 */ - ErtsSysPerfCounter perf_counter; - ErtsSysPerfCounter perf_counters[ERTS_MSACC_STATE_COUNT]; +typedef struct erl_msacc_p_cnt_t_ { + ErtsSysPerfCounter pc; #ifdef ERTS_MSACC_STATE_COUNTERS - Uint64 state_counters[ERTS_MSACC_STATE_COUNT]; + Uint64 sc; #endif - Uint state; +} ErtsMsAccPerfCntr; + +struct erl_msacc_t_ { /* protected by msacc_mutex in erl_msacc.c, and should be constant */ int unmanaged; @@ -135,12 +149,16 @@ struct erl_msacc_t_ { erts_tid_t tid; Eterm id; char *type; + + /* the the values below are protected by mtx iff unmanaged = 1 */ + ErtsSysPerfCounter perf_counter; + Uint state; + ErtsMsAccPerfCntr counters[]; + }; #if ERTS_ENABLE_MSACC -#define ERTS_MSACC_INLINE ERTS_GLB_INLINE - #ifdef USE_THREADS extern erts_tsd_key_t erts_msacc_key; #else @@ -276,20 +294,20 @@ void erts_msacc_init_thread(char *type, int id, int liberty); #define ERTS_MSACC_PUSH_AND_SET_STATE_M(state) \ ERTS_MSACC_PUSH_STATE_M(); ERTS_MSACC_SET_STATE_CACHED_M(state) -ERTS_MSACC_INLINE +ERTS_GLB_INLINE void erts_msacc_set_state_um__(ErtsMsAcc *msacc,Uint state,int increment); -ERTS_MSACC_INLINE +ERTS_GLB_INLINE void erts_msacc_set_state_m__(ErtsMsAcc *msacc,Uint state,int increment); -ERTS_MSACC_INLINE +ERTS_GLB_INLINE Uint erts_msacc_get_state_um__(ErtsMsAcc *msacc); -ERTS_MSACC_INLINE +ERTS_GLB_INLINE Uint erts_msacc_get_state_m__(ErtsMsAcc *msacc); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -ERTS_MSACC_INLINE +ERTS_GLB_INLINE Uint erts_msacc_get_state_um__(ErtsMsAcc *msacc) { Uint state; if (msacc->unmanaged) @@ -300,12 +318,12 @@ Uint erts_msacc_get_state_um__(ErtsMsAcc *msacc) { return state; } -ERTS_MSACC_INLINE +ERTS_GLB_INLINE Uint erts_msacc_get_state_m__(ErtsMsAcc *msacc) { return msacc->state; } -ERTS_MSACC_INLINE +ERTS_GLB_INLINE void erts_msacc_set_state_um__(ErtsMsAcc *msacc, Uint new_state, int increment) { if (ERTS_UNLIKELY(msacc->unmanaged)) { erts_mtx_lock(&msacc->mtx); @@ -322,7 +340,7 @@ void erts_msacc_set_state_um__(ErtsMsAcc *msacc, Uint new_state, int increment) erts_mtx_unlock(&msacc->mtx); } -ERTS_MSACC_INLINE +ERTS_GLB_INLINE void erts_msacc_set_state_m__(ErtsMsAcc *msacc, Uint new_state, int increment) { ErtsSysPerfCounter prev_perf_counter; Sint64 diff; @@ -334,9 +352,9 @@ void erts_msacc_set_state_m__(ErtsMsAcc *msacc, Uint new_state, int increment) { msacc->perf_counter = erts_sys_perf_counter(); diff = msacc->perf_counter - prev_perf_counter; ASSERT(diff >= 0); - msacc->perf_counters[msacc->state] += diff; + msacc->counters[msacc->state].pc += diff; #ifdef ERTS_MSACC_STATE_COUNTERS - msacc->state_counters[new_state] += increment; + msacc->counters[new_state].sc += increment; #endif msacc->state = new_state; } @@ -364,7 +382,7 @@ void erts_msacc_set_state_m__(ErtsMsAcc *msacc, Uint new_state, int increment) { #define ERTS_MSACC_SET_STATE_CACHED_M(state) #define ERTS_MSACC_POP_STATE_M() #define ERTS_MSACC_PUSH_AND_SET_STATE_M(state) - +#define ERTS_MSACC_SET_BIF_STATE_CACHED_X(Mod,Addr) #endif /* ERTS_ENABLE_MSACC */ @@ -385,9 +403,13 @@ void 
erts_msacc_set_state_m__(ErtsMsAcc *msacc, Uint new_state, int increment) { #define ERTS_MSACC_SET_STATE_CACHED_M_X(state) #define ERTS_MSACC_POP_STATE_M_X() #define ERTS_MSACC_PUSH_AND_SET_STATE_M_X(state) +#define ERTS_MSACC_PUSH_AND_SET_STATE_CACHED_M_X(state) +#define ERTS_MSACC_SET_BIF_STATE_CACHED_X(Mod,Addr) #else +void erts_msacc_set_bif_state(ErtsMsAcc *msacc, Eterm mod, void *addr); + #define ERTS_MSACC_PUSH_STATE_X() ERTS_MSACC_PUSH_STATE() #define ERTS_MSACC_POP_STATE_X() ERTS_MSACC_POP_STATE() #define ERTS_MSACC_SET_STATE_X(state) ERTS_MSACC_SET_STATE(state) @@ -403,6 +425,9 @@ void erts_msacc_set_state_m__(ErtsMsAcc *msacc, Uint new_state, int increment) { #define ERTS_MSACC_SET_STATE_CACHED_M_X(state) ERTS_MSACC_SET_STATE_CACHED_M(state) #define ERTS_MSACC_POP_STATE_M_X() ERTS_MSACC_POP_STATE_M() #define ERTS_MSACC_PUSH_AND_SET_STATE_M_X(state) ERTS_MSACC_PUSH_AND_SET_STATE_M(state) +#define ERTS_MSACC_SET_BIF_STATE_CACHED_X(Mod,Addr) \ + if (ERTS_MSACC_IS_ENABLED_CACHED_X()) \ + erts_msacc_set_bif_state(__erts_msacc_cache, Mod, Addr) #endif /* !ERTS_MSACC_EXTENDED_STATES */ diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index 23931f0e54..ef2fb93106 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -120,8 +120,10 @@ execution_state(ErlNifEnv *env, Process **c_pp, int *schedp) else { Process *c_p = env->proc; - if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) - ASSERT(is_scheduler() > 0); + if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) { + ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + & ERTS_PROC_LOCK_MAIN); + } else { c_p = env->proc->next; ASSERT(is_scheduler() < 0); @@ -208,11 +210,16 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, #endif } +static void full_cache_env(ErlNifEnv *env); +static void cache_env(ErlNifEnv* env); +static void full_flush_env(ErlNifEnv *env); +static void flush_env(ErlNifEnv* env); + +#ifdef ERTS_DIRTY_SCHEDULERS void erts_pre_dirty_nif(ErtsSchedulerData *esdp, - ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, - Process* tracee) + ErlNifEnv* env, Process* p, + struct erl_module_nif* mod_nif) { -#ifdef ERTS_DIRTY_SCHEDULERS Process *sproc; #ifdef DEBUG erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); @@ -223,7 +230,7 @@ void erts_pre_dirty_nif(ErtsSchedulerData *esdp, ASSERT(esdp); #endif - erts_pre_nif(env, p, mod_nif, tracee); + erts_pre_nif(env, p, mod_nif, NULL); sproc = esdp->dirty_shadow_process; ASSERT(sproc); @@ -235,22 +242,10 @@ void erts_pre_dirty_nif(ErtsSchedulerData *esdp, sproc->next = p; sproc->common.id = p->common.id; - sproc->htop = p->htop; - sproc->stop = p->stop; - sproc->hend = p->hend; - sproc->heap = p->heap; - sproc->abandoned_heap = p->abandoned_heap; - sproc->heap_sz = p->heap_sz; - sproc->high_water = p->high_water; - sproc->old_hend = p->old_hend; - sproc->old_htop = p->old_htop; - sproc->old_heap = p->old_heap; - sproc->mbuf = NULL; - sproc->mbuf_sz = 0; - ERTS_INIT_OFF_HEAP(&sproc->off_heap); env->proc = sproc; -#endif + full_cache_env(env); } +#endif /* Temporary object header, auto-deallocated when NIF returns * or when independent environment is cleared. 
@@ -274,32 +269,37 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env) void erts_post_nif(ErlNifEnv* env) { erts_unblock_fpe(env->fpe_was_unmasked); + full_flush_env(env); + free_tmp_objs(env); + env->exiting = ERTS_PROC_IS_EXITING(env->proc); +} #ifdef ERTS_DIRTY_SCHEDULERS - if (!(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) +void erts_post_dirty_nif(ErlNifEnv* env) +{ + Process *c_p; + ASSERT(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC); + ASSERT(env->proc->next); + erts_unblock_fpe(env->fpe_was_unmasked); + full_flush_env(env); + free_tmp_objs(env); + c_p = env->proc->next; + env->exiting = ERTS_PROC_IS_EXITING(c_p); + ERTS_VBUMP_ALL_REDS(c_p); +} #endif - { - ASSERT(is_scheduler() > 0); - if (env->heap_frag == NULL) { - ASSERT(env->hp_end == HEAP_LIMIT(env->proc)); - ASSERT(env->hp >= HEAP_TOP(env->proc)); - ASSERT(env->hp <= HEAP_LIMIT(env->proc)); - HEAP_TOP(env->proc) = env->hp; - } - else { - ASSERT(env->hp_end != HEAP_LIMIT(env->proc)); - ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size); - env->heap_frag->used_size = env->hp - env->heap_frag->mem; - ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size); - } - env->exiting = ERTS_PROC_IS_EXITING(env->proc); - } + +static void full_flush_env(ErlNifEnv* env) +{ #ifdef ERTS_DIRTY_SCHEDULERS - else { /* Dirty nif call using shadow process struct */ + if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) { + /* Dirty nif call using shadow process struct */ Process *c_p = env->proc->next; ASSERT(is_scheduler() < 0); ASSERT(env->proc->common.id == c_p->common.id); + ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + & ERTS_PROC_LOCK_MAIN); if (!env->heap_frag) { ASSERT(env->hp_end == HEAP_LIMIT(c_p)); @@ -339,11 +339,48 @@ void erts_post_nif(ErlNifEnv* env) } c_p->off_heap.overhead += env->proc->off_heap.overhead; - env->exiting = ERTS_PROC_IS_EXITING(c_p); - BUMP_ALL_REDS(c_p); + return; } #endif - free_tmp_objs(env); + + flush_env(env); +} + +static void full_cache_env(ErlNifEnv* env) +{ +#ifdef ERTS_DIRTY_SCHEDULERS + if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) { + /* Dirty nif call using shadow process struct */ + Process *sproc = env->proc; + Process *c_p = sproc->next; + ASSERT(c_p); + ASSERT(is_scheduler() < 0); + ASSERT(env->proc->common.id == c_p->common.id); + ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + & ERTS_PROC_LOCK_MAIN); + + sproc->htop = c_p->htop; + sproc->stop = c_p->stop; + sproc->hend = c_p->hend; + sproc->heap = c_p->heap; + sproc->abandoned_heap = c_p->abandoned_heap; + sproc->heap_sz = c_p->heap_sz; + sproc->high_water = c_p->high_water; + sproc->old_hend = c_p->old_hend; + sproc->old_htop = c_p->old_htop; + sproc->old_heap = c_p->old_heap; + sproc->mbuf = NULL; + sproc->mbuf_sz = 0; + ERTS_INIT_OFF_HEAP(&sproc->off_heap); + + env->hp_end = HEAP_LIMIT(c_p); + env->hp = HEAP_TOP(c_p); + env->heap_frag = NULL; + return; + } +#endif + + cache_env(env); } /* Flush out our cached heap pointers to allow an ordinary HAlloc @@ -600,17 +637,32 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, if (scheduler > 0) { /* Normal scheduler */ rp = erts_proc_lookup(receiver); - if (c_p == rp) - rp_locks = ERTS_PROC_LOCK_MAIN; + if (!rp) + return 0; } else { - if (c_p && ERTS_PROC_IS_EXITING(c_p)) - return 0; - rp = erts_pid2proc_opt(c_p, 0, receiver, rp_locks, + if (c_p) { + ASSERT(scheduler < 0); /* Dirty scheduler */ + if (ERTS_PROC_IS_EXITING(c_p)) + return 0; + + if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) { + erts_smp_proc_lock(c_p, 
ERTS_PROC_LOCK_MAIN); + } + } + + rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, + receiver, rp_locks, ERTS_P2P_FLG_INC_REFC); + if (!rp) { + if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + return 0; + } } - if (rp == NULL) - return 0; + + if (c_p == rp) + rp_locks = ERTS_PROC_LOCK_MAIN; if (menv) { flush_env(msg_env); @@ -631,9 +683,9 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, ErlOffHeap *ohp; Eterm *hp; if (env && !env->tracee) { - flush_env(env); + full_flush_env(env); mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp); - cache_env(env); + full_cache_env(env); } else { erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state); @@ -656,8 +708,11 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, if (!env || !env->tracee) { - if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND)) + if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND)) { + full_flush_env(env); trace_send(c_p, receiver, msg); + full_cache_env(env); + } } #ifdef ERTS_SMP else { @@ -690,10 +745,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) { if (!msgq) { -#ifdef ERTS_SMP - ErtsThrPrgrDelayHandle dhndl; -#endif - msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE, sizeof(ErlTraceMessageQueue)); msgq->receiver = receiver; @@ -707,15 +758,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); -#ifdef ERTS_SMP - if (!scheduler) - dhndl = erts_thr_progress_unmanaged_delay(); -#endif - erts_schedule_flush_trace_messages(t_p->common.id); -#ifdef ERTS_SMP - if (!scheduler) - erts_thr_progress_unmanaged_continue(dhndl); -#endif + erts_schedule_flush_trace_messages(t_p, 0); } else { msgq->len++; *msgq->last = mp; @@ -740,6 +783,8 @@ done: rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks & ~lc_locks) erts_smp_proc_unlock(rp, rp_locks & ~lc_locks); + if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); #endif if (scheduler <= 0) erts_proc_dec_refc(rp); @@ -3502,6 +3547,7 @@ Eterm erts_nif_call_function(Process *p, Process *tracee, struct enif_environment_t env; ErlHeapFragment *orig_hf = MBUF(p); ErlOffHeap orig_oh = MSO(p); + Eterm *orig_htop = HEAP_TOP(p); ASSERT(is_internal_pid(p->common.id)); MBUF(p) = NULL; clear_offheap(&MSO(p)); @@ -3523,6 +3569,7 @@ Eterm erts_nif_call_function(Process *p, Process *tracee, /* restore original heap fragment list */ MBUF(p) = orig_hf; MSO(p) = orig_oh; + HEAP_TOP(p) = orig_htop; } else { /* Nif call was done without a process context, so we create a phony one. 
*/ diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 66f22979ad..c2735b17e1 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -448,6 +448,9 @@ int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP; typedef enum { ERTS_PSTT_GC, /* Garbage Collect */ ERTS_PSTT_CPC, /* Check Process Code */ +#ifdef ERTS_NEW_PURGE_STRATEGY + ERTS_PSTT_CLA, /* Copy Literal Area */ +#endif ERTS_PSTT_COHMQ, /* Change off heap message queue */ ERTS_PSTT_FTMQ /* Flush trace msg queue */ } ErtsProcSysTaskType; @@ -6671,14 +6674,24 @@ erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks) } static int -schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st) +schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, + erts_aint32_t *fail_state_p) { int res; int locked; ErtsProcSysTaskQs *stqs, *free_stqs; - erts_aint32_t state, a, n, enq_prio; + erts_aint32_t fail_state, state, a, n, enq_prio; int enqueue; /* < 0 -> use proxy */ unsigned int prof_runnable_procs; + int strict_fail_state; + + fail_state = *fail_state_p; + /* + * If fail state something other than just exiting process, + * ensure that the task wont be scheduled when the + * receiver is in the failure state. + */ + strict_fail_state = fail_state != ERTS_PSFLG_EXITING; res = 1; /* prepare for success */ st->next = st->prev = st; /* Prep for empty prio queue */ @@ -6705,7 +6718,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st) erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); state = erts_smp_atomic32_read_nob(&p->state); - if (state & ERTS_PSFLG_EXITING) { + if (state & fail_state) { + *fail_state_p = (state & fail_state); free_stqs = stqs; res = 0; goto cleanup; @@ -6759,7 +6773,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st) /* Status lock prevents out of order "runnable proc" trace msgs */ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - if (!prof_runnable_procs) { + if (!prof_runnable_procs && !strict_fail_state) { erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); locked = 0; } @@ -6770,6 +6784,11 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st) erts_aint32_t e; n = e = a; + if (strict_fail_state && (a & fail_state)) { + *fail_state_p = (a & fail_state); + goto cleanup; + } + if (a & ERTS_PSFLG_FREE) goto cleanup; /* We don't want to schedule free processes... 
*/ @@ -8177,6 +8196,8 @@ sched_dirty_cpu_thread_func(void *vesdp) esdp->thr_id += erts_no_schedulers; + erts_msacc_init_thread("dirty_cpu_scheduler", no, 0); + erts_thr_progress_register_unmanaged_thread(&callbacks); #ifdef ERTS_ENABLE_LOCK_CHECK { @@ -8222,6 +8243,8 @@ sched_dirty_io_thread_func(void *vesdp) esdp->thr_id += erts_no_schedulers + erts_no_dirty_cpu_schedulers; + erts_msacc_init_thread("dirty_io_scheduler", no, 0); + erts_thr_progress_register_unmanaged_thread(&callbacks); #ifdef ERTS_ENABLE_LOCK_CHECK { @@ -9088,6 +9111,27 @@ resume_process_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); } +BIF_RETTYPE +erts_internal_is_process_executing_dirty_1(BIF_ALIST_1) +{ + if (is_not_internal_pid(BIF_ARG_1)) + BIF_ERROR(BIF_P, BADARG); +#ifdef ERTS_DIRTY_SCHEDULERS + else { + Process *rp = erts_proc_lookup(BIF_ARG_1); + if (rp) { + erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + if (state & (ERTS_PSFLG_DIRTY_RUNNING + |ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + BIF_RET(am_true); + } + } + } +#endif + BIF_RET(am_false); +} + + Uint erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched) { @@ -9494,15 +9538,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) p->reds += actual_reds; -#ifdef ERTS_SMP - erts_smp_proc_lock(p, ERTS_PROC_LOCK_TRACE); - if (p->trace_msg_q) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_TRACE); - erts_schedule_flush_trace_messages(p->common.id); - } else - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_TRACE); -#endif - state = erts_smp_atomic32_read_nob(&p->state); if (IS_TRACED(p)) { @@ -9522,7 +9557,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + +#ifdef ERTS_SMP + if (p->trace_msg_q) { + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_schedule_flush_trace_messages(p, 1); + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + } +#endif /* have to re-read state after taking lock */ state = erts_smp_atomic32_read_nob(&p->state); @@ -9530,9 +9573,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) #ifdef ERTS_SMP if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT)) erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN + | ERTS_PROC_LOCK_TRACE | ERTS_PROC_LOCK_STATUS)); if (p->pending_suspenders) handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN + | ERTS_PROC_LOCK_TRACE | ERTS_PROC_LOCK_STATUS)); #endif @@ -9552,7 +9597,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) p->scheduler_data = NULL; #endif - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); + erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN + | ERTS_PROC_LOCK_STATUS + | ERTS_PROC_LOCK_TRACE)); ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER); @@ -9588,8 +9635,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) erts_smp_runq_lock(rq); } } - BM_STOP_TIMER(system); - } ERTS_SMP_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking()); @@ -9825,10 +9870,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) goto check_activities_to_run; } - ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR); - - BM_START_TIMER(system); - /* * Take the chosen process out of the queue. 
*/ @@ -9934,6 +9975,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR); + #ifdef ERTS_SMP if (flags & ERTS_RUNQ_FLG_PROTECTED) @@ -9984,6 +10027,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (state & (ERTS_PSFLG_ACTIVE_SYS | ERTS_PSFLG_PENDING_EXIT | ERTS_PSFLG_EXITING)) { + /* + * IMPORTANT! We need to take care of + * scheduled check-process-code requests + * before continuing with dirty execution! + */ /* Migrate to normal scheduler... */ goto sunlock_sched_out_proc; } @@ -10393,6 +10441,27 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) } break; } +#ifdef ERTS_NEW_PURGE_STRATEGY + case ERTS_PSTT_CLA: { + int fcalls; + int cla_reds = 0; + if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) + fcalls = reds; + else + fcalls = reds - CONTEXT_REDS; + st_res = erts_proc_copy_literal_area(c_p, + &cla_reds, + fcalls, + st->arg[0] == am_true); + reds -= cla_reds; + if (is_non_value(st_res)) { + /* Needed gc, but gc was disabled */ + save_gc_task(c_p, st, st_prio); + st = NULL; + } + break; + } +#endif case ERTS_PSTT_COHMQ: reds -= erts_complete_off_heap_message_queue_change(c_p); st_res = am_true; @@ -10443,14 +10512,15 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) switch (st->type) { case ERTS_PSTT_GC: - st_res = am_false; - break; case ERTS_PSTT_CPC: - st_res = am_false; - break; case ERTS_PSTT_COHMQ: st_res = am_false; break; +#ifdef ERTS_NEW_PURGE_STRATEGY + case ERTS_PSTT_CLA: + st_res = am_ok; + break; +#endif #ifdef ERTS_SMP case ERTS_PSTT_FTMQ: reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN); @@ -10471,22 +10541,83 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) return reds; } -BIF_RETTYPE -erts_internal_request_system_task_3(BIF_ALIST_3) +#ifdef ERTS_DIRTY_SCHEDULERS + +static BIF_RETTYPE +dispatch_system_task(Process *c_p, erts_aint_t fail_state, + ErtsProcSysTask *st, Eterm target, + Eterm prio, Eterm operation) { - Process *rp = erts_proc_lookup(BIF_ARG_1); + Process *rp; + ErtsProcLocks rp_locks = 0; + ErlOffHeap *ohp; + ErtsMessage *mp; + Eterm *hp, msg; + Uint hsz, osz; + BIF_RETTYPE ret; + + switch (st->type) { + case ERTS_PSTT_CPC: + rp = erts_dirty_process_code_checker; + ASSERT(fail_state & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)); + if (c_p == rp) { + ERTS_BIF_PREP_RET(ret, am_dirty_execution); + return ret; + } + break; + default: + rp = NULL; + ERTS_INTERNAL_ERROR("Non-dispatchable system task"); + break; + } + + ERTS_BIF_PREP_RET(ret, am_ok); + + /* + * Send message on the form: {Requester, Target, Operation} + */ + + ASSERT(is_immed(st->requester)); + ASSERT(is_immed(target)); + ASSERT(is_immed(prio)); + + osz = size_object(operation); + hsz = 5 /* 4-tuple */ + osz; + + mp = erts_alloc_message_heap(rp, &rp_locks, hsz, &hp, &ohp); + + msg = copy_struct(operation, osz, &hp, ohp); + msg = TUPLE4(hp, st->requester, target, prio, msg); + + erts_queue_message(rp, rp_locks, mp, msg, st->requester); + + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + + return ret; +} + +#endif + +static BIF_RETTYPE +request_system_task(Process *c_p, Eterm requester, Eterm target, + Eterm priority, Eterm operation) +{ + BIF_RETTYPE ret; + Process *rp = erts_proc_lookup(target); ErtsProcSysTask *st = NULL; - erts_aint32_t prio; + erts_aint32_t prio, fail_state = ERTS_PSFLG_EXITING; Eterm noproc_res, req_type; - if (!rp && !is_internal_pid(BIF_ARG_1)) { - if 
(!is_external_pid(BIF_ARG_1)) + if (!rp && !is_internal_pid(target)) { + if (!is_external_pid(target)) goto badarg; - if (external_pid_dist_entry(BIF_ARG_1) != erts_this_dist_entry) + if (external_pid_dist_entry(target) != erts_this_dist_entry) goto badarg; } - switch (BIF_ARG_2) { + switch (priority) { case am_max: prio = PRIORITY_MAX; break; case am_high: prio = PRIORITY_HIGH; break; case am_normal: prio = PRIORITY_NORMAL; break; @@ -10494,11 +10625,11 @@ erts_internal_request_system_task_3(BIF_ALIST_3) default: goto badarg; } - if (is_not_tuple(BIF_ARG_3)) + if (is_not_tuple(operation)) goto badarg; else { int i; - Eterm *tp = tuple_val(BIF_ARG_3); + Eterm *tp = tuple_val(operation); Uint arity = arityval(*tp); Eterm req_id; Uint req_id_sz; @@ -10536,7 +10667,7 @@ erts_internal_request_system_task_3(BIF_ALIST_3) ERTS_INIT_OFF_HEAP(&st->off_heap); hp = &st->heap[0]; - st->requester = BIF_P->common.id; + st->requester = requester; st->reply_tag = req_type; st->req_id_sz = req_id_sz; st->req_id = req_id_sz == 0 ? req_id : copy_struct(req_id, @@ -10570,26 +10701,85 @@ erts_internal_request_system_task_3(BIF_ALIST_3) st->type = ERTS_PSTT_CPC; if (!rp) goto noproc; +#ifdef ERTS_DIRTY_SCHEDULERS + /* + * If the process should start executing dirty + * code it is important that this task is + * aborted. Therefore this strict fail state... + */ + fail_state |= (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS); +#endif break; +#ifdef ERTS_NEW_PURGE_STRATEGY + case am_copy_literals: + if (st->arg[0] != am_true && st->arg[0] != am_false) + goto badarg; + st->type = ERTS_PSTT_CLA; + noproc_res = am_ok; + if (!rp) + goto noproc; + break; +#endif + default: goto badarg; } - if (!schedule_process_sys_task(rp, prio, st)) { - noproc: - notify_sys_task_executed(BIF_P, st, noproc_res); + if (!schedule_process_sys_task(rp, prio, st, &fail_state)) { + Eterm failure; + if (fail_state & ERTS_PSFLG_EXITING) { + noproc: + failure = noproc_res; + } +#ifdef ERTS_DIRTY_SCHEDULERS + else if (fail_state & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + ret = dispatch_system_task(c_p, fail_state, st, + target, priority, operation); + goto cleanup_return; + } +#endif + else { + ERTS_INTERNAL_ERROR("Unknown failure schedule_process_sys_task()"); + failure = am_internal_error; + } + notify_sys_task_executed(c_p, st, failure); } - BIF_RET(am_ok); + ERTS_BIF_PREP_RET(ret, am_ok); + + return ret; badarg: + ERTS_BIF_PREP_ERROR(ret, c_p, BADARG); + +#ifdef ERTS_DIRTY_SCHEDULERS +cleanup_return: +#endif + if (st) { erts_cleanup_offheap(&st->off_heap); erts_free(ERTS_ALC_T_PROC_SYS_TSK, st); } - BIF_ERROR(BIF_P, BADARG); + + return ret; +} + +BIF_RETTYPE +erts_internal_request_system_task_3(BIF_ALIST_3) +{ + return request_system_task(BIF_P, BIF_P->common.id, + BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); +} + +BIF_RETTYPE +erts_internal_request_system_task_4(BIF_ALIST_4) +{ + return request_system_task(BIF_P, BIF_ARG_1, + BIF_ARG_2, BIF_ARG_3, BIF_ARG_4); } static void @@ -10598,7 +10788,7 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type) Process *rp = erts_proc_lookup(pid); if (rp) { ErtsProcSysTask *st; - erts_aint32_t state; + erts_aint32_t state, fail_state; int i; st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK, @@ -10613,21 +10803,103 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type) ERTS_INIT_OFF_HEAP(&st->off_heap); state = erts_smp_atomic32_read_nob(&rp->state); - if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state), st)) + fail_state = 
ERTS_PSFLG_EXITING; + + if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state), + st, &fail_state)) erts_free(ERTS_ALC_T_PROC_SYS_TSK, st); } } + void erts_schedule_complete_off_heap_message_queue_change(Eterm pid) { erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ); } +#ifdef ERTS_DIRTY_SCHEDULERS + +static void +flush_dirty_trace_messages(void *vpid) +{ + Process *proc; + Eterm pid; +#ifdef ARCH_64 + pid = (Eterm) vpid; +#else + pid = *((Eterm *) vpid); + erts_free(ERTS_ALC_T_DIRTY_SL, vpid); +#endif + + proc = erts_proc_lookup(pid); + if (proc) + (void) erts_flush_trace_messages(proc, 0); +} + +#endif /* ERTS_DIRTY_SCHEDULERS */ + void -erts_schedule_flush_trace_messages(Eterm pid) +erts_schedule_flush_trace_messages(Process *proc, int force_on_proc) { +#ifdef ERTS_SMP + ErtsThrPrgrDelayHandle dhndl; +#endif + Eterm pid = proc->common.id; + +#ifdef ERTS_DIRTY_SCHEDULERS + erts_aint32_t state; + + if (!force_on_proc) { + state = erts_smp_atomic32_read_nob(&proc->state); + if (state & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + goto sched_flush_dirty; + } + } +#endif + +#ifdef ERTS_SMP + dhndl = erts_thr_progress_unmanaged_delay(); +#endif + erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ); + +#ifdef ERTS_SMP + erts_thr_progress_unmanaged_continue(dhndl); +#endif + +#ifdef ERTS_DIRTY_SCHEDULERS + if (!force_on_proc) { + state = erts_smp_atomic32_read_mb(&proc->state); + if (state & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + void *vargp; + + sched_flush_dirty: + /* + * We traced 'proc' from another thread than + * it is executing on, and it is executing + * on a dirty scheduler. It might take a + * significant amount of time before it is + * scheduled out (where it gets opportunity + * to flush messages). We therefore schedule + * the flush on the first ordinary scheduler. + */ + +#ifdef ARCH_64 + vargp = (void *) pid; +#else + { + Eterm *argp = erts_alloc(ERTS_ALC_T_DIRTY_SL, sizeof(Eterm)); + *argp = pid; + vargp = (void *) argp; + } +#endif + erts_schedule_misc_aux_work(1, flush_dirty_trace_messages, vargp); + } + } +#endif } static void @@ -11154,18 +11426,11 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). || (erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ)); -#ifdef BM_COUNTERS - processes_busy++; -#endif - BM_COUNT(processes_spawned); - - BM_SWAP_TIMER(system,size); #ifdef SHCOPY_SPAWN arg_size = copy_shared_calculate(args, &info); #else arg_size = size_object(args); #endif - BM_SWAP_TIMER(size,system); heap_need = arg_size; p->flags = flags; @@ -11242,18 +11507,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
p->max_arg_reg = sizeof(p->def_arg_reg)/sizeof(p->def_arg_reg[0]); p->arg_reg[0] = mod; p->arg_reg[1] = func; - BM_STOP_TIMER(system); - BM_MESSAGE(args,p,parent); - BM_START_TIMER(system); - BM_SWAP_TIMER(system,copy); #ifdef SHCOPY_SPAWN p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap); DESTROY_SHCOPY(info); #else p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap); #endif - BM_MESSAGE_COPIED(arg_size); - BM_SWAP_TIMER(copy,system); p->arity = 3; p->fvalue = NIL; @@ -12289,7 +12548,6 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) erts_port_demonitor(pcontext->p, ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED, prt, mon->ref, NULL); - return; /* let erts_port_demonitor do the deletion */ } else { /* remote by pid */ ASSERT(is_external_pid(mon->pid)); dep = external_pid_dist_entry(mon->pid); @@ -12826,9 +13084,6 @@ erts_continue_exit_process(Process *p) dep = (p->flags & F_DISTRIBUTION) ? erts_this_dist_entry : NULL; erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); -#ifdef BM_COUNTERS - processes_busy--; -#endif if (dep) { erts_do_net_exits(dep, reason); diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 7c98b60647..f1788c3f71 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -989,8 +989,7 @@ struct process { Uint32 rcount; /* suspend count */ int schedule_count; /* Times left to reschedule a low prio process */ Uint reds; /* No of reductions for this process */ - Eterm group_leader; /* Pid in charge - (can be boxed) */ + Eterm group_leader; /* Pid in charge (can be boxed) */ Uint flags; /* Trap exit, etc (no trace flags anymore) */ Eterm fvalue; /* Exit & Throw value (failure reason) */ Uint freason; /* Reason for detected failure */ @@ -1768,7 +1767,7 @@ void erts_schedule_thr_prgr_later_cleanup_op(void (*)(void *), ErtsThrPrgrLaterOp *, UWord); void erts_schedule_complete_off_heap_message_queue_change(Eterm pid); -void erts_schedule_flush_trace_messages(Eterm pid); +void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc); int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks); #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c index 21938e7684..26d71f573f 100644 --- a/erts/emulator/beam/erl_thr_progress.c +++ b/erts/emulator/beam/erl_thr_progress.c @@ -969,8 +969,10 @@ erts_thr_progress_unmanaged_continue__(ErtsThrPrgrDelayHandle handle) #ifdef ERTS_ENABLE_LOCK_CHECK ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL); ERTS_LC_ASSERT(tpd && tpd->is_delaying); - tpd->is_delaying = 0; - return_tmp_thr_prgr_data(tpd); + tpd->is_delaying--; + ASSERT(tpd->is_delaying >= 0); + if (!tpd->is_delaying) + return_tmp_thr_prgr_data(tpd); #endif ASSERT(!erts_thr_progress_is_managed_thread()); @@ -995,7 +997,7 @@ erts_thr_progress_unmanaged_delay__(void) #ifdef ERTS_ENABLE_LOCK_CHECK { ErtsThrPrgrData *tpd = tmp_thr_prgr_data(NULL); - tpd->is_delaying = 1; + tpd->is_delaying++; } #endif return (ErtsThrPrgrDelayHandle) umrefc_ix; diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index 4cf38bf894..f4d92564c1 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -493,8 +493,8 @@ erts_get_system_seq_tracer(void) if (st != erts_tracer_nil && call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED, am_trace_status, am_undefined) == am_remove) { - erts_set_system_seq_tracer(NULL, 0, erts_tracer_nil); - st = 
erts_tracer_nil; + st = erts_set_system_seq_tracer(NULL, 0, erts_tracer_nil); + ERTS_TRACER_CLEAR(&st); } return st; @@ -813,6 +813,9 @@ trace_send(Process *p, Eterm to, Eterm msg) ErtsTracerNif *tnif = NULL; ErtsTracingEvent* te; Eterm pam_result; +#ifdef ERTS_SMP + ErtsThrPrgrDelayHandle dhndl; +#endif ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND)); @@ -837,6 +840,10 @@ trace_send(Process *p, Eterm to, Eterm msg) } else pam_result = am_true; +#ifdef ERTS_SMP + dhndl = erts_thr_progress_unmanaged_delay(); +#endif + if (is_internal_pid(to)) { if (!erts_proc_lookup(to)) goto send_to_non_existing_process; @@ -852,6 +859,11 @@ trace_send(Process *p, Eterm to, Eterm msg) send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_SEND, operation, msg, to, pam_result); } + +#ifdef ERTS_SMP + erts_thr_progress_unmanaged_continue(dhndl); +#endif + erts_match_set_release_result_trace(p, pam_result); } @@ -1167,6 +1179,8 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, Eterm transformed_args[MAX_ARG]; ErtsTracer pre_ms_tracer = erts_tracer_nil; + ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN); + ASSERT(tracer); if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) { /* Breakpoint trace enabled without specifying tracer => @@ -3108,6 +3122,7 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer) if (is_not_nil(*tracer)) { Uint offs = 2; UWord size = 2 * sizeof(Eterm) + sizeof(ErtsThrPrgrLaterOp); + ErtsThrPrgrLaterOp *lop; ASSERT(is_list(*tracer)); if (is_not_immed(ERTS_TRACER_STATE(*tracer))) { hf = (void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem))); @@ -3115,6 +3130,16 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer) size = hf->alloc_size * sizeof(Eterm) + sizeof(ErlHeapFragment); ASSERT(offs == size_object(*tracer)); } + + /* sparc assumes that all structs are double word aligned, so we + have to align the ErtsThrPrgrLaterOp struct otherwise it may + segfault.*/ + if ((UWord)(ptr_val(*tracer) + offs) % (sizeof(UWord)*2) == sizeof(UWord)) + offs += 1; + + lop = (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs); + ASSERT((UWord)lop % (sizeof(UWord)*2) == 0); + /* We schedule the free:ing of the tracer until after a thread progress has been made so that we know that no schedulers have any references to it. Because we do this, it is possible to release all locks of a @@ -3122,9 +3147,7 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer) without having to worry if it is free'd. 
*/ erts_schedule_thr_prgr_later_cleanup_op( - free_tracer, (void*)(*tracer), - (ErtsThrPrgrLaterOp*)(ptr_val(*tracer) + offs), - size); + free_tracer, (void*)(*tracer), lop, size); } if (is_nil(new_tracer)) { @@ -3141,9 +3164,10 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer) Eterm *hp, tracer_state = ERTS_TRACER_STATE(new_tracer), tracer_module = ERTS_TRACER_MODULE(new_tracer); Uint sz = size_object(tracer_state); - hf = new_message_buffer(sz + 2 /* cons cell */ + (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm)); + hf = new_message_buffer(sz + 2 /* cons cell */ + + (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1); hp = hf->mem + 2; - hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm); + hf->used_size -= (sizeof(ErtsThrPrgrLaterOp)+sizeof(Eterm)-1)/sizeof(Eterm) + 1; *tracer = copy_struct(tracer_state, sz, &hp, &hf->off_heap); *tracer = CONS(hf->mem, tracer_module, *tracer); ASSERT((void*)(((char*)(ptr_val(*tracer)) - offsetof(ErlHeapFragment, mem))) == hf); diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h index 8c81cbd410..1e7bb8514b 100644 --- a/erts/emulator/beam/export.h +++ b/erts/emulator/beam/export.h @@ -21,14 +21,8 @@ #ifndef __EXPORT_H__ #define __EXPORT_H__ -#ifndef __SYS_H__ #include "sys.h" -#endif - -#ifndef __INDEX_H__ #include "index.h" -#endif - #include "code_ix.h" /* diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index f3d4ac56cd..e6dc5303a8 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -35,7 +35,6 @@ #include "register.h" #include "erl_fun.h" #include "erl_node_tables.h" -#include "benchmark.h" #include "erl_process.h" #include "erl_sys_driver.h" #include "erl_debug.h" @@ -62,9 +61,12 @@ struct enif_environment_t /* ErlNifEnv */ extern void erts_pre_nif(struct enif_environment_t*, Process*, struct erl_module_nif*, Process* tracee); extern void erts_post_nif(struct enif_environment_t* env); +#ifdef ERTS_DIRTY_SCHEDULERS extern void erts_pre_dirty_nif(ErtsSchedulerData *, struct enif_environment_t*, Process*, - struct erl_module_nif*, Process* tracee); + struct erl_module_nif*); +extern void erts_post_dirty_nif(struct enif_environment_t* env); +#endif extern Eterm erts_nif_taints(Process* p); extern void erts_print_nif_taints(int to, void* to_arg); void erts_unload_nif(struct erl_module_nif* nif); @@ -999,17 +1001,30 @@ Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2); /* beam_bif_load.c */ #define ERTS_CPC_ALLOW_GC (1 << 0) -#define ERTS_CPC_COPY_LITERALS (1 << 1) -#define ERTS_CPC_ALL (ERTS_CPC_ALLOW_GC | ERTS_CPC_COPY_LITERALS) +#define ERTS_CPC_ALL ERTS_CPC_ALLOW_GC Eterm erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls); +#ifdef ERTS_NEW_PURGE_STRATEGY +Eterm erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed); +#endif -typedef struct { - Eterm *ptr; - Uint sz; - Eterm pid; -} copy_literals_t; +typedef struct ErtsLiteralArea_ { + struct erl_off_heap_header *off_heap; + Eterm *end; + Eterm start[1]; /* beginning of area */ +} ErtsLiteralArea; -extern copy_literals_t erts_clrange; +#define ERTS_LITERAL_AREA_ALLOC_SIZE(N) \ + (sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1)) + +extern ErtsLiteralArea *erts_copy_literal_area; +#ifdef ERTS_NEW_PURGE_STRATEGY +extern Process *erts_literal_area_collector; +#endif +#ifdef ERTS_DIRTY_SCHEDULERS +extern Process *erts_dirty_process_code_checker; +#endif + +extern Process *erts_code_purger; /* 
beam_load.c */ typedef struct { @@ -1092,12 +1107,19 @@ typedef struct { #define INITIALIZE_SHCOPY(info) \ do { \ + ErtsLiteralArea *larea__ = erts_copy_literal_area; \ info.queue_start = info.queue_default; \ info.bitstore_start = info.bitstore_default; \ info.shtable_start = info.shtable_default; \ info.literal_size = 0; \ - info.range_ptr = erts_clrange.ptr; \ - info.range_sz = erts_clrange.sz; \ + if (larea__) { \ + info.range_ptr = &larea__->start[0]; \ + info.range_sz = larea__->end - info.range_ptr; \ + } \ + else { \ + info.range_ptr = NULL; \ + info.range_sz = 0; \ + } \ } while(0) #define DESTROY_SHCOPY(info) \ diff --git a/erts/emulator/beam/hash.c b/erts/emulator/beam/hash.c index e255b961f1..cd038d100b 100644 --- a/erts/emulator/beam/hash.c +++ b/erts/emulator/beam/hash.c @@ -35,9 +35,9 @@ static const int h_size_table[] = { 2, 5, 11, 23, 47, 97, 197, 397, 797, /* double upto here */ 1201, 1597, - 2411, 3203, + 2411, 3203, 4813, 6421, - 9643, 12853, + 9643, 12853, 19289, 25717, 51437, 102877, @@ -49,8 +49,8 @@ static const int h_size_table[] = { 6584983, 13169977, 26339969, - 52679969, - -1 + 52679969, + -1 }; /* @@ -69,7 +69,7 @@ void hash_get_info(HashInfo *hi, Hash *h) for (i = 0; i < size; i++) { int depth = 0; HashBucket* b = h->bucket[i]; - + while (b != (HashBucket*) 0) { objects++; depth++; @@ -112,7 +112,7 @@ void hash_info(int to, void *arg, Hash* h) /* * Returns size of table in bytes. Stored objects not included. */ -int +int hash_table_sz(Hash *h) { int i; @@ -190,7 +190,7 @@ void hash_delete(Hash* h) HashBucket* b = h->bucket[i]; while (b != (HashBucket*) 0) { HashBucket* b_next = b->next; - + h->fun.free((void*) b); b = b_next; } @@ -250,7 +250,7 @@ void* hash_get(Hash* h, void* tmpl) HashValue hval = h->fun.hash(tmpl); int ix = hval % h->size; HashBucket* b = h->bucket[ix]; - + while(b != (HashBucket*) 0) { if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) return (void*) b; @@ -294,7 +294,7 @@ void* hash_erase(Hash* h, void* tmpl) int ix = hval % h->size; HashBucket* b = h->bucket[ix]; HashBucket* prev = 0; - + while(b != 0) { if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) { if (prev != 0) @@ -326,7 +326,7 @@ hash_remove(Hash *h, void *tmpl) int ix = hval % h->size; HashBucket *b = h->bucket[ix]; HashBucket *prev = NULL; - + while (b) { if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) { if (prev) @@ -355,4 +355,3 @@ void hash_foreach(Hash* h, void (*func)(void *, void *), void *func_arg2) } } } - diff --git a/erts/emulator/beam/hash.h b/erts/emulator/beam/hash.h index 9f773d8faa..4e769c0119 100644 --- a/erts/emulator/beam/hash.h +++ b/erts/emulator/beam/hash.h @@ -25,9 +25,7 @@ #ifndef __HASH_H__ #define __HASH_H__ -#ifndef __SYS_H__ #include "sys.h" -#endif typedef unsigned long HashValue; typedef struct hash Hash; diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h index 218779c33b..0a109d8699 100644 --- a/erts/emulator/beam/index.h +++ b/erts/emulator/beam/index.h @@ -26,13 +26,8 @@ #ifndef __INDEX_H__ #define __INDEX_H__ -#ifndef __HASH_H__ #include "hash.h" -#endif - -#ifndef ERL_ALLOC_H__ #include "erl_alloc.h" -#endif typedef struct index_slot { diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h index 4e12731d85..5a60bc90d9 100644 --- a/erts/emulator/beam/module.h +++ b/erts/emulator/beam/module.h @@ -21,9 +21,7 @@ #ifndef __MODULE_H__ #define __MODULE_H__ -#ifndef __INDEX_H__ #include "index.h" -#endif struct erl_module_instance { BeamCodeHeader* code_hdr; diff --git 
a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h index 6910b33004..285103cb17 100644 --- a/erts/emulator/beam/safe_hash.h +++ b/erts/emulator/beam/safe_hash.h @@ -26,14 +26,9 @@ #ifndef __SAFE_HASH_H__ #define __SAFE_HASH_H__ - -#ifndef __SYS_H__ #include "sys.h" -#endif - #include "erl_alloc.h" - typedef unsigned long SafeHashValue; typedef int (*SHCMP_FUN)(void*, void*); diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 675fafa726..85647b8500 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -85,7 +85,7 @@ erts_heap_alloc(Process* p, Uint need, Uint xtra) && HEAP_TOP(p) >= p->space_verified_from && HEAP_TOP(p) + need <= p->space_verified_from + p->space_verified && HEAP_LIMIT(p) - HEAP_TOP(p) >= need) { - + Uint consumed = need + (HEAP_TOP(p) - p->space_verified_from); ASSERT(consumed <= p->space_verified); p->space_verified -= consumed; @@ -638,7 +638,7 @@ erts_bld_atom_uword_2tup_list(Uint **hpp, Uint *szp, ui = uint_to_big(uints[i], *hpp); *hpp += BIG_UINT_HEAP_SIZE; } - + res = CONS(*hpp+3, TUPLE2(*hpp, atoms[i], ui), res); *hpp += 5; } @@ -676,14 +676,14 @@ erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length, ui1 = uint_to_big(uints1[i], *hpp); *hpp += BIG_UINT_HEAP_SIZE; } - + if (IS_USMALL(0, uints2[i])) ui2 = make_small(uints2[i]); else { ui2 = uint_to_big(uints2[i], *hpp); *hpp += BIG_UINT_HEAP_SIZE; } - + res = CONS(*hpp+4, TUPLE3(*hpp, atoms[i], ui1, ui2), res); *hpp += 6; } @@ -794,7 +794,7 @@ hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash) Uint b; Uint lshift = bitoffs; Uint rshift = 8 - lshift; - + while (sz--) { b = (previous << lshift) & 0xFF; previous = *ptr++; @@ -805,7 +805,7 @@ hash_binary_bytes(Eterm bin, Uint sz, Uint32 hash) b = (previous << lshift) & 0xFF; previous = *ptr++; b |= previous >> rshift; - + b >>= 8 - bitsize; hash = (hash*FUNNY_NUMBER1 + b) * FUNNY_NUMBER12 + bitsize; } @@ -835,21 +835,21 @@ Uint32 make_hash(Eterm term_arg) do { \ Uint32 x = (Uint32) (Expr); \ hash = \ - (((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) + \ - ((x >> 8) & 0xFF)) * (Prime1) + \ - ((x >> 16) & 0xFF)) * (Prime1) + \ + (((((hash)*(Prime1) + (x & 0xFF)) * (Prime1) + \ + ((x >> 8) & 0xFF)) * (Prime1) + \ + ((x >> 16) & 0xFF)) * (Prime1) + \ (x >> 24)); \ } while(0) -#define UINT32_HASH_RET(Expr, Prime1, Prime2) \ +#define UINT32_HASH_RET(Expr, Prime1, Prime2) \ UINT32_HASH_STEP(Expr, Prime1); \ hash = hash * (Prime2); \ - break - - + break + + /* * Significant additions needed for real 64 bit port with larger fixnums. 
- */ + */ /* * Note, for the simple 64bit port, not utilizing the @@ -864,7 +864,7 @@ tail_recur: hash = hash*FUNNY_NUMBER3 + 1; break; case ATOM_DEF: - hash = hash*FUNNY_NUMBER1 + + hash = hash*FUNNY_NUMBER1 + (atom_tab(atom_val(term))->slot.bucket.hvalue); break; case SMALL_DEF: @@ -893,9 +893,9 @@ tail_recur: Export* ep = *((Export **) (export_val(term) + 1)); hash = hash * FUNNY_NUMBER11 + ep->code[2]; - hash = hash*FUNNY_NUMBER1 + + hash = hash*FUNNY_NUMBER1 + (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue); - hash = hash*FUNNY_NUMBER1 + + hash = hash*FUNNY_NUMBER1 + (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue); break; } @@ -906,7 +906,7 @@ tail_recur: Uint num_free = funp->num_free; hash = hash * FUNNY_NUMBER10 + num_free; - hash = hash*FUNNY_NUMBER1 + + hash = hash*FUNNY_NUMBER1 + (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue); hash = hash*FUNNY_NUMBER2 + funp->fe->old_index; hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq; @@ -931,7 +931,7 @@ tail_recur: UINT32_HASH_RET(internal_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10); case EXTERNAL_REF_DEF: UINT32_HASH_RET(external_ref_numbers(term)[0],FUNNY_NUMBER9,FUNNY_NUMBER10); - case FLOAT_DEF: + case FLOAT_DEF: { FloatDef ff; GET_DOUBLE(term, ff); @@ -958,12 +958,12 @@ tail_recur: ** as multiplications on a Sparc is so slow. */ hash = hash*FUNNY_NUMBER2 + unsigned_val(*list); - + if (is_not_list(CDR(list))) { WSTACK_PUSH(stack, MAKE_HASH_CDR_POST_OP); term = CDR(list); goto tail_recur; - } + } list = list_val(CDR(list)); } WSTACK_PUSH2(stack, CDR(list), MAKE_HASH_CDR_PRE_OP); @@ -1004,17 +1004,17 @@ tail_recur: } hash *= is_neg ? FUNNY_NUMBER4 : FUNNY_NUMBER3; break; - } + } case MAP_DEF: hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term); break; - case TUPLE_DEF: + case TUPLE_DEF: { Eterm* ptr = tuple_val(term); Uint arity = arityval(*ptr); WSTACK_PUSH3(stack, (UWord) arity, (UWord)(ptr+1), (UWord) arity); - op = MAKE_HASH_TUPLE_OP; + op = MAKE_HASH_TUPLE_OP; }/*fall through*/ case MAKE_HASH_TUPLE_OP: case MAKE_HASH_TERM_ARRAY_OP: @@ -1031,8 +1031,8 @@ tail_recur: hash = hash*FUNNY_NUMBER9 + arity; } break; - } - + } + default: erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash(0x%X,0x%X)\n", term, op); return 0; @@ -1159,8 +1159,8 @@ make_hash2(Eterm term) if (y < 0) { \ UINT32_HASH(-y, AConst); \ /* Negative numbers are unnecessarily mixed twice. 
*/ \ - } \ - UINT32_HASH(y, AConst); \ + } \ + UINT32_HASH(y, AConst); \ } while(0) #define IS_SSMALL28(x) (((Uint) (((x) >> (28-1)) + 1)) < 2) @@ -1242,7 +1242,7 @@ make_hash2(Eterm term) int arity = header_arity(hdr); Eterm* elem = tuple_val(term); UINT32_HASH(arity, HCONST_9); - if (arity == 0) /* Empty tuple */ + if (arity == 0) /* Empty tuple */ goto hash2_common; for (i = arity; ; i--) { term = elem[i]; @@ -1329,7 +1329,7 @@ make_hash2(Eterm term) { Export* ep = *((Export **) (export_val(term) + 1)); UINT32_HASH_2 - (ep->code[2], + (ep->code[2], atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue, HCONST); UINT32_HASH @@ -1343,7 +1343,7 @@ make_hash2(Eterm term) ErlFunThing* funp = (ErlFunThing *) fun_val(term); Uint num_free = funp->num_free; UINT32_HASH_2 - (num_free, + (num_free, atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue, HCONST); UINT32_HASH_2 @@ -1468,7 +1468,7 @@ make_hash2(Eterm term) goto hash2_common; } break; - + default: erts_exit(ERTS_ERROR_EXIT, "Invalid tag in make_hash2(0x%X)\n", term); } @@ -1541,7 +1541,7 @@ make_hash2(Eterm term) } case HASH_MAP_PAIR: hash_xor_pairs ^= hash; - hash = 0; + hash = 0; goto hash2_common; default: break; @@ -1678,17 +1678,22 @@ make_internal_hash(Eterm term) * the order in which keys and values are encountered. * We therefore calculate context independent hashes for all . * key-value pairs and then xor them together. + * + * We *do* need to use an initial seed for each pair, i.e. the + * hash value, so the hash value is reset for each pair with 'hash'. + * If we don't, no additional entropy is given to the system and the + * hash collision resolution in maps:from_list/1 would fail. */ ESTACK_PUSH(s, hash_xor_pairs); ESTACK_PUSH(s, hash); ESTACK_PUSH(s, HASH_MAP_TAIL); - hash = 0; - hash_xor_pairs = 0; for (i = size - 1; i >= 0; i--) { + ESTACK_PUSH(s, hash); /* initial seed for all pairs */ ESTACK_PUSH(s, HASH_MAP_PAIR); ESTACK_PUSH(s, vs[i]); ESTACK_PUSH(s, ks[i]); } + hash_xor_pairs = 0; goto pop_next; } case HAMT_SUBTAG_HEAD_ARRAY: @@ -1700,7 +1705,6 @@ make_internal_hash(Eterm term) ESTACK_PUSH(s, hash_xor_pairs); ESTACK_PUSH(s, hash); ESTACK_PUSH(s, HASH_MAP_TAIL); - hash = 0; hash_xor_pairs = 0; } switch (hdr & _HEADER_MAP_SUBTAG_MASK) { @@ -1717,6 +1721,7 @@ make_internal_hash(Eterm term) while (i) { if (is_list(*ptr)) { Eterm* cons = list_val(*ptr); + ESTACK_PUSH(s, hash); /* initial seed for all pairs */ ESTACK_PUSH(s, HASH_MAP_PAIR); ESTACK_PUSH(s, CDR(cons)); ESTACK_PUSH(s, CAR(cons)); @@ -1906,6 +1911,7 @@ make_internal_hash(Eterm term) pop_next: if (ESTACK_ISEMPTY(s)) { DESTROY_ESTACK(s); + return hash; } @@ -1920,7 +1926,7 @@ make_internal_hash(Eterm term) } case HASH_MAP_PAIR: hash_xor_pairs ^= hash; - hash = 0; + hash = (Uint32) ESTACK_POP(s); /* initial seed for all pairs */ goto pop_next; case HASH_CDR: @@ -1953,8 +1959,8 @@ Uint32 make_broken_hash(Eterm term) DECLARE_WSTACK(stack); unsigned op; tail_recur: - op = tag_val_def(term); - for (;;) { + op = tag_val_def(term); + for (;;) { switch (op) { case NIL_DEF: hash = hash*FUNNY_NUMBER3 + 1; @@ -1976,8 +1982,7 @@ tail_recur: { /* like a bignum */ Uint32 y4 = (Uint32) y2; hash = hash*FUNNY_NUMBER2 + ((y4 << 16) | (y4 >> 16)); - if (y3) - { + if (y3) { hash = hash*FUNNY_NUMBER2 + ((y3 << 16) | (y3 >> 16)); arity++; } @@ -2020,9 +2025,9 @@ tail_recur: Export* ep = *((Export **) (export_val(term) + 1)); hash = hash * FUNNY_NUMBER11 + ep->code[2]; - hash = hash*FUNNY_NUMBER1 + + hash = hash*FUNNY_NUMBER1 + (atom_tab(atom_val(ep->code[0]))->slot.bucket.hvalue); 
- hash = hash*FUNNY_NUMBER1 + + hash = hash*FUNNY_NUMBER1 + (atom_tab(atom_val(ep->code[1]))->slot.bucket.hvalue); break; } @@ -2033,7 +2038,7 @@ tail_recur: Uint num_free = funp->num_free; hash = hash * FUNNY_NUMBER10 + num_free; - hash = hash*FUNNY_NUMBER1 + + hash = hash*FUNNY_NUMBER1 + (atom_tab(atom_val(funp->fe->module))->slot.bucket.hvalue); hash = hash*FUNNY_NUMBER2 + funp->fe->old_index; hash = hash*FUNNY_NUMBER2 + funp->fe->old_uniq; @@ -2065,7 +2070,7 @@ tail_recur: case EXTERNAL_REF_DEF: hash = hash*FUNNY_NUMBER9 + external_ref_numbers(term)[0]; break; - case FLOAT_DEF: + case FLOAT_DEF: { FloatDef ff; GET_DOUBLE(term, ff); @@ -2149,7 +2154,7 @@ tail_recur: } #else -#error "unsupported D_EXP size" +#error "unsupported D_EXP size" #endif hash = hash * (is_neg ? FUNNY_NUMBER3 : FUNNY_NUMBER2) + arity; } @@ -2158,14 +2163,14 @@ tail_recur: case MAP_DEF: hash = hash*FUNNY_NUMBER13 + FUNNY_NUMBER14 + make_hash2(term); break; - case TUPLE_DEF: + case TUPLE_DEF: { Eterm* ptr = tuple_val(term); Uint arity = arityval(*ptr); WSTACK_PUSH3(stack, (UWord) arity, (UWord) (ptr+1), (UWord) arity); op = MAKE_HASH_TUPLE_OP; - }/*fall through*/ + }/*fall through*/ case MAKE_HASH_TUPLE_OP: case MAKE_HASH_TERM_ARRAY_OP: { @@ -2193,7 +2198,7 @@ tail_recur: DESTROY_WSTACK(stack); return hash; - + #undef MAKE_HASH_TUPLE_OP #undef MAKE_HASH_TERM_ARRAY_OP #undef MAKE_HASH_CDR_PRE_OP @@ -2353,13 +2358,13 @@ static int do_send_term_to_logger(Eterm tag, Eterm gleader, } static ERTS_INLINE int -send_info_to_logger(Eterm gleader, char *buf, int len) +send_info_to_logger(Eterm gleader, char *buf, int len) { return do_send_to_logger(am_info_msg, gleader, buf, len); } static ERTS_INLINE int -send_warning_to_logger(Eterm gleader, char *buf, int len) +send_warning_to_logger(Eterm gleader, char *buf, int len) { Eterm tag; switch (erts_error_logger_warnings) { @@ -2371,7 +2376,7 @@ send_warning_to_logger(Eterm gleader, char *buf, int len) } static ERTS_INLINE int -send_error_to_logger(Eterm gleader, char *buf, int len) +send_error_to_logger(Eterm gleader, char *buf, int len) { return do_send_to_logger(am_error, gleader, buf, len); } @@ -2613,7 +2618,7 @@ tailrecur_ne: break; /* not equal */ case TAG_PRIMARY_BOXED: - { + { Eterm hdr = *boxed_val(a); switch (hdr & _TAG_HEADER_MASK) { case ARITYVAL_SUBTAG: @@ -2639,7 +2644,7 @@ tailrecur_ne: Uint b_bitsize; Uint a_bitoffs; Uint b_bitoffs; - + if (!is_binary(b)) { goto not_equal; } @@ -2671,7 +2676,7 @@ tailrecur_ne: { ErlFunThing* f1; ErlFunThing* f2; - + if (!is_fun(b)) goto not_equal; f1 = (ErlFunThing *) fun_val(a); @@ -2702,7 +2707,7 @@ tailrecur_ne: if(ap->header == bp->header && ap->node == bp->node) { ASSERT(1 == external_data_words(a)); ASSERT(1 == external_data_words(b)); - + if (ap->data.ui[0] == bp->data.ui[0]) goto pop_next; } break; /* not equal */ @@ -2759,7 +2764,7 @@ tailrecur_ne: if (alen == 3 && blen == 3) { /* Most refs are of length 3 */ if (anum[1] == bnum[1] && anum[2] == bnum[2]) { - goto pop_next; + goto pop_next; } else { goto not_equal; } @@ -2784,7 +2789,7 @@ tailrecur_ne: for (i = common_len; i < blen; i++) if (bnum[i] != 0) goto not_equal; - } + } } goto pop_next; } @@ -2792,7 +2797,7 @@ tailrecur_ne: case NEG_BIG_SUBTAG: { int i; - + if (!is_big(b)) goto not_equal; aa = big_val(a); @@ -2810,7 +2815,7 @@ tailrecur_ne: { FloatDef af; FloatDef bf; - + if (is_float(b)) { GET_DOUBLE(a, af); GET_DOUBLE(b, bf); @@ -2889,7 +2894,7 @@ term_array: /* arrays in 'aa' and 'bb', length in 'sz' */ } goto tailrecur_ne; } - + pop_next: if 
(!WSTACK_ISEMPTY(stack)) { UWord something = WSTACK_POP(stack); @@ -3093,7 +3098,7 @@ tailrecur_ne: } anode = erts_this_node; adata = internal_port_data(a); - + port_common: CMP_NODES(anode, bnode); ON_CMP_GOTO((Sint)(adata - bdata)); @@ -3111,7 +3116,7 @@ tailrecur_ne: } anode = erts_this_node; adata = internal_pid_data(a); - + pid_common: if (adata != bdata) { RETURN_NEQ(adata < bdata ? -1 : 1); @@ -3336,7 +3341,7 @@ tailrecur_ne: diff = f1->num_free - f2->num_free; if (diff != 0) { RETURN_NEQ(diff); - } + } i = f1->num_free; if (i == 0) goto pop_next; aa = f1->env; @@ -3397,10 +3402,10 @@ tailrecur_ne: anum = internal_thing_ref_numbers(athing); alen = internal_thing_ref_no_of_numbers(athing); } - + ref_common: CMP_NODES(anode, bnode); - + ASSERT(alen > 0 && blen > 0); if (alen != blen) { if (alen > blen) { @@ -3418,7 +3423,7 @@ tailrecur_ne: } while (alen < blen); } } - + ASSERT(alen == blen); for (i = (Sint) alen - 1; i >= 0; i--) if (anum[i] != bnum[i]) @@ -3633,8 +3638,8 @@ term_array: /* arrays in 'aa' and 'bb', length in 'i' */ } a = *aa; b = *bb; - goto tailrecur; - + goto tailrecur; + pop_next: if (!WSTACK_ISEMPTY(stack)) { UWord something = WSTACK_POP(stack); @@ -3897,19 +3902,19 @@ intlist_to_buf(Eterm list, char *buf, Sint len) Eterm* listptr; Sint sz = 0; - if (is_nil(list)) + if (is_nil(list)) return 0; if (is_not_list(list)) return -1; listptr = list_val(list); while (sz < len) { - if (!is_byte(*listptr)) + if (!is_byte(*listptr)) return -1; buf[sz++] = unsigned_val(*listptr); if (is_nil(*(listptr + 1))) return(sz); - if (is_not_list(*(listptr + 1))) + if (is_not_list(*(listptr + 1))) return -1; listptr = list_val(*(listptr + 1)); } @@ -4157,10 +4162,10 @@ do { \ } else if (yield_support && --yield_count <= 0) goto L_yield; } - + res = len; - L_return: + L_return: DESTROY_ESTACK(s); @@ -5053,7 +5058,7 @@ Process *p; if(p) print_process_info(ERTS_PRINT_STDERR, NULL, p); } - + void ppi(Eterm pid) { pp(erts_proc_lookup(pid)); @@ -5079,5 +5084,3 @@ ps(Process* p, Eterm* stop) } } #endif - - diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 5ce0e1de9e..254d3baeb1 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -11407,6 +11407,7 @@ static void packet_inet_command(ErlDrvData e, char* buf, ErlDrvSizeT len) VALGRIND_MAKE_MEM_DEFINED(mhdr.msg_control, mhdr.msg_controllen); /*suppress "uninitialised bytes"*/ mhdr.msg_flags = 0; /* Not used with "sendmsg" */ + inet_output_count(desc, data_len); /* Now do the actual sending. NB: "flags" in "sendmsg" itself are NOT used: */ code = sock_sendmsg(desc->s, &mhdr, 0); diff --git a/erts/emulator/hipe/hipe_amd64_asm.m4 b/erts/emulator/hipe/hipe_amd64_asm.m4 index 2c0fbbee2d..409fd0ef89 100644 --- a/erts/emulator/hipe/hipe_amd64_asm.m4 +++ b/erts/emulator/hipe/hipe_amd64_asm.m4 @@ -121,6 +121,22 @@ define(NSP,%rsp)dnl /* + * Debugging macros + * + * Keeps track of whether context has been saved in the debug build, allowing us + * to detect when the garbage collector is called when it shouldn't. + */ +`#ifdef DEBUG +# define SET_GC_UNSAFE \ + movq $1, P_GCUNSAFE(P) +# define SET_GC_SAFE \ + movq $0, P_GCUNSAFE(P) +#else +# define SET_GC_UNSAFE +# define SET_GC_SAFE +#endif' + +/* * Context switching macros. 
*/ `#define SWITCH_C_TO_ERLANG_QUICK \ @@ -133,12 +149,14 @@ define(NSP,%rsp)dnl `#define SAVE_CACHED_STATE \ SAVE_HP; \ - SAVE_FCALLS' + SAVE_FCALLS; \ + SET_GC_SAFE' `#define RESTORE_CACHED_STATE \ RESTORE_HP; \ RESTORE_HEAP_LIMIT; \ - RESTORE_FCALLS' + RESTORE_FCALLS; \ + SET_GC_UNSAFE' `#define SWITCH_C_TO_ERLANG \ RESTORE_CACHED_STATE; \ diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4 index 9cf3bf74fd..21739726bb 100644 --- a/erts/emulator/hipe/hipe_amd64_bifs.m4 +++ b/erts/emulator/hipe/hipe_amd64_bifs.m4 @@ -600,10 +600,11 @@ noproc_primop_interface_0(nbif_handle_fp_exception, erts_restore_fpu) define(gc_bif_interface_0,`nofail_primop_interface_0($1, $2)') /* - * Implement gc_bif_interface_N as standard_bif_interface_N (N=1,2). + * Implement gc_bif_interface_N as standard_bif_interface_N (N=1,2,3). */ define(gc_bif_interface_1,`standard_bif_interface_1($1, $2)') define(gc_bif_interface_2,`standard_bif_interface_2($1, $2)') +define(gc_bif_interface_3,`standard_bif_interface_3($1, $2)') /* * Implement gc_nofail_primop_interface_1 as nofail_primop_interface_1. diff --git a/erts/emulator/hipe/hipe_amd64_glue.S b/erts/emulator/hipe/hipe_amd64_glue.S index b37ed3c68a..f3404888d5 100644 --- a/erts/emulator/hipe/hipe_amd64_glue.S +++ b/erts/emulator/hipe/hipe_amd64_glue.S @@ -94,6 +94,7 @@ ASYM(nbif_return): .nosave_exit: /* switch to C stack */ SWITCH_ERLANG_TO_C_QUICK + SET_GC_SAFE /* restore C callee-save registers, drop frame, return */ movq (%rsp), %rbp # kills P movq 8(%rsp), %rbx @@ -398,6 +399,7 @@ nbif_4_simple_exception: movl %eax, P_NARITY(P) # Note: narity is a 32-bit field /* find and prepare to invoke the handler */ SWITCH_ERLANG_TO_C_QUICK # The cached state is clean and need not be saved. + SET_GC_SAFE movq P, %rdi call CSYM(hipe_handle_exception) # Note: hipe_handle_exception() conses SWITCH_C_TO_ERLANG # %rsp updated by hipe_find_handler() diff --git a/erts/emulator/hipe/hipe_arm_asm.m4 b/erts/emulator/hipe/hipe_arm_asm.m4 index ae9ec752bb..68a6faa70b 100644 --- a/erts/emulator/hipe/hipe_arm_asm.m4 +++ b/erts/emulator/hipe/hipe_arm_asm.m4 @@ -48,6 +48,24 @@ define(NR_ARG_REGS,3)dnl admissible values are 0 to 6, inclusive `#define TEMP_LR r8' /* + * Debugging macros + * + * Keeps track of whether context has been saved in the debug build, allowing us + * to detect when the garbage collector is called when it shouldn't. + */ +`#ifdef DEBUG +# define SET_GC_UNSAFE(SCRATCH) \ + mov SCRATCH, #1; \ + str SCRATCH, [P, #P_GCUNSAFE] +# define SET_GC_SAFE(SCRATCH) \ + mov SCRATCH, #0; \ + str SCRATCH, [P, #P_GCUNSAFE] +#else +# define SET_GC_UNSAFE(SCRATCH) +# define SET_GC_SAFE(SCRATCH) +#endif' + +/* * Context switching macros. 
* * RESTORE_CONTEXT and RESTORE_CONTEXT_QUICK do not affect @@ -59,12 +77,14 @@ define(NR_ARG_REGS,3)dnl admissible values are 0 to 6, inclusive `#define RESTORE_CONTEXT_QUICK \ mov lr, TEMP_LR' -`#define SAVE_CACHED_STATE \ - str HP, [P, #P_HP]; \ - str NSP, [P, #P_NSP]' +`#define SAVE_CACHED_STATE \ + str HP, [P, #P_HP]; \ + str NSP, [P, #P_NSP]; \ + SET_GC_SAFE(HP)' -`#define RESTORE_CACHED_STATE \ - ldr HP, [P, #P_HP]; \ +`#define RESTORE_CACHED_STATE \ + SET_GC_UNSAFE(HP); \ + ldr HP, [P, #P_HP]; \ ldr NSP, [P, #P_NSP]' `#define SAVE_CONTEXT_BIF \ @@ -75,12 +95,14 @@ define(NR_ARG_REGS,3)dnl admissible values are 0 to 6, inclusive ldr HP, [P, #P_HP]' `#define SAVE_CONTEXT_GC \ + SET_GC_SAFE(TEMP_LR); \ mov TEMP_LR, lr; \ str lr, [P, #P_NRA]; \ str NSP, [P, #P_NSP]; \ str HP, [P, #P_HP]' `#define RESTORE_CONTEXT_GC \ + SET_GC_UNSAFE(HP); \ ldr HP, [P, #P_HP]' /* diff --git a/erts/emulator/hipe/hipe_arm_bifs.m4 b/erts/emulator/hipe/hipe_arm_bifs.m4 index d9c9952dbf..d7a2fec04a 100644 --- a/erts/emulator/hipe/hipe_arm_bifs.m4 +++ b/erts/emulator/hipe/hipe_arm_bifs.m4 @@ -198,8 +198,9 @@ $1: * gc_bif_interface_0(nbif_name, cbif_name) * gc_bif_interface_1(nbif_name, cbif_name) * gc_bif_interface_2(nbif_name, cbif_name) + * gc_bif_interface_3(nbif_name, cbif_name) * - * Generate native interface for a BIF with 0-2 parameters and + * Generate native interface for a BIF with 0-3 parameters and * standard failure mode. * The BIF may do a GC. */ @@ -279,6 +280,36 @@ $1: .type $1, %function #endif') +define(gc_bif_interface_3, +` +#ifndef HAVE_$1 +#`define' HAVE_$1 + .global $1 +$1: + /* Set up C argument registers. */ + mov r0, P + NBIF_ARG(r1,3,0) + NBIF_ARG(r2,3,1) + NBIF_ARG(r3,3,2) + + /* Save caller-save registers and call the C function. */ + SAVE_CONTEXT_GC + str r1, [r0, #P_ARG0] /* Store BIF__ARGS in def_arg_reg[] */ + str r2, [r0, #P_ARG1] + str r3, [r0, #P_ARG2] + add r1, r0, #P_ARG0 + CALL_BIF($2) + TEST_GOT_MBUF(3) + + /* Restore registers. Check for exception. 
*/ + cmp r0, #THE_NON_VALUE + RESTORE_CONTEXT_GC + beq nbif_3_simple_exception + NBIF_RET(3) + .size $1, .-$1 + .type $1, %function +#endif') + /* * gc_nofail_primop_interface_1(nbif_name, cbif_name) * diff --git a/erts/emulator/hipe/hipe_arm_glue.S b/erts/emulator/hipe/hipe_arm_glue.S index 49ffa8b1d8..5b7f8ad52d 100644 --- a/erts/emulator/hipe/hipe_arm_glue.S +++ b/erts/emulator/hipe/hipe_arm_glue.S @@ -342,6 +342,7 @@ nbif_4_gc_after_bif: str r1, [P, #P_NARITY] str TEMP_LR, [P, #P_NRA] str NSP, [P, #P_NSP] + SET_GC_SAFE(TEMP_LR) mov TEMP_LR, lr mov r3, #0 /* Pass 0 in arity */ mov r2, #0 /* Pass NULL in regs */ @@ -349,6 +350,7 @@ nbif_4_gc_after_bif: mov r0, P bl erts_gc_after_bif_call mov lr, TEMP_LR + SET_GC_UNSAFE(TEMP_LR) ldr TEMP_LR, [P, #P_NRA] mov r1, #0 str r1, [P, #P_NARITY] @@ -404,6 +406,7 @@ nbif_4_simple_exception: str NSP, [P, #P_NSP] str TEMP_LR, [P, #P_NRA] str r1, [P, #P_NARITY] + SET_GC_SAFE(r0) /* find and prepare to invoke the handler */ mov r0, P bl hipe_handle_exception /* Note: hipe_handle_exception() conses */ @@ -423,6 +426,7 @@ nbif_4_simple_exception: str NSP, [P, #P_NSP] str r1, [P, #P_NARITY] str TEMP_LR, [P, #P_NRA] + SET_GC_SAFE(NSP) b .nosave_exit /* diff --git a/erts/emulator/hipe/hipe_bif1.c b/erts/emulator/hipe/hipe_bif1.c index 08adbd474e..5e127755c6 100644 --- a/erts/emulator/hipe/hipe_bif1.c +++ b/erts/emulator/hipe/hipe_bif1.c @@ -124,757 +124,3 @@ BIF_RETTYPE hipe_bifs_trap_count_clear_0(BIF_ALIST_0) hipe_trap_count = 0; BIF_RET(make_small(count)); } - -/***************************************************************************** - * BIFs for benchmarking. These only do useful things if - * __BENCHMARK__ is defined in beam/benchmark.h. For documentation - * about how to add new counters or maintain the existing counters, - * see benchmark.h. - * - * If benchmarking is not enabled all BIFs will return false. If the - * required benchmark feature is not enabled, the counter will remain - * zero. - * - * process_info/0 -> { Number of live processes, - * Processes spawned in total } - * - * Live processes are increased when a new process is created, and - * decreased when a process dies. Processes spawned is increased - * when a process is created. - * - * - * process_info_clear/0 -> true - * - * Will reset the processes spawned-counters to zero. If this is - * done at some improper time, live processes may become a negative - * value. This is not a problem in itself, just as long as you know - * about it. - * - * - * message_info/0 -> { Messages sent, - * Messages copied, - * Ego messages (sender = receiver), - * Words sent, - * Words copied, - * Words preallocated } - * - * Counting the words sent in a shared heap system will affect - * runtime performance since it means that we have to calculate the - * size of the mesage. With private heaps, this is done anyway and - * will not affect performance. - * - * - * message_info_clear/0 -> true - * - * Reset the message counters to zero. - * - * - * message_sizes/0 -> true - * - * Displays a text-mode bar diagram with message sizes. There are no - * guaranties that this is printed in a way the Erlang system is - * supposed to print things. - * - * - * gc_info/0 -> { Minor collections, - * Major collections, - * Used heap, - * Allocated heap, - * Max used heap, - * Max allocated heap } - * - * Information about private heap garbage collections. 
Number of - * minor and major collections, how much heap is used and allocated - * and how much heap has been in use and allocated at most since the - * counters were reset. - * - * - * shared_gc_info/0 -> { Minor collections of the shared heap, - * Major collections of the shared heap, - * Used shared heap, - * Allocated shared heap, - * Max used shared heap, - * Max allocated shared heap } - * - * The same as above, but for the shared heap / message area. Note, - * that in a shared heap system the max used heap and max allocated - * heap are mostly the same, since the heap allways is filled before - * a garbage collection, and most garbage collections do not enlarge - * the heap. The private heap numbers are much more interesting. - * - * - * incremental_gc_info/0 -> { Complete minor GC cycles, - * Complete major GC cycles, - * Minor GC stages, - * Major GC stages } - * - * - * gc_info_clear/0 -> true - * - * Reset counters for both private and shared garbage collection. - * - * - * BM Timers - * --------- - * - * All timers returns tuples of the kind: { Minutes, Seconds, Milliseconds } - * except for the max times in garbage collection where times are normally - * small. The tuple is therefor: { Seconds, Milliseconds, Microseconds } - * - * system_timer/0 -> Mutator time - * - * This timer is not a real-time clock, it only runs when a process - * is scheduled to run. You can not find out the accual time a - * program has taken to run using this timer. - * - * - * system_timer_clear/0 -> true - * - * Reset system timer to zero. - * - * - * send_timer/0 -> { Send time, - * Copy time, - * Size time } - * - * Time spent in sending messages. The copy time and size time are - * only active if the copying is needed in send. Copying of data - * into ETS-tables etc is not timed with this timer. - * - * - * send_timer_clear/0 -> true - * - * Reset send timers to zero. - * - * - * gc_timer/0 -> { Time in minor collection, - * Time in major collection, - * Max time in minor collection (µs), - * Max time in major collection (µs) } - * - * Total time spent in garbage collection of the private heaps. The - * max times are for one separate collection. - * - * - * shared_gc_timer/0 -> { Time in minor collection, - * Time in major collection, - * Max time in minor collection (µs), - * Max time in major collection (µs) } - * - * Total time spent in garbage collection of the shared heap / - * message area. The max times are for one separate collection. - * - * - * gc_timer_clear/0 -> true - * - * Reset private and shared garbage collection timers to zero. Note, - * that the max-times are also reset. - * - * - * misc_timer/0 -> { Misc 0, Misc 1, Misc 2 } - * - * Timers for debug purposes. In a normal system, these timers are - * never used. Add these timers at places where you want to time - * something not covered here. Use BM_SWAP_TIMER(from,to) to start - * one of the misc timers. - * - * ... code timed by the system timer ... - * BM_SWAP_TIMER(system,misc1); - * ... code we want to time ... - * BM_SWAP_TIMER(misc1,system); - * ... back on system time ... - * - * - * misc_timer_clear/0 -> true - * - * Reset misc timers to zero.
- */ - -BIF_RETTYPE hipe_bifs_process_info_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ -#ifndef BM_COUNTERS - Uint processes_busy = 0; - Uint processes_spawned = 0; -#endif - Eterm *hp; - - hp = HAlloc(BIF_P, 3); - BIF_RET(TUPLE2(hp, - make_small(processes_busy), - make_small(processes_spawned))); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_process_info_clear_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ -#ifdef BM_COUNTERS - processes_spawned = 0; -#endif - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_message_info_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ - Eterm *hp; -#ifndef BM_COUNTERS - unsigned long messages_sent = 0; - unsigned long messages_copied = 0; - unsigned long messages_ego = 0; -#endif -#ifndef BM_MESSAGE_SIZES - unsigned long words_sent = 0; - unsigned long words_copied = 0; - unsigned long words_prealloc = 0; -#endif - - hp = HAlloc(BIF_P, 7); - BIF_RET(TUPLE6(hp, - make_small(messages_sent), - make_small(messages_copied), - make_small(messages_ego), - make_small(words_sent), - make_small(words_copied), - make_small(words_prealloc))); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_message_info_clear_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ -#ifdef BM_COUNTERS - messages_sent = 0; - messages_copied = 0; - messages_ego = 0; -#endif -#ifdef BM_MESSAGE_SIZES - words_sent = 0; - words_copied = 0; - words_prealloc = 0; - { - int i; - for (i = 0; i < 1000; i++) - message_sizes[i] = 0; - } -#endif - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_message_sizes_0(BIF_ALIST_0) -{ -#ifdef BM_MESSAGE_SIZES - int i, j, max = 0; - int tmp[12] = {0,0,0,0,0,0,0,0,0,0,0,0}; - - for (i = 0; i < 65; i++) { - tmp[0] += message_sizes[i]; - if (tmp[0] > max) - max = tmp[0]; - } - for (i = 65; i < 999; i++) { - tmp[i / 100 + 1] += message_sizes[i]; - if (tmp[i / 100 + 1] > max) - max = tmp[i / 100 + 1]; - } - tmp[11] = message_sizes[999]; - if (tmp[11] > max) - max = tmp[11]; - for (i = -1; i < 11; i++) { - int num = (tmp[i + 1] * 50) / max; - if (i == -1) - printf("\n\r 0 - 64: (%6d) |", tmp[0]); - else if (i == 0) - printf("\n\r 65 - 99: (%6d) |", tmp[1]); - else if (i == 10) - printf("\n\r >= 1000: (%6d) |", tmp[11]); - else - printf("\n\r%3d - %3d: (%6d) |", i * 100, i * 100 + 99, - tmp[i + 1]); - - for (j = 0; j < num; j++) - printf("."); - } - printf("\n\r"); - - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_gc_info_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ -#ifndef BM_COUNTERS - Uint minor_gc = 0; - Uint major_gc = 0; -#endif -#ifndef BM_HEAP_SIZES - Uint max_used_heap = 0; - Uint max_allocated_heap = 0; -#endif - Eterm *hp; - Uint used_heap = (BIF_P->htop - BIF_P->heap) + - (OLD_HTOP(BIF_P) - OLD_HEAP(BIF_P)) + - MBUF_SIZE(BIF_P); - - Uint alloc_heap = (BIF_P->hend - BIF_P->heap) + - (OLD_HEND(BIF_P) - OLD_HEAP(BIF_P)) + - MBUF_SIZE(BIF_P); - - hp = HAlloc(BIF_P, 7); - BIF_RET(TUPLE6(hp, - make_small((Uint)minor_gc), - make_small((Uint)major_gc), - make_small((Uint)used_heap), - make_small((Uint)alloc_heap), - make_small(max_used_heap), - make_small(max_allocated_heap))); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_shared_gc_info_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ -#if !(defined(BM_COUNTERS)) - Uint minor_global_gc = 0; - Uint major_global_gc = 0; -#endif -#ifndef BM_HEAP_SIZES - Uint max_used_global_heap = 0; - Uint max_allocated_global_heap = 0; -#endif - Eterm *hp; - - Uint tmp_used_heap = 0; - Uint tmp_allocated_heap = 0; - - hp = 
HAlloc(BIF_P, 7); - BIF_RET(TUPLE6(hp, - make_small((uint)minor_global_gc), - make_small((uint)major_global_gc), - make_small(tmp_used_heap), - make_small(tmp_allocated_heap), - make_small(max_used_global_heap), - make_small(max_allocated_global_heap))); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_incremental_gc_info_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ -#if !defined(BM_COUNTERS) - Uint minor_gc_cycles = 0; - Uint major_gc_cycles = 0; - Uint minor_gc_stages = 0; - Uint major_gc_stages = 0; -#endif - Eterm *hp; - - hp = HAlloc(BIF_P, 5); - BIF_RET(TUPLE4(hp, - make_small(minor_gc_cycles), - make_small(major_gc_cycles), - make_small(minor_gc_stages), - make_small(major_gc_stages))); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_gc_info_clear_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ - -#ifdef BM_COUNTERS - minor_gc = 0; - major_gc = 0; -#endif - -#ifdef BM_HEAP_SIZES - max_used_heap = 0; - max_allocated_heap = 0; - max_used_global_heap = 0; - max_allocated_global_heap = 0; -#endif - - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_pause_times_0(BIF_ALIST_0) -{ -#ifdef BM_TIMERS - int i; - int total_time = 0, n = 0; - int left = 0, right = 0, mid = 0; - - printf("Pause times in minor collection:\r\n"); - for (i = 0; i < MAX_PAUSE_TIME; i++) { - if (pause_times[i] > 0) { - printf("%d: %ld\r\n", i, pause_times[i]); - total_time += pause_times[i] * i; - n += pause_times[i]; - - if (i > mid) - right += pause_times[i]; - - while (right > left) { - left += pause_times[mid++]; - right -= pause_times[mid]; - } - } - } - - printf("Number of collections: %d\r\n", n); - printf("Total collection time: %d\r\n", total_time); - if (n > 0) - printf("Mean pause time: %d\r\n", total_time / n); - - printf("Geometrical mean: %d\r\n", mid); - - total_time = 0; n = 0; - left = 0; right = 0; mid = 0; - printf("Pause times in major collection:\r\n"); - for (i = 0; i < MAX_PAUSE_TIME; i++) { - if (pause_times_old[i] > 0) { - printf("%d: %ld\r\n", i, pause_times_old[i]); - total_time += pause_times_old[i] * i; - n += pause_times_old[i]; - } - } - - printf("Number of collections: %d\r\n", n); - printf("Total collection time: %d\r\n", total_time); - if (n > 0) - printf("Mean pause time: %d\r\n", total_time / n); - - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - -/* XXX: these macros have free variables */ -#ifdef BM_TIMERS -#define MAKE_TIME(_timer_) { \ - BM_TIMER_T tmp = _timer_##_time / 1000000; \ - milli = tmp % 1000; \ - tmp /= 1000; \ - sec = tmp % 60; \ - min = tmp / 60; } - -#define MAKE_MICRO_TIME(_timer_) { \ - BM_TIMER_T tmp = _timer_##_time / 1000; \ - micro = tmp % 1000; \ - tmp /= 1000; \ - milli = tmp % 1000; \ - sec = tmp / 1000; } - -#else -#define MAKE_TIME(_timer_) -#define MAKE_MICRO_TIME(_timer_) -#endif - -BIF_RETTYPE hipe_bifs_system_timer_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ - uint min = 0; - uint sec = 0; - uint milli = 0; - Eterm *hp; - - hp = HAlloc(BIF_P, 4); - MAKE_TIME(system); - BIF_RET(TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli))); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_system_timer_clear_0(BIF_ALIST_0) -{ -#ifdef BM_TIMERS - system_time = 0; - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_send_timer_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ - uint min = 0; - uint sec = 0; - uint milli = 0; - Eterm *hp; - Eterm sendtime, copytime, sizetime; - - hp = HAlloc(BIF_P, 4 * 4); - - MAKE_TIME(send); - sendtime = TUPLE3(hp, - 
make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - - MAKE_TIME(copy); - copytime = TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - - MAKE_TIME(size); - sizetime = TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - BIF_RET(TUPLE3(hp, sendtime, copytime, sizetime)); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_send_timer_clear_0(BIF_ALIST_0) -{ -#ifdef BM_TIMERS - send_time = 0; - copy_time = 0; - size_time = 0; - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_gc_timer_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ - Eterm *hp; - uint min = 0; - uint sec = 0; - uint milli = 0; - uint micro = 0; - Eterm minor, major, max_min, max_maj; - - hp = HAlloc(BIF_P, 4 * 4 + 5); - - MAKE_TIME(minor_gc); - minor = TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - - MAKE_TIME(major_gc); - major = TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - - MAKE_MICRO_TIME(max_minor); - max_min = TUPLE3(hp, - make_small(sec), - make_small(milli), - make_small(micro)); - hp += 4; - - MAKE_MICRO_TIME(max_major); - max_maj = TUPLE3(hp, - make_small(sec), - make_small(milli), - make_small(micro)); - hp += 4; - - BIF_RET(TUPLE4(hp, minor, major, max_min, max_maj)); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_shared_gc_timer_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ - Eterm *hp; - uint min = 0; - uint sec = 0; - uint milli = 0; - uint micro = 0; - Eterm minor, major, max_min, max_maj; - - hp = HAlloc(BIF_P, 4 * 4 + 5); - - MAKE_TIME(minor_global_gc); - minor = TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - - MAKE_TIME(major_global_gc); - major = TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - - MAKE_MICRO_TIME(max_global_minor); - max_min = TUPLE3(hp, - make_small(sec), - make_small(milli), - make_small(micro)); - hp += 4; - - MAKE_MICRO_TIME(max_global_major); - max_maj = TUPLE3(hp, - make_small(sec), - make_small(milli), - make_small(micro)); - hp += 4; - - BIF_RET(TUPLE4(hp, minor, major, max_min, max_maj)); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_gc_timer_clear_0(BIF_ALIST_0) -{ -#ifdef BM_TIMERS - minor_gc_time = 0; - major_gc_time = 0; - max_minor_time = 0; - max_major_time = 0; - minor_global_gc_time = 0; - major_global_gc_time = 0; - max_global_minor_time = 0; - max_global_major_time = 0; - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_misc_timer_0(BIF_ALIST_0) -{ -#ifdef __BENCHMARK__ - uint min = 0; - uint sec = 0; - uint milli = 0; - Eterm *hp; - Eterm misctime1, misctime2, misctime3; - - hp = HAlloc(BIF_P, 4 * 4); - - MAKE_TIME(misc0); - misctime1 = TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - - MAKE_TIME(misc1); - misctime2 = TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - - MAKE_TIME(misc2); - misctime3 = TUPLE3(hp, - make_small(min), - make_small(sec), - make_small(milli)); - hp += 4; - BIF_RET(TUPLE3(hp, misctime1, misctime2, misctime3)); -#else - BIF_RET(am_false); -#endif -} - -BIF_RETTYPE hipe_bifs_misc_timer_clear_0(BIF_ALIST_0) -{ -#ifdef BM_TIMERS - misc0_time = 0; - misc1_time = 0; - misc2_time = 0; - BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif -} - -#undef MAKE_TIME -#undef MAKE_MICRO_TIME - -/* - * HiPE hrvtime(). 
- * These implementations are currently available: - * + The fallback, which is the same as {X,_} = runtime(statistics). - */ - -static double fallback_get_hrvtime(void) -{ - unsigned long ms_user; - - elapsed_time_both(&ms_user, NULL, NULL, NULL); - return (double)ms_user; -} - -/* - * Fallback, if nothing better exists. - * This is the same as {X,_} = statistics(runtime), which uses - * times(2) on Unix systems. - */ - -#define hrvtime_is_started() 1 -#define start_hrvtime() do{}while(0) -#define stop_hrvtime() do{}while(0) -#define get_hrvtime() fallback_get_hrvtime() - -BIF_RETTYPE hipe_bifs_get_hrvtime_0(BIF_ALIST_0) -{ - Eterm *hp; - Eterm res; - FloatDef f; - - if (!hrvtime_is_started()) - start_hrvtime(); - f.fd = get_hrvtime(); - hp = HAlloc(BIF_P, FLOAT_SIZE_OBJECT); - res = make_float(hp); - PUT_DOUBLE(f, hp); - BIF_RET(res); -} - -BIF_RETTYPE hipe_bifs_stop_hrvtime_0(BIF_ALIST_0) -{ - stop_hrvtime(); - BIF_RET(am_true); -} diff --git a/erts/emulator/hipe/hipe_bif1.tab b/erts/emulator/hipe/hipe_bif1.tab index c5b452f199..4be0ad0e9c 100644 --- a/erts/emulator/hipe/hipe_bif1.tab +++ b/erts/emulator/hipe/hipe_bif1.tab @@ -27,24 +27,3 @@ bif hipe_bifs:call_count_get/1 bif hipe_bifs:call_count_clear/1 bif hipe_bifs:trap_count_get/0 bif hipe_bifs:trap_count_clear/0 -bif hipe_bifs:process_info/0 -bif hipe_bifs:process_info_clear/0 -bif hipe_bifs:message_info/0 -bif hipe_bifs:message_info_clear/0 -bif hipe_bifs:message_sizes/0 -bif hipe_bifs:gc_info/0 -bif hipe_bifs:shared_gc_info/0 -bif hipe_bifs:incremental_gc_info/0 -bif hipe_bifs:gc_info_clear/0 -bif hipe_bifs:pause_times/0 -bif hipe_bifs:system_timer/0 -bif hipe_bifs:system_timer_clear/0 -bif hipe_bifs:send_timer/0 -bif hipe_bifs:send_timer_clear/0 -bif hipe_bifs:gc_timer/0 -bif hipe_bifs:shared_gc_timer/0 -bif hipe_bifs:gc_timer_clear/0 -bif hipe_bifs:misc_timer/0 -bif hipe_bifs:misc_timer_clear/0 -bif hipe_bifs:get_hrvtime/0 -bif hipe_bifs:stop_hrvtime/0 diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4 index 29095a5389..dcf3447af9 100644 --- a/erts/emulator/hipe/hipe_bif_list.m4 +++ b/erts/emulator/hipe/hipe_bif_list.m4 @@ -96,6 +96,7 @@ * gc_bif_interface_0(nbif_name, cbif_name) * gc_bif_interface_1(nbif_name, cbif_name) * gc_bif_interface_2(nbif_name, cbif_name) + * gc_bif_interface_3(nbif_name, cbif_name) * * A BIF which may do a GC or walk the native stack. * May read NSP, NSP_LIMIT, NRA, HP, HP_LIMIT, and FCALLS. @@ -263,32 +264,34 @@ noproc_primop_interface_1(nbif_atomic_inc, hipe_atomic_inc) ',)dnl /* - * Standard BIFs. - * BIF_LIST(ModuleAtom,FunctionAtom,Arity,CFun,Index) + * BIFs that disable GC while trapping are called via a wrapper + * to reserve stack space for the "trap frame". + * They occasionally need to call the garbage collector in order to make room + * for the trap frame on the BEAM stack. 
*/ +gc_bif_interface_1(nbif_term_to_binary_1, hipe_wrapper_term_to_binary_1) +gc_bif_interface_2(nbif_term_to_binary_2, hipe_wrapper_term_to_binary_2) +gc_bif_interface_1(nbif_binary_to_term_1, hipe_wrapper_binary_to_term_1) +gc_bif_interface_2(nbif_binary_to_term_2, hipe_wrapper_binary_to_term_2) +gc_bif_interface_1(nbif_binary_to_list_1, hipe_wrapper_binary_to_list_1) +gc_bif_interface_3(nbif_binary_to_list_3, hipe_wrapper_binary_to_list_3) +gc_bif_interface_1(nbif_bitstring_to_list_1, hipe_wrapper_bitstring_to_list_1) +gc_bif_interface_1(nbif_list_to_binary_1, hipe_wrapper_list_to_binary_1) +gc_bif_interface_1(nbif_iolist_to_binary_1, hipe_wrapper_iolist_to_binary_1) +gc_bif_interface_1(nbif_binary_list_to_bin_1, hipe_wrapper_binary_list_to_bin_1) +gc_bif_interface_1(nbif_list_to_bitstring_1, hipe_wrapper_list_to_bitstring_1) +gc_bif_interface_2(nbif_send_2, hipe_wrapper_send_2) +gc_bif_interface_3(nbif_send_3, hipe_wrapper_send_3) +gc_bif_interface_2(nbif_ebif_bang_2, hipe_wrapper_ebif_bang_2) +gc_bif_interface_2(nbif_maps_merge_2, hipe_wrapper_maps_merge_2) -/* BIFs that disable GC while trapping are called via a wrapper - * to reserve stack space for the "trap frame". + +/* + * Standard BIFs. + * BIF_LIST(ModuleAtom,FunctionAtom,Arity,CFun,Index) */ -define(CFUN,`ifelse( -$1, term_to_binary_1, hipe_wrapper_$1, -$1, term_to_binary_2, hipe_wrapper_$1, -$1, binary_to_term_1, hipe_wrapper_$1, -$1, binary_to_term_2, hipe_wrapper_$1, -$1, binary_to_list_1, hipe_wrapper_$1, -$1, binary_to_list_3, hipe_wrapper_$1, -$1, bitstring_to_list_1, hipe_wrapper_$1, -$1, list_to_binary_1, hipe_wrapper_$1, -$1, iolist_to_binary_1, hipe_wrapper_$1, -$1, binary_list_to_bin_1, hipe_wrapper_$1, -$1, list_to_bitstring_1, hipe_wrapper_$1, -$1, send_2, hipe_wrapper_$1, -$1, send_3, hipe_wrapper_$1, -$1, ebif_bang_2, hipe_wrapper_$1, -$1, maps_merge_2, hipe_wrapper_$1, -$1)') -define(BIF_LIST,`standard_bif_interface_$3(nbif_$4, CFUN($4))') +define(BIF_LIST,`standard_bif_interface_$3(nbif_$4, $4)') include(TARGET/`erl_bif_list.h') /* diff --git a/erts/emulator/hipe/hipe_gc.c b/erts/emulator/hipe/hipe_gc.c index d0619a0609..68c65dea27 100644 --- a/erts/emulator/hipe/hipe_gc.c +++ b/erts/emulator/hipe/hipe_gc.c @@ -46,6 +46,8 @@ Eterm *fullsweep_nstack(Process *p, Eterm *n_htop) /* arch-specific nstack walk state */ struct nstack_walk_state walk_state; + ASSERT(!p->hipe.gc_is_unsafe); + if (!p->hipe.nstack) { ASSERT(!p->hipe.nsp && !p->hipe.nstend); return n_htop; @@ -136,6 +138,8 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop) char *mature; Uint mature_size; + ASSERT(!p->hipe.gc_is_unsafe); + if (!p->hipe.nstack) { ASSERT(!p->hipe.nsp && !p->hipe.nstend); return; @@ -233,3 +237,122 @@ void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop) } abort(); } + +Eterm *sweep_literals_nstack(Process *p, Eterm *old_htop, char *area, + Uint area_size) +{ + /* known nstack walk state */ + Eterm *nsp; + Eterm *nsp_end; + const struct sdesc *sdesc; + /* arch-specific nstack walk state */ + struct nstack_walk_state walk_state; + + ASSERT(!p->hipe.gc_is_unsafe); + + if (!p->hipe.nstack) { + ASSERT(!p->hipe.nsp && !p->hipe.nstend); + return old_htop; + } + if (!nstack_walk_init_check(p)) + return old_htop; + + ASSERT(p->hipe.nsp && p->hipe.nstend); + nsp = nstack_walk_nsp_begin(p); + nsp_end = nstack_walk_nsp_end(p); + sdesc = nstack_walk_init_sdesc_ignore_trap(p, &walk_state); + + while (!nstack_walk_nsp_reached_end(nsp, nsp_end)) { + unsigned long ra; + unsigned sdesc_size = 
nstack_walk_frame_size(sdesc); + unsigned i = 0; + unsigned mask = sdesc->livebits[0]; + for (;;) { + if (mask & 1) { + Eterm *nsp_i = nstack_walk_frame_index(nsp, i); + Eterm gval = *nsp_i; + if (is_boxed(gval)) { + Eterm *ptr = boxed_val(gval); + Eterm val = *ptr; + if (IS_MOVED_BOXED(val)) { + ASSERT(is_boxed(val)); + *nsp_i = val; + } else if (ErtsInArea(ptr, area, area_size)) { + MOVE_BOXED(ptr, val, old_htop, nsp_i); + } + } else if (is_list(gval)) { + Eterm *ptr = list_val(gval); + Eterm val = *ptr; + if (IS_MOVED_CONS(val)) { + *nsp_i = ptr[1]; + } else if (ErtsInArea(ptr, area, area_size)) { + MOVE_CONS(ptr, val, old_htop, nsp_i); + } + } + } + if (++i >= sdesc_size) + break; + if (i & 31) + mask >>= 1; + else + mask = sdesc->livebits[i >> 5]; + } + ra = nstack_walk_frame_ra(nsp, sdesc); + if (ra == (unsigned long)nbif_stack_trap_ra) + ra = (unsigned long)p->hipe.ngra; + sdesc = hipe_find_sdesc(ra); + nsp = nstack_walk_next_frame(nsp, sdesc_size); + } + return old_htop; +} + +int +nstack_any_heap_ref_ptrs(Process *rp, char* mod_start, Uint mod_size) +{ + Eterm *nsp; + Eterm *nsp_end; + const struct sdesc *sdesc; + /* arch-specific nstack walk state */ + struct nstack_walk_state walk_state; + + ASSERT(!rp->hipe.gc_is_unsafe); + + if (!rp->hipe.nstack || !nstack_walk_init_check(rp)) return 0; + ASSERT(rp->hipe.nsp && rp->hipe.nstend); + nsp = nstack_walk_nsp_begin(rp); + nsp_end = nstack_walk_nsp_end(rp); + sdesc = nstack_walk_init_sdesc_ignore_trap(rp, &walk_state); + + while (!nstack_walk_nsp_reached_end(nsp, nsp_end)) { + unsigned long ra; + unsigned sdesc_size = nstack_walk_frame_size(sdesc); + unsigned i = 0; + unsigned mask = sdesc->livebits[0]; + for (;;) { + if (mask & 1) { + Eterm *nsp_i = nstack_walk_frame_index(nsp, i); + Eterm val = *nsp_i; + switch (primary_tag(val)) { + case TAG_PRIMARY_BOXED: + case TAG_PRIMARY_LIST: + if (ErtsInArea(val, mod_start, mod_size)) { + return 1; + } + break; + } + } + if (++i >= sdesc_size) + break; + if (i & 31) + mask >>= 1; + else + mask = sdesc->livebits[i >> 5]; + } + ra = nstack_walk_frame_ra(nsp, sdesc); + if (ra == (unsigned long)nbif_stack_trap_ra) + ra = (unsigned long)rp->hipe.ngra; + sdesc = hipe_find_sdesc(ra); + nsp = nstack_walk_next_frame(nsp, sdesc_size); + } + return 0; +} diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c index 0d3493ec6c..b9d4226705 100644 --- a/erts/emulator/hipe/hipe_mkliterals.c +++ b/erts/emulator/hipe/hipe_mkliterals.c @@ -525,6 +525,12 @@ static const struct rts_param rts_params[] = { { 51, "P_CALLEE_EXP", 1, offsetof(struct process, hipe.u.callee_exp) }, { 52, "THE_NON_VALUE", 1, (int)THE_NON_VALUE }, + + { 53, "P_GCUNSAFE", +#ifdef DEBUG + 1, offsetof(struct process, hipe.gc_is_unsafe) +#endif + }, }; #define NR_PARAMS ARRAY_SIZE(rts_params) diff --git a/erts/emulator/hipe/hipe_ppc_bifs.m4 b/erts/emulator/hipe/hipe_ppc_bifs.m4 index 57b4208bee..b540562185 100644 --- a/erts/emulator/hipe/hipe_ppc_bifs.m4 +++ b/erts/emulator/hipe/hipe_ppc_bifs.m4 @@ -212,8 +212,9 @@ ASYM($1): * gc_bif_interface_0(nbif_name, cbif_name) * gc_bif_interface_1(nbif_name, cbif_name) * gc_bif_interface_2(nbif_name, cbif_name) + * gc_bif_interface_3(nbif_name, cbif_name) * - * Generate native interface for a BIF with 0-2 parameters and + * Generate native interface for a BIF with 0-3 parameters and * standard failure mode. * The BIF may do a GC. 
*/ @@ -300,6 +301,39 @@ ASYM($1): TYPE_FUNCTION(ASYM($1)) #endif') +define(gc_bif_interface_3, +` +#ifndef HAVE_$1 +#`define' HAVE_$1 + GLOBAL(ASYM($1)) +ASYM($1): + /* Set up C argument registers. */ + mr r3, P + NBIF_ARG(r4,3,0) + NBIF_ARG(r5,3,1) + NBIF_ARG(r6,3,2) + + /* Save caller-save registers and call the C function. */ + SAVE_CONTEXT_GC + STORE r4, P_ARG0(r3) /* Store BIF__ARGS in def_arg_reg[] */ + STORE r5, P_ARG1(r3) + STORE r6, P_ARG2(r3) + addi r4, r3, P_ARG0 + CALL_BIF($2) + TEST_GOT_MBUF + + /* Restore registers. Check for exception. */ + CMPI r3, THE_NON_VALUE + RESTORE_CONTEXT_GC + beq- 1f + NBIF_RET(3) +1: /* workaround for bc:s small offset operand */ + b CSYM(nbif_3_simple_exception) + HANDLE_GOT_MBUF(3) + SET_SIZE(ASYM($1)) + TYPE_FUNCTION(ASYM($1)) +#endif') + /* * gc_nofail_primop_interface_1(nbif_name, cbif_name) * diff --git a/erts/emulator/hipe/hipe_process.h b/erts/emulator/hipe/hipe_process.h index 21c4239753..a8d5972280 100644 --- a/erts/emulator/hipe/hipe_process.h +++ b/erts/emulator/hipe/hipe_process.h @@ -52,6 +52,9 @@ struct hipe_process_state { #if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) void (*bif_callee)(void); /* When calling BIF's via debug wrapper */ #endif +#ifdef DEBUG + UWord gc_is_unsafe; /* Nonzero when GC-required state is on stack */ +#endif }; extern void hipe_arch_print_pcb(struct hipe_process_state *p); @@ -68,6 +71,9 @@ static __inline__ void hipe_init_process(struct hipe_process_state *p) p->nra = NULL; #endif p->narity = 0; +#ifdef DEBUG + p->gc_is_unsafe = 0; +#endif } static __inline__ void hipe_delete_process(struct hipe_process_state *p) diff --git a/erts/emulator/hipe/hipe_risc_gc.h b/erts/emulator/hipe/hipe_risc_gc.h index 315f8e7f9f..09568c140e 100644 --- a/erts/emulator/hipe/hipe_risc_gc.h +++ b/erts/emulator/hipe/hipe_risc_gc.h @@ -51,6 +51,19 @@ nstack_walk_init_sdesc(const Process *p, struct nstack_walk_state *state) return sdesc; } +static inline const struct sdesc* +nstack_walk_init_sdesc_ignore_trap(const Process *p, + struct nstack_walk_state *state) +{ + unsigned long ra = (unsigned long)p->hipe.nra; + const struct sdesc *sdesc; + if (ra == (unsigned long)&nbif_stack_trap_ra) + ra = (unsigned long)p->hipe.ngra; + sdesc = hipe_find_sdesc(ra); + state->sdesc0 = sdesc; + return sdesc; +} + static inline void nstack_walk_update_trap(Process *p, const struct sdesc *sdesc0) { Eterm *nsp = p->hipe.nsp; diff --git a/erts/emulator/hipe/hipe_sparc_bifs.m4 b/erts/emulator/hipe/hipe_sparc_bifs.m4 index 2e886ec1d1..1389beaa61 100644 --- a/erts/emulator/hipe/hipe_sparc_bifs.m4 +++ b/erts/emulator/hipe/hipe_sparc_bifs.m4 @@ -210,8 +210,9 @@ $1: * gc_bif_interface_0(nbif_name, cbif_name) * gc_bif_interface_1(nbif_name, cbif_name) * gc_bif_interface_2(nbif_name, cbif_name) + * gc_bif_interface_3(nbif_name, cbif_name) * - * Generate native interface for a BIF with 0-2 parameters and + * Generate native interface for a BIF with 0-3 parameters and * standard failure mode. * The BIF may do a GC. */ @@ -295,6 +296,37 @@ $1: .type $1, #function #endif') +define(gc_bif_interface_3, +` +#ifndef HAVE_$1 +#`define' HAVE_$1 + .global $1 +$1: + /* Set up C argument registers. */ + mov P, %o0 + NBIF_ARG(%o1,3,0) + NBIF_ARG(%o2,3,1) + NBIF_ARG(%o3,3,2) + + /* Save caller-save registers and call the C function. */ + SAVE_CONTEXT_GC + st %o1, [%o0+P_ARG0] ! Store BIF__ARGS in def_arg_reg + st %o2, [%o0+P_ARG1] + st %o3, [%o0+P_ARG2] + add %o0, P_ARG0, %o1 + CALL_BIF($2) + nop + TEST_GOT_MBUF + + /* Restore registers. Check for exception. 
*/ + TEST_GOT_EXN(3) + RESTORE_CONTEXT_GC + NBIF_RET(3) + HANDLE_GOT_MBUF(3) + .size $1, .-$1 + .type $1, #function +#endif') + /* * gc_nofail_primop_interface_1(nbif_name, cbif_name) * diff --git a/erts/emulator/hipe/hipe_stack.h b/erts/emulator/hipe/hipe_stack.h index 4ea7d5c031..afa0ed4256 100644 --- a/erts/emulator/hipe/hipe_stack.h +++ b/erts/emulator/hipe/hipe_stack.h @@ -131,5 +131,8 @@ static __inline__ void hipe_check_nstack(Process *p, unsigned nwords) */ extern Eterm *fullsweep_nstack(Process *p, Eterm *n_htop); extern void gensweep_nstack(Process *p, Eterm **ptr_old_htop, Eterm **ptr_n_htop); +extern Eterm *sweep_literals_nstack(Process *p, Eterm *n_htop, char *area, + Uint area_size); +extern int nstack_any_heap_ref_ptrs(Process *, char* mod_start, Uint mod_size); #endif /* HIPE_STACK_H */ diff --git a/erts/emulator/hipe/hipe_x86_bifs.m4 b/erts/emulator/hipe/hipe_x86_bifs.m4 index b8ac5046d5..c0c149733c 100644 --- a/erts/emulator/hipe/hipe_x86_bifs.m4 +++ b/erts/emulator/hipe/hipe_x86_bifs.m4 @@ -671,10 +671,11 @@ noproc_primop_interface_0(nbif_handle_fp_exception, erts_restore_fpu) define(gc_bif_interface_0,`nofail_primop_interface_0($1, $2)') /* - * Implement gc_bif_interface_N as standard_bif_interface_N (N=1,2). + * Implement gc_bif_interface_N as standard_bif_interface_N (N=1,2,3). */ define(gc_bif_interface_1,`standard_bif_interface_1($1, $2)') define(gc_bif_interface_2,`standard_bif_interface_2($1, $2)') +define(gc_bif_interface_3,`standard_bif_interface_3($1, $2)') /* * Implement gc_nofail_primop_interface_1 as nofail_primop_interface_1. diff --git a/erts/emulator/hipe/hipe_x86_gc.h b/erts/emulator/hipe/hipe_x86_gc.h index c22b28c2d5..00fe03d8f9 100644 --- a/erts/emulator/hipe/hipe_x86_gc.h +++ b/erts/emulator/hipe/hipe_x86_gc.h @@ -81,6 +81,23 @@ nstack_walk_init_sdesc(const Process *p, struct nstack_walk_state *state) #endif } +static inline const struct sdesc* +nstack_walk_init_sdesc_ignore_trap(const Process *p, + struct nstack_walk_state *state) +{ +#ifdef SKIP_YOUNGEST_FRAME + unsigned long ra = p->hipe.nsp[0]; + const struct sdesc *sdesc; + if (ra == (unsigned long)nbif_stack_trap_ra) + ra = (unsigned long)p->hipe.ngra; + sdesc = hipe_find_sdesc(ra); + state->sdesc0 = sdesc; + return sdesc; +#else + return nstack_walk_init_sdesc(p, state); +#endif +} + static inline void nstack_walk_update_trap(Process *p, const struct sdesc *sdesc0) { #ifdef SKIP_YOUNGEST_FRAME diff --git a/erts/emulator/internal_doc/Tracing.md b/erts/emulator/internal_doc/Tracing.md index 30bc5327a7..728f315263 100644 --- a/erts/emulator/internal_doc/Tracing.md +++ b/erts/emulator/internal_doc/Tracing.md @@ -57,7 +57,7 @@ generations of breakpoints are kept and indentified by index of 0 and 1. The global atomic variables `erts_active_bp_index` will determine which generation of breakpoints running code will use. -### Atomicy Without Atomic Operations +### Atomicity Without Atomic Operations Not using the code loading generations (or any other code duplication) means that `trace_pattern` must at some point write to the active beam diff --git a/erts/emulator/sys/unix/erl_child_setup.c b/erts/emulator/sys/unix/erl_child_setup.c index 6beb316350..6b9ddd8da4 100644 --- a/erts/emulator/sys/unix/erl_child_setup.c +++ b/erts/emulator/sys/unix/erl_child_setup.c @@ -54,6 +54,7 @@ #include <stdlib.h> #include <stdio.h> +#include <stdarg.h> #include <sys/wait.h> #define WANT_NONBLOCKING @@ -74,15 +75,22 @@ //#define HARD_DEBUG #ifdef HARD_DEBUG -#define DEBUG_PRINT(fmt, ...) 
fprintf(stderr, fmt "\r\n", ##__VA_ARGS__) +#define DEBUG_PRINT(fmt, ...) fprintf(stderr, "%d:" fmt "\r\n", getpid(), ##__VA_ARGS__) #else #define DEBUG_PRINT(fmt, ...) #endif -#define ABORT(fmt, ...) do { \ - fprintf(stderr, "erl_child_setup: " fmt "\r\n", ##__VA_ARGS__); \ - abort(); \ - } while(0) +static char abort_reason[200]; /* for core dump inspection */ + +static void ABORT(const char* fmt, ...) +{ + va_list arglist; + va_start(arglist, fmt); + vsprintf(abort_reason, fmt, arglist); + fprintf(stderr, "erl_child_setup: %s\r\n", abort_reason); + va_end(arglist); + abort(); +} #ifdef DEBUG void @@ -123,12 +131,13 @@ static int sigchld_pipe[2]; static int start_new_child(int pipes[]) { + int errln = -1; int size, res, i, pos = 0; char *buff, *o_buff; - char *cmd, *wd, **new_environ, **args = NULL; + char *cmd, *cwd, *wd, **new_environ, **args = NULL; - Sint cnt, flags; + Sint32 cnt, flags; /* only child executes here */ @@ -137,6 +146,7 @@ start_new_child(int pipes[]) } while(res < 0 && (errno == EINTR || errno == ERRNO_BLOCK)); if (res <= 0) { + errln = __LINE__; goto child_error; } @@ -148,10 +158,12 @@ start_new_child(int pipes[]) if ((res = read(pipes[0], buff + pos, size - pos)) < 0) { if (errno == ERRNO_BLOCK || errno == EINTR) continue; + errln = __LINE__; goto child_error; } if (res == 0) { errno = EPIPE; + errln = __LINE__; goto child_error; } pos += res; @@ -160,12 +172,16 @@ start_new_child(int pipes[]) o_buff = buff; flags = get_int32(buff); - buff += sizeof(Sint32); + buff += sizeof(flags); DEBUG_PRINT("flags = %d", flags); cmd = buff; buff += strlen(buff) + 1; + + cwd = buff; + buff += strlen(buff) + 1; + if (*buff == '\0') { wd = NULL; } else { @@ -177,10 +193,10 @@ start_new_child(int pipes[]) DEBUG_PRINT("wd = %s", wd); cnt = get_int32(buff); - buff += sizeof(Sint32); + buff += sizeof(cnt); new_environ = malloc(sizeof(char*)*(cnt + 1)); - DEBUG_PRINT("env_len = %ld", cnt); + DEBUG_PRINT("env_len = %d", cnt); for (i = 0; i < cnt; i++, buff++) { new_environ[i] = buff; while(*buff != '\0') buff++; @@ -190,7 +206,7 @@ start_new_child(int pipes[]) if (o_buff + size != buff) { /* This is a spawn executable call */ cnt = get_int32(buff); - buff += sizeof(Sint32); + buff += sizeof(cnt); args = malloc(sizeof(char*)*(cnt + 1)); for (i = 0; i < cnt; i++, buff++) { args[i] = buff; @@ -201,7 +217,12 @@ start_new_child(int pipes[]) if (o_buff + size != buff) { errno = EINVAL; - goto child_error; + errln = __LINE__; + fprintf(stderr,"erl_child_setup: failed with protocol " + "error %d on line %d", errno, errln); + /* we abort here as it is most likely a symptom of an + emulator/erl_child_setup bug */ + abort(); } DEBUG_PRINT("read ack"); @@ -213,12 +234,32 @@ start_new_child(int pipes[]) ASSERT(res == sizeof(proto)); } } while(res < 0 && (errno == EINTR || errno == ERRNO_BLOCK)); + if (res < 1) { errno = EPIPE; + errln = __LINE__; goto child_error; } - DEBUG_PRINT("Do that forking business: '%s'\n",cmd); + DEBUG_PRINT("Set cwd to: '%s'",cwd); + + if (chdir(cwd) < 0) { + /* This is not good, it probably means that the cwd of + beam is invalid. We ignore it and try anyways as + the child might now need a cwd or the chdir below + could take us to a valid directory. 
+ */ + } + + DEBUG_PRINT("Set wd to: '%s'",wd); + + if (wd && chdir(wd) < 0) { + int err = errno; + fprintf(stderr,"spawn: Could not cd to %s\r\n", wd); + _exit(err); + } + + DEBUG_PRINT("Do that forking business: '%s'",cmd); /* When the dup2'ing below is done, only fd's 0, 1, 2 and maybe 3, 4 should survive the @@ -228,25 +269,34 @@ start_new_child(int pipes[]) if (flags & FORKER_FLAG_USE_STDIO) { /* stdin for process */ if (flags & FORKER_FLAG_DO_WRITE && - dup2(pipes[0], 0) < 0) + dup2(pipes[0], 0) < 0) { + errln = __LINE__; goto child_error; + } /* stdout for process */ if (flags & FORKER_FLAG_DO_READ && - dup2(pipes[1], 1) < 0) + dup2(pipes[1], 1) < 0) { + errln = __LINE__; goto child_error; + } } else { /* XXX will fail if pipes[0] == 4 (unlikely..) */ - if (flags & FORKER_FLAG_DO_READ && dup2(pipes[1], 4) < 0) + if (flags & FORKER_FLAG_DO_READ && dup2(pipes[1], 4) < 0) { + errln = __LINE__; goto child_error; - if (flags & FORKER_FLAG_DO_WRITE && dup2(pipes[0], 3) < 0) + } + if (flags & FORKER_FLAG_DO_WRITE && dup2(pipes[0], 3) < 0) { + errln = __LINE__; goto child_error; + } } - if (dup2(pipes[2], 2) < 0) - goto child_error; - - if (wd && chdir(wd) < 0) + /* we do the dup2 of stderr last so that errors + in child_error will be printed to stderr */ + if (dup2(pipes[2], 2) < 0) { + errln = __LINE__; goto child_error; + } #if defined(USE_SETPGRP_NOARGS) /* SysV */ (void) setpgrp(); @@ -268,9 +318,14 @@ start_new_child(int pipes[]) } else { execle(SHELL, "sh", "-c", cmd, (char *) NULL, new_environ); } + + DEBUG_PRINT("exec error: %d",errno); + _exit(errno); + child_error: - DEBUG_PRINT("exec error: %d\r\n",errno); - _exit(128 + errno); + fprintf(stderr,"erl_child_setup: failed with error %d on line %d\r\n", + errno, errln); + _exit(errno); } @@ -461,7 +516,7 @@ main(int argc, char *argv[]) proto.action = ErtsSysForkerProtoAction_SigChld; proto.u.sigchld.error_number = ibuff[1]; - DEBUG_PRINT("send %s to %d", buff, uds_fd); + DEBUG_PRINT("send sigchld to %d (errno = %d)", uds_fd, ibuff[1]); if (write(uds_fd, &proto, sizeof(proto)) < 0) { if (errno == EINTR) continue; diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h index 3a0d23cd36..b64b0d87f6 100644 --- a/erts/emulator/sys/unix/erl_unix_sys.h +++ b/erts/emulator/sys/unix/erl_unix_sys.h @@ -282,7 +282,7 @@ ERTS_GLB_INLINE ErtsSysPerfCounter erts_sys_perf_counter(void); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -ERTS_GLB_INLINE ErtsSysPerfCounter +ERTS_GLB_FORCE_INLINE ErtsSysPerfCounter erts_sys_perf_counter() { return (*erts_sys_time_data__.r.o.perf_counter)(); diff --git a/erts/emulator/sys/unix/sys_drivers.c b/erts/emulator/sys/unix/sys_drivers.c index 812112fb91..400f163652 100644 --- a/erts/emulator/sys/unix/sys_drivers.c +++ b/erts/emulator/sys/unix/sys_drivers.c @@ -554,7 +554,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, ErtsSysDriverData *dd; char *cmd_line; char wd_buff[MAXPATHLEN+1]; - char *wd; + char *wd, *cwd; int ifd[2], ofd[2], stderrfd; if (pipe(ifd) < 0) return ERL_DRV_ERROR_ERRNO; @@ -631,24 +631,22 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, return ERL_DRV_ERROR_ERRNO; } - if (opts->wd == NULL) { - if ((wd = getcwd(wd_buff, MAXPATHLEN+1)) == NULL) { - /* on some OSs this call opens a fd in the - background which means that this can - return EMFILE */ - int err = errno; - close_pipes(ifd, ofd); - erts_free(ERTS_ALC_T_TMP, (void *) cmd_line); - if (new_environ != environ) - erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ); - 
erts_smp_rwmtx_runlock(&environ_rwmtx); - errno = err; - return ERL_DRV_ERROR_ERRNO; - } - } else { - wd = opts->wd; + if ((cwd = getcwd(wd_buff, MAXPATHLEN+1)) == NULL) { + /* on some OSs this call opens a fd in the + background which means that this can + return EMFILE */ + int err = errno; + close_pipes(ifd, ofd); + erts_free(ERTS_ALC_T_TMP, (void *) cmd_line); + if (new_environ != environ) + erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ); + erts_smp_rwmtx_runlock(&environ_rwmtx); + errno = err; + return ERL_DRV_ERROR_ERRNO; } + wd = opts->wd; + { struct iovec *io_vector; int iov_len = 5; @@ -660,6 +658,8 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, | (opts->read_write & DO_READ ? FORKER_FLAG_DO_READ : 0) | (opts->read_write & DO_WRITE ? FORKER_FLAG_DO_WRITE : 0); + if (wd) iov_len++; + /* count number of elements in environment */ while(new_environ[env_len] != NULL) env_len++; @@ -688,6 +688,10 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, return ERL_DRV_ERROR_ERRNO; } + /* + * Whitebox test port_SUITE:pipe_limit_env + * assumes this command payload format. + */ io_vector[i].iov_base = (void*)&buffsz; io_vector[i++].iov_len = sizeof(buffsz); @@ -700,10 +704,16 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, io_vector[i++].iov_len = len; buffsz += len; - io_vector[i].iov_base = wd; + io_vector[i].iov_base = cwd; io_vector[i].iov_len = strlen(io_vector[i].iov_base) + 1; buffsz += io_vector[i++].iov_len; + if (wd) { + io_vector[i].iov_base = wd; + io_vector[i].iov_len = strlen(io_vector[i].iov_base) + 1; + buffsz += io_vector[i++].iov_len; + } + io_vector[i].iov_base = nullbuff; io_vector[i++].iov_len = 1; buffsz += io_vector[i-1].iov_len; @@ -765,9 +775,14 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, if (res < (buffsz + sizeof(buffsz))) { /* we only wrote part of the command payload. Enqueue the rest. */ for (i = 0; i < iov_len; i++) { - driver_enq(port_num, io_vector[i].iov_base, io_vector[i].iov_len); + if (res >= io_vector[i].iov_len) + res -= io_vector[i].iov_len; + else { + driver_enq(port_num, io_vector[i].iov_base + res, + io_vector[i].iov_len - res); + res = 0; + } } - driver_deq(port_num, res); driver_select(port_num, ofd[1], ERL_DRV_WRITE|ERL_DRV_USE, 1); } diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile index b580211eff..2e48c475d5 100644 --- a/erts/emulator/test/Makefile +++ b/erts/emulator/test/Makefile @@ -70,6 +70,7 @@ MODULES= \ guard_SUITE \ hash_SUITE \ hibernate_SUITE \ + hipe_SUITE \ list_bif_SUITE \ lttng_SUITE \ map_SUITE \ diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl index 2347a3d4ef..34515efa3d 100644 --- a/erts/emulator/test/code_SUITE.erl +++ b/erts/emulator/test/code_SUITE.erl @@ -20,7 +20,8 @@ -module(code_SUITE). -export([all/0, suite/0, init_per_suite/1, end_per_suite/1, - versions/1,new_binary_types/1, + versions/1,new_binary_types/1, call_purged_fun_code_gone/1, + call_purged_fun_code_reload/1, call_purged_fun_code_there/1, t_check_process_code/1,t_check_old_code/1, t_check_process_code_ets/1, external_fun/1,get_chunk/1,module_md5/1,make_stub/1, @@ -34,7 +35,8 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. 
all() -> - [versions, new_binary_types, t_check_process_code, + [versions, new_binary_types, call_purged_fun_code_gone, + call_purged_fun_code_reload, call_purged_fun_code_there, t_check_process_code, t_check_process_code_ets, t_check_old_code, external_fun, get_chunk, module_md5, make_stub, make_stub_many_funs, constant_pools, constant_refc_binaries, false_dependency, @@ -127,12 +129,169 @@ new_binary_types(Config) when is_list(Config) -> bit_sized_binary(Bin))), ok. +call_purged_fun_code_gone(Config) when is_list(Config) -> + Priv = proplists:get_value(priv_dir, Config), + Data = proplists:get_value(data_dir, Config), + call_purged_fun_test(Priv, Data, code_gone), + ok. + +call_purged_fun_code_reload(Config) when is_list(Config) -> + Priv = proplists:get_value(priv_dir, Config), + Data = proplists:get_value(data_dir, Config), + Path = code:get_path(), + true = code:add_path(Priv), + try + call_purged_fun_test(Priv, Data, code_reload) + after + code:set_path(Path) + end, + ok. + +call_purged_fun_code_there(Config) when is_list(Config) -> + Priv = proplists:get_value(priv_dir, Config), + Data = proplists:get_value(data_dir, Config), + call_purged_fun_test(Priv, Data, code_there), + ok. + +call_purged_fun_test(Priv, Data, Type) -> + File = filename:join(Data, "my_code_test2"), + Code = filename:join(Priv, "my_code_test2"), + + catch erlang:purge_module(my_code_test2), + catch erlang:delete_module(my_code_test2), + catch erlang:purge_module(my_code_test2), + + {ok,my_code_test2} = c:c(File, [{outdir,Priv}]), + + T = ets:new(my_code_test2_fun_table, []), + ets:insert(T, {my_fun,my_code_test2:make_fun(4711)}), + ets:insert(T, {my_fun2,my_code_test2:make_fun2()}), + + spawn(fun () -> + [{my_fun2,F2}] = ets:lookup(T, my_fun2), + F2(fun () -> + receive after infinity -> ok end + end, + fun () -> ok end), + exit(completed) + end), + + PurgeType = case Type of + code_gone -> + ok = file:delete(Code++".beam"), + true; + code_reload -> + true; + code_there -> + false + end, + + true = erlang:delete_module(my_code_test2), + + Purge = start_purge(my_code_test2, PurgeType), + + {P0, M0} = spawn_monitor(fun () -> + [{my_fun,F}] = ets:lookup(T, my_fun), + 4712 = F(1), + exit(completed) + end), + + wait_until(fun () -> + {status, suspended} + == process_info(P0, status) + end), + + ok = continue_purge(Purge), + + {P1, M1} = spawn_monitor(fun () -> + [{my_fun,F}] = ets:lookup(T, my_fun), + 4713 = F(2), + exit(completed) + end), + {P2, M2} = spawn_monitor(fun () -> + [{my_fun,F}] = ets:lookup(T, my_fun), + 4714 = F(3), + exit(completed) + end), + + wait_until(fun () -> + {status, suspended} + == process_info(P1, status) + end), + wait_until(fun () -> + {status, suspended} + == process_info(P2, status) + end), + + {current_function, + {erts_code_purger, + pending_purge_lambda, + 3}} = process_info(P0, current_function), + {current_function, + {erts_code_purger, + pending_purge_lambda, + 3}} = process_info(P1, current_function), + {current_function, + {erts_code_purger, + pending_purge_lambda, + 3}} = process_info(P2, current_function), + + case Type of + code_there -> + false = complete_purge(Purge); + _ -> + {true, true} = complete_purge(Purge) + end, + + case Type of + code_gone -> + receive + {'DOWN', M0, process, P0, Reason0} -> + {undef, _} = Reason0 + end, + receive + {'DOWN', M1, process, P1, Reason1} -> + {undef, _} = Reason1 + end, + receive + {'DOWN', M2, process, P2, Reason2} -> + {undef, _} = Reason2 + end; + _ -> + receive + {'DOWN', M0, process, P0, Reason0} -> + completed = Reason0 + end, + 
receive + {'DOWN', M1, process, P1, Reason1} -> + completed = Reason1 + end, + receive + {'DOWN', M2, process, P2, Reason2} -> + completed = Reason2 + end, + catch erlang:purge_module(my_code_test2), + catch erlang:delete_module(my_code_test2), + catch erlang:purge_module(my_code_test2) + end, + ok. + t_check_process_code(Config) when is_list(Config) -> + case check_process_code_handle(indirect_references) of + false -> {skipped, "check_process_code() ignores funs"}; + true -> t_check_process_code_test(Config) + end. + +t_check_process_code_test(Config) -> Priv = proplists:get_value(priv_dir, Config), Data = proplists:get_value(data_dir, Config), File = filename:join(Data, "my_code_test"), Code = filename:join(Priv, "my_code_test"), + catch erlang:purge_module(my_code_test), + catch erlang:delete_module(my_code_test), + catch erlang:purge_module(my_code_test), + {ok,my_code_test} = c:c(File, [{outdir,Priv}]), MyFun = fun(X, Y) -> X + Y end, %Confuse things. @@ -231,11 +390,16 @@ gc1() -> ok. %% Test check_process_code/2 in combination with a fun obtained from an ets table. t_check_process_code_ets(Config) when is_list(Config) -> - case test_server:is_native(?MODULE) of - true -> - {skip,"Native code"}; - false -> - do_check_process_code_ets(Config) + case check_process_code_handle(indirect_references) of + false -> + {skipped, "check_process_code() ignores funs"}; + true -> + case test_server:is_native(?MODULE) of + true -> + {skip,"Native code"}; + false -> + do_check_process_code_ets(Config) + end end. do_check_process_code_ets(Config) -> @@ -243,8 +407,9 @@ do_check_process_code_ets(Config) -> Data = proplists:get_value(data_dir, Config), File = filename:join(Data, "my_code_test"), - erlang:purge_module(my_code_test), - erlang:delete_module(my_code_test), + catch erlang:purge_module(my_code_test), + catch erlang:delete_module(my_code_test), + catch erlang:purge_module(my_code_test), {ok,my_code_test} = c:c(File, [{outdir,Priv}]), T = ets:new(my_code_test, []), @@ -295,8 +460,8 @@ t_check_old_code(Config) when is_list(Config) -> Data = proplists:get_value(data_dir, Config), File = filename:join(Data, "my_code_test"), - erlang:purge_module(my_code_test), - erlang:delete_module(my_code_test), + catch erlang:purge_module(my_code_test), + catch erlang:delete_module(my_code_test), catch erlang:purge_module(my_code_test), false = erlang:check_old_code(my_code_test), @@ -971,3 +1136,39 @@ flush() -> receive _ -> flush() after 0 -> ok end. id(I) -> I. + +check_process_code_handle(What) -> + lists:member(What, erlang:system_info(check_process_code)). + +wait_until(Fun) -> + case Fun() of + true -> + ok; + false -> + receive after 100 -> ok end, + wait_until(Fun) + end. + +start_purge(Mod, Type) when is_atom(Mod) + andalso ((Type == true) + orelse (Type == false)) -> + Ref = make_ref(), + erts_code_purger ! {test_purge, Mod, self(), Type, Ref}, + receive + {started, Ref} -> + Ref + end. + +continue_purge(Ref) when is_reference(Ref) -> + erts_code_purger ! {continue, Ref}, + receive + {continued, Ref} -> + ok + end. + +complete_purge(Ref) when is_reference(Ref) -> + erts_code_purger ! {complete, Ref}, + receive + {test_purge, Res, Ref} -> + Res + end. diff --git a/erts/emulator/test/code_SUITE_data/my_code_test.erl b/erts/emulator/test/code_SUITE_data/my_code_test.erl index d2386157d6..9d12aa9897 100644 --- a/erts/emulator/test/code_SUITE_data/my_code_test.erl +++ b/erts/emulator/test/code_SUITE_data/my_code_test.erl @@ -24,5 +24,3 @@ make_fun(A) -> fun(X) -> A + X end. 
- - diff --git a/erts/emulator/test/code_SUITE_data/my_code_test2.erl b/erts/emulator/test/code_SUITE_data/my_code_test2.erl new file mode 100644 index 0000000000..57973535d4 --- /dev/null +++ b/erts/emulator/test/code_SUITE_data/my_code_test2.erl @@ -0,0 +1,32 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 1999-2016. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +-module(my_code_test2). + +-export([make_fun/1, make_fun2/0]). + +make_fun(A) -> + fun(X) -> A + X end. + +make_fun2() -> + fun (F1,F2) -> + F1(), + F2() + end. diff --git a/erts/emulator/test/dirty_nif_SUITE.erl b/erts/emulator/test/dirty_nif_SUITE.erl index 1a6737d59a..5e4c81a9cf 100644 --- a/erts/emulator/test/dirty_nif_SUITE.erl +++ b/erts/emulator/test/dirty_nif_SUITE.erl @@ -33,7 +33,8 @@ dirty_nif_exception/1, call_dirty_nif_exception/1, dirty_scheduler_exit/1, dirty_call_while_terminated/1, dirty_heap_access/1, dirty_process_info/1, - dirty_process_register/1, dirty_process_trace/1]). + dirty_process_register/1, dirty_process_trace/1, + code_purge/1, dirty_nif_send_traced/1]). -define(nif_stub,nif_stub_error(?LINE)). @@ -48,7 +49,9 @@ all() -> dirty_heap_access, dirty_process_info, dirty_process_register, - dirty_process_trace]. + dirty_process_trace, + code_purge, + dirty_nif_send_traced]. init_per_suite(Config) -> try erlang:system_info(dirty_cpu_schedulers) of @@ -353,6 +356,103 @@ dirty_process_trace(Config) when is_list(Config) -> ok end). +dirty_code_test_code() -> + " +-module(dirty_code_test). + +-export([func/1]). + +func(Fun) -> + Fun(), + blipp:blapp(). + +". + +code_purge(Config) when is_list(Config) -> + Path = ?config(data_dir, Config), + File = filename:join(Path, "dirty_code_test.erl"), + ok = file:write_file(File, dirty_code_test_code()), + {ok, dirty_code_test, Bin} = compile:file(File, [binary]), + {module, dirty_code_test} = erlang:load_module(dirty_code_test, Bin), + Start = erlang:monotonic_time(), + {Pid1, Mon1} = spawn_monitor(fun () -> + dirty_code_test:func(fun () -> + %% Sleep for 6 seconds + %% in dirty nif... + dirty_sleeper() + end) + end), + {module, dirty_code_test} = erlang:load_module(dirty_code_test, Bin), + {Pid2, Mon2} = spawn_monitor(fun () -> + dirty_code_test:func(fun () -> + %% Sleep for 6 seconds + %% in dirty nif... 
+ dirty_sleeper() + end) + end), + receive + {'DOWN', Mon1, process, Pid1, _} -> + ct:fail(premature_death) + after 100 -> + ok + end, + true = erlang:purge_module(dirty_code_test), + receive + {'DOWN', Mon1, process, Pid1, Reason1} -> + killed = Reason1 + end, + receive + {'DOWN', Mon2, process, Pid2, _} -> + ct:fail(premature_death) + after 100 -> + ok + end, + true = erlang:delete_module(dirty_code_test), + receive + {'DOWN', Mon2, process, Pid2, _} -> + ct:fail(premature_death) + after 100 -> + ok + end, + true = erlang:purge_module(dirty_code_test), + receive + {'DOWN', Mon2, process, Pid2, Reason2} -> + killed = Reason2 + end, + End = erlang:monotonic_time(), + Time = erlang:convert_time_unit(End-Start, native, milli_seconds), + io:format("Time=~p~n", [Time]), + true = Time =< 1000, + ok. + +dirty_nif_send_traced(Config) when is_list(Config) -> + Parent = self(), + Rcvr = spawn_link(fun() -> + Self = self(), + receive {ok, Self} -> ok end, + Parent ! {Self, received} + end), + Sndr = spawn_link(fun () -> + receive {Parent, go} -> ok end, + {ok, Rcvr} = send_wait_from_dirty_nif(Rcvr), + Parent ! {self(), sent} + end), + 1 = erlang:trace(Sndr, true, [send]), + Start = erlang:monotonic_time(), + Sndr ! {self(), go}, + receive {trace, Sndr, send, {ok, Rcvr}, Rcvr} -> ok end, + receive {Rcvr, received} -> ok end, + End1 = erlang:monotonic_time(), + Time1 = erlang:convert_time_unit(End1-Start, native, 1000), + io:format("Time1: ~p milliseconds~n", [Time1]), + true = Time1 < 500, + receive {Sndr, sent} -> ok end, + End2 = erlang:monotonic_time(), + Time2 = erlang:convert_time_unit(End2-Start, native, 1000), + io:format("Time2: ~p milliseconds~n", [Time2]), + true = Time2 >= 1900, + ok. + %% %% Internal... %% @@ -435,6 +535,7 @@ mcall(Node, Funs) -> lib_loaded() -> false. call_dirty_nif(_,_,_) -> ?nif_stub. send_from_dirty_nif(_) -> ?nif_stub. +send_wait_from_dirty_nif(_) -> ?nif_stub. call_dirty_nif_exception(_) -> ?nif_stub. call_dirty_nif_zero_args() -> ?nif_stub. dirty_call_while_terminated_nif(_) -> ?nif_stub. 
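The ?nif_stub clauses above follow the usual NIF placeholder pattern: each stub is only reached if the NIF library has not been loaded. A minimal sketch of that pattern, with an illustrative module and library name that are not taken from this patch:

    -module(nif_stub_demo).            %% illustrative name, not from the patch
    -export([do_something/1]).
    -on_load(init/0).

    %% Try to load the C library; the stub below is only reached if this fails.
    init() ->
        erlang:load_nif("./nif_stub_demo", 0).

    %% Placeholder clause that the loaded NIF replaces.
    do_something(_Arg) ->
        erlang:nif_error(nif_library_not_loaded).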
diff --git a/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c b/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c index d92933a096..a0019e5d95 100644 --- a/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c +++ b/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c @@ -100,6 +100,32 @@ static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_ return result; } +static ERL_NIF_TERM send_wait_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + ERL_NIF_TERM result; + ErlNifPid pid; + ErlNifEnv* menv; + int res; + + if (!enif_get_local_pid(env, argv[0], &pid)) + return enif_make_badarg(env); + result = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_pid(env, &pid)); + menv = enif_alloc_env(); + res = enif_send(env, &pid, menv, result); + enif_free_env(menv); + +#ifdef __WIN32__ + Sleep(2000); +#else + sleep(2); +#endif + + if (!res) + return enif_make_badarg(env); + else + return result; +} + static ERL_NIF_TERM call_dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { switch (argc) { @@ -237,6 +263,7 @@ static ErlNifFunc nif_funcs[] = {"lib_loaded", 0, lib_loaded}, {"call_dirty_nif", 3, call_dirty_nif}, {"send_from_dirty_nif", 1, send_from_dirty_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, + {"send_wait_from_dirty_nif", 1, send_wait_from_dirty_nif, ERL_NIF_DIRTY_JOB_IO_BOUND}, {"call_dirty_nif_exception", 1, call_dirty_nif_exception, ERL_NIF_DIRTY_JOB_IO_BOUND}, {"call_dirty_nif_zero_args", 0, call_dirty_nif_zero_args, ERL_NIF_DIRTY_JOB_CPU_BOUND}, {"dirty_sleeper", 0, dirty_sleeper, ERL_NIF_DIRTY_JOB_IO_BOUND}, diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl index c6939a695d..159fbc806b 100644 --- a/erts/emulator/test/distribution_SUITE.erl +++ b/erts/emulator/test/distribution_SUITE.erl @@ -43,7 +43,7 @@ lost_exit/1, link_to_dead/1, link_to_dead_new_node/1, applied_monitor_node/1, ref_port_roundtrip/1, nil_roundtrip/1, trap_bif_1/1, trap_bif_2/1, trap_bif_3/1, - stop_dist/1, + stop_dist/1, dist_auto_connect_never/1, dist_auto_connect_once/1, dist_parallel_send/1, atom_roundtrip/1, @@ -66,13 +66,13 @@ sendersender/4, sendersender2/4]). %% epmd_module exports --export([start_link/0, register_node/2, port_please/2]). +-export([start_link/0, register_node/2, register_node/3, port_please/2]). suite() -> [{ct_hooks,[ts_install_cth]}, {timetrap, {minutes, 4}}]. -all() -> +all() -> [ping, {group, bulk_send}, {group, local_send}, link_to_busy, exit_to_busy, lost_exit, link_to_dead, link_to_dead_new_node, applied_monitor_node, @@ -83,7 +83,7 @@ all() -> bad_dist_structure, {group, bad_dist_ext}, start_epmd_false, epmd_module]. -groups() -> +groups() -> [{bulk_send, [], [bulk_send_small, bulk_send_big, bulk_send_bigbig]}, {local_send, [], [local_send_small, local_send_big, local_send_legal]}, @@ -844,59 +844,50 @@ dist_auto_connect_once(Config) when is_list(Config) -> %% Result is sent here through relay node. 
dist_auto_connect_never(Config) when is_list(Config) -> Self = self(), - {ok, RelayNode} = - start_node(dist_auto_connect_relay), - spawn(RelayNode, + {ok, RelayNode} = start_node(dist_auto_connect_relay), + spawn(RelayNode, fun() -> register(dist_auto_connect_relay, self()), - dist_auto_connect_relay(Self) + dist_auto_connect_relay(Self) end), {ok, Handle} = dist_auto_connect_start(dist_auto_connect, never), - Result = - receive - {do_dist_auto_connect, ok} -> - ok; - {do_dist_auto_connect, Error} -> - {error, Error}; - Other -> - {error, Other} - after 32000 -> - timeout - end, + Result = receive + {do_dist_auto_connect, ok} -> + ok; + {do_dist_auto_connect, Error} -> + {error, Error}; + Other -> + {error, Other} + after 32000 -> + timeout + end, stop_node(RelayNode), - Stopped = dist_auto_connect_stop(Handle), - Junk = - receive - {do_dist_auto_connect, _} = J -> - J - after 0 -> - ok - end, + Stopped = dist_auto_connect_stop(Handle), + Junk = receive + {do_dist_auto_connect, _} = J -> J + after 0 -> ok + end, {ok, ok, ok} = {Result, Stopped, Junk}, ok. do_dist_auto_connect([never]) -> Node = list_to_atom("dist_auto_connect_relay@" ++ hostname()), - io:format("~p:do_dist_auto_connect([false]) Node=~p~n", - [?MODULE, Node]), + io:format("~p:do_dist_auto_connect([false]) Node=~p~n", [?MODULE, Node]), Ping = net_adm:ping(Node), - io:format("~p:do_dist_auto_connect([false]) Ping=~p~n", - [?MODULE, Ping]), + io:format("~p:do_dist_auto_connect([false]) Ping=~p~n", [?MODULE, Ping]), Result = case Ping of pang -> ok; _ -> {error, Ping} end, - io:format("~p:do_dist_auto_connect([false]) Result=~p~n", - [?MODULE, Result]), + io:format("~p:do_dist_auto_connect([false]) Result=~p~n", [?MODULE, Result]), net_kernel:connect_node(Node), catch {dist_auto_connect_relay, Node} ! {do_dist_auto_connect, Result}; % receive after 1000 -> ok end, % halt(); do_dist_auto_connect(Arg) -> - io:format("~p:do_dist_auto_connect(~p)~n", - [?MODULE, Arg]), + io:format("~p:do_dist_auto_connect(~p)~n", [?MODULE, Arg]), receive after 10000 -> ok end, halt(). @@ -912,11 +903,11 @@ dist_auto_connect_start(Name, Value) when is_list(Name), is_atom(Value) -> [%"xterm -e ", atom_to_list(lib:progname()), % " -noinput ", - " -detached ", + " -detached ", long_or_short(), " ", Name, " -setcookie ", Cookie, " -pa ", ModuleDir, - " -s ", atom_to_list(?MODULE), + " -s ", atom_to_list(?MODULE), " do_dist_auto_connect ", ValueStr, " -kernel dist_auto_connect ", ValueStr]), io:format("~p:dist_auto_connect_start() cmd: ~p~n", [?MODULE, Cmd]), @@ -947,7 +938,7 @@ dist_auto_connect_stop(Port, Node, Pid, N) when is_integer(N) -> end. -dist_auto_connect_relay(Parent) -> +dist_auto_connect_relay(Parent) -> receive X -> catch Parent ! X end, @@ -1321,7 +1312,7 @@ get_conflicting_unicode_atoms(CIX, N) -> start_monitor(Offender,P) -> Parent = self(), Q = spawn(Offender, - fun () -> + fun () -> Ref = erlang:monitor(process,P), Parent ! {self(),ref,Ref}, receive @@ -1458,8 +1449,8 @@ bad_dist_structure(Config) when is_list(Config) -> pong = rpc:call(Victim, net_adm, ping, [Offender]), P ! two, P ! 
check_msgs, - receive - {P, messages_checked} -> ok + receive + {P, messages_checked} -> ok after 5000 -> exit(victim_is_dead) end, @@ -1765,7 +1756,7 @@ send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,PayLoad) -> pong = net_adm:ping(Node), DPrt = dport(Node), Bad1 = case WhereToPutSelf of - 0 -> + 0 -> Bad; N when N > 0 -> setelement(N,Bad,self()) @@ -1779,8 +1770,8 @@ send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,PayLoad) -> port_command(DPrt, DData), Parent ! {DData,Done} end), - receive - {WhatSent,Done} -> + receive + {WhatSent,Done} -> io:format("Offender sent ~p~n",[WhatSent]), ok after 5000 -> @@ -1887,7 +1878,7 @@ dmsg_fake_hdr2() -> 1, size(A2), A2, 2, size(A3), A3]. -dmsg_ext(Term) -> +dmsg_ext(Term) -> <<131, Res/binary>> = term_to_binary(Term), Res. @@ -1934,7 +1925,9 @@ epmd_module(Config) when is_list(Config) -> start_link() -> ignore. -register_node(_Name, Port) -> +register_node(Name, Port) -> + register_node(Name, Port, inet_tcp). +register_node(_Name, Port, _Driver) -> %% Save the port number we're listening on. application:set_env(kernel, dist_listen_port, Port), Creation = rand:uniform(3), @@ -1972,7 +1965,7 @@ start_node(Name, Args, Rel) when is_atom(Name), is_list(Rel) -> [] -> []; _ -> [{erl,[{release,Rel}]}] end, - test_server:start_node(Name, slave, + test_server:start_node(Name, slave, [{args, Args++" -setcookie "++Cookie++" -pa \""++Pa++"\""} | RelArg]); @@ -2040,17 +2033,15 @@ inet_rpc_server_loop(Sock) -> start_relay_node(Node, Args) -> Pa = filename:dirname(code:which(?MODULE)), Cookie = "NOT"++atom_to_list(erlang:get_cookie()), - {ok, LSock} = gen_tcp:listen(0, [binary, {packet, 4}, - {active, false}]), + {ok, LSock} = gen_tcp:listen(0, [binary, {packet, 4}, {active, false}]), {ok, Port} = inet:port(LSock), {ok, Host} = inet:gethostname(), RunArg = "-run " ++ atom_to_list(?MODULE) ++ " inet_rpc_server " ++ Host ++ " " ++ integer_to_list(Port), - {ok, NN} = - test_server:start_node(Node, peer, - [{args, Args ++ - " -setcookie "++Cookie++" -pa "++Pa++" "++ - RunArg}]), + {ok, NN} = test_server:start_node(Node, peer, + [{args, Args ++ + " -setcookie "++Cookie++" -pa "++Pa++" "++ + RunArg}]), [N,H] = string:tokens(atom_to_list(NN),"@"), {ok, Sock} = gen_tcp:accept(LSock), pang = net_adm:ping(NN), @@ -2066,7 +2057,7 @@ wait_dead(N,H,0) -> wait_dead(N,H,X) -> case erl_epmd:port_please(N,H) of {port,_,_} -> - receive + receive after 1000 -> ok end, diff --git a/erts/emulator/test/hipe_SUITE.erl b/erts/emulator/test/hipe_SUITE.erl new file mode 100644 index 0000000000..3e682b8d88 --- /dev/null +++ b/erts/emulator/test/hipe_SUITE.erl @@ -0,0 +1,64 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2016. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +-module(hipe_SUITE). +-export([all/0, t_copy_literals/1]). + +all() -> + case erlang:system_info(hipe_architecture) of + undefined -> {skip, "HiPE is disabled"}; + _ -> [t_copy_literals] + end. 
+
+t_copy_literals(doc) ->
+ "Check that BEAM literals referenced from HiPE stack are copied by"
+ " check_process_code";
+t_copy_literals(Config) when is_list(Config) ->
+ %% Compile the ref_cell and literals modules.
+ Data = proplists:get_value(data_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
+ RefFile = filename:join(Data, "ref_cell"),
+ {ok,ref_cell} = c:c(RefFile, [{outdir,Priv},native]),
+ true = code:is_module_native(ref_cell),
+ LitFile = filename:join(Data, "literals"),
+ {ok,literals} = c:c(LitFile, [{outdir,Priv}]),
+
+ %% store references to literals on HiPE stacks
+ PA = ref_cell:start_link(),
+ ref_cell:call(PA, {put_res_of, fun literals:a/0}),
+ PB = ref_cell:start_link_deep(),
+ ref_cell:call(PB, {put_res_of, fun literals:b/0}),
+
+ %% purge the literals
+ _ = (catch erlang:purge_module(literals)),
+ true = erlang:delete_module(literals),
+ true = erlang:purge_module(literals),
+
+ %% check that the ex-literals are ok
+ [a,b,c] = ref_cell:call(PA, get),
+ {a,b,c} = ref_cell:call(PB, get),
+
+ %% cleanup
+ ref_cell:call(PA, done),
+ ref_cell:call(PB, done),
+ _ = (catch erlang:purge_module(ref_cell)),
+ true = erlang:delete_module(ref_cell),
+ true = erlang:purge_module(ref_cell),
+ ok.
diff --git a/erts/emulator/test/hipe_SUITE_data/literals.erl b/erts/emulator/test/hipe_SUITE_data/literals.erl
new file mode 100644
index 0000000000..31e443970f
--- /dev/null
+++ b/erts/emulator/test/hipe_SUITE_data/literals.erl
@@ -0,0 +1,26 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(literals).
+
+-export([a/0, b/0]).
+
+a() -> [a,b,c].
+b() -> {a,b,c}.
diff --git a/erts/emulator/test/hipe_SUITE_data/ref_cell.erl b/erts/emulator/test/hipe_SUITE_data/ref_cell.erl
new file mode 100644
index 0000000000..2654e4077b
--- /dev/null
+++ b/erts/emulator/test/hipe_SUITE_data/ref_cell.erl
@@ -0,0 +1,64 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(ref_cell).
+
+-export([start_link/0, start_link_deep/0, call/2]).
+
+-compile(native).
+
+-define(DEPTH, 100).
+-define(ALLOCS, 500).
+
+start_link() ->
+ spawn_link(fun() -> loop(undefined) end).
+
+start_link_deep() ->
+ spawn_link(fun() -> go_deep(?DEPTH) end).
+
+%% Create a stack large enough to get a graylimit trap placed next time there's
+%% a minor gc.
+go_deep(0) -> + alloc_some(?ALLOCS), + loop(undefined), + 0; +go_deep(Depth) -> + go_deep(Depth-1)+1. + +%% Do some allocation to trigger a minor gc +alloc_some(Amount) -> + Check = (Amount * (Amount + 1)) div 2, + Check = lists:sum(lists:seq(1, Amount)). + +call(Pid, Call) -> + Pid ! {Call, self()}, + receive {Pid, Res} -> Res end. + +loop(Thing) -> + receive + {done, Pid} -> Pid ! {self(), done}; + {{put_res_of, Fun}, Pid} -> + NewThing = Fun(), + Pid ! {self(), put}, + loop(NewThing); + {get, Pid} -> + Pid ! {self(), Thing}, + loop(Thing) + end. diff --git a/erts/emulator/test/map_SUITE.erl b/erts/emulator/test/map_SUITE.erl index b3870f0313..5af676c409 100644 --- a/erts/emulator/test/map_SUITE.erl +++ b/erts/emulator/test/map_SUITE.erl @@ -77,6 +77,7 @@ t_ets/1, t_dets/1, t_tracing/1, + t_hash_entropy/1, %% instruction-level tests t_has_map_fields/1, @@ -140,6 +141,7 @@ all() -> [t_build_and_match_literals, t_build_and_match_literals_large, t_pdict, t_ets, t_tracing, + t_hash_entropy, %% instruction-level tests t_has_map_fields, @@ -3020,6 +3022,39 @@ do_badmap_17(Config) -> id(I) -> I. +%% OTP-13763 +t_hash_entropy(Config) when is_list(Config) -> + %% entropy bug in 18.3, 19.0 + M1 = maps:from_list([{#{"id" => I}, ok}||I <- lists:seq(1,50000)]), + + #{ #{"id" => 100} := ok, + #{"id" => 200} := ok, + #{"id" => 300} := ok, + #{"id" => 400} := ok, + #{"id" => 500} := ok, + #{"id" => 600} := ok, + #{"id" => 700} := ok, + #{"id" => 800} := ok, + #{"id" => 900} := ok, + #{"id" => 25061} := ok, + #{"id" => 39766} := ok } = M1, + + M0 = maps:from_list([{I,ok}||I <- lists:seq(1,33)]), + M2 = maps:from_list([{M0#{"id" => I}, ok}||I <- lists:seq(1,50000)]), + + ok = maps:get(M0#{"id" => 100}, M2), + ok = maps:get(M0#{"id" => 200}, M2), + ok = maps:get(M0#{"id" => 300}, M2), + ok = maps:get(M0#{"id" => 400}, M2), + ok = maps:get(M0#{"id" => 500}, M2), + ok = maps:get(M0#{"id" => 600}, M2), + ok = maps:get(M0#{"id" => 700}, M2), + ok = maps:get(M0#{"id" => 800}, M2), + ok = maps:get(M0#{"id" => 900}, M2), + ok = maps:get(M0#{"id" => 25061}, M2), + ok = maps:get(M0#{"id" => 39766}, M2), + ok. + %% OTP-13146 %% Provoke major GC with a lot of "fat" maps on external format in msg queue %% causing heap fragments to be allocated. diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl index ee07699884..a9814d7df9 100644 --- a/erts/emulator/test/port_SUITE.erl +++ b/erts/emulator/test/port_SUITE.erl @@ -83,6 +83,7 @@ bad_port_messages/1, basic_ping/1, cd/1, + cd_relative/1, close_deaf_port/1, count_fds/1, dying_port/1, @@ -91,6 +92,7 @@ exit_status/1, exit_status_multi_scheduling_block/1, huge_env/1, + pipe_limit_env/1, input_only/1, iter_max_ports/1, line/1, @@ -102,6 +104,7 @@ mon_port_name_demonitor/1, mon_port_named/1, mon_port_origin_dies/1, + mon_port_owner_dies/1, mon_port_pid_demonitor/1, mon_port_remote_on_remote/1, mon_port_driver_die/1, @@ -137,7 +140,7 @@ win_massive_client/1 ]). --export([do_iter_max_ports/2]). +-export([do_iter_max_ports/2, relative_cd/0]). %% Internal exports. -export([tps/3]). 
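A note on the t_hash_entropy case added to map_SUITE above (OTP-13763): it guards against the 18.3/19.0 regression in which large maps whose keys are themselves small, similar maps ended up with poor hash spread and therefore degenerate lookup behaviour. The internal map hash is not reachable from Erlang code, so the rough shell sketch below uses erlang:phash2/2 purely as a stand-in to eyeball key spread; it is an illustration, not the mechanism the test itself relies on:

    Keys = [#{"id" => I} || I <- lists:seq(1, 50000)],
    Slots = length(lists:usort([erlang:phash2(K, 65536) || K <- Keys])),
    io:format("~p distinct slots for ~p keys~n", [Slots, length(Keys)]).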
@@ -158,7 +161,7 @@ all() ->
{group, multiple_packets}, parallell, dying_port, port_program_with_path, open_input_file_port, open_output_file_port, name1, env, huge_env, bad_env, cd,
- bad_args,
+ cd_relative, pipe_limit_env, bad_args,
exit_status, iter_max_ports, count_fds, t_exit, {group, tps}, line, stderr_to_stdout, otp_3906, otp_4389, win_massive, mix_up_ports, otp_5112, otp_5119,
@@ -171,6 +174,7 @@ all() ->
mon_port_remote_on_remote, mon_port_bad_remote_on_local, mon_port_origin_dies,
+ mon_port_owner_dies,
mon_port_named, mon_port_bad_named, mon_port_pid_demonitor,
@@ -1002,6 +1006,55 @@ huge_env(Config) when is_list(Config) ->
ct:fail("Open port failed ~p:~p",[E,R]) end.
+%% Test to spawn program with command payload buffer
+%% just around pipe capacity (9f779819f6bda734c5953468f7798)
+pipe_limit_env(Config) when is_list(Config) ->
+ Cmd = "true",
+ CmdSize = command_payload_size(Cmd),
+ Limits = [4096, 16384, 65536], % Try a couple of common pipe buffer sizes
+
+ lists:foreach(fun(Lim) ->
+ lists:foreach(fun(L) -> pipe_limit_env_do(L, Cmd, CmdSize)
+ end, lists:seq(Lim-5, Lim+5))
+ end, Limits),
+ ok.
+
+pipe_limit_env_do(Bytes, Cmd, CmdSize) ->
+ case env_of_bytes(Bytes-CmdSize) of
+ [] -> skip;
+ Env ->
+ try erlang:open_port({spawn,Cmd},[exit_status, {env, Env}]) of
+ P ->
+ receive
+ {P, {exit_status,N}} = M ->
+ %% Bug caused exit_status 150 (EINVAL+128)
+ 0 = N
+ end
+ catch E:R ->
+ %% Have to catch the error here, as printing the stackdump
+ %% in the ct log is way too heavy for some test machines.
+ ct:fail("Open port failed ~p:~p",[E,R])
+ end
+ end.
+
+%% environ format: KEY=VALUE\0
+env_of_bytes(Bytes) when Bytes > 3 ->
+ Env = [{"X",lists:duplicate(Bytes-3, $x)}];
+env_of_bytes(_) -> [].
+
+%% White box assumption about payload written to pipe
+%% for Cmd and current environment (see spawn_start in sys_driver.c)
+command_payload_size(Cmd) ->
+ EnvSize = lists:foldl(fun(E,Acc) -> length(E) + 1 + Acc end,
+ 0, os:getenv()),
+ {ok, PWD} = file:get_cwd(),
+ (4 % buffsz
+ + 4 % flags
+ + 5 + length(Cmd) + 1 % "exec $Cmd"
+ + length(PWD) + 1 % $PWD
+ + 1 % nullbuff
+ + 4 % env_len
+ + EnvSize).
%% Test bad 'args' options.
bad_args(Config) when is_list(Config) ->
@@ -1036,8 +1089,7 @@ cd(Config) when is_list(Config) ->
Cmd = Program ++ " -pz " ++ DataDir ++ " -noshell -s port_test pwd -s erlang halt", _ = open_port({spawn, Cmd},
- [{cd, TestDir},
- {line, 256}]),
+ [{cd, TestDir}, {line, 256}]),
receive {_, {data, {eol, String}}} -> case filename_equal(String, TestDir) of
@@ -1063,7 +1115,74 @@ cd(Config) when is_list(Config) ->
Other3 -> ct:fail({env, Other3}) end,
- ok.
+
+ InvalidDir = filename:join(DataDir, "invaliddir"),
+ try open_port({spawn, Cmd},
+ [{cd, InvalidDir}, exit_status, {line, 256}]) of
+ _ ->
+ receive
+ {_, {exit_status, _}} ->
+ ok;
+ Other4 ->
+ ct:fail({env, Other4})
+ end
+ catch error:eacces ->
+ %% This happens on Windows
+ ok
+ end,
+
+ %% Check that there are no lingering messages
+ receive
+ Other5 ->
+ ct:fail({env, Other5})
+ after 10 ->
+ ok
+ end.
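To make the white-box arithmetic in command_payload_size/1 above concrete, here is a hypothetical worked example; the cwd length and existing environment size are invented for illustration and are not taken from the patch. With Cmd = "true", a 30-character cwd and 1500 bytes of existing KEY=VALUE\0 data, the payload written to the pipe is

    4 + 4 + (5 + length("true") + 1) + (30 + 1) + 1 + 4 + 1500 = 1554 bytes

and pipe_limit_env_do/3 then pads a single "X=xx..." variable so that the total lands within a few bytes of each probed limit (4096, 16384 and 65536), which is what drives the spawn payload across the pipe-capacity boundary.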
+
+%% Test that an emulator that has set its cwd to
+%% something other than when it started, can use
+%% relative {cd,"./"} to open a port and that cd will
+%% be relative to the new cwd and not the original
+cd_relative(Config) ->
+
+ Program = atom_to_list(lib:progname()),
+ DataDir = proplists:get_value(data_dir, Config),
+ TestDir = filename:join(DataDir, "dir"),
+
+ Cmd = Program ++ " -pz " ++ filename:dirname(code:where_is_file("port_SUITE.beam")) ++
+ " -noshell -s port_SUITE relative_cd -s erlang halt",
+
+ _ = open_port({spawn, Cmd}, [{line, 256}, {cd, TestDir}]),
+
+ receive
+ {_, {data, {eol, String}}} ->
+ case filename_equal(String, TestDir) of
+ true ->
+ ok;
+ false ->
+ ct:fail({cd_relative, String})
+ end;
+ Other ->
+ ct:fail(Other)
+ end.
+
+relative_cd() ->
+
+ Program = atom_to_list(lib:progname()),
+ ok = file:set_cwd(".."),
+ {ok, Cwd} = file:get_cwd(),
+
+ Cmd = Program ++ " -pz " ++ Cwd ++
+ " -noshell -s port_test pwd -s erlang halt",
+
+ _ = open_port({spawn, Cmd}, [{line, 256}, {cd, "./dir"}, exit_status]),
+
+ receive
+ {_, {data, {eol, String}}} ->
+ io:format("~s~n",[String]);
+ Other ->
+ io:format("ERROR: ~p~n",[Other])
+ end.
filename_equal(A, B) -> case os:type() of
@@ -2520,6 +2639,29 @@ mon_port_origin_dies(Config) ->
Port5 ! {self(), {command, <<"1">>}}, % make port quit
ok.
+%% Port and Monitor owner dies before port is closed
+%% This testcase checks for a memory leak regression in erts
+%% when the controlling and monitoring process is the same process
+%% and the process dies
+mon_port_owner_dies(Config) ->
+ Self = self(),
+ Proc = spawn(fun() ->
+ Port = create_port(Config, ["-h1", "-q"]),
+ Self ! {test_started, Port},
+ erlang:monitor(port, Port),
+ receive stop -> ok end
+ end),
+ erlang:monitor(process, Proc), % we want to sync with its death
+ Port = receive {test_started,P} -> P
+ after 1000 -> ?assert(false) end,
+ ?assertMatch({proc_monitors, true, port_monitored_by, true},
+ port_is_monitored(Proc, Port)),
+ Proc ! stop,
+ %% receive from monitor
+ receive ExitP5 -> ?assertMatch({'DOWN', _, process, Proc, _}, ExitP5)
+ after 1000 -> ?assert(false) end,
+ ok.
+
+%% Monitor a named port
mon_port_named(Config) -> Name6 = test_port6,
diff --git a/erts/emulator/test/trace_local_SUITE.erl b/erts/emulator/test/trace_local_SUITE.erl
index 74c05f24e0..c297acd78b 100644
--- a/erts/emulator/test/trace_local_SUITE.erl
+++ b/erts/emulator/test/trace_local_SUITE.erl
@@ -439,6 +439,14 @@ return_test() ->
?RT(?MODULE,slave,2), shutdown(), ?NM,
+
+ %% Test a regression where turning off return_to tracing
+ %% on yourself would cause a segfault.
+ Pid = setup([call,return_to]),
+ erlang:trace_pattern({'_','_','_'},[],[local]),
+ apply_slave(erlang,trace,[Pid, false, [all]]),
+ shutdown(),
+ ok.
on_and_off_test() ->
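The return_test addition above covers a crash where a process that is being return_to-traced turns tracing off for itself. A minimal standalone sketch of the same shape is given below; the worker loop and the traced calls are invented for illustration (the suite's setup/1 and apply_slave/3 helpers do the equivalent), so treat it as a sketch rather than the test's exact mechanism:

    Worker = spawn(fun Loop() -> receive {run, F} -> F(), Loop() end end),
    erlang:trace(Worker, true, [call, return_to]),
    erlang:trace_pattern({'_', '_', '_'}, [], [local]),
    Worker ! {run, fun() -> lists:seq(1, 3) end},
    %% The worker now clears its own trace flags; before the fix this could crash the VM.
    Worker ! {run, fun() -> erlang:trace(self(), false, [all]) end}.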