Diffstat (limited to 'erts/emulator')
158 files changed, 10521 insertions, 11490 deletions
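Note (not part of the commit): the diff below replaces the thread-blocking code-loading and breakpoint paths with a staged, double-buffered index — readers look up code through erts_active_code_ix() (and the breakpoint data through erts_active_bp_ix()) without locking, while a single writer holding the code write permission fills the staging generation and then commits it. The sketch here is only an illustration of that pattern under made-up names (code_table, stage_and_commit, lookup); it is not the ERTS implementation.

/*
 * Minimal sketch of a double-buffered "code index", assuming C11 atomics.
 * Readers dereference the active slot lock-free; the writer prepares the
 * staging slot and commits by atomically flipping the index.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_IX 2                       /* two generations: active + staging */

static const char *code_table[NUM_IX] = { "old code", "old code" };
static atomic_uint active_ix = 0;      /* index read by "schedulers" */

/* Reader side: lock-free lookup in the currently active generation. */
static const char *lookup(void)
{
    return code_table[atomic_load_explicit(&active_ix, memory_order_acquire)];
}

/* Writer side: stage new contents, then commit with one atomic store. */
static void stage_and_commit(const char *new_code)
{
    unsigned staging = (atomic_load(&active_ix) + 1) % NUM_IX;
    code_table[staging] = new_code;               /* "loading" into staging */
    atomic_store_explicit(&active_ix, staging,    /* commit: readers switch */
                          memory_order_release);
}

int main(void)
{
    printf("before: %s\n", lookup());
    stage_and_commit("new code");
    printf("after:  %s\n", lookup());
    return 0;
}

In the actual changes that follow, the commit step is additionally deferred through a thread-progress callback (smp_code_ix_commiter scheduled with erts_schedule_thr_prgr_later_op) so that every scheduler has passed a memory barrier before the staged generation becomes active; the sketch above omits that step.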
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in index 985ef72517..7e966c81bb 100644 --- a/erts/emulator/Makefile.in +++ b/erts/emulator/Makefile.in @@ -332,15 +332,11 @@ LIBS += $(THR_LIBS) ifneq ($(findstring erts_internal_r, $(THR_LIBS)),erts_internal_r) -ifeq ($(findstring vxworks,$(TARGET)),vxworks) -ERTS_INTERNAL_LIB=erts_internal -else ifneq ($(strip $(THR_LIB_NAME)),) ERTS_INTERNAL_LIB=erts_internal_r else ERTS_INTERNAL_LIB=erts_internal endif -endif LIBS += -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER) @@ -463,8 +459,6 @@ _create_dirs := $(shell mkdir -p $(CREATE_DIRS)) GENERATE = HIPE_ASM = -ifeq ($(findstring vxworks,$(TARGET)),vxworks) -else ifdef HIPE_ENABLED HIPE_ASM += $(TTF_DIR)/hipe_x86_asm.h \ $(TTF_DIR)/hipe_amd64_asm.h \ @@ -476,7 +470,6 @@ GENERATE += $(HIPE_ASM) \ $(TTF_DIR)/hipe_literals.h \ $(BINDIR)/hipe_mkliterals$(TF_MARKER) endif -endif ifdef DTRACE_ENABLED # global.h causes problems by including dtrace-wrapper.h which includes @@ -616,11 +609,6 @@ ifdef PERFCTR_PATH INCLUDES += -I$(PERFCTR_PATH)/usr.lib -I$(PERFCTR_PATH)/linux/include endif -# Need to include etc dir on VxWorks -ifeq ($(findstring vxworks,$(TARGET)),vxworks) -INCLUDES += -I$(ERL_TOP)/erts/etc/vxworks -endif - ifeq ($(TARGET),win32) $(OBJDIR)/dll_sys.o: sys/$(ERLANG_OSTYPE)/sys.c $(CC) $(CFLAGS) -DERL_RUN_SHARED_LIB=1 $(INCLUDES) -c $< -o $@ @@ -663,12 +651,6 @@ $(OBJDIR)/%.o: drivers/common/%.c $(OBJDIR)/%.o: drivers/$(ERLANG_OSTYPE)/%.c $(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE) -I../etc/$(ERLANG_OSTYPE) -c $< -o $@ -# VxWorks uses unix drivers too... -ifeq ($(findstring vxworks,$(TARGET)),vxworks) -$(OBJDIR)/%.o: drivers/unix/%.c - $(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -c $< -o $@ -endif - # ---------------------------------------------------------------------- # Specials # @@ -709,7 +691,9 @@ EMU_OBJS = \ $(OBJDIR)/beam_emu.o $(OBJDIR)/beam_opcodes.o \ $(OBJDIR)/beam_load.o $(OBJDIR)/beam_bif_load.o \ $(OBJDIR)/beam_debug.o $(OBJDIR)/beam_bp.o \ - $(OBJDIR)/beam_catches.o + $(OBJDIR)/beam_catches.o \ + $(OBJDIR)/code_ix.o \ + $(OBJDIR)/beam_ranges.o RUN_OBJS = \ $(OBJDIR)/erl_pbifs.o $(OBJDIR)/benchmark.o \ @@ -779,12 +763,8 @@ OS_OBJS = \ $(OBJDIR)/gzio.o \ $(OBJDIR)/elib_memmove.o -ifeq ($(findstring vxworks,$(TARGET)),vxworks) - OS_OBJS += $(OBJDIR)/int64.o -else OS_OBJS += $(OBJDIR)/sys_float.o \ $(OBJDIR)/sys_time.o -endif DRV_OBJS = \ $(OBJDIR)/efile_drv.o \ $(OBJDIR)/inet_drv.o \ @@ -792,9 +772,7 @@ DRV_OBJS = \ $(OBJDIR)/ram_file_drv.o endif -ifneq ($(findstring vxworks,$(TARGET)),vxworks) DRV_OBJS += $(OBJDIR)/ttsl_drv.o -endif ifeq ($(ERTS_ENABLE_KERNEL_POLL),yes) OS_OBJS += $(OBJDIR)/erl_poll.kp.o \ @@ -932,31 +910,6 @@ $(OBJDIR)/hipe_arm.o: hipe/hipe_arm.c # end of HiPE section ######################################## -ifeq ($(findstring vxworks,$(TARGET)),vxworks) -######################################## -# Extract what we need from libgcc.a -######################################## -GCCLIBFLAGS=@GCCLIBFLAGS@ -STRIP=@STRIP@ -SYMPREFIX=@SYMPREFIX@ - -NEEDFUNCTIONS=__divdi3 __moddi3 __udivdi3 -KEEPSYMS=$(NEEDFUNCTIONS:%=-K $(SYMPREFIX)%) - -$(OBJDIR)/int64.o: $(TARGET)/int64.c - $(CC) -o $(OBJDIR)/int64tmp.o -c $(TARGET)/int64.c - $(LD) -o $(OBJDIR)/int64.o $(OBJDIR)/int64tmp.o $(LDFLAGS) $(GCCLIBFLAGS) - $(STRIP) $(KEEPSYMS) $(OBJDIR)/int64.o - -$(TARGET)/int64.c: - echo 'void dummy(void); void dummy(void) {' > $(TARGET)/int64.c - for x in $(NEEDFUNCTIONS); do echo 'extern void '$$x'();' \ - >> $(TARGET)/int64.c; done - for x in 
$(NEEDFUNCTIONS); do echo $$x'();' >> $(TARGET)/int64.c; done - echo '}' >> $(TARGET)/int64.c - -endif - # ---------------------------------------------------------------------- # The emulator itself diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index 106fad030b..afcbd732df 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -252,6 +252,7 @@ atom heap_block_size atom heap_size atom heap_sizes atom heap_type +atom heart_port atom heir atom hidden atom hide diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 02f37bf9bc..9e4add823d 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -37,25 +37,83 @@ static void set_default_trace_pattern(Eterm module); static Eterm check_process_code(Process* rp, Module* modp); -static void delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp); -static void delete_export_references(Eterm module); -static int purge_module(int module); +static void delete_code(Module* modp); static void decrement_refc(BeamInstr* code); static int is_native(BeamInstr* code); static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size); static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size); -static void remove_from_address_table(BeamInstr* code); -Eterm -load_module_2(BIF_ALIST_2) + + +BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1) +{ + Module* modp; + Eterm res; + ErtsCodeIndex code_ix; + + if (is_not_atom(BIF_ARG_1)) { + BIF_ERROR(BIF_P, BADARG); + } + code_ix = erts_active_code_ix(); + if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) { + return am_undefined; + } + erts_rlock_old_code(code_ix); + res = ((modp->curr.code && is_native(modp->curr.code)) || + (modp->old.code != 0 && is_native(modp->old.code))) ? + am_true : am_false; + erts_runlock_old_code(code_ix); + return res; +} + +BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3) { - Eterm reason; - Eterm* hp; - int sz; - byte* code; + Module* modp; Eterm res; + + if (!erts_try_seize_code_write_permission(BIF_P)) { + ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3], + BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); + } + + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_thr_progress_block(); + + modp = erts_get_module(BIF_ARG_1, erts_active_code_ix()); + + if (modp && modp->curr.num_breakpoints > 0) { + ASSERT(modp->curr.code != NULL); + erts_clear_module_break(modp); + ASSERT(modp->curr.num_breakpoints == 0); + } + + erts_start_staging_code_ix(); + + res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); + + if (res == BIF_ARG_1) { + erts_end_staging_code_ix(); + erts_commit_staging_code_ix(); + } + else { + erts_abort_staging_code_ix(); + } + erts_smp_thr_progress_unblock(); + erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_release_code_write_permission(); + return res; +} + +BIF_RETTYPE +prepare_loading_2(BIF_ALIST_2) +{ byte* temp_alloc = NULL; - struct LoaderState* stp; + byte* code; + Uint sz; + Binary* magic; + Eterm reason; + Eterm* hp; + Eterm res; if (is_not_atom(BIF_ARG_1)) { error: @@ -65,108 +123,307 @@ load_module_2(BIF_ALIST_2) if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) { goto error; } - hp = HAlloc(BIF_P, 3); - /* - * Read the BEAM file and prepare the module for loading. 
- */ - stp = erts_alloc_loader_state(); + magic = erts_alloc_loader_state(); sz = binary_size(BIF_ARG_2); - reason = erts_prepare_loading(stp, BIF_P, BIF_P->group_leader, + reason = erts_prepare_loading(magic, BIF_P, BIF_P->group_leader, &BIF_ARG_1, code, sz); erts_free_aligned_binary_bytes(temp_alloc); if (reason != NIL) { + hp = HAlloc(BIF_P, 3); res = TUPLE2(hp, am_error, reason); BIF_RET(res); } + hp = HAlloc(BIF_P, PROC_BIN_SIZE); + res = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), magic); + erts_refc_dec(&magic->refc, 1); + BIF_RET(res); +} + +struct m { + Binary* code; + Eterm module; + Module* modp; + Uint exception; +}; + +static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int); +#ifdef ERTS_SMP +static void smp_code_ix_commiter(void*); + +static struct /* Protected by code_write_permission */ +{ + Process* stager; + ErtsThrPrgrLaterOp lop; +}commiter_state; +#endif + +static Eterm +exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions) +{ + Eterm* hp = HAlloc(p, 3 + 2*exceptions); + Eterm res = NIL; + + mp += exceptions - 1; + while (exceptions > 0) { + if (mp->exception) { + res = CONS(hp, mp->module, res); + hp += 2; + exceptions--; + } + mp--; + } + return TUPLE2(hp, tag, res); +} + + +BIF_RETTYPE +finish_loading_1(BIF_ALIST_1) +{ + int i; + int n; + struct m* p = NULL; + Uint exceptions; + Eterm res; + int is_blocking = 0; + int do_commit = 0; + + if (!erts_try_seize_code_write_permission(BIF_P)) { + ERTS_BIF_YIELD1(bif_export[BIF_finish_loading_1], BIF_P, BIF_ARG_1); + } /* - * Stop all other processes and finish the loading of the module. + * Validate the argument before we start loading; it must be a + * proper list where each element is a magic binary containing + * prepared (not previously loaded) code. + * + * First count the number of elements and allocate an array + * to keep the elements in. */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - reason = erts_finish_loading(stp, BIF_P, 0, &BIF_ARG_1); - if (reason != NIL) { - res = TUPLE2(hp, am_error, reason); - } else { - set_default_trace_pattern(BIF_ARG_1); - res = TUPLE2(hp, am_module, BIF_ARG_1); + n = list_length(BIF_ARG_1); + if (n == -1) { + ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG); + goto done; } + p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m)); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - BIF_RET(res); -} + /* + * We now know that the argument is a proper list. Validate + * and collect the binaries into the array. + */ -BIF_RETTYPE purge_module_1(BIF_ALIST_1) -{ - int purge_res; + for (i = 0; i < n; i++) { + Eterm* cons = list_val(BIF_ARG_1); + Eterm term = CAR(cons); + ProcBin* pb; - if (is_not_atom(BIF_ARG_1)) { - BIF_ERROR(BIF_P, BADARG); + if (!ERTS_TERM_IS_MAGIC_BINARY(term)) { + ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG); + goto done; + } + pb = (ProcBin*) binary_val(term); + p[i].code = pb->val; + p[i].module = erts_module_for_prepared_code(p[i].code); + if (p[i].module == NIL) { + ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG); + goto done; + } + BIF_ARG_1 = CDR(cons); } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + /* + * Since we cannot handle atomic loading of a group of modules + * if one or more of them uses on_load, we will only allow one + * element in the list. This limitation is intended to be + * lifted in the future. 
+ */ - erts_export_consolidate(); - purge_res = purge_module(atom_val(BIF_ARG_1)); + if (n > 1) { + ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT); + goto done; + } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + /* + * All types are correct. There cannot be a BADARG from now on. + * Before we can start loading, we must check whether any of + * the modules already has old code. To avoid a race, we must + * not allow other process to initiate a code loading operation + * from now on. + */ - if (purge_res < 0) { - BIF_ERROR(BIF_P, BADARG); + res = am_ok; + erts_start_staging_code_ix(); + + for (i = 0; i < n; i++) { + p[i].modp = erts_put_module(p[i].module); + } + for (i = 0; i < n; i++) { + if (p[i].modp->curr.num_breakpoints > 0 || + p[i].modp->curr.num_traced_exports > 0 || + erts_is_default_trace_enabled()) { + /* tracing involved, fallback with thread blocking */ + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_thr_progress_block(); + is_blocking = 1; + break; + } } - BIF_RET(am_true); -} -BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1) -{ - Module* modp; + if (is_blocking) { + for (i = 0; i < n; i++) { + if (p[i].modp->curr.num_breakpoints) { + erts_clear_module_break(p[i].modp); + ASSERT(p[i].modp->curr.num_breakpoints == 0); + } + } + } - if (is_not_atom(BIF_ARG_1)) { - BIF_ERROR(BIF_P, BADARG); + exceptions = 0; + for (i = 0; i < n; i++) { + p[i].exception = 0; + if (p[i].modp->curr.code && p[i].modp->old.code) { + p[i].exception = 1; + exceptions++; + } } - if ((modp = erts_get_module(BIF_ARG_1)) == NULL) { - return am_undefined; + + if (exceptions) { + res = exception_list(BIF_P, am_not_purged, p, exceptions); + } else { + /* + * Now we can load all code. This can't fail. + */ + + exceptions = 0; + for (i = 0; i < n; i++) { + Eterm mod; + Eterm retval; + + erts_refc_inc(&p[i].code->refc, 1); + retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod); + ASSERT(retval == NIL || retval == am_on_load); + if (retval == am_on_load) { + p[i].exception = 1; + exceptions++; + } + } + + /* + * Check whether any module has an on_load_handler. + */ + + if (exceptions) { + res = exception_list(BIF_P, am_on_load, p, exceptions); + } + do_commit = 1; } - return ((modp->code && is_native(modp->code)) || - (modp->old_code != 0 && is_native(modp->old_code))) ? - am_true : am_false; + +done: + return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n); } -BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3) -{ - Eterm res; +static Eterm +staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking, + struct m* loaded, int nloaded) +{ +#ifdef ERTS_SMP + if (is_blocking || !commit) +#endif + { + if (commit) { + erts_end_staging_code_ix(); + erts_commit_staging_code_ix(); + if (loaded) { + int i; + for (i=0; i < nloaded; i++) { + set_default_trace_pattern(loaded[i].module); + } + } + } + else { + erts_abort_staging_code_ix(); + } + if (loaded) { + erts_free(ERTS_ALC_T_LOADER_TMP, loaded); + } + if (is_blocking) { + erts_smp_thr_progress_unblock(); + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + } + erts_release_code_write_permission(); + return res; + } +#ifdef ERTS_SMP + else { + ASSERT(is_value(res)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + if (loaded) { + erts_free(ERTS_ALC_T_LOADER_TMP, loaded); + } + erts_end_staging_code_ix(); + /* + * Now we must wait for all schedulers to do a memory barrier before + * we can commit and let them access the new staged code. 
This allows + * schedulers to read active code_ix in a safe way while executing + * without any memory barriers at all. + */ + ASSERT(commiter_state.stager == NULL); + commiter_state.stager = c_p; + erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &commiter_state.lop); + erts_smp_proc_inc_refc(c_p); + erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); + /* + * smp_code_ix_commiter() will do the rest "later" + * and resume this process to return 'res'. + */ + ERTS_BIF_YIELD_RETURN(c_p, res); + } +#endif +} - erts_export_consolidate(); - res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - return res; +#ifdef ERTS_SMP +static void smp_code_ix_commiter(void* null) +{ + Process* p = commiter_state.stager; + + erts_commit_staging_code_ix(); + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + if (!ERTS_PROC_IS_EXITING(p)) { + erts_resume(p, ERTS_PROC_LOCK_STATUS); + } + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_smp_proc_dec_refc(p); +#ifdef DEBUG + commiter_state.stager = NULL; +#endif + erts_release_code_write_permission(); } +#endif /* ERTS_SMP */ + + BIF_RETTYPE check_old_code_1(BIF_ALIST_1) { + ErtsCodeIndex code_ix; Module* modp; + Eterm res = am_false; if (is_not_atom(BIF_ARG_1)) { BIF_ERROR(BIF_P, BADARG); } - modp = erts_get_module(BIF_ARG_1); - if (modp == NULL) { /* Doesn't exist. */ - BIF_RET(am_false); - } else if (modp->old_code == NULL) { /* No old code. */ - BIF_RET(am_false); + code_ix = erts_active_code_ix(); + modp = erts_get_module(BIF_ARG_1, code_ix); + if (modp != NULL) { + erts_rlock_old_code(code_ix); + if (modp->old.code != NULL) { + res = am_true; + } + erts_runlock_old_code(code_ix); } - BIF_RET(am_true); + BIF_RET(res); } Eterm @@ -180,14 +437,20 @@ check_process_code_2(BIF_ALIST_2) } if (is_internal_pid(BIF_ARG_1)) { Eterm res; + ErtsCodeIndex code_ix; if (internal_pid_index(BIF_ARG_1) >= erts_max_processes) goto error; - modp = erts_get_module(BIF_ARG_2); + code_ix = erts_active_code_ix(); + modp = erts_get_module(BIF_ARG_2, code_ix); if (modp == NULL) { /* Doesn't exist. */ return am_false; - } else if (modp->old_code == NULL) { /* No old code. */ + } + erts_rlock_old_code(code_ix); + if (modp->old.code == NULL) { /* No old code. 
*/ + erts_runlock_old_code(code_ix); return am_false; } + erts_runlock_old_code(code_ix); #ifdef ERTS_SMP rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, @@ -202,7 +465,14 @@ check_process_code_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } - res = check_process_code(rp, modp); + erts_rlock_old_code(code_ix); + if (modp->old.code != NULL) { /* must check again */ + res = check_process_code(rp, modp); + } + else { + res = am_false; + } + erts_runlock_old_code(code_ix); #ifdef ERTS_SMP if (BIF_P != rp) { erts_resume(rp, ERTS_PROC_LOCK_MAIN); @@ -223,56 +493,71 @@ check_process_code_2(BIF_ALIST_2) BIF_RETTYPE delete_module_1(BIF_ALIST_1) { - int res; + ErtsCodeIndex code_ix; + Module* modp; + int is_blocking = 0; + int success = 0; + Eterm res = NIL; - if (is_not_atom(BIF_ARG_1)) - goto badarg; + if (is_not_atom(BIF_ARG_1)) { + BIF_ERROR(BIF_P, BADARG); + } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + if (!erts_try_seize_code_write_permission(BIF_P)) { + ERTS_BIF_YIELD1(bif_export[BIF_delete_module_1], BIF_P, BIF_ARG_1); + } { - Module *modp = erts_get_module(BIF_ARG_1); + erts_start_staging_code_ix(); + code_ix = erts_staging_code_ix(); + modp = erts_get_module(BIF_ARG_1, code_ix); if (!modp) { res = am_undefined; } - else if (modp->old_code != 0) { + else if (modp->old.code != 0) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Module %T must be purged before loading\n", BIF_ARG_1); erts_send_error_to_logger(BIF_P->group_leader, dsbufp); - res = am_badarg; + ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG); } else { - delete_export_references(BIF_ARG_1); - delete_code(BIF_P, 0, modp); + if (modp->curr.num_breakpoints > 0 || + modp->curr.num_traced_exports > 0) { + /* we have tracing, retry single threaded */ + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_thr_progress_block(); + is_blocking = 1; + if (modp->curr.num_breakpoints) { + erts_clear_module_break(modp); + ASSERT(modp->curr.num_breakpoints == 0); + } + } + delete_code(modp); res = am_true; + success = 1; } } - - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - - if (res == am_badarg) { - badarg: - BIF_ERROR(BIF_P, BADARG); - } - BIF_RET(res); + return staging_epilogue(BIF_P, success, res, is_blocking, NULL, 0); } BIF_RETTYPE module_loaded_1(BIF_ALIST_1) { Module* modp; + ErtsCodeIndex code_ix; + Eterm res = am_false; if (is_not_atom(BIF_ARG_1)) { BIF_ERROR(BIF_P, BADARG); } - if ((modp = erts_get_module(BIF_ARG_1)) == NULL || - modp->code == NULL || - modp->code[MI_ON_LOAD_FUNCTION_PTR] != 0) { - BIF_RET(am_false); + code_ix = erts_active_code_ix(); + if ((modp = erts_get_module(BIF_ARG_1, code_ix)) != NULL) { + if (modp->curr.code != NULL + && modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] == 0) { + res = am_true; + } } - BIF_RET(am_true); + BIF_RET(res); } BIF_RETTYPE pre_loaded_0(BIF_ALIST_0) @@ -282,27 +567,28 @@ BIF_RETTYPE pre_loaded_0(BIF_ALIST_0) BIF_RETTYPE loaded_0(BIF_ALIST_0) { + ErtsCodeIndex code_ix = erts_active_code_ix(); + Module* modp; Eterm previous = NIL; Eterm* hp; int i; int j = 0; - - for (i = 0; i < module_code_size(); i++) { - if (module_code(i) != NULL && - ((module_code(i)->code_length != 0) || - (module_code(i)->old_code_length != 0))) { + + for (i = 0; i < module_code_size(code_ix); i++) { + if ((modp = module_code(i, code_ix)) != NULL && + ((modp->curr.code_length != 0) || + (modp->old.code_length != 0))) { j++; } } if (j > 0) { hp 
= HAlloc(BIF_P, j*2); - for (i = 0; i < module_code_size(); i++) { - if (module_code(i) != NULL && - ((module_code(i)->code_length != 0) || - (module_code(i)->old_code_length != 0))) { - previous = CONS(hp, make_atom(module_code(i)->module), - previous); + for (i = 0; i < module_code_size(code_ix); i++) { + if ((modp=module_code(i,code_ix)) != NULL && + ((modp->curr.code_length != 0) || + (modp->old.code_length != 0))) { + previous = CONS(hp, make_atom(modp->module), previous); hp += 2; } } @@ -312,54 +598,65 @@ BIF_RETTYPE loaded_0(BIF_ALIST_0) BIF_RETTYPE call_on_load_function_1(BIF_ALIST_1) { - Module* modp = erts_get_module(BIF_ARG_1); - Eterm on_load; + Module* modp = erts_get_module(BIF_ARG_1, erts_active_code_ix()); - if (!modp || modp->code == 0) { - error: - BIF_ERROR(BIF_P, BADARG); + if (modp && modp->curr.code) { + BIF_TRAP_CODE_PTR_0(BIF_P, modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]); } - if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) { - goto error; + else { + BIF_ERROR(BIF_P, BADARG); } - BIF_TRAP_CODE_PTR_0(BIF_P, on_load); } BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) { - Module* modp = erts_get_module(BIF_ARG_1); + ErtsCodeIndex code_ix; + Module* modp; Eterm on_load; - if (!modp || modp->code == 0) { + if (!erts_try_seize_code_write_permission(BIF_P)) { + ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2], + BIF_P, BIF_ARG_1, BIF_ARG_2); + } + + /* ToDo: Use code_ix staging instead of thread blocking */ + + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_thr_progress_block(); + + code_ix = erts_active_code_ix(); + modp = erts_get_module(BIF_ARG_1, code_ix); + + if (!modp || modp->curr.code == 0) { error: + erts_smp_thr_progress_unblock(); + erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_release_code_write_permission(); BIF_ERROR(BIF_P, BADARG); } - if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) { + if ((on_load = modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]) == 0) { goto error; } if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) { goto error; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - if (BIF_ARG_2 == am_true) { int i; /* * The on_load function succeded. Fix up export entries. */ - for (i = 0; i < export_list_size(); i++) { - Export *ep = export_list(i); + for (i = 0; i < export_list_size(code_ix); i++) { + Export *ep = export_list(i,code_ix); if (ep != NULL && ep->code[0] == BIF_ARG_1 && ep->code[4] != 0) { - ep->address = (void *) ep->code[4]; + ep->addressv[code_ix] = (void *) ep->code[4]; ep->code[4] = 0; } } - modp->code[MI_ON_LOAD_FUNCTION_PTR] = 0; + modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] = 0; set_default_trace_pattern(BIF_ARG_1); } else if (BIF_ARG_2 == am_false) { BeamInstr* code; @@ -370,19 +667,21 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) * This is an combination of delete and purge. We purge * the current code; the old code is not touched. 
*/ - erts_total_code_size -= modp->code_length; - code = modp->code; - end = (BeamInstr *)((char *)code + modp->code_length); + erts_total_code_size -= modp->curr.code_length; + code = modp->curr.code; + end = (BeamInstr *)((char *)code + modp->curr.code_length); erts_cleanup_funs_on_purge(code, end); - beam_catches_delmod(modp->catches, code, modp->code_length); + beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length, + erts_active_code_ix()); erts_free(ERTS_ALC_T_CODE, (void *) code); - modp->code = NULL; - modp->code_length = 0; - modp->catches = BEAM_CATCHES_NIL; - remove_from_address_table(code); + modp->curr.code = NULL; + modp->curr.code_length = 0; + modp->curr.catches = BEAM_CATCHES_NIL; + erts_remove_from_ranges(code); } erts_smp_thr_progress_unblock(); erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_release_code_write_permission(); BIF_RET(am_true); } @@ -403,11 +702,11 @@ set_default_trace_pattern(Eterm module) if (trace_pattern_is_on) { Eterm mfa[1]; mfa[0] = module; - (void) erts_set_trace_pattern(mfa, 1, + (void) erts_set_trace_pattern(0, mfa, 1, match_spec, meta_match_spec, 1, trace_pattern_flags, - meta_tracer_pid); + meta_tracer_pid, 1); } } @@ -427,10 +726,10 @@ check_process_code(Process* rp, Module* modp) /* * Pick up limits for the module. */ - start = modp->old_code; - end = (BeamInstr *)((char *)start + modp->old_code_length); + start = modp->old.code; + end = (BeamInstr *)((char *)start + modp->old.code_length); mod_start = (char *) start; - mod_size = modp->old_code_length; + mod_size = modp->old.code_length; /* * Check if current instruction or continuation pointer points into module. @@ -562,10 +861,10 @@ check_process_code(Process* rp, Module* modp) done_gc = 1; FLAGS(rp) |= F_NEED_FULLSWEEP; (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity); - literals = (Eterm *) modp->old_code[MI_LITERALS_START]; - lit_size = (Eterm *) modp->old_code[MI_LITERALS_END] - literals; + literals = (Eterm *) modp->old.code[MI_LITERALS_START]; + lit_size = (Eterm *) modp->old.code[MI_LITERALS_END] - literals; oh = (struct erl_off_heap_header *) - modp->old_code[MI_LITERALS_OFF_HEAP]; + modp->old.code[MI_LITERALS_OFF_HEAP]; erts_garbage_collect_literals(rp, literals, lit_size, oh); } } @@ -624,53 +923,82 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size) #undef in_area - -static int -purge_module(int module) +BIF_RETTYPE purge_module_1(BIF_ALIST_1) { + ErtsCodeIndex code_ix; BeamInstr* code; BeamInstr* end; Module* modp; + int is_blocking = 0; + Eterm ret; - /* - * Correct module? - */ - - if ((modp = erts_get_module(make_atom(module))) == NULL) { - return -2; + if (is_not_atom(BIF_ARG_1)) { + BIF_ERROR(BIF_P, BADARG); } - /* - * Any code to purge? - */ - if (modp->old_code == 0) { - return -1; + if (!erts_try_seize_code_write_permission(BIF_P)) { + ERTS_BIF_YIELD1(bif_export[BIF_purge_module_1], BIF_P, BIF_ARG_1); } + code_ix = erts_active_code_ix(); + /* - * Unload any NIF library + * Correct module? */ - if (modp->old_nif != NULL) { - erts_unload_nif(modp->old_nif); - modp->old_nif = NULL; + + if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) { + ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG); } + else { + erts_rwlock_old_code(code_ix); - /* - * Remove the old code. 
- */ - ASSERT(erts_total_code_size >= modp->old_code_length); - erts_total_code_size -= modp->old_code_length; - code = modp->old_code; - end = (BeamInstr *)((char *)code + modp->old_code_length); - erts_cleanup_funs_on_purge(code, end); - beam_catches_delmod(modp->old_catches, code, modp->old_code_length); - decrement_refc(code); - erts_free(ERTS_ALC_T_CODE, (void *) code); - modp->old_code = NULL; - modp->old_code_length = 0; - modp->old_catches = BEAM_CATCHES_NIL; - remove_from_address_table(code); - return 0; + /* + * Any code to purge? + */ + if (modp->old.code == 0) { + ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG); + } + else { + /* + * Unload any NIF library + */ + if (modp->old.nif != NULL) { + /* ToDo: Do unload nif without blocking */ + erts_rwunlock_old_code(code_ix); + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_thr_progress_block(); + is_blocking = 1; + erts_rwlock_old_code(code_ix); + erts_unload_nif(modp->old.nif); + modp->old.nif = NULL; + } + + /* + * Remove the old code. + */ + ASSERT(erts_total_code_size >= modp->old.code_length); + erts_total_code_size -= modp->old.code_length; + code = modp->old.code; + end = (BeamInstr *)((char *)code + modp->old.code_length); + erts_cleanup_funs_on_purge(code, end); + beam_catches_delmod(modp->old.catches, code, modp->old.code_length, + code_ix); + decrement_refc(code); + erts_free(ERTS_ALC_T_CODE, (void *) code); + modp->old.code = NULL; + modp->old.code_length = 0; + modp->old.catches = BEAM_CATCHES_NIL; + erts_remove_from_ranges(code); + ERTS_BIF_PREP_RET(ret, am_true); + } + erts_rwunlock_old_code(code_ix); + } + if (is_blocking) { + erts_smp_thr_progress_unblock(); + erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + } + erts_release_code_write_permission(); + return ret; } static void @@ -690,92 +1018,48 @@ decrement_refc(BeamInstr* code) } } -static void -remove_from_address_table(BeamInstr* code) -{ - int i; - - for (i = 0; i < num_loaded_modules; i++) { - if (modules[i].start == code) { - num_loaded_modules--; - while (i < num_loaded_modules) { - modules[i] = modules[i+1]; - i++; - } - mid_module = &modules[num_loaded_modules/2]; - return; - } - } - ASSERT(0); /* Not found? */ -} - - /* - * Move code from current to old. 
+ * Move code from current to old and null all export entries for the module */ -static void -delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp) -{ -#ifdef ERTS_ENABLE_LOCK_CHECK -#ifdef ERTS_SMP - if (c_p && c_p_locks) - erts_proc_lc_chk_only_proc_main(c_p); - else -#endif - erts_lc_check_exact(NULL, 0); -#endif - - /* - * Clear breakpoints if any - */ - if (modp->code != NULL && modp->code[MI_NUM_BREAKPOINTS] > 0) { - if (c_p && c_p_locks) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - erts_clear_module_break(modp); - modp->code[MI_NUM_BREAKPOINTS] = 0; - erts_smp_thr_progress_unblock(); - if (c_p && c_p_locks) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); - } - modp->old_code = modp->code; - modp->old_code_length = modp->code_length; - modp->old_catches = modp->catches; - modp->old_nif = modp->nif; - modp->code = NULL; - modp->code_length = 0; - modp->catches = BEAM_CATCHES_NIL; - modp->nif = NULL; -} - - -/* null all references on the export table for the module called with the - atom index below */ - static void -delete_export_references(Eterm module) +delete_code(Module* modp) { + ErtsCodeIndex code_ix = erts_staging_code_ix(); + Eterm module = make_atom(modp->module); int i; - ASSERT(is_atom(module)); - - for (i = 0; i < export_list_size(); i++) { - Export *ep = export_list(i); + for (i = 0; i < export_list_size(code_ix); i++) { + Export *ep = export_list(i, code_ix); if (ep != NULL && (ep->code[0] == module)) { - if (ep->address == ep->code+3 && - (ep->code[3] == (BeamInstr) em_apply_bif)) { - continue; + if (ep->addressv[code_ix] == ep->code+3) { + if (ep->code[3] == (BeamInstr) em_apply_bif) { + continue; + } + else if (ep->code[3] == + (BeamInstr) BeamOp(op_i_generic_breakpoint)) { + ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(modp->curr.num_traced_exports > 0); + erts_clear_export_break(modp, ep->code+3); + } + else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler + || !erts_initialized); } - ep->address = ep->code+3; + ep->addressv[code_ix] = ep->code+3; ep->code[3] = (BeamInstr) em_call_error_handler; ep->code[4] = 0; - MatchSetUnref(ep->match_prog_set); - ep->match_prog_set = NULL; } } + + ASSERT(modp->curr.num_breakpoints == 0); + ASSERT(modp->curr.num_traced_exports == 0); + modp->old = modp->curr; + modp->curr.code = NULL; + modp->curr.code_length = 0; + modp->curr.catches = BEAM_CATCHES_NIL; + modp->curr.nif = NULL; } - + Eterm beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module) @@ -787,11 +1071,10 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module) * if not, delete old code; error if old code already exists. */ - if (modp->code != NULL && modp->old_code != NULL) { + if (modp->curr.code != NULL && modp->old.code != NULL) { return am_not_purged; - } else if (modp->old_code == NULL) { /* Make the current version old. */ - delete_code(c_p, c_p_locks, modp); - delete_export_references(module); + } else if (modp->old.code == NULL) { /* Make the current version old. 
*/ + delete_code(modp); } return NIL; } diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index d772bea02f..58e0090a76 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -44,67 +44,34 @@ #define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SZ)) #define Free(P) erts_free(ERTS_ALC_T_BPD, (P)) -/* -** Doubly linked ring macros -*/ - -#define BpInit(a,i) \ -do { \ - (a)->orig_instr = (i); \ - (a)->next = (a); \ - (a)->prev = (a); \ -} while (0) - -#define BpSpliceNext(a,b) \ -do { \ - register BpData *c = (a), *d = (b), *e; \ - e = c->next->prev; \ - c->next->prev = d->next->prev; \ - d->next->prev = e; \ - e = c->next; \ - c->next = d->next; \ - d->next = e; \ -} while (0) - -#define BpSplicePrev(a,b) \ -do { \ - register BpData *c = (a), *d = (b), *e; \ - e = c->prev->next; \ - c->prev->next = d->prev->next; \ - d->prev->next = e; \ - e = c->prev; \ - c->prev = d->prev; \ - d->prev = e; \ -} while (0) - -#ifdef DEBUG -# define BpSingleton(a) ((a)->next == (a) && (a)->prev == (a)) +#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ + if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN) +# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ + if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) #else -# define BpSingleton(a) ((a)->next == (a)) +# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) +# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) #endif -#define BpInitAndSpliceNext(a,i,b) \ -do { \ - (a)->orig_instr = (i); \ - (a)->prev = (b); \ - (b)->next->prev = (a); \ - (a)->next = (b)->next; \ - (b)->next = (a); \ -} while (0) +#define ERTS_BPF_LOCAL_TRACE 0x01 +#define ERTS_BPF_META_TRACE 0x02 +#define ERTS_BPF_COUNT 0x04 +#define ERTS_BPF_COUNT_ACTIVE 0x08 +#define ERTS_BPF_DEBUG 0x10 +#define ERTS_BPF_TIME_TRACE 0x20 +#define ERTS_BPF_TIME_TRACE_ACTIVE 0x40 +#define ERTS_BPF_GLOBAL_TRACE 0x80 -#define BpInitAndSplicePrev(a,i,b) \ -do { \ - (a)->orig_instr = (i); \ - (a)->next = (b); \ - (b)->prev->next = (a); \ - (a)->prev = (b)->prev; \ - (b)->prev = (a); \ -} while (0) +#define ERTS_BPF_ALL 0xFF +extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */ +extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */ +extern Eterm beam_exception_trace[1]; /* OpCode(i_exception_trace) */ +extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ -#define BREAK_IS_BIF (1) -#define BREAK_IS_ERL (0) - +erts_smp_atomic32_t erts_active_bp_index; +erts_smp_atomic32_t erts_staging_bp_index; /* ************************************************************************* ** Local prototypes @@ -113,26 +80,30 @@ do { \ /* ** Helpers */ - -static int set_break(Eterm mfa[3], int specified, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid); -static int set_module_break(Module *modp, Eterm mfa[3], int specified, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid); -static int set_function_break(Module *modp, BeamInstr *pc, int bif, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid); - -static int clear_break(Eterm mfa[3], int specified, - BeamInstr break_op); -static int clear_module_break(Module *modp, Eterm mfa[3], int specified, - BeamInstr break_op); -static int clear_function_break(Module *modp, BeamInstr *pc, int bif, - BeamInstr break_op); - -static BpData *is_break(BeamInstr *pc, BeamInstr break_op); -static BpData *get_break(Process *p, BeamInstr *pc, 
BeamInstr break_op); +static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg, + int local, Binary* ms, Eterm tracer_pid); +static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags, + enum erts_break_op count_op, Eterm tracer_pid); +static void set_function_break(BeamInstr *pc, + Binary *match_spec, + Uint break_flags, + enum erts_break_op count_op, + Eterm tracer_pid); + +static void clear_break(BpFunctions* f, Uint break_flags); +static int clear_function_break(BeamInstr *pc, Uint break_flags); + +static BpDataTime* get_time_break(BeamInstr *pc); +static GenericBpData* check_break(BeamInstr *pc, Uint break_flags); +static void bp_time_diff(bp_data_time_item_t *item, + process_breakpoint_time_t *pbt, + Uint ms, Uint s, Uint us); + +static void bp_meta_unref(BpMetaPid* bmp); +static void bp_count_unref(BpCount* bcp); +static void bp_time_unref(BpDataTime* bdt); +static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local); +static void uninstall_breakpoint(BeamInstr* pc); /* bp_hash */ #define BP_TIME_ADD(pi0, pi1) \ @@ -152,240 +123,996 @@ static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_da static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem); static void bp_hash_delete(bp_time_hash_t *hash); - /* ************************************************************************* ** External interfaces */ -erts_smp_spinlock_t erts_bp_lock; - void erts_bp_init(void) { - erts_smp_spinlock_init(&erts_bp_lock, "breakpoints"); + erts_smp_atomic32_init_nob(&erts_active_bp_index, 0); + erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1); } -int -erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec, - Eterm tracer_pid) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, match_spec, - (BeamInstr) BeamOp(op_i_trace_breakpoint), 0, tracer_pid); + +void +erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified) +{ + ErtsCodeIndex code_ix = erts_active_code_ix(); + Uint max_funcs = 0; + int current; + int max_modules = module_code_size(code_ix); + int num_modules = 0; + Module* modp; + Module** module; + Uint i; + + module = (Module **) Alloc(max_modules*sizeof(Module *)); + num_modules = 0; + for (current = 0; current < max_modules; current++) { + modp = module_code(current, code_ix); + if (modp->curr.code) { + max_funcs += modp->curr.code[MI_NUM_FUNCTIONS]; + module[num_modules++] = modp; + } + } + + f->matching = (BpFunction *) Alloc(max_funcs*sizeof(BpFunction)); + i = 0; + for (current = 0; current < num_modules; current++) { + BeamInstr** code_base = (BeamInstr **) module[current]->curr.code; + BeamInstr* code; + Uint num_functions = (Uint) code_base[MI_NUM_FUNCTIONS]; + Uint fi; + + if (specified > 0) { + if (mfa[0] != make_atom(module[current]->module)) { + /* Wrong module name */ + continue; + } + } + + for (fi = 0; fi < num_functions; fi++) { + Eterm* pc; + int wi; + + code = code_base[MI_FUNCTIONS+fi]; + ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); + pc = code+5; + if (erts_is_native_break(pc)) { + continue; + } + if (is_nil(code[3])) { /* Ignore BIF stub */ + continue; + } + for (wi = 0; + wi < specified && (Eterm) code[2+wi] == mfa[wi]; + wi++) { + /* Empty loop body */ + } + if (wi == specified) { + /* Store match */ + f->matching[i].pc = pc; + f->matching[i].mod = module[current]; + i++; + } + } + } + f->matched = i; + Free(module); } -int -erts_set_mtrace_break(Eterm mfa[3], int specified, 
Binary *match_spec, - Eterm tracer_pid) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, match_spec, - (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid); +void +erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified) +{ + ErtsCodeIndex code_ix = erts_active_code_ix(); + int i; + int num_exps = export_list_size(code_ix); + int ne; + + f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction)); + ne = 0; + for (i = 0; i < num_exps; i++) { + Export* ep = export_list(i, code_ix); + BeamInstr* pc; + int j; + + for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) { + /* Empty loop body */ + } + if (j < specified) { + continue; + } + pc = ep->code+3; + if (ep->addressv[code_ix] == pc) { + if ((*pc == (BeamInstr) em_apply_bif || + *pc == (BeamInstr) em_call_error_handler)) { + continue; + } + ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)); + } else if (erts_is_native_break(ep->addressv[code_ix])) { + continue; + } + + f->matching[ne].pc = pc; + f->matching[ne].mod = erts_get_module(ep->code[0], code_ix); + ne++; + + } + f->matched = ne; } -/* set breakpoint data for on exported bif entry */ +void +erts_bp_free_matched_functions(BpFunctions* f) +{ + Free(f->matching); +} void -erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid); +erts_consolidate_bp_data(BpFunctions* f, int local) +{ + BpFunction* fs = f->matching; + Uint i; + Uint n = f->matched; + + ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + + for (i = 0; i < n; i++) { + consolidate_bp_data(fs[i].mod, fs[i].pc, local); + } } -void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) { - set_function_break(NULL, pc, BREAK_IS_BIF, NULL, (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL); +void +erts_consolidate_bif_bp_data(void) +{ + int i; + + ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + for (i = 0; i < BIF_SIZE; i++) { + Export *ep = bif_export[i]; + consolidate_bp_data(0, ep->code+3, 0); + } } -void erts_clear_time_trace_bif(BeamInstr *pc) { - clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_time_breakpoint)); +static void +consolidate_bp_data(Module* modp, BeamInstr* pc, int local) +{ + GenericBp* g = (GenericBp *) pc[-4]; + GenericBpData* src; + GenericBpData* dst; + Uint flags; + + if (g == 0) { + return; + } + + src = &g->data[erts_active_bp_ix()]; + dst = &g->data[erts_staging_bp_ix()]; + + /* + * The contents of the staging area may be out of date. + * Decrement all reference pointers. + */ + + flags = dst->flags; + if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + MatchSetUnref(dst->local_ms); + } + if (flags & ERTS_BPF_META_TRACE) { + bp_meta_unref(dst->meta_pid); + MatchSetUnref(dst->meta_ms); + } + if (flags & ERTS_BPF_COUNT) { + bp_count_unref(dst->count); + } + if (flags & ERTS_BPF_TIME_TRACE) { + bp_time_unref(dst->time); + } + + /* + * If all flags are zero, deallocate all breakpoint data. 
+ */ + + flags = dst->flags = src->flags; + if (flags == 0) { + if (modp) { + if (local) { + modp->curr.num_breakpoints--; + } else { + modp->curr.num_traced_exports--; + } + ASSERT(modp->curr.num_breakpoints >= 0); + ASSERT(modp->curr.num_traced_exports >= 0); + ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint)); + } + pc[-4] = 0; + Free(g); + return; + } + + /* + * Copy the active data to the staging area (making it ready + * for the next time it will be used). + */ + + if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + dst->local_ms = src->local_ms; + MatchSetRef(dst->local_ms); + } + if (flags & ERTS_BPF_META_TRACE) { + dst->meta_pid = src->meta_pid; + erts_refc_inc(&dst->meta_pid->refc, 1); + dst->meta_ms = src->meta_ms; + MatchSetRef(dst->meta_ms); + } + if (flags & ERTS_BPF_COUNT) { + dst->count = src->count; + erts_refc_inc(&dst->count->refc, 1); + } + if (flags & ERTS_BPF_TIME_TRACE) { + dst->time = src->time; + erts_refc_inc(&dst->time->refc, 1); + ASSERT(dst->time->hash); + } } -int -erts_set_debug_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, NULL, - (BeamInstr) BeamOp(op_i_debug_breakpoint), 0, NIL); +void +erts_commit_staged_bp(void) +{ + ErtsBpIndex staging = erts_staging_bp_ix(); + ErtsBpIndex active = erts_active_bp_ix(); + + erts_smp_atomic32_set_nob(&erts_active_bp_index, staging); + erts_smp_atomic32_set_nob(&erts_staging_bp_index, active); } -int -erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, NULL, - (BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL); +void +erts_install_breakpoints(BpFunctions* f) +{ + Uint i; + Uint n = f->matched; + BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint); + + for (i = 0; i < n; i++) { + BeamInstr* pc = f->matching[i].pc; + GenericBp* g = (GenericBp *) pc[-4]; + if (*pc != br && g) { + Module* modp = f->matching[i].mod; + + /* + * The breakpoint must be disabled in the active data + * (it will enabled later by switching bp indices), + * and enabled in the staging data. + */ + ASSERT(g->data[erts_active_bp_ix()].flags == 0); + ASSERT(g->data[erts_staging_bp_ix()].flags != 0); + + /* + * The following write is not protected by any lock. We + * assume that the hardware guarantees that a write of an + * aligned word-size (or half-word) writes is atomic + * (i.e. that other processes executing this code will not + * see a half pointer). + */ + *pc = br; + modp->curr.num_breakpoints++; + } + } } -int -erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return set_break(mfa, specified, NULL, - (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL); +void +erts_uninstall_breakpoints(BpFunctions* f) +{ + Uint i; + Uint n = f->matched; + + for (i = 0; i < n; i++) { + BeamInstr* pc = f->matching[i].pc; + uninstall_breakpoint(pc); + } } -int -erts_clear_trace_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_trace_breakpoint)); +static void +uninstall_breakpoint(BeamInstr* pc) +{ + if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { + GenericBp* g = (GenericBp *) pc[-4]; + if (g->data[erts_active_bp_ix()].flags == 0) { + /* + * The following write is not protected by any lock. 
We + * assume that the hardware guarantees that a write of an + * aligned word-size (or half-word) writes is atomic + * (i.e. that other processes executing this code will not + * see a half pointer). + */ + *pc = g->orig_instr; + } + } } -int -erts_clear_mtrace_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_mtrace_breakpoint)); +void +erts_set_trace_break(BpFunctions* f, Binary *match_spec) +{ + set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true); } void -erts_clear_mtrace_bif(BeamInstr *pc) { - clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_mtrace_breakpoint)); +erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid) +{ + set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid); } -int -erts_clear_debug_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_debug_breakpoint)); +void +erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local) +{ + Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE; + + set_function_break(pc, match_spec, flags, 0, NIL); } -int -erts_clear_count_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_count_breakpoint)); +void +erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) +{ + set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid); } -int -erts_clear_time_break(Eterm mfa[3], int specified) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, - (BeamInstr) BeamOp(op_i_time_breakpoint)); +void +erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) +{ + set_function_break(pc, NULL, + ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE, + count_op, NIL); } -int -erts_clear_break(Eterm mfa[3], int specified) { +void +erts_clear_time_trace_bif(BeamInstr *pc) { + clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE); +} + +void +erts_set_debug_break(BpFunctions* f) { + set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL); +} + +void +erts_set_count_break(BpFunctions* f, enum erts_break_op count_op) +{ + set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE, + count_op, NIL); +} + +void +erts_set_time_break(BpFunctions* f, enum erts_break_op count_op) +{ + set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE, + count_op, NIL); +} + +void +erts_clear_trace_break(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_LOCAL_TRACE); +} + +void +erts_clear_call_trace_bif(BeamInstr *pc, int local) +{ + GenericBp* g = (GenericBp *) pc[-4]; + + if (g) { + Uint flags = local ? 
ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE; + if (g->data[erts_staging_bp_ix()].flags & flags) { + clear_function_break(pc, flags); + } + } +} + +void +erts_clear_mtrace_break(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_META_TRACE); +} + +void +erts_clear_mtrace_bif(BeamInstr *pc) +{ + clear_function_break(pc, ERTS_BPF_META_TRACE); +} + +void +erts_clear_debug_break(BpFunctions* f) +{ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - return clear_break(mfa, specified, 0); + clear_break(f, ERTS_BPF_DEBUG); +} + +void +erts_clear_count_break(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE); +} + +void +erts_clear_time_break(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE); +} + +void +erts_clear_all_breaks(BpFunctions* f) +{ + clear_break(f, ERTS_BPF_ALL); } int erts_clear_module_break(Module *modp) { + BeamInstr** code_base; + Uint n; + Uint i; + ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); ASSERT(modp); - return clear_module_break(modp, NULL, 0, 0); + code_base = (BeamInstr **) modp->curr.code; + if (code_base == NULL) { + return 0; + } + n = (Uint) code_base[MI_NUM_FUNCTIONS]; + for (i = 0; i < n; ++i) { + BeamInstr* pc; + + pc = code_base[MI_FUNCTIONS+i] + 5; + if (erts_is_native_break(pc)) { + continue; + } + clear_function_break(pc, ERTS_BPF_ALL); + } + + erts_commit_staged_bp(); + + for (i = 0; i < n; ++i) { + BeamInstr* pc; + + pc = code_base[MI_FUNCTIONS+i] + 5; + if (erts_is_native_break(pc)) { + continue; + } + uninstall_breakpoint(pc); + consolidate_bp_data(modp, pc, 1); + ASSERT(pc[-4] == 0); + } + return n; } -int -erts_clear_function_break(Module *modp, BeamInstr *pc) { +void +erts_clear_export_break(Module* modp, BeamInstr* pc) +{ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - ASSERT(modp); - return clear_function_break(modp, pc, BREAK_IS_ERL, 0); + + clear_function_break(pc, ERTS_BPF_ALL); + erts_commit_staged_bp(); + *pc = (BeamInstr) 0; + consolidate_bp_data(modp, pc, 0); + ASSERT(pc[-4] == 0); } +BeamInstr +erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg) +{ + GenericBp* g; + GenericBpData* bp; + Uint bp_flags; + ErtsBpIndex ix = erts_active_bp_ix(); + + g = (GenericBp *) I[-4]; + bp = &g->data[ix]; + bp_flags = bp->flags; + ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0); + if (bp_flags & (ERTS_BPF_LOCAL_TRACE| + ERTS_BPF_GLOBAL_TRACE| + ERTS_BPF_TIME_TRACE_ACTIVE) && + !IS_TRACED_FL(c_p, F_TRACE_CALLS)) { + bp_flags &= ~(ERTS_BPF_LOCAL_TRACE| + ERTS_BPF_GLOBAL_TRACE| + ERTS_BPF_TIME_TRACE| + ERTS_BPF_TIME_TRACE_ACTIVE); + if (bp_flags == 0) { /* Quick exit */ + return g->orig_instr; + } + } + + if (bp_flags & ERTS_BPF_LOCAL_TRACE) { + ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0); + (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true); + } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) { + (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true); + } + + if (bp_flags & ERTS_BPF_META_TRACE) { + Eterm old_pid; + Eterm new_pid; + + old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid); + new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid); + if (new_pid != old_pid) { + erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid); + } + } + + if (bp_flags & ERTS_BPF_COUNT_ACTIVE) { + erts_smp_atomic_inc_nob(&bp->count->acount); + } + + if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) { + Eterm w; + erts_trace_time_call(c_p, I, bp->time); + w = (BeamInstr) *c_p->cp; + if (! 
(w == (BeamInstr) BeamOp(op_i_return_time_trace) || + w == (BeamInstr) BeamOp(op_return_trace) || + w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) { + Eterm* E = c_p->stop; + ASSERT(c_p->htop <= E && E <= c_p->hend); + if (E - 2 < c_p->htop) { + (void) erts_garbage_collect(c_p, 2, reg, I[-1]); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + } + E = c_p->stop; + ASSERT(c_p->htop <= E && E <= c_p->hend); + + E -= 2; + E[0] = make_cp(I); + E[1] = make_cp(c_p->cp); /* original return address */ + c_p->cp = beam_return_time_trace; + c_p->stop = E; + } + } + + if (bp_flags & ERTS_BPF_DEBUG) { + return (BeamInstr) BeamOp(op_i_debug_breakpoint); + } else { + return g->orig_instr; + } +} /* - * SMP NOTE: Process p may have become exiting on return! + * Entry point called by the trace wrap functions in erl_bif_wrap.c + * + * The trace wrap functions are themselves called through the export + * entries instead of the original BIF functions. */ -BeamInstr -erts_trace_break(Process *p, BeamInstr *pc, Eterm *args, - Uint32 *ret_flags, Eterm *tracer_pid) { - Eterm tpid1, tpid2; - BpData **bds = (BpData **) (pc)[-4]; - BpDataTrace *bdt = NULL; +Eterm +erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) +{ + Eterm result; + Eterm (*func)(Process*, Eterm*, BeamInstr*); + Export* ep = bif_export[bif_index]; + Uint32 flags = 0, flags_meta = 0; + Eterm meta_tracer_pid = NIL; + int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif + * is actually in the + * export entry */ + BeamInstr *cp = p->cp; + GenericBp* g; + GenericBpData* bp = NULL; + Uint bp_flags = 0; + + ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + + g = (GenericBp *) ep->fake_op_func_info_for_hipe[1]; + if (g) { + bp = &g->data[erts_active_bp_ix()]; + bp_flags = bp->flags; + } - ASSERT(bds); - ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - bdt = (BpDataTrace *) bds[bp_sched2ix_proc(p)]; - ASSERT(bdt); - bdt = (BpDataTrace *) bdt->next; - ASSERT(bdt); - ASSERT(ret_flags); - ASSERT(tracer_pid); - - ErtsSmpBPLock(bdt); - tpid1 = tpid2 = bdt->tracer_pid; - ErtsSmpBPUnlock(bdt); - - *ret_flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args, - 1, &tpid2); - *tracer_pid = tpid2; - if (tpid1 != tpid2) { - ErtsSmpBPLock(bdt); - bdt->tracer_pid = tpid2; - ErtsSmpBPUnlock(bdt); - } - bds[bp_sched2ix_proc(p)] = (BpData *) bdt; - return bdt->orig_instr; + /* + * Make continuation pointer OK, it is not during direct BIF calls, + * but it is correct during apply of bif. + */ + if (!applying) { + p->cp = I; + } + if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) && + IS_TRACED_FL(p, F_TRACE_CALLS)) { + int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE); + flags = erts_call_trace(p, ep->code, bp->local_ms, args, + local, &p->tracer_proc); + } + if (bp_flags & ERTS_BPF_META_TRACE) { + Eterm tpid1, tpid2; + + tpid1 = tpid2 = + (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid); + flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args, + 0, &tpid2); + meta_tracer_pid = tpid2; + if (tpid1 != tpid2) { + erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2); + } + } + if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE && + IS_TRACED_FL(p, F_TRACE_CALLS)) { + BeamInstr *pc = (BeamInstr *)ep->code+3; + erts_trace_time_call(p, pc, bp->time); + } + + /* Restore original continuation pointer (if changed). 
*/ + p->cp = cp; + + func = bif_table[bif_index].f; + + result = func(p, args, I); + + if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) { + BeamInstr i_return_trace = beam_return_trace[0]; + BeamInstr i_return_to_trace = beam_return_to_trace[0]; + BeamInstr i_return_time_trace = beam_return_time_trace[0]; + Eterm *cpp; + /* Maybe advance cp to skip trace stack frames */ + for (cpp = p->stop; ; cp = cp_val(*cpp++)) { + if (*cp == i_return_trace) { + /* Skip stack frame variables */ + while (is_not_CP(*cpp)) cpp++; + cpp += 2; /* Skip return_trace parameters */ + } else if (*cp == i_return_time_trace) { + /* Skip stack frame variables */ + while (is_not_CP(*cpp)) cpp++; + cpp += 1; /* Skip return_time_trace parameters */ + } else if (*cp == i_return_to_trace) { + /* A return_to trace message is going to be generated + * by normal means, so we do not have to. + */ + cp = NULL; + break; + } else break; + } + } + + /* Try to get these in the order + * they usually appear in normal code... */ + if (is_non_value(result)) { + Uint reason = p->freason; + if (reason != TRAP) { + Eterm class; + Eterm value = p->fvalue; + DeclareTmpHeapNoproc(nocatch,3); + UseTmpHeapNoproc(3); + /* Expand error value like in handle_error() */ + if (reason & EXF_ARGLIST) { + Eterm *tp; + ASSERT(is_tuple(value)); + tp = tuple_val(value); + value = tp[1]; + } + if ((reason & EXF_THROWN) && (p->catches <= 0)) { + value = TUPLE2(nocatch, am_nocatch, value); + reason = EXC_ERROR; + } + /* Note: expand_error_value() could theoretically + * allocate on the heap, but not for any error + * returned by a BIF, and it would do no harm, + * just be annoying. + */ + value = expand_error_value(p, reason, value); + class = exception_tag[GET_EXC_CLASS(reason)]; + + if (flags_meta & MATCH_SET_EXCEPTION_TRACE) { + erts_trace_exception(p, ep->code, class, value, + &meta_tracer_pid); + } + if (flags & MATCH_SET_EXCEPTION_TRACE) { + erts_trace_exception(p, ep->code, class, value, + &p->tracer_proc); + } + if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) { + /* can only happen if(local)*/ + Eterm *ptr = p->stop; + ASSERT(is_CP(*ptr)); + ASSERT(ptr <= STACK_START(p)); + /* Search the nearest stack frame for a catch */ + while (++ptr < STACK_START(p)) { + if (is_CP(*ptr)) break; + if (is_catch(*ptr)) { + if (applying) { + /* Apply of BIF, cp is in calling function */ + if (cp) erts_trace_return_to(p, cp); + } else { + /* Direct bif call, I points into + * calling function */ + erts_trace_return_to(p, I); + } + } + } + } + UnUseTmpHeapNoproc(3); + if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) { + erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + p->trace_flags |= F_EXCEPTION_TRACE; + erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); + } + } + } else { + if (flags_meta & MATCH_SET_RX_TRACE) { + erts_trace_return(p, ep->code, result, &meta_tracer_pid); + } + /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */ + if (flags & MATCH_SET_RX_TRACE) { + erts_trace_return(p, ep->code, result, &p->tracer_proc); + } + if (flags & MATCH_SET_RETURN_TO_TRACE) { + /* can only happen if(local)*/ + if (applying) { + /* Apply of BIF, cp is in calling function */ + if (cp) erts_trace_return_to(p, cp); + } else { + /* Direct bif call, I points into calling function */ + erts_trace_return_to(p, I); + } + } + } + ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + return result; +} + +static Eterm +do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg, + int local, Binary* ms, Eterm tracer_pid) +{ + Eterm* cpp; + int return_to_trace = 0; + BeamInstr 
w; + BeamInstr *cp_save; + Uint32 flags; + Uint need = 0; + Eterm* E = c_p->stop; + + w = *c_p->cp; + if (w == (BeamInstr) BeamOp(op_return_trace)) { + cpp = &E[2]; + } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) { + return_to_trace = 1; + cpp = &E[0]; + } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) { + cpp = &E[0]; + } else { + cpp = NULL; + } + if (cpp) { + for (;;) { + BeamInstr w = *cp_val(*cpp); + if (w == (BeamInstr) BeamOp(op_return_trace)) { + cpp += 3; + } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) { + return_to_trace = 1; + cpp += 1; + } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) { + cpp += 2; + } else { + break; + } + } + cp_save = c_p->cp; + c_p->cp = (BeamInstr *) cp_val(*cpp); + ASSERT(is_CP(*cpp)); + } + ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid); + ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + if (cpp) { + c_p->cp = cp_save; + } + + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) { + need += 1; + } + if (flags & MATCH_SET_RX_TRACE) { + need += 3; + } + if (need) { + ASSERT(c_p->htop <= E && E <= c_p->hend); + if (E - need < c_p->htop) { + (void) erts_garbage_collect(c_p, need, reg, I[-1]); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + E = c_p->stop; + } + } + if (flags & MATCH_SET_RETURN_TO_TRACE && !return_to_trace) { + E -= 1; + ASSERT(c_p->htop <= E && E <= c_p->hend); + E[0] = make_cp(c_p->cp); + c_p->cp = (BeamInstr *) beam_return_to_trace; + } + if (flags & MATCH_SET_RX_TRACE) { + E -= 3; + ASSERT(c_p->htop <= E && E <= c_p->hend); + ASSERT(is_CP((Eterm) (UWord) (I - 3))); + ASSERT(am_true == tracer_pid || + is_internal_pid(tracer_pid) || is_internal_port(tracer_pid)); + E[2] = make_cp(c_p->cp); + E[1] = tracer_pid; + E[0] = make_cp(I - 3); /* We ARE at the beginning of an + instruction, + the funcinfo is above i. */ + c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ? + beam_exception_trace : beam_return_trace; + erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + c_p->trace_flags |= F_EXCEPTION_TRACE; + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + } + c_p->stop = E; + return tracer_pid; } +void +erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt) +{ + Uint ms,s,us; + process_breakpoint_time_t *pbt = NULL; + bp_data_time_item_t sitem, *item = NULL; + bp_time_hash_t *h = NULL; + BpDataTime *pbdt = NULL; + ASSERT(c_p); + ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING); -/* - * SMP NOTE: Process p may have become exiting on return! - */ -Uint32 -erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local, - Eterm *tracer_pid) { - BpData **bds = (BpData **) (pc)[-4]; - BpDataTrace *bdt = NULL; + /* get previous timestamp and breakpoint + * from the process psd */ + pbt = ERTS_PROC_GET_CALL_TIME(c_p); + get_sys_now(&ms, &s, &us); - ASSERT(tracer_pid); - if (bds) { - Eterm tpid1, tpid2; - Uint32 flags; - bdt = (BpDataTrace *)bds[bp_sched2ix_proc(p)]; + /* get pbt + * timestamp = t0 + * lookup bdt from code + * set ts0 to pbt + * add call count here? 
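/* Editorial sketch, not part of the patch: bp_time_diff() (defined earlier
 * in beam_bp.c) is assumed to turn the timestamp saved in the process' psd
 * entry plus the get_sys_now() value into an elapsed-time sample,
 * normalizing the microsecond borrow.  The struct names below are
 * hypothetical stand-ins for bp_data_time_item_t/process_breakpoint_time_t;
 * ms is megaseconds, following the get_sys_now() convention. */
typedef struct { long s_time; long us_time; } time_item_sketch_t;
typedef struct { long ms, s, us; } time_stamp_sketch_t;

static void time_diff_sketch(time_item_sketch_t *out,
                             const time_stamp_sketch_t *t0,
                             long ms, long s, long us)
{
    long dsec = (ms - t0->ms) * 1000000L + (s - t0->s); /* whole seconds */
    long dus  = us - t0->us;                            /* microseconds */
    if (dus < 0) {          /* borrow one second */
        dus += 1000000L;
        dsec -= 1;
    }
    out->s_time  = dsec;
    out->us_time = dus;
}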
+ */ + if (pbt == 0) { + /* First call of process to instrumented function */ + pbt = Alloc(sizeof(process_breakpoint_time_t)); + (void *) ERTS_PROC_SET_CALL_TIME(c_p, ERTS_PROC_LOCK_MAIN, pbt); + } else { + ASSERT(pbt->pc); + /* add time to previous code */ + bp_time_diff(&sitem, pbt, ms, s, us); + sitem.pid = c_p->id; + sitem.count = 0; - ErtsSmpBPLock(bdt); - tpid1 = tpid2 = bdt->tracer_pid; - ErtsSmpBPUnlock(bdt); + /* previous breakpoint */ + pbdt = get_time_break(pbt->pc); - flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args, - local, &tpid2); - *tracer_pid = tpid2; - if (tpid1 != tpid2) { - ErtsSmpBPLock(bdt); - bdt->tracer_pid = tpid2; - ErtsSmpBPUnlock(bdt); + /* if null then the breakpoint was removed */ + if (pbdt) { + h = &(pbdt->hash[bp_sched2ix_proc(c_p)]); + + ASSERT(h); + ASSERT(h->item); + + item = bp_hash_get(h, &sitem); + if (!item) { + item = bp_hash_put(h, &sitem); + } else { + BP_TIME_ADD(item, &sitem); + } } - return flags; } - *tracer_pid = NIL; - return 0; + + /* Add count to this code */ + sitem.pid = c_p->id; + sitem.count = 1; + sitem.s_time = 0; + sitem.us_time = 0; + + /* this breakpoint */ + ASSERT(bdt); + h = &(bdt->hash[bp_sched2ix_proc(c_p)]); + + ASSERT(h); + ASSERT(h->item); + + item = bp_hash_get(h, &sitem); + if (!item) { + item = bp_hash_put(h, &sitem); + } else { + BP_TIME_ADD(item, &sitem); + } + + pbt->pc = I; + pbt->ms = ms; + pbt->s = s; + pbt->us = us; } +void +erts_trace_time_return(Process *p, BeamInstr *pc) +{ + Uint ms,s,us; + process_breakpoint_time_t *pbt = NULL; + bp_data_time_item_t sitem, *item = NULL; + bp_time_hash_t *h = NULL; + BpDataTime *pbdt = NULL; + + ASSERT(p); + ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING); + /* get previous timestamp and breakpoint + * from the process psd */ + + pbt = ERTS_PROC_GET_CALL_TIME(p); + get_sys_now(&ms,&s,&us); + + /* get pbt + * lookup bdt from code + * timestamp = t1 + * get ts0 from pbt + * get item from bdt->hash[bp_hash(p->id)] + * ack diff (t1, t0) to item + */ + + if (pbt) { + /* might have been removed due to + * trace_pattern(false) + */ + ASSERT(pbt->pc); + + bp_time_diff(&sitem, pbt, ms, s, us); + sitem.pid = p->id; + sitem.count = 0; + + /* previous breakpoint */ + pbdt = get_time_break(pbt->pc); + + /* beware, the trace_pattern might have been removed */ + if (pbdt) { + h = &(pbdt->hash[bp_sched2ix_proc(p)]); + + ASSERT(h); + ASSERT(h->item); + + item = bp_hash_get(h, &sitem); + if (!item) { + item = bp_hash_put(h, &sitem); + } else { + BP_TIME_ADD(item, &sitem); + } + } + + pbt->pc = pc; + pbt->ms = ms; + pbt->s = s; + pbt->us = us; + } +} int -erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) { - BpDataTrace *bdt = - (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_trace_breakpoint)); - - if (bdt) { +erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local) +{ + Uint flags = local ? 
ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE; + GenericBpData* bp = check_break(pc, flags); + + if (bp) { if (match_spec_ret) { - *match_spec_ret = bdt->match_spec; + *match_spec_ret = bp->local_ms; } - if (tracer_pid_ret) { - ErtsSmpBPLock(bdt); - *tracer_pid_ret = bdt->tracer_pid; - ErtsSmpBPUnlock(bdt); - } - return !0; + return 1; } return 0; } int -erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) { - BpDataTrace *bdt = - (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_mtrace_breakpoint)); +erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, + Eterm *tracer_pid_ret) +{ + GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE); - if (bdt) { + if (bp) { if (match_spec_ret) { - *match_spec_ret = bdt->match_spec; + *match_spec_ret = bp->meta_ms; } if (tracer_pid_ret) { - ErtsSmpBPLock(bdt); - *tracer_pid_ret = bdt->tracer_pid; - ErtsSmpBPUnlock(bdt); + *tracer_pid_ret = + (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid); } - return !0; + return 1; } return 0; } @@ -402,15 +1129,15 @@ erts_is_native_break(BeamInstr *pc) { } int -erts_is_count_break(BeamInstr *pc, Sint *count_ret) { - BpDataCount *bdc = - (BpDataCount *) is_break(pc, (BeamInstr) BeamOp(op_i_count_breakpoint)); +erts_is_count_break(BeamInstr *pc, Uint *count_ret) +{ + GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT); - if (bdc) { + if (bp) { if (count_ret) { - *count_ret = (Sint) erts_smp_atomic_read_nob(&bdc->acount); + *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount); } - return !0; + return 1; } return 0; } @@ -421,7 +1148,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) { Uint size; Eterm *hp, t; bp_data_time_item_t *item = NULL; - BpDataTime *bdt = (BpDataTime *) is_break(pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); + BpDataTime *bdt = get_time_break(pc); if (bdt) { if (retval) { @@ -464,7 +1191,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) { } bp_hash_delete(&hash); } - return !0; + return 1; } return 0; @@ -478,15 +1205,16 @@ erts_find_local_func(Eterm mfa[3]) { BeamInstr* code_ptr; Uint i,n; - if ((modp = erts_get_module(mfa[0])) == NULL) + if ((modp = erts_get_module(mfa[0], erts_active_code_ix())) == NULL) return NULL; - if ((code_base = (BeamInstr **) modp->code) == NULL) + if ((code_base = (BeamInstr **) modp->curr.code) == NULL) return NULL; n = (BeamInstr) code_base[MI_NUM_FUNCTIONS]; for (i = 0; i < n; ++i) { code_ptr = code_base[MI_FUNCTIONS+i]; ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]); - ASSERT(mfa[0] == ((Eterm) code_ptr[2])); + ASSERT(mfa[0] == ((Eterm) code_ptr[2]) || + is_nil((Eterm) code_ptr[2])); if (mfa[1] == ((Eterm) code_ptr[3]) && ((BeamInstr) mfa[2]) == code_ptr[4]) { return code_ptr + 5; @@ -654,7 +1382,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) { * the previous breakpoint. */ - pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); + pbdt = get_time_break(pbt->pc); if (pbdt) { get_sys_now(&ms,&s,&us); bp_time_diff(&sitem, pbt, ms, s, us); @@ -692,669 +1420,259 @@ void erts_schedule_time_break(Process *p, Uint schedule) { } /* pbt */ } -/* call_time breakpoint - * Accumulated times are added to the previous bp, - * not the current one. The current one is saved - * for future reference. - * The previous breakpoint is stored in the process it self, the psd. - * We do not need to store in a stack frame. - * There is no need for locking, each thread has its own - * area in each bp to save data. 
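/* Editorial sketch, not part of the patch: the lock-freedom described in the
 * comment above survives the rewrite because every scheduler still gets its
 * own hash (bdt->hash[scheduler index]) inside the breakpoint, keyed by the
 * calling process.  Assuming this sits next to the static bp_hash_* helpers
 * in beam_bp.c, the accumulate step used by both erts_trace_time_call() and
 * erts_trace_time_return() boils down to a get-or-insert-then-add: */
static void bp_time_accumulate_sketch(BpDataTime *bdt, Process *p,
                                      bp_data_time_item_t *sample)
{
    /* one hash per scheduler, so no lock is needed for the update */
    bp_time_hash_t *h = &bdt->hash[bp_sched2ix_proc(p)];
    bp_data_time_item_t *item = bp_hash_get(h, sample);

    if (!item) {
        item = bp_hash_put(h, sample);  /* first sample for this process */
    } else {
        BP_TIME_ADD(item, sample);      /* add call count and elapsed time */
    }
}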
- * Since we need to diffrentiate between processes for each bp, - * every bp has a hash (per thread) to process-bp statistics. - * - egil - */ - -void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type) { - Uint ms,s,us; - process_breakpoint_time_t *pbt = NULL; - bp_data_time_item_t sitem, *item = NULL; - bp_time_hash_t *h = NULL; - BpDataTime *pbdt = NULL; - - ASSERT(p); - ASSERT(p->status == P_RUNNING); - - /* get previous timestamp and breakpoint - * from the process psd */ - - pbt = ERTS_PROC_GET_CALL_TIME(p); - get_sys_now(&ms,&s,&us); - - switch(type) { - /* get pbt - * timestamp = t0 - * lookup bdt from code - * set ts0 to pbt - * add call count here? - */ - case ERTS_BP_CALL_TIME_CALL: - case ERTS_BP_CALL_TIME_TAIL_CALL: - - if (pbt) { - ASSERT(pbt->pc); - /* add time to previous code */ - bp_time_diff(&sitem, pbt, ms, s, us); - sitem.pid = p->id; - sitem.count = 0; - - /* previous breakpoint */ - pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); - - /* if null then the breakpoint was removed */ - if (pbdt) { - h = &(pbdt->hash[bp_sched2ix_proc(p)]); - - ASSERT(h); - ASSERT(h->item); - - item = bp_hash_get(h, &sitem); - if (!item) { - item = bp_hash_put(h, &sitem); - } else { - BP_TIME_ADD(item, &sitem); - } - } - - } else { - /* first call of process to instrumented function */ - pbt = Alloc(sizeof(process_breakpoint_time_t)); - (void *) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt); - } - /* add count to this code */ - sitem.pid = p->id; - sitem.count = 1; - sitem.s_time = 0; - sitem.us_time = 0; - - /* this breakpoint */ - ASSERT(bdt); - h = &(bdt->hash[bp_sched2ix_proc(p)]); - - ASSERT(h); - ASSERT(h->item); - - item = bp_hash_get(h, &sitem); - if (!item) { - item = bp_hash_put(h, &sitem); - } else { - BP_TIME_ADD(item, &sitem); - } - - pbt->pc = pc; - pbt->ms = ms; - pbt->s = s; - pbt->us = us; - break; - - case ERTS_BP_CALL_TIME_RETURN: - /* get pbt - * lookup bdt from code - * timestamp = t1 - * get ts0 from pbt - * get item from bdt->hash[bp_hash(p->id)] - * ack diff (t1, t0) to item - */ - - if(pbt) { - /* might have been removed due to - * trace_pattern(false) - */ - ASSERT(pbt->pc); - - bp_time_diff(&sitem, pbt, ms, s, us); - sitem.pid = p->id; - sitem.count = 0; - - /* previous breakpoint */ - pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); - - /* beware, the trace_pattern might have been removed */ - if (pbdt) { - h = &(pbdt->hash[bp_sched2ix_proc(p)]); - - ASSERT(h); - ASSERT(h->item); - - item = bp_hash_get(h, &sitem); - if (!item) { - item = bp_hash_put(h, &sitem); - } else { - BP_TIME_ADD(item, &sitem); - } - } - - pbt->pc = pc; - pbt->ms = ms; - pbt->s = s; - pbt->us = us; - } - break; - default : - ASSERT(0); - /* will never happen */ - break; - } -} - - /* ************************************************************************* ** Local helpers */ -static int set_break(Eterm mfa[3], int specified, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid) +static void +set_break(BpFunctions* f, Binary *match_spec, Uint break_flags, + enum erts_break_op count_op, Eterm tracer_pid) { - Module *modp; - int num_processed = 0; - if (!specified) { - /* Find and process all modules in the system... 
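/* Editorial sketch, not part of the patch: instead of walking all loaded
 * modules here, the new API first collects the matching functions into a
 * BpFunctions set and then stages, installs and commits in separate steps.
 * The sequence below mirrors the erts_debug_breakpoint_2 hunk in
 * beam_debug.c further down; it assumes the caller already holds the code
 * write permission and has blocked thread progress where required. */
static Uint set_count_break_sketch(Eterm mfa[3], int specified)
{
    BpFunctions f;
    Uint matched;

    erts_bp_match_functions(&f, mfa, specified); /* collect matching pcs */
    erts_set_count_break(&f, erts_break_set);    /* fill in staging bp data */
    erts_install_breakpoints(&f);                /* patch in i_generic_breakpoint */
    erts_commit_staged_bp();                     /* flip staging -> active index */
    erts_consolidate_bp_data(&f, 1);             /* reclaim the unused copy */
    matched = f.matched;
    erts_bp_free_matched_functions(&f);
    return matched;
}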
*/ - int current; - int last = module_code_size(); - for (current = 0; current < last; current++) { - modp = module_code(current); - ASSERT(modp != NULL); - num_processed += - set_module_break(modp, mfa, specified, - match_spec, break_op, count_op, - tracer_pid); - } - } else { - /* Process a single module */ - if ((modp = erts_get_module(mfa[0])) != NULL) { - num_processed += - set_module_break(modp, mfa, specified, - match_spec, break_op, count_op, - tracer_pid); - } - } - return num_processed; -} - -static int set_module_break(Module *modp, Eterm mfa[3], int specified, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid) { - BeamInstr** code_base; - BeamInstr* code_ptr; - int num_processed = 0; - Uint i,n; + Uint i; + Uint n; - ASSERT(break_op); - ASSERT(modp); - code_base = (BeamInstr **) modp->code; - if (code_base == NULL) { - return 0; + n = f->matched; + for (i = 0; i < n; i++) { + BeamInstr* pc = f->matching[i].pc; + set_function_break(pc, match_spec, break_flags, + count_op, tracer_pid); } - n = (BeamInstr) code_base[MI_NUM_FUNCTIONS]; - for (i = 0; i < n; ++i) { - code_ptr = code_base[MI_FUNCTIONS+i]; - ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) && - (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) { - BeamInstr *pc = code_ptr+5; - - num_processed += - set_function_break(modp, pc, BREAK_IS_ERL, match_spec, - break_op, count_op, tracer_pid); - } - } - return num_processed; } -static int set_function_break(Module *modp, BeamInstr *pc, int bif, - Binary *match_spec, BeamInstr break_op, - enum erts_break_op count_op, Eterm tracer_pid) { - - BeamInstr **code_base = NULL; - BpData *bd, **r, ***rs; - size_t size; - Uint ix = 0; - - if (bif == BREAK_IS_ERL) { - code_base = (BeamInstr **)modp->code; - ASSERT(code_base); - ASSERT(code_base <= (BeamInstr **)pc); - ASSERT((BeamInstr **)pc < code_base + (modp->code_length/sizeof(BeamInstr *))); - } else { - ASSERT(*pc == (BeamInstr) em_apply_bif); - ASSERT(modp == NULL); +static void +set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags, + enum erts_break_op count_op, Eterm tracer_pid) +{ + GenericBp* g; + GenericBpData* bp; + Uint common; + ErtsBpIndex ix = erts_staging_bp_ix(); + + ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + g = (GenericBp *) pc[-4]; + if (g == 0) { + int i; + if (count_op == erts_break_reset || count_op == erts_break_stop) { + /* Do not insert a new breakpoint */ + return; + } + g = Alloc(sizeof(GenericBp)); + g->orig_instr = *pc; + for (i = 0; i < ERTS_NUM_BP_IX; i++) { + g->data[i].flags = 0; + } + pc[-4] = (BeamInstr) g; } + bp = &g->data[ix]; /* - * Currently no trace support for native code. + * If we are changing an existing breakpoint, clean up old data. */ - if (erts_is_native_break(pc)) { - return 0; - } - /* Do not allow two breakpoints of the same kind */ - if ( (bd = is_break(pc, break_op))) { - if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) - || break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) { - - BpDataTrace *bdt = (BpDataTrace *) bd; - Binary *old_match_spec; - - /* Update match spec and tracer */ - MatchSetRef(match_spec); - ErtsSmpBPLock(bdt); - old_match_spec = bdt->match_spec; - bdt->match_spec = match_spec; - bdt->tracer_pid = tracer_pid; - ErtsSmpBPUnlock(bdt); - MatchSetUnref(old_match_spec); - } else { - BpDataCount *bdc = (BpDataCount *) bd; - erts_aint_t count = 0; - erts_aint_t res = 0; - - ASSERT(! 
match_spec); - ASSERT(is_nil(tracer_pid)); - - if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) { - if (count_op == erts_break_stop) { - count = erts_smp_atomic_read_nob(&bdc->acount); - if (count >= 0) { - while(1) { - res = erts_smp_atomic_cmpxchg_nob(&bdc->acount, -count - 1, count); - if ((res == count) || count < 0) break; - count = res; - } - } - } else { - /* Reset call counter */ - erts_smp_atomic_set_nob(&bdc->acount, 0); - } - - } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) { - BpDataTime *bdt = (BpDataTime *) bd; - Uint i = 0; - - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); - - if (count_op == erts_break_stop) { - bdt->pause = 1; - } else { - bdt->pause = 0; - for (i = 0; i < bdt->n; i++) { - bp_hash_delete(&(bdt->hash[i])); - bp_hash_init(&(bdt->hash[i]), 32); - } - } - } else { - ASSERT (! count_op); - } - } - return 1; - } - if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) || - break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) { - size = sizeof(BpDataTrace); - } else { - ASSERT(! match_spec); - ASSERT(is_nil(tracer_pid)); - if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) { - if (count_op == erts_break_reset || count_op == erts_break_stop) { - /* Do not insert a new breakpoint */ - return 1; - } - size = sizeof(BpDataCount); - } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) { - if (count_op == erts_break_reset || count_op == erts_break_stop) { - /* Do not insert a new breakpoint */ - return 1; - } - size = sizeof(BpDataTime); + common = break_flags & bp->flags; + if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + MatchSetUnref(bp->local_ms); + } else if (common & ERTS_BPF_META_TRACE) { + MatchSetUnref(bp->meta_ms); + bp_meta_unref(bp->meta_pid); + } else if (common & ERTS_BPF_COUNT) { + if (count_op == erts_break_stop) { + bp->flags &= ~ERTS_BPF_COUNT_ACTIVE; } else { - ASSERT(! count_op); - ASSERT(break_op == (BeamInstr) BeamOp(op_i_debug_breakpoint)); - size = sizeof(BpDataDebug); + bp->flags |= ERTS_BPF_COUNT_ACTIVE; + erts_smp_atomic_set_nob(&bp->count->acount, 0); } - } - rs = (BpData ***) (pc-4); - if (! *rs) { - size_t ssize = sizeof(BeamInstr) * erts_no_schedulers; - *rs = (BpData **) Alloc(ssize); - sys_memzero(*rs, ssize); - } - - r = &((*rs)[0]); - - if (! *r) { - ASSERT(*pc != (BeamInstr) BeamOp(op_i_trace_breakpoint)); - ASSERT(*pc != (BeamInstr) BeamOp(op_i_mtrace_breakpoint)); - ASSERT(*pc != (BeamInstr) BeamOp(op_i_debug_breakpoint)); - ASSERT(*pc != (BeamInstr) BeamOp(op_i_count_breakpoint)); - ASSERT(*pc != (BeamInstr) BeamOp(op_i_time_breakpoint)); - /* First breakpoint; create singleton ring */ - bd = Alloc(size); - BpInit(bd, *pc); - *r = bd; - if (bif == BREAK_IS_ERL) { - *pc = break_op; - } - } else { - ASSERT(*pc == (BeamInstr) BeamOp(op_i_trace_breakpoint) || - *pc == (BeamInstr) BeamOp(op_i_mtrace_breakpoint) || - *pc == (BeamInstr) BeamOp(op_i_debug_breakpoint) || - *pc == (BeamInstr) BeamOp(op_i_time_breakpoint) || - *pc == (BeamInstr) BeamOp(op_i_count_breakpoint) || - *pc == (BeamInstr) em_apply_bif); - if (*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint)) { - /* Debug bp must be last, so if it is also first; - * it must be singleton. */ - ASSERT(BpSingleton(*r)); - /* Insert new bp first in the ring, i.e second to last. 
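/* Editorial note, not part of the patch: the ring splicing that is deleted
 * here is what the new GenericBp representation makes unnecessary.  Each
 * function now has a single GenericBp (stored at pc[-4]) with one
 * GenericBpData per breakpoint index, and the kind of breakpoint is an OR
 * of ERTS_BPF_* bits rather than the type of a ring element.  A minimal
 * query against the active copy looks like: */
static int bp_is_counting_sketch(BeamInstr *pc)
{
    GenericBp *g = (GenericBp *) pc[-4];   /* NULL when no breakpoint is set */

    if (g) {
        GenericBpData *bp = &g->data[erts_active_bp_ix()];
        return (bp->flags & ERTS_BPF_COUNT_ACTIVE) != 0;
    }
    return 0;
}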
*/ - bd = Alloc(size); - BpInitAndSpliceNext(bd, *pc, *r); - if (bif == BREAK_IS_ERL) { - *pc = break_op; - } - } else if ((*r)->prev->orig_instr - == (BeamInstr) BeamOp(op_i_debug_breakpoint)) { - /* Debug bp last in the ring; insert new second to last. */ - bd = Alloc(size); - BpInitAndSplicePrev(bd, (*r)->prev->orig_instr, *r); - (*r)->prev->orig_instr = break_op; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + return; + } else if (common & ERTS_BPF_TIME_TRACE) { + BpDataTime* bdt = bp->time; + Uint i = 0; + + if (count_op == erts_break_stop) { + bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE; } else { - /* Just insert last in the ring */ - bd = Alloc(size); - BpInitAndSpliceNext(bd, (*r)->orig_instr, *r); - (*r)->orig_instr = break_op; - *r = bd; + bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE; + for (i = 0; i < bdt->n; i++) { + bp_hash_delete(&(bdt->hash[i])); + bp_hash_init(&(bdt->hash[i]), 32); + } } - } - for (ix = 1; ix < erts_no_schedulers; ++ix) { - (*rs)[ix] = (*rs)[0]; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + return; } - bd->this_instr = break_op; - /* Init the bp type specific data */ - if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) || - break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) { - - BpDataTrace *bdt = (BpDataTrace *) bd; - - MatchSetRef(match_spec); - bdt->match_spec = match_spec; - bdt->tracer_pid = tracer_pid; - } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) { - BpDataTime *bdt = (BpDataTime *) bd; - Uint i = 0; - - bdt->pause = 0; - bdt->n = erts_no_schedulers; - bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n)); + /* + * Initialize the new breakpoint data. + */ + if (break_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + MatchSetRef(match_spec); + bp->local_ms = match_spec; + } else if (break_flags & ERTS_BPF_META_TRACE) { + BpMetaPid* bmp; + MatchSetRef(match_spec); + bp->meta_ms = match_spec; + bmp = Alloc(sizeof(BpMetaPid)); + erts_refc_init(&bmp->refc, 1); + erts_smp_atomic_init_nob(&bmp->pid, tracer_pid); + bp->meta_pid = bmp; + } else if (break_flags & ERTS_BPF_COUNT) { + BpCount* bcp; + + ASSERT((bp->flags & ERTS_BPF_COUNT) == 0); + bcp = Alloc(sizeof(BpCount)); + erts_refc_init(&bcp->refc, 1); + erts_smp_atomic_init_nob(&bcp->acount, 0); + bp->count = bcp; + } else if (break_flags & ERTS_BPF_TIME_TRACE) { + BpDataTime* bdt; + int i; + + ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0); + bdt = Alloc(sizeof(BpDataTime)); + erts_refc_init(&bdt->refc, 1); + bdt->n = erts_no_schedulers; + bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n)); for (i = 0; i < bdt->n; i++) { bp_hash_init(&(bdt->hash[i]), 32); } - } else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) { - BpDataCount *bdc = (BpDataCount *) bd; - erts_smp_atomic_init_nob(&bdc->acount, 0); + bp->time = bdt; } - if (bif == BREAK_IS_ERL) { - ++(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]); + bp->flags |= break_flags; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); +} + +static void +clear_break(BpFunctions* f, Uint break_flags) +{ + Uint i; + Uint n; + + n = f->matched; + for (i = 0; i < n; i++) { + BeamInstr* pc = f->matching[i].pc; + clear_function_break(pc, break_flags); } - return 1; } -static int clear_break(Eterm mfa[3], int specified, BeamInstr break_op) +static int +clear_function_break(BeamInstr *pc, Uint break_flags) { - int num_processed = 0; - Module *modp; + GenericBp* g; + GenericBpData* bp; + Uint common; + ErtsBpIndex ix = erts_staging_bp_ix(); - if (!specified) { - /* Iterate over all modules */ - int current; - int last = module_code_size(); + 
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); - for (current = 0; current < last; current++) { - modp = module_code(current); - ASSERT(modp != NULL); - num_processed += clear_module_break(modp, mfa, specified, break_op); - } - } else { - /* Process a single module */ - if ((modp = erts_get_module(mfa[0])) != NULL) { - num_processed += - clear_module_break(modp, mfa, specified, break_op); - } + if ((g = (GenericBp *) pc[-4]) == 0) { + return 1; } - return num_processed; -} -static int clear_module_break(Module *m, Eterm mfa[3], int specified, - BeamInstr break_op) { - BeamInstr** code_base; - BeamInstr* code_ptr; - int num_processed = 0; - Uint i; - BeamInstr n; - - ASSERT(m); - code_base = (BeamInstr **) m->code; - if (code_base == NULL) { - return 0; + bp = &g->data[ix]; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + common = bp->flags & break_flags; + bp->flags &= ~break_flags; + if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) { + MatchSetUnref(bp->local_ms); } - n = (BeamInstr) code_base[MI_NUM_FUNCTIONS]; - for (i = 0; i < n; ++i) { - code_ptr = code_base[MI_FUNCTIONS+i]; - if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) && - (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) { - BeamInstr *pc = code_ptr + 5; - - num_processed += - clear_function_break(m, pc, BREAK_IS_ERL, break_op); - } + if (common & ERTS_BPF_META_TRACE) { + MatchSetUnref(bp->meta_ms); + } + if (common & ERTS_BPF_COUNT) { + ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0); + bp_count_unref(bp->count); + } + if (common & ERTS_BPF_TIME_TRACE) { + ASSERT((bp->flags & ERTS_BPF_TIME_TRACE_ACTIVE) == 0); + bp_time_unref(bp->time); } - return num_processed; -} -static int clear_function_break(Module *m, BeamInstr *pc, int bif, BeamInstr break_op) { - BpData *bd; - Uint ix = 0; - BeamInstr **code_base = NULL; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + return 1; +} - if (bif == BREAK_IS_ERL) { - code_base = (BeamInstr **)m->code; - ASSERT(code_base); - ASSERT(code_base <= (BeamInstr **)pc); - ASSERT((BeamInstr **)pc < code_base + (m->code_length/sizeof(BeamInstr *))); - } else { - ASSERT(*pc == (BeamInstr) em_apply_bif); - ASSERT(m == NULL); +static void +bp_meta_unref(BpMetaPid* bmp) +{ + if (erts_refc_dectest(&bmp->refc, 0) <= 0) { + Free(bmp); } +} - /* - * Currently no trace support for native code. - */ - if (erts_is_native_break(pc)) { - return 0; +static void +bp_count_unref(BpCount* bcp) +{ + if (erts_refc_dectest(&bcp->refc, 0) <= 0) { + Free(bcp); } +} - while ( (bd = is_break(pc, break_op))) { - /* Remove all breakpoints of this type. - * There should be only one of each type, - * but break_op may be 0 which matches any type. +static void +bp_time_unref(BpDataTime* bdt) +{ + if (erts_refc_dectest(&bdt->refc, 0) <= 0) { + Uint i = 0; + Uint j = 0; + Process *h_p = NULL; + bp_data_time_item_t* item = NULL; + process_breakpoint_time_t* pbt = NULL; + + /* remove all psd associated with the hash + * and then delete the hash. + * ... sigh ... 
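/* Editorial note, not part of the patch: BpCount, BpMetaPid and BpDataTime
 * carry a refc because, while a new trace_pattern setting is being staged,
 * the active and the staging GenericBpData copy of the same breakpoint may
 * point at the same allocation; only the last unref may free it.  A rough
 * sketch of that sharing, with hypothetical helper names: */
static void share_count_sketch(GenericBpData *dst, const GenericBpData *src)
{
    dst->count = src->count;             /* both indices reference one BpCount */
    erts_refc_inc(&dst->count->refc, 2); /* at least two references now */
}

static void drop_count_sketch(GenericBpData *bp)
{
    if (erts_refc_dectest(&bp->count->refc, 0) <= 0) {
        Free(bp->count);                 /* mirrors bp_count_unref() above */
    }
}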
*/ - BeamInstr op; - BpData ***rs = (BpData ***) (pc - 4); - BpData **r = NULL; - -#ifdef DEBUG - for (ix = 1; ix < erts_no_schedulers; ++ix) { - ASSERT((*rs)[ix] == (*rs)[0]); - } -#endif - - r = &((*rs)[0]); - - ASSERT(*r); - /* Find opcode for this breakpoint */ - if (break_op) { - op = break_op; - } else { - if (bd == (*r)->next) { - /* First breakpoint in ring */ - op = *pc; - } else { - op = bd->prev->orig_instr; - } - } - if (BpSingleton(bd)) { - ASSERT(*r == bd); - /* Only one breakpoint to remove */ - if (bif == BREAK_IS_ERL) { - *pc = bd->orig_instr; - } - Free(*rs); - *rs = NULL; - } else { - BpData *bd_prev = bd->prev; - - BpSpliceNext(bd, bd_prev); - ASSERT(BpSingleton(bd)); - if (bd == *r) { - /* We removed the last breakpoint in the ring */ - *r = bd_prev; - bd_prev->orig_instr = bd->orig_instr; - } else if (bd_prev == *r) { - /* We removed the first breakpoint in the ring */ - if (bif == BREAK_IS_ERL) { - *pc = bd->orig_instr; - } - } else { - bd_prev->orig_instr = bd->orig_instr; - } - } - if (op == (BeamInstr) BeamOp(op_i_trace_breakpoint) || - op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) { - - BpDataTrace *bdt = (BpDataTrace *) bd; - MatchSetUnref(bdt->match_spec); - } - if (op == (BeamInstr) BeamOp(op_i_time_breakpoint)) { - BpDataTime *bdt = (BpDataTime *) bd; - Uint i = 0; - Uint j = 0; - Process *h_p = NULL; - bp_data_time_item_t *item = NULL; - process_breakpoint_time_t *pbt = NULL; - - /* remove all psd associated with the hash - * and then delete the hash. - * ... sigh ... - */ - for( i = 0; i < bdt->n; ++i) { - if (bdt->hash[i].used) { - for (j = 0; j < bdt->hash[i].n; ++j) { - item = &(bdt->hash[i].item[j]); - if (item->pid != NIL) { - h_p = process_tab[internal_pid_index(item->pid)]; - if (h_p) { - pbt = ERTS_PROC_SET_CALL_TIME(h_p, ERTS_PROC_LOCK_MAIN, NULL); - if (pbt) { - Free(pbt); - } + for (i = 0; i < bdt->n; ++i) { + if (bdt->hash[i].used) { + for (j = 0; j < bdt->hash[i].n; ++j) { + item = &(bdt->hash[i].item[j]); + if (item->pid != NIL) { + h_p = erts_pid2proc(NULL, 0, item->pid, + ERTS_PROC_LOCK_MAIN); + if (h_p) { + pbt = ERTS_PROC_SET_CALL_TIME(h_p, + ERTS_PROC_LOCK_MAIN, + NULL); + if (pbt) { + Free(pbt); } + erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN); } } } - bp_hash_delete(&(bdt->hash[i])); } - Free(bdt->hash); - bdt->hash = NULL; - bdt->n = 0; + bp_hash_delete(&(bdt->hash[i])); } - Free(bd); - if (bif == BREAK_IS_ERL) { - ASSERT(((BeamInstr) code_base[MI_NUM_BREAKPOINTS]) > 0); - --(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]); - } - if (*rs) { - for (ix = 1; ix < erts_no_schedulers; ++ix) { - (*rs)[ix] = (*rs)[0]; - } - } - } /* while bd != NULL */ - return 1; + Free(bdt->hash); + Free(bdt); + } } - - -/* -** Searches (linear forward) the breakpoint ring for a specified opcode -** and returns a pointer to the breakpoint data structure or NULL if -** not found. If the specified opcode is 0, the last breakpoint is -** returned. The program counter must point to the first executable -** (breakpoint) instruction of the function. -*/ - -BpData *erts_get_time_break(Process *p, BeamInstr *pc) { - return get_break(p, pc, (BeamInstr) BeamOp(op_i_time_breakpoint)); +static BpDataTime* +get_time_break(BeamInstr *pc) +{ + GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE); + return bp ? bp->time : 0; } -static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) { - ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - if (! 
erts_is_native_break(pc)) { - BpData **rs = (BpData **) pc[-4]; - BpData *bd = NULL, *ebd = NULL; - - if (! rs) { - return NULL; - } - - bd = ebd = rs[bp_sched2ix_proc(p)]; - ASSERT(bd); - if (bd->this_instr == break_op) { - return bd; - } - - bd = bd->next; - while (bd != ebd) { - ASSERT(bd); - if (bd->this_instr == break_op) { - ASSERT(bd); - return bd; - } - bd = bd->next; - } - } - return NULL; -} +static GenericBpData* +check_break(BeamInstr *pc, Uint break_flags) +{ + GenericBp* g = (GenericBp *) pc[-4]; -static BpData *is_break(BeamInstr *pc, BeamInstr break_op) { - BpData **rs; - BpData *bd = NULL, *ebd = NULL; ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - if (erts_is_native_break(pc)) { - return NULL; - } - rs = (BpData **) pc[-4]; - if (! rs) { - return NULL; - } - - bd = ebd = rs[erts_bp_sched2ix()]; - ASSERT(bd); - if ( (break_op == 0) || (bd->this_instr == break_op)) { - return bd; + return 0; } - - bd = bd->next; - while (bd != ebd) { - ASSERT(bd); - if (bd->this_instr == break_op) { - ASSERT(bd); - return bd; + if (g) { + GenericBpData* bp = &g->data[erts_active_bp_ix()]; + ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); + if (bp->flags & break_flags) { + return bp; } - bd = bd->next; } - return NULL; + return 0; } diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h index 167069552f..28aaaa462a 100644 --- a/erts/emulator/beam/beam_bp.h +++ b/erts/emulator/beam/beam_bp.h @@ -25,77 +25,6 @@ #include "erl_vm.h" #include "global.h" - - -/* A couple of gotchas: - * - * The breakpoint structure from BeamInstr, - * In beam_emu where the instruction counter pointer, I (or pc), - * points to the *current* instruction. At that time, if the instruction - * is a breakpoint instruction the pc looks like the following, - * - * I[-5] | op_i_func_info_IaaI | scheduler specific entries - * I[-4] | BpData** bpa | --> | BpData * bdas1 | ... | BpData * bdasN | - * I[-3] | Tagged Module | | | - * I[-2] | Tagged Function | V V - * I[-1] | Arity | BpData -> BpData -> BpData -> BpData - * I[0] | The bp instruction | ^ * the bp wheel * | - * |------------------------------ - * - * Common struct to all bp_data_* - * - * 1) The type of bp_data structure in the ring is deduced from the - * orig_instr field of the structure _before_ in the ring, except for - * the first structure in the ring that has its instruction in - * pc[0] of the code to execute. - * This is valid as long as you don't search for the function while it is - * being executed by something else. Or is in the middle of its rotation for - * any other reason. - * A key, the bp beam instruction, is included for this reason. - * - * 2) pc[-4][sched_id - 1] points to the _last_ structure in the ring before the - * breakpoints are being executed. - * - * So, as an example, when a breakpointed function starts to execute, - * the first instruction that is a breakpoint instruction at pc[0] finds - * its data at ((BpData **) pc[-4][sched_id - 1])->next and has to cast that pointer - * to the correct bp_data type. -*/ - -typedef struct bp_data { - struct bp_data *next; /* Doubly linked ring pointers */ - struct bp_data *prev; /* -"- */ - BeamInstr orig_instr; /* The original instruction to execute */ - BeamInstr this_instr; /* key */ -} BpData; -/* -** All the following bp_data_.. 
structs must begin the same way -*/ - -typedef struct bp_data_trace { - struct bp_data *next; - struct bp_data *prev; - BeamInstr orig_instr; - BeamInstr this_instr; /* key */ - Binary *match_spec; - Eterm tracer_pid; -} BpDataTrace; - -typedef struct bp_data_debug { - struct bp_data *next; - struct bp_data *prev; - BeamInstr orig_instr; - BeamInstr this_instr; /* key */ -} BpDataDebug; - -typedef struct bp_data_count { /* Call count */ - struct bp_data *next; - struct bp_data *prev; - BeamInstr orig_instr; - BeamInstr this_instr; /* key */ - erts_smp_atomic_t acount; -} BpDataCount; - typedef struct { Eterm pid; Sint count; @@ -110,13 +39,9 @@ typedef struct { } bp_time_hash_t; typedef struct bp_data_time { /* Call time */ - struct bp_data *next; - struct bp_data *prev; - BeamInstr orig_instr; - BeamInstr this_instr; /* key */ - Uint pause; - Uint n; - bp_time_hash_t *hash; + Uint n; + bp_time_hash_t *hash; + erts_refc_t refc; } BpDataTime; typedef struct { @@ -126,64 +51,42 @@ typedef struct { BeamInstr *pc; } process_breakpoint_time_t; /* used within psd */ -extern erts_smp_spinlock_t erts_bp_lock; +typedef struct { + erts_smp_atomic_t acount; + erts_refc_t refc; +} BpCount; + +typedef struct { + erts_smp_atomic_t pid; + erts_refc_t refc; +} BpMetaPid; + +typedef struct generic_bp_data { + Uint flags; + Binary* local_ms; /* Match spec for local call trace */ + Binary* meta_ms; /* Match spec for meta trace */ + BpMetaPid* meta_pid; /* Meta trace pid */ + BpCount* count; /* For call count */ + BpDataTime* time; /* For time trace */ +} GenericBpData; + +#define ERTS_NUM_BP_IX 2 + +typedef struct generic_bp { + BeamInstr orig_instr; + GenericBpData data[ERTS_NUM_BP_IX]; +} GenericBp; #define ERTS_BP_CALL_TIME_SCHEDULE_IN (0) #define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1) #define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2) -#define ERTS_BP_CALL_TIME_CALL (0) -#define ERTS_BP_CALL_TIME_RETURN (1) -#define ERTS_BP_CALL_TIME_TAIL_CALL (2) - -#ifdef ERTS_SMP -#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock) -#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock) -#else -#define ErtsSmpBPLock(BDC) -#define ErtsSmpBPUnlock(BDC) -#endif - #ifdef ERTS_SMP #define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1) #else #define bp_sched2ix_proc(p) (0) #endif -#define ErtsCountBreak(p, pc,instr_result) \ -do { \ - BpData **bds = (BpData **) (pc)[-4]; \ - BpDataCount *bdc = NULL; \ - Uint ix = bp_sched2ix_proc( (p) ); \ - erts_aint_t count = 0; \ - \ - ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \ - ASSERT(bds); \ - bdc = (BpDataCount *) bds[ix]; \ - bdc = (BpDataCount *) bdc->next; \ - ASSERT(bdc); \ - bds[ix] = (BpData *) bdc; \ - count = erts_smp_atomic_read_nob(&bdc->acount); \ - if (count >= 0) erts_smp_atomic_inc_nob(&bdc->acount); \ - *(instr_result) = bdc->orig_instr; \ -} while (0) - -#define ErtsBreakSkip(p, pc,instr_result) \ -do { \ - BpData **bds = (BpData **) (pc)[-4]; \ - BpData *bd = NULL; \ - Uint ix = bp_sched2ix_proc( (p) ); \ - \ - ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \ - ASSERT(bds); \ - bd = bds[ix]; \ - ASSERT(bd); \ - bd = bd->next; \ - ASSERT(bd); \ - bds[ix] = bd; \ - *(instr_result) = bd->orig_instr; \ -} while (0) - enum erts_break_op{ erts_break_nop = 0, /* Must be false */ erts_break_set = !0, /* Must be true */ @@ -191,7 +94,17 @@ enum erts_break_op{ erts_break_stop }; +typedef Uint32 ErtsBpIndex; +typedef struct { + BeamInstr* pc; + Module* mod; +} BpFunction; + +typedef struct { + Uint matched; /* Number matched */ + 
BpFunction* matching; /* Matching functions */ +} BpFunctions; /* ** Function interface exported from beam_bp.c @@ -199,49 +112,66 @@ enum erts_break_op{ void erts_bp_init(void); -int erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec, - Eterm tracer_pid); -int erts_clear_trace_break(Eterm mfa[3], int specified); -int erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec, +void erts_prepare_bp_staging(void); +void erts_commit_staged_bp(void); + +ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void); +ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void); + +void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified); +void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified); +void erts_bp_free_matched_functions(BpFunctions* f); + +void erts_install_breakpoints(BpFunctions* f); +void erts_uninstall_breakpoints(BpFunctions* f); +void erts_consolidate_bp_data(BpFunctions* f, int local); +void erts_consolidate_bif_bp_data(void); + +void erts_set_trace_break(BpFunctions *f, Binary *match_spec); +void erts_clear_trace_break(BpFunctions *f); + +void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local); +void erts_clear_call_trace_bif(BeamInstr *pc, int local); + +void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec, Eterm tracer_pid); -int erts_clear_mtrace_break(Eterm mfa[3], int specified); +void erts_clear_mtrace_break(BpFunctions *f); void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid); void erts_clear_mtrace_bif(BeamInstr *pc); -int erts_set_debug_break(Eterm mfa[3], int specified); -int erts_clear_debug_break(Eterm mfa[3], int specified); -int erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op); -int erts_clear_count_break(Eterm mfa[3], int specified); +void erts_set_debug_break(BpFunctions *f); +void erts_clear_debug_break(BpFunctions *f); +void erts_set_count_break(BpFunctions *f, enum erts_break_op); +void erts_clear_count_break(BpFunctions *f); -int erts_clear_break(Eterm mfa[3], int specified); + +void erts_clear_all_breaks(BpFunctions* f); int erts_clear_module_break(Module *modp); -int erts_clear_function_break(Module *modp, BeamInstr *pc); +void erts_clear_export_break(Module *modp, BeamInstr* pc); +BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg); BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args, Uint32 *ret_flags, Eterm *tracer_pid); -Uint32 erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, - int local, Eterm *tracer_pid); -int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, - Eterm *tracer_pid_ret); +int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local); int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_rte); int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret); int erts_is_native_break(BeamInstr *pc); -int erts_is_count_break(BeamInstr *pc, Sint *count_ret); +int erts_is_count_break(BeamInstr *pc, Uint *count_ret); int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time); -void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type); +void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt); +void erts_trace_time_return(Process* c_p, BeamInstr* pc); void erts_schedule_time_break(Process *p, Uint out); -int erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op); -int erts_clear_time_break(Eterm mfa[3], int specified); +void 
erts_set_time_break(BpFunctions *f, enum erts_break_op); +void erts_clear_time_break(BpFunctions *f); int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time); void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op); void erts_clear_time_trace_bif(BeamInstr *pc); -BpData *erts_get_time_break(Process *p, BeamInstr *pc); BeamInstr *erts_find_local_func(Eterm mfa[3]); @@ -258,6 +188,19 @@ ERTS_GLB_INLINE Uint erts_bp_sched2ix(void) return 0; #endif } + +extern erts_smp_atomic32_t erts_active_bp_index; +extern erts_smp_atomic32_t erts_staging_bp_index; + +ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void) +{ + return erts_smp_atomic32_read_nob(&erts_active_bp_index); +} + +ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void) +{ + return erts_smp_atomic32_read_nob(&erts_staging_bp_index); +} #endif #endif /* _BEAM_BP_H */ diff --git a/erts/emulator/beam/beam_catches.c b/erts/emulator/beam/beam_catches.c index 406ef1db5f..92f7ffe5a2 100644 --- a/erts/emulator/beam/beam_catches.c +++ b/erts/emulator/beam/beam_catches.c @@ -31,78 +31,144 @@ typedef struct { unsigned cdr; } beam_catch_t; -static int free_list; -static unsigned high_mark; -static unsigned tabsize; -static beam_catch_t *beam_catches; +#ifdef DEBUG +# define IF_DEBUG(x) x +#else +# define IF_DEBUG(x) +#endif + +struct bc_pool { + int free_list; + unsigned high_mark; + unsigned tabsize; + beam_catch_t *beam_catches; + /* + * Note that the 'beam_catches' area is shared by pools. Used slots + * are readonly as long as the module is not purgable. The free-list is + * protected by the code_ix lock. + */ + + IF_DEBUG(int is_staging;) +}; + +static struct bc_pool bccix[ERTS_NUM_CODE_IX]; void beam_catches_init(void) { - tabsize = DEFAULT_TABSIZE; - free_list = -1; - high_mark = 0; + int i; + + bccix[0].tabsize = DEFAULT_TABSIZE; + bccix[0].free_list = -1; + bccix[0].high_mark = 0; + bccix[0].beam_catches = erts_alloc(ERTS_ALC_T_CODE, + sizeof(beam_catch_t)*DEFAULT_TABSIZE); + IF_DEBUG(bccix[0].is_staging = 0); + for (i=1; i<ERTS_NUM_CODE_IX; i++) { + bccix[i] = bccix[i-1]; + } + /* For initial load: */ + IF_DEBUG(bccix[erts_staging_code_ix()].is_staging = 1); +} - beam_catches = erts_alloc(ERTS_ALC_T_CODE, sizeof(beam_catch_t)*DEFAULT_TABSIZE); + +static void gc_old_vec(beam_catch_t* vec) +{ + int i; + for (i=0; i<ERTS_NUM_CODE_IX; i++) { + if (bccix[i].beam_catches == vec) { + return; + } + } + erts_free(ERTS_ALC_T_CODE, vec); +} + + +void beam_catches_start_staging(void) +{ + ErtsCodeIndex dst = erts_staging_code_ix(); + ErtsCodeIndex src = erts_active_code_ix(); + beam_catch_t* prev_vec = bccix[dst].beam_catches; + + ASSERT(!bccix[src].is_staging && !bccix[dst].is_staging); + + bccix[dst] = bccix[src]; + gc_old_vec(prev_vec); + IF_DEBUG(bccix[dst].is_staging = 1); +} + +void beam_catches_end_staging(int commit) +{ + IF_DEBUG(bccix[erts_staging_code_ix()].is_staging = 0); } unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr) { int i; + struct bc_pool* p = &bccix[erts_staging_code_ix()]; + ASSERT(p->is_staging); /* * Allocate from free_list while it is non-empty. * If free_list is empty, allocate at high_mark. - * - * This avoids the need to initialise the free list in - * beam_catches_init(), which would cost O(TABSIZ) time. 
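/* Editorial sketch, not part of the patch: the policy described in the
 * comment above is kept by the new per-code-index pool; freed slots are
 * chained through their cdr field and reused before the high mark grows.
 * Distilled from beam_catches_cons() below (the real code also reallocates
 * the table when high_mark reaches tabsize): */
static int bc_alloc_slot_sketch(struct bc_pool *p)
{
    int i;
    if (p->free_list >= 0) {                  /* reuse a slot freed by delmod */
        i = p->free_list;
        p->free_list = p->beam_catches[i].cdr;
    } else {                                  /* otherwise take a fresh slot */
        i = p->high_mark++;
    }
    return i;
}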
*/ - if( free_list >= 0 ) { - i = free_list; - free_list = beam_catches[i].cdr; - } else if( high_mark < tabsize ) { - i = high_mark; - high_mark++; - } else { - /* No free slots and table is full: realloc table */ - tabsize = 2*tabsize; - beam_catches = erts_realloc(ERTS_ALC_T_CODE, beam_catches, sizeof(beam_catch_t)*tabsize); - i = high_mark; - high_mark++; + if (p->free_list >= 0) { + i = p->free_list; + p->free_list = p->beam_catches[i].cdr; + } + else { + if (p->high_mark >= p->tabsize) { + /* No free slots and table is full: realloc table */ + beam_catch_t* prev_vec = p->beam_catches; + unsigned newsize = p->tabsize*2; + + p->beam_catches = erts_alloc(ERTS_ALC_T_CODE, + newsize*sizeof(beam_catch_t)); + sys_memcpy(p->beam_catches, prev_vec, + p->tabsize*sizeof(beam_catch_t)); + gc_old_vec(prev_vec); + p->tabsize = newsize; + } + i = p->high_mark++; } - beam_catches[i].cp = cp; - beam_catches[i].cdr = cdr; + p->beam_catches[i].cp = cp; + p->beam_catches[i].cdr = cdr; return i; } BeamInstr *beam_catches_car(unsigned i) { - if( i >= tabsize ) { + struct bc_pool* p = &bccix[erts_active_code_ix()]; + + if (i >= p->tabsize ) { erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i); } - return beam_catches[i].cp; + return p->beam_catches[i].cp; } -void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes) +void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes, + ErtsCodeIndex code_ix) { + struct bc_pool* p = &bccix[code_ix]; unsigned i, cdr; + ASSERT((code_ix == erts_active_code_ix()) != bccix[erts_staging_code_ix()].is_staging); for(i = head; i != (unsigned)-1;) { - if( i >= tabsize ) { + if (i >= p->tabsize) { erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i); } - if( (char*)beam_catches[i].cp - (char*)code >= code_bytes ) { + if( (char*)p->beam_catches[i].cp - (char*)code >= code_bytes ) { erl_exit(1, "beam_catches_delmod: item %#x has cp %#lx which is not " "in module's range [%#lx,%#lx[\r\n", - i, (long)beam_catches[i].cp, + i, (long)p->beam_catches[i].cp, (long)code, (long)((char*)code + code_bytes)); } - beam_catches[i].cp = 0; - cdr = beam_catches[i].cdr; - beam_catches[i].cdr = free_list; - free_list = i; + p->beam_catches[i].cp = 0; + cdr = p->beam_catches[i].cdr; + p->beam_catches[i].cdr = p->free_list; + p->free_list = i; i = cdr; } } diff --git a/erts/emulator/beam/beam_catches.h b/erts/emulator/beam/beam_catches.h index 6223427f0d..b2bd2351a5 100644 --- a/erts/emulator/beam/beam_catches.h +++ b/erts/emulator/beam/beam_catches.h @@ -20,12 +20,21 @@ #ifndef __BEAM_CATCHES_H #define __BEAM_CATCHES_H +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif +#include "sys.h" +#include "code_ix.h" + #define BEAM_CATCHES_NIL (-1) void beam_catches_init(void); +void beam_catches_start_staging(void); +void beam_catches_end_staging(int commit); unsigned beam_catches_cons(BeamInstr* cp, unsigned cdr); BeamInstr *beam_catches_car(unsigned i); -void beam_catches_delmod(unsigned head, BeamInstr* code, unsigned code_bytes); +void beam_catches_delmod(unsigned head, BeamInstr* code, unsigned code_bytes, + ErtsCodeIndex); #define catch_pc(x) beam_catches_car(catch_val((x))) diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index 8041c92162..a609ed8c71 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -84,6 +84,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2) int i; int specified = 0; Eterm res; + BpFunctions f; if (bool != am_true && bool != am_false) goto error; @@ 
-114,18 +115,30 @@ erts_debug_breakpoint_2(BIF_ALIST_2) mfa[2] = signed_val(mfa[2]); } + if (!erts_try_seize_code_write_permission(BIF_P)) { + ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2], + BIF_P, BIF_ARG_1, BIF_ARG_2); + } erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); erts_smp_thr_progress_block(); + erts_bp_match_functions(&f, mfa, specified); if (bool == am_true) { - res = make_small(erts_set_debug_break(mfa, specified)); + erts_set_debug_break(&f); + erts_install_breakpoints(&f); + erts_commit_staged_bp(); } else { - res = make_small(erts_clear_debug_break(mfa, specified)); + erts_clear_debug_break(&f); + erts_commit_staged_bp(); + erts_uninstall_breakpoints(&f); } + erts_consolidate_bp_data(&f, 1); + res = make_small(f.matched); + erts_bp_free_matched_functions(&f); erts_smp_thr_progress_unblock(); erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); - + erts_release_code_write_permission(); return res; error: @@ -207,6 +220,7 @@ erts_debug_disassemble_1(BIF_ALIST_1) BIF_RET(am_false); } } else if (is_tuple(addr)) { + ErtsCodeIndex code_ix; Module* modp; Eterm mod; Eterm name; @@ -225,14 +239,14 @@ erts_debug_disassemble_1(BIF_ALIST_1) goto error; } arity = signed_val(tp[3]); - modp = erts_get_module(mod); + code_ix = erts_active_code_ix(); + modp = erts_get_module(mod, code_ix); /* * Try the export entry first to allow disassembly of special functions * such as erts_debug:apply/4. Then search for it in the module. */ - - if ((ep = erts_find_function(mod, name, arity)) != NULL) { + if ((ep = erts_find_function(mod, name, arity, code_ix)) != NULL) { /* XXX: add "&& ep->address != ep->code+3" condition? * Consider a traced function. * Its ep will have ep->address == ep->code+3. @@ -241,9 +255,9 @@ erts_debug_disassemble_1(BIF_ALIST_1) * But this code_ptr will point to the start of the Export, * not the function's func_info instruction. BOOM !? */ - code_ptr = ((BeamInstr *) ep->address) - 5; + code_ptr = ((BeamInstr *) ep->addressv[code_ix]) - 5; funcinfo = code_ptr+2; - } else if (modp == NULL || (code_base = modp->code) == NULL) { + } else if (modp == NULL || (code_base = modp->curr.code) == NULL) { BIF_RET(am_undef); } else { n = code_base[MI_NUM_FUNCTIONS]; diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 6d3b15cd46..7b1ae624ce 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -63,11 +63,7 @@ # define PROCESS_MAIN_CHK_LOCKS(P) \ do { \ if ((P)) { \ - erts_pix_lock_t *pix_lock__ = ERTS_PIX2PIXLOCK(internal_pid_index((P)->id));\ erts_proc_lc_chk_only_proc_main((P)); \ - erts_pix_lock(pix_lock__); \ - ASSERT(0 < (P)->lock.refc && (P)->lock.refc < erts_no_schedulers*5);\ - erts_pix_unlock(pix_lock__); \ } \ else \ erts_lc_check_exact(NULL, 0); \ @@ -221,7 +217,6 @@ BeamInstr beam_continue_exit[1]; BeamInstr* em_call_error_handler; BeamInstr* em_apply_bif; -BeamInstr* em_call_traced_function; /* NOTE These should be the only variables containing trace instructions. @@ -236,11 +231,6 @@ BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ /* - * We should warn only once for tuple funs. - */ -static erts_smp_atomic_t warned_for_tuple_funs; - -/* * All Beam instructions in numerical order. 
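/* Editorial sketch, not part of the patch: every BIF that modifies code now
 * brackets the update with the code write permission, yielding and retrying
 * if another process holds it.  The shape used by erts_debug_breakpoint_2
 * above is roughly: */
BIF_RETTYPE breakpoint_bif_sketch(BIF_ALIST_2)
{
    Eterm res;

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        /* someone else is changing code; reschedule this very call */
        ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }
    /* ... match, stage, install and commit the breakpoints here ... */
    res = am_true;
    erts_release_code_write_permission();
    BIF_RET(res);
}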
*/ @@ -495,7 +485,7 @@ extern int count_instructions; do { \ if (FCALLS > 0) { \ Eterm* dis_next; \ - SET_I(((Export *) Arg(0))->address); \ + SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \ dis_next = (Eterm *) *I; \ FCALLS--; \ CHECK_ARGS(I); \ @@ -504,7 +494,7 @@ extern int count_instructions; && FCALLS > neg_o_reds) { \ goto save_calls1; \ } else { \ - SET_I(((Export *) Arg(0))->address); \ + SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \ CHECK_ARGS(I); \ goto context_switch; \ } \ @@ -965,17 +955,9 @@ static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, static struct StackTrace * get_trace_from_exc(Eterm exc); static Eterm make_arglist(Process* c_p, Eterm* reg, int a); -#if defined(VXWORKS) -static int init_done; -#endif - void init_emulator(void) { -#if defined(VXWORKS) - init_done = 0; -#endif - erts_smp_atomic_init_nob(&warned_for_tuple_funs, (erts_aint_t) 0); process_main(); } @@ -1107,9 +1089,7 @@ dtrace_drvport_str(ErlDrvPort drvport, char *port_buf) */ void process_main(void) { -#if !defined(VXWORKS) static int init_done = 0; -#endif Process* c_p = NULL; int reds_used; #ifdef DEBUG @@ -1507,7 +1487,7 @@ void process_main(void) */ #ifdef USE_VM_CALL_PROBES if (DTRACE_ENABLED(global_function_entry)) { - BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address); + BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]); DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]); } #endif @@ -1522,7 +1502,7 @@ void process_main(void) SET_CP(c_p, I+2); #ifdef USE_VM_CALL_PROBES if (DTRACE_ENABLED(global_function_entry)) { - BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address); + BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]); DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]); } #endif @@ -1535,7 +1515,7 @@ void process_main(void) OpCase(i_call_ext_only_e): #ifdef USE_VM_CALL_PROBES if (DTRACE_ENABLED(global_function_entry)) { - BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address); + BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]); DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]); } #endif @@ -1826,13 +1806,12 @@ void process_main(void) msgp = PEEK_MESSAGE(c_p); if (msgp) erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - else { + else #endif + { SET_I((BeamInstr *) Arg(0)); Goto(*I); /* Jump to a wait or wait_timeout instruction */ -#ifdef ERTS_SMP } -#endif } ErtsMoveMsgAttachmentIntoProc(msgp, c_p, E, HTOP, FCALLS, { @@ -2061,11 +2040,11 @@ void process_main(void) OpCase(wait_f): wait2: { - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); c_p->i = (BeamInstr *) Arg(0); /* L1 */ SWAPOUT; c_p->arity = 0; - c_p->status = P_WAITING; + erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); c_p->current = NULL; goto do_schedule; @@ -3144,10 +3123,6 @@ void process_main(void) c_p->arg_reg[0] = r(0); SWAPOUT; c_p->i = I; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); - if (c_p->status != P_SUSPENDED) - erts_add_to_runq(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); goto do_schedule1; } @@ -4580,64 +4555,6 @@ void process_main(void) * Trace and debugging support. */ - /* - * At this point, I points to the code[3] in the export entry for - * a trace-enabled function. 
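/* Editorial note, not part of the patch: the ep->address to
 * ep->addressv[erts_active_code_ix()] rewrites above reflect that an export
 * entry now holds one target address per code index, so code that is only
 * staged (loaded but not yet committed) never becomes reachable from
 * running processes.  Every call site therefore resolves the target like: */
static BeamInstr* export_target_sketch(Export *ep)
{
    /* reading the active index is presumably a plain atomic load, cheap
     * enough to do on every external call */
    return ep->addressv[erts_active_code_ix()];
}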
- * - * code[0]: Module - * code[1]: Function - * code[2]: Arity - * code[3]: &&call_traced_function - * code[4]: Address of function. - */ - OpCase(call_traced_function): { - if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) { - unsigned offset = offsetof(Export, code) + 3*sizeof(BeamInstr); - Export* ep = (Export *) (((char *)I)-offset); - Uint32 flags; - - SWAPOUT; - reg[0] = r(0); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg, - 0, &c_p->tracer_proc); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - SWAPIN; - - if (flags & MATCH_SET_RX_TRACE) { - ASSERT(c_p->htop <= E && E <= c_p->hend); - if (E - 3 < HTOP) { - /* SWAPOUT, SWAPIN was done and r(0) was saved above */ - PROCESS_MAIN_CHK_LOCKS(c_p); - FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - r(0) = reg[0]; - SWAPIN; - } - E -= 3; - ASSERT(c_p->htop <= E && E <= c_p->hend); - ASSERT(is_CP((BeamInstr)(ep->code))); - ASSERT(is_internal_pid(c_p->tracer_proc) || - is_internal_port(c_p->tracer_proc)); - E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */ - E[1] = am_true; /* Process tracer */ - E[0] = make_cp(ep->code); - c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) - ? beam_exception_trace : beam_return_trace; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - c_p->trace_flags |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - } - } - SET_I((BeamInstr *)Arg(0)); - Dispatch(); - } - OpCase(return_trace): { BeamInstr* code = (BeamInstr *) (UWord) E[0]; @@ -4652,80 +4569,22 @@ void process_main(void) Goto(*I); } - OpCase(i_count_breakpoint): { + OpCase(i_generic_breakpoint): { BeamInstr real_I; - - ErtsCountBreak(c_p, (BeamInstr *) I, &real_I); + ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); + SWAPOUT; + reg[0] = r(0); + real_I = erts_generic_breakpoint(c_p, I, reg); + r(0) = reg[0]; + SWAPIN; ASSERT(VALID_INSTR(real_I)); Goto(real_I); } - /* need to send mfa instead of bdt pointer - * the pointer might be deallocated. 
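/* Editorial sketch, not part of the patch: the single i_generic_breakpoint
 * instruction above delegates everything to erts_generic_breakpoint()
 * (defined earlier in beam_bp.c), which is expected to run whatever trace
 * actions the active GenericBpData flags request and then hand back the
 * displaced original instruction for Goto().  A heavily simplified view of
 * that contract, ignoring local/meta call trace and the tail-call cases: */
static BeamInstr generic_breakpoint_contract_sketch(Process *c_p,
                                                    BeamInstr *I, Eterm *reg)
{
    GenericBp *g = (GenericBp *) I[-4];
    GenericBpData *bp = &g->data[erts_active_bp_ix()];

    (void) reg;                                       /* used by call trace */
    if (bp->flags & ERTS_BPF_COUNT_ACTIVE) {
        erts_smp_atomic_inc_nob(&bp->count->acount);  /* call_count */
    }
    if (bp->flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
        erts_trace_time_call(c_p, I, bp->time);       /* call_time */
    }
    return g->orig_instr;   /* emulator continues with the real instruction */
}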
- */ - - OpCase(i_time_breakpoint): { - BeamInstr real_I; - BpData **bds = (BpData **) (I)[-4]; - BpDataTime *bdt = NULL; - Uint ix = 0; -#ifdef ERTS_SMP - ix = c_p->scheduler_data->no - 1; -#else - ix = 0; -#endif - bdt = (BpDataTime *)bds[ix]; - - ASSERT((I)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - ASSERT(bdt); - bdt = (BpDataTime *) bdt->next; - ASSERT(bdt); - bds[ix] = (BpData *) bdt; - real_I = bdt->orig_instr; - ASSERT(VALID_INSTR(real_I)); - - if (IS_TRACED_FL(c_p, F_TRACE_CALLS) && !(bdt->pause)) { - if ( (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) || - (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) || - (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace))) { - /* This _IS_ a tail recursive call */ - SWAPOUT; - erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_TAIL_CALL); - SWAPIN; - } else { - SWAPOUT; - erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_CALL); - - /* r register needs to be copied to the array - * for the garbage collector - */ - ASSERT(c_p->htop <= E && E <= c_p->hend); - if (E - 2 < HTOP) { - reg[0] = r(0); - PROCESS_MAIN_CHK_LOCKS(c_p); - FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - r(0) = reg[0]; - } - SWAPIN; - - ASSERT(c_p->htop <= E && E <= c_p->hend); - - E -= 2; - E[0] = make_cp(I); - E[1] = make_cp(c_p->cp); /* original return address */ - c_p->cp = beam_return_time_trace; - } - } - - Goto(real_I); - } - OpCase(i_return_time_trace): { BeamInstr *pc = (BeamInstr *) (UWord) E[0]; SWAPOUT; - erts_trace_time_break(c_p, pc, NULL, ERTS_BP_CALL_TIME_RETURN); + erts_trace_time_return(c_p, pc); SWAPIN; c_p->cp = NULL; SET_I((BeamInstr *) cp_val(E[1])); @@ -4733,114 +4592,6 @@ void process_main(void) Goto(*I); } - OpCase(i_trace_breakpoint): - if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) { - BeamInstr real_I; - - ErtsBreakSkip(c_p, (BeamInstr *) I, &real_I); - Goto(real_I); - } - /* Fall through to next case */ - OpCase(i_mtrace_breakpoint): { - BeamInstr real_I; - Uint32 flags; - Eterm tracer_pid; - Uint* cpp; - int return_to_trace = 0, need = 0; - flags = 0; - SWAPOUT; - reg[0] = r(0); - - if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) { - cpp = &E[2]; - } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) { - return_to_trace = !0; - cpp = &E[0]; - } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) { - return_to_trace = !0; - cpp = &E[0]; - } else { - cpp = NULL; - } - if (cpp) { - /* This _IS_ a tail recursive call, if there are - * return_trace and/or i_return_to_trace stackframes - * on the stack, they are not intermixed with y registers - */ - BeamInstr *cp_save = c_p->cp; - for (;;) { - ASSERT(is_CP(*cpp)); - if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) { - cpp += 3; - } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) { - return_to_trace = !0; - cpp += 1; - } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_time_trace)) { - cpp += 2; - } else - break; - } - c_p->cp = (BeamInstr *) cp_val(*cpp); - ASSERT(is_CP(*cpp)); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - SWAPIN; /* Needed by shared heap. */ - c_p->cp = cp_save; - } else { - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - SWAPIN; /* Needed by shared heap. 
*/ - } - - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - - if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) { - need += 1; - } - if (flags & MATCH_SET_RX_TRACE) { - need += 3; - } - if (need) { - ASSERT(c_p->htop <= E && E <= c_p->hend); - if (E - need < HTOP) { - /* SWAPOUT was done and r(0) was saved above */ - PROCESS_MAIN_CHK_LOCKS(c_p); - FCALLS -= erts_garbage_collect(c_p, need, reg, I[-1]); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - r(0) = reg[0]; - SWAPIN; - } - } - if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) { - E -= 1; - ASSERT(c_p->htop <= E && E <= c_p->hend); - E[0] = make_cp(c_p->cp); - c_p->cp = (BeamInstr *) beam_return_to_trace; - } - if (flags & MATCH_SET_RX_TRACE) { - E -= 3; - ASSERT(c_p->htop <= E && E <= c_p->hend); - ASSERT(is_CP((Eterm) (UWord) (I - 3))); - ASSERT(am_true == tracer_pid || - is_internal_pid(tracer_pid) || is_internal_port(tracer_pid)); - E[2] = make_cp(c_p->cp); - E[1] = tracer_pid; - E[0] = make_cp(I - 3); /* We ARE at the beginning of an - instruction, - the funcinfo is above i. */ - c_p->cp = - (flags & MATCH_SET_EXCEPTION_TRACE) - ? beam_exception_trace : beam_return_trace; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - c_p->trace_flags |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - } - Goto(real_I); - } - OpCase(i_return_to_trace): { if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) { Uint *cpp = (Uint*) E; @@ -5110,9 +4861,6 @@ void process_main(void) c_p->arity = 1; /* One living register (the 'true' return value) */ SWAPOUT; c_p->i = I + 1; /* Next instruction */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); - erts_add_to_runq(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); c_p->current = NULL; goto do_schedule; } @@ -5193,7 +4941,6 @@ void process_main(void) #endif /* NO_JUMP_TABLE */ em_call_error_handler = OpCode(call_error_handler); - em_call_traced_function = OpCode(call_traced_function); em_apply_bif = OpCode(apply_bif); beam_apply[0] = (BeamInstr) OpCode(i_apply); @@ -5234,7 +4981,7 @@ void process_main(void) save_calls(c_p, (Export *) Arg(0)); - SET_I(((Export *) Arg(0))->address); + SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); dis_next = (Eterm *) *I; FCALLS--; @@ -5911,7 +5658,8 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func) /* * Search for the error_handler module. */ - ep = erts_find_function(erts_proc_get_error_handler(p), func, 3); + ep = erts_find_function(erts_proc_get_error_handler(p), func, 3, + erts_active_code_ix()); if (ep == NULL) { /* No error handler */ p->current = fi; p->freason = EXC_UNDEF; @@ -5941,7 +5689,7 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func) reg[0] = fi[0]; reg[1] = fi[1]; reg[2] = args; - return ep->address; + return ep->addressv[erts_active_code_ix()]; } @@ -5955,7 +5703,7 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity, * there is no error handler module. */ - if ((ep = erts_find_export_entry(erts_proc_get_error_handler(p), + if ((ep = erts_active_export_entry(erts_proc_get_error_handler(p), am_undefined_function, 3)) == NULL) { return NULL; } else { @@ -6062,7 +5810,7 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg) * Note: All BIFs have export entries; thus, no special case is needed. 
*/ - if ((ep = erts_find_export_entry(module, function, arity)) == NULL) { + if ((ep = erts_active_export_entry(module, function, arity)) == NULL) { if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL) goto error; } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) { save_calls(p, ep); @@ -6070,11 +5818,11 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg) #ifdef USE_VM_CALL_PROBES if (DTRACE_ENABLED(global_function_entry)) { - BeamInstr *fptr = (BeamInstr *) ep->address; + BeamInstr *fptr = (BeamInstr *) ep->addressv[erts_active_code_ix()]; DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]); } #endif - return ep->address; + return ep->addressv[erts_active_code_ix()]; } static BeamInstr* @@ -6116,7 +5864,7 @@ fixed_apply(Process* p, Eterm* reg, Uint arity) * Note: All BIFs have export entries; thus, no special case is needed. */ - if ((ep = erts_find_export_entry(module, function, arity)) == NULL) { + if ((ep = erts_active_export_entry(module, function, arity)) == NULL) { if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL) goto error; } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) { @@ -6125,11 +5873,11 @@ fixed_apply(Process* p, Eterm* reg, Uint arity) #ifdef USE_VM_CALL_PROBES if (DTRACE_ENABLED(global_function_entry)) { - BeamInstr *fptr = (BeamInstr *) ep->address; + BeamInstr *fptr = (BeamInstr *) ep->addressv[erts_active_code_ix()]; DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]); } #endif - return ep->address; + return ep->addressv[erts_active_code_ix()]; } int @@ -6206,9 +5954,7 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re */ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); - if (c_p->msg.len > 0) { - erts_add_to_runq(c_p); - } else { + if (!c_p->msg.len) { erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->fvalue = NIL; PROCESS_MAIN_CHK_LOCKS(c_p); @@ -6216,14 +5962,12 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); #ifdef ERTS_SMP ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); - if (c_p->msg.len > 0) - erts_add_to_runq(c_p); - else + if (!c_p->msg.len) #endif - c_p->status = P_WAITING; + erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); } erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->current = bif_export[BIF_hibernate_3]->code; @@ -6240,7 +5984,6 @@ call_fun(Process* p, /* Current process. */ Eterm fun = reg[arity]; Eterm hdr; int i; - Eterm function; Eterm* hp; if (!is_boxed(fun)) { @@ -6311,7 +6054,7 @@ call_fun(Process* p, /* Current process. */ Export* ep; Module* modp; Eterm module; - + ErtsCodeIndex code_ix = erts_active_code_ix(); /* * No arity. There is no module loaded that defines the fun, @@ -6319,9 +6062,9 @@ call_fun(Process* p, /* Current process. */ * representation (the module has never been loaded), * or the module defining the fun has been unloaded. */ - module = fe->module; - if ((modp = erts_get_module(module)) != NULL && modp->code != NULL) { + if ((modp = erts_get_module(module, code_ix)) != NULL + && modp->curr.code != NULL) { /* * There is a module loaded, but obviously the fun is not * defined in it. 
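Throughout these hunks the single ep->address field is replaced by ep->addressv[] indexed by erts_active_code_ix(), so a caller always reads the entry point that belongs to the currently published generation of code while a loader prepares the next generation in the staging slot. The stand-alone sketch below shows only that indexing idea; the two-generation array, the atomic index and the function names are invented stand-ins for the ERTS machinery.

    /* Hypothetical sketch of generation-indexed export addresses:
     * readers load the active index and call addressv[ix]; a loader
     * writes the staging slot and publishes it by flipping the index. */
    #include <stdio.h>
    #include <stdatomic.h>

    #define NUM_CODE_IX 2

    static atomic_uint active_code_ix = 0;          /* published generation */
    struct export { void (*addressv[NUM_CODE_IX])(void); };

    static void old_impl(void) { puts("old code"); }
    static void new_impl(void) { puts("new code"); }

    static unsigned active_ix(void)  { return atomic_load(&active_code_ix); }
    static unsigned staging_ix(void) { return (active_ix() + 1) % NUM_CODE_IX; }

    static void call_export(const struct export *ep)
    {
        ep->addressv[active_ix()]();     /* like SET_I(ep->addressv[ix]) */
    }

    int main(void)
    {
        struct export ep = { { old_impl, old_impl } };

        call_export(&ep);                     /* runs the old generation       */
        ep.addressv[staging_ix()] = new_impl; /* stage the freshly loaded code */
        atomic_store(&active_code_ix, staging_ix()); /* commit: flip the index */
        call_export(&ep);                     /* now runs the new generation   */
        return 0;
    }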
We must not call the error_handler @@ -6336,7 +6079,7 @@ call_fun(Process* p, /* Current process. */ */ ep = erts_find_function(erts_proc_get_error_handler(p), - am_undefined_lambda, 3); + am_undefined_lambda, 3, code_ix); if (ep == NULL) { /* No error handler */ p->current = NULL; p->freason = EXC_UNDEF; @@ -6346,7 +6089,7 @@ call_fun(Process* p, /* Current process. */ reg[1] = fun; reg[2] = args; reg[3] = NIL; - return ep->address; + return ep->addressv[erts_active_code_ix()]; } } } else if (is_export_header(hdr)) { @@ -6358,7 +6101,7 @@ call_fun(Process* p, /* Current process. */ if (arity == actual_arity) { DTRACE_GLOBAL_CALL(p, ep->code[0], ep->code[1], (Uint)ep->code[2]); - return ep->address; + return ep->addressv[erts_active_code_ix()]; } else { /* * Wrong arity. First build a list of the arguments. @@ -6378,63 +6121,6 @@ call_fun(Process* p, /* Current process. */ p->fvalue = TUPLE2(hp, fun, args); return NULL; } - } else if (hdr == make_arityval(2)) { - Eterm* tp; - Export* ep; - Eterm module; - - tp = tuple_val(fun); - module = tp[1]; - function = tp[2]; - if (!is_atom(module) || !is_atom(function)) { - goto badfun; - } - - /* - * If this is the first time a tuple fun is used, - * send a warning to the logger. - */ - if (erts_smp_atomic_xchg_nob(&warned_for_tuple_funs, - (erts_aint_t) 1) == 0) { - erts_dsprintf_buf_t* dsbufp; - - dsbufp = erts_create_logger_dsbuf(); - erts_dsprintf(dsbufp, "Call to tuple fun {%T,%T}.\n\n" - "Tuple funs are deprecated and will be removed " - "in R16. Use \"fun M:F/A\" instead, for example " - "\"fun %T:%T/%d\".\n\n" - "(This warning will only be shown the first time " - "a tuple fun is called.)\n", - module, function, module, function, arity); - erts_send_warning_to_logger(p->group_leader, dsbufp); - } - - if ((ep = erts_find_export_entry(module, function, arity)) == NULL) { - ep = erts_find_export_entry(erts_proc_get_error_handler(p), - am_undefined_function, 3); - if (ep == NULL) { - p->freason = EXC_UNDEF; - return 0; - } - if (is_non_value(args)) { - Uint sz = 2 * arity; - if (HeapWordsLeft(p) < sz) { - erts_garbage_collect(p, sz, reg, arity); - } - hp = HEAP_TOP(p); - HEAP_TOP(p) += sz; - args = NIL; - while (arity-- > 0) { - args = CONS(hp, reg[arity], args); - hp += 2; - } - } - reg[0] = module; - reg[1] = function; - reg[2] = args; - } - DTRACE_GLOBAL_CALL(p, module, function, arity); - return ep->address; } else { badfun: p->current = NULL; @@ -6537,7 +6223,8 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity) if ((ep = export_get(&e)) == NULL) { return 0; } - return ep->address == ep->code+3 && (ep->code[3] == (BeamInstr) em_apply_bif); + return ep->addressv[erts_active_code_ix()] == ep->code+3 + && (ep->code[3] == (BeamInstr) em_apply_bif); } diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index dd788df6e4..b51f076a5d 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -352,27 +352,6 @@ typedef struct LoaderState { int loc_size; /* Size of location info in bytes (2/4) */ } LoaderState; -/* - * Layout of the line table. - */ - -#define MI_LINE_FNAME_PTR 0 -#define MI_LINE_LOC_TAB 1 -#define MI_LINE_LOC_SIZE 2 -#define MI_LINE_FUNC_TAB 3 - -#define LINE_INVALID_LOCATION (0) - -/* - * Macros for manipulating locations. 
- */ - -#define IS_VALID_LOCATION(File, Line) \ - ((unsigned) (File) < 255 && (unsigned) (Line) < ((1 << 24) - 1)) -#define MAKE_LOCATION(File, Line) (((File) << 24) | (Line)) -#define LOC_FILE(Loc) ((Loc) >> 24) -#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1)) - #define GetTagAndValue(Stp, Tag, Val) \ do { \ BeamInstr __w; \ @@ -496,7 +475,8 @@ typedef struct LoaderState { } while (0) -static void free_state(LoaderState* stp); +static void free_loader_state(Binary* magic); +static void loader_state_dtor(Binary* magic); static Eterm insert_new_code(Process *c_p, ErtsProcLocks c_p_locks, Eterm group_leader, Eterm module, BeamInstr* code, Uint size); @@ -507,6 +487,7 @@ static int verify_chunks(LoaderState* stp); static int load_atom_table(LoaderState* stp); static int load_import_table(LoaderState* stp); static int read_export_table(LoaderState* stp); +static int is_bif(Eterm mod, Eterm func, unsigned arity); static int read_lambda_table(LoaderState* stp); static int read_literal_table(LoaderState* stp); static int read_line_table(LoaderState* stp); @@ -548,22 +529,9 @@ static Eterm native_addresses(Process* p, Eterm mod); int patch_funentries(Eterm Patchlist); int patch(Eterm Addresses, Uint fe); static int safe_mul(UWord a, UWord b, UWord* resp); -static void lookup_loc(FunctionInfo* fi, BeamInstr* pc, - BeamInstr* modp, int idx); - static int must_swap_floats; -/* - * The following variables keep a sorted list of address ranges for - * each module. It allows us to quickly find a function given an - * instruction pointer. - */ -Range* modules = NULL; /* Sorted lists of module addresses. */ -int num_loaded_modules; /* Number of loaded modules. */ -int allocated_modules; /* Number of slots allocated. */ -Range* mid_module = NULL; /* Cached search start point */ - Uint erts_total_code_size; /**********************************************************************/ @@ -579,11 +547,7 @@ void init_load(void) f.fd = 1.0; must_swap_floats = (f.fw[0] == 0); - allocated_modules = 128; - modules = (Range *) erts_alloc(ERTS_ALC_T_MODULE_REFS, - allocated_modules*sizeof(Range)); - mid_module = modules; - num_loaded_modules = 0; + erts_init_ranges(); } static void @@ -595,7 +559,7 @@ define_file(LoaderState* stp, char* name, int idx) } Eterm -erts_load_module(Process *c_p, +erts_preload_module(Process *c_p, ErtsProcLocks c_p_locks, Eterm group_leader, /* Group leader or NIL if none. */ Eterm* modp, /* @@ -605,15 +569,16 @@ erts_load_module(Process *c_p, byte* code, /* Points to the code to load */ Uint size) /* Size of code to load. 
*/ { - LoaderState* stp = erts_alloc_loader_state(); + Binary* magic = erts_alloc_loader_state(); Eterm retval; - retval = erts_prepare_loading(stp, c_p, group_leader, modp, + ASSERT(!erts_initialized); + retval = erts_prepare_loading(magic, c_p, group_leader, modp, code, size); if (retval != NIL) { return retval; } - return erts_finish_loading(stp, c_p, c_p_locks, modp); + return erts_finish_loading(magic, c_p, c_p_locks, modp); } /* #define LOAD_MEMORY_HARD_DEBUG 1*/ @@ -629,11 +594,13 @@ extern void check_allocated_block(Uint type, void *blk); #endif Eterm -erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader, +erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader, Eterm* modp, byte* code, Uint unloaded_size) { Eterm retval = am_badfile; + LoaderState* stp; + stp = ERTS_MAGIC_BIN_DATA(magic); stp->module = *modp; stp->group_leader = group_leader; @@ -666,7 +633,7 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader, /* * Initialize code area. */ - stp->code_buffer_size = erts_next_heap_size(2048 + stp->num_functions, 0); + stp->code_buffer_size = 2048 + stp->num_functions; stp->code = (BeamInstr *) erts_alloc(ERTS_ALC_T_CODE, sizeof(BeamInstr) * stp->code_buffer_size); @@ -679,8 +646,6 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader, stp->code[MI_COMPILE_PTR] = 0; stp->code[MI_COMPILE_SIZE] = 0; stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0; - stp->code[MI_NUM_BREAKPOINTS] = 0; - /* * Read the atom table. @@ -774,23 +739,24 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader, load_error: if (retval != NIL) { - free_state(stp); + free_loader_state(magic); } return retval; } Eterm -erts_finish_loading(LoaderState* stp, Process* c_p, +erts_finish_loading(Binary* magic, Process* c_p, ErtsProcLocks c_p_locks, Eterm* modp) { Eterm retval; + LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic); /* * No other process may run since we will update the export * table which is not protected by any locks. */ - ERTS_SMP_LC_ASSERT(erts_initialized == 0 || + ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() || erts_smp_thr_progress_is_blocking()); /* @@ -811,7 +777,6 @@ erts_finish_loading(LoaderState* stp, Process* c_p, * exported and imported functions. This can't fail. */ - erts_export_consolidate(); CHKBLK(ERTS_ALC_T_CODE,stp->code); final_touch(stp); @@ -837,16 +802,20 @@ erts_finish_loading(LoaderState* stp, Process* c_p, } load_error: - free_state(stp); + free_loader_state(magic); return retval; } -LoaderState* +Binary* erts_alloc_loader_state(void) { LoaderState* stp; + Binary* magic; - stp = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LoaderState)); + magic = erts_create_magic_binary(sizeof(LoaderState), + loader_state_dtor); + erts_refc_inc(&magic->refc, 1); + stp = ERTS_MAGIC_BIN_DATA(magic); stp->bin = NULL; stp->function = THE_NON_VALUE; /* Function not known yet */ stp->arity = 0; @@ -875,76 +844,123 @@ erts_alloc_loader_state(void) stp->line_instr = 0; stp->func_line = 0; stp->fname = 0; - return stp; + return magic; +} + +/* + * Return the module name (a tagged atom) for the prepared code + * in the magic binary, or NIL if the binary does not contain + * prepared code. 
+ */ +Eterm +erts_module_for_prepared_code(Binary* magic) +{ + LoaderState* stp; + + if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != loader_state_dtor) { + return NIL; + } + stp = ERTS_MAGIC_BIN_DATA(magic); + if (stp->code != 0) { + return stp->module; + } else { + return NIL; + } } static void -free_state(LoaderState* stp) +free_loader_state(Binary* magic) { + loader_state_dtor(magic); + if (erts_refc_dectest(&magic->refc, 0) == 0) { + erts_bin_free(magic); + } +} + +/* + * This destructor function can safely be called multiple times. + */ +static void +loader_state_dtor(Binary* magic) +{ + LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic); + if (stp->bin != 0) { driver_free_binary(stp->bin); + stp->bin = 0; } if (stp->code != 0) { erts_free(ERTS_ALC_T_CODE, stp->code); + stp->code = 0; } - if (stp->labels != NULL) { - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->labels); + if (stp->labels != 0) { + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels); + stp->labels = 0; } - if (stp->atom != NULL) { - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->atom); + if (stp->atom != 0) { + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->atom); + stp->atom = 0; } - if (stp->import != NULL) { - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->import); + if (stp->import != 0) { + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->import); + stp->import = 0; } - if (stp->export != NULL) { - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->export); + if (stp->export != 0) { + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->export); + stp->export = 0; } if (stp->lambdas != stp->def_lambdas) { - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->lambdas); + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->lambdas); + stp->lambdas = stp->def_lambdas; } - if (stp->literals != NULL) { + if (stp->literals != 0) { int i; for (i = 0; i < stp->num_literals; i++) { - if (stp->literals[i].heap != NULL) { - erts_free(ERTS_ALC_T_LOADER_TMP, + if (stp->literals[i].heap != 0) { + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literals[i].heap); + stp->literals[i].heap = 0; } } - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literals); + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literals); + stp->literals = 0; } - while (stp->literal_patches != NULL) { + while (stp->literal_patches != 0) { LiteralPatch* next = stp->literal_patches->next; - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literal_patches); + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literal_patches); stp->literal_patches = next; } - while (stp->string_patches != NULL) { + while (stp->string_patches != 0) { StringPatch* next = stp->string_patches->next; - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->string_patches); + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->string_patches); stp->string_patches = next; } - while (stp->genop_blocks) { - GenOpBlock* next = stp->genop_blocks->next; - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->genop_blocks); - stp->genop_blocks = next; - } if (stp->line_item != 0) { - erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_item); + erts_free(ERTS_ALC_T_PREPARED_CODE, stp->line_item); + stp->line_item = 0; } if (stp->line_instr != 0) { - erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_instr); + erts_free(ERTS_ALC_T_PREPARED_CODE, stp->line_instr); + stp->line_instr = 0; } if (stp->func_line != 0) { - erts_free(ERTS_ALC_T_LOADER_TMP, stp->func_line); + erts_free(ERTS_ALC_T_PREPARED_CODE, stp->func_line); + stp->func_line = 0; } if (stp->fname != 0) { - erts_free(ERTS_ALC_T_LOADER_TMP, stp->fname); + 
erts_free(ERTS_ALC_T_PREPARED_CODE, stp->fname); + stp->fname = 0; } - erts_free(ERTS_ALC_T_LOADER_TMP, stp); + /* + * The following data items should have been freed earlier. + */ + + ASSERT(stp->genop_blocks == 0); } static Eterm @@ -954,7 +970,6 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks, { Module* modp; Eterm retval; - int i; if ((retval = beam_make_current_old(c_p, c_p_locks, module)) != NIL) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); @@ -971,30 +986,15 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks, erts_total_code_size += size; modp = erts_put_module(module); - modp->code = code; - modp->code_length = size; - modp->catches = BEAM_CATCHES_NIL; /* Will be filled in later. */ + modp->curr.code = code; + modp->curr.code_length = size; + modp->curr.catches = BEAM_CATCHES_NIL; /* Will be filled in later. */ /* - * Update address table (used for finding a function from a PC value). + * Update ranges (used for finding a function from a PC value). */ - if (num_loaded_modules == allocated_modules) { - allocated_modules *= 2; - modules = (Range *) erts_realloc(ERTS_ALC_T_MODULE_REFS, - (void *) modules, - allocated_modules * sizeof(Range)); - } - for (i = num_loaded_modules; i > 0; i--) { - if (code > modules[i-1].start) { - break; - } - modules[i] = modules[i-1]; - } - modules[i].start = code; - modules[i].end = (BeamInstr *) (((byte *)code) + size); - num_loaded_modules++; - mid_module = &modules[num_loaded_modules/2]; + erts_update_ranges(code, size); return NIL; } @@ -1217,9 +1217,8 @@ load_atom_table(LoaderState* stp) GetInt(stp, 4, stp->num_atoms); stp->num_atoms++; - stp->atom = erts_alloc(ERTS_ALC_T_LOADER_TMP, - erts_next_heap_size((stp->num_atoms*sizeof(Eterm)), - 0)); + stp->atom = erts_alloc(ERTS_ALC_T_PREPARED_CODE, + stp->num_atoms*sizeof(Eterm)); /* * Read all atoms. @@ -1263,10 +1262,8 @@ load_import_table(LoaderState* stp) int i; GetInt(stp, 4, stp->num_imports); - stp->import = erts_alloc(ERTS_ALC_T_LOADER_TMP, - erts_next_heap_size((stp->num_imports * - sizeof(ImportEntry)), - 0)); + stp->import = erts_alloc(ERTS_ALC_T_PREPARED_CODE, + stp->num_imports * sizeof(ImportEntry)); for (i = 0; i < stp->num_imports; i++) { int n; Eterm mod; @@ -1296,7 +1293,7 @@ load_import_table(LoaderState* stp) * If the export entry refers to a BIF, get the pointer to * the BIF function. 
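The loader state is now kept in a reference-counted magic binary whose destructor, loader_state_dtor(), nulls every pointer as it frees it, which is what makes it safe to run both eagerly from free_loader_state() and again when the last reference to the binary is dropped. A minimal sketch of that refcount-plus-idempotent-destructor pattern, using plain malloc/free and invented names rather than the ERTS allocator and magic-binary API:

    /* Hypothetical sketch: a refcounted blob with an idempotent destructor.
     * Each freed member is reset to NULL so running the destructor twice
     * (explicitly and from the final unref) is harmless. */
    #include <stdio.h>
    #include <stdlib.h>

    struct state {
        int   refc;
        char *labels;
        char *atoms;
    };

    static void state_dtor(struct state *st)
    {
        free(st->labels); st->labels = NULL;
        free(st->atoms);  st->atoms  = NULL;
    }

    static struct state *state_new(void)
    {
        struct state *st = malloc(sizeof *st);
        st->refc = 1;
        st->labels = malloc(16);
        st->atoms  = malloc(16);
        return st;
    }

    static void state_unref(struct state *st)
    {
        state_dtor(st);              /* safe even if already run */
        if (--st->refc == 0)
            free(st);
    }

    int main(void)
    {
        struct state *st = state_new();
        st->refc++;                  /* a prepared-code handle keeps a ref */
        state_dtor(st);              /* eager cleanup after loading        */
        state_unref(st);             /* drop the extra reference           */
        state_unref(st);             /* last reference: frees the blob     */
        printf("done\n");
        return 0;
    }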
*/ - if ((e = erts_find_export_entry(mod, func, arity)) != NULL) { + if ((e = erts_active_export_entry(mod, func, arity)) != NULL) { if (e->code[3] == (BeamInstr) em_apply_bif) { stp->import[i].bf = (BifFunction) e->code[4]; if (func == am_load_nif && mod == am_erlang && arity == 2) { @@ -1315,16 +1312,8 @@ load_import_table(LoaderState* stp) static int read_export_table(LoaderState* stp) { - static struct { - Eterm mod; - Eterm func; - int arity; - } allow_redef[] = { - /* The BIFs that are allowed to be redefined by Erlang code */ - {am_erlang,am_apply,2}, - {am_erlang,am_apply,3}, - }; int i; + BeamInstr* address; GetInt(stp, 4, stp->num_exps); if (stp->num_exps > stp->num_functions) { @@ -1332,7 +1321,7 @@ read_export_table(LoaderState* stp) stp->num_exps, stp->num_functions); } stp->export - = (ExportEntry *) erts_alloc(ERTS_ALC_T_LOADER_TMP, + = (ExportEntry *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, (stp->num_exps * sizeof(ExportEntry))); for (i = 0; i < stp->num_exps; i++) { @@ -1340,7 +1329,6 @@ read_export_table(LoaderState* stp) Uint value; Eterm func; Uint arity; - Export* e; GetInt(stp, 4, n); GetAtom(stp, n, func); @@ -1358,29 +1346,34 @@ read_export_table(LoaderState* stp) if (value == 0) { LoadError2(stp, "export table entry %d: label %d not resolved", i, n); } - stp->export[i].address = stp->code + value; + stp->export[i].address = address = stp->code + value; /* - * Check that we are not redefining a BIF (except the ones allowed to - * redefine). + * Find out if there is a BIF with the same name. */ - if ((e = erts_find_export_entry(stp->module, func, arity)) != NULL) { - if (e->code[3] == (BeamInstr) em_apply_bif) { - int j; - for (j = 0; j < sizeof(allow_redef)/sizeof(allow_redef[0]); j++) { - if (stp->module == allow_redef[j].mod && - func == allow_redef[j].func && - arity == allow_redef[j].arity) { - break; - } - } - if (j == sizeof(allow_redef)/sizeof(allow_redef[0])) { - LoadError2(stp, "exported function %T/%d redefines BIF", - func, arity); - } - } + if (!is_bif(stp->module, func, arity)) { + continue; + } + + /* + * This is a stub for a BIF. + * + * It should not be exported, and the information in its + * func_info instruction should be invalidated so that it + * can be filtered out by module_info(functions) and by + * any other functions that walk through all local functions. + */ + + if (stp->labels[n].patches) { + LoadError3(stp, "there are local calls to the stub for " + "the BIF %T:%T/%d", + stp->module, func, arity); } + stp->export[i].address = NULL; + address[-1] = 0; + address[-2] = NIL; + address[-3] = NIL; } return 1; @@ -1388,15 +1381,39 @@ read_export_table(LoaderState* stp) return 0; } + +static int +is_bif(Eterm mod, Eterm func, unsigned arity) +{ + Export* e = erts_active_export_entry(mod, func, arity); + if (e == NULL) { + return 0; + } + if (e->code[3] != (BeamInstr) em_apply_bif) { + return 0; + } + if (mod == am_erlang && func == am_apply && arity == 3) { + /* + * erlang:apply/3 is a special case -- it is implemented + * as an instruction and it is OK to redefine it. 
+ */ + return 0; + } + return 1; +} + static int read_lambda_table(LoaderState* stp) { int i; GetInt(stp, 4, stp->num_lambdas); - stp->lambdas_allocated = stp->num_lambdas; - stp->lambdas = (Lambda *) erts_alloc(ERTS_ALC_T_LOADER_TMP, - stp->num_lambdas * sizeof(Lambda)); + if (stp->num_lambdas > stp->lambdas_allocated) { + ASSERT(stp->lambdas == stp->def_lambdas); + stp->lambdas_allocated = stp->num_lambdas; + stp->lambdas = (Lambda *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, + stp->num_lambdas * sizeof(Lambda)); + } for (i = 0; i < stp->num_lambdas; i++) { Uint n; Uint32 Index; @@ -1446,7 +1463,7 @@ read_literal_table(LoaderState* stp) stp->file_p = uncompressed; stp->file_left = (unsigned) uncompressed_sz; GetInt(stp, 4, stp->num_literals); - stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_LOADER_TMP, + stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, stp->num_literals * sizeof(Literal)); stp->allocated_literals = stp->num_literals; @@ -1466,7 +1483,7 @@ read_literal_table(LoaderState* stp) if ((heap_size = erts_decode_ext_size(p, sz)) < 0) { LoadError1(stp, "literal %d: bad external format", i); } - hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_LOADER_TMP, + hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE, heap_size*sizeof(Eterm)); stp->literals[i].off_heap.first = 0; stp->literals[i].off_heap.overhead = 0; @@ -1538,7 +1555,7 @@ read_line_table(LoaderState* stp) */ num_line_items++; - lp = (BeamInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP, + lp = (BeamInstr *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, num_line_items * sizeof(BeamInstr)); stp->line_item = lp; stp->num_line_items = num_line_items; @@ -1594,7 +1611,7 @@ read_line_table(LoaderState* stp) */ if (stp->num_fnames != 0) { - stp->fname = (Eterm *) erts_alloc(ERTS_ALC_T_LOADER_TMP, + stp->fname = (Eterm *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, stp->num_fnames * sizeof(Eterm)); for (i = 0; i < stp->num_fnames; i++) { @@ -1610,11 +1627,11 @@ read_line_table(LoaderState* stp) /* * Allocate the arrays to be filled while code is being loaded. */ - stp->line_instr = (LineInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP, + stp->line_instr = (LineInstr *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, stp->num_line_instrs * sizeof(LineInstr)); stp->current_li = 0; - stp->func_line = (int *) erts_alloc(ERTS_ALC_T_LOADER_TMP, + stp->func_line = (int *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, stp->num_functions * sizeof(int)); @@ -1677,7 +1694,7 @@ read_code_header(LoaderState* stp) * Initialize label table. 
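read_export_table() now refuses to export a function that collides with a BIF and blanks the stub's func_info words, relying on is_bif() to recognise such entries by the em_apply_bif word in the export's code[3] (with erlang:apply/3 exempt because it is implemented as an instruction). A stand-alone sketch of that recognition step, with an invented marker value and string names instead of atoms:

    /* Hypothetical sketch: recognise an export entry that is really a BIF
     * trampoline.  Here code[3] holds an "apply_bif" marker and
     * erlang:apply/3 is exempt because it is implemented as an instruction. */
    #include <stdio.h>
    #include <string.h>

    #define APPLY_BIF_MARKER 0xB1F

    struct export {
        const char *mod, *func;
        unsigned    arity;
        unsigned long code[5];   /* [3] == APPLY_BIF_MARKER for BIF stubs */
    };

    static int is_bif(const struct export *e)
    {
        if (e == NULL || e->code[3] != APPLY_BIF_MARKER)
            return 0;
        if (strcmp(e->mod, "erlang") == 0 &&
            strcmp(e->func, "apply") == 0 && e->arity == 3)
            return 0;            /* allowed to be redefined */
        return 1;
    }

    int main(void)
    {
        struct export bif   = { "erlang", "length", 1, { 0, 0, 0, APPLY_BIF_MARKER, 0 } };
        struct export apply = { "erlang", "apply",  3, { 0, 0, 0, APPLY_BIF_MARKER, 0 } };
        printf("length/1 is a BIF stub: %d\n", is_bif(&bif));
        printf("apply/3 is exempt: %d\n", !is_bif(&apply));
        return 0;
    }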
*/ - stp->labels = (Label *) erts_alloc(ERTS_ALC_T_LOADER_TMP, + stp->labels = (Label *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, stp->num_labels * sizeof(Label)); for (i = 0; i < stp->num_labels; i++) { stp->labels[i].value = 0; @@ -1703,9 +1720,9 @@ read_code_header(LoaderState* stp) #define CodeNeed(w) do { \ ASSERT(ci <= code_buffer_size); \ if (code_buffer_size < ci+(w)) { \ - code_buffer_size = erts_next_heap_size(ci+(w), 0); \ - stp->code = code \ - = (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, \ + code_buffer_size = 2*ci+(w); \ + stp->code = code = \ + (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, \ (void *) code, \ code_buffer_size * sizeof(BeamInstr)); \ } \ @@ -1731,6 +1748,7 @@ load_code(LoaderState* stp) GenOp* last_op = NULL; GenOp** last_op_next = NULL; int arity; + int retval = 1; /* * The size of the loaded func_info instruction is needed @@ -2457,7 +2475,11 @@ load_code(LoaderState* stp) case op_int_code_end: stp->code_buffer_size = code_buffer_size; stp->ci = ci; - return 1; + stp->function = THE_NON_VALUE; + stp->genop = NULL; + stp->specific_op = -1; + retval = 1; + goto cleanup; } /* @@ -2471,9 +2493,20 @@ load_code(LoaderState* stp) } } - load_error: - return 0; + retval = 0; + + cleanup: + /* + * Clean up everything that is not needed any longer. + */ + + while (stp->genop_blocks) { + GenOpBlock* next = stp->genop_blocks->next; + erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->genop_blocks); + stp->genop_blocks = next; + } + return retval; } @@ -4265,24 +4298,31 @@ final_touch(LoaderState* stp) index = next; } modp = erts_put_module(stp->module); - modp->catches = catches; + modp->curr.catches = catches; /* * Export functions. */ for (i = 0; i < stp->num_exps; i++) { - Export* ep = erts_export_put(stp->module, stp->export[i].function, - stp->export[i].arity); + Export* ep; + BeamInstr* address = stp->export[i].address; + + if (address == NULL) { + /* Skip stub for a BIF */ + continue; + } + ep = erts_export_put(stp->module, stp->export[i].function, + stp->export[i].arity); if (!on_load) { - ep->address = stp->export[i].address; + ep->addressv[erts_staging_code_ix()] = address; } else { /* * Don't make any of the exported functions * callable yet. 
*/ - ep->address = ep->code+3; - ep->code[4] = (BeamInstr) stp->export[i].address; + ep->addressv[erts_staging_code_ix()] = ep->code+3; + ep->code[4] = (BeamInstr) address; } } @@ -4926,7 +4966,7 @@ new_label(LoaderState* stp) int num = stp->num_labels; stp->num_labels++; - stp->labels = (Label *) erts_realloc(ERTS_ALC_T_LOADER_TMP, + stp->labels = (Label *) erts_realloc(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels, stp->num_labels * sizeof(Label)); stp->labels[num].value = 0; @@ -4937,7 +4977,8 @@ new_label(LoaderState* stp) static void new_literal_patch(LoaderState* stp, int pos) { - LiteralPatch* p = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LiteralPatch)); + LiteralPatch* p = erts_alloc(ERTS_ALC_T_PREPARED_CODE, + sizeof(LiteralPatch)); p->pos = pos; p->next = stp->literal_patches; stp->literal_patches = p; @@ -4946,7 +4987,7 @@ new_literal_patch(LoaderState* stp, int pos) static void new_string_patch(LoaderState* stp, int pos) { - StringPatch* p = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(StringPatch)); + StringPatch* p = erts_alloc(ERTS_ALC_T_PREPARED_CODE, sizeof(StringPatch)); p->pos = pos; p->next = stp->string_patches; stp->string_patches = p; @@ -4964,14 +5005,14 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size) ASSERT(stp->num_literals == 0); stp->allocated_literals = 8; need = stp->allocated_literals * sizeof(Literal); - stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_LOADER_TMP, + stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, need); } else if (stp->allocated_literals <= stp->num_literals) { Uint need; stp->allocated_literals *= 2; need = stp->allocated_literals * sizeof(Literal); - stp->literals = (Literal *) erts_realloc(ERTS_ALC_T_LOADER_TMP, + stp->literals = (Literal *) erts_realloc(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literals, need); } @@ -4980,7 +5021,7 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size) lit = stp->literals + stp->num_literals; lit->offset = 0; lit->heap_size = heap_size; - lit->heap = erts_alloc(ERTS_ALC_T_LOADER_TMP, heap_size*sizeof(Eterm)); + lit->heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE, heap_size*sizeof(Eterm)); lit->term = make_boxed(lit->heap); lit->off_heap.first = 0; lit->off_heap.overhead = 0; @@ -4999,7 +5040,7 @@ erts_module_info_0(Process* p, Eterm module) return THE_NON_VALUE; } - if (erts_get_module(module) == NULL) { + if (erts_get_module(module, erts_active_code_ix()) == NULL) { return THE_NON_VALUE; } @@ -5054,32 +5095,43 @@ functions_in_module(Process* p, /* Process whose heap to use. */ BeamInstr* code; int i; Uint num_functions; + Uint need; Eterm* hp; + Eterm* hp_end; Eterm result = NIL; if (is_not_atom(mod)) { return THE_NON_VALUE; } - modp = erts_get_module(mod); + modp = erts_get_module(mod, erts_active_code_ix()); if (modp == NULL) { return THE_NON_VALUE; } - code = modp->code; + code = modp->curr.code; num_functions = code[MI_NUM_FUNCTIONS]; - hp = HAlloc(p, 5*num_functions); + need = 5*num_functions; + hp = HAlloc(p, need); + hp_end = hp + need; for (i = num_functions-1; i >= 0 ; i--) { BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i]; Eterm name = (Eterm) func_info[3]; int arity = (int) func_info[4]; Eterm tuple; - ASSERT(is_atom(name)); - tuple = TUPLE2(hp, name, make_small(arity)); - hp += 3; - result = CONS(hp, tuple, result); - hp += 2; + /* + * If the function name is [], this entry is a stub for + * a BIF that should be ignored. 
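In final_touch() the export addresses are written into the staging code index, and when the module has an on_load function the export is pointed at its own ep->code+3 trampoline while the real address is parked in ep->code[4], so nothing becomes callable before on_load succeeds. A compact sketch of that park-behind-a-trap idea, with an invented two-field layout standing in for the real Export record:

    /* Hypothetical sketch: defer publishing an export.  While on_load is
     * pending the export resolves to a trap stub; the real entry point is
     * parked in a spare slot and swapped in once on_load reports success. */
    #include <stdio.h>

    typedef void (*entry_fn)(void);

    struct export {
        entry_fn address;   /* what callers jump to              */
        entry_fn parked;    /* real entry, hidden during on_load */
    };

    static void trap_stub(void)  { puts("module not ready: trap to error handler"); }
    static void real_entry(void) { puts("real exported function"); }

    static void export_stage(struct export *ep, entry_fn real, int has_on_load)
    {
        if (!has_on_load) {
            ep->address = real;
        } else {
            ep->address = trap_stub; /* like ep->addressv[ix] = ep->code+3 */
            ep->parked  = real;      /* like ep->code[4] = address         */
        }
    }

    static void on_load_succeeded(struct export *ep)
    {
        ep->address = ep->parked;    /* finally make it callable */
    }

    int main(void)
    {
        struct export ep = { 0, 0 };
        export_stage(&ep, real_entry, 1);
        ep.address();                /* still trapped     */
        on_load_succeeded(&ep);
        ep.address();                /* now the real code */
        return 0;
    }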
+ */ + ASSERT(is_atom(name) || is_nil(name)); + if (is_atom(name)) { + tuple = TUPLE2(hp, name, make_small(arity)); + hp += 3; + result = CONS(hp, tuple, result); + hp += 2; + } } + HRelease(p, hp_end, hp); return result; } @@ -5106,12 +5158,12 @@ native_addresses(Process* p, Eterm mod) return THE_NON_VALUE; } - modp = erts_get_module(mod); + modp = erts_get_module(mod, erts_active_code_ix()); if (modp == NULL) { return THE_NON_VALUE; } - code = modp->code; + code = modp->curr.code; num_functions = code[MI_NUM_FUNCTIONS]; need = (6+BIG_UINT_HEAP_SIZE)*num_functions; hp = HAlloc(p, need); @@ -5122,9 +5174,11 @@ native_addresses(Process* p, Eterm mod) int arity = (int) func_info[4]; Eterm tuple; - ASSERT(is_atom(name)); + ASSERT(is_atom(name) || is_nil(name)); /* [] if BIF stub */ if (func_info[1] != 0) { - Eterm addr = erts_bld_uint(&hp, NULL, func_info[1]); + Eterm addr; + ASSERT(is_atom(name)); + addr = erts_bld_uint(&hp, NULL, func_info[1]); tuple = erts_bld_tuple(&hp, NULL, 3, name, make_small(arity), addr); result = erts_bld_cons(&hp, NULL, tuple, result); } @@ -5149,18 +5203,20 @@ exported_from_module(Process* p, /* Process whose heap to use. */ Eterm* hp = NULL; Eterm* hend = NULL; Eterm result = NIL; + ErtsCodeIndex code_ix; if (is_not_atom(mod)) { return THE_NON_VALUE; } - for (i = 0; i < export_list_size(); i++) { - Export* ep = export_list(i); + code_ix = erts_active_code_ix(); + for (i = 0; i < export_list_size(code_ix); i++) { + Export* ep = export_list(i,code_ix); if (ep->code[0] == mod) { Eterm tuple; - if (ep->address == ep->code+3 && + if (ep->addressv[code_ix] == ep->code+3 && ep->code[3] == (BeamInstr) em_call_error_handler) { /* There is a call to the function, but it does not exist. */ continue; @@ -5204,11 +5260,11 @@ attributes_for_module(Process* p, /* Process whose heap to use. */ return THE_NON_VALUE; } - modp = erts_get_module(mod); + modp = erts_get_module(mod, erts_active_code_ix()); if (modp == NULL) { return THE_NON_VALUE; } - code = modp->code; + code = modp->curr.code; ext = (byte *) code[MI_ATTR_PTR]; if (ext != NULL) { hp = HAlloc(p, code[MI_ATTR_SIZE_ON_HEAP]); @@ -5244,11 +5300,11 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */ return THE_NON_VALUE; } - modp = erts_get_module(mod); + modp = erts_get_module(mod, erts_active_code_ix()); if (modp == NULL) { return THE_NON_VALUE; } - code = modp->code; + code = modp->curr.code; ext = (byte *) code[MI_COMPILE_PTR]; if (ext != NULL) { hp = HAlloc(p, code[MI_COMPILE_SIZE_ON_HEAP]); @@ -5263,113 +5319,6 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */ } /* - * Find a function from the given pc and fill information in - * the FunctionInfo struct. If the full_info is non-zero, fill - * in all available information (including location in the - * source code). If no function is found, the 'current' field - * will be set to NULL. 
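functions_in_module() now allocates heap for the worst case (five words per function), skips the nil-named BIF stubs while building the result list, and hands the unused tail back with HRelease. The same allocate-high-then-shrink pattern in plain C, with malloc/realloc standing in for the process heap:

    /* Hypothetical sketch: build a filtered result in a worst-case buffer,
     * then shrink the allocation to what was actually used. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *names[] = { "init", NULL, "loop", NULL, "stop" }; /* NULL = BIF stub */
        size_t n = sizeof names / sizeof names[0];
        const char **out = malloc(n * sizeof *out); /* worst case: keep everything */
        size_t used = 0;

        for (size_t i = 0; i < n; i++)
            if (names[i] != NULL)                   /* skip the stub entries */
                out[used++] = names[i];

        out = realloc(out, used * sizeof *out);     /* give the unused tail back */
        for (size_t i = 0; i < used; i++)
            printf("%s\n", out[i]);
        free(out);
        return 0;
    }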
- */ - -void -erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info) -{ - Range* low = modules; - Range* high = low + num_loaded_modules; - Range* mid = mid_module; - - fi->current = NULL; - fi->needed = 5; - fi->loc = LINE_INVALID_LOCATION; - while (low < high) { - if (pc < mid->start) { - high = mid; - } else if (pc > mid->end) { - low = mid + 1; - } else { - BeamInstr** low1 = (BeamInstr **) (mid->start + MI_FUNCTIONS); - BeamInstr** high1 = low1 + mid->start[MI_NUM_FUNCTIONS]; - BeamInstr** mid1; - - while (low1 < high1) { - mid1 = low1 + (high1-low1) / 2; - if (pc < mid1[0]) { - high1 = mid1; - } else if (pc < mid1[1]) { - mid_module = mid; - fi->current = mid1[0]+2; - if (full_info) { - BeamInstr** fp = (BeamInstr **) (mid->start + - MI_FUNCTIONS); - int idx = mid1 - fp; - lookup_loc(fi, pc, mid->start, idx); - } - return; - } else { - low1 = mid1 + 1; - } - } - return; - } - mid = low + (high-low) / 2; - } -} - -static void -lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx) -{ - Eterm* line = (Eterm *) modp[MI_LINE_TABLE]; - Eterm* low; - Eterm* high; - Eterm* mid; - Eterm pc; - - if (line == 0) { - return; - } - - pc = (Eterm) (BeamInstr) orig_pc; - fi->fname_ptr = (Eterm *) (BeamInstr) line[MI_LINE_FNAME_PTR]; - low = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx]; - high = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx+1]; - while (high > low) { - mid = low + (high-low) / 2; - if (pc < mid[0]) { - high = mid; - } else if (pc < mid[1]) { - int file; - int index = mid - (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB]; - - if (line[MI_LINE_LOC_SIZE] == 2) { - Uint16* loc_table = - (Uint16 *) (BeamInstr) line[MI_LINE_LOC_TAB]; - fi->loc = loc_table[index]; - } else { - Uint32* loc_table = - (Uint32 *) (BeamInstr) line[MI_LINE_LOC_TAB]; - ASSERT(line[MI_LINE_LOC_SIZE] == 4); - fi->loc = loc_table[index]; - } - if (fi->loc == LINE_INVALID_LOCATION) { - return; - } - fi->needed += 3+2+3+2; - file = LOC_FILE(fi->loc); - if (file == 0) { - /* Special case: Module name with ".erl" appended */ - Atom* mod_atom = atom_tab(atom_val(fi->current[0])); - fi->needed += 2*(mod_atom->len+4); - } else { - Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1])); - fi->needed += 2*ap->len; - } - return; - } else { - low = mid + 1; - } - } -} - -/* * Build a single {M,F,A,Loction} item to be part of * a stack trace. 
*/ @@ -5449,6 +5398,7 @@ code_get_chunk_2(BIF_ALIST_2) Process* p = BIF_P; Eterm Bin = BIF_ARG_1; Eterm Chunk = BIF_ARG_2; + Binary* magic = 0; LoaderState* stp; Uint chunk = 0; ErlSubBin* sb; @@ -5461,12 +5411,13 @@ code_get_chunk_2(BIF_ALIST_2) Eterm real_bin; byte* temp_alloc = NULL; - stp = erts_alloc_loader_state(); + magic = erts_alloc_loader_state(); + stp = ERTS_MAGIC_BIN_DATA(magic); if ((start = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) { error: erts_free_aligned_binary_bytes(temp_alloc); - if (stp) { - free_state(stp); + if (magic) { + free_loader_state(magic); } BIF_ERROR(p, BADARG); } @@ -5511,7 +5462,7 @@ code_get_chunk_2(BIF_ALIST_2) done: erts_free_aligned_binary_bytes(temp_alloc); - free_state(stp); + free_loader_state(magic); return res; } @@ -5524,14 +5475,16 @@ code_module_md5_1(BIF_ALIST_1) { Process* p = BIF_P; Eterm Bin = BIF_ARG_1; + Binary* magic; LoaderState* stp; byte* bytes; byte* temp_alloc = NULL; Eterm res; - stp = erts_alloc_loader_state(); + magic = erts_alloc_loader_state(); + stp = ERTS_MAGIC_BIN_DATA(magic); if ((bytes = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) { - free_state(stp); + free_loader_state(magic); BIF_ERROR(p, BADARG); } stp->module = THE_NON_VALUE; /* Suppress diagnostiscs */ @@ -5545,7 +5498,7 @@ code_module_md5_1(BIF_ALIST_1) done: erts_free_aligned_binary_bytes(temp_alloc); - free_state(stp); + free_loader_state(magic); return res; } @@ -5574,7 +5527,8 @@ stub_copy_info(LoaderState* stp, int chunk, /* Chunk: ATTR_CHUNK or COMPILE_CHUNK */ byte* info, /* Where to store info. */ BeamInstr* ptr_word, /* Where to store pointer into info. */ - BeamInstr* size_word) /* Where to store size of info. */ + BeamInstr* size_word, /* Where to store size into info. */ + BeamInstr* size_on_heap_word) /* Where to store size on heap. */ { Sint decoded_size; Uint size = stp->chunks[chunk].size; @@ -5585,7 +5539,8 @@ stub_copy_info(LoaderState* stp, if (decoded_size < 0) { return 0; } - *size_word = decoded_size; + *size_word = (BeamInstr) size; + *size_on_heap_word = decoded_size; } return info + size; } @@ -5601,7 +5556,7 @@ stub_read_export_table(LoaderState* stp) stp->num_exps, stp->num_functions); } stp->export - = (ExportEntry *) erts_alloc(ERTS_ALC_T_LOADER_TMP, + = (ExportEntry *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, stp->num_exps * sizeof(ExportEntry)); for (i = 0; i < stp->num_exps; i++) { @@ -5627,20 +5582,29 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp) { int i; int n = stp->num_exps; + Eterm mod = fp[2]; Eterm function = fp[3]; int arity = fp[4]; #ifdef HIPE Lambda* lp; #endif + if (is_bif(mod, function, arity)) { + fp[1] = 0; + fp[2] = 0; + fp[3] = 0; + fp[4] = 0; + return; + } + /* * Test if the function should be exported. */ for (i = 0; i < n; i++) { if (stp->export[i].function == function && stp->export[i].arity == arity) { - Export* ep = erts_export_put(fp[2], function, arity); - ep->address = fp+5; + Export* ep = erts_export_put(mod, function, arity); + ep->addressv[erts_staging_code_ix()] = fp+5; return; } } @@ -5819,6 +5783,7 @@ patch_funentries(Eterm Patchlist) Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info) { + Binary* magic; LoaderState* stp; BeamInstr Funcs; BeamInstr Patchlist; @@ -5840,7 +5805,8 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info) * Must initialize stp->lambdas here because the error handling code * at label 'error' uses it. 
*/ - stp = erts_alloc_loader_state(); + magic = erts_alloc_loader_state(); + stp = ERTS_MAGIC_BIN_DATA(magic); if (is_not_atom(Mod)) { goto error; @@ -5920,7 +5886,6 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info) code[MI_COMPILE_PTR] = 0; code[MI_COMPILE_SIZE] = 0; code[MI_COMPILE_SIZE_ON_HEAP] = 0; - code[MI_NUM_BREAKPOINTS] = 0; code[MI_LITERALS_START] = 0; code[MI_LITERALS_END] = 0; code[MI_LITERALS_OFF_HEAP] = 0; @@ -5997,12 +5962,16 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info) info = (byte *) fp; info = stub_copy_info(stp, ATTR_CHUNK, info, - code+MI_ATTR_PTR, code+MI_ATTR_SIZE_ON_HEAP); + code+MI_ATTR_PTR, + code+MI_ATTR_SIZE, + code+MI_ATTR_SIZE_ON_HEAP); if (info == NULL) { goto error; } info = stub_copy_info(stp, COMPILE_CHUNK, info, - code+MI_COMPILE_PTR, code+MI_COMPILE_SIZE_ON_HEAP); + code+MI_COMPILE_PTR, + code+MI_COMPILE_SIZE, + code+MI_COMPILE_SIZE_ON_HEAP); if (info == NULL) { goto error; } @@ -6028,13 +5997,13 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info) if (patch_funentries(Patchlist)) { erts_free_aligned_binary_bytes(temp_alloc); - free_state(stp); + free_loader_state(magic); return Mod; } error: erts_free_aligned_binary_bytes(temp_alloc); - free_state(stp); + free_loader_state(magic); BIF_ERROR(p, BADARG); } @@ -6051,3 +6020,4 @@ static int safe_mul(UWord a, UWord b, UWord* resp) return (res / b) == a; } } + diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h index 997ba197db..1048f258a5 100644 --- a/erts/emulator/beam/beam_load.h +++ b/erts/emulator/beam/beam_load.h @@ -49,11 +49,6 @@ extern void** beam_ops; extern BeamInstr beam_debug_apply[]; extern BeamInstr* em_call_error_handler; extern BeamInstr* em_apply_bif; -extern BeamInstr* em_call_traced_function; -typedef struct { - BeamInstr* start; /* Pointer to start of module. */ - BeamInstr* end; /* Points one word beyond last function in module. */ -} Range; /* * The following variables keep a sorted list of address ranges for @@ -61,11 +56,6 @@ typedef struct { * instruction pointer. */ -extern Range* modules; -extern int num_loaded_modules; -extern int allocated_modules; -extern Range* mid_module; - /* Total code size in bytes */ extern Uint erts_total_code_size; /* @@ -94,27 +84,22 @@ extern Uint erts_total_code_size; #define MI_COMPILE_SIZE_ON_HEAP 6 /* - * Number of breakpoints in module is stored in this word - */ -#define MI_NUM_BREAKPOINTS 7 - -/* * Literal area (constant pool). */ -#define MI_LITERALS_START 8 -#define MI_LITERALS_END 9 -#define MI_LITERALS_OFF_HEAP 10 +#define MI_LITERALS_START 7 +#define MI_LITERALS_END 8 +#define MI_LITERALS_OFF_HEAP 9 /* * Pointer to the on_load function (or NULL if none). */ -#define MI_ON_LOAD_FUNCTION_PTR 11 +#define MI_ON_LOAD_FUNCTION_PTR 10 /* * Pointer to the line table (or NULL if none). */ -#define MI_LINE_TABLE 12 +#define MI_LINE_TABLE 11 /* * Start of function pointer table. This table contains pointers to @@ -125,5 +110,27 @@ extern Uint erts_total_code_size; * this table. */ -#define MI_FUNCTIONS 13 +#define MI_FUNCTIONS 12 + +/* + * Layout of the line table. + */ + +#define MI_LINE_FNAME_PTR 0 +#define MI_LINE_LOC_TAB 1 +#define MI_LINE_LOC_SIZE 2 +#define MI_LINE_FUNC_TAB 3 + +#define LINE_INVALID_LOCATION (0) + +/* + * Macros for manipulating locations. 
+ */ + +#define IS_VALID_LOCATION(File, Line) \ + ((unsigned) (File) < 255 && (unsigned) (Line) < ((1 << 24) - 1)) +#define MAKE_LOCATION(File, Line) (((File) << 24) | (Line)) +#define LOC_FILE(Loc) ((Loc) >> 24) +#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1)) + #endif /* _BEAM_LOAD_H */ diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c new file mode 100644 index 0000000000..0f2d5d0c2a --- /dev/null +++ b/erts/emulator/beam/beam_ranges.c @@ -0,0 +1,349 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2012. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "sys.h" +#include "erl_vm.h" +#include "global.h" +#include "beam_load.h" + +typedef struct { + BeamInstr* start; /* Pointer to start of module. */ + erts_smp_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */ +} Range; + +/* Range 'end' needs to be atomic as we purge module + by setting end=start in active code_ix */ +#define RANGE_END(R) ((BeamInstr*)erts_smp_atomic_read_nob(&(R)->end)) + +static Range* find_range(BeamInstr* pc); +static void lookup_loc(FunctionInfo* fi, BeamInstr* pc, + BeamInstr* modp, int idx); + +/* + * The following variables keep a sorted list of address ranges for + * each module. It allows us to quickly find a function given an + * instruction pointer. + */ +struct ranges { + Range* modules; /* Sorted lists of module addresses. */ + Sint n; /* Number of range entries. */ + Sint allocated; /* Number of allocated entries. */ + erts_smp_atomic_t mid; /* Cached search start point */ +}; +static struct ranges r[ERTS_NUM_CODE_IX]; +static erts_smp_atomic_t mem_used; + +#ifdef HARD_DEBUG +static void check_consistency(struct ranges* p) +{ + int i; + + ASSERT(p->n <= p->allocated); + ASSERT((Uint)(p->mid - p->modules) < p->n || + (p->mid == p->modules && p->n == 0)); + for (i = 0; i < p->n; i++) { + ASSERT(p->modules[i].start <= RANGE_END(&p->modules[i])); + ASSERT(!i || RANGE_END(&p->modules[i-1]) < p->modules[i].start); + } +} +# define CHECK(r) check_consistency(r) +#else +# define CHECK(r) +#endif /* HARD_DEBUG */ + + +void +erts_init_ranges(void) +{ + Sint i; + + erts_smp_atomic_init_nob(&mem_used, 0); + for (i = 0; i < ERTS_NUM_CODE_IX; i++) { + r[i].modules = 0; + r[i].n = 0; + r[i].allocated = 0; + erts_smp_atomic_init_nob(&r[i].mid, 0); + } +} + +void +erts_start_staging_ranges(void) +{ + ErtsCodeIndex dst = erts_staging_code_ix(); + + if (r[dst].modules) { + erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated); + erts_free(ERTS_ALC_T_MODULE_REFS, r[dst].modules); + r[dst].modules = NULL; + } +} + +void +erts_end_staging_ranges(int commit) +{ + ErtsCodeIndex dst = erts_staging_code_ix(); + + if (commit && r[dst].modules == NULL) { + Sint i; + Sint n; + + /* No modules added, just clone src and remove purged code. 
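The location macros that moved into beam_load.h pack a file index and a line number into a single word: the top 8 bits hold the file (0 meaning the module's own .erl file, as lookup_loc() below special-cases) and the low 24 bits hold the line. This stand-alone snippet mirrors those macros and shows a round trip; only main() and the fixed-width types are additions:

    /* Packing sketch mirroring MAKE_LOCATION/LOC_FILE/LOC_LINE:
     * high 8 bits = file index, low 24 bits = line number. */
    #include <stdio.h>
    #include <stdint.h>

    #define IS_VALID_LOCATION(File, Line) \
        ((unsigned)(File) < 255 && (unsigned)(Line) < ((1u << 24) - 1))
    #define MAKE_LOCATION(File, Line) (((uint32_t)(File) << 24) | (uint32_t)(Line))
    #define LOC_FILE(Loc) ((Loc) >> 24)
    #define LOC_LINE(Loc) ((Loc) & ((1u << 24) - 1))

    int main(void)
    {
        unsigned file = 2, line = 4711;

        if (IS_VALID_LOCATION(file, line)) {
            uint32_t loc = MAKE_LOCATION(file, line);
            printf("loc=0x%08x file=%u line=%u\n",
                   loc, (unsigned) LOC_FILE(loc), (unsigned) LOC_LINE(loc));
        }
        return 0;
    }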
*/ + ErtsCodeIndex src = erts_active_code_ix(); + + erts_smp_atomic_add_nob(&mem_used, r[src].n); + r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS, + r[src].n * sizeof(Range)); + r[dst].allocated = r[src].n; + n = 0; + for (i = 0; i < r[src].n; i++) { + Range* rp = r[src].modules+i; + if (rp->start < RANGE_END(rp)) { + /* Only insert a module that has not been purged. */ + r[dst].modules[n] = *rp; + n++; + } + } + r[dst].n = n; + erts_smp_atomic_set_nob(&r[dst].mid, + (erts_aint_t) (r[dst].modules + n / 2)); + } +} + +void +erts_update_ranges(BeamInstr* code, Uint size) +{ + ErtsCodeIndex dst = erts_staging_code_ix(); + ErtsCodeIndex src = erts_active_code_ix(); + Sint i; + Sint n; + Sint need; + + if (src == dst) { + ASSERT(!erts_initialized); + + /* + * During start-up of system, the indices are the same. + * Handle this by faking a source area. + */ + src = (src+1) % ERTS_NUM_CODE_IX; + if (r[src].modules) { + erts_smp_atomic_add_nob(&mem_used, -r[src].allocated); + erts_free(ERTS_ALC_T_MODULE_REFS, r[src].modules); + } + r[src] = r[dst]; + r[dst].modules = 0; + } + + CHECK(&r[src]); + + ASSERT(r[dst].modules == NULL); + need = r[dst].allocated = r[src].n + 1; + erts_smp_atomic_add_nob(&mem_used, need); + r[dst].modules = (Range *) erts_alloc(ERTS_ALC_T_MODULE_REFS, + need * sizeof(Range)); + n = 0; + for (i = 0; i < r[src].n; i++) { + Range* rp = r[src].modules+i; + if (code < rp->start) { + r[dst].modules[n].start = code; + erts_smp_atomic_init_nob(&r[dst].modules[n].end, + (erts_aint_t)(((byte *)code) + size)); + ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code); + n++; + break; + } + if (rp->start < RANGE_END(rp)) { + /* Only insert a module that has not been purged. */ + r[dst].modules[n].start = rp->start; + erts_smp_atomic_init_nob(&r[dst].modules[n].end, + (erts_aint_t)(RANGE_END(rp))); + ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start); + n++; + } + } + + while (i < r[src].n) { + Range* rp = r[src].modules+i; + if (rp->start < RANGE_END(rp)) { + /* Only insert a module that has not been purged. */ + r[dst].modules[n].start = rp->start; + erts_smp_atomic_init_nob(&r[dst].modules[n].end, + (erts_aint_t)(RANGE_END(rp))); + ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start); + n++; + } + i++; + } + + if (n == 0 || code > r[dst].modules[n-1].start) { + r[dst].modules[n].start = code; + erts_smp_atomic_init_nob(&r[dst].modules[n].end, + (erts_aint_t)(((byte *)code) + size)); + ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code); + n++; + } + + ASSERT(n <= r[src].n+1); + r[dst].n = n; + erts_smp_atomic_set_nob(&r[dst].mid, + (erts_aint_t) (r[dst].modules + n / 2)); + + CHECK(&r[dst]); + CHECK(&r[src]); +} + +void +erts_remove_from_ranges(BeamInstr* code) +{ + Range* rp = find_range(code); + erts_smp_atomic_set_nob(&rp->end, (erts_aint_t)rp->start); +} + +UWord +erts_ranges_sz(void) +{ + return erts_smp_atomic_read_nob(&mem_used) * sizeof(Range); +} + +/* + * Find a function from the given pc and fill information in + * the FunctionInfo struct. If the full_info is non-zero, fill + * in all available information (including location in the + * source code). If no function is found, the 'current' field + * will be set to NULL. 
+ */ + +void +erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info) +{ + BeamInstr** low; + BeamInstr** high; + BeamInstr** mid; + Range* rp; + + fi->current = NULL; + fi->needed = 5; + fi->loc = LINE_INVALID_LOCATION; + rp = find_range(pc); + if (rp == 0) { + return; + } + + low = (BeamInstr **) (rp->start + MI_FUNCTIONS); + high = low + rp->start[MI_NUM_FUNCTIONS]; + while (low < high) { + mid = low + (high-low) / 2; + if (pc < mid[0]) { + high = mid; + } else if (pc < mid[1]) { + fi->current = mid[0]+2; + if (full_info) { + BeamInstr** fp = (BeamInstr **) (rp->start + + MI_FUNCTIONS); + int idx = mid - fp; + lookup_loc(fi, pc, rp->start, idx); + } + return; + } else { + low = mid + 1; + } + } +} + +static Range* +find_range(BeamInstr* pc) +{ + ErtsCodeIndex active = erts_active_code_ix(); + Range* low = r[active].modules; + Range* high = low + r[active].n; + Range* mid = (Range *) erts_smp_atomic_read_nob(&r[active].mid); + + CHECK(&r[active]); + while (low < high) { + if (pc < mid->start) { + high = mid; + } else if (pc > RANGE_END(mid)) { + low = mid + 1; + } else { + erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid); + return mid; + } + mid = low + (high-low) / 2; + } + return 0; +} + +static void +lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx) +{ + Eterm* line = (Eterm *) modp[MI_LINE_TABLE]; + Eterm* low; + Eterm* high; + Eterm* mid; + Eterm pc; + + if (line == 0) { + return; + } + + pc = (Eterm) (BeamInstr) orig_pc; + fi->fname_ptr = (Eterm *) (BeamInstr) line[MI_LINE_FNAME_PTR]; + low = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx]; + high = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx+1]; + while (high > low) { + mid = low + (high-low) / 2; + if (pc < mid[0]) { + high = mid; + } else if (pc < mid[1]) { + int file; + int index = mid - (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB]; + + if (line[MI_LINE_LOC_SIZE] == 2) { + Uint16* loc_table = + (Uint16 *) (BeamInstr) line[MI_LINE_LOC_TAB]; + fi->loc = loc_table[index]; + } else { + Uint32* loc_table = + (Uint32 *) (BeamInstr) line[MI_LINE_LOC_TAB]; + ASSERT(line[MI_LINE_LOC_SIZE] == 4); + fi->loc = loc_table[index]; + } + if (fi->loc == LINE_INVALID_LOCATION) { + return; + } + fi->needed += 3+2+3+2; + file = LOC_FILE(fi->loc); + if (file == 0) { + /* Special case: Module name with ".erl" appended */ + Atom* mod_atom = atom_tab(atom_val(fi->current[0])); + fi->needed += 2*(mod_atom->len+4); + } else { + Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1])); + fi->needed += 2*ap->len; + } + return; + } else { + low = mid + 1; + } + } +} diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index fc00b42454..1cdce49eef 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -233,15 +233,17 @@ BIF_RETTYPE link_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); - res_no_proc: - if (BIF_P->flags & F_TRAPEXIT) { - ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN; - erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL); - erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks); - BIF_RET(am_true); +res_no_proc: { + erts_aint32_t state = erts_smp_atomic32_read_nob(&BIF_P->state); + if (state & ERTS_PSFLG_TRAP_EXIT) { + ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN; + erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL); + erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks); + BIF_RET(am_true); + } + else + BIF_ERROR(BIF_P, EXC_NOPROC); } - else - BIF_ERROR(BIF_P, EXC_NOPROC); } #define ERTS_DEMONITOR_FALSE 2 @@ -1103,8 +1105,9 @@ 
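beam_ranges.c keeps one sorted array of {start,end} ranges per code index and resolves a pc to its module with a binary search; purging a module simply sets end back to start (see erts_remove_from_ranges above), so the range can never match again and is dropped the next time the table is rebuilt for staging. The reduced, single-threaded sketch below shows the lookup and the purge trick only; the atomics, the per-index tables and the cached midpoint are left out.

    /* Hypothetical single-threaded sketch of the sorted range table:
     * binary search for the range containing pc; purging a module sets
     * end = start so lookups can no longer land inside it. */
    #include <stdio.h>
    #include <stdint.h>

    typedef struct { uintptr_t start, end; } Range;   /* [start, end) */

    static Range *find_range(Range *r, int n, uintptr_t pc)
    {
        int low = 0, high = n;
        while (low < high) {
            int mid = low + (high - low) / 2;
            if (pc < r[mid].start)
                high = mid;
            else if (pc >= r[mid].end)
                low = mid + 1;
            else
                return &r[mid];
        }
        return NULL;
    }

    int main(void)
    {
        Range mods[3] = { {100, 200}, {300, 450}, {500, 512} };

        printf("pc 310 -> module #%d\n", (int)(find_range(mods, 3, 310) - mods));
        mods[1].end = mods[1].start;               /* "purge" module #1 */
        printf("pc 310 after purge -> %s\n",
               find_range(mods, 3, 310) ? "found" : "not found");
        return 0;
    }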
BIF_RETTYPE hibernate_3(BIF_ALIST_3) if (erts_hibernate(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, reg)) { /* - * If hibernate succeeded, TRAP. The process will be suspended - * if status is P_WAITING or continue (if any message was in the queue). + * If hibernate succeeded, TRAP. The process will be wait in a + * hibernated state if its state is inactive (!ERTS_PSFLG_ACTIVE); + * otherwise, continue executing (if any message was in the queue). */ BIF_TRAP_CODE_PTR_(BIF_P, BIF_P->i); } @@ -1403,9 +1406,8 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) } else { rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; - rp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN, - BIF_ARG_1, rp_locks, - ERTS_P2P_FLG_SMP_INC_REFC); + rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN, + BIF_ARG_1, rp_locks); if (!rp) { BIF_RET(am_true); } @@ -1427,8 +1429,6 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); - if (rp != BIF_P) - erts_smp_proc_dec_refc(rp); #endif /* * We may have exited ourselves and may have to take action. @@ -1502,14 +1502,13 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) BIF_RET(old_value); } else if (BIF_ARG_1 == am_priority) { - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS); old_value = erts_set_process_priority(BIF_P, BIF_ARG_2); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); if (old_value == THE_NON_VALUE) goto error; BIF_RET(old_value); } else if (BIF_ARG_1 == am_trap_exit) { + erts_aint32_t state; Uint trap_exit; if (BIF_ARG_2 == am_true) { trap_exit = 1; @@ -1524,59 +1523,52 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) * For more info, see implementation of erts_send_exit_signal(). */ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS); + ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS) + & erts_proc_lc_my_proc_locks(BIF_P)); ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - old_value = ERTS_PROC_IS_TRAPPING_EXITS(BIF_P) ? am_true : am_false; - if (trap_exit) { - ERTS_PROC_SET_TRAP_EXIT(BIF_P); - } else { - ERTS_PROC_UNSET_TRAP_EXIT(BIF_P); - } + if (trap_exit) + state = erts_smp_atomic32_read_bor_nob(&BIF_P->state, + ERTS_PSFLG_TRAP_EXIT); + else + state = erts_smp_atomic32_read_band_nob(&BIF_P->state, + ~ERTS_PSFLG_TRAP_EXIT); + old_value = (state & ERTS_PSFLG_TRAP_EXIT) ? am_true : am_false; erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); BIF_RET(old_value); } else if (BIF_ARG_1 == am_scheduler) { - int yield; - ErtsRunQueue *old; - ErtsRunQueue *new; + ErtsRunQueue *old, *new, *curr; Sint sched; + erts_aint32_t state; + if (!is_small(BIF_ARG_2)) goto error; sched = signed_val(BIF_ARG_2); if (sched < 0 || erts_no_schedulers < sched) goto error; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS); - old = BIF_P->bound_runq; -#ifdef ERTS_SMP - ASSERT(!old || old == BIF_P->run_queue); -#endif - new = !sched ? 
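In the process_flag(trap_exit, ...) hunk the flag now lives in the process's atomic state word: it is flipped with read_bor/read_band and the previous setting is recovered from the bits those operations return, instead of being toggled as a separate field. The same fetch-or/fetch-and idiom in portable C11, with invented flag names and a single global word standing in for the per-process state:

    /* Hypothetical sketch: keep boolean process flags in one atomic word and
     * toggle them with fetch_or/fetch_and, recovering the previous value from
     * the bits that the read-modify-write operation returns. */
    #include <stdio.h>
    #include <stdatomic.h>

    #define PSFLG_TRAP_EXIT (1u << 0)
    #define PSFLG_BOUND     (1u << 1)

    static atomic_uint proc_state = 0;

    /* Set or clear TRAP_EXIT; return what the flag was before the change. */
    static int set_trap_exit(int on)
    {
        unsigned old;
        if (on)
            old = atomic_fetch_or(&proc_state, PSFLG_TRAP_EXIT);
        else
            old = atomic_fetch_and(&proc_state, ~PSFLG_TRAP_EXIT);
        return (old & PSFLG_TRAP_EXIT) != 0;
    }

    int main(void)
    {
        printf("was trapping: %d\n", set_trap_exit(1));  /* 0 */
        printf("was trapping: %d\n", set_trap_exit(0));  /* 1 */
        printf("was trapping: %d\n", set_trap_exit(0));  /* 0 */
        return 0;
    }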
NULL : erts_schedid2runq(sched); -#ifndef ERTS_SMP - yield = 0; -#else - if (new == old) - yield = 0; + + if (sched == 0) { + new = NULL; + state = erts_smp_atomic32_read_band_mb(&BIF_P->state, + ~ERTS_PSFLG_BOUND); + } else { - ErtsRunQueue *curr = BIF_P->run_queue; - if (!new) - erts_smp_runq_lock(curr); - else - erts_smp_runqs_lock(curr, new); - yield = new && BIF_P->run_queue != new; -#endif - BIF_P->bound_runq = new; + new = erts_schedid2runq(sched); #ifdef ERTS_SMP - if (new) - BIF_P->run_queue = new; - if (!new) - erts_smp_runq_unlock(curr); - else - erts_smp_runqs_unlock(curr, new); - } + erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new); #endif - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); + state = erts_smp_atomic32_read_bor_mb(&BIF_P->state, + ERTS_PSFLG_BOUND); + } + + curr = ERTS_GET_SCHEDULER_DATA_FROM_PROC(BIF_P)->run_queue; + old = (ERTS_PSFLG_BOUND & state) ? curr : NULL; + + ASSERT(!old || old == curr); + old_value = old ? make_small(old->ix+1) : make_small(0); - if (yield) + if (new && new != curr) ERTS_BIF_YIELD_RETURN_X(BIF_P, old_value, am_scheduler); else BIF_RET(old_value); @@ -1826,8 +1818,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { if (internal_pid_index(to) >= erts_max_processes) return SEND_BADARG; - rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN, - to, 0, ERTS_P2P_FLG_SMP_INC_REFC); + rp = erts_proc_lookup_raw(to); if (!rp) { ERTS_SMP_ASSERT_IS_NOT_EXITING(p); @@ -1865,7 +1856,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { } erts_whereis_name(p, ERTS_PROC_LOCK_MAIN, to, - &rp, 0, ERTS_P2P_FLG_SMP_INC_REFC, + &rp, 0, 0, &pt); if (pt) { @@ -2017,7 +2008,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { erts_whereis_name(p, ERTS_PROC_LOCK_MAIN, tp[1], - &rp, 0, ERTS_P2P_FLG_SMP_INC_REFC, + &rp, 0, 0, &pt); if (pt) { portid = pt->id; @@ -2060,23 +2051,15 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { rp_locks |= ERTS_PROC_LOCK_MAIN; #endif /* send to local process */ - erts_send_message(p, rp, &rp_locks, msg, 0); - if (!erts_use_sender_punish) + res = erts_send_message(p, rp, &rp_locks, msg, 0); + if (erts_use_sender_punish) + res *= 4; + else res = 0; - else { -#ifdef ERTS_SMP - res = rp->msg_inq.len*4; - if (ERTS_PROC_LOCK_MAIN & rp_locks) - res += rp->msg.len*4; -#else - res = rp->msg.len*4; -#endif - } erts_smp_proc_unlock(rp, p == rp ? 
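A pattern that recurs in the trap_exit and ERTS_PSFLG_BOUND branches above: one bit of the process state word is set or cleared atomically, and old_value is computed from the word the update returns. A stand-alone C11 model of the same idea (the bit value and names are invented for the demo; atomic_fetch_or/atomic_fetch_and return the word as it was before the update):

#include <stdatomic.h>
#include <stdio.h>

#define PSFLG_TRAP_EXIT (1u << 3)   /* illustrative bit, not the real value */

static atomic_uint state;

/* Set or clear the flag; report whether it was set before the update. */
static int set_trap_exit(int on)
{
    unsigned old;

    if (on)
        old = atomic_fetch_or(&state, PSFLG_TRAP_EXIT);
    else
        old = atomic_fetch_and(&state, ~PSFLG_TRAP_EXIT);
    return (old & PSFLG_TRAP_EXIT) != 0;
}

int main(void)
{
    printf("%d\n", set_trap_exit(1));   /* 0: was clear    */
    printf("%d\n", set_trap_exit(1));   /* 1: already set  */
    printf("%d\n", set_trap_exit(0));   /* 1: was set      */
    printf("%d\n", set_trap_exit(0));   /* 0: already clear */
    return 0;
}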
(rp_locks & ~ERTS_PROC_LOCK_MAIN) : rp_locks); - erts_smp_proc_dec_refc(rp); return res; } } @@ -2861,7 +2844,7 @@ BIF_RETTYPE float_to_list_1(BIF_ALIST_1) if (is_not_float(BIF_ARG_1)) BIF_ERROR(BIF_P, BADARG); GET_DOUBLE(BIF_ARG_1, f); - if ((i = sys_double_to_chars(f.fd, fbuf)) <= 0) + if ((i = sys_double_to_chars(f.fd, fbuf, sizeof(fbuf))) <= 0) BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); need = i*2; hp = HAlloc(BIF_P, need); @@ -3486,7 +3469,7 @@ BIF_RETTYPE garbage_collect_1(BIF_ALIST_1) if (rp == ERTS_PROC_LOCK_BUSY) ERTS_BIF_YIELD1(bif_export[BIF_garbage_collect_1], BIF_P, BIF_ARG_1); #else - rp = erts_pid2proc(BIF_P, 0, BIF_ARG_1, 0); + rp = erts_proc_lookup(BIF_ARG_1); #endif if (!rp) BIF_RET(am_false); @@ -3780,7 +3763,8 @@ BIF_RETTYPE function_exported_3(BIF_ALIST_3) is_not_small(BIF_ARG_3)) { BIF_ERROR(BIF_P, BADARG); } - if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3)) == NULL) { + if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3), + erts_active_code_ix()) == NULL) { BIF_RET(am_false); } BIF_RET(am_true); @@ -4216,8 +4200,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) erts_smp_thr_progress_block(); for (i = 0; i < erts_max_processes; i++) { - if (process_tab[i] != (Process*) 0) { - Process* p = process_tab[i]; + Process *p = erts_pix2proc(i); + if (p) { #ifdef USE_VM_PROBES p->seq_trace_token = (p->dt_utag != NIL) ? am_have_dt_utag : NIL; #else @@ -4513,6 +4497,21 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p, Export bif_return_trap_export; +void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a, + Eterm (*bif)(BIF_ALIST_0)) +{ + int i; + sys_memset((void *) ep, 0, sizeof(Export)); + for (i=0; i<ERTS_NUM_CODE_IX; i++) { + ep->addressv[i] = &ep->code[3]; + } + ep->code[0] = m; + ep->code[1] = f; + ep->code[2] = a; + ep->code[3] = (BeamInstr) em_apply_bif; + ep->code[4] = (BeamInstr) bif; +} + void erts_init_bif(void) { reference0 = 0; @@ -4528,17 +4527,13 @@ void erts_init_bif(void) * yield the calling process traps to. The only thing it does: * return the value passed as argument. 
*/ - sys_memset((void *) &bif_return_trap_export, 0, sizeof(Export)); - bif_return_trap_export.address = &bif_return_trap_export.code[3]; - bif_return_trap_export.code[0] = am_erlang; - bif_return_trap_export.code[1] = am_bif_return_trap; + erts_init_trap_export(&bif_return_trap_export, am_erlang, am_bif_return_trap, #ifdef DEBUG - bif_return_trap_export.code[2] = 2; + 2 #else - bif_return_trap_export.code[2] = 1; + 1 #endif - bif_return_trap_export.code[3] = (BeamInstr) em_apply_bif; - bif_return_trap_export.code[4] = (BeamInstr) &bif_return_trap; + , &bif_return_trap); flush_monitor_message_trap = erts_export_put(am_erlang, am_flush_monitor_message, diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h index d20089a9fb..7cb2c78815 100644 --- a/erts/emulator/beam/bif.h +++ b/erts/emulator/beam/bif.h @@ -125,7 +125,7 @@ do { \ #define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \ do { \ (Proc)->arity = 0; \ - (Proc)->i = (BeamInstr*) ((Trap)->address); \ + (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \ (Proc)->freason = TRAP; \ (Ret) = THE_NON_VALUE; \ } while (0) @@ -135,7 +135,7 @@ do { \ Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ (Proc)->arity = 1; \ reg[0] = (Eterm) (A0); \ - (Proc)->i = (BeamInstr*) ((Trap)->address); \ + (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \ (Proc)->freason = TRAP; \ (Ret) = THE_NON_VALUE; \ } while (0) @@ -146,7 +146,7 @@ do { \ (Proc)->arity = 2; \ reg[0] = (Eterm) (A0); \ reg[1] = (Eterm) (A1); \ - (Proc)->i = (BeamInstr*) ((Trap)->address); \ + (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \ (Proc)->freason = TRAP; \ (Ret) = THE_NON_VALUE; \ } while (0) @@ -158,7 +158,7 @@ do { \ reg[0] = (Eterm) (A0); \ reg[1] = (Eterm) (A1); \ reg[2] = (Eterm) (A2); \ - (Proc)->i = (BeamInstr*) ((Trap)->address); \ + (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \ (Proc)->freason = TRAP; \ (Ret) = THE_NON_VALUE; \ } while (0) @@ -170,13 +170,13 @@ do { \ reg[0] = (Eterm) (A0); \ reg[1] = (Eterm) (A1); \ reg[2] = (Eterm) (A2); \ - (Proc)->i = (BeamInstr*) ((Trap)->address); \ + (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \ (Proc)->freason = TRAP; \ } while (0) #define BIF_TRAP0(p, Trap_) do { \ (p)->arity = 0; \ - (p)->i = (BeamInstr*) ((Trap_)->address); \ + (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \ (p)->freason = TRAP; \ return THE_NON_VALUE; \ } while(0) @@ -185,7 +185,7 @@ do { \ Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \ (p)->arity = 1; \ reg[0] = (A0); \ - (p)->i = (BeamInstr*) ((Trap_)->address); \ + (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \ (p)->freason = TRAP; \ return THE_NON_VALUE; \ } while(0) @@ -195,7 +195,7 @@ do { \ (p)->arity = 2; \ reg[0] = (A0); \ reg[1] = (A1); \ - (p)->i = (BeamInstr*) ((Trap_)->address); \ + (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \ (p)->freason = TRAP; \ return THE_NON_VALUE; \ } while(0) @@ -206,7 +206,7 @@ do { \ reg[0] = (A0); \ reg[1] = (A1); \ reg[2] = (A2); \ - (p)->i = (BeamInstr*) ((Trap_)->address); \ + (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \ (p)->freason = TRAP; \ return THE_NON_VALUE; \ } while(0) diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 797bce43ab..f7dad2767f 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -142,8 +142,6 @@ bif erlang:list_to_pid/1 bif 'erl.lang.proc':string_to_pid/1 ebif_string_to_pid_1 
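The macro change above (address replaced by addressv[erts_active_code_ix()]) means every export entry carries one callable address per code index, and every call site picks the slot belonging to the currently active index. A stand-alone sketch of that layout, assuming nothing about the real Export struct; erts_init_trap_export() in bif.c fills all slots with the same address in much the same way as init_export() below:

#include <stdio.h>

#define NUM_CODE_IX 3

typedef void (*Handler)(void);

typedef struct {
    Handler addressv[NUM_CODE_IX];   /* one callable address per code index */
} Export;

static void old_impl(void) { puts("old implementation"); }
static void new_impl(void) { puts("new implementation"); }

/* Model of erts_init_trap_export(): point every code index at one handler. */
static void init_export(Export *ep, Handler h)
{
    int i;
    for (i = 0; i < NUM_CODE_IX; i++)
        ep->addressv[i] = h;
}

int main(void)
{
    unsigned active_ix = 0, staging_ix = 1;
    Export ep;

    init_export(&ep, old_impl);
    ep.addressv[active_ix]();            /* callers dispatch via the active slot */

    ep.addressv[staging_ix] = new_impl;  /* a loader patches the staging slot */
    active_ix = staging_ix;              /* commit: staging becomes active      */
    ep.addressv[active_ix]();
    return 0;
}

Callers never see a half-updated entry: they either read the old slot or, after the commit, the new one.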
list_to_pid_1 bif erlang:list_to_tuple/1 bif 'erl.lang.tuple':from_list/1 ebif_list_to_tuple_1 -bif erlang:load_module/2 -bif 'erl.system.code':load/2 ebif_load_module_2 bif erlang:loaded/0 bif 'erl.system.code':loaded/0 ebif_loaded_0 bif erlang:localtime/0 @@ -829,6 +827,13 @@ bif erlang:dt_restore_tag/1 bif erlang:dt_prepend_vm_tag_data/1 bif erlang:dt_append_vm_tag_data/1 + +# +# New in R16B. +# +bif erlang:prepare_loading/2 +bif erlang:finish_loading/1 + # # Obsolete # diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 93aa2fb8d0..63136d86c9 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -71,9 +71,10 @@ process_info(int to, void *to_arg) { int i; for (i = 0; i < erts_max_processes; i++) { - if ((process_tab[i] != NULL) && (process_tab[i]->i != ENULL)) { - if (process_tab[i]->status != P_EXITING) - print_process_info(to, to_arg, process_tab[i]); + Process *p = erts_pix2proc(i); + if (p && p->i != ENULL) { + if (!ERTS_PROC_IS_EXITING(p)) + print_process_info(to, to_arg, p); } } @@ -89,7 +90,8 @@ process_killer(void) erts_printf("\n\nProcess Information\n\n"); erts_printf("--------------------------------------------------\n"); for (i = erts_max_processes-1; i >= 0; i--) { - if (((rp = process_tab[i]) != NULL) && rp->i != ENULL) { + rp = erts_pix2proc(i); + if (rp && rp->i != ENULL) { int br; print_process_info(ERTS_PRINT_STDOUT, NULL, rp); erts_printf("(k)ill (n)ext (r)eturn:\n"); @@ -97,11 +99,20 @@ process_killer(void) if ((j = sys_get_key(0)) <= 0) erl_exit(0, ""); switch(j) { - case 'k': - if (rp->status == P_WAITING) { - ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; - erts_smp_proc_inc_refc(rp); - erts_smp_proc_lock(rp, rp_locks); + case 'k': { + ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; + erts_aint32_t state; + erts_smp_proc_inc_refc(rp); + erts_smp_proc_lock(rp, rp_locks); + state = erts_smp_atomic32_read_acqb(&rp->state); + if (state & (ERTS_PSFLG_FREE + | ERTS_PSFLG_EXITING + | ERTS_PSFLG_ACTIVE + | ERTS_PSFLG_IN_RUNQ + | ERTS_PSFLG_RUNNING)) { + erts_printf("Can only kill WAITING processes this way\n"); + } + else { (void) erts_send_exit_signal(NULL, NIL, rp, @@ -110,12 +121,10 @@ process_killer(void) NIL, NULL, 0); - erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); } - else - erts_printf("Can only kill WAITING processes this way\n"); - + erts_smp_proc_unlock(rp, rp_locks); + erts_smp_proc_dec_refc(rp); + } case 'n': br = 1; break; case 'r': return; default: return; @@ -180,42 +189,38 @@ static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext) void print_process_info(int to, void *to_arg, Process *p) { + time_t approx_started; int garbing = 0; int running = 0; - time_t tmp_t; struct saved_calls *scb; + erts_aint32_t state; /* display the PID */ erts_print(to, to_arg, "=proc:%T\n", p->id); /* Display the state */ erts_print(to, to_arg, "State: "); - switch (p->status) { - case P_FREE: + + state = erts_smp_atomic32_read_acqb(&p->state); + if (state & ERTS_PSFLG_FREE) erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */ - break; - case P_RUNABLE: - erts_print(to, to_arg, "Scheduled\n"); - break; - case P_WAITING: - erts_print(to, to_arg, "Waiting\n"); - break; - case P_SUSPENDED: - erts_print(to, to_arg, "Suspended\n"); - break; - case P_RUNNING: - erts_print(to, to_arg, "Running\n"); - running = 1; - break; - case P_EXITING: + else if (state & ERTS_PSFLG_EXITING) erts_print(to, to_arg, "Exiting\n"); - break; - case P_GARBING: - erts_print(to, to_arg, "Garbing\n"); + else 
if (state & ERTS_PSFLG_GC) { garbing = 1; running = 1; - break; + erts_print(to, to_arg, "Garbing\n"); } + else if (state & ERTS_PSFLG_SUSPENDED) + erts_print(to, to_arg, "Suspended\n"); + else if (state & ERTS_PSFLG_RUNNING) { + running = 1; + erts_print(to, to_arg, "Running\n"); + } + else if (state & ERTS_PSFLG_ACTIVE) + erts_print(to, to_arg, "Scheduled\n"); + else + erts_print(to, to_arg, "Waiting\n"); /* * If the process is registered as a global process, display the @@ -245,8 +250,8 @@ print_process_info(int to, void *to_arg, Process *p) } erts_print(to, to_arg, "Spawned by: %T\n", p->parent); - tmp_t = p->started.tv_sec; - erts_print(to, to_arg, "Started: %s", ctime(&tmp_t)); + approx_started = (time_t) p->approx_started; + erts_print(to, to_arg, "Started: %s", ctime(&approx_started)); ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); erts_print(to, to_arg, "Message queue length: %d\n", p->msg.len); @@ -377,17 +382,22 @@ loaded(int to, void *to_arg) int old = 0; int cur = 0; BeamInstr* code; + Module* modp; + ErtsCodeIndex code_ix; + + code_ix = erts_active_code_ix(); + erts_rlock_old_code(code_ix); /* * Calculate and print totals. */ - for (i = 0; i < module_code_size(); i++) { - if (module_code(i) != NULL && - ((module_code(i)->code_length != 0) || - (module_code(i)->old_code_length != 0))) { - cur += module_code(i)->code_length; - if (module_code(i)->old_code_length != 0) { - old += module_code(i)->old_code_length; + for (i = 0; i < module_code_size(code_ix); i++) { + if ((modp = module_code(i, code_ix)) != NULL && + ((modp->curr.code_length != 0) || + (modp->old.code_length != 0))) { + cur += modp->curr.code_length; + if (modp->old.code_length != 0) { + old += modp->old.code_length; } } } @@ -398,21 +408,22 @@ loaded(int to, void *to_arg) * Print one line per module. */ - for (i = 0; i < module_code_size(); i++) { + for (i = 0; i < module_code_size(code_ix); i++) { + modp = module_code(i, code_ix); if (!ERTS_IS_CRASH_DUMPING) { /* * Interactive dump; keep it brief. */ - if (module_code(i) != NULL && - ((module_code(i)->code_length != 0) || - (module_code(i)->old_code_length != 0))) { - erts_print(to, to_arg, "%T", make_atom(module_code(i)->module)); - cur += module_code(i)->code_length; - erts_print(to, to_arg, " %d", module_code(i)->code_length ); - if (module_code(i)->old_code_length != 0) { + if (modp != NULL && + ((modp->curr.code_length != 0) || + (modp->old.code_length != 0))) { + erts_print(to, to_arg, "%T", make_atom(modp->module)); + cur += modp->curr.code_length; + erts_print(to, to_arg, " %d", modp->curr.code_length ); + if (modp->old.code_length != 0) { erts_print(to, to_arg, " (%d old)", - module_code(i)->old_code_length ); - old += module_code(i)->old_code_length; + modp->old.code_length ); + old += modp->old.code_length; } erts_print(to, to_arg, "\n"); } @@ -420,15 +431,15 @@ loaded(int to, void *to_arg) /* * To crash dump; make it parseable. 
*/ - if (module_code(i) != NULL && - ((module_code(i)->code_length != 0) || - (module_code(i)->old_code_length != 0))) { + if (modp != NULL && + ((modp->curr.code_length != 0) || + (modp->old.code_length != 0))) { erts_print(to, to_arg, "=mod:"); - erts_print(to, to_arg, "%T", make_atom(module_code(i)->module)); + erts_print(to, to_arg, "%T", make_atom(modp->module)); erts_print(to, to_arg, "\n"); erts_print(to, to_arg, "Current size: %d\n", - module_code(i)->code_length); - code = module_code(i)->code; + modp->curr.code_length); + code = modp->curr.code; if (code != NULL && code[MI_ATTR_PTR]) { erts_print(to, to_arg, "Current attributes: "); dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR], @@ -440,9 +451,9 @@ loaded(int to, void *to_arg) code[MI_COMPILE_SIZE]); } - if (module_code(i)->old_code_length != 0) { - erts_print(to, to_arg, "Old size: %d\n", module_code(i)->old_code_length); - code = module_code(i)->old_code; + if (modp->old.code_length != 0) { + erts_print(to, to_arg, "Old size: %d\n", modp->old.code_length); + code = modp->old.code; if (code[MI_ATTR_PTR]) { erts_print(to, to_arg, "Old attributes: "); dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR], @@ -457,6 +468,7 @@ loaded(int to, void *to_arg) } } } + erts_runlock_old_code(code_ix); } @@ -616,7 +628,8 @@ bin_check(void) int i, printed = 0; for (i=0; i < erts_max_processes; i++) { - if ((rp = process_tab[i]) == NULL) + rp = erts_pix2proc(i); + if (!rp) continue; for (hdr = rp->off_heap.first; hdr; hdr = hdr->next) { if (hdr->thing_word == HEADER_PROC_BIN) { @@ -650,10 +663,13 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */ #endif int fd; + size_t envsz; time_t now; + char env[21]; /* enough to hold any 64-bit integer */ size_t dumpnamebufsize = MAXPATHLEN; char dumpnamebuf[MAXPATHLEN]; char* dumpname; + int secs; if (ERTS_SOMEONE_IS_CRASH_DUMPING) return; @@ -676,9 +692,41 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) erts_writing_erl_crash_dump = 1; #endif - erts_sys_prepare_crash_dump(); + envsz = sizeof(env); + /* ERL_CRASH_DUMP_SECONDS not set + * same as ERL_CRASH_DUMP_SECONDS = 0 + * - do not write dump + * - do not set an alarm + * - break immediately + * + * ERL_CRASH_DUMP_SECONDS = 0 + * - do not write dump + * - do not set an alarm + * - break immediately + * + * ERL_CRASH_DUMP_SECONDS < 0 + * - do not set alarm + * - write dump until done + * + * ERL_CRASH_DUMP_SECONDS = S (and S positive) + * - Don't dump file forever + * - set alarm (set in sys) + * - write dump until alarm or file is written completely + */ + + if (erts_sys_getenv__("ERL_CRASH_DUMP_SECONDS", env, &envsz) != 0) { + return; /* break immediately */ + } else { + secs = atoi(env); + } + + if (secs == 0) { + return; + } + + erts_sys_prepare_crash_dump(secs); - if (erts_sys_getenv_raw("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 0) + if (erts_sys_getenv__("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 0) dumpname = "erl_crash.dump"; else dumpname = &dumpnamebuf[0]; @@ -704,7 +752,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) erts_print_nif_taints(fd, NULL); erts_fdprintf(fd, "Atoms: %d\n", atom_table_size()); info(fd, NULL); /* General system info */ - if (process_tab != NULL) /* XXX true at init */ + if (erts_proc.tab) process_info(fd, NULL); /* Info about each process and port */ db_info(fd, NULL, 0); erts_print_bif_timer_info(fd, NULL); diff --git a/erts/emulator/beam/code_ix.c 
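The comment block above spells out the ERL_CRASH_DUMP_SECONDS policy added to erl_crash_dump_v(). A stand-alone sketch of the same decision table, with getenv()/atoi() standing in for the erts_sys_getenv__ call and prints standing in for the dump and alarm steps:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *env = getenv("ERL_CRASH_DUMP_SECONDS");
    int secs = env ? atoi(env) : 0;          /* unset behaves like "0" */

    if (secs == 0) {
        puts("no crash dump, break immediately");
    } else if (secs < 0) {
        puts("write the dump until done, no alarm");
    } else {
        printf("arm a %d s alarm, write the dump until done or alarm\n", secs);
    }
    return 0;
}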
b/erts/emulator/beam/code_ix.c new file mode 100644 index 0000000000..c66d5a2f05 --- /dev/null +++ b/erts/emulator/beam/code_ix.c @@ -0,0 +1,168 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2012. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "code_ix.h" +#include "global.h" +#include "beam_catches.h" + + + +#if 0 +# define CIX_TRACE(text) erts_fprintf(stderr, "CIX_TRACE: " text " act=%u load=%u\r\n", erts_active_code_ix(), erts_staging_code_ix()) +#else +# define CIX_TRACE(text) +#endif + +erts_smp_atomic32_t the_active_code_index; +erts_smp_atomic32_t the_staging_code_index; + +static Process* code_writing_process = NULL; +struct code_write_queue_item { + Process *p; + struct code_write_queue_item* next; +}; +static struct code_write_queue_item* code_write_queue = NULL; +static erts_smp_mtx_t code_write_permission_mtx; + +#ifdef ERTS_ENABLE_LOCK_CHECK +static erts_tsd_key_t has_code_write_permission; +#endif + +void erts_code_ix_init(void) +{ + /* We start emulator by initializing preloaded modules + * single threaded with active and staging set both to zero. + * Preloading is finished by a commit that will set things straight. 
+ */ + erts_smp_atomic32_init_nob(&the_active_code_index, 0); + erts_smp_atomic32_init_nob(&the_staging_code_index, 0); + erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission"); +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_tsd_key_create(&has_code_write_permission); +#endif + CIX_TRACE("init"); +} + +void erts_start_staging_code_ix(void) +{ + beam_catches_start_staging(); + export_start_staging(); + module_start_staging(); + erts_start_staging_ranges(); + CIX_TRACE("start"); +} + + +void erts_end_staging_code_ix(void) +{ + beam_catches_end_staging(1); + export_end_staging(1); + module_end_staging(1); + erts_end_staging_ranges(1); + CIX_TRACE("end"); +} + +void erts_commit_staging_code_ix(void) +{ + ErtsCodeIndex ix; + /* We need to this lock as we are now making the staging export table active */ + export_staging_lock(); + ix = erts_staging_code_ix(); + erts_smp_atomic32_set_nob(&the_active_code_index, ix); + ix = (ix + 1) % ERTS_NUM_CODE_IX; + erts_smp_atomic32_set_nob(&the_staging_code_index, ix); + export_staging_unlock(); + CIX_TRACE("activate"); +} + +void erts_abort_staging_code_ix(void) +{ + beam_catches_end_staging(0); + export_end_staging(0); + module_end_staging(0); + erts_end_staging_ranges(0); + CIX_TRACE("abort"); +} + + +/* + * Calller _must_ yield if we return 0 + */ +int erts_try_seize_code_write_permission(Process* c_p) +{ + int success; +#ifdef ERTS_SMP + ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */ +#endif + ASSERT(c_p != NULL); + + erts_smp_mtx_lock(&code_write_permission_mtx); + success = (code_writing_process == NULL); + if (success) { + code_writing_process = c_p; +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_tsd_set(has_code_write_permission, (void *) 1); +#endif + } + else { /* Already locked */ + struct code_write_queue_item* qitem; + ASSERT(code_writing_process != c_p); + qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem)); + qitem->p = c_p; + erts_smp_proc_inc_refc(c_p); + qitem->next = code_write_queue; + code_write_queue = qitem; + erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); + } + erts_smp_mtx_unlock(&code_write_permission_mtx); + return success; +} + +void erts_release_code_write_permission(void) +{ + erts_smp_mtx_lock(&code_write_permission_mtx); + ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + while (code_write_queue != NULL) { /* unleash the entire herd */ + struct code_write_queue_item* qitem = code_write_queue; + erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS); + if (!ERTS_PROC_IS_EXITING(qitem->p)) { + erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS); + } + erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS); + code_write_queue = qitem->next; + erts_smp_proc_dec_refc(qitem->p); + erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem); + } + code_writing_process = NULL; +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_tsd_set(has_code_write_permission, (void *) 0); +#endif + erts_smp_mtx_unlock(&code_write_permission_mtx); +} + +#ifdef ERTS_ENABLE_LOCK_CHECK +int erts_has_code_write_permission(void) +{ + return (code_writing_process != NULL) && erts_tsd_get(has_code_write_permission); +} +#endif diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h new file mode 100644 index 0000000000..e2bd0da112 --- /dev/null +++ b/erts/emulator/beam/code_ix.h @@ -0,0 +1,141 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2012. All Rights Reserved. 
+ * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + +/* Description: + * This is the interface that facilitates changing the beam code + * (load,upgrade,delete) while allowing executing Erlang processes to + * access the code without any locks or other expensive memory barriers. + * + * The basic idea is to maintain several "logical copies" of the code. These + * copies are identified by a global 'code index', an integer of 0, 1 or 2. + * The code index is used as argument to code access structures like + * export, module, beam_catches, beam_ranges. + * + * The current 'active' code index is used to access the current running + * code. The 'staging' code index is used by the process that performs + * a code change operation. When a code change operation completes + * succesfully, the staging code index becomes the new active code index. + * + * The third code index is not explicitly used. It can be thought of as + * the "previous active" or the "next staging" index. It is needed to make + * sure that we do not reuse a code index for staging until we are sure + * that no executing BIFs are still referencing it. + * We could get by with only two (0 and 1), but that would require that we + * must wait for all schedulers to re-schedule before each code change + * operation can start staging. + * + * Note that the 'code index' is very loosely coupled to the concept of + * 'current' and 'old' module versions. You can almost say that they are + * orthogonal to each other. Code index is an emulator global concept while + * 'current' and 'old' is specific for each module. + */ + +#ifndef __CODE_IX_H__ +#define __CODE_IX_H__ + +#ifndef __SYS_H__ +# ifdef HAVE_CONFIG_H +# include "config.h" +# endif +# include "sys.h" +#endif +struct process; + + +#define ERTS_NUM_CODE_IX 3 +typedef unsigned ErtsCodeIndex; + + +/* Called once at emulator initialization. + */ +void erts_code_ix_init(void); + +/* Return active code index. + * Is guaranteed to be valid until the calling BIF returns. + * To get a consistent view of the code, only one call to erts_active_code_ix() + * should be made and the returned ix reused within the same BIF call. + */ +ERTS_GLB_INLINE +ErtsCodeIndex erts_active_code_ix(void); + +/* Return staging code ix. + * Only used by a process performing code loading/upgrading/deleting/purging. + * code_ix must be locked. + */ +ERTS_GLB_INLINE +ErtsCodeIndex erts_staging_code_ix(void); + +/* Try seize exclusive code write permission. Needed for code staging. + * Main process lock (only) must be held. + * System thread progress must not be blocked. + * Caller is suspended and *must* yield if 0 is returned. + */ +int erts_try_seize_code_write_permission(struct process*); + +/* Release code write permission. + * Will resume any suspended waiters. + */ +void erts_release_code_write_permission(void); + +/* Prepare the "staging area" to be a complete copy of the active code. + * Code write permission must have been seized. 
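A self-contained model of the three-slot active/staging rotation described above, using C11 atomics; the CodeCopy type, the version field and the demo are illustrative, not the ERTS data structures. Readers only ever load the active index, while the single writer clones the active copy into the staging slot, patches it, and publishes it by making the staging index active and advancing staging to the third slot:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_CODE_IX 3

typedef struct { int version; } CodeCopy;   /* stand-in for a loaded code copy */

static CodeCopy code[NUM_CODE_IX];
static atomic_uint active_ix;               /* read by everybody              */
static atomic_uint staging_ix = 1;          /* touched only by the one writer */

static unsigned active(void)
{
    return atomic_load_explicit(&active_ix, memory_order_acquire);
}

/* Writer only: clone active into staging, patch it, publish it. */
static void stage_and_commit(int new_version)
{
    unsigned s = atomic_load_explicit(&staging_ix, memory_order_relaxed);

    code[s] = code[active()];               /* "start staging": copy active   */
    code[s].version = new_version;          /* load/patch the staged copy     */

    atomic_store_explicit(&active_ix, s, memory_order_release);   /* commit   */
    atomic_store_explicit(&staging_ix, (s + 1) % NUM_CODE_IX,
                          memory_order_relaxed);
}

int main(void)
{
    code[0].version = 1;
    printf("active ix %u, version %d\n", active(), code[active()].version);
    stage_and_commit(2);
    printf("active ix %u, version %d\n", active(), code[active()].version);
    stage_and_commit(3);
    printf("active ix %u, version %d\n", active(), code[active()].version);
    return 0;
}

The slot that was active before the commit is left untouched, which is why readers that picked up the old index just before the flip can keep using it safely.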
+ * Must be followed by calls to either "end" and "commit" or "abort" before + * code write permission can be released. + */ +void erts_start_staging_code_ix(void); + +/* End the staging. + * Preceded by "start" and must be followed by "commit". + */ +void erts_end_staging_code_ix(void); + +/* Set staging code index as new active code index. + * Preceded by "end". + */ +void erts_commit_staging_code_ix(void); + +/* Abort the staging. + * Preceded by "start". + */ +void erts_abort_staging_code_ix(void); + +#ifdef ERTS_ENABLE_LOCK_CHECK +int erts_has_code_write_permission(void); +#endif + + + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +extern erts_smp_atomic32_t the_active_code_index; +extern erts_smp_atomic32_t the_staging_code_index; + +ERTS_GLB_INLINE ErtsCodeIndex erts_active_code_ix(void) +{ + return erts_smp_atomic32_read_nob(&the_active_code_index); +} +ERTS_GLB_INLINE ErtsCodeIndex erts_staging_code_ix(void) +{ + return erts_smp_atomic32_read_nob(&the_staging_code_index); +} + +#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ + +#endif /* !__CODE_IX_H__ */ + diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 025258e8de..28c4621ff2 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -1312,7 +1312,7 @@ int erts_net_message(Port *prt, if (is_not_pid(from) || is_not_atom(to)){ goto invalid_message; } - rp = erts_whereis_process(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC); + rp = erts_whereis_process(NULL, 0, to, 0, 0); if (rp) { Uint xsize = (type == DOP_REG_SEND ? 0 @@ -1338,7 +1338,6 @@ int erts_net_message(Port *prt, erts_queue_dist_message(rp, &locks, ede_copy, token); if (locks) erts_smp_proc_unlock(rp, locks); - erts_smp_proc_dec_refc(rp); } break; @@ -1364,7 +1363,7 @@ int erts_net_message(Port *prt, if (is_not_pid(to)) { goto invalid_message; } - rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC); + rp = erts_proc_lookup(to); if (rp) { Uint xsize = type == DOP_SEND ? 
0 : ERTS_HEAP_FRAG_SIZE(token_size); ErtsProcLocks locks = 0; @@ -1388,7 +1387,6 @@ int erts_net_message(Port *prt, erts_queue_dist_message(rp, &locks, ede_copy, token); if (locks) erts_smp_proc_unlock(rp, locks); - erts_smp_proc_dec_refc(rp); } break; @@ -1544,8 +1542,7 @@ int erts_net_message(Port *prt, if (is_not_pid(from) || is_not_internal_pid(to)) { goto invalid_message; } - rp = erts_pid2proc_opt(NULL, 0, to, rp_locks, - ERTS_P2P_FLG_SMP_INC_REFC); + rp = erts_pid2proc(NULL, 0, to, rp_locks); if (rp) { (void) erts_send_exit_signal(NULL, from, @@ -1556,7 +1553,6 @@ int erts_net_message(Port *prt, NULL, 0); erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); } break; } @@ -1973,6 +1969,7 @@ erts_dist_command(Port *prt, int reds_limit) bw(foq.first->extp, size); #endif reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); + erts_smp_atomic_add_nob(&erts_bytes_out, size); fob = foq.first; obufsize += size_obuf(fob); foq.first = foq.first->next; @@ -2056,6 +2053,7 @@ erts_dist_command(Port *prt, int reds_limit) bw(oq.first->extp, size); #endif reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); + erts_smp_atomic_add_nob(&erts_bytes_out, size); fob = oq.first; obufsize += size_obuf(fob); oq.first = oq.first->next; @@ -2242,7 +2240,7 @@ static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp) void *arg = ((struct print_to_data *) vptdp)->arg; Process *rp; ErtsMonitor *rmon; - rp = erts_pid2proc_unlocked(mon->pid); + rp = erts_proc_lookup(mon->pid); if (!rp || (rmon = erts_lookup_monitor(rp->monitors, mon->ref)) == NULL) { erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->pid); } else if (mon->type == MON_ORIGIN) { @@ -2281,7 +2279,7 @@ static void doit_print_link_info2(ErtsLink *lnk, void *vpplc) static void doit_print_link_info(ErtsLink *lnk, void *vptdp) { - if (is_internal_pid(lnk->pid) && erts_pid2proc_unlocked(lnk->pid)) { + if (is_internal_pid(lnk->pid) && erts_proc_lookup(lnk->pid)) { PrintLinkContext plc = {(struct print_to_data *) vptdp, lnk->pid}; erts_doforall_links(ERTS_LINK_ROOT(lnk), &doit_print_link_info2, &plc); } @@ -2303,7 +2301,7 @@ static void doit_print_nodelink_info(ErtsLink *lnk, void *vpcontext) { PrintNodeLinkContext *pcontext = vpcontext; - if (is_internal_pid(lnk->pid) && erts_pid2proc_unlocked(lnk->pid)) + if (is_internal_pid(lnk->pid) && erts_proc_lookup(lnk->pid)) erts_print(pcontext->ptd.to, pcontext->ptd.arg, "Remote monitoring: %T %T\n", lnk->pid, pcontext->sysname); } @@ -2451,15 +2449,15 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2) goto error; /* Check that all trap functions are defined !! 
*/ - if (dsend2_trap->address == NULL || - dsend3_trap->address == NULL || + if (dsend2_trap->addressv[0] == NULL || + dsend3_trap->addressv[0] == NULL || /* dsend_nosuspend_trap->address == NULL ||*/ - dlink_trap->address == NULL || - dunlink_trap->address == NULL || - dmonitor_node_trap->address == NULL || - dgroup_leader_trap->address == NULL || - dmonitor_p_trap->address == NULL || - dexit_trap->address == NULL) { + dlink_trap->addressv[0] == NULL || + dunlink_trap->addressv[0] == NULL || + dmonitor_node_trap->addressv[0] == NULL || + dgroup_leader_trap->addressv[0] == NULL || + dmonitor_p_trap->addressv[0] == NULL || + dexit_trap->addressv[0] == NULL) { goto error; } @@ -2706,9 +2704,8 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) } else { lp_locks = ERTS_PROC_LOCKS_XSIG_SEND; - lp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN, - local, lp_locks, - ERTS_P2P_FLG_SMP_INC_REFC); + lp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN, + local, lp_locks); if (!lp) { BIF_RET(am_true); /* ignore */ } @@ -2727,9 +2724,7 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) lp_locks &= ~ERTS_PROC_LOCK_MAIN; #endif erts_smp_proc_unlock(lp, lp_locks); - if (lp != BIF_P) - erts_smp_proc_dec_refc(lp); - else { + if (lp == BIF_P) { /* * We may have exited current process and may have to take action. */ diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 6fce032f9d..3eee53eba3 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -1675,7 +1675,7 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...) t_str = type_no_str(n); if (!t_str) { - sprintf(buf, "%d", (int) n); + erts_snprintf(buf, sizeof(buf), "%d", (int) n); t_str = buf; } @@ -2182,9 +2182,9 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg) if (want.code) { size.code = module_table_sz(); size.code += export_table_sz(); - size.code += export_list_size() * sizeof(Export); + size.code += export_entries_sz(); size.code += erts_fun_table_sz(); - size.code += allocated_modules*sizeof(Range); + size.code += erts_ranges_sz(); size.code += erts_total_code_size; } @@ -2300,11 +2300,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc) values[i].name = "static"; values[i].ui[0] = erts_max_ports*sizeof(Port) /* Port table */ - + erts_timer_wheel_memory_size() /* Timer wheel */ -#ifdef SYS_TMP_BUF_SIZE - + SYS_TMP_BUF_SIZE /* tmp_buf in sys on vxworks & ose */ -#endif - ; + + erts_timer_wheel_memory_size(); /* Timer wheel */ i++; erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space); @@ -2332,7 +2328,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc) values[i].arity = 2; values[i].name = "export_list"; - values[i].ui[0] = export_list_size() * sizeof(Export); + values[i].ui[0] = export_entries_sz(); i++; values[i].arity = 2; @@ -2347,7 +2343,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc) values[i].arity = 2; values[i].name = "module_refs"; - values[i].ui[0] = allocated_modules*sizeof(Range); + values[i].ui[0] = erts_ranges_sz(); i++; values[i].arity = 2; @@ -3576,12 +3572,12 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func) ftype = type_no_str(found_type); if (!ftype) { - sprintf(fbuf, "%d", (int) found_type); + erts_snprintf(fbuf, sizeof(fbuf), "%d", (int) found_type); ftype = fbuf; } otype = type_no_str(n); if (!otype) { - sprintf(obuf, "%d", (int) n); + erts_snprintf(obuf, sizeof(obuf), "%d", (int) n); otype = obuf; } diff --git a/erts/emulator/beam/erl_alloc.types 
b/erts/emulator/beam/erl_alloc.types index 4aa8fa82fb..0a4407f009 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -164,6 +164,7 @@ type MSG_REF FIXED_SIZE PROCESSES msg_ref type MSG_ROOTS TEMPORARY PROCESSES msg_roots type ROOTSET TEMPORARY PROCESSES root_set type LOADER_TMP TEMPORARY CODE loader_tmp +type PREPARED_CODE SHORT_LIVED CODE prepared_code type BIF_TIMER_TABLE LONG_LIVED SYSTEM bif_timer_table type SL_BIF_TIMER SHORT_LIVED PROCESSES bif_timer_sl type LL_BIF_TIMER STANDARD PROCESSES bif_timer_ll @@ -263,6 +264,8 @@ type ZLIB STANDARD SYSTEM zlib type CPU_GRPS_MAP LONG_LIVED SYSTEM cpu_groups_map type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q +type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q +type PROC_INTERVAL LONG_LIVED SYSTEM process_interval +if threads_no_smp # Need thread safe allocs, but std_alloc and fix_alloc are not; @@ -321,6 +324,8 @@ type SL_PTIMER SHORT_LIVED SYSTEM ptimer_sl type LL_PTIMER STANDARD SYSTEM ptimer_ll type SYS_MSG_Q SHORT_LIVED PROCESSES system_messages_queue type FP_EXCEPTION LONG_LIVED SYSTEM fp_exception +type LL_MPATHS LONG_LIVED SYSTEM ll_migration_paths +type SL_MPATHS SHORT_LIVED SYSTEM sl_migration_paths +endif +if hipe @@ -414,28 +419,4 @@ type CON_VPRINTF_BUF TEMPORARY SYSTEM con_vprintf_buf +endif -+if vxworks - -type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf -type PEND_DATA SYSTEM SYSTEM pending_data -type FD_TAB LONG_LIVED SYSTEM fd_tab -type FD_ENTRY_BUF SYSTEM SYSTEM fd_entry_buf - -+endif - -+if ose - -type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf -type PUTENV_STR SYSTEM SYSTEM putenv_string -type GETENV_STR SYSTEM SYSTEM getenv_string -type GETENV_STATE SYSTEM SYSTEM getenv_state -type SIG_ENTRY SYSTEM SYSTEM sig_entry -type DRIVER_DATA SYSTEM SYSTEM driver_data -type PGM_TAB SYSTEM SYSTEM pgm_tab -type PGM_ENTRY SYSTEM SYSTEM pgm_entry -type PRT_TAB SYSTEM SYSTEM prt_tab -type PRT_ENTRY SYSTEM SYSTEM prt_entry - -+endif - # ---------------------------------------------------------------------------- diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c index f308039baf..c5f432bea1 100644 --- a/erts/emulator/beam/erl_async.c +++ b/erts/emulator/beam/erl_async.c @@ -122,6 +122,8 @@ typedef struct { #endif } ErtsAsyncData; +#if defined(USE_THREADS) && defined(USE_VM_PROBES) + /* * Some compilers, e.g. GCC 4.2.1 and -O3, will optimize away DTrace * calls if they're the last thing in the function. 
:-( @@ -129,6 +131,7 @@ typedef struct { * https://github.com/memcached/memcached/commit/6298b3978687530bc9d219b6ac707a1b681b2a46 */ static unsigned gcc_optimizer_hack = 0; +#endif int erts_async_max_threads; /* Initialized by erl_init.c */ int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */ @@ -281,8 +284,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q) len = -1; DTRACE2(aio_pool_add, port_str, len); } -#endif gcc_optimizer_hack++; +#endif } static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c index cc4f2be8eb..474151d454 100644 --- a/erts/emulator/beam/erl_bif_binary.c +++ b/erts/emulator/beam/erl_bif_binary.c @@ -72,52 +72,29 @@ binary_matches(Process *p, Eterm arg1, Eterm arg2, Eterm arg3); void erts_init_bif_binary(void) { - sys_memset((void *) &binary_match_trap_export, 0, sizeof(Export)); - binary_match_trap_export.address = &binary_match_trap_export.code[3]; - binary_match_trap_export.code[0] = am_erlang; - binary_match_trap_export.code[1] = am_binary_match_trap; - binary_match_trap_export.code[2] = 3; - binary_match_trap_export.code[3] = (BeamInstr) em_apply_bif; - binary_match_trap_export.code[4] = (BeamInstr) &binary_match_trap; - - sys_memset((void *) &binary_matches_trap_export, 0, sizeof(Export)); - binary_matches_trap_export.address = &binary_matches_trap_export.code[3]; - binary_matches_trap_export.code[0] = am_erlang; - binary_matches_trap_export.code[1] = am_binary_matches_trap; - binary_matches_trap_export.code[2] = 3; - binary_matches_trap_export.code[3] = (BeamInstr) em_apply_bif; - binary_matches_trap_export.code[4] = (BeamInstr) &binary_matches_trap; - - sys_memset((void *) &binary_longest_prefix_trap_export, 0, sizeof(Export)); - binary_longest_prefix_trap_export.address = &binary_longest_prefix_trap_export.code[3]; - binary_longest_prefix_trap_export.code[0] = am_erlang; - binary_longest_prefix_trap_export.code[1] = am_binary_longest_prefix_trap; - binary_longest_prefix_trap_export.code[2] = 3; - binary_longest_prefix_trap_export.code[3] = (BeamInstr) em_apply_bif; - binary_longest_prefix_trap_export.code[4] = (BeamInstr) &binary_longest_prefix_trap; - - sys_memset((void *) &binary_longest_suffix_trap_export, 0, sizeof(Export)); - binary_longest_suffix_trap_export.address = &binary_longest_suffix_trap_export.code[3]; - binary_longest_suffix_trap_export.code[0] = am_erlang; - binary_longest_suffix_trap_export.code[1] = am_binary_longest_suffix_trap; - binary_longest_suffix_trap_export.code[2] = 3; - binary_longest_suffix_trap_export.code[3] = (BeamInstr) em_apply_bif; - binary_longest_suffix_trap_export.code[4] = (BeamInstr) &binary_longest_suffix_trap; - - sys_memset((void *) &binary_bin_to_list_trap_export, 0, sizeof(Export)); - binary_bin_to_list_trap_export.address = &binary_bin_to_list_trap_export.code[3]; - binary_bin_to_list_trap_export.code[0] = am_erlang; - binary_bin_to_list_trap_export.code[1] = am_binary_bin_to_list_trap; - binary_bin_to_list_trap_export.code[2] = 3; - binary_bin_to_list_trap_export.code[3] = (BeamInstr) em_apply_bif; - binary_bin_to_list_trap_export.code[4] = (BeamInstr) &binary_bin_to_list_trap; - sys_memset((void *) &binary_copy_trap_export, 0, sizeof(Export)); - binary_copy_trap_export.address = &binary_copy_trap_export.code[3]; - binary_copy_trap_export.code[0] = am_erlang; - binary_copy_trap_export.code[1] = am_binary_copy_trap; - binary_copy_trap_export.code[2] = 2; - binary_copy_trap_export.code[3] = 
(BeamInstr) em_apply_bif; - binary_copy_trap_export.code[4] = (BeamInstr) &binary_copy_trap; + erts_init_trap_export(&binary_match_trap_export, + am_erlang, am_binary_match_trap, 3, + &binary_match_trap); + + erts_init_trap_export(&binary_matches_trap_export, + am_erlang, am_binary_matches_trap, 3, + &binary_matches_trap); + + erts_init_trap_export(&binary_longest_prefix_trap_export, + am_erlang, am_binary_longest_prefix_trap, 3, + &binary_longest_prefix_trap); + + erts_init_trap_export(&binary_longest_suffix_trap_export, + am_erlang, am_binary_longest_suffix_trap, 3, + &binary_longest_suffix_trap); + + erts_init_trap_export(&binary_bin_to_list_trap_export, + am_erlang, am_binary_bin_to_list_trap, 3, + &binary_bin_to_list_trap); + + erts_init_trap_export(&binary_copy_trap_export, + am_erlang, am_binary_copy_trap, 2, + &binary_copy_trap); max_loop_limit = 0; return; diff --git a/erts/emulator/beam/erl_bif_chksum.c b/erts/emulator/beam/erl_bif_chksum.c index 06b7ffdf32..ff5ce3cc7a 100644 --- a/erts/emulator/beam/erl_bif_chksum.c +++ b/erts/emulator/beam/erl_bif_chksum.c @@ -42,16 +42,9 @@ static Export chksum_md5_2_exp; void erts_init_bif_chksum(void) { /* Non visual BIF to trap to. */ - memset(&chksum_md5_2_exp, 0, sizeof(Export)); - chksum_md5_2_exp.address = - &chksum_md5_2_exp.code[3]; - chksum_md5_2_exp.code[0] = am_erlang; - chksum_md5_2_exp.code[1] = am_atom_put("md5_trap",8); - chksum_md5_2_exp.code[2] = 2; - chksum_md5_2_exp.code[3] = - (BeamInstr) em_apply_bif; - chksum_md5_2_exp.code[4] = - (BeamInstr) &md5_2; + erts_init_trap_export(&chksum_md5_2_exp, + am_erlang, am_atom_put("md5_trap",8), 2, + &md5_2); } diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index c338ee1c4b..7f7c975e78 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -368,13 +368,11 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) #endif for (j = 0; j < erts_max_ports; j++) { Port* prt = &erts_port[j]; -#ifdef DDLL_SMP erts_smp_port_state_lock(prt); -#endif if (!(prt->status & FREE_PORT_FLAGS) && prt->drv_ptr->handle == dh) { -#if DDLL_SMP erts_smp_atomic_inc_nob(&prt->refc); +#if DDLL_SMP /* Extremely rare spinlock */ while(prt->status & ERTS_PORT_SFLG_INITIALIZING) { erts_smp_port_state_unlock(prt); @@ -598,13 +596,11 @@ done: #endif for (j = 0; j < erts_max_ports; j++) { Port* prt = &erts_port[j]; -#if DDLL_SMP erts_smp_port_state_lock(prt); -#endif if (!(prt->status & FREE_PORT_FLAGS) && prt->drv_ptr->handle == dh) { -#if DDLL_SMP erts_smp_atomic_inc_nob(&prt->refc); +#if DDLL_SMP /* Extremely rare spinlock */ while(prt->status & ERTS_PORT_SFLG_INITIALIZING) { erts_smp_port_state_unlock(prt); @@ -1060,13 +1056,11 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) #endif for (j = 0; j < erts_max_ports; j++) { Port* prt = &erts_port[j]; -#if DDLL_SMP erts_smp_port_state_lock(prt); -#endif if (!(prt->status & FREE_PORT_FLAGS) && prt->drv_ptr->handle == dh) { -#if DDLL_SMP erts_smp_atomic_inc_nob(&prt->refc); +#if DDLL_SMP while(prt->status & ERTS_PORT_SFLG_INITIALIZING) { erts_smp_port_state_unlock(prt); erts_smp_port_state_lock(prt); diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 45dc5fb11c..a3811ccdb0 100755 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -1338,13 +1338,15 @@ process_info_aux(Process *BIF_P, hp = HAlloc(BIF_P, 3); break; - case am_trap_exit: + case am_trap_exit: { + erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); hp = 
HAlloc(BIF_P, 3); - if (rp->flags & F_TRAPEXIT) + if (state & ERTS_PSFLG_TRAP_EXIT) res = am_true; else res = am_false; break; + } case am_error_handler: hp = HAlloc(BIF_P, 3); @@ -3098,15 +3100,13 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1) if(internal_pid_index(BIF_ARG_1) >= erts_max_processes) BIF_ERROR(BIF_P, BADARG); - rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN, - BIF_ARG_1, ERTS_PROC_LOCK_STATUS); + rp = erts_proc_lookup(BIF_ARG_1); if (!rp) { BIF_RET(am_false); } else { - int have_pending_exit = ERTS_PROC_PENDING_EXIT(rp); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - if (have_pending_exit) + if (erts_smp_atomic32_read_acqb(&rp->state) + & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false); else BIF_RET(am_true); @@ -3710,9 +3710,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) && is_internal_pid(tp[2])) { int xres; ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; - Process *rp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN, - tp[2], rp_locks, - ERTS_P2P_FLG_SMP_INC_REFC); + Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN, + tp[2], rp_locks); if (!rp) { DECL_AM(dead); BIF_RET(AM_dead); @@ -3738,7 +3737,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) rp_locks &= ~ERTS_PROC_LOCK_MAIN; #endif erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); if (xres > 1) { DECL_AM(message); BIF_RET(AM_message); @@ -3949,7 +3947,7 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock id = am_atom_put(ltype, strlen(ltype)); } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) { /* use registered names as id's for process locks if available */ - proc = erts_pid2proc_unlocked(lock->id); + proc = erts_proc_lookup(lock->id); if (proc && proc->reg) { id = proc->reg->name; } else { diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c index 831e05493a..1062d4379b 100644 --- a/erts/emulator/beam/erl_bif_os.c +++ b/erts/emulator/beam/erl_bif_os.c @@ -58,7 +58,7 @@ BIF_RETTYPE os_getpid_0(BIF_ALIST_0) char pid_string[21]; /* enough for a 64 bit number */ int n; Eterm* hp; - sys_get_pid(pid_string); /* In sys.c */ + sys_get_pid(pid_string, sizeof(pid_string)); /* In sys.c */ n = sys_strlen(pid_string); hp = HAlloc(BIF_P, n*2); BIF_RET(buf_to_intlist(&hp, pid_string, n, NIL)); diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c index 6b843d2e08..b036c5ef5c 100644 --- a/erts/emulator/beam/erl_bif_re.c +++ b/erts/emulator/beam/erl_bif_re.c @@ -71,14 +71,9 @@ void erts_init_bif_re(void) erts_pcre_stack_free = &erts_erts_pcre_stack_free; default_table = NULL; /* ISO8859-1 default, forced into pcre */ max_loop_limit = CONTEXT_REDS * LOOP_FACTOR; - - sys_memset((void *) &re_exec_trap_export, 0, sizeof(Export)); - re_exec_trap_export.address = &re_exec_trap_export.code[3]; - re_exec_trap_export.code[0] = am_erlang; - re_exec_trap_export.code[1] = am_re_run_trap; - re_exec_trap_export.code[2] = 3; - re_exec_trap_export.code[3] = (BeamInstr) em_apply_bif; - re_exec_trap_export.code[4] = (BeamInstr) &re_exec_trap; + + erts_init_trap_export(&re_exec_trap_export, am_erlang, am_re_run_trap, 3, + &re_exec_trap); grun_trap_exportp = erts_export_put(am_re,am_grun,3); urun_trap_exportp = erts_export_put(am_re,am_urun,3); diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c index d806be0704..525b11f61c 100644 --- a/erts/emulator/beam/erl_bif_timer.c +++ b/erts/emulator/beam/erl_bif_timer.c @@ -324,10 +324,9 @@ 
bif_timer_timeout(ErtsBifTimer* btm) ASSERT(!erts_get_current_process()); if (btm->flags & BTM_FLG_BYNAME) - rp = erts_whereis_process(NULL,0,btm->receiver.name,0,ERTS_P2P_FLG_SMP_INC_REFC); + rp = erts_whereis_process(NULL, 0, btm->receiver.name, 0, 0); else { rp = btm->receiver.proc.ess; - erts_smp_proc_inc_refc(rp); unlink_proc(btm); } @@ -379,7 +378,6 @@ bif_timer_timeout(ErtsBifTimer* btm) #endif ); erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); } } diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c index 80f774523c..805d788177 100644 --- a/erts/emulator/beam/erl_bif_trace.c +++ b/erts/emulator/beam/erl_bif_trace.c @@ -42,14 +42,33 @@ #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0}; + +/* + * The following variables are protected by code write permission. + */ static int erts_default_trace_pattern_is_on; static Binary *erts_default_match_spec; static Binary *erts_default_meta_match_spec; static struct trace_pattern_flags erts_default_trace_pattern_flags; static Eterm erts_default_meta_tracer_pid; +static struct { /* Protected by code write permission */ + int current; + int install; + int local; + BpFunctions f; /* Local functions */ + BpFunctions e; /* Export entries */ +#ifdef ERTS_SMP + Process* stager; + ErtsThrPrgrLaterOp lop; +#endif +} finish_bp; + static Eterm trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist); +#ifdef ERTS_SMP +static void smp_bp_finisher(void* arg); +#endif static BIF_RETTYPE system_monitor(Process *p, Eterm monitor_pid, Eterm list); @@ -60,12 +79,11 @@ static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key); static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key); static Eterm trace_info_on_load(Process* p, Eterm key); -static int setup_func_trace(Export* ep, void* match_prog); -static int reset_func_trace(Export* ep); -static void reset_bif_trace(int bif_index); -static void setup_bif_trace(int bif_index); -static void set_trace_bif(int bif_index, void* match_prog); -static void clear_trace_bif(int bif_index); +static void reset_bif_trace(void); +static void setup_bif_trace(void); +static void install_exp_breakpoints(BpFunctions* f); +static void uninstall_exp_breakpoints(BpFunctions* f); +static void clean_export_entries(BpFunctions* f); void erts_bif_trace_init(void) @@ -98,7 +116,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) { DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */ int i; - int matches = 0; + int matches = -1; int specified = 0; enum erts_break_op on; Binary* match_prog_set; @@ -108,8 +126,10 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) Process *meta_tracer_proc = p; Eterm meta_tracer_pid = p->id; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + if (!erts_try_seize_code_write_permission(p)) { + ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist); + } + finish_bp.current = -1; UseTmpHeap(3,p); /* @@ -241,7 +261,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) erts_default_meta_match_spec = NULL; erts_default_meta_tracer_pid = NIL; } - MatchSetUnref(match_prog_set); if (erts_default_trace_pattern_flags.breakpoint && flags.breakpoint) { /* Breakpoint trace -> breakpoint trace */ @@ -297,8 +316,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) 
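The reworked trace_pattern() above no longer blocks thread progress up front; it tries to seize the single code-write permission and yields (to be re-run later) if somebody else holds it. A stand-alone model of that discipline, with an invented ownership token and a crude round-robin driver standing in for the scheduler; the two-step BIF mimics an operation that needs more than one pass after seizing the permission:

#include <stdio.h>

static int code_write_owner = -1;      /* -1: permission is free */

static int try_seize(int pid)
{
    if (code_write_owner < 0) {
        code_write_owner = pid;
        return 1;
    }
    return 0;
}

static void release(void) { code_write_owner = -1; }

typedef enum { DONE, YIELD } Result;

/* Model of a code-changing BIF: never block, yield and get re-run instead. */
static Result code_changing_bif(int pid)
{
    if (code_write_owner != pid) {
        if (!try_seize(pid))
            return YIELD;              /* somebody else is staging: retry later */
        printf("process %d seized permission, staging\n", pid);
        return YIELD;                  /* models finishing in a later pass      */
    }
    printf("process %d commits and releases\n", pid);
    release();
    return DONE;
}

int main(void)
{
    int pending[3] = { 1, 2, 3 };
    int n = 3;

    while (n > 0) {                    /* crude round-robin "scheduler" */
        int next[3], m = 0, i;
        for (i = 0; i < n; i++)
            if (code_changing_bif(pending[i]) == YIELD)
                next[m++] = pending[i];
        for (i = 0; i < m; i++)
            pending[i] = next[i];
        n = m;
    }
    return 0;
}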
erts_default_trace_pattern_is_on = !!flags.breakpoint; } } - - goto done; + matches = 0; } else if (is_tuple(MFA)) { Eterm *tp = tuple_val(MFA); if (tp[0] != make_arityval(3)) { @@ -322,36 +340,64 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) if (is_small(mfa[2])) { mfa[2] = signed_val(mfa[2]); } - } else { - goto error; - } - if (meta_tracer_proc) { - meta_tracer_proc->trace_flags |= F_TRACER; - } + if (meta_tracer_proc) { + meta_tracer_proc->trace_flags |= F_TRACER; + } + matches = erts_set_trace_pattern(p, mfa, specified, + match_prog_set, match_prog_set, + on, flags, meta_tracer_pid, 0); + } - matches = erts_set_trace_pattern(mfa, specified, - match_prog_set, match_prog_set, - on, flags, meta_tracer_pid); + error: MatchSetUnref(match_prog_set); - - done: UnUseTmpHeap(3,p); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); - return make_small(matches); +#ifdef ERTS_SMP + if (finish_bp.current >= 0) { + ASSERT(matches >= 0); + ASSERT(finish_bp.stager == NULL); + finish_bp.stager = p; + erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop); + erts_smp_proc_inc_refc(p); + erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL); + ERTS_BIF_YIELD_RETURN(p, make_small(matches)); + } +#endif - error: + erts_release_code_write_permission(); - MatchSetUnref(match_prog_set); + if (matches >= 0) { + return make_small(matches); + } + else { + BIF_ERROR(p, BADARG); + } +} - UnUseTmpHeap(3,p); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); - BIF_ERROR(p, BADARG); +#ifdef ERTS_SMP +static void smp_bp_finisher(void* null) +{ + if (erts_finish_breakpointing()) { /* Not done */ + /* Arrange for being called again */ + erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop); + } + else { /* Done */ + Process* p = finish_bp.stager; +#ifdef DEBUG + finish_bp.stager = NULL; +#endif + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + if (!ERTS_PROC_IS_EXITING(p)) { + erts_resume(p, ERTS_PROC_LOCK_STATUS); + } + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_smp_proc_dec_refc(p); + erts_release_code_write_permission(); + } } +#endif /* ERTS_SMP */ void erts_get_default_trace_pattern(int *trace_pattern_is_on, @@ -360,6 +406,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, struct trace_pattern_flags *trace_pattern_flags, Eterm *meta_tracer_pid) { + ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() || + erts_smp_thr_progress_is_blocking()); if (trace_pattern_is_on) *trace_pattern_is_on = erts_default_trace_pattern_is_on; if (match_spec) @@ -372,7 +420,12 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, *meta_tracer_pid = erts_default_meta_tracer_pid; } - +int erts_is_default_trace_enabled(void) +{ + ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() || + erts_smp_thr_progress_is_blocking()); + return erts_default_trace_pattern_is_on; +} Uint erts_trace_flag2bit(Eterm flag) @@ -466,6 +519,10 @@ Eterm trace_3(BIF_ALIST_3) BIF_ERROR(p, BADARG); } + if (!erts_try_seize_code_write_permission(BIF_P)) { + ERTS_BIF_YIELD3(bif_export[BIF_trace_3], BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); + } + if (is_nil(tracer) || is_internal_pid(tracer)) { Process *tracer_proc = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, @@ -653,8 +710,7 @@ Eterm trace_3(BIF_ALIST_3) if (procs || mods) { /* tracing of processes */ for (i = 0; i < erts_max_processes; i++) { - Process* tracee_p = process_tab[i]; - + Process* tracee_p = erts_pix2proc(i); if (! 
tracee_p) continue; if (tracer != NIL) { @@ -730,6 +786,7 @@ Eterm trace_3(BIF_ALIST_3) erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); } #endif + erts_release_code_write_permission(); BIF_RET(make_small(matches)); @@ -745,6 +802,7 @@ Eterm trace_3(BIF_ALIST_3) erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); } #endif + erts_release_code_write_permission(); BIF_ERROR(p, BADARG); } @@ -772,8 +830,7 @@ static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer) return 1; } else if(is_internal_pid(tracee_port->tracer_proc)) { - Process *tracer_p = erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN, - tracee_port->tracer_proc, 0); + Process *tracer_p = erts_proc_lookup(tracee_port->tracer_proc); if (!tracer_p) { /* Current trace process now invalid * - discard it and approve the new. */ @@ -813,8 +870,7 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer) return 1; } else if(is_internal_pid(tracee_p->tracer_proc)) { - Process *tracer_p = erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN, - tracee_p->tracer_proc, 0); + Process *tracer_p = erts_proc_lookup(tracee_p->tracer_proc); if (!tracer_p) { /* Current trace process now invalid * - discard it and approve the new. */ @@ -841,6 +897,11 @@ Eterm trace_info_2(BIF_ALIST_2) Eterm What = BIF_ARG_1; Eterm Key = BIF_ARG_2; Eterm res; + + if (!erts_try_seize_code_write_permission(p)) { + ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, What, Key); + } + if (What == am_on_load) { res = trace_info_on_load(p, Key); } else if (is_atom(What) || is_pid(What)) { @@ -848,8 +909,10 @@ Eterm trace_info_2(BIF_ALIST_2) } else if (is_tuple(What)) { res = trace_info_func(p, What, Key); } else { + erts_release_code_write_permission(); BIF_ERROR(p, BADARG); } + erts_release_code_write_permission(); BIF_RET(res); } @@ -876,7 +939,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) } if (is_internal_pid(tracer)) { - if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, tracer, 0)) { + if (!erts_proc_lookup(tracer)) { reset_tracer: tracee->trace_flags &= ~TRACEE_FLAGS; trace_flags = tracee->trace_flags; @@ -977,64 +1040,54 @@ static int function_is_traced(Process *p, Binary **ms, /* out */ Binary **ms_meta, /* out */ Eterm *tracer_pid_meta, /* out */ - Sint *count, /* out */ + Uint *count, /* out */ Eterm *call_time) /* out */ { Export e; Export* ep; - int i; - BeamInstr *code; + BeamInstr* pc; /* First look for an export entry */ e.code[0] = mfa[0]; e.code[1] = mfa[1]; e.code[2] = mfa[2]; if ((ep = export_get(&e)) != NULL) { - if (ep->address == ep->code+3 && - ep->code[3] != (BeamInstr) em_call_error_handler) { - if (ep->code[3] == (BeamInstr) em_call_traced_function) { - *ms = ep->match_prog_set; + pc = ep->code+3; + if (ep->addressv[erts_active_code_ix()] == pc && + *pc != (BeamInstr) em_call_error_handler) { + + int r = 0; + + ASSERT(*pc == (BeamInstr) em_apply_bif || + *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)); + + if (erts_is_trace_break(pc, ms, 0)) { return FUNC_TRACE_GLOBAL_TRACE; } - if (ep->code[3] == (BeamInstr) em_apply_bif) { - for (i = 0; i < BIF_SIZE; ++i) { - if (bif_export[i] == ep) { - int r = 0; - - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) { - *ms = ep->match_prog_set; - return FUNC_TRACE_GLOBAL_TRACE; - } else { - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) { - r |= FUNC_TRACE_LOCAL_TRACE; - *ms = ep->match_prog_set; - } - if (erts_is_mtrace_break(ep->code+3, ms_meta, - tracer_pid_meta)) { - r |= FUNC_TRACE_META_TRACE; - } - if (erts_is_time_break(p, ep->code+3, call_time)) { - r |= FUNC_TRACE_TIME_TRACE; - } - } - 
return r ? r : FUNC_TRACE_UNTRACED; - } - } - erl_exit(1,"Impossible ghost bif encountered in trace_info."); + + if (erts_is_trace_break(pc, ms, 1)) { + r |= FUNC_TRACE_LOCAL_TRACE; + } + if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) { + r |= FUNC_TRACE_META_TRACE; } + if (erts_is_time_break(p, pc, call_time)) { + r |= FUNC_TRACE_TIME_TRACE; + } + return r ? r : FUNC_TRACE_UNTRACED; } } /* OK, now look for breakpoint tracing */ - if ((code = erts_find_local_func(mfa)) != NULL) { + if ((pc = erts_find_local_func(mfa)) != NULL) { int r = - (erts_is_trace_break(code, ms, NULL) + (erts_is_trace_break(pc, ms, 1) ? FUNC_TRACE_LOCAL_TRACE : 0) - | (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta) + | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta) ? FUNC_TRACE_META_TRACE : 0) - | (erts_is_count_break(code, count) + | (erts_is_count_break(pc, count) ? FUNC_TRACE_COUNT_TRACE : 0) - | (erts_is_time_break(p, code, call_time) + | (erts_is_time_break(p, pc, call_time) ? FUNC_TRACE_TIME_TRACE : 0); return r ? r : FUNC_TRACE_UNTRACED; @@ -1049,7 +1102,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key) Eterm* hp; DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */ Binary *ms = NULL, *ms_meta = NULL; - Sint count = 0; + Uint count = 0; Eterm traced = am_false; Eterm match_spec = am_false; Eterm retval = am_false; @@ -1137,9 +1190,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key) break; case am_call_count: if (r & FUNC_TRACE_COUNT_TRACE) { - retval = count < 0 ? - erts_make_integer(-count-1, p) : - erts_make_integer(count, p); + retval = erts_make_integer(count, p); } break; case am_call_time: @@ -1328,38 +1379,53 @@ trace_info_on_load(Process* p, Eterm key) #undef FUNC_TRACE_LOCAL_TRACE int -erts_set_trace_pattern(Eterm* mfa, int specified, +erts_set_trace_pattern(Process*p, Eterm* mfa, int specified, Binary* match_prog_set, Binary *meta_match_prog_set, int on, struct trace_pattern_flags flags, - Eterm meta_tracer_pid) + Eterm meta_tracer_pid, int is_blocking) { + const ErtsCodeIndex code_ix = erts_active_code_ix(); int matches = 0; int i; + int n; + BpFunction* fp; /* * First work on normal functions (not real BIFs). */ - - for (i = 0; i < export_list_size(); i++) { - Export* ep = export_list(i); - int j; - - if (ExportIsBuiltIn(ep)) { - continue; - } - - for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) { - /* Empty loop body */ - } - if (j == specified) { - if (on) { - if (! flags.breakpoint) - matches += setup_func_trace(ep, match_prog_set); - else - reset_func_trace(ep); - } else if (! flags.breakpoint) { - matches += reset_func_trace(ep); + erts_bp_match_export(&finish_bp.e, mfa, specified); + fp = finish_bp.e.matching; + n = finish_bp.e.matched; + + for (i = 0; i < n; i++) { + BeamInstr* pc = fp[i].pc; + Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code)); + + if (on && !flags.breakpoint) { + /* Turn on global call tracing */ + if (ep->addressv[code_ix] != pc) { + fp[i].mod->curr.num_traced_exports++; +#ifdef DEBUG + pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI); +#endif + pc[0] = (BeamInstr) BeamOp(op_jump_f); + pc[1] = (BeamInstr) ep->addressv[code_ix]; + } + erts_set_call_trace_bif(pc, match_prog_set, 0); + if (ep->addressv[code_ix] != pc) { + pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint); + } + } else if (!on && flags.breakpoint) { + /* Turn off breakpoint tracing -- nothing to do here. 
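The rewritten erts_set_trace_pattern() recovers the enclosing Export entry from a matched code pointer with ((char *)(pc-3)) - offsetof(Export, code). That is the classic container-of idiom; here is a self-contained sketch with a simplified, hypothetical export layout (the real Export struct has more fields):

/* Illustrative only -- not ERTS code. */
#include <stddef.h>
#include <stdio.h>

typedef unsigned long instr_t;

typedef struct {
    void   *addressv[2];   /* active/staging entry pointers (assumption) */
    instr_t code[5];       /* func_info words followed by the entry slot */
} export_t;

/* Given a pointer at code[3], recover the enclosing export_t. */
static export_t *export_from_pc(instr_t *pc)
{
    /* step back to code[0], then back over the leading members */
    return (export_t *)((char *)(pc - 3) - offsetof(export_t, code));
}

int main(void)
{
    export_t e;
    printf("recovered %s\n",
           export_from_pc(&e.code[3]) == &e ? "ok" : "mismatch");
    return 0;
}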
*/ + } else { + /* + * Turn off global tracing, either explicitly or implicitly + * before turning on breakpoint tracing. + */ + erts_clear_call_trace_bif(pc, 0); + if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { + pc[0] = (BeamInstr) BeamOp(op_jump_f); } } } @@ -1384,26 +1450,15 @@ erts_set_trace_pattern(Eterm* mfa, int specified, /* Empty loop body */ } if (j == specified) { + BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3; + if (! flags.breakpoint) { /* Export entry call trace */ if (on) { - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) { - ASSERT(ExportIsBuiltIn(bif_export[i])); - erts_clear_mtrace_bif - ((BeamInstr *)bif_export[i]->code + 3); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META; - } - set_trace_bif(i, match_prog_set); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL; - erts_bif_trace_flags[i] |= BIF_TRACE_AS_GLOBAL; - setup_bif_trace(i); + erts_clear_call_trace_bif(pc, 1); + erts_clear_mtrace_bif(pc); + erts_set_call_trace_bif(pc, match_prog_set, 0); } else { /* off */ - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) { - clear_trace_bif(i); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL; - } - if (! erts_bif_trace_flags[i]) { - reset_bif_trace(i); - } + erts_clear_call_trace_bif(pc, 0); } matches++; } else { /* Breakpoint call trace */ @@ -1411,52 +1466,33 @@ erts_set_trace_pattern(Eterm* mfa, int specified, if (on) { if (flags.local) { - set_trace_bif(i, match_prog_set); - erts_bif_trace_flags[i] |= BIF_TRACE_AS_LOCAL; - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL; + erts_clear_call_trace_bif(pc, 0); + erts_set_call_trace_bif(pc, match_prog_set, 1); m = 1; } if (flags.meta) { - erts_set_mtrace_bif - ((BeamInstr *)bif_export[i]->code + 3, - meta_match_prog_set, meta_tracer_pid); - erts_bif_trace_flags[i] |= BIF_TRACE_AS_META; - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL; + erts_set_mtrace_bif(pc, meta_match_prog_set, + meta_tracer_pid); m = 1; } if (flags.call_time) { - erts_set_time_trace_bif(bif_export[i]->code + 3, on); + erts_set_time_trace_bif(pc, on); /* I don't want to remove any other tracers */ - erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME; m = 1; } - if (erts_bif_trace_flags[i]) { - setup_bif_trace(i); - } } else { /* off */ if (flags.local) { - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) { - clear_trace_bif(i); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL; - } + erts_clear_call_trace_bif(pc, 1); m = 1; } if (flags.meta) { - if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) { - erts_clear_mtrace_bif - ((BeamInstr *)bif_export[i]->code + 3); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META; - } + erts_clear_mtrace_bif(pc); m = 1; } if (flags.call_time) { - erts_clear_time_trace_bif(bif_export[i]->code + 3); - erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME; + erts_clear_time_trace_bif(pc); m = 1; } - if (! erts_bif_trace_flags[i]) { - reset_bif_trace(i); - } } matches += m; } @@ -1466,176 +1502,241 @@ erts_set_trace_pattern(Eterm* mfa, int specified, /* ** So, now for breakpoint tracing */ + erts_bp_match_functions(&finish_bp.f, mfa, specified); if (on) { if (! 
flags.breakpoint) { - erts_clear_trace_break(mfa, specified); - erts_clear_mtrace_break(mfa, specified); - erts_clear_count_break(mfa, specified); - erts_clear_time_break(mfa, specified); + erts_clear_all_breaks(&finish_bp.f); } else { - int m = 0; if (flags.local) { - m = erts_set_trace_break(mfa, specified, match_prog_set, - am_true); + erts_set_trace_break(&finish_bp.f, match_prog_set); } if (flags.meta) { - m = erts_set_mtrace_break(mfa, specified, meta_match_prog_set, - meta_tracer_pid); + erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set, + meta_tracer_pid); } if (flags.call_count) { - m = erts_set_count_break(mfa, specified, on); + erts_set_count_break(&finish_bp.f, on); } if (flags.call_time) { - m = erts_set_time_break(mfa, specified, on); + erts_set_time_break(&finish_bp.f, on); } - /* All assignments to 'm' above should give the same value, - * so just use the last */ - matches += m; } } else { - int m = 0; if (flags.local) { - m = erts_clear_trace_break(mfa, specified); + erts_clear_trace_break(&finish_bp.f); } if (flags.meta) { - m = erts_clear_mtrace_break(mfa, specified); + erts_clear_mtrace_break(&finish_bp.f); } if (flags.call_count) { - m = erts_clear_count_break(mfa, specified); + erts_clear_count_break(&finish_bp.f); } if (flags.call_time) { - m = erts_clear_time_break(mfa, specified); + erts_clear_time_break(&finish_bp.f); } - /* All assignments to 'm' above should give the same value, - * so just use the last */ - matches += m; } + finish_bp.current = 0; + finish_bp.install = on; + finish_bp.local = flags.breakpoint; + +#ifdef ERTS_SMP + if (is_blocking) { + ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); +#endif + while (erts_finish_breakpointing()) { + /* Empty loop body */ + } +#ifdef ERTS_SMP + finish_bp.current = -1; + } +#endif + + if (flags.breakpoint) { + matches += finish_bp.f.matched; + } else { + matches += finish_bp.e.matched; + } return matches; } -/* - * Setup function tracing for the given exported function. - * - * Return Value: 1 if entry refers to a BIF or loaded function, - * 0 if the entry refers to a function not loaded. - */ - -static int -setup_func_trace(Export* ep, void* match_prog) +int +erts_finish_breakpointing(void) { - if (ep->address == ep->code+3) { - if (ep->code[3] == (BeamInstr) em_call_error_handler) { - return 0; - } else if (ep->code[3] == (BeamInstr) em_call_traced_function) { - MatchSetUnref(ep->match_prog_set); - ep->match_prog_set = match_prog; - MatchSetRef(ep->match_prog_set); - return 1; - } else { - /* - * We ignore apply/3 and anything else. - */ - return 0; - } - } - + ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + /* - * Currently no trace support for native code. + * Memory barriers will be issued for all processes *before* + * each of the stages below. (Unless the other schedulers + * are blocked, in which case memory barriers will be issued + * when they are awaken.) */ - if (erts_is_native_break(ep->address)) { + switch (finish_bp.current++) { + case 0: + /* + * At this point, in all functions that are to be breakpointed, + * a pointer to a GenericBp struct has already been added, + * + * Insert the new breakpoints (if any) into the + * code. Different schedulers may see breakpoint instruction + * at different times, but it does not matter since the newly + * added breakpoints are disabled. 
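The new erts_finish_breakpointing() advances exactly one phase per call and reports whether more phases remain, so the SMP build can interleave the phases with thread progress (smp_bp_finisher reschedules itself) while a blocking caller simply loops. A reduced stand-alone sketch of that driver shape; the phase texts paraphrase the comments above, and none of this is ERTS code:

/* Illustrative only. */
#include <stdio.h>

static int current_phase = 0;

static int finish_stepwise(void)
{
    switch (current_phase++) {
    case 0: puts("phase 0: insert new (still disabled) breakpoints"); return 1;
    case 1: puts("phase 1: commit staged data, activating it");       return 1;
    case 2: puts("phase 2: remove now-unused breakpoint instructions"); return 1;
    case 3: puts("phase 3: consolidate and free bookkeeping");        return 0;
    default: return 0;
    }
}

int main(void)
{
    while (finish_stepwise())
        ;   /* a blocking caller loops; SMP reschedules between phases */
    return 0;
}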
+ */ + if (finish_bp.install) { + if (finish_bp.local) { + erts_install_breakpoints(&finish_bp.f); + } else { + install_exp_breakpoints(&finish_bp.e); + } + } + setup_bif_trace(); + return 1; + case 1: + /* + * Switch index for the breakpoint data, activating the staged + * data. (Depending on the changes in the breakpoint data, + * that could either activate breakpoints or disable + * breakpoints.) + */ + erts_commit_staged_bp(); + return 1; + case 2: + /* + * Remove breakpoints instructions for disabled breakpoints + * (if any). + */ + if (finish_bp.install) { + if (finish_bp.local) { + uninstall_exp_breakpoints(&finish_bp.e); + } else { + erts_uninstall_breakpoints(&finish_bp.f); + } + } else { + if (finish_bp.local) { + erts_uninstall_breakpoints(&finish_bp.f); + } else { + uninstall_exp_breakpoints(&finish_bp.e); + } + } + reset_bif_trace(); + return 1; + case 3: + /* + * Now all breakpoints have either been inserted or removed. + * For all updated breakpoints, copy the active breakpoint + * data to the staged breakpoint data to make them equal + * (simplifying for the next time breakpoints are to be + * updated). If any breakpoints have been totally disabled, + * deallocate the GenericBp structs for them. + */ + erts_consolidate_bif_bp_data(); + clean_export_entries(&finish_bp.e); + erts_consolidate_bp_data(&finish_bp.e, 0); + erts_consolidate_bp_data(&finish_bp.f, 1); + erts_bp_free_matched_functions(&finish_bp.e); + erts_bp_free_matched_functions(&finish_bp.f); return 0; + default: + ASSERT(0); } - - ep->code[3] = (BeamInstr) em_call_traced_function; - ep->code[4] = (BeamInstr) ep->address; - ep->address = ep->code+3; - ep->match_prog_set = match_prog; - MatchSetRef(ep->match_prog_set); - return 1; + return 0; } -static void setup_bif_trace(int bif_index) { - Export *ep = bif_export[bif_index]; - - ASSERT(ExportIsBuiltIn(ep)); - ASSERT(ep->code[4]); - ep->code[4] = (BeamInstr) bif_table[bif_index].traced; -} +static void +install_exp_breakpoints(BpFunctions* f) +{ + const ErtsCodeIndex code_ix = erts_active_code_ix(); + BpFunction* fp = f->matching; + Uint ne = f->matched; + Uint i; + Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr); -static void set_trace_bif(int bif_index, void* match_prog) { - Export *ep = bif_export[bif_index]; - -#ifdef HARDDEBUG - erts_fprintf(stderr, "set_trace_bif: %T:%T/%bpu\n", - ep->code[0], ep->code[1], ep->code[2]); -#endif - ASSERT(ExportIsBuiltIn(ep)); - MatchSetUnref(ep->match_prog_set); - ep->match_prog_set = match_prog; - MatchSetRef(ep->match_prog_set); -} + for (i = 0; i < ne; i++) { + BeamInstr* pc = fp[i].pc; + Export* ep = (Export *) (((char *)pc)-offset); -/* - * Reset function tracing for the given exported function. - * - * Return Value: 1 if entry refers to a BIF or loaded function, - * 0 if the entry refers to a function not loaded. - */ + ep->addressv[code_ix] = pc; + } +} -static int -reset_func_trace(Export* ep) +static void +uninstall_exp_breakpoints(BpFunctions* f) { - if (ep->address == ep->code+3) { - if (ep->code[3] == (BeamInstr) em_call_error_handler) { - return 0; - } else if (ep->code[3] == (BeamInstr) em_call_traced_function) { - ep->address = (Uint *) ep->code[4]; - MatchSetUnref(ep->match_prog_set); - ep->match_prog_set = NULL; - return 1; - } else { - /* - * We ignore apply/3 and anything else. 
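install_exp_breakpoints() and uninstall_exp_breakpoints() only flip where the export's active address points: either at the export's own code[] trampoline (so calls hit the jump/breakpoint instruction there) or back at the real entry saved in code[4]. A tiny analogue using plain function pointers, with illustrative names rather than the BEAM mechanism:

/* Illustrative only. */
#include <stdio.h>

typedef void (*entry_fn)(void);

static void real_entry(void) { puts("real code"); }
static void trampoline(void) { puts("breakpoint trampoline"); real_entry(); }

typedef struct {
    entry_fn active;       /* what callers actually jump through */
    entry_fn saved_real;   /* plays the role of ep->code[4]      */
} export_sketch;

static void install_bp(export_sketch *e)   { e->saved_real = e->active; e->active = trampoline; }
static void uninstall_bp(export_sketch *e) { e->active = e->saved_real; }

int main(void)
{
    export_sketch e = { real_entry, real_entry };
    e.active();          /* real code                   */
    install_bp(&e);
    e.active();          /* goes through the trampoline */
    uninstall_bp(&e);
    e.active();          /* real code again             */
    return 0;
}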
- */ - return 0; + const ErtsCodeIndex code_ix = erts_active_code_ix(); + BpFunction* fp = f->matching; + Uint ne = f->matched; + Uint i; + Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr); + + for (i = 0; i < ne; i++) { + BeamInstr* pc = fp[i].pc; + Export* ep = (Export *) (((char *)pc)-offset); + + if (ep->addressv[code_ix] != pc) { + continue; } + ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f)); + ep->addressv[code_ix] = (BeamInstr *) ep->code[4]; } - - /* - * Currently no trace support for native code. - */ - if (erts_is_native_break(ep->address)) { - return 0; - } - - /* - * Nothing to do, but the export entry matches. - */ +} - return 1; +static void +clean_export_entries(BpFunctions* f) +{ + const ErtsCodeIndex code_ix = erts_active_code_ix(); + BpFunction* fp = f->matching; + Uint ne = f->matched; + Uint i; + Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr); + + for (i = 0; i < ne; i++) { + BeamInstr* pc = fp[i].pc; + Export* ep = (Export *) (((char *)pc)-offset); + + if (ep->addressv[code_ix] == pc) { + continue; + } + if (*pc == (BeamInstr) BeamOp(op_jump_f)) { + ep->code[3] = (BeamInstr) 0; + ep->code[4] = (BeamInstr) 0; + } + } } -static void reset_bif_trace(int bif_index) { - Export *ep = bif_export[bif_index]; - - ASSERT(ExportIsBuiltIn(ep)); - ASSERT(ep->code[4]); - ASSERT(! ep->match_prog_set); - ASSERT(! erts_is_mtrace_break((BeamInstr *)ep->code+3, NULL, NULL)); - ep->code[4] = (BeamInstr) bif_table[bif_index].f; +static void +setup_bif_trace(void) +{ + int i; + + for (i = 0; i < BIF_SIZE; ++i) { + Export *ep = bif_export[i]; + GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1]; + if (g) { + if (ExportIsBuiltIn(ep)) { + ASSERT(ep->code[4]); + ep->code[4] = (BeamInstr) bif_table[i].traced; + } + } + } } -static void clear_trace_bif(int bif_index) { - Export *ep = bif_export[bif_index]; - -#ifdef HARDDEBUG - erts_fprintf(stderr, "clear_trace_bif: %T:%T/%bpu\n", - ep->code[0], ep->code[1], ep->code[2]); -#endif - ASSERT(ExportIsBuiltIn(ep)); - MatchSetUnref(ep->match_prog_set); - ep->match_prog_set = NULL; +static void +reset_bif_trace(void) +{ + int i; + ErtsBpIndex active = erts_active_bp_ix(); + + for (i = 0; i < BIF_SIZE; ++i) { + Export *ep = bif_export[i]; + BeamInstr* pc = ep->code+3; + GenericBp* g = (GenericBp *) pc[-4]; + if (g && g->data[active].flags == 0) { + if (ExportIsBuiltIn(ep)) { + ASSERT(ep->code[4]); + ep->code[4] = (BeamInstr) bif_table[i].f; + } + } + } } /* @@ -2142,9 +2243,9 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) /* Check if valid process, no locks are taken */ if (is_internal_pid(profiler)) { - if (internal_pid_index(profiler) >= erts_max_processes) goto error; - profiler_p = process_tab[internal_pid_index(profiler)]; - if (INVALID_PID(profiler_p, profiler)) goto error; + profiler_p = erts_proc_lookup(profiler); + if (!profiler_p) + goto error; } else if (is_internal_port(profiler)) { if (internal_port_index(profiler) >= erts_max_ports) goto error; profiler_port = &erts_port[internal_port_index(profiler)]; diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c index 6bc227eeda..3753b618e1 100644 --- a/erts/emulator/beam/erl_bits.c +++ b/erts/emulator/beam/erl_bits.c @@ -1247,6 +1247,12 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term, */ erts_bin_offset = 8*sb->size + sb->bitsize; + if (unit > 1) { + if ((unit == 8 && (erts_bin_offset & 7) != 0) || + (erts_bin_offset % unit) != 0) { + goto badarg; + } + } used_size_in_bits = erts_bin_offset + build_size_in_bits; 
sb->is_writable = 0; /* Make sure that no one else can write. */ pb->size = NBYTES(used_size_in_bits); @@ -1316,6 +1322,12 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term, */ ERTS_GET_BINARY_BYTES(bin, src_bytes, bitoffs, bitsize); erts_bin_offset = 8*binary_size(bin) + bitsize; + if (unit > 1) { + if ((unit == 8 && (erts_bin_offset & 7) != 0) || + (erts_bin_offset % unit) != 0) { + goto badarg; + } + } used_size_in_bits = erts_bin_offset + build_size_in_bits; used_size_in_bytes = NBYTES(used_size_in_bits); bin_size = 2*used_size_in_bytes; @@ -1363,12 +1375,6 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term, /* * Now copy the data into the binary. */ - if (unit > 1) { - if ((unit == 8 && (erts_bin_offset & 7) != 0) || - (erts_bin_offset % unit) != 0) { - return THE_NON_VALUE; - } - } copy_binary_to_buffer(erts_current_bin, 0, src_bytes, bitoffs, erts_bin_offset); return make_binary(sb); diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c index fe3693d0ca..3f90f34736 100644 --- a/erts/emulator/beam/erl_cpu_topology.c +++ b/erts/emulator/beam/erl_cpu_topology.c @@ -486,7 +486,7 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp) erts_thr_set_main_status(1, (int) esdp->no); /* Make sure we check if we should bind to a cpu or not... */ - esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND; + (void) ERTS_RUNQ_FLGS_SET(esdp->run_queue, ERTS_RUNQ_FLG_CHK_CPU_BIND); } #endif @@ -498,9 +498,6 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) erts_cpu_groups_map_t *cgm; erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_callback_call_t *cgcc; -#ifdef ERTS_SMP - esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND; -#endif erts_smp_runq_unlock(esdp->run_queue); erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); cpu_id = scheduler2cpu_map[esdp->no].bind_id; diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 51bdf53823..1ba1048afa 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -224,8 +224,9 @@ Export ets_select_continue_exp; static Export ets_delete_continue_exp; static void -free_dbtable(DbTable* tb) +free_dbtable(void *vtb) { + DbTable *tb = (DbTable *) vtb; #ifdef HARDDEBUG if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) { erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n", @@ -250,21 +251,8 @@ free_dbtable(DbTable* tb) #endif ASSERT(is_immed(tb->common.heir_data)); erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable)); - ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable)); - ERTS_SMP_MEMORY_BARRIER; } -#ifdef ERTS_SMP -static void -chk_free_dbtable(void *vtb) -{ - DbTable * tb = (DbTable *) vtb; - ERTS_THR_MEMORY_BARRIER; - if (erts_refc_dectest(&tb->common.ref, 0) == 0) - free_dbtable(tb); -} -#endif - static void schedule_free_dbtable(DbTable* tb) { /* @@ -275,15 +263,10 @@ static void schedule_free_dbtable(DbTable* tb) * need to unlock the table lock after this * function has returned). 
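The ETS table release no longer juggles a per-scheduler reference count; the destructor is handed to erts_schedule_thr_prgr_later_op() and runs only once the thread-progress machinery says no scheduler can still be reading the table, which is why the comment above notes that the caller may still need to unlock after scheduling the free. A reduced sketch of that deferred-release shape, with a plain pending list standing in for the thread-progress machinery (illustrative only):

#include <stdio.h>
#include <stdlib.h>

typedef struct later_op {
    void (*func)(void *);
    void *arg;
    struct later_op *next;
} later_op;

static later_op *pending;

static void schedule_later_op(later_op *op, void (*f)(void *), void *a)
{
    op->func = f; op->arg = a; op->next = pending; pending = op;
}

/* Run by the "scheduler" once progress has been made by everyone. */
static void run_later_ops(void)
{
    while (pending) {
        later_op *op = pending;
        pending = op->next;      /* detach before the callback may free op */
        op->func(op->arg);
    }
}

typedef struct { int id; later_op release; } table;

static void free_table(void *vtb)
{
    printf("freeing table %d\n", ((table *)vtb)->id);
    free(vtb);
}

int main(void)
{
    table *tb = malloc(sizeof *tb);
    if (!tb) return 1;
    tb->id = 42;
    schedule_later_op(&tb->release, free_table, tb);  /* owner is done with it */
    run_later_ops();                                  /* ...freed only later   */
    return 0;
}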
*/ -#ifdef ERTS_SMP - int scheds = erts_get_max_no_executing_schedulers(); - ASSERT(scheds >= 1); ASSERT(erts_refc_read(&tb->common.ref, 0) == 0); - erts_refc_init(&tb->common.ref, scheds); - erts_schedule_multi_misc_aux_work(0, scheds, chk_free_dbtable, tb); -#else - free_dbtable(tb); -#endif + erts_schedule_thr_prgr_later_op(free_dbtable, + (void *) tb, + &tb->release.data); } static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock, @@ -542,10 +525,6 @@ static int remove_named_tab(DbTable *tb, int have_lock) &rwlock); #ifdef ERTS_SMP if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) { - /* - * We keep our increased refc over this op in order to - * prevent the table from disapearing. - */ db_unlock(tb, LCK_WRITE); erts_smp_rwmtx_rwlock(rwlock); db_lock(tb, LCK_WRITE); @@ -1443,7 +1422,6 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0); tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, &init_tb, sizeof(DbTable)); - ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable)); erts_smp_atomic_init_nob(&tb->common.memory_size, erts_smp_atomic_read_nob(&init_tb.common.memory_size)); } @@ -1481,7 +1459,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) "** Too many db tables **\n"); free_heir_data(tb); tb->common.meth->db_free_table(tb); - free_dbtable(tb); + free_dbtable((void *) tb); BIF_ERROR(BIF_P, SYSTEM_LIMIT); } @@ -2816,7 +2794,6 @@ void init_db(void) { DbTable init_tb; int i; - extern BeamInstr* em_apply_bif; Eterm *hp; unsigned bits; size_t size; @@ -2850,7 +2827,7 @@ void init_db(void) else db_max_tabs = user_requested_db_max_tabs; - bits = erts_fit_in_bits(db_max_tabs-1); + bits = erts_fit_in_bits_int32(db_max_tabs-1); if (bits > SMALL_BITS) { erl_exit(1,"Max limit for ets tabled too high %u (max %u).", db_max_tabs, ((Uint)1)<<SMALL_BITS); @@ -2888,7 +2865,6 @@ void init_db(void) meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, &init_tb, sizeof(DbTable)); - ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable)); erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size, erts_smp_atomic_read_nob(&init_tb.common.memory_size)); @@ -2920,7 +2896,6 @@ void init_db(void) meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, &init_tb, sizeof(DbTable)); - ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable)); erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size, erts_smp_atomic_read_nob(&init_tb.common.memory_size)); @@ -2949,49 +2924,24 @@ void init_db(void) } /* Non visual BIF to trap to. */ - memset(&ets_select_delete_continue_exp, 0, sizeof(Export)); - ets_select_delete_continue_exp.address = - &ets_select_delete_continue_exp.code[3]; - ets_select_delete_continue_exp.code[0] = am_ets; - ets_select_delete_continue_exp.code[1] = am_atom_put("delete_trap",11); - ets_select_delete_continue_exp.code[2] = 1; - ets_select_delete_continue_exp.code[3] = - (BeamInstr) em_apply_bif; - ets_select_delete_continue_exp.code[4] = - (BeamInstr) &ets_select_delete_1; + erts_init_trap_export(&ets_select_delete_continue_exp, + am_ets, am_atom_put("delete_trap",11), 1, + &ets_select_delete_1); /* Non visual BIF to trap to. 
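The repeated memset-plus-field-assignment blocks for the ETS trap exports collapse into one erts_init_trap_export(export, module, function, arity, C function) call each. The helper below is only a guess at that shape, with simplified hypothetical types and a placeholder opcode; the real Export layout and BeamInstr values differ:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uintptr_t instr_t;
typedef uintptr_t term_t;

typedef struct {
    instr_t *address;   /* where calls to this export jump            */
    instr_t  code[5];   /* module, function, arity, opcode, C function */
} trap_export_t;

enum { OP_APPLY_BIF = 0xB1F };   /* placeholder, not the real BEAM opcode */

static void init_trap_export(trap_export_t *ep, term_t mod, term_t func,
                             int arity, instr_t bif_addr)
{
    memset(ep, 0, sizeof(*ep));
    ep->code[0] = mod;
    ep->code[1] = func;
    ep->code[2] = (instr_t) arity;
    ep->code[3] = OP_APPLY_BIF;
    ep->code[4] = bif_addr;          /* address of the C trap function */
    ep->address = &ep->code[3];      /* entry point is the apply_bif slot */
}

int main(void)
{
    trap_export_t e;
    init_trap_export(&e, /*mod*/ 1, /*func*/ 2, /*arity*/ 1, /*bif*/ 0xB00F);
    printf("arity slot: %lu\n", (unsigned long) e.code[2]);
    return 0;
}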
*/ - memset(&ets_select_count_continue_exp, 0, sizeof(Export)); - ets_select_count_continue_exp.address = - &ets_select_count_continue_exp.code[3]; - ets_select_count_continue_exp.code[0] = am_ets; - ets_select_count_continue_exp.code[1] = am_atom_put("count_trap",11); - ets_select_count_continue_exp.code[2] = 1; - ets_select_count_continue_exp.code[3] = - (BeamInstr) em_apply_bif; - ets_select_count_continue_exp.code[4] = - (BeamInstr) &ets_select_count_1; + erts_init_trap_export(&ets_select_count_continue_exp, + am_ets, am_atom_put("count_trap",11), 1, + &ets_select_count_1); /* Non visual BIF to trap to. */ - memset(&ets_select_continue_exp, 0, sizeof(Export)); - ets_select_continue_exp.address = - &ets_select_continue_exp.code[3]; - ets_select_continue_exp.code[0] = am_ets; - ets_select_continue_exp.code[1] = am_atom_put("select_trap",11); - ets_select_continue_exp.code[2] = 1; - ets_select_continue_exp.code[3] = - (BeamInstr) em_apply_bif; - ets_select_continue_exp.code[4] = - (BeamInstr) &ets_select_trap_1; + erts_init_trap_export(&ets_select_continue_exp, + am_ets, am_atom_put("select_trap",11), 1, + &ets_select_trap_1); /* Non visual BIF to trap to. */ - memset(&ets_delete_continue_exp, 0, sizeof(Export)); - ets_delete_continue_exp.address = &ets_delete_continue_exp.code[3]; - ets_delete_continue_exp.code[0] = am_ets; - ets_delete_continue_exp.code[1] = am_atom_put("delete_trap",11); - ets_delete_continue_exp.code[2] = 1; - ets_delete_continue_exp.code[3] = (BeamInstr) em_apply_bif; - ets_delete_continue_exp.code[4] = (BeamInstr) &ets_delete_trap; + erts_init_trap_export(&ets_delete_continue_exp, + am_ets, am_atom_put("delete_trap",11), 1, + &ets_delete_trap); hp = ms_delete_all_buff; ms_delete_all = CONS(hp, am_true, NIL); @@ -3124,7 +3074,7 @@ retry: if (to_proc == NULL) { return 0; /* heir not alive, table still mine */ } - if (erts_cmp_timeval(&to_proc->started, &tb->common.heir_started) != 0) { + if (to_proc->started_interval != tb->common.heir_started_interval) { erts_smp_proc_unlock(to_proc, to_locks); return 0; /* heir dead and pid reused, table still mine */ } @@ -3520,14 +3470,14 @@ static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data) return; } if (heir == me->id) { - tb->common.heir_started = me->started; + erts_ensure_later_proc_interval(me->started_interval); + tb->common.heir_started_interval = me->started_interval; } else { - Process* heir_proc= erts_pid2proc_opt(me, ERTS_PROC_LOCK_MAIN, heir, - 0, ERTS_P2P_FLG_SMP_INC_REFC); + Process* heir_proc= erts_proc_lookup(heir); if (heir_proc != NULL) { - tb->common.heir_started = heir_proc->started; - erts_smp_proc_dec_refc(heir_proc); + erts_ensure_later_proc_interval(heir_proc->started_interval); + tb->common.heir_started_interval = heir_proc->started_interval; } else { tb->common.heir = am_none; } diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index 2e5deaf338..e9a661efbc 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -27,6 +27,10 @@ #define __DB_H__ #include "sys.h" +#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY +#define ERL_THR_PROGRESS_TSD_TYPE_ONLY +#include "erl_thr_progress.h" +#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY #include "bif.h" #include "erl_db_util.h" /* Flags */ @@ -36,6 +40,11 @@ Uint erts_get_ets_misc_mem_size(void); +typedef struct { + DbTableCommon common; + ErtsThrPrgrLaterOp data; +} DbTableRelease; + /* * So, the structure for a database table, NB this is only * interesting in db.c. 
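heir_started, a SysTimeval, becomes heir_started_interval, a strictly increasing 64-bit creation counter kept current via erts_ensure_later_proc_interval(). Comparing the remembered interval with the process currently occupying the pid detects pid reuse without depending on clock resolution. A stand-alone sketch of the idea, with simplified types rather than ERTS code:

#include <stdint.h>
#include <stdio.h>

typedef struct { int alive; uint64_t started_interval; } proc_slot;

static uint64_t interval_counter;   /* bumped at every spawn in this sketch */

static void spawn_into(proc_slot *slot)
{
    slot->alive = 1;
    slot->started_interval = ++interval_counter;
}

/* The table remembers which incarnation of the slot was named heir. */
static int heir_still_valid(const proc_slot *slot, uint64_t remembered)
{
    return slot->alive && slot->started_interval == remembered;
}

int main(void)
{
    proc_slot s;
    uint64_t remembered;

    spawn_into(&s);
    remembered = s.started_interval;   /* table records the heir   */
    spawn_into(&s);                    /* pid slot reused later on */
    printf("heir valid: %d\n", heir_still_valid(&s, remembered));  /* 0 */
    return 0;
}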
@@ -44,6 +53,7 @@ union db_table { DbTableCommon common; /* Any type of db table */ DbTableHash hash; /* Linear hash array specific data */ DbTableTree tree; /* AVL tree specific data */ + DbTableRelease release; /*TT*/ }; diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c index 312050b931..faa7f31d99 100644 --- a/erts/emulator/beam/erl_db_tree.c +++ b/erts/emulator/beam/erl_db_tree.c @@ -452,16 +452,8 @@ DbTableMethod db_tree = void db_initialize_tree(void) { - memset(&ets_select_reverse_exp, 0, sizeof(Export)); - ets_select_reverse_exp.address = - &ets_select_reverse_exp.code[3]; - ets_select_reverse_exp.code[0] = am_ets; - ets_select_reverse_exp.code[1] = am_reverse; - ets_select_reverse_exp.code[2] = 3; - ets_select_reverse_exp.code[3] = - (BeamInstr) em_apply_bif; - ets_select_reverse_exp.code[4] = - (BeamInstr) &ets_select_reverse; + erts_init_trap_export(&ets_select_reverse_exp, am_ets, am_reverse, 3, + &ets_select_reverse); return; }; diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index 42907e2e84..0c9ca83ce4 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -2480,7 +2480,7 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei) vnum = tmp->variable; } if (vnum >= 0) - sprintf(buff,tmp->error_string, vnum); + erts_snprintf(buff,sizeof(buff)+20,tmp->error_string, vnum); else strcpy(buff,tmp->error_string); sl = strlen(buff); @@ -4485,7 +4485,9 @@ static DMCRet dmc_fun(DMCContext *context, if (context->err_info != NULL) { /* Ugly, should define a better RETURN_TERM_ERROR interface... */ char buff[100]; - sprintf(buff, "Function %%T/%d does_not_exist.", (int)a - 1); + erts_snprintf(buff, sizeof(buff), + "Function %%T/%d does_not_exist.", + (int)a - 1); RETURN_TERM_ERROR(buff, p[1], context, *constant); } else { return retFail; @@ -4500,7 +4502,7 @@ static DMCRet dmc_fun(DMCContext *context, if (context->err_info != NULL) { /* Ugly, should define a better RETURN_TERM_ERROR interface... 
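The sprintf() calls in the match-spec error paths are replaced with erts_snprintf() so a long expansion can only be truncated, never overflow the on-stack buffer. The same discipline with standard snprintf(), in a trivially runnable form:

#include <stdio.h>

int main(void)
{
    char buff[100];
    int arity = 3;

    /* Bounded: at most sizeof(buff)-1 characters plus the terminator. */
    snprintf(buff, sizeof(buff), "Function foo/%d does_not_exist.", arity - 1);
    puts(buff);
    return 0;
}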
*/ char buff[100]; - sprintf(buff, + erts_snprintf(buff, sizeof(buff), "Function %%T/%d cannot be called in this context.", (int)a - 1); RETURN_TERM_ERROR(buff, p[1], context, *constant); @@ -4764,7 +4766,7 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info) for (j = 0; j < x && DMC_PEEK(heap,j) != n; ++j) ; ASSERT(j < x); - sprintf(buff+1,"%u", (unsigned) j); + erts_snprintf(buff+1, sizeof(buff) - 1, "%u", (unsigned) j); /* Yes, writing directly into terms, they ARE off heap */ *p = am_atom_put(buff, strlen(buff)); } diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h index 6a96e174e1..dcecc4251a 100644 --- a/erts/emulator/beam/erl_db_util.h +++ b/erts/emulator/beam/erl_db_util.h @@ -209,7 +209,7 @@ typedef struct db_fixation { */ typedef struct db_table_common { - erts_refc_t ref; /* fixation counter and delete counter */ + erts_refc_t ref; /* fixation counter */ #ifdef ERTS_SMP erts_smp_rwmtx_t rwlock; /* rw lock on table */ erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */ @@ -219,7 +219,7 @@ typedef struct db_table_common { Eterm owner; /* Pid of the creator */ Eterm heir; /* Pid of the heir */ UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */ - SysTimeval heir_started; /* To further identify the heir */ + Uint64 heir_started_interval; /* To further identify the heir */ Eterm the_name; /* an atom */ Eterm id; /* atom | integer */ DbTableMethod* meth; /* table methods */ diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h index 1ae9a211d7..771ee46d2b 100644 --- a/erts/emulator/beam/erl_driver.h +++ b/erts/emulator/beam/erl_driver.h @@ -87,10 +87,7 @@ #include <stdlib.h> #include <string.h> /* ssize_t on Mac OS X */ -#if defined(VXWORKS) -# include <ioLib.h> -typedef struct iovec SysIOVec; -#elif defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_) +#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_) #ifndef STATIC_ERLANG_DRIVER /* Windows dynamic drivers, everything is different... 
*/ #define ERL_DRIVER_TYPES_ONLY @@ -370,11 +367,7 @@ typedef struct erl_drv_entry { /* For windows dynamic drivers */ #ifndef ERL_DRIVER_TYPES_ONLY -#if defined(VXWORKS) -# define DRIVER_INIT(DRIVER_NAME) \ - ErlDrvEntry* DRIVER_NAME ## _init(void); \ - ErlDrvEntry* DRIVER_NAME ## _init(void) -#elif defined(__WIN32__) +#if defined(__WIN32__) # define DRIVER_INIT(DRIVER_NAME) \ __declspec(dllexport) ErlDrvEntry* driver_init(void); \ __declspec(dllexport) ErlDrvEntry* driver_init(void) diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h index 5c66a0bf73..b673ef6b3c 100644 --- a/erts/emulator/beam/erl_fun.h +++ b/erts/emulator/beam/erl_fun.h @@ -77,8 +77,6 @@ ErlFunEntry* erts_get_fun_entry(Eterm mod, int uniq, int index); ErlFunEntry* erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index, byte* uniq, int index, int arity); -ErlFunEntry* erts_get_fun_entry2(Eterm mod, int old_uniq, int old_index, - byte* uniq, int index, int arity); void erts_erase_fun_entry(ErlFunEntry* fe); void erts_cleanup_funs(ErlFunThing* funp); diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index 52a6e52e6c..5ae4b9254b 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -336,6 +336,19 @@ erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity) return result; } +static ERTS_INLINE void reset_active_writer(Process *p) +{ + struct erl_off_heap_header* ptr; + ptr = MSO(p).first; + while (ptr) { + if (ptr->thing_word == HEADER_PROC_BIN) { + ProcBin *pbp = (ProcBin*) ptr; + pbp->flags &= ~PB_ACTIVE_WRITER; + } + ptr = ptr->next; + } +} + /* * Garbage collect a process. * @@ -357,11 +370,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj) trace_gc(p, am_gc_start); } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - p->gcstatus = p->status; - p->status = P_GARBING; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); - + erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); if (erts_system_monitor_long_gc != 0) { get_now(&ms1, &s1, &us1); } @@ -395,6 +404,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj) DTRACE2(gc_minor_end, pidbuf, reclaimed_now); } } + reset_active_writer(p); /* * Finish. @@ -404,9 +414,8 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj) ErtsGcQuickSanityCheck(p); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - p->status = p->gcstatus; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + if (IS_TRACED_FL(p, F_TRACE_GC)) { trace_gc(p, am_gc_end); } @@ -490,10 +499,7 @@ erts_garbage_collect_hibernate(Process* p) /* * Preliminaries. */ - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - p->gcstatus = p->status; - p->status = P_GARBING; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); ErtsGcQuickSanityCheck(p); ASSERT(p->mbuf_sz == 0); ASSERT(p->mbuf == 0); @@ -604,9 +610,7 @@ erts_garbage_collect_hibernate(Process* p) ErtsGcQuickSanityCheck(p); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - p->status = p->gcstatus; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); } @@ -630,10 +634,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, /* * Set GC state. 
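The GC entry and exit paths stop taking the status lock to swap p->status/p->gcstatus and instead set and clear a single ERTS_PSFLG_GC bit in the atomic process state word with read-bor/read-band operations. A minimal C11-atomics sketch of that pattern; the flag value and names below are assumptions, not the real ERTS definitions:

#include <stdatomic.h>
#include <stdio.h>

#define PSFLG_GC (1u << 3)   /* arbitrary bit position for the sketch */

static _Atomic unsigned state;

static void gc_begin(void) { atomic_fetch_or(&state, PSFLG_GC); }
static void gc_end(void)   { atomic_fetch_and(&state, ~PSFLG_GC); }
static int  in_gc(void)    { return (atomic_load(&state) & PSFLG_GC) != 0; }

int main(void)
{
    gc_begin();
    printf("in gc: %d\n", in_gc());   /* 1 */
    gc_end();
    printf("in gc: %d\n", in_gc());   /* 0 */
    return 0;
}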
*/ - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - p->gcstatus = p->status; - p->status = P_GARBING; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); /* * We assume that the caller has already done a major collection @@ -775,9 +776,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, /* * Restore status. */ - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - p->status = p->gcstatus; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); } static int @@ -2181,7 +2180,6 @@ link_live_proc_bin(struct shrink_cand_data *shrink, if (pbp->flags & PB_ACTIVE_WRITER) { - pbp->flags &= ~PB_ACTIVE_WRITER; shrink->no_of_active++; } else { /* inactive */ diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 1eb3dba240..4b90e5394a 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -55,6 +55,61 @@ #endif /* + * The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands + * only. Do not remove even though they aren't used elsewhere in the emulator! + */ +#ifdef ERTS_SMP +const int etp_smp_compiled = 1; +#else +const int etp_smp_compiled = 0; +#endif +#ifdef USE_THREADS +const int etp_thread_compiled = 1; +#else +const int etp_thread_compiled = 0; +#endif +const char etp_erts_version[] = ERLANG_VERSION; +const char etp_otp_release[] = ERLANG_OTP_RELEASE; +const char etp_compile_date[] = ERLANG_COMPILE_DATE; +const char etp_arch[] = ERLANG_ARCHITECTURE; +#ifdef ERTS_ENABLE_KERNEL_POLL +const int etp_kernel_poll_support = 1; +#else +const int etp_kernel_poll_support = 0; +#endif +#if defined(ARCH_64) +const int etp_arch_bits = 64; +#elif defined(ARCH_32) +const int etp_arch_bits = 32; +#else +# error "Not 64-bit, nor 32-bit arch" +#endif +#if HALFWORD_HEAP +const int etp_halfword = 1; +#else +const int etp_halfword = 0; +#endif +#ifdef HIPE +const int etp_hipe = 1; +#else +const int etp_hipe = 0; +#endif +#ifdef DEBUG +const int etp_debug_compiled = 1; +#else +const int etp_debug_compiled = 0; +#endif +#ifdef ERTS_ENABLE_LOCK_COUNT +const int etp_lock_count = 1; +#else +const int etp_lock_count = 0; +#endif +#ifdef ERTS_ENABLE_LOCK_CHECK +const int etp_lock_check = 1; +#else +const int etp_lock_check = 0; +#endif +/* * Note about VxWorks: All variables must be initialized by executable code, * not by an initializer. Otherwise a new instance of the emulator will * inherit previous values. 
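The etp_* constants exist only so the debugger helper script (erts/etc/unix/etp-commands) can read build-time configuration straight out of the emulator binary or a core dump, without calling into it. The same trick in miniature, using a hypothetical feature switch:

#include <stdio.h>

#ifdef MY_FEATURE                    /* hypothetical compile-time switch */
const int etp_style_my_feature = 1;  /* a debugger can `print` this symbol */
#else
const int etp_style_my_feature = 0;
#endif

int main(void)
{
    printf("feature compiled in: %d\n", etp_style_my_feature);
    return 0;
}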
@@ -241,6 +296,7 @@ erl_init(int ncpu) erts_init_trace(); erts_init_binary(); erts_init_bits(); + erts_code_ix_init(); erts_init_fun_table(); init_atom_table(); init_export_table(); @@ -289,7 +345,8 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** Eterm env; start_mod = am_atom_put(modname, sys_strlen(modname)); - if (erts_find_function(start_mod, am_start, 2) == NULL) { + if (erts_find_function(start_mod, am_start, 2, + erts_active_code_ix()) == NULL) { erl_exit(5, "No function %s:start/2\n", modname); } @@ -388,7 +445,7 @@ load_preloaded(void) if ((code = sys_preload_begin(&preload_p[i])) == 0) erl_exit(1, "Failed to find preloaded code for module %s\n", name); - res = erts_load_module(NULL, 0, NIL, &module_name, code, length); + res = erts_preload_module(NULL, 0, NIL, &module_name, code, length); sys_preload_end(&preload_p[i]); if (res != NIL) erl_exit(1,"Failed loading preloaded module %s (%T)\n", @@ -555,6 +612,7 @@ early_init(int *argc, char **argv) /* erts_printf_eterm_func = erts_printf_term; erts_disable_tolerant_timeofday = 0; display_items = 200; + erts_proc.max = ERTS_DEFAULT_MAX_PROCESSES; erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE; erts_async_max_threads = 0; erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE; @@ -1097,7 +1155,7 @@ erl_start(int argc, char **argv) case 'P': /* set maximum number of processes */ Parg = get_arg(argv[i]+2, argv[i+1], &i); - erts_max_processes = atoi(Parg); + erts_proc.max = atoi(Parg); /* Check of result is delayed until later. This is because +R may be given after +P. */ break; @@ -1396,10 +1454,10 @@ erl_start(int argc, char **argv) } /* Delayed check of +P flag */ - if (erts_max_processes < ERTS_MIN_PROCESSES - || erts_max_processes > ERTS_MAX_PROCESSES + if (erts_proc.max < ERTS_MIN_PROCESSES + || erts_proc.max > ERTS_MAX_PROCESSES || (erts_use_r9_pids_ports - && erts_max_processes > ERTS_MAX_R9_PROCESSES)) { + && erts_proc.max > ERTS_MAX_R9_PROCESSES)) { erts_fprintf(stderr, "bad number of processes %s\n", Parg); erts_usage(); } @@ -1427,6 +1485,8 @@ erl_start(int argc, char **argv) erl_init(ncpu); load_preloaded(); + erts_end_staging_code_ix(); + erts_commit_staging_code_ix(); erts_initialized = 1; diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index b545ec07c0..314d2f6a9c 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -82,8 +82,8 @@ static erts_lc_lock_order_t erts_lock_order[] = { #ifdef ERTS_SMP { "bif_timers", NULL }, { "reg_tab", NULL }, - { "migration_info_update", NULL }, { "proc_main", "pid" }, + { "old_code", "address" }, #ifdef HIPE { "hipe_mfait_lock", NULL }, #endif @@ -93,6 +93,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "proc_msgq", "pid" }, { "dist_entry", "address" }, { "dist_entry_links", "address" }, + { "code_write_permission", NULL }, { "proc_status", "pid" }, { "proc_tab", NULL }, { "ports_snapshot", NULL }, @@ -124,6 +125,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "removed_fd_pre_alloc_lock", "address" }, { "state_prealloc", NULL }, { "schdlr_sspnd", NULL }, + { "migration_info_update", NULL }, { "run_queue", "address" }, { "cpu_info", NULL }, { "pollset", "address" }, @@ -443,7 +445,7 @@ print_lock2(char *prefix, Sint16 id, Wterm extra, Uint16 flags, char *suffix) "%s'%s:%p%s'%s%s", prefix, lname, - boxed_val(extra), + _unchecked_boxed_val(extra), lock_type(flags), rw_op_str(flags), suffix); diff --git a/erts/emulator/beam/erl_lock_check.h 
b/erts/emulator/beam/erl_lock_check.h index b67f36fa06..df7b3758e1 100644 --- a/erts/emulator/beam/erl_lock_check.h +++ b/erts/emulator/beam/erl_lock_check.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2005-2011. All Rights Reserved. + * Copyright Ericsson AB 2005-2012. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -104,7 +104,7 @@ void erts_lc_unrequire_lock(erts_lc_lock_t *lck); #define ERTS_LC_ASSERT(A) \ - ((void) ((A) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A))) + ((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A))) #ifdef ERTS_SMP #define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A) #else diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 919567ab27..d5b7d01048 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -296,36 +296,6 @@ erts_msg_distext2heap(Process *pp, return THE_NON_VALUE; } -static ERTS_INLINE void -notify_new_message(Process *receiver) -{ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS - & erts_proc_lc_my_proc_locks(receiver)); - - switch (receiver->status) { - case P_GARBING: - switch (receiver->gcstatus) { - case P_SUSPENDED: - goto suspended; - case P_WAITING: - goto waiting; - default: - break; - } - break; - case P_SUSPENDED: - suspended: - receiver->rstatus = P_RUNABLE; - break; - case P_WAITING: - waiting: - erts_add_to_runq(receiver); - break; - default: - break; - } -} - void erts_queue_dist_message(Process *rcvr, ErtsProcLocks *rcvr_locks, @@ -339,7 +309,7 @@ erts_queue_dist_message(Process *rcvr, Sint tok_serial = 0; #endif #ifdef ERTS_SMP - ErtsProcLocks need_locks; + erts_aint_t state; #endif ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr)); @@ -347,20 +317,21 @@ erts_queue_dist_message(Process *rcvr, mp = message_alloc(); #ifdef ERTS_SMP - need_locks = ~(*rcvr_locks) & (ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); - if (need_locks) { - *rcvr_locks |= need_locks; - if (erts_smp_proc_trylock(rcvr, need_locks) == EBUSY) { - if (need_locks == ERTS_PROC_LOCK_MSGQ) { + if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) { + if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) { + ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ; + if (*rcvr_locks & ERTS_PROC_LOCK_STATUS) { erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS); - need_locks = (ERTS_PROC_LOCK_MSGQ - | ERTS_PROC_LOCK_STATUS); + need_locks |= ERTS_PROC_LOCK_STATUS; } erts_smp_proc_lock(rcvr, need_locks); } } - if (rcvr->is_exiting || ERTS_PROC_PENDING_EXIT(rcvr)) { + state = erts_smp_atomic32_read_acqb(&rcvr->state); + if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) { + if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) + erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); /* Drop message if receiver is exiting or has a pending exit ... */ if (is_not_nil(token)) { ErlHeapFragment *heap_frag; @@ -376,6 +347,8 @@ erts_queue_dist_message(Process *rcvr, /* Ahh... need to decode it in order to trace it... 
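erts_queue_dist_message() and queue_message() now take only the message-queue lock, and they respect the documented lock order (proc_msgq before proc_status): if the non-blocking trylock fails while proc_status is already held, the status lock is dropped and both locks are retaken in canonical order. A reduced pthread sketch of that back-off, illustrative only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t msgq   = PTHREAD_MUTEX_INITIALIZER;  /* taken first  */
static pthread_mutex_t status = PTHREAD_MUTEX_INITIALIZER;  /* taken second */

/* Called while `status` is already held. */
static void lock_msgq_keeping_order(void)
{
    if (pthread_mutex_trylock(&msgq) == 0)
        return;                    /* got it without blocking: order is safe */
    pthread_mutex_unlock(&status); /* back off ...                            */
    pthread_mutex_lock(&msgq);     /* ... and retake both in canonical order  */
    pthread_mutex_lock(&status);
}

int main(void)
{
    pthread_mutex_lock(&status);
    lock_msgq_keeping_order();
    puts("holding msgq and status in canonical order");
    pthread_mutex_unlock(&status);
    pthread_mutex_unlock(&msgq);
    return 0;
}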
*/ ErlHeapFragment *mbuf; Eterm msg; + if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) + erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); message_free(mp); msg = erts_msg_distext2heap(rcvr, rcvr_locks, &mbuf, &token, dist_ext); if (is_value(msg)) @@ -437,26 +410,33 @@ erts_queue_dist_message(Process *rcvr, mp->data.dist_ext = dist_ext; LINK_MESSAGE(rcvr, mp); - notify_new_message(rcvr); + if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) + erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); + + erts_proc_notify_new_message(rcvr); } } /* Add a message last in message queue */ -void -erts_queue_message(Process* receiver, - ErtsProcLocks *receiver_locks, - ErlHeapFragment* bp, - Eterm message, - Eterm seq_trace_token +static Sint +queue_message(Process *c_p, + Process* receiver, + ErtsProcLocks *receiver_locks, + erts_aint32_t *receiver_state, + ErlHeapFragment* bp, + Eterm message, + Eterm seq_trace_token #ifdef USE_VM_PROBES , Eterm dt_utag #endif -) + ) { + Sint res; ErlMessage* mp; -#ifdef ERTS_SMP - ErtsProcLocks need_locks; -#else + int locked_msgq = 0; + erts_aint_t state; + +#ifndef ERTS_SMP ASSERT(bp != NULL || receiver->mbuf == NULL); #endif @@ -464,31 +444,45 @@ erts_queue_message(Process* receiver, mp = message_alloc(); + if (receiver_state) + state = *receiver_state; + else + state = erts_smp_atomic32_read_acqb(&receiver->state); + #ifdef ERTS_SMP - need_locks = ~(*receiver_locks) & (ERTS_PROC_LOCK_MSGQ - | ERTS_PROC_LOCK_STATUS); - if (need_locks) { - *receiver_locks |= need_locks; - if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) { - if (need_locks == ERTS_PROC_LOCK_MSGQ) { + + if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) + goto exiting; + + if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) { + if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) { + ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ; + if (*receiver_locks & ERTS_PROC_LOCK_STATUS) { erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS); - need_locks = (ERTS_PROC_LOCK_MSGQ - | ERTS_PROC_LOCK_STATUS); + need_locks |= ERTS_PROC_LOCK_STATUS; } erts_smp_proc_lock(receiver, need_locks); } + locked_msgq = 1; + state = erts_smp_atomic32_read_nob(&receiver->state); + if (receiver_state) + *receiver_state = state; } - if (receiver->is_exiting || ERTS_PROC_PENDING_EXIT(receiver)) { - /* Drop message if receiver is exiting or has a pending - * exit ... - */ +#endif + + if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) { +#ifdef ERTS_SMP + exiting: +#endif + /* Drop message if receiver is exiting or has a pending exit... */ + if (locked_msgq) + erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); if (bp) free_message_buffer(bp); message_free(mp); - return; + return 0; } -#endif ERL_MESSAGE_TERM(mp) = message; ERL_MESSAGE_TOKEN(mp) = seq_trace_token; @@ -498,7 +492,10 @@ erts_queue_message(Process* receiver, mp->next = NULL; mp->data.heap_frag = bp; -#ifdef ERTS_SMP +#ifndef ERTS_SMP + res = receiver->msg.len; +#else + res = receiver->u.alive.msg_inq.len; if (*receiver_locks & ERTS_PROC_LOCK_MAIN) { /* * We move 'in queue' to 'private queue' and place @@ -508,15 +505,15 @@ erts_queue_message(Process* receiver, * we don't need to include the 'in queue' in * the root set when garbage collecting. 
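The move from the shared "in queue" to the private queue that the comment above refers to (implemented by the ERTS_SMP_MSGQ_MV_INQ2PRIVQ macro further down) is an O(1) splice of two singly linked queues that each keep a pointer to their last next field. A stand-alone sketch of the same splice:

#include <stdio.h>

typedef struct msg { int payload; struct msg *next; } msg;
typedef struct { msg *first; msg **last; int len; } queue;

static void q_init(queue *q) { q->first = NULL; q->last = &q->first; q->len = 0; }

static void q_append(queue *q, msg *m)
{
    m->next = NULL;
    *q->last = m;          /* patch the previous tail's next pointer */
    q->last = &m->next;
    q->len++;
}

/* Move everything from `in` to the end of `priv` without touching payloads. */
static void q_splice(queue *priv, queue *in)
{
    if (!in->first)
        return;
    *priv->last = in->first;
    priv->last = in->last;
    priv->len += in->len;
    q_init(in);
}

int main(void)
{
    queue priv, in;
    msg a = {1}, b = {2};

    q_init(&priv); q_init(&in);
    q_append(&priv, &a);
    q_append(&in, &b);
    q_splice(&priv, &in);
    for (msg *m = priv.first; m; m = m->next)
        printf("%d\n", m->payload);   /* prints 1 then 2 */
    return 0;
}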
*/ + res += receiver->msg.len; ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver); LINK_MESSAGE_PRIVQ(receiver, mp); } - else { + else +#endif + { LINK_MESSAGE(receiver, mp); } -#else - LINK_MESSAGE(receiver, mp); -#endif #ifdef USE_VM_PROBES if (DTRACE_ENABLED(message_queued)) { @@ -536,15 +533,43 @@ erts_queue_message(Process* receiver, tok_label, tok_lastcnt, tok_serial); } #endif - notify_new_message(receiver); - if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { + if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) trace_receive(receiver, message); - } + + if (locked_msgq) + erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); + + erts_proc_notify_new_message(receiver); #ifndef ERTS_SMP ERTS_HOLE_CHECK(receiver); #endif + return res; +} + +void +erts_queue_message(Process* receiver, + ErtsProcLocks *receiver_locks, + ErlHeapFragment* bp, + Eterm message, + Eterm seq_trace_token +#ifdef USE_VM_PROBES + , Eterm dt_utag +#endif + ) +{ + queue_message(NULL, + receiver, + receiver_locks, + NULL, + bp, + message, + seq_trace_token +#ifdef USE_VM_PROBES + , dt_utag +#endif + ); } void @@ -576,9 +601,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg) #endif #ifdef HARD_DEBUG - ProcBin *dbg_mso_start = off_heap->mso; - ErlFunThing *dbg_fun_start = off_heap->funs; - ExternalThing *dbg_external_start = off_heap->externals; + struct erl_off_heap_header* dbg_oh_start = off_heap->first; Eterm dbg_term, dbg_token; ErlHeapFragment *dbg_bp; Uint *dbg_hp, *dbg_thp_start; @@ -752,48 +775,16 @@ copy_done: int i, j; ErlHeapFragment* frag; { - ProcBin *mso = off_heap->mso; + struct erl_off_heap_header* dbg_oh = off_heap->first; i = j = 0; - while (mso != dbg_mso_start) { - mso = mso->next; + while (dbg_oh != dbg_oh_start) { + dbg_oh = dbg_oh->next; i++; } for (frag=bp; frag; frag=frag->next) { - mso = frag->off_heap.mso; - while (mso) { - mso = mso->next; - j++; - } - } - ASSERT(i == j); - } - { - ErlFunThing *fun = off_heap->funs; - i = j = 0; - while (fun != dbg_fun_start) { - fun = fun->next; - i++; - } - for (frag=bp; frag; frag=frag->next) { - fun = frag->off_heap.funs; - while (fun) { - fun = fun->next; - j++; - } - } - ASSERT(i == j); - } - { - ExternalThing *external = off_heap->externals; - i = j = 0; - while (external != dbg_external_start) { - external = external->next; - i++; - } - for (frag=bp; frag; frag=frag->next) { - external = frag->off_heap.externals; - while (external) { - external = external->next; + dbg_oh = frag->off_heap.first; + while (dbg_oh) { + dbg_oh = dbg_oh->next; j++; } } @@ -878,7 +869,7 @@ erts_move_msg_attached_data_to_heap(Eterm **hpp, ErlOffHeap *ohp, ErlMessage *ms * Send a local message when sender & receiver processes are known. */ -void +Sint erts_send_message(Process* sender, Process* receiver, ErtsProcLocks *receiver_locks, @@ -888,6 +879,7 @@ erts_send_message(Process* sender, Uint msize; ErlHeapFragment* bp = NULL; Eterm token = NIL; + Sint res = 0; #ifdef USE_VM_PROBES DTRACE_CHARBUF(sender_name, 64); DTRACE_CHARBUF(receiver_name, 64); @@ -974,15 +966,17 @@ erts_send_message(Process* sender, msize, tok_label, tok_lastcnt, tok_serial); } #endif - erts_queue_message(receiver, - receiver_locks, - bp, - message, - token + res = queue_message(NULL, + receiver, + receiver_locks, + NULL, + bp, + message, + token #ifdef USE_VM_PROBES - , utag + , utag #endif - ); + ); BM_SWAP_TIMER(send,system); } else if (sender == receiver) { /* Drop message if receiver has a pending exit ... 
*/ @@ -1026,31 +1020,45 @@ erts_send_message(Process* sender, ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver); LINK_MESSAGE_PRIVQ(receiver, mp); + res = receiver->msg.len; + if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { trace_receive(receiver, message); } } BM_SWAP_TIMER(send,system); - return; } else { #ifdef ERTS_SMP ErlOffHeap *ohp; Eterm *hp; + erts_aint32_t state; + BM_SWAP_TIMER(send,size); msize = size_object(message); BM_SWAP_TIMER(size,send); - hp = erts_alloc_message_heap(msize,&bp,&ohp,receiver,receiver_locks); + hp = erts_alloc_message_heap_state(msize, + &bp, + &ohp, + receiver, + receiver_locks, + &state); BM_SWAP_TIMER(send,copy); message = copy_struct(message, msize, &hp, ohp); BM_MESSAGE_COPIED(msz); BM_SWAP_TIMER(copy,send); DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, tok_serial); - erts_queue_message(receiver, receiver_locks, bp, message, token + res = queue_message(sender, + receiver, + receiver_locks, + &state, + bp, + message, + token #ifdef USE_VM_PROBES - , NIL + , NIL #endif - ); + ); BM_SWAP_TIMER(send,system); #else ErlMessage* mp = message_alloc(); @@ -1080,19 +1088,16 @@ erts_send_message(Process* sender, mp->next = NULL; mp->data.attached = NULL; LINK_MESSAGE(receiver, mp); + res = receiver->msg.len; + erts_proc_notify_new_message(receiver); - if (receiver->status == P_WAITING) { - erts_add_to_runq(receiver); - } else if (receiver->status == P_SUSPENDED) { - receiver->rstatus = P_RUNABLE; - } if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { trace_receive(receiver, message); } BM_SWAP_TIMER(send,system); #endif /* #ifndef ERTS_SMP */ - return; } + return res; } /* diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h index 3e9a24ee81..2ae94965b1 100644 --- a/erts/emulator/beam/erl_message.h +++ b/erts/emulator/beam/erl_message.h @@ -90,7 +90,7 @@ typedef struct { ErlMessage* first; ErlMessage** last; /* point to the last next pointer */ ErlMessage** save; - int len; /* queue length */ + Sint len; /* queue length */ /* * The following two fields are used by the recv_mark/1 and @@ -105,7 +105,7 @@ typedef struct { typedef struct { ErlMessage* first; ErlMessage** last; /* point to the last next pointer */ - int len; /* queue length */ + Sint len; /* queue length */ } ErlMessageInQueue; #endif @@ -125,23 +125,23 @@ typedef struct { #ifdef ERTS_SMP /* Move in message queue to end of private message queue */ -#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \ -do { \ - if ((P)->msg_inq.first) { \ - *(P)->msg.last = (P)->msg_inq.first; \ - (P)->msg.last = (P)->msg_inq.last; \ - (P)->msg.len += (P)->msg_inq.len; \ - (P)->msg_inq.first = NULL; \ - (P)->msg_inq.last = &(P)->msg_inq.first; \ - (P)->msg_inq.len = 0; \ - } \ +#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \ +do { \ + if ((P)->u.alive.msg_inq.first) { \ + *(P)->msg.last = (P)->u.alive.msg_inq.first; \ + (P)->msg.last = (P)->u.alive.msg_inq.last; \ + (P)->msg.len += (P)->u.alive.msg_inq.len; \ + (P)->u.alive.msg_inq.first = NULL; \ + (P)->u.alive.msg_inq.last = &(P)->u.alive.msg_inq.first; \ + (P)->u.alive.msg_inq.len = 0; \ + } \ } while (0) /* Add message last in message queue */ #define LINK_MESSAGE(p, mp) do { \ - *(p)->msg_inq.last = (mp); \ - (p)->msg_inq.last = &(mp)->next; \ - (p)->msg_inq.len++; \ + *(p)->u.alive.msg_inq.last = (mp); \ + (p)->u.alive.msg_inq.last = &(mp)->next; \ + (p)->u.alive.msg_inq.len++; \ } while(0) #else @@ -234,7 +234,7 @@ void erts_queue_message(Process*, ErtsProcLocks*, ErlHeapFragment*, Eterm, Eterm #endif ); void 
erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm); -void erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned); +Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned); void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp); void erts_move_msg_mbuf_to_heap(Eterm**, ErlOffHeap*, ErlMessage *); diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c index 358c67bf20..5a6fb8589f 100644 --- a/erts/emulator/beam/erl_mtrace.c +++ b/erts/emulator/beam/erl_mtrace.c @@ -611,7 +611,7 @@ void erts_mtrace_init(char *receiver, char *nodename) if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN) != 0) hostname[0] = '\0'; hostname[MAXHOSTNAMELEN-1] = '\0'; - sys_get_pid(pid); + sys_get_pid(pid, sizeof(pid)); write_trace_header(nodename ? nodename : "", pid, hostname); erts_mtrace_update_heap_size(); } diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index 4109c20fa7..632d756481 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -315,6 +315,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, #endif Eterm receiver = to_pid->pid; int flush_me = 0; + int scheduler = erts_get_scheduler_id() != 0; if (env != NULL) { c_p = env->proc; @@ -334,8 +335,11 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, #if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) rp_had_locks = rp_locks; #endif - rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, - receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC); + + rp = (scheduler + ? erts_proc_lookup(receiver) + : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, + receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC)); if (rp == NULL) { ASSERT(env == NULL || receiver != c_p->id); return 0; @@ -362,12 +366,12 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, , NIL #endif ); - if (rp_locks) { - ERTS_SMP_LC_ASSERT(rp_locks == (rp_had_locks | (ERTS_PROC_LOCK_MSGQ | - ERTS_PROC_LOCK_STATUS))); - erts_smp_proc_unlock(rp, (ERTS_PROC_LOCK_MSGQ | ERTS_PROC_LOCK_STATUS)); - } - erts_smp_proc_dec_refc(rp); + if (c_p == rp) + rp_locks &= ~ERTS_PROC_LOCK_MAIN; + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + if (!scheduler) + erts_smp_proc_dec_refc(rp); if (flush_me) { cache_env(env); } @@ -1392,6 +1396,44 @@ size_t enif_sizeof_resource(void* obj) return ERTS_MAGIC_BIN_DATA_SIZE(bin) - offsetof(ErlNifResource,data); } + +void* enif_dlopen(const char* lib, + void (*err_handler)(void*,const char*), void* err_arg) +{ + ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT; + void* handle; + void* init_func; + if (erts_sys_ddll_open2(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) { + if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) == ERL_DE_NO_ERROR) { + erts_sys_ddll_call_nif_init(init_func); + } + } + else { + if (err_handler != NULL) { + (*err_handler)(err_arg, errdesc.str); + } + handle = NULL; + } + erts_sys_ddll_free_error(&errdesc); + return handle; +} + +void* enif_dlsym(void* handle, const char* symbol, + void (*err_handler)(void*,const char*), void* err_arg) +{ + ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT; + void* ret; + if (erts_sys_ddll_sym2(handle, symbol, &ret, &errdesc) != ERL_DE_NO_ERROR) { + if (err_handler != NULL) { + (*err_handler)(err_arg, errdesc.str); + } + erts_sys_ddll_free_error(&errdesc); + return NULL; + } + return ret; +} + + /*************************************************************************** ** load_nif/2 ** ***************************************************************************/ 
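The new enif_dlopen()/enif_dlsym() entries are thin wrappers over the erts_sys_ddll layer that report failures through a caller-supplied callback rather than an error code the caller must decode. A portable analogue built directly on POSIX dlopen()/dlsym(); the library name in main() is only an example and the whole sketch is illustrative, not the ERTS implementation:

#include <dlfcn.h>
#include <stdio.h>

static void *open_lib(const char *lib,
                      void (*err)(void *, const char *), void *err_arg)
{
    void *h = dlopen(lib, RTLD_NOW);
    if (!h && err)
        err(err_arg, dlerror());
    return h;
}

static void *find_sym(void *h, const char *sym,
                      void (*err)(void *, const char *), void *err_arg)
{
    void *p = dlsym(h, sym);
    if (!p && err)
        err(err_arg, dlerror());
    return p;
}

static void print_err(void *arg, const char *msg)
{
    fprintf(stderr, "%s: %s\n", (const char *)arg, msg ? msg : "unknown error");
}

int main(void)
{
    void *h = open_lib("libm.so.6", print_err, "dlopen");  /* platform-specific name */
    if (h)
        printf("cos at %p\n", find_sym(h, "cos", print_err, "dlsym"));
    return 0;
}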
@@ -1524,6 +1566,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) if (len < 0) { BIF_ERROR(BIF_P, BADARG); } + lib_name = (char *) erts_alloc(ERTS_ALC_T_TMP, len + 1); if (intlist_to_buf(BIF_ARG_1, lib_name, len) != len) { @@ -1532,6 +1575,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } lib_name[len] = '\0'; + if (!erts_try_seize_code_write_permission(BIF_P)) { + erts_free(ERTS_ALC_T_TMP, lib_name); + ERTS_BIF_YIELD2(bif_export[BIF_load_nif_2], + BIF_P, BIF_ARG_1, BIF_ARG_2); + } + /* Block system (is this the right place to do it?) */ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_smp_thr_progress_block(); @@ -1545,11 +1594,11 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) ASSERT(caller != NULL); mod_atom = caller[0]; ASSERT(is_atom(mod_atom)); - mod=erts_get_module(mod_atom); + mod=erts_get_module(mod_atom, erts_active_code_ix()); ASSERT(mod != NULL); - if (!in_area(caller, mod->code, mod->code_length)) { - ASSERT(in_area(caller, mod->old_code, mod->old_code_length)); + if (!in_area(caller, mod->curr.code, mod->curr.code_length)) { + ASSERT(in_area(caller, mod->old.code, mod->old.code_length)); ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old " "module '%T' not allowed", mod_atom); @@ -1595,7 +1644,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) BeamInstr** code_pp; ErlNifFunc* f = &entry->funcs[i]; if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom) - || (code_pp = get_func_pp(mod->code, f_atom, f->arity))==NULL) { + || (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) { ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u", mod_atom, f->name, f->arity); } @@ -1624,18 +1673,18 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) erts_refc_init(&lib->rt_dtor_cnt, 0); lib->mod = mod; env.mod_nif = lib; - if (mod->nif != NULL) { /* Reload */ + if (mod->curr.nif != NULL) { /* Reload */ int k; - lib->priv_data = mod->nif->priv_data; + lib->priv_data = mod->curr.nif->priv_data; - ASSERT(mod->nif->entry != NULL); + ASSERT(mod->curr.nif->entry != NULL); if (entry->reload == NULL) { ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library."); goto error; } /* Check that no NIF is removed */ - for (k=0; k < mod->nif->entry->num_of_funcs; k++) { - ErlNifFunc* old_func = &mod->nif->entry->funcs[k]; + for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) { + ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k]; for (i=0; i < entry->num_of_funcs; i++) { if (old_func->arity == entry->funcs[i].arity && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) { @@ -1656,24 +1705,24 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful."); } else { - mod->nif->entry = NULL; /* to prevent 'unload' callback */ - erts_unload_nif(mod->nif); + mod->curr.nif->entry = NULL; /* to prevent 'unload' callback */ + erts_unload_nif(mod->curr.nif); reload_warning = 1; } } else { lib->priv_data = NULL; - if (mod->old_nif != NULL) { /* Upgrade */ - void* prev_old_data = mod->old_nif->priv_data; + if (mod->old.nif != NULL) { /* Upgrade */ + void* prev_old_data = mod->old.nif->priv_data; if (entry->upgrade == NULL) { ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library."); goto error; } erts_pre_nif(&env, BIF_P, lib); - veto = entry->upgrade(&env, &lib->priv_data, &mod->old_nif->priv_data, BIF_ARG_2); + veto = entry->upgrade(&env, &lib->priv_data, &mod->old.nif->priv_data, BIF_ARG_2); erts_post_nif(&env); if (veto) { - mod->old_nif->priv_data = prev_old_data; + mod->old.nif->priv_data = prev_old_data; ret 
= load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful."); } /*else if (mod->old_nif->priv_data != prev_old_data) { @@ -1693,20 +1742,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) /* ** Everything ok, patch the beam code with op_call_nif */ - mod->nif = lib; + mod->curr.nif = lib; for (i=0; i < entry->num_of_funcs; i++) { BeamInstr* code_ptr; erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom); - code_ptr = *get_func_pp(mod->code, f_atom, entry->funcs[i].arity); + code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity); if (code_ptr[1] == 0) { code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif); } else { /* Function traced, patch the original instruction word */ - BpData** bps = (BpData**) code_ptr[1]; - BpData* bp = (BpData*) bps[erts_bp_sched2ix()]; - bp->orig_instr = (BeamInstr) BeamOp(op_call_nif); + GenericBp* g = (GenericBp *) code_ptr[1]; + ASSERT(code_ptr[5+0] == + (BeamInstr) BeamOp(op_i_generic_breakpoint)); + g->orig_instr = (BeamInstr) BeamOp(op_call_nif); } code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr; code_ptr[5+2] = (BeamInstr) lib; @@ -1726,6 +1776,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) erts_smp_thr_progress_unblock(); erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_release_code_write_permission(); erts_free(ERTS_ALC_T_TMP, lib_name); if (reload_warning) { diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h index 50f99c90c4..93e56332e1 100644 --- a/erts/emulator/beam/erl_nif.h +++ b/erts/emulator/beam/erl_nif.h @@ -187,11 +187,7 @@ extern TWinDynNifCallbacks WinDynNifCallbacks; #else # define ERL_NIF_INIT_GLOB # define ERL_NIF_INIT_BODY -# if defined(VXWORKS) -# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _init(void) -# else -# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void) -# endif +# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void) #endif diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h index 6396af09d0..51ff1eaa48 100644 --- a/erts/emulator/beam/erl_nif_api_funcs.h +++ b/erts/emulator/beam/erl_nif_api_funcs.h @@ -138,6 +138,8 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint64,(ErlNifEnv*, ErlNifUInt64)); ERL_NIF_API_FUNC_DECL(int,enif_is_exception,(ErlNifEnv*, ERL_NIF_TERM term)); ERL_NIF_API_FUNC_DECL(int,enif_make_reverse_list,(ErlNifEnv*, ERL_NIF_TERM term, ERL_NIF_TERM *list)); ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term)); +ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg)); +ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg)); /* ** Add new entries here to keep compatibility on Windows!!! 
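/*
 * [Editor's sketch, not part of the patch] The two ERL_NIF_API_FUNC_DECL
 * lines above add enif_dlopen() and enif_dlsym() to the NIF API. Below is a
 * minimal illustration of how a NIF's load callback might use them, assuming
 * they behave like dlopen()/dlsym() with an error callback; only the
 * signatures come from erl_nif_api_funcs.h, while the library name
 * "libhelper.so", the symbol "helper_init" and dl_error() are hypothetical.
 * The usual ERL_NIF_INIT boilerplate is omitted to keep the sketch short.
 */
#include <stdio.h>
#include "erl_nif.h"

static void dl_error(void* err_arg, const char* reason)
{
    /* err_handler: invoked with err_arg and a textual reason on failure */
    fprintf(stderr, "dl failure (%s): %s\n", (const char*) err_arg, reason);
}

static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    void* handle;
    void* init_fp;
    (void) env; (void) load_info;

    handle = enif_dlopen("libhelper.so", dl_error, "libhelper.so");
    if (handle == NULL)
        return 1;                     /* non-zero aborts loading the NIF */
    init_fp = enif_dlsym(handle, "helper_init", dl_error, "helper_init");
    if (init_fp == NULL)
        return 1;
    *priv_data = init_fp;             /* keep the resolved symbol around */
    return 0;
}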
@@ -260,6 +262,8 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term)); # define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception) # define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list) # define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number) +# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen) +# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym) /* ** Add new entries here diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h index 329a2204cc..7b4cb7b042 100644 --- a/erts/emulator/beam/erl_node_container_utils.h +++ b/erts/emulator/beam/erl_node_container_utils.h @@ -128,8 +128,47 @@ extern int erts_use_r9_pids_ports; * Pids * \* */ -#define internal_pid_index(x) (internal_pid_data((x)) \ - & erts_process_tab_index_mask) +#define erts_max_processes erts_proc.max + +typedef struct { + erts_smp_atomic_t *tab; + int max; + int tab_cache_lines; + int pix_per_cache_line; + int pix_cl_mask; + int pix_cl_shift; + int pix_cli_mask; + int pix_cli_shift; +} ErtsProcTab; + +extern ErtsProcTab erts_proc; + +ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data) +{ + int n, pix; + + n = (int) pid_data; + if (erts_proc.pix_cl_mask) { + pix = ((n & erts_proc.pix_cl_mask) << erts_proc.pix_cl_shift); + pix += ((n >> erts_proc.pix_cli_shift) & erts_proc.pix_cli_mask); + } + else { + n %= erts_proc.max; + pix = n % erts_proc.tab_cache_lines; + pix *= erts_proc.pix_per_cache_line; + pix += n / erts_proc.tab_cache_lines; + } + ASSERT(0 <= pix && pix < erts_proc.max); + return pix; +} + +#endif + +#define internal_pid_index(x) erts_pid_data2ix(internal_pid_data((x))) #define internal_pid_node_name(x) (internal_pid_node((x))->sysname) #define external_pid_node_name(x) (external_pid_node((x))->sysname) diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index 5574cb0ac4..40837d3817 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -1298,21 +1298,22 @@ setup_reference_table(void) UnUseTmpHeapNoproc(3); /* Insert all processes */ - for (i = 0; i < erts_max_processes; i++) - if (process_tab[i]) { + for (i = 0; i < erts_max_processes; i++) { + Process *proc = erts_pix2proc(i); + if (proc) { ErlMessage *msg; /* Insert Heap */ - insert_offheap(&(process_tab[i]->off_heap), + insert_offheap(&(proc->off_heap), HEAP_REF, - process_tab[i]->id); + proc->id); /* Insert message buffers */ - for(hfp = process_tab[i]->mbuf; hfp; hfp = hfp->next) + for(hfp = proc->mbuf; hfp; hfp = hfp->next) insert_offheap(&(hfp->off_heap), HEAP_REF, - process_tab[i]->id); + proc->id); /* Insert msg msg buffers */ - for (msg = process_tab[i]->msg.first; msg; msg = msg->next) { + for (msg = proc->msg.first; msg; msg = msg->next) { ErlHeapFragment *heap_frag = NULL; if (msg->data.attached) { if (is_value(ERL_MESSAGE_TERM(msg))) @@ -1320,7 +1321,7 @@ setup_reference_table(void) else { if (msg->data.dist_ext->dep) insert_dist_entry(msg->data.dist_ext->dep, - HEAP_REF, process_tab[i]->id, 0); + HEAP_REF, proc->id, 0); if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) heap_frag = erts_dist_ext_trailer(msg->data.dist_ext); } @@ -1328,10 +1329,10 @@ setup_reference_table(void) if (heap_frag) insert_offheap(&(heap_frag->off_heap), HEAP_REF, - process_tab[i]->id); + proc->id); } #ifdef ERTS_SMP - for (msg = process_tab[i]->msg_inq.first; msg; msg = msg->next) { + for (msg 
= proc->u.alive.msg_inq.first; msg; msg = msg->next) { ErlHeapFragment *heap_frag = NULL; if (msg->data.attached) { if (is_value(ERL_MESSAGE_TERM(msg))) @@ -1339,7 +1340,7 @@ setup_reference_table(void) else { if (msg->data.dist_ext->dep) insert_dist_entry(msg->data.dist_ext->dep, - HEAP_REF, process_tab[i]->id, 0); + HEAP_REF, proc->id, 0); if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) heap_frag = erts_dist_ext_trailer(msg->data.dist_ext); } @@ -1347,21 +1348,22 @@ setup_reference_table(void) if (heap_frag) insert_offheap(&(heap_frag->off_heap), HEAP_REF, - process_tab[i]->id); + proc->id); } #endif /* Insert links */ - if(process_tab[i]->nlinks) - insert_links(process_tab[i]->nlinks, process_tab[i]->id); - if(process_tab[i]->monitors) - insert_monitors(process_tab[i]->monitors, process_tab[i]->id); + if(proc->nlinks) + insert_links(proc->nlinks, proc->id); + if(proc->monitors) + insert_monitors(proc->monitors, proc->id); /* Insert controller */ { - DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(process_tab[i]); + DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc); if (dep) - insert_dist_entry(dep, CTRL_REF, process_tab[i]->id, 0); + insert_dist_entry(dep, CTRL_REF, proc->id, 0); } } + } #ifdef ERTS_SMP erts_foreach_sys_msg_in_q(insert_sys_msg); diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index 0f1a0d441a..b6bc59a1c3 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -56,12 +56,6 @@ #define ERTS_PORT_IS_IN_RUNQ(RQ, P) \ ((P)->sched.next || (P)->sched.prev || (RQ)->ports.start == (P)) -#define ERTS_PORT_NOT_IN_RUNQ(P) \ -do { \ - (P)->sched.prev = NULL; \ - (P)->sched.next = NULL; \ -} while (0) - #ifdef USE_VM_PROBES #define DTRACE_DRIVER(PROBE_NAME, PP) \ if (DTRACE_ENABLED(driver_ready_input)) { \ @@ -167,7 +161,7 @@ enqueue_port(ErtsRunQueue *runq, Port *pp) { ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); pp->sched.next = NULL; - pp->sched.prev = runq->ports.end; + pp->sched.in_runq = 1; if (runq->ports.end) { ASSERT(runq->ports.start); runq->ports.end->sched.next = pp; @@ -177,39 +171,10 @@ enqueue_port(ErtsRunQueue *runq, Port *pp) runq->ports.start = pp; } - runq->ports.info.len++; - if (runq->ports.info.max_len < runq->ports.info.len) - runq->ports.info.max_len = runq->ports.info.len; - runq->len++; - if (runq->max_len < runq->len) - runq->max_len = runq->len; runq->ports.end = pp; ASSERT(runq->ports.start && runq->ports.end); -} -static ERTS_INLINE void -dequeue_port(ErtsRunQueue *runq, Port *pp) -{ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); - if (pp->sched.next) - pp->sched.next->sched.prev = pp->sched.prev; - else { - ASSERT(runq->ports.end == pp); - runq->ports.end = pp->sched.prev; - } - if (pp->sched.prev) - pp->sched.prev->sched.next = pp->sched.next; - else { - ASSERT(runq->ports.start == pp); - runq->ports.start = pp->sched.next; - } - - ASSERT(runq->ports.info.len > 0); - runq->ports.info.len--; - ASSERT(runq->len > 0); - runq->len--; - ASSERT(runq->ports.start || !runq->ports.end); - ASSERT(runq->ports.end || !runq->ports.start); + erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); } static ERTS_INLINE Port * @@ -222,16 +187,11 @@ pop_port(ErtsRunQueue *runq) } else { runq->ports.start = runq->ports.start->sched.next; - if (runq->ports.start) - runq->ports.start->sched.prev = NULL; - else { + if (!runq->ports.start) { ASSERT(runq->ports.end == pp); runq->ports.end = NULL; } - ASSERT(runq->ports.info.len > 0); - runq->ports.info.len--; - ASSERT(runq->len > 0); - 
runq->len--; + erts_smp_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); } ASSERT(runq->ports.start || !runq->ports.end); @@ -285,7 +245,7 @@ check_port_queue(ErtsRunQueue *runq, Port *chk_pp, int inq) } ASSERT(no_forward == no_backward); } - ASSERT(no_forward == runq->ports.info.len); + ASSERT(no_forward == RUNQ_READ_LEN(&runq->ports.info.len)); if (chk_pp) { if (chk_pp->sched.taskq || chk_pp->sched.exe_taskq) { ASSERT(chk_pp->sched.taskq != chk_pp->sched.exe_taskq); @@ -467,10 +427,11 @@ erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp) ErtsPortTaskQueue *ptqp; ErtsPortTask *ptp; Port *pp; - int port_is_dequeued = 0; pp = &erts_port[internal_port_index(id)]; runq = erts_port_runq(pp); + if (!runq) + return 1; ptp = handle2task(pthp); @@ -505,22 +466,12 @@ erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp) if (ptqp->first || pp->sched.taskq != ptqp) ptqp = NULL; - else { + else pp->sched.taskq = NULL; - if (!pp->sched.exe_taskq) { - dequeue_port(runq, pp); - ERTS_PORT_NOT_IN_RUNQ(pp); - port_is_dequeued = 1; - } - } ERTS_PT_CHK_PRES_PORTQ(runq, pp); erts_smp_runq_unlock(runq); - - if (erts_system_profile_flags.runnable_ports && port_is_dequeued) { - profile_runnable_port(pp, am_inactive); - } port_task_free(ptp); if (ptqp) @@ -573,26 +524,29 @@ erts_port_task_schedule(Eterm id, ERTS_PT_CHK_PRES_PORTQ(runq, pp); - if (!pp->sched.taskq) { - pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp); - enq_port = !pp->sched.exe_taskq; - } - + if (!pp->sched.taskq && !pp->sched.in_runq && !pp->sched.exe_taskq) { #ifdef ERTS_SMP - if (enq_port) { ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); if (xrunq) { /* Port emigrated ... */ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); erts_smp_runq_unlock(runq); - runq = xrunq; + runq = erts_port_runq(pp); + if (!runq) + return -1; } - } + enq_port = !pp->sched.taskq && !pp->sched.in_runq && !pp->sched.exe_taskq; +#else + enq_port = 1; #endif + } + + ASSERT(!enq_port + || !(ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_SUSPENDED)); - ASSERT(!enq_port || !(runq->flags & ERTS_RUNQ_FLG_SUSPENDED)); + if (!pp->sched.taskq) + pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp); - ASSERT(pp->sched.taskq); ASSERT(ptp); ptp->type = type; @@ -633,7 +587,7 @@ erts_port_task_schedule(Eterm id, #elif defined(DEBUG) if (!enq_port && !pp->sched.exe_taskq) { /* We should be in port run q */ - ASSERT(pp->sched.prev || runq->ports.start == pp); + ASSERT(pp->sched.in_runq); } #endif #endif @@ -661,63 +615,54 @@ void erts_port_task_free_port(Port *pp) { ErtsRunQueue *runq; - int port_is_dequeued = 0; + ErtsPortTaskQueue *ptqp; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD)); runq = erts_port_runq(pp); ASSERT(runq); ERTS_PT_CHK_PRES_PORTQ(runq, pp); - if (pp->sched.exe_taskq) { + ptqp = pp->sched.exe_taskq; + if (ptqp) { /* I (this thread) am currently executing this port, free it when scheduled out... 
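/*
 * [Editor's sketch, not part of the patch] The erl_port_task.c hunks above
 * drop the doubly linked port run queue (sched.prev plus dequeue_port()) in
 * favour of a single sched.in_runq flag: a port is only ever taken from the
 * head by pop_port(), and one whose task queue emptied in the meantime is
 * handled as inactive when it reaches the head instead of being unlinked
 * from the middle. A rough stand-alone sketch of that idea; SketchPort and
 * SketchRunQ are illustrative types, not the emulator's real structs.
 */
typedef struct SketchPort {
    struct SketchPort *next;   /* singly linked: no prev pointer needed */
    int in_runq;               /* set while the port sits in the queue  */
    int has_tasks;             /* stand-in for sched.taskq != NULL      */
} SketchPort;

typedef struct {
    SketchPort *first;
    SketchPort *last;
} SketchRunQ;

static void sketch_enqueue(SketchRunQ *q, SketchPort *p)
{
    p->next = NULL;
    p->in_runq = 1;
    if (q->last)
        q->last->next = p;
    else
        q->first = p;
    q->last = p;
}

static SketchPort *sketch_pop(SketchRunQ *q)
{
    SketchPort *p;
    /* skip ports that lost their work instead of unlinking them early */
    while ((p = q->first) != NULL) {
        q->first = p->next;
        if (!q->first)
            q->last = NULL;
        p->in_runq = 0;
        if (p->has_tasks)
            return p;
    }
    return NULL;
}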
*/ - ErtsPortTask *ptp = port_task_alloc(); + ErtsPortTask *ptp; + enqueue_free: + ptp = port_task_alloc(); erts_smp_port_state_lock(pp); pp->status &= ~ERTS_PORT_SFLG_CLOSING; pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED; erts_may_save_closed_port(pp); erts_smp_port_state_unlock(pp); - ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 1); + ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 1); ptp->type = ERTS_PORT_TASK_FREE; ptp->event = (ErlDrvEvent) -1; ptp->event_data = NULL; set_handle(ptp, NULL); - push_task(pp->sched.exe_taskq, ptp); + push_task(ptqp, ptp); ERTS_PT_CHK_PRES_PORTQ(runq, pp); erts_smp_runq_unlock(runq); } else { - ErtsPortTaskQueue *ptqp = pp->sched.taskq; - if (ptqp) { - dequeue_port(runq, pp); - ERTS_PORT_NOT_IN_RUNQ(pp); - port_is_dequeued = 1; + if (pp->sched.in_runq) { + ptqp = pp->sched.taskq; + if (!ptqp) + pp->sched.taskq = ptqp = port_taskq_init(port_taskq_alloc(), pp); + goto enqueue_free; } + ASSERT(!pp->sched.taskq); erts_smp_port_state_lock(pp); pp->status &= ~ERTS_PORT_SFLG_CLOSING; pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED; erts_may_save_closed_port(pp); erts_smp_port_state_unlock(pp); -#ifdef ERTS_SMP erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */ -#endif - ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */ + ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */ handle_remaining_tasks(runq, pp); /* May release runq lock */ ASSERT(!pp->sched.exe_taskq && (!ptqp || !ptqp->first)); pp->sched.taskq = NULL; ERTS_PT_CHK_PRES_PORTQ(runq, pp); -#ifndef ERTS_SMP - ASSERT(pp->status & ERTS_PORT_SFLG_PORT_DEBUG); - erts_port_status_set(pp, ERTS_PORT_SFLG_FREE); -#endif erts_smp_runq_unlock(runq); - - if (erts_system_profile_flags.runnable_ports && port_is_dequeued) { - profile_runnable_port(pp, am_inactive); - } - - if (ptqp) - port_taskq_free(ptqp); } } @@ -738,7 +683,6 @@ typedef struct { int erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) { - int port_was_enqueued = 0; Port *pp; ErtsPortTaskQueue *ptqp; ErtsPortTask *ptp; @@ -757,11 +701,18 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) goto done; } - ERTS_PORT_NOT_IN_RUNQ(pp); + ASSERT(pp->sched.in_runq); + pp->sched.in_runq = 0; + if (!pp->sched.taskq) { + if (erts_system_profile_flags.runnable_ports) + profile_runnable_port(pp, am_inactive); + res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) + != (erts_aint_t) 0); + goto done; + } *curr_port_pp = pp; - ASSERT(pp->sched.taskq); ASSERT(pp->sched.taskq->first); ptqp = pp->sched.taskq; pp->sched.taskq = NULL; @@ -823,12 +774,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) handle_remaining_tasks(runq, pp); ASSERT(!ptqp->first && (!pp->sched.taskq || !pp->sched.taskq->first)); -#ifdef ERTS_SMP erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */ - ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */ -#else - erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE); -#endif + ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */ port_task_free(ptp); if (pp->sched.taskq) @@ -918,6 +865,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) if (!pp->sched.taskq) { ASSERT(pp->sched.exe_taskq); pp->sched.exe_taskq = NULL; + if (erts_system_profile_flags.runnable_ports) + profile_runnable_port(pp, am_inactive); } else { #ifdef ERTS_SMP @@ -940,14 +889,20 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) else { /* Port emigrated ... 
*/ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); - enqueue_port(xrunq, pp); - ASSERT(pp->sched.exe_taskq); - pp->sched.exe_taskq = NULL; - erts_smp_runq_unlock(xrunq); - erts_smp_notify_inc_runq(xrunq); + erts_smp_runq_unlock(runq); + + xrunq = erts_port_runq(pp); + if (xrunq) { + enqueue_port(xrunq, pp); + ASSERT(pp->sched.exe_taskq); + pp->sched.exe_taskq = NULL; + erts_smp_runq_unlock(xrunq); + erts_smp_notify_inc_runq(xrunq); + } + + erts_smp_runq_lock(runq); } #endif - port_was_enqueued = 1; } res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) @@ -957,10 +912,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) port_taskq_free(ptqp); - if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) { - profile_runnable_port(pp, am_inactive); - } - /* trace port scheduling, out */ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { trace_sched_ports(pp, am_out); @@ -984,8 +935,9 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) #endif done: - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + runq->scheduler->reductions += reds; + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds); return res; @@ -1049,63 +1001,33 @@ erts_port_is_scheduled(Port *pp) { int res; ErtsRunQueue *runq = erts_port_runq(pp); + if (!runq) + return 0; res = pp->sched.taskq || pp->sched.exe_taskq; erts_smp_runq_unlock(runq); return res; } #ifdef ERTS_SMP +void +erts_enqueue_port(ErtsRunQueue *rq, Port *pp) +{ + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ASSERT(rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); + ASSERT(pp->sched.in_runq); + enqueue_port(rq, pp); +} -ErtsMigrateResult -erts_port_migrate(Port *prt, int *prt_locked, - ErtsRunQueue *from_rq, int *from_locked, - ErtsRunQueue *to_rq, int *to_locked) +Port * +erts_dequeue_port(ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(*from_locked); - ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked); - ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked); - - if (!*from_locked || !*to_locked) { - if (from_rq < to_rq) { - if (!*to_locked) { - if (!*from_locked) - erts_smp_runq_lock(from_rq); - erts_smp_runq_lock(to_rq); - } - else if (erts_smp_runq_trylock(from_rq) == EBUSY) { - erts_smp_runq_unlock(to_rq); - erts_smp_runq_lock(from_rq); - erts_smp_runq_lock(to_rq); - } - } - else { - if (!*from_locked) { - if (!*to_locked) - erts_smp_runq_lock(to_rq); - erts_smp_runq_lock(from_rq); - } - else if (erts_smp_runq_trylock(to_rq) == EBUSY) { - erts_smp_runq_unlock(from_rq); - erts_smp_runq_lock(to_rq); - erts_smp_runq_lock(from_rq); - } - } - *to_locked = *from_locked = 1; - } - ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked); - ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked); - - /* Refuse to migrate to a suspended run queue */ - if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED) - return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED; - if (from_rq != (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue)) - return ERTS_MIGRATE_FAILED_RUNQ_CHANGED; - if (!ERTS_PORT_IS_IN_RUNQ(from_rq, prt)) - return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ; - dequeue_port(from_rq, prt); - erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) to_rq); - enqueue_port(to_rq, prt); - return ERTS_MIGRATE_SUCCESS; + Port *pp; + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + pp = pop_port(rq); + ASSERT(!pp + || rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); + ASSERT(!pp || pp->sched.in_runq); + return pp; } #endif diff --git a/erts/emulator/beam/erl_port_task.h 
b/erts/emulator/beam/erl_port_task.h index d7104e1143..fd88b1c1ff 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -62,7 +62,7 @@ typedef struct ErtsPortTaskQueue_ ErtsPortTaskQueue; typedef struct { Port *next; - Port *prev; + int in_runq; ErtsPortTaskQueue *taskq; ErtsPortTaskQueue *exe_taskq; } ErtsPortTaskSched; @@ -92,7 +92,7 @@ ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp) { ptsp->next = NULL; - ptsp->prev = NULL; + ptsp->in_runq = 0; ptsp->taskq = NULL; ptsp->exe_taskq = NULL; } @@ -123,13 +123,10 @@ int erts_port_task_schedule(Eterm, ErlDrvEventData); void erts_port_task_free_port(Port *); int erts_port_is_scheduled(Port *); + #ifdef ERTS_SMP -ErtsMigrateResult erts_port_migrate(Port *, - int *, - ErtsRunQueue *, - int *, - ErtsRunQueue *, - int *); +void erts_enqueue_port(ErtsRunQueue *rq, Port *pp); +Port *erts_dequeue_port(ErtsRunQueue *rq); #endif #undef ERTS_INCLUDE_SCHEDULER_INTERNALS #endif /* ERL_PORT_TASK_H__ */ diff --git a/erts/emulator/beam/erl_posix_str.c b/erts/emulator/beam/erl_posix_str.c index 02db10905b..deb7e3e173 100644 --- a/erts/emulator/beam/erl_posix_str.c +++ b/erts/emulator/beam/erl_posix_str.c @@ -619,6 +619,7 @@ erl_errno_id(error) case WSAEINVALIDPROVIDER: return "einvalidprovider"; #endif #ifdef WSAEPROVIDERFAILEDINIT + /* You could get this if SYSTEMROOT env variable is set incorrectly */ case WSAEPROVIDERFAILEDINIT: return "eproviderfailedinit"; #endif #ifdef WSASYSCALLFAILURE diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c index 34da9cab84..2320b64295 100644 --- a/erts/emulator/beam/erl_printf_term.c +++ b/erts/emulator/beam/erl_printf_term.c @@ -437,7 +437,10 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount, } break; case BINARY_DEF: - { + if (header_is_bin_matchstate(*boxed_val(wobj))) { + PRINT_STRING(res, fn, arg, "#MatchState"); + } + else { ProcBin* pb = (ProcBin *) binary_val(wobj); if (pb->size == 1) PRINT_STRING(res, fn, arg, "<<1 byte>>"); diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 0fa2def5af..61554780c4 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -43,6 +43,9 @@ #include "erl_async.h" #include "dtrace-wrapper.h" +#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0) +#define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2) + #define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS) #define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \ (ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED/2) @@ -94,34 +97,194 @@ #define HIGH_BIT (1 << PRIORITY_HIGH) #define NORMAL_BIT (1 << PRIORITY_NORMAL) #define LOW_BIT (1 << PRIORITY_LOW) +#define PORT_BIT (1 << ERTS_PORT_PRIO_LEVEL) -#define ERTS_MAYBE_SAVE_TERMINATING_PROCESS(P) \ -do { \ - ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx)); \ - if (saved_term_procs.end) \ - save_terminating_process((P)); \ -} while (0) +#define ERTS_EMPTY_RUNQ(RQ) \ + ((ERTS_RUNQ_FLGS_GET_NOB((RQ)) & ERTS_RUNQ_FLGS_QMASK) == 0 \ + && (RQ)->misc.start == NULL) -#define ERTS_EMPTY_RUNQ(RQ) \ - ((RQ)->len == 0 && (RQ)->misc.start == NULL) +#undef RUNQ_READ_RQ +#undef RUNQ_SET_RQ +#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_smp_atomic_read_nob((X))) +#define RUNQ_SET_RQ(X, RQ) erts_smp_atomic_set_nob((X), (erts_aint_t) (RQ)) + +#ifdef DEBUG +# if defined(ARCH_64) && !HALFWORD_HEAP +# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \ + (RUNQ_SET_RQ((RQP), (0xdeadbeefdead0003LL | ((N) << 4))) +# define 
ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \ +do { \ + ASSERT((RQP) != NULL); \ + ASSERT(((((Uint) (RQP)) & ((Uint) 0x3))) == ((Uint) 0)); \ + ASSERT((((Uint) (RQP)) & ~((Uint) 0xffff)) != ((Uint) 0xdeadbeefdead0000LL));\ +} while (0) +# else +# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \ + (RUNQ_SET_RQ((RQP), (0xdead0003 | ((N) << 4)))) +# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \ +do { \ + ASSERT((RQP) != NULL); \ + ASSERT(((((UWord) (RQP)) & ((UWord) 1))) == ((UWord) 0)); \ + ASSERT((((UWord) (RQP)) & ~((UWord) 0xffff)) != ((UWord) 0xdead0000)); \ +} while (0) +# endif +#else +# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) +# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) +#endif #define ERTS_EMPTY_RUNQ_PORTS(RQ) \ - ((RQ)->ports.info.len == 0 && (RQ)->misc.start == NULL) + (RUNQ_READ_LEN(&(RQ)->ports.info.len) == 0 && (RQ)->misc.start == NULL) extern BeamInstr beam_apply[]; extern BeamInstr beam_exit[]; extern BeamInstr beam_continue_exit[]; -static Sint p_last; -static Sint p_next; -static Sint p_serial; -static Uint p_serial_mask; -static Uint p_serial_shift; +#ifdef ARCH_32 + +union { + erts_smp_dw_atomic_t pid_data; + char align[ERTS_CACHE_LINE_SIZE]; +} last erts_align_attribute(ERTS_CACHE_LINE_SIZE); + + +static ERTS_INLINE Uint64 +dw_aint_to_uint64(erts_dw_aint_t *dw) +{ +#ifdef ETHR_SU_DW_NAINT_T__ + return (Uint64) dw->dw_sint; +#else + Uint64 res; + res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]); + res <<= 32; + res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]); + return res; +#endif +} + +static void +unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val) +{ +#ifdef ETHR_SU_DW_NAINT_T__ + dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val; +#else + dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff); + dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff); +#endif +} + +static ERTS_INLINE void +last_pid_data_init_nob(Uint64 val) +{ + erts_dw_aint_t dw; + unint64_to_dw_aint(&dw, val); + erts_smp_dw_atomic_init_nob(&last.pid_data, &dw); +} + +static ERTS_INLINE void +last_pid_data_set_relb(Uint64 val) +{ + erts_dw_aint_t dw; + unint64_to_dw_aint(&dw, val); + erts_smp_dw_atomic_set_relb(&last.pid_data, &dw); +} + +static ERTS_INLINE Uint64 +last_pid_data_read_nob(void) +{ + erts_dw_aint_t dw; + erts_smp_dw_atomic_read_nob(&last.pid_data, &dw); + return dw_aint_to_uint64(&dw); +} + +static ERTS_INLINE Uint64 +last_pid_data_read_acqb(void) +{ + erts_dw_aint_t dw; + erts_smp_dw_atomic_read_acqb(&last.pid_data, &dw); + return dw_aint_to_uint64(&dw); +} + +static ERTS_INLINE Uint64 +last_pid_data_cmpxchg_relb(Uint64 new, Uint64 exp) +{ + erts_dw_aint_t dw_new, dw_xchg; + + unint64_to_dw_aint(&dw_new, new); + unint64_to_dw_aint(&dw_xchg, exp); + + if (erts_smp_dw_atomic_cmpxchg_relb(&last.pid_data, &dw_new, &dw_xchg)) + return exp; + else + return dw_aint_to_uint64(&dw_xchg); +} + +#elif defined(ARCH_64) + +union { + erts_smp_atomic_t pid_data; + char align[ERTS_CACHE_LINE_SIZE]; +} last erts_align_attribute(ERTS_CACHE_LINE_SIZE); + +static ERTS_INLINE void +last_pid_data_init_nob(Uint64 val) +{ + erts_smp_atomic_init_nob(&last.pid_data, (erts_aint_t) val); +} + +static ERTS_INLINE void +last_pid_data_set_relb(Uint64 val) +{ + erts_smp_atomic_set_relb(&last.pid_data, (erts_aint_t) val); +} + +static ERTS_INLINE Uint64 +last_pid_data_read_nob(void) +{ + return (Uint64) erts_smp_atomic_read_nob(&last.pid_data); +} + +static ERTS_INLINE Uint64 +last_pid_data_read_acqb(void) +{ + return (Uint64) erts_smp_atomic_read_acqb(&last.pid_data); +} + +static ERTS_INLINE 
Uint64 +last_pid_data_cmpxchg_relb(Uint64 new, Uint64 exp) +{ + return (Uint64) erts_smp_atomic_cmpxchg_relb(&last.pid_data, + (erts_aint_t) new, + (erts_aint_t) exp); +} + +#else +# error "Not 64-bit, nor 32-bit architecture..." +#endif + +static ERTS_INLINE int +last_pid_data_cmp(Uint64 lpd1, Uint64 lpd2) +{ + Uint64 lpd1_wrap; + + if (lpd1 == lpd2) + return 0; + + lpd1_wrap = lpd1 + (((Uint64) 1) << 63); + + if (lpd1 < lpd1_wrap) + return (lpd1 < lpd2 && lpd2 < lpd1_wrap) ? -1 : 1; + else + return (lpd1_wrap <= lpd2 && lpd2 < lpd1) ? 1 : -1; +} + + +#define ERTS_PID_DATA_MASK__ ((1 << _PID_DATA_SIZE) - 1) int erts_sched_compact_load; Uint erts_no_schedulers; -Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES; -Uint erts_process_tab_index_mask; + +ErtsProcTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE); int erts_sched_thread_suggested_stack_size = -1; @@ -188,7 +351,7 @@ static struct { struct { int active_runqs; int reds; - int max_len; + erts_aint32_t max_len; } prev_rise; Uint n; } balance_info; @@ -208,7 +371,7 @@ erts_sched_stat_t erts_sched_stat; static erts_tsd_key_t sched_data_key; #endif -static erts_smp_mtx_t proc_tab_mtx; +erts_smp_rwmtx_t erts_proc_tab_rwmtx; static erts_smp_atomic32_t function_calls; @@ -231,7 +394,6 @@ typedef union { static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info; -Process** process_tab; static Uint last_reductions; static Uint last_exact_reductions; Uint erts_default_process_flags; @@ -257,11 +419,11 @@ struct ErtsTermProcElement_ { union { struct { Eterm pid; - SysTimeval spawned; - SysTimeval exited; + Uint64 spawned; + Uint64 exited; } process; struct { - SysTimeval time; + Uint64 interval; } bif_invocation; } u; }; @@ -363,6 +525,7 @@ dbg_chk_aux_work_val(erts_aint32_t value) valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR; valid |= ERTS_SSI_AUX_WORK_DD; valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR; + valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP; #endif #if HAVE_ERTS_MSEG valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK; @@ -399,6 +562,53 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq) } #endif +static erts_interval_t *proc_interval; + +static void +proc_interval_init(void) +{ + proc_interval = erts_alloc_permanent_cache_aligned( + ERTS_ALC_T_PROC_INTERVAL, + sizeof(erts_interval_t)); + erts_smp_interval_init(proc_interval); +} + +static ERTS_INLINE Uint64 +get_proc_interval(void) +{ + return erts_smp_current_interval_nob(proc_interval); +} + +static ERTS_INLINE Uint64 +ensure_later_proc_interval(Uint64 interval) +{ + return erts_smp_ensure_later_interval_nob(proc_interval, interval); +} + +static ERTS_INLINE Uint64 +step_proc_interval(void) +{ + return erts_smp_step_interval_nob(proc_interval); +} + +Uint64 +erts_get_proc_interval(void) +{ + return get_proc_interval(); +} + +Uint64 +erts_ensure_later_proc_interval(Uint64 interval) +{ + return ensure_later_proc_interval(interval); +} + +Uint64 +erts_step_proc_interval(void) +{ + return step_proc_interval(); +} + void erts_pre_init_process(void) { @@ -448,7 +658,16 @@ erts_pre_init_process(void) void erts_init_process(int ncpu) { - Uint proc_bits = ERTS_PROC_BITS; + int proc_tab_sz; + int max_proc_bits; + int proc_bits = ERTS_PROC_BITS; + erts_smp_atomic_t *proc_entry; + char *proc_tab_end; + erts_smp_rwmtx_opt_t proc_tab_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + proc_tab_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + proc_tab_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + + proc_interval_init(); #ifdef ERTS_SMP erts_disable_proc_not_running_opt = 0; @@ -459,23 
+678,51 @@ erts_init_process(int ncpu) erts_smp_atomic32_init_nob(&process_count, 0); - if (erts_use_r9_pids_ports) { + if (erts_use_r9_pids_ports) proc_bits = ERTS_R9_PROC_BITS; - ASSERT(erts_max_processes <= (1 << ERTS_R9_PROC_BITS)); + + if (erts_proc.max > (1 << proc_bits)) + erts_proc.max = 1 << proc_bits; + + proc_tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(erts_proc.max + * sizeof(erts_smp_atomic_t)); + erts_proc.tab = erts_alloc(ERTS_ALC_T_PROC_TABLE, proc_tab_sz); + proc_tab_end = ((char *) erts_proc.tab) + proc_tab_sz; + proc_entry = erts_proc.tab; + while (proc_tab_end > ((char *) proc_entry)) { + erts_smp_atomic_init_nob(proc_entry, ERTS_AINT_NULL); + proc_entry++; } - process_tab = (Process**) erts_alloc(ERTS_ALC_T_PROC_TABLE, - erts_max_processes*sizeof(Process*)); - sys_memzero(process_tab, erts_max_processes * sizeof(Process*)); + erts_smp_rwmtx_init_opt(&erts_proc_tab_rwmtx, + &proc_tab_rwmtx_opts, + "proc_tab"); + last_pid_data_init_nob(~((Uint64) 0)); - erts_smp_mtx_init(&proc_tab_mtx, "proc_tab"); - p_last = -1; - p_next = 0; - p_serial = 0; + max_proc_bits = erts_fit_in_bits_int32((Sint32) erts_proc.max - 1); + + erts_proc.tab_cache_lines = proc_tab_sz/ERTS_CACHE_LINE_SIZE; + erts_proc.pix_per_cache_line = ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t); + if ((erts_proc.max & (erts_proc.max - 1)) + | (erts_proc.pix_per_cache_line & (erts_proc.pix_per_cache_line - 1))) { + /* + * erts_proc.max or erts_proc.pix_per_cache_line + * not a power of 2 :( + */ + erts_proc.pix_cl_mask = 0; + erts_proc.pix_cl_shift = 0; + erts_proc.pix_cli_mask = 0; + erts_proc.pix_cli_shift = 0; + } + else { + ASSERT((erts_proc.tab_cache_lines + & (erts_proc.tab_cache_lines - 1)) == 0); + erts_proc.pix_cl_mask = erts_proc.tab_cache_lines-1; + erts_proc.pix_cl_shift = erts_fit_in_bits_int32(erts_proc.pix_per_cache_line-1); + erts_proc.pix_cli_shift = erts_fit_in_bits_int32(erts_proc.pix_cl_mask); + erts_proc.pix_cli_mask = (1 << (max_proc_bits - erts_proc.pix_cli_shift)) - 1; + } - p_serial_shift = erts_fit_in_bits(erts_max_processes - 1); - p_serial_mask = ((~(~((Uint) 0) << proc_bits)) >> p_serial_shift); - erts_process_tab_index_mask = ~(~((Uint) 0) << p_serial_shift); last_reductions = 0; last_exact_reductions = 0; erts_default_process_flags = 0; @@ -733,8 +980,9 @@ static ERTS_INLINE ErtsProcList * proclist_create(Process *p) { ErtsProcList *plp = proclist_alloc(); + ensure_later_proc_interval(p->started_interval); plp->pid = p->id; - plp->started = p->started; + plp->started_interval = p->started_interval; return plp; } @@ -747,8 +995,7 @@ proclist_destroy(ErtsProcList *plp) static ERTS_INLINE int proclist_same(ErtsProcList *plp, Process *p) { - return (plp->pid == p->id - && erts_cmp_timeval(&plp->started, &p->started) == 0); + return plp->pid == p->id && plp->started_interval == p->started_interval; } ErtsProcList * @@ -932,9 +1179,15 @@ haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp) } static ERTS_INLINE erts_aint32_t -handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) +handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { int jix, max_jix; + + ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY); + + if (!waiting && awdp->delayed_wakeup.next > awdp->esdp->reductions) + return aux_work; + unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP); ERTS_THR_MEMORY_BARRIER; @@ -950,11 +1203,14 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) 
set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(sched-1), aux_work); } + awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY; return aux_work & ~ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP; } static ERTS_INLINE void -schedule_aux_work_wakeup(ErtsAuxWorkData *awdp, int sched, erts_aint32_t aux_work) +schedule_aux_work_wakeup(ErtsAuxWorkData *awdp, + int sched, + erts_aint32_t aux_work) { int jix = awdp->delayed_wakeup.sched2jix[sched]; if (jix >= 0) { @@ -967,7 +1223,20 @@ schedule_aux_work_wakeup(ErtsAuxWorkData *awdp, int sched, erts_aint32_t aux_wor awdp->delayed_wakeup.job[jix].sched = sched; awdp->delayed_wakeup.job[jix].aux_work = aux_work; } - set_aux_work_flags_wakeup_nob(awdp->ssi, ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP); + + if (awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY) { + ASSERT(erts_atomic32_read_nob(&awdp->ssi->aux_work) + & ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP); + } + else { + awdp->delayed_wakeup.next = (awdp->esdp->reductions + + ERTS_DELAYED_WAKEUP_REDUCTIONS); + + ASSERT(!(erts_atomic32_read_nob(&awdp->ssi->aux_work) + & ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP)); + set_aux_work_flags_wakeup_nob(awdp->ssi, + ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP); + } } #endif @@ -1046,7 +1315,8 @@ misc_aux_work_clean(ErtsThrQ_t *q, static ERTS_INLINE erts_aint32_t handle_misc_aux_work(ErtsAuxWorkData *awdp, - erts_aint32_t aux_work) + erts_aint32_t aux_work, + int waiting) { ErtsThrQ_t *q = &misc_aux_work_queues[awdp->sched_id].q; @@ -1066,7 +1336,8 @@ handle_misc_aux_work(ErtsAuxWorkData *awdp, static ERTS_INLINE erts_aint32_t handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp, - erts_aint32_t aux_work) + erts_aint32_t aux_work, + int waiting) { if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), awdp->misc.thr_prgr)) @@ -1145,7 +1416,8 @@ erts_notify_check_async_ready_queue(void *vno) static ERTS_INLINE erts_aint32_t handle_async_ready(ErtsAuxWorkData *awdp, - erts_aint32_t aux_work) + erts_aint32_t aux_work, + int waiting) { ErtsSchedulerSleepInfo *ssi = awdp->ssi; unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY); @@ -1167,7 +1439,8 @@ handle_async_ready(ErtsAuxWorkData *awdp, static ERTS_INLINE erts_aint32_t handle_async_ready_clean(ErtsAuxWorkData *awdp, - erts_aint32_t aux_work) + erts_aint32_t aux_work, + int waiting) { void *thr_prgr_p; @@ -1200,10 +1473,11 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp, } } -#endif +#endif /* ERTS_USE_ASYNC_READY_Q */ + static ERTS_INLINE erts_aint32_t -handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) +handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { ErtsSchedulerSleepInfo *ssi = awdp->ssi; erts_aint32_t res; @@ -1237,7 +1511,7 @@ erts_alloc_notify_delayed_dealloc(int ix) } static ERTS_INLINE erts_aint32_t -handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) +handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { ErtsSchedulerSleepInfo *ssi = awdp->ssi; int need_thr_progress = 0; @@ -1275,7 +1549,7 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) } static ERTS_INLINE erts_aint32_t -handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) +handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { ErtsSchedulerSleepInfo *ssi; int need_thr_progress; @@ -1319,6 +1593,77 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR; } +/* + * Handle scheduled thread 
progress later operations. + */ +#define ERTS_MAX_THR_PRGR_LATER_OPS 50 + +static ERTS_INLINE erts_aint32_t +handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) +{ + int lops; + ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); + + for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) { + ErtsThrPrgrLaterOp *lop = awdp->later_op.first; + if (!erts_thr_progress_has_reached_this(current, lop->later)) + return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP; + awdp->later_op.first = lop->next; + if (!awdp->later_op.first) { + awdp->later_op.last = NULL; + } + lop->func(lop->data); + if (!awdp->later_op.first) { + awdp->later_op.last = NULL; + unset_aux_work_flags(awdp->ssi, + ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP); + return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP; + } + } + + return aux_work; +} + +#endif /* ERTS_SMP */ + +void +erts_schedule_thr_prgr_later_op(void (*later_func)(void *), + void *later_data, + ErtsThrPrgrLaterOp *lop) +{ +#ifndef ERTS_SMP + later_func(later_data); +#else + ErtsSchedulerData *esdp; + ErtsAuxWorkData *awdp; + int request_wakeup = 1; + + esdp = erts_get_scheduler_data(); + ASSERT(esdp); + awdp = &esdp->aux_work_data; + + lop->func = later_func; + lop->data = later_data; + lop->later = erts_thr_progress_later(esdp); + lop->next = NULL; + if (!awdp->later_op.last) + awdp->later_op.first = lop; + else { + ErtsThrPrgrLaterOp *last = awdp->later_op.last; + last->next = lop; + if (erts_thr_progress_equal(last->later, lop->later)) + request_wakeup = 0; + } + awdp->later_op.last = lop; + set_aux_work_flags_wakeup_nob(awdp->ssi, + ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP); + if (request_wakeup) + erts_thr_progress_wakeup(esdp, lop->later); +#endif +} + +#ifdef ERTS_SMP + static erts_atomic32_t completed_dealloc_count; static void @@ -1403,7 +1748,7 @@ erts_smp_notify_check_children_needed(void) } static ERTS_INLINE erts_aint32_t -handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) +handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_CHECK_CHILDREN); erts_check_children(); @@ -1426,7 +1771,7 @@ erts_smp_atomic32_t erts_halt_progress; int erts_halt_code; static ERTS_INLINE erts_aint32_t -handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) +handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS); awdp->esdp->run_queue->halt_in_progress = 1; @@ -1474,7 +1819,7 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) #if HAVE_ERTS_MSEG static ERTS_INLINE erts_aint32_t -handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) +handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK); erts_mseg_cache_check(); @@ -1484,7 +1829,7 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) #endif static ERTS_INLINE erts_aint32_t -handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work) +handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO); setup_aux_work_timer(); @@ -1498,7 +1843,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) #define HANDLE_AUX_WORK(FLG, HNDLR) \ ignore |= FLG; \ if (aux_work & FLG) { \ - aux_work = HNDLR(awdp, aux_work); \ + aux_work = 
HNDLR(awdp, aux_work, waiting); \ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \ if (!(aux_work & ~ignore)) { \ ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \ @@ -1544,6 +1889,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC), handle_fix_alloc); +#ifdef ERTS_SMP + HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP, + handle_thr_prgr_later_op); +#endif + #if ERTS_USE_ASYNC_READY_Q HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY, handle_async_ready); @@ -1734,8 +2084,8 @@ sched_waiting_sys(Uint no, ErtsRunQueue *rq) { ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); ASSERT(rq->waiting >= 0); - rq->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK - | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); + (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK + | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); rq->waiting++; rq->waiting *= -1; rq->woken = 0; @@ -1811,8 +2161,8 @@ static ERTS_INLINE void sched_waiting(Uint no, ErtsRunQueue *rq) { ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); - rq->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK - | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); + (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK + | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); if (rq->waiting < 0) rq->waiting--; else @@ -1844,9 +2194,8 @@ ongoing_multi_scheduling_block(void) static ERTS_INLINE void empty_runq(ErtsRunQueue *rq) { - erts_aint32_t oifls = erts_smp_atomic32_read_band_nob(&rq->info_flags, - ~ERTS_RUNQ_IFLG_NONEMPTY); - if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) { + Uint32 old_flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED); + if (old_flags & ERTS_RUNQ_FLG_NONEMPTY) { #ifdef DEBUG erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); /* @@ -1862,9 +2211,8 @@ empty_runq(ErtsRunQueue *rq) static ERTS_INLINE void non_empty_runq(ErtsRunQueue *rq) { - erts_aint32_t oifls = erts_smp_atomic32_read_bor_nob(&rq->info_flags, - ERTS_RUNQ_IFLG_NONEMPTY); - if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) { + Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY); + if (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY)) { #ifdef DEBUG erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); /* @@ -2479,19 +2827,16 @@ try_inc_no_active_runqs(int active) static ERTS_INLINE int chk_wake_sched(ErtsRunQueue *crq, int ix, int activate) { - erts_aint32_t iflgs; + Uint32 flags; ErtsRunQueue *wrq; if (crq->ix == ix) return 0; wrq = ERTS_RUNQ_IX(ix); - iflgs = erts_smp_atomic32_read_nob(&wrq->info_flags); - if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) { + flags = ERTS_RUNQ_FLGS_GET(wrq); + if (!(flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_NONEMPTY))) { if (activate) { - if (try_inc_no_active_runqs(ix+1)) { - erts_smp_xrunq_lock(crq, wrq); - wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE; - erts_smp_xrunq_unlock(crq, wrq); - } + if (try_inc_no_active_runqs(ix+1)) + (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE); } wake_scheduler(wrq, 0); return 1; @@ -2558,9 +2903,7 @@ erts_sched_notify_check_cpu_bind(void) int ix; for (ix = 0; ix < erts_no_run_queues; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); - erts_smp_runq_lock(rq); - rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND; - erts_smp_runq_unlock(rq); + (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND); wake_scheduler(rq, 0); } #else @@ -2569,409 +2912,547 @@ erts_sched_notify_check_cpu_bind(void) } -#ifdef ERTS_SMP +static ERTS_INLINE void +enqueue_process(ErtsRunQueue *runq, int prio, Process *p) +{ + ErtsRunPrioQueue *rpq; -ErtsRunQueue * 
-erts_prepare_emigrate(ErtsRunQueue *c_rq, ErtsRunQueueInfo *c_rqi, int prio) + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + + erts_smp_inc_runq_len(runq, &runq->procs.prio_info[prio], prio); + + if (prio == PRIORITY_LOW) { + p->schedule_count = RESCHEDULE_LOW; + rpq = &runq->procs.prio[PRIORITY_NORMAL]; + } + else { + p->schedule_count = 1; + rpq = &runq->procs.prio[prio]; + } + + p->next = NULL; + if (rpq->last) + rpq->last->next = p; + else + rpq->first = p; + rpq->last = p; +} + + +static ERTS_INLINE void +unqueue_process(ErtsRunQueue *runq, + ErtsRunPrioQueue *rpq, + ErtsRunQueueInfo *rqi, + int prio, + Process *prev_proc, + Process *proc) { - ASSERT(ERTS_CHK_RUNQ_FLG_EMIGRATE(c_rq->flags, prio)); - ASSERT(ERTS_CHK_RUNQ_FLG_EVACUATE(c_rq->flags, prio) - || c_rqi->len >= c_rqi->migrate.limit.this); + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); - while (1) { - ErtsRunQueue *n_rq = c_rqi->migrate.runq; - ERTS_DBG_VERIFY_VALID_RUNQP(n_rq); - erts_smp_xrunq_lock(c_rq, n_rq); - - /* - * erts_smp_xrunq_lock() may release lock on c_rq! We have - * to check that we still want to emigrate and emigrate - * to the same run queue as before. - */ + if (prev_proc) + prev_proc->next = proc->next; + else + rpq->first = proc->next; + if (!proc->next) + rpq->last = prev_proc; - if (ERTS_CHK_RUNQ_FLG_EMIGRATE(c_rq->flags, prio)) { - Uint32 force = (ERTS_CHK_RUNQ_FLG_EVACUATE(c_rq->flags, prio) - | (c_rq->flags & ERTS_RUNQ_FLG_INACTIVE)); - if (force || c_rqi->len > c_rqi->migrate.limit.this) { - ErtsRunQueueInfo *n_rqi; - /* We still want to emigrate */ - - if (n_rq != c_rqi->migrate.runq) { - /* Ahh... run queue changed; need to do it all over again... */ - erts_smp_runq_unlock(n_rq); - continue; - } - else { + if (!rpq->first) + rpq->last = NULL; - if (prio == ERTS_PORT_PRIO_LEVEL) - n_rqi = &n_rq->ports.info; - else - n_rqi = &n_rq->procs.prio_info[prio]; + erts_smp_dec_runq_len(runq, rqi, prio); +} - if (force || (n_rqi->len < c_rqi->migrate.limit.other)) { - /* emigrate ... 
*/ - return n_rq; - } - } - } - } - ASSERT(n_rq != c_rq); - erts_smp_runq_unlock(n_rq); - if (!(c_rq->flags & ERTS_RUNQ_FLG_INACTIVE)) { - /* No more emigrations to this runq */ - ERTS_UNSET_RUNQ_FLG_EMIGRATE(c_rq->flags, prio); - ERTS_DBG_SET_INVALID_RUNQP(c_rqi->migrate.runq, 0x3); - } +static ERTS_INLINE Process * +dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep) +{ + erts_aint32_t state; + int prio; + ErtsRunPrioQueue *rpq; + ErtsRunQueueInfo *rqi; + Process *p; + + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + + ASSERT(PRIORITY_NORMAL == prio_q + || PRIORITY_HIGH == prio_q + || PRIORITY_MAX == prio_q); + rpq = &runq->procs.prio[prio_q]; + p = rpq->first; + if (!p) return NULL; + + ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + + state = erts_smp_atomic32_read_nob(&p->state); + if (statep) + *statep = state; + + prio = (int) (ERTS_PSFLG_PRIO_MASK & state); + + rqi = &runq->procs.prio_info[prio]; + + if (p) + unqueue_process(runq, rpq, rqi, prio, NULL, p); + + return p; +} + +static ERTS_INLINE int +check_requeue_process(ErtsRunQueue *rq, int prio_q) +{ + ErtsRunPrioQueue *rpq = &rq->procs.prio[prio_q]; + Process *p = rpq->first; + if (--p->schedule_count > 0 && p != rpq->last) { + /* reschedule */ + rpq->first = p->next; + rpq->last->next = p; + rpq->last = p; + p->next = NULL; + return 1; } + return 0; +} + +#ifdef ERTS_SMP + +static ErtsRunQueue * +check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio) +{ + int len; + Uint32 f_flags, f_rq_flags; + ErtsRunQueue *f_rq; + + f_flags = mp->prio[prio].flags; + + ASSERT(ERTS_CHK_RUNQ_FLG_IMMIGRATE(mp->flags, prio)); + + f_rq = mp->prio[prio].runq; + if (!f_rq) + return NULL; + + f_rq_flags = ERTS_RUNQ_FLGS_GET(f_rq); + if (f_rq_flags & ERTS_RUNQ_FLG_PROTECTED) + return NULL; + + if (ERTS_CHK_RUNQ_FLG_EVACUATE(f_flags, prio)) + return f_rq; + + if (f_rq_flags & ERTS_RUNQ_FLG_INACTIVE) + return f_rq; + + if (prio == ERTS_PORT_PRIO_LEVEL) + len = RUNQ_READ_LEN(&c_rq->ports.info.len); + else + len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len); + + if (len < mp->prio[prio].limit.this) { + if (prio == ERTS_PORT_PRIO_LEVEL) + len = RUNQ_READ_LEN(&f_rq->ports.info.len); + else + len = RUNQ_READ_LEN(&f_rq->procs.prio_info[prio].len); + + if (len > mp->prio[prio].limit.other) + return f_rq; + } + return NULL; } static void -immigrate(ErtsRunQueue *rq) +immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) { - int prio; + Uint32 iflags, iflag; + erts_smp_runq_unlock(c_rq); - ASSERT(rq->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK); + ASSERT(erts_thr_progress_is_managed_thread()); - for (prio = 0; prio < ERTS_NO_PRIO_LEVELS; prio++) { - if (ERTS_CHK_RUNQ_FLG_IMMIGRATE(rq->flags, prio)) { - ErtsRunQueueInfo *rqi = (prio == ERTS_PORT_PRIO_LEVEL - ? &rq->ports.info - : &rq->procs.prio_info[prio]); - ErtsRunQueue *from_rq = rqi->migrate.runq; - int rq_locked, from_rq_locked; + iflags = mp->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK; - ERTS_DBG_VERIFY_VALID_RUNQP(from_rq); + iflag = iflags & -iflags; - rq_locked = 1; - from_rq_locked = 1; - erts_smp_xrunq_lock(rq, from_rq); - /* - * erts_smp_xrunq_lock() may release lock on rq! We have - * to check that we still want to immigrate from the same - * run queue as before. - */ - if (ERTS_CHK_RUNQ_FLG_IMMIGRATE(rq->flags, prio) - && from_rq == rqi->migrate.runq) { - ErtsRunQueueInfo *from_rqi = (prio == ERTS_PORT_PRIO_LEVEL - ? 
&from_rq->ports.info - : &from_rq->procs.prio_info[prio]); - if ((ERTS_CHK_RUNQ_FLG_EVACUATE(rq->flags, prio) - && ERTS_CHK_RUNQ_FLG_EVACUATE(from_rq->flags, prio) - && from_rqi->len) - || (from_rqi->len > rqi->migrate.limit.other - && rqi->len < rqi->migrate.limit.this)) { - if (prio == ERTS_PORT_PRIO_LEVEL) { - Port *prt = from_rq->ports.start; - if (prt) { - int prt_locked = 0; - (void) erts_port_migrate(prt, &prt_locked, - from_rq, &from_rq_locked, - rq, &rq_locked); - if (prt_locked) - erts_smp_port_unlock(prt); - } - } - else { - Process *proc; - ErtsRunPrioQueue *from_rpq; - from_rpq = (prio == PRIORITY_LOW - ? &from_rq->procs.prio[PRIORITY_NORMAL] - : &from_rq->procs.prio[prio]); - for (proc = from_rpq->first; proc; proc = proc->next) - if (proc->prio == prio && !proc->bound_runq) - break; - if (proc) { - ErtsProcLocks proc_locks = 0; - (void) erts_proc_migrate(proc, &proc_locks, - from_rq, &from_rq_locked, - rq, &rq_locked); - if (proc_locks) - erts_smp_proc_unlock(proc, proc_locks); - } + while (iflag) { + ErtsRunQueue *rq; + int prio; + + switch (iflag) { + case (MAX_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT): + prio = PRIORITY_MAX; + break; + case (HIGH_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT): + prio = PRIORITY_HIGH; + break; + case (NORMAL_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT): + prio = PRIORITY_NORMAL; + break; + case (LOW_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT): + prio = PRIORITY_LOW; + break; + case (PORT_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT): + prio = ERTS_PORT_PRIO_LEVEL; + break; + default: + erl_exit(ERTS_ABORT_EXIT, + "%s:%d:%s(): Invalid immigrate queue mask", + __FILE__, __LINE__, __func__); + prio = 0; + break; + } + + iflags &= ~iflag; + iflag = iflags & -iflags; + + rq = check_immigration_need(c_rq, mp, prio); + if (rq) { + erts_smp_runq_lock(rq); + if (prio == ERTS_PORT_PRIO_LEVEL) { + Port *prt; + prt = erts_dequeue_port(rq); + if (prt) + RUNQ_SET_RQ(&prt->run_queue, c_rq); + erts_smp_runq_unlock(rq); + if (prt) { + /* port might terminate while we have no lock... */ + rq = erts_port_runq(prt); + if (rq) { + if (rq != c_rq) + erl_exit(ERTS_ABORT_EXIT, + "%s:%d:%s(): Internal error", + __FILE__, __LINE__, __func__); + erts_enqueue_port(c_rq, prt); + if (!iflag) + return; /* done */ + erts_smp_runq_unlock(c_rq); } } - else { - ERTS_UNSET_RUNQ_FLG_IMMIGRATE(rq->flags, prio); - ERTS_DBG_SET_INVALID_RUNQP(rqi->migrate.runq, 0x1); + } + else { + ErtsRunPrioQueue *rpq = &rq->procs.prio[prio == PRIORITY_LOW + ? 
PRIORITY_NORMAL + : prio]; + Process *prev_proc = NULL; + Process *proc = rpq->first; + int rq_locked = 1; + + while (proc) { + erts_aint32_t state; + state = erts_smp_atomic32_read_acqb(&proc->state); + if (!(ERTS_PSFLG_BOUND & state) + && (prio == (int) (ERTS_PSFLG_PRIO_MASK & state))) { + ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio]; + unqueue_process(rq, rpq, rqi, prio, prev_proc, proc); + erts_smp_runq_unlock(rq); + RUNQ_SET_RQ(&proc->run_queue, c_rq); + rq_locked = 0; + + erts_smp_runq_lock(c_rq); + enqueue_process(c_rq, prio, proc); + if (!iflag) + return; /* done */ + erts_smp_runq_unlock(c_rq); + break; + } + prev_proc = proc; + proc = proc->next; } + if (rq_locked) + erts_smp_runq_unlock(rq); } - if (from_rq_locked) - erts_smp_runq_unlock(from_rq); - if (!rq_locked) - erts_smp_runq_lock(rq); } } + + erts_smp_runq_lock(c_rq); +} + +static ERTS_INLINE void +suspend_run_queue(ErtsRunQueue *rq) +{ + erts_smp_atomic32_read_bor_nob(&rq->scheduler->ssi->flags, + ERTS_SSI_FLG_SUSPENDED); + (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED); + + wake_scheduler(rq, 0); +} + +static void scheduler_ix_resume_wake(Uint ix); + +static ERTS_INLINE void +resume_run_queue(ErtsRunQueue *rq) +{ + int pix; + + erts_smp_runq_lock(rq); + + (void) ERTS_RUNQ_FLGS_MASK_SET(rq, + (ERTS_RUNQ_FLG_OUT_OF_WORK + | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK + | ERTS_RUNQ_FLG_SUSPENDED), + (ERTS_RUNQ_FLG_OUT_OF_WORK + | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); + + rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; + for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { + rq->procs.prio_info[pix].max_len = 0; + rq->procs.prio_info[pix].reds = 0; + } + rq->ports.info.max_len = 0; + rq->ports.info.reds = 0; + rq->max_len = 0; + + erts_smp_runq_unlock(rq); + + scheduler_ix_resume_wake(rq->ix); } +typedef struct { + Process *first; + Process *last; +} ErtsStuckBoundProcesses; + static void -evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq) +schedule_bound_processes(ErtsRunQueue *rq, + ErtsStuckBoundProcesses *sbpp) { - Port *prt; - int notify_to_rq = 0; - int prio; - int prt_locked = 0; - int rq_locked = 0; - int evac_rq_locked = 1; - ErtsMigrateResult mres; + Process *proc, *next; + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); - erts_smp_runq_lock(evac_rq); + proc = sbpp->first; + while (proc) { + erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); + next = proc->next; + enqueue_process(rq, (int) (ERTS_PSFLG_PRIO_MASK & state), proc); + proc = next; + } +} - erts_smp_atomic32_read_bor_nob(&evac_rq->scheduler->ssi->flags, - ERTS_SSI_FLG_SUSPENDED); +static void +evacuate_run_queue(ErtsRunQueue *rq, + ErtsStuckBoundProcesses *sbpp) +{ + int prio_q; + ErtsRunQueue *to_rq; + ErtsMigrationPaths *mps; + ErtsMigrationPath *mp; - evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK; - evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK - | ERTS_RUNQ_FLGS_EVACUATE_QMASK - | ERTS_RUNQ_FLG_SUSPENDED); + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); - erts_smp_atomic32_read_bor_nob(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED); - /* - * Need to set up evacuation paths first since we - * may release the run queue lock on evac_rq - * when evacuating. 
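/*
 * [Editor's sketch, not part of the patch] immigrate() above walks the
 * per-priority immigrate bits with "iflag = iflags & -iflags", isolating the
 * lowest set bit on each turn; since the max-priority bit is the least
 * significant of the queue-mask bits, this visits the highest priority
 * first. A tiny stand-alone illustration of the same loop shape; the SK_*
 * bit values and the puts() handler are made up for the sketch.
 */
#include <stdio.h>

#define SK_MAX_BIT    (1u << 0)   /* illustrative values; the real bits  */
#define SK_HIGH_BIT   (1u << 1)   /* come from the PRIORITY_* levels     */
#define SK_NORMAL_BIT (1u << 2)
#define SK_LOW_BIT    (1u << 3)

static void sketch_visit_prios(unsigned qmask)
{
    while (qmask) {
        unsigned bit = qmask & -qmask;   /* lowest set bit = highest prio */
        switch (bit) {
        case SK_MAX_BIT:    puts("max");    break;
        case SK_HIGH_BIT:   puts("high");   break;
        case SK_NORMAL_BIT: puts("normal"); break;
        case SK_LOW_BIT:    puts("low");    break;
        }
        qmask &= ~bit;                   /* clear it and continue */
    }
}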
- */ - evac_rq->misc.evac_runq = rq; - evac_rq->ports.info.migrate.runq = rq; - for (prio = 0; prio < ERTS_NO_PROC_PRIO_LEVELS; prio++) - evac_rq->procs.prio_info[prio].migrate.runq = rq; + (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); + + mps = erts_get_migration_paths_managed(); + mp = &mps->mpath[rq->ix]; /* Evacuate scheduled misc ops */ - if (evac_rq->misc.start) { - rq_locked = 1; - erts_smp_xrunq_lock(evac_rq, rq); - if (rq->misc.end) - rq->misc.end->next = evac_rq->misc.start; + if (rq->misc.start) { + ErtsMiscOpList *start, *end; + + to_rq = mp->misc_evac_runq; + if (!to_rq) + return; + + start = rq->misc.start; + end = rq->misc.end; + rq->misc.start = NULL; + rq->misc.end = NULL; + erts_smp_runq_unlock(rq); + + erts_smp_runq_lock(to_rq); + if (to_rq->misc.end) + to_rq->misc.end->next = start; else - rq->misc.start = evac_rq->misc.start; - rq->misc.end = evac_rq->misc.end; - evac_rq->misc.start = NULL; - evac_rq->misc.end = NULL; + to_rq->misc.start = start; + + to_rq->misc.end = end; + erts_smp_runq_unlock(to_rq); + smp_notify_inc_runq(to_rq); + erts_smp_runq_lock(to_rq); } - /* Evacuate scheduled ports */ - prt = evac_rq->ports.start; - while (prt) { - mres = erts_port_migrate(prt, &prt_locked, - evac_rq, &evac_rq_locked, - rq, &rq_locked); - if (mres == ERTS_MIGRATE_SUCCESS) - notify_to_rq = 1; - if (prt_locked) - erts_smp_port_unlock(prt); - if (!evac_rq_locked) { - evac_rq_locked = 1; - erts_smp_runq_lock(evac_rq); + if (rq->ports.start) { + Port *prt; + + to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq; + if (!to_rq) + return; + + /* Evacuate scheduled ports */ + prt = rq->ports.start; + while (prt) { + ErtsRunQueue *prt_rq; + prt = erts_dequeue_port(rq); + RUNQ_SET_RQ(&prt->run_queue, to_rq); + erts_smp_runq_unlock(rq); + /* + * The port might terminate while + * we have no lock on it... + */ + prt_rq = erts_port_runq(prt); + if (prt_rq) { + if (prt_rq != to_rq) + erl_exit(ERTS_ABORT_EXIT, + "%s:%d:%s() internal error\n", + __FILE__, __LINE__, __func__); + erts_enqueue_port(to_rq, prt); + erts_smp_runq_unlock(to_rq); + } + erts_smp_runq_lock(rq); + prt = rq->ports.start; } - prt = evac_rq->ports.start; + smp_notify_inc_runq(to_rq); } /* Evacuate scheduled processes */ - for (prio = 0; prio < ERTS_NO_PROC_PRIO_LEVELS; prio++) { + for (prio_q = 0; prio_q < ERTS_NO_PROC_PRIO_QUEUES; prio_q++) { + erts_aint32_t state; Process *proc; + int notify = 0; + to_rq = NULL; - switch (prio) { - case PRIORITY_MAX: - case PRIORITY_HIGH: - case PRIORITY_NORMAL: - proc = evac_rq->procs.prio[prio].first; - while (proc) { - ErtsProcLocks proc_locks = 0; - - /* Bound processes are stuck... */ - while (proc->bound_runq) { - proc = proc->next; - if (!proc) - goto end_of_proc; - } - - mres = erts_proc_migrate(proc, &proc_locks, - evac_rq, &evac_rq_locked, - rq, &rq_locked); - if (mres == ERTS_MIGRATE_SUCCESS) - notify_to_rq = 1; - if (proc_locks) - erts_smp_proc_unlock(proc, proc_locks); - if (!evac_rq_locked) { - erts_smp_runq_lock(evac_rq); - evac_rq_locked = 1; - } + if (!mp->prio[prio_q].runq) + return; + if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq) + return; - proc = evac_rq->procs.prio[prio].first; + proc = dequeue_process(rq, prio_q, &state); + while (proc) { + if (ERTS_PSFLG_BOUND & state) { + /* Bound processes get stuck here... 
*/ + proc->next = NULL; + if (sbpp->last) + sbpp->last->next = proc; + else + sbpp->first = proc; + sbpp->last = proc; } + else { + int prio = (int) (ERTS_PSFLG_PRIO_MASK & state); + erts_smp_runq_unlock(rq); - end_of_proc: + to_rq = mp->prio[prio].runq; + RUNQ_SET_RQ(&proc->run_queue, to_rq); -#ifdef DEBUG - for (proc = evac_rq->procs.prio[prio].first; - proc; - proc = proc->next) { - ASSERT(proc->bound_runq); + erts_smp_runq_lock(to_rq); + enqueue_process(to_rq, prio, proc); + erts_smp_runq_unlock(to_rq); + notify = 1; + + erts_smp_runq_lock(rq); } -#endif - break; - case PRIORITY_LOW: - break; - default: - ASSERT(!"Invalid process priority"); - break; + proc = dequeue_process(rq, prio_q, &state); } + if (notify) + smp_notify_inc_runq(to_rq); } - if (rq_locked) - erts_smp_runq_unlock(rq); - - if (evac_rq_locked) - erts_smp_runq_unlock(evac_rq); - - if (notify_to_rq) - smp_notify_inc_runq(rq); - - wake_scheduler(evac_rq, 0); + if (ERTS_EMPTY_RUNQ(rq)) + empty_runq(rq); } static int -try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq) +try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, Uint32 flags) { - Process *proc; - int vrq_locked; + Uint32 procs_qmask = flags & ERTS_RUNQ_FLGS_PROCS_QMASK; + int max_prio_bit; + ErtsRunPrioQueue *rpq; - if (*rq_lockedp) - erts_smp_xrunq_lock(rq, vrq); - else - erts_smp_runq_lock(vrq); - vrq_locked = 1; + if (*rq_lockedp) { + erts_smp_runq_unlock(rq); + *rq_lockedp = 0; + } + + ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq)); - ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp); - ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked); + erts_smp_runq_lock(vrq); if (rq->halt_in_progress) - goto try_steal_port; + goto no_procs; /* * Check for a runnable process to steal... 
*/ - switch (vrq->flags & ERTS_RUNQ_FLGS_PROCS_QMASK) { - case MAX_BIT: - case MAX_BIT|HIGH_BIT: - case MAX_BIT|NORMAL_BIT: - case MAX_BIT|LOW_BIT: - case MAX_BIT|HIGH_BIT|NORMAL_BIT: - case MAX_BIT|HIGH_BIT|LOW_BIT: - case MAX_BIT|NORMAL_BIT|LOW_BIT: - case MAX_BIT|HIGH_BIT|NORMAL_BIT|LOW_BIT: - for (proc = vrq->procs.prio[PRIORITY_MAX].last; - proc; - proc = proc->prev) { - if (!proc->bound_runq) - break; - } - if (proc) + while (procs_qmask) { + Process *prev_proc; + Process *proc; + + max_prio_bit = procs_qmask & -procs_qmask; + switch (max_prio_bit) { + case MAX_BIT: + rpq = &vrq->procs.prio[PRIORITY_MAX]; break; - case HIGH_BIT: - case HIGH_BIT|NORMAL_BIT: - case HIGH_BIT|LOW_BIT: - case HIGH_BIT|NORMAL_BIT|LOW_BIT: - for (proc = vrq->procs.prio[PRIORITY_HIGH].last; - proc; - proc = proc->prev) { - if (!proc->bound_runq) - break; - } - if (proc) + case HIGH_BIT: + rpq = &vrq->procs.prio[PRIORITY_HIGH]; break; - case NORMAL_BIT: - case LOW_BIT: - case NORMAL_BIT|LOW_BIT: - for (proc = vrq->procs.prio[PRIORITY_NORMAL].last; - proc; - proc = proc->prev) { - if (!proc->bound_runq) - break; - } - if (proc) + case NORMAL_BIT: + case LOW_BIT: + rpq = &vrq->procs.prio[PRIORITY_NORMAL]; break; - case 0: - proc = NULL; - break; - default: - ASSERT(!"Invalid queue mask"); - proc = NULL; - break; - } + case 0: + goto no_procs; + default: + ASSERT(!"Invalid queue mask"); + goto no_procs; + } + + prev_proc = NULL; + proc = rpq->first; - if (proc) { - ErtsProcLocks proc_locks = 0; - int res; - ErtsMigrateResult mres; - mres = erts_proc_migrate(proc, &proc_locks, - vrq, &vrq_locked, - rq, rq_lockedp); - if (proc_locks) - erts_smp_proc_unlock(proc, proc_locks); - res = !0; - switch (mres) { - case ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED: - res = 0; - case ERTS_MIGRATE_SUCCESS: - if (vrq_locked) + while (proc) { + erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); + if (!(ERTS_PSFLG_BOUND & state)) { + /* Steal process */ + int prio = (int) (ERTS_PSFLG_PRIO_MASK & state); + ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio]; + unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc); erts_smp_runq_unlock(vrq); - return res; - default: /* Other failures */ - break; - } - } + RUNQ_SET_RQ(&proc->run_queue, rq); - ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp); - ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked); + erts_smp_runq_lock(rq); + *rq_lockedp = 1; + enqueue_process(rq, prio, proc); + return !0; + } + prev_proc = proc; + proc = proc->next; + } - if (!vrq_locked) { - if (*rq_lockedp) - erts_smp_xrunq_lock(rq, vrq); - else - erts_smp_runq_lock(vrq); - vrq_locked = 1; + procs_qmask &= ~max_prio_bit; } - try_steal_port: +no_procs: - ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp); - ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked); + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(vrq)); /* * Check for a runnable port to steal... */ - if (vrq->ports.info.len) { - Port *prt = vrq->ports.end; - int prt_locked = 0; - int res; - ErtsMigrateResult mres; - - mres = erts_port_migrate(prt, &prt_locked, - vrq, &vrq_locked, - rq, rq_lockedp); - if (prt_locked) - erts_smp_port_unlock(prt); - res = !0; - switch (mres) { - case ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED: - res = 0; - case ERTS_MIGRATE_SUCCESS: - if (vrq_locked) - erts_smp_runq_unlock(vrq); - return res; - default: /* Other failures */ - break; + if (vrq->ports.start) { + ErtsRunQueue *prt_rq; + Port *prt = erts_dequeue_port(vrq); + RUNQ_SET_RQ(&prt->run_queue, rq); + erts_smp_runq_unlock(vrq); + + /* + * The port might terminate while + * we have no lock on it... 
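The stealing loop above replaces the old sixteen-way switch with a scan of the victim's priority bitmask: qmask & -qmask isolates the lowest set bit, which corresponds to the highest runnable priority, and that bit is cleared once its queue has been searched. A small self-contained sketch of the same scan; the bit values below are the sketch's own, chosen so that max priority is the lowest bit, and low shares the normal queue as in the diff.

    #include <stdio.h>

    enum { MAX_BIT = 1, HIGH_BIT = 2, NORMAL_BIT = 4, LOW_BIT = 8 };  /* assumed layout */

    /* walk a priority bitmask from the highest priority (lowest bit) downwards */
    static void scan_prios(unsigned qmask)
    {
        while (qmask) {
            unsigned max_prio_bit = qmask & -qmask;     /* isolate lowest set bit */
            switch (max_prio_bit) {
            case MAX_BIT:    printf("search max queue\n");        break;
            case HIGH_BIT:   printf("search high queue\n");       break;
            case NORMAL_BIT:
            case LOW_BIT:    printf("search normal/low queue\n"); break;
            default:         return;                    /* invalid mask */
            }
            qmask &= ~max_prio_bit;                     /* done with this priority */
        }
    }

    int main(void)
    {
        scan_prios(HIGH_BIT | LOW_BIT);
        return 0;
    }

Clearing the bit after each miss guarantees the loop terminates even when every process at that priority turns out to be bound to the victim queue.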
+ */ + + prt_rq = erts_port_runq(prt); + if (!prt_rq) + return 0; + else { + if (prt_rq != rq) + erl_exit(ERTS_ABORT_EXIT, + "%s:%d:%s() internal error\n", + __FILE__, __LINE__, __func__); + *rq_lockedp = 1; + erts_enqueue_port(rq, prt); + return !0; } } - if (vrq_locked) - erts_smp_runq_unlock(vrq); + erts_smp_runq_unlock(vrq); return 0; } @@ -2981,9 +3462,10 @@ static ERTS_INLINE int check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix) { ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix); - erts_aint32_t iflgs = erts_smp_atomic32_read_nob(&vrq->info_flags); - if (iflgs & ERTS_RUNQ_IFLG_NONEMPTY) - return try_steal_task_from_victim(rq, rq_lockedp, vrq); + Uint32 flags = ERTS_RUNQ_FLGS_GET(vrq); + if ((flags & (ERTS_RUNQ_FLG_NONEMPTY + | ERTS_RUNQ_FLG_PROTECTED)) == ERTS_RUNQ_FLG_NONEMPTY) + return try_steal_task_from_victim(rq, rq_lockedp, vrq, flags); else return 0; } @@ -2993,15 +3475,12 @@ static int try_steal_task(ErtsRunQueue *rq) { int res, rq_locked, vix, active_rqs, blnc_rqs; + Uint32 flags; - /* - * We are not allowed to steal jobs to this run queue - * if it is suspended. Note that it might get suspended - * at any time when we don't have the lock on the run - * queue. - */ - if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) - return 0; + /* Protect jobs we steal from getting stolen from us... */ + flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_PROTECTED); + if (flags & ERTS_RUNQ_FLG_SUSPENDED) + return 0; /* go suspend instead... */ res = 0; rq_locked = 1; @@ -3120,12 +3599,123 @@ do { \ ASSERT(sum__ == (RQ)->full_reds_history_sum); \ } while (0); +#define ERTS_PRE_ALLOCED_MPATHS 8 + +erts_atomic_t erts_migration_paths; + +static struct { + size_t size; + ErtsMigrationPaths *freelist; + struct { + ErtsMigrationPaths *first; + ErtsMigrationPaths *last; + } retired; +} mpaths; + +static void +init_migration_paths(void) +{ + int qix, i; + char *p; + ErtsMigrationPaths *mps; + + mpaths.size = sizeof(ErtsMigrationPaths); + mpaths.size += sizeof(ErtsMigrationPath)*(erts_no_schedulers-1); + mpaths.size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(mpaths.size); + + p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_LL_MPATHS, + (mpaths.size + * ERTS_PRE_ALLOCED_MPATHS)); + mpaths.freelist = NULL; + for (i = 0; i < ERTS_PRE_ALLOCED_MPATHS-1; i++) { + mps = (ErtsMigrationPaths *) p; + mps->next = mpaths.freelist; + mpaths.freelist = mps; + p += mpaths.size; + } + + mps = (ErtsMigrationPaths *) p; + mps->block = NULL; + for (qix = 0; qix < erts_no_run_queues; qix++) { + int pix; + mps->mpath[qix].flags = 0; + mps->mpath[qix].misc_evac_runq = NULL; + for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { + mps->mpath[qix].prio[pix].limit.this = -1; + mps->mpath[qix].prio[pix].limit.other = -1; + mps->mpath[qix].prio[pix].runq = NULL; + mps->mpath[qix].prio[pix].flags = 0; + } + } + erts_atomic_init_wb(&erts_migration_paths, (erts_aint_t) mps); +} + +static ERTS_INLINE ErtsMigrationPaths * +alloc_mpaths(void) +{ + void *block; + ErtsMigrationPaths *res; + ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx)); + + res = mpaths.freelist; + if (res) { + mpaths.freelist = res->next; + res->block = NULL; + return res; + } + res = erts_alloc(ERTS_ALC_T_SL_MPATHS, + mpaths.size+ERTS_CACHE_LINE_SIZE); + block = (void *) res; + if (((UWord) res) & ERTS_CACHE_LINE_MASK) + res = (ErtsMigrationPaths *) ((((UWord) res) & ~ERTS_CACHE_LINE_MASK) + + ERTS_CACHE_LINE_SIZE); + res->block = block; + return res; +} + +static ERTS_INLINE void +retire_mpaths(ErtsMigrationPaths *mps) +{ + ErtsThrPrgrVal current; + + 
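alloc_mpaths() above gets cache-line-aligned memory out of an ordinary allocator by over-allocating one cache line, rounding the returned pointer up, and stashing the raw pointer inside the block so it can be freed later. A standalone sketch of that trick; CACHE_LINE_SIZE 64 and the struct layout are assumptions of the sketch.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CACHE_LINE_SIZE 64u                    /* assumed line size */
    #define CACHE_LINE_MASK (CACHE_LINE_SIZE - 1u)

    typedef struct aligned_blk {
        void *block;          /* original malloc pointer, needed by free */
        /* ... payload would follow here ... */
    } aligned_blk_t;

    /* over-allocate by one cache line and round the struct up to a boundary */
    static aligned_blk_t *alloc_aligned(size_t payload)
    {
        void *block = malloc(sizeof(aligned_blk_t) + payload + CACHE_LINE_SIZE);
        uintptr_t p = (uintptr_t) block;
        aligned_blk_t *res;
        if (!block)
            return NULL;
        if (p & CACHE_LINE_MASK)
            p = (p & ~(uintptr_t) CACHE_LINE_MASK) + CACHE_LINE_SIZE;
        res = (aligned_blk_t *) p;
        res->block = block;   /* remember what to hand back to free() */
        return res;
    }

    static void free_aligned(aligned_blk_t *a) { free(a->block); }

    int main(void)
    {
        aligned_blk_t *a = alloc_aligned(128);
        if (!a)
            return 1;
        printf("aligned: %d\n", ((uintptr_t) a & CACHE_LINE_MASK) == 0);
        free_aligned(a);
        return 0;
    }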
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx)); + + current = erts_thr_progress_current(); + + while (mpaths.retired.first) { + ErtsMigrationPaths *tmp = mpaths.retired.first; + if (!erts_thr_progress_has_reached_this(current, tmp->thr_prgr)) + break; + mpaths.retired.first = tmp->next; + if (tmp->block) { + erts_free(ERTS_ALC_T_SL_MPATHS, tmp->block); + } + else { + tmp->next = mpaths.freelist; + mpaths.freelist = tmp; + } + } + + if (!mpaths.retired.first) + mpaths.retired.last = NULL; + + mps->thr_prgr = erts_thr_progress_later(NULL); + mps->next = NULL; + + if (mpaths.retired.last) + mpaths.retired.last->next = mps; + else + mpaths.retired.first = mps; + mpaths.retired.last = mps; +} + static void check_balance(ErtsRunQueue *c_rq) { #if ERTS_MAX_PROCESSES >= (1 << 27) # error check_balance() assumes ERTS_MAX_PROCESS < (1 << 27) #endif + ErtsMigrationPaths *new_mpaths, *old_mpaths; ErtsRunQueueBalance avg = {0}; Sint64 scheds_reds, full_scheds_reds; int forced, active, current_active, oowc, half_full_scheds, full_scheds, @@ -3151,9 +3741,9 @@ check_balance(ErtsRunQueue *c_rq) ERTS_FOREACH_RUNQ(rq, { if (rq->waiting) - rq->flags |= ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK; + (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); else - rq->flags &= ~ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK; + (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; }); @@ -3195,7 +3785,7 @@ check_balance(ErtsRunQueue *c_rq) ErtsRunQueue *rq = ERTS_RUNQ_IX(qix); erts_smp_runq_lock(rq); - run_queue_info[qix].flags = rq->flags; + run_queue_info[qix].flags = ERTS_RUNQ_FLGS_GET_NOB(rq); for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { run_queue_info[qix].prio[pix].max_len = rq->procs.prio_info[pix].max_len; @@ -3529,47 +4119,27 @@ erts_fprintf(stderr, "--------------------------------\n"); set_no_active_runqs(active); balance_info.halftime = 1; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + new_mpaths = alloc_mpaths(); + + /* Write migration paths */ - /* Write migration paths and reset balance statistics in all queues */ for (qix = 0; qix < blnc_no_rqs; qix++) { int mqix; - Uint32 flags; - ErtsRunQueue *rq = ERTS_RUNQ_IX(qix); - ErtsRunQueueInfo *rqi; - flags = run_queue_info[qix].flags; - erts_smp_runq_lock(rq); - flags |= (rq->flags & ~ERTS_RUNQ_FLGS_MIGRATION_INFO); - ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK)); - if (rq->waiting) - flags |= ERTS_RUNQ_FLG_OUT_OF_WORK; - - rq->full_reds_history_sum - = run_queue_info[qix].full_reds_history_sum; - rq->full_reds_history[freds_hist_ix] - = run_queue_info[qix].full_reds_history_change; + Uint32 flags = run_queue_info[qix].flags; + ErtsMigrationPath *mp = &new_mpaths->mpath[qix]; - ERTS_DBG_CHK_FULL_REDS_HISTORY(rq); + mp->flags = flags; + mp->misc_evac_runq = NULL; - rq->out_of_work_count = 0; - rq->flags = flags; - rq->max_len = rq->len; for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { - rqi = (pix == ERTS_PORT_PRIO_LEVEL - ? 
&rq->ports.info - : &rq->procs.prio_info[pix]); - rqi->max_len = rqi->len; - rqi->reds = 0; if (!(ERTS_CHK_RUNQ_FLG_EMIGRATE(flags, pix) | ERTS_CHK_RUNQ_FLG_IMMIGRATE(flags, pix))) { ASSERT(run_queue_info[qix].prio[pix].immigrate_from < 0); ASSERT(run_queue_info[qix].prio[pix].emigrate_to < 0); -#ifdef DEBUG - rqi->migrate.limit.this = -1; - rqi->migrate.limit.other = -1; - ERTS_DBG_SET_INVALID_RUNQP(rqi->migrate.runq, 0x2); -#endif - + mp->prio[pix].limit.this = -1; + mp->prio[pix].limit.other = -1; + mp->prio[pix].runq = NULL; + mp->prio[pix].flags = 0; } else if (ERTS_CHK_RUNQ_FLG_EMIGRATE(flags, pix)) { ASSERT(!ERTS_CHK_RUNQ_FLG_IMMIGRATE(flags, pix)); @@ -3577,11 +4147,12 @@ erts_fprintf(stderr, "--------------------------------\n"); ASSERT(run_queue_info[qix].prio[pix].emigrate_to >= 0); mqix = run_queue_info[qix].prio[pix].emigrate_to; - rqi->migrate.limit.this + mp->prio[pix].limit.this = run_queue_info[qix].prio[pix].migration_limit; - rqi->migrate.limit.other + mp->prio[pix].limit.other = run_queue_info[mqix].prio[pix].migration_limit; - rqi->migrate.runq = ERTS_RUNQ_IX(mqix); + mp->prio[pix].runq = ERTS_RUNQ_IX(mqix); + mp->prio[pix].flags = run_queue_info[mqix].flags; } else { ASSERT(ERTS_CHK_RUNQ_FLG_IMMIGRATE(flags, pix)); @@ -3589,24 +4160,142 @@ erts_fprintf(stderr, "--------------------------------\n"); ASSERT(run_queue_info[qix].prio[pix].immigrate_from >= 0); mqix = run_queue_info[qix].prio[pix].immigrate_from; - rqi->migrate.limit.this + mp->prio[pix].limit.this = run_queue_info[qix].prio[pix].migration_limit; - rqi->migrate.limit.other + mp->prio[pix].limit.other = run_queue_info[mqix].prio[pix].migration_limit; - rqi->migrate.runq = ERTS_RUNQ_IX(mqix); + mp->prio[pix].runq = ERTS_RUNQ_IX(mqix); + mp->prio[pix].flags = run_queue_info[mqix].flags; } } + } + + old_mpaths = erts_get_migration_paths_managed(); + + /* Keep offline run-queues as is */ + for (qix = blnc_no_rqs; qix < erts_no_schedulers; qix++) { + ErtsMigrationPath *nmp = &new_mpaths->mpath[qix]; + ErtsMigrationPath *omp = &old_mpaths->mpath[qix]; + + nmp->flags = omp->flags; + nmp->misc_evac_runq = omp->misc_evac_runq; + + for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { + nmp->prio[pix].limit.this = omp->prio[pix].limit.this; + nmp->prio[pix].limit.other = omp->prio[pix].limit.other; + nmp->prio[pix].runq = omp->prio[pix].runq; + nmp->prio[pix].flags = omp->prio[pix].flags; + } + } + + + /* Publish new migration paths... */ + erts_atomic_set_wb(&erts_migration_paths, (erts_aint_t) new_mpaths); + + /* Reset balance statistics in all online queues */ + for (qix = 0; qix < blnc_no_rqs; qix++) { + Uint32 flags = run_queue_info[qix].flags; + ErtsRunQueue *rq = ERTS_RUNQ_IX(qix); + + erts_smp_runq_lock(rq); + ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK)); + if (rq->waiting) + flags |= ERTS_RUNQ_FLG_OUT_OF_WORK; + + rq->full_reds_history_sum + = run_queue_info[qix].full_reds_history_sum; + rq->full_reds_history[freds_hist_ix] + = run_queue_info[qix].full_reds_history_change; + + ERTS_DBG_CHK_FULL_REDS_HISTORY(rq); + + rq->out_of_work_count = 0; + (void) ERTS_RUNQ_FLGS_MASK_SET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags); + + rq->max_len = rq->len; + for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { + ErtsRunQueueInfo *rqi; + rqi = (pix == ERTS_PORT_PRIO_LEVEL + ? 
&rq->ports.info + : &rq->procs.prio_info[pix]); + erts_smp_reset_max_len(rq, rqi); + rqi->reds = 0; + } rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; erts_smp_runq_unlock(rq); } + erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + balance_info.n++; + retire_mpaths(old_mpaths); erts_smp_mtx_unlock(&balance_info.update_mtx); erts_smp_runq_lock(c_rq); } +static void +change_no_used_runqs(int used) +{ + ErtsMigrationPaths *new_mpaths, *old_mpaths; + int active, qix; + erts_smp_mtx_lock(&balance_info.update_mtx); + get_no_runqs(&active, NULL); + set_no_used_runqs(used); + + old_mpaths = erts_get_migration_paths_managed(); + new_mpaths = alloc_mpaths(); + + /* Write migration paths... */ + + for (qix = 0; qix < used; qix++) { + int pix; + ErtsMigrationPath *omp = &old_mpaths->mpath[qix]; + ErtsMigrationPath *nmp = &new_mpaths->mpath[qix]; + + nmp->flags = omp->flags & ~ERTS_RUNQ_FLGS_MIGRATION_QMASKS; + nmp->misc_evac_runq = NULL; + + for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { + nmp->prio[pix].limit.this = -1; + nmp->prio[pix].limit.other = -1; + nmp->prio[pix].runq = NULL; + nmp->prio[pix].flags = 0; + } + } + for (qix = used; qix < erts_no_run_queues; qix++) { + int pix; + ErtsRunQueue *to_rq = ERTS_RUNQ_IX(qix % used); + ErtsMigrationPath *nmp = &new_mpaths->mpath[qix]; + + nmp->flags = (ERTS_RUNQ_FLGS_EMIGRATE_QMASK + | ERTS_RUNQ_FLGS_EVACUATE_QMASK); + nmp->misc_evac_runq = to_rq; + for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { + nmp->prio[pix].limit.this = -1; + nmp->prio[pix].limit.other = -1; + nmp->prio[pix].runq = to_rq; + nmp->prio[pix].flags = 0; + } + } + + /* ... and publish them. */ + erts_atomic_set_wb(&erts_migration_paths, (erts_aint_t) new_mpaths); + + retire_mpaths(old_mpaths); + + /* Make sure that we balance soon... 
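The migration-path handling above is essentially a read-copy-update scheme: schedulers dereference erts_migration_paths without taking a lock, the updater builds a fresh copy under balance_info.update_mtx, publishes it with a single atomic store, and parks the old copy on a retired list until thread progress shows no scheduler can still be reading it. A much-simplified standalone sketch of publish/retire/reclaim; cfg_t and the bare epoch counter stand in for ErtsMigrationPaths and the thread-progress machinery, so the grace-period test here is only illustrative.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct cfg {
        struct cfg *next;
        unsigned long retired_at;   /* epoch at which this copy was retired */
        int value;
    } cfg_t;

    static _Atomic(cfg_t *) current_cfg;        /* what readers dereference */
    static _Atomic unsigned long epoch;         /* bumped as "progress" is made */
    static cfg_t *retired_first, *retired_last; /* updater-private retire list */

    static void publish(cfg_t *fresh)
    {
        cfg_t *old = atomic_exchange_explicit(&current_cfg, fresh,
                                              memory_order_release);
        if (!old)
            return;
        /* old copies may still be in use; park them until readers move on */
        old->retired_at = atomic_load(&epoch);
        old->next = NULL;
        if (retired_last)
            retired_last->next = old;
        else
            retired_first = old;
        retired_last = old;
    }

    /* free copies retired at least one full epoch ago in this simplified model */
    static void reclaim(void)
    {
        unsigned long now = atomic_load(&epoch);
        while (retired_first && retired_first->retired_at + 1 <= now) {
            cfg_t *tmp = retired_first;
            retired_first = tmp->next;
            free(tmp);
        }
        if (!retired_first)
            retired_last = NULL;
    }

    int main(void)
    {
        cfg_t *a = calloc(1, sizeof *a), *b = calloc(1, sizeof *b);
        if (!a || !b)
            return 1;
        a->value = 1; b->value = 2;
        publish(a);
        publish(b);                    /* a goes onto the retired list */
        atomic_fetch_add(&epoch, 1);   /* pretend every reader progressed */
        reclaim();                     /* now a can be reclaimed */
        printf("current = %d\n", atomic_load(&current_cfg)->value);
        free(b);
        return 0;
    }

The diff additionally keeps a small freelist of pre-allocated path blocks, which the sketch leaves out.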
*/ + balance_info.forced_check_balance = 1; + + erts_smp_mtx_unlock(&balance_info.update_mtx); + + erts_smp_runq_lock(ERTS_RUNQ_IX(0)); + ERTS_RUNQ_IX(0)->check_balance_reds = 0; + erts_smp_runq_unlock(ERTS_RUNQ_IX(0)); +} + + #endif /* #ifdef ERTS_SMP */ Uint @@ -3674,11 +4363,11 @@ static struct { int limit; int dec_shift; int dec_mask; - void (*check)(ErtsRunQueue *rq); + void (*check)(ErtsRunQueue *rq, Uint32 flags); } wakeup_other; static void -wakeup_other_check(ErtsRunQueue *rq) +wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { @@ -3696,6 +4385,8 @@ wakeup_other_check(ErtsRunQueue *rq) if (rq->wakeup_other > wakeup_other.limit) { int empty_rqs = erts_smp_atomic32_read_acqb(&no_empty_run_queues); + if (flags & ERTS_RUNQ_FLG_PROTECTED) + (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); if (empty_rqs != 0) wake_scheduler_on_empty_runq(rq); rq->wakeup_other = 0; @@ -3740,18 +4431,21 @@ wakeup_other_set_limit(void) } static void -wakeup_other_check_legacy(ErtsRunQueue *rq) +wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { - if (rq->len < 2) { + erts_aint32_t len = rq->len; + if (len < 2) { rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds; if (rq->wakeup_other < 0) rq->wakeup_other = 0; } else if (rq->wakeup_other < wakeup_other.limit) - rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY; + rq->wakeup_other += len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY; else { + if (flags & ERTS_RUNQ_FLG_PROTECTED) + (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) { wake_scheduler_on_empty_runq(rq); rq->wakeup_other = 0; @@ -3908,6 +4602,8 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; awdp->dd.completed_callback = NULL; awdp->dd.completed_arg = NULL; + awdp->later_op.first = NULL; + awdp->later_op.last = NULL; #endif #ifdef ERTS_USE_ASYNC_READY_Q #ifdef ERTS_SMP @@ -3917,6 +4613,7 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->async_ready.queue = NULL; #endif #ifdef ERTS_SMP + awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY; if (!dawwp) { awdp->delayed_wakeup.job = NULL; awdp->delayed_wakeup.sched2jix = NULL; @@ -3971,7 +4668,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); rq->ix = ix; - erts_smp_atomic32_init_nob(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY); /* make sure that the "extra" id correponds to the schedulers * id if the esdp->no <-> ix+1 mapping change. 
@@ -3982,7 +4678,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) rq->waiting = 0; rq->woken = 0; - rq->flags = 0; + ERTS_RUNQ_FLGS_INIT(rq, ERTS_RUNQ_FLG_NONEMPTY); rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; rq->full_reds_history_sum = 0; for (rix = 0; rix < ERTS_FULL_REDS_HISTORY_SIZE; rix++) { @@ -3996,19 +4692,14 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) rq->wakeup_other_reds = 0; rq->halt_in_progress = 0; - rq->procs.len = 0; rq->procs.pending_exiters = NULL; rq->procs.context_switches = 0; rq->procs.reductions = 0; for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { - rq->procs.prio_info[pix].len = 0; + erts_smp_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0); rq->procs.prio_info[pix].max_len = 0; rq->procs.prio_info[pix].reds = 0; - rq->procs.prio_info[pix].migrate.limit.this = 0; - rq->procs.prio_info[pix].migrate.limit.other = 0; - ERTS_DBG_SET_INVALID_RUNQP(rq->procs.prio_info[pix].migrate.runq, - 0x0); if (pix < ERTS_NO_PROC_PRIO_LEVELS - 1) { rq->procs.prio[pix].first = NULL; rq->procs.prio[pix].last = NULL; @@ -4017,14 +4708,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) rq->misc.start = NULL; rq->misc.end = NULL; - rq->misc.evac_runq = NULL; - rq->ports.info.len = 0; + erts_smp_atomic32_init_nob(&rq->ports.info.len, 0); rq->ports.info.max_len = 0; rq->ports.info.reds = 0; - rq->ports.info.migrate.limit.this = 0; - rq->ports.info.migrate.limit.other = 0; - rq->ports.info.migrate.runq = NULL; rq->ports.start = NULL; rq->ports.end = NULL; } @@ -4121,6 +4808,9 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) #ifdef ERTS_SMP daww_ptr += daww_sz; #endif + + esdp->reductions = 0; + init_sched_wall_time(&esdp->sched_wall_time); } @@ -4159,10 +4849,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) balance_info.prev_rise.reds = 0; balance_info.n = 0; + init_migration_paths(); + if (no_schedulers_online < no_schedulers) { + change_no_used_runqs(no_schedulers_online); for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++) - evacuate_run_queue(ERTS_RUNQ_IX(ix), - ERTS_RUNQ_IX(ix % no_schedulers_online)); + suspend_run_queue(ERTS_RUNQ_IX(ix)); } schdlr_sspnd.wait_curr_online = no_schedulers_online; @@ -4225,91 +4917,197 @@ erts_get_scheduler_data(void) #endif -static int remove_proc_from_runq(ErtsRunQueue *rq, Process *p, int to_inactive); +/* + * scheduler_out_process() return with c_rq locked. 
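Note that the per-priority queue lengths initialised here have become atomics; that is what later lets erts_run_queues_len() sum them without taking every run-queue lock, clamping the occasionally negative lock-free snapshot to zero. A tiny sketch of that counter discipline; N_PRIO and the helper names are the sketch's own.

    #include <stdatomic.h>
    #include <stdio.h>

    #define N_PRIO 4   /* max, high, normal, low in this sketch */

    /* per-priority length, readable by other threads without the queue lock */
    static _Atomic int prio_len[N_PRIO];

    static void on_enqueue(int prio)
    {
        atomic_fetch_add_explicit(&prio_len[prio], 1, memory_order_relaxed);
    }

    static void on_dequeue(int prio)
    {
        atomic_fetch_sub_explicit(&prio_len[prio], 1, memory_order_relaxed);
    }

    /* a lock-free reader can observe an increment/decrement pair out of order,
       so a momentarily negative sum is possible and is clamped to zero */
    static int total_len(void)
    {
        int sum = 0, i;
        for (i = 0; i < N_PRIO; i++)
            sum += atomic_load_explicit(&prio_len[i], memory_order_relaxed);
        return sum < 0 ? 0 : sum;
    }

    int main(void)
    {
        on_enqueue(2);
        on_enqueue(2);
        on_dequeue(2);
        printf("runnable: %d\n", total_len());
        return 0;
    }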
+ */ +static ERTS_INLINE int +schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p) +{ + erts_aint32_t a, e, n; + int res = 0; + + a = state; + + while (1) { + n = e = a; + + ASSERT(a & ERTS_PSFLG_RUNNING); + ASSERT(!(a & ERTS_PSFLG_IN_RUNQ)); + + n &= ~ERTS_PSFLG_RUNNING; + if ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) + n |= ERTS_PSFLG_IN_RUNQ; + a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + if (a == e) + break; + } + + if (!(n & ERTS_PSFLG_IN_RUNQ)) { + if (erts_system_profile_flags.runnable_procs) + profile_runnable_proc(p, am_inactive); + } + else { + int prio = (int) (ERTS_PSFLG_PRIO_MASK & n); + ErtsRunQueue *runq = erts_get_runq_proc(p); + + ASSERT(!(n & ERTS_PSFLG_SUSPENDED)); + +#ifdef ERTS_SMP + if (!(ERTS_PSFLG_BOUND & n)) { + ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio); + if (new_runq) { + RUNQ_SET_RQ(&p->run_queue, new_runq); + runq = new_runq; + } + } +#endif + ASSERT(runq); + res = 1; + + erts_smp_runq_lock(runq); + + /* Enqueue the process */ + enqueue_process(runq, prio, p); + + if (runq == c_rq) + return res; + erts_smp_runq_unlock(runq); + smp_notify_inc_runq(runq); + } + erts_smp_runq_lock(c_rq); + return res; +} static ERTS_INLINE void -suspend_process(ErtsRunQueue *rq, Process *p) +add2runq(Process *p, erts_aint32_t state) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); - p->rcount++; /* count number of suspend */ -#ifdef ERTS_SMP - ASSERT(!(p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) - || p == erts_get_current_process()); - ASSERT(p->status != P_RUNNING - || p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING); - if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) - goto runable; -#endif - switch(p->status) { - case P_SUSPENDED: - break; - case P_RUNABLE: + int prio = (int) (ERTS_PSFLG_PRIO_MASK & state); + ErtsRunQueue *runq = erts_get_runq_proc(p); + #ifdef ERTS_SMP - runable: - if (!ERTS_PROC_PENDING_EXIT(p)) + if (!(ERTS_PSFLG_BOUND & state)) { + ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio); + if (new_runq) { + RUNQ_SET_RQ(&p->run_queue, new_runq); + runq = new_runq; + } + } #endif - remove_proc_from_runq(rq, p, 1); - /* else: - * leave process in schedq so it will discover the pending exit - */ - p->rstatus = P_RUNABLE; /* wakeup as runnable */ - break; - case P_RUNNING: - p->rstatus = P_RUNABLE; /* wakeup as runnable */ - break; - case P_WAITING: - p->rstatus = P_WAITING; /* wakeup as waiting */ - break; - case P_EXITING: - return; /* ignore this */ - case P_GARBING: - case P_FREE: - erl_exit(1, "bad state in suspend_process()\n"); + ASSERT(runq); + + erts_smp_runq_lock(runq); + + /* Enqueue the process */ + enqueue_process(runq, prio, p); + + erts_smp_runq_unlock(runq); + smp_notify_inc_runq(runq); + +} + +static ERTS_INLINE void +schedule_process(Process *p, erts_aint32_t state, int active_enq) +{ + erts_aint32_t a = state, n; + + while (1) { + erts_aint32_t e; + n = e = a; + + if (a & ERTS_PSFLG_FREE) + return; /* We don't want to schedule free processes... */ + n |= ERTS_PSFLG_ACTIVE; + if (!(a & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_RUNNING))) + n |= ERTS_PSFLG_IN_RUNQ; + a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + if (a == e) + break; + if (!active_enq && (a & ERTS_PSFLG_ACTIVE)) + return; /* Someone else activated process ... 
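schedule_out_process() above and the schedule_process() logic it pairs with hinge on one idea: the scheduling-relevant facts about a process live in a single atomic state word, and every transition is a cmpxchg loop that recomputes the new word from the freshly observed old one, so exactly one thread wins the right to enqueue. A standalone C11 sketch of those two transitions; the PS_* bits and helper names are the sketch's own, and the real state word also carries the priority in its low bits.

    #include <stdatomic.h>
    #include <stdio.h>

    #define PS_ACTIVE    (1u << 0)
    #define PS_IN_RUNQ   (1u << 1)
    #define PS_RUNNING   (1u << 2)
    #define PS_SUSPENDED (1u << 3)

    typedef struct { _Atomic unsigned state; } proc_t;

    /* scheduler is done with p: drop RUNNING and decide, in the same atomic
       transition, whether p goes back on a run queue */
    static int sched_out(proc_t *p)
    {
        unsigned a = atomic_load_explicit(&p->state, memory_order_acquire);
        unsigned n;
        do {
            n = a & ~PS_RUNNING;
            if ((a & (PS_ACTIVE | PS_SUSPENDED)) == PS_ACTIVE)
                n |= PS_IN_RUNQ;            /* still wants to run: enqueue it */
        } while (!atomic_compare_exchange_weak_explicit(&p->state, &a, n,
                                                        memory_order_acq_rel,
                                                        memory_order_acquire));
        return (n & PS_IN_RUNQ) != 0;       /* caller enqueues iff this is set */
    }

    /* make p runnable; only the thread that flips IN_RUNQ from 0 to 1 enqueues */
    static int make_runnable(proc_t *p)
    {
        unsigned a = atomic_load_explicit(&p->state, memory_order_acquire);
        unsigned n;
        do {
            n = a | PS_ACTIVE;
            if (!(a & (PS_SUSPENDED | PS_RUNNING)))
                n |= PS_IN_RUNQ;
        } while (!atomic_compare_exchange_weak_explicit(&p->state, &a, n,
                                                        memory_order_acq_rel,
                                                        memory_order_acquire));
        return (n & PS_IN_RUNQ) && !(a & PS_IN_RUNQ);
    }

    int main(void)
    {
        proc_t p = { PS_ACTIVE | PS_RUNNING };
        printf("enqueue after sched_out: %d\n", sched_out(&p));
        printf("enqueue after wakeup:    %d\n", make_runnable(&p));
        return 0;
    }

The second call prints 0 because IN_RUNQ was already set, which is exactly how a double enqueue is avoided without holding the run-queue lock.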
*/ } - if ((erts_system_profile_flags.runnable_procs) && (p->rcount == 1) && (p->status != P_WAITING)) { - profile_runnable_proc(p, am_inactive); + if (erts_system_profile_flags.runnable_procs + && !(a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) { + profile_runnable_proc(p, am_active); } - p->status = P_SUSPENDED; - + if ((n & ERTS_PSFLG_IN_RUNQ) && !(a & ERTS_PSFLG_IN_RUNQ)) + add2runq(p, n); } -static ERTS_INLINE void -resume_process(Process *p) +void +erts_schedule_process(Process *p, erts_aint32_t state) +{ + schedule_process(p, state, 0); +} + +static ERTS_INLINE int +suspend_process(Process *c_p, Process *p) { - Uint32 *statusp; + erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + int suspended = 0; ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - switch (p->status) { - case P_SUSPENDED: - statusp = &p->status; - break; - case P_GARBING: - if (p->gcstatus == P_SUSPENDED) { - statusp = &p->gcstatus; - break; + + if ((state & ERTS_PSFLG_SUSPENDED)) + suspended = -1; + else { + if (c_p == p) { + state = erts_smp_atomic32_read_bor_relb(&p->state, + ERTS_PSFLG_SUSPENDED); + state |= ERTS_PSFLG_SUSPENDED; + ASSERT(state & ERTS_PSFLG_RUNNING); + suspended = 1; } - /* Fall through */ - default: - return; + else { + while (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_EXITING))) { + erts_aint32_t e, n; + n = e = state; + n |= ERTS_PSFLG_SUSPENDED; + state = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e); + if (state == e) { + state = n; + suspended = 1; + break; + } + } + } + } + + if (state & ERTS_PSFLG_SUSPENDED) { + + ASSERT(!(ERTS_PSFLG_RUNNING & state) + || p == erts_get_current_process()); + + if (erts_system_profile_flags.runnable_procs + && (p->rcount == 0) + && (state & ERTS_PSFLG_ACTIVE)) { + profile_runnable_proc(p, am_inactive); + } + + p->rcount++; /* count number of suspend */ } + return suspended; +} + +static ERTS_INLINE void +resume_process(Process *p) +{ + erts_aint32_t state; + ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); ASSERT(p->rcount > 0); if (--p->rcount > 0) /* multiple suspend */ return; - switch(p->rstatus) { - case P_RUNABLE: - erts_add_to_runq(p); - break; - case P_WAITING: - *statusp = P_WAITING; - break; - default: - erl_exit(1, "bad state in resume_process()\n"); + + state = erts_smp_atomic32_read_band_mb(&p->state, ~ERTS_PSFLG_SUSPENDED); + state &= ~ERTS_PSFLG_SUSPENDED; + if ((state & (ERTS_PSFLG_EXITING + | ERTS_PSFLG_ACTIVE + | ERTS_PSFLG_IN_RUNQ + | ERTS_PSFLG_RUNNING)) == ERTS_PSFLG_ACTIVE) { + schedule_process(p, state, 1); } - p->rstatus = P_FREE; } int @@ -4433,6 +5231,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) int wake = 0; erts_aint32_t aux_work; int thr_prgr_active = 1; + ErtsStuckBoundProcesses sbp = {NULL, NULL}; /* * Schedulers may be suspended in two different ways: @@ -4447,6 +5246,8 @@ suspend_scheduler(ErtsSchedulerData *esdp) ASSERT(no != 1); + evacuate_run_queue(esdp->run_queue, &sbp); + erts_smp_runq_unlock(esdp->run_queue); erts_sched_check_cpu_bind_prep_suspend(esdp); @@ -4510,19 +5311,28 @@ suspend_scheduler(ErtsSchedulerData *esdp) erts_smp_mtx_unlock(&schdlr_sspnd.mtx); while (1) { + erts_aint32_t qmask; erts_aint32_t flgs; + qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue) + & ERTS_RUNQ_FLGS_QMASK); aux_work = erts_atomic32_read_acqb(&ssi->aux_work); - if (aux_work) { + if (aux_work|qmask) { if (!thr_prgr_active) { erts_thr_progress_active(esdp, thr_prgr_active = 1); sched_wall_time_change(esdp, 1); } - aux_work = handle_aux_work(&esdp->aux_work_data, - aux_work, 
- 1); + if (aux_work) + aux_work = handle_aux_work(&esdp->aux_work_data, + aux_work, + 1); if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); + if (qmask) { + erts_smp_runq_lock(esdp->run_queue); + evacuate_run_queue(esdp->run_queue, &sbp); + erts_smp_runq_unlock(esdp->run_queue); + } } if (!aux_work) { @@ -4592,56 +5402,11 @@ suspend_scheduler(ErtsSchedulerData *esdp) erts_smp_runq_lock(esdp->run_queue); non_empty_runq(esdp->run_queue); + schedule_bound_processes(esdp->run_queue, &sbp); + erts_sched_check_cpu_bind_post_suspend(esdp); } -#define ERTS_RUNQ_RESET_SUSPEND_INFO(RQ, DBG_ID) \ -do { \ - int pix__; \ - (RQ)->misc.evac_runq = NULL; \ - (RQ)->ports.info.migrate.runq = NULL; \ - (RQ)->flags &= ~(ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \ - | ERTS_RUNQ_FLGS_EMIGRATE_QMASK \ - | ERTS_RUNQ_FLGS_EVACUATE_QMASK \ - | ERTS_RUNQ_FLG_SUSPENDED); \ - (RQ)->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK \ - | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); \ - (RQ)->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; \ - erts_smp_atomic32_read_band_nob(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\ - for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) { \ - (RQ)->procs.prio_info[pix__].max_len = 0; \ - (RQ)->procs.prio_info[pix__].reds = 0; \ - ERTS_DBG_SET_INVALID_RUNQP((RQ)->procs.prio_info[pix__].migrate.runq,\ - (DBG_ID)); \ - } \ - (RQ)->ports.info.max_len = 0; \ - (RQ)->ports.info.reds = 0; \ -} while (0) - -#define ERTS_RUNQ_RESET_MIGRATION_PATHS__(RQ) \ -do { \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked((RQ))); \ - (RQ)->misc.evac_runq = NULL; \ - (RQ)->ports.info.migrate.runq = NULL; \ - (RQ)->flags &= ~(ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \ - | ERTS_RUNQ_FLGS_EMIGRATE_QMASK \ - | ERTS_RUNQ_FLGS_EVACUATE_QMASK); \ -} while (0) - -#ifdef DEBUG -#define ERTS_RUNQ_RESET_MIGRATION_PATHS(RQ, DBG_ID) \ -do { \ - int pix__; \ - ERTS_RUNQ_RESET_MIGRATION_PATHS__((RQ)); \ - for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) \ - ERTS_DBG_SET_INVALID_RUNQP((RQ)->procs.prio_info[pix__].migrate.runq,\ - (DBG_ID)); \ -} while (0) -#else -#define ERTS_RUNQ_RESET_MIGRATION_PATHS(RQ, DBG_ID) \ - ERTS_RUNQ_RESET_MIGRATION_PATHS__((RQ)) -#endif - ErtsSchedSuspendResult erts_schedulers_state(Uint *total, Uint *online, @@ -4711,27 +5476,13 @@ erts_set_schedulers_online(Process *p, have_unlocked_plocks = 1; erts_smp_proc_unlock(p, plocks); } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); - erts_smp_mtx_lock(&balance_info.update_mtx); - for (ix = online; ix < no; ix++) { - ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); - erts_smp_runq_lock(rq); - ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x5); - erts_smp_runq_unlock(rq); - scheduler_ix_resume_wake(ix); - } - /* - * Spread evacuation paths among all online - * run queues. 
- */ - for (ix = no; ix < erts_no_run_queues; ix++) { - ErtsRunQueue *from_rq = ERTS_RUNQ_IX(ix); - ErtsRunQueue *to_rq = ERTS_RUNQ_IX(ix % no); - evacuate_run_queue(from_rq, to_rq); - } - set_no_used_runqs(no); - erts_smp_mtx_unlock(&balance_info.update_mtx); - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + change_no_used_runqs(no); + + for (ix = online; ix < no; ix++) + resume_run_queue(ERTS_RUNQ_IX(ix)); + + for (ix = no; ix < erts_no_run_queues; ix++) + suspend_run_queue(ERTS_RUNQ_IX(ix)); } res = ERTS_SCHDLR_SSPND_DONE; } @@ -4758,25 +5509,11 @@ erts_set_schedulers_online(Process *p, have_unlocked_plocks = 1; erts_smp_proc_unlock(p, plocks); } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); - erts_smp_mtx_lock(&balance_info.update_mtx); - - for (ix = 0; ix < online; ix++) { - ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); - erts_smp_runq_lock(rq); - ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x6); - erts_smp_runq_unlock(rq); - } - /* - * Evacutation order important! Newly suspended run queues - * has to be evacuated last. - */ - for (ix = erts_no_run_queues-1; ix >= no; ix--) - evacuate_run_queue(ERTS_RUNQ_IX(ix), - ERTS_RUNQ_IX(ix % no)); - set_no_used_runqs(no); - erts_smp_mtx_unlock(&balance_info.update_mtx); - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + + change_no_used_runqs(no); + for (ix = no; ix < erts_no_run_queues; ix++) + suspend_run_queue(ERTS_RUNQ_IX(ix)); + for (ix = no; ix < online; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); wake_scheduler(rq, 0); @@ -4873,25 +5610,14 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) schdlr_sspnd.msb.wait_active = 2; } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); - erts_smp_mtx_lock(&balance_info.update_mtx); - set_no_used_runqs(1); - for (ix = 0; ix < online; ix++) { + change_no_used_runqs(1); + for (ix = 1; ix < erts_no_run_queues; ix++) + suspend_run_queue(ERTS_RUNQ_IX(ix)); + + for (ix = 1; ix < online; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); - erts_smp_runq_lock(rq); - ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED)); - ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7); - erts_smp_runq_unlock(rq); + wake_scheduler(rq, 0); } - /* - * Evacuate all activities in all other run queues - * into the first run queue. Note order is important, - * online run queues has to be evacuated last. 
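With the explicit evacuation loops gone, the online-scheduler changes above and the multi-scheduling block further down reduce to the same choreography: repoint the migration paths with change_no_used_runqs(), resume any queue that just came online, and suspend every queue above the new limit, letting each suspended scheduler evacuate its own queue when it parks. A condensed, stubbed-out sketch of that control flow; TOTAL_RQS and the printing stubs are placeholders, and the real code also wakes affected schedulers and synchronises on schdlr_sspnd.

    #include <stdio.h>

    #define TOTAL_RQS 8   /* assumed total number of run queues */

    static void change_no_used_runqs(int used) { printf("use %d run queues\n", used); }
    static void resume_run_queue(int ix)       { printf("resume rq %d\n", ix); }
    static void suspend_run_queue(int ix)      { printf("suspend rq %d\n", ix); }

    /* grow or shrink the set of online run queues */
    static void set_online(int old_online, int new_online)
    {
        int ix;
        change_no_used_runqs(new_online);
        for (ix = old_online; ix < new_online; ix++)   /* growing only */
            resume_run_queue(ix);
        for (ix = new_online; ix < TOTAL_RQS; ix++)
            suspend_run_queue(ix);
    }

    int main(void)
    {
        set_online(2, 4);   /* grow */
        set_online(4, 2);   /* shrink */
        return 0;
    }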
- */ - for (ix = erts_no_run_queues-1; ix >= 1; ix--) - evacuate_run_queue(ERTS_RUNQ_IX(ix), ERTS_RUNQ_IX(0)); - erts_smp_mtx_unlock(&balance_info.update_mtx); - erts_smp_mtx_lock(&schdlr_sspnd.mtx); if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active) != schdlr_sspnd.msb.wait_active) { @@ -4935,15 +5661,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) plp = proclist_create(p); plp->next = schdlr_sspnd.msb.procs; schdlr_sspnd.msb.procs = plp; -#ifdef DEBUG - ERTS_FOREACH_RUNQ(srq, - { - if (srq != ERTS_RUNQ_IX(0)) { - ASSERT(ERTS_EMPTY_RUNQ(srq)); - ASSERT(srq->flags & ERTS_RUNQ_FLG_SUSPENDED); - } - }); -#endif ASSERT(p->scheduler_data); } } @@ -4975,27 +5692,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED; else { ERTS_SCHDLR_SSPND_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0); -#ifdef DEBUG - ERTS_FOREACH_RUNQ(rq, - { - if (rq != p->scheduler_data->run_queue) { - if (!ERTS_EMPTY_RUNQ(rq)) { - Process *rp; - int pix; - ASSERT(rq->ports.info.len == 0); - for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { - for (rp = rq->procs.prio[pix].first; - rp; - rp = rp->next) { - ASSERT(rp->bound_runq); - } - } - } - - ASSERT(rq->flags & ERTS_RUNQ_FLG_SUSPENDED); - } - }); -#endif p->flags &= ~F_HAVE_BLCKD_MSCHED; schdlr_sspnd.msb.ongoing = 0; if (schdlr_sspnd.online == 1) { @@ -5005,35 +5701,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) } else { int online = schdlr_sspnd.online; - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); if (plocks) { have_unlocked_plocks = 1; erts_smp_proc_unlock(p, plocks); } - erts_smp_mtx_lock(&balance_info.update_mtx); + + change_no_used_runqs(online); /* Resume all online run queues */ - for (ix = 1; ix < online; ix++) { - ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); - erts_smp_runq_lock(rq); - ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x4); - erts_smp_runq_unlock(rq); - scheduler_ix_resume_wake(ix); - } + for (ix = 1; ix < online; ix++) + resume_run_queue(ERTS_RUNQ_IX(ix)); - /* Spread evacuation paths among all online run queues */ for (ix = online; ix < erts_no_run_queues; ix++) - evacuate_run_queue(ERTS_RUNQ_IX(ix), - ERTS_RUNQ_IX(ix % online)); - - set_no_used_runqs(online); - /* Make sure that we balance soon... 
*/ - balance_info.forced_check_balance = 1; - erts_smp_runq_lock(ERTS_RUNQ_IX(0)); - ERTS_RUNQ_IX(0)->check_balance_reds = 0; - erts_smp_runq_unlock(ERTS_RUNQ_IX(0)); - erts_smp_mtx_unlock(&balance_info.update_mtx); - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + suspend_run_queue(ERTS_RUNQ_IX(ix)); } res = ERTS_SCHDLR_SSPND_DONE; } @@ -5340,10 +6020,7 @@ handle_pend_sync_suspend(Process *suspendee, if (suspender) { ASSERT(is_nil(suspender->suspendee)); if (suspendee_alive) { - ErtsRunQueue *rq = erts_get_runq_proc(suspendee); - erts_smp_runq_lock(rq); - suspend_process(rq, suspendee); - erts_smp_runq_unlock(rq); + erts_suspend(suspendee, suspendee_locks, NULL); suspender->suspendee = suspendee->id; } /* suspender is suspended waiting for suspendee to suspend; @@ -5386,10 +6063,9 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, resume_process(rp); } else { - ErtsRunQueue *cp_rq, *rp_rq; rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, - pid, ERTS_PROC_LOCK_STATUS); + pid, pid_locks|ERTS_PROC_LOCK_STATUS); if (!rp) { c_p->flags &= ~F_P2PNR_RESCHED; @@ -5398,58 +6074,36 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, ASSERT(!(c_p->flags & F_P2PNR_RESCHED)); - cp_rq = erts_get_runq_proc(c_p); - rp_rq = erts_get_runq_proc(rp); - erts_smp_runqs_lock(cp_rq, rp_rq); - if (rp->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) { - running: - /* Phiu... */ - - /* - * If we got pending suspenders and suspend ourselves waiting - * to suspend another process we might deadlock. - * In this case we have to yield, be suspended by - * someone else and then do it all over again. - */ - if (!c_p->pending_suspenders) { - /* Mark rp pending for suspend by c_p */ - add_pend_suspend(rp, c_p->id, handle_pend_sync_suspend); - ASSERT(is_nil(c_p->suspendee)); - - /* Suspend c_p; when rp is suspended c_p will be resumed. */ - suspend_process(cp_rq, c_p); - c_p->flags |= F_P2PNR_RESCHED; - } - /* Yield (caller is assumed to yield immediately in bif). */ - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - rp = ERTS_PROC_LOCK_BUSY; + if (suspend) { + if (suspend_process(c_p, rp)) + goto done; } else { - ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; - if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { - erts_smp_runqs_unlock(cp_rq, rp_rq); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, - pid, pid_locks|ERTS_PROC_LOCK_STATUS); - if (!rp) - goto done; - /* run-queues may have changed */ - cp_rq = erts_get_runq_proc(c_p); - rp_rq = erts_get_runq_proc(rp); - erts_smp_runqs_lock(cp_rq, rp_rq); - if (rp->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) { - /* Ahh... */ - erts_smp_proc_unlock(rp, - pid_locks & ~ERTS_PROC_LOCK_STATUS); - goto running; - } - } + if (!(ERTS_PSFLG_RUNNING & erts_smp_atomic32_read_acqb(&rp->state))) + goto done; + + } + + /* Other process running */ + + /* + * If we got pending suspenders and suspend ourselves waiting + * to suspend another process we might deadlock. + * In this case we have to yield, be suspended by + * someone else and then do it all over again. + */ + if (!c_p->pending_suspenders) { + /* Mark rp pending for suspend by c_p */ + add_pend_suspend(rp, c_p->id, handle_pend_sync_suspend); + ASSERT(is_nil(c_p->suspendee)); - /* rp is not running and we got the locks we want... */ - if (suspend) - suspend_process(rp_rq, rp); + /* Suspend c_p; when rp is suspended c_p will be resumed. 
*/ + suspend_process(c_p, c_p); + c_p->flags |= F_P2PNR_RESCHED; } - erts_smp_runqs_unlock(cp_rq, rp_rq); + /* Yield (caller is assumed to yield immediately in bif). */ + erts_smp_proc_unlock(rp, pid_locks|ERTS_PROC_LOCK_STATUS); + rp = ERTS_PROC_LOCK_BUSY; } done: @@ -5506,36 +6160,26 @@ erts_pid2proc_nropt(Process *c_p, ErtsProcLocks c_p_locks, return erts_pid2proc_not_running(c_p, c_p_locks, pid, pid_locks); } -static ERTS_INLINE void -do_bif_suspend_process(ErtsSuspendMonitor *smon, - Process *suspendee, - ErtsRunQueue *locked_runq) +static ERTS_INLINE int +do_bif_suspend_process(Process *c_p, + ErtsSuspendMonitor *smon, + Process *suspendee) { ASSERT(suspendee); - ASSERT(!suspendee->is_exiting); + ASSERT(!ERTS_PROC_IS_EXITING(suspendee)); ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(suspendee)); if (smon) { if (!smon->active) { - ErtsRunQueue *rq; - - if (locked_runq) - rq = locked_runq; - else { - rq = erts_get_runq_proc(suspendee); - erts_smp_runq_lock(rq); - } - - suspend_process(rq, suspendee); - - if (!locked_runq) - erts_smp_runq_unlock(rq); + if (!suspend_process(c_p, suspendee)) + return 0; } smon->active += smon->pending; ASSERT(smon->active); smon->pending = 0; + return 1; } - + return 0; } static void @@ -5558,10 +6202,17 @@ handle_pend_bif_sync_suspend(Process *suspendee, erts_delete_suspend_monitor(&suspender->suspend_monitors, suspendee->id); else { +#ifdef DEBUG + int res; +#endif ErtsSuspendMonitor *smon; smon = erts_lookup_suspend_monitor(suspender->suspend_monitors, suspendee->id); - do_bif_suspend_process(smon, suspendee, NULL); +#ifdef DEBUG + res = +#endif + do_bif_suspend_process(suspendee, smon, suspendee); + ASSERT(!smon || res != 0); suspender->suspendee = suspendee->id; } /* suspender is suspended waiting for suspendee to suspend; @@ -5593,10 +6244,17 @@ handle_pend_bif_async_suspend(Process *suspendee, erts_delete_suspend_monitor(&suspender->suspend_monitors, suspendee->id); else { +#ifdef DEBUG + int res; +#endif ErtsSuspendMonitor *smon; smon = erts_lookup_suspend_monitor(suspender->suspend_monitors, suspendee->id); - do_bif_suspend_process(smon, suspendee, NULL); +#ifdef DEBUG + res = +#endif + do_bif_suspend_process(suspendee, smon, suspendee); + ASSERT(!smon || res != 0); } erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK); } @@ -5681,7 +6339,8 @@ suspend_process_2(BIF_ALIST_2) /* This is really a piece of cake without SMP support... 
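The reworked do_bif_suspend_process() above keeps two counters per suspend monitor: requests made while the suspend could not yet be applied stay in pending, and are folded into active once the target is actually suspended; if the target is running, the caller instead registers a pending-suspend hook and retries from there. A small model of that accounting, not ERTS code; suspend_monitor_t and try_suspend_now() are illustrative stand-ins.

    #include <stdio.h>

    typedef struct { int pending; int active; } suspend_monitor_t;

    /* stands in for suspend_process(); fails if the target is running now */
    static int try_suspend_now(int target_is_running) { return !target_is_running; }

    /* returns 1 if the suspend took effect, 0 if it stays pending */
    static int bif_suspend(suspend_monitor_t *smon, int target_is_running)
    {
        smon->pending++;
        if (smon->active || try_suspend_now(target_is_running)) {
            smon->active += smon->pending;   /* fold requests into active count */
            smon->pending = 0;
            return 1;
        }
        return 0;                            /* picked up later by a pending-suspend hook */
    }

    int main(void)
    {
        suspend_monitor_t smon = {0, 0};
        int r1, r2;
        r1 = bif_suspend(&smon, 1);
        printf("target running:     applied=%d pending=%d\n", r1, smon.pending);
        r2 = bif_suspend(&smon, 0);
        printf("target not running: applied=%d active=%d\n", r2, smon.active);
        return 0;
    }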
*/ if (!smon->active) { - suspend_process(ERTS_RUNQ_IX(0), suspendee); + erts_smp_atomic32_read_bor_nob(&suspendee->state, ERTS_PSFLG_SUSPENDED); + suspend_process(BIF_P, suspendee); smon->active++; res = am_true; } @@ -5724,21 +6383,15 @@ suspend_process_2(BIF_ALIST_2) if (smon->pending && unless_suspending) res = am_false; else { - ErtsRunQueue *rq; if (smon->pending == INT_MAX) goto system_limit; smon->pending++; - rq = erts_get_runq_proc(suspendee); - erts_smp_runq_lock(rq); - if (suspendee->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) + if (!do_bif_suspend_process(BIF_P, smon, suspendee)) add_pend_suspend(suspendee, BIF_P->id, handle_pend_bif_async_suspend); - else - do_bif_suspend_process(smon, suspendee, rq); - erts_smp_runq_unlock(rq); res = am_true; } @@ -5775,7 +6428,6 @@ suspend_process_2(BIF_ALIST_2) /* done */ } else { - ErtsRunQueue *cp_rq, *s_rq; /* We haven't got any active suspends on the suspendee */ /* @@ -5792,12 +6444,7 @@ suspend_process_2(BIF_ALIST_2) if (!unless_suspending || smon->pending == 0) smon->pending++; - cp_rq = erts_get_runq_proc(BIF_P); - s_rq = erts_get_runq_proc(suspendee); - erts_smp_runqs_lock(cp_rq, s_rq); - if (!(suspendee->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING)) { - do_bif_suspend_process(smon, suspendee, s_rq); - erts_smp_runqs_unlock(cp_rq, s_rq); + if (do_bif_suspend_process(BIF_P, smon, suspendee)) { res = (!unless_suspending || smon->active == 1 ? am_true : am_false); @@ -5817,8 +6464,7 @@ suspend_process_2(BIF_ALIST_2) * This time with BIF_P->suspendee == BIF_ARG_1 (see * above). */ - suspend_process(cp_rq, BIF_P); - erts_smp_runqs_unlock(cp_rq, s_rq); + suspend_process(BIF_P, BIF_P); goto yield; } } @@ -5826,9 +6472,15 @@ suspend_process_2(BIF_ALIST_2) } #endif /* ERTS_SMP */ - - ASSERT(suspendee->status == P_SUSPENDED || (asynchronous && smon->pending)); - ASSERT(suspendee->status == P_SUSPENDED || !smon->active); +#ifdef DEBUG + { + erts_aint32_t state = erts_smp_atomic32_read_acqb(&suspendee->state); + ASSERT((state & ERTS_PSFLG_SUSPENDED) + || (asynchronous && smon->pending)); + ASSERT((state & ERTS_PSFLG_SUSPENDED) + || !smon->active); + } +#endif erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(BIF_P, xlocks); @@ -5923,9 +6575,8 @@ resume_process_1(BIF_ALIST_1) if (!suspendee) goto no_suspendee; - ASSERT(suspendee->status == P_SUSPENDED - || (suspendee->status == P_GARBING - && suspendee->gcstatus == P_SUSPENDED)); + ASSERT(ERTS_PSFLG_SUSPENDED + & erts_smp_atomic32_read_nob(&suspendee->state)); resume_process(suspendee); erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); @@ -5954,449 +6605,46 @@ erts_run_queues_len(Uint *qlen) Uint len = 0; ERTS_ATOMIC_FOREACH_RUNQ(rq, { - if (qlen) - qlen[i++] = rq->procs.len; - len += rq->procs.len; + Sint pqlen = 0; + int pix; + for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) + pqlen += RUNQ_READ_LEN(&rq->procs.prio_info[pix].len); + + if (pqlen < 0) + pqlen = 0; + if (qlen) + qlen[i++] = pqlen; + len += pqlen; } ); return len; } -#ifdef HARDDEBUG_RUNQS -static void -check_procs_runq(ErtsRunQueue *runq, Process *p_in_q, Process *p_not_in_q) -{ - int len[ERTS_NO_PROC_PRIO_LEVELS] = {0}; - int tot_len; - int prioq, prio; - int found_p_in_q; - Process *p, *prevp; - - found_p_in_q = 0; - for (prioq = 0; prioq < ERTS_NO_PROC_PRIO_LEVELS - 1; prioq++) { - prevp = NULL; - for (p = runq->procs.prio[prioq].first; p; p = p->next) { - ASSERT(p != p_not_in_q); - if (p == p_in_q) - found_p_in_q = 1; - switch (p->prio) { - case PRIORITY_MAX: - case PRIORITY_HIGH: - case 
PRIORITY_NORMAL: - ASSERT(prioq == p->prio); - break; - case PRIORITY_LOW: - ASSERT(prioq == PRIORITY_NORMAL); - break; - default: - ASSERT(!"Bad prio on process"); - } - len[p->prio]++; - ASSERT(prevp == p->prev); - if (p->prev) { - ASSERT(p->prev->next == p); - } - else { - ASSERT(runq->procs.prio[prioq].first == p); - } - if (p->next) { - ASSERT(p->next->prev == p); - } - else { - ASSERT(runq->procs.prio[prioq].last == p); - } - ASSERT(p->run_queue == runq); - prevp = p; - } - } - - ASSERT(!p_in_q || found_p_in_q); - - tot_len = 0; - for (prio = 0; prio < ERTS_NO_PROC_PRIO_LEVELS; prio++) { - ASSERT(len[prio] == runq->procs.prio_info[prio].len); - if (len[prio]) { - ASSERT(runq->flags & (1 << prio)); - } - else { - ASSERT(!(runq->flags & (1 << prio))); - } - tot_len += len[prio]; - } - ASSERT(runq->procs.len == tot_len); -} -# define ERTS_DBG_CHK_PROCS_RUNQ(RQ) check_procs_runq((RQ), NULL, NULL) -# define ERTS_DBG_CHK_PROCS_RUNQ_PROC(RQ, P) check_procs_runq((RQ), (P), NULL) -# define ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(RQ, P) check_procs_runq((RQ), NULL, (P)) -#else -# define ERTS_DBG_CHK_PROCS_RUNQ(RQ) -# define ERTS_DBG_CHK_PROCS_RUNQ_PROC(RQ, P) -# define ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(RQ, P) -#endif - - -static ERTS_INLINE void -enqueue_process(ErtsRunQueue *runq, Process *p) -{ - ErtsRunPrioQueue *rpq; - ErtsRunQueueInfo *rqi; - - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - - ASSERT(p->bound_runq || !(runq->flags & ERTS_RUNQ_FLG_SUSPENDED)); - - rqi = &runq->procs.prio_info[p->prio]; - rqi->len++; - if (rqi->max_len < rqi->len) - rqi->max_len = rqi->len; - - runq->procs.len++; - runq->len++; - if (runq->max_len < runq->len) - runq->max_len = runq->len; - - runq->flags |= (1 << p->prio); - - rpq = (p->prio == PRIORITY_LOW - ? &runq->procs.prio[PRIORITY_NORMAL] - : &runq->procs.prio[p->prio]); - - p->next = NULL; - p->prev = rpq->last; - if (rpq->last) - rpq->last->next = p; - else - rpq->first = p; - rpq->last = p; - - switch (p->status) { - case P_EXITING: - break; - case P_GARBING: - p->gcstatus = P_RUNABLE; - break; - default: - p->status = P_RUNABLE; - break; - } - -#ifdef ERTS_SMP - p->status_flags |= ERTS_PROC_SFLG_INRUNQ; -#endif - - ERTS_DBG_CHK_PROCS_RUNQ_PROC(runq, p); -} - - -static ERTS_INLINE int -dequeue_process(ErtsRunQueue *runq, Process *p) -{ - ErtsRunPrioQueue *rpq; - int res = 1; - - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - - ERTS_DBG_CHK_PROCS_RUNQ(runq); - - rpq = &runq->procs.prio[p->prio == PRIORITY_LOW ? 
PRIORITY_NORMAL : p->prio]; - if (p->prev) { - p->prev->next = p->next; - } - else if (rpq->first == p) { - rpq->first = p->next; - } - else { - res = 0; - } - if (p->next) { - p->next->prev = p->prev; - } - else if (rpq->last == p) { - rpq->last = p->prev; - } - else { - ASSERT(res == 0); - } - - if (res) { - - if (--runq->procs.prio_info[p->prio].len == 0) - runq->flags &= ~(1 << p->prio); - runq->procs.len--; - runq->len--; - -#ifdef ERTS_SMP - p->status_flags &= ~ERTS_PROC_SFLG_INRUNQ; -#endif - } - - ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p); - return res; -} - -/* schedule a process */ -static ERTS_INLINE ErtsRunQueue * -internal_add_to_runq(ErtsRunQueue *runq, Process *p) -{ - Uint32 prev_status = p->status; - ErtsRunQueue *add_runq; -#ifdef ERTS_SMP - - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); - - if (p->status_flags & ERTS_PROC_SFLG_INRUNQ) - return NULL; - else if (p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) { - ASSERT(ERTS_PROC_IS_EXITING(p) || p->rcount == 0); - ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p); - p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ; - return NULL; - } - ASSERT(!p->scheduler_data); -#endif - - ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p); -#ifndef ERTS_SMP - /* Never schedule a suspended process (ok in smp case) */ - ASSERT(ERTS_PROC_IS_EXITING(p) || p->rcount == 0); - add_runq = runq; -#else - ASSERT(!p->bound_runq || p->bound_runq == p->run_queue); - if (p->bound_runq) { - if (p->bound_runq == runq) - add_runq = runq; - else { - add_runq = p->bound_runq; - erts_smp_xrunq_lock(runq, add_runq); - } - } - else { - add_runq = erts_check_emigration_need(runq, p->prio); - if (!add_runq) - add_runq = runq; - else /* Process emigrated */ - p->run_queue = add_runq; - } -#endif - - /* Enqueue the process */ - enqueue_process(add_runq, p); - - if ((erts_system_profile_flags.runnable_procs) - && (prev_status == P_WAITING - || prev_status == P_SUSPENDED)) { - profile_runnable_proc(p, am_active); - } - - if (add_runq != runq) - erts_smp_runq_unlock(add_runq); - - return add_runq; -} - - -void -erts_add_to_runq(Process *p) -{ - ErtsRunQueue *notify_runq; - ErtsRunQueue *runq = erts_get_runq_proc(p); - erts_smp_runq_lock(runq); - notify_runq = internal_add_to_runq(runq, p); - erts_smp_runq_unlock(runq); - smp_notify_inc_runq(notify_runq); - -} - -/* Possibly remove a scheduled process we need to suspend */ - -static int -remove_proc_from_runq(ErtsRunQueue *rq, Process *p, int to_inactive) -{ - int res; - - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - -#ifdef ERTS_SMP - if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) { - p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ; - ASSERT(!remove_proc_from_runq(rq, p, 0)); - return 1; - } -#endif - - res = dequeue_process(rq, p); - - if (res && erts_system_profile_flags.runnable_procs && to_inactive) - profile_runnable_proc(p, am_inactive); - -#ifdef ERTS_SMP - ASSERT(!(p->status_flags & ERTS_PROC_SFLG_INRUNQ)); -#endif - - return res; -} - -#ifdef ERTS_SMP - -ErtsMigrateResult -erts_proc_migrate(Process *p, ErtsProcLocks *plcks, - ErtsRunQueue *from_rq, int *from_locked, - ErtsRunQueue *to_rq, int *to_locked) -{ - ERTS_SMP_LC_ASSERT(*plcks == erts_proc_lc_my_proc_locks(p)); - ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_STATUS & *plcks) - || from_locked); - ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked); - ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked); - - /* - * If we have the lock on the run queue to migrate to, - * 
check that it isn't suspended. If it is suspended, - * we will refuse to migrate to it anyway. - */ - if (*to_locked && (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED)) - return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED; - - /* We need status lock on process and locks on both run queues */ - - if (!(ERTS_PROC_LOCK_STATUS & *plcks)) { - if (erts_smp_proc_trylock(p, ERTS_PROC_LOCK_STATUS) == EBUSY) { - ErtsProcLocks lcks = *plcks; - Eterm pid = p->id; - Process *proc = *plcks ? p : NULL; - - if (*from_locked) { - *from_locked = 0; - erts_smp_runq_unlock(from_rq); - } - if (*to_locked) { - *to_locked = 0; - erts_smp_runq_unlock(to_rq); - } - - proc = erts_pid2proc_opt(proc, - lcks, - pid, - lcks|ERTS_PROC_LOCK_STATUS, - ERTS_P2P_FLG_ALLOW_OTHER_X); - if (!proc) { - *plcks = 0; - return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ; - } - ASSERT(proc == p); - } - *plcks |= ERTS_PROC_LOCK_STATUS; - } - - ASSERT(!p->bound_runq); - - ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked); - ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked); - - if (p->run_queue != from_rq) - return ERTS_MIGRATE_FAILED_RUNQ_CHANGED; - - if (!*from_locked || !*to_locked) { - if (from_rq < to_rq) { - if (!*to_locked) { - if (!*from_locked) - erts_smp_runq_lock(from_rq); - erts_smp_runq_lock(to_rq); - } - else if (erts_smp_runq_trylock(from_rq) == EBUSY) { - erts_smp_runq_unlock(to_rq); - erts_smp_runq_lock(from_rq); - erts_smp_runq_lock(to_rq); - } - } - else { - if (!*from_locked) { - if (!*to_locked) - erts_smp_runq_lock(to_rq); - erts_smp_runq_lock(from_rq); - } - else if (erts_smp_runq_trylock(to_rq) == EBUSY) { - erts_smp_runq_unlock(from_rq); - erts_smp_runq_lock(to_rq); - erts_smp_runq_lock(from_rq); - } - } - *to_locked = *from_locked = 1; - } - - ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked); - ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked); - - /* Ok we now got all locks we need; do it... */ - - /* Refuse to migrate to a suspended run queue */ - if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED) - return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED; - - if ((p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) - || !(p->status_flags & ERTS_PROC_SFLG_INRUNQ)) - return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ; - - dequeue_process(from_rq, p); - p->run_queue = to_rq; - enqueue_process(to_rq, p); - - return ERTS_MIGRATE_SUCCESS; -} -#endif /* ERTS_SMP */ - Eterm erts_process_status(Process *c_p, ErtsProcLocks c_p_locks, Process *rp, Eterm rpid) { Eterm res = am_undefined; - Process *p; - - if (rp) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS - & erts_proc_lc_my_proc_locks(rp)); - p = rp; - } - else { - p = erts_pid2proc_opt(c_p, c_p_locks, - rpid, ERTS_PROC_LOCK_STATUS, - ERTS_P2P_FLG_ALLOW_OTHER_X); - } + Process *p = rp ? rp : erts_proc_lookup_raw(rpid); if (p) { - switch (p->status) { - case P_RUNABLE: - res = am_runnable; - break; - case P_WAITING: - res = am_waiting; - break; - case P_RUNNING: - res = am_running; - break; - case P_EXITING: + erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + if (state & ERTS_PSFLG_FREE) + res = am_free; + else if (state & ERTS_PSFLG_EXITING) res = am_exiting; - break; - case P_GARBING: + else if (state & ERTS_PSFLG_GC) res = am_garbage_collecting; - break; - case P_SUSPENDED: + else if (state & ERTS_PSFLG_SUSPENDED) res = am_suspended; - break; - case P_FREE: /* We cannot look up a process in P_FREE... */ - default: /* Not a valid status... 
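The replacement for this status switch, visible just below, no longer reads a stored status field at all: the answer is derived from the atomic state word, testing the most specific flag first. A compact sketch of that derivation; the PS_* values and strings are the sketch's own, mirroring the order of the ERTS_PSFLG_* checks in the diff.

    #include <stdio.h>

    #define PS_FREE      (1u << 0)
    #define PS_EXITING   (1u << 1)
    #define PS_GC        (1u << 2)
    #define PS_SUSPENDED (1u << 3)
    #define PS_RUNNING   (1u << 4)
    #define PS_ACTIVE    (1u << 5)

    /* derive a status from the state word, most specific flag first */
    static const char *status_of(unsigned state)
    {
        if (state & PS_FREE)      return "free";
        if (state & PS_EXITING)   return "exiting";
        if (state & PS_GC)        return "garbage_collecting";
        if (state & PS_SUSPENDED) return "suspended";
        if (state & PS_RUNNING)   return "running";
        if (state & PS_ACTIVE)    return "runnable";
        return "waiting";
    }

    int main(void)
    {
        printf("%s\n", status_of(PS_ACTIVE | PS_RUNNING));   /* -> running */
        printf("%s\n", status_of(0));                        /* -> waiting */
        return 0;
    }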
*/ - erl_exit(1, "Bad status (%b32u) found for process %T\n", - p->status, p->id); - break; - } - -#ifdef ERTS_SMP - if (!rp && (p != c_p || !(ERTS_PROC_LOCK_STATUS & c_p_locks))) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + else if (state & ERTS_PSFLG_RUNNING) + res = am_running; + else if (state & ERTS_PSFLG_ACTIVE) + res = am_runnable; + else + res = am_waiting; } +#ifdef ERTS_SMP else { int i; ErtsSchedulerData *esdp; @@ -6411,42 +6659,40 @@ erts_process_status(Process *c_p, ErtsProcLocks c_p_locks, } erts_smp_runq_unlock(esdp->run_queue); } - -#endif - } - +#endif return res; } /* -** Suspend a process +** Suspend a currently executing process ** If we are to suspend on a port the busy_port is the thing ** otherwise busy_port is NIL */ void -erts_suspend(Process* process, ErtsProcLocks process_locks, Port *busy_port) +erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) { - ErtsRunQueue *rq; - - ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process)); - if (!(process_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS); - - rq = erts_get_runq_proc(process); - - erts_smp_runq_lock(rq); +#ifdef DEBUG + int res; +#endif + ASSERT(c_p == erts_get_current_process()); + ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); + if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); - suspend_process(rq, process); +#ifdef DEBUG + res = +#endif + suspend_process(c_p, c_p); - erts_smp_runq_unlock(rq); + ASSERT(res); if (busy_port) - erts_wake_process_later(busy_port, process); + erts_wake_process_later(busy_port, c_p); - if (!(process_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS); + if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); } @@ -6487,57 +6733,54 @@ erts_resume_processes(ErtsProcList *plp) Eterm erts_get_process_priority(Process *p) { - ErtsRunQueue *rq; - Eterm value; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - rq = erts_get_runq_proc(p); - erts_smp_runq_lock(rq); - switch(p->prio) { - case PRIORITY_MAX: value = am_max; break; - case PRIORITY_HIGH: value = am_high; break; - case PRIORITY_NORMAL: value = am_normal; break; - case PRIORITY_LOW: value = am_low; break; - default: ASSERT(0); value = am_undefined; break; + erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + switch (state & ERTS_PSFLG_PRIO_MASK) { + case PRIORITY_MAX: return am_max; + case PRIORITY_HIGH: return am_high; + case PRIORITY_NORMAL: return am_normal; + case PRIORITY_LOW: return am_low; + default: ASSERT(0); return am_undefined; } - erts_smp_runq_unlock(rq); - return value; } Eterm -erts_set_process_priority(Process *p, Eterm new_value) +erts_set_process_priority(Process *p, Eterm value) { - ErtsRunQueue *rq; - Eterm old_value; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - rq = erts_get_runq_proc(p); -#ifdef ERTS_SMP - ASSERT(!(p->status_flags & ERTS_PROC_SFLG_INRUNQ)); -#endif - erts_smp_runq_lock(rq); - switch(p->prio) { - case PRIORITY_MAX: old_value = am_max; break; - case PRIORITY_HIGH: old_value = am_high; break; - case PRIORITY_NORMAL: old_value = am_normal; break; - case PRIORITY_LOW: old_value = am_low; break; - default: ASSERT(0); old_value = am_undefined; break; - } - switch (new_value) { - case am_max: p->prio = PRIORITY_MAX; break; - case am_high: p->prio = PRIORITY_HIGH; break; - case am_normal: p->prio = PRIORITY_NORMAL; 
break; - case am_low: p->prio = PRIORITY_LOW; break; - default: old_value = THE_NON_VALUE; break; + erts_aint32_t a, oprio, nprio; + + switch (value) { + case am_max: nprio = (erts_aint32_t) PRIORITY_MAX; break; + case am_high: nprio = (erts_aint32_t) PRIORITY_HIGH; break; + case am_normal: nprio = (erts_aint32_t) PRIORITY_NORMAL; break; + case am_low: nprio = (erts_aint32_t) PRIORITY_LOW; break; + default: return THE_NON_VALUE; break; } - erts_smp_runq_unlock(rq); - return old_value; -} -/* note that P_RUNNING is only set so that we don't try to remove -** running processes from the schedule queue if they exit - a running -** process not being in the schedule queue!! -** Schedule for up to INPUT_REDUCTIONS context switches, -** return 1 if more to do. -*/ + a = erts_smp_atomic32_read_nob(&p->state); + if (nprio == (a & ERTS_PSFLG_PRIO_MASK)) + oprio = nprio; + else { + erts_aint32_t e, n; + do { + oprio = a & ERTS_PSFLG_PRIO_MASK; + n = e = a; + + ASSERT(!(a & ERTS_PSFLG_IN_RUNQ)); + + n &= ~ERTS_PSFLG_PRIO_MASK; + n |= nprio; + a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + } while (a != e); + } + + switch (oprio) { + case PRIORITY_MAX: return am_max; + case PRIORITY_HIGH: return am_high; + case PRIORITY_NORMAL: return am_normal; + case PRIORITY_LOW: return am_low; + default: ASSERT(0); return am_undefined; + } +} /* * schedule() is called from BEAM (process_main()) or HiPE @@ -6560,7 +6803,6 @@ erts_set_process_priority(Process *p, Eterm new_value) Process *schedule(Process *p, int calls) { ErtsRunQueue *rq; - ErtsRunPrioQueue *rpq; erts_aint_t dt; ErtsSchedulerData *esdp; int context_reds; @@ -6568,6 +6810,8 @@ Process *schedule(Process *p, int calls) int input_reductions; int actual_reds; int reds; + Uint32 flags; + erts_aint32_t state = 0; /* Supress warning... */ #ifdef USE_VM_PROBES if (p != NULL && DTRACE_ENABLED(process_unscheduled)) { @@ -6623,92 +6867,62 @@ Process *schedule(Process *p, int calls) erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - if ((erts_system_profile_flags.runnable_procs) - && (p->status == P_WAITING)) { - profile_runnable_proc(p, am_inactive); - } + state = erts_smp_atomic32_read_acqb(&p->state); if (IS_TRACED(p)) { - if (IS_TRACED_FL(p, F_TRACE_CALLS) && p->status != P_FREE) { + if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE)) { erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT); } - switch (p->status) { - case P_EXITING: + if (state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) { if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT)) - trace_sched(p, am_out_exiting); - break; - case P_FREE: - if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT)) - trace_sched(p, am_out_exited); - break; - default: + trace_sched(p, ((state & ERTS_PSFLG_FREE) + ? 
am_out_exited + : am_out_exiting)); + } + else { if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED)) trace_sched(p, am_out); else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS)) trace_virtual_sched(p, am_out); - break; } } #ifdef ERTS_SMP - if (ERTS_PROC_PENDING_EXIT(p)) { - erts_handle_pending_exit(p, - ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ; - } - - if (p->pending_suspenders) { - handle_pending_suspend(p, - ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - ASSERT(!(p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) - || p->rcount == 0); - } + if (state & ERTS_PSFLG_PENDING_EXIT) + erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN + | ERTS_PROC_LOCK_STATUS)); + if (p->pending_suspenders) + handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN + | ERTS_PROC_LOCK_STATUS)); #endif - erts_smp_runq_lock(rq); - ERTS_PROC_REDUCTIONS_EXECUTED(rq, p->prio, reds, actual_reds); + esdp->reductions += reds; + + schedule_out_process(rq, state, p); /* Returns with rq locked! */ + + ERTS_PROC_REDUCTIONS_EXECUTED(rq, + (int) (state & ERTS_PSFLG_PRIO_MASK), + reds, + actual_reds); esdp->current_process = NULL; #ifdef ERTS_SMP p->scheduler_data = NULL; - p->runq_flags &= ~ERTS_PROC_RUNQ_FLG_RUNNING; - p->status_flags &= ~ERTS_PROC_SFLG_RUNNING; - - if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) { - ErtsRunQueue *notify_runq; - p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ; - notify_runq = internal_add_to_runq(rq, p); - if (notify_runq != rq) - smp_notify_inc_runq(notify_runq); - } #endif - if (p->status == P_FREE) { + if (state & ERTS_PSFLG_FREE) { #ifdef ERTS_SMP ASSERT(esdp->free_process == p); esdp->free_process = NULL; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - erts_smp_proc_dec_refc(p); #else erts_free_proc(p); #endif - } else { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); } -#ifdef ERTS_SMP - { - ErtsProcList *pnd_xtrs = rq->procs.pending_exiters; - rq->procs.pending_exiters = NULL; + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - if (pnd_xtrs) { - erts_smp_runq_unlock(rq); - handle_pending_exiters(pnd_xtrs); - erts_smp_runq_lock(rq); - } - - } +#ifdef ERTS_SMP ASSERT(!esdp->free_process); #endif ASSERT(!esdp->current_process); @@ -6728,8 +6942,23 @@ Process *schedule(Process *p, int calls) ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); check_activities_to_run: { +#ifdef ERTS_SMP + ErtsMigrationPaths *mps; + ErtsMigrationPath *mp; #ifdef ERTS_SMP + { + ErtsProcList *pnd_xtrs = rq->procs.pending_exiters; + rq->procs.pending_exiters = NULL; + + if (pnd_xtrs) { + erts_smp_runq_unlock(rq); + handle_pending_exiters(pnd_xtrs); + erts_smp_runq_lock(rq); + } + + } +#endif if (rq->check_balance_reds <= 0) check_balance(rq); @@ -6737,20 +6966,28 @@ Process *schedule(Process *p, int calls) ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); - if (rq->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK) - immigrate(rq); + mps = erts_get_migration_paths_managed(); + mp = &mps->mpath[rq->ix]; + + if (mp->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK) + immigrate(rq, mp); - continue_check_activities_to_run: + continue_check_activities_to_run: + flags = ERTS_RUNQ_FLGS_GET_NOB(rq); + continue_check_activities_to_run_known_flags: - if (rq->flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND - | ERTS_RUNQ_FLG_SUSPENDED)) { - if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) { - ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags) - & ERTS_SSI_FLG_SUSPENDED); + + if (flags & 
(ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) { + + if (flags & ERTS_RUNQ_FLG_SUSPENDED) { suspend_scheduler(esdp); + flags = ERTS_RUNQ_FLGS_GET_NOB(rq); } - if (rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) + if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) { + flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND); + flags &= ~ ERTS_RUNQ_FLG_CHK_CPU_BIND; erts_sched_check_cpu_bind(esdp); + } } { @@ -6779,14 +7016,12 @@ Process *schedule(Process *p, int calls) } #endif /* ERTS_SMP */ - ASSERT(rq->len == rq->procs.len + rq->ports.info.len); - - if ((rq->len == 0 && !rq->misc.start) - || (rq->halt_in_progress - && rq->ports.info.len == 0 && !rq->misc.start)) { + flags = ERTS_RUNQ_FLGS_GET_NOB(rq); + if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start) + || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) { + /* Prepare for scheduler wait */ #ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); rq->wakeup_other = 0; @@ -6794,21 +7029,27 @@ Process *schedule(Process *p, int calls) empty_runq(rq); - if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) { - ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags) - & ERTS_SSI_FLG_SUSPENDED); + flags = ERTS_RUNQ_FLGS_GET_NOB(rq); + if (flags & ERTS_RUNQ_FLG_SUSPENDED) { non_empty_runq(rq); - goto continue_check_activities_to_run; + goto continue_check_activities_to_run_known_flags; } - else if (!(rq->flags & ERTS_RUNQ_FLG_INACTIVE)) { + else if (!(flags & ERTS_RUNQ_FLG_INACTIVE)) { + if (try_steal_task(rq)) { + non_empty_runq(rq); + goto continue_check_activities_to_run; + } + + (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); + /* * Check for ERTS_RUNQ_FLG_SUSPENDED has to be done * after trying to steal a task. */ - if (try_steal_task(rq) - || (rq->flags & ERTS_RUNQ_FLG_SUSPENDED)) { + flags = ERTS_RUNQ_FLGS_GET_NOB(rq); + if (flags & ERTS_RUNQ_FLG_SUSPENDED) { non_empty_runq(rq); - goto continue_check_activities_to_run; + goto continue_check_activities_to_run_known_flags; } } @@ -6839,6 +7080,7 @@ Process *schedule(Process *p, int calls) erl_sys_schedule(1); dt = erts_do_time_read_and_reset(); if (dt) erts_bump_timer(dt); + #ifdef ERTS_SMP erts_smp_runq_lock(rq); clear_sys_scheduling(); @@ -6852,14 +7094,14 @@ Process *schedule(Process *p, int calls) exec_misc_ops(rq); #ifdef ERTS_SMP - wakeup_other.check(rq); + wakeup_other.check(rq, flags); #endif /* * Find a new port to run. */ - if (rq->ports.info.len) { + if (RUNQ_READ_LEN(&rq->ports.info.len)) { int have_outstanding_io; have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port); if ((have_outstanding_io && fcalls > 2*input_reductions) @@ -6886,159 +7128,122 @@ Process *schedule(Process *p, int calls) /* * Find a new process to run. 
*/ - pick_next_process: - - ERTS_DBG_CHK_PROCS_RUNQ(rq); - - switch (rq->flags & ERTS_RUNQ_FLGS_PROCS_QMASK) { - case MAX_BIT: - case MAX_BIT|HIGH_BIT: - case MAX_BIT|NORMAL_BIT: - case MAX_BIT|LOW_BIT: - case MAX_BIT|HIGH_BIT|NORMAL_BIT: - case MAX_BIT|HIGH_BIT|LOW_BIT: - case MAX_BIT|NORMAL_BIT|LOW_BIT: - case MAX_BIT|HIGH_BIT|NORMAL_BIT|LOW_BIT: - rpq = &rq->procs.prio[PRIORITY_MAX]; - break; - case HIGH_BIT: - case HIGH_BIT|NORMAL_BIT: - case HIGH_BIT|LOW_BIT: - case HIGH_BIT|NORMAL_BIT|LOW_BIT: - rpq = &rq->procs.prio[PRIORITY_HIGH]; - break; - case NORMAL_BIT: - rpq = &rq->procs.prio[PRIORITY_NORMAL]; - break; - case LOW_BIT: - rpq = &rq->procs.prio[PRIORITY_NORMAL]; - break; - case NORMAL_BIT|LOW_BIT: - rpq = &rq->procs.prio[PRIORITY_NORMAL]; - ASSERT(rpq->first != NULL); - p = rpq->first; - if (p->prio == PRIORITY_LOW) { - if (p == rpq->last || p->skipped >= RESCHEDULE_LOW-1) - p->skipped = 0; - else { - /* skip it */ - p->skipped++; - rpq->first = p->next; - rpq->first->prev = NULL; - rpq->last->next = p; - p->prev = rpq->last; - p->next = NULL; - rpq->last = p; + pick_next_process: { + int prio_q; + int qmask; + + flags = ERTS_RUNQ_FLGS_GET_NOB(rq); + qmask = (int) (flags & ERTS_RUNQ_FLGS_PROCS_QMASK); + switch (qmask & -qmask) { + case MAX_BIT: + prio_q = PRIORITY_MAX; + break; + case HIGH_BIT: + prio_q = PRIORITY_HIGH; + break; + case NORMAL_BIT: + case LOW_BIT: + prio_q = PRIORITY_NORMAL; + if (check_requeue_process(rq, PRIORITY_NORMAL)) goto pick_next_process; - } + break; + case 0: /* No process at all */ + default: + ASSERT(qmask == 0); + goto check_activities_to_run; } - break; - case 0: /* No process at all */ - default: - ASSERT((rq->flags & ERTS_RUNQ_FLGS_PROCS_QMASK) == 0); - ASSERT(rq->procs.len == 0); - goto check_activities_to_run; - } - - BM_START_TIMER(system); - - /* - * Take the chosen process out of the queue. - */ - ASSERT(rpq->first); /* Wrong qmask in rq->flags? */ - p = rpq->first; -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(rq == p->run_queue); -#endif - rpq->first = p->next; - if (!rpq->first) - rpq->last = NULL; - else - rpq->first->prev = NULL; - p->next = p->prev = NULL; + BM_START_TIMER(system); - if (--rq->procs.prio_info[p->prio].len == 0) - rq->flags &= ~(1 << p->prio); - ASSERT(rq->procs.len > 0); - rq->procs.len--; - ASSERT(rq->len > 0); - rq->len--; + /* + * Take the chosen process out of the queue. + */ + p = dequeue_process(rq, prio_q, &state); - { - Uint32 ee_flgs = (ERTS_RUNQ_FLG_EVACUATE(p->prio) - | ERTS_RUNQ_FLG_EMIGRATE(p->prio)); + ASSERT(p); /* Wrong qmask in rq->flags? 
*/ - if ((rq->flags & (ERTS_RUNQ_FLG_SUSPENDED|ee_flgs)) == ee_flgs) - ERTS_UNSET_RUNQ_FLG_EVACUATE(rq->flags, p->prio); - } + while (1) { + erts_aint32_t exp, new, tmp; + tmp = new = exp = state; + new &= ~ERTS_PSFLG_IN_RUNQ; + tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT); + if (tmp != ERTS_PSFLG_SUSPENDED) + new |= ERTS_PSFLG_RUNNING; + state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp); + if (state == exp) { + tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT); + if (tmp == ERTS_PSFLG_SUSPENDED) + goto pick_next_process; + state = new; + break; + } + } - ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(rq, p); + rq->procs.context_switches++; - rq->procs.context_switches++; + esdp->current_process = p; - esdp->current_process = p; + } #ifdef ERTS_SMP - p->runq_flags |= ERTS_PROC_RUNQ_FLG_RUNNING; erts_smp_runq_unlock(rq); + if (flags & ERTS_RUNQ_FLG_PROTECTED) + (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); + ERTS_SMP_CHK_NO_PROC_LOCKS; erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); if (erts_sched_stat.enabled) { + int prio; UWord old = ERTS_PROC_SCHED_ID(p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_STATUS), (UWord) esdp->no); int migrated = old && old != esdp->no; + prio = (int) (state & ERTS_PSFLG_PRIO_MASK); + erts_smp_spin_lock(&erts_sched_stat.lock); - erts_sched_stat.prio[p->prio].total_executed++; - erts_sched_stat.prio[p->prio].executed++; + erts_sched_stat.prio[prio].total_executed++; + erts_sched_stat.prio[prio].executed++; if (migrated) { - erts_sched_stat.prio[p->prio].total_migrated++; - erts_sched_stat.prio[p->prio].migrated++; + erts_sched_stat.prio[prio].total_migrated++; + erts_sched_stat.prio[prio].migrated++; } erts_smp_spin_unlock(&erts_sched_stat.lock); } - p->status_flags |= ERTS_PROC_SFLG_RUNNING; - p->status_flags &= ~ERTS_PROC_SFLG_INRUNQ; if (ERTS_PROC_PENDING_EXIT(p)) { erts_handle_pending_exit(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); + state = erts_smp_atomic32_read_nob(&p->state); } ASSERT(!p->scheduler_data); p->scheduler_data = esdp; - #endif - ASSERT(p->status != P_SUSPENDED); /* Never run a suspended process */ + /* Never run a suspended process */ + ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state))); reds = context_reds; if (IS_TRACED(p)) { - switch (p->status) { - case P_EXITING: + if (state & ERTS_PSFLG_EXITING) { if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT)) trace_sched(p, am_in_exiting); - break; - default: + } + else { if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED)) trace_sched(p, am_in); else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS)) trace_virtual_sched(p, am_in); - break; } if (IS_TRACED_FL(p, F_TRACE_CALLS)) { erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN); } } - if (p->status != P_EXITING) - p->status = P_RUNNING; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); #ifdef ERTS_SMP @@ -7046,7 +7251,7 @@ Process *schedule(Process *p, int calls) erts_check_my_tracer_proc(p); #endif - if (!ERTS_PROC_IS_EXITING(p) + if (!(state & ERTS_PSFLG_EXITING) && ((FLAGS(p) & F_FORCE_GC) || (MSO(p).overhead > BIN_VHEAP_SZ(p)))) { reds -= erts_garbage_collect(p, 0, p->arg_reg, p->arity); @@ -7137,17 +7342,19 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsRunQueue *rq = esdp ? 
esdp->run_queue : ERTS_RUNQ_IX(0); ErtsMiscOpList *molp = misc_op_list_alloc(); +#ifdef ERTS_SMP + ErtsMigrationPaths *mpaths = erts_get_migration_paths(); - erts_smp_runq_lock(rq); - - while (rq->misc.evac_runq) { - ErtsRunQueue *tmp_rq = rq->misc.evac_runq; - erts_smp_runq_unlock(rq); - rq = tmp_rq; - erts_smp_runq_lock(rq); + if (!mpaths) + rq = ERTS_RUNQ_IX(0); + else { + ErtsRunQueue *erq = mpaths->mpath[rq->ix].misc_evac_runq; + if (erq) + rq = erq; } +#endif - ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED)); + erts_smp_runq_lock(rq); molp->next = NULL; molp->func = func; @@ -7157,7 +7364,9 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) else rq->misc.start = molp; rq->misc.end = molp; + erts_smp_runq_unlock(rq); + smp_notify_inc_runq(rq); } @@ -7247,44 +7456,50 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp) Sint erts_test_next_pid(int set, Uint next) { + Uint64 lpd; Sint res; - Sint p_prev; - + Eterm pid_data; + int first_pix = -1; - erts_smp_mtx_lock(&proc_tab_mtx); + erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); - if (!set) { - res = p_next < 0 ? -1 : (p_serial << p_serial_shift | p_next); - } + if (!set) + lpd = last_pid_data_read_nob(); else { - p_serial = (Sint) ((next >> p_serial_shift) & p_serial_mask); - p_next = (Sint) (erts_process_tab_index_mask & next); - - if (p_next >= erts_max_processes) { - p_next = 0; - p_serial++; - p_serial &= p_serial_mask; + lpd = (Uint64) next; + pid_data = (Eterm) (lpd & ERTS_PID_DATA_MASK__); + if (ERTS_INVALID_PID == make_internal_pid(pid_data)) { + lpd += erts_proc.max; + ASSERT(erts_pid_data2ix(pid_data) + == erts_pid_data2ix(lpd & ERTS_PID_DATA_MASK__)); } + last_pid_data_set_relb(lpd); + } - p_prev = p_next; - - do { - if (!process_tab[p_next]) - break; - p_next++; - if(p_next >= erts_max_processes) { - p_next = 0; - p_serial++; - p_serial &= p_serial_mask; + while (1) { + int pix; + lpd++; + pix = (int) (lpd % erts_proc.max); + if (first_pix < 0) + first_pix = pix; + else if (pix == first_pix) { + res = -1; + break; + } + if (ERTS_AINT_NULL == erts_smp_atomic_read_nob(&erts_proc.tab[pix])) { + pid_data = (Eterm) (lpd & ERTS_PID_DATA_MASK__); + if (ERTS_INVALID_PID == make_internal_pid(pid_data)) { + lpd += erts_proc.max; + ASSERT(erts_pid_data2ix(pid_data) + == erts_pid_data2ix(lpd & ERTS_PID_DATA_MASK__)); } - } while (p_prev != p_next); - - res = process_tab[p_next] ? -1 : (p_serial << p_serial_shift | p_next); - + res = lpd & ERTS_PID_DATA_MASK__; + break; + } } - erts_smp_mtx_unlock(&proc_tab_mtx); + erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); return res; @@ -7293,6 +7508,8 @@ erts_test_next_pid(int set, Uint next) Uint erts_process_count(void) { erts_aint32_t res = erts_smp_atomic32_read_nob(&process_count); + if (res > erts_proc.max) + return erts_proc.max; ASSERT(res >= 0); return (Uint) res; } @@ -7300,97 +7517,138 @@ Uint erts_process_count(void) void erts_free_proc(Process *p) { -#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) - erts_lcnt_proc_lock_destroy(p); +#ifdef ERTS_SMP + erts_proc_lock_fin(p); #endif erts_free(ERTS_ALC_T_PROC, (void *) p); } - /* ** Allocate process and find out where to place next process. 
*/ static Process* -alloc_process(void) +alloc_process(ErtsRunQueue *rq, erts_aint32_t state) { -#ifdef ERTS_SMP - erts_pix_lock_t *pix_lock; -#endif + int pix; Process* p; - int p_prev; + Uint64 lpd, exp_lpd; + Eterm pid_data; + erts_aint32_t proc_count; +#ifdef DEBUG + Eterm pid; +#endif - erts_smp_mtx_lock(&proc_tab_mtx); + erts_smp_rwmtx_rlock(&erts_proc_tab_rwmtx); + + proc_count = erts_smp_atomic32_inc_read_acqb(&process_count); + if (proc_count > erts_proc.max) { + while (1) { + erts_aint32_t act_proc_count; - if (p_next == -1) { - p = NULL; - goto error; /* Process table full! */ + act_proc_count = erts_smp_atomic32_cmpxchg_relb(&process_count, + proc_count-1, + proc_count); + if (act_proc_count == proc_count) + goto system_limit; + proc_count = act_proc_count; + if (proc_count <= erts_proc.max) + break; + } } p = (Process*) erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process)); if (!p) - goto error; /* ENOMEM */ + goto enomem; - p_last = p_next; + p->approx_started = erts_get_approx_time(); + p->started_interval = get_proc_interval(); - erts_get_emu_time(&p->started); + lpd = last_pid_data_read_acqb(); -#ifdef ERTS_SMP - pix_lock = ERTS_PIX2PIXLOCK(p_next); - erts_pix_lock(pix_lock); -#endif - ASSERT(!process_tab[p_next]); + /* Reserve slot */ + while (1) { + lpd++; + pix = erts_pid_data2ix((Eterm) (lpd & ERTS_PID_DATA_MASK__)); + if (erts_smp_atomic_read_nob(&erts_proc.tab[pix]) == ERTS_AINT_NULL) { + erts_aint_t val; + val = erts_smp_atomic_cmpxchg_relb(&erts_proc.tab[pix], + ((erts_aint_t) + ERTS_PROC_LOCK_BUSY), + ERTS_AINT_NULL); + + if (ERTS_AINT_NULL == val) + break; + } + } - process_tab[p_next] = p; - erts_smp_atomic32_inc_nob(&process_count); - p->id = make_internal_pid(p_serial << p_serial_shift | p_next); + pid_data = (Eterm) lpd & ERTS_PID_DATA_MASK__; + + p->id = make_internal_pid(pid_data); if (p->id == ERTS_INVALID_PID) { /* Do not use the invalid pid; change serial */ - p_serial++; - p_serial &= p_serial_mask; - p->id = make_internal_pid(p_serial << p_serial_shift | p_next); + lpd += erts_proc.max; + ASSERT(pix == erts_pid_data2ix((Eterm) (lpd & ERTS_PID_DATA_MASK__))); + pid_data = (Eterm) lpd & ERTS_PID_DATA_MASK__; + p->id = make_internal_pid(pid_data); ASSERT(p->id != ERTS_INVALID_PID); } - ASSERT(internal_pid_serial(p->id) <= (erts_use_r9_pids_ports - ? ERTS_MAX_PID_R9_SERIAL - : ERTS_MAX_PID_SERIAL)); + + exp_lpd = last_pid_data_read_nob(); + + /* Move last pid data forward */ + while (1) { + Uint64 act_lpd; + if (last_pid_data_cmp(lpd, exp_lpd) < 0) + break; + act_lpd = last_pid_data_cmpxchg_relb(lpd, exp_lpd); + if (act_lpd == exp_lpd) + break; + exp_lpd = act_lpd; + } #ifdef ERTS_SMP - erts_proc_lock_init(p); /* All locks locked */ - erts_pix_unlock(pix_lock); + RUNQ_SET_RQ(&p->run_queue, rq); #endif - p->rstatus = P_FREE; - p->rcount = 0; + erts_smp_atomic32_init_relb(&p->state, state); - /* - * set p_next to the next available slot - */ +#ifdef DEBUG + pid = p->id; +#endif - p_prev = p_next; +#ifdef ERTS_SMP + erts_proc_lock_init(p); /* All locks locked */ +#endif - while (1) { - p_next++; - if(p_next >= erts_max_processes) { - p_serial++; - p_serial &= p_serial_mask; - p_next = 0; - } + /* Move into slot reserved */ +#ifdef DEBUG + ASSERT(ERTS_PROC_LOCK_BUSY + == (Process *) erts_smp_atomic_xchg_relb(&erts_proc.tab[pix], + (erts_aint_t) p)); +#else + erts_smp_atomic_set_relb(&erts_proc.tab[pix], (erts_aint_t) p); +#endif - if (p_prev == p_next) { - p_next = -1; - break; /* Table full! */ - } + ASSERT(internal_pid_serial(p->id) <= (erts_use_r9_pids_ports + ? 
ERTS_MAX_PID_R9_SERIAL + : ERTS_MAX_PID_SERIAL)); - if (!process_tab[p_next]) - break; /* found a free slot */ - } + erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx); - error: + p->rcount = 0; - erts_smp_mtx_unlock(&proc_tab_mtx); + ASSERT(p == (Process *) + erts_smp_atomic_read_nob( + &erts_proc.tab[internal_pid_index(pid)])); return p; +enomem: +system_limit: + + erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx); + return NULL; + } Eterm @@ -7400,13 +7658,15 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). Eterm args, /* Arguments for function (must be well-formed list). */ ErlSpawnOpts* so) /* Options for spawn. */ { - ErtsRunQueue *rq, *notify_runq; + ErtsRunQueue *rq = NULL; Process *p; Sint arity; /* Number of arguments. */ Uint arg_size; /* Size of arguments. */ Uint sz; /* Needed words on heap. */ Uint heap_need; /* Size needed on heap. */ Eterm res = THE_NON_VALUE; + erts_aint32_t state = 0; + erts_aint32_t prio = (erts_aint32_t) PRIORITY_NORMAL; #ifdef ERTS_SMP erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); @@ -7420,8 +7680,24 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). so->error_code = BADARG; goto error; } - p = alloc_process(); /* All proc locks are locked by this thread - on success */ + + if (so->flags & SPO_USE_ARGS) { + if (so->scheduler) { + int ix = so->scheduler-1; + ASSERT(0 <= ix && ix < erts_no_run_queues); + rq = ERTS_RUNQ_IX(ix); + state |= ERTS_PSFLG_BOUND; + } + prio = (erts_aint32_t) so->priority; + } + + state |= (prio & ERTS_PSFLG_PRIO_MASK); + + if (!rq) + rq = erts_get_runq_proc(parent); + + p = alloc_process(rq, state); /* All proc locks are locked by this thread + on success */ if (!p) { erts_send_error_to_logger_str(parent->group_leader, "Too many processes\n"); @@ -7441,22 +7717,16 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->flags = erts_default_process_flags; - /* Scheduler queue mutex should be locked when changeing - * prio. In this case we don't have to lock it, since - * noone except us has access to the process. - */ if (so->flags & SPO_USE_ARGS) { p->min_heap_size = so->min_heap_size; p->min_vheap_size = so->min_vheap_size; - p->prio = so->priority; p->max_gen_gcs = so->max_gen_gcs; } else { p->min_heap_size = H_MIN_SIZE; p->min_vheap_size = BIN_VH_MIN_SIZE; - p->prio = PRIORITY_NORMAL; p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); } - p->skipped = 0; + p->schedule_count = 0; ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0)); p->initial[INITIAL_MOD] = mod; @@ -7525,9 +7795,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->reds = 0; #ifdef ERTS_SMP - p->u.ptimer = NULL; + p->u.alive.ptimer = NULL; #else - sys_memset(&p->u.tm, 0, sizeof(ErlTimer)); + sys_memset(&p->u.alive.tm, 0, sizeof(ErlTimer)); #endif p->reg = NULL; @@ -7555,10 +7825,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->msg.save = &p->msg.first; p->msg.len = 0; #ifdef ERTS_SMP - p->msg_inq.first = NULL; - p->msg_inq.last = &p->msg_inq.first; - p->msg_inq.len = 0; - p->bound_runq = NULL; + p->u.alive.msg_inq.first = NULL; + p->u.alive.msg_inq.last = &p->u.alive.msg_inq.first; + p->u.alive.msg_inq.len = 0; #endif p->bif_timers = NULL; p->mbuf = NULL; @@ -7644,47 +7913,25 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
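The alloc_process() hunk above drops the globally locked table scan in favour of a reserve-then-publish scheme on an array of atomic slots: a free slot (ERTS_AINT_NULL) is claimed with a compare-and-swap against a busy marker, the Process structure is initialized off to the side, and a final release store makes it visible to lock-free readers, after which erl_create_process() (continued in the next hunk) fills in the rest. A minimal standalone sketch of that pattern, using C11 atomics and placeholder names (slot_tab, SLOT_FREE, SLOT_BUSY) rather than the real erts_proc.tab machinery:

    #include <stdatomic.h>
    #include <stdint.h>

    #define MAX_SLOTS 1024
    #define SLOT_FREE ((uintptr_t) 0)   /* plays the role of ERTS_AINT_NULL */
    #define SLOT_BUSY ((uintptr_t) 1)   /* plays the role of ERTS_PROC_LOCK_BUSY */

    static _Atomic uintptr_t slot_tab[MAX_SLOTS];   /* plays the role of erts_proc.tab */

    /* Claim a free slot starting at 'hint'; returns its index or -1 if the
     * table is full. The CAS guarantees no two threads reserve the same slot. */
    static int reserve_slot(int hint)
    {
        int i;
        for (i = 0; i < MAX_SLOTS; i++) {
            int ix = (hint + i) % MAX_SLOTS;
            uintptr_t expected = SLOT_FREE;
            if (atomic_compare_exchange_strong(&slot_tab[ix], &expected, SLOT_BUSY))
                return ix;
        }
        return -1;
    }

    /* Publish a fully initialized object; the release store pairs with the
     * acquire loads done by lock-free lookups. */
    static void publish_slot(int ix, void *obj)
    {
        atomic_store_explicit(&slot_tab[ix], (uintptr_t) obj, memory_order_release);
    }

The sketch also hints at why the new code initializes p->state (including the priority bits passed in from erl_create_process) and takes all process locks before the final store into the slot: nothing may observe a half-built Process once the table entry points at it.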
#ifdef ERTS_SMP p->scheduler_data = NULL; - p->is_exiting = 0; - p->status_flags = 0; - p->runq_flags = 0; p->suspendee = NIL; p->pending_suspenders = NULL; - p->pending_exit.reason = THE_NON_VALUE; - p->pending_exit.bp = NULL; + p->u.alive.pending_exit.reason = THE_NON_VALUE; + p->u.alive.pending_exit.bp = NULL; #endif #if !defined(NO_FPE_SIGNALS) || defined(HIPE) p->fp_exception = 0; #endif + erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + + res = p->id; + /* * Schedule process for execution. */ - if (!((so->flags & SPO_USE_ARGS) && so->scheduler)) - rq = erts_get_runq_proc(parent); - else { - int ix = so->scheduler-1; - ASSERT(0 <= ix && ix < erts_no_run_queues); - rq = ERTS_RUNQ_IX(ix); - p->bound_runq = rq; - } - - erts_smp_runq_lock(rq); - -#ifdef ERTS_SMP - p->run_queue = rq; -#endif - - p->status = P_WAITING; - notify_runq = internal_add_to_runq(rq, p); - - erts_smp_runq_unlock(rq); - - smp_notify_inc_runq(notify_runq); - - res = p->id; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + schedule_process(p, state, 0); VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->id)); @@ -7720,12 +7967,8 @@ void erts_init_empty_process(Process *p) p->max_gen_gcs = 0; p->min_heap_size = 0; p->min_vheap_size = 0; - p->status = P_RUNABLE; - p->gcstatus = P_RUNABLE; - p->rstatus = P_RUNABLE; p->rcount = 0; p->id = ERTS_INVALID_PID; - p->prio = PRIORITY_NORMAL; p->reds = 0; p->tracer_proc = NIL; p->trace_flags = F_INITIAL_TRACE_FLAGS; @@ -7741,10 +7984,9 @@ void erts_init_empty_process(Process *p) p->bin_old_vheap = 0; p->bin_vheap_mature = 0; #ifdef ERTS_SMP - p->u.ptimer = NULL; - p->bound_runq = NULL; + p->u.alive.ptimer = NULL; #else - memset(&(p->u.tm), 0, sizeof(ErlTimer)); + memset(&(p->u.alive.tm), 0, sizeof(ErlTimer)); #endif p->next = NULL; p->off_heap.first = NULL; @@ -7793,8 +8035,8 @@ void erts_init_empty_process(Process *p) p->def_arg_reg[5] = 0; p->parent = NIL; - p->started.tv_sec = 0; - p->started.tv_usec = 0; + p->approx_started = 0; + p->started_interval = 0; #ifdef HIPE hipe_init_process(&p->hipe); @@ -7808,22 +8050,20 @@ void erts_init_empty_process(Process *p) p->last_old_htop = NULL; #endif + erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL); #ifdef ERTS_SMP p->scheduler_data = NULL; - p->is_exiting = 0; - p->status_flags = 0; - p->runq_flags = 0; - p->msg_inq.first = NULL; - p->msg_inq.last = &p->msg_inq.first; - p->msg_inq.len = 0; + p->u.alive.msg_inq.first = NULL; + p->u.alive.msg_inq.last = &p->u.alive.msg_inq.first; + p->u.alive.msg_inq.len = 0; p->suspendee = NIL; p->pending_suspenders = NULL; - p->pending_exit.reason = THE_NON_VALUE; - p->pending_exit.bp = NULL; + p->u.alive.pending_exit.reason = THE_NON_VALUE; + p->u.alive.pending_exit.bp = NULL; erts_proc_lock_init(p); erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); - p->run_queue = ERTS_RUNQ_IX(0); + RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0)); #endif #if !defined(NO_FPE_SIGNALS) || defined(HIPE) @@ -7870,12 +8110,12 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->parent == NIL); #ifdef ERTS_SMP - ASSERT(p->msg_inq.first == NULL); - ASSERT(p->msg_inq.len == 0); + ASSERT(p->u.alive.msg_inq.first == NULL); + ASSERT(p->u.alive.msg_inq.len == 0); ASSERT(p->suspendee == NIL); ASSERT(p->pending_suspenders == NULL); - ASSERT(p->pending_exit.reason == THE_NON_VALUE); - ASSERT(p->pending_exit.bp == NULL); + ASSERT(p->u.alive.pending_exit.reason == THE_NON_VALUE); + ASSERT(p->u.alive.pending_exit.bp == NULL); #endif /* Thing that erts_cleanup_empty_process() cleans up */ @@ -7901,8 
+8141,8 @@ erts_cleanup_empty_process(Process* p) free_message_buffer(p->mbuf); p->mbuf = NULL; } -#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) - erts_lcnt_proc_lock_destroy(p); +#ifdef ERTS_SMP + erts_proc_lock_fin(p); #endif #ifdef DEBUG erts_debug_verify_clean_empty_process(p); @@ -7999,26 +8239,34 @@ delete_process(Process* p) p->fvalue = NIL; } +static ERTS_INLINE erts_aint32_t +set_proc_exiting_state(Process *p, erts_aint32_t state) +{ + erts_aint32_t a, n, e; + a = state; + while (1) { + n = e = a; + n &= ~(ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT); + n |= ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE; + if (!(a & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING))) + n |= ERTS_PSFLG_IN_RUNQ; + a = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e); + if (a == e) + break; + } + return a; +} + static ERTS_INLINE void -set_proc_exiting(Process *p, Eterm reason, ErlHeapFragment *bp) +set_proc_exiting(Process *p, + erts_aint32_t state, + Eterm reason, + ErlHeapFragment *bp) { -#ifdef ERTS_SMP - erts_pix_lock_t *pix_lock = ERTS_PID2PIXLOCK(p->id); ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL); - /* - * You are required to have all proc locks and the pix lock when going - * to status P_EXITING. This makes it is enough to take any lock when - * looking up a process (pid2proc()) to prevent the looked up process - * from exiting until the lock has been released. - */ - erts_pix_lock(pix_lock); - p->is_exiting = 1; -#endif - p->status = P_EXITING; -#ifdef ERTS_SMP - erts_pix_unlock(pix_lock); -#endif + state = set_proc_exiting_state(p, state); + p->fvalue = reason; if (bp) erts_link_mbuf_to_proc(p, bp); @@ -8031,6 +8279,14 @@ set_proc_exiting(Process *p, Eterm reason, ErlHeapFragment *bp) KILL_CATCHES(p); cancel_timer(p); p->i = (BeamInstr *) beam_exit; + + if (erts_system_profile_flags.runnable_procs + && !(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) { + profile_runnable_proc(p, am_active); + } + + if (!(state & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING))) + add2runq(p, state); } @@ -8040,11 +8296,11 @@ void erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks) { ErtsProcLocks xlocks; - ASSERT(is_value(c_p->pending_exit.reason)); + ASSERT(is_value(c_p->u.alive.pending_exit.reason)); ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks); ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_LC_ASSERT(c_p->status != P_EXITING); - ERTS_SMP_LC_ASSERT(c_p->status != P_FREE); + ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE) + & erts_smp_atomic32_read_nob(&c_p->state))); /* Ensure that all locks on c_p are locked before proceeding... 
*/ if (locks == ERTS_PROC_LOCKS_ALL) @@ -8057,9 +8313,12 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks) } } - set_proc_exiting(c_p, c_p->pending_exit.reason, c_p->pending_exit.bp); - c_p->pending_exit.reason = THE_NON_VALUE; - c_p->pending_exit.bp = NULL; + set_proc_exiting(c_p, + erts_smp_atomic32_read_acqb(&c_p->state), + c_p->u.alive.pending_exit.reason, + c_p->u.alive.pending_exit.bp); + c_p->u.alive.pending_exit.reason = THE_NON_VALUE; + c_p->u.alive.pending_exit.bp = NULL; if (xlocks) erts_smp_proc_unlock(c_p, xlocks); @@ -8073,11 +8332,12 @@ handle_pending_exiters(ErtsProcList *pnd_xtrs) while (plp) { Process *p = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCKS_ALL); if (p) { - if (proclist_same(plp, p) - && !(p->status_flags & ERTS_PROC_SFLG_RUNNING)) { - ASSERT(p->status_flags & ERTS_PROC_SFLG_INRUNQ); - ASSERT(ERTS_PROC_PENDING_EXIT(p)); - erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL); + if (proclist_same(plp, p)) { + erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + if (!(state & ERTS_PSFLG_RUNNING)) { + ASSERT(state & ERTS_PSFLG_PENDING_EXIT); + erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL); + } } erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); } @@ -8105,7 +8365,7 @@ save_pending_exiter(Process *p) rq->procs.pending_exiters = plp; erts_smp_runq_unlock(rq); - + wake_scheduler(rq, 1); } #endif @@ -8167,7 +8427,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp, * SMP emulator). When the signal is received the receiver receives an * 'EXIT' message if it is trapping exits; otherwise, it will either * ignore the signal if the exit reason is normal, or go into an - * exiting state (status P_EXITING). When a process has gone into the + * exiting state (ERTS_PSFLG_EXITING). When a process has gone into the * exiting state it will not execute any more Erlang code, but it might * take a while before it actually exits. The exit signal is being * received when the 'EXIT' message is put in the message queue, the @@ -8240,6 +8500,7 @@ send_exit_signal(Process *c_p, /* current process if and only Uint32 flags /* flags */ ) { + erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); Eterm rsn = reason == am_kill ? am_killed : reason; ERTS_SMP_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp)); @@ -8261,7 +8522,7 @@ send_exit_signal(Process *c_p, /* current process if and only } #endif - if (ERTS_PROC_IS_TRAPPING_EXITS(rp) + if ((state & ERTS_PSFLG_TRAP_EXIT) && (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) { if (is_not_nil(token) #ifdef USE_VM_PROBES @@ -8277,10 +8538,8 @@ send_exit_signal(Process *c_p, /* current process if and only } else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) { #ifdef ERTS_SMP - if (!ERTS_PROC_PENDING_EXIT(rp) && !rp->is_exiting) { - ASSERT(rp->status != P_EXITING); - ASSERT(rp->status != P_FREE); - ASSERT(!rp->pending_exit.bp); + if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) { + ASSERT(!rp->u.alive.pending_exit.bp); if (rp == c_p && (*rp_locks & ERTS_PROC_LOCK_MAIN)) { /* Ensure that all locks on c_p are locked before @@ -8295,9 +8554,9 @@ send_exit_signal(Process *c_p, /* current process if and only } *rp_locks = ERTS_PROC_LOCKS_ALL; } - set_proc_exiting(c_p, rsn, NULL); + set_proc_exiting(c_p, state, rsn, NULL); } - else if (!(rp->status_flags & ERTS_PROC_SFLG_RUNNING)) { + else if (!(state & ERTS_PSFLG_RUNNING)) { /* Process not running ... 
*/ ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL; if (need_locks @@ -8314,6 +8573,7 @@ send_exit_signal(Process *c_p, /* current process if and only /* ...and we have all locks on it... */ *rp_locks = ERTS_PROC_LOCKS_ALL; set_proc_exiting(rp, + state, (is_immed(rsn) ? rsn : copy_object(rsn, rp)), @@ -8329,7 +8589,7 @@ send_exit_signal(Process *c_p, /* current process if and only set_pending_exit: if (is_immed(rsn)) { - rp->pending_exit.reason = rsn; + rp->u.alive.pending_exit.reason = rsn; } else { Eterm *hp; @@ -8337,17 +8597,15 @@ send_exit_signal(Process *c_p, /* current process if and only ErlHeapFragment *bp = new_message_buffer(sz); hp = &bp->mem[0]; - rp->pending_exit.reason = copy_struct(rsn, - sz, - &hp, - &bp->off_heap); - rp->pending_exit.bp = bp; + rp->u.alive.pending_exit.reason = copy_struct(rsn, + sz, + &hp, + &bp->off_heap); + rp->u.alive.pending_exit.bp = bp; } - ASSERT(ERTS_PROC_PENDING_EXIT(rp)); + erts_smp_atomic32_read_bor_relb(&rp->state, + ERTS_PSFLG_PENDING_EXIT); } - if (!(rp->status_flags - & (ERTS_PROC_SFLG_INRUNQ|ERTS_PROC_SFLG_RUNNING))) - erts_add_to_runq(rp); } /* else: * @@ -8359,17 +8617,14 @@ send_exit_signal(Process *c_p, /* current process if and only * exit or by itself before seeing the pending exit. */ #else /* !ERTS_SMP */ - if (c_p == rp) { - rp->status = P_EXITING; - c_p->fvalue = rsn; - } - else if (rp->status != P_EXITING) { /* No recursive process exits /PaN */ - Eterm old_status = rp->status; + erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + if (!(state & ERTS_PSFLG_EXITING)) { set_proc_exiting(rp, - is_immed(rsn) ? rsn : copy_object(rsn, rp), + state, + (is_immed(rsn) || c_p == rp + ? rsn + : copy_object(rsn, rp)), NULL); - if (old_status != P_RUNABLE && old_status != P_RUNNING) - erts_add_to_runq(rp); } #endif return -1; /* Receiver will exit */ @@ -8672,12 +8927,14 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p) erts_destroy_suspend_monitor(smon); } -static void -continue_exit_process(Process *p #ifdef ERTS_SMP - , erts_pix_lock_t *pix_lock +static void +proc_dec_refc(void *vproc) +{ + erts_smp_proc_dec_refc((Process *) vproc); +} #endif - ); + /* this function fishishes a process and propagates exit messages - called by process_main when a process dies */ @@ -8685,9 +8942,8 @@ void erts_do_exit_process(Process* p, Eterm reason) { #ifdef ERTS_SMP - erts_pix_lock_t *pix_lock = ERTS_PID2PIXLOCK(p->id); + erts_aint32_t state; #endif - p->arity = 0; /* No live registers */ p->fvalue = reason; @@ -8705,32 +8961,22 @@ erts_do_exit_process(Process* p, Eterm reason) #ifdef ERTS_SMP ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); /* By locking all locks (main lock is already locked) when going - to status P_EXITING, it is enough to take any lock when + to exiting state (ERTS_PSFLG_EXITING), it is enough to take any lock when looking up a process (erts_pid2proc()) to prevent the looked up process from exiting until the lock has been released. 
*/ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); #endif - - if (erts_system_profile_flags.runnable_procs && (p->status != P_WAITING)) { - profile_runnable_proc(p, am_inactive); - } - -#ifdef ERTS_SMP - erts_pix_lock(pix_lock); - p->is_exiting = 1; -#endif - - p->status = P_EXITING; - -#ifdef ERTS_SMP - erts_pix_unlock(pix_lock); - if (ERTS_PROC_PENDING_EXIT(p)) { +#ifndef ERTS_SMP + set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state)); +#else + state = set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state)); + if (state & ERTS_PSFLG_PENDING_EXIT) { /* Process exited before pending exit was received... */ - p->pending_exit.reason = THE_NON_VALUE; - if (p->pending_exit.bp) { - free_message_buffer(p->pending_exit.bp); - p->pending_exit.bp = NULL; + p->u.alive.pending_exit.reason = THE_NON_VALUE; + if (p->u.alive.pending_exit.bp) { + free_message_buffer(p->u.alive.pending_exit.bp); + p->u.alive.pending_exit.bp = NULL; } } @@ -8764,29 +9010,11 @@ erts_do_exit_process(Process* p, Eterm reason) erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); -#ifdef ERTS_SMP - continue_exit_process(p, pix_lock); -#else - continue_exit_process(p); -#endif + erts_continue_exit_process(p); } void -erts_continue_exit_process(Process *c_p) -{ -#ifdef ERTS_SMP - continue_exit_process(c_p, ERTS_PID2PIXLOCK(c_p->id)); -#else - continue_exit_process(c_p); -#endif -} - -static void -continue_exit_process(Process *p -#ifdef ERTS_SMP - , erts_pix_lock_t *pix_lock -#endif - ) +erts_continue_exit_process(Process *p) { ErtsLink* lnk; ErtsMonitor *mon; @@ -8802,11 +9030,7 @@ continue_exit_process(Process *p ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); -#ifdef DEBUG - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - ASSERT(p->status == P_EXITING); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); -#endif + ASSERT(ERTS_PROC_IS_EXITING(p)); #ifdef ERTS_SMP if (p->flags & F_HAVE_BLCKD_MSCHED) { @@ -8874,48 +9098,47 @@ continue_exit_process(Process *p #endif { + int maybe_save; int pix; /* Do *not* use erts_get_runq_proc() */ ErtsRunQueue *rq; rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p)); - ASSERT(internal_pid_index(p->id) < erts_max_processes); pix = internal_pid_index(p->id); - erts_smp_mtx_lock(&proc_tab_mtx); + erts_smp_rwmtx_rlock(&erts_proc_tab_rwmtx); + maybe_save = saved_term_procs.end != NULL; + if (maybe_save) { + erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx); + erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); + } + erts_smp_runq_lock(rq); #ifdef ERTS_SMP - erts_pix_lock(pix_lock); - ASSERT(p->scheduler_data); ASSERT(p->scheduler_data->current_process == p); ASSERT(p->scheduler_data->free_process == NULL); p->scheduler_data->current_process = NULL; p->scheduler_data->free_process = p; - p->status_flags = 0; #endif - process_tab[pix] = NULL; /* Time of death! */ + /* Time of death! 
*/ + erts_smp_atomic_set_relb(&erts_proc.tab[pix], ERTS_AINT_NULL); + ASSERT(erts_smp_atomic32_read_nob(&process_count) > 0); - erts_smp_atomic32_dec_nob(&process_count); + erts_smp_atomic32_dec_relb(&process_count); -#ifdef ERTS_SMP - erts_pix_unlock(pix_lock); -#endif erts_smp_runq_unlock(rq); - if (p_next < 0) { - if (p_last >= p_next) { - p_serial++; - p_serial &= p_serial_mask; - } - p_next = pix; + if (!maybe_save) + erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx); + else { + if (saved_term_procs.end) + save_terminating_process(p); + erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); } - ERTS_MAYBE_SAVE_TERMINATING_PROCESS(p); - - erts_smp_mtx_unlock(&proc_tab_mtx); } /* @@ -8930,7 +9153,21 @@ continue_exit_process(Process *p lnk = p->nlinks; p->nlinks = NULL; - p->status = P_FREE; + + { + /* Inactivate and notify free */ + erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state); + while (1) { + n = e = a; + ASSERT(a & ERTS_PSFLG_EXITING); + n |= ERTS_PSFLG_FREE; + n &= ~ERTS_PSFLG_ACTIVE; + a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + if (a == e) + break; + } + } + dep = ((p->flags & F_DISTRIBUTION) ? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL) : NULL); @@ -8985,8 +9222,13 @@ continue_exit_process(Process *p delete_process(p); +#ifdef ERTS_SMP + erts_schedule_thr_prgr_later_op(proc_dec_refc, + (void *) p, + &p->u.release_data); erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); +#endif return; @@ -8999,8 +9241,6 @@ continue_exit_process(Process *p ERTS_SMP_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p)); ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks); - ASSERT(p->status == P_EXITING); - p->i = (BeamInstr *) beam_continue_exit; if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) { @@ -9008,8 +9248,6 @@ continue_exit_process(Process *p curr_locks |= ERTS_PROC_LOCK_STATUS; } - erts_add_to_runq(p); - if (curr_locks != ERTS_PROC_LOCK_MAIN) erts_smp_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks); @@ -9021,33 +9259,15 @@ continue_exit_process(Process *p static void timeout_proc(Process* p) { + erts_aint32_t state; BeamInstr** pi = (BeamInstr **) p->def_arg_reg; p->i = *pi; p->flags |= F_TIMO; p->flags &= ~F_INSLPQUEUE; - switch (p->status) { - case P_GARBING: - switch (p->gcstatus) { - case P_SUSPENDED: - goto suspended; - case P_WAITING: - goto waiting; - default: - break; - } - break; - case P_WAITING: - waiting: - erts_add_to_runq(p); - break; - case P_SUSPENDED: - suspended: - p->rstatus = P_RUNABLE; /* MUST set resume status to runnable */ - break; - default: - break; - } + state = erts_smp_atomic32_read_acqb(&p->state); + if (!(state & ERTS_PSFLG_ACTIVE)) + schedule_process(p, state, 0); } @@ -9057,9 +9277,9 @@ cancel_timer(Process* p) ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); p->flags &= ~(F_INSLPQUEUE|F_TIMO); #ifdef ERTS_SMP - erts_cancel_smp_ptimer(p->u.ptimer); + erts_cancel_smp_ptimer(p->u.alive.ptimer); #else - erts_cancel_timer(&p->u.tm); + erts_cancel_timer(&p->u.alive.tm); #endif } @@ -9080,12 +9300,12 @@ set_timer(Process* p, Uint timeout) p->flags &= ~F_TIMO; #ifdef ERTS_SMP - erts_create_smp_ptimer(&p->u.ptimer, + erts_create_smp_ptimer(&p->u.alive.ptimer, p->id, (ErlTimeoutProc) timeout_proc, timeout); #else - erts_set_timer(&p->u.tm, + erts_set_timer(&p->u.alive.tm, (ErlTimeoutProc) timeout_proc, NULL, (void*) p, @@ -9115,6 +9335,7 @@ erts_stack_dump(int to, void *to_arg, Process *p) void erts_program_counter_info(int to, void *to_arg, Process *p) { + erts_aint32_t state; int i; 
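The readers touched in this diff no longer take ERTS_PROC_LOCK_STATUS to inspect a process: erts_process_status() earlier above, and erts_program_counter_info() in the hunk that follows, take a single acquire read of p->state and classify the process by testing the ERTS_PSFLG_* bits in a fixed precedence order (FREE, then EXITING, GC, SUSPENDED, RUNNING, ACTIVE, with waiting as the fallback). A condensed standalone restatement of that precedence; the flag values here are arbitrary stand-ins, the real bit assignments live in erl_process.h:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the ERTS_PSFLG_* bits (not the real values). */
    enum {
        PSFLG_FREE      = 1u << 0,
        PSFLG_EXITING   = 1u << 1,
        PSFLG_GC        = 1u << 2,
        PSFLG_SUSPENDED = 1u << 3,
        PSFLG_RUNNING   = 1u << 4,
        PSFLG_ACTIVE    = 1u << 5
    };

    /* Map one snapshot of the state word to a printable status, mirroring the
     * order of tests in erts_process_status(). */
    static const char *status_from_state(uint32_t state)
    {
        if (state & PSFLG_FREE)      return "free";
        if (state & PSFLG_EXITING)   return "exiting";
        if (state & PSFLG_GC)        return "garbage_collecting";
        if (state & PSFLG_SUSPENDED) return "suspended";
        if (state & PSFLG_RUNNING)   return "running";
        if (state & PSFLG_ACTIVE)    return "runnable";
        return "waiting";
    }

    static const char *process_status(_Atomic uint32_t *statep)
    {
        /* One acquire load replaces locking ERTS_PROC_LOCK_STATUS. */
        return status_from_state(atomic_load_explicit(statep, memory_order_acquire));
    }

The result is only a snapshot; the state word may change the moment after the load, which is acceptable for the informational callers shown in these hunks.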
erts_print(to, to_arg, "Program counter: %p (", p->i); @@ -9123,7 +9344,8 @@ erts_program_counter_info(int to, void *to_arg, Process *p) erts_print(to, to_arg, "CP: %p (", p->cp); print_function_from_pc(to, to_arg, p->cp); erts_print(to, to_arg, ")\n"); - if (!((p->status == P_RUNNING) || (p->status == P_GARBING))) { + state = erts_smp_atomic32_read_acqb(&p->state); + if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_GC))) { erts_print(to, to_arg, "arity = %d\n",p->arity); if (!ERTS_IS_CRASH_DUMPING) { /* @@ -9169,7 +9391,7 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg) erts_print(to, to_arg, "\n%p ", sp); } else { char sbuf[16]; - sprintf(sbuf, "y(%d)", yreg); + erts_snprintf(sbuf, sizeof(sbuf), "y(%d)", yreg); erts_print(to, to_arg, "%-8s ", sbuf); yreg++; } @@ -9296,13 +9518,13 @@ do { \ #endif #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS -# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, TVP) \ - debug_processes_check_found_pid((PBDP), (PID), (TVP), 1) -# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, TVP) \ - debug_processes_check_found_pid((PBDP), (PID), (TVP), 0) +# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, IC) \ + debug_processes_check_found_pid((PBDP), (PID), (IC), 1) +# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, IC) \ + debug_processes_check_found_pid((PBDP), (PID), (IC), 0) #else -# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, TVP) -# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, TVP) +# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, IC) +# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, IC) #endif #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST @@ -9347,7 +9569,7 @@ static Uint processes_bif_tab_chunks; static Export processes_trap_export; typedef struct { - SysTimeval time; + Uint64 interval; } ErtsProcessesBifChunkInfo; typedef enum { @@ -9372,7 +9594,7 @@ typedef struct { struct { Eterm caller; #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS - SysTimeval *pid_started; + Uint64 *pid_started; #endif #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC Eterm *heap; @@ -9401,11 +9623,10 @@ static void debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp); #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS static void debug_processes_check_found_pid(ErtsProcessesBifData *pbdp, Eterm pid, - SysTimeval *started, + Uint64 ic, int pid_should_be_found); #endif #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST -static SysTimeval debug_tv_start; static void debug_processes_check_term_proc_list(void); static void debug_processes_check_term_proc_free_list(ErtsTermProcElement *tpep); #endif @@ -9416,7 +9637,7 @@ save_terminating_process(Process *p) ErtsTermProcElement *tpep = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL, sizeof(ErtsTermProcElement)); ERTS_PROCS_ASSERT(saved_term_procs.start && saved_term_procs.end); - ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx)); + ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx)); ERTS_PROCS_DBG_CHK_TPLIST(); @@ -9424,19 +9645,19 @@ save_terminating_process(Process *p) tpep->next = NULL; tpep->ix = internal_pid_index(p->id); tpep->u.process.pid = p->id; - tpep->u.process.spawned = p->started; - erts_get_emu_time(&tpep->u.process.exited); + tpep->u.process.spawned = p->started_interval; + tpep->u.process.exited = get_proc_interval(); saved_term_procs.end->next = tpep; saved_term_procs.end = tpep; ERTS_PROCS_DBG_CHK_TPLIST(); - 
ERTS_PROCS_ASSERT((tpep->prev->ix >= 0 - ? erts_cmp_timeval(&tpep->u.process.exited, - &tpep->prev->u.process.exited) - : erts_cmp_timeval(&tpep->u.process.exited, - &tpep->prev->u.bif_invocation.time)) > 0); + ERTS_PROCS_ASSERT(tpep->prev->ix >= 0 + ? (tpep->u.process.exited + >= tpep->prev->u.process.exited) + : (tpep->u.process.exited + >= tpep->prev->u.bif_invocation.interval)); } static void @@ -9467,7 +9688,7 @@ cleanup_processes_bif_data(Binary *bp) if (pbdp->bif_invocation) { ErtsTermProcElement *tpep; - erts_smp_mtx_lock(&proc_tab_mtx); + erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); ERTS_PROCS_DBG_TRACE(pbdp->debug.caller, cleanup_processes_bif_data, @@ -9521,7 +9742,7 @@ cleanup_processes_bif_data(Binary *bp) ERTS_PROCS_DBG_CHK_TPLIST(); - erts_smp_mtx_unlock(&proc_tab_mtx); + erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); } } @@ -9549,7 +9770,7 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) pbdp->tix = 0; pbdp->pid_ix = 0; - erts_smp_mtx_lock(&proc_tab_mtx); + erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); locked = 1; ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, init); @@ -9560,7 +9781,7 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS pbdp->debug.pid_started = erts_alloc(ERTS_ALC_T_PROCS_PIDS, - sizeof(SysTimeval)*pbdp->pid_sz); + sizeof(Uint64)*pbdp->pid_sz); #endif ERTS_PROCS_DBG_SAVE_PIDS(pbdp); @@ -9575,7 +9796,8 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) pbdp->bif_invocation = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL, sizeof(ErtsTermProcElement)); pbdp->bif_invocation->ix = -1; - erts_get_emu_time(&pbdp->bif_invocation->u.bif_invocation.time); + pbdp->bif_invocation->u.bif_invocation.interval + = step_proc_interval(); ERTS_PROCS_DBG_CHK_TPLIST(); pbdp->bif_invocation->next = NULL; @@ -9602,30 +9824,31 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) int indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE; int cix = ix / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE; int end_ix = ix + indices; - SysTimeval *invocation_timep; + Uint64 *invocation_interval_p; - invocation_timep = (pbdp->bif_invocation - ? &pbdp->bif_invocation->u.bif_invocation.time - : NULL); + invocation_interval_p + = (pbdp->bif_invocation + ? 
&pbdp->bif_invocation->u.bif_invocation.interval + : NULL); ERTS_PROCS_ASSERT(is_nil(*res_accp)); if (!locked) { - erts_smp_mtx_lock(&proc_tab_mtx); + erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); locked = 1; } - ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx)); + ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx)); ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_table); if (cix != 0) - erts_get_emu_time(&pbdp->chunk[cix].time); + pbdp->chunk[cix].interval = step_proc_interval(); else if (pbdp->bif_invocation) - pbdp->chunk[0].time = *invocation_timep; - /* else: Time is irrelevant */ + pbdp->chunk[0].interval = *invocation_interval_p; + /* else: interval is irrelevant */ - if (end_ix >= erts_max_processes) { + if (end_ix >= erts_proc.max) { ERTS_PROCS_ASSERT(cix+1 == processes_bif_tab_chunks); - end_ix = erts_max_processes; + end_ix = erts_proc.max; indices = end_ix - ix; /* What to do when done with this chunk */ pbdp->state = (processes_bif_tab_chunks == 1 @@ -9634,16 +9857,15 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) } for (; ix < end_ix; ix++) { - Process *rp = process_tab[ix]; + Process *rp = erts_pix2proc(ix); if (rp - && (!invocation_timep - || erts_cmp_timeval(&rp->started, - invocation_timep) < 0)) { + && (!invocation_interval_p + || rp->started_interval < *invocation_interval_p)) { ERTS_PROCS_ASSERT(is_internal_pid(rp->id)); pbdp->pid[pbdp->pid_ix] = rp->id; #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS - pbdp->debug.pid_started[pbdp->pid_ix] = rp->started; + pbdp->debug.pid_started[pbdp->pid_ix] = rp->started_interval; #endif pbdp->pid_ix++; @@ -9653,7 +9875,7 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) pbdp->tix = end_ix; - erts_smp_mtx_unlock(&proc_tab_mtx); + erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); locked = 0; reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED; @@ -9665,8 +9887,8 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) ix = pbdp->tix; indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE; end_ix = ix + indices; - if (end_ix > erts_max_processes) { - end_ix = erts_max_processes; + if (end_ix > erts_proc.max) { + end_ix = erts_proc.max; indices = end_ix - ix; } @@ -9685,20 +9907,20 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) int i; int max_reds; int free_term_procs = 0; - SysTimeval *invocation_timep; + Uint64 invocation_interval; ErtsTermProcElement *tpep; ErtsTermProcElement *free_list = NULL; tpep = pbdp->bif_invocation; ERTS_PROCS_ASSERT(tpep); - invocation_timep = &tpep->u.bif_invocation.time; + invocation_interval = tpep->u.bif_invocation.interval; max_reds = have_reds = ERTS_BIF_REDS_LEFT(p); if (max_reds > ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS) max_reds = ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS; reds = 0; - erts_smp_mtx_lock(&proc_tab_mtx); + erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_term_procs); ERTS_PROCS_DBG_CHK_TPLIST(); @@ -9737,20 +9959,19 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) } else { int cix = tpep->ix/ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE; - SysTimeval *chunk_timep = &pbdp->chunk[cix].time; + Uint64 chunk_interval = pbdp->chunk[cix].interval; Eterm pid = tpep->u.process.pid; ERTS_PROCS_ASSERT(is_internal_pid(pid)); - if (erts_cmp_timeval(&tpep->u.process.spawned, - invocation_timep) < 0) { - if (erts_cmp_timeval(&tpep->u.process.exited, - chunk_timep) < 0) { + if (tpep->u.process.spawned < invocation_interval) { + if 
(tpep->u.process.exited < chunk_interval) { ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp, pid, - &tpep->u.process.spawned); + tpep->u.process.spawned); pbdp->pid[pbdp->pid_ix] = pid; #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS - pbdp->debug.pid_started[pbdp->pid_ix] = tpep->u.process.spawned; + pbdp->debug.pid_started[pbdp->pid_ix] + = tpep->u.process.spawned; #endif pbdp->pid_ix++; ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz); @@ -9758,13 +9979,13 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) else { ERTS_PROCS_DBG_CHK_PID_FOUND(pbdp, pid, - &tpep->u.process.spawned); + tpep->u.process.spawned); } } else { ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp, pid, - &tpep->u.process.spawned); + tpep->u.process.spawned); } i++; @@ -9813,7 +10034,7 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) ERTS_PROCS_DBG_CHK_TPLIST(); ERTS_PROCS_DBG_CHK_FREELIST(free_list); - erts_smp_mtx_unlock(&proc_tab_mtx); + erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); /* * We do the actual free of term proc structures now when we @@ -9873,8 +10094,9 @@ processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) sizeof(Eterm)*pbdp->pid_sz); #if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS pbdp->debug.pid_started = erts_realloc(ERTS_ALC_T_PROCS_PIDS, - pbdp->debug.pid_started, - sizeof(SysTimeval)*pbdp->pid_sz); + pbdp->debug.pid_started, + (sizeof(Uint64) + * pbdp->pid_sz)); #endif } reds = conses/ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED; @@ -9994,22 +10216,13 @@ init_processes_bif(void) { saved_term_procs.start = NULL; saved_term_procs.end = NULL; - processes_bif_tab_chunks = (((erts_max_processes - 1) + processes_bif_tab_chunks = (((erts_proc.max - 1) / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE) + 1); /* processes_trap/2 is a hidden BIF that the processes/0 BIF traps to. 
*/ - sys_memset((void *) &processes_trap_export, 0, sizeof(Export)); - processes_trap_export.address = &processes_trap_export.code[3]; - processes_trap_export.code[0] = am_erlang; - processes_trap_export.code[1] = am_processes_trap; - processes_trap_export.code[2] = 2; - processes_trap_export.code[3] = (BeamInstr) em_apply_bif; - processes_trap_export.code[4] = (BeamInstr) &processes_trap; - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST - erts_get_emu_time(&debug_tv_start); -#endif + erts_init_trap_export(&processes_trap_export, am_erlang, am_processes_trap, 2, + &processes_trap); } @@ -10042,31 +10255,29 @@ erts_debug_processes(Process *c_p) Eterm res; Eterm* hp; Process *p; -#ifdef DEBUG Eterm *hp_end; -#endif - erts_smp_mtx_lock(&proc_tab_mtx); + erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); res = NIL; need = erts_process_count() * 2; hp = HAlloc(c_p, need); /* we need two heap words for each pid */ -#ifdef DEBUG hp_end = hp + need; -#endif /* make the list by scanning bakward */ - for (i = erts_max_processes-1; i >= 0; i--) { - if ((p = process_tab[i]) != NULL) { - res = CONS(hp, process_tab[i]->id, res); + for (i = erts_proc.max-1; i >= 0; i--) { + p = erts_pix2proc(i); + if (p) { + res = CONS(hp, p->id, res); hp += 2; } } - ASSERT(hp == hp_end); - erts_smp_mtx_unlock(&proc_tab_mtx); + erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); + + HRelease(c_p, hp_end, hp); return res; } @@ -10098,14 +10309,12 @@ erts_debug_processes_bif_info(Process *c_p) static void debug_processes_check_found_pid(ErtsProcessesBifData *pbdp, Eterm pid, - SysTimeval *tvp, + Uint64 ic, int pid_should_be_found) { int i; for (i = 0; i < pbdp->pid_ix; i++) { - if (pbdp->pid[i] == pid - && pbdp->debug.pid_started[i].tv_sec == tvp->tv_sec - && pbdp->debug.pid_started[i].tv_usec == tvp->tv_usec) { + if (pbdp->pid[i] == pid && pbdp->debug.pid_started[i] == ic) { ERTS_PROCS_ASSERT(pid_should_be_found); return; } @@ -10139,8 +10348,8 @@ debug_processes_save_all_pids(ErtsProcessesBifData *pbdp) pbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PROCS_PIDS, sizeof(Eterm)*pbdp->pid_sz); - for (tix = 0, cpix = 0; tix < erts_max_processes; tix++) { - Process *rp = process_tab[tix]; + for (tix = 0, cpix = 0; tix < erts_proc.max; tix++) { + Process *rp = erts_pix2proc(tix); if (rp) { ERTS_PROCS_ASSERT(is_internal_pid(rp->id)); pbdp->debug.correct_pids[cpix++] = rp->id; @@ -10193,14 +10402,13 @@ debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp) static void debug_processes_check_term_proc_list(void) { - ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx)); + ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx)); if (!saved_term_procs.start) ERTS_PROCS_ASSERT(!saved_term_procs.end); else { - SysTimeval tv_now; - SysTimeval *prev_xtvp = NULL; + Uint64 curr_interval = get_proc_interval(); + Uint64 *prev_x_interval_p = NULL; ErtsTermProcElement *tpep; - erts_get_emu_time(&tv_now); for (tpep = saved_term_procs.start; tpep; tpep = tpep->next) { if (!tpep->prev) @@ -10212,20 +10420,17 @@ debug_processes_check_term_proc_list(void) else ERTS_PROCS_ASSERT(tpep->next->prev == tpep); if (tpep->ix < 0) { - SysTimeval *tvp = &tpep->u.bif_invocation.time; - ERTS_PROCS_ASSERT(erts_cmp_timeval(&debug_tv_start, tvp) < 0 - && erts_cmp_timeval(tvp, &tv_now) < 0); + Uint64 interval = tpep->u.bif_invocation.interval; + ERTS_PROCS_ASSERT(interval <= curr_interval); } else { - SysTimeval *stvp = &tpep->u.process.spawned; - SysTimeval *xtvp = &tpep->u.process.exited; + Uint64 s_interval = 
tpep->u.process.spawned; + Uint64 x_interval = tpep->u.process.exited; - ERTS_PROCS_ASSERT(erts_cmp_timeval(&debug_tv_start, - stvp) < 0); - ERTS_PROCS_ASSERT(erts_cmp_timeval(stvp, xtvp) < 0); - if (prev_xtvp) - ERTS_PROCS_ASSERT(erts_cmp_timeval(prev_xtvp, xtvp) < 0); - prev_xtvp = xtvp; + ERTS_PROCS_ASSERT(s_interval <= x_interval); + if (prev_x_interval_p) + ERTS_PROCS_ASSERT(*prev_x_interval_p <= x_interval); + prev_x_interval_p = &tpep->u.process.exited; ERTS_PROCS_ASSERT(is_internal_pid(tpep->u.process.pid)); ERTS_PROCS_ASSERT(tpep->ix == internal_pid_index(tpep->u.process.pid)); diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 9e7a5a5c74..e789c873fb 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -112,28 +112,29 @@ extern int erts_sched_thread_suggested_stack_size; #define PRIORITY_NORMAL 2 #define PRIORITY_LOW 3 #define ERTS_NO_PROC_PRIO_LEVELS 4 +#define ERTS_NO_PROC_PRIO_QUEUES 3 #define ERTS_PORT_PRIO_LEVEL ERTS_NO_PROC_PRIO_LEVELS +#define ERTS_NO_PRIO_LEVELS (ERTS_NO_PROC_PRIO_LEVELS + 1) #define ERTS_RUNQ_FLGS_PROCS_QMASK \ ((((Uint32) 1) << ERTS_NO_PROC_PRIO_LEVELS) - 1) -#define ERTS_NO_PRIO_LEVELS (ERTS_NO_PROC_PRIO_LEVELS + 1) -#define ERTS_RUNQ_FLGS_MIGRATE_QMASK \ +#define ERTS_RUNQ_FLGS_QMASK \ ((((Uint32) 1) << ERTS_NO_PRIO_LEVELS) - 1) #define ERTS_RUNQ_FLGS_EMIGRATE_SHFT \ - ERTS_NO_PROC_PRIO_LEVELS + ERTS_NO_PRIO_LEVELS #define ERTS_RUNQ_FLGS_IMMIGRATE_SHFT \ (ERTS_RUNQ_FLGS_EMIGRATE_SHFT + ERTS_NO_PRIO_LEVELS) #define ERTS_RUNQ_FLGS_EVACUATE_SHFT \ (ERTS_RUNQ_FLGS_IMMIGRATE_SHFT + ERTS_NO_PRIO_LEVELS) #define ERTS_RUNQ_FLGS_EMIGRATE_QMASK \ - (ERTS_RUNQ_FLGS_MIGRATE_QMASK << ERTS_RUNQ_FLGS_EMIGRATE_SHFT) + (ERTS_RUNQ_FLGS_QMASK << ERTS_RUNQ_FLGS_EMIGRATE_SHFT) #define ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \ - (ERTS_RUNQ_FLGS_MIGRATE_QMASK << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT) + (ERTS_RUNQ_FLGS_QMASK << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT) #define ERTS_RUNQ_FLGS_EVACUATE_QMASK \ - (ERTS_RUNQ_FLGS_MIGRATE_QMASK << ERTS_RUNQ_FLGS_EVACUATE_SHFT) + (ERTS_RUNQ_FLGS_QMASK << ERTS_RUNQ_FLGS_EVACUATE_SHFT) #define ERTS_RUNQ_FLG_BASE2 \ (ERTS_RUNQ_FLGS_EVACUATE_SHFT + ERTS_NO_PRIO_LEVELS) @@ -148,14 +149,18 @@ extern int erts_sched_thread_suggested_stack_size; (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 3)) #define ERTS_RUNQ_FLG_INACTIVE \ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 4)) +#define ERTS_RUNQ_FLG_NONEMPTY \ + (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 5)) +#define ERTS_RUNQ_FLG_PROTECTED \ + (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 6)) #define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \ (ERTS_RUNQ_FLGS_EMIGRATE_QMASK \ | ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \ | ERTS_RUNQ_FLGS_EVACUATE_QMASK) + #define ERTS_RUNQ_FLGS_MIGRATION_INFO \ - (ERTS_RUNQ_FLGS_MIGRATION_QMASKS \ - | ERTS_RUNQ_FLG_INACTIVE \ + (ERTS_RUNQ_FLG_INACTIVE \ | ERTS_RUNQ_FLG_OUT_OF_WORK \ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK) @@ -186,33 +191,45 @@ extern int erts_sched_thread_suggested_stack_size; #define ERTS_UNSET_RUNQ_FLG_EVACUATE(FLGS, PRIO) \ ((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO))) -#define ERTS_RUNQ_IFLG_SUSPENDED (((erts_aint32_t) 1) << 0) -#define ERTS_RUNQ_IFLG_NONEMPTY (((erts_aint32_t) 1) << 1) - - -#ifdef DEBUG -# if defined(ARCH_64) && !HALFWORD_HEAP -# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \ - (*((char **) &(RQP)) = (char *) (0xdeadbeefdead0003 | ((N) << 4))) -# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \ -do { \ - ASSERT((RQP) != NULL); \ - ASSERT(((((Uint) (RQP)) & ((Uint) 0x3))) == ((Uint) 0)); \ - ASSERT((((Uint) (RQP)) & ~((Uint) 
0xffff)) != ((Uint) 0xdeadbeefdead0000));\ -} while (0) -# else -# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \ - (*((char **) &(RQP)) = (char *) (0xdead0003 | ((N) << 4))) -# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \ -do { \ - ASSERT((RQP) != NULL); \ - ASSERT(((((UWord) (RQP)) & ((UWord) 1))) == ((UWord) 0)); \ - ASSERT((((UWord) (RQP)) & ~((UWord) 0xffff)) != ((UWord) 0xdead0000)); \ -} while (0) -# endif -#else -# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) -# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) +#define ERTS_RUNQ_FLGS_INIT(RQ, INIT) \ + erts_smp_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT)) +#define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \ + ((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \ + (erts_aint32_t) (FLGS))) +#define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \ + ((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \ + (erts_aint32_t) ~(FLGS))) +#define ERTS_RUNQ_FLGS_GET(RQ) \ + ((Uint32) erts_smp_atomic32_read_acqb(&(RQ)->flags)) +#define ERTS_RUNQ_FLGS_GET_NOB(RQ) \ + ((Uint32) erts_smp_atomic32_read_nob(&(RQ)->flags)) +#define ERTS_RUNQ_FLGS_GET_MB(RQ) \ + ((Uint32) erts_smp_atomic32_read_mb(&(RQ)->flags)) +#define ERTS_RUNQ_FLGS_MASK_SET(RQ, MSK, FLGS) \ + ((Uint32) erts_smp_atomic32_mask_set_relb(&(RQ)->flags, \ + (erts_aint32_t) (MSK), \ + (erts_aint32_t) (FLGS))) + +ERTS_GLB_INLINE erts_aint32_t +erts_smp_atomic32_mask_set_relb(erts_smp_atomic32_t *a32p, + erts_aint32_t mask, + erts_aint32_t set); +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint32_t +erts_smp_atomic32_mask_set_relb(erts_smp_atomic32_t *a32p, + erts_aint32_t mask, + erts_aint32_t set) +{ + erts_aint32_t act = erts_smp_atomic32_read_nob(a32p); + while (1) { + erts_aint32_t exp = act; + erts_aint32_t new = exp & ~mask; + new |= (mask & set); + act = erts_smp_atomic32_cmpxchg_relb(a32p, new, exp); + if (act == exp) + return act; + } +} #endif typedef enum { @@ -258,14 +275,15 @@ typedef enum { #define ERTS_SSI_AUX_WORK_DD_THR_PRGR (((erts_aint32_t) 1) << 2) #define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 3) #define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 4) -#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 5) -#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 6) -#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 7) -#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 8) -#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 9) -#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 10) -#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 11) -#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 12) +#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP (((erts_aint32_t) 1) << 5) +#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 6) +#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 7) +#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 8) +#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 9) +#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 10) +#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 11) +#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 12) +#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 13) typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo; @@ -291,7 +309,7 @@ struct ErtsSchedulerSleepInfo_ { typedef struct ErtsProcList_ ErtsProcList; struct ErtsProcList_ { Eterm pid; - SysTimeval started; + Uint64 started_interval; ErtsProcList* next; }; @@ -312,21 
+330,39 @@ typedef struct ErtsSchedulerData_ ErtsSchedulerData; typedef struct ErtsRunQueue_ ErtsRunQueue; typedef struct { - int len; - int max_len; + erts_smp_atomic32_t len; + erts_aint32_t max_len; int reds; +} ErtsRunQueueInfo; + +#ifdef ERTS_SMP + +typedef struct { + Uint32 flags; + ErtsRunQueue *misc_evac_runq; struct { struct { int this; int other; } limit; ErtsRunQueue *runq; - } migrate; -} ErtsRunQueueInfo; + Uint32 flags; + } prio[ERTS_NO_PRIO_LEVELS]; +} ErtsMigrationPath; + +typedef struct ErtsMigrationPaths_ ErtsMigrationPaths; + +struct ErtsMigrationPaths_ { + void *block; + ErtsMigrationPaths *next; + ErtsThrPrgrVal thr_prgr; + ErtsMigrationPath mpath[1]; +}; + +#endif /* ERTS_SMP */ struct ErtsRunQueue_ { int ix; - erts_smp_atomic32_t info_flags; erts_smp_mtx_t mtx; erts_smp_cnd_t cnd; @@ -334,19 +370,18 @@ struct ErtsRunQueue_ { ErtsSchedulerData *scheduler; int waiting; /* < 0 in sys schedule; > 0 on cnd variable */ int woken; - Uint32 flags; + erts_smp_atomic32_t flags; int check_balance_reds; int full_reds_history_sum; int full_reds_history[ERTS_FULL_REDS_HISTORY_SIZE]; int out_of_work_count; - int max_len; - int len; + erts_aint32_t max_len; + erts_aint32_t len; int wakeup_other; int wakeup_other_reds; int halt_in_progress; struct { - int len; ErtsProcList *pending_exiters; Uint context_switches; Uint reductions; @@ -361,7 +396,7 @@ struct ErtsRunQueue_ { struct { ErtsMiscOpList *start; ErtsMiscOpList *end; - ErtsRunQueue *evac_runq; + erts_smp_atomic_t evac_runq; } misc; struct { @@ -381,7 +416,7 @@ extern ErtsAlignedRunQueue *erts_aligned_run_queues; #define ERTS_PROC_REDUCTIONS_EXECUTED(RQ, PRIO, REDS, AREDS) \ do { \ (RQ)->procs.reductions += (AREDS); \ - (RQ)->procs.prio_info[p->prio].reds += (REDS); \ + (RQ)->procs.prio_info[(PRIO)].reds += (REDS); \ (RQ)->check_balance_reds -= (REDS); \ (RQ)->wakeup_other_reds += (AREDS); \ } while (0) @@ -427,6 +462,10 @@ typedef struct { void (*completed_callback)(void *); void (*completed_arg)(void *); } dd; + struct { + ErtsThrPrgrLaterOp *first; + ErtsThrPrgrLaterOp *last; + } later_op; #endif #ifdef ERTS_USE_ASYNC_READY_Q struct { @@ -439,6 +478,7 @@ typedef struct { #endif #ifdef ERTS_SMP struct { + Uint64 next; int *sched2jix; int jix; ErtsDelayedAuxWorkWakeupJob *job; @@ -481,6 +521,7 @@ struct ErtsSchedulerData_ { ErtsSchedAllocData alloc_data; + Uint64 reductions; ErtsSchedWallTime sched_wall_time; #ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC @@ -500,6 +541,90 @@ extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data; extern ErtsSchedulerData *erts_scheduler_data; #endif +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +int erts_smp_lc_runq_is_locked(ErtsRunQueue *); +#endif + +#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS + +/* + * Run queue locked during modifications. We use atomic ops since + * other threads peek at values without run queue lock. 
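The inline helpers that follow (erts_smp_inc_runq_len, erts_smp_dec_runq_len) keep the per-priority queue length in an atomic and mirror "this priority is non-empty" as one bit per priority in the run queue's flag word, so other schedulers can peek at lengths and emptiness without taking the run-queue lock. A condensed sketch of that bookkeeping with C11 atomics; the struct and function names here are illustrative, not the ERTS ones, and as in the patch the caller is assumed to hold the run-queue lock while updating.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <assert.h>

    #define NPRIO 4

    struct runq {
        atomic_uint_fast32_t flags;       /* bit p set <=> prio p non-empty */
        atomic_int_fast32_t  len[NPRIO];  /* per-priority queue length      */
    };

    /* Caller holds the run-queue lock; the atomics are only there so that
     * lock-free readers on other threads see consistent values. */
    static void runq_len_inc(struct runq *rq, int prio)
    {
        int32_t len = (int32_t) atomic_load_explicit(&rq->len[prio],
                                                     memory_order_relaxed);
        if (len == 0)   /* 0 -> 1 transition: publish the non-empty bit */
            atomic_fetch_or_explicit(&rq->flags, (uint32_t) 1 << prio,
                                     memory_order_relaxed);
        atomic_store_explicit(&rq->len[prio], len + 1, memory_order_release);
    }

    static void runq_len_dec(struct runq *rq, int prio)
    {
        int32_t len = (int32_t) atomic_load_explicit(&rq->len[prio],
                                                     memory_order_relaxed);
        assert(len > 0);
        if (len == 1)   /* 1 -> 0 transition: clear the non-empty bit */
            atomic_fetch_and_explicit(&rq->flags, ~((uint32_t) 1 << prio),
                                      memory_order_relaxed);
        atomic_store_explicit(&rq->len[prio], len - 1, memory_order_release);
    }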
+ */ + +ERTS_GLB_INLINE void erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); +ERTS_GLB_INLINE void erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); +ERTS_GLB_INLINE void erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE void +erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) +{ + erts_aint32_t len; + + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + + len = erts_smp_atomic32_read_nob(&rqi->len); + ASSERT(len >= 0); + if (len == 0) { + ASSERT((erts_smp_atomic32_read_nob(&rq->flags) + & ((erts_aint32_t) (1 << prio))) == 0); + erts_smp_atomic32_read_bor_nob(&rq->flags, + (erts_aint32_t) (1 << prio)); + } + len++; + if (rqi->max_len < len) + rqi->max_len = len; + + erts_smp_atomic32_set_relb(&rqi->len, len); + + rq->len++; + if (rq->max_len < rq->len) + rq->max_len = len; + ASSERT(rq->len > 0); +} + +ERTS_GLB_INLINE void +erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) +{ + erts_aint32_t len; + + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + + len = erts_smp_atomic32_read_nob(&rqi->len); + len--; + ASSERT(len >= 0); + if (len == 0) { + ASSERT((erts_smp_atomic32_read_nob(&rq->flags) + & ((erts_aint32_t) (1 << prio)))); + erts_smp_atomic32_read_band_nob(&rq->flags, + ~((erts_aint32_t) (1 << prio))); + } + erts_smp_atomic32_set_relb(&rqi->len, len); + + rq->len--; + ASSERT(rq->len >= 0); +} + +ERTS_GLB_INLINE void +erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) +{ + erts_aint32_t len; + + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + + len = erts_smp_atomic32_read_nob(&rqi->len); + ASSERT(rqi->max_len >= len); + rqi->max_len = len; +} + +#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ + +#define RUNQ_READ_LEN(X) erts_smp_atomic32_read_nob((X)) + +#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */ + /* * Process Specific Data. * @@ -612,6 +737,8 @@ struct ErtsPendingSuspend_ { # define BIN_OLD_VHEAP(p) (p)->bin_old_vheap struct process { + Eterm id; /* The pid of this process + (need to be first in struct) */ /* All fields in the PCB that differs between different heap * architectures, have been moved to the end of this struct to * make sure that as few offsets as possible differ. Different @@ -654,13 +781,9 @@ struct process { * Number of reductions left to execute. * Only valid for the current process. */ - Uint32 status; /* process STATE */ - Uint32 gcstatus; /* process gc STATE */ - Uint32 rstatus; /* process resume STATE */ Uint32 rcount; /* suspend count */ - Eterm id; /* The pid of this process */ int prio; /* Priority of process */ - int skipped; /* Times a low prio process has been rescheduled */ + int schedule_count; /* Times left to reschedule a low prio process */ Uint reds; /* No of reductions for this process */ Eterm tracer_proc; /* If proc is traced, this is the tracer (can NOT be boxed) */ @@ -673,7 +796,6 @@ struct process { Eterm ftrace; /* Latest exception stack trace dump */ Process *next; /* Pointer to next process in run queue */ - Process *prev; /* Pointer to prev process in run queue */ struct reg_proc *reg; /* NULL iff not registered */ ErtsLink *nlinks; @@ -711,8 +833,8 @@ struct process { * Information mainly for post-mortem use (erl crash dump). */ Eterm parent; /* Pid of process that created this process. */ - SysTimeval started; /* Time when started. */ - + erts_approx_time_t approx_started; /* Time when started. 
*/ + Uint64 started_interval; /* This is the place, where all fields that differs between memory * architectures, have gone to. @@ -735,27 +857,29 @@ struct process { Uint64 bin_old_vheap; /* Virtual old heap size for binaries */ union { + struct { #ifdef ERTS_SMP - ErtsSmpPTimer *ptimer; + ErtsSmpPTimer *ptimer; + ErlMessageInQueue msg_inq; + ErtsPendExit pending_exit; #else - ErlTimer tm; /* Timer entry */ + ErlTimer tm; /* Timer entry */ +#endif + } alive; /* when process is alive */ +#ifdef ERTS_SMP + ErtsThrPrgrLaterOp release_data; /* when releasing process struct */ #endif void *exit_data; /* Misc data referred during termination */ } u; - ErtsRunQueue *bound_runq; + erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */ #ifdef ERTS_SMP erts_proc_lock_t lock; ErtsSchedulerData *scheduler_data; - int is_exiting; - Uint32 runq_flags; - Uint32 status_flags; - ErlMessageInQueue msg_inq; Eterm suspendee; ErtsPendingSuspend *pending_suspenders; - ErtsPendExit pending_exit; - ErtsRunQueue *run_queue; + erts_smp_atomic_t run_queue; #ifdef HIPE struct hipe_process_state_smp hipe_smp; #endif @@ -809,6 +933,29 @@ void erts_check_for_holes(Process* p); #define SEQ_TRACE_TOKEN(p) ((p)->seq_trace_token) +#if ERTS_NO_PROC_PRIO_LEVELS > 4 +# error "Need to increase ERTS_PSFLG_PRIO_SHIFT" +#endif + +#define ERTS_PSFLG_PRIO_SHIFT 2 + +#define ERTS_PSFLG_BIT(N) \ + (((erts_aint32_t) 1) << (ERTS_PSFLG_PRIO_SHIFT + (N))) + +#define ERTS_PSFLG_PRIO_MASK (ERTS_PSFLG_BIT(0) - 1) + +#define ERTS_PSFLG_FREE ERTS_PSFLG_BIT(0) +#define ERTS_PSFLG_EXITING ERTS_PSFLG_BIT(1) +#define ERTS_PSFLG_PENDING_EXIT ERTS_PSFLG_BIT(2) +#define ERTS_PSFLG_ACTIVE ERTS_PSFLG_BIT(3) +#define ERTS_PSFLG_IN_RUNQ ERTS_PSFLG_BIT(4) +#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(5) +#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(6) +#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(7) +#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(8) +#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(9) + + /* The sequential tracing token is a tuple of size 5: * * {Flags, Label, Serial, Sender} @@ -881,9 +1028,8 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra); Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz); #endif -extern Process** process_tab; -extern Uint erts_max_processes; -extern Uint erts_process_tab_index_mask; +extern erts_smp_rwmtx_t erts_proc_tab_rwmtx; +extern erts_smp_atomic_t *erts_proc_tab; extern Uint erts_default_process_flags; extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx; /* If any of the erts_system_monitor_* variables are set (enabled), @@ -911,17 +1057,13 @@ struct erts_system_profile_flags_t { unsigned int exclusive : 1; }; extern struct erts_system_profile_flags_t erts_system_profile_flags; - -#define INVALID_PID(p, pid) ((p) == NULL \ - || (p)->id != (pid) \ - || (p)->status == P_EXITING) #define IS_TRACED(p) ( (p)->tracer_proc != NIL ) #define ARE_TRACE_FLAGS_ON(p,tf) ( ((p)->trace_flags & (tf|F_SENSITIVE)) == (tf) ) #define IS_TRACED_FL(p,tf) ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) ) /* process flags */ -#define F_TRAPEXIT (1 << 0) +#define F_HIBERNATE_SCHED (1 << 0) /* Schedule out after hibernate op */ #define F_INSLPQUEUE (1 << 1) /* Set if in timer queue */ #define F_TIMO (1 << 2) /* Set if timeout */ #define F_HEAP_GROW (1 << 3) @@ -932,7 +1074,6 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags; #define F_HAVE_BLCKD_MSCHED (1 << 8) /* Process has blocked multi-scheduling */ #define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */ #define F_FORCE_GC (1 << 
10) /* Force gc at process in-scheduling */ -#define F_HIBERNATE_SCHED (1 << 11) /* Schedule out after hibernate op */ /* process trace_flags */ #define F_SENSITIVE (1 << 0) @@ -999,67 +1140,10 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags; #define DT_UTAG_FLAGS(P) ((P)->dt_utag_flags) #endif - -#ifdef ERTS_SMP -/* Status flags ... */ -#define ERTS_PROC_SFLG_PENDADD2SCHEDQ (((Uint32) 1) << 0) /* Pending - add to - schedule q */ -#define ERTS_PROC_SFLG_INRUNQ (((Uint32) 1) << 1) /* Process is - in run q */ -#define ERTS_PROC_SFLG_TRAPEXIT (((Uint32) 1) << 2) /* Process is - trapping - exit */ -#define ERTS_PROC_SFLG_RUNNING (((Uint32) 1) << 3) /* Process is - running */ -/* Scheduler flags in process struct... */ -#define ERTS_PROC_RUNQ_FLG_RUNNING (((Uint32) 1) << 0) /* Process is - running */ - -#endif - - -#ifdef ERTS_SMP -#define ERTS_PROC_IS_TRAPPING_EXITS(P) \ - (ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((P)) \ - & ERTS_PROC_LOCK_STATUS), \ - (P)->status_flags & ERTS_PROC_SFLG_TRAPEXIT) - -#define ERTS_PROC_SET_TRAP_EXIT(P) \ - (ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS) \ - & erts_proc_lc_my_proc_locks((P))) \ - == (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)), \ - (P)->status_flags |= ERTS_PROC_SFLG_TRAPEXIT, \ - (P)->flags |= F_TRAPEXIT, \ - 1) - -#define ERTS_PROC_UNSET_TRAP_EXIT(P) \ - (ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS) \ - & erts_proc_lc_my_proc_locks((P))) \ - == (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)), \ - (P)->status_flags &= ~ERTS_PROC_SFLG_TRAPEXIT, \ - (P)->flags &= ~F_TRAPEXIT, \ - 0) -#else -#define ERTS_PROC_IS_TRAPPING_EXITS(P) ((P)->flags & F_TRAPEXIT) -#define ERTS_PROC_SET_TRAP_EXIT(P) ((P)->flags |= F_TRAPEXIT, 1) -#define ERTS_PROC_UNSET_TRAP_EXIT(P) ((P)->flags &= ~F_TRAPEXIT, 0) -#endif - /* Option flags to erts_send_exit_signal() */ #define ERTS_XSIG_FLG_IGN_KILL (((Uint32) 1) << 0) #define ERTS_XSIG_FLG_NO_IGN_NORMAL (((Uint32) 1) << 1) - -/* Process status values */ -#define P_FREE 0 -#define P_RUNABLE 1 -#define P_WAITING 2 -#define P_RUNNING 3 -#define P_EXITING 4 -#define P_GARBING 5 -#define P_SUSPENDED 6 - #define CANCEL_TIMER(p) \ do { \ if ((p)->flags & (F_INSLPQUEUE)) \ @@ -1081,6 +1165,9 @@ void erts_early_init_scheduling(int); void erts_init_scheduling(int, int); Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable); +Uint64 erts_get_proc_interval(void); +Uint64 erts_ensure_later_proc_interval(Uint64); +Uint64 erts_step_proc_interval(void); ErtsProcList *erts_proclist_create(Process *); void erts_proclist_destroy(ErtsProcList *); @@ -1090,6 +1177,10 @@ int erts_sched_set_wakeup_other_thresold(char *str); int erts_sched_set_wakeup_other_type(char *str); int erts_sched_set_busy_wait_threshold(char *str); +void erts_schedule_thr_prgr_later_op(void (*)(void *), + void *, + ErtsThrPrgrLaterOp *); + #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) int erts_dbg_check_halloc_lock(Process *p); #endif @@ -1116,6 +1207,10 @@ void erts_smp_notify_check_children_needed(void); #if ERTS_USE_ASYNC_READY_Q void erts_notify_check_async_ready_queue(void *); #endif +#ifdef ERTS_SMP +void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later); +void erts_notify_finish_breakpointing(Process* p); +#endif void erts_schedule_misc_aux_work(int sched_id, void (*func)(void *), void *arg); @@ -1136,14 +1231,6 @@ Eterm erts_get_schedulers_binds(Process *c_p); Eterm erts_set_cpu_topology(Process *c_p, Eterm term); Eterm erts_bind_schedulers(Process *c_p, Eterm 
how); ErtsRunQueue *erts_schedid2runq(Uint); -#ifdef ERTS_SMP -ErtsMigrateResult erts_proc_migrate(Process *, - ErtsProcLocks *, - ErtsRunQueue *, - int *, - ErtsRunQueue *, - int *); -#endif Process *schedule(Process*, int); void erts_schedule_misc_op(void (*)(void *), void *); Eterm erl_create_process(Process*, Eterm, Eterm, Eterm, ErlSpawnOpts*); @@ -1192,8 +1279,7 @@ int erts_send_exit_signal(Process *, #ifdef ERTS_SMP void erts_handle_pending_exit(Process *, ErtsProcLocks); #define ERTS_PROC_PENDING_EXIT(P) \ - (ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((P)) & ERTS_PROC_LOCK_STATUS),\ - (P)->pending_exit.reason != THE_NON_VALUE) + (ERTS_PSFLG_PENDING_EXIT & erts_smp_atomic32_read_acqb(&(P)->state)) #else #define ERTS_PROC_PENDING_EXIT(P) 0 #endif @@ -1245,13 +1331,26 @@ ErtsSchedulerData *erts_get_scheduler_data(void) #endif #endif +void erts_schedule_process(Process *, erts_aint32_t); + +ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p); +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE void +erts_proc_notify_new_message(Process *p) +{ + /* No barrier needed, due to msg lock */ + erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + if (!(state & ERTS_PSFLG_ACTIVE)) + erts_schedule_process(p, state); +} +#endif + #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) #define ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__ #include "erl_process_lock.h" #undef ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__ -int erts_smp_lc_runq_is_locked(ErtsRunQueue *); #define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) \ do { \ if ((L)) \ @@ -1377,13 +1476,91 @@ erts_proc_set_error_handler(Process *p, ErtsProcLocks plocks, Eterm handler) #endif +#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS + #ifdef ERTS_SMP -ErtsRunQueue *erts_prepare_emigrate(ErtsRunQueue *c_rq, - ErtsRunQueueInfo *c_rqi, - int prio); +#include "erl_thr_progress.h" + +extern erts_atomic_t erts_migration_paths; + +ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths_managed(void); +ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths(void); ERTS_GLB_INLINE ErtsRunQueue *erts_check_emigration_need(ErtsRunQueue *c_rq, int prio); +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE ErtsMigrationPaths * +erts_get_migration_paths_managed(void) +{ + return (ErtsMigrationPaths *) erts_atomic_read_ddrb(&erts_migration_paths); +} + +ERTS_GLB_INLINE ErtsMigrationPaths * +erts_get_migration_paths(void) +{ + if (erts_thr_progress_is_managed_thread()) + return erts_get_migration_paths_managed(); + else + return NULL; +} + +ERTS_GLB_INLINE ErtsRunQueue * +erts_check_emigration_need(ErtsRunQueue *c_rq, int prio) +{ + ErtsMigrationPaths *mps = erts_get_migration_paths(); + ErtsMigrationPath *mp; + Uint32 flags; + + if (!mps) + return NULL; + + mp = &mps->mpath[c_rq->ix]; + flags = mp->flags; + + if (ERTS_CHK_RUNQ_FLG_EMIGRATE(flags, prio)) { + int len; + + if (ERTS_CHK_RUNQ_FLG_EVACUATE(flags, prio)) { + /* force emigration */ + return mp->prio[prio].runq; + } + + if (flags & ERTS_RUNQ_FLG_INACTIVE) { + /* + * Run queue was inactive at last balance. Verify that + * it still is before forcing emigration. 
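The new erts_check_emigration_need() above no longer reads migration limits out of its own run queue under the lock; it loads an immutable ErtsMigrationPaths snapshot through the erts_migration_paths atomic (read with ddrb, i.e. data-dependency/acquire semantics) and consults per-priority limits from that snapshot. A minimal sketch of the publish/read pattern; the names are illustrative, and the deferred reclamation that the patch drives via thread progress is only indicated by a comment here.

    #include <stdatomic.h>
    #include <stddef.h>

    struct snapshot { int limit_this; int limit_other; /* ... */ };

    static _Atomic(struct snapshot *) current_paths;

    /* Readers: load the pointer with acquire so the snapshot's contents are
     * visible; a published snapshot is never modified afterwards. */
    static struct snapshot *read_paths(void)
    {
        return atomic_load_explicit(&current_paths, memory_order_acquire);
    }

    /* Writer (e.g. the balancing code): build a complete new snapshot, then
     * publish it with release.  The old snapshot must not be freed until all
     * readers are done with it; the patch defers that via thread progress. */
    static void publish_paths(struct snapshot *new_snap)
    {
        struct snapshot *old =
            atomic_exchange_explicit(&current_paths, new_snap,
                                     memory_order_release);
        /* defer_free(old);  -- placeholder for deferred reclamation */
        (void) old;
    }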
+ */ + if (ERTS_RUNQ_FLGS_GET(c_rq) & ERTS_RUNQ_FLG_INACTIVE) + return mp->prio[prio].runq; + } + + + if (prio == ERTS_PORT_PRIO_LEVEL) + len = RUNQ_READ_LEN(&c_rq->ports.info.len); + else + len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len); + + if (len > mp->prio[prio].limit.this) { + ErtsRunQueue *n_rq = mp->prio[prio].runq; + if (n_rq) { + if (prio == ERTS_PORT_PRIO_LEVEL) + len = RUNQ_READ_LEN(&n_rq->ports.info.len); + else + len = RUNQ_READ_LEN(&n_rq->procs.prio_info[prio].len); + + if (len < mp->prio[prio].limit.other) + return n_rq; + } + } + } + return NULL; +} + +#endif + +#endif + #endif ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp); @@ -1406,29 +1583,6 @@ ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) #if ERTS_GLB_INLINE_INCL_FUNC_DEF -#ifdef ERTS_SMP -ERTS_GLB_INLINE ErtsRunQueue * -erts_check_emigration_need(ErtsRunQueue *c_rq, int prio) -{ - ErtsRunQueueInfo *c_rqi; - - if (!ERTS_CHK_RUNQ_FLG_EMIGRATE(c_rq->flags, prio)) - return NULL; - - if (prio == ERTS_PORT_PRIO_LEVEL) - c_rqi = &c_rq->ports.info; - else - c_rqi = &c_rq->procs.prio_info[prio]; - - if (!ERTS_CHK_RUNQ_FLG_EVACUATE(c_rq->flags, prio) - && !(c_rq->flags & ERTS_RUNQ_FLG_INACTIVE) - && c_rqi->len <= c_rqi->migrate.limit.this) - return NULL; - - return erts_prepare_emigrate(c_rq, c_rqi, prio); -} -#endif - ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp) { @@ -1466,10 +1620,9 @@ Uint erts_get_scheduler_id(void) ERTS_GLB_INLINE ErtsRunQueue * erts_get_runq_proc(Process *p) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); #ifdef ERTS_SMP - ASSERT(p->run_queue); - return p->run_queue; + ASSERT(ERTS_AINT_NULL != erts_atomic_read_nob(&p->run_queue)); + return (ErtsRunQueue *) erts_atomic_read_nob(&p->run_queue); #else return ERTS_RUNQ_IX(0); #endif @@ -1640,25 +1793,13 @@ extern int erts_disable_proc_not_running_opt; #ifdef DEBUG #define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) \ - do { ASSERT(!(P)->is_exiting); } while (0) + do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0) #else #define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) #endif -/* NOTE: At least one process lock has to be held on P! 
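Both the crash-dump loop just above and the ERTS_PROC_IS_EXITING changes here replace the old status/gcstatus/status_flags fields with a single atomic state word tested against ERTS_PSFLG_* bits (the dump now skips processes with ERTS_PSFLG_EXITING or ERTS_PSFLG_GC set). A small sketch of that style of state word with C11 atomics; the flag values and helper names are illustrative.

    #include <stdatomic.h>
    #include <stdint.h>

    enum {
        PSFLG_FREE    = 1u << 0,
        PSFLG_EXITING = 1u << 1,
        PSFLG_ACTIVE  = 1u << 3,
        PSFLG_RUNNING = 1u << 5,
        PSFLG_GC      = 1u << 7
    };

    struct proc { atomic_uint_fast32_t state; /* ... */ };

    /* Readers test bits out of one acquire load instead of reading several
     * plain fields under a lock. */
    static int proc_is_exiting(struct proc *p)
    {
        return (atomic_load_explicit(&p->state, memory_order_acquire)
                & PSFLG_EXITING) != 0;
    }

    static int proc_should_dump(struct proc *p)
    {
        uint32_t state = (uint32_t) atomic_load_explicit(&p->state,
                                                         memory_order_acquire);
        return (state & (PSFLG_EXITING | PSFLG_GC)) == 0;
    }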
*/ -#ifdef ERTS_ENABLE_LOCK_CHECK -#define ERTS_PROC_IS_EXITING(P) \ - (ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((P)) != 0 \ - || erts_lc_pix_lock_is_locked(ERTS_PID2PIXLOCK((P)->id))),\ - (P)->is_exiting) -#else -#define ERTS_PROC_IS_EXITING(P) ((P)->is_exiting) -#endif - #else /* !ERTS_SMP */ -#define ERTS_PROC_IS_EXITING(P) ((P)->status == P_EXITING) - #define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) #define erts_pid2proc_not_running erts_pid2proc @@ -1666,6 +1807,10 @@ extern int erts_disable_proc_not_running_opt; #endif +#define ERTS_PROC_IS_EXITING(P) \ + (ERTS_PSFLG_EXITING & erts_smp_atomic32_read_acqb(&(P)->state)) + + /* Minimum NUMBER of processes for a small system to start */ #ifdef ERTS_SMP #define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index 3550f1396c..542c5ed0d9 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -65,14 +65,11 @@ erts_deep_process_dump(int to, void *to_arg) all_binaries = NULL; for (i = 0; i < erts_max_processes; i++) { - if ((process_tab[i] != NULL) && (process_tab[i]->i != ENULL)) { - if (process_tab[i]->status != P_EXITING) { - Process* p = process_tab[i]; - - if (p->status != P_GARBING) { - dump_process_info(to, to_arg, p); - } - } + Process *p = erts_pix2proc(i); + if (p && p->i != ENULL) { + erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC))) + dump_process_info(to, to_arg, p); } } @@ -326,7 +323,7 @@ heap_dump(int to, void *to_arg, Eterm x) int i; GET_DOUBLE_DATA((ptr+1), f); - i = sys_double_to_chars(f.fd, (char*) sbuf); + i = sys_double_to_chars(f.fd, (char*) sbuf, sizeof(sbuf)); sys_memset(sbuf+i, 0, 31-i); erts_print(to, to_arg, "F%X:%s\n", i, sbuf); *ptr = OUR_NIL; diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index f28ef6a6d7..84a8270d06 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -67,10 +67,12 @@ #include "erl_process.h" -const Process erts_proc_lock_busy; +const Process erts_proc_lock_busy = {ERTS_INVALID_PID}; #ifdef ERTS_SMP +#if ERTS_PROC_LOCK_OWN_IMPL + #define ERTS_PROC_LOCK_SPIN_COUNT_MAX 2000 #define ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC 32 #define ERTS_PROC_LOCK_SPIN_COUNT_BASE 1000 @@ -90,6 +92,13 @@ static void check_queue(erts_proc_lock_t *lck); #error "The size of the 'uflgs' field of the erts_tse_t type is too small" #endif +static int proc_lock_spin_count; +static int aux_thr_proc_lock_spin_count; + +static void cleanup_tse(void); + +#endif /* ERTS_PROC_LOCK_OWN_IMPL */ + #ifdef ERTS_ENABLE_LOCK_CHECK static struct { Sint16 proc_lock_main; @@ -101,10 +110,6 @@ static struct { erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS]; -static int proc_lock_spin_count; -static int aux_thr_proc_lock_spin_count; - -static void cleanup_tse(void); void erts_init_proc_lock(int cpus) @@ -118,13 +123,8 @@ erts_init_proc_lock(int cpus) erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock"); #endif } +#if ERTS_PROC_LOCK_OWN_IMPL erts_thr_install_exit_handler(cleanup_tse); -#ifdef ERTS_ENABLE_LOCK_CHECK - lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main"); - lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link"); - lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq"); - lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status"); -#endif if (cpus > 1) { proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE; 
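erts_init_proc_lock() above scales the process-lock spin counts with the number of CPUs: on a multiprocessor a waiter spins for a while before parking, since the lock holder is probably running on another core, while on a single CPU spinning is pointless. A hedged sketch of that spin-then-block shape, reusing the base/increment/max constants visible in the patch but with a pthread mutex standing in for the process lock.

    #include <pthread.h>

    /* Chosen once at init: base + increment per extra CPU, capped, and 0
     * when there is only one CPU. */
    static int lock_spin_count;

    static void lock_spin_init(int ncpus)
    {
        lock_spin_count = (ncpus > 1) ? 1000 + 32 * (ncpus - 1) : 0;
        if (lock_spin_count > 2000)
            lock_spin_count = 2000;
    }

    static void lock_with_spin(pthread_mutex_t *mtx)
    {
        int i;
        for (i = 0; i < lock_spin_count; i++) {
            if (pthread_mutex_trylock(mtx) == 0)
                return;               /* got it while spinning */
        }
        pthread_mutex_lock(mtx);      /* give up and block */
    }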
proc_lock_spin_count += (ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC @@ -141,8 +141,17 @@ erts_init_proc_lock(int cpus) } if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX) proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX; +#endif +#ifdef ERTS_ENABLE_LOCK_CHECK + lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main"); + lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link"); + lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq"); + lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status"); +#endif } +#if ERTS_PROC_LOCK_OWN_IMPL + #ifdef ERTS_ENABLE_LOCK_CHECK #define CHECK_UNUSED_TSE(W) ERTS_LC_ASSERT((W)->uflgs == 0) #else @@ -164,13 +173,6 @@ tse_return(erts_tse_t *tse) erts_tse_return(tse); } -void -erts_proc_lock_prepare_proc_lock_waiter(void) -{ - tse_return(tse_fetch(NULL)); -} - - static void cleanup_tse(void) { @@ -560,6 +562,16 @@ erts_proc_unlock_failed(Process *p, transfer_locks(p, wait_locks, pix_lock, 1); /* unlocks pix_lock */ } +#endif /* ERTS_PROC_LOCK_OWN_IMPL */ + +void +erts_proc_lock_prepare_proc_lock_waiter(void) +{ +#if ERTS_PROC_LOCK_OWN_IMPL + tse_return(tse_fetch(NULL)); +#endif +} + /* * proc_safelock() locks process locks on two processes. In order * to avoid a deadlock, proc_safelock() unlocks those locks that @@ -568,12 +580,11 @@ erts_proc_unlock_failed(Process *p, */ static void -proc_safelock(Process *a_proc, - erts_pix_lock_t *a_pix_lck, +proc_safelock(int is_sched, + Process *a_proc, ErtsProcLocks a_have_locks, ErtsProcLocks a_need_locks, Process *b_proc, - erts_pix_lock_t *b_pix_lck, ErtsProcLocks b_have_locks, ErtsProcLocks b_need_locks) { @@ -581,7 +592,6 @@ proc_safelock(Process *a_proc, #ifdef ERTS_ENABLE_LOCK_CHECK Eterm pid1, pid2; #endif - erts_pix_lock_t *pix_lck1, *pix_lck2; ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2; ErtsProcLocks unlock_mask; int lock_no, refc1 = 0, refc2 = 0; @@ -598,14 +608,12 @@ proc_safelock(Process *a_proc, #ifdef ERTS_ENABLE_LOCK_CHECK pid1 = a_proc->id; #endif - pix_lck1 = a_pix_lck; need_locks1 = a_need_locks; have_locks1 = a_have_locks; p2 = b_proc; #ifdef ERTS_ENABLE_LOCK_CHECK pid2 = b_proc->id; #endif - pix_lck2 = b_pix_lck; need_locks2 = b_need_locks; have_locks2 = b_have_locks; } @@ -614,14 +622,12 @@ proc_safelock(Process *a_proc, #ifdef ERTS_ENABLE_LOCK_CHECK pid1 = b_proc->id; #endif - pix_lck1 = b_pix_lck; need_locks1 = b_need_locks; have_locks1 = b_have_locks; p2 = a_proc; #ifdef ERTS_ENABLE_LOCK_CHECK pid2 = a_proc->id; #endif - pix_lck2 = a_pix_lck; need_locks2 = a_need_locks; have_locks2 = a_have_locks; } @@ -632,14 +638,12 @@ proc_safelock(Process *a_proc, #ifdef ERTS_ENABLE_LOCK_CHECK pid1 = a_proc->id; #endif - pix_lck1 = a_pix_lck; need_locks1 = a_need_locks | b_need_locks; have_locks1 = a_have_locks | b_have_locks; p2 = NULL; #ifdef ERTS_ENABLE_LOCK_CHECK pid2 = 0; #endif - pix_lck2 = NULL; need_locks2 = 0; have_locks2 = 0; } @@ -649,14 +653,12 @@ proc_safelock(Process *a_proc, #ifdef ERTS_ENABLE_LOCK_CHECK pid1 = b_proc->id; #endif - pix_lck1 = b_pix_lck; need_locks1 = b_need_locks; have_locks1 = b_have_locks; p2 = NULL; #ifdef ERTS_ENABLE_LOCK_CHECK pid2 = 0; #endif - pix_lck2 = NULL; need_locks2 = 0; have_locks2 = 0; #ifdef ERTS_ENABLE_LOCK_CHECK @@ -704,21 +706,21 @@ proc_safelock(Process *a_proc, if (unlock_locks) { have_locks1 &= ~unlock_locks; need_locks1 |= unlock_locks; - if (!have_locks1) { + if (!is_sched && !have_locks1) { refc1 = 1; erts_smp_proc_inc_refc(p1); } - erts_smp_proc_unlock__(p1, pix_lck1, unlock_locks); + 
erts_smp_proc_unlock(p1, unlock_locks); } unlock_locks = unlock_mask & have_locks2; if (unlock_locks) { have_locks2 &= ~unlock_locks; need_locks2 |= unlock_locks; - if (!have_locks2) { + if (!is_sched && !have_locks2) { refc2 = 1; erts_smp_proc_inc_refc(p2); } - erts_smp_proc_unlock__(p2, pix_lck2, unlock_locks); + erts_smp_proc_unlock(p2, unlock_locks); } } @@ -749,7 +751,7 @@ proc_safelock(Process *a_proc, if (need_locks2 & lock) lock_no--; locks = need_locks1 & lock_mask; - erts_smp_proc_lock__(p1, pix_lck1, locks); + erts_smp_proc_lock(p1, locks); have_locks1 |= locks; need_locks1 &= ~locks; } @@ -760,7 +762,7 @@ proc_safelock(Process *a_proc, lock = (1 << ++lock_no); } locks = need_locks2 & lock_mask; - erts_smp_proc_lock__(p2, pix_lck2, locks); + erts_smp_proc_lock(p2, locks); have_locks2 |= locks; need_locks2 &= ~locks; } @@ -795,10 +797,12 @@ proc_safelock(Process *a_proc, } #endif - if (refc1) - erts_smp_proc_dec_refc(p1); - if (refc2) - erts_smp_proc_dec_refc(p2); + if (!is_sched) { + if (refc1) + erts_smp_proc_dec_refc(p1); + if (refc2) + erts_smp_proc_dec_refc(p2); + } } void @@ -809,77 +813,191 @@ erts_proc_safelock(Process *a_proc, ErtsProcLocks b_have_locks, ErtsProcLocks b_need_locks) { - proc_safelock(a_proc, - a_proc ? ERTS_PID2PIXLOCK(a_proc->id) : NULL, + proc_safelock(erts_get_scheduler_id() != 0, + a_proc, a_have_locks, a_need_locks, b_proc, - b_proc ? ERTS_PID2PIXLOCK(b_proc->id) : NULL, b_have_locks, b_need_locks); } -/* - * erts_pid2proc_safelock() is called from erts_pid2proc_opt() when - * it wasn't possible to trylock all locks needed. - * c_p - current process - * c_p_have_locks - locks held on c_p - * pid - process id of process we are looking up - * proc - process struct of process we are looking - * up (both in and out argument) - * need_locks - all locks we need (including have_locks) - * pix_lock - pix lock for process we are looking up - * flags - option flags - */ -void -erts_pid2proc_safelock(Process *c_p, - ErtsProcLocks c_p_have_locks, - Process **proc, - ErtsProcLocks need_locks, - erts_pix_lock_t *pix_lock, - int flags) +Process * +erts_pid2proc_opt(Process *c_p, + ErtsProcLocks c_p_have_locks, + Eterm pid, + ErtsProcLocks pid_need_locks, + int flags) { - Process *p = *proc; - ERTS_LC_ASSERT(p->lock.refc > 0); - ERTS_LC_ASSERT(process_tab[internal_pid_index(p->id)] == p); - p->lock.refc++; - erts_pix_unlock(pix_lock); - - proc_safelock(c_p, - c_p ? 
ERTS_PID2PIXLOCK(c_p->id) : NULL, - c_p_have_locks, - c_p_have_locks, - p, - pix_lock, - 0, - need_locks); + Process *dec_refc_proc = NULL; + int need_ptl; + ErtsProcLocks need_locks; + Uint pix; + Process *proc; +#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT) + ErtsProcLocks lcnt_locks; +#endif - erts_pix_lock(pix_lock); +#ifdef ERTS_ENABLE_LOCK_CHECK + if (c_p) { + ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks; + if (might_unlock) + erts_proc_lc_might_unlock(c_p, might_unlock); + } +#endif - if (!p->is_exiting - || ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) - && process_tab[internal_pid_index(p->id)] == p)) { - ERTS_LC_ASSERT(p->lock.refc > 1); - p->lock.refc--; + if (is_not_internal_pid(pid)) + return NULL; + pix = internal_pid_index(pid); + + ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks); + need_locks = pid_need_locks; + + if (c_p && c_p->id == pid) { + ASSERT(c_p->id != ERTS_INVALID_PID); + ASSERT(c_p == erts_pix2proc(pix)); + + if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) + && ERTS_PROC_IS_EXITING(c_p)) + return NULL; + need_locks &= ~c_p_have_locks; + if (!need_locks) { + if (flags & ERTS_P2P_FLG_SMP_INC_REFC) + erts_smp_proc_inc_refc(c_p); + return c_p; + } } - else { - /* No proc. Note, we need to keep refc until after process unlock */ - erts_pix_unlock(pix_lock); - erts_smp_proc_unlock__(p, pix_lock, need_locks); - *proc = NULL; - erts_pix_lock(pix_lock); - ERTS_LC_ASSERT(p->lock.refc > 0); - if (--p->lock.refc == 0) { - erts_pix_unlock(pix_lock); - erts_free_proc(p); - erts_pix_lock(pix_lock); + + need_ptl = !erts_get_scheduler_id(); + + if (need_ptl) + erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); + + proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]); + + if (proc) { + if (proc->id != pid) + proc = NULL; + else if (!need_locks) { + if (flags & ERTS_P2P_FLG_SMP_INC_REFC) + erts_smp_proc_inc_refc(proc); } + else { + int busy; + +#if ERTS_PROC_LOCK_OWN_IMPL +#ifdef ERTS_ENABLE_LOCK_COUNT + lcnt_locks = need_locks; + if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) { + erts_lcnt_proc_lock(&proc->lock, need_locks); + } +#endif + +#ifdef ERTS_ENABLE_LOCK_CHECK + /* Make sure erts_pid2proc_safelock() is enough to handle + a potential lock order violation situation... */ + busy = erts_proc_lc_trylock_force_busy(proc, need_locks); + if (!busy) +#endif +#endif /* ERTS_PROC_LOCK_OWN_IMPL */ + { + /* Try a quick trylock to grab all the locks we need. */ + busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks); + +#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK) + erts_proc_lc_trylock(proc, need_locks, !busy); +#endif +#ifdef ERTS_PROC_LOCK_DEBUG + if (!busy) + erts_proc_lock_op_debug(proc, need_locks, 1); +#endif + } + +#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT) + if (flags & ERTS_P2P_FLG_TRY_LOCK) + erts_lcnt_proc_trylock(&proc->lock, need_locks, + busy ? 
EBUSY : 0); +#endif + + if (!busy) { + if (flags & ERTS_P2P_FLG_SMP_INC_REFC) + erts_smp_proc_inc_refc(proc); + +#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT) + /* all is great */ + if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) + erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks, + __FILE__, __LINE__); +#endif + + } + else { + if (flags & ERTS_P2P_FLG_TRY_LOCK) + proc = ERTS_PROC_LOCK_BUSY; + else { + if (flags & ERTS_P2P_FLG_SMP_INC_REFC) + erts_smp_proc_inc_refc(proc); + +#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT) + erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks); +#endif + + if (need_ptl) { + erts_smp_proc_inc_refc(proc); + dec_refc_proc = proc; + erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); + need_ptl = 0; + } + + proc_safelock(!need_ptl, + c_p, + c_p_have_locks, + c_p_have_locks, + proc, + 0, + need_locks); + } + } + } } + + if (need_ptl) + erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); + + if (need_locks + && proc + && proc != ERTS_PROC_LOCK_BUSY + && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) + ? ERTS_PROC_IS_EXITING(proc) + : (proc + != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) { + + erts_smp_proc_unlock(proc, need_locks); + + if (flags & ERTS_P2P_FLG_SMP_INC_REFC) + dec_refc_proc = proc; + proc = NULL; + + } + + if (dec_refc_proc) + erts_smp_proc_dec_refc(dec_refc_proc); + +#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG) + ERTS_LC_ASSERT(!proc + || proc == ERTS_PROC_LOCK_BUSY + || (pid_need_locks == + (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock) + & pid_need_locks))); +#endif + + return proc; } void erts_proc_lock_init(Process *p) { +#if ERTS_PROC_LOCK_OWN_IMPL int i; /* We always start with all locks locked */ #if ERTS_PROC_LOCK_ATOMIC_IMPL @@ -890,25 +1008,60 @@ erts_proc_lock_init(Process *p) #endif for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) p->lock.queue[i] = NULL; - p->lock.refc = 1; -#ifdef ERTS_ENABLE_LOCK_COUNT - erts_lcnt_proc_lock_init(p); - erts_lcnt_proc_lock(&(p->lock), ERTS_PROC_LOCKS_ALL); - erts_lcnt_proc_lock_post_x(&(p->lock), ERTS_PROC_LOCKS_ALL, __FILE__, __LINE__); -#endif - #ifdef ERTS_ENABLE_LOCK_CHECK erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1); #endif +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + erts_mtx_init_x(&p->lock.main, "proc_main", p->id); + ethr_mutex_lock(&p->lock.main.mtx); +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_lc_trylock(1, &p->lock.main.lc); +#endif + erts_mtx_init_x(&p->lock.link, "proc_link", p->id); + ethr_mutex_lock(&p->lock.link.mtx); +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_lc_trylock(1, &p->lock.link.lc); +#endif + erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->id); + ethr_mutex_lock(&p->lock.msgq.mtx); +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_lc_trylock(1, &p->lock.msgq.lc); +#endif + erts_mtx_init_x(&p->lock.status, "proc_status", p->id); + ethr_mutex_lock(&p->lock.status.mtx); +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_lc_trylock(1, &p->lock.status.lc); +#endif +#endif + erts_atomic32_init_nob(&p->lock.refc, 1); #ifdef ERTS_PROC_LOCK_DEBUG for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1); #endif +#ifdef ERTS_ENABLE_LOCK_COUNT + erts_lcnt_proc_lock_init(p); + erts_lcnt_proc_lock(&(p->lock), ERTS_PROC_LOCKS_ALL); + erts_lcnt_proc_lock_post_x(&(p->lock), ERTS_PROC_LOCKS_ALL, __FILE__, __LINE__); +#endif +} + +void +erts_proc_lock_fin(Process *p) +{ +#if ERTS_PROC_LOCK_RAW_MUTEX_IMPL + erts_mtx_destroy(&p->lock.main); + erts_mtx_destroy(&p->lock.link); + erts_mtx_destroy(&p->lock.msgq); + erts_mtx_destroy(&p->lock.status); 
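The rewritten erts_pid2proc_opt() above follows a lookup/lock/revalidate pattern: read the process pointer from the table without the table lock (scheduler threads only), try to take the requested process locks, and afterwards re-check that the slot still refers to the same pid and that the process is not exiting, undoing the locks if not. A condensed sketch of that pattern with C11 atomics and one pthread mutex standing in for the lock set; all names are illustrative, and the reference counting and table lock that keep the struct alive between lookup and lock in the real code are omitted.

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stddef.h>

    struct proc {
        uint64_t        id;
        atomic_int      exiting;
        pthread_mutex_t lock;
    };

    #define MAX_PROC 1024
    static _Atomic(struct proc *) proc_tab[MAX_PROC];

    static struct proc *pid2proc_locked(uint64_t pid, unsigned pix)
    {
        struct proc *p = atomic_load_explicit(&proc_tab[pix],
                                              memory_order_acquire);
        if (!p || p->id != pid)
            return NULL;

        pthread_mutex_lock(&p->lock);

        /* Revalidate after locking: the slot may have been reused, or the
         * process may have started exiting between lookup and lock. */
        if (atomic_load_explicit(&p->exiting, memory_order_acquire)
            || p != atomic_load_explicit(&proc_tab[pix], memory_order_relaxed)) {
            pthread_mutex_unlock(&p->lock);
            return NULL;
        }
        return p;
    }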
+#endif +#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) + erts_lcnt_proc_lock_destroy(p); +#endif } /* --- Process lock counting ----------------------------------------------- */ -#ifdef ERTS_ENABLE_LOCK_COUNT +#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT) void erts_lcnt_proc_lock_init(Process *p) { if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) { if (p->id != ERTS_INVALID_PID) { @@ -1027,7 +1180,7 @@ void erts_lcnt_enable_proc_lock_count(int enable) { int i; for (i = 0; i < erts_max_processes; ++i) { - Process* p = process_tab[i]; + Process* p = erts_pix2proc(i); if (p) { if (enable) { if (!ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) { @@ -1049,6 +1202,8 @@ void erts_lcnt_enable_proc_lock_count(int enable) { #ifdef ERTS_ENABLE_LOCK_CHECK +#if ERTS_PROC_LOCK_OWN_IMPL + void erts_proc_lc_lock(Process *p, ErtsProcLocks locks) { @@ -1121,9 +1276,12 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks) } } +#endif /* ERTS_PROC_LOCK_OWN_IMPL */ + void erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks) { +#if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, p->id, ERTS_LC_FLG_LT_PROCLOCK); @@ -1143,11 +1301,22 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks) lck.id = lc_id.proc_lock_main; erts_lc_might_unlock(&lck); } +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + if (locks & ERTS_PROC_LOCK_MAIN) + erts_lc_might_unlock(&p->lock.main.lc); + if (locks & ERTS_PROC_LOCK_LINK) + erts_lc_might_unlock(&p->lock.link.lc); + if (locks & ERTS_PROC_LOCK_MSGQ) + erts_lc_might_unlock(&p->lock.msgq.lc); + if (locks & ERTS_PROC_LOCK_STATUS) + erts_lc_might_unlock(&p->lock.status.lc); +#endif } void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks) { +#if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, p->id, ERTS_LC_FLG_LT_PROCLOCK); @@ -1167,11 +1336,22 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks) lck.id = lc_id.proc_lock_status; erts_lc_require_lock(&lck); } +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + if (locks & ERTS_PROC_LOCK_MAIN) + erts_lc_require_lock(&p->lock.main.lc); + if (locks & ERTS_PROC_LOCK_LINK) + erts_lc_require_lock(&p->lock.link.lc); + if (locks & ERTS_PROC_LOCK_MSGQ) + erts_lc_require_lock(&p->lock.msgq.lc); + if (locks & ERTS_PROC_LOCK_STATUS) + erts_lc_require_lock(&p->lock.status.lc); +#endif } void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks) { +#if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, p->id, ERTS_LC_FLG_LT_PROCLOCK); @@ -1191,8 +1371,19 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks) lck.id = lc_id.proc_lock_main; erts_lc_unrequire_lock(&lck); } +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + if (locks & ERTS_PROC_LOCK_MAIN) + erts_lc_unrequire_lock(&p->lock.main.lc); + if (locks & ERTS_PROC_LOCK_LINK) + erts_lc_unrequire_lock(&p->lock.link.lc); + if (locks & ERTS_PROC_LOCK_MSGQ) + erts_lc_unrequire_lock(&p->lock.msgq.lc); + if (locks & ERTS_PROC_LOCK_STATUS) + erts_lc_unrequire_lock(&p->lock.status.lc); +#endif } +#if ERTS_PROC_LOCK_OWN_IMPL int erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks) @@ -1218,21 +1409,30 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks) return 0; } +#endif /* ERTS_PROC_LOCK_OWN_IMPL */ + void erts_proc_lc_chk_only_proc_main(Process *p) { +#if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main, p->id, ERTS_LC_FLG_LT_PROCLOCK); erts_lc_check_exact(&proc_main, 1); +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + 
erts_lc_check_exact(&p->lock.main.lc, 1); +#endif } +#if ERTS_PROC_LOCK_OWN_IMPL #define ERTS_PROC_LC_EMPTY_LOCK_INIT \ ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK) +#endif /* ERTS_PROC_LOCK_OWN_IMPL */ void erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks) { int have_locks_len = 0; +#if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT, ERTS_PROC_LC_EMPTY_LOCK_INIT, ERTS_PROC_LC_EMPTY_LOCK_INIT, @@ -1253,7 +1453,17 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks) have_locks[have_locks_len].id = lc_id.proc_lock_status; have_locks[have_locks_len++].extra = p->id; } - +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + erts_lc_lock_t have_locks[4]; + if (locks & ERTS_PROC_LOCK_MAIN) + have_locks[have_locks_len++] = p->lock.main.lc; + if (locks & ERTS_PROC_LOCK_LINK) + have_locks[have_locks_len++] = p->lock.link.lc; + if (locks & ERTS_PROC_LOCK_MSGQ) + have_locks[have_locks_len++] = p->lock.msgq.lc; + if (locks & ERTS_PROC_LOCK_STATUS) + have_locks[have_locks_len++] = p->lock.status.lc; +#endif erts_lc_check(have_locks, have_locks_len, NULL, 0); } @@ -1262,6 +1472,7 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks) { int have_locks_len = 0; int have_not_locks_len = 0; +#if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT, ERTS_PROC_LC_EMPTY_LOCK_INIT, ERTS_PROC_LC_EMPTY_LOCK_INIT, @@ -1303,6 +1514,27 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks) have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status; have_not_locks[have_not_locks_len++].extra = p->id; } +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + erts_lc_lock_t have_locks[4]; + erts_lc_lock_t have_not_locks[4]; + + if (locks & ERTS_PROC_LOCK_MAIN) + have_locks[have_locks_len++] = p->lock.main.lc; + else + have_not_locks[have_not_locks_len++] = p->lock.main.lc; + if (locks & ERTS_PROC_LOCK_LINK) + have_locks[have_locks_len++] = p->lock.link.lc; + else + have_not_locks[have_not_locks_len++] = p->lock.link.lc; + if (locks & ERTS_PROC_LOCK_MSGQ) + have_locks[have_locks_len++] = p->lock.msgq.lc; + else + have_not_locks[have_not_locks_len++] = p->lock.msgq.lc; + if (locks & ERTS_PROC_LOCK_STATUS) + have_locks[have_locks_len++] = p->lock.status.lc; + else + have_not_locks[have_not_locks_len++] = p->lock.status.lc; +#endif erts_lc_check(have_locks, have_locks_len, have_not_locks, have_not_locks_len); @@ -1312,6 +1544,8 @@ ErtsProcLocks erts_proc_lc_my_proc_locks(Process *p) { int resv[4]; + ErtsProcLocks res = 0; +#if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t locks[4] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main, p->id, ERTS_LC_FLG_LT_PROCLOCK), @@ -1324,8 +1558,12 @@ erts_proc_lc_my_proc_locks(Process *p) ERTS_LC_LOCK_INIT(lc_id.proc_lock_status, p->id, ERTS_LC_FLG_LT_PROCLOCK)}; - - ErtsProcLocks res = 0; +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + erts_lc_lock_t locks[4] = {p->lock.main.lc, + p->lock.link.lc, + p->lock.msgq.lc, + p->lock.status.lc}; +#endif erts_lc_have_locks(resv, locks, 4); if (resv[0]) @@ -1349,7 +1587,7 @@ erts_proc_lc_chk_no_proc_locks(char *file, int line) lc_id.proc_lock_msgq, lc_id.proc_lock_status}; erts_lc_have_lock_ids(resv, ids, 4); - if (resv[0] || resv[1] || resv[2] || resv[3]) { + if (!ERTS_IS_CRASH_DUMPING && (resv[0] || resv[1] || resv[2] || resv[3])) { erts_lc_fail("%s:%d: Thread has process locks locked when expected " "not to have any process locks locked", file, line); @@ -1358,7 +1596,7 @@ erts_proc_lc_chk_no_proc_locks(char *file, int line) #endif /* #ifdef 
ERTS_ENABLE_LOCK_CHECK */ -#ifdef ERTS_PROC_LOCK_HARD_DEBUG +#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_HARD_DEBUG) void check_queue(erts_proc_lock_t *lck) { @@ -1391,4 +1629,4 @@ check_queue(erts_proc_lock_t *lck) } #endif -#endif /* ERTS_SMP (the whole file) */ +#endif /* ERTS_SMP */ diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h index 062b8366d3..4aec19c8c3 100644 --- a/erts/emulator/beam/erl_process_lock.h +++ b/erts/emulator/beam/erl_process_lock.h @@ -37,10 +37,21 @@ #include "erl_smp.h" +#if defined(VALGRIND) || defined(ETHR_DISABLE_NATIVE_IMPLS) +# define ERTS_PROC_LOCK_OWN_IMPL 0 +#else +# define ERTS_PROC_LOCK_OWN_IMPL 1 +#endif + #define ERTS_PROC_LOCK_ATOMIC_IMPL 0 #define ERTS_PROC_LOCK_SPINLOCK_IMPL 0 #define ERTS_PROC_LOCK_MUTEX_IMPL 0 +#if !ERTS_PROC_LOCK_OWN_IMPL +#define ERTS_PROC_LOCK_RAW_MUTEX_IMPL 1 +#else +#define ERTS_PROC_LOCK_RAW_MUTEX_IMPL 0 + #if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS) # undef ERTS_PROC_LOCK_ATOMIC_IMPL # define ERTS_PROC_LOCK_ATOMIC_IMPL 1 @@ -52,27 +63,38 @@ # define ERTS_PROC_LOCK_MUTEX_IMPL 1 #endif +#endif + #define ERTS_PROC_LOCK_MAX_BIT 3 typedef erts_aint32_t ErtsProcLocks; typedef struct erts_proc_lock_t_ { +#if ERTS_PROC_LOCK_OWN_IMPL #if ERTS_PROC_LOCK_ATOMIC_IMPL erts_smp_atomic32_t flags; #else ErtsProcLocks flags; #endif erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1]; - Sint32 refc; -#ifdef ERTS_PROC_LOCK_DEBUG - erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1]; -#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_t lcnt_main; erts_lcnt_lock_t lcnt_link; erts_lcnt_lock_t lcnt_msgq; erts_lcnt_lock_t lcnt_status; #endif +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + erts_mtx_t main; + erts_mtx_t link; + erts_mtx_t msgq; + erts_mtx_t status; +#else +# error "no implementation" +#endif + erts_atomic32_t refc; +#ifdef ERTS_PROC_LOCK_DEBUG + erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1]; +#endif } erts_proc_lock_t; /* Process lock flags */ @@ -105,11 +127,9 @@ typedef struct erts_proc_lock_t_ { /* * Status lock: * Protects the following fields in the process structure: - * * status - * * rstatus - * * status_flags * * pending_suspenders * * suspendee + * * ... */ #define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << ERTS_PROC_LOCK_MAX_BIT) @@ -141,14 +161,11 @@ typedef struct erts_proc_lock_t_ { * Other rules regarding process locking: * * Exiting processes: - * When changing status to P_EXITING on a process, you are required - * to take all process locks (ERTS_PROC_LOCKS_ALL). Thus, by holding - * at least one process lock (whichever one doesn't matter) you - * are guaranteed that the process won't exit until the lock you are - * holding has been released. Appart from all process locks also - * the pix lock corresponding to the process has to be held. - * At the same time as status is changed to P_EXITING, also the - * field 'is_exiting' in the process structure is set to a value != 0. + * When changing state to exiting (ERTS_PSFLG_EXITING) on a process, + * you are required to take all process locks (ERTS_PROC_LOCKS_ALL). + * Thus, by holding at least one process lock (whichever one doesn't + * matter) you are guaranteed that the process won't exit until the + * lock you are holding has been released. 
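The header comment here, together with proc_safelock() and erts_smp_proc_raw_trylock__() earlier in the patch, rests on a fixed lock order: the four per-process locks are bits in a mask and are always taken from the lowest bit upwards, which makes deadlock-free acquisition of any subset mechanical. A small sketch of ordered acquisition and reverse-order release over such a bitmask; these are illustrative helpers, not the ERTS macros.

    #include <pthread.h>

    #define NLOCKS 4   /* main, link, msgq, status */

    static pthread_mutex_t locks[NLOCKS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    /* 'mask' selects which locks to take; always locking in ascending bit
     * order means two threads can never wait on each other in a cycle. */
    static void lock_mask(unsigned mask)
    {
        int i;
        for (i = 0; i < NLOCKS; i++)
            if (mask & (1u << i))
                pthread_mutex_lock(&locks[i]);
    }

    /* Release in reverse order, mirroring how the process-lock code
     * unlocks (not required for deadlock freedom, but tidy). */
    static void unlock_mask(unsigned mask)
    {
        int i;
        for (i = NLOCKS - 1; i >= 0; i--)
            if (mask & (1u << i))
                pthread_mutex_unlock(&locks[i]);
    }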
* * Lock order: * Process locks with low numeric values has to be locked before @@ -260,12 +277,10 @@ typedef struct { } u; } erts_pix_lock_t; -#define ERTS_PIX2PIXLOCKIX(PIX) \ - ((PIX) & ((1 << ERTS_PIX_LOCKS_BITS) - 1)) -#define ERTS_PIX2PIXLOCK(PIX) \ - (&erts_pix_locks[ERTS_PIX2PIXLOCKIX((PIX))]) #define ERTS_PID2PIXLOCK(PID) \ - ERTS_PIX2PIXLOCK(internal_pid_data((PID))) + (&erts_pix_locks[(internal_pid_data((PID)) & ((1 << ERTS_PIX_LOCKS_BITS) - 1))]) + +#if ERTS_PROC_LOCK_OWN_IMPL #if ERTS_PROC_LOCK_ATOMIC_IMPL @@ -335,11 +350,13 @@ erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new, #define ERTS_PROC_LOCK_FLGS_READ_(L) ((L)->flags) #endif /* end no opt atomic ops */ +#endif /* ERTS_PROC_LOCK_OWN_IMPL */ extern erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS]; void erts_init_proc_lock(int cpus); void erts_proc_lock_prepare_proc_lock_waiter(void); +#if ERTS_PROC_LOCK_OWN_IMPL void erts_proc_lock_failed(Process *, erts_pix_lock_t *, ErtsProcLocks, @@ -347,6 +364,7 @@ void erts_proc_lock_failed(Process *, void erts_proc_unlock_failed(Process *, erts_pix_lock_t *, ErtsProcLocks); +#endif ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *); ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *); @@ -410,6 +428,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck) ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks) { +#if ERTS_PROC_LOCK_OWN_IMPL ErtsProcLocks expct_lflgs = 0; while (1) { @@ -429,8 +448,38 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks) /* cmpxchg failed, try again (should be rare). */ expct_lflgs = lflgs; } -} +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + + if (locks & ERTS_PROC_LOCK_MAIN) + if (erts_mtx_trylock(&p->lock.main) == EBUSY) + goto busy_main; + if (locks & ERTS_PROC_LOCK_LINK) + if (erts_mtx_trylock(&p->lock.link) == EBUSY) + goto busy_link; + if (locks & ERTS_PROC_LOCK_MSGQ) + if (erts_mtx_trylock(&p->lock.msgq) == EBUSY) + goto busy_msgq; + if (locks & ERTS_PROC_LOCK_STATUS) + if (erts_mtx_trylock(&p->lock.status) == EBUSY) + goto busy_status; + + return 0; + +busy_status: + if (locks & ERTS_PROC_LOCK_MSGQ) + erts_mtx_unlock(&p->lock.msgq); +busy_msgq: + if (locks & ERTS_PROC_LOCK_LINK) + erts_mtx_unlock(&p->lock.link); +busy_link: + if (locks & ERTS_PROC_LOCK_MAIN) + erts_mtx_unlock(&p->lock.main); +busy_main: + + return EBUSY; +#endif +} ERTS_GLB_INLINE void #ifdef ERTS_ENABLE_LOCK_COUNT @@ -444,10 +493,13 @@ erts_smp_proc_lock__(Process *p, ErtsProcLocks locks) #endif { +#if ERTS_PROC_LOCK_OWN_IMPL + ErtsProcLocks old_lflgs; #if !ERTS_PROC_LOCK_ATOMIC_IMPL erts_pix_lock(pix_lck); #endif + #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_proc_lock(&(p->lock), locks); #endif @@ -471,12 +523,14 @@ erts_smp_proc_lock__(Process *p, erts_pix_unlock(pix_lck); } #endif + #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_proc_lock_post_x(&(p->lock), locks, file, line); #endif #ifdef ERTS_ENABLE_LOCK_CHECK erts_proc_lc_lock(p, locks); #endif + #ifdef ERTS_PROC_LOCK_DEBUG erts_proc_lock_op_debug(p, locks, 1); #endif @@ -484,6 +538,22 @@ erts_smp_proc_lock__(Process *p, #if ERTS_PROC_LOCK_ATOMIC_IMPL ETHR_COMPILER_BARRIER; #endif + +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + if (locks & ERTS_PROC_LOCK_MAIN) + erts_mtx_lock(&p->lock.main); + if (locks & ERTS_PROC_LOCK_LINK) + erts_mtx_lock(&p->lock.link); + if (locks & ERTS_PROC_LOCK_MSGQ) + erts_mtx_lock(&p->lock.msgq); + if (locks & ERTS_PROC_LOCK_STATUS) + erts_mtx_lock(&p->lock.status); + +#ifdef ERTS_PROC_LOCK_DEBUG + 
erts_proc_lock_op_debug(p, locks, 1); +#endif + +#endif } ERTS_GLB_INLINE void @@ -491,6 +561,7 @@ erts_smp_proc_unlock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) { +#if ERTS_PROC_LOCK_OWN_IMPL ErtsProcLocks old_lflgs; #if ERTS_PROC_LOCK_ATOMIC_IMPL @@ -555,6 +626,23 @@ erts_smp_proc_unlock__(Process *p, break; } + +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + +#ifdef ERTS_PROC_LOCK_DEBUG + erts_proc_lock_op_debug(p, locks, 0); +#endif + + if (locks & ERTS_PROC_LOCK_STATUS) + erts_mtx_unlock(&p->lock.status); + if (locks & ERTS_PROC_LOCK_MSGQ) + erts_mtx_unlock(&p->lock.msgq); + if (locks & ERTS_PROC_LOCK_LINK) + erts_mtx_unlock(&p->lock.link); + if (locks & ERTS_PROC_LOCK_MAIN) + erts_mtx_unlock(&p->lock.main); +#endif + } ERTS_GLB_INLINE int @@ -562,6 +650,7 @@ erts_smp_proc_trylock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) { +#if ERTS_PROC_LOCK_OWN_IMPL int res; #ifdef ERTS_ENABLE_LOCK_CHECK @@ -573,6 +662,7 @@ erts_smp_proc_trylock__(Process *p, else #endif { + #if !ERTS_PROC_LOCK_ATOMIC_IMPL erts_pix_lock(pix_lck); #endif @@ -611,8 +701,18 @@ erts_smp_proc_trylock__(Process *p, #if ERTS_PROC_LOCK_ATOMIC_IMPL ETHR_COMPILER_BARRIER; #endif - return res; + +#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL + if (erts_smp_proc_raw_trylock__(p, locks) != 0) + return EBUSY; + else { +#ifdef ERTS_PROC_LOCK_DEBUG + erts_proc_lock_op_debug(p, locks, 1); +#endif + return 0; + } +#endif } #ifdef ERTS_PROC_LOCK_DEBUG @@ -711,44 +811,36 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks) #endif } - ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *p) { #ifdef ERTS_SMP - erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id); - erts_pix_lock(pixlck); - ERTS_LC_ASSERT(p->lock.refc > 0); - p->lock.refc++; - erts_pix_unlock(pixlck); +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_aint32_t refc = erts_atomic32_inc_read_nob(&p->lock.refc); + ERTS_SMP_LC_ASSERT(refc > 1); +#else + erts_atomic32_inc_nob(&p->lock.refc); +#endif #endif } ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p) { #ifdef ERTS_SMP - Process *fp; - erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id); - erts_pix_lock(pixlck); - ERTS_LC_ASSERT(p->lock.refc > 0); - fp = --p->lock.refc == 0 ? p : NULL; - erts_pix_unlock(pixlck); - if (fp) - erts_free_proc(fp); + erts_aint32_t refc = erts_atomic32_dec_read_nob(&p->lock.refc); + ERTS_SMP_LC_ASSERT(refc >= 0); + if (refc == 0) + erts_free_proc(p); #endif } -ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 refc) +ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 add_refc) { #ifdef ERTS_SMP - Process *fp; - erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id); - erts_pix_lock(pixlck); - ERTS_LC_ASSERT(p->lock.refc > 0); - p->lock.refc += refc; - fp = p->lock.refc == 0 ? 
p : NULL; - erts_pix_unlock(pixlck); - if (fp) - erts_free_proc(fp); + erts_aint32_t refc = erts_atomic32_add_read_nob(&p->lock.refc, + (erts_aint32_t) add_refc); + ERTS_SMP_LC_ASSERT(refc >= 0); + if (refc == 0) + erts_free_proc(p); #endif } @@ -756,6 +848,7 @@ ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 refc) #ifdef ERTS_SMP void erts_proc_lock_init(Process *); +void erts_proc_lock_fin(Process *); void erts_proc_safelock(Process *a_proc, ErtsProcLocks a_have_locks, ErtsProcLocks a_need_locks, @@ -788,211 +881,75 @@ extern const Process erts_proc_lock_busy; #define erts_pid2proc(PROC, HL, PID, NL) \ erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0) -ERTS_GLB_INLINE Process * -erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int); -#ifdef ERTS_SMP -void -erts_pid2proc_safelock(Process *c_p, - ErtsProcLocks c_p_have_locks, - Process **proc, - ErtsProcLocks need_locks, - erts_pix_lock_t *pix_lock, - int flags); -ERTS_GLB_INLINE Process *erts_pid2proc_unlocked_opt(Eterm pid, int flags); -#define erts_pid2proc_unlocked(PID) erts_pid2proc_unlocked_opt((PID), 0) -#else -#define erts_pid2proc_unlocked_opt(PID, FLGS) \ - erts_pid2proc_opt(NULL, 0, (PID), 0, FLGS) -#define erts_pid2proc_unlocked(PID) erts_pid2proc_opt(NULL, 0, (PID), 0, 0) +ERTS_GLB_INLINE Process *erts_pix2proc(int ix); +ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid); +ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid); + +#ifndef ERTS_SMP +ERTS_GLB_INLINE #endif +Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -ERTS_GLB_INLINE Process * -#ifdef ERTS_SMP -erts_pid2proc_unlocked_opt(Eterm pid, int flags) -#else -erts_pid2proc_opt(Process *c_p_unused, - ErtsProcLocks c_p_have_locks_unused, - Eterm pid, - ErtsProcLocks pid_need_locks_unused, - int flags) -#endif +ERTS_GLB_INLINE Process *erts_pix2proc(int ix) { - Uint pix; Process *proc; + ASSERT(0 <= ix && ix < erts_proc.max); + proc = (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[ix]); + return proc == ERTS_PROC_LOCK_BUSY ? NULL : proc; +} + +ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid) +{ + Process *proc; + int pix; + + /* + * In SMP case: Only scheduler threads are allowed + * to use this function. Other threads need to + * atomicaly increment refc at lookup, i.e., use + * erts_pid2proc_opt() with ERTS_P2P_FLG_SMP_INC_REFC. 
+ */ + ERTS_SMP_LC_ASSERT(erts_get_scheduler_id()); if (is_not_internal_pid(pid)) return NULL; pix = internal_pid_index(pid); - if(pix >= erts_max_processes) + + proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]); + + if (proc && proc->id != pid) return NULL; - proc = process_tab[pix]; - if (proc) { - if (proc->id != pid - || (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) - && proc->status == P_EXITING)) - proc = NULL; - } + return proc; } -#ifdef ERTS_SMP +ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid) +{ + Process *proc = erts_proc_lookup_raw(pid); + if (proc && ERTS_PROC_IS_EXITING(proc)) + return NULL; + return proc; +} +#ifndef ERTS_SMP ERTS_GLB_INLINE Process * -erts_pid2proc_opt(Process *c_p, - ErtsProcLocks c_p_have_locks, +erts_pid2proc_opt(Process *c_p_unused, + ErtsProcLocks c_p_have_locks_unused, Eterm pid, - ErtsProcLocks pid_need_locks, + ErtsProcLocks pid_need_locks_unused, int flags) { - erts_pix_lock_t *pix_lock; - ErtsProcLocks need_locks; - Uint pix; - Process *proc; -#ifdef ERTS_ENABLE_LOCK_COUNT - ErtsProcLocks lcnt_locks; -#endif - -#ifdef ERTS_ENABLE_LOCK_CHECK - if (c_p) { - ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks; - if (might_unlock) - erts_proc_lc_might_unlock(c_p, might_unlock); - } -#endif - if (is_not_internal_pid(pid)) { - proc = NULL; - goto done; - } - pix = internal_pid_index(pid); - if(pix >= erts_max_processes) { - proc = NULL; - goto done; - } - - ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks); - need_locks = pid_need_locks; - - pix_lock = ERTS_PIX2PIXLOCK(pix); - - if (c_p && c_p->id == pid) { - ASSERT(c_p->id != ERTS_INVALID_PID); - ASSERT(c_p == process_tab[pix]); - if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) && c_p->is_exiting) { - proc = NULL; - goto done; - } - need_locks &= ~c_p_have_locks; - if (!need_locks) { - proc = c_p; - erts_pix_lock(pix_lock); - if (flags & ERTS_P2P_FLG_SMP_INC_REFC) - proc->lock.refc++; - erts_pix_unlock(pix_lock); - goto done; - } - } - - erts_pix_lock(pix_lock); - - proc = process_tab[pix]; - if (proc) { - if (proc->id != pid || (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) - && ERTS_PROC_IS_EXITING(proc))) { - proc = NULL; - } - else if (!need_locks) { - if (flags & ERTS_P2P_FLG_SMP_INC_REFC) - proc->lock.refc++; - } - else { - int busy; - -#ifdef ERTS_ENABLE_LOCK_COUNT - lcnt_locks = need_locks; - if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) { - erts_lcnt_proc_lock(&proc->lock, need_locks); - } -#endif - -#ifdef ERTS_ENABLE_LOCK_CHECK - /* Make sure erts_pid2proc_safelock() is enough to handle - a potential lock order violation situation... */ - busy = erts_proc_lc_trylock_force_busy(proc, need_locks); - if (!busy) -#endif - { - /* Try a quick trylock to grab all the locks we need. 
*/ - busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks); -#ifdef ERTS_ENABLE_LOCK_CHECK - erts_proc_lc_trylock(proc, need_locks, !busy); -#endif -#ifdef ERTS_PROC_LOCK_DEBUG - if (!busy) - erts_proc_lock_op_debug(proc, need_locks, 1); -#endif - } - -#ifdef ERTS_ENABLE_LOCK_COUNT - if (flags & ERTS_P2P_FLG_TRY_LOCK) { - if (busy) { - erts_lcnt_proc_trylock(&proc->lock, need_locks, EBUSY); - } else { - erts_lcnt_proc_trylock(&proc->lock, need_locks, 0); - } - } -#endif - if (!busy) { - if (flags & ERTS_P2P_FLG_SMP_INC_REFC) - proc->lock.refc++; -#ifdef ERTS_ENABLE_LOCK_COUNT - /* all is great */ - if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) { - erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks, __FILE__, __LINE__); - } -#endif - } - else { - if (flags & ERTS_P2P_FLG_TRY_LOCK) - proc = ERTS_PROC_LOCK_BUSY; - else { -#ifdef ERTS_ENABLE_LOCK_COUNT - erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks); -#endif - erts_pid2proc_safelock(c_p, - c_p_have_locks, - &proc, - pid_need_locks, - pix_lock, - flags); - if (proc && (flags & ERTS_P2P_FLG_SMP_INC_REFC)) - proc->lock.refc++; - } - } - } - } - - erts_pix_unlock(pix_lock); -#ifdef ERTS_PROC_LOCK_DEBUG - ERTS_LC_ASSERT(!proc - || proc == ERTS_PROC_LOCK_BUSY - || (pid_need_locks == - (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock) - & pid_need_locks))); -#endif - - - done: - -#if ERTS_PROC_LOCK_ATOMIC_IMPL - ETHR_COMPILER_BARRIER; -#endif - - return proc; + Process *proc = erts_proc_lookup_raw(pid); + return ((!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) + && proc + && ERTS_PROC_IS_EXITING(proc)) + ? NULL + : proc); } -#endif /* ERTS_SMP */ +#endif /* !ERTS_SMP */ #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_resolv_dns.c b/erts/emulator/beam/erl_resolv_dns.c deleted file mode 100644 index 9d76fa89f8..0000000000 --- a/erts/emulator/beam/erl_resolv_dns.c +++ /dev/null @@ -1,23 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 1997-2009. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. - * - * %CopyrightEnd% - */ - -/* - * Set this to non-zero value if DNS should be used. - */ -int erl_use_resolver = 1; diff --git a/erts/emulator/beam/erl_resolv_nodns.c b/erts/emulator/beam/erl_resolv_nodns.c deleted file mode 100644 index f14ab68e27..0000000000 --- a/erts/emulator/beam/erl_resolv_nodns.c +++ /dev/null @@ -1,23 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 1997-2009. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See - * the License for the specific language governing rights and limitations - * under the License. - * - * %CopyrightEnd% - */ - -/* - * Set this to non-zero value if DNS should be used. - */ -int erl_use_resolver = 0; diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c index f77e8b798f..bf7774f882 100644 --- a/erts/emulator/beam/erl_term.c +++ b/erts/emulator/beam/erl_term.c @@ -105,7 +105,7 @@ unsigned tag_val_def(Wterm x) break; } } - sprintf(msg, "tag_val_def: %#lx", (unsigned long) x); + erts_snprintf(msg, sizeof(msg), "tag_val_def: %#lx", (unsigned long) x); et_abort(msg, file, line); #undef file #undef line diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h index 89486b065b..e72321cf48 100644 --- a/erts/emulator/beam/erl_thr_progress.h +++ b/erts/emulator/beam/erl_thr_progress.h @@ -78,6 +78,16 @@ void erts_thr_progress_fatal_error_block(SWord timeout, #endif /* ERTS_SMP */ +typedef struct ErtsThrPrgrLaterOp_ ErtsThrPrgrLaterOp; +struct ErtsThrPrgrLaterOp_ { +#ifdef ERTS_SMP + ErtsThrPrgrVal later; +#endif + void (*func)(void *); + void *data; + ErtsThrPrgrLaterOp *next; +}; + #endif #if !defined(ERL_THR_PROGRESS_H__) && !defined(ERL_THR_PROGRESS_TSD_TYPE_ONLY) diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h index ee47c98009..17628286bc 100644 --- a/erts/emulator/beam/erl_threads.h +++ b/erts/emulator/beam/erl_threads.h @@ -258,10 +258,6 @@ #include "sys.h" -typedef struct { SWord sint[2]; } erts_no_dw_atomic_t; -typedef SWord erts_no_atomic_t; -typedef Sint32 erts_no_atomic32_t; - #ifdef USE_THREADS #define ETHR_TRY_INLINE_FUNCS @@ -411,12 +407,15 @@ typedef struct { typedef int erts_rwmtx_t; typedef int erts_tsd_key_t; typedef int erts_tse_t; -#define erts_dw_aint_t erts_no_dw_atomic_t -#define erts_dw_atomic_t erts_no_dw_atomic_t -#define erts_aint_t SWord -#define erts_atomic_t erts_no_atomic_t -#define erts_aint32_t Sint32 -#define erts_atomic32_t erts_no_atomic32_t + +typedef struct { SWord sint[2]; } erts_dw_aint_t; +typedef SWord erts_aint_t; +typedef Sint32 erts_aint32_t; + +#define erts_dw_atomic_t erts_dw_aint_t +#define erts_atomic_t erts_aint_t +#define erts_atomic32_t erts_aint32_t + #if __GNUC__ > 2 typedef struct { } erts_spinlock_t; typedef struct { } erts_rwlock_t; @@ -425,6 +424,14 @@ typedef struct { int gcc_is_buggy; } erts_spinlock_t; typedef struct { int gcc_is_buggy; } erts_rwlock_t; #endif +#ifdef WORDS_BIGENDIAN +#define ERTS_DW_AINT_LOW_WORD 1 +#define ERTS_DW_AINT_HIGH_WORD 0 +#else +#define ERTS_DW_AINT_LOW_WORD 0 +#define ERTS_DW_AINT_HIGH_WORD 1 +#endif + #define ERTS_MTX_INITER 0 #define ERTS_CND_INITER 0 #define ERTS_THR_INIT_DATA_DEF_INITER 0 @@ -433,6 +440,10 @@ typedef struct { int gcc_is_buggy; } erts_rwlock_t; #endif /* #ifdef USE_THREADS */ +#define erts_no_dw_atomic_t erts_dw_aint_t +#define erts_no_atomic_t erts_aint_t +#define erts_no_atomic32_t erts_aint32_t + #define ERTS_AINT_NULL ((erts_aint_t) NULL) #define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1))) diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h index 6c6e193818..4bbdcaa3e3 100644 --- a/erts/emulator/beam/erl_time.h +++ b/erts/emulator/beam/erl_time.h @@ -118,9 +118,11 @@ ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t elapsed) void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec); #endif +typedef UWord erts_approx_time_t; +erts_approx_time_t erts_get_approx_time(void); + void erts_get_timeval(SysTimeval *tv); 
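The erts_approx_time_t API declared just above gives a coarse, seconds-granularity clock: it is refreshed as a side effect of the full time lookups in erl_time_sup.c further down, so reading it is a single atomic load with no mutex. A minimal caller sketch follows; maybe_report(), do_report(), last_report and the five-second threshold are illustrative names, not part of the patch, and since the value may jump backwards it is only suitable for coarse "has enough time passed" checks.

#include "erl_time.h"

extern void do_report(void);                 /* hypothetical action */
static erts_approx_time_t last_report;       /* hypothetical state */

/* Illustrative rate limiter built on the approximate clock. */
static void maybe_report(void)               /* hypothetical helper */
{
    erts_approx_time_t now = erts_get_approx_time(); /* lock-free read */
    if (now >= last_report + 5) {            /* at most ~one report per 5 s */
        last_report = now;
        do_report();
    }
}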
erts_time_t erts_get_time(void); -void erts_get_emu_time(SysTimeval *); ERTS_GLB_INLINE int erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p); diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c index 04147408d5..3272a5326d 100644 --- a/erts/emulator/beam/erl_time_sup.c +++ b/erts/emulator/beam/erl_time_sup.c @@ -91,6 +91,41 @@ static SysTimeval then; /* Used in get_now */ static SysTimeval last_emu_time; /* Used in erts_get_emu_time() */ SysTimeval erts_first_emu_time; /* Used in erts_get_emu_time() */ +union { + erts_smp_atomic_t time; + char align[ERTS_CACHE_LINE_SIZE]; +} approx erts_align_attribute(ERTS_CACHE_LINE_SIZE); + +static void +init_approx_time(void) +{ + erts_smp_atomic_init_nob(&approx.time, 0); +} + +static ERTS_INLINE erts_approx_time_t +get_approx_time(void) +{ + return (erts_approx_time_t) erts_smp_atomic_read_nob(&approx.time); +} + +static ERTS_INLINE void +update_approx_time(SysTimeval *tv) +{ + erts_approx_time_t new_secs = (erts_approx_time_t) tv->tv_sec; + erts_approx_time_t old_secs = get_approx_time(); + if (old_secs != new_secs) + erts_smp_atomic_set_nob(&approx.time, new_secs); +} + +/* + * erts_get_approx_time() returns an *approximate* time + * in seconds. NOTE that this time may jump backwards!!! + */ +erts_approx_time_t +erts_get_approx_time(void) +{ + return get_approx_time(); +} #ifdef HAVE_GETHRTIME @@ -398,6 +433,8 @@ erts_init_time_sup(void) { erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday"); + init_approx_time(); + last_emu_time.tv_sec = 0; last_emu_time.tv_usec = 0; @@ -417,7 +454,7 @@ erts_init_time_sup(void) gtv = inittv; then.tv_sec = then.tv_usec = 0; - erts_get_emu_time(&erts_first_emu_time); + erts_deliver_time(); return CLOCK_RESOLUTION; } @@ -883,6 +920,8 @@ get_now(Uint* megasec, Uint* sec, Uint* microsec) *megasec = (Uint) (now.tv_sec / 1000000); *sec = (Uint) (now.tv_sec % 1000000); *microsec = (Uint) (now.tv_usec); + + update_approx_time(&now); } void @@ -895,6 +934,8 @@ get_sys_now(Uint* megasec, Uint* sec, Uint* microsec) *megasec = (Uint) (now.tv_sec / 1000000); *sec = (Uint) (now.tv_sec % 1000000); *microsec = (Uint) (now.tv_usec); + + update_approx_time(&now); } @@ -911,6 +952,8 @@ void erts_deliver_time(void) { do_erts_deliver_time(&now); erts_smp_mtx_unlock(&erts_timeofday_mtx); + + update_approx_time(&now); } /* get *real* time (not ticks) remaining until next timeout - if there @@ -959,6 +1002,7 @@ void erts_get_timeval(SysTimeval *tv) erts_smp_mtx_lock(&erts_timeofday_mtx); get_tolerant_timeofday(tv); erts_smp_mtx_unlock(&erts_timeofday_mtx); + update_approx_time(tv); } erts_time_t @@ -971,7 +1015,9 @@ erts_get_time(void) get_tolerant_timeofday(&sys_tv); erts_smp_mtx_unlock(&erts_timeofday_mtx); - + + update_approx_time(&sys_tv); + return sys_tv.tv_sec; } @@ -987,38 +1033,3 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) { *sec = (Uint)(tp.tv_sec % 1000000); } #endif - - -/* - * erts_get_emu_time() is similar to get_now(). You will - * always get different times from erts_get_emu_time(), but they - * may equal a time from get_now(). - * - * erts_get_emu_time() is only used internally in the emulator in - * order to order emulator internal events. 
- */ - -void -erts_get_emu_time(SysTimeval *this_emu_time_p) -{ - erts_smp_mtx_lock(&erts_timeofday_mtx); - - get_tolerant_timeofday(this_emu_time_p); - - /* Make sure time is later than last */ - if (last_emu_time.tv_sec > this_emu_time_p->tv_sec || - (last_emu_time.tv_sec == this_emu_time_p->tv_sec - && last_emu_time.tv_usec >= this_emu_time_p->tv_usec)) { - *this_emu_time_p = last_emu_time; - this_emu_time_p->tv_usec++; - } - /* Check for carry from above + general reasonability */ - if (this_emu_time_p->tv_usec >= 1000000) { - this_emu_time_p->tv_usec = 0; - this_emu_time_p->tv_sec++; - } - - last_emu_time = *this_emu_time_p; - - erts_smp_mtx_unlock(&erts_timeofday_mtx); -} diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index 009ca1eb52..d04a91f18c 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -64,7 +64,7 @@ int erts_cpu_timestamp; #endif static erts_smp_mtx_t smq_mtx; -static erts_smp_mtx_t sys_trace_mtx; +static erts_smp_rwmtx_t sys_trace_rwmtx; enum ErtsSysMsgType { SYS_MSG_TYPE_UNDEFINED, @@ -91,7 +91,12 @@ static void init_sys_msg_dispatcher(void); #endif void erts_init_trace(void) { - erts_smp_mtx_init(&sys_trace_mtx, "sys_tracers"); + erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + + erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers"); + #ifdef HAVE_ERTS_NOW_CPU erts_cpu_timestamp = 0; #endif @@ -151,8 +156,8 @@ do { (RES) = (TPID); } while(0) #define ERTS_TRACER_REF_TYPE Process * #define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \ do { \ - (RES) = process_tab[internal_pid_index((TPID))]; \ - if (INVALID_PID((RES), (TPID)) || !((RES)->trace_flags & F_TRACER)) { \ + (RES) = erts_proc_lookup((TPID)); \ + if (!(RES) || !((RES)->trace_flags & F_TRACER)) { \ (TPID) = NIL; \ (TRACEE_FLGS) &= ~TRACEE_FLAGS; \ return; \ @@ -169,10 +174,10 @@ erts_system_profile_setup_active_schedulers(void) active_sched = erts_active_schedulers(); } -void -erts_trace_check_exiting(Eterm exiting) +static void +exiting_reset(Eterm exiting) { - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); if (exiting == default_tracer) { default_tracer = NIL; default_trace_flags &= TRACEE_FLAGS; @@ -202,29 +207,49 @@ erts_trace_check_exiting(Eterm exiting) erts_system_profile_clear(NULL); #endif } - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); +} + +void +erts_trace_check_exiting(Eterm exiting) +{ + int reset = 0; + erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + if (exiting == default_tracer) + reset = 1; + else if (exiting == system_seq_tracer) + reset = 1; + else if (exiting == system_monitor) + reset = 1; + else if (exiting == system_profile) + reset = 1; + erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + if (reset) + exiting_reset(exiting); +} + +static ERTS_INLINE int +is_valid_tracer(Eterm tracer) +{ + return erts_proc_lookup(tracer) || erts_is_valid_tracer_port(tracer); } Eterm erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, Eterm new) { - Eterm old = THE_NON_VALUE; + Eterm old; - if (new != am_false) { - if (!erts_pid2proc(c_p, c_p_locks, new, 0) - && !erts_is_valid_tracer_port(new)) { - return old; - } - } + if (new != am_false && !is_valid_tracer(new)) + return THE_NON_VALUE; - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); old = system_seq_tracer; system_seq_tracer = new; #ifdef 
DEBUG_PRINTOUTS erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old); #endif - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); return old; } @@ -232,12 +257,12 @@ Eterm erts_get_system_seq_tracer(void) { Eterm st; - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rlock(&sys_trace_rwmtx); st = system_seq_tracer; #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "get seq tracer %T\n", st); #endif - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_runlock(&sys_trace_rwmtx); return st; } @@ -250,7 +275,7 @@ get_default_tracing(Uint *flagsp, Eterm *tracerp) if (is_nil(default_tracer)) { default_trace_flags &= ~TRACEE_FLAGS; } else if (is_internal_pid(default_tracer)) { - if (!erts_pid2proc(NULL, 0, default_tracer, 0)) { + if (!erts_proc_lookup(default_tracer)) { reset_tracer: default_trace_flags &= ~TRACEE_FLAGS; default_tracer = NIL; @@ -270,7 +295,7 @@ get_default_tracing(Uint *flagsp, Eterm *tracerp) void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp) { - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); if (flagsp) { if (setflags) default_trace_flags |= *flagsp; @@ -280,48 +305,48 @@ erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp) if (tracerp) default_tracer = *tracerp; get_default_tracing(flagsp, tracerp); - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp) { - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rlock(&sys_trace_rwmtx); get_default_tracing(flagsp, tracerp); - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_runlock(&sys_trace_rwmtx); } void erts_set_system_monitor(Eterm monitor) { - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); system_monitor = monitor; - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); } Eterm erts_get_system_monitor(void) { Eterm monitor; - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rlock(&sys_trace_rwmtx); monitor = system_monitor; - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_runlock(&sys_trace_rwmtx); return monitor; } /* Performance monitoring */ void erts_set_system_profile(Eterm profile) { - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); system_profile = profile; - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); } Eterm erts_get_system_profile(void) { Eterm profile; - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rlock(&sys_trace_rwmtx); profile = system_profile; - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_runlock(&sys_trace_rwmtx); return profile; } @@ -384,13 +409,9 @@ WRITE_SYS_MSG_TO_PORT(Eterm unused_to, } #ifndef ERTS_SMP - if (!INVALID_TRACER_PORT(trace_port, trace_port->id)) { + if (!INVALID_TRACER_PORT(trace_port, trace_port->id)) #endif erts_raw_port_command(trace_port, buffer, ptr-buffer); -#ifndef ERTS_SMP - erts_port_release(trace_port); - } -#endif erts_free(ERTS_ALC_T_TMP, (void *) buffer); } @@ -465,13 +486,13 @@ send_to_port(Process *c_p, Eterm message, trace_port = NULL; #else - if (is_not_internal_port(*tracer_pid)) - goto invalid_tracer_port; - trace_port = &erts_port[internal_port_index(*tracer_pid)]; + trace_port = erts_id2port_sflgs(*tracer_pid, + NULL, + 0, + ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); - if (INVALID_TRACER_PORT(trace_port, *tracer_pid)) { - invalid_tracer_port: + if (!trace_port) { *tracee_flags &= ~TRACEE_FLAGS; *tracer_pid = 
NIL; return; @@ -491,6 +512,7 @@ send_to_port(Process *c_p, Eterm message, SYS_MSG_TYPE_TRACE, message); #ifndef ERTS_SMP + erts_port_release(trace_port); return; } @@ -537,6 +559,9 @@ send_to_port(Process *c_p, Eterm message, */ do_send_schedfix_to_port(trace_port, c_p->id, ts); } + + erts_port_release(trace_port); + UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE #endif @@ -566,23 +591,28 @@ profile_send(Eterm from, Eterm message) { Port *profiler_port = NULL; /* not smp */ - - - profiler_port = &erts_port[internal_port_index(profiler)]; - - do_send_to_port(profiler, - profiler_port, - NIL, /* or current process->id */ - SYS_MSG_TYPE_SYSPROF, - message); + + profiler_port = erts_id2port_sflgs(profiler, + NULL, + 0, + ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); + if (profiler_port) { + do_send_to_port(profiler, + profiler_port, + NIL, /* or current process->id */ + SYS_MSG_TYPE_SYSPROF, + message); + erts_port_release(profiler_port); + } } else { ASSERT(is_internal_pid(profiler) && internal_pid_index(profiler) < erts_max_processes); - profile_p = process_tab[internal_pid_index(profiler)]; + profile_p = erts_proc_lookup(profiler); - if (INVALID_PID(profile_p, profiler)) return; + if (!profile_p) + return; sz = size_object(message); hp = erts_alloc_message_heap(sz, &bp, &off_heap, profile_p, 0); @@ -626,13 +656,11 @@ seq_trace_send_to_port(Process *c_p, trace_port = NULL; #else - if (is_not_internal_port(seq_tracer)) - goto invalid_tracer_port; - - trace_port = &erts_port[internal_port_index(seq_tracer)]; - - if (INVALID_TRACER_PORT(trace_port, seq_tracer)) { - invalid_tracer_port: + trace_port = erts_id2port_sflgs(seq_tracer, + NULL, + 0, + ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); + if (!trace_port) { system_seq_tracer = am_false; #ifndef ERTS_SMP UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); @@ -650,6 +678,7 @@ seq_trace_send_to_port(Process *c_p, message); #ifndef ERTS_SMP + erts_port_release(trace_port); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); return; } @@ -691,6 +720,9 @@ seq_trace_send_to_port(Process *c_p, */ do_send_schedfix_to_port(trace_port, c_p->id, ts); } + + erts_port_release(trace_port); + UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE #endif @@ -789,13 +821,8 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched) ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags); } - if (ERTS_PROC_IS_EXITING(p) -#ifndef ERTS_SMP - || p->status == P_FREE -#endif - ) { + if (ERTS_PROC_IS_EXITING(p)) curr_func = 0; - } else { if (!p->current) p->current = find_function_from_pc(p->i); @@ -874,7 +901,7 @@ trace_send(Process *p, Eterm to, Eterm msg) operation = am_send; if (is_internal_pid(to)) { - if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, to, 0)) + if (!erts_proc_lookup(to)) goto send_to_non_existing_process; } else if(is_external_pid(to) @@ -1116,8 +1143,8 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type, #ifndef ERTS_SMP - tracer = process_tab[internal_pid_index(seq_tracer)]; - if (INVALID_PID(tracer, tracer->id)) { + tracer = erts_proc_lookup(seq_tracer); + if (!tracer) { system_seq_tracer = am_false; return; /* no need to send anything */ } @@ -2107,187 +2134,6 @@ void save_calls(Process *p, Export *e) } } -/* - * Entry point called by the trace wrap functions in erl_bif_wrap.c - * - * The trace wrap functions are themselves called through the export - * entries instead of the original BIF functions. 
- */ -Eterm -erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) -{ - Eterm result; - int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META); - - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); - - if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_CALLS) && (! meta)) { - /* Warning! This is an Optimization. - * - * If neither meta trace is active nor process trace flags then - * no tracing will occur. Doing the whole else branch will - * also do nothing, only slower. - */ - Eterm (*func)(Process*, Eterm*, BeamInstr*) = bif_table[bif_index].f; - result = func(p, args, I); - } else { - Eterm (*func)(Process*, Eterm*, BeamInstr*); - Export* ep = bif_export[bif_index]; - Uint32 flags = 0, flags_meta = 0; - int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL); - int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL); - int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME); - Eterm meta_tracer_pid = NIL; - int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif - * is actually in the - * export entry */ - BeamInstr *cp = p->cp; - - /* - * Make continuation pointer OK, it is not during direct BIF calls, - * but it is correct during apply of bif. - */ - if (!applying) { - p->cp = I; - } - if (global || local) { - flags = erts_call_trace(p, ep->code, ep->match_prog_set, args, - local, &p->tracer_proc); - } - if (meta) { - flags_meta = erts_bif_mtrace(p, ep->code+3, args, local, - &meta_tracer_pid); - } - if (time) { - BpDataTime *bdt = NULL; - BeamInstr *pc = (BeamInstr *)ep->code+3; - - bdt = (BpDataTime *) erts_get_time_break(p, pc); - ASSERT(bdt); - - if (!bdt->pause) { - erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL); - } - } - /* Restore original continuation pointer (if changed). */ - p->cp = cp; - - func = bif_table[bif_index].f; - - result = func(p, args, I); - - if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) { - BeamInstr i_return_trace = beam_return_trace[0]; - BeamInstr i_return_to_trace = beam_return_to_trace[0]; - BeamInstr i_return_time_trace = beam_return_time_trace[0]; - Eterm *cpp; - /* Maybe advance cp to skip trace stack frames */ - for (cpp = p->stop; ; cp = cp_val(*cpp++)) { - if (*cp == i_return_trace) { - /* Skip stack frame variables */ - while (is_not_CP(*cpp)) cpp++; - cpp += 2; /* Skip return_trace parameters */ - } else if (*cp == i_return_time_trace) { - /* Skip stack frame variables */ - while (is_not_CP(*cpp)) cpp++; - cpp += 1; /* Skip return_time_trace parameters */ - } else if (*cp == i_return_to_trace) { - /* A return_to trace message is going to be generated - * by normal means, so we do not have to. - */ - cp = NULL; - break; - } else break; - } - } - - /* Try to get these in the order - * they usually appear in normal code... */ - if (is_non_value(result)) { - Uint reason = p->freason; - if (reason != TRAP) { - Eterm class; - Eterm value = p->fvalue; - DeclareTmpHeapNoproc(nocatch,3); - UseTmpHeapNoproc(3); - /* Expand error value like in handle_error() */ - if (reason & EXF_ARGLIST) { - Eterm *tp; - ASSERT(is_tuple(value)); - tp = tuple_val(value); - value = tp[1]; - } - if ((reason & EXF_THROWN) && (p->catches <= 0)) { - value = TUPLE2(nocatch, am_nocatch, value); - reason = EXC_ERROR; - } - /* Note: expand_error_value() could theoretically - * allocate on the heap, but not for any error - * returned by a BIF, and it would do no harm, - * just be annoying. 
- */ - value = expand_error_value(p, reason, value); - class = exception_tag[GET_EXC_CLASS(reason)]; - - if (flags_meta & MATCH_SET_EXCEPTION_TRACE) { - erts_trace_exception(p, ep->code, class, value, - &meta_tracer_pid); - } - if (flags & MATCH_SET_EXCEPTION_TRACE) { - erts_trace_exception(p, ep->code, class, value, - &p->tracer_proc); - } - if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) { - /* can only happen if(local)*/ - Eterm *ptr = p->stop; - ASSERT(is_CP(*ptr)); - ASSERT(ptr <= STACK_START(p)); - /* Search the nearest stack frame for a catch */ - while (++ptr < STACK_START(p)) { - if (is_CP(*ptr)) break; - if (is_catch(*ptr)) { - if (applying) { - /* Apply of BIF, cp is in calling function */ - if (cp) erts_trace_return_to(p, cp); - } else { - /* Direct bif call, I points into - * calling function */ - erts_trace_return_to(p, I); - } - } - } - } - UnUseTmpHeapNoproc(3); - if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) { - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); - p->trace_flags |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); - } - } - } else { - if (flags_meta & MATCH_SET_RX_TRACE) { - erts_trace_return(p, ep->code, result, &meta_tracer_pid); - } - /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */ - if (flags & MATCH_SET_RX_TRACE) { - erts_trace_return(p, ep->code, result, &p->tracer_proc); - } - if (flags & MATCH_SET_RETURN_TO_TRACE) { - /* can only happen if(local)*/ - if (applying) { - /* Apply of BIF, cp is in calling function */ - if (cp) erts_trace_return_to(p, cp); - } else { - /* Direct bif call, I points into calling function */ - erts_trace_return_to(p, I); - } - } - } - } - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); - return result; -} - /* Sends trace message: * {trace_ts, Pid, What, Msg, Timestamp} * or {trace, Pid, What, Msg} @@ -2451,10 +2297,9 @@ monitor_long_gc(Process *p, Uint time) { #ifndef ERTS_SMP ASSERT(is_internal_pid(system_monitor) && internal_pid_index(system_monitor) < erts_max_processes); - monitor_p = process_tab[internal_pid_index(system_monitor)]; - if (INVALID_PID(monitor_p, system_monitor) || p == monitor_p) { + monitor_p = erts_proc_lookup(system_monitor); + if (!monitor_p || p == monitor_p) return; - } #endif hsz = 0; @@ -2527,8 +2372,8 @@ monitor_large_heap(Process *p) { #ifndef ERTS_SMP ASSERT(is_internal_pid(system_monitor) && internal_pid_index(system_monitor) < erts_max_processes); - monitor_p = process_tab[internal_pid_index(system_monitor)]; - if (INVALID_PID(monitor_p, system_monitor) || p == monitor_p) { + monitor_p = erts_proc_lookup(system_monitor); + if (monitor_p || p == monitor_p) { return; } #endif @@ -2582,10 +2427,9 @@ monitor_generic(Process *p, Eterm type, Eterm spec) { #ifndef ERTS_SMP ASSERT(is_internal_pid(system_monitor) && internal_pid_index(system_monitor) < erts_max_processes); - monitor_p = process_tab[internal_pid_index(system_monitor)]; - if (INVALID_PID(monitor_p, system_monitor) || p == monitor_p) { + monitor_p = erts_proc_lookup(system_monitor); + if (!monitor_p || p == monitor_p) return; - } #endif hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p); @@ -3149,10 +2993,10 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver) break; case SYS_MSG_TYPE_SEQTRACE: /* Reset seq_tracer if it hasn't changed */ - erts_smp_mtx_lock(&sys_trace_mtx); + erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); if (system_seq_tracer == receiver) system_seq_tracer = am_false; - erts_smp_mtx_unlock(&sys_trace_mtx); + erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); break; case 
SYS_MSG_TYPE_SYSMON: if (receiver == NIL @@ -3407,12 +3251,12 @@ sys_msg_dispatcher_func(void *unused) goto queue_proc_msg; } else if (is_internal_port(receiver)) { - port = erts_id2port(receiver, NULL, 0); - if (INVALID_TRACER_PORT(port, receiver)) { - if (port) - erts_port_release(port); + port = erts_id2port_sflgs(receiver, + NULL, + 0, + ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); + if (!port) goto failure; - } else { write_sys_msg_to_port(receiver, port, diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c index 26ac231ddb..51559aea1c 100644 --- a/erts/emulator/beam/erl_unicode.c +++ b/erts/emulator/beam/erl_unicode.c @@ -77,66 +77,25 @@ void erts_init_unicode(void) { max_loop_limit = CONTEXT_REDS * LOOP_FACTOR; /* Non visual BIFs to trap to. */ - memset(&characters_to_utf8_trap_exp, 0, sizeof(Export)); - characters_to_utf8_trap_exp.address = - &characters_to_utf8_trap_exp.code[3]; - characters_to_utf8_trap_exp.code[0] = am_erlang; - characters_to_utf8_trap_exp.code[1] = - am_atom_put("characters_to_utf8_trap",23); - characters_to_utf8_trap_exp.code[2] = 3; - characters_to_utf8_trap_exp.code[3] = - (BeamInstr) em_apply_bif; - characters_to_utf8_trap_exp.code[4] = - (BeamInstr) &characters_to_utf8_trap; - - memset(&characters_to_list_trap_1_exp, 0, sizeof(Export)); - characters_to_list_trap_1_exp.address = - &characters_to_list_trap_1_exp.code[3]; - characters_to_list_trap_1_exp.code[0] = am_erlang; - characters_to_list_trap_1_exp.code[1] = - am_atom_put("characters_to_list_trap_1",25); - characters_to_list_trap_1_exp.code[2] = 3; - characters_to_list_trap_1_exp.code[3] = - (BeamInstr) em_apply_bif; - characters_to_list_trap_1_exp.code[4] = - (BeamInstr) &characters_to_list_trap_1; - - memset(&characters_to_list_trap_2_exp, 0, sizeof(Export)); - characters_to_list_trap_2_exp.address = - &characters_to_list_trap_2_exp.code[3]; - characters_to_list_trap_2_exp.code[0] = am_erlang; - characters_to_list_trap_2_exp.code[1] = - am_atom_put("characters_to_list_trap_2",25); - characters_to_list_trap_2_exp.code[2] = 3; - characters_to_list_trap_2_exp.code[3] = - (BeamInstr) em_apply_bif; - characters_to_list_trap_2_exp.code[4] = - (BeamInstr) &characters_to_list_trap_2; - - - memset(&characters_to_list_trap_3_exp, 0, sizeof(Export)); - characters_to_list_trap_3_exp.address = - &characters_to_list_trap_3_exp.code[3]; - characters_to_list_trap_3_exp.code[0] = am_erlang; - characters_to_list_trap_3_exp.code[1] = - am_atom_put("characters_to_list_trap_3",25); - characters_to_list_trap_3_exp.code[2] = 3; - characters_to_list_trap_3_exp.code[3] = - (BeamInstr) em_apply_bif; - characters_to_list_trap_3_exp.code[4] = - (BeamInstr) &characters_to_list_trap_3; - - memset(&characters_to_list_trap_4_exp, 0, sizeof(Export)); - characters_to_list_trap_4_exp.address = - &characters_to_list_trap_4_exp.code[3]; - characters_to_list_trap_4_exp.code[0] = am_erlang; - characters_to_list_trap_4_exp.code[1] = - am_atom_put("characters_to_list_trap_4",25); - characters_to_list_trap_4_exp.code[2] = 1; - characters_to_list_trap_4_exp.code[3] = - (BeamInstr) em_apply_bif; - characters_to_list_trap_4_exp.code[4] = - (BeamInstr) &characters_to_list_trap_4; + erts_init_trap_export(&characters_to_utf8_trap_exp, + am_erlang, am_atom_put("characters_to_utf8_trap",23), 3, + &characters_to_utf8_trap); + + erts_init_trap_export(&characters_to_list_trap_1_exp, + am_erlang, am_atom_put("characters_to_list_trap_1",25), 3, + &characters_to_list_trap_1); + + erts_init_trap_export(&characters_to_list_trap_2_exp, 
+ am_erlang, am_atom_put("characters_to_list_trap_2",25), 3, + &characters_to_list_trap_2); + + erts_init_trap_export(&characters_to_list_trap_3_exp, + am_erlang, am_atom_put("characters_to_list_trap_3",25), 3, + &characters_to_list_trap_3); + + erts_init_trap_export(&characters_to_list_trap_4_exp, + am_erlang, am_atom_put("characters_to_list_trap_4",25), 1, + &characters_to_list_trap_4); c_to_b_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_binary_int,2); c_to_l_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_list_int,2); diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d index bdfde58845..e3ebbb84f4 100644 --- a/erts/emulator/beam/erlang_dtrace.d +++ b/erts/emulator/beam/erlang_dtrace.d @@ -639,9 +639,9 @@ provider erlang { * Entry into the efile_drv.c file I/O driver * * For a list of command numbers used by this driver, see the section - * "Guide to probe arguments" in ../../../README.md. That section - * also contains explanation of the various integer and string - * arguments that may be present when any particular probe fires. + * "Guide to efile_drv.c probe arguments" in ../../../HOWTO/DTRACE.md. + * That section also contains explanation of the various integer and + * string arguments that may be present when any particular probe fires. * * NOTE: Not all Linux platforms (using SystemTap) can support * arguments beyond arg9. diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c index fb0ee99119..6b5121f917 100644 --- a/erts/emulator/beam/export.c +++ b/erts/emulator/beam/export.c @@ -32,98 +32,162 @@ #define EXPORT_HASH(m,f,a) ((m)*(f)+(a)) -static IndexTable export_table; /* Not locked. */ -static Hash secondary_export_table; /* Locked. */ +#ifdef DEBUG +# define IF_DEBUG(x) x +#else +# define IF_DEBUG(x) +#endif -#include "erl_smp.h" +static IndexTable export_tables[ERTS_NUM_CODE_IX]; /* Active not locked */ -static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table. */ +static erts_smp_atomic_t total_entries_bytes; -#define export_read_lock() erts_smp_rwmtx_rlock(&export_table_lock) -#define export_read_unlock() erts_smp_rwmtx_runlock(&export_table_lock) -#define export_write_lock() erts_smp_rwmtx_rwlock(&export_table_lock) -#define export_write_unlock() erts_smp_rwmtx_rwunlock(&export_table_lock) +#include "erl_smp.h" + +/* This lock protects the staging export table from concurrent access + * AND it protects the staging table from becoming active. + */ +erts_smp_mtx_t export_staging_lock; extern BeamInstr* em_call_error_handler; extern BeamInstr* em_call_traced_function; +struct export_entry +{ + IndexSlot slot; /* MUST BE LOCATED AT TOP OF STRUCT!!! */ + Export* ep; +}; + +/* Helper struct that brings things together in one allocation +*/ +struct export_blob +{ + Export exp; + struct export_entry entryv[ERTS_NUM_CODE_IX]; + /* Note that entryv is not indexed by "code_ix". 
+ */ +}; + +/* Helper struct only used as template +*/ +struct export_templ +{ + struct export_entry entry; + Export exp; +}; + +static struct export_blob* entry_to_blob(struct export_entry* ee) +{ + return (struct export_blob*) + ((char*)ee->ep - offsetof(struct export_blob,exp)); +} + void export_info(int to, void *to_arg) { #ifdef ERTS_SMP int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - export_read_lock(); + export_staging_lock(); #endif - index_info(to, to_arg, &export_table); - hash_info(to, to_arg, &secondary_export_table); + index_info(to, to_arg, &export_tables[erts_active_code_ix()]); + hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable); #ifdef ERTS_SMP if (lock) - export_read_unlock(); + export_staging_unlock(); #endif } static HashValue -export_hash(Export* x) +export_hash(struct export_entry* ee) { + Export* x = ee->ep; return EXPORT_HASH(x->code[0], x->code[1], x->code[2]); } static int -export_cmp(Export* tmpl, Export* obj) +export_cmp(struct export_entry* tmpl_e, struct export_entry* obj_e) { + Export* tmpl = tmpl_e->ep; + Export* obj = obj_e->ep; return !(tmpl->code[0] == obj->code[0] && tmpl->code[1] == obj->code[1] && tmpl->code[2] == obj->code[2]); } -static Export* -export_alloc(Export* tmpl) +static struct export_entry* +export_alloc(struct export_entry* tmpl_e) { - Export* obj = (Export*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(Export)); - - obj->fake_op_func_info_for_hipe[0] = 0; - obj->fake_op_func_info_for_hipe[1] = 0; - obj->code[0] = tmpl->code[0]; - obj->code[1] = tmpl->code[1]; - obj->code[2] = tmpl->code[2]; - obj->slot.index = -1; - obj->address = obj->code+3; - obj->code[3] = (BeamInstr) em_call_error_handler; - obj->code[4] = 0; - obj->match_prog_set = NULL; - return obj; + struct export_blob* blob; + unsigned ix; + + if (tmpl_e->slot.index == -1) { /* Template, allocate blob */ + Export* tmpl = tmpl_e->ep; + Export* obj; + + blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob)); + erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob)); + obj = &blob->exp; + obj->fake_op_func_info_for_hipe[0] = 0; + obj->fake_op_func_info_for_hipe[1] = 0; + obj->code[0] = tmpl->code[0]; + obj->code[1] = tmpl->code[1]; + obj->code[2] = tmpl->code[2]; + obj->code[3] = (BeamInstr) em_call_error_handler; + obj->code[4] = 0; + + for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) { + obj->addressv[ix] = obj->code+3; + + blob->entryv[ix].slot.index = -1; + blob->entryv[ix].ep = &blob->exp; + } + ix = 0; + } + else { /* Existing entry in another table, use free entry in blob */ + blob = entry_to_blob(tmpl_e); + for (ix = 0; blob->entryv[ix].slot.index >= 0; ix++) { + ASSERT(ix < ERTS_NUM_CODE_IX); + } + } + return &blob->entryv[ix]; } - -static void -export_free(Export* obj) +static void +export_free(struct export_entry* obj) { - erts_free(ERTS_ALC_T_EXPORT, (void*) obj); + struct export_blob* blob = entry_to_blob(obj); + int i; + obj->slot.index = -1; + for (i=0; i < ERTS_NUM_CODE_IX; i++) { + if (blob->entryv[i].slot.index >= 0) { + return; + } + } + erts_free(ERTS_ALC_T_EXPORT, blob); + erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob)); } - void init_export_table(void) { HashFunctions f; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + int i; - erts_smp_rwmtx_init_opt(&export_table_lock, &rwmtx_opt, "export_tab"); + erts_smp_mtx_init(&export_staging_lock, "export_tab"); + erts_smp_atomic_init_nob(&total_entries_bytes, 
0); f.hash = (H_FUN) export_hash; f.cmp = (HCMP_FUN) export_cmp; f.alloc = (HALLOC_FUN) export_alloc; f.free = (HFREE_FUN) export_free; - erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_table, "export_list", - EXPORT_INITIAL_SIZE, EXPORT_LIMIT, f); - hash_init(ERTS_ALC_T_EXPORT_TABLE, &secondary_export_table, - "secondary_export_table", 50, f); + for (i=0; i<ERTS_NUM_CODE_IX; i++) { + erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_tables[i], "export_list", + EXPORT_INITIAL_SIZE, EXPORT_LIMIT, f); + } } /* @@ -138,29 +202,42 @@ init_export_table(void) * called through such an export entry. * 3) This function is suitable for the implementation of erlang:apply/3. */ +extern Export* /* inline-helper */ +erts_find_export_entry(Eterm m, Eterm f, unsigned int a,ErtsCodeIndex code_ix); Export* -erts_find_export_entry(Eterm m, Eterm f, unsigned int a) +erts_find_export_entry(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix) { HashValue hval = EXPORT_HASH((BeamInstr) m, (BeamInstr) f, (BeamInstr) a); int ix; HashBucket* b; - ix = hval % export_table.htable.size; - b = export_table.htable.bucket[ix]; + ix = hval % export_tables[code_ix].htable.size; + b = export_tables[code_ix].htable.bucket[ix]; /* * Note: We have inlined the code from hash.c for speed. */ while (b != (HashBucket*) 0) { - Export* ep = (Export *) b; + Export* ep = ((struct export_entry*) b)->ep; if (ep->code[0] == m && ep->code[1] == f && ep->code[2] == a) { - break; + return ep; } b = b->next; } - return (Export*)b; + return NULL; +} + +static struct export_entry* init_template(struct export_templ* templ, + Eterm m, Eterm f, unsigned a) +{ + templ->entry.ep = &templ->exp; + templ->entry.slot.index = -1; + templ->exp.code[0] = m; + templ->exp.code[1] = f; + templ->exp.code[2] = a; + return &templ->entry; } @@ -176,47 +253,42 @@ erts_find_export_entry(Eterm m, Eterm f, unsigned int a) */ Export* -erts_find_function(Eterm m, Eterm f, unsigned int a) +erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix) { - Export e; - Export* ep; - - e.code[0] = m; - e.code[1] = f; - e.code[2] = a; - - ep = hash_get(&export_table.htable, (void*) &e); - if (ep != NULL && ep->address == ep->code+3 && - ep->code[3] != (BeamInstr) em_call_traced_function) { - ep = NULL; + struct export_templ templ; + struct export_entry* ee; + + ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a)); + if (ee == NULL || + (ee->ep->addressv[code_ix] == ee->ep->code+3 && + ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) { + return NULL; } - return ep; + return ee->ep; } /* * Returns a pointer to an existing export entry for a MFA, * or creates a new one and returns the pointer. * - * This function provides unlocked write access to the main export - * table. It should only be used during start up or when - * all other threads are blocked. + * This function acts on the staging export table. It should only be used + * to load new code. 
*/ Export* erts_export_put(Eterm mod, Eterm func, unsigned int arity) { - Export e; - int ix; + ErtsCodeIndex code_ix = erts_staging_code_ix(); + struct export_templ templ; + struct export_entry* ee; - ERTS_SMP_LC_ASSERT(erts_initialized == 0 - || erts_smp_thr_progress_is_blocking()); ASSERT(is_atom(mod)); ASSERT(is_atom(func)); - e.code[0] = mod; - e.code[1] = func; - e.code[2] = arity; - ix = index_put(&export_table, (void*) &e); - return (Export*) erts_index_lookup(&export_table, ix); + export_staging_lock(); + ee = (struct export_entry*) index_put_entry(&export_tables[code_ix], + init_template(&templ, mod, func, arity)); + export_staging_unlock(); + return ee->ep; } /* @@ -224,77 +296,117 @@ erts_export_put(Eterm mod, Eterm func, unsigned int arity) * export entry (making a call through it will cause the error_handler to * be called). * - * Stub export entries will be placed in the secondary export table. - * erts_export_consolidate() will move all stub export entries into the - * main export table (will be done the next time code is loaded). + * Stub export entries will be placed in the loader export table. */ Export* erts_export_get_or_make_stub(Eterm mod, Eterm func, unsigned int arity) { - Export e; + ErtsCodeIndex code_ix; Export* ep; + IF_DEBUG(int retrying = 0;) ASSERT(is_atom(mod)); ASSERT(is_atom(func)); - - e.code[0] = mod; - e.code[1] = func; - e.code[2] = arity; - ep = erts_find_export_entry(mod, func, arity); - if (ep == 0) { - /* - * The code is not loaded (yet). Put the export in the secondary - * export table, to avoid having to lock the main export table. - */ - export_write_lock(); - ep = (Export *) hash_put(&secondary_export_table, (void*) &e); - export_write_unlock(); - } + + do { + code_ix = erts_active_code_ix(); + ep = erts_find_export_entry(mod, func, arity, code_ix); + if (ep == 0) { + /* + * The code is not loaded (yet). Put the export in the staging + * export table, to avoid having to lock the active export table. + */ + export_staging_lock(); + if (erts_active_code_ix() == code_ix) { + struct export_templ templ; + struct export_entry* entry; + + IndexTable* tab = &export_tables[erts_staging_code_ix()]; + init_template(&templ, mod, func, arity); + entry = (struct export_entry *) index_put_entry(tab, &templ.entry); + ep = entry->ep; + ASSERT(ep); + } + else { /* race */ + ASSERT(!retrying); + IF_DEBUG(retrying = 1); + } + export_staging_unlock(); + } + } while (!ep); return ep; } -/* - * To be called before loading code (with other threads blocked). - * This function will move all export entries from the secondary - * export table into the primary. 
- */ -void -erts_export_consolidate(void) +Export *export_list(int i, ErtsCodeIndex code_ix) { -#ifdef DEBUG - HashInfo hi; -#endif - - ERTS_SMP_LC_ASSERT(erts_initialized == 0 - || erts_smp_thr_progress_is_blocking()); - - export_write_lock(); - erts_index_merge(&secondary_export_table, &export_table); - erts_hash_merge(&secondary_export_table, &export_table.htable); - export_write_unlock(); -#ifdef DEBUG - hash_get_info(&hi, &export_table.htable); - ASSERT(export_table.entries == hi.objs); -#endif + return ((struct export_entry*) erts_index_lookup(&export_tables[code_ix], i))->ep; } -Export *export_list(int i) +int export_list_size(ErtsCodeIndex code_ix) { - return (Export*) erts_index_lookup(&export_table, i); + return export_tables[code_ix].entries; } -int export_list_size(void) +int export_table_sz(void) +{ + int i, bytes = 0; + + export_staging_lock(); + for (i=0; i<ERTS_NUM_CODE_IX; i++) { + bytes += index_table_sz(&export_tables[i]); + } + export_staging_unlock(); + return bytes; +} +int export_entries_sz(void) { - return export_table.entries; + return erts_smp_atomic_read_nob(&total_entries_bytes); } +Export *export_get(Export *e) +{ + struct export_entry ee; + struct export_entry* entry; -int export_table_sz(void) + ee.ep = e; + entry = (struct export_entry*)hash_get(&export_tables[erts_active_code_ix()].htable, &ee); + return entry ? entry->ep : NULL; +} + +IF_DEBUG(static ErtsCodeIndex debug_start_load_ix = 0;) + + +void export_start_staging(void) { - return index_table_sz(&export_table); + ErtsCodeIndex dst_ix = erts_staging_code_ix(); + ErtsCodeIndex src_ix = erts_active_code_ix(); + IndexTable* dst = &export_tables[dst_ix]; + IndexTable* src = &export_tables[src_ix]; + struct export_entry* src_entry; + struct export_entry* dst_entry; + int i; + + ASSERT(dst_ix != src_ix); + ASSERT(debug_start_load_ix == -1); + + export_staging_lock(); + /* + * Insert all entries in src into dst table + */ + for (i = 0; i < src->entries; i++) { + src_entry = (struct export_entry*) erts_index_lookup(src, i); + src_entry->ep->addressv[dst_ix] = src_entry->ep->addressv[src_ix]; + dst_entry = (struct export_entry*) index_put_entry(dst, src_entry); + ASSERT(entry_to_blob(src_entry) == entry_to_blob(dst_entry)); + } + export_staging_unlock(); + + IF_DEBUG(debug_start_load_ix = dst_ix); } -Export *export_get(Export *e) +void export_end_staging(int commit) { - return hash_get(&export_table.htable, e); + ASSERT(debug_start_load_ix == erts_staging_code_ix()); + IF_DEBUG(debug_start_load_ix = -1); } + diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h index c604fdf7c3..ee06e69aff 100644 --- a/erts/emulator/beam/export.h +++ b/erts/emulator/beam/export.h @@ -28,14 +28,15 @@ #include "index.h" #endif +#include "code_ix.h" + /* ** Export entry */ + typedef struct export { - IndexSlot slot; /* MUST BE LOCATED AT TOP OF STRUCT!!! */ - void* address; /* Pointer to code for function. */ - struct binary* match_prog_set; /* Match program for tracing. */ + void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */ BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */ /* @@ -44,12 +45,12 @@ typedef struct export * code[2]: Arity (untagged integer). * code[3]: This entry is 0 unless the 'address' field points to it. * Threaded code instruction to load function - * (em_call_error_handler), execute BIF (em_apply_bif, - * em_apply_apply), or call a traced function - * (em_call_traced_function). 
- * code[4]: Function pointer to BIF function (for BIFs only) + * (em_call_error_handler), execute BIF (em_apply_bif), + * or a breakpoint instruction (op_i_generic_breakpoint). + * code[4]: Function pointer to BIF function (for BIFs only), * or pointer to threaded code if the module has an - * on_load function that has not been run yet. + * on_load function that has not been run yet, or pointer + * to code for function code[3] is a breakpont instruction. * Otherwise: 0. */ BeamInstr code[5]; @@ -59,21 +60,38 @@ typedef struct export void init_export_table(void); void export_info(int, void *); -Export* erts_find_export_entry(Eterm m, Eterm f, unsigned int a); +ERTS_GLB_INLINE Export* erts_active_export_entry(Eterm m, Eterm f, unsigned a); Export* erts_export_put(Eterm mod, Eterm func, unsigned int arity); - Export* erts_export_get_or_make_stub(Eterm, Eterm, unsigned); -void erts_export_consolidate(void); -Export *export_list(int); -int export_list_size(void); +Export *export_list(int,ErtsCodeIndex); +int export_list_size(ErtsCodeIndex); int export_table_sz(void); +int export_entries_sz(void); Export *export_get(Export*); +void export_start_staging(void); +void export_end_staging(int commit); + +extern erts_smp_mtx_t export_staging_lock; +#define export_staging_lock() erts_smp_mtx_lock(&export_staging_lock) +#define export_staging_unlock() erts_smp_mtx_unlock(&export_staging_lock) #include "beam_load.h" /* For em_* extern declarations */ #define ExportIsBuiltIn(EntryPtr) \ -(((EntryPtr)->address == (EntryPtr)->code + 3) && \ +(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->code + 3) && \ ((EntryPtr)->code[3] == (BeamInstr) em_apply_bif)) -#endif +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE Export* +erts_active_export_entry(Eterm m, Eterm f, unsigned int a) +{ + extern Export* erts_find_export_entry(Eterm m, Eterm f, unsigned a, ErtsCodeIndex); + return erts_find_export_entry(m, f, a, erts_active_code_ix()); +} + +#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ + +#endif /* __EXPORT_H__ */ + diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index 52f45b924f..ab1065aaa1 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -1850,8 +1850,8 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags, } else { *ep++ = FLOAT_EXT; - /* now the sprintf which does the work */ - i = sys_double_to_chars(f.fd, (char*) ep); + /* now the erts_snprintf which does the work */ + i = sys_double_to_chars(f.fd, (char*) ep, (size_t)31); /* Don't leave garbage after the float! (Bad practice in general, * and Purify complains.) 
@@ -2564,7 +2564,7 @@ dec_term_atom_common: goto error; } if (edep && (edep->flags & ERTS_DIST_EXT_BTT_SAFE)) { - if (!erts_find_export_entry(mod, name, arity)) + if (!erts_active_export_entry(mod, name, arity)) goto error; } *objp = make_export(hp); diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index dbf95f5bd7..4c0d3421c8 100755 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -28,6 +28,7 @@ #include "hash.h" #include "index.h" #include "atom.h" +#include "code_ix.h" #include "export.h" #include "module.h" #include "register.h" @@ -143,8 +144,8 @@ typedef struct ErtsXPortsList_ ErtsXPortsList; struct port { ErtsPortTaskSched sched; ErtsPortTaskHandle timeout_task; -#ifdef ERTS_SMP erts_smp_atomic_t refc; +#ifdef ERTS_SMP erts_smp_mtx_t *lock; ErtsXPortsList *xports; erts_smp_atomic_t run_queue; @@ -197,6 +198,8 @@ erts_port_runq(Port *prt) #ifdef ERTS_SMP ErtsRunQueue *rq1, *rq2; rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); + if (!rq1) + return NULL; while (1) { erts_smp_runq_lock(rq1); rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); @@ -204,6 +207,8 @@ erts_port_runq(Port *prt) return rq1; erts_smp_runq_unlock(rq1); rq1 = rq2; + if (!rq1) + return NULL; } #else return ERTS_RUNQ_IX(0); @@ -764,6 +769,8 @@ void erts_queue_monitor_message(Process *, Eterm, Eterm, Eterm); +void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a, + Eterm (*bif)(Process*,Eterm*)); void erts_init_bif(void); Eterm erl_send(Process *p, Eterm to, Eterm msg); @@ -786,24 +793,33 @@ typedef struct { Eterm* fname_ptr; /* Pointer to fname table */ } FunctionInfo; -struct LoaderState* erts_alloc_loader_state(void); -Eterm erts_prepare_loading(struct LoaderState*, Process *c_p, +Binary* erts_alloc_loader_state(void); +Eterm erts_module_for_prepared_code(Binary* magic); +Eterm erts_prepare_loading(Binary* loader_state, Process *c_p, Eterm group_leader, Eterm* modp, byte* code, Uint size); -Eterm erts_finish_loading(struct LoaderState* stp, Process* c_p, +Eterm erts_finish_loading(Binary* loader_state, Process* c_p, ErtsProcLocks c_p_locks, Eterm* modp); -Eterm erts_load_module(Process *c_p, ErtsProcLocks c_p_locks, - Eterm group_leader, Eterm* mod, byte* code, Uint size); +Eterm erts_preload_module(Process *c_p, ErtsProcLocks c_p_locks, + Eterm group_leader, Eterm* mod, byte* code, Uint size); void init_load(void); BeamInstr* find_function_from_pc(BeamInstr* pc); Eterm* erts_build_mfa_item(FunctionInfo* fi, Eterm* hp, Eterm args, Eterm* mfa_p); -void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info); void erts_set_current_function(FunctionInfo* fi, BeamInstr* current); Eterm erts_module_info_0(Process* p, Eterm module); Eterm erts_module_info_1(Process* p, Eterm module, Eterm what); Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info); +/* beam_ranges.c */ +void erts_init_ranges(void); +void erts_start_staging_ranges(void); +void erts_end_staging_ranges(int commit); +void erts_update_ranges(BeamInstr* code, Uint size); +void erts_remove_from_ranges(BeamInstr* code); +UWord erts_ranges_sz(void); +void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info); + /* break.c */ void init_break_handler(void); void erts_set_ignore_break(void); @@ -983,6 +999,9 @@ Uint erts_port_ioq_size(Port *pp); void erts_stale_drv_select(Eterm, ErlDrvEvent, int, int); void erts_port_cleanup(Port *); void erts_fire_port_monitor(Port *prt, Eterm ref); + +Port *erts_get_heart_port(void); + 
#ifdef ERTS_SMP void erts_smp_xports_unlock(Port *); #endif @@ -1024,28 +1043,29 @@ erts_smp_port_state_unlock(Port *prt) ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt) { -#ifdef ERTS_SMP int res; ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0); erts_smp_atomic_inc_nob(&prt->refc); + +#ifdef ERTS_SMP res = erts_smp_mtx_trylock(prt->lock); if (res == EBUSY) { erts_smp_atomic_dec_nob(&prt->refc); } +#else + res = 0; +#endif return res; -#else /* !ERTS_SMP */ - return 0; -#endif } ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt) { -#ifdef ERTS_SMP ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0); erts_smp_atomic_inc_nob(&prt->refc); +#ifdef ERTS_SMP erts_smp_mtx_lock(prt->lock); #endif } @@ -1053,14 +1073,14 @@ erts_smp_port_lock(Port *prt) ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt) { -#ifdef ERTS_SMP erts_aint_t refc; +#ifdef ERTS_SMP erts_smp_mtx_unlock(prt->lock); +#endif refc = erts_smp_atomic_dec_read_nob(&prt->refc); ASSERT(refc >= 0); if (refc == 0) erts_port_cleanup(prt); -#endif } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ @@ -1123,12 +1143,12 @@ erts_id2port_sflgs(Eterm id, Process *c_p, ErtsProcLocks c_p_locks, Uint32 sflgs erts_smp_port_state_unlock(prt); prt = NULL; } -#ifdef ERTS_SMP else { erts_smp_atomic_inc_nob(&prt->refc); erts_smp_port_state_unlock(prt); - if (no_proc_locks) +#ifdef ERTS_SMP + if (no_proc_locks) erts_smp_mtx_lock(prt->lock); else if (erts_smp_mtx_trylock(prt->lock) == EBUSY) { /* Unlock process locks, and acquire locks in lock order... */ @@ -1144,21 +1164,17 @@ erts_id2port_sflgs(Eterm id, Process *c_p, ErtsProcLocks c_p_locks, Uint32 sflgs erts_smp_port_unlock(prt); /* Also decrements refc... */ prt = NULL; } - } #endif + } + return prt; } ERTS_GLB_INLINE void erts_port_release(Port *prt) { -#ifdef ERTS_SMP erts_smp_port_unlock(prt); -#else - if (prt->status & ERTS_PORT_SFLGS_DEAD) - erts_port_cleanup(prt); -#endif } ERTS_GLB_INLINE Port* @@ -1275,6 +1291,122 @@ void erl_drv_thr_init(void); /* utils.c */ +typedef struct { +#ifdef DEBUG + int smp_api; +#endif + union { + Uint64 not_atomic; +#ifdef ARCH_64 + erts_atomic_t atomic; +#else + erts_dw_atomic_t atomic; +#endif + } counter; +} erts_interval_t; + +void erts_interval_init(erts_interval_t *); +void erts_smp_interval_init(erts_interval_t *); +Uint64 erts_step_interval_nob(erts_interval_t *); +Uint64 erts_step_interval_relb(erts_interval_t *); +Uint64 erts_smp_step_interval_nob(erts_interval_t *); +Uint64 erts_smp_step_interval_relb(erts_interval_t *); +Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64); +Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64); +Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64); +Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64); +#ifdef ARCH_32 +ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *); +#endif +ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +#ifdef ARCH_32 + +ERTS_GLB_INLINE Uint64 +erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw) +{ +#ifdef ETHR_SU_DW_NAINT_T__ + return (Uint64) dw->dw_sint; +#else + Uint64 res; + res = (Uint64) 
((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]); + res <<= 32; + res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]); + return res; +#endif +} + +#endif + +ERTS_GLB_INLINE Uint64 +erts_current_interval_nob__(erts_interval_t *icp) +{ +#ifdef ARCH_64 + return (Uint64) erts_atomic_read_nob(&icp->counter.atomic); +#else + erts_dw_aint_t dw; + erts_dw_atomic_read_nob(&icp->counter.atomic, &dw); + return erts_interval_dw_aint_to_val__(&dw); +#endif +} + +ERTS_GLB_INLINE Uint64 +erts_current_interval_acqb__(erts_interval_t *icp) +{ +#ifdef ARCH_64 + return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic); +#else + erts_dw_aint_t dw; + erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw); + return erts_interval_dw_aint_to_val__(&dw); +#endif +} + +ERTS_GLB_INLINE Uint64 +erts_current_interval_nob(erts_interval_t *icp) +{ + ASSERT(!icp->smp_api); + return erts_current_interval_nob__(icp); +} + +ERTS_GLB_INLINE Uint64 +erts_current_interval_acqb(erts_interval_t *icp) +{ + ASSERT(!icp->smp_api); + return erts_current_interval_acqb__(icp); +} + +ERTS_GLB_INLINE Uint64 +erts_smp_current_interval_nob(erts_interval_t *icp) +{ + ASSERT(icp->smp_api); +#ifdef ERTS_SMP + return erts_current_interval_nob__(icp); +#else + return icp->counter.not_atomic; +#endif +} + +ERTS_GLB_INLINE Uint64 +erts_smp_current_interval_acqb(erts_interval_t *icp) +{ + ASSERT(icp->smp_api); +#ifdef ERTS_SMP + return erts_current_interval_acqb__(icp); +#else + return icp->counter.not_atomic; +#endif +} + +#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ + /* * To be used to silence unused result warnings, but do not abuse it. */ @@ -1282,9 +1414,10 @@ void erts_silence_warn_unused_result(long unused); void erts_cleanup_offheap(ErlOffHeap *offheap); -Uint erts_fit_in_bits(Uint); +int erts_fit_in_bits_int64(Sint64); +int erts_fit_in_bits_int32(Sint32); int list_length(Eterm); -Export* erts_find_function(Eterm, Eterm, unsigned int); +Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex); int erts_is_builtin(Eterm, Eterm, int); Uint32 make_broken_hash(Eterm); Uint32 block_hash(byte *, unsigned, Uint32); @@ -1556,17 +1689,19 @@ struct trace_pattern_flags { }; extern const struct trace_pattern_flags erts_trace_pattern_flags_off; extern int erts_call_time_breakpoint_tracing; -int erts_set_trace_pattern(Eterm* mfa, int specified, +int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified, Binary* match_prog_set, Binary *meta_match_prog_set, int on, struct trace_pattern_flags, - Eterm meta_tracer_pid); + Eterm meta_tracer_pid, int is_blocking); void erts_get_default_trace_pattern(int *trace_pattern_is_on, Binary **match_spec, Binary **meta_match_spec, struct trace_pattern_flags *trace_pattern_flags, Eterm *meta_tracer_pid); +int erts_is_default_trace_enabled(void); void erts_bif_trace_init(void); +int erts_finish_breakpointing(void); /* ** Call_trace uses this API for the parameter matching functions @@ -1612,20 +1747,19 @@ extern void erts_match_prog_foreach_offheap(Binary *b, breakpoint functions */ #define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */ #define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE) -/* - * Flag values when tracing bif - * Future note: flag field is 8 bits - */ -#define BIF_TRACE_AS_LOCAL (0x1) -#define BIF_TRACE_AS_GLOBAL (0x2) -#define BIF_TRACE_AS_META (0x4) -#define BIF_TRACE_AS_CALL_TIME (0x8) extern erts_driver_t vanilla_driver; extern erts_driver_t spawn_driver; extern erts_driver_t fd_driver; /* Should maybe be placed in erl_message.h, but then 
we get an include mess. */ +ERTS_GLB_INLINE Eterm * +erts_alloc_message_heap_state(Uint size, + ErlHeapFragment **bpp, + ErlOffHeap **ohpp, + Process *receiver, + ErtsProcLocks *receiver_locks, + erts_aint32_t *statep); ERTS_GLB_INLINE Eterm * erts_alloc_message_heap(Uint size, @@ -1648,16 +1782,22 @@ erts_alloc_message_heap(Uint size, */ ERTS_GLB_INLINE Eterm * -erts_alloc_message_heap(Uint size, - ErlHeapFragment **bpp, - ErlOffHeap **ohpp, - Process *receiver, - ErtsProcLocks *receiver_locks) +erts_alloc_message_heap_state(Uint size, + ErlHeapFragment **bpp, + ErlOffHeap **ohpp, + Process *receiver, + ErtsProcLocks *receiver_locks, + erts_aint32_t *statep) { Eterm *hp; + erts_aint32_t state; #ifdef ERTS_SMP int locked_main = 0; - ErtsProcLocks ulocks = *receiver_locks & ERTS_PROC_LOCKS_MSG_SEND; + state = erts_smp_atomic32_read_acqb(&receiver->state); + if (statep) + *statep = state; + if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) + goto allocate_in_mbuf; #endif if (size > (Uint) INT_MAX) @@ -1673,20 +1813,19 @@ erts_alloc_message_heap(Uint size, #ifdef ERTS_SMP try_allocate_on_heap: #endif - if (ERTS_PROC_IS_EXITING(receiver) + state = erts_smp_atomic32_read_nob(&receiver->state); + if (statep) + *statep = state; + if ((state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) || HEAP_LIMIT(receiver) - HEAP_TOP(receiver) <= size) { #ifdef ERTS_SMP - if (locked_main) - ulocks |= ERTS_PROC_LOCK_MAIN; + if (locked_main) { + *receiver_locks &= ~ERTS_PROC_LOCK_MAIN; + erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MAIN); + } #endif goto allocate_in_mbuf; } -#ifdef ERTS_SMP - if (ulocks) { - erts_smp_proc_unlock(receiver, ulocks); - *receiver_locks &= ~ulocks; - } -#endif hp = HEAP_TOP(receiver); HEAP_TOP(receiver) = hp + size; *bpp = NULL; @@ -1702,12 +1841,6 @@ erts_alloc_message_heap(Uint size, else { ErlHeapFragment *bp; allocate_in_mbuf: -#ifdef ERTS_SMP - if (ulocks) { - *receiver_locks &= ~ulocks; - erts_smp_proc_unlock(receiver, ulocks); - } -#endif bp = new_message_buffer(size); hp = bp->mem; *bpp = bp; @@ -1717,6 +1850,17 @@ erts_alloc_message_heap(Uint size, return hp; } +ERTS_GLB_INLINE Eterm * +erts_alloc_message_heap(Uint size, + ErlHeapFragment **bpp, + ErlOffHeap **ohpp, + Process *receiver, + ErtsProcLocks *receiver_locks) +{ + return erts_alloc_message_heap_state(size, bpp, ohpp, receiver, + receiver_locks, NULL); +} + #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ #if !HEAP_ON_C_STACK diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c index a4a3007f93..79c3ecf1b3 100644 --- a/erts/emulator/beam/index.c +++ b/erts/emulator/beam/index.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2009. All Rights Reserved. + * Copyright Ericsson AB 1996-2012. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -68,21 +68,22 @@ erts_index_init(ErtsAlcType_t type, IndexTable* t, char* name, return t; } -int -index_put(IndexTable* t, void* tmpl) +IndexSlot* +index_put_entry(IndexTable* t, void* tmpl) { int ix; IndexSlot* p = (IndexSlot*) hash_put(&t->htable, tmpl); if (p->index >= 0) { - return p->index; + return p; } ix = t->entries; if (ix >= t->size) { Uint sz; if (ix >= t->limit) { - erl_exit(1, "no more index entries in %s (max=%d)\n", + /* A core dump is unnecessary */ + erl_exit(ERTS_DUMP_EXIT, "no more index entries in %s (max=%d)\n", t->htable.name, t->limit); } sz = INDEX_PAGE_SIZE*sizeof(IndexSlot*); @@ -92,7 +93,7 @@ index_put(IndexTable* t, void* tmpl) t->entries++; p->index = ix; t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK] = p; - return ix; + return p; } int index_get(IndexTable* t, void* tmpl) @@ -135,3 +136,18 @@ void erts_index_merge(Hash* src, IndexTable* dst) } } } + +void index_erase_latest_from(IndexTable* t, Uint from_ix) +{ + if(from_ix < (Uint)t->entries) { + int ix; + for (ix = from_ix; ix < t->entries; ix++) { + IndexSlot* obj = t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK]; + hash_erase(&t->htable, obj); + } + t->entries = from_ix; + } + else { + ASSERT(from_ix == t->entries); + } +} diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h index 4eb9b1f992..3afe48d007 100644 --- a/erts/emulator/beam/index.h +++ b/erts/emulator/beam/index.h @@ -55,12 +55,24 @@ void index_info(int, void *, IndexTable*); int index_table_sz(IndexTable *); int index_get(IndexTable*, void*); -int index_put(IndexTable*, void*); + +IndexSlot* index_put_entry(IndexTable*, void*); void erts_index_merge(Hash*, IndexTable*); +/* Erase all entries with index 'ix' and higher +*/ +void index_erase_latest_from(IndexTable*, Uint ix); + +ERTS_GLB_INLINE int index_put(IndexTable*, void*); ERTS_GLB_INLINE IndexSlot* erts_index_lookup(IndexTable*, Uint); #if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE int index_put(IndexTable* t, void* tmpl) +{ + return index_put_entry(t, tmpl)->index; +} + ERTS_GLB_INLINE IndexSlot* erts_index_lookup(IndexTable* t, Uint ix) { diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index 35b194f927..60b9238d38 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -259,10 +259,8 @@ get_free_port(void) } } port->status = ERTS_PORT_SFLG_INITIALIZING; -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 0); + ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 0); erts_smp_atomic_set_nob(&port->refc, 2); /* Port alive + lock */ -#endif erts_smp_port_state_unlock(port); return num & port_num_mask; } @@ -340,10 +338,14 @@ port_cleanup(Port *prt) prt->drv_ptr = NULL; ASSERT(driver); -#ifdef ERTS_SMP - ASSERT(prt->status & ERTS_PORT_SFLG_FREE_SCHEDULED); - ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&prt->refc) == 0); + ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&prt->refc) == 0); + + ASSERT(prt->status & ERTS_PORT_SFLG_PORT_DEBUG); + ASSERT(!(prt->status & ERTS_PORT_SFLG_FREE)); + prt->status = ERTS_PORT_SFLG_FREE; + +#ifdef ERTS_SMP port_specific = (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK); @@ -352,10 +354,6 @@ port_cleanup(Port *prt) prt->lock = NULL; - ASSERT(prt->status & ERTS_PORT_SFLG_PORT_DEBUG); - ASSERT(!(prt->status & ERTS_PORT_SFLG_FREE)); - prt->status = ERTS_PORT_SFLG_FREE; - erts_smp_port_state_unlock(prt); erts_smp_mtx_unlock(mtx); @@ 
-606,10 +604,8 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ /* Need to mark the port as free again */ erts_smp_port_state_lock(port); port->status = ERTS_PORT_SFLG_FREE; -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 2); + ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 2); erts_smp_atomic_set_nob(&port->refc, 0); -#endif erts_smp_port_state_unlock(port); return -3; } @@ -1132,7 +1128,7 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list) Uint size; int fpe_was_unmasked; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p) || ERTS_IS_CRASH_DUMPING); ERTS_SMP_CHK_NO_PROC_LOCKS; p->caller = caller_id; @@ -1330,7 +1326,7 @@ void init_io(void) erts_max_ports = ERTS_MAX_R9_PORTS; } - port_extra_shift = erts_fit_in_bits(erts_max_ports - 1); + port_extra_shift = erts_fit_in_bits_int32(erts_max_ports - 1); port_num_mask = (1 << ports_bits) - 1; erts_port_tab_index_mask = ~(~((Uint) 0) << port_extra_shift); @@ -1354,8 +1350,8 @@ void init_io(void) for (i = 0; i < erts_max_ports; i++) { erts_port_task_init_sched(&erts_port[i].sched); -#ifdef ERTS_SMP erts_smp_atomic_init_nob(&erts_port[i].refc, 0); +#ifdef ERTS_SMP erts_port[i].lock = NULL; erts_port[i].xports = NULL; erts_smp_spinlock_init_x(&erts_port[i].state_lck, @@ -1594,6 +1590,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res) { Process *rp; ErtsProcLocks rp_locks = 0; + int scheduler = erts_get_scheduler_id() != 0; ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -1601,7 +1598,9 @@ deliver_result(Eterm sender, Eterm pid, Eterm res) && is_internal_pid(pid) && internal_pid_index(pid) < erts_max_processes); - rp = erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC); + rp = (scheduler + ? erts_proc_lookup(pid) + : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC)); if (rp) { Eterm tuple; @@ -1619,7 +1618,9 @@ deliver_result(Eterm sender, Eterm pid, Eterm res) #endif ); erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); + if (!scheduler) + erts_smp_proc_dec_refc(rp); + } } @@ -1644,6 +1645,7 @@ static void deliver_read_message(Port* prt, Eterm to, ErlHeapFragment *bp; ErlOffHeap *ohp; ErtsProcLocks rp_locks = 0; + int scheduler = erts_get_scheduler_id() != 0; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -1658,7 +1660,10 @@ static void deliver_read_message(Port* prt, Eterm to, need += 2*len; } - rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC); + rp = (scheduler + ? erts_proc_lookup(to) + : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC)); + if (!rp) return; @@ -1711,8 +1716,10 @@ static void deliver_read_message(Port* prt, Eterm to, , NIL #endif ); - erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + if (!scheduler) + erts_smp_proc_dec_refc(rp); } /* @@ -1779,6 +1786,7 @@ deliver_vec_message(Port* prt, /* Port */ ErlHeapFragment *bp; ErlOffHeap *ohp; ErtsProcLocks rp_locks = 0; + int scheduler = erts_get_scheduler_id() != 0; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -1787,7 +1795,10 @@ deliver_vec_message(Port* prt, /* Port */ * Check arguments for validity. */ - rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC); + + rp = (scheduler + ? 
erts_proc_lookup(to) + : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC)); if (!rp) return; @@ -1869,7 +1880,8 @@ deliver_vec_message(Port* prt, /* Port */ #endif ); erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); + if (!scheduler) + erts_smp_proc_dec_refc(rp); } @@ -2277,9 +2289,8 @@ void erts_port_command(Process *proc, { ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; - Process* rp = erts_pid2proc_opt(NULL, 0, - port->connected, rp_locks, - ERTS_P2P_FLG_SMP_INC_REFC); + Process* rp = erts_pid2proc(NULL, 0, + port->connected, rp_locks); if (rp) { (void) erts_send_exit_signal(NULL, port->id, @@ -2290,7 +2301,6 @@ void erts_port_command(Process *proc, NULL, 0); erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); } } @@ -2841,13 +2851,17 @@ void driver_report_exit(int ix, int status) ErlHeapFragment *bp = NULL; ErlOffHeap *ohp; ErtsProcLocks rp_locks = 0; + int scheduler = erts_get_scheduler_id() != 0; ERTS_SMP_CHK_NO_PROC_LOCKS; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); pid = prt->connected; ASSERT(is_internal_pid(pid)); - rp = erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC); + + rp = (scheduler + ? erts_proc_lookup(pid) + : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC)); if (!rp) return; @@ -2864,7 +2878,8 @@ void driver_report_exit(int ix, int status) ); erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); + if (!scheduler) + erts_smp_proc_dec_refc(rp); } @@ -2995,6 +3010,7 @@ driver_deliver_term(ErlDrvPort port, ErlOffHeap *ohp; ErtsProcLocks rp_locks = 0; struct b2t_states__ b2t; + int scheduler = 1; /* Silence erroneous warning... */ init_b2t_states(&b2t); @@ -3186,7 +3202,13 @@ driver_deliver_term(ErlDrvPort port, if (res <= 0) goto done; - rp = erts_pid2proc_opt(NULL, 0, to, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC); + /* + * Increase refc on proc if done from a non-scheduler thread. + */ + scheduler = erts_get_scheduler_id() != 0; + rp = (scheduler + ? erts_proc_lookup(to) + : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC)); if (!rp) { res = 0; goto done; @@ -3438,7 +3460,8 @@ driver_deliver_term(ErlDrvPort port, if (rp) { if (rp_locks) erts_smp_proc_unlock(rp, rp_locks); - erts_smp_proc_dec_refc(rp); + if (!scheduler) + erts_smp_proc_dec_refc(rp); } #endif cleanup_b2t_states(&b2t); @@ -5235,3 +5258,27 @@ erl_drv_getenv(char *key, char *value, size_t *value_size) { return erts_sys_getenv_raw(key, value, value_size); } + +/* get heart_port + * used by erl_crash_dump + * - uses the fact that heart_port is registered when starting heart + */ + +Port *erts_get_heart_port() { + + Port* port; + Uint ix; + + for(ix = 0; ix < erts_max_ports; ix++) { + port = &erts_port[ix]; + /* only examine undead or alive ports */ + if (port->status & ERTS_PORT_SFLGS_DEAD) + continue; + /* immediate atom compare */ + if (port->reg && port->reg->name == am_heart_port) { + return port; + } + } + + return NULL; +} diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c index b93b1ad09a..01856007ee 100644 --- a/erts/emulator/beam/module.c +++ b/erts/emulator/beam/module.c @@ -26,21 +26,32 @@ #include "global.h" #include "module.h" +#ifdef DEBUG +# define IF_DEBUG(x) x +#else +# define IF_DEBUG(x) +#endif + #define MODULE_SIZE 50 #define MODULE_LIMIT (64*1024) -static IndexTable module_table; +static IndexTable module_tables[ERTS_NUM_CODE_IX]; -/* - * SMP note: We don't need to look accesses to the module table because - * there is one only scheduler thread when we update it. 
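module.c is being reworked here from a single module_table into one table per code index; as the SMP note just below spells out, readers use the active index without locks while loaders work against the staging index under code-write permission. A hedged sketch of that split, using only calls declared elsewhere in this patch (erts_active_code_ix, erts_staging_code_ix, erts_get_module, erts_put_module, erts_has_code_write_permission); it is illustrative, not the patch's own code:

    /* Sketch: reader side - lock-free lookup in the active table. */
    static Module* hypothetical_reader_lookup(Eterm mod)
    {
        return erts_get_module(mod, erts_active_code_ix());
    }

    /* Sketch: loader side - insert into the staging table; assumes the
     * caller already holds code write permission. */
    static Module* hypothetical_loader_put(Eterm mod)
    {
        ASSERT(erts_has_code_write_permission());
        return erts_put_module(mod);  /* operates on erts_staging_code_ix() */
    }
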
+erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; + +static erts_smp_atomic_t tot_module_bytes; + +/* SMP note: Active module table lookup and current module instance can be + * read without any locks. Old module instances are protected by + * "the_old_code_rwlocks" as purging is done on active module table. + * Staging table is protected by the "code_ix lock". */ #include "erl_smp.h" void module_info(int to, void *to_arg) { - index_info(to, to_arg, &module_table); + index_info(to, to_arg, &module_tables[erts_active_code_ix()]); } @@ -59,45 +70,67 @@ static int module_cmp(Module* tmpl, Module* obj) static Module* module_alloc(Module* tmpl) { Module* obj = (Module*) erts_alloc(ERTS_ALC_T_MODULE, sizeof(Module)); + erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module)); obj->module = tmpl->module; - obj->code = 0; - obj->old_code = 0; - obj->code_length = 0; - obj->old_code_length = 0; + obj->curr.code = 0; + obj->old.code = 0; + obj->curr.code_length = 0; + obj->old.code_length = 0; obj->slot.index = -1; - obj->nif = NULL; - obj->old_nif = NULL; + obj->curr.nif = NULL; + obj->old.nif = NULL; + obj->curr.num_breakpoints = 0; + obj->old.num_breakpoints = 0; + obj->curr.num_traced_exports = 0; + obj->old.num_traced_exports = 0; return obj; } +static void module_free(Module* mod) +{ + erts_free(ERTS_ALC_T_MODULE, mod); + erts_smp_atomic_add_nob(&tot_module_bytes, -sizeof(Module)); +} void init_module_table(void) { HashFunctions f; + int i; f.hash = (H_FUN) module_hash; f.cmp = (HCMP_FUN) module_cmp; f.alloc = (HALLOC_FUN) module_alloc; - f.free = 0; + f.free = (HFREE_FUN) module_free; + + for (i = 0; i < ERTS_NUM_CODE_IX; i++) { + erts_index_init(ERTS_ALC_T_MODULE_TABLE, &module_tables[i], "module_code", + MODULE_SIZE, MODULE_LIMIT, f); + } - erts_index_init(ERTS_ALC_T_MODULE_TABLE, &module_table, "module_code", - MODULE_SIZE, MODULE_LIMIT, f); + for (i=0; i<ERTS_NUM_CODE_IX; i++) { + erts_smp_rwmtx_init_x(&the_old_code_rwlocks[i], "old_code", make_small(i)); + } + erts_smp_atomic_init_nob(&tot_module_bytes, 0); } Module* -erts_get_module(Eterm mod) +erts_get_module(Eterm mod, ErtsCodeIndex code_ix) { Module e; int index; + IndexTable* mod_tab; ASSERT(is_atom(mod)); + + mod_tab = &module_tables[code_ix]; + e.module = atom_val(mod); - index = index_get(&module_table, (void*) &e); + index = index_get(mod_tab, (void*) &e); if (index == -1) { return NULL; } else { - return (Module*) erts_index_lookup(&module_table, index); + return (Module*) erts_index_lookup(mod_tab, index); } } @@ -105,27 +138,101 @@ Module* erts_put_module(Eterm mod) { Module e; - int index; + IndexTable* mod_tab; + int oldsz, newsz; + Module* res; ASSERT(is_atom(mod)); ERTS_SMP_LC_ASSERT(erts_initialized == 0 - || erts_smp_thr_progress_is_blocking()); + || erts_has_code_write_permission()); + + mod_tab = &module_tables[erts_staging_code_ix()]; e.module = atom_val(mod); - index = index_put(&module_table, (void*) &e); - return (Module*) erts_index_lookup(&module_table, index); + oldsz = index_table_sz(mod_tab); + res = (Module*) index_put_entry(mod_tab, (void*) &e); + newsz = index_table_sz(mod_tab); + erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + return res; } -Module *module_code(int i) +Module *module_code(int i, ErtsCodeIndex code_ix) { - return (Module*) erts_index_lookup(&module_table, i); + return (Module*) erts_index_lookup(&module_tables[code_ix], i); } -int module_code_size(void) +int module_code_size(ErtsCodeIndex code_ix) { - return module_table.entries; + return 
module_tables[code_ix].entries; } int module_table_sz(void) { - return index_table_sz(&module_table); + return erts_smp_atomic_read_nob(&tot_module_bytes); +} + +#ifdef DEBUG +static ErtsCodeIndex dbg_load_code_ix = 0; +#endif + +static int entries_at_start_staging = 0; + +void module_start_staging(void) +{ + IndexTable* src = &module_tables[erts_active_code_ix()]; + IndexTable* dst = &module_tables[erts_staging_code_ix()]; + Module* src_mod; + Module* dst_mod; + int i, oldsz, newsz; + + ASSERT(dbg_load_code_ix == -1); + ASSERT(dst->entries <= src->entries); + + /* + * Make sure our existing modules are up-to-date + */ + for (i = 0; i < dst->entries; i++) { + src_mod = (Module*) erts_index_lookup(src, i); + dst_mod = (Module*) erts_index_lookup(dst, i); + ASSERT(src_mod->module == dst_mod->module); + + dst_mod->curr = src_mod->curr; + dst_mod->old = src_mod->old; + } + + /* + * Copy all new modules from active table + */ + oldsz = index_table_sz(dst); + for (i = dst->entries; i < src->entries; i++) { + src_mod = (Module*) erts_index_lookup(src, i); + dst_mod = (Module*) index_put_entry(dst, src_mod); + ASSERT(dst_mod != src_mod); + + dst_mod->curr = src_mod->curr; + dst_mod->old = src_mod->old; + } + newsz = index_table_sz(dst); + erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + + entries_at_start_staging = dst->entries; + IF_DEBUG(dbg_load_code_ix = erts_staging_code_ix()); +} + +void module_end_staging(int commit) +{ + ASSERT(dbg_load_code_ix == erts_staging_code_ix()); + + if (!commit) { /* abort */ + IndexTable* tab = &module_tables[erts_staging_code_ix()]; + int oldsz, newsz; + + ASSERT(entries_at_start_staging <= tab->entries); + oldsz = index_table_sz(tab); + index_erase_latest_from(tab, entries_at_start_staging); + newsz = index_table_sz(tab); + erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + } + + IF_DEBUG(dbg_load_code_ix = -1); } + diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h index 694e4ab72f..6e123a0ffe 100644 --- a/erts/emulator/beam/module.h +++ b/erts/emulator/beam/module.h @@ -24,28 +24,72 @@ #include "index.h" #endif +struct erl_module_instance { + BeamInstr* code; + int code_length; /* Length of loaded code in bytes. */ + unsigned catches; + struct erl_module_nif* nif; + int num_breakpoints; + int num_traced_exports; +}; typedef struct erl_module { IndexSlot slot; /* Must be located at top of struct! */ int module; /* Atom index for module (not tagged). */ - BeamInstr* code; - BeamInstr* old_code; - int code_length; /* Length of loaded code in bytes. 
*/ - int old_code_length; /* Length of old loaded code in bytes */ - unsigned catches, old_catches; - struct erl_module_nif* nif; - struct erl_module_nif* old_nif; + struct erl_module_instance curr; + struct erl_module_instance old; /* protected by "old_code" rwlock */ } Module; -Module* erts_get_module(Eterm mod); +Module* erts_get_module(Eterm mod, ErtsCodeIndex code_ix); Module* erts_put_module(Eterm mod); void init_module_table(void); +void module_start_staging(void); +void module_end_staging(int commit); void module_info(int, void *); -Module *module_code(int); -int module_code_size(void); +Module *module_code(int, ErtsCodeIndex); +int module_code_size(ErtsCodeIndex); int module_table_sz(void); +ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex); +ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex); +ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex); +ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex); +#ifdef ERTS_ENABLE_LOCK_CHECK +int erts_is_old_code_rlocked(ErtsCodeIndex); +#endif + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +extern erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; + +ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex code_ix) +{ + erts_smp_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]); +} +ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex code_ix) +{ + erts_smp_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]); +} +ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex code_ix) +{ + erts_smp_rwmtx_rlock(&the_old_code_rwlocks[code_ix]); +} +ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex code_ix) +{ + erts_smp_rwmtx_runlock(&the_old_code_rwlocks[code_ix]); +} + +#ifdef ERTS_ENABLE_LOCK_CHECK +ERTS_GLB_INLINE int erts_is_old_code_rlocked(ErtsCodeIndex code_ix) +{ + return erts_smp_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]); +} #endif + +#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ + + +#endif /* !__MODULE_H__ */ diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index 9b168889dd..6764e88c81 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -62,11 +62,8 @@ label L i_func_info I a a I int_code_end -i_trace_breakpoint -i_mtrace_breakpoint +i_generic_breakpoint i_debug_breakpoint -i_count_breakpoint -i_time_breakpoint i_return_time_trace i_return_to_trace i_yield @@ -522,7 +519,6 @@ apply_bif call_nif call_error_handler error_action_code -call_traced_function return_trace # @@ -829,16 +825,20 @@ call_ext_only Ar=u==2 Bif=u$bif:erlang:load_nif/2 => allocate u Ar | i_call_ext # -# The apply/2 and apply/3 BIFs are instructions. +# apply/2 is an instruction, not a BIF. # call_ext u==2 u$func:erlang:apply/2 => i_apply_fun call_ext_last u==2 u$func:erlang:apply/2 D => i_apply_fun_last D call_ext_only u==2 u$func:erlang:apply/2 => i_apply_fun_only -call_ext u==3 u$func:erlang:apply/3 => i_apply -call_ext_last u==3 u$func:erlang:apply/3 D => i_apply_last D -call_ext_only u==3 u$func:erlang:apply/3 => i_apply_only +# +# The apply/3 BIF is an instruction. 
+# + +call_ext u==3 u$bif:erlang:apply/3 => i_apply +call_ext_last u==3 u$bif:erlang:apply/3 D => i_apply_last D +call_ext_only u==3 u$bif:erlang:apply/3 => i_apply_only # # The exit/1 and throw/1 BIFs never execute the instruction following them; diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c index 26d64887d0..c02872ef80 100644 --- a/erts/emulator/beam/register.c +++ b/erts/emulator/beam/register.c @@ -93,19 +93,14 @@ reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks) reg_write_lock(); } +#endif + static ERTS_INLINE int is_proc_alive(Process *p) { - int res; - erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id); - erts_pix_lock(pixlck); - res = !p->is_exiting; - erts_pix_unlock(pixlck); - return res; + return !ERTS_PROC_IS_EXITING(p); } -#endif - void register_info(int to, void *to_arg) { int lock = !ERTS_IS_CRASH_DUMPING; @@ -389,8 +384,7 @@ erts_whereis_name(Process *c_p, } #else if (rp->p - && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) - || rp->p->status != P_EXITING)) + && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p))) *proc = rp->p; else *proc = NULL; @@ -402,7 +396,9 @@ erts_whereis_name(Process *c_p, if (!rp || !rp->pt) *port = NULL; else { -#ifdef ERTS_SMP +#ifndef ERTS_SMP + erts_smp_atomic_inc_nob(&rp->pt->refc); +#else if (pending_port == rp->pt) pending_port = NULL; else { @@ -509,9 +505,11 @@ int erts_unregister_name(Process *c_p, if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) { if (rp->pt) { if (port != rp->pt) { -#ifdef ERTS_SMP +#ifndef ERTS_SMP + erts_smp_atomic_inc_nob(&rp->pt->refc); +#else if (port) { - ERTS_SMP_LC_ASSERT(port != c_prt); + ASSERT(port != c_prt); erts_smp_port_unlock(port); port = NULL; } diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 2406c52f14..c5af5b9577 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -25,11 +25,6 @@ # define NO_FPE_SIGNALS #endif -/* xxxP __VXWORKS__ */ -#ifdef VXWORKS -#include <vxWorks.h> -#endif - #ifdef DISABLE_CHILD_WAITER_THREAD #undef ENABLE_CHILD_WAITER_THREAD #endif @@ -39,10 +34,10 @@ #define ENABLE_CHILD_WAITER_THREAD 1 #endif +#define ERTS_I64_LITERAL(X) X##LL + #if defined (__WIN32__) # include "erl_win_sys.h" -#elif defined (VXWORKS) -# include "erl_vxworks_sys.h" #else # include "erl_unix_sys.h" #ifndef UNIX @@ -91,14 +86,22 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType; # endif #endif -#ifdef __GNUC__ -# if __GNUC__ < 3 && (__GNUC__ != 2 || __GNUC_MINOR__ < 96) -# define ERTS_LIKELY(BOOL) (BOOL) -# define ERTS_UNLIKELY(BOOL) (BOOL) -# else -# define ERTS_LIKELY(BOOL) __builtin_expect((BOOL), !0) -# define ERTS_UNLIKELY(BOOL) __builtin_expect((BOOL), 0) -# endif +#if !defined(__GNUC__) +# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0 +#elif !defined(__GNUC_MINOR__) +# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ + ((__GNUC__ << 24) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) +#elif !defined(__GNUC_PATCHLEVEL__) +# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ + (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12)) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) +#else +# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ + (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) +#endif + +#if ERTS_AT_LEAST_GCC_VSN__(2, 96, 0) +# define ERTS_LIKELY(BOOL) __builtin_expect((BOOL), !0) +# define ERTS_UNLIKELY(BOOL) __builtin_expect((BOOL), 0) #else # define ERTS_LIKELY(BOOL) (BOOL) # define ERTS_UNLIKELY(BOOL) (BOOL) @@ -172,10 +175,16 @@ void erl_assert_error(char* expr, 
char* file, int line); # define const #endif -#ifdef VXWORKS -/* Replace VxWorks' printf with a real one that does fprintf(stdout, ...) */ -int real_printf(const char *fmt, ...); -# define printf real_printf +#undef __deprecated +#if ERTS_AT_LEAST_GCC_VSN__(3, 0, 0) +# define __deprecated __attribute__((deprecated)) +#else +# define __deprecated +#endif +#if ERTS_AT_LEAST_GCC_VSN__(3, 0, 4) +# define erts_align_attribute(SZ) __attribute__ ((aligned (SZ))) +#else +# define erts_align_attribute(SZ) #endif /* In VC++, noreturn is a declspec that has to be before the types, @@ -185,12 +194,6 @@ int real_printf(const char *fmt, ...); #if __GNUC__ # define __decl_noreturn # define __noreturn __attribute__((noreturn)) -# undef __deprecated -# if __GNUC__ >= 3 -# define __deprecated __attribute__((deprecated)) -# else -# define __deprecated -# endif #else # if defined(__WIN32__) && defined(_MSC_VER) # define __noreturn @@ -199,7 +202,6 @@ int real_printf(const char *fmt, ...); # define __noreturn # define __decl_noreturn # endif -# define __deprecated #endif /* @@ -471,38 +473,28 @@ static unsigned long zero_value = 0, one_value = 1; # define SET_NONBLOCKING(fd) ioctlsocket((fd), FIONBIO, &one_value) # else -# ifdef VXWORKS -# include <fcntl.h> /* xxxP added for O_WRONLY etc ... macro:s ... */ -# include <ioLib.h> -static const int zero_value = 0, one_value = 1; -# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, (int)&zero_value) -# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, (int)&one_value) -# define ERRNO_BLOCK EWOULDBLOCK - -# else -# ifdef NB_FIONBIO /* Old BSD */ -# include <sys/ioctl.h> +# ifdef NB_FIONBIO /* Old BSD */ +# include <sys/ioctl.h> static const int zero_value = 0, one_value = 1; -# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, &zero_value) -# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, &one_value) -# define ERRNO_BLOCK EWOULDBLOCK -# else /* !NB_FIONBIO */ -# include <fcntl.h> -# ifdef NB_O_NDELAY /* Nothing needs this? */ -# define NB_FLAG O_NDELAY -# ifndef ERRNO_BLOCK /* allow override (e.g. EAGAIN) via Makefile */ -# define ERRNO_BLOCK EWOULDBLOCK -# endif -# else /* !NB_O_NDELAY */ /* The True Way - POSIX!:-) */ -# define NB_FLAG O_NONBLOCK -# define ERRNO_BLOCK EAGAIN -# endif /* !NB_O_NDELAY */ -# define SET_BLOCKING(fd) fcntl((fd), F_SETFL, \ - fcntl((fd), F_GETFL, 0) & ~NB_FLAG) -# define SET_NONBLOCKING(fd) fcntl((fd), F_SETFL, \ - fcntl((fd), F_GETFL, 0) | NB_FLAG) -# endif /* !NB_FIONBIO */ -# endif /* _WXWORKS_ */ +# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, &zero_value) +# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, &one_value) +# define ERRNO_BLOCK EWOULDBLOCK +# else /* !NB_FIONBIO */ +# include <fcntl.h> +# ifdef NB_O_NDELAY /* Nothing needs this? */ +# define NB_FLAG O_NDELAY +# ifndef ERRNO_BLOCK /* allow override (e.g. 
EAGAIN) via Makefile */ +# define ERRNO_BLOCK EWOULDBLOCK +# endif +# else /* !NB_O_NDELAY */ /* The True Way - POSIX!:-) */ +# define NB_FLAG O_NONBLOCK +# define ERRNO_BLOCK EAGAIN +# endif /* !NB_O_NDELAY */ +# define SET_BLOCKING(fd) fcntl((fd), F_SETFL, \ + fcntl((fd), F_GETFL, 0) & ~NB_FLAG) +# define SET_NONBLOCKING(fd) fcntl((fd), F_SETFL, \ + fcntl((fd), F_GETFL, 0) | NB_FLAG) +# endif /* !NB_FIONBIO */ # endif /* !__WIN32__ */ #endif /* WANT_NONBLOCKING */ @@ -626,7 +618,7 @@ typedef struct { #define ERTS_SYS_DDLL_ERROR_INIT {NULL} extern void erts_sys_ddll_free_error(ErtsSysDdllError*); extern void erl_sys_ddll_init(void); /* to initialize mutexes etc */ -extern int erts_sys_ddll_open2(char *path, void **handle, ErtsSysDdllError*); +extern int erts_sys_ddll_open2(const char *path, void **handle, ErtsSysDdllError*); #define erts_sys_ddll_open(P,H) erts_sys_ddll_open2(P,H,NULL) extern int erts_sys_ddll_open_noext(char *path, void **handle, ErtsSysDdllError*); extern int erts_sys_ddll_load_driver_init(void *handle, void **function); @@ -635,7 +627,7 @@ extern int erts_sys_ddll_close2(void *handle, ErtsSysDdllError*); #define erts_sys_ddll_close(H) erts_sys_ddll_close2(H,NULL) extern void *erts_sys_ddll_call_init(void *function); extern void *erts_sys_ddll_call_nif_init(void *function); -extern int erts_sys_ddll_sym2(void *handle, char *name, void **function, ErtsSysDdllError*); +extern int erts_sys_ddll_sym2(void *handle, const char *name, void **function, ErtsSysDdllError*); #define erts_sys_ddll_sym(H,N,F) erts_sys_ddll_sym2(H,N,F,NULL) extern char *erts_sys_ddll_error(int code); @@ -652,7 +644,7 @@ void erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec); void erts_sys_main_thread(void); #endif -extern void erts_sys_prepare_crash_dump(void); +extern void erts_sys_prepare_crash_dump(int secs); extern void erts_sys_pre_init(void); extern void erl_sys_init(void); extern void erl_sys_args(int *argc, char **argv); @@ -699,8 +691,8 @@ void fini_getenv_state(GETENV_STATE *); /* xxxP */ void init_sys_float(void); int sys_chars_to_double(char*, double*); -int sys_double_to_chars(double, char*); -void sys_get_pid(char *); +int sys_double_to_chars(double, char*, size_t); +void sys_get_pid(char *, size_t); /* erts_sys_putenv() returns, 0 on success and a value != 0 on failure. */ int erts_sys_putenv(char *key, char *value); @@ -858,13 +850,6 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val) extern int erts_use_kernel_poll; #endif -#if defined(VXWORKS) -/* NOTE! 
sys_calloc2 does not exist on other - platforms than VxWorks and OSE */ -void* sys_calloc2(Uint, Uint); -#endif /* VXWORKS || OSE */ - - #define sys_memcpy(s1,s2,n) memcpy(s1,s2,n) #define sys_memmove(s1,s2,n) memmove(s1,s2,n) #define sys_memcmp(s1,s2,n) memcmp(s1,s2,n) @@ -971,43 +956,6 @@ void erl_bin_write(unsigned char *, int, int); # define DEBUGF(x) #endif - -#ifdef VXWORKS -/* This includes redefines of malloc etc - this should be done after sys_alloc, etc, above */ -# include "reclaim.h" -/*********************Malloc and friends************************ - * There is a problem with the naming of malloc and friends, - * malloc is used throughout sys.c and the resolver to mean save_alloc, - * but it should actually mean either sys_alloc or sys_alloc2, - * so the definitions from reclaim_master.h are not any - * good, i redefine the malloc family here, although it's quite - * ugly, actually it would be preferrable to use the - * names sys_alloc and so on throughout the offending code, but - * that will be saved as an later exercise... - * I also add an own calloc, to make the BSD resolver source happy. - ***************************************************************/ -/* Undefine malloc and friends */ -# ifdef malloc -# undef malloc -# endif -# ifdef calloc -# undef calloc -# endif -# ifdef realloc -# undef realloc -# endif -# ifdef free -# undef free -# endif -/* Redefine malloc and friends */ -# define malloc sys_alloc -# define calloc sys_calloc -# define realloc sys_realloc -# define free sys_free - -#endif - #ifdef __WIN32__ #ifdef ARCH_64 #define ERTS_ALLOC_ALIGN_BYTES 16 @@ -1023,23 +971,20 @@ void erl_bin_write(unsigned char *, int, int); #ifdef __WIN32__ - void call_break_handler(void); char* last_error(void); char* win32_errorstr(int); - - #endif /************************************************************************ * Find out the native filename encoding of the process (look at locale of * Unix processes and just do UTF16 on windows ************************************************************************/ -#define ERL_FILENAME_UNKNOWN 0 -#define ERL_FILENAME_LATIN1 1 -#define ERL_FILENAME_UTF8 2 -#define ERL_FILENAME_UTF8_MAC 3 -#define ERL_FILENAME_WIN_WCHAR 4 +#define ERL_FILENAME_UNKNOWN (0) +#define ERL_FILENAME_LATIN1 (1) +#define ERL_FILENAME_UTF8 (2) +#define ERL_FILENAME_UTF8_MAC (3) +#define ERL_FILENAME_WIN_WCHAR (4) int erts_get_native_filename_encoding(void); /* The set function is only to be used by erl_init! 
*/ @@ -1049,4 +994,3 @@ int erts_get_user_requested_filename_encoding(void); void erts_init_sys_common_misc(void); #endif - diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index bd708ceee6..1969fc762c 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -268,16 +268,42 @@ list_length(Eterm list) return i; } -Uint erts_fit_in_bits(Uint n) +static const struct { + Sint64 mask; + int bits; +} fib_data[] = {{ERTS_I64_LITERAL(0x2), 1}, + {ERTS_I64_LITERAL(0xc), 2}, + {ERTS_I64_LITERAL(0xf0), 4}, + {ERTS_I64_LITERAL(0xff00), 8}, + {ERTS_I64_LITERAL(0xffff0000), 16}, + {ERTS_I64_LITERAL(0xffffffff00000000), 32}}; + +static ERTS_INLINE int +fit_in_bits(Sint64 value, int start) { - Uint i; + int bits = 0; + int i; - i = 0; - while (n > 0) { - i++; - n >>= 1; - } - return i; + for (i = start; i >= 0; i--) { + if (value & fib_data[i].mask) { + value >>= fib_data[i].bits; + bits |= fib_data[i].bits; + } + } + + bits++; + + return bits; +} + +int erts_fit_in_bits_int64(Sint64 value) +{ + return fit_in_bits(value, 5); +} + +int erts_fit_in_bits_int32(Sint32 value) +{ + return fit_in_bits((Sint64) (Uint32) value, 4); } int @@ -1640,12 +1666,20 @@ static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len) } #ifndef ERTS_SMP - if ( #ifdef USE_THREADS - !erts_get_scheduler_data() || /* Must be scheduler thread */ + p = NULL; + if (erts_get_scheduler_data()) /* Must be scheduler thread */ #endif - (p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0)) == NULL - || p->status == P_RUNNING) { + { + p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0); + if (p) { + erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + if (state & ERTS_PSFLG_RUNNING) + p = NULL; + } + } + + if (!p) { /* buf *always* points to a null terminated string */ erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n", tag, buf); @@ -3240,7 +3274,7 @@ ptimer_timeout(ErtsSmpPTimer *ptimer) ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS, ERTS_P2P_FLG_ALLOW_OTHER_X); if (p) { - if (!p->is_exiting + if (!ERTS_PROC_IS_EXITING(p) && !(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) { ASSERT(*ptimer->timer.timer_ref == ptimer); *ptimer->timer.timer_ref = NULL; @@ -3436,6 +3470,254 @@ void erts_silence_warn_unused_result(long unused) } +/* + * Interval counts + */ +void +erts_interval_init(erts_interval_t *icp) +{ +#ifdef ARCH_64 + erts_atomic_init_nob(&icp->counter.atomic, 0); +#else + erts_dw_aint_t dw; +#ifdef ETHR_SU_DW_NAINT_T__ + dw.dw_sint = 0; +#else + dw.sint[ERTS_DW_AINT_HIGH_WORD] = 0; + dw.sint[ERTS_DW_AINT_LOW_WORD] = 0; +#endif + erts_dw_atomic_init_nob(&icp->counter.atomic, &dw); + +#endif +#ifdef DEBUG + icp->smp_api = 0; +#endif +} + +void +erts_smp_interval_init(erts_interval_t *icp) +{ +#ifdef ERTS_SMP + erts_interval_init(icp); +#else + icp->counter.not_atomic = 0; +#endif +#ifdef DEBUG + icp->smp_api = 1; +#endif +} + +static ERTS_INLINE Uint64 +step_interval_nob(erts_interval_t *icp) +{ +#ifdef ARCH_64 + return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic); +#else + erts_dw_aint_t exp; + + erts_dw_atomic_read_nob(&icp->counter.atomic, &exp); + while (1) { + erts_dw_aint_t new = exp; + +#ifdef ETHR_SU_DW_NAINT_T__ + new.dw_sint++; +#else + new.sint[ERTS_DW_AINT_LOW_WORD]++; + if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0) + new.sint[ERTS_DW_AINT_HIGH_WORD]++; +#endif + + if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp)) + return erts_interval_dw_aint_to_val__(&new); + + } +#endif +} + +static ERTS_INLINE Uint64 
+step_interval_relb(erts_interval_t *icp) +{ +#ifdef ARCH_64 + return (Uint64) erts_atomic_inc_read_relb(&icp->counter.atomic); +#else + erts_dw_aint_t exp; + + erts_dw_atomic_read_nob(&icp->counter.atomic, &exp); + while (1) { + erts_dw_aint_t new = exp; + +#ifdef ETHR_SU_DW_NAINT_T__ + new.dw_sint++; +#else + new.sint[ERTS_DW_AINT_LOW_WORD]++; + if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0) + new.sint[ERTS_DW_AINT_HIGH_WORD]++; +#endif + + if (erts_dw_atomic_cmpxchg_relb(&icp->counter.atomic, &new, &exp)) + return erts_interval_dw_aint_to_val__(&new); + + } +#endif +} + + +static ERTS_INLINE Uint64 +ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic) +{ + Uint64 curr_ic; +#ifdef ARCH_64 + curr_ic = (Uint64) erts_atomic_read_nob(&icp->counter.atomic); + if (curr_ic > ic) + return curr_ic; + return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic); +#else + erts_dw_aint_t exp; + + erts_dw_atomic_read_nob(&icp->counter.atomic, &exp); + curr_ic = erts_interval_dw_aint_to_val__(&exp); + if (curr_ic > ic) + return curr_ic; + + while (1) { + erts_dw_aint_t new = exp; + +#ifdef ETHR_SU_DW_NAINT_T__ + new.dw_sint++; +#else + new.sint[ERTS_DW_AINT_LOW_WORD]++; + if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0) + new.sint[ERTS_DW_AINT_HIGH_WORD]++; +#endif + + if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp)) + return erts_interval_dw_aint_to_val__(&new); + + curr_ic = erts_interval_dw_aint_to_val__(&exp); + if (curr_ic > ic) + return curr_ic; + } +#endif +} + + +static ERTS_INLINE Uint64 +ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) +{ + Uint64 curr_ic; +#ifdef ARCH_64 + curr_ic = (Uint64) erts_atomic_read_acqb(&icp->counter.atomic); + if (curr_ic > ic) + return curr_ic; + return (Uint64) erts_atomic_inc_read_acqb(&icp->counter.atomic); +#else + erts_dw_aint_t exp; + + erts_dw_atomic_read_acqb(&icp->counter.atomic, &exp); + curr_ic = erts_interval_dw_aint_to_val__(&exp); + if (curr_ic > ic) + return curr_ic; + + while (1) { + erts_dw_aint_t new = exp; + +#ifdef ETHR_SU_DW_NAINT_T__ + new.dw_sint++; +#else + new.sint[ERTS_DW_AINT_LOW_WORD]++; + if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0) + new.sint[ERTS_DW_AINT_HIGH_WORD]++; +#endif + + if (erts_dw_atomic_cmpxchg_acqb(&icp->counter.atomic, &new, &exp)) + return erts_interval_dw_aint_to_val__(&new); + + curr_ic = erts_interval_dw_aint_to_val__(&exp); + if (curr_ic > ic) + return curr_ic; + } +#endif +} + +Uint64 +erts_step_interval_nob(erts_interval_t *icp) +{ + ASSERT(!icp->smp_api); + return step_interval_nob(icp); +} + +Uint64 +erts_step_interval_relb(erts_interval_t *icp) +{ + ASSERT(!icp->smp_api); + return step_interval_relb(icp); +} + +Uint64 +erts_smp_step_interval_nob(erts_interval_t *icp) +{ + ASSERT(icp->smp_api); +#ifdef ERTS_SMP + return step_interval_nob(icp); +#else + return ++icp->counter.not_atomic; +#endif +} + +Uint64 +erts_smp_step_interval_relb(erts_interval_t *icp) +{ + ASSERT(icp->smp_api); +#ifdef ERTS_SMP + return step_interval_relb(icp); +#else + return ++icp->counter.not_atomic; +#endif +} + +Uint64 +erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic) +{ + ASSERT(!icp->smp_api); + return ensure_later_interval_nob(icp, ic); +} + +Uint64 +erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) +{ + ASSERT(!icp->smp_api); + return ensure_later_interval_acqb(icp, ic); +} + +Uint64 +erts_smp_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic) +{ + ASSERT(icp->smp_api); +#ifdef ERTS_SMP + return ensure_later_interval_nob(icp, ic); +#else + if (icp->counter.not_atomic > 
ic) + return icp->counter.not_atomic; + else + return ++icp->counter.not_atomic; +#endif +} + +Uint64 +erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) +{ + ASSERT(icp->smp_api); +#ifdef ERTS_SMP + return ensure_later_interval_acqb(icp, ic); +#else + if (icp->counter.not_atomic > ic) + return icp->counter.not_atomic; + else + return ++icp->counter.not_atomic; +#endif +} + + #ifdef DEBUG /* * Handy functions when using a debugger - don't use in the code! @@ -3468,7 +3750,7 @@ Process *p; void ppi(Eterm pid) { - pp(erts_pid2proc_unlocked(pid)); + pp(erts_proc_lookup(pid)); } void td(Eterm x) diff --git a/erts/emulator/drivers/common/gzio.c b/erts/emulator/drivers/common/gzio.c index a9303d55bc..ca6d25adb4 100644 --- a/erts/emulator/drivers/common/gzio.c +++ b/erts/emulator/drivers/common/gzio.c @@ -21,11 +21,6 @@ #include "erl_driver.h" #include "sys.h" -#ifdef VXWORKS -/* pull in FOPEN from zutil.h instead */ -#undef F_OPEN -#endif - #ifdef __WIN32__ #ifndef HAVE_CONFLICTING_FREAD_DECLARATION #define HAVE_CONFLICTING_FREAD_DECLARATION diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 8f4fff0f40..3210ffa92a 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -284,27 +284,15 @@ static unsigned long one_value = 1; #else -#ifdef VXWORKS -#include <sockLib.h> -#include <sys/times.h> -#include <iosLib.h> -#include <taskLib.h> -#include <selectLib.h> -#include <ioLib.h> -#else #include <sys/time.h> #ifdef NETDB_H_NEEDS_IN_H #include <netinet/in.h> #endif #include <netdb.h> -#endif #include <sys/socket.h> #include <netinet/in.h> -#ifdef VXWORKS -#include <rpc/rpctypes.h> -#endif #ifdef DEF_INADDR_LOOPBACK_IN_RPC_TYPES_H #include <rpc/types.h> #endif @@ -312,12 +300,10 @@ static unsigned long one_value = 1; #include <netinet/tcp.h> #include <arpa/inet.h> -#if (!defined(VXWORKS)) #include <sys/param.h> #ifdef HAVE_ARPA_NAMESER_H #include <arpa/nameser.h> #endif -#endif #ifdef HAVE_SYS_SOCKIO_H #include <sys/sockio.h> @@ -331,7 +317,7 @@ static unsigned long one_value = 1; /* SCTP support -- currently for UNIX platforms only: */ #undef HAVE_SCTP -#if (!defined(VXWORKS) && !defined(__WIN32__) && defined(HAVE_SCTP_H)) +#if (!defined(__WIN32__) && defined(HAVE_SCTP_H)) #include <netinet/sctp.h> @@ -478,15 +464,8 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define sock_connect(s, addr, len) connect((s), (addr), (len)) #define sock_listen(s, b) listen((s), (b)) #define sock_bind(s, addr, len) bind((s), (addr), (len)) -#ifdef VXWORKS -#define sock_getopt(s,t,n,v,l) wrap_sockopt(&getsockopt,\ - s,t,n,v,(unsigned int)(l)) -#define sock_setopt(s,t,n,v,l) wrap_sockopt(&setsockopt,\ - s,t,n,v,(unsigned int)(l)) -#else #define sock_getopt(s,t,n,v,l) getsockopt((s),(t),(n),(v),(l)) #define sock_setopt(s,t,n,v,l) setsockopt((s),(t),(n),(v),(l)) -#endif #define sock_name(s, addr, len) getsockname((s), (addr), (len)) #define sock_peer(s, addr, len) getpeername((s), (addr), (len)) #define sock_ntohs(x) ntohs((x)) @@ -535,6 +514,12 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #endif /* __WIN32__ */ +#ifdef HAVE_SOCKLEN_T +# define SOCKLEN_T socklen_t +#else +# define SOCKLEN_T int +#endif + #include "packet_parser.h" #define get_int24(s) ((((unsigned char*) (s))[0] << 16) | \ @@ -675,6 +660,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define UDP_OPT_MULTICAST_LOOP 13 /* set/get IP multicast loopback */ #define 
UDP_OPT_ADD_MEMBERSHIP 14 /* add an IP group membership */ #define UDP_OPT_DROP_MEMBERSHIP 15 /* drop an IP group membership */ +#define INET_OPT_IPV6_V6ONLY 16 /* IPv6 only socket, no mapped v4 addrs */ /* LOPT is local options */ #define INET_LOPT_BUFFER 20 /* min buffer size hint */ #define INET_LOPT_HEADER 21 /* list header size */ @@ -685,7 +671,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define INET_LOPT_EXITONCLOSE 26 /* exit port on active close or not ! */ #define INET_LOPT_TCP_HIWTRMRK 27 /* set local high watermark */ #define INET_LOPT_TCP_LOWTRMRK 28 /* set local low watermark */ -#define INET_LOPT_BIT8 29 /* set 8 bit detection */ + /* 29 unused */ #define INET_LOPT_TCP_SEND_TIMEOUT 30 /* set send timeout */ #define INET_LOPT_TCP_DELAY_SEND 31 /* Delay sends until next poll */ #define INET_LOPT_PACKET_SIZE 32 /* Max packet size */ @@ -720,12 +706,6 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define INET_IFOPT_FLAGS 6 #define INET_IFOPT_HWADDR 7 -/* INET_LOPT_BIT8 options */ -#define INET_BIT8_CLEAR 0 -#define INET_BIT8_SET 1 -#define INET_BIT8_ON 2 -#define INET_BIT8_OFF 3 - /* INET_REQ_GETSTAT enumeration */ #define INET_STAT_RECV_CNT 1 #define INET_STAT_RECV_MAX 2 @@ -921,7 +901,6 @@ typedef struct { int mode; /* BINARY | LIST (affect how to interpret hsz) */ int exitf; /* exit port on close or not */ - int bit8f; /* check if data has bit number 7 set */ int deliver; /* Delivery mode, TERM or PORT */ ErlDrvTermData caller; /* recipient of sync reply */ @@ -940,8 +919,6 @@ typedef struct { int sfamily; /* address family */ enum PacketParseType htype; /* header type (TCP only?) */ unsigned int psize; /* max packet size (TCP only?) */ - int bit8; /* set if bit8f==true and data some data - seen had the 7th bit set */ inet_address remote; /* remote address for connected sockets */ inet_address peer_addr; /* fake peer address */ inet_address name_addr; /* fake local address */ @@ -1191,6 +1168,7 @@ static ErlDrvTermData am_reuseaddr; static ErlDrvTermData am_dontroute; static ErlDrvTermData am_priority; static ErlDrvTermData am_tos; +static ErlDrvTermData am_ipv6_v6only; #endif /* speical errors for bad ports and sequences */ @@ -3363,17 +3341,6 @@ static int packet_error_message(udp_descriptor* udesc, int err) } -/* scan buffer for bit 7 */ -static void scanbit8(inet_descriptor* desc, const char* buf, int len) -{ - int c; - - if (!desc->bit8f || desc->bit8) return; - c = 0; - while(len--) c |= *buf++; - desc->bit8 = ((c & 0x80) != 0); -} - /* ** active=TRUE: ** (NOTE! 
distribution MUST use active=TRUE, deliver=PORT) @@ -3391,8 +3358,6 @@ static int tcp_reply_data(tcp_descriptor* desc, char* buf, int len) packet_get_body(desc->inet.htype, &body, &bodylen); - scanbit8(INETP(desc), body, bodylen); - if (desc->inet.deliver == INET_DELIVER_PORT) { code = inet_port_data(INETP(desc), body, bodylen); } @@ -3424,8 +3389,6 @@ tcp_reply_binary_data(tcp_descriptor* desc, ErlDrvBinary* bin, int offs, int len packet_get_body(desc->inet.htype, &body, &bodylen); offs = body - bin->orig_bytes; /* body offset now */ - scanbit8(INETP(desc), body, bodylen); - if (desc->inet.deliver == INET_DELIVER_PORT) code = inet_port_binary_data(INETP(desc), bin, offs, bodylen); else if ((code=packet_parse(desc->inet.htype, buf, len, &desc->http_state, @@ -3451,8 +3414,6 @@ packet_reply_binary_data(inet_descriptor* desc, unsigned int hsz, { int code; - scanbit8(desc, bin->orig_bytes+offs, len); - if (desc->active == INET_PASSIVE) /* "inet" is actually for both UDP and SCTP, as well as TCP! */ return inet_async_binary_data(desc, hsz, bin, offs, len, extra); @@ -3527,6 +3488,7 @@ static void inet_init_sctp(void) { INIT_ATOM(dontroute); INIT_ATOM(priority); INIT_ATOM(tos); + INIT_ATOM(ipv6_v6only); /* Option names */ INIT_ATOM(sctp_rtoinfo); @@ -5313,50 +5275,6 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p, #endif - - -#ifdef VXWORKS -/* -** THIS is a terrible creature, a bug in the TCP part -** of the old VxWorks stack (non SENS) created a race. -** If (and only if?) a socket got closed from the other -** end and we tried a set/getsockopt on the TCP level, -** the task would generate a bus error... -*/ -static STATUS wrap_sockopt(STATUS (*function)() /* Yep, no parameter - check */, - int s, int level, int optname, - char *optval, unsigned int optlen - /* optlen is a pointer if function - is getsockopt... */) -{ - fd_set rs; - struct timeval timeout; - int to_read; - int ret; - - FD_ZERO(&rs); - FD_SET(s,&rs); - memset(&timeout,0,sizeof(timeout)); - if (level == IPPROTO_TCP) { - taskLock(); - if (select(s+1,&rs,NULL,NULL,&timeout)) { - if (ioctl(s,FIONREAD,(int)&to_read) == ERROR || - to_read == 0) { /* End of file, other end closed? 
*/ - sock_errno() = EBADF; - taskUnlock(); - return ERROR; - } - } - ret = (*function)(s,level,optname,optval,optlen); - taskUnlock(); - } else { - ret = (*function)(s,level,optname,optval,optlen); - } - return ret; -} -#endif - /* Per H @ Tail-f: The original code here had problems that possibly only occur if you abuse it for non-INET sockets, but anyway: a) If the getsockopt for SO_PRIORITY or IP_TOS failed, the actual @@ -5382,13 +5300,8 @@ static int setopt_prio_tos_trick int res; int res_prio; int res_tos; -#ifdef HAVE_SOCKLEN_T - socklen_t -#else - int -#endif - tmp_arg_sz_prio = sizeof(tmp_ival_prio), - tmp_arg_sz_tos = sizeof(tmp_ival_tos); + SOCKLEN_T tmp_arg_sz_prio = sizeof(tmp_ival_prio); + SOCKLEN_T tmp_arg_sz_tos = sizeof(tmp_ival_tos); res_prio = sock_getopt(fd, SOL_SOCKET, SO_PRIORITY, (char *) &tmp_ival_prio, &tmp_arg_sz_prio); @@ -5532,29 +5445,6 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len) desc->exitf = ival; continue; - case INET_LOPT_BIT8: - DEBUGF(("inet_set_opts(%ld): s=%d, BIT8=%d\r\n", - (long)desc->port, desc->s, ival)); - switch(ival) { - case INET_BIT8_ON: - desc->bit8f = 1; - desc->bit8 = 0; - break; - case INET_BIT8_OFF: - desc->bit8f = 0; - desc->bit8 = 0; - break; - case INET_BIT8_CLEAR: - desc->bit8f = 1; - desc->bit8 = 0; - break; - case INET_BIT8_SET: - desc->bit8f = 1; - desc->bit8 = 1; - break; - } - continue; - case INET_LOPT_TCP_HIWTRMRK: if (desc->stype == SOCK_STREAM) { tcp_descriptor* tdesc = (tcp_descriptor*) desc; @@ -5636,23 +5526,11 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len) case INET_OPT_SNDBUF: type = SO_SNDBUF; DEBUGF(("inet_set_opts(%ld): s=%d, SO_SNDBUF=%d\r\n", (long)desc->port, desc->s, ival)); - /* - * Setting buffer sizes in VxWorks gives unexpected results - * our workaround is to leave it at default. - */ -#ifdef VXWORKS - goto skip_os_setopt; -#else break; -#endif case INET_OPT_RCVBUF: type = SO_RCVBUF; DEBUGF(("inet_set_opts(%ld): s=%d, SO_RCVBUF=%d\r\n", (long)desc->port, desc->s, ival)); -#ifdef VXWORKS - goto skip_os_setopt; -#else break; -#endif case INET_OPT_LINGER: type = SO_LINGER; if (len < 4) return -1; @@ -5743,6 +5621,23 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len) #endif /* HAVE_MULTICAST_SUPPORT */ + case INET_OPT_IPV6_V6ONLY: +#if HAVE_DECL_IPV6_V6ONLY + proto = IPPROTO_IPV6; + type = IPV6_V6ONLY; + propagate = 1; + DEBUGF(("inet_set_opts(%ld): s=%d, IPV6_V6ONLY=%d\r\n", + (long)desc->port, desc->s, ival)); + break; +#elif defined(__WIN32__) && defined(HAVE_IN6) && defined(AF_INET6) + /* Fake a'la OpenBSD; set to 'true' is fine but 'false' invalid. 
*/ + if (ival != 0) continue; + else return -1; + break; +#else + continue; +#endif + case INET_OPT_RAW: if (len < 8) { return -1; @@ -5774,9 +5669,6 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len) } DEBUGF(("inet_set_opts(%ld): s=%d returned %d\r\n", (long)desc->port, desc->s, res)); -#ifdef VXWORKS -skip_os_setopt: -#endif if (type == SO_RCVBUF) { /* make sure we have desc->bufsz >= SO_RCVBUF */ if (ival > desc->bufsz) @@ -6075,6 +5967,22 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len) continue; /* Option not supported -- ignore it */ # endif + case INET_OPT_IPV6_V6ONLY: +# if HAVE_DECL_IPV6_V6ONLY + { + arg.ival= get_int32 (curr); curr += 4; + proto = IPPROTO_IPV6; + type = IPV6_V6ONLY; + arg_ptr = (char*) (&arg.ival); + arg_sz = sizeof ( arg.ival); + break; + } +# elif defined(__WIN32__) && defined(HAVE_IN6) && defined(AF_INET6) +# error Here is a fix for Win IPv6 SCTP missing +# else + continue; /* Option not supported -- ignore it */ +# endif + case SCTP_OPT_AUTOCLOSE: { arg.ival= get_int32 (curr); curr += 4; @@ -6437,15 +6345,6 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc, put_int32(desc->exitf, ptr); continue; - case INET_LOPT_BIT8: - *ptr++ = opt; - if (desc->bit8f) { - put_int32(desc->bit8, ptr); - } else { - put_int32(INET_BIT8_OFF, ptr); - } - continue; - case INET_LOPT_TCP_HIWTRMRK: if (desc->stype == SOCK_STREAM) { *ptr++ = opt; @@ -6572,6 +6471,22 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc, break; #endif /* HAVE_MULTICAST_SUPPORT */ + case INET_OPT_IPV6_V6ONLY: +#if HAVE_DECL_IPV6_V6ONLY + proto = IPPROTO_IPV6; + type = IPV6_V6ONLY; + break; +#elif defined(__WIN32__) && defined(HAVE_IN6) && defined(AF_INET6) + /* Fake reading 'true' */ + *ptr++ = opt; + put_int32(1, ptr); + ptr += 4; + continue; +#else + TRUNCATE_TO(0,ptr); + continue; /* skip - no result */ +#endif + case INET_OPT_RAW: { int data_provided; @@ -6876,6 +6791,7 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc, case INET_OPT_DONTROUTE: case INET_OPT_PRIORITY : case INET_OPT_TOS : + case INET_OPT_IPV6_V6ONLY: case SCTP_OPT_AUTOCLOSE: case SCTP_OPT_MAXSEG : /* The following options return true or false: */ @@ -6948,6 +6864,20 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc, continue; # endif } + case INET_OPT_IPV6_V6ONLY: +# if HAVE_DECL_IPV6_V6ONLY + { + proto = IPPROTO_IPV6; + type = IPV6_V6ONLY; + tag = am_ipv6_v6only; + break; + } +# elif defined(__WIN32__) && defined(HAVE_IN6) && defined(AF_INET6) +# error Here is a fix for Win IPv6 SCTP needed +# else + /* Not supported -- ignore */ + continue; +# endif case SCTP_OPT_AUTOCLOSE: { proto = IPPROTO_SCTP; @@ -7504,8 +7434,6 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol) desc->mode = INET_MODE_LIST; /* list mode */ desc->exitf = 1; /* exit port when close on active socket */ - desc->bit8f = 0; - desc->bit8 = 0; desc->deliver = INET_DELIVER_TERM; /* standard term format */ desc->active = INET_PASSIVE; /* start passive */ desc->oph = NULL; @@ -7813,8 +7741,8 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, desc->state = INET_STATE_BOUND; if ((port = inet_address_port(&local)) == 0) { - len = sizeof(local); - sock_name(desc->s, (struct sockaddr*) &local, (unsigned int*)&len); + SOCKLEN_T adrlen = sizeof(local); + sock_name(desc->s, &local.sa, &adrlen); port = inet_address_port(&local); } port = sock_ntohs(port); @@ -7849,8 +7777,6 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, return 
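
The inet_drv.c hunks above introduce a new ipv6_v6only option across the TCP/UDP and SCTP set/get paths. Where IPV6_V6ONLY is declared it maps to a plain IPPROTO_IPV6-level socket option; on Windows builds with IPv6 it is faked OpenBSD-style (setting it to true is accepted, setting it to false is rejected, reading always returns true, and the SCTP paths are left as #error placeholders); everywhere else it is silently ignored. The stand-alone sketch below shows the socket-level call the option boils down to, assuming a POSIX system where IPV6_V6ONLY is declared; it is an illustration only, not part of the patch.

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /* Minimal illustration of what {ipv6_v6only, true} ends up doing on a
     * system with IPV6_V6ONLY: one setsockopt/getsockopt pair at the
     * IPPROTO_IPV6 level, as in the driver hunks above. */
    int main(void)
    {
        int s = socket(AF_INET6, SOCK_STREAM, 0);
        int on = 1;
        socklen_t len = sizeof(on);

        if (s < 0) {
            perror("socket");
            return 1;
        }
        if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)) < 0)
            perror("setsockopt(IPV6_V6ONLY)");
        if (getsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &on, &len) == 0)
            printf("IPV6_V6ONLY = %d\n", on);
        return 0;
    }
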
ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize); } -#ifndef VXWORKS - case INET_REQ_GETSERVBYNAME: { /* L1 Name-String L2 Proto-String */ char namebuf[256]; char protobuf[256]; @@ -7901,8 +7827,6 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, return ctl_reply(INET_REP_OK, srv->s_name, len, rbuf, rsize); } -#endif /* !VXWORKS */ - default: return ctl_xerror(EXBADPORT, rbuf, rsize); } @@ -8133,7 +8057,6 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s, /* Some flags must be inherited at this point */ copy_desc->inet.mode = desc->inet.mode; copy_desc->inet.exitf = desc->inet.exitf; - copy_desc->inet.bit8f = desc->inet.bit8f; copy_desc->inet.deliver = desc->inet.deliver; copy_desc->inet.htype = desc->inet.htype; copy_desc->inet.psize = desc->inet.psize; @@ -9838,7 +9761,6 @@ static udp_descriptor* sctp_inet_copy(udp_descriptor* desc, SOCKET s, int* err) /* Some flags must be inherited at this point */ copy_desc->inet.mode = desc->inet.mode; copy_desc->inet.exitf = desc->inet.exitf; - copy_desc->inet.bit8f = desc->inet.bit8f; copy_desc->inet.deliver = desc->inet.deliver; copy_desc->inet.htype = desc->inet.htype; copy_desc->inet.psize = desc->inet.psize; @@ -10240,6 +10162,7 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, } new_udesc->inet.state = INET_STATE_CONNECTED; new_udesc->inet.stype = SOCK_STREAM; + SET_NONBLOCKING(new_udesc->inet.s); inet_reply_ok_port(desc, new_udesc->inet.dport); (*rbuf)[0] = INET_REP; diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c index b250bac4dc..cf7af71b92 100644 --- a/erts/emulator/drivers/unix/unix_efile.c +++ b/erts/emulator/drivers/unix/unix_efile.c @@ -45,23 +45,6 @@ #include <fcntl.h> #endif /* DARWIN */ -#ifdef VXWORKS -#include <ioLib.h> -#include <dosFsLib.h> -#include <nfsLib.h> -#include <sys/stat.h> -/* -** Not nice to include usrLib.h as MANY normal variable names get reported -** as shadowing globals, like 'i' for example. -** Instead we declare the only function we use here -*/ -/* - * #include <usrLib.h> - */ -extern STATUS copy(char *, char *); -#include <errno.h> -#endif - #ifdef SUNOS4 # define getcwd(buf, size) getwd(buf) #endif @@ -93,276 +76,27 @@ extern STATUS copy(char *, char *); #define DIR_MODE 0777 #endif -#ifdef VXWORKS /* Currently only used on vxworks */ - -#define EF_ALLOC(S) driver_alloc((S)) -#define EF_REALLOC(P, S) driver_realloc((P), (S)) -#define EF_SAFE_ALLOC(S) ef_safe_alloc((S)) -#define EF_SAFE_REALLOC(P, S) ef_safe_realloc((P), (S)) -#define EF_FREE(P) do { if((P)) driver_free((P)); } while(0) - -void erl_exit(int n, char *fmt, ...); - -static void *ef_safe_alloc(Uint s) -{ - void *p = EF_ALLOC(s); - if (!p) erl_exit(1, - "unix efile drv: Can't allocate %lu bytes of memory\n", - (unsigned long)s); - return p; -} - -#if 0 /* Currently not used */ - -static void *ef_safe_realloc(void *op, Uint s) -{ - void *p = EF_REALLOC(op, s); - if (!p) erl_exit(1, - "unix efile drv: Can't reallocate %lu bytes of memory\n", - (unsigned long)s); - return p; -} - -#endif /* #if 0 */ -#endif /* #ifdef VXWORKS */ - #define IS_DOT_OR_DOTDOT(s) \ (s[0] == '.' && (s[1] == '\0' || (s[1] == '.' && s[2] == '\0'))) -#ifdef VXWORKS -static int vxworks_to_posix(int vx_errno); -#endif - -/* -** VxWorks (not) strikes again. Too long RESULTING paths -** may give the infamous bus error. Have to check ALL -** filenames and pathnames. No wonder the emulator is slow on -** these cards... 
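
One added line in the packet_inet_ctl hunk above is easy to miss: the newly peeled-off SCTP socket is now put in non-blocking mode explicitly, presumably because a descriptor returned by sctp_peeloff() need not inherit that flag from its parent. On a typical POSIX build SET_NONBLOCKING() amounts to something like the fcntl() helper sketched below; the helper and its use are illustrative only, the real macro lives in the emulator's sys headers.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* Illustrative equivalent of putting a freshly obtained socket into
     * non-blocking mode, as the SCTP hunk above does for new_udesc. */
    static int set_nonblocking(int fd)
    {
        int flags = fcntl(fd, F_GETFL, 0);
        if (flags < 0)
            return -1;
        return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
    }

    int main(void)
    {
        int s = socket(AF_INET, SOCK_DGRAM, 0);
        if (s < 0 || set_nonblocking(s) < 0) {
            perror("set_nonblocking");
            return 1;
        }
        puts("socket is now non-blocking");
        return 0;
    }
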
-*/ -#ifdef VXWORKS -#define CHECK_PATHLEN(Name, ErrInfo) \ - if (path_size(Name) > PATH_MAX) { \ - errno = ENAMETOOLONG; \ - return check_error(-1, ErrInfo); \ - } -#else -#define CHECK_PATHLEN(X,Y) /* Nothing */ -#endif - static int check_error(int result, Efile_error* errInfo); static int check_error(int result, Efile_error *errInfo) { if (result < 0) { -#ifdef VXWORKS - errInfo->posix_errno = errInfo->os_errno = vxworks_to_posix(errno); -#else errInfo->posix_errno = errInfo->os_errno = errno; -#endif return 0; } return 1; } -#ifdef VXWORKS - -/* - * VxWorks has different error codes for different file systems. - * We map those to POSIX ones. - */ -static int -vxworks_to_posix(int vx_errno) -{ - DEBUGF(("[vxworks_to_posix] vx_errno: %08x\n", vx_errno)); - switch (vx_errno) { - /* dosFsLib mapping */ -#ifdef S_dosFsLib_FILE_ALREADY_EXISTS - case S_dosFsLib_FILE_ALREADY_EXISTS: return EEXIST; -#else - case S_dosFsLib_FILE_EXISTS: return EEXIST; -#endif -#ifdef S_dosFsLib_BAD_DISK - case S_dosFsLib_BAD_DISK: return EIO; -#endif -#ifdef S_dosFsLib_CANT_CHANGE_ROOT - case S_dosFsLib_CANT_CHANGE_ROOT: return EINVAL; -#endif -#ifdef S_dosFsLib_NO_BLOCK_DEVICE - case S_dosFsLib_NO_BLOCK_DEVICE: return ENOTBLK; -#endif -#ifdef S_dosFsLib_BAD_SEEK - case S_dosFsLib_BAD_SEEK: return ESPIPE; -#endif - case S_dosFsLib_VOLUME_NOT_AVAILABLE: return ENXIO; - case S_dosFsLib_DISK_FULL: return ENOSPC; - case S_dosFsLib_FILE_NOT_FOUND: return ENOENT; - case S_dosFsLib_NO_FREE_FILE_DESCRIPTORS: return ENFILE; - case S_dosFsLib_INVALID_NUMBER_OF_BYTES: return EINVAL; - case S_dosFsLib_ILLEGAL_NAME: return EINVAL; - case S_dosFsLib_CANT_DEL_ROOT: return EACCES; - case S_dosFsLib_NOT_FILE: return EISDIR; - case S_dosFsLib_NOT_DIRECTORY: return ENOTDIR; - case S_dosFsLib_NOT_SAME_VOLUME: return EXDEV; - case S_dosFsLib_READ_ONLY: return EACCES; - case S_dosFsLib_ROOT_DIR_FULL: return ENOSPC; - case S_dosFsLib_DIR_NOT_EMPTY: return EEXIST; - case S_dosFsLib_NO_LABEL: return ENXIO; - case S_dosFsLib_INVALID_PARAMETER: return EINVAL; - case S_dosFsLib_NO_CONTIG_SPACE: return ENOSPC; - case S_dosFsLib_FD_OBSOLETE: return EBADF; - case S_dosFsLib_DELETED: return EINVAL; - case S_dosFsLib_INTERNAL_ERROR: return EIO; - case S_dosFsLib_WRITE_ONLY: return EACCES; - /* nfsLib mapping - is needed since Windriver has used */ - /* inconsistent error codes (errno.h/nfsLib.h). */ - case S_nfsLib_NFS_OK: return 0; - case S_nfsLib_NFSERR_PERM: return EPERM; - case S_nfsLib_NFSERR_NOENT: return ENOENT; - case S_nfsLib_NFSERR_IO: return EIO; - case S_nfsLib_NFSERR_NXIO: return ENXIO; -#ifdef S_nfsLib_NFSERR_ACCES - case S_nfsLib_NFSERR_ACCES: return EACCES; -#else - case S_nfsLib_NFSERR_ACCESS: return EACCES; -#endif - case S_nfsLib_NFSERR_EXIST: return EEXIST; - case S_nfsLib_NFSERR_NODEV: return ENODEV; - case S_nfsLib_NFSERR_NOTDIR: return ENOTDIR; - case S_nfsLib_NFSERR_ISDIR: return EISDIR; - case S_nfsLib_NFSERR_FBIG: return EFBIG; - case S_nfsLib_NFSERR_NOSPC: return ENOSPC; - case S_nfsLib_NFSERR_ROFS: return EROFS; - case S_nfsLib_NFSERR_NAMETOOLONG: return ENAMETOOLONG; - case S_nfsLib_NFSERR_NOTEMPTY: return EEXIST; - case S_nfsLib_NFSERR_DQUOT: return ENOSPC; - case S_nfsLib_NFSERR_STALE: return EINVAL; - case S_nfsLib_NFSERR_WFLUSH: return ENXIO; - /* And sometimes (...) 
the error codes are from ioLib (as in the */ - /* case of the (for nfsLib) unimplemented rename function) */ - case S_ioLib_DISK_NOT_PRESENT: return EIO; -#if S_ioLib_DISK_NOT_PRESENT != S_ioLib_NO_DRIVER - case S_ioLib_NO_DRIVER: return ENXIO; -#endif - case S_ioLib_UNKNOWN_REQUEST: return ENOSYS; - case S_ioLib_DEVICE_TIMEOUT: return EIO; -#ifdef S_ioLib_UNFORMATED - /* Added (VxWorks 5.2 -> 5.3.1) */ - #if S_ioLib_UNFORMATED != S_ioLib_DEVICE_TIMEOUT - case S_ioLib_UNFORMATED: return EIO; - #endif -#endif -#if S_ioLib_DEVICE_TIMEOUT != S_ioLib_DEVICE_ERROR - case S_ioLib_DEVICE_ERROR: return ENXIO; -#endif - case S_ioLib_WRITE_PROTECTED: return EACCES; - case S_ioLib_NO_FILENAME: return EINVAL; - case S_ioLib_CANCELLED: return EINTR; - case S_ioLib_NO_DEVICE_NAME_IN_PATH: return EINVAL; - case S_ioLib_NAME_TOO_LONG: return ENAMETOOLONG; -#ifdef S_objLib_OBJ_UNAVAILABLE - case S_objLib_OBJ_UNAVAILABLE: return ENOENT; -#endif - - /* Temporary workaround for a weird error in passFs - (VxWorks Simsparc only). File operation fails because of - ENOENT, but errno is not set. */ -#ifdef SIMSPARCSOLARIS - case 0: return ENOENT; -#endif - - } - /* If the error code matches none of the above, assume */ - /* it is a POSIX one already. The upper bits (>=16) are */ - /* cleared since VxWorks uses those bits to indicate in */ - /* what module the error occured. */ - return vx_errno & 0xffff; -} - -static int -vxworks_enotsup(Efile_error *errInfo) -{ - errInfo->posix_errno = errInfo->os_errno = ENOTSUP; - return 0; -} - -static int -count_path_length(char *pathname, char *pathname2) -{ - static int stack[PATH_MAX / 2 + 1]; - int sp = 0; - char *tmp; - char *cpy = NULL; - int i; - int sum; - for(i = 0;i < 2;++i) { - if (!i) { - cpy = EF_SAFE_ALLOC(strlen(pathname)+1); - strcpy(cpy, pathname); - } else if (pathname2 != NULL) { - EF_FREE(cpy); - cpy = EF_SAFE_ALLOC(strlen(pathname2)+1); - strcpy(cpy, pathname2); - } else - break; - - for (tmp = strtok(cpy,"/"); tmp != NULL; tmp = strtok(NULL,"/")) { - if (!strcmp(tmp,"..") && sp > 0) - --sp; - else if (strcmp(tmp,".")) - stack[sp++] = strlen(tmp); - } - } - if (cpy != NULL) - EF_FREE(cpy); - sum = 0; - for(i = 0;i < sp; ++i) - sum += stack[i]+1; - return (sum) ? sum : 1; -} - -static int -path_size(char *pathname) -{ - static char currdir[PATH_MAX+2]; - if (*pathname == '/') - return count_path_length(pathname,NULL); - ioDefPathGet(currdir); - strcat(currdir,"/"); - return count_path_length(currdir,pathname); -} - -#endif /* VXWORKS */ - int efile_mkdir(Efile_error* errInfo, /* Where to return error codes. */ char* name) /* Name of directory to create. */ { - CHECK_PATHLEN(name,errInfo); #ifdef NO_MKDIR_MODE -#ifdef VXWORKS - /* This is a VxWorks/nfs workaround for erl_tar to create - * non-existant directories. (of some reason (...) VxWorks - * returns, the *non-module-prefixed*, 0xd code when - * trying to create a directory in a directory that doesn't exist). 
- * (see efile_openfile) - */ - if (mkdir(name) < 0) { - struct stat sb; - if (name[0] == '\0') { - /* Return the correct error code enoent */ - errno = S_nfsLib_NFSERR_NOENT; - } else if (stat(name, &sb) == OK) { - errno = S_nfsLib_NFSERR_EXIST; - } else if((strchr(name, '/') != NULL) && (errno == 0xd)) { - /* Return the correct error code enoent */ - errno = S_nfsLib_NFSERR_NOENT; - } - return check_error(-1, errInfo); - } else return 1; -#else return check_error(mkdir(name), errInfo); -#endif #else return check_error(mkdir(name, DIR_MODE), errInfo); #endif @@ -372,16 +106,9 @@ int efile_rmdir(Efile_error* errInfo, /* Where to return error codes. */ char* name) /* Name of directory to delete. */ { - CHECK_PATHLEN(name, errInfo); if (rmdir(name) == 0) { return 1; } -#ifdef VXWORKS - if (name[0] == '\0') { - /* Return the correct error code enoent */ - errno = S_nfsLib_NFSERR_NOENT; - } -#else if (errno == ENOTEMPTY) { errno = EEXIST; } @@ -401,7 +128,6 @@ efile_rmdir(Efile_error* errInfo, /* Where to return error codes. */ } errno = saved_errno; } -#endif return check_error(-1, errInfo); } @@ -409,7 +135,6 @@ int efile_delete_file(Efile_error* errInfo, /* Where to return error codes. */ char* name) /* Name of file to delete. */ { - CHECK_PATHLEN(name,errInfo); if (unlink(name) == 0) { return 1; } @@ -457,32 +182,13 @@ efile_rename(Efile_error* errInfo, /* Where to return error codes. */ char* src, /* Original name. */ char* dst) /* New name. */ { - CHECK_PATHLEN(src,errInfo); - CHECK_PATHLEN(dst,errInfo); -#ifdef VXWORKS - - /* First check if src == dst, if so, just return. */ - /* VxWorks dos file system destroys the file otherwise, */ - /* VxWorks nfs file system rename doesn't work at all. */ - if(strcmp(src, dst) == 0) - return 1; -#endif if (rename(src, dst) == 0) { return 1; } -#ifdef VXWORKS - /* nfs for VxWorks doesn't support rename. We try to emulate it */ - /* (by first copying src to dst and then deleting src). */ - if(errno == S_ioLib_UNKNOWN_REQUEST && /* error code returned - by ioLib (!) */ - copy(src, dst) == OK && - unlink(src) == OK) - return 1; -#endif if (errno == ENOTEMPTY) { errno = EEXIST; } -#if defined (sparc) && !defined(VXWORKS) +#if defined (sparc) /* * SunOS 4.1.4 reports overwriting a non-empty directory with a * directory as EINVAL instead of EEXIST (first rule out the correct @@ -543,7 +249,6 @@ int efile_chdir(Efile_error* errInfo, /* Where to return error codes. */ char* name) /* Name of directory to make current. */ { - CHECK_PATHLEN(name, errInfo); return check_error(chdir(name), errInfo); } @@ -600,8 +305,6 @@ efile_readdir(Efile_error* errInfo, /* Where to return error codes. */ * If this is the first call, we must open the directory. */ - CHECK_PATHLEN(name, errInfo); - if (*p_dir_handle == NULL) { dp = opendir(name); if (dp == NULL) @@ -641,26 +344,8 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */ struct stat statbuf; int fd; int mode; /* Open mode. */ -#ifdef VXWORKS - char pathbuff[PATH_MAX+2]; - char sbuff[PATH_MAX*2]; - char *totbuff = sbuff; - int nameneed; -#endif - - - CHECK_PATHLEN(name, errInfo); - -#ifdef VXWORKS - /* Have to check that it's not a directory. */ - if (stat(name,&statbuf) != ERROR && ISDIR(statbuf)) { - errno = EISDIR; - return check_error(-1, errInfo); - } -#endif if (stat(name, &statbuf) >= 0 && !ISREG(statbuf)) { -#if !defined(VXWORKS) && !defined(OSE) /* * For UNIX only, here is some ugly code to allow * /dev/null to be opened as a file. 
@@ -677,12 +362,9 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */ } } if (!(dev_null_ino && statbuf.st_ino == dev_null_ino)) { -#endif errno = EISDIR; return check_error(-1, errInfo); -#if !defined(VXWORKS) && !defined(OSE) } -#endif } switch (flags & (EFILE_MODE_READ|EFILE_MODE_WRITE)) { @@ -706,49 +388,14 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */ if (flags & EFILE_MODE_APPEND) { mode &= ~O_TRUNC; -#ifndef VXWORKS - mode |= O_APPEND; /* Dont make VxWorks think things it shouldn't */ -#endif + mode |= O_APPEND; } if (flags & EFILE_MODE_EXCL) { mode |= O_EXCL; } -#ifdef VXWORKS - if (*name != '/') { - /* Make sure it is an absolute pathname, because ftruncate needs it */ - ioDefPathGet(pathbuff); - strcat(pathbuff,"/"); - nameneed = strlen(pathbuff) + strlen(name) + 1; - if (nameneed > PATH_MAX*2) - totbuff = EF_SAFE_ALLOC(nameneed); - strcpy(totbuff,pathbuff); - strcat(totbuff,name); - fd = open(totbuff, mode, FILE_MODE); - if (totbuff != sbuff) - EF_FREE(totbuff); - } else { - fd = open(name, mode, FILE_MODE); - } -#else fd = open(name, mode, FILE_MODE); -#endif - -#ifdef VXWORKS - - /* This is a VxWorks/nfs workaround for erl_tar to create - * non-existant directories. (of some reason (...) VxWorks - * returns, the *non-module-prefixed*, 0xd code when - * trying to write a file in a directory that doesn't exist). - * (see efile_mkdir) - */ - if ((fd < 0) && (strchr(name, '/') != NULL) && (errno == 0xd)) { - /* Return the correct error code enoent */ - errno = S_nfsLib_NFSERR_NOENT; - return check_error(-1, errInfo); - } -#endif if (!check_error(fd, errInfo)) return 0; @@ -797,11 +444,7 @@ efile_fsync(Efile_error *errInfo, /* Where to return error codes. */ int fd) /* File descriptor for file to sync. */ { #ifdef NO_FSYNC -#ifdef VXWORKS - return check_error(ioctl(fd, FIOSYNC, 0), errInfo); -#else - undefined fsync -#endif /* VXWORKS */ + undefined fsync /* XXX: Really? */ #else #if defined(DARWIN) && defined(F_FULLFSYNC) return check_error(fcntl(fd, F_FULLFSYNC), errInfo); @@ -818,21 +461,8 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo, struct stat statbuf; /* Information about the file */ int result; -#ifdef VXWORKS - if (*name == '\0') { - errInfo->posix_errno = errInfo->os_errno = ENOENT; - return 0; - } -#endif - - CHECK_PATHLEN(name, errInfo); - if (info_for_link) { -#if (defined(VXWORKS)) - result = stat(name, &statbuf); -#else result = lstat(name, &statbuf); -#endif } else { result = stat(name, &statbuf); } @@ -849,19 +479,9 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo, #ifdef NO_ACCESS /* Just look at read/write access for owner. 
*/ -#ifdef VXWORKS - - pInfo->access = FA_NONE; - if(statbuf.st_mode & S_IRUSR) - pInfo->access |= FA_READ; - if(statbuf.st_mode & S_IWUSR) - pInfo->access |= FA_WRITE; - -#else pInfo->access = ((statbuf.st_mode >> 6) & 07) >> 1; -#endif /* VXWORKS */ #else pInfo->access = FA_NONE; if (access(name, R_OK) == 0) @@ -902,35 +522,6 @@ efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name) { struct utimbuf tval; - CHECK_PATHLEN(name, errInfo); - -#ifdef VXWORKS - - if (pInfo->mode != -1) { - int fd; - struct stat statbuf; - - fd = open(name, O_RDONLY, 0); - if (!check_error(fd, errInfo)) - return 0; - if (fstat(fd, &statbuf) < 0) { - close(fd); - return check_error(-1, errInfo); - } - if (pInfo->mode & S_IWUSR) { - /* clear read only bit */ - statbuf.st_attrib &= ~DOS_ATTR_RDONLY; - } else { - /* set read only bit */ - statbuf.st_attrib |= DOS_ATTR_RDONLY; - } - /* This should work for dos files but not for nfs ditos, so don't - * report errors (to avoid problems when running e.g. erl_tar) - */ - ioctl(fd, FIOATTRIBSET, statbuf.st_attrib); - close(fd); - } -#else /* * On some systems chown will always fail for a non-root user unless * POSIX_CHOWN_RESTRICTED is not set. Others will succeed as long as @@ -952,20 +543,10 @@ efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name) } } -#endif /* !VXWORKS */ - tval.actime = pInfo->accessTime; tval.modtime = pInfo->modifyTime; -#ifdef VXWORKS - /* VxWorks' utime doesn't work when the file is a nfs mounted - * one, don't report error if utime fails. - */ - utime(name, &tval); - return 1; -#else return check_error(utime(name, &tval), errInfo); -#endif } @@ -979,11 +560,6 @@ efile_write(Efile_error* errInfo, /* Where to return error codes. */ { ssize_t written; /* Bytes written in last operation. */ -#ifdef VXWORKS - if (flags & EFILE_MODE_APPEND) { - lseek(fd, 0, SEEK_END); /* Naive append emulation on VXWORKS */ - } -#endif while (count > 0) { if ((written = write(fd, buf, count)) < 0) { if (errno != EINTR) @@ -1012,12 +588,6 @@ efile_writev(Efile_error* errInfo, /* Where to return error codes */ ASSERT(iovcnt >= 0); -#ifdef VXWORKS - if (flags & EFILE_MODE_APPEND) { - lseek(fd, 0, SEEK_END); /* Naive append emulation on VXWORKS */ - } -#endif - while (cnt < iovcnt) { if ((! iov[cnt].iov_base) || (iov[cnt].iov_len <= 0)) { /* Empty buffer - skip */ @@ -1226,118 +796,6 @@ efile_seek(Efile_error* errInfo, /* Where to return error codes. 
*/ int efile_truncate_file(Efile_error* errInfo, int *fd, int flags) { -#ifdef VXWORKS - off_t offset; - char namebuf[PATH_MAX+1]; - char namebuf2[PATH_MAX+10]; - int new; - int dummy; - int i; - int left; - static char buff[1024]; - struct stat st; - Efile_error tmperr; - - if ((offset = lseek(*fd, 0, 1)) < 0) { - return check_error((int) offset,errInfo); - } - if (ftruncate(*fd, offset) < 0) { - if (vxworks_to_posix(errno) != EINVAL) { - return check_error(-1, errInfo); - } - /* - ** Kludge - */ - if(ioctl(*fd,FIOGETNAME,(int) namebuf) < 0) { - return check_error(-1, errInfo); - } - for(i=0;i<1000;++i) { - sprintf(namebuf2,"%s%d",namebuf,i); - CHECK_PATHLEN(namebuf2,errInfo); - if (stat(namebuf2,&st) < 0) { - break; - } - } - if (i > 1000) { - errno = EINVAL; - return check_error(-1, errInfo); - } - if (close(*fd) < 0) { - return check_error(-1, errInfo); - } - if (efile_rename(&tmperr,namebuf,namebuf2) < 0) { - i = check_error(-1,&tmperr); - if (!efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE, - fd,&dummy)) { - *fd = -1; - } else { - *errInfo = tmperr; - } - return i; - } - if ((*fd = open(namebuf2, O_RDONLY, 0)) < 0) { - i = check_error(-1,errInfo); - efile_rename(&tmperr,namebuf2,namebuf); /* at least try */ - if (!efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE, - fd,&dummy)) { - *fd = -1; - } else { - lseek(*fd,offset,SEEK_SET); - } - return i; - } - /* Point of no return... */ - - if ((new = open(namebuf,O_RDWR | O_CREAT, FILE_MODE)) < 0) { - close(*fd); - *fd = -1; - return 0; - } - left = offset; - - while (left) { - if ((i = read(*fd,buff,(left > 1024) ? 1024 : left)) < 0) { - i = check_error(-1,errInfo); - close(new); - close(*fd); - unlink(namebuf); - efile_rename(&tmperr,namebuf2,namebuf); /* at least try */ - if (!efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE, - fd,&dummy)) { - *fd = -1; - } else { - lseek(*fd,offset,SEEK_SET); - } - return i; - } - left -= i; - if (write(new,buff,i) < 0) { - i = check_error(-1,errInfo); - close(new); - close(*fd); - unlink(namebuf); - rename(namebuf2,namebuf); /* at least try */ - if (!efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE, - fd,&dummy)) { - *fd = -1; - } else { - lseek(*fd,offset,SEEK_SET); - } - return i; - } - } - close(*fd); - unlink(namebuf2); - close(new); - i = efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE,fd, - &dummy); - if (i) { - lseek(*fd,offset,SEEK_SET); - } - return i; - } - return 1; -#else #ifndef NO_FTRUNCATE off_t offset; @@ -1347,15 +805,11 @@ efile_truncate_file(Efile_error* errInfo, int *fd, int flags) #else return 1; #endif -#endif } int efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size) { -#ifdef VXWORKS - return vxworks_enotsup(errInfo); -#else int len; ASSERT(size > 0); len = readlink(name, buffer, size-1); @@ -1364,7 +818,6 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size) } buffer[len] = '\0'; return 1; -#endif } int @@ -1377,21 +830,13 @@ efile_altname(Efile_error* errInfo, char* name, char* buffer, size_t size) int efile_link(Efile_error* errInfo, char* old, char* new) { -#ifdef VXWORKS - return vxworks_enotsup(errInfo); -#else return check_error(link(old, new), errInfo); -#endif } int efile_symlink(Efile_error* errInfo, char* old, char* new) { -#ifdef VXWORKS - return vxworks_enotsup(errInfo); -#else return check_error(symlink(old, new), errInfo); -#endif } int @@ -1406,8 +851,8 @@ efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset, } #ifdef HAVE_SENDFILE -// For some reason the maximum 
size_t cannot be used as the max size -// 3GB seems to work on all platforms +/* For some reason the maximum size_t cannot be used as the max size + 3GB seems to work on all platforms */ #define SENDFILE_CHUNK_SIZE ((1UL << 30) -1) /* @@ -1444,7 +889,7 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd, #if defined(__linux__) ssize_t retval; do { - // check if *nbytes is 0 or greater than chunk size + /* check if *nbytes is 0 or greater than chunk size */ if (*nbytes == 0 || *nbytes > SENDFILE_CHUNK_SIZE) retval = sendfile(out_fd, in_fd, offset, SENDFILE_CHUNK_SIZE); else @@ -1455,7 +900,7 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd, } } while (retval == SENDFILE_CHUNK_SIZE); if (written != 0) { - // -1 is not returned by the linux API so we have to simulate it + /* -1 is not returned by the linux API so we have to simulate it */ retval = -1; errno = EAGAIN; } @@ -1468,23 +913,29 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd, do { fdrec.sfv_off = *offset; len = 0; - // check if *nbytes is 0 or greater than chunk size + /* check if *nbytes is 0 or greater than chunk size */ if (*nbytes == 0 || *nbytes > SENDFILE_CHUNK_SIZE) fdrec.sfv_len = SENDFILE_CHUNK_SIZE; else fdrec.sfv_len = *nbytes; retval = sendfilev(out_fd, &fdrec, 1, &len); - if (retval != -1 || errno == EAGAIN || errno == EINTR) { + + /* Sometimes sendfilev can return -1 and still send data. + When that happens we just pretend that no error happend. */ + if (retval != -1 || errno == EAGAIN || errno == EINTR || + len != 0) { *offset += len; *nbytes -= len; written += len; + if (errno != EAGAIN && errno != EINTR && len != 0) + retval = len; } } while (len == SENDFILE_CHUNK_SIZE); #elif defined(DARWIN) int retval; off_t len; do { - // check if *nbytes is 0 or greater than chunk size + /* check if *nbytes is 0 or greater than chunk size */ if(*nbytes > SENDFILE_CHUNK_SIZE) len = SENDFILE_CHUNK_SIZE; else diff --git a/erts/emulator/drivers/win32/registry_drv.c b/erts/emulator/drivers/win32/registry_drv.c index 1fad34e380..5b200ebd32 100644 --- a/erts/emulator/drivers/win32/registry_drv.c +++ b/erts/emulator/drivers/win32/registry_drv.c @@ -344,7 +344,7 @@ fix_value_result(RegPort* rp, LONG result, DWORD type, #ifdef DEBUG if (ok != ERROR_SUCCESS) { char buff[256]; - sprintf(buff,"Failure in registry_drv line %d, error = %d", + erts_snprintf(buff, sizeof(buff), "Failure in registry_drv line %d, error = %d", __LINE__, GetLastError()); MessageBox(NULL, buff, "Internal error", MB_OK); ASSERT(ok == ERROR_SUCCESS); diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4 index ec25c0b9b7..0de69a617f 100644 --- a/erts/emulator/hipe/hipe_amd64_bifs.m4 +++ b/erts/emulator/hipe/hipe_amd64_bifs.m4 @@ -2,7 +2,7 @@ changecom(`/*', `*/')dnl /* * %CopyrightBegin% * - * Copyright Ericsson AB 2004-2011. All Rights Reserved. + * Copyright Ericsson AB 2004-2012. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -30,12 +30,12 @@ include(`hipe/hipe_amd64_asm.m4') #define TEST_GOT_EXN cmpq $THE_NON_VALUE, %rax #endif' -define(TEST_GOT_MBUF,`movq P_MBUF(P), %rdx # `TEST_GOT_MBUF' +define(TEST_GOT_MBUF,`movq P_MBUF(P), %rdx /* `TEST_GOT_MBUF' */ testq %rdx, %rdx jnz 3f 2:') define(HANDLE_GOT_MBUF,` -3: call nbif_$1_gc_after_bif # `HANDLE_GOT_MBUF' +3: call nbif_$1_gc_after_bif /* `HANDLE_GOT_MBUF' */ jmp 2b') `#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c index 23ced284bf..1562748f2d 100644 --- a/erts/emulator/hipe/hipe_bif0.c +++ b/erts/emulator/hipe/hipe_bif0.c @@ -609,8 +609,8 @@ static Uint *hipe_find_emu_address(Eterm mod, Eterm name, unsigned int arity) Uint *code_base; int i, n; - modp = erts_get_module(mod); - if (modp == NULL || (code_base = modp->code) == NULL) + modp = erts_get_module(mod, erts_active_code_ix()); + if (modp == NULL || (code_base = modp->curr.code) == NULL) return NULL; n = code_base[MI_NUM_FUNCTIONS]; for (i = 0; i < n; ++i) { @@ -648,7 +648,7 @@ static void *hipe_get_emu_address(Eterm m, Eterm f, unsigned int arity, int is_r /* if not found, stub it via the export entry */ /* no lock needed around erts_export_get_or_make_stub() */ Export *export_entry = erts_export_get_or_make_stub(m, f, arity); - address = export_entry->address; + address = export_entry->addressv[erts_active_code_ix()]; } return address; } @@ -1583,14 +1583,6 @@ BIF_RETTYPE hipe_nonclosure_address(BIF_ALIST_2) goto badfun; m = ep->code[0]; f = ep->code[1]; - } else if (hdr == make_arityval(2)) { - Eterm *tp = tuple_val(BIF_ARG_1); - m = tp[1]; - f = tp[2]; - if (is_not_atom(m) || is_not_atom(f)) - goto badfun; - if (!erts_find_export_entry(m, f, BIF_ARG_2)) - goto badfun; } else goto badfun; address = hipe_get_na_nofail(m, f, BIF_ARG_2, 1); diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c index 7eab1ec2ad..e09988e2c5 100644 --- a/erts/emulator/hipe/hipe_bif2.c +++ b/erts/emulator/hipe/hipe_bif2.c @@ -173,3 +173,10 @@ BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1) #endif /* ERTS_ENABLE_LOCK_CHECK && ERTS_SMP */ + +BIF_RETTYPE hipe_bifs_debug_native_called_2(BIF_ALIST_2) +{ + erts_printf("hipe_debug_native_called: %T(%T)\r\n", BIF_ARG_1, BIF_ARG_2); + BIF_RET(am_ok); +} + diff --git a/erts/emulator/hipe/hipe_bif2.tab b/erts/emulator/hipe/hipe_bif2.tab index c71b02fbeb..45a395bf57 100644 --- a/erts/emulator/hipe/hipe_bif2.tab +++ b/erts/emulator/hipe/hipe_bif2.tab @@ -29,3 +29,4 @@ bif hipe_bifs:show_term/1 bif hipe_bifs:in_native/0 bif hipe_bifs:modeswitch_debug_on/0 bif hipe_bifs:modeswitch_debug_off/0 +bif hipe_bifs:debug_native_called/2 diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4 index 942fa0c5cb..ab078b9583 100644 --- a/erts/emulator/hipe/hipe_bif_list.m4 +++ b/erts/emulator/hipe/hipe_bif_list.m4 @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2004-2011. All Rights Reserved. + * Copyright Ericsson AB 2004-2012. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -165,6 +165,7 @@ gc_bif_interface_2(nbif_put_2, put_2) gc_bif_interface_1(nbif_hipe_bifs_show_nstack_1, hipe_show_nstack_1) gc_bif_interface_1(nbif_hipe_bifs_show_pcb_1, hipe_bifs_show_pcb_1) gc_bif_interface_0(nbif_hipe_bifs_nstack_used_size_0, hipe_bifs_nstack_used_size_0) +gc_bif_interface_2(nbif_hipe_bifs_debug_native_called, hipe_bifs_debug_native_called_2) /* * Arithmetic operators called indirectly by the HiPE compiler. @@ -245,7 +246,7 @@ noproc_primop_interface_5(nbif_bs_put_big_integer, hipe_bs_put_big_integer) gc_bif_interface_0(nbif_check_get_msg, hipe_check_get_msg) -#ifdef NO_FPE_SIGNALS +#`ifdef' NO_FPE_SIGNALS nocons_nofail_primop_interface_0(nbif_emulate_fpe, hipe_emulate_fpe) #endif diff --git a/erts/emulator/hipe/hipe_debug.c b/erts/emulator/hipe/hipe_debug.c index 7ca11f8c6c..37615bf718 100644 --- a/erts/emulator/hipe/hipe_debug.c +++ b/erts/emulator/hipe/hipe_debug.c @@ -188,8 +188,6 @@ void hipe_print_pcb(Process *p) U("old_htop ", old_htop); U("old_head ", old_heap); U("min_heap_..", min_heap_size); - U("status ", status); - U("rstatus ", rstatus); U("rcount ", rcount); U("id ", id); U("prio ", prio); diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c index 6a3ce5608f..3c782b2f56 100644 --- a/erts/emulator/hipe/hipe_mode_switch.c +++ b/erts/emulator/hipe/hipe_mode_switch.c @@ -360,7 +360,8 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[]) p->flags &= ~F_HIBERNATE_SCHED; goto do_schedule; } - if (p->status == P_WAITING) { + + if (!(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_ACTIVE)) { for (i = 0; i < p->arity; ++i) p->arg_reg[i] = reg[i]; goto do_schedule; @@ -451,10 +452,6 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[]) case HIPE_MODE_SWITCH_RES_SUSPEND: { p->i = hipe_beam_pc_resume; p->arity = 0; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); - if (p->status != P_SUSPENDED) - erts_add_to_runq(p); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); goto do_schedule; } case HIPE_MODE_SWITCH_RES_WAIT: @@ -470,7 +467,8 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[]) #endif p->i = hipe_beam_pc_resume; p->arity = 0; - p->status = P_WAITING; + erts_smp_atomic32_read_band_relb(&p->state, + ~ERTS_PSFLG_ACTIVE); erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); do_schedule: { diff --git a/erts/emulator/hipe/hipe_native_bif.h b/erts/emulator/hipe/hipe_native_bif.h index 9e3a156fbc..3f460a5a5c 100644 --- a/erts/emulator/hipe/hipe_native_bif.h +++ b/erts/emulator/hipe/hipe_native_bif.h @@ -110,6 +110,9 @@ int hipe_bs_put_big_integer(Eterm, Uint, byte*, unsigned, unsigned); AEXTERN(Eterm,nbif_check_get_msg,(Process*)); Eterm hipe_check_get_msg(Process*); +AEXTERN(BIF_RETTYPE,nbif_hipe_bifs_debug_native_called,(Process*,Eterm,Eterm)); +BIF_RETTYPE hipe_bifs_debug_native_called_2(BIF_ALIST_2); + /* * SMP-specific stuff */ diff --git a/erts/emulator/hipe/hipe_primops.h b/erts/emulator/hipe/hipe_primops.h index 38509c105b..52b4681cfe 100644 --- a/erts/emulator/hipe/hipe_primops.h +++ b/erts/emulator/hipe/hipe_primops.h @@ -80,6 +80,7 @@ PRIMOP_LIST(am_fclearerror_error, &nbif_fclearerror_error) #ifdef NO_FPE_SIGNALS PRIMOP_LIST(am_emulate_fpe, &nbif_emulate_fpe) #endif +PRIMOP_LIST(am_debug_native_called, &nbif_hipe_bifs_debug_native_called) #if defined(__sparc__) #include "hipe_sparc_primops.h" diff --git 
a/erts/emulator/hipe/hipe_stack.c b/erts/emulator/hipe/hipe_stack.c index da462a64e1..53c316ba52 100644 --- a/erts/emulator/hipe/hipe_stack.c +++ b/erts/emulator/hipe/hipe_stack.c @@ -130,7 +130,7 @@ struct sdesc *hipe_decode_sdesc(Eterm arg) struct sdesc *sdesc; if (is_not_tuple(arg) || - (tuple_val(arg))[0] != make_arityval(5) || + (tuple_val(arg))[0] != make_arityval(6) || term_to_Uint((tuple_val(arg))[1], &ra) == 0 || term_to_Uint((tuple_val(arg))[2], &exnra) == 0 || is_not_small((tuple_val(arg))[3]) || @@ -183,5 +183,13 @@ struct sdesc *hipe_decode_sdesc(Eterm arg) off = unsigned_val(live[i]); sdesc->livebits[off / 32] |= (1 << (off & 31)); } +#ifdef DEBUG + { + Eterm mfa_tpl = tuple_val(arg)[6]; + sdesc->dbg_M = tuple_val(mfa_tpl)[1]; + sdesc->dbg_F = tuple_val(mfa_tpl)[2]; + sdesc->dbg_A = tuple_val(mfa_tpl)[3]; + } +#endif return sdesc; } diff --git a/erts/emulator/hipe/hipe_stack.h b/erts/emulator/hipe/hipe_stack.h index c4f2aacd8c..66f9f04c73 100644 --- a/erts/emulator/hipe/hipe_stack.h +++ b/erts/emulator/hipe/hipe_stack.h @@ -35,6 +35,10 @@ struct sdesc { struct sdesc *next; /* hash collision chain */ } bucket; unsigned int summary; /* frame size, exn handler presence flag, arity */ +#ifdef DEBUG + Eterm dbg_M, dbg_F; + unsigned dbg_A; +#endif unsigned int livebits[1]; /* size depends on arch & data in summary field */ }; diff --git a/erts/emulator/hipe/hipe_x86.c b/erts/emulator/hipe/hipe_x86.c index 24d232c968..4281730ae2 100644 --- a/erts/emulator/hipe/hipe_x86.c +++ b/erts/emulator/hipe/hipe_x86.c @@ -265,7 +265,7 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity) void hipe_arch_print_pcb(struct hipe_process_state *p) { #define U(n,x) \ - printf(" % 4d | %s | 0x%08x | |\r\n", offsetof(struct hipe_process_state,x), n, (unsigned)p->x) + printf(" % 4d | %s | 0x%08x | |\r\n", (int)offsetof(struct hipe_process_state,x), n, (unsigned)p->x) U("ncsp ", ncsp); U("narity ", narity); #undef U diff --git a/erts/emulator/hipe/hipe_x86_bifs.m4 b/erts/emulator/hipe/hipe_x86_bifs.m4 index 3cb7d67be0..dd6980f555 100644 --- a/erts/emulator/hipe/hipe_x86_bifs.m4 +++ b/erts/emulator/hipe/hipe_x86_bifs.m4 @@ -2,7 +2,7 @@ changecom(`/*', `*/')dnl /* * %CopyrightBegin% * - * Copyright Ericsson AB 2001-2011. All Rights Reserved. + * Copyright Ericsson AB 2001-2012. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -35,12 +35,12 @@ include(`hipe/hipe_x86_asm.m4') # define CALL_BIF(F) call CSYM(F) #endif' -define(TEST_GOT_MBUF,`movl P_MBUF(P), %edx # `TEST_GOT_MBUF' +define(TEST_GOT_MBUF,`movl P_MBUF(P), %edx /* `TEST_GOT_MBUF' */ testl %edx, %edx jnz 3f 2:') define(HANDLE_GOT_MBUF,` -3: call nbif_$1_gc_after_bif # `HANDLE_GOT_MBUF' +3: call nbif_$1_gc_after_bif /* `HANDLE_GOT_MBUF' */ jmp 2b') /* @@ -70,7 +70,7 @@ ASYM($1): NBIF_ARG_REG(0,P) NBIF_ARG(2,1,0) lea 8(%esp), %eax - NBIF_ARG_REG(1,%eax) # BIF__ARGS + NBIF_ARG_REG(1,%eax) /* BIF__ARGS */ CALL_BIF($2) TEST_GOT_MBUF @@ -105,7 +105,7 @@ ASYM($1): NBIF_ARG(2,2,0) NBIF_ARG(3,2,1) lea 8(%esp), %eax - NBIF_ARG_REG(1,%eax) # BIF__ARGS + NBIF_ARG_REG(1,%eax) /* BIF__ARGS */ CALL_BIF($2) TEST_GOT_MBUF @@ -141,7 +141,7 @@ ASYM($1): NBIF_ARG(3,3,1) NBIF_ARG(4,3,2) lea 8(%esp), %eax - NBIF_ARG_REG(1,%eax) # BIF__ARGS + NBIF_ARG_REG(1,%eax) /* BIF__ARGS */ CALL_BIF($2) TEST_GOT_MBUF diff --git a/erts/emulator/hipe/hipe_x86_gc.h b/erts/emulator/hipe/hipe_x86_gc.h index e4607ad27d..aa4abb6f59 100644 --- a/erts/emulator/hipe/hipe_x86_gc.h +++ b/erts/emulator/hipe/hipe_x86_gc.h @@ -69,6 +69,11 @@ nstack_walk_init_sdesc(const Process *p, struct nstack_walk_state *state) nstkarity = 0; state->sdesc0[0].summary = (0 << 9) | (0 << 8) | nstkarity; state->sdesc0[0].livebits[0] = 0; +# ifdef DEBUG + state->sdesc0[0].dbg_M = 0; + state->sdesc0[0].dbg_F = am_init; + state->sdesc0[0].dbg_A = 0; +# endif /* XXX: this appears to prevent a gcc-4.1.1 bug on x86 */ __asm__ __volatile__("" : : "m"(*state) : "memory"); return &state->sdesc0[0]; diff --git a/erts/emulator/hipe/hipe_x86_glue.h b/erts/emulator/hipe/hipe_x86_glue.h index b0db93267c..63ad250d60 100644 --- a/erts/emulator/hipe/hipe_x86_glue.h +++ b/erts/emulator/hipe/hipe_x86_glue.h @@ -62,6 +62,9 @@ static __inline__ void hipe_arch_glue_init(void) .sdesc = { .bucket = { .hvalue = (unsigned long)nbif_return }, .summary = (1<<8), + #ifdef DEBUG + .dbg_F = am_return, + #endif }, }; hipe_init_sdesc_table(&nbif_return_sdesc.sdesc); diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index c1336c60d9..ce014c19c2 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -39,7 +39,6 @@ #include "dtrace-wrapper.h" #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS -# define ERTS_DRV_EV_STATE_EXTRA_SIZE 128 #else # include "safe_hash.h" # define DRV_EV_STATE_HTAB_SIZE 1024 @@ -334,7 +333,9 @@ static void grow_drv_ev_state(int min_ix) { int i; - int new_len = min_ix + 1 + ERTS_DRV_EV_STATE_EXTRA_SIZE; + int new_len; + + new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_ix + 1); if (new_len > max_fds) new_len = max_fds; diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 50a888323a..a523d67158 100644 --- a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -55,17 +55,12 @@ # ifdef SYS_SELECT_H # include <sys/select.h> # endif -# ifdef VXWORKS -# include <selectLib.h> -# endif #endif -#ifndef VXWORKS -# ifdef NO_SYSCONF -# if ERTS_POLL_USE_SELECT -# include <sys/param.h> -# else -# include <limits.h> -# endif +#ifdef NO_SYSCONF +# if ERTS_POLL_USE_SELECT +# include <sys/param.h> +# else +# include <limits.h> # endif #endif #include "erl_thr_progress.h" @@ -105,8 +100,8 @@ #define ERTS_POLL_COALESCE_KP_RES (ERTS_POLL_USE_KQUEUE || 
ERTS_POLL_USE_EPOLL) -#define FDS_STATUS_EXTRA_FREE_SIZE 128 -#define POLL_FDS_EXTRA_FREE_SIZE 128 +#define ERTS_EV_TABLE_MIN_LENGTH 1024 +#define ERTS_EV_TABLE_EXP_THRESHOLD (2048*1024) #ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT # define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 1 @@ -563,6 +558,28 @@ free_update_requests_block(ErtsPollSet ps, * --- Growing poll set structures ------------------------------------------- */ +int +ERTS_POLL_EXPORT(erts_poll_get_table_len) (int new_len) +{ + if (new_len < ERTS_EV_TABLE_MIN_LENGTH) { + new_len = ERTS_EV_TABLE_MIN_LENGTH; + } else if (new_len < ERTS_EV_TABLE_EXP_THRESHOLD) { + /* find next power of 2 */ + --new_len; + new_len |= new_len >> 1; + new_len |= new_len >> 2; + new_len |= new_len >> 4; + new_len |= new_len >> 8; + new_len |= new_len >> 16; + ++new_len; + } else { + /* grow incrementally */ + new_len += ERTS_EV_TABLE_EXP_THRESHOLD; + } + return new_len; +} + + #if ERTS_POLL_USE_KERNEL_POLL static void grow_res_events(ErtsPollSet ps, int new_len) @@ -575,7 +592,7 @@ grow_res_events(ErtsPollSet ps, int new_len) #elif ERTS_POLL_USE_KQUEUE struct kevent #endif - )*new_len; + ) * ERTS_POLL_EXPORT(erts_poll_get_table_len)(new_len); /* We do not need to save previously stored data */ if (ps->res_events) erts_free(ERTS_ALC_T_POLL_RES_EVS, ps->res_events); @@ -589,7 +606,7 @@ static void grow_poll_fds(ErtsPollSet ps, int min_ix) { int i; - int new_len = min_ix + 1 + POLL_FDS_EXTRA_FREE_SIZE; + int new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_ix + 1); if (new_len > max_fds) new_len = max_fds; ps->poll_fds = (ps->poll_fds_len @@ -611,7 +628,7 @@ static void grow_fds_status(ErtsPollSet ps, int min_fd) { int i; - int new_len = min_fd + 1 + FDS_STATUS_EXTRA_FREE_SIZE; + int new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_fd + 1); ASSERT(min_fd < max_fds); if (new_len > max_fds) new_len = max_fds; @@ -2200,10 +2217,6 @@ ERTS_POLL_EXPORT(erts_poll_max_fds)(void) * --- Initialization -------------------------------------------------------- */ -#ifdef VXWORKS -extern int erts_vxworks_max_files; -#endif - void ERTS_POLL_EXPORT(erts_poll_init)(void) { @@ -2212,9 +2225,7 @@ ERTS_POLL_EXPORT(erts_poll_init)(void) errno = 0; -#if defined(VXWORKS) - max_fds = erts_vxworks_max_files; -#elif !defined(NO_SYSCONF) +#if !defined(NO_SYSCONF) max_fds = sysconf(_SC_OPEN_MAX); #elif ERTS_POLL_USE_SELECT max_fds = NOFILE; diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h index 8dde619105..502290e4bb 100644 --- a/erts/emulator/sys/common/erl_poll.h +++ b/erts/emulator/sys/common/erl_poll.h @@ -246,4 +246,6 @@ void ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet, ErtsPollEvents [], int); +int ERTS_POLL_EXPORT(erts_poll_get_table_len)(int); + #endif /* #ifndef ERL_POLL_H__ */ diff --git a/erts/emulator/sys/unix/erl_unix_sys_ddll.c b/erts/emulator/sys/unix/erl_unix_sys_ddll.c index 336d9586c4..a35aec560a 100644 --- a/erts/emulator/sys/unix/erl_unix_sys_ddll.c +++ b/erts/emulator/sys/unix/erl_unix_sys_ddll.c @@ -101,7 +101,7 @@ void erl_sys_ddll_init(void) { /* * Open a shared object */ -int erts_sys_ddll_open2(char *full_name, void **handle, ErtsSysDdllError* err) +int erts_sys_ddll_open2(const char *full_name, void **handle, ErtsSysDdllError* err) { #if defined(HAVE_DLOPEN) char* dlname; @@ -153,7 +153,7 @@ int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err) /* * Find a symbol in the shared object */ -int erts_sys_ddll_sym2(void *handle, char *func_name, void **function, +int 
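
Back in the erl_poll.c hunk above: the new erts_poll_get_table_len() replaces the old fixed slack of 128 extra slots (FDS_STATUS_EXTRA_FREE_SIZE, POLL_FDS_EXTRA_FREE_SIZE, and ERTS_DRV_EV_STATE_EXTRA_SIZE in erl_check_io.c) with one shared growth policy: never smaller than 1024 entries, rounded up to the next power of two while below 2048*1024, and grown in 2048*1024-entry steps beyond that. The stand-alone restatement below mirrors the constants and rounding from the patch and is illustration only.

    #include <stdio.h>

    #define MIN_LENGTH    1024
    #define EXP_THRESHOLD (2048 * 1024)

    /* Same shape as erts_poll_get_table_len() in the hunk above. */
    static int table_len(int new_len)
    {
        if (new_len < MIN_LENGTH) {
            new_len = MIN_LENGTH;
        } else if (new_len < EXP_THRESHOLD) {
            /* round up to the next power of two */
            --new_len;
            new_len |= new_len >> 1;
            new_len |= new_len >> 2;
            new_len |= new_len >> 4;
            new_len |= new_len >> 8;
            new_len |= new_len >> 16;
            ++new_len;
        } else {
            /* grow linearly once the table is already large */
            new_len += EXP_THRESHOLD;
        }
        return new_len;
    }

    int main(void)
    {
        int probes[] = { 5, 1025, 70000, 3000000 };
        int i;
        for (i = 0; i < 4; i++)
            printf("%d -> %d\n", probes[i], table_len(probes[i]));
        return 0;   /* prints 1024, 2048, 131072 and 5097152 */
    }
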
erts_sys_ddll_sym2(void *handle, const char *func_name, void **function, ErtsSysDdllError* err) { #if defined(HAVE_DLOPEN) diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c index c1fa00b4ea..b485dbf784 100644 --- a/erts/emulator/sys/unix/sys.c +++ b/erts/emulator/sys/unix/sys.c @@ -58,7 +58,6 @@ #define __DARWIN__ 1 #endif - #ifdef USE_THREADS #include "erl_threads.h" #endif @@ -71,7 +70,6 @@ static erts_smp_rwmtx_t environ_rwmtx; #define MAX_VSIZE 16 /* Max number of entries allowed in an I/O * vector sock_sendv(). */ - /* * Don't need global.h, but bif_table.h (included by bif.h), * won't compile otherwise @@ -123,6 +121,15 @@ struct ErtsSysReportExit_ { #endif }; +/* This data is shared by these drivers - initialized by spawn_init() */ +static struct driver_data { + int port_num, ofd, packet_bytes; + ErtsSysReportExit *report_exit; + int pid; + int alive; + int status; +} *driver_data; /* indexed by fd */ + static ErtsSysReportExit *report_exit_list; #if CHLDWTHR && !defined(ERTS_SMP) static ErtsSysReportExit *report_exit_transit_list; @@ -570,7 +577,7 @@ erl_sys_init(void) + 1); child_setup_prog = erts_alloc(ERTS_ALC_T_CS_PROG_PATH, csp_path_sz); erts_smp_atomic_add_nob(&sys_misc_mem_sz, csp_path_sz); - sprintf(child_setup_prog, + erts_snprintf(child_setup_prog, csp_path_sz, "%s%c%s", bindir, DIR_SEPARATOR_CHAR, @@ -680,17 +687,40 @@ static RETSIGTYPE break_handler(int sig) #endif /* 0 */ static ERTS_INLINE void -prepare_crash_dump(void) +prepare_crash_dump(int secs) { +#define NUFBUF (3) int i, max; char env[21]; /* enough to hold any 64-bit integer */ size_t envsz; + DeclareTmpHeapNoproc(heap,NUFBUF); + Port *heart_port; + Eterm *hp = heap; + Eterm list = NIL; + int heart_fd[2] = {-1,-1}; + + UseTmpHeapNoproc(NUFBUF); if (ERTS_PREPARED_CRASH_DUMP) return; /* We have already been called */ + heart_port = erts_get_heart_port(); + if (heart_port) { + /* hearts input fd + * We "know" drv_data is the in_fd since the port is started with read|write + */ + heart_fd[0] = (int)heart_port->drv_data; + heart_fd[1] = (int)driver_data[heart_fd[0]].ofd; + + list = CONS(hp, make_small(8), list); hp += 2; + + /* send to heart port, CMD = 8, i.e. prepare crash dump =o */ + erts_write_to_port(ERTS_INVALID_PID, heart_port, list); + } + /* Make sure we unregister at epmd (unknown fd) and get at least one free filedescriptor (for erl_crash.dump) */ + max = max_files; if (max < 1024) max = 1024; @@ -704,11 +734,15 @@ prepare_crash_dump(void) if (i == async_fd[0] || i == async_fd[1]) continue; #endif + /* We don't want to close our heart yet ... */ + if (i == heart_fd[0] || i == heart_fd[1]) + continue; + close(i); } envsz = sizeof(env); - i = erts_sys_getenv_raw("ERL_CRASH_DUMP_NICE", env, &envsz); + i = erts_sys_getenv__("ERL_CRASH_DUMP_NICE", env, &envsz); if (i >= 0) { int nice_val; nice_val = i != 0 ? 0 : atoi(env); @@ -717,21 +751,21 @@ prepare_crash_dump(void) } erts_silence_warn_unused_result(nice(nice_val)); } - - envsz = sizeof(env); - i = erts_sys_getenv_raw("ERL_CRASH_DUMP_SECONDS", env, &envsz); - if (i >= 0) { - unsigned sec; - sec = (unsigned) i != 0 ? 
0 : atoi(env); - alarm(sec); - } + /* Positive secs means an alarm must be set + * 0 or negative means no alarm + */ + if (secs > 0) { + alarm((unsigned int)secs); + } + UnUseTmpHeapNoproc(NUFBUF); +#undef NUFBUF } void -erts_sys_prepare_crash_dump(void) +erts_sys_prepare_crash_dump(int secs) { - prepare_crash_dump(); + prepare_crash_dump(secs); } static ERTS_INLINE void @@ -773,7 +807,7 @@ sigusr1_exit(void) is hung somewhere, so it won't be able to poll any flag we set here. */ ERTS_SET_GOT_SIGUSR1; - prepare_crash_dump(); + prepare_crash_dump((int)0); erl_exit(1, "Received SIGUSR1\n"); } @@ -1021,15 +1055,6 @@ void fini_getenv_state(GETENV_STATE *state) #define ERTS_SYS_READ_BUF_SZ (64*1024) -/* This data is shared by these drivers - initialized by spawn_init() */ -static struct driver_data { - int port_num, ofd, packet_bytes; - ErtsSysReportExit *report_exit; - int pid; - int alive; - int status; -} *driver_data; /* indexed by fd */ - /* Driver interfaces */ static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*); static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*); @@ -1532,12 +1557,13 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op } #if !DISABLE_VFORK } +#define ENOUGH_BYTES (44) else { /* Use vfork() */ char **cs_argv= erts_alloc(ERTS_ALC_T_TMP,(CS_ARGV_NO_OF_ARGS + 1)* sizeof(char *)); - char fd_close_range[44]; /* 44 bytes are enough to */ - char dup2_op[CS_ARGV_NO_OF_DUP2_OPS][44]; /* hold any "%d:%d" string */ - /* on a 64-bit machine. */ + char fd_close_range[ENOUGH_BYTES]; /* 44 bytes are enough to */ + char dup2_op[CS_ARGV_NO_OF_DUP2_OPS][ENOUGH_BYTES]; /* hold any "%d:%d" string */ + /* on a 64-bit machine. */ /* Setup argv[] for the child setup program (implemented in erl_child_setup.c) */ @@ -1545,23 +1571,23 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op if (opts->use_stdio) { if (opts->read_write & DO_READ){ /* stdout for process */ - sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 1); + erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ifd[1], 1); if(opts->redir_stderr) /* stderr for process */ - sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 2); + erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ifd[1], 2); } if (opts->read_write & DO_WRITE) /* stdin for process */ - sprintf(&dup2_op[i++][0], "%d:%d", ofd[0], 0); + erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ofd[0], 0); } else { /* XXX will fail if ofd[0] == 4 (unlikely..) */ if (opts->read_write & DO_READ) - sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 4); + erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ifd[1], 4); if (opts->read_write & DO_WRITE) - sprintf(&dup2_op[i++][0], "%d:%d", ofd[0], 3); + erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ofd[0], 3); } for (; i < CS_ARGV_NO_OF_DUP2_OPS; i++) strcpy(&dup2_op[i][0], "-"); - sprintf(fd_close_range, "%d:%d", opts->use_stdio ? 3 : 5, max_files-1); + erts_snprintf(fd_close_range, ENOUGH_BYTES, "%d:%d", opts->use_stdio ? 3 : 5, max_files-1); cs_argv[CS_ARGV_PROGNAME_IX] = child_setup_prog; cs_argv[CS_ARGV_WD_IX] = opts->wd ? opts->wd : "."; @@ -1612,6 +1638,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op } erts_free(ERTS_ALC_T_TMP,cs_argv); } +#undef ENOUGH_BYTES #endif erts_sched_bind_atfork_parent(unbind); @@ -2355,10 +2382,10 @@ void erts_do_break_handling(void) ** no interpretatione of this should be done by the rest of the ** emulator. The buffer should be at least 21 bytes long. 
*/ -void sys_get_pid(char *buffer){ +void sys_get_pid(char *buffer, size_t buffer_size){ pid_t p = getpid(); /* Assume the pid is scalar and can rest in an unsigned long... */ - sprintf(buffer,"%lu",(unsigned long) p); + erts_snprintf(buffer, buffer_size, "%lu",(unsigned long) p); } int @@ -2417,6 +2444,15 @@ erts_sys_getenv_raw(char *key, char *value, size_t *size) { return erts_sys_getenv(key, value, size); } +/* + * erts_sys_getenv + * returns: + * -1, if environment key is not set with a value + * 0, if environment key is set and value fits into buffer size + * 1, if environment key is set but does not fit into buffer size + * size is set with the needed buffer size value + */ + int erts_sys_getenv(char *key, char *value, size_t *size) { diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c index 8ec7b31ce0..3fcb4d88dc 100644 --- a/erts/emulator/sys/unix/sys_float.c +++ b/erts/emulator/sys/unix/sys_float.c @@ -745,18 +745,18 @@ void erts_sys_unblock_fpe(int unmasked) */ int -sys_double_to_chars(double fp, char *buf) +sys_double_to_chars(double fp, char *buffer, size_t buffer_size) { - char *s = buf; + char *s = buffer; - (void) sprintf(buf, "%.20e", fp); + (void) erts_snprintf(buffer, buffer_size, "%.20e", fp); /* Search upto decimal point */ if (*s == '+' || *s == '-') s++; while (ISDIGIT(*s)) s++; if (*s == ',') *s++ = '.'; /* Replace ',' with '.' */ /* Scan to end of string */ while (*s) s++; - return s-buf; /* i.e strlen(buf) */ + return s-buffer; /* i.e strlen(buffer) */ } /* Float conversion */ diff --git a/erts/emulator/sys/vxworks/driver_int.h b/erts/emulator/sys/vxworks/driver_int.h deleted file mode 100644 index f6bc71a799..0000000000 --- a/erts/emulator/sys/vxworks/driver_int.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 1997-2009. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. - * - * %CopyrightEnd% - */ -/*---------------------------------------------------------------------- -** Purpose : System dependant driver declarations -**---------------------------------------------------------------------- */ - -#ifndef __DRIVER_INT_H__ -#define __DRIVER_INT_H__ - -#include <ioLib.h> - -typedef struct iovec SysIOVec; - -#endif diff --git a/erts/emulator/sys/vxworks/erl_main.c b/erts/emulator/sys/vxworks/erl_main.c deleted file mode 100644 index c9b44a635a..0000000000 --- a/erts/emulator/sys/vxworks/erl_main.c +++ /dev/null @@ -1,45 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2000-2009. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. 
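
The comment added above on erts_sys_getenv() documents a three-way contract: -1 when the key is unset, 0 when the value fits the caller's buffer, and 1 when it does not, in which case *size is set to the required buffer size. The sketch below shows how a caller is expected to probe and retry; demo_getenv() is a hypothetical stand-in built on plain getenv(), and only the return-value contract is taken from the patch.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in with the documented contract:
     * -1 = key not set, 0 = value fits, 1 = does not fit and *size is
     * updated with the needed buffer size. */
    static int demo_getenv(const char *key, char *value, size_t *size)
    {
        char *v = getenv(key);
        size_t need;

        if (v == NULL)
            return -1;
        need = strlen(v) + 1;
        if (need > *size) {
            *size = need;
            return 1;
        }
        memcpy(value, v, need);
        return 0;
    }

    int main(void)
    {
        char small[8];
        size_t sz = sizeof(small);
        char *big;

        switch (demo_getenv("HOME", small, &sz)) {
        case -1:
            puts("HOME is not set");
            break;
        case 0:
            printf("HOME = %s\n", small);
            break;
        default:                 /* 1: grow to the reported size and retry */
            big = malloc(sz);
            if (big != NULL && demo_getenv("HOME", big, &sz) == 0)
                printf("HOME = %s\n", big);
            free(big);
            break;
        }
        return 0;
    }
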
- * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. - * - * %CopyrightEnd% - */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif -#include "sys.h" -#include "erl_vm.h" - -#if defined(__GNUC__) -/* - * The generated assembler does the usual trick (relative - * branch-and-link to next instruction) to get a copy of the - * instruction ptr. Instead of branching to an explicit zero offset, - * it branches to the symbol `__eabi' --- which is expected to be - * undefined and thus zero (if it is defined as non-zero, things will - * be interesting --- as in the Chinese curse). To shut up the VxWorks - * linker, we define `__eabi' as zero. - * - * This is just a work around. It's really Wind River's GCC's code - * generator that should be fixed. - */ -__asm__(".equ __eabi, 0"); -#endif - -void -erl_main(int argc, char **argv) -{ - erl_start(argc, argv); -} diff --git a/erts/emulator/sys/vxworks/erl_vxworks_sys.h b/erts/emulator/sys/vxworks/erl_vxworks_sys.h deleted file mode 100644 index 3d53238ea6..0000000000 --- a/erts/emulator/sys/vxworks/erl_vxworks_sys.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 1997-2011. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. - * - * %CopyrightEnd% - */ -#ifndef __ERL_VXWORKS_SYS_H__ -#define __ERL_VXWORKS_SYS_H__ - -/* stdarg.h don't work without this one... */ -#include <vxWorks.h> - -#include <stdio.h> -#include <math.h> -#include <limits.h> -#include <stdlib.h> -#define index StringIndexFunctionThatIDontWantDeclared -#include <string.h> -#undef index - - - -#include <sys/times.h> -#include <time.h>/* xxxP */ - -#include <dirent.h> -#include <sys/stat.h> - -/* xxxP from unix_sys.h begin */ - -/* - * Make sure that MAXPATHLEN is defined. - */ - -#ifndef MAXPATHLEN -# ifdef PATH_MAX -# define MAXPATHLEN PATH_MAX -# else -# define MAXPATHLEN 2048 -# endif -#endif - -/* xxxP end */ - - -/* Unimplemented math functions */ -#define NO_ASINH -#define NO_ACOSH -#define NO_ATANH -#define NO_ERF -#define NO_ERFC - -/* Stuff that is useful for port programs, drivers, etc */ -#ifndef VXWORKS -#define VXWORKS -#endif - -#define DONT_USE_MAIN -#define NO_FSYNC -#define NO_MKDIR_MODE -#define NO_UMASK -#define NO_SYMBOLIC_LINKS -#define NO_DEVICE_FILES -#define NO_UID -#define NO_ACCESS -#define NO_FCNTL -#define NO_SYSLOG -#define NO_SYSCONF -#define NO_PWD /* XXX Means what? */ -#define NO_DAEMON -/* This chooses ~250 reductions instead of 500 in config.h */ -#if (CPU == CPU32) -#define SLOW_PROCESSOR -#endif - -/* - * Even though we does not always have small memories on VxWorks - * we certainly does not have virtual memory. 
- */ -#if !defined(LARGE_MEMORY) -#define SMALL_MEMORY -#endif - -/*************** Floating point exception handling ***************/ - -/* There are no known ways to customize the handling of invalid floating - point operations, such as matherr() or ieee_handler(), in VxWorks 5.1. */ - -#if (CPU == MC68040 || CPU == CPU32 || CPU == PPC860 || CPU == PPC32 || \ - CPU == PPC603 || CPU == PPC604 || CPU == SIMSPARCSOLARIS) - -/* VxWorks 5.1 on Motorola 68040 never generates SIGFPE, but sets the - result of invalid floating point ops to Inf and NaN - unfortunately - the way to test for those values is undocumented and hidden in a - "private" include file... */ -/* Haven't found any better way, as of yet, for ppc860 xxxP*/ - -#include <private/mathP.h> -#define NO_FPE_SIGNALS -#define erts_get_current_fp_exception() NULL -#define __ERTS_FP_CHECK_INIT(fpexnp) do {} while (0) -#define __ERTS_FP_ERROR(fpexnp, f, Action) if (isInf(f) || isNan(f)) { Action; } else {} -#define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) __ERTS_FP_ERROR(fpexnp, f, Action) -#define __ERTS_SAVE_FP_EXCEPTION(fpexnp) -#define __ERTS_RESTORE_FP_EXCEPTION(fpexnp) - -#define ERTS_FP_CHECK_INIT(p) __ERTS_FP_CHECK_INIT(&(p)->fp_exception) -#define ERTS_FP_ERROR(p, f, A) __ERTS_FP_ERROR(&(p)->fp_exception, f, A) -#define ERTS_SAVE_FP_EXCEPTION(p) __ERTS_SAVE_FP_EXCEPTION(&(p)->fp_exception) -#define ERTS_RESTORE_FP_EXCEPTION(p) __ERTS_RESTORE_FP_EXCEPTION(&(p)->fp_exception) -#define ERTS_FP_ERROR_THOROUGH(p, f, A) __ERTS_FP_ERROR_THOROUGH(&(p)->fp_exception, f, A) - -#define erts_sys_block_fpe() 0 -#define erts_sys_unblock_fpe(x) do{}while(0) - -#if (CPU == PPC603) -/* Need fppLib to change the Floating point registers - (fix_registers in sys.c)*/ - -#include <fppLib.h> - -#endif /* PPC603 */ - -#else - -Unsupported CPU value ! - -#endif - -typedef void *GETENV_STATE; - -#define HAVE_GETHRTIME - -extern int erts_clock_rate; - -#define SYS_CLK_TCK (erts_clock_rate) - -#define SYS_CLOCK_RESOLUTION 1 - -typedef struct _vxworks_tms { - clock_t tms_utime; - clock_t tms_stime; - clock_t tms_cutime; - clock_t tms_cstime; -} SysTimes; - -typedef long long SysHrTime; - -typedef time_t erts_time_t; -typedef struct timeval SysTimeval; - -extern int sys_init_hrtime(void); -extern SysHrTime sys_gethrtime(void); -extern void sys_gettimeofday(SysTimeval *tvp); -extern clock_t sys_times(SysTimes *t); - -#define SIZEOF_SHORT 2 -#define SIZEOF_INT 4 -#define SIZEOF_LONG 4 -#define SIZEOF_VOID_P 4 -#define SIZEOF_SIZE_T 4 -#define SIZEOF_OFF_T 4 - -/* - * Temporary buffer *only* used in sys code. - */ -#define SYS_TMP_BUF_SIZE 65536 - -/* Need to be able to interrupt erts_poll_wait() from signal handler */ -#define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT - -#endif /* __ERL_VXWORKS_SYS_H__ */ diff --git a/erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c b/erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c deleted file mode 100644 index c56c633b2f..0000000000 --- a/erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c +++ /dev/null @@ -1,253 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2006-2009. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. 
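[Editor's note] The deleted header above replaces SIGFPE-based detection with an explicit Inf/NaN test after each operation (the __ERTS_FP_ERROR macro built on the private VxWorks mathP.h helpers). A portable sketch of the same idea, assuming C99 isinf()/isnan(); this is not the ERTS macro, just the pattern it implements.

#include <math.h>
#include <stdio.h>

/* Check a result the way __ERTS_FP_ERROR does above: no signal
 * handling, just inspect the value after the operation. */
#define FP_ERROR(f, Action) \
    do { if (isinf(f) || isnan(f)) { Action; } } while (0)

int main(void)
{
    double x = 1.0e308;
    double y = x * 10.0;            /* overflows to +Inf */

    FP_ERROR(y, { puts("badarith"); return 1; });
    printf("%g\n", y);
    return 0;
}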
- * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. - * - * %CopyrightEnd% - */ - -/* - * Interface functions to the dynamic linker using dl* functions. - * (As far as I know it works on SunOS 4, 5, Linux and FreeBSD. /Seb) - */ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif -#include <vxWorks.h> -#include <stdio.h> -#include <string.h> -#include <stdarg.h> -#include <a_out.h> -#include <symLib.h> -#include <loadLib.h> -#include <unldLib.h> -#include <moduleLib.h> -#include <sysSymTbl.h> -#include "sys.h" -#include "global.h" -#include "erl_alloc.h" -#include "erl_driver.h" - -#define EXT_LEN 4 -#define FILE_EXT ".eld" -#define ALT_FILE_EXT ".o" -/* ALT_FILE_EXT must not be longer than FILE_EXT */ -#define DRIVER_INIT_SUFFIX "_init" - -static MODULE_ID get_mid(char *); -static FUNCPTR lookup(char *); - -typedef enum { - NoError, - ModuleNotFound, - ModuleNotUnloadable, - UnknownError -} FakeSytemError; - -static char *errcode_tab[] = { - "No error", - "Module/file not found", - "Module cannot be unloaded", - "Unknown error" -}; - -void erl_sys_ddll_init(void) { - return; -} -/* - * Open a shared object - */ -int erts_sys_ddll_open2(char *full_name, void **handle, ErtsSysDdllError* err) -{ - int len; - - if (erts_sys_ddll_open_noext(full_name, handle, err) == ERL_DE_NO_ERROR) { - return ERL_DE_NO_ERROR; - } - if ((len = sys_strlen(full_name)) > PATH_MAX-EXT_LEN) { - return ERL_DE_LOAD_ERROR_NAME_TO_LONG; - } else { - static char dlname[PATH_MAX + 1]; - - sys_strcpy(dlname, full_name); - sys_strcpy(dlname+len, FILE_EXT); - if (erts_sys_ddll_open_noext(dlname, handle, err) == ERL_DE_NO_ERROR) { - return ERL_DE_NO_ERROR; - } - sys_strcpy(dlname+len, ALT_FILE_EXT); - return erts_sys_ddll_open_noext(dlname, handle, err); - } -} -int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err) -{ - MODULE_ID mid; - - if((mid = get_mid(dlname)) == NULL) { - return ERL_DE_DYNAMIC_ERROR_OFFSET - ((int) ModuleNotFound); - } - *handle = (void *) mid; - return ERL_DE_NO_ERROR; -} - -/* - * Find a symbol in the shared object - */ -#define PREALLOC_BUFFER_SIZE 256 -int erts_sys_ddll_sym2(void *handle, char *func_name, void **function, ErtsSysDdllError* err) -{ - FUNCPTR proc; - static char statbuf[PREALLOC_BUFFER_SIZE]; - char *buf = statbuf; - int need; - - if ((proc = lookup(func_name)) == NULL) { - if ((need = strlen(func_name)+2) > PREALLOC_BUFFER_SIZE) { - buf = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF,need); - } - buf[0] = '_'; - sys_strcpy(buf+1,func_name); - proc = lookup(buf); - if (buf != statbuf) { - erts_free(ERTS_ALC_T_DDLL_TMP_BUF, buf); - } - if (proc == NULL) { - return ERL_DE_LOOKUP_ERROR_NOT_FOUND; - } - } - *function = (void *) proc; - return ERL_DE_NO_ERROR; -} - -/* XXX:PaN These two will be changed with new driver interface! */ - -/* - * Load the driver init function, might appear under different names depending on object arch... 
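[Editor's note] erts_sys_ddll_open2 above tries the name as given, then with ".eld", then with ".o". A minimal portable sketch of that fallback order, with fopen() standing in for the VxWorks module loader; open_with_exts and NAME_MAX_LEN are invented for illustration.

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 255

/* Try "name", "name.eld", "name.o" in that order, as the deleted
 * erts_sys_ddll_open2 did; fopen() stands in for loadModule(). */
static FILE *open_with_exts(const char *name)
{
    static const char *exts[] = { "", ".eld", ".o" };
    char path[NAME_MAX_LEN + 8];
    size_t i;

    if (strlen(name) > NAME_MAX_LEN)
        return NULL;                        /* name too long */
    for (i = 0; i < sizeof(exts) / sizeof(exts[0]); i++) {
        FILE *f;
        snprintf(path, sizeof(path), "%s%s", name, exts[i]);
        f = fopen(path, "rb");
        if (f != NULL)
            return f;
    }
    return NULL;
}

int main(int argc, char **argv)
{
    FILE *f = argc > 1 ? open_with_exts(argv[1]) : NULL;
    if (f) { puts("found"); fclose(f); }
    return 0;
}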
- */ - -int erts_sys_ddll_load_driver_init(void *handle, void **function) -{ - MODULE_ID mid = (MODULE_ID) handle; - char *modname; - char *cp; - static char statbuf[PREALLOC_BUFFER_SIZE]; - char *fname = statbuf; - int len; - int res; - void *func; - int need; - - if((modname = moduleNameGet(mid)) == NULL) { - return ERL_DE_DYNAMIC_ERROR_OFFSET - ((int) ModuleNotFound); - } - - if((cp = strrchr(modname, '.')) == NULL) { - len = strlen(modname); - } else { - len = cp - modname; - } - - need = len + strlen(DRIVER_INIT_SUFFIX) + 1; - if (need > PREALLOC_BUFFER_SIZE) { - fname = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, need); /* erts_alloc exits on failure */ - } - sys_strncpy(fname, modname, len); - fname[len] = '\0'; - sys_strcat(fname, DRIVER_INIT_SUFFIX); - res = erts_sys_ddll_sym(handle, fname, &func); - if (fname != statbuf) { - erts_free(ERTS_ALC_T_DDLL_TMP_BUF, fname); - } - if ( res != ERL_DE_NO_ERROR) { - return res; - } - *function = func; - return ERL_DE_NO_ERROR; -} - -int erts_sys_ddll_load_nif_init(void *handle, void **function, ErtsSysDdllError* err) -{ - /* NIFs not implemented for vxworks */ - return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY; -} - -/* - * Call the driver_init function, whatever it's really called, simple on unix... -*/ -void *erts_sys_ddll_call_init(void *function) { - void *(*initfn)(void) = function; - return (*initfn)(); -} -void *erts_sys_ddll_call_nif_init(void *function) { - return erts_sys_ddll_call_init(function); -} - - -/* - * Close a chared object - */ -int erts_sys_ddll_close2(void *handle, ErtsSysDdllError* err) -{ - MODULE_ID mid = (MODULE_ID) handle; - if (unld(mid, 0) < 0) { - return ERL_DE_DYNAMIC_ERROR_OFFSET - ((int) ModuleNotUnloadable); - } - return ERL_DE_NO_ERROR; -} - -/* - * Return string that describes the (current) error - */ -char *erts_sys_ddll_error(int code) -{ - int actual_code; - if (code > ERL_DE_DYNAMIC_ERROR_OFFSET) { - return "Unspecified error"; - } - actual_code = -1*(code - ERL_DE_DYNAMIC_ERROR_OFFSET); - if (actual_code > ((int) UnknownError)) { - actual_code = UnknownError; - } - return errcode_tab[actual_code]; -} - -static FUNCPTR lookup(char *sym) -{ - FUNCPTR entry; - SYM_TYPE type; - - if (symFindByNameAndType(sysSymTbl, sym, (char **)&entry, - &type, N_EXT | N_TEXT, N_EXT | N_TEXT) != OK) { - return NULL ; - } - return entry; -} - -static MODULE_ID get_mid(char* name) -{ - int fd; - MODULE_ID mid = NULL; - - if((fd = open(name, O_RDONLY, 0664)) >= 0) { - mid = loadModule(fd, GLOBAL_SYMBOLS); - close(fd); - } - return mid; -} - -void erts_sys_ddll_free_error(ErtsSysDdllError* err) -{ - /* NYI */ -} - diff --git a/erts/emulator/sys/vxworks/sys.c b/erts/emulator/sys/vxworks/sys.c deleted file mode 100644 index 739b026fb1..0000000000 --- a/erts/emulator/sys/vxworks/sys.c +++ /dev/null @@ -1,2610 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 1997-2011. All Rights Reserved. - * - * The contents of this file are subject to the Erlang Public License, - * Version 1.1, (the "License"); you may not use this file except in - * compliance with the License. You should have received a copy of the - * Erlang Public License along with this software. If not, it can be - * retrieved online at http://www.erlang.org/. - * - * Software distributed under the License is distributed on an "AS IS" - * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See - * the License for the specific language governing rights and limitations - * under the License. 
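[Editor's note] The driver-init lookup above derives the symbol name from the module name: strip any trailing extension, then append "_init". A small sketch of that name construction in plain C; init_symbol_name is an invented helper, and a fixed-size buffer replaces the erts_alloc fallback used in the deleted code.

#include <stdio.h>
#include <string.h>

#define DRIVER_INIT_SUFFIX "_init"

/* "echo_drv.eld" -> "echo_drv_init", as in the deleted
 * erts_sys_ddll_load_driver_init; returns 0 on success,
 * -1 if the result would not fit in out. */
static int init_symbol_name(const char *module, char *out, size_t outsz)
{
    const char *dot = strrchr(module, '.');
    size_t len = dot ? (size_t)(dot - module) : strlen(module);

    if (len + strlen(DRIVER_INIT_SUFFIX) + 1 > outsz)
        return -1;
    memcpy(out, module, len);
    out[len] = '\0';
    strcat(out, DRIVER_INIT_SUFFIX);
    return 0;
}

int main(void)
{
    char sym[64];
    if (init_symbol_name("echo_drv.eld", sym, sizeof(sym)) == 0)
        printf("%s\n", sym);        /* prints echo_drv_init */
    return 0;
}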
- * - * %CopyrightEnd% - */ -/* - * system-dependent functions - * - */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif -#include <vxWorks.h> -#include <version.h> -#include <string.h> -#include <types.h> -#include <sigLib.h> -#include <ioLib.h> -#include <iosLib.h> -#include <envLib.h> -#include <fioLib.h> -#include <stdlib.h> -#include <stdio.h> -#include <errno.h> -#include <symLib.h> -#include <sysLib.h> -#include <sysSymTbl.h> -#include <loadLib.h> -#include <taskLib.h> -#include <taskVarLib.h> -#include <taskHookLib.h> -#include <tickLib.h> -#include <time.h> -#include <rngLib.h> -#include <semLib.h> -#include <selectLib.h> -#include <sockLib.h> -#include <a_out.h> -#include <wdLib.h> -#include <timers.h> -#include <ctype.h> -#include <sys/stat.h> -#include <sys/socket.h> -#include <netinet/in.h> -#include <netinet/tcp.h> -#include <stdarg.h> - - -#ifndef WANT_NONBLOCKING -#define WANT_NONBLOCKING -#endif - -#include "sys.h" -#include "erl_alloc.h" - -/* don't need global.h, but bif_table.h (included by bif.h) won't compile otherwise */ -#include "global.h" -#include "bif.h" - -#include "erl_sys_driver.h" - -#include "elib_stat.h" - -#include "reclaim_private.h" /* Some more or less private reclaim facilities */ - -#ifndef RETSIGTYPE -#define RETSIGTYPE void -#endif - -EXTERN_FUNCTION(void, erl_start, (int, char**)); -EXTERN_FUNCTION(void, erl_exit, (int n, char*, _DOTS_)); -EXTERN_FUNCTION(void, erl_error, (char*, va_list)); -EXTERN_FUNCTION(int, driver_interrupt, (int, int)); -EXTERN_FUNCTION(void, increment_time, (int)); -EXTERN_FUNCTION(int, erts_next_time, (_VOID_)); -EXTERN_FUNCTION(void, set_reclaim_free_function, (FreeFunction)); -EXTERN_FUNCTION(int, erl_mem_info_get, (MEM_PART_STATS *)); -EXTERN_FUNCTION(void, erl_crash_dump, (char* file, int line, char* fmt, ...)); - -#define ISREG(st) (((st).st_mode&S_IFMT) == S_IFREG) - -/* these are defined in usrLib.c */ -extern int spTaskPriority, spTaskOptions; - -/* forward declarations */ -static FUNCTION(FUNCPTR, lookup, (char*)); -static FUNCTION(int, read_fill, (int, char*, int)); -#if (CPU == SPARC) -static FUNCTION(RETSIGTYPE, fpe_sig_handler, (int)); /*where is this fun? */ -#elif (CPU == PPC603) -static FUNCTION(void, fix_registers, (void)); -#endif -static FUNCTION(void, close_pipes, (int*, int*, int)); -static FUNCTION(void, delete_hook, (void)); -static FUNCTION(void, initialize_allocation, (void)); - -FUNCTION(STATUS, uxPipeDrv, (void)); -FUNCTION(STATUS, pipe, (int*)); -FUNCTION(void, uxPipeShow, (int)); - -void erl_main(int argc, char **argv); -void argcall(char *args); - -/* Malloc-realted functions called from the VxWorks shell */ -EXTERN_FUNCTION(int, erl_set_memory_block, - (int, int, int, int, int, int, int, int, int, int)); -EXTERN_FUNCTION(int, erl_memory_show, - (int, int, int, int, int, int, int, int, int, int)); - -#define DEFAULT_PORT_STACK_SIZE 100000 -static int port_stack_size; - -static int erlang_id = 0; /* Inited at loading, set/reset at each run */ - -/* interval time reported to emulator */ -static int sys_itime; - -/* XXX - This is defined in .../config/all/configAll.h (NUM_FILES), - and not easily accessible at compile or run time - however, - in VxWorks 5.1 it is stored in the (undocumented?) maxFiles variable; - probably shouldn't depend on it, but we try to pick it up... 
*/ -static int max_files = 50; /* default configAll.h */ - -int erts_vxworks_max_files; - -/* - * used by the break handler (set by signal handler on ctl-c) - */ -volatile int erts_break_requested; - -/********************* General functions ****************************/ - -/* - * Reset the terminal to the original settings on exit - * (nothing to do for WxWorks). - */ -void sys_tty_reset(int exit_code) -{ -} - -Uint -erts_sys_misc_mem_sz(void) -{ - Uint res = erts_check_io_size(); - /* res += FIXME */ - return res; -} - -/* - * XXX This declaration should not be here. - */ -void erl_sys_schedule_loop(void); - -#ifdef SOFTDEBUG -static void do_trace(int line, char *file, char *format, ...) -{ - va_list va; - int tid = taskIdSelf(); - char buff[512]; - - va_start(va, format); - sprintf(buff,"Trace: Task: 0x%08x, %s:%d - ", - tid, file, line); - vsprintf(buff + strlen(buff), format, va); - va_end(va); - strcat(buff,"\r\n"); - write(2,buff,strlen(buff)); -} - -#define TRACE() do_trace(__LINE__, __FILE__,"") -#define TRACEF(Args...) do_trace(__LINE__,__FILE__, ## Args) -#endif - -void -erts_sys_pre_init(void) -{ - if (erlang_id != 0) { - /* NOTE: This particular case must *not* call erl_exit() */ - erts_fprintf(stderr, "Sorry, erlang is already running (as task %d)\n", - erlang_id); - exit(1); - } - - /* This must be done as early as possible... */ - if(!reclaim_init()) - fprintf(stderr, "Warning : reclaim facility should be initiated before " - "erlang is started!\n"); - erts_vxworks_max_files = max_files = reclaim_max_files(); - - /* Floating point exceptions */ -#if (CPU == SPARC) - sys_sigset(SIGFPE, fpe_sig_handler); -#elif (CPU == PPC603) - fix_registers(); -#endif - - /* register the private delete hook in reclaim */ - save_delete_hook((FUNCPTR)delete_hook, (caddr_t)0); - erlang_id = taskIdSelf(); -#ifdef DEBUG - printf("emulator task id = 0x%x\n", erlang_id); -#endif -} - -void erts_sys_alloc_init(void) -{ - initialize_allocation(); -} - -void -erl_sys_init(void) -{ - setvbuf(stdout, (char *)NULL, _IOLBF, BUFSIZ); - /* XXX Bug in VxWorks stdio loses fputch()'ed output after the - setvbuf() but before a *printf(), and possibly worse (malloc - errors, crash?) - so let's give it a *printf().... */ - fprintf(stdout, "%s",""); -} - -void -erl_sys_args(int* argc, char** argv) -{ - erts_init_check_io(); - max_files = erts_check_io_max_files(); - ASSERT(max_files <= erts_vxworks_max_files); -} - -void -erts_sys_schedule_interrupt(int set) -{ - erts_check_io_interrupt(set); -} - -/* - * Called from schedule() when it runs out of runnable processes, - * or when Erlang code has performed INPUT_REDUCTIONS reduction - * steps. runnable == 0 iff there are no runnable Erlang processes. 
- */ -void -erl_sys_schedule(int runnable) -{ - erts_check_io(!runnable); -} - -void erts_do_break_handling(void) -{ - SET_BLOCKING(0); - /* call the break handling function, reset the flag */ - do_break(); - erts_break_requested = 0; - SET_NONBLOCKING(0); -} - -/* signal handling */ -RETSIGTYPE (*sys_sigset(sig, func))() - int sig; - RETSIGTYPE (*func)(); -{ - struct sigaction act, oact; - - sigemptyset(&act.sa_mask); - act.sa_flags = 0; - act.sa_handler = func; - sigaction(sig, &act, &oact); - return(oact.sa_handler); -} - -void sys_sigblock(int sig) -{ - sigset_t mask; - - sigemptyset(&mask); - sigaddset(&mask, sig); - sigprocmask(SIG_BLOCK, &mask, (sigset_t *)NULL); -} - -void sys_sigrelease(int sig) -{ - sigset_t mask; - - sigemptyset(&mask); - sigaddset(&mask, sig); - sigprocmask(SIG_UNBLOCK, &mask, (sigset_t *)NULL); -} - -void -erts_sys_prepare_crash_dump(void) -{ - -} - -/* register signal handlers XXX - they don't work, need to find out why... */ -/* set up signal handlers for break and quit */ -static void request_break(void) -{ - /* just set a flag - checked for and handled - * in main thread (not signal handler). - * see check_io() - */ -#ifdef DEBUG - fprintf(stderr,"break!\n"); -#endif - erts_break_requested = 1; - erts_check_io_async_sig_interrupt(1); /* Make sure we don't sleep in erts_poll_wait */ -} - -static void do_quit(void) -{ - halt_0(0); -} - -void erts_set_ignore_break(void) { -} - -void init_break_handler(void) -{ - sys_sigset(SIGINT, request_break); - sys_sigset(SIGQUIT, do_quit); -} - -void erts_replace_intr(void) { -} - -int sys_max_files(void) -{ - return(max_files); -} - -/******************* Routines for time measurement *********************/ - -int sys_init_time(void) -{ - erts_clock_rate = sysClkRateGet(); - /* - ** One could imagine that it would be better returning - ** a resolution more near the clock rate, like in: - ** return 1000 / erts_clock_rate; - ** but tests show that such isn't the case (rounding errors?) - ** Well, we go for the Unix variant of returning 1 - ** as a constant virtual clock rate. - */ - return SYS_CLOCK_RESOLUTION; -} - -int erts_clock_rate; -static volatile int ticks_inuse; -static volatile unsigned long ticks_collected; /* will wrap */ -static WDOG_ID watchdog_id; -static ULONG user_time; -static int this_task_id, sys_itime; -static SysHrTime hrtime_wrap; -static unsigned long last_tick_count; - -static void tolerant_time_clockint(int count) -{ - if (watchdog_id != NULL) { - if (taskIsReady(this_task_id)) - user_time += 1; - ++count; - if (!ticks_inuse) { - ticks_collected += count; - count = 0; - } - wdStart(watchdog_id, 1, (FUNCPTR)tolerant_time_clockint, count); - } -} - -int sys_init_hrtime(void) -{ - this_task_id = taskIdSelf(); /* OK, this only works for one single task - in the system... 
*/ - user_time = 0; - - ticks_inuse = 0; - ticks_collected = 0; - hrtime_wrap = 0; - last_tick_count = 0; - - sys_itime = 1000 / erts_clock_rate; - watchdog_id = wdCreate(); - wdStart(watchdog_id, 1, (FUNCPTR) tolerant_time_clockint, 0); - return 0; -} - -SysHrTime sys_gethrtime(void) -{ - SysHrTime ticks; - - ++ticks_inuse; - ticks = (SysHrTime) (ticks_collected & 0x7FFFFFFF); - ticks_inuse = 0; - if (ticks < (SysHrTime) last_tick_count) { - hrtime_wrap += 1UL << 31; - } - last_tick_count = ticks; - return (ticks + hrtime_wrap) * ((SysHrTime) (1000000000UL / - erts_clock_rate)); -} - -void sys_gettimeofday(SysTimeval *tvp) -{ - struct timespec now; - - clock_gettime(CLOCK_REALTIME, &now); - tvp->tv_sec = now.tv_sec; - tvp->tv_usec = now.tv_nsec / 1000; -} - -clock_t sys_times(SysTimes *t) -{ - t->tms_stime = t->tms_cutime = t->tms_cstime = 0; - ++ticks_inuse; - t->tms_utime = user_time; - ticks_inuse = 0; - return tickGet(); /* The best we can do... */ -} - -/* This is called when *this task* is deleted */ -static void delete_hook(void) -{ - if (watchdog_id != NULL) { - wdDelete(watchdog_id); - watchdog_id = NULL; - } - erlang_id = 0; - this_task_id = 0; -} - -/************************** OS info *******************************/ - -/* Used by erlang:info/1. */ -/* (This code was formerly in drv.XXX/XXX_os_drv.c) */ - -#define MAX_VER_STR 9 /* Number of characters to - consider in version string */ - -static FUNCTION(int, get_number, (char** str_ptr)); - -char os_type[] = "vxworks"; - -static int -get_number(char **str_ptr) -{ - char* s = *str_ptr; /* Pointer to beginning of string. */ - char* dot; /* Pointer to dot in string or NULL. */ - - if (!isdigit(*s)) - return 0; - if ((dot = strchr(s, '.')) == NULL) { - *str_ptr = s+strlen(s); - return atoi(s); - } else { - *dot = '\0'; - *str_ptr = dot+1; - return atoi(s); - } -} - -/* namebuf; Where to return the name. */ -/* size; Size of name buffer. */ -void -os_flavor(char *namebuf, unsigned size) -{ - strcpy(namebuf, "-"); -} - -/* int* pMajor; Pointer to major version. */ -/* int* pMinor; Pointer to minor version. */ -/* int* pBuild; Pointer to build number. */ -void -os_version(int *pMajor, int *pMinor, int *pBuild) -{ - char os_ver[MAX_VER_STR+2]; - char* release; /* Pointer to the release string: - * X.Y or X.Y.Z. - */ - strncpy(os_ver, vxWorksVersion, MAX_VER_STR); - release = os_ver; - *pMajor = get_number(&release); - *pMinor = get_number(&release); - *pBuild = get_number(&release); -} - -void init_getenv_state(GETENV_STATE *state) -{ - *state = NULL; -} - -char *getenv_string(GETENV_STATE *state0) -{ - return NULL; -} - -void fini_getenv_state(GETENV_STATE *state) -{ - *state = NULL; -} - -/************************** Port I/O *******************************/ - - -/* I. Common stuff */ - -#define TMP_BUF_MAX (tmp_buf_size - 1024) -static byte *tmp_buf; -static Uint tmp_buf_size; - -/* II. The spawn/fd/vanilla drivers */ - -/* This data is shared by these drivers - initialized by spawn_init() */ -static struct driver_data { - int port_num, ofd, packet_bytes, report_exit; - int exitcode, exit_reported; /* For returning of exit codes. */ -} *driver_data; /* indexed by fd */ - -/* - * Locking only for exitcodes and exit_reported, one global sem for all - * spawn ports as this is rare. 
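[Editor's note] sys_gethrtime above masks the collected tick count to 31 bits and, whenever the masked value goes backwards, adds another 2^31 ticks before scaling by the nanoseconds per clock tick. A standalone sketch of that wrap compensation; extend_ticks is an invented name, the 100 Hz rate is an assumption standing in for erts_clock_rate, and it only works if it is called at least once per wrap period, as the original relies on as well.

#include <stdio.h>

/* Extend a wrapping 31-bit tick counter to 64 bits, the way the
 * deleted sys_gethrtime compensated for ticks_collected wrapping. */
static long long extend_ticks(unsigned long raw)
{
    static unsigned long last = 0;
    static long long wrap = 0;
    unsigned long t = raw & 0x7FFFFFFFUL;

    if (t < last)
        wrap += 1LL << 31;       /* counter wrapped since the last call */
    last = t;
    return wrap + (long long)t;
}

int main(void)
{
    /* 100 Hz clock: 10^9 / 100 ns per tick, in the role of erts_clock_rate. */
    const long long ns_per_tick = 1000000000LL / 100;

    printf("%lld ns\n", extend_ticks(0x7FFFFFF0UL) * ns_per_tick);
    printf("%lld ns\n", extend_ticks(0x00000005UL) * ns_per_tick); /* after wrap */
    return 0;
}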
- */ -static SEM_ID driver_data_sem = NULL; -/* - * Also locking when looking up entries in the load table - */ -static SEM_ID entry_data_sem = NULL; - -/* We maintain a linked fifo queue of these structs in order */ -/* to manage unfinnished reads/and writes on differenet fd's */ - -typedef struct pend { - char *cpos; - int fd; - int remain; - struct pend *next; - char buf[1]; /* this is a trick to be able to malloc one chunk */ -} Pend; - -static struct fd_data { - int inport, outport; - char *buf, *cpos; - int sz, remain; /* for input on fd */ - Pend* pending; /* pending outputs */ - -} *fd_data; /* indexed by fd */ - - -/* Driver interfaces */ -static ErlDrvData spawn_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts); -static ErlDrvData fd_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts); -static ErlDrvData vanilla_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts); -static int spawn_init(void); -static void fd_stop(ErlDrvData); -static void stop(ErlDrvData); -static void ready_input(ErlDrvData fd, ErlDrvEvent ready_fd); -static void ready_output(ErlDrvData fd, ErlDrvEvent ready_fd); -static void output(ErlDrvData fd, char *buf, ErlDrvSizeT len); -static void stop_select(ErlDrvEvent, void*); - -struct erl_drv_entry spawn_driver_entry = { - spawn_init, - spawn_start, - stop, - output, - ready_input, - ready_output, - "spawn", - NULL, /* finish */ - NULL, /* handle */ - NULL, /* control */ - NULL, /* timeout */ - NULL, /* outputv */ - NULL, /* ready_async */ - NULL, /* flush */ - NULL, /* call */ - NULL, /* event */ - ERL_DRV_EXTENDED_MARKER, - ERL_DRV_EXTENDED_MAJOR_VERSION, - ERL_DRV_EXTENDED_MINOR_VERSION, - 0, /* ERL_DRV_FLAGs */ - NULL, /* handle2 */ - NULL, /* process_exit */ - stop_select - -}; -struct erl_drv_entry fd_driver_entry = { - NULL, - fd_start, - fd_stop, - output, - ready_input, - ready_output, - "fd", - NULL, /* finish */ - NULL, /* handle */ - NULL, /* control */ - NULL, /* timeout */ - NULL, /* outputv */ - NULL, /* ready_async */ - NULL, /* flush */ - NULL, /* call */ - NULL, /* event */ - ERL_DRV_EXTENDED_MARKER, - ERL_DRV_EXTENDED_MAJOR_VERSION, - ERL_DRV_EXTENDED_MINOR_VERSION, - 0, /* ERL_DRV_FLAGs */ - NULL, /* handle2 */ - NULL, /* process_exit */ - stop_select -}; -struct erl_drv_entry vanilla_driver_entry = { - NULL, - vanilla_start, - stop, - output, - ready_input, - ready_output, - "vanilla", - NULL, /* finish */ - NULL, /* handle */ - NULL, /* control */ - NULL, /* timeout */ - NULL, /* outputv */ - NULL, /* ready_async */ - NULL, /* flush */ - NULL, /* call */ - NULL, /* event */ - ERL_DRV_EXTENDED_MARKER, - ERL_DRV_EXTENDED_MAJOR_VERSION, - ERL_DRV_EXTENDED_MINOR_VERSION, - 0, /* ERL_DRV_FLAGs */ - NULL, /* handle2 */ - NULL, /* process_exit */ - stop_select -}; - -/* -** Set up enough of the driver_data structure to be able to report exit status. -** Some things may be initiated again, but that is no real problem. 
-*/ -static int pre_set_driver_data(int ifd, int ofd, - int read_write, int report_exit) { - if (read_write & DO_READ) { - driver_data[ifd].report_exit = report_exit; - driver_data[ifd].exitcode = 0; - driver_data[ifd].exit_reported = 0; - if (read_write & DO_WRITE) { - driver_data[ifd].ofd = ofd; - if (ifd != ofd) { - driver_data[ofd] = driver_data[ifd]; - driver_data[ofd].report_exit = 0; - } - } else { /* DO_READ only */ - driver_data[ifd].ofd = -1; - } - return(ifd); - } else { /* DO_WRITE only */ - driver_data[ofd].report_exit = 0; - driver_data[ofd].exitcode = 0; - driver_data[ofd].exit_reported = 0; - driver_data[ofd].ofd = ofd; - return(ofd); - } -} - -/* -** Set up the driver_data structure, it may have been initiated -** partly by the function above, but we dont care. -*/ -static int set_driver_data(int port_num, int ifd, int ofd, - int packet_bytes, int read_write, - int report_exit) -{ - if (read_write & DO_READ) { - driver_data[ifd].packet_bytes = packet_bytes; - driver_data[ifd].port_num = port_num; - driver_data[ifd].report_exit = report_exit; - if (read_write & DO_WRITE) { - driver_data[ifd].ofd = ofd; - if (ifd != ofd) { - driver_data[ofd] = driver_data[ifd]; - driver_data[ofd].report_exit = 0; - } - } else { /* DO_READ only */ - driver_data[ifd].ofd = -1; - } - (void) driver_select(port_num, ifd, ERL_DRV_READ|ERL_DRV_USE, 1); - return(ifd); - } else { /* DO_WRITE only */ - driver_data[ofd].packet_bytes = packet_bytes; - driver_data[ofd].port_num = port_num; - driver_data[ofd].report_exit = 0; - driver_data[ofd].ofd = ofd; - return(ofd); - } -} - -static int need_new_sems = 1; - -static int spawn_init(void) -{ - char *stackenv; - int size; - driver_data = (struct driver_data *) - erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data)); - if (need_new_sems) { - driver_data_sem = semMCreate - (SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE); - entry_data_sem = semMCreate - (SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE); - } - if (driver_data_sem == NULL || entry_data_sem == NULL) { - erl_exit(1,"Could not allocate driver locking semaphore."); - } - need_new_sems = 0; - - (void)uxPipeDrv(); /* Install pipe driver */ - - if ((stackenv = getenv("ERLPORTSTACKSIZE")) != NULL && - (size = atoi(stackenv)) > 0) - port_stack_size = size; - else - port_stack_size = DEFAULT_PORT_STACK_SIZE; - return 0; -} - -/* Argv has to be built vith the save_xxx routines, not with whathever - sys_xxx2 has in mind... 
*/ -#define argv_alloc save_malloc -#define argv_realloc save_realloc -#define argv_free save_free -/* Build argv, return argc or -1 on failure */ -static int build_argv(char *name, char ***argvp) -{ - int argvsize = 10, argc = 0; - char *args, *arglast = NULL, *argp; - char **argv; - -#ifdef DEBUG - fdprintf(2, "Building argv, %s =>\n", name); -#endif - if ((argv = (char **)argv_alloc(argvsize * sizeof(char *))) == NULL) - return(-1); - if ((args = argv_alloc(strlen(name) + 1)) == NULL) - return(-1); - strcpy(args, name); - argp = strtok_r(args, " \t", &arglast); - while (argp != NULL) { - if (argc + 1 >= argvsize) { - argvsize += 10; - argv = (char **)argv_realloc((char *)argv, argvsize*sizeof(char *)); - if (argv == NULL) { - argv_free(args); - return(-1); - } - } -#ifdef DEBUG - fdprintf(2, "%s\n", argp); -#endif - argv[argc++] = argp; - argp = strtok_r((char *)NULL, " \t", &arglast); - } - argv[argc] = NULL; - *argvp = argv; - return(argc); -} -#undef argv_alloc -#undef argv_realloc -#undef argv_free - - -/* Lookup and return global text symbol or NULL on failure - Symbol name is null-terminated and without the leading '_' */ -static FUNCPTR -lookup(char *sym) -{ - char buf[256]; - char *symname = buf; - int len; - FUNCPTR entry; - SYM_TYPE type; - - len = strlen(sym); - if (len > 254 && (symname = malloc(len+2)) == NULL) - return(NULL); -#if defined _ARCH_PPC || defined SIMSPARCSOLARIS - /* GCC for PPC and SIMSPARC doesn't add a leading _ to symbols */ - strcpy(symname, sym); -#else - sprintf(symname, "_%s", sym); -#endif - if (symFindByNameAndType(sysSymTbl, symname, (char **)&entry, - &type, N_EXT | N_TEXT, N_EXT | N_TEXT) != OK) - entry = NULL; - if (symname != buf) - free(symname); - return(entry); -} - -/* This function is spawned to build argc, argv, lookup the symbol to call, - connect and set up file descriptors, and make the actual call. - N.B. 'name' was allocated by the Erlang task (through plain_malloc) and - is freed by this port program task. - Note: 'name' may be a path containing '/'. */ - -static void call_proc(char *name, int ifd, int ofd, int read_write, - int redir_stderr, int driver_index, - int p6, int p7, int p8, int p9) -{ - int argc; - char **argv, *bname; - FUNCPTR entry; - int ret = -1; - - /* Must consume 'name' */ - argc = build_argv(name, &argv); - plain_free(name); - /* Find basename of path */ - if ((bname = strrchr(argv[0], '/')) != NULL) { - bname++; - } else { - bname = argv[0]; - } -#ifdef DEBUG - fdprintf(2, "Port program name: %s\n", bname); -#endif - semTake(entry_data_sem, WAIT_FOREVER); - - if (argc > 0) { - if ((entry = lookup(bname)) == NULL) { - int fd; - char *fn; - /* NOTE: We don't check the return value of loadModule, - since that was incompatibly changed from 5.0.2b to 5.1, - but rather do a repeated lookup(). 
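[Editor's note] build_argv above splits the command line on spaces and tabs with strtok_r and grows the vector in steps of ten. A minimal portable sketch of the same approach, using plain malloc/realloc/strdup instead of the save_* reclaim wrappers; split_args is an invented name and the sketch deliberately leaks the vector to stay short.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "cmd arg1 arg2" into a NULL-terminated argv, much like the
 * deleted build_argv; returns argc or -1 on allocation failure. */
static int split_args(const char *cmd, char ***argvp)
{
    int cap = 10, argc = 0;
    char **argv = malloc(cap * sizeof(char *));
    char *args = argv ? strdup(cmd) : NULL;
    char *save = NULL, *tok;

    if (!argv || !args)
        return -1;
    for (tok = strtok_r(args, " \t", &save); tok != NULL;
         tok = strtok_r(NULL, " \t", &save)) {
        if (argc + 1 >= cap) {
            cap += 10;
            argv = realloc(argv, cap * sizeof(char *));
            if (!argv)
                return -1;
        }
        argv[argc++] = tok;
    }
    argv[argc] = NULL;
    *argvp = argv;
    return argc;
}

int main(void)
{
    char **argv;
    int i, argc = split_args("port_test -o/tmp/out", &argv);

    for (i = 0; i < argc; i++)
        printf("argv[%d] = %s\n", i, argv[i]);
    return 0;
}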
*/ - if ((fd = open(argv[0], O_RDONLY)) > 0) { - (void) loadModule(fd, GLOBAL_SYMBOLS); - close(fd); - entry = lookup(bname); - } - if (entry == NULL) { - /* filename == func failed, try func.o */ - if ((fn = malloc(strlen(argv[0]) + 3)) != NULL) { /* ".o\0" */ - strcpy(fn, argv[0]); - strcat(fn, ".o"); - if ((fd = open(fn, O_RDONLY)) > 0) { - (void) loadModule(fd, GLOBAL_SYMBOLS); - close(fd); - entry = lookup(bname); - } - free(fn); - } - } - } - } else { - entry = NULL; - } - semGive(entry_data_sem); - - if (read_write & DO_READ) { /* emulator read */ - save_fd(ofd); - ioTaskStdSet(0, 1, ofd); /* stdout for process */ - if(redir_stderr) - ioTaskStdSet(0, 2, ofd);/* stderr for process */ - } - if (read_write & DO_WRITE) { /* emulator write */ - save_fd(ifd); - ioTaskStdSet(0, 0, ifd); /* stdin for process */ - } - if (entry != NULL) { - ret = (*entry)(argc, argv, (char **)NULL); /* NULL for envp */ - } else { - fdprintf(2, "Could not exec \"%s\"\n", argv[0]); - ret = -1; - } - if (driver_data[driver_index].report_exit) { - semTake(driver_data_sem, WAIT_FOREVER); - driver_data[driver_index].exitcode = ret; - driver_data[driver_index].exit_reported = 1; - semGive(driver_data_sem); - } - /* We *don't* want to close the pipes here, but let the delete - hook take care of it - it might want to flush stdout and there'd - better be an open descriptor to flush to... */ - exit(ret); -} - -static void close_pipes(int ifd[2], int ofd[2], int read_write) -{ - if (read_write & DO_READ) { - (void) close(ifd[0]); - (void) close(ifd[1]); - } - if (read_write & DO_WRITE) { - (void) close(ofd[0]); - (void) close(ofd[1]); - } -} - -static void init_fd_data(int fd, int port_unused_argument) -{ - SET_NONBLOCKING(fd); - fd_data[fd].pending = NULL; - fd_data[fd].buf = fd_data[fd].cpos = NULL; - fd_data[fd].remain = fd_data[fd].sz = 0; -} - -static ErlDrvData spawn_start(ErlDrvPort port_num, char *name,SysDriverOpts* opts) -{ - int ifd[2], ofd[2], len, nl, id; - char taskname[11], *progname, *bname; - char *space_in_command; - int packet_bytes = opts->packet_bytes; - int read_write = opts->read_write; - int use_stdio = opts->use_stdio; - int redir_stderr = opts->redir_stderr; - int driver_index; - - if (!use_stdio){ - return (ErlDrvData) -3; - } - - /* Create pipes and set the Erlang task as owner of its - * read and write ends (through save_fd()). - */ - switch (read_write) { - case DO_READ: - if (pipe(ifd) < 0){ - return (ErlDrvData) -2; - } - if (ifd[0] >= max_files) { - close_pipes(ifd, ofd, read_write); - errno = ENFILE; - return (ErlDrvData) -2; - } - save_fd(ifd[0]); - break; - case DO_WRITE: - if (pipe(ofd) < 0) { - return (ErlDrvData) -2; - } - if (ofd[1] >= max_files) { - close_pipes(ifd, ofd, read_write); - errno = ENFILE; - return (ErlDrvData) -2; - } - save_fd(ofd[1]); - break; - case DO_READ|DO_WRITE: - if (pipe(ifd) < 0){ - return (ErlDrvData) -2; - } - if (ifd[0] >= max_files || pipe(ofd) < 0) { - close_pipes(ifd, ofd, DO_READ); - errno = ENFILE; - return (ErlDrvData) -2; - } - if (ofd[1] >= max_files) { - close_pipes(ifd, ofd, read_write); - errno = ENFILE; - return (ErlDrvData) -2; - } - save_fd(ifd[0]); - save_fd(ofd[1]); - break; - default: - return (ErlDrvData) -1; - } - - /* Allocate space for program name to be freed by the - * spawned task. We use plain_malloc so that the allocated - * space is not owned by the Erlang task. 
- */ - - if ((progname = plain_malloc(strlen(name) + 1)) == NULL) { - close_pipes(ifd, ofd, read_write); - errno = ENOMEM; - return (ErlDrvData) -2; - } - strcpy(progname, name); - - /* Check if name contains a space - * (e.g "port_test -o/home/gandalf/tornado/wind/target/erlang") - */ - if ((space_in_command = strrchr(progname, ' ')) != NULL) { - *space_in_command = '\0'; - } - - /* resulting in "port_test" */ - if ((bname = strrchr(progname, '/')) != NULL) - bname++; - else - bname = progname; - - /* resulting in "port_test" */ - len = strlen(bname); - nl = len > 10 ? 10 : len; - strncpy(taskname, bname, nl); - taskname[nl] = '\0'; - if (space_in_command != NULL) - *space_in_command = ' '; - driver_index = pre_set_driver_data(ifd[0], ofd[1], - read_write, opts->exit_status); - - /* resetting to "port_test -o/home/gandalf/tornado/wind/target/erlang" */ - if ((id = taskSpawn(taskname, spTaskPriority, spTaskOptions, - port_stack_size, (FUNCPTR)call_proc, (int)progname, - ofd[0], ifd[1], read_write, redir_stderr, driver_index, - 0,0,0,0)) - == ERROR) { - close_pipes(ifd, ofd, read_write); - plain_free(progname); /* only when spawn fails */ - errno = ENOMEM; - return (ErlDrvData) -2; - } -#ifdef DEBUG - fdprintf(2, "Spawned %s as %s[0x%x]\n", name, taskname, id); -#endif - if (read_write & DO_READ) - init_fd_data(ifd[0], port_num); - if (read_write & DO_WRITE) - init_fd_data(ofd[1], port_num); - return (ErlDrvData) (set_driver_data(port_num, ifd[0], ofd[1], - packet_bytes,read_write, - opts->exit_status)); -} - -static ErlDrvData fd_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts) -{ - if (((opts->read_write & DO_READ) && opts->ifd >= max_files) || - ((opts->read_write & DO_WRITE) && opts->ofd >= max_files)) { - return (ErlDrvData) -1; - } - - if (opts->read_write & DO_READ) - init_fd_data(opts->ifd, port_num); - if (opts->read_write & DO_WRITE) - init_fd_data(opts->ofd, port_num); - return (ErlDrvData) (set_driver_data(port_num, opts->ifd, opts->ofd, - opts->packet_bytes, opts->read_write, 0)); -} - -static void clear_fd_data(int fd) -{ - - if (fd_data[fd].sz > 0) - erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fd_data[fd].buf); - fd_data[fd].buf = NULL; - fd_data[fd].sz = 0; - fd_data[fd].remain = 0; - fd_data[fd].cpos = NULL; -} - -static void nbio_stop_fd(int port_num, int fd) -{ - Pend *p, *p1; - - driver_select(port_num, fd, ERL_DRV_READ|ERL_DRV_WRITE, 0); - clear_fd_data(fd); - p = fd_data[fd].pending; - SET_BLOCKING(fd); - while (p) { - p1 = p->next; - free(p); - p = p1; - } - fd_data[fd].pending = NULL; -} - -static void fd_stop(ErlDrvData drv_data) -{ - int ofd; - int fd = (int) drv_data; - - nbio_stop_fd(driver_data[fd].port_num, (int)fd); - ofd = driver_data[fd].ofd; - if (ofd != fd && ofd != -1) - nbio_stop_fd(driver_data[fd].port_num, (int)ofd); /* XXX fd = ofd? */ -} - -static ErlDrvData -vanilla_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts) -{ - int flags, fd; - struct stat statbuf; - - DEBUGF(("vanilla_start, name: %s [r=%1i w=%1i]\n", name, - opts->read_write & DO_READ, - opts->read_write & DO_WRITE)); - - flags = (opts->read_write == DO_READ ? O_RDONLY : - opts->read_write == DO_WRITE ? 
O_WRONLY|O_CREAT|O_TRUNC : - O_RDWR|O_CREAT); - if ((fd = open(name, flags, 0666)) < 0){ - errno = ENFILE; - return (ErlDrvData) -2; - } - if (fd >= max_files) { - close(fd); - errno = ENFILE; - return (ErlDrvData) -2; - } - if (fstat(fd, &statbuf) < 0) { - close(fd); - errno = ENFILE; - return (ErlDrvData) -2; - } - - /* Return error for reading regular files (doesn't work) */ - if (ISREG(statbuf) && ((opts->read_write) & DO_READ)) { - close(fd); - return (ErlDrvData) -3; - } - init_fd_data(fd, port_num); - return (ErlDrvData) (set_driver_data(port_num, fd, fd, - opts->packet_bytes, opts->read_write, 0)); -} - -/* Note that driver_data[fd].ifd == fd if the port was opened for reading, */ -/* otherwise (i.e. write only) driver_data[fd].ofd = fd. */ - -static void stop(ErlDrvData drv_data) -{ - int port_num, ofd; - int fd = (int) drv_data; - - port_num = driver_data[fd].port_num; - nbio_stop_fd(port_num, fd); - driver_select(port_num, fd, ERL_DRV_USE, 0); /* close(fd) */ - - ofd = driver_data[fd].ofd; - if (ofd != fd && ofd != -1) { - nbio_stop_fd(port_num, ofd); - driver_select(port_num, ofd, ERL_DRV_USE, 0); /* close(fd) */ - } -} - -static int sched_write(int port_num,int fd, char *buf, int len, int pb) -{ - Pend *p, *p2, *p3; - int p_bytes = len; - - p = (Pend*) erts_alloc_fnf(ERTS_ALC_T_PEND_DATA, pb + len + sizeof(Pend)); - if (!p) { - driver_failure(port_num, -1); - return(-1); - } - - switch(pb) { - case 4: put_int32(len, p->buf); break; - case 2: put_int16(len, p->buf); break; - case 1: put_int8(len, p->buf); break; - case 0: break; /* Handles this case too */ - } - sys_memcpy(p->buf + pb, buf, len); - driver_select(port_num, fd, ERL_DRV_WRITE|ERL_DRV_USE, 1); - p->cpos = p->buf; - p->fd = fd; - p->next = NULL; - p->remain = len + pb; - p2 = fd_data[fd].pending; - if (p2 == NULL) - fd_data[fd].pending = p; - else { - p3 = p2->next; - while(p3) { - p_bytes += p2->remain; - p2 = p2->next; - p3 = p3->next; - } - p2->next = p; - } - if (p_bytes > (1 << 13)) /* More than 8 k pending */ - set_busy_port(port_num, 1); - return(0); -} - -/* Fd is the value returned as drv_data by the start func */ -static void output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) -{ - int buf_done, port_num, wval, pb, ofd; - byte lb[4]; - struct iovec iv[2]; - int fd = (int) drv_data; - - pb = driver_data[fd].packet_bytes; - port_num = driver_data[fd].port_num; - - if ((ofd = driver_data[fd].ofd) == -1) { - return; - } - - if (fd_data[ofd].pending) { - sched_write(port_num, ofd, buf, len, pb); - return; - } - - if ((pb == 2 && len > 65535) || (pb == 1 && len > 255)) { - driver_failure_posix(port_num, EINVAL); - return; - } - if (pb == 0) { - wval = write(ofd, buf, len); - } else { - lb[0] = (len >> 24) & 255; /* MSB */ - lb[1] = (len >> 16) & 255; - lb[2] = (len >> 8) & 255; - lb[3] = len & 255; /* LSB */ - iv[0].iov_base = (char*) lb + (4 - pb); - iv[0].iov_len = pb; - iv[1].iov_base = buf; - iv[1].iov_len = len; - wval = writev(ofd, iv, 2); - } - if (wval == pb + len ) { - return; - } - if (wval < 0) { - if ((errno == EINTR) || (errno == ERRNO_BLOCK)) { - if (pb) { - sched_write(port_num, ofd, buf ,len, pb); - } else if (pb == 0) { - sched_write(port_num, ofd, buf ,len, 0); - } - return; - } - driver_failure_posix(driver_data[fd].port_num, EINVAL); - return; - } - if (wval < pb) { - sched_write(port_num, ofd, (lb +4 -pb) + wval, pb-wval, 0); - sched_write(port_num, ofd, buf ,len, 0); - return; - } - - /* we now know that wval < (pb + len) */ - buf_done = wval - pb; - sched_write(port_num, ofd, buf + 
buf_done, len - buf_done,0); -} - -static void stop_select(ErlDrvEvent fd, void* _) -{ - close((int)fd); -} - -static int ensure_header(int fd,char *buf,int packet_size, int sofar) -{ - int res = 0; - int remaining = packet_size - sofar; - - SET_BLOCKING(fd); - if (read_fill(fd, buf+sofar, remaining) != remaining) - return -1; - switch (packet_size) { - case 1: res = get_int8(buf); break; - case 2: res = get_int16(buf); break; - case 4: res = get_int32(buf); break; - } - SET_NONBLOCKING(fd); - return(res); -} - -static int port_inp_failure(int port_num, int ready_fd, int res) -{ - (void) driver_select(port_num, ready_fd, ERL_DRV_READ|ERL_DRV_WRITE, 0); - clear_fd_data(ready_fd); - if (res == 0) { - if (driver_data[ready_fd].report_exit) { - int tmpexit = 0; - int reported; - /* Lock the driver_data structure */ - semTake(driver_data_sem, WAIT_FOREVER); - if ((reported = driver_data[ready_fd].exit_reported)) - tmpexit = driver_data[ready_fd].exitcode; - semGive(driver_data_sem); - if (reported) { - erts_fprintf(stderr,"Exitcode %d reported\r\n", tmpexit); - driver_report_exit(port_num, tmpexit); - } - } - driver_failure_eof(port_num); - } else { - driver_failure(port_num, res); - } - return 0; -} - -/* fd is the drv_data that is returned from the */ -/* initial start routine */ -/* ready_fd is the descriptor that is ready to read */ - -static void ready_input(ErlDrvData drv_data, ErlDrvEvent drv_event) -{ - int port_num, packet_bytes, res; - Uint h = 0; - char *buf; - int fd = (int) drv_data; - int ready_fd = (int) drv_event; - - port_num = driver_data[fd].port_num; - packet_bytes = driver_data[fd].packet_bytes; - - if (packet_bytes == 0) { - if ((res = read(ready_fd, tmp_buf, tmp_buf_size)) > 0) { - driver_output(port_num, (char*)tmp_buf, res); - return; - } - port_inp_failure(port_num, ready_fd, res); - return; - } - - if (fd_data[ready_fd].remain > 0) { /* We try to read the remainder */ - /* space is allocated in buf */ - res = read(ready_fd, fd_data[ready_fd].cpos, - fd_data[ready_fd].remain); - if (res < 0) { - if ((errno == EINTR) || (errno == ERRNO_BLOCK)) { - ; - } else { - port_inp_failure(port_num, ready_fd, res); - } - } else if (res == 0) { - port_inp_failure(port_num, ready_fd, res); - } else if (res == fd_data[ready_fd].remain) { /* we're done */ - driver_output(port_num, fd_data[ready_fd].buf, - fd_data[ready_fd].sz); - clear_fd_data(ready_fd); - } else { /* if (res < fd_data[ready_fd].remain) */ - fd_data[ready_fd].cpos += res; - fd_data[ready_fd].remain -= res; - } - return; - } - - - if (fd_data[ready_fd].remain == 0) { /* clean fd */ - /* We make one read attempt and see what happens */ - res = read(ready_fd, tmp_buf, tmp_buf_size); - if (res < 0) { - if ((errno == EINTR) || (errno == ERRNO_BLOCK)) - return; - port_inp_failure(port_num, ready_fd, res); - return; - } - else if (res == 0) { /* eof */ - port_inp_failure(port_num, ready_fd, res); - return; - } - else if (res < packet_bytes) { /* Ugly case... 
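[Editor's note] The output path above frames every message with a 1-, 2- or 4-byte big-endian length header (packet_bytes), and ready_input/ensure_header decode it with get_int8/16/32. A small standalone sketch of that framing; put_len and get_len are invented helpers, not the ERTS macros.

#include <stdio.h>
#include <string.h>

/* Write/read a pb-byte big-endian length header (pb = 1, 2 or 4),
 * mirroring the {packet, N} framing used by output()/ready_input(). */
static void put_len(unsigned char *hdr, int pb, unsigned long len)
{
    int i;
    for (i = 0; i < pb; i++)
        hdr[i] = (unsigned char)(len >> (8 * (pb - 1 - i)));
}

static unsigned long get_len(const unsigned char *hdr, int pb)
{
    unsigned long len = 0;
    int i;
    for (i = 0; i < pb; i++)
        len = (len << 8) | hdr[i];
    return len;
}

int main(void)
{
    unsigned char frame[4 + 5];
    const char *msg = "hello";
    int pb = 2;                       /* 2-byte header => payload <= 65535 bytes */

    put_len(frame, pb, strlen(msg));
    memcpy(frame + pb, msg, strlen(msg));
    printf("payload length = %lu\n", get_len(frame, pb));
    return 0;
}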
get at least */ - if ((h = ensure_header(ready_fd, tmp_buf, packet_bytes, res))==-1) { - port_inp_failure(port_num, ready_fd, -1); - return; - } - buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h); - if (!buf) { - port_inp_failure(port_num, ready_fd, -1); - return; - } - fd_data[ready_fd].buf = buf; - fd_data[ready_fd].sz = h; - fd_data[ready_fd].remain = h; - fd_data[ready_fd].cpos = buf; - return; - } - else { /* if (res >= packet_bytes) */ - unsigned char* cpos = tmp_buf; - int bytes_left = res; - while (1) { /* driver_output as many as possible */ - if (bytes_left == 0) { - clear_fd_data(ready_fd); - return; - } - if (bytes_left < packet_bytes) { /* Yet an ugly case */ - if((h=ensure_header(ready_fd, cpos, - packet_bytes, bytes_left))==-1) { - port_inp_failure(port_num, ready_fd, -1); - return; - } - buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h); - if (!buf) - port_inp_failure(port_num, ready_fd, -1); - fd_data[ready_fd].buf = buf; - fd_data[ready_fd].sz = h; - fd_data[ready_fd].remain = h; - fd_data[ready_fd].cpos = buf; - return; - } - switch (packet_bytes) { - case 1: h = get_int8(cpos); cpos += 1; break; - case 2: h = get_int16(cpos); cpos += 2; break; - case 4: h = get_int32(cpos); cpos += 4; break; - } - bytes_left -= packet_bytes; - /* we've got the header, now check if we've got the data */ - if (h <= (bytes_left)) { - driver_output(port_num, (char*) cpos, h); - cpos += h; - bytes_left -= h; - continue; - } - else { /* The last message we got was split */ - buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h); - if (!buf) { - port_inp_failure(port_num, ready_fd, -1); - } - sys_memcpy(buf, cpos, bytes_left); - fd_data[ready_fd].buf = buf; - fd_data[ready_fd].sz = h; - fd_data[ready_fd].remain = h - bytes_left; - fd_data[ready_fd].cpos = buf + bytes_left; - return; - } - } - return; - } - } - fprintf(stderr, "remain %d \n", fd_data[ready_fd].remain); - port_inp_failure(port_num, ready_fd, -1); -} - - -/* fd is the drv_data that is returned from the */ -/* initial start routine */ -/* ready_fd is the descriptor that is ready to read */ - -static void ready_output(ErlDrvData drv_data, ErlDrvEvent drv_event) -{ - Pend *p; - int wval; - - int fd = (int) drv_data; - int ready_fd = (int) drv_event; - - while(1) { - if ((p = fd_data[ready_fd].pending) == NULL) { - driver_select(driver_data[fd].port_num, ready_fd, - ERL_DRV_WRITE, 0); - return; - } - wval = write(p->fd, p->cpos, p->remain); - if (wval == p->remain) { - fd_data[ready_fd].pending = p->next; - erts_free(ERTS_ALC_T_PEND_DATA, p); - if (fd_data[ready_fd].pending == NULL) { - driver_select(driver_data[fd].port_num, ready_fd, - ERL_DRV_WRITE, 0); - set_busy_port(driver_data[fd].port_num, 0); - return; - } - else - continue; - } - else if (wval < 0) { - if (errno == ERRNO_BLOCK || errno == EINTR) - return; - else { - driver_select(driver_data[fd].port_num, ready_fd, - ERL_DRV_WRITE, 0); - driver_failure(driver_data[fd].port_num, -1); - return; - } - } - else if (wval < p->remain) { - p->cpos += wval; - p->remain -= wval; - return; - } - } -} - -/* Fills in the systems representation of the jam/beam process identifier. -** The Pid is put in STRING representation in the supplied buffer, -** no interpretatione of this should be done by the rest of the -** emulator. The buffer should be at least 21 bytes long. -*/ -void sys_get_pid(char *buffer){ - int p = taskIdSelf(); /* Hmm, may be negative??? requires some GB of - memory to make the TCB address convert to a - negative value. 
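[Editor's note] ready_input above keeps a per-descriptor buf/cpos/remain triple so a packet split across several reads is reassembled before it is delivered. A minimal sketch of that bookkeeping, fed from memory instead of a file descriptor; chunk_state and feed are invented names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reassembly state in the role of fd_data's buf/cpos/remain. */
struct chunk_state {
    char *buf, *cpos;
    size_t sz, remain;
};

/* Feed some bytes; returns 1 when the packet is complete. */
static int feed(struct chunk_state *st, const char *data, size_t n)
{
    if (n > st->remain)
        n = st->remain;
    memcpy(st->cpos, data, n);
    st->cpos += n;
    st->remain -= n;
    return st->remain == 0;
}

int main(void)
{
    struct chunk_state st;

    st.sz = 11;                          /* expected packet length */
    st.remain = st.sz;
    st.buf = st.cpos = malloc(st.sz + 1);

    feed(&st, "hello ", 6);              /* first partial read */
    if (feed(&st, "world", 5)) {         /* completes the packet */
        st.buf[st.sz] = '\0';
        printf("delivered: %s\n", st.buf);
    }
    free(st.buf);
    return 0;
}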
*/ - sprintf(buffer,"%d", p); -} - -int -erts_sys_putenv(char *buffer, int sep_ix) -{ - return putenv(buffer); -} - -int -erts_sys_getenv(char *key, char *value, size_t *size) -{ - char *orig_value; - int res; - orig_value = getenv(key); - if (!orig_value) - res = -1; - else { - size_t len = sys_strlen(orig_value); - if (len >= *size) { - *size = len + 1; - res = 1; - } - else { - *size = len; - sys_memcpy((void *) value, (void *) orig_value, len+1); - res = 0; - } - } - return res; -} - -int -erts_sys_getenv__(char *key, char *value, size_t *size) -{ - return erts_sys_getenv(key, value, size); -} - -void -sys_init_io(void) -{ - tmp_buf = (byte *) erts_alloc(ERTS_ALC_T_SYS_TMP_BUF, SYS_TMP_BUF_SIZE); - tmp_buf_size = SYS_TMP_BUF_SIZE; - fd_data = (struct fd_data *) - erts_alloc(ERTS_ALC_T_FD_TAB, max_files * sizeof(struct fd_data)); -} - - -/* Fill buffer, return buffer length, 0 for EOF, < 0 for error. */ - -static int read_fill(int fd, char *buf, int len) -{ - int i, got = 0; - do { - if ((i = read(fd, buf+got, len-got)) <= 0) { - return i; - } - got += i; - } while (got < len); - return (len); -} - - -/************************** Misc... *******************************/ - -extern const char pre_loaded_code[]; -extern char* const pre_loaded[]; - - -/* Float conversion */ - -int sys_chars_to_double(char *buf, double *fp) -{ - char *s = buf; - - /* The following check is incorporated from the Vee machine */ - -#define ISDIGIT(d) ((d) >= '0' && (d) <= '9') - - /* Robert says that something like this is what he really wanted: - * - * 7 == sscanf(Tbuf, "%[+-]%[0-9].%[0-9]%[eE]%[+-]%[0-9]%s", ....); - * if (*s2 == 0 || *s3 == 0 || *s4 == 0 || *s6 == 0 || *s7) - * break; - */ - - /* Scan string to check syntax. */ - if (*s == '+' || *s == '-') - s++; - - if (!ISDIGIT(*s)) /* Leading digits. */ - return -1; - while (ISDIGIT(*s)) s++; - if (*s++ != '.') /* Decimal part. */ - return -1; - if (!ISDIGIT(*s)) - return -1; - while (ISDIGIT(*s)) s++; - if (*s == 'e' || *s == 'E') { - /* There is an exponent. */ - s++; - if (*s == '+' || *s == '-') - s++; - if (!ISDIGIT(*s)) - return -1; - while (ISDIGIT(*s)) s++; - } - if (*s) /* That should be it */ - return -1; - - if (sscanf(buf, "%lf", fp) != 1) - return -1; - return 0; -} - -/* - ** Convert a double to ascii format 0.dddde[+|-]ddd - ** return number of characters converted - */ - -int sys_double_to_chars(double fp, char *buf) -{ - (void) sprintf(buf, "%.20e", fp); - return strlen(buf); -} - - -/* Floating point exceptions */ - -#if (CPU == SPARC) -jmp_buf fpe_jmp; - -RETSIGTYPE fpe_sig_handler(int sig) -{ - longjmp(fpe_jmp, 1); -} - -#elif (CPU == PPC603) -static void fix_registers(void){ - FP_CONTEXT fpcontext; - fppSave(&fpcontext); - fpcontext.fpcsr &= ~(_PPC_FPSCR_INIT); - fppRestore(&fpcontext); -} -#endif - - -/* Return a pointer to a vector of names of preloaded modules */ - -Preload* sys_preloaded(void) -{ - return (Preload *) pre_loaded; -} - -/* Return a pointer to preloaded code for module "module" */ -unsigned char* sys_preload_begin(Preload *pp) -{ - return pp->code; -} - -/* Clean up if allocated */ -void sys_preload_end(Preload *pp) -{ - /* Nothing */ -} - -/* Read a key from console (?) */ - -int sys_get_key(int fd) -{ - int c; - unsigned char rbuf[64]; - - fflush(stdout); /* Flush query ??? */ - - if ((c = read(fd,rbuf,64)) <= 0) - return c; - return rbuf[0]; -} - - -/* A real printf that does the equivalent of fprintf(stdout, ...) 
*/ - -/* ARGSUSED */ -static STATUS -stdio_write(char *buf, int nchars, int fp) -{ - if (fwrite(buf, sizeof(char), nchars, (FILE *)fp) == 0) - return(ERROR); - return(OK); -} - -int real_printf(const char *fmt, ...) -{ - va_list ap; - int err; - - va_start(ap, fmt); - err = fioFormatV(fmt, ap, stdio_write, (int)stdout); - va_end(ap); - return(err); -} - - -/* - * Little function to do argc, argv calls from (e.g.) VxWorks shell - * The arguments should be in the form of a single ""-enclosed string - * NOTE: This isn't really part of the emulator, just included here - * so we can use the handy functions and memory reclamation. - */ -void argcall(char *args) -{ - int argc; - char **argv; - FUNCPTR entry; - - if (args != NULL) { - if ((argc = build_argv(args, &argv)) > 0) { - if ((entry = lookup(argv[0])) != NULL) - (*entry)(argc, argv, (char **)NULL); /* NULL for envp */ - else - fprintf(stderr, "Couldn't find %s\n", argv[0]); - } else - fprintf(stderr, "Failed to build argv!\n"); - } else - fprintf(stderr, "No argument list!\n"); -} - - -/* That concludes the Erlang stuff - now we just need to implement an OS... - - Just kidding, but resource reclamation isn't the strength of VxWorks */ -#undef calloc -#undef free -#undef cfree -#undef malloc -#undef realloc -#undef open -#undef creat -#undef socket -#undef accept -#undef close -#undef fopen -#undef fdopen -#undef freopen -#undef fclose - -/********************* Using elib_malloc ****************************/ -/* This gives us yet another level of malloc wrappers. The purpouse */ -/* is to be able to select between different varieties of memory */ -/* allocation without recompiling. */ -/* Maybe the performance is somewhat degraded by this, but */ -/* on the other hand, performance may be much better if the most */ -/* suiting malloc is used (not to mention the much lower */ -/* fragmentation). */ -/* /Patrik N */ -/********************************************************************/ - -/* - * I don't want to include the whole elib header, especially - * as it uses char * for generic pointers. Let's fool ANSI C instead. - */ -extern void *elib_malloc(size_t); -extern void *elib_realloc(void *, size_t); -extern void elib_free(void *); -extern void elib_init(void *, int); -extern void elib_force_init(void *, int); -extern size_t elib_sizeof(void *); - -/* Flags */ -#define USING_ELIB_MALLOC 1 /* We are using the elib_malloc */ -#define WARN_MALLOC_MIX 2 /* Warn if plain malloc or save_malloc - is mixed with sys_free2 or - sys_realloc2 */ -#define REALLOC_MOVES 4 /* Always move on realloc - (less fragmentation) */ -#define USER_POOL 8 /* The user supplied the memory - pool, it was not save_alloced. */ -#define RECLAIM_USER_POOL 16 /* Use the reclaim mechanism in the - user pool. */ -#define NEW_USER_POOL 32 /* The user pool is newly suppllied, - any old pool should be discarded */ - - -#define ELIB_LOCK \ -if(alloc_flags & USING_ELIB_MALLOC) \ - semTake(elib_malloc_sem, WAIT_FOREVER) - -#define ELIB_UNLOCK \ -if(alloc_flags & USING_ELIB_MALLOC) \ - semGive(elib_malloc_sem) - -#define USER_RECLAIM() ((alloc_flags & USING_ELIB_MALLOC) && \ - (alloc_flags & USER_POOL) && \ - (alloc_flags & RECLAIM_USER_POOL)) - -/* - * Global state - * The use of function pointers for the malloc/realloc/free functions - * is actually only useful in the malloc case, we must know what kind of - * realloc/free we are going to use, so we could call elib_xxx directly. 
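[Editor's note] The comment above explains that the malloc/realloc/free backends are reached through function pointers so the backing allocator (save_malloc or elib_malloc) can be selected at start-up without recompiling. A stripped-down sketch of that pattern in portable C; counting_malloc and pick_allocator are invented stand-ins, not the ERTS allocators, and locking is omitted.

#include <stdio.h>
#include <stdlib.h>

/* Function-pointer indirection as in the deleted sys.c: the concrete
 * allocator is picked once at init time, callers never change. */
typedef void *(*alloc_fn)(size_t);
typedef void  (*free_fn)(void *);

static void *counting_malloc(size_t sz)
{
    static unsigned long calls;
    printf("alloc #%lu (%zu bytes)\n", ++calls, sz);
    return malloc(sz);
}

static alloc_fn actual_alloc = malloc;   /* default backend */
static free_fn  actual_free  = free;

static void pick_allocator(int counting)
{
    if (counting)
        actual_alloc = counting_malloc;  /* swap backend at init */
}

int main(void)
{
    void *p;

    pick_allocator(1);
    p = actual_alloc(128);
    actual_free(p);
    return 0;
}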
- * However, as the overhead is small and this construction makes it - * fairly easy to add another malloc algorithm, the function pointers - * are used in realloc/free to. - */ -static MallocFunction actual_alloc = &save_malloc; -static ReallocFunction actual_realloc = &save_realloc; -static FreeFunction actual_free = &save_free; -static int alloc_flags = 0; -static int alloc_pool_size = 0; -static void *alloc_pool_ptr = NULL; -static SEM_ID elib_malloc_sem = NULL; - -/* - * Descide if we should use the save_free instead of elib_free or, - * in the case of the free used in a delete hook, if we should - * use plain free instead of elib_free. - */ -static int use_save_free(void *ptr){ - register int diff = ((char *) ptr) - ((char *) alloc_pool_ptr); - /* - * Hmmm... should it be save_free even if diff is exactly 0? - * The answer is Yes if the whole area is save_alloced and No if not, - * so reclaim_free_hook is NOT run in the case of one save_alloced area. - */ - return (!(alloc_flags & USING_ELIB_MALLOC) || - (diff < 0 || diff >= alloc_pool_size)); -} - -/* - * A free function used by the task deletion hook for the save_xxx functions. - * Set with the set_reclaim_free_function function. - */ -static void reclaim_free_hook(void *ptr){ - if(use_save_free(ptr)){ - free(ptr); - } else { - ELIB_LOCK; - (*actual_free)(ptr); - ELIB_UNLOCK; - } -} - - -/* - * Initialize, sets the values of pointers based on - * either nothing (the default) or what's set previously by the - * erl_set_memory_block function. - */ -static void initialize_allocation(void){ - set_reclaim_free_function(NULL); - if(alloc_pool_size == 0){ - actual_alloc = (void *(*)(size_t))&save_malloc; - actual_realloc = (void *(*)(void *, size_t))&save_realloc; - actual_free = &save_free; - alloc_flags &= ~(USING_ELIB_MALLOC | USER_POOL | RECLAIM_USER_POOL); - } else { - if(elib_malloc_sem == NULL) - elib_malloc_sem = semMCreate - (SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE); - if(elib_malloc_sem == NULL) - erl_exit(1,"Could not create mutex semaphore for elib_malloc"); - if(!(alloc_flags & USER_POOL)){ - if((alloc_pool_ptr = save_malloc(alloc_pool_size)) == NULL) - erl_exit(1,"Erlang set to allocate a %d byte block initially;" - " not enough memory available.", alloc_pool_size); - elib_force_init(alloc_pool_ptr, alloc_pool_size); - } else if(alloc_flags & NEW_USER_POOL){ - elib_force_init(alloc_pool_ptr, alloc_pool_size); - } - actual_alloc=&elib_malloc; - actual_realloc=&elib_realloc; - actual_free=&elib_free; - alloc_flags |= USING_ELIB_MALLOC; - /* We MUST see to that the right free function is used - otherwise we'll get a very nasty crash! */ - if(USER_RECLAIM()) - set_reclaim_free_function(&reclaim_free_hook); - } - alloc_flags &= ~(NEW_USER_POOL); /* It's never new after initialization*/ -} - -/* This does not exist on other platforms, we just use it in sys.c - and the BSD resolver */ -void *sys_calloc2(Uint nelem, Uint elsize){ - void *ptr = erts_alloc_fnf(ERTS_ALC_T_UNDEF, nelem*elsize); - if(ptr != NULL) - memset(ptr,0,nelem*elsize); - return ptr; -} - -/* - * The malloc wrapper - */ -void * -erts_sys_alloc(ErtsAlcType_t type, void *extra, Uint size) -{ - register void *ret; - ELIB_LOCK; - if(USER_RECLAIM()) - ret = save_malloc2((size_t)size,actual_alloc); - else - ret = (*actual_alloc)((size_t)size); - ELIB_UNLOCK; - return ret; -} - -/* - * The realloc wrapper, may respond to the "realloc-always-moves" flag - * if the area is initially allocated with elib_malloc. 
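[Editor's note] use_save_free above decides which free routine to call by testing whether the pointer lies inside the elib memory pool. A small portable sketch of that range test; in_pool is an invented name, and comparing through uintptr_t sidesteps the formally undefined pointer subtraction the original relied on, so treat it as an illustration of the idea rather than the original code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Pool-membership test in the spirit of use_save_free: pointers
 * inside [pool, pool+size) go to the pool allocator's free,
 * anything else to plain free(). */
static int in_pool(const void *ptr, const void *pool, size_t pool_size)
{
    uintptr_t p  = (uintptr_t)ptr;
    uintptr_t lo = (uintptr_t)pool;
    return p >= lo && p < lo + pool_size;
}

int main(void)
{
    char pool[256];
    char *inside  = pool + 10;
    char *outside = malloc(16);

    printf("inside:  %d\n", in_pool(inside, pool, sizeof(pool)));
    printf("outside: %d\n", in_pool(outside, pool, sizeof(pool)));
    free(outside);
    return 0;
}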
- */ -void * -erts_sys_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size) -{ - register void *ret; - if(use_save_free(ptr)){ - if((alloc_flags & WARN_MALLOC_MIX) && - (alloc_flags & USING_ELIB_MALLOC)) - erts_fprintf(stderr,"Warning, save_malloced data realloced " - "by sys_realloc2\n"); - return save_realloc(ptr, (size_t) size); - } else { - ELIB_LOCK; - if((alloc_flags & REALLOC_MOVES) && - (alloc_flags & USING_ELIB_MALLOC)){ - size_t osz = elib_sizeof(ptr); - if(USER_RECLAIM()) - ret = save_malloc2((size_t) size, actual_alloc); - else - ret = (*actual_alloc)((size_t) size); - if(ret != NULL){ - memcpy(ret,ptr,(((size_t)size) < osz) ? ((size_t)size) : osz); - if(USER_RECLAIM()) - save_free2(ptr,actual_free); - else - (*actual_free)(ptr); - } - } else { - if(USER_RECLAIM()) - ret = save_realloc2(ptr,(size_t)size,actual_realloc); - else - ret = (*actual_realloc)(ptr,(size_t)size); - } - ELIB_UNLOCK; - return ret; - } -} - -/* - * Wrapped free(). - */ -void -erts_sys_free(ErtsAlcType_t type, void *extra, void *ptr) -{ - if(use_save_free(ptr)){ - /* - * This might happen when linked in drivers use save_malloc etc - * directly. - */ - if((alloc_flags & WARN_MALLOC_MIX) && - (alloc_flags & USING_ELIB_MALLOC)) - erts_fprintf(stderr,"Warning, save_malloced data freed by " - "sys_free2\n"); - save_free(ptr); - } else { - ELIB_LOCK; - if(USER_RECLAIM()) - save_free2(ptr,actual_free); - else - (*actual_free)(ptr); - ELIB_UNLOCK; - } -} - -/* - * External interface to be called before erlang is started - * Parameters: - * isize: The size of the memory block where erlang should malloc(). - * iptr: (optional) A pointer to a user supplied memory block of - * size isize. - * warn_save: Instructs sys_free2 and sys_realloc2 to warn if - * memory allocation/reallocation/freeing is mixed between - * pure malloc/save_malloc/sys_alloc2 routines (only - * warns if elib is actually used in the sys_alloc2 routines). - * realloc_moves: Always allocate a fresh memory block on reallocation - * (less fragmentation). - * reclaim_in_supplied: Use memory reclaim mechanisms inside the user - * supplied area, this makes one area reusable between - * starts of erlang and might be nice for drivers etc. 
- */ - -int erl_set_memory_block(int isize, int iptr, int warn_save, - int realloc_moves, int reclaim_in_supplied, int p5, - int p6, int p7, int p8, int p9){ - if(erlang_id != 0){ - erts_fprintf(stderr,"Error, cannot set erlang memory block while an " - "erlang task is running!\n"); - return 1; - } - if(isize < 8 * 1024 *1024) - erts_fprintf(stderr, - "Warning, the memory pool of %dMb may be to small to " - "run erlang in!\n", isize / (1024 * 1024)); - alloc_pool_size = (size_t) isize; - alloc_pool_ptr = (void *) iptr; - alloc_flags = 0; - /* USING_ELIB_MALLOC gets set by the initialization routine */ - if((void *)iptr != NULL) - alloc_flags |= (USER_POOL | NEW_USER_POOL); - if(realloc_moves) - alloc_flags |= REALLOC_MOVES; - if(warn_save) - alloc_flags |= WARN_MALLOC_MIX; - if((void *)iptr != NULL && reclaim_in_supplied) - alloc_flags |= RECLAIM_USER_POOL; - return 0; -} - -/* External statistics interface */ -int erl_memory_show(int p0, int p1, int p2, int p3, int p4, int p5, - int p6, int p7, int p8, int p9){ - struct elib_stat statistics; - if(!(alloc_flags & USING_ELIB_MALLOC) && erlang_id != 0){ - erts_printf("Using plain save_alloc, use memShow instead.\n"); - return 1; - } - if(erlang_id == 0 && !((alloc_flags & USER_POOL) && - !(alloc_flags & NEW_USER_POOL))){ - erts_printf("Sorry, no allocation statistics until erlang " - "is started.\n"); - return 1; - } - erts_printf("Allocation settings:\n"); - erts_printf("Using elib_malloc with memory pool size of %lu bytes.\n", - (unsigned long) alloc_pool_size); - erts_printf("Realloc-always-moves is %s\n", - (alloc_flags & REALLOC_MOVES) ? "on" : "off"); - erts_printf("Warnings about mixed malloc/free's are %s\n", - (alloc_flags & WARN_MALLOC_MIX) ? "on" : "off"); - if(alloc_flags & USER_POOL){ - erts_printf("The memory block used by elib is user supplied " - "at 0x%08x.\n", (unsigned int) alloc_pool_ptr); - if(alloc_flags & RECLAIM_USER_POOL) - erts_printf("Allocated memory within the user supplied pool\n" - " will be automatically reclaimed at task exit.\n"); - } else { - erts_printf("The memory block used by elib is save_malloc'ed " - "at 0x%08x.\n", (unsigned int) alloc_pool_ptr); - } - erts_printf("Statistics from elib_malloc:\n"); - ELIB_LOCK; - - elib_stat(&statistics); - ELIB_UNLOCK; - erts_printf("Type Size (bytes) Number of blocks\n"); - erts_printf("============= ============ ================\n"); - erts_printf("Total: %12lu %16lu\n", - (unsigned long) statistics.mem_total*4, - (unsigned long) statistics.mem_blocks); - erts_printf("Allocated: %12lu %16lu\n", - (unsigned long) statistics.mem_alloc*4, - (unsigned long) statistics.mem_blocks-statistics.free_blocks); - erts_printf("Free: %12lu %16lu\n", - (unsigned long) statistics.mem_free*4, - (unsigned long) statistics.free_blocks); - erts_printf("Largest free: %12lu -\n\n", - (unsigned long) statistics.max_free*4); - return 0; -} - - -/* -** More programmer friendly (as opposed to user friendly ;-) interface -** to the memory statistics. Resembles the VxWorks memPartInfoGet but -** does not take a partition id as parameter... 
-*/ -int erl_mem_info_get(MEM_PART_STATS *stats){ - struct elib_stat statistics; - if(!(alloc_flags & USING_ELIB_MALLOC)) - return -1; - ELIB_LOCK; - elib_stat(&statistics); - ELIB_UNLOCK; - stats->numBytesFree = statistics.mem_free*4; - stats->numBlocksFree = statistics.free_blocks; - stats->maxBlockSizeFree = statistics.max_free*4; - stats->numBytesAlloc = statistics.mem_alloc*4; - stats->numBlocksAlloc = statistics.mem_blocks-statistics.free_blocks; - return 0; -} - -/********************* Pipe driver **********************************/ -/* - * Purpose: Pipe driver with Unix (unnamed) pipe semantics. - * Author: Peter Hogfeldt ([email protected]) from an outline - * by Per Hedeland ([email protected]). - * - * Note: This driver must *not* use the reclaim facilities, hence it - * is placed here. (after the #undef's of open,malloc etc) - * - * This driver supports select() and non-blocking I/O via - * ioctl(fd, FIONBIO, val). - * - * 1997-03-21 Peter Hogfeldt - * Added non-blocking I/O. - * - */ - -/* - * SEMAPHORES - * - * Each end of a pipe has two semaphores: semExcl for serialising access to - * the pipe end, and semBlock for blocking I/O. - * - * reader->semBlock is available (full) if and only if the pipe is - * not empty, or the write end is closed. Otherwise - * it is unavailable (empty). It is initially - * unavailable. - * - * writer->semBlock is available (full) if and only if the pipe is - * not full, or if the reader end is closed. - * Otherwise it is unavailable. It is initially - * available. - */ - -#define UXPIPE_SIZE 4096 - -/* Forward declaration */ -typedef struct uxPipeDev UXPIPE_DEV; - -/* - * Pipe descriptor (one for each open pipe). - */ -typedef struct { - int drvNum; - UXPIPE_DEV *reader, *writer; - RING_ID ringId; -} UXPIPE; - -/* - * Device descriptor (one for each of the read and write - * ends of an open pipe). - */ -struct uxPipeDev { - UXPIPE *pipe; - int blocking; - SEL_WAKEUP_LIST wakeupList; - SEM_ID semExcl; - SEM_ID semBlock; -}; - -int uxPipeDrvNum = 0; /* driver number of pipe driver */ - -#define PIPE_NAME "/uxpipe" /* only used internally */ -#define PIPE_READ "/r" /* ditto */ -#define PIPE_WRITE "/w" /* ditto */ - -LOCAL char pipeRead[64], pipeWrite[64]; -LOCAL DEV_HDR devHdr; -LOCAL UXPIPE *newPipe; /* communicate btwn open()s in pipe() */ -LOCAL SEM_ID pipeSem; /* mutual exclusion in pipe() */ - -/* forward declarations */ -LOCAL int uxPipeOpen(DEV_HDR *pDv, char *name, int mode); -LOCAL int uxPipeClose(UXPIPE_DEV *pDev); -LOCAL int uxPipeRead(UXPIPE_DEV *pDev, char *buffer, int maxbytes); -LOCAL int uxPipeWrite(UXPIPE_DEV *pDev, char *buffer, int nbytes); -LOCAL STATUS uxPipeIoctl(FAST UXPIPE_DEV *pDev, FAST int function, int arg); - - -/*************************************************************************** - * - * uxPipeDrv - install Unix pipe driver - * - * This routine initializes the Unix pipe driver. It must be called - * before any other routine in this driver. - * - * RETURNS: - * OK, or ERROR if I/O system is unable to install driver. 
- */ - -STATUS -uxPipeDrv(void) -{ - if (uxPipeDrvNum > 0) - return (OK); /* driver already installed */ - if ((uxPipeDrvNum = iosDrvInstall((FUNCPTR) NULL, (FUNCPTR) NULL, - uxPipeOpen, uxPipeClose, uxPipeRead, - uxPipeWrite, uxPipeIoctl)) == ERROR) - return (ERROR); - if (iosDevAdd(&devHdr, PIPE_NAME, uxPipeDrvNum) == ERROR) - return (ERROR); - strcpy(pipeRead, PIPE_NAME); - strcat(pipeRead, PIPE_READ); - strcpy(pipeWrite, PIPE_NAME); - strcat(pipeWrite, PIPE_WRITE); - if ((pipeSem = semMCreate(SEM_Q_PRIORITY | SEM_DELETE_SAFE)) == NULL) - return (ERROR); - return (OK); -} - -/*************************************************************************** - * - * uxPipeOpen - open a pipe - * - * RETURNS: Pointer to device descriptor, or ERROR if memory cannot be - * allocated (errno = ENOMEM), or invalid argument (errno = EINVAL). - */ - -/* - * DEV_HDR *pDv; pointer to device header (dummy) - * char *name; name of pipe to open ("/r" or "/w") - * int mode; access mode (O_RDONLY or O_WRONLY) - */ -LOCAL int -uxPipeOpen(DEV_HDR *pDv, char *name, int mode) -{ - UXPIPE_DEV *reader, *writer; - - if (mode == O_RDONLY && strcmp(name, PIPE_READ) == 0) { - /* reader open */ - if ((newPipe = (UXPIPE *) malloc(sizeof(UXPIPE))) != NULL) { - if ((newPipe->ringId = rngCreate(UXPIPE_SIZE)) != NULL) { - if ((reader = (UXPIPE_DEV *) malloc(sizeof(UXPIPE_DEV))) != NULL) { - if ((reader->semExcl = semBCreate(SEM_Q_FIFO, SEM_FULL)) != NULL) { - if ((reader->semBlock = semBCreate(SEM_Q_FIFO, SEM_EMPTY)) != NULL) { - reader->pipe = newPipe; - reader->blocking = 1; - selWakeupListInit(&reader->wakeupList); - newPipe->reader = reader; - newPipe->writer = NULL; - newPipe->drvNum = uxPipeDrvNum; - return ((int) reader); - } - semDelete(reader->semExcl); - } - free(reader); - } - rngDelete(newPipe->ringId); - } - free(newPipe); - newPipe = NULL; - errno = ENOMEM; - } - } else if (mode == O_WRONLY && strcmp(name, PIPE_WRITE) == 0) { - /* writer open */ - if (newPipe != NULL && - (writer = (UXPIPE_DEV *) malloc(sizeof(UXPIPE_DEV))) != NULL) { - if ((writer->semExcl = semBCreate(SEM_Q_FIFO, SEM_FULL)) != NULL) { - if ((writer->semBlock = semBCreate(SEM_Q_FIFO, SEM_FULL)) != NULL) { - writer->blocking = 1; - writer->pipe = newPipe; - selWakeupListInit(&writer->wakeupList); - newPipe->writer = writer; - newPipe = NULL; - return ((int) writer); - } - semDelete(writer->semExcl); - } - free(writer); - } - if (newPipe != NULL) - free(newPipe); - newPipe = NULL; - errno = ENOMEM; - } else { - errno = EINVAL; - } - return (ERROR); -} - -/*************************************************************************** - * - * uxPipeClose - close read or write end of a pipe. - * - * RETURNS: - * OK, or ERROR if device descriptor does not refer to an open read or - write end of a pipe (errno = EBADF). 
- */ - -LOCAL int -uxPipeClose(UXPIPE_DEV *pDev) -{ - UXPIPE *pajp = pDev->pipe; - - taskLock(); - if (pDev == pajp->reader) { - /* Close this end */ - semDelete(pDev->semExcl); - semDelete(pDev->semBlock); - free(pDev); - pajp->reader = NULL; - /* Inform the other end */ - if (pajp->writer != NULL) { - selWakeupAll(&pajp->writer->wakeupList, SELWRITE); - semGive(pajp->writer->semBlock); - } - } else if (pDev == pajp->writer) { - /* Close this end */ - semDelete(pDev->semExcl); - semDelete(pDev->semBlock); - free(pDev); - pajp->writer = NULL; - /* Inform the other end */ - if (pajp->reader != NULL) { - selWakeupAll(&pajp->reader->wakeupList, SELREAD); - semGive(pajp->reader->semBlock); - } - } else { - errno = EBADF; - taskUnlock(); - return (ERROR); - } - if (pajp->reader == NULL && pajp->writer == NULL) { - rngDelete(pajp->ringId); - pajp->drvNum = 0; - free(pajp); - } - taskUnlock(); - return (OK); -} -/*************************************************************************** - * - * uxPipeRead - read from a pipe. - * - * Reads at most maxbytes bytes from the pipe. Blocks if blocking mode is - * set and the pipe is empty. - * - * RETURNS: - * number of bytes read, 0 on EOF, or ERROR if device descriptor does - * not refer to an open read end of a pipe (errno = EBADF), or if - * non-blocking mode is set and the pipe is empty (errno = EWOULDBLOCK). - */ - -LOCAL int -uxPipeRead(UXPIPE_DEV *pDev, char *buffer, int maxbytes) -{ - UXPIPE *pajp = pDev->pipe; - int nbytes = 0; - - if (pDev != pajp->reader) { - errno = EBADF; - return (ERROR); - } - if (maxbytes == 0) - return (0); - semTake(pDev->semExcl, WAIT_FOREVER); - /* - * Note that semBlock may be full, although there is nothing to read. - * This happens e.g. after the following sequence of operations: a - * reader task blocks, a writer task writes two times (the first - * write unblocks the reader task, the second write makes semBlock - * full). - */ - while (nbytes == 0) { - if (pDev->blocking) - semTake(pDev->semBlock, WAIT_FOREVER); - /* - * Reading and updating of the write end must not be interleaved - * with a write from another task - hence we lock this task. - */ - taskLock(); - nbytes = rngBufGet(pajp->ringId, buffer, maxbytes); - if (nbytes > 0) { - /* Give own semaphore if bytes remain or if write end is closed */ - if ((!rngIsEmpty(pajp->ringId) || pajp->writer == NULL) && - pDev->blocking) - semGive(pDev->semBlock); - /* Inform write end */ - if (pajp->writer != NULL) { - if (pajp->writer->blocking) - semGive(pajp->writer->semBlock); - selWakeupAll(&pajp->writer->wakeupList, SELWRITE); - } - } else if (pajp->writer == NULL) { - nbytes = 0; /* EOF */ - /* Give semaphore when write end is closed */ - if (pDev->blocking) - semGive(pDev->semBlock); - taskUnlock(); - semGive(pDev->semExcl); - return (nbytes); - } else if (!pDev->blocking) { - taskUnlock(); - semGive(pDev->semExcl); - errno = EWOULDBLOCK; - return (ERROR); - } - taskUnlock(); - } - semGive(pDev->semExcl); - return (nbytes); -} - -/*************************************************************************** - * - * uxPipeWrite - write to a pipe. - * - * Writes nbytes bytes to the pipe. Blocks if blocking mode is set, and if - * the pipe is full. - * - * RETURNS: - * number of bytes written, or ERROR if the device descriptor does not - * refer to an open write end of a pipe (errno = EBADF); or if the read end - * of the pipe is closed (errno = EPIPE); or if non-blocking mode is set - * and the pipe is full (errno = EWOULDBLOCK). 
- * - */ - -LOCAL int -uxPipeWrite(UXPIPE_DEV *pDev, char *buffer, int nbytes) -{ - - UXPIPE *pajp = pDev->pipe; - int sofar = 0, written; - - if (pDev != pajp->writer) { - errno = EBADF; - return (ERROR); - } - if (pajp->reader == NULL) { - errno = EPIPE; - return (ERROR); - } - if (nbytes == 0) - return (0); - semTake(pDev->semExcl, WAIT_FOREVER); - while (sofar < nbytes) { - if (pDev->blocking) - semTake(pDev->semBlock, WAIT_FOREVER); - if (pajp->reader == NULL) { - errno = EPIPE; - semGive(pDev->semBlock); - semGive(pDev->semExcl); - return (ERROR); - } - /* Writing and updating of the read end must not be interleaved - * with a read from another task - hence we lock this task. - */ - taskLock(); - written = rngBufPut(pajp->ringId, buffer + sofar, nbytes - sofar); - sofar += written; - /* Inform the read end if we really wrote something */ - if (written > 0 && pajp->reader != NULL) { - selWakeupAll(&pajp->reader->wakeupList, SELREAD); - if (pajp->reader->blocking) - semGive(pajp->reader->semBlock); - } - taskUnlock(); - if (!pDev->blocking) { - if (sofar == 0) { - errno = EWOULDBLOCK; - sofar = ERROR; - } - break; - } - } - /* Give own semaphore if space remains */ - if (!rngIsFull(pajp->ringId) && pDev->blocking) - semGive(pDev->semBlock); - semGive(pDev->semExcl); - return (sofar); -} - -/*************************************************************************** - * - * uxPipeIoctl - do device specific I/O control - * - * RETURNS: - * OK or ERROR. - */ - -LOCAL STATUS -uxPipeIoctl(FAST UXPIPE_DEV *pDev, FAST int function, int arg) - -{ - UXPIPE *pajp = pDev->pipe; - int status = OK; - - switch (function) { - case FIONBIO: - pDev->blocking = (*(int *)arg) ? 0 : 1; - break; - case FIOSELECT: - taskLock(); - selNodeAdd(&pDev->wakeupList, (SEL_WAKEUP_NODE *) arg); - if (selWakeupType((SEL_WAKEUP_NODE *) arg) == SELREAD && - pDev == pajp->reader && - (!rngIsEmpty(pajp->ringId) || pajp->writer == NULL)) - selWakeup((SEL_WAKEUP_NODE *) arg); - if (selWakeupType((SEL_WAKEUP_NODE *) arg) == SELWRITE && - pDev == pajp->writer && - (!rngIsFull(pajp->ringId) || pajp->reader == NULL)) - selWakeup((SEL_WAKEUP_NODE *) arg); - taskUnlock(); - break; - case FIOUNSELECT: - selNodeDelete(&pDev->wakeupList, (SEL_WAKEUP_NODE *) arg); - break; - default: - status = ERROR; - break; - } - return (status); -} - -/*************************************************************************** - * - * pipe - create an intertask channel - * - * Creates a pipe. fd[0] (fd[1]) is the read (write) file descriptor. - * - * RETURNS: - * OK or ERROR, if the pipe could not be created. - */ - -STATUS -pipe(int fd[2]) -{ - semTake(pipeSem, WAIT_FOREVER); - if ((fd[0] = open(pipeRead, O_RDONLY, 0)) != ERROR) { - if ((fd[1] = open(pipeWrite, O_WRONLY, 0)) != ERROR) { - semGive(pipeSem); - return (OK); - } - (void) close(fd[0]); - } - errno &= 0xFFFF; - if((errno & 0xFFFF) == EINTR) /* Why on earth EINTR??? */ - errno = ENFILE; /* It means we are out of file descriptors...*/ - semGive(pipeSem); - return (ERROR); -} - -/*************************************************************************** - * - * uxPipeShow - display pipe information - * - * RETURNS: - * N/A. 
- */ - -void -uxPipeShow(int fd) -{ - UXPIPE_DEV *pDev; - UXPIPE *pajp; - int drvValue; - - if ((drvValue = iosFdValue(fd)) == ERROR) { - erts_fprintf(stderr, "Error: file descriptor invalid\n"); - return; - } - pDev = (UXPIPE_DEV *)drvValue; - pajp = pDev->pipe; - if (pajp->drvNum != uxPipeDrvNum) { - erts_fprintf(stderr, "Error: Not a ux pipe device\n"); - return; - } - erts_fprintf(stderr, "Device : 0x%x\n", (int) pDev); - erts_fprintf(stderr, "Buffer size : %d\n", UXPIPE_SIZE); - erts_fprintf(stderr, "Bytes in buffer : %d\n\n", rngNBytes(pajp->ringId)); - erts_fprintf(stderr, "READ END\n\n"); - if (pajp->reader != NULL) { - erts_fprintf(stderr, "Mode : "); - erts_fprintf(stderr, "%s\n", - (pajp->reader->blocking) ? "blocking" : "non-blocking"); - } - erts_fprintf(stderr, "Status : "); - if (pajp->reader != NULL) { - erts_fprintf(stderr, "OPEN\n"); - erts_fprintf(stderr, "Wake-up list : %d\n\n", - selWakeupListLen(&pajp->reader->wakeupList)); - erts_fprintf(stderr, "Exclusion Semaphore\n"); - semShow(pajp->reader->semExcl, 1); - erts_fprintf(stderr, "Blocking Semaphore\n"); - semShow(pajp->reader->semBlock, 1); - } else - erts_fprintf(stderr, "CLOSED\n\n"); - erts_fprintf(stderr, "WRITE END\n\n"); - if (pajp->writer != NULL) { - erts_fprintf(stderr, "Mode : "); - erts_fprintf(stderr, "%s\n", - (pajp->writer->blocking) ? "blocking" : "non-blocking"); - } - erts_fprintf(stderr, "Status : "); - if (pajp->writer != NULL) { - erts_fprintf(stderr, "OPEN\n"); - erts_fprintf(stderr, "Wake-up list : %d\n\n", - selWakeupListLen(&pajp->writer->wakeupList)); - erts_fprintf(stderr, "Exclusion Semaphore\n"); - semShow(pajp->writer->semExcl, 1); - erts_fprintf(stderr, "Blocking Semaphore\n"); - semShow(pajp->writer->semBlock, 1); - } else - erts_fprintf(stderr, "CLOSED\n\n"); -} - -#ifdef DEBUG -void -erl_assert_error(char* expr, char* file, int line) -{ - fflush(stdout); - fprintf(stderr, "Assertion failed: %s in %s, line %d\n", - expr, file, line); - fflush(stderr); - erl_crash_dump(file, line, "Assertion failed: %s\n", expr); - abort(); -} -void -erl_debug(char* fmt, ...) -{ - char sbuf[1024]; /* Temporary buffer. */ - va_list va; - - va_start(va, fmt); - vsprintf(sbuf, fmt, va); - va_end(va); - fprintf(stderr, "%s\n", sbuf); -} -#endif diff --git a/erts/emulator/sys/win32/erl_win32_sys_ddll.c b/erts/emulator/sys/win32/erl_win32_sys_ddll.c index d00eed932b..54991a610c 100644 --- a/erts/emulator/sys/win32/erl_win32_sys_ddll.c +++ b/erts/emulator/sys/win32/erl_win32_sys_ddll.c @@ -58,7 +58,7 @@ void erl_sys_ddll_init(void) { /* * Open a shared object */ -int erts_sys_ddll_open2(char *full_name, void **handle, ErtsSysDdllError* err) +int erts_sys_ddll_open2(const char *full_name, void **handle, ErtsSysDdllError* err) { int len; char dlname[MAXPATHLEN + 1]; @@ -92,7 +92,7 @@ int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err) /* * Find a symbol in the shared object */ -int erts_sys_ddll_sym2(void *handle, char *func_name, void **function, +int erts_sys_ddll_sym2(void *handle, const char *func_name, void **function, ErtsSysDdllError* err) { FARPROC proc; diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h index b6f11209ba..5ce1a61303 100644 --- a/erts/emulator/sys/win32/erl_win_sys.h +++ b/erts/emulator/sys/win32/erl_win_sys.h @@ -90,6 +90,11 @@ #define strncasecmp _strnicmp +#ifndef __GNUC__ +# undef ERTS_I64_LITERAL +# define ERTS_I64_LITERAL(X) X##i64 +#endif + /* * Practial Windows specific macros. 
*/ diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c index 6894d682e7..47d12ed5fe 100755 --- a/erts/emulator/sys/win32/sys.c +++ b/erts/emulator/sys/win32/sys.c @@ -256,10 +256,25 @@ void erl_sys_args(int* argc, char** argv) } void -erts_sys_prepare_crash_dump(void) +erts_sys_prepare_crash_dump(int secs) { + Port *heart_port; + Eterm heap[3]; + Eterm *hp = heap; + Eterm list = NIL; + + heart_port = erts_get_heart_port(); + + if (heart_port) { + + list = CONS(hp, make_small(8), list); hp += 2; + + /* send to heart port, CMD = 8, i.e. prepare crash dump =o */ + erts_write_to_port(NIL, heart_port, list); + } + /* Windows - free file descriptors are hopefully available */ - return; + /* Alarm not used on windows */ } static void @@ -420,6 +435,8 @@ typedef struct async_io { HANDLE ioAllowed; /* The thread will wait for this event * before starting a new read or write. */ + HANDLE flushEvent; /* Used to signal that a flush should be done. */ + HANDLE flushReplyEvent; /* Used to signal that a flush has been done. */ DWORD pendingError; /* Used to delay presentating an error to Erlang * until the check_io function is entered. */ @@ -878,6 +895,8 @@ init_async_io(AsyncIo* aio, int use_threads) aio->ov.Offset = 0L; aio->ov.OffsetHigh = 0L; aio->ioAllowed = NULL; + aio->flushEvent = NULL; + aio->flushReplyEvent = NULL; aio->pendingError = 0; aio->bytesTransferred = 0; #ifdef ERTS_SMP @@ -890,6 +909,12 @@ init_async_io(AsyncIo* aio, int use_threads) aio->ioAllowed = CreateAutoEvent(FALSE); if (aio->ioAllowed == NULL) return -1; + aio->flushEvent = CreateAutoEvent(FALSE); + if (aio->flushEvent == NULL) + return -1; + aio->flushReplyEvent = CreateAutoEvent(FALSE); + if (aio->flushReplyEvent == NULL) + return -1; } return 0; } @@ -923,6 +948,14 @@ release_async_io(AsyncIo* aio, ErlDrvPort port_num) if (aio->ioAllowed != NULL) CloseHandle(aio->ioAllowed); aio->ioAllowed = NULL; + + if (aio->flushEvent != NULL) + CloseHandle(aio->flushEvent); + aio->flushEvent = NULL; + + if (aio->flushReplyEvent != NULL) + CloseHandle(aio->flushReplyEvent); + aio->flushReplyEvent = NULL; } /* ---------------------------------------------------------------------- @@ -2083,16 +2116,26 @@ threaded_writer(LPVOID param) AsyncIo* aio = (AsyncIo *) param; HANDLE thread = GetCurrentThread(); char* buf; - DWORD numToWrite; + DWORD numToWrite, handle; int ok; + HANDLE handles[2]; + handles[0] = aio->ioAllowed; + handles[1] = aio->flushEvent; for (;;) { - WaitForSingleObject(aio->ioAllowed, INFINITE); + handle = WaitForMultipleObjects(2, handles, FALSE, INFINITE); if (aio->flags & DF_EXIT_THREAD) break; + buf = OV_BUFFER_PTR(aio); numToWrite = OV_NUM_TO_READ(aio); aio->pendingError = 0; + + if (handle == (WAIT_OBJECT_0 + 1) && numToWrite == 0) { + SetEvent(aio->flushReplyEvent); + continue; + } + ok = WriteFile(aio->fd, buf, numToWrite, &aio->bytesTransferred, NULL); if (!ok) { aio->pendingError = GetLastError(); @@ -2127,7 +2170,11 @@ threaded_writer(LPVOID param) } } } - SetEvent(aio->ov.hEvent); + OV_NUM_TO_READ(aio) = 0; + if (handle == (WAIT_OBJECT_0 + 1)) + SetEvent(aio->flushReplyEvent); + else + SetEvent(aio->ov.hEvent); if (aio->pendingError != NO_ERROR || aio->bytesTransferred == 0) break; if (aio->flags & DF_EXIT_THREAD) @@ -2193,6 +2240,43 @@ fd_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts) if ((dp = new_driver_data(port_num, opts->packet_bytes, 2, TRUE)) == NULL) return ERL_DRV_ERROR_GENERAL; + /** + * Here is a brief description about how the fd driver works on windows. 
+     *
+     * fd_init:
+     * For each in/out fd pair a threaded_reader and threaded_writer thread is
+     * created. Within the DriverData struct each of the threads has an AsyncIO
+     * struct associated with it. Within AsyncIO there are two important HANDLEs,
+     * ioAllowed and ov.hEvent. ioAllowed is used to signal that the threaded_* threads
+     * should read/write some data, and ov.hEvent is driver_select'ed to be used to
+     * signal that the thread is done reading/writing.
+     *
+     * The reason for the driver being threaded like this is that once the FD is open
+     * on Windows, it is not possible to set it in overlapped mode. So we have to
+     * simulate this using threads.
+     *
+     * output:
+     * When an output occurs the data to be output is copied to AsyncIO.ov. Then
+     * the ioAllowed HANDLE is set, ov.hEvent is cleared and the port is marked as busy.
+     * The threaded_writer thread is waiting in WaitForMultipleObjects on ioAllowed, and
+     * when signalled it writes all data in AsyncIO.ov and then sets ov.hEvent so that
+     * ready_output gets triggered and (potentially) sends the reply to the port and
+     * marks the port as non-busy.
+     *
+     * input:
+     * The threaded_reader is waiting in ReadFile on the in fd, and when a new
+     * line is written it sets ov.hEvent to signal that new data is available and then goes
+     * and waits for ioAllowed to be set. ready_input is run when ov.hEvent is set and
+     * delivers the data to the port. Then ioAllowed is signalled again and threaded_reader
+     * goes back to ReadFile.
+     *
+     * shutdown:
+     * In order to guarantee that all I/O has been written before the driver is stopped,
+     * fd_stop uses flushEvent and flushReplyEvent to make sure that there is no data
+     * in ov which needs writing before returning from fd_stop.
+     *
+     **/
+
     if (!create_file_thread(&dp->in, DO_READ)) {
 	dp->port_num = PORT_FREE;
 	return ERL_DRV_ERROR_GENERAL;
@@ -2241,6 +2325,8 @@ static void fd_stop(ErlDrvData d)
 	  (void) driver_select(dp->port_num,
 			       (ErlDrvEvent)dp->out.ov.hEvent,
 			       ERL_DRV_WRITE, 0);
+	  SetEvent(dp->out.flushEvent);
+	  WaitForSingleObject(dp->out.flushReplyEvent, INFINITE);
       }
   }
 
@@ -2764,10 +2850,10 @@ static void stop_select(ErlDrvEvent e, void* _)
 ** no interpretation of this should be done by the rest of the
 ** emulator. The buffer should be at least 21 bytes long.
 */
-void sys_get_pid(char *buffer){
+void sys_get_pid(char *buffer, size_t buffer_size){
     DWORD p = GetCurrentProcessId();
     /* The pid is scalar and is an unsigned long.
*/ - sprintf(buffer,"%lu",(unsigned long) p); + erts_snprintf(buffer, buffer_size, "%lu",(unsigned long) p); } void @@ -3107,7 +3193,8 @@ erl_assert_error(char* expr, char* file, int line) { char message[1024]; - sprintf(message, "File %hs, line %d: %hs", file, line, expr); + erts_snprintf(message, sizeof(message), + "File %hs, line %d: %hs", file, line, expr); MessageBox(GetActiveWindow(), message, "Assertion failed", MB_OK | MB_ICONERROR); #if 0 diff --git a/erts/emulator/sys/win32/sys_float.c b/erts/emulator/sys/win32/sys_float.c index 6558ad2d99..09dad89140 100644 --- a/erts/emulator/sys/win32/sys_float.c +++ b/erts/emulator/sys/win32/sys_float.c @@ -118,18 +118,18 @@ sys_chars_to_double(char *buf, double *fp) */ int -sys_double_to_chars(double fp, char *buf) +sys_double_to_chars(double fp, char *buffer, size_t buffer_size) { - char *s = buf; + char *s = buffer; - (void) sprintf(buf, "%.20e", fp); + (void) erts_snprintf(buffer, buffer_size, "%.20e", fp); /* Search upto decimal point */ if (*s == '+' || *s == '-') s++; while (isdigit(*s)) s++; if (*s == ',') *s++ = '.'; /* Replace ',' with '.' */ /* Scan to end of string */ while (*s) s++; - return s-buf; /* i.e strlen(buf) */ + return s-buffer; /* i.e strlen(buffer) */ } int diff --git a/erts/emulator/sys/win32/sys_time.c b/erts/emulator/sys/win32/sys_time.c index b5123dc45d..2f2dfc8197 100644 --- a/erts/emulator/sys/win32/sys_time.c +++ b/erts/emulator/sys/win32/sys_time.c @@ -26,11 +26,7 @@ #include "sys.h" #include "assert.h" -#ifdef __GNUC__ -#define LL_LITERAL(X) X##LL -#else -#define LL_LITERAL(X) X##i64 -#endif +#define LL_LITERAL(X) ERTS_I64_LITERAL(X) /******************* Routines for time measurement *********************/ diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile index a7c8bd2cd2..9594ab48b1 100644 --- a/erts/emulator/test/Makefile +++ b/erts/emulator/test/Makefile @@ -47,6 +47,7 @@ MODULES= \ busy_port_SUITE \ call_trace_SUITE \ code_SUITE \ + code_parallel_load_SUITE \ crypto_SUITE \ ddll_SUITE \ decode_packet_SUITE \ @@ -137,10 +138,10 @@ TARGET_FILES = $(MODULES:%=$(EBIN)/%.$(EMULATOR)) EMAKEFILE=Emakefile -TEST_SPEC_FILES = emulator.spec \ - emulator.spec.win \ - emulator.spec.vxworks \ - emulator.spec.ose +TEST_SPEC_FILES= emulator.spec \ + emulator.spec.win \ + emulator_bench.spec + # ---------------------------------------------------- # Release directory specification # ---------------------------------------------------- diff --git a/erts/emulator/test/alloc_SUITE_data/bucket_mask.c b/erts/emulator/test/alloc_SUITE_data/bucket_mask.c index 13af7d861a..b214f87e4a 100644 --- a/erts/emulator/test/alloc_SUITE_data/bucket_mask.c +++ b/erts/emulator/test/alloc_SUITE_data/bucket_mask.c @@ -16,11 +16,19 @@ * $Id$ */ +#include "erl_int_sizes_config.h" + #include "testcase_driver.h" #include "allocator_test.h" #include <stdio.h> -#define SBCT (512*1024) +#ifdef __WIN32__ && SIZEOF_VOID_P == 8 +/* Use larger threashold for win64 as block alignment + is 16 bytes and not 8 */ +#define SBCT ((1024*1024)) +#else +#define SBCT ((512*1024)) +#endif char * testcase_name(void) diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl index 99ed8f1748..e2442861c7 100644 --- a/erts/emulator/test/bif_SUITE.erl +++ b/erts/emulator/test/bif_SUITE.erl @@ -25,7 +25,9 @@ init_per_group/2,end_per_group/2, init_per_testcase/2,end_per_testcase/2, display/1, display_huge/0, - types/1, + erl_bif_types/1,guard_bifs_in_erl_bif_types/1, + shadow_comments/1, + 
specs/1,improper_bif_stubs/1,auto_imports/1, t_list_to_existing_atom/1,os_env/1,otp_7526/1, binary_to_atom/1,binary_to_existing_atom/1, atom_to_binary/1,min_max/1, erlang_halt/1]). @@ -33,7 +35,9 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> - [types, t_list_to_existing_atom, os_env, otp_7526, + [erl_bif_types, guard_bifs_in_erl_bif_types, shadow_comments, + specs, improper_bif_stubs, auto_imports, + t_list_to_existing_atom, os_env, otp_7526, display, atom_to_binary, binary_to_atom, binary_to_existing_atom, min_max, erlang_halt]. @@ -86,33 +90,20 @@ deeep(N,Acc) -> deeep(N) -> deeep(N,[hello]). +erl_bif_types(Config) when is_list(Config) -> + ensure_erl_bif_types_compiled(), -types(Config) when is_list(Config) -> - c:l(erl_bif_types), - case erlang:function_exported(erl_bif_types, module_info, 0) of - false -> - %% Fail cleanly. - ?line ?t:fail("erl_bif_types not compiled"); - true -> - types_1() - end. - -types_1() -> - ?line List0 = erlang:system_info(snifs), + List0 = erlang:system_info(snifs), %% Ignore missing type information for hipe BIFs. - ?line List = [MFA || {M,_,_}=MFA <- List0, M =/= hipe_bifs], + List = [MFA || {M,_,_}=MFA <- List0, M =/= hipe_bifs], - case [MFA || MFA <- List, not known_types(MFA)] of - [] -> - types_2(List); - BadTypes -> - io:put_chars("No type information:\n"), - io:format("~p\n", [lists:sort(BadTypes)]), - ?line ?t:fail({length(BadTypes),bifs_without_types}) - end. + KnownTypes = [MFA || MFA <- List, known_types(MFA)], + io:format("There are ~p BIFs with type information in erl_bif_types.", + [length(KnownTypes)]), + erl_bif_types_2(KnownTypes). -types_2(List) -> +erl_bif_types_2(List) -> BadArity = [MFA || {M,F,A}=MFA <- List, begin Types = erl_bif_types:arg_types(M, F, A), @@ -120,14 +111,14 @@ types_2(List) -> end], case BadArity of [] -> - types_3(List); + erl_bif_types_3(List); [_|_] -> io:put_chars("Bifs with bad arity\n"), io:format("~p\n", [BadArity]), ?line ?t:fail({length(BadArity),bad_arity}) end. -types_3(List) -> +erl_bif_types_3(List) -> BadSmokeTest = [MFA || {M,F,A}=MFA <- List, begin try erl_bif_types:type(M, F, A) of @@ -151,9 +142,220 @@ types_3(List) -> ?line ?t:fail({length(BadSmokeTest),bad_smoke_test}) end. +guard_bifs_in_erl_bif_types(_Config) -> + ensure_erl_bif_types_compiled(), + + List0 = erlang:system_info(snifs), + List = [{F,A} || {erlang,F,A} <- List0, + erl_internal:guard_bif(F, A)], + Not = [FA || {F,A}=FA <- List, + not erl_bif_types:is_known(erlang, F, A)], + case Not of + [] -> + ok; + [_|_] -> + io:put_chars( + ["Dialyzer requires that all guard BIFs " + "have type information in erl_bif_types.\n\n" + "The following guard BIFs have no type information " + "in erl_bif_types:\n\n", + [io_lib:format(" ~p/~p\n", [F,A]) || {F,A} <- Not]]), + ?t:fail() + end. 
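As an aside, the check above reduces to a single list comprehension over erlang:system_info(snifs). The sketch below is not part of the patch; it assumes a compiled erl_bif_types is on the code path (just as the test does) and can be pasted into an Erlang shell:

    MissingGuardBifTypes =
        fun() ->
                %% Guard BIFs with no type information in erl_bif_types;
                %% the test above requires this list to be empty.
                [{F,A} || {erlang,F,A} <- erlang:system_info(snifs),
                          erl_internal:guard_bif(F, A),
                          not erl_bif_types:is_known(erlang, F, A)]
        end,
    [] = MissingGuardBifTypes().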
+ +shadow_comments(_Config) -> + ensure_erl_bif_types_compiled(), + + List0 = erlang:system_info(snifs), + List1 = [MFA || {M,_,_}=MFA <- List0, M =/= hipe_bifs], + List = [MFA || MFA <- List1, not is_operator(MFA)], + HasTypes = [MFA || {M,F,A}=MFA <- List, + erl_bif_types:is_known(M, F, A)], + Path = get_code_path(), + BifRel = sofs:relation(HasTypes, [{m,f,a}]), + BifModules = sofs:to_external(sofs:projection(1, BifRel)), + AbstrByModule = [extract_abstract(Mod, Path) || Mod <- BifModules], + Specs0 = [extract_specs(Mod, Abstr) || + {Mod,Abstr} <- AbstrByModule], + Specs = lists:append(Specs0), + SpecFuns0 = [F || {F,_} <- Specs], + SpecFuns = sofs:relation(SpecFuns0, [{m,f,a}]), + HasTypesAndSpecs = sofs:intersection(BifRel, SpecFuns), + Commented0 = lists:append([extract_comments(Mod, Path) || + Mod <- BifModules]), + Commented = sofs:relation(Commented0, [{m,f,a}]), + {NoComments0,_,NoBifSpecs0} = + sofs:symmetric_partition(HasTypesAndSpecs, Commented), + NoComments = sofs:to_external(NoComments0), + NoBifSpecs = sofs:to_external(NoBifSpecs0), + + case NoComments of + [] -> + ok; + [_|_] -> + io:put_chars( + ["If a BIF stub has both a spec and has type information in " + "erl_bif_types, there *must*\n" + "be a comment in the source file to make that immediately " + "obvious.\n\nThe following comments are missing:\n\n", + [io_lib:format("%% Shadowed by erl_bif_types: ~p:~p/~p\n", + [M,F,A]) || {M,F,A} <- NoComments]]), + ?t:fail() + end, + + case NoBifSpecs of + [] -> + ok; + [_|_] -> + io:put_chars( + ["The following functions have \"shadowed\" comments " + "claiming that there is type information in erl_bif_types,\n" + "but actually there is no such type information.\n\n" + "Therefore, the following comments should be removed:\n\n", + [io_lib:format("%% Shadowed by erl_bif_types: ~p:~p/~p\n", + [M,F,A]) || {M,F,A} <- NoBifSpecs]]), + ?t:fail() + end. + +extract_comments(Mod, Path) -> + Beam = which(Mod, Path), + SrcDir = filename:join(filename:dirname(filename:dirname(Beam)), "src"), + Src = filename:join(SrcDir, atom_to_list(Mod) ++ ".erl"), + {ok,Bin} = file:read_file(Src), + Lines0 = binary:split(Bin, <<"\n">>, [global]), + Lines1 = [T || <<"%% Shadowed by erl_bif_types: ",T/binary>> <- Lines0], + {ok,ReMFA} = re:compile("([^:]*):([^/]*)/(\\d*)"), + Lines = [L || L <- Lines1, re:run(L, ReMFA, [{capture,[]}]) =:= match], + [begin + {match,[M,F,A]} = re:run(L, ReMFA, [{capture,all_but_first,list}]), + {list_to_atom(M),list_to_atom(F),list_to_integer(A)} + end || L <- Lines]. + +ensure_erl_bif_types_compiled() -> + c:l(erl_bif_types), + case erlang:function_exported(erl_bif_types, module_info, 0) of + false -> + %% Fail cleanly. + ?t:fail("erl_bif_types not compiled"); + true -> + ok + end. + known_types({M,F,A}) -> erl_bif_types:is_known(M, F, A). +specs(_) -> + List0 = erlang:system_info(snifs), + + %% Ignore missing type information for hipe BIFs. + List1 = [MFA || {M,_,_}=MFA <- List0, M =/= hipe_bifs], + + %% Ignore all operators. + List = [MFA || MFA <- List1, not is_operator(MFA)], + + %% Extract specs from the abstract code for all BIFs. 
+ Path = get_code_path(), + BifRel = sofs:relation(List, [{m,f,a}]), + BifModules = sofs:to_external(sofs:projection(1, BifRel)), + AbstrByModule = [extract_abstract(Mod, Path) || Mod <- BifModules], + Specs0 = [extract_specs(Mod, Abstr) || + {Mod,Abstr} <- AbstrByModule], + Specs = lists:append(Specs0), + BifSet = sofs:set(List, [function]), + SpecRel0 = sofs:relation(Specs, [{function,spec}]), + SpecRel = sofs:restriction(SpecRel0, BifSet), + + %% Find BIFs without specs. + NoSpecs0 = sofs:difference(BifSet, sofs:domain(SpecRel)), + NoSpecs = sofs:to_external(NoSpecs0), + case NoSpecs of + [] -> + ok; + [_|_] -> + io:put_chars("The following BIFs don't have specs:\n"), + [print_mfa(MFA) || MFA <- NoSpecs], + ?t:fail() + end. + +is_operator({erlang,F,A}) -> + erl_internal:arith_op(F, A) orelse + erl_internal:bool_op(F, A) orelse + erl_internal:comp_op(F, A) orelse + erl_internal:list_op(F, A) orelse + erl_internal:send_op(F, A); +is_operator(_) -> false. + +extract_specs(M, Abstr) -> + [{make_mfa(M, Name),Spec} || {attribute,_,spec,{Name,Spec}} <- Abstr]. + +make_mfa(M, {F,A}) -> {M,F,A}; +make_mfa(M, {M,_,_}=MFA) -> MFA. + +improper_bif_stubs(_) -> + Bifs0 = erlang:system_info(snifs), + Bifs = [MFA || {M,_,_}=MFA <- Bifs0, M =/= hipe_bifs], + Path = get_code_path(), + BifRel = sofs:relation(Bifs, [{m,f,a}]), + BifModules = sofs:to_external(sofs:projection(1, BifRel)), + AbstrByModule = [extract_abstract(Mod, Path) || Mod <- BifModules], + Funcs0 = [extract_functions(Mod, Abstr) || + {Mod,Abstr} <- AbstrByModule], + Funcs = lists:append(Funcs0), + BifSet = sofs:set(Bifs, [function]), + FuncRel0 = sofs:relation(Funcs, [{function,code}]), + FuncRel = sofs:restriction(FuncRel0, BifSet), + [check_stub(MFA, Body) || {MFA,Body} <- sofs:to_external(FuncRel)], + ok. + +auto_imports(_Config) -> + Path = get_code_path(), + {erlang,Abstr} = extract_abstract(erlang, Path), + SpecFuns = [Name || {attribute,_,spec,{Name,_}} <- Abstr], + auto_imports(SpecFuns, 0). + +auto_imports([{F,A}|T], Errors) -> + case erl_internal:bif(F, A) of + false -> + io:format("~p/~p: not auto-imported, but spec claims it " + "is auto-imported", [F,A]), + auto_imports(T, Errors+1); + true -> + auto_imports(T, Errors) + end; +auto_imports([{erlang,F,A}|T], Errors) -> + case erl_internal:bif(F, A) of + false -> + auto_imports(T, Errors); + true -> + io:format("~p/~p: auto-imported, but " + "spec claims it is *not* auto-imported", [F,A]), + auto_imports(T, Errors+1) + end; +auto_imports([], 0) -> + ok; +auto_imports([], Errors) -> + ?t:fail({Errors,inconsistencies}). + +extract_functions(M, Abstr) -> + [{{M,F,A},Body} || {function,_,F,A,Body} <- Abstr]. + +check_stub({erlang,apply,3}, _) -> + ok; +check_stub({_,F,A}, B) -> + try + [{clause,_,Args,[],Body}] = B, + A = length(Args), + [{call,_,{remote,_,{atom,_,erlang},{atom,_,nif_error}},[_]}] = Body + catch + _:_ -> + io:put_chars("Invalid body for the following BIF stub:\n"), + Func = {function,0,F,A,B}, + io:put_chars(erl_pp:function(Func)), + io:nl(), + io:put_chars("The body should be: erlang:nif_error(undef)"), + ?t:fail() + end. + t_list_to_existing_atom(Config) when is_list(Config) -> ?line all = list_to_existing_atom("all"), ?line ?MODULE = list_to_existing_atom(?MODULE_STRING), @@ -483,6 +685,35 @@ erlang_halt(Config) when is_list(Config) -> id(I) -> I. +%% Get code path, including the path for the erts application. 
+get_code_path() -> + case code:lib_dir(erts) of + {error,bad_name} -> + Erts = filename:join([code:root_dir(),"erts","preloaded","ebin"]), + [Erts|code:get_path()]; + _ -> + code:get_path() + end. + +which(Mod, Path) -> + which_1(atom_to_list(Mod) ++ ".beam", Path). + +which_1(Base, [D|Ds]) -> + Path = filename:join(D, Base), + case filelib:is_regular(Path) of + true -> Path; + false -> which_1(Base, Ds) + end. +print_mfa({M,F,A}) -> + io:format("~p:~p/~p", [M,F,A]). + +extract_abstract(Mod, Path) -> + Beam = which(Mod, Path), + {ok,{Mod,[{abstract_code,{raw_abstract_v1,Abstr}}]}} = + beam_lib:chunks(Beam, [abstract_code]), + {Mod,Abstr}. + + hostname() -> hostname(atom_to_list(node())). diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl index 58e0cb4096..babdb3363f 100644 --- a/erts/emulator/test/binary_SUITE.erl +++ b/erts/emulator/test/binary_SUITE.erl @@ -626,8 +626,32 @@ safe_binary_to_term2(Config) when is_list(Config) -> bad_terms(suite) -> []; bad_terms(Config) when is_list(Config) -> ?line test_terms(fun corrupter/1). - + +corrupter(Term) when is_function(Term); + is_function(hd(Term)); + is_function(element(2,element(2,element(2,Term)))) -> + %% Check if lists is native compiled. If it is, we do not try to + %% corrupt funs as this can create some very strange behaviour. + %% To show the error print `Byte` in the foreach fun in corrupter/2. + case erlang:system_info(hipe_architecture) of + undefined -> + corrupter0(Term); + Architecture -> + {lists, ListsBinary, _ListsFilename} = code:get_object_code(lists), + ChunkName = hipe_unified_loader:chunk_name(Architecture), + NativeChunk = beam_lib:chunks(ListsBinary, [ChunkName]), + case NativeChunk of + {ok,{_,[{_,Bin}]}} when is_binary(Bin) -> + S = io_lib:format("Skipping corruption of: ~P", [Term,12]), + io:put_chars(S); + {error, beam_lib, _} -> + corrupter0(Term) + end + end; corrupter(Term) -> + corrupter0(Term). + +corrupter0(Term) -> ?line try S = io_lib:format("About to corrupt: ~P", [Term,12]), io:put_chars(S) diff --git a/erts/emulator/test/bs_bit_binaries_SUITE.erl b/erts/emulator/test/bs_bit_binaries_SUITE.erl index ff1088118d..7f2cd1e881 100644 --- a/erts/emulator/test/bs_bit_binaries_SUITE.erl +++ b/erts/emulator/test/bs_bit_binaries_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2006-2011. All Rights Reserved. +%% Copyright Ericsson AB 2006-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -177,14 +177,55 @@ append(Config) when is_list(Config) -> cs_init(), ?line <<(-1):256/signed-unit:8>> = cs(do_append(id(<<>>), 256*8)), ?line <<(-1):256/signed-unit:8>> = cs(do_append2(id(<<>>), 256*4)), + <<(-1):256/signed-unit:8>> = cs(do_append3(id(<<>>), 256*8)), cs_end(). do_append(Bin, N) when N > 0 -> do_append(<<Bin/bits,1:1>>, N-1); do_append(Bin, 0) -> Bin. -do_append2(Bin, N) when N > 0 -> do_append2(<<Bin/bits,3:2>>, N-1); +do_append2(Bin, N) when N > 0 -> do_append2(<<Bin/binary-unit:2,3:2>>, N-1); do_append2(Bin, 0) -> Bin. 
+do_append3(Bin, N) when N > 0 -> + Bits = bit_size(Bin), + if + Bits rem 2 =:= 0 -> + do_append3(<<Bin/binary-unit:2,1:1>>, N-1); + Bits rem 3 =:= 0 -> + do_append3(<<Bin/binary-unit:3,1:1>>, N-1); + Bits rem 4 =:= 0 -> + do_append3(<<Bin/binary-unit:4,1:1>>, N-1); + Bits rem 5 =:= 0 -> + do_append3(<<Bin/binary-unit:5,1:1>>, N-1); + Bits rem 6 =:= 0 -> + do_append3(<<Bin/binary-unit:6,1:1>>, N-1); + Bits rem 7 =:= 0 -> + do_append3(<<Bin/binary-unit:7,1:1>>, N-1); + Bits rem 8 =:= 0 -> + do_append3(<<Bin/binary-unit:8,1:1>>, N-1); + Bits rem 9 =:= 0 -> + do_append3(<<Bin/binary-unit:9,1:1>>, N-1); + Bits rem 10 =:= 0 -> + do_append3(<<Bin/binary-unit:10,1:1>>, N-1); + Bits rem 11 =:= 0 -> + do_append3(<<Bin/binary-unit:11,1:1>>, N-1); + Bits rem 12 =:= 0 -> + do_append3(<<Bin/binary-unit:12,1:1>>, N-1); + Bits rem 13 =:= 0 -> + do_append3(<<Bin/binary-unit:13,1:1>>, N-1); + Bits rem 14 =:= 0 -> + do_append3(<<Bin/binary-unit:14,1:1>>, N-1); + Bits rem 15 =:= 0 -> + do_append3(<<Bin/binary-unit:15,1:1>>, N-1); + Bits rem 16 =:= 0 -> + do_append3(<<Bin/binary-unit:16,1:1>>, N-1); + Bits rem 17 =:= 0 -> + do_append3(<<Bin/binary-unit:17,1:1>>, N-1); + true -> + do_append3(<<Bin/binary-unit:1,1:1>>, N-1) + end; +do_append3(Bin, 0) -> Bin. + cs_init() -> erts_debug:set_internal_state(available_internal_state, true), ok. diff --git a/erts/emulator/test/bs_construct_SUITE.erl b/erts/emulator/test/bs_construct_SUITE.erl index 4dfe559104..123952d01d 100644 --- a/erts/emulator/test/bs_construct_SUITE.erl +++ b/erts/emulator/test/bs_construct_SUITE.erl @@ -28,7 +28,7 @@ mem_leak/1, coerce_to_float/1, bjorn/1, huge_float_field/1, huge_binary/1, system_limit/1, badarg/1, copy_writable_binary/1, kostis/1, dynamic/1, bs_add/1, - otp_7422/1, zero_width/1]). + otp_7422/1, zero_width/1, bad_append/1]). -include_lib("test_server/include/test_server.hrl"). @@ -38,7 +38,8 @@ all() -> [test1, test2, test3, test4, test5, testf, not_used, in_guard, mem_leak, coerce_to_float, bjorn, huge_float_field, huge_binary, system_limit, badarg, - copy_writable_binary, kostis, dynamic, bs_add, otp_7422, zero_width]. + copy_writable_binary, kostis, dynamic, bs_add, otp_7422, zero_width, + bad_append]. groups() -> []. @@ -863,5 +864,49 @@ zero_width(Config) when is_list(Config) -> ?line {'EXIT',{badarg,_}} = (catch <<(id(not_a_number)):0>>), ok. + +bad_append(_) -> + do_bad_append(<<127:1>>, fun append_unit_3/1), + do_bad_append(<<127:2>>, fun append_unit_3/1), + do_bad_append(<<127:17>>, fun append_unit_3/1), + + do_bad_append(<<127:3>>, fun append_unit_4/1), + do_bad_append(<<127:5>>, fun append_unit_4/1), + do_bad_append(<<127:7>>, fun append_unit_4/1), + do_bad_append(<<127:199>>, fun append_unit_4/1), + + do_bad_append(<<127:7>>, fun append_unit_8/1), + do_bad_append(<<127:9>>, fun append_unit_8/1), + + do_bad_append(<<0:8>>, fun append_unit_16/1), + do_bad_append(<<0:15>>, fun append_unit_16/1), + do_bad_append(<<0:17>>, fun append_unit_16/1), + ok. + +do_bad_append(Bin0, Appender) -> + {'EXIT',{badarg,_}} = (catch Appender(Bin0)), + + Bin1 = id(<<0:3,Bin0/bitstring>>), + <<_:3,Bin2/bitstring>> = Bin1, + {'EXIT',{badarg,_}} = (catch Appender(Bin2)), + + %% Create a writable binary. + Empty = id(<<>>), + Bin3 = <<Empty/bitstring,Bin0/bitstring>>, + {'EXIT',{badarg,_}} = (catch Appender(Bin3)), + ok. + +append_unit_3(Bin) -> + <<Bin/binary-unit:3,0:1>>. + +append_unit_4(Bin) -> + <<Bin/binary-unit:4,0:1>>. + +append_unit_8(Bin) -> + <<Bin/binary,0:1>>. + +append_unit_16(Bin) -> + <<Bin/binary-unit:16,0:1>>. + id(I) -> I. 
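The bad_append/1 case added above pins down the unit rule for appending: a binary segment with unit N and no explicit size covers the whole left-hand operand, so its bit size must be a multiple of N or the construction fails with badarg. A minimal stand-alone sketch of that rule follows; it is not part of the patch, and the module and function names are made up:

    %% Sketch of the unit rule exercised by bad_append/1; not part of the patch.
    -module(unit_append_sketch).
    -export([demo/0]).

    demo() ->
        Ok = id(<<0:16>>),                        %% 16 bits, a multiple of 16
        17 = bit_size(<<Ok/binary-unit:16,0:1>>), %% construction succeeds
        Bad = id(<<0:15>>),                       %% 15 bits, not a multiple of 16
        {'EXIT',{badarg,_}} = (catch <<Bad/binary-unit:16,0:1>>),
        ok.

    %% Hide the literals from the compiler, as the suites above do.
    id(I) -> I.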
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl index 9d80b01748..eaecd32f95 100644 --- a/erts/emulator/test/call_trace_SUITE.erl +++ b/erts/emulator/test/call_trace_SUITE.erl @@ -25,6 +25,7 @@ init_per_testcase/2,end_per_testcase/2, hipe/1,process_specs/1,basic/1,flags/1,errors/1,pam/1,change_pam/1, return_trace/1,exception_trace/1,on_load/1,deep_exception/1, + upgrade/1, exception_nocatch/1,bit_syntax/1]). %% Helper functions. @@ -46,6 +47,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> Common = [errors, on_load], NotHipe = [process_specs, basic, flags, pam, change_pam, + upgrade, return_trace, exception_trace, deep_exception, exception_nocatch, bit_syntax], Hipe = [hipe], @@ -76,7 +78,13 @@ init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> end_per_testcase(_Func, Config) -> Dog = ?config(watchdog, Config), - ?t:timetrap_cancel(Dog). + ?t:timetrap_cancel(Dog), + + %% Reloading the module will clear all trace patterns, and + %% in a debug-compiled emulator run assertions of the counters + %% for the number of traced exported functions in this module. + + c:l(?MODULE). hipe(Config) when is_list(Config) -> ?line 0 = erlang:trace_pattern({?MODULE,worker_foo,1}, true), @@ -185,8 +193,13 @@ basic() -> %% Trace some functions... ?line trace_func({lists,'_','_'}, []), + + %% Make sure that tracing the same functions more than once + %% does not cause any problems. + ?line 3 = trace_func({?MODULE,foo,'_'}, true), ?line 3 = trace_func({?MODULE,foo,'_'}, true), ?line 1 = trace_func({?MODULE,bar,0}, true), + ?line 1 = trace_func({?MODULE,bar,0}, true), ?line {traced,global} = trace_info({?MODULE,bar,0}, traced), ?line 1 = trace_func({erlang,list_to_integer,1}, true), ?line {traced,global} = trace_info({erlang,list_to_integer,1}, traced), @@ -267,6 +280,118 @@ foo() -> foo0. foo(X) -> X+1. foo(X, Y) -> X+Y. + +%% Note that the semantics that this test case verifies +%% are not explicitly specified in the docs (what I could find in R15B). +%% This test case was written to verify that we do not change +%% any behaviour with the introduction of "block-free" upgrade in R16. +%% In short: Do not refer to this test case as an authority of how it must work. +upgrade(doc) -> + "Test tracing on module being upgraded"; +upgrade(Config) when is_list(Config) -> + V1 = compile_version(my_upgrade_test, 1, Config), + V2 = compile_version(my_upgrade_test, 2, Config), + start_tracer(), + upgrade_do(V1, V2, false), + upgrade_do(V1, V2, true). 
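upgrade/1 above and upgrade_do/3 below rely on the behaviour spelled out in their comments: erlang:load_module/2 clears every trace pattern set on the module. A tracer that wants to keep following a module across an upgrade therefore has to re-apply its patterns after each load, roughly as in this sketch (not part of the patch; the function name is made up):

    %% Re-establish call tracing on Mod after loading a new version of it.
    reload_and_retrace(Mod, Beam, MatchSpec) ->
        {module,Mod} = erlang:load_module(Mod, Beam),
        %% All patterns previously set on Mod were cleared by the load.
        erlang:trace_pattern({Mod,'_','_'}, MatchSpec, [global]).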
+ +upgrade_do(V1, V2, TraceLocalVersion) -> + {module,my_upgrade_test} = erlang:load_module(my_upgrade_test, V1), + + + %% Test that trace is cleared after load_module + + trace_func({my_upgrade_test,'_','_'}, [], [global]), + case TraceLocalVersion of + true -> trace_func({my_upgrade_test,local_version,0}, [], [local]); + _ -> ok + end, + 1 = my_upgrade_test:version(), + 1 = my_upgrade_test:do_local(), + 1 = my_upgrade_test:do_real_local(), + put('F1_exp', my_upgrade_test:make_fun_exp()), + put('F1_loc', my_upgrade_test:make_fun_local()), + 1 = (get('F1_exp'))(), + 1 = (get('F1_loc'))(), + + Self = self(), + expect({trace,Self,call,{my_upgrade_test,version,[]}}), + expect({trace,Self,call,{my_upgrade_test,do_local,[]}}), + expect({trace,Self,call,{my_upgrade_test,do_real_local,[]}}), + case TraceLocalVersion of + true -> expect({trace,Self,call,{my_upgrade_test,local_version,[]}}); + _ -> ok + end, + expect({trace,Self,call,{my_upgrade_test,make_fun_exp,[]}}), + expect({trace,Self,call,{my_upgrade_test,make_fun_local,[]}}), + expect({trace,Self,call,{my_upgrade_test,version,[]}}), % F1_exp + case TraceLocalVersion of + true -> expect({trace,Self,call,{my_upgrade_test,local_version,[]}}); % F1_loc + _ -> ok + end, + + {module,my_upgrade_test} = erlang:load_module(my_upgrade_test, V2), + 2 = my_upgrade_test:version(), + put('F2_exp', my_upgrade_test:make_fun_exp()), + put('F2_loc', my_upgrade_test:make_fun_local()), + 2 = (get('F1_exp'))(), + 1 = (get('F1_loc'))(), + 2 = (get('F2_exp'))(), + 2 = (get('F2_loc'))(), + expect(), + + put('F1_exp', undefined), + put('F1_loc', undefined), + erlang:garbage_collect(), + erlang:purge_module(my_upgrade_test), + + % Test that trace is cleared after delete_module + + trace_func({my_upgrade_test,'_','_'}, [], [global]), + case TraceLocalVersion of + true -> trace_func({my_upgrade_test,local_version,0}, [], [local]); + _ -> ok + end, + 2 = my_upgrade_test:version(), + 2 = my_upgrade_test:do_local(), + 2 = my_upgrade_test:do_real_local(), + 2 = (get('F2_exp'))(), + 2 = (get('F2_loc'))(), + + expect({trace,Self,call,{my_upgrade_test,version,[]}}), + expect({trace,Self,call,{my_upgrade_test,do_local,[]}}), + expect({trace,Self,call,{my_upgrade_test,do_real_local,[]}}), + case TraceLocalVersion of + true -> expect({trace,Self,call,{my_upgrade_test,local_version,[]}}); + _ -> ok + end, + expect({trace,Self,call,{my_upgrade_test,version,[]}}), % F2_exp + case TraceLocalVersion of + true -> expect({trace,Self,call,{my_upgrade_test,local_version,[]}}); % F2_loc + _ -> ok + end, + + true = erlang:delete_module(my_upgrade_test), + {'EXIT',{undef,_}} = (catch my_upgrade_test:version()), + {'EXIT',{undef,_}} = (catch ((get('F2_exp'))())), + 2 = (get('F2_loc'))(), + expect(), + + put('F2_exp', undefined), + put('F2_loc', undefined), + erlang:garbage_collect(), + erlang:purge_module(my_upgrade_test), + ok. + +compile_version(Module, Version, Config) -> + Data = ?config(data_dir, Config), + File = filename:join(Data, atom_to_list(Module)), + {ok,Module,Bin} = compile:file(File, [{d,'VERSION',Version}, + binary,report]), + Bin. + + + %% Test flags (arity, timestamp) for call_trace/3. %% Also, test the '{tracer,Pid}' option. flags(_Config) -> @@ -1151,11 +1276,13 @@ trace_info(What, Key) -> Res. trace_func(MFA, MatchSpec) -> - get(tracer) ! {apply,self(),{erlang,trace_pattern,[MFA, MatchSpec]}}, + trace_func(MFA, MatchSpec, []). +trace_func(MFA, MatchSpec, Flags) -> + get(tracer) ! 
{apply,self(),{erlang,trace_pattern,[MFA, MatchSpec, Flags]}}, Res = receive {apply_result,Result} -> Result end, - ok = io:format("trace_pattern(~p, ~p) -> ~p", [MFA,MatchSpec,Res]), + ok = io:format("trace_pattern(~p, ~p, ~p) -> ~p", [MFA,MatchSpec,Flags,Res]), Res. trace_pid(Pid, On, Flags) -> diff --git a/erts/emulator/test/call_trace_SUITE_data/my_upgrade_test.erl b/erts/emulator/test/call_trace_SUITE_data/my_upgrade_test.erl new file mode 100644 index 0000000000..11b8a95209 --- /dev/null +++ b/erts/emulator/test/call_trace_SUITE_data/my_upgrade_test.erl @@ -0,0 +1,26 @@ +-module(my_upgrade_test). + +-export([version/0]). +-export([do_local/0]). +-export([do_real_local/0]). +-export([make_fun_exp/0]). +-export([make_fun_local/0]). + + +version() -> + ?VERSION. + +do_local() -> + version(). + +do_real_local() -> + local_version(). + +local_version() -> + ?VERSION. + +make_fun_exp() -> + fun() -> ?MODULE:version() end. + +make_fun_local() -> + fun() -> local_version() end. diff --git a/erts/emulator/test/code_parallel_load_SUITE.erl b/erts/emulator/test/code_parallel_load_SUITE.erl new file mode 100644 index 0000000000..aa9e4c96c6 --- /dev/null +++ b/erts/emulator/test/code_parallel_load_SUITE.erl @@ -0,0 +1,198 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2012. All Rights Reserved. +%% +%% The contents of this file are subject to the Erlang Public License, +%% Version 1.1, (the "License"); you may not use this file except in +%% compliance with the License. You should have received a copy of the +%% Erlang Public License along with this software. If not, it can be +%% retrieved online at http://www.erlang.org/. +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and limitations +%% under the License. +%% +%% %CopyrightEnd% +%% +%% Author: Björn-Egil Dahlberg + +-module(code_parallel_load_SUITE). +-export([ + all/0, + suite/0, + init_per_suite/1, + end_per_suite/1, + init_per_testcase/2, + end_per_testcase/2 + ]). + +-export([ + multiple_load_check_purge_repeat/1, + many_load_distributed_only_once/1 + ]). + +-define(model, code_parallel_load_SUITE_model). +-define(interval, 50). +-define(number_of_processes, 160). +-define(passes, 4). + + +-include_lib("test_server/include/test_server.hrl"). + +suite() -> [{ct_hooks,[ts_install_cth]}]. + +all() -> + [ + multiple_load_check_purge_repeat, + many_load_distributed_only_once + ]. + + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> + Dog=?t:timetrap(?t:minutes(3)), + [{watchdog, Dog}|Config]. + +end_per_testcase(_Func, Config) -> + SConf = ?config(save_config, Config), + Pids = proplists:get_value(purge_pids, SConf), + + case check_old_code(?model) of + true -> check_and_purge_processes_code(Pids, ?model); + _ -> ok + end, + case erlang:delete_module(?model) of + true -> check_and_purge_processes_code(Pids, ?model); + _ -> ok + end, + Dog=?config(watchdog, Config), + ?t:timetrap_cancel(Dog). 
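end_per_testcase/2 above, and the suite as a whole, lean on the two-version rule: erlang:load_module/2 returns {error,not_purged} while an old version of the module is still loaded. Compressed into one helper, the protocol looks like the sketch below (not part of the patch; the helper name is made up):

    %% Load Bin as Mod even if an old version is lingering.
    force_load(Mod, Bin) ->
        case erlang:load_module(Mod, Bin) of
            {module,Mod} ->
                ok;
            {error,not_purged} ->
                %% Purging kills any process still running the old code,
                %% after which the load can be retried.
                erlang:purge_module(Mod),
                {module,Mod} = erlang:load_module(Mod, Bin),
                ok
        end.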
+
+
+multiple_load_check_purge_repeat(_Conf) ->
+    Ts = [v1,v2,v3,v4,v5,v6],
+
+    %% Generate code that receives a token, switches to the new code and
+    %% then matches the received token against a literal token in the code;
+    %% they should be identical
+    %% (smoke test for parallel code loading).
+    Codes = [{T, generate(?model, [], [
+		format("check(T) -> receive {_Pid, change, T1} -> "
+		       "   ~w:check(T1)\n"
+		       "   after 0 -> T = f(), check(T) end.\n", [?model]),
+		format("f() -> ~w.~n", [T])
+	    ])} || T <- Ts],
+
+    Pids = setup_code_changer(Codes),
+    {save_config, [{purge_pids,Pids}]}.
+
+setup_code_changer([{Token,Code}|Cs] = Codes) ->
+    {module, ?model} = erlang:load_module(?model,Code),
+    Pids = setup_checkers(Token,?number_of_processes),
+    code_changer(Cs, Codes, ?interval,Pids,?passes),
+    Pids.
+
+code_changer(_, _, _, Pids, 0) ->
+    [unlink(Pid) || Pid <- Pids],
+    [exit(Pid, die) || Pid <- Pids],
+    io:format("done~n"),
+    ok;
+code_changer([], Codes, T, Pids, Ps) ->
+    code_changer(Codes, Codes, T, Pids, Ps - 1);
+code_changer([{Token,Code}|Cs], Codes, T, Pids, Ps) ->
+    receive after T ->
+	    io:format("load code with token ~4w : pass ~4w~n", [Token, Ps]),
+	    {module, ?model} = erlang:load_module(?model, Code),
+	    % this is the second time we call load_module for this module,
+	    % so it should now have old code
+	    [Pid ! {self(), change, Token} || Pid <- Pids],
+	    % should we wait a moment or just blatantly try to check and purge repeatedly?
+	    receive after 1 -> ok end,
+	    ok = check_and_purge_processes_code(Pids, ?model),
+	    code_changer(Cs, Codes, T, Pids, Ps)
+    end.
+
+
+
+many_load_distributed_only_once(_Conf) ->
+    Ts = [<<"first version">>, <<"second version">>],
+
+    [{Token1,Code1},{Token2, Code2}] = [{T, generate(?model, [], [
+	    "check({<<\"second version\">> = V, Pid}) -> V = f(), Pid ! {self(), completed, V}, ok;\n" ++
+	    format("check(T) -> receive {Pid, change, T1, B} -> "
+		   "   Res = erlang:load_module(~w, B), Pid ! {self(), change, Res},\n"
+		   "   ~w:check({T1, Pid})\n"
+		   "   after 0 -> T = f(), check(T) end.\n", [?model, ?model]),
+	    format("f() -> ~w.~n", [T])
+	])} || T <- Ts],
+
+
+    {module, ?model} = erlang:load_module(?model, Code1),
+    Pids = setup_checkers(Token1,?number_of_processes),
+
+    receive after 1000 -> ok end, % give 'em some time to spin up
+    [Pid ! {self(), change, Token2, Code2} || Pid <- Pids],
+    Loads = [receive {Pid, change, Res} -> Res end || Pid <- Pids],
+    [receive {Pid, completed, Token2} -> ok end || Pid <- Pids],
+
+    ok = ensure_only_one_load(Loads, 0),
+    {save_config, [{purge_pids,Pids}]}.
+
+ensure_only_one_load([], 1) -> ok;
+ensure_only_one_load([], _) -> too_many_loads;
+ensure_only_one_load([{module, ?model}|Loads], N) ->
+    ensure_only_one_load(Loads, N + 1);
+ensure_only_one_load([{error, not_purged}|Loads], N) ->
+    ensure_only_one_load(Loads, N).
+% no other return values are allowed from load_module
+
+
+%% aux
+
+setup_checkers(_,0) -> [];
+setup_checkers(T,N) -> [spawn_link(fun() -> ?model:check(T) end) | setup_checkers(T, N-1)].
+
+check_and_purge_processes_code(Pids, M) ->
+    check_and_purge_processes_code(Pids, M, []).
+check_and_purge_processes_code([], M, []) -> + erlang:purge_module(M), + ok; +check_and_purge_processes_code([], M, Pending) -> + io:format("Processes ~w are still executing old code - retrying.~n", [Pending]), + check_and_purge_processes_code(Pending, M, []); +check_and_purge_processes_code([Pid|Pids], M, Pending) -> + case erlang:check_process_code(Pid, M) of + false -> + check_and_purge_processes_code(Pids, M, Pending); + true -> + check_and_purge_processes_code(Pids, M, [Pid|Pending]) + end. + + +generate(Module, Attributes, FunStrings) -> + FunForms = function_forms(FunStrings), + Forms = [ + {attribute,1,module,Module}, + {attribute,2,export,[FA || {FA,_} <- FunForms]} + ] ++ [{attribute, 3, A, V}|| {A, V} <- Attributes] ++ + [ Function || {_, Function} <- FunForms], + {ok, Module, Bin} = compile:forms(Forms), + Bin. + + +function_forms([]) -> []; +function_forms([S|Ss]) -> + {ok, Ts,_} = erl_scan:string(S), + {ok, Form} = erl_parse:parse_form(Ts), + Fun = element(3, Form), + Arity = element(4, Form), + [{{Fun,Arity}, Form}|function_forms(Ss)]. + +format(F,Ts) -> lists:flatten(io_lib:format(F, Ts)). diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl index a833e357cf..f3a177faf2 100644 --- a/erts/emulator/test/distribution_SUITE.erl +++ b/erts/emulator/test/distribution_SUITE.erl @@ -98,19 +98,6 @@ end_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> Dog=?config(watchdog, Config), ?t:timetrap_cancel(Dog). -%%% Don't be too hard on vxworks, the cross server gets nodedown -%%% cause the card is too busy if we don't sleep a little between pings. -sleep() -> - case os:type() of - vxworks -> - receive - after 10 -> - ok - end; - _ -> - ok - end. - ping(doc) -> ["Tests pinging a node in different ways."]; ping(Config) when is_list(Config) -> @@ -122,23 +109,21 @@ ping(Config) when is_list(Config) -> ?line Host = hostname(), ?line BadName = list_to_atom("__pucko__@" ++ Host), ?line io:format("Pinging ~s (assumed to not exist)", [BadName]), - ?line test_server:do_times(Times, - fun() -> pang = net_adm:ping(BadName), - sleep() + ?line test_server:do_times(Times, fun() -> pang = net_adm:ping(BadName) end), %% Pings another node. ?line {ok, OtherNode} = start_node(distribution_SUITE_other), ?line io:format("Pinging ~s (assumed to exist)", [OtherNode]), - ?line test_server:do_times(Times, fun() -> pong = net_adm:ping(OtherNode),sleep() end), + ?line test_server:do_times(Times, fun() -> pong = net_adm:ping(OtherNode) end), ?line stop_node(OtherNode), %% Pings our own node many times. ?line Node = node(), ?line io:format("Pinging ~s (the same node)", [Node]), - ?line test_server:do_times(Times, fun() -> pong = net_adm:ping(Node),sleep() end), + ?line test_server:do_times(Times, fun() -> pong = net_adm:ping(Node) end), ok. 
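The new code_parallel_load_SUITE above revolves around a check-then-purge cycle: after a second load_module/2 the old code may still be executed by the checker processes, so it can only be purged once check_process_code/2 reports false for all of them. A minimal sketch of that pattern, assuming a module m whose old code may still be referenced by the processes in Pids (purge_when_safe is an illustrative name, not part of the suite):

    purge_when_safe(Pids, M) ->
        %% erlang:check_process_code/2 is true while Pid still runs old code for M
        case lists:any(fun(Pid) -> erlang:check_process_code(Pid, M) end, Pids) of
            true  -> retry_later;                  %% some checker still in old code
            false -> erlang:purge_module(M), ok    %% assumes old code for M exists
        end.

This is also why many_load_distributed_only_once accepts {error, not_purged} from all but one loader: erlang:load_module/2 refuses to load a module while old code for it is still present, which is exactly what ensure_only_one_load/2 counts on.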
diff --git a/erts/emulator/test/driver_SUITE_data/chkio_drv.c b/erts/emulator/test/driver_SUITE_data/chkio_drv.c index 40f1ad4fea..faf1040276 100644 --- a/erts/emulator/test/driver_SUITE_data/chkio_drv.c +++ b/erts/emulator/test/driver_SUITE_data/chkio_drv.c @@ -17,7 +17,7 @@ */ #ifndef UNIX -#if !defined(__WIN32__) && !defined(VXWORKS) +#if !defined(__WIN32__) #define UNIX 1 #endif #endif diff --git a/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c b/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c index e6a3edcd74..1e107309df 100644 --- a/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c +++ b/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c @@ -17,7 +17,7 @@ */ #ifndef UNIX -#if !defined(__WIN32__) && !defined(VXWORKS) +#if !defined(__WIN32__) #define UNIX 1 #endif #endif diff --git a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c index b2cc1e785a..d174771629 100644 --- a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c +++ b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c @@ -29,7 +29,7 @@ */ #ifndef UNIX -#if !defined(__WIN32__) && !defined(VXWORKS) +#if !defined(__WIN32__) #define UNIX 1 #endif #endif diff --git a/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c b/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c index e7d9a294fa..851f2c745b 100644 --- a/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c +++ b/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c @@ -17,7 +17,7 @@ */ #ifndef UNIX -#if !defined(__WIN32__) && !defined(VXWORKS) +#if !defined(__WIN32__) #define UNIX 1 #endif #endif diff --git a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c index 8e203f74ec..0c86a26604 100644 --- a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c +++ b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c @@ -28,7 +28,7 @@ */ #ifndef UNIX -#if !defined(__WIN32__) && !defined(VXWORKS) +#if !defined(__WIN32__) #define UNIX 1 #endif #endif diff --git a/erts/emulator/test/driver_SUITE_data/timer_drv.c b/erts/emulator/test/driver_SUITE_data/timer_drv.c index 8c3f203a64..57538e0d57 100644 --- a/erts/emulator/test/driver_SUITE_data/timer_drv.c +++ b/erts/emulator/test/driver_SUITE_data/timer_drv.c @@ -1,11 +1,3 @@ -#ifdef VXWORKS -#include <vxWorks.h> -#include <taskVarLib.h> -#include <taskLib.h> -#include <sysLib.h> -#include <string.h> -#include <ioLib.h> -#endif #include <stdio.h> #include "erl_driver.h" @@ -84,12 +76,8 @@ static void timer_read(ErlDrvData p, char *buf, ErlDrvSizeT len) driver_output(port, reply, 1); } else if (buf[0] == DELAY_START_TIMER) { #ifndef __WIN32__ -#ifdef VXWORKS - taskDelay(sysClkRateGet()); -#else sleep(1); #endif -#endif driver_set_timer(port, get_int32(buf + 1)); } } diff --git a/erts/emulator/test/emulator.spec.vxworks b/erts/emulator/test/emulator.spec.vxworks deleted file mode 100644 index 55675bdc29..0000000000 --- a/erts/emulator/test/emulator.spec.vxworks +++ /dev/null @@ -1,26 +0,0 @@ -{topcase, {dir, "../emulator_test"}}. - -% Added since R11 -{skip,{distribution_SUITE,link_to_dead_new_node,"Does not work in distributed test environments"}}. -{skip,{binary_SUITE,terms_float,"Floats, VxWorks, PPC = Floating points never equal..."}}. -{skip,{system_info_SUITE,process_count,"Fix-allocs starving VxWorks cards"}}. -{skip,{monitor_SUITE,mixer,"Fix-allocs starving VxWorks cards"}}. 
- -{skip,{node_container_SUITE,"Too memory consuming..."}}. - -{skip,{trace_SUITE,system_monitor_long_gc_1,"Too memory consuming..."}}. -{skip,{trace_SUITE,system_monitor_long_gc_2,"Too memory consuming..."}}. -{skip,{trace_SUITE,system_monitor_large_heap_1,"Too memory consuming..."}}. -{skip,{trace_SUITE,system_monitor_large_heap_2,"Too memory consuming..."}}. -% End added since R11 - -{skip, {distribution_SUITE,stop_dist,"Not written to work on VxWorks."}}. -{skip, {distribution_SUITE,dist_auto_connect_never, - "Not written to work on VxWorks."}}. -{skip, {distribution_SUITE,dist_auto_connect_once, - "Not written to work on VxWorks."}}. -{skip, {trace_SUITE,system_monitor_long_gc, - "Too memory consuming for VxWorks cards."}}. -{skip, {trace_meta_SUITE,stack_grow, - "Too memory consuming for VxWorks cards."}}. -{skip, {obsolete_SUITE, "Not on vxworks"}}. diff --git a/erts/emulator/test/emulator_bench.spec b/erts/emulator/test/emulator_bench.spec new file mode 100644 index 0000000000..f709d913b7 --- /dev/null +++ b/erts/emulator/test/emulator_bench.spec @@ -0,0 +1 @@ +{groups,"../emulator_test",estone_SUITE,[estone_bench]}. diff --git a/erts/emulator/test/estone_SUITE.erl b/erts/emulator/test/estone_SUITE.erl index 2417d4bcfe..21834bfa62 100644 --- a/erts/emulator/test/estone_SUITE.erl +++ b/erts/emulator/test/estone_SUITE.erl @@ -19,7 +19,7 @@ -module(estone_SUITE). %% Test functions -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, - init_per_group/2,end_per_group/2,estone/1]). + init_per_group/2,end_per_group/2,estone/1,estone_bench/1]). -export([init_per_testcase/2, end_per_testcase/2]). %% Internal exports for EStone tests @@ -46,6 +46,7 @@ -include_lib("test_server/include/test_server.hrl"). +-include_lib("common_test/include/ct_event.hrl"). %% Test suite defines -define(default_timeout, ?t:minutes(10)). @@ -80,7 +81,7 @@ all() -> [estone]. groups() -> - []. + [{estone_bench, [{repeat,50}],[estone_bench]}]. init_per_suite(Config) -> Config. @@ -108,6 +109,17 @@ estone(Config) when is_list(Config) -> ?line {comment,Mhz ++ " MHz, " ++ integer_to_list(Stones) ++ " ESTONES"}. +estone_bench(Config) -> + DataDir = ?config(data_dir,Config), + L = ?MODULE:macro(?MODULE:micros(),DataDir), + [ct_event:notify( + #event{name = benchmark_data, + data = [{name,proplists:get_value(title,Mark)}, + {value,proplists:get_value(estones,Mark)}]}) + || Mark <- L], + L. 
+ + %% %% Calculate CPU speed %% diff --git a/erts/emulator/test/estone_SUITE_data/estone_cat.c b/erts/emulator/test/estone_SUITE_data/estone_cat.c index 8ed9f8375b..a34bda4384 100644 --- a/erts/emulator/test/estone_SUITE_data/estone_cat.c +++ b/erts/emulator/test/estone_SUITE_data/estone_cat.c @@ -12,11 +12,7 @@ #include <fcntl.h> #include <errno.h> -#ifdef VXWORKS -estone_cat(argc, argv) -#else main(argc, argv) -#endif int argc; char *argv[]; { diff --git a/erts/emulator/test/gc_SUITE.erl b/erts/emulator/test/gc_SUITE.erl index 771d2c9a7a..19fa433a53 100644 --- a/erts/emulator/test/gc_SUITE.erl +++ b/erts/emulator/test/gc_SUITE.erl @@ -54,17 +54,12 @@ grow_heap(doc) -> ["Produce a growing list of elements, ", "for X calls, then drop one item per call", "until the list is empty."]; grow_heap(Config) when is_list(Config) -> - ?line Dog=test_server:timetrap(test_server:minutes(40)), - ?line ok=grow_heap1(256), - case os:type() of - vxworks -> - stop_here; - _ -> - ?line ok=grow_heap1(512), - ?line ok=grow_heap1(1024), - ?line ok=grow_heap1(2048) - end, - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:minutes(40)), + ok = grow_heap1(256), + ok = grow_heap1(512), + ok = grow_heap1(1024), + ok = grow_heap1(2048), + test_server:timetrap_cancel(Dog), ok. grow_heap1(Len) -> @@ -82,10 +77,10 @@ grow_heap1(List, MaxLen, CurLen, up) -> grow_heap1([], _MaxLen, _, down) -> ok; grow_heap1([_|List], MaxLen, CurLen, down) -> - ?line {_,_,C}=erlang:now(), - ?line Num=C rem (length(List))+1, - ?line Elem=lists:nth(Num, List), - ?line NewList=lists:delete(Elem, List), + {_,_,C} = erlang:now(), + Num = C rem (length(List))+1, + Elem = lists:nth(Num, List), + NewList = lists:delete(Elem, List), grow_heap1(NewList, MaxLen, CurLen-1, down). @@ -93,16 +88,11 @@ grow_heap1([_|List], MaxLen, CurLen, down) -> grow_stack(doc) -> ["Increase and decrease stack size, and ", "drop off some garbage from time to time."]; grow_stack(Config) when is_list(Config) -> - ?line Dog=test_server:timetrap(test_server:minutes(80)), + Dog = test_server:timetrap(test_server:minutes(80)), show_heap("before:"), - case os:type() of - vxworks -> - ?line grow_stack1(25, 0); - _ -> - ?line grow_stack1(200, 0) - end, + grow_stack1(200, 0), show_heap("after:"), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. grow_stack1(0, _) -> @@ -123,16 +113,11 @@ grow_stack_heap(doc) -> ["While growing the heap, bounces the size ", "of the stack, and while reducing the heap", "bounces the stack usage."]; grow_stack_heap(Config) when is_list(Config) -> - case os:type() of - vxworks -> - {comment, "Takes too long to run on VxWorks/cpu32"}; - _ -> - ?line Dog=test_server:timetrap(test_server:minutes(40)), - ?line grow_stack_heap1(16), - ?line grow_stack_heap1(32), - ?line test_server:timetrap_cancel(Dog), - ok - end. + Dog = test_server:timetrap(test_server:minutes(40)), + grow_stack_heap1(16), + grow_stack_heap1(32), + test_server:timetrap_cancel(Dog), + ok. 
grow_stack_heap1(MaxLen) -> io:format("~ngrow_stack_heap with ~p items.",[MaxLen]), @@ -151,10 +136,10 @@ grow_stack_heap1(List, MaxLen, CurLen, up) -> grow_stack_heap1([], _MaxLen, _, down) -> ok; grow_stack_heap1([_|List], MaxLen, CurLen, down) -> grow_stack1(CurLen*2,0), - ?line {_,_,C}=erlang:now(), - ?line Num=C rem (length(List))+1, - ?line Elem=lists:nth(Num, List), - ?line NewList=lists:delete(Elem, List), + {_,_,C}=erlang:now(), + Num=C rem (length(List))+1, + Elem=lists:nth(Num, List), + NewList=lists:delete(Elem, List), grow_stack_heap1(NewList, MaxLen, CurLen-1, down), ok. diff --git a/erts/emulator/test/mtx_SUITE_data/Makefile.src b/erts/emulator/test/mtx_SUITE_data/Makefile.src index b6c843269c..37eb1daa72 100644 --- a/erts/emulator/test/mtx_SUITE_data/Makefile.src +++ b/erts/emulator/test/mtx_SUITE_data/Makefile.src @@ -27,4 +27,11 @@ LIBS = @ERTS_LIBS@ all: $(NIF_LIBS) +mtx_SUITE.c: force_rebuild + touch mtx_SUITE.c + +force_rebuild: + echo "Force rebuild to compensate for emulator type dependencies" + + @SHLIB_RULES@ diff --git a/erts/emulator/test/nofrag_SUITE.erl b/erts/emulator/test/nofrag_SUITE.erl index 6b6ac28e2e..71567ed0cb 100644 --- a/erts/emulator/test/nofrag_SUITE.erl +++ b/erts/emulator/test/nofrag_SUITE.erl @@ -26,7 +26,6 @@ init_per_testcase/2,end_per_testcase/2, error_handler/1,error_handler_apply/1, error_handler_fixed_apply/1,error_handler_fun/1, - error_handler_tuple_fun/1, debug_breakpoint/1]). %% Exported functions for an error_handler module. @@ -37,7 +36,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> [error_handler, error_handler_apply, error_handler_fixed_apply, error_handler_fun, - error_handler_tuple_fun, debug_breakpoint]. + debug_breakpoint]. groups() -> []. @@ -178,29 +177,6 @@ collect_fun(N, Fun) -> undefined_lambda(foobarblurf, Fun, Args) when is_function(Fun) -> Args. -error_handler_tuple_fun(Config) when is_list(Config) -> - ?line process_flag(error_handler, ?MODULE), - ?line Term = collect_tuple_fun(1024, {?MODULE,very_undefined_function}), - ?line Term = binary_to_term(term_to_binary(Term)), - ?line 1024 = length(Term), - ?line [[{foo,bar},42.0,[e,f,g]]] = lists:usort(Term), - ok. - -collect_tuple_fun(0, _) -> - []; -collect_tuple_fun(N, Fun) -> - %% The next line calls the error handle function, which is - %% ?MODULE:undefined_function/3 (it simply returns the list - %% of args). - C = Fun({foo,id(bar)}, 42.0, [e,f,id(g)]), - - %% The variable C will be saved onto the stack frame; if C - %% points into a heap fragment the garbage collector will reach - %% it and the emulator will crash sooner or later (sooner if - %% the emulator is debug-compiled). - Res = collect_tuple_fun(N-1, Fun), - [C|Res]. - debug_breakpoint(Config) when is_list(Config) -> ?line process_flag(error_handler, ?MODULE), ?line erts_debug:breakpoint({?MODULE,foobar,5}, true), diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl index 9fa4df6373..873601ddd1 100644 --- a/erts/emulator/test/port_SUITE.erl +++ b/erts/emulator/test/port_SUITE.erl @@ -157,16 +157,16 @@ win_massive(Config) when is_list(Config) -> end. 
do_win_massive() -> - ?line Dog = test_server:timetrap(test_server:seconds(360)), - ?line SuiteDir = filename:dirname(code:which(?MODULE)), - ?line Env = " -env ERL_MAX_PORTS 8192", - ?line {ok, Node} = + Dog = test_server:timetrap(test_server:seconds(360)), + SuiteDir = filename:dirname(code:which(?MODULE)), + Env = " -env ERL_MAX_PORTS 8192", + {ok, Node} = test_server:start_node(win_massive, slave, [{args, " -pa " ++ SuiteDir ++ Env}]), - ?line ok = rpc:call(Node,?MODULE,win_massive_client,[3000]), - ?line test_server:stop_node(Node), - ?line test_server:timetrap_cancel(Dog), + ok = rpc:call(Node,?MODULE,win_massive_client,[3000]), + test_server:stop_node(Node), + test_server:timetrap_cancel(Dog), ok. win_massive_client(N) -> @@ -208,11 +208,11 @@ win_massive_loop(P,N) -> %% We will send only a small amount of data, to avoid deadlock. stream_small(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line stream_ping(Config, 512, "", []), - ?line stream_ping(Config, 1777, "", []), - ?line stream_ping(Config, 1777, "-s512", []), - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(10)), + stream_ping(Config, 512, "", []), + stream_ping(Config, 1777, "", []), + stream_ping(Config, 1777, "-s512", []), + test_server:timetrap_cancel(Dog), ok. %% Send big amounts of data (much bigger than the buffer size in port test). @@ -220,30 +220,22 @@ stream_small(Config) when is_list(Config) -> %% non-blocking reads and writes. stream_big(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(180)), - case os:type() of - vxworks -> - %% Don't stress VxWorks too much - ?line stream_ping(Config, 43755, "", []), - ?line stream_ping(Config, 51255, "", []), - ?line stream_ping(Config, 52345, " -s40000", []); - _ -> - ?line stream_ping(Config, 43755, "", []), - ?line stream_ping(Config, 100000, "", []), - ?line stream_ping(Config, 77777, " -s40000", []) - end, - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(180)), + stream_ping(Config, 43755, "", []), + stream_ping(Config, 100000, "", []), + stream_ping(Config, 77777, " -s40000", []), + test_server:timetrap_cancel(Dog), ok. %% Sends packet with header size of 1, 2, and 4, with packets of various %% sizes. basic_ping(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(120)), - ?line ping(Config, sizes(1), 1, "", []), - ?line ping(Config, sizes(2), 2, "", []), - ?line ping(Config, sizes(4), 4, "", []), - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(120)), + ping(Config, sizes(1), 1, "", []), + ping(Config, sizes(2), 2, "", []), + ping(Config, sizes(4), 4, "", []), + test_server:timetrap_cancel(Dog), ok. %% Let the port program insert delays between characters sent back to @@ -251,30 +243,29 @@ basic_ping(Config) when is_list(Config) -> %% small chunks rather than all at once. slow_writes(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(20)), - ?line ping(Config, [8], 4, "-s1", []), - ?line ping(Config, [10], 2, "-s2", []), - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(20)), + ping(Config, [8], 4, "-s1", []), + ping(Config, [10], 2, "-s2", []), + test_server:timetrap_cancel(Dog), ok. 
bad_packet(doc) -> ["Test that we get {'EXIT', Port, einval} if we try to send a bigger " "packet than the packet header allows."]; bad_packet(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line PortTest = port_test(Config), - ?line process_flag(trap_exit, true), + Dog = test_server:timetrap(test_server:seconds(10)), + PortTest = port_test(Config), + process_flag(trap_exit, true), - ?line bad_packet(PortTest, 1, 256), - ?line bad_packet(PortTest, 1, 257), - ?line bad_packet(PortTest, 2, 65536), - ?line bad_packet(PortTest, 2, 65537), + bad_packet(PortTest, 1, 256), + bad_packet(PortTest, 1, 257), + bad_packet(PortTest, 2, 65536), + bad_packet(PortTest, 2, 65537), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. bad_packet(PortTest, HeaderSize, PacketSize) -> - %% Intentionally no ?line macros. P = open_port({spawn, PortTest}, [{packet, HeaderSize}]), P ! {self(), {command, make_zero_packet(PacketSize)}}, receive @@ -292,16 +283,16 @@ make_zero_packet(N) -> %% Test sending bad messages to a port. bad_port_messages(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line PortTest = port_test(Config), - ?line process_flag(trap_exit, true), + Dog = test_server:timetrap(test_server:seconds(10)), + PortTest = port_test(Config), + process_flag(trap_exit, true), - ?line bad_message(PortTest, {a,b}), - ?line bad_message(PortTest, {a}), - ?line bad_message(PortTest, {self(),{command,bad_command}}), - ?line bad_message(PortTest, {self(),{connect,no_pid}}), + bad_message(PortTest, {a,b}), + bad_message(PortTest, {a}), + bad_message(PortTest, {self(),{command,bad_command}}), + bad_message(PortTest, {self(),{connect,no_pid}}), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. bad_message(PortTest, Message) -> @@ -319,108 +310,97 @@ bad_message(PortTest, Message) -> %% Tests the 'binary' option for a port. t_binary(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(300)), + Dog = test_server:timetrap(test_server:seconds(300)), %% Packet mode. - ?line ping(Config, sizes(1), 1, "", [binary]), - ?line ping(Config, sizes(2), 2, "", [binary]), - ?line ping(Config, sizes(4), 4, "", [binary]), + ping(Config, sizes(1), 1, "", [binary]), + ping(Config, sizes(2), 2, "", [binary]), + ping(Config, sizes(4), 4, "", [binary]), %% Stream mode. - case os:type() of - vxworks -> - %% don't stress VxWorks too much - ?line stream_ping(Config, 435, "", [binary]), - ?line stream_ping(Config, 43755, "", [binary]), - ?line stream_ping(Config, 50000, "", [binary]); - _ -> - ?line stream_ping(Config, 435, "", [binary]), - ?line stream_ping(Config, 43755, "", [binary]), - ?line stream_ping(Config, 100000, "", [binary]) - end, + stream_ping(Config, 435, "", [binary]), + stream_ping(Config, 43755, "", [binary]), + stream_ping(Config, 100000, "", [binary]), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. 
name1(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(100)), - ?line PortTest = port_test(Config), - ?line Command = lists:concat([PortTest, " "]), - ?line P = open_port({spawn, Command}, []), - ?line register(myport, P), - ?line P = whereis(myport), + Dog = test_server:timetrap(test_server:seconds(100)), + PortTest = port_test(Config), + Command = lists:concat([PortTest, " "]), + P = open_port({spawn, Command}, []), + register(myport, P), + P = whereis(myport), Text = "hej", - ?line myport ! {self(), {command, Text}}, - ?line receive - {P, {data, Text}} -> - ok - end, - ?line myport ! {self(), close}, - ?line receive - {P, closed} -> ok - end, - ?line undefined = whereis(myport), - ?line test_server:timetrap_cancel(Dog), + myport ! {self(), {command, Text}}, + receive + {P, {data, Text}} -> + ok + end, + myport ! {self(), close}, + receive + {P, closed} -> ok + end, + undefined = whereis(myport), + test_server:timetrap_cancel(Dog), ok. %% Test that the 'eof' option works. eof(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(100)), - ?line PortTest = port_test(Config), - ?line Command = lists:concat([PortTest, " -h0 -q"]), - ?line P = open_port({spawn, Command}, [eof]), - ?line receive - {P, eof} -> - ok - end, - ?line P ! {self(), close}, - ?line receive - {P, closed} -> ok - end, - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(100)), + PortTest = port_test(Config), + Command = lists:concat([PortTest, " -h0 -q"]), + P = open_port({spawn, Command}, [eof]), + receive + {P, eof} -> + ok + end, + P ! {self(), close}, + receive + {P, closed} -> ok + end, + test_server:timetrap_cancel(Dog), ok. %% Tests that the 'in' option for a port works. input_only(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(300)), - ?line expect_input(Config, [0, 1, 10, 13, 127, 128, 255], 1, "", [in]), - ?line expect_input(Config, [0, 1, 255, 2048], 2, "", [in]), - ?line expect_input(Config, [0, 1, 255, 2048], 4, "", [in]), - ?line expect_input(Config, [0, 1, 10, 13, 127, 128, 255], - 1, "", [in, binary]), - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(300)), + expect_input(Config, [0, 1, 10, 13, 127, 128, 255], 1, "", [in]), + expect_input(Config, [0, 1, 255, 2048], 2, "", [in]), + expect_input(Config, [0, 1, 255, 2048], 4, "", [in]), + expect_input(Config, [0, 1, 10, 13, 127, 128, 255], + 1, "", [in, binary]), + test_server:timetrap_cancel(Dog), ok. %% Tests that the 'out' option for a port works. output_only(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(100)), - ?line Dir = ?config(priv_dir, Config), - ?line Filename = filename:join(Dir, "output_only_stream"), - ?line output_and_verify(Config, Filename, "-h0", - random_packet(35777, "echo")), - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(100)), + Dir = ?config(priv_dir, Config), + Filename = filename:join(Dir, "output_only_stream"), + output_and_verify(Config, Filename, "-h0", + random_packet(35777, "echo")), + test_server:timetrap_cancel(Dog), ok. output_and_verify(Config, Filename, Options, Data) -> - ?line PortTest = port_test(Config), - ?line Command = lists:concat([PortTest, " ", - Options, " -o", Filename]), - ?line Port = open_port({spawn, Command}, [out]), - ?line Port ! {self(), {command, Data}}, - ?line Port ! 
{self(), close}, - ?line receive - {Port, closed} -> ok - end, - Wait_time = case os:type() of - vxworks -> 5000; - _ -> 500 - end, - ?line test_server:sleep(Wait_time), - ?line {ok, Written} = file:read_file(Filename), - ?line Data = binary_to_list(Written), + PortTest = port_test(Config), + Command = lists:concat([PortTest, " ", + Options, " -o", Filename]), + Port = open_port({spawn, Command}, [out]), + Port ! {self(), {command, Data}}, + Port ! {self(), close}, + receive + {Port, closed} -> ok + end, + Wait_time = 500, + test_server:sleep(Wait_time), + {ok, Written} = file:read_file(Filename), + Data = binary_to_list(Written), ok. %% Test that receiving several packages written in the same @@ -430,19 +410,11 @@ output_and_verify(Config, Filename, Options, Data) -> %% Basic test of receiving multiple packages, written in %% one operation by the other end. mul_basic(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(600)), - case os:type() of - vxworks -> - %% don't stress vxworks too much - ?line expect_input(Config, [0, 1, 255, 10, 13], 1, "", []), - ?line expect_input(Config, [0, 10, 13, 1600, 8191, 16383], 2, "", []), - ?line expect_input(Config, [10, 35000], 4, "", []); - _ -> - ?line expect_input(Config, [0, 1, 255, 10, 13], 1, "", []), - ?line expect_input(Config, [0, 10, 13, 1600, 32767, 65535], 2, "", []), - ?line expect_input(Config, [10, 70000], 4, "", []) - end, - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(600)), + expect_input(Config, [0, 1, 255, 10, 13], 1, "", []), + expect_input(Config, [0, 10, 13, 1600, 32767, 65535], 2, "", []), + expect_input(Config, [10, 70000], 4, "", []), + test_server:timetrap_cancel(Dog), ok. %% Test reading a buffer consisting of several packets, some @@ -451,9 +423,9 @@ mul_basic(Config) when is_list(Config) -> %% delays in between.) mul_slow_writes(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(250)), - ?line expect_input(Config, [0, 20, 255, 10, 1], 1, "-s64", []), - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(250)), + expect_input(Config, [0, 20, 255, 10, 1], 1, "-s64", []), + test_server:timetrap_cancel(Dog), ok. %% Runs several port tests in parallell. Each individual test @@ -461,27 +433,27 @@ mul_slow_writes(Config) when is_list(Config) -> %% should also finish in about 5 seconds. 
parallell(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(300)), - ?line Testers = - [fun() -> stream_ping(Config, 1007, "-s100", []) end, - fun() -> stream_ping(Config, 10007, "-s1000", []) end, - fun() -> stream_ping(Config, 10007, "-s1000", []) end, - - fun() -> expect_input(Config, [21, 22, 23, 24, 25], 1, - "-s10", [in]) end, - - fun() -> ping(Config, [10], 1, "-d", []) end, - fun() -> ping(Config, [20000], 2, "-d", []) end, - fun() -> ping(Config, [101], 1, "-s10", []) end, - fun() -> ping(Config, [1001], 2, "-s100", []) end, - fun() -> ping(Config, [10001], 4, "-s1000", []) end, - - fun() -> ping(Config, [501, 501], 2, "-s100", []) end, - fun() -> ping(Config, [11, 12, 13, 14, 11], 1, "-s5", []) end], - ?line process_flag(trap_exit, true), - ?line Pids = lists:map(fun fun_spawn/1, Testers), - ?line wait_for(Pids), - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(300)), + Testers = [ + fun() -> stream_ping(Config, 1007, "-s100", []) end, + fun() -> stream_ping(Config, 10007, "-s1000", []) end, + fun() -> stream_ping(Config, 10007, "-s1000", []) end, + + fun() -> expect_input(Config, [21, 22, 23, 24, 25], 1, + "-s10", [in]) end, + + fun() -> ping(Config, [10], 1, "-d", []) end, + fun() -> ping(Config, [20000], 2, "-d", []) end, + fun() -> ping(Config, [101], 1, "-s10", []) end, + fun() -> ping(Config, [1001], 2, "-s100", []) end, + fun() -> ping(Config, [10001], 4, "-s1000", []) end, + + fun() -> ping(Config, [501, 501], 2, "-s100", []) end, + fun() -> ping(Config, [11, 12, 13, 14, 11], 1, "-s5", []) end], + process_flag(trap_exit, true), + Pids = lists:map(fun fun_spawn/1, Testers), + wait_for(Pids), + test_server:timetrap_cancel(Dog), ok. wait_for([]) -> @@ -500,29 +472,29 @@ wait_for(Pids) -> dying_port(suite) -> []; dying_port(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(150)), - ?line process_flag(trap_exit, true), + Dog = test_server:timetrap(test_server:seconds(150)), + process_flag(trap_exit, true), - ?line P1 = make_dying_port(Config), - ?line P2 = make_dying_port(Config), - ?line P3 = make_dying_port(Config), - ?line P4 = make_dying_port(Config), - ?line P5 = make_dying_port(Config), + P1 = make_dying_port(Config), + P2 = make_dying_port(Config), + P3 = make_dying_port(Config), + P4 = make_dying_port(Config), + P5 = make_dying_port(Config), %% This should be big enough to be sure to block in the write. - ?line Garbage = random_packet(16384), + Garbage = random_packet(16384), - ?line P1 ! {self(), {command, Garbage}}, - ?line P3 ! {self(), {command, Garbage}}, - ?line P5 ! {self(), {command, Garbage}}, + P1 ! {self(), {command, Garbage}}, + P3 ! {self(), {command, Garbage}}, + P5 ! {self(), {command, Garbage}}, - ?line wait_for_port_exit(P1), - ?line wait_for_port_exit(P2), - ?line wait_for_port_exit(P3), - ?line wait_for_port_exit(P4), - ?line wait_for_port_exit(P5), + wait_for_port_exit(P1), + wait_for_port_exit(P2), + wait_for_port_exit(P3), + wait_for_port_exit(P4), + wait_for_port_exit(P5), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. 
wait_for_port_exit(Port) -> @@ -549,9 +521,9 @@ make_dying_port(Config) when is_list(Config) -> port_program_with_path(suite) -> []; port_program_with_path(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(100)), - ?line DataDir = ?config(data_dir, Config), - ?line PrivDir = ?config(priv_dir, Config), + Dog = test_server:timetrap(test_server:seconds(100)), + DataDir = ?config(data_dir, Config), + PrivDir = ?config(priv_dir, Config), %% Create a copy of the port test program in a directory not %% included in PATH (i.e. in priv_dir), with the name 'my_port_test.exe'. @@ -560,36 +532,31 @@ port_program_with_path(Config) when is_list(Config) -> %% (On Unix, there will be a single file created, which will be %% a copy of the port program.) - ?line PortTest = os:find_executable("port_test", DataDir), + PortTest = os:find_executable("port_test", DataDir), io:format("os:find_executable(~p, ~p) returned ~p", ["port_test", DataDir, PortTest]), - ?line {ok, PortTestPgm} = file:read_file(PortTest), - ?line NewName = filename:join(PrivDir, filename:basename(PortTest)), - ?line RedHerring = filename:rootname(NewName), - ?line ok = file:write_file(RedHerring, "I'm just here to confuse.\n"), - ?line ok = file:write_file(NewName, PortTestPgm), - ?line ok = file:write_file_info(NewName, #file_info{mode=8#111}), - ?line PgmWithPathAndNoExt = filename:rootname(NewName), + {ok, PortTestPgm} = file:read_file(PortTest), + NewName = filename:join(PrivDir, filename:basename(PortTest)), + RedHerring = filename:rootname(NewName), + ok = file:write_file(RedHerring, "I'm just here to confuse.\n"), + ok = file:write_file(NewName, PortTestPgm), + ok = file:write_file_info(NewName, #file_info{mode=8#111}), + PgmWithPathAndNoExt = filename:rootname(NewName), %% Open the port using the path to the copied port test program, %% but without the .exe extension, and verified that it was started. %% %% If the bug is present the open_port call will fail with badarg. - ?line Command = lists:concat([PgmWithPathAndNoExt, " -h2"]), - %% allow VxWorks time to write file - case os:type() of - vxworks -> test_server:sleep(2500); - _ -> time + Command = lists:concat([PgmWithPathAndNoExt, " -h2"]), + P = open_port({spawn, Command}, [{packet, 2}]), + Message = "echo back to me", + P ! {self(), {command, Message}}, + receive + {P, {data, Message}} -> + ok end, - ?line P = open_port({spawn, Command}, [{packet, 2}]), - ?line Message = "echo back to me", - ?line P ! {self(), {command, Message}}, - ?line receive - {P, {data, Message}} -> - ok - end, - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. @@ -597,56 +564,46 @@ port_program_with_path(Config) when is_list(Config) -> %% This used to fail on Windows. open_input_file_port(suite) -> []; open_input_file_port(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line PrivDir = ?config(priv_dir, Config), + Dog = test_server:timetrap(test_server:seconds(10)), + PrivDir = ?config(priv_dir, Config), %% Create a file with the file driver and read it back using %% open_port/2. 
- ?line MyFile1 = filename:join(PrivDir, "my_input_file"), - ?line FileData1 = "An input file", - ?line ok = file:write_file(MyFile1, FileData1), - case os:type() of - vxworks -> - %% Can't open input file with vanilla driver on VxWorks - ?line process_flag(trap_exit, true), - ?line case catch open_port(MyFile1, [in]) of - {'EXIT', {badarg, _}} -> - ok - end; - _ -> - ?line case open_port(MyFile1, [in]) of - InputPort when is_port(InputPort) -> - ?line receive - {InputPort, {data, FileData1}} -> - ok - end - end + MyFile1 = filename:join(PrivDir, "my_input_file"), + FileData1 = "An input file", + ok = file:write_file(MyFile1, FileData1), + case open_port(MyFile1, [in]) of + InputPort when is_port(InputPort) -> + receive + {InputPort, {data, FileData1}} -> + ok + end end, - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. %% Tests that files can be written using open_port(Filename, [out]). open_output_file_port(suite) -> []; open_output_file_port(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(100)), - ?line PrivDir = ?config(priv_dir, Config), + Dog = test_server:timetrap(test_server:seconds(100)), + PrivDir = ?config(priv_dir, Config), %% Create a file with open_port/2 and read it back with %% the file driver. - ?line MyFile2 = filename:join(PrivDir, "my_output_file"), - ?line FileData2_0 = "A file created ", - ?line FileData2_1 = "with open_port/2.\n", - ?line FileData2 = FileData2_0 ++ FileData2_1, - ?line OutputPort = open_port(MyFile2, [out]), - ?line OutputPort ! {self(), {command, FileData2_0}}, - ?line OutputPort ! {self(), {command, FileData2_1}}, - ?line OutputPort ! {self(), close}, - ?line {ok, Bin} = file:read_file(MyFile2), - ?line FileData2 = binary_to_list(Bin), - - ?line test_server:timetrap_cancel(Dog), + MyFile2 = filename:join(PrivDir, "my_output_file"), + FileData2_0 = "A file created ", + FileData2_1 = "with open_port/2.\n", + FileData2 = FileData2_0 ++ FileData2_1, + OutputPort = open_port(MyFile2, [out]), + OutputPort ! {self(), {command, FileData2_0}}, + OutputPort ! {self(), {command, FileData2_1}}, + OutputPort ! {self(), close}, + {ok, Bin} = file:read_file(MyFile2), + FileData2 = binary_to_list(Bin), + + test_server:timetrap_cancel(Dog), ok. %% @@ -670,17 +627,17 @@ iter_max_ports(Config) when is_list(Config) -> iter_max_ports_test(Config) -> - ?line Dog = test_server:timetrap(test_server:minutes(20)), - ?line PortTest = port_test(Config), - ?line Command = lists:concat([PortTest, " -h0 -q"]), - ?line Iters = case os:type() of + Dog = test_server:timetrap(test_server:minutes(20)), + PortTest = port_test(Config), + Command = lists:concat([PortTest, " -h0 -q"]), + Iters = case os:type() of {win32,_} -> 4; _ -> 10 end, - ?line L = do_iter_max_ports(Iters, Command), + L = do_iter_max_ports(Iters, Command), io:format("Result: ~p",[L]), - ?line all_equal(L), - ?line test_server:timetrap_cancel(Dog), + all_equal(L), + test_server:timetrap_cancel(Dog), {comment, "Max ports: " ++ integer_to_list(hd(L))}. do_iter_max_ports(N, Command) when N > 0 -> @@ -695,9 +652,9 @@ all_equal([]) -> ok. max_ports(Command) -> test_server:sleep(500), - ?line Ps = open_ports({spawn, Command}, [eof]), - ?line N = length(Ps), - ?line close_ports(Ps), + Ps = open_ports({spawn, Command}, [eof]), + N = length(Ps), + close_ports(Ps), io:format("Got ~p ports\n",[N]), N. 
@@ -727,105 +684,105 @@ open_ports(Name, Settings) -> enomem -> []; Other -> - ?line test_server:fail({open_ports, Other}) + test_server:fail({open_ports, Other}) end; Other -> - ?line test_server:fail({open_ports, Other}) + test_server:fail({open_ports, Other}) end. %% Tests that exit(Port, Term) works (has been known to crash the emulator). t_exit(suite) -> []; t_exit(Config) when is_list(Config) -> - ?line process_flag(trap_exit, true), - ?line Pid = fun_spawn(fun suicide_port/1, [Config]), - ?line receive - {'EXIT', Pid, die} -> - ok; - Other -> - test_server:fail({bad_message, Other}) - end. + process_flag(trap_exit, true), + Pid = fun_spawn(fun suicide_port/1, [Config]), + receive + {'EXIT', Pid, die} -> + ok; + Other -> + test_server:fail({bad_message, Other}) + end. suicide_port(Config) when is_list(Config) -> - ?line Port = port_expect(Config, [], 0, "", []), - ?line exit(Port, die), - ?line receive after infinity -> ok end. + Port = port_expect(Config, [], 0, "", []), + exit(Port, die), + receive after infinity -> ok end. tps_16_bytes(doc) -> ""; tps_16_bytes(suite) -> []; tps_16_bytes(Config) when is_list(Config) -> - ?line tps(16, Config). + tps(16, Config). tps_1K(doc) -> ""; tps_1K(suite) -> []; tps_1K(Config) when is_list(Config) -> - ?line tps(1024, Config). + tps(1024, Config). tps(Size, Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(300)), - ?line PortTest = port_test(Config), - ?line Packet = list_to_binary(random_packet(Size, "e")), - ?line Port = open_port({spawn, PortTest}, [binary, {packet, 2}]), - ?line Transactions = 10000, - ?line {Elapsed, ok} = test_server:timecall(?MODULE, tps, + Dog = test_server:timetrap(test_server:seconds(300)), + PortTest = port_test(Config), + Packet = list_to_binary(random_packet(Size, "e")), + Port = open_port({spawn, PortTest}, [binary, {packet, 2}]), + Transactions = 10000, + {Elapsed, ok} = test_server:timecall(?MODULE, tps, [Port, Packet, Transactions]), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), {comment, integer_to_list(trunc(Transactions/Elapsed+0.5)) ++ " transactions/s"}. tps(_Port, _Packet, 0) -> ok; tps(Port, Packet, N) -> - ?line port_command(Port, Packet), - ?line receive - {Port, {data, Packet}} -> - ?line tps(Port, Packet, N-1); - Other -> - ?line test_server:fail({bad_message, Other}) - end. + port_command(Port, Packet), + receive + {Port, {data, Packet}} -> + tps(Port, Packet, N-1); + Other -> + test_server:fail({bad_message, Other}) + end. 
%% Line I/O test line(Config) when is_list(Config) -> - ?line Siz = 110, - ?line Dog = test_server:timetrap(test_server:seconds(300)), - ?line Packet1 = random_packet(Siz), - ?line Packet2 = random_packet(Siz div 2), + Siz = 110, + Dog = test_server:timetrap(test_server:seconds(300)), + Packet1 = random_packet(Siz), + Packet2 = random_packet(Siz div 2), %% Test that packets are split into lines - ?line port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet2, + port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet2, io_lib:nl()]), [{eol, Packet1}, {eol, Packet2}]}], 0, "", [{line,Siz}]), %% Test the same for binaries - ?line port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet2, + port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet2, io_lib:nl()]), [{eol, Packet1}, {eol, Packet2}]}], 0, "", [{line,Siz},binary]), %% Test that too long lines get split - ?line port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet1, + port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet1, Packet2, io_lib:nl()]), [{eol, Packet1}, {noeol, Packet1}, {eol, Packet2}]}], 0, "", [{line,Siz}]), %% Test that last output from closing port program gets received. - ?line L1 = lists:append([Packet1, io_lib:nl(), Packet2]), - ?line S1 = lists:flatten(io_lib:format("-l~w", [length(L1)])), + L1 = lists:append([Packet1, io_lib:nl(), Packet2]), + S1 = lists:flatten(io_lib:format("-l~w", [length(L1)])), io:format("S1 = ~w, L1 = ~w~n", [S1,L1]), - ?line port_expect(Config,[{L1, + port_expect(Config,[{L1, [{eol, Packet1}, {noeol, Packet2}, eof]}], 0, S1, [{line,Siz},eof]), %% Test that lonely <CR> Don't get treated as newlines - ?line port_expect(Config,[{lists:append([Packet1, [13], Packet2, + port_expect(Config,[{lists:append([Packet1, [13], Packet2, io_lib:nl()]), [{noeol, Packet1}, {eol, [13 |Packet2]}]}], 0, "", [{line,Siz}]), %% Test that packets get built up to lines (delayed output from %% port program) - ?line port_expect(Config,[{Packet2,[]}, + port_expect(Config,[{Packet2,[]}, {lists:append([Packet2, io_lib:nl(), Packet1, io_lib:nl()]), [{eol, lists:append(Packet2, Packet2)}, {eol, Packet1}]}], 0, "-d", [{line,Siz}]), %% Test that we get badarg if trying both packet and line - ?line bad_argument(Config, [{packet, 5}, {line, 5}]), - ?line test_server:timetrap_cancel(Dog), + bad_argument(Config, [{packet, 5}, {line, 5}]), + test_server:timetrap_cancel(Dog), ok. %%% Redirection of stderr test @@ -834,15 +791,15 @@ stderr_to_stdout(suite) -> stderr_to_stdout(doc) -> "Test that redirection of standard error to standard output works."; stderr_to_stdout(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(60)), + Dog = test_server:timetrap(test_server:seconds(60)), %% See that it works - ?line Packet = random_packet(10), - ?line port_expect(Config,[{Packet,[Packet]}], 0, "-e -l10", + Packet = random_packet(10), + port_expect(Config,[{Packet,[Packet]}], 0, "-e -l10", [stderr_to_stdout]), - %% ?line stream_ping(Config, 10, "-e", [stderr_to_stdout]), + %% stream_ping(Config, 10, "-e", [stderr_to_stdout]), %% See that it doesn't always happen (will generate garbage on stderr) - ?line port_expect(Config,[{Packet,[eof]}], 0, "-e -l10", [line,eof]), - ?line test_server:timetrap_cancel(Dog), + port_expect(Config,[{Packet,[eof]}], 0, "-e -l10", [line,eof]), + test_server:timetrap_cancel(Dog), ok. 
@@ -862,47 +819,39 @@ env(suite) -> env(doc) -> ["Test that the 'env' option works"]; env(Config) when is_list(Config) -> - case os:type() of - vxworks -> - {skipped,"Environments not implemented on VxWorks (could be...)"}; - _ -> - env2(Config) - end. - -env2(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(60)), - ?line Priv = ?config(priv_dir, Config), - ?line Temp = filename:join(Priv, "env_fun.bin"), + Dog = test_server:timetrap(test_server:seconds(60)), + Priv = ?config(priv_dir, Config), + Temp = filename:join(Priv, "env_fun.bin"), PluppVal = "dirty monkey", - ?line env_slave(Temp, [{"plupp",PluppVal}]), + env_slave(Temp, [{"plupp",PluppVal}]), Long = "LongAndBoringEnvName", - ?line os:putenv(Long, "nisse"), - - ?line env_slave(Temp, [{"plupp",PluppVal}, - {"DIR_PLUPP","###glurfrik"}], - fun() -> - PluppVal = os:getenv("plupp"), - "###glurfrik" = os:getenv("DIR_PLUPP"), - "nisse" = os:getenv(Long) - end), + os:putenv(Long, "nisse"), + + env_slave(Temp, [{"plupp",PluppVal}, + {"DIR_PLUPP","###glurfrik"}], + fun() -> + PluppVal = os:getenv("plupp"), + "###glurfrik" = os:getenv("DIR_PLUPP"), + "nisse" = os:getenv(Long) + end), - - ?line env_slave(Temp, [{"must_define_something","some_value"}, - {"certainly_not_existing",false}, - {"ends_with_equal", "value="}, - {Long,false}, - {"glurf","a glorfy string"}]), + + env_slave(Temp, [{"must_define_something","some_value"}, + {"certainly_not_existing",false}, + {"ends_with_equal", "value="}, + {Long,false}, + {"glurf","a glorfy string"}]), %% A lot of non existing variables (mingled with existing) NotExistingList = [{lists:flatten(io_lib:format("V~p_not_existing",[X])),false} || X <- lists:seq(1,150)], ExistingList = [{lists:flatten(io_lib:format("V~p_existing",[X])),"a_value"} || X <- lists:seq(1,150)], - ?line env_slave(Temp, lists:sort(ExistingList ++ NotExistingList)), + env_slave(Temp, lists:sort(ExistingList ++ NotExistingList)), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. env_slave(File, Env) -> @@ -946,23 +895,15 @@ env_slave_main([File]) -> %% 'env' option %% Test bad environments. bad_env(Config) when is_list(Config) -> - case os:type() of - vxworks -> - {skipped,"Environments not implemented on VxWorks"}; - _ -> - bad_env_1() - end. - -bad_env_1() -> - ?line try_bad_env([abbb]), - ?line try_bad_env([{"key","value"}|{"another","value"}]), - ?line try_bad_env([{"key","value","value2"}]), - ?line try_bad_env([{"key",[a,b,c]}]), - ?line try_bad_env([{"key",value}]), - ?line try_bad_env({a,tuple}), - ?line try_bad_env(42), - ?line try_bad_env([a|b]), - ?line try_bad_env(self()), + try_bad_env([abbb]), + try_bad_env([{"key","value"}|{"another","value"}]), + try_bad_env([{"key","value","value2"}]), + try_bad_env([{"key",[a,b,c]}]), + try_bad_env([{"key",value}]), + try_bad_env({a,tuple}), + try_bad_env(42), + try_bad_env([a|b]), + try_bad_env(self()), ok. try_bad_env(Env) -> @@ -979,36 +920,29 @@ cd(suite) -> cd(doc) -> ["Test that the 'cd' option works"]; cd(Config) when is_list(Config) -> - case os:type() of - vxworks -> - {skipped,"Task specific directories does not exist on VxWorks"}; - _ -> - cd2(Config) - end. 
-cd2(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(60)), - - ?line Program = atom_to_list(lib:progname()), - ?line DataDir = ?config(data_dir, Config), - ?line TestDir = filename:join(DataDir, "dir"), - ?line Cmd = Program ++ " -pz " ++ DataDir ++ - " -noshell -s port_test pwd -s erlang halt", - ?line _ = open_port({spawn, Cmd}, - [{cd, TestDir}, - {line, 256}]), - ?line receive - {_, {data, {eol, String}}} -> - case filename_equal(String, TestDir) of - true -> - ok; - false -> - ?line test_server:fail({cd, String}) - end; - Other2 -> - ?line test_server:fail({env, Other2}) - end, + Dog = test_server:timetrap(test_server:seconds(60)), - ?line test_server:timetrap_cancel(Dog), + Program = atom_to_list(lib:progname()), + DataDir = ?config(data_dir, Config), + TestDir = filename:join(DataDir, "dir"), + Cmd = Program ++ " -pz " ++ DataDir ++ + " -noshell -s port_test pwd -s erlang halt", + _ = open_port({spawn, Cmd}, + [{cd, TestDir}, + {line, 256}]), + receive + {_, {data, {eol, String}}} -> + case filename_equal(String, TestDir) of + true -> + ok; + false -> + test_server:fail({cd, String}) + end; + Other2 -> + test_server:fail({env, Other2}) + end, + + test_server:timetrap_cancel(Dog), ok. filename_equal(A, B) -> @@ -1059,8 +993,8 @@ otp_3906(Config) when is_list(Config) -> -define(OTP_3906_MAX_CONC_OSP, 50). otp_3906(Config, OSName) -> - ?line DataDir = filename:dirname(proplists:get_value(data_dir,Config)), - ?line {ok, Variables} = file:consult( + DataDir = filename:dirname(proplists:get_value(data_dir,Config)), + {ok, Variables} = file:consult( filename:join([DataDir,"..","..", "test_server","variables"])), case lists:keysearch('CC', 1, Variables) of @@ -1105,7 +1039,7 @@ otp_3906(Config, OSName) -> succeded -> ok; _ -> - ?line test_server:fail(Result) + test_server:fail(Result) end; _ -> {skipped, "No C compiler found"} @@ -1246,15 +1180,15 @@ otp_4389(doc) -> []; otp_4389(Config) when is_list(Config) -> case os:type() of {unix, _} -> - ?line Dog = test_server:timetrap(test_server:seconds(240)), - ?line TCR = self(), + Dog = test_server:timetrap(test_server:seconds(240)), + TCR = self(), case get_true_cmd() of True when is_list(True) -> - ?line lists:foreach( + lists:foreach( fun (P) -> - ?line receive - {P, ok} -> ?line ok; - {P, Err} -> ?line ?t:fail(Err) + receive + {P, ok} -> ok; + {P, Err} -> ?t:fail(Err) end end, lists:map( @@ -1283,14 +1217,14 @@ otp_4389(Config) when is_list(Config) -> end) end, lists:duplicate(1000,[]))), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), {comment, "This test case doesn't always fail when the bug that " "it tests for is present (it is most likely to fail on" " a multi processor machine). If the test case fails it" " will fail by deadlocking the emulator."}; _ -> - ?line {skipped, "\"true\" command not found"} + {skipped, "\"true\" command not found"} end; _ -> {skip,"Only run on Unix"} @@ -1320,11 +1254,11 @@ exit_status(suite) -> exit_status(doc) -> ["Test that the 'exit_status' option works"]; exit_status(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(60)), - ?line port_expect(Config,[{"x", + Dog = test_server:timetrap(test_server:seconds(60)), + port_expect(Config,[{"x", [{exit_status, 5}]}], 1, "", [exit_status]), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. 
spawn_driver(suite) -> @@ -1332,36 +1266,36 @@ spawn_driver(suite) -> spawn_driver(doc) -> ["Test spawning a driver specifically"]; spawn_driver(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line Path = ?config(data_dir, Config), - ?line ok = load_driver(Path, "echo_drv"), - ?line Port = erlang:open_port({spawn_driver, "echo_drv"}, []), - ?line Port ! {self(), {command, "Hello port!"}}, - ?line receive + Dog = test_server:timetrap(test_server:seconds(10)), + Path = ?config(data_dir, Config), + ok = load_driver(Path, "echo_drv"), + Port = erlang:open_port({spawn_driver, "echo_drv"}, []), + Port ! {self(), {command, "Hello port!"}}, + receive {Port, {data, "Hello port!"}} = Msg1 -> io:format("~p~n", [Msg1]), ok; Other -> test_server:fail({unexpected, Other}) end, - ?line Port ! {self(), close}, - ?line receive {Port, closed} -> ok end, + Port ! {self(), close}, + receive {Port, closed} -> ok end, - ?line Port2 = erlang:open_port({spawn_driver, "echo_drv -Hello port?"}, + Port2 = erlang:open_port({spawn_driver, "echo_drv -Hello port?"}, []), - ?line receive + receive {Port2, {data, "Hello port?"}} = Msg2 -> io:format("~p~n", [Msg2]), ok; Other2 -> test_server:fail({unexpected2, Other2}) end, - ?line Port2 ! {self(), close}, - ?line receive {Port2, closed} -> ok end, - ?line {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, "ls"}, [])), - ?line {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, "cmd"}, [])), - ?line {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, os:find_executable("erl")}, [])), - ?line test_server:timetrap_cancel(Dog), + Port2 ! {self(), close}, + receive {Port2, closed} -> ok end, + {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, "ls"}, [])), + {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, "cmd"}, [])), + {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, os:find_executable("erl")}, [])), + test_server:timetrap_cancel(Dog), ok. 
spawn_executable(suite) -> @@ -1369,48 +1303,48 @@ spawn_executable(suite) -> spawn_executable(doc) -> ["Test spawning an executable specifically"]; spawn_executable(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line DataDir = ?config(data_dir, Config), - ?line EchoArgs1 = filename:join([DataDir,"echo_args"]), - ?line ExactFile1 = filename:nativename(os:find_executable(EchoArgs1)), - ?line [ExactFile1] = run_echo_args(DataDir,[]), - ?line ["echo_args"] = run_echo_args(DataDir,["echo_args"]), - ?line ["echo_arguments"] = run_echo_args(DataDir,["echo_arguments"]), - ?line [ExactFile1,"hello world","dlrow olleh"] = + Dog = test_server:timetrap(test_server:seconds(10)), + DataDir = ?config(data_dir, Config), + EchoArgs1 = filename:join([DataDir,"echo_args"]), + ExactFile1 = filename:nativename(os:find_executable(EchoArgs1)), + [ExactFile1] = run_echo_args(DataDir,[]), + ["echo_args"] = run_echo_args(DataDir,["echo_args"]), + ["echo_arguments"] = run_echo_args(DataDir,["echo_arguments"]), + [ExactFile1,"hello world","dlrow olleh"] = run_echo_args(DataDir,[ExactFile1,"hello world","dlrow olleh"]), - ?line [ExactFile1] = run_echo_args(DataDir,[default]), - ?line [ExactFile1,"hello world","dlrow olleh"] = + [ExactFile1] = run_echo_args(DataDir,[default]), + [ExactFile1,"hello world","dlrow olleh"] = run_echo_args(DataDir,[switch_order,ExactFile1,"hello world", "dlrow olleh"]), - ?line [ExactFile1,"hello world","dlrow olleh"] = + [ExactFile1,"hello world","dlrow olleh"] = run_echo_args(DataDir,[default,"hello world","dlrow olleh"]), - ?line [ExactFile1,"hello world","dlrow olleh"] = + [ExactFile1,"hello world","dlrow olleh"] = run_echo_args_2("\""++ExactFile1++"\" "++"\"hello world\" \"dlrow olleh\""), - ?line PrivDir = ?config(priv_dir, Config), - ?line SpaceDir =filename:join([PrivDir,"With Spaces"]), - ?line file:make_dir(SpaceDir), - ?line Executable = filename:basename(ExactFile1), - ?line file:copy(ExactFile1,filename:join([SpaceDir,Executable])), - ?line ExactFile2 = filename:nativename(filename:join([SpaceDir,Executable])), - ?line chmodplusx(ExactFile2), + PrivDir = ?config(priv_dir, Config), + SpaceDir =filename:join([PrivDir,"With Spaces"]), + file:make_dir(SpaceDir), + Executable = filename:basename(ExactFile1), + file:copy(ExactFile1,filename:join([SpaceDir,Executable])), + ExactFile2 = filename:nativename(filename:join([SpaceDir,Executable])), + chmodplusx(ExactFile2), io:format("|~s|~n",[ExactFile2]), - ?line [ExactFile2] = run_echo_args(SpaceDir,[]), - ?line ["echo_args"] = run_echo_args(SpaceDir,["echo_args"]), - ?line ["echo_arguments"] = run_echo_args(SpaceDir,["echo_arguments"]), - ?line [ExactFile2,"hello world","dlrow olleh"] = + [ExactFile2] = run_echo_args(SpaceDir,[]), + ["echo_args"] = run_echo_args(SpaceDir,["echo_args"]), + ["echo_arguments"] = run_echo_args(SpaceDir,["echo_arguments"]), + [ExactFile2,"hello world","dlrow olleh"] = run_echo_args(SpaceDir,[ExactFile2,"hello world","dlrow olleh"]), - ?line [ExactFile2] = run_echo_args(SpaceDir,[default]), - ?line [ExactFile2,"hello world","dlrow olleh"] = + [ExactFile2] = run_echo_args(SpaceDir,[default]), + [ExactFile2,"hello world","dlrow olleh"] = run_echo_args(SpaceDir,[switch_order,ExactFile2,"hello world", "dlrow olleh"]), - ?line [ExactFile2,"hello world","dlrow olleh"] = + [ExactFile2,"hello world","dlrow olleh"] = run_echo_args(SpaceDir,[default,"hello world","dlrow olleh"]), - ?line [ExactFile2,"hello world","dlrow olleh"] = + [ExactFile2,"hello world","dlrow 
olleh"] = run_echo_args_2("\""++ExactFile2++"\" "++"\"hello world\" \"dlrow olleh\""), - ?line ExeExt = + ExeExt = case string:to_lower(lists:last(string:tokens(ExactFile2,"."))) of "exe" -> ".exe"; @@ -1418,47 +1352,47 @@ spawn_executable(Config) when is_list(Config) -> "" end, Executable2 = "spoky name"++ExeExt, - ?line file:copy(ExactFile1,filename:join([SpaceDir,Executable2])), - ?line ExactFile3 = filename:nativename(filename:join([SpaceDir,Executable2])), - ?line chmodplusx(ExactFile3), - ?line [ExactFile3] = run_echo_args(SpaceDir,Executable2,[]), - ?line ["echo_args"] = run_echo_args(SpaceDir,Executable2,["echo_args"]), - ?line ["echo_arguments"] = run_echo_args(SpaceDir,Executable2,["echo_arguments"]), - ?line [ExactFile3,"hello world","dlrow olleh"] = + file:copy(ExactFile1,filename:join([SpaceDir,Executable2])), + ExactFile3 = filename:nativename(filename:join([SpaceDir,Executable2])), + chmodplusx(ExactFile3), + [ExactFile3] = run_echo_args(SpaceDir,Executable2,[]), + ["echo_args"] = run_echo_args(SpaceDir,Executable2,["echo_args"]), + ["echo_arguments"] = run_echo_args(SpaceDir,Executable2,["echo_arguments"]), + [ExactFile3,"hello world","dlrow olleh"] = run_echo_args(SpaceDir,Executable2,[ExactFile3,"hello world","dlrow olleh"]), - ?line [ExactFile3] = run_echo_args(SpaceDir,Executable2,[default]), - ?line [ExactFile3,"hello world","dlrow olleh"] = + [ExactFile3] = run_echo_args(SpaceDir,Executable2,[default]), + [ExactFile3,"hello world","dlrow olleh"] = run_echo_args(SpaceDir,Executable2, [switch_order,ExactFile3,"hello world", "dlrow olleh"]), - ?line [ExactFile3,"hello world","dlrow olleh"] = + [ExactFile3,"hello world","dlrow olleh"] = run_echo_args(SpaceDir,Executable2, [default,"hello world","dlrow olleh"]), - ?line [ExactFile3,"hello world","dlrow olleh"] = + [ExactFile3,"hello world","dlrow olleh"] = run_echo_args_2("\""++ExactFile3++"\" "++"\"hello world\" \"dlrow olleh\""), - ?line {'EXIT',{enoent,_}} = (catch run_echo_args(SpaceDir,"fnurflmonfi", + {'EXIT',{enoent,_}} = (catch run_echo_args(SpaceDir,"fnurflmonfi", [default,"hello world", "dlrow olleh"])), NonExec = "kronxfrt"++ExeExt, - ?line file:write_file(filename:join([SpaceDir,NonExec]), + file:write_file(filename:join([SpaceDir,NonExec]), <<"Not an executable">>), - ?line {'EXIT',{eacces,_}} = (catch run_echo_args(SpaceDir,NonExec, + {'EXIT',{eacces,_}} = (catch run_echo_args(SpaceDir,NonExec, [default,"hello world", "dlrow olleh"])), - ?line {'EXIT',{enoent,_}} = (catch open_port({spawn_executable,"cmd"},[])), - ?line {'EXIT',{enoent,_}} = (catch open_port({spawn_executable,"sh"},[])), + {'EXIT',{enoent,_}} = (catch open_port({spawn_executable,"cmd"},[])), + {'EXIT',{enoent,_}} = (catch open_port({spawn_executable,"sh"},[])), case os:type() of {win32,_} -> test_bat_file(SpaceDir); {unix,_} -> test_sh_file(SpaceDir) end, - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. unregister_name(Config) when is_list(Config) -> - ?line true = register(crash, open_port({spawn, "sleep 100"}, [])), - ?line true = unregister(crash). + true = register(crash, open_port({spawn, "sleep 100"}, [])), + true = unregister(crash). 
test_bat_file(Dir) -> FN = "tf.bat", @@ -1478,16 +1412,16 @@ test_bat_file(Dir) -> <<"\r\n">>, <<":done\r\n">>, <<"\r\n">>], - ?line file:write_file(Full,list_to_binary(D)), - ?line EF = filename:basename(FN), - ?line [DN,"hello","world"] = + file:write_file(Full,list_to_binary(D)), + EF = filename:basename(FN), + [DN,"hello","world"] = run_echo_args(Dir,FN, [default,"hello","world"]), %% The arg0 argumant should be ignored when running batch files - ?line [DN,"hello","world"] = + [DN,"hello","world"] = run_echo_args(Dir,FN, ["knaskurt","hello","world"]), - ?line EF = filename:basename(DN), + EF = filename:basename(DN), ok. test_sh_file(Dir) -> @@ -1501,20 +1435,20 @@ test_sh_file(Dir) -> <<" shift\n">>, <<" i=`expr $i + 1`\n">>, <<"done\n">>], - ?line file:write_file(Full,list_to_binary(D)), - ?line chmodplusx(Full), - ?line [Full,"hello","world"] = + file:write_file(Full,list_to_binary(D)), + chmodplusx(Full), + [Full,"hello","world"] = run_echo_args(Dir,FN, [default,"hello","world"]), - ?line [Full,"hello","world of spaces"] = + [Full,"hello","world of spaces"] = run_echo_args(Dir,FN, [default,"hello","world of spaces"]), - ?line file:write_file(filename:join([Dir,"testfile1"]),<<"testdata1">>), - ?line file:write_file(filename:join([Dir,"testfile2"]),<<"testdata2">>), - ?line Pattern = filename:join([Dir,"testfile*"]), - ?line L = filelib:wildcard(Pattern), - ?line 2 = length(L), - ?line [Full,"hello",Pattern] = + file:write_file(filename:join([Dir,"testfile1"]),<<"testdata1">>), + file:write_file(filename:join([Dir,"testfile2"]),<<"testdata2">>), + Pattern = filename:join([Dir,"testfile*"]), + L = filelib:wildcard(Pattern), + 2 = length(L), + [Full,"hello",Pattern] = run_echo_args(Dir,FN, [default,"hello",Pattern]), ok. @@ -1574,25 +1508,25 @@ mix_up_ports(suite) -> mix_up_ports(doc) -> ["Test that the emulator does not mix up ports when the port table wraps"]; mix_up_ports(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line Path = ?config(data_dir, Config), - ?line ok = load_driver(Path, "echo_drv"), - ?line Port = erlang:open_port({spawn, "echo_drv"}, []), - ?line Port ! {self(), {command, "Hello port!"}}, - ?line receive + Dog = test_server:timetrap(test_server:seconds(10)), + Path = ?config(data_dir, Config), + ok = load_driver(Path, "echo_drv"), + Port = erlang:open_port({spawn, "echo_drv"}, []), + Port ! {self(), {command, "Hello port!"}}, + receive {Port, {data, "Hello port!"}} = Msg1 -> io:format("~p~n", [Msg1]), ok; Other -> test_server:fail({unexpected, Other}) end, - ?line Port ! {self(), close}, - ?line receive {Port, closed} -> ok end, - ?line loop(start, done, + Port ! {self(), close}, + receive {Port, closed} -> ok end, + loop(start, done, fun(P) -> - ?line Q = + Q = (catch erlang:open_port({spawn, "echo_drv"}, [])), -%% ?line io:format("~p ", [Q]), +%% io:format("~p ", [Q]), if is_port(Q) -> Q; true -> @@ -1600,14 +1534,14 @@ mix_up_ports(Config) when is_list(Config) -> done end end), - ?line Port ! {self(), {command, "Hello again port!"}}, - ?line receive + Port ! {self(), {command, "Hello again port!"}}, + receive Msg2 -> test_server:fail({unexpected, Msg2}) after 1000 -> ok end, - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. 
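A note on the echo_drv round trip used by mix_up_ports/1 above: the suite's load_driver/2 helper is assumed here to behave like erl_ddll:load_driver/2. A minimal sketch of the same load/open/echo cycle, with the directory and driver name as placeholders:

demo_echo_driver(Dir) ->
    %% Load a linked-in driver from Dir and open a port on it; echo_drv
    %% (as used above) sends every command straight back to the caller.
    ok = erl_ddll:load_driver(Dir, "echo_drv"),
    Port = erlang:open_port({spawn, "echo_drv"}, []),
    Port ! {self(), {command, "ping"}},
    receive
        {Port, {data, "ping"}} -> ok
    after 1000 ->
        timeout
    end.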
loop(Stop, Stop, Fun) when is_function(Fun) -> @@ -1622,45 +1556,45 @@ otp_5112(doc) -> ["Test that link to connected process is taken away when port calls", "driver_exit() also when the port index has wrapped"]; otp_5112(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line Path = ?config(data_dir, Config), - ?line ok = load_driver(Path, "exit_drv"), - ?line Port = otp_5112_get_wrapped_port(), - ?line ?t:format("Max ports: ~p~n",[max_ports()]), - ?line ?t:format("Port: ~p~n",[Port]), - ?line {links, Links1} = process_info(self(),links), - ?line ?t:format("Links1: ~p~n",[Links1]), - ?line true = lists:member(Port, Links1), - ?line Port ! {self(), {command, ""}}, - ?line {links, Links2} = process_info(self(),links), - ?line ?t:format("Links2: ~p~n",[Links2]), - ?line false = lists:member(Port, Links2), %% This used to fail - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(10)), + Path = ?config(data_dir, Config), + ok = load_driver(Path, "exit_drv"), + Port = otp_5112_get_wrapped_port(), + ?t:format("Max ports: ~p~n",[max_ports()]), + ?t:format("Port: ~p~n",[Port]), + {links, Links1} = process_info(self(),links), + ?t:format("Links1: ~p~n",[Links1]), + true = lists:member(Port, Links1), + Port ! {self(), {command, ""}}, + {links, Links2} = process_info(self(),links), + ?t:format("Links2: ~p~n",[Links2]), + false = lists:member(Port, Links2), %% This used to fail + test_server:timetrap_cancel(Dog), ok. otp_5112_get_wrapped_port() -> - ?line P1 = erlang:open_port({spawn, "exit_drv"}, []), - ?line case port_ix(P1) < max_ports() of + P1 = erlang:open_port({spawn, "exit_drv"}, []), + case port_ix(P1) < max_ports() of true -> - ?line ?t:format("Need to wrap port index (~p)~n", [P1]), - ?line otp_5112_wrap_port_ix([P1]), - ?line P2 = erlang:open_port({spawn, "exit_drv"}, []), - ?line false = port_ix(P2) < max_ports(), - ?line P2; + ?t:format("Need to wrap port index (~p)~n", [P1]), + otp_5112_wrap_port_ix([P1]), + P2 = erlang:open_port({spawn, "exit_drv"}, []), + false = port_ix(P2) < max_ports(), + P2; false -> - ?line ?t:format("Port index already wrapped (~p)~n", [P1]), - ?line P1 + ?t:format("Port index already wrapped (~p)~n", [P1]), + P1 end. otp_5112_wrap_port_ix(Ports) -> - ?line case (catch erlang:open_port({spawn, "exit_drv"}, [])) of + case (catch erlang:open_port({spawn, "exit_drv"}, [])) of Port when is_port(Port) -> - ?line otp_5112_wrap_port_ix([Port|Ports]); + otp_5112_wrap_port_ix([Port|Ports]); _ -> %% Port table now full; empty port table - ?line lists:foreach(fun (P) -> P ! {self(), close} end, + lists:foreach(fun (P) -> P ! {self(), close} end, Ports), - ?line ok + ok end. 
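otp_5112/1 above leans on the fact that open_port/2 links the new port to the calling process. A small illustration of that link showing up in process_info/2; the spawned external command is just a placeholder:

demo_port_link() ->
    Port = open_port({spawn, "cat"}, []),   % any long-lived command will do
    {links, Links} = process_info(self(), links),
    true = lists:member(Port, Links),       % the port is linked to its opener
    Port ! {self(), close},
    receive {Port, closed} -> ok end.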
@@ -1669,106 +1603,105 @@ otp_5119(suite) -> otp_5119(doc) -> ["Test that port index is not unnecessarily wrapped"]; otp_5119(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line Path = ?config(data_dir, Config), - ?line ok = load_driver(Path, "exit_drv"), - ?line PI1 = port_ix(otp_5119_fill_empty_port_tab([])), - ?line PI2 = port_ix(erlang:open_port({spawn, "exit_drv"}, [])), - ?line {PortIx1, PortIx2} - = case PI2 > PI1 of - true -> - ?line {PI1, PI2}; - false -> - ?line {port_ix(otp_5119_fill_empty_port_tab([PI2])), - port_ix(erlang:open_port({spawn, "exit_drv"}, []))} - end, - ?line MaxPorts = max_ports(), - ?line ?t:format("PortIx1 = ~p ~p~n", [PI1, PortIx1]), - ?line ?t:format("PortIx2 = ~p ~p~n", [PI2, PortIx2]), - ?line ?t:format("MaxPorts = ~p~n", [MaxPorts]), - ?line true = PortIx2 > PortIx1, - ?line true = PortIx2 =< PortIx1 + MaxPorts, - ?line test_server:timetrap_cancel(Dog), - ?line ok. + Dog = test_server:timetrap(test_server:seconds(10)), + Path = ?config(data_dir, Config), + ok = load_driver(Path, "exit_drv"), + PI1 = port_ix(otp_5119_fill_empty_port_tab([])), + PI2 = port_ix(erlang:open_port({spawn, "exit_drv"}, [])), + {PortIx1, PortIx2} = case PI2 > PI1 of + true -> + {PI1, PI2}; + false -> + {port_ix(otp_5119_fill_empty_port_tab([PI2])), + port_ix(erlang:open_port({spawn, "exit_drv"}, []))} + end, + MaxPorts = max_ports(), + ?t:format("PortIx1 = ~p ~p~n", [PI1, PortIx1]), + ?t:format("PortIx2 = ~p ~p~n", [PI2, PortIx2]), + ?t:format("MaxPorts = ~p~n", [MaxPorts]), + true = PortIx2 > PortIx1, + true = PortIx2 =< PortIx1 + MaxPorts, + test_server:timetrap_cancel(Dog), + ok. otp_5119_fill_empty_port_tab(Ports) -> - ?line case (catch erlang:open_port({spawn, "exit_drv"}, [])) of + case (catch erlang:open_port({spawn, "exit_drv"}, [])) of Port when is_port(Port) -> - ?line otp_5119_fill_empty_port_tab([Port|Ports]); + otp_5119_fill_empty_port_tab([Port|Ports]); _ -> %% Port table now full; empty port table - ?line lists:foreach(fun (P) -> P ! {self(), close} end, + lists:foreach(fun (P) -> P ! {self(), close} end, Ports), - ?line [LastPort|_] = Ports, - ?line LastPort + [LastPort|_] = Ports, + LastPort end. -define(DEF_MAX_PORTS, 1024). max_ports_env() -> - ?line case os:getenv("ERL_MAX_PORTS") of + case os:getenv("ERL_MAX_PORTS") of EMP when is_list(EMP) -> case catch list_to_integer(EMP) of - Int when is_integer(Int) -> ?line Int; - _ -> ?line false + Int when is_integer(Int) -> Int; + _ -> false end; - _ -> ?line false + _ -> false end. max_ports() -> - ?line PreMaxPorts + PreMaxPorts = case max_ports_env() of - Env when is_integer(Env) -> ?line Env; + Env when is_integer(Env) -> Env; _ -> - ?line case os:type() of + case os:type() of {unix, _} -> - ?line UlimStr = string:strip(os:cmd("ulimit -n") + UlimStr = string:strip(os:cmd("ulimit -n") -- "\n"), - ?line case catch list_to_integer(UlimStr) of - Ulim when is_integer(Ulim) -> ?line Ulim; - _ -> ?line ?DEF_MAX_PORTS + case catch list_to_integer(UlimStr) of + Ulim when is_integer(Ulim) -> Ulim; + _ -> ?DEF_MAX_PORTS end; - _ -> ?line ?DEF_MAX_PORTS + _ -> ?DEF_MAX_PORTS end end, - ?line case PreMaxPorts > ?DEF_MAX_PORTS of - true -> ?line PreMaxPorts; - false -> ?line ?DEF_MAX_PORTS + case PreMaxPorts > ?DEF_MAX_PORTS of + true -> PreMaxPorts; + false -> ?DEF_MAX_PORTS end. 
port_ix(Port) when is_port(Port) -> - ?line ["#Port",_,PortIxStr] = string:tokens(erlang:port_to_list(Port), + ["#Port",_,PortIxStr] = string:tokens(erlang:port_to_list(Port), "<.>"), - ?line list_to_integer(PortIxStr). + list_to_integer(PortIxStr). otp_6224(doc) -> ["Check that port command failure doesn't crash the emulator"]; otp_6224(suite) -> []; otp_6224(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line Path = ?config(data_dir, Config), - ?line ok = load_driver(Path, "failure_drv"), - ?line Go = make_ref(), - ?line Failer = spawn(fun () -> + Dog = test_server:timetrap(test_server:seconds(10)), + Path = ?config(data_dir, Config), + ok = load_driver(Path, "failure_drv"), + Go = make_ref(), + Failer = spawn(fun () -> receive Go -> ok end, - ?line Port = open_port({spawn, "failure_drv"}, + Port = open_port({spawn, "failure_drv"}, []), Port ! {self(), {command, "Fail, please!"}}, otp_6224_loop() end), - ?line Mon = erlang:monitor(process, Failer), - ?line Failer ! Go, - ?line receive + Mon = erlang:monitor(process, Failer), + Failer ! Go, + receive {'DOWN', Mon, process, Failer, Reason} -> - ?line case Reason of - {driver_failed, _} -> ?line ok; - driver_failed -> ?line ok; - _ -> ?line ?t:fail({unexpected_exit_reason, + case Reason of + {driver_failed, _} -> ok; + driver_failed -> ok; + _ -> ?t:fail({unexpected_exit_reason, Reason}) end end, - ?line test_server:timetrap_cancel(Dog), - ?line ok. + test_server:timetrap_cancel(Dog), + ok. otp_6224_loop() -> receive _ -> ok after 0 -> ok end, @@ -1781,11 +1714,11 @@ otp_6224_loop() -> exit_status_multi_scheduling_block(doc) -> []; exit_status_multi_scheduling_block(suite) -> []; exit_status_multi_scheduling_block(Config) when is_list(Config) -> - ?line Repeat = 3, - ?line case ?t:os_type() of + Repeat = 3, + case ?t:os_type() of {unix, _} -> - ?line Dog = ?t:timetrap(test_server:minutes(2*Repeat)), - ?line SleepSecs = 6, + Dog = ?t:timetrap(test_server:minutes(2*Repeat)), + SleepSecs = 6, try lists:foreach(fun (_) -> exit_status_msb_test(Config, @@ -1799,7 +1732,7 @@ exit_status_multi_scheduling_block(Config) when is_list(Config) -> ?t:timetrap_cancel(Dog), receive after SleepSecs+500 -> ok end end; - _ -> ?line {skip, "Not implemented for this OS"} + _ -> {skip, "Not implemented for this OS"} end. exit_status_msb_test(Config, SleepSecs) when is_list(Config) -> @@ -1808,22 +1741,22 @@ exit_status_msb_test(Config, SleepSecs) when is_list(Config) -> %% and we want these port programs to terminate while multi-scheduling %% is blocked. 
%% - ?line NoSchedsOnln = erlang:system_info(schedulers_online), - ?line Parent = self(), - ?line ?t:format("SleepSecs = ~p~n", [SleepSecs]), - ?line PortProg = "sleep " ++ integer_to_list(SleepSecs), - ?line Start = now(), - ?line NoProcs = case NoSchedsOnln of + NoSchedsOnln = erlang:system_info(schedulers_online), + Parent = self(), + ?t:format("SleepSecs = ~p~n", [SleepSecs]), + PortProg = "sleep " ++ integer_to_list(SleepSecs), + Start = now(), + NoProcs = case NoSchedsOnln of NProcs when NProcs < ?EXIT_STATUS_MSB_MAX_PROCS -> NProcs; _ -> ?EXIT_STATUS_MSB_MAX_PROCS end, - ?line NoPortsPerProc = case 20*NoProcs of + NoPortsPerProc = case 20*NoProcs of TNPorts when TNPorts < ?EXIT_STATUS_MSB_MAX_PORTS -> 20; _ -> ?EXIT_STATUS_MSB_MAX_PORTS div NoProcs end, - ?line ?t:format("NoProcs = ~p~nNoPortsPerProc = ~p~n", + ?t:format("NoProcs = ~p~nNoPortsPerProc = ~p~n", [NoProcs, NoPortsPerProc]), ProcFun = fun () -> @@ -1873,32 +1806,32 @@ exit_status_msb_test(Config, SleepSecs) when is_list(Config) -> PrtSIds), Parent ! {self(), done} end, - ?line Procs = lists:map(fun (N) -> + Procs = lists:map(fun (N) -> spawn_opt(ProcFun, [link, {scheduler, (N rem NoSchedsOnln)+1}]) end, lists:seq(1, NoProcs)), - ?line SIds = lists:map(fun (P) -> + SIds = lists:map(fun (P) -> receive {P, started, SIds} -> SIds end end, Procs), - ?line StartedTime = timer:now_diff(now(), Start)/1000000, - ?line ?t:format("StartedTime = ~p~n", [StartedTime]), - ?line true = StartedTime < SleepSecs, - ?line erlang:system_flag(multi_scheduling, block), - ?line lists:foreach(fun (P) -> receive {P, done} -> ok end end, Procs), - ?line DoneTime = timer:now_diff(now(), Start)/1000000, - ?line ?t:format("DoneTime = ~p~n", [DoneTime]), - ?line true = DoneTime > SleepSecs, - ?line ok = verify_multi_scheduling_blocked(), - ?line erlang:system_flag(multi_scheduling, unblock), - ?line case {length(lists:usort(lists:flatten(SIds))), NoSchedsOnln} of + StartedTime = timer:now_diff(now(), Start)/1000000, + ?t:format("StartedTime = ~p~n", [StartedTime]), + true = StartedTime < SleepSecs, + erlang:system_flag(multi_scheduling, block), + lists:foreach(fun (P) -> receive {P, done} -> ok end end, Procs), + DoneTime = timer:now_diff(now(), Start)/1000000, + ?t:format("DoneTime = ~p~n", [DoneTime]), + true = DoneTime > SleepSecs, + ok = verify_multi_scheduling_blocked(), + erlang:system_flag(multi_scheduling, unblock), + case {length(lists:usort(lists:flatten(SIds))), NoSchedsOnln} of {N, N} -> - ?line ok; + ok; {N, M} -> - ?line ?t:fail("Failed to create ports on all" + ?t:fail("Failed to create ports on all" ++ integer_to_list(M) ++ " available" "schedulers. Only created ports on " ++ integer_to_list(N) ++ " schedulers.") @@ -1921,18 +1854,18 @@ sid_proc(SIds) -> end. verify_multi_scheduling_blocked() -> - ?line Procs = lists:map(fun (_) -> + Procs = lists:map(fun (_) -> spawn_link(fun () -> sid_proc([]) end) end, lists:seq(1, 3*erlang:system_info(schedulers_online))), - ?line receive after 1000 -> ok end, - ?line SIds = lists:map(fun (P) -> + receive after 1000 -> ok end, + SIds = lists:map(fun (P) -> P ! {self(), want_sids}, receive {P, sids, PSIds} -> PSIds end end, Procs), - ?line 1 = length(lists:usort(lists:flatten(SIds))), - ?line ok. + 1 = length(lists:usort(lists:flatten(SIds))), + ok. %%% Pinging functions. 
@@ -1993,30 +1926,30 @@ build_cmd_line(FixedCmdLine, [], Result) -> port_expect(Config, Actions, HSize, CmdLine, Options0) -> % io:format("port_expect(~p, ~p, ~p, ~p)", % [Actions, HSize, CmdLine, Options0]), - ?line PortTest = port_test(Config), - ?line Cmd = lists:concat([PortTest, " -h", HSize, " ", CmdLine]), - ?line PortType = + PortTest = port_test(Config), + Cmd = lists:concat([PortTest, " -h", HSize, " ", CmdLine]), + PortType = case HSize of 0 -> stream; _ -> {packet, HSize} end, - ?line Options = [PortType|Options0], - ?line io:format("open_port({spawn, ~p}, ~p)", [Cmd, Options]), - ?line Port = open_port({spawn, Cmd}, Options), - ?line port_expect(Port, Actions, Options), + Options = [PortType|Options0], + io:format("open_port({spawn, ~p}, ~p)", [Cmd, Options]), + Port = open_port({spawn, Cmd}, Options), + port_expect(Port, Actions, Options), Port. port_expect(Port, [{Send, Expects}|Rest], Options) when is_list(Expects) -> - ?line port_send(Port, Send), - ?line IsBinaryPort = lists:member(binary, Options), - ?line Receiver = + port_send(Port, Send), + IsBinaryPort = lists:member(binary, Options), + Receiver = case {lists:member(stream, Options), line_option(Options)} of {false, _} -> fun receive_all/2; {true,false} -> fun stream_receive_all/2; {_, true} -> fun receive_all/2 end, - ?line Receiver(Port, maybe_to_binary(Expects, IsBinaryPort)), - ?line port_expect(Port, Rest, Options); + Receiver(Port, maybe_to_binary(Expects, IsBinaryPort)), + port_expect(Port, Rest, Options); port_expect(_, [], _) -> ok. @@ -2068,7 +2001,7 @@ receive_all(Port, [Expect|Rest]) -> _ -> %%% io:format("Unexpected message: ~s", [format(Other)]), io:format("Unexpected message: ~w", [Other]), - ?line test_server:fail(unexpected_message) + test_server:fail(unexpected_message) end end, receive_all(Port, Rest); @@ -2157,15 +2090,8 @@ build_packet(Left, Result, NextChar0) -> build_packet(Left-1, [NextChar0|Result], NextChar). sizes() -> - case os:type() of - vxworks -> - % don't stress VxWorks too much - [10, 13, 64, 127, 128, 255, 256, 1023, 1024, - 8191, 8192, 16383, 16384]; - _ -> - [10, 13, 64, 127, 128, 255, 256, 1023, 1024, - 32767, 32768, 65535, 65536] - end. + [10, 13, 64, 127, 128, 255, 256, 1023, 1024, + 32767, 32768, 65535, 65536]. sizes(Header_Size) -> sizes(Header_Size, sizes(), []). @@ -2203,18 +2129,18 @@ fun_spawn(Fun, Args) -> spawn_link(erlang, apply, [Fun, Args]). port_test(Config) when is_list(Config) -> - ?line filename:join(?config(data_dir, Config), "port_test"). + filename:join(?config(data_dir, Config), "port_test"). ports(doc) -> "Test that erlang:ports/0 returns a consistent snapshot of ports"; ports(suite) -> []; ports(Config) when is_list(Config) -> - ?line Path = ?config(data_dir, Config), - ?line ok = load_driver(Path, "exit_drv"), + Path = ?config(data_dir, Config), + ok = load_driver(Path, "exit_drv"), receive after 1000 -> ok end, % Wait for other ports to stabilize - ?line OtherPorts = erlang:ports(), + OtherPorts = erlang:ports(), io:format("Other ports: ~p\n",[OtherPorts]), MaxPorts = 1024 - length(OtherPorts), @@ -2222,7 +2148,7 @@ ports(Config) when is_list(Config) -> ports_snapshots(100, TrafficPid, OtherPorts), TrafficPid ! {self(),die}, - ?line receive {TrafficPid, dead} -> ok end, + receive {TrafficPid, dead} -> ok end, ok. ports_snapshots(0, _, _) -> @@ -2230,12 +2156,12 @@ ports_snapshots(0, _, _) -> ports_snapshots(Iter, TrafficPid, OtherPorts) -> TrafficPid ! 
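For context on the snapshot checked by ports/1 above: erlang:ports/0 returns every port existing on the node at the time of the call, so a port that has just been opened must be in it. A tiny self-contained sketch, with the spawned command again a placeholder:

demo_ports_snapshot() ->
    Port = open_port({spawn, "sleep 10"}, []),   % placeholder external command
    true = lists:member(Port, erlang:ports()),   % a live port is in the snapshot
    true = erlang:port_close(Port),
    ok.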
start, - ?line receive after 1 -> ok end, + receive after 1 -> ok end, Snapshot = erlang:ports(), TrafficPid ! {self(), stop}, - ?line receive {TrafficPid, EventList, TrafficPorts} -> ok end, + receive {TrafficPid, EventList, TrafficPorts} -> ok end, %%io:format("Snapshot=~p\n", [Snapshot]), ports_verify(Snapshot, OtherPorts ++ TrafficPorts, EventList), @@ -2252,7 +2178,7 @@ ports_traffic_stopped(MaxPorts, {PortList, PortCnt}) -> %%io:format("Traffic started in ~p\n",[self()]), ports_traffic_started(MaxPorts, {PortList, PortCnt}, []); {Pid,die} -> - ?line lists:foreach(fun(Port)-> erlang:port_close(Port) end, + lists:foreach(fun(Port)-> erlang:port_close(Port) end, PortList), Pid ! {self(),dead} end. @@ -2272,15 +2198,15 @@ ports_traffic_do(MaxPorts, {PortList, PortCnt}, EventList) -> N = uniform(MaxPorts), case N > PortCnt of true -> % Open port - ?line P = open_port({spawn, "exit_drv"}, []), + P = open_port({spawn, "exit_drv"}, []), %%io:format("Created port ~p\n",[P]), ports_traffic_started(MaxPorts, {[P|PortList], PortCnt+1}, [{open,P}|EventList]); false -> % Close port - ?line P = lists:nth(N, PortList), + P = lists:nth(N, PortList), %%io:format("Close port ~p\n",[P]), - ?line true = erlang:port_close(P), + true = erlang:port_close(P), ports_traffic_started(MaxPorts, {lists:delete(P,PortList), PortCnt-1}, [{close,P}|EventList]) end. @@ -2301,7 +2227,7 @@ ports_verify(Ports, PortsAfter, EventList) -> ports_verify(Ports, [P | PortsAfter], Tail); [] -> - ?line test_server:fail("Inconsistent snapshot from erlang:ports()") + test_server:fail("Inconsistent snapshot from erlang:ports()") end end. @@ -2318,27 +2244,27 @@ close_deaf_port(doc) -> ["Send data to port program that does not read it, then "Primary targeting Windows to test threaded_handle_closer in sys.c"]; close_deaf_port(suite) -> []; close_deaf_port(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(100)), - ?line DataDir = ?config(data_dir, Config), - ?line DeadPort = os:find_executable("dead_port", DataDir), - ?line Port = open_port({spawn,DeadPort++" 60"},[]), - ?line erlang:port_command(Port,"Hello, can you hear me!?!?"), - ?line port_close(Port), + Dog = test_server:timetrap(test_server:seconds(100)), + DataDir = ?config(data_dir, Config), + DeadPort = os:find_executable("dead_port", DataDir), + Port = open_port({spawn,DeadPort++" 60"},[]), + erlang:port_command(Port,"Hello, can you hear me!?!?"), + port_close(Port), Res = close_deaf_port_1(0, DeadPort), io:format("Waiting for OS procs to terminate...\n"), receive after 5*1000 -> ok end, - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), Res. 
close_deaf_port_1(1000, _) -> ok; close_deaf_port_1(N, Cmd) -> Timeout = integer_to_list(random:uniform(5*1000)), - ?line try open_port({spawn_executable,Cmd},[{args,[Timeout]}]) of + try open_port({spawn_executable,Cmd},[{args,[Timeout]}]) of Port -> - ?line erlang:port_command(Port,"Hello, can you hear me!?!?"), - ?line port_close(Port), + erlang:port_command(Port,"Hello, can you hear me!?!?"), + port_close(Port), close_deaf_port_1(N+1, Cmd) catch _:eagain -> diff --git a/erts/emulator/test/port_SUITE_data/dead_port.c b/erts/emulator/test/port_SUITE_data/dead_port.c index 68e96fbf14..4dd9ee4cc2 100644 --- a/erts/emulator/test/port_SUITE_data/dead_port.c +++ b/erts/emulator/test/port_SUITE_data/dead_port.c @@ -17,15 +17,6 @@ * %CopyrightEnd% */ -#ifdef VXWORKS -#include <vxWorks.h> -#include <taskVarLib.h> -#include <taskLib.h> -#include <sysLib.h> -#include <string.h> -#include <ioLib.h> -#endif - #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -37,12 +28,7 @@ #ifndef __WIN32__ #include <unistd.h> -#ifdef VXWORKS -#include "reclaim.h" -#include <sys/times.h> -#else #include <sys/time.h> -#endif #define O_BINARY 0 #define _setmode(fd, mode) @@ -53,13 +39,7 @@ #include "winbase.h" #endif - -#ifdef VXWORKS -#define MAIN(argc, argv) port_test(argc, argv) -#else #define MAIN(argc, argv) main(argc, argv) -#endif - extern int errno; @@ -86,9 +66,6 @@ char *argv[]; static void delay(unsigned ms) { -#ifdef VXWORKS - taskDelay((sysClkRateGet() * ms) / 1000); -#else #ifdef __WIN32__ Sleep(ms); #else @@ -98,5 +75,4 @@ delay(unsigned ms) select(0, NULL, NULL, NULL, &t); #endif -#endif } diff --git a/erts/emulator/test/port_SUITE_data/port_test.c b/erts/emulator/test/port_SUITE_data/port_test.c index 7b4e386d87..7abefab2e3 100644 --- a/erts/emulator/test/port_SUITE_data/port_test.c +++ b/erts/emulator/test/port_SUITE_data/port_test.c @@ -3,15 +3,6 @@ * Purpose: A port program to be used for testing the open_port bif. 
*/ -#ifdef VXWORKS -#include <vxWorks.h> -#include <taskVarLib.h> -#include <taskLib.h> -#include <sysLib.h> -#include <string.h> -#include <ioLib.h> -#endif - #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -23,12 +14,7 @@ #ifndef __WIN32__ #include <unistd.h> -#ifdef VXWORKS -#include "reclaim.h" -#include <sys/times.h> -#else #include <sys/time.h> -#endif #define O_BINARY 0 #define _setmode(fd, mode) @@ -40,22 +26,13 @@ #endif -#ifdef VXWORKS -#define REDIR_STDOUT(fd) ioTaskStdSet(0, 1, fd); -#else #define REDIR_STDOUT(fd) if (dup2(fd, 1) == -1) { \ fprintf(stderr, "%s: failed to duplicate handle %d to 1: %d\n", \ port_data->progname, fd, errno); \ exit(1); \ } -#endif -#ifdef VXWORKS -#define MAIN(argc, argv) port_test(argc, argv) -#else #define MAIN(argc, argv) main(argc, argv) -#endif - extern int errno; @@ -101,7 +78,6 @@ static void dump(unsigned char* buf, int sz, int max); static void replace_stdout(char* filename); static void generate_reply(char* spec); -#ifndef VXWORKS #ifndef HAVE_STRERROR extern int sys_nerr; #ifndef sys_errlist /* sys_errlist is sometimes defined to @@ -125,7 +101,6 @@ int err; return msgstr; } #endif -#endif MAIN(argc, argv) @@ -133,12 +108,6 @@ int argc; char *argv[]; { int ret; -#ifdef VXWORKS - if(taskVarAdd(0, (int *)&port_data) != OK) { - fprintf(stderr, "Can't do taskVarAdd in port_test\n"); - exit(1); - } -#endif if((port_data = (PORT_TEST_DATA *) malloc(sizeof(PORT_TEST_DATA))) == NULL) { fprintf(stderr, "Couldn't malloc for port_data"); exit(1); @@ -511,9 +480,6 @@ dump(buf, sz, max) static void delay(unsigned ms) { -#ifdef VXWORKS - taskDelay((sysClkRateGet() * ms) / 1000); -#else #ifdef __WIN32__ Sleep(ms); #else @@ -523,7 +489,6 @@ delay(unsigned ms) select(0, NULL, NULL, NULL, &t); #endif -#endif } /* diff --git a/erts/emulator/test/port_SUITE_data/reclaim.h b/erts/emulator/test/port_SUITE_data/reclaim.h deleted file mode 100644 index 1d57dc5b8a..0000000000 --- a/erts/emulator/test/port_SUITE_data/reclaim.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef __RECLAIM_H__ -#define __RECLAIM_H__ - - -/* The Erlang release for VxWorks includes a simple mechanism for - "resource reclamation" at task exit - it allows replacement of the - functions that open/close "files" and malloc/free memory with versions - that keep track, to be able to "reclaim" file descriptors and memory - when a task exits (regardless of *how* it exits). - - The interface to this mechanism is made available via this file, - with the following caveats: - - - The interface may change (or perhaps even be removed, though that - isn't likely until VxWorks itself provides similar functionality) - in future releases - i.e. you must always use the version of this - file that comes with the Erlang release you are using. - - - Disaster is guaranteed if you use the mechanism incorrectly (see - below for the correct way), e.g. allocate memory with the "tracking" - version of malloc() and free it with the "standard" version of free(). - - - The mechanism (of course) incurs some performance penalty - thus - for a simple program you may be better off with careful programming, - making sure that you do whatever close()/free()/etc calls that are - appropriate at all exit points (though if you need to guard against - taskDelete() etc, things get messy...). - - To use the mechanism, simply program your application normally, i.e. - use open()/close()/malloc()/free() etc as usual, but #include this - file before any usage of the relevant functions. 
NOTE: To avoid the - "disaster" mentioned above, you *must* #include it in *all* (or none) - of the files that manipulate a particular file descriptor, allocated - memory area, etc. Finally, note that you can obviously not load your - application before the Erlang system when using this interface. -*/ - -/* Sorry, no ANSI prototypes yet... */ -extern int save_open(),save_creat(),save_socket(),save_accept(),save_close(); -#define open save_open -#define creat save_creat -#define socket save_socket -#define accept save_accept -#define close save_close -extern FILE *save_fopen(), *save_fdopen(), *save_freopen(); -extern int save_fclose(); -#define fopen save_fopen -#define fdopen save_fdopen -#define freopen save_freopen -#define fclose save_fclose -/* XXX Should do opendir/closedir too... */ -extern char *save_malloc(), *save_calloc(), *save_realloc(); -extern void save_free(), save_cfree(); -#define malloc save_malloc -#define calloc save_calloc -#define realloc save_realloc -#define free save_free -#define cfree save_cfree - -#endif diff --git a/erts/emulator/test/port_bif_SUITE_data/port_test.c b/erts/emulator/test/port_bif_SUITE_data/port_test.c index c6b128df66..28324a56a6 100644 --- a/erts/emulator/test/port_bif_SUITE_data/port_test.c +++ b/erts/emulator/test/port_bif_SUITE_data/port_test.c @@ -3,15 +3,6 @@ * Purpose: A port program to be used for testing the open_port bif. */ -#ifdef VXWORKS -#include <vxWorks.h> -#include <taskVarLib.h> -#include <taskLib.h> -#include <sysLib.h> -#include <string.h> -#include <ioLib.h> -#endif - #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -23,12 +14,7 @@ #ifndef __WIN32__ #include <unistd.h> -#ifdef VXWORKS -#include "reclaim.h" -#include <sys/times.h> -#else #include <sys/time.h> -#endif #define O_BINARY 0 #define _setmode(fd, mode) @@ -40,22 +26,13 @@ #endif -#ifdef VXWORKS -#define REDIR_STDOUT(fd) ioTaskStdSet(0, 1, fd); -#else #define REDIR_STDOUT(fd) if (dup2(fd, 1) == -1) { \ fprintf(stderr, "%s: failed to duplicate handle %d to 1: %d\n", \ port_data->progname, fd, errno); \ exit(1); \ } -#endif -#ifdef VXWORKS -#define MAIN(argc, argv) port_test(argc, argv) -#else #define MAIN(argc, argv) main(argc, argv) -#endif - extern int errno; @@ -101,7 +78,6 @@ static void dump(unsigned char* buf, int sz, int max); static void replace_stdout(char* filename); static void generate_reply(char* spec); -#ifndef VXWORKS #ifndef HAVE_STRERROR extern int sys_nerr; #ifndef sys_errlist /* sys_errlist is sometimes defined to @@ -125,20 +101,13 @@ int err; return msgstr; } #endif -#endif - MAIN(argc, argv) int argc; char *argv[]; { int ret; -#ifdef VXWORKS - if(taskVarAdd(0, (int *)&port_data) != OK) { - fprintf(stderr, "Can't do taskVarAdd in port_test\n"); - exit(1); - } -#endif + if((port_data = (PORT_TEST_DATA *) malloc(sizeof(PORT_TEST_DATA))) == NULL) { fprintf(stderr, "Couldn't malloc for port_data"); exit(1); @@ -508,9 +477,6 @@ dump(buf, sz, max) static void delay(unsigned ms) { -#ifdef VXWORKS - taskDelay((sysClkRateGet() * ms) / 1000); -#else #ifdef __WIN32__ Sleep(ms); #else @@ -520,7 +486,6 @@ delay(unsigned ms) select(0, NULL, NULL, NULL, &t); #endif -#endif } /* diff --git a/erts/emulator/test/port_bif_SUITE_data/reclaim.h b/erts/emulator/test/port_bif_SUITE_data/reclaim.h deleted file mode 100644 index 1d57dc5b8a..0000000000 --- a/erts/emulator/test/port_bif_SUITE_data/reclaim.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef __RECLAIM_H__ -#define __RECLAIM_H__ - - -/* The Erlang release for VxWorks includes a simple mechanism for - 
"resource reclamation" at task exit - it allows replacement of the - functions that open/close "files" and malloc/free memory with versions - that keep track, to be able to "reclaim" file descriptors and memory - when a task exits (regardless of *how* it exits). - - The interface to this mechanism is made available via this file, - with the following caveats: - - - The interface may change (or perhaps even be removed, though that - isn't likely until VxWorks itself provides similar functionality) - in future releases - i.e. you must always use the version of this - file that comes with the Erlang release you are using. - - - Disaster is guaranteed if you use the mechanism incorrectly (see - below for the correct way), e.g. allocate memory with the "tracking" - version of malloc() and free it with the "standard" version of free(). - - - The mechanism (of course) incurs some performance penalty - thus - for a simple program you may be better off with careful programming, - making sure that you do whatever close()/free()/etc calls that are - appropriate at all exit points (though if you need to guard against - taskDelete() etc, things get messy...). - - To use the mechanism, simply program your application normally, i.e. - use open()/close()/malloc()/free() etc as usual, but #include this - file before any usage of the relevant functions. NOTE: To avoid the - "disaster" mentioned above, you *must* #include it in *all* (or none) - of the files that manipulate a particular file descriptor, allocated - memory area, etc. Finally, note that you can obviously not load your - application before the Erlang system when using this interface. -*/ - -/* Sorry, no ANSI prototypes yet... */ -extern int save_open(),save_creat(),save_socket(),save_accept(),save_close(); -#define open save_open -#define creat save_creat -#define socket save_socket -#define accept save_accept -#define close save_close -extern FILE *save_fopen(), *save_fdopen(), *save_freopen(); -extern int save_fclose(); -#define fopen save_fopen -#define fdopen save_fdopen -#define freopen save_freopen -#define fclose save_fclose -/* XXX Should do opendir/closedir too... */ -extern char *save_malloc(), *save_calloc(), *save_realloc(); -extern void save_free(), save_cfree(); -#define malloc save_malloc -#define calloc save_calloc -#define realloc save_realloc -#define free save_free -#define cfree save_cfree - -#endif diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl index fdc55a4cc5..6509871a7d 100644 --- a/erts/emulator/test/process_SUITE.erl +++ b/erts/emulator/test/process_SUITE.erl @@ -280,15 +280,7 @@ kb_128() -> big_binary()}. eight_kb() -> - %%% This is really much more than eight kb, so vxworks platforms - %%% gets away with 1/8 of the other platforms (due to limited - %%% memory resources). - B64 = case os:type() of - vxworks -> - ?line lists:seq(1, 8); - _ -> - ?line lists:seq(1, 64) - end, + B64 = lists:seq(1, 64), ?line B512 = {<<1>>,B64,<<2,3>>,B64,make_unaligned_sub_binary(<<4,5,6,7,8,9>>), B64,make_sub_binary([1,2,3,4,5,6]), B64,make_sub_binary(lists:seq(1, ?heap_binary_size+1)), diff --git a/erts/emulator/test/save_calls_SUITE.erl b/erts/emulator/test/save_calls_SUITE.erl index 390b49b604..26ac4f2f7f 100644 --- a/erts/emulator/test/save_calls_SUITE.erl +++ b/erts/emulator/test/save_calls_SUITE.erl @@ -21,8 +21,10 @@ -include_lib("test_server/include/test_server.hrl"). --export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, - init_per_group/2,end_per_group/2]). 
+-export([all/0, suite/0,groups/0, + init_per_suite/1, end_per_suite/1, + init_per_group/2,end_per_group/2, + init_per_testcase/2,end_per_testcase/2]). -export([save_calls_1/1,dont_break_reductions/1]). @@ -48,6 +50,27 @@ init_per_group(_GroupName, Config) -> end_per_group(_GroupName, Config) -> Config. +init_per_testcase(dont_break_reductions,Config) -> + %% Skip on --enable-native-libs as hipe rescedules after each + %% function call. + case erlang:system_info(hipe_architecture) of + undefined -> + Config; + Architecture -> + {lists, ListsBinary, _ListsFilename} = code:get_object_code(lists), + ChunkName = hipe_unified_loader:chunk_name(Architecture), + NativeChunk = beam_lib:chunks(ListsBinary, [ChunkName]), + case NativeChunk of + {ok,{_,[{_,Bin}]}} when is_binary(Bin) -> + {skip,"Does not work for --enable-native-libs"}; + {error, beam_lib, _} -> Config + end + end; +init_per_testcase(_,Config) -> + Config. + +end_per_testcase(_,_Config) -> + ok. dont_break_reductions(suite) -> []; diff --git a/erts/emulator/test/smoke_test_SUITE.erl b/erts/emulator/test/smoke_test_SUITE.erl index 98f1cf1ad5..6f5c2080c0 100644 --- a/erts/emulator/test/smoke_test_SUITE.erl +++ b/erts/emulator/test/smoke_test_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2011. All Rights Reserved. +%% Copyright Ericsson AB 2011-2012. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -51,7 +51,17 @@ end_per_group(_GroupName, Config) -> Config. +init_per_testcase(boot_combo = Case, Config) when is_list(Config) -> + case erlang:system_info(build_type) of + opt -> + init_per_tc(Case, Config); + _ -> + {skip,"Cannot test boot_combo in special builds since beam.* may not exist"} + end; init_per_testcase(Case, Config) when is_list(Config) -> + init_per_tc(Case, Config). + +init_per_tc(Case, Config) -> Dog = ?t:timetrap(?DEFAULT_TIMEOUT), [{testcase, Case},{watchdog, Dog}|Config]. @@ -121,18 +131,19 @@ start_node(Config) -> start_node(Config, ""). start_node(Config, Args) when is_list(Config) -> - ?line Pa = filename:dirname(code:which(?MODULE)), - ?line {A, B, C} = now(), - ?line Name = list_to_atom(atom_to_list(?MODULE) - ++ "-" - ++ atom_to_list(?config(testcase, Config)) - ++ "-" - ++ integer_to_list(A) - ++ "-" - ++ integer_to_list(B) - ++ "-" - ++ integer_to_list(C)), - ?line ?t:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]). + Pa = filename:dirname(code:which(?MODULE)), + {A, B, C} = now(), + Name = list_to_atom(atom_to_list(?MODULE) + ++ "-" + ++ atom_to_list(?config(testcase, Config)) + ++ "-" + ++ integer_to_list(A) + ++ "-" + ++ integer_to_list(B) + ++ "-" + ++ integer_to_list(C)), + Opts = [{args, "-pa "++Pa++" "++Args}], + ?t:start_node(Name, slave, Opts). stop_node(Node) -> ?t:stop_node(Node). 
diff --git a/erts/emulator/test/timer_bif_SUITE.erl b/erts/emulator/test/timer_bif_SUITE.erl index 7ff7449ff5..c9533d0748 100644 --- a/erts/emulator/test/timer_bif_SUITE.erl +++ b/erts/emulator/test/timer_bif_SUITE.erl @@ -71,38 +71,23 @@ end_per_group(_GroupName, Config) -> start_timer_1(doc) -> ["Basic start_timer/3 functionality"]; start_timer_1(Config) when is_list(Config) -> - ?line Ref1 = erlang:start_timer(1000, self(), plopp), - ?line ok = get(1100, {timeout, Ref1, plopp}), - - ?line false = erlang:read_timer(Ref1), - ?line false = erlang:cancel_timer(Ref1), - ?line false = erlang:read_timer(Ref1), - - ?line Ref2 = erlang:start_timer(1000, self(), plapp), - ?line Left2 = erlang:cancel_timer(Ref2), - UpperLimit = case os:type() of - vxworks -> - %% The ticks of vxworks have a far lesser granularity - %% than what is expected in this testcase, in - %% fact the Left2 variable can get a little more than 1000... - 1100; - _ -> - 1000 - end, - ?line RetVal = case os:type() of - vxworks -> - {comment, "VxWorks behaves slightly unexpected, should be fixed,"}; - _ -> - ok - end, - ?line true = (Left2 > 900) and (Left2 =< UpperLimit), - ?line empty = get_msg(), - ?line false = erlang:cancel_timer(Ref2), - - ?line Ref3 = erlang:start_timer(1000, self(), plopp), - ?line no_message = get(900, {timeout, Ref3, plopp}), - - RetVal. + Ref1 = erlang:start_timer(1000, self(), plopp), + ok = get(1100, {timeout, Ref1, plopp}), + + false = erlang:read_timer(Ref1), + false = erlang:cancel_timer(Ref1), + false = erlang:read_timer(Ref1), + + Ref2 = erlang:start_timer(1000, self(), plapp), + Left2 = erlang:cancel_timer(Ref2), + UpperLimit = 1000, + true = (Left2 > 900) and (Left2 =< UpperLimit), + empty = get_msg(), + false = erlang:cancel_timer(Ref2), + + Ref3 = erlang:start_timer(1000, self(), plopp), + no_message = get(900, {timeout, Ref3, plopp}), + ok. send_after_1(doc) -> ["Basic send_after/3 functionality"]; send_after_1(Config) when is_list(Config) -> @@ -153,19 +138,11 @@ send_after_2(Config) when is_list(Config) -> send_after_3(doc) -> ["send_after/3: messages in the right order, worse than send_after_2"]; send_after_3(Config) when is_list(Config) -> - case os:type() of - vxworks -> - {skipped, "VxWorks timer granularity and order is not working good, this is subject to change!"}; - _ -> - do_send_after_3() - end. 
- -do_send_after_3() -> - ?line _ = erlang:send_after(100, self(), b1), - ?line _ = erlang:send_after(101, self(), b2), - ?line _ = erlang:send_after(102, self(), b3), - ?line _ = erlang:send_after(103, self(), last), - ?line [b1, b2, b3, last] = collect(last), + _ = erlang:send_after(100, self(), b1), + _ = erlang:send_after(101, self(), b2), + _ = erlang:send_after(102, self(), b3), + _ = erlang:send_after(103, self(), last), + [b1, b2, b3, last] = collect(last), % This behaviour is not guaranteed: % ?line _ = erlang:send_after(100, self(), c1), diff --git a/erts/emulator/test/trace_local_SUITE.erl b/erts/emulator/test/trace_local_SUITE.erl index 32e2a98e3c..1e0705fabe 100644 --- a/erts/emulator/test/trace_local_SUITE.erl +++ b/erts/emulator/test/trace_local_SUITE.erl @@ -70,7 +70,8 @@ config(priv_dir,_) -> -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, basic/1, bit_syntax/1, - return/1, on_and_off/1, stack_grow/1,info/1, delete/1, + return/1, on_and_off/1, systematic_on_off/1, + stack_grow/1,info/1, delete/1, exception/1, exception_apply/1, exception_function/1, exception_apply_function/1, exception_nocatch/1, exception_nocatch_apply/1, @@ -80,6 +81,7 @@ config(priv_dir,_) -> exception_meta_nocatch/1, exception_meta_nocatch_apply/1, exception_meta_nocatch_function/1, exception_meta_nocatch_apply_function/1, + concurrency/1, init_per_testcase/2, end_per_testcase/2]). init_per_testcase(_Case, Config) -> ?line Dog=test_server:timetrap(test_server:minutes(2)), @@ -89,14 +91,23 @@ end_per_testcase(_Case, Config) -> shutdown(), Dog=?config(watchdog, Config), test_server:timetrap_cancel(Dog), - ok. + + %% Reloading the module will clear all trace patterns, and + %% in a debug-compiled emulator run assertions of the counters + %% for the number of functions with breakpoints. + + c:l(?MODULE). + + + suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> case test_server:is_native(trace_local_SUITE) of true -> [not_run]; false -> - [basic, bit_syntax, return, on_and_off, stack_grow, + [basic, bit_syntax, return, on_and_off, systematic_on_off, + stack_grow, info, delete, exception, exception_apply, exception_function, exception_apply_function, exception_nocatch, exception_nocatch_apply, @@ -106,7 +117,8 @@ all() -> exception_meta_apply_function, exception_meta_nocatch, exception_meta_nocatch_apply, exception_meta_nocatch_function, - exception_meta_nocatch_apply_function] + exception_meta_nocatch_apply_function, + concurrency] end. groups() -> @@ -350,7 +362,8 @@ same(A, B) -> basic_test() -> ?line setup([call]), - ?line erlang:trace_pattern({?MODULE,'_','_'},[],[local]), + NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]), + NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]), ?line erlang:trace_pattern({?MODULE,slave,'_'},false,[local]), ?line [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]), ?line ?CT(?MODULE,exported_wrap,[1]), @@ -572,7 +585,118 @@ on_and_off_test() -> end, ?line ?NM, ok. - + +systematic_on_off(Config) when is_list(Config) -> + setup([call]), + Local = combinations([local,meta,call_count,call_time]), + [systematic_on_off_1(Flags) || Flags <- Local], + + %% Make sure that we don't get any trace messages when trace + %% is supposed to be off. + receive_no_next(500). + +systematic_on_off_1(Local) -> + io:format("~p\n", [Local]), + + %% Global off. 
+ verify_trace_info(false, []), + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, Local), + verify_trace_info(false, Local), + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, [global]), + verify_trace_info(false, Local), + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, Local), + verify_trace_info(false, []), + + %% Global on. + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, [global]), + verify_trace_info(true, []), + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, Local), + verify_trace_info(true, []), + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, [global]), + verify_trace_info(false, []), + + %% Implicitly turn off global call trace. + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, [global]), + verify_trace_info(true, []), + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, Local), + verify_trace_info(false, Local), + + %% Implicitly turn off local call trace. + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, [global]), + verify_trace_info(true, []), + + %% Turn off global call trace. Everything should be off now. + 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, [global]), + verify_trace_info(false, []), + + ok. + +verify_trace_info(Global, Local) -> + case erlang:trace_info({?MODULE,exported_wrap,1}, all) of + {all,false} -> + false = Global, + [] = Local; + {all,Ps} -> + io:format("~p\n", [Ps]), + [verify_trace_info(P, Global, Local) || P <- Ps] + end, + global_call(Global, Local), + local_call(Local), + ok. + +verify_trace_info({traced,global}, true, []) -> ok; +verify_trace_info({traced,local}, false, _) -> ok; +verify_trace_info({match_spec,[]}, _, _) -> ok; +verify_trace_info({meta_match_spec,[]}, _, _) -> ok; +verify_trace_info({LocalFlag,Bool}, _, Local) when is_boolean(Bool) -> + try + Bool = lists:member(LocalFlag, Local) + catch + error:_ -> + io:format("Line ~p: {~p,~p}, false, ~p\n", + [?LINE,LocalFlag,Bool,Local]), + ?t:fail() + end; +verify_trace_info({meta,Pid}, false, Local) when is_pid(Pid) -> + true = lists:member(meta, Local); +verify_trace_info({call_time,_}, false, Local) -> + true = lists:member(call_time, Local); +verify_trace_info({call_count,_}, false, Local) -> + true = lists:member(call_time, Local). + +global_call(Global, Local) -> + apply_slave(?MODULE, exported_wrap, [global_call]), + case Global of + false -> + recv_local_call(Local, [global_call]); + true -> + ?CT(?MODULE, exported_wrap, [global_call]) + end. + +local_call(Local) -> + lambda_slave(fun() -> exported_wrap(local_call) end), + recv_local_call(Local, [local_call]). + +recv_local_call(Local, Args) -> + case lists:member(local, Local) of + false -> + ok; + true -> + ?CT(?MODULE, exported_wrap, Args) + end, + case lists:member(meta, Local) of + false -> + ok; + true -> + ?CTT(?MODULE, exported_wrap, Args) + end, + ok. + +combinations([_]=One) -> + [One]; +combinations([H|T]) -> + Cs = combinations(T), + [[H|C] || C <- Cs] ++ Cs. stack_grow_test() -> ?line setup([call,return_to]), @@ -703,16 +827,10 @@ exception_test(Opts) -> ?line ok. 
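The systematic_on_off/verify_trace_info code above drives erlang:trace_pattern/3 and erlang:trace_info/2 against this module's own functions. A stand-alone sketch of the same round trip, using lists:reverse/1 purely as a placeholder target:

demo_trace_pattern() ->
    %% A fully specified MFA matches exactly one function; trace_info/2
    %% reports the local trace until the pattern is cleared again.
    1 = erlang:trace_pattern({lists, reverse, 1}, true, [local]),
    {traced, local} = erlang:trace_info({lists, reverse, 1}, traced),
    1 = erlang:trace_pattern({lists, reverse, 1}, false, [local]),
    ok.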
exceptions() -> - ?line Ref = make_ref(), - ?line N = case os:type() of - vxworks -> - ?line 2000; % Limited memory on themachines, not actually - % VxWorks' fault /PaN - _ -> - ?line 200000 - end, - ?line LiL = seq(1, N-1, N), % Long Improper List - ?line LL = seq(1, N, []), % Long List + Ref = make_ref(), + N = 200000, + LiL = seq(1, N-1, N), % Long Improper List + LL = seq(1, N, []), % Long List [{{erlang,exit}, [done]}, {{erlang,error}, [1.0]}, {{erlang,error}, [Ref,[]]}, @@ -813,6 +931,42 @@ clean_location({crash,{Reason,Stk0}}) -> {crash,{Reason,Stk}}; clean_location(Term) -> Term. +concurrency(_Config) -> + N = erlang:system_info(schedulers), + + %% Spawn 2*N processes that spin in a tight infinite loop, + %% and one process that will turn on and off local call + %% trace on the infinite_loop/0 function. We expect the + %% emulator to crash if there is a memory barrier bug or + %% if an aligned word-sized write is not atomic. + + Ps0 = [spawn_monitor(fun() -> infinite_loop() end) || + _ <- lists:seq(1, 2*N)], + OnAndOff = fun() -> concurrency_on_and_off() end, + Ps1 = [spawn_monitor(OnAndOff)|Ps0], + ?t:sleep(1000), + + %% Now spawn off N more processes that turn on off and off + %% a local trace pattern. + Ps = [spawn_monitor(OnAndOff) || _ <- lists:seq(1, N)] ++ Ps1, + ?t:sleep(1000), + + %% Clean up. + [exit(Pid, kill) || {Pid,_} <- Ps], + [receive + {'DOWN',Ref,process,Pid,killed} -> ok + end || {Pid,Ref} <- Ps], + erlang:trace_pattern({?MODULE,infinite_loop,0}, false, [local]), + ok. + +concurrency_on_and_off() -> + 1 = erlang:trace_pattern({?MODULE,infinite_loop,0}, true, [local]), + 1 = erlang:trace_pattern({?MODULE,infinite_loop,0}, false, [local]), + concurrency_on_and_off(). + +infinite_loop() -> + infinite_loop(). + %%% Tracee target functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%% diff --git a/erts/emulator/test/trace_port_SUITE.erl b/erts/emulator/test/trace_port_SUITE.erl index f81cab3114..cc2eadafbc 100644 --- a/erts/emulator/test/trace_port_SUITE.erl +++ b/erts/emulator/test/trace_port_SUITE.erl @@ -472,14 +472,9 @@ default_tracer(Config) when is_list(Config) -> ?line M = N, ok. - %%% Help functions. -huge_data() -> - case os:type() of - vxworks -> huge_data(4711); - _ -> huge_data(16384) - end. +huge_data() -> huge_data(16384). huge_data(0) -> []; huge_data(N) when N rem 2 == 0 -> P = huge_data(N div 2), diff --git a/erts/emulator/utils/beam_makeops b/erts/emulator/utils/beam_makeops index 8fe2402ca8..16a949c2a6 100755 --- a/erts/emulator/utils/beam_makeops +++ b/erts/emulator/utils/beam_makeops @@ -1,4 +1,4 @@ -#!/usr/bin/env perl +#!/usr/bin/env perl -W # # %CopyrightBegin% # @@ -362,7 +362,7 @@ while (<>) { $gen_to_spec{"$name/$arity"} = undef; $num_specific{"$name/$arity"} = 0; $min_window{"$name/$arity"} = 255; - $obsolete[$op_num] = $obsolete eq '-'; + $obsolete[$op_num] = defined $obsolete; } else { # Unnumbered generic operation. push(@unnumbered_generic, [$name, $arity]); $unnumbered{$name,$arity} = 1; @@ -379,7 +379,7 @@ while (<>) { if @args > $max_spec_operands; &syntax_check($name, @args); my $arity = @args; - if ($obsolete[$gen_opnum{$name,$arity}]) { + if (defined $gen_opnum{$name,$arity} and $obsolete[$gen_opnum{$name,$arity}]) { error("specific instructions may not be specified for obsolete instructions"); } push(@{$specific_op{"$name/$arity"}}, [$name, $hot, @args]); @@ -810,8 +810,8 @@ sub compiler_output { # # Generate .hrl file. 
# - my($name) = "$outdir/${module}.hrl"; - open(STDOUT, ">$name") || die "Failed to open $name for writing: $!\n"; + my($hrl_name) = "$outdir/${module}.hrl"; + open(STDOUT, ">$hrl_name") || die "Failed to open $hrl_name for writing: $!\n"; &comment('erlang'); for ($i = 0; $i < @tag_type && $i < 8; $i++) { @@ -1251,8 +1251,8 @@ sub compile_transform { $arity++ unless $list[1] eq '*'; $_ = [ @list ]; } - - if ($obsolete[$gen_opnum{$name,$arity}]) { + + if (defined $gen_opnum{$name,$arity} && $obsolete[$gen_opnum{$name,$arity}]) { error("obsolete function must not be used in transformations"); } @@ -1704,14 +1704,15 @@ sub tr_gen_to { # my($first_ref) = shift(@code); my($size, $first, $key) = @$first_ref; - my($dummy, $op, $arity) = @$first; + my($dummy, $arity); + ($dummy, $op, $arity) = @$first; my($comment) = "\n/*\n * Line $line:\n * $orig_transform\n */\n\n"; $min_window{$key} = $min_window if $min_window{$key} > $min_window; my $prev_last; $prev_last = pop(@{$gen_transform{$key}}) - if defined @{$gen_transform{$key}}; # Fail + if defined $gen_transform{$key}; # Fail if ($prev_last && !is_instr($prev_last, 'fail')) { error("Line $line: A previous transformation shadows '$orig_transform'"); @@ -1719,7 +1720,7 @@ sub tr_gen_to { unless ($cannot_fail) { unshift(@code, make_op('', 'try_me_else', tr_code_len(@code))); - push(@code, make_op(""), make_op("$key", 'fail')); + push(@code, make_op("$key", 'fail')); } unshift(@code, make_op($comment)); push(@{$gen_transform{$key}}, @code), diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables index 91efb4c023..a841f26d6a 100755 --- a/erts/emulator/utils/make_tables +++ b/erts/emulator/utils/make_tables @@ -167,7 +167,6 @@ typedef struct bif_entry { extern BifEntry bif_table[]; extern Export* bif_export[]; -extern unsigned char erts_bif_trace_flags[]; #define BIF_SIZE $bif_size @@ -197,7 +196,6 @@ includes("export.h", "sys.h", "erl_vm.h", "erl_process.h", "bif.h", "erl_bif_table.h", "erl_atom_table.h"); print "\nExport* bif_export[BIF_SIZE];\n"; -print "unsigned char erts_bif_trace_flags[BIF_SIZE];\n\n"; print "BifEntry bif_table[] = {\n"; for ($i = 0; $i < @bif; $i++) { diff --git a/erts/emulator/valgrind/suppress.halfword b/erts/emulator/valgrind/suppress.halfword new file mode 100644 index 0000000000..8fe448d897 --- /dev/null +++ b/erts/emulator/valgrind/suppress.halfword @@ -0,0 +1,56 @@ +# Extra suppressions specific for the halfword emulator. + +# --- Suppress all offheap binaries --- +# Valgrinds leak check does not recognize pointers that are stored +# at unaligned addresses. In halfword emulator we store 64-bit pointers +# to offheap data on 32-bit aligned heaps. +# We solve this by suppressing allocation of all offheap structures +# that are not referenced by other tables (ie binaries). + +{ +Halfword erts_bin_nrml_alloc +Memcheck:Leak +... +fun:erts_bin_nrml_alloc +... +} + +{ +Halfword erts_bin_realloc +Memcheck:Leak +... +fun:erts_bin_realloc +... +} + +{ +Halfword erts_bin_realloc_fnf +Memcheck:Leak +... +fun:erts_bin_realloc_fnf +... +} + +{ +Halfword erts_bin_drv_alloc +Memcheck:Leak +... +fun:erts_bin_drv_alloc +... +} + +{ +Halfword erts_bin_drv_alloc_fnf +Memcheck:Leak +... +fun:erts_bin_drv_alloc_fnf +... +} + +{ +Halfword erts_create_magic_binary +Memcheck:Leak +... +fun:erts_create_magic_binary +... 
+} diff --git a/erts/emulator/valgrind/suppress.patched.3.6.0 b/erts/emulator/valgrind/suppress.patched.3.6.0 index 62ba032520..b3507bdba7 100644 --- a/erts/emulator/valgrind/suppress.patched.3.6.0 +++ b/erts/emulator/valgrind/suppress.patched.3.6.0 @@ -133,26 +133,18 @@ fun:pthread_create@@GLIBC_2.2.5 { zlib; ok according to zlib developers Memcheck:Cond -fun:longest_match +... fun:deflate_slow fun:deflate } { zlib; ok according to zlib developers Memcheck:Cond -fun:longest_match +... fun:deflate_fast fun:deflate } { -zlib; ok accordnig to zlib (this one popped up with valgrind-3.6.0) -Memcheck:Cond -fun:deflate_slow -fun:deflate -fun:zlib_deflate -fun:zlib_ctl -} -{ No leak; pointer into block Memcheck:Leak fun:malloc @@ -275,6 +267,14 @@ obj:*/ssleay.* } { + Harmless assembler bug in openssl + Memcheck:Addr8 + ... + fun:AES_cbc_encrypt + ... +} + +{ erts_bits_init_state; Why is this needed? Memcheck:Leak PossiblyLost diff --git a/erts/emulator/valgrind/suppress.standard b/erts/emulator/valgrind/suppress.standard index 5a129bfd10..beecf1a7b5 100644 --- a/erts/emulator/valgrind/suppress.standard +++ b/erts/emulator/valgrind/suppress.standard @@ -120,14 +120,14 @@ fun:pthread_create@@GLIBC_2.2.5 { zlib; ok according to zlib developers Memcheck:Cond -fun:longest_match +... fun:deflate_slow fun:deflate } { zlib; ok according to zlib developers Memcheck:Cond -fun:longest_match +... fun:deflate_fast fun:deflate } @@ -252,6 +252,14 @@ obj:*/ssleay.* } { + Harmless assembler bug in openssl + Memcheck:Addr8 + ... + fun:AES_cbc_encrypt + ... +} + +{ Prebuilt constant terms in os_info_init (PossiblyLost) Memcheck:Leak fun:malloc |