Diffstat (limited to 'erts/emulator'): 125 files changed, 16064 insertions, 9644 deletions
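Among the user-visible changes in this patch set are two new tuple BIFs, erlang:insert_element/3 and erlang:delete_element/2 (implemented as insert_element_3() and delete_element_2() in bif.c), and erlang:float_to_list/2 with formatting options (note the new atoms decimals, scientific and compact in atom.names). The shell transcript below is a minimal usage sketch, not part of the patch itself; the results shown assume the behaviour these BIFs have in released OTP documentation.

    1> erlang:insert_element(2, {one, two, three}, new).   %% insert new element before position 2
    {one,new,two,three}
    2> erlang:delete_element(2, {one, new, two, three}).   %% remove the element at position 2
    {one,two,three}
    3> float_to_list(0.1, [{decimals, 4}]).                %% fixed-point notation, 4 decimals
    "0.1000"
    4> float_to_list(0.1, [{decimals, 4}, compact]).       %% compact trims trailing zeros
    "0.1"
    5> float_to_list(0.1, [{scientific, 3}]).              %% scientific notation, 3 decimals
    "1.000e-01"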
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in index 7e966c81bb..3e44bbb8db 100644 --- a/erts/emulator/Makefile.in +++ b/erts/emulator/Makefile.in @@ -61,7 +61,7 @@ else ifeq ($(TYPE),purify) PURIFY = purify $(PURIFY_BUILD_OPTIONS) TYPEMARKER = .purify -TYPE_FLAGS = $(DEBUG_CFLAGS) -DPURIFY -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS +TYPE_FLAGS = $(DEBUG_CFLAGS) -DPURIFY -DNO_JUMP_TABLE ENABLE_ALLOC_TYPE_VARS += purify else @@ -92,7 +92,7 @@ else ifeq ($(TYPE),valgrind) PURIFY = TYPEMARKER = .valgrind -TYPE_FLAGS = $(DEBUG_CFLAGS) -DVALGRIND -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS +TYPE_FLAGS = $(DEBUG_CFLAGS) -DVALGRIND -DNO_JUMP_TABLE ENABLE_ALLOC_TYPE_VARS += valgrind else @@ -399,7 +399,7 @@ include zlib/zlib.mk include pcre/pcre.mk $(ERTS_LIB): - cd $(ERTS_LIB_DIR) && $(MAKE) $(TYPE) + $(V_at)cd $(ERTS_LIB_DIR) && $(MAKE) $(TYPE) .PHONY: clean clean: @@ -491,7 +491,7 @@ $(TTF_DIR)/beam_pred_funcs.h \ $(TTF_DIR)/beam_tr_funcs.h \ : $(TTF_DIR)/OPCODES-GENERATED $(TTF_DIR)/OPCODES-GENERATED: $(OPCODE_TABLES) utils/beam_makeops - LANG=C $(PERL) utils/beam_makeops \ + $(gen_verbose)LANG=C $(PERL) utils/beam_makeops \ -wordsize @EXTERNAL_WORD_SIZE@ \ -outdir $(TTF_DIR) \ -DUSE_VM_PROBES=$(if $(USE_VM_PROBES),1,0) \ @@ -525,22 +525,22 @@ $(TARGET)/erl_atom_table.h \ $(TARGET)/erl_pbifs.c \ : $(TARGET)/TABLES-GENERATED $(TARGET)/TABLES-GENERATED: $(ATOMS) $(BIFS) utils/make_tables - LANG=C $(PERL) utils/make_tables -src $(TARGET) -include $(TARGET)\ + $(gen_verbose)LANG=C $(PERL) utils/make_tables -src $(TARGET) -include $(TARGET)\ $(ATOMS) $(BIFS) && echo $? >$(TARGET)/TABLES-GENERATED GENERATE += $(TARGET)/TABLES-GENERATED $(TTF_DIR)/erl_alloc_types.h: beam/erl_alloc.types utils/make_alloc_types - LANG=C $(PERL) utils/make_alloc_types -src $< -dst $@ $(ENABLE_ALLOC_TYPE_VARS) + $(gen_verbose)LANG=C $(PERL) utils/make_alloc_types -src $< -dst $@ $(ENABLE_ALLOC_TYPE_VARS) GENERATE += $(TTF_DIR)/erl_alloc_types.h # version include file $(TARGET)/erl_version.h: ../vsn.mk - LANG=C $(PERL) utils/make_version -o $@ $(SYSTEM_VSN) $(VSN)$(SERIALNO) $(TARGET) + $(gen_verbose)LANG=C $(PERL) utils/make_version -o $@ $(SYSTEM_VSN) $(VSN)$(SERIALNO) $(TARGET) GENERATE += $(TARGET)/erl_version.h # driver table $(TTF_DIR)/driver_tab.c: Makefile.in - LANG=C $(PERL) utils/make_driver_tab -o $@ $(DRV_OBJS) + $(gen_verbose)LANG=C $(PERL) utils/make_driver_tab -o $@ $(DRV_OBJS) GENERATE += $(TTF_DIR)/driver_tab.c @@ -560,8 +560,9 @@ $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \ $(ERL_TOP)/erts/preloaded/ebin/zlib.beam \ $(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \ $(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \ - $(ERL_TOP)/erts/preloaded/ebin/erlang.beam - LANG=C $(PERL) utils/make_preload $(MAKE_PRELOAD_EXTRA) -rc $^ > $@ + $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \ + $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam + $(gen_verbose)LANG=C $(PERL) utils/make_preload $(MAKE_PRELOAD_EXTRA) -rc $^ > $@ else PRELOAD_OBJ = $(OBJDIR)/preload.o PRELOAD_SRC = $(TARGET)/preload.c @@ -572,8 +573,9 @@ $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \ $(ERL_TOP)/erts/preloaded/ebin/zlib.beam \ $(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \ $(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \ - $(ERL_TOP)/erts/preloaded/ebin/erlang.beam - LANG=C $(PERL) utils/make_preload -old $^ > $@ + $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \ + $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam + $(gen_verbose)LANG=C $(PERL) utils/make_preload -old $^ > $@ endif .PHONY : 
generate @@ -584,13 +586,13 @@ else generate: $(TTF_DIR)/GENERATED $(PRELOAD_SRC) $(TTF_DIR)/GENERATED: $(GENERATE) - echo $? >$(TTF_DIR)/GENERATED + $(gen_verbose)echo $? >$(TTF_DIR)/GENERATED endif $(TARGET)/erlang_dtrace.h: beam/erlang_dtrace.d - dtrace -h -C -Ibeam -s $< -o ./erlang_dtrace.tmp - sed -e '/^#define[ ]*ERLANG_[A-Z0-9_]*(.*)/y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/' ./erlang_dtrace.tmp > $@ - rm ./erlang_dtrace.tmp + $(dtrace_verbose)dtrace -h -C -Ibeam -s $< -o ./erlang_dtrace.tmp + $(V_at)sed -e '/^#define[ ]*ERLANG_[A-Z0-9_]*(.*)/y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/' ./erlang_dtrace.tmp > $@ + $(V_at)rm ./erlang_dtrace.tmp # ---------------------------------------------------------------------- # Pattern rules @@ -611,45 +613,45 @@ endif ifeq ($(TARGET),win32) $(OBJDIR)/dll_sys.o: sys/$(ERLANG_OSTYPE)/sys.c - $(CC) $(CFLAGS) -DERL_RUN_SHARED_LIB=1 $(INCLUDES) -c $< -o $@ + $(V_CC) $(CFLAGS) -DERL_RUN_SHARED_LIB=1 $(INCLUDES) -c $< -o $@ $(OBJDIR)/beams.$(RES_EXT): $(TARGET)/beams.rc - $(RC) -o $@ -I$(ERL_TOP)/erts/etc/win32 $(TARGET)/beams.rc + $(V_RC) -o $@ -I$(ERL_TOP)/erts/etc/win32 $(TARGET)/beams.rc endif ifneq ($(filter tile-%,$(TARGET)),) $(OBJDIR)/beam_emu.o: beam/beam_emu.c - $(CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) \ + $(V_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) \ -OPT:Olimit=0 -WOPT:lpre=off:spre=off:epre=off \ $(INCLUDES) -c $< -o $@ else # Usually the same as the default rule, but certain platforms (e.g. win32) mix # different compilers $(OBJDIR)/beam_emu.o: beam/beam_emu.c - $(EMU_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ + $(V_EMU_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ endif $(OBJDIR)/%.o: beam/%.c - $(CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ + $(V_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ $(OBJDIR)/%.o: $(TARGET)/%.c - $(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -c $< -o $@ + $(V_CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -c $< -o $@ $(OBJDIR)/%.o: $(TTF_DIR)/%.c - $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ + $(V_CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ $(OBJDIR)/%.o: sys/$(ERLANG_OSTYPE)/%.c - $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ + $(V_CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ $(OBJDIR)/%.o: sys/common/%.c - $(CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ + $(V_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ $(OBJDIR)/%.o: drivers/common/%.c - $(CC) $(CFLAGS) -DLIBSCTP=$(LIBSCTP) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE) -c $< -o $@ + $(V_CC) $(CFLAGS) -DLIBSCTP=$(LIBSCTP) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE) -c $< -o $@ $(OBJDIR)/%.o: drivers/$(ERLANG_OSTYPE)/%.c - $(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE) -I../etc/$(ERLANG_OSTYPE) -c $< -o $@ + $(V_CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE) -I../etc/$(ERLANG_OSTYPE) -c $< -o $@ # ---------------------------------------------------------------------- # Specials @@ -657,19 +659,19 @@ $(OBJDIR)/%.o: drivers/$(ERLANG_OSTYPE)/%.c CS_SRC = sys/$(ERLANG_OSTYPE)/erl_child_setup.c $(BINDIR)/$(CS_EXECUTABLE): $(TTF_DIR)/GENERATED $(PRELOAD_SRC) $(CS_SRC) $(ERTS_LIB) - $(CS_PURIFY) $(CC) $(CS_LDFLAGS) -o $(BINDIR)/$(CS_EXECUTABLE) \ + $(ld_verbose)$(CS_PURIFY) $(CC) $(CS_LDFLAGS) -o $(BINDIR)/$(CS_EXECUTABLE) \ $(CS_CFLAGS) $(COMMON_INCLUDES) $(CS_SRC) $(CS_LIBS) $(OBJDIR)/%.kp.o: sys/common/%.c - $(CC) -DERTS_KERNEL_POLL_VERSION 
$(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ + $(V_CC) -DERTS_KERNEL_POLL_VERSION $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ $(OBJDIR)/%.nkp.o: sys/common/%.c - $(CC) -DERTS_NO_KERNEL_POLL_VERSION $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ + $(V_CC) -DERTS_NO_KERNEL_POLL_VERSION $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ ifeq ($(GCC),yes) $(OBJDIR)/erl_goodfit_alloc.o: beam/erl_goodfit_alloc.c - $(CC) $(subst -O2, $(GEN_OPT_FLGS) $(UNROLL_FLG), $(CFLAGS)) $(INCLUDES) -c $< -o $@ + $(V_CC) $(subst -O2, $(GEN_OPT_FLGS) $(UNROLL_FLG), $(CFLAGS)) $(INCLUDES) -c $< -o $@ endif # ---------------------------------------------------------------------- @@ -735,7 +737,8 @@ RUN_OBJS = \ $(OBJDIR)/packet_parser.o $(OBJDIR)/safe_hash.o \ $(OBJDIR)/erl_zlib.o $(OBJDIR)/erl_nif.o \ $(OBJDIR)/erl_bif_binary.o $(OBJDIR)/erl_ao_firstfit_alloc.o \ - $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o + $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o \ + $(OBJDIR)/erl_ptab.o ifeq ($(TARGET),win32) DRV_OBJS = \ @@ -841,28 +844,28 @@ $(OBJS): $(TTF_DIR)/GENERATED M4FLAGS += -DTARGET=$(TARGET) -DOPSYS=$(OPSYS) -DARCH=$(ARCH) $(TTF_DIR)/%.S: hipe/%.m4 - m4 $(M4FLAGS) $< > $@ + $(m4_verbose)m4 $(M4FLAGS) $< > $@ $(TTF_DIR)/%.h: hipe/%.m4 - m4 $(M4FLAGS) $< > $@ + $(m4_verbose)m4 $(M4FLAGS) $< > $@ $(OBJDIR)/%.o: $(TTF_DIR)/%.S - $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ + $(V_CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ $(OBJDIR)/%.o: hipe/%.S - $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ + $(V_CC) $(CFLAGS) $(INCLUDES) -c $< -o $@ $(OBJDIR)/%.o: hipe/%.c - $(CC) $(subst O2,O3, $(CFLAGS)) $(INCLUDES) -c $< -o $@ + $(V_CC) $(subst O2,O3, $(CFLAGS)) $(INCLUDES) -c $< -o $@ $(BINDIR)/hipe_mkliterals$(TF_MARKER): $(OBJDIR)/hipe_mkliterals.o - $(CC) $(CFLAGS) $(INCLUDES) -o $@ $< + $(ld_verbose)$(CC) $(CFLAGS) $(INCLUDES) -o $@ $< $(OBJDIR)/hipe_mkliterals.o: $(HIPE_ASM) $(TTF_DIR)/erl_alloc_types.h \ $(TTF_DIR)/OPCODES-GENERATED $(TARGET)/TABLES-GENERATED $(TTF_DIR)/hipe_literals.h: $(BINDIR)/hipe_mkliterals$(TF_MARKER) - $(BINDIR)/hipe_mkliterals$(TF_MARKER) -c > $@ + $(gen_verbose)$(BINDIR)/hipe_mkliterals$(TF_MARKER) -c > $@ $(OBJDIR)/hipe_x86_glue.o: hipe/hipe_x86_glue.S \ $(TTF_DIR)/hipe_x86_asm.h $(TTF_DIR)/hipe_literals.h \ @@ -905,7 +908,7 @@ $(OBJDIR)/hipe_arm_bifs.o: $(TTF_DIR)/hipe_arm_bifs.S \ # Use -fomit-frame-pointer to work around gcc (v4.5.2) bug causing # "error: r7 cannot be used in asm here" for DEBUG build. $(OBJDIR)/hipe_arm.o: hipe/hipe_arm.c - $(CC) $(subst O2,O3, $(CFLAGS)) -fomit-frame-pointer $(INCLUDES) -c $< -o $@ + $(V_CC) $(subst O2,O3, $(CFLAGS)) -fomit-frame-pointer $(INCLUDES) -c $< -o $@ # end of HiPE section ######################################## @@ -916,13 +919,13 @@ $(OBJDIR)/hipe_arm.o: hipe/hipe_arm.c ifeq ($(TARGET), win32) # Only the basic erlang to begin with eh? 
$(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS) - $(PURIFY) $(LD) -dll -def:sys/$(ERLANG_OSTYPE)/erl.def -implib:$(BINDIR)/erl_dll.lib -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \ + $(ld_verbose)$(PURIFY) $(LD) -dll -def:sys/$(ERLANG_OSTYPE)/erl.def -implib:$(BINDIR)/erl_dll.lib -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \ $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS) $(LIBS) else $(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS) - $(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \ + $(ld_verbose)$(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \ $(HIPEBEAMLDFLAGS) $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS) $(LIBS) endif @@ -1010,23 +1013,24 @@ depend: else depend: $(TTF_DIR)/depend.mk $(TTF_DIR)/depend.mk: $(TTF_DIR)/GENERATED $(PRELOAD_SRC) - $(DEP_CC) $(DEP_FLAGS) $(BEAM_SRC) \ + $(gen_verbose) + $(V_at)$(DEP_CC) $(DEP_FLAGS) $(BEAM_SRC) \ | $(SED_DEPEND) > $(TTF_DIR)/depend.mk - $(DEP_CC) $(DEP_FLAGS) -DLIBSCTP=$(LIBSCTP) $(DRV_COMMON_SRC) \ + $(V_at)$(DEP_CC) $(DEP_FLAGS) -DLIBSCTP=$(LIBSCTP) $(DRV_COMMON_SRC) \ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk - $(DEP_CC) $(DEP_FLAGS) -I../etc/$(ERLANG_OSTYPE) $(DRV_OSTYPE_SRC) \ + $(V_at)$(DEP_CC) $(DEP_FLAGS) -I../etc/$(ERLANG_OSTYPE) $(DRV_OSTYPE_SRC) \ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk - $(DEP_CC) $(DEP_FLAGS) $(SYS_SRC) \ + $(V_at)$(DEP_CC) $(DEP_FLAGS) $(SYS_SRC) \ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk - $(DEP_CC) $(DEP_FLAGS) $(TARGET_SRC) \ + $(V_at)$(DEP_CC) $(DEP_FLAGS) $(TARGET_SRC) \ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk - $(DEP_CC) $(DEP_FLAGS) $(ZLIB_SRC) \ + $(V_at)$(DEP_CC) $(DEP_FLAGS) $(ZLIB_SRC) \ | $(SED_DEPEND_ZLIB) >> $(TTF_DIR)/depend.mk ifdef HIPE_ENABLED - $(DEP_CC) $(DEP_FLAGS) $(HIPE_SRC) \ + $(V_at)$(DEP_CC) $(DEP_FLAGS) $(HIPE_SRC) \ | $(SED_DEPEND) >> $(TTF_DIR)/depend.mk endif - cd $(ERTS_LIB_DIR) && $(MAKE) depend + $(V_at)cd $(ERTS_LIB_DIR) && $(MAKE) depend endif ifneq ($(MAKECMDGOALS),clean) diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index 59c9f39e7b..590b2fc960 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -96,6 +96,7 @@ atom asynchronous atom atom atom atom_used atom attributes +atom await_port_send_result atom await_proc_exit atom await_sched_wall_time_modifications atom awaiting_load @@ -145,6 +146,7 @@ atom close atom closed atom code atom command +atom compact atom compat_rel atom compile atom compressed @@ -154,6 +156,7 @@ atom connection_closed atom cons atom const atom context_switches +atom control atom copy atom cpu atom cpu_timestamp @@ -165,6 +168,7 @@ atom current_location atom current_stacktrace atom data atom debug_flags +atom decimals atom delay_trap atom dexit atom depth @@ -206,6 +210,7 @@ atom erlang atom ERROR='ERROR' atom error_handler atom error_logger +atom erts_internal atom ets atom ETS_TRANSFER='ETS-TRANSFER' atom event @@ -239,6 +244,7 @@ atom gc_end atom gc_start atom Ge='>=' atom generational +atom get_data atom get_seq_token atom get_tcw atom getenv @@ -410,6 +416,7 @@ atom overlapped_io atom owner atom packet atom packet_size +atom parallelism atom Plus='+' atom pause atom pending @@ -421,12 +428,12 @@ atom pid atom port atom ports atom port_count +atom port_limit atom print atom priority atom private atom process atom processes -atom processes_trap atom processes_used atom process_count atom process_display @@ -436,6 +443,7 @@ atom procs atom profile atom protected atom protection +atom ptab_list_continue atom public atom purify atom quantify @@ -476,6 +484,7 @@ atom scheduler 
atom scheduler_id atom schedulers_online atom scheme +atom scientific atom scope atom sensitive atom sequential_tracer @@ -483,6 +492,7 @@ atom sequential_trace_token atom serial atom set atom set_cpu_topology +atom set_data atom set_on_first_link atom set_on_first_spawn atom set_on_link diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 9e4add823d..e0a4f86d2d 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -438,8 +438,7 @@ check_process_code_2(BIF_ALIST_2) if (is_internal_pid(BIF_ARG_1)) { Eterm res; ErtsCodeIndex code_ix; - if (internal_pid_index(BIF_ARG_1) >= erts_max_processes) - goto error; + code_ix = erts_active_code_ix(); modp = erts_get_module(BIF_ARG_2, code_ix); if (modp == NULL) { /* Doesn't exist. */ diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index 58e0090a76..c1e11f6448 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -65,10 +65,10 @@ #define ERTS_BPF_ALL 0xFF -extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */ -extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */ -extern Eterm beam_exception_trace[1]; /* OpCode(i_exception_trace) */ -extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ +extern BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */ +extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */ +extern BeamInstr beam_exception_trace[1]; /* OpCode(i_exception_trace) */ +extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ erts_smp_atomic32_t erts_active_bp_index; erts_smp_atomic32_t erts_staging_bp_index; @@ -161,7 +161,7 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified) for (current = 0; current < num_modules; current++) { BeamInstr** code_base = (BeamInstr **) module[current]->curr.code; BeamInstr* code; - Uint num_functions = (Uint) code_base[MI_NUM_FUNCTIONS]; + Uint num_functions = (Uint)(UWord) code_base[MI_NUM_FUNCTIONS]; Uint fi; if (specified > 0) { @@ -172,7 +172,7 @@ erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified) } for (fi = 0; fi < num_functions; fi++) { - Eterm* pc; + BeamInstr* pc; int wi; code = code_base[MI_FUNCTIONS+fi]; @@ -555,7 +555,7 @@ erts_clear_module_break(Module *modp) { if (code_base == NULL) { return 0; } - n = (Uint) code_base[MI_NUM_FUNCTIONS]; + n = (Uint)(UWord) code_base[MI_NUM_FUNCTIONS]; for (i = 0; i < n; ++i) { BeamInstr* pc; @@ -714,7 +714,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) IS_TRACED_FL(p, F_TRACE_CALLS)) { int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE); flags = erts_call_trace(p, ep->code, bp->local_ms, args, - local, &p->tracer_proc); + local, &ERTS_TRACER_PROC(p)); } if (bp_flags & ERTS_BPF_META_TRACE) { Eterm tpid1, tpid2; @@ -800,7 +800,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) } if (flags & MATCH_SET_EXCEPTION_TRACE) { erts_trace_exception(p, ep->code, class, value, - &p->tracer_proc); + &ERTS_TRACER_PROC(p)); } if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) { /* can only happen if(local)*/ @@ -825,7 +825,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) UnUseTmpHeapNoproc(3); if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) { erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); - p->trace_flags |= F_EXCEPTION_TRACE; + ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE; erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); } } @@ -835,7 +835,7 @@ 
erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) } /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */ if (flags & MATCH_SET_RX_TRACE) { - erts_trace_return(p, ep->code, result, &p->tracer_proc); + erts_trace_return(p, ep->code, result, &ERTS_TRACER_PROC(p)); } if (flags & MATCH_SET_RETURN_TO_TRACE) { /* can only happen if(local)*/ @@ -919,7 +919,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg, E -= 1; ASSERT(c_p->htop <= E && E <= c_p->hend); E[0] = make_cp(c_p->cp); - c_p->cp = (BeamInstr *) beam_return_to_trace; + c_p->cp = beam_return_to_trace; } if (flags & MATCH_SET_RX_TRACE) { E -= 3; @@ -935,7 +935,7 @@ do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg, c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ? beam_exception_trace : beam_return_trace; erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - c_p->trace_flags |= F_EXCEPTION_TRACE; + ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE; erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } c_p->stop = E; @@ -974,7 +974,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt) ASSERT(pbt->pc); /* add time to previous code */ bp_time_diff(&sitem, pbt, ms, s, us); - sitem.pid = c_p->id; + sitem.pid = c_p->common.id; sitem.count = 0; /* previous breakpoint */ @@ -997,7 +997,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt) } /* Add count to this code */ - sitem.pid = c_p->id; + sitem.pid = c_p->common.id; sitem.count = 1; sitem.s_time = 0; sitem.us_time = 0; @@ -1055,7 +1055,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc) ASSERT(pbt->pc); bp_time_diff(&sitem, pbt, ms, s, us); - sitem.pid = p->id; + sitem.pid = p->common.id; sitem.count = 0; /* previous breakpoint */ @@ -1386,7 +1386,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) { if (pbdt) { get_sys_now(&ms,&s,&us); bp_time_diff(&sitem, pbt, ms, s, us); - sitem.pid = p->id; + sitem.pid = p->common.id; sitem.count = 0; h = &(pbdt->hash[bp_sched2ix_proc(p)]); diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 7b1ae624ce..0e9d140908 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -516,7 +516,7 @@ extern int count_instructions; # define Dispatchfun() DispatchMacroFun() #endif -#define Self(R) R = c_p->id +#define Self(R) R = c_p->common.id #define Node(R) R = erts_this_node->sysname #define Arg(N) I[(N)+1] @@ -1074,11 +1074,11 @@ init_emulator(void) void dtrace_drvport_str(ErlDrvPort drvport, char *port_buf) { - Port *port = erts_drvport2port(drvport); + Port *port = erts_drvport2port(drvport, NULL); erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>", - port_channel_no(port->id), - port_number(port->id)); + port_channel_no(port->common.id), + port_number(port->common.id)); } #endif /* @@ -1195,7 +1195,7 @@ void process_main(void) c_p = schedule(c_p, reds_used); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); #ifdef DEBUG - pid = c_p->id; /* Save for debugging purpouses */ + pid = c_p->common.id; /* Save for debugging purpouses */ #endif ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); @@ -1227,7 +1227,7 @@ void process_main(void) reds = c_p->fcalls; if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) - && (c_p->trace_flags & F_SENSITIVE) == 0) { + && (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE) == 0) { neg_o_reds = -reds; FCALLS = REDS_IN(c_p) = 0; } else { @@ -1591,6 +1591,7 @@ void process_main(void) reg[0] = r(0); result = erl_send(c_p, r(0), x(1)); PreFetch(0, next); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); 
PROCESS_MAIN_CHK_LOCKS(c_p); if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) { @@ -1866,14 +1867,14 @@ void process_main(void) erts_fprintf(stderr, "Dtrace -> (%T) stop spreading " "tag %T with message %T\r\n", - c_p->id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp)); + c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp)); #endif } else { #ifdef DTRACE_TAG_HARDDEBUG erts_fprintf(stderr, "Dtrace -> (%T) kill tag %T with " "message %T\r\n", - c_p->id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp)); + c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp)); #endif DT_UTAG(c_p) = NIL; SEQ_TRACE_TOKEN(c_p) = NIL; @@ -1898,7 +1899,7 @@ void process_main(void) erts_fprintf(stderr, "Dtrace -> (%T) receive tag (%T) " "with message %T\r\n", - c_p->id, DT_UTAG(c_p), ERL_MESSAGE_TERM(msgp)); + c_p->common.id, DT_UTAG(c_p), ERL_MESSAGE_TERM(msgp)); #endif } else { #endif @@ -1914,7 +1915,7 @@ void process_main(void) } msg = ERL_MESSAGE_TERM(msgp); seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE, - c_p->id, c_p); + c_p->common.id, c_p); #ifdef USE_VM_PROBES } #endif @@ -2567,6 +2568,7 @@ void process_main(void) reg[0] = r(0); result = (*bf)(c_p, reg, I); ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); ERTS_HOLE_CHECK(c_p); ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); @@ -3301,7 +3303,6 @@ void process_main(void) PROCESS_MAIN_CHK_LOCKS(c_p); bif_nif_arity = I[-1]; ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); { @@ -3346,7 +3347,6 @@ void process_main(void) bif_nif_arity = I[-1]; ASSERT(bif_nif_arity <= 3); ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); reg[0] = r(0); { Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf; @@ -5257,7 +5257,7 @@ terminate_proc(Process* c_p, Eterm Value) /* EXF_LOG is a primary exception flag */ if (c_p->freason & EXF_LOG) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); - erts_dsprintf(dsbufp, "Error in process %T ", c_p->id); + erts_dsprintf(dsbufp, "Error in process %T ", c_p->common.id); if (erts_is_alive) erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname); erts_dsprintf(dsbufp,"with exit value: %0.*T\n", display_items, Value); @@ -6186,7 +6186,7 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) MSO(p).first = (struct erl_off_heap_header*) funp; funp->fe = fe; funp->num_free = num_free; - funp->creator = p->id; + funp->creator = p->common.id; #ifdef HIPE funp->native_address = fe->native_address; #endif diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index a0b4a8c049..bde70911a2 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -37,10 +37,13 @@ #include "erl_db_util.h" #include "register.h" #include "erl_thr_progress.h" +#define ERTS_PTAB_WANT_BIF_IMPL__ +#include "erl_ptab.h" static Export* flush_monitor_message_trap = NULL; static Export* set_cpu_topology_trap = NULL; static Export* await_proc_exit_trap = NULL; +static Export* await_port_send_result_trap = NULL; Export* erts_format_cpu_topology_trap = NULL; static Export *await_sched_wall_time_mod_trap; @@ -83,8 +86,10 @@ static int insert_internal_link(Process* p, Eterm rpid) ASSERT(is_internal_pid(rpid)); #ifdef ERTS_SMP - if (IS_TRACED(p) && (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1))) + if (IS_TRACED(p) + && (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1))) { rp_locks = ERTS_PROC_LOCKS_ALL; + } erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); #endif @@ -100,27 +105,27 @@ static int 
insert_internal_link(Process* p, Eterm rpid) } if (p != rp) { - erts_add_link(&(p->nlinks), LINK_PID, rp->id); - erts_add_link(&(rp->nlinks), LINK_PID, p->id); + erts_add_link(&ERTS_P_LINKS(p), LINK_PID, rp->common.id); + erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, p->common.id); - ASSERT(is_nil(p->tracer_proc) - || is_internal_pid(p->tracer_proc) - || is_internal_port(p->tracer_proc)); + ASSERT(is_nil(ERTS_TRACER_PROC(p)) + || is_internal_pid(ERTS_TRACER_PROC(p)) + || is_internal_port(ERTS_TRACER_PROC(p))); if (IS_TRACED(p)) { - if (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) { - rp->trace_flags |= (p->trace_flags & TRACEE_FLAGS); - rp->tracer_proc = p->tracer_proc; /* maybe steal */ + if (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1)) { + ERTS_TRACE_FLAGS(rp) |= (ERTS_TRACE_FLAGS(p) & TRACEE_FLAGS); + ERTS_TRACER_PROC(rp) = ERTS_TRACER_PROC(p); /* maybe steal */ - if (p->trace_flags & F_TRACE_SOL1) { /* maybe override */ - rp->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL); - p->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL); + if (ERTS_TRACE_FLAGS(p) & F_TRACE_SOL1) { /* maybe override */ + ERTS_TRACE_FLAGS(rp) &= ~(F_TRACE_SOL1 | F_TRACE_SOL); + ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL); } } } } if (IS_TRACED_FL(rp, F_TRACE_PROCS)) - trace_proc(p, rp, am_getting_linked, p->id); + trace_proc(p, rp, am_getting_linked, p->common.id); if (p == rp) erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN); @@ -144,10 +149,6 @@ BIF_RETTYPE link_1(BIF_ALIST_1) /* check that the pid or port which is our argument is OK */ if (is_internal_pid(BIF_ARG_1)) { - if (internal_pid_index(BIF_ARG_1) >= erts_max_processes) { - BIF_ERROR(BIF_P, BADARG); - } - if (insert_internal_link(BIF_P, BIF_ARG_1)) { BIF_RET(am_true); } @@ -157,19 +158,37 @@ BIF_RETTYPE link_1(BIF_ALIST_1) } if (is_internal_port(BIF_ARG_1)) { - Port *pt = erts_id2port(BIF_ARG_1, BIF_P, ERTS_PROC_LOCK_MAIN); - if (!pt) { + int send_link_signal = 0; + Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP); + if (!prt) { goto res_no_proc; } erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); - if (erts_add_link(&(BIF_P->nlinks), LINK_PID, BIF_ARG_1) >= 0) - erts_add_link(&(pt->nlinks), LINK_PID, BIF_P->id); + if (erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1) >= 0) + send_link_signal = 1; /* else: already linked */ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); - erts_smp_port_unlock(pt); + + if (send_link_signal) { + Eterm ref; + Eterm *refp = erts_port_synchronous_ops ? 
&ref : NULL; + + switch (erts_port_link(BIF_P, prt, BIF_P->common.id, refp)) { + case ERTS_PORT_OP_DROPPED: + case ERTS_PORT_OP_BADARG: + goto res_no_proc; + case ERTS_PORT_OP_SCHEDULED: + if (refp) { + ASSERT(is_internal_ref(ref)); + BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true); + } + default: + break; + } + } BIF_RET(am_true); } else if (is_external_port(BIF_ARG_1) @@ -182,7 +201,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1) erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); /* We may earn time by checking first that we're not linked already */ - if (erts_lookup_link(BIF_P->nlinks, BIF_ARG_1) != NULL) { + if (erts_lookup_link(ERTS_P_LINKS(BIF_P), BIF_ARG_1) != NULL) { erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); } @@ -209,10 +228,10 @@ BIF_RETTYPE link_1(BIF_ALIST_1) erts_smp_de_links_lock(dep); - erts_add_link(&(BIF_P->nlinks), LINK_PID, BIF_ARG_1); + erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1); lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, - BIF_P->id); + BIF_P->common.id); ASSERT(lnk != NULL); erts_add_link(&ERTS_LINK_ROOT(lnk), LINK_PID, BIF_ARG_1); @@ -220,7 +239,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1) erts_smp_de_runlock(dep); erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); - code = erts_dsig_send_link(&dsd, BIF_P->id, BIF_ARG_1); + code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1); if (code == ERTS_DSIG_SEND_YIELD) ERTS_BIF_YIELD_RETURN(BIF_P, am_true); BIF_RET(am_true); @@ -289,7 +308,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) if (dmon) erts_destroy_monitor(dmon); } - mon = erts_remove_monitor(&c_p->monitors, ref); + mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); res = ERTS_DEMONITOR_TRUE; @@ -298,7 +317,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) case ERTS_DSIG_PREP_CONNECTED: erts_smp_de_links_lock(dep); - mon = erts_remove_monitor(&c_p->monitors, ref); + mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); dmon = erts_remove_monitor(&dep->monitors, ref); erts_smp_de_links_unlock(dep); erts_smp_de_runlock(dep); @@ -325,7 +344,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) * the atom is stored there. Yield if necessary. */ code = erts_dsig_send_demonitor(&dsd, - c_p->id, + c_p->common.id, (mon->name != NIL ? 
mon->name : mon->pid), @@ -387,7 +406,7 @@ static int demonitor(Process *c_p, Eterm ref) goto done; /* Cannot be this monitor's ref */ } - mon = erts_lookup_monitor(c_p->monitors, ref); + mon = erts_lookup_monitor(ERTS_P_MONITORS(c_p), ref); if (!mon) { res = ERTS_DEMONITOR_FALSE; goto done; @@ -426,7 +445,7 @@ static int demonitor(Process *c_p, Eterm ref) to, ERTS_PROC_LOCK_LINK, ERTS_P2P_FLG_ALLOW_OTHER_X); - mon = erts_remove_monitor(&c_p->monitors, ref); + mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); #ifndef ERTS_SMP ASSERT(mon); #else @@ -440,7 +459,7 @@ static int demonitor(Process *c_p, Eterm ref) } if (rp) { ErtsMonitor *rmon; - rmon = erts_remove_monitor(&(rp->monitors), ref); + rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); if (rp != c_p) erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon != NULL) @@ -582,7 +601,7 @@ local_pid_monitor(Process *p, Eterm target) mon_ref = erts_make_ref(p); ERTS_BIF_PREP_RET(ret, mon_ref); - if (target == p->id) { + if (target == p->common.id) { return ret; } @@ -599,8 +618,8 @@ local_pid_monitor(Process *p, Eterm target) else { ASSERT(rp != p); - erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, target, NIL); - erts_add_monitor(&(rp->monitors), MON_TARGET, mon_ref, p->id, NIL); + erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, target, NIL); + erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, NIL); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } @@ -635,9 +654,9 @@ local_name_monitor(Process *p, Eterm target_name) UnUseTmpHeap(3,p); } else if (rp != p) { - erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, rp->id, + erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, rp->common.id, target_name); - erts_add_monitor(&(rp->monitors), MON_TARGET, mon_ref, p->id, + erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, target_name); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } @@ -689,16 +708,16 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2, erts_smp_de_links_lock(dep); - erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, p_trgt, + erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, p_trgt, p_name); - erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->id, + erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->common.id, d_name); erts_smp_de_links_unlock(dep); erts_smp_de_runlock(dep); erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); - code = erts_dsig_send_monitor(&dsd, p->id, target, mon_ref); + code = erts_dsig_send_monitor(&dsd, p->common.id, target, mon_ref); if (code == ERTS_DSIG_SEND_YIELD) ERTS_BIF_PREP_YIELD_RETURN(ret, p, mon_ref); else @@ -941,36 +960,39 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) } if (is_internal_port(BIF_ARG_1)) { - Port *pt = erts_id2port_sflgs(BIF_ARG_1, - BIF_P, - ERTS_PROC_LOCK_MAIN, - ERTS_PORT_SFLGS_DEAD); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); #ifdef ERTS_SMP - if (ERTS_PROC_PENDING_EXIT(BIF_P)) { - if (pt) - erts_smp_port_unlock(pt); + if (ERTS_PROC_PENDING_EXIT(BIF_P)) goto handle_pending_exit; - } #endif - l = erts_remove_link(&BIF_P->nlinks, BIF_ARG_1); - - ASSERT(pt || !l); - - if (pt) { - rl = erts_remove_link(&pt->nlinks, BIF_P->id); - erts_smp_port_unlock(pt); - if (rl) - erts_destroy_link(rl); - } + l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); - if (l) + if (l) { + Port *prt; + erts_destroy_link(l); + /* Send unlink signal */ + prt = erts_port_lookup(BIF_ARG_1, 
ERTS_PORT_SFLGS_DEAD); + if (prt) { + ErtsPortOpResult res; + Eterm ref; + Eterm *refp = erts_port_synchronous_ops ? &ref : NULL; +#ifdef DEBUG + ref = NIL; +#endif + res = erts_port_unlink(BIF_P, prt, BIF_P->common.id, refp); + + if (refp && res == ERTS_PORT_OP_SCHEDULED) { + ASSERT(is_internal_ref(ref)); + BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true); + } + } + } + BIF_RET(am_true); } else if (is_external_port(BIF_ARG_1) @@ -993,7 +1015,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) if (ERTS_PROC_PENDING_EXIT(BIF_P)) goto handle_pending_exit; #endif - l = erts_remove_link(&BIF_P->nlinks,BIF_ARG_1); + l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); @@ -1022,8 +1044,8 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) #endif case ERTS_DSIG_PREP_CONNECTED: - erts_remove_dist_link(&dld, BIF_P->id, BIF_ARG_1, dep); - code = erts_dsig_send_unlink(&dsd, BIF_P->id, BIF_ARG_1); + erts_remove_dist_link(&dld, BIF_P->common.id, BIF_ARG_1, dep); + code = erts_dsig_send_unlink(&dsd, BIF_P->common.id, BIF_ARG_1); erts_destroy_dist_link(&dld); if (code == ERTS_DSIG_SEND_YIELD) ERTS_BIF_YIELD_RETURN(BIF_P, am_true); @@ -1037,10 +1059,6 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) /* Internal pid... */ - /* process ok ? */ - if (internal_pid_index(BIF_ARG_1) >= erts_max_processes) - BIF_ERROR(BIF_P, BADARG); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); /* get process struct */ @@ -1059,7 +1077,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) #endif /* unlink and ignore errors */ - l = erts_remove_link(&BIF_P->nlinks,BIF_ARG_1); + l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); if (l != NULL) erts_destroy_link(l); @@ -1067,12 +1085,12 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); } else { - rl = erts_remove_link(&(rp->nlinks),BIF_P->id); + rl = erts_remove_link(&ERTS_P_LINKS(rp), BIF_P->common.id); if (rl != NULL) erts_destroy_link(rl); if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) { - trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->id); + trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->common.id); } if (rp != BIF_P) @@ -1345,15 +1363,28 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) */ if (is_internal_port(BIF_ARG_1)) { - Port *prt; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - prt = erts_id2port(BIF_ARG_1, NULL, 0); + Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP); + if (prt) { - erts_do_exit_port(prt, BIF_P->id, BIF_ARG_2); - erts_port_release(prt); + Eterm ref; + Eterm *refp = erts_port_synchronous_ops ? 
&ref : NULL; + ErtsPortOpResult res; + +#ifdef DEBUG + ref = NIL; +#endif + + res = erts_port_exit(BIF_P, 0, prt, BIF_P->common.id, BIF_ARG_2, refp); + + ERTS_BIF_CHK_EXITED(BIF_P); + + if (refp && res == ERTS_PORT_OP_SCHEDULED) { + ASSERT(is_internal_ref(ref)); + BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true); + } + } - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - ERTS_BIF_CHK_EXITED(BIF_P); + BIF_RET(am_true); } else if(is_external_port(BIF_ARG_1) @@ -1379,7 +1410,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) case ERTS_DSIG_PREP_NOT_CONNECTED: BIF_TRAP2(dexit_trap, BIF_P, BIF_ARG_1, BIF_ARG_2); case ERTS_DSIG_PREP_CONNECTED: - code = erts_dsig_send_exit2(&dsd, BIF_P->id, BIF_ARG_1, BIF_ARG_2); + code = erts_dsig_send_exit2(&dsd, BIF_P->common.id, BIF_ARG_1, BIF_ARG_2); if (code == ERTS_DSIG_SEND_YIELD) ERTS_BIF_YIELD_RETURN(BIF_P, am_true); BIF_RET(am_true); @@ -1397,9 +1428,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) */ ErtsProcLocks rp_locks; - if (internal_pid_index(BIF_ARG_1) >= erts_max_processes) - BIF_ERROR(BIF_P, BADARG); - if (BIF_ARG_1 == BIF_P->id) { + if (BIF_ARG_1 == BIF_P->common.id) { rp_locks = ERTS_PROC_LOCKS_ALL; rp = BIF_P; erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR); @@ -1417,7 +1446,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) * Send an exit signal. */ erts_send_exit_signal(BIF_P, - BIF_P->id, + BIF_P->common.id, rp, &rp_locks, BIF_ARG_2, @@ -1519,22 +1548,24 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) } /* * NOTE: It is important that we check for pending exit signals - * and handle them before flag trap_exit is set to true. - * For more info, see implementation of erts_send_exit_signal(). + * and handle them before returning if trap_exit is set to + * true. For more info, see implementation of + * erts_send_exit_signal(). */ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS); - ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS) - & erts_proc_lc_my_proc_locks(BIF_P)); - ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, - ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); if (trap_exit) - state = erts_smp_atomic32_read_bor_nob(&BIF_P->state, - ERTS_PSFLG_TRAP_EXIT); + state = erts_smp_atomic32_read_bor_mb(&BIF_P->state, + ERTS_PSFLG_TRAP_EXIT); else - state = erts_smp_atomic32_read_band_nob(&BIF_P->state, - ~ERTS_PSFLG_TRAP_EXIT); + state = erts_smp_atomic32_read_band_mb(&BIF_P->state, + ~ERTS_PSFLG_TRAP_EXIT); +#ifdef ERTS_SMP + if (ERTS_PROC_PENDING_EXIT(BIF_P)) { + erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); + ERTS_BIF_EXITED(BIF_P); + } +#endif + old_value = (state & ERTS_PSFLG_TRAP_EXIT) ? am_true : am_false; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); BIF_RET(old_value); } else if (BIF_ARG_1 == am_scheduler) { @@ -1617,11 +1648,13 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) goto error; } erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); - old_value = BIF_P->trace_flags & F_SENSITIVE ? am_true : am_false; + old_value = (ERTS_TRACE_FLAGS(BIF_P) & F_SENSITIVE + ? 
am_true + : am_false); if (is_sensitive) { - BIF_P->trace_flags |= F_SENSITIVE; + ERTS_TRACE_FLAGS(BIF_P) |= F_SENSITIVE; } else { - BIF_P->trace_flags &= ~F_SENSITIVE; + ERTS_TRACE_FLAGS(BIF_P) &= ~F_SENSITIVE; } erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); BIF_RET(old_value); @@ -1747,8 +1780,9 @@ ebif_bang_2(BIF_ALIST_2) #define SEND_BADARG (-4) #define SEND_USER_ERROR (-5) #define SEND_INTERNAL_ERROR (-6) +#define SEND_AWAIT_RESULT (-7) -Sint do_send(Process *p, Eterm to, Eterm msg, int suspend); +Sint do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp); static Sint remote_send(Process *p, DistEntry *dep, Eterm to, Eterm full_to, Eterm msg, int suspend) @@ -1802,7 +1836,7 @@ static Sint remote_send(Process *p, DistEntry *dep, } Sint -do_send(Process *p, Eterm to, Eterm msg, int suspend) { +do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { Eterm portid; Port *pt; Process* rp; @@ -1814,16 +1848,10 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { trace_send(p, to, msg); if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) save_calls(p, &exp_send); - - if (internal_pid_index(to) >= erts_max_processes) - return SEND_BADARG; - rp = erts_proc_lookup_raw(to); - - if (!rp) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(p); + rp = erts_proc_lookup_raw(to); + if (!rp) return 0; - } } else if (is_external_pid(to)) { dep = external_pid_dist_entry(to); if(dep == erts_this_dist_entry) { @@ -1832,7 +1860,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { "Discarding message %T from %T to %T in an old " "incarnation (%d) of this node (%d)\n", msg, - p->id, + p->common.id, to, external_pid_creation(to), erts_this_node->creation); @@ -1841,45 +1869,24 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { } return remote_send(p, dep, to, to, msg, suspend); } else if (is_atom(to)) { - - /* Need to virtual schedule out sending process - * because of lock wait. This is only necessary - * for internal port calling but the lock is bundled - * with name lookup. 
- */ - - if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(p, am_out); - } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(p, am_inactive); - } - erts_whereis_name(p, ERTS_PROC_LOCK_MAIN, - to, - &rp, 0, 0, - &pt); + Eterm id = erts_whereis_name_to_id(p, to); + + rp = erts_proc_lookup(id); + if (rp) + goto send_message; + pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP); if (pt) { - portid = pt->id; + portid = id; goto port_common; } - - /* Not a port virtually schedule the process back in */ - if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(p, am_in); - } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(p, am_active); - } if (IS_TRACED(p)) trace_send(p, to, msg); if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) save_calls(p, &exp_send); - if (!rp) { - return SEND_BADARG; - } + return SEND_BADARG; } else if (is_external_port(to) && (external_port_dist_entry(to) == erts_this_dist_entry)) { @@ -1888,50 +1895,56 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { "Discarding message %T from %T to %T in an old " "incarnation (%d) of this node (%d)\n", msg, - p->id, + p->common.id, to, external_port_creation(to), erts_this_node->creation); erts_send_error_to_logger(p->group_leader, dsbufp); return 0; } else if (is_internal_port(to)) { + int ret_val; portid = to; - /* schedule out calling process, waiting for lock*/ - if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(p, am_out); - } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(p, am_inactive); - } - pt = erts_id2port(to, p, ERTS_PROC_LOCK_MAIN); + + pt = erts_port_lookup(portid, ERTS_PORT_SFLGS_INVALID_LOOKUP); + port_common: - ERTS_SMP_LC_ASSERT(!pt || erts_lc_is_port_locked(pt)); + ret_val = 0; - /* We have waited for locks, trace schedule ports */ - if (pt && IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) { - trace_sched_ports_where(pt, am_in, am_command); - } - if (pt && erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) { - profile_runnable_port(pt, am_active); - } - - /* XXX let port_command handle the busy stuff !!! */ - if (pt && (pt->status & ERTS_PORT_SFLG_PORT_BUSY)) { - if (suspend) { - erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt); - if (erts_system_monitor_flags.busy_port) { - monitor_generic(p, am_busy_port, portid); + if (pt) { + int ps_flags = suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND; + *refp = NIL; + + switch (erts_port_command(p, ps_flags, pt, msg, refp)) { + case ERTS_PORT_OP_CALLER_EXIT: + /* We are exiting... 
*/ + return SEND_USER_ERROR; + case ERTS_PORT_OP_BUSY: + /* Nothing has been sent */ + if (suspend) + erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt); + return SEND_YIELD; + case ERTS_PORT_OP_BUSY_SCHEDULED: + /* Message was sent */ + if (suspend) { + erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt); + ret_val = SEND_YIELD_RETURN; + break; } + /* Fall through */ + case ERTS_PORT_OP_SCHEDULED: + if (is_not_nil(*refp)) { + ASSERT(is_internal_ref(*refp)); + ret_val = SEND_AWAIT_RESULT; + } + break; + case ERTS_PORT_OP_DROPPED: + case ERTS_PORT_OP_BADARG: + case ERTS_PORT_OP_DONE: + break; + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_command() result"); + break; } - /* Virtually schedule out the port before releasing */ - if (IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) { - trace_sched_ports_where(pt, am_out, am_command); - } - if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) { - profile_runnable_port(pt, am_inactive); - } - erts_port_release(pt); - return SEND_YIELD; } if (IS_TRACED(p)) /* trace once only !! */ @@ -1949,30 +1962,11 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { SEQ_TRACE_SEND, portid, p); } - /* XXX NO GC in port command */ - erts_port_command(p, p->id, pt, msg); - if (pt) { - /* Virtually schedule out the port before releasing */ - if (IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) { - trace_sched_ports_where(pt, am_out, am_command); - } - if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) { - profile_runnable_port(pt, am_inactive); - } - erts_port_release(pt); - } - /* Virtually schedule in process */ - if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(p, am_in); - } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(p, am_active); - } if (ERTS_PROC_IS_EXITING(p)) { KILL_CATCHES(p); /* Must exit */ return SEND_USER_ERROR; } - return 0; + return ret_val; } else if (is_tuple(to)) { /* Remote send */ int ret; tp = tuple_val(to); @@ -1988,47 +1982,24 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { dep = erts_sysname_to_connected_dist_entry(tp[2]); if (dep == erts_this_dist_entry) { + Eterm id; erts_deref_dist_entry(dep); if (IS_TRACED(p)) trace_send(p, to, msg); if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) save_calls(p, &exp_send); - - /* Need to virtual schedule out sending process - * because of lock wait. This is only necessary - * for internal port calling but the lock is bundled. - */ - - if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(p, am_out); - } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(p, am_inactive); - } - erts_whereis_name(p, ERTS_PROC_LOCK_MAIN, - tp[1], - &rp, 0, 0, - &pt); + id = erts_whereis_name_to_id(p, tp[1]); + + rp = erts_proc_lookup_raw(id); + if (rp) + goto send_message; + pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP); if (pt) { - portid = pt->id; + portid = id; goto port_common; } - /* Port lookup failed, virtually schedule the process - * back in. 
- */ - - if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(p, am_in); - } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(p, am_active); - } - - if (!rp) { - return 0; - } - goto send_message; + return 0; } ret = remote_send(p, dep, tp[1], to, msg, suspend); @@ -2067,6 +2038,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) { BIF_RETTYPE send_3(BIF_ALIST_3) { + Eterm ref; Process *p = BIF_P; Eterm to = BIF_ARG_1; Eterm msg = BIF_ARG_2; @@ -2090,12 +2062,18 @@ BIF_RETTYPE send_3(BIF_ALIST_3) if(!is_nil(l)) { BIF_ERROR(p, BADARG); } - - result = do_send(p, to, msg, suspend); + +#ifdef DEBUG + ref = NIL; +#endif + + result = do_send(p, to, msg, suspend, &ref); if (result > 0) { ERTS_VBUMP_REDS(p, result); BIF_RET(am_ok); - } else switch (result) { + } + + switch (result) { case 0: BIF_RET(am_ok); break; @@ -2118,6 +2096,9 @@ BIF_RETTYPE send_3(BIF_ALIST_3) ERTS_BIF_YIELD_RETURN(p, am_ok); else BIF_RET(am_nosuspend); + case SEND_AWAIT_RESULT: + ASSERT(is_internal_ref(ref)); + BIF_TRAP3(await_port_send_result_trap, p, ref, am_nosuspend, am_ok); case SEND_BADARG: BIF_ERROR(p, BADARG); break; @@ -2142,12 +2123,21 @@ BIF_RETTYPE send_2(BIF_ALIST_2) Eterm erl_send(Process *p, Eterm to, Eterm msg) { - Sint result = do_send(p, to, msg, !0); + Eterm ref; + Sint result; + +#ifdef DEBUG + ref = NIL; +#endif + + result = do_send(p, to, msg, !0, &ref); if (result > 0) { ERTS_VBUMP_REDS(p, result); BIF_RET(msg); - } else switch (result) { + } + + switch (result) { case 0: BIF_RET(msg); break; @@ -2159,6 +2149,9 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg) break; case SEND_YIELD_RETURN: ERTS_BIF_YIELD_RETURN(p, msg); + case SEND_AWAIT_RESULT: + ASSERT(is_internal_ref(ref)); + BIF_TRAP3(await_port_send_result_trap, p, ref, msg, msg); case SEND_BADARG: BIF_ERROR(p, BADARG); break; @@ -2428,9 +2421,7 @@ BIF_RETTYPE setelement_3(BIF_ALIST_3) /* copy the tuple */ resp = hp; - while (size--) { /* XXX use memcpy? 
*/ - *hp++ = *ptr++; - } + sys_memcpy(hp, ptr, sizeof(Eterm)*size); resp[ix] = BIF_ARG_3; BIF_RET(make_tuple(resp)); } @@ -2443,7 +2434,7 @@ BIF_RETTYPE make_tuple_2(BIF_ALIST_2) Eterm* hp; Eterm res; - if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0) { + if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0 || n > ERTS_MAX_TUPLE_SIZE) { BIF_ERROR(BIF_P, BADARG); } hp = HAlloc(BIF_P, n+1); @@ -2464,7 +2455,7 @@ BIF_RETTYPE make_tuple_3(BIF_ALIST_3) Eterm list = BIF_ARG_3; Eterm* tup; - if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0) { + if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0 || n > ERTS_MAX_TUPLE_SIZE) { error: BIF_ERROR(BIF_P, BADARG); } @@ -2516,11 +2507,16 @@ BIF_RETTYPE append_element_2(BIF_ALIST_2) Eterm res; if (is_not_tuple(BIF_ARG_1)) { + error: BIF_ERROR(BIF_P, BADARG); } - ptr = tuple_val(BIF_ARG_1); + ptr = tuple_val(BIF_ARG_1); arity = arityval(*ptr); - hp = HAlloc(BIF_P, arity + 2); + + if (arity + 1 > ERTS_MAX_TUPLE_SIZE) + goto error; + + hp = HAlloc(BIF_P, arity + 2); res = make_tuple(hp); *hp = make_arityval(arity+1); while (arity--) { @@ -2530,6 +2526,78 @@ BIF_RETTYPE append_element_2(BIF_ALIST_2) BIF_RET(res); } +BIF_RETTYPE insert_element_3(BIF_ALIST_3) +{ + Eterm* ptr; + Eterm* hp; + Uint arity; + Eterm res; + Sint ix; + + if (is_not_tuple(BIF_ARG_2) || is_not_small(BIF_ARG_1)) { + BIF_ERROR(BIF_P, BADARG); + } + + ptr = tuple_val(BIF_ARG_2); + arity = arityval(*ptr); + ix = signed_val(BIF_ARG_1); + + if ((ix < 1) || (ix > (arity + 1))) { + BIF_ERROR(BIF_P, BADARG); + } + + hp = HAlloc(BIF_P, arity + 1 + 1); + res = make_tuple(hp); + *hp = make_arityval(arity + 1); + + ix--; + arity -= ix; + + while (ix--) { *++hp = *++ptr; } + + *++hp = BIF_ARG_3; + + while(arity--) { *++hp = *++ptr; } + + BIF_RET(res); +} + +BIF_RETTYPE delete_element_2(BIF_ALIST_3) +{ + Eterm* ptr; + Eterm* hp; + Uint arity; + Eterm res; + Sint ix; + + if (is_not_tuple(BIF_ARG_2) || is_not_small(BIF_ARG_1)) { + BIF_ERROR(BIF_P, BADARG); + } + + ptr = tuple_val(BIF_ARG_2); + arity = arityval(*ptr); + ix = signed_val(BIF_ARG_1); + + if ((ix < 1) || (ix > arity) || (arity == 0)) { + BIF_ERROR(BIF_P, BADARG); + } + + hp = HAlloc(BIF_P, arity + 1 - 1); + res = make_tuple(hp); + *hp = make_arityval(arity - 1); + + ix--; + arity -= ix; + + while (ix--) { *++hp = *++ptr; } + + ++ptr; + + while(arity--) { *++hp = *++ptr; } + + BIF_RET(res); +} + /**********************************************************************/ /* convert an atom to a list of ascii integer */ @@ -2863,7 +2931,73 @@ BIF_RETTYPE float_to_list_1(BIF_ALIST_1) need = i*2; hp = HAlloc(BIF_P, need); BIF_RET(buf_to_intlist(&hp, fbuf, i, NIL)); - } +} + +BIF_RETTYPE float_to_list_2(BIF_ALIST_2) +{ + const static int arity_two = make_arityval(2); + int decimals = SYS_DEFAULT_FLOAT_DECIMALS; + int compact = 0; + enum fmt_type_ { + FMT_LEGACY, + FMT_FIXED, + FMT_SCIENTIFIC + } fmt_type = FMT_LEGACY; + Eterm list = BIF_ARG_2; + Eterm arg; + int i; + Uint need; + Eterm* hp; + FloatDef f; + char fbuf[256]; + + /* check the arguments */ + if (is_not_float(BIF_ARG_1)) + goto badarg; + + for(; is_list(list); list = CDR(list_val(list))) { + arg = CAR(list_val(list)); + if (arg == am_compact) { + compact = 1; + continue; + } else if (is_tuple(arg)) { + Eterm* tp = tuple_val(arg); + if (*tp == arity_two && is_small(tp[2])) { + decimals = signed_val(tp[2]); + if (decimals > 0 && decimals < sizeof(fbuf) - 6 /* "X." 
++ "e+YY" */) + switch (tp[1]) { + case am_decimals: + fmt_type = FMT_FIXED; + continue; + case am_scientific: + fmt_type = FMT_SCIENTIFIC; + continue; + } + } + } + goto badarg; + } + if (is_not_nil(list)) { + goto badarg; + } + + GET_DOUBLE(BIF_ARG_1, f); + + if (fmt_type == FMT_FIXED) { + if ((i = sys_double_to_chars_fast(f.fd, fbuf, sizeof(fbuf), + decimals, compact)) <= 0) + goto badarg; + } else { + if ((i = sys_double_to_chars_ext(f.fd, fbuf, sizeof(fbuf), decimals)) <= 0) + goto badarg; + } + + need = i*2; + hp = HAlloc(BIF_P, need); + BIF_RET(buf_to_intlist(&hp, fbuf, i, NIL)); +badarg: + BIF_ERROR(BIF_P, BADARG); +} /**********************************************************************/ @@ -3118,7 +3252,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1) Eterm* hp; int len; - if ((len = list_length(list)) < 0) { + if ((len = list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) { BIF_ERROR(BIF_P, BADARG); } @@ -3140,7 +3274,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1) BIF_RETTYPE self_0(BIF_ALIST_0) { - BIF_RET(BIF_P->id); + BIF_RET(BIF_P->common.id); } /**********************************************************************/ @@ -3177,11 +3311,9 @@ static erts_smp_spinlock_t make_ref_lock; static erts_smp_mtx_t ports_snapshot_mtx; erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */ -Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]) +void +erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]) { - Eterm* hp = buffer; - Uint32 ref0, ref1, ref2; - erts_smp_spin_lock(&make_ref_lock); reference0++; @@ -3193,24 +3325,36 @@ Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]) } } - ref0 = reference0; - ref1 = reference1; - ref2 = reference2; + ref[0] = reference0; + ref[1] = reference1; + ref[2] = reference2; erts_smp_spin_unlock(&make_ref_lock); +} - write_ref_thing(hp, ref0, ref1, ref2); +Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]) +{ + Eterm* hp = buffer; + Uint32 ref[ERTS_MAX_REF_NUMBERS]; + + erts_make_ref_in_array(ref); + write_ref_thing(hp, ref[0], ref[1], ref[2]); return make_internal_ref(hp); } Eterm erts_make_ref(Process *p) { Eterm* hp; + Uint32 ref[ERTS_MAX_REF_NUMBERS]; ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); hp = HAlloc(p, REF_THING_SIZE); - return erts_make_ref_in_buffer(hp); + + erts_make_ref_in_array(ref); + write_ref_thing(hp, ref[0], ref[1], ref[2]); + + return make_internal_ref(hp); } BIF_RETTYPE make_ref_0(BIF_ALIST_0) @@ -3474,7 +3618,7 @@ BIF_RETTYPE garbage_collect_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); } - if (BIF_P->id == BIF_ARG_1) + if (BIF_P->common.id == BIF_ARG_1) rp = BIF_P; else { #ifdef ERTS_SMP @@ -3514,71 +3658,23 @@ BIF_RETTYPE garbage_collect_0(BIF_ALIST_0) } /**********************************************************************/ -/* Return a list of active ports */ +/* + * The erlang:processes/0 BIF. + */ -BIF_RETTYPE ports_0(BIF_ALIST_0) +BIF_RETTYPE processes_0(BIF_ALIST_0) { - Eterm res = NIL; - Eterm* port_buf = erts_alloc(ERTS_ALC_T_TMP, - sizeof(Eterm)*erts_max_ports); - Eterm* pp = port_buf; - Eterm* dead_ports; - int alive, dead; - Uint32 next_ss; - int i; - - /* To get a consistent snapshot... - * We add alive ports from start of the buffer - * while dying ports are added from the other end by the killing threads. 
- */ - - erts_smp_mtx_lock(&ports_snapshot_mtx); /* One snapshot at a time */ - - erts_smp_atomic_set_nob(&erts_dead_ports_ptr, - (erts_aint_t) (port_buf + erts_max_ports)); - - next_ss = erts_smp_atomic32_inc_read_relb(&erts_ports_snapshot); - - for (i = erts_max_ports-1; i >= 0; i--) { - Port* prt = &erts_port[i]; - erts_smp_port_state_lock(prt); - if (!(prt->status & ERTS_PORT_SFLGS_DEAD) - && prt->snapshot != next_ss) { - ASSERT(prt->snapshot == next_ss - 1); - *pp++ = prt->id; - prt->snapshot = next_ss; /* Consumed by this snapshot */ - } - erts_smp_port_state_unlock(prt); - } - - dead_ports = (Eterm*)erts_smp_atomic_xchg_nob(&erts_dead_ports_ptr, - (erts_aint_t) NULL); - erts_smp_mtx_unlock(&ports_snapshot_mtx); - - ASSERT(pp <= dead_ports); - - alive = pp - port_buf; - dead = port_buf + erts_max_ports - dead_ports; - - ASSERT((alive+dead) <= erts_max_ports); - - if (alive+dead > 0) { - erts_aint_t i; - Eterm *hp = HAlloc(BIF_P, (alive+dead)*2); - - for (i = 0; i < alive; i++) { - res = CONS(hp, port_buf[i], res); - hp += 2; - } - for (i = 0; i < dead; i++) { - res = CONS(hp, dead_ports[i], res); - hp += 2; - } - } + return erts_ptab_list(BIF_P, &erts_proc); +} - erts_free(ERTS_ALC_T_TMP, port_buf); +/**********************************************************************/ +/* + * The erlang:ports/0 BIF. + */ - BIF_RET(res); +BIF_RETTYPE ports_0(BIF_ALIST_0) +{ + return erts_ptab_list(BIF_P, &erts_port); } /**********************************************************************/ @@ -4208,12 +4304,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_RET(old_value); } } else if (BIF_ARG_1 == make_small(1)) { - Uint i; + int i, max; ErlMessage* mp; erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_smp_thr_progress_block(); - for (i = 0; i < erts_max_processes; i++) { + max = erts_ptab_max(&erts_proc); + for (i = 0; i < max; i++) { Process *p = erts_pix2proc(i); if (p) { #ifdef USE_VM_PROBES @@ -4560,6 +4657,8 @@ void erts_init_bif(void) am_format_cpu_topology, 1); await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3); + await_port_send_result_trap + = erts_export_put(am_erts_internal, am_await_port_send_result, 3); await_sched_wall_time_mod_trap = erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2); erts_smp_atomic32_init_nob(&sched_wall_time, 0); @@ -4575,19 +4674,18 @@ bif erlang:send_to_logger/2 BIF_RETTYPE send_to_logger_2(BIF_ALIST_2) { byte *buf; - int len; + ErlDrvSizeT len; if (!is_atom(BIF_ARG_1) || !(is_list(BIF_ARG_2) || is_nil(BIF_ARG_1))) { BIF_ERROR(BIF_P,BADARG); } - len = io_list_len(BIF_ARG_2); - if (len < 0) + if (erts_iolist_size(BIF_ARG_2, &len) != 0) BIF_ERROR(BIF_P,BADARG); else if (len == 0) buf = ""; else { #ifdef DEBUG - int len2; + ErlDrvSizeT len2; #endif buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, len+1); #ifdef DEBUG @@ -4595,7 +4693,7 @@ BIF_RETTYPE send_to_logger_2(BIF_ALIST_2) #else (void) #endif - io_list_to_buf(BIF_ARG_2, buf, len); + erts_iolist_to_buf(BIF_ARG_2, buf, len); ASSERT(len2 == len); buf[len] = '\0'; switch (BIF_ARG_1) { @@ -4689,7 +4787,6 @@ BIF_RETTYPE dt_prepend_vm_tag_data_1(BIF_ALIST_1) #ifdef USE_VM_PROBES Eterm b; Eterm *hp; - hp = HAlloc(BIF_P,2); if (is_binary((DT_UTAG(BIF_P)))) { Uint sz = binary_size(DT_UTAG(BIF_P)); int i; @@ -4706,6 +4803,7 @@ BIF_RETTYPE dt_prepend_vm_tag_data_1(BIF_ALIST_1) } else { b = new_binary(BIF_P,(byte *)"\0",1); } + hp = HAlloc(BIF_P,2); BIF_RET(CONS(hp,b,BIF_ARG_1)); #else BIF_RET(BIF_ARG_1); @@ -4716,7 +4814,6 @@ BIF_RETTYPE dt_append_vm_tag_data_1(BIF_ALIST_1) #ifdef 
USE_VM_PROBES Eterm b; Eterm *hp; - hp = HAlloc(BIF_P,2); if (is_binary((DT_UTAG(BIF_P)))) { Uint sz = binary_size(DT_UTAG(BIF_P)); int i; @@ -4733,6 +4830,7 @@ BIF_RETTYPE dt_append_vm_tag_data_1(BIF_ALIST_1) } else { b = new_binary(BIF_P,(byte *)"\0",1); } + hp = HAlloc(BIF_P,2); BIF_RET(CONS(hp,BIF_ARG_1,b)); #else BIF_RET(BIF_ARG_1); @@ -4756,14 +4854,14 @@ BIF_RETTYPE dt_spread_tag_1(BIF_ALIST_1) #ifdef DTRACE_TAG_HARDDEBUG erts_fprintf(stderr, "Dtrace -> (%T) start spreading tag %T\r\n", - BIF_P->id,DT_UTAG(BIF_P)); + BIF_P->common.id,DT_UTAG(BIF_P)); #endif } else { DT_UTAG_FLAGS(BIF_P) &= ~DT_UTAG_SPREADING; #ifdef DTRACE_TAG_HARDDEBUG erts_fprintf(stderr, "Dtrace -> (%T) stop spreading tag %T\r\n", - BIF_P->id,DT_UTAG(BIF_P)); + BIF_P->common.id,DT_UTAG(BIF_P)); #endif } } @@ -4789,7 +4887,7 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1) #ifdef DTRACE_TAG_HARDDEBUG erts_fprintf(stderr, "Dtrace -> (%T) restore Killing tag!\r\n", - BIF_P->id); + BIF_P->common.id); #endif } DT_UTAG(BIF_P) = NIL; @@ -4806,12 +4904,12 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1) erts_fprintf(stderr, "Dtrace -> (%T) restore stop spreading " "tag %T\r\n", - BIF_P->id, tpl[2]); + BIF_P->common.id, tpl[2]); } else if ((x & DT_UTAG_SPREADING) && !(DT_UTAG_FLAGS(BIF_P) & DT_UTAG_SPREADING)) { erts_fprintf(stderr, "Dtrace -> (%T) restore start spreading " - "tag %T\r\n",BIF_P->id,tpl[2]); + "tag %T\r\n",BIF_P->common.id,tpl[2]); } #endif DT_UTAG_FLAGS(BIF_P) = x; diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h index 7cb2c78815..4e456988a3 100644 --- a/erts/emulator/beam/bif.h +++ b/erts/emulator/beam/bif.h @@ -59,6 +59,8 @@ do { \ } while(0) #define BUMP_REDS(p, gc) do { \ + ASSERT(p); \ + ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\ (p)->fcalls -= (gc); \ if ((p)->fcalls < 0) { \ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \ @@ -322,27 +324,6 @@ do { \ ERTS_BIF_EXITED((PROC)); \ } while (0) -#ifdef ERTS_SMP -#define ERTS_SMP_BIF_CHK_PENDING_EXIT(P, L) \ -do { \ - ERTS_SMP_LC_ASSERT((L) == erts_proc_lc_my_proc_locks((P))); \ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & (L)); \ - if (!((L) & ERTS_PROC_LOCK_STATUS)) \ - erts_smp_proc_lock((P), ERTS_PROC_LOCK_STATUS); \ - if (ERTS_PROC_PENDING_EXIT((P))) { \ - erts_handle_pending_exit((P), (L)|ERTS_PROC_LOCK_STATUS); \ - erts_smp_proc_unlock((P), \ - (((L)|ERTS_PROC_LOCK_STATUS) \ - & ~ERTS_PROC_LOCK_MAIN)); \ - ERTS_BIF_EXITED((P)); \ - } \ - if (!((L) & ERTS_PROC_LOCK_STATUS)) \ - erts_smp_proc_unlock((P), ERTS_PROC_LOCK_STATUS); \ -} while (0) -#else -#define ERTS_SMP_BIF_CHK_PENDING_EXIT(P, L) -#endif - /* * The ERTS_BIF_*_AWAIT_X_*_TRAP makros either exits the caller, or * sets up a trap to erlang:await_proc_exit/3. diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index f7dad2767f..a79feb6da3 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -31,432 +31,234 @@ # # Important: Use "ubif" for guard BIFs and operators; use "bif" for ordinary BIFs. # -# Add new BIFs to the end of the file. Do not bother adding a "packaged BIF name" -# (such as 'erl.lang.number'); if/when packages will be supported we will add -# all those names. +# Add new BIFs to the end of the file. # # Note: Guards BIFs require special support in the compiler (to be able to actually # call them from within a guard). 
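
A minimal, self-contained C sketch of what these bif/ubif declarations amount to at run time: each line contributes one row of a generated dispatch table keyed on module, function name and arity, and "ubif" rows are the ones the compiler is additionally allowed to call from guard tests. Everything below (the row type, the guard_safe field, the sample functions) is invented for illustration and is not the emulator's real generated table:

#include <stdio.h>
#include <string.h>

/* Illustrative analogue of a generated BIF table row; the real emulator
 * type uses atom terms and trace entries instead of plain strings. */
typedef long Term;
typedef Term (*BifFn)(Term);

static Term my_abs_1(Term x) { return x < 0 ? -x : x; }  /* stands in for erlang:abs/1 */
static Term my_hd_1(Term x)  { return x; }               /* dummy stand-in for erlang:hd/1 */

struct bif_row {
    const char *module;
    const char *name;
    int arity;
    int guard_safe;   /* 1 for "ubif" rows, 0 for plain "bif" rows */
    BifFn fn;
};

static const struct bif_row bif_table[] = {
    { "erlang", "abs", 1, 1, my_abs_1 },
    { "erlang", "hd",  1, 1, my_hd_1  },
};

/* Resolve a call against the table, the way a loader would. */
static BifFn lookup(const char *m, const char *f, int a)
{
    size_t i;
    for (i = 0; i < sizeof(bif_table)/sizeof(bif_table[0]); i++)
        if (bif_table[i].arity == a
            && strcmp(bif_table[i].module, m) == 0
            && strcmp(bif_table[i].name, f) == 0)
            return bif_table[i].fn;
    return NULL;
}

int main(void)
{
    BifFn f = lookup("erlang", "abs", 1);
    printf("%ld\n", f ? f(-42) : -1);   /* prints 42 */
    return 0;
}

The real table is generated from this file at build time; the sketch only shows why module, name and arity are enough to resolve a call, and why guard BIFs need to be marked specially.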
# ubif erlang:abs/1 -ubif 'erl.lang.number':abs/1 ebif_abs_1 bif erlang:adler32/1 -bif 'erl.util.crypt.adler32':sum/1 ebif_adler32_1 bif erlang:adler32/2 -bif 'erl.util.crypt.adler32':sum/2 ebif_adler32_2 bif erlang:adler32_combine/3 -bif 'erl.util.crypt.adler32':combine/3 ebif_adler32_combine_3 bif erlang:apply/3 -bif 'erl.lang':apply/3 ebif_apply_3 bif erlang:atom_to_list/1 -bif 'erl.lang.atom':to_string/1 ebif_atom_to_string_1 atom_to_list_1 bif erlang:binary_to_list/1 -bif 'erl.lang.binary':to_list/1 ebif_binary_to_list_1 bif erlang:binary_to_list/3 -bif 'erl.lang.binary':to_list/3 ebif_binary_to_list_3 bif erlang:binary_to_term/1 -bif 'erl.lang.binary':to_term/1 ebif_binary_to_term_1 bif erlang:check_process_code/2 -bif 'erl.system.code':check_process/2 ebif_check_process_code_2 bif erlang:crc32/1 -bif 'erl.util.crypt.crc32':sum/1 ebif_crc32_1 bif erlang:crc32/2 -bif 'erl.util.crypt.crc32':sum/2 ebif_crc32_2 bif erlang:crc32_combine/3 -bif 'erl.util.crypt.crc32':combine/3 ebif_crc32_combine_3 bif erlang:date/0 -bif 'erl.util.date':today/0 ebif_date_0 bif erlang:delete_module/1 -bif 'erl.system.code':delete/1 ebif_delete_module_1 bif erlang:display/1 -bif 'erl.system.debug':display/1 ebif_display_1 bif erlang:display_string/1 -bif 'erl.system.debug':display_string/1 ebif_display_string_1 bif erlang:display_nl/0 -bif 'erl.system.debug':display_nl/0 ebif_display_nl_0 ubif erlang:element/2 -ubif 'erl.lang.tuple':element/2 ebif_element_2 bif erlang:erase/0 -bif 'erl.lang.proc.pdict':erase/0 ebif_erase_0 bif erlang:erase/1 -bif 'erl.lang.proc.pdict':erase/1 ebif_erase_1 bif erlang:exit/1 -bif 'erl.lang':exit/1 ebif_exit_1 bif erlang:exit/2 -bif 'erl.lang.proc':signal/2 ebif_signal_2 exit_2 bif erlang:external_size/1 -bif 'erl.lang.term':external_size/1 ebif_external_size_1 bif erlang:external_size/2 -bif 'erl.lang.term':external_size/2 ebif_external_size_2 ubif erlang:float/1 -ubif 'erl.lang.number':to_float/1 ebif_to_float_1 float_1 bif erlang:float_to_list/1 -bif 'erl.lang.float':to_string/1 ebif_float_to_string_1 float_to_list_1 +bif erlang:float_to_list/2 bif erlang:fun_info/2 -bif 'erl.lang.function':info/2 ebif_fun_info_2 bif erlang:garbage_collect/0 -bif 'erl.system':garbage_collect/0 ebif_garbage_collect_0 bif erlang:garbage_collect/1 -bif 'erl.system':garbage_collect/1 ebif_garbage_collect_1 bif erlang:get/0 -bif 'erl.lang.proc.pdict':get/0 ebif_get_0 bif erlang:get/1 -bif 'erl.lang.proc.pdict':get/1 ebif_get_1 bif erlang:get_keys/1 -bif 'erl.lang.proc.pdict':get_keys/1 ebif_get_keys_1 bif erlang:group_leader/0 -bif 'erl.lang.proc':group_leader/0 ebif_group_leader_0 bif erlang:group_leader/2 -bif 'erl.lang.proc':set_group_leader/2 ebif_group_leader_2 bif erlang:halt/0 -bif 'erl.lang.system':halt/0 ebif_halt_0 bif erlang:halt/1 -bif 'erl.lang.system':halt/1 ebif_halt_1 bif erlang:halt/2 -bif 'erl.lang.system':halt/2 ebif_halt_2 bif erlang:phash/2 bif erlang:phash2/1 bif erlang:phash2/2 -bif 'erl.lang.term':hash/1 ebif_phash2_1 -bif 'erl.lang.term':hash/2 ebif_phash2_2 ubif erlang:hd/1 -ubif 'erl.lang.list':hd/1 ebif_hd_1 bif erlang:integer_to_list/1 -bif 'erl.lang.integer':to_string/1 ebif_integer_to_string_1 integer_to_list_1 bif erlang:is_alive/0 -bif 'erl.lang.node':is_alive/0 ebif_is_alive_0 ubif erlang:length/1 -ubif 'erl.lang.list':length/1 ebif_length_1 bif erlang:link/1 -bif 'erl.lang.proc':link/1 ebif_link_1 bif erlang:list_to_atom/1 -bif 'erl.lang.atom':from_string/1 ebif_string_to_atom_1 list_to_atom_1 bif erlang:list_to_binary/1 -bif 'erl.lang.binary':from_list/1 
ebif_list_to_binary_1 bif erlang:list_to_float/1 -bif 'erl.lang.float':from_string/1 ebif_string_to_float_1 list_to_float_1 bif erlang:list_to_integer/1 -bif 'erl.lang.integer':from_string/1 ebif_string_to_integer_1 list_to_integer_1 bif erlang:list_to_pid/1 -bif 'erl.lang.proc':string_to_pid/1 ebif_string_to_pid_1 list_to_pid_1 bif erlang:list_to_tuple/1 -bif 'erl.lang.tuple':from_list/1 ebif_list_to_tuple_1 bif erlang:loaded/0 -bif 'erl.system.code':loaded/0 ebif_loaded_0 bif erlang:localtime/0 -bif 'erl.util.date':local/0 ebif_localtime_0 bif erlang:localtime_to_universaltime/2 -bif 'erl.util.date':local_to_utc/2 ebif_localtime_to_universaltime_2 bif erlang:make_ref/0 -bif 'erl.lang.ref':new/0 ebif_make_ref_0 bif erlang:md5/1 -bif 'erl.util.crypt.md5':digest/1 ebif_md5_1 bif erlang:md5_init/0 -bif 'erl.util.crypt.md5':init/0 ebif_md5_init_0 bif erlang:md5_update/2 -bif 'erl.util.crypt.md5':update/2 ebif_md5_update_2 bif erlang:md5_final/1 -bif 'erl.util.crypt.md5':final/1 ebif_md5_final_1 bif erlang:module_loaded/1 -bif 'erl.system.code':is_loaded/1 ebif_is_loaded_1 module_loaded_1 bif erlang:function_exported/3 -bif 'erl.system.code':is_loaded/3 ebif_is_loaded_3 function_exported_3 bif erlang:monitor_node/2 -bif 'erl.lang.node':monitor/2 ebif_monitor_node_2 bif erlang:monitor_node/3 -bif 'erl.lang.node':monitor/3 ebif_monitor_node_3 ubif erlang:node/1 -ubif 'erl.lang.node':node/1 ebif_node_1 ubif erlang:node/0 -ubif 'erl.lang.node':node/0 ebif_node_0 bif erlang:nodes/1 -bif 'erl.lang.node':nodes/1 ebif_nodes_1 bif erlang:now/0 -bif 'erl.system':now/0 ebif_now_0 bif erlang:open_port/2 -bif 'erl.lang.port':open/2 ebif_open_port_2 open_port_2 bif erlang:pid_to_list/1 -bif 'erl.lang.proc':pid_to_string/1 ebif_pid_to_string_1 pid_to_list_1 -bif erlang:port_info/1 -bif 'erl.lang.port':info/1 ebif_port_info_1 -bif erlang:port_info/2 -bif 'erl.lang.port':info/2 ebif_port_info_2 bif erlang:ports/0 -bif 'erl.lang.node':ports/0 ebif_ports_0 bif erlang:pre_loaded/0 -bif 'erl.system.code':preloaded/0 ebif_pre_loaded_0 bif erlang:process_flag/2 -bif 'erl.lang.proc':set_flag/2 ebif_process_flag_2 bif erlang:process_flag/3 -bif 'erl.lang.proc':set_flag/3 ebif_process_flag_3 bif erlang:process_info/1 -bif 'erl.lang.proc':info/1 ebif_process_info_1 bif erlang:process_info/2 -bif 'erl.lang.proc':info/2 ebif_process_info_2 bif erlang:processes/0 -bif 'erl.lang.node':processes/0 ebif_processes_0 bif erlang:purge_module/1 -bif 'erl.system.code':purge/1 ebif_purge_module_1 bif erlang:put/2 -bif 'erl.lang.proc.pdict':put/2 ebif_put_2 bif erlang:register/2 -bif 'erl.lang.node':register/2 ebif_register_2 bif erlang:registered/0 -bif 'erl.lang.node':registered/0 ebif_registered_0 ubif erlang:round/1 -ubif 'erl.lang.number':round/1 ebif_round_1 ubif erlang:self/0 -ubif 'erl.lang.proc':self/0 ebif_self_0 bif erlang:setelement/3 -bif 'erl.lang.tuple':setelement/3 ebif_setelement_3 ubif erlang:size/1 -ubif 'erl.lang.term':size/1 ebif_size_1 bif erlang:spawn/3 -bif 'erl.lang.proc':spawn/3 ebif_spawn_3 bif erlang:spawn_link/3 -bif 'erl.lang.proc':spawn_link/3 ebif_spawn_link_3 bif erlang:split_binary/2 -bif 'erl.lang.binary':split/2 ebif_split_binary_2 bif erlang:statistics/1 -bif 'erl.system':statistics/1 ebif_statistics_1 bif erlang:term_to_binary/1 -bif 'erl.lang.binary':from_term/1 ebif_term_to_binary_1 bif erlang:term_to_binary/2 -bif 'erl.lang.binary':from_term/2 ebif_term_to_binary_2 bif erlang:throw/1 -bif 'erl.lang':throw/1 ebif_throw_1 bif erlang:time/0 -bif 'erl.util.date':time_of_day/0 ebif_time_0 ubif 
erlang:tl/1 -ubif 'erl.lang.list':tl/1 ebif_tl_1 ubif erlang:trunc/1 -ubif 'erl.lang.number':trunc/1 ebif_trunc_1 bif erlang:tuple_to_list/1 -bif 'erl.lang.tuple':to_list/1 ebif_tuple_to_list_1 bif erlang:universaltime/0 -bif 'erl.util.date':utc/0 ebif_universaltime_0 bif erlang:universaltime_to_localtime/1 -bif 'erl.util.date':utc_to_local/1 ebif_universaltime_to_localtime_1 bif erlang:unlink/1 -bif 'erl.lang.proc':unlink/1 ebif_unlink_1 bif erlang:unregister/1 -bif 'erl.lang.node':unregister/1 ebif_unregister_1 bif erlang:whereis/1 -bif 'erl.lang.node':whereis/1 ebif_whereis_1 bif erlang:spawn_opt/1 -bif 'erl.lang.proc':spawn_opt/1 ebif_spawn_opt_1 bif erlang:setnode/2 bif erlang:setnode/3 bif erlang:dist_exit/3 -bif erlang:port_call/2 -bif 'erl.lang.port':call/2 ebif_port_call_2 -bif erlang:port_call/3 -bif 'erl.lang.port':call/3 ebif_port_call_3 -bif erlang:port_command/2 -bif 'erl.lang.port':command/2 ebif_port_command_2 -bif erlang:port_command/3 -bif 'erl.lang.port':command/3 ebif_port_command_3 -bif erlang:port_control/3 -bif 'erl.lang.port':control/3 ebif_port_control_3 -bif erlang:port_close/1 -bif 'erl.lang.port':close/1 ebif_port_close_1 -bif erlang:port_connect/2 -bif 'erl.lang.port':connect/2 ebif_port_connect_2 -bif erlang:port_set_data/2 -bif 'erl.lang.port':set_data/2 ebif_port_set_data_2 -bif erlang:port_get_data/1 -bif 'erl.lang.port':get_data/1 ebif_port_get_data_1 +# Static native functions in erts_internal +bif erts_internal:port_info/1 +bif erts_internal:port_info/2 +bif erts_internal:port_call/3 +bif erts_internal:port_command/3 +bif erts_internal:port_control/3 +bif erts_internal:port_close/1 +bif erts_internal:port_connect/2 +bif erts_internal:port_set_data/2 +bif erts_internal:port_get_data/1 # Tracing & debugging. bif erlang:trace_pattern/2 -bif 'erl.system.debug':trace_pattern/2 ebif_trace_pattern_2 bif erlang:trace_pattern/3 -bif 'erl.system.debug':trace_pattern/3 ebif_trace_pattern_3 bif erlang:trace/3 -bif 'erl.system.debug':trace/3 ebif_trace_3 bif erlang:trace_info/2 -bif 'erl.system.debug':trace_info/2 ebif_trace_info_2 bif erlang:trace_delivered/1 -bif 'erl.system.debug':trace_delivered/1 ebif_trace_delivered_1 bif erlang:seq_trace/2 -bif 'erl.system.debug':seq_trace/2 ebif_seq_trace_2 bif erlang:seq_trace_info/1 -bif 'erl.system.debug':seq_trace_info/1 ebif_seq_trace_info_1 bif erlang:seq_trace_print/1 -bif 'erl.system.debug':seq_trace_print/1 ebif_seq_trace_print_1 bif erlang:seq_trace_print/2 -bif 'erl.system.debug':seq_trace_print/2 ebif_seq_trace_print_2 bif erlang:suspend_process/2 -bif 'erl.system.debug':suspend_process/2 ebif_suspend_process_2 bif erlang:resume_process/1 -bif 'erl.system.debug':resume_process/1 ebif_resume_process_1 bif erlang:process_display/2 -bif 'erl.system.debug':process_display/2 ebif_process_display_2 bif erlang:bump_reductions/1 -bif 'erl.lang.proc':bump_reductions/1 ebif_bump_reductions_1 bif math:cos/1 -bif 'erl.lang.math':cos/1 ebif_math_cos_1 bif math:cosh/1 -bif 'erl.lang.math':cosh/1 ebif_math_cosh_1 bif math:sin/1 -bif 'erl.lang.math':sin/1 ebif_math_sin_1 bif math:sinh/1 -bif 'erl.lang.math':sinh/1 ebif_math_sinh_1 bif math:tan/1 -bif 'erl.lang.math':tan/1 ebif_math_tan_1 bif math:tanh/1 -bif 'erl.lang.math':tanh/1 ebif_math_tanh_1 bif math:acos/1 -bif 'erl.lang.math':acos/1 ebif_math_acos_1 bif math:acosh/1 -bif 'erl.lang.math':acosh/1 ebif_math_acosh_1 bif math:asin/1 -bif 'erl.lang.math':asin/1 ebif_math_asin_1 bif math:asinh/1 -bif 'erl.lang.math':asinh/1 ebif_math_asinh_1 bif math:atan/1 -bif 
'erl.lang.math':atan/1 ebif_math_atan_1 bif math:atanh/1 -bif 'erl.lang.math':atanh/1 ebif_math_atanh_1 bif math:erf/1 -bif 'erl.lang.math':erf/1 ebif_math_erf_1 bif math:erfc/1 -bif 'erl.lang.math':erfc/1 ebif_math_erfc_1 bif math:exp/1 -bif 'erl.lang.math':exp/1 ebif_math_exp_1 bif math:log/1 -bif 'erl.lang.math':log/1 ebif_math_log_1 bif math:log10/1 -bif 'erl.lang.math':log10/1 ebif_math_log10_1 bif math:sqrt/1 -bif 'erl.lang.math':sqrt/1 ebif_math_sqrt_1 bif math:atan2/2 -bif 'erl.lang.math':atan2/2 ebif_math_atan2_2 bif math:pow/2 -bif 'erl.lang.math':pow/2 ebif_math_pow_2 bif erlang:start_timer/3 -bif 'erl.lang.timer':start/3 ebif_start_timer_3 bif erlang:send_after/3 -bif 'erl.lang.timer':send_after/3 ebif_send_after_3 bif erlang:cancel_timer/1 -bif 'erl.lang.timer':cancel/1 ebif_cancel_timer_1 bif erlang:read_timer/1 -bif 'erl.lang.timer':read/1 ebif_read_timer_1 bif erlang:make_tuple/2 -bif 'erl.lang.tuple':make/2 ebif_make_tuple_2 bif erlang:append_element/2 -bif 'erl.lang.tuple':append_element/2 ebif_append_element_2 bif erlang:make_tuple/3 bif erlang:system_flag/2 -bif 'erl.system':set_flag/2 ebif_system_flag_2 bif erlang:system_info/1 -bif 'erl.system':info/1 ebif_system_info_1 # New in R9C bif erlang:system_monitor/0 -bif 'erl.system':monitor/0 ebif_system_monitor_0 bif erlang:system_monitor/1 -bif 'erl.system':monitor/1 ebif_system_monitor_1 bif erlang:system_monitor/2 -bif 'erl.system':monitor/2 ebif_system_monitor_2 # Added 2006-11-07 bif erlang:system_profile/2 -bif 'erl.system':profile/2 ebif_system_profile_2 # End Added 2006-11-07 # Added 2007-01-17 bif erlang:system_profile/0 -bif 'erl.system':profile/0 ebif_system_profile_0 # End Added 2007-01-17 bif erlang:ref_to_list/1 -bif 'erl.lang.ref':to_string/1 ebif_ref_to_string_1 ref_to_list_1 bif erlang:port_to_list/1 -bif 'erl.lang.port':to_string/1 ebif_port_to_string_1 port_to_list_1 bif erlang:fun_to_list/1 -bif 'erl.lang.function':to_string/1 ebif_fun_to_string_1 fun_to_list_1 bif erlang:monitor/2 -bif 'erl.lang.proc':monitor/2 ebif_monitor_2 bif erlang:demonitor/1 -bif 'erl.lang.proc':demonitor/1 ebif_demonitor_1 bif erlang:demonitor/2 -bif 'erl.lang.proc':demonitor/2 ebif_demonitor_2 bif erlang:is_process_alive/1 -bif 'erl.lang.proc':is_alive/1 ebif_proc_is_alive_1 is_process_alive_1 bif erlang:error/1 error_1 -bif 'erl.lang':error/1 ebif_error_1 error_1 bif erlang:error/2 error_2 -bif 'erl.lang':error/2 ebif_error_2 error_2 bif erlang:raise/3 raise_3 -bif 'erl.lang':raise/3 ebif_raise_3 raise_3 bif erlang:get_stacktrace/0 -bif 'erl.lang.proc':get_stacktrace/0 ebif_get_stacktrace_0 bif erlang:is_builtin/3 -bif 'erl.system.code':is_builtin/3 ebif_is_builtin_3 ubif erlang:'and'/2 -ubif 'erl.lang.bool':'and'/2 ebif_and_2 ubif erlang:'or'/2 -ubif 'erl.lang.bool':'or'/2 ebif_or_2 ubif erlang:'xor'/2 -ubif 'erl.lang.bool':'xor'/2 ebif_xor_2 ubif erlang:'not'/1 -ubif 'erl.lang.bool':'not'/1 ebif_not_1 ubif erlang:'>'/2 sgt_2 -ubif 'erl.lang.term':greater/2 ebif_gt_2 sgt_2 ubif erlang:'>='/2 sge_2 -ubif 'erl.lang.term':greater_or_equal/2 ebif_ge_2 sge_2 ubif erlang:'<'/2 slt_2 -ubif 'erl.lang.term':less/2 ebif_lt_2 slt_2 ubif erlang:'=<'/2 sle_2 -ubif 'erl.lang.term':less_or_equal/2 ebif_le_2 sle_2 ubif erlang:'=:='/2 seq_2 -ubif 'erl.lang.term':equal/2 ebif_eq_2 seq_2 ubif erlang:'=='/2 seqeq_2 -ubif 'erl.lang.term':arith_equal/2 ebif_areq_2 seqeq_2 ubif erlang:'=/='/2 sneq_2 -ubif 'erl.lang.term':not_equal/2 ebif_neq_2 sneq_2 ubif erlang:'/='/2 sneqeq_2 -ubif 'erl.lang.term':not_arith_equal/2 ebif_nareq_2 sneqeq_2 ubif 
erlang:'+'/2 splus_2 -ubif 'erl.lang.number':plus/2 ebif_plus_2 splus_2 ubif erlang:'-'/2 sminus_2 -ubif 'erl.lang.number':minus/2 ebif_minus_2 sminus_2 ubif erlang:'*'/2 stimes_2 -ubif 'erl.lang.number':multiply/2 ebif_multiply_2 stimes_2 ubif erlang:'/'/2 div_2 -ubif 'erl.lang.number':divide/2 ebif_divide_2 div_2 ubif erlang:'div'/2 intdiv_2 -ubif 'erl.lang.integer':'div'/2 ebif_intdiv_2 ubif erlang:'rem'/2 -ubif 'erl.lang.integer':'rem'/2 ebif_rem_2 ubif erlang:'bor'/2 -ubif 'erl.lang.integer':'bor'/2 ebif_bor_2 ubif erlang:'band'/2 -ubif 'erl.lang.integer':'band'/2 ebif_band_2 ubif erlang:'bxor'/2 -ubif 'erl.lang.integer':'bxor'/2 ebif_bxor_2 ubif erlang:'bsl'/2 -ubif 'erl.lang.integer':'bsl'/2 ebif_bsl_2 ubif erlang:'bsr'/2 -ubif 'erl.lang.integer':'bsr'/2 ebif_bsr_2 ubif erlang:'bnot'/1 -ubif 'erl.lang.integer':'bnot'/1 ebif_bnot_1 ubif erlang:'-'/1 sminus_1 -ubif 'erl.lang.number':minus/1 ebif_minus_1 sminus_1 ubif erlang:'+'/1 splus_1 -ubif 'erl.lang.number':plus/1 ebif_plus_1 splus_1 # New operators in R8. These were the only operators missing. # erlang:send/2, erlang:append/2 and erlang:subtract/2 are now also @@ -464,45 +266,27 @@ ubif 'erl.lang.number':plus/1 ebif_plus_1 splus_1 # internal references have been updated to the new ebif_... entries. bif erlang:'!'/2 ebif_bang_2 -bif 'erl.lang.proc':send/2 ebif_send_2 send_2 bif erlang:send/2 -bif 'erl.lang':send/3 ebif_send_3 send_3 bif erlang:send/3 bif erlang:'++'/2 ebif_plusplus_2 -bif 'erl.lang.list':append/2 ebif_append_2 ebif_plusplus_2 bif erlang:append/2 bif erlang:'--'/2 ebif_minusminus_2 -bif 'erl.lang.list':subtract/2 ebif_list_subtract_2 ebif_minusminus_2 bif erlang:subtract/2 ubif erlang:is_atom/1 -ubif 'erl.lang.term':is_atom/1 ebif_is_atom_1 ubif erlang:is_list/1 -ubif 'erl.lang.term':is_list/1 ebif_is_list_1 ubif erlang:is_tuple/1 -ubif 'erl.lang.term':is_tuple/1 ebif_is_tuple_1 ubif erlang:is_float/1 -ubif 'erl.lang.term':is_float/1 ebif_is_float_1 ubif erlang:is_integer/1 -ubif 'erl.lang.term':is_integer/1 ebif_is_integer_1 ubif erlang:is_number/1 -ubif 'erl.lang.term':is_number/1 ebif_is_number_1 ubif erlang:is_pid/1 -ubif 'erl.lang.term':is_pid/1 ebif_is_pid_1 ubif erlang:is_port/1 -ubif 'erl.lang.term':is_port/1 ebif_is_port_1 ubif erlang:is_reference/1 -ubif 'erl.lang.term':is_reference/1 ebif_is_reference_1 ubif erlang:is_binary/1 -ubif 'erl.lang.term':is_binary/1 ebif_is_binary_1 ubif erlang:is_function/1 -ubif 'erl.lang.term':is_function/1 ebif_is_function_1 ubif erlang:is_function/2 -ubif 'erl.lang.term':is_function/2 ebif_is_function_2 ubif erlang:is_record/2 -ubif 'erl.lang.term':is_record/2 ebif_is_record_2 ubif erlang:is_record/3 -ubif 'erl.lang.term':is_record/3 ebif_is_record_3 bif erlang:match_spec_test/3 @@ -511,96 +295,53 @@ bif erlang:match_spec_test/3 # bif ets:all/0 -bif 'erl.lang.ets':all/0 ebif_ets_all_0 bif ets:new/2 -bif 'erl.lang.ets':new/2 ebif_ets_new_2 bif ets:delete/1 -bif 'erl.lang.ets':delete/1 ebif_ets_delete_1 bif ets:delete/2 -bif 'erl.lang.ets':delete/2 ebif_ets_delete_2 bif ets:delete_all_objects/1 -bif 'erl.lang.ets':delete_all_objects/1 ebif_ets_delete_all_objects_1 bif ets:delete_object/2 -bif 'erl.lang.ets':delete_object/2 ebif_ets_delete_object_2 bif ets:first/1 -bif 'erl.lang.ets':first/1 ebif_ets_first_1 bif ets:is_compiled_ms/1 -bif 'erl.lang.ets':is_compiled_ms/1 ebif_ets_is_compiled_ms_1 bif ets:lookup/2 -bif 'erl.lang.ets':lookup/2 ebif_ets_lookup_2 bif ets:lookup_element/3 -bif 'erl.lang.ets':lookup_element/3 ebif_ets_lookup_element_3 bif ets:info/1 -bif 
'erl.lang.ets':info/1 ebif_ets_info_1 bif ets:info/2 -bif 'erl.lang.ets':info/2 ebif_ets_info_2 bif ets:last/1 -bif 'erl.lang.ets':last/1 ebif_ets_last_1 bif ets:match/1 -bif 'erl.lang.ets':match/1 ebif_ets_match_1 bif ets:match/2 -bif 'erl.lang.ets':match/2 ebif_ets_match_2 bif ets:match/3 -bif 'erl.lang.ets':match/3 ebif_ets_match_3 bif ets:match_object/1 -bif 'erl.lang.ets':match_object/1 ebif_ets_match_object_1 bif ets:match_object/2 -bif 'erl.lang.ets':match_object/2 ebif_ets_match_object_2 bif ets:match_object/3 -bif 'erl.lang.ets':match_object/3 ebif_ets_match_object_3 bif ets:member/2 -bif 'erl.lang.ets':is_key/2 ebif_ets_member_2 bif ets:next/2 -bif 'erl.lang.ets':next/2 ebif_ets_next_2 bif ets:prev/2 -bif 'erl.lang.ets':prev/2 ebif_ets_prev_2 bif ets:insert/2 -bif 'erl.lang.ets':insert/2 ebif_ets_insert_2 bif ets:insert_new/2 -bif 'erl.lang.ets':insert_new/2 ebif_ets_insert_new_2 bif ets:rename/2 -bif 'erl.lang.ets':rename/2 ebif_ets_rename_2 bif ets:safe_fixtable/2 -bif 'erl.lang.ets':fixtable/2 ebif_ets_safe_fixtable_2 bif ets:slot/2 -bif 'erl.lang.ets':slot/2 ebif_ets_slot_2 bif ets:update_counter/3 -bif 'erl.lang.ets':update_counter/3 ebif_ets_update_counter_3 bif ets:select/1 -bif 'erl.lang.ets':select/1 ebif_ets_select_1 bif ets:select/2 -bif 'erl.lang.ets':select/2 ebif_ets_select_2 bif ets:select/3 -bif 'erl.lang.ets':select/3 ebif_ets_select_3 bif ets:select_count/2 -bif 'erl.lang.ets':select/2 ebif_ets_select_count_2 bif ets:select_reverse/1 -bif 'erl.lang.ets':select_reverse/1 ebif_ets_select_reverse_1 bif ets:select_reverse/2 -bif 'erl.lang.ets':select_reverse/2 ebif_ets_select_reverse_2 bif ets:select_reverse/3 -bif 'erl.lang.ets':select_reverse/3 ebif_ets_select_reverse_3 bif ets:select_delete/2 -bif 'erl.lang.ets':select_delete/2 ebif_ets_select_delete_2 bif ets:match_spec_compile/1 -bif 'erl.lang.ets':match_spec_compile/1 ebif_ets_match_spec_compile_1 bif ets:match_spec_run_r/3 -bif 'erl.lang.ets':match_spec_run_r/3 ebif_ets_match_spec_run_r_3 # # Bifs in os module. 
# bif os:putenv/2 -bif 'erl.system.os':setenv/2 ebif_os_setenv_2 os_putenv_2 bif os:getenv/0 -bif 'erl.system.os':getenv/0 ebif_os_getenv_0 bif os:getenv/1 -bif 'erl.system.os':getenv/1 ebif_os_getenv_1 bif os:getpid/0 -bif 'erl.system.os':pid/0 ebif_os_pid_0 os_getpid_0 bif os:timestamp/0 -bif 'erl.system.os':timestamp/0 ebif_os_timestamp_0 os_timestamp_0 # # Bifs in the erl_ddll module (the module actually does not exist) @@ -627,13 +368,9 @@ bif re:run/3 # bif lists:member/2 -bif 'erl.lang.list':is_element/2 ebif_list_is_element_2 lists_member_2 bif lists:reverse/2 -bif 'erl.lang.list':reverse/2 ebif_list_reverse_2 lists_reverse_2 bif lists:keymember/3 -bif 'erl.lang.list.keylist':is_element/3 ebif_keylist_is_element_3 lists_keymember_3 bif lists:keysearch/3 -bif 'erl.lang.list.keylist':search/3 ebif_keylist_search_3 lists_keysearch_3 bif lists:keyfind/3 # @@ -641,21 +378,13 @@ bif lists:keyfind/3 # bif erts_debug:disassemble/1 -bif 'erl.system.debug':disassemble/1 ebif_erts_debug_disassemble_1 bif erts_debug:breakpoint/2 -bif 'erl.system.debug':breakpoint/2 ebif_erts_debug_breakpoint_2 bif erts_debug:same/2 -bif 'erl.system.debug':same/2 ebif_erts_debug_same_2 bif erts_debug:flat_size/1 -bif 'erl.system.debug':flat_size/1 ebif_erts_debug_flat_size_1 bif erts_debug:get_internal_state/1 -bif 'erl.system.debug':get_internal_state/1 ebif_erts_debug_get_internal_state_1 bif erts_debug:set_internal_state/2 -bif 'erl.system.debug':set_internal_state/2 ebif_erts_debug_set_internal_state_2 bif erts_debug:display/1 -bif 'erl.system.debug':display/1 ebif_erts_debug_display_1 bif erts_debug:dist_ext_to_term/2 -bif 'erl.system.debug':dist_ext_to_term/2 ebif_erts_debug_dist_ext_to_term_2 bif erts_debug:instructions/0 # @@ -675,13 +404,9 @@ bif erts_debug:lock_counters/1 # bif code:get_chunk/2 -bif 'erl.system.code':get_chunk/2 ebif_code_get_chunk_2 bif code:module_md5/1 -bif 'erl.system.code':module_md5/1 ebif_code_module_md5_1 bif code:make_stub_module/3 -bif 'erl.system.code':make_stub_module/3 ebif_code_make_stub_module_3 bif code:is_module_native/1 -bif 'erl.system.code':is_native/1 ebif_code_is_native_1 code_is_module_native_1 # # New Bifs in R9C. 
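
The port operations above now live in erts_internal rather than erlang, and erts_init_bif() earlier in this diff registers an erts_internal:await_port_send_result/3 trap. A plausible reading, sketched below in plain C with invented names, is that the user-visible wrappers call the erts_internal BIF and, when the port signal cannot be answered immediately, the caller suspends on a wait reference and picks the result up later. The toy only models that "answer now or hand back a wait token" control flow, not the emulator's actual implementation:

#include <stdio.h>

/* Toy model: an operation either completes at once or hands back a
 * wait token that the caller resolves later (the trap idea). */
enum outcome { DONE, WAIT };

struct result {
    enum outcome tag;
    int value;    /* valid when tag == DONE */
    int token;    /* valid when tag == WAIT; identifies what to wait for */
};

static int port_busy = 1;   /* pretend the port is busy on the first try */

static struct result port_command(int data)
{
    struct result r = { DONE, 0, 0 };
    if (port_busy) {
        port_busy = 0;            /* it will be free on the next attempt */
        r.tag = WAIT;
        r.token = 4711;           /* hypothetical wait reference */
    } else {
        r.value = data * 2;       /* pretend the command succeeded */
    }
    return r;
}

static int await_result(int token, int data)
{
    /* In the toy, waiting just retries; in the emulator the process would
     * instead be rescheduled into an await-result trap until the port
     * answers the signal. */
    (void) token;
    return port_command(data).value;
}

int main(void)
{
    struct result r = port_command(21);
    int v = (r.tag == DONE) ? r.value : await_result(r.token, 21);
    printf("%d\n", v);   /* prints 42 */
    return 0;
}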
@@ -833,6 +558,8 @@ bif erlang:dt_append_vm_tag_data/1 # bif erlang:prepare_loading/2 bif erlang:finish_loading/1 +bif erlang:insert_element/3 +bif erlang:delete_element/2 # # Obsolete diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c index 3d2725e239..4441dab181 100644 --- a/erts/emulator/beam/binary.c +++ b/erts/emulator/beam/binary.c @@ -355,10 +355,10 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1) BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg) { Eterm bin; - Uint size; + ErlDrvSizeT size; byte* bytes; #ifdef DEBUG - int offset; + ErlDrvSizeT offset; #endif if (is_nil(arg)) { @@ -377,7 +377,7 @@ BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg) #ifdef DEBUG offset = #endif - io_list_to_buf(arg, (char*) bytes, size); + erts_iolist_to_buf(arg, (char*) bytes, size); ASSERT(offset == 0); BIF_RET(bin); diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index c1e9fe536d..9aa1e5f30d 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -61,16 +61,19 @@ extern char* erts_system_version[]; static void port_info(int to, void *to_arg) { - int i; - for (i = 0; i < erts_max_ports; i++) - print_port_info(to, to_arg, i); + int i, max = erts_ptab_max(&erts_port); + for (i = 0; i < max; i++) { + Port *p = erts_pix2port(i); + if (p) + print_port_info(p, to, to_arg); + } } void process_info(int to, void *to_arg) { - int i; - for (i = 0; i < erts_max_processes; i++) { + int i, max = erts_ptab_max(&erts_proc); + for (i = 0; i < max; i++) { Process *p = erts_pix2proc(i); if (p && p->i != ENULL) { if (!ERTS_PROC_IS_EXITING(p)) @@ -84,12 +87,12 @@ process_info(int to, void *to_arg) static void process_killer(void) { - int i, j; + int i, j, max = erts_ptab_max(&erts_proc); Process* rp; erts_printf("\n\nProcess Information\n\n"); erts_printf("--------------------------------------------------\n"); - for (i = erts_max_processes-1; i >= 0; i--) { + for (i = max-1; i >= 0; i--) { rp = erts_pix2proc(i); if (rp && rp->i != ENULL) { int br; @@ -196,7 +199,7 @@ print_process_info(int to, void *to_arg, Process *p) erts_aint32_t state; /* display the PID */ - erts_print(to, to_arg, "=proc:%T\n", p->id); + erts_print(to, to_arg, "=proc:%T\n", p->common.id); /* Display the state */ erts_print(to, to_arg, "State: "); @@ -226,8 +229,8 @@ print_process_info(int to, void *to_arg, Process *p) * If the process is registered as a global process, display the * registered name */ - if (p->reg != NULL) - erts_print(to, to_arg, "Name: %T\n", p->reg->name); + if (p->common.u.alive.reg) + erts_print(to, to_arg, "Name: %T\n", p->common.u.alive.reg->name); /* * Display the initial function name @@ -301,11 +304,11 @@ print_process_info(int to, void *to_arg, Process *p) } /* display the links only if there are any*/ - if (p->nlinks != NULL || p->monitors != NULL) { + if (ERTS_P_LINKS(p) || ERTS_P_MONITORS(p)) { PrintMonitorContext context = {1,to}; erts_print(to, to_arg,"Link list: ["); - erts_doforall_links(p->nlinks, &doit_print_link, &context); - erts_doforall_monitors(p->monitors, &doit_print_monitor, &context); + erts_doforall_links(ERTS_P_LINKS(p), &doit_print_link, &context); + erts_doforall_monitors(ERTS_P_MONITORS(p), &doit_print_monitor, &context); erts_print(to, to_arg,"]\n"); } @@ -625,9 +628,9 @@ bin_check(void) { Process *rp; struct erl_off_heap_header* hdr; - int i, printed = 0; + int i, printed = 0, max = erts_ptab_max(&erts_proc); - for (i=0; i < erts_max_processes; i++) { + for (i=0; i < max; i++) { rp = erts_pix2proc(i); if (!rp) continue; 
@@ -635,7 +638,7 @@ bin_check(void) if (hdr->thing_word == HEADER_PROC_BIN) { ProcBin *bp = (ProcBin*) hdr; if (!printed) { - erts_printf("Process %T holding binary data \n", rp->id); + erts_printf("Process %T holding binary data \n", rp->common.id); printed = 1; } erts_printf("%p orig_size: %bpd, norefs = %bpd\n", @@ -766,7 +769,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) erts_print_nif_taints(fd, NULL); erts_fdprintf(fd, "Atoms: %d\n", atom_table_size()); info(fd, NULL); /* General system info */ - if (erts_proc.tab) + if (erts_ptab_initialized(&erts_proc)) process_info(fd, NULL); /* Info about each process and port */ db_info(fd, NULL, 0); erts_print_bif_timer_info(fd, NULL); diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index 36eda04de2..23c0fca6aa 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -47,7 +47,7 @@ copy_object(Eterm obj, Process* to) if (DTRACE_ENABLED(copy_object)) { DTRACE_CHARBUF(proc_name, 64); - erts_snprintf(proc_name, sizeof(proc_name), "%T", to->id); + erts_snprintf(proc_name, sizeof(proc_name), "%T", to->common.id); DTRACE2(copy_object, proc_name, size); } #endif diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 8c3bcd1de4..145e6861f6 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -67,7 +67,7 @@ dist_msg_dbg(ErtsDistExternal *edep, char *what, byte *buf, int sz) { byte *extp = edep->extp; Eterm msg; - Sint size = erts_decode_dist_ext_size(edep, 0); + Sint size = erts_decode_dist_ext_size(edep); if (size < 0) { erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(%s) failed:\n", @@ -124,6 +124,13 @@ static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm); static void init_nodes_monitors(void); static erts_smp_atomic_t no_caches; +static erts_smp_atomic_t no_nodes; + +struct { + Eterm reason; + ErlHeapFragment *bp; +} nodedown; + static void delete_cache(ErtsAtomCache *cache) @@ -144,7 +151,7 @@ create_cache(DistEntry *dep) ERTS_SMP_LC_ASSERT( is_internal_port(dep->cid) - && erts_lc_is_port_locked(&erts_port[internal_port_index(dep->cid)])); + && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); ASSERT(!dep->cache); dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE, @@ -171,11 +178,10 @@ get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs) return NULL; } else { - ErtsProcList *plp; - plp = dep->suspended.first; - dep->suspended.first = NULL; - dep->suspended.last = NULL; - return plp; + ErtsProcList *suspended = dep->suspended; + dep->suspended = NULL; + erts_proclist_fetch(&suspended, NULL); + return suspended; } } @@ -252,7 +258,7 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp) if (mon->type == MON_ORIGIN) { /* local pid is beeing monitored */ - rmon = erts_remove_monitor(&(rp->monitors),mon->ref); + rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); /* ASSERT(rmon != NULL); nope, can happen during process exit */ if (rmon != NULL) { erts_destroy_monitor(rmon); @@ -262,7 +268,7 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp) Eterm watched; UseTmpHeapNoproc(3); ASSERT(mon->type == MON_TARGET); - rmon = erts_remove_monitor(&(rp->monitors),mon->ref); + rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); /* ASSERT(rmon != NULL); can happen during process exit */ if (rmon != NULL) { ASSERT(is_atom(rmon->name) || is_nil(rmon->name)); @@ -311,7 +317,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp) goto done; } - rlnk = 
erts_remove_link(&(rp->nlinks), sublnk->pid); + rlnk = erts_remove_link(&ERTS_P_LINKS(rp), sublnk->pid); xres = erts_send_exit_signal(NULL, sublnk->pid, rp, @@ -370,7 +376,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) if (!rp) { goto done; } - rlnk = erts_remove_link(&(rp->nlinks), name); + rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name); if (rlnk != NULL) { ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE)); erts_destroy_link(rlnk); @@ -394,6 +400,47 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) erts_destroy_link(lnk); } +static void +set_node_not_alive(void *unused) +{ + ErlHeapFragment *bp; + Eterm nodename = erts_this_dist_entry->sysname; + + ASSERT(erts_smp_atomic_read_nob(&no_nodes) == 0); + + erts_smp_thr_progress_block(); + erts_set_this_node(am_Noname, 0); + erts_is_alive = 0; + send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nodedown.reason); + nodedown.reason = NIL; + bp = nodedown.bp; + nodedown.bp = NULL; + erts_smp_thr_progress_unblock(); + if (bp) + free_message_buffer(bp); +} + +static ERTS_INLINE void +dec_no_nodes(void) +{ + erts_aint_t no = erts_smp_atomic_dec_read_mb(&no_nodes); + ASSERT(no >= 0); + ASSERT(erts_get_scheduler_id()); /* Need to be a scheduler */ + if (no == 0) + erts_schedule_misc_aux_work(erts_get_scheduler_id(), + set_node_not_alive, + NULL); +} + +static ERTS_INLINE void +inc_no_nodes(void) +{ +#ifdef DEBUG + erts_aint_t no = erts_smp_atomic_read_nob(&no_nodes); + ASSERT(erts_is_alive ? no > 0 : no == 0); +#endif + erts_smp_atomic_inc_mb(&no_nodes); +} /* * proc is currently running or exiting process. @@ -403,47 +450,76 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) Eterm nodename; if (dep == erts_this_dist_entry) { /* Net kernel has died (clean up!!) */ + DistEntry *tdep; + int no_dist_port = 0; Eterm nd_reason = (reason == am_no_network ? am_no_network : am_net_kernel_terminated); erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) + no_dist_port++; + for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) + no_dist_port++; + /* KILL all port controllers */ - while(erts_visible_dist_entries || erts_hidden_dist_entries) { - DistEntry *tdep; - Eterm prt_id; - Port *prt; - if(erts_hidden_dist_entries) - tdep = erts_hidden_dist_entries; - else - tdep = erts_visible_dist_entries; - prt_id = tdep->cid; - ASSERT(is_internal_port(prt_id)); + if (no_dist_port == 0) erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + else { + Eterm def_buf[128]; + int i = 0; + Eterm *dist_port; - prt = erts_id2port(prt_id, NULL, 0); - if (prt) { - ASSERT(prt->status & ERTS_PORT_SFLG_DISTRIBUTION); - ASSERT(prt->dist_entry); - /* will call do_net_exists !!! 
*/ - erts_do_exit_port(prt, prt_id, nd_reason); - erts_port_release(prt); + if (no_dist_port <= sizeof(def_buf)/sizeof(def_buf[0])) + dist_port = &def_buf[0]; + else + dist_port = erts_alloc(ERTS_ALC_T_TMP, + sizeof(Eterm)*no_dist_port); + for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) { + ASSERT(is_internal_port(tdep->cid)); + dist_port[i++] = tdep->cid; + } + for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) { + ASSERT(is_internal_port(tdep->cid)); + dist_port[i++] = tdep->cid; } + erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); - } + for (i = 0; i < no_dist_port; i++) { + Port *prt = erts_port_lookup(dist_port[i], + ERTS_PORT_SFLGS_INVALID_LOOKUP); + if (!prt) + continue; + ASSERT(erts_atomic32_read_nob(&prt->state) + & ERTS_PORT_SFLG_DISTRIBUTION); - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED, + prt, dist_port[i], nd_reason, NULL); + } - nodename = erts_this_dist_entry->sysname; - erts_smp_thr_progress_block(); - erts_set_this_node(am_Noname, 0); - erts_is_alive = 0; - send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nd_reason); - erts_smp_thr_progress_unblock(); + if (dist_port != &def_buf[0]) + erts_free(ERTS_ALC_T_TMP, dist_port); + } + /* + * When last dist port exits, node will be taken + * from alive to not alive. + */ + ASSERT(is_nil(nodedown.reason) && !nodedown.bp); + if (is_immed(nd_reason)) + nodedown.reason = nd_reason; + else { + Eterm *hp; + Uint sz = size_object(nd_reason); + nodedown.bp = new_message_buffer(sz); + hp = nodedown.bp->mem; + nodedown.reason = copy_struct(nd_reason, + sz, + &hp, + &nodedown.bp->off_heap); + } } - else { /* recursive call via erts_do_exit_port() will end up here */ + else { /* Call from distribution port */ NetExitsContext nec = {dep}; ErtsLink *nlinks; ErtsLink *node_links; @@ -454,10 +530,10 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) erts_smp_de_rwlock(dep); ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid) - && erts_lc_is_port_locked(&erts_port[internal_port_index(dep->cid)])); + && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); if (erts_port_task_is_scheduled(&dep->dist_cmd)) - erts_port_task_abort(dep->cid, &dep->dist_cmd); + erts_port_task_abort(&dep->dist_cmd); if (dep->status & ERTS_DE_SFLG_EXITING) { #ifdef DEBUG @@ -503,6 +579,9 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) clear_dist_entry(dep); } + + dec_no_nodes(); + return 1; } @@ -516,6 +595,10 @@ void init_dist(void) { init_nodes_monitors(); + nodedown.reason = NIL; + nodedown.bp = NULL; + + erts_smp_atomic_init_nob(&no_nodes, 0); erts_smp_atomic_init_nob(&no_caches, 0); /* Lookup/Install all references to trap functions */ @@ -769,7 +852,7 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message) *node_name = *sender_name = *receiver_name = '\0'; if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) { erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname); - erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id); + erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id); erts_snprintf(receiver_name, sizeof(receiver_name), "%T", remote); msize = size_object(message); if (token != NIL && token != am_have_dt_utag) { @@ -826,7 +909,7 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message) *node_name = *sender_name = *receiver_name = '\0'; if (DTRACE_ENABLED(message_send) || 
DTRACE_ENABLED(message_send_remote)) { erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname); - erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id); + erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id); erts_snprintf(receiver_name, sizeof(receiver_name), "{%T,%s}", remote_name, node_name); msize = size_object(message); @@ -840,10 +923,10 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message) if (token != NIL) ctl = TUPLE5(&ctl_heap[0], make_small(DOP_REG_SEND_TT), - sender->id, am_Cookie, remote_name, token); + sender->common.id, am_Cookie, remote_name, token); else ctl = TUPLE4(&ctl_heap[0], make_small(DOP_REG_SEND), - sender->id, am_Cookie, remote_name); + sender->common.id, am_Cookie, remote_name); DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, tok_serial); DTRACE7(message_send_remote, sender_name, node_name, receiver_name, @@ -889,7 +972,7 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote, *node_name = *sender_name = *remote_name = '\0'; if (DTRACE_ENABLED(process_exit_signal_remote)) { erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname); - erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id); + erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id); erts_snprintf(remote_name, sizeof(remote_name), "{%T,%s}", remote, node_name); erts_snprintf(reason_str, sizeof(reason), "%T", reason); @@ -1141,7 +1224,7 @@ int erts_net_message(Port *prt, } erts_smp_de_links_lock(dep); - res = erts_add_link(&(rp->nlinks), LINK_PID, from); + res = erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, from); if (res < 0) { /* It was already there! Lets skip the rest... */ @@ -1149,7 +1232,7 @@ int erts_net_message(Port *prt, erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); break; } - lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->id); + lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->common.id); erts_add_link(&(ERTS_LINK_ROOT(lnk)), LINK_PID, from); erts_smp_de_links_unlock(dep); @@ -1176,7 +1259,7 @@ int erts_net_message(Port *prt, if (!rp) break; - lnk = erts_remove_link(&(rp->nlinks), from); + lnk = erts_remove_link(&ERTS_P_LINKS(rp), from); if (IS_TRACED_FL(rp, F_TRACE_PROCS) && lnk != NULL) { trace_proc(NULL, rp, am_getting_unlinked, from); @@ -1233,10 +1316,10 @@ int erts_net_message(Port *prt, } else { if (is_atom(watched)) - watched = rp->id; + watched = rp->common.id; erts_smp_de_links_lock(dep); erts_add_monitor(&(dep->monitors), MON_ORIGIN, ref, watched, name); - erts_add_monitor(&(rp->monitors), MON_TARGET, ref, watcher, name); + erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, watcher, name); erts_smp_de_links_unlock(dep); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } @@ -1275,7 +1358,7 @@ int erts_net_message(Port *prt, if (!rp) { break; } - mon = erts_remove_monitor(&(rp->monitors),ref); + mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); ASSERT(mon != NULL); if (mon == NULL) { @@ -1432,7 +1515,7 @@ int erts_net_message(Port *prt, erts_destroy_monitor(mon); - mon = erts_remove_monitor(&(rp->monitors),ref); + mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); if (mon == NULL) { erts_smp_proc_unlock(rp, rp_locks); @@ -1483,7 +1566,7 @@ int erts_net_message(Port *prt, if (!rp) lnk = NULL; else { - lnk = erts_remove_link(&(rp->nlinks), from); + lnk = erts_remove_link(&ERTS_P_LINKS(rp), from); /* If lnk == NULL, we have unlinked on this side, 
i.e. * ignore exit. @@ -1597,7 +1680,7 @@ int erts_net_message(Port *prt, erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl); } UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); - erts_do_exit_port(prt, dep->cid, am_killed); + erts_deliver_port_exit(prt, dep->cid, am_killed, 0); ERTS_SMP_CHK_NO_PROC_LOCKS; return -1; } @@ -1693,7 +1776,6 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy) erts_smp_mtx_unlock(&dep->qlock); plp = erts_proclist_create(c_p); - plp->next = NULL; erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); suspended = 1; erts_smp_mtx_lock(&dep->qlock); @@ -1726,11 +1808,7 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy) else { /* Enqueue suspended process on dist entry */ ASSERT(plp); - if (dep->suspended.last) - dep->suspended.last->next = plp; - else - dep->suspended.first = plp; - dep->suspended.last = plp; + erts_proclist_store_last(&dep->suspended, plp); } } @@ -1779,7 +1857,7 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy) erts_snprintf(port_str, sizeof(port_str), "%T", cid); erts_snprintf(remote_str, sizeof(remote_str), "%T", dep->sysname); - erts_snprintf(pid_str, sizeof(pid_str), "%T", c_p->id); + erts_snprintf(pid_str, sizeof(pid_str), "%T", c_p->common.id); DTRACE4(dist_port_busy, erts_this_node_sysname, port_str, remote_str, pid_str); } @@ -1812,7 +1890,7 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf) DTRACE_CHARBUF(port_str, 64); DTRACE_CHARBUF(remote_str, 64); - erts_snprintf(port_str, sizeof(port_str), "%T", prt->id); + erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id); erts_snprintf(remote_str, sizeof(remote_str), "%T", prt->dist_entry->sysname); DTRACE4(dist_output, erts_this_node_sysname, port_str, @@ -1866,7 +1944,7 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf) DTRACE_CHARBUF(port_str, 64); DTRACE_CHARBUF(remote_str, 64); - erts_snprintf(port_str, sizeof(port_str), "%T", prt->id); + erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id); erts_snprintf(remote_str, sizeof(remote_str), "%T", prt->dist_entry->sysname); DTRACE4(dist_outputv, erts_this_node_sysname, port_str, @@ -1903,13 +1981,13 @@ int erts_dist_command(Port *prt, int reds_limit) { Sint reds = ERTS_PORT_REDS_DIST_CMD_START; - int prt_busy; Uint32 status; Uint32 flags; Sint obufsize = 0; ErtsDistOutputQueue oq, foq; DistEntry *dep = prt->dist_entry; Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf); + erts_aint32_t sched_flags; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); @@ -1925,7 +2003,7 @@ erts_dist_command(Port *prt, int reds_limit) erts_smp_de_runlock(dep); if (status & ERTS_DE_SFLG_EXITING) { - erts_do_exit_port(prt, prt->id, am_killed); + erts_deliver_port_exit(prt, prt->common.id, am_killed, 0); erts_deref_dist_entry(dep); return reds + ERTS_PORT_REDS_DIST_CMD_EXIT; } @@ -1952,12 +2030,12 @@ erts_dist_command(Port *prt, int reds_limit) dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; + sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + if (reds > reds_limit) goto preempted; - prt_busy = (int) (prt->status & ERTS_PORT_SFLG_PORT_BUSY); - - if (!prt_busy && foq.first) { + if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) && foq.first) { int preempt = 0; do { Uint size; @@ -1974,11 +2052,10 @@ erts_dist_command(Port *prt, int reds_limit) obufsize += size_obuf(fob); foq.first = foq.first->next; free_dist_obuf(fob); - preempt = reds > reds_limit || (prt->status & ERTS_PORT_SFLGS_DEAD); - if (prt->status & ERTS_PORT_SFLG_PORT_BUSY) { - prt_busy = 1; + sched_flags 
= erts_smp_atomic32_read_nob(&prt->sched.flags); + preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); + if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) break; - } } while (foq.first && !preempt); if (!foq.first) foq.last = NULL; @@ -1986,7 +2063,7 @@ erts_dist_command(Port *prt, int reds_limit) goto preempted; } - if (prt_busy) { + if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) { if (oq.first) { ErtsDistOutputBuf *ob; int preempt; @@ -2060,12 +2137,10 @@ erts_dist_command(Port *prt, int reds_limit) obufsize += size_obuf(fob); oq.first = oq.first->next; free_dist_obuf(fob); - preempt = reds > reds_limit || (prt->status & ERTS_PORT_SFLGS_DEAD); - if (prt->status & ERTS_PORT_SFLG_PORT_BUSY) { - prt_busy = 1; - if (oq.first && !preempt) - goto finalize_only; - } + sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); + if ((sched_flags & ERTS_PTS_FLG_BUSY_PORT) && oq.first && !preempt) + goto finalize_only; } ASSERT(!oq.first || preempt); @@ -2093,7 +2168,7 @@ erts_dist_command(Port *prt, int reds_limit) ASSERT(dep->qsize >= obufsize); dep->qsize -= obufsize; obufsize = 0; - if (!prt_busy + if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) && (dep->qflgs & ERTS_DE_QFLG_BUSY) && dep->qsize < erts_dist_buf_busy_limit) { ErtsProcList *suspendees; @@ -2139,11 +2214,15 @@ erts_dist_command(Port *prt, int reds_limit) return reds; preempted: + /* + * Here we assume that state has been read + * since last call to driver. + */ ASSERT(oq.first || !oq.last); ASSERT(!oq.first || oq.last); - if (prt->status & ERTS_PORT_SFLGS_DEAD) { + if (sched_flags & ERTS_PTS_FLG_EXIT) { /* * Port died during port command; clean up 'oq' * and 'foq'. Things buffered in dist entry after @@ -2201,7 +2280,7 @@ erts_dist_port_not_busy(Port *prt) DTRACE_CHARBUF(port_str, 64); DTRACE_CHARBUF(remote_str, 64); - erts_snprintf(port_str, sizeof(port_str), "%T", prt->id); + erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id); erts_snprintf(remote_str, sizeof(remote_str), "%T", prt->dist_entry->sysname); DTRACE3(dist_port_not_busy, erts_this_node_sysname, @@ -2243,7 +2322,7 @@ static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp) Process *rp; ErtsMonitor *rmon; rp = erts_proc_lookup(mon->pid); - if (!rp || (rmon = erts_lookup_monitor(rp->monitors, mon->ref)) == NULL) { + if (!rp || (rmon = erts_lookup_monitor(ERTS_P_MONITORS(rp), mon->ref)) == NULL) { erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->pid); } else if (mon->type == MON_ORIGIN) { /* Local pid is being monitored */ @@ -2488,6 +2567,7 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2) erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_smp_thr_progress_block(); + inc_no_nodes(); erts_set_this_node(BIF_ARG_1, (Uint32) creation); erts_is_alive = 1; send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL); @@ -2556,9 +2636,9 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) /* DFLAG_EXTENDED_REFERENCES is compulsory from R9 and forward */ if (!(DFLAG_EXTENDED_REFERENCES & flags)) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); - erts_dsprintf(dsbufp, "%T", BIF_P->id); - if (BIF_P->reg) - erts_dsprintf(dsbufp, " (%T)", BIF_P->reg->name); + erts_dsprintf(dsbufp, "%T", BIF_P->common.id); + if (BIF_P->common.u.alive.reg) + erts_dsprintf(dsbufp, " (%T)", BIF_P->common.u.alive.reg->name); erts_dsprintf(dsbufp, " attempted to enable connection to node %T " "which is not able to handle extended references.\n", @@ -2578,10 +2658,14 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) else if 
(!dep) goto system_limit; /* Should never happen!!! */ - pp = erts_id2port(BIF_ARG_2, BIF_P, ERTS_PROC_LOCK_MAIN); + pp = erts_id2port_sflgs(BIF_ARG_2, + BIF_P, + ERTS_PROC_LOCK_MAIN, + ERTS_PORT_SFLGS_INVALID_LOOKUP); erts_smp_de_rwlock(dep); - if (!pp || (pp->status & ERTS_PORT_SFLG_EXITING)) + if (!pp || (erts_atomic32_read_nob(&pp->state) + & ERTS_PORT_SFLG_EXITING)) goto badarg; if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0) @@ -2596,11 +2680,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) plp->next = NULL; erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); erts_smp_mtx_lock(&dep->qlock); - if (dep->suspended.last) - dep->suspended.last->next = plp; - else - dep->suspended.first = plp; - dep->suspended.last = plp; + erts_proclist_store_last(&dep->suspended, plp); erts_smp_mtx_unlock(&dep->qlock); goto yield; } @@ -2610,7 +2690,16 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) if (pp->dist_entry || is_not_nil(dep->cid)) goto badarg; - erts_port_status_bor_set(pp, ERTS_PORT_SFLG_DISTRIBUTION); + erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION); + + /* + * Dist-ports do not use the "busy port message queue" functionality, but + * instead use "busy dist entry" functionality. + */ + { + ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED; + erl_drv_busy_msgq_limits((ErlDrvPort) pp, &disable, NULL); + } pp->dist_entry = dep; @@ -2642,6 +2731,8 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) erts_smp_de_rwunlock(dep); dep = NULL; /* inc of refc transferred to port (dist_entry field) */ + inc_no_nodes(); + send_nodes_mon_msgs(BIF_P, am_nodeup, BIF_ARG_1, @@ -2655,7 +2746,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) } if (pp) - erts_smp_port_unlock(pp); + erts_port_release(pp); return ret; @@ -2699,7 +2790,7 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) if (is_internal_pid(local)) { Process *lp; ErtsProcLocks lp_locks; - if (BIF_P->id == local) { + if (BIF_P->common.id == local) { lp_locks = ERTS_PROC_LOCKS_ALL; lp = BIF_P; erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); @@ -2727,11 +2818,17 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) #endif erts_smp_proc_unlock(lp, lp_locks); if (lp == BIF_P) { + erts_aint32_t state = erts_smp_atomic32_read_acqb(&BIF_P->state); /* * We may have exited current process and may have to take action. 
*/ - ERTS_BIF_CHK_EXITED(BIF_P); - ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN); + if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { +#ifdef ERTS_SMP + if (state & ERTS_PSFLG_PENDING_EXIT) + erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); +#endif + ERTS_BIF_EXITED(BIF_P); + } } } else if (is_external_pid(local) @@ -2929,23 +3026,23 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) if (Bool == am_true) { ASSERT(dep->cid != NIL); lnk = erts_add_or_lookup_link(&(dep->node_links), LINK_NODE, - p->id); + p->common.id); ++ERTS_LINK_REFC(lnk); - lnk = erts_add_or_lookup_link(&(p->nlinks), LINK_NODE, Node); + lnk = erts_add_or_lookup_link(&ERTS_P_LINKS(p), LINK_NODE, Node); ++ERTS_LINK_REFC(lnk); } else { - lnk = erts_lookup_link(dep->node_links, p->id); + lnk = erts_lookup_link(dep->node_links, p->common.id); if (lnk != NULL) { if ((--ERTS_LINK_REFC(lnk)) == 0) { erts_destroy_link(erts_remove_link(&(dep->node_links), - p->id)); + p->common.id)); } } - lnk = erts_lookup_link(p->nlinks, Node); + lnk = erts_lookup_link(ERTS_P_LINKS(p), Node); if (lnk != NULL) { if ((--ERTS_LINK_REFC(lnk)) == 0) { - erts_destroy_link(erts_remove_link(&(p->nlinks), + erts_destroy_link(erts_remove_link(&ERTS_P_LINKS(p), Node)); } } @@ -3507,7 +3604,7 @@ erts_processes_monitoring_nodes(Process *c_p) olist = erts_bld_cons(hpp, szp, am_nodedown_reason, olist); res = erts_bld_cons(hpp, szp, erts_bld_tuple(hpp, szp, 2, - nmp->proc->id, + nmp->proc->common.id, olist), res); } diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h index 129f637dba..310d09768d 100644 --- a/erts/emulator/beam/dist.h +++ b/erts/emulator/beam/dist.h @@ -188,11 +188,12 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) if (prt) { ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - ASSERT((erts_port_status_get(prt) & ERTS_PORT_SFLGS_DEAD) == 0); + ASSERT((erts_atomic32_read_nob(&prt->state) + & ERTS_PORT_SFLGS_DEAD) == 0); ASSERT(prt->dist_entry); dep = prt->dist_entry; - id = prt->id; + id = prt->common.id; } else { ASSERT(dist_entry); @@ -204,13 +205,8 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) id = dep->cid; } - if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) { - (void) erts_port_task_schedule(id, - &dep->dist_cmd, - ERTS_PORT_TASK_DIST_CMD, - (ErlDrvEvent) -1, - NULL); - } + if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) + erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD); } #endif diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c index 570cc59be2..306b32691c 100644 --- a/erts/emulator/beam/erl_afit_alloc.c +++ b/erts/emulator/beam/erl_afit_alloc.c @@ -37,6 +37,12 @@ #define GET_ERL_AF_ALLOC_IMPL #include "erl_afit_alloc.h" +struct AFFreeBlock_t_ { + Block_t block_head; + AFFreeBlock_t *prev; + AFFreeBlock_t *next; +}; +#define AF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->block_head) #define MIN_MBC_SZ (16*1024) #define MIN_MBC_FIRST_FREE_SZ (4*1024) @@ -80,7 +86,6 @@ erts_afalc_start(AFAllctr_t *afallctr, init->sbmbct = 0; /* Small mbc not supported by afit */ - allctr->mbc_header_size = sizeof(Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; allctr->min_block_size = sizeof(AFFreeBlock_t); @@ -118,7 +123,7 @@ get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size, ASSERT(!cand_blk || cand_size >= size); - if (afallctr->free_list && BLK_SZ(afallctr->free_list) >= size) { + if (afallctr->free_list && 
AF_BLK_SZ(afallctr->free_list) >= size) { AFFreeBlock_t *res = afallctr->free_list; afallctr->free_list = res->next; if (res->next) @@ -135,7 +140,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) AFFreeBlock_t *blk = (AFFreeBlock_t *) block; AFAllctr_t *afallctr = (AFAllctr_t *) allctr; - if (afallctr->free_list && BLK_SZ(afallctr->free_list) > BLK_SZ(blk)) { + if (afallctr->free_list && AF_BLK_SZ(afallctr->free_list) > AF_BLK_SZ(blk)) { blk->next = afallctr->free_list->next; blk->prev = afallctr->free_list; afallctr->free_list->next = blk; diff --git a/erts/emulator/beam/erl_afit_alloc.h b/erts/emulator/beam/erl_afit_alloc.h index ea408a7194..87caccac20 100644 --- a/erts/emulator/beam/erl_afit_alloc.h +++ b/erts/emulator/beam/erl_afit_alloc.h @@ -49,11 +49,6 @@ Allctr_t *erts_afalc_start(AFAllctr_t *, AFAllctrInit_t *, AllctrInit_t *); #include "erl_alloc_util.h" typedef struct AFFreeBlock_t_ AFFreeBlock_t; -struct AFFreeBlock_t_ { - Block_t block_head; - AFFreeBlock_t *prev; - AFFreeBlock_t *next; -}; struct AFAllctr_t_ { Allctr_t allctr; /* Has to be first! */ diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 061f229f59..007cdbdfa6 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -310,9 +310,9 @@ set_default_ll_alloc_opts(struct au_init *ip) ip->init.util.name_prefix = "ll_"; ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED; #ifndef SMALL_MEMORY - ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */ + ip->init.util.mmbcs = 2*1024*1024 - 40; /* Main carrier size */ #else - ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */ + ip->init.util.mmbcs = 1*1024*1024 - 40; /* Main carrier size */ #endif ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED; ip->init.util.asbcst = 0; @@ -1173,6 +1173,11 @@ handle_au_arg(struct au_init *auip, break; case 'e': auip->enable = get_bool_value(sub_param+1, argv, ip); +#if !HAVE_ERTS_SBMBC + if (auip->init.util.alloc_no == ERTS_ALC_A_SBMBC) { + auip->enable = 0; + } +#endif break; case 'l': if (has_prefix("lmbcs", sub_param)) { @@ -1233,10 +1238,16 @@ handle_au_arg(struct au_init *auip, auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip); } else if (has_prefix("sbmbcs", sub_param)) { - auip->init.util.sbmbcs = get_byte_value(sub_param + 6, argv, ip); +#if HAVE_ERTS_SBMBC + auip->init.util.sbmbcs = +#endif + get_byte_value(sub_param + 6, argv, ip); } else if (has_prefix("sbmbct", sub_param)) { - auip->init.util.sbmbct = get_byte_value(sub_param + 6, argv, ip); +#if HAVE_ERTS_SBMBC + auip->init.util.sbmbct = +#endif + get_byte_value(sub_param + 6, argv, ip); } else if (has_prefix("smbcs", sub_param)) { auip->default_.smbcs = 0; @@ -1403,6 +1414,9 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) else if (strcmp("max", arg) == 0) { for (a = 0; a < aui_sz; a++) aui[a]->enable = 1; +#if !HAVE_ERTS_SBMBC + init->sbmbc_alloc.enable = 0; +#endif } else if (strcmp("config", arg) == 0) { init->erts_alloc_config = 1; @@ -2128,6 +2142,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg) if (want_tot_or_sys || want.processes || want.processes_used) { + int max_processes = erts_ptab_max(&erts_proc); UWord tmp; if (ERTS_MEM_NEED_ALL_ALCU) @@ -2137,7 +2152,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg) fi, ERTS_ALC_NO_FIXED_SIZES); tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0); } - tmp += erts_max_processes*sizeof(Process*); + tmp += max_processes*sizeof(erts_smp_atomic_t); tmp += 
erts_bif_timer_memory_size(); tmp += erts_tot_link_lh_size(); @@ -2268,6 +2283,8 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc) Eterm res = THE_NON_VALUE; int i, length; Uint reserved_atom_space, atom_space; + int max_processes = erts_ptab_max(&erts_proc); + int max_ports = erts_ptab_max(&erts_port); if (proc) { ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN @@ -2299,7 +2316,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc) values[i].arity = 2; values[i].name = "static"; values[i].ui[0] = - erts_max_ports*sizeof(Port) /* Port table */ + max_ports*sizeof(erts_smp_atomic_t) /* Port table */ + erts_timer_wheel_memory_size(); /* Timer wheel */ i++; @@ -2378,7 +2395,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc) values[i].arity = 2; values[i].name = "process_table"; - values[i].ui[0] = erts_max_processes*sizeof(Process*); + values[i].ui[0] = max_processes*sizeof(Process*); i++; values[i].arity = 2; diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index e475f9d8a2..ba5ec9c367 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -267,6 +267,8 @@ typedef void (*erts_alloc_verify_func_t)(Allctr_t *); erts_alloc_verify_func_t erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr); +#define ERTS_ALC_DATA_ALIGN_SIZE(SZ) \ + (((((SZ) - 1) / 8) + 1) * 8) #define ERTS_ALC_CACHE_LINE_ALIGN_SIZE(SZ) \ (((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE) diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 2b649b589b..3d437652ce 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -148,6 +148,7 @@ class SYSTEM system_data type SBMBC SBMBC SYSTEM small_block_mbc type PROC FIXED_SIZE PROCESSES proc +type PORT DRIVER SYSTEM port type ATOM LONG_LIVED ATOM atom_entry type MODULE LONG_LIVED CODE module_entry type REG_PROC STANDARD PROCESSES reg_proc @@ -191,7 +192,10 @@ type PORT_TABLE LONG_LIVED SYSTEM port_tab type TIMER_WHEEL LONG_LIVED SYSTEM timer_wheel type DRV DRIVER SYSTEM drv_internal type DRV_BINARY BINARY BINARIES drv_binary -type DRIVER STANDARD SYSTEM driver +type DRIVER DRIVER SYSTEM driver +type DRV_CMD_DATA DRIVER SYSTEM driver_command_data +type DRV_CTRL_DATA DRIVER SYSTEM driver_control_data +type DRV_CALL_DATA DRIVER SYSTEM driver_call_data type NIF DRIVER SYSTEM nif_internal type BINARY BINARY BINARIES binary type NBIF_TABLE SYSTEM SYSTEM nbif_tab @@ -199,14 +203,12 @@ type ARG_REG STANDARD PROCESSES arg_reg type PROC_DICT STANDARD PROCESSES proc_dict type CALLS_BUF STANDARD PROCESSES calls_buf type BPD STANDARD SYSTEM bpd -type PORT_NAME STANDARD SYSTEM port_name type LINEBUF STANDARD SYSTEM line_buf type IOQ STANDARD SYSTEM io_queue type BITS_BUF STANDARD SYSTEM bits_buf type TMP_DIST_BUF TEMPORARY SYSTEM tmp_dist_buf type ASYNC_DATA LONG_LIVED SYSTEM internal_async_data type ESTACK TEMPORARY SYSTEM estack -type PORT_CALL_BUF TEMPORARY SYSTEM port_call_buf type DB_TABLE ETS ETS db_tab type DB_FIXATION SHORT_LIVED ETS db_fixation type DB_FIX_DEL SHORT_LIVED ETS fixed_del @@ -236,14 +238,14 @@ type DDLL_HANDLE STANDARD SYSTEM ddll_handle type DDLL_ERRCODES LONG_LIVED SYSTEM ddll_errcodes type DDLL_TMP_BUF TEMPORARY SYSTEM ddll_tmp_buf type PORT_TASK SHORT_LIVED SYSTEM port_task -type PORT_TASKQ SHORT_LIVED SYSTEM port_task_queue +type PT_HNDL_LIST SHORT_LIVED SYSTEM port_task_handle_list type MISC_OP_LIST SHORT_LIVED SYSTEM misc_op_list type PORT_NAMES SHORT_LIVED SYSTEM 
port_names -type PORT_DATA_LOCK STANDARD SYSTEM port_data_lock +type PORT_DATA_LOCK DRIVER SYSTEM port_data_lock type NODES_MON STANDARD PROCESSES nodes_monitor -type PROCS_TPROC_EL SHORT_LIVED PROCESSES processes_term_proc_el -type PROCS_CNKINF SHORT_LIVED PROCESSES processes_chunk_info -type PROCS_PIDS SHORT_LIVED PROCESSES processes_pids +type PTAB_LIST_DEL SHORT_LIVED PROCESSES ptab_list_deleted_el +type PTAB_LIST_CNKI SHORT_LIVED PROCESSES ptab_list_chunk_info +type PTAB_LIST_PIDS SHORT_LIVED PROCESSES ptab_list_pids type RE_TMP_BUF TEMPORARY SYSTEM re_tmp_buf type RE_SUBJECT SHORT_LIVED SYSTEM re_subject type RE_HEAP STANDARD SYSTEM re_heap @@ -268,6 +270,8 @@ type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q type PROC_INTERVAL LONG_LIVED SYSTEM process_interval +type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table +type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller +if threads_no_smp # Need thread safe allocs, but std_alloc and fix_alloc are not; diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index f8a8c00715..6de0099636 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -78,10 +78,12 @@ int erts_have_sbmbc_alloc; #if HAVE_ERTS_MSEG -#define INV_MSEG_UNIT_MASK ((UWord) (mseg_unit_size - 1)) -#define MSEG_UNIT_MASK (~INV_MSEG_UNIT_MASK) +#define MSEG_UNIT_SHIFT MSEG_ALIGN_BITS +#define MSEG_UNIT_SZ (1 << MSEG_UNIT_SHIFT) +#define MSEG_UNIT_MASK ((~(UWord)0) << MSEG_UNIT_SHIFT) + #define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK) -#define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + INV_MSEG_UNIT_MASK) +#define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + ~MSEG_UNIT_MASK) #endif @@ -104,7 +106,6 @@ int erts_have_sbmbc_alloc; static Uint sys_alloc_carrier_size; #if HAVE_ERTS_MSEG static Uint max_mseg_carriers; -static Uint mseg_unit_size; #endif #define ONE_GIGA (1000000000) @@ -117,16 +118,47 @@ static Uint mseg_unit_size; ? ((CC).giga_no--, (CC).no = ONE_GIGA - 1) \ : (CC).no--) -/* ... */ +/* Multi block carrier (MBC) memory layout in R16: + +Empty MBC: +[Carrier_t|pad|Block_t L0T|fhdr| free... ] + +MBC after allocating first block: +[Carrier_t|pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ] + +MBC after allocating second block: +[Carrier_t|pad|Block_t 000| udata |pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ] + +MBC after deallocating first block: +[Carrier_t|pad|Block_t 00T|fhdr| free |FreeBlkFtr_t|Block_t 0P0| udata |pad|Block_t L0T|fhdr| free... ] + + + udata = Allocated user data + pad = Padding to ensure correct alignment for user data + fhdr = Allocator specific header to keep track of free block + free = Unused free memory + T = This block is free (THIS_FREE_BLK_HDR_FLG) + P = Previous block is free (PREV_FREE_BLK_HDR_FLG) + L = Last block in carrier (LAST_BLK_HDR_FLG) +*/ + +/* Single block carrier (SBC): +[Carrier_t|pad|Block_t 111| udata... ] +*/ + /* Blocks ... 
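
A stand-alone sketch of the flag legend in the layout comment above (names and widths here are illustrative stand-ins, not the ERTS definitions): the three low bits of the header word carry T/P/L, and the otherwise impossible combination T|P|L is what identifies an SBC block, since coalescing guarantees a free MBC block is never preceded by another free block.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t UWord;

/* Illustrative stand-ins for the three low header bits in the legend. */
#define THIS_FREE  ((UWord)1 << 0)   /* T: this block is free         */
#define PREV_FREE  ((UWord)1 << 1)   /* P: previous block is free     */
#define LAST_BLK   ((UWord)1 << 2)   /* L: last block in the carrier  */
#define FLG_MASK   (THIS_FREE | PREV_FREE | LAST_BLK)

/* T|P|L never occurs on a real MBC block (a free block is never preceded
 * by a free block once neighbours are coalesced), so the combination is
 * free to mark single block carriers ("111" in the layout comment). */
#define SBC_FLGS   (THIS_FREE | PREV_FREE | LAST_BLK)
#define IS_SBC(hdr) (((hdr) & FLG_MASK) == SBC_FLGS)

int main(void)
{
    UWord free_last_mbc = 4096 | THIS_FREE | LAST_BLK;  /* "L0T" block  */
    UWord alloced_mbc   = 4096;                         /* "000" block  */
    UWord sbc_blk       = 65536 | SBC_FLGS;             /* "111" block  */

    printf("%d %d %d\n", IS_SBC(free_last_mbc), IS_SBC(alloced_mbc), IS_SBC(sbc_blk));
    return 0;
}
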
*/ -#define SBC_BLK_FTR_FLG (((UWord) 1) << 0) +#define UNUSED0_BLK_FTR_FLG (((UWord) 1) << 0) #define UNUSED1_BLK_FTR_FLG (((UWord) 1) << 1) #define UNUSED2_BLK_FTR_FLG (((UWord) 1) << 2) -#define ABLK_HDR_SZ (sizeof(Block_t)) -#define FBLK_FTR_SZ (sizeof(UWord)) +#if MBC_ABLK_OFFSET_BITS +# define ABLK_HDR_SZ (offsetof(Block_t,u)) +#else +# define ABLK_HDR_SZ (sizeof(Block_t)) +#endif +#define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t)) #define UMEMSZ2BLKSZ(AP, SZ) \ (ABLK_HDR_SZ + (SZ) <= (AP)->min_block_size \ @@ -136,88 +168,181 @@ static Uint mseg_unit_size; #define UMEM2BLK(P) ((Block_t *) (((char *) (P)) - ABLK_HDR_SZ)) #define BLK2UMEM(P) ((void *) (((char *) (P)) + ABLK_HDR_SZ)) -#define PREV_BLK_SZ(B) \ - ((UWord) (*(((UWord *) (B)) - 1) & SZ_MASK)) +#define PREV_BLK_SZ(B) ((UWord) (((FreeBlkFtr_t *)(B))[-1])) #define SET_BLK_SZ_FTR(B, SZ) \ - (*((UWord *) (((char *) (B)) + (SZ) - sizeof(UWord))) = (SZ)) + (((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ)) #define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0) #define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1) #define LAST_BLK_HDR_FLG (((UWord) 1) << 2) -#define SET_BLK_SZ(B, SZ) \ +/* Special flag combo for (allocated) SBC blocks +*/ +#define SBC_BLK_HDR_FLG (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG) + +#define SET_MBC_ABLK_SZ(B, SZ) \ (ASSERT(((SZ) & FLG_MASK) == 0), \ - (*((Block_t *) (B)) = ((*((Block_t *) (B)) & FLG_MASK) | (SZ)))) -#define SET_BLK_FREE(B) \ - (*((Block_t *) (B)) |= THIS_FREE_BLK_HDR_FLG) -#define SET_BLK_ALLOCED(B) \ - (*((Block_t *) (B)) &= ~THIS_FREE_BLK_HDR_FLG) -#define SET_PREV_BLK_FREE(B) \ - (*((Block_t *) (B)) |= PREV_FREE_BLK_HDR_FLG) + (B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ)) +#define SET_MBC_FBLK_SZ(B, SZ) \ + (ASSERT(((SZ) & FLG_MASK) == 0), \ + (B)->bhdr = (((B)->bhdr) & ~MBC_FBLK_SZ_MASK) | (SZ)) +#define SET_SBC_BLK_SZ(B, SZ) \ + (ASSERT(((SZ) & FLG_MASK) == 0), \ + (B)->bhdr = (((B)->bhdr) & ~SBC_BLK_SZ_MASK) | (SZ)) +#define SET_PREV_BLK_FREE(AP,B) \ + (ASSERT(!IS_MBC_FIRST_BLK(AP,B)), \ + ASSERT(!IS_FREE_BLK(B)), \ + (B)->bhdr |= PREV_FREE_BLK_HDR_FLG) #define SET_PREV_BLK_ALLOCED(B) \ - (*((Block_t *) (B)) &= ~PREV_FREE_BLK_HDR_FLG) + ((B)->bhdr &= ~PREV_FREE_BLK_HDR_FLG) #define SET_LAST_BLK(B) \ - (*((Block_t *) (B)) |= LAST_BLK_HDR_FLG) + ((B)->bhdr |= LAST_BLK_HDR_FLG) #define SET_NOT_LAST_BLK(B) \ - (*((Block_t *) (B)) &= ~LAST_BLK_HDR_FLG) + ((B)->bhdr &= ~LAST_BLK_HDR_FLG) #define SBH_THIS_FREE THIS_FREE_BLK_HDR_FLG -#define SBH_THIS_ALLOCED ((UWord) 0) #define SBH_PREV_FREE PREV_FREE_BLK_HDR_FLG -#define SBH_PREV_ALLOCED ((UWord) 0) #define SBH_LAST_BLK LAST_BLK_HDR_FLG -#define SBH_NOT_LAST_BLK ((UWord) 0) -#define SET_BLK_HDR(B, Sz, F) \ - (ASSERT(((Sz) & FLG_MASK) == 0), *((Block_t *) (B)) = ((Sz) | (F))) + +#if MBC_ABLK_OFFSET_BITS + +# define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << MSEG_ALIGN_BITS) + +# define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> MSEG_UNIT_SHIFT) + +# define SET_MBC_ABLK_HDR(B, Sz, F, C) \ + (ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \ + ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \ + (B)->bhdr = ((Sz) | (F) | (BLK_CARRIER_OFFSET(B,C) << MBC_ABLK_OFFSET_SHIFT))) + +# define SET_MBC_FBLK_HDR(B, Sz, F, C) \ + (ASSERT(((Sz) & ~MBC_FBLK_SZ_MASK) == 0), \ + ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ + (B)->bhdr = ((Sz) | (F)), \ + (B)->u.carrier = (C)) + +# define ABLK_TO_MBC(B) \ + (ASSERT(IS_MBC_BLK(B) && 
IS_ALLOCED_BLK(B)), \ + (Carrier_t*)((MSEG_UNIT_FLOOR((UWord)(B)) - \ + (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << MSEG_UNIT_SHIFT)))) + +# define FBLK_TO_MBC(B) \ + (ASSERT(IS_MBC_BLK(B) && IS_FREE_BLK(B)), \ + (B)->u.carrier) + +# define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B)) + +# define IS_MBC_FIRST_ABLK(AP,B) \ + ((((UWord)(B) & ~MSEG_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \ + && ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0) + +# define IS_MBC_FIRST_FBLK(AP,B) \ + ((char*)(B) == (char*)((B)->u.carrier) + MBC_HEADER_SIZE(AP)) + +# define IS_MBC_FIRST_BLK(AP,B) \ + (IS_FREE_BLK(B) ? IS_MBC_FIRST_FBLK(AP,B) : IS_MBC_FIRST_ABLK(AP,B)) + +# define SET_BLK_FREE(B) \ + (ASSERT(!IS_PREV_BLK_FREE(B)), \ + (B)->u.carrier = ABLK_TO_MBC(B), \ + (B)->bhdr |= THIS_FREE_BLK_HDR_FLG, \ + (B)->bhdr &= (MBC_ABLK_SZ_MASK|FLG_MASK)) + +# define SET_BLK_ALLOCED(B) \ + (ASSERT(((B)->bhdr & (MBC_ABLK_OFFSET_MASK|THIS_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ + (B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG, \ + (B)->bhdr |= (BLK_CARRIER_OFFSET(B,(B)->u.carrier) << MBC_ABLK_OFFSET_SHIFT)) + +#else /* !MBC_ABLK_OFFSET_BITS */ + +# define MBC_SZ_MAX_LIMIT ((UWord)~0) + +# define SET_MBC_ABLK_HDR(B, Sz, F, C) \ + (ASSERT(((Sz) & FLG_MASK) == 0), \ + ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \ + ASSERT((UWord)(F) < SBC_BLK_HDR_FLG), \ + (B)->bhdr = ((Sz) | (F)), \ + (B)->carrier = (C)) + +# define SET_MBC_FBLK_HDR(B, Sz, F, C) \ + (ASSERT(((Sz) & FLG_MASK) == 0), \ + ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \ + (B)->bhdr = ((Sz) | (F)), \ + (B)->carrier = (C)) + +# define BLK_TO_MBC(B) ((B)->carrier) +# define ABLK_TO_MBC(B) BLK_TO_MBC(B) +# define FBLK_TO_MBC(B) BLK_TO_MBC(B) + +# define IS_MBC_FIRST_BLK(AP,B) \ + ((char*)(B) == (char*)((B)->carrier) + MBC_HEADER_SIZE(AP)) +# define IS_MBC_FIRST_ABLK(AP,B) IS_MBC_FIRST_BLK(AP,B) +# define IS_MBC_FIRST_FBLK(AP,B) IS_MBC_FIRST_BLK(AP,B) + +# define SET_BLK_FREE(B) \ + (ASSERT(!IS_PREV_BLK_FREE(B)), \ + (B)->bhdr |= THIS_FREE_BLK_HDR_FLG) + +# define SET_BLK_ALLOCED(B) \ + ((B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG) + +#endif /* !MBC_ABLK_OFFSET_BITS */ + +#define SET_SBC_BLK_HDR(B, Sz) \ + (ASSERT(((Sz) & FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG))) + #define BLK_UMEM_SZ(B) \ (BLK_SZ(B) - (ABLK_HDR_SZ)) #define IS_PREV_BLK_FREE(B) \ - (*((Block_t *) (B)) & PREV_FREE_BLK_HDR_FLG) + ((B)->bhdr & PREV_FREE_BLK_HDR_FLG) #define IS_PREV_BLK_ALLOCED(B) \ (!IS_PREV_BLK_FREE((B))) #define IS_FREE_BLK(B) \ - (*((Block_t *) (B)) & THIS_FREE_BLK_HDR_FLG) + (ASSERT(!IS_SBC_BLK(B)), (B)->bhdr & THIS_FREE_BLK_HDR_FLG) #define IS_ALLOCED_BLK(B) \ (!IS_FREE_BLK((B))) #define IS_LAST_BLK(B) \ - (*((Block_t *) (B)) & LAST_BLK_HDR_FLG) + ((B)->bhdr & LAST_BLK_HDR_FLG) #define IS_NOT_LAST_BLK(B) \ (!IS_LAST_BLK((B))) #define GET_LAST_BLK_HDR_FLG(B) \ - (*((Block_t*) (B)) & LAST_BLK_HDR_FLG) + ((B)->bhdr & LAST_BLK_HDR_FLG) #define GET_THIS_FREE_BLK_HDR_FLG(B) \ - (*((Block_t*) (B)) & THIS_FREE_BLK_HDR_FLG) + ((B)->bhdr & THIS_FREE_BLK_HDR_FLG) #define GET_PREV_FREE_BLK_HDR_FLG(B) \ - (*((Block_t*) (B)) & PREV_FREE_BLK_HDR_FLG) + ((B)->bhdr & PREV_FREE_BLK_HDR_FLG) #define GET_BLK_HDR_FLGS(B) \ - (*((Block_t*) (B)) & FLG_MASK) - -#define IS_FIRST_BLK(B) \ - (IS_PREV_BLK_FREE((B)) && (PREV_BLK_SZ((B)) == 0)) -#define IS_NOT_FIRST_BLK(B) \ - (!IS_FIRST_BLK((B))) - -#define SET_SBC_BLK_FTR(FTR) \ - ((FTR) = (0 | SBC_BLK_FTR_FLG)) -#define SET_MBC_BLK_FTR(FTR) \ - ((FTR) = 0) + ((B)->bhdr & 
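
To make SET_MBC_ABLK_HDR / ABLK_TO_MBC above concrete: on the offset-bits configuration an allocated block's header packs the distance to its carrier, counted in mseg units, into the top bits, so the carrier is recovered without a per-block pointer. The sketch below assumes a 64-bit word, 24 offset bits and a 2^18-byte mseg unit; it is an illustration, not the ERTS code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t UWord;

/* Assumed constants: 64-bit word, 24 offset bits, 2^18-byte mseg unit. */
#define MSEG_UNIT_SHIFT       18
#define MBC_ABLK_OFFSET_BITS  24
#define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS)
#define MBC_ABLK_OFFSET_MASK  (~(UWord)0 << MBC_ABLK_OFFSET_SHIFT)
#define FLG_MASK              ((UWord)7)
#define MBC_ABLK_SZ_MASK      (~MBC_ABLK_OFFSET_MASK & ~FLG_MASK)

/* Pack size, flags and carrier offset (in mseg units) into one header word. */
static UWord pack_ablk_hdr(UWord blk, UWord crr, UWord sz, UWord flgs)
{
    UWord off = (blk - crr) >> MSEG_UNIT_SHIFT;
    assert((sz & ~MBC_ABLK_SZ_MASK) == 0 && (flgs & ~FLG_MASK) == 0);
    return sz | flgs | (off << MBC_ABLK_OFFSET_SHIFT);
}

/* Recover the carrier address from a block address and its header word,
 * relying on carriers being mseg-unit aligned (as ABLK_TO_MBC does). */
static UWord ablk_to_carrier(UWord blk, UWord bhdr)
{
    UWord unit_floor = blk & (~(UWord)0 << MSEG_UNIT_SHIFT);
    return unit_floor - ((bhdr >> MBC_ABLK_OFFSET_SHIFT) << MSEG_UNIT_SHIFT);
}

int main(void)
{
    UWord crr = (UWord)1 << 20;                               /* aligned carrier */
    UWord blk = crr + 3*((UWord)1 << MSEG_UNIT_SHIFT) + 4096; /* block inside it */
    UWord hdr = pack_ablk_hdr(blk, crr, 1024, 0);

    printf("size=%lu carrier recovered=%d\n",
           (unsigned long)(hdr & MBC_ABLK_SZ_MASK),
           ablk_to_carrier(blk, hdr) == crr);
    return 0;
}

Free blocks do not use the offset trick: in the patch they keep a full-width size in the header (MBC_FBLK_SZ_MASK) plus an explicit u.carrier field, since a coalesced free block can grow past what MBC_ABLK_SZ_MASK can express.
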
FLG_MASK) #define IS_SBC_BLK(B) \ - (IS_PREV_BLK_FREE((B)) && (((UWord *) (B))[-1] & SBC_BLK_FTR_FLG)) + (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG) #define IS_MBC_BLK(B) \ (!IS_SBC_BLK((B))) +#define MBC_BLK_SZ(B) (IS_FREE_BLK(B) ? MBC_FBLK_SZ(B) : MBC_ABLK_SZ(B)) + #define NXT_BLK(B) \ - ((Block_t *) (((char *) (B)) + BLK_SZ((B)))) + (ASSERT(IS_MBC_BLK(B)), \ + (Block_t *) (((char *) (B)) + MBC_BLK_SZ((B)))) #define PREV_BLK(B) \ ((Block_t *) (((char *) (B)) - PREV_BLK_SZ((B)))) +#define BLK_AFTER(B,Sz) \ + ((Block_t *) (((char *) (B)) + (Sz))) + +#define BLK_SZ(B) ((B)->bhdr & (((B)->bhdr & THIS_FREE_BLK_HDR_FLG) ? MBC_FBLK_SZ_MASK : MBC_ABLK_SZ_MASK)) + /* Carriers ... */ +#define SBC_HEADER_SIZE (UNIT_CEILING(sizeof(Carrier_t) + ABLK_HDR_SZ) \ + - ABLK_HDR_SZ) +#define MBC_HEADER_SIZE(AP) SBC_HEADER_SIZE + + #define MSEG_CARRIER_HDR_FLAG (((UWord) 1) << 0) #define SBC_CARRIER_HDR_FLAG (((UWord) 1) << 1) @@ -226,20 +351,20 @@ static Uint mseg_unit_size; #define SCH_MBC 0 #define SCH_SBC SBC_CARRIER_HDR_FLAG -#define SET_CARRIER_HDR(C, Sz, F) \ - (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F))) +#define SET_CARRIER_HDR(C, Sz, F, AP) \ + (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), (C)->allctr = (AP)) -#define BLK2SBC(AP, B) \ - ((Carrier_t *) (((char *) (B)) - (AP)->sbc_header_size)) -#define FBLK2MBC(AP, B) \ - ((Carrier_t *) (((char *) (B)) - (AP)->mbc_header_size)) +#define BLK_TO_SBC(B) \ + ((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE)) +#define FIRST_BLK_TO_MBC(AP, B) \ + ((Carrier_t *) (((char *) (B)) - MBC_HEADER_SIZE(AP))) -#define MBC2FBLK(AP, P) \ - ((Block_t *) (((char *) (P)) + (AP)->mbc_header_size)) +#define MBC_TO_FIRST_BLK(AP, P) \ + ((Block_t *) (((char *) (P)) + MBC_HEADER_SIZE(AP))) #define SBC2BLK(AP, P) \ - ((Block_t *) (((char *) (P)) + (AP)->sbc_header_size)) + ((Block_t *) (((char *) (P)) + SBC_HEADER_SIZE)) #define SBC2UMEM(AP, P) \ - ((void *) (((char *) (P)) + ((AP)->sbc_header_size + ABLK_HDR_SZ))) + ((void *) (((char *) (P)) + (SBC_HEADER_SIZE + ABLK_HDR_SZ))) #define IS_MSEG_CARRIER(C) \ ((C)->chdr & MSEG_CARRIER_HDR_FLAG) @@ -250,15 +375,6 @@ static Uint mseg_unit_size; #define IS_MB_CARRIER(C) \ (!IS_SB_CARRIER((C))) -#define SET_MSEG_CARRIER(C) \ - ((C)->chdr |= MSEG_CARRIER_HDR_FLAG) -#define SET_SYS_ALLOC_CARRIER(C) \ - ((C)->chdr &= ~MSEG_CARRIER_HDR_FLAG) -#define SET_SB_CARRIER(C) \ - ((C)->chdr |= SBC_CARRIER_HDR_FLAG) -#define SET_MB_CARRIER(C) \ - ((C)->chdr &= ~SBC_CARRIER_HDR_FLAG) - #define SET_CARRIER_SZ(C, SZ) \ (ASSERT(((SZ) & FLG_MASK) == 0), \ ((C)->chdr = ((C)->chdr & FLG_MASK) | (SZ))) @@ -506,11 +622,11 @@ static void mbc_free(Allctr_t *allctr, void *p); #if HAVE_ERTS_MSEG static ERTS_INLINE void * -alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p) +alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags) { void *res; - res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, &allctr->mseg_opt); + res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, flags, &allctr->mseg_opt); INC_CC(allctr->calls.mseg_alloc); return res; } @@ -521,7 +637,7 @@ alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p) void *res; res = erts_mseg_realloc_opt(allctr->alloc_no, seg, old_size, new_size_p, - &allctr->mseg_opt); + ERTS_MSEG_FLG_NONE, &allctr->mseg_opt); INC_CC(allctr->calls.mseg_realloc); return res; } @@ -765,13 +881,9 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) #define ERTS_ALCU_DD_FIX_TYPE_OFFS \ ((sizeof(ErtsAllctrDDBlock_t)-1)/sizeof(UWord) + 1) -#define 
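
A worked example of the new compile-time SBC_HEADER_SIZE above (replacing the old per-allocator sbc_header_size/mbc_header_size fields): the carrier header is padded so that header plus block header is unit-aligned, which puts the first block's user data on a unit boundary. The unit size and Carrier_t layout below are assumptions for a 64-bit build, not values taken from the headers.

#include <stddef.h>
#include <stdio.h>

typedef unsigned long UWord;

/* Assumed shapes: an 8-byte unit, a one-word block header and the four-word
 * carrier header (chdr, next, prev, allctr) introduced by this patch. */
struct Carrier { UWord chdr; void *next; void *prev; void *allctr; };

#define UNIT             8ul
#define UNIT_CEILING(X)  ((((X) - 1) / UNIT + 1) * UNIT)
#define ABLK_HDR_SZ      sizeof(UWord)
#define SBC_HEADER_SIZE  (UNIT_CEILING(sizeof(struct Carrier) + ABLK_HDR_SZ) - ABLK_HDR_SZ)

int main(void)
{
    /* The header size is chosen so the first block header ends on a unit
     * boundary, i.e. the first user data in the carrier is unit aligned. */
    printf("sizeof(Carrier)=%lu header=%lu first udata offset=%lu\n",
           (unsigned long)sizeof(struct Carrier),
           (unsigned long)SBC_HEADER_SIZE,
           (unsigned long)(SBC_HEADER_SIZE + ABLK_HDR_SZ));
    return 0;
}
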
ERTS_AU_PREF_ALLOC_IX_MASK \ - ((((UWord) 1) << ERTS_AU_PREF_ALLOC_BITS) - 1) -#define ERTS_AU_PREF_ALLOC_SIZE_MASK \ - ((((UWord) 1) << (sizeof(UWord)*8 - ERTS_AU_PREF_ALLOC_BITS)) - 1) -static ERTS_INLINE int -get_pref_allctr(void *extra, Allctr_t **allctr) +static ERTS_INLINE Allctr_t* +get_pref_allctr(void *extra) { ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra; int pref_ix; @@ -781,34 +893,33 @@ get_pref_allctr(void *extra, Allctr_t **allctr) ASSERT(sizeof(UWord) == sizeof(Allctr_t *)); ASSERT(0 <= pref_ix && pref_ix < tspec->size); - *allctr = tspec->allctr[pref_ix]; - return pref_ix; + return tspec->allctr[pref_ix]; } -static ERTS_INLINE void * -get_used_allctr(void *extra, void *p, Allctr_t **allctr, UWord *sizep) +/* SMP note: + * get_used_allctr() must be safe WITHOUT locking the allocator while + * concurrent threads may be updating adjacent blocks. + * We rely on getting a consistent result (without atomic op) when reading + * the block header word even if a concurrent thread is updating + * the "PREV_FREE" flag bit. + */ +static ERTS_INLINE Allctr_t* +get_used_allctr(void *extra, void *p, UWord *sizep) { - ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra; - void *ptr = (void *) (((char *) p) - sizeof(UWord)); - UWord ainfo = *((UWord *) ptr); - int aix = (int) (ainfo & ERTS_AU_PREF_ALLOC_IX_MASK); - *allctr = tspec->allctr[aix]; - if (sizep) - *sizep = ((ainfo >> ERTS_AU_PREF_ALLOC_BITS) - & ERTS_AU_PREF_ALLOC_SIZE_MASK); - return ptr; -} + Block_t* blk = UMEM2BLK(p); + Carrier_t* crr; -static ERTS_INLINE void * -put_used_allctr(void *p, int ix, UWord size) -{ - UWord ainfo = (size >= ERTS_AU_PREF_ALLOC_SIZE_MASK - ? ERTS_AU_PREF_ALLOC_SIZE_MASK - : size); - ainfo <<= ERTS_AU_PREF_ALLOC_BITS; - ainfo |= (UWord) ix; - *((UWord *) p) = ainfo; - return (void *) (((char *) p) + sizeof(UWord)); + if (IS_SBC_BLK(blk)) { + crr = BLK_TO_SBC(blk); + if (sizep) + *sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ; + } + else { + crr = ABLK_TO_MBC(blk); + if (sizep) + *sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ; + } + return crr->allctr; } static void @@ -1209,10 +1320,8 @@ mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp, Uint32 *alcu_flgsp) if ((*alcu_flgsp) & ERTS_ALCU_FLG_SBMBC) blk = create_sbmbc(allctr, get_blk_sz); else { -#if HALFWORD_HEAP - blk = create_carrier(allctr, get_blk_sz, CFLG_MBC|CFLG_FORCE_MSEG); -#else blk = create_carrier(allctr, get_blk_sz, CFLG_MBC); +#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS if (!blk) { /* Emergency! We couldn't create the carrier as we wanted. Try to place it in a sys_alloced sbc. */ @@ -1242,6 +1351,7 @@ mbc_alloc_finalize(Allctr_t *allctr, Block_t *blk, Uint org_blk_sz, UWord flags, + Carrier_t *crr, Uint want_blk_sz, int valid_blk_info, Uint32 alcu_flgs) @@ -1262,22 +1372,18 @@ mbc_alloc_finalize(Allctr_t *allctr, /* Shrink block... 
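
The get_used_allctr() rewrite above is why the thread-preferred allocators no longer need the hidden prefix word that put_used_allctr() used to write: the owning instance is now reachable from the block itself, through the carrier's new allctr back-pointer. A toy version of that lookup chain, with stand-in types rather than the ERTS ones:

#include <stdio.h>

/* Stand-in types; the real ones are Allctr_t, Carrier_t and Block_t. */
typedef struct Allctr  { const char *name; } Allctr;
typedef struct Carrier { Allctr *allctr; } Carrier;
typedef struct Block   { Carrier *carrier; } Block;  /* or offset bits, see ABLK_TO_MBC */

/* Owner lookup from the user pointer alone: step back to the block header,
 * find the carrier, read its back-pointer. No prefix word, no lock. */
static Allctr *owner_of(void *p)
{
    Block *blk = (Block *)((char *)p - sizeof(Block));  /* UMEM2BLK */
    return blk->carrier->allctr;
}

int main(void)
{
    Allctr  inst = { "allocator instance 3" };
    Carrier crr  = { &inst };
    struct { Block hdr; char udata[64]; } mem = { { &crr }, { 0 } };

    printf("owner: %s\n", owner_of(mem.udata)->name);
    return 0;
}
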
*/ blk_sz = want_blk_sz; nxt_blk_sz = org_blk_sz - blk_sz; - SET_BLK_HDR(blk, - blk_sz, - SBH_THIS_ALLOCED|SBH_NOT_LAST_BLK|prev_free_flg); + SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr); - nxt_blk = NXT_BLK(blk); - SET_BLK_HDR(nxt_blk, - nxt_blk_sz, - (SBH_THIS_FREE - | SBH_PREV_ALLOCED - | (flags & LAST_BLK_HDR_FLG))); + nxt_blk = BLK_AFTER(blk, blk_sz); + SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz, + SBH_THIS_FREE|(flags & LAST_BLK_HDR_FLG), + crr); if (!(flags & LAST_BLK_HDR_FLG)) { SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz); if (!valid_blk_info) { - Block_t *nxt_nxt_blk = NXT_BLK(nxt_blk); - SET_PREV_BLK_FREE(nxt_nxt_blk); + Block_t *nxt_nxt_blk = BLK_AFTER(nxt_blk, nxt_blk_sz); + SET_PREV_BLK_FREE(allctr, nxt_nxt_blk); } } (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs); @@ -1291,40 +1397,40 @@ mbc_alloc_finalize(Allctr_t *allctr, || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk))); ASSERT((flags & LAST_BLK_HDR_FLG) || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk))); - ASSERT(nxt_blk_sz == BLK_SZ(nxt_blk)); + ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk)); ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0); ASSERT(nxt_blk_sz >= allctr->min_block_size); + ASSERT(ABLK_TO_MBC(blk) == crr); + ASSERT(FBLK_TO_MBC(nxt_blk) == crr); } else { + ASSERT(org_blk_sz <= MBC_ABLK_SZ_MASK); blk_sz = org_blk_sz; if (flags & LAST_BLK_HDR_FLG) { if (valid_blk_info) SET_BLK_ALLOCED(blk); else - SET_BLK_HDR(blk, - blk_sz, - SBH_THIS_ALLOCED|SBH_LAST_BLK|prev_free_flg); + SET_MBC_ABLK_HDR(blk, blk_sz, SBH_LAST_BLK|prev_free_flg, crr); } else { if (valid_blk_info) SET_BLK_ALLOCED(blk); else - SET_BLK_HDR(blk, - blk_sz, - SBH_THIS_ALLOCED|SBH_NOT_LAST_BLK|prev_free_flg); - nxt_blk = NXT_BLK(blk); + SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr); + nxt_blk = BLK_AFTER(blk, blk_sz); SET_PREV_BLK_ALLOCED(nxt_blk); } ASSERT((flags & LAST_BLK_HDR_FLG) ? IS_LAST_BLK(blk) : IS_NOT_LAST_BLK(blk)); + ASSERT(ABLK_TO_MBC(blk) == crr); } STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs); ASSERT(IS_ALLOCED_BLK(blk)); - ASSERT(blk_sz == BLK_SZ(blk)); + ASSERT(blk_sz == MBC_BLK_SZ(blk)); ASSERT(blk_sz % sizeof(Unit_t) == 0); ASSERT(blk_sz >= allctr->min_block_size); ASSERT(blk_sz >= want_blk_sz); @@ -1348,8 +1454,9 @@ mbc_alloc(Allctr_t *allctr, Uint size) if (IS_MBC_BLK(blk)) mbc_alloc_finalize(allctr, blk, - BLK_SZ(blk), + MBC_FBLK_SZ(blk), GET_BLK_HDR_FLGS(blk), + FBLK_TO_MBC(blk), blk_sz, 1, alcu_flgs); @@ -1370,7 +1477,7 @@ mbc_free(Allctr_t *allctr, void *p) ASSERT(p); blk = UMEM2BLK(p); - blk_sz = BLK_SZ(blk); + blk_sz = MBC_ABLK_SZ(blk); if (blk_sz < allctr->sbmbc_threshold) alcu_flgs |= ERTS_ALCU_FLG_SBMBC; @@ -1381,17 +1488,18 @@ mbc_free(Allctr_t *allctr, void *p) STAT_MBC_BLK_FREE(allctr, blk_sz, alcu_flgs); - is_first_blk = IS_FIRST_BLK(blk); + is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk); is_last_blk = IS_LAST_BLK(blk); - if (!is_first_blk && IS_PREV_BLK_FREE(blk)) { + if (IS_PREV_BLK_FREE(blk)) { + ASSERT(!is_first_blk); /* Coalesce with previous block... */ blk = PREV_BLK(blk); (*allctr->unlink_free_block)(allctr, blk, alcu_flgs); - blk_sz += BLK_SZ(blk); - is_first_blk = IS_FIRST_BLK(blk); - SET_BLK_SZ(blk, blk_sz); + blk_sz += MBC_FBLK_SZ(blk); + is_first_blk = IS_MBC_FIRST_FBLK(allctr, blk); + SET_MBC_FBLK_SZ(blk, blk_sz); } else { SET_BLK_FREE(blk); @@ -1400,12 +1508,12 @@ mbc_free(Allctr_t *allctr, void *p) if (is_last_blk) SET_LAST_BLK(blk); else { - nxt_blk = NXT_BLK(blk); + nxt_blk = BLK_AFTER(blk, blk_sz); if (IS_FREE_BLK(nxt_blk)) { /* Coalesce with next block... 
*/ (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs); - blk_sz += BLK_SZ(nxt_blk); - SET_BLK_SZ(blk, blk_sz); + blk_sz += MBC_FBLK_SZ(nxt_blk); + SET_MBC_FBLK_SZ(blk, blk_sz); is_last_blk = IS_LAST_BLK(nxt_blk); if (is_last_blk) @@ -1416,26 +1524,26 @@ mbc_free(Allctr_t *allctr, void *p) } } else { - SET_PREV_BLK_FREE(nxt_blk); + SET_PREV_BLK_FREE(allctr, nxt_blk); SET_NOT_LAST_BLK(blk); SET_BLK_SZ_FTR(blk, blk_sz); } } - ASSERT(is_last_blk ? IS_LAST_BLK(blk) : IS_NOT_LAST_BLK(blk)); - ASSERT(is_first_blk ? IS_FIRST_BLK(blk) : IS_NOT_FIRST_BLK(blk)); ASSERT(IS_FREE_BLK(blk)); + ASSERT(!is_last_blk == !IS_LAST_BLK(blk)); + ASSERT(!is_first_blk == !IS_MBC_FIRST_FBLK(allctr, blk)); ASSERT(is_first_blk || IS_PREV_BLK_ALLOCED(blk)); ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(blk))); - ASSERT(blk_sz == BLK_SZ(blk)); + ASSERT(blk_sz == MBC_BLK_SZ(blk)); ASSERT(is_last_blk || blk == PREV_BLK(NXT_BLK(blk))); ASSERT(blk_sz % sizeof(Unit_t) == 0); ASSERT(IS_MBC_BLK(blk)); if (is_first_blk && is_last_blk - && allctr->main_carrier != FBLK2MBC(allctr, blk)) { + && allctr->main_carrier != FIRST_BLK_TO_MBC(allctr, blk)) { if (alcu_flgs & ERTS_ALCU_FLG_SBMBC) destroy_sbmbc(allctr, blk); else @@ -1472,7 +1580,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) ASSERT(size < allctr->sbc_threshold); blk = (Block_t *) UMEM2BLK(p); - old_blk_sz = BLK_SZ(blk); + old_blk_sz = MBC_ABLK_SZ(blk); ASSERT(old_blk_sz >= allctr->min_block_size); @@ -1497,6 +1605,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) return p; else if (blk_sz < old_blk_sz) { /* Shrink block... */ + Carrier_t* crr; Block_t *nxt_nxt_blk; Uint diff_sz_val = old_blk_sz - blk_sz; Uint old_blk_sz_val = old_blk_sz; @@ -1516,16 +1625,18 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) return NULL; cand_blk_sz = old_blk_sz; - if (!IS_PREV_BLK_FREE(blk) || IS_FIRST_BLK(blk)) + if (!IS_PREV_BLK_FREE(blk)) { cand_blk = blk; + } else { + ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk)); cand_blk = PREV_BLK(blk); cand_blk_sz += PREV_BLK_SZ(blk); } if (!is_last_blk) { - nxt_blk = NXT_BLK(blk); + nxt_blk = BLK_AFTER(blk, old_blk_sz); if (IS_FREE_BLK(nxt_blk)) - cand_blk_sz += BLK_SZ(nxt_blk); + cand_blk_sz += MBC_FBLK_SZ(nxt_blk); } new_blk = (*allctr->get_free_block)(allctr, @@ -1541,52 +1652,48 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) nxt_blk_sz = old_blk_sz - blk_sz; - if ((is_last_blk || IS_ALLOCED_BLK(NXT_BLK(blk))) + if ((is_last_blk || IS_ALLOCED_BLK(BLK_AFTER(blk,old_blk_sz))) && (nxt_blk_sz < allctr->min_block_size)) return p; HARD_CHECK_BLK_CARRIER(allctr, blk); - SET_BLK_SZ(blk, blk_sz); + nxt_nxt_blk = BLK_AFTER(blk, old_blk_sz); + + SET_MBC_ABLK_SZ(blk, blk_sz); SET_NOT_LAST_BLK(blk); - nxt_blk = NXT_BLK(blk); - SET_BLK_HDR(nxt_blk, - nxt_blk_sz, - SBH_THIS_FREE|SBH_PREV_ALLOCED|SBH_NOT_LAST_BLK); + nxt_blk = BLK_AFTER(blk, blk_sz); STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs); STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs); - ASSERT(BLK_SZ(blk) >= allctr->min_block_size); + ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size); - if (is_last_blk) - SET_LAST_BLK(nxt_blk); - else { - nxt_nxt_blk = NXT_BLK(nxt_blk); + if (!is_last_blk) { if (IS_FREE_BLK(nxt_nxt_blk)) { /* Coalesce with next free block... 
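
The coalescing in mbc_free() above is classic boundary-tag bookkeeping: a free block repeats its size in a trailing FreeBlkFtr_t word, so the block after it can reach its start in O(1) via PREV_BLK() and merge with it. A simplified stand-alone illustration (the real code also fixes up flags, free lists and statistics):

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t UWord;

#define THIS_FREE ((UWord)1)
#define FLG_MASK  ((UWord)7)
#define SZ(h)     ((h) & ~FLG_MASK)

static UWord carrier[1024];                    /* pretend carrier memory */

/* A free block repeats its size in a trailing footer word so that the
 * block after it can find its start in O(1) (PREV_BLK in the patch). */
static void mark_free(UWord *blk, UWord sz_bytes)
{
    blk[0] = sz_bytes | THIS_FREE;                   /* header */
    blk[sz_bytes/sizeof(UWord) - 1] = sz_bytes;      /* footer */
}

static UWord *prev_blk(UWord *blk)
{
    return (UWord *)((char *)blk - blk[-1]);         /* read previous footer */
}

int main(void)
{
    UWord *b1 = carrier;                             /* just became free  */
    UWord *b2 = (UWord *)((char *)carrier + 512);    /* its successor     */

    mark_free(b1, 512);
    b2[0] = 256;                                     /* allocated, 256 B  */

    printf("prev is b1: %d, coalesced size: %lu\n",
           prev_blk(b2) == b1,
           (unsigned long)(SZ(prev_blk(b2)[0]) + SZ(b2[0])));
    return 0;
}
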
*/ - nxt_blk_sz += BLK_SZ(nxt_nxt_blk); + nxt_blk_sz += MBC_FBLK_SZ(nxt_nxt_blk); (*allctr->unlink_free_block)(allctr, nxt_nxt_blk, alcu_flgs); - SET_BLK_SZ(nxt_blk, nxt_blk_sz); - is_last_blk = IS_LAST_BLK(nxt_nxt_blk); - if (is_last_blk) - SET_LAST_BLK(nxt_blk); - else - SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz); + is_last_blk = GET_LAST_BLK_HDR_FLG(nxt_nxt_blk); } else { - SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz); - SET_PREV_BLK_FREE(nxt_nxt_blk); + SET_PREV_BLK_FREE(allctr, nxt_nxt_blk); } + SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz); } + crr = ABLK_TO_MBC(blk); + SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz, + SBH_THIS_FREE | (is_last_blk ? SBH_LAST_BLK : 0), + crr); + (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs); ASSERT(IS_ALLOCED_BLK(blk)); - ASSERT(blk_sz == BLK_SZ(blk)); + ASSERT(blk_sz == MBC_BLK_SZ(blk)); ASSERT(blk_sz % sizeof(Unit_t) == 0); ASSERT(blk_sz >= allctr->min_block_size); ASSERT(blk_sz >= size + ABLK_HDR_SZ); @@ -1594,14 +1701,15 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) ASSERT(IS_FREE_BLK(nxt_blk)); ASSERT(IS_PREV_BLK_ALLOCED(nxt_blk)); - ASSERT(nxt_blk_sz == BLK_SZ(nxt_blk)); + ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk)); ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0); ASSERT(nxt_blk_sz >= allctr->min_block_size); ASSERT(IS_MBC_BLK(nxt_blk)); ASSERT(is_last_blk ? IS_LAST_BLK(nxt_blk) : IS_NOT_LAST_BLK(nxt_blk)); ASSERT(is_last_blk || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk))); ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk))); - + ASSERT(FBLK_TO_MBC(nxt_blk) == crr); + HARD_CHECK_BLK_CARRIER(allctr, blk); return p; @@ -1610,8 +1718,8 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) /* Need larger block... */ if (!is_last_blk) { - nxt_blk = NXT_BLK(blk); - nxt_blk_sz = BLK_SZ(nxt_blk); + nxt_blk = BLK_AFTER(blk, old_blk_sz); + nxt_blk_sz = MBC_BLK_SZ(nxt_blk); if (IS_FREE_BLK(nxt_blk) && get_blk_sz <= old_blk_sz + nxt_blk_sz) { /* Grow into next block... 
*/ @@ -1624,7 +1732,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) if (nxt_blk_sz < allctr->min_block_size) { blk_sz += nxt_blk_sz; - SET_BLK_SZ(blk, blk_sz); + SET_MBC_ABLK_SZ(blk, blk_sz); if (is_last_blk) { SET_LAST_BLK(blk); @@ -1633,21 +1741,20 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) #endif } else { - nxt_blk = NXT_BLK(blk); + nxt_blk = BLK_AFTER(blk, blk_sz); SET_PREV_BLK_ALLOCED(nxt_blk); #ifdef DEBUG is_last_blk = IS_LAST_BLK(nxt_blk); - nxt_blk_sz = BLK_SZ(nxt_blk); + nxt_blk_sz = MBC_BLK_SZ(nxt_blk); #endif } } else { - SET_BLK_SZ(blk, blk_sz); + Carrier_t* crr = ABLK_TO_MBC(blk); + SET_MBC_ABLK_SZ(blk, blk_sz); - nxt_blk = NXT_BLK(blk); - SET_BLK_HDR(nxt_blk, - nxt_blk_sz, - SBH_THIS_FREE|SBH_PREV_ALLOCED|SBH_NOT_LAST_BLK); + nxt_blk = BLK_AFTER(blk, blk_sz); + SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz, SBH_THIS_FREE, crr); if (is_last_blk) SET_LAST_BLK(nxt_blk); @@ -1657,6 +1764,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) (*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs); ASSERT(IS_FREE_BLK(nxt_blk)); + ASSERT(FBLK_TO_MBC(nxt_blk) == crr); } STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs); @@ -1664,14 +1772,14 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) ASSERT(IS_ALLOCED_BLK(blk)); - ASSERT(blk_sz == BLK_SZ(blk)); + ASSERT(blk_sz == MBC_BLK_SZ(blk)); ASSERT(blk_sz % sizeof(Unit_t) == 0); ASSERT(blk_sz >= allctr->min_block_size); ASSERT(blk_sz >= size + ABLK_HDR_SZ); ASSERT(IS_MBC_BLK(blk)); ASSERT(!nxt_blk || IS_PREV_BLK_ALLOCED(nxt_blk)); - ASSERT(!nxt_blk || nxt_blk_sz == BLK_SZ(nxt_blk)); + ASSERT(!nxt_blk || nxt_blk_sz == MBC_BLK_SZ(nxt_blk)); ASSERT(!nxt_blk || nxt_blk_sz % sizeof(Unit_t) == 0); ASSERT(!nxt_blk || nxt_blk_sz >= allctr->min_block_size); ASSERT(!nxt_blk || IS_MBC_BLK(nxt_blk)); @@ -1696,18 +1804,19 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) /* Need to grow in another block */ - if (!IS_PREV_BLK_FREE(blk) || IS_FIRST_BLK(blk)) { + if (!IS_PREV_BLK_FREE(blk)) { cand_blk = NULL; cand_blk_sz = 0; } else { + ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk)); cand_blk = PREV_BLK(blk); cand_blk_sz = old_blk_sz + PREV_BLK_SZ(blk); if (!is_last_blk) { - nxt_blk = NXT_BLK(blk); + nxt_blk = BLK_AFTER(blk, old_blk_sz); if (IS_FREE_BLK(nxt_blk)) - cand_blk_sz += BLK_SZ(nxt_blk); + cand_blk_sz += MBC_FBLK_SZ(nxt_blk); } } @@ -1743,8 +1852,9 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) if (new_blk) { mbc_alloc_finalize(allctr, new_blk, - BLK_SZ(new_blk), + MBC_FBLK_SZ(new_blk), GET_BLK_HDR_FLGS(new_blk), + FBLK_TO_MBC(new_blk), blk_sz, 1, alcu_flgs); @@ -1754,6 +1864,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) return new_p; } else { + Carrier_t* crr; Uint new_blk_sz; UWord new_blk_flgs; Uint prev_blk_sz; @@ -1774,10 +1885,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) if (is_last_blk) new_blk_flgs |= LAST_BLK_HDR_FLG; else { - nxt_blk = NXT_BLK(blk); + nxt_blk = BLK_AFTER(blk, old_blk_sz); if (IS_FREE_BLK(nxt_blk)) { new_blk_flgs |= GET_LAST_BLK_HDR_FLG(nxt_blk); - new_blk_sz += BLK_SZ(nxt_blk); + new_blk_sz += MBC_FBLK_SZ(nxt_blk); (*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs); } } @@ -1790,6 +1901,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) new_p = BLK2UMEM(new_blk); blk_cpy_sz = MIN(blk_sz, old_blk_sz); + crr = FBLK_TO_MBC(new_blk); if (prev_blk_sz >= blk_cpy_sz) sys_memcpy(new_p, p, blk_cpy_sz - ABLK_HDR_SZ); @@ -1800,6 
+1912,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) new_blk, new_blk_sz, new_blk_flgs, + crr, blk_sz, 0, alcu_flgs); @@ -1815,35 +1928,40 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs) #ifdef DEBUG #if HAVE_ERTS_MSEG -#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % mseg_unit_size == 0) +#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % MSEG_UNIT_SZ == 0) #else #define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) #endif -#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ) \ -do { \ - ASSERT(IS_FIRST_BLK((B))); \ - ASSERT(IS_LAST_BLK((B))); \ - ASSERT((CSZ) == CARRIER_SZ((C))); \ - ASSERT((BSZ) == BLK_SZ((B))); \ - ASSERT((BSZ) % sizeof(Unit_t) == 0); \ - if ((SBC)) { \ - ASSERT(IS_SBC_BLK((B))); \ - ASSERT(IS_SB_CARRIER((C))); \ - } \ - else { \ - ASSERT(IS_MBC_BLK((B))); \ - ASSERT(IS_MB_CARRIER((C))); \ - } \ - if ((MSEGED)) { \ - ASSERT(IS_MSEG_CARRIER((C))); \ - ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ)); \ - } \ - else { \ - ASSERT(IS_SYS_ALLOC_CARRIER((C))); \ - ASSERT((CSZ) % sizeof(Unit_t) == 0); \ - } \ -} while (0) +static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C, + UWord CSZ, Block_t* B, UWord BSZ) +{ + ASSERT(IS_LAST_BLK((B))); + ASSERT((CSZ) == CARRIER_SZ((C))); + ASSERT((BSZ) % sizeof(Unit_t) == 0); + if ((SBC)) { + ASSERT((BSZ) == SBC_BLK_SZ((B))); + ASSERT((char*)B == (char*)C + SBC_HEADER_SIZE); + ASSERT(IS_SBC_BLK((B))); + ASSERT(IS_SB_CARRIER((C))); + } + else { + ASSERT(IS_FREE_BLK(B)); + ASSERT((BSZ) == MBC_FBLK_SZ((B))); + ASSERT(IS_MBC_FIRST_FBLK(A, (B))); + ASSERT(IS_MBC_BLK((B))); + ASSERT(IS_MB_CARRIER((C))); + ASSERT(FBLK_TO_MBC(B) == (C)); + } + if ((MSEGED)) { + ASSERT(IS_MSEG_CARRIER((C))); + ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ)); + } + else { + ASSERT(IS_SYS_ALLOC_CARRIER((C))); + ASSERT((CSZ) % sizeof(Unit_t) == 0); + } +} #else #define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ) @@ -1865,37 +1983,18 @@ create_sbmbc(Allctr_t *allctr, Uint umem_sz) crr = erts_alloc(ERTS_ALC_T_SBMBC, crr_sz); INC_CC(allctr->calls.sbmbc_alloc); - SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC); - - blk = MBC2FBLK(allctr, crr); + SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr); -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - if (allctr->mbc_header_size % sizeof(Unit_t) == 0) - crr_sz -= sizeof(UWord); -#endif + blk = MBC_TO_FIRST_BLK(allctr, crr); - blk_sz = UNIT_FLOOR(crr_sz - allctr->mbc_header_size); + blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr)); - SET_MBC_BLK_FTR(((UWord *) blk)[-1]); - SET_BLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_PREV_FREE|SBH_LAST_BLK); - -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - *((Carrier_t **) NXT_BLK(blk)) = crr; -#endif + SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr); link_carrier(&allctr->sbmbc_list, crr); -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - if (allctr->mbc_header_size % sizeof(Unit_t) == 0) - crr_sz += sizeof(UWord); -#endif - STAT_SBMBC_ALLOC(allctr, crr_sz); CHECK_1BLK_CARRIER(allctr, 0, 0, crr, crr_sz, blk, blk_sz); -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - if (allctr->mbc_header_size % sizeof(Unit_t) == 0) - crr_sz -= sizeof(UWord); -#endif if (allctr->creating_mbc) (*allctr->creating_mbc)(allctr, crr, ERTS_ALCU_FLG_SBMBC); @@ -1909,11 +2008,10 @@ destroy_sbmbc(Allctr_t *allctr, Block_t *blk) Uint crr_sz; Carrier_t *crr; - ASSERT(IS_FIRST_BLK(blk)); - ASSERT(IS_MBC_BLK(blk)); + ASSERT(IS_MBC_FIRST_FBLK(allctr, blk)); - crr = FBLK2MBC(allctr, blk); + crr = FIRST_BLK_TO_MBC(allctr, blk); crr_sz = CARRIER_SZ(crr); #ifdef DEBUG @@ 
-1952,14 +2050,25 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) Uint blk_sz, bcrr_sz, crr_sz; #if HAVE_ERTS_MSEG int have_tried_sys_alloc = 0, have_tried_mseg = 0; + Uint mseg_flags; #endif #ifdef DEBUG int is_mseg = 0; #endif +#if HALFWORD_HEAP + flags |= CFLG_FORCE_MSEG; +#elif HAVE_SUPER_ALIGNED_MB_CARRIERS + if (flags & CFLG_MBC) { + flags |= CFLG_FORCE_MSEG; + } +#endif + ASSERT((flags & CFLG_SBC && !(flags & CFLG_MBC)) || (flags & CFLG_MBC && !(flags & CFLG_SBC))); + ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC)); + blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz); #if HAVE_ERTS_MSEG @@ -1974,29 +2083,27 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs) goto try_sys_alloc; } +#if !HAVE_SUPER_ALIGNED_MB_CARRIERS else { if (allctr->mbcs.curr.norm.mseg.no >= allctr->max_mseg_mbcs) goto try_sys_alloc; } +#endif try_mseg: if (flags & CFLG_SBC) { - crr_sz = blk_sz + allctr->sbc_header_size; + crr_sz = blk_sz + SBC_HEADER_SIZE; + mseg_flags = ERTS_MSEG_FLG_NONE; } else { crr_sz = (*allctr->get_next_mbc_size)(allctr); - if (crr_sz < allctr->mbc_header_size + blk_sz) - crr_sz = allctr->mbc_header_size + blk_sz; -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - if (allctr->mbc_header_size % sizeof(Unit_t) == 0) - crr_sz += sizeof(UWord); -#endif + if (crr_sz < MBC_HEADER_SIZE(allctr) + blk_sz) + crr_sz = MBC_HEADER_SIZE(allctr) + blk_sz; + mseg_flags = ERTS_MSEG_FLG_2POW; } - crr_sz = MSEG_UNIT_CEILING(crr_sz); - ASSERT(crr_sz % mseg_unit_size == 0); - crr = (Carrier_t *) alcu_mseg_alloc(allctr, &crr_sz); + crr = (Carrier_t *) alcu_mseg_alloc(allctr, &crr_sz, mseg_flags); if (!crr) { have_tried_mseg = 1; if (!(have_tried_sys_alloc || flags & CFLG_FORCE_MSEG)) @@ -2008,32 +2115,31 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) is_mseg = 1; #endif if (flags & CFLG_SBC) { - SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_SBC); + SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_SBC, allctr); STAT_MSEG_SBC_ALLOC(allctr, crr_sz, blk_sz); goto sbc_final_touch; } else { - SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_MBC); +#ifndef ARCH_64 + ASSERT(crr_sz <= MBC_SZ_MAX_LIMIT); +#endif + SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_MBC, allctr); STAT_MSEG_MBC_ALLOC(allctr, crr_sz); goto mbc_final_touch; } try_sys_alloc: + #endif /* #if HAVE_ERTS_MSEG */ if (flags & CFLG_SBC) { - bcrr_sz = blk_sz + allctr->sbc_header_size; + bcrr_sz = blk_sz + SBC_HEADER_SIZE; } else { - bcrr_sz = allctr->mbc_header_size + blk_sz; + bcrr_sz = MBC_HEADER_SIZE(allctr) + blk_sz; if (!(flags & CFLG_MAIN_CARRIER) && bcrr_sz < allctr->smallest_mbc_size) bcrr_sz = allctr->smallest_mbc_size; -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - if (allctr->mbc_header_size % sizeof(Unit_t) == 0) - bcrr_sz += sizeof(UWord); -#endif - } crr_sz = (flags & CFLG_FORCE_SIZE @@ -2057,7 +2163,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) } } if (flags & CFLG_SBC) { - SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_SBC); + SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_SBC, allctr); STAT_SYS_ALLOC_SBC_ALLOC(allctr, crr_sz, blk_sz); #if HAVE_ERTS_MSEG @@ -2066,8 +2172,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) blk = SBC2BLK(allctr, crr); - SET_SBC_BLK_FTR(((UWord *) blk)[-1]); - SET_BLK_HDR(blk, blk_sz, SBH_THIS_ALLOCED|SBH_PREV_FREE|SBH_LAST_BLK); + SET_SBC_BLK_HDR(blk, blk_sz); link_carrier(&allctr->sbc_list, crr); @@ -2075,28 +2180,18 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) } else { - SET_CARRIER_HDR(crr, 
crr_sz, SCH_SYS_ALLOC|SCH_MBC); + SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr); STAT_SYS_ALLOC_MBC_ALLOC(allctr, crr_sz); #if HAVE_ERTS_MSEG mbc_final_touch: #endif - blk = MBC2FBLK(allctr, crr); - -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - if (allctr->mbc_header_size % sizeof(Unit_t) == 0) - crr_sz -= sizeof(UWord); -#endif - - blk_sz = UNIT_FLOOR(crr_sz - allctr->mbc_header_size); + blk = MBC_TO_FIRST_BLK(allctr, crr); - SET_MBC_BLK_FTR(((UWord *) blk)[-1]); - SET_BLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_PREV_FREE|SBH_LAST_BLK); + blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr)); -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - *((Carrier_t **) NXT_BLK(blk)) = crr; -#endif + SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr); if (flags & CFLG_MAIN_CARRIER) { ASSERT(!allctr->main_carrier); @@ -2105,15 +2200,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) link_carrier(&allctr->mbc_list, crr); -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - if (allctr->mbc_header_size % sizeof(Unit_t) == 0) - crr_sz += sizeof(UWord); -#endif CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz); -#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG - if (allctr->mbc_header_size % sizeof(Unit_t) == 0) - crr_sz -= sizeof(UWord); -#endif if (allctr->creating_mbc) (*allctr->creating_mbc)(allctr, crr, 0); @@ -2142,8 +2229,8 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) HARD_CHECK_BLK_CARRIER(allctr, old_blk); - old_blk_sz = BLK_SZ(old_blk); - old_crr = BLK2SBC(allctr, old_blk); + old_blk_sz = SBC_BLK_SZ(old_blk); + old_crr = BLK_TO_SBC(old_blk); old_crr_sz = CARRIER_SZ(old_crr); ASSERT(IS_SB_CARRIER(old_crr)); ASSERT(IS_SBC_BLK(old_blk)); @@ -2157,7 +2244,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) if (!(flags & CFLG_FORCE_SYS_ALLOC)) { - new_crr_sz = new_blk_sz + allctr->sbc_header_size; + new_crr_sz = new_blk_sz + SBC_HEADER_SIZE; new_crr_sz = MSEG_UNIT_CEILING(new_crr_sz); new_crr = (Carrier_t *) alcu_mseg_realloc(allctr, old_crr, @@ -2166,7 +2253,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) if (new_crr) { SET_CARRIER_SZ(new_crr, new_crr_sz); new_blk = SBC2BLK(allctr, new_crr); - SET_BLK_SZ(new_blk, new_blk_sz); + SET_SBC_BLK_SZ(new_blk, new_blk_sz); STAT_MSEG_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz); relink_carrier(&allctr->sbc_list, new_crr); CHECK_1BLK_CARRIER(allctr, 1, 1, new_crr, new_crr_sz, @@ -2174,6 +2261,11 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) DEBUG_SAVE_ALIGNMENT(new_crr); return new_blk; } +#if HALFWORD_HEAP + /* Old carrier unchanged; restore stat */ + STAT_MSEG_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz); + return NULL; +#endif create_flags |= CFLG_FORCE_SYS_ALLOC; /* since mseg_realloc() failed */ } @@ -2196,7 +2288,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) else { if (!(flags & CFLG_FORCE_MSEG)) { #endif /* #if HAVE_ERTS_MSEG */ - new_bcrr_sz = new_blk_sz + allctr->sbc_header_size; + new_bcrr_sz = new_blk_sz + SBC_HEADER_SIZE; new_crr_sz = (flags & CFLG_FORCE_SIZE ? 
UNIT_CEILING(new_bcrr_sz) : SYS_ALLOC_CARRIER_CEILING(new_bcrr_sz)); @@ -2208,7 +2300,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) sys_realloc_success: SET_CARRIER_SZ(new_crr, new_crr_sz); new_blk = SBC2BLK(allctr, new_crr); - SET_BLK_SZ(new_blk, new_blk_sz); + SET_SBC_BLK_SZ(new_blk, new_blk_sz); STAT_SYS_ALLOC_SBC_FREE(allctr, old_crr_sz, old_blk_sz); STAT_SYS_ALLOC_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz); relink_carrier(&allctr->sbc_list, new_crr); @@ -2218,7 +2310,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags) return new_blk; } else if (new_crr_sz > UNIT_CEILING(new_bcrr_sz)) { - new_crr_sz = new_blk_sz + allctr->sbc_header_size; + new_crr_sz = new_blk_sz + SBC_HEADER_SIZE; new_crr_sz = UNIT_CEILING(new_crr_sz); new_crr = (Carrier_t *) alcu_sys_realloc(allctr, (void *) old_crr, @@ -2262,11 +2354,9 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk) Uint is_mseg = 0; #endif - ASSERT(IS_FIRST_BLK(blk)); - if (IS_SBC_BLK(blk)) { - Uint blk_sz = BLK_SZ(blk); - crr = BLK2SBC(allctr, blk); + Uint blk_sz = SBC_BLK_SZ(blk); + crr = BLK_TO_SBC(blk); crr_sz = CARRIER_SZ(crr); ASSERT(IS_LAST_BLK(blk)); @@ -2276,7 +2366,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk) #if HAVE_ERTS_MSEG if (IS_MSEG_CARRIER(crr)) { is_mseg++; - ASSERT(crr_sz % mseg_unit_size == 0); + ASSERT(crr_sz % MSEG_UNIT_SZ == 0); STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz); } else @@ -2287,7 +2377,8 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk) } else { - crr = FBLK2MBC(allctr, blk); + ASSERT(IS_MBC_FIRST_FBLK(allctr, blk)); + crr = FIRST_BLK_TO_MBC(allctr, blk); crr_sz = CARRIER_SZ(crr); #ifdef DEBUG @@ -2305,7 +2396,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk) #if HAVE_ERTS_MSEG if (IS_MSEG_CARRIER(crr)) { is_mseg++; - ASSERT(crr_sz % mseg_unit_size == 0); + ASSERT(crr_sz % MSEG_UNIT_SZ == 0); STAT_MSEG_MBC_FREE(allctr, crr_sz); } else @@ -3430,12 +3521,7 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) if (allctr->dd.use) ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1); #endif -#if HALFWORD_HEAP - blk = create_carrier(allctr, size, - CFLG_SBC | CFLG_FORCE_MSEG); -#else blk = create_carrier(allctr, size, CFLG_SBC); -#endif res = blk ? 
BLK2UMEM(blk) : NULL; } else @@ -3506,24 +3592,20 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size) void * erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size) { - int pref_ix; Allctr_t *pref_allctr; void *res; - pref_ix = get_pref_allctr(extra, &pref_allctr); + pref_allctr = get_pref_allctr(extra); if (pref_allctr->thread_safe) erts_mtx_lock(&pref_allctr->mutex); ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr); - res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord)); + res = do_erts_alcu_alloc(type, pref_allctr, size); if (pref_allctr->thread_safe) erts_mtx_unlock(&pref_allctr->mutex); - if (res) - res = put_used_allctr(res, pref_ix, size); - DEBUG_CHECK_ALIGNMENT(res); @@ -3644,21 +3726,20 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p) { if (p) { Allctr_t *pref_allctr, *used_allctr; - void *ptr; - get_pref_allctr(extra, &pref_allctr); - ptr = get_used_allctr(extra, p, &used_allctr, NULL); + pref_allctr = get_pref_allctr(extra); + used_allctr = get_used_allctr(extra, p, NULL); if (pref_allctr != used_allctr) enqueue_dealloc_other_instance(type, used_allctr, - ptr, + p, (used_allctr->dd.ix - pref_allctr->dd.ix)); else { if (used_allctr->thread_safe) erts_mtx_lock(&used_allctr->mutex); ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr); - do_erts_alcu_free(type, used_allctr, ptr); + do_erts_alcu_free(type, used_allctr, p); if (used_allctr->thread_safe) erts_mtx_unlock(&used_allctr->mutex); } @@ -3739,13 +3820,13 @@ do_erts_alcu_realloc(ErtsAlcType_t type, if (IS_MBC_BLK(blk)) res = mbc_realloc(allctr, p, size, alcu_flgs); else { - Uint used_sz = allctr->sbc_header_size + ABLK_HDR_SZ + size; + Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size; Uint crr_sz; Uint diff_sz_val; Uint crr_sz_val; #if HAVE_ERTS_MSEG - if (IS_SYS_ALLOC_CARRIER(BLK2SBC(allctr, blk))) + if (IS_SYS_ALLOC_CARRIER(BLK_TO_SBC(blk))) #endif crr_sz = SYS_ALLOC_CARRIER_CEILING(used_sz); #if HAVE_ERTS_MSEG @@ -3775,7 +3856,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type, if (res) { sys_memcpy((void*) res, (void*) p, - MIN(BLK_SZ(blk) - ABLK_HDR_SZ, size)); + MIN(SBC_BLK_SZ(blk) - ABLK_HDR_SZ, size)); destroy_carrier(allctr, blk); } } @@ -3798,16 +3879,12 @@ do_erts_alcu_realloc(ErtsAlcType_t type, else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE) return NULL; else { -#if HALFWORD_HEAP - new_blk = create_carrier(allctr, size, CFLG_SBC | CFLG_FORCE_MSEG); -#else new_blk = create_carrier(allctr, size, CFLG_SBC); -#endif if (new_blk) { res = BLK2UMEM(new_blk); sys_memcpy((void *) res, (void *) p, - MIN(BLK_SZ(blk) - ABLK_HDR_SZ, size)); + MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size)); mbc_free(allctr, p); } else @@ -3966,16 +4043,15 @@ static ERTS_INLINE void * realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size, int force_move) { - int pref_ix; - void *ptr, *res; + void *res; Allctr_t *pref_allctr, *used_allctr; UWord old_user_size; if (!p) return erts_alcu_alloc_thr_pref(type, extra, size); - pref_ix = get_pref_allctr(extra, &pref_allctr); - ptr = get_used_allctr(extra, p, &used_allctr, &old_user_size); + pref_allctr = get_pref_allctr(extra); + used_allctr = get_used_allctr(extra, p, &old_user_size); ASSERT(used_allctr && pref_allctr); @@ -3985,56 +4061,33 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size, ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr); res = do_erts_alcu_realloc(type, used_allctr, - ptr, - size + sizeof(UWord), + p, + size, 0); if (used_allctr->thread_safe) erts_mtx_unlock(&used_allctr->mutex); - if (res) - res = 
put_used_allctr(res, pref_ix, size); } else { if (pref_allctr->thread_safe) erts_mtx_lock(&pref_allctr->mutex); - res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord)); - if (pref_allctr->thread_safe && (!force_move - || used_allctr != pref_allctr)) + res = do_erts_alcu_alloc(type, pref_allctr, size); + if (pref_allctr->thread_safe && used_allctr != pref_allctr) { erts_mtx_unlock(&pref_allctr->mutex); + } if (res) { - Block_t *blk; - size_t cpy_size; - - res = put_used_allctr(res, pref_ix, size); - DEBUG_CHECK_ALIGNMENT(res); - blk = UMEM2BLK(ptr); - if (old_user_size != ERTS_AU_PREF_ALLOC_SIZE_MASK) - cpy_size = old_user_size; - else { - if (used_allctr->thread_safe && (!force_move - || used_allctr != pref_allctr)) - erts_mtx_lock(&used_allctr->mutex); - ERTS_SMP_LC_ASSERT(!used_allctr->thread_safe || - erts_lc_mtx_is_locked(&used_allctr->mutex)); - cpy_size = BLK_SZ(blk); - if (used_allctr->thread_safe && (!force_move - || used_allctr != pref_allctr)) - erts_mtx_unlock(&used_allctr->mutex); - cpy_size -= ABLK_HDR_SZ + sizeof(UWord); - } - if (cpy_size > size) - cpy_size = size; - sys_memcpy(res, p, cpy_size); + sys_memcpy(res, p, MIN(size,old_user_size)); - if (!force_move || used_allctr != pref_allctr) + if (used_allctr != pref_allctr) { enqueue_dealloc_other_instance(type, used_allctr, - ptr, + p, (used_allctr->dd.ix - pref_allctr->dd.ix)); + } else { - do_erts_alcu_free(type, used_allctr, ptr); + do_erts_alcu_free(type, used_allctr, p); ASSERT(pref_allctr == used_allctr); if (pref_allctr->thread_safe) erts_mtx_unlock(&pref_allctr->mutex); @@ -4111,7 +4164,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->ramv = init->ramv; allctr->main_carrier_size = init->mmbcs; - allctr->sbc_threshold = init->sbct; + #if HAVE_ERTS_MSEG allctr->mseg_opt.abs_shrink_th = init->asbcst; allctr->mseg_opt.rel_shrink_th = init->rsbcst; @@ -4120,20 +4173,29 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->mbc_move_threshold = init->rmbcmt; #if HAVE_ERTS_MSEG allctr->max_mseg_sbcs = init->mmsbc; +# if HAVE_SUPER_ALIGNED_MB_CARRIERS + allctr->max_mseg_mbcs = ~(Uint)0; +# else allctr->max_mseg_mbcs = init->mmmbc; +# endif #endif allctr->largest_mbc_size = MAX(init->lmbcs, init->smbcs); +#ifndef ARCH_64 + if (allctr->largest_mbc_size > MBC_SZ_MAX_LIMIT) { + allctr->largest_mbc_size = MBC_SZ_MAX_LIMIT; + } +#endif allctr->smallest_mbc_size = init->smbcs; allctr->mbc_growth_stages = MAX(1, init->mbcgs); if (allctr->min_block_size < ABLK_HDR_SZ) goto error; allctr->min_block_size = UNIT_CEILING(allctr->min_block_size - + sizeof(UWord)); + + sizeof(FreeBlkFtr_t)); #if ERTS_SMP if (init->tpref) { - Uint sz = sizeof(Block_t); + Uint sz = ABLK_HDR_SZ; sz += ERTS_ALCU_DD_FIX_TYPE_OFFS*sizeof(UWord); if (init->fix) sz += sizeof(UWord); @@ -4143,6 +4205,23 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) } #endif + allctr->sbc_threshold = init->sbct; +#ifndef ARCH_64 + if (allctr->sbc_threshold > 0) { + Uint max_mbc_block_sz = UNIT_CEILING(allctr->sbc_threshold - 1 + ABLK_HDR_SZ); + if (max_mbc_block_sz + UNIT_FLOOR(allctr->min_block_size - 1) > MBC_ABLK_SZ_MASK + || max_mbc_block_sz < allctr->sbc_threshold) { /* wrap around */ + /* + * By limiting sbc_threshold to (hard limit - min_block_size) + * we avoid having to split off free "residue blocks" + * smaller than min_block_size. 
+ */ + max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(allctr->min_block_size - 1); + allctr->sbc_threshold = max_mbc_block_sz - ABLK_HDR_SZ + 1; + } + } +#endif + allctr->sbmbc_threshold = init->sbmbct; @@ -4158,7 +4237,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->sbmbc_size = init->sbmbcs; min_size = allctr->sbmbc_threshold; min_size += allctr->min_block_size; - min_size += allctr->mbc_header_size; + min_size += MBC_HEADER_SIZE(allctr); if (allctr->sbmbc_size < min_size) allctr->sbmbc_size = min_size; } @@ -4203,59 +4282,26 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) if (!allctr->get_next_mbc_size) allctr->get_next_mbc_size = get_next_mbc_size; - if (allctr->mbc_header_size < sizeof(Carrier_t)) - goto error; #ifdef ERTS_SMP allctr->dd.use = 0; if (init->tpref) { - allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size - + FBLK_FTR_SZ - + ABLK_HDR_SZ - + sizeof(UWord)) - - ABLK_HDR_SZ - - sizeof(UWord)); - allctr->sbc_header_size = (UNIT_CEILING(sizeof(Carrier_t) - + FBLK_FTR_SZ - + ABLK_HDR_SZ - + sizeof(UWord)) - - ABLK_HDR_SZ - - sizeof(UWord)); - allctr->dd.use = 1; init_dd_queue(&allctr->dd.q); allctr->dd.ix = init->ix; } - else #endif - { - allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size - + FBLK_FTR_SZ - + ABLK_HDR_SZ) - - ABLK_HDR_SZ); - allctr->sbc_header_size = (UNIT_CEILING(sizeof(Carrier_t) - + FBLK_FTR_SZ - + ABLK_HDR_SZ) - - ABLK_HDR_SZ); - } if (allctr->main_carrier_size) { Block_t *blk; -#if HALFWORD_HEAP - blk = create_carrier(allctr, - allctr->main_carrier_size, - CFLG_MBC - | CFLG_FORCE_SIZE - | CFLG_FORCE_MSEG - | CFLG_MAIN_CARRIER); -#else blk = create_carrier(allctr, allctr->main_carrier_size, CFLG_MBC | CFLG_FORCE_SIZE +#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS | CFLG_FORCE_SYS_ALLOC - | CFLG_MAIN_CARRIER); #endif + | CFLG_MAIN_CARRIER); if (!blk) goto error; @@ -4303,9 +4349,9 @@ erts_alcu_stop(Allctr_t *allctr) while (allctr->sbc_list.first) destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first)); while (allctr->mbc_list.first) - destroy_carrier(allctr, MBC2FBLK(allctr, allctr->mbc_list.first)); + destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first)); while (allctr->sbmbc_list.first) - destroy_sbmbc(allctr, MBC2FBLK(allctr, allctr->sbmbc_list.first)); + destroy_sbmbc(allctr, MBC_TO_FIRST_BLK(allctr, allctr->sbmbc_list.first)); #ifdef USE_THREADS if (allctr->thread_safe) @@ -4319,17 +4365,9 @@ erts_alcu_stop(Allctr_t *allctr) void erts_alcu_init(AlcUInit_t *init) { - + ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */ #if HAVE_ERTS_MSEG - mseg_unit_size = erts_mseg_unit_size(); - - if (mseg_unit_size % sizeof(Unit_t)) /* A little paranoid... 
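
The sbc_threshold clamp above can be sanity-checked with plain arithmetic. The sketch below replays it with assumed constants for a 32-bit, super-aligned-carrier build (9 offset bits, 8-byte units, an oversized 16 MB +sbct setting); the numbers are illustrative, not taken from the patch or its documentation.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t UWord;               /* the clamp only matters on 32-bit */

#define UNIT             8u
#define UNIT_FLOOR(X)    ((X) & ~(UNIT - 1))
#define UNIT_CEILING(X)  UNIT_FLOOR((X) + UNIT - 1)
#define ABLK_HDR_SZ      8u
/* 9 offset bits + 3 flag bits on a 32-bit word leave bits 3..22 for the
 * allocated-block size (the HAVE_SUPER_ALIGNED_MB_CARRIERS case). */
#define MBC_ABLK_SZ_MASK ((((UWord)1 << 23) - 1) & ~(UWord)7)

int main(void)
{
    UWord min_block_size = 32;
    UWord sbct = 16u*1024u*1024u;     /* an oversized +sbct setting */

    UWord max_mbc_blk = UNIT_CEILING(sbct - 1 + ABLK_HDR_SZ);
    if (max_mbc_blk + UNIT_FLOOR(min_block_size - 1) > MBC_ABLK_SZ_MASK
        || max_mbc_blk < sbct) {      /* wrap around */
        max_mbc_blk = MBC_ABLK_SZ_MASK - UNIT_FLOOR(min_block_size - 1);
        sbct = max_mbc_blk - ABLK_HDR_SZ + 1;
    }
    printf("clamped sbct=%u, largest MBC block=%u\n",
           (unsigned)sbct, (unsigned)max_mbc_blk);
    return 0;
}
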
*/ - erl_exit(-1, - "Mseg unit size (%d) not evenly divideble by " - "internal unit size of alloc_util (%d)\n", - mseg_unit_size, - sizeof(Unit_t)); - + ASSERT(erts_mseg_unit_size() == MSEG_UNIT_SZ); max_mseg_carriers = init->mmc; sys_alloc_carrier_size = MSEG_UNIT_CEILING(init->ycs); #else /* #if HAVE_ERTS_MSEG */ @@ -4372,11 +4410,10 @@ erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2) case 0x00b: return (unsigned long) CARRIER_SZ((Carrier_t *) a1); case 0x00c: return (unsigned long) SBC2BLK((Allctr_t *) a1, (Carrier_t *) a2); - case 0x00d: return (unsigned long) BLK2SBC((Allctr_t *) a1, - (Block_t *) a2); - case 0x00e: return (unsigned long) MBC2FBLK((Allctr_t *) a1, + case 0x00d: return (unsigned long) BLK_TO_SBC((Block_t *) a2); + case 0x00e: return (unsigned long) MBC_TO_FIRST_BLK((Allctr_t *) a1, (Carrier_t *) a2); - case 0x00f: return (unsigned long) FBLK2MBC((Allctr_t *) a1, + case 0x00f: return (unsigned long) FIRST_BLK_TO_MBC((Allctr_t *) a1, (Block_t *) a2); case 0x010: return (unsigned long) ((Allctr_t *) a1)->mbc_list.first; case 0x011: return (unsigned long) ((Allctr_t *) a1)->mbc_list.last; @@ -4388,7 +4425,7 @@ erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2) case 0x017: return (unsigned long) ((Allctr_t *) a1)->min_block_size; case 0x018: return (unsigned long) NXT_BLK((Block_t *) a1); case 0x019: return (unsigned long) PREV_BLK((Block_t *) a1); - case 0x01a: return (unsigned long) IS_FIRST_BLK((Block_t *) a1); + case 0x01a: return (unsigned long) IS_MBC_FIRST_BLK((Allctr_t*)a1, (Block_t *) a2); case 0x01b: return (unsigned long) sizeof(Unit_t); default: ASSERT(0); return ~((unsigned long) 0); } @@ -4432,6 +4469,13 @@ erts_alcu_verify_unused_ts(Allctr_t *allctr) #endif } +#ifdef DEBUG +int is_sbc_blk(Block_t* blk) +{ + return IS_SBC_BLK(blk); +} +#endif + #ifdef ERTS_ALLOC_UTIL_HARD_DEBUG static void @@ -4441,34 +4485,37 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk) CarrierList_t *cl; if (IS_SBC_BLK(iblk)) { - Carrier_t *sbc = BLK2SBC(allctr, iblk); + Carrier_t *sbc = BLK_TO_SBC(iblk); ASSERT(SBC2BLK(allctr, sbc) == iblk); - ASSERT(IS_ALLOCED_BLK(iblk)); - ASSERT(IS_FIRST_BLK(iblk)); - ASSERT(IS_LAST_BLK(iblk)); - ASSERT(CARRIER_SZ(sbc) - allctr->sbc_header_size >= BLK_SZ(iblk)); + ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk)); #if HAVE_ERTS_MSEG if (IS_MSEG_CARRIER(sbc)) { - ASSERT(CARRIER_SZ(sbc) % mseg_unit_size == 0); + ASSERT(CARRIER_SZ(sbc) % MSEG_UNIT_SZ == 0); } #endif crr = sbc; cl = &allctr->sbc_list; } else { - Carrier_t *mbc = NULL; Block_t *prev_blk = NULL; Block_t *blk; char *carrier_end; Uint is_free_blk; Uint tot_blk_sz; Uint blk_sz; + int has_wrapped_around = 0; blk = iblk; tot_blk_sz = 0; + crr = BLK_TO_MBC(blk); + ASSERT(IS_MB_CARRIER(crr)); + /* Step around the carrier one whole lap starting at 'iblk' + */ while (1) { + ASSERT(IS_MBC_BLK(blk)); + ASSERT(BLK_TO_MBC(blk) == crr); if (prev_blk) { ASSERT(NXT_BLK(prev_blk) == blk); @@ -4481,18 +4528,16 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk) } } - if (mbc) { + if (has_wrapped_around) { + ASSERT(((Block_t *) crr) < blk); if (blk == iblk) break; - ASSERT(((Block_t *) mbc) < blk && blk < iblk); + ASSERT(blk < iblk); } else ASSERT(blk >= iblk); - - ASSERT(IS_MBC_BLK(blk)); - - blk_sz = BLK_SZ(blk); + blk_sz = MBC_BLK_SZ(blk); ASSERT(blk_sz % sizeof(Unit_t) == 0); ASSERT(blk_sz >= allctr->min_block_size); @@ -4500,44 +4545,40 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk) tot_blk_sz += blk_sz; is_free_blk = (int) 
IS_FREE_BLK(blk); - if(is_free_blk) { - if (IS_NOT_LAST_BLK(blk)) - ASSERT(*((UWord *) (((char *) blk)+blk_sz-sizeof(UWord))) - == blk_sz); - } + ASSERT(!is_free_blk + || IS_LAST_BLK(blk) + || PREV_BLK_SZ(((char *) blk)+blk_sz) == blk_sz); if (allctr->check_block) (*allctr->check_block)(allctr, blk, (int) is_free_blk); if (IS_LAST_BLK(blk)) { carrier_end = ((char *) NXT_BLK(blk)); - mbc = *((Carrier_t **) NXT_BLK(blk)); + has_wrapped_around = 1; prev_blk = NULL; - blk = MBC2FBLK(allctr, mbc); - ASSERT(IS_FIRST_BLK(blk)); + blk = MBC_TO_FIRST_BLK(allctr, crr); + ASSERT(IS_MBC_FIRST_BLK(allctr,blk)); } else { prev_blk = blk; blk = NXT_BLK(blk); } } - - ASSERT(IS_MB_CARRIER(mbc)); - ASSERT((((char *) mbc) - + allctr->mbc_header_size + + ASSERT((((char *) crr) + + MBC_HEADER_SIZE(allctr) + tot_blk_sz) == carrier_end); - ASSERT(((char *) mbc) + CARRIER_SZ(mbc) - sizeof(Unit_t) <= carrier_end - && carrier_end <= ((char *) mbc) + CARRIER_SZ(mbc)); + ASSERT(((char *) crr) + CARRIER_SZ(crr) - sizeof(Unit_t) <= carrier_end + && carrier_end <= ((char *) crr) + CARRIER_SZ(crr)); if (allctr->check_mbc) - (*allctr->check_mbc)(allctr, mbc); + (*allctr->check_mbc)(allctr, crr); #if HAVE_ERTS_MSEG - if (IS_MSEG_CARRIER(mbc)) { - ASSERT(CARRIER_SZ(mbc) % mseg_unit_size == 0); + if (IS_MSEG_CARRIER(crr)) { + ASSERT(CARRIER_SZ(crr) % MSEG_UNIT_SZ == 0); } #endif - crr = mbc; cl = &allctr->mbc_list; } @@ -4559,4 +4600,5 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk) #endif } -#endif +#endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */ + diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index cedf4ccf85..e0754e7f69 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -216,16 +216,40 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); #define UNIT_FLOOR(X) ((X) & UNIT_MASK) #define UNIT_CEILING(X) UNIT_FLOOR((X) + INV_UNIT_MASK) +#define FLG_MASK INV_UNIT_MASK +#define SBC_BLK_SZ_MASK UNIT_MASK +#define MBC_FBLK_SZ_MASK UNIT_MASK +#define CARRIER_SZ_MASK UNIT_MASK -#define SZ_MASK (~((UWord) 0) << 3) -#define FLG_MASK (~(SZ_MASK)) +#if HAVE_ERTS_MSEG +# ifdef ARCH_64 +# define MBC_ABLK_OFFSET_BITS 24 +# elif HAVE_SUPER_ALIGNED_MB_CARRIERS +# define MBC_ABLK_OFFSET_BITS 9 + /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */ +# endif +#endif +#ifndef MBC_ABLK_OFFSET_BITS +# define MBC_ABLK_OFFSET_BITS 0 /* no carrier offset in block header */ +#endif + +#if MBC_ABLK_OFFSET_BITS +# define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS) +# define MBC_ABLK_OFFSET_MASK (~((UWord)0) << MBC_ABLK_OFFSET_SHIFT) +# define MBC_ABLK_SZ_MASK (~MBC_ABLK_OFFSET_MASK & ~FLG_MASK) +# define HAVE_ERTS_SBMBC 0 +#else +# define MBC_ABLK_SZ_MASK (~FLG_MASK) +# define HAVE_ERTS_SBMBC 1 +#endif -#define BLK_SZ(B) \ - (*((Block_t *) (B)) & SZ_MASK) +#define MBC_ABLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK) +#define MBC_FBLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK) +#define SBC_BLK_SZ(B) (ASSERT_EXPR(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK) #define CARRIER_SZ(C) \ - ((C)->chdr & SZ_MASK) + ((C)->chdr & CARRIER_SZ_MASK) extern int erts_have_sbmbc_alloc; @@ -236,6 +260,7 @@ struct Carrier_t_ { UWord chdr; Carrier_t *next; Carrier_t *prev; + Allctr_t *allctr; }; typedef struct { @@ -243,8 +268,19 @@ typedef struct { Carrier_t *last; } CarrierList_t; -typedef UWord Block_t; -typedef UWord FreeBlkFtr_t; +typedef struct { + UWord bhdr; +#if !MBC_ABLK_OFFSET_BITS 
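A minimal sketch, not the OTP code, of the layout idea behind the new bhdr word and the MBC_ABLK_* masks introduced above (assuming a 64-bit word; all *_DEMO names below are hypothetical stand-ins): an allocated multiblock-carrier block no longer carries a carrier back-pointer, because the distance to its super-aligned carrier is packed into the top bits of the header word next to the size and flag bits.

    /*
     * Hypothetical demo of packing a carrier offset into a block header.
     * Not the ERTS macros; simplified names, 64-bit word assumed.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLG_MASK_DEMO      ((uintptr_t) 0x7)              /* low flag bits */
    #define OFFSET_BITS_DEMO   24                              /* cf. MBC_ABLK_OFFSET_BITS */
    #define OFFSET_SHIFT_DEMO  (sizeof(uintptr_t)*8 - OFFSET_BITS_DEMO)
    #define OFFSET_MASK_DEMO   (~(uintptr_t)0 << OFFSET_SHIFT_DEMO)
    #define SZ_MASK_DEMO       (~OFFSET_MASK_DEMO & ~FLG_MASK_DEMO)
    #define CRR_ALIGN_DEMO     ((uintptr_t) (256 * 1024))      /* carrier alignment */

    /* Pack size, flags and the carrier offset (in alignment units) into one word. */
    static uintptr_t make_ablk_hdr(uintptr_t blk, uintptr_t crr,
                                   uintptr_t sz, uintptr_t flgs)
    {
        uintptr_t offset = ((blk & ~(CRR_ALIGN_DEMO - 1)) - crr) / CRR_ALIGN_DEMO;
        assert(offset < ((uintptr_t) 1 << OFFSET_BITS_DEMO));
        assert(!(sz & ~SZ_MASK_DEMO) && !(flgs & ~FLG_MASK_DEMO));
        return (offset << OFFSET_SHIFT_DEMO) | sz | flgs;
    }

    /* Recover the carrier base from the block address and its header. */
    static uintptr_t ablk_to_carrier(uintptr_t blk, uintptr_t hdr)
    {
        uintptr_t offset = hdr >> OFFSET_SHIFT_DEMO;
        return (blk & ~(CRR_ALIGN_DEMO - 1)) - offset * CRR_ALIGN_DEMO;
    }

    int main(void)
    {
        uintptr_t crr = 16 * CRR_ALIGN_DEMO;                 /* pretend carrier  */
        uintptr_t blk = crr + 3 * CRR_ALIGN_DEMO + 4096;     /* block inside it  */
        uintptr_t hdr = make_ablk_hdr(blk, crr, 512, 0);

        printf("size=%lu carrier ok=%d\n",
               (unsigned long) (hdr & SZ_MASK_DEMO),
               ablk_to_carrier(blk, hdr) == crr);
        return 0;
    }
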
+ Carrier_t *carrier; +#else + union { + Carrier_t *carrier; /* if free */ + char udata__[1]; /* if allocated */ + }u; +#endif +} Block_t; + +typedef UWord FreeBlkFtr_t; /* Footer of a free block */ typedef struct { UWord giga_no; @@ -381,8 +417,6 @@ struct Allctr_t_ { #endif /* */ - Uint mbc_header_size; - Uint sbc_header_size; Uint min_mbc_size; Uint min_mbc_first_free_size; Uint min_block_size; @@ -469,6 +503,9 @@ void erts_alcu_verify_unused_ts(Allctr_t *allctr); unsigned long erts_alcu_test(unsigned long, unsigned long, unsigned long); +#ifdef DEBUG +int is_sbc_blk(Block_t*); +#endif #endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL) diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c index 5bdb752d3a..86b4696d8f 100644 --- a/erts/emulator/beam/erl_ao_firstfit_alloc.c +++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c @@ -91,6 +91,7 @@ struct AOFF_RBTree_t_ { AOFF_RBTree_t *right; Uint max_sz; /* of all blocks in this sub-tree */ }; +#define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr) #ifdef HARD_DEBUG static AOFF_RBTree_t * check_tree(AOFF_RBTree_t* root, Uint); @@ -102,7 +103,7 @@ static AOFF_RBTree_t * check_tree(AOFF_RBTree_t* root, Uint); */ static ERTS_INLINE Uint node_max_size(AOFF_RBTree_t *x) { - Uint sz = BLK_SZ(x); + Uint sz = AOFF_BLK_SZ(x); if (x->left && x->left->max_sz > sz) { sz = x->left->max_sz; } @@ -183,7 +184,6 @@ erts_aoffalc_start(AOFFAllctr_t *alc, sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t)); - allctr->mbc_header_size = sizeof(Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; allctr->min_block_size = sizeof(AOFF_RBTree_t); @@ -587,7 +587,7 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block; AOFF_RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC) ? &alc->sbmbc_root : &alc->mbc_root); - Uint blk_sz = BLK_SZ(blk); + Uint blk_sz = AOFF_BLK_SZ(blk); #ifdef HARD_DEBUG check_tree(*root, 0); @@ -659,7 +659,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size, if (x->left && x->left->max_sz >= size) { x = x->left; } - else if (BLK_SZ(x) >= size) { + else if (AOFF_BLK_SZ(x) >= size) { blk = x; break; } @@ -910,12 +910,12 @@ check_tree(AOFF_RBTree_t* root, Uint size) ASSERT(x->right > x); ASSERT(x->right->max_sz <= x->max_sz); } - ASSERT(x->max_sz >= BLK_SZ(x)); - ASSERT(x->max_sz == BLK_SZ(x) + ASSERT(x->max_sz >= AOFF_BLK_SZ(x)); + ASSERT(x->max_sz == AOFF_BLK_SZ(x) || x->max_sz == (x->left ? x->left->max_sz : 0) || x->max_sz == (x->right ? x->right->max_sz : 0)); - if (size && BLK_SZ(x) >= size) { + if (size && AOFF_BLK_SZ(x) >= size) { if (!res || x < res) { res = x; } @@ -956,7 +956,7 @@ print_tree_aux(AOFF_RBTree_t *x, int indent) } fprintf(stderr, "%s: sz=%lu addr=0x%lx max_size=%lu\r\n", IS_BLACK(x) ? 
"BLACK" : "RED", - BLK_SZ(x), (Uint)x, x->max_sz); + AOFF_BLK_SZ(x), (Uint)x, x->max_sz); print_tree_aux(x->left, indent + INDENT_STEP); } } diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c index c5f432bea1..f2ca193ace 100644 --- a/erts/emulator/beam/erl_async.c +++ b/erts/emulator/beam/erl_async.c @@ -382,10 +382,15 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, static ERTS_INLINE void call_async_ready(ErtsAsync *a) { +#if ERTS_USE_ASYNC_READY_Q Port *p = erts_id2port_sflgs(a->port, NULL, 0, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); +#else + Port *p = erts_thr_id2port_sflgs(a->port, + ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); +#endif if (!p) { if (a->async_free) a->async_free(a->async_data); @@ -395,7 +400,11 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a) if (a->async_free) a->async_free(a->async_data); } +#if ERTS_USE_ASYNC_READY_Q erts_port_release(p); +#else + erts_thr_port_release(p); +#endif } if (a->pdl) driver_pdl_dec_refc(a->pdl); @@ -603,7 +612,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key, sched_id = 1; #endif - prt = erts_drvport2port(ix); + prt = erts_drvport2port(ix, NULL); if (!prt) return -1; @@ -615,7 +624,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key, a->sched_id = sched_id; #endif a->hndl = (DE_Handle*)prt->drv_ptr->handle; - a->port = prt->id; + a->port = prt->common.id; a->pdl = NULL; a->async_data = async_data; a->async_invoke = async_invoke; diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c index c50fdeb4e8..743cbd93c9 100644 --- a/erts/emulator/beam/erl_bestfit_alloc.c +++ b/erts/emulator/beam/erl_bestfit_alloc.c @@ -73,6 +73,8 @@ #define SET_RED(N) (((RBTree_t *) (N))->flags |= RED_FLG) #define SET_BLACK(N) (((RBTree_t *) (N))->flags &= ~RED_FLG) +#define BF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr) + #undef ASSERT #define ASSERT ASSERT_EXPR @@ -177,7 +179,6 @@ erts_bfalc_start(BFAllctr_t *bfallctr, bfallctr->address_order = bfinit->ao; - allctr->mbc_header_size = sizeof(Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; allctr->min_block_size = (bfinit->ao @@ -592,7 +593,7 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) ? &bfallctr->sbmbc_root : &bfallctr->mbc_root); RBTree_t *blk = (RBTree_t *) block; - Uint blk_sz = BLK_SZ(blk); + Uint blk_sz = BF_BLK_SZ(blk); @@ -610,7 +611,7 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) while (1) { Uint size; - size = BLK_SZ(x); + size = BF_BLK_SZ(x); if (blk_sz < size || (blk_sz == size && blk < x)) { if (!x->left) { @@ -668,7 +669,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size, ASSERT(!cand_blk || cand_size >= size); while (x) { - blk_sz = BLK_SZ(x); + blk_sz = BF_BLK_SZ(x); if (blk_sz < size) { x = x->right; } @@ -686,7 +687,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size, #endif if (cand_blk) { - blk_sz = BLK_SZ(blk); + blk_sz = BF_BLK_SZ(blk); if (cand_size < blk_sz) return NULL; /* cand_blk was better */ if (cand_size == blk_sz && ((void *) cand_blk) < ((void *) blk)) @@ -711,7 +712,7 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) ? 
&bfallctr->sbmbc_root : &bfallctr->mbc_root); RBTree_t *blk = (RBTree_t *) block; - Uint blk_sz = BLK_SZ(blk); + Uint blk_sz = BF_BLK_SZ(blk); SET_TREE_NODE(blk); @@ -730,7 +731,7 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) while (1) { Uint size; - size = BLK_SZ(x); + size = BF_BLK_SZ(x); if (blk_sz == size) { @@ -796,7 +797,7 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) else if (LIST_NEXT(x)) { /* Replace tree node by next element in list... */ - ASSERT(BLK_SZ(LIST_NEXT(x)) == BLK_SZ(x)); + ASSERT(BF_BLK_SZ(LIST_NEXT(x)) == BF_BLK_SZ(x)); ASSERT(IS_TREE_NODE(x)); ASSERT(IS_LIST_ELEM(LIST_NEXT(x))); @@ -834,7 +835,7 @@ bf_get_free_block(Allctr_t *allctr, Uint size, ASSERT(!cand_blk || cand_size >= size); while (x) { - blk_sz = BLK_SZ(x); + blk_sz = BF_BLK_SZ(x); if (blk_sz < size) { x = x->right; } @@ -855,11 +856,11 @@ bf_get_free_block(Allctr_t *allctr, Uint size, #ifdef HARD_DEBUG { RBTree_t *ct_blk = check_tree(root, 0, size); - ASSERT(BLK_SZ(ct_blk) == BLK_SZ(blk)); + ASSERT(BF_BLK_SZ(ct_blk) == BF_BLK_SZ(blk)); } #endif - if (cand_blk && cand_size <= BLK_SZ(blk)) + if (cand_blk && cand_size <= BF_BLK_SZ(blk)) return NULL; /* cand_blk was better */ /* Use next block if it exist in order to avoid replacing @@ -1093,36 +1094,36 @@ check_tree(RBTree_t *root, int ao, Uint size) if (x->left) { ASSERT(x->left->parent == x); if (ao) { - ASSERT(BLK_SZ(x->left) < BLK_SZ(x) - || (BLK_SZ(x->left) == BLK_SZ(x) && x->left < x)); + ASSERT(BF_BLK_SZ(x->left) < BF_BLK_SZ(x) + || (BF_BLK_SZ(x->left) == BF_BLK_SZ(x) && x->left < x)); } else { ASSERT(IS_TREE_NODE(x->left)); - ASSERT(BLK_SZ(x->left) < BLK_SZ(x)); + ASSERT(BF_BLK_SZ(x->left) < BF_BLK_SZ(x)); } } if (x->right) { ASSERT(x->right->parent == x); if (ao) { - ASSERT(BLK_SZ(x->right) > BLK_SZ(x) - || (BLK_SZ(x->right) == BLK_SZ(x) && x->right > x)); + ASSERT(BF_BLK_SZ(x->right) > BF_BLK_SZ(x) + || (BF_BLK_SZ(x->right) == BF_BLK_SZ(x) && x->right > x)); } else { ASSERT(IS_TREE_NODE(x->right)); - ASSERT(BLK_SZ(x->right) > BLK_SZ(x)); + ASSERT(BF_BLK_SZ(x->right) > BF_BLK_SZ(x)); } } - if (size && BLK_SZ(x) >= size) { + if (size && BF_BLK_SZ(x) >= size) { if (ao) { if (!res - || BLK_SZ(x) < BLK_SZ(res) - || (BLK_SZ(x) == BLK_SZ(res) && x < res)) + || BF_BLK_SZ(x) < BF_BLK_SZ(res) + || (BF_BLK_SZ(x) == BF_BLK_SZ(res) && x < res)) res = x; } else { - if (!res || BLK_SZ(x) < BLK_SZ(res)) + if (!res || BF_BLK_SZ(x) < BF_BLK_SZ(res)) res = x; } } @@ -1168,7 +1169,7 @@ print_tree_aux(RBTree_t *x, int indent) } fprintf(stderr, "%s: sz=%lu addr=0x%lx\r\n", IS_BLACK(x) ? 
"BLACK" : "RED", - BLK_SZ(x), + BF_BLK_SZ(x), (Uint) x); print_tree_aux(x->left, indent + INDENT_STEP); } diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index 59a53870f3..889fefacfc 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -104,16 +104,49 @@ static void dereference_all_processes(DE_Handle *dh); static void restore_process_references(DE_Handle *dh); static void ddll_no_more_references(void *vdh); -#define lock_drv_list() erts_smp_mtx_lock(&erts_driver_list_lock) -#define unlock_drv_list() erts_smp_mtx_unlock(&erts_driver_list_lock) +#define lock_drv_list() erts_smp_rwmtx_rwlock(&erts_driver_list_lock) +#define unlock_drv_list() erts_smp_rwmtx_rwunlock(&erts_driver_list_lock) #define assert_drv_list_locked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&erts_driver_list_lock)) + ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ + || erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) +#define assert_drv_list_rwlocked() \ + ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)) +#define assert_drv_list_rlocked() \ + ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define assert_drv_list_not_locked() \ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_mtx_is_locked(&erts_driver_list_lock)) + ERTS_SMP_LC_ASSERT(!erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ + && !erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define FREE_PORT_FLAGS (ERTS_PORT_SFLGS_DEAD & (~ERTS_PORT_SFLG_INITIALIZING)) +static void +kill_ports_driver_unloaded(DE_Handle *dh) +{ + int ix, max = erts_ptab_max(&erts_port); + + for (ix = 0; ix < max; ix++) { + erts_aint32_t state; + Port* prt = erts_pix2port(ix); + if (!prt) + continue; + + ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + + state = erts_atomic32_read_nob(&prt->state); + if (state & FREE_PORT_FLAGS) + continue; + + erts_smp_port_lock(prt); + + state = erts_atomic32_read_nob(&prt->state); + if (!(state & ERTS_PORT_SFLGS_DEAD) && prt->drv_ptr->handle == dh) + driver_failure_atom((ErlDrvPort) prt, "driver_unloaded"); + + erts_port_release(prt); + } +} + /* * try_load(Path, Name, OptionList) -> {ok,Status} | * {ok, PendingStatus, Ref} | @@ -149,7 +182,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) Eterm name_term = BIF_ARG_2; Eterm options = BIF_ARG_3; char *path = NULL; - Uint path_len; + ErlDrvSizeT path_len; char *name = NULL; DE_Handle *dh; erts_driver_t *drv; @@ -228,7 +261,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) goto error; } path = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, path_len + 1 /* might need path separator */ + sys_strlen(name) + 1); - if (io_list_to_buf(path_term, path, path_len) != 0) { + if (erts_iolist_to_buf(path_term, path, path_len) != 0) { goto error; } while (path_len > 0 && (path[path_len-1] == '\\' || path[path_len-1] == '/')) { @@ -356,40 +389,16 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) ok_term = mkatom("loaded"); } } - assert_drv_list_locked(); + assert_drv_list_rwlocked(); if (kill_ports) { - int j; - /* Avoid closing the driver by referencing it */ + /* Avoid closing the driver by referencing it */ erts_ddll_reference_driver(dh); ASSERT(dh->status == ERL_DE_RELOAD); dh->status = ERL_DE_FORCE_RELOAD; #if DDLL_SMP unlock_drv_list(); #endif - for (j = 0; j < erts_max_ports; j++) { - Port* prt = &erts_port[j]; - erts_smp_port_state_lock(prt); - if (!(prt->status & FREE_PORT_FLAGS) && - prt->drv_ptr->handle == dh) { - erts_smp_atomic_inc_nob(&prt->refc); -#if DDLL_SMP - /* 
Extremely rare spinlock */ - while(prt->status & ERTS_PORT_SFLG_INITIALIZING) { - erts_smp_port_state_unlock(prt); - erts_smp_port_state_lock(prt); - } - erts_smp_port_state_unlock(prt); - erts_smp_mtx_lock(prt->lock); - if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) { - driver_failure_atom(j, "driver_unloaded"); - } -#else - driver_failure_atom(j, "driver_unloaded"); -#endif - erts_port_release(prt); - } - else erts_smp_port_state_unlock(prt); - } + kill_ports_driver_unloaded(dh); /* Dereference, eventually causing driver destruction */ #if DDLL_SMP lock_drv_list(); @@ -579,45 +588,21 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2) dh->reload_full_path = dh->reload_driver_name = NULL; dh->reload_flags = 0; } - if (dh->port_count > 0) { + if (erts_smp_atomic32_read_nob(&dh->port_count) > 0) { ++kill_ports; } dh->status = ERL_DE_UNLOAD; ok_term = am_pending_driver; done: - assert_drv_list_locked(); + assert_drv_list_rwlocked(); if (kill_ports > 1) { - int j; /* Avoid closing the driver by referencing it */ erts_ddll_reference_driver(dh); dh->status = ERL_DE_FORCE_UNLOAD; #if DDLL_SMP unlock_drv_list(); #endif - for (j = 0; j < erts_max_ports; j++) { - Port* prt = &erts_port[j]; - erts_smp_port_state_lock(prt); - if (!(prt->status & FREE_PORT_FLAGS) - && prt->drv_ptr->handle == dh) { - erts_smp_atomic_inc_nob(&prt->refc); -#if DDLL_SMP - /* Extremely rare spinlock */ - while(prt->status & ERTS_PORT_SFLG_INITIALIZING) { - erts_smp_port_state_unlock(prt); - erts_smp_port_state_lock(prt); - } - erts_smp_port_state_unlock(prt); - erts_smp_mtx_lock(prt->lock); - if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) { - driver_failure_atom(j, "driver_unloaded"); - } -#else - driver_failure_atom(j, "driver_unloaded"); -#endif - erts_port_release(prt); - } - else erts_smp_port_state_unlock(prt); - } + kill_ports_driver_unloaded(dh); #if DDLL_SMP lock_drv_list(); #endif @@ -787,7 +772,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) } else if (drv->handle->status == ERL_DE_PERMANENT) { res = am_permanent; } else { - res = make_small(drv->handle->port_count); + res = make_small(erts_smp_atomic32_read_nob(&drv->handle->port_count)); } goto done; case am_linked_in_driver: @@ -1045,38 +1030,16 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) } dh->status = ERL_DE_UNLOAD; } - if (!left && drv->handle->port_count > 0) { + if (!left + && erts_smp_atomic32_read_nob(&drv->handle->port_count) > 0) { if (kill_ports) { - int j; DE_Handle *dh = drv->handle; erts_ddll_reference_driver(dh); dh->status = ERL_DE_FORCE_UNLOAD; #if DDLL_SMP unlock_drv_list(); #endif - for (j = 0; j < erts_max_ports; j++) { - Port* prt = &erts_port[j]; - erts_smp_port_state_lock(prt); - if (!(prt->status & FREE_PORT_FLAGS) && - prt->drv_ptr->handle == dh) { - erts_smp_atomic_inc_nob(&prt->refc); -#if DDLL_SMP - while(prt->status & ERTS_PORT_SFLG_INITIALIZING) { - erts_smp_port_state_unlock(prt); - erts_smp_port_state_lock(prt); - } - erts_smp_port_state_unlock(prt); - erts_smp_mtx_lock(prt->lock); - if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) { - driver_failure_atom(j, "driver_unloaded"); - } -#else - driver_failure_atom(j, "driver_unloaded"); -#endif - erts_port_release(prt); - } - else erts_smp_port_state_unlock(prt); - } + kill_ports_driver_unloaded(dh); #if DDLL_SMP lock_drv_list(); /* Needed for future list operations */ #endif @@ -1098,7 +1061,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) void erts_ddll_lock_driver(DE_Handle *dh, char *name) { DE_ProcEntry *p,*q; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); 
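An illustrative sketch, not the ERTS implementation, of the table-walk pattern that kill_ports_driver_unloaded() above follows: skip empty slots, peek at an atomic state word without locking, then take the per-entry lock and re-check the state before acting. Every name below (demo_port, demo_kill_ports_for_handle, DEMO_DEAD) is hypothetical.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    #define DEMO_DEAD 1u

    typedef struct {
        atomic_uint     state;
        pthread_mutex_t lock;
        const void     *drv_handle;
    } demo_port;

    void demo_kill_ports_for_handle(demo_port **table, size_t max,
                                    const void *handle,
                                    void (*kill)(demo_port *))
    {
        size_t ix;
        for (ix = 0; ix < max; ix++) {
            demo_port *prt = table[ix];
            if (!prt)
                continue;                                   /* empty slot           */
            if (atomic_load_explicit(&prt->state, memory_order_acquire) & DEMO_DEAD)
                continue;                                   /* cheap unlocked check */

            pthread_mutex_lock(&prt->lock);
            /* the state may have changed while waiting for the lock: re-check */
            if (!(atomic_load_explicit(&prt->state, memory_order_relaxed) & DEMO_DEAD)
                && prt->drv_handle == handle)
                kill(prt);                                  /* e.g. fail the port   */
            pthread_mutex_unlock(&prt->lock);
        }
    }
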
notify_all(dh, name, ERL_DE_PROC_AWAIT_LOAD, am_UP, am_permanent); notify_all(dh, name, @@ -1121,19 +1084,22 @@ void erts_ddll_lock_driver(DE_Handle *dh, char *name) void erts_ddll_increment_port_count(DE_Handle *dh) { assert_drv_list_locked(); - dh->port_count++; + erts_smp_atomic32_inc_nob(&dh->port_count); } void erts_ddll_decrement_port_count(DE_Handle *dh) { assert_drv_list_locked(); - ASSERT(dh->port_count > 0); - dh->port_count--; +#if DEBUG + ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0); +#else + erts_smp_atomic32_dec_nob(&dh->port_count); +#endif } static void first_ddll_reference(DE_Handle *dh) { - assert_drv_list_locked(); + assert_drv_list_rwlocked(); erts_refc_init(&(dh->refc),1); } @@ -1161,7 +1127,7 @@ void erts_ddll_dereference_driver(DE_Handle *dh) static void dereference_all_processes(DE_Handle *dh) { DE_ProcEntry *p; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); for(p = dh->procs;p != NULL; p = p->next) { if (p->awaiting_status == ERL_DE_PROC_LOADED) { ASSERT(!(p->flags & ERL_DE_FL_DEREFERENCED)); @@ -1174,7 +1140,7 @@ static void dereference_all_processes(DE_Handle *dh) static void restore_process_references(DE_Handle *dh) { DE_ProcEntry *p; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); ASSERT(erts_refc_read(&(dh->refc),0) == 0); for(p = dh->procs;p != NULL; p = p->next) { if (p->awaiting_status == ERL_DE_PROC_LOADED) { @@ -1402,7 +1368,7 @@ static int is_last_user(DE_Handle *dh, Process *proc) { DE_ProcEntry *p = dh->procs; int found = 0; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); while (p != NULL) { if (p->proc == proc && p->awaiting_status == ERL_DE_PROC_LOADED) { @@ -1423,7 +1389,7 @@ static DE_ProcEntry *find_proc_entry(DE_Handle *dh, Process *proc, Uint status) { DE_ProcEntry *p = dh->procs; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); while (p != NULL) { if (p->proc == proc && p->awaiting_status == status) { @@ -1450,7 +1416,7 @@ static int num_procs(DE_Handle *dh, Uint status) { DE_ProcEntry *p = dh->procs; int i = 0; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); while (p != NULL) { if (p->awaiting_status == status) { @@ -1465,7 +1431,7 @@ static int num_entries(DE_Handle *dh, Process *proc, Uint status) { DE_ProcEntry *p = dh->procs; int i = 0; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); while (p != NULL) { if (p->awaiting_status == status && p->proc == proc) { ++i; @@ -1478,7 +1444,7 @@ static int num_entries(DE_Handle *dh, Process *proc, Uint status) { static void add_proc_loaded(DE_Handle *dh, Process *proc) { DE_ProcEntry *p; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry)); p->proc = proc; p->flags = 0; @@ -1490,7 +1456,7 @@ static void add_proc_loaded(DE_Handle *dh, Process *proc) static void add_proc_loaded_deref(DE_Handle *dh, Process *proc) { DE_ProcEntry *p; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry)); p->proc = proc; p->awaiting_status = ERL_DE_PROC_LOADED; @@ -1510,7 +1476,7 @@ static void add_proc_waiting(DE_Handle *dh, Process *proc, Uint status, Eterm ref) { DE_ProcEntry *p; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry)); p->proc = proc; p->flags = 0; @@ -1524,7 +1490,7 @@ static Eterm add_monitor(Process *p, DE_Handle *dh, Uint status) { Eterm r; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); r = erts_make_ref(p); 
add_proc_waiting(dh, p, status, r); return r; @@ -1535,7 +1501,7 @@ static void set_driver_reloading(DE_Handle *dh, Process *proc, char *path, char { DE_ProcEntry *p; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry)); p->proc = proc; p->awaiting_status = ERL_DE_OK; @@ -1556,7 +1522,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name) int res; ErlDrvEntry *dp; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); if ((res = erts_sys_ddll_open(path, &(dh->handle))) != ERL_DE_NO_ERROR) { return res; @@ -1594,7 +1560,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name) goto error; } erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0); - dh->port_count = 0; + erts_smp_atomic32_init_nob(&dh->port_count, 0); dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1); sys_strcpy(dh->full_path, path); dh->flags = 0; @@ -1620,7 +1586,7 @@ static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name) { erts_driver_t *q, *p = driver_list; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); while (p != NULL) { if (p->handle == dh) { @@ -1660,11 +1626,11 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name) int res; DE_Handle *dh = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sizeof(DE_Handle)); - assert_drv_list_locked(); + assert_drv_list_rwlocked(); dh->handle = NULL; dh->procs = NULL; - dh->port_count = 0; + erts_smp_atomic32_init_nob(&dh->port_count, 0); erts_refc_init(&(dh->refc), (erts_aint_t) 0); dh->status = -1; dh->reload_full_path = NULL; @@ -1698,7 +1664,7 @@ static int reload_driver_entry(DE_Handle *dh) int loadres; Uint flags = dh->reload_flags; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); dh->reload_full_path = NULL; dh->reload_driver_name = NULL; @@ -1736,7 +1702,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type, ErtsProcLocks rp_locks = 0; ERTS_SMP_CHK_NO_PROC_LOCKS; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); if (errcode != 0) { int need = load_error_need(errcode); Eterm e; @@ -1769,7 +1735,7 @@ static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Ete { DE_ProcEntry **p; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); p = &(dh->procs); while (*p != NULL) { @@ -1878,7 +1844,7 @@ static Eterm mkatom(char *str) static char *pick_list_or_atom(Eterm name_term) { char *name = NULL; - Uint name_len; + ErlDrvSizeT name_len; if (is_atom(name_term)) { Atom *ap = atom_tab(atom_val(name_term)); if (ap->len == 0) { @@ -1894,7 +1860,7 @@ static char *pick_list_or_atom(Eterm name_term) goto error; } name = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, name_len + 1); - if (io_list_to_buf(name_term, name, name_len) != 0) { + if (erts_iolist_to_buf(name_term, name, name_len) != 0) { goto error; } name[name_len] = '\0'; @@ -1915,10 +1881,10 @@ static int build_proc_info(DE_Handle *dh, ProcEntryInfo **out_pei, Uint filter) int i; DE_ProcEntry *pe; - assert_drv_list_locked(); + assert_drv_list_rwlocked(); for (pe = dh->procs; pe != NULL; pe = pe->next) { - Eterm id = pe->proc->id; + Eterm id = pe->proc->common.id; Uint stat = pe->awaiting_status; if (stat == ERL_DE_PROC_AWAIT_UNLOAD_ONLY) { stat = ERL_DE_PROC_AWAIT_UNLOAD; diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index c910bd0cb6..b8026063e6 100755 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -40,6 +40,8 @@ #include "erl_cpu_topology.h" 
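A simplified sketch of the size-then-build convention that the erts_bld_*() helpers follow, which the erts_bld_port_info() rewrite further down leans on: the same builder is called twice, first with only a size pointer to count the heap words needed, then with a heap pointer to actually construct the term. The demo_* names are hypothetical stand-ins, not ERTS APIs.

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long demo_term;

    /* With szp set it only accumulates the size; with hpp set it writes the tuple. */
    static demo_term demo_bld_tuple2(demo_term **hpp, unsigned *szp,
                                     demo_term a, demo_term b)
    {
        if (szp)
            *szp += 3;                        /* header word + two elements   */
        if (hpp) {
            demo_term *hp = *hpp;
            hp[0] = 2; hp[1] = a; hp[2] = b;  /* arity, then the elements     */
            *hpp += 3;
            return (demo_term) hp;            /* "boxed" pointer to the tuple */
        }
        return 0;
    }

    int main(void)
    {
        unsigned sz = 0;
        demo_term *heap, *hp, res;

        demo_bld_tuple2(NULL, &sz, 17, 42);        /* pass 1: compute size */
        heap = hp = malloc(sz * sizeof(demo_term));
        res = demo_bld_tuple2(&hp, NULL, 17, 42);  /* pass 2: build        */

        printf("needed %u words, tuple at %p\n", sz, (void *) res);
        free(heap);
        return 0;
    }
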
#include "erl_async.h" #include "erl_thr_progress.h" +#define ERTS_PTAB_WANT_DEBUG_FUNCS__ +#include "erl_ptab.h" #ifdef HIPE #include "hipe_arch.h" #endif @@ -128,8 +130,6 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE static Eterm os_type_tuple; static Eterm os_version_tuple; -static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item); - static Eterm current_function(Process* p, Process* rp, Eterm** hpp, int full_info); static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp); @@ -873,8 +873,7 @@ BIF_RETTYPE process_info_1(BIF_ALIST_1) && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) BIF_RET(am_undefined); - if (is_not_internal_pid(BIF_ARG_1) - || internal_pid_index(BIF_ARG_1) >= erts_max_processes) { + if (is_not_internal_pid(BIF_ARG_1)) { BIF_ERROR(BIF_P, BADARG); } @@ -909,8 +908,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2) && external_pid_dist_entry(pid) == erts_this_dist_entry) BIF_RET(am_undefined); - if (is_not_internal_pid(pid) - || internal_pid_index(BIF_ARG_1) >= erts_max_processes) { + if (is_not_internal_pid(pid)) { BIF_ERROR(BIF_P, BADARG); } @@ -1002,9 +1000,9 @@ process_info_aux(Process *BIF_P, switch (item) { case am_registered_name: - if (rp->reg != NULL) { + if (rp->common.u.alive.reg) { hp = HAlloc(BIF_P, 3); - res = rp->reg->name; + res = rp->common.u.alive.reg->name; } else { if (always_wrap) { hp = HAlloc(BIF_P, 3); @@ -1050,7 +1048,7 @@ process_info_aux(Process *BIF_P, ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); n = rp->msg.len; - if (n == 0 || rp->trace_flags & F_SENSITIVE) { + if (n == 0 || ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) { hp = HAlloc(BIF_P, 3); } else { int remove_bad_messages = 0; @@ -1209,7 +1207,7 @@ process_info_aux(Process *BIF_P, INIT_MONITOR_INFOS(mic); - erts_doforall_links(rp->nlinks,&collect_one_link,&mic); + erts_doforall_links(ERTS_P_LINKS(rp),&collect_one_link,&mic); hp = HAlloc(BIF_P, 3 + mic.sz); res = NIL; @@ -1227,7 +1225,7 @@ process_info_aux(Process *BIF_P, int i; INIT_MONITOR_INFOS(mic); - erts_doforall_monitors(rp->monitors,&collect_one_origin_monitor,&mic); + erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_origin_monitor,&mic); hp = HAlloc(BIF_P, 3 + mic.sz); res = NIL; for (i = 0; i < mic.mi_i; i++) { @@ -1264,7 +1262,7 @@ process_info_aux(Process *BIF_P, Eterm item; INIT_MONITOR_INFOS(mic); - erts_doforall_monitors(rp->monitors,&collect_one_target_monitor,&mic); + erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_target_monitor,&mic); hp = HAlloc(BIF_P, 3 + mic.sz); res = NIL; @@ -1330,7 +1328,7 @@ process_info_aux(Process *BIF_P, } case am_dictionary: - if (rp->trace_flags & F_SENSITIVE) { + if (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) { res = NIL; } else { res = erts_dictionary_copy(BIF_P, rp->dictionary); @@ -1426,8 +1424,8 @@ process_info_aux(Process *BIF_P, ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); - erts_doforall_links(rp->nlinks, &one_link_size, &size); - erts_doforall_monitors(rp->monitors, &one_mon_size, &size); + erts_doforall_links(ERTS_P_LINKS(rp), &one_link_size, &size); + erts_doforall_monitors(ERTS_P_MONITORS(rp), &one_mon_size, &size); size += (rp->heap_sz + rp->mbuf_sz) * sizeof(Eterm); if (rp->old_hend && rp->old_heap) size += (rp->old_hend - rp->old_heap) * sizeof(Eterm); @@ -1500,7 +1498,7 @@ process_info_aux(Process *BIF_P, case am_trace: hp = HAlloc(BIF_P, 3); - res = make_small(rp->trace_flags & TRACEE_FLAGS); + res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS); break; case am_binary: { @@ -1605,7 +1603,7 @@ current_function(Process* BIF_P, Process* rp, Eterm** 
hpp, int full_info) } } - if (BIF_P->id == rp->id) { + if (BIF_P == rp) { FunctionInfo fi2; /* @@ -1837,17 +1835,17 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */ # define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF_XML # endif #endif - Uint buf_size = 8*1024; /* Try with 8KB first */ + ErlDrvSizeT buf_size = 8*1024; /* Try with 8KB first */ char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size); - int r = io_list_to_buf(*tp, (char*) buf, buf_size - 1); - if (r < 0) { + ErlDrvSizeT r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1); + if (ERTS_IOLIST_TO_BUF_FAILED(r)) { erts_free(ERTS_ALC_T_TMP, (void *) buf); if (erts_iolist_size(*tp, &buf_size)) { goto badarg; } buf_size++; buf = erts_alloc(ERTS_ALC_T_TMP, buf_size); - r = io_list_to_buf(*tp, (char*) buf, buf_size - 1); + r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1); ASSERT(r == buf_size - 1); } buf[buf_size - 1 - r] = '\0'; @@ -2159,9 +2157,13 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE)); BIF_RET(res); } else if (BIF_ARG_1 == am_process_count) { - BIF_RET(make_small(erts_process_count())); + BIF_RET(make_small(erts_ptab_count(&erts_proc))); } else if (BIF_ARG_1 == am_process_limit) { - BIF_RET(make_small(erts_max_processes)); + BIF_RET(make_small(erts_ptab_max(&erts_proc))); + } else if (BIF_ARG_1 == am_port_count) { + BIF_RET(make_small(erts_ptab_count(&erts_port))); + } else if (BIF_ARG_1 == am_port_limit) { + BIF_RET(make_small(erts_ptab_max(&erts_port))); } else if (BIF_ARG_1 == am_info || BIF_ARG_1 == am_procs || BIF_ARG_1 == am_loaded @@ -2536,6 +2538,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) { res = make_small(erts_no_run_queues); BIF_RET(res); + } else if (ERTS_IS_ATOM_STR("port_parallelism", BIF_ARG_1)) { + res = erts_port_parallelism ? am_true : am_false; + BIF_RET(res); } else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) { Eterm *hp = NULL; Uint sz = 0; @@ -2703,66 +2708,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); } -BIF_RETTYPE -port_info_1(BIF_ALIST_1) -{ - Process* p = BIF_P; - Eterm pid = BIF_ARG_1; - static Eterm keys[] = { - am_name, - am_links, - am_id, - am_connected, - am_input, - am_output, - am_os_pid - }; - Eterm items[ASIZE(keys)]; - Eterm result = NIL; - Eterm reg_name; - Eterm* hp; - Uint need; - int i; - - /* - * Collect all information about the port. - */ - - for (i = 0; i < ASIZE(keys); i++) { - Eterm item; - - item = port_info(p, pid, keys[i]); - if (is_non_value(item)) { - return THE_NON_VALUE; - } - if (item == am_undefined) { - return am_undefined; - } - items[i] = item; - } - reg_name = port_info(p, pid, am_registered_name); - - /* - * Build the resulting list. 
- */ - - need = 2*ASIZE(keys); - if (is_tuple(reg_name)) { - need += 2; - } - hp = HAlloc(p, need); - for (i = ASIZE(keys) - 1; i >= 0; i--) { - result = CONS(hp, items[i], result); - hp += 2; - } - if (is_tuple(reg_name)) { - result = CONS(hp, reg_name, result); - } - - return result; -} - - /**********************************************************************/ /* Return information on ports */ /* Info: @@ -2775,38 +2720,20 @@ port_info_1(BIF_ALIST_1) ** os_pid The child's process ID */ -BIF_RETTYPE port_info_2(BIF_ALIST_2) -{ - return port_info(BIF_P, BIF_ARG_1, BIF_ARG_2); -} - -static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item) +Eterm +erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm item) { - BIF_RETTYPE ret; - Port *prt; - Eterm res; - Eterm* hp; - int count; - - if (is_internal_port(portid)) - prt = erts_id2port(portid, p, ERTS_PROC_LOCK_MAIN); - else if (is_atom(portid)) - erts_whereis_name(p, ERTS_PROC_LOCK_MAIN, - portid, NULL, 0, 0, &prt); - else if (is_external_port(portid) - && external_port_dist_entry(portid) == erts_this_dist_entry) - BIF_RET(am_undefined); - else { - BIF_ERROR(p, BADARG); - } + Eterm res = THE_NON_VALUE; - if (!prt) { - BIF_RET(am_undefined); - } + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); if (item == am_id) { - hp = HAlloc(p, 3); - res = make_small(internal_port_number(portid)); + if (hpp) + res = make_small(internal_port_index(prt->common.id)); + if (szp) { + res = am_true; + goto done; + } } else if (item == am_links) { MonitorInfoCollection mic; @@ -2815,17 +2742,26 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item) INIT_MONITOR_INFOS(mic); - erts_doforall_links(prt->nlinks, &collect_one_link, &mic); + erts_doforall_links(ERTS_P_LINKS(prt), &collect_one_link, &mic); - hp = HAlloc(p, 3 + mic.sz); - res = NIL; - for (i = 0; i < mic.mi_i; i++) { - item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity); - res = CONS(hp, item, res); - hp += 2; + if (szp) + *szp += mic.sz; + + if (hpp) { + res = NIL; + for (i = 0; i < mic.mi_i; i++) { + item = STORE_NC(hpp, ohp, mic.mi[i].entity); + res = CONS(*hpp, item, res); + *hpp += 2; + } } + DESTROY_MONITOR_INFOS(mic); + if (szp) { + res = am_true; + goto done; + } } else if (item == am_monitors) { MonitorInfoCollection mic; @@ -2834,79 +2770,96 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item) INIT_MONITOR_INFOS(mic); - erts_doforall_monitors(prt->monitors, &collect_one_origin_monitor, &mic); + erts_doforall_monitors(ERTS_P_MONITORS(prt), &collect_one_origin_monitor, &mic); - hp = HAlloc(p, 3 + mic.sz); - res = NIL; - for (i = 0; i < mic.mi_i; i++) { - Eterm t; - item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity); - t = TUPLE2(hp, am_process, item); - hp += 3; - res = CONS(hp, t, res); - hp += 2; + if (szp) + *szp += mic.sz; + + if (hpp) { + res = NIL; + for (i = 0; i < mic.mi_i; i++) { + Eterm t; + item = STORE_NC(hpp, ohp, mic.mi[i].entity); + t = TUPLE2(*hpp, am_process, item); + *hpp += 3; + res = CONS(*hpp, t, res); + *hpp += 2; + } } + DESTROY_MONITOR_INFOS(mic); + if (szp) { + res = am_true; + goto done; + } } else if (item == am_name) { - count = sys_strlen(prt->name); + int count = sys_strlen(prt->name); + + if (hpp) + res = buf_to_intlist(hpp, prt->name, count, NIL); - hp = HAlloc(p, 3 + 2*count); - res = buf_to_intlist(&hp, prt->name, count, NIL); + if (szp) { + *szp += 2*count; + res = am_true; + goto done; + } } else if (item == am_connected) { - hp = HAlloc(p, 3); - res = prt->connected; /* internal pid */ + if (hpp) + res 
= ERTS_PORT_GET_CONNECTED(prt); /* internal pid */ + if (szp) { + res = am_true; + goto done; + } } else if (item == am_input) { - Uint hsz = 3; - Uint n = prt->bytes_in; - (void) erts_bld_uint(NULL, &hsz, n); - hp = HAlloc(p, hsz); - res = erts_bld_uint(&hp, NULL, n); + res = erts_bld_uint(hpp, szp, prt->bytes_in); + if (szp) { + res = am_true; + goto done; + } } else if (item == am_output) { - Uint hsz = 3; - Uint n = prt->bytes_out; - (void) erts_bld_uint(NULL, &hsz, n); - hp = HAlloc(p, hsz); - res = erts_bld_uint(&hp, NULL, n); + res = erts_bld_uint(hpp, szp, prt->bytes_out); + if (szp) { + res = am_true; + goto done; + } } else if (item == am_os_pid) { - if (prt->os_pid >= 0) { - Uint hsz = 3; - UWord n = prt->os_pid; - (void) erts_bld_uword(NULL, &hsz, n); - hp = HAlloc(p, hsz); - res = erts_bld_uword(&hp, NULL, n); - } else { - hp = HAlloc(p, 3); - res = am_undefined; - } + res = (prt->os_pid < 0 + ? am_undefined + : erts_bld_uword(hpp, szp, (UWord) prt->os_pid)); + if (szp) { + res = am_true; + goto done; + } } else if (item == am_registered_name) { - RegProc *reg; - reg = prt->reg; - if (reg == NULL) { - ERTS_BIF_PREP_RET(ret, NIL); - goto done; - } else { - hp = HAlloc(p, 3); + RegProc *reg = prt->common.u.alive.reg; + if (reg) { res = reg->name; + if (szp) { + res = am_true; + goto done; + } + } + else { + if (szp) + return am_undefined; + return NIL; } } else if (item == am_memory) { /* All memory consumed in bytes (the Port struct should not be included though). */ - Uint hsz = 3; Uint size = 0; ErlHeapFragment* bp; - hp = HAlloc(p, 3); - - erts_doforall_links(prt->nlinks, &one_link_size, &size); + erts_doforall_links(ERTS_P_LINKS(prt), &one_link_size, &size); for (bp = prt->bp; bp; bp = bp->next) size += sizeof(ErlHeapFragment) + (bp->alloc_size - 1)*sizeof(Eterm); @@ -2920,51 +2873,72 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item) /* All memory allocated by the driver should be included, but it is hard to retrieve... */ - (void) erts_bld_uint(NULL, &hsz, size); - hp = HAlloc(p, hsz); - res = erts_bld_uint(&hp, NULL, size); + res = erts_bld_uint(hpp, szp, size); + if (szp) { + res = am_true; + goto done; + } } else if (item == am_queue_size) { Uint ioq_size = erts_port_ioq_size(prt); - Uint hsz = 3; - (void) erts_bld_uint(NULL, &hsz, ioq_size); - hp = HAlloc(p, hsz); - res = erts_bld_uint(&hp, NULL, ioq_size); + res = erts_bld_uint(hpp, szp, ioq_size); + if (szp) { + res = am_true; + goto done; + } } else if (ERTS_IS_ATOM_STR("locking", item)) { - hp = HAlloc(p, 3); + if (hpp) { #ifndef ERTS_SMP - res = am_false; + res = am_false; #else - if (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) { - DECL_AM(port_level); - ASSERT(prt->drv_ptr->flags - & ERL_DRV_FLAG_USE_PORT_LOCKING); - res = AM_port_level; + if (erts_atomic32_read_nob(&prt->state) + & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) { + DECL_AM(port_level); + ASSERT(prt->drv_ptr->flags + & ERL_DRV_FLAG_USE_PORT_LOCKING); + res = AM_port_level; + } + else { + DECL_AM(driver_level); + ASSERT(!(prt->drv_ptr->flags + & ERL_DRV_FLAG_USE_PORT_LOCKING)); + res = AM_driver_level; + } +#endif } - else { - DECL_AM(driver_level); - ASSERT(!(prt->drv_ptr->flags - & ERL_DRV_FLAG_USE_PORT_LOCKING)); - res = AM_driver_level; + if (szp) { + res = am_true; + goto done; } -#endif + } + else if (item == am_parallelism) { + if (szp) { + res = am_true; + goto done; + } + res = ((ERTS_PTS_FLG_PARALLELISM & + erts_smp_atomic32_read_nob(&prt->sched.flags)) + ? 
am_true + : am_false); } else { - ERTS_BIF_PREP_ERROR(ret, p, BADARG); - goto done; + if (szp) + return am_false; + return THE_NON_VALUE; } - ERTS_BIF_PREP_RET(ret, TUPLE2(hp, item, res)); - - done: - - erts_smp_port_unlock(prt); +done: + if (szp) + *szp += 3; + if (hpp) { + res = TUPLE2(*hpp, item, res); + *hpp += 3; + } - return ret; + return res; } - BIF_RETTYPE fun_info_2(BIF_ALIST_2) { @@ -3096,12 +3070,9 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1) if(is_internal_pid(BIF_ARG_1)) { Process *rp; - if (BIF_ARG_1 == BIF_P->id) + if (BIF_ARG_1 == BIF_P->common.id) BIF_RET(am_true); - if(internal_pid_index(BIF_ARG_1) >= erts_max_processes) - BIF_ERROR(BIF_P, BADARG); - rp = erts_proc_lookup(BIF_ARG_1); if (!rp) { BIF_RET(am_false); @@ -3319,10 +3290,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) /* Used by node_container_SUITE (emulator) */ Eterm res; if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)) - res = erts_test_next_pid(0, 0); - else { - res = erts_test_next_port(0, 0); - } + res = erts_ptab_test_next_id(&erts_proc, 0, 0); + else + res = erts_ptab_test_next_id(&erts_port, 0, 0); if (res < 0) BIF_RET(am_false); BIF_RET(erts_make_integer(res, BIF_P)); @@ -3358,11 +3328,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) { /* Used by process_SUITE (emulator) */ - BIF_RET(erts_debug_processes(BIF_P)); + BIF_RET(erts_debug_ptab_list(BIF_P, &erts_proc)); } else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) { /* Used by process_SUITE (emulator) */ - BIF_RET(erts_debug_processes_bif_info(BIF_P)); + BIF_RET(erts_debug_ptab_list_bif_info(BIF_P, &erts_proc)); } else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) { /* Used by distribution_SUITE (emulator) */ @@ -3423,17 +3393,20 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); BIF_RET(am_undefined); } - res = make_link_list(BIF_P, p->nlinks, NIL); + res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL); erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); BIF_RET(res); } else if(is_internal_port(tp[2])) { Eterm res; - Port *p = erts_id2port(tp[2], BIF_P, ERTS_PROC_LOCK_MAIN); + Port *p = erts_id2port_sflgs(tp[2], + BIF_P, + ERTS_PROC_LOCK_MAIN, + ERTS_PORT_SFLGS_INVALID_LOOKUP); if(!p) BIF_RET(am_undefined); - res = make_link_list(BIF_P, p->nlinks, NIL); - erts_smp_port_unlock(p); + res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL); + erts_port_release(p); BIF_RET(res); } else if(is_node_name_atom(tp[2])) { @@ -3465,7 +3438,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); BIF_RET(am_undefined); } - res = make_monitor_list(BIF_P, p->monitors); + res = make_monitor_list(BIF_P, ERTS_P_MONITORS(p)); erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); BIF_RET(res); } else if(is_node_name_atom(tp[2])) { @@ -3608,7 +3581,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on); if (on) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); - erts_dsprintf(dsbufp, "Process %T ", BIF_P->id); + erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id); if (erts_is_alive) erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname); erts_dsprintf(dsbufp, @@ -3676,10 +3649,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Eterm res; if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1)) - res = erts_test_next_pid(1, next); - else { - res = erts_test_next_port(1, next); - } + res = 
erts_ptab_test_next_id(&erts_proc, 1, next); + else + res = erts_ptab_test_next_id(&erts_port, 1, next); if (res < 0) BIF_RET(am_false); BIF_RET(erts_make_integer(res, BIF_P)); @@ -3950,8 +3922,8 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) { /* use registered names as id's for process locks if available */ proc = erts_proc_lookup(lock->id); - if (proc && proc->reg) { - id = proc->reg->name; + if (proc && proc->common.u.alive.reg) { + id = proc->common.u.alive.reg->name; } else { /* otherwise use process id */ id = lock->id; diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c index 4b270414cb..a4b837541b 100644 --- a/erts/emulator/beam/erl_bif_port.c +++ b/erts/emulator/beam/erl_bif_port.c @@ -42,28 +42,26 @@ #include "erl_bits.h" #include "dtrace-wrapper.h" -static int open_port(Process* p, Eterm name, Eterm settings, int *err_nump); +static Port *open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump); static byte* convert_environment(Process* p, Eterm env); static char **convert_args(Eterm); static void free_args(char **); char *erts_default_arg0 = "default"; -static BIF_RETTYPE -port_call(Process* p, Eterm arg1, Eterm arg2, Eterm arg3); - BIF_RETTYPE open_port_2(BIF_ALIST_2) { - int port_num; - Eterm port_val; + Port *port; + Eterm port_id; char *str; - int err_num; + int err_type, err_num; - if ((port_num = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_num)) < 0) { - if (port_num == -3) { + port = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_type, &err_num); + if (!port) { + if (err_type == -3) { ASSERT(err_num == BADARG || err_num == SYSTEM_LIMIT); BIF_ERROR(BIF_P, err_num); - } else if (port_num == -2) { + } else if (err_type == -2) { str = erl_errno_id(err_num); } else { str = "einval"; @@ -74,546 +72,408 @@ BIF_RETTYPE open_port_2(BIF_ALIST_2) erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); - port_val = erts_port[port_num].id; - erts_add_link(&(erts_port[port_num].nlinks), LINK_PID, BIF_P->id); - erts_add_link(&(BIF_P->nlinks), LINK_PID, port_val); + port_id = port->common.id; + erts_add_link(&ERTS_P_LINKS(port), LINK_PID, BIF_P->common.id); + erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, port_id); erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); - erts_port_release(&erts_port[port_num]); + erts_port_release(port); - BIF_RET(port_val); + BIF_RET(port_id); } -/**************************************************************************** - - PORT BIFS: - - port_command/2 -- replace Port ! {..., {command, Data}} - port_command(Port, Data) -> true - when port(Port), io-list(Data) - - port_control/3 -- new port_control(Port, Ctl, Data) -> Reply - port_control(Port, Ctl, Data) -> Reply - where integer(Ctl), io-list(Data), io-list(Reply) - - port_close/1 -- replace Port ! {..., close} - port_close(Port) -> true - when port(Port) - - port_connect/2 -- replace Port ! {..., {connect, Pid}} - port_connect(Port, Pid) - when port(Port), pid(Pid) - - ***************************************************************************/ - -static Port* -id_or_name2port(Process *c_p, Eterm id) +static ERTS_INLINE Port * +lookup_port(Process *c_p, Eterm id_or_name) { - Port *port; - if (is_not_atom(id)) - port = erts_id2port(id, c_p, ERTS_PROC_LOCK_MAIN); + /* TODO: Implement nicer lookup in register... 
*/ + Eterm id; + if (is_atom(id_or_name)) + id = erts_whereis_name_to_id(c_p, id_or_name); else - erts_whereis_name(c_p, ERTS_PROC_LOCK_MAIN, id, NULL, 0, 0, &port); - return port; + id = id_or_name; + return erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP); } -#define ERTS_PORT_COMMAND_FLAG_FORCE (((Uint32) 1) << 0) -#define ERTS_PORT_COMMAND_FLAG_NOSUSPEND (((Uint32) 1) << 1) +/* + * erts_internal:port_command/3 is used by the + * erlang:port_command/2 and erlang:port_command/3 + * BIFs. + */ -static BIF_RETTYPE -do_port_command(Process *BIF_P, Eterm arg1, Eterm arg2, Eterm arg3, - Uint32 flags) +BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3) { BIF_RETTYPE res; - Port *p; - - /* Trace sched out before lock check wait */ - if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(BIF_P, am_out); - } - - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(BIF_P, am_inactive); - } - - p = id_or_name2port(BIF_P, arg1); - if (!p) { - if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(BIF_P, am_in); + Port *prt; + int flags = 0; + Eterm ref; + + if (is_not_nil(BIF_ARG_3)) { + Eterm l = BIF_ARG_3; + while (is_list(l)) { + Eterm* cons = list_val(l); + Eterm car = CAR(cons); + if (car == am_force) + flags |= ERTS_PORT_SIG_FLG_FORCE; + else if (car == am_nosuspend) + flags |= ERTS_PORT_SIG_FLG_NOSUSPEND; + else + BIF_RET(am_badarg); + l = CDR(cons); } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(BIF_P, am_active); - } - BIF_ERROR(BIF_P, BADARG); + if (!is_nil(l)) + BIF_RET(am_badarg); } - - /* Trace port in, id_or_name2port causes wait */ - if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) { - trace_sched_ports_where(p, am_in, am_command); - } - if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) { - profile_runnable_port(p, am_active); + prt = lookup_port(BIF_P, BIF_ARG_1); + if (!prt) + BIF_RET(am_badarg); + + if (flags & ERTS_PORT_SIG_FLG_FORCE) { + if (!(prt->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY)) + BIF_RET(am_notsup); } - ERTS_BIF_PREP_RET(res, am_true); +#ifdef DEBUG + ref = NIL; +#endif - if ((flags & ERTS_PORT_COMMAND_FLAG_FORCE) - && !(p->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY)) { - ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_NOTSUP); - } - else if (!(flags & ERTS_PORT_COMMAND_FLAG_FORCE) - && p->status & ERTS_PORT_SFLG_PORT_BUSY) { - if (flags & ERTS_PORT_COMMAND_FLAG_NOSUSPEND) { + switch (erts_port_output(BIF_P, flags, prt, prt->common.id, BIF_ARG_2, &ref)) { + case ERTS_PORT_OP_CALLER_EXIT: + case ERTS_PORT_OP_BADARG: + case ERTS_PORT_OP_DROPPED: + ERTS_BIF_PREP_RET(res, am_badarg); + break; + case ERTS_PORT_OP_BUSY: + ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE)); + if (flags & ERTS_PORT_SIG_FLG_NOSUSPEND) ERTS_BIF_PREP_RET(res, am_false); - } else { - erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, p); - if (erts_system_monitor_flags.busy_port) { - monitor_generic(BIF_P, am_busy_port, p->id); - } - ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_port_command_3], BIF_P, - arg1, arg2, arg3); - } - } else { - int wres; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - ERTS_SMP_CHK_NO_PROC_LOCKS; - wres = erts_write_to_port(BIF_P->id, p, arg2); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - if (wres != 0) { - ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG); + erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, prt); + ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_erts_internal_port_command_3], + BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); } - 
} - - if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) { - trace_sched_ports_where(p, am_out, am_command); - } - if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) { - profile_runnable_port(p, am_inactive); + break; + case ERTS_PORT_OP_BUSY_SCHEDULED: + ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE)); + /* Fall through... */ + case ERTS_PORT_OP_SCHEDULED: + ASSERT(is_internal_ref(ref)); + ERTS_BIF_PREP_RET(res, ref); + break; + case ERTS_PORT_OP_DONE: + ERTS_BIF_PREP_RET(res, am_true); + break; + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_output() result"); + break; } - erts_port_release(p); - /* Trace sched in after port release */ - if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(BIF_P, am_in); - } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(BIF_P, am_active); - } - if (ERTS_PROC_IS_EXITING(BIF_P)) { KILL_CATCHES(BIF_P); /* Must exit */ ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR); } - return res; -} -BIF_RETTYPE port_command_2(BIF_ALIST_2) -{ - return do_port_command(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL, 0); + return res; } -BIF_RETTYPE port_command_3(BIF_ALIST_3) +BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3) { - Eterm l = BIF_ARG_3; - Uint32 flags = 0; - while (is_list(l)) { - Eterm* cons = list_val(l); - Eterm car = CAR(cons); - if (car == am_force) { - flags |= ERTS_PORT_COMMAND_FLAG_FORCE; - } else if (car == am_nosuspend) { - flags |= ERTS_PORT_COMMAND_FLAG_NOSUSPEND; - } else { - BIF_ERROR(BIF_P, BADARG); - } - l = CDR(cons); - } - if(!is_nil(l)) { - BIF_ERROR(BIF_P, BADARG); + Port* prt; + Eterm retval; + Uint uint_op; + unsigned int op; + erts_aint32_t state; + + prt = lookup_port(BIF_P, BIF_ARG_1); + if (!prt) + BIF_RET(am_badarg); + + if (!term_to_Uint(BIF_ARG_2, &uint_op)) + BIF_RET(am_badarg); + + if (uint_op > (Uint) UINT_MAX) + BIF_RET(am_badarg); + + op = (unsigned int) uint_op; + + switch (erts_port_call(BIF_P, prt, op, BIF_ARG_3, &retval)) { + case ERTS_PORT_OP_CALLER_EXIT: + case ERTS_PORT_OP_DROPPED: + case ERTS_PORT_OP_BADARG: + retval = am_badarg; + break; + case ERTS_PORT_OP_SCHEDULED: + ASSERT(is_internal_ref(retval)); + break; + case ERTS_PORT_OP_DONE: + ASSERT(is_not_internal_ref(retval)); + break; + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_call() result"); + retval = am_internal_error; + break; } - return do_port_command(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, flags); -} -BIF_RETTYPE port_call_2(BIF_ALIST_2) -{ - return port_call(BIF_P,BIF_ARG_1, make_small(0), BIF_ARG_2); -} + state = erts_smp_atomic32_read_acqb(&BIF_P->state); + if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { +#ifdef ERTS_SMP + if (state & ERTS_PSFLG_PENDING_EXIT) + erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); +#endif + ERTS_BIF_EXITED(BIF_P); + } -BIF_RETTYPE port_call_3(BIF_ALIST_3) -{ - return port_call(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); + BIF_RET(retval); } -static BIF_RETTYPE -port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3) +BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3) { - Uint op; - Port *p; - Uint size; - byte *bytes; - byte *endp; - ErlDrvSizeT real_size; - erts_driver_t *drv; - byte port_input[256]; /* Default input buffer to encode in */ - byte port_result[256]; /* Buffer for result from port. */ - byte* port_resp; /* Pointer to result buffer. 
*/ - char *prc; - ErlDrvSSizeT ret; - Eterm res; - Sint result_size; - Eterm *hp; - Eterm *hp_end; - unsigned ret_flags = 0U; - int fpe_was_unmasked; - - bytes = &port_input[0]; - port_resp = port_result; - /* trace of port scheduling with virtual process descheduling - * lock wait - */ - if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(c_p, am_out); - } - - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(c_p, am_inactive); + Port* prt; + Eterm retval; + Uint uint_op; + unsigned int op; + erts_aint32_t state; + + prt = lookup_port(BIF_P, BIF_ARG_1); + if (!prt) + BIF_RET(am_badarg); + + if (!term_to_Uint(BIF_ARG_2, &uint_op)) + BIF_RET(am_badarg); + + if (uint_op > (Uint) UINT_MAX) + BIF_RET(am_badarg); + + op = (unsigned int) uint_op; + + switch (erts_port_control(BIF_P, prt, op, BIF_ARG_3, &retval)) { + case ERTS_PORT_OP_CALLER_EXIT: + case ERTS_PORT_OP_BADARG: + case ERTS_PORT_OP_DROPPED: + retval = am_badarg; + break; + case ERTS_PORT_OP_SCHEDULED: + ASSERT(is_internal_ref(retval)); + break; + case ERTS_PORT_OP_DONE: + ASSERT(is_not_internal_ref(retval)); + break; + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_control() result"); + retval = am_internal_error; + break; } - p = id_or_name2port(c_p, arg1); - if (!p) { - error: - if (port_resp != port_result && - !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) { - driver_free(port_resp); - } - if (bytes != &port_input[0]) - erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes); - /* Need to virtual schedule in the process if there - * was an error. - */ - if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(c_p, am_in); - } - - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(c_p, am_active); - } - - if (p) - erts_port_release(p); + state = erts_smp_atomic32_read_acqb(&BIF_P->state); + if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { #ifdef ERTS_SMP - ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN); -#else - ERTS_BIF_CHK_EXITED(c_p); + if (state & ERTS_PSFLG_PENDING_EXIT) + erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); #endif - BIF_ERROR(c_p, BADARG); - } - - if ((drv = p->drv_ptr) == NULL) { - goto error; - } - if (drv->call == NULL) { - goto error; - } - if (!term_to_Uint(arg2, &op)) { - goto error; - } - p->caller = c_p->id; - - /* Lock taken, virtual schedule of port */ - if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) { - trace_sched_ports_where(p, am_in, am_call); - } - - if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) { - profile_runnable_port(p, am_active); + ERTS_BIF_EXITED(BIF_P); } - size = erts_encode_ext_size(arg3); - if (size > sizeof(port_input)) - bytes = erts_alloc(ERTS_ALC_T_PORT_CALL_BUF, size); - endp = bytes; - erts_encode_ext(arg3, &endp); + BIF_RET(retval); +} - real_size = endp - bytes; - if (real_size > size) { - erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n", - __FILE__, __LINE__, endp - (bytes + size)); - } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); -#ifdef USE_VM_PROBES - if (DTRACE_ENABLED(driver_call)) { - DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); - DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); +/* + * erts_internal:port_close/1 is used by the + * erlang:port_close/1 BIF. 
+ */ +BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1) +{ + Eterm ref; + Port *prt; - dtrace_pid_str(p->connected, process_str); - dtrace_port_str(p, port_str); - DTRACE5(driver_call, process_str, port_str, p->name, op, real_size); - } -#endif - prc = (char *) port_resp; - fpe_was_unmasked = erts_block_fpe(); - ret = drv->call((ErlDrvData)p->drv_data, - (unsigned) op, - (char *) bytes, - (int) real_size, - &prc, - (int) sizeof(port_result), - &ret_flags); - erts_unblock_fpe(fpe_was_unmasked); - if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) { - trace_sched_ports_where(p, am_out, am_call); - } - - if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) { - profile_runnable_port(p, am_inactive); - } - - port_resp = (byte *) prc; - p->caller = NIL; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); -#ifdef HARDDEBUG - { - ErlDrvSizeT z; - printf("real_size = %ld,%d, ret = %ld,%d\r\n", (unsigned long) real_size, - (int) real_size, (unsigned long)ret, (int) ret); - printf("["); - for(z = 0; z < real_size; ++z) { - printf("%d, ",(int) bytes[z]); - } - printf("]\r\n"); - printf("["); - for(z = 0; z < ret; ++z) { - printf("%d, ",(int) port_resp[z]); - } - printf("]\r\n"); - } -#endif - if (ret <= 0 || port_resp[0] != VERSION_MAGIC) { - /* Error or a binary without magic/ with wrong magic */ - goto error; - } - result_size = erts_decode_ext_size(port_resp, ret); - if (result_size < 0) { - goto error; - } - hp = HAlloc(c_p, result_size); - hp_end = hp + result_size; - endp = port_resp; - res = erts_decode_ext(&hp, &MSO(c_p), &endp); - if (res == THE_NON_VALUE) { - goto error; - } - HRelease(c_p, hp_end, hp); - if (port_resp != port_result && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) { - driver_free(port_resp); - } - if (bytes != &port_input[0]) - erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes); - if (p) - erts_port_release(p); -#ifdef ERTS_SMP - ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN); -#else - ERTS_BIF_CHK_EXITED(c_p); +#ifdef DEBUG + ref = NIL; #endif - if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(c_p, am_in); - } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(c_p, am_active); + prt = lookup_port(BIF_P, BIF_ARG_1); + if (!prt) + BIF_RET(am_badarg); + + + switch (erts_port_exit(BIF_P, 0, prt, prt->common.id, am_normal, &ref)) { + case ERTS_PORT_OP_CALLER_EXIT: + case ERTS_PORT_OP_BADARG: + case ERTS_PORT_OP_DROPPED: + BIF_RET(am_badarg); + case ERTS_PORT_OP_SCHEDULED: + ASSERT(is_internal_ref(ref)); + BIF_RET(ref); + case ERTS_PORT_OP_DONE: + BIF_RET(am_true); + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_exit() result"); + BIF_RET(am_internal_error); } - - return res; } - -BIF_RETTYPE port_control_3(BIF_ALIST_3) -{ - Port* p; - Uint op; - Eterm res = THE_NON_VALUE; - - /* Virtual schedule out calling process before lock wait */ - if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(BIF_P, am_out); - } - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(BIF_P, am_inactive); - } +/* + * erts_internal:port_connect/2 is used by the + * erlang:port_connect/2 BIF. 
+ */ +BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2) +{ + Eterm ref; + Port* prt; - p = id_or_name2port(BIF_P, BIF_ARG_1); - if (!p) { - /* Schedule the process before exiting */ - if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(BIF_P, am_in); - } - - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(BIF_P, am_active); - } - - BIF_ERROR(BIF_P, BADARG); - } + prt = lookup_port(BIF_P, BIF_ARG_1); + if (!prt) + BIF_RET(am_badarg); - /* Trace the port for scheduling in */ - if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) { - trace_sched_ports_where(p, am_in, am_control); - } - - if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) { - profile_runnable_port(p, am_active); - } +#ifdef DEBUG + ref = NIL; +#endif - if (term_to_Uint(BIF_ARG_2, &op)) - res = erts_port_control(BIF_P, p, op, BIF_ARG_3); - - /* Trace the port for scheduling out */ - if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) { - trace_sched_ports_where(p, am_out, am_control); + switch (erts_port_connect(BIF_P, 0, prt, prt->common.id, BIF_ARG_2, &ref)) { + case ERTS_PORT_OP_CALLER_EXIT: + case ERTS_PORT_OP_BADARG: + case ERTS_PORT_OP_DROPPED: + BIF_RET(am_badarg); + case ERTS_PORT_OP_SCHEDULED: + ASSERT(is_internal_ref(ref)); + BIF_RET(ref); + break; + case ERTS_PORT_OP_DONE: + BIF_RET(am_true); + break; + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_connect() result"); + BIF_RET(am_internal_error); } +} - if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) { - profile_runnable_port(p, am_inactive); - } +BIF_RETTYPE erts_internal_port_info_1(BIF_ALIST_1) +{ + Eterm retval; + Port* prt; - erts_port_release(p); -#ifdef ERTS_SMP - ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN); -#else - ERTS_BIF_CHK_EXITED(BIF_P); -#endif - - if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) { - trace_virtual_sched(BIF_P, am_in); + if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) { + prt = lookup_port(BIF_P, BIF_ARG_1); + if (!prt) + BIF_RET(am_undefined); } - - if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) { - profile_runnable_proc(BIF_P, am_active); + else if (is_external_port(BIF_ARG_1)) { + if (external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry) + BIF_RET(am_undefined); + else + BIF_RET(am_badarg); } - - if (is_non_value(res)) { - BIF_ERROR(BIF_P, BADARG); + else { + BIF_RET(am_badarg); + } + + switch (erts_port_info(BIF_P, prt, THE_NON_VALUE, &retval)) { + case ERTS_PORT_OP_CALLER_EXIT: + case ERTS_PORT_OP_BADARG: + BIF_RET(am_badarg); + case ERTS_PORT_OP_DROPPED: + BIF_RET(am_undefined); + case ERTS_PORT_OP_SCHEDULED: + ASSERT(is_internal_ref(retval)); + BIF_RET(retval); + case ERTS_PORT_OP_DONE: + ASSERT(is_not_internal_ref(retval)); + BIF_RET(retval); + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_info() result"); + BIF_RET(am_internal_error); } - BIF_RET(res); } -BIF_RETTYPE port_close_1(BIF_ALIST_1) -{ - Port* p; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - p = id_or_name2port(NULL, BIF_ARG_1); - if (!p) { - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - BIF_ERROR(BIF_P, BADARG); - } - erts_do_exit_port(p, p->connected, am_normal); - /* if !ERTS_SMP: since we terminate port with reason normal - we SHOULD never get an exit signal ourselves - */ - erts_port_release(p); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - BIF_RET(am_true); -} -BIF_RETTYPE port_connect_2(BIF_ALIST_2) +BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2) { 
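/*
 * Every erts_internal_port_* BIF introduced in this file follows the same
 * shape: look the port up, invoke the corresponding erts_port_*() operation,
 * and translate its ERTS_PORT_OP_* result into am_badarg, an internal
 * reference (the operation was scheduled on the port and completes
 * asynchronously), or the immediate result.  A minimal sketch of that
 * translation, assuming only the ERTS_PORT_OP_* constants and am_* atoms
 * used above; the helper name port_op_result_to_term is hypothetical and
 * not part of this patch:
 *
 *   static Eterm port_op_result_to_term(int result, Eterm retval)
 *   {
 *       switch (result) {
 *       case ERTS_PORT_OP_CALLER_EXIT:
 *       case ERTS_PORT_OP_BADARG:
 *       case ERTS_PORT_OP_DROPPED:
 *           return am_badarg;
 *       case ERTS_PORT_OP_SCHEDULED:
 *       case ERTS_PORT_OP_DONE:
 *           return retval;
 *       default:
 *           return am_internal_error;
 *       }
 *   }
 *
 * For ERTS_PORT_OP_SCHEDULED, retval is an internal reference the caller is
 * expected to wait on; for ERTS_PORT_OP_DONE it is the finished result.
 */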
+ Eterm retval; Port* prt; - Process* rp; - Eterm pid = BIF_ARG_2; - if (is_not_internal_pid(pid)) { - error: - BIF_ERROR(BIF_P, BADARG); + if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) { + prt = lookup_port(BIF_P, BIF_ARG_1); + if (!prt) + BIF_RET(am_undefined); } - prt = id_or_name2port(BIF_P, BIF_ARG_1); - if (!prt) { - goto error; + else if (is_external_port(BIF_ARG_1)) { + if (external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry) + BIF_RET(am_undefined); + else + BIF_RET(am_badarg); } - - rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN, - pid, ERTS_PROC_LOCK_LINK); - if (!rp) { - erts_smp_port_unlock(prt); - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); - goto error; - } - - erts_add_link(&(rp->nlinks), LINK_PID, prt->id); - erts_add_link(&(prt->nlinks), LINK_PID, pid); - - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); - - prt->connected = pid; /* internal pid */ - erts_smp_port_unlock(prt); -#ifdef USE_VM_PROBES - if (DTRACE_ENABLED(port_connect)) { - DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); - DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); - DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE); - - dtrace_pid_str(prt->connected, process_str); - erts_snprintf(port_str, sizeof(port_str), "%T", prt->id); - dtrace_proc_str(rp, newprocess_str); - DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str); + else { + BIF_RET(am_badarg); + } + + switch (erts_port_info(BIF_P, prt, BIF_ARG_2, &retval)) { + case ERTS_PORT_OP_CALLER_EXIT: + case ERTS_PORT_OP_BADARG: + BIF_RET(am_badarg); + case ERTS_PORT_OP_DROPPED: + BIF_RET(am_undefined); + case ERTS_PORT_OP_SCHEDULED: + ASSERT(is_internal_ref(retval)); + BIF_RET(retval); + case ERTS_PORT_OP_DONE: + ASSERT(is_not_internal_ref(retval)); + BIF_RET(retval); + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_info() result"); + BIF_RET(am_internal_error); } -#endif - BIF_RET(am_true); } -BIF_RETTYPE port_set_data_2(BIF_ALIST_2) + +BIF_RETTYPE erts_internal_port_set_data_2(BIF_ALIST_2) { + Eterm ref; Port* prt; - Eterm portid = BIF_ARG_1; - Eterm data = BIF_ARG_2; - prt = id_or_name2port(BIF_P, portid); - if (!prt) { - BIF_ERROR(BIF_P, BADARG); - } - if (prt->bp != NULL) { - free_message_buffer(prt->bp); - prt->bp = NULL; - } - if (IS_CONST(data)) { - prt->data = data; - } else { - Uint size; - ErlHeapFragment* bp; - Eterm* hp; - - size = size_object(data); - prt->bp = bp = new_message_buffer(size); - hp = bp->mem; - prt->data = copy_struct(data, size, &hp, &bp->off_heap); + prt = lookup_port(BIF_P, BIF_ARG_1); + if (!prt) + BIF_RET(am_badarg); + + switch (erts_port_set_data(BIF_P, prt, BIF_ARG_2, &ref)) { + case ERTS_PORT_OP_CALLER_EXIT: + case ERTS_PORT_OP_BADARG: + case ERTS_PORT_OP_DROPPED: + BIF_RET(am_badarg); + case ERTS_PORT_OP_SCHEDULED: + ASSERT(is_internal_ref(ref)); + BIF_RET(ref); + case ERTS_PORT_OP_DONE: + BIF_RET(am_true); + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_set_data() result"); + BIF_RET(am_internal_error); } - erts_smp_port_unlock(prt); - BIF_RET(am_true); } -BIF_RETTYPE port_get_data_1(BIF_ALIST_1) +BIF_RETTYPE erts_internal_port_get_data_1(BIF_ALIST_1) { - BIF_RETTYPE res; + Eterm retval; Port* prt; - Eterm portid = BIF_ARG_1; - prt = id_or_name2port(BIF_P, portid); - if (!prt) { - BIF_ERROR(BIF_P, BADARG); - } - if (prt->bp == NULL) { /* MUST be CONST! 
*/ - res = prt->data; - } else { - Eterm* hp = HAlloc(BIF_P, prt->bp->used_size); - res = copy_struct(prt->data, prt->bp->used_size, &hp, &MSO(BIF_P)); + prt = lookup_port(BIF_P, BIF_ARG_1); + if (!prt) + BIF_RET(am_badarg); + + switch (erts_port_get_data(BIF_P, prt, &retval)) { + case ERTS_PORT_OP_CALLER_EXIT: + case ERTS_PORT_OP_BADARG: + case ERTS_PORT_OP_DROPPED: + BIF_RET(am_badarg); + case ERTS_PORT_OP_SCHEDULED: + ASSERT(is_internal_ref(retval)); + BIF_RET(retval); + case ERTS_PORT_OP_DONE: + ASSERT(is_not_internal_ref(retval)); + BIF_RET(retval); + default: + ERTS_INTERNAL_ERROR("Unexpected erts_port_get_data() result"); + BIF_RET(am_internal_error); } - erts_smp_port_unlock(prt); - BIF_RET(res); } /* @@ -625,11 +485,10 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1) * either BADARG or SYSTEM_LIMIT). */ -static int -open_port(Process* p, Eterm name, Eterm settings, int *err_nump) +static Port * +open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) { -#define OPEN_PORT_ERROR(VAL) do { port_num = (VAL); goto do_return; } while (0) - int i, port_num; + int i; Eterm option; Uint arity; Eterm* tp; @@ -637,11 +496,11 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump) erts_driver_t* driver; char* name_buf = NULL; SysDriverOpts opts; - int binary_io; - int soft_eof; Sint linebuf; Eterm edir = NIL; byte dir[MAXPATHLEN]; + erts_aint32_t sflgs = 0; + Port *port; /* These are the defaults */ opts.packet_bytes = 0; @@ -655,8 +514,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump) opts.overlapped_io = 0; opts.spawn_type = ERTS_SPAWN_ANY; opts.argv = NULL; - binary_io = 0; - soft_eof = 0; + opts.parallelism = erts_port_parallelism; linebuf = 0; *err_nump = 0; @@ -734,6 +592,13 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump) } } else if (option == am_cd) { edir = *tp; + } else if (option == am_parallelism) { + if (*tp == am_true) + opts.parallelism = 1; + else if (*tp == am_false) + opts.parallelism = 0; + else + goto badarg; } else { goto badarg; } @@ -748,13 +613,13 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump) } else if (*nargs == am_nouse_stdio) { opts.use_stdio = 0; } else if (*nargs == am_binary) { - binary_io = 1; + sflgs |= ERTS_PORT_SFLG_BINARY_IO; } else if (*nargs == am_in) { opts.read_write |= DO_READ; } else if (*nargs == am_out) { opts.read_write |= DO_WRITE; } else if (*nargs == am_eof) { - soft_eof = 1; + sflgs |= ERTS_PORT_SFLG_SOFT_EOF; } else if (*nargs == am_hide) { opts.hide_window = 1; } else if (*nargs == am_exit_status) { @@ -902,9 +767,9 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump) heap[2] = make_small(0); heap[3] = NIL; iolist = make_list(heap); - r = io_list_to_buf(iolist, (char*) dir, MAXPATHLEN); + r = erts_iolist_to_buf(iolist, (char*) dir, MAXPATHLEN); UnUseTmpHeap(4,p); - if (r < 0) { + if (ERTS_IOLIST_TO_BUF_FAILED(r)) { goto badarg; } opts.wd = (char *) dir; @@ -926,44 +791,40 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump) erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - port_num = erts_open_driver(driver, p->id, name_buf, &opts, err_nump); + port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump); #ifdef USE_VM_PROBES - if (port_num >= 0 && DTRACE_ENABLED(port_open)) { + if (port && DTRACE_ENABLED(port_open)) { DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); dtrace_proc_str(p, process_str); - erts_snprintf(port_str, sizeof(port_str), "%T", 
erts_port[port_num].id); + erts_snprintf(port_str, sizeof(port_str), "%T", port->common.id); DTRACE3(port_open, process_str, name_buf, port_str); } #endif erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); - if (port_num < 0) { - DEBUGF(("open_driver returned %d(%d)\n", port_num, *err_nump)); + if (!port) { + DEBUGF(("open_driver returned (%d:%d)\n", + err_typep ? *err_typep : 4711, + err_nump ? *err_nump : 4711)); if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { trace_virtual_sched(p, am_in); } - OPEN_PORT_ERROR(port_num); + goto do_return; } if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { trace_virtual_sched(p, am_in); } - if (binary_io) { - erts_port_status_bor_set(&erts_port[port_num], - ERTS_PORT_SFLG_BINARY_IO); - } - if (soft_eof) { - erts_port_status_bor_set(&erts_port[port_num], - ERTS_PORT_SFLG_SOFT_EOF); - } - if (linebuf && erts_port[port_num].linebuf == NULL){ - erts_port[port_num].linebuf = allocate_linebuf(linebuf); - erts_port_status_bor_set(&erts_port[port_num], - ERTS_PORT_SFLG_LINEBUF_IO); + if (linebuf && port->linebuf == NULL){ + port->linebuf = allocate_linebuf(linebuf); + sflgs |= ERTS_PORT_SFLG_LINEBUF_IO; } + + if (sflgs) + erts_atomic32_read_bor_relb(&port->state, sflgs); do_return: if (name_buf) @@ -974,13 +835,15 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump) if (opts.wd && opts.wd != ((char *)dir)) { erts_free(ERTS_ALC_T_TMP, (void *) opts.wd); } - return port_num; + return port; badarg: - *err_nump = BADARG; - OPEN_PORT_ERROR(-3); + if (err_typep) + *err_typep = -3; + if (err_nump) + *err_nump = BADARG; + port = NULL; goto do_return; -#undef OPEN_PORT_ERROR } /* Arguments can be given i unicode and as raw binaries, convert filename is used to convert */ diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c index b036c5ef5c..88f980d19f 100644 --- a/erts/emulator/beam/erl_bif_re.c +++ b/erts/emulator/beam/erl_bif_re.c @@ -413,7 +413,7 @@ build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, con static BIF_RETTYPE re_compile(Process* p, Eterm arg1, Eterm arg2) { - Uint slen; + ErlDrvSizeT slen; char *expr; pcre *result; int errcode = 0; @@ -444,7 +444,7 @@ re_compile(Process* p, Eterm arg1, Eterm arg2) BIF_ERROR(p,BADARG); } expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1); - if (io_list_to_buf(arg1, expr, slen) != 0) { + if (erts_iolist_to_buf(arg1, expr, slen) != 0) { erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); BIF_ERROR(p,BADARG); } @@ -797,7 +797,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) memcpy(tmpb,ap->name,ap->len); tmpb[ap->len] = '\0'; } else { - Uint slen; + ErlDrvSizeT slen; if (erts_iolist_size(val, &slen)) { goto error; } @@ -809,7 +809,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code) (tmpbsiz = slen + 1)); } } - if (io_list_to_buf(val, tmpb, slen) != 0) { + if (erts_iolist_to_buf(val, tmpb, slen) != 0) { goto error; } tmpb[slen] = '\0'; @@ -853,7 +853,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) const pcre *code_tmp; RestartContext restart; byte *temp_alloc = NULL; - Uint slength; + ErlDrvSizeT slength; int startoffset = 0; int options = 0, comp_options = 0; int ovsize; @@ -877,7 +877,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) if (is_not_tuple(arg2) || (arityval(*tuple_val(arg2)) != 4)) { if (is_binary(arg2) || is_list(arg2) || is_nil(arg2)) { /* Compile from textual RE */ - Uint slen; + ErlDrvSizeT slen; char *expr; pcre *result; int errcode = 0; @@ -896,7 +896,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm 
arg3) } expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1); - if (io_list_to_buf(arg2, expr, slen) != 0) { + if (erts_iolist_to_buf(arg2, expr, slen) != 0) { erts_free(ERTS_ALC_T_RE_TMP_BUF, expr); BIF_ERROR(p,BADARG); } @@ -1039,7 +1039,7 @@ handle_iolist: } restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength); - if (io_list_to_buf(arg1, restart.subject, slength) != 0) { + if (erts_iolist_to_buf(arg1, restart.subject, slength) != 0) { erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector); erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code); erts_free(ERTS_ALC_T_RE_SUBJECT, restart.subject); diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c index 525b11f61c..d67695e533 100644 --- a/erts/emulator/beam/erl_bif_timer.c +++ b/erts/emulator/beam/erl_bif_timer.c @@ -265,10 +265,10 @@ link_proc(Process *p, ErtsBifTimer* btm) { btm->receiver.proc.ess = p; btm->receiver.proc.prev = NULL; - btm->receiver.proc.next = p->bif_timers; - if (p->bif_timers) - p->bif_timers->receiver.proc.prev = btm; - p->bif_timers = btm; + btm->receiver.proc.next = p->u.bif_timers; + if (p->u.bif_timers) + p->u.bif_timers->receiver.proc.prev = btm; + p->u.bif_timers = btm; } static ERTS_INLINE void @@ -277,7 +277,7 @@ unlink_proc(ErtsBifTimer* btm) if (btm->receiver.proc.prev) btm->receiver.proc.prev->receiver.proc.next = btm->receiver.proc.next; else - btm->receiver.proc.ess->bif_timers = btm->receiver.proc.next; + btm->receiver.proc.ess->u.bif_timers = btm->receiver.proc.next; if (btm->receiver.proc.next) btm->receiver.proc.next->receiver.proc.prev = btm->receiver.proc.prev; } @@ -613,7 +613,7 @@ erts_print_bif_timer_info(int to, void *to_arg) for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) { Eterm receiver = (btm->flags & BTM_FLG_BYNAME ? btm->receiver.name - : btm->receiver.proc.ess->id); + : btm->receiver.proc.ess->common.id); erts_print(to, to_arg, "=timer:%T\n", receiver); erts_print(to, to_arg, "Message: %T\n", btm->message); erts_print(to, to_arg, "Time left: %u ms\n", @@ -637,7 +637,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks) erts_smp_proc_lock(p, plocks); } - btm = p->bif_timers; + btm = p->u.bif_timers; while (btm) { ErtsBifTimer *tmp_btm; ASSERT(!(btm->flags & BTM_FLG_CANCELED)); @@ -647,7 +647,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks) erts_cancel_timer(&tmp_btm->tm); } - p->bif_timers = NULL; + p->u.bif_timers = NULL; erts_smp_btm_rwunlock(); } @@ -696,7 +696,7 @@ erts_bif_timer_foreach(void (*func)(Eterm, Eterm, ErlHeapFragment *, void *), for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) { (*func)((btm->flags & BTM_FLG_BYNAME ? 
btm->receiver.name - : btm->receiver.proc.ess->id), + : btm->receiver.proc.ess->common.id), btm->message, btm->bp, arg); diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c index 805d788177..99a4394666 100644 --- a/erts/emulator/beam/erl_bif_trace.c +++ b/erts/emulator/beam/erl_bif_trace.c @@ -124,7 +124,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) struct trace_pattern_flags flags = erts_trace_pattern_flags_off; int is_global; Process *meta_tracer_proc = p; - Eterm meta_tracer_pid = p->id; + Eterm meta_tracer_pid = p->common.id; if (!erts_try_seize_code_write_permission(p)) { ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist); @@ -171,14 +171,12 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) } } else if (is_internal_port(meta_tracer_pid)) { Port *meta_tracer_port; - meta_tracer_proc = NULL; - if (internal_port_index(meta_tracer_pid) >= erts_max_ports) + meta_tracer_proc = NULL; + meta_tracer_port = (erts_port_lookup( + meta_tracer_pid, + ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)); + if (!meta_tracer_port) goto error; - meta_tracer_port = - &erts_port[internal_port_index(meta_tracer_pid)]; - if (INVALID_TRACER_PORT(meta_tracer_port, meta_tracer_pid)) { - goto error; - } } else { goto error; } @@ -254,7 +252,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) MatchSetRef(erts_default_meta_match_spec); erts_default_meta_tracer_pid = meta_tracer_pid; if (meta_tracer_proc) { - meta_tracer_proc->trace_flags |= F_TRACER; + ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER; } } else if (! flags.breakpoint) { MatchSetUnref(erts_default_meta_match_spec); @@ -342,7 +340,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) } if (meta_tracer_proc) { - meta_tracer_proc->trace_flags |= F_TRACER; + ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER; } matches = erts_set_trace_pattern(p, mfa, specified, @@ -526,24 +524,24 @@ Eterm trace_3(BIF_ALIST_3) if (is_nil(tracer) || is_internal_pid(tracer)) { Process *tracer_proc = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, - is_nil(tracer) ? p->id : tracer, + is_nil(tracer) ? p->common.id : tracer, ERTS_PROC_LOCKS_ALL); if (!tracer_proc) goto error; - tracer_proc->trace_flags |= F_TRACER; + ERTS_TRACE_FLAGS(tracer_proc) |= F_TRACER; erts_smp_proc_unlock(tracer_proc, (tracer_proc == p ? 
ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); } else if (is_internal_port(tracer)) { - Port *tracer_port = erts_id2port(tracer, p, ERTS_PROC_LOCK_MAIN); - if (!erts_is_valid_tracer_port(tracer)) { - if (tracer_port) - erts_smp_port_unlock(tracer_port); + Port *tracer_port = erts_id2port_sflgs(tracer, + p, + ERTS_PROC_LOCK_MAIN, + ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); + if (!tracer_port) goto error; - } - tracer_port->trace_flags |= F_TRACER; - erts_smp_port_unlock(tracer_port); + ERTS_TRACE_FLAGS(tracer_port) |= F_TRACER; + erts_port_release(tracer_port); } else goto error; @@ -554,7 +552,7 @@ Eterm trace_3(BIF_ALIST_3) case am_true: on = 1; if (is_nil(tracer)) - tracer = p->id; + tracer = p->common.id; break; default: goto error; @@ -576,26 +574,29 @@ Eterm trace_3(BIF_ALIST_3) if (pid_spec == tracer) goto error; - tracee_port = erts_id2port(pid_spec, p, ERTS_PROC_LOCK_MAIN); + tracee_port = erts_id2port_sflgs(pid_spec, + p, + ERTS_PROC_LOCK_MAIN, + ERTS_PORT_SFLGS_INVALID_LOOKUP); if (!tracee_port) goto error; if (tracer != NIL && port_already_traced(p, tracee_port, tracer)) { - erts_smp_port_unlock(tracee_port); + erts_port_release(tracee_port); goto already_traced; } if (on) - tracee_port->trace_flags |= mask; + ERTS_TRACE_FLAGS(tracee_port) |= mask; else - tracee_port->trace_flags &= ~mask; + ERTS_TRACE_FLAGS(tracee_port) &= ~mask; - if (!tracee_port->trace_flags) - tracee_port->tracer_proc = NIL; + if (!ERTS_TRACE_FLAGS(tracee_port)) + ERTS_TRACER_PROC(tracee_port) = NIL; else if (tracer != NIL) - tracee_port->tracer_proc = tracer; + ERTS_TRACER_PROC(tracee_port) = tracer; - erts_smp_port_unlock(tracee_port); + erts_port_release(tracee_port); matches = 1; } else if (is_pid(pid_spec)) { @@ -627,14 +628,14 @@ Eterm trace_3(BIF_ALIST_3) } if (on) - tracee_p->trace_flags |= mask; + ERTS_TRACE_FLAGS(tracee_p) |= mask; else - tracee_p->trace_flags &= ~mask; + ERTS_TRACE_FLAGS(tracee_p) &= ~mask; - if ((tracee_p->trace_flags & TRACEE_FLAGS) == 0) - tracee_p->tracer_proc = NIL; + if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS) == 0) + ERTS_TRACER_PROC(tracee_p) = NIL; else if (tracer != NIL) - tracee_p->tracer_proc = tracer; + ERTS_TRACER_PROC(tracee_p) = tracer; erts_smp_proc_unlock(tracee_p, (tracee_p == p @@ -708,47 +709,56 @@ Eterm trace_3(BIF_ALIST_3) ok = 1; if (procs || mods) { + int max = erts_ptab_max(&erts_proc); /* tracing of processes */ - for (i = 0; i < erts_max_processes; i++) { + for (i = 0; i < max; i++) { Process* tracee_p = erts_pix2proc(i); if (! 
tracee_p) continue; if (tracer != NIL) { - if (tracee_p->id == tracer) + if (tracee_p->common.id == tracer) continue; if (already_traced(NULL, tracee_p, tracer)) continue; } if (on) { - tracee_p->trace_flags |= mask; + ERTS_TRACE_FLAGS(tracee_p) |= mask; } else { - tracee_p->trace_flags &= ~mask; + ERTS_TRACE_FLAGS(tracee_p) &= ~mask; } - if(!(tracee_p->trace_flags & TRACEE_FLAGS)) { - tracee_p->tracer_proc = NIL; + if(!(ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)) { + ERTS_TRACER_PROC(tracee_p) = NIL; } else if (tracer != NIL) { - tracee_p->tracer_proc = tracer; + ERTS_TRACER_PROC(tracee_p) = tracer; } matches++; } } if (ports || mods) { + int max = erts_ptab_max(&erts_port); /* tracing of ports */ - for (i = 0; i < erts_max_ports; i++) { - Port *tracee_port = &erts_port[i]; - if (tracee_port->status & ERTS_PORT_SFLGS_DEAD) continue; + for (i = 0; i < max; i++) { + erts_aint32_t state; + Port *tracee_port = erts_pix2port(i); + if (!tracee_port) + continue; + state = erts_atomic32_read_nob(&tracee_port->state); + if (state & ERTS_PORT_SFLGS_DEAD) + continue; if (tracer != NIL) { - if (tracee_port->id == tracer) continue; - if (port_already_traced(NULL, tracee_port, tracer)) continue; + if (tracee_port->common.id == tracer) + continue; + if (port_already_traced(NULL, tracee_port, tracer)) + continue; } - if (on) tracee_port->trace_flags |= mask; - else tracee_port->trace_flags &= ~mask; + if (on) ERTS_TRACE_FLAGS(tracee_port) |= mask; + else ERTS_TRACE_FLAGS(tracee_port) &= ~mask; - if (!(tracee_port->trace_flags & TRACEE_FLAGS)) { - tracee_port->tracer_proc = NIL; + if (!(ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)) { + ERTS_TRACER_PROC(tracee_port) = NIL; } else if (tracer != NIL) { - tracee_port->tracer_proc = tracer; + ERTS_TRACER_PROC(tracee_port) = tracer; } /* matches are not counted for ports since it would violate compatibility */ /* This could be a reason to modify this function or make a new one. */ @@ -817,20 +827,20 @@ static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer) * * main lock is held on c_p * * all locks are held on port tracee_p */ - if ((tracee_port->trace_flags & TRACEE_FLAGS) - && tracee_port->tracer_proc != tracer) { + if ((ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS) + && ERTS_TRACER_PROC(tracee_port) != tracer) { /* This tracee is already being traced, and not by the * tracer to be */ - if (is_internal_port(tracee_port->tracer_proc)) { - if (!erts_is_valid_tracer_port(tracee_port->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(tracee_port))) { + if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_port))) { /* Current trace port now invalid * - discard it and approve the new. */ goto remove_tracer; } else return 1; } - else if(is_internal_pid(tracee_port->tracer_proc)) { - Process *tracer_p = erts_proc_lookup(tracee_port->tracer_proc); + else if(is_internal_pid(ERTS_TRACER_PROC(tracee_port))) { + Process *tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_port)); if (!tracer_p) { /* Current trace process now invalid * - discard it and approve the new. 
*/ @@ -840,8 +850,8 @@ static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer) } else { remove_tracer: - tracee_port->trace_flags &= ~TRACEE_FLAGS; - tracee_port->tracer_proc = NIL; + ERTS_TRACE_FLAGS(tracee_port) &= ~TRACEE_FLAGS; + ERTS_TRACER_PROC(tracee_port) = NIL; } } return 0; @@ -857,20 +867,22 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer) * * main lock is held on c_p * * all locks multiple are held on tracee_p */ - if ((tracee_p->trace_flags & TRACEE_FLAGS) - && tracee_p->tracer_proc != tracer) { + if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS) + && ERTS_TRACER_PROC(tracee_p) != tracer) { /* This tracee is already being traced, and not by the * tracer to be */ - if (is_internal_port(tracee_p->tracer_proc)) { - if (!erts_is_valid_tracer_port(tracee_p->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(tracee_p))) { + if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_p))) { /* Current trace port now invalid * - discard it and approve the new. */ goto remove_tracer; } else return 1; } - else if(is_internal_pid(tracee_p->tracer_proc)) { - Process *tracer_p = erts_proc_lookup(tracee_p->tracer_proc); + else if(is_internal_pid(ERTS_TRACER_PROC(tracee_p))) { + Process *tracer_p; + + tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_p)); if (!tracer_p) { /* Current trace process now invalid * - discard it and approve the new. */ @@ -880,8 +892,8 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer) } else { remove_tracer: - tracee_p->trace_flags &= ~TRACEE_FLAGS; - tracee_p->tracer_proc = NIL; + ERTS_TRACE_FLAGS(tracee_p) &= ~TRACEE_FLAGS; + ERTS_TRACER_PROC(tracee_p) = NIL; } } return 0; @@ -925,8 +937,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) if (pid_spec == am_new) { erts_get_default_tracing(&trace_flags, &tracer); - } else if (is_internal_pid(pid_spec) - && internal_pid_index(pid_spec) < erts_max_processes) { + } else if (is_internal_pid(pid_spec)) { Process *tracee; tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid_spec, ERTS_PROC_LOCKS_ALL); @@ -934,16 +945,16 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) if (!tracee) { return am_undefined; } else { - tracer = tracee->tracer_proc; - trace_flags = tracee->trace_flags; + tracer = ERTS_TRACER_PROC(tracee); + trace_flags = ERTS_TRACE_FLAGS(tracee); } if (is_internal_pid(tracer)) { if (!erts_proc_lookup(tracer)) { reset_tracer: - tracee->trace_flags &= ~TRACEE_FLAGS; - trace_flags = tracee->trace_flags; - tracer = tracee->tracer_proc = NIL; + ERTS_TRACE_FLAGS(tracee) &= ~TRACEE_FLAGS; + trace_flags = ERTS_TRACE_FLAGS(tracee); + tracer = ERTS_TRACER_PROC(tracee) = NIL; } } else if (is_internal_port(tracer)) { @@ -1877,7 +1888,7 @@ new_seq_trace_token(Process* p) SEQ_TRACE_TOKEN(p) = TUPLE5(hp, make_small(0), /* Flags */ make_small(0), /* Label */ make_small(0), /* Serial */ - p->id, /* Internal pid */ /* From */ + p->common.id, /* Internal pid */ /* From */ make_small(p->seq_trace_lastcnt)); } } @@ -2247,9 +2258,11 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) if (!profiler_p) goto error; } else if (is_internal_port(profiler)) { - if (internal_port_index(profiler) >= erts_max_ports) goto error; - profiler_port = &erts_port[internal_port_index(profiler)]; - if (INVALID_TRACER_PORT(profiler_port, profiler)) goto error; + profiler_port = (erts_port_lookup( + profiler, + ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)); + if (!profiler_port) + goto error; } else { goto error; } @@ -2313,8 +2326,7 @@ trace_delivered_1(BIF_ALIST_1) p = NULL; 
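/*
 * The system_profile_2() hunk above shows the replacement pattern used
 * throughout this file: bounds-checked indexing into the global erts_port[]
 * array followed by INVALID_TRACER_PORT() gives way to a single state-flag
 * aware lookup.  A minimal sketch of the new idiom, assuming only
 * erts_port_lookup() and ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP as used in
 * the hunks above; the helper name tracer_port_is_valid is hypothetical:
 *
 *   static int tracer_port_is_valid(Eterm id)
 *   {
 *       return is_internal_port(id)
 *           && erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP) != NULL;
 *   }
 *
 * The lookup itself rejects dead ports and ports whose state matches the
 * invalid-lookup mask, which is why the separate validity macro disappears
 * from these call sites.
 */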
} else if (! (p = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN, BIF_ARG_1, ERTS_PROC_LOCKS_ALL))) { - if (is_not_internal_pid(BIF_ARG_1) - || internal_pid_index(BIF_ARG_1) >= erts_max_processes) { + if (is_not_internal_pid(BIF_ARG_1)) { BIF_ERROR(BIF_P, BADARG); } } @@ -2333,7 +2345,7 @@ trace_delivered_1(BIF_ALIST_1) msg = TUPLE3(hp, AM_trace_delivered, BIF_ARG_1, msg_ref); #ifdef ERTS_SMP - erts_send_sys_msg_proc(BIF_P->id, BIF_P->id, msg, bp); + erts_send_sys_msg_proc(BIF_P->common.id, BIF_P->common.id, msg, bp); if (p) erts_smp_proc_unlock(p, (BIF_P == p diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c index 3f90f34736..88456a85f3 100644 --- a/erts/emulator/beam/erl_cpu_topology.c +++ b/erts/emulator/beam/erl_cpu_topology.c @@ -34,7 +34,7 @@ #include "bif.h" #include "erl_cpu_topology.h" -#define ERTS_MAX_READER_GROUPS 8 +#define ERTS_MAX_READER_GROUPS 64 /* * Cpu topology hierarchy. @@ -620,30 +620,38 @@ write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size) int erts_init_scheduler_bind_type_string(char *how) { + ErtsCpuBindOrder order; + if (sys_strcmp(how, "u") == 0) - cpu_bind_order = ERTS_CPU_BIND_NONE; - else if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP) - return ERTS_INIT_SCHED_BIND_TYPE_NOT_SUPPORTED; - else if (!system_cpudata && !user_cpudata) - return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY; + order = ERTS_CPU_BIND_NONE; else if (sys_strcmp(how, "db") == 0) - cpu_bind_order = ERTS_CPU_BIND_DEFAULT_BIND; + order = ERTS_CPU_BIND_DEFAULT_BIND; else if (sys_strcmp(how, "s") == 0) - cpu_bind_order = ERTS_CPU_BIND_SPREAD; + order = ERTS_CPU_BIND_SPREAD; else if (sys_strcmp(how, "ps") == 0) - cpu_bind_order = ERTS_CPU_BIND_PROCESSOR_SPREAD; + order = ERTS_CPU_BIND_PROCESSOR_SPREAD; else if (sys_strcmp(how, "ts") == 0) - cpu_bind_order = ERTS_CPU_BIND_THREAD_SPREAD; + order = ERTS_CPU_BIND_THREAD_SPREAD; else if (sys_strcmp(how, "tnnps") == 0) - cpu_bind_order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD; + order = ERTS_CPU_BIND_THREAD_NO_NODE_PROCESSOR_SPREAD; else if (sys_strcmp(how, "nnps") == 0) - cpu_bind_order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD; + order = ERTS_CPU_BIND_NO_NODE_PROCESSOR_SPREAD; else if (sys_strcmp(how, "nnts") == 0) - cpu_bind_order = ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD; + order = ERTS_CPU_BIND_NO_NODE_THREAD_SPREAD; else if (sys_strcmp(how, "ns") == 0) - cpu_bind_order = ERTS_CPU_BIND_NO_SPREAD; + order = ERTS_CPU_BIND_NO_SPREAD; else - return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_BAD_TYPE; + return ERTS_INIT_SCHED_BIND_TYPE_ERROR_BAD_TYPE; + + if (order != ERTS_CPU_BIND_NONE) { + if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP) + return ERTS_INIT_SCHED_BIND_TYPE_NOT_SUPPORTED; + else if (!system_cpudata && !user_cpudata) + return ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY; + } + + cpu_bind_order = order; + return ERTS_INIT_SCHED_BIND_TYPE_SUCCESS; } diff --git a/erts/emulator/beam/erl_cpu_topology.h b/erts/emulator/beam/erl_cpu_topology.h index c5a9520b61..11915e1ea8 100644 --- a/erts/emulator/beam/erl_cpu_topology.h +++ b/erts/emulator/beam/erl_cpu_topology.h @@ -40,7 +40,7 @@ void erts_init_cpu_topology(void); #define ERTS_INIT_SCHED_BIND_TYPE_SUCCESS 0 #define ERTS_INIT_SCHED_BIND_TYPE_NOT_SUPPORTED 1 #define ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY 2 -#define ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_BAD_TYPE 3 +#define ERTS_INIT_SCHED_BIND_TYPE_ERROR_BAD_TYPE 3 int erts_init_scheduler_bind_type_string(char *how); diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c 
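The erl_db.c hunks below are dominated by the same field relocation already
seen in the trace and timer files above: identifiers and liveness bookkeeping
move from the top level of Process and Port into a shared "common" member,
and trace state is reached through the ERTS_TRACE_FLAGS()/ERTS_TRACER_PROC()
accessor macros rather than raw fields.  A rough sketch of the layout these
access paths assume -- the struct and type names here are illustrative, only
the paths p->common.id and p->common.u.alive.started_interval are taken from
the hunks themselves:

    struct common_part {                  /* embedded as proc->common / prt->common */
        Eterm id;                         /* formerly p->id and prt->id */
        union {
            struct {
                Uint64 started_interval;  /* formerly p->started_interval */
            } alive;
        } u;
    };

The process-local union is renamed independently of this (c_p->u.exit_data
becomes c_p->u.terminate, p->bif_timers becomes p->u.bif_timers), so the two
u members in the hunks refer to different structures.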
index f8a4882ec0..7932966539 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -425,7 +425,8 @@ DbTable* db_get_table_aux(Process *p, if (tb) { db_lock(tb, kind); if (tb->common.id != id - || ((tb->common.status & what) == 0 && p->id != tb->common.owner)) { + || ((tb->common.status & what) == 0 + && p->common.id != tb->common.owner)) { db_unlock(tb, kind); tb = NULL; } @@ -615,7 +616,7 @@ BIF_RETTYPE ets_safe_fixtable_2(BIF_ALIST_2) #ifdef HARDDEBUG erts_fprintf(stderr, "ets:safe_fixtable(%T,%T); Process: %T, initial: %T:%T/%bpu\n", - BIF_ARG_1, BIF_ARG_2, BIF_P->id, + BIF_ARG_1, BIF_ARG_2, BIF_P->common.id, BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]); #endif kind = (BIF_ARG_2 == am_true) ? LCK_READ : LCK_WRITE_REC; @@ -1194,7 +1195,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) #ifdef HARDDEBUG erts_fprintf(stderr, "ets:rename(%T,%T); Process: %T, initial: %T:%T/%bpu\n", - BIF_ARG_1, BIF_ARG_2, BIF_P->id, + BIF_ARG_1, BIF_ARG_2, BIF_P->common.id, BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]); #endif @@ -1437,7 +1438,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ), "db_tab", "db_tab_fix"); tb->common.keypos = keypos; - tb->common.owner = BIF_P->id; + tb->common.owner = BIF_P->common.id; set_heir(BIF_P, tb, heir, heir_data); erts_smp_atomic_init_nob(&tb->common.nitems, 0); @@ -1506,7 +1507,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) #ifdef HARDDEBUG erts_fprintf(stderr, "ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n", - BIF_ARG_1, BIF_ARG_2, ret, BIF_P->id, + BIF_ARG_1, BIF_ARG_2, ret, BIF_P->common.id, BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]); erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n", erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size)); @@ -1518,7 +1519,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC); if (db_put_hash(meta_pid_to_tab, - TUPLE2(meta_tuple, BIF_P->id, make_small(slot)), + TUPLE2(meta_tuple, + BIF_P->common.id, + make_small(slot)), 0) != DB_ERROR_NONE) { erl_exit(1,"Could not update ets metadata."); } @@ -1637,7 +1640,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) #ifdef HARDDEBUG erts_fprintf(stderr, "ets:delete(%T); Process: %T, initial: %T:%T/%bpu\n", - BIF_ARG_1, BIF_P->id, + BIF_ARG_1, BIF_P->common.id, BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]); #endif @@ -1654,7 +1657,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE); tb->common.status |= DB_DELETE; - if (tb->common.owner != BIF_P->id) { + if (tb->common.owner != BIF_P->common.id) { DeclareTmpHeap(meta_tuple,3,BIF_P); /* @@ -1669,10 +1672,12 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) make_small(tb->common.slot)); BIF_P->flags |= F_USING_DB; - tb->common.owner = BIF_P->id; + tb->common.owner = BIF_P->common.id; db_put_hash(meta_pid_to_tab, - TUPLE2(meta_tuple,BIF_P->id,make_small(tb->common.slot)), + TUPLE2(meta_tuple, + BIF_P->common.id, + make_small(tb->common.slot)), 0); db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC); UnUseTmpHeap(3,BIF_P); @@ -1748,7 +1753,7 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3) } if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL - || tb->common.owner != BIF_P->id) { + || tb->common.owner != BIF_P->common.id) { goto badarg; } from_pid = tb->common.owner; @@ -1771,7 +1776,10 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3) db_unlock(tb,LCK_WRITE); erts_send_message(BIF_P, to_proc, &to_locks, - TUPLE4(buf, am_ETS_TRANSFER, 
tb->common.id, from_pid, BIF_ARG_3), + TUPLE4(buf, am_ETS_TRANSFER, + tb->common.id, + from_pid, + BIF_ARG_3), 0); erts_smp_proc_unlock(to_proc, to_locks); UnUseTmpHeap(5,BIF_P); @@ -1833,7 +1841,7 @@ BIF_RETTYPE ets_setopts_2(BIF_ALIST_2) if (tail != NIL || (tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL - || tb->common.owner != BIF_P->id) { + || tb->common.owner != BIF_P->common.id) { goto badarg; } @@ -2647,7 +2655,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1) */ /* If/when we implement lockless private tables: - if ((tb->common.status & DB_PRIVATE) && owner != BIF_P->id) { + if ((tb->common.status & DB_PRIVATE) && owner != BIF_P->common.id) { db_unlock(tb, LCK_READ); rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN, owner, ERTS_PROC_LOCK_MAIN); @@ -3039,9 +3047,9 @@ static int give_away_to_heir(Process* p, DbTable* tb) Eterm to_pid; UWord heir_data; - ASSERT(tb->common.owner == p->id); + ASSERT(tb->common.owner == p->common.id); ASSERT(is_internal_pid(tb->common.heir)); - ASSERT(tb->common.heir != p->id); + ASSERT(tb->common.heir != p->common.id); retry: to_pid = tb->common.heir; to_proc = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN, @@ -3054,7 +3062,7 @@ retry: db_lock(tb,LCK_WRITE); ASSERT(tb != NULL); - if (tb->common.owner != p->id) { + if (tb->common.owner != p->common.id) { if (to_proc != NULL ) { erts_smp_proc_unlock(to_proc, to_locks); } @@ -3065,7 +3073,7 @@ retry: if (to_proc != NULL ) { erts_smp_proc_unlock(to_proc, to_locks); } - if (to_pid == p->id || to_pid == am_none) { + if (to_pid == p->common.id || to_pid == am_none) { return 0; /* no real heir, table still mine */ } goto retry; @@ -3074,7 +3082,8 @@ retry: if (to_proc == NULL) { return 0; /* heir not alive, table still mine */ } - if (to_proc->started_interval != tb->common.heir_started_interval) { + if (to_proc->common.u.alive.started_interval + != tb->common.heir_started_interval) { erts_smp_proc_unlock(to_proc, to_locks); return 0; /* heir dead and pid reused, table still mine */ } @@ -3099,7 +3108,11 @@ retry: heir_data = tpv[1]; } erts_send_message(p, to_proc, &to_locks, - TUPLE4(buf, am_ETS_TRANSFER, tb->common.id, p->id, heir_data), + TUPLE4(buf, + am_ETS_TRANSFER, + tb->common.id, + p->common.id, + heir_data), 0); erts_smp_proc_unlock(to_proc, to_locks); return !0; @@ -3108,7 +3121,7 @@ retry: /* * erts_db_process_exiting() is called when a process terminates. * It returns 0 when completely done, and !0 when it wants to - * yield. c_p->u.exit_data can hold a pointer to a state while + * yield. c_p->u.terminate can hold a pointer to a state while * yielding. 
*/ #define ERTS_DB_INTERNAL_ERROR(LSTR) \ @@ -3118,8 +3131,8 @@ retry: int erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks) { - ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.exit_data; - Eterm pid = c_p->id; + ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.terminate; + Eterm pid = c_p->common.id; ErtsDbProcCleanupState default_state; int ret; @@ -3300,7 +3313,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks) if (state != &default_state) erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state); - c_p->u.exit_data = NULL; + c_p->u.terminate = NULL; return 0; default: @@ -3321,13 +3334,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks) break; } - ASSERT(c_p->u.exit_data == (void *) state + ASSERT(c_p->u.terminate == (void *) state || state == &default_state); if (state == &default_state) { - c_p->u.exit_data = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP, + c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP, sizeof(ErtsDbProcCleanupState)); - sys_memcpy(c_p->u.exit_data, + sys_memcpy(c_p->u.terminate, (void*) state, sizeof(ErtsDbProcCleanupState)); } @@ -3353,7 +3366,7 @@ static void fix_table_locked(Process* p, DbTable* tb) } else { for (; fix != NULL; fix = fix->next) { - if (fix->pid == p->id) { + if (fix->pid == p->common.id) { ++(fix->counter); #ifdef ERTS_SMP erts_smp_mtx_unlock(&tb->common.fixlock); @@ -3365,7 +3378,7 @@ static void fix_table_locked(Process* p, DbTable* tb) fix = (DbFixation *) erts_db_alloc(ERTS_ALC_T_DB_FIXATION, tb, sizeof(DbFixation)); ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation)); - fix->pid = p->id; + fix->pid = p->common.id; fix->counter = 1; fix->next = tb->common.fixations; tb->common.fixations = fix; @@ -3376,7 +3389,9 @@ static void fix_table_locked(Process* p, DbTable* tb) UseTmpHeap(3,p); db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC); if (db_put_hash(meta_pid_to_fixed_tab, - TUPLE2(meta_tuple, p->id, make_small(tb->common.slot)), + TUPLE2(meta_tuple, + p->common.id, + make_small(tb->common.slot)), 0) != DB_ERROR_NONE) { UnUseTmpHeap(3,p); erl_exit(1,"Could not insert ets metadata in safe_fixtable."); @@ -3396,7 +3411,7 @@ static void unfix_table_locked(Process* p, DbTable* tb, erts_smp_mtx_lock(&tb->common.fixlock); #endif for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) { - if ((*pp)->pid == p->id) { + if ((*pp)->pid == p->common.id) { DbFixation* fix = *pp; erts_refc_dec(&tb->common.ref,0); --(fix->counter); @@ -3410,7 +3425,7 @@ static void unfix_table_locked(Process* p, DbTable* tb, #endif db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC); db_erase_bag_exact2(meta_pid_to_fixed_tab, - p->id, make_small(tb->common.slot)); + p->common.id, make_small(tb->common.slot)); db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC); erts_db_free(ERTS_ALC_T_DB_FIXATION, tb, (void *) fix, sizeof(DbFixation)); @@ -3469,15 +3484,15 @@ static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data) if (heir == am_none) { return; } - if (heir == me->id) { - erts_ensure_later_proc_interval(me->started_interval); - tb->common.heir_started_interval = me->started_interval; + if (heir == me->common.id) { + erts_ensure_later_proc_interval(me->common.u.alive.started_interval); + tb->common.heir_started_interval = me->common.u.alive.started_interval; } else { Process* heir_proc= erts_proc_lookup(heir); if (heir_proc != NULL) { - erts_ensure_later_proc_interval(heir_proc->started_interval); - tb->common.heir_started_interval = heir_proc->started_interval; + 
erts_ensure_later_proc_interval(heir_proc->common.u.alive.started_interval); + tb->common.heir_started_interval = heir_proc->common.u.alive.started_interval; } else { tb->common.heir = am_none; } diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index 407a55a3d7..58c4d75c31 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -138,21 +138,23 @@ set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) { Uint flags; if (tracer == NIL) { - flags = tracee_p->trace_flags & ~TRACEE_FLAGS; + flags = ERTS_TRACE_FLAGS(tracee_p) & ~TRACEE_FLAGS; } else { - flags = ((tracee_p->trace_flags & ~d_flags) | e_flags); + flags = ((ERTS_TRACE_FLAGS(tracee_p) & ~d_flags) | e_flags); if (! flags) tracer = NIL; } - ret = tracee_p->tracer_proc != tracer || tracee_p->trace_flags != flags - ? am_true : am_false; - tracee_p->tracer_proc = tracer; - tracee_p->trace_flags = flags; + ret = ((ERTS_TRACER_PROC(tracee_p) != tracer + || ERTS_TRACE_FLAGS(tracee_p) != flags) + ? am_true + : am_false); + ERTS_TRACER_PROC(tracee_p) = tracer; + ERTS_TRACE_FLAGS(tracee_p) = flags; return ret; } /* ** Assuming all locks on tracee_p on entry ** -** Changes tracee_p->trace_flags and tracee_p->tracer_proc +** Changes ERTS_TRACE_FLAGS(tracee_p) and ERTS_TRACER_PROC(tracee_p) ** according to input disable/enable flags and tracer. ** ** Returns am_true|am_false on success, am_true if value changed, @@ -173,17 +175,20 @@ set_match_trace(Process *tracee_p, Eterm fail_term, Eterm tracer, tracer, ERTS_PROC_LOCKS_ALL))) { if (tracee_p != tracer_p) { ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags); - tracer_p->trace_flags |= tracee_p->trace_flags ? F_TRACER : 0; + ERTS_TRACE_FLAGS(tracer_p) |= (ERTS_TRACE_FLAGS(tracee_p) + ? F_TRACER + : 0); erts_smp_proc_unlock(tracer_p, ERTS_PROC_LOCKS_ALL); } } else if (is_internal_port(tracer)) { Port *tracer_port = - erts_id2port(tracer, tracee_p, ERTS_PROC_LOCKS_ALL); + erts_id2port_sflgs(tracer, + tracee_p, + ERTS_PROC_LOCKS_ALL, + ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); if (tracer_port) { - if (! 
INVALID_TRACER_PORT(tracer_port, tracer)) { - ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags); - } - erts_smp_port_unlock(tracer_port); + ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags); + erts_port_release(tracer_port); } } else { ASSERT(is_nil(tracer)); @@ -2174,7 +2179,7 @@ restart: pc += n; break; case matchSelf: - *esp++ = c_p->id; + *esp++ = c_p->common.id; break; case matchWaste: --esp; @@ -2261,7 +2266,7 @@ restart: case matchEnableTrace: if ( (n = erts_trace_flag2bit(esp[-1]))) { BEGIN_ATOMIC_TRACE(c_p); - set_tracee_flags(c_p, c_p->tracer_proc, 0, n); + set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), 0, n); esp[-1] = am_true; } else { esp[-1] = FAIL_TERM; @@ -2274,7 +2279,7 @@ restart: BEGIN_ATOMIC_TRACE(c_p); if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) { /* Always take over the tracer of the current process */ - set_tracee_flags(tmpp, c_p->tracer_proc, 0, n); + set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), 0, n); esp[-1] = am_true; } } @@ -2282,7 +2287,7 @@ restart: case matchDisableTrace: if ( (n = erts_trace_flag2bit(esp[-1]))) { BEGIN_ATOMIC_TRACE(c_p); - set_tracee_flags(c_p, c_p->tracer_proc, n, 0); + set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), n, 0); esp[-1] = am_true; } else { esp[-1] = FAIL_TERM; @@ -2295,7 +2300,7 @@ restart: BEGIN_ATOMIC_TRACE(c_p); if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) { /* Always take over the tracer of the current process */ - set_tracee_flags(tmpp, c_p->tracer_proc, n, 0); + set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), n, 0); esp[-1] = am_true; } } @@ -2316,12 +2321,12 @@ restart: --esp; if (*esp == am_true) { erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - c_p->trace_flags |= F_TRACE_SILENT; + ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT; erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } else if (*esp == am_false) { erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); - c_p->trace_flags &= ~F_TRACE_SILENT; + ERTS_TRACE_FLAGS(c_p) &= ~F_TRACE_SILENT; erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } break; @@ -2329,11 +2334,11 @@ restart: { /* disable enable */ Uint d_flags = 0, e_flags = 0; /* process trace flags */ - Eterm tracer = c_p->tracer_proc; + Eterm tracer = ERTS_TRACER_PROC(c_p); /* XXX Atomicity note: Not fully atomic. Default tracer * is sampled from current process but applied to * tracee and tracer later after releasing main - * locks on current process, so c_p->tracer_proc + * locks on current process, so ERTS_TRACER_PROC(c_p) * may actually have changed when tracee and tracer * gets updated. I do not think nobody will notice. * It is just the default value that is not fully atomic. @@ -2358,7 +2363,7 @@ restart: { /* disable enable */ Uint d_flags = 0, e_flags = 0; /* process trace flags */ - Eterm tracer = c_p->tracer_proc; + Eterm tracer = ERTS_TRACER_PROC(c_p); /* XXX Atomicity note. Not fully atomic. See above. * Above it could possibly be solved, but not here. 
*/ @@ -5005,7 +5010,7 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace) static Eterm seq_trace_fake(Process *p, Eterm arg1) { Eterm result = erl_seq_trace_info(p, arg1); - if (is_tuple(result) && *tuple_val(result) == 2) { + if (!is_non_value(result) && is_tuple(result) && *tuple_val(result) == 2) { return (tuple_val(result))[2]; } return result; diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h index dcecc4251a..d8f6e40d2e 100644 --- a/erts/emulator/beam/erl_db_util.h +++ b/erts/emulator/beam/erl_db_util.h @@ -320,10 +320,10 @@ ERTS_GLB_INLINE int db_eq(DbTableCommon* tb, Eterm a, DbTerm* b) #define DB_INFO (DB_PROTECTED|DB_PUBLIC|DB_PRIVATE) #define ONLY_WRITER(P,T) (((T)->common.status & (DB_PRIVATE|DB_PROTECTED)) \ - && (T)->common.owner == (P)->id) + && (T)->common.owner == (P)->common.id) #define ONLY_READER(P,T) (((T)->common.status & DB_PRIVATE) && \ -(T)->common.owner == (P)->id) +(T)->common.owner == (P)->common.id) /* Function prototypes */ BIF_RETTYPE db_get_trace_control_word(Process* p); diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c index 22e873afc6..b90d00f236 100644 --- a/erts/emulator/beam/erl_debug.c +++ b/erts/emulator/beam/erl_debug.c @@ -252,16 +252,16 @@ void erts_check_stack(Process *p) if (p->stop > stack_start) erl_exit(1, "<%lu.%lu.%lu>: Stack underflow\n", - internal_pid_channel_no(p->id), - internal_pid_number(p->id), - internal_pid_serial(p->id)); + internal_pid_channel_no(p->common.id), + internal_pid_number(p->common.id), + internal_pid_serial(p->common.id)); if (p->stop < stack_end) erl_exit(1, "<%lu.%lu.%lu>: Stack overflow\n", - internal_pid_channel_no(p->id), - internal_pid_number(p->id), - internal_pid_serial(p->id)); + internal_pid_channel_no(p->common.id), + internal_pid_number(p->common.id), + internal_pid_serial(p->common.id)); for (elemp = p->stop; elemp < stack_start; elemp++) { int in_mbuf = 0; @@ -284,9 +284,9 @@ void erts_check_stack(Process *p) erl_exit(1, "<%lu.%lu.%lu>: Wild stack pointer\n", - internal_pid_channel_no(p->id), - internal_pid_number(p->id), - internal_pid_serial(p->id)); + internal_pid_channel_no(p->common.id), + internal_pid_number(p->common.id), + internal_pid_serial(p->common.id)); } } @@ -387,16 +387,16 @@ void verify_process(Process *p) #define VERIFY_AREA(name,ptr,sz) { \ int n = (sz); \ while (n--) if(!verify_eterm(p,*(ptr+n))) \ - erl_exit(1,"Wild pointer found in " name " of %T!\n",p->id); } + erl_exit(1,"Wild pointer found in " name " of %T!\n",p->common.id); } #define VERIFY_ETERM(name,eterm) { \ if(!verify_eterm(p,eterm)) \ - erl_exit(1,"Wild pointer found in " name " of %T!\n",p->id); } + erl_exit(1,"Wild pointer found in " name " of %T!\n",p->common.id); } ErlMessage* mp = p->msg.first; - VERBOSE(DEBUG_MEMORY,("Verify process: %T...\n",p->id)); + VERBOSE(DEBUG_MEMORY,("Verify process: %T...\n",p->common.id)); while (mp != NULL) { VERIFY_ETERM("message term",ERL_MESSAGE_TERM(mp)); @@ -516,7 +516,7 @@ static void print_process_memory(Process *p) ErlHeapFragment* bp = MBUF(p); erts_printf("==============================\n"); - erts_printf("|| Memory info for %T ||\n",p->id); + erts_printf("|| Memory info for %T ||\n",p->common.id); erts_printf("==============================\n"); erts_printf("-- %-*s ---%s-%s-%s-%s--\n", @@ -601,7 +601,7 @@ void print_memory_info(Process *p) { if (p != NULL) { erts_printf("======================================\n"); - erts_printf("|| Memory info for %-12T ||\n",p->id); + erts_printf("|| 
Memory info for %-12T ||\n",p->common.id); erts_printf("======================================\n"); erts_printf("+- local heap ----%s-%s-%s-%s-+\n", dashes,dashes,dashes,dashes); diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h index 771ee46d2b..d50ba364d0 100644 --- a/erts/emulator/beam/erl_driver.h +++ b/erts/emulator/beam/erl_driver.h @@ -85,7 +85,7 @@ #include "erl_drv_nif.h" #include <stdlib.h> -#include <string.h> /* ssize_t on Mac OS X */ +#include <sys/types.h> /* ssize_t */ #if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_) #ifndef STATIC_ERLANG_DRIVER @@ -133,7 +133,7 @@ typedef struct { #define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed) #define ERL_DRV_EXTENDED_MAJOR_VERSION 2 -#define ERL_DRV_EXTENDED_MINOR_VERSION 0 +#define ERL_DRV_EXTENDED_MINOR_VERSION 1 /* * The emulator will refuse to load a driver with different major @@ -154,6 +154,7 @@ typedef struct { #define ERL_DRV_FLAG_USE_PORT_LOCKING (1 << 0) #define ERL_DRV_FLAG_SOFT_BUSY (1 << 1) +#define ERL_DRV_FLAG_NO_BUSY_MSGQ (1 << 2) /* * Integer types @@ -207,8 +208,8 @@ typedef struct erl_drv_binary { typedef struct _erl_drv_data* ErlDrvData; /* Data to be used by the driver itself. */ #ifndef ERL_SYS_DRV typedef struct _erl_drv_event* ErlDrvEvent; /* An event to be selected on. */ -typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */ #endif +typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */ typedef struct _erl_drv_port* ErlDrvThreadData; /* Thread data. */ #if !defined(__WIN32__) && !defined(_WIN32) && !defined(_WIN32_) && !defined(USE_SELECT) @@ -377,9 +378,18 @@ typedef struct erl_drv_entry { ErlDrvEntry* driver_init(void) #endif +#define ERL_DRV_BUSY_MSGQ_DISABLED (~((ErlDrvSizeT) 0)) +#define ERL_DRV_BUSY_MSGQ_READ_ONLY ((ErlDrvSizeT) 0) +#define ERL_DRV_BUSY_MSGQ_LIM_MAX (ERL_DRV_BUSY_MSGQ_DISABLED - 1) +#define ERL_DRV_BUSY_MSGQ_LIM_MIN ((ErlDrvSizeT) 1) + /* * These are the functions available for driver writers. */ +EXTERN void erl_drv_busy_msgq_limits(ErlDrvPort port, + ErlDrvSizeT *low, + ErlDrvSizeT *high); + EXTERN int driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on); EXTERN int driver_event(ErlDrvPort port, ErlDrvEvent event, ErlDrvEventData event_data); @@ -594,11 +604,33 @@ EXTERN ErlDrvPort driver_create_port(ErlDrvPort creator_port, ErlDrvData drv_data); +/* + * driver_output_term() is deprecated, and scheduled for removal in + * OTP-R17. Use erl_drv_output_term() instead. For more information + * see the erl_driver(3) documentation. + */ +EXTERN int driver_output_term(ErlDrvPort ix, + ErlDrvTermData* data, + int len) ERL_DRV_DEPRECATED_FUNC; +/* + * driver_send_term() is deprecated, and scheduled for removal in + * OTP-R17. Use erl_drv_send_term() instead. For more information + * see the erl_driver(3) documentation. 
+ */ +EXTERN int driver_send_term(ErlDrvPort ix, + ErlDrvTermData to, + ErlDrvTermData* data, + int len) ERL_DRV_DEPRECATED_FUNC; + /* output term data to the port owner */ -EXTERN int driver_output_term(ErlDrvPort ix, ErlDrvTermData* data, int len); +EXTERN int erl_drv_output_term(ErlDrvTermData port, + ErlDrvTermData* data, + int len); /* output term data to a specific process */ -EXTERN int driver_send_term(ErlDrvPort ix, ErlDrvTermData to, - ErlDrvTermData* data, int len); +EXTERN int erl_drv_send_term(ErlDrvTermData port, + ErlDrvTermData to, + ErlDrvTermData* data, + int len); /* Async IO functions */ EXTERN long driver_async(ErlDrvPort ix, diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index 5ae4b9254b..a33085315a 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -59,7 +59,7 @@ static Uint reclaimed; /* no of words reclaimed in GCs */ erts_fprintf(stderr, "htop=%p\n", (p)->htop); \ erts_fprintf(stderr, "heap=%p\n", (p)->heap); \ erl_exit(ERTS_ABORT_EXIT, "%s, line %d: %T: Overrun stack and heap\n", \ - __FILE__,__LINE__,(P)->id); \ + __FILE__,__LINE__,(P)->common.id); \ } #ifdef DEBUG @@ -129,7 +129,7 @@ static void disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int n #if defined(ARCH_64) && !HALFWORD_HEAP # define MAX_HEAP_SIZES 154 #else -# define MAX_HEAP_SIZES 55 +# define MAX_HEAP_SIZES 59 #endif static Sint heap_sizes[MAX_HEAP_SIZES]; /* Suitable heap sizes. */ @@ -144,6 +144,7 @@ void erts_init_gc(void) { int i = 0; + Sint max_heap_size = 0; ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word)); ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word)); @@ -168,16 +169,30 @@ erts_init_gc(void) * we really don't want that growth when the heaps are that big. */ - heap_sizes[0] = 34; - heap_sizes[1] = 55; - for (i = 2; i < 23; i++) { - heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-2]; + /* Growth stage 1 - Fibonacci + 1*/ + /* 12,38 will hit size 233, the old default */ + + heap_sizes[0] = 12; + heap_sizes[1] = 38; + + for(i = 2; i < 23; i++) { + /* one extra word for block header */ + heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-2] + 1; } + + /* for 32 bit we want max_heap_size to be MAX(32bit) / 4 [words] (and halfword) + * for 64 bit we want max_heap_size to be MAX(52bit) / 8 [words] + */ + + max_heap_size = sizeof(Eterm) < 8 ? (Sint)((~(Uint)0)/(sizeof(Eterm))) : + (Sint)(((Uint64)1 << 53)/sizeof(Eterm)); + + /* Growth stage 2 - 20% growth */ /* At 1.3 mega words heap, we start to slow down. */ for (i = 23; i < ALENGTH(heap_sizes); i++) { - heap_sizes[i] = 5*(heap_sizes[i-1]/4); - if (heap_sizes[i] < 0) { + heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-1]/5; + if ((heap_sizes[i] < 0) || heap_sizes[i] > max_heap_size) { /* Size turned negative. Discard this last size. */ i--; break; @@ -868,14 +883,12 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl) } } - if (wanted < MIN_HEAP_SIZE(p)) { - wanted = MIN_HEAP_SIZE(p); - } else { - wanted = next_heap_size(p, wanted, 0); - } + wanted = wanted < MIN_HEAP_SIZE(p) ? MIN_HEAP_SIZE(p) + : next_heap_size(p, wanted, 0); if (wanted < HEAP_SIZE(p)) { shrink_new_heap(p, wanted, objv, nobj); } + ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0)); return 1; /* We are done. */ } @@ -1434,11 +1447,10 @@ adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int I think this is better as fullsweep is used mainly on small memory systems, but I could be wrong... 
*/ wanted = 2 * need_after; - if (wanted < p->min_heap_size) { - sz = p->min_heap_size; - } else { - sz = next_heap_size(p, wanted, 0); - } + + sz = wanted < p->min_heap_size ? p->min_heap_size + : next_heap_size(p, wanted, 0); + if (sz < HEAP_SIZE(p)) { shrink_new_heap(p, sz, objv, nobj); } @@ -1946,9 +1958,9 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset) n++; } #endif - ASSERT(is_nil(p->tracer_proc) || - is_internal_pid(p->tracer_proc) || - is_internal_port(p->tracer_proc)); + ASSERT(is_nil(ERTS_TRACER_PROC(p)) || + is_internal_pid(ERTS_TRACER_PROC(p)) || + is_internal_port(ERTS_TRACER_PROC(p))); ASSERT(is_pid(follow_moved(p->group_leader))); if (is_not_immed(p->group_leader)) { diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c index e7d4ac2b67..f98d377fd6 100644 --- a/erts/emulator/beam/erl_goodfit_alloc.c +++ b/erts/emulator/beam/erl_goodfit_alloc.c @@ -205,7 +205,6 @@ erts_gfalc_start(GFAllctr_t *gfallctr, init->sbmbct = 0; /* Small mbc not yet supported by goodfit */ - allctr->mbc_header_size = sizeof(Carrier_t); allctr->min_mbc_size = MIN_MBC_SZ; allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ; allctr->min_block_size = sizeof(GFFreeBlock_t); @@ -363,7 +362,7 @@ search_bucket(Allctr_t *allctr, int ix, Uint size) blk && i < max_blk_search; blk = blk->next, i++) { - blk_sz = BLK_SZ(blk); + blk_sz = MBC_FBLK_SZ(&blk->block_head); blk_on_lambc = (((char *) blk) < gfallctr->last_aux_mbc_end && gfallctr->last_aux_mbc_start <= ((char *) blk)); @@ -402,7 +401,7 @@ get_free_block(Allctr_t *allctr, Uint size, if (min_bi == unsafe_bi) { blk = search_bucket(allctr, min_bi, size); if (blk) { - if (cand_blk && cand_size <= BLK_SZ(blk)) + if (cand_blk && cand_size <= MBC_FBLK_SZ(blk)) return NULL; /* cand_blk was better */ unlink_free_block(allctr, blk, flags); return blk; @@ -422,7 +421,7 @@ get_free_block(Allctr_t *allctr, Uint size, /* We are guaranteed to find a block that fits in this bucket */ blk = search_bucket(allctr, min_bi, size); ASSERT(blk); - if (cand_blk && cand_size <= BLK_SZ(blk)) + if (cand_blk && cand_size <= MBC_FBLK_SZ(blk)) return NULL; /* cand_blk was better */ unlink_free_block(allctr, blk, flags); return blk; @@ -435,7 +434,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) { GFAllctr_t *gfallctr = (GFAllctr_t *) allctr; GFFreeBlock_t *blk = (GFFreeBlock_t *) block; - Uint sz = BLK_SZ(blk); + Uint sz = MBC_FBLK_SZ(&blk->block_head); int i = BKT_IX(gfallctr, sz); ASSERT(sz >= MIN_BLK_SZ); @@ -456,7 +455,7 @@ unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags) { GFAllctr_t *gfallctr = (GFAllctr_t *) allctr; GFFreeBlock_t *blk = (GFFreeBlock_t *) block; - Uint sz = BLK_SZ(blk); + Uint sz = MBC_FBLK_SZ(&blk->block_head); int i = BKT_IX(gfallctr, sz); if (!blk->prev) { @@ -618,7 +617,7 @@ check_block(Allctr_t *allctr, Block_t * blk, int free_block) GFFreeBlock_t *fblk; if(free_block) { - Uint blk_sz = BLK_SZ(blk); + Uint blk_sz = is_sbc_blk(blk) ? 
SBC_BLK_SZ(blk) : MBC_BLK_SZ(blk); bi = BKT_IX(gfallctr, blk_sz); ASSERT(gfallctr->bucket_mask.main & (((UWord) 1) << IX2SMIX(bi))); diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 369eab5980..61b3c09d16 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -44,6 +44,7 @@ #include "erl_thr_progress.h" #include "erl_thr_queue.h" #include "erl_async.h" +#include "erl_ptab.h" #ifdef HIPE #include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */ @@ -54,6 +55,8 @@ # include <sys/resource.h> #endif +#define ERTS_DEFAULT_NO_ASYNC_THREADS 10 + /* * The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands * only. Do not remove even though they aren't used elsewhere in the emulator! @@ -109,6 +112,11 @@ const int etp_lock_check = 1; #else const int etp_lock_check = 0; #endif +#ifdef WORDS_BIGENDIAN +const int etp_big_endian = 1; +#else +const int etp_big_endian = 0; +#endif /* * Note about VxWorks: All variables must be initialized by executable code, * not by an initializer. Otherwise a new instance of the emulator will @@ -121,9 +129,10 @@ extern void ConNormalExit(void); extern void ConWaitForExit(void); #endif -static void erl_init(int ncpu); - -#define ERTS_MIN_COMPAT_REL 7 +static void erl_init(int ncpu, + int proc_tab_sz, + int port_tab_sz, + int port_tab_sz_ignore_files); static erts_atomic_t exiting; @@ -206,8 +215,6 @@ ErtsModifiedTimings erts_modified_timings[] = { Export *erts_delay_trap = NULL; -int erts_use_r9_pids_ports; - int ignore_break; int replace_intr; @@ -271,12 +278,18 @@ void erts_short_init(void) { int ncpu = early_init(NULL, NULL); - erl_init(ncpu); + erl_init(ncpu, + ERTS_DEFAULT_MAX_PROCESSES, + ERTS_DEFAULT_MAX_PORTS, + 0); erts_initialized = 1; } static void -erl_init(int ncpu) +erl_init(int ncpu, + int proc_tab_sz, + int port_tab_sz, + int port_tab_sz_ignore_files) { init_benchmarking(); @@ -284,7 +297,7 @@ erl_init(int ncpu) erts_init_gc(); erts_init_time(); erts_init_sys_common_misc(); - erts_init_process(ncpu); + erts_init_process(ncpu, proc_tab_sz); erts_init_scheduling(no_schedulers, no_schedulers_online); erts_init_cpu_topology(); /* Must be after init_scheduling */ @@ -306,6 +319,7 @@ erl_init(int ncpu) erts_bif_info_init(); erts_ddll_init(); init_emulator(); + erts_ptab_init(); /* Must be after init_emulator() */ erts_bp_init(); init_db(); /* Must be after init_emulator */ erts_bif_timer_init(); @@ -313,7 +327,7 @@ erl_init(int ncpu) init_dist(); erl_drv_thr_init(); erts_init_async(); - init_io(); + erts_init_io(port_tab_sz, port_tab_sz_ignore_files); init_load(); erts_init_bif(); erts_init_bif_chksum(); @@ -457,6 +471,7 @@ load_preloaded(void) /* be helpful (or maybe downright rude:-) */ void erts_usage(void) { + int this_rel = this_rel_num(); erts_fprintf(stderr, "Usage: %s [flags] [ -- [init_args] ]\n", progname(program)); erts_fprintf(stderr, "The flags are:\n\n"); @@ -490,21 +505,25 @@ void erts_usage(void) /* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */ erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n"); - + erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n"); + erts_fprintf(stderr, " Note that this flag is deprecated!\n"); erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n"); erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n"); erts_fprintf(stderr, "-P number set maximum number of processes on this node,\n"); erts_fprintf(stderr, " valid range is [%d-%d]\n", - 
ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES); + ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES); + erts_fprintf(stderr, "-Q number set maximum number of ports on this node,\n"); + erts_fprintf(stderr, " valid range is [%d-%d]\n", + ERTS_MIN_PORTS, ERTS_MAX_PORTS); erts_fprintf(stderr, "-R number set compatibility release number,\n"); erts_fprintf(stderr, " valid range [%d-%d]\n", - ERTS_MIN_COMPAT_REL, this_rel_num()); + this_rel-2, this_rel); erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n"); erts_fprintf(stderr, "-rg amount set reader groups limit\n"); erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n"); - erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n"); + erts_fprintf(stderr, "-stbt type u|ns|ts|ps|s|nnts|nnps|tnnps|db\n"); erts_fprintf(stderr, "-sbwt val set scheduler busy wait threshold, valid values are:\n"); erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n"); erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n"); @@ -512,13 +531,14 @@ void erts_usage(void) erts_fprintf(stderr, "-sct cput set cpu topology,\n"); erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); erts_fprintf(stderr, "-sws val set scheduler wakeup strategy, valid values are:\n"); - erts_fprintf(stderr, " default|legacy|proposal.\n"); + erts_fprintf(stderr, " default|legacy.\n"); erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n"); erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n"); erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n"); erts_fprintf(stderr, " valid range is [%d-%d]\n", ERTS_SCHED_THREAD_MIN_STACK_SIZE, ERTS_SCHED_THREAD_MAX_STACK_SIZE); + erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n"); erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n"); erts_fprintf(stderr, " schedulers online (n2), valid range for both\n"); erts_fprintf(stderr, " numbers are [1-%d]\n", @@ -612,9 +632,8 @@ early_init(int *argc, char **argv) /* erts_printf_eterm_func = erts_printf_term; erts_disable_tolerant_timeofday = 0; display_items = 200; - erts_proc.max = ERTS_DEFAULT_MAX_PROCESSES; erts_backtrace_depth = DEFAULT_BACKTRACE_SIZE; - erts_async_max_threads = 0; + erts_async_max_threads = ERTS_DEFAULT_NO_ASYNC_THREADS; erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE; H_MIN_SIZE = H_DEFAULT_SIZE; BIN_VH_MIN_SIZE = VH_DEFAULT_SIZE; @@ -641,8 +660,6 @@ early_init(int *argc, char **argv) /* erts_compat_rel = this_rel_num(); - erts_use_r9_pids_ports = 0; - erts_sys_pre_init(); erts_atomic_init_nob(&exiting, 0); #ifdef ERTS_SMP @@ -685,7 +702,7 @@ early_init(int *argc, char **argv) /* if (erts_sys_getenv__("ERL_THREAD_POOL_SIZE", envbuf, &envbufsz) == 0) erts_async_max_threads = atoi(envbuf); else - erts_async_max_threads = 0; + erts_async_max_threads = ERTS_DEFAULT_NO_ASYNC_THREADS; if (erts_async_max_threads > ERTS_MAX_NO_OF_ASYNC_THREADS) erts_async_max_threads = ERTS_MAX_NO_OF_ASYNC_THREADS; @@ -897,11 +914,13 @@ erl_start(int argc, char **argv) { int i = 1; char* arg=NULL; - char* Parg = NULL; int have_break_handler = 1; char envbuf[21]; /* enough for any 64-bit integer */ size_t envbufsz; int ncpu = early_init(&argc, argv); + int proc_tab_sz = ERTS_DEFAULT_MAX_PROCESSES; + int port_tab_sz = ERTS_DEFAULT_MAX_PORTS; + int port_tab_sz_ignore_files = 0; envbufsz = sizeof(envbuf); if (erts_sys_getenv_raw(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0) @@ -972,6 
+991,7 @@ erl_start(int argc, char **argv) break; case 'a': erts_set_user_requested_filename_encoding(ERL_FILENAME_UNKNOWN); + break; default: erts_fprintf(stderr, "bad filename encoding %s, can be (l,u or a)\n", arg); erts_usage(); @@ -1152,12 +1172,53 @@ erl_start(int argc, char **argv) arg); break; - case 'P': - /* set maximum number of processes */ - Parg = get_arg(argv[i]+2, argv[i+1], &i); - erts_proc.max = atoi(Parg); - /* Check of result is delayed until later. This is because +R - may be given after +P. */ + case 'n': + arg = get_arg(argv[i]+2, argv[i+1], &i); + switch (arg[0]) { + case 's': /* synchronous */ + erts_port_synchronous_ops = 1; + erts_port_schedule_all_ops = 0; + break; + case 'a': /* asynchronous */ + erts_port_synchronous_ops = 0; + erts_port_schedule_all_ops = 1; + break; + case 'd': /* Default - schedule on conflict (asynchronous) */ + erts_port_synchronous_ops = 0; + erts_port_schedule_all_ops = 0; + break; + default: + bad_n_option: + erts_fprintf(stderr, "bad -n option %s\n", arg); + erts_usage(); + } + if (arg[1] != '\0') + goto bad_n_option; + break; + + case 'P': /* set maximum number of processes */ + arg = get_arg(argv[i]+2, argv[i+1], &i); + errno = 0; + proc_tab_sz = strtol(arg, NULL, 10); + if (errno != 0 + || proc_tab_sz < ERTS_MIN_PROCESSES + || ERTS_MAX_PROCESSES < proc_tab_sz) { + erts_fprintf(stderr, "bad number of processes %s\n", arg); + erts_usage(); + } + break; + + case 'Q': /* set maximum number of ports */ + arg = get_arg(argv[i]+2, argv[i+1], &i); + errno = 0; + port_tab_sz = strtol(arg, NULL, 10); + if (errno != 0 + || port_tab_sz < ERTS_MIN_PROCESSES + || ERTS_MAX_PROCESSES < port_tab_sz) { + erts_fprintf(stderr, "bad number of ports %s\n", arg); + erts_usage(); + } + port_tab_sz_ignore_files = 1; break; case 'S' : /* Was handled in early_init() just read past it */ @@ -1179,7 +1240,7 @@ erl_start(int argc, char **argv) case ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_CPU_TOPOLOGY: estr = "no cpu topology available"; break; - case ERTS_INIT_SCHED_BIND_TYPE_ERROR_NO_BAD_TYPE: + case ERTS_INIT_SCHED_BIND_TYPE_ERROR_BAD_TYPE: estr = "invalid type"; break; default: @@ -1259,8 +1320,31 @@ erl_start(int argc, char **argv) erts_usage(); } } + else if (has_prefix("pp", sub_param)) { + arg = get_arg(sub_param+2, argv[i+1], &i); + if (sys_strcmp(arg, "true") == 0) + erts_port_parallelism = 1; + else if (sys_strcmp(arg, "false") == 0) + erts_port_parallelism = 0; + else { + erts_fprintf(stderr, + "bad port parallelism scheduling hint %s\n", + arg); + erts_usage(); + } + } else if (sys_strcmp("nsp", sub_param) == 0) erts_use_sender_punish = 0; + else if (has_prefix("tbt", sub_param)) { + arg = get_arg(sub_param+3, argv[i+1], &i); + res = erts_init_scheduler_bind_type_string(arg); + if (res == ERTS_INIT_SCHED_BIND_TYPE_ERROR_BAD_TYPE) { + erts_fprintf(stderr, + "setting scheduler bind type '%s' failed: invalid type\n", + arg); + erts_usage(); + } + } else if (sys_strcmp("wt", sub_param) == 0) { arg = get_arg(sub_param+2, argv[i+1], &i); if (erts_sched_set_wakeup_other_thresold(arg) != 0) { @@ -1340,22 +1424,19 @@ erl_start(int argc, char **argv) case 'R': { /* set compatibility release */ + int this_rel; arg = get_arg(argv[i]+2, argv[i+1], &i); erts_compat_rel = atoi(arg); - if (erts_compat_rel < ERTS_MIN_COMPAT_REL - || erts_compat_rel > this_rel_num()) { + this_rel = this_rel_num(); + if (erts_compat_rel < this_rel - 2 || this_rel < erts_compat_rel) { erts_fprintf(stderr, "bad compatibility release number %s\n", arg); erts_usage(); } - 
ASSERT(ERTS_MIN_COMPAT_REL >= 7); switch (erts_compat_rel) { - case 7: - case 8: - case 9: - erts_use_r9_pids_ports = 1; + /* Currently no compat features... */ default: break; } @@ -1397,8 +1478,6 @@ erl_start(int argc, char **argv) } break; } - case 'n': /* XXX obsolete */ - break; case 'c': if (argv[i][2] == 0) { /* -c: documented option */ erts_disable_tolerant_timeofday = 1; @@ -1453,14 +1532,13 @@ erl_start(int argc, char **argv) i++; } - /* Delayed check of +P flag */ - if (erts_proc.max < ERTS_MIN_PROCESSES - || erts_proc.max > ERTS_MAX_PROCESSES - || (erts_use_r9_pids_ports - && erts_proc.max > ERTS_MAX_R9_PROCESSES)) { - erts_fprintf(stderr, "bad number of processes %s\n", Parg); - erts_usage(); - } +/* Output format on windows for sprintf defaults to three exponents. + * We use two-exponent to mimic normal sprintf behaviour. + */ + +#if defined(__WIN32__) && defined(_TWO_DIGIT_EXPONENT) + _set_output_format(_TWO_DIGIT_EXPONENT); +#endif /* Restart will not reinstall the break handler */ #ifdef __WIN32__ @@ -1482,7 +1560,10 @@ erl_start(int argc, char **argv) boot_argc = argc - i; /* Number of arguments to init */ boot_argv = &argv[i]; - erl_init(ncpu); + erl_init(ncpu, + proc_tab_sz, + port_tab_sz, + port_tab_sz_ignore_files); load_preloaded(); erts_end_staging_code_ix(); diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index 314d2f6a9c..69bb4be717 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -95,7 +95,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "dist_entry_links", "address" }, { "code_write_permission", NULL }, { "proc_status", "pid" }, - { "proc_tab", NULL }, { "ports_snapshot", NULL }, { "meta_name_tab", "address" }, { "meta_main_tab_slot", "address" }, @@ -115,9 +114,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { #if defined(ENABLE_CHILD_WAITER_THREAD) || defined(ERTS_SMP) { "child_status", NULL }, #endif -#ifdef __WIN32__ - { "sys_driver_data_lock", NULL }, -#endif { "drv_ev_state_grow", NULL, }, { "drv_ev_state", "address" }, { "safe_hash", "address" }, @@ -127,6 +123,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "schdlr_sspnd", NULL }, { "migration_info_update", NULL }, { "run_queue", "address" }, + { "process_table", NULL }, { "cpu_info", NULL }, { "pollset", "address" }, #ifdef __WIN32__ @@ -157,12 +154,10 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "pmmap", NULL }, #endif #ifdef ERTS_SMP + { "port_sched_lock", "port_id" }, { "port_task_pre_alloc_lock", "address" }, - { "port_taskq_pre_alloc_lock", "address" }, { "proclist_pre_alloc_lock", "address" }, - { "port_tasks_lock", NULL }, - { "get_free_port", NULL }, - { "port_state", "address" }, + { "port_table", NULL }, { "xports_list_pre_alloc_lock", "address" }, { "inet_buffer_stack_lock", NULL }, { "gc_info", NULL }, @@ -247,6 +242,7 @@ typedef struct { typedef struct erts_lc_locked_locks_t_ erts_lc_locked_locks_t; struct erts_lc_locked_locks_t_ { char *thread_name; + int emu_thread; erts_tid_t tid; erts_lc_locked_locks_t *next; erts_lc_locked_locks_t *prev; @@ -364,6 +360,7 @@ create_locked_locks(char *thread_name) if (!l_lcks->thread_name) lc_abort(); + l_lcks->emu_thread = 0; l_lcks->tid = erts_thr_self(); l_lcks->required.first = NULL; l_lcks->required.last = NULL; @@ -671,7 +668,7 @@ erts_lc_set_thread_name(char *thread_name) { erts_lc_locked_locks_t *l_lcks = get_my_locked_locks(); if (!l_lcks) - (void) create_locked_locks(thread_name); + l_lcks = create_locked_locks(thread_name); else 
{ ASSERT(l_lcks->thread_name); free((void *) l_lcks->thread_name); @@ -679,6 +676,14 @@ erts_lc_set_thread_name(char *thread_name) if (!l_lcks->thread_name) lc_abort(); } + l_lcks->emu_thread = 1; +} + +int +erts_lc_is_emu_thr(void) +{ + erts_lc_locked_locks_t *l_lcks = get_my_locked_locks(); + return l_lcks->emu_thread; } int diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h index df7b3758e1..068340abe7 100644 --- a/erts/emulator/beam/erl_lock_check.h +++ b/erts/emulator/beam/erl_lock_check.h @@ -102,6 +102,7 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags); void erts_lc_require_lock(erts_lc_lock_t *lck); void erts_lc_unrequire_lock(erts_lc_lock_t *lck); +int erts_lc_is_emu_thr(void); #define ERTS_LC_ASSERT(A) \ ((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A))) diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index d5b7d01048..325d77e911 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -495,7 +495,7 @@ queue_message(Process *c_p, #ifndef ERTS_SMP res = receiver->msg.len; #else - res = receiver->u.alive.msg_inq.len; + res = receiver->msg_inq.len; if (*receiver_locks & ERTS_PROC_LOCK_MAIN) { /* * We move 'in queue' to 'private queue' and place @@ -894,8 +894,8 @@ erts_send_message(Process* sender, #ifdef USE_VM_PROBES *sender_name = *receiver_name = '\0'; if (DTRACE_ENABLED(message_send)) { - erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->id); - erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->id); + erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->common.id); + erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->common.id); } #endif if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) { @@ -917,7 +917,7 @@ erts_send_message(Process* sender, seq_trace_update_send(sender); seq_trace_output(stoken, message, SEQ_TRACE_SEND, - receiver->id, sender); + receiver->common.id, sender); seq_trace_size = 6; /* TUPLE5 */ #ifdef USE_VM_PROBES } @@ -948,7 +948,7 @@ erts_send_message(Process* sender, #ifdef DTRACE_TAG_HARDDEBUG erts_fprintf(stderr, "Dtrace -> (%T) Spreading tag (%T) with " - "message %T!\r\n",sender->id, utag, message); + "message %T!\r\n",sender->common.id, utag, message); #endif } #endif @@ -1136,7 +1136,7 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp, save = TUPLE3(hp, am_EXIT, from_copy, mess); hp += 4; /* the trace token must in this case be updated by the caller */ - seq_trace_output(token, save, SEQ_TRACE_SEND, to->id, NULL); + seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL); temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap); erts_queue_message(to, to_locksp, bp, save, temptoken #ifdef USE_VM_PROBES diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h index 2ae94965b1..771eba431f 100644 --- a/erts/emulator/beam/erl_message.h +++ b/erts/emulator/beam/erl_message.h @@ -127,21 +127,21 @@ typedef struct { /* Move in message queue to end of private message queue */ #define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \ do { \ - if ((P)->u.alive.msg_inq.first) { \ - *(P)->msg.last = (P)->u.alive.msg_inq.first; \ - (P)->msg.last = (P)->u.alive.msg_inq.last; \ - (P)->msg.len += (P)->u.alive.msg_inq.len; \ - (P)->u.alive.msg_inq.first = NULL; \ - 
(P)->u.alive.msg_inq.last = &(P)->u.alive.msg_inq.first; \ - (P)->u.alive.msg_inq.len = 0; \ + if ((P)->msg_inq.first) { \ + *(P)->msg.last = (P)->msg_inq.first; \ + (P)->msg.last = (P)->msg_inq.last; \ + (P)->msg.len += (P)->msg_inq.len; \ + (P)->msg_inq.first = NULL; \ + (P)->msg_inq.last = &(P)->msg_inq.first; \ + (P)->msg_inq.len = 0; \ } \ } while (0) /* Add message last in message queue */ #define LINK_MESSAGE(p, mp) do { \ - *(p)->u.alive.msg_inq.last = (mp); \ - (p)->u.alive.msg_inq.last = &(mp)->next; \ - (p)->u.alive.msg_inq.len++; \ + *(p)->msg_inq.last = (mp); \ + (p)->msg_inq.last = &(mp)->next; \ + (p)->msg_inq.len++; \ } while(0) #else @@ -245,6 +245,9 @@ void erts_move_msg_attached_data_to_heap(Eterm **, ErlOffHeap *, ErlMessage *); Eterm erts_msg_distext2heap(Process *, ErtsProcLocks *, ErlHeapFragment **, Eterm *, ErtsDistExternal *); +void erts_cleanup_offheap(ErlOffHeap *offheap); + + ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg); ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg); diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c index 1a84950120..63175c44d6 100644 --- a/erts/emulator/beam/erl_monitors.c +++ b/erts/emulator/beam/erl_monitors.c @@ -971,7 +971,7 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1) } } else { erts_printf("Dumping pid monitors--------------------\n"); - erts_dump_monitors(rp->monitors,0); + erts_dump_monitors(ERTS_P_MONITORS(rp),0); erts_printf("Monitors dumped-------------------------\n"); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); @@ -985,12 +985,15 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1) Process *rp; DistEntry *dep; if (is_internal_port(pid)) { - Port *rport = erts_id2port(pid, p, ERTS_PROC_LOCK_MAIN); + Port *rport = erts_id2port_sflgs(pid, + p, + ERTS_PROC_LOCK_MAIN, + ERTS_PORT_SFLGS_INVALID_LOOKUP); if (rport) { erts_printf("Dumping port links----------------------\n"); - erts_dump_links(rport->nlinks,0); + erts_dump_links(ERTS_P_LINKS(rport), 0); erts_printf("Links dumped----------------------------\n"); - erts_smp_port_unlock(rport); + erts_port_release(rport); BIF_RET(am_true); } else { BIF_ERROR(p,BADARG); @@ -1014,7 +1017,7 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1) } else { erts_printf("Dumping pid links-----------------------\n"); - erts_dump_links(rp->nlinks,0); + erts_dump_links(ERTS_P_LINKS(rp), 0); erts_printf("Links dumped----------------------------\n"); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h index d3f6d410dd..a7fa4e0145 100644 --- a/erts/emulator/beam/erl_monitors.h +++ b/erts/emulator/beam/erl_monitors.h @@ -137,8 +137,6 @@ typedef struct erts_suspend_monitor { #define ERTS_LINK_ROOT(Linkp) ((Linkp)->shared.root) #define ERTS_LINK_REFC(Linkp) ((Linkp)->shared.refc) -#define ERTS_LINK_ROOT_AS_UINT(Linkp) (*((Uint *) &((Linkp)->root))) - Uint erts_tot_link_lh_size(void); diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index f00d5f86ce..fb295c9a8a 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -263,7 +263,7 @@ ErlNifEnv* enif_alloc_env(void) HEAP_LIMIT(&msg_env->phony_proc) = phony_heap; HEAP_END(&msg_env->phony_proc) = phony_heap; MBUF(&msg_env->phony_proc) = NULL; - msg_env->phony_proc.id = ERTS_INVALID_PID; + msg_env->phony_proc.common.id = ERTS_INVALID_PID; #ifdef FORCE_HEAP_FRAGS msg_env->phony_proc.space_verified = 0; msg_env->phony_proc.space_verified_from 
= NULL; @@ -287,7 +287,7 @@ void enif_clear_env(ErlNifEnv* env) struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env; Process* p = &menv->phony_proc; ASSERT(p == menv->env.proc); - ASSERT(p->id == ERTS_INVALID_PID); + ASSERT(p->common.id == ERTS_INVALID_PID); ASSERT(MBUF(p) == menv->env.heap_frag); if (MBUF(p) != NULL) { erts_cleanup_offheap(&MSO(p)); @@ -319,7 +319,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, if (env != NULL) { c_p = env->proc; - if (receiver == c_p->id) { + if (receiver == c_p->common.id) { rp_locks = ERTS_PROC_LOCK_MAIN; flush_me = 1; } @@ -341,7 +341,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC)); if (rp == NULL) { - ASSERT(env == NULL || receiver != c_p->id); + ASSERT(env == NULL || receiver != c_p->common.id); return 0; } flush_env(msg_env); @@ -397,7 +397,7 @@ static int is_offheap(const ErlOffHeap* oh) ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid) { - pid->pid = caller_env->proc->id; + pid->pid = caller_env->proc->common.id; return pid; } int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid) @@ -505,7 +505,7 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin) { struct enif_tmp_obj_t* tobj; ErtsAlcType_t allocator; - Uint sz; + ErlDrvSizeT sz; if (is_binary(term)) { return enif_inspect_binary(env,term,bin); } @@ -531,7 +531,7 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin) bin->size = sz; bin->bin_term = THE_NON_VALUE; bin->ref_bin = NULL; - io_list_to_buf(term, (char*) bin->data, sz); + erts_iolist_to_buf(term, (char*) bin->data, sz); ADD_READONLY_CHECK(env, bin->data, bin->size); return 1; } @@ -1854,7 +1854,7 @@ void erl_nif_init() #ifdef USE_VM_PROBES void dtrace_nifenv_str(ErlNifEnv *env, char *process_buf) { - dtrace_pid_str(env->proc->id, process_buf); + dtrace_pid_str(env->proc->common.id, process_buf); } #endif diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h index 7b4cb7b042..667bda255b 100644 --- a/erts/emulator/beam/erl_node_container_utils.h +++ b/erts/emulator/beam/erl_node_container_utils.h @@ -20,7 +20,7 @@ #ifndef ERL_NODE_CONTAINER_UTILS_H__ #define ERL_NODE_CONTAINER_UTILS_H__ -#include "erl_term.h" +#include "erl_ptab.h" /* * Note regarding node containers: @@ -29,9 +29,6 @@ * the emulator) for the Erlang data types that contain a reference * to a node, i.e. pids, ports, and references. * - * Observe! The layouts of the node container data types have been - * changed in R9. - * * Node containers are divided into internal and external node containers. * An internal node container refer to the current incarnation of the * node which it reside on. An external node container refer to @@ -52,13 +49,6 @@ * reference is a boxed data type. An internal node container have an * implicit reference to the 'erts_this_node' element in the node table. * - * Due to the R9 changes in layouts of node containers there are room to - * store more data than previously. Today (R9) this extra space is unused, - * but it is planned to be used in the future. For example only 18 bits - * are used for data in a pid but there is room for 28 bits of data (on a - * 32-bit machine). Some preparations have been made in the emulator for - * usage of this extra space. - * * OBSERVE! Pids doesn't use fixed size 'serial' and 'number' fields any * more. 
Previously the 15 bit 'number' field of a pid was used as index * into the process table, and the 3 bit 'serial' field was used as a @@ -104,8 +94,6 @@ #define internal_dist_entry(x) (erts_this_node->dist_entry) #define external_dist_entry(x) (external_node((x))->dist_entry) -extern int erts_use_r9_pids_ports; - /* * For this node (and previous incarnations of this node), 0 is used as * channel no. For other nodes, the atom index of the atom corresponding @@ -128,47 +116,20 @@ extern int erts_use_r9_pids_ports; * Pids * \* */ -#define erts_max_processes erts_proc.max - -typedef struct { - erts_smp_atomic_t *tab; - int max; - int tab_cache_lines; - int pix_per_cache_line; - int pix_cl_mask; - int pix_cl_shift; - int pix_cli_mask; - int pix_cli_shift; -} ErtsProcTab; - -extern ErtsProcTab erts_proc; - -ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data) -{ - int n, pix; - - n = (int) pid_data; - if (erts_proc.pix_cl_mask) { - pix = ((n & erts_proc.pix_cl_mask) << erts_proc.pix_cl_shift); - pix += ((n >> erts_proc.pix_cli_shift) & erts_proc.pix_cli_mask); - } - else { - n %= erts_proc.max; - pix = n % erts_proc.tab_cache_lines; - pix *= erts_proc.pix_per_cache_line; - pix += n / erts_proc.tab_cache_lines; - } - ASSERT(0 <= pix && pix < erts_proc.max); - return pix; -} +extern ErtsPTab erts_proc; -#endif +#define make_internal_pid(D) erts_ptab_make_id(&erts_proc, \ + (D), \ + _TAG_IMMED1_PID) -#define internal_pid_index(x) erts_pid_data2ix(internal_pid_data((x))) +#define internal_pid_index(PID) (ASSERT_EXPR(is_internal_pid((PID))), \ + erts_ptab_id2pix(&erts_proc, (PID))) + +#define internal_pid_data(PID) (ASSERT_EXPR(is_internal_pid((PID))), \ + erts_ptab_id2data(&erts_proc, (PID))) + +#define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x))) +#define internal_pid_serial(x) _GET_PID_SER(internal_pid_data((x))) #define internal_pid_node_name(x) (internal_pid_node((x))->sysname) #define external_pid_node_name(x) (external_pid_node((x))->sysname) @@ -208,34 +169,37 @@ ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data) || is_external_pid((x))) #define is_not_pid(x) (!is_pid(x)) -#define ERTS_MAX_R9_PROCESSES (1 << ERTS_R9_PROC_BITS) - /* * Maximum number of processes. We want the number to fit in a SMALL on * 32-bit CPU. */ -#define ERTS_MAX_PROCESSES ((SWORD_CONSTANT(1) << 27)-1) -#if (ERTS_MAX_PROCESSES > MAX_SMALL) -# error "The maximum number of processes must fit in a SMALL." 
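With the table handling moved into the generic ErtsPTab code, both the process and port limits are now derived from ERTS_PTAB_MAX_SIZE, and the -P and -Q emulator flags parsed in erl_init.c earlier in this diff size the respective tables at boot. The macros above resolve an internal pid to its table index and data word through the shared ptab helpers; a hypothetical helper, only to illustrate the mapping:

    /* sketch (hypothetical helper): internal_pid_index() now goes through
     * the generic process/port table code */
    static int pid_slot(Eterm pid)
    {
        ASSERT(is_internal_pid(pid));
        return internal_pid_index(pid);  /* erts_ptab_id2pix(&erts_proc, pid) */
    }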
-#endif - +#define ERTS_MAX_PROCESSES (ERTS_PTAB_MAX_SIZE-1) #define ERTS_MAX_PID_DATA ((1 << _PID_DATA_SIZE) - 1) #define ERTS_MAX_PID_NUMBER ((1 << _PID_NUM_SIZE) - 1) #define ERTS_MAX_PID_SERIAL ((1 << _PID_SER_SIZE) - 1) -#define ERTS_MAX_PID_R9_SERIAL ((1 << _PID_R9_SER_SIZE) - 1) -#define ERTS_R9_PROC_BITS (_PID_R9_SER_SIZE + _PID_NUM_SIZE) #define ERTS_PROC_BITS (_PID_SER_SIZE + _PID_NUM_SIZE) -#define ERTS_INVALID_PID make_internal_pid(ERTS_MAX_PID_DATA) +#define ERTS_INVALID_PID ERTS_PTAB_INVALID_ID(_TAG_IMMED1_PID) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\ * Ports * \* */ -#define internal_port_index(x) (internal_port_data((x)) \ - & erts_port_tab_index_mask) +extern ErtsPTab erts_port; + +#define make_internal_port(D) erts_ptab_make_id(&erts_port, \ + (D), \ + _TAG_IMMED1_PORT) + +#define internal_port_index(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \ + erts_ptab_id2pix(&erts_port, (PRT))) + +#define internal_port_data(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \ + erts_ptab_id2data(&erts_port, (PRT))) + +#define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x))) #define internal_port_node_name(x) (internal_port_node((x))->sysname) #define external_port_node_name(x) (external_port_node((x))->sysname) @@ -274,18 +238,18 @@ ERTS_GLB_INLINE int erts_pid_data2ix(Eterm pid_data) #define is_not_port(x) (!is_port(x)) /* Highest port-ID part in a term of type Port - Not necessarily the same as the variable erts_max_ports + Not necessarily the same as current maximum port table size which defines the maximum number of simultaneous Ports in the Erlang node. ERTS_MAX_PORTS is a hard upper limit. */ -#define ERTS_MAX_R9_PORTS (1 << ERTS_R9_PORTS_BITS) -#define ERTS_MAX_PORTS (1 << ERTS_PORTS_BITS) - +#define ERTS_MAX_PORTS (ERTS_PTAB_MAX_SIZE-1) #define ERTS_MAX_PORT_DATA ((1 << _PORT_DATA_SIZE) - 1) #define ERTS_MAX_PORT_NUMBER ((1 << _PORT_NUM_SIZE) - 1) -#define ERTS_R9_PORTS_BITS (_PORT_R9_NUM_SIZE) #define ERTS_PORTS_BITS (_PORT_NUM_SIZE) + +#define ERTS_INVALID_PORT ERTS_PTAB_INVALID_ID(_TAG_IMMED1_PORT) + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\ * Refs * \* */ diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index 40837d3817..ebfba065d1 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -116,8 +116,7 @@ dist_table_alloc(void *dep_tmpl) dep->qsize = 0; dep->out_queue.first = NULL; dep->out_queue.last = NULL; - dep->suspended.first = NULL; - dep->suspended.last = NULL; + dep->suspended = NULL; dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; @@ -769,8 +768,7 @@ void erts_init_node_tables(void) erts_this_dist_entry->qsize = 0; erts_this_dist_entry->out_queue.first = NULL; erts_this_dist_entry->out_queue.last = NULL; - erts_this_dist_entry->suspended.first = NULL; - erts_this_dist_entry->suspended.last = NULL; + erts_this_dist_entry->suspended = NULL; erts_this_dist_entry->finalized_out_queue.first = NULL; erts_this_dist_entry->finalized_out_queue.last = NULL; @@ -1268,7 +1266,7 @@ setup_reference_table(void) ErlHeapFragment *hfp; DistEntry *dep; HashInfo hi; - int i; + int i, max; DeclareTmpHeapNoproc(heap,3); inserted_bins = NULL; @@ -1297,8 +1295,9 @@ setup_reference_table(void) UnUseTmpHeapNoproc(3); + max = erts_ptab_max(&erts_proc); /* Insert all processes */ - for (i = 0; i < erts_max_processes; i++) { + for (i = 0; i < max; i++) { Process *proc = erts_pix2proc(i); if (proc) { 
ErlMessage *msg; @@ -1306,12 +1305,12 @@ setup_reference_table(void) /* Insert Heap */ insert_offheap(&(proc->off_heap), HEAP_REF, - proc->id); + proc->common.id); /* Insert message buffers */ for(hfp = proc->mbuf; hfp; hfp = hfp->next) insert_offheap(&(hfp->off_heap), HEAP_REF, - proc->id); + proc->common.id); /* Insert msg msg buffers */ for (msg = proc->msg.first; msg; msg = msg->next) { ErlHeapFragment *heap_frag = NULL; @@ -1321,7 +1320,7 @@ setup_reference_table(void) else { if (msg->data.dist_ext->dep) insert_dist_entry(msg->data.dist_ext->dep, - HEAP_REF, proc->id, 0); + HEAP_REF, proc->common.id, 0); if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) heap_frag = erts_dist_ext_trailer(msg->data.dist_ext); } @@ -1329,10 +1328,10 @@ setup_reference_table(void) if (heap_frag) insert_offheap(&(heap_frag->off_heap), HEAP_REF, - proc->id); + proc->common.id); } #ifdef ERTS_SMP - for (msg = proc->u.alive.msg_inq.first; msg; msg = msg->next) { + for (msg = proc->msg_inq.first; msg; msg = msg->next) { ErlHeapFragment *heap_frag = NULL; if (msg->data.attached) { if (is_value(ERL_MESSAGE_TERM(msg))) @@ -1340,7 +1339,7 @@ setup_reference_table(void) else { if (msg->data.dist_ext->dep) insert_dist_entry(msg->data.dist_ext->dep, - HEAP_REF, proc->id, 0); + HEAP_REF, proc->common.id, 0); if (is_not_nil(ERL_MESSAGE_TOKEN(msg))) heap_frag = erts_dist_ext_trailer(msg->data.dist_ext); } @@ -1348,19 +1347,19 @@ setup_reference_table(void) if (heap_frag) insert_offheap(&(heap_frag->off_heap), HEAP_REF, - proc->id); + proc->common.id); } #endif /* Insert links */ - if(proc->nlinks) - insert_links(proc->nlinks, proc->id); - if(proc->monitors) - insert_monitors(proc->monitors, proc->id); + if (ERTS_P_LINKS(proc)) + insert_links(ERTS_P_LINKS(proc), proc->common.id); + if (ERTS_P_MONITORS(proc)) + insert_monitors(ERTS_P_MONITORS(proc), proc->common.id); /* Insert controller */ { DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc); if (dep) - insert_dist_entry(dep, CTRL_REF, proc->id, 0); + insert_dist_entry(dep, CTRL_REF, proc->common.id, 0); } } } @@ -1370,21 +1369,33 @@ setup_reference_table(void) #endif /* Insert all ports */ - for (i = 0; i < erts_max_ports; i++) { - if (erts_port[i].status & ERTS_PORT_SFLGS_DEAD) + max = erts_ptab_max(&erts_port); + for (i = 0; i < max; i++) { + erts_aint32_t state; + Port *prt; + + prt = erts_pix2port(i); + if (!prt) + continue; + + state = erts_atomic32_read_nob(&prt->state); + if (state & ERTS_PORT_SFLGS_DEAD) continue; /* Insert links */ - if(erts_port[i].nlinks) - insert_links(erts_port[i].nlinks, erts_port[i].id); + if (ERTS_P_LINKS(prt)) + insert_links(ERTS_P_LINKS(prt), prt->common.id); + /* Insert monitors */ + if (ERTS_P_MONITORS(prt)) + insert_monitors(ERTS_P_MONITORS(prt), prt->common.id); /* Insert port data */ - for(hfp = erts_port[i].bp; hfp; hfp = hfp->next) - insert_offheap(&(hfp->off_heap), HEAP_REF, erts_port[i].id); + for(hfp = prt->bp; hfp; hfp = hfp->next) + insert_offheap(&(hfp->off_heap), HEAP_REF, prt->common.id); /* Insert controller */ - if (erts_port[i].dist_entry) - insert_dist_entry(erts_port[i].dist_entry, + if (prt->dist_entry) + insert_dist_entry(prt->dist_entry, CTRL_REF, - erts_port[i].id, + prt->common.id, 0); } diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h index 4a015bdef9..af60071ea5 100644 --- a/erts/emulator/beam/erl_node_tables.h +++ b/erts/emulator/beam/erl_node_tables.h @@ -84,10 +84,6 @@ typedef struct { } ErtsDistOutputQueue; struct ErtsProcList_; -typedef struct { - struct ErtsProcList_ *first; - 
struct ErtsProcList_ *last; -} ErtsDistSuspended; /* * Lock order: @@ -100,7 +96,6 @@ typedef struct { */ struct erl_link; -struct port; typedef struct dist_entry_ { HashBucket hash_bucket; /* Hash bucket */ @@ -135,13 +130,13 @@ typedef struct dist_entry_ { Uint32 qflgs; Sint qsize; ErtsDistOutputQueue out_queue; - ErtsDistSuspended suspended; + struct ErtsProcList_ *suspended; ErtsDistOutputQueue finalized_out_queue; erts_smp_atomic_t dist_cmd_scheduled; ErtsPortTaskHandle dist_cmd; - Uint (*send)(struct port *prt, ErtsDistOutputBuf *obuf); + Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf); struct cache* cache; /* The atom cache */ } DistEntry; diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h new file mode 100644 index 0000000000..65b4cd0bfe --- /dev/null +++ b/erts/emulator/beam/erl_port.h @@ -0,0 +1,943 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2012. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + +#ifndef ERL_PORT_TYPE__ +#define ERL_PORT_TYPE__ +typedef struct _erl_drv_port Port; +typedef struct ErtsProc2PortSigData_ ErtsProc2PortSigData; +#endif + +#if !defined(ERL_PORT_H__) && !defined(ERL_PORT_GET_PORT_TYPE_ONLY__) +#define ERL_PORT_H__ + +#include "erl_port_task.h" +#include "erl_ptab.h" +#include "erl_thr_progress.h" +#include "erl_trace.h" + +#define ERTS_DEFAULT_MAX_PORTS (1 << 16) +#define ERTS_MIN_PORTS 1024 + +extern int erts_port_synchronous_ops; +extern int erts_port_schedule_all_ops; +extern int erts_port_parallelism; + +typedef struct erts_driver_t_ erts_driver_t; + +#define ERTS_INVALID_ERL_DRV_PORT ((ErlDrvPort) (SWord) -1) +#define SMALL_IO_QUEUE 5 /* Number of fixed elements */ + +typedef struct { + ErlDrvSizeT size; /* total size in bytes */ + + SysIOVec* v_start; + SysIOVec* v_end; + SysIOVec* v_head; + SysIOVec* v_tail; + SysIOVec v_small[SMALL_IO_QUEUE]; + + ErlDrvBinary** b_start; + ErlDrvBinary** b_end; + ErlDrvBinary** b_head; + ErlDrvBinary** b_tail; + ErlDrvBinary* b_small[SMALL_IO_QUEUE]; +} ErlIOQueue; + +typedef struct line_buf { /* Buffer used in line oriented I/O */ + ErlDrvSizeT bufsiz; /* Size of character buffer */ + ErlDrvSizeT ovlen; /* Length of overflow data */ + ErlDrvSizeT ovsiz; /* Actual size of overflow buffer */ + char data[1]; /* Starting point of buffer data, + data[0] is a flag indicating an unprocess CR, + The rest is the overflow buffer. */ +} LineBuf; + +/* + * Items part of erlang:port_info/1 result. Note am_registered_name + * *need* to be first. + */ + +#define ERTS_PORT_INFO_1_ITEMS \ + { am_registered_name, /* Needs to be first */ \ + am_name, \ + am_links, \ + am_id, \ + am_connected, \ + am_input, \ + am_output, \ + am_os_pid } + +/* + * Port Specific Data. + * + * Only use PrtSD for very rarely used data. 
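Port specific data is a small per-port slot array, and ERTS_PRTSD_SCHED_ID is the only slot this header defines. The accessors declared further down return the previous slot value on update; a hedged sketch of the intended use (the helper name is invented for illustration):

    /* sketch: stash a scheduler id in the port's PrtSD slot and read the
     * old value back; the slot array is allocated on the first set */
    static Uint set_port_sched_id(Port *prt, Uint sched_id)
    {
        void *old = erts_prtsd_set(prt, ERTS_PRTSD_SCHED_ID,
                                   (void *) (UWord) sched_id);
        return (Uint) (UWord) old;     /* NULL (0) if the slot was unset */
    }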
+ */ + +#define ERTS_PRTSD_SCHED_ID 0 + +#define ERTS_PRTSD_SIZE 1 + +typedef struct { + void *data[ERTS_PRTSD_SIZE]; +} ErtsPrtSD; + +#ifdef ERTS_SMP +typedef struct ErtsXPortsList_ ErtsXPortsList; +#endif + +/* + * Port locking: + * + * Locking is done either driver specific or port specific. When + * driver specific locking is used, all instances of the driver, + * i.e. ports running the driver, share the same lock. When port + * specific locking is used each instance have its own lock. + * + * Most fields in the Port structure are protected by the lock + * referred to by the 'lock' field. This lock is shared between + * all ports running the same driver when driver specific locking + * is used. + * + * The 'sched' field is protected by the run queue lock that the + * port currently is assigned to. + * + */ + +struct _erl_drv_port { + ErtsPTabElementCommon common; /* *Need* to be first in struct */ + + ErtsPortTaskSched sched; + ErtsPortTaskHandle timeout_task; +#ifdef ERTS_SMP + erts_mtx_t *lock; + ErtsXPortsList *xports; + erts_smp_atomic_t run_queue; +#else + erts_atomic32_t refc; + int cleanup; +#endif + erts_atomic_t connected; /* A connected process */ + Eterm caller; /* Current caller. */ + Eterm data; /* Data associated with port. */ + ErlHeapFragment* bp; /* Heap fragment holding data (NULL if imm data). */ + Uint bytes_in; /* Number of bytes read */ + Uint bytes_out; /* Number of bytes written */ + + ErlIOQueue ioq; /* driver accessible i/o queue */ + DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */ + char *name; /* String used in the open */ + erts_driver_t* drv_ptr; + UWord drv_data; + SWord os_pid; /* Child process ID */ + ErtsProcList *suspended; /* List of suspended processes. */ + LineBuf *linebuf; /* Buffer to hold data not ready for + process to get (line oriented I/O)*/ + erts_atomic32_t state; /* Status and type flags */ + int control_flags; /* Flags for port_control() */ + ErlDrvPDL port_data_lock; + + ErtsPrtSD *psd; /* Port specific data */ +}; + +#define ERTS_PORT_GET_CONNECTED(PRT) \ + ((Eterm) erts_atomic_read_nob(&(PRT)->connected)) +#define ERTS_PORT_SET_CONNECTED(PRT, PID) \ + erts_atomic_set_relb(&(PRT)->connected, (erts_aint_t) (PID)) +#define ERTS_PORT_INIT_CONNECTED(PRT, PID) \ + erts_atomic_init_nob(&(PRT)->connected, (erts_aint_t) (PID)) + + +struct erl_drv_port_data_lock { + erts_mtx_t mtx; + erts_atomic_t refc; + Port *prt; +}; + +ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE ErtsRunQueue * +erts_port_runq(Port *prt) +{ +#ifdef ERTS_SMP + ErtsRunQueue *rq1, *rq2; + rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); + if (!rq1) + return NULL; + while (1) { + erts_smp_runq_lock(rq1); + rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); + if (rq1 == rq2) + return rq1; + erts_smp_runq_unlock(rq1); + rq1 = rq2; + if (!rq1) + return NULL; + } +#else + return ERTS_RUNQ_IX(0); +#endif +} + +#endif + + +ERTS_GLB_INLINE void *erts_prtsd_get(Port *p, int ix); +ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE void * +erts_prtsd_get(Port *prt, int ix) +{ + return prt->psd ? 
prt->psd->data[ix] : NULL; +} + +ERTS_GLB_INLINE void * +erts_prtsd_set(Port *prt, int ix, void *data) +{ + if (prt->psd) { + void *old = prt->psd->data[ix]; + prt->psd->data[ix] = data; + return old; + } + else { + prt->psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD)); + prt->psd->data[ix] = data; + return NULL; + } +} + +#endif + +extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */ +extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */ + + +/* port status flags */ + +#define ERTS_PORT_SFLG_CONNECTED ((Uint32) (1 << 0)) +/* Port have begun exiting */ +#define ERTS_PORT_SFLG_EXITING ((Uint32) (1 << 1)) +/* Distribution port */ +#define ERTS_PORT_SFLG_DISTRIBUTION ((Uint32) (1 << 2)) +#define ERTS_PORT_SFLG_BINARY_IO ((Uint32) (1 << 3)) +#define ERTS_PORT_SFLG_SOFT_EOF ((Uint32) (1 << 4)) +/* Flow control */ +/* Port is closing (no i/o accepted) */ +#define ERTS_PORT_SFLG_CLOSING ((Uint32) (1 << 5)) +/* Send a closed message when terminating */ +#define ERTS_PORT_SFLG_SEND_CLOSED ((Uint32) (1 << 6)) +/* Line orinted io on port */ +#define ERTS_PORT_SFLG_LINEBUF_IO ((Uint32) (1 << 7)) +/* Immortal port (only certain system ports) */ +#define ERTS_PORT_SFLG_FREE ((Uint32) (1 << 8)) +#define ERTS_PORT_SFLG_INITIALIZING ((Uint32) (1 << 9)) +/* Port uses port specific locking (opposed to driver specific locking) */ +#define ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK ((Uint32) (1 << 10)) +#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 11)) +/* Last port to terminate halts the emulator */ +#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 12)) +#ifdef DEBUG +/* Only debug: make sure all flags aren't cleared unintentionally */ +#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31)) +#endif + +/* Combinations of port status flags */ +#define ERTS_PORT_SFLGS_DEAD \ + (ERTS_PORT_SFLG_FREE | ERTS_PORT_SFLG_INITIALIZING) +#define ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \ + (ERTS_PORT_SFLGS_DEAD | ERTS_PORT_SFLG_INVALID) +#define ERTS_PORT_SFLGS_INVALID_LOOKUP \ + (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \ + | ERTS_PORT_SFLG_EXITING \ + | ERTS_PORT_SFLG_CLOSING) +#define ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP \ + (ERTS_PORT_SFLGS_INVALID_LOOKUP \ + | ERTS_PORT_SFLG_DISTRIBUTION) + +/* + * Costs in reductions for some port operations. 
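A port's status and type flags now live in a single atomic state word (the erts_atomic32_t state field of struct _erl_drv_port above), and the ERTS_PORT_SFLGS_* combinations are what the lookup functions below test against. Roughly:

    /* sketch: the kind of state test the lookup helpers below perform */
    static int port_is_usable(Port *prt)
    {
        erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
        return !(state & ERTS_PORT_SFLGS_INVALID_LOOKUP);
    }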
+ */ +#define ERTS_PORT_REDS_EXECUTE (CONTEXT_REDS/4) +#define ERTS_PORT_REDS_FREE (CONTEXT_REDS/400) +#define ERTS_PORT_REDS_TIMEOUT (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_INPUT (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_OUTPUT (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_EVENT (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_CMD_OUTPUTV (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_CMD_OUTPUT (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_EXIT (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_CONNECT (CONTEXT_REDS/200) +#define ERTS_PORT_REDS_UNLINK (CONTEXT_REDS/200) +#define ERTS_PORT_REDS_LINK (CONTEXT_REDS/200) +#define ERTS_PORT_REDS_BADSIG (CONTEXT_REDS/200) +#define ERTS_PORT_REDS_CONTROL (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_CALL (CONTEXT_REDS/50) +#define ERTS_PORT_REDS_INFO (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_SET_DATA (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_GET_DATA (CONTEXT_REDS/100) +#define ERTS_PORT_REDS_TERMINATE (CONTEXT_REDS/50) + +void print_port_info(Port *, int, void *); +void erts_port_free(Port *); +#ifndef ERTS_SMP +void erts_port_cleanup(Port *); +#endif +void erts_fire_port_monitor(Port *prt, Eterm ref); +#ifdef ERTS_SMP +int erts_port_handle_xports(Port *); +#endif + +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +int erts_lc_is_port_locked(Port *); +#endif + +ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt); +ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt); +ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc); + +ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt); +ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt); +ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt) +{ +#ifdef ERTS_SMP + erts_ptab_inc_refc(&prt->common); +#else + erts_atomic32_inc_nob(&prt->refc); +#endif +} + +ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt) +{ +#ifdef ERTS_SMP + int referred = erts_ptab_dec_test_refc(&prt->common); + if (!referred) + erts_port_free(prt); +#else + int refc = erts_atomic32_dec_read_nob(&prt->refc); + if (refc == 0) + erts_port_free(prt); +#endif +} + +ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc) +{ +#ifdef ERTS_SMP + int referred = erts_ptab_add_test_refc(&prt->common, add_refc); + if (!referred) + erts_port_free(prt); +#else + int refc = erts_atomic32_add_read_nob(&prt->refc, add_refc); + if (refc == 0) + erts_port_free(prt); +#endif +} + +ERTS_GLB_INLINE int +erts_smp_port_trylock(Port *prt) +{ +#ifdef ERTS_SMP + /* *Need* to be a managed thread */ + ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + return erts_mtx_trylock(prt->lock); +#else + return 0; +#endif +} + +ERTS_GLB_INLINE void +erts_smp_port_lock(Port *prt) +{ +#ifdef ERTS_SMP + /* *Need* to be a managed thread */ + ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + erts_mtx_lock(prt->lock); +#endif +} + +ERTS_GLB_INLINE void +erts_smp_port_unlock(Port *prt) +{ +#ifdef ERTS_SMP + /* *Need* to be a managed thread */ + ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + erts_mtx_unlock(prt->lock); +#endif +} + +#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ + + +#define ERTS_INVALID_PORT_OPT(PP, ID, FLGS) \ + (!(PP) \ + || (erts_atomic32_read_nob(&(PP)->state) & (FLGS)) \ + || (PP)->common.id != (ID)) + +/* port lookup */ + +#define INVALID_PORT(PP, ID) \ + ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_LOOKUP) + +/* Invalidate trace port if anything suspicious, for instance + * that the port is a distribution 
port or it is busy. + */ +#define INVALID_TRACER_PORT(PP, ID) \ + ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP) + +#define ERTS_PORT_SCHED_ID(P, ID) \ + ((Uint) (UWord) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (UWord) (ID))) + +extern const Port erts_invalid_port; +#define ERTS_PORT_LOCK_BUSY ((Port *) &erts_invalid_port) + +int erts_is_port_ioq_empty(Port *); +void erts_terminate_port(Port *); + +#ifdef ERTS_SMP +Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks); +#endif + +ERTS_GLB_INLINE Port *erts_pix2port(int); +ERTS_GLB_INLINE Port *erts_port_lookup_raw(Eterm); +ERTS_GLB_INLINE Port *erts_port_lookup(Eterm, Uint32); +ERTS_GLB_INLINE Port*erts_id2port(Eterm id); +ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32); +ERTS_GLB_INLINE void erts_port_release(Port *); +#ifdef ERTS_SMP +ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs); +ERTS_GLB_INLINE void erts_thr_port_release(Port *prt); +#endif +ERTS_GLB_INLINE Port *erts_thr_drvport2port_raw(ErlDrvPort, int); +ERTS_GLB_INLINE Port *erts_drvport2port_raw(ErlDrvPort drvport); +ERTS_GLB_INLINE Port *erts_drvport2port(ErlDrvPort, erts_aint32_t *); +ERTS_GLB_INLINE Port *erts_drvportid2port(Eterm); +ERTS_GLB_INLINE Eterm erts_drvport2id(ErlDrvPort); +ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm); +ERTS_GLB_INLINE int erts_is_port_alive(Eterm); +ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm); +ERTS_GLB_INLINE int erts_port_driver_callback_epilogue(Port *, erts_aint32_t *); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE Port *erts_pix2port(int ix) +{ + Port *prt; + ASSERT(0 <= ix && ix < erts_ptab_max(&erts_port)); + prt = (Port *) erts_ptab_pix2intptr_nob(&erts_port, ix); + return prt == ERTS_PORT_LOCK_BUSY ? NULL : prt; +} + +ERTS_GLB_INLINE Port * +erts_port_lookup_raw(Eterm id) +{ + Port *prt; + + ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying()); + + if (is_not_internal_port(id)) + return NULL; + + prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port, + internal_port_index(id)); + return prt && prt->common.id == id ? prt : NULL; +} + +ERTS_GLB_INLINE Port * +erts_port_lookup(Eterm id, Uint32 invalid_sflgs) +{ + Port *prt = erts_port_lookup_raw(id); + return (!prt + ? NULL + : ((invalid_sflgs & erts_atomic32_read_nob(&prt->state)) + ? 
NULL + : prt)); +} + + +ERTS_GLB_INLINE Port* +erts_id2port(Eterm id) +{ + erts_aint32_t state; + Port *prt; + + /* Only allowed to be called from managed threads */ + ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + + if (is_not_internal_port(id)) + return NULL; + + prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port, + internal_port_index(id)); + + if (!prt || prt->common.id != id) + return NULL; + + erts_smp_port_lock(prt); + state = erts_atomic32_read_nob(&prt->state); + if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) { + erts_smp_port_unlock(prt); + return NULL; + } + + return prt; +} + + +ERTS_GLB_INLINE Port* +erts_id2port_sflgs(Eterm id, + Process *c_p, ErtsProcLocks c_p_locks, + Uint32 invalid_sflgs) +{ +#ifdef ERTS_SMP + int no_proc_locks = !c_p || !c_p_locks; +#endif + erts_aint32_t state; + Port *prt; + + /* Only allowed to be called from managed threads */ + ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + + if (is_not_internal_port(id)) + return NULL; + + prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port, + internal_port_index(id)); + + if (!prt || prt->common.id != id) + return NULL; + +#ifdef ERTS_SMP + if (no_proc_locks) + erts_smp_port_lock(prt); + else if (erts_smp_port_trylock(prt) == EBUSY) { + /* Unlock process locks, and acquire locks in lock order... */ + erts_smp_proc_unlock(c_p, c_p_locks); + erts_smp_port_lock(prt); + erts_smp_proc_lock(c_p, c_p_locks); + } +#endif + state = erts_atomic32_read_nob(&prt->state); + if (state & invalid_sflgs) { +#ifdef ERTS_SMP + erts_smp_port_unlock(prt); +#endif + return NULL; + } + + return prt; +} + +ERTS_GLB_INLINE void +erts_port_release(Port *prt) +{ + /* Only allowed to be called from managed threads */ + ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); +#ifdef ERTS_SMP + erts_smp_port_unlock(prt); +#else + if (prt->cleanup) { + prt->cleanup = 0; + erts_port_cleanup(prt); + } +#endif +} + +#ifdef ERTS_SMP + +/* + * erts_thr_id2port_sflgs() and erts_thr_port_release() can + * be used by unmanaged threads in the SMP case. 
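A minimal sketch of the intended calling pattern for the unmanaged-thread lookup, with the flag set chosen for illustration:

    /* sketch: port lookup from a thread that may be unmanaged; the lookup
     * takes the port lock (and, for unmanaged threads, a reference) */
    static void with_port(Eterm id)
    {
        Port *prt = erts_thr_id2port_sflgs(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
        if (!prt)
            return;                    /* not a live, usable port */
        /* ... use prt under its lock ... */
        erts_thr_port_release(prt);    /* unlocks and drops the reference */
    }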
+ */ +ERTS_GLB_INLINE Port * +erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs) +{ + Port *prt; + ErtsThrPrgrDelayHandle dhndl; + + if (is_not_internal_port(id)) + return NULL; + + dhndl = erts_thr_progress_unmanaged_delay(); + + prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port, + internal_port_index(id)); + + if (!prt || prt->common.id != id) { + erts_thr_progress_unmanaged_continue(dhndl); + prt = NULL; + } + else { + erts_aint32_t state; + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) { + erts_port_inc_refc(prt); + erts_thr_progress_unmanaged_continue(dhndl); + } + + erts_mtx_lock(prt->lock); + state = erts_atomic32_read_nob(&prt->state); + if (state & invalid_sflgs) { + erts_mtx_unlock(prt->lock); + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_port_dec_refc(prt); + prt = NULL; + } + } + + return prt; +} + +ERTS_GLB_INLINE void +erts_thr_port_release(Port *prt) +{ + erts_mtx_unlock(prt->lock); +#ifdef ERTS_SMP + if (!erts_thr_progress_is_managed_thread()) + erts_port_dec_refc(prt); +#endif +} + +#endif + +ERTS_GLB_INLINE Port* +erts_thr_drvport2port_raw(ErlDrvPort drvport, int lock_pdl) +{ +#if ERTS_ENABLE_LOCK_CHECK + int emu_thread = erts_lc_is_emu_thr(); +#endif + if (drvport == ERTS_INVALID_ERL_DRV_PORT) + return NULL; + else { + Port *prt = (Port *) drvport; + if (lock_pdl && prt->port_data_lock) + driver_pdl_lock(prt->port_data_lock); +#if ERTS_ENABLE_LOCK_CHECK + if (!ERTS_IS_CRASH_DUMPING) { + if (emu_thread) { + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(!prt->port_data_lock + || erts_lc_mtx_is_locked(&prt->port_data_lock->mtx)); + } + else { + ERTS_LC_ASSERT(prt->port_data_lock); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&prt->port_data_lock->mtx)); + } + } +#endif + return prt; + } +} + +ERTS_GLB_INLINE Port* +erts_drvport2port_raw(ErlDrvPort drvport) +{ + ERTS_LC_ASSERT(erts_lc_is_emu_thr()); + if (drvport == ERTS_INVALID_ERL_DRV_PORT) + return NULL; + else { + Port *prt = (Port *) drvport; + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + || ERTS_IS_CRASH_DUMPING); + return prt; + } +} + +ERTS_GLB_INLINE Port* +erts_drvport2port(ErlDrvPort drvport, erts_aint32_t *statep) +{ + Port *prt = erts_drvport2port_raw(drvport); + erts_aint32_t state; + if (!prt) + return NULL; + state = erts_atomic32_read_nob(&prt->state); + if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) + return NULL; + if (statep) + *statep = state; + return prt; +} + +ERTS_GLB_INLINE Port* +erts_drvportid2port(Eterm id) +{ + Port *prt; + erts_aint32_t state; + if (is_not_internal_port(id)) + return NULL; + prt = (Port *) erts_ptab_pix2intptr_nob(&erts_port, + internal_port_index(id)); + if (!prt) + return NULL; + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + || ERTS_IS_CRASH_DUMPING); + if (prt->common.id != id) + return NULL; + state = erts_atomic32_read_nob(&prt->state); + if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) + return NULL; + return prt; +} + +ERTS_GLB_INLINE Eterm +erts_drvport2id(ErlDrvPort drvport) +{ + Port *prt = erts_drvport2port_raw(drvport); + if (!prt) + return am_undefined; + else + return prt->common.id; +} + +ERTS_GLB_INLINE Uint32 +erts_portid2status(Eterm id) +{ + Port *prt = erts_port_lookup_raw(id); + if (prt) + return (Uint32) erts_atomic32_read_acqb(&prt->state); + else + return ERTS_PORT_SFLG_INVALID; +} + +ERTS_GLB_INLINE int +erts_is_port_alive(Eterm id) +{ + return !(erts_portid2status(id) & (ERTS_PORT_SFLG_INVALID + | ERTS_PORT_SFLGS_DEAD)); +} + +ERTS_GLB_INLINE int +erts_is_valid_tracer_port(Eterm id) +{ + return 
!(erts_portid2status(id) & ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); +} + +ERTS_GLB_INLINE int +erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep) +{ + int reds = 0; + erts_aint32_t state; + + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + + state = erts_atomic32_read_nob(&prt->state); + if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(prt)) { + reds += ERTS_PORT_REDS_TERMINATE; + erts_terminate_port(prt); + state = erts_atomic32_read_nob(&prt->state); + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + } + +#ifdef ERTS_SMP + if (prt->xports) { + reds += erts_port_handle_xports(prt); + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ASSERT(!prt->xports); + } +#endif + + if (statep) + *statep = state; + + return reds; +} + +#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ + +void erts_port_resume_procs(Port *); + +struct binary; + +#define ERTS_P2P_SIG_TYPE_BAD 0 +#define ERTS_P2P_SIG_TYPE_OUTPUT 1 +#define ERTS_P2P_SIG_TYPE_OUTPUTV 2 +#define ERTS_P2P_SIG_TYPE_CONNECT 3 +#define ERTS_P2P_SIG_TYPE_EXIT 4 +#define ERTS_P2P_SIG_TYPE_CONTROL 5 +#define ERTS_P2P_SIG_TYPE_CALL 6 +#define ERTS_P2P_SIG_TYPE_INFO 7 +#define ERTS_P2P_SIG_TYPE_LINK 8 +#define ERTS_P2P_SIG_TYPE_UNLINK 9 +#define ERTS_P2P_SIG_TYPE_SET_DATA 10 +#define ERTS_P2P_SIG_TYPE_GET_DATA 11 + +#define ERTS_P2P_SIG_TYPE_BITS 4 +#define ERTS_P2P_SIG_TYPE_MASK \ + ((1 << ERTS_P2P_SIG_TYPE_BITS) - 1) + +#define ERTS_P2P_SIG_DATA_FLG(N) \ + (1 << (ERTS_P2P_SIG_TYPE_BITS + (N))) +#define ERTS_P2P_SIG_DATA_FLG_BANG_OP ERTS_P2P_SIG_DATA_FLG(0) +#define ERTS_P2P_SIG_DATA_FLG_REPLY ERTS_P2P_SIG_DATA_FLG(1) +#define ERTS_P2P_SIG_DATA_FLG_NOSUSPEND ERTS_P2P_SIG_DATA_FLG(2) +#define ERTS_P2P_SIG_DATA_FLG_FORCE ERTS_P2P_SIG_DATA_FLG(3) +#define ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT ERTS_P2P_SIG_DATA_FLG(4) +#define ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK ERTS_P2P_SIG_DATA_FLG(5) +#define ERTS_P2P_SIG_DATA_FLG_SCHED ERTS_P2P_SIG_DATA_FLG(6) + +struct ErtsProc2PortSigData_ { + int flags; + Eterm caller; + Uint32 ref[ERTS_MAX_REF_NUMBERS]; + union { + struct { + Eterm from; + ErlIOVec *evp; + ErlDrvBinary *cbinp; + } outputv; + struct { + Eterm from; + char *bufp; + ErlDrvSizeT size; + } output; + struct { + Eterm from; + Eterm connected; + } connect; + struct { + Eterm from; + Eterm reason; + ErlHeapFragment *bp; + } exit; + struct { + struct binary *binp; + unsigned int command; + char *bufp; + ErlDrvSizeT size; + } control; + struct { + unsigned int command; + char *bufp; + ErlDrvSizeT size; + } call; + struct { + Eterm item; + } info; + struct { + Eterm port; + Eterm to; + } link; + struct { + Eterm from; + } unlink; + struct { + ErlHeapFragment *bp; + Eterm data; + } set_data; + } u; +} ; + +ERTS_GLB_INLINE int +erts_proc2port_sig_is_command_op(ErtsProc2PortSigData *sigdp); +ERTS_GLB_INLINE ErlDrvSizeT +erts_proc2port_sig_command_data_size(ErtsProc2PortSigData *sigdp); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE int +erts_proc2port_sig_is_command_op(ErtsProc2PortSigData *sigdp) +{ + switch (sigdp->flags & ERTS_P2P_SIG_TYPE_MASK) { + case ERTS_P2P_SIG_TYPE_OUTPUT: return !0; + case ERTS_P2P_SIG_TYPE_OUTPUTV: return !0; + default: return 0; + } +} + +ERTS_GLB_INLINE ErlDrvSizeT +erts_proc2port_sig_command_data_size(ErtsProc2PortSigData *sigdp) +{ + switch (sigdp->flags & ERTS_P2P_SIG_TYPE_MASK) { + case ERTS_P2P_SIG_TYPE_OUTPUT: return sigdp->u.output.size; + case ERTS_P2P_SIG_TYPE_OUTPUTV: return sigdp->u.outputv.evp->size; + default: return (ErlDrvSizeT) 0; + } +} + +#endif + +#define 
ERTS_PROC2PORT_SIG_EXEC 0 +#define ERTS_PROC2PORT_SIG_ABORT 1 +#define ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND 2 +#define ERTS_PROC2PORT_SIG_ABORT_CLOSED 3 + +typedef int (*ErtsProc2PortSigCallback)(Port *, + erts_aint32_t, + int, + ErtsProc2PortSigData *); + +typedef enum { + ERTS_PORT_OP_BADARG, + ERTS_PORT_OP_CALLER_EXIT, + ERTS_PORT_OP_BUSY, + ERTS_PORT_OP_BUSY_SCHEDULED, + ERTS_PORT_OP_SCHEDULED, + ERTS_PORT_OP_DROPPED, + ERTS_PORT_OP_DONE +} ErtsPortOpResult; + +ErtsPortOpResult +erts_schedule_proc2port_signal(Process *, + Port *, + Eterm, + Eterm *, + ErtsProc2PortSigData *, + int, + ErtsProc2PortSigCallback); + +int erts_deliver_port_exit(Port *, Eterm, Eterm, int); + +/* + * Port signal flags + */ +#define ERTS_PORT_SIG_FLG_BANG_OP ERTS_P2P_SIG_DATA_FLG_BANG_OP +#define ERTS_PORT_SIG_FLG_NOSUSPEND ERTS_P2P_SIG_DATA_FLG_NOSUSPEND +#define ERTS_PORT_SIG_FLG_FORCE ERTS_P2P_SIG_DATA_FLG_FORCE +#define ERTS_PORT_SIG_FLG_BROKEN_LINK ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK +#define ERTS_PORT_SIG_FLG_BAD_OUTPUT ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT +#define ERTS_PORT_SIG_FLG_FORCE_SCHED ERTS_P2P_SIG_DATA_FLG_SCHED +/* ERTS_PORT_SIG_FLG_FORCE_IMM_CALL only when crash dumping... */ +#define ERTS_PORT_SIG_FLG_FORCE_IMM_CALL ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT + +/* + * Port ! {Owner, {command, Data}} + * Port ! {Owner, {connect, NewOwner}} + * Port ! {Owner, close} + */ +ErtsPortOpResult erts_port_command(Process *, int, Port *, Eterm, Eterm *); + +/* + * Signals from processes to ports. + */ +ErtsPortOpResult erts_port_output(Process *, int, Port *, Eterm, Eterm, Eterm *); +ErtsPortOpResult erts_port_exit(Process *, int, Port *, Eterm, Eterm, Eterm *); +ErtsPortOpResult erts_port_connect(Process *, int, Port *, Eterm, Eterm, Eterm *); +ErtsPortOpResult erts_port_link(Process *, Port *, Eterm, Eterm *); +ErtsPortOpResult erts_port_unlink(Process *, Port *, Eterm, Eterm *); +ErtsPortOpResult erts_port_control(Process *, Port *, unsigned int, Eterm, Eterm *); +ErtsPortOpResult erts_port_call(Process *, Port *, unsigned int, Eterm, Eterm *); +ErtsPortOpResult erts_port_info(Process *, Port *, Eterm, Eterm *); +ErtsPortOpResult erts_port_set_data(Process *, Port *, Eterm, Eterm *); +ErtsPortOpResult erts_port_get_data(Process *, Port *, Eterm *); + +#endif diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index b6bc59a1c3..09c8e760f4 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -33,36 +33,34 @@ #include "erl_port_task.h" #include "dist.h" #include "dtrace-wrapper.h" - -#if defined(DEBUG) && 0 -#define HARD_DEBUG -#endif +#include <stdarg.h> /* - * Costs in reductions for some port operations. + * ERTS_PORT_CALLBACK_VREDS: Limit the amount of callback calls we do... 
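 *
 * Each callback executed by erts_port_task_execute() is charged
 * ERTS_PORT_CALLBACK_VREDS "virtual" reductions (CONTEXT_REDS/5) on top
 * of whatever real cost the callback reports, so a port yields after at
 * most five callbacks per scheduling slot even when the callbacks report
 * no work of their own. The virtual part is subtracted again
 * (reds -= vreds) before the reductions are credited to the scheduler.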
*/ -#define ERTS_PORT_REDS_EXECUTE 0 -#define ERTS_PORT_REDS_FREE 50 -#define ERTS_PORT_REDS_TIMEOUT 200 -#define ERTS_PORT_REDS_INPUT 200 -#define ERTS_PORT_REDS_OUTPUT 200 -#define ERTS_PORT_REDS_EVENT 200 -#define ERTS_PORT_REDS_TERMINATE 100 +#define ERTS_PORT_CALLBACK_VREDS (CONTEXT_REDS/5) +#if defined(DEBUG) && 0 +#define ERTS_HARD_DEBUG_TASK_QUEUES +#else +#undef ERTS_HARD_DEBUG_TASK_QUEUES +#endif -#define ERTS_PORT_TASK_INVALID_PORT(P, ID) \ - ((erts_port_status_get((P)) & ERTS_PORT_SFLGS_DEAD) || (P)->id != (ID)) - -#define ERTS_PORT_IS_IN_RUNQ(RQ, P) \ - ((P)->sched.next || (P)->sched.prev || (RQ)->ports.start == (P)) +#ifdef ERTS_HARD_DEBUG_TASK_QUEUES +static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue); +#define ERTS_PT_DBG_CHK_TASK_QS(PP, EQ, PBQ) \ + chk_task_queues((PP), (EQ), (PBQ)) +#else +#define ERTS_PT_DBG_CHK_TASK_QS(PP, EQ, PBQ) +#endif #ifdef USE_VM_PROBES #define DTRACE_DRIVER(PROBE_NAME, PP) \ - if (DTRACE_ENABLED(driver_ready_input)) { \ + if (DTRACE_ENABLED(PROBE_NAME)) { \ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); \ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); \ \ - dtrace_pid_str(PP->connected, process_str); \ + dtrace_pid_str(ERTS_PORT_GET_CONNECTED(PP), process_str); \ dtrace_port_str(PP, port_str); \ DTRACE3(PROBE_NAME, process_str, port_str, PP->name); \ } @@ -72,83 +70,768 @@ erts_smp_atomic_t erts_port_task_outstanding_io_tasks; -struct ErtsPortTaskQueue_ { - ErtsPortTask *first; - ErtsPortTask *last; - Port *port; -}; +#define ERTS_PT_STATE_SCHEDULED 0 +#define ERTS_PT_STATE_ABORTED 1 +#define ERTS_PT_STATE_EXECUTING 2 + +typedef union { + struct { /* I/O tasks */ + ErlDrvEvent event; + ErlDrvEventData event_data; + } io; + struct { + ErtsProc2PortSigCallback callback; + ErtsProc2PortSigData data; + } psig; +} ErtsPortTaskTypeData; struct ErtsPortTask_ { - ErtsPortTask *prev; - ErtsPortTask *next; - ErtsPortTaskQueue *queue; - ErtsPortTaskHandle *handle; + erts_smp_atomic32_t state; ErtsPortTaskType type; - ErlDrvEvent event; - ErlDrvEventData event_data; + union { + struct { + ErtsPortTask *next; + ErtsPortTaskHandle *handle; + int flags; + Uint32 ref[ERTS_MAX_REF_NUMBERS]; + ErtsPortTaskTypeData td; + } alive; + ErtsThrPrgrLaterOp release; + } u; }; -#ifdef HARD_DEBUG -#define ERTS_PT_CHK_PORTQ(RQ) check_port_queue((RQ), NULL, 0) -#define ERTS_PT_CHK_PRES_PORTQ(RQ, PP) check_port_queue((RQ), (PP), -1) -#define ERTS_PT_CHK_IN_PORTQ(RQ, PP) check_port_queue((RQ), (PP), 1) -#define ERTS_PT_CHK_NOT_IN_PORTQ(RQ, PP) check_port_queue((RQ), (PP), 0) -#define ERTS_PT_CHK_TASKQ(Q) check_task_queue((Q), NULL, 0) -#define ERTS_PT_CHK_IN_TASKQ(Q, T) check_task_queue((Q), (T), 1) -#define ERTS_PT_CHK_NOT_IN_TASKQ(Q, T) check_task_queue((Q), (T), 0) -static void -check_port_queue(Port *chk_pp, int inq); -static void -check_task_queue(ErtsPortTaskQueue *ptqp, - ErtsPortTask *chk_ptp, - int inq); -#else -#define ERTS_PT_CHK_PORTQ(RQ) -#define ERTS_PT_CHK_PRES_PORTQ(RQ, PP) -#define ERTS_PT_CHK_IN_PORTQ(RQ, PP) -#define ERTS_PT_CHK_NOT_IN_PORTQ(RQ, PP) -#define ERTS_PT_CHK_TASKQ(Q) -#define ERTS_PT_CHK_IN_TASKQ(Q, T) -#define ERTS_PT_CHK_NOT_IN_TASKQ(Q, T) +struct ErtsPortTaskHandleList_ { + ErtsPortTaskHandle handle; + union { + ErtsPortTaskHandleList *next; +#ifdef ERTS_SMP + ErtsThrPrgrLaterOp release; #endif + } u; +}; + +typedef struct ErtsPortTaskBusyCaller_ ErtsPortTaskBusyCaller; +struct ErtsPortTaskBusyCaller_ { + ErtsPortTaskBusyCaller *next; + Eterm caller; + SWord count; + ErtsPortTask *last; +}; + +#define 
ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS 17 +struct ErtsPortTaskBusyCallerTable_ { + ErtsPortTaskBusyCaller *bucket[ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS]; + ErtsPortTaskBusyCaller pre_alloc_busy_caller; +}; -static void handle_remaining_tasks(ErtsRunQueue *runq, Port *pp); + +static void begin_port_cleanup(Port *pp, + ErtsPortTask **execq, + int *processing_busy_q_p); ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_task, ErtsPortTask, - 200, + 1000, ERTS_ALC_T_PORT_TASK) -ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_taskq, - ErtsPortTaskQueue, + +ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(busy_caller_table, + ErtsPortTaskBusyCallerTable, 50, - ERTS_ALC_T_PORT_TASKQ) + ERTS_ALC_T_BUSY_CALLER_TAB) + +#ifdef ERTS_SMP +static void +call_port_task_free(void *vptp) +{ + port_task_free((ErtsPortTask *) vptp); +} +#endif + +static ERTS_INLINE void +schedule_port_task_free(ErtsPortTask *ptp) +{ +#ifdef ERTS_SMP + erts_schedule_thr_prgr_later_op(call_port_task_free, + (void *) ptp, + &ptp->u.release); +#else + port_task_free(ptp); +#endif +} + +static ERTS_INLINE ErtsPortTask * +p2p_sig_data_to_task(ErtsProc2PortSigData *sigdp) +{ + ErtsPortTask *ptp; + char *ptr = (char *) sigdp; + ptr -= offsetof(ErtsPortTask, u.alive.td.psig.data); + ptp = (ErtsPortTask *) ptr; + ASSERT(ptp->type == ERTS_PORT_TASK_PROC_SIG); + return ptp; +} + +ErtsProc2PortSigData * +erts_port_task_alloc_p2p_sig_data(void) +{ + ErtsPortTask *ptp = port_task_alloc(); + + ptp->type = ERTS_PORT_TASK_PROC_SIG; + ptp->u.alive.flags = ERTS_PT_FLG_SIG_DEP; + erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); + + ASSERT(ptp == p2p_sig_data_to_task(&ptp->u.alive.td.psig.data)); + + return &ptp->u.alive.td.psig.data; +} + +static ERTS_INLINE Eterm +task_caller(ErtsPortTask *ptp) +{ + Eterm caller; + + ASSERT(ptp->type == ERTS_PORT_TASK_PROC_SIG); + + caller = ptp->u.alive.td.psig.data.caller; + + ASSERT(is_internal_pid(caller) || is_internal_port(caller)); + + return caller; +} + +/* + * Busy queue management + */ + +static ERTS_INLINE int +caller2bix(Eterm caller) +{ + ASSERT(is_internal_pid(caller) || is_internal_port(caller)); + return (int) (_GET_PID_DATA(caller) % ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS); +} + + +static void +popped_from_busy_queue(Port *pp, ErtsPortTask *ptp, int last) +{ + ErtsPortTaskBusyCaller **prev_bcpp = NULL, *bcp; + ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table; + Eterm caller = task_caller(ptp); + int bix = caller2bix(caller); + + ASSERT(is_internal_pid(caller)); + + ASSERT(tabp); + bcp = tabp->bucket[bix]; + prev_bcpp = &tabp->bucket[bix]; + ASSERT(bcp); + while (bcp->caller != caller) { + prev_bcpp = &bcp->next; + bcp = bcp->next; + ASSERT(bcp); + } + ASSERT(bcp->count > 0); + if (--bcp->count != 0) { + ASSERT(!last); + } + else { + *prev_bcpp = bcp->next; + if (bcp == &tabp->pre_alloc_busy_caller) + bcp->caller = am_undefined; + else + erts_free(ERTS_ALC_T_BUSY_CALLER, bcp); + if (last) { +#ifdef DEBUG + erts_aint32_t flags = +#endif + erts_smp_atomic32_read_band_nob( + &pp->sched.flags, + ~ERTS_PTS_FLG_HAVE_BUSY_TASKS); + ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS); +#ifdef DEBUG + for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) { + ASSERT(!tabp->bucket[bix]); + } +#endif + busy_caller_table_free(tabp); + pp->sched.taskq.local.busy.first = NULL; + pp->sched.taskq.local.busy.last = NULL; + pp->sched.taskq.local.busy.table = NULL; + } + } +} + +static void +busy_wait_move_to_busy_queue(Port *pp, ErtsPortTask *ptp) +{ + ErtsPortTaskBusyCallerTable *tabp = 
pp->sched.taskq.local.busy.table; + Eterm caller = task_caller(ptp); + ErtsPortTaskBusyCaller *bcp; + int bix; + + ASSERT(is_internal_pid(caller)); + /* + * Port is busy and this task type needs to wait until not busy. + */ + + ASSERT(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY); + + ptp->u.alive.next = NULL; + if (pp->sched.taskq.local.busy.last) { + ASSERT(pp->sched.taskq.local.busy.first); + pp->sched.taskq.local.busy.last->u.alive.next = ptp; + } + else { + int i; + erts_aint32_t flags; + + pp->sched.taskq.local.busy.first = ptp; + flags = erts_smp_atomic32_read_bor_nob(&pp->sched.flags, + ERTS_PTS_FLG_HAVE_BUSY_TASKS); + ASSERT(!(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS)); + + ASSERT(!tabp); + + tabp = busy_caller_table_alloc(); + pp->sched.taskq.local.busy.table = tabp; + for (i = 0; i < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; i++) + tabp->bucket[i] = NULL; + tabp->pre_alloc_busy_caller.caller = am_undefined; + } + pp->sched.taskq.local.busy.last = ptp; + + bix = caller2bix(caller); + ASSERT(tabp); + bcp = tabp->bucket[bix]; + + while (bcp && bcp->caller != caller) + bcp = bcp->next; + + if (bcp) + bcp->count++; + else { + if (tabp->pre_alloc_busy_caller.caller == am_undefined) + bcp = &tabp->pre_alloc_busy_caller; + else + bcp = erts_alloc(ERTS_ALC_T_BUSY_CALLER, + sizeof(ErtsPortTaskBusyCaller)); + bcp->caller = caller; + bcp->count = 1; + bcp->next = tabp->bucket[bix]; + tabp->bucket[bix] = bcp; + } + + bcp->last = ptp; +} + +static ERTS_INLINE int +check_sig_dep_move_to_busy_queue(Port *pp, ErtsPortTask *ptp) +{ + ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table; + ErtsPortTask *last_ptp; + ErtsPortTaskBusyCaller *bcp; + int bix; + Eterm caller; + + ASSERT(ptp->u.alive.flags & ERTS_PT_FLG_SIG_DEP); + ASSERT(pp->sched.taskq.local.busy.last); + ASSERT(tabp); + + + /* + * We are either not busy, or the task does not imply wait on busy port. + * However, due to the signaling order requirements the task might depend + * on other tasks in the busy queue. + */ + + caller = task_caller(ptp); + bix = caller2bix(caller); + bcp = tabp->bucket[bix]; + while (bcp && bcp->caller != caller) + bcp = bcp->next; + + if (!bcp) + return 0; + + /* + * There are other tasks that we depend on in the busy queue; + * move into busy queue. + */ + + bcp->count++; + last_ptp = bcp->last; + ptp->u.alive.next = last_ptp->u.alive.next; + if (!ptp->u.alive.next) { + ASSERT(pp->sched.taskq.local.busy.last == last_ptp); + pp->sched.taskq.local.busy.last = ptp; + } + last_ptp->u.alive.next = ptp; + bcp->last = ptp; + + return 1; +} + +static void +no_sig_dep_move_from_busyq(Port *pp) +{ + ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table; + ErtsPortTask *first_ptp, *last_ptp, *ptp; + ErtsPortTaskBusyCaller **prev_bcpp = NULL, *bcp = NULL; + + /* + * Move tasks at the head of the busy queue that no longer + * have any dependencies to busy wait tasks into the ordinary + * queue. 
+ */ + + first_ptp = ptp = pp->sched.taskq.local.busy.first; + + ASSERT(ptp && !(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY)); + ASSERT(tabp); + + do { + Eterm caller = task_caller(ptp); + + if (!bcp || bcp->caller != caller) { + int bix = caller2bix(caller); + + prev_bcpp = &tabp->bucket[bix]; + bcp = tabp->bucket[bix]; + ASSERT(bcp); + while (bcp->caller != caller) { + ASSERT(bcp); + prev_bcpp = &bcp->next; + bcp = bcp->next; + } + } + + ASSERT(bcp->caller == caller); + ASSERT(bcp->count > 0); + + if (--bcp->count == 0) { + *prev_bcpp = bcp->next; + if (bcp == &tabp->pre_alloc_busy_caller) + bcp->caller = am_undefined; + else + erts_free(ERTS_ALC_T_BUSY_CALLER, bcp); + } + + last_ptp = ptp; + ptp = ptp->u.alive.next; + } while (ptp && !(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY)); + + pp->sched.taskq.local.busy.first = last_ptp->u.alive.next; + if (!pp->sched.taskq.local.busy.first) { +#ifdef DEBUG + int bix; + erts_aint32_t flags = +#endif + erts_smp_atomic32_read_band_nob( + &pp->sched.flags, + ~ERTS_PTS_FLG_HAVE_BUSY_TASKS); + ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS); +#ifdef DEBUG + for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) { + ASSERT(!tabp->bucket[bix]); + } +#endif + busy_caller_table_free(tabp); + pp->sched.taskq.local.busy.last = NULL; + pp->sched.taskq.local.busy.table = NULL; + } + last_ptp->u.alive.next = pp->sched.taskq.local.first; + pp->sched.taskq.local.first = first_ptp; +} + +#ifdef ERTS_HARD_DEBUG_TASK_QUEUES + +static void +chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue) +{ + Sint tot_count, tot_table_count; + int bix; + ErtsPortTask *ptp, *last; + ErtsPortTask *first = processing_busy_queue ? execq : pp->sched.taskq.local.busy.first; + ErtsPortTask *nb_task_queue = processing_busy_queue ? pp->sched.taskq.local.first : execq; + ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table; + ErtsPortTaskBusyCaller *bcp; + + if (!first) { + ASSERT(!tabp); + ASSERT(!pp->sched.taskq.local.busy.last); + ASSERT(!(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS)); + return; + } + + ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS); + ASSERT(tabp); + + tot_count = 0; + ptp = first; + while (ptp) { + Sint count = 0; + Eterm caller = task_caller(ptp); + int bix = caller2bix(caller); + for (bcp = tabp->bucket[bix]; bcp; bcp = bcp->next) + if (bcp->caller == caller) + break; + ASSERT(bcp && bcp->caller == caller); + + ASSERT(bcp->last); + while (1) { + ErtsPortTask *ptp2; + + ASSERT(caller == task_caller(ptp)); + count++; + tot_count++; + last = ptp; + + for (ptp2 = nb_task_queue; ptp2; ptp2 = ptp2->u.alive.next) { + ASSERT(ptp != ptp2); + } + + if (ptp == bcp->last) + break; + ptp = ptp->u.alive.next; + } + + ASSERT(count == bcp->count); + ptp = ptp->u.alive.next; + } + + tot_table_count = 0; + for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) { + for (bcp = tabp->bucket[bix]; bcp; bcp = bcp->next) + tot_table_count += bcp->count; + } + + ASSERT(tot_count == tot_table_count); + + ASSERT(last == pp->sched.taskq.local.busy.last); +} + +#endif /* ERTS_HARD_DEBUG_TASK_QUEUES */ /* * Task handle manipulation. 
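 *
 * A task handle is an atomic pointer slot supplied by the code that
 * schedules the task. set_handle() stores the task pointer with a
 * release barrier and handle2task() reads it back with an acquire
 * barrier, so a concurrent erts_port_task_abort() either sees NULL or a
 * fully initialized task; reset_port_task_handle() clears the slot when
 * the task is executed or aborted.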
*/ +static ERTS_INLINE void +reset_port_task_handle(ErtsPortTaskHandle *pthp) +{ + erts_smp_atomic_set_relb(pthp, (erts_aint_t) NULL); +} + static ERTS_INLINE ErtsPortTask * handle2task(ErtsPortTaskHandle *pthp) { - return (ErtsPortTask *) erts_smp_atomic_read_nob(pthp); + return (ErtsPortTask *) erts_smp_atomic_read_acqb(pthp); } static ERTS_INLINE void reset_handle(ErtsPortTask *ptp) { - if (ptp->handle) { - ASSERT(ptp == handle2task(ptp->handle)); - erts_smp_atomic_set_nob(ptp->handle, (erts_aint_t) NULL); + if (ptp->u.alive.handle) { + ASSERT(ptp == handle2task(ptp->u.alive.handle)); + reset_port_task_handle(ptp->u.alive.handle); } } static ERTS_INLINE void set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp) { - ptp->handle = pthp; + ptp->u.alive.handle = pthp; if (pthp) { - erts_smp_atomic_set_nob(pthp, (erts_aint_t) ptp); - ASSERT(ptp == handle2task(ptp->handle)); + erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp); + ASSERT(ptp == handle2task(ptp->u.alive.handle)); + } +} + + +/* + * Busy port queue management + */ + +static erts_aint32_t +check_unset_busy_port_q(Port *pp, + erts_aint32_t flags, + ErtsPortTaskBusyPortQ *bpq) +{ + ErlDrvSizeT qsize, low; + int resume_procs = 0; + + ASSERT(bpq); + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + + erts_port_task_sched_lock(&pp->sched); + qsize = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size); + low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low); + if (qsize < low) { + erts_aint32_t mask = ~(ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q + | ERTS_PTS_FLG_BUSY_PORT_Q); + flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, mask); + if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q) + resume_procs = 1; + } + else if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) { + flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, + ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); + flags &= ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q; + } + erts_port_task_sched_unlock(&pp->sched); + if (resume_procs) + erts_port_resume_procs(pp); + + return flags; +} + +static ERTS_INLINE void +aborted_proc2port_data(Port *pp, ErlDrvSizeT size) +{ + ErtsPortTaskBusyPortQ *bpq; + erts_aint32_t flags; + ErlDrvSizeT qsz; + + ASSERT(pp->sched.taskq.bpq); + + if (size == 0) + return; + + bpq = pp->sched.taskq.bpq; + + qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size, + (erts_aint_t) -size); + ASSERT(qsz + size > qsz); + flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + ASSERT(pp->sched.taskq.bpq); + if ((flags & (ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q + | ERTS_PTS_FLG_BUSY_PORT_Q)) != ERTS_PTS_FLG_BUSY_PORT_Q) + return; + if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) + erts_smp_atomic32_read_bor_nob(&pp->sched.flags, + ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); +} + +static ERTS_INLINE void +dequeued_proc2port_data(Port *pp, ErlDrvSizeT size) +{ + ErtsPortTaskBusyPortQ *bpq; + erts_aint32_t flags; + ErlDrvSizeT qsz; + + ASSERT(pp->sched.taskq.bpq); + + if (size == 0) + return; + + bpq = pp->sched.taskq.bpq; + + qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size, + (erts_aint_t) -size); + ASSERT(qsz + size > qsz); + flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q)) + return; + if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->low)) + check_unset_busy_port_q(pp, flags, bpq); +} + +static ERTS_INLINE erts_aint32_t +enqueue_proc2port_data(Port *pp, + ErtsProc2PortSigData *sigdp, + erts_aint32_t flags) +{ + ErtsPortTaskBusyPortQ *bpq = pp->sched.taskq.bpq; + if (sigdp && bpq) { + 
ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp); + if (size) { + erts_aint_t asize = erts_smp_atomic_add_read_acqb(&bpq->size, + (erts_aint_t) size); + ErlDrvSizeT qsz = (ErlDrvSizeT) asize; + + ASSERT(qsz - size < qsz); + + if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q) && qsz > bpq->high) { + flags = erts_smp_atomic32_read_bor_acqb(&pp->sched.flags, + ERTS_PTS_FLG_BUSY_PORT_Q); + flags |= ERTS_PTS_FLG_BUSY_PORT_Q; + qsz = (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->size); + if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) { + flags = (erts_smp_atomic32_read_bor_relb( + &pp->sched.flags, + ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)); + flags |= ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q; + } + } + ASSERT(!(flags & ERTS_PTS_FLG_EXIT)); + } + } + return flags; +} + +/* + * erl_drv_busy_msgq_limits() is called by drivers either reading or + * writing the limits. + * + * A limit of zero is interpreted as a read only request (using a + * limit of zero would not be useful). Other values are interpreted + * as a write-read request. + */ + +void +erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp) +{ + Port *pp = erts_drvport2port(dport, NULL); + ErtsPortTaskBusyPortQ *bpq = pp->sched.taskq.bpq; + int written = 0, resume_procs = 0; + ErlDrvSizeT low, high; + + if (!pp || !bpq) { + if (lowp) + *lowp = ERL_DRV_BUSY_MSGQ_DISABLED; + if (highp) + *highp = ERL_DRV_BUSY_MSGQ_DISABLED; + return; + } + + low = lowp ? *lowp : 0; + high = highp ? *highp : 0; + + erts_port_task_sched_lock(&pp->sched); + + if (low == ERL_DRV_BUSY_MSGQ_DISABLED + || high == ERL_DRV_BUSY_MSGQ_DISABLED) { + /* Disable busy msgq feature */ + erts_aint32_t flags; + pp->sched.taskq.bpq = NULL; + flags = ~(ERTS_PTS_FLG_BUSY_PORT_Q|ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); + flags = erts_smp_atomic32_read_band_acqb(&pp->sched.flags, flags); + if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q) + resume_procs = 1; + } + else { + + if (!low) + low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low); + else { + if (bpq->high < low) + bpq->high = low; + erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low); + written = 1; + } + + if (!high) + high = bpq->high; + else { + if (low > high) { + low = high; + erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low); + } + bpq->high = high; + written = 1; + } + + if (written) { + ErlDrvSizeT size = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size); + if (size > high) + erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + ERTS_PTS_FLG_BUSY_PORT_Q); + else if (size < low) + erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); + } + } + + erts_port_task_sched_unlock(&pp->sched); + + if (resume_procs) + erts_port_resume_procs(pp); + if (lowp) + *lowp = low; + if (highp) + *highp = high; +} + +/* + * No-suspend handles. 
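 *
 * A process-to-port signal scheduled with ERTS_PT_FLG_NOSUSPEND gets its
 * handle placed in an ErtsPortTaskHandleList that enqueue_task() links
 * into pp->sched.taskq.local.busy.nosuspend. If the port is, or becomes,
 * busy, erts_port_task_abort_nosuspend_tasks() walks that list and
 * completes each still-scheduled signal with
 * ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND instead of suspending the sender.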
+ */ + +#ifdef ERTS_SMP +static void +free_port_task_handle_list(void *vpthlp) +{ + erts_free(ERTS_ALC_T_PT_HNDL_LIST, vpthlp); +} +#endif + +static void +schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp) +{ +#ifdef ERTS_SMP + erts_schedule_thr_prgr_later_op(free_port_task_handle_list, + (void *) pthlp, + &pthlp->u.release); +#else + erts_free(ERTS_ALC_T_PT_HNDL_LIST, pthlp); +#endif +} + +static ERTS_INLINE void +abort_nosuspend_task(Port *pp, + ErtsPortTaskType type, + ErtsPortTaskTypeData *tdp) +{ + + ASSERT(type == ERTS_PORT_TASK_PROC_SIG); + + if (!pp->sched.taskq.bpq) + tdp->psig.callback(NULL, + ERTS_PORT_SFLG_INVALID, + ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND, + &tdp->psig.data); + else { + ErlDrvSizeT size = erts_proc2port_sig_command_data_size(&tdp->psig.data); + tdp->psig.callback(NULL, + ERTS_PORT_SFLG_INVALID, + ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND, + &tdp->psig.data); + aborted_proc2port_data(pp, size); + } +} + +static ErtsPortTaskHandleList * +get_free_nosuspend_handles(Port *pp) +{ + ErtsPortTaskHandleList *nshp, *last_nshp = NULL; + + ERTS_SMP_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched)); + + nshp = pp->sched.taskq.local.busy.nosuspend; + + while (nshp && !erts_port_task_is_scheduled(&nshp->handle)) { + last_nshp = nshp; + nshp = nshp->u.next; + } + + if (!last_nshp) + nshp = NULL; + else { + nshp = pp->sched.taskq.local.busy.nosuspend; + pp->sched.taskq.local.busy.nosuspend = last_nshp->u.next; + last_nshp->u.next = NULL; + if (!pp->sched.taskq.local.busy.nosuspend) + erts_smp_atomic32_read_band_nob(&pp->sched.flags, + ~ERTS_PTS_FLG_HAVE_NS_TASKS); + } + return nshp; +} + +static void +free_nosuspend_handles(ErtsPortTaskHandleList *free_nshp) +{ + while (free_nshp) { + ErtsPortTaskHandleList *nshp = free_nshp; + free_nshp = free_nshp->u.next; + schedule_port_task_handle_list_free(nshp); } } @@ -161,7 +844,6 @@ enqueue_port(ErtsRunQueue *runq, Port *pp) { ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); pp->sched.next = NULL; - pp->sched.in_runq = 1; if (runq->ports.end) { ASSERT(runq->ports.start); runq->ports.end->sched.next = pp; @@ -199,285 +881,423 @@ pop_port(ErtsRunQueue *runq) return pp; } +/* + * Task queue operations + */ -#ifdef HARD_DEBUG +static ERTS_INLINE int +enqueue_task(Port *pp, + ErtsPortTask *ptp, + ErtsProc2PortSigData *sigdp, + ErtsPortTaskHandleList *ns_pthlp, + erts_aint32_t *flagsp) -static void -check_port_queue(ErtsRunQueue *runq, Port *chk_pp, int inq) { - Port *pp; - Port *last_pp; - Port *first_pp = runq->ports.start; - int no_forward = 0, no_backward = 0; - int found_forward = 0, found_backward = 0; - if (!first_pp) { - ASSERT(!runq->ports.end); - } + int res; + erts_aint32_t fail_flags = ERTS_PTS_FLG_EXIT; + erts_aint32_t flags; + ptp->u.alive.next = NULL; + if (ns_pthlp) + fail_flags |= ERTS_PTS_FLG_BUSY_PORT; + erts_port_task_sched_lock(&pp->sched); + flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + if (flags & fail_flags) + res = 0; else { - ASSERT(!first_pp->sched.prev); - for (pp = first_pp; pp; pp = pp->sched.next) { - ASSERT(pp->sched.taskq); - if (pp->sched.taskq->first) - no_forward++; - if (chk_pp == pp) - found_forward = 1; - if (!pp->sched.prev) { - ASSERT(first_pp == pp); - } - if (!pp->sched.next) { - ASSERT(runq->ports.end == pp); - last_pp = pp; - } - } - for (pp = last_pp; pp; pp = pp->sched.prev) { - ASSERT(pp->sched.taskq); - if (pp->sched.taskq->last) - no_backward++; - if (chk_pp == pp) - found_backward = 1; - if (!pp->sched.prev) { - ASSERT(first_pp == pp); - } - if 
(!pp->sched.next) { - ASSERT(runq->ports.end == pp); - } - check_task_queue(pp->sched.taskq, NULL, 0); + if (ns_pthlp) { + ns_pthlp->u.next = pp->sched.taskq.local.busy.nosuspend; + pp->sched.taskq.local.busy.nosuspend = ns_pthlp; } - ASSERT(no_forward == no_backward); - } - ASSERT(no_forward == RUNQ_READ_LEN(&runq->ports.info.len)); - if (chk_pp) { - if (chk_pp->sched.taskq || chk_pp->sched.exe_taskq) { - ASSERT(chk_pp->sched.taskq != chk_pp->sched.exe_taskq); - } - ASSERT(!chk_pp->sched.taskq || chk_pp->sched.taskq->first); - if (inq < 0) - inq = chk_pp->sched.taskq && !chk_pp->sched.exe_taskq; - if (inq) { - ASSERT(found_forward && found_backward); + if (pp->sched.taskq.in.last) { + ASSERT(pp->sched.taskq.in.first); + ASSERT(!pp->sched.taskq.in.last->u.alive.next); + + pp->sched.taskq.in.last->u.alive.next = ptp; } else { - ASSERT(!found_forward && !found_backward); - } - } -} - -#endif + ASSERT(!pp->sched.taskq.in.first); -/* - * Task queue operations - */ - -static ERTS_INLINE ErtsPortTaskQueue * -port_taskq_init(ErtsPortTaskQueue *ptqp, Port *pp) -{ - if (ptqp) { - ptqp->first = NULL; - ptqp->last = NULL; - ptqp->port = pp; + pp->sched.taskq.in.first = ptp; + } + pp->sched.taskq.in.last = ptp; + flags = enqueue_proc2port_data(pp, sigdp, flags); + res = 1; } - return ptqp; + erts_port_task_sched_unlock(&pp->sched); + *flagsp = flags; + return res; } static ERTS_INLINE void -enqueue_task(ErtsPortTaskQueue *ptqp, ErtsPortTask *ptp) +prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) { - ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp); - ptp->next = NULL; - ptp->prev = ptqp->last; - ptp->queue = ptqp; - if (ptqp->last) { - ASSERT(ptqp->first); - ptqp->last->next = ptp; + erts_aint32_t act = erts_smp_atomic32_read_nob(&pp->sched.flags); + + if (!pp->sched.taskq.local.busy.first || (act & ERTS_PTS_FLG_BUSY_PORT)) { + *execqp = pp->sched.taskq.local.first; + *processing_busy_q_p = 0; } else { - ASSERT(!ptqp->first); - ptqp->first = ptp; + *execqp = pp->sched.taskq.local.busy.first; + *processing_busy_q_p = 1; + } + + ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p); + + while (1) { + erts_aint32_t new, exp; + + new = exp = act; + + new &= ~ERTS_PTS_FLG_IN_RUNQ; + new |= ERTS_PTS_FLG_EXEC; + + act = erts_smp_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp); + + ASSERT(act & ERTS_PTS_FLG_IN_RUNQ); + + if (exp == act) + break; } - ptqp->last = ptp; - ERTS_PT_CHK_IN_TASKQ(ptqp, ptp); } -static ERTS_INLINE void -push_task(ErtsPortTaskQueue *ptqp, ErtsPortTask *ptp) +/* finalize_exec() return value != 0 if port should remain active */ +static ERTS_INLINE int +finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) { - ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp); - ptp->next = ptqp->first; - ptp->prev = NULL; - ptp->queue = ptqp; - if (ptqp->first) { - ASSERT(ptqp->last); - ptqp->first->prev = ptp; - } + erts_aint32_t act; + + if (!processing_busy_q) + pp->sched.taskq.local.first = *execq; else { - ASSERT(!ptqp->last); - ptqp->last = ptp; + pp->sched.taskq.local.busy.first = *execq; + ASSERT(*execq); } - ptqp->first = ptp; - ERTS_PT_CHK_IN_TASKQ(ptqp, ptp); + + ERTS_PT_DBG_CHK_TASK_QS(pp, *execq, processing_busy_q); + + *execq = NULL; + + act = erts_smp_atomic32_read_nob(&pp->sched.flags); + if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) + act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq); + + while (1) { + erts_aint32_t new, exp; + + new = exp = act; + + new &= ~ERTS_PTS_FLG_EXEC; + if (act & ERTS_PTS_FLG_HAVE_TASKS) + new |= ERTS_PTS_FLG_IN_RUNQ; + + act = 
erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); + + ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ)); + + if (exp == act) + break; + } + + return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0; } -static ERTS_INLINE void -dequeue_task(ErtsPortTask *ptp) +static ERTS_INLINE erts_aint32_t +select_queue_for_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) { - ASSERT(ptp); - ASSERT(ptp->queue); - ERTS_PT_CHK_IN_TASKQ(ptp->queue, ptp); - if (ptp->next) - ptp->next->prev = ptp->prev; - else { - ASSERT(ptp->queue->last == ptp); - ptp->queue->last = ptp->prev; + erts_aint32_t flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + + if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) + flags = check_unset_busy_port_q(pp, flags, pp->sched.taskq.bpq); + + ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p); + + if (flags & ERTS_PTS_FLG_BUSY_PORT) { + if (*processing_busy_q_p) { + ErtsPortTask *ptp; + + ptp = pp->sched.taskq.local.busy.first = *execqp; + if (!ptp) + pp->sched.taskq.local.busy.last = NULL; + else if (!(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY)) + no_sig_dep_move_from_busyq(pp); + + *execqp = pp->sched.taskq.local.first; + *processing_busy_q_p = 0; + + ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p); + } + + return flags; } - if (ptp->prev) - ptp->prev->next = ptp->next; - else { - ASSERT(ptp->queue->first == ptp); - ptp->queue->first = ptp->next; + + /* Not busy */ + + if (!*processing_busy_q_p && pp->sched.taskq.local.busy.first) { + pp->sched.taskq.local.first = *execqp; + *execqp = pp->sched.taskq.local.busy.first; + *processing_busy_q_p = 1; + + ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p); } - ASSERT(ptp->queue->first || !ptp->queue->last); - ASSERT(ptp->queue->last || !ptp->queue->first); - ERTS_PT_CHK_NOT_IN_TASKQ(ptp->queue, ptp); + return flags; } -static ERTS_INLINE ErtsPortTask * -pop_task(ErtsPortTaskQueue *ptqp) +/* + * check_task_for_exec() returns a value !0 if the task + * is ok to execute; otherwise 0. 
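 *
 * While the normal queue is being processed, a task is instead moved to
 * the busy queue (and 0 is returned) either when the port is busy and
 * the task has ERTS_PT_FLG_WAIT_BUSY set, or when the task has
 * ERTS_PT_FLG_SIG_DEP set and an earlier task from the same caller is
 * already waiting in the busy queue. While the busy queue is being
 * processed, the task is only unlinked from the busy-caller bookkeeping
 * before being executed.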
+ */ +static ERTS_INLINE int +check_task_for_exec(Port *pp, + erts_aint32_t flags, + ErtsPortTask **execqp, + int *processing_busy_q_p, + ErtsPortTask *ptp) { - ErtsPortTask *ptp = ptqp->first; - if (!ptp) { - ASSERT(!ptqp->last); + + if (!*processing_busy_q_p) { + /* Processing normal queue */ + + ERTS_PT_DBG_CHK_TASK_QS(pp, ptp, *processing_busy_q_p); + + if ((flags & ERTS_PTS_FLG_BUSY_PORT) + && (ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY)) { + + busy_wait_move_to_busy_queue(pp, ptp); + ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p); + + return 0; + } + + if (pp->sched.taskq.local.busy.last + && (ptp->u.alive.flags & ERTS_PT_FLG_SIG_DEP)) { + + int res = !check_sig_dep_move_to_busy_queue(pp, ptp); + ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p); + + return res; + } + } else { - ERTS_PT_CHK_IN_TASKQ(ptqp, ptp); - ASSERT(!ptp->prev); - ptqp->first = ptp->next; - if (ptqp->first) - ptqp->first->prev = NULL; - else { - ASSERT(ptqp->last == ptp); - ptqp->last = NULL; + /* Processing busy queue */ + + ASSERT(!(flags & ERTS_PTS_FLG_BUSY_PORT)); + + ERTS_PT_DBG_CHK_TASK_QS(pp, ptp, *processing_busy_q_p); + + popped_from_busy_queue(pp, ptp, !*execqp); + + if (!*execqp) { + *execqp = pp->sched.taskq.local.first; + *processing_busy_q_p = 0; } - ASSERT(ptp->queue->first || !ptp->queue->last); - ASSERT(ptp->queue->last || !ptp->queue->first); + + ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p); + } - ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp); - return ptp; + + return 1; } -#ifdef HARD_DEBUG +static ErtsPortTask * +fetch_in_queue(Port *pp, ErtsPortTask **execqp) +{ + ErtsPortTask *ptp; + ErtsPortTaskHandleList *free_nshp = NULL; + + erts_port_task_sched_lock(&pp->sched); -static void -check_task_queue(ErtsPortTaskQueue *ptqp, - ErtsPortTask *chk_ptp, - int inq) + ptp = pp->sched.taskq.in.first; + pp->sched.taskq.in.first = NULL; + pp->sched.taskq.in.last = NULL; + if (ptp) + *execqp = ptp->u.alive.next; + else + erts_smp_atomic32_read_band_nob(&pp->sched.flags, + ~ERTS_PTS_FLG_HAVE_TASKS); + + + if (pp->sched.taskq.local.busy.nosuspend) + free_nshp = get_free_nosuspend_handles(pp); + + erts_port_task_sched_unlock(&pp->sched); + + if (free_nshp) + free_nosuspend_handles(free_nshp); + + return ptp; +} + +static ERTS_INLINE ErtsPortTask * +select_task_for_exec(Port *pp, + ErtsPortTask **execqp, + int *processing_busy_q_p) { ErtsPortTask *ptp; - ErtsPortTask *last_ptp; - ErtsPortTask *first_ptp = ptqp->first; - int found_forward = 0, found_backward = 0; - if (!first_ptp) { - ASSERT(!ptqp->last); - } - else { - ASSERT(!first_ptp->prev); - for (ptp = first_ptp; ptp; ptp = ptp->next) { - ASSERT(ptp->queue == ptqp); - if (chk_ptp == ptp) - found_forward = 1; - if (!ptp->prev) { - ASSERT(first_ptp == ptp); - } - if (!ptp->next) { - ASSERT(ptqp->last == ptp); - last_ptp = ptp; - } - } - for (ptp = last_ptp; ptp; ptp = ptp->prev) { - ASSERT(ptp->queue == ptqp); - if (chk_ptp == ptp) - found_backward = 1; - if (!ptp->prev) { - ASSERT(first_ptp == ptp); - } - if (!ptp->next) { - ASSERT(ptqp->last == ptp); - } - } - } - if (chk_ptp) { - if (inq) { - ASSERT(found_forward && found_backward); - } + erts_aint32_t flags; + + flags = select_queue_for_exec(pp, execqp, processing_busy_q_p); + + while (1) { + ptp = *execqp; + if (ptp) + *execqp = ptp->u.alive.next; else { - ASSERT(!found_forward && !found_backward); + ptp = fetch_in_queue(pp, execqp); + if (!ptp) + return NULL; } + if (check_task_for_exec(pp, flags, execqp, processing_busy_q_p, ptp)) + return ptp; } } -#endif /* * Abort a 
scheduled task. */ int -erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp) +erts_port_task_abort(ErtsPortTaskHandle *pthp) { - ErtsRunQueue *runq; - ErtsPortTaskQueue *ptqp; + int res; ErtsPortTask *ptp; - Port *pp; - - pp = &erts_port[internal_port_index(id)]; - runq = erts_port_runq(pp); - if (!runq) - return 1; +#ifdef ERTS_SMP + ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay(); +#endif ptp = handle2task(pthp); + if (!ptp) + res = -1; + else { + erts_aint32_t old_state; + +#ifdef DEBUG + ErtsPortTaskHandle *saved_pthp = ptp->u.alive.handle; + ERTS_SMP_READ_MEMORY_BARRIER; + old_state = erts_smp_atomic32_read_nob(&ptp->state); + if (old_state == ERTS_PT_STATE_SCHEDULED) { + ASSERT(saved_pthp == pthp); + } +#endif - if (!ptp) { - erts_smp_runq_unlock(runq); - return 1; + old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + ERTS_PT_STATE_ABORTED, + ERTS_PT_STATE_SCHEDULED); + if (old_state != ERTS_PT_STATE_SCHEDULED) + res = - 1; /* Task already aborted, executing, or executed */ + else { + + reset_port_task_handle(pthp); + + switch (ptp->type) { + case ERTS_PORT_TASK_INPUT: + case ERTS_PORT_TASK_OUTPUT: + case ERTS_PORT_TASK_EVENT: + ASSERT(erts_smp_atomic_read_nob( + &erts_port_task_outstanding_io_tasks) > 0); + erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks); + break; + case ERTS_PORT_TASK_PROC_SIG: + ERTS_INTERNAL_ERROR("Aborted process to port signal"); + break; + default: + break; + } + + res = 0; + } } - ASSERT(ptp->handle == pthp); - ptqp = ptp->queue; - ASSERT(pp == ptqp->port); +#ifdef ERTS_SMP + erts_thr_progress_unmanaged_continue(dhndl); +#endif - ERTS_PT_CHK_PRES_PORTQ(runq, pp); - ASSERT(ptqp); - ASSERT(ptqp->first); + return res; +} - dequeue_task(ptp); - reset_handle(ptp); +void +erts_port_task_abort_nosuspend_tasks(Port *pp) +{ + erts_aint32_t flags; + ErtsPortTaskHandleList *abort_list; +#ifdef ERTS_SMP + ErtsThrPrgrDelayHandle dhndl = ERTS_THR_PRGR_DHANDLE_INVALID; +#endif - switch (ptp->type) { - case ERTS_PORT_TASK_INPUT: - case ERTS_PORT_TASK_OUTPUT: - case ERTS_PORT_TASK_EVENT: - ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) > 0); - erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks); - break; - default: - break; - } + erts_port_task_sched_lock(&pp->sched); + flags = erts_smp_atomic32_read_band_nob(&pp->sched.flags, + ~ERTS_PTS_FLG_HAVE_NS_TASKS); + abort_list = pp->sched.taskq.local.busy.nosuspend; + pp->sched.taskq.local.busy.nosuspend = NULL; + erts_port_task_sched_unlock(&pp->sched); - ASSERT(ptqp == pp->sched.taskq || ptqp == pp->sched.exe_taskq); + while (abort_list) { +#ifdef DEBUG + ErtsPortTaskHandle *saved_pthp; +#endif + ErtsPortTaskType type; + ErtsPortTaskTypeData td; + ErtsPortTaskHandle *pthp; + ErtsPortTask *ptp; + ErtsPortTaskHandleList *pthlp; + erts_aint32_t old_state; - if (ptqp->first || pp->sched.taskq != ptqp) - ptqp = NULL; - else - pp->sched.taskq = NULL; + pthlp = abort_list; + abort_list = pthlp->u.next; - ERTS_PT_CHK_PRES_PORTQ(runq, pp); +#ifdef ERTS_SMP + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + dhndl = erts_thr_progress_unmanaged_delay(); +#endif - erts_smp_runq_unlock(runq); + pthp = &pthlp->handle; + ptp = handle2task(pthp); + if (!ptp) { +#ifdef ERTS_SMP + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_thr_progress_unmanaged_continue(dhndl); +#endif + schedule_port_task_handle_list_free(pthlp); + continue; + } - port_task_free(ptp); - if (ptqp) - port_taskq_free(ptqp); +#ifdef DEBUG + saved_pthp = ptp->u.alive.handle; + 
ERTS_SMP_READ_MEMORY_BARRIER; + old_state = erts_smp_atomic32_read_nob(&ptp->state); + if (old_state == ERTS_PT_STATE_SCHEDULED) { + ASSERT(saved_pthp == pthp); + } +#endif - return 0; + old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + ERTS_PT_STATE_ABORTED, + ERTS_PT_STATE_SCHEDULED); + if (old_state != ERTS_PT_STATE_SCHEDULED) { + /* Task already aborted, executing, or executed */ +#ifdef ERTS_SMP + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_thr_progress_unmanaged_continue(dhndl); +#endif + schedule_port_task_handle_list_free(pthlp); + continue; + } + + reset_port_task_handle(pthp); + + type = ptp->type; + td = ptp->u.alive.td; + +#ifdef ERTS_SMP + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_thr_progress_unmanaged_continue(dhndl); +#endif + schedule_port_task_handle_list_free(pthlp); + + abort_nosuspend_task(pp, type, &td); + } } /* @@ -488,243 +1308,265 @@ int erts_port_task_schedule(Eterm id, ErtsPortTaskHandle *pthp, ErtsPortTaskType type, - ErlDrvEvent event, - ErlDrvEventData event_data) + ...) { + ErtsProc2PortSigData *sigdp = NULL; + ErtsPortTaskHandleList *ns_pthlp = NULL; +#ifdef ERTS_SMP + ErtsRunQueue *xrunq; + ErtsThrPrgrDelayHandle dhndl; +#endif ErtsRunQueue *runq; Port *pp; - ErtsPortTask *ptp; - int enq_port = 0; - - /* - * NOTE: We might not have the port lock here. We are only - * allowed to access the 'sched', 'tab_status', - * and 'id' fields of the port struct while - * tasks_lock is held. - */ + ErtsPortTask *ptp = NULL; + erts_aint32_t act, add_flags; if (pthp && erts_port_task_is_scheduled(pthp)) { ASSERT(0); - erts_port_task_abort(id, pthp); + erts_port_task_abort(pthp); } - ptp = port_task_alloc(); - ASSERT(is_internal_port(id)); - pp = &erts_port[internal_port_index(id)]; - runq = erts_port_runq(pp); - - if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) { - if (runq) - erts_smp_runq_unlock(runq); - return -1; - } - ASSERT(!erts_port_task_is_scheduled(pthp)); +#ifdef ERTS_SMP + dhndl = erts_thr_progress_unmanaged_delay(); +#endif - ERTS_PT_CHK_PRES_PORTQ(runq, pp); + pp = erts_port_lookup_raw(id); - if (!pp->sched.taskq && !pp->sched.in_runq && !pp->sched.exe_taskq) { #ifdef ERTS_SMP - ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); - if (xrunq) { - /* Port emigrated ... 
*/ - erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); - erts_smp_runq_unlock(runq); - runq = erts_port_runq(pp); - if (!runq) - return -1; - } - enq_port = !pp->sched.taskq && !pp->sched.in_runq && !pp->sched.exe_taskq; -#else - enq_port = 1; -#endif + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) { + if (pp) + erts_port_inc_refc(pp); + erts_thr_progress_unmanaged_continue(dhndl); } +#endif - ASSERT(!enq_port - || !(ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_SUSPENDED)); + if (!pp) + goto fail; - if (!pp->sched.taskq) - pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp); + if (type != ERTS_PORT_TASK_PROC_SIG) { + ptp = port_task_alloc(); - ASSERT(ptp); + ptp->type = type; + ptp->u.alive.flags = 0; - ptp->type = type; - ptp->event = event; - ptp->event_data = event_data; + erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); - set_handle(ptp, pthp); + set_handle(ptp, pthp); + } switch (type) { - case ERTS_PORT_TASK_FREE: - erl_exit(ERTS_ABORT_EXIT, - "erts_port_task_schedule(): Cannot schedule free task\n"); - break; case ERTS_PORT_TASK_INPUT: - case ERTS_PORT_TASK_OUTPUT: - case ERTS_PORT_TASK_EVENT: + case ERTS_PORT_TASK_OUTPUT: { + va_list argp; + va_start(argp, type); + ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent); + va_end(argp); + erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); + break; + } + case ERTS_PORT_TASK_EVENT: { + va_list argp; + va_start(argp, type); + ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent); + ptp->u.alive.td.io.event_data = va_arg(argp, ErlDrvEventData); + va_end(argp); erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); - /* Fall through... */ + break; + } + case ERTS_PORT_TASK_PROC_SIG: { + va_list argp; + ASSERT(!pthp); + va_start(argp, type); + sigdp = va_arg(argp, ErtsProc2PortSigData *); + ptp = p2p_sig_data_to_task(sigdp); + ptp->u.alive.td.psig.callback = va_arg(argp, ErtsProc2PortSigCallback); + ptp->u.alive.flags |= va_arg(argp, int); + va_end(argp); + if (!(ptp->u.alive.flags & ERTS_PT_FLG_NOSUSPEND)) + set_handle(ptp, pthp); + else { + ns_pthlp = erts_alloc(ERTS_ALC_T_PT_HNDL_LIST, + sizeof(ErtsPortTaskHandleList)); + set_handle(ptp, &ns_pthlp->handle); + } + break; + } default: - enqueue_task(pp->sched.taskq, ptp); break; } -#ifndef ERTS_SMP - /* - * When (!enq_port && !pp->sched.exe_taskq) is true in the smp case, - * the port might not be in the run queue. If this is the case, another - * thread is in the process of enqueueing the port. This very seldom - * occur, but do occur and is a valid scenario. Debug info showing this - * enqueue in progress must be introduced before we can enable (modified - * versions of these) assertions in the smp case again. 
- */ -#if defined(HARD_DEBUG) - if (pp->sched.exe_taskq || enq_port) - ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp); - else - ERTS_PT_CHK_IN_PORTQ(runq, pp); -#elif defined(DEBUG) - if (!enq_port && !pp->sched.exe_taskq) { - /* We should be in port run q */ - ASSERT(pp->sched.in_runq); + if (!enqueue_task(pp, ptp, sigdp, ns_pthlp, &act)) { + reset_handle(ptp); + if (ns_pthlp && !(act & ERTS_PTS_FLG_EXIT)) + goto abort_nosuspend; + else + goto fail; } -#endif -#endif - if (!enq_port) { - ERTS_PT_CHK_PRES_PORTQ(runq, pp); - erts_smp_runq_unlock(runq); - } - else { - enqueue_port(runq, pp); - ERTS_PT_CHK_PRES_PORTQ(runq, pp); - - if (erts_system_profile_flags.runnable_ports) { - profile_runnable_port(pp, am_active); + add_flags = ERTS_PTS_FLG_HAVE_TASKS; + if (ns_pthlp) + add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS; + + while (1) { + erts_aint32_t new, exp; + + if ((act & add_flags) == add_flags + && (act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) + goto done; /* Done */ + + new = exp = act; + new |= add_flags; + if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) + new |= ERTS_PTS_FLG_IN_RUNQ; + + act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); + + if (exp == act) { + if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) + break; /* Need to enqueue port */ + goto done; /* Done */ } + if (act & ERTS_PTS_FLG_EXIT) + goto done; /* Died after our task insert... */ + } + + /* Enqueue port on run-queue */ + + runq = erts_port_runq(pp); + if (!runq) + ERTS_INTERNAL_ERROR("Missing run-queue"); + +#ifdef ERTS_SMP + xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); + if (xrunq) { + /* Port emigrated ... */ + erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); erts_smp_runq_unlock(runq); + runq = erts_port_runq(pp); + if (!runq) + ERTS_INTERNAL_ERROR("Missing run-queue"); + } +#endif - erts_smp_notify_inc_runq(runq); + enqueue_port(runq, pp); + + if (erts_system_profile_flags.runnable_ports) { + profile_runnable_port(pp, am_active); } + + erts_smp_runq_unlock(runq); + + erts_smp_notify_inc_runq(runq); + +done: + +#ifdef ERTS_SMP + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_port_dec_refc(pp); +#endif + return 0; + +abort_nosuspend: + +#ifdef ERTS_SMP + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_port_dec_refc(pp); +#endif + + abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td); + + ASSERT(ns_pthlp); + erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp); + if (ptp) + port_task_free(ptp); + + return 0; + +fail: + +#ifdef ERTS_SMP + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_port_dec_refc(pp); +#endif + + if (ns_pthlp) + erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp); + + if (ptp) + port_task_free(ptp); + + return -1; } void erts_port_task_free_port(Port *pp) { + ErtsProcList *suspended; + erts_aint32_t flags; ErtsRunQueue *runq; - ErtsPortTaskQueue *ptqp; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); - ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD)); + ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD)); + runq = erts_port_runq(pp); - ASSERT(runq); - ERTS_PT_CHK_PRES_PORTQ(runq, pp); - ptqp = pp->sched.exe_taskq; - if (ptqp) { - /* I (this thread) am currently executing this port, free it - when scheduled out... 
*/ - ErtsPortTask *ptp; - enqueue_free: - ptp = port_task_alloc(); - erts_smp_port_state_lock(pp); - pp->status &= ~ERTS_PORT_SFLG_CLOSING; - pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED; - erts_may_save_closed_port(pp); - erts_smp_port_state_unlock(pp); - ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 1); - ptp->type = ERTS_PORT_TASK_FREE; - ptp->event = (ErlDrvEvent) -1; - ptp->event_data = NULL; - set_handle(ptp, NULL); - push_task(ptqp, ptp); - ERTS_PT_CHK_PRES_PORTQ(runq, pp); - erts_smp_runq_unlock(runq); - } - else { - if (pp->sched.in_runq) { - ptqp = pp->sched.taskq; - if (!ptqp) - pp->sched.taskq = ptqp = port_taskq_init(port_taskq_alloc(), pp); - goto enqueue_free; - } - ASSERT(!pp->sched.taskq); - erts_smp_port_state_lock(pp); - pp->status &= ~ERTS_PORT_SFLG_CLOSING; - pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED; - erts_may_save_closed_port(pp); - erts_smp_port_state_unlock(pp); - erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */ - ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */ - handle_remaining_tasks(runq, pp); /* May release runq lock */ - ASSERT(!pp->sched.exe_taskq && (!ptqp || !ptqp->first)); - pp->sched.taskq = NULL; - ERTS_PT_CHK_PRES_PORTQ(runq, pp); - erts_smp_runq_unlock(runq); - } -} + if (!runq) + ERTS_INTERNAL_ERROR("Missing run-queue"); + erts_port_task_sched_lock(&pp->sched); + flags = erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + ERTS_PTS_FLG_EXIT); + suspended = pp->suspended; + pp->suspended = NULL; + erts_port_task_sched_unlock(&pp->sched); + erts_atomic32_read_bset_relb(&pp->state, + (ERTS_PORT_SFLG_CLOSING + | ERTS_PORT_SFLG_FREE), + ERTS_PORT_SFLG_FREE); -typedef struct { - ErtsRunQueue *runq; - int *resp; -} ErtsPortTaskExeBlockData; + erts_smp_runq_unlock(runq); + + if (erts_proclist_fetch(&suspended, NULL)) + erts_resume_processes(suspended); + + if (!(flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) + begin_port_cleanup(pp, NULL, NULL); +} /* - * Run all scheduled tasks for the first port in run queue. If - * new tasks appear while running reschedule port (free task is - * an exception; it is always handled instantly). + * Execute scheduled tasks of a port. * * erts_port_task_execute() is called by scheduler threads between - * scheduleing of processes. Sched lock should be held by caller. + * scheduling of processes. Run-queue lock should be held by caller. 
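 *
 * The run-queue lock is dropped while the port's tasks run. Tasks are
 * picked by select_task_for_exec() until the accumulated reductions
 * (real plus ERTS_PORT_CALLBACK_VREDS per callback) reach CONTEXT_REDS,
 * or until the port has been marked ERTS_PORT_SFLG_FREE and
 * begin_port_cleanup() takes over; finalize_exec() then decides whether
 * the port goes back on a run queue.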
*/ int erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) { Port *pp; - ErtsPortTaskQueue *ptqp; - ErtsPortTask *ptp; + ErtsPortTask *execq; + int processing_busy_q; int res = 0; + int vreds = 0; int reds = ERTS_PORT_REDS_EXECUTE; erts_aint_t io_tasks_executed = 0; int fpe_was_unmasked; + erts_aint32_t state; + int active; ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); - ERTS_PT_CHK_PORTQ(runq); - pp = pop_port(runq); if (!pp) { res = 0; goto done; } - ASSERT(pp->sched.in_runq); - pp->sched.in_runq = 0; - if (!pp->sched.taskq) { - if (erts_system_profile_flags.runnable_ports) - profile_runnable_port(pp, am_inactive); - res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) - != (erts_aint_t) 0); - goto done; - } + erts_smp_runq_unlock(runq); *curr_port_pp = pp; - - ASSERT(pp->sched.taskq->first); - ptqp = pp->sched.taskq; - pp->sched.taskq = NULL; - - ASSERT(!pp->sched.exe_taskq); - pp->sched.exe_taskq = ptqp; - - if (erts_smp_port_trylock(pp) == EBUSY) { - erts_smp_runq_unlock(runq); - erts_smp_port_lock(pp); - erts_smp_runq_lock(runq); - } if (erts_sched_stat.enabled) { ErtsSchedulerData *esdp = erts_get_scheduler_data(); @@ -741,77 +1583,94 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) erts_smp_spin_unlock(&erts_sched_stat.lock); } + prepare_exec(pp, &execq, &processing_busy_q); + + erts_smp_port_lock(pp); + /* trace port scheduling, in */ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { trace_sched_ports(pp, am_in); } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + fpe_was_unmasked = erts_block_fpe(); - ERTS_PT_CHK_PRES_PORTQ(runq, pp); - ptp = pop_task(ptqp); + state = erts_atomic32_read_nob(&pp->state); + goto begin_handle_tasks; - fpe_was_unmasked = erts_block_fpe(); + while (1) { + erts_aint32_t task_state; + ErtsPortTask *ptp; - while (ptp) { - ASSERT(pp->sched.taskq != pp->sched.exe_taskq); + ptp = select_task_for_exec(pp, &execq, &processing_busy_q); + if (!ptp) + break; + + task_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + ERTS_PT_STATE_EXECUTING, + ERTS_PT_STATE_SCHEDULED); + if (task_state != ERTS_PT_STATE_SCHEDULED) { + ASSERT(task_state == ERTS_PT_STATE_ABORTED); + goto aborted_port_task; + } reset_handle(ptp); - erts_smp_runq_unlock(runq); ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); ERTS_SMP_CHK_NO_PROC_LOCKS; ASSERT(pp->drv_ptr); switch (ptp->type) { - case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */ - reds += ERTS_PORT_REDS_FREE; - erts_smp_runq_lock(runq); - - erts_unblock_fpe(fpe_was_unmasked); - ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED); - if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first)) - handle_remaining_tasks(runq, pp); - ASSERT(!ptqp->first - && (!pp->sched.taskq || !pp->sched.taskq->first)); - erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */ - ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */ - - port_task_free(ptp); - if (pp->sched.taskq) - port_taskq_free(pp->sched.taskq); - pp->sched.taskq = NULL; - - goto tasks_done; case ERTS_PORT_TASK_TIMEOUT: reds += ERTS_PORT_REDS_TIMEOUT; - if (!(pp->status & ERTS_PORT_SFLGS_DEAD)) { + if (!(state & ERTS_PORT_SFLGS_DEAD)) { DTRACE_DRIVER(driver_timeout, pp); (*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data); } break; case ERTS_PORT_TASK_INPUT: reds += ERTS_PORT_REDS_INPUT; - ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0); + ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0); DTRACE_DRIVER(driver_ready_input, pp); /* NOTE some windows drivers use ->ready_input for input and output */ - 
(*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event); + (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, + ptp->u.alive.td.io.event); io_tasks_executed++; break; case ERTS_PORT_TASK_OUTPUT: reds += ERTS_PORT_REDS_OUTPUT; - ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0); + ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0); DTRACE_DRIVER(driver_ready_output, pp); - (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event); + (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, + ptp->u.alive.td.io.event); io_tasks_executed++; break; case ERTS_PORT_TASK_EVENT: reds += ERTS_PORT_REDS_EVENT; - ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0); + ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0); DTRACE_DRIVER(driver_event, pp); - (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data); + (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, + ptp->u.alive.td.io.event, + ptp->u.alive.td.io.event_data); io_tasks_executed++; break; + case ERTS_PORT_TASK_PROC_SIG: { + ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data; + ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0); + if (!pp->sched.taskq.bpq) + reds += ptp->u.alive.td.psig.callback(pp, + state, + ERTS_PROC2PORT_SIG_EXEC, + sigdp); + else { + ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp); + reds += ptp->u.alive.td.psig.callback(pp, + state, + ERTS_PROC2PORT_SIG_EXEC, + sigdp); + dequeued_proc2port_data(pp, size); + } + break; + } case ERTS_PORT_TASK_DIST_CMD: reds += erts_dist_command(pp, CONTEXT_REDS-reds); break; @@ -822,33 +1681,33 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) break; } - if ((pp->status & ERTS_PORT_SFLG_CLOSING) - && erts_is_port_ioq_empty(pp)) { - reds += ERTS_PORT_REDS_TERMINATE; - erts_terminate_port(pp); - } - - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + reds += erts_port_driver_callback_epilogue(pp, &state); -#ifdef ERTS_SMP - if (pp->xports) - erts_smp_xports_unlock(pp); - ASSERT(!pp->xports); -#endif + aborted_port_task: + schedule_port_task_free(ptp); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + begin_handle_tasks: + if (state & ERTS_PORT_SFLG_FREE) { + reds += ERTS_PORT_REDS_FREE; + begin_port_cleanup(pp, &execq, &processing_busy_q); - port_task_free(ptp); + break; + } - erts_smp_runq_lock(runq); + vreds += ERTS_PORT_CALLBACK_VREDS; + reds += ERTS_PORT_CALLBACK_VREDS; - ptp = pop_task(ptqp); + if (reds >= CONTEXT_REDS) + break; } - tasks_done: - erts_unblock_fpe(fpe_was_unmasked); + /* trace port scheduling, out */ + if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { + trace_sched_ports(pp, am_out); + } + if (io_tasks_executed) { ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) >= io_tasks_executed); @@ -856,15 +1715,19 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) -1*io_tasks_executed); } - *curr_port_pp = NULL; - #ifdef ERTS_SMP ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); #endif - if (!pp->sched.taskq) { - ASSERT(pp->sched.exe_taskq); - pp->sched.exe_taskq = NULL; + active = finalize_exec(pp, &execq, processing_busy_q); + + erts_port_release(pp); + + *curr_port_pp = NULL; + + erts_smp_runq_lock(runq); + + if (!active) { if (erts_system_profile_flags.runnable_ports) profile_runnable_port(pp, am_inactive); } @@ -873,16 +1736,13 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) ErtsRunQueue *xrunq; #endif - ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD)); - ASSERT(pp->sched.taskq->first); + ASSERT(!(erts_atomic32_read_nob(&pp->state) & 
ERTS_PORT_SFLGS_DEAD)); #ifdef ERTS_SMP xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); if (!xrunq) { #endif enqueue_port(runq, pp); - ASSERT(pp->sched.exe_taskq); - pp->sched.exe_taskq = NULL; /* No need to notify ourselves about inc in runq. */ #ifdef ERTS_SMP } @@ -892,49 +1752,21 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) erts_smp_runq_unlock(runq); xrunq = erts_port_runq(pp); - if (xrunq) { - enqueue_port(xrunq, pp); - ASSERT(pp->sched.exe_taskq); - pp->sched.exe_taskq = NULL; - erts_smp_runq_unlock(xrunq); - erts_smp_notify_inc_runq(xrunq); - } + ASSERT(xrunq); + enqueue_port(xrunq, pp); + erts_smp_runq_unlock(xrunq); + erts_smp_notify_inc_runq(xrunq); erts_smp_runq_lock(runq); } #endif } + done: res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) != (erts_aint_t) 0); - ERTS_PT_CHK_PRES_PORTQ(runq, pp); - - port_taskq_free(ptqp); - - /* trace port scheduling, out */ - if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { - trace_sched_ports(pp, am_out); - } -#ifndef ERTS_SMP - erts_port_release(pp); -#else - { - erts_aint_t refc; - erts_smp_mtx_unlock(pp->lock); - refc = erts_smp_atomic_dec_read_nob(&pp->refc); - ASSERT(refc >= 0); - if (refc == 0) { - erts_smp_runq_unlock(runq); - erts_port_cleanup(pp); /* Might aquire runq lock */ - erts_smp_runq_lock(runq); - res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) - != (erts_aint_t) 0); - } - } -#endif - - done: + reds -= vreds; runq->scheduler->reductions += reds; ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); @@ -943,78 +1775,191 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) return res; } -/* - * Handle remaining tasks after a free task. - */ +#ifdef ERTS_SMP +static void +release_port(void *vport) +{ + erts_port_dec_refc((Port *) vport); +} +#endif static void -handle_remaining_tasks(ErtsRunQueue *runq, Port *pp) +begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) { - int i; - ErtsPortTask *ptp; - ErtsPortTaskQueue *ptqps[] = {pp->sched.exe_taskq, pp->sched.taskq}; + int i, max; + ErtsPortTaskBusyCallerTable *tabp; + ErtsPortTask *qs[3]; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); - for (i = 0; i < sizeof(ptqps)/sizeof(ErtsPortTaskQueue *); i++) { - if (!ptqps[i]) - continue; - ptp = pop_task(ptqps[i]); - while (ptp) { + /* + * Abort remaining tasks... + * + * We want to process queues in the following order in order + * to preserve signal ordering guarantees: + * 1. Local busy queue + * 2. Local queue + * 3. 
In queue + */ + + max = 0; + if (!execqp) { + if (pp->sched.taskq.local.busy.first) + qs[max++] = pp->sched.taskq.local.busy.first; + if (pp->sched.taskq.local.first) + qs[max++] = pp->sched.taskq.local.first; + } + else { + if (*processing_busy_q_p) { + if (*execqp) + qs[max++] = *execqp; + if (pp->sched.taskq.local.first) + qs[max++] = pp->sched.taskq.local.first; + } + else { + if (pp->sched.taskq.local.busy.first) + qs[max++] = pp->sched.taskq.local.busy.first; + if (*execqp) + qs[max++] = *execqp; + } + *execqp = NULL; + *processing_busy_q_p = 0; + } + pp->sched.taskq.local.busy.first = NULL; + pp->sched.taskq.local.busy.last = NULL; + pp->sched.taskq.local.first = NULL; + tabp = pp->sched.taskq.local.busy.table; + if (tabp) { + int bix; + for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) { + ErtsPortTaskBusyCaller *bcp = tabp->bucket[bix]; + while (bcp) { + ErtsPortTaskBusyCaller *free_bcp = bcp; + bcp = bcp->next; + if (free_bcp != &tabp->pre_alloc_busy_caller) + erts_free(ERTS_ALC_T_BUSY_CALLER, free_bcp); + } + } + + busy_caller_table_free(tabp); + pp->sched.taskq.local.busy.table = NULL; + } + + erts_port_task_sched_lock(&pp->sched); + qs[max] = pp->sched.taskq.in.first; + pp->sched.taskq.in.first = NULL; + pp->sched.taskq.in.last = NULL; + erts_port_task_sched_unlock(&pp->sched); + if (qs[max]) + max++; + + for (i = 0; i < max; i++) { + while (1) { + erts_aint32_t state; + ErtsPortTask *ptp = qs[i]; + if (!ptp) + break; + + qs[i] = ptp->u.alive.next; + + /* Normal case here is aborted tasks... */ + state = erts_smp_atomic32_read_nob(&ptp->state); + if (state == ERTS_PT_STATE_ABORTED) + goto aborted_port_task; + + state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + ERTS_PT_STATE_EXECUTING, + ERTS_PT_STATE_SCHEDULED); + if (state != ERTS_PT_STATE_SCHEDULED) { + ASSERT(state == ERTS_PT_STATE_ABORTED); + goto aborted_port_task; + } + reset_handle(ptp); - erts_smp_runq_unlock(runq); switch (ptp->type) { - case ERTS_PORT_TASK_FREE: case ERTS_PORT_TASK_TIMEOUT: break; case ERTS_PORT_TASK_INPUT: - erts_stale_drv_select(pp->id, ptp->event, DO_READ, 1); + erts_stale_drv_select(pp->common.id, + ptp->u.alive.td.io.event, + DO_READ, + 1); break; case ERTS_PORT_TASK_OUTPUT: - erts_stale_drv_select(pp->id, ptp->event, DO_WRITE, 1); + erts_stale_drv_select(pp->common.id, + ptp->u.alive.td.io.event, + DO_WRITE, + 1); break; case ERTS_PORT_TASK_EVENT: - erts_stale_drv_select(pp->id, ptp->event, 0, 1); + erts_stale_drv_select(pp->common.id, + ptp->u.alive.td.io.event, + 0, + 1); break; case ERTS_PORT_TASK_DIST_CMD: break; + case ERTS_PORT_TASK_PROC_SIG: { + ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data; + if (!pp->sched.taskq.bpq) + ptp->u.alive.td.psig.callback(NULL, + ERTS_PORT_SFLG_INVALID, + ERTS_PROC2PORT_SIG_ABORT_CLOSED, + sigdp); + else { + ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp); + ptp->u.alive.td.psig.callback(NULL, + ERTS_PORT_SFLG_INVALID, + ERTS_PROC2PORT_SIG_ABORT_CLOSED, + sigdp); + aborted_proc2port_data(pp, size); + } + break; + } default: erl_exit(ERTS_ABORT_EXIT, "Invalid port task type: %d\n", (int) ptp->type); } - port_task_free(ptp); - - erts_smp_runq_lock(runq); - ptp = pop_task(ptqps[i]); + aborted_port_task: + schedule_port_task_free(ptp); } } - ASSERT(!pp->sched.taskq || !pp->sched.taskq->first); + erts_smp_atomic32_read_band_nob(&pp->sched.flags, + ~(ERTS_PTS_FLG_HAVE_BUSY_TASKS + |ERTS_PTS_FLG_HAVE_TASKS)); + + /* + * Schedule cleanup of port structure... 
+ */ +#ifdef ERTS_SMP + erts_schedule_thr_prgr_later_op(release_port, + (void *) pp, + &pp->common.u.release); +#else + pp->cleanup = 1; +#endif } int erts_port_is_scheduled(Port *pp) { - int res; - ErtsRunQueue *runq = erts_port_runq(pp); - if (!runq) - return 0; - res = pp->sched.taskq || pp->sched.exe_taskq; - erts_smp_runq_unlock(runq); - return res; + erts_aint32_t flags = erts_smp_atomic32_read_acqb(&pp->sched.flags); + return (flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)) != 0; } #ifdef ERTS_SMP + void erts_enqueue_port(ErtsRunQueue *rq, Port *pp) { ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); ASSERT(rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); - ASSERT(pp->sched.in_runq); + ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ); enqueue_port(rq, pp); } @@ -1026,7 +1971,8 @@ erts_dequeue_port(ErtsRunQueue *rq) pp = pop_port(rq); ASSERT(!pp || rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); - ASSERT(!pp || pp->sched.in_runq); + ASSERT(!pp || (erts_smp_atomic32_read_nob(&pp->sched.flags) + & ERTS_PTS_FLG_IN_RUNQ)); return pp; } @@ -1041,5 +1987,5 @@ erts_port_task_init(void) erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks, (erts_aint_t) 0); init_port_task_alloc(); - init_port_taskq_alloc(); + init_busy_caller_table_alloc(); } diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h index fd88b1c1ff..ae6cd69ae2 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2011. All Rights Reserved. + * Copyright Ericsson AB 2006-2012. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -27,6 +27,9 @@ #define ERTS_PORT_TASK_H_BASIC_TYPES__ #include "erl_sys_driver.h" #include "erl_smp.h" +#define ERL_PORT_GET_PORT_TYPE_ONLY__ +#include "erl_port.h" +#undef ERL_PORT_GET_PORT_TYPE_ONLY__ typedef erts_smp_atomic_t ErtsPortTaskHandle; #endif @@ -43,13 +46,19 @@ typedef erts_smp_atomic_t ErtsPortTaskHandle; #define ERTS_INCLUDE_SCHEDULER_INTERNALS #endif +#define ERTS_PT_FLG_WAIT_BUSY (1 << 0) +#define ERTS_PT_FLG_SIG_DEP (1 << 1) +#define ERTS_PT_FLG_NOSUSPEND (1 << 2) +#define ERTS_PT_FLG_REF (1 << 3) +#define ERTS_PT_FLG_BAD_OUTPUT (1 << 4) + typedef enum { - ERTS_PORT_TASK_FREE, ERTS_PORT_TASK_INPUT, ERTS_PORT_TASK_OUTPUT, ERTS_PORT_TASK_EVENT, ERTS_PORT_TASK_TIMEOUT, - ERTS_PORT_TASK_DIST_CMD + ERTS_PORT_TASK_DIST_CMD, + ERTS_PORT_TASK_PROC_SIG } ErtsPortTaskType; #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS @@ -57,19 +66,76 @@ typedef enum { extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; #endif +#define ERTS_PTS_FLG_IN_RUNQ (((erts_aint32_t) 1) << 0) +#define ERTS_PTS_FLG_EXEC (((erts_aint32_t) 1) << 1) +#define ERTS_PTS_FLG_HAVE_TASKS (((erts_aint32_t) 1) << 2) +#define ERTS_PTS_FLG_EXIT (((erts_aint32_t) 1) << 3) +#define ERTS_PTS_FLG_BUSY_PORT (((erts_aint32_t) 1) << 4) +#define ERTS_PTS_FLG_BUSY_PORT_Q (((erts_aint32_t) 1) << 5) +#define ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q (((erts_aint32_t) 1) << 6) +#define ERTS_PTS_FLG_HAVE_BUSY_TASKS (((erts_aint32_t) 1) << 7) +#define ERTS_PTS_FLG_HAVE_NS_TASKS (((erts_aint32_t) 1) << 8) +#define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9) +#define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10) + +#define ERTS_PTS_FLGS_BUSY \ + (ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q) + +#define 
ERTS_PTS_FLGS_FORCE_SCHEDULE_OP \ + (ERTS_PTS_FLG_EXIT \ + | ERTS_PTS_FLG_HAVE_BUSY_TASKS \ + | ERTS_PTS_FLG_HAVE_TASKS \ + | ERTS_PTS_FLG_EXEC \ + | ERTS_PTS_FLG_FORCE_SCHED) + +#define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH 8192 +#define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW 4096 + +typedef struct { + ErlDrvSizeT high; + erts_smp_atomic_t low; + erts_smp_atomic_t size; +} ErtsPortTaskBusyPortQ; + typedef struct ErtsPortTask_ ErtsPortTask; -typedef struct ErtsPortTaskQueue_ ErtsPortTaskQueue; +typedef struct ErtsPortTaskBusyCallerTable_ ErtsPortTaskBusyCallerTable; +typedef struct ErtsPortTaskHandleList_ ErtsPortTaskHandleList; typedef struct { Port *next; - int in_runq; - ErtsPortTaskQueue *taskq; - ErtsPortTaskQueue *exe_taskq; + struct { + struct { + struct { + ErtsPortTask *first; + ErtsPortTask *last; + ErtsPortTaskBusyCallerTable *table; + ErtsPortTaskHandleList *nosuspend; + } busy; + ErtsPortTask *first; + } local; + struct { + ErtsPortTask *first; + ErtsPortTask *last; + } in; + ErtsPortTaskBusyPortQ *bpq; + } taskq; + erts_smp_atomic32_t flags; +#ifdef ERTS_SMP + erts_mtx_t mtx; +#endif } ErtsPortTaskSched; ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp); ERTS_GLB_INLINE int erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp); -ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp); +ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, + ErtsPortTaskBusyPortQ *bpq); +ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp, + Eterm id); +ERTS_GLB_INLINE void erts_port_task_fini_sched(ErtsPortTaskSched *ptsp); +ERTS_GLB_INLINE void erts_port_task_sched_lock(ErtsPortTaskSched *ptsp); +ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp); +ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp); + #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void); #endif @@ -88,13 +154,75 @@ erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp) return ((void *) erts_smp_atomic_read_nob(pthp)) != NULL; } +ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, + ErtsPortTaskBusyPortQ *bpq) +{ + if (bpq) { + erts_aint_t low = (erts_aint_t) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW; + erts_smp_atomic_init_nob(&bpq->low, low); + bpq->high = (ErlDrvSizeT) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH; + erts_smp_atomic_init_nob(&bpq->size, (erts_aint_t) 0); + } + ptsp->taskq.bpq = bpq; +} + ERTS_GLB_INLINE void -erts_port_task_init_sched(ErtsPortTaskSched *ptsp) +erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id) { +#ifdef ERTS_SMP + char *lock_str = "port_sched_lock"; +#endif ptsp->next = NULL; - ptsp->in_runq = 0; - ptsp->taskq = NULL; - ptsp->exe_taskq = NULL; + ptsp->taskq.local.busy.first = NULL; + ptsp->taskq.local.busy.last = NULL; + ptsp->taskq.local.busy.table = NULL; + ptsp->taskq.local.busy.nosuspend = NULL; + ptsp->taskq.local.first = NULL; + ptsp->taskq.in.first = NULL; + ptsp->taskq.in.last = NULL; + erts_smp_atomic32_init_nob(&ptsp->flags, 0); +#ifdef ERTS_SMP +#ifdef ERTS_ENABLE_LOCK_COUNT + if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)) + lock_str = NULL; +#endif + erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id); +#endif +} + +ERTS_GLB_INLINE void +erts_port_task_sched_lock(ErtsPortTaskSched *ptsp) +{ +#ifdef ERTS_SMP + erts_mtx_lock(&ptsp->mtx); +#endif +} + +ERTS_GLB_INLINE void +erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp) +{ +#ifdef ERTS_SMP + 
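/*
 * A minimal sketch of the pattern used by erts_port_task_sched_lock() and
 * friends above: in an SMP build the wrapper takes a real mutex, in a
 * non-SMP build it compiles away.  MY_SMP and the pthread mutex are
 * illustrative assumptions standing in for ERTS_SMP/erts_mtx_t; the mutex
 * is assumed to be initialised elsewhere (e.g. PTHREAD_MUTEX_INITIALIZER).
 */
#include <pthread.h>

typedef struct {
#ifdef MY_SMP
    pthread_mutex_t mtx;
#else
    int unused;                   /* keep the struct non-empty */
#endif
} sched_lock_t;

static inline void sched_lock(sched_lock_t *l)
{
#ifdef MY_SMP
    pthread_mutex_lock(&l->mtx);
#else
    (void) l;
#endif
}

static inline void sched_unlock(sched_lock_t *l)
{
#ifdef MY_SMP
    pthread_mutex_unlock(&l->mtx);
#else
    (void) l;
#endif
}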
erts_mtx_unlock(&ptsp->mtx); +#endif +} + +ERTS_GLB_INLINE int +erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp) +{ +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) + return erts_lc_mtx_is_locked(&ptsp->mtx); +#else + return 0; +#endif +} + + +ERTS_GLB_INLINE void +erts_port_task_fini_sched(ErtsPortTaskSched *ptsp) +{ +#ifdef ERTS_SMP + erts_mtx_destroy(&ptsp->mtx); +#endif } #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS @@ -115,14 +243,16 @@ int erts_port_task_execute(ErtsRunQueue *, Port **); void erts_port_task_init(void); #endif -int erts_port_task_abort(Eterm id, ErtsPortTaskHandle *); +int erts_port_task_abort(ErtsPortTaskHandle *); +void erts_port_task_abort_nosuspend_tasks(Port *); + int erts_port_task_schedule(Eterm, ErtsPortTaskHandle *, ErtsPortTaskType, - ErlDrvEvent, - ErlDrvEventData); + ...); void erts_port_task_free_port(Port *); int erts_port_is_scheduled(Port *); +ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data(void); #ifdef ERTS_SMP void erts_enqueue_port(ErtsRunQueue *rq, Port *pp); diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 61554780c4..6e9bf7ca12 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -42,6 +42,7 @@ #include "erl_thr_queue.h" #include "erl_async.h" #include "dtrace-wrapper.h" +#include "erl_ptab.h" #define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0) #define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2) @@ -136,155 +137,16 @@ do { \ #define ERTS_EMPTY_RUNQ_PORTS(RQ) \ (RUNQ_READ_LEN(&(RQ)->ports.info.len) == 0 && (RQ)->misc.start == NULL) +const Process erts_invalid_process = {{ERTS_INVALID_PID}}; + extern BeamInstr beam_apply[]; extern BeamInstr beam_exit[]; extern BeamInstr beam_continue_exit[]; -#ifdef ARCH_32 - -union { - erts_smp_dw_atomic_t pid_data; - char align[ERTS_CACHE_LINE_SIZE]; -} last erts_align_attribute(ERTS_CACHE_LINE_SIZE); - - -static ERTS_INLINE Uint64 -dw_aint_to_uint64(erts_dw_aint_t *dw) -{ -#ifdef ETHR_SU_DW_NAINT_T__ - return (Uint64) dw->dw_sint; -#else - Uint64 res; - res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]); - res <<= 32; - res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]); - return res; -#endif -} - -static void -unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val) -{ -#ifdef ETHR_SU_DW_NAINT_T__ - dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val; -#else - dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff); - dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff); -#endif -} - -static ERTS_INLINE void -last_pid_data_init_nob(Uint64 val) -{ - erts_dw_aint_t dw; - unint64_to_dw_aint(&dw, val); - erts_smp_dw_atomic_init_nob(&last.pid_data, &dw); -} - -static ERTS_INLINE void -last_pid_data_set_relb(Uint64 val) -{ - erts_dw_aint_t dw; - unint64_to_dw_aint(&dw, val); - erts_smp_dw_atomic_set_relb(&last.pid_data, &dw); -} - -static ERTS_INLINE Uint64 -last_pid_data_read_nob(void) -{ - erts_dw_aint_t dw; - erts_smp_dw_atomic_read_nob(&last.pid_data, &dw); - return dw_aint_to_uint64(&dw); -} - -static ERTS_INLINE Uint64 -last_pid_data_read_acqb(void) -{ - erts_dw_aint_t dw; - erts_smp_dw_atomic_read_acqb(&last.pid_data, &dw); - return dw_aint_to_uint64(&dw); -} - -static ERTS_INLINE Uint64 -last_pid_data_cmpxchg_relb(Uint64 new, Uint64 exp) -{ - erts_dw_aint_t dw_new, dw_xchg; - - unint64_to_dw_aint(&dw_new, new); - unint64_to_dw_aint(&dw_xchg, exp); - - if (erts_smp_dw_atomic_cmpxchg_relb(&last.pid_data, &dw_new, &dw_xchg)) - return exp; - else - return 
dw_aint_to_uint64(&dw_xchg); -} - -#elif defined(ARCH_64) - -union { - erts_smp_atomic_t pid_data; - char align[ERTS_CACHE_LINE_SIZE]; -} last erts_align_attribute(ERTS_CACHE_LINE_SIZE); - -static ERTS_INLINE void -last_pid_data_init_nob(Uint64 val) -{ - erts_smp_atomic_init_nob(&last.pid_data, (erts_aint_t) val); -} - -static ERTS_INLINE void -last_pid_data_set_relb(Uint64 val) -{ - erts_smp_atomic_set_relb(&last.pid_data, (erts_aint_t) val); -} - -static ERTS_INLINE Uint64 -last_pid_data_read_nob(void) -{ - return (Uint64) erts_smp_atomic_read_nob(&last.pid_data); -} - -static ERTS_INLINE Uint64 -last_pid_data_read_acqb(void) -{ - return (Uint64) erts_smp_atomic_read_acqb(&last.pid_data); -} - -static ERTS_INLINE Uint64 -last_pid_data_cmpxchg_relb(Uint64 new, Uint64 exp) -{ - return (Uint64) erts_smp_atomic_cmpxchg_relb(&last.pid_data, - (erts_aint_t) new, - (erts_aint_t) exp); -} - -#else -# error "Not 64-bit, nor 32-bit architecture..." -#endif - -static ERTS_INLINE int -last_pid_data_cmp(Uint64 lpd1, Uint64 lpd2) -{ - Uint64 lpd1_wrap; - - if (lpd1 == lpd2) - return 0; - - lpd1_wrap = lpd1 + (((Uint64) 1) << 63); - - if (lpd1 < lpd1_wrap) - return (lpd1 < lpd2 && lpd2 < lpd1_wrap) ? -1 : 1; - else - return (lpd1_wrap <= lpd2 && lpd2 < lpd1) ? 1 : -1; -} - - -#define ERTS_PID_DATA_MASK__ ((1 << _PID_DATA_SIZE) - 1) - int erts_sched_compact_load; Uint erts_no_schedulers; -ErtsProcTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE); +ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE); int erts_sched_thread_suggested_stack_size = -1; @@ -371,8 +233,6 @@ erts_sched_stat_t erts_sched_stat; static erts_tsd_key_t sched_data_key; #endif -erts_smp_rwmtx_t erts_proc_tab_rwmtx; - static erts_smp_atomic32_t function_calls; #ifdef ERTS_SMP @@ -409,29 +269,6 @@ struct erts_system_profile_flags_t erts_system_profile_flags; #if ERTS_MAX_PROCESSES > 0x7fffffff #error "Need to store process_count in another type" #endif -static erts_smp_atomic32_t process_count; - -typedef struct ErtsTermProcElement_ ErtsTermProcElement; -struct ErtsTermProcElement_ { - ErtsTermProcElement *next; - ErtsTermProcElement *prev; - int ix; - union { - struct { - Eterm pid; - Uint64 spawned; - Uint64 exited; - } process; - struct { - Uint64 interval; - } bif_invocation; - } u; -}; - -static struct { - ErtsTermProcElement *start; - ErtsTermProcElement *end; -} saved_term_procs; ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_op_list, ErtsMiscOpList, @@ -493,8 +330,6 @@ do { \ * Local functions. 
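/*
 * A minimal sketch of the wrap-around ordering that the removed
 * last_pid_data_cmp() implemented: two monotonically increasing 64-bit
 * counters are ordered by which one is "ahead" modulo 2^64, so the
 * comparison stays correct after the counter wraps.  This is the standard
 * sequence-number compare, not the exact ERTS code.
 */
#include <stdint.h>

/* > 0 if a is ahead of b (mod 2^64), < 0 if behind, 0 if equal. */
static int seq_cmp(uint64_t a, uint64_t b)
{
    if (a == b)
        return 0;
    /* a is "ahead" of b when the forward distance from b to a is < 2^63 */
    return ((a - b) < (UINT64_C(1) << 63)) ? 1 : -1;
}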
*/ -static void init_processes_bif(void); -static void save_terminating_process(Process *p); static void exec_misc_ops(ErtsRunQueue *); static void print_function_from_pc(int to, void *to_arg, BeamInstr* x); static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, @@ -562,39 +397,17 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq) } #endif -static erts_interval_t *proc_interval; - -static void -proc_interval_init(void) -{ - proc_interval = erts_alloc_permanent_cache_aligned( - ERTS_ALC_T_PROC_INTERVAL, - sizeof(erts_interval_t)); - erts_smp_interval_init(proc_interval); -} - -static ERTS_INLINE Uint64 -get_proc_interval(void) -{ - return erts_smp_current_interval_nob(proc_interval); -} static ERTS_INLINE Uint64 ensure_later_proc_interval(Uint64 interval) { - return erts_smp_ensure_later_interval_nob(proc_interval, interval); -} - -static ERTS_INLINE Uint64 -step_proc_interval(void) -{ - return erts_smp_step_interval_nob(proc_interval); + return erts_smp_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval); } Uint64 erts_get_proc_interval(void) { - return get_proc_interval(); + return erts_smp_current_interval_nob(erts_ptab_interval(&erts_proc)); } Uint64 @@ -606,7 +419,7 @@ erts_ensure_later_proc_interval(Uint64 interval) Uint64 erts_step_proc_interval(void) { - return step_proc_interval(); + return erts_smp_step_interval_nob(erts_ptab_interval(&erts_proc)); } void @@ -654,20 +467,18 @@ erts_pre_init_process(void) #endif } +#ifdef ERTS_SMP +static void +release_process(void *vproc) +{ + erts_smp_proc_dec_refc((Process *) vproc); +} +#endif + /* initialize the scheduler */ void -erts_init_process(int ncpu) +erts_init_process(int ncpu, int proc_tab_size) { - int proc_tab_sz; - int max_proc_bits; - int proc_bits = ERTS_PROC_BITS; - erts_smp_atomic_t *proc_entry; - char *proc_tab_end; - erts_smp_rwmtx_opt_t proc_tab_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - proc_tab_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - proc_tab_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; - - proc_interval_init(); #ifdef ERTS_SMP erts_disable_proc_not_running_opt = 0; @@ -676,52 +487,16 @@ erts_init_process(int ncpu) init_proclist_alloc(); - erts_smp_atomic32_init_nob(&process_count, 0); - - if (erts_use_r9_pids_ports) - proc_bits = ERTS_R9_PROC_BITS; - - if (erts_proc.max > (1 << proc_bits)) - erts_proc.max = 1 << proc_bits; - - proc_tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(erts_proc.max - * sizeof(erts_smp_atomic_t)); - erts_proc.tab = erts_alloc(ERTS_ALC_T_PROC_TABLE, proc_tab_sz); - proc_tab_end = ((char *) erts_proc.tab) + proc_tab_sz; - proc_entry = erts_proc.tab; - while (proc_tab_end > ((char *) proc_entry)) { - erts_smp_atomic_init_nob(proc_entry, ERTS_AINT_NULL); - proc_entry++; - } - - erts_smp_rwmtx_init_opt(&erts_proc_tab_rwmtx, - &proc_tab_rwmtx_opts, - "proc_tab"); - last_pid_data_init_nob(~((Uint64) 0)); - - max_proc_bits = erts_fit_in_bits_int32((Sint32) erts_proc.max - 1); - - erts_proc.tab_cache_lines = proc_tab_sz/ERTS_CACHE_LINE_SIZE; - erts_proc.pix_per_cache_line = ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t); - if ((erts_proc.max & (erts_proc.max - 1)) - | (erts_proc.pix_per_cache_line & (erts_proc.pix_per_cache_line - 1))) { - /* - * erts_proc.max or erts_proc.pix_per_cache_line - * not a power of 2 :( - */ - erts_proc.pix_cl_mask = 0; - erts_proc.pix_cl_shift = 0; - erts_proc.pix_cli_mask = 0; - erts_proc.pix_cli_shift = 0; - } - else { - ASSERT((erts_proc.tab_cache_lines - & (erts_proc.tab_cache_lines - 1)) == 0); - 
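/*
 * A minimal sketch of the idea behind the removed pix_cl_* fields (the
 * same scheme now lives in erl_ptab): with a power-of-two table of
 * pointer-sized entries, consecutive logical indices are remapped so that
 * they land in different cache lines, spreading concurrently used slots
 * and reducing false sharing.  The constants and the exact permutation
 * below are illustrative assumptions, not the ERTS layout.
 */
#define CACHE_LINE 64
#define ENTRY_SIZE  8
#define PER_LINE   (CACHE_LINE / ENTRY_SIZE)   /* 8 entries per cache line */

/* table_size and PER_LINE are both assumed to be powers of two */
static unsigned spread_index(unsigned logical, unsigned table_size)
{
    unsigned lines = table_size / PER_LINE;
    unsigned line  = logical % lines;          /* a new line for each step */
    unsigned slot  = (logical / lines) % PER_LINE;
    return line * PER_LINE + slot;             /* bijective remapping */
}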
erts_proc.pix_cl_mask = erts_proc.tab_cache_lines-1; - erts_proc.pix_cl_shift = erts_fit_in_bits_int32(erts_proc.pix_per_cache_line-1); - erts_proc.pix_cli_shift = erts_fit_in_bits_int32(erts_proc.pix_cl_mask); - erts_proc.pix_cli_mask = (1 << (max_proc_bits - erts_proc.pix_cli_shift)) - 1; - } + erts_ptab_init_table(&erts_proc, + ERTS_ALC_T_PROC_TABLE, +#ifdef ERTS_SMP + release_process, +#else + NULL, +#endif + (ErtsPTabElementCommon *) &erts_invalid_process.common, + proc_tab_size, + "process_table"); last_reductions = 0; last_exact_reductions = 0; @@ -732,7 +507,6 @@ void erts_late_init_process(void) { int ix; - init_processes_bif(); erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat"); for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) { @@ -980,9 +754,9 @@ static ERTS_INLINE ErtsProcList * proclist_create(Process *p) { ErtsProcList *plp = proclist_alloc(); - ensure_later_proc_interval(p->started_interval); - plp->pid = p->id; - plp->started_interval = p->started_interval; + ensure_later_proc_interval(p->common.u.alive.started_interval); + plp->pid = p->common.id; + plp->started_interval = p->common.u.alive.started_interval; return plp; } @@ -992,12 +766,6 @@ proclist_destroy(ErtsProcList *plp) proclist_free(plp); } -static ERTS_INLINE int -proclist_same(ErtsProcList *plp, Process *p) -{ - return plp->pid == p->id && plp->started_interval == p->started_interval; -} - ErtsProcList * erts_proclist_create(Process *p) { @@ -1010,12 +778,6 @@ erts_proclist_destroy(ErtsProcList *plp) proclist_destroy(plp); } -int -erts_proclist_same(ErtsProcList *plp, Process *p) -{ - return proclist_same(plp, p); -} - void * erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data) { @@ -1776,37 +1538,32 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS); awdp->esdp->run_queue->halt_in_progress = 1; if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) { - int i; + int i, max = erts_ptab_max(&erts_port); erts_smp_atomic32_set_nob(&erts_halt_progress, 1); - for (i = 0; i < erts_max_ports; i++) { - Port *prt = &erts_port[i]; - erts_smp_port_state_lock(prt); - if ((prt->status & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP - | ERTS_PORT_SFLG_HALT))) { - erts_smp_port_state_unlock(prt); - continue; - } - /* We need to set the halt flag - get the port lock */ -#ifdef ERTS_SMP - erts_smp_atomic_inc_nob(&prt->refc); -#endif - erts_smp_port_state_unlock(prt); -#ifdef ERTS_SMP - erts_smp_mtx_lock(prt->lock); -#endif - if ((prt->status & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP - | ERTS_PORT_SFLG_HALT))) { - erts_port_release(prt); + for (i = 0; i < max; i++) { + erts_aint32_t state; + Port *prt = erts_pix2port(i); + if (!prt) continue; - } - erts_port_status_bor_set(prt, ERTS_PORT_SFLG_HALT); - erts_smp_atomic32_inc_nob(&erts_halt_progress); - if (prt->status & (ERTS_PORT_SFLG_EXITING - | ERTS_PORT_SFLG_CLOSING)) { - erts_port_release(prt); + state = erts_atomic32_read_acqb(&prt->state); + if (state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP + | ERTS_PORT_SFLG_HALT)) continue; + + /* We need to set the halt flag - get the port lock */ + + erts_smp_port_lock(prt); + + state = erts_atomic32_read_nob(&prt->state); + if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP + | ERTS_PORT_SFLG_HALT))) { + state = erts_atomic32_read_bor_relb(&prt->state, + ERTS_PORT_SFLG_HALT); + erts_smp_atomic32_inc_nob(&erts_halt_progress); + if (!(state & (ERTS_PORT_SFLG_EXITING|ERTS_PORT_SFLG_CLOSING))) + erts_deliver_port_exit(prt, 
prt->common.id, am_killed, 0); } - erts_do_exit_port(prt, prt->id, am_killed); + erts_port_release(prt); } if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) { @@ -3183,12 +2940,12 @@ resume_run_queue(ErtsRunQueue *rq) erts_smp_runq_lock(rq); - (void) ERTS_RUNQ_FLGS_MASK_SET(rq, - (ERTS_RUNQ_FLG_OUT_OF_WORK - | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK - | ERTS_RUNQ_FLG_SUSPENDED), - (ERTS_RUNQ_FLG_OUT_OF_WORK - | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); + (void) ERTS_RUNQ_FLGS_READ_BSET(rq, + (ERTS_RUNQ_FLG_OUT_OF_WORK + | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK + | ERTS_RUNQ_FLG_SUSPENDED), + (ERTS_RUNQ_FLG_OUT_OF_WORK + | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { @@ -4210,7 +3967,7 @@ erts_fprintf(stderr, "--------------------------------\n"); ERTS_DBG_CHK_FULL_REDS_HISTORY(rq); rq->out_of_work_count = 0; - (void) ERTS_RUNQ_FLGS_MASK_SET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags); + (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags); rq->max_len = rq->len; for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { @@ -4323,11 +4080,11 @@ typedef enum { } ErtsSchedWakeupOtherThreshold; typedef enum { - ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL, + ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT, ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY } ErtsSchedWakeupOtherType; -/* First proposal */ +/* Default */ #define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH (200*CONTEXT_REDS) #define ERTS_WAKEUP_OTHER_LIMIT_HIGH (50*CONTEXT_REDS) @@ -4344,7 +4101,7 @@ typedef enum { #define ERTS_WAKEUP_OTHER_DEC_SHIFT 2 #define ERTS_WAKEUP_OTHER_FIXED_INC (CONTEXT_REDS/10) -/* To be legacy */ +/* Legacy */ #define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH_LEGACY (200*CONTEXT_REDS) #define ERTS_WAKEUP_OTHER_LIMIT_HIGH_LEGACY (50*CONTEXT_REDS) @@ -4482,7 +4239,7 @@ static void set_wakeup_other_data(void) { switch (wakeup_other.type) { - case ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL: + case ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT: wakeup_other.check = wakeup_other_check; wakeup_other_set_limit(); break; @@ -4501,7 +4258,7 @@ erts_early_init_scheduling(int no_schedulers) aux_work_timeout_early_init(no_schedulers); #ifdef ERTS_SMP wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM; - wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY; + wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT; #endif sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM; sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM @@ -4537,10 +4294,8 @@ int erts_sched_set_wakeup_other_type(char *str) { ErtsSchedWakeupOtherType type; - if (sys_strcmp(str, "proposal") == 0) - type = ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL; - else if (sys_strcmp(str, "default") == 0) - type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY; + if (sys_strcmp(str, "default") == 0) + type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT; else if (sys_strcmp(str, "legacy") == 0) type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY; else @@ -4896,6 +4651,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) erts_smp_atomic32_init_relb(&erts_halt_progress, -1); erts_halt_code = 0; + +#if !defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) + erts_lc_set_thread_name("scheduler 1"); +#endif + } ErtsRunQueue * @@ -5573,8 +5333,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) else if (on) { /* ------ BLOCK ------ */ if (schdlr_sspnd.msb.procs) { plp = proclist_create(p); - plp->next = schdlr_sspnd.msb.procs; - schdlr_sspnd.msb.procs = 
plp; + erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp); p->flags |= F_HAVE_BLCKD_MSCHED; ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1); ASSERT(p->scheduler_data->no == 1); @@ -5659,8 +5418,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) ~ERTS_SCHDLR_SSPND_CHNG_WAITER); } plp = proclist_create(p); - plp->next = schdlr_sspnd.msb.procs; - schdlr_sspnd.msb.procs = plp; + erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp); ASSERT(p->scheduler_data); } } @@ -5671,20 +5429,16 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all) } else { /* ------ UNBLOCK ------ */ if (p->flags & F_HAVE_BLCKD_MSCHED) { - ErtsProcList **plpp = &schdlr_sspnd.msb.procs; - plp = schdlr_sspnd.msb.procs; + ErtsProcList *plp = erts_proclist_peek_first(schdlr_sspnd.msb.procs); while (plp) { - if (!proclist_same(plp, p)){ - plpp = &plp->next; - plp = plp->next; - } - else { - *plpp = plp->next; - proclist_destroy(plp); + ErtsProcList *tmp_plp = plp; + plp = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp); + if (erts_proclist_same(tmp_plp, p)) { + erts_proclist_remove(&schdlr_sspnd.msb.procs, tmp_plp); + proclist_destroy(tmp_plp); if (!all) break; - plp = *plpp; } } } @@ -5753,23 +5507,25 @@ erts_multi_scheduling_blockers(Process *p) Eterm res = NIL; erts_smp_mtx_lock(&schdlr_sspnd.mtx); - if (schdlr_sspnd.msb.procs) { + if (!erts_proclist_is_empty(schdlr_sspnd.msb.procs)) { Eterm *hp, *hp_end; ErtsProcList *plp1, *plp2; - Uint max_size; - ASSERT(schdlr_sspnd.msb.procs); - for (max_size = 0, plp1 = schdlr_sspnd.msb.procs; + Uint max_size = 0; + + for (plp1 = erts_proclist_peek_first(schdlr_sspnd.msb.procs); plp1; - plp1 = plp1->next) { + plp1 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp1)) { max_size += 2; } ASSERT(max_size); hp = HAlloc(p, max_size); hp_end = hp + max_size; - for (plp1 = schdlr_sspnd.msb.procs; plp1; plp1 = plp1->next) { - for (plp2 = schdlr_sspnd.msb.procs; + for (plp1 = erts_proclist_peek_first(schdlr_sspnd.msb.procs); + plp1; + plp1 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp1)) { + for (plp2 = erts_proclist_peek_first(schdlr_sspnd.msb.procs); plp2->pid != plp1->pid; - plp2 = plp2->next); + plp2 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp2)); if (plp2 == plp1) { res = CONS(hp, plp1->pid, res); hp += 2; @@ -6021,7 +5777,7 @@ handle_pend_sync_suspend(Process *suspendee, ASSERT(is_nil(suspender->suspendee)); if (suspendee_alive) { erts_suspend(suspendee, suspendee_locks, NULL); - suspender->suspendee = suspendee->id; + suspender->suspendee = suspendee->common.id; } /* suspender is suspended waiting for suspendee to suspend; resume suspender */ @@ -6042,7 +5798,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, ERTS_SMP_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN); ERTS_SMP_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)); - if (c_p->id == pid) + if (c_p->common.id == pid) return erts_pid2proc(c_p, c_p_locks, pid, pid_locks); if (c_p_locks & ERTS_PROC_LOCK_STATUS) @@ -6094,7 +5850,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, */ if (!c_p->pending_suspenders) { /* Mark rp pending for suspend by c_p */ - add_pend_suspend(rp, c_p->id, handle_pend_sync_suspend); + add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend); ASSERT(is_nil(c_p->suspendee)); /* Suspend c_p; when rp is suspended c_p will be resumed. 
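/*
 * A minimal sketch of the erts_proclist_* usage introduced above: the
 * blocker/exiter lists are kept as circular doubly linked rings anchored
 * by a pointer to the first element, and "peek" iteration stops when it
 * would wrap back to that first element (hence the "not a ring anymore"
 * comments once a list has been fetched).  The struct and helpers are
 * simplified assumptions.
 */
struct plist {
    struct plist *next;
    struct plist *prev;
};

static void plist_store_last(struct plist **ring, struct plist *e)
{
    if (!*ring) {
        e->next = e->prev = e;
        *ring = e;
    }
    else {
        struct plist *first = *ring, *last = first->prev;
        e->prev = last;
        e->next = first;
        last->next = e;
        first->prev = e;
    }
}

static struct plist *plist_peek_next(struct plist *ring, struct plist *e)
{
    return (e->next == ring) ? (struct plist *) 0 : e->next;
}

static void plist_remove(struct plist **ring, struct plist *e)
{
    if (e->next == e)
        *ring = (struct plist *) 0;            /* removed the last element */
    else {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        if (*ring == e)
            *ring = e->next;
    }
}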
*/ @@ -6200,20 +5956,20 @@ handle_pend_bif_sync_suspend(Process *suspendee, ASSERT(is_nil(suspender->suspendee)); if (!suspendee_alive) erts_delete_suspend_monitor(&suspender->suspend_monitors, - suspendee->id); + suspendee->common.id); else { #ifdef DEBUG int res; #endif ErtsSuspendMonitor *smon; smon = erts_lookup_suspend_monitor(suspender->suspend_monitors, - suspendee->id); + suspendee->common.id); #ifdef DEBUG res = #endif do_bif_suspend_process(suspendee, smon, suspendee); ASSERT(!smon || res != 0); - suspender->suspendee = suspendee->id; + suspender->suspendee = suspendee->common.id; } /* suspender is suspended waiting for suspendee to suspend; resume suspender */ @@ -6242,14 +5998,14 @@ handle_pend_bif_async_suspend(Process *suspendee, ASSERT(is_nil(suspender->suspendee)); if (!suspendee_alive) erts_delete_suspend_monitor(&suspender->suspend_monitors, - suspendee->id); + suspendee->common.id); else { #ifdef DEBUG int res; #endif ErtsSuspendMonitor *smon; smon = erts_lookup_suspend_monitor(suspender->suspend_monitors, - suspendee->id); + suspendee->common.id); #ifdef DEBUG res = #endif @@ -6294,7 +6050,7 @@ suspend_process_2(BIF_ALIST_2) int unless_suspending = 0; - if (BIF_P->id == BIF_ARG_1) + if (BIF_P->common.id == BIF_ARG_1) goto badarg; /* We are not allowed to suspend ourselves */ if (is_not_nil(BIF_ARG_2)) { @@ -6390,7 +6146,7 @@ suspend_process_2(BIF_ALIST_2) if (!do_bif_suspend_process(BIF_P, smon, suspendee)) add_pend_suspend(suspendee, - BIF_P->id, + BIF_P->common.id, handle_pend_bif_async_suspend); res = am_true; @@ -6453,7 +6209,7 @@ suspend_process_2(BIF_ALIST_2) else { /* Mark suspendee pending for suspend by BIF_P */ add_pend_suspend(suspendee, - BIF_P->id, + BIF_P->common.id, handle_pend_bif_sync_suspend); ASSERT(is_nil(BIF_P->suspendee)); @@ -6527,7 +6283,7 @@ resume_process_1(BIF_ALIST_1) Process *suspendee; int is_active; - if (BIF_P->id == BIF_ARG_1) + if (BIF_P->common.id == BIF_ARG_1) BIF_ERROR(BIF_P, BADARG); erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); @@ -6652,7 +6408,8 @@ erts_process_status(Process *c_p, ErtsProcLocks c_p_locks, for (i = 0; i < erts_no_schedulers; i++) { esdp = ERTS_SCHEDULER_IX(i); erts_smp_runq_lock(esdp->run_queue); - if (esdp->free_process && esdp->free_process->id == rpid) { + if (esdp->free_process + && esdp->free_process->common.id == rpid) { res = am_free; erts_smp_runq_unlock(esdp->run_queue); break; @@ -6673,27 +6430,31 @@ erts_process_status(Process *c_p, ErtsProcLocks c_p_locks, void erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) { -#ifdef DEBUG - int res; -#endif + int suspend; + ASSERT(c_p == erts_get_current_process()); ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + if (busy_port) + suspend = erts_save_suspend_process_on_port(busy_port, c_p); + else + suspend = 1; + + if (suspend) { #ifdef DEBUG - res = + int res = #endif - suspend_process(c_p, c_p); - - ASSERT(res); - - if (busy_port) - erts_wake_process_later(busy_port, c_p); + suspend_process(c_p, c_p); + ASSERT(res); + } if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + if (suspend && busy_port && erts_system_monitor_flags.busy_port) + monitor_generic(c_p, am_busy_port, busy_port->common.id); } void @@ -6708,16 +6469,19 @@ erts_resume(Process* process, ErtsProcLocks process_locks) } int -erts_resume_processes(ErtsProcList *plp) +erts_resume_processes(ErtsProcList *list) { + /* 'list' is 
expected to have been fetched (i.e. not a ring anymore) */ int nresumed = 0; + ErtsProcList *plp = list; + while (plp) { Process *proc; ErtsProcList *fplp; ASSERT(is_internal_pid(plp->pid)); proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS); if (proc) { - if (proclist_same(plp, proc)) { + if (erts_proclist_same(plp, proc)) { resume_process(proc); nresumed++; } @@ -6870,9 +6634,8 @@ Process *schedule(Process *p, int calls) state = erts_smp_atomic32_read_acqb(&p->state); if (IS_TRACED(p)) { - if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE)) { + if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE)) erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT); - } if (state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) { if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT)) trace_sched(p, ((state & ERTS_PSFLG_FREE) @@ -6885,7 +6648,7 @@ Process *schedule(Process *p, int calls) else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS)) trace_virtual_sched(p, am_out); } - } + } #ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) @@ -6949,9 +6712,8 @@ Process *schedule(Process *p, int calls) #ifdef ERTS_SMP { ErtsProcList *pnd_xtrs = rq->procs.pending_exiters; - rq->procs.pending_exiters = NULL; - - if (pnd_xtrs) { + if (erts_proclist_fetch(&pnd_xtrs, NULL)) { + rq->procs.pending_exiters = NULL; erts_smp_runq_unlock(rq); handle_pending_exiters(pnd_xtrs); erts_smp_runq_lock(rq); @@ -7247,7 +7009,7 @@ Process *schedule(Process *p, int calls) erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); #ifdef ERTS_SMP - if (is_not_nil(p->tracer_proc)) + if (is_not_nil(ERTS_TRACER_PROC(p))) erts_check_my_tracer_proc(p); #endif @@ -7450,70 +7212,6 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp) erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } -/* - * erts_test_next_pid() is only used for testing. 
- */ -Sint -erts_test_next_pid(int set, Uint next) -{ - Uint64 lpd; - Sint res; - Eterm pid_data; - int first_pix = -1; - - erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); - - if (!set) - lpd = last_pid_data_read_nob(); - else { - - lpd = (Uint64) next; - pid_data = (Eterm) (lpd & ERTS_PID_DATA_MASK__); - if (ERTS_INVALID_PID == make_internal_pid(pid_data)) { - lpd += erts_proc.max; - ASSERT(erts_pid_data2ix(pid_data) - == erts_pid_data2ix(lpd & ERTS_PID_DATA_MASK__)); - } - last_pid_data_set_relb(lpd); - } - - while (1) { - int pix; - lpd++; - pix = (int) (lpd % erts_proc.max); - if (first_pix < 0) - first_pix = pix; - else if (pix == first_pix) { - res = -1; - break; - } - if (ERTS_AINT_NULL == erts_smp_atomic_read_nob(&erts_proc.tab[pix])) { - pid_data = (Eterm) (lpd & ERTS_PID_DATA_MASK__); - if (ERTS_INVALID_PID == make_internal_pid(pid_data)) { - lpd += erts_proc.max; - ASSERT(erts_pid_data2ix(pid_data) - == erts_pid_data2ix(lpd & ERTS_PID_DATA_MASK__)); - } - res = lpd & ERTS_PID_DATA_MASK__; - break; - } - } - - erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); - - return res; - -} - -Uint erts_process_count(void) -{ - erts_aint32_t res = erts_smp_atomic32_read_nob(&process_count); - if (res > erts_proc.max) - return erts_proc.max; - ASSERT(res >= 0); - return (Uint) res; -} - void erts_free_proc(Process *p) { @@ -7523,132 +7221,66 @@ erts_free_proc(Process *p) erts_free(ERTS_ALC_T_PROC, (void *) p); } +typedef struct { + Process *proc; + erts_aint32_t state; + ErtsRunQueue *run_queue; +} ErtsEarlyProcInit; + +static void early_init_process_struct(void *varg, Eterm data) +{ + ErtsEarlyProcInit *arg = (ErtsEarlyProcInit *) varg; + Process *proc = arg->proc; + + proc->common.id = make_internal_pid(data); + erts_smp_atomic32_init_relb(&proc->state, arg->state); + +#ifdef ERTS_SMP + RUNQ_SET_RQ(&proc->run_queue, arg->run_queue); + + erts_proc_lock_init(proc); /* All locks locked */ +#endif + +} + /* ** Allocate process and find out where to place next process. 
*/ static Process* alloc_process(ErtsRunQueue *rq, erts_aint32_t state) { - int pix; - Process* p; - Uint64 lpd, exp_lpd; - Eterm pid_data; - erts_aint32_t proc_count; -#ifdef DEBUG - Eterm pid; -#endif - - erts_smp_rwmtx_rlock(&erts_proc_tab_rwmtx); - - proc_count = erts_smp_atomic32_inc_read_acqb(&process_count); - if (proc_count > erts_proc.max) { - while (1) { - erts_aint32_t act_proc_count; - - act_proc_count = erts_smp_atomic32_cmpxchg_relb(&process_count, - proc_count-1, - proc_count); - if (act_proc_count == proc_count) - goto system_limit; - proc_count = act_proc_count; - if (proc_count <= erts_proc.max) - break; - } - } + ErtsEarlyProcInit init_arg; + Process *p; - p = (Process*) erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process)); + p = erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process)); if (!p) - goto enomem; - - p->approx_started = erts_get_approx_time(); - p->started_interval = get_proc_interval(); - - lpd = last_pid_data_read_acqb(); - - /* Reserve slot */ - while (1) { - lpd++; - pix = erts_pid_data2ix((Eterm) (lpd & ERTS_PID_DATA_MASK__)); - if (erts_smp_atomic_read_nob(&erts_proc.tab[pix]) == ERTS_AINT_NULL) { - erts_aint_t val; - val = erts_smp_atomic_cmpxchg_relb(&erts_proc.tab[pix], - ((erts_aint_t) - ERTS_PROC_LOCK_BUSY), - ERTS_AINT_NULL); - - if (ERTS_AINT_NULL == val) - break; - } - } - - pid_data = (Eterm) lpd & ERTS_PID_DATA_MASK__; + return NULL; - p->id = make_internal_pid(pid_data); - if (p->id == ERTS_INVALID_PID) { - /* Do not use the invalid pid; change serial */ - lpd += erts_proc.max; - ASSERT(pix == erts_pid_data2ix((Eterm) (lpd & ERTS_PID_DATA_MASK__))); - pid_data = (Eterm) lpd & ERTS_PID_DATA_MASK__; - p->id = make_internal_pid(pid_data); - ASSERT(p->id != ERTS_INVALID_PID); - } + init_arg.proc = (Process *) p; + init_arg.run_queue = rq; + init_arg.state = state; - exp_lpd = last_pid_data_read_nob(); + ASSERT(((char *) p) == ((char *) &p->common)); - /* Move last pid data forward */ - while (1) { - Uint64 act_lpd; - if (last_pid_data_cmp(lpd, exp_lpd) < 0) - break; - act_lpd = last_pid_data_cmpxchg_relb(lpd, exp_lpd); - if (act_lpd == exp_lpd) - break; - exp_lpd = act_lpd; + if (!erts_ptab_new_element(&erts_proc, + &p->common, + (void *) &init_arg, + early_init_process_struct)) { + erts_free(ERTS_ALC_T_PROC, p); + return NULL; } -#ifdef ERTS_SMP - RUNQ_SET_RQ(&p->run_queue, rq); -#endif - - erts_smp_atomic32_init_relb(&p->state, state); - -#ifdef DEBUG - pid = p->id; -#endif - -#ifdef ERTS_SMP - erts_proc_lock_init(p); /* All locks locked */ -#endif - - /* Move into slot reserved */ -#ifdef DEBUG - ASSERT(ERTS_PROC_LOCK_BUSY - == (Process *) erts_smp_atomic_xchg_relb(&erts_proc.tab[pix], - (erts_aint_t) p)); -#else - erts_smp_atomic_set_relb(&erts_proc.tab[pix], (erts_aint_t) p); -#endif - - ASSERT(internal_pid_serial(p->id) <= (erts_use_r9_pids_ports - ? ERTS_MAX_PID_R9_SERIAL - : ERTS_MAX_PID_SERIAL)); - - erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx); - + ASSERT(internal_pid_serial(p->common.id) <= ERTS_MAX_PID_SERIAL); + + p->approx_started = erts_get_approx_time(); p->rcount = 0; - ASSERT(p == (Process *) - erts_smp_atomic_read_nob( - &erts_proc.tab[internal_pid_index(pid)])); - - return p; -enomem: -system_limit: - - erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx); - return NULL; + ASSERT(p == (Process *) (erts_ptab_pix2intptr_nob( + &erts_proc, + internal_pid_index(p->common.id)))); + return p; } Eterm @@ -7795,21 +7427,21 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
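/*
 * A minimal sketch of the allocation pattern used by alloc_process()
 * above: the table reserves a slot and an identifier, then invokes a
 * caller-supplied callback so the element can be fully initialised (id,
 * state, locks) before it becomes reachable through the table.
 * ptab_new_element below is a simplified assumption, not the erts_ptab
 * API.
 */
struct element {
    unsigned id;
    int state;
};

typedef void (*init_fn)(void *arg, unsigned id);

/* Reserve a slot, hand out an id, and let the caller initialise first. */
static int ptab_new_element(struct element **slot, unsigned free_id,
                            struct element *e, void *arg, init_fn init)
{
    if (*slot)
        return 0;                  /* slot already taken: allocation fails */
    init(arg, free_id);            /* element set up before publication */
    *slot = e;                     /* now visible to concurrent lookups */
    return 1;
}

struct init_arg { struct element *e; int state; };

static void init_element(void *varg, unsigned id)
{
    struct init_arg *a = (struct init_arg *) varg;
    a->e->id = id;
    a->e->state = a->state;
}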
p->reds = 0; #ifdef ERTS_SMP - p->u.alive.ptimer = NULL; + p->common.u.alive.ptimer = NULL; #else - sys_memset(&p->u.alive.tm, 0, sizeof(ErlTimer)); + sys_memset(&p->common.u.alive.tm, 0, sizeof(ErlTimer)); #endif - p->reg = NULL; - p->nlinks = NULL; - p->monitors = NULL; + p->common.u.alive.reg = NULL; + ERTS_P_LINKS(p) = NULL; + ERTS_P_MONITORS(p) = NULL; p->nodes_monitors = NULL; p->suspend_monitors = NULL; ASSERT(is_pid(parent->group_leader)); if (parent->group_leader == ERTS_INVALID_PID) - p->group_leader = p->id; + p->group_leader = p->common.id; else { /* Needs to be done after the heap has been set up */ p->group_leader = @@ -7818,18 +7450,18 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). : STORE_NC(&p->htop, &p->off_heap, parent->group_leader); } - erts_get_default_tracing(&p->trace_flags, &p->tracer_proc); + erts_get_default_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER_PROC(p)); p->msg.first = NULL; p->msg.last = &p->msg.first; p->msg.save = &p->msg.first; p->msg.len = 0; #ifdef ERTS_SMP - p->u.alive.msg_inq.first = NULL; - p->u.alive.msg_inq.last = &p->u.alive.msg_inq.first; - p->u.alive.msg_inq.len = 0; + p->msg_inq.first = NULL; + p->msg_inq.last = &p->msg_inq.first; + p->msg_inq.len = 0; #endif - p->bif_timers = NULL; + p->u.bif_timers = NULL; p->mbuf = NULL; p->mbuf_sz = 0; p->psd = NULL; @@ -7841,7 +7473,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). DT_UTAG(p) = NIL; DT_UTAG_FLAGS(p) = 0; #endif - p->parent = parent->id == ERTS_INVALID_PID ? NIL : parent->id; + p->parent = (parent->common.id == ERTS_INVALID_PID + ? NIL + : parent->common.id); INIT_HOLE_CHECK(p); #ifdef DEBUG @@ -7849,18 +7483,19 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #endif if (IS_TRACED(parent)) { - if (parent->trace_flags & F_TRACE_SOS) { - p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS); - p->tracer_proc = parent->tracer_proc; + if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS) { + ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS); + ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent); } if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) { - trace_proc_spawn(parent, p->id, mod, func, args); + trace_proc_spawn(parent, p->common.id, mod, func, args); } - if (parent->trace_flags & F_TRACE_SOS1) { /* Overrides TRACE_CHILDREN */ - p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS); - p->tracer_proc = parent->tracer_proc; - p->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS); - parent->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS); + if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) { + /* Overrides TRACE_CHILDREN */ + ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS); + ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent); + ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOS1 | F_TRACE_SOS); + ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOS1 | F_TRACE_SOS); } } @@ -7873,27 +7508,27 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
int ret; #endif if (IS_TRACED_FL(parent, F_TRACE_PROCS)) { - trace_proc(parent, parent, am_link, p->id); + trace_proc(parent, parent, am_link, p->common.id); } #ifdef DEBUG - ret = erts_add_link(&(parent->nlinks), LINK_PID, p->id); + ret = erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id); ASSERT(ret == 0); - ret = erts_add_link(&(p->nlinks), LINK_PID, parent->id); + ret = erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id); ASSERT(ret == 0); #else - erts_add_link(&(parent->nlinks), LINK_PID, p->id); - erts_add_link(&(p->nlinks), LINK_PID, parent->id); + erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id); + erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id); #endif if (IS_TRACED(parent)) { - if (parent->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) { - p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS); - p->tracer_proc = parent->tracer_proc; /* maybe steal */ + if (ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) { + ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS); + ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent); /*maybe steal*/ - if (parent->trace_flags & F_TRACE_SOL1) { /* maybe override */ - p ->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL); - parent->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL); + if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/ + ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL); + ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL); } } } @@ -7906,8 +7541,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). Eterm mref; mref = erts_make_ref(parent); - erts_add_monitor(&(parent->monitors), MON_ORIGIN, mref, p->id, NIL); - erts_add_monitor(&(p->monitors), MON_TARGET, mref, parent->id, NIL); + erts_add_monitor(&ERTS_P_MONITORS(parent), MON_ORIGIN, mref, p->common.id, NIL); + erts_add_monitor(&ERTS_P_MONITORS(p), MON_TARGET, mref, parent->common.id, NIL); so->mref = mref; } @@ -7915,8 +7550,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->scheduler_data = NULL; p->suspendee = NIL; p->pending_suspenders = NULL; - p->u.alive.pending_exit.reason = THE_NON_VALUE; - p->u.alive.pending_exit.bp = NULL; + p->pending_exit.reason = THE_NON_VALUE; + p->pending_exit.bp = NULL; #endif #if !defined(NO_FPE_SIGNALS) || defined(HIPE) @@ -7925,7 +7560,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); - res = p->id; + res = p->common.id; /* * Schedule process for execution. @@ -7933,7 +7568,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
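/*
 * A minimal sketch of the symmetric link setup performed above: the link
 * is recorded on both endpoints, so either side can later find and remove
 * it when the other exits.  The singly linked list here is an
 * illustrative assumption; the real code stores links via erts_add_link()
 * in per-entity trees reached through ERTS_P_LINKS().
 */
#include <stdlib.h>

struct link {
    unsigned peer_id;
    struct link *next;
};

static int add_link(struct link **links, unsigned peer_id)
{
    struct link *l = malloc(sizeof *l);
    if (!l)
        return -1;
    l->peer_id = peer_id;
    l->next = *links;
    *links = l;
    return 0;
}

/* Record the link on both endpoints, as erl_create_process() does. */
static int link_both(struct link **a_links, unsigned a_id,
                     struct link **b_links, unsigned b_id)
{
    if (add_link(a_links, b_id) != 0)
        return -1;
    return add_link(b_links, a_id);
}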
schedule_process(p, state, 0); - VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->id)); + VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id)); #ifdef USE_VM_PROBES if (DTRACE_ENABLED(process_spawn)) { @@ -7968,10 +7603,10 @@ void erts_init_empty_process(Process *p) p->min_heap_size = 0; p->min_vheap_size = 0; p->rcount = 0; - p->id = ERTS_INVALID_PID; + p->common.id = ERTS_INVALID_PID; p->reds = 0; - p->tracer_proc = NIL; - p->trace_flags = F_INITIAL_TRACE_FLAGS; + ERTS_TRACER_PROC(p) = NIL; + ERTS_TRACE_FLAGS(p) = F_INITIAL_TRACE_FLAGS; p->group_leader = ERTS_INVALID_PID; p->flags = 0; p->fvalue = NIL; @@ -7984,14 +7619,14 @@ void erts_init_empty_process(Process *p) p->bin_old_vheap = 0; p->bin_vheap_mature = 0; #ifdef ERTS_SMP - p->u.alive.ptimer = NULL; + p->common.u.alive.ptimer = NULL; #else - memset(&(p->u.alive.tm), 0, sizeof(ErlTimer)); + memset(&(p->common.u.alive.tm), 0, sizeof(ErlTimer)); #endif p->next = NULL; p->off_heap.first = NULL; p->off_heap.overhead = 0; - p->reg = NULL; + p->common.u.alive.reg = NULL; p->heap_sz = 0; p->high_water = NULL; p->old_hend = NULL; @@ -8000,15 +7635,15 @@ void erts_init_empty_process(Process *p) p->mbuf = NULL; p->mbuf_sz = 0; p->psd = NULL; - p->monitors = NULL; - p->nlinks = NULL; /* List of links */ + ERTS_P_MONITORS(p) = NULL; + ERTS_P_LINKS(p) = NULL; /* List of links */ p->nodes_monitors = NULL; p->suspend_monitors = NULL; p->msg.first = NULL; p->msg.last = &p->msg.first; p->msg.save = &p->msg.first; p->msg.len = 0; - p->bif_timers = NULL; + p->u.bif_timers = NULL; p->dictionary = NULL; p->seq_trace_clock = 0; p->seq_trace_lastcnt = 0; @@ -8036,7 +7671,7 @@ void erts_init_empty_process(Process *p) p->parent = NIL; p->approx_started = 0; - p->started_interval = 0; + p->common.u.alive.started_interval = 0; #ifdef HIPE hipe_init_process(&p->hipe); @@ -8054,13 +7689,13 @@ void erts_init_empty_process(Process *p) #ifdef ERTS_SMP p->scheduler_data = NULL; - p->u.alive.msg_inq.first = NULL; - p->u.alive.msg_inq.last = &p->u.alive.msg_inq.first; - p->u.alive.msg_inq.len = 0; + p->msg_inq.first = NULL; + p->msg_inq.last = &p->msg_inq.first; + p->msg_inq.len = 0; p->suspendee = NIL; p->pending_suspenders = NULL; - p->u.alive.pending_exit.reason = THE_NON_VALUE; - p->u.alive.pending_exit.bp = NULL; + p->pending_exit.reason = THE_NON_VALUE; + p->pending_exit.bp = NULL; erts_proc_lock_init(p); erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0)); @@ -8082,25 +7717,25 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->stop == NULL); ASSERT(p->hend == NULL); ASSERT(p->heap == NULL); - ASSERT(p->id == ERTS_INVALID_PID); - ASSERT(p->tracer_proc == NIL); - ASSERT(p->trace_flags == F_INITIAL_TRACE_FLAGS); + ASSERT(p->common.id == ERTS_INVALID_PID); + ASSERT(ERTS_TRACER_PROC(p) == NIL); + ASSERT(ERTS_TRACE_FLAGS(p) == F_INITIAL_TRACE_FLAGS); ASSERT(p->group_leader == ERTS_INVALID_PID); ASSERT(p->next == NULL); - ASSERT(p->reg == NULL); + ASSERT(p->common.u.alive.reg == NULL); ASSERT(p->heap_sz == 0); ASSERT(p->high_water == NULL); ASSERT(p->old_hend == NULL); ASSERT(p->old_htop == NULL); ASSERT(p->old_heap == NULL); - ASSERT(p->monitors == NULL); - ASSERT(p->nlinks == NULL); + ASSERT(ERTS_P_MONITORS(p) == NULL); + ASSERT(ERTS_P_LINKS(p) == NULL); ASSERT(p->nodes_monitors == NULL); ASSERT(p->suspend_monitors == NULL); ASSERT(p->msg.first == NULL); ASSERT(p->msg.len == 0); - ASSERT(p->bif_timers == NULL); + ASSERT(p->u.bif_timers == NULL); ASSERT(p->dictionary == NULL); 
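/*
 * A minimal sketch of the layout change visible throughout these hunks:
 * fields shared by processes and ports (id, registered name, trace state)
 * now live in an embedded "common" struct placed first in each element,
 * and accessor macros such as ERTS_P_LINKS()/ERTS_TRACE_FLAGS() hide
 * where the field actually sits.  The names below are illustrative, not
 * the actual ErtsPTabElementCommon definition.
 */
struct common_part {
    unsigned id;
    unsigned trace_flags;
};

struct proc {
    struct common_part common;    /* must be the first member */
    int reds;
};

#define P_ID(p)          ((p)->common.id)
#define P_TRACE_FLAGS(p) ((p)->common.trace_flags)

/* Because 'common' is first, a proc pointer can be viewed as its common
 * part, which is what a shared process/port table can store. */
#define TO_COMMON(p)     (&(p)->common)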
ASSERT(p->catches == 0); ASSERT(p->cp == NULL); @@ -8110,12 +7745,12 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->parent == NIL); #ifdef ERTS_SMP - ASSERT(p->u.alive.msg_inq.first == NULL); - ASSERT(p->u.alive.msg_inq.len == 0); + ASSERT(p->msg_inq.first == NULL); + ASSERT(p->msg_inq.len == 0); ASSERT(p->suspendee == NIL); ASSERT(p->pending_suspenders == NULL); - ASSERT(p->u.alive.pending_exit.reason == THE_NON_VALUE); - ASSERT(p->u.alive.pending_exit.bp == NULL); + ASSERT(p->pending_exit.reason == THE_NON_VALUE); + ASSERT(p->pending_exit.bp == NULL); #endif /* Thing that erts_cleanup_empty_process() cleans up */ @@ -8157,7 +7792,7 @@ delete_process(Process* p) { ErlMessage* mp; - VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->id)); + VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id)); /* Cleanup psd */ @@ -8231,8 +7866,6 @@ delete_process(Process* p) mp = next_mp; } - ASSERT(!p->monitors); - ASSERT(!p->nlinks); ASSERT(!p->nodes_monitors); ASSERT(!p->suspend_monitors); @@ -8296,7 +7929,7 @@ void erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks) { ErtsProcLocks xlocks; - ASSERT(is_value(c_p->u.alive.pending_exit.reason)); + ASSERT(is_value(c_p->pending_exit.reason)); ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks); ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN); ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE) @@ -8315,10 +7948,10 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks) set_proc_exiting(c_p, erts_smp_atomic32_read_acqb(&c_p->state), - c_p->u.alive.pending_exit.reason, - c_p->u.alive.pending_exit.bp); - c_p->u.alive.pending_exit.reason = THE_NON_VALUE; - c_p->u.alive.pending_exit.bp = NULL; + c_p->pending_exit.reason, + c_p->pending_exit.bp); + c_p->pending_exit.reason = THE_NON_VALUE; + c_p->pending_exit.bp = NULL; if (xlocks) erts_smp_proc_unlock(c_p, xlocks); @@ -8327,12 +7960,14 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks) static void handle_pending_exiters(ErtsProcList *pnd_xtrs) { + /* 'list' is expected to have been fetched (i.e. 
not a ring anymore) */ ErtsProcList *plp = pnd_xtrs; - ErtsProcList *free_plp; + while (plp) { + ErtsProcList *free_plp; Process *p = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCKS_ALL); if (p) { - if (proclist_same(plp, p)) { + if (erts_proclist_same(plp, p)) { erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); if (!(state & ERTS_PSFLG_RUNNING)) { ASSERT(state & ERTS_PSFLG_PENDING_EXIT); @@ -8361,8 +7996,7 @@ save_pending_exiter(Process *p) erts_smp_runq_lock(rq); - plp->next = rq->procs.pending_exiters; - rq->procs.pending_exiters = plp; + erts_proclist_store_last(&rq->procs.pending_exiters, plp); erts_smp_runq_unlock(rq); wake_scheduler(rq, 1); @@ -8409,7 +8043,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp, hp = bp->mem; mess = copy_struct(exit_term, term_size, &hp, &bp->off_heap); /* the trace token must in this case be updated by the caller */ - seq_trace_output(token, mess, SEQ_TRACE_SEND, to->id, NULL); + seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, NULL); temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap); erts_queue_message(to, to_locksp, bp, mess, temp_token #ifdef USE_VM_PROBES @@ -8539,7 +8173,7 @@ send_exit_signal(Process *c_p, /* current process if and only else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) { #ifdef ERTS_SMP if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) { - ASSERT(!rp->u.alive.pending_exit.bp); + ASSERT(!rp->pending_exit.bp); if (rp == c_p && (*rp_locks & ERTS_PROC_LOCK_MAIN)) { /* Ensure that all locks on c_p are locked before @@ -8589,7 +8223,7 @@ send_exit_signal(Process *c_p, /* current process if and only set_pending_exit: if (is_immed(rsn)) { - rp->u.alive.pending_exit.reason = rsn; + rp->pending_exit.reason = rsn; } else { Eterm *hp; @@ -8597,11 +8231,11 @@ send_exit_signal(Process *c_p, /* current process if and only ErlHeapFragment *bp = new_message_buffer(sz); hp = &bp->mem[0]; - rp->u.alive.pending_exit.reason = copy_struct(rsn, - sz, - &hp, - &bp->off_heap); - rp->u.alive.pending_exit.bp = bp; + rp->pending_exit.reason = copy_struct(rsn, + sz, + &hp, + &bp->off_heap); + rp->pending_exit.bp = bp; } erts_smp_atomic32_read_bor_relb(&rp->state, ERTS_PSFLG_PENDING_EXIT); @@ -8700,7 +8334,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) if (!rp) { goto done; } - rmon = erts_remove_monitor(&(rp->monitors),mon->ref); + rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon == NULL) { goto done; @@ -8735,7 +8369,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) ASSERT(mon->type == MON_TARGET); ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid)); if (is_internal_port(mon->pid)) { - Port *prt = erts_id2port(mon->pid, NULL, 0); + Port *prt = erts_id2port(mon->pid); if (prt == NULL) { goto done; } @@ -8751,13 +8385,13 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) goto done; } UseTmpHeapNoproc(3); - rmon = erts_remove_monitor(&(rp->monitors),mon->ref); + rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); if (rmon) { erts_destroy_monitor(rmon); watched = (is_atom(mon->name) ? 
TUPLE2(lhp, mon->name, erts_this_dist_entry->sysname) - : pcontext->p->id); + : pcontext->p->common.id); erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process, watched, pcontext->reason); } @@ -8822,21 +8456,22 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) switch(lnk->type) { case LINK_PID: if(is_internal_port(item)) { - Port *prt = erts_id2port(item, NULL, 0); - if (prt) { - rlnk = erts_remove_link(&prt->nlinks, p->id); - if (rlnk) - erts_destroy_link(rlnk); - erts_do_exit_port(prt, p->id, reason); - erts_port_release(prt); - } + Port *prt = erts_port_lookup(item, ERTS_PORT_SFLGS_INVALID_LOOKUP); + if (prt) + erts_port_exit(NULL, + (ERTS_PORT_SIG_FLG_FORCE_SCHED + | ERTS_PORT_SIG_FLG_BROKEN_LINK), + prt, + p->common.id, + reason, + NULL); } else if(is_external_port(item)) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Erroneous link between %T and external port %T " "found\n", - p->id, + p->common.id, item); erts_send_error_to_logger_nogl(dsbufp); ASSERT(0); /* It isn't possible to setup such a link... */ @@ -8846,14 +8481,14 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) | ERTS_PROC_LOCKS_XSIG_SEND); rp = erts_pid2proc(NULL, 0, item, rp_locks); if (rp) { - rlnk = erts_remove_link(&(rp->nlinks), p->id); + rlnk = erts_remove_link(&ERTS_P_LINKS(rp), p->common.id); /* If rlnk == NULL, we got unlinked while exiting, i.e., do nothing... */ if (rlnk) { int xres; erts_destroy_link(rlnk); xres = send_exit_signal(NULL, - p->id, + p->common.id, rp, &rp_locks, reason, @@ -8865,7 +8500,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) { /* We didn't exit the process and it is traced */ if (IS_TRACED_FL(rp, F_TRACE_PROCS)) { - trace_proc(p, rp, am_getting_unlinked, p->id); + trace_proc(p, rp, am_getting_unlinked, p->common.id); } } } @@ -8879,12 +8514,12 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) ErtsDSigData dsd; int code; ErtsDistLinkData dld; - erts_remove_dist_link(&dld, p->id, item, dep); + erts_remove_dist_link(&dld, p->common.id, item, dep); erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0); if (code == ERTS_DSIG_PREP_CONNECTED) { - code = erts_dsig_send_exit_tt(&dsd, p->id, item, reason, - SEQ_TRACE_TOKEN(p)); + code = erts_dsig_send_exit_tt(&dsd, p->common.id, item, + reason, SEQ_TRACE_TOKEN(p)); ASSERT(code == ERTS_DSIG_SEND_OK); } erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); @@ -8899,7 +8534,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) /* dist entries have node links in a separate structure to avoid confusion */ erts_smp_de_links_lock(dep); - rlnk = erts_remove_link(&(dep->node_links), p->id); + rlnk = erts_remove_link(&(dep->node_links), p->common.id); erts_smp_de_links_unlock(dep); if (rlnk) erts_destroy_link(rlnk); @@ -8927,15 +8562,6 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p) erts_destroy_suspend_monitor(smon); } -#ifdef ERTS_SMP -static void -proc_dec_refc(void *vproc) -{ - erts_smp_proc_dec_refc((Process *) vproc); -} -#endif - - /* this function fishishes a process and propagates exit messages - called by process_main when a process dies */ void @@ -8973,10 +8599,10 @@ erts_do_exit_process(Process* p, Eterm reason) state = set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state)); if (state & ERTS_PSFLG_PENDING_EXIT) { /* Process exited before pending exit was received... 
*/ - p->u.alive.pending_exit.reason = THE_NON_VALUE; - if (p->u.alive.pending_exit.bp) { - free_message_buffer(p->u.alive.pending_exit.bp); - p->u.alive.pending_exit.bp = NULL; + p->pending_exit.reason = THE_NON_VALUE; + if (p->pending_exit.bp) { + free_message_buffer(p->pending_exit.bp); + p->pending_exit.bp = NULL; } } @@ -8993,23 +8619,25 @@ erts_do_exit_process(Process* p, Eterm reason) trace_proc(p, p, am_exit, reason); } - erts_trace_check_exiting(p->id); + erts_trace_check_exiting(p->common.id); - ASSERT((p->trace_flags & F_INITIAL_TRACE_FLAGS) == F_INITIAL_TRACE_FLAGS); + ASSERT((ERTS_TRACE_FLAGS(p) & F_INITIAL_TRACE_FLAGS) + == F_INITIAL_TRACE_FLAGS); cancel_timer(p); /* Always cancel timer just in case */ - /* - * The timer of this process can *not* be used anymore. The field used - * for the timer is now used for misc exiting data. - */ - p->u.exit_data = NULL; - - if (p->bif_timers) + if (p->u.bif_timers) erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL); erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); + /* + * The p->u.bif_timers of this process can *not* be used anymore; + * will be overwritten by misc termination data. + */ + p->u.terminate = NULL; + + erts_continue_exit_process(p); } @@ -9081,9 +8709,9 @@ erts_continue_exit_process(Process *p) * The registered name *should* be the last "erlang resource" to * cleanup. */ - if (p->reg) { + if (p->common.u.alive.reg) { (void) erts_unregister_name(p, ERTS_PROC_LOCK_MAIN, NULL, THE_NON_VALUE); - ASSERT(!p->reg); + ASSERT(!p->common.u.alive.reg); } erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); @@ -9097,22 +8725,18 @@ erts_continue_exit_process(Process *p) yield_allowed = 0; #endif + /* + * Note! The monitor and link fields will be overwritten + * by erts_ptab_delete_element() below. + */ + mon = ERTS_P_MONITORS(p); + lnk = ERTS_P_LINKS(p); + { - int maybe_save; - int pix; /* Do *not* use erts_get_runq_proc() */ ErtsRunQueue *rq; rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p)); - pix = internal_pid_index(p->id); - - erts_smp_rwmtx_rlock(&erts_proc_tab_rwmtx); - maybe_save = saved_term_procs.end != NULL; - if (maybe_save) { - erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx); - erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); - } - erts_smp_runq_lock(rq); #ifdef ERTS_SMP @@ -9123,22 +8747,11 @@ erts_continue_exit_process(Process *p) p->scheduler_data->current_process = NULL; p->scheduler_data->free_process = p; #endif - /* Time of death! */ - erts_smp_atomic_set_relb(&erts_proc.tab[pix], ERTS_AINT_NULL); - ASSERT(erts_smp_atomic32_read_nob(&process_count) > 0); - erts_smp_atomic32_dec_relb(&process_count); + /* Time of death! */ + erts_ptab_delete_element(&erts_proc, &p->common); erts_smp_runq_unlock(rq); - - if (!maybe_save) - erts_smp_rwmtx_runlock(&erts_proc_tab_rwmtx); - else { - if (saved_term_procs.end) - save_terminating_process(p); - erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); - } - } /* @@ -9148,12 +8761,6 @@ erts_continue_exit_process(Process *p) * when the monitors and/or links hit. 
*/ - mon = p->monitors; - p->monitors = NULL; /* to avoid recursive deletion during traversal */ - - lnk = p->nlinks; - p->nlinks = NULL; - { /* Inactivate and notify free */ erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state); @@ -9197,7 +8804,7 @@ erts_continue_exit_process(Process *p) UseTmpHeap(4,p); hp = &tmp_heap[0]; - exit_tuple = TUPLE3(hp, am_EXIT, p->id, reason); + exit_tuple = TUPLE3(hp, am_EXIT, p->common.id, reason); exit_tuple_sz = size_object(exit_tuple); @@ -9223,9 +8830,6 @@ erts_continue_exit_process(Process *p) delete_process(p); #ifdef ERTS_SMP - erts_schedule_thr_prgr_later_op(proc_dec_refc, - (void *) p, - &p->u.release_data); erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); #endif @@ -9277,9 +8881,9 @@ cancel_timer(Process* p) ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); p->flags &= ~(F_INSLPQUEUE|F_TIMO); #ifdef ERTS_SMP - erts_cancel_smp_ptimer(p->u.alive.ptimer); + erts_cancel_smp_ptimer(p->common.u.alive.ptimer); #else - erts_cancel_timer(&p->u.alive.tm); + erts_cancel_timer(&p->common.u.alive.tm); #endif } @@ -9300,12 +8904,12 @@ set_timer(Process* p, Uint timeout) p->flags &= ~F_TIMO; #ifdef ERTS_SMP - erts_create_smp_ptimer(&p->u.alive.ptimer, - p->id, + erts_create_smp_ptimer(&p->common.u.alive.ptimer, + p->common.id, (ErlTimeoutProc) timeout_proc, timeout); #else - erts_set_timer(&p->u.alive.tm, + erts_set_timer(&p->common.u.alive.tm, (ErlTimeoutProc) timeout_proc, NULL, (void*) p, @@ -9323,7 +8927,7 @@ erts_stack_dump(int to, void *to_arg, Process *p) Eterm* sp; int yreg = -1; - if (p->trace_flags & F_SENSITIVE) { + if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) { return; } erts_program_counter_info(to, to_arg, p); @@ -9411,1068 +9015,6 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg) return yreg; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\ - * The processes/0 BIF implementation. * -\* */ - - -#define ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED 25 -#define ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE 1000 -#define ERTS_PROCESSES_BIF_MIN_START_REDS \ - (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \ - / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED) - -#define ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS 1 - -#define ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED 10 - -#define ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS \ - (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \ - / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED) - - -#define ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED 75 - -#define ERTS_PROCS_DBG_DO_TRACE 0 - -#ifdef DEBUG -# define ERTS_PROCESSES_BIF_DEBUGLEVEL 100 -#else -# define ERTS_PROCESSES_BIF_DEBUGLEVEL 0 -#endif - -#define ERTS_PROCS_DBGLVL_CHK_HALLOC 1 -#define ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS 5 -#define ERTS_PROCS_DBGLVL_CHK_PIDS 10 -#define ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST 20 -#define ERTS_PROCS_DBGLVL_CHK_RESLIST 20 - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0 -# define ERTS_PROCS_ASSERT(EXP) -#else -# define ERTS_PROCS_ASSERT(EXP) \ - ((void) ((EXP) \ - ? 
1 \ - : (debug_processes_assert_error(#EXP, __FILE__, __LINE__), 0))) -#endif - - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC -# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ) \ -do { \ - ERTS_PROCS_ASSERT(!(PBDP)->debug.heap); \ - ERTS_PROCS_ASSERT(!(PBDP)->debug.heap_size); \ - (PBDP)->debug.heap = (HP); \ - (PBDP)->debug.heap_size = (SZ); \ -} while (0) -# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP) \ -do { \ - ERTS_PROCS_ASSERT((PBDP)->debug.heap); \ - ERTS_PROCS_ASSERT((PBDP)->debug.heap_size); \ - ERTS_PROCS_ASSERT((PBDP)->debug.heap + (PBDP)->debug.heap_size == (HP));\ - (PBDP)->debug.heap = NULL; \ - (PBDP)->debug.heap_size = 0; \ -} while (0) -# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP) \ -do { \ - (PBDP)->debug.heap = NULL; \ - (PBDP)->debug.heap_size = 0; \ -} while (0) -#else -# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ) -# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP) -# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP) -#endif - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST -# define ERTS_PROCS_DBG_CHK_RESLIST(R) debug_processes_check_res_list((R)) -#else -# define ERTS_PROCS_DBG_CHK_RESLIST(R) -#endif - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS -# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP) debug_processes_save_all_pids((PBDP)) -# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP) \ -do { \ - if (!(PBDP)->debug.correct_pids_verified) \ - debug_processes_verify_all_pids((PBDP)); \ -} while (0) -# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP) \ -do { \ - if ((PBDP)->debug.correct_pids) { \ - erts_free(ERTS_ALC_T_PROCS_PIDS, \ - (PBDP)->debug.correct_pids); \ - (PBDP)->debug.correct_pids = NULL; \ - } \ -} while(0) -# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP) \ -do { \ - (PBDP)->debug.correct_pids_verified = 0; \ - (PBDP)->debug.correct_pids = NULL; \ -} while (0) -#else -# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP) -# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP) -# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP) -# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP) -#endif - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS -# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, IC) \ - debug_processes_check_found_pid((PBDP), (PID), (IC), 1) -# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, IC) \ - debug_processes_check_found_pid((PBDP), (PID), (IC), 0) -#else -# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, IC) -# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, IC) -#endif - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST -# define ERTS_PROCS_DBG_CHK_TPLIST() \ - debug_processes_check_term_proc_list() -# define ERTS_PROCS_DBG_CHK_FREELIST(FL) \ - debug_processes_check_term_proc_free_list(FL) -#else -# define ERTS_PROCS_DBG_CHK_TPLIST() -# define ERTS_PROCS_DBG_CHK_FREELIST(FL) -#endif - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0 -#if ERTS_PROCS_DBG_DO_TRACE -# define ERTS_PROCS_DBG_INIT(P, PBDP) (PBDP)->debug.caller = (P)->id -# else -# define ERTS_PROCS_DBG_INIT(P, PBDP) -# endif -# define ERTS_PROCS_DBG_CLEANUP(PBDP) -#else -# define ERTS_PROCS_DBG_INIT(P, PBDP) \ -do { \ - (PBDP)->debug.caller = (P)->id; \ - ERTS_PROCS_DBG_HEAP_ALLOC_INIT((PBDP)); \ - ERTS_PROCS_DBG_CHK_PIDS_INIT((PBDP)); \ -} while (0) -# define ERTS_PROCS_DBG_CLEANUP(PBDP) \ -do { \ - ERTS_PROCS_DBG_CLEANUP_CHK_PIDS((PBDP)); \ -} while (0) -#endif - -#if ERTS_PROCS_DBG_DO_TRACE -# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT) \ - erts_fprintf(stderr, "%T %s:%d:%s(): %s\n", \ - (PID), __FILE__, 
__LINE__, #FUNC, #WHAT) -#else -# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT) -#endif - -static Uint processes_bif_tab_chunks; -static Export processes_trap_export; - -typedef struct { - Uint64 interval; -} ErtsProcessesBifChunkInfo; - -typedef enum { - INITIALIZING, - INSPECTING_TABLE, - INSPECTING_TERMINATED_PROCESSES, - BUILDING_RESULT, - RETURN_RESULT -} ErtsProcessesBifState; - -typedef struct { - ErtsProcessesBifState state; - Eterm caller; - ErtsProcessesBifChunkInfo *chunk; - int tix; - int pid_ix; - int pid_sz; - Eterm *pid; - ErtsTermProcElement *bif_invocation; /* Only used when > 1 chunk */ - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0 || ERTS_PROCS_DBG_DO_TRACE - struct { - Eterm caller; -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS - Uint64 *pid_started; -#endif -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC - Eterm *heap; - Uint heap_size; -#endif -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS - int correct_pids_verified; - Eterm *correct_pids; -#endif - } debug; -#endif - -} ErtsProcessesBifData; - - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0 -static void debug_processes_assert_error(char* expr, char* file, int line); -#endif -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST -static void debug_processes_check_res_list(Eterm list); -#endif -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS -static void debug_processes_save_all_pids(ErtsProcessesBifData *pbdp); -static void debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp); -#endif -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS -static void debug_processes_check_found_pid(ErtsProcessesBifData *pbdp, - Eterm pid, - Uint64 ic, - int pid_should_be_found); -#endif -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST -static void debug_processes_check_term_proc_list(void); -static void debug_processes_check_term_proc_free_list(ErtsTermProcElement *tpep); -#endif - -static void -save_terminating_process(Process *p) -{ - ErtsTermProcElement *tpep = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL, - sizeof(ErtsTermProcElement)); - ERTS_PROCS_ASSERT(saved_term_procs.start && saved_term_procs.end); - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx)); - - ERTS_PROCS_DBG_CHK_TPLIST(); - - tpep->prev = saved_term_procs.end; - tpep->next = NULL; - tpep->ix = internal_pid_index(p->id); - tpep->u.process.pid = p->id; - tpep->u.process.spawned = p->started_interval; - tpep->u.process.exited = get_proc_interval(); - - saved_term_procs.end->next = tpep; - saved_term_procs.end = tpep; - - ERTS_PROCS_DBG_CHK_TPLIST(); - - ERTS_PROCS_ASSERT(tpep->prev->ix >= 0 - ? 
(tpep->u.process.exited - >= tpep->prev->u.process.exited) - : (tpep->u.process.exited - >= tpep->prev->u.bif_invocation.interval)); -} - -static void -cleanup_processes_bif_data(Binary *bp) -{ - ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(bp); - - ERTS_PROCS_DBG_TRACE(pbdp->debug.caller, cleanup_processes_bif_data, call); - - if (pbdp->state != INITIALIZING) { - - if (pbdp->chunk) { - erts_free(ERTS_ALC_T_PROCS_CNKINF, pbdp->chunk); - pbdp->chunk = NULL; - } - if (pbdp->pid) { - erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->pid); - pbdp->pid = NULL; - } - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS - if (pbdp->debug.pid_started) { - erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.pid_started); - pbdp->debug.pid_started = NULL; - } -#endif - - if (pbdp->bif_invocation) { - ErtsTermProcElement *tpep; - - erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); - - ERTS_PROCS_DBG_TRACE(pbdp->debug.caller, - cleanup_processes_bif_data, - term_proc_cleanup); - - tpep = pbdp->bif_invocation; - pbdp->bif_invocation = NULL; - - ERTS_PROCS_DBG_CHK_TPLIST(); - - if (tpep->prev) { - /* - * Only remove this bif invokation when we - * have preceding invokations. - */ - tpep->prev->next = tpep->next; - if (tpep->next) - tpep->next->prev = tpep->prev; - else { - /* - * At the time of writing this branch cannot be - * reached. I don't want to remove this code though - * since it may be possible to reach this line - * in the future if the cleanup order in - * erts_do_exit_process() is changed. The ASSERT(0) - * is only here to make us aware that the reorder - * has happened. /rickard - */ - ASSERT(0); - saved_term_procs.end = tpep->prev; - } - erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep); - } - else { - /* - * Free all elements until next bif invokation - * is found. - */ - ERTS_PROCS_ASSERT(saved_term_procs.start == tpep); - do { - ErtsTermProcElement *ftpep = tpep; - tpep = tpep->next; - erts_free(ERTS_ALC_T_PROCS_TPROC_EL, ftpep); - } while (tpep && tpep->ix >= 0); - saved_term_procs.start = tpep; - if (tpep) - tpep->prev = NULL; - else - saved_term_procs.end = NULL; - } - - ERTS_PROCS_DBG_CHK_TPLIST(); - - erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); - - } - } - - ERTS_PROCS_DBG_TRACE(pbdp->debug.caller, - cleanup_processes_bif_data, - return); - ERTS_PROCS_DBG_CLEANUP(pbdp); -} - -static int -processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp) -{ - ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp); - int have_reds; - int reds; - int locked = 0; - - do { - switch (pbdp->state) { - case INITIALIZING: - pbdp->chunk = erts_alloc(ERTS_ALC_T_PROCS_CNKINF, - (sizeof(ErtsProcessesBifChunkInfo) - * processes_bif_tab_chunks)); - pbdp->tix = 0; - pbdp->pid_ix = 0; - - erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); - locked = 1; - - ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, init); - - pbdp->pid_sz = erts_process_count(); - pbdp->pid = erts_alloc(ERTS_ALC_T_PROCS_PIDS, - sizeof(Eterm)*pbdp->pid_sz); - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS - pbdp->debug.pid_started = erts_alloc(ERTS_ALC_T_PROCS_PIDS, - sizeof(Uint64)*pbdp->pid_sz); -#endif - - ERTS_PROCS_DBG_SAVE_PIDS(pbdp); - - if (processes_bif_tab_chunks == 1) - pbdp->bif_invocation = NULL; - else { - /* - * We will have to access the table multiple times - * releasing the table lock in between chunks. 
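The comment above is the heart of the chunked strategy: the table is walked in fixed-size chunks (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE entries at a time, charged at ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED entries per reduction), and the table lock is dropped between chunks so other schedulers can make progress, as the INSPECTING_TABLE state further down shows. A minimal, self-contained sketch of that lock-per-chunk, pay-as-you-go pattern; toy_tab, toy_lock, inspect_chunked and the constants are illustrative stand-ins, not ERTS APIs:

    #include <pthread.h>

    #define TAB_SIZE        4000
    #define CHUNK_SIZE      1000      /* entries inspected per lock hold     */
    #define ENTRIES_PER_RED 25        /* table entries charged per reduction */

    static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *toy_tab[TAB_SIZE];   /* toy stand-in for the process table  */

    /* Walk the table from *ixp, one chunk per lock acquisition, until the
     * reduction budget is spent.  Returns the reductions consumed; *ixp is
     * the resume point, so the caller can yield and call again later. */
    static int inspect_chunked(int *ixp, int reds_left, void (*inspect)(void *))
    {
        int reds_used = 0;

        while (*ixp < TAB_SIZE && reds_used < reds_left) {
            int ix, end_ix = *ixp + CHUNK_SIZE;

            if (end_ix > TAB_SIZE)
                end_ix = TAB_SIZE;

            pthread_mutex_lock(&toy_lock);      /* lock held per chunk only */
            for (ix = *ixp; ix < end_ix; ix++)
                if (toy_tab[ix])
                    inspect(toy_tab[ix]);
            pthread_mutex_unlock(&toy_lock);

            reds_used += (end_ix - *ixp) / ENTRIES_PER_RED;
            *ixp = end_ix;
        }
        return reds_used;
    }

A caller would invoke inspect_chunked() repeatedly with whatever reduction budget it has left, yielding in between, much as processes_bif_engine() yields back to the trapping BIF.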
- */ - pbdp->bif_invocation = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL, - sizeof(ErtsTermProcElement)); - pbdp->bif_invocation->ix = -1; - pbdp->bif_invocation->u.bif_invocation.interval - = step_proc_interval(); - ERTS_PROCS_DBG_CHK_TPLIST(); - - pbdp->bif_invocation->next = NULL; - if (saved_term_procs.end) { - pbdp->bif_invocation->prev = saved_term_procs.end; - saved_term_procs.end->next = pbdp->bif_invocation; - ERTS_PROCS_ASSERT(saved_term_procs.start); - } - else { - pbdp->bif_invocation->prev = NULL; - saved_term_procs.start = pbdp->bif_invocation; - } - saved_term_procs.end = pbdp->bif_invocation; - - ERTS_PROCS_DBG_CHK_TPLIST(); - - } - - pbdp->state = INSPECTING_TABLE; - /* Fall through */ - - case INSPECTING_TABLE: { - int ix = pbdp->tix; - int indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE; - int cix = ix / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE; - int end_ix = ix + indices; - Uint64 *invocation_interval_p; - - invocation_interval_p - = (pbdp->bif_invocation - ? &pbdp->bif_invocation->u.bif_invocation.interval - : NULL); - - ERTS_PROCS_ASSERT(is_nil(*res_accp)); - if (!locked) { - erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); - locked = 1; - } - - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx)); - ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_table); - - if (cix != 0) - pbdp->chunk[cix].interval = step_proc_interval(); - else if (pbdp->bif_invocation) - pbdp->chunk[0].interval = *invocation_interval_p; - /* else: interval is irrelevant */ - - if (end_ix >= erts_proc.max) { - ERTS_PROCS_ASSERT(cix+1 == processes_bif_tab_chunks); - end_ix = erts_proc.max; - indices = end_ix - ix; - /* What to do when done with this chunk */ - pbdp->state = (processes_bif_tab_chunks == 1 - ? BUILDING_RESULT - : INSPECTING_TERMINATED_PROCESSES); - } - - for (; ix < end_ix; ix++) { - Process *rp = erts_pix2proc(ix); - if (rp - && (!invocation_interval_p - || rp->started_interval < *invocation_interval_p)) { - ERTS_PROCS_ASSERT(is_internal_pid(rp->id)); - pbdp->pid[pbdp->pid_ix] = rp->id; - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS - pbdp->debug.pid_started[pbdp->pid_ix] = rp->started_interval; -#endif - - pbdp->pid_ix++; - ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz); - } - } - - pbdp->tix = end_ix; - - erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); - locked = 0; - - reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED; - BUMP_REDS(p, reds); - - have_reds = ERTS_BIF_REDS_LEFT(p); - - if (have_reds && pbdp->state == INSPECTING_TABLE) { - ix = pbdp->tix; - indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE; - end_ix = ix + indices; - if (end_ix > erts_proc.max) { - end_ix = erts_proc.max; - indices = end_ix - ix; - } - - reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED; - - /* Pretend we have no reds left if we haven't got enough - reductions to complete next chunk */ - if (reds > have_reds) - have_reds = 0; - } - - break; - } - - case INSPECTING_TERMINATED_PROCESSES: { - int i; - int max_reds; - int free_term_procs = 0; - Uint64 invocation_interval; - ErtsTermProcElement *tpep; - ErtsTermProcElement *free_list = NULL; - - tpep = pbdp->bif_invocation; - ERTS_PROCS_ASSERT(tpep); - invocation_interval = tpep->u.bif_invocation.interval; - - max_reds = have_reds = ERTS_BIF_REDS_LEFT(p); - if (max_reds > ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS) - max_reds = ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS; - - reds = 0; - erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); - ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_term_procs); - - 
ERTS_PROCS_DBG_CHK_TPLIST(); - - if (tpep->prev) - tpep->prev->next = tpep->next; - else { - ERTS_PROCS_ASSERT(saved_term_procs.start == tpep); - saved_term_procs.start = tpep->next; - - if (saved_term_procs.start && saved_term_procs.start->ix >= 0) { - free_list = saved_term_procs.start; - free_term_procs = 1; - } - } - - if (tpep->next) - tpep->next->prev = tpep->prev; - else - saved_term_procs.end = tpep->prev; - - tpep = tpep->next; - - i = 0; - while (reds < max_reds && tpep) { - if (tpep->ix < 0) { - if (free_term_procs) { - ERTS_PROCS_ASSERT(free_list); - ERTS_PROCS_ASSERT(tpep->prev); - - tpep->prev->next = NULL; /* end of free_list */ - saved_term_procs.start = tpep; - tpep->prev = NULL; - free_term_procs = 0; - } - } - else { - int cix = tpep->ix/ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE; - Uint64 chunk_interval = pbdp->chunk[cix].interval; - Eterm pid = tpep->u.process.pid; - ERTS_PROCS_ASSERT(is_internal_pid(pid)); - - if (tpep->u.process.spawned < invocation_interval) { - if (tpep->u.process.exited < chunk_interval) { - ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp, - pid, - tpep->u.process.spawned); - pbdp->pid[pbdp->pid_ix] = pid; -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS - pbdp->debug.pid_started[pbdp->pid_ix] - = tpep->u.process.spawned; -#endif - pbdp->pid_ix++; - ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz); - } - else { - ERTS_PROCS_DBG_CHK_PID_FOUND(pbdp, - pid, - tpep->u.process.spawned); - } - } - else { - ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp, - pid, - tpep->u.process.spawned); - } - - i++; - if (i == ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED) { - reds++; - i = 0; - } - if (free_term_procs) - reds += ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS; - } - tpep = tpep->next; - } - - if (free_term_procs) { - ERTS_PROCS_ASSERT(free_list); - saved_term_procs.start = tpep; - if (!tpep) - saved_term_procs.end = NULL; - else { - ERTS_PROCS_ASSERT(tpep->prev); - tpep->prev->next = NULL; /* end of free_list */ - tpep->prev = NULL; - } - } - - if (!tpep) { - /* Done */ - ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz); - pbdp->state = BUILDING_RESULT; - pbdp->bif_invocation->next = free_list; - free_list = pbdp->bif_invocation; - pbdp->bif_invocation = NULL; - } - else { - /* Link in bif_invocation again where we left off */ - pbdp->bif_invocation->prev = tpep->prev; - pbdp->bif_invocation->next = tpep; - tpep->prev = pbdp->bif_invocation; - if (pbdp->bif_invocation->prev) - pbdp->bif_invocation->prev->next = pbdp->bif_invocation; - else { - ERTS_PROCS_ASSERT(saved_term_procs.start == tpep); - saved_term_procs.start = pbdp->bif_invocation; - } - } - - ERTS_PROCS_DBG_CHK_TPLIST(); - ERTS_PROCS_DBG_CHK_FREELIST(free_list); - erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); - - /* - * We do the actual free of term proc structures now when we - * have released the table lock instead of when we encountered - * them. This since free() isn't for free and we don't want to - * unnecessarily block other schedulers. 
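The rationale stated above — unlink cheaply while the table lock is held, pay for free() only after the lock has been released — is the unlink-then-free pattern carried out by the while (free_list) loop just below. A small self-contained sketch of that pattern under the same assumption; node, shared_list, list_lock and drain_and_free are hypothetical names, not ERTS types:

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *shared_list;   /* protected by list_lock */

    /* Unlink everything while holding the lock, but call free() only after
     * the lock is dropped, so other threads are not blocked by the releases. */
    static void drain_and_free(void)
    {
        struct node *free_list;

        pthread_mutex_lock(&list_lock);
        free_list = shared_list;       /* cheap: just steal the list head */
        shared_list = NULL;
        pthread_mutex_unlock(&list_lock);

        while (free_list) {            /* expensive part, done unlocked */
            struct node *n = free_list;
            free_list = n->next;
            free(n);
        }
    }

The only work done under the lock is pointer manipulation; every free() happens after the unlock, so other threads contend for the lock only briefly.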
- */ - while (free_list) { - tpep = free_list; - free_list = tpep->next; - erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep); - } - - have_reds -= reds; - if (have_reds < 0) - have_reds = 0; - BUMP_REDS(p, reds); - break; - } - - case BUILDING_RESULT: { - int conses, ix, min_ix; - Eterm *hp; - Eterm res = *res_accp; - - ERTS_PROCS_DBG_VERIFY_PIDS(pbdp); - ERTS_PROCS_DBG_CHK_RESLIST(res); - - ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, begin_build_res); - - have_reds = ERTS_BIF_REDS_LEFT(p); - conses = ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED*have_reds; - min_ix = pbdp->pid_ix - conses; - if (min_ix < 0) { - min_ix = 0; - conses = pbdp->pid_ix; - } - - hp = HAlloc(p, conses*2); - ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, conses*2); - - for (ix = pbdp->pid_ix - 1; ix >= min_ix; ix--) { - ERTS_PROCS_ASSERT(is_internal_pid(pbdp->pid[ix])); - res = CONS(hp, pbdp->pid[ix], res); - hp += 2; - } - - ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp); - - pbdp->pid_ix = min_ix; - if (min_ix == 0) - pbdp->state = RETURN_RESULT; - else { - pbdp->pid_sz = min_ix; - pbdp->pid = erts_realloc(ERTS_ALC_T_PROCS_PIDS, - pbdp->pid, - sizeof(Eterm)*pbdp->pid_sz); -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS - pbdp->debug.pid_started = erts_realloc(ERTS_ALC_T_PROCS_PIDS, - pbdp->debug.pid_started, - (sizeof(Uint64) - * pbdp->pid_sz)); -#endif - } - reds = conses/ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED; - BUMP_REDS(p, reds); - have_reds -= reds; - - ERTS_PROCS_DBG_CHK_RESLIST(res); - ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, end_build_res); - *res_accp = res; - break; - } - case RETURN_RESULT: - cleanup_processes_bif_data(mbp); - return 1; - - default: - erl_exit(ERTS_ABORT_EXIT, - "erlang:processes/0: Invalid state: %d\n", - (int) pbdp->state); - } - - - } while (have_reds || pbdp->state == RETURN_RESULT); - - return 0; -} - -/* - * processes_trap/2 is a hidden BIF that processes/0 traps to. - */ - -static BIF_RETTYPE processes_trap(BIF_ALIST_2) -{ - Eterm res_acc; - Binary *mbp; - - /* - * This bif cannot be called from erlang code. It can only be - * trapped to from processes/0; therefore, a bad argument - * is a processes/0 internal error. - */ - - ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, call); - ERTS_PROCS_ASSERT(is_nil(BIF_ARG_1) || is_list(BIF_ARG_1)); - - res_acc = BIF_ARG_1; - - ERTS_PROCS_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2)); - - mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val; - - ERTS_PROCS_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) - == cleanup_processes_bif_data); - ERTS_PROCS_ASSERT( - ((ErtsProcessesBifData *) ERTS_MAGIC_BIN_DATA(mbp))->debug.caller - == BIF_P->id); - - if (processes_bif_engine(BIF_P, &res_acc, mbp)) { - ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, return); - BIF_RET(res_acc); - } - else { - ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, trap); - ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, BIF_ARG_2); - } -} - - - -/* - * The actual processes/0 BIF. - */ - -BIF_RETTYPE processes_0(BIF_ALIST_0) -{ - /* - * A requirement: The list of pids returned should be a consistent - * snapshot of all processes existing at some point - * in time during the execution of processes/0. Since - * processes might terminate while processes/0 is - * executing, we have to keep track of terminated - * processes and add them to the result. We also - * ignore processes created after processes/0 has - * begun executing. 
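The requirement spelled out above is met in the removed implementation by stamping every spawn and exit with a monotonically increasing interval, remembering processes that terminate while the BIF is running, and filtering on those stamps. A toy sketch of the inclusion rule only, ignoring chunking and locking; toy_proc, toy_spawn, toy_exit and snapshot_contains are illustrative names, not ERTS code:

    #include <stdint.h>

    typedef struct {
        uint64_t spawned;   /* interval at spawn                     */
        uint64_t exited;    /* interval at exit, 0 while still alive */
    } toy_proc;

    static uint64_t interval;          /* bumped on every spawn and exit */

    static void toy_spawn(toy_proc *p) { p->spawned = ++interval; p->exited = 0; }
    static void toy_exit (toy_proc *p) { p->exited  = ++interval; }

    /* A snapshot taken at invocation interval `inv` contains a process iff
     * it was spawned before the call started and had not yet exited at that
     * point; processes spawned later, or already gone, are excluded. */
    static int snapshot_contains(const toy_proc *p, uint64_t inv)
    {
        return p->spawned < inv && (p->exited == 0 || p->exited >= inv);
    }

The real code refines this by comparing exit stamps against per-chunk intervals, since different chunks of the table are inspected at different points in time.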
- */ - Eterm res_acc = NIL; - Binary *mbp = erts_create_magic_binary(sizeof(ErtsProcessesBifData), - cleanup_processes_bif_data); - ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp); - - ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, call); - pbdp->state = INITIALIZING; - ERTS_PROCS_DBG_INIT(BIF_P, pbdp); - - if (ERTS_BIF_REDS_LEFT(BIF_P) >= ERTS_PROCESSES_BIF_MIN_START_REDS - && processes_bif_engine(BIF_P, &res_acc, mbp)) { - erts_bin_free(mbp); - ERTS_PROCS_DBG_CHK_RESLIST(res_acc); - ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, return); - BIF_RET(res_acc); - } - else { - Eterm *hp; - Eterm magic_bin; - ERTS_PROCS_DBG_CHK_RESLIST(res_acc); - hp = HAlloc(BIF_P, PROC_BIN_SIZE); - ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, PROC_BIN_SIZE); - magic_bin = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mbp); - ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp); - ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, trap); - ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, magic_bin); - } -} - -static void -init_processes_bif(void) -{ - saved_term_procs.start = NULL; - saved_term_procs.end = NULL; - processes_bif_tab_chunks = (((erts_proc.max - 1) - / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE) - + 1); - - /* processes_trap/2 is a hidden BIF that the processes/0 BIF traps to. */ - erts_init_trap_export(&processes_trap_export, am_erlang, am_processes_trap, 2, - &processes_trap); - -} - -/* - * Debug stuff - */ - -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) -int -erts_dbg_check_halloc_lock(Process *p) -{ - if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)) - return 1; - if (p->id == ERTS_INVALID_PID) - return 1; - if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process) - return 1; - if (erts_thr_progress_is_blocking()) - return 1; - return 0; -} -#endif - -Eterm -erts_debug_processes(Process *c_p) -{ - /* This is the old processes/0 BIF. 
*/ - int i; - Uint need; - Eterm res; - Eterm* hp; - Process *p; - Eterm *hp_end; - - erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); - - res = NIL; - need = erts_process_count() * 2; - hp = HAlloc(c_p, need); /* we need two heap words for each pid */ - hp_end = hp + need; - - /* make the list by scanning bakward */ - - - for (i = erts_proc.max-1; i >= 0; i--) { - p = erts_pix2proc(i); - if (p) { - res = CONS(hp, p->id, res); - hp += 2; - } - } - - erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); - - HRelease(c_p, hp_end, hp); - - return res; -} - -Eterm -erts_debug_processes_bif_info(Process *c_p) -{ - ERTS_DECL_AM(processes_bif_info); - Eterm elements[] = { - AM_processes_bif_info, - make_small((Uint) ERTS_PROCESSES_BIF_MIN_START_REDS), - make_small((Uint) processes_bif_tab_chunks), - make_small((Uint) ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE), - make_small((Uint) ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED), - make_small((Uint) ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS), - make_small((Uint) ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED), - make_small((Uint) ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS), - make_small((Uint) ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED), - make_small((Uint) ERTS_PROCESSES_BIF_DEBUGLEVEL) - }; - Uint sz = 0; - Eterm *hp; - (void) erts_bld_tuplev(NULL, &sz, sizeof(elements)/sizeof(Eterm), elements); - hp = HAlloc(c_p, sz); - return erts_bld_tuplev(&hp, NULL, sizeof(elements)/sizeof(Eterm), elements); -} - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS -static void -debug_processes_check_found_pid(ErtsProcessesBifData *pbdp, - Eterm pid, - Uint64 ic, - int pid_should_be_found) -{ - int i; - for (i = 0; i < pbdp->pid_ix; i++) { - if (pbdp->pid[i] == pid && pbdp->debug.pid_started[i] == ic) { - ERTS_PROCS_ASSERT(pid_should_be_found); - return; - } - } - ERTS_PROCS_ASSERT(!pid_should_be_found); -} -#endif - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST -static void -debug_processes_check_res_list(Eterm list) -{ - while (is_list(list)) { - Eterm* consp = list_val(list); - Eterm hd = CAR(consp); - ERTS_PROCS_ASSERT(is_internal_pid(hd)); - list = CDR(consp); - } - - ERTS_PROCS_ASSERT(is_nil(list)); -} -#endif - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS - -static void -debug_processes_save_all_pids(ErtsProcessesBifData *pbdp) -{ - int ix, tix, cpix; - pbdp->debug.correct_pids_verified = 0; - pbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PROCS_PIDS, - sizeof(Eterm)*pbdp->pid_sz); - - for (tix = 0, cpix = 0; tix < erts_proc.max; tix++) { - Process *rp = erts_pix2proc(tix); - if (rp) { - ERTS_PROCS_ASSERT(is_internal_pid(rp->id)); - pbdp->debug.correct_pids[cpix++] = rp->id; - ERTS_PROCS_ASSERT(cpix <= pbdp->pid_sz); - } - } - ERTS_PROCS_ASSERT(cpix == pbdp->pid_sz); - - for (ix = 0; ix < pbdp->pid_sz; ix++) - pbdp->pid[ix] = make_small(ix); -} - -static void -debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp) -{ - int ix, cpix; - - ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz); - - for (ix = 0; ix < pbdp->pid_sz; ix++) { - int found = 0; - Eterm pid = pbdp->pid[ix]; - ERTS_PROCS_ASSERT(is_internal_pid(pid)); - for (cpix = ix; cpix < pbdp->pid_sz; cpix++) { - if (pbdp->debug.correct_pids[cpix] == pid) { - pbdp->debug.correct_pids[cpix] = NIL; - found = 1; - break; - } - } - if (!found) { - for (cpix = 0; cpix < ix; cpix++) { - if (pbdp->debug.correct_pids[cpix] == pid) { - pbdp->debug.correct_pids[cpix] = NIL; - found = 1; - break; - } - } - } - ERTS_PROCS_ASSERT(found); - } - 
pbdp->debug.correct_pids_verified = 1; - - erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.correct_pids); - pbdp->debug.correct_pids = NULL; -} -#endif /* ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS */ - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST -static void -debug_processes_check_term_proc_list(void) -{ - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_proc_tab_rwmtx)); - if (!saved_term_procs.start) - ERTS_PROCS_ASSERT(!saved_term_procs.end); - else { - Uint64 curr_interval = get_proc_interval(); - Uint64 *prev_x_interval_p = NULL; - ErtsTermProcElement *tpep; - - for (tpep = saved_term_procs.start; tpep; tpep = tpep->next) { - if (!tpep->prev) - ERTS_PROCS_ASSERT(saved_term_procs.start == tpep); - else - ERTS_PROCS_ASSERT(tpep->prev->next == tpep); - if (!tpep->next) - ERTS_PROCS_ASSERT(saved_term_procs.end == tpep); - else - ERTS_PROCS_ASSERT(tpep->next->prev == tpep); - if (tpep->ix < 0) { - Uint64 interval = tpep->u.bif_invocation.interval; - ERTS_PROCS_ASSERT(interval <= curr_interval); - } - else { - Uint64 s_interval = tpep->u.process.spawned; - Uint64 x_interval = tpep->u.process.exited; - - ERTS_PROCS_ASSERT(s_interval <= x_interval); - if (prev_x_interval_p) - ERTS_PROCS_ASSERT(*prev_x_interval_p <= x_interval); - prev_x_interval_p = &tpep->u.process.exited; - ERTS_PROCS_ASSERT(is_internal_pid(tpep->u.process.pid)); - ERTS_PROCS_ASSERT(tpep->ix - == internal_pid_index(tpep->u.process.pid)); - } - } - - } -} - -static void -debug_processes_check_term_proc_free_list(ErtsTermProcElement *free_list) -{ - if (saved_term_procs.start) { - ErtsTermProcElement *ftpep; - ErtsTermProcElement *tpep; - - for (ftpep = free_list; ftpep; ftpep = ftpep->next) { - for (tpep = saved_term_procs.start; tpep; tpep = tpep->next) - ERTS_PROCS_ASSERT(ftpep != tpep); - } - } -} - -#endif - -#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0 - -static void -debug_processes_assert_error(char* expr, char* file, int line) -{ - fflush(stdout); - erts_fprintf(stderr, "%s:%d: Assertion failed: %s\n", file, line, expr); - fflush(stderr); - abort(); -} - -#endif - -/* *\ - * End of the processes/0 BIF implementation. * -\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - /* * A nice system halt closing all open port goes as follows: * 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS @@ -10499,3 +9041,19 @@ void erl_halt(int code) notify_reap_ports_relb(); } } + +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +int +erts_dbg_check_halloc_lock(Process *p) +{ + if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)) + return 1; + if (p->common.id == ERTS_INVALID_PID) + return 1; + if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process) + return 1; + if (erts_thr_progress_is_blocking()) + return 1; + return 0; +} +#endif diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index e789c873fb..6d1032c292 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -42,6 +42,9 @@ typedef struct process Process; #include "erl_process_lock.h" /* Only pull out important types... 
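The existing "only pull out important types" include of erl_process_lock.h and the erl_port.h include added in the lines just below rely on the same convention: the includer defines a guard macro, includes the header, and undefines the macro again, so that only a typedef becomes visible and a circular dependency on the full definitions is avoided. A minimal illustration of that convention with hypothetical file and macro names (port.h, GET_PORT_TYPE_ONLY), not the actual erl_port.h contents:

    /* port.h (illustrative) */
    #ifndef PORT_H_TYPES__
    #define PORT_H_TYPES__
    typedef struct port Port;          /* always exported                */
    #endif

    #ifndef GET_PORT_TYPE_ONLY         /* full interface only on demand  */
    #ifndef PORT_H_FULL__
    #define PORT_H_FULL__
    struct port { int id; /* ... */ };
    void port_release(Port *prt);
    #endif
    #endif

    /* process.h (illustrative user of the convention) */
    #define GET_PORT_TYPE_ONLY
    #include "port.h"                   /* only the `Port` typedef is pulled in */
    #undef GET_PORT_TYPE_ONLY

A file that needs the full port interface simply includes the header without defining the guard macro, and the second half of the header becomes visible.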
*/ #undef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__ +#define ERL_PORT_GET_PORT_TYPE_ONLY__ +#include "erl_port.h" +#undef ERL_PORT_GET_PORT_TYPE_ONLY__ #include "erl_vm.h" #include "erl_smp.h" #include "erl_message.h" @@ -66,11 +69,10 @@ typedef struct process Process; #undef ERL_THR_PROGRESS_TSD_TYPE_ONLY struct ErtsNodesMonitor_; -struct port; #define ERTS_MAX_NO_OF_SCHEDULERS 1024 -#define ERTS_DEFAULT_MAX_PROCESSES (1 << 15) +#define ERTS_DEFAULT_MAX_PROCESSES (1 << 18) #define ERTS_HEAP_ALLOC(Type, Size) \ erts_alloc((Type), (Size)) @@ -205,32 +207,10 @@ extern int erts_sched_thread_suggested_stack_size; ((Uint32) erts_smp_atomic32_read_nob(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_GET_MB(RQ) \ ((Uint32) erts_smp_atomic32_read_mb(&(RQ)->flags)) -#define ERTS_RUNQ_FLGS_MASK_SET(RQ, MSK, FLGS) \ - ((Uint32) erts_smp_atomic32_mask_set_relb(&(RQ)->flags, \ - (erts_aint32_t) (MSK), \ - (erts_aint32_t) (FLGS))) - -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_mask_set_relb(erts_smp_atomic32_t *a32p, - erts_aint32_t mask, - erts_aint32_t set); -#if ERTS_GLB_INLINE_INCL_FUNC_DEF -ERTS_GLB_INLINE erts_aint32_t -erts_smp_atomic32_mask_set_relb(erts_smp_atomic32_t *a32p, - erts_aint32_t mask, - erts_aint32_t set) -{ - erts_aint32_t act = erts_smp_atomic32_read_nob(a32p); - while (1) { - erts_aint32_t exp = act; - erts_aint32_t new = exp & ~mask; - new |= (mask & set); - act = erts_smp_atomic32_cmpxchg_relb(a32p, new, exp); - if (act == exp) - return act; - } -} -#endif +#define ERTS_RUNQ_FLGS_READ_BSET(RQ, MSK, FLGS) \ + ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \ + (erts_aint32_t) (MSK), \ + (erts_aint32_t) (FLGS))) typedef enum { ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED, @@ -311,6 +291,7 @@ struct ErtsProcList_ { Eterm pid; Uint64 started_interval; ErtsProcList* next; + ErtsProcList* prev; }; typedef struct ErtsMiscOpList_ ErtsMiscOpList; @@ -401,8 +382,8 @@ struct ErtsRunQueue_ { struct { ErtsRunQueueInfo info; - struct port *start; - struct port *end; + Port *start; + Port *end; } ports; }; @@ -512,7 +493,7 @@ struct ErtsSchedulerData_ { ErtsSchedulerSleepInfo *ssi; Process *current_process; Uint no; /* Scheduler number */ - struct port *current_port; + Port *current_port; ErtsRunQueue *run_queue; int virtual_reds; int cpu_id; /* >= 0 when bound */ @@ -737,8 +718,8 @@ struct ErtsPendingSuspend_ { # define BIN_OLD_VHEAP(p) (p)->bin_old_vheap struct process { - Eterm id; /* The pid of this process - (need to be first in struct) */ + ErtsPTabElementCommon common; /* *Need* to be first in struct */ + /* All fields in the PCB that differs between different heap * architectures, have been moved to the end of this struct to * make sure that as few offsets as possible differ. Different @@ -782,12 +763,8 @@ struct process { * Only valid for the current process. 
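The run-queue flag hunk above replaces the locally defined erts_smp_atomic32_mask_set_relb() CAS loop with the generic erts_smp_atomic32_read_bset_relb() primitive behind ERTS_RUNQ_FLGS_READ_BSET; both overwrite only the bits selected by the mask with the corresponding bits of the new value and hand back the previous value of the word. A self-contained sketch of that read-bset semantics using C11 atomics; atomic_read_bset is an illustrative name, not the ERTS implementation:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Atomically replace the bits selected by `mask` with the corresponding
     * bits of `set`, leaving all other bits untouched, and return the
     * previous value of the word, mirroring the removed CAS loop above. */
    static uint32_t atomic_read_bset(_Atomic uint32_t *word,
                                     uint32_t mask, uint32_t set)
    {
        uint32_t old = atomic_load_explicit(word, memory_order_relaxed);
        for (;;) {
            uint32_t new = (old & ~mask) | (set & mask);
            if (atomic_compare_exchange_weak_explicit(word, &old, new,
                                                      memory_order_release,
                                                      memory_order_relaxed))
                return old;
            /* on failure `old` has been reloaded with the current value;
             * just retry */
        }
    }

The weak compare-exchange may fail spuriously, in which case `old` already holds the freshly read value and the loop retries, much as the removed loop retried after a failed erts_smp_atomic32_cmpxchg_relb().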
*/ Uint32 rcount; /* suspend count */ - int prio; /* Priority of process */ int schedule_count; /* Times left to reschedule a low prio process */ Uint reds; /* No of reductions for this process */ - Eterm tracer_proc; /* If proc is traced, this is the tracer - (can NOT be boxed) */ - Uint trace_flags; /* Trace flags (used to be in flags) */ Eterm group_leader; /* Pid in charge (can be boxed) */ Uint flags; /* Trap exit, etc (no trace flags anymore) */ @@ -797,10 +774,6 @@ struct process { Process *next; /* Pointer to next process in run queue */ - struct reg_proc *reg; /* NULL iff not registered */ - ErtsLink *nlinks; - ErtsMonitor *monitors; /* The process monitors, both ends */ - struct ErtsNodesMonitor_ *nodes_monitors; ErtsSuspendMonitor *suspend_monitors; /* Processes suspended by @@ -809,7 +782,10 @@ struct process { ErlMessageQueue msg; /* Message queue */ - ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */ + union { + ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */ + void *terminate; + } u; ProcDict *dictionary; /* Process dictionary, may be NULL */ @@ -834,7 +810,6 @@ struct process { */ Eterm parent; /* Pid of process that created this process. */ erts_approx_time_t approx_started; /* Time when started. */ - Uint64 started_interval; /* This is the place, where all fields that differs between memory * architectures, have gone to. @@ -856,25 +831,11 @@ struct process { Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */ Uint64 bin_old_vheap; /* Virtual old heap size for binaries */ - union { - struct { -#ifdef ERTS_SMP - ErtsSmpPTimer *ptimer; - ErlMessageInQueue msg_inq; - ErtsPendExit pending_exit; -#else - ErlTimer tm; /* Timer entry */ -#endif - } alive; /* when process is alive */ -#ifdef ERTS_SMP - ErtsThrPrgrLaterOp release_data; /* when releasing process struct */ -#endif - void *exit_data; /* Misc data referred during termination */ - } u; - erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */ #ifdef ERTS_SMP + ErlMessageInQueue msg_inq; + ErtsPendExit pending_exit; erts_proc_lock_t lock; ErtsSchedulerData *scheduler_data; Eterm suspendee; @@ -904,6 +865,8 @@ struct process { #endif }; +extern const Process erts_invalid_process; + #ifdef CHECK_FOR_HOLES # define INIT_HOLE_CHECK(p) \ do { \ @@ -1028,8 +991,6 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra); Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz); #endif -extern erts_smp_rwmtx_t erts_proc_tab_rwmtx; -extern erts_smp_atomic_t *erts_proc_tab; extern Uint erts_default_process_flags; extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx; /* If any of the erts_system_monitor_* variables are set (enabled), @@ -1057,10 +1018,6 @@ struct erts_system_profile_flags_t { unsigned int exclusive : 1; }; extern struct erts_system_profile_flags_t erts_system_profile_flags; - -#define IS_TRACED(p) ( (p)->tracer_proc != NIL ) -#define ARE_TRACE_FLAGS_ON(p,tf) ( ((p)->trace_flags & (tf|F_SENSITIVE)) == (tf) ) -#define IS_TRACED_FL(p,tf) ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) ) /* process flags */ #define F_HIBERNATE_SCHED (1 << 0) /* Schedule out after hibernate op */ @@ -1171,7 +1128,172 @@ Uint64 erts_step_proc_interval(void); ErtsProcList *erts_proclist_create(Process *); void erts_proclist_destroy(ErtsProcList *); -int erts_proclist_same(ErtsProcList *, Process *); + +ERTS_GLB_INLINE int erts_proclist_same(ErtsProcList *, Process *); +ERTS_GLB_INLINE void erts_proclist_store_first(ErtsProcList **, ErtsProcList *); +ERTS_GLB_INLINE void 
erts_proclist_store_last(ErtsProcList **, ErtsProcList *); +ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_first(ErtsProcList *); +ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_last(ErtsProcList *); +ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_next(ErtsProcList *, ErtsProcList *); +ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_prev(ErtsProcList *, ErtsProcList *); +ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **); +ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_last(ErtsProcList **); +ERTS_GLB_INLINE int erts_proclist_fetch(ErtsProcList **, ErtsProcList **); +ERTS_GLB_INLINE void erts_proclist_remove(ErtsProcList **, ErtsProcList *); +ERTS_GLB_INLINE int erts_proclist_is_empty(ErtsProcList *); +ERTS_GLB_INLINE int erts_proclist_is_first(ErtsProcList *, ErtsProcList *); +ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *, ErtsProcList *); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE int +erts_proclist_same(ErtsProcList *plp, Process *p) +{ + return (plp->pid == p->common.id + && (plp->started_interval + == p->common.u.alive.started_interval)); +} + +ERTS_GLB_INLINE void erts_proclist_store_first(ErtsProcList **list, + ErtsProcList *element) +{ + if (!*list) + element->next = element->prev = element; + else { + element->prev = (*list)->prev; + element->next = *list; + element->prev->next = element; + element->next->prev = element; + } + *list = element; +} + +ERTS_GLB_INLINE void erts_proclist_store_last(ErtsProcList **list, + ErtsProcList *element) +{ + if (!*list) { + element->next = element->prev = element; + *list = element; + } + else { + element->prev = (*list)->prev; + element->next = *list; + element->prev->next = element; + element->next->prev = element; + } +} + +ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_first(ErtsProcList *list) +{ + return list; +} + +ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_last(ErtsProcList *list) +{ + if (!list) + return NULL; + else + return list->prev; +} + +ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_next(ErtsProcList *list, + ErtsProcList *element) +{ + ErtsProcList *next; + ASSERT(list && element); + next = element->next; + return list == next ? NULL : next; +} + +ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_prev(ErtsProcList *list, + ErtsProcList *element) +{ + ErtsProcList *prev; + ASSERT(list && element); + prev = element->prev; + return list == element ? 
NULL : prev; +} + +ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list) +{ + if (!*list) + return NULL; + else { + ErtsProcList *res = *list; + if (res == *list) + *list = NULL; + else + *list = res->next; + res->next->prev = res->prev; + res->prev->next = res->next; + return res; + } +} + +ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_last(ErtsProcList **list) +{ + if (!*list) + return NULL; + else { + ErtsProcList *res = (*list)->prev; + if (res == *list) + *list = NULL; + res->next->prev = res->prev; + res->prev->next = res->next; + return res; + } +} + +ERTS_GLB_INLINE int erts_proclist_fetch(ErtsProcList **list_first, + ErtsProcList **list_last) +{ + if (!*list_first) { + if (list_last) + *list_last = NULL; + return 0; + } + else { + if (list_last) + *list_last = (*list_first)->prev; + (*list_first)->prev->next = NULL; + (*list_first)->prev = NULL; + return !0; + } +} + +ERTS_GLB_INLINE void erts_proclist_remove(ErtsProcList **list, + ErtsProcList *element) +{ + ASSERT(list && *list); + if (*list == element) { + *list = element->next; + if (*list == element) + *list = NULL; + } + element->next->prev = element->prev; + element->prev->next = element->next; +} + +ERTS_GLB_INLINE int erts_proclist_is_empty(ErtsProcList *list) +{ + return list == NULL; +} + +ERTS_GLB_INLINE int erts_proclist_is_first(ErtsProcList *list, + ErtsProcList *element) +{ + ASSERT(list && element); + return list == element; +} + +ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *list, + ErtsProcList *element) +{ + ASSERT(list && element); + return list->prev == element; +} + +#endif int erts_sched_set_wakeup_other_thresold(char *str); int erts_sched_set_wakeup_other_type(char *str); @@ -1221,7 +1343,7 @@ void erts_schedule_multi_misc_aux_work(int ignore_self, erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int); void erts_sched_notify_check_cpu_bind(void); Uint erts_active_schedulers(void); -void erts_init_process(int); +void erts_init_process(int, int); Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm); Uint erts_run_queues_len(Uint *); void erts_add_to_runq(Process *); @@ -1240,7 +1362,6 @@ void set_timer(Process*, Uint); void cancel_timer(Process*); /* Begin System profile */ Uint erts_runnable_process_count(void); -Uint erts_process_count(void); /* End System profile */ void erts_init_empty_process(Process *p); void erts_cleanup_empty_process(Process* p); @@ -1264,7 +1385,7 @@ Eterm erts_sched_stat_term(Process *p, int total); void erts_free_proc(Process *); -void erts_suspend(Process*, ErtsProcLocks, struct port*); +void erts_suspend(Process*, ErtsProcLocks, Port*); void erts_resume(Process*, ErtsProcLocks); int erts_resume_processes(ErtsProcList *); @@ -1289,9 +1410,6 @@ void erts_deep_process_dump(int, void *); Eterm erts_get_reader_groups_map(Process *c_p); Eterm erts_debug_reader_groups_map(Process *c_p, int groups); -Sint erts_test_next_pid(int, Uint); -Eterm erts_debug_processes(Process *c_p); -Eterm erts_debug_processes_bif_info(Process *c_p); Uint erts_debug_nbalance(void); int erts_debug_wait_deallocations(Process *c_p); @@ -1603,7 +1721,7 @@ ERTS_GLB_INLINE Eterm erts_get_current_pid(void) { Process *proc = erts_get_current_process(); - return proc ? proc->id : THE_NON_VALUE; + return proc ? 
proc->common.id : THE_NON_VALUE; } ERTS_GLB_INLINE @@ -1812,10 +1930,10 @@ extern int erts_disable_proc_not_running_opt; /* Minimum NUMBER of processes for a small system to start */ -#ifdef ERTS_SMP +#define ERTS_MIN_PROCESSES 1024 +#if defined(ERTS_SMP) && ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS +#undef ERTS_MIN_PROCESSES #define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS -#else -#define ERTS_MIN_PROCESSES 16 #endif void erts_smp_notify_inc_runq(ErtsRunQueue *runq); diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c index 93466da3aa..bf384c66e1 100644 --- a/erts/emulator/beam/erl_process_dict.c +++ b/erts/emulator/beam/erl_process_dict.c @@ -360,7 +360,7 @@ static void pd_hash_erase(Process *p, Eterm id, Eterm *ret) erts_fprintf(stderr, "Process dictionary for process %T is broken, trying to " "display term found in line %d:\n" - "%T\n", p->id, __LINE__, old); + "%T\n", p->common.id, __LINE__, old); #endif erl_exit(1, "Damaged process dictionary found during erase/1."); } @@ -405,7 +405,7 @@ Eterm erts_pd_hash_get(Process *p, Eterm id) erts_fprintf(stderr, "Process dictionary for process %T is broken, trying to " "display term found in line %d:\n" - "%T\n", p->id, __LINE__, tmp); + "%T\n", p->common.id, __LINE__, tmp); #endif erl_exit(1, "Damaged process dictionary found during get/1."); } @@ -614,7 +614,7 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value) erts_fprintf(stderr, "Process dictionary for process %T is broken, trying to " "display term found in line %d:\n" - "%T\n", p->id, __LINE__, old); + "%T\n", p->common.id, __LINE__, old); #endif erl_exit(1, "Damaged process dictionary found during put/2."); diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index 542c5ed0d9..ba74dfd6a1 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -60,11 +60,11 @@ extern BeamInstr beam_continue_exit[]; void erts_deep_process_dump(int to, void *to_arg) { - int i; + int i, max = erts_ptab_max(&erts_proc); all_binaries = NULL; - for (i = 0; i < erts_max_processes; i++) { + for (i = 0; i < max; i++) { Process *p = erts_pix2proc(i); if (p && p->i != ENULL) { erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); @@ -85,8 +85,8 @@ dump_process_info(int to, void *to_arg, Process *p) ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); - if ((p->trace_flags & F_SENSITIVE) == 0 && p->msg.first) { - erts_print(to, to_arg, "=proc_messages:%T\n", p->id); + if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) { + erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id); for (mp = p->msg.first; mp != NULL; mp = mp->next) { Eterm mesg = ERL_MESSAGE_TERM(mp); if (is_value(mesg)) @@ -100,21 +100,21 @@ dump_process_info(int to, void *to_arg, Process *p) } } - if ((p->trace_flags & F_SENSITIVE) == 0) { + if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) { if (p->dictionary) { - erts_print(to, to_arg, "=proc_dictionary:%T\n", p->id); + erts_print(to, to_arg, "=proc_dictionary:%T\n", p->common.id); erts_deep_dictionary_dump(to, to_arg, p->dictionary, dump_element_nl); } } - if ((p->trace_flags & F_SENSITIVE) == 0) { - erts_print(to, to_arg, "=proc_stack:%T\n", p->id); + if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) { + erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id); for (sp = p->stop; sp < STACK_START(p); sp++) { yreg = stack_element_dump(to, to_arg, p, sp, yreg); } - erts_print(to, to_arg, "=proc_heap:%T\n", p->id); + erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id); for 
(sp = p->stop; sp < STACK_START(p); sp++) { Eterm term = *sp; diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index 84a8270d06..2db5df06b4 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -66,8 +66,7 @@ #endif #include "erl_process.h" - -const Process erts_proc_lock_busy = {ERTS_INVALID_PID}; +#include "erl_thr_progress.h" #ifdef ERTS_SMP @@ -399,7 +398,7 @@ wait_for_locks(Process *p, ErtsProcLocks need_locks, ErtsProcLocks olflgs) { - erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id); + erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id); erts_tse_t *wtr; /* Acquire a waiter object on which this thread can wait. */ @@ -553,7 +552,7 @@ erts_proc_unlock_failed(Process *p, erts_pix_lock_t *pixlck, ErtsProcLocks wait_locks) { - erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id); + erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id); #if ERTS_PROC_LOCK_ATOMIC_IMPL erts_pix_lock(pix_lock); @@ -580,7 +579,7 @@ erts_proc_lock_prepare_proc_lock_waiter(void) */ static void -proc_safelock(int is_sched, +proc_safelock(int is_managed, Process *a_proc, ErtsProcLocks a_have_locks, ErtsProcLocks a_need_locks, @@ -603,40 +602,40 @@ proc_safelock(int is_sched, * Locks with the same lock order should be locked on p1 before p2. */ if (a_proc) { - if (a_proc->id < b_proc->id) { + if (a_proc->common.id < b_proc->common.id) { p1 = a_proc; #ifdef ERTS_ENABLE_LOCK_CHECK - pid1 = a_proc->id; + pid1 = a_proc->common.id; #endif need_locks1 = a_need_locks; have_locks1 = a_have_locks; p2 = b_proc; #ifdef ERTS_ENABLE_LOCK_CHECK - pid2 = b_proc->id; + pid2 = b_proc->common.id; #endif need_locks2 = b_need_locks; have_locks2 = b_have_locks; } - else if (a_proc->id > b_proc->id) { + else if (a_proc->common.id > b_proc->common.id) { p1 = b_proc; #ifdef ERTS_ENABLE_LOCK_CHECK - pid1 = b_proc->id; + pid1 = b_proc->common.id; #endif need_locks1 = b_need_locks; have_locks1 = b_have_locks; p2 = a_proc; #ifdef ERTS_ENABLE_LOCK_CHECK - pid2 = a_proc->id; + pid2 = a_proc->common.id; #endif need_locks2 = a_need_locks; have_locks2 = a_have_locks; } else { ERTS_LC_ASSERT(a_proc == b_proc); - ERTS_LC_ASSERT(a_proc->id == b_proc->id); + ERTS_LC_ASSERT(a_proc->common.id == b_proc->common.id); p1 = a_proc; #ifdef ERTS_ENABLE_LOCK_CHECK - pid1 = a_proc->id; + pid1 = a_proc->common.id; #endif need_locks1 = a_need_locks | b_need_locks; have_locks1 = a_have_locks | b_have_locks; @@ -651,7 +650,7 @@ proc_safelock(int is_sched, else { p1 = b_proc; #ifdef ERTS_ENABLE_LOCK_CHECK - pid1 = b_proc->id; + pid1 = b_proc->common.id; #endif need_locks1 = b_need_locks; have_locks1 = b_have_locks; @@ -706,7 +705,7 @@ proc_safelock(int is_sched, if (unlock_locks) { have_locks1 &= ~unlock_locks; need_locks1 |= unlock_locks; - if (!is_sched && !have_locks1) { + if (!is_managed && !have_locks1) { refc1 = 1; erts_smp_proc_inc_refc(p1); } @@ -716,7 +715,7 @@ proc_safelock(int is_sched, if (unlock_locks) { have_locks2 &= ~unlock_locks; need_locks2 |= unlock_locks; - if (!is_sched && !have_locks2) { + if (!is_managed && !have_locks2) { refc2 = 1; erts_smp_proc_inc_refc(p2); } @@ -797,7 +796,7 @@ proc_safelock(int is_sched, } #endif - if (!is_sched) { + if (!is_managed) { if (refc1) erts_smp_proc_dec_refc(p1); if (refc2) @@ -830,7 +829,7 @@ erts_pid2proc_opt(Process *c_p, int flags) { Process *dec_refc_proc = NULL; - int need_ptl; + ErtsThrPrgrDelayHandle dhndl; ErtsProcLocks 
need_locks; Uint pix; Process *proc; @@ -853,8 +852,8 @@ erts_pid2proc_opt(Process *c_p, ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks); need_locks = pid_need_locks; - if (c_p && c_p->id == pid) { - ASSERT(c_p->id != ERTS_INVALID_PID); + if (c_p && c_p->common.id == pid) { + ASSERT(c_p->common.id != ERTS_INVALID_PID); ASSERT(c_p == erts_pix2proc(pix)); if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) @@ -868,15 +867,12 @@ erts_pid2proc_opt(Process *c_p, } } - need_ptl = !erts_get_scheduler_id(); - - if (need_ptl) - erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx); + dhndl = erts_thr_progress_unmanaged_delay(); - proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]); + proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix); if (proc) { - if (proc->id != pid) + if (proc->common.id != pid) proc = NULL; else if (!need_locks) { if (flags & ERTS_P2P_FLG_SMP_INC_REFC) @@ -935,6 +931,7 @@ erts_pid2proc_opt(Process *c_p, if (flags & ERTS_P2P_FLG_TRY_LOCK) proc = ERTS_PROC_LOCK_BUSY; else { + int managed; if (flags & ERTS_P2P_FLG_SMP_INC_REFC) erts_smp_proc_inc_refc(proc); @@ -942,14 +939,21 @@ erts_pid2proc_opt(Process *c_p, erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks); #endif - if (need_ptl) { + managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED; + if (!managed) { erts_smp_proc_inc_refc(proc); + erts_thr_progress_unmanaged_continue(dhndl); dec_refc_proc = proc; - erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); - need_ptl = 0; + + /* + * We don't want to call + * erts_thr_progress_unmanaged_continue() + * again. + */ + dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED; } - proc_safelock(!need_ptl, + proc_safelock(managed, c_p, c_p_have_locks, c_p_have_locks, @@ -961,8 +965,8 @@ erts_pid2proc_opt(Process *c_p, } } - if (need_ptl) - erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx); + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_thr_progress_unmanaged_continue(dhndl); if (need_locks && proc @@ -970,7 +974,7 @@ erts_pid2proc_opt(Process *c_p, && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) ? 
ERTS_PROC_IS_EXITING(proc) : (proc - != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) { + != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) { erts_smp_proc_unlock(proc, need_locks); @@ -1012,22 +1016,22 @@ erts_proc_lock_init(Process *p) erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1); #endif #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL - erts_mtx_init_x(&p->lock.main, "proc_main", p->id); + erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id); ethr_mutex_lock(&p->lock.main.mtx); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_trylock(1, &p->lock.main.lc); #endif - erts_mtx_init_x(&p->lock.link, "proc_link", p->id); + erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id); ethr_mutex_lock(&p->lock.link.mtx); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_trylock(1, &p->lock.link.lc); #endif - erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->id); + erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id); ethr_mutex_lock(&p->lock.msgq.mtx); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_trylock(1, &p->lock.msgq.lc); #endif - erts_mtx_init_x(&p->lock.status, "proc_status", p->id); + erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id); ethr_mutex_lock(&p->lock.status.mtx); #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_trylock(1, &p->lock.status.lc); @@ -1064,11 +1068,11 @@ erts_proc_lock_fin(Process *p) #if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT) void erts_lcnt_proc_lock_init(Process *p) { if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) { - if (p->id != ERTS_INVALID_PID) { - erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->id); - erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->id); - erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->id); - erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->id); + if (p->common.id != ERTS_INVALID_PID) { + erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->common.id); + erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->common.id); + erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->common.id); + erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->common.id); } else { erts_lcnt_init_lock(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK); erts_lcnt_init_lock(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK); @@ -1176,10 +1180,11 @@ void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res } -void erts_lcnt_enable_proc_lock_count(int enable) { - int i; +void erts_lcnt_enable_proc_lock_count(int enable) +{ + int i, max = erts_ptab_max(&erts_proc); - for (i = 0; i < erts_max_processes; ++i) { + for (i = 0; i < max; ++i) { Process* p = erts_pix2proc(i); if (p) { if (enable) { @@ -1208,7 +1213,7 @@ void erts_proc_lc_lock(Process *p, ErtsProcLocks locks) { erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK); if (locks & ERTS_PROC_LOCK_MAIN) { lck.id = lc_id.proc_lock_main; @@ -1232,7 +1237,7 @@ void erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked) { erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK); if (locks & ERTS_PROC_LOCK_MAIN) { lck.id = lc_id.proc_lock_main; @@ -1256,7 +1261,7 @@ void erts_proc_lc_unlock(Process *p, ErtsProcLocks locks) { erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK); if (locks 
& ERTS_PROC_LOCK_STATUS) { lck.id = lc_id.proc_lock_status; @@ -1283,7 +1288,7 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks) { #if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK); if (locks & ERTS_PROC_LOCK_STATUS) { lck.id = lc_id.proc_lock_status; @@ -1318,7 +1323,7 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks) { #if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK); if (locks & ERTS_PROC_LOCK_MAIN) { lck.id = lc_id.proc_lock_main; @@ -1353,7 +1358,7 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks) { #if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK); if (locks & ERTS_PROC_LOCK_STATUS) { lck.id = lc_id.proc_lock_status; @@ -1390,7 +1395,7 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks) { if (locks & ERTS_PROC_LOCKS_ALL) { erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK); if (locks & ERTS_PROC_LOCK_MAIN) @@ -1415,7 +1420,7 @@ void erts_proc_lc_chk_only_proc_main(Process *p) { #if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK); erts_lc_check_exact(&proc_main, 1); #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL @@ -1439,19 +1444,19 @@ erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks) ERTS_PROC_LC_EMPTY_LOCK_INIT}; if (locks & ERTS_PROC_LOCK_MAIN) { have_locks[have_locks_len].id = lc_id.proc_lock_main; - have_locks[have_locks_len++].extra = p->id; + have_locks[have_locks_len++].extra = p->common.id; } if (locks & ERTS_PROC_LOCK_LINK) { have_locks[have_locks_len].id = lc_id.proc_lock_link; - have_locks[have_locks_len++].extra = p->id; + have_locks[have_locks_len++].extra = p->common.id; } if (locks & ERTS_PROC_LOCK_MSGQ) { have_locks[have_locks_len].id = lc_id.proc_lock_msgq; - have_locks[have_locks_len++].extra = p->id; + have_locks[have_locks_len++].extra = p->common.id; } if (locks & ERTS_PROC_LOCK_STATUS) { have_locks[have_locks_len].id = lc_id.proc_lock_status; - have_locks[have_locks_len++].extra = p->id; + have_locks[have_locks_len++].extra = p->common.id; } #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL erts_lc_lock_t have_locks[4]; @@ -1484,35 +1489,35 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks) if (locks & ERTS_PROC_LOCK_MAIN) { have_locks[have_locks_len].id = lc_id.proc_lock_main; - have_locks[have_locks_len++].extra = p->id; + have_locks[have_locks_len++].extra = p->common.id; } else { have_not_locks[have_not_locks_len].id = lc_id.proc_lock_main; - have_not_locks[have_not_locks_len++].extra = p->id; + have_not_locks[have_not_locks_len++].extra = p->common.id; } if (locks & ERTS_PROC_LOCK_LINK) { have_locks[have_locks_len].id = lc_id.proc_lock_link; - have_locks[have_locks_len++].extra = p->id; + have_locks[have_locks_len++].extra = p->common.id; } else { have_not_locks[have_not_locks_len].id = lc_id.proc_lock_link; - have_not_locks[have_not_locks_len++].extra = p->id; + have_not_locks[have_not_locks_len++].extra = p->common.id; } if (locks & ERTS_PROC_LOCK_MSGQ) { have_locks[have_locks_len].id = lc_id.proc_lock_msgq; - have_locks[have_locks_len++].extra = p->id; + have_locks[have_locks_len++].extra = p->common.id; } else { have_not_locks[have_not_locks_len].id = lc_id.proc_lock_msgq; - have_not_locks[have_not_locks_len++].extra = p->id; + 
have_not_locks[have_not_locks_len++].extra = p->common.id; } if (locks & ERTS_PROC_LOCK_STATUS) { have_locks[have_locks_len].id = lc_id.proc_lock_status; - have_locks[have_locks_len++].extra = p->id; + have_locks[have_locks_len++].extra = p->common.id; } else { have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status; - have_not_locks[have_not_locks_len++].extra = p->id; + have_not_locks[have_not_locks_len++].extra = p->common.id; } #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL erts_lc_lock_t have_locks[4]; @@ -1547,16 +1552,16 @@ erts_proc_lc_my_proc_locks(Process *p) ErtsProcLocks res = 0; #if ERTS_PROC_LOCK_OWN_IMPL erts_lc_lock_t locks[4] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK), ERTS_LC_LOCK_INIT(lc_id.proc_lock_link, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK), ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK), ERTS_LC_LOCK_INIT(lc_id.proc_lock_status, - p->id, + p->common.id, ERTS_LC_FLG_LT_PROCLOCK)}; #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL erts_lc_lock_t locks[4] = {p->lock.main.lc, diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h index 4aec19c8c3..9dd503f3cb 100644 --- a/erts/emulator/beam/erl_process_lock.h +++ b/erts/emulator/beam/erl_process_lock.h @@ -176,8 +176,8 @@ typedef struct erts_proc_lock_t_ { * on multiple processes, locks on processes with low process ids * have to be locked before locks on processes with high process * ids. E.g., if the main and the message queue locks are to be - * locked on processes p1 and p2 and p1->id < p2->id, then locks - * should be locked in the following order: + * locked on processes p1 and p2 and p1->common.id < p2->common.id, + * then locks should be locked in the following order: * 1. main lock on p1 * 2. main lock on p2 * 3. 
message queue lock on p1 @@ -203,7 +203,7 @@ typedef struct erts_proc_lock_t_ { & ~ERTS_PROC_LOCK_MAIN) -#define ERTS_PIX_LOCKS_BITS 8 +#define ERTS_PIX_LOCKS_BITS 10 #define ERTS_NO_OF_PIX_LOCKS (1 << ERTS_PIX_LOCKS_BITS) @@ -767,7 +767,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks) #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else - ERTS_PID2PIXLOCK(p->id), + ERTS_PID2PIXLOCK(p->common.id), #endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/ locks, file, line); #elif defined(ERTS_SMP) @@ -775,7 +775,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks) #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else - ERTS_PID2PIXLOCK(p->id), + ERTS_PID2PIXLOCK(p->common.id), #endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/ locks); #endif /*ERTS_SMP*/ @@ -789,7 +789,7 @@ erts_smp_proc_unlock(Process *p, ErtsProcLocks locks) #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else - ERTS_PID2PIXLOCK(p->id), + ERTS_PID2PIXLOCK(p->common.id), #endif locks); #endif @@ -805,7 +805,7 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks) #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else - ERTS_PID2PIXLOCK(p->id), + ERTS_PID2PIXLOCK(p->common.id), #endif locks); #endif @@ -814,21 +814,15 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks) ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *p) { #ifdef ERTS_SMP -#ifdef ERTS_ENABLE_LOCK_CHECK - erts_aint32_t refc = erts_atomic32_inc_read_nob(&p->lock.refc); - ERTS_SMP_LC_ASSERT(refc > 1); -#else - erts_atomic32_inc_nob(&p->lock.refc); -#endif + erts_ptab_inc_refc(&p->common); #endif } ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p) { #ifdef ERTS_SMP - erts_aint32_t refc = erts_atomic32_dec_read_nob(&p->lock.refc); - ERTS_SMP_LC_ASSERT(refc >= 0); - if (refc == 0) + int referred = erts_ptab_dec_test_refc(&p->common); + if (!referred) erts_free_proc(p); #endif } @@ -836,10 +830,8 @@ ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p) ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 add_refc) { #ifdef ERTS_SMP - erts_aint32_t refc = erts_atomic32_add_read_nob(&p->lock.refc, - (erts_aint32_t) add_refc); - ERTS_SMP_LC_ASSERT(refc >= 0); - if (refc == 0) + int referred = erts_ptab_add_test_refc(&p->common, add_refc); + if (!referred) erts_free_proc(p); #endif } @@ -875,8 +867,7 @@ void erts_proc_safelock(Process *a_proc, #define ERTS_P2P_FLG_TRY_LOCK (1 << 1) #define ERTS_P2P_FLG_SMP_INC_REFC (1 << 2) -#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_proc_lock_busy) -extern const Process erts_proc_lock_busy; +#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_invalid_process) #define erts_pid2proc(PROC, HL, PID, NL) \ erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0) @@ -896,33 +887,24 @@ Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int); ERTS_GLB_INLINE Process *erts_pix2proc(int ix) { Process *proc; - ASSERT(0 <= ix && ix < erts_proc.max); - proc = (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[ix]); + ASSERT(0 <= ix && ix < erts_ptab_max(&erts_proc)); + proc = (Process *) erts_ptab_pix2intptr_nob(&erts_proc, ix); return proc == ERTS_PROC_LOCK_BUSY ? NULL : proc; } ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid) { Process *proc; - int pix; - /* - * In SMP case: Only scheduler threads are allowed - * to use this function. Other threads need to - * atomicaly increment refc at lookup, i.e., use - * erts_pid2proc_opt() with ERTS_P2P_FLG_SMP_INC_REFC. 
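The pid-ordering rule spelled out in the erl_process_lock.h comment above (and enforced by proc_safelock() earlier in this diff) can be reduced to a small sketch. This is an illustration only, assuming a hypothetical proc_t and lock() helper rather than the real ERTS lock primitives:

/* Illustration only: the deadlock-avoidance rule from the comment above,
 * reduced to a toy example.  proc_t and lock() are hypothetical stand-ins,
 * not ERTS APIs. */
#include <stdint.h>

typedef struct { uint32_t id; /* plays the role of p->common.id */ } proc_t;

static void lock(proc_t *p) { (void) p; /* acquire p's main lock */ }

/* Always lock the process with the lower id first, so two threads that
 * lock the same pair {a, b} in opposite argument order cannot deadlock. */
static void lock_pair(proc_t *a, proc_t *b)
{
    if (a == b || a->id == b->id)
        lock(a);                    /* same process: one lock suffices */
    else if (a->id < b->id) {
        lock(a);
        lock(b);
    }
    else {
        lock(b);
        lock(a);
    }
}

The real proc_safelock() additionally has to release and re-take locks the caller already holds, and to keep reference counts on the processes while it is not a managed thread, as the surrounding diff shows.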
- */ - ERTS_SMP_LC_ASSERT(erts_get_scheduler_id()); + ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying()); if (is_not_internal_pid(pid)) return NULL; - pix = internal_pid_index(pid); - proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]); - - if (proc && proc->id != pid) + proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, + internal_pid_index(pid)); + if (proc && proc->common.id != pid) return NULL; - return proc; } diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c new file mode 100644 index 0000000000..87beeafa1a --- /dev/null +++ b/erts/emulator/beam/erl_ptab.c @@ -0,0 +1,1566 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2012. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + +/* + * Description: Process/Port table implementation. + * + * Author: Rickard Green + */ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif +#define ERTS_PTAB_WANT_BIF_IMPL__ +#define ERTS_PTAB_WANT_DEBUG_FUNCS__ +#include "erl_ptab.h" +#include "global.h" +#include "erl_binary.h" + +typedef struct ErtsPTabListBifData_ ErtsPTabListBifData; + +#define ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED 25 +#define ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE 1000 +#define ERTS_PTAB_LIST_BIF_MIN_START_REDS \ + (ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE \ + / ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED) + +#define ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS 1 + +#define ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED 10 + +#define ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS \ + (ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE \ + / ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED) + + +#define ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED 75 + +#define ERTS_PTAB_LIST_DBG_DO_TRACE 0 + +#ifdef DEBUG +# define ERTS_PTAB_LIST_BIF_DEBUGLEVEL 100 +#else +# define ERTS_PTAB_LIST_BIF_DEBUGLEVEL 0 +#endif + +#define ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC 1 +#define ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS 5 +#define ERTS_PTAB_LIST_DBGLVL_CHK_PIDS 10 +#define ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST 20 +#define ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST 20 + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL == 0 +# define ERTS_PTAB_LIST_ASSERT(EXP) +#else +# define ERTS_PTAB_LIST_ASSERT(EXP) \ + ((void) ((EXP) \ + ? 
1 \ + : (debug_ptab_list_assert_error(#EXP, \ + __FILE__, \ + __LINE__, \ + __func__), \ + 0))) +#endif + + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC +# define ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(PTLBDP, HP, SZ) \ +do { \ + ERTS_PTAB_LIST_ASSERT(!(PTLBDP)->debug.heap); \ + ERTS_PTAB_LIST_ASSERT(!(PTLBDP)->debug.heap_size); \ + (PTLBDP)->debug.heap = (HP); \ + (PTLBDP)->debug.heap_size = (SZ); \ +} while (0) +# define ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(PTLBDP, HP) \ +do { \ + ERTS_PTAB_LIST_ASSERT((PTLBDP)->debug.heap); \ + ERTS_PTAB_LIST_ASSERT((PTLBDP)->debug.heap_size); \ + ERTS_PTAB_LIST_ASSERT(((PTLBDP)->debug.heap \ + + (PTLBDP)->debug.heap_size) \ + == (HP)); \ + (PTLBDP)->debug.heap = NULL; \ + (PTLBDP)->debug.heap_size = 0; \ +} while (0) +# define ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT(PTLBDP) \ +do { \ + (PTLBDP)->debug.heap = NULL; \ + (PTLBDP)->debug.heap_size = 0; \ +} while (0) +#else +# define ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(PTLBDP, HP, SZ) +# define ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(PTLBDP, HP) +# define ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT(PTLBDP) +#endif + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST +# define ERTS_PTAB_LIST_DBG_CHK_RESLIST(R) \ + debug_ptab_list_check_res_list((R)) +#else +# define ERTS_PTAB_LIST_DBG_CHK_RESLIST(R) +#endif + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS +# define ERTS_PTAB_LIST_DBG_SAVE_PIDS(PTLBDP) \ + debug_ptab_list_save_all_pids((PTLBDP)) +# define ERTS_PTAB_LIST_DBG_VERIFY_PIDS(PTLBDP) \ +do { \ + if (!(PTLBDP)->debug.correct_pids_verified) \ + debug_ptab_list_verify_all_pids((PTLBDP)); \ +} while (0) +# define ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS(PTLBDP) \ +do { \ + if ((PTLBDP)->debug.correct_pids) { \ + erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, \ + (PTLBDP)->debug.correct_pids); \ + (PTLBDP)->debug.correct_pids = NULL; \ + } \ +} while(0) +# define ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT(PTLBDP) \ +do { \ + (PTLBDP)->debug.correct_pids_verified = 0; \ + (PTLBDP)->debug.correct_pids = NULL; \ +} while (0) +#else +# define ERTS_PTAB_LIST_DBG_SAVE_PIDS(PTLBDP) +# define ERTS_PTAB_LIST_DBG_VERIFY_PIDS(PTLBDP) +# define ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS(PTLBDP) +# define ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT(PTLBDP) +#endif + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS +# define ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(PTLBDP, PID, IC) \ + debug_ptab_list_check_found_pid((PTLBDP), (PID), (IC), 1) +# define ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(PTLBDP, PID, IC) \ + debug_ptab_list_check_found_pid((PTLBDP), (PID), (IC), 0) +#else +# define ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(PTLBDP, PID, IC) +# define ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(PTLBDP, PID, IC) +#endif + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST +# define ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(PTab) \ + debug_ptab_list_check_del_list((PTab)) +# define ERTS_PTAB_LIST_DBG_CHK_FREELIST(PTab, FL) \ + debug_ptab_list_check_del_free_list((PTab), (FL)) +#else +# define ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(PTab) +# define ERTS_PTAB_LIST_DBG_CHK_FREELIST(PTab, FL) +#endif + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL == 0 +#if ERTS_PTAB_LIST_DBG_DO_TRACE +# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP) \ + (PTLBDP)->debug.caller = (P)->common.id +# else +# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP) +# endif +# define ERTS_PTAB_LIST_DBG_CLEANUP(PTLBDP) +#else +# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP) \ +do { \ + (PTLBDP)->debug.caller = (P)->common.id; \ + 
ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT((PTLBDP)); \ + ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT((PTLBDP)); \ +} while (0) +# define ERTS_PTAB_LIST_DBG_CLEANUP(PTLBDP) \ +do { \ + ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS((PTLBDP)); \ +} while (0) +#endif + +#if ERTS_PTAB_LIST_DBG_DO_TRACE +# define ERTS_PTAB_LIST_DBG_TRACE(PID, WHAT) \ + erts_fprintf(stderr, "%T %s:%d:%s(): %s\n", \ + (PID), __FILE__, __LINE__, __func__, #WHAT) +#else +# define ERTS_PTAB_LIST_DBG_TRACE(PID, WHAT) +#endif + + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0 +static void debug_ptab_list_assert_error(char* expr, + const char* file, + int line, + const char *func); +#endif +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST +static void debug_ptab_list_check_res_list(Eterm list); +#endif +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS +static void debug_ptab_list_save_all_pids(ErtsPTabListBifData *ptlbdp); +static void debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp); +#endif +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS +static void debug_ptab_list_check_found_pid(ErtsPTabListBifData *ptlbdp, + Eterm pid, + Uint64 ic, + int pid_should_be_found); +#endif +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST +static void debug_ptab_list_check_del_list(ErtsPTab *ptab); +static void debug_ptab_list_check_del_free_list(ErtsPTab *ptab, + ErtsPTabDeletedElement *ptdep); +#endif + +struct ErtsPTabDeletedElement_ { + ErtsPTabDeletedElement *next; + ErtsPTabDeletedElement *prev; + int ix; + union { + struct { + Eterm id; + Uint64 inserted; + Uint64 deleted; + } element; + struct { + Uint64 interval; + } bif_invocation; + } u; +}; + +static Export ptab_list_continue_export; + +typedef struct { + Uint64 interval; +} ErtsPTabListBifChunkInfo; + +typedef enum { + INITIALIZING, + INSPECTING_TABLE, + INSPECTING_DELETED, + BUILDING_RESULT, + RETURN_RESULT +} ErtsPTabListBifState; + +struct ErtsPTabListBifData_ { + ErtsPTab *ptab; + ErtsPTabListBifState state; + Eterm caller; + ErtsPTabListBifChunkInfo *chunk; + int tix; + int pid_ix; + int pid_sz; + Eterm *pid; + ErtsPTabDeletedElement *bif_invocation; /* Only used when > 1 chunk */ + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0 || ERTS_PTAB_LIST_DBG_DO_TRACE + struct { + Eterm caller; +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS + Uint64 *pid_started; +#endif +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC + Eterm *heap; + Uint heap_size; +#endif +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS + int correct_pids_verified; + Eterm *correct_pids; +#endif + } debug; +#endif + +}; + +#ifdef ARCH_32 + +static ERTS_INLINE Uint64 +dw_aint_to_uint64(erts_dw_aint_t *dw) +{ +#ifdef ETHR_SU_DW_NAINT_T__ + return (Uint64) dw->dw_sint; +#else + Uint64 res; + res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]); + res <<= 32; + res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]); + return res; +#endif +} + +static void +unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val) +{ +#ifdef ETHR_SU_DW_NAINT_T__ + dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val; +#else + dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff); + dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff); +#endif +} + +static ERTS_INLINE void +last_data_init_nob(ErtsPTab *ptab, Uint64 val) +{ + erts_dw_aint_t dw; + unint64_to_dw_aint(&dw, val); + erts_smp_dw_atomic_init_nob(&ptab->vola.tile.last_data, &dw); +} + +static ERTS_INLINE void 
+last_data_set_relb(ErtsPTab *ptab, Uint64 val) +{ + erts_dw_aint_t dw; + unint64_to_dw_aint(&dw, val); + erts_smp_dw_atomic_set_relb(&ptab->vola.tile.last_data, &dw); +} + +static ERTS_INLINE Uint64 +last_data_read_nob(ErtsPTab *ptab) +{ + erts_dw_aint_t dw; + erts_smp_dw_atomic_read_nob(&ptab->vola.tile.last_data, &dw); + return dw_aint_to_uint64(&dw); +} + +static ERTS_INLINE Uint64 +last_data_read_acqb(ErtsPTab *ptab) +{ + erts_dw_aint_t dw; + erts_smp_dw_atomic_read_acqb(&ptab->vola.tile.last_data, &dw); + return dw_aint_to_uint64(&dw); +} + +static ERTS_INLINE Uint64 +last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp) +{ + erts_dw_aint_t dw_new, dw_xchg; + + unint64_to_dw_aint(&dw_new, new); + unint64_to_dw_aint(&dw_xchg, exp); + + if (erts_smp_dw_atomic_cmpxchg_relb(&ptab->vola.tile.last_data, + &dw_new, + &dw_xchg)) + return exp; + else + return dw_aint_to_uint64(&dw_xchg); +} + +#elif defined(ARCH_64) + +union { + erts_smp_atomic_t pid_data; + char align[ERTS_CACHE_LINE_SIZE]; +} last erts_align_attribute(ERTS_CACHE_LINE_SIZE); + +static ERTS_INLINE void +last_data_init_nob(ErtsPTab *ptab, Uint64 val) +{ + erts_smp_atomic_init_nob(&ptab->vola.tile.last_data, (erts_aint_t) val); +} + +static ERTS_INLINE void +last_data_set_relb(ErtsPTab *ptab, Uint64 val) +{ + erts_smp_atomic_set_relb(&ptab->vola.tile.last_data, (erts_aint_t) val); +} + +static ERTS_INLINE Uint64 +last_data_read_nob(ErtsPTab *ptab) +{ + return (Uint64) erts_smp_atomic_read_nob(&ptab->vola.tile.last_data); +} + +static ERTS_INLINE Uint64 +last_data_read_acqb(ErtsPTab *ptab) +{ + return (Uint64) erts_smp_atomic_read_acqb(&ptab->vola.tile.last_data); +} + +static ERTS_INLINE Uint64 +last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp) +{ + return (Uint64) erts_smp_atomic_cmpxchg_relb(&ptab->vola.tile.last_data, + (erts_aint_t) new, + (erts_aint_t) exp); +} + +#else +# error "Not 64-bit, nor 32-bit architecture..." +#endif + +static ERTS_INLINE int +last_data_cmp(Uint64 ld1, Uint64 ld2) +{ + Uint64 ld1_wrap; + + if (ld1 == ld2) + return 0; + + ld1_wrap = ld1 + (((Uint64) 1) << 63); + + if (ld1 < ld1_wrap) + return (ld1 < ld2 && ld2 < ld1_wrap) ? -1 : 1; + else + return (ld1_wrap <= ld2 && ld2 < ld1) ? 
1 : -1; +} + +#define ERTS_PTAB_LastData2EtermData(LD) \ + ((Eterm) ((LD) & ~(~((Uint64) 0) << ERTS_PTAB_ID_DATA_SIZE))) + +void +erts_ptab_init_table(ErtsPTab *ptab, + ErtsAlcType_t atype, + void (*release_element)(void *), + ErtsPTabElementCommon *invalid_element, + int size, + char *name) +{ + size_t tab_sz; + int bits; + char *tab_end; + erts_smp_atomic_t *tab_entry; + erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + + erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name); + erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0); + last_data_init_nob(ptab, ~((Uint64) 0)); + + /* A size that is a power of 2 is to prefer performance wise */ + bits = erts_fit_in_bits_int32(size-1); + size = 1 << bits; + if (size > ERTS_PTAB_MAX_SIZE) { + size = ERTS_PTAB_MAX_SIZE; + bits = erts_fit_in_bits_int32((Sint32) size - 1); + } + + ptab->r.o.max = size; + + tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t)); + ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, tab_sz); + tab_end = ((char *) ptab->r.o.tab) + tab_sz; + tab_entry = ptab->r.o.tab; + while (tab_end > ((char *) tab_entry)) { + erts_smp_atomic_init_nob(tab_entry, ERTS_AINT_NULL); + tab_entry++; + } + + ptab->r.o.tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE; + ptab->r.o.pix_per_cache_line = (ERTS_CACHE_LINE_SIZE + / sizeof(erts_smp_atomic_t)); + ASSERT((ptab->r.o.max & (ptab->r.o.max - 1)) == 0); /* power of 2 */ + ASSERT((ptab->r.o.pix_per_cache_line + & (ptab->r.o.pix_per_cache_line - 1)) == 0); /* power of 2 */ + ASSERT((ptab->r.o.tab_cache_lines + & (ptab->r.o.tab_cache_lines - 1)) == 0); /* power of 2 */ + + ptab->r.o.pix_mask + = (1 << bits) - 1; + ptab->r.o.pix_cl_mask + = ptab->r.o.tab_cache_lines-1; + ptab->r.o.pix_cl_shift + = erts_fit_in_bits_int32(ptab->r.o.pix_per_cache_line-1); + ptab->r.o.pix_cli_shift + = erts_fit_in_bits_int32(ptab->r.o.pix_cl_mask); + ptab->r.o.pix_cli_mask + = (1 << (bits - ptab->r.o.pix_cli_shift)) - 1; + + ASSERT(ptab->r.o.pix_cl_shift + ptab->r.o.pix_cli_shift == bits); + + ptab->r.o.invalid_element = invalid_element; + ptab->r.o.invalid_data = erts_ptab_id2data(ptab, invalid_element->id); + ptab->r.o.release_element = release_element; + + erts_smp_interval_init(&ptab->list.data.interval); + ptab->list.data.deleted.start = NULL; + ptab->list.data.deleted.end = NULL; + ptab->list.data.chunks = (((ptab->r.o.max - 1) + / ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE) + + 1); + + if (size == ERTS_PTAB_MAX_SIZE) { + int pix; + /* + * We want a table size of a power of 2 which ERTS_PTAB_MAX_SIZE + * is. We only have ERTS_PTAB_MAX_SIZE-1 unique identifiers and + * we don't want to shrink the size to ERTS_PTAB_MAX_SIZE/2. + * + * In order to fix this, we insert a pointer from the table + * to the invalid_element, wich will be interpreted as a + * slot currently being modified. This way we will be able to + * have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while + * still having a table size of the power of 2. 
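last_data_cmp() above orders two monotonically increasing 64-bit counters modulo 2^64, so identifier generation keeps working after the counter wraps. A standalone copy with a couple of sanity checks, plain C99 outside the emulator (uint64_t stands in for Uint64):

/* Standalone copy of the wrap-aware ordering used by last_data_cmp() above. */
#include <assert.h>
#include <stdint.h>

static int last_data_cmp(uint64_t ld1, uint64_t ld2)
{
    uint64_t ld1_wrap;
    if (ld1 == ld2)
        return 0;
    ld1_wrap = ld1 + (UINT64_C(1) << 63);   /* point "half a lap" ahead of ld1 */
    if (ld1 < ld1_wrap)
        return (ld1 < ld2 && ld2 < ld1_wrap) ? -1 : 1;
    else
        return (ld1_wrap <= ld2 && ld2 < ld1) ? 1 : -1;
}

int main(void)
{
    assert(last_data_cmp(1, 2) < 0);          /* 2 was generated after 1 */
    assert(last_data_cmp(2, 1) > 0);
    /* a counter that has wrapped is still considered newer */
    assert(last_data_cmp(UINT64_MAX, 0) < 0);
    return 0;
}

This is why the "move last data forward" loop in erts_ptab_new_element() can use last_data_cmp() directly instead of a plain comparison: it only advances last_data when the reserved value is newer in this modular order.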
+ */ + erts_smp_atomic32_inc_nob(&ptab->vola.tile.count); + pix = erts_ptab_data2pix(ptab, ptab->r.o.invalid_data); + erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], + (erts_aint_t) ptab->r.o.invalid_element); + } + +} + +int +erts_ptab_initialized(ErtsPTab *ptab) +{ + return ptab->r.o.tab != NULL; +} + +int +erts_ptab_new_element(ErtsPTab *ptab, + ErtsPTabElementCommon *ptab_el, + void *init_arg, + void (*init_ptab_el)(void *, Eterm)) +{ + int pix; + Uint64 ld, exp_ld; + Eterm data; + erts_aint32_t count; + erts_aint_t invalid = (erts_aint_t) ptab->r.o.invalid_element; + + erts_ptab_rlock(ptab); + + count = erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.count); + if (count > ptab->r.o.max) { + while (1) { + erts_aint32_t act_count; + + act_count = erts_smp_atomic32_cmpxchg_relb(&ptab->vola.tile.count, + count-1, + count); + if (act_count == count) { + erts_ptab_runlock(ptab); + return 0; + } + count = act_count; + if (count <= ptab->r.o.max) + break; + } + } + + ptab_el->u.alive.started_interval + = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + + ld = last_data_read_acqb(ptab); + + /* Reserve slot */ + while (1) { + ld++; + pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld)); + if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix]) == ERTS_AINT_NULL) { + erts_aint_t val; + val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix], + invalid, + ERTS_AINT_NULL); + + if (ERTS_AINT_NULL == val) + break; + } + } + + data = ERTS_PTAB_LastData2EtermData(ld); + + if (data == ptab->r.o.invalid_data) { + /* Do not use invalid data; fix it... */ + ld += ptab->r.o.max; + ASSERT(pix == erts_ptab_data2pix(ptab, + ERTS_PTAB_LastData2EtermData(ld))); + data = ERTS_PTAB_LastData2EtermData(ld); + ASSERT(data != ptab->r.o.invalid_data); + } + + exp_ld = last_data_read_nob(ptab); + + /* Move last data forward */ + while (1) { + Uint64 act_ld; + if (last_data_cmp(ld, exp_ld) < 0) + break; + act_ld = last_data_cmpxchg_relb(ptab, ld, exp_ld); + if (act_ld == exp_ld) + break; + exp_ld = act_ld; + } + + init_ptab_el(init_arg, data); + +#ifdef ERTS_SMP + erts_smp_atomic32_init_nob(&ptab_el->refc, 1); +#endif + + /* Move into slot reserved */ +#ifdef DEBUG + ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix], + (erts_aint_t) ptab_el)); +#else + erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); +#endif + + erts_ptab_runlock(ptab); + + return 1; +} + +static void +save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el) +{ + ErtsPTabDeletedElement *ptdep = erts_alloc(ERTS_ALC_T_PTAB_LIST_DEL, + sizeof(ErtsPTabDeletedElement)); + ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start + && ptab->list.data.deleted.end); + ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + + ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); + + ptdep->prev = ptab->list.data.deleted.end; + ptdep->next = NULL; + ptdep->ix = erts_ptab_id2pix(ptab, ptab_el->id); + ptdep->u.element.id = ptab_el->id; + ptdep->u.element.inserted = ptab_el->u.alive.started_interval; + ptdep->u.element.deleted = + erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + + ptab->list.data.deleted.end->next = ptdep; + ptab->list.data.deleted.end = ptdep; + + ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); + + ERTS_PTAB_LIST_ASSERT(ptdep->prev->ix >= 0 + ? 
(ptdep->u.element.deleted + >= ptdep->prev->u.element.deleted) + : (ptdep->u.element.deleted + >= ptdep->prev->u.bif_invocation.interval)); +} + +void +erts_ptab_delete_element(ErtsPTab *ptab, + ErtsPTabElementCommon *ptab_el) +{ + int maybe_save; + int pix = erts_ptab_id2pix(ptab, ptab_el->id); + + ASSERT(erts_get_scheduler_id()); /* *Need* to be a scheduler */ + + erts_ptab_rlock(ptab); + maybe_save = ptab->list.data.deleted.end != NULL; + if (maybe_save) { + erts_ptab_runlock(ptab); + erts_ptab_rwlock(ptab); + } + + erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL); + + ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0); + erts_smp_atomic32_dec_relb(&ptab->vola.tile.count); + + if (!maybe_save) + erts_ptab_runlock(ptab); + else { + if (ptab->list.data.deleted.end) + save_deleted_element(ptab, ptab_el); + erts_ptab_rwunlock(ptab); + } + + if (ptab->r.o.release_element) + erts_schedule_thr_prgr_later_op(ptab->r.o.release_element, + (void *) ptab_el, + &ptab_el->u.release); +} + +/* + * erts_ptab_list() implements BIFs listing the content of the table, + * e.g. erlang:processes/0. + */ +static void cleanup_ptab_list_bif_data(Binary *bp); +static int ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp); + + +BIF_RETTYPE +erts_ptab_list(Process *c_p, ErtsPTab *ptab) +{ + /* + * A requirement: The list of identifiers returned should be a + * consistent snapshot of all elements existing + * in the table at some point in time during the + * execution of the BIF calling this function. + * Since elements might be deleted while the BIF + * is executing, we have to keep track of all + * deleted elements and add them to the result. + * We also ignore elements created after the BIF + * has begun executing. + */ + BIF_RETTYPE ret_val; + Eterm res_acc = NIL; + Binary *mbp = erts_create_magic_binary(sizeof(ErtsPTabListBifData), + cleanup_ptab_list_bif_data); + ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(mbp); + + ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, call); + ptlbdp->ptab = ptab; + ptlbdp->state = INITIALIZING; + ERTS_PTAB_LIST_DBG_INIT(c_p, ptlbdp); + + if (ERTS_BIF_REDS_LEFT(c_p) >= ERTS_PTAB_LIST_BIF_MIN_START_REDS + && ptab_list_bif_engine(c_p, &res_acc, mbp)) { + erts_bin_free(mbp); + ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc); + ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, return); + ERTS_BIF_PREP_RET(ret_val, res_acc); + } + else { + Eterm *hp; + Eterm magic_bin; + ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc); + hp = HAlloc(c_p, PROC_BIN_SIZE); + ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, PROC_BIN_SIZE); + magic_bin = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp); + ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp); + ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, trap); + ERTS_BIF_PREP_YIELD2(ret_val, + &ptab_list_continue_export, + c_p, + res_acc, + magic_bin); + } + return ret_val; +} + +static void +cleanup_ptab_list_bif_data(Binary *bp) +{ + ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(bp); + ErtsPTab *ptab = ptlbdp->ptab; + + ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, call); + + if (ptlbdp->state != INITIALIZING) { + + if (ptlbdp->chunk) { + erts_free(ERTS_ALC_T_PTAB_LIST_CNKI, ptlbdp->chunk); + ptlbdp->chunk = NULL; + } + if (ptlbdp->pid) { + erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->pid); + ptlbdp->pid = NULL; + } + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS + if (ptlbdp->debug.pid_started) { + erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->debug.pid_started); + ptlbdp->debug.pid_started = NULL; + } +#endif + + 
if (ptlbdp->bif_invocation) { + ErtsPTabDeletedElement *ptdep; + + erts_ptab_rwlock(ptab); + + ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, deleted_cleanup); + + ptdep = ptlbdp->bif_invocation; + ptlbdp->bif_invocation = NULL; + + ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); + + if (ptdep->prev) { + /* + * Only remove this bif invokation when we + * have preceding invokations. + */ + ptdep->prev->next = ptdep->next; + if (ptdep->next) + ptdep->next->prev = ptdep->prev; + else { + /* + * At the time of writing this branch cannot be + * reached. I don't want to remove this code though + * since it may be possible to reach this line + * in the future if the cleanup order in + * erts_do_exit_process() is changed. The ASSERT(0) + * is only here to make us aware that the reorder + * has happened. /rickard + */ + ASSERT(0); + ptab->list.data.deleted.end = ptdep->prev; + } + erts_free(ERTS_ALC_T_PTAB_LIST_DEL, ptdep); + } + else { + /* + * Free all elements until next bif invokation + * is found. + */ + ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep); + do { + ErtsPTabDeletedElement *fptdep = ptdep; + ptdep = ptdep->next; + erts_free(ERTS_ALC_T_PTAB_LIST_DEL, fptdep); + } while (ptdep && ptdep->ix >= 0); + ptab->list.data.deleted.start = ptdep; + if (ptdep) + ptdep->prev = NULL; + else + ptab->list.data.deleted.end = NULL; + } + + ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); + + erts_ptab_rwunlock(ptab); + + } + } + + ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, return); + ERTS_PTAB_LIST_DBG_CLEANUP(ptlbdp); +} + +static int +ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp) +{ + ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(mbp); + ErtsPTab *ptab = ptlbdp->ptab; + int have_reds; + int reds; + int locked = 0; + + do { + switch (ptlbdp->state) { + case INITIALIZING: + ptlbdp->chunk = erts_alloc(ERTS_ALC_T_PTAB_LIST_CNKI, + (sizeof(ErtsPTabListBifChunkInfo) + * ptab->list.data.chunks)); + ptlbdp->tix = 0; + ptlbdp->pid_ix = 0; + + erts_ptab_rwlock(ptab); + locked = 1; + + ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, init); + + ptlbdp->pid_sz = erts_ptab_count(ptab); + ptlbdp->pid = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS, + sizeof(Eterm)*ptlbdp->pid_sz); + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS + ptlbdp->debug.pid_started + = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS, + sizeof(Uint64)*ptlbdp->pid_sz); +#endif + + ERTS_PTAB_LIST_DBG_SAVE_PIDS(ptlbdp); + + if (ptab->list.data.chunks == 1) + ptlbdp->bif_invocation = NULL; + else { + /* + * We will have to access the table multiple times + * releasing the table lock in between chunks. 
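The listing BIF spreads its work over many invocations by charging reductions per inspected table index. Below is a much simplified model of that budgeting, reusing the chunk-size and indices-per-reduction constants defined earlier in this file; scan_entry() and the resume protocol are placeholders, and the real engine additionally snapshots deleted elements and traps through a magic binary as shown in the surrounding diff:

/* Simplified model of the reduction accounting in ptab_list_bif_engine(). */
#include <stddef.h>

#define CHUNK_SIZE        1000  /* ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE */
#define INDICES_PER_RED     25  /* ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED */

static void scan_entry(int ix) { (void) ix; /* inspect table slot ix */ }

/* Returns the index to resume at; equals 'max' when the scan is complete. */
static int scan_some(int start_ix, int max, int *reds_left)
{
    int ix = start_ix;
    while (ix < max && *reds_left > 0) {
        int end_ix = ix + CHUNK_SIZE;
        int indices;
        if (end_ix > max)
            end_ix = max;
        indices = end_ix - ix;
        for (; ix < end_ix; ix++)
            scan_entry(ix);
        *reds_left -= indices / INDICES_PER_RED;  /* analogue of BUMP_REDS() */
    }
    return ix;   /* the caller yields here and continues in a later call */
}

The per-chunk interval stamps taken under the table lock are what later let INSPECTING_DELETED decide, per deleted element, whether it was already seen while its chunk was scanned or must be added to the result.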
+ */ + ptlbdp->bif_invocation + = erts_alloc(ERTS_ALC_T_PTAB_LIST_DEL, + sizeof(ErtsPTabDeletedElement)); + ptlbdp->bif_invocation->ix = -1; + ptlbdp->bif_invocation->u.bif_invocation.interval + = erts_smp_step_interval_nob(erts_ptab_interval(ptab)); + ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); + + ptlbdp->bif_invocation->next = NULL; + if (ptab->list.data.deleted.end) { + ptlbdp->bif_invocation->prev = ptab->list.data.deleted.end; + ptab->list.data.deleted.end->next = ptlbdp->bif_invocation; + ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start); + } + else { + ptlbdp->bif_invocation->prev = NULL; + ptab->list.data.deleted.start = ptlbdp->bif_invocation; + } + ptab->list.data.deleted.end = ptlbdp->bif_invocation; + + ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); + + } + + ptlbdp->state = INSPECTING_TABLE; + /* Fall through */ + + case INSPECTING_TABLE: { + int ix = ptlbdp->tix; + int indices = ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE; + int cix = ix / ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE; + int end_ix = ix + indices; + Uint64 *invocation_interval_p; + ErtsPTabElementCommon *invalid_element; + + invocation_interval_p + = (ptlbdp->bif_invocation + ? &ptlbdp->bif_invocation->u.bif_invocation.interval + : NULL); + + ERTS_PTAB_LIST_ASSERT(is_nil(*res_accp)); + if (!locked) { + erts_ptab_rwlock(ptab); + locked = 1; + } + + ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_PTAB_LIST_DBG_TRACE(p->common.id, insp_table); + + if (cix != 0) + ptlbdp->chunk[cix].interval + = erts_smp_step_interval_nob(erts_ptab_interval(ptab)); + else if (ptlbdp->bif_invocation) + ptlbdp->chunk[0].interval = *invocation_interval_p; + /* else: interval is irrelevant */ + + if (end_ix >= ptab->r.o.max) { + ERTS_PTAB_LIST_ASSERT(cix+1 == ptab->list.data.chunks); + end_ix = ptab->r.o.max; + indices = end_ix - ix; + /* What to do when done with this chunk */ + ptlbdp->state = (ptab->list.data.chunks == 1 + ? 
BUILDING_RESULT + : INSPECTING_DELETED); + } + + invalid_element = ptab->r.o.invalid_element; + for (; ix < end_ix; ix++) { + ErtsPTabElementCommon *el; + el = (ErtsPTabElementCommon *) erts_ptab_pix2intptr_nob(ptab, + ix); + if (el + && el != invalid_element + && (!invocation_interval_p + || el->u.alive.started_interval < *invocation_interval_p)) { + ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(el->id)); + ptlbdp->pid[ptlbdp->pid_ix] = el->id; + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS + ptlbdp->debug.pid_started[ptlbdp->pid_ix] + = el->u.alive.started_interval; +#endif + + ptlbdp->pid_ix++; + ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix <= ptlbdp->pid_sz); + } + } + + ptlbdp->tix = end_ix; + + erts_ptab_rwunlock(ptab); + locked = 0; + + reds = indices/ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED; + BUMP_REDS(c_p, reds); + + have_reds = ERTS_BIF_REDS_LEFT(c_p); + + if (have_reds && ptlbdp->state == INSPECTING_TABLE) { + ix = ptlbdp->tix; + indices = ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE; + end_ix = ix + indices; + if (end_ix > ptab->r.o.max) { + end_ix = ptab->r.o.max; + indices = end_ix - ix; + } + + reds = indices/ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED; + + /* Pretend we have no reds left if we haven't got enough + reductions to complete next chunk */ + if (reds > have_reds) + have_reds = 0; + } + + break; + } + + case INSPECTING_DELETED: { + int i; + int max_reds; + int free_deleted = 0; + Uint64 invocation_interval; + ErtsPTabDeletedElement *ptdep; + ErtsPTabDeletedElement *free_list = NULL; + + ptdep = ptlbdp->bif_invocation; + ERTS_PTAB_LIST_ASSERT(ptdep); + invocation_interval = ptdep->u.bif_invocation.interval; + + max_reds = have_reds = ERTS_BIF_REDS_LEFT(c_p); + if (max_reds > ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS) + max_reds = ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS; + + reds = 0; + erts_ptab_rwlock(ptab); + ERTS_PTAB_LIST_DBG_TRACE(p->common.id, insp_term_procs); + + ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); + + if (ptdep->prev) + ptdep->prev->next = ptdep->next; + else { + ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep); + ptab->list.data.deleted.start = ptdep->next; + + if (ptab->list.data.deleted.start + && ptab->list.data.deleted.start->ix >= 0) { + free_list = ptab->list.data.deleted.start; + free_deleted = 1; + } + } + + if (ptdep->next) + ptdep->next->prev = ptdep->prev; + else + ptab->list.data.deleted.end = ptdep->prev; + + ptdep = ptdep->next; + + i = 0; + while (reds < max_reds && ptdep) { + if (ptdep->ix < 0) { + if (free_deleted) { + ERTS_PTAB_LIST_ASSERT(free_list); + ERTS_PTAB_LIST_ASSERT(ptdep->prev); + + ptdep->prev->next = NULL; /* end of free_list */ + ptab->list.data.deleted.start = ptdep; + ptdep->prev = NULL; + free_deleted = 0; + } + } + else { + int cix = ptdep->ix/ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE; + Uint64 chunk_interval = ptlbdp->chunk[cix].interval; + Eterm pid = ptdep->u.element.id; + ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(pid)); + + if (ptdep->u.element.inserted < invocation_interval) { + if (ptdep->u.element.deleted < chunk_interval) { + ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND( + ptlbdp, + pid, + ptdep->u.element.inserted); + ptlbdp->pid[ptlbdp->pid_ix] = pid; +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS + ptlbdp->debug.pid_started[ptlbdp->pid_ix] + = ptdep->u.element.inserted; +#endif + ptlbdp->pid_ix++; + ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix + <= ptlbdp->pid_sz); + } + else { + ERTS_PTAB_LIST_DBG_CHK_PID_FOUND( + ptlbdp, + pid, + ptdep->u.element.inserted); + } + } + 
else { + ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND( + ptlbdp, + pid, + ptdep->u.element.inserted); + } + + i++; + if (i == ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED) { + reds++; + i = 0; + } + if (free_deleted) + reds += ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS; + } + ptdep = ptdep->next; + } + + if (free_deleted) { + ERTS_PTAB_LIST_ASSERT(free_list); + ptab->list.data.deleted.start = ptdep; + if (!ptdep) + ptab->list.data.deleted.end = NULL; + else { + ERTS_PTAB_LIST_ASSERT(ptdep->prev); + ptdep->prev->next = NULL; /* end of free_list */ + ptdep->prev = NULL; + } + } + + if (!ptdep) { + /* Done */ + ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix == ptlbdp->pid_sz); + ptlbdp->state = BUILDING_RESULT; + ptlbdp->bif_invocation->next = free_list; + free_list = ptlbdp->bif_invocation; + ptlbdp->bif_invocation = NULL; + } + else { + /* Link in bif_invocation again where we left off */ + ptlbdp->bif_invocation->prev = ptdep->prev; + ptlbdp->bif_invocation->next = ptdep; + ptdep->prev = ptlbdp->bif_invocation; + if (ptlbdp->bif_invocation->prev) + ptlbdp->bif_invocation->prev->next = ptlbdp->bif_invocation; + else { + ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start + == ptdep); + ptab->list.data.deleted.start = ptlbdp->bif_invocation; + } + } + + ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); + ERTS_PTAB_LIST_DBG_CHK_FREELIST(ptab, free_list); + erts_ptab_rwunlock(ptab); + + /* + * We do the actual free of deleted structures now when we + * have released the table lock instead of when we encountered + * them. This since free() isn't for free and we don't want to + * unnecessarily block other schedulers. + */ + while (free_list) { + ptdep = free_list; + free_list = ptdep->next; + erts_free(ERTS_ALC_T_PTAB_LIST_DEL, ptdep); + } + + have_reds -= reds; + if (have_reds < 0) + have_reds = 0; + BUMP_REDS(c_p, reds); + break; + } + + case BUILDING_RESULT: { + int conses, ix, min_ix; + Eterm *hp; + Eterm res = *res_accp; + + ERTS_PTAB_LIST_DBG_VERIFY_PIDS(ptlbdp); + ERTS_PTAB_LIST_DBG_CHK_RESLIST(res); + + ERTS_PTAB_LIST_DBG_TRACE(p->common.id, begin_build_res); + + have_reds = ERTS_BIF_REDS_LEFT(c_p); + conses = ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED*have_reds; + min_ix = ptlbdp->pid_ix - conses; + if (min_ix < 0) { + min_ix = 0; + conses = ptlbdp->pid_ix; + } + + if (conses) { + hp = HAlloc(c_p, conses*2); + ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, conses*2); + + for (ix = ptlbdp->pid_ix - 1; ix >= min_ix; ix--) { + ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(ptlbdp->pid[ix])); + res = CONS(hp, ptlbdp->pid[ix], res); + hp += 2; + } + + ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp); + } + + ptlbdp->pid_ix = min_ix; + if (min_ix == 0) + ptlbdp->state = RETURN_RESULT; + else { + ptlbdp->pid_sz = min_ix; + ptlbdp->pid = erts_realloc(ERTS_ALC_T_PTAB_LIST_PIDS, + ptlbdp->pid, + sizeof(Eterm)*ptlbdp->pid_sz); +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS + ptlbdp->debug.pid_started + = erts_realloc(ERTS_ALC_T_PTAB_LIST_PIDS, + ptlbdp->debug.pid_started, + sizeof(Uint64) * ptlbdp->pid_sz); +#endif + } + reds = conses/ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED; + BUMP_REDS(c_p, reds); + have_reds -= reds; + + ERTS_PTAB_LIST_DBG_CHK_RESLIST(res); + ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, end_build_res); + *res_accp = res; + break; + } + case RETURN_RESULT: + cleanup_ptab_list_bif_data(mbp); + return 1; + + default: + erl_exit(ERTS_ABORT_EXIT, + "%s:%d:ptab_list_bif_engine(): Invalid state: %d\n", + __FILE__, __LINE__, (int) ptlbdp->state); + } + + + } while (have_reds || 
ptlbdp->state == RETURN_RESULT); + + return 0; +} + +/* + * ptab_list_continue/2 is a hidden BIF that the original BIF traps to + * if there are too much work to do in one go. + */ + +static BIF_RETTYPE ptab_list_continue(BIF_ALIST_2) +{ + Eterm res_acc; + Binary *mbp; + + /* + * This bif cannot be called from erlang code. It can only be + * trapped to from other BIFs; therefore, a bad argument + * is an internal error and should never occur... + */ + + ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, call); + ERTS_PTAB_LIST_ASSERT(is_nil(BIF_ARG_1) || is_list(BIF_ARG_1)); + + res_acc = BIF_ARG_1; + + ERTS_PTAB_LIST_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2)); + + mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val; + + ERTS_PTAB_LIST_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) + == cleanup_ptab_list_bif_data); + ERTS_PTAB_LIST_ASSERT( + ((ErtsPTabListBifData *) ERTS_MAGIC_BIN_DATA(mbp))->debug.caller + == BIF_P->common.id); + + if (ptab_list_bif_engine(BIF_P, &res_acc, mbp)) { + ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, return); + BIF_RET(res_acc); + } + else { + ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, trap); + ERTS_BIF_YIELD2(&ptab_list_continue_export, BIF_P, res_acc, BIF_ARG_2); + } +} + +void +erts_ptab_init(void) +{ + /* ptab_list_continue/2 is a hidden BIF that the original BIF traps to. */ + erts_init_trap_export(&ptab_list_continue_export, + am_erlang, am_ptab_list_continue, 2, + &ptab_list_continue); + +} + +/* + * Debug stuff + */ + +Sint +erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next) +{ + Uint64 ld; + Sint res; + Eterm data; + int first_pix = -1; + + erts_ptab_rwlock(ptab); + + if (!set) + ld = last_data_read_nob(ptab); + else { + + ld = (Uint64) next; + data = ERTS_PTAB_LastData2EtermData(ld); + if (ptab->r.o.invalid_data == data) { + ld += ptab->r.o.max; + ASSERT(erts_ptab_data2pix(ptab, data) + == erts_ptab_data2pix(ptab, + ERTS_PTAB_LastData2EtermData(ld))); + } + last_data_set_relb(ptab, ld); + } + + while (1) { + int pix; + ld++; + pix = (int) (ld % ptab->r.o.max); + if (first_pix < 0) + first_pix = pix; + else if (pix == first_pix) { + res = -1; + break; + } + if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) { + data = ERTS_PTAB_LastData2EtermData(ld); + if (ptab->r.o.invalid_data == data) { + ld += ptab->r.o.max; + ASSERT(erts_ptab_data2pix(ptab, data) + == erts_ptab_data2pix(ptab, + ERTS_PTAB_LastData2EtermData(ld))); + data = ERTS_PTAB_LastData2EtermData(ld); + } + res = data; + break; + } + } + + erts_ptab_rwunlock(ptab); + + return res; +} + +static ERTS_INLINE ErtsPTabElementCommon * +ptab_pix2el(ErtsPTab *ptab, int ix) +{ + ErtsPTabElementCommon *ptab_el; + ASSERT(0 <= ix && ix < ptab->r.o.max); + ptab_el = (ErtsPTabElementCommon *) erts_ptab_pix2intptr_nob(ptab, ix); + if (ptab_el == ptab->r.o.invalid_element) + return NULL; + else + return ptab_el; +} + +Eterm +erts_debug_ptab_list(Process *c_p, ErtsPTab *ptab) +{ + int i; + Uint need; + Eterm res; + Eterm* hp; + Eterm *hp_end; + + erts_ptab_rwlock(ptab); + + res = NIL; + need = erts_ptab_count(ptab) * 2; + hp = HAlloc(c_p, need); /* we need two heap words for each id */ + hp_end = hp + need; + + /* make the list by scanning bakward */ + + + for (i = ptab->r.o.max-1; i >= 0; i--) { + ErtsPTabElementCommon *el = ptab_pix2el(ptab, i); + if (el) { + res = CONS(hp, el->id, res); + hp += 2; + } + } + + erts_ptab_rwunlock(ptab); + + HRelease(c_p, hp_end, hp); + + return res; +} + +Eterm +erts_debug_ptab_list_bif_info(Process *c_p, ErtsPTab *ptab) +{ + ERTS_DECL_AM(ptab_list_bif_info); + Eterm elements[] = { 
+ AM_ptab_list_bif_info, + make_small((Uint) ERTS_PTAB_LIST_BIF_MIN_START_REDS), + make_small((Uint) ptab->list.data.chunks), + make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE), + make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED), + make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS), + make_small((Uint) ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED), + make_small((Uint) ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS), + make_small((Uint) ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED), + make_small((Uint) ERTS_PTAB_LIST_BIF_DEBUGLEVEL) + }; + Uint sz = 0; + Eterm *hp; + (void) erts_bld_tuplev(NULL, &sz, sizeof(elements)/sizeof(Eterm), elements); + hp = HAlloc(c_p, sz); + return erts_bld_tuplev(&hp, NULL, sizeof(elements)/sizeof(Eterm), elements); +} + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS +static void +debug_ptab_list_check_found_pid(ErtsPTabListBifData *ptlbdp, + Eterm pid, + Uint64 ic, + int pid_should_be_found) +{ + int i; + for (i = 0; i < ptlbdp->pid_ix; i++) { + if (ptlbdp->pid[i] == pid && ptlbdp->debug.pid_started[i] == ic) { + ERTS_PTAB_LIST_ASSERT(pid_should_be_found); + return; + } + } + ERTS_PTAB_LIST_ASSERT(!pid_should_be_found); +} +#endif + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST +static void +debug_ptab_list_check_res_list(Eterm list) +{ + while (is_list(list)) { + Eterm* consp = list_val(list); + Eterm hd = CAR(consp); + ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(hd)); + list = CDR(consp); + } + + ERTS_PTAB_LIST_ASSERT(is_nil(list)); +} +#endif + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS + +static void +debug_ptab_list_save_all_pids(ErtsPTabListBifData *ptlbdp) +{ + int ix, tix, cpix; + ErtsPTab *ptab = ptlbdp->ptab; + ptlbdp->debug.correct_pids_verified = 0; + ptlbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS, + sizeof(Eterm)*ptlbdp->pid_sz); + + for (tix = 0, cpix = 0; tix < ptab->r.o.max; tix++) { + ErtsPTabElementCommon *el = ptab_pix2el(ptab, tix); + if (el) { + ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(el->id)); + ptlbdp->debug.correct_pids[cpix++] = el->id; + ERTS_PTAB_LIST_ASSERT(cpix <= ptlbdp->pid_sz); + } + } + ERTS_PTAB_LIST_ASSERT(cpix == ptlbdp->pid_sz); + + for (ix = 0; ix < ptlbdp->pid_sz; ix++) + ptlbdp->pid[ix] = make_small(ix); +} + +static void +debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp) +{ + int ix, cpix; + + ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix == ptlbdp->pid_sz); + + for (ix = 0; ix < ptlbdp->pid_sz; ix++) { + int found = 0; + Eterm pid = ptlbdp->pid[ix]; + ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(pid)); + for (cpix = ix; cpix < ptlbdp->pid_sz; cpix++) { + if (ptlbdp->debug.correct_pids[cpix] == pid) { + ptlbdp->debug.correct_pids[cpix] = NIL; + found = 1; + break; + } + } + if (!found) { + for (cpix = 0; cpix < ix; cpix++) { + if (ptlbdp->debug.correct_pids[cpix] == pid) { + ptlbdp->debug.correct_pids[cpix] = NIL; + found = 1; + break; + } + } + } + ERTS_PTAB_LIST_ASSERT(found); + } + ptlbdp->debug.correct_pids_verified = 1; + + erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->debug.correct_pids); + ptlbdp->debug.correct_pids = NULL; +} +#endif /* ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS */ + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST +static void +debug_ptab_list_check_del_list(ErtsPTab *ptab) +{ + ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + if (!ptab->list.data.deleted.start) + ERTS_PTAB_LIST_ASSERT(!ptab->list.data.deleted.end); + 
else { + Uint64 curr_interval = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + Uint64 *prev_x_interval_p = NULL; + ErtsPTabDeletedElement *ptdep; + + for (ptdep = ptab->list.data.deleted.start; + ptdep; + ptdep = ptdep->next) { + if (!ptdep->prev) + ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep); + else + ERTS_PTAB_LIST_ASSERT(ptdep->prev->next == ptdep); + if (!ptdep->next) + ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.end == ptdep); + else + ERTS_PTAB_LIST_ASSERT(ptdep->next->prev == ptdep); + if (ptdep->ix < 0) { + Uint64 interval = ptdep->u.bif_invocation.interval; + ERTS_PTAB_LIST_ASSERT(interval <= curr_interval); + } + else { + Uint64 s_interval = ptdep->u.element.inserted; + Uint64 x_interval = ptdep->u.element.deleted; + + ERTS_PTAB_LIST_ASSERT(s_interval <= x_interval); + if (prev_x_interval_p) + ERTS_PTAB_LIST_ASSERT(*prev_x_interval_p <= x_interval); + prev_x_interval_p = &ptdep->u.element.deleted; + ERTS_PTAB_LIST_ASSERT( + erts_ptab_is_valid_id(ptdep->u.element.id)); + ERTS_PTAB_LIST_ASSERT(erts_ptab_id2pix(ptab, + ptdep->u.element.id) + == ptdep->ix); + + } + } + + } +} + +static void +debug_ptab_list_check_del_free_list(ErtsPTab *ptab, + ErtsPTabDeletedElement *free_list) +{ + if (ptab->list.data.deleted.start) { + ErtsPTabDeletedElement *fptdep; + ErtsPTabDeletedElement *ptdep; + + for (fptdep = free_list; fptdep; fptdep = fptdep->next) { + for (ptdep = ptab->list.data.deleted.start; + ptdep; + ptdep = ptdep->next) { + ERTS_PTAB_LIST_ASSERT(fptdep != ptdep); + } + } + } +} + +#endif + +#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0 + +static void +debug_ptab_list_assert_error(char* expr, const char* file, int line, const char *func) +{ + fflush(stdout); + erts_fprintf(stderr, "%s:%d:%s(): Assertion failed: %s\n", + (char *) file, line, (char *) func, expr); + fflush(stderr); + abort(); +} + +#endif diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h new file mode 100644 index 0000000000..8a130f42a3 --- /dev/null +++ b/erts/emulator/beam/erl_ptab.h @@ -0,0 +1,472 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2012. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + +/* + * Description: Process/Port table implementation. 
+ * + * Author: Rickard Green + */ + +#ifndef ERL_PTAB_H__ +#define ERL_PTAB_H__ + +#include "sys.h" +#include "erl_term.h" +#include "erl_time.h" +#include "erl_utils.h" +#define ERL_THR_PROGRESS_TSD_TYPE_ONLY +#include "erl_thr_progress.h" +#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY +#include "erl_alloc.h" +#include "erl_monitors.h" + +#define ERTS_TRACER_PROC(P) ((P)->common.tracer_proc) +#define ERTS_TRACE_FLAGS(P) ((P)->common.trace_flags) + +#define ERTS_P_LINKS(P) ((P)->common.u.alive.links) +#define ERTS_P_MONITORS(P) ((P)->common.u.alive.monitors) + +#define IS_TRACED(p) \ + (ERTS_TRACER_PROC((p)) != NIL) +#define ARE_TRACE_FLAGS_ON(p,tf) \ + ((ERTS_TRACE_FLAGS((p)) & (tf|F_SENSITIVE)) == (tf)) +#define IS_TRACED_FL(p,tf) \ + ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) ) + +typedef struct { + Eterm id; +#ifdef ERTS_SMP + erts_atomic32_t refc; +#endif + Eterm tracer_proc; + Uint trace_flags; + union { + /* --- While being alive --- */ + struct { + Uint64 started_interval; + struct reg_proc *reg; + ErtsLink *links; + ErtsMonitor *monitors; +#ifdef ERTS_SMP + ErtsSmpPTimer *ptimer; +#else + ErlTimer tm; +#endif + } alive; + + /* --- While being released --- */ + ErtsThrPrgrLaterOp release; + } u; +} ErtsPTabElementCommon; + +typedef struct ErtsPTabDeletedElement_ ErtsPTabDeletedElement; + +typedef struct { + erts_smp_rwmtx_t rwmtx; + erts_interval_t interval; + struct { + ErtsPTabDeletedElement *start; + ErtsPTabDeletedElement *end; + } deleted; + int chunks; +} ErtsPTabListData; + +typedef struct { +#ifdef ARCH_32 + erts_smp_dw_atomic_t last_data; +#else + erts_smp_atomic_t last_data; +#endif + erts_smp_atomic32_t count; +} ErtsPTabVolatileData; + +typedef struct { + erts_smp_atomic_t *tab; + Uint32 max; + Uint32 tab_cache_lines; + Uint32 pix_per_cache_line; + Uint32 pix_mask; + Uint32 pix_cl_mask; + Uint32 pix_cl_shift; + Uint32 pix_cli_mask; + Uint32 pix_cli_shift; + ErtsPTabElementCommon *invalid_element; + Eterm invalid_data; + void (*release_element)(void *); +} ErtsPTabReadOnlyData; + +typedef struct { + /* + * Data mainly modified when someone is listing + * the content of the table. + */ + union { + ErtsPTabListData data; + char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabListData))]; + } list; + + /* + * Frequently modified data. + */ + union { + ErtsPTabVolatileData tile; + char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabVolatileData))]; + } vola; + + /* + * Read only data. + */ + union { + ErtsPTabReadOnlyData o; + char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabReadOnlyData))]; + } r; +} ErtsPTab; + +#define ERTS_PTAB_ID_DATA_SIZE 28 +#define ERTS_PTAB_ID_DATA_SHIFT (_TAG_IMMED1_SIZE) +/* ERTS_PTAB_MAX_SIZE must be a power of 2 */ +#define ERTS_PTAB_MAX_SIZE (SWORD_CONSTANT(1) << 27) +#if (ERTS_PTAB_MAX_SIZE-1) > MAX_SMALL +# error "The maximum number of processes/ports must fit in a SMALL." +#endif + + +/* + * Currently pids and ports are allowed. 
+ */ +#if _PID_DATA_SIZE != ERTS_PTAB_ID_DATA_SIZE +# error "Unexpected pid data size" +#endif +#if _PID_DATA_SHIFT != ERTS_PTAB_ID_DATA_SHIFT +# error "Unexpected pid tag size" +#endif +#if _PORT_DATA_SIZE != ERTS_PTAB_ID_DATA_SIZE +# error "Unexpected port data size" +#endif +#if _PORT_DATA_SHIFT != ERTS_PTAB_ID_DATA_SHIFT +# error "Unexpected port tag size" +#endif + +#define ERTS_PTAB_INVALID_ID(TAG) \ + ((Eterm) \ + ((((1 << ERTS_PTAB_ID_DATA_SIZE) - 1) << ERTS_PTAB_ID_DATA_SHIFT) \ + | (TAG))) + +#define erts_ptab_is_valid_id(ID) \ + (is_internal_pid((ID)) || is_internal_port((ID))) + +void erts_ptab_init(void); +void erts_ptab_init_table(ErtsPTab *ptab, + ErtsAlcType_t atype, + void (*release_element)(void *), + ErtsPTabElementCommon *invalid_element, + int size, + char *name); +int erts_ptab_new_element(ErtsPTab *ptab, + ErtsPTabElementCommon *ptab_el, + void *init_arg, + void (*init_ptab_el)(void *, Eterm)); +void erts_ptab_delete_element(ErtsPTab *ptab, + ErtsPTabElementCommon *ptab_el); +int erts_ptab_initialized(ErtsPTab *ptab); + +ERTS_GLB_INLINE erts_interval_t *erts_ptab_interval(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_ptab_max(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_ptab_count(ErtsPTab *ptab); +ERTS_GLB_INLINE Uint erts_ptab_pixdata2data(ErtsPTab *ptab, Eterm pixdata); +ERTS_GLB_INLINE Uint32 erts_ptab_pixdata2pix(ErtsPTab *ptab, Eterm pixdata); +ERTS_GLB_INLINE Uint32 erts_ptab_data2pix(ErtsPTab *ptab, Eterm data); +ERTS_GLB_INLINE Uint erts_ptab_data2pixdata(ErtsPTab *ptab, Eterm data); +ERTS_GLB_INLINE Eterm erts_ptab_make_id(ErtsPTab *ptab, Eterm data, Eterm tag); +ERTS_GLB_INLINE int erts_ptab_id2pix(ErtsPTab *ptab, Eterm id); +ERTS_GLB_INLINE Uint erts_ptab_id2data(ErtsPTab *ptab, Eterm id); +ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix); +ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix); +ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix); +ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix); +ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el); +ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el); +ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el, + Sint32 add_refc); +ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab); +ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab); +ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab); +ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE erts_interval_t * +erts_ptab_interval(ErtsPTab *ptab) +{ + return &ptab->list.data.interval; +} + +ERTS_GLB_INLINE int +erts_ptab_max(ErtsPTab *ptab) +{ + int max = ptab->r.o.max; + return max == ERTS_PTAB_MAX_SIZE ? 
max - 1 : max; +} + +ERTS_GLB_INLINE int +erts_ptab_count(ErtsPTab *ptab) +{ + int max = ptab->r.o.max; + erts_aint32_t res = erts_smp_atomic32_read_nob(&ptab->vola.tile.count); + if (max == ERTS_PTAB_MAX_SIZE) { + max--; + res--; + } + if (res > max) + return max; + ASSERT(res >= 0); + return (int) res; + +} + +ERTS_GLB_INLINE Uint erts_ptab_pixdata2data(ErtsPTab *ptab, Eterm pixdata) +{ + Uint32 data = ((Uint32) pixdata) & ~ptab->r.o.pix_mask; + data |= (pixdata >> ptab->r.o.pix_cl_shift) & ptab->r.o.pix_cl_mask; + data |= (pixdata & ptab->r.o.pix_cli_mask) << ptab->r.o.pix_cli_shift; + return data; +} + +ERTS_GLB_INLINE Uint32 erts_ptab_pixdata2pix(ErtsPTab *ptab, Eterm pixdata) +{ + return ((Uint32) pixdata) & ptab->r.o.pix_mask; +} + +ERTS_GLB_INLINE Uint32 erts_ptab_data2pix(ErtsPTab *ptab, Eterm data) +{ + Uint32 n, pix; + n = (Uint32) data; + pix = ((n & ptab->r.o.pix_cl_mask) << ptab->r.o.pix_cl_shift); + pix += ((n >> ptab->r.o.pix_cli_shift) & ptab->r.o.pix_cli_mask); + ASSERT(0 <= pix && pix < ptab->r.o.max); + return pix; +} + +ERTS_GLB_INLINE Uint erts_ptab_data2pixdata(ErtsPTab *ptab, Eterm data) +{ + Uint pixdata = data & ~((Uint) ptab->r.o.pix_mask); + pixdata |= (Uint) erts_ptab_data2pix(ptab, data); + ASSERT(data == erts_ptab_pixdata2data(ptab, pixdata)); + return pixdata; +} + +#if ERTS_SIZEOF_TERM == 8 + +ERTS_GLB_INLINE Eterm +erts_ptab_make_id(ErtsPTab *ptab, Eterm data, Eterm tag) +{ + HUint huint; + Uint32 low_data = (Uint32) data; + low_data &= (1 << ERTS_PTAB_ID_DATA_SIZE) - 1; + low_data <<= ERTS_PTAB_ID_DATA_SHIFT; + huint.hval[ERTS_HUINT_HVAL_HIGH] = erts_ptab_data2pix(ptab, data); + huint.hval[ERTS_HUINT_HVAL_LOW] = low_data | ((Uint32) tag); + return (Eterm) huint.val; +} + +ERTS_GLB_INLINE int +erts_ptab_id2pix(ErtsPTab *ptab, Eterm id) +{ + HUint huint; + huint.val = id; + return (int) huint.hval[ERTS_HUINT_HVAL_HIGH]; +} + +ERTS_GLB_INLINE Uint +erts_ptab_id2data(ErtsPTab *ptab, Eterm id) +{ + HUint huint; + huint.val = id; + return (Uint) (huint.hval[ERTS_HUINT_HVAL_LOW] >> ERTS_PTAB_ID_DATA_SHIFT); +} + +#elif ERTS_SIZEOF_TERM == 4 + +ERTS_GLB_INLINE Eterm +erts_ptab_make_id(ErtsPTab *ptab, Eterm data, Eterm tag) +{ + Eterm id; + data &= ((1 << ERTS_PTAB_ID_DATA_SIZE) - 1); + id = (Eterm) erts_ptab_data2pixdata(ptab, data); + return (id << ERTS_PTAB_ID_DATA_SHIFT) | tag; +} + +ERTS_GLB_INLINE int +erts_ptab_id2pix(ErtsPTab *ptab, Eterm id) +{ + Uint pixdata = (Uint) id; + pixdata >>= ERTS_PTAB_ID_DATA_SHIFT; + return (int) erts_ptab_pixdata2pix(ptab, pixdata); +} + +ERTS_GLB_INLINE Uint +erts_ptab_id2data(ErtsPTab *ptab, Eterm id) +{ + Uint pixdata = (Uint) id; + pixdata >>= ERTS_PTAB_ID_DATA_SHIFT; + return erts_ptab_pixdata2data(ptab, pixdata); +} + +#else +#error "Unsupported size of term" +#endif + +ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix) +{ + ASSERT(0 <= ix && ix < ptab->r.o.max); + return erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]); +} + +ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix) +{ + ASSERT(0 <= ix && ix < ptab->r.o.max); + return erts_smp_atomic_read_ddrb(&ptab->r.o.tab[ix]); +} + +ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix) +{ + ASSERT(0 <= ix && ix < ptab->r.o.max); + return erts_smp_atomic_read_rb(&ptab->r.o.tab[ix]); +} + +ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix) +{ + ASSERT(0 <= ix && ix < ptab->r.o.max); + return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]); +} + +ERTS_GLB_INLINE void 
erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el) +{ +#ifdef ERTS_SMP +#ifdef ERTS_ENABLE_LOCK_CHECK + erts_aint32_t refc = erts_atomic32_inc_read_nob(&ptab_el->refc); + ERTS_SMP_LC_ASSERT(refc > 1); +#else + erts_atomic32_inc_nob(&ptab_el->refc); +#endif +#endif +} + +ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el) +{ +#ifdef ERTS_SMP + erts_aint32_t refc = erts_atomic32_dec_read_nob(&ptab_el->refc); + ERTS_SMP_LC_ASSERT(refc >= 0); + return (int) refc; +#else + return 0; +#endif +} + +ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el, + Sint32 add_refc) +{ +#ifdef ERTS_SMP + erts_aint32_t refc; + +#ifndef ERTS_ENABLE_LOCK_CHECK + if (add_refc >= 0) { + erts_atomic32_add_nob(&ptab_el->refc, + (erts_aint32_t) add_refc); + return 1; + } +#endif + + refc = erts_atomic32_add_read_nob(&ptab_el->refc, + (erts_aint32_t) add_refc); + ERTS_SMP_LC_ASSERT(refc >= 0); + return (int) refc; +#else + return 0; +#endif +} + +ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab) +{ + erts_smp_rwmtx_rlock(&ptab->list.data.rwmtx); +} + +ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab) +{ + return erts_smp_rwmtx_tryrlock(&ptab->list.data.rwmtx); +} + +ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab) +{ + erts_smp_rwmtx_runlock(&ptab->list.data.rwmtx); +} + +ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab) +{ + erts_smp_rwmtx_rwlock(&ptab->list.data.rwmtx); +} + +ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab) +{ + return erts_smp_rwmtx_tryrwlock(&ptab->list.data.rwmtx); +} + +ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab) +{ + erts_smp_rwmtx_rwunlock(&ptab->list.data.rwmtx); +} + +ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab) +{ + return erts_smp_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx); +} + +ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab) +{ + return erts_smp_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx); +} + +#endif + +#endif + +#if defined(ERTS_PTAB_WANT_BIF_IMPL__) && !defined(ERTS_PTAB_LIST__) +#define ERTS_PTAB_LIST__ + +#include "erl_process.h" +#include "bif.h" + +BIF_RETTYPE erts_ptab_list(struct process *c_p, ErtsPTab *ptab); + +#endif + +#if defined(ERTS_PTAB_WANT_DEBUG_FUNCS__) && !defined(ERTS_PTAB_DEBUG_FUNCS__) +#define ERTS_PTAB_DEBUG_FUNCS__ +#include "erl_process.h" + +/* Debug functions */ +Sint erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next); +Eterm erts_debug_ptab_list(Process *c_p, ErtsPTab *ptab); +Eterm erts_debug_ptab_list_bif_info(Process *c_p, ErtsPTab *ptab); + +#endif diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h index a32e9d9d7c..34c90c0bda 100644 --- a/erts/emulator/beam/erl_smp.h +++ b/erts/emulator/beam/erl_smp.h @@ -274,6 +274,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob #define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob #define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob +#define erts_smp_atomic_read_bset_nob erts_atomic_read_bset_nob #define erts_smp_atomic_init_mb erts_atomic_init_mb #define erts_smp_atomic_set_mb erts_atomic_set_mb @@ -288,6 +289,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb #define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb #define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb +#define erts_smp_atomic_read_bset_mb erts_atomic_read_bset_mb #define erts_smp_atomic_init_acqb 
erts_atomic_init_acqb #define erts_smp_atomic_set_acqb erts_atomic_set_acqb @@ -302,6 +304,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb #define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb #define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb +#define erts_smp_atomic_read_bset_acqb erts_atomic_read_bset_acqb #define erts_smp_atomic_init_relb erts_atomic_init_relb #define erts_smp_atomic_set_relb erts_atomic_set_relb @@ -316,6 +319,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb #define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb #define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb +#define erts_smp_atomic_read_bset_relb erts_atomic_read_bset_relb #define erts_smp_atomic_init_ddrb erts_atomic_init_ddrb #define erts_smp_atomic_set_ddrb erts_atomic_set_ddrb @@ -330,6 +334,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_ddrb erts_atomic_read_band_ddrb #define erts_smp_atomic_xchg_ddrb erts_atomic_xchg_ddrb #define erts_smp_atomic_cmpxchg_ddrb erts_atomic_cmpxchg_ddrb +#define erts_smp_atomic_read_bset_ddrb erts_atomic_read_bset_ddrb #define erts_smp_atomic_init_rb erts_atomic_init_rb #define erts_smp_atomic_set_rb erts_atomic_set_rb @@ -344,6 +349,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb #define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb #define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb +#define erts_smp_atomic_read_bset_rb erts_atomic_read_bset_rb #define erts_smp_atomic_init_wb erts_atomic_init_wb #define erts_smp_atomic_set_wb erts_atomic_set_wb @@ -358,6 +364,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb #define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb #define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb +#define erts_smp_atomic_read_bset_wb erts_atomic_read_bset_wb /* 32-bit atomics */ @@ -374,6 +381,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob #define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob #define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob +#define erts_smp_atomic32_read_bset_nob erts_atomic32_read_bset_nob #define erts_smp_atomic32_init_mb erts_atomic32_init_mb #define erts_smp_atomic32_set_mb erts_atomic32_set_mb @@ -388,6 +396,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb #define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb #define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb +#define erts_smp_atomic32_read_bset_mb erts_atomic32_read_bset_mb #define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb #define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb @@ -402,6 +411,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb #define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb #define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb +#define erts_smp_atomic32_read_bset_acqb erts_atomic32_read_bset_acqb #define erts_smp_atomic32_init_relb 
erts_atomic32_init_relb #define erts_smp_atomic32_set_relb erts_atomic32_set_relb @@ -416,6 +426,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb #define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb #define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb +#define erts_smp_atomic32_read_bset_relb erts_atomic32_read_bset_relb #define erts_smp_atomic32_init_ddrb erts_atomic32_init_ddrb #define erts_smp_atomic32_set_ddrb erts_atomic32_set_ddrb @@ -430,6 +441,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_ddrb erts_atomic32_read_band_ddrb #define erts_smp_atomic32_xchg_ddrb erts_atomic32_xchg_ddrb #define erts_smp_atomic32_cmpxchg_ddrb erts_atomic32_cmpxchg_ddrb +#define erts_smp_atomic32_read_bset_ddrb erts_atomic32_read_bset_ddrb #define erts_smp_atomic32_init_rb erts_atomic32_init_rb #define erts_smp_atomic32_set_rb erts_atomic32_set_rb @@ -444,6 +456,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb #define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb #define erts_smp_atomic32_cmpxchg_rb erts_atomic32_cmpxchg_rb +#define erts_smp_atomic32_read_bset_rb erts_atomic32_read_bset_rb #define erts_smp_atomic32_init_wb erts_atomic32_init_wb #define erts_smp_atomic32_set_wb erts_atomic32_set_wb @@ -458,6 +471,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb #define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb #define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb +#define erts_smp_atomic32_read_bset_wb erts_atomic32_read_bset_wb #else /* !ERTS_SMP */ @@ -513,6 +527,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_nob erts_no_atomic_read_band #define erts_smp_atomic_xchg_nob erts_no_atomic_xchg #define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg +#define erts_smp_atomic_read_bset_nob erts_no_atomic_read_bset #define erts_smp_atomic_init_mb erts_no_atomic_set #define erts_smp_atomic_set_mb erts_no_atomic_set @@ -527,6 +542,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_mb erts_no_atomic_read_band #define erts_smp_atomic_xchg_mb erts_no_atomic_xchg #define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg +#define erts_smp_atomic_read_bset_mb erts_no_atomic_read_bset #define erts_smp_atomic_init_acqb erts_no_atomic_set #define erts_smp_atomic_set_acqb erts_no_atomic_set @@ -541,6 +557,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band #define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg #define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg +#define erts_smp_atomic_read_bset_acqb erts_no_atomic_read_bset #define erts_smp_atomic_init_relb erts_no_atomic_set #define erts_smp_atomic_set_relb erts_no_atomic_set @@ -555,6 +572,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_relb erts_no_atomic_read_band #define erts_smp_atomic_xchg_relb erts_no_atomic_xchg #define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg +#define erts_smp_atomic_read_bset_relb erts_no_atomic_read_bset #define erts_smp_atomic_init_ddrb erts_no_atomic_set 
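The erts_smp_atomic*_read_bset_* names introduced throughout this mapping layer all denote one operation: atomically assign the bits selected by the mask argument to the corresponding bits of the set argument, leave every other bit untouched, and return the value the variable held before the update. In the !ERTS_SMP build they fall back to erts_no_atomic_read_bset / erts_no_atomic32_read_bset, where the same update can be done as a plain read-modify-write. A minimal plain-C sketch of that fallback behaviour (the helper name below is hypothetical, not part of the patch):

    /* Masked bit assignment: bits selected by 'mask' are taken from 'set',
     * all other bits are preserved; the previous value is returned. */
    static long
    no_atomic_read_bset_sketch(long *var, long mask, long set)
    {
        long old = *var;
        *var = (old & ~mask) | (mask & set);
        return old;
    }
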
#define erts_smp_atomic_set_ddrb erts_no_atomic_set @@ -569,6 +587,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_ddrb erts_no_atomic_read_band #define erts_smp_atomic_xchg_ddrb erts_no_atomic_xchg #define erts_smp_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg +#define erts_smp_atomic_read_bset_ddrb erts_no_atomic_read_bset #define erts_smp_atomic_init_rb erts_no_atomic_set #define erts_smp_atomic_set_rb erts_no_atomic_set @@ -583,6 +602,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_rb erts_no_atomic_read_band #define erts_smp_atomic_xchg_rb erts_no_atomic_xchg #define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg +#define erts_smp_atomic_read_bset_rb erts_no_atomic_read_bset #define erts_smp_atomic_init_wb erts_no_atomic_set #define erts_smp_atomic_set_wb erts_no_atomic_set @@ -597,6 +617,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic_read_band_wb erts_no_atomic_read_band #define erts_smp_atomic_xchg_wb erts_no_atomic_xchg #define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg +#define erts_smp_atomic_read_bset_wb erts_no_atomic_read_bset /* 32-bit atomics */ @@ -613,6 +634,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band #define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg #define erts_smp_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg +#define erts_smp_atomic32_read_bset_nob erts_no_atomic32_read_bset #define erts_smp_atomic32_init_mb erts_no_atomic32_set #define erts_smp_atomic32_set_mb erts_no_atomic32_set @@ -627,6 +649,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band #define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg #define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg +#define erts_smp_atomic32_read_bset_mb erts_no_atomic32_read_bset #define erts_smp_atomic32_init_acqb erts_no_atomic32_set #define erts_smp_atomic32_set_acqb erts_no_atomic32_set @@ -641,6 +664,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band #define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg #define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg +#define erts_smp_atomic32_read_bset_acqb erts_no_atomic32_read_bset #define erts_smp_atomic32_init_relb erts_no_atomic32_set #define erts_smp_atomic32_set_relb erts_no_atomic32_set @@ -655,6 +679,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band #define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg #define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg +#define erts_smp_atomic32_read_bset_relb erts_no_atomic32_read_bset #define erts_smp_atomic32_init_ddrb erts_no_atomic32_set #define erts_smp_atomic32_set_ddrb erts_no_atomic32_set @@ -669,6 +694,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_ddrb erts_no_atomic32_read_band #define erts_smp_atomic32_xchg_ddrb erts_no_atomic32_xchg #define erts_smp_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg +#define erts_smp_atomic32_read_bset_ddrb erts_no_atomic32_read_bset #define erts_smp_atomic32_init_rb erts_no_atomic32_set #define 
erts_smp_atomic32_set_rb erts_no_atomic32_set @@ -683,6 +709,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band #define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg #define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg +#define erts_smp_atomic32_read_bset_rb erts_no_atomic32_read_bset #define erts_smp_atomic32_init_wb erts_no_atomic32_set #define erts_smp_atomic32_set_wb erts_no_atomic32_set @@ -697,6 +724,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); #define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band #define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg #define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg +#define erts_smp_atomic32_read_bset_wb erts_no_atomic32_read_bset #endif /* !ERTS_SMP */ diff --git a/erts/emulator/beam/erl_sys_driver.h b/erts/emulator/beam/erl_sys_driver.h index d429d0ce96..b991a2840c 100644 --- a/erts/emulator/beam/erl_sys_driver.h +++ b/erts/emulator/beam/erl_sys_driver.h @@ -31,7 +31,6 @@ #define ERL_SYS_DRV typedef long ErlDrvEvent; /* An event to be selected on. */ -typedef long ErlDrvPort; /* A port descriptor. */ /* typedef struct _SysDriverOpts SysDriverOpts; defined in sys.h */ diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c index bf7774f882..4587cd84d1 100644 --- a/erts/emulator/beam/erl_term.c +++ b/erts/emulator/beam/erl_term.c @@ -133,7 +133,7 @@ ET_DEFINE_CHECKED(Uint,unsigned_val,Eterm,is_small); ET_DEFINE_CHECKED(Sint,signed_val,Eterm,is_small); ET_DEFINE_CHECKED(Uint,atom_val,Eterm,is_atom); ET_DEFINE_CHECKED(Uint,header_arity,Eterm,is_header); -ET_DEFINE_CHECKED(Uint,arityval,Eterm,is_arity_value); +ET_DEFINE_CHECKED(Uint,arityval,Eterm,is_sane_arity_value); ET_DEFINE_CHECKED(Uint,thing_arityval,Eterm,is_thing); ET_DEFINE_CHECKED(Uint,thing_subtag,Eterm,is_thing); ET_DEFINE_CHECKED(Eterm*,binary_val,Wterm,is_binary); @@ -144,9 +144,7 @@ ET_DEFINE_CHECKED(Uint,bignum_header_arity,Eterm,_is_bignum_header); ET_DEFINE_CHECKED(Eterm*,big_val,Wterm,is_big); ET_DEFINE_CHECKED(Eterm*,float_val,Wterm,is_float); ET_DEFINE_CHECKED(Eterm*,tuple_val,Wterm,is_tuple); -ET_DEFINE_CHECKED(Uint,internal_pid_data,Eterm,is_internal_pid); ET_DEFINE_CHECKED(struct erl_node_*,internal_pid_node,Eterm,is_internal_pid); -ET_DEFINE_CHECKED(Uint,internal_port_data,Eterm,is_internal_port); ET_DEFINE_CHECKED(struct erl_node_*,internal_port_node,Eterm,is_internal_port); ET_DEFINE_CHECKED(Eterm*,internal_ref_val,Wterm,is_internal_ref); ET_DEFINE_CHECKED(Uint,internal_ref_data_words,Wterm,is_internal_ref); diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h index c270d13365..fb3ef9cd6c 100644 --- a/erts/emulator/beam/erl_term.h +++ b/erts/emulator/beam/erl_term.h @@ -300,8 +300,17 @@ _ET_DECLARE_CHECKED(Uint,header_arity,Eterm) #define header_arity(x) _ET_APPLY(header_arity,(x)) /* arityval access methods */ +/* Erlang Spec. 
4.7.3 defines max arity to 65535 + * we will however enforce max arity of 16777215 (24 bits) + * (checked in bifs and asserted in debug) + */ +#define MAX_ARITYVAL ((((Uint)1) << 24) - 1) +#define ERTS_MAX_TUPLE_SIZE MAX_ARITYVAL + #define make_arityval(sz) _make_header((sz),_TAG_HEADER_ARITYVAL) #define is_arity_value(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL) +#define is_sane_arity_value(x) ((((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL) && \ + (((x) >> _HEADER_ARITY_OFFS) <= MAX_ARITYVAL)) #define is_not_arity_value(x) (!is_arity_value((x))) #define _unchecked_arityval(x) _unchecked_header_arity((x)) _ET_DECLARE_CHECKED(Uint,arityval,Eterm) @@ -542,12 +551,6 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm) #define _GETBITS(X,Pos,Size) (((X) >> (Pos)) & ~(~((Uint) 0) << (Size))) /* - * Observe! New layout for pids, ports and references in R9 (see also note - * in erl_node_container_utils.h). - */ - - -/* * Creation in node specific data (pids, ports, refs) */ @@ -584,7 +587,6 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm) * */ -#define _PID_R9_SER_SIZE 3 #define _PID_SER_SIZE (_PID_DATA_SIZE - _PID_NUM_SIZE) #define _PID_NUM_SIZE 15 @@ -598,23 +600,13 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm) #define make_pid_data(Ser, Num) \ ((Uint) ((Ser) << _PID_NUM_SIZE | (Num))) -#define make_internal_pid(X) \ - ((Eterm) (((X) << _PID_DATA_SHIFT) | _TAG_IMMED1_PID)) - #define is_internal_pid(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_PID) #define is_not_internal_pid(x) (!is_internal_pid((x))) -#define _unchecked_internal_pid_data(x) _GET_PID_DATA((x)) -_ET_DECLARE_CHECKED(Uint,internal_pid_data,Eterm) -#define internal_pid_data(x) _ET_APPLY(internal_pid_data,(x)) - #define _unchecked_internal_pid_node(x) erts_this_node _ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm) #define internal_pid_node(x) _ET_APPLY(internal_pid_node,(x)) -#define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x))) -#define internal_pid_serial(x) _GET_PID_SER(internal_pid_data((x))) - #define internal_pid_data_words(x) (1) /* @@ -644,7 +636,6 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm) * N : node number * */ -#define _PORT_R9_NUM_SIZE 18 #define _PORT_NUM_SIZE _PORT_DATA_SIZE #define _PORT_DATA_SIZE 28 @@ -654,18 +645,9 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm) #define _GET_PORT_NUM(X) _GETBITS((X), 0, _PORT_NUM_SIZE) -#define make_internal_port(X) \ - ((Eterm) (((X) << _PORT_DATA_SHIFT) | _TAG_IMMED1_PORT)) - #define is_internal_port(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_PORT) #define is_not_internal_port(x) (!is_internal_port(x)) -#define _unchecked_internal_port_data(x) _GET_PORT_DATA((x)) -_ET_DECLARE_CHECKED(Uint,internal_port_data,Eterm) -#define internal_port_data(x) _ET_APPLY(internal_port_data,(x)) - -#define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x))) - #define _unchecked_internal_port_node(x) erts_this_node _ET_DECLARE_CHECKED(struct erl_node_*,internal_port_node,Eterm) #define internal_port_node(x) _ET_APPLY(internal_port_node,(x)) diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c index 88524bdd4c..9678d7e08b 100644 --- a/erts/emulator/beam/erl_thr_progress.c +++ b/erts/emulator/beam/erl_thr_progress.c @@ -96,17 +96,14 @@ #define ERTS_THR_PRGR_LFLG_BLOCK (((erts_aint32_t) 1) << 31) #define ERTS_THR_PRGR_LFLG_NO_LEADER (((erts_aint32_t) 1) << 30) -#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \ - | ERTS_THR_PRGR_LFLG_BLOCK)) +#define 
ERTS_THR_PRGR_LFLG_WAITING_UM (((erts_aint32_t) 1) << 29) +#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \ + | ERTS_THR_PRGR_LFLG_BLOCK \ + | ERTS_THR_PRGR_LFLG_WAITING_UM)) -#define ERTS_THR_PRGR_LFLGS_ACTIVE(LFLGS) \ +#define ERTS_THR_PRGR_LFLGS_ACTIVE(LFLGS) \ ((LFLGS) & ERTS_THR_PRGR_LFLG_ACTIVE_MASK) -#define ERTS_THR_PRGR_LFLGS_ALL_WAITING(LFLGS) \ - (((LFLGS) & (ERTS_THR_PRGR_LFLG_NO_LEADER \ - |ERTS_THR_PRGR_LFLG_ACTIVE_MASK)) \ - == ERTS_THR_PRGR_LFLG_NO_LEADER) - /* * We use a 64-bit value for thread progress. By this wrapping of * the thread progress will more or less never occur. @@ -262,6 +259,11 @@ typedef struct { erts_atomic32_t managed_count; erts_atomic32_t managed_id; erts_atomic32_t unmanaged_id; + int chk_next_ix; + struct { + int waiting; + erts_atomic32_t current; + } umrefc_ix; } ErtsThrPrgrMiscData; typedef struct { @@ -276,12 +278,18 @@ typedef union { char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrPrgrElement))]; } ErtsThrPrgrArray; +typedef union { + erts_atomic_t refc; + char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic_t))]; +} ErtsThrPrgrUnmanagedRefc; + typedef struct { union { ErtsThrPrgrMiscData data; char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE( sizeof(ErtsThrPrgrMiscData))]; } misc; + ErtsThrPrgrUnmanagedRefc umrefc[2]; ErtsThrPrgrArray *thr; struct { int no; @@ -346,7 +354,9 @@ init_tmp_thr_prgr_data(ErtsThrPrgrData *tpd) tpd->is_managed = 0; tpd->is_blocking = 0; tpd->is_temporary = 1; - +#ifdef ERTS_ENABLE_LOCK_CHECK + tpd->is_delaying = 0; +#endif erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd); } @@ -461,6 +471,12 @@ erts_thr_progress_init(int no_schedulers, int managed, int unmanaged) erts_atomic32_init_nob(&intrnl->misc.data.managed_count, 0); erts_atomic32_init_nob(&intrnl->misc.data.managed_id, no_schedulers); erts_atomic32_init_nob(&intrnl->misc.data.unmanaged_id, -1); + intrnl->misc.data.chk_next_ix = 0; + intrnl->misc.data.umrefc_ix.waiting = -1; + erts_atomic32_init_nob(&intrnl->misc.data.umrefc_ix.current, 0); + + erts_atomic_init_nob(&intrnl->umrefc[0].refc, (erts_aint_t) 0); + erts_atomic_init_nob(&intrnl->umrefc[1].refc, (erts_aint_t) 0); intrnl->thr = (ErtsThrPrgrArray *) ptr; ptr += thr_arr_sz; @@ -547,6 +563,9 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks) tpd->is_managed = 0; tpd->is_blocking = is_blocking; tpd->is_temporary = 0; +#ifdef ERTS_ENABLE_LOCK_CHECK + tpd->is_delaying = 0; +#endif ASSERT(tpd->id >= 0); if (tpd->id >= intrnl->unmanaged.no) erl_exit(ERTS_ABORT_EXIT, @@ -600,6 +619,9 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp, tpd->is_managed = 1; tpd->is_blocking = is_blocking; tpd->is_temporary = 0; +#ifdef ERTS_ENABLE_LOCK_CHECK + tpd->is_delaying = 1; +#endif init_wakeup_request_array(&tpd->wakeup_request[0]); @@ -607,8 +629,8 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp, tpd->leader = 0; tpd->active = 1; - tpd->previous.local = 0; - tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING; + tpd->confirmed = 0; + tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING; erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd); erts_atomic32_inc_nob(&intrnl->misc.data.lflgs); @@ -651,60 +673,113 @@ leader_update(ErtsThrPrgrData *tpd) block_thread(tpd); } else { + ErtsThrPrgrVal current; + int ix, chk_next_ix, umrefc_ix, my_ix, no_managed, waiting_unmanaged; erts_aint32_t lflgs; ErtsThrPrgrVal next; - int ix, sz, make_progress; + erts_aint_t refc; - if (tpd->previous.current == 
ERTS_THR_PRGR_VAL_WAITING) { - /* Took over as leader from another thread */ - tpd->previous.current = read_acqb(&erts_thr_prgr__.current); - tpd->previous.next = tpd->previous.current; - tpd->previous.next++; - if (tpd->previous.next == ERTS_THR_PRGR_VAL_WAITING) - tpd->previous.next = 0; - } + my_ix = tpd->id; - if (tpd->previous.local == tpd->previous.current) { - ErtsThrPrgrVal val = tpd->previous.current + 1; - if (val == ERTS_THR_PRGR_VAL_WAITING) - val = 0; - tpd->previous.local = val; - set_mb(&intrnl->thr[tpd->id].data.current, val); + if (tpd->leader_state.current == ERTS_THR_PRGR_VAL_WAITING) { + /* Took over as leader from another thread */ + tpd->leader_state.current = read_nob(&erts_thr_prgr__.current); + tpd->leader_state.next = tpd->leader_state.current; + tpd->leader_state.next++; + if (tpd->leader_state.next == ERTS_THR_PRGR_VAL_WAITING) + tpd->leader_state.next = 0; + tpd->leader_state.chk_next_ix = intrnl->misc.data.chk_next_ix; + tpd->leader_state.umrefc_ix.waiting = intrnl->misc.data.umrefc_ix.waiting; + tpd->leader_state.umrefc_ix.current = + (int) erts_atomic32_read_nob(&intrnl->misc.data.umrefc_ix.current); + + if (tpd->confirmed == tpd->leader_state.current) { + ErtsThrPrgrVal val = tpd->leader_state.current + 1; + if (val == ERTS_THR_PRGR_VAL_WAITING) + val = 0; + tpd->confirmed = val; + set_mb(&intrnl->thr[my_ix].data.current, val); + } } - next = tpd->previous.next; - make_progress = 1; - sz = intrnl->managed.no; - for (ix = 0; ix < sz; ix++) { - ErtsThrPrgrVal tmp; - tmp = read_nob(&intrnl->thr[ix].data.current); - if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) { - make_progress = 0; - ASSERT(erts_thr_progress_has_passed__(next, tmp)); - break; + next = tpd->leader_state.next; + + waiting_unmanaged = 0; + umrefc_ix = -1; /* Shut up annoying warning */ + + chk_next_ix = tpd->leader_state.chk_next_ix; + no_managed = intrnl->managed.no; + ASSERT(0 <= chk_next_ix && chk_next_ix <= no_managed); + /* Check manged threads */ + if (chk_next_ix < no_managed) { + for (ix = chk_next_ix; ix < no_managed; ix++) { + ErtsThrPrgrVal tmp; + if (ix == my_ix) + continue; + tmp = read_nob(&intrnl->thr[ix].data.current); + if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) { + tpd->leader_state.chk_next_ix = ix; + ASSERT(erts_thr_progress_has_passed__(next, tmp)); + goto done; + } } } - if (make_progress) { - ErtsThrPrgrVal current = next; + /* Check unmanged threads */ + waiting_unmanaged = tpd->leader_state.umrefc_ix.waiting != -1; + umrefc_ix = (waiting_unmanaged + ? 
tpd->leader_state.umrefc_ix.waiting + : tpd->leader_state.umrefc_ix.current); + refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc); + ASSERT(refc >= 0); + if (refc != 0) { + int new_umrefc_ix; + + if (waiting_unmanaged) + goto done; + + new_umrefc_ix = (umrefc_ix + 1) & 0x1; + tpd->leader_state.umrefc_ix.waiting = umrefc_ix; + tpd->leader_state.chk_next_ix = no_managed; + erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current, + (erts_aint32_t) new_umrefc_ix); + ETHR_MEMBAR(ETHR_StoreLoad); + refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc); + ASSERT(refc >= 0); + waiting_unmanaged = 1; + if (refc != 0) + goto done; + } - next++; - if (next == ERTS_THR_PRGR_VAL_WAITING) - next = 0; + /* Make progress */ + current = next; - set_nob(&intrnl->thr[tpd->id].data.current, next); - set_mb(&erts_thr_prgr__.current, current); - tpd->previous.local = next; - tpd->previous.next = next; - tpd->previous.current = current; + next++; + if (next == ERTS_THR_PRGR_VAL_WAITING) + next = 0; + + set_nob(&intrnl->thr[my_ix].data.current, next); + set_mb(&erts_thr_prgr__.current, current); + tpd->confirmed = next; + tpd->leader_state.next = next; + tpd->leader_state.current = current; #if ERTS_THR_PRGR_PRINT_VAL - if (current % 1000 == 0) - erts_fprintf(stderr, "%b64u\n", current); + if (current % 1000 == 0) + erts_fprintf(stderr, "%b64u\n", current); #endif - handle_wakeup_requests(current); + handle_wakeup_requests(current); + + if (waiting_unmanaged) { + waiting_unmanaged = 0; + tpd->leader_state.umrefc_ix.waiting = -1; + erts_atomic32_read_band_nob(&intrnl->misc.data.lflgs, + ~ERTS_THR_PRGR_LFLG_WAITING_UM); } + tpd->leader_state.chk_next_ix = 0; + + done: if (tpd->active) { lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs); @@ -712,20 +787,44 @@ leader_update(ErtsThrPrgrData *tpd) (void) block_thread(tpd); } else { + int force_wakeup_check = 0; + erts_aint32_t set_flags = ERTS_THR_PRGR_LFLG_NO_LEADER; tpd->leader = 0; - tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING; + tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING; #if ERTS_THR_PRGR_PRINT_LEADER erts_fprintf(stderr, "L <- %d\n", tpd->id); #endif ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0); + if (waiting_unmanaged) + set_flags |= ERTS_THR_PRGR_LFLG_WAITING_UM; + lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs, - ERTS_THR_PRGR_LFLG_NO_LEADER); + set_flags); + lflgs |= set_flags; if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK) lflgs = block_thread(tpd); - if (ERTS_THR_PRGR_LFLGS_ACTIVE(lflgs) == 0 && got_sched_wakeups()) + + if (waiting_unmanaged) { + /* Need to check umrefc again */ + ETHR_MEMBAR(ETHR_StoreLoad); + refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc); + if (refc == 0) { + /* Need to force wakeup check */ + force_wakeup_check = 1; + } + } + + if ((force_wakeup_check + || ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER + | ERTS_THR_PRGR_LFLG_WAITING_UM + | ERTS_THR_PRGR_LFLG_ACTIVE_MASK)) + == ERTS_THR_PRGR_LFLG_NO_LEADER)) + && got_sched_wakeups()) { + /* Someone need to make progress */ wakeup_managed(0); + } } } @@ -744,11 +843,11 @@ update(ErtsThrPrgrData *tpd) erts_aint32_t lflgs; res = 0; val = read_acqb(&erts_thr_prgr__.current); - if (tpd->previous.local == val) { + if (tpd->confirmed == val) { val++; if (val == ERTS_THR_PRGR_VAL_WAITING) val = 0; - tpd->previous.local = val; + tpd->confirmed = val; set_mb(&intrnl->thr[tpd->id].data.current, val); } @@ -801,12 +900,19 @@ erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp) block_count_dec(); - tpd->previous.local = 
ERTS_THR_PRGR_VAL_WAITING; + tpd->confirmed = ERTS_THR_PRGR_VAL_WAITING; set_mb(&intrnl->thr[tpd->id].data.current, ERTS_THR_PRGR_VAL_WAITING); lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs); - if (ERTS_THR_PRGR_LFLGS_ALL_WAITING(lflgs) && got_sched_wakeups()) - wakeup_managed(0); /* Someone need to make progress */ + + if ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER + | ERTS_THR_PRGR_LFLG_WAITING_UM + | ERTS_THR_PRGR_LFLG_ACTIVE_MASK)) + == ERTS_THR_PRGR_LFLG_NO_LEADER + && got_sched_wakeups()) { + /* Someone need to make progress */ + wakeup_managed(0); + } } void @@ -828,7 +934,7 @@ erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp) val++; if (val == ERTS_THR_PRGR_VAL_WAITING) val = 0; - tpd->previous.local = val; + tpd->confirmed = val; set_mb(&intrnl->thr[tpd->id].data.current, val); val = read_acqb(&erts_thr_prgr__.current); if (current == val) @@ -875,6 +981,68 @@ erts_thr_progress_active(ErtsSchedulerData *esdp, int on) } +static ERTS_INLINE void +unmanaged_continue(ErtsThrPrgrDelayHandle handle) +{ + int umrefc_ix = (int) handle; + erts_aint_t refc; + + ASSERT(umrefc_ix == 0 || umrefc_ix == 1); + refc = erts_atomic_dec_read_relb(&intrnl->umrefc[umrefc_ix].refc); + ASSERT(refc >= 0); + if (refc == 0) { + erts_aint_t lflgs; + ERTS_THR_READ_MEMORY_BARRIER; + lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs); + if ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER + | ERTS_THR_PRGR_LFLG_WAITING_UM + | ERTS_THR_PRGR_LFLG_ACTIVE_MASK)) + == (ERTS_THR_PRGR_LFLG_NO_LEADER|ERTS_THR_PRGR_LFLG_WAITING_UM) + && got_sched_wakeups()) { + /* Others waiting for us... */ + wakeup_managed(0); + } + } +} + +void +erts_thr_progress_unmanaged_continue__(ErtsThrPrgrDelayHandle handle) +{ +#ifdef ERTS_ENABLE_LOCK_CHECK + ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL); + ERTS_LC_ASSERT(tpd && tpd->is_delaying); + tpd->is_delaying = 0; + return_tmp_thr_prgr_data(tpd); +#endif + ASSERT(!erts_thr_progress_is_managed_thread()); + + unmanaged_continue(handle); +} + +ErtsThrPrgrDelayHandle +erts_thr_progress_unmanaged_delay__(void) +{ + int umrefc_ix; + ASSERT(!erts_thr_progress_is_managed_thread()); + umrefc_ix = (int) erts_atomic32_read_acqb(&intrnl->misc.data.umrefc_ix.current); + while (1) { + int tmp_ix; + erts_atomic_inc_acqb(&intrnl->umrefc[umrefc_ix].refc); + tmp_ix = (int) erts_atomic32_read_acqb(&intrnl->misc.data.umrefc_ix.current); + if (tmp_ix == umrefc_ix) + break; + unmanaged_continue(umrefc_ix); + umrefc_ix = tmp_ix; + } +#ifdef ERTS_ENABLE_LOCK_CHECK + { + ErtsThrPrgrData *tpd = tmp_thr_prgr_data(NULL); + tpd->is_delaying = 1; + } +#endif + return (ErtsThrPrgrDelayHandle) umrefc_ix; +} + static ERTS_INLINE int has_reached_wakeup(ErtsThrPrgrVal wakeup) { @@ -931,7 +1099,7 @@ request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value) */ ASSERT(tpd->is_managed); - ASSERT(tpd->previous.local != ERTS_THR_PRGR_VAL_WAITING); + ASSERT(tpd->confirmed != ERTS_THR_PRGR_VAL_WAITING); if (has_reached_wakeup(value)) { wakeup_managed(tpd->id); @@ -946,7 +1114,7 @@ request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value) tpd->wakeup_request[wix])); - if (tpd->previous.local == value) { + if (tpd->confirmed == value) { /* * We have already confirmed this value. 
We need to request * wakeup for a value later than our latest confirmed value in diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h index e72321cf48..1416aa6166 100644 --- a/erts/emulator/beam/erl_thr_progress.h +++ b/erts/emulator/beam/erl_thr_progress.h @@ -53,9 +53,22 @@ typedef Uint64 ErtsThrPrgrVal; #define ERTS_THR_PRGR_WAKEUP_DATA_SIZE 4 /* Need to be an even power of 2. */ typedef struct { + ErtsThrPrgrVal next; + ErtsThrPrgrVal current; + int chk_next_ix; + struct { + int current; + int waiting; + } umrefc_ix; +} ErtsThrPrgrLeaderState; + +typedef struct { int id; int is_managed; int is_blocking; +#ifdef ERTS_ENABLE_LOCK_CHECK + int is_delaying; /* managed is always delaying */ +#endif int is_temporary; /* --- Part below only for registered threads --- */ @@ -66,11 +79,8 @@ typedef struct { int leader; /* Needs to be first in the managed threads part */ int active; - struct { - ErtsThrPrgrVal local; - ErtsThrPrgrVal next; - ErtsThrPrgrVal current; - } previous; + ErtsThrPrgrVal confirmed; + ErtsThrPrgrLeaderState leader_state; } ErtsThrPrgrData; void erts_thr_progress_fatal_error_block(SWord timeout, @@ -121,6 +131,11 @@ typedef struct { ERTS_THR_PRGR_ATOMIC current; } ErtsThrPrgr; +typedef int ErtsThrPrgrDelayHandle; +#define ERTS_THR_PRGR_DHANDLE_MANAGED ((ErtsThrPrgrDelayHandle) -1) +/* ERTS_THR_PRGR_DHANDLE_MANAGED implies managed thread */ +#define ERTS_THR_PRGR_DHANDLE_INVALID ((ErtsThrPrgrDelayHandle) -2) + extern ErtsThrPrgr erts_thr_prgr__; void erts_thr_progress_pre_init(void); @@ -136,6 +151,8 @@ int erts_thr_progress_update(ErtsSchedulerData *esdp); int erts_thr_progress_leader_update(ErtsSchedulerData *esdp); void erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp); void erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp); +ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay__(void); +void erts_thr_progress_unmanaged_continue__(int umrefc_ix); void erts_thr_progress_dbg_print_state(void); @@ -148,6 +165,11 @@ ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *a ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc); ERTS_GLB_INLINE int erts_thr_progress_is_managed_thread(void); +ERTS_GLB_INLINE ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay(void); +ERTS_GLB_INLINE void erts_thr_progress_unmanaged_continue(ErtsThrPrgrDelayHandle handle); +#ifdef ERTS_ENABLE_LOCK_CHECK +ERTS_GLB_INLINE int erts_thr_progress_lc_is_delaying(void); +#endif ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current_to_later__(ErtsThrPrgrVal val); ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later(ErtsSchedulerData *); ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current(void); @@ -229,6 +251,35 @@ erts_thr_progress_is_managed_thread(void) return tpd && tpd->is_managed; } +ERTS_GLB_INLINE ErtsThrPrgrDelayHandle +erts_thr_progress_unmanaged_delay(void) +{ + if (erts_thr_progress_is_managed_thread()) + return ERTS_THR_PRGR_DHANDLE_MANAGED; /* Nothing to do */ + else + return erts_thr_progress_unmanaged_delay__(); +} + +ERTS_GLB_INLINE void +erts_thr_progress_unmanaged_continue(ErtsThrPrgrDelayHandle handle) +{ + ASSERT(handle != ERTS_THR_PRGR_DHANDLE_MANAGED + || erts_thr_progress_is_managed_thread()); + if (handle != ERTS_THR_PRGR_DHANDLE_MANAGED) + erts_thr_progress_unmanaged_continue__(handle); +} + +#ifdef ERTS_ENABLE_LOCK_CHECK + +ERTS_GLB_INLINE int +erts_thr_progress_lc_is_delaying(void) +{ + ErtsThrPrgrData *tpd = erts_tsd_get(erts_thr_prgr_data_key__); + return 
tpd && tpd->is_delaying; +} + +#endif + ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current_to_later__(ErtsThrPrgrVal val) { @@ -248,7 +299,7 @@ erts_thr_progress_later(ErtsSchedulerData *esdp) if (esdp) { tpd = &esdp->thr_progress_data; managed_thread: - val = tpd->previous.local; + val = tpd->confirmed; ERTS_THR_MEMORY_BARRIER; } else { diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h index 17628286bc..1dc3ffeb3c 100644 --- a/erts/emulator/beam/erl_threads.h +++ b/erts/emulator/beam/erl_threads.h @@ -533,6 +533,9 @@ ERTS_GLB_INLINE erts_aint_t erts_no_atomic_xchg(erts_no_atomic_t *xchgp, ERTS_GLB_INLINE erts_aint_t erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp, erts_aint_t new, erts_aint_t expected); +ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bset(erts_no_atomic_t *var, + erts_aint_t mask, + erts_aint_t set); ERTS_GLB_INLINE void erts_no_atomic32_set(erts_no_atomic32_t *var, erts_aint32_t i); ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read(erts_no_atomic32_t *var); @@ -553,6 +556,9 @@ ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp, erts_aint32_t new, erts_aint32_t expected); +ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bset(erts_no_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set); ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock, char *name, @@ -612,6 +618,78 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #ifdef USE_THREADS +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_nob(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set); +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_ddrb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set); +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_rb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set); +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_wb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set); +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_acqb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set); +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_relb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set); +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_mb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set); +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_nob(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set); +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_ddrb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set); +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_rb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set); +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_wb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set); +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_acqb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set); +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_relb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set); +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_mb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +#define ERTS_ATOMIC_BSET_IMPL__(Type, ReadOp, CmpxchgOp, VarP, Mask, Set) \ +do { \ + Type act = ReadOp((VarP)); \ + while (1) { \ + Type exp = act; \ + Type new = exp & ~(Mask); \ + new |= ((Mask) & (Set)); \ + act = CmpxchgOp((VarP), new, exp); \ 
+ if (act == exp) \ + return act; \ + } \ +} while (0) +#endif + /* * See "Documentation of atomics and memory barriers" at the top * of this file for info on atomics. @@ -670,6 +748,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_xchg_nob ethr_atomic_xchg #define erts_atomic_cmpxchg_nob ethr_atomic_cmpxchg +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_nob(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint_t, + ethr_atomic_read, + ethr_atomic_cmpxchg, + var, mask, set); +} +#endif + #define erts_atomic_init_mb ethr_atomic_init_mb #define erts_atomic_set_mb ethr_atomic_set_mb #define erts_atomic_read_mb ethr_atomic_read_mb @@ -684,6 +775,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_xchg_mb ethr_atomic_xchg_mb #define erts_atomic_cmpxchg_mb ethr_atomic_cmpxchg_mb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_mb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint_t, + ethr_atomic_read, + ethr_atomic_cmpxchg_mb, + var, mask, set); +} +#endif + #define erts_atomic_init_acqb ethr_atomic_init_acqb #define erts_atomic_set_acqb ethr_atomic_set_acqb #define erts_atomic_read_acqb ethr_atomic_read_acqb @@ -698,6 +802,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_xchg_acqb ethr_atomic_xchg_acqb #define erts_atomic_cmpxchg_acqb ethr_atomic_cmpxchg_acqb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_acqb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint_t, + ethr_atomic_read, + ethr_atomic_cmpxchg_acqb, + var, mask, set); +} +#endif + #define erts_atomic_init_relb ethr_atomic_init_relb #define erts_atomic_set_relb ethr_atomic_set_relb #define erts_atomic_read_relb ethr_atomic_read_relb @@ -712,6 +829,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_xchg_relb ethr_atomic_xchg_relb #define erts_atomic_cmpxchg_relb ethr_atomic_cmpxchg_relb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_relb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint_t, + ethr_atomic_read, + ethr_atomic_cmpxchg_relb, + var, mask, set); +} +#endif + #define erts_atomic_init_ddrb ethr_atomic_init_ddrb #define erts_atomic_set_ddrb ethr_atomic_set_ddrb #define erts_atomic_read_ddrb ethr_atomic_read_ddrb @@ -726,6 +856,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_xchg_ddrb ethr_atomic_xchg_ddrb #define erts_atomic_cmpxchg_ddrb ethr_atomic_cmpxchg_ddrb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_ddrb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint_t, + ethr_atomic_read, + ethr_atomic_cmpxchg_ddrb, + var, mask, set); +} +#endif + #define erts_atomic_init_rb ethr_atomic_init_rb #define erts_atomic_set_rb ethr_atomic_set_rb #define erts_atomic_read_rb ethr_atomic_read_rb @@ -740,6 +883,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_xchg_rb ethr_atomic_xchg_rb #define erts_atomic_cmpxchg_rb ethr_atomic_cmpxchg_rb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_rb(erts_atomic_t 
*var, + erts_aint_t mask, + erts_aint_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint_t, + ethr_atomic_read, + ethr_atomic_cmpxchg_rb, + var, mask, set); +} +#endif + #define erts_atomic_init_wb ethr_atomic_init_wb #define erts_atomic_set_wb ethr_atomic_set_wb #define erts_atomic_read_wb ethr_atomic_read_wb @@ -754,6 +910,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_xchg_wb ethr_atomic_xchg_wb #define erts_atomic_cmpxchg_wb ethr_atomic_cmpxchg_wb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint_t +erts_atomic_read_bset_wb(erts_atomic_t *var, + erts_aint_t mask, + erts_aint_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint_t, + ethr_atomic_read, + ethr_atomic_cmpxchg_wb, + var, mask, set); +} +#endif + /* 32-bit atomics */ #define erts_atomic32_init_nob ethr_atomic32_init @@ -770,6 +939,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_xchg_nob ethr_atomic32_xchg #define erts_atomic32_cmpxchg_nob ethr_atomic32_cmpxchg +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_nob(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t, + ethr_atomic32_read, + ethr_atomic32_cmpxchg, + var, mask, set); +} +#endif + #define erts_atomic32_init_mb ethr_atomic32_init_mb #define erts_atomic32_set_mb ethr_atomic32_set_mb #define erts_atomic32_read_mb ethr_atomic32_read_mb @@ -784,6 +966,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_xchg_mb ethr_atomic32_xchg_mb #define erts_atomic32_cmpxchg_mb ethr_atomic32_cmpxchg_mb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_mb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t, + ethr_atomic32_read, + ethr_atomic32_cmpxchg_mb, + var, mask, set); +} +#endif + #define erts_atomic32_init_acqb ethr_atomic32_init_acqb #define erts_atomic32_set_acqb ethr_atomic32_set_acqb #define erts_atomic32_read_acqb ethr_atomic32_read_acqb @@ -798,6 +993,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_xchg_acqb ethr_atomic32_xchg_acqb #define erts_atomic32_cmpxchg_acqb ethr_atomic32_cmpxchg_acqb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_acqb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t, + ethr_atomic32_read, + ethr_atomic32_cmpxchg_acqb, + var, mask, set); +} +#endif + #define erts_atomic32_init_relb ethr_atomic32_init_relb #define erts_atomic32_set_relb ethr_atomic32_set_relb #define erts_atomic32_read_relb ethr_atomic32_read_relb @@ -812,6 +1020,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_xchg_relb ethr_atomic32_xchg_relb #define erts_atomic32_cmpxchg_relb ethr_atomic32_cmpxchg_relb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_relb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t, + ethr_atomic32_read, + ethr_atomic32_cmpxchg_relb, + var, mask, set); +} +#endif + #define erts_atomic32_init_ddrb ethr_atomic32_init_ddrb #define erts_atomic32_set_ddrb ethr_atomic32_set_ddrb #define erts_atomic32_read_ddrb ethr_atomic32_read_ddrb @@ -826,6 +1047,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define 
erts_atomic32_xchg_ddrb ethr_atomic32_xchg_ddrb #define erts_atomic32_cmpxchg_ddrb ethr_atomic32_cmpxchg_ddrb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_ddrb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t, + ethr_atomic32_read, + ethr_atomic32_cmpxchg_ddrb, + var, mask, set); +} +#endif + #define erts_atomic32_init_rb ethr_atomic32_init_rb #define erts_atomic32_set_rb ethr_atomic32_set_rb #define erts_atomic32_read_rb ethr_atomic32_read_rb @@ -840,6 +1074,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_xchg_rb ethr_atomic32_xchg_rb #define erts_atomic32_cmpxchg_rb ethr_atomic32_cmpxchg_rb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_rb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t, + ethr_atomic32_read, + ethr_atomic32_cmpxchg_rb, + var, mask, set); +} +#endif + #define erts_atomic32_init_wb ethr_atomic32_init_wb #define erts_atomic32_set_wb ethr_atomic32_set_wb #define erts_atomic32_read_wb ethr_atomic32_read_wb @@ -854,6 +1101,21 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_xchg_wb ethr_atomic32_xchg_wb #define erts_atomic32_cmpxchg_wb ethr_atomic32_cmpxchg_wb +#if ERTS_GLB_INLINE_INCL_FUNC_DEF +ERTS_GLB_INLINE erts_aint32_t +erts_atomic32_read_bset_wb(erts_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set) +{ + ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t, + ethr_atomic32_read, + ethr_atomic32_cmpxchg_wb, + var, mask, set); +} +#endif + +#undef ERTS_ATOMIC_BSET_IMPL__ + #else /* !USE_THREADS */ /* Double word size atomics */ @@ -908,6 +1170,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_read_band_nob erts_no_atomic_read_band #define erts_atomic_xchg_nob erts_no_atomic_xchg #define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg +#define erts_atomic_read_bset_nob erts_no_atomic_read_bset #define erts_atomic_init_mb erts_no_atomic_set #define erts_atomic_set_mb erts_no_atomic_set @@ -922,6 +1185,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_read_band_mb erts_no_atomic_read_band #define erts_atomic_xchg_mb erts_no_atomic_xchg #define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg +#define erts_atomic_read_bset_mb erts_no_atomic_read_bset #define erts_atomic_init_acqb erts_no_atomic_set #define erts_atomic_set_acqb erts_no_atomic_set @@ -936,6 +1200,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_read_band_acqb erts_no_atomic_read_band #define erts_atomic_xchg_acqb erts_no_atomic_xchg #define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg +#define erts_atomic_read_bset_acqb erts_no_atomic_read_bset #define erts_atomic_init_relb erts_no_atomic_set #define erts_atomic_set_relb erts_no_atomic_set @@ -950,6 +1215,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_read_band_relb erts_no_atomic_read_band #define erts_atomic_xchg_relb erts_no_atomic_xchg #define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg +#define erts_atomic_read_bset_relb erts_no_atomic_read_bset #define erts_atomic_init_ddrb erts_no_atomic_set #define erts_atomic_set_ddrb erts_no_atomic_set @@ -964,6 +1230,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_read_band_ddrb 
erts_no_atomic_read_band #define erts_atomic_xchg_ddrb erts_no_atomic_xchg #define erts_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg +#define erts_atomic_read_bset_ddrb erts_no_atomic_read_bset #define erts_atomic_init_rb erts_no_atomic_set #define erts_atomic_set_rb erts_no_atomic_set @@ -978,6 +1245,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_read_band_rb erts_no_atomic_read_band #define erts_atomic_xchg_rb erts_no_atomic_xchg #define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg +#define erts_atomic_read_bset_rb erts_no_atomic_read_bset #define erts_atomic_init_wb erts_no_atomic_set #define erts_atomic_set_wb erts_no_atomic_set @@ -992,6 +1260,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic_read_band_wb erts_no_atomic_read_band #define erts_atomic_xchg_wb erts_no_atomic_xchg #define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg +#define erts_atomic_read_bset_wb erts_no_atomic_read_bset /* 32-bit atomics */ @@ -1008,6 +1277,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_read_band_nob erts_no_atomic32_read_band #define erts_atomic32_xchg_nob erts_no_atomic32_xchg #define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg +#define erts_atomic32_read_bset_nob erts_no_atomic32_read_bset #define erts_atomic32_init_mb erts_no_atomic32_set #define erts_atomic32_set_mb erts_no_atomic32_set @@ -1022,6 +1292,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_read_band_mb erts_no_atomic32_read_band #define erts_atomic32_xchg_mb erts_no_atomic32_xchg #define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg +#define erts_atomic32_read_bset_mb erts_no_atomic32_read_bset #define erts_atomic32_init_acqb erts_no_atomic32_set #define erts_atomic32_set_acqb erts_no_atomic32_set @@ -1036,6 +1307,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_read_band_acqb erts_no_atomic32_read_band #define erts_atomic32_xchg_acqb erts_no_atomic32_xchg #define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg +#define erts_atomic32_read_bset_acqb erts_no_atomic32_read_bset #define erts_atomic32_init_relb erts_no_atomic32_set #define erts_atomic32_set_relb erts_no_atomic32_set @@ -1050,6 +1322,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_read_band_relb erts_no_atomic32_read_band #define erts_atomic32_xchg_relb erts_no_atomic32_xchg #define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg +#define erts_atomic32_read_bset_relb erts_no_atomic32_read_bset #define erts_atomic32_init_ddrb erts_no_atomic32_set #define erts_atomic32_set_ddrb erts_no_atomic32_set @@ -1064,6 +1337,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_read_band_ddrb erts_no_atomic32_read_band #define erts_atomic32_xchg_ddrb erts_no_atomic32_xchg #define erts_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg +#define erts_atomic32_read_bset_ddrb erts_no_atomic32_read_bset #define erts_atomic32_init_rb erts_no_atomic32_set #define erts_atomic32_set_rb erts_no_atomic32_set @@ -1078,6 +1352,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_read_band_rb erts_no_atomic32_read_band #define erts_atomic32_xchg_rb erts_no_atomic32_xchg #define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg +#define erts_atomic32_read_bset_rb erts_no_atomic32_read_bset #define 
erts_atomic32_init_wb erts_no_atomic32_set #define erts_atomic32_set_wb erts_no_atomic32_set @@ -1092,6 +1367,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); #define erts_atomic32_read_band_wb erts_no_atomic32_read_band #define erts_atomic32_xchg_wb erts_no_atomic32_xchg #define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg +#define erts_atomic32_read_bset_wb erts_no_atomic32_read_bset #endif /* !USE_THREADS */ @@ -1856,6 +2132,17 @@ erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp, return old; } +ERTS_GLB_INLINE erts_aint_t +erts_no_atomic_read_bset(erts_no_atomic_t *var, + erts_aint_t mask, + erts_aint_t set) +{ + erts_aint_t old = *var; + *var &= ~mask; + *var |= (mask & set); + return old; +} + /* atomic32 */ ERTS_GLB_INLINE void @@ -1943,6 +2230,17 @@ erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp, return old; } +ERTS_GLB_INLINE erts_aint32_t +erts_no_atomic32_read_bset(erts_no_atomic32_t *var, + erts_aint32_t mask, + erts_aint32_t set) +{ + erts_aint32_t old = *var; + *var &= ~mask; + *var |= (mask & set); + return old; +} + /* spinlock */ ERTS_GLB_INLINE void diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index d04a91f18c..848877d43e 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -44,9 +44,9 @@ #undef DEBUG_PRINTOUTS #endif -extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */ -extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */ -extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ +extern BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */ +extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */ +extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ /* Pseudo export entries. Never filled in with data, only used to yield unique pointers of the correct type. */ @@ -156,8 +156,8 @@ do { (RES) = (TPID); } while(0) #define ERTS_TRACER_REF_TYPE Process * #define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \ do { \ - (RES) = erts_proc_lookup((TPID)); \ - if (!(RES) || !((RES)->trace_flags & F_TRACER)) { \ + (RES) = erts_proc_lookup((TPID)); \ + if (!(RES) || !(ERTS_TRACE_FLAGS((RES)) & F_TRACER)) { \ (TPID) = NIL; \ (TRACEE_FLGS) &= ~TRACEE_FLAGS; \ return; \ @@ -409,7 +409,7 @@ WRITE_SYS_MSG_TO_PORT(Eterm unused_to, } #ifndef ERTS_SMP - if (!INVALID_TRACER_PORT(trace_port, trace_port->id)) + if (!INVALID_TRACER_PORT(trace_port, trace_port->common.id)) #endif erts_raw_port_command(trace_port, buffer, ptr-buffer); @@ -441,7 +441,7 @@ do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) { message = TUPLE5(hp, am_trace_ts, pid, am_out, mfarity, timestamp); /* Note, hp is deliberately NOT incremented since it will be reused */ - do_send_to_port(trace_port->id, + do_send_to_port(trace_port->common.id, trace_port, pid, SYS_MSG_TYPE_UNDEFINED, @@ -451,7 +451,7 @@ do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) { hp += 5; hp = patch_ts(message, hp); - do_send_to_port(trace_port->id, + do_send_to_port(trace_port->common.id, trace_port, pid, SYS_MSG_TYPE_UNDEFINED, @@ -508,7 +508,7 @@ send_to_port(Process *c_p, Eterm message, #endif do_send_to_port(*tracer_pid, trace_port, - c_p ? c_p->id : NIL, + c_p ? c_p->common.id : NIL, SYS_MSG_TYPE_TRACE, message); #ifndef ERTS_SMP @@ -543,7 +543,7 @@ send_to_port(Process *c_p, Eterm message, trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY; do_send_to_port(*tracer_pid, trace_port, - c_p ? c_p->id : NIL, + c_p ? 
c_p->common.id : NIL, SYS_MSG_TYPE_TRACE, message); @@ -557,7 +557,7 @@ send_to_port(Process *c_p, Eterm message, * just after writning the real trace message, and now gets scheduled * in again. */ - do_send_schedfix_to_port(trace_port, c_p->id, ts); + do_send_schedfix_to_port(trace_port, c_p->common.id, ts); } erts_port_release(trace_port); @@ -599,15 +599,14 @@ profile_send(Eterm from, Eterm message) { if (profiler_port) { do_send_to_port(profiler, profiler_port, - NIL, /* or current process->id */ + NIL, /* or current process->common.id */ SYS_MSG_TYPE_SYSPROF, message); erts_port_release(profiler_port); } } else { - ASSERT(is_internal_pid(profiler) - && internal_pid_index(profiler) < erts_max_processes); + ASSERT(is_internal_pid(profiler)); profile_p = erts_proc_lookup(profiler); @@ -673,7 +672,7 @@ seq_trace_send_to_port(Process *c_p, #endif do_send_to_port(seq_tracer, trace_port, - c_p ? c_p->id : NIL, + c_p ? c_p->common.id : NIL, SYS_MSG_TYPE_SEQTRACE, message); @@ -704,7 +703,7 @@ seq_trace_send_to_port(Process *c_p, trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY; do_send_to_port(seq_tracer, trace_port, - c_p ? c_p->id : NIL, + c_p ? c_p->common.id : NIL, SYS_MSG_TYPE_SEQTRACE, message); @@ -718,7 +717,7 @@ seq_trace_send_to_port(Process *c_p, * just after writing the real trace message, and now gets scheduled * in again. */ - do_send_schedfix_to_port(trace_port, c_p->id, ts); + do_send_schedfix_to_port(trace_port, c_p->common.id, ts); } erts_port_release(trace_port); @@ -729,7 +728,9 @@ seq_trace_send_to_port(Process *c_p, } #define TS_HEAP_WORDS 5 -#define TS_SIZE(p) (((p)->trace_flags & F_TIMESTAMP) ? TS_HEAP_WORDS : 0) +#define TS_SIZE(p) ((ERTS_TRACE_FLAGS((p)) & F_TIMESTAMP) \ + ? TS_HEAP_WORDS \ + : 0) /* * Patch a timestamp into a tuple. The tuple must be the last thing @@ -764,17 +765,17 @@ send_to_tracer(Process *tracee, erts_smp_mtx_lock(&smq_mtx); - if (tracee->trace_flags & F_TIMESTAMP) + if (ERTS_TRACE_FLAGS(tracee) & F_TIMESTAMP) *hpp = patch_ts(msg, *hpp); - if (is_internal_pid(tracee->tracer_proc)) - ERTS_ENQ_TRACE_MSG(tracee->id, tracer_ref, msg, bp); + if (is_internal_pid(ERTS_TRACER_PROC(tracee))) + ERTS_ENQ_TRACE_MSG(tracee->common.id, tracer_ref, msg, bp); else { - ASSERT(is_internal_port(tracee->tracer_proc)); + ASSERT(is_internal_port(ERTS_TRACER_PROC(tracee))); send_to_port(no_fake_sched ? 
NULL : tracee, msg, - &tracee->tracer_proc, - &tracee->trace_flags); + &ERTS_TRACER_PROC(tracee), + &ERTS_TRACE_FLAGS(tracee)); } erts_smp_mtx_unlock(&smq_mtx); @@ -792,7 +793,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched) ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF; int sched_no, curr_func, to_port, no_fake_sched; - if (is_nil(p->tracer_proc)) + if (is_nil(ERTS_TRACER_PROC(p))) return; no_fake_sched = never_fake_sched; @@ -812,13 +813,14 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched) } sched_no = IS_TRACED_FL(p, F_TRACE_SCHED_NO); - to_port = is_internal_port(p->tracer_proc); + to_port = is_internal_port(ERTS_TRACER_PROC(p)); if (!to_port) { - ASSERT(is_internal_pid(p->tracer_proc) - && internal_pid_index(p->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(p))); - ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(p), + ERTS_TRACE_FLAGS(p)); } if (ERTS_PROC_IS_EXITING(p)) @@ -851,7 +853,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched) } if (!sched_no) { - mess = TUPLE4(hp, am_trace, p->id, what, tmp); + mess = TUPLE4(hp, am_trace, p->common.id, what, tmp); hp += 5; } else { @@ -860,7 +862,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched) #else Eterm sched_id = make_small(1); #endif - mess = TUPLE5(hp, am_trace, p->id, what, sched_id, tmp); + mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, tmp); hp += 6; } @@ -912,19 +914,19 @@ trace_send(Process *p, Eterm to, Eterm msg) operation = am_atom_put(s, sys_strlen(s)); } - if (is_internal_port(p->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(p))) { #define LOCAL_HEAP_SIZE (11) DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); UseTmpHeapNoproc(LOCAL_HEAP_SIZE); hp = local_heap; - mess = TUPLE5(hp, am_trace, p->id, operation, msg, to); + mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to); hp += 6; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } - send_to_port(p, mess, &p->tracer_proc, &p->trace_flags); + send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p)); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE erts_smp_mtx_unlock(&smq_mtx); @@ -934,10 +936,11 @@ trace_send(Process *p, Eterm to, Eterm msg) ErlOffHeap *off_heap; ERTS_TRACER_REF_TYPE tracer_ref; - ASSERT(is_internal_pid(p->tracer_proc) - && internal_pid_index(p->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(p))); - ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(p), + ERTS_TRACE_FLAGS(p)); sz_msg = size_object(msg); sz_to = size_object(to); @@ -953,16 +956,16 @@ trace_send(Process *p, Eterm to, Eterm msg) sz_msg, &hp, off_heap); - mess = TUPLE5(hp, am_trace, p->id/* Local pid */, operation, msg, to); + mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to); hp += 6; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { patch_ts(mess, hp); } - ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp); + ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp); erts_smp_mtx_unlock(&smq_mtx); } } @@ -977,19 +980,19 @@ trace_receive(Process *rp, Eterm msg) size_t sz_msg; Eterm* hp; - if (is_internal_port(rp->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(rp))) { #define LOCAL_HEAP_SIZE (10) 
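The read_bset operations added to the atomics layer earlier in this diff atomically replace the bits selected by mask with the corresponding bits of set and return the previous value; the non-threaded fallback erts_no_atomic_read_bset, shown above, does the same with plain loads and stores. The threaded variants are generated by ERTS_ATOMIC_BSET_IMPL__, whose body is not visible in the hunks shown here, so the sketch below is only an assumed illustration of the usual read-plus-cmpxchg retry shape, written in terms of the ethread operations named in the macro invocations above.

/* Illustrative sketch only -- not the actual ERTS_ATOMIC_BSET_IMPL__ body.
 * Atomically replace the bits selected by 'mask' with the matching bits of
 * 'set' and return the value the variable held before the update. */
static erts_aint32_t
example_read_bset_nob(erts_atomic32_t *var,
                      erts_aint32_t mask,
                      erts_aint32_t set)
{
    erts_aint32_t old = ethr_atomic32_read(var);
    while (1) {
        erts_aint32_t nval = (old & ~mask) | (set & mask);
        erts_aint32_t act = ethr_atomic32_cmpxchg(var, nval, old);
        if (act == old)
            return old;   /* no concurrent update; masked bits now replaced */
        old = act;        /* lost a race; retry with the freshly read value */
    }
}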
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); UseTmpHeapNoproc(LOCAL_HEAP_SIZE); hp = local_heap; - mess = TUPLE4(hp, am_trace, rp->id, am_receive, msg); + mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg); hp += 5; erts_smp_mtx_lock(&smq_mtx); - if (rp->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } - send_to_port(rp, mess, &rp->tracer_proc, &rp->trace_flags); + send_to_port(rp, mess, &ERTS_TRACER_PROC(rp), &ERTS_TRACE_FLAGS(rp)); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE erts_smp_mtx_unlock(&smq_mtx); @@ -999,10 +1002,11 @@ trace_receive(Process *rp, Eterm msg) ErlOffHeap *off_heap; ERTS_TRACER_REF_TYPE tracer_ref; - ASSERT(is_internal_pid(rp->tracer_proc) - && internal_pid_index(rp->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(rp))); - ERTS_GET_TRACER_REF(tracer_ref, rp->tracer_proc, rp->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(rp), + ERTS_TRACE_FLAGS(rp)); sz_msg = size_object(msg); @@ -1011,16 +1015,16 @@ trace_receive(Process *rp, Eterm msg) hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, tracer_ref); msg = copy_struct(msg, sz_msg, &hp, off_heap); - mess = TUPLE4(hp, am_trace, rp->id/* Local pid */, am_receive, msg); + mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg); hp += 5; erts_smp_mtx_lock(&smq_mtx); - if (rp->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) { patch_ts(mess, hp); } - ERTS_ENQ_TRACE_MSG(rp->id, tracer_ref, mess, bp); + ERTS_ENQ_TRACE_MSG(rp->common.id, tracer_ref, mess, bp); erts_smp_mtx_unlock(&smq_mtx); } } @@ -1030,14 +1034,14 @@ seq_trace_update_send(Process *p) { Eterm seq_tracer = erts_get_system_seq_tracer(); ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p)))); - if ( (p->id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL) + if ( (p->common.id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL) #ifdef USE_VM_PROBES || (SEQ_TRACE_TOKEN(p) == am_have_dt_utag) #endif ) { return 0; } - SEQ_TRACE_TOKEN_SENDER(p) = p->id; /* Internal pid */ + SEQ_TRACE_TOKEN_SENDER(p) = p->common.id; SEQ_TRACE_TOKEN_SERIAL(p) = make_small(++(p -> seq_trace_clock)); SEQ_TRACE_TOKEN_LASTCNT(p) = @@ -1074,7 +1078,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type, ASSERT(is_tuple(token) || is_nil(token)); if (SEQ_TRACE_T_SENDER(token) == seq_tracer || token == NIL || - (process && process->trace_flags & F_SENSITIVE)) { + (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE)) { return; } @@ -1138,8 +1142,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type, Uint sz_label, sz_lastcnt_serial, sz_msg, sz_ts, sz_sender, sz_exitfrom, sz_receiver; - ASSERT(is_internal_pid(seq_tracer) - && internal_pid_index(seq_tracer) < erts_max_processes); + ASSERT(is_internal_pid(seq_tracer)); #ifndef ERTS_SMP @@ -1253,17 +1256,17 @@ erts_trace_return_to(Process *p, BeamInstr *pc) hp += 4; } - mess = TUPLE4(hp, am_trace, p->id, am_return_to, mfa); + mess = TUPLE4(hp, am_trace, p->common.id, am_return_to, mfa); hp += 5; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } - if (is_internal_port(p->tracer_proc)) { - send_to_port(p, mess, &p->tracer_proc, &p->trace_flags); + if (is_internal_port(ERTS_TRACER_PROC(p))) { + send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p)); } else { ErlHeapFragment *bp; ErlOffHeap *off_heap; @@ -1273,10 +1276,11 @@ erts_trace_return_to(Process *p, BeamInstr *pc) /* * Find 
the tracer. */ - ASSERT(is_internal_pid(p->tracer_proc) - && internal_pid_index(p->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(p))); - ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(p), + ERTS_TRACE_FLAGS(p)); size = size_object(mess); @@ -1286,7 +1290,7 @@ erts_trace_return_to(Process *p, BeamInstr *pc) * Copy the trace message into the buffer and enqueue it. */ mess = copy_struct(mess, size, &hp, off_heap); - ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp); + ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp); } UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE @@ -1315,25 +1319,25 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid) /* Breakpoint trace enabled without specifying tracer => * use process tracer and flags */ - tracer_pid = &p->tracer_proc; + tracer_pid = &ERTS_TRACER_PROC(p); } if (is_nil(*tracer_pid)) { /* Trace disabled */ return; } ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid)); - if (*tracer_pid == p->id) { + if (*tracer_pid == p->common.id) { /* Do not generate trace messages to oneself */ return; } - if (tracer_pid == &p->tracer_proc) { + if (tracer_pid == &ERTS_TRACER_PROC(p)) { /* Tracer specified in process structure => * non-breakpoint trace => * use process flags */ - tracee_flags = &p->trace_flags; + tracee_flags = &ERTS_TRACE_FLAGS(p); #ifdef ERTS_SMP - tracee = p->id; + tracee = p->common.id; #endif } else { /* Tracer not specified in process structure => @@ -1362,7 +1366,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid) hp = local_heap; mfa = TUPLE3(hp, mod, name, make_small(arity)); hp += 4; - mess = TUPLE5(hp, am_trace, p->id, am_return_from, mfa, retval); + mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval); hp += 6; erts_smp_mtx_lock(&smq_mtx); if (*tracee_flags & F_TIMESTAMP) { @@ -1382,8 +1386,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid) Eterm* limit; #endif - ASSERT(is_internal_pid(*tracer_pid) - && internal_pid_index(*tracer_pid) < erts_max_processes); + ASSERT(is_internal_pid(*tracer_pid)); ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags); @@ -1405,7 +1408,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid) mfa = TUPLE3(hp, mod, name, make_small(arity)); hp += 4; retval = copy_struct(retval, retval_size, &hp, off_heap); - mess = TUPLE5(hp, am_trace, p->id/* Local pid */, am_return_from, mfa, retval); + mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval); hp += 6; erts_smp_mtx_lock(&smq_mtx); @@ -1446,25 +1449,25 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value, /* Breakpoint trace enabled without specifying tracer => * use process tracer and flags */ - tracer_pid = &p->tracer_proc; + tracer_pid = &ERTS_TRACER_PROC(p); } if (is_nil(*tracer_pid)) { /* Trace disabled */ return; } ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid)); - if (*tracer_pid == p->id) { + if (*tracer_pid == p->common.id) { /* Do not generate trace messages to oneself */ return; } - if (tracer_pid == &p->tracer_proc) { + if (tracer_pid == &ERTS_TRACER_PROC(p)) { /* Tracer specified in process structure => * non-breakpoint trace => * use process flags */ - tracee_flags = &p->trace_flags; + tracee_flags = &ERTS_TRACE_FLAGS(p); #ifdef ERTS_SMP - tracee = p->id; + tracee = p->common.id; #endif if (! 
(*tracee_flags & F_TRACE_CALLS)) { return; @@ -1492,7 +1495,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value, hp += 4; cv = TUPLE2(hp, class, value); hp += 3; - mess = TUPLE5(hp, am_trace, p->id, am_exception_from, mfa_tuple, cv); + mess = TUPLE5(hp, am_trace, p->common.id, am_exception_from, mfa_tuple, cv); hp += 6; ASSERT((hp - local_heap) <= LOCAL_HEAP_SIZE); erts_smp_mtx_lock(&smq_mtx); @@ -1514,8 +1517,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value, Eterm* limit; #endif - ASSERT(is_internal_pid(*tracer_pid) - && internal_pid_index(*tracer_pid) < erts_max_processes); + ASSERT(is_internal_pid(*tracer_pid)); ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags); @@ -1539,7 +1541,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value, value = copy_struct(value, value_size, &hp, off_heap); cv = TUPLE2(hp, class, value); hp += 3; - mess = TUPLE5(hp, am_trace, p->id/* Local pid */, + mess = TUPLE5(hp, am_trace, p->common.id, am_exception_from, mfa_tuple, cv); hp += 6; @@ -1593,25 +1595,25 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, /* Breakpoint trace enabled without specifying tracer => * use process tracer and flags */ - tracer_pid = &p->tracer_proc; + tracer_pid = &ERTS_TRACER_PROC(p); } if (is_nil(*tracer_pid)) { /* Trace disabled */ return 0; } ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid)); - if (*tracer_pid == p->id) { + if (*tracer_pid == p->common.id) { /* Do not generate trace messages to oneself */ return 0; } - if (tracer_pid == &p->tracer_proc) { + if (tracer_pid == &ERTS_TRACER_PROC(p)) { /* Tracer specified in process structure => * non-breakpoint trace => * use process flags */ - tracee_flags = &p->trace_flags; + tracee_flags = &ERTS_TRACE_FLAGS(p); #ifdef ERTS_SMP - tracee = p->id; + tracee = p->common.id; #endif } else { /* Tracer not specified in process structure => @@ -1619,7 +1621,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, * meta trace => * use fixed flag set instead of process flags */ - if (p->trace_flags & F_SENSITIVE) { + if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) { /* No trace messages for sensitive processes. */ return 0; } @@ -1677,7 +1679,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, if (!erts_is_valid_tracer_port(*tracer_pid)) { #ifdef ERTS_SMP - ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc); + ASSERT(is_nil(tracee) || tracer_pid == &ERTS_TRACER_PROC(p)); if (is_not_nil(tracee)) erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); #endif @@ -1779,7 +1781,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, * Build the trace tuple and send it to the port. 
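A recurring pattern in the erl_trace.c hunks is that direct field accesses such as p->id, p->trace_flags and p->tracer_proc are replaced by p->common.id, ERTS_TRACE_FLAGS(p) and ERTS_TRACER_PROC(p), so the tracing code reaches this state through a header shared by processes and ports. The accessor macros themselves are defined outside the hunks shown here; the definitions below are assumptions written only to make the pattern concrete, not text quoted from the commit.

/* Assumed definitions -- the real ones live elsewhere in this commit.
 * The point is that Process and Port are expected to embed the same
 * leading 'common' member, so one accessor serves both entity types. */
#define ERTS_TRACE_FLAGS(P)   ((P)->common.trace_flags)
#define ERTS_TRACER_PROC(P)   ((P)->common.tracer_proc)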
*/ - mess = TUPLE4(hp, am_trace, p->id, am_call, mfa_tuple); + mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple); hp += 5; if (pam_result != am_true) { hp[-5] = make_arityval(5); @@ -1814,21 +1816,21 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, Eterm* limit; #endif - ASSERT(is_internal_pid(*tracer_pid) - && internal_pid_index(*tracer_pid) < erts_max_processes); + ASSERT(is_internal_pid(*tracer_pid)); tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, *tracer_pid, ERTS_PROC_LOCK_STATUS); if (!tracer) invalid_tracer = 1; else { - invalid_tracer = (tracer->trace_flags & F_TRACER) == 0; + invalid_tracer = !(ERTS_TRACE_FLAGS(tracer) & F_TRACER); erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS); } if (invalid_tracer) { #ifdef ERTS_SMP - ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc); + ASSERT(is_nil(tracee) + || tracer_pid == &ERTS_TRACER_PROC(p)); if (is_not_nil(tracee)) erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); #endif @@ -1952,7 +1954,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec, * Build the trace tuple and enqueue it. */ - mess = TUPLE4(hp, am_trace, p->id/* Local pid */, am_call, mfa_tuple); + mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple); hp += 5; if (pam_result != am_true) { hp[-5] = make_arityval(5); @@ -1990,17 +1992,17 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data) ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0) || erts_thr_progress_is_blocking()); - if (is_internal_port(t_p->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(t_p))) { #define LOCAL_HEAP_SIZE (5+5) DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); UseTmpHeapNoproc(LOCAL_HEAP_SIZE); hp = local_heap; - mess = TUPLE4(hp, am_trace, t_p->id, what, data); + mess = TUPLE4(hp, am_trace, t_p->common.id, what, data); hp += 5; erts_smp_mtx_lock(&smq_mtx); - if (t_p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } send_to_port( @@ -2011,7 +2013,9 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data) /* Fake schedule out and in are never sent when smp enabled */ c_p, #endif - mess, &t_p->tracer_proc, &t_p->trace_flags); + mess, + &ERTS_TRACER_PROC(t_p), + &ERTS_TRACE_FLAGS(t_p)); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE erts_smp_mtx_unlock(&smq_mtx); @@ -2022,10 +2026,11 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data) ERTS_TRACER_REF_TYPE tracer_ref; size_t sz_data; - ASSERT(is_internal_pid(t_p->tracer_proc) - && internal_pid_index(t_p->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p))); - ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(t_p), + ERTS_TRACE_FLAGS(t_p)); sz_data = size_object(data); @@ -2034,16 +2039,16 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data) hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref); tmp = copy_struct(data, sz_data, &hp, off_heap); - mess = TUPLE4(hp, am_trace, t_p->id/* Local pid */, what, tmp); + mess = TUPLE4(hp, am_trace, t_p->common.id, what, tmp); hp += 5; erts_smp_mtx_lock(&smq_mtx); - if (t_p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } - ERTS_ENQ_TRACE_MSG(t_p->id, tracer_ref, mess, bp); + ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp); erts_smp_mtx_unlock(&smq_mtx); } } @@ -2064,7 +2069,7 @@ trace_proc_spawn(Process *p, Eterm pid, Eterm mess; Eterm* hp; - if 
(is_internal_port(p->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(p))) { #define LOCAL_HEAP_SIZE (4+6+5) DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); UseTmpHeapNoproc(LOCAL_HEAP_SIZE); @@ -2072,13 +2077,13 @@ trace_proc_spawn(Process *p, Eterm pid, hp = local_heap; mfa = TUPLE3(hp, mod, func, args); hp += 4; - mess = TUPLE5(hp, am_trace, p->id, am_spawn, pid, mfa); + mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, pid, mfa); hp += 6; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } - send_to_port(p, mess, &p->tracer_proc, &p->trace_flags); + send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p)); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE erts_smp_mtx_unlock(&smq_mtx); @@ -2090,10 +2095,11 @@ trace_proc_spawn(Process *p, Eterm pid, size_t sz_args, sz_pid; Uint need; - ASSERT(is_internal_pid(p->tracer_proc) - && internal_pid_index(p->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(p))); - ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(p), + ERTS_TRACE_FLAGS(p)); sz_args = size_object(args); sz_pid = size_object(pid); @@ -2105,16 +2111,16 @@ trace_proc_spawn(Process *p, Eterm pid, mfa = TUPLE3(hp, mod, func, tmp); hp += 4; tmp = copy_struct(pid, sz_pid, &hp, off_heap); - mess = TUPLE5(hp, am_trace, p->id, am_spawn, tmp, mfa); + mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, tmp, mfa); hp += 6; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } - ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp); + ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp); erts_smp_mtx_unlock(&smq_mtx); } } @@ -2204,7 +2210,7 @@ trace_gc(Process *p, Eterm what) UseTmpHeap(LOCAL_HEAP_SIZE,p); - if (is_internal_port(p->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(p))) { hp = local_heap; #ifdef DEBUG size = 0; @@ -2216,10 +2222,11 @@ trace_gc(Process *p, Eterm what) size += 5/*4-tuple*/ + TS_SIZE(p); #endif } else { - ASSERT(is_internal_pid(p->tracer_proc) - && internal_pid_index(p->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(p))); - ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(p), + ERTS_TRACE_FLAGS(p)); size = 0; (void) erts_bld_atom_uint_2tup_list(NULL, @@ -2243,19 +2250,19 @@ trace_gc(Process *p, Eterm what) tags, values); - msg = TUPLE4(hp, am_trace, p->id/* Local pid */, what, msg); + msg = TUPLE4(hp, am_trace, p->common.id, what, msg); hp += 5; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { hp = patch_ts(msg, hp); } ASSERT(hp == limit); - if (is_internal_port(p->tracer_proc)) - send_to_port(p, msg, &p->tracer_proc, &p->trace_flags); + if (is_internal_port(ERTS_TRACER_PROC(p))) + send_to_port(p, msg, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p)); else - ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, msg, bp); + ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, msg, bp); erts_smp_mtx_unlock(&smq_mtx); UnUseTmpHeap(LOCAL_HEAP_SIZE,p); #undef LOCAL_HEAP_SIZE @@ -2295,8 +2302,7 @@ monitor_long_gc(Process *p, Uint time) { #endif #ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor) - && internal_pid_index(system_monitor) < erts_max_processes); + ASSERT(is_internal_pid(system_monitor)); monitor_p = 
erts_proc_lookup(system_monitor); if (!monitor_p || p == monitor_p) return; @@ -2321,7 +2327,7 @@ monitor_long_gc(Process *p, Uint time) { sizeof(values)/sizeof(Uint), tags, values); - msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, am_long_gc, list); + msg = TUPLE4(hp, am_monitor, p->common.id, am_long_gc, list); #ifdef DEBUG hp += 5 /* 4-tuple */; @@ -2329,7 +2335,7 @@ monitor_long_gc(Process *p, Uint time) { #endif #ifdef ERTS_SMP - enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp); + enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); #else erts_queue_message(monitor_p, NULL, bp, msg, NIL #ifdef USE_VM_PROBES @@ -2370,8 +2376,7 @@ monitor_large_heap(Process *p) { #ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor) - && internal_pid_index(system_monitor) < erts_max_processes); + ASSERT(is_internal_pid(system_monitor)); monitor_p = erts_proc_lookup(system_monitor); if (monitor_p || p == monitor_p) { return; @@ -2397,7 +2402,7 @@ monitor_large_heap(Process *p) { sizeof(values)/sizeof(Uint), tags, values); - msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, am_large_heap, list); + msg = TUPLE4(hp, am_monitor, p->common.id, am_large_heap, list); #ifdef DEBUG hp += 5 /* 4-tuple */; @@ -2405,7 +2410,7 @@ monitor_large_heap(Process *p) { #endif #ifdef ERTS_SMP - enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp); + enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); #else erts_queue_message(monitor_p, NULL, bp, msg, NIL #ifdef USE_VM_PROBES @@ -2425,8 +2430,7 @@ monitor_generic(Process *p, Eterm type, Eterm spec) { Eterm *hp, msg; #ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor) - && internal_pid_index(system_monitor) < erts_max_processes); + ASSERT(is_internal_pid(system_monitor)); monitor_p = erts_proc_lookup(system_monitor); if (!monitor_p || p == monitor_p) return; @@ -2434,11 +2438,11 @@ monitor_generic(Process *p, Eterm type, Eterm spec) { hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p); - msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, type, spec); + msg = TUPLE4(hp, am_monitor, p->common.id, type, spec); hp += 5; #ifdef ERTS_SMP - enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp); + enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); #else erts_queue_message(monitor_p, NULL, bp, msg, NIL #ifdef USE_VM_PROBES @@ -2560,21 +2564,21 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) { Eterm mess; Eterm* hp; - if (is_internal_port(p->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(p))) { #define LOCAL_HEAP_SIZE (5+6) DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); UseTmpHeapNoproc(LOCAL_HEAP_SIZE); hp = local_heap; - mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name); + mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name); hp += 6; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } /* No fake schedule */ - send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags); + send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p)); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE erts_smp_mtx_unlock(&smq_mtx); @@ -2584,25 +2588,26 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) { size_t sz_data; ERTS_TRACER_REF_TYPE tracer_ref; - ASSERT(is_internal_pid(p->tracer_proc) - && internal_pid_index(p->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(p))); sz_data = 6 + TS_SIZE(p); - 
ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(p), + ERTS_TRACE_FLAGS(p)); hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref); - mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name); + mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name); hp += 6; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } - ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp); + ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp); erts_smp_mtx_unlock(&smq_mtx); } @@ -2623,20 +2628,20 @@ trace_port(Port *t_p, Eterm what, Eterm data) { ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - if (is_internal_port(t_p->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(t_p))) { #define LOCAL_HEAP_SIZE (5+5) DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); UseTmpHeapNoproc(LOCAL_HEAP_SIZE); hp = local_heap; - mess = TUPLE4(hp, am_trace, t_p->id, what, data); + mess = TUPLE4(hp, am_trace, t_p->common.id, what, data); hp += 5; erts_smp_mtx_lock(&smq_mtx); - if (t_p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } /* No fake schedule */ - send_to_port(NULL, mess, &t_p->tracer_proc, &t_p->trace_flags); + send_to_port(NULL,mess,&ERTS_TRACER_PROC(t_p),&ERTS_TRACE_FLAGS(t_p)); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE erts_smp_mtx_unlock(&smq_mtx); @@ -2646,25 +2651,26 @@ trace_port(Port *t_p, Eterm what, Eterm data) { size_t sz_data; ERTS_TRACER_REF_TYPE tracer_ref; - ASSERT(is_internal_pid(t_p->tracer_proc) - && internal_pid_index(t_p->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p))); sz_data = 5 + TS_SIZE(t_p); - ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(t_p), + ERTS_TRACE_FLAGS(t_p)); hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref); - mess = TUPLE4(hp, am_trace, t_p->id, what, data); + mess = TUPLE4(hp, am_trace, t_p->common.id, what, data); hp += 5; erts_smp_mtx_lock(&smq_mtx); - if (t_p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } - ERTS_ENQ_TRACE_MSG(t_p->id, tracer_ref, mess, bp); + ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp); erts_smp_mtx_unlock(&smq_mtx); } } @@ -2689,7 +2695,7 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) { int ws = 5; Eterm sched_id = am_undefined; - if (is_internal_port(p->tracer_proc)) { + if (is_internal_port(ERTS_TRACER_PROC(p))) { #define LOCAL_HEAP_SIZE (5+6) DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); UseTmpHeapNoproc(LOCAL_HEAP_SIZE); @@ -2704,21 +2710,21 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) { #else sched_id = make_small(1); #endif - mess = TUPLE5(hp, am_trace, p->id, what, sched_id, where); + mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where); ws = 6; } else { - mess = TUPLE4(hp, am_trace, p->id, what, where); + mess = TUPLE4(hp, am_trace, p->common.id, what, where); ws = 5; } hp += ws; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } /* No fake scheduling */ - send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags); + send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p)); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef 
LOCAL_HEAP_SIZE erts_smp_mtx_unlock(&smq_mtx); @@ -2727,12 +2733,13 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) { ErlOffHeap *off_heap; ERTS_TRACER_REF_TYPE tracer_ref; - ASSERT(is_internal_pid(p->tracer_proc) - && internal_pid_index(p->tracer_proc) < erts_max_processes); + ASSERT(is_internal_pid(ERTS_TRACER_PROC(p))); if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) ws = 6; /* Make place for scheduler id */ - ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags); + ERTS_GET_TRACER_REF(tracer_ref, + ERTS_TRACER_PROC(p), + ERTS_TRACE_FLAGS(p)); hp = ERTS_ALLOC_SYSMSG_HEAP(ws+TS_SIZE(p), &bp, &off_heap, tracer_ref); @@ -2744,19 +2751,19 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) { #else sched_id = make_small(1); #endif - mess = TUPLE5(hp, am_trace, p->id, what, sched_id, where); + mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where); } else { - mess = TUPLE4(hp, am_trace, p->id, what, where); + mess = TUPLE4(hp, am_trace, p->common.id, what, where); } hp += ws; erts_smp_mtx_lock(&smq_mtx); - if (p->trace_flags & F_TIMESTAMP) { + if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) { hp = patch_ts(mess, hp); } - ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp); + ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp); erts_smp_mtx_unlock(&smq_mtx); } } @@ -2792,14 +2799,14 @@ profile_runnable_port(Port *p, Eterm status) { GET_NOW(&Ms, &s, &us); timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4; - msg = TUPLE5(hp, am_profile, p->id, status, count, timestamp); hp += 6; + msg = TUPLE5(hp, am_profile, p->common.id, status, count, timestamp); hp += 6; #ifndef ERTS_SMP - profile_send(p->id, msg); + profile_send(p->common.id, msg); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE #else - enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->id, NIL, msg, bp); + enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp); #endif erts_smp_mtx_unlock(&smq_mtx); } @@ -2846,13 +2853,13 @@ profile_runnable_proc(Process *p, Eterm status){ GET_NOW(&Ms, &s, &us); timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4; - msg = TUPLE5(hp, am_profile, p->id, status, where, timestamp); hp += 6; + msg = TUPLE5(hp, am_profile, p->common.id, status, where, timestamp); hp += 6; #ifndef ERTS_SMP - profile_send(p->id, msg); + profile_send(p->common.id, msg); UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); #undef LOCAL_HEAP_SIZE #else - enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->id, NIL, msg, bp); + enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp); #endif erts_smp_mtx_unlock(&smq_mtx); } @@ -2865,16 +2872,19 @@ profile_runnable_proc(Process *p, Eterm status){ void erts_check_my_tracer_proc(Process *p) { - if (is_internal_pid(p->tracer_proc)) { - Process *tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, - p->tracer_proc, ERTS_PROC_LOCK_STATUS); - int invalid_tracer = !tracer || !(tracer->trace_flags & F_TRACER); + if (is_internal_pid(ERTS_TRACER_PROC(p))) { + Process *tracer = erts_pid2proc(p, + ERTS_PROC_LOCK_MAIN, + ERTS_TRACER_PROC(p), + ERTS_PROC_LOCK_STATUS); + int invalid_tracer = (!tracer + || !(ERTS_TRACE_FLAGS(tracer) & F_TRACER)); if (tracer) erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS); if (invalid_tracer) { erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); - p->trace_flags &= ~TRACEE_FLAGS; - p->tracer_proc = NIL; + ERTS_TRACE_FLAGS(p) &= ~TRACEE_FLAGS; + ERTS_TRACER_PROC(p) = NIL; erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); } } @@ -3218,7 +3228,7 @@ 
sys_msg_dispatcher_func(void *unused) proc = erts_pid2proc(NULL, 0, receiver, proc_locks); if (!proc || (smqp->type == SYS_MSG_TYPE_TRACE - && !(proc->trace_flags & F_TRACER))) { + && !(ERTS_TRACE_FLAGS(proc) & F_TRACER))) { /* Bad tracer */ #ifdef DEBUG_PRINTOUTS if (smqp->type == SYS_MSG_TYPE_TRACE && proc) @@ -3245,16 +3255,14 @@ sys_msg_dispatcher_func(void *unused) proc = erts_whereis_process(NULL,0,receiver,proc_locks,0); if (!proc) goto failure; - else if (smqp->from == proc->id) + else if (smqp->from == proc->common.id) goto drop_sys_msg; else goto queue_proc_msg; } else if (is_internal_port(receiver)) { - port = erts_id2port_sflgs(receiver, - NULL, - 0, - ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); + port = erts_thr_id2port_sflgs(receiver, + ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); if (!port) goto failure; else { @@ -3268,7 +3276,7 @@ sys_msg_dispatcher_func(void *unused) #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "delivered\n"); #endif - erts_port_release(port); + erts_thr_port_release(port); if (smqp->bp) free_message_buffer(smqp->bp); } diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h new file mode 100644 index 0000000000..50fb27aab0 --- /dev/null +++ b/erts/emulator/beam/erl_trace.h @@ -0,0 +1,141 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2012. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. 
+ * + * %CopyrightEnd% + */ + + +#ifndef ERL_TRACE_H__ +#define ERL_TRACE_H__ + +struct binary; + +/* erl_bif_trace.c */ +Eterm erl_seq_trace_info(Process *p, Eterm arg1); +void erts_system_monitor_clear(Process *c_p); +void erts_system_profile_clear(Process *c_p); + +/* erl_trace.c */ +void erts_init_trace(void); +void erts_trace_check_exiting(Eterm exiting); +Eterm erts_set_system_seq_tracer(Process *c_p, + ErtsProcLocks c_p_locks, + Eterm new); +Eterm erts_get_system_seq_tracer(void); +void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp); +void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp); +void erts_set_system_monitor(Eterm monitor); +Eterm erts_get_system_monitor(void); + +#ifdef ERTS_SMP +void erts_check_my_tracer_proc(Process *); +void erts_block_sys_msg_dispatcher(void); +void erts_release_sys_msg_dispatcher(void); +void erts_foreach_sys_msg_in_q(void (*func)(Eterm, + Eterm, + Eterm, + ErlHeapFragment *)); +void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *); +#endif + +void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *); +void trace_send(Process*, Eterm, Eterm); +void trace_receive(Process*, Eterm); +Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec, Eterm* args, + int local, Eterm *tracer_pid); +void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid); +void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value, + Eterm *tracer); +void erts_trace_return_to(Process *p, BeamInstr *pc); +void trace_sched(Process*, Eterm); +void trace_proc(Process*, Process*, Eterm, Eterm); +void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args); +void save_calls(Process *p, Export *); +void trace_gc(Process *p, Eterm what); +/* port tracing */ +void trace_virtual_sched(Process*, Eterm); +void trace_sched_ports(Port *pp, Eterm); +void trace_sched_ports_where(Port *pp, Eterm, Eterm); +void trace_port(Port *, Eterm what, Eterm data); +void trace_port_open(Port *, Eterm calling_pid, Eterm drv_name); + +/* system_profile */ +void erts_set_system_profile(Eterm profile); +Eterm erts_get_system_profile(void); +void profile_scheduler(Eterm scheduler_id, Eterm); +void profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint Ms, Uint s, Uint us); +void profile_runnable_proc(Process* p, Eterm status); +void profile_runnable_port(Port* p, Eterm status); +void erts_system_profile_setup_active_schedulers(void); + +/* system_monitor */ +void monitor_long_gc(Process *p, Uint time); +void monitor_large_heap(Process *p); +void monitor_generic(Process *p, Eterm type, Eterm spec); +Uint erts_trace_flag2bit(Eterm flag); +int erts_trace_flags(Eterm List, + Uint *pMask, Eterm *pTracer, int *pCpuTimestamp); +Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I); + +#ifdef ERTS_SMP +void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp); +#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \ +do { \ + if ((ESDP)->pending_trace_msgs) \ + erts_send_pending_trace_msgs((ESDP)); \ +} while (0) +#else +#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) +#endif + +#define seq_trace_output(token, msg, type, receiver, process) \ +seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL) +#define seq_trace_output_exit(token, msg, type, receiver, exitfrom) \ +seq_trace_output_generic((token), (msg), (type), (receiver), NULL, (exitfrom)) +void seq_trace_output_generic(Eterm token, Eterm msg, Uint type, + Eterm receiver, 
Process *process, Eterm exitfrom); + +int seq_trace_update_send(Process *process); + +Eterm erts_seq_trace(Process *process, + Eterm atom_type, Eterm atom_true_or_false, + int build_result); + +struct trace_pattern_flags { + unsigned int breakpoint : 1; /* Set if any other is set */ + unsigned int local : 1; /* Local call trace breakpoint */ + unsigned int meta : 1; /* Metadata trace breakpoint */ + unsigned int call_count : 1; /* Fast call count breakpoint */ + unsigned int call_time : 1; /* Fast call time breakpoint */ +}; +extern const struct trace_pattern_flags erts_trace_pattern_flags_off; +extern int erts_call_time_breakpoint_tracing; +int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified, + struct binary* match_prog_set, + struct binary *meta_match_prog_set, + int on, struct trace_pattern_flags, + Eterm meta_tracer_pid, int is_blocking); +void +erts_get_default_trace_pattern(int *trace_pattern_is_on, + struct binary **match_spec, + struct binary **meta_match_spec, + struct trace_pattern_flags *trace_pattern_flags, + Eterm *meta_tracer_pid); +int erts_is_default_trace_enabled(void); +void erts_bif_trace_init(void); +int erts_finish_breakpointing(void); + +#endif /* ERL_TRACE_H__ */ diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h new file mode 100644 index 0000000000..a2064bd8a3 --- /dev/null +++ b/erts/emulator/beam/erl_utils.h @@ -0,0 +1,215 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2012. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. 
+ * + * %CopyrightEnd% + */ + +#ifndef ERL_UTILS_H__ +#define ERL_UTILS_H__ + +#include "sys.h" +#include "erl_smp.h" +#include "erl_printf.h" + +typedef struct { +#ifdef DEBUG + int smp_api; +#endif + union { + Uint64 not_atomic; +#ifdef ARCH_64 + erts_atomic_t atomic; +#else + erts_dw_atomic_t atomic; +#endif + } counter; +} erts_interval_t; + +void erts_interval_init(erts_interval_t *); +void erts_smp_interval_init(erts_interval_t *); +Uint64 erts_step_interval_nob(erts_interval_t *); +Uint64 erts_step_interval_relb(erts_interval_t *); +Uint64 erts_smp_step_interval_nob(erts_interval_t *); +Uint64 erts_smp_step_interval_relb(erts_interval_t *); +Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64); +Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64); +Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64); +Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64); +#ifdef ARCH_32 +ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *); +#endif +ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *); +ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +#ifdef ARCH_32 + +ERTS_GLB_INLINE Uint64 +erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw) +{ +#ifdef ETHR_SU_DW_NAINT_T__ + return (Uint64) dw->dw_sint; +#else + Uint64 res; + res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]); + res <<= 32; + res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]); + return res; +#endif +} + +#endif + +ERTS_GLB_INLINE Uint64 +erts_current_interval_nob__(erts_interval_t *icp) +{ +#ifdef ARCH_64 + return (Uint64) erts_atomic_read_nob(&icp->counter.atomic); +#else + erts_dw_aint_t dw; + erts_dw_atomic_read_nob(&icp->counter.atomic, &dw); + return erts_interval_dw_aint_to_val__(&dw); +#endif +} + +ERTS_GLB_INLINE Uint64 +erts_current_interval_acqb__(erts_interval_t *icp) +{ +#ifdef ARCH_64 + return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic); +#else + erts_dw_aint_t dw; + erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw); + return erts_interval_dw_aint_to_val__(&dw); +#endif +} + +ERTS_GLB_INLINE Uint64 +erts_current_interval_nob(erts_interval_t *icp) +{ + ASSERT(!icp->smp_api); + return erts_current_interval_nob__(icp); +} + +ERTS_GLB_INLINE Uint64 +erts_current_interval_acqb(erts_interval_t *icp) +{ + ASSERT(!icp->smp_api); + return erts_current_interval_acqb__(icp); +} + +ERTS_GLB_INLINE Uint64 +erts_smp_current_interval_nob(erts_interval_t *icp) +{ + ASSERT(icp->smp_api); +#ifdef ERTS_SMP + return erts_current_interval_nob__(icp); +#else + return icp->counter.not_atomic; +#endif +} + +ERTS_GLB_INLINE Uint64 +erts_smp_current_interval_acqb(erts_interval_t *icp) +{ + ASSERT(icp->smp_api); +#ifdef ERTS_SMP + return erts_current_interval_acqb__(icp); +#else + return icp->counter.not_atomic; +#endif +} + +#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ + +/* + * To be used to silence unused result warnings, but do not abuse it. 
+ */ +void erts_silence_warn_unused_result(long unused); + + +int erts_fit_in_bits_int64(Sint64); +int erts_fit_in_bits_int32(Sint32); +int list_length(Eterm); +int erts_is_builtin(Eterm, Eterm, int); +Uint32 make_broken_hash(Eterm); +Uint32 block_hash(byte *, unsigned, Uint32); +Uint32 make_hash2(Eterm); +Uint32 make_hash(Eterm); + + +Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str); +Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui); +Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw); +Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64); +Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64); +Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr); +Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...); +Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]); +Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len); +#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str)) +Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]); +Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp, + Sint length, Eterm terms1[], Uint terms2[]); +Eterm +erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp, + Sint length, Eterm atoms[], Uint uints[]); +Eterm +erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length, + Eterm atoms[], Uint uints1[], Uint uints2[]); + +void erts_init_utils(void); +void erts_init_utils_mem(void); + +erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint); +void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *); + +#if HALFWORD_HEAP +int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base); +# define eq(A,B) eq_rel(A,NULL,B,NULL) +#else +int eq(Eterm, Eterm); +# define eq_rel(A,A_BASE,B,B_BASE) eq(A,B) +#endif + +#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y)))) + +#if HALFWORD_HEAP +Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*); +#define CMP(A,B) cmp_rel(A,NULL,B,NULL) +#else +Sint cmp(Eterm, Eterm); +#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B) +#define CMP(A,B) cmp(A,B) +#endif +#define cmp_lt(a,b) (CMP((a),(b)) < 0) +#define cmp_le(a,b) (CMP((a),(b)) <= 0) +#define cmp_eq(a,b) (CMP((a),(b)) == 0) +#define cmp_ne(a,b) (CMP((a),(b)) != 0) +#define cmp_ge(a,b) (CMP((a),(b)) >= 0) +#define cmp_gt(a,b) (CMP((a),(b)) > 0) + +#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b))) +#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b))) +#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b))) +#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b))) + +#endif diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 649352ca91..41c2a0f2b9 100755 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -39,38 +39,8 @@ #include "erl_sys_driver.h" #include "erl_debug.h" #include "error.h" - -typedef struct port Port; -#include "erl_port_task.h" - -typedef struct erts_driver_t_ erts_driver_t; - -#define SMALL_IO_QUEUE 5 /* Number of fixed elements */ - -typedef struct { - ErlDrvSizeT size; /* total size in bytes */ - - SysIOVec* v_start; - SysIOVec* v_end; - SysIOVec* v_head; - SysIOVec* v_tail; - SysIOVec v_small[SMALL_IO_QUEUE]; - - ErlDrvBinary** b_start; - ErlDrvBinary** b_end; - ErlDrvBinary** b_head; - ErlDrvBinary** b_tail; - ErlDrvBinary* b_small[SMALL_IO_QUEUE]; -} ErlIOQueue; - -typedef struct line_buf { /* Buffer used in line oriented I/O */ - ErlDrvSizeT bufsiz; /* Size of character buffer */ - ErlDrvSizeT ovlen; /* Length of overflow data */ - ErlDrvSizeT ovsiz; /* Actual size of overflow buffer */ - char data[1]; /* Starting 
point of buffer data, - data[0] is a flag indicating an unprocess CR, - The rest is the overflow buffer. */ -} LineBuf; +#include "erl_utils.h" +#include "erl_port.h" struct enif_environment_t /* ErlNifEnv */ { @@ -90,162 +60,6 @@ extern void erts_print_nif_taints(int to, void* to_arg); void erts_unload_nif(struct erl_module_nif* nif); extern void erl_nif_init(void); -/* - * Port Specific Data. - * - * Only use PrtSD for very rarely used data. - */ - -#define ERTS_PRTSD_SCHED_ID 0 - -#define ERTS_PRTSD_SIZE 1 - -typedef struct { - void *data[ERTS_PRTSD_SIZE]; -} ErtsPrtSD; - -#ifdef ERTS_SMP -typedef struct ErtsXPortsList_ ErtsXPortsList; -#endif - -/* - * Port locking: - * - * Locking is done either driver specific or port specific. When - * driver specific locking is used, all instances of the driver, - * i.e. ports running the driver, share the same lock. When port - * specific locking is used each instance have its own lock. - * - * Most fields in the Port structure are protected by the lock - * referred to by the lock field. I'v called it the port lock. - * This lock is shared between all ports running the same driver - * when driver specific locking is used. - * - * The 'sched' field is protected by the port tasks lock - * (see erl_port_tasks.c) - * - * The 'status' field is protected by a combination of the port lock, - * the port tasks lock, and the state_lck. It may be read if - * the state_lck, or the port lock is held. It may only be - * modified if both the port lock and the state_lck is held - * (with one exception; see below). When changeing status from alive - * to dead or vice versa, also the port task lock has to be held. - * This in order to guarantee that tasks are scheduled only for - * ports that are alive. - * - * The status field may be modified with only the state_lck - * held when status is changed from dead to alive. This since no - * threads can have any references to the port other than via the - * port table. - * - * /rickard - */ - -struct port { - ErtsPortTaskSched sched; - ErtsPortTaskHandle timeout_task; - erts_smp_atomic_t refc; -#ifdef ERTS_SMP - erts_smp_mtx_t *lock; - ErtsXPortsList *xports; - erts_smp_atomic_t run_queue; - erts_smp_spinlock_t state_lck; /* protects: id, status, snapshot */ -#endif - Eterm id; /* The Port id of this port */ - Eterm connected; /* A connected process */ - Eterm caller; /* Current caller. */ - Eterm data; /* Data associated with port. */ - ErlHeapFragment* bp; /* Heap fragment holding data (NULL if imm data). */ - ErtsLink *nlinks; - ErtsMonitor *monitors; /* Only MON_ORIGIN monitors of pid's */ - Uint bytes_in; /* Number of bytes read */ - Uint bytes_out; /* Number of bytes written */ -#ifdef ERTS_SMP - ErtsSmpPTimer *ptimer; -#else - ErlTimer tm; /* Timer entry */ -#endif - - Eterm tracer_proc; /* If the port is traced, this is the tracer */ - Uint trace_flags; /* Trace flags */ - - ErlIOQueue ioq; /* driver accessible i/o queue */ - DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */ - char *name; /* String used in the open */ - erts_driver_t* drv_ptr; - UWord drv_data; - SWord os_pid; /* Child process ID */ - ErtsProcList *suspended; /* List of suspended processes. 
*/ - LineBuf *linebuf; /* Buffer to hold data not ready for - process to get (line oriented I/O)*/ - Uint32 status; /* Status and type flags */ - int control_flags; /* Flags for port_control() */ - erts_aint32_t snapshot; /* Next snapshot that port should be part of */ - struct reg_proc *reg; - ErlDrvPDL port_data_lock; - - ErtsPrtSD *psd; /* Port specific data */ -}; - - -ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE ErtsRunQueue * -erts_port_runq(Port *prt) -{ -#ifdef ERTS_SMP - ErtsRunQueue *rq1, *rq2; - rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); - if (!rq1) - return NULL; - while (1) { - erts_smp_runq_lock(rq1); - rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); - if (rq1 == rq2) - return rq1; - erts_smp_runq_unlock(rq1); - rq1 = rq2; - if (!rq1) - return NULL; - } -#else - return ERTS_RUNQ_IX(0); -#endif -} - -#endif - - -ERTS_GLB_INLINE void *erts_prtsd_get(Port *p, int ix); -ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE void * -erts_prtsd_get(Port *prt, int ix) -{ - return prt->psd ? prt->psd->data[ix] : NULL; -} - -ERTS_GLB_INLINE void * -erts_prtsd_set(Port *prt, int ix, void *data) -{ - if (prt->psd) { - void *old = prt->psd->data[ix]; - prt->psd->data[ix] = data; - return old; - } - else { - prt->psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD)); - prt->psd->data[ix] = data; - return NULL; - } -} - -#endif - /* Driver handle (wrapper for old plain handle) */ #define ERL_DE_OK 0 #define ERL_DE_UNLOAD 1 @@ -297,7 +111,7 @@ typedef struct { or that wait for it to change state */ erts_refc_t refc; /* Number of ports/processes having references to the driver */ - Uint port_count; /* Number of ports using the driver */ + erts_smp_atomic32_t port_count; /* Number of ports using the driver */ Uint flags; /* ERL_DE_FL_KILL_PORTS */ int status; /* ERL_DE_xxx */ char *full_path; /* Full path of the driver */ @@ -349,7 +163,7 @@ struct erts_driver_t_ { }; extern erts_driver_t *driver_list; -extern erts_smp_mtx_t erts_driver_list_lock; +extern erts_smp_rwmtx_t erts_driver_list_lock; extern void erts_ddll_init(void); extern void erts_ddll_lock_driver(DE_Handle *dh, char *name); @@ -529,40 +343,9 @@ union erl_off_heap_ptr { void* voidp; }; -/* arrays that get malloced at startup */ -extern Port* erts_port; - -extern Uint erts_max_ports; -extern Uint erts_port_tab_index_mask; -extern erts_smp_atomic32_t erts_ports_snapshot; -extern erts_smp_atomic_t erts_dead_ports_ptr; - -ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt) -{ - ERTS_SMP_LC_ASSERT(erts_smp_lc_spinlock_is_locked(&prt->state_lck)); - if (prt->snapshot != erts_smp_atomic32_read_acqb(&erts_ports_snapshot)) { - /* Dead ports are added from the end of the snapshot buffer */ - Eterm* tombstone; - tombstone = (Eterm*) erts_smp_atomic_add_read_nob(&erts_dead_ports_ptr, - -(erts_aint_t)sizeof(Eterm)); - ASSERT(tombstone+1 != NULL); - ASSERT(prt->snapshot == erts_smp_atomic32_read_nob(&erts_ports_snapshot) - 1); - *tombstone = prt->id; - } - /*else no ongoing snapshot or port was already included or created after snapshot */ -} - -#endif - /* controls warning mapping in error_logger */ extern Eterm node_cookie; -extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */ -extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system 
*/ extern Uint display_items; /* no of items to display in traces etc */ extern int erts_backtrace_depth; @@ -700,54 +483,6 @@ do { \ #define WSTACK_ISEMPTY(s) (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_start)) #define WSTACK_POP(s) (*(--WSTK_CONCAT(s,_sp))) - -/* port status flags */ - -#define ERTS_PORT_SFLG_CONNECTED ((Uint32) (1 << 0)) -/* Port have begun exiting */ -#define ERTS_PORT_SFLG_EXITING ((Uint32) (1 << 1)) -/* Distribution port */ -#define ERTS_PORT_SFLG_DISTRIBUTION ((Uint32) (1 << 2)) -#define ERTS_PORT_SFLG_BINARY_IO ((Uint32) (1 << 3)) -#define ERTS_PORT_SFLG_SOFT_EOF ((Uint32) (1 << 4)) -/* Flow control */ -#define ERTS_PORT_SFLG_PORT_BUSY ((Uint32) (1 << 5)) -/* Port is closing (no i/o accepted) */ -#define ERTS_PORT_SFLG_CLOSING ((Uint32) (1 << 6)) -/* Send a closed message when terminating */ -#define ERTS_PORT_SFLG_SEND_CLOSED ((Uint32) (1 << 7)) -/* Line orinted io on port */ -#define ERTS_PORT_SFLG_LINEBUF_IO ((Uint32) (1 << 8)) -/* Immortal port (only certain system ports) */ -#define ERTS_PORT_SFLG_IMMORTAL ((Uint32) (1 << 9)) -#define ERTS_PORT_SFLG_FREE ((Uint32) (1 << 10)) -#define ERTS_PORT_SFLG_FREE_SCHEDULED ((Uint32) (1 << 11)) -#define ERTS_PORT_SFLG_INITIALIZING ((Uint32) (1 << 12)) -/* Port uses port specific locking (opposed to driver specific locking) */ -#define ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK ((Uint32) (1 << 13)) -#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 14)) -/* Last port to terminate halts the emulator */ -#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 15)) -#ifdef DEBUG -/* Only debug: make sure all flags aren't cleared unintentionally */ -#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31)) -#endif - -/* Combinations of port status flags */ -#define ERTS_PORT_SFLGS_DEAD \ - (ERTS_PORT_SFLG_FREE \ - | ERTS_PORT_SFLG_FREE_SCHEDULED \ - | ERTS_PORT_SFLG_INITIALIZING) -#define ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \ - (ERTS_PORT_SFLGS_DEAD | ERTS_PORT_SFLG_INVALID) -#define ERTS_PORT_SFLGS_INVALID_LOOKUP \ - (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \ - | ERTS_PORT_SFLG_CLOSING) -#define ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP \ - (ERTS_PORT_SFLGS_INVALID_LOOKUP \ - | ERTS_PORT_SFLG_PORT_BUSY \ - | ERTS_PORT_SFLG_DISTRIBUTION) - /* binary.c */ void erts_emasculate_writable_binary(ProcBin* pb); @@ -758,11 +493,35 @@ Eterm erts_realloc_binary(Eterm bin, size_t size); /* erl_bif_info.c */ +Eterm +erts_bld_port_info(Eterm **hpp, + ErlOffHeap *ohp, + Uint *szp, + Port *prt, + Eterm item); + void erts_bif_info_init(void); /* bif.c */ Eterm erts_make_ref(Process *); Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]); +void erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]); + +ERTS_GLB_INLINE Eterm +erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]); + +#if ERTS_GLB_INLINE_INCL_FUNC_DEF + +ERTS_GLB_INLINE Eterm +erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]) +{ + Eterm *hp = HAlloc(c_p, REF_THING_SIZE); + write_ref_thing(hp, ref[0], ref[1], ref[2]); + return make_internal_ref(hp); +} + +#endif + void erts_queue_monitor_message(Process *, ErtsProcLocks*, Eterm, @@ -778,13 +537,6 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg); Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2); -/* erl_bif_port.c */ - -/* erl_bif_trace.c */ -Eterm erl_seq_trace_info(Process *p, Eterm arg1); -void erts_system_monitor_clear(Process *c_p); -void erts_system_profile_clear(Process *c_p); - /* beam_load.c */ typedef struct { BeamInstr* current; /* Pointer to: Mod, Name, Arity */ @@ -960,11 +712,6 @@ void 
erts_free_heap_frags(Process* p); /* io.c */ -struct erl_drv_port_data_lock { - erts_mtx_t mtx; - erts_atomic_t refc; -}; - typedef struct { char *name; char *driver_name; @@ -973,477 +720,33 @@ typedef struct { #define ERTS_SPAWN_DRIVER 1 #define ERTS_SPAWN_EXECUTABLE 2 #define ERTS_SPAWN_ANY (ERTS_SPAWN_DRIVER | ERTS_SPAWN_EXECUTABLE) - int erts_add_driver_entry(ErlDrvEntry *drv, DE_Handle *handle, int driver_list_locked); void erts_destroy_driver(erts_driver_t *drv); -void erts_wake_process_later(Port*, Process*); -int erts_open_driver(erts_driver_t*, Eterm, char*, SysDriverOpts*, int *); -int erts_is_port_ioq_empty(Port *); -void erts_terminate_port(Port *); -void close_port(Eterm); -void init_io(void); -void cleanup_io(void); -void erts_do_exit_port(Port *, Eterm, Eterm); -void erts_port_command(Process *, Eterm, Port *, Eterm); -Eterm erts_port_control(Process*, Port*, Uint, Eterm); -int erts_write_to_port(Eterm caller_id, Port *p, Eterm list); -void print_port_info(int, void *, int); +int erts_save_suspend_process_on_port(Port*, Process*); +Port *erts_open_driver(erts_driver_t*, Eterm, char*, SysDriverOpts*, int *, int *); +void erts_init_io(int, int); void erts_raw_port_command(Port*, byte*, Uint); -void driver_report_exit(int, int); +void driver_report_exit(ErlDrvPort, int); LineBuf* allocate_linebuf(int); int async_ready(Port *, void*); -Sint erts_test_next_port(int, Uint); ErtsPortNames *erts_get_port_names(Eterm); void erts_free_port_names(ErtsPortNames *); Uint erts_port_ioq_size(Port *pp); void erts_stale_drv_select(Eterm, ErlDrvEvent, int, int); -void erts_port_cleanup(Port *); -void erts_fire_port_monitor(Port *prt, Eterm ref); Port *erts_get_heart_port(void); -#ifdef ERTS_SMP -void erts_smp_xports_unlock(Port *); -#endif - #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) void erts_lcnt_enable_io_lock_count(int enable); #endif -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) -int erts_lc_is_port_locked(Port *); -#endif - -ERTS_GLB_INLINE void erts_smp_port_state_lock(Port*); -ERTS_GLB_INLINE void erts_smp_port_state_unlock(Port*); - -ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt); -ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt); -ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE void -erts_smp_port_state_lock(Port* prt) -{ -#ifdef ERTS_SMP - erts_smp_spin_lock(&prt->state_lck); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_port_state_unlock(Port *prt) -{ -#ifdef ERTS_SMP - erts_smp_spin_unlock(&prt->state_lck); -#endif -} - - -ERTS_GLB_INLINE int -erts_smp_port_trylock(Port *prt) -{ - int res; - - ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0); - erts_smp_atomic_inc_nob(&prt->refc); - -#ifdef ERTS_SMP - res = erts_smp_mtx_trylock(prt->lock); - if (res == EBUSY) { - erts_smp_atomic_dec_nob(&prt->refc); - } -#else - res = 0; -#endif - - return res; -} - -ERTS_GLB_INLINE void -erts_smp_port_lock(Port *prt) -{ - ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0); - erts_smp_atomic_inc_nob(&prt->refc); -#ifdef ERTS_SMP - erts_smp_mtx_lock(prt->lock); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_port_unlock(Port *prt) -{ - erts_aint_t refc; -#ifdef ERTS_SMP - erts_smp_mtx_unlock(prt->lock); -#endif - refc = erts_smp_atomic_dec_read_nob(&prt->refc); - ASSERT(refc >= 0); - if (refc == 0) - erts_port_cleanup(prt); -} - -#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ - - -#define ERTS_INVALID_PORT_OPT(PP, ID, FLGS) \ - (!(PP) || ((PP)->status & (FLGS)) || (PP)->id != (ID)) - -/* port lookup */ - 
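
[Aside, not part of the patch: a minimal usage sketch of the pre-change port lookup/locking API removed in this hunk. It relies only on erts_id2port() and erts_port_release() as declared in the removed block; the function name with_port() and the chosen process lock are illustrative.]

    /* Look the port up by id: on success the port's reference count has
     * been bumped and the port lock is held.  Releasing the port unlocks
     * it and drops the reference; the last reference triggers
     * erts_port_cleanup(). */
    static void with_port(Process *c_p, Eterm port_id)
    {
        Port *prt = erts_id2port(port_id, c_p, ERTS_PROC_LOCK_MAIN);
        if (!prt)
            return;                /* free, closing or otherwise invalid port */
        /* ... use prt while holding the port lock ... */
        erts_port_release(prt);
    }
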
-#define INVALID_PORT(PP, ID) \ - ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_LOOKUP) - -/* Invalidate trace port if anything suspicious, for instance - * that the port is a distribution port or it is busy. - */ -#define INVALID_TRACER_PORT(PP, ID) \ - ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP) - -#define ERTS_PORT_SCHED_ID(P, ID) \ - ((Uint) (UWord) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (UWord) (ID))) - -#ifdef ERTS_SMP -Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks); -#endif - -#define erts_id2port(ID, P, PL) \ - erts_id2port_sflgs((ID), (P), (PL), ERTS_PORT_SFLGS_INVALID_LOOKUP) - -ERTS_GLB_INLINE Port*erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32); -ERTS_GLB_INLINE void erts_port_release(Port *); -ERTS_GLB_INLINE Port*erts_drvport2port(ErlDrvPort); -ERTS_GLB_INLINE Port*erts_drvportid2port(Eterm); -ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm id); -ERTS_GLB_INLINE int erts_is_port_alive(Eterm id); -ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm id); -ERTS_GLB_INLINE void erts_port_status_bandor_set(Port *, Uint32, Uint32); -ERTS_GLB_INLINE void erts_port_status_band_set(Port *, Uint32); -ERTS_GLB_INLINE void erts_port_status_bor_set(Port *, Uint32); -ERTS_GLB_INLINE void erts_port_status_set(Port *, Uint32); -ERTS_GLB_INLINE Uint32 erts_port_status_get(Port *); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE Port* -erts_id2port_sflgs(Eterm id, Process *c_p, ErtsProcLocks c_p_locks, Uint32 sflgs) -{ -#ifdef ERTS_SMP - int no_proc_locks = !c_p || !c_p_locks; -#endif - Port *prt; - - if (is_not_internal_port(id)) - return NULL; - - prt = &erts_port[internal_port_index(id)]; - - erts_smp_port_state_lock(prt); - if (ERTS_INVALID_PORT_OPT(prt, id, sflgs)) { - erts_smp_port_state_unlock(prt); - prt = NULL; - } - else { - erts_smp_atomic_inc_nob(&prt->refc); - erts_smp_port_state_unlock(prt); - -#ifdef ERTS_SMP - if (no_proc_locks) - erts_smp_mtx_lock(prt->lock); - else if (erts_smp_mtx_trylock(prt->lock) == EBUSY) { - /* Unlock process locks, and acquire locks in lock order... */ - erts_smp_proc_unlock(c_p, c_p_locks); - erts_smp_mtx_lock(prt->lock); - erts_smp_proc_lock(c_p, c_p_locks); - } - - /* The id may not have changed... */ - ERTS_SMP_LC_ASSERT(prt->id == id); - /* ... but status may have... */ - if (prt->status & sflgs) { - erts_smp_port_unlock(prt); /* Also decrements refc... 
*/ - prt = NULL; - } -#endif - - } - - return prt; -} - -ERTS_GLB_INLINE void -erts_port_release(Port *prt) -{ - erts_smp_port_unlock(prt); -} - -ERTS_GLB_INLINE Port* -erts_drvport2port(ErlDrvPort drvport) -{ - int ix = (int) drvport; - if (ix < 0 || erts_max_ports <= ix) - return NULL; - if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) - return NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix])); - return &erts_port[ix]; -} - -ERTS_GLB_INLINE Port* -erts_drvportid2port(Eterm id) -{ - int ix; - if (is_not_internal_port(id)) - return NULL; - ix = (int) internal_port_index(id); - if (erts_max_ports <= ix) - return NULL; - if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) - return NULL; - if (erts_port[ix].id != id) - return NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix])); - return &erts_port[ix]; -} - -ERTS_GLB_INLINE Uint32 -erts_portid2status(Eterm id) -{ - if (is_not_internal_port(id)) - return ERTS_PORT_SFLG_INVALID; - else { - Uint32 status; - int ix = internal_port_index(id); - if (erts_max_ports <= ix) - return ERTS_PORT_SFLG_INVALID; - erts_smp_port_state_lock(&erts_port[ix]); - if (erts_port[ix].id == id) - status = erts_port[ix].status; - else - status = ERTS_PORT_SFLG_INVALID; - erts_smp_port_state_unlock(&erts_port[ix]); - return status; - } -} - -ERTS_GLB_INLINE int -erts_is_port_alive(Eterm id) -{ - return !(erts_portid2status(id) & (ERTS_PORT_SFLG_INVALID - | ERTS_PORT_SFLGS_DEAD)); -} - -ERTS_GLB_INLINE int -erts_is_valid_tracer_port(Eterm id) -{ - return !(erts_portid2status(id) & ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); -} - -ERTS_GLB_INLINE void erts_port_status_bandor_set(Port *prt, - Uint32 band_status, - Uint32 bor_status) -{ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - erts_smp_port_state_lock(prt); - prt->status &= band_status; - prt->status |= bor_status; - erts_smp_port_state_unlock(prt); -} - -ERTS_GLB_INLINE void erts_port_status_band_set(Port *prt, Uint32 status) -{ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - erts_smp_port_state_lock(prt); - prt->status &= status; - erts_smp_port_state_unlock(prt); -} - -ERTS_GLB_INLINE void erts_port_status_bor_set(Port *prt, Uint32 status) -{ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - erts_smp_port_state_lock(prt); - prt->status |= status; - erts_smp_port_state_unlock(prt); -} - -ERTS_GLB_INLINE void erts_port_status_set(Port *prt, Uint32 status) -{ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - erts_smp_port_state_lock(prt); - prt->status = status; - erts_smp_port_state_unlock(prt); -} - -ERTS_GLB_INLINE Uint32 erts_port_status_get(Port *prt) -{ - Uint32 res; - erts_smp_port_state_lock(prt); - res = prt->status; - erts_smp_port_state_unlock(prt); - return res; -} -#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ - /* erl_drv_thread.c */ void erl_drv_thr_init(void); -/* time.c */ - /* utils.c */ - -typedef struct { -#ifdef DEBUG - int smp_api; -#endif - union { - Uint64 not_atomic; -#ifdef ARCH_64 - erts_atomic_t atomic; -#else - erts_dw_atomic_t atomic; -#endif - } counter; -} erts_interval_t; - -void erts_interval_init(erts_interval_t *); -void erts_smp_interval_init(erts_interval_t *); -Uint64 erts_step_interval_nob(erts_interval_t *); -Uint64 erts_step_interval_relb(erts_interval_t *); -Uint64 erts_smp_step_interval_nob(erts_interval_t *); -Uint64 erts_smp_step_interval_relb(erts_interval_t *); -Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64); -Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64); 
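
[Aside, not part of the patch: the erts_interval_* API being moved from global.h into erl_utils.h is, judging by its declarations, a monotonically increasing 64-bit counter. A minimal sketch of a plausible use; the my_* names are purely illustrative.]

    static erts_interval_t my_interval;

    static void my_init(void)
    {
        erts_interval_init(&my_interval);
    }

    /* Bump the counter whenever the guarded state changes; callers keep
     * the returned value. */
    static Uint64 my_mark_change(void)
    {
        return erts_step_interval_nob(&my_interval);
    }

    /* Has anything changed since 'seen' was taken? */
    static int my_changed_since(Uint64 seen)
    {
        return erts_current_interval_nob(&my_interval) != seen;
    }
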
-Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64); -Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64); -#ifdef ARCH_32 -ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *); -#endif -ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -#ifdef ARCH_32 - -ERTS_GLB_INLINE Uint64 -erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw) -{ -#ifdef ETHR_SU_DW_NAINT_T__ - return (Uint64) dw->dw_sint; -#else - Uint64 res; - res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]); - res <<= 32; - res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]); - return res; -#endif -} - -#endif - -ERTS_GLB_INLINE Uint64 -erts_current_interval_nob__(erts_interval_t *icp) -{ -#ifdef ARCH_64 - return (Uint64) erts_atomic_read_nob(&icp->counter.atomic); -#else - erts_dw_aint_t dw; - erts_dw_atomic_read_nob(&icp->counter.atomic, &dw); - return erts_interval_dw_aint_to_val__(&dw); -#endif -} - -ERTS_GLB_INLINE Uint64 -erts_current_interval_acqb__(erts_interval_t *icp) -{ -#ifdef ARCH_64 - return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic); -#else - erts_dw_aint_t dw; - erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw); - return erts_interval_dw_aint_to_val__(&dw); -#endif -} - -ERTS_GLB_INLINE Uint64 -erts_current_interval_nob(erts_interval_t *icp) -{ - ASSERT(!icp->smp_api); - return erts_current_interval_nob__(icp); -} - -ERTS_GLB_INLINE Uint64 -erts_current_interval_acqb(erts_interval_t *icp) -{ - ASSERT(!icp->smp_api); - return erts_current_interval_acqb__(icp); -} - -ERTS_GLB_INLINE Uint64 -erts_smp_current_interval_nob(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return erts_current_interval_nob__(icp); -#else - return icp->counter.not_atomic; -#endif -} - -ERTS_GLB_INLINE Uint64 -erts_smp_current_interval_acqb(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return erts_current_interval_acqb__(icp); -#else - return icp->counter.not_atomic; -#endif -} - -#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ - -/* - * To be used to silence unused result warnings, but do not abuse it. 
- */ -void erts_silence_warn_unused_result(long unused); - void erts_cleanup_offheap(ErlOffHeap *offheap); -int erts_fit_in_bits_int64(Sint64); -int erts_fit_in_bits_int32(Sint32); -int list_length(Eterm); Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex); -int erts_is_builtin(Eterm, Eterm, int); -Uint32 make_broken_hash(Eterm); -Uint32 block_hash(byte *, unsigned, Uint32); -Uint32 make_hash2(Eterm); -Uint32 make_hash(Eterm); - - -Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str); -Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui); -Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw); -Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64); -Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64); -Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr); -Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...); -Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]); -Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len); -#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str)) -Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]); -Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp, - Sint length, Eterm terms1[], Uint terms2[]); -Eterm -erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp, - Sint length, Eterm atoms[], Uint uints[]); -Eterm -erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length, - Eterm atoms[], Uint uints1[], Uint uints2[]); Eterm store_external_or_ref_in_proc_(Process *, Eterm); Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm); @@ -1458,42 +761,6 @@ Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm); (ASSERT_EXPR(is_node_container((NC))), \ IS_CONST((NC)) ? (NC) : store_external_or_ref_in_proc_((Pp), (NC))) -void erts_init_utils(void); -void erts_init_utils_mem(void); - -erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint); -void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *); - -#if HALFWORD_HEAP -int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base); -# define eq(A,B) eq_rel(A,NULL,B,NULL) -#else -int eq(Eterm, Eterm); -# define eq_rel(A,A_BASE,B,B_BASE) eq(A,B) -#endif - -#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y)))) - -#if HALFWORD_HEAP -Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*); -#define CMP(A,B) cmp_rel(A,NULL,B,NULL) -#else -Sint cmp(Eterm, Eterm); -#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B) -#define CMP(A,B) cmp(A,B) -#endif -#define cmp_lt(a,b) (CMP((a),(b)) < 0) -#define cmp_le(a,b) (CMP((a),(b)) <= 0) -#define cmp_eq(a,b) (CMP((a),(b)) == 0) -#define cmp_ne(a,b) (CMP((a),(b)) != 0) -#define cmp_ge(a,b) (CMP((a),(b)) >= 0) -#define cmp_gt(a,b) (CMP((a),(b)) > 0) - -#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b))) -#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b))) -#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b))) -#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b))) - /* duplicates from big.h */ int term_to_Uint(Eterm term, Uint *up); int term_to_UWord(Eterm, UWord*); @@ -1537,79 +804,6 @@ int erts_utf8_to_latin1(byte* dest, const byte* source, int slen); #define ERTS_UTF8_ANALYZE_MORE 3 #define ERTS_UTF8_OK_MAX_CHARS 4 -/* erl_trace.c */ -void erts_init_trace(void); -void erts_trace_check_exiting(Eterm exiting); -Eterm erts_set_system_seq_tracer(Process *c_p, - ErtsProcLocks c_p_locks, - Eterm new); -Eterm erts_get_system_seq_tracer(void); -void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp); -void erts_get_default_tracing(Uint *flagsp, Eterm 
*tracerp); -void erts_set_system_monitor(Eterm monitor); -Eterm erts_get_system_monitor(void); - -#ifdef ERTS_SMP -void erts_check_my_tracer_proc(Process *); -void erts_block_sys_msg_dispatcher(void); -void erts_release_sys_msg_dispatcher(void); -void erts_foreach_sys_msg_in_q(void (*func)(Eterm, - Eterm, - Eterm, - ErlHeapFragment *)); -void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *); -#endif - -void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *); -void trace_send(Process*, Eterm, Eterm); -void trace_receive(Process*, Eterm); -Uint32 erts_call_trace(Process *p, BeamInstr mfa[], Binary *match_spec, Eterm* args, - int local, Eterm *tracer_pid); -void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid); -void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value, - Eterm *tracer); -void erts_trace_return_to(Process *p, BeamInstr *pc); -void trace_sched(Process*, Eterm); -void trace_proc(Process*, Process*, Eterm, Eterm); -void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args); -void save_calls(Process *p, Export *); -void trace_gc(Process *p, Eterm what); -/* port tracing */ -void trace_virtual_sched(Process*, Eterm); -void trace_sched_ports(Port *pp, Eterm); -void trace_sched_ports_where(Port *pp, Eterm, Eterm); -void trace_port(Port *, Eterm what, Eterm data); -void trace_port_open(Port *, Eterm calling_pid, Eterm drv_name); - -/* system_profile */ -void erts_set_system_profile(Eterm profile); -Eterm erts_get_system_profile(void); -void profile_scheduler(Eterm scheduler_id, Eterm); -void profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint Ms, Uint s, Uint us); -void profile_runnable_proc(Process* p, Eterm status); -void profile_runnable_port(Port* p, Eterm status); -void erts_system_profile_setup_active_schedulers(void); - -/* system_monitor */ -void monitor_long_gc(Process *p, Uint time); -void monitor_large_heap(Process *p); -void monitor_generic(Process *p, Eterm type, Eterm spec); -Uint erts_trace_flag2bit(Eterm flag); -int erts_trace_flags(Eterm List, - Uint *pMask, Eterm *pTracer, int *pCpuTimestamp); -Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I); - -#ifdef ERTS_SMP -void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp); -#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \ -do { \ - if ((ESDP)->pending_trace_msgs) \ - erts_send_pending_trace_msgs((ESDP)); \ -} while (0) -#else -#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) -#endif - void bin_write(int, void*, byte*, size_t); int intlist_to_buf(Eterm, char*, int); /* most callers pass plain char*'s */ @@ -1627,9 +821,16 @@ char* Sint_to_buf(Sint, struct Sint_buf*); #define ERTS_IOLIST_TYPE 2 Eterm buf_to_intlist(Eterm**, char*, size_t, Eterm); /* most callers pass plain char*'s */ -int io_list_to_buf(Eterm, char*, int); -int io_list_to_buf2(Eterm, char*, int); -int erts_iolist_size(Eterm, Uint *); + +#define ERTS_IOLIST_TO_BUF_OVERFLOW (~((ErlDrvSizeT) 0)) +#define ERTS_IOLIST_TO_BUF_TYPE_ERROR (~((ErlDrvSizeT) 1)) +#define ERTS_IOLIST_TO_BUF_FAILED(R) \ + (((R) & (~((ErlDrvSizeT) 1))) == (~((ErlDrvSizeT) 1))) +#define ERTS_IOLIST_TO_BUF_SUCCEEDED(R) \ + (!ERTS_IOLIST_TO_BUF_FAILED((R))) + +ErlDrvSizeT erts_iolist_to_buf(Eterm, char*, ErlDrvSizeT); +int erts_iolist_size(Eterm, ErlDrvSizeT *); int is_string(Eterm); void erl_at_exit(void (*) (void*), void*); Eterm collect_memory(Process *); @@ -1674,41 +875,6 @@ Uint erts_current_reductions(Process* current, Process *p); int 
erts_print_system_version(int to, void *arg, Process *c_p); int erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg); -#define seq_trace_output(token, msg, type, receiver, process) \ -seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL) -#define seq_trace_output_exit(token, msg, type, receiver, exitfrom) \ -seq_trace_output_generic((token), (msg), (type), (receiver), NULL, (exitfrom)) -void seq_trace_output_generic(Eterm token, Eterm msg, Uint type, - Eterm receiver, Process *process, Eterm exitfrom); - -int seq_trace_update_send(Process *process); - -Eterm erts_seq_trace(Process *process, - Eterm atom_type, Eterm atom_true_or_false, - int build_result); - -struct trace_pattern_flags { - unsigned int breakpoint : 1; /* Set if any other is set */ - unsigned int local : 1; /* Local call trace breakpoint */ - unsigned int meta : 1; /* Metadata trace breakpoint */ - unsigned int call_count : 1; /* Fast call count breakpoint */ - unsigned int call_time : 1; /* Fast call time breakpoint */ -}; -extern const struct trace_pattern_flags erts_trace_pattern_flags_off; -extern int erts_call_time_breakpoint_tracing; -int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified, - Binary* match_prog_set, Binary *meta_match_prog_set, - int on, struct trace_pattern_flags, - Eterm meta_tracer_pid, int is_blocking); -void -erts_get_default_trace_pattern(int *trace_pattern_is_on, - Binary **match_spec, - Binary **meta_match_spec, - struct trace_pattern_flags *trace_pattern_flags, - Eterm *meta_tracer_pid); -int erts_is_default_trace_enabled(void); -void erts_bif_trace_init(void); -int erts_finish_breakpointing(void); /* ** Call_trace uses this API for the parameter matching functions @@ -1950,15 +1116,15 @@ dtrace_pid_str(Eterm pid, char *process_buf) ERTS_GLB_INLINE void dtrace_proc_str(Process *process, char *process_buf) { - dtrace_pid_str(process->id, process_buf); + dtrace_pid_str(process->common.id, process_buf); } ERTS_GLB_INLINE void dtrace_port_str(Port *port, char *port_buf) { erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>", - port_channel_no(port->id), - port_number(port->id)); + port_channel_no(port->common.id), + port_number(port->common.id)); } ERTS_GLB_INLINE void diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index b1eb75bede..536a3cc819 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -43,6 +43,8 @@ #include "erl_version.h" #include "error.h" #include "erl_async.h" +#define ERTS_WANT_EXTERNAL_TAGS +#include "external.h" #include "dtrace-wrapper.h" extern ErlDrvEntry fd_driver_entry; @@ -51,34 +53,40 @@ extern ErlDrvEntry spawn_driver_entry; extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */ erts_driver_t *driver_list; /* List of all drivers, static and dynamic. 
*/ -erts_smp_mtx_t erts_driver_list_lock; /* Mutex for driver list */ +erts_smp_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */ static erts_smp_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling driver init */ static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a per thread basis (for BC interfaces) */ -Port* erts_port; /* The port table */ +ErtsPTab erts_port erts_align_attribute(ERTS_CACHE_LINE_SIZE); /* The port table */ erts_smp_atomic_t erts_bytes_out; /* No bytes sent out of the system */ erts_smp_atomic_t erts_bytes_in; /* No bytes gotten into the system */ -Uint erts_max_ports; -Uint erts_port_tab_index_mask; - const ErlDrvTermData driver_term_nil = (ErlDrvTermData)NIL; +const Port erts_invalid_port = {{ERTS_INVALID_PORT}}; + erts_driver_t vanilla_driver; erts_driver_t spawn_driver; erts_driver_t fd_driver; +int erts_port_synchronous_ops = 0; +int erts_port_schedule_all_ops = 0; +int erts_port_parallelism = 0; + +static void deliver_result(Eterm sender, Eterm pid, Eterm res); static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *); static void terminate_port(Port *p); static void pdl_init(void); #ifdef ERTS_SMP static void driver_monitor_lock_pdl(Port *p); static void driver_monitor_unlock_pdl(Port *p); +#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port_raw((Port), 1) #define DRV_MONITOR_LOCK_PDL(Port) driver_monitor_lock_pdl(Port) #define DRV_MONITOR_UNLOCK_PDL(Port) driver_monitor_unlock_pdl(Port) #else +#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port_raw((Port), 0) #define DRV_MONITOR_LOCK_PDL(Port) /* nothing */ #define DRV_MONITOR_UNLOCK_PDL(Port) /* nothing */ #endif @@ -89,36 +97,12 @@ static void driver_monitor_unlock_pdl(Port *p); static ERTS_INLINE ErlIOQueue* drvport2ioq(ErlDrvPort drvport) { - int ix = (int) drvport; - Uint32 status; - - if (ix < 0 || erts_max_ports <= ix) + Port *prt = erts_thr_drvport2port_raw(drvport, 0); + erts_aint32_t state = erts_atomic32_read_nob(&prt->state); + if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) return NULL; - - if (erts_get_scheduler_data()) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix])); - ERTS_LC_ASSERT(!erts_port[ix].port_data_lock - || erts_lc_mtx_is_locked( - &erts_port[ix].port_data_lock->mtx)); - - status = erts_port[ix].status; - } - else { - erts_smp_port_state_lock(&erts_port[ix]); - status = erts_port[ix].status; - erts_smp_port_state_unlock(&erts_port[ix]); - - ERTS_LC_ASSERT((status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) - || erts_port[ix].port_data_lock); - ERTS_LC_ASSERT(!erts_port[ix].port_data_lock - || erts_lc_mtx_is_locked( - &erts_port[ix].port_data_lock->mtx)); - - } - - return ((status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) - ? NULL - : &erts_port[ix].ioq); + else + return &prt->ioq; } static ERTS_INLINE int @@ -196,27 +180,13 @@ typedef struct line_buf_context { dtrace_port_str((PORT), port_str); #endif -/* The 'number' field in a port now has two parts: the lowest bits - contain the index in the port table, and the higher bits are a counter - which is incremented each time we look for a free port and start from - the beginning of the table. erts_max_ports is the number of file descriptors, - rounded up to a power of 2. - To get the index from a port, use the macro 'internal_port_index'; - 'port_number' returns the whole number field. 
-*/ - -static erts_smp_spinlock_t get_free_port_lck; -static Uint last_port_num; -static Uint port_num_mask; -erts_smp_atomic32_t erts_ports_snapshot; /* Identifies the _next_ snapshot (not the ongoing) */ - - static ERTS_INLINE void kill_port(Port *pp) { ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */ erts_port_task_free_port(pp); - ASSERT(pp->status & ERTS_PORT_SFLGS_DEAD); + /* In non-smp case the port structure may have been deallocated now */ } #ifdef ERTS_SMP @@ -227,146 +197,280 @@ erts_lc_is_port_locked(Port *prt) { if (!prt) return 0; + ERTS_SMP_LC_ASSERT(prt->lock); return erts_smp_lc_mtx_is_locked(prt->lock); } #endif #endif /* #ifdef ERTS_SMP */ -static int -get_free_port(void) -{ - Uint num; - Uint tries = erts_max_ports; - Port* port; +static void initq(Port* prt); - erts_smp_spin_lock(&get_free_port_lck); - num = last_port_num + 1; - for (;; ++num) { - port = &erts_port[num & erts_port_tab_index_mask]; +#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT) +#define ERTS_PORT_INIT_INSTR_NEED_ID 1 +#else +#define ERTS_PORT_INIT_INSTR_NEED_ID 0 +#endif - erts_smp_port_state_lock(port); - if (port->status & ERTS_PORT_SFLG_FREE) { - last_port_num = num; - erts_smp_spin_unlock(&get_free_port_lck); - break; - } - erts_smp_port_state_unlock(port); +static ERTS_INLINE void port_init_instr(Port *prt +#if ERTS_PORT_INIT_INSTR_NEED_ID + , Eterm id +#endif + ) +{ +#if !ERTS_PORT_INIT_INSTR_NEED_ID + Eterm id = NIL; /* Not used */ +#endif - if (--tries == 0) { - erts_smp_spin_unlock(&get_free_port_lck); - return -1; - } + /* + * Stuff that need to be initialized with the port id + * in the instrumented case, but not in the normal case. + */ +#ifdef ERTS_SMP + ASSERT(prt->drv_ptr && prt->lock); + if (!prt->drv_ptr->lock) { + char *lock_str = "port_lock"; +#ifdef ERTS_ENABLE_LOCK_COUNT + if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK)) + lock_str = NULL; +#endif + erts_mtx_init_locked_x(prt->lock, lock_str, id); } - port->status = ERTS_PORT_SFLG_INITIALIZING; - ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 0); - erts_smp_atomic_set_nob(&port->refc, 2); /* Port alive + lock */ - erts_smp_port_state_unlock(port); - return num & port_num_mask; +#endif + erts_port_task_init_sched(&prt->sched, id); } -/* - * erts_test_next_port() is only used for testing. - */ -Sint -erts_test_next_port(int set, Uint next) +#if !ERTS_PORT_INIT_INSTR_NEED_ID +static ERTS_INLINE void port_init_instr_abort(Port *prt) { - Uint i, num; - Sint res = -1; - - erts_smp_spin_lock(&get_free_port_lck); - if (set) { - last_port_num = (next - 1) & port_num_mask; +#ifdef ERTS_SMP + ASSERT(prt->drv_ptr && prt->lock); + if (!prt->drv_ptr->lock) { + erts_mtx_unlock(prt->lock); + erts_mtx_destroy(prt->lock); } - num = last_port_num + 1; +#endif + erts_port_task_fini_sched(&prt->sched); +} +#endif - for (i=0; i < erts_max_ports && res<0; ++i, ++num) { - - Port* port = &erts_port[num & erts_port_tab_index_mask]; +static void insert_port_struct(void *vprt, Eterm data) +{ + Port *prt = (Port *) vprt; + Eterm id = make_internal_port(data); +#if ERTS_PORT_INIT_INSTR_NEED_ID + /* + * This cannot be done earlier in the instrumented + * case since we don't now 'id' until now. 
+ */ + port_init_instr(prt, id); +#endif + prt->common.id = id; + erts_atomic32_init_relb(&prt->state, ERTS_PORT_SFLG_INITIALIZING); +} - erts_smp_port_state_lock(port); +#define ERTS_CREATE_PORT_FLAG_PARALLELISM (1 << 0) - if (port->status & ERTS_PORT_SFLG_FREE) { - last_port_num = num - 1; - res = num & port_num_mask; - } - erts_smp_port_state_unlock(port); +static Port *create_port(char *name, + erts_driver_t *driver, + erts_mtx_t *driver_lock, + int create_flags, + Eterm pid, + int *enop) +{ + ErtsPortTaskBusyPortQ *busy_port_queue; + Port *prt; + char *p; + size_t port_size, busy_port_queue_size, size; + erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED; + erts_aint32_t x_pts_flgs = 0; +#ifdef DEBUG + /* Make sure the debug flags survives until port is freed */ + state |= ERTS_PORT_SFLG_PORT_DEBUG; +#endif + +#ifdef ERTS_SMP + if (!driver_lock) { + /* Align size for mutex following port struct */ + port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port)); + size += sizeof(erts_mtx_t); } - erts_smp_spin_unlock(&get_free_port_lck); - return res; -} + else +#endif + port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port)); + busy_port_queue_size + = ((driver->flags & ERL_DRV_FLAG_NO_BUSY_MSGQ) + ? 0 + : ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ))); + size += busy_port_queue_size; -static void port_cleanup(Port *prt); + size += sys_strlen(name) + 1; -#ifdef ERTS_SMP + p = erts_alloc_fnf(ERTS_ALC_T_PORT, size); + if (!p) { + if (enop) + *enop = ENOMEM; + return NULL; + } -static void -sched_port_cleanup(void *vprt) -{ - Port *prt = (Port *) vprt; - erts_smp_mtx_lock(prt->lock); - port_cleanup(prt); -} + prt = (Port *) p; + p += port_size; -#endif + if (!busy_port_queue_size) + busy_port_queue = NULL; + else { + busy_port_queue = (ErtsPortTaskBusyPortQ *) p; + p += busy_port_queue_size; + } -void -erts_port_cleanup(Port *prt) -{ #ifdef ERTS_SMP - if (erts_smp_mtx_trylock(prt->lock) == EBUSY) - erts_schedule_misc_op(sched_port_cleanup, (void *) prt); - else + if (driver_lock) { + prt->lock = driver_lock; + erts_mtx_lock(driver_lock); + } + else { + prt->lock = (erts_mtx_t *) p; + p += sizeof(erts_mtx_t); + state |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK; + } + erts_smp_atomic_set_nob(&prt->run_queue, + (erts_aint_t) erts_get_runq_current(NULL)); + prt->xports = NULL; +#else + erts_atomic32_init_nob(&prt->refc, 1); + prt->cleanup = 0; #endif - port_cleanup(prt); -} + + erts_port_task_pre_init_sched(&prt->sched, busy_port_queue); -void -port_cleanup(Port *prt) -{ + prt->name = p; + sys_strcpy(p, name); + prt->drv_ptr = driver; + ERTS_P_LINKS(prt) = NULL; + ERTS_P_MONITORS(prt) = NULL; + prt->linebuf = NULL; + prt->bp = NULL; + prt->suspended = NULL; + prt->data = am_undefined; + prt->port_data_lock = NULL; + prt->control_flags = 0; + prt->bytes_in = 0; + prt->bytes_out = 0; + prt->dist_entry = NULL; + ERTS_PORT_INIT_CONNECTED(prt, pid); + prt->common.u.alive.reg = NULL; #ifdef ERTS_SMP - Uint32 port_specific; - erts_smp_mtx_t *mtx; + prt->common.u.alive.ptimer = NULL; +#else + sys_memset(&prt->common.u.alive.tm, 0, sizeof(ErlTimer)); #endif - erts_driver_t *driver; + erts_port_task_handle_init(&prt->timeout_task); + prt->psd = NULL; + prt->drv_data = (SWord) 0; + prt->os_pid = -1; - erts_smp_port_state_lock(prt); + /* Set default tracing */ + erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - driver = prt->drv_ptr; - prt->drv_ptr = NULL; - ASSERT(driver); + ASSERT(((char *) prt) == ((char *) &prt->common)); - 
ASSERT(prt->status & ERTS_PORT_SFLG_FREE_SCHEDULED); - ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&prt->refc) == 0); +#if !ERTS_PORT_INIT_INSTR_NEED_ID + /* + * When 'id' isn't needed (the normal case), it is better to + * do the initialization here avoiding unnecessary contention + * on table... + */ + port_init_instr(prt); +#endif - ASSERT(prt->status & ERTS_PORT_SFLG_PORT_DEBUG); - ASSERT(!(prt->status & ERTS_PORT_SFLG_FREE)); - prt->status = ERTS_PORT_SFLG_FREE; + if (!erts_ptab_new_element(&erts_port, + &prt->common, + (void *) prt, + insert_port_struct)) { +#if !ERTS_PORT_INIT_INSTR_NEED_ID + port_init_instr_abort(prt); +#endif #ifdef ERTS_SMP + if (driver_lock) + erts_mtx_unlock(driver_lock); +#endif + if (enop) + *enop = 0; + return NULL; + } - port_specific = (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK); + ASSERT(prt == (Port *) (erts_ptab_pix2intptr_nob( + &erts_port, + internal_port_index(prt->common.id)))); - mtx = prt->lock; - ASSERT(mtx); + initq(prt); - prt->lock = NULL; + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - erts_smp_port_state_unlock(prt); - erts_smp_mtx_unlock(mtx); + if (erts_port_schedule_all_ops) + x_pts_flgs |= ERTS_PTS_FLG_FORCE_SCHED; - if (port_specific) { - erts_smp_mtx_destroy(mtx); - erts_free(ERTS_ALC_T_PORT_LOCK, mtx); - } -#endif + if (create_flags & ERTS_CREATE_PORT_FLAG_PARALLELISM) + x_pts_flgs |= ERTS_PTS_FLG_PARALLELISM; - if (driver->handle) - erts_ddll_dereference_driver(driver->handle); + if (x_pts_flgs) + erts_smp_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs); + + erts_atomic32_set_relb(&prt->state, state); + return prt; +} + +#ifndef ERTS_SMP +void +erts_port_cleanup(Port *prt) +{ + if (prt->drv_ptr && prt->drv_ptr->handle) + erts_ddll_dereference_driver(prt->drv_ptr->handle); + prt->drv_ptr = NULL; + erts_port_dec_refc(prt); } +#endif +void +erts_port_free(Port *prt) +{ +#if defined(ERTS_SMP) || defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK) + erts_aint32_t state = erts_atomic32_read_nob(&prt->state); +#endif + ERTS_LC_ASSERT(state & (ERTS_PORT_SFLG_INITIALIZING + | ERTS_PORT_SFLG_FREE)); + ASSERT(state & ERTS_PORT_SFLG_PORT_DEBUG); + +#ifdef ERTS_SMP + ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->common.refc) == 0); +#else + ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->refc) == 0); +#endif + + erts_port_task_fini_sched(&prt->sched); + +#ifdef ERTS_SMP + ASSERT(prt->lock); + if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) + erts_mtx_destroy(prt->lock); + + /* + * We cannot dereference a driver using driver + * locking until here in smp case. Otherwise, + * the driver lock may still be in use by others. + * + * In the non-smp case we cannot do it here since + * this function may be called by non-scheduler + * threads. This is done in erts_port_cleanup() + * in the non-smp case. + */ + if (prt->drv_ptr->handle) + erts_ddll_dereference_driver(prt->drv_ptr->handle); +#endif + erts_free(ERTS_ALC_T_PORT, prt); +} /* ** Initialize v_start to point to the small fixed vector. 
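
[Aside, not part of the patch: create_port() above carves the Port struct, the optional busy-port queue, the optional port-specific mutex and the name string out of one ERTS_ALC_T_PORT allocation, which is why erts_port_free() can release everything with a single erts_free(). A stripped-down, self-contained sketch of that layout technique, using plain libc names rather than ERTS APIs:]

    #include <stdlib.h>
    #include <string.h>

    struct thing {
        char *name;
        int  *opt;
    };

    /* Stand-in for ERTS_ALC_DATA_ALIGN_SIZE(): round up to pointer size. */
    static size_t align_size(size_t sz)
    {
        return (sz + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
    }

    static struct thing *thing_create(const char *name, int want_opt)
    {
        size_t thing_sz = align_size(sizeof(struct thing));
        size_t opt_sz   = want_opt ? align_size(sizeof(int)) : 0;
        char *p = malloc(thing_sz + opt_sz + strlen(name) + 1);
        struct thing *t = (struct thing *) p;

        if (!p)
            return NULL;
        p += thing_sz;              /* optional sub-block follows the struct */
        if (want_opt) {
            t->opt = (int *) p;
            p += opt_sz;
        }
        else
            t->opt = NULL;
        t->name = p;                /* name string is stored last */
        strcpy(p, name);
        return t;                   /* released with a single free(t) */
    }
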
@@ -414,94 +518,21 @@ static void stopq(Port* prt) if (prt->port_data_lock) { driver_pdl_unlock(prt->port_data_lock); driver_pdl_dec_refc(prt->port_data_lock); - prt->port_data_lock = NULL; - } -} - - - -static void -setup_port(Port* prt, Eterm pid, erts_driver_t *driver, - ErlDrvData drv_data, char *name, Uint32 xstatus) -{ - ErtsRunQueue *runq = erts_get_runq_current(NULL); - char *new_name, *old_name; -#ifdef DEBUG - /* Make sure the debug flags survives until port is freed */ - xstatus |= ERTS_PORT_SFLG_PORT_DEBUG; -#endif - ASSERT(runq); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - - - new_name = (char*) erts_alloc(ERTS_ALC_T_PORT_NAME, sys_strlen(name)+1); - sys_strcpy(new_name, name); - erts_smp_runq_lock(runq); - erts_smp_port_state_lock(prt); - prt->os_pid = -1; - prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus; - prt->snapshot = erts_smp_atomic32_read_nob(&erts_ports_snapshot); - old_name = prt->name; - prt->name = new_name; -#ifdef ERTS_SMP - erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq); -#endif - ASSERT(!prt->drv_ptr); - prt->drv_ptr = driver; - erts_smp_port_state_unlock(prt); - erts_smp_runq_unlock(runq); -#ifdef ERTS_SMP - ASSERT(!prt->xports); -#endif - if (old_name) { - erts_free(ERTS_ALC_T_PORT_NAME, (void *) old_name); } - - prt->control_flags = 0; - prt->connected = pid; - prt->drv_data = (SWord) drv_data; - prt->bytes_in = 0; - prt->bytes_out = 0; - prt->dist_entry = NULL; - prt->reg = NULL; -#ifdef ERTS_SMP - prt->ptimer = NULL; -#else - sys_memset(&prt->tm, 0, sizeof(ErlTimer)); -#endif - erts_port_task_handle_init(&prt->timeout_task); - prt->suspended = NULL; - sys_strcpy(prt->name, name); - prt->nlinks = NULL; - prt->monitors = NULL; - prt->linebuf = NULL; - prt->bp = NULL; - prt->data = am_undefined; - /* Set default tracing */ - erts_get_default_tracing(&(prt->trace_flags), &(prt->tracer_proc)); - - prt->psd = NULL; - - initq(prt); } -void -erts_wake_process_later(Port *prt, Process *process) +int +erts_save_suspend_process_on_port(Port *prt, Process *process) { - ErtsProcList** p; - ErtsProcList* new_p; - - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - - if (prt->status & ERTS_PORT_SFLGS_DEAD) - return; - - for (p = &(prt->suspended); *p != NULL; p = &((*p)->next)) - /* Empty loop body */; - - new_p = erts_proclist_create(process); - new_p->next = NULL; - *p = new_p; + int saved; + erts_aint32_t flags; + erts_port_task_sched_lock(&prt->sched); + flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + saved = (flags & ERTS_PTS_FLGS_BUSY) && !(flags & ERTS_PTS_FLG_EXIT); + if (saved) + erts_proclist_store_last(&prt->suspended, erts_proclist_create(process)); + erts_port_task_sched_unlock(&prt->sched); + return saved; } /* @@ -513,47 +544,44 @@ erts_wake_process_later(Port *prt, Process *process) (*error_number_ptr must contain either BADARG or SYSTEM_LIMIT). The driver start function must obey the same conventions. */ -int +Port * erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ Eterm pid, /* Current process. */ char* name, /* Driver name. */ SysDriverOpts* opts, /* Options. 
*/ - int *error_number_ptr) /* errno in case -2 is returned */ + int *error_type_ptr, /* error type */ + int *error_number_ptr) /* errno in case of error type -2 */ { - int port_num; - int port_ix; + +#undef ERTS_OPEN_DRIVER_RET +#define ERTS_OPEN_DRIVER_RET(Prt, EType, ENo) \ + do { \ + if (error_type_ptr) \ + *error_type_ptr = (EType); \ + if (error_number_ptr) \ + *error_number_ptr = (ENo); \ + return (Prt); \ + } while (0) + ErlDrvData drv_data = 0; - Uint32 xstatus = 0; Port *port; int fpe_was_unmasked; - - if (error_number_ptr) - *error_number_ptr = 0; + int error_type, error_number; + int port_errno = 0; + erts_mtx_t *driver_lock = NULL; + int cprt_flgs = 0; ERTS_SMP_CHK_NO_PROC_LOCKS; - if ((port_num = get_free_port()) < 0) { - if (error_number_ptr) { - *error_number_ptr = SYSTEM_LIMIT; - } - return -3; - } - - port_ix = port_num & erts_port_tab_index_mask; - port = &erts_port[port_ix]; - port->id = make_internal_port(port_num); - - erts_smp_mtx_lock(&erts_driver_list_lock); + erts_smp_rwmtx_rlock(&erts_driver_list_lock); if (!driver) { for (driver = driver_list; driver; driver = driver->next) { if (sys_strcmp(driver->name, name) == 0) break; } if (!driver) { - erts_smp_mtx_unlock(&erts_driver_list_lock); - if (error_number_ptr) - *error_number_ptr = BADARG; - return -3; + erts_smp_rwmtx_runlock(&erts_driver_list_lock); + ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG); } } if (driver == &spawn_driver) { @@ -597,52 +625,41 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ } if (driver == NULL || (driver != &spawn_driver && opts->exit_status)) { - erts_smp_mtx_unlock(&erts_driver_list_lock); - if (error_number_ptr) { - *error_number_ptr = BADARG; - } - /* Need to mark the port as free again */ - erts_smp_port_state_lock(port); - port->status = ERTS_PORT_SFLG_FREE; - ERTS_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 2); - erts_smp_atomic_set_nob(&port->refc, 0); - erts_smp_port_state_unlock(port); - return -3; + erts_smp_rwmtx_runlock(&erts_driver_list_lock); + ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG); } - /* - * We'll set up the port before calling the start function, - * to allow message sending and setting timers in the start function. - */ - #ifdef ERTS_SMP - ASSERT(!port->lock); - port->lock = driver->lock; - if (!port->lock) { - port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK, - sizeof(erts_smp_mtx_t)); - erts_smp_mtx_init_x(port->lock, -#ifdef ERTS_ENABLE_LOCK_COUNT - (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_lock" : NULL, -#else - "port_lock", -#endif - port->id); - xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK; - } + driver_lock = driver->lock; #endif if (driver->handle != NULL) { erts_ddll_increment_port_count(driver->handle); erts_ddll_reference_driver(driver->handle); } - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_runlock(&erts_driver_list_lock); -#ifdef ERTS_SMP - erts_smp_mtx_lock(port->lock); -#endif + /* + * We'll set up the port before calling the start function, + * to allow message sending and setting timers in the start function. 
+ */ + + if (opts->parallelism) + cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM; - setup_port(port, pid, driver, drv_data, name, xstatus); + port = create_port(name, driver, driver_lock, cprt_flgs, pid, &port_errno); + if (!port) { + if (driver->handle) { + erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_ddll_decrement_port_count(driver->handle); + erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_ddll_dereference_driver(driver->handle); + } + if (port_errno) + ERTS_OPEN_DRIVER_RET(NULL, -2, port_errno); + else + ERTS_OPEN_DRIVER_RET(NULL, -3, SYSTEM_LIMIT); + } if (IS_TRACED_FL(port, F_TRACE_PORTS)) { trace_port_open(port, @@ -652,7 +669,8 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ ERTS_ATOM_ENC_LATIN1, 1)); } - + + error_number = error_type = 0; if (driver->start) { if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) { trace_sched_ports_where(port, am_in, am_start); @@ -665,56 +683,63 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ } #endif fpe_was_unmasked = erts_block_fpe(); - drv_data = (*driver->start)((ErlDrvPort)(port_ix), - name, opts); + drv_data = (*driver->start)((ErlDrvPort) port, name, opts); + if (((SWord) drv_data) == -1) + error_type = -1; + else if (((SWord) drv_data) == -2) { + /* + * We need to save errno quickly after the + * call to the 'start' callback before + * something else modify it. + */ + error_type = -2; + error_number = errno; + } + else if (((SWord) drv_data) == -3) { + error_type = -3; + error_number = BADARG; + } + erts_unblock_fpe(fpe_was_unmasked); port->caller = NIL; if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) { trace_sched_ports_where(port, am_out, am_start); } - if (error_number_ptr && ((SWord) drv_data) == (SWord) -2) - *error_number_ptr = errno; #ifdef ERTS_SMP if (port->xports) - erts_smp_xports_unlock(port); + erts_port_handle_xports(port); ASSERT(!port->xports); #endif } - if (((SWord)drv_data) == -1 || - ((SWord)drv_data) == -2 || - ((SWord)drv_data) == -3) { - int res = (int) ((SWord) drv_data); - - if (res == -3 && error_number_ptr) { - *error_number_ptr = BADARG; - } - + if (error_type) { /* * Must clean up the port. 
*/ #ifdef ERTS_SMP - erts_cancel_smp_ptimer(port->ptimer); + erts_cancel_smp_ptimer(port->common.u.alive.ptimer); #else - erts_cancel_timer(&(port->tm)); + erts_cancel_timer(&(port->common.u.alive.tm)); #endif stopq(port); - kill_port(port); if (port->linebuf != NULL) { erts_free(ERTS_ALC_T_LINEBUF, (void *) port->linebuf); port->linebuf = NULL; } if (driver->handle != NULL) { - erts_smp_mtx_lock(&erts_driver_list_lock); + erts_smp_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_runlock(&erts_driver_list_lock); } + kill_port(port); erts_port_release(port); - return res; + ERTS_OPEN_DRIVER_RET(NULL, error_type, error_number); } - port->drv_data = (SWord) drv_data; - return port_ix; + port->drv_data = (UWord) drv_data; + ERTS_OPEN_DRIVER_RET(port, 0, 0); + +#undef ERTS_OPEN_DRIVER_RET } #ifdef ERTS_SMP @@ -739,102 +764,122 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ char* name, /* Driver name */ ErlDrvData drv_data) /* Driver data */ { + int cprt_flgs = 0; Port *creator_port; Port* port; erts_driver_t *driver; Process *rp; - int port_num; - Eterm port_id; - Uint32 xstatus = 0; + erts_mtx_t *driver_lock = NULL; ERTS_SMP_CHK_NO_PROC_LOCKS; - creator_port = erts_drvport2port(creator_port_ix); + /* Need to be called from a scheduler thread */ + if (!erts_get_scheduler_id()) + return ERTS_INVALID_ERL_DRV_PORT; + + creator_port = erts_drvport2port(creator_port_ix, NULL); if (!creator_port) - return (ErlDrvTermData) -1; + return ERTS_INVALID_ERL_DRV_PORT; + + rp = erts_proc_lookup(pid); + if (!rp) + return ERTS_INVALID_ERL_DRV_PORT; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(creator_port)); driver = creator_port->drv_ptr; - erts_smp_mtx_lock(&erts_driver_list_lock); + erts_smp_rwmtx_rlock(&erts_driver_list_lock); if (!erts_ddll_driver_ok(driver->handle)) { - erts_smp_mtx_unlock(&erts_driver_list_lock); - return (ErlDrvTermData) -1; + erts_smp_rwmtx_runlock(&erts_driver_list_lock); + return ERTS_INVALID_ERL_DRV_PORT; } - rp = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_LINK); - if (!rp) { - erts_smp_mtx_unlock(&erts_driver_list_lock); - return (ErlDrvTermData) -1; /* pid does not exist */ + if (driver->handle != NULL) { + erts_ddll_increment_port_count(driver->handle); + erts_ddll_reference_referenced_driver(driver->handle); + } + +#ifdef ERTS_SMP + driver_lock = driver->lock; +#endif + + erts_smp_rwmtx_runlock(&erts_driver_list_lock); + + /* Inherit parallelism flag from parent */ + if (ERTS_PTS_FLG_PARALLELISM & + erts_smp_atomic32_read_nob(&creator_port->sched.flags)) + cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM; + port = create_port(name, driver, driver_lock, cprt_flgs, pid, NULL); + if (!port) { + if (driver->handle) { + erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_ddll_decrement_port_count(driver->handle); + erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_ddll_dereference_driver(driver->handle); + } + return ERTS_INVALID_ERL_DRV_PORT; } - if ((port_num = get_free_port()) < 0) { - errno = SYSTEM_LIMIT; + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + + erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + if (ERTS_PROC_IS_EXITING(rp)) { erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); - erts_smp_mtx_unlock(&erts_driver_list_lock); - return (ErlDrvTermData) -1; + if (driver->handle) { + erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_ddll_decrement_port_count(driver->handle); + erts_smp_rwmtx_runlock(&erts_driver_list_lock); + } + kill_port(port); + 
erts_port_release(port); + return ERTS_INVALID_ERL_DRV_PORT; } - port_id = make_internal_port(port_num); - port = &erts_port[port_num & erts_port_tab_index_mask]; + erts_add_link(&ERTS_P_LINKS(port), LINK_PID, pid); + erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, port->common.id); + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); #ifdef ERTS_SMP - ASSERT(!port->lock); - port->lock = driver->lock; - if (!port->lock) { + if (!driver_lock) { ErtsXPortsList *xplp = xports_list_alloc(); xplp->port = port; xplp->next = creator_port->xports; creator_port->xports = xplp; - port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK, - sizeof(erts_smp_mtx_t)); - erts_smp_mtx_init_locked_x(port->lock, -#ifdef ERTS_ENABLE_LOCK_COUNT - (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_lock" : NULL, -#else - "port_lock", -#endif - port_id); - xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK; } - #endif - if (driver->handle != NULL) { - erts_ddll_increment_port_count(driver->handle); - erts_ddll_reference_referenced_driver(driver->handle); - } - erts_smp_mtx_unlock(&erts_driver_list_lock); - - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + port->drv_data = (UWord) drv_data; - setup_port(port, pid, driver, drv_data, name, xstatus); - port->id = port_id; - - erts_add_link(&(port->nlinks), LINK_PID, pid); - erts_add_link(&(rp->nlinks), LINK_PID, port_id); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); - return port_num & erts_port_tab_index_mask; + return (ErlDrvPort) port; } #ifdef ERTS_SMP -void -erts_smp_xports_unlock(Port *prt) +int erts_port_handle_xports(Port *prt) { + int reds = 0; ErtsXPortsList *xplp; ASSERT(prt); xplp = prt->xports; ASSERT(xplp); while (xplp) { + Port *rprt = xplp->port; ErtsXPortsList *free_xplp; - if (xplp->port->xports) - erts_smp_xports_unlock(xplp->port); - erts_port_release(xplp->port); + erts_aint32_t state; + if (rprt->xports) + reds += erts_port_handle_xports(rprt); + state = erts_atomic32_read_nob(&rprt->state); + if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(rprt)) { + terminate_port(rprt); + reds += ERTS_PORT_REDS_TERMINATE; + } + erts_port_release(rprt); free_xplp = xplp; xplp = xplp->next; xports_list_free(free_xplp); + reds++; } prt->xports = NULL; + return reds; } #endif @@ -869,8 +914,8 @@ io_list_to_vec(Eterm obj, /* io-list */ DECLARE_ESTACK(s); Eterm* objp; char *buf = cbin->orig_bytes; - ErlDrvSizeT len = cbin->orig_size; - ErlDrvSizeT csize = 0; + Uint len = cbin->orig_size; + Uint csize = 0; int vlen = 0; char* cptr = buf; @@ -985,7 +1030,7 @@ io_list_to_vec(Eterm obj, /* io-list */ #define IO_LIST_VEC_COUNT(obj) \ do { \ - ErlDrvSizeT _size = binary_size(obj); \ + Uint _size = binary_size(obj); \ Eterm _real; \ ERTS_DECLARE_DUMMY(Uint _offset); \ int _bitoffs; \ @@ -1036,8 +1081,9 @@ do { \ */ static int -io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize, - Uint* pvsize, Uint* pcsize, Uint* total_size) +io_list_vec_len(Eterm obj, int* vsize, Uint* csize, + Uint* pvsize, Uint* pcsize, + ErlDrvSizeT* total_size) { DECLARE_ESTACK(s); Eterm* objp; @@ -1048,7 +1094,7 @@ io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize, Uint p_v_size = 0; Uint p_c_size = 0; Uint p_in_clist = 0; - Uint total; + Uint total; /* Uint due to halfword emulator */ goto L_jump_start; /* avoid a push */ @@ -1108,7 +1154,7 @@ io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize, if (total < c_size) { goto L_overflow_error; } - *total_size = total; + *total_size = (ErlDrvSizeT) total; DESTROY_ESTACK(s); *vsize = v_size; @@ -1123,56 +1169,726 @@ io_list_vec_len(Eterm obj, Uint* 
vsize, Uint* csize, return 1; } -/* write data to a port */ -int erts_write_to_port(Eterm caller_id, Port *p, Eterm list) -{ - char *buf; - erts_driver_t *drv = p->drv_ptr; - Uint size; +typedef enum { + ERTS_TRY_IMM_DRV_CALL_OK, + ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK, + ERTS_TRY_IMM_DRV_CALL_INVALID_PORT, + ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS +} ErtsTryImmDrvCallResult; + +typedef struct { + Process *c_p; /* Currently executing process (unlocked) */ + Port *port; /* Port to operate on */ + Eterm port_op; /* port operation as an atom */ + erts_aint32_t state; /* in: invalid state; out: read state (if read) */ + erts_aint32_t sched_flags; /* in: invalid flags; out: read flags (if read) */ + int async; /* Asynchronous operation */ + int pre_chk_sched_flags; /* Check sched flags before lock? */ int fpe_was_unmasked; - - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p) || ERTS_IS_CRASH_DUMPING); +} ErtsTryImmDrvCallState; + +#define ERTS_INIT_TRY_IMM_DRV_CALL_STATE(C_P, PRT, SFLGS, PTS_FLGS, A, PRT_OP) \ + {(C_P), (PRT), (PRT_OP), (SFLGS), (PTS_FLGS), (A), 1, 0} + +/* + * Try doing an immediate driver callback call from a process. If + * this fail, the operation should be scheduled in the normal case... + * + */ +static ERTS_INLINE ErtsTryImmDrvCallResult +try_imm_drv_call(ErtsTryImmDrvCallState *sp) +{ + ErtsTryImmDrvCallResult res; + erts_aint32_t invalid_state, invalid_sched_flags; + Port *prt = sp->port; + Process *c_p = sp->c_p; + + ASSERT(is_atom(sp->port_op)); + + invalid_sched_flags = ERTS_PTS_FLGS_FORCE_SCHEDULE_OP; + invalid_sched_flags |= sp->sched_flags; + if (sp->async) + invalid_sched_flags |= ERTS_PTS_FLG_PARALLELISM; + + if (sp->pre_chk_sched_flags) { + sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + if (sp->sched_flags & invalid_sched_flags) + return ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS; + } + + if (erts_smp_port_trylock(prt) == EBUSY) + return ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK; + + invalid_state = sp->state; + sp->state = erts_atomic32_read_nob(&prt->state); + if (sp->state & invalid_state) { + res = ERTS_TRY_IMM_DRV_CALL_INVALID_PORT; + goto locked_fail; + } + + sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + if (sp->sched_flags & invalid_sched_flags) { + res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS; + goto locked_fail; + } + + if (c_p) { + if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) + trace_virtual_sched(c_p, am_out); + if (erts_system_profile_flags.runnable_procs + && erts_system_profile_flags.exclusive) + profile_runnable_proc(c_p, am_inactive); + + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + } + ERTS_SMP_CHK_NO_PROC_LOCKS; - p->caller = caller_id; - if (drv->outputv != NULL) { - Uint vsize; - Uint csize; - Uint pvsize; - Uint pcsize; - ErlDrvSizeT blimit; + if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) + trace_sched_ports_where(prt, am_in, sp->port_op); + if (erts_system_profile_flags.runnable_ports + && !erts_port_is_scheduled(prt)) + profile_runnable_port(prt, am_active); + + sp->fpe_was_unmasked = erts_block_fpe(); + + return ERTS_TRY_IMM_DRV_CALL_OK; + +locked_fail: + erts_port_release(prt); + return res; +} + +static ERTS_INLINE void +finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) +{ + Port *prt = sp->port; + Process *c_p = sp->c_p; + + erts_port_driver_callback_epilogue(prt, NULL); + + erts_unblock_fpe(sp->fpe_was_unmasked); + + if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) + trace_sched_ports_where(prt, am_out, sp->port_op); + if (erts_system_profile_flags.runnable_ports + && !erts_port_is_scheduled(prt)) + 
profile_runnable_port(prt, am_inactive); + + erts_port_release(prt); + + if (c_p) { + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + + if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) + trace_virtual_sched(c_p, am_in); + if (erts_system_profile_flags.runnable_procs + && erts_system_profile_flags.exclusive) + profile_runnable_proc(c_p, am_active); + } +} + +/* + * force_imm_drv_call()/finalize_force_imm_drv_call() should *only* + * be used while crash dumping... + */ +static ErtsTryImmDrvCallResult +force_imm_drv_call(ErtsTryImmDrvCallState *sp) +{ + erts_aint32_t invalid_state; + Port *prt = sp->port; + + ASSERT(ERTS_IS_CRASH_DUMPING) + ASSERT(is_atom(sp->port_op)); + + invalid_state = sp->state; + sp->state = erts_atomic32_read_nob(&prt->state); + if (sp->state & invalid_state) + return ERTS_TRY_IMM_DRV_CALL_INVALID_PORT; + + sp->fpe_was_unmasked = erts_block_fpe(); + + return ERTS_TRY_IMM_DRV_CALL_OK; +} + +static void +finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp) +{ + erts_unblock_fpe(sp->fpe_was_unmasked); +} + +#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (REF_THING_SIZE + 3) + +static ERTS_INLINE void +queue_port_sched_op_reply(Process *rp, + ErtsProcLocks *rp_locksp, + Eterm *hp_start, + Eterm *hp, + Uint h_size, + ErlHeapFragment* bp, + Uint32 *ref_num, + Eterm msg) +{ + Eterm ref = make_internal_ref(hp); + write_ref_thing(hp, ref_num[0], ref_num[1], ref_num[2]); + hp += REF_THING_SIZE; + + msg = TUPLE2(hp, ref, msg); + hp += 3; + + if (!bp) { + HRelease(rp, hp_start + h_size, hp); + } + else { + Uint used_h_size = hp - hp_start; + ASSERT(h_size >= used_h_size); + if (h_size > used_h_size) + bp = erts_resize_message_buffer(bp, used_h_size, &msg, 1); + } + + erts_queue_message(rp, + rp_locksp, + bp, + msg, + NIL +#ifdef USE_VM_PROBES + , NIL +#endif + ); +} + +static void +port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg) +{ + Process *rp = erts_proc_lookup_raw(to); + if (rp) { + ErlOffHeap *ohp; + ErlHeapFragment* bp; + Eterm msg_copy; + Uint hsz, msg_sz; + Eterm *hp, *hp_start; + ErtsProcLocks rp_locks = 0; + + hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE; + if (is_immed(msg)) + msg_sz = 0; + else { + msg_sz = size_object(msg); + hsz += msg_sz; + } + + hp_start = hp = erts_alloc_message_heap(hsz, + &bp, + &ohp, + rp, + &rp_locks); + if (is_immed(msg)) + msg_copy = msg; + else + msg_copy = copy_struct(msg, msg_sz, &hp, ohp); + + queue_port_sched_op_reply(rp, + &rp_locks, + hp_start, + hp, + hsz, + bp, + ref_num, + msg_copy); + + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + } +} + + +ErtsPortOpResult +erts_schedule_proc2port_signal(Process *c_p, + Port *prt, + Eterm caller, + Eterm *refp, + ErtsProc2PortSigData *sigdp, + int task_flags, + ErtsProc2PortSigCallback callback) +{ + int sched_res; + if (!refp) { + if (c_p) + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + } + else { + ASSERT(c_p); + sigdp->flags |= ERTS_P2P_SIG_DATA_FLG_REPLY; + erts_make_ref_in_array(sigdp->ref); + *refp = erts_proc_store_ref(c_p, sigdp->ref); + + /* + * Caller needs to wait for a message containing + * the ref that we just created. No such message + * can exist in callers message queue at this time. + * We therefore move the save pointer of the + * callers message queue to the end of the queue. + * + * NOTE: It is of vital importance that the caller + * immediately do a receive unconditionaly + * waiting for the message with the reference; + * otherwise, next receive will *not* work + * as expected! 
+ */ + erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + + if (ERTS_PROC_PENDING_EXIT(c_p)) { + /* need to exit caller instead */ + erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + KILL_CATCHES(c_p); + c_p->freason = EXC_EXIT; + return ERTS_PORT_OP_CALLER_EXIT; + } + + ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + c_p->msg.save = c_p->msg.last; + + erts_smp_proc_unlock(c_p, + (ERTS_PROC_LOCK_MAIN + | ERTS_PROC_LOCKS_MSG_RECEIVE)); + } + + + sigdp->caller = caller; + + /* Schedule port close call for later execution... */ + sched_res = erts_port_task_schedule(prt->common.id, + NULL, + ERTS_PORT_TASK_PROC_SIG, + sigdp, + callback, + task_flags); + + if (c_p) + erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + + if (sched_res != 0) { + if (refp) + *refp = NIL; + return ERTS_PORT_OP_DROPPED; + } + return ERTS_PORT_OP_SCHEDULED; +} + +static ERTS_INLINE void +send_badsig(Port *prt) +{ + ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; + Process* rp; + Eterm connected = ERTS_PORT_GET_CONNECTED(prt); + + ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_get_scheduler_id()); + + ASSERT(is_internal_pid(connected)); + + rp = erts_proc_lookup_raw(connected); + if (rp) { + erts_smp_proc_lock(rp, rp_locks); + if (!ERTS_PROC_IS_EXITING(rp)) + (void) erts_send_exit_signal(NULL, + prt->common.id, + rp, + &rp_locks, + am_badsig, + NIL, + NULL, + 0); + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + } +} + +static void +badsig_received(int bang_op, + Port *prt, + erts_aint32_t state, + int bad_output_value) +{ + /* + * if (bang_op) + * we are part of a "Prt ! Something" operation + * else + * we are part of a call to a port BIF + * behave accordingly... + */ + if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) { + if (bad_output_value) { + erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); + erts_dsprintf(dsbufp, "Bad value on output port '%s'\n", prt->name); + erts_send_error_to_logger_nogl(dsbufp); + } + if (bang_op) + send_badsig(prt); + } +} + +static int +port_badsig(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp) +{ + if (op == ERTS_PROC2PORT_SIG_EXEC) + badsig_received(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP, + prt, + state, + sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT); + if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) + port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg); + return ERTS_PORT_REDS_BADSIG; +} + + +/* + * bad_port_signal() will + * - preserve signal order of signals. + * - send a 'badsig' exit signal to connected process if 'from' is an + * internal pid and the port is alive when the bad signal reaches + * it. + */ +static ErtsPortOpResult +bad_port_signal(Process *c_p, + int flags, + Port *prt, + Eterm from, + Eterm *refp, + Eterm port_op) +{ + ErtsProc2PortSigData *sigdp; + ErtsTryImmDrvCallResult try_call_res; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE( + c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + 0, + !refp, + port_op); + + try_call_res = try_imm_drv_call(&try_call_state); + switch (try_call_res) { + case ERTS_TRY_IMM_DRV_CALL_OK: + badsig_received(flags & ERTS_PORT_SIG_FLG_BANG_OP, + prt, + try_call_state.state, + flags & ERTS_PORT_SIG_FLG_BAD_OUTPUT); + finalize_imm_drv_call(&try_call_state); + if (c_p) + BUMP_REDS(c_p, ERTS_PORT_REDS_BADSIG); + return ERTS_PORT_OP_BADARG; + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + return ERTS_PORT_OP_DROPPED; + case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS: + case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK: + /* Schedule badsig() call instead... 
*/ + break; + } + + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = (flags & ~ERTS_P2P_SIG_TYPE_MASK) | ERTS_P2P_SIG_TYPE_BAD; + + return erts_schedule_proc2port_signal(c_p, + prt, + c_p->common.id, + refp, + sigdp, + 0, + port_badsig); +} + + +/* + * Driver outputv() callback + */ + +static ERTS_INLINE void +call_driver_outputv(int bang_op, + Eterm caller, + Eterm from, + Port *prt, + erts_driver_t *drv, + ErlIOVec *evp) +{ + /* + * if (bang_op) + * we are part of a "Prt ! {From, {command, Data}}" operation + * else + * we are part of a call to port_command/[2,3] + * behave accordingly... + */ + if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt)) + send_badsig(prt); + else { + ErlDrvSizeT size = evp->size; + + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + || ERTS_IS_CRASH_DUMPING); + +#ifdef USE_VM_PROBES + if (DTRACE_ENABLED(driver_outputv)) { + DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt); + DTRACE4(driver_outputv, process_str, port_str, prt->name, size); + } +#endif + + prt->caller = caller; + (*drv->outputv)((ErlDrvData) prt->drv_data, evp); + prt->caller = NIL; + + prt->bytes_out += size; + erts_smp_atomic_add_nob(&erts_bytes_out, size); + } +} + +static ERTS_INLINE void +cleanup_scheduled_outputv(ErlIOVec *ev, ErlDrvBinary *cbinp) +{ + int i; + /* Need to free all binaries */ + for (i = 1; i < ev->vsize; i++) + if (ev->binv[i]) + driver_free_binary(ev->binv[i]); + if (cbinp) + driver_free_binary(cbinp); + erts_free(ERTS_ALC_T_DRV_CMD_DATA, ev); +} + +static int +port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp) +{ + Eterm reply; + + switch (op) { + case ERTS_PROC2PORT_SIG_EXEC: + /* Execution of a scheduled outputv() call */ + + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + + if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) + reply = am_badarg; + else { + call_driver_outputv(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP, + sigdp->caller, + sigdp->u.outputv.from, + prt, + prt->drv_ptr, + sigdp->u.outputv.evp); + reply = am_true; + } + break; + case ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND: + reply = am_false; + break; + default: + reply = am_badarg; + break; + } + + if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) + port_sched_op_reply(sigdp->caller, sigdp->ref, reply); + + cleanup_scheduled_outputv(sigdp->u.outputv.evp, + sigdp->u.outputv.cbinp); + + return ERTS_PORT_REDS_CMD_OUTPUTV; +} + +/* + * Driver output() callback + */ + +static ERTS_INLINE void +call_driver_output(int bang_op, + Eterm caller, + Eterm from, + Port *prt, + erts_driver_t *drv, + char *bufp, + ErlDrvSizeT size) +{ + /* + * if (bang_op) + * we are part of a "Prt ! {From, {command, Data}}" operation + * else + * we are part of a call to port_command/[2,3] + * behave accordingly... 
+ */ + if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt)) + send_badsig(prt); + else { + + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + || ERTS_IS_CRASH_DUMPING); + +#ifdef USE_VM_PROBES + if (DTRACE_ENABLED(driver_output)) { + DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt); + DTRACE4(driver_output, process_str, port_str, prt->name, size); + } +#endif + + prt->caller = caller; + (*drv->output)((ErlDrvData) prt->drv_data, bufp, size); + prt->caller = NIL; + + prt->bytes_out += size; + erts_smp_atomic_add_nob(&erts_bytes_out, size); + } +} + +static ERTS_INLINE void +cleanup_scheduled_output(char *bufp) +{ + erts_free(ERTS_ALC_T_DRV_CMD_DATA, bufp); +} + +static int +port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp) +{ + Eterm reply; + + switch (op) { + case ERTS_PROC2PORT_SIG_EXEC: + /* Execution of a scheduled output() call */ + + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + + if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) + reply = am_badarg; + else { + call_driver_output(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP, + sigdp->caller, + sigdp->u.output.from, + prt, + prt->drv_ptr, + sigdp->u.output.bufp, + sigdp->u.output.size); + reply = am_true; + } + break; + case ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND: + reply = am_false; + break; + default: + reply = am_badarg; + break; + } + + if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) + port_sched_op_reply(sigdp->caller, sigdp->ref, reply); + + cleanup_scheduled_output(sigdp->u.output.bufp); + + return ERTS_PORT_REDS_CMD_OUTPUT; +} + +ErtsPortOpResult +erts_port_output(Process *c_p, + int flags, + Port *prt, + Eterm from, + Eterm list, + Eterm *refp) +{ + ErtsPortOpResult res; + ErtsProc2PortSigData *sigdp; + erts_driver_t *drv = prt->drv_ptr; + size_t size; + int try_call; + erts_aint32_t sched_flags, busy_flgs, invalid_flags; + int task_flags; + ErtsProc2PortSigCallback port_sig_callback; + ErlDrvBinary *cbin = NULL; + ErlIOVec *evp = NULL; + char *buf = NULL; + int force_immediate_call = (flags & ERTS_PORT_SIG_FLG_FORCE_IMM_CALL); + + ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP + | ERTS_PORT_SIG_FLG_NOSUSPEND + | ERTS_PORT_SIG_FLG_FORCE + | ERTS_PORT_SIG_FLG_FORCE_IMM_CALL)) == 0); + + busy_flgs = ((flags & ERTS_PORT_SIG_FLG_FORCE) + ? ((erts_aint32_t) 0) + : ERTS_PTS_FLGS_BUSY); + invalid_flags = busy_flgs; + if (!refp) + invalid_flags |= ERTS_PTS_FLG_PARALLELISM; + + /* + * Assumes caller have checked that port is valid... + */ + + sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + if (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT)) + return ((sched_flags & ERTS_PTS_FLG_EXIT) + ? ERTS_PORT_OP_DROPPED + : ERTS_PORT_OP_BUSY); + + try_call = (force_immediate_call /* crash dumping */ + || !(sched_flags & (invalid_flags + | ERTS_PTS_FLGS_FORCE_SCHEDULE_OP))); + +#ifdef USE_VM_PROBES + if(DTRACE_ENABLED(port_command)) { + DTRACE_FORMAT_COMMON_PID_AND_PORT(c_p ? 
c_p->common.id : ERTS_INVALID_PID, prt); + DTRACE4(port_command, process_str, port_str, prt->name, "command"); + } +#endif + + if (drv->outputv) { + ErlIOVec ev; SysIOVec iv[SMALL_WRITE_VEC]; ErlDrvBinary* bv[SMALL_WRITE_VEC]; SysIOVec* ivp; ErlDrvBinary** bvp; - ErlDrvBinary* cbin; - ErlIOVec ev; + int vsize; + Uint csize; + Uint pvsize; + Uint pcsize; + Uint blimit; + size_t iov_offset, binv_offset, alloc_size; - if (io_list_vec_len(list, &vsize, &csize, - &pvsize, &pcsize, &size)) { + if (io_list_vec_len(list, &vsize, &csize, &pvsize, &pcsize, &size)) goto bad_value; + + iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlIOVec)); + binv_offset = iov_offset; + binv_offset += ERTS_ALC_DATA_ALIGN_SIZE((vsize+1)*sizeof(SysIOVec)); + alloc_size = binv_offset; + alloc_size += (vsize+1)*sizeof(ErlDrvBinary *); + + if (try_call && vsize < SMALL_WRITE_VEC) { + ivp = ev.iov = iv; + bvp = ev.binv = bv; + evp = &ev; + } + else { + char *ptr = erts_alloc((try_call + ? ERTS_ALC_T_TMP + : ERTS_ALC_T_DRV_CMD_DATA), alloc_size); + + evp = (ErlIOVec *) ptr; + ivp = evp->iov = (SysIOVec *) (ptr + iov_offset); + bvp = evp->binv = (ErlDrvBinary **) (ptr + binv_offset); } + /* To pack or not to pack (small binaries) ...? */ - vsize++; - if (vsize <= SMALL_WRITE_VEC) { + if (vsize < SMALL_WRITE_VEC) { /* Do NOT pack */ blimit = 0; - } else { + } + else { /* Do pack */ vsize = pvsize + 1; csize = pcsize; blimit = ERL_SMALL_IO_BIN_LIMIT; } /* Use vsize and csize from now on */ - if (vsize <= SMALL_WRITE_VEC) { - ivp = iv; - bvp = bv; - } else { - ivp = (SysIOVec *) erts_alloc(ERTS_ALC_T_TMP, - vsize * sizeof(SysIOVec)); - bvp = (ErlDrvBinary**) erts_alloc(ERTS_ALC_T_TMP, - vsize * sizeof(ErlDrvBinary*)); - } + cbin = driver_alloc_binary(csize); if (!cbin) erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize)); @@ -1181,210 +1897,769 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list) ivp[0].iov_base = NULL; ivp[0].iov_len = 0; bvp[0] = NULL; - ev.vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit); - if (ev.vsize < 0) { - if (ivp != iv) { - erts_free(ERTS_ALC_T_TMP, (void *) ivp); - } - if (bvp != bv) { - erts_free(ERTS_ALC_T_TMP, (void *) bvp); - } + evp->vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit); + if (evp->vsize < 0) { + if (evp != &ev) + erts_free(try_call ? ERTS_ALC_T_TMP : ERTS_ALC_T_DRV_CMD_DATA, + evp); driver_free_binary(cbin); goto bad_value; } - ev.vsize++; #if 0 /* This assertion may say something useful, but it can be falsified during the emulator test suites. */ - ASSERT(ev.vsize == vsize); + ASSERT(evp->vsize == vsize); #endif - ev.size = size; /* total size */ - ev.iov = ivp; - ev.binv = bvp; -#ifdef USE_VM_PROBES - if (DTRACE_ENABLED(driver_outputv)) { - DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p) - DTRACE4(driver_outputv, process_str, port_str, p->name, size); - } -#endif - fpe_was_unmasked = erts_block_fpe(); - (*drv->outputv)((ErlDrvData)p->drv_data, &ev); - erts_unblock_fpe(fpe_was_unmasked); - if (ivp != iv) { - erts_free(ERTS_ALC_T_TMP, (void *) ivp); - } - if (bvp != bv) { - erts_free(ERTS_ALC_T_TMP, (void *) bvp); + evp->vsize++; + evp->size = size; /* total size */ + + if (!try_call) { + int i; + /* Need to increase refc on all binaries */ + for (i = 1; i < evp->vsize; i++) + if (bvp[i]) + driver_binary_inc_refc(bvp[i]); } - driver_free_binary(cbin); - } else { - int r; - - /* Try with an 8KB buffer first (will often be enough I guess). */ - size = 8*1024; - /* See below why the extra byte is added. 
*/ - buf = erts_alloc(ERTS_ALC_T_TMP, size+1); - r = io_list_to_buf(list, buf, size); + else { + int i; + ErlIOVec *new_evp; + ErtsTryImmDrvCallResult try_call_res; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE( + c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + invalid_flags, + !refp, + am_command); + + try_call_state.pre_chk_sched_flags = 0; /* already checked */ + if (force_immediate_call) + try_call_res = force_imm_drv_call(&try_call_state); + else + try_call_res = try_imm_drv_call(&try_call_state); + switch (try_call_res) { + case ERTS_TRY_IMM_DRV_CALL_OK: + call_driver_outputv(flags & ERTS_PORT_SIG_FLG_BANG_OP, + c_p ? c_p->common.id : ERTS_INVALID_PID, + from, + prt, + drv, + evp); + if (force_immediate_call) + finalize_force_imm_drv_call(&try_call_state); + else + finalize_imm_drv_call(&try_call_state); + /* Fall through... */ + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + driver_free_binary(cbin); + if (evp != &ev) + erts_free(ERTS_ALC_T_TMP, evp); + if (try_call_res != ERTS_TRY_IMM_DRV_CALL_OK) + return ERTS_PORT_OP_DROPPED; + if (c_p) + BUMP_REDS(c_p, ERTS_PORT_REDS_CMD_OUTPUTV); + return ERTS_PORT_OP_DONE; + case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS: + sched_flags = try_call_state.sched_flags; + case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK: + /* Schedule outputv() call instead... */ + break; + } -#ifdef USE_VM_PROBES - if(DTRACE_ENABLED(port_command)) { - DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p) - DTRACE4(port_command, process_str, port_str, p->name, "command"); - } + /* Need to increase refc on all binaries */ + for (i = 1; i < evp->vsize; i++) + if (bvp[i]) + driver_binary_inc_refc(bvp[i]); + + new_evp = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, alloc_size); + + if (evp != &ev) { + sys_memcpy((void *) new_evp, (void *) evp, alloc_size); + new_evp->iov = (SysIOVec *) (((char *) new_evp) + + iov_offset); + bvp = new_evp->binv = (ErlDrvBinary **) (((char *) new_evp) + + binv_offset); + +#ifdef DEBUG + ASSERT(new_evp->vsize == evp->vsize); + ASSERT(new_evp->size == evp->size); + for (i = 0; i < evp->vsize; i++) { + ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len); + ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base); + ASSERT(new_evp->binv[i] == evp->binv[i]); + } #endif - if (r >= 0) { - size -= r; -#ifdef USE_VM_PROBES - if (DTRACE_ENABLED(driver_output)) { - DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p) - DTRACE4(driver_output, process_str, port_str, p->name, size); - } + erts_free(ERTS_ALC_T_TMP, evp); + } + else { /* from stack allocated structure; offsets may differ */ + + sys_memcpy((void *) new_evp, (void *) evp, sizeof(ErlIOVec)); + new_evp->iov = (SysIOVec *) (((char *) new_evp) + + iov_offset); + sys_memcpy((void *) new_evp->iov, + (void *) evp->iov, + evp->vsize * sizeof(SysIOVec)); + new_evp->binv = (ErlDrvBinary **) (((char *) new_evp) + + binv_offset); + sys_memcpy((void *) new_evp->binv, + (void *) evp->binv, + evp->vsize * sizeof(ErlDrvBinary *)); + +#ifdef DEBUG + ASSERT(new_evp->vsize == evp->vsize); + ASSERT(new_evp->size == evp->size); + for (i = 0; i < evp->vsize; i++) { + ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len); + ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base); + ASSERT(new_evp->binv[i] == evp->binv[i]); + } #endif - fpe_was_unmasked = erts_block_fpe(); - (*drv->output)((ErlDrvData)p->drv_data, buf, size); - erts_unblock_fpe(fpe_was_unmasked); - erts_free(ERTS_ALC_T_TMP, buf); + + } + + evp = new_evp; } - else if (r == -2) { - erts_free(ERTS_ALC_T_TMP, buf); - goto bad_value; + + sigdp = 
erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUTV; + sigdp->u.outputv.from = from; + sigdp->u.outputv.evp = evp; + sigdp->u.outputv.cbinp = cbin; + port_sig_callback = port_sig_outputv; + } + else { + ErlDrvSizeT r; + + /* + * Apperently there exist code that write 1 byte to + * much in buffer. Where it resides I don't know, but + * we can live with one byte extra allocated... + */ + + if (!try_call) { + if (erts_iolist_size(list, &size)) + goto bad_value; + + buf = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, size + 1); + + r = erts_iolist_to_buf(list, buf, size); + ASSERT(ERTS_IOLIST_TO_BUF_SUCCEEDED(r)); } else { - ASSERT(r == -1); /* Overflow */ - erts_free(ERTS_ALC_T_TMP, buf); - if (erts_iolist_size(list, &size)) { - goto bad_value; + char *new_buf; + ErtsTryImmDrvCallResult try_call_res; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE( + c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + invalid_flags, + !refp, + am_command); + + /* Try with an 8KB buffer first (will often be enough I guess). */ + size = 8*1024; + + buf = erts_alloc(ERTS_ALC_T_TMP, size + 1); + r = erts_iolist_to_buf(list, buf, size); + + if (ERTS_IOLIST_TO_BUF_SUCCEEDED(r)) { + ASSERT(r <= size); + size -= r; + } + else { + erts_free(ERTS_ALC_T_TMP, buf); + if (r == ERTS_IOLIST_TO_BUF_TYPE_ERROR) + goto bad_value; + ASSERT(r == ERTS_IOLIST_TO_BUF_OVERFLOW); + if (erts_iolist_size(list, &size)) + goto bad_value; + buf = erts_alloc(ERTS_ALC_T_TMP, size + 1); + r = erts_iolist_to_buf(list, buf, size); + ASSERT(ERTS_IOLIST_TO_BUF_SUCCEEDED(r)); } - /* - * I know drivers that pad space with '\0' this is clearly - * incorrect but I don't feel like fixing them now, insted - * add ONE extra byte. - */ - buf = erts_alloc(ERTS_ALC_T_TMP, size+1); - r = io_list_to_buf(list, buf, size); -#ifdef USE_VM_PROBES - if (DTRACE_ENABLED(driver_output)) { - DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p) - DTRACE4(driver_output, process_str, port_str, p->name, size); - } -#endif - fpe_was_unmasked = erts_block_fpe(); - (*drv->output)((ErlDrvData)p->drv_data, buf, size); - erts_unblock_fpe(fpe_was_unmasked); + try_call_state.pre_chk_sched_flags = 0; /* already checked */ + if (force_immediate_call) + try_call_res = force_imm_drv_call(&try_call_state); + else + try_call_res = try_imm_drv_call(&try_call_state); + switch (try_call_res) { + case ERTS_TRY_IMM_DRV_CALL_OK: + call_driver_output(flags & ERTS_PORT_SIG_FLG_BANG_OP, + c_p ? c_p->common.id : ERTS_INVALID_PID, + from, + prt, + drv, + buf, + size); + if (force_immediate_call) + finalize_force_imm_drv_call(&try_call_state); + else + finalize_imm_drv_call(&try_call_state); + /* Fall through... */ + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + erts_free(ERTS_ALC_T_TMP, buf); + if (try_call_res != ERTS_TRY_IMM_DRV_CALL_OK) + return ERTS_PORT_OP_DROPPED; + if (c_p) + BUMP_REDS(c_p, ERTS_PORT_REDS_CMD_OUTPUT); + return ERTS_PORT_OP_DONE; + case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS: + sched_flags = try_call_state.sched_flags; + case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK: + /* Schedule outputv() call instead... 
*/ + break; + } + + new_buf = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, size + 1); + sys_memcpy(new_buf, buf, size); erts_free(ERTS_ALC_T_TMP, buf); + buf = new_buf; } + + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUT; + sigdp->u.output.from = from; + sigdp->u.output.bufp = buf; + sigdp->u.output.size = size; + port_sig_callback = port_sig_output; + } + + task_flags = ERTS_PT_FLG_WAIT_BUSY; + sigdp->flags |= flags; + if (flags & (ERTS_P2P_SIG_DATA_FLG_FORCE|ERTS_P2P_SIG_DATA_FLG_NOSUSPEND)) { + task_flags = 0; + if (flags & ERTS_P2P_SIG_DATA_FLG_FORCE) + sigdp->flags &= ~ERTS_P2P_SIG_DATA_FLG_NOSUSPEND; + else if (flags & ERTS_P2P_SIG_DATA_FLG_NOSUSPEND) + task_flags = ERTS_PT_FLG_NOSUSPEND; + } + + res = erts_schedule_proc2port_signal(c_p, + prt, + c_p ? c_p->common.id : ERTS_INVALID_PID, + refp, + sigdp, + task_flags, + port_sig_callback); + + if (res != ERTS_PORT_OP_SCHEDULED) { + if (drv->outputv) + cleanup_scheduled_outputv(evp, cbin); + else + cleanup_scheduled_output(buf); + return res; } - p->bytes_out += size; - erts_smp_atomic_add_nob(&erts_bytes_out, size); -#ifdef ERTS_SMP - if (p->xports) - erts_smp_xports_unlock(p); - ASSERT(!p->xports); + if (!(sched_flags & ERTS_PTS_FLG_EXIT) && (sched_flags & busy_flgs)) + return ERTS_PORT_OP_BUSY_SCHEDULED; + + return res; + +bad_value: + + flags |= ERTS_PORT_SIG_FLG_BAD_OUTPUT; + return bad_port_signal(c_p, flags, prt, from, refp, am_command); +} + +static ERTS_INLINE ErtsPortOpResult +call_deliver_port_exit(int bang_op, + Eterm from, + Port *prt, + erts_aint32_t state, + Eterm reason, + int broken_link) +{ + /* + * if (bang_op) + * we are part of a "Prt ! {From, close}" operation + * else + * we are part of a call to port_close(Port) + * behave accordingly... + */ + + if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) + return ERTS_PORT_OP_DROPPED; + + if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt)) { + send_badsig(prt); + return ERTS_PORT_OP_DROPPED; + } + + if (broken_link) { + ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from); + if (lnk) + erts_destroy_link(lnk); + else + return ERTS_PORT_OP_DROPPED; + } + + if (!erts_deliver_port_exit(prt, from, reason, bang_op)) + return ERTS_PORT_OP_DROPPED; + +#ifdef USE_VM_PROBES + if(DTRACE_ENABLED(port_command) && bang_op) { + DTRACE_FORMAT_COMMON_PID_AND_PORT(from, prt); + DTRACE4(port_command, process_str, port_str, prt->name, "close"); + } #endif - p->caller = NIL; - return 0; - bad_value: - p->caller = NIL; - { - erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); - erts_dsprintf(dsbufp, "Bad value on output port '%s'\n", p->name); - erts_send_error_to_logger_nogl(dsbufp); - return 1; + return ERTS_PORT_OP_DONE; +} + +static int +port_sig_exit(Port *prt, + erts_aint32_t state, + int op, + ErtsProc2PortSigData *sigdp) +{ + Eterm msg = am_badarg; + if (op == ERTS_PROC2PORT_SIG_EXEC) { + ErtsPortOpResult res; + int bang_op = sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP; + int broken_link = sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK; + res = call_deliver_port_exit(bang_op, + sigdp->u.exit.from, + prt, + state, + sigdp->u.exit.reason, + broken_link); + + if (res == ERTS_PORT_OP_DONE) + msg = am_true; + } + if (sigdp->u.exit.bp) + free_message_buffer(sigdp->u.exit.bp); + if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) + port_sched_op_reply(sigdp->caller, sigdp->ref, msg); + + return ERTS_PORT_REDS_EXIT; +} + +ErtsPortOpResult +erts_port_exit(Process *c_p, + int flags, + Port *prt, + Eterm from, + Eterm reason, + Eterm *refp) +{ + 
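/*
 * Editor's illustrative sketch (not part of the diff): erts_port_exit()
 * below has the same shape as the other new signal entry points -- first
 * try_imm_drv_call() to run the callback immediately under the port
 * lock, and only when that is not possible fall back to scheduling a
 * proc-to-port signal.  The toy below shows the generic "try immediate,
 * else enqueue for later" pattern with pthreads and hypothetical names
 * (try_now, pending queue); it does not reproduce the ERTS locking
 * rules, and in a truly concurrent setting the queue itself would need
 * its own lock (omitted here, the demo is single threaded).
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_PENDING 16

typedef void (*op_fn)(void *);

static pthread_mutex_t resource_lock = PTHREAD_MUTEX_INITIALIZER;
static struct { op_fn fn; void *arg; } pending[MAX_PENDING];
static int npending;

/* Run op(arg) now if the resource lock is free, otherwise queue it. */
static int try_now(op_fn op, void *arg)
{
    if (pthread_mutex_trylock(&resource_lock) == 0) {
        op(arg);                            /* immediate path             */
        pthread_mutex_unlock(&resource_lock);
        return 1;
    }
    if (npending < MAX_PENDING) {           /* scheduled path             */
        pending[npending].fn = op;
        pending[npending].arg = arg;
        npending++;
    }
    return 0;
}

/* The lock owner drains the queue before giving up the resource. */
static void drain_pending(void)
{
    for (int i = 0; i < npending; i++)
        pending[i].fn(pending[i].arg);
    npending = 0;
}

static void close_op(void *arg) { printf("closing %s\n", (char *) arg); }

int main(void)
{
    if (!try_now(close_op, "port-like resource"))
        puts("operation was queued");
    pthread_mutex_lock(&resource_lock);
    drain_pending();                        /* nothing queued in this run */
    pthread_mutex_unlock(&resource_lock);
    return 0;
}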
ErtsPortOpResult res; + ErtsProc2PortSigData *sigdp; + ErlHeapFragment *bp = NULL; + + ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP + | ERTS_PORT_SIG_FLG_BROKEN_LINK + | ERTS_PORT_SIG_FLG_FORCE_SCHED)) == 0); + + if (!(flags & ERTS_PORT_SIG_FLG_FORCE_SCHED)) { + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + 0, + !refp, + am_exit); + + + switch (try_imm_drv_call(&try_call_state)) { + case ERTS_TRY_IMM_DRV_CALL_OK: { + res = call_deliver_port_exit(flags & ERTS_PORT_SIG_FLG_BANG_OP, + from, + prt, + try_call_state.state, + reason, + flags & ERTS_PORT_SIG_FLG_BROKEN_LINK); + finalize_imm_drv_call(&try_call_state); + if (res == ERTS_PORT_OP_DONE && c_p) + BUMP_REDS(c_p, ERTS_PORT_REDS_EXIT); + return res; + } + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + return ERTS_PORT_OP_DROPPED; + default: + /* Schedule call instead... */ + break; + } } + + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_EXIT | flags; + sigdp->u.exit.from = from; + + if (is_immed(reason)) { + sigdp->u.exit.reason = reason; + sigdp->u.exit.bp = NULL; + } + else { + Eterm *hp; + Uint hsz = size_object(reason); + bp = new_message_buffer(hsz); + sigdp->u.exit.bp = bp; + hp = bp->mem; + sigdp->u.exit.reason = copy_struct(reason, + hsz, + &hp, + &bp->off_heap); + } + + res = erts_schedule_proc2port_signal(c_p, + prt, + c_p ? c_p->common.id : from, + refp, + sigdp, + 0, + port_sig_exit); + + if (res == ERTS_PORT_OP_DROPPED) { + if (bp) + free_message_buffer(bp); + } + + return res; } -/* initialize the port array */ -void init_io(void) +static ErtsPortOpResult +set_port_connected(int bang_op, + Eterm from, + Port *prt, + erts_aint32_t state, + Eterm connect) { - int i; - ErlDrvEntry** dp; - char maxports[21]; /* enough for any 64-bit integer */ - size_t maxportssize = sizeof(maxports); - Uint ports_bits = ERTS_PORTS_BITS; - Sint port_extra_shift; + /* + * if (bang_op) + * we are part of a "Prt ! {From, {connect, Connect}}" operation + * else + * we are part of a call to port_connect(Port, Connect) + * behave accordingly... 
+ */ -#ifdef ERTS_SMP - init_xports_list_alloc(); + if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) + return ERTS_PORT_OP_DROPPED; + + if (bang_op) { /* Bang operation */ + if (is_not_internal_pid(connect) || ERTS_PORT_GET_CONNECTED(prt) != from) { + send_badsig(prt); + return ERTS_PORT_OP_DROPPED; + } + + ERTS_PORT_SET_CONNECTED(prt, connect); + deliver_result(prt->common.id, from, am_connected); + +#ifdef USE_VM_PROBES + if(DTRACE_ENABLED(port_command)) { + DTRACE_FORMAT_COMMON_PID_AND_PORT(from, prt); + DTRACE4(port_command, process_str, port_str, prt->name, "connect"); + } #endif + } + else { /* Port BIF operation */ + Process *rp = erts_proc_lookup_raw(connect); + if (!rp) + return ERTS_PORT_OP_DROPPED; + erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + if (ERTS_PROC_IS_EXITING(rp)) { + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + return ERTS_PORT_OP_DROPPED; + } - pdl_init(); + erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, prt->common.id); + erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, connect); + + ERTS_PORT_SET_CONNECTED(prt, connect); + + erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + +#ifdef USE_VM_PROBES + if (DTRACE_ENABLED(port_connect)) { + DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); + DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); + DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE); + + dtrace_pid_str(connect, process_str); + erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id); + dtrace_proc_str(rp, newprocess_str); + DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str); + } +#endif + } + + return ERTS_PORT_OP_DONE; +} - if (erts_sys_getenv_raw("ERL_MAX_PORTS", maxports, &maxportssize) == 0) - erts_max_ports = atoi(maxports); +static int +port_sig_connect(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp) +{ + Eterm msg = am_badarg; + if (op == ERTS_PROC2PORT_SIG_EXEC) { + ErtsPortOpResult res; + res = set_port_connected(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP, + sigdp->u.connect.from, + prt, + state, + sigdp->u.connect.connected); + if (res == ERTS_PORT_OP_DONE) + msg = am_true; + } + if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) + port_sched_op_reply(sigdp->caller, sigdp->ref, msg); + return ERTS_PORT_REDS_CONNECT; +} + +ErtsPortOpResult +erts_port_connect(Process *c_p, + int flags, + Port *prt, + Eterm from, + Eterm connect, + Eterm *refp) +{ + ErtsProc2PortSigData *sigdp; + Eterm connect_id; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + 0, + !refp, + am_connect); + + ASSERT((flags & ~ERTS_PORT_SIG_FLG_BANG_OP) == 0); + + if (is_not_internal_pid(connect)) + connect_id = NIL; /* Fail in op (for signal order) */ else - erts_max_ports = sys_max_files(); + connect_id = connect; + + switch (try_imm_drv_call(&try_call_state)) { + case ERTS_TRY_IMM_DRV_CALL_OK: { + ErtsPortOpResult res; + res = set_port_connected(flags & ERTS_PORT_SIG_FLG_BANG_OP, + from, + prt, + try_call_state.state, + connect_id); + finalize_imm_drv_call(&try_call_state); + if (res == ERTS_PORT_OP_DONE) + BUMP_REDS(c_p, ERTS_PORT_REDS_CONNECT); + return res; + } + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + return ERTS_PORT_OP_DROPPED; + default: + /* Schedule call instead... 
*/ + break; + } + + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_CONNECT | flags; + + sigdp->u.connect.from = from; + sigdp->u.connect.connected = connect_id; + + return erts_schedule_proc2port_signal(c_p, + prt, + c_p->common.id, + refp, + sigdp, + 0, + port_sig_connect); +} - if (erts_max_ports > ERTS_MAX_PORTS) - erts_max_ports = ERTS_MAX_PORTS; - if (erts_max_ports < 1024) - erts_max_ports = 1024; +static void +port_unlink(Port *prt, Eterm from) +{ + ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from); + if (lnk) + erts_destroy_link(lnk); +} - if (erts_use_r9_pids_ports) { - ports_bits = ERTS_R9_PORTS_BITS; - if (erts_max_ports > ERTS_MAX_R9_PORTS) - erts_max_ports = ERTS_MAX_R9_PORTS; +static int +port_sig_unlink(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp) +{ + if (op == ERTS_PROC2PORT_SIG_EXEC) + port_unlink(prt, sigdp->u.unlink.from); + if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) + port_sched_op_reply(sigdp->caller, sigdp->ref, am_true); + return ERTS_PORT_REDS_UNLINK; +} + +ErtsPortOpResult +erts_port_unlink(Process *c_p, Port *prt, Eterm from, Eterm *refp) +{ + ErtsProc2PortSigData *sigdp; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p, + prt, + ERTS_PORT_SFLGS_DEAD, + 0, + !refp, + am_unlink); + + switch (try_imm_drv_call(&try_call_state)) { + case ERTS_TRY_IMM_DRV_CALL_OK: + port_unlink(prt, from); + finalize_imm_drv_call(&try_call_state); + BUMP_REDS(c_p, ERTS_PORT_REDS_UNLINK); + return ERTS_PORT_OP_DONE; + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + return ERTS_PORT_OP_DROPPED; + default: + /* Schedule call instead... */ + break; } - port_extra_shift = erts_fit_in_bits_int32(erts_max_ports - 1); - port_num_mask = (1 << ports_bits) - 1; + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_UNLINK; + sigdp->u.unlink.from = from; + + return erts_schedule_proc2port_signal(c_p, + prt, + c_p ? c_p->common.id : from, + refp, + sigdp, + 0, + port_sig_unlink); +} - erts_port_tab_index_mask = ~(~((Uint) 0) << port_extra_shift); - erts_max_ports = 1 << port_extra_shift; +static void +port_link_failure(Eterm port_id, Eterm linker) +{ + Process *rp; + ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND; + ASSERT(is_internal_pid(linker)); + rp = erts_pid2proc(NULL, 0, linker, rp_locks); + if (rp) { + ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), port_id); + if (rlnk) { + int xres = erts_send_exit_signal(NULL, + port_id, + rp, + &rp_locks, + am_noproc, + NIL, + NULL, + 0); + if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) { + /* We didn't exit the process and it is traced */ + if (IS_TRACED_FL(rp, F_TRACE_PROCS)) + trace_proc(NULL, rp, am_getting_unlinked, port_id); + } + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + } + } +} - erts_smp_mtx_init(&erts_driver_list_lock,"driver_list"); - driver_list = NULL; - erts_smp_tsd_key_create(&driver_list_lock_status_key); - erts_smp_tsd_key_create(&driver_list_last_error_key); +static void +port_link(Port *prt, erts_aint32_t state, Eterm to) +{ + if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) + erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, to); + else + port_link_failure(prt->common.id, to); +} - if (erts_max_ports * sizeof(Port) <= erts_max_ports) { - /* More memory needed than the whole address space. 
*/ - erts_alloc_enomem(ERTS_ALC_T_PORT_TABLE, ~((Uint) 0)); +static int +port_sig_link(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp) +{ + if (op == ERTS_PROC2PORT_SIG_EXEC) + port_link(prt, state, sigdp->u.link.to); + else + port_link_failure(sigdp->u.link.port, sigdp->u.link.to); + if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY) + port_sched_op_reply(sigdp->caller, sigdp->ref, am_true); + return ERTS_PORT_REDS_LINK; +} + +ErtsPortOpResult +erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp) +{ + ErtsProc2PortSigData *sigdp; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + 0, + !refp, + am_link); + + switch (try_imm_drv_call(&try_call_state)) { + case ERTS_TRY_IMM_DRV_CALL_OK: + port_link(prt, try_call_state.state, to); + finalize_imm_drv_call(&try_call_state); + BUMP_REDS(c_p, ERTS_PORT_REDS_LINK); + return ERTS_PORT_OP_DONE; + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + return ERTS_PORT_OP_BADARG; + default: + /* Schedule call instead... */ + break; } - erts_port = (Port *) erts_alloc(ERTS_ALC_T_PORT_TABLE, - erts_max_ports * sizeof(Port)); + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_LINK; + sigdp->u.link.port = prt->common.id; + sigdp->u.link.to = to; + + return erts_schedule_proc2port_signal(c_p, + prt, + c_p ? c_p->common.id : to, + refp, + sigdp, + 0, + port_sig_link); +} - erts_smp_atomic_init_nob(&erts_bytes_out, 0); - erts_smp_atomic_init_nob(&erts_bytes_in, 0); +void erts_init_io(int port_tab_size, + int port_tab_size_ignore_files) +{ + ErlDrvEntry** dp; + erts_smp_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; - for (i = 0; i < erts_max_ports; i++) { - erts_port_task_init_sched(&erts_port[i].sched); - erts_smp_atomic_init_nob(&erts_port[i].refc, 0); #ifdef ERTS_SMP - erts_port[i].lock = NULL; - erts_port[i].xports = NULL; - erts_smp_spinlock_init_x(&erts_port[i].state_lck, -#ifdef ERTS_ENABLE_LOCK_COUNT - (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? 
"port_state" : NULL, -#else - "port_state", -#endif - make_small(0)); + init_xports_list_alloc(); #endif - erts_port[i].tracer_proc = NIL; - erts_port[i].trace_flags = 0; - erts_port[i].drv_ptr = NULL; - erts_port[i].status = ERTS_PORT_SFLG_FREE; - erts_port[i].name = NULL; - erts_port[i].nlinks = NULL; - erts_port[i].monitors = NULL; - erts_port[i].linebuf = NULL; - erts_port[i].port_data_lock = NULL; + pdl_init(); + + if (!port_tab_size_ignore_files) { + int max_files = sys_max_files(); + if (port_tab_size < max_files) + port_tab_size = max_files; } - erts_smp_atomic32_init_nob(&erts_ports_snapshot, (erts_aint32_t) 0); - last_port_num = 0; - erts_smp_spinlock_init(&get_free_port_lck, "get_free_port"); + if (port_tab_size > ERTS_MAX_PORTS) + port_tab_size = ERTS_MAX_PORTS; + else if (port_tab_size < ERTS_MIN_PORTS) + port_tab_size = ERTS_MIN_PORTS; + + erts_smp_rwmtx_init_opt(&erts_driver_list_lock, + &drv_list_rwmtx_opts, + "driver_list"); + driver_list = NULL; + erts_smp_tsd_key_create(&driver_list_lock_status_key); + erts_smp_tsd_key_create(&driver_list_last_error_key); + + erts_ptab_init_table(&erts_port, + ERTS_ALC_T_PORT_TABLE, + NULL, + (ErtsPTabElementCommon *) &erts_invalid_port.common, + port_tab_size, + "port_table"); + + erts_smp_atomic_init_nob(&erts_bytes_out, 0); + erts_smp_atomic_init_nob(&erts_bytes_in, 0); sys_init_io(); erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1); - erts_smp_mtx_lock(&erts_driver_list_lock); + erts_smp_rwmtx_rwlock(&erts_driver_list_lock); init_driver(&fd_driver, &fd_driver_entry, NULL); init_driver(&vanilla_driver, &vanilla_driver_entry, NULL); @@ -1393,27 +2668,66 @@ void init_io(void) erts_add_driver_entry(*dp, NULL, 1); erts_smp_tsd_set(driver_list_lock_status_key, NULL); - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); } #if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) -void erts_lcnt_enable_io_lock_count(int enable) { - int i; - for (i = 0; i < erts_max_ports; i++) { - Port* p = &erts_port[i]; - if (enable) { - erts_lcnt_init_lock_x(&p->state_lck.lcnt, "port_state", ERTS_LCNT_LT_SPINLOCK, make_small(i)); - if (p->lock) { - erts_lcnt_init_lock_x(&p->lock->lcnt, "port_lock", ERTS_LCNT_LT_MUTEX, make_small(i)); - } - } else { - erts_lcnt_destroy_lock(&p->state_lck.lcnt); - if (p->lock) { - erts_lcnt_destroy_lock(&p->lock->lcnt); - } - } +static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable) +{ + if (dp->lock) { + if (enable) + erts_lcnt_init_lock_x(&dp->lock->lcnt, + "driver_lock", + ERTS_LCNT_LT_MUTEX, + erts_atom_put((byte*)dp->name, + sys_strlen(dp->name), + ERTS_ATOM_ENC_LATIN1, + 1)); + + else + erts_lcnt_destroy_lock(&dp->lock->lcnt); + + } +} + +static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable) +{ + erts_aint32_t state = erts_atomic32_read_nob(&prt->state); + if (!enable) { + erts_lcnt_destroy_lock(&prt->sched.mtx.lcnt); + if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) + erts_lcnt_destroy_lock(&prt->lock->lcnt); + } + else { + erts_lcnt_init_lock_x(&prt->sched.mtx.lcnt, + "port_sched_lock", + ERTS_LCNT_LT_MUTEX, + prt->common.id); + if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) + erts_lcnt_init_lock_x(&prt->lock->lcnt, + "port_lock", + ERTS_LCNT_LT_MUTEX, + prt->common.id); + } +} + +void erts_lcnt_enable_io_lock_count(int enable) +{ + erts_driver_t *dp; + int i, max = erts_ptab_max(&erts_port); + + for (i = 0; i < max; i++) { + Port *prt = erts_pix2port(i); + if (prt) + lcnt_enable_port_lock_count(prt, enable); } + + 
lcnt_enable_drv_lock_count(&vanilla_driver, enable); + lcnt_enable_drv_lock_count(&spawn_driver, enable); + lcnt_enable_drv_lock_count(&fd_driver, enable); + for (dp = driver_list; dp; dp = dp->next) + lcnt_enable_drv_lock_count(dp, enable); } #endif @@ -1597,9 +2911,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res) ERTS_SMP_CHK_NO_PROC_LOCKS; - ASSERT(is_internal_port(sender) - && is_internal_pid(pid) - && internal_pid_index(pid) < erts_max_processes); + ASSERT(is_internal_port(sender) && is_internal_pid(pid)); rp = (scheduler ? erts_proc_lookup(pid) @@ -1611,16 +2923,19 @@ deliver_result(Eterm sender, Eterm pid, Eterm res) ErlOffHeap *ohp; Eterm* hp; Uint sz_res; - sz_res = size_object(res); - hp = erts_alloc_message_heap(sz_res + 3, &bp, &ohp, rp, &rp_locks); - res = copy_struct(res, sz_res, &hp, ohp); - tuple = TUPLE2(hp, sender, res); + + sz_res = size_object(res); + hp = erts_alloc_message_heap(sz_res + 3, &bp, &ohp, rp, &rp_locks); + res = copy_struct(res, sz_res, &hp, ohp); + tuple = TUPLE2(hp, sender, res); erts_queue_message(rp, &rp_locks, bp, tuple, NIL #ifdef USE_VM_PROBES , NIL #endif ); - erts_smp_proc_unlock(rp, rp_locks); + + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); if (!scheduler) erts_smp_proc_dec_refc(rp); @@ -1636,7 +2951,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res) * len -- length of data */ -static void deliver_read_message(Port* prt, Eterm to, +static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to, char *hbuf, ErlDrvSizeT hlen, char *buf, ErlDrvSizeT len, int eol) { @@ -1654,10 +2969,11 @@ static void deliver_read_message(Port* prt, Eterm to, ERTS_SMP_CHK_NO_PROC_LOCKS; need = 3 + 3 + 2*hlen; - if (prt->status & ERTS_PORT_SFLG_LINEBUF_IO) { + + if (state & ERTS_PORT_SFLG_LINEBUF_IO) { need += 3; } - if (prt->status & ERTS_PORT_SFLG_BINARY_IO && buf != NULL) { + if ((state & ERTS_PORT_SFLG_BINARY_IO) && buf != NULL) { need += PROC_BIN_SIZE; } else { need += 2*len; @@ -1673,7 +2989,7 @@ static void deliver_read_message(Port* prt, Eterm to, hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks); listp = NIL; - if ((prt->status & ERTS_PORT_SFLG_BINARY_IO) == 0) { + if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) { listp = buf_to_intlist(&hp, buf, len, listp); } else if (buf != NULL) { ProcBin* pb; @@ -1704,14 +3020,14 @@ static void deliver_read_message(Port* prt, Eterm to, listp = buf_to_intlist(&hp, hbuf, hlen, listp); } - if (prt->status & ERTS_PORT_SFLG_LINEBUF_IO){ + if (state & ERTS_PORT_SFLG_LINEBUF_IO){ listp = TUPLE2(hp, (eol) ? am_eol : am_noeol, listp); hp += 3; } tuple = TUPLE2(hp, am_data, listp); hp += 3; - tuple = TUPLE2(hp, prt->id, tuple); + tuple = TUPLE2(hp, prt->common.id, tuple); hp += 3; erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined @@ -1729,7 +3045,8 @@ static void deliver_read_message(Port* prt, Eterm to, * Deliver all lines in a line buffer, repeats calls to * deliver_read_message, and takes the same parameters. 
*/ -static void deliver_linebuf_message(Port* prt, Eterm to, +static void deliver_linebuf_message(Port* prt, erts_aint_t state, + Eterm to, char* hbuf, ErlDrvSizeT hlen, char *buf, ErlDrvSizeT len) { @@ -1738,7 +3055,7 @@ static void deliver_linebuf_message(Port* prt, Eterm to, if(init_linebuf_context(&lc,&(prt->linebuf), buf, len) < 0) return; while((ret = read_linebuf(&lc)) > LINEBUF_EMPTY) - deliver_read_message(prt, to, hbuf, hlen, LINEBUF_DATA(lc), + deliver_read_message(prt, state, to, hbuf, hlen, LINEBUF_DATA(lc), LINEBUF_DATALEN(lc), (ret == LINEBUF_EOL)); } @@ -1749,20 +3066,25 @@ static void deliver_linebuf_message(Port* prt, Eterm to, * Parameters: * prt - Pointer to a Port structure for this port. */ -static void flush_linebuf_messages(Port *prt) +static void flush_linebuf_messages(Port *prt, erts_aint32_t state) { LineBufContext lc; int ret; ERTS_SMP_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); - if(prt == NULL || !(prt->status & ERTS_PORT_SFLG_LINEBUF_IO)) + + if (!prt) + return; + + if (!(state & ERTS_PORT_SFLG_LINEBUF_IO)) return; if(init_linebuf_context(&lc,&(prt->linebuf), NULL, 0) < 0) return; while((ret = flush_linebuf(&lc)) > LINEBUF_EMPTY) deliver_read_message(prt, - prt->connected, + state, + ERTS_PORT_GET_CONNECTED(prt), NULL, 0, LINEBUF_DATA(lc), @@ -1790,6 +3112,7 @@ deliver_vec_message(Port* prt, /* Port */ ErlOffHeap *ohp; ErtsProcLocks rp_locks = 0; int scheduler = erts_get_scheduler_id() != 0; + erts_aint32_t state; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -1805,12 +3128,13 @@ deliver_vec_message(Port* prt, /* Port */ if (!rp) return; + state = erts_atomic32_read_nob(&prt->state); /* * Calculate the exact number of heap words needed. */ need = 3 + 3; /* Heap space for two tuples */ - if (prt->status & ERTS_PORT_SFLG_BINARY_IO) { + if (state & ERTS_PORT_SFLG_BINARY_IO) { need += (2+PROC_BIN_SIZE)*vsize - 2 + hlen*2; } else { need += (hlen+csize)*2; @@ -1821,7 +3145,7 @@ deliver_vec_message(Port* prt, /* Port */ listp = NIL; iov += vsize; - if ((prt->status & ERTS_PORT_SFLG_BINARY_IO) == 0) { + if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) { Eterm* thp = hp; while (vsize--) { iov--; @@ -1874,7 +3198,7 @@ deliver_vec_message(Port* prt, /* Port */ tuple = TUPLE2(hp, am_data, listp); hp += 3; - tuple = TUPLE2(hp, prt->id, tuple); + tuple = TUPLE2(hp, prt->common.id, tuple); hp += 3; erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined @@ -1907,7 +3231,7 @@ static void deliver_bin_message(Port* prt, /* port */ /* * Note. 
* - * The test for (p->status & ERTS_PORT_SFLGS_DEAD) == 0 is important since the + * The test for ERTS_PORT_SFLGS_DEAD is important since the * driver's flush function might call driver_async, which when using no * threads and being short circuited will notice that the io queue is empty * (after calling the driver's async_ready) and recursively call @@ -1923,7 +3247,7 @@ static void flush_port(Port *p) if (p->drv_ptr->flush != NULL) { #ifdef USE_VM_PROBES if (DTRACE_ENABLED(driver_flush)) { - DTRACE_FORMAT_COMMON_PID_AND_PORT(p->connected, p) + DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p) DTRACE3(driver_flush, process_str, port_str, p->name); } #endif @@ -1938,11 +3262,12 @@ static void flush_port(Port *p) } #ifdef ERTS_SMP if (p->xports) - erts_smp_xports_unlock(p); + erts_port_handle_xports(p); ASSERT(!p->xports); #endif } - if ((p->status & ERTS_PORT_SFLGS_DEAD) == 0 && is_port_ioq_empty(p)) { + if ((erts_atomic32_read_nob(&p->state) & ERTS_PORT_SFLGS_DEAD) == 0 + && is_port_ioq_empty(p)) { terminate_port(p); } } @@ -1954,29 +3279,29 @@ terminate_port(Port *prt) Eterm send_closed_port_id; Eterm connected_id = NIL /* Initialize to silence compiler */; erts_driver_t *drv; - int halt; + erts_aint32_t state; ERTS_SMP_CHK_NO_PROC_LOCKS; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - ASSERT(!prt->nlinks); - ASSERT(!prt->monitors); + ASSERT(!ERTS_P_LINKS(prt)); + ASSERT(!ERTS_P_MONITORS(prt)); - /* prt->status may be altered by kill_port()below */ - halt = (prt->status & ERTS_PORT_SFLG_HALT) != 0; - if (prt->status & ERTS_PORT_SFLG_SEND_CLOSED) { - erts_port_status_band_set(prt, ~ERTS_PORT_SFLG_SEND_CLOSED); - send_closed_port_id = prt->id; - connected_id = prt->connected; + /* state may be altered by kill_port() below */ + state = erts_atomic32_read_band_nob(&prt->state, + ~ERTS_PORT_SFLG_SEND_CLOSED); + if (state & ERTS_PORT_SFLG_SEND_CLOSED) { + send_closed_port_id = prt->common.id; + connected_id = ERTS_PORT_GET_CONNECTED(prt); } else { send_closed_port_id = NIL; } #ifdef ERTS_SMP - erts_cancel_smp_ptimer(prt->ptimer); + erts_cancel_smp_ptimer(prt->common.u.alive.ptimer); #else - erts_cancel_timer(&prt->tm); + erts_cancel_timer(&prt->common.u.alive.tm); #endif drv = prt->drv_ptr; @@ -1984,7 +3309,7 @@ terminate_port(Port *prt) int fpe_was_unmasked = erts_block_fpe(); #ifdef USE_VM_PROBES if (DTRACE_ENABLED(driver_stop)) { - DTRACE_FORMAT_COMMON_PID_AND_PORT(prt->connected, prt) + DTRACE_FORMAT_COMMON_PID_AND_PORT(connected_id, prt) DTRACE3(driver_stop, process_str, drv->name, port_str); } #endif @@ -1992,14 +3317,14 @@ terminate_port(Port *prt) erts_unblock_fpe(fpe_was_unmasked); #ifdef ERTS_SMP if (prt->xports) - erts_smp_xports_unlock(prt); + erts_port_handle_xports(prt); ASSERT(!prt->xports); #endif } if(drv->handle != NULL) { - erts_smp_mtx_lock(&erts_driver_list_lock); + erts_smp_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(drv->handle); - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_runlock(&erts_driver_list_lock); } stopq(prt); /* clear queue memory */ if(prt->linebuf != NULL){ @@ -2015,20 +3340,21 @@ terminate_port(Port *prt) if (prt->psd) erts_free(ERTS_ALC_T_PRTSD, prt->psd); + ASSERT(prt->dist_entry == NULL); + kill_port(prt); /* * We don't want to send the closed message until after the * port has been removed from the port table (in kill_port()). 
*/ - if (halt && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) { - erts_smp_port_unlock(prt); /* We will exit and never return */ + if ((state & ERTS_PORT_SFLG_HALT) + && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) { + erts_port_release(prt); /* We will exit and never return */ erl_exit_flush_async(erts_halt_code, ""); } if (is_internal_port(send_closed_port_id)) deliver_result(send_closed_port_id, connected_id, am_closed); - - ASSERT(prt->dist_entry == NULL); } void @@ -2048,7 +3374,7 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc) if (!rp) { goto done; } - rmon = erts_remove_monitor(&(rp->monitors),mon->ref); + rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon == NULL) { goto done; @@ -2102,7 +3428,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc) ASSERT(is_internal_pid(lnk->pid)); rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks); if (rp) { - ErtsLink *rlnk = erts_remove_link(&(rp->nlinks), psc->port); + ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), psc->port); if (rlnk) { int xres = erts_send_exit_signal(NULL, @@ -2138,11 +3464,13 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc) * that is to kill a port till reason kill. Then the port is stopped. * */ -void -erts_do_exit_port(Port *p, Eterm from, Eterm reason) + +int +erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed) { ErtsLink *lnk; Eterm rreason; + erts_aint32_t state; ERTS_SMP_CHK_NO_PROC_LOCKS; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); @@ -2162,66 +3490,76 @@ erts_do_exit_port(Port *p, Eterm from, Eterm reason) } #endif - if ((p->status & (ERTS_PORT_SFLGS_DEAD - | ERTS_PORT_SFLG_EXITING - | ERTS_PORT_SFLG_IMMORTAL)) - || ((reason == am_normal) && - ((from != p->connected) && (from != p->id)))) { - return; - } + state = erts_atomic32_read_nob(&p->state); + if (state & (ERTS_PORT_SFLGS_DEAD + | ERTS_PORT_SFLG_EXITING + | ERTS_PORT_SFLG_CLOSING)) + return 0; + + if (reason == am_normal && from != ERTS_PORT_GET_CONNECTED(p) && from != p->common.id) + return 0; + + if (send_closed) + erts_atomic32_read_bor_relb(&p->state, + ERTS_PORT_SFLG_SEND_CLOSED); if (IS_TRACED_FL(p, F_TRACE_PORTS)) { trace_port(p, am_closed, reason); } - erts_trace_check_exiting(p->id); + erts_trace_check_exiting(p->common.id); /* * Setting the port to not busy here, frees the list of pending * processes and makes them runnable. 
*/ - set_busy_port((ErlDrvPort)internal_port_index(p->id), 0); + set_busy_port((ErlDrvPort) p, 0); - if (p->reg != NULL) - (void) erts_unregister_name(NULL, 0, p, p->reg->name); + if (p->common.u.alive.reg != NULL) + (void) erts_unregister_name(NULL, 0, p, p->common.u.alive.reg->name); - erts_port_status_bor_set(p, ERTS_PORT_SFLG_EXITING); + state = erts_atomic32_read_bor_relb(&p->state, ERTS_PORT_SFLG_EXITING); { - SweepContext sc = {p->id, rreason}; - lnk = p->nlinks; - p->nlinks = NULL; + SweepContext sc = {p->common.id, rreason}; + lnk = ERTS_P_LINKS(p); + ERTS_P_LINKS(p) = NULL; erts_sweep_links(lnk, &sweep_one_link, &sc); } DRV_MONITOR_LOCK_PDL(p); { - ErtsMonitor *moni = p->monitors; - p->monitors = NULL; + ErtsMonitor *moni = ERTS_P_MONITORS(p); + ERTS_P_MONITORS(p) = NULL; erts_sweep_monitors(moni, &sweep_one_monitor, NULL); } DRV_MONITOR_UNLOCK_PDL(p); - if ((p->status & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) { + if ((state & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) { erts_do_net_exits(p->dist_entry, rreason); erts_deref_dist_entry(p->dist_entry); - p->dist_entry = NULL; - erts_port_status_band_set(p, ~ERTS_PORT_SFLG_DISTRIBUTION); + p->dist_entry = NULL; + erts_atomic32_read_band_relb(&p->state, + ~ERTS_PORT_SFLG_DISTRIBUTION); } if ((reason != am_kill) && !is_port_ioq_empty(p)) { - erts_port_status_bandor_set(p, - ~ERTS_PORT_SFLG_EXITING, /* must turn it off */ - ERTS_PORT_SFLG_CLOSING); + /* must turn exiting flag off */ + erts_atomic32_read_bset_relb(&p->state, + (ERTS_PORT_SFLG_EXITING + | ERTS_PORT_SFLG_CLOSING), + ERTS_PORT_SFLG_CLOSING); flush_port(p); } else { terminate_port(p); } + + return 1; } /* About the states ERTS_PORT_SFLG_EXITING and ERTS_PORT_SFLG_CLOSING used above. ** -** ERTS_PORT_SFLG_EXITING is a recursion protection for erts_do_exit_port(). +** ERTS_PORT_SFLG_EXITING is a recursion protection for erts_deliver_port_exit(). ** It is unclear whether this state is necessary or not, it might be possible ** to merge it with ERTS_PORT_SFLG_CLOSING. ERTS_PORT_SFLG_EXITING only persists ** over a section of sequential (but highly recursive) code. 
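Editor's sketch: the transition from EXITING to CLOSING above is done with a single atomic "bset" so there is no window in which both flags are clear. A rough standalone approximation of such a read-bset, written as a compare-exchange loop over C11 atomics (read_bset() and the flag values are illustrative, not the emulator's primitives):

    #include <stdatomic.h>

    #define PORT_SFLG_EXITING (1u << 0)   /* illustrative bit assignments */
    #define PORT_SFLG_CLOSING (1u << 1)

    /* Roughly what a "read_bset" does: within the bits selected by mask,
     * install exactly the bits in set, leave the rest untouched, and
     * return the old value. */
    static unsigned int read_bset(atomic_uint *var, unsigned int mask, unsigned int set)
    {
        unsigned int old = atomic_load(var);
        while (!atomic_compare_exchange_weak(var, &old, (old & ~mask) | set))
            ;   /* 'old' is refreshed by the failed exchange; retry */
        return old;
    }

    int main(void)
    {
        atomic_uint state = PORT_SFLG_EXITING;
        /* Turn EXITING off and CLOSING on in one atomic step. */
        read_bset(&state, PORT_SFLG_EXITING | PORT_SFLG_CLOSING, PORT_SFLG_CLOSING);
        return 0;
    }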
@@ -2237,232 +3575,1113 @@ erts_do_exit_port(Port *p, Eterm from, Eterm reason) ** {PID, close} ** {PID, {command, io-list}} ** {PID, {connect, New_PID}} -** -** */ -void erts_port_command(Process *proc, - Eterm caller_id, - Port *port, - Eterm command) +ErtsPortOpResult +erts_port_command(Process *c_p, + int flags, + Port *port, + Eterm command, + Eterm *refp) { Eterm *tp; - Eterm pid; - if (!port) - return; + ASSERT(port); - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); - ERTS_SMP_CHK_NO_PROC_LOCKS; - ASSERT(!INVALID_PORT(port, port->id)); + flags |= ERTS_PORT_SIG_FLG_BANG_OP; if (is_tuple_arity(command, 2)) { + Eterm cntd; tp = tuple_val(command); - if ((pid = port->connected) == tp[1]) { - /* PID must be connected */ + cntd = tp[1]; + if (is_internal_pid(cntd)) { if (tp[2] == am_close) { - erts_port_status_bor_set(port, ERTS_PORT_SFLG_SEND_CLOSED); - erts_do_exit_port(port, pid, am_normal); - -#ifdef USE_VM_PROBES - if(DTRACE_ENABLED(port_command)) { - DTRACE_FORMAT_COMMON_PROC_AND_PORT(proc, port) - DTRACE4(port_command, process_str, port_str, port->name, "close"); - } -#endif - goto done; + if (!erts_port_synchronous_ops) + refp = NULL; + flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND; + return erts_port_exit(c_p, flags, port, cntd, am_normal, refp); } else if (is_tuple_arity(tp[2], 2)) { tp = tuple_val(tp[2]); if (tp[1] == am_command) { - if (erts_write_to_port(caller_id, port, tp[2]) == 0) - goto done; - } else if ((tp[1] == am_connect) && is_internal_pid(tp[2])) { -#ifdef USE_VM_PROBES - if(DTRACE_ENABLED(port_command)) { - DTRACE_FORMAT_COMMON_PROC_AND_PORT(proc, port) - DTRACE4(port_command, process_str, port_str, port->name, "connect"); - } -#endif - port->connected = tp[2]; - deliver_result(port->id, pid, am_connected); - goto done; + if (!(flags & ERTS_PORT_SIG_FLG_NOSUSPEND) + && !erts_port_synchronous_ops) + refp = NULL; + return erts_port_output(c_p, flags, port, cntd, tp[2], refp); + } + else if (tp[1] == am_connect) { + if (!erts_port_synchronous_ops) + refp = NULL; + flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND; + return erts_port_connect(c_p, flags, port, cntd, tp[2], refp); } } } } - { - ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; - Process* rp = erts_pid2proc(NULL, 0, - port->connected, rp_locks); - if (rp) { - (void) erts_send_exit_signal(NULL, - port->id, - rp, - &rp_locks, - am_badsig, - NIL, - NULL, - 0); - erts_smp_proc_unlock(rp, rp_locks); - } + /* badsig */ + if (!erts_port_synchronous_ops) + refp = NULL; + flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND; + return bad_port_signal(c_p, flags, port, c_p->common.id, refp, am_command); +} + +static ERTS_INLINE ErtsPortOpResult +call_driver_control(Eterm caller, + Port *prt, + unsigned int command, + char *bufp, + ErlDrvSizeT size, + char **resp_bufp, + ErlDrvSizeT *from_size) +{ + ErlDrvSSizeT cres; + if (!prt->drv_ptr->control) + return ERTS_PORT_OP_BADARG; + + +#ifdef USE_VM_PROBES + if (DTRACE_ENABLED(port_control) || DTRACE_ENABLED(driver_control)) { + DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt); + DTRACE4(port_control, process_str, port_str, prt->name, command); + DTRACE5(driver_control, process_str, port_str, prt->name, + command, size); } - done: - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); +#endif + + prt->caller = caller; + cres = prt->drv_ptr->control((ErlDrvData) prt->drv_data, + command, + bufp, + size, + resp_bufp, + *from_size); + prt->caller = NIL; + + if (cres < 0) + return ERTS_PORT_OP_BADARG; + + *from_size = (ErlDrvSizeT) cres; + + return ERTS_PORT_OP_DONE; } -/* - * Control a port synchronously. 
- * Returns either a list or a binary. - */ -Eterm -erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist) -{ - byte* to_port = NULL; /* Buffer to write to port. */ - /* Initialization is for shutting up - warning about use before set. */ - Uint to_len = 0; /* Length of buffer. */ - int must_free = 0; /* True if the buffer should be freed. */ - char port_result[ERL_ONHEAP_BIN_LIMIT]; /* Default buffer for result from port. */ - char* port_resp; /* Pointer to result buffer. */ - ErlDrvSSizeT n; - ErlDrvSSizeT (*control) - (ErlDrvData, unsigned, char*, ErlDrvSizeT, char**, ErlDrvSizeT); - int fpe_was_unmasked; +static void +cleanup_scheduled_control(Binary *binp, char *bufp) +{ + if (binp) { + if (erts_refc_dectest(&binp->refc, 0) == 0) + erts_bin_free(binp); + } + else { + if (bufp) + erts_free(ERTS_ALC_T_DRV_CTRL_DATA, bufp); + } +} - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - if ((control = prt->drv_ptr->control) == NULL) { - return THE_NON_VALUE; +static ERTS_INLINE Uint +port_control_result_size(int control_flags, + char *resp_bufp, + ErlDrvSizeT *resp_size, + char *pre_alloc_buf) +{ + if (!resp_bufp) + return (Uint) 0; + + if (control_flags & PORT_CONTROL_FLAG_BINARY) { + if (resp_bufp != pre_alloc_buf) { + ErlDrvBinary *dbin = (ErlDrvBinary *) resp_bufp; + *resp_size = dbin->orig_size; + if (*resp_size > ERL_ONHEAP_BIN_LIMIT) + return PROC_BIN_SIZE; + } + ASSERT(*resp_size <= ERL_ONHEAP_BIN_LIMIT); + return (Uint) heap_bin_size((*resp_size)); } - /* - * Convert the iolist to a buffer, pointed to by to_port, - * and with its length in to_len. - */ - if (is_binary(iolist) && binary_bitoffset(iolist) == 0) { + return (Uint) 2*(*resp_size); +} + +static ERTS_INLINE Eterm +write_port_control_result(int control_flags, + char *resp_bufp, + ErlDrvSizeT resp_size, + char *pre_alloc_buf, + Eterm **hpp, + ErlHeapFragment *bp, + ErlOffHeap *ohp) +{ + Eterm res; + if (!resp_bufp) + return NIL; + if (control_flags & PORT_CONTROL_FLAG_BINARY) { + /* Binary result */ + ErlDrvBinary *dbin; + ErlHeapBin *hbin; + + if (resp_bufp == pre_alloc_buf) + dbin = NULL; + else { + dbin = (ErlDrvBinary *) resp_bufp; + if (dbin->orig_size > ERL_ONHEAP_BIN_LIMIT) { + ProcBin* pb = (ProcBin *) *hpp; + *hpp += PROC_BIN_SIZE; + pb->thing_word = HEADER_PROC_BIN; + pb->size = dbin->orig_size; + pb->next = ohp->first; + ohp->first = (struct erl_off_heap_header *) pb; + pb->val = ErlDrvBinary2Binary(dbin); + pb->bytes = (byte*) dbin->orig_bytes; + pb->flags = 0; + OH_OVERHEAD(ohp, dbin->orig_size / sizeof(Eterm)); + return make_binary(pb); + } + resp_bufp = dbin->orig_bytes; + resp_size = dbin->orig_size; + } + + hbin = (ErlHeapBin *) *hpp; + *hpp += heap_bin_size(resp_size); + ASSERT(resp_size <= ERL_ONHEAP_BIN_LIMIT); + hbin->thing_word = header_heap_bin(resp_size); + hbin->size = resp_size; + sys_memcpy(hbin->data, resp_bufp, resp_size); + if (dbin) + driver_free_binary(dbin); + return make_binary(hbin); + } + + /* List result */ + res = buf_to_intlist(hpp, resp_bufp, resp_size, NIL); + if (resp_bufp != pre_alloc_buf) + driver_free(resp_bufp); + return res; +} + +static int +port_sig_control(Port *prt, + erts_aint32_t state, + int op, + ErtsProc2PortSigData *sigdp) +{ + ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY); + + if (op == ERTS_PROC2PORT_SIG_EXEC) { + char resp_buf[ERL_ONHEAP_BIN_LIMIT]; + ErlDrvSizeT resp_size = sizeof(resp_buf); + char *resp_bufp = &resp_buf[0]; + ErtsPortOpResult res; + + res = call_driver_control(sigdp->caller, + prt, + sigdp->u.control.command, + 
sigdp->u.control.bufp, + sigdp->u.control.size, + &resp_bufp, + &resp_size); + + if (res == ERTS_PORT_OP_DONE) { + Eterm msg; + Eterm *hp, *hp_start; + ErlHeapFragment *bp; + ErlOffHeap *ohp; + Process *rp; + ErtsProcLocks rp_locks = 0; + Uint hsz; + int control_flags; + + rp = erts_proc_lookup_raw(sigdp->caller); + if (!rp) + goto done; + + control_flags = prt->control_flags; + + hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE; + hsz += port_control_result_size(control_flags, + resp_bufp, + &resp_size, + &resp_buf[0]); + + hp_start = hp = erts_alloc_message_heap(hsz, + &bp, + &ohp, + rp, + &rp_locks); + + msg = write_port_control_result(control_flags, + resp_bufp, + resp_size, + &resp_buf[0], + &hp, + bp, + ohp); + + queue_port_sched_op_reply(rp, + &rp_locks, + hp_start, + hp, + hsz, + bp, + sigdp->ref, + msg); + + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + goto done; + } + } + + /* failure */ + + port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg); + +done: + + cleanup_scheduled_control(sigdp->u.control.binp, + sigdp->u.control.bufp); + + return ERTS_PORT_REDS_CONTROL; +} + + +ErtsPortOpResult +erts_port_control(Process* c_p, + Port *prt, + unsigned int command, + Eterm data, + Eterm *retvalp) +{ + ErtsPortOpResult res; + char *bufp = NULL; + ErlDrvSizeT size = 0; + int try_call; + int tmp_alloced = 0; + erts_aint32_t sched_flags; + Binary *binp; + int copy; + ErtsProc2PortSigData *sigdp; + + sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + if (sched_flags & ERTS_PTS_FLG_EXIT) + return ERTS_PORT_OP_BADARG; + + try_call = !(sched_flags & ERTS_PTS_FLGS_FORCE_SCHEDULE_OP); + + if (is_binary(data) && binary_bitoffset(data) == 0) { + byte *bytep; ERTS_DECLARE_DUMMY(Uint bitoffs); ERTS_DECLARE_DUMMY(Uint bitsize); - ERTS_GET_BINARY_BYTES(iolist, to_port, bitoffs, bitsize); - to_len = binary_size(iolist); + ERTS_GET_BINARY_BYTES(data, bytep, bitoffs, bitsize); + bufp = (char *) bytep; + size = binary_size(data); } else { int r; - /* Try with an 8KB buffer first (will often be enough I guess). */ - to_len = 8*1024; - to_port = erts_alloc(ERTS_ALC_T_TMP, to_len); - must_free = 1; + if (!try_call) { + if (erts_iolist_size(data, &size)) + return ERTS_PORT_OP_BADARG; + bufp = erts_alloc(ERTS_ALC_T_DRV_CTRL_DATA, size); + r = erts_iolist_to_buf(data, bufp, size); + ASSERT(r == 0); + } + else { + /* Try with an 8KB buffer first (will often be enough I guess). */ + size = 8*1024; + bufp = erts_alloc(ERTS_ALC_T_TMP, size); + tmp_alloced = 1; + + r = erts_iolist_to_buf(data, bufp, size); + if (ERTS_IOLIST_TO_BUF_SUCCEEDED(r)) { + size -= r; + } else { + if (r == ERTS_IOLIST_TO_BUF_TYPE_ERROR) { /* Type error */ + erts_free(ERTS_ALC_T_TMP, bufp); + return ERTS_PORT_OP_BADARG; + } + else { + ASSERT(r == ERTS_IOLIST_TO_BUF_OVERFLOW); /* Overflow */ + erts_free(ERTS_ALC_T_TMP, bufp); + if (erts_iolist_size(data, &size)) + return ERTS_PORT_OP_BADARG; /* Type error */ + } + bufp = erts_alloc(ERTS_ALC_T_TMP, size); + r = erts_iolist_to_buf(data, bufp, size); + ASSERT(r == 0); + } + } + } - /* - * In versions before R10B, we used to reserve random - * amounts of extra memory. From R10B, we allocate the - * exact amount. 
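Editor's sketch: the iolist conversion above keeps the long-standing strategy of trying an 8KB scratch buffer first and only measuring and allocating the exact size when that overflows. A self-contained sketch of that strategy, with to_buf() standing in for erts_iolist_to_buf() (all names here are made up for illustration):

    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for erts_iolist_to_buf(): returns bytes copied, or -1 when
     * the destination buffer is too small (the "overflow" case). */
    static long to_buf(const char *src, size_t srclen, char *dst, size_t dstlen)
    {
        if (srclen > dstlen)
            return -1;
        memcpy(dst, src, srclen);
        return (long) srclen;
    }

    /* Optimistic conversion: try the caller's scratch buffer first and only
     * allocate an exact-size buffer when that overflows.  The caller frees
     * the result when *heap_allocated is set. */
    static char *convert(const char *src, size_t srclen,
                         char *scratch, size_t scratchlen,
                         size_t *outlen, int *heap_allocated)
    {
        char *buf;
        if (to_buf(src, srclen, scratch, scratchlen) >= 0) {
            *outlen = srclen;
            *heap_allocated = 0;
            return scratch;
        }
        *outlen = srclen;            /* exact size, measured after the overflow */
        *heap_allocated = 1;
        buf = malloc(srclen);
        if (buf != NULL)
            to_buf(src, srclen, buf, srclen);
        return buf;
    }

    int main(void)
    {
        char scratch[8];
        size_t len;
        int on_heap;
        char *p = convert("fits", 4, scratch, sizeof(scratch), &len, &on_heap);
        char *q = convert("does not fit here", 17, scratch, sizeof(scratch), &len, &on_heap);
        if (on_heap)
            free(q);
        (void) p;
        return 0;
    }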
- */ - r = io_list_to_buf(iolist, (char*) to_port, to_len); - if (r >= 0) { - to_len -= r; - } else if (r == -2) { /* Type error */ - erts_free(ERTS_ALC_T_TMP, (void *) to_port); - return THE_NON_VALUE; - } else { - ASSERT(r == -1); /* Overflow */ - erts_free(ERTS_ALC_T_TMP, (void *) to_port); - if (erts_iolist_size(iolist, &to_len)) { /* Type error */ - return THE_NON_VALUE; + if (try_call) { + char resp_buf[ERL_ONHEAP_BIN_LIMIT]; + char* resp_bufp = &resp_buf[0]; + ErlDrvSizeT resp_size = sizeof(resp_buf); + ErtsTryImmDrvCallResult try_call_res; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE( + c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + 0, + 0, + am_control); + + try_call_res = try_imm_drv_call(&try_call_state); + switch (try_call_res) { + case ERTS_TRY_IMM_DRV_CALL_OK: { + Eterm *hp; + Uint hsz; + int control_flags; + + res = call_driver_control(c_p->common.id, + prt, + command, + bufp, + size, + &resp_bufp, + &resp_size); + finalize_imm_drv_call(&try_call_state); + if (tmp_alloced) + erts_free(ERTS_ALC_T_TMP, bufp); + if (res == ERTS_PORT_OP_BADARG) { + return ERTS_PORT_OP_BADARG; } - must_free = 1; - to_port = erts_alloc(ERTS_ALC_T_TMP, to_len); - r = io_list_to_buf(iolist, (char*) to_port, to_len); - ASSERT(r == 0); + + control_flags = prt->control_flags; + + hsz = port_control_result_size(control_flags, + resp_bufp, + &resp_size, + &resp_buf[0]); + hp = HAlloc(c_p, hsz); + *retvalp = write_port_control_result(control_flags, + resp_bufp, + resp_size, + &resp_buf[0], + &hp, + NULL, + &c_p->off_heap); + BUMP_REDS(c_p, ERTS_PORT_REDS_CONTROL); + return ERTS_PORT_OP_DONE; + } + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + if (tmp_alloced) + erts_free(ERTS_ALC_T_TMP, bufp); + return ERTS_PORT_OP_BADARG; + default: + /* Schedule control() call instead... 
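Editor's sketch: the control path above follows the pattern used throughout this rewrite: attempt an immediate driver call when the port can be taken right away, otherwise package the request as a signal that the port executes later. A compressed standalone sketch of that two-path shape (port_is_free(), run_now() and enqueue() are illustrative stand-ins for try_imm_drv_call(), the driver's control callback and erts_schedule_proc2port_signal()):

    #include <stdio.h>

    enum op_result { OP_DONE, OP_SCHEDULED, OP_BADARG };

    static int port_is_free(int busy)              { return !busy; }
    static enum op_result run_now(const char *b)   { printf("ran now: %s\n", b); return OP_DONE; }
    static enum op_result enqueue(const char *b)   { printf("queued:  %s\n", b); return OP_SCHEDULED; }

    static enum op_result control(int port_busy, const char *buf)
    {
        /* Fast path: the port can be taken immediately, so the driver
         * callback runs on the caller's thread and the result is built
         * straight onto the caller's heap. */
        if (port_is_free(port_busy))
            return run_now(buf);
        /* Slow path: copy the request into a signal; the port executes it
         * later and the caller receives the result as a message. */
        return enqueue(buf);
    }

    int main(void)
    {
        control(0, "immediate");
        control(1, "scheduled");
        return 0;
    }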
*/ + break; } } - prt->caller = p->id; /* Internal pid */ + /* Convert data into something that can be scheduled */ - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - ERTS_SMP_CHK_NO_PROC_LOCKS; + copy = tmp_alloced; + + binp = NULL; + + if (is_binary(data) && binary_bitoffset(data) == 0) { + Eterm *ebinp = binary_val_rel(data, NULL); + ASSERT(!tmp_alloced); + if (*ebinp == HEADER_SUB_BIN) + ebinp = binary_val_rel(((ErlSubBin *) ebinp)->orig, NULL); + if (*ebinp != HEADER_PROC_BIN) + copy = 1; + else { + binp = ((ProcBin *) ebinp)->val; + ASSERT(bufp < bufp + size); + ASSERT(binp->orig_bytes <= bufp + && bufp + size <= binp->orig_bytes + binp->orig_size); + erts_refc_inc(&binp->refc, 1); + } + } + + if (copy) { + char *old_bufp = bufp; + bufp = erts_alloc(ERTS_ALC_T_DRV_CTRL_DATA, size); + sys_memcpy(bufp, old_bufp, size); + if (tmp_alloced) + erts_free(ERTS_ALC_T_TMP, old_bufp); + } + + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_CONTROL; + sigdp->u.control.binp = binp; + sigdp->u.control.command = command; + sigdp->u.control.bufp = bufp; + sigdp->u.control.size = size; + + res = erts_schedule_proc2port_signal(c_p, + prt, + c_p->common.id, + retvalp, + sigdp, + 0, + port_sig_control); + if (res != ERTS_PORT_OP_SCHEDULED) { + cleanup_scheduled_control(binp, bufp); + return ERTS_PORT_OP_BADARG; + } + return res; +} + +static ERTS_INLINE ErtsPortOpResult +call_driver_call(Eterm caller, + Port *prt, + unsigned int command, + char *bufp, + ErlDrvSizeT size, + char **resp_bufp, + ErlDrvSizeT *from_size, + unsigned *ret_flagsp) +{ + ErlDrvSSizeT cres; + + if (!prt->drv_ptr->call) + return ERTS_PORT_OP_BADARG; #ifdef USE_VM_PROBES - if (DTRACE_ENABLED(port_control) || DTRACE_ENABLED(driver_control)) { - DTRACE_FORMAT_COMMON_PROC_AND_PORT(p, prt); - DTRACE4(port_control, process_str, port_str, prt->name, command); - DTRACE5(driver_control, process_str, port_str, prt->name, - command, to_len); + if (DTRACE_ENABLED(driver_call)) { + DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); + DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); + + dtrace_pid_str(caller, process_str); + dtrace_port_str(prt, port_str); + DTRACE5(driver_call, process_str, port_str, prt->name, command, size); } #endif - /* - * Call the port's control routine. 
- */ + prt->caller = caller; + cres = prt->drv_ptr->call((ErlDrvData) prt->drv_data, + command, + bufp, + size, + resp_bufp, + *from_size, + ret_flagsp); + prt->caller = NIL; - port_resp = port_result; - fpe_was_unmasked = erts_block_fpe(); - n = control((ErlDrvData)prt->drv_data, command, (char*)to_port, to_len, - &port_resp, sizeof(port_result)); - erts_unblock_fpe(fpe_was_unmasked); - if (must_free) { - erts_free(ERTS_ALC_T_TMP, (void *) to_port); + if (cres <= 0 + || ((byte) (*resp_bufp)[0]) != VERSION_MAGIC) + return ERTS_PORT_OP_BADARG; + + *from_size = (ErlDrvSizeT) cres; + + return ERTS_PORT_OP_DONE; +} + + +static +void cleanup_scheduled_call(char *bufp) +{ + if (bufp) + erts_free(ERTS_ALC_T_DRV_CALL_DATA, bufp); +} + +static int +port_sig_call(Port *prt, + erts_aint32_t state, + int op, + ErtsProc2PortSigData *sigdp) +{ + char resp_buf[256]; + ErlDrvSizeT resp_size = sizeof(resp_buf); + char *resp_bufp = &resp_buf[0]; + unsigned ret_flags = 0U; + + + ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY); + + if (op == ERTS_PROC2PORT_SIG_EXEC) { + ErtsPortOpResult res; + + res = call_driver_call(sigdp->caller, + prt, + sigdp->u.call.command, + sigdp->u.call.bufp, + sigdp->u.call.size, + &resp_bufp, + &resp_size, + &ret_flags); + + if (res == ERTS_PORT_OP_DONE) { + Eterm msg; + Eterm *hp; + ErlHeapFragment *bp; + ErlOffHeap *ohp; + Process *rp; + ErtsProcLocks rp_locks = 0; + Uint hsz; + + rp = erts_proc_lookup_raw(sigdp->caller); + if (!rp) + goto done; + + hsz = erts_decode_ext_size((byte *) resp_bufp, resp_size); + if (hsz >= 0) { + Eterm *hp_start; + byte *endp; + + hsz += 3; /* ok tuple */ + hsz += ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE; + + hp_start = hp = erts_alloc_message_heap(hsz, + &bp, + &ohp, + rp, + &rp_locks); + endp = (byte *) resp_bufp; + msg = erts_decode_ext(&hp, ohp, &endp); + if (is_value(msg)) { + msg = TUPLE2(hp, am_ok, msg); + hp += 3; + + queue_port_sched_op_reply(rp, + &rp_locks, + hp_start, + hp, + hsz, + bp, + sigdp->ref, + msg); + + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + goto done; + } + if (bp) + free_message_buffer(bp); + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + } + } } - prt->caller = NIL; -#ifdef ERTS_SMP - if (prt->xports) - erts_smp_xports_unlock(prt); - ASSERT(!prt->xports); -#endif - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); - /* - * Handle the result. 
- */ + port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg); - if (n < 0) { - return THE_NON_VALUE; +done: + + if (resp_bufp != &resp_buf[0] && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) + driver_free(resp_bufp); + + cleanup_scheduled_call(sigdp->u.call.bufp); + + return ERTS_PORT_REDS_CALL; +} + + +ErtsPortOpResult +erts_port_call(Process* c_p, + Port *prt, + unsigned int command, + Eterm data, + Eterm *retvalp) +{ + ErtsPortOpResult res; + char input_buf[256]; + char *bufp; + byte *endp; + ErlDrvSizeT size; + int try_call; + erts_aint32_t sched_flags; + ErtsProc2PortSigData *sigdp; + + sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + if (sched_flags & ERTS_PTS_FLG_EXIT) { + return ERTS_PORT_OP_BADARG; } - if ((prt->control_flags & PORT_CONTROL_FLAG_BINARY) == 0) { /* List result */ - Eterm ret; - Eterm* hp = HAlloc(p, 2*n); - ret = buf_to_intlist(&hp, port_resp, n, NIL); - if (port_resp != port_result) { - driver_free(port_resp); + try_call = !(sched_flags & ERTS_PTS_FLGS_FORCE_SCHEDULE_OP); + + size = erts_encode_ext_size(data); + + if (!try_call) + bufp = erts_alloc(ERTS_ALC_T_DRV_CALL_DATA, size); + else if (size <= sizeof(input_buf)) + bufp = &input_buf[0]; + else + bufp = erts_alloc(ERTS_ALC_T_TMP, size); + + endp = (byte *) bufp; + erts_encode_ext(data, &endp); + + if (endp - (byte *) bufp > size) + ERTS_INTERNAL_ERROR("erts_internal:port_call() - Buffer overflow"); + + size = endp - (byte *) bufp; + + if (try_call) { + char resp_buf[255]; + char* resp_bufp = &resp_buf[0]; + ErlDrvSizeT resp_size = sizeof(resp_buf); + ErtsTryImmDrvCallResult try_call_res; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE( + c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + 0, + 0, + am_call); + + try_call_res = try_imm_drv_call(&try_call_state); + switch (try_call_res) { + case ERTS_TRY_IMM_DRV_CALL_OK: { + Eterm *hp, *hp_end; + Uint hsz; + unsigned ret_flags = 0U; + Eterm term; + + res = call_driver_call(c_p->common.id, + prt, + command, + bufp, + size, + &resp_bufp, + &resp_size, + &ret_flags); + + finalize_imm_drv_call(&try_call_state); + if (bufp != &input_buf[0]) + erts_free(ERTS_ALC_T_TMP, bufp); + if (res == ERTS_PORT_OP_BADARG) + return ERTS_PORT_OP_BADARG; + hsz = erts_decode_ext_size((byte *) resp_bufp, resp_size); + if (hsz < 0) + return ERTS_PORT_OP_BADARG; + hsz += 3; + hp = HAlloc(c_p, hsz); + hp_end = hp + hsz; + endp = (byte *) resp_bufp; + term = erts_decode_ext(&hp, &MSO(c_p), &endp); + if (term == THE_NON_VALUE) + return ERTS_PORT_OP_BADARG; + *retvalp = TUPLE2(hp, am_ok, term); + hp += 3; + HRelease(c_p, hp_end, hp); + if (resp_buf != &resp_buf[0] + && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) + driver_free(resp_buf); + BUMP_REDS(c_p, ERTS_PORT_REDS_CALL); + return ERTS_PORT_OP_DONE; } - return ret; - } - else if (port_resp == NULL) { - return NIL; - } - else { /* Binary result */ - ErlDrvBinary *dbin; - ErlHeapBin *hbin; - if (port_resp != port_result) { - dbin = (ErlDrvBinary *) port_resp; - if (dbin->orig_size > ERL_ONHEAP_BIN_LIMIT) { - ProcBin* pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE); - pb->thing_word = HEADER_PROC_BIN; - pb->size = dbin->orig_size; - pb->next = MSO(p).first; - MSO(p).first = (struct erl_off_heap_header*)pb; - pb->val = ErlDrvBinary2Binary(dbin); - pb->bytes = (byte*) dbin->orig_bytes; - pb->flags = 0; - OH_OVERHEAD(&(MSO(p)), dbin->orig_size / sizeof(Eterm)); - return make_binary(pb); - } - port_resp = dbin->orig_bytes; - n = dbin->orig_size; - } else { - dbin = NULL; + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + if 
(bufp != &input_buf[0]) + erts_free(ERTS_ALC_T_TMP, bufp); + return ERTS_PORT_OP_BADARG; + default: + /* Schedule call() call instead... */ + break; } - hbin = (ErlHeapBin*) HAlloc(p, heap_bin_size(n)); - ASSERT(n <= ERL_ONHEAP_BIN_LIMIT); - hbin->thing_word = header_heap_bin(n); - hbin->size = n; - sys_memcpy(hbin->data, port_resp, n); - if (dbin != NULL) { - driver_free_binary(dbin); + } + + /* Convert data into something that can be scheduled */ + + if (bufp == &input_buf[0] || try_call) { + char *new_bufp = erts_alloc(ERTS_ALC_T_DRV_CALL_DATA, size); + sys_memcpy(new_bufp, bufp, size); + if (bufp != &input_buf[0]) + erts_free(ERTS_ALC_T_TMP, bufp); + bufp = new_bufp; + } + + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_CALL; + sigdp->u.call.command = command; + sigdp->u.call.bufp = bufp; + sigdp->u.call.size = size; + + res = erts_schedule_proc2port_signal(c_p, + prt, + c_p->common.id, + retvalp, + sigdp, + 0, + port_sig_call); + if (res != ERTS_PORT_OP_SCHEDULED) { + cleanup_scheduled_call(bufp); + return ERTS_PORT_OP_BADARG; + } + return res; +} + +static Eterm +make_port_info_term(Eterm **hpp_start, + Eterm **hpp, + Uint *hszp, + ErlHeapFragment **bpp, + Port *prt, + Eterm item) +{ + ErlOffHeap *ohp; + + if (is_value(item)) { + if (erts_bld_port_info(NULL, NULL, hszp, prt, item) == am_false) + return THE_NON_VALUE; + if (*hszp) { + *bpp = new_message_buffer(*hszp); + *hpp_start = *hpp = (*bpp)->mem; + ohp = &(*bpp)->off_heap; } - return make_binary(hbin); + else { + *bpp = NULL; + *hpp_start = *hpp = NULL; + ohp = NULL; + } + return erts_bld_port_info(hpp, ohp, NULL, prt, item); + } + else { + int i; + int len; + int start; + static Eterm item[] = ERTS_PORT_INFO_1_ITEMS; + static Eterm value[sizeof(item)/sizeof(item[0])]; + + start = 0; + len = sizeof(item)/sizeof(item[0]); + + for (i = start; i < sizeof(item)/sizeof(item[0]); i++) { + ASSERT(is_atom(item[i])); + value[i] = erts_bld_port_info(NULL, NULL, hszp, prt, item[i]); + } + + if (value[0] == am_undefined) { + start++; + len--; + } + + erts_bld_list(NULL, hszp, len, &value[start]); + + *bpp = new_message_buffer(*hszp); + *hpp_start = *hpp = (*bpp)->mem; + ohp = &(*bpp)->off_heap; + + for (i = start; i < sizeof(item)/sizeof(item[0]); i++) + value[i] = erts_bld_port_info(hpp, ohp, NULL, prt, item[i]); + + return erts_bld_list(hpp, NULL, len, &value[start]); + } +} + +static int +port_sig_info(Port *prt, + erts_aint32_t state, + int op, + ErtsProc2PortSigData *sigdp) +{ + ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY); + if (op != ERTS_PROC2PORT_SIG_EXEC) + port_sched_op_reply(sigdp->caller, sigdp->ref, am_undefined); + else { + Eterm *hp, *hp_start; + Uint hsz; + ErlHeapFragment *bp; + Eterm value; + Process *rp; + ErtsProcLocks rp_locks = 0; + + rp = erts_proc_lookup_raw(sigdp->caller); + if (!rp) + return ERTS_PORT_REDS_INFO; + + hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE; + value = make_port_info_term(&hp_start, + &hp, + &hsz, + &bp, + prt, + sigdp->u.info.item); + if (is_value(value)) { + queue_port_sched_op_reply(rp, + &rp_locks, + hp_start, + hp, + hsz, + bp, + sigdp->ref, + value); + } + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + } + return ERTS_PORT_REDS_INFO; +} + +ErtsPortOpResult +erts_port_info(Process* c_p, + Port *prt, + Eterm item, + Eterm *retvalp) +{ + ErtsProc2PortSigData *sigdp; + ErtsTryImmDrvCallResult try_call_res; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE( + c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + 0, + 0, + 
am_info); + + try_call_res = try_imm_drv_call(&try_call_state); + switch (try_call_res) { + case ERTS_TRY_IMM_DRV_CALL_OK: { + Eterm *hp, *hp_start; + ErlHeapFragment *bp; + Uint hsz = 0; + Eterm value = make_port_info_term(&hp_start, &hp, &hsz, &bp, prt, item); + finalize_imm_drv_call(&try_call_state); + if (is_non_value(value)) + return ERTS_PORT_OP_BADARG; + else if (is_immed(value)) + *retvalp = value; + else { + Uint used_h_size = hp - hp_start; + hp = HAlloc(c_p, used_h_size); + *retvalp = copy_struct(value, used_h_size, &hp, &MSO(c_p)); + free_message_buffer(bp); + } + BUMP_REDS(c_p, ERTS_PORT_REDS_INFO); + return ERTS_PORT_OP_DONE; + } + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + return ERTS_PORT_OP_DROPPED; + case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS: + case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK: + /* Schedule call instead... */ + break; + } + + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_INFO; + sigdp->u.info.item = item; + + return erts_schedule_proc2port_signal(c_p, + prt, + c_p->common.id, + retvalp, + sigdp, + 0, + port_sig_info); +} + +static int +port_sig_set_data(Port *prt, + erts_aint32_t state, + int op, + ErtsProc2PortSigData *sigdp) +{ + ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY); + + if (op == ERTS_PROC2PORT_SIG_EXEC) { + if (prt->bp) + free_message_buffer(prt->bp); + prt->bp = sigdp->u.set_data.bp; + prt->data = sigdp->u.set_data.data; + port_sched_op_reply(sigdp->caller, sigdp->ref, am_true); + } + else { + if (sigdp->u.set_data.bp) + free_message_buffer(sigdp->u.set_data.bp); + port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg); + } + return ERTS_PORT_REDS_SET_DATA; +} + +ErtsPortOpResult +erts_port_set_data(Process* c_p, + Port *prt, + Eterm data, + Eterm *refp) +{ + ErtsPortOpResult res; + Eterm set_data; + ErlHeapFragment *bp; + ErtsProc2PortSigData *sigdp; + ErtsTryImmDrvCallResult try_call_res; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE( + c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + 0, + !refp, + am_set_data); + + if (is_immed(data)) { + set_data = data; + bp = NULL; + } + else { + Eterm *hp; + Uint sz = size_object(data); + bp = new_message_buffer(sz); + hp = bp->mem; + set_data = copy_struct(data, sz, &hp, &bp->off_heap); + } + + try_call_res = try_imm_drv_call(&try_call_state); + switch (try_call_res) { + case ERTS_TRY_IMM_DRV_CALL_OK: + if (prt->bp) + free_message_buffer(prt->bp); + prt->bp = bp; + prt->data = set_data; + finalize_imm_drv_call(&try_call_state); + BUMP_REDS(c_p, ERTS_PORT_REDS_SET_DATA); + return ERTS_PORT_OP_DONE; + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + return ERTS_PORT_OP_DROPPED; + case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS: + case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK: + /* Schedule call instead... 
*/ + break; + } + + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_SET_DATA; + sigdp->u.set_data.data = set_data; + sigdp->u.set_data.bp = bp; + + res = erts_schedule_proc2port_signal(c_p, + prt, + c_p->common.id, + refp, + sigdp, + 0, + port_sig_set_data); + if (res != ERTS_PORT_OP_SCHEDULED && bp) + free_message_buffer(bp); + return res; +} + +static int +port_sig_get_data(Port *prt, + erts_aint32_t state, + int op, + ErtsProc2PortSigData *sigdp) +{ + ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY); + if (op != ERTS_PROC2PORT_SIG_EXEC) + port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg); + else { + Process *rp; + ErtsProcLocks rp_locks = 0; + + rp = erts_proc_lookup_raw(sigdp->caller); + if (rp) { + Uint hsz; + Eterm *hp, *hp_start; + Eterm data, msg; + ErlHeapFragment *bp; + ErlOffHeap *ohp; + + hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE; + hsz += 3; + if (prt->bp) + hsz += prt->bp->used_size; + + hp_start = hp = erts_alloc_message_heap(hsz, + &bp, + &ohp, + rp, + &rp_locks); + + if (is_immed(prt->data)) + data = prt->data; + else + data = copy_struct(prt->data, + prt->bp->used_size, + &hp, + &bp->off_heap); + + + + msg = TUPLE2(hp, am_ok, data); + hp += 3; + + queue_port_sched_op_reply(rp, + &rp_locks, + hp_start, + hp, + hsz, + bp, + sigdp->ref, + msg); + if (rp_locks) + erts_smp_proc_unlock(rp, rp_locks); + } + } + return ERTS_PORT_REDS_GET_DATA; +} + +ErtsPortOpResult +erts_port_get_data(Process* c_p, + Port *prt, + Eterm *retvalp) +{ + ErtsProc2PortSigData *sigdp; + ErtsTryImmDrvCallResult try_call_res; + ErtsTryImmDrvCallState try_call_state + = ERTS_INIT_TRY_IMM_DRV_CALL_STATE( + c_p, + prt, + ERTS_PORT_SFLGS_INVALID_LOOKUP, + 0, + 0, + am_get_data); + + try_call_res = try_imm_drv_call(&try_call_state); + switch (try_call_res) { + case ERTS_TRY_IMM_DRV_CALL_OK: { + Eterm *hp; + Eterm data; + ErlHeapFragment *bp; + Uint sz; + if (is_immed(prt->data)) { + bp = NULL; + data = prt->data; + } + else { + bp = new_message_buffer(prt->bp->used_size); + data = copy_struct(prt->data, + prt->bp->used_size, + &hp, + &bp->off_heap); + } + finalize_imm_drv_call(&try_call_state); + if (is_immed(data)) + sz = 0; + else + sz = bp->used_size; + + hp = HAlloc(c_p, sz + 3); + if (is_not_immed(data)) { + data = copy_struct(data, bp->used_size, &hp, &MSO(c_p)); + free_message_buffer(bp); + } + *retvalp = TUPLE2(hp, am_ok, data); + BUMP_REDS(c_p, ERTS_PORT_REDS_GET_DATA); + return ERTS_PORT_OP_DONE; + } + case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT: + return ERTS_PORT_OP_DROPPED; + case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS: + case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK: + /* Schedule call instead... 
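Editor's sketch: the port_sig_set_data()/port_sig_get_data() callbacks above follow the contract every scheduled port signal honours: whether the signal is executed or the port is already gone, the callback replies to the caller, releases whatever the signal carries, and reports the reductions consumed. A minimal standalone sketch of that contract (op codes and names are illustrative):

    #include <stdio.h>

    enum { SIG_EXEC = 0, SIG_ABORTED = 1 };   /* illustrative op codes */

    struct sig { int op; const char *payload; };

    /* Executed or aborted, the callback must reply and clean up. */
    static int run_signal(struct sig *s)
    {
        if (s->op == SIG_EXEC)
            printf("reply to caller: {ok, ...} for %s\n", s->payload);
        else
            printf("reply to caller: badarg for %s\n", s->payload);
        /* any heap fragment or buffer carried by the signal is freed here */
        return 10;   /* reductions to charge */
    }

    int main(void)
    {
        struct sig executed = { SIG_EXEC, "get_data" };
        struct sig aborted  = { SIG_ABORTED, "set_data" };
        run_signal(&executed);
        run_signal(&aborted);
        return 0;
    }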
*/ + break; } + + sigdp = erts_port_task_alloc_p2p_sig_data(); + sigdp->flags = ERTS_P2P_SIG_TYPE_GET_DATA; + + return erts_schedule_proc2port_signal(c_p, + prt, + c_p->common.id, + retvalp, + sigdp, + 0, + port_sig_get_data); } typedef struct { @@ -2483,39 +4702,39 @@ static void prt_one_lnk(ErtsLink *lnk, void *vprtd) } void -print_port_info(int to, void *arg, int i) +print_port_info(Port *p, int to, void *arg) { - Port* p = &erts_port[i]; + erts_aint32_t state = erts_atomic32_read_nob(&p->state); - if (p->status & ERTS_PORT_SFLGS_DEAD) + if (state & ERTS_PORT_SFLGS_DEAD) return; - erts_print(to, arg, "=port:%T\n", p->id); - erts_print(to, arg, "Slot: %d\n", i); - if (p->status & ERTS_PORT_SFLG_CONNECTED) { - erts_print(to, arg, "Connected: %T", p->connected); + erts_print(to, arg, "=port:%T\n", p->common.id); + erts_print(to, arg, "Slot: %d\n", internal_port_index(p->common.id)); + if (state & ERTS_PORT_SFLG_CONNECTED) { + erts_print(to, arg, "Connected: %T", ERTS_PORT_GET_CONNECTED(p)); erts_print(to, arg, "\n"); } - if (p->nlinks != NULL) { + if (ERTS_P_LINKS(p)) { prt_one_lnk_data prtd; prtd.to = to; prtd.arg = arg; erts_print(to, arg, "Links: "); - erts_doforall_links(p->nlinks, &prt_one_lnk, &prtd); + erts_doforall_links(ERTS_P_LINKS(p), &prt_one_lnk, &prtd); erts_print(to, arg, "\n"); } - if (p->monitors != NULL) { + if (ERTS_P_MONITORS(p)) { prt_one_lnk_data prtd; prtd.to = to; prtd.arg = arg; erts_print(to, arg, "Monitors: "); - erts_doforall_monitors(p->monitors, &prt_one_monitor, &prtd); + erts_doforall_monitors(ERTS_P_MONITORS(p), &prt_one_monitor, &prtd); erts_print(to, arg, "\n"); } - if (p->reg != NULL) - erts_print(to, arg, "Registered as: %T\n", p->reg->name); + if (p->common.u.alive.reg != NULL) + erts_print(to, arg, "Registered as: %T\n", p->common.u.alive.reg->name); if (p->drv_ptr == &fd_driver) { erts_print(to, arg, "Port is UNIX fd not opened by emulator: %s\n", p->name); @@ -2529,109 +4748,143 @@ print_port_info(int to, void *arg, int i) } void -set_busy_port(ErlDrvPort port_num, int on) +set_busy_port(ErlDrvPort dprt, int on) { + Port *prt; + erts_aint32_t flags; + #ifdef USE_VM_PROBES DTRACE_CHARBUF(port_str, 16); #endif ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[port_num])); + prt = erts_drvport2port_raw(dprt); + if (!prt) + return; if (on) { - erts_port_status_bor_set(&erts_port[port_num], - ERTS_PORT_SFLG_PORT_BUSY); + flags = erts_smp_atomic32_read_bor_acqb(&prt->sched.flags, + ERTS_PTS_FLG_BUSY_PORT); + if (flags & ERTS_PTS_FLG_BUSY_PORT) + return; /* Already busy */ + + if (flags & ERTS_PTS_FLG_HAVE_NS_TASKS) + erts_port_task_abort_nosuspend_tasks(prt); + #ifdef USE_VM_PROBES if (DTRACE_ENABLED(port_busy)) { erts_snprintf(port_str, sizeof(port_str), - "%T", erts_port[port_num].id); + "%T", prt->common.id); DTRACE1(port_busy, port_str); } #endif } else { - ErtsProcList* plp = erts_port[port_num].suspended; - erts_port_status_band_set(&erts_port[port_num], - ~ERTS_PORT_SFLG_PORT_BUSY); - erts_port[port_num].suspended = NULL; + flags = erts_smp_atomic32_read_band_acqb(&prt->sched.flags, + ~ERTS_PTS_FLG_BUSY_PORT); + if (!(flags & ERTS_PTS_FLG_BUSY_PORT)) + return; /* Already non-busy */ #ifdef USE_VM_PROBES if (DTRACE_ENABLED(port_not_busy)) { erts_snprintf(port_str, sizeof(port_str), - "%T", erts_port[port_num].id); + "%T", prt->common.id); DTRACE1(port_not_busy, port_str); } #endif - if (erts_port[port_num].dist_entry) { + if (prt->dist_entry) { /* * Processes suspended on distribution ports are * normally queued on 
the dist entry. */ - erts_dist_port_not_busy(&erts_port[port_num]); + erts_dist_port_not_busy(prt); } - /* - * Resume, in a round-robin fashion, all processes waiting on the port. - * - * This version submitted by Tony Rogvall. The earlier version used - * to resume the processes in order, which caused starvation of all but - * the first process. - */ + if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q)) + erts_port_resume_procs(prt); + } +} + +void +erts_port_resume_procs(Port *prt) +{ + /* + * Resume, in a round-robin fashion, all processes waiting on the port. + * + * This version submitted by Tony Rogvall. The earlier version used + * to resume the processes in order, which caused starvation of all but + * the first process. + */ + ErtsProcList *plp; + + erts_port_task_sched_lock(&prt->sched); + plp = prt->suspended; + prt->suspended = NULL; + erts_port_task_sched_unlock(&prt->sched); + + if (erts_proclist_fetch(&plp, NULL)) { - if (plp) { #ifdef USE_VM_PROBES - /* - * Hrm, for blocked dist ports, plp always seems to be NULL. - * That's not so fun. - * Well, another way to get the same info is using a D - * script to correlate an earlier process-port_blocked+pid - * event with a later process-scheduled event. That's - * subject to the multi-CPU races with how events are - * handled, but hey, that way works most of the time. - */ - if (DTRACE_ENABLED(process_port_unblocked)) { - DTRACE_CHARBUF(pid_str, 16); - ErtsProcList* plp2 = plp; - - erts_snprintf(port_str, sizeof(port_str), - "%T", erts_port[port_num]); - while (plp2 != NULL) { - erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid); - DTRACE2(process_port_unblocked, pid_str, port_str); - } - } -#endif - /* First proc should be resumed last */ - if (plp->next) { - erts_resume_processes(plp->next); - plp->next = NULL; + /* + * Hrm, for blocked dist ports, plp always seems to be NULL. + * That's not so fun. + * Well, another way to get the same info is using a D + * script to correlate an earlier process-port_blocked+pid + * event with a later process-scheduled event. That's + * subject to the multi-CPU races with how events are + * handled, but hey, that way works most of the time. + */ + if (DTRACE_ENABLED(process_port_unblocked)) { + DTRACE_CHARBUF(port_str, 16); + DTRACE_CHARBUF(pid_str, 16); + ErtsProcList* plp2 = plp; + + erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id); + while (plp2 != NULL) { + erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid); + DTRACE2(process_port_unblocked, pid_str, port_str); } - erts_resume_processes(plp); - } + } +#endif + + /* First proc should be resumed last */ + if (plp->next) { + plp->next->prev = NULL; + erts_resume_processes(plp->next); + plp->next = NULL; + } + erts_resume_processes(plp); } } void set_port_control_flags(ErlDrvPort port_num, int flags) { - - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[port_num])); - - erts_port[port_num].control_flags = flags; + Port *prt = erts_drvport2port_raw(port_num); + if (prt) + prt->control_flags = flags; } -int get_port_flags(ErlDrvPort ix) { - Port* prt = erts_drvport2port(ix); +int get_port_flags(ErlDrvPort ix) +{ + int flags; + Port *prt; + erts_aint32_t state; + + prt = erts_drvport2port(ix, &state); + if (!prt) + return 0; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - if (prt == NULL) - return 0; + flags = 0; + if (state & ERTS_PORT_SFLG_BINARY_IO) + flags |= PORT_FLAG_BINARY; + if (state & ERTS_PORT_SFLG_LINEBUF_IO) + flags |= PORT_FLAG_LINE; - return (prt->status & ERTS_PORT_SFLG_BINARY_IO ? 
PORT_FLAG_BINARY : 0) - | (prt->status & ERTS_PORT_SFLG_LINEBUF_IO ? PORT_FLAG_LINE : 0); + return flags; } - void erts_raw_port_command(Port* p, byte* buf, Uint len) { int fpe_was_unmasked; @@ -2668,25 +4921,18 @@ int async_ready(Port *p, void* data) if (p) { ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); - ASSERT(!(p->status & ERTS_PORT_SFLGS_DEAD)); if (p->drv_ptr->ready_async != NULL) { #ifdef USE_VM_PROBES if (DTRACE_ENABLED(driver_ready_async)) { - DTRACE_FORMAT_COMMON_PID_AND_PORT(p->connected, p) + DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p) DTRACE3(driver_ready_async, process_str, port_str, p->name); } #endif (*p->drv_ptr->ready_async)((ErlDrvData)p->drv_data, data); need_free = 0; -#ifdef ERTS_SMP - if (p->xports) - erts_smp_xports_unlock(p); - ASSERT(!p->xports); -#endif - } - if ((p->status & ERTS_PORT_SFLG_CLOSING) && is_port_ioq_empty(p)) { - terminate_port(p); + } + erts_port_driver_callback_epilogue(p, NULL); } return need_free; } @@ -2694,12 +4940,12 @@ int async_ready(Port *p, void* data) static void report_missing_drv_callback(Port *p, char *drv_type, char *callback) { - ErtsPortNames *pnp = erts_get_port_names(p->id); + ErtsPortNames *pnp = erts_get_port_names(p->common.id); char *unknown = "<unknown>"; char *drv_name = pnp->driver_name ? pnp->driver_name : unknown; char *prt_name = pnp->name ? pnp->name : unknown; erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); - erts_dsprintf(dsbufp, "%T: %s driver '%s' ", p->id, drv_type, drv_name); + erts_dsprintf(dsbufp, "%T: %s driver '%s' ", p->common.id, drv_type, drv_name); if (sys_strcmp(drv_name, prt_name) != 0) erts_dsprintf(dsbufp, "(%s) ", prt_name); erts_dsprintf(dsbufp, "does not implement the %s callback!\n", callback); @@ -2714,7 +4960,7 @@ erts_stale_drv_select(Eterm port, int deselect) { char *type; - ErlDrvPort drv_port = internal_port_index(port); + ErlDrvPort drv_port = (ErlDrvPort) erts_port_lookup_raw(port); ErtsPortNames *pnp = erts_get_port_names(port); erts_dsprintf_buf_t *dsbufp; @@ -2754,16 +5000,16 @@ erts_stale_drv_select(Eterm port, ErtsPortNames * erts_get_port_names(Eterm id) { + Port *prt = erts_port_lookup_raw(id); ErtsPortNames *pnp; ASSERT(is_nil(id) || is_internal_port(id)); - - if (is_not_internal_port(id)) { + + if (!prt) { pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, sizeof(ErtsPortNames)); pnp->name = NULL; pnp->driver_name = NULL; } else { - Port* prt = &erts_port[internal_port_index(id)]; int do_realloc = 1; int len = -1; size_t pnp_len = sizeof(ErtsPortNames); @@ -2779,17 +5025,10 @@ erts_get_port_names(Eterm id) pnp_len = sizeof(ErtsPortNames) + len; pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, pnp_len); } - erts_smp_port_state_lock(prt); - if (id != prt->id) { - len = nlen = 0; - name = driver_name = NULL; - } - else { - name = prt->name; - len = nlen = name ? sys_strlen(name) + 1 : 0; - driver_name = (prt->drv_ptr ? prt->drv_ptr->name : NULL); - len += driver_name ? sys_strlen(driver_name) + 1 : 0; - } + name = prt->name; + len = nlen = name ? sys_strlen(name) + 1 : 0; + driver_name = (prt->drv_ptr ? prt->drv_ptr->name : NULL); + len += driver_name ? 
sys_strlen(driver_name) + 1 : 0; if (len <= pnp_len - sizeof(ErtsPortNames)) { if (!name) pnp->name = NULL; @@ -2807,7 +5046,6 @@ erts_get_port_names(Eterm id) } do_realloc = 0; } - erts_smp_port_state_unlock(prt); } while (do_realloc); } return pnp; @@ -2832,11 +5070,9 @@ static void schedule_port_timeout(Port *p) * /Rickard */ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); - (void) erts_port_task_schedule(p->id, - &p->timeout_task, - ERTS_PORT_TASK_TIMEOUT, - (ErlDrvEvent) -1, - NULL); + erts_port_task_schedule(p->common.id, + &p->timeout_task, + ERTS_PORT_TASK_TIMEOUT); } ErlDrvTermData driver_mk_term_nil(void) @@ -2844,9 +5080,9 @@ ErlDrvTermData driver_mk_term_nil(void) return driver_term_nil; } -void driver_report_exit(int ix, int status) +void driver_report_exit(ErlDrvPort ix, int status) { - Port* prt = erts_drvport2port(ix); + Port* prt = erts_drvport2port(ix, NULL); Eterm* hp; Eterm tuple; Process *rp; @@ -2859,7 +5095,7 @@ void driver_report_exit(int ix, int status) ERTS_SMP_CHK_NO_PROC_LOCKS; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - pid = prt->connected; + pid = ERTS_PORT_GET_CONNECTED(prt); ASSERT(is_internal_pid(pid)); rp = (scheduler @@ -2872,7 +5108,7 @@ void driver_report_exit(int ix, int status) tuple = TUPLE2(hp, am_exit_status, make_small(status)); hp += 3; - tuple = TUPLE2(hp, prt->id, tuple); + tuple = TUPLE2(hp, prt->common.id, tuple); erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined #ifdef USE_VM_PROBES @@ -2885,28 +5121,6 @@ void driver_report_exit(int ix, int status) erts_smp_proc_dec_refc(rp); } - -static ERTS_INLINE int -deliver_term_check_port(ErlDrvPort drvport) -{ - int res; - int ix = (int) drvport; - if (ix < 0 || erts_max_ports <= ix) - res = -1; /* invalid */ - else { - Port* prt = &erts_port[ix]; - erts_smp_port_state_lock(prt); - if (!(prt->status & ERTS_PORT_SFLGS_INVALID_LOOKUP)) - res = 1; /* ok */ - else if (prt->status & ERTS_PORT_SFLG_CLOSING) - res = 0; /* closing */ - else - res = -1; /* invalid (dead) */ - erts_smp_port_state_unlock(prt); - } - return res; -} - #define ERTS_B2T_STATES_DEF_STATES_SZ 5 #define ERTS_B2T_STATES_DEF_STATES_INC 100 @@ -2994,10 +5208,7 @@ cleanup_b2t_states(struct b2t_states__ *b2tsp) */ static int -driver_deliver_term(ErlDrvPort port, - Eterm to, - ErlDrvTermData* data, - int len) +driver_deliver_term(Eterm to, ErlDrvTermData* data, int len) { #define ERTS_DDT_FAIL do { res = -1; goto done; } while (0) Uint need = 0; @@ -3199,11 +5410,8 @@ driver_deliver_term(ErlDrvPort port, b2t.ix = 0; /* - * The term is OK. Go ahead and validate the port and process. + * The term is OK. Go ahead and validate the process. */ - res = deliver_term_check_port(port); - if (res <= 0) - goto done; /* * Increase refc on proc if done from a non-scheduler thread. 
@@ -3473,25 +5681,115 @@ driver_deliver_term(ErlDrvPort port, #undef ERTS_DDT_FAIL } +static ERTS_INLINE int +deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p) +{ +#ifdef ERTS_SMP + ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay(); +#endif + Port *prt = erts_port_lookup_raw((Eterm) port_id); + erts_aint32_t state = erts_atomic32_read_nob(&prt->state); + if (connected_p) { +#ifdef ERTS_SMP + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) + ETHR_MEMBAR(ETHR_LoadLoad); +#endif + *connected_p = ERTS_PORT_GET_CONNECTED(prt); + } +#ifdef ERTS_SMP + if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) { + erts_thr_progress_unmanaged_continue(dhndl); + ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); + } +#endif + ERTS_SMP_LC_ASSERT(dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED + ? erts_lc_is_port_locked(prt) + : !erts_lc_is_port_locked(prt)); + return ((state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) + ? -1 + : ((state & ERTS_PORT_SFLG_CLOSING) ? 0 : 1)); +} + +int erl_drv_output_term(ErlDrvTermData port_id, ErlDrvTermData* data, int len) +{ + /* May be called from arbitrary thread */ + Eterm connected; + int res = deliver_term_check_port(port_id, &connected); + if (res <= 0) + return res; + return driver_deliver_term(connected, data, len); +} +/* + * driver_output_term() is deprecated, and has been scheduled for + * removal in OTP-R17. It is replaced by erl_drv_output_term() + * above. + */ int -driver_output_term(ErlDrvPort ix, ErlDrvTermData* data, int len) +driver_output_term(ErlDrvPort drvport, ErlDrvTermData* data, int len) { - Port* prt = erts_drvport2port(ix); + erts_aint32_t state; + Port* prt; ERTS_SMP_CHK_NO_PROC_LOCKS; + /* NOTE! It *not* safe to access 'drvport' from unmanaged threads. */ + prt = erts_drvport2port(drvport, &state); + if (!prt) + return -1; /* invalid (dead) */ + ERTS_SMP_CHK_NO_PROC_LOCKS; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - - if (prt == NULL) + if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) return -1; - return driver_deliver_term(ix, prt->connected, data, len); + else if (state & ERTS_PORT_SFLG_CLOSING) + return 0; + + return driver_deliver_term(ERTS_PORT_GET_CONNECTED(prt), data, len); } +int erl_drv_send_term(ErlDrvTermData port_id, + ErlDrvTermData to, + ErlDrvTermData* data, + int len) +{ + /* May be called from arbitrary thread */ + int res = deliver_term_check_port(port_id, NULL); + if (res <= 0) + return res; + return driver_deliver_term(to, data, len); +} +/* + * driver_send_term() is deprecated, and has been scheduled for + * removal in OTP-R17. It is replaced by erl_drv_send_term() above. + */ int -driver_send_term(ErlDrvPort ix, ErlDrvTermData to, ErlDrvTermData* data, int len) +driver_send_term(ErlDrvPort drvport, + ErlDrvTermData to, + ErlDrvTermData* data, + int len) { - return driver_deliver_term(ix, to, data, len); + /* + * NOTE! It is *not* safe to access the 'drvport' parameter + * from unmanaged threads. Also note that it is impossible + * to make this access safe without using a less efficient + * internal data representation for ErlDrvPort. 
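Editor's sketch: for driver authors, the practical consequence of the block above is that erl_drv_output_term() and erl_drv_send_term() take the port's term handle (as returned by driver_mk_port()) rather than the ErlDrvPort handle, which is what makes them usable from any thread. A small usage sketch, assuming it runs inside a driver where an ErlDrvPort is at hand:

    #include "erl_driver.h"

    /* Send {ok, 17} to the port's connected process.  Only the term handle
     * is needed, so this is callable from any thread. */
    static void send_ok_17(ErlDrvTermData port_term)
    {
        ErlDrvTermData spec[] = {
            ERL_DRV_ATOM,  driver_mk_atom("ok"),
            ERL_DRV_INT,   (ErlDrvTermData) 17,
            ERL_DRV_TUPLE, 2
        };
        erl_drv_output_term(port_term, spec, (int) (sizeof(spec) / sizeof(spec[0])));
    }

    /* From a driver callback, where the ErlDrvPort handle is available: */
    static void example_callback(ErlDrvPort port)
    {
        send_ok_17(driver_mk_port(port));
    }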
+ */ + ERTS_SMP_CHK_NO_PROC_LOCKS; +#ifdef ERTS_SMP + if (erts_thr_progress_is_managed_thread()) +#endif + { + erts_aint32_t state; + Port* prt = erts_drvport2port(drvport, &state); + if (!prt) + return -1; /* invalid (dead) */ + ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) + return -1; + else if (state & ERTS_PORT_SFLG_CLOSING) + return 0; + } + return driver_deliver_term(to, data, len); } @@ -3503,26 +5801,27 @@ driver_send_term(ErlDrvPort ix, ErlDrvTermData to, ErlDrvTermData* data, int len int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, ErlDrvBinary* bin, ErlDrvSizeT offs, ErlDrvSizeT len) { - Port* prt = erts_drvport2port(ix); + erts_aint32_t state; + Port* prt = erts_drvport2port(ix, &state); ERTS_SMP_CHK_NO_PROC_LOCKS; if (prt == NULL) return -1; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - if (prt->status & ERTS_PORT_SFLG_CLOSING) + if (state & ERTS_PORT_SFLG_CLOSING) return 0; prt->bytes_in += (hlen + len); erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len)); - if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) { + if (state & ERTS_PORT_SFLG_DISTRIBUTION) { return erts_net_message(prt, prt->dist_entry, (byte*) hbuf, hlen, (byte*) (bin->orig_bytes+offs), len); } else - deliver_bin_message(prt, prt->connected, + deliver_bin_message(prt, ERTS_PORT_GET_CONNECTED(prt), hbuf, hlen, bin, offs, len); return 0; } @@ -3537,7 +5836,8 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, char* buf, ErlDrvSizeT len) { - Port* prt = erts_drvport2port(ix); + erts_aint32_t state; + Port* prt = erts_drvport2port(ix, &state); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -3546,12 +5846,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - if (prt->status & ERTS_PORT_SFLG_CLOSING) + if (state & ERTS_PORT_SFLG_CLOSING) return 0; prt->bytes_in += (hlen + len); erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len)); - if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) { + if (state & ERTS_PORT_SFLG_DISTRIBUTION) { if (len == 0) return erts_net_message(prt, prt->dist_entry, @@ -3563,10 +5863,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, (byte*) hbuf, hlen, (byte*) buf, len); } - else if(prt->status & ERTS_PORT_SFLG_LINEBUF_IO) - deliver_linebuf_message(prt, prt->connected, hbuf, hlen, buf, len); + else if (state & ERTS_PORT_SFLG_LINEBUF_IO) + deliver_linebuf_message(prt, state, ERTS_PORT_GET_CONNECTED(prt), + hbuf, hlen, buf, len); else - deliver_read_message(prt, prt->connected, hbuf, hlen, buf, len, 0); + deliver_read_message(prt, state, ERTS_PORT_GET_CONNECTED(prt), + hbuf, hlen, buf, len, 0); return 0; } @@ -3587,6 +5889,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, SysIOVec* iov; ErlDrvBinary** binv; Port* prt; + erts_aint32_t state; ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -3599,13 +5902,13 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, if (hlen < 0) hlen = 0; - prt = erts_drvport2port(ix); + prt = erts_drvport2port(ix, &state); if (prt == NULL) return -1; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - if (prt->status & ERTS_PORT_SFLG_CLOSING) + if (state & ERTS_PORT_SFLG_CLOSING) return 0; /* size > 0 ! */ @@ -3630,7 +5933,8 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, /* XXX handle distribution !!! 
*/ prt->bytes_in += (hlen + size); erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + size)); - deliver_vec_message(prt, prt->connected, hbuf, hlen, binv, iov, n, size); + deliver_vec_message(prt, ERTS_PORT_GET_CONNECTED(prt), hbuf, hlen, + binv, iov, n, size); return 0; } @@ -3741,8 +6045,7 @@ ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size) } -void driver_free_binary(dbin) -ErlDrvBinary* dbin; +void driver_free_binary(ErlDrvBinary* dbin) { Binary *bin; if (!dbin) { @@ -3838,6 +6141,7 @@ static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl) { ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) == 0); erts_mtx_destroy(&pdl->mtx); + erts_port_dec_refc(pdl->prt); erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl); } @@ -3875,16 +6179,18 @@ ErlDrvPDL driver_pdl_create(ErlDrvPort dp) { ErlDrvPDL pdl; - Port *pp = erts_drvport2port(dp); + Port *pp = erts_drvport2port(dp, NULL); if (!pp || pp->port_data_lock) return NULL; pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK, sizeof(struct erl_drv_port_data_lock)); erts_mtx_init(&pdl->mtx, "port_data_lock"); pdl_init_refc(pdl); + erts_port_inc_refc(pp); + pdl->prt = pp; pp->port_data_lock = pdl; #ifdef HARDDEBUG - erts_fprintf(stderr, "driver_pdl_create(%T) -> 0x%08X\r\n",pp->id,(unsigned) pdl); + erts_fprintf(stderr, "driver_pdl_create(%T) -> 0x%08X\r\n",pp->common.id,(unsigned) pdl); #endif return pdl; } @@ -4322,33 +6628,33 @@ static ERTS_INLINE void drv_cancel_timer(Port *prt) { #ifdef ERTS_SMP - erts_cancel_smp_ptimer(prt->ptimer); + erts_cancel_smp_ptimer(prt->common.u.alive.ptimer); #else - erts_cancel_timer(&prt->tm); + erts_cancel_timer(&prt->common.u.alive.tm); #endif if (erts_port_task_is_scheduled(&prt->timeout_task)) - erts_port_task_abort(prt->id, &prt->timeout_task); + erts_port_task_abort(&prt->timeout_task); } int driver_set_timer(ErlDrvPort ix, unsigned long t) { - Port* prt = erts_drvport2port(ix); + Port* prt = erts_drvport2port(ix, NULL); ERTS_SMP_CHK_NO_PROC_LOCKS; if (prt == NULL) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + if (prt->drv_ptr->timeout == NULL) return -1; drv_cancel_timer(prt); #ifdef ERTS_SMP - erts_create_smp_ptimer(&prt->ptimer, - prt->id, + erts_create_smp_ptimer(&prt->common.u.alive.ptimer, + prt->common.id, (ErlTimeoutProc) schedule_port_timeout, t); #else - erts_set_timer(&prt->tm, + erts_set_timer(&prt->common.u.alive.tm, (ErlTimeoutProc) schedule_port_timeout, NULL, prt, @@ -4359,7 +6665,7 @@ int driver_set_timer(ErlDrvPort ix, unsigned long t) int driver_cancel_timer(ErlDrvPort ix) { - Port* prt = erts_drvport2port(ix); + Port* prt = erts_drvport2port(ix, NULL); if (prt == NULL) return -1; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); @@ -4371,7 +6677,7 @@ int driver_cancel_timer(ErlDrvPort ix) int driver_read_timer(ErlDrvPort ix, unsigned long* t) { - Port* prt = erts_drvport2port(ix); + Port* prt = erts_drvport2port(ix, NULL); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -4379,9 +6685,11 @@ driver_read_timer(ErlDrvPort ix, unsigned long* t) return -1; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); #ifdef ERTS_SMP - *t = prt->ptimer ? erts_time_left(&prt->ptimer->timer.tm) : 0; + *t = (prt->common.u.alive.ptimer + ? 
erts_time_left(&prt->common.u.alive.ptimer->timer.tm) + : 0); #else - *t = erts_time_left(&prt->tm); + *t = erts_time_left(&prt->common.u.alive.tm); #endif return 0; } @@ -4432,8 +6740,8 @@ static int do_driver_monitor_process(Port *prt, } ref = erts_make_ref_in_buffer(buf); - erts_add_monitor(&(prt->monitors), MON_ORIGIN, ref, rp->id, NIL); - erts_add_monitor(&(rp->monitors), MON_TARGET, ref, prt->id, NIL); + erts_add_monitor(&ERTS_P_MONITORS(prt), MON_ORIGIN, ref, rp->common.id, NIL); + erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); ref_to_driver_monitor(ref,monitor); @@ -4443,31 +6751,22 @@ static int do_driver_monitor_process(Port *prt, /* * This can be called from a non scheduler thread iff a port_data_lock exists */ -int driver_monitor_process(ErlDrvPort port, +int driver_monitor_process(ErlDrvPort drvport, ErlDrvTermData process, ErlDrvMonitor *monitor) { Port *prt; int ret; - Uint32 status; + erts_aint32_t state; +#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)) ErtsSchedulerData *sched = erts_get_scheduler_data(); - int ix = (int) port; - if (ix < 0 || erts_max_ports <= ix) { - return -1; - } - prt = &erts_port[ix]; +#endif - DRV_MONITOR_LOCK_PDL(prt); + prt = DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(drvport); - if (sched) { - status = erts_port[ix].status; - } else { - erts_smp_port_state_lock(prt); - status = erts_port[ix].status; - erts_smp_port_state_unlock(prt); - } + state = erts_atomic32_read_nob(&prt->state); - if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) { + if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) { DRV_MONITOR_UNLOCK_PDL(prt); return -1; } @@ -4505,7 +6804,7 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf, memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE); ref = make_internal_ref(buf); - mon = erts_lookup_monitor(prt->monitors, ref); + mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref); if (mon == NULL) { return 1; } @@ -4517,13 +6816,13 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf, to, ERTS_PROC_LOCK_LINK, ERTS_P2P_FLG_ALLOW_OTHER_X); - mon = erts_remove_monitor(&(prt->monitors), ref); + mon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref); if (mon) { erts_destroy_monitor(mon); } if (rp) { ErtsMonitor *rmon; - rmon = erts_remove_monitor(&(rp->monitors), ref); + rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon != NULL) { erts_destroy_monitor(rmon); @@ -4532,30 +6831,21 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf, return 0; } -int driver_demonitor_process(ErlDrvPort port, +int driver_demonitor_process(ErlDrvPort drvport, const ErlDrvMonitor *monitor) { Port *prt; int ret; - Uint32 status; + erts_aint32_t state; +#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)) ErtsSchedulerData *sched = erts_get_scheduler_data(); - int ix = (int) port; - if (ix < 0 || erts_max_ports <= ix) { - return -1; - } - prt = &erts_port[ix]; +#endif - DRV_MONITOR_LOCK_PDL(prt); + prt = DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(drvport); - if (sched) { - status = erts_port[ix].status; - } else { - erts_smp_port_state_lock(prt); - status = erts_port[ix].status; - erts_smp_port_state_unlock(prt); - } + state = erts_atomic32_read_nob(&prt->state); - if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) { + if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) { DRV_MONITOR_UNLOCK_PDL(prt); return -1; } @@ -4591,7 +6881,7 @@ static ErlDrvTermData 
do_driver_get_monitored_process(Port *prt, Eterm *buf, memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE); ref = make_internal_ref(buf); - mon = erts_lookup_monitor(prt->monitors, ref); + mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref); if (mon == NULL) { return driver_term_nil; } @@ -4602,30 +6892,20 @@ static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf, } -ErlDrvTermData driver_get_monitored_process(ErlDrvPort port, +ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport, const ErlDrvMonitor *monitor) { Port *prt; ErlDrvTermData ret; - Uint32 status; + erts_aint32_t state; +#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)) ErtsSchedulerData *sched = erts_get_scheduler_data(); - int ix = (int) port; - if (ix < 0 || erts_max_ports <= ix) { - return driver_term_nil; - } - prt = &erts_port[ix]; - - DRV_MONITOR_LOCK_PDL(prt); +#endif - if (sched) { - status = erts_port[ix].status; - } else { - erts_smp_port_state_lock(prt); - status = erts_port[ix].status; - erts_smp_port_state_unlock(prt); - } + prt = DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(drvport); - if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) { + state = erts_atomic32_read_nob(&prt->state); + if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) { DRV_MONITOR_UNLOCK_PDL(prt); return driver_term_nil; } @@ -4670,7 +6950,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref) ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT(prt->drv_ptr != NULL); DRV_MONITOR_LOCK_PDL(prt); - if (erts_lookup_monitor(prt->monitors,ref) == NULL) { + if (erts_lookup_monitor(ERTS_P_MONITORS(prt), ref) == NULL) { DRV_MONITOR_UNLOCK_PDL(prt); return; } @@ -4680,7 +6960,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref) DRV_MONITOR_UNLOCK_PDL(prt); #ifdef USE_VM_PROBES if (DTRACE_ENABLED(driver_process_exit)) { - DTRACE_FORMAT_COMMON_PID_AND_PORT(prt->connected, prt) + DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(prt), prt) DTRACE3(driver_process_exit, process_str, port_str, prt->name); } #endif @@ -4689,7 +6969,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref) erts_unblock_fpe(fpe_was_unmasked); DRV_MONITOR_LOCK_PDL(prt); /* remove monitor *after* callback */ - rmon = erts_remove_monitor(&(prt->monitors),ref); + rmon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref); DRV_MONITOR_UNLOCK_PDL(prt); if (rmon) { erts_destroy_monitor(rmon); @@ -4700,7 +6980,8 @@ void erts_fire_port_monitor(Port *prt, Eterm ref) static int driver_failure_term(ErlDrvPort ix, Eterm term, int eof) { - Port* prt = erts_drvport2port(ix); + erts_aint32_t state; + Port* prt = erts_drvport2port(ix, &state); ERTS_SMP_CHK_NO_PROC_LOCKS; @@ -4708,19 +6989,19 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof) return -1; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); if (eof) - flush_linebuf_messages(prt); - if (prt->status & ERTS_PORT_SFLG_CLOSING) { + flush_linebuf_messages(prt, state); + if (state & ERTS_PORT_SFLG_CLOSING) { terminate_port(prt); - } else if (eof && (prt->status & ERTS_PORT_SFLG_SOFT_EOF)) { - deliver_result(prt->id, prt->connected, am_eof); + } else if (eof && (state & ERTS_PORT_SFLG_SOFT_EOF)) { + deliver_result(prt->common.id, ERTS_PORT_GET_CONNECTED(prt), am_eof); } else { - /* XXX UGLY WORK AROUND, Let do_exit_port terminate the port */ + /* XXX UGLY WORK AROUND, Let erts_deliver_port_exit() terminate the port */ if (prt->port_data_lock) driver_pdl_lock(prt->port_data_lock); prt->ioq.size = 0; if (prt->port_data_lock) driver_pdl_unlock(prt->port_data_lock); - 
erts_do_exit_port(prt, prt->id, eof ? am_normal : term); + erts_deliver_port_exit(prt, prt->common.id, eof ? am_normal : term, 0); } return 0; } @@ -4733,23 +7014,23 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof) */ int driver_exit(ErlDrvPort ix, int err) { - Port* prt = erts_drvport2port(ix); + Port* prt = erts_drvport2port(ix, NULL); Process* rp; ErtsLink *lnk, *rlnk = NULL; + Eterm connected; ERTS_SMP_CHK_NO_PROC_LOCKS; if (prt == NULL) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - - rp = erts_pid2proc(NULL, 0, prt->connected, ERTS_PROC_LOCK_LINK); + connected = ERTS_PORT_GET_CONNECTED(prt); + rp = erts_pid2proc(NULL, 0, connected, ERTS_PROC_LOCK_LINK); if (rp) { - rlnk = erts_remove_link(&(rp->nlinks),prt->id); + rlnk = erts_remove_link(&ERTS_P_LINKS(rp),prt->common.id); } - lnk = erts_remove_link(&(prt->nlinks),prt->connected); + lnk = erts_remove_link(&ERTS_P_LINKS(prt), connected); #ifdef ERTS_SMP if (rp) @@ -4814,24 +7095,24 @@ ErlDrvTermData driver_mk_atom(char* string) ErlDrvTermData driver_mk_port(ErlDrvPort ix) { - Port* prt = erts_drvport2port(ix); + Port* prt = erts_drvport2port(ix, NULL); ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - return (ErlDrvTermData) prt->id; + return (ErlDrvTermData) prt->common.id; } ErlDrvTermData driver_connected(ErlDrvPort ix) { - Port* prt = erts_drvport2port(ix); + Port* prt = erts_drvport2port(ix, NULL); ERTS_SMP_CHK_NO_PROC_LOCKS; if (prt == NULL) return NIL; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - return prt->connected; + return ERTS_PORT_GET_CONNECTED(prt); } ErlDrvTermData driver_caller(ErlDrvPort ix) { - Port* prt = erts_drvport2port(ix); + Port* prt = erts_drvport2port(ix, NULL); ERTS_SMP_CHK_NO_PROC_LOCKS; if (prt == NULL) return NIL; @@ -4841,25 +7122,25 @@ ErlDrvTermData driver_caller(ErlDrvPort ix) int driver_lock_driver(ErlDrvPort ix) { - Port* prt = erts_drvport2port(ix); + Port* prt = erts_drvport2port(ix, NULL); DE_Handle* dh; ERTS_SMP_CHK_NO_PROC_LOCKS; - erts_smp_mtx_lock(&erts_driver_list_lock); + erts_smp_rwmtx_rwlock(&erts_driver_list_lock); if (prt == NULL) { - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); return -1; } ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) { - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); return -1; } erts_ddll_lock_driver(dh, prt->drv_ptr->name); - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); return 0; } @@ -4869,7 +7150,7 @@ static int maybe_lock_driver_list(void) void *rec_lock; rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); if (rec_lock == 0) { - erts_smp_mtx_lock(&erts_driver_list_lock); + erts_smp_rwmtx_rwlock(&erts_driver_list_lock); return 1; } return 0; @@ -4877,7 +7158,7 @@ static int maybe_lock_driver_list(void) static void maybe_unlock_driver_list(int doit) { if (doit) { - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); } } /* @@ -5046,7 +7327,7 @@ no_event_callback(ErlDrvData drv_data, ErlDrvEvent event, ErlDrvEventData event_ { Port *prt = get_current_port(); report_missing_drv_callback(prt, "Event", "event()"); - driver_event((ErlDrvPort) internal_port_index(prt->id), event, NULL); + driver_event((ErlDrvPort) prt, event, NULL); } static void @@ -5054,7 +7335,7 @@ no_ready_input_callback(ErlDrvData drv_data, ErlDrvEvent event) { Port *prt = get_current_port(); 
report_missing_drv_callback(prt, "Input", "ready_input()"); - driver_select((ErlDrvPort) internal_port_index(prt->id), event, + driver_select((ErlDrvPort) prt, event, (ERL_DRV_READ | ERL_DRV_USE_NO_CALLBACK), 0); } @@ -5063,7 +7344,7 @@ no_ready_output_callback(ErlDrvData drv_data, ErlDrvEvent event) { Port *prt = get_current_port(); report_missing_drv_callback(prt, "Output", "ready_output()"); - driver_select((ErlDrvPort) internal_port_index(prt->id), event, + driver_select((ErlDrvPort) prt, event, (ERL_DRV_WRITE | ERL_DRV_USE_NO_CALLBACK), 0); } @@ -5098,16 +7379,16 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) drv->lock = NULL; else { drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK, - sizeof(erts_smp_mtx_t)); - erts_smp_mtx_init_x(drv->lock, - "driver_lock", + sizeof(erts_mtx_t)); + erts_mtx_init_x(drv->lock, + "driver_lock", #if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT) erts_atom_put((byte *) drv->name, sys_strlen(drv->name), ERTS_ATOM_ENC_LATIN1, 1) #else - NIL + NIL #endif ); } @@ -5179,7 +7460,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo int res; if (!driver_list_locked) { - erts_smp_mtx_lock(&erts_driver_list_lock); + erts_smp_rwmtx_rwlock(&erts_driver_list_lock); } dp->next = driver_list; @@ -5208,7 +7489,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo if (!driver_list_locked) { erts_smp_tsd_set(driver_list_lock_status_key, NULL); - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); } return res; } @@ -5221,7 +7502,7 @@ int remove_driver_entry(ErlDrvEntry *drv) rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); if (rec_lock == NULL) { - erts_smp_mtx_lock(&erts_driver_list_lock); + erts_smp_rwmtx_rwlock(&erts_driver_list_lock); } dp = driver_list; while (dp && dp->entry != drv) @@ -5229,7 +7510,7 @@ int remove_driver_entry(ErlDrvEntry *drv) if (dp) { if (dp->handle) { if (rec_lock == NULL) { - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); } return -1; } @@ -5243,12 +7524,12 @@ int remove_driver_entry(ErlDrvEntry *drv) } erts_destroy_driver(dp); if (rec_lock == NULL) { - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); } return 1; } if (rec_lock == NULL) { - erts_smp_mtx_unlock(&erts_driver_list_lock); + erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); } return 0; } @@ -5278,18 +7559,22 @@ erl_drv_getenv(char *key, char *value, size_t *value_size) * - uses the fact that heart_port is registered when starting heart */ -Port *erts_get_heart_port() { +Port *erts_get_heart_port(void) +{ + int ix, max = erts_ptab_max(&erts_port); - Port* port; - Uint ix; + for (ix = 0; ix < max; ix++) { + struct reg_proc *reg; + Port *port = erts_pix2port(ix); - for(ix = 0; ix < erts_max_ports; ix++) { - port = &erts_port[ix]; + if (!port) + continue; /* only examine undead or alive ports */ - if (port->status & ERTS_PORT_SFLGS_DEAD) + if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_DEAD) continue; /* immediate atom compare */ - if (port->reg && port->reg->name == am_heart_port) { + reg = port->common.u.alive.reg; + if (reg && reg->name == am_heart_port) { return port; } } diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c index c02872ef80..757e2800e6 100644 --- a/erts/emulator/beam/register.c +++ b/erts/emulator/beam/register.c @@ -175,14 +175,14 @@ int erts_register_name(Process *c_p, Eterm name, 
Eterm id) if (is_not_atom(name) || name == am_undefined) return res; - if (c_p->id == id) /* A very common case I think... */ + if (c_p->common.id == id) /* A very common case I think... */ proc = c_p; else { if (is_not_internal_pid(id) && is_not_internal_port(id)) return res; erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); if (is_internal_port(id)) { - port = erts_id2port(id, NULL, 0); + port = erts_id2port(id); if (!port) goto done; } @@ -204,7 +204,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) r.p = proc; if (!proc) goto done; - if (proc->reg) + if (proc->common.u.alive.reg) goto done; r.pt = NULL; } @@ -212,7 +212,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) ASSERT(!INVALID_PORT(port, id)); ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); r.pt = port; - if (r.pt->reg) + if (r.pt->common.u.alive.reg) goto done; r.p = NULL; } @@ -224,23 +224,24 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) if (IS_TRACED_FL(proc, F_TRACE_PROCS)) { trace_proc(c_p, proc, am_register, name); } - proc->reg = rp; + proc->common.u.alive.reg = rp; } else if (port && rp->pt == port) { if (IS_TRACED_FL(port, F_TRACE_PORTS)) { trace_port(port, am_register, name); } - port->reg = rp; + port->common.u.alive.reg = rp; } - if ((rp->p && rp->p->id == id) || (rp->pt && rp->pt->id == id)) { + if ((rp->p && rp->p->common.id == id) + || (rp->pt && rp->pt->common.id == id)) { res = 1; } done: reg_write_unlock(); if (port) - erts_smp_port_unlock(port); + erts_port_release(port); if (c_p != proc) { if (proc) erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); @@ -291,9 +292,9 @@ erts_whereis_name_to_id(Process *c_p, Eterm name) * is read only. */ if (rp->p) - res = rp->p->id; + res = rp->p->common.id; else if (rp->pt) - res = rp->pt->id; + res = rp->pt->common.id; break; } b = b->next; @@ -396,28 +397,26 @@ erts_whereis_name(Process *c_p, if (!rp || !rp->pt) *port = NULL; else { -#ifndef ERTS_SMP - erts_smp_atomic_inc_nob(&rp->pt->refc); -#else +#ifdef ERTS_SMP if (pending_port == rp->pt) pending_port = NULL; else { if (pending_port) { /* Ahh! Registered port changed while reg lock was unlocked... */ - erts_smp_port_unlock(pending_port); + erts_port_release(pending_port); pending_port = NULL; } if (erts_smp_port_trylock(rp->pt) == EBUSY) { - Eterm id = rp->pt->id; /* id read only... */ + Eterm id = rp->pt->common.id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... 
*/ if (current_c_p_locks) { erts_smp_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_read_unlock(); - pending_port = erts_id2port(id, NULL, 0); + pending_port = erts_id2port(id); goto restart; } } @@ -431,7 +430,7 @@ erts_whereis_name(Process *c_p, if (c_p && !current_c_p_locks) erts_smp_proc_lock(c_p, c_p_locks); if (pending_port) - erts_smp_port_unlock(pending_port); + erts_port_release(pending_port); #endif reg_read_unlock(); @@ -493,8 +492,8 @@ int erts_unregister_name(Process *c_p, current_c_p_locks = c_p_locks; } #endif - if (c_p->reg) { - r.name = c_p->reg->name; + if (c_p->common.u.alive.reg) { + r.name = c_p->common.u.alive.reg->name; } else { /* Name got unregistered while main lock was released */ res = 0; @@ -505,24 +504,22 @@ int erts_unregister_name(Process *c_p, if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) { if (rp->pt) { if (port != rp->pt) { -#ifndef ERTS_SMP - erts_smp_atomic_inc_nob(&rp->pt->refc); -#else +#ifdef ERTS_SMP if (port) { ASSERT(port != c_prt); - erts_smp_port_unlock(port); + erts_port_release(port); port = NULL; } if (erts_smp_port_trylock(rp->pt) == EBUSY) { - Eterm id = rp->pt->id; /* id read only... */ + Eterm id = rp->pt->common.id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... */ if (current_c_p_locks) { erts_smp_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_write_unlock(); - port = erts_id2port(id, NULL, 0); + port = erts_id2port(id); goto restart; } #endif @@ -532,7 +529,7 @@ int erts_unregister_name(Process *c_p, ASSERT(rp->pt == port); ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); - rp->pt->reg = NULL; + rp->pt->common.u.alive.reg = NULL; if (IS_TRACED_FL(port, F_TRACE_PORTS)) { trace_port(port, am_unregister, r.name); @@ -549,7 +546,7 @@ int erts_unregister_name(Process *c_p, ERTS_PROC_LOCK_MAIN); current_c_p_locks = c_p_locks; #endif - rp->p->reg = NULL; + rp->p->common.u.alive.reg = NULL; if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) { trace_proc(c_p, rp->p, am_unregister, r.name); } @@ -568,7 +565,7 @@ int erts_unregister_name(Process *c_p, reg_write_unlock(); if (c_prt != port) { if (port) { - erts_smp_port_unlock(port); + erts_port_release(port); } if (c_prt) { erts_smp_port_lock(c_prt); diff --git a/erts/emulator/beam/register.h b/erts/emulator/beam/register.h index 38e8cfbf28..7170463375 100644 --- a/erts/emulator/beam/register.h +++ b/erts/emulator/beam/register.h @@ -24,26 +24,19 @@ #ifndef __REGPROC_H__ #define __REGPROC_H__ -#ifndef __SYS_H__ #include "sys.h" -#endif - -#ifndef __HASH_H__ #include "hash.h" -#endif - -#ifndef __PROCESS_H__ #include "erl_process.h" -#endif - -struct port; +#define ERL_PORT_GET_PORT_TYPE_ONLY__ +#include "erl_port.h" +#undef ERL_PORT_GET_PORT_TYPE_ONLY__ typedef struct reg_proc { HashBucket bucket; /* MUST BE LOCATED AT TOP OF STRUCT!!! 
*/ Process *p; /* The process registered (only one of this and 'pt' is non-NULL */ - struct port *pt; /* The port registered */ + Port *pt; /* The port registered */ Eterm name; /* Atom name */ } RegProc; @@ -55,12 +48,12 @@ int erts_register_name(Process *, Eterm, Eterm); Eterm erts_whereis_name_to_id(Process *, Eterm); void erts_whereis_name(Process *, ErtsProcLocks, Eterm, Process**, ErtsProcLocks, int, - struct port**); + Port**); Process *erts_whereis_process(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int); -int erts_unregister_name(Process *, ErtsProcLocks, struct port *, Eterm); +int erts_unregister_name(Process *, ErtsProcLocks, Port *, Eterm); #endif diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index f5f10bb616..cecaff54a4 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -116,6 +116,16 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType; # define ERTS_DECLARE_DUMMY(X) X #endif +#if !defined(__func__) +# if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L +# if !defined(__GNUC__) || __GNUC__ < 2 +# define __func__ "[unknown_function]" +# else +# define __func__ __FUNCTION__ +# endif +# endif +#endif + #if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK) # undef ERTS_CAN_INLINE # define ERTS_CAN_INLINE 0 @@ -231,9 +241,11 @@ void erl_assert_error(char* expr, char* file, int line); #if SIZEOF_VOID_P == 8 #undef ARCH_32 #define ARCH_64 +#define ERTS_SIZEOF_TERM 8 #elif SIZEOF_VOID_P == 4 #define ARCH_32 #undef ARCH_64 +#define ERTS_SIZEOF_TERM 4 #else #error Neither 32 nor 64 bit architecture #endif @@ -241,6 +253,8 @@ void erl_assert_error(char* expr, char* file, int line); # define HALFWORD_HEAP 1 # define HALFWORD_ASSERT 0 # define ASSERT_HALFWORD(COND) ASSERT(COND) +# undef ERTS_SIZEOF_TERM +# define ERTS_SIZEOF_TERM 4 #else # define HALFWORD_HEAP 0 # define HALFWORD_ASSERT 0 @@ -367,6 +381,27 @@ typedef unsigned char byte; #error 64-bit architecture, but no appropriate type to use for Uint64 and Sint64 found #endif +#ifdef WORDS_BIGENDIAN +# define ERTS_HUINT_HVAL_HIGH 0 +# define ERTS_HUINT_HVAL_LOW 1 +#else +# define ERTS_HUINT_HVAL_HIGH 1 +# define ERTS_HUINT_HVAL_LOW 0 +#endif +#if ERTS_SIZEOF_TERM == 8 +typedef union { + Uint val; + Uint32 hval[2]; +} HUint; +#elif ERTS_SIZEOF_TERM == 4 +typedef union { + Uint val; + Uint16 hval[2]; +} HUint; +#else +#error "Unsupported size of term" +#endif + # define ERTS_EXTRA_DATA_ALIGN_SZ(X) \ (((size_t) 8) - (((size_t) (X)) & ((size_t) 7))) @@ -505,6 +540,10 @@ __decl_noreturn void __noreturn erl_exit(int n, char*, ...); #define ERTS_ABORT_EXIT (INT_MIN + 1) /* no crash dump; only abort() */ #define ERTS_DUMP_EXIT (INT_MIN + 2) /* crash dump; then exit() */ +#define ERTS_INTERNAL_ERROR(What) \ + erl_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Internal error: %s\n", \ + __FILE__, __LINE__, __func__, What) + Eterm erts_check_io_info(void *p); /* Size of misc memory allocated from system dependent code */ @@ -579,6 +618,7 @@ typedef struct _SysDriverOpts { char *wd; /* Working directory. 
*/ unsigned spawn_type; /* Bitfield of ERTS_SPAWN_DRIVER | ERTS_SPAWN_EXTERNAL | both*/ + int parallelism; /* Optimize for parallelism */ } SysDriverOpts; extern char *erts_default_arg0; @@ -689,9 +729,12 @@ char * getenv_string(GETENV_STATE *); void fini_getenv_state(GETENV_STATE *); /* xxxP */ +#define SYS_DEFAULT_FLOAT_DECIMALS 20 void init_sys_float(void); int sys_chars_to_double(char*, double*); int sys_double_to_chars(double, char*, size_t); +int sys_double_to_chars_ext(double, char*, size_t, size_t); +int sys_double_to_chars_fast(double, char*, int, int, int); void sys_get_pid(char *, size_t); /* erts_sys_putenv() returns, 0 on success and a value != 0 on failure. */ diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 97b6d01207..2a6a8efd4d 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -46,6 +46,7 @@ #include "erl_thr_queue.h" #include "erl_sched_spec_pre_alloc.h" #include "beam_bp.h" +#include "erl_ptab.h" #undef M_TRIM_THRESHOLD #undef M_TOP_PAD @@ -3016,12 +3017,13 @@ buf_to_intlist(Eterm** hpp, char *buf, size_t len, Eterm tail) ** ; ** ** Return remaining bytes in buffer on success -** -1 on overflow -** -2 on type error (including that result would not be a whole number of bytes) +** ERTS_IOLIST_TO_BUF_OVERFLOW on overflow +** ERTS_IOLIST_TO_BUF_TYPE_ERROR on type error (including that result would not be a whole number of bytes) */ -int io_list_to_buf(Eterm obj, char* buf, int len) +ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len) { + ErlDrvSizeT len = (ErlDrvSizeT) alloced_len; Eterm* objp; DECLARE_ESTACK(s); goto L_again; @@ -3114,20 +3116,20 @@ int io_list_to_buf(Eterm obj, char* buf, int len) L_type_error: DESTROY_ESTACK(s); - return -2; + return ERTS_IOLIST_TO_BUF_TYPE_ERROR; L_overflow: DESTROY_ESTACK(s); - return -1; + return ERTS_IOLIST_TO_BUF_OVERFLOW; } /* * Return 0 if successful, and non-zero if unsuccessful. */ -int erts_iolist_size(Eterm obj, Uint* sizep) +int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep) { Eterm* objp; - Uint size = 0; + Uint size = 0; /* Intentionally Uint due to halfword heap */ DECLARE_ESTACK(s); goto L_again; @@ -3179,7 +3181,7 @@ int erts_iolist_size(Eterm obj, Uint* sizep) #undef SAFE_ADD DESTROY_ESTACK(s); - *sizep = size; + *sizep = (ErlDrvSizeT) size; return ERTS_IOLIST_OK; L_overflow_error: diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c index 912f5d3d8b..186af03eff 100644 --- a/erts/emulator/drivers/common/efile_drv.c +++ b/erts/emulator/drivers/common/efile_drv.c @@ -56,7 +56,8 @@ #define FILE_FDATASYNC 30 #define FILE_FADVISE 31 #define FILE_SENDFILE 32 - +#define FILE_FALLOCATE 33 +#define FILE_CLOSE_ON_PORT_EXIT 34 /* Return codes */ #define FILE_RESP_OK 0 @@ -177,6 +178,7 @@ dt_private *get_dt_private(int); #define MUTEX_LOCK(m) do { IF_THRDS { TRACE_DRIVER; driver_pdl_lock(m); } } while (0) #define MUTEX_UNLOCK(m) do { IF_THRDS { TRACE_DRIVER; driver_pdl_unlock(m); } } while (0) #else +#define IF_THRDS if (0) #define MUTEX_INIT(m, p) #define MUTEX_LOCK(m) #define MUTEX_UNLOCK(m) @@ -428,6 +430,7 @@ struct t_data int level; void (*invoke)(void *); void (*free)(void *); + void *data_to_free; /* used by FILE_CLOSE_ON_PORT_EXIT only */ int again; int reply; #ifdef USE_VM_PROBES @@ -439,6 +442,7 @@ struct t_data Efile_error errInfo; int flags; SWord fd; + int is_fd_unused; /**/ Efile_info info; EFILE_DIR_HANDLE dir_handle; /* Handle to open directory. 
*/ @@ -503,6 +507,10 @@ struct t_data Uint64 written; } sendfile; #endif /* HAVE_SENDFILE */ + struct { + Sint64 offset; + Sint64 length; + } fallocate; } c; char b[1]; }; @@ -781,11 +789,6 @@ file_start(ErlDrvPort port, char* command) return (ErlDrvData) desc; } -static void free_data(void *data) -{ - EF_FREE(data); -} - static void do_close(int flags, SWord fd) { if (flags & EFILE_COMPRESSED) { erts_gzclose((gzFile)(fd)); @@ -803,25 +806,27 @@ static void invoke_close(void *data) DTRACE_INVOKE_RETURN(FILE_CLOSE); } -/********************************************************************* - * Driver entry point -> stop - */ -static void -file_stop(ErlDrvData e) +static void free_data(void *data) { - file_descriptor* desc = (file_descriptor*)e; - - TRACE_C('p'); + struct t_data *d = (struct t_data *) data; - if (desc->fd != FILE_FD_INVALID) { - do_close(desc->flags, desc->fd); - desc->fd = FILE_FD_INVALID; - desc->flags = 0; - } - if (desc->read_binp) { - driver_free_binary(desc->read_binp); + switch (d->command) { + case FILE_OPEN: + if (d->is_fd_unused && d->fd != FILE_FD_INVALID) { + /* This is OK to do in scheduler thread because there can be no async op + ongoing for this fd here, as we exited during async open. + Ideally, this close should happen in an async thread too, but that would + require a substantial rewrite, as we are here because of a dead port and + cannot schedule async jobs for that port any more... */ + do_close(d->flags, d->fd); + } + break; + case FILE_CLOSE_ON_PORT_EXIT: + EF_FREE(d->data_to_free); + break; } - EF_FREE(desc); + + EF_FREE(data); } @@ -1862,6 +1867,9 @@ static void invoke_open(void *data) } d->result_ok = status; + if (!status) { + d->fd = FILE_FD_INVALID; + } DTRACE_INVOKE_RETURN(FILE_OPEN); } @@ -1953,6 +1961,17 @@ static int flush_sendfile(file_descriptor *desc,void *_) { #endif /* HAVE_SENDFILE */ +static void invoke_fallocate(void *data) +{ + struct t_data *d = (struct t_data *) data; + int fd = (int) d->fd; + Sint64 offset = d->c.fallocate.offset; + Sint64 length = d->c.fallocate.length; + + d->again = 0; + d->result_ok = efile_fallocate(&d->errInfo, fd, offset, length); +} + static void free_readdir(void *data) { struct t_data *d = (struct t_data *) data; @@ -2216,6 +2235,47 @@ static int lseek_flush_read(file_descriptor *desc, int *errp } +/********************************************************************* + * Driver entry point -> stop + * The close has to be scheduled on async thread, so that currently active + * async operation does not suddenly have the ground disappearing under their feet... 
+ */ +static void +file_stop(ErlDrvData e) +{ + file_descriptor* desc = (file_descriptor*)e; + + TRACE_C('p'); + + IF_THRDS { + flush_read(desc); + if (desc->fd != FILE_FD_INVALID) { + struct t_data *d = EF_SAFE_ALLOC(sizeof(struct t_data)); + d->command = FILE_CLOSE_ON_PORT_EXIT; + d->reply = !0; + d->fd = desc->fd; + d->flags = desc->flags; + d->invoke = invoke_close; + d->free = free_data; + d->level = 2; + d->data_to_free = (void *) desc; + cq_enq(desc, d); + desc->fd = FILE_FD_INVALID; + desc->flags = 0; + cq_execute(desc); + } + } else { + if (desc->fd != FILE_FD_INVALID) { + do_close(desc->flags, desc->fd); + desc->fd = FILE_FD_INVALID; + desc->flags = 0; + } + if (desc->read_binp) { + driver_free_binary(desc->read_binp); + } + EF_FREE(desc); + } +} /********************************************************************* * Driver entry point -> ready_async @@ -2348,6 +2408,7 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data) case FILE_RENAME: case FILE_WRITE_INFO: case FILE_FADVISE: + case FILE_FALLOCATE: reply(desc, d->result_ok, &d->errInfo); free_data(data); break; @@ -2373,8 +2434,10 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data) if (!d->result_ok) { reply_error(desc, &d->errInfo); } else { + ASSERT(d->is_fd_unused); desc->fd = d->fd; desc->flags = d->flags; + d->is_fd_unused = 0; reply_Uint(desc, d->fd); } free_data(data); @@ -2436,7 +2499,6 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data) } free_readdir(data); break; - /* See file_stop */ case FILE_CLOSE: if (d->reply) { TRACE_C('K'); @@ -2496,6 +2558,15 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data) } break; #endif + case FILE_CLOSE_ON_PORT_EXIT: + /* See file_stop. However this is never invoked after the port is killed. */ + free_data(data); + EF_FREE(desc); + desc = NULL; + /* This is it for this port, so just send dtrace and return, avoid doing anything to the freed data */ + DTRACE6(efile_drv_return, sched_i1, sched_i2, sched_utag, + command, result_ok, posix_errno); + return; default: abort(); } @@ -2506,6 +2577,7 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data) driver_set_timer(desc->port, desc->write_delay); } cq_execute(desc); + } @@ -2745,6 +2817,7 @@ file_output(ErlDrvData e, char* buf, ErlDrvSizeT count) d->invoke = invoke_open; d->free = free_data; d->level = 2; + d->is_fd_unused = 1; goto done; } @@ -2958,6 +3031,20 @@ file_output(ErlDrvData e, char* buf, ErlDrvSizeT count) goto done; } + case FILE_FALLOCATE: + { + d = EF_SAFE_ALLOC(sizeof(struct t_data)); + + d->fd = fd; + d->command = command; + d->invoke = invoke_fallocate; + d->free = free_data; + d->level = 2; + d->c.fallocate.offset = get_int64((uchar*) buf); + d->c.fallocate.length = get_int64(((uchar*) buf) + sizeof(Sint64)); + goto done; + } + } /* diff --git a/erts/emulator/drivers/common/erl_efile.h b/erts/emulator/drivers/common/erl_efile.h index 69ad02633c..b29b4f971c 100644 --- a/erts/emulator/drivers/common/erl_efile.h +++ b/erts/emulator/drivers/common/erl_efile.h @@ -185,3 +185,4 @@ int efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset, Sint64 length, int efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd, off_t *offset, Uint64 *nbytes, struct t_sendfile_hdtl *hdtl); #endif /* HAVE_SENDFILE */ +int efile_fallocate(Efile_error* errInfo, int fd, Sint64 offset, Sint64 length); diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 3210ffa92a..f0c22e9ebe 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ 
b/erts/emulator/drivers/common/inet_drv.c @@ -678,6 +678,8 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define INET_LOPT_UDP_READ_PACKETS 33 /* Number of packets to read */ #define INET_OPT_RAW 34 /* Raw socket options */ #define INET_LOPT_TCP_SEND_TIMEOUT_CLOSE 35 /* auto-close on send timeout or not */ +#define INET_LOPT_TCP_MSGQ_HIWTRMRK 36 /* set local high watermark */ +#define INET_LOPT_TCP_MSGQ_LOWTRMRK 37 /* set local low watermark */ /* SCTP options: a separate range, from 100: */ #define SCTP_OPT_RTOINFO 100 #define SCTP_OPT_ASSOCINFO 101 @@ -788,6 +790,8 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n) #define INET_HIGH_WATERMARK (1024*8) /* 8k pending high => busy */ #define INET_LOW_WATERMARK (1024*4) /* 4k pending => allow more */ +#define INET_HIGH_MSGQ_WATERMARK (1024*8) /* 8k pending high => busy */ +#define INET_LOW_MSGQ_WATERMARK (1024*4) /* 4k pending => allow more */ #define INET_INFINITY 0xffffffff /* infinity value */ @@ -879,7 +883,7 @@ typedef struct subs_list_ { #define NO_PROCESS 0 #define NO_SUBSCRIBERS(SLP) ((SLP)->subscriber == NO_PROCESS) -static void send_to_subscribers(ErlDrvPort, subs_list *, int, +static void send_to_subscribers(ErlDrvTermData, subs_list *, int, ErlDrvTermData [], int); static void free_subscribers(subs_list*); static int save_subscriber(subs_list *, ErlDrvTermData); @@ -929,12 +933,20 @@ typedef struct { int bufsz; /* minimum buffer constraint */ unsigned int hsz; /* the list header size, -1 is large !!! */ /* statistics */ - unsigned long recv_oct[2]; /* number of received octets >= 64 bits */ +#ifdef ARCH_64 + Uint64 recv_oct; /* number of received octets, 64 bits */ +#else + Uint32 recv_oct[2]; /* number of received octets, 64 bits */ +#endif unsigned long recv_cnt; /* number of packets received */ unsigned long recv_max; /* maximum packet size received */ double recv_avg; /* average packet size received */ double recv_dvi; /* avarage deviation from avg_size */ - unsigned long send_oct[2]; /* number of octets sent >= 64 bits */ +#ifdef ARCH_64 + Uint64 send_oct; /* number of octets sent, 64 bits */ +#else + Uint32 send_oct[2]; /* number of octets sent, 64 bits */ +#endif unsigned long send_cnt; /* number of packets sent */ unsigned long send_max; /* maximum packet send */ double send_avg; /* average packet size sent */ @@ -1873,8 +1885,7 @@ static int deq_async(inet_descriptor* desc, int* ap, ErlDrvTermData* cp, int* rp ** {inet_async, Port, Ref, ok} */ static int -send_async_ok(ErlDrvPort port, ErlDrvTermData Port, int Ref, - ErlDrvTermData recipient) +send_async_ok(ErlDrvTermData Port, int Ref,ErlDrvTermData recipient) { ErlDrvTermData spec[2*LOAD_ATOM_CNT + LOAD_PORT_CNT + LOAD_INT_CNT + LOAD_TUPLE_CNT]; @@ -1888,14 +1899,14 @@ send_async_ok(ErlDrvPort port, ErlDrvTermData Port, int Ref, ASSERT(i == sizeof(spec)/sizeof(*spec)); - return driver_send_term(port, recipient, spec, i); + return erl_drv_send_term(Port, recipient, spec, i); } /* send message: ** {inet_async, Port, Ref, {ok,Port2}} */ static int -send_async_ok_port(ErlDrvPort port, ErlDrvTermData Port, int Ref, +send_async_ok_port(ErlDrvTermData Port, int Ref, ErlDrvTermData recipient, ErlDrvTermData Port2) { ErlDrvTermData spec[2*LOAD_ATOM_CNT + 2*LOAD_PORT_CNT + @@ -1914,14 +1925,14 @@ send_async_ok_port(ErlDrvPort port, ErlDrvTermData Port, int Ref, ASSERT(i == sizeof(spec)/sizeof(*spec)); - return driver_send_term(port, recipient, spec, i); + return erl_drv_send_term(Port, recipient, spec, i); } /* send message: ** 
{inet_async, Port, Ref, {error,Reason}} */ static int -send_async_error(ErlDrvPort port, ErlDrvTermData Port, int Ref, +send_async_error(ErlDrvTermData Port, int Ref, ErlDrvTermData recipient, ErlDrvTermData Reason) { ErlDrvTermData spec[3*LOAD_ATOM_CNT + LOAD_PORT_CNT + @@ -1939,7 +1950,7 @@ send_async_error(ErlDrvPort port, ErlDrvTermData Port, int Ref, i = LOAD_TUPLE(spec, i, 4); ASSERT(i == sizeof(spec)/sizeof(*spec)); DEBUGF(("send_async_error %ld %ld\r\n", recipient, Reason)); - return driver_send_term(port, recipient, spec, i); + return erl_drv_send_term(Port, recipient, spec, i); } @@ -1951,7 +1962,7 @@ static int async_ok(inet_descriptor* desc) if (deq_async(desc, &aid, &caller, &req) < 0) return -1; - return send_async_ok(desc->port, desc->dport, aid, caller); + return send_async_ok(desc->dport, aid, caller); } static int async_ok_port(inet_descriptor* desc, ErlDrvTermData Port2) @@ -1962,7 +1973,7 @@ static int async_ok_port(inet_descriptor* desc, ErlDrvTermData Port2) if (deq_async(desc, &aid, &caller, &req) < 0) return -1; - return send_async_ok_port(desc->port, desc->dport, aid, caller, Port2); + return send_async_ok_port(desc->dport, aid, caller, Port2); } static int async_error_am(inet_descriptor* desc, ErlDrvTermData reason) @@ -1973,8 +1984,7 @@ static int async_error_am(inet_descriptor* desc, ErlDrvTermData reason) if (deq_async(desc, &aid, &caller, &req) < 0) return -1; - return send_async_error(desc->port, desc->dport, aid, caller, - reason); + return send_async_error(desc->dport, aid, caller, reason); } /* dequeue all operations */ @@ -1985,8 +1995,7 @@ static int async_error_am_all(inet_descriptor* desc, ErlDrvTermData reason) ErlDrvTermData caller; while (deq_async(desc, &aid, &caller, &req) == 0) { - send_async_error(desc->port, desc->dport, aid, caller, - reason); + send_async_error(desc->dport, aid, caller, reason); } return 0; } @@ -2014,7 +2023,7 @@ static int inet_reply_ok(inet_descriptor* desc) ASSERT(i == sizeof(spec)/sizeof(*spec)); desc->caller = 0; - return driver_send_term(desc->port, caller, spec, i); + return erl_drv_send_term(desc->dport, caller, spec, i); } #ifdef HAVE_SCTP @@ -2033,7 +2042,7 @@ static int inet_reply_ok_port(inet_descriptor* desc, ErlDrvTermData dport) ASSERT(i == sizeof(spec)/sizeof(*spec)); desc->caller = 0; - return driver_send_term(desc->port, caller, spec, i); + return erl_drv_send_term(desc->dport, caller, spec, i); } #endif @@ -2056,7 +2065,7 @@ static int inet_reply_error_am(inet_descriptor* desc, ErlDrvTermData reason) desc->caller = 0; DEBUGF(("inet_reply_error_am %ld %ld\r\n", caller, reason)); - return driver_send_term(desc->port, caller, spec, i); + return erl_drv_send_term(desc->dport, caller, spec, i); } /* send: @@ -2165,12 +2174,12 @@ static int http_response_inetdrv(void *arg, int major, int minor, i = LOAD_TUPLE(spec, i, 2); i = LOAD_TUPLE(spec, i, 4); ASSERT(i<=27); - return driver_send_term(desc->inet.port, caller, spec, i); + return erl_drv_send_term(desc->inet.dport, caller, spec, i); } else { i = LOAD_TUPLE(spec, i, 3); ASSERT(i<=27); - return driver_output_term(desc->inet.port, spec, i); + return erl_drv_output_term(desc->inet.dport, spec, i); } } @@ -2262,12 +2271,12 @@ http_request_inetdrv(void* arg, const http_atom_t* meth, const char* meth_ptr, i = LOAD_TUPLE(spec, i, 2); i = LOAD_TUPLE(spec, i, 4); ASSERT(i <= 43); - return driver_send_term(desc->inet.port, caller, spec, i); + return erl_drv_send_term(desc->inet.dport, caller, spec, i); } else { i = LOAD_TUPLE(spec, i, 3); ASSERT(i <= 43); - return 
driver_output_term(desc->inet.port, spec, i); + return erl_drv_output_term(desc->inet.dport, spec, i); } } @@ -2316,12 +2325,12 @@ http_header_inetdrv(void* arg, const http_atom_t* name, const char* name_ptr, i = LOAD_TUPLE(spec, i, 2); i = LOAD_TUPLE(spec, i, 4); ASSERT(i <= 26); - return driver_send_term(desc->inet.port, caller, spec, i); + return erl_drv_send_term(desc->inet.dport, caller, spec, i); } else { i = LOAD_TUPLE(spec, i, 3); ASSERT(i <= 26); - return driver_output_term(desc->inet.port, spec, i); + return erl_drv_output_term(desc->inet.dport, spec, i); } } @@ -2347,7 +2356,7 @@ static int http_eoh_inetdrv(void* arg) i = LOAD_TUPLE(spec, i, 2); i = LOAD_TUPLE(spec, i, 4); ASSERT(i <= 14); - return driver_send_term(desc->inet.port, caller, spec, i); + return erl_drv_send_term(desc->inet.dport, caller, spec, i); } else { /* {http, S, http_eoh} */ @@ -2356,7 +2365,7 @@ static int http_eoh_inetdrv(void* arg) i = LOAD_ATOM(spec, i, am_http_eoh); i = LOAD_TUPLE(spec, i, 3); ASSERT(i <= 14); - return driver_output_term(desc->inet.port, spec, i); + return erl_drv_output_term(desc->inet.dport, spec, i); } } @@ -2384,7 +2393,7 @@ static int http_error_inetdrv(void* arg, const char* buf, int len) i = LOAD_TUPLE(spec, i, 2); i = LOAD_TUPLE(spec, i, 4); ASSERT(i <= 19); - return driver_send_term(desc->inet.port, caller, spec, i); + return erl_drv_send_term(desc->inet.dport, caller, spec, i); } else { /* {http, S, {http_error,Line} */ @@ -2395,7 +2404,7 @@ static int http_error_inetdrv(void* arg, const char* buf, int len) i = LOAD_TUPLE(spec, i, 2); i = LOAD_TUPLE(spec, i, 3); ASSERT(i <= 19); - return driver_output_term(desc->inet.port, spec, i); + return erl_drv_output_term(desc->inet.dport, spec, i); } } @@ -2448,11 +2457,11 @@ int ssl_tls_inetdrv(void* arg, unsigned type, unsigned major, unsigned minor, i = LOAD_TUPLE(spec, i, 2); i = LOAD_TUPLE(spec, i, 4); ASSERT(i <= 28); - ret = driver_send_term(desc->inet.port, caller, spec, i); + ret = erl_drv_send_term(desc->inet.dport, caller, spec, i); } else { ASSERT(i <= 28); - ret = driver_output_term(desc->inet.port, spec, i); + ret = erl_drv_output_term(desc->inet.dport, spec, i); } done: driver_free_binary(bin); @@ -2502,7 +2511,7 @@ static int inet_async_data(inet_descriptor* desc, const char* buf, int len) i = LOAD_TUPLE(spec, i, 4); ASSERT(i == 15); desc->caller = 0; - return driver_send_term(desc->port, caller, spec, i); + return erl_drv_send_term(desc->dport, caller, spec, i); } else { /* INET_MODE_BINARY => [H1,H2,...HSz | Binary] */ @@ -2516,7 +2525,7 @@ static int inet_async_data(inet_descriptor* desc, const char* buf, int len) i = LOAD_TUPLE(spec, i, 4); ASSERT(i <= 20); desc->caller = 0; - code = driver_send_term(desc->port, caller, spec, i); + code = erl_drv_send_term(desc->dport, caller, spec, i); return code; } } @@ -3109,7 +3118,7 @@ inet_async_binary_data ASSERT(i <= PACKET_ERL_DRV_TERM_DATA_LEN); desc->caller = 0; - return driver_send_term(desc->port, caller, spec, i); + return erl_drv_send_term(desc->dport, caller, spec, i); } /* @@ -3132,7 +3141,7 @@ static int tcp_message(inet_descriptor* desc, const char* buf, int len) i = LOAD_STRING(spec, i, buf, len); /* => [H1,H2,...Hn] */ i = LOAD_TUPLE(spec, i, 3); ASSERT(i <= 20); - return driver_output_term(desc->port, spec, i); + return erl_drv_output_term(desc->dport, spec, i); } else { /* INET_MODE_BINARY => [H1,H2,...HSz | Binary] */ @@ -3144,7 +3153,7 @@ static int tcp_message(inet_descriptor* desc, const char* buf, int len) i = LOAD_STRING_CONS(spec, i, buf, hsz); i = 
LOAD_TUPLE(spec, i, 3); ASSERT(i <= 20); - code = driver_output_term(desc->port, spec, i); + code = erl_drv_output_term(desc->dport, spec, i); return code; } } @@ -3179,7 +3188,7 @@ tcp_binary_message(inet_descriptor* desc, ErlDrvBinary* bin, int offs, int len) } i = LOAD_TUPLE(spec, i, 3); ASSERT(i <= 20); - return driver_output_term(desc->port, spec, i); + return erl_drv_output_term(desc->dport, spec, i); } /* @@ -3198,7 +3207,7 @@ static int tcp_closed_message(tcp_descriptor* desc) i = LOAD_PORT(spec, i, desc->inet.dport); i = LOAD_TUPLE(spec, i, 2); ASSERT(i <= 6); - return driver_output_term(desc->inet.port, spec, i); + return erl_drv_output_term(desc->inet.dport, spec, i); } return 0; } @@ -3219,7 +3228,7 @@ static int tcp_error_message(tcp_descriptor* desc, int err) i = LOAD_ATOM(spec, i, am_err); i = LOAD_TUPLE(spec, i, 3); ASSERT(i <= 8); - return driver_output_term(desc->inet.port, spec, i); + return erl_drv_output_term(desc->inet.dport, spec, i); } /* @@ -3310,7 +3319,7 @@ static int packet_binary_message /* Close up the outer 5-tuple: */ i = LOAD_TUPLE(spec, i, 5); ASSERT(i <= PACKET_ERL_DRV_TERM_DATA_LEN); - return driver_output_term(desc->port, spec, i); + return erl_drv_output_term(desc->dport, spec, i); } /* @@ -3337,7 +3346,7 @@ static int packet_error_message(udp_descriptor* udesc, int err) i = LOAD_ATOM(spec, i, am_err); i = LOAD_TUPLE(spec, i, 3); ASSERT(i == sizeof(spec)/sizeof(*spec)); - return driver_output_term(desc->port, spec, i); + return erl_drv_output_term(desc->dport, spec, i); } @@ -5465,6 +5474,28 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len) } continue; + case INET_LOPT_TCP_MSGQ_HIWTRMRK: + if (desc->stype == SOCK_STREAM) { + ErlDrvSizeT high; + if (ival < ERL_DRV_BUSY_MSGQ_LIM_MIN + || ERL_DRV_BUSY_MSGQ_LIM_MAX < ival) + return -1; + high = (ErlDrvSizeT) ival; + erl_drv_busy_msgq_limits(desc->port, NULL, &high); + } + continue; + + case INET_LOPT_TCP_MSGQ_LOWTRMRK: + if (desc->stype == SOCK_STREAM) { + ErlDrvSizeT low; + if (ival < ERL_DRV_BUSY_MSGQ_LIM_MIN + || ERL_DRV_BUSY_MSGQ_LIM_MAX < ival) + return -1; + low = (ErlDrvSizeT) ival; + erl_drv_busy_msgq_limits(desc->port, &low, NULL); + } + continue; + case INET_LOPT_TCP_SEND_TIMEOUT: if (desc->stype == SOCK_STREAM) { tcp_descriptor* tdesc = (tcp_descriptor*) desc; @@ -6365,6 +6396,32 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc, } continue; + case INET_LOPT_TCP_MSGQ_HIWTRMRK: + if (desc->stype == SOCK_STREAM) { + ErlDrvSizeT high = ERL_DRV_BUSY_MSGQ_READ_ONLY; + *ptr++ = opt; + erl_drv_busy_msgq_limits(desc->port, NULL, &high); + ival = high > INT_MAX ? INT_MAX : (int) high; + put_int32(ival, ptr); + } + else { + TRUNCATE_TO(0,ptr); + } + continue; + + case INET_LOPT_TCP_MSGQ_LOWTRMRK: + if (desc->stype == SOCK_STREAM) { + ErlDrvSizeT low = ERL_DRV_BUSY_MSGQ_READ_ONLY; + *ptr++ = opt; + erl_drv_busy_msgq_limits(desc->port, &low, NULL); + ival = low > INT_MAX ? 
INT_MAX : (int) low; + put_int32(ival, ptr); + } + else { + TRUNCATE_TO(0,ptr); + } + continue; + case INET_LOPT_TCP_SEND_TIMEOUT: if (desc->stype == SOCK_STREAM) { *ptr++ = opt; @@ -7278,7 +7335,7 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc, i = LOAD_TUPLE(spec, i, 3); /* Now, convert "spec" into the returnable term: */ - driver_send_term(desc->port, driver_caller(desc->port), spec, i); + erl_drv_send_term(desc->dport, driver_caller(desc->port), spec, i); FREE(spec); (*dest)[0] = INET_REP; @@ -7328,13 +7385,21 @@ static ErlDrvSSizeT inet_fill_stat(inet_descriptor* desc, val = (unsigned long) driver_sizeq(desc->port); break; case INET_STAT_RECV_OCT: +#ifdef ARCH_64 + put_int64(desc->recv_oct, dst); /* write it all */ +#else put_int32(desc->recv_oct[1], dst); /* write high 32bit */ put_int32(desc->recv_oct[0], dst+4); /* write low 32bit */ +#endif dst += 8; continue; case INET_STAT_SEND_OCT: +#ifdef ARCH_64 + put_int64(desc->send_oct, dst); /* write it all */ +#else put_int32(desc->send_oct[1], dst); /* write high 32bit */ put_int32(desc->send_oct[0], dst+4); /* write low 32bit */ +#endif dst += 8; continue; default: return -1; /* invalid argument */ @@ -7360,7 +7425,7 @@ send_empty_out_q_msgs(inet_descriptor* desc) ASSERT(msg_len == sizeof(msg)/sizeof(*msg)); - send_to_subscribers(desc->port, + send_to_subscribers(desc->dport, &desc->empty_out_q_subs, 1, msg, @@ -7442,12 +7507,20 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol) desc->peer_ptr = NULL; desc->name_ptr = NULL; +#ifdef ARCH_64 + desc->recv_oct = 0; +#else desc->recv_oct[0] = desc->recv_oct[1] = 0; +#endif desc->recv_cnt = 0; desc->recv_max = 0; desc->recv_avg = 0.0; desc->recv_dvi = 0.0; +#ifdef ARCH_64 + desc->send_oct = 0; +#else desc->send_oct[0] = desc->send_oct[1] = 0; +#endif desc->send_cnt = 0; desc->send_max = 0; desc->send_avg = 0.0; @@ -7836,14 +7909,19 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf, static void inet_output_count(inet_descriptor* desc, ErlDrvSizeT len) { unsigned long n = desc->send_cnt + 1; - unsigned long t = desc->send_oct[0] + len; +#ifndef ARCH_64 + Uint32 t = desc->send_oct[0] + len; int c = (t < desc->send_oct[0]); +#endif double avg = desc->send_avg; - /* at least 64 bit octet count */ +#ifdef ARCH_64 + desc->send_oct += len; +#else + /* 64 bit octet count in 32 bit words */ desc->send_oct[0] = t; desc->send_oct[1] += c; - +#endif if (n == 0) /* WRAP, use old avg as input to a new sequence */ n = 1; desc->send_avg += (len - avg) / n; @@ -7856,14 +7934,20 @@ static void inet_output_count(inet_descriptor* desc, ErlDrvSizeT len) static void inet_input_count(inet_descriptor* desc, ErlDrvSizeT len) { unsigned long n = desc->recv_cnt + 1; - unsigned long t = desc->recv_oct[0] + len; +#ifndef ARCH_64 + Uint32 t = (desc->recv_oct[0] + len); int c = (t < desc->recv_oct[0]); +#endif double avg = desc->recv_avg; double dvi; - /* at least 64 bit octet count */ +#ifdef ARCH_64 + desc->recv_oct += len; +#else + /* 64 bit octet count in 32 bit words */ desc->recv_oct[0] = t; desc->recv_oct[1] += c; +#endif if (n == 0) /* WRAP */ n = 1; @@ -8007,6 +8091,7 @@ static int tcp_inet_init(void) static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args) { + ErlDrvSizeT q_low, q_high; tcp_descriptor* desc; DEBUGF(("tcp_inet_start(%ld) {\r\n", (long)port)); @@ -8016,6 +8101,17 @@ static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args) return ERL_DRV_ERROR_ERRNO; desc->high = INET_HIGH_WATERMARK; desc->low = INET_LOW_WATERMARK; + q_high = 
INET_HIGH_MSGQ_WATERMARK; + q_low = INET_LOW_MSGQ_WATERMARK; + if (q_low < ERL_DRV_BUSY_MSGQ_LIM_MIN) + q_low = ERL_DRV_BUSY_MSGQ_LIM_MIN; + else if (q_low > ERL_DRV_BUSY_MSGQ_LIM_MAX) + q_low = ERL_DRV_BUSY_MSGQ_LIM_MAX; + if (q_high < ERL_DRV_BUSY_MSGQ_LIM_MIN) + q_high = ERL_DRV_BUSY_MSGQ_LIM_MIN; + else if (q_high > ERL_DRV_BUSY_MSGQ_LIM_MAX) + q_high = ERL_DRV_BUSY_MSGQ_LIM_MAX; + erl_drv_busy_msgq_limits(port, &q_low, &q_high); desc->send_timeout = INET_INFINITY; desc->send_timeout_close = 0; desc->busy_on_send = 0; @@ -8039,6 +8135,7 @@ static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args) static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s, ErlDrvTermData owner, int* err) { + ErlDrvSizeT q_low, q_high; ErlDrvPort port = desc->inet.port; tcp_descriptor* copy_desc; @@ -8076,6 +8173,13 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s, FREE(copy_desc); return NULL; } + + /* Read busy msgq limits of parent */ + q_low = q_high = ERL_DRV_BUSY_MSGQ_READ_ONLY; + erl_drv_busy_msgq_limits(desc->inet.port, &q_low, &q_high); + /* Write same busy msgq limits to child */ + erl_drv_busy_msgq_limits(port, &q_low, &q_high); + copy_desc->inet.port = port; copy_desc->inet.dport = driver_mk_port(port); *err = 0; @@ -8108,7 +8212,7 @@ static void tcp_close_check(tcp_descriptor* desc) desc->inet.state = INET_STATE_LISTENING; while (deq_multi_op(desc,&id,&req,&caller,NULL,&monitor) == 0) { driver_demonitor_process(desc->inet.port, &monitor); - send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_closed); + send_async_error(desc->inet.dport, id, caller, am_closed); } clean_multi_timers(&(desc->mtd), desc->inet.port); } @@ -8532,7 +8636,7 @@ static void tcp_inet_multi_timeout(ErlDrvData e, ErlDrvTermData caller) sock_select(INETP(desc),FD_ACCEPT,0); desc->inet.state = INET_STATE_LISTENING; /* restore state */ } - send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_timeout); + send_async_error(desc->inet.dport, id, caller, am_timeout); } @@ -9273,7 +9377,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event) if (s == INVALID_SOCKET) { /* Not ERRNO_BLOCK, that's handled right away */ - ret = send_async_error(desc->inet.port, desc->inet.dport, + ret = send_async_error(desc->inet.dport, id, caller, error_atom(sock_errno())); goto done; } @@ -9283,7 +9387,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event) if ((accept_desc = tcp_inet_copy(desc,s,caller,&err)) == NULL) { sock_close(s); - ret = send_async_error(desc->inet.port, desc->inet.dport, + ret = send_async_error(desc->inet.dport, id, caller, error_atom(err)); goto done; } @@ -9294,7 +9398,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event) ERL_DRV_READ, 1); #endif accept_desc->inet.state = INET_STATE_CONNECTED; - ret = send_async_ok_port(desc->inet.port, desc->inet.dport, + ret = send_async_ok_port(desc->inet.dport, id, caller, accept_desc->inet.dport); } } @@ -9654,6 +9758,7 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event) DEBUGF(("tcp_inet_output(%ld): s=%d, About to send %d items\r\n", (long)desc->inet.port, desc->inet.s, vsize)); if (IS_SOCKET_ERROR(sock_sendv(desc->inet.s, iov, vsize, &n, 0))) { + write_error: if ((sock_errno() != ERRNO_BLOCK) && (sock_errno() != EINTR)) { DEBUGF(("tcp_inet_output(%ld): sock_sendv(%d) errno = %d\r\n", (long)desc->inet.port, vsize, sock_errno())); @@ -9664,6 +9769,22 @@ static int tcp_inet_output(tcp_descriptor* desc, HANDLE event) desc->inet.send_would_block = 1; #endif goto done; + } 
else if (n == 0) { /* Workaround for redhat/CentOS 6.3 returning + 0 when sending packets with + sizes > (max 32 bit signed int) */ + size_t howmuch = 0x7FFFFFFF; /* max signed 32 bit */ + int x; + for(x = 0; x < vsize && iov[x].iov_len == 0; ++x) + ; + if (x < vsize) { + if (howmuch > iov[x].iov_len) { + howmuch = iov[x].iov_len; + } + n = sock_send(desc->inet.s, iov[x].iov_base,howmuch,0); + if (IS_SOCKET_ERROR(n)) { + goto write_error; + } + } } if (driver_deq(ix, n) <= desc->low) { if (IS_BUSY(INETP(desc))) { @@ -10936,7 +11057,7 @@ subs_list *subs; static void send_to_subscribers ( - ErlDrvPort port, + ErlDrvTermData port, subs_list *subs, int free_subs, ErlDrvTermData msg[], @@ -10953,7 +11074,7 @@ static void send_to_subscribers this = subs; while(this) { - (void) driver_send_term(port, this->subscriber, msg, msg_len); + (void) erl_drv_send_term(port, this->subscriber, msg, msg_len); if(free_subs && !first) { next = this->next; diff --git a/erts/emulator/drivers/common/ram_file_drv.c b/erts/emulator/drivers/common/ram_file_drv.c index a109e40333..7f7cd7cd91 100644 --- a/erts/emulator/drivers/common/ram_file_drv.c +++ b/erts/emulator/drivers/common/ram_file_drv.c @@ -48,6 +48,7 @@ #define RAM_FILE_SIZE 37 /* get file size */ #define RAM_FILE_ADVISE 38 /* predeclare the access * pattern for file data */ +#define RAM_FILE_ALLOCATE 39 /* allocate space for a file */ /* possible new operations include: DES_ENCRYPT DES_DECRYPT @@ -720,6 +721,13 @@ static void rfile_command(ErlDrvData e, char* buf, ErlDrvSizeT count) else reply(f, 1, 0); break; + + case RAM_FILE_ALLOCATE: + if (f->flags == 0) + error_reply(f, EBADF); + else + reply(f, 1, 0); + break; } /* * Ignore anything else -- let the caller hang. diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c index b29f80a8ba..ab2abb88d1 100644 --- a/erts/emulator/drivers/unix/ttsl_drv.c +++ b/erts/emulator/drivers/unix/ttsl_drv.c @@ -912,11 +912,15 @@ static int insert_buf(byte *s, int n) lbuf[lpos++] = (CONTROL_TAG | ((Uint32) ch)); ch = 0; } while (lpos % 8); - } else if (ch == '\n' || ch == '\r') { + } else if (ch == '\e' || ch == '\n' || ch == '\r') { write_buf(lbuf + buffpos, lpos - buffpos); - outc('\r'); - if (ch == '\n') - outc('\n'); + if (ch == '\e') { + outc('\e'); + } else { + outc('\r'); + if (ch == '\n') + outc('\n'); + } if (llen > lpos) { memcpy(lbuf, lbuf + lpos, llen - lpos); } diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c index cf7af71b92..558651fff9 100644 --- a/erts/emulator/drivers/unix/unix_efile.c +++ b/erts/emulator/drivers/unix/unix_efile.c @@ -22,6 +22,12 @@ #ifdef HAVE_CONFIG_H # include "config.h" #endif +#if defined(HAVE_POSIX_FALLOCATE) && !defined(__sun) && !defined(__sun__) +#define _XOPEN_SOURCE 600 +#endif +#if !defined(_GNU_SOURCE) && defined(HAVE_LINUX_FALLOC_H) +#define _GNU_SOURCE +#endif #include "sys.h" #include "erl_driver.h" #include "erl_efile.h" @@ -41,9 +47,13 @@ #define DARWIN 1 #endif -#ifdef DARWIN +#if defined(DARWIN) || defined(HAVE_LINUX_FALLOC_H) || defined(HAVE_POSIX_FALLOCATE) #include <fcntl.h> -#endif /* DARWIN */ +#endif + +#ifdef HAVE_LINUX_FALLOC_H +#include <linux/falloc.h> +#endif #ifdef SUNOS4 # define getcwd(buf, size) getwd(buf) @@ -967,3 +977,81 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd, return check_error(retval, errInfo); } #endif /* HAVE_SENDFILE */ + +#ifdef HAVE_POSIX_FALLOCATE +static int +call_posix_fallocate(int fd, Sint64 offset, Sint64 length) +{ + int ret; + + /* 
+ * On Linux and Solaris for example, posix_fallocate() returns + * a positive error number on error and it does not set errno. + * On FreeBSD however (9.0 at least), it returns -1 on error + * and it sets errno. + */ + do { + ret = posix_fallocate(fd, (off_t) offset, (off_t) length); + if (ret > 0) { + errno = ret; + ret = -1; + } + } while (ret != 0 && errno == EINTR); + + return ret; +} +#endif /* HAVE_POSIX_FALLOCATE */ + +int +efile_fallocate(Efile_error* errInfo, int fd, Sint64 offset, Sint64 length) +{ +#if defined HAVE_FALLOCATE + /* Linux specific, more efficient than posix_fallocate. */ + int ret; + + do { + ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, (off_t) offset, (off_t) length); + } while (ret != 0 && errno == EINTR); + +#if defined HAVE_POSIX_FALLOCATE + /* Fallback to posix_fallocate if available. */ + if (ret != 0) { + ret = call_posix_fallocate(fd, offset, length); + } +#endif + + return check_error(ret, errInfo); +#elif defined F_PREALLOCATE + /* Mac OS X specific, equivalent to posix_fallocate. */ + int ret; + fstore_t fs; + + memset(&fs, 0, sizeof(fs)); + fs.fst_flags = F_ALLOCATECONTIG; + fs.fst_posmode = F_VOLPOSMODE; + fs.fst_offset = (off_t) offset; + fs.fst_length = (off_t) length; + + ret = fcntl(fd, F_PREALLOCATE, &fs); + + if (-1 == ret) { + fs.fst_flags = F_ALLOCATEALL; + ret = fcntl(fd, F_PREALLOCATE, &fs); + +#if defined HAVE_POSIX_FALLOCATE + /* Fallback to posix_fallocate if available. */ + if (-1 == ret) { + ret = call_posix_fallocate(fd, offset, length); + } +#endif + } + + return check_error(ret, errInfo); +#elif defined HAVE_POSIX_FALLOCATE + /* Other Unixes, use posix_fallocate if available. */ + return check_error(call_posix_fallocate(fd, offset, length), errInfo); +#else + errno = ENOTSUP; + return check_error(-1, errInfo); +#endif +} diff --git a/erts/emulator/drivers/win32/win_efile.c b/erts/emulator/drivers/win32/win_efile.c index dc7add01f7..f2b0c8a843 100644 --- a/erts/emulator/drivers/win32/win_efile.c +++ b/erts/emulator/drivers/win32/win_efile.c @@ -41,6 +41,8 @@ #define IS_DOT_OR_DOTDOT(s) \ ((s)[0] == L'.' && ((s)[1] == L'\0' || ((s)[1] == L'.' && (s)[2] == L'\0'))) +#define FILE_SHARE_FLAGS (FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE) + #ifndef INVALID_FILE_ATTRIBUTES #define INVALID_FILE_ATTRIBUTES ((DWORD) 0xFFFFFFFF) #endif @@ -724,7 +726,7 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. 
*/ crFlags = CREATE_NEW; } fd = CreateFileW(wname, access, - FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + FILE_SHARE_FLAGS, NULL, crFlags, FILE_ATTRIBUTE_NORMAL, NULL); /* @@ -909,7 +911,7 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo, { HANDLE handle; /* Handle returned by CreateFile() */ BY_HANDLE_FILE_INFORMATION fileInfo; /* from CreateFile() */ - if (handle = CreateFileW(name, GENERIC_READ, 0,NULL, + if (handle = CreateFileW(name, GENERIC_READ, FILE_SHARE_FLAGS, NULL, OPEN_EXISTING, 0, NULL)) { GetFileInformationByHandle(handle, &fileInfo); pInfo->links = fileInfo.nNumberOfLinks; @@ -1021,7 +1023,7 @@ efile_write_info(Efile_error* errInfo, } fd = CreateFileW(wname, GENERIC_READ|GENERIC_WRITE, - FILE_SHARE_READ | FILE_SHARE_WRITE, + FILE_SHARE_FLAGS, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if (fd != INVALID_HANDLE_VALUE) { BOOL result = SetFileTime(fd, &CreationFileTime, &AccessFileTime, &ModifyFileTime); @@ -1384,7 +1386,7 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size) DWORD fileAttributes = GetFileAttributesW(wname); if ((fileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) { BOOLEAN success = 0; - HANDLE h = CreateFileW(wname, GENERIC_READ, 0,NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); + HANDLE h = CreateFileW(wname, GENERIC_READ, FILE_SHARE_FLAGS, NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); int len; if(h != INVALID_HANDLE_VALUE) { success = pGetFinalPathNameByHandle(h, wbuffer, size / sizeof(WCHAR),0); @@ -1558,3 +1560,13 @@ efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset, errno = ERROR_SUCCESS; return check_error(0, errInfo); } + +int +efile_fallocate(Efile_error* errInfo, int fd, Sint64 offset, Sint64 length) +{ + /* No file preallocation method available in Windows. */ + errno = errno_map(ERROR_NOT_SUPPORTED); + SetLastError(ERROR_NOT_SUPPORTED); + + return check_error(-1, errInfo); +} diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4 index ab078b9583..764b8d180c 100644 --- a/erts/emulator/hipe/hipe_bif_list.m4 +++ b/erts/emulator/hipe/hipe_bif_list.m4 @@ -145,6 +145,7 @@ * Zero-arity BIFs that can fail. */ standard_bif_interface_0(nbif_processes_0, processes_0) +standard_bif_interface_0(nbif_ports_0, ports_0) /* * BIFs and primops that may do a GC (change heap limit and walk the native stack). 
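The efile_fallocate() additions to unix_efile.c above spread a single fallback ladder across several preprocessor branches: Linux fallocate(FALLOC_FL_KEEP_SIZE) first, then posix_fallocate() (whose error reporting differs between Linux/Solaris and FreeBSD), and finally ENOTSUP when neither exists, matching the win_efile.c stub. The sketch below condenses that ladder into one function for readability; preallocate() is a hypothetical name and the HAVE_* macros are assumed to come from configure, as in the patch.

#include <errno.h>
#include <fcntl.h>
#ifdef HAVE_LINUX_FALLOC_H
#include <linux/falloc.h>              /* FALLOC_FL_KEEP_SIZE */
#endif

/* Illustrative sketch only, not part of the patch: the preallocation
 * fallback order implemented by the unix_efile.c hunk above. */
static int preallocate(int fd, off_t offset, off_t length)
{
#if defined(HAVE_FALLOCATE) || defined(HAVE_POSIX_FALLOCATE)
    int ret = -1;
# ifdef HAVE_FALLOCATE                 /* Linux: efficient, keeps st_size */
    do {
        ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, length);
    } while (ret != 0 && errno == EINTR);
    if (ret == 0)
        return 0;
# endif
# ifdef HAVE_POSIX_FALLOCATE           /* portable fallback */
    do {
        ret = posix_fallocate(fd, offset, length);
        if (ret > 0) {                 /* Linux/Solaris return the error
                                          number and leave errno alone;
                                          FreeBSD sets errno instead */
            errno = ret;
            ret = -1;
        }
    } while (ret != 0 && errno == EINTR);
# endif
    return ret;
#else
    errno = ENOTSUP;                   /* no preallocation mechanism */
    return -1;
#endif
}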
diff --git a/erts/emulator/hipe/hipe_debug.c b/erts/emulator/hipe/hipe_debug.c index 37615bf718..f2e9d03607 100644 --- a/erts/emulator/hipe/hipe_debug.c +++ b/erts/emulator/hipe/hipe_debug.c @@ -189,11 +189,10 @@ void hipe_print_pcb(Process *p) U("old_head ", old_heap); U("min_heap_..", min_heap_size); U("rcount ", rcount); - U("id ", id); - U("prio ", prio); + U("id ", common.id); U("reds ", reds); - U("tracer_pr..", tracer_proc); - U("trace_fla..", trace_flags); + U("tracer_pr..", common.tracer_proc); + U("trace_fla..", common.trace_flags); U("group_lea..", group_leader); U("flags ", flags); U("fvalue ", fvalue); @@ -202,8 +201,8 @@ void hipe_print_pcb(Process *p) /*XXX: ErlTimer tm; */ U("next ", next); /*XXX: ErlOffHeap off_heap; */ - U("reg ", reg); - U("nlinks ", nlinks); + U("reg ", common.u.alive.reg); + U("nlinks ", common.u.alive.links); /*XXX: ErlMessageQueue msg; */ U("mbuf ", mbuf); U("mbuf_sz ", mbuf_sz); diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c index cbbf1db2e5..0e287908b1 100644 --- a/erts/emulator/hipe/hipe_mkliterals.c +++ b/erts/emulator/hipe/hipe_mkliterals.c @@ -262,47 +262,6 @@ static const struct literal { const char *name; int value; } literals[] = { - /* Field offsets in a process struct */ - { "P_HP", offsetof(struct process, htop) }, - { "P_HP_LIMIT", offsetof(struct process, stop) }, - { "P_OFF_HEAP_FIRST", offsetof(struct process, off_heap.first) }, - { "P_MBUF", offsetof(struct process, mbuf) }, - { "P_ID", offsetof(struct process, id) }, - { "P_FLAGS", offsetof(struct process, flags) }, - { "P_FVALUE", offsetof(struct process, fvalue) }, - { "P_FREASON", offsetof(struct process, freason) }, - { "P_FTRACE", offsetof(struct process, ftrace) }, - { "P_FCALLS", offsetof(struct process, fcalls) }, - { "P_BEAM_IP", offsetof(struct process, i) }, - { "P_ARITY", offsetof(struct process, arity) }, - { "P_ARG0", offsetof(struct process, def_arg_reg[0]) }, - { "P_ARG1", offsetof(struct process, def_arg_reg[1]) }, - { "P_ARG2", offsetof(struct process, def_arg_reg[2]) }, - { "P_ARG3", offsetof(struct process, def_arg_reg[3]) }, - { "P_ARG4", offsetof(struct process, def_arg_reg[4]) }, - { "P_ARG5", offsetof(struct process, def_arg_reg[5]) }, -#ifdef HIPE - { "P_NSP", offsetof(struct process, hipe.nsp) }, - { "P_NCALLEE", offsetof(struct process, hipe.ncallee) }, - { "P_CLOSURE", offsetof(struct process, hipe.closure) }, -#if defined(__i386__) || defined(__x86_64__) - { "P_NSP_LIMIT", offsetof(struct process, hipe.nstack) }, - { "P_CSP", offsetof(struct process, hipe.ncsp) }, -#elif defined(__sparc__) || defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__) - { "P_NSP_LIMIT", offsetof(struct process, hipe.nstack) }, - { "P_NRA", offsetof(struct process, hipe.nra) }, -#endif - { "P_NARITY", offsetof(struct process, hipe.narity) }, - { "P_FLOAT_RESULT", -# ifdef NO_FPE_SIGNALS - offsetof(struct process, hipe.float_result) -# endif - }, -# if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) - { "P_BIF_CALLEE", offsetof(struct process, hipe.bif_callee) }, -# endif -#endif /* HIPE */ - /* process flags bits */ { "F_TIMO", F_TIMO }, @@ -380,8 +339,6 @@ static const struct literal { { "MS_SAVEOFFSET_SIZE", field_sizeof(struct erl_bin_match_struct, save_offset)}, /* messages */ - { "P_MSG_FIRST", offsetof(struct process, msg.first) }, - { "P_MSG_SAVE", offsetof(struct process, msg.save) }, { "MSG_NEXT", offsetof(struct erl_mesg, next) }, /* ARM */ @@ -460,12 +417,14 @@ static const struct 
atom_literal { * These depend on configuration options such as heap architecture. * The compiler accesses these through hipe_bifs:get_rts_param/1. */ -static const struct rts_param { +struct rts_param { unsigned int nr; const char *name; unsigned int is_defined; int value; -} rts_params[] = { +}; + +static const struct rts_param rts_params[] = { { 1, "P_OFF_HEAP_FUNS", 1, offsetof(struct process, off_heap.first) }, @@ -518,7 +477,53 @@ static const struct rts_param { { 19, "MSG_MESSAGE", 1, offsetof(struct erl_mesg, m[0]) }, - /* highest entry ever used == 21 */ + + /* Field offsets in a process struct */ + { 22, "P_HP", 1, offsetof(struct process, htop) }, + { 23, "P_HP_LIMIT", 1, offsetof(struct process, stop) }, + { 24, "P_OFF_HEAP_FIRST", 1, offsetof(struct process, off_heap.first) }, + { 25, "P_MBUF", 1, offsetof(struct process, mbuf) }, + { 26, "P_ID", 1, offsetof(struct process, common.id) }, + { 27, "P_FLAGS", 1, offsetof(struct process, flags) }, + { 28, "P_FVALUE", 1, offsetof(struct process, fvalue) }, + { 29, "P_FREASON", 1, offsetof(struct process, freason) }, + { 30, "P_FTRACE", 1, offsetof(struct process, ftrace) }, + { 31, "P_FCALLS", 1, offsetof(struct process, fcalls) }, + { 32, "P_BEAM_IP", 1, offsetof(struct process, i) }, + { 33, "P_ARITY", 1, offsetof(struct process, arity) }, + { 34, "P_ARG0", 1, offsetof(struct process, def_arg_reg[0]) }, + { 35, "P_ARG1", 1, offsetof(struct process, def_arg_reg[1]) }, + { 36, "P_ARG2", 1, offsetof(struct process, def_arg_reg[2]) }, + { 37, "P_ARG3", 1, offsetof(struct process, def_arg_reg[3]) }, + { 38, "P_ARG4", 1, offsetof(struct process, def_arg_reg[4]) }, + { 39, "P_ARG5", 1, offsetof(struct process, def_arg_reg[5]) }, + { 40, "P_NSP", 1, offsetof(struct process, hipe.nsp) }, + { 41, "P_NCALLEE", 1, offsetof(struct process, hipe.ncallee) }, + { 42, "P_CLOSURE", 1, offsetof(struct process, hipe.closure) }, + { 43, "P_NSP_LIMIT", 1, offsetof(struct process, hipe.nstack) }, + { 44, "P_CSP", +#if defined(__i386__) || defined(__x86_64__) + 1, offsetof(struct process, hipe.ncsp) +#endif + }, + { 45, "P_NRA", +#if defined(__sparc__) || defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__) + 1, offsetof(struct process, hipe.nra) +#endif + }, + { 46, "P_NARITY", 1, offsetof(struct process, hipe.narity) }, + { 47, "P_FLOAT_RESULT", +#ifdef NO_FPE_SIGNALS + 1, offsetof(struct process, hipe.float_result) +#endif + }, + { 48, "P_BIF_CALLEE", +#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) + 1, offsetof(struct process, hipe.bif_callee) +#endif + }, + { 49, "P_MSG_FIRST", 1, offsetof(struct process, msg.first) }, + { 50, "P_MSG_SAVE", 1, offsetof(struct process, msg.save) }, }; #define NR_PARAMS ARRAY_SIZE(rts_params) diff --git a/erts/emulator/hipe/hipe_x86_gc.h b/erts/emulator/hipe/hipe_x86_gc.h index aa4abb6f59..4bea9276c0 100644 --- a/erts/emulator/hipe/hipe_x86_gc.h +++ b/erts/emulator/hipe/hipe_x86_gc.h @@ -71,7 +71,7 @@ nstack_walk_init_sdesc(const Process *p, struct nstack_walk_state *state) state->sdesc0[0].livebits[0] = 0; # ifdef DEBUG state->sdesc0[0].dbg_M = 0; - state->sdesc0[0].dbg_F = am_init; + state->sdesc0[0].dbg_F = am_undefined; state->sdesc0[0].dbg_A = 0; # endif /* XXX: this appears to prevent a gcc-4.1.1 bug on x86 */ diff --git a/erts/emulator/pcre/pcre.mk b/erts/emulator/pcre/pcre.mk index 352137b341..57bf5de2fb 100644 --- a/erts/emulator/pcre/pcre.mk +++ b/erts/emulator/pcre/pcre.mk @@ -49,18 +49,18 @@ PCRE_CFLAGS = $(filter-out -DDEBUG,$(CFLAGS)) -DERLANG_INTEGRATION ifeq 
($(TARGET), win32) $(EPCRE_LIB): $(PCRE_OBJS) - $(AR) -out:$@ $(PCRE_OBJS) + $(V_AR) -out:$@ $(PCRE_OBJS) else $(EPCRE_LIB): $(PCRE_OBJS) - $(AR) $(ARFLAGS) $@ $(PCRE_OBJS) + $(V_AR) $(ARFLAGS) $@ $(PCRE_OBJS) -@ ($(RANLIB) $@ || true) 2>/dev/null endif $(PCRE_OBJDIR)/%.o: pcre/%.c - $(CC) -c $(PCRE_CFLAGS) -o $@ $< + $(V_CC) -c $(PCRE_CFLAGS) -o $@ $< $(PCRE_GENINC): pcre/pcre_exec.c - for x in `grep -n COST_CHK pcre/pcre_exec.c | grep -v 'COST_CHK(N)' | awk -F: '{print $$1}'`; \ + $(gen_verbose)for x in `grep -n COST_CHK pcre/pcre_exec.c | grep -v 'COST_CHK(N)' | awk -F: '{print $$1}'`; \ do \ N=`expr $$x + 100`; \ echo "case $$N: goto L_LOOP_COUNT_$${x};"; \ diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index ce014c19c2..474408ae7c 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -200,17 +200,6 @@ static void event_large_fd_error(ErlDrvPort, ErtsSysFdType, ErlDrvEventData); #endif static void steal_pending_stop_select(erts_dsprintf_buf_t*, ErlDrvPort, ErtsDrvEventState*, int mode, int on); -static ERTS_INLINE Eterm -drvport2id(ErlDrvPort dp) -{ - Port *pp = erts_drvport2port(dp); - if (pp) - return pp->id; - else { - ASSERT(0); - return am_undefined; - } -} #ifdef ERTS_SMP ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_FD_LIST) @@ -378,7 +367,7 @@ abort_task(Eterm id, ErtsPortTaskHandle *pthp, EventStateType type) || !erts_port_task_is_scheduled(pthp)); } else if (erts_port_task_is_scheduled(pthp)) { - erts_port_task_abort(id, pthp); + erts_port_task_abort(pthp); ASSERT(erts_is_port_alive(id)); } } @@ -492,7 +481,7 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, int on) { void (*stop_select_fn)(ErlDrvEvent, void*) = NULL; - Eterm id = drvport2id(ix); + Eterm id = erts_drvport2id(ix); ErtsSysFdType fd = (ErtsSysFdType) e; ErtsPollEvents ctl_events = (ErtsPollEvents) 0; ErtsPollEvents new_events, old_events; @@ -503,8 +492,8 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, DTRACE_CHARBUF(name, 64); #endif - ERTS_SMP_LC_ASSERT(erts_drvport2port(ix) - && erts_lc_is_port_locked(erts_drvport2port(ix))); + ERTS_SMP_LC_ASSERT(erts_drvport2port(ix, NULL) + && erts_lc_is_port_locked(erts_drvport2port(ix, NULL))); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) { @@ -530,9 +519,9 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, if (!on && (mode&ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) { if (IS_FD_UNKNOWN(state)) { /* fast track to stop_select callback */ - stop_select_fn = erts_drvport2port(ix)->drv_ptr->stop_select; + stop_select_fn = erts_drvport2port(ix, NULL)->drv_ptr->stop_select; #ifdef USE_VM_PROBES - strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1); + strncpy(name, erts_drvport2port(ix, NULL)->drv_ptr->name, sizeof(name)-1); name[sizeof(name)-1] = '\0'; #endif ret = 0; @@ -665,14 +654,14 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, } } if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) { - erts_driver_t* drv_ptr = erts_drvport2port(ix)->drv_ptr; + erts_driver_t* drv_ptr = erts_drvport2port(ix, NULL)->drv_ptr; ASSERT(new_events==0); if (state->remove_cnt == 0 || !wake_poller) { /* Safe to close fd now as it is not in pollset or there was no need to eject fd (kernel poll) */ stop_select_fn = drv_ptr->stop_select; #ifdef USE_VM_PROBES - strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1); + strncpy(name, erts_drvport2port(ix, NULL)->drv_ptr->name, 
sizeof(name)-1); name[sizeof(name)-1] = '\0'; #endif } @@ -719,13 +708,13 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix, ErtsPollEvents events; ErtsPollEvents add_events; ErtsPollEvents remove_events; - Eterm id = drvport2id(ix); + Eterm id = erts_drvport2id(ix); ErtsDrvEventState *state; int do_wake = 0; int ret; - ERTS_SMP_LC_ASSERT(erts_drvport2port(ix) - && erts_lc_is_port_locked(erts_drvport2port(ix))); + ERTS_SMP_LC_ASSERT(erts_drvport2port(ix, NULL) + && erts_lc_is_port_locked(erts_drvport2port(ix, NULL))); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) { @@ -960,7 +949,7 @@ static void print_select_op(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix, ErtsSysFdType fd, int mode, int on) { - Port *pp = erts_drvport2port(ix); + Port *pp = erts_drvport2port(ix, NULL); erts_dsprintf(dsbufp, "driver_select(%p, %d,%s%s%s%s, %d) " "by ", @@ -971,8 +960,8 @@ print_select_op(erts_dsprintf_buf_t *dsbufp, mode & ERL_DRV_USE ? " ERL_DRV_USE" : "", mode & (ERL_DRV_USE_NO_CALLBACK & ~ERL_DRV_USE) ? "_NO_CALLBACK" : "", on); - print_driver_name(dsbufp, pp->id); - erts_dsprintf(dsbufp, "driver %T ", pp ? pp->id : NIL); + print_driver_name(dsbufp, pp->common.id); + erts_dsprintf(dsbufp, "driver %T ", pp ? pp->common.id : NIL); } static void @@ -1031,7 +1020,7 @@ steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix, state->driver.drv_ptr = NULL; } else if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) { - erts_driver_t* drv_ptr = erts_drvport2port(ix)->drv_ptr; + erts_driver_t* drv_ptr = erts_drvport2port(ix, NULL)->drv_ptr; if (drv_ptr != state->driver.drv_ptr) { /* Some other driver wants the stop_select callback */ if (state->driver.drv_ptr->handle) { @@ -1053,7 +1042,7 @@ static void print_event_op(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data) { - Port *pp = erts_drvport2port(ix); + Port *pp = erts_drvport2port(ix, NULL); erts_dsprintf(dsbufp, "driver_event(%p, %d, ", ix, (int) fd); if (!event_data) erts_dsprintf(dsbufp, "NULL"); @@ -1062,8 +1051,8 @@ print_event_op(erts_dsprintf_buf_t *dsbufp, (unsigned int) event_data->events, (unsigned int) event_data->revents); erts_dsprintf(dsbufp, ") by "); - print_driver_name(dsbufp, pp->id); - erts_dsprintf(dsbufp, "driver %T ", pp ? pp->id : NIL); + print_driver_name(dsbufp, pp->common.id); + erts_dsprintf(dsbufp, "driver %T ", pp ? pp->common.id : NIL); } static void @@ -1100,8 +1089,7 @@ iready(Eterm id, ErtsDrvEventState *state) if (erts_port_task_schedule(id, &state->driver.select->intask, ERTS_PORT_TASK_INPUT, - (ErlDrvEvent) state->fd, - NULL) != 0) { + (ErlDrvEvent) state->fd) != 0) { stale_drv_select(id, state, ERL_DRV_READ); } } @@ -1112,8 +1100,7 @@ oready(Eterm id, ErtsDrvEventState *state) if (erts_port_task_schedule(id, &state->driver.select->outtask, ERTS_PORT_TASK_OUTPUT, - (ErlDrvEvent) state->fd, - NULL) != 0) { + (ErlDrvEvent) state->fd) != 0) { stale_drv_select(id, state, ERL_DRV_WRITE); } } diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index db2854fa40..94f9f76a20 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -57,26 +57,48 @@ /* Implement some other way to get the real page size if needed! */ #endif -#define MAX_CACHE_SIZE 30 - #undef MIN #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) #undef MAX #define MAX(X, Y) ((X) > (Y) ? 
(X) : (Y)) -#undef PAGE_MASK -#define INV_PAGE_MASK ((Uint) (page_size - 1)) -#define PAGE_MASK (~INV_PAGE_MASK) -#define PAGE_FLOOR(X) ((X) & PAGE_MASK) -#define PAGE_CEILING(X) PAGE_FLOOR((X) + INV_PAGE_MASK) -#define PAGES(X) ((X) >> page_shift) +#define INV_ALIGNED_MASK ((UWord) ((MSEG_ALIGNED_SIZE) - 1)) +#define ALIGNED_MASK (~INV_ALIGNED_MASK) +#define ALIGNED_FLOOR(X) (((UWord)(X)) & ALIGNED_MASK) +#define ALIGNED_CEILING(X) ALIGNED_FLOOR((X) + INV_ALIGNED_MASK) +#define MAP_IS_ALIGNED(X) (((UWord)(X) & (MSEG_ALIGNED_SIZE - 1)) == 0) + +#define IS_2POW(X) ((X) && !((X) & ((X) - 1))) +static ERTS_INLINE Uint ceil_2pow(Uint x) { + int i = 1 << (4 + (sizeof(Uint) != 4 ? 1 : 0)); + x--; + do { x |= x >> i; } while(i >>= 1); + return x + 1; +} +static const int debruijn[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 +}; + +#define LOG2(X) (debruijn[((Uint32)(((X) & -(X)) * 0x077CB531U)) >> 27]) + +#define CACHE_AREAS (32 - MSEG_ALIGN_BITS) + +#define SIZE_TO_CACHE_AREA_IDX(S) (LOG2((S)) - MSEG_ALIGN_BITS) +#define MAX_CACHE_SIZE (30) + +#define MSEG_FLG_IS_2POW(X) ((X) & ERTS_MSEG_FLG_2POW) + +#ifdef DEBUG +#define DBG(F,...) fprintf(stderr, (F), __VA_ARGS__ ) +#else +#define DBG(F,...) do{}while(0) +#endif static int atoms_initialized; typedef struct mem_kind_t MemKind; -static void mseg_clear_cache(MemKind*); - #if HALFWORD_HEAP static int initialize_pmmap(void); static void *pmmap(size_t size); @@ -116,15 +138,6 @@ static int mmap_fd; #error "Not supported" #endif /* #if HAVE_MMAP */ -#if defined(ERTS_MSEG_FAKE_SEGMENTS) && HALFWORD_HEAP -# warning "ERTS_MSEG_FAKE_SEGMENTS will only be used for high memory segments" -#endif - -#if defined(ERTS_MSEG_FAKE_SEGMENTS) -#undef CAN_PARTLY_DESTROY -#define CAN_PARTLY_DESTROY 0 -#endif - const ErtsMsegOpt_t erts_mseg_default_opt = { 1, /* Use cache */ 1, /* Preserv data */ @@ -137,26 +150,17 @@ const ErtsMsegOpt_t erts_mseg_default_opt = { }; -typedef struct cache_desc_t_ { - void *seg; - Uint size; - struct cache_desc_t_ *next; - struct cache_desc_t_ *prev; -} cache_desc_t; - typedef struct { Uint32 giga_no; Uint32 no; } CallCounter; -static Uint page_size; -static Uint page_shift; - typedef struct { CallCounter alloc; CallCounter dealloc; CallCounter realloc; CallCounter create; + CallCounter create_resize; CallCounter destroy; #if HAVE_MSEG_RECREATE CallCounter recreate; @@ -165,17 +169,25 @@ typedef struct { CallCounter check_cache; } ErtsMsegCalls; +typedef struct cache_t_ cache_t; + +struct cache_t_ { + Uint size; + void *seg; + cache_t *next; +}; + + typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t; struct mem_kind_t { - cache_desc_t cache_descs[MAX_CACHE_SIZE]; - cache_desc_t *free_cache_descs; - cache_desc_t *cache; - cache_desc_t *cache_end; - - Uint cache_size; - Uint min_cached_seg_size; - Uint max_cached_seg_size; + + cache_t cache[MAX_CACHE_SIZE]; + cache_t *cache_unpowered; + cache_t *cache_area[CACHE_AREAS]; + cache_t *cache_free; + + Sint cache_size; Uint cache_hits; struct { @@ -320,8 +332,7 @@ static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */ static ERTS_INLINE void -schedule_cache_check(ErtsMsegAllctr_t *ma) -{ +schedule_cache_check(ErtsMsegAllctr_t *ma) { if (!ma->is_cache_check_scheduled && ma->is_init_done) { erts_set_aux_work_timeout(ma->ix, @@ -331,12 +342,45 @@ schedule_cache_check(ErtsMsegAllctr_t *ma) } } +/* remove ErtsMsegAllctr_t from arguments? 
+ * only used for statistics + */ +static ERTS_INLINE void * +mmap_align(ErtsMsegAllctr_t *ma, void *addr, size_t length, int prot, int flags, int fd, off_t offset) { + + void *p, *q; + UWord d; + + p = mmap(addr, length, prot, flags, fd, offset); + + if (MAP_IS_ALIGNED(p) || p == MAP_FAILED) + return p; + + if (ma) + INC_CC(ma, create_resize); + + munmap(p, length); + + if ((p = mmap(addr, length + MSEG_ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED) + return MAP_FAILED; + + q = (void *)ALIGNED_CEILING(p); + d = q - p; + + if (d > 0) + munmap(p, d); + + if (MSEG_ALIGNED_SIZE - d > 0) + munmap((void *) (q + length), MSEG_ALIGNED_SIZE - d); + + return q; +} + static ERTS_INLINE void * mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) { void *seg; - - ASSERT(size % page_size == 0); + ASSERT(size % MSEG_ALIGNED_SIZE == 0); #if HALFWORD_HEAP if (mk == &ma->low_mem) { @@ -345,18 +389,17 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg); return NULL; } - } - else + } else #endif { -#if defined(ERTS_MSEG_FAKE_SEGMENTS) - seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size); -#elif HAVE_MMAP +#if HAVE_MMAP { - seg = (void *) mmap((void *) 0, (size_t) size, + seg = (void *) mmap_align(ma, (void *) 0, (size_t) size, MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0); if (seg == (void *) MAP_FAILED) seg = NULL; + + ASSERT(MAP_IS_ALIGNED(seg) || !seg); } #else # error "Missing mseg_create() implementation" @@ -369,38 +412,24 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) } static ERTS_INLINE void -mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) -{ -#ifdef DEBUG - int res; -#endif +mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) { + ERTS_DECLARE_DUMMY(int res); #if HALFWORD_HEAP if (mk == &ma->low_mem) { -#ifdef DEBUG - res = -#endif - pmunmap((void *) seg, size); + res = pmunmap((void *) seg, size); } else #endif { -#ifdef ERTS_MSEG_FAKE_SEGMENTS - erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg); -#ifdef DEBUG - res = 0; -#endif -#elif HAVE_MMAP -#ifdef DEBUG - res = -#endif - munmap((void *) seg, size); +#ifdef HAVE_MMAP + res = munmap((void *) seg, size); #else # error "Missing mseg_destroy() implementation" #endif } - ASSERT(size % page_size == 0); + ASSERT(size % MSEG_ALIGNED_SIZE == 0); ASSERT(res == 0); INC_CC(ma, destroy); @@ -408,14 +437,36 @@ mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) } #if HAVE_MSEG_RECREATE +#if defined(__NetBsd__) +#define MREMAP_FLAGS (0) +#else +#define MREMAP_FLAGS (MREMAP_MAYMOVE) +#endif + + +/* mseg_recreate + * May return *unaligned* segments as in address not aligned to MSEG_ALIGNMENT + * it is still page aligned + * + * This is fine for single block carriers as long as we don't cache misaligned + * segments (since multiblock carriers may use them) + * + * For multiblock carriers we *need* MSEG_ALIGNMENT but mbc's will never be + * reallocated. 
+ * + * This should probably be fixed the following way: + * 1) Use an option to segment allocation - NEED_ALIGNMENT + * 2) Add mremap_align which takes care of aligning a new a mremaped area + * 3) Fix the cache to handle of aligned and unaligned segments + */ static ERTS_INLINE void * mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size) { void *new_seg; - ASSERT(old_size % page_size == 0); - ASSERT(new_size % page_size == 0); + ASSERT(old_size % MSEG_ALIGNED_SIZE == 0); + ASSERT(new_size % MSEG_ALIGNED_SIZE == 0); #if HALFWORD_HEAP if (mk == &ma->low_mem) { @@ -426,22 +477,12 @@ mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, U else #endif { -#if defined(ERTS_MSEG_FAKE_SEGMENTS) - new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size); -#elif HAVE_MREMAP - - #if defined(__NetBSD__) - new_seg = (void *) mremap((void *) old_seg, - (size_t) old_size, - NULL, - (size_t) new_size, - 0); - #else - new_seg = (void *) mremap((void *) old_seg, - (size_t) old_size, - (size_t) new_size, - MREMAP_MAYMOVE); - #endif +#if HAVE_MREMAP +#if defined(__NetBSD__) + new_seg = mremap(old_seg, (size_t)old_size, NULL, new_size, MREMAP_FLAGS); +#else + new_seg = mremap(old_seg, (size_t)old_size, (size_t)new_size, MREMAP_FLAGS); +#endif if (new_seg == (void *) MAP_FAILED) new_seg = NULL; #else @@ -475,151 +516,265 @@ do { \ #define ERTS_DBG_MK_CHK_THR_ACCESS(MK) #endif -static ERTS_INLINE cache_desc_t * -alloc_cd(MemKind* mk) -{ - cache_desc_t *cd = mk->free_cache_descs; +/* NEW CACHE interface */ + +static ERTS_INLINE cache_t *mseg_cache_alloc_descriptor(MemKind *mk) { + cache_t *c = mk->cache_free; + ERTS_DBG_MK_CHK_THR_ACCESS(mk); - if (cd) - mk->free_cache_descs = cd->next; - return cd; + if (c) + mk->cache_free = c->next; + + return c; } -static ERTS_INLINE void -free_cd(MemKind* mk, cache_desc_t *cd) -{ +static ERTS_INLINE void mseg_cache_free_descriptor(MemKind *mk, cache_t *c) { ERTS_DBG_MK_CHK_THR_ACCESS(mk); - cd->next = mk->free_cache_descs; - mk->free_cache_descs = cd; + ASSERT(c); + + c->seg = NULL; + c->size = 0; + c->next = mk->cache_free; + mk->cache_free = c; } +static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) { -static ERTS_INLINE void -link_cd(MemKind* mk, cache_desc_t *cd) -{ + cache_t *c; ERTS_DBG_MK_CHK_THR_ACCESS(mk); - if (mk->cache) - mk->cache->prev = cd; - cd->next = mk->cache; - cd->prev = NULL; - mk->cache = cd; - - if (!mk->cache_end) { - ASSERT(!cd->next); - mk->cache_end = cd; + + if (mk->cache_free && MAP_IS_ALIGNED(seg)) { + if (IS_2POW(size)) { + int ix = SIZE_TO_CACHE_AREA_IDX(size); + + ASSERT(ix < CACHE_AREAS); + ASSERT((1 << (ix + MSEG_ALIGN_BITS)) == size); + + /* unlink from free cache list */ + c = mseg_cache_alloc_descriptor(mk); + + /* link to cache area */ + c->seg = seg; + c->size = size; + c->next = mk->cache_area[ix]; + + mk->cache_area[ix] = c; + mk->cache_size++; + + ASSERT(mk->cache_size <= mk->ma->max_cache_size); + + return 1; + } else { + /* unlink from free cache list */ + c = mseg_cache_alloc_descriptor(mk); + + /* link to cache area */ + c->seg = seg; + c->size = size; + c->next = mk->cache_unpowered; + + mk->cache_unpowered = c; + mk->cache_size++; + + ASSERT(mk->cache_size <= mk->ma->max_cache_size); + + return 1; + } } - mk->cache_size++; + return 0; } -#if CAN_PARTLY_DESTROY -static ERTS_INLINE void -end_link_cd(MemKind* mk, cache_desc_t *cd) -{ +static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p) { + + Uint size = 
*size_p; + ERTS_DBG_MK_CHK_THR_ACCESS(mk); - if (mk->cache_end) - mk->cache_end->next = cd; - cd->next = NULL; - cd->prev = mk->cache_end; - mk->cache_end = cd; - - if (!mk->cache) { - ASSERT(!cd->prev); - mk->cache = cd; - } + if (IS_2POW(size)) { + + int i, ix = SIZE_TO_CACHE_AREA_IDX(size); + void *seg; + cache_t *c; + Uint csize; + + for( i = ix; i < CACHE_AREAS; i++) { + + if ((c = mk->cache_area[i]) == NULL) + continue; + + ASSERT(IS_2POW(c->size)); + + /* unlink from cache area */ + csize = c->size; + seg = c->seg; + mk->cache_area[i] = c->next; + c->next = NULL; + mk->cache_size--; + mk->cache_hits++; + + /* link to free cache list */ + mseg_cache_free_descriptor(mk, c); + + ASSERT(!(mk->cache_size < 0)); + + /* divvy up the cache - if needed */ + while( i > ix) { + csize = csize >> 1; + /* try to cache half of it */ + if (!cache_bless_segment(mk, (char *)seg + csize, csize)) { + /* wouldn't cache .. destroy it instead */ + mseg_destroy(mk->ma, mk, (char *)seg + csize, csize); + } + i--; + } + ASSERT(csize == size); + return seg; + } + } + else if (mk->cache_unpowered) { + void *seg; + cache_t *c, *pc; + Uint csize; + Uint bad_max_abs = mk->ma->abs_max_cache_bad_fit; + Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit; + + c = mk->cache_unpowered; + pc = c; + + while (c) { + csize = c->size; + if (csize >= size && + ((csize - size)*100 < bad_max_rel*size) && + (csize - size) < bad_max_abs ) { + + /* unlink from cache area */ + seg = c->seg; + + if (pc == c) { + mk->cache_unpowered = c->next; + } else { + pc->next = c->next; + } + + c->next = NULL; + mk->cache_size--; + mk->cache_hits++; + + /* link to free cache list */ + mseg_cache_free_descriptor(mk, c); + *size_p = csize; + + return seg; + } - mk->cache_size++; + pc = c; + c = c->next; + } + } + return NULL; } -#endif -static ERTS_INLINE void -unlink_cd(MemKind* mk, cache_desc_t *cd) -{ - ERTS_DBG_MK_CHK_THR_ACCESS(mk); - if (cd->next) - cd->next->prev = cd->prev; - else - mk->cache_end = cd->prev; - - if (cd->prev) - cd->prev->next = cd->next; - else - mk->cache = cd->next; - ASSERT(mk->cache_size > 0); +/* *_mseg_check_*_cache + * Slowly remove segments cached in the allocator by + * using callbacks from aux-work in the scheduler. 
+ */ + +static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t **head) { + cache_t *c = NULL; + + c = *head; + + ASSERT( c != NULL ); + + *head = c->next; + + if (erts_mtrace_enabled) + erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg); + + mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_cache_free_descriptor(mk, c); + + mk->segments.current.watermark--; mk->cache_size--; -} -static ERTS_INLINE void -check_cache_limits(MemKind* mk) -{ - cache_desc_t *cd; - ERTS_DBG_MK_CHK_THR_ACCESS(mk); - mk->max_cached_seg_size = 0; - mk->min_cached_seg_size = ~((Uint) 0); - for (cd = mk->cache; cd; cd = cd->next) { - if (cd->size < mk->min_cached_seg_size) - mk->min_cached_seg_size = cd->size; - if (cd->size > mk->max_cached_seg_size) - mk->max_cached_seg_size = cd->size; - } + ASSERT( mk->cache_size >= 0 ); + + return mk->cache_size; } -static ERTS_INLINE void -adjust_cache_size(MemKind* mk, int force_check_limits) -{ - cache_desc_t *cd; - int check_limits = force_check_limits; - Sint max_cached = ((Sint) mk->segments.current.watermark - - (Sint) mk->segments.current.no); - ERTS_DBG_MK_CHK_THR_ACCESS(mk); - while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) { - ASSERT(mk->cache_end); - cd = mk->cache_end; - if (!check_limits && - !(mk->min_cached_seg_size < cd->size - && cd->size < mk->max_cached_seg_size)) { - check_limits = 1; - } +static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t **head) { + cache_t *c = NULL, *next = NULL; + + c = *head; + ASSERT( c != NULL ); + + while (c) { + + next = c->next; + if (erts_mtrace_enabled) - erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg); - mseg_destroy(mk->ma, mk, cd->seg, cd->size); - unlink_cd(mk,cd); - free_cd(mk,cd); - } + erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg); - if (check_limits) - check_cache_limits(mk); -} + mseg_destroy(mk->ma, mk, c->seg, c->size); + mseg_cache_free_descriptor(mk, c); -static Uint -check_one_cache(MemKind* mk) -{ - if (mk->segments.current.watermark > mk->segments.current.no) mk->segments.current.watermark--; - adjust_cache_size(mk, 0); + mk->cache_size--; + + c = next; + } + + *head = NULL; + + ASSERT( mk->cache_size >= 0 ); - if (mk->cache_size) - schedule_cache_check(mk->ma); return mk->cache_size; } -static void do_cache_check(ErtsMsegAllctr_t *ma) -{ - int empty_cache = 1; +/* mseg_check_memkind_cache + * - Check if we can empty some cached segments in this + * MemKind. + */ + + +static Uint mseg_check_memkind_cache(MemKind *mk) { + int i; + + ERTS_DBG_MK_CHK_THR_ACCESS(mk); + + for (i = 0; i < CACHE_AREAS; i++) { + if (mk->cache_area[i] != NULL) + return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_area[i])); + } + + if (mk->cache_unpowered) + return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered)); + + return 0; +} + +/* mseg_cache_check + * - Check if we have some cache we can purge + * in any of the memkinds. 
+ */ + +static void mseg_cache_check(ErtsMsegAllctr_t *ma) { MemKind* mk; + Uint empty_cache = 1; ERTS_MSEG_LOCK(ma); - for (mk=ma->mk_list; mk; mk=mk->next) { - if (check_one_cache(mk)) + for (mk = ma->mk_list; mk; mk = mk->next) { + if (mseg_check_memkind_cache(mk)) empty_cache = 0; } + /* If all MemKinds caches are empty, + * remove aux-work callback + */ if (empty_cache) { ma->is_cache_check_scheduled = 0; - erts_set_aux_work_timeout(ma->ix, - ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK, - 0); + erts_set_aux_work_timeout(ma->ix, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK, 0); } INC_CC(ma, check_cache); @@ -627,27 +782,65 @@ static void do_cache_check(ErtsMsegAllctr_t *ma) ERTS_MSEG_UNLOCK(ma); } -void erts_mseg_cache_check(void) -{ - do_cache_check(ERTS_MSEG_ALLCTR_SS()); +/* erts_mseg_cache_check + * - This is a callback that is scheduled as aux-work from + * schedulers and is called at some interval if we have a cache + * on this mseg-allocator and memkind. + * - Purpose: Empty cache slowly so we don't collect mapped areas + * and bloat memory. + */ + +void erts_mseg_cache_check(void) { + mseg_cache_check(ERTS_MSEG_ALLCTR_SS()); } -static void -mseg_clear_cache(MemKind* mk) -{ - mk->segments.current.watermark = 0; - adjust_cache_size(mk, 1); +/* *_mseg_clear_*_cache + * Remove cached segments from the allocator completely + */ + +static void mseg_clear_memkind_cache(MemKind *mk) { + int i; + + /* drop pow2 caches */ + for (i = 0; i < CACHE_AREAS; i++) { + if (mk->cache_area[i] == NULL) + continue; + + mseg_drop_memkind_cache_size(mk, &(mk->cache_area[i])); + ASSERT(mk->cache_area[i] == NULL); + } + /* drop varied caches */ + if(mk->cache_unpowered) + mseg_drop_memkind_cache_size(mk, &(mk->cache_unpowered)); + + ASSERT(mk->cache_unpowered == NULL); + ASSERT(mk->cache_size == 0); +} + +static void mseg_clear_cache(ErtsMsegAllctr_t *ma) { + MemKind* mk; + + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); + - ASSERT(!mk->cache); - ASSERT(!mk->cache_end); - ASSERT(!mk->cache_size); + for (mk = ma->mk_list; mk; mk = mk->next) { + mseg_clear_memkind_cache(mk); + } - mk->segments.current.watermark = mk->segments.current.no; + INC_CC(ma, clear_cache); - INC_CC(mk->ma, clear_cache); + ERTS_MSEG_UNLOCK(ma); } +void erts_mseg_clear_cache(void) { + mseg_clear_cache(ERTS_MSEG_ALLCTR_SS()); + mseg_clear_cache(ERTS_MSEG_ALLCTR_IX(0)); +} + + + static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma, const ErtsMsegOpt_t *opt) { @@ -660,116 +853,40 @@ static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma, static void * mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p, - const ErtsMsegOpt_t *opt) + Uint flags, const ErtsMsegOpt_t *opt) { - Uint max, min, diff_size, size; - cache_desc_t *cd, *cand_cd; + Uint size; void *seg; MemKind* mk = memkind(ma, opt); INC_CC(ma, alloc); - size = PAGE_CEILING(*size_p); + /* Carrier align */ + size = ALIGNED_CEILING(*size_p); + + /* Cache optim (if applicable) */ + if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(size)) + size = ceil_2pow(size); #if CAN_PARTLY_DESTROY if (size < ma->min_seg_size) ma->min_seg_size = size; #endif + + if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size)) != NULL) + goto done; - if (!opt->cache) { - create_seg: - adjust_cache_size(mk,0); - seg = mseg_create(ma, mk, size); - if (!seg) { - mseg_clear_cache(mk); - seg = mseg_create(ma, mk, size); - if (!seg) - size = 0; - } - - *size_p = size; - if (seg) { - if (erts_mtrace_enabled) - erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size); - 
ERTS_MSEG_ALLOC_STAT(mk,size); - } - return seg; - } - - if (size > mk->max_cached_seg_size) - goto create_seg; - - if (size < mk->min_cached_seg_size) { - - diff_size = mk->min_cached_seg_size - size; - - if (diff_size > ma->abs_max_cache_bad_fit) - goto create_seg; - - if (100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) - goto create_seg; - - } - - max = 0; - min = ~((Uint) 0); - cand_cd = NULL; - - for (cd = mk->cache; cd; cd = cd->next) { - if (cd->size >= size) { - if (!cand_cd) { - cand_cd = cd; - continue; - } - else if (cd->size < cand_cd->size) { - if (max < cand_cd->size) - max = cand_cd->size; - if (min > cand_cd->size) - min = cand_cd->size; - cand_cd = cd; - continue; - } - } - if (max < cd->size) - max = cd->size; - if (min > cd->size) - min = cd->size; - } - - mk->min_cached_seg_size = min; - mk->max_cached_seg_size = max; - - if (!cand_cd) - goto create_seg; - - diff_size = cand_cd->size - size; - - if (diff_size > ma->abs_max_cache_bad_fit - || 100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) { - if (mk->max_cached_seg_size < cand_cd->size) - mk->max_cached_seg_size = cand_cd->size; - if (mk->min_cached_seg_size > cand_cd->size) - mk->min_cached_seg_size = cand_cd->size; - goto create_seg; - } - - mk->cache_hits++; - - size = cand_cd->size; - seg = cand_cd->seg; - - unlink_cd(mk,cand_cd); - free_cd(mk,cand_cd); + if ((seg = mseg_create(ma, mk, size)) == NULL) + size = 0; +done: *size_p = size; + if (seg) { + if (erts_mtrace_enabled) + erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size); - if (erts_mtrace_enabled) { - erts_mtrace_crr_free(SEGTYPE, SEGTYPE, seg); - erts_mtrace_crr_alloc(seg, atype, SEGTYPE, size); - } - - if (seg) ERTS_MSEG_ALLOC_STAT(mk,size); + } return seg; } @@ -780,73 +897,42 @@ mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size, const ErtsMsegOpt_t *opt) { MemKind* mk = memkind(ma, opt); - cache_desc_t *cd; + ERTS_MSEG_DEALLOC_STAT(mk,size); - if (!opt->cache || ma->max_cache_size == 0) { - if (erts_mtrace_enabled) - erts_mtrace_crr_free(atype, SEGTYPE, seg); - mseg_destroy(ma, mk, seg, size); + if (opt->cache && cache_bless_segment(mk, seg, size)) { + schedule_cache_check(ma); + goto done; } - else { - int check_limits = 0; - - if (size < mk->min_cached_seg_size) - mk->min_cached_seg_size = size; - if (size > mk->max_cached_seg_size) - mk->max_cached_seg_size = size; - - if (!mk->free_cache_descs) { - cd = mk->cache_end; - if (!(mk->min_cached_seg_size < cd->size - && cd->size < mk->max_cached_seg_size)) { - check_limits = 1; - } - if (erts_mtrace_enabled) - erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg); - mseg_destroy(ma, mk, cd->seg, cd->size); - unlink_cd(mk,cd); - free_cd(mk,cd); - } - cd = alloc_cd(mk); - ASSERT(cd); - cd->seg = seg; - cd->size = size; - link_cd(mk,cd); + if (erts_mtrace_enabled) + erts_mtrace_crr_free(atype, SEGTYPE, seg); - if (erts_mtrace_enabled) { - erts_mtrace_crr_free(atype, SEGTYPE, seg); - erts_mtrace_crr_alloc(seg, SEGTYPE, SEGTYPE, size); - } - - /* ASSERT(segments.current.watermark >= segments.current.no + cache_size); */ - - if (check_limits) - check_cache_limits(mk); + mseg_destroy(ma, mk, seg, size); - schedule_cache_check(ma); - - } +done: INC_CC(ma, dealloc); } static void * mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p, const ErtsMsegOpt_t *opt) + Uint old_size, Uint *new_size_p, Uint flags, const ErtsMsegOpt_t *opt) { MemKind* mk; void *new_seg; Uint new_size; + /* Just allocate a new 
segment if we didn't have one before */ if (!seg || !old_size) { - new_seg = mseg_alloc(ma, atype, new_size_p, opt); + new_seg = mseg_alloc(ma, atype, new_size_p, flags, opt); DEC_CC(ma, alloc); return new_seg; } + + /* Dealloc old segment if new segment is of size 0 */ if (!(*new_size_p)) { mseg_dealloc(ma, atype, seg, old_size, opt); DEC_CC(ma, dealloc); @@ -855,7 +941,13 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, mk = memkind(ma, opt); new_seg = seg; - new_size = PAGE_CEILING(*new_size_p); + + /* Carrier align */ + new_size = ALIGNED_CEILING(*new_size_p); + + /* Cache optim (if applicable) */ + if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(new_size)) + new_size = ceil_2pow(new_size); if (new_size == old_size) ; @@ -866,53 +958,27 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, if (new_size < ma->min_seg_size) ma->min_seg_size = new_size; #endif - + /* +M<S>rsbcst <ratio> */ if (shrink_sz < opt->abs_shrink_th - && 100*PAGES(shrink_sz) < opt->rel_shrink_th*PAGES(old_size)) { + && 100*shrink_sz < opt->rel_shrink_th*old_size) { new_size = old_size; } else { #if CAN_PARTLY_DESTROY - if (shrink_sz > ma->min_seg_size - && mk->free_cache_descs - && opt->cache) { - cache_desc_t *cd; - - cd = alloc_cd(mk); - ASSERT(cd); - cd->seg = ((char *) seg) + new_size; - cd->size = shrink_sz; - end_link_cd(mk,cd); - - if (erts_mtrace_enabled) { - erts_mtrace_crr_realloc(new_seg, - atype, - SEGTYPE, - seg, - new_size); - erts_mtrace_crr_alloc(cd->seg, SEGTYPE, SEGTYPE, cd->size); - } - schedule_cache_check(ma); - } - else { - if (erts_mtrace_enabled) - erts_mtrace_crr_realloc(new_seg, - atype, - SEGTYPE, - seg, - new_size); - mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz); - } + if (erts_mtrace_enabled) + erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); -#elif HAVE_MSEG_RECREATE + mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz); +#elif HAVE_MSEG_RECREATE goto do_recreate; - #else + new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); + + ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); - new_seg = mseg_alloc(ma, atype, &new_size, opt); if (!new_seg) new_size = old_size; else { @@ -921,16 +987,15 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, MIN(new_size, old_size)); mseg_dealloc(ma, atype, seg, old_size, opt); } - #endif - } } else { if (!opt->preserv) { mseg_dealloc(ma, atype, seg, old_size, opt); - new_seg = mseg_alloc(ma, atype, &new_size, opt); + new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); + ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); } else { #if HAVE_MSEG_RECREATE @@ -938,18 +1003,23 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, do_recreate: #endif new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size); + /* ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); + * will not always be aligned and it ok for now + */ + if (erts_mtrace_enabled) erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); if (!new_seg) new_size = old_size; #else - new_seg = mseg_alloc(ma, atype, &new_size, opt); + new_seg = mseg_alloc(ma, atype, &new_size, flags, opt); + + ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg); + if (!new_seg) new_size = old_size; else { - sys_memcpy(((char *) new_seg), - ((char *) seg), - MIN(new_size, old_size)); + sys_memcpy(((char *) new_seg), ((char *) seg), MIN(new_size, old_size)); mseg_dealloc(ma, atype, seg, old_size, opt); } #endif @@ -958,6 +1028,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, INC_CC(ma, realloc); + 
ASSERT(!MSEG_FLG_IS_2POW(flags) || IS_2POW(new_size)); *new_size_p = new_size; ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size); @@ -990,6 +1061,7 @@ static struct { Eterm mseg_dealloc; Eterm mseg_realloc; Eterm mseg_create; + Eterm mseg_create_resize; Eterm mseg_destroy; #if HAVE_MSEG_RECREATE Eterm mseg_recreate; @@ -1046,6 +1118,7 @@ init_atoms(ErtsMsegAllctr_t *ma) AM_INIT(mseg_dealloc); AM_INIT(mseg_realloc); AM_INIT(mseg_create); + AM_INIT(mseg_create_resize); AM_INIT(mseg_destroy); #if HAVE_MSEG_RECREATE AM_INIT(mseg_recreate); @@ -1065,14 +1138,12 @@ init_atoms(ErtsMsegAllctr_t *ma) erts_mtx_unlock(&init_atoms_mutex); } - #define bld_uint erts_bld_uint #define bld_cons erts_bld_cons #define bld_tuple erts_bld_tuple #define bld_string erts_bld_string #define bld_2tup_list erts_bld_2tup_list - /* * bld_unstable_uint() (instead of bld_uint()) is used when values may * change between size check and actual build. This because a value @@ -1116,6 +1187,7 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp, *lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp); } + static Eterm info_options(ErtsMsegAllctr_t *ma, char *prefix, @@ -1176,6 +1248,7 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp PRINT_CC(to, arg, dealloc); PRINT_CC(to, arg, realloc); PRINT_CC(to, arg, create); + PRINT_CC(to, arg, create_resize); PRINT_CC(to, arg, destroy); #if HAVE_MSEG_RECREATE PRINT_CC(to, arg, recreate); @@ -1215,6 +1288,10 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp bld_unstable_uint(hpp, szp, ma->calls.create.giga_no), bld_unstable_uint(hpp, szp, ma->calls.create.no)); + add_3tup(hpp, szp, &res, + am.mseg_create_resize, + bld_unstable_uint(hpp, szp, ma->calls.create_resize.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.create_resize.no)); add_3tup(hpp, szp, &res, am.mseg_realloc, @@ -1401,21 +1478,21 @@ erts_mseg_info(int ix, } void * -erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) +erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *seg; ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); - seg = mseg_alloc(ma, atype, size_p, opt); + seg = mseg_alloc(ma, atype, size_p, flags, opt); ERTS_MSEG_UNLOCK(ma); return seg; } void * -erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p) +erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p, Uint flags) { - return erts_mseg_alloc_opt(atype, size_p, &erts_mseg_default_opt); + return erts_mseg_alloc_opt(atype, size_p, flags, &erts_mseg_default_opt); } void @@ -1438,44 +1515,24 @@ erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size) void * erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, + Uint flags, const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *new_seg; ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); - new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, opt); + new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, flags, opt); ERTS_MSEG_UNLOCK(ma); return new_seg; } void * erts_mseg_realloc(ErtsAlcType_t atype, void *seg, - Uint old_size, Uint *new_size_p) + Uint old_size, Uint *new_size_p, Uint flags) { return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, - &erts_mseg_default_opt); -} - -void -erts_mseg_clear_cache(void) -{ - ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS(); - MemKind* mk; - -start: - - ERTS_MSEG_LOCK(ma); - 
ERTS_DBG_MA_CHK_THR_ACCESS(ma); - for (mk=ma->mk_list; mk; mk=mk->next) { - mseg_clear_cache(mk); - } - ERTS_MSEG_UNLOCK(ma); - - if (ma->ix != 0) { - ma = ERTS_MSEG_ALLCTR_IX(0); - goto start; - } + flags, &erts_mseg_default_opt); } Uint @@ -1496,28 +1553,32 @@ erts_mseg_no(const ErtsMsegOpt_t *opt) Uint erts_mseg_unit_size(void) { - return page_size; + return MSEG_ALIGNED_SIZE; } static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name) { - unsigned i; + int i; - mk->cache = NULL; - mk->cache_end = NULL; - mk->max_cached_seg_size = 0; - mk->min_cached_seg_size = ~((Uint) 0); - mk->cache_size = 0; - mk->cache_hits = 0; + for (i = 0; i < CACHE_AREAS; i++) { + mk->cache_area[i] = NULL; + } + + mk->cache_free = NULL; - if (ma->max_cache_size > 0) { - for (i = 0; i < ma->max_cache_size - 1; i++) - mk->cache_descs[i].next = &mk->cache_descs[i + 1]; - mk->cache_descs[ma->max_cache_size - 1].next = NULL; - mk->free_cache_descs = &mk->cache_descs[0]; + ASSERT(ma->max_cache_size <= MAX_CACHE_SIZE); + + for (i = 0; i < ma->max_cache_size; i++) { + mk->cache[i].seg = NULL; + mk->cache[i].size = 0; + mk->cache[i].next = mk->cache_free; + mk->cache_free = &(mk->cache[i]); } - else - mk->free_cache_descs = NULL; + + mk->cache_unpowered = NULL; + + mk->cache_size = 0; + mk->cache_hits = 0; mk->segments.current.watermark = 0; mk->segments.current.no = 0; @@ -1570,15 +1631,10 @@ erts_mseg_init(ErtsMsegInit_t *init) initialize_pmmap(); #endif - page_size = GET_PAGE_SIZE; + if (!IS_2POW(GET_PAGE_SIZE)) + erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE); - page_shift = 1; - while ((page_size >> page_shift) != 1) { - if ((page_size & (1 << (page_shift - 1))) != 0) - erl_exit(ERTS_ABORT_EXIT, - "erts_mseg: Unexpected page_size %beu\n", page_size); - page_shift++; - } + ASSERT((MSEG_ALIGNED_SIZE % GET_PAGE_SIZE) == 0); for (i = 0; i < no_mseg_allocators; i++) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(i); @@ -1663,7 +1719,7 @@ erts_mseg_test(unsigned long op, case 0x400: /* Have erts_mseg */ return (unsigned long) 1; case 0x401: - return (unsigned long) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1); + return (unsigned long) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1, (Uint) 0); case 0x402: erts_mseg_dealloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2); return (unsigned long) 0; @@ -1671,7 +1727,8 @@ erts_mseg_test(unsigned long op, return (unsigned long) erts_mseg_realloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2, - (Uint *) a3); + (Uint *) a3, + (Uint) 0); case 0x404: erts_mseg_clear_cache(); return (unsigned long) 0; @@ -1707,7 +1764,40 @@ erts_mseg_test(unsigned long op, * mapping tricks. 
*/ -/*#define HARDDEBUG 1*/ +/* #define HARDDEBUG 1 */ + +#ifdef HARDDEBUG +static void dump_freelist(void) +{ + FreeBlock *p = first; + + while (p) { + fprintf(stderr, "p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n", + (void *) p, (unsigned long) p->num, (void *) p->next); + p = p->next; + } +} + +#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) \ + fprintf(stderr,"Mapping of address %p with size %ld " \ + "does not map complete pages (%s:%d)\r\n", \ + (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) + +#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) \ + fprintf(stderr,"Mapping of address %p with size %ld " \ + "is not page aligned (%s:%d)\r\n", \ + (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) + +#define HARDDEBUG_MAP_FAILED(PTR, SZ) \ + fprintf(stderr, "Could not actually map memory " \ + "at address %p with size %ld (%s:%d) ..\r\n", \ + (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__) +#else +#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) do{}while(0) +#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) do{}while(0) +#define HARDDEBUG_MAP_FAILED(PTR, SZ) do{}while(0) +#endif + #ifdef __APPLE__ #define MAP_ANONYMOUS MAP_ANON @@ -1726,49 +1816,20 @@ typedef struct _free_block { struct _free_block *next; } FreeBlock; -/* Assigned once and for all */ -static size_t pagsz; - /* Protect with lock */ static FreeBlock *first; -static size_t round_up_to_pagesize(size_t size) -{ - size_t x = size / pagsz; - - if ((size % pagsz)) { - ++x; - } - - return pagsz * x; -} - -static size_t round_down_to_pagesize(size_t size) -{ - size_t x = size / pagsz; - - return pagsz * x; -} - static void *do_map(void *ptr, size_t sz) { void *res; - if (round_up_to_pagesize(sz) != sz) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld " - "does not map complete pages\r\n", - (void *) ptr, (unsigned long) sz); -#endif + if (ALIGNED_CEILING(sz) != sz) { + HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz); return NULL; } - if (((unsigned long) ptr) % pagsz) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld " - "is not page aligned\r\n", - (void *) ptr, (unsigned long) sz); -#endif + if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) { + HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); return NULL; } @@ -1782,10 +1843,7 @@ static void *do_map(void *ptr, size_t sz) #endif if (res == MAP_FAILED) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n", - (void *) ptr, (unsigned long) sz); -#endif + HARDDEBUG_MAP_FAILED(ptr, sz); return NULL; } @@ -1796,35 +1854,22 @@ static int do_unmap(void *ptr, size_t sz) { void *res; - if (round_up_to_pagesize(sz) != sz) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld " - "does not map complete pages\r\n", - (void *) ptr, (unsigned long) sz); -#endif + if (ALIGNED_CEILING(sz) != sz) { + HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz); return 1; } - if (((unsigned long) ptr) % pagsz) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld " - "is not page aligned\r\n", - (void *) ptr, (unsigned long) sz); -#endif + if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) { + HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz); return 1; } - res = mmap(ptr, sz, - PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE - | MAP_FIXED, + PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1 , 0); if (res == MAP_FAILED) { -#ifdef HARDDEBUG - fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n", - (void *) ptr, (unsigned long) sz); -#endif + 
HARDDEBUG_MAP_FAILED(ptr, sz); return 1; } @@ -1862,8 +1907,6 @@ static int initialize_pmmap(void) size_t rsz; FreeBlock *initial; - - pagsz = getpagesize(); SET_RANGE_MIN(); if (sizeof(void *) != 8) { erl_exit(1,"Halfword emulator cannot be run in 32bit mode"); @@ -1872,15 +1915,15 @@ static int initialize_pmmap(void) p = (char *) RANGE_MIN; q = (char *) RANGE_MAX; - rsz = round_down_to_pagesize(q - p); + rsz = ALIGNED_FLOOR(q - p); - rptr = mmap((void *) p, rsz, + rptr = mmap_align(NULL, (void *) p, rsz, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | EXTRA_MAP_FLAGS, -1 , 0); #ifdef HARDDEBUG printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n", - p, (unsigned long) rsz, (unsigned long) (rsz / pagsz), + p, (unsigned long) rsz, (unsigned long) (rsz / MSEG_ALIGNED_SIZE), (void *) rptr, (void*)(rptr + rsz)); #endif if ((UWord)(rptr + rsz) > RANGE_MAX) { @@ -1892,39 +1935,27 @@ static int initialize_pmmap(void) munmap((void*)RANGE_MAX, rsz - rsz_trunc); rsz = rsz_trunc; } - if (!do_map(rptr,pagsz)) { + if (!do_map(rptr, MSEG_ALIGNED_SIZE)) { erl_exit(1,"Could not actually mmap first page for halfword emulator...\n"); } initial = (FreeBlock *) rptr; - initial->num = (rsz / pagsz); + initial->num = (rsz / MSEG_ALIGNED_SIZE); initial->next = NULL; first = initial; INIT_LOCK(); return 0; } -#ifdef HARDDEBUG -static void dump_freelist(void) -{ - FreeBlock *p = first; - - while (p) { - printf("p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n", - (void *) p, (unsigned long) p->num, (void *) p->next); - p = p->next; - } -} -#endif - - static void *pmmap(size_t size) { - size_t real_size = round_up_to_pagesize(size); - size_t num_pages = real_size / pagsz; + size_t real_size = ALIGNED_CEILING(size); + size_t num_pages = real_size / MSEG_ALIGNED_SIZE; FreeBlock **block; FreeBlock *tail; FreeBlock *res; + TAKE_LOCK(); + for (block = &first; *block != NULL && (*block)->num < num_pages; block = &((*block)->next)) @@ -1935,29 +1966,25 @@ static void *pmmap(size_t size) } if ((*block)->num == num_pages) { /* nice, perfect fit */ - res = *block; + res = *block; *block = (*block)->next; } else { tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size); - if (!do_map(tail,pagsz)) { -#ifdef HARDDEBUG - fprintf(stderr, "Could not actually allocate page at %p...\r\n", - (void *) tail); -#endif + if (!do_map(tail, MSEG_ALIGNED_SIZE)) { + HARDDEBUG_MAP_FAILED(tail, MSEG_ALIGNED_SIZE); RELEASE_LOCK(); return NULL; } - tail->num = (*block)->num - num_pages; + tail->num = (*block)->num - num_pages; tail->next = (*block)->next; res = *block; *block = tail; } + RELEASE_LOCK(); - if (!do_map(res,real_size)) { -#ifdef HARDDEBUG - fprintf(stderr, "Could not actually allocate %ld at %p...\r\n", - (unsigned long) real_size, (void *) res); -#endif + + if (!do_map(res, real_size)) { + HARDDEBUG_MAP_FAILED(res, real_size); return NULL; } @@ -1966,15 +1993,17 @@ static void *pmmap(size_t size) static int pmunmap(void *p, size_t size) { - size_t real_size = round_up_to_pagesize(size); - size_t num_pages = real_size / pagsz; + size_t real_size = ALIGNED_CEILING(size); + size_t num_pages = real_size / MSEG_ALIGNED_SIZE; + FreeBlock *block; FreeBlock *last; FreeBlock *nb = (FreeBlock *) p; ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0); - if (real_size > pagsz) { - if (do_unmap(((char *) p) + pagsz,real_size - pagsz)) { + + if (real_size > MSEG_ALIGNED_SIZE) { + if (do_unmap(((char *) p) + MSEG_ALIGNED_SIZE, real_size - MSEG_ALIGNED_SIZE)) { return 1; } } @@ -1993,7 +2022,7 @@ static int pmunmap(void 
*p, size_t size) /* Merge new free block with following */ nb->num = block->num + num_pages; nb->next = block->next; - if (do_unmap(block,pagsz)) { + if (do_unmap(block, MSEG_ALIGNED_SIZE)) { RELEASE_LOCK(); return 1; } @@ -2003,11 +2032,11 @@ static int pmunmap(void *p, size_t size) nb->next = block; } if (last != NULL) { - if (p == ((void *) (((char *) last) + (last->num * pagsz)))) { + if (p == ((void *) (((char *) last) + (last->num * MSEG_ALIGNED_SIZE)))) { /* Merge with previous */ last->num += nb->num; last->next = nb->next; - if (do_unmap(nb,pagsz)) { + if (do_unmap(nb, MSEG_ALIGNED_SIZE)) { RELEASE_LOCK(); return 1; } @@ -2024,10 +2053,10 @@ static int pmunmap(void *p, size_t size) static void *pmremap(void *old_address, size_t old_size, size_t new_size) { - size_t new_real_size = round_up_to_pagesize(new_size); - size_t new_num_pages = new_real_size / pagsz; - size_t old_real_size = round_up_to_pagesize(old_size); - size_t old_num_pages = old_real_size / pagsz; + size_t new_real_size = ALIGNED_CEILING(new_size); + size_t new_num_pages = new_real_size / MSEG_ALIGNED_SIZE; + size_t old_real_size = ALIGNED_CEILING(old_size); + size_t old_num_pages = old_real_size / MSEG_ALIGNED_SIZE; if (new_num_pages == old_num_pages) { return old_address; } else if (new_num_pages < old_num_pages) { /* Shrink */ @@ -2045,8 +2074,8 @@ static void *pmremap(void *old_address, size_t old_size, (*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) { /* Normal link in */ if (nfb_pages > 1) { - if (do_unmap((void *)(((char *) vnfb) + pagsz), - (nfb_pages - 1)*pagsz)) { + if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE), + (nfb_pages - 1)*MSEG_ALIGNED_SIZE)) { return NULL; } } @@ -2058,8 +2087,8 @@ static void *pmremap(void *old_address, size_t old_size, nfb->num = nfb_pages + (*block)->num; /* unmap also the first page of the next freeblock */ (*block) = nfb; - if (do_unmap((void *)(((char *) vnfb) + pagsz), - nfb_pages*pagsz)) { + if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE), + nfb_pages*MSEG_ALIGNED_SIZE)) { return NULL; } } @@ -2094,9 +2123,9 @@ static void *pmremap(void *old_address, size_t old_size, size_t remaining_pages = (*block)->num - (new_num_pages - old_num_pages); if (!remaining_pages) { - void *p = (void *) (((char *) (*block)) + pagsz); + void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE); void *n = (*block)->next; - size_t x = ((*block)->num - 1) * pagsz; + size_t x = ((*block)->num - 1) * MSEG_ALIGNED_SIZE; if (x > 0) { if (do_map(p,x) == NULL) { RELEASE_LOCK(); @@ -2108,7 +2137,7 @@ static void *pmremap(void *old_address, size_t old_size, FreeBlock *nfb = (FreeBlock *) ((void *) (((char *) old_address) + new_real_size)); - void *p = (void *) (((char *) (*block)) + pagsz); + void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE); if (do_map(p,new_real_size - old_real_size) == NULL) { RELEASE_LOCK(); return NULL; @@ -2122,5 +2151,4 @@ static void *pmremap(void *old_address, size_t old_size, } } } - #endif /* HALFWORD_HEAP */ diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index 741080fb78..6f373f13f9 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -32,12 +32,35 @@ #if HAVE_MMAP # define HAVE_ERTS_MSEG 1 +# define HAVE_SUPER_ALIGNED_MB_CARRIERS 1 #else # define HAVE_ERTS_MSEG 0 +# define HAVE_SUPER_ALIGNED_MB_CARRIERS 0 +#endif + +#if HAVE_SUPER_ALIGNED_MB_CARRIERS +# define MSEG_ALIGN_BITS (18) + /* Affects hard limits for sbct and lmbcs documented in 
erts_alloc.xml */ +#else +/* If we don't use super aligned multiblock carriers + * we will mmap with page size alignment (and thus use corresponding + * align bits). + * + * Current implementation needs this to be a constant and + * only uses this for user dev testing so setting page size + * to 4096 (12 bits) is fine. + */ +# define MSEG_ALIGN_BITS (12) #endif #if HAVE_ERTS_MSEG +#define MSEG_ALIGNED_SIZE (1 << MSEG_ALIGN_BITS) + +#define ERTS_MSEG_FLG_NONE ((Uint)(0)) +#define ERTS_MSEG_FLG_2POW ((Uint)(1 << 0)) + + #define ERTS_MSEG_VSN_STR "0.9" typedef struct { @@ -68,13 +91,13 @@ typedef struct { extern const ErtsMsegOpt_t erts_mseg_default_opt; -void *erts_mseg_alloc(ErtsAlcType_t, Uint *); -void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, const ErtsMsegOpt_t *); +void *erts_mseg_alloc(ErtsAlcType_t, Uint *, Uint); +void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, Uint, const ErtsMsegOpt_t *); void erts_mseg_dealloc(ErtsAlcType_t, void *, Uint); void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, Uint, const ErtsMsegOpt_t *); -void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *); +void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *, Uint); void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *, - const ErtsMsegOpt_t *); + Uint, const ErtsMsegOpt_t *); void erts_mseg_clear_cache(void); void erts_mseg_cache_check(void); Uint erts_mseg_no( const ErtsMsegOpt_t *); diff --git a/erts/emulator/sys/common/erl_sys_common_misc.c b/erts/emulator/sys/common/erl_sys_common_misc.c index 461e763f03..d22914acea 100644 --- a/erts/emulator/sys/common/erl_sys_common_misc.c +++ b/erts/emulator/sys/common/erl_sys_common_misc.c @@ -105,3 +105,154 @@ int erts_get_native_filename_encoding(void) { return filename_encoding; } + +/* For internal use by sys_double_to_chars_fast() */ +static char* float_first_trailing_zero(char* p) +{ + for (--p; *p == '0' && *(p-1) == '0'; --p); + if (*(p-1) == '.') ++p; + return p; +} + +int +sys_double_to_chars(double fp, char *buffer, size_t buffer_size) +{ + return sys_double_to_chars_ext(fp, buffer, buffer_size, SYS_DEFAULT_FLOAT_DECIMALS); +} + +int +sys_double_to_chars_fast(double f, char *outbuf, int maxlen, int decimals, int compact) +{ + enum { + FRAC_SIZE = 52 + , EXP_SIZE = 11 + , EXP_MASK = (1ll << EXP_SIZE) - 1 + , FRAC_MASK = (1ll << FRAC_SIZE) - 1 + , FRAC_MASK2 = (1ll << (FRAC_SIZE + 1)) - 1 + , MAX_FLOAT = 1ll << (FRAC_SIZE+1) + }; + + long long mantissa, int_part, int_part2, frac_part; + short exp; + int sign, i, n, m, max; + double absf; + union { long long L; double F; } x; + char c, *p = outbuf; + int digit, roundup; + + x.F = f; + + exp = (x.L >> FRAC_SIZE) & EXP_MASK; + mantissa = x.L & FRAC_MASK; + sign = x.L >= 0 ? 
1 : -1; + if (exp == EXP_MASK) { + if (mantissa == 0) { + if (sign == -1) + *p++ = '-'; + *p++ = 'i'; + *p++ = 'n'; + *p++ = 'f'; + } else { + *p++ = 'n'; + *p++ = 'a'; + *p++ = 'n'; + } + *p = '\0'; + return p - outbuf; + } + + exp -= EXP_MASK >> 1; + mantissa |= (1ll << FRAC_SIZE); + frac_part = 0; + int_part = 0; + absf = f * sign; + + /* Don't bother with optimizing too large numbers and decimals */ + if (absf > MAX_FLOAT || decimals > maxlen-17) { + int len = erts_snprintf(outbuf, maxlen, "%.*f", decimals, f); + if (len >= maxlen) + return -1; + p = outbuf + len; + /* Delete trailing zeroes */ + if (compact) + p = float_first_trailing_zero(outbuf + len); + *p = '\0'; + return p - outbuf; + } + + if (exp >= FRAC_SIZE) + int_part = mantissa << (exp - FRAC_SIZE); + else if (exp >= 0) { + int_part = mantissa >> (FRAC_SIZE - exp); + frac_part = (mantissa << (exp + 1)) & FRAC_MASK2; + } + else /* if (exp < 0) */ + frac_part = (mantissa & FRAC_MASK2) >> -(exp + 1); + + if (int_part == 0) { + if (sign == -1) + *p++ = '-'; + *p++ = '0'; + } else { + int ret; + while (int_part != 0) { + int_part2 = int_part / 10; + *p++ = (char)(int_part - ((int_part2 << 3) + (int_part2 << 1)) + '0'); + int_part = int_part2; + } + if (sign == -1) + *p++ = '-'; + /* Reverse string */ + ret = p - outbuf; + for (i = 0, n = ret/2; i < n; i++) { + int j = ret - i - 1; + c = outbuf[i]; + outbuf[i] = outbuf[j]; + outbuf[j] = c; + } + } + if (decimals != 0) + *p++ = '.'; + + max = maxlen - (p - outbuf) - 1 /* leave room for trailing '\0' */; + if (max > decimals) + max = decimals; + for (m = 0; m < max; m++) { + /* frac_part *= 10; */ + frac_part = (frac_part << 3) + (frac_part << 1); + + *p++ = (char)((frac_part >> (FRAC_SIZE + 1)) + '0'); + frac_part &= FRAC_MASK2; + } + + roundup = 0; + /* Rounding - look at the next digit */ + frac_part = (frac_part << 3) + (frac_part << 1); + digit = (frac_part >> (FRAC_SIZE + 1)); + if (digit > 5) + roundup = 1; + else if (digit == 5) { + frac_part &= FRAC_MASK2; + if (frac_part != 0) roundup = 1; + } + if (roundup) { + char d; + int pos = p - outbuf - 1; + do { + d = outbuf[pos]; + if (d == '-') break; + if (d == '.') continue; + if (++d != ':') { + outbuf[pos] = d; + break; + } + outbuf[pos] = '0'; + } while (--pos); + } + + /* Delete trailing zeroes */ + if (compact && *(p - 1) == '0') + p = float_first_trailing_zero(--p); + *p = '\0'; + return p - outbuf; +} diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c index 9e7cbc017f..0b96eded76 100644 --- a/erts/emulator/sys/unix/sys.c +++ b/erts/emulator/sys/unix/sys.c @@ -123,7 +123,8 @@ struct ErtsSysReportExit_ { /* This data is shared by these drivers - initialized by spawn_init() */ static struct driver_data { - int port_num, ofd, packet_bytes; + ErlDrvPort port_num; + int ofd, packet_bytes; ErtsSysReportExit *report_exit; int pid; int alive; @@ -731,7 +732,8 @@ prepare_crash_dump(int secs) list = CONS(hp, make_small(8), list); hp += 2; /* send to heart port, CMD = 8, i.e. 
prepare crash dump =o */ - erts_write_to_port(ERTS_INVALID_PID, heart_port, list); + erts_port_output(NULL, ERTS_PORT_SIG_FLG_FORCE_IMM_CALL, heart_port, + heart_port->common.id, list, NULL); } /* Make sure we unregister at epmd (unknown fd) and get at least @@ -1182,7 +1184,7 @@ static RETSIGTYPE onchld(int signum) #endif } -static int set_driver_data(int port_num, +static int set_driver_data(ErlDrvPort port_num, int ifd, int ofd, int packet_bytes, @@ -1190,6 +1192,7 @@ static int set_driver_data(int port_num, int exit_status, int pid) { + Port *prt; ErtsSysReportExit *report_exit; if (!exit_status) @@ -1198,7 +1201,7 @@ static int set_driver_data(int port_num, report_exit = erts_alloc(ERTS_ALC_T_PRT_REP_EXIT, sizeof(ErtsSysReportExit)); report_exit->next = report_exit_list; - report_exit->port = erts_port[port_num].id; + report_exit->port = erts_drvport2id(port_num); report_exit->pid = pid; report_exit->ifd = read_write & DO_READ ? ifd : -1; report_exit->ofd = read_write & DO_WRITE ? ofd : -1; @@ -1208,7 +1211,9 @@ static int set_driver_data(int port_num, report_exit_list = report_exit; } - erts_port[port_num].os_pid = pid; + prt = erts_drvport2port(port_num, NULL); + if (prt) + prt->os_pid = pid; if (read_write & DO_READ) { driver_data[ifd].packet_bytes = packet_bytes; @@ -1281,7 +1286,7 @@ static void close_pipes(int ifd[2], int ofd[2], int read_write) } } -static void init_fd_data(int fd, int prt) +static void init_fd_data(int fd, ErlDrvPort port_num) { fd_data[fd].buf = NULL; fd_data[fd].cpos = NULL; @@ -1971,7 +1976,7 @@ static void clear_fd_data(int fd) fd_data[fd].psz = 0; } -static void nbio_stop_fd(int prt, int fd) +static void nbio_stop_fd(ErlDrvPort prt, int fd) { driver_select(prt,fd,DO_READ|DO_WRITE,0); clear_fd_data(fd); @@ -2019,7 +2024,8 @@ static ErlDrvData vanilla_start(ErlDrvPort port_num, char* name, static void stop(ErlDrvData fd) { - int prt, ofd; + ErlDrvPort prt; + int ofd; prt = driver_data[(int)(long)fd].port_num; nbio_stop_fd(prt, (int)(long)fd); @@ -2032,7 +2038,7 @@ static void stop(ErlDrvData fd) CHLD_STAT_LOCK; - /* Mark as unused. Maybe resetting the 'port_num' slot is better? */ + /* Mark as unused. 
*/ driver_data[(int)(long)fd].pid = -1; CHLD_STAT_UNLOCK; @@ -2048,7 +2054,7 @@ static void stop(ErlDrvData fd) static void outputv(ErlDrvData e, ErlIOVec* ev) { int fd = (int)(long)e; - int ix = driver_data[fd].port_num; + ErlDrvPort ix = driver_data[fd].port_num; int pb = driver_data[fd].packet_bytes; int ofd = driver_data[fd].ofd; ssize_t n; @@ -2098,7 +2104,7 @@ static void outputv(ErlDrvData e, ErlIOVec* ev) static void output(ErlDrvData e, char* buf, ErlDrvSizeT len) { int fd = (int)(long)e; - int ix = driver_data[fd].port_num; + ErlDrvPort ix = driver_data[fd].port_num; int pb = driver_data[fd].packet_bytes; int ofd = driver_data[fd].ofd; ssize_t n; @@ -2149,7 +2155,7 @@ static void output(ErlDrvData e, char* buf, ErlDrvSizeT len) return; /* 0; */ } -static int port_inp_failure(int port_num, int ready_fd, int res) +static int port_inp_failure(ErlDrvPort port_num, int ready_fd, int res) /* Result: 0 (eof) or -1 (error) */ { int err = errno; @@ -2199,7 +2205,7 @@ static int port_inp_failure(int port_num, int ready_fd, int res) static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd) { int fd = (int)(long)e; - int port_num; + ErlDrvPort port_num; int packet_bytes; int res; Uint h; @@ -2322,7 +2328,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd) static void ready_output(ErlDrvData e, ErlDrvEvent ready_fd) { int fd = (int)(long)e; - int ix = driver_data[fd].port_num; + ErlDrvPort ix = driver_data[fd].port_num; int n; struct iovec* iv; int vsize; @@ -2631,19 +2637,20 @@ report_exit_status(ErtsSysReportExit *rep, int status) Port *pp; #ifdef ERTS_SMP CHLD_STAT_UNLOCK; -#endif + pp = erts_thr_id2port_sflgs(rep->port, + ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); + CHLD_STAT_LOCK; +#else pp = erts_id2port_sflgs(rep->port, NULL, 0, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); -#ifdef ERTS_SMP - CHLD_STAT_LOCK; #endif if (pp) { if (rep->ifd >= 0) { driver_data[rep->ifd].alive = 0; driver_data[rep->ifd].status = status; - (void) driver_select((ErlDrvPort) internal_port_index(pp->id), + (void) driver_select((ErlDrvPort) pp, rep->ifd, (ERL_DRV_READ|ERL_DRV_USE), 1); @@ -2651,12 +2658,16 @@ report_exit_status(ErtsSysReportExit *rep, int status) if (rep->ofd >= 0) { driver_data[rep->ofd].alive = 0; driver_data[rep->ofd].status = status; - (void) driver_select((ErlDrvPort) internal_port_index(pp->id), + (void) driver_select((ErlDrvPort) pp, rep->ofd, (ERL_DRV_WRITE|ERL_DRV_USE), 1); } +#ifdef ERTS_SMP + erts_thr_port_release(pp); +#else erts_port_release(pp); +#endif } erts_free(ERTS_ALC_T_PRT_REP_EXIT, rep); } diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c index 3fcb4d88dc..6875c17a75 100644 --- a/erts/emulator/sys/unix/sys_float.c +++ b/erts/emulator/sys/unix/sys_float.c @@ -735,7 +735,7 @@ void erts_sys_unblock_fpe(int unmasked) /* ** Convert a double to ascii format 0.dddde[+|-]ddd - ** return number of characters converted + ** return number of characters converted or -1 if error. 
** ** These two functions should maybe use localeconv() to pick up ** the current radix character, but since it is uncertain how @@ -745,11 +745,12 @@ void erts_sys_unblock_fpe(int unmasked) */ int -sys_double_to_chars(double fp, char *buffer, size_t buffer_size) +sys_double_to_chars_ext(double fp, char *buffer, size_t buffer_size, size_t decimals) { char *s = buffer; - - (void) erts_snprintf(buffer, buffer_size, "%.20e", fp); + + if (erts_snprintf(buffer, buffer_size, "%.*e", decimals, fp) >= buffer_size) + return -1; /* Search upto decimal point */ if (*s == '+' || *s == '-') s++; while (ISDIGIT(*s)) s++; diff --git a/erts/emulator/sys/win32/erl_win_dyn_driver.h b/erts/emulator/sys/win32/erl_win_dyn_driver.h index ec5141838a..8b6be2b2f1 100644 --- a/erts/emulator/sys/win32/erl_win_dyn_driver.h +++ b/erts/emulator/sys/win32/erl_win_dyn_driver.h @@ -74,7 +74,9 @@ WDD_TYPEDEF(ErlDrvTermData, driver_mk_port,(ErlDrvPort)); WDD_TYPEDEF(ErlDrvTermData, driver_connected,(ErlDrvPort)); WDD_TYPEDEF(ErlDrvTermData, driver_caller,(ErlDrvPort)); WDD_TYPEDEF(ErlDrvTermData, driver_mk_term_nil,(void)); +WDD_TYPEDEF(int, erl_drv_output_term, (ErlDrvTermData, ErlDrvTermData*, int)); WDD_TYPEDEF(int, driver_output_term, (ErlDrvPort, ErlDrvTermData*, int)); +WDD_TYPEDEF(int, erl_drv_send_term, (ErlDrvTermData, ErlDrvTermData, ErlDrvTermData*, int)); WDD_TYPEDEF(int, driver_send_term, (ErlDrvPort, ErlDrvTermData, ErlDrvTermData*, int)); WDD_TYPEDEF(long, driver_async, (ErlDrvPort,unsigned int*,void (*)(void*),void*,void (*)(void*))); WDD_TYPEDEF(int, driver_async_cancel, (unsigned int)); @@ -187,7 +189,9 @@ typedef struct { WDD_FTYPE(driver_connected) *driver_connected; WDD_FTYPE(driver_caller) *driver_caller; WDD_FTYPE(driver_mk_term_nil) *driver_mk_term_nil; + WDD_FTYPE(erl_drv_output_term) *erl_drv_output_term; WDD_FTYPE(driver_output_term) *driver_output_term; + WDD_FTYPE(erl_drv_send_term) *erl_drv_send_term; WDD_FTYPE(driver_send_term) *driver_send_term; WDD_FTYPE(driver_async) *driver_async; WDD_FTYPE(driver_async_cancel) *driver_async_cancel; @@ -294,7 +298,9 @@ extern TWinDynDriverCallbacks WinDynDriverCallbacks; #define driver_connected (WinDynDriverCallbacks.driver_connected) #define driver_caller (WinDynDriverCallbacks.driver_caller) #define driver_mk_term_nil (WinDynDriverCallbacks.driver_mk_term_nil) +#define erl_drv_output_term (WinDynDriverCallbacks.erl_drv_output_term) #define driver_output_term (WinDynDriverCallbacks.driver_output_term) +#define erl_drv_send_term (WinDynDriverCallbacks.erl_drv_send_term) #define driver_send_term (WinDynDriverCallbacks.driver_send_term) #define driver_async (WinDynDriverCallbacks.driver_async) #define driver_async_cancel (WinDynDriverCallbacks.driver_async_cancel) @@ -425,7 +431,9 @@ do { \ ((W).driver_connected) = driver_connected; \ ((W).driver_caller) = driver_caller; \ ((W).driver_mk_term_nil) = driver_mk_term_nil; \ +((W).erl_drv_output_term) = erl_drv_output_term; \ ((W).driver_output_term) = driver_output_term; \ +((W).erl_drv_send_term) = erl_drv_send_term; \ ((W).driver_send_term) = driver_send_term; \ ((W).driver_async) = driver_async; \ ((W).driver_async_cancel) = driver_async_cancel; \ diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c index f19f4ebd8c..1cd9072cea 100755 --- a/erts/emulator/sys/win32/sys.c +++ b/erts/emulator/sys/win32/sys.c @@ -87,9 +87,6 @@ static erts_smp_tsd_key_t win32_errstr_key; static erts_smp_atomic_t pipe_creation_counter; -static erts_smp_mtx_t sys_driver_data_lock; - - /* Results from 
application_type(_w) is one of */ #define APPL_NONE 0 #define APPL_DOS 1 @@ -97,7 +94,6 @@ static erts_smp_mtx_t sys_driver_data_lock; #define APPL_WIN32 3 static int driver_write(long, HANDLE, byte*, int); -static void common_stop(int); static int create_file_thread(struct async_io* aio, int mode); #ifdef ERTS_SMP static void close_active_handle(ErlDrvPort, HANDLE handle); @@ -115,9 +111,6 @@ BOOL WINAPI ctrl_handler(DWORD dwCtrlType); #define PORT_BUFSIZ 4096 -#define PORT_FREE (-1) -#define PORT_EXITING (-2) - #define DRV_BUF_ALLOC(SZ) \ erts_alloc_fnf(ERTS_ALC_T_DRV_DATA_BUF, (SZ)) #define DRV_BUF_REALLOC(P, SZ) \ @@ -269,7 +262,8 @@ int erts_sys_prepare_crash_dump(int secs) list = CONS(hp, make_small(8), list); hp += 2; /* send to heart port, CMD = 8, i.e. prepare crash dump =o */ - erts_write_to_port(NIL, heart_port, list); + erts_port_output(NULL, ERTS_PORT_SIG_FLG_FORCE_IMM_CALL, heart_port, + heart_port->common.id, list, NULL); return 1; } @@ -474,7 +468,7 @@ typedef struct driver_data { byte *inbuf; /* Buffer to use for overlapped read. */ int outBufSize; /* Size of output buffer. */ byte *outbuf; /* Buffer to use for overlapped write. */ - ErlDrvPort port_num; /* The port number. */ + ErlDrvPort port_num; /* The port handle. */ int packet_bytes; /* 0: continous stream, 1, 2, or 4: the number * of bytes in the packet header. */ @@ -484,8 +478,6 @@ typedef struct driver_data { int report_exit; /* Do report exit status for the port */ } DriverData; -static DriverData* driver_data; /* Pointer to array of driver data. */ - /* Driver interfaces */ static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*); static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*); @@ -597,67 +589,53 @@ struct erl_drv_entry vanilla_driver_entry = { */ static DriverData* -new_driver_data(int port_num, int packet_bytes, int wait_objs_required, int use_threads) +new_driver_data(ErlDrvPort port_num, int packet_bytes, int wait_objs_required, int use_threads) { DriverData* dp; - - erts_smp_mtx_lock(&sys_driver_data_lock); - DEBUGF(("new_driver_data(port_num %d, pb %d)\n", - port_num, packet_bytes)); + DEBUGF(("new_driver_data(%p, pb %d)\n", port_num, packet_bytes)); + dp = driver_alloc(sizeof(DriverData)); + if (!dp) + return NULL; /* * We used to test first at all that there is enough room in the * array used by WaitForMultipleObjects(), but that is not necessary * any more, since driver_select() can't fail. */ - /* - * Search for a free slot. 
- */ + dp->bytesInBuffer = 0; + dp->totalNeeded = packet_bytes; + dp->inBufSize = PORT_BUFSIZ; + dp->inbuf = DRV_BUF_ALLOC(dp->inBufSize); + if (dp->inbuf == NULL) + goto buf_alloc_error; + erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize); + dp->outBufSize = 0; + dp->outbuf = NULL; + dp->port_num = port_num; + dp->packet_bytes = packet_bytes; + dp->port_pid = INVALID_HANDLE_VALUE; + if (init_async_io(&dp->in, use_threads) == -1) + goto async_io_error1; + if (init_async_io(&dp->out, use_threads) == -1) + goto async_io_error2; - for (dp = driver_data; dp < driver_data+max_files; dp++) { - if (dp->port_num == PORT_FREE) { - dp->bytesInBuffer = 0; - dp->totalNeeded = packet_bytes; - dp->inBufSize = PORT_BUFSIZ; - dp->inbuf = DRV_BUF_ALLOC(dp->inBufSize); - if (dp->inbuf == NULL) { - erts_smp_mtx_unlock(&sys_driver_data_lock); - return NULL; - } - erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize); - dp->outBufSize = 0; - dp->outbuf = NULL; - dp->port_num = port_num; - dp->packet_bytes = packet_bytes; - dp->port_pid = INVALID_HANDLE_VALUE; - if (init_async_io(&dp->in, use_threads) == -1) - break; - if (init_async_io(&dp->out, use_threads) == -1) - break; - erts_smp_mtx_unlock(&sys_driver_data_lock); - return dp; - } - } + return dp; - /* - * Error or no free driver data. - */ +async_io_error2: + release_async_io(&dp->in, dp->port_num); +async_io_error1: + release_async_io(&dp->out, dp->port_num); - if (dp < driver_data+max_files) { - release_async_io(&dp->in, dp->port_num); - release_async_io(&dp->out, dp->port_num); - } - erts_smp_mtx_unlock(&sys_driver_data_lock); +buf_alloc_error: + driver_free(dp); return NULL; } static void release_driver_data(DriverData* dp) { - erts_smp_mtx_lock(&sys_driver_data_lock); - #ifdef ERTS_SMP #ifdef USE_CANCELIOEX if (fpCancelIoEx != NULL) { @@ -741,8 +719,7 @@ release_driver_data(DriverData* dp) * the exit thread. */ - dp->port_num = PORT_FREE; - erts_smp_mtx_unlock(&sys_driver_data_lock); + driver_free(dp); } #ifdef ERTS_SMP @@ -837,7 +814,6 @@ threaded_handle_closer(LPVOID param) static ErlDrvData set_driver_data(DriverData* dp, HANDLE ifd, HANDLE ofd, int read_write, int report_exit) { - int index = dp - driver_data; int result; dp->in.fd = ifd; @@ -856,13 +832,12 @@ set_driver_data(DriverData* dp, HANDLE ifd, HANDLE ofd, int read_write, int repo ERL_DRV_WRITE|ERL_DRV_USE, 1); ASSERT(result != -1); } - return (ErlDrvData)index; + return (ErlDrvData) dp; } static ErlDrvData reuse_driver_data(DriverData *dp, HANDLE ifd, HANDLE ofd, int read_write, ErlDrvPort port_num) { - int index = dp - driver_data; int result; dp->port_num = port_num; @@ -881,7 +856,7 @@ reuse_driver_data(DriverData *dp, HANDLE ifd, HANDLE ofd, int read_write, ErlDrv ERL_DRV_WRITE|ERL_DRV_USE, 1); ASSERT(result != -1); } - return (ErlDrvData)index; + return (ErlDrvData) dp; } /* @@ -1154,12 +1129,6 @@ spawn_init(void) ((module != NULL) ? 
GetProcAddress(module,"CancelIoEx") : NULL); DEBUGF(("fpCancelIoEx = %p\r\n", fpCancelIoEx)); #endif - driver_data = (struct driver_data *) - erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data)); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, - max_files*sizeof(struct driver_data)); - for (i = 0; i < max_files; i++) - driver_data[i].port_num = PORT_FREE; return 0; } @@ -1290,9 +1259,12 @@ spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts) #endif retval = set_driver_data(dp, hFromChild, hToChild, opts->read_write, opts->exit_status); - if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO) - /* We assume that this cannot generate a negative number */ - erts_port[port_num].os_pid = (SWord) pid; + if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO) { + Port *prt = erts_drvport2port_raw(port_num); + /* We assume that this cannot generate a negative number */ + ASSERT(prt); + prt->os_pid = (SWord) pid; + } } if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO) @@ -2281,12 +2253,10 @@ fd_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts) **/ if (!create_file_thread(&dp->in, DO_READ)) { - dp->port_num = PORT_FREE; return ERL_DRV_ERROR_GENERAL; } if (!create_file_thread(&dp->out, DO_WRITE)) { - dp->port_num = PORT_FREE; return ERL_DRV_ERROR_GENERAL; } @@ -2306,10 +2276,9 @@ fd_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts) } } -static void fd_stop(ErlDrvData d) +static void fd_stop(ErlDrvData data) { - int fd = (int)d; - DriverData* dp = driver_data+fd; + DriverData * dp = (DriverData *) data; /* * There's no way we can terminate an fd port in a consistent way. * Instead we let it live until it's opened again (which it is, @@ -2372,16 +2341,10 @@ vanilla_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts) } static void -stop(ErlDrvData index) -{ - common_stop((int)index); -} - -static void common_stop(int index) +stop(ErlDrvData data) { - DriverData* dp = driver_data+index; - - DEBUGF(("common_stop(%d)\n", index)); + DriverData *dp = (DriverData *) data; + DEBUGF(("stop(%p)\n", dp)); if (dp->in.ov.hEvent != NULL) { (void) driver_select(dp->port_num, @@ -2403,7 +2366,6 @@ static void common_stop(int index) */ HANDLE thread; DWORD tid; - dp->port_num = PORT_EXITING; thread = (HANDLE *) _beginthreadex(NULL, 0, threaded_exiter, dp, 0, &tid); CloseHandle(thread); } @@ -2528,22 +2490,17 @@ threaded_exiter(LPVOID param) static void output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len) -/* long drv_data; /* The slot to use in the driver data table. +/* ErlDrvData drv_data; /* The slot to use in the driver data table. * For Windows NT, this is *NOT* a file handle. * The handle is found in the driver data. */ /* char *buf; /* Pointer to data to write to the port program. */ /* ErlDrvSizeT len; /* Number of bytes to write. */ { - DriverData* dp; + DriverData* dp = (DriverData *) drv_data; int pb; /* The header size for this port. */ - int port_num; /* The actual port number (for diagnostics). 
*/ char* current; - dp = driver_data + (int)drv_data; - if ((port_num = dp->port_num) == -1) - return ; /*-1;*/ - pb = dp->packet_bytes; if ((pb+len) == 0) @@ -2554,7 +2511,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len) */ if ((pb == 2 && len > 65535) || (pb == 1 && len > 255)) { - driver_failure_posix(port_num, EINVAL); + driver_failure_posix(dp->port_num, EINVAL); return ; /* -1; */ } @@ -2568,7 +2525,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len) ASSERT(!dp->outbuf); dp->outbuf = DRV_BUF_ALLOC(pb+len); if (!dp->outbuf) { - driver_failure_posix(port_num, ENOMEM); + driver_failure_posix(dp->port_num, ENOMEM); return ; /* -1; */ } @@ -2598,7 +2555,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len) memcpy(current, buf, len); if (!async_write_file(&dp->out, dp->outbuf, pb+len)) { - set_busy_port(port_num, 1); + set_busy_port(dp->port_num, 1); } else { dp->out.ov.Offset += pb+len; /* For vanilla driver. */ /* XXX OffsetHigh should be changed too. */ @@ -2633,10 +2590,9 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event) { int error = 0; /* The error code (assume initially no errors). */ DWORD bytesRead; /* Number of bytes read. */ - DriverData* dp; + DriverData* dp = (DriverData *) drv_data; int pb; - dp = driver_data+(int)drv_data; pb = dp->packet_bytes; #ifdef ERTS_SMP if(dp->in.thread == (HANDLE) -1) { @@ -2804,7 +2760,7 @@ static void ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event) { DWORD bytesWritten; - DriverData* dp = driver_data + (int)drv_data; + DriverData *dp = (DriverData *) drv_data; int error; #ifdef ERTS_SMP @@ -2812,7 +2768,7 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event) dp->out.async_io_active = 0; } #endif - DEBUGF(("ready_output(%d, 0x%x)\n", drv_data, ready_event)); + DEBUGF(("ready_output(%p, 0x%x)\n", drv_data, ready_event)); set_busy_port(dp->port_num, 0); if (!(dp->outbuf)) { /* Happens because event sometimes get signalled during a successful @@ -2867,7 +2823,7 @@ sys_init_io(void) can change our view of the number of open files possible. We estimate the number to twice the amount of ports. We really dont know on windows, do we? */ - max_files = 2*erts_max_ports; + max_files = 2*erts_ptab_max(&erts_port); } #ifdef ERTS_SMP @@ -3322,9 +3278,6 @@ void erl_sys_init(void) noinherit_std_handle(STD_INPUT_HANDLE); noinherit_std_handle(STD_ERROR_HANDLE); - - erts_smp_mtx_init(&sys_driver_data_lock, "sys_driver_data_lock"); - #ifdef ERTS_SMP erts_smp_tsd_key_create(&win32_errstr_key); InitializeCriticalSection(&htbc_lock); diff --git a/erts/emulator/sys/win32/sys_float.c b/erts/emulator/sys/win32/sys_float.c index 09dad89140..960edaa7a5 100644 --- a/erts/emulator/sys/win32/sys_float.c +++ b/erts/emulator/sys/win32/sys_float.c @@ -114,15 +114,16 @@ sys_chars_to_double(char *buf, double *fp) /* ** Convert a double to ascii format 0.dddde[+|-]ddd -** return number of characters converted +** return number of characters converted or -1 if error. 
*/ int -sys_double_to_chars(double fp, char *buffer, size_t buffer_size) +sys_double_to_chars_ext(double fp, char *buffer, size_t buffer_size, size_t decimals) { char *s = buffer; - - (void) erts_snprintf(buffer, buffer_size, "%.20e", fp); + + if (erts_snprintf(buffer, buffer_size, "%.*e", decimals, fp) >= buffer_size) + return -1; /* Search upto decimal point */ if (*s == '+' || *s == '-') s++; while (isdigit(*s)) s++; diff --git a/erts/emulator/test/alloc_SUITE_data/allocator_test.h b/erts/emulator/test/alloc_SUITE_data/allocator_test.h index cd4a91d34a..c0396ddb61 100644 --- a/erts/emulator/test/alloc_SUITE_data/allocator_test.h +++ b/erts/emulator/test/alloc_SUITE_data/allocator_test.h @@ -60,9 +60,9 @@ typedef void* erts_cond; #define IS_MMAP_C(C) ((Ulong) ALC_TEST1(0x00a, (C))) #define C_SZ(C) ((Ulong) ALC_TEST1(0x00b, (C))) #define SBC2BLK(A, C) ((Block_t *) ALC_TEST2(0x00c, (A), (C))) -#define BLK2SBC(A, B) ((Carrier_t *) ALC_TEST2(0x00d, (A), (B))) -#define MBC2FBLK(A, C) ((Block_t *) ALC_TEST2(0x00e, (A), (C))) -#define FBLK2MBC(A, B) ((Carrier_t *) ALC_TEST2(0x00f, (A), (B))) +#define BLK_TO_SBC(A, B) ((Carrier_t *) ALC_TEST2(0x00d, (A), (B))) +#define MBC_TO_FIRST_BLK(A, C) ((Block_t *) ALC_TEST2(0x00e, (A), (C))) +#define FIRST_BLK_TO_MBC(A, B) ((Carrier_t *) ALC_TEST2(0x00f, (A), (B))) #define FIRST_MBC(A) ((Carrier_t *) ALC_TEST1(0x010, (A))) #define LAST_MBC(A) ((Carrier_t *) ALC_TEST1(0x011, (A))) #define FIRST_SBC(A) ((Carrier_t *) ALC_TEST1(0x012, (A))) @@ -73,7 +73,7 @@ typedef void* erts_cond; #define MIN_BLK_SZ(A) ((Ulong) ALC_TEST1(0x017, (A))) #define NXT_BLK(B) ((Block_t *) ALC_TEST1(0x018, (B))) #define PREV_BLK(B) ((Block_t *) ALC_TEST1(0x019, (B))) -#define IS_FIRST_BLK(B) ((Ulong) ALC_TEST1(0x01a, (B))) +#define IS_MBC_FIRST_BLK(A,B) ((Ulong) ALC_TEST2(0x01a, (A), (B))) #define UNIT_SZ ((Ulong) ALC_TEST0(0x01b)) /* From erl_goodfit_alloc.c */ diff --git a/erts/emulator/test/alloc_SUITE_data/basic.c b/erts/emulator/test/alloc_SUITE_data/basic.c index 4a5e888161..0c27665712 100644 --- a/erts/emulator/test/alloc_SUITE_data/basic.c +++ b/erts/emulator/test/alloc_SUITE_data/basic.c @@ -44,7 +44,7 @@ testcase_run(TestCaseState_t *tcs) c = FIRST_MBC(a); ASSERT(tcs, !NEXT_C(c)); - blk = MBC2FBLK(a, c); + blk = MBC_TO_FIRST_BLK(a, c); ASSERT(tcs, IS_LAST_BLK(blk)); ASSERT(tcs, IS_FREE_BLK(blk)); diff --git a/erts/emulator/test/alloc_SUITE_data/bucket_mask.c b/erts/emulator/test/alloc_SUITE_data/bucket_mask.c index b214f87e4a..34979cacf1 100644 --- a/erts/emulator/test/alloc_SUITE_data/bucket_mask.c +++ b/erts/emulator/test/alloc_SUITE_data/bucket_mask.c @@ -22,7 +22,7 @@ #include "allocator_test.h" #include <stdio.h> -#ifdef __WIN32__ && SIZEOF_VOID_P == 8 +#if defined(__WIN32__) && SIZEOF_VOID_P == 8 /* Use larger threashold for win64 as block alignment is 16 bytes and not 8 */ #define SBCT ((1024*1024)) @@ -48,10 +48,16 @@ testcase_cleanup(TestCaseState_t *tcs) void testcase_run(TestCaseState_t *tcs) { - void *tmp; - void **fence; + typedef struct linked_block { + struct linked_block* next; + }Linked; + Linked* link; + Linked* fence_list; + Linked* pad_list; + void* tmp; void **blk; Ulong sz; + Ulong residue; Ulong smbcs; int i; int bi; @@ -73,7 +79,7 @@ testcase_run(TestCaseState_t *tcs) ASSERT(tcs, a); min_blk_sz = MIN_BLK_SZ(a); - smbcs = 2*(no_bkts*sizeof(void *) + min_blk_sz) + min_blk_sz; + smbcs = (no_bkts*sizeof(void *) + min_blk_sz) + min_blk_sz; for (i = 0; i < no_bkts; i++) { sz = BKT_MIN_SZ(a, i); if (sz >= sbct) @@ -98,26 +104,42 @@ 
testcase_run(TestCaseState_t *tcs) tcs->extra = (void *) a; ASSERT(tcs, a); + blk = (void **) ALLOC(a, no_bkts*sizeof(void *)); - fence = (void **) ALLOC(a, no_bkts*sizeof(void *)); - ASSERT(tcs, blk && fence); + ASSERT(tcs, blk); + fence_list = NULL; testcase_printf(tcs, "Allocating blocks and fences\n"); for (i = 0; i < bi_tests; i++) { sz = BKT_MIN_SZ(a, i); blk[i] = ALLOC(a, sz - ablk_hdr_sz); - fence[i] = ALLOC(a, 1); - ASSERT(tcs, blk[i] && fence[i]); + link = (Linked*) ALLOC(a, sizeof(Linked)); + ASSERT(tcs, blk[i] && link); + link->next = fence_list; + fence_list = link; } - tmp = (void *) UMEM2BLK(fence[bi_tests - 1]); - tmp = (void *) NXT_BLK((Block_t *) tmp); - ASSERT(tcs, IS_LAST_BLK(tmp)); - sz = BLK_SZ((Block_t *) tmp); - testcase_printf(tcs, "Allocating leftover size = %lu\n", sz); - tmp = ALLOC(a, sz - ablk_hdr_sz); - ASSERT(tcs, tmp); + pad_list = 0; + do { + tmp = (void *) UMEM2BLK(link); /* last allocated */ + tmp = (void *) NXT_BLK((Block_t *) tmp); + ASSERT(tcs, IS_LAST_BLK(tmp)); + sz = BLK_SZ((Block_t *) tmp); + if (sz >= sbct) { + residue = sz; + sz = sbct - min_blk_sz; + residue -= sz; + } + else { + residue = 0; + } + testcase_printf(tcs, "Allocating leftover size = %lu, residue = %lu\n", sz, residue); + link = (Linked*) ALLOC(a, sz - ablk_hdr_sz); + ASSERT(tcs, link); + link->next = pad_list; + pad_list = link; + } while (residue); bi = FIND_BKT(a, 0); ASSERT(tcs, bi < 0); @@ -135,16 +157,23 @@ testcase_run(TestCaseState_t *tcs) for (i = 0; i < bi_tests; i++) { FREE(a, blk[i]); - FREE(a, fence[i]); + } + while (fence_list) { + link = fence_list; + fence_list = link->next; + FREE(a, link); } FREE(a, (void *) blk); - FREE(a, (void *) fence); bi = FIND_BKT(a, 0); ASSERT(tcs, bi == no_bkts - 1); - FREE(a, tmp); + while (pad_list) { + link = pad_list; + pad_list = link->next; + FREE(a, link); + } bi = FIND_BKT(a, 0); ASSERT(tcs, bi < 0); diff --git a/erts/emulator/test/alloc_SUITE_data/coalesce.c b/erts/emulator/test/alloc_SUITE_data/coalesce.c index 6f35d3279b..981fa6d43e 100644 --- a/erts/emulator/test/alloc_SUITE_data/coalesce.c +++ b/erts/emulator/test/alloc_SUITE_data/coalesce.c @@ -54,7 +54,7 @@ setup_sequence(TestCaseState_t *tcs, Allctr_t *a, Ulong bsz, int no, no, bsz); c = FIRST_MBC(a); ASSERT(tcs, !NEXT_C(c)); - blk = MBC2FBLK(a, c); + blk = MBC_TO_FIRST_BLK(a, c); ASSERT(tcs, IS_LAST_BLK(blk)); for (i = 0; i < no; i++) @@ -266,7 +266,7 @@ testcase_name(void) void testcase_run(TestCaseState_t *tcs) { - char *argv_org[] = {"-tmmbcs1024", "-tsbct2048", "-trmbcmt100", "-tas", NULL, NULL}; + char *argv_org[] = {"-tsmbcs511","-tmmbcs511", "-tsbct512", "-trmbcmt100", "-tas", NULL, NULL}; char *alg[] = {"af", "gf", "bf", "aobf", "aoff", NULL}; int i; @@ -276,7 +276,7 @@ testcase_run(TestCaseState_t *tcs) char *argv[sizeof(argv_org)/sizeof(argv_org[0])]; memcpy((void *) argv, (void *) argv_org, sizeof(argv_org)); - argv[4] = alg[i]; + argv[5] = alg[i]; testcase_printf(tcs, " *** Starting \"%s\" allocator *** \n", alg[i]); a = START_ALC("coalesce_", 0, argv); ASSERT(tcs, a); diff --git a/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c b/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c index 0277616bd0..7d5608f890 100644 --- a/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c +++ b/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c @@ -52,10 +52,10 @@ testcase_run(TestCaseState_t *tcs) tcs->extra = &seg[0]; for (i = 0; i < MAX_SEGS; i++) { - seg[i].size = 1000; + seg[i].size = 1 << 18; seg[i].ptr = MSEG_ALLOC(&seg[i].size); ASSERT(tcs, 
seg[i].ptr); - ASSERT(tcs, seg[i].size >= 1000); + ASSERT(tcs, seg[i].size >= (1 << 18)); } n = MSEG_NO(); diff --git a/erts/emulator/test/alloc_SUITE_data/testcase_driver.c b/erts/emulator/test/alloc_SUITE_data/testcase_driver.c index 66971654a2..5c4b11454f 100644 --- a/erts/emulator/test/alloc_SUITE_data/testcase_driver.c +++ b/erts/emulator/test/alloc_SUITE_data/testcase_driver.c @@ -42,6 +42,7 @@ typedef struct { TestCaseState_t visible; ErlDrvPort port; + ErlDrvTermData port_id; int result; jmp_buf done_jmp_buf; char *comment; @@ -97,6 +98,7 @@ testcase_drv_start(ErlDrvPort port, char *command) itcs->visible.testcase_name = testcase_name(); itcs->visible.extra = NULL; itcs->port = port; + itcs->port_id = driver_mk_port(port); itcs->result = TESTCASE_FAILED; itcs->comment = ""; @@ -142,7 +144,7 @@ testcase_drv_run(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) msg[1] = (ErlDrvTermData) result_atom; msg[2] = ERL_DRV_PORT; - msg[3] = driver_mk_port(itcs->port); + msg[3] = itcs->port_id; msg[4] = ERL_DRV_ATOM; msg[5] = driver_mk_atom(itcs->visible.testcase_name); @@ -154,7 +156,7 @@ testcase_drv_run(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) msg[9] = ERL_DRV_TUPLE; msg[10] = (ErlDrvTermData) 4; - driver_output_term(itcs->port, msg, 11); + erl_drv_output_term(itcs->port_id, msg, 11); } int @@ -184,7 +186,7 @@ testcase_printf(TestCaseState_t *tcs, char *frmt, ...) msg[1] = (ErlDrvTermData) driver_mk_atom("print"); msg[2] = ERL_DRV_PORT; - msg[3] = driver_mk_port(itcs->port); + msg[3] = itcs->port_id; msg[4] = ERL_DRV_ATOM; msg[5] = driver_mk_atom(itcs->visible.testcase_name); @@ -196,7 +198,7 @@ testcase_printf(TestCaseState_t *tcs, char *frmt, ...) msg[9] = ERL_DRV_TUPLE; msg[10] = (ErlDrvTermData) 4; - driver_output_term(itcs->port, msg, 11); + erl_drv_output_term(itcs->port_id, msg, 11); } diff --git a/erts/emulator/test/beam_SUITE.erl b/erts/emulator/test/beam_SUITE.erl index 02c6e19686..3197a4c137 100644 --- a/erts/emulator/test/beam_SUITE.erl +++ b/erts/emulator/test/beam_SUITE.erl @@ -54,7 +54,7 @@ end_per_group(_GroupName, Config) -> %% Verify that apply(M, F, A) is really tail recursive. apply_last(Config) when is_list(Config) -> - Pid=spawn(?MODULE, applied, [self(), 10000]), + Pid = spawn(?MODULE, applied, [self(), 10000]), Size = receive {Pid, finished} -> @@ -94,32 +94,32 @@ apply_last_bif(Config) when is_list(Config) -> %% Test three high register numbers in a put_list instruction %% (to test whether packing works properly). packed_registers(Config) when is_list(Config) -> - ?line PrivDir = ?config(priv_dir, Config), - ?line Mod = packed_regs, - ?line Name = filename:join(PrivDir, atom_to_list(Mod) ++ ".erl"), + PrivDir = ?config(priv_dir, Config), + Mod = packed_regs, + Name = filename:join(PrivDir, atom_to_list(Mod) ++ ".erl"), %% Generate a module which generates a list of tuples. %% put_list(A) -> [{A, 600}, {A, 999}, ... {A, 0}]. - ?line Code = gen_packed_regs(600, ["-module("++atom_to_list(Mod)++").\n", + Code = gen_packed_regs(600, ["-module("++atom_to_list(Mod)++").\n", "-export([put_list/1]).\n", "put_list(A) ->\n["]), - ?line ok = file:write_file(Name, Code), + ok = file:write_file(Name, Code), %% Compile the module. 
- ?line io:format("Compiling: ~s\n", [Name]), - ?line CompRc = compile:file(Name, [{outdir, PrivDir}, report]), - ?line io:format("Result: ~p\n",[CompRc]), - ?line {ok, Mod} = CompRc, + io:format("Compiling: ~s\n", [Name]), + CompRc = compile:file(Name, [{outdir, PrivDir}, report]), + io:format("Result: ~p\n",[CompRc]), + {ok, Mod} = CompRc, %% Load it. - ?line io:format("Loading...\n",[]), - ?line LoadRc = code:load_abs(filename:join(PrivDir, atom_to_list(Mod))), - ?line {module,_Module} = LoadRc, + io:format("Loading...\n",[]), + LoadRc = code:load_abs(filename:join(PrivDir, atom_to_list(Mod))), + {module,_Module} = LoadRc, %% Call it and verify result. - ?line Term = {a, b}, - ?line L = Mod:put_list(Term), - ?line verify_packed_regs(L, Term, 600), + Term = {a, b}, + L = Mod:put_list(Term), + verify_packed_regs(L, Term, 600), ok. gen_packed_regs(0, Acc) -> @@ -131,11 +131,11 @@ verify_packed_regs([], _, -1) -> ok; verify_packed_regs([{Term, N}| T], Term, N) -> verify_packed_regs(T, Term, N-1); verify_packed_regs(L, Term, N) -> - ?line ok = io:format("Expected [{~p, ~p}|T]; got\n~p\n", [Term, N, L]), - ?line test_server:fail(). + ok = io:format("Expected [{~p, ~p}|T]; got\n~p\n", [Term, N, L]), + test_server:fail(). buildo_mucho(Config) when is_list(Config) -> - ?line buildo_mucho_1(), + buildo_mucho_1(), ok. buildo_mucho_1() -> @@ -206,20 +206,27 @@ buildo_mucho_1() -> {<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1}]. heap_sizes(Config) when is_list(Config) -> - ?line Sizes = erlang:system_info(heap_sizes), - ?line io:format("~p heap sizes\n", [length(Sizes)]), - ?line io:format("~p\n", [Sizes]), + Sizes = erlang:system_info(heap_sizes), + io:format("~p heap sizes\n", [length(Sizes)]), + io:format("~p\n", [Sizes]), %% Verify that heap sizes increase monotonically. - ?line Largest = lists:foldl(fun(E, P) when is_integer(P), E > P -> E; + Largest = lists:foldl(fun(E, P) when is_integer(P), E > P -> E; (E, []) -> E end, [], Sizes), - %% Verify that the largest heap size consists of 31 or 63 bits. - ?line - case Largest bsr (erlang:system_info(wordsize)*8-2) of - R when R > 0 -> ok - end, + %% Verify that the largest heap size consists of + %% - 31 bits of bytes on 32 bits arch + %% - atleast 52 bits of bytes (48 is the maximum virtual address) + %% and at the most 63 bits on 64 bit archs + %% heap sizes are in words + case erlang:system_info(wordsize) of + 8 -> + 0 = (Largest*8) bsr 63, + true = (Largest*8) > (1 bsl 52); + 4 -> + 1 = (Largest*4) bsr 31 + end, ok. %% Thanks to Igor Goryachev. @@ -302,10 +309,10 @@ b() -> end. fconv(Config) when is_list(Config) -> - ?line do_fconv(atom), - ?line do_fconv(nil), - ?line do_fconv(tuple_literal), - ?line 3.0 = do_fconv(1.0, 2.0), + do_fconv(atom), + do_fconv(nil), + do_fconv(tuple_literal), + 3.0 = do_fconv(1.0, 2.0), ok. do_fconv(Type) -> @@ -325,9 +332,9 @@ do_fconv(tuple_literal, Float) when is_float(Float) -> Float + {a,b}. select_val(Config) when is_list(Config) -> - ?line zero = do_select_val(0), - ?line big = do_select_val(1 bsl 64), - ?line integer = do_select_val(42), + zero = do_select_val(0), + big = do_select_val(1 bsl 64), + integer = do_select_val(42), ok. do_select_val(X) -> diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl index 3a29fd4d68..a92afef003 100644 --- a/erts/emulator/test/busy_port_SUITE.erl +++ b/erts/emulator/test/busy_port_SUITE.erl @@ -26,6 +26,8 @@ no_trap_exit_unlinked/1, trap_exit/1, multiple_writers/1, hard_busy_driver/1, soft_busy_driver/1]). 
+-compile(export_all). + -include_lib("test_server/include/test_server.hrl"). %% Internal exports. @@ -36,7 +38,9 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> [io_to_busy, message_order, send_3, system_monitor, no_trap_exit, no_trap_exit_unlinked, trap_exit, - multiple_writers, hard_busy_driver, soft_busy_driver]. + multiple_writers, hard_busy_driver, soft_busy_driver, + scheduling_delay_busy,scheduling_delay_busy_nosuspend, + scheduling_busy_link]. groups() -> []. @@ -148,9 +152,9 @@ message_order(Config) when is_list(Config) -> send_to_busy_1(Parent) -> {Owner, Slave} = get_slave(), - Slave ! {Owner, {command, "set_me_busy"}}, - Slave ! {Owner, {command, "hello"}}, - Slave ! {Owner, {command, "hello again"}}, + (catch port_command(Slave, "set_me_busy")), + (catch port_command(Slave, "hello")), + (catch port_command(Slave, "hello again")), receive Message -> Parent ! {self(), Message} @@ -193,10 +197,10 @@ system_monitor(Config) when is_list(Config) -> ?line Busy = spawn_link( fun() -> - Slave ! {Owner,{command,"set busy"}}, + (catch port_command(Slave, "set busy")), receive {Parent,alpha} -> ok end, - Slave ! {Owner,{command,"busy"}}, - Slave ! {Owner,{command,"free"}}, + (catch port_command(Slave, "busy")), + (catch port_command(Slave, "free")), Parent ! {self(),alpha}, command(lock), receive {Parent,beta} -> ok end, @@ -212,7 +216,7 @@ system_monitor(Config) when is_list(Config) -> ?line Void = rec(Void), ?line Busy ! {self(), beta}, ?line {monitor,Owner,busy_port,Slave} = rec(Void), - ?line Master ! {Owner, {command, "u"}}, + ?line port_command(Master, "u"), ?line {Busy,beta} = rec(Void), ?line Void = rec(Void), ?line _NewMonitor = erlang:system_monitor(OldMonitor), @@ -296,9 +300,9 @@ no_trap_exit_process(ResultTo, Link, Config) -> linked -> ok; unlink -> unlink(Slave) end, - ?line Slave ! {self(), {command, "lock port"}}, + ?line (catch port_command(Slave, "lock port")), ?line ResultTo ! {self(), port_created, Slave}, - ?line Slave ! {self(), {command, "suspend me"}}, + ?line (catch port_command(Slave, "suspend me")), ok. %% Assuming the following scenario, @@ -339,9 +343,9 @@ busy_port_exit_process(ResultTo, Config) -> ?line load_busy_driver(Config), ?line _Master = open_port({spawn, "busy_drv master"}, [eof]), ?line Slave = open_port({spawn, "busy_drv slave"}, [eof]), - ?line Slave ! {self(), {command, "lock port"}}, + ?line (catch port_command(Slave, "lock port")), ?line ResultTo ! {self(), port_created, Slave}, - ?line Slave ! {self(), {command, "suspend me"}}, + ?line (catch port_command(Slave, "suspend me")), receive {'EXIT', Slave, die} -> ResultTo ! {self(), ok}; @@ -383,8 +387,8 @@ multiple_writers(Config) when is_list(Config) -> quick_writer() -> {Owner, Port} = get_slave(), - Port ! {Owner, {command, "port to busy"}}, - Port ! {Owner, {command, "lock me"}}, + (catch port_command(Port, "port to busy")), + (catch port_command(Port, "lock me")), ok. hard_busy_driver(Config) when is_list(Config) -> @@ -528,6 +532,304 @@ hs_busy_pcmd(Prt, Opts, StartFun, EndFun) -> EndFun(P, Res, Time) end. 
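A note on the busy_port_SUITE hunks above (a sketch, not part of the patch): the suite now calls port_command/2 instead of sending {Owner, {command, Data}} messages. Both deliver the same command when the caller is the port's connected process; the difference is that port_command/2 exits with badarg if the port is already closed, while a message to a dead port is silently dropped, which is presumably why the new calls are wrapped in catch. A minimal comparison, with an illustrative helper name and assuming Port is open and owned by the caller:

    send_both_ways(Port, Data) ->
        %% Old style: the pid in the tuple must be the port's connected
        %% process; a send to an already-closed port is silently discarded.
        Port ! {self(), {command, Data}},
        %% New style used by this suite: same effect, but exits with badarg
        %% if Port is closed, hence the (catch ...) wrappers above.
        (catch port_command(Port, Data)),
        ok.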
+scheduling_delay_busy(Config) -> + + Scenario = + [{1,{spawn,[{var,drvname},undefined]}}, + {2,{call,[{var,1},open_port]}}, + {3,{spawn,[{var,2},{var,1}]}}, + {0,{ack,[{var,1},{busy,1,250}]}}, + {0,{cast,[{var,3},{command,2}]}}, + [{0,{cast,[{var,3},{command,I}]}} + || I <- lists:seq(3,50)], + {0,{cast,[{var,3},take_control]}}, + {0,{cast,[{var,1},{new_owner,{var,3}}]}}, + {0,{cast,[{var,3},close]}}, + {0,{timer,sleep,[300]}}, + {0,{erlang,port_command,[{var,2},<<$N>>,[force]]}}, + [{0,{cast,[{var,1},{command,I}]}} + || I <- lists:seq(101,127)] + ,{10,{call,[{var,3},get_data]}} + ], + + Validation = [{seq,10,lists:seq(1,50)}], + + port_scheduling(Scenario,Validation,?config(data_dir,Config)). + +scheduling_delay_busy_nosuspend(Config) -> + + Scenario = + [{1,{spawn,[{var,drvname},undefined]}}, + {2,{call,[{var,1},open_port]}}, + {0,{cast,[{var,1},{command,1,100}]}}, + {0,{cast,[{var,1},{busy,2}]}}, + {10,{call,[{var,1},{command,3,[nosuspend]}]}}, + {0,{timer,sleep,[200]}}, + {0,{erlang,port_command,[{var,2},<<$N>>,[force]]}}, + {0,{cast,[{var,1},close]}}, + {20,{call,[{var,1},get_data]}} + ], + + Validation = [{eq,10,nosuspend},{seq,20,[1,2]}], + + port_scheduling(Scenario,Validation,?config(data_dir,Config)). + +scheduling_busy_link(Config) -> + + Scenario = + [{1,{spawn,[{var,drvname},undefined]}}, + {2,{call,[{var,1},open_port]}}, + {3,{spawn,[{var,2},{var,1}]}}, + {0,{cast,[{var,1},unlink]}}, + {0,{cast,[{var,1},{busy,1}]}}, + {0,{cast,[{var,1},{command,2}]}}, + {0,{cast,[{var,1},link]}}, + {0,{timer,sleep,[1000]}}, + {0,{ack,[{var,3},take_control]}}, + {0,{cast,[{var,1},{new_owner,{var,3}}]}}, + {0,{cast,[{var,3},close]}}, + {10,{call,[{var,3},get_data]}}, + {20,{call,[{var,1},get_exit]}} + ], + + Validation = [{seq,10,[1]}, + {seq,20,[{'EXIT',noproc}]}], + + port_scheduling(Scenario,Validation,?config(data_dir,Config)). + +process_init(DrvName,Owner) -> + process_flag(trap_exit,true), + process_loop(DrvName,Owner, {[],[]}). + +process_loop(DrvName,undefined,Data) when is_list(DrvName) -> + process_loop(DrvName,[binary],Data); +process_loop(DrvName,PortOpts,Data) when is_list(DrvName) -> + receive + {call,open_port,P} -> + Port = open_port({spawn, DrvName}, PortOpts), + send(P,Port), + process_loop(Port,self(),Data) + end; +process_loop(Port,undefined,Data) -> + receive + {cast,{new_owner,Pid}} -> + pal("NewOwner: ~p",[Pid]), + process_loop(Port,Pid,Data) + end; +process_loop(Port,Owner,{Data,Exit} = DE) -> + receive + {Port,connected} -> + pal("Connected",[]), + process_loop(Port,undefined,DE); + {Port,{data,NewData}} -> + pal("Got: ~p",[NewData]), + receive + {Port,closed} -> + process_loop(Port,Owner,{Data ++ [NewData],Exit}) + after 2000 -> + exit(did_not_get_port_close) + end; + {'EXIT',Port,Reason} = Exit -> + pal("Exit: ~p",[Exit]), + process_loop(Port,Owner,{Data, Exit ++ [[{'EXIT',Reason}]]}); + {'EXIT',_Port,_Reason} = Exit -> + pal("Exit: ~p",[Exit]); + {call,Msg,P} -> + case handle_msg(Msg,Port,Owner,DE) of + {Reply,NewOwner,NewData} -> + send(P,Reply), + process_loop(Port,NewOwner,NewData); + Reply -> + send(P,Reply), + process_loop(Port,Owner,DE) + end; + {ack,Msg,P} -> + send(P,ok), + case handle_msg(Msg,Port,Owner,DE) of + {_Reply,NewOwner,NewData} -> + process_loop(Port,NewOwner,NewData); + _Reply -> + process_loop(Port,Owner,DE) + end; + {cast,Msg} when is_atom(Msg) orelse element(1,Msg) /= new_owner -> + case handle_msg(Msg,Port,Owner,DE) of + {_Reply,NewOwner,NewData} -> + process_loop(Port,NewOwner,NewData); + _ -> + process_loop(Port,Owner,DE) + end + end. 
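The three scenarios above exercise erlang:port_command/3 against a port that the test driver has marked busy. A condensed sketch of those options (illustrative only, not part of the patch), assuming the scheduling_drv driver shown further down in this diff has already been loaded: <<$B, Value:32>> makes the driver call set_busy_port(Port, 1), <<$N>> clears the busy state, and the driver declares ERL_DRV_FLAG_SOFT_BUSY, which is what makes the force option legal. Since port signals are delivered asynchronously, the sleep gives the busy signal time to take effect before the nosuspend attempt:

    busy_options_sketch() ->
        Port = open_port({spawn, "scheduling_drv"}, [binary]),
        true = port_command(Port, <<$B, 1:32>>),   %% driver marks itself busy
        timer:sleep(100),                          %% let the busy signal be processed
        %% nosuspend never suspends the caller; false means the command was
        %% rejected because the port was (still) busy.
        _ = port_command(Port, <<$C, 2:32>>, [nosuspend]),
        %% force is accepted even while busy, because the driver declares
        %% ERL_DRV_FLAG_SOFT_BUSY; without that flag this call would fail.
        true = port_command(Port, <<$N>>, [force]),
        port_close(Port).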
+ +handle_msg({busy,Value,Delay},Port,Owner,_Data) -> + pal("Long busy: ~p",[Value]), + send(Port,{Owner,{command,<<$L,Value:32,(round(Delay/100))>>}}); +handle_msg({busy,Value},Port,Owner,_Data) -> + pal("Busy: ~p",[Value]), + send(Port,{Owner,{command,<<$B,Value:32>>}}); +handle_msg({command,Value},Port,Owner,_Data) -> + pal("Short: ~p",[Value]), + send(Port,{Owner,{command,<<$C,Value:32>>}}); +handle_msg({command,Value,Delay},Port,Owner,_Data) when is_integer(Delay) -> + pal("Long: ~p",[Value]), + send(Port,{Owner,{command,<<$D,Value:32,(round(Delay/100))>>}}); +handle_msg({command,Value,Opts},Port,Owner,_Data) -> + pal("Short Opt: ~p",[Value]), + send(Port,{Owner,{command,<<$C,Value:32>>}},Opts); +handle_msg({command,Value,Opts,Delay},Port,Owner,_Data) -> + pal("Long Opt: ~p",[Value]), + send(Port,{Owner,{command,<<$D,Value:32,(round(Delay/100))>>}},Opts); +handle_msg(take_control,Port,Owner,Data) -> + pal("Connect: ~p",[self()]), + send(Port,{Owner, {connect, self()}}), + {undefined,self(),Data}; +handle_msg(unlink,Port,_Owner,_Data) -> + pal("Unlink:",[]), + erlang:unlink(Port); +handle_msg(link,Port,_Owner,_Data) -> + pal("Link:",[]), + erlang:link(Port); +handle_msg(close,Port,Owner,_Data) -> + pal("Close",[]), + send(Port,{Owner,close}); +handle_msg(get_data,Port,_Owner,{[],_Exit}) -> + %% Wait for data if it has not arrived yet + receive + {Port,{data,Data}} -> + Data + after 2000 -> + pal("~p",[erlang:process_info(self())]), + exit(did_not_get_port_data) + end; +handle_msg(get_data,_Port,Owner,{Data,Exit}) -> + pal("GetData",[]), + {hd(Data),Owner,{tl(Data),Exit}}; +handle_msg(get_exit,Port,_Owner,{_Data,[]}) -> + %% Wait for exit if it has not arrived yet + receive + {'EXIT',Port,Reason} -> + [{'EXIT',Reason}] + after 2000 -> + pal("~p",[erlang:process_info(self())]), + exit(did_not_get_port_exit) + end; +handle_msg(get_exit,_Port,Owner,{Data,Exit}) -> + {hd(Exit),Owner,{Data,tl(Exit)}}. + + + +call(Pid,Msg) -> + pal("call(~p,~p)",[Pid,Msg]), + send(Pid,{call,Msg,self()}), + receive + Ret -> + Ret + end. +ack(Pid,Msg) -> + pal("ack(~p,~p)",[Pid,Msg]), + send(Pid,{ack,Msg,self()}), + receive + Ret -> + Ret + end. + +cast(Pid,Msg) -> + pal("cast(~p,~p)",[Pid,Msg]), + send(Pid,{cast,Msg}). + +send(Pid,Msg) -> + erlang:send(Pid,Msg). +send(Prt,Msg,Opts) -> + erlang:send(Prt,Msg,Opts). + + +port_scheduling(Scenario,Validation,Path) -> + DrvName = "scheduling_drv", + erl_ddll:start(), + case erl_ddll:load_driver(Path, DrvName) of + ok -> ok; + {error, Error} -> + io:format("~s\n", [erl_ddll:format_error(Error)]), + ?line ?t:fail() + end, + + Data = run_scenario(lists:flatten(Scenario),[{drvname,DrvName}]), + ok = validate_scenario(Data,Validation). + + +run_scenario([{V,{Module,Cmd,Args}}|T],Vars) -> + Res = run_command(Module,Cmd, + replace_args(Args,Vars)), + run_scenario(T,[{V,Res}|Vars]); +run_scenario([{V,{Cmd,Args}}|T],Vars) -> + run_scenario([{V,{?MODULE,Cmd,Args}}|T],Vars); +run_scenario([],Vars) -> + Vars. + +run_command(_M,spawn,{Args,Opts}) -> + Pid = spawn_opt(fun() -> apply(?MODULE,process_init,Args) end,[link|Opts]), + pal("spawn(~p): ~p",[Args,Pid]), + Pid; +run_command(M,spawn,Args) -> + run_command(M,spawn,{Args,[]}); +run_command(Mod,Func,Args) -> + erlang:display({{Mod,Func,Args},now()}), + apply(Mod,Func,Args). 
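handle_msg(take_control, ...) above hands the port over by talking the port protocol directly: it sends the {connect, Pid} message, the port acknowledges to its old connected process with {Port, connected} (matched by the first clause of process_loop/3), and the new_owner cast then records who controls the port. A minimal sketch of that handover (hypothetical helper, not part of the patch), assuming the caller is currently the port's connected process:

    handover(Port, NewOwner) ->
        %% Same handover as port_connect(Port, NewOwner), but the message
        %% form also makes the port acknowledge with {Port, connected},
        %% whereas the BIF replies with nothing.
        Port ! {self(), {connect, NewOwner}},
        receive
            {Port, connected} -> ok    %% acknowledgement sent to the old owner
        after 1000 ->
            exit(no_connect_ack)
        end.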
+ +validate_scenario(Data,[{print,Var}|T]) -> + pal("Val: ~p",[proplists:get_value(Var,Data)]), + validate_scenario(Data,T); +validate_scenario(Data,[{eq,Var,Value}|T]) -> + case proplists:get_value(Var,Data) of + Value -> + validate_scenario(Data,T); + Else -> + exit({eq_return,Value,Else}) + end; +validate_scenario(Data,[{neq,Var,Value}|T]) -> + case proplists:get_value(Var,Data) of + Value -> + exit({neq_return,Value}); + _Else -> + validate_scenario(Data,T) + end; +validate_scenario(Data,[{seq,Var,Seq}|T]) -> + try + validate_sequence(proplists:get_value(Var,Data),Seq) + catch _:{validate_sequence,NotFound} -> + exit({validate_sequence,NotFound,Data}) + end, + validate_scenario(Data,T); +validate_scenario(_,[]) -> + ok. + +validate_sequence(Data,Validation) when is_binary(Data) -> + validate_sequence(binary_to_list(Data),Validation); +validate_sequence([H|R],[H|T]) -> + validate_sequence(R,T); +validate_sequence([_|R],Seq) -> + validate_sequence(R,Seq); +validate_sequence(_,[]) -> + ok; +validate_sequence([],NotFound) -> + exit({validate_sequence,NotFound}). + +replace_args({var,Var},Vars) -> + proplists:get_value(Var,Vars); +replace_args([H|T],Vars) -> + [replace_args(H,Vars)|replace_args(T,Vars)]; +replace_args([],_Vars) -> + []; +replace_args(Tuple,Vars) when is_tuple(Tuple) -> + list_to_tuple(replace_args(tuple_to_list(Tuple),Vars)); +replace_args(Else,_Vars) -> + Else. + +pal(_F,_A) -> ok. +%pal(Format,Args) -> +% ct:pal("~p "++Format,[self()|Args]). +% erlang:display(lists:flatten(io_lib:format("~p "++Format,[self()|Args]))). + + %%% Utilities. chk_range(Min, Val, Max) when Min =< Val, Val =< Max -> @@ -644,11 +946,11 @@ loop(Master, Slave) -> Pid ! {busy_drv_reply, {self(), Slave}}, loop(Master, Slave); {Pid, unlock} -> - Master ! {self(), {command, "u"}}, + port_command(Master, "u"), Pid ! {busy_drv_reply, ok}, loop(Master, Slave); {Pid, lock} -> - Master ! {self(), {command, "l"}}, + port_command(Master, "l"), Pid ! 
{busy_drv_reply, ok}, loop(Master, Slave); {Pid, {port_command,Data}} -> diff --git a/erts/emulator/test/busy_port_SUITE_data/Makefile.src b/erts/emulator/test/busy_port_SUITE_data/Makefile.src index 664909db71..b5fcf25176 100644 --- a/erts/emulator/test/busy_port_SUITE_data/Makefile.src +++ b/erts/emulator/test/busy_port_SUITE_data/Makefile.src @@ -17,9 +17,10 @@ # %CopyrightEnd% # -all: busy_drv@dll@ hard_busy_drv@dll@ soft_busy_drv@dll@ +all: busy_drv@dll@ hard_busy_drv@dll@ soft_busy_drv@dll@ scheduling_drv@dll@ @SHLIB_RULES@ hard_busy_drv@obj@: hard_busy_drv.c hs_busy_drv.c soft_busy_drv@obj@: soft_busy_drv.c hs_busy_drv.c +scheduling_drv@obj@: scheduling_drv.c diff --git a/erts/emulator/test/busy_port_SUITE_data/hs_busy_drv.c b/erts/emulator/test/busy_port_SUITE_data/hs_busy_drv.c index 9f6bd310c6..dcbaf500b8 100644 --- a/erts/emulator/test/busy_port_SUITE_data/hs_busy_drv.c +++ b/erts/emulator/test/busy_port_SUITE_data/hs_busy_drv.c @@ -71,9 +71,9 @@ void output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) ERL_DRV_PID, driver_caller(port), ERL_DRV_TUPLE, (ErlDrvTermData) 3 }; - res = driver_output_term(port, msg, sizeof(msg)/sizeof(ErlDrvTermData)); + res = erl_drv_output_term(driver_mk_port(port), msg, sizeof(msg)/sizeof(ErlDrvTermData)); if (res <= 0) - driver_failure_atom(port, "driver_output_term failed"); + driver_failure_atom(port, "erl_drv_output_term failed"); } ErlDrvSSizeT control(ErlDrvData drv_data, unsigned int command, char *buf, diff --git a/erts/emulator/test/busy_port_SUITE_data/scheduling_drv.c b/erts/emulator/test/busy_port_SUITE_data/scheduling_drv.c new file mode 100644 index 0000000000..57be9b6392 --- /dev/null +++ b/erts/emulator/test/busy_port_SUITE_data/scheduling_drv.c @@ -0,0 +1,190 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2009-2011. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. 
+ * + * %CopyrightEnd% + */ + +#ifdef __WIN32__ +#include <windows.h> +#else +#include <sys/select.h> +#endif +#include <errno.h> +#include <stdio.h> +#include "erl_driver.h" + +#define get_int32(s) ((((unsigned char*) (s))[0] << 24) | \ + (((unsigned char*) (s))[1] << 16) | \ + (((unsigned char*) (s))[2] << 8) | \ + (((unsigned char*) (s))[3])) + +#define ERTS_TEST_SCHEDULING_DRV_NAME "scheduling_drv" +#define ERTS_TEST_SCHEDULING_DRV_FLAGS \ + ERL_DRV_FLAG_USE_PORT_LOCKING | ERL_DRV_FLAG_SOFT_BUSY + +ErlDrvData start(ErlDrvPort port, char *command); +void output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len); +ErlDrvSSizeT control(ErlDrvData drv_data, unsigned int command, char *buf, + ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen); +void stop(ErlDrvData drv_data); +void timeout(ErlDrvData drv_data); + +static void delay(unsigned ms); + +static ErlDrvEntry busy_drv_entry = { + NULL /* init */, + start, + stop, + output, + NULL /* ready_input */, + NULL /* ready_output */, + ERTS_TEST_SCHEDULING_DRV_NAME, + NULL /* finish */, + NULL /* handle */, + control, + timeout, + NULL /* outputv */, + NULL /* ready_async */, + NULL /* flush */, + NULL /* call */, + NULL /* event */, + ERL_DRV_EXTENDED_MARKER, + ERL_DRV_EXTENDED_MAJOR_VERSION, + ERL_DRV_EXTENDED_MINOR_VERSION, + ERTS_TEST_SCHEDULING_DRV_FLAGS, + NULL /* handle2 */, + NULL /* handle_monitor */, + NULL /* stop_select */ +}; + +#define DBG(data,FMT) +/* #define DBG(data,FMT) printf("0x%.8lx: %s",driver_caller(data->port),FMT); */ + +typedef struct SchedDrvData { + ErlDrvPort port; + char data[255]; + int curr; + int use_auto_busy; +} SchedDrvData; + +DRIVER_INIT(busy_drv) +{ + return &busy_drv_entry; +} + +ErlDrvData start(ErlDrvPort port, char *command) +{ + SchedDrvData *d = driver_alloc(sizeof(SchedDrvData)); + d->port = port; + d->curr = 0; + d->use_auto_busy = 0; + DBG(d,"start\r\n"); + return (ErlDrvData) d; +} + +void stop(ErlDrvData drv_data) { + SchedDrvData *d = (SchedDrvData*)drv_data; + driver_output(d->port,d->data,d->curr); + DBG(d,"close\r\n"); + driver_free(d); + return; +} + +void timeout(ErlDrvData drv_data) { + SchedDrvData *d = (SchedDrvData*)drv_data; + set_busy_port(d->port, 0); + DBG(d,"timeout\r\n"); +} + +void output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) +{ + int res; + unsigned int command = *buf; + SchedDrvData *d = (SchedDrvData*)drv_data; + + switch (command) { + case 'B': /* busy */ + DBG(d,"busy: "); + set_busy_port(d->port, 1); + break; + case 'L': /* busy long call */ + DBG(d,"long: "); + delay(buf[5]*100); + set_busy_port(d->port, 1); + break; + case 'D': /* delay call */ + DBG(d,"delay: "); + delay(buf[5]*100); + break; + case 'N': /* not busy */ + DBG(d,"not"); + set_busy_port(d->port, 0); + goto done; + case 'C': /* change state */ + DBG(d,"chang: "); + break; + case 'G': /* get state */ + DBG(d,"get : "); + driver_output(d->port,d->data,d->curr); + return; + default: + driver_failure_posix((ErlDrvPort) drv_data, EINVAL); + break; + } + if (len > 1) { + unsigned int val = get_int32(buf+1); + fprintf(stderr,"%u",val); + d->data[d->curr++] = val; + } + done: + fprintf(stderr,"\r\n"); +} + +ErlDrvSSizeT control(ErlDrvData drv_data, unsigned int command, char *buf, + ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen) +{ + switch (command) { + case 'B': /* busy */ + set_busy_port((ErlDrvPort) drv_data, 1); + break; + case 'N': /* not busy */ + set_busy_port((ErlDrvPort) drv_data, 0); + break; + default: + driver_failure_posix((ErlDrvPort) drv_data, EINVAL); + break; + } + return 0; +} + + +/* 
+ * Delays (sleeps) the given number of milli-seconds. + */ + +static void delay(unsigned ms) +{ + fprintf(stderr,"delay(%u)",ms); +#ifdef __WIN32__ + Sleep(ms); +#else + struct timeval t; + t.tv_sec = ms/1000; + t.tv_usec = (ms % 1000) * 1000; + + select(0, NULL, NULL, NULL, &t); +#endif +} diff --git a/erts/emulator/test/ddll_SUITE.erl b/erts/emulator/test/ddll_SUITE.erl index 6e15c228cd..4675cab15c 100644 --- a/erts/emulator/test/ddll_SUITE.erl +++ b/erts/emulator/test/ddll_SUITE.erl @@ -136,8 +136,8 @@ delayed_unload_with_ports(Config) when is_list(Config) -> ?line {ok,pending_driver,Ref} = erl_ddll:try_unload(echo_drv,[{monitor, pending_driver}]), ?line ok = receive _ -> false after 0 -> ok end, ?line Port ! {self(), close}, - ?line 1 = erl_ddll:info(echo_drv, port_count), ?line ok = receive {Port,closed} -> ok after 1000 -> false end, + ?line 1 = erl_ddll:info(echo_drv, port_count), ?line Port2 ! {self(), close}, ?line ok = receive {Port2,closed} -> ok after 1000 -> false end, ?line ok = receive {'DOWN', Ref, driver, echo_drv, unloaded} -> ok after 1000 -> false end, diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl index 643357263c..13f18b4563 100644 --- a/erts/emulator/test/driver_SUITE.erl +++ b/erts/emulator/test/driver_SUITE.erl @@ -77,7 +77,8 @@ thread_mseg_alloc_cache_clean/1, otp_9302/1, thr_free_drv/1, - async_blast/1]). + async_blast/1, + thr_msg_blast/1]). -export([bin_prefix/2]). @@ -147,7 +148,8 @@ all() -> thread_mseg_alloc_cache_clean, otp_9302, thr_free_drv, - async_blast]. + async_blast, + thr_msg_blast]. groups() -> [{timer, [], @@ -1136,7 +1138,9 @@ check_driver_system_info_result(Result) -> {{1, 1}, _} -> ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES -- ?EXPECTED_SYSTEM_INFO_NAMES2), - ?line ExpNs = lists:sort(Ns) + ?line ExpNs = lists:sort(Ns); + {{2, 0}, _} -> + ?line [] = Ns end. chk_sis(SIs, Ns) -> @@ -2010,7 +2014,64 @@ async_blast(Config) when is_list(Config) -> ?line erlang:display({async_blast_time, AsyncBlastTime}), ?line ok. +thr_msg_blast_receiver(_Port, N, N) -> + ok; +thr_msg_blast_receiver(Port, N, Max) -> + receive + {Port, hi} -> + thr_msg_blast_receiver(Port, N+1, Max) + end. + +thr_msg_blast_receiver_proc(Port, Max, Parent, Done) -> + case port_control(Port, 0, "") of + "receiver" -> + spawn(fun () -> + thr_msg_blast_receiver_proc(Port, Max+1, Parent, Done) + end), + thr_msg_blast_receiver(Port, 0, Max); + "done" -> + Parent ! Done + end. +thr_msg_blast(Config) when is_list(Config) -> + case erlang:system_info(smp_support) of + false -> + {skipped, "Non-SMP emulator; nothing to test..."}; + true -> + Path = ?config(data_dir, Config), + erl_ddll:start(), + ok = load_driver(Path, thr_msg_blast_drv), + MemBefore = driver_alloc_size(), + Start = os:timestamp(), + Port = open_port({spawn, thr_msg_blast_drv}, []), + true = is_port(Port), + Done = make_ref(), + Me = self(), + spawn(fun () -> + thr_msg_blast_receiver_proc(Port, 1, Me, Done) + end), + receive + Done -> ok + end, + ok = thr_msg_blast_receiver(Port, 0, 32*10000), + port_close(Port), + End = os:timestamp(), + receive + Garbage -> + ?t:fail({received_garbage, Port, Garbage}) + after 2000 -> + ok + end, + MemAfter = driver_alloc_size(), + io:format("MemBefore=~p, MemAfter=~p~n", + [MemBefore, MemAfter]), + ThrMsgBlastTime = timer:now_diff(End,Start)/1000000, + io:format("ThrMsgBlastTime=~p~n", [ThrMsgBlastTime]), + MemBefore = MemAfter, + Res = {thr_msg_blast_time, ThrMsgBlastTime}, + erlang:display(Res), + Res + end. 
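The new thr_msg_blast case above stress-tests the thread-safe term APIs (erl_drv_send_term/erl_drv_output_term) that the other hunks in this diff migrate drivers to. Distilled from the Erlang side of the test (the driver's exact behaviour lives in thr_msg_blast_drv.c, which begins below; helper names here are illustrative): each port_control call registers the caller as a receiver until the driver answers "done", after which registered receivers are flooded with {Port, hi} messages posted from driver threads, and matching on Port in the receive keeps each count correct even with many receivers running concurrently:

    thr_msg_blast_sketch(Port) ->
        case port_control(Port, 0, "") of
            "receiver" -> count_hi(Port, 0);   %% this process was registered as a receiver
            "done"     -> no_more_receivers
        end.

    count_hi(Port, N) ->
        receive
            {Port, hi} -> count_hi(Port, N + 1)
        after 1000 ->
            N                                  %% stream drained; return the tally
        end.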
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Utilities diff --git a/erts/emulator/test/driver_SUITE_data/Makefile.src b/erts/emulator/test/driver_SUITE_data/Makefile.src index 9cc107cc66..b667dff6b6 100644 --- a/erts/emulator/test/driver_SUITE_data/Makefile.src +++ b/erts/emulator/test/driver_SUITE_data/Makefile.src @@ -14,7 +14,8 @@ MISC_DRVS = outputv_drv@dll@ \ thr_alloc_drv@dll@ \ otp_9302_drv@dll@ \ thr_free_drv@dll@ \ - async_blast_drv@dll@ + async_blast_drv@dll@ \ + thr_msg_blast_drv@dll@ SYS_INFO_DRVS = sys_info_base_drv@dll@ \ sys_info_prev_drv@dll@ \ diff --git a/erts/emulator/test/driver_SUITE_data/async_blast_drv.c b/erts/emulator/test/driver_SUITE_data/async_blast_drv.c index c2086c5860..d72b20d143 100644 --- a/erts/emulator/test/driver_SUITE_data/async_blast_drv.c +++ b/erts/emulator/test/driver_SUITE_data/async_blast_drv.c @@ -56,6 +56,7 @@ static ErlDrvEntry async_blast_drv_entry = { typedef struct { ErlDrvPort port; + ErlDrvTermData port_id; ErlDrvTermData caller; int counter; } async_blast_data_t; @@ -81,6 +82,7 @@ static ErlDrvData start(ErlDrvPort port, return ERL_DRV_ERROR_GENERAL; abd->port = port; + abd->port_id = driver_mk_port(port); abd->counter = 0; return (ErlDrvData) abd; } @@ -97,12 +99,12 @@ static void ready_async(ErlDrvData drv_data, async_blast_data_t *abd = (async_blast_data_t *) drv_data; if (--abd->counter == 0) { ErlDrvTermData spec[] = { - ERL_DRV_PORT, driver_mk_port(abd->port), + ERL_DRV_PORT, abd->port_id, ERL_DRV_ATOM, driver_mk_atom("done"), ERL_DRV_TUPLE, 2 }; - driver_send_term(abd->port, abd->caller, - spec, sizeof(spec)/sizeof(spec[0])); + erl_drv_send_term(abd->port_id, abd->caller, + spec, sizeof(spec)/sizeof(spec[0])); } } diff --git a/erts/emulator/test/driver_SUITE_data/caller_drv.c b/erts/emulator/test/driver_SUITE_data/caller_drv.c index 1ed20b0638..2731f9b317 100644 --- a/erts/emulator/test/driver_SUITE_data/caller_drv.c +++ b/erts/emulator/test/driver_SUITE_data/caller_drv.c @@ -85,9 +85,9 @@ send_caller(ErlDrvData drv_data, char *func) ERL_DRV_PID, driver_caller(port), ERL_DRV_TUPLE, (ErlDrvTermData) 4 }; - res = driver_output_term(port, msg, sizeof(msg)/sizeof(ErlDrvTermData)); + res = erl_drv_output_term(driver_mk_port(port), msg, sizeof(msg)/sizeof(ErlDrvTermData)); if (res <= 0) - driver_failure_atom(port, "driver_output_term failed"); + driver_failure_atom(port, "erl_drv_output_term failed"); } static ErlDrvData diff --git a/erts/emulator/test/driver_SUITE_data/monitor_drv.c b/erts/emulator/test/driver_SUITE_data/monitor_drv.c index 3da067fd09..81dfb65191 100644 --- a/erts/emulator/test/driver_SUITE_data/monitor_drv.c +++ b/erts/emulator/test/driver_SUITE_data/monitor_drv.c @@ -117,7 +117,7 @@ static void handle_monitor(ErlDrvData drv_data, ErlDrvMonitor *monitor) o->next = p->next; } driver_free(p); - driver_send_term(data->port, data->ipid, spec, sizeof(spec)/sizeof(ErlDrvTermData)); + erl_drv_send_term(driver_mk_port(data->port), data->ipid, spec, sizeof(spec)/sizeof(ErlDrvTermData)); } return; diff --git a/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c b/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c index 221fd0ce51..93ef767d75 100644 --- a/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c +++ b/erts/emulator/test/driver_SUITE_data/otp_9302_drv.c @@ -134,8 +134,8 @@ static void send_reply(Otp9302AsyncData *adata) ERL_DRV_ATOM, adata->term_data.msg, ERL_DRV_TUPLE, 2 }; - driver_send_term(adata->port, adata->term_data.receiver, - spec, sizeof(spec)/sizeof(spec[0])); + 
erl_drv_send_term(adata->term_data.port, adata->term_data.receiver, + spec, sizeof(spec)/sizeof(spec[0])); } static void enqueue_reply(Otp9302AsyncData *adata) diff --git a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c index 0c86a26604..cbee1c3dce 100644 --- a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c +++ b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c @@ -177,15 +177,16 @@ static void ready_async(ErlDrvData drv_data, ErlDrvThreadData thread_data) { PeekNonXQDrvData *dp = (PeekNonXQDrvData *) drv_data; if (dp->cmd == PEEK_NONXQ_WAIT) { + ErlDrvTermData port_id = driver_mk_port(dp->port); ErlDrvTermData spec[] = { - ERL_DRV_PORT, driver_mk_port(dp->port), + ERL_DRV_PORT, port_id, ERL_DRV_ATOM, driver_mk_atom("test_successful"), ERL_DRV_TUPLE, 2 }; - driver_send_term(dp->port, - dp->caller, - spec, - sizeof(spec) / sizeof(spec[0])); + erl_drv_send_term(port_id, + dp->caller, + spec, + sizeof(spec) / sizeof(spec[0])); } if (thread_data) driver_free(thread_data); diff --git a/erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c b/erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c new file mode 100644 index 0000000000..5a9112afa3 --- /dev/null +++ b/erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c @@ -0,0 +1,178 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2012. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. 
+ * + * %CopyrightEnd% + */ + +#include "erl_driver.h" + +#define THR_MSG_BLAST_NO_PROCS 10 +#define THR_MSG_BLAST_NO_SENDS_PER_PROC 10000 + +#define THR_MSG_BLAST_THREADS 32 + +static void stop(ErlDrvData drv_data); +static ErlDrvData start(ErlDrvPort port, + char *command); +static ErlDrvSSizeT control(ErlDrvData drv_data, + unsigned int command, + char *buf, ErlDrvSizeT len, + char **rbuf, ErlDrvSizeT rlen); + +static ErlDrvEntry thr_msg_blast_drv_entry = { + NULL /* init */, + start, + stop, + NULL /* output */, + NULL /* ready_input */, + NULL /* ready_output */, + "thr_msg_blast_drv", + NULL /* finish */, + NULL /* handle */, + control, + NULL /* timeout */, + NULL /* outputv */, + NULL /* ready_async */, + NULL /* flush */, + NULL /* call */, + NULL /* event */, + ERL_DRV_EXTENDED_MARKER, + ERL_DRV_EXTENDED_MAJOR_VERSION, + ERL_DRV_EXTENDED_MINOR_VERSION, + ERL_DRV_FLAG_USE_PORT_LOCKING, + NULL /* handle2 */, + NULL /* handle_monitor */ +}; + +typedef struct { + ErlDrvPort port; + ErlDrvTermData td_port; + ErlDrvTermData hi; + ErlDrvTid tid[THR_MSG_BLAST_THREADS]; + int no_thrs; + ErlDrvTermData proc[THR_MSG_BLAST_NO_PROCS]; + int no_procs; +} thr_msg_blast_data_t; + + +DRIVER_INIT(thr_msg_blast_drv) +{ + return &thr_msg_blast_drv_entry; +} + +static void stop(ErlDrvData drv_data) +{ + int i; + thr_msg_blast_data_t *tmbd = (thr_msg_blast_data_t *) drv_data; + for (i = 0; i < tmbd->no_thrs; i++) + erl_drv_thread_join(tmbd->tid[i], NULL); + driver_free((void *) tmbd); +} + +static ErlDrvData start(ErlDrvPort port, + char *command) +{ + thr_msg_blast_data_t *tmbd; + + tmbd = driver_alloc(sizeof(thr_msg_blast_data_t)); + if (!tmbd) + return ERL_DRV_ERROR_GENERAL; + + tmbd->port = port; + tmbd->td_port = driver_mk_port(port); + tmbd->hi = driver_mk_atom("hi"); + tmbd->no_thrs = 0; + tmbd->no_procs = 1; + tmbd->proc[0] = driver_caller(port); + + return (ErlDrvData) tmbd; +} + +static void *thread(void *); + +static ErlDrvSSizeT control(ErlDrvData drv_data, + unsigned int command, + char *buf, ErlDrvSizeT len, + char **rbuf, ErlDrvSizeT rlen) +{ + thr_msg_blast_data_t *tmbd = (thr_msg_blast_data_t *) drv_data; + char *res_str = "error"; + + if (tmbd->no_procs >= THR_MSG_BLAST_NO_PROCS) { + int i; + for (i = 0; i < tmbd->no_thrs; i++) + erl_drv_thread_join(tmbd->tid[i], NULL); + tmbd->no_thrs = 0; + res_str = "done"; + } + else { + + tmbd->proc[tmbd->no_procs++] = driver_caller(tmbd->port); + + if (tmbd->no_procs == THR_MSG_BLAST_NO_PROCS) { + for (tmbd->no_thrs = 0; + tmbd->no_thrs < THR_MSG_BLAST_THREADS; + tmbd->no_thrs++) { + int res = erl_drv_thread_create("test", + &tmbd->tid[tmbd->no_thrs], + thread, + tmbd, + NULL); + if (res != 0) { + driver_failure_posix(tmbd->port, res); + goto done; + } + } + } + + res_str = "receiver"; + } + + done: { + ErlDrvSSizeT res_len = strlen(res_str); + if (res_len > rlen) { + char *abuf = driver_alloc(sizeof(char)*res_len); + if (!abuf) + return 0; + *rbuf = abuf; + } + + memcpy((void *) *rbuf, (void *) res_str, res_len); + + return res_len; + } +} + +static void *thread(void *varg) +{ + int s, p; + thr_msg_blast_data_t *tmbd = (thr_msg_blast_data_t *) varg; + ErlDrvTermData spec[] = { + ERL_DRV_PORT, tmbd->td_port, + ERL_DRV_ATOM, tmbd->hi, + ERL_DRV_TUPLE, 2 + }; + + for (s = 0; s < THR_MSG_BLAST_NO_SENDS_PER_PROC; s++) { + for (p = 0; p < THR_MSG_BLAST_NO_PROCS; p++) { + int res = erl_drv_send_term(tmbd->td_port, tmbd->proc[p], + spec, sizeof(spec)/sizeof(spec[0])); + if (p == 0 && res <= 0) + abort(); /* Could not send to creator */ + } + } + 
return NULL; +} diff --git a/erts/emulator/test/erl_drv_thread_SUITE_data/testcase_driver.c b/erts/emulator/test/erl_drv_thread_SUITE_data/testcase_driver.c index b4542f3e36..2cd3209231 100644 --- a/erts/emulator/test/erl_drv_thread_SUITE_data/testcase_driver.c +++ b/erts/emulator/test/erl_drv_thread_SUITE_data/testcase_driver.c @@ -42,6 +42,7 @@ typedef struct { TestCaseState_t visible; ErlDrvPort port; + ErlDrvTermData port_id; int result; jmp_buf done_jmp_buf; char *comment; @@ -98,6 +99,7 @@ testcase_drv_start(ErlDrvPort port, char *command) itcs->visible.testcase_name = testcase_name(); itcs->visible.extra = NULL; itcs->port = port; + itcs->port_id = driver_mk_port(port); itcs->result = TESTCASE_FAILED; itcs->comment = ""; @@ -143,7 +145,7 @@ testcase_drv_run(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) msg[1] = (ErlDrvTermData) result_atom; msg[2] = ERL_DRV_PORT; - msg[3] = driver_mk_port(itcs->port); + msg[3] = itcs->port_id; msg[4] = ERL_DRV_ATOM; msg[5] = driver_mk_atom(itcs->visible.testcase_name); @@ -155,7 +157,7 @@ testcase_drv_run(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) msg[9] = ERL_DRV_TUPLE; msg[10] = (ErlDrvTermData) 4; - driver_output_term(itcs->port, msg, 11); + erl_drv_output_term(itcs->port_id, msg, 11); } int @@ -185,7 +187,7 @@ testcase_printf(TestCaseState_t *tcs, char *frmt, ...) msg[1] = (ErlDrvTermData) driver_mk_atom("print"); msg[2] = ERL_DRV_PORT; - msg[3] = driver_mk_port(itcs->port); + msg[3] = itcs->port_id; msg[4] = ERL_DRV_ATOM; msg[5] = driver_mk_atom(itcs->visible.testcase_name); @@ -197,7 +199,7 @@ testcase_printf(TestCaseState_t *tcs, char *frmt, ...) msg[9] = ERL_DRV_TUPLE; msg[10] = (ErlDrvTermData) 4; - driver_output_term(itcs->port, msg, 11); + erl_drv_output_term(itcs->port_id, msg, 11); } diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl index 461773114e..d5cb4ee1b7 100644 --- a/erts/emulator/test/match_spec_SUITE.erl +++ b/erts/emulator/test/match_spec_SUITE.erl @@ -28,7 +28,7 @@ unary_plus/1, unary_minus/1, moving_labels/1]). -export([fpe/1]). -export([otp_9422/1]). - +-export([faulty_seq_trace/1, do_faulty_seq_trace/0]). -export([runner/2, loop_runner/3]). -export([f1/1, f2/2, f3/2, fn/1, fn/2, fn/3]). -export([do_boxed_and_small/0]). @@ -59,6 +59,7 @@ all() -> ms_trace3, boxed_and_small, destructive_in_test_bif, guard_exceptions, unary_plus, unary_minus, fpe, moving_labels, + faulty_seq_trace, otp_9422]; true -> [not_run] end. @@ -726,6 +727,19 @@ do_boxed_and_small() -> {ok, false, _, _} = erlang:match_spec_test({0,3},[{{make_ref(),'_'},[],['$_']}],table), ok. +faulty_seq_trace(doc) -> + ["Test that faulty seq_trace_call does not crash emulator"]; +faulty_seq_trace(suite) -> []; +faulty_seq_trace(Config) when is_list(Config) -> + ?line {ok, Node} = start_node(match_spec_suite_other), + ?line ok = rpc:call(Node,?MODULE,do_faulty_seq_trace,[]), + ?line stop_node(Node), + ok. + +do_faulty_seq_trace() -> + {ok,'EXIT',_,_} = erlang:match_spec_test([],[{'_',[],[{message,{set_seq_token,yxa,true}}]}],trace), + ok. 
+ errchk(Pat) -> case catch erlang:trace_pattern({?MODULE, f2, 2}, Pat) of {'EXIT', {badarg, _}} -> diff --git a/erts/emulator/test/num_bif_SUITE.erl b/erts/emulator/test/num_bif_SUITE.erl index 4459732257..7a045484cf 100644 --- a/erts/emulator/test/num_bif_SUITE.erl +++ b/erts/emulator/test/num_bif_SUITE.erl @@ -25,6 +25,7 @@ %% abs/1 %% float/1 %% float_to_list/1 +%% float_to_list/2 %% integer_to_list/1 %% list_to_float/1 %% list_to_integer/1 @@ -114,14 +115,46 @@ t_float(Config) when is_list(Config) -> ok. -%% Tests float_to_list/1. +%% Tests float_to_list/1, float_to_list/2. t_float_to_list(Config) when is_list(Config) -> - ?line test_ftl("0.0e+0", 0.0), - ?line test_ftl("2.5e+1", 25.0), - ?line test_ftl("2.5e+0", 2.5), - ?line test_ftl("2.5e-1", 0.25), - ?line test_ftl("-3.5e+17", -350.0e15), + test_ftl("0.0e+0", 0.0), + test_ftl("2.5e+1", 25.0), + test_ftl("2.5e+0", 2.5), + test_ftl("2.5e-1", 0.25), + test_ftl("-3.5e+17", -350.0e15), + "1.00000000000000000000e+00" = float_to_list(1.0), + "1.00000000000000000000e+00" = float_to_list(1.0, []), + "-1.00000000000000000000e+00" = float_to_list(-1.0, []), + "-1.00000000000000000000" = float_to_list(-1.0, [{decimals, 20}]), + {'EXIT', {badarg, _}} = (catch float_to_list(1.0, [{decimals, -1}])), + {'EXIT', {badarg, _}} = (catch float_to_list(1.0, [{decimals, 250}])), + {'EXIT', {badarg, _}} = (catch float_to_list(1.0e+300, [{decimals, 1}])), + "1.0e+300" = float_to_list(1.0e+300, [{scientific, 1}]), + "1.0" = float_to_list(1.0, [{decimals, 249}, compact]), + Expected = "1." ++ string:copies("0", 249) ++ "e+00", + Expected = float_to_list(1.0, [{scientific, 249}, compact]), + + X1 = float_to_list(1.0), + X2 = float_to_list(1.0, [{scientific, 20}]), + X1 = X2, + "1.000e+00" = float_to_list(1.0, [{scientific, 3}]), + "1.000" = float_to_list(1.0, [{decimals, 3}]), + "1.0" = float_to_list(1.0, [{decimals, 3}, compact]), + "1.12" = float_to_list(1.123, [{decimals, 2}]), + "1.123" = float_to_list(1.123, [{decimals, 3}]), + "1.123" = float_to_list(1.123, [{decimals, 3}, compact]), + "1.1230" = float_to_list(1.123, [{decimals, 4}]), + "1.12300" = float_to_list(1.123, [{decimals, 5}]), + "1.123" = float_to_list(1.123, [{decimals, 5}, compact]), + "1.1234" = float_to_list(1.1234,[{decimals, 6}, compact]), + "2.333333" = erlang:float_to_list(7/3, [{decimals, 6}, compact]), + "2.333333" = erlang:float_to_list(7/3, [{decimals, 6}]), + "0.00000000000000000000e+00" = float_to_list(0.0, [compact]), + "0.0" = float_to_list(0.0, [{decimals, 10}, compact]), + "123000000000000000000.0" = float_to_list(1.23e20, [{decimals, 10}, compact]), + "1.2300000000e+20" = float_to_list(1.23e20, [{scientific, 10}, compact]), + "1.23000000000000000000e+20" = float_to_list(1.23e20, []), ok. test_ftl(Expect, Float) -> diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl index 873601ddd1..13aa0f4c00 100644 --- a/erts/emulator/test/port_SUITE.erl +++ b/erts/emulator/test/port_SUITE.erl @@ -90,7 +90,7 @@ mix_up_ports/1, otp_5112/1, otp_5119/1, otp_6224/1, exit_status_multi_scheduling_block/1, ports/1, spawn_driver/1, spawn_executable/1, close_deaf_port/1, - unregister_name/1]). + unregister_name/1, parallelism_option/1]). -export([]). @@ -114,7 +114,8 @@ all() -> stderr_to_stdout, otp_3906, otp_4389, win_massive, mix_up_ports, otp_5112, otp_5119, exit_status_multi_scheduling_block, ports, spawn_driver, - spawn_executable, close_deaf_port, unregister_name]. + spawn_executable, close_deaf_port, unregister_name, + parallelism_option]. 
groups() -> [{stream, [], [stream_small, stream_big]}, @@ -159,11 +160,11 @@ win_massive(Config) when is_list(Config) -> do_win_massive() -> Dog = test_server:timetrap(test_server:seconds(360)), SuiteDir = filename:dirname(code:which(?MODULE)), - Env = " -env ERL_MAX_PORTS 8192", + Ports = " +Q 8192", {ok, Node} = test_server:start_node(win_massive, slave, - [{args, " -pa " ++ SuiteDir ++ Env}]), + [{args, " -pa " ++ SuiteDir ++ Ports}]), ok = rpc:call(Node,?MODULE,win_massive_client,[3000]), test_server:stop_node(Node), test_server:timetrap_cancel(Dog), @@ -1298,6 +1299,43 @@ spawn_driver(Config) when is_list(Config) -> test_server:timetrap_cancel(Dog), ok. +parallelism_option(suite) -> + []; +parallelism_option(doc) -> + ["Test parallelism option of open_port"]; +parallelism_option(Config) when is_list(Config) -> + ?line Dog = test_server:timetrap(test_server:seconds(10)), + ?line Path = ?config(data_dir, Config), + ?line ok = load_driver(Path, "echo_drv"), + ?line Port = erlang:open_port({spawn_driver, "echo_drv"}, + [{parallelism, true}]), + ?line {parallelism, true} = erlang:port_info(Port, parallelism), + ?line Port ! {self(), {command, "Hello port!"}}, + ?line receive + {Port, {data, "Hello port!"}} = Msg1 -> + io:format("~p~n", [Msg1]), + ok; + Other -> + test_server:fail({unexpected, Other}) + end, + ?line Port ! {self(), close}, + ?line receive {Port, closed} -> ok end, + + ?line Port2 = erlang:open_port({spawn_driver, "echo_drv -Hello port?"}, + [{parallelism, false}]), + ?line {parallelism, false} = erlang:port_info(Port2, parallelism), + ?line receive + {Port2, {data, "Hello port?"}} = Msg2 -> + io:format("~p~n", [Msg2]), + ok; + Other2 -> + test_server:fail({unexpected2, Other2}) + end, + ?line Port2 ! {self(), close}, + ?line receive {Port2, closed} -> ok end, + ?line test_server:timetrap_cancel(Dog), + ok. + spawn_executable(suite) -> []; spawn_executable(doc) -> @@ -1566,6 +1604,7 @@ otp_5112(Config) when is_list(Config) -> ?t:format("Links1: ~p~n",[Links1]), true = lists:member(Port, Links1), Port ! {self(), {command, ""}}, + ?line wait_until(fun () -> lists:member(Port, erlang:ports()) == false end), {links, Links2} = process_info(self(),links), ?t:format("Links2: ~p~n",[Links2]), false = lists:member(Port, Links2), %% This used to fail @@ -1636,38 +1675,8 @@ otp_5119_fill_empty_port_tab(Ports) -> LastPort end. --define(DEF_MAX_PORTS, 1024). - -max_ports_env() -> - case os:getenv("ERL_MAX_PORTS") of - EMP when is_list(EMP) -> - case catch list_to_integer(EMP) of - Int when is_integer(Int) -> Int; - _ -> false - end; - _ -> false - end. - max_ports() -> - PreMaxPorts - = case max_ports_env() of - Env when is_integer(Env) -> Env; - _ -> - case os:type() of - {unix, _} -> - UlimStr = string:strip(os:cmd("ulimit -n") - -- "\n"), - case catch list_to_integer(UlimStr) of - Ulim when is_integer(Ulim) -> Ulim; - _ -> ?DEF_MAX_PORTS - end; - _ -> ?DEF_MAX_PORTS - end - end, - case PreMaxPorts > ?DEF_MAX_PORTS of - true -> PreMaxPorts; - false -> ?DEF_MAX_PORTS - end. + erlang:system_info(port_limit). port_ix(Port) when is_port(Port) -> ["#Port",_,PortIxStr] = string:tokens(erlang:port_to_list(Port), @@ -2270,5 +2279,12 @@ close_deaf_port_1(N, Cmd) -> _:eagain -> {comment, "Could not spawn more than " ++ integer_to_list(N) ++ " OS processes."} end. - +wait_until(Fun) -> + case catch Fun() of + true -> + ok; + _ -> + receive after 100 -> ok end, + wait_until(Fun) + end. 
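For reference, a minimal usage sketch of the features the port_SUITE changes above exercise — the {parallelism, Bool} option to open_port, the parallelism item of erlang:port_info/2, and the port_limit system_info item that replaces the old ERL_MAX_PORTS environment variable. This is an editorial aside, not part of the patch; it assumes the suite's echo_drv driver has already been loaded via erl_ddll, as in parallelism_option/1 above.

    %% Sketch only: echo_drv must already be loaded (see load_driver/2 in the suite).
    true = is_integer(erlang:system_info(port_limit)),   %% settable with the +Q emulator flag, e.g. "erl +Q 8192"
    Port = erlang:open_port({spawn_driver, "echo_drv"}, [{parallelism, true}]),
    {parallelism, true} = erlang:port_info(Port, parallelism),
    true = erlang:port_close(Port).
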
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl index 6509871a7d..11dd88413f 100644 --- a/erts/emulator/test/process_SUITE.erl +++ b/erts/emulator/test/process_SUITE.erl @@ -118,15 +118,15 @@ fun_spawn(Fun) -> %% (unclear if this test case will actually prove anything on %% a modern computer with lots of memory). spawn_with_binaries(Config) when is_list(Config) -> - ?line L = lists:duplicate(2048, 42), - ?line TwoMeg = lists:duplicate(1024, L), - ?line Fun = fun() -> spawn(?MODULE, binary_owner, [list_to_binary(TwoMeg)]), + L = lists:duplicate(2048, 42), + TwoMeg = lists:duplicate(1024, L), + Fun = fun() -> spawn(?MODULE, binary_owner, [list_to_binary(TwoMeg)]), receive after 1 -> ok end end, - ?line Iter = case test_server:purify_is_running() of + Iter = case test_server:purify_is_running() of true -> 10; false -> 150 end, - ?line test_server:do_times(Iter, Fun), + test_server:do_times(Iter, Fun), ok. binary_owner(Bin) when is_binary(Bin) -> @@ -134,87 +134,87 @@ binary_owner(Bin) when is_binary(Bin) -> %% Tests exit/1 with a big message. t_exit_1(Config) when is_list(Config) -> - ?line start_spawner(), - ?line Dog = test_server:timetrap(test_server:seconds(20)), - ?line process_flag(trap_exit, true), - ?line test_server:do_times(10, fun t_exit_1/0), - ?line test_server:timetrap_cancel(Dog), - ?line stop_spawner(), + start_spawner(), + Dog = test_server:timetrap(test_server:seconds(20)), + process_flag(trap_exit, true), + test_server:do_times(10, fun t_exit_1/0), + test_server:timetrap_cancel(Dog), + stop_spawner(), ok. t_exit_1() -> - ?line Pid = fun_spawn(fun() -> exit(kb_128()) end), - ?line Garbage = kb_128(), - ?line receive + Pid = fun_spawn(fun() -> exit(kb_128()) end), + Garbage = kb_128(), + receive {'EXIT', Pid, Garbage} -> ok end. %% Tests exit/2 with a lot of data in the exit message. t_exit_2_other(Config) when is_list(Config) -> - ?line start_spawner(), - ?line Dog = test_server:timetrap(test_server:seconds(20)), - ?line process_flag(trap_exit, true), - ?line test_server:do_times(10, fun t_exit_2_other/0), - ?line test_server:timetrap_cancel(Dog), - ?line stop_spawner(), + start_spawner(), + Dog = test_server:timetrap(test_server:seconds(20)), + process_flag(trap_exit, true), + test_server:do_times(10, fun t_exit_2_other/0), + test_server:timetrap_cancel(Dog), + stop_spawner(), ok. t_exit_2_other() -> - ?line Pid = fun_spawn(fun() -> receive x -> ok end end), - ?line Garbage = kb_128(), - ?line exit(Pid, Garbage), - ?line receive + Pid = fun_spawn(fun() -> receive x -> ok end end), + Garbage = kb_128(), + exit(Pid, Garbage), + receive {'EXIT', Pid, Garbage} -> ok end. %% Tests that exit(Pid, normal) does not kill another process.; t_exit_2_other_normal(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(20)), - ?line process_flag(trap_exit, true), - ?line Pid = fun_spawn(fun() -> receive x -> ok end end), - ?line exit(Pid, normal), - ?line receive + Dog = test_server:timetrap(test_server:seconds(20)), + process_flag(trap_exit, true), + Pid = fun_spawn(fun() -> receive x -> ok end end), + exit(Pid, normal), + receive {'EXIT', Pid, Reason} -> - ?line test_server:fail({process_died, Reason}) + test_server:fail({process_died, Reason}) after 1000 -> ok end, - ?line case process_info(Pid) of + case process_info(Pid) of undefined -> test_server:fail(process_died_on_normal); List when is_list(List) -> ok end, exit(Pid, kill), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. 
%% Tests that we can trap an exit message sent with exit/2 from %% the same process. self_exit(Config) when is_list(Config) -> - ?line start_spawner(), - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line process_flag(trap_exit, true), - ?line test_server:do_times(200, fun self_exit/0), - ?line test_server:timetrap_cancel(Dog), - ?line stop_spawner(), + start_spawner(), + Dog = test_server:timetrap(test_server:seconds(10)), + process_flag(trap_exit, true), + test_server:do_times(200, fun self_exit/0), + test_server:timetrap_cancel(Dog), + stop_spawner(), ok. self_exit() -> - ?line Garbage = eight_kb(), - ?line P = self(), - ?line true = exit(P, Garbage), - ?line receive + Garbage = eight_kb(), + P = self(), + true = exit(P, Garbage), + receive {'EXIT', P, Garbage} -> ok end. %% Tests exit(self(), normal) is equivalent to exit(normal) for a process %% that doesn't trap exits. normal_suicide_exit(Config) when is_list(Config) -> - ?line process_flag(trap_exit, true), - ?line Pid = fun_spawn(fun() -> exit(self(), normal) end), - ?line receive + process_flag(trap_exit, true), + Pid = fun_spawn(fun() -> exit(self(), normal) end), + receive {'EXIT', Pid, normal} -> ok; Other -> test_server:fail({bad_message, Other}) end. @@ -222,19 +222,19 @@ normal_suicide_exit(Config) when is_list(Config) -> %% Tests exit(self(), Term) is equivalent to exit(Term) for a process %% that doesn't trap exits."; abnormal_suicide_exit(Config) when is_list(Config) -> - ?line Garbage = eight_kb(), - ?line process_flag(trap_exit, true), - ?line Pid = fun_spawn(fun() -> exit(self(), Garbage) end), - ?line receive + Garbage = eight_kb(), + process_flag(trap_exit, true), + Pid = fun_spawn(fun() -> exit(self(), Garbage) end), + receive {'EXIT', Pid, Garbage} -> ok; Other -> test_server:fail({bad_message, Other}) end. %% Tests that exit(self(), die) cannot be catched. t_exit_2_catch(Config) when is_list(Config) -> - ?line process_flag(trap_exit, true), - ?line Pid = fun_spawn(fun() -> catch exit(self(), die) end), - ?line receive + process_flag(trap_exit, true), + Pid = fun_spawn(fun() -> catch exit(self(), die) end), + receive {'EXIT', Pid, normal} -> test_server:fail(catch_worked); {'EXIT', Pid, die} -> @@ -246,29 +246,29 @@ t_exit_2_catch(Config) when is_list(Config) -> %% Tests trapping of an 'EXIT' message generated by a bad argument to %% the abs/1 bif. The 'EXIT' message will intentionally be very big. trap_exit_badarg(Config) when is_list(Config) -> - ?line start_spawner(), - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line process_flag(trap_exit, true), - ?line test_server:do_times(10, fun trap_exit_badarg/0), - ?line test_server:timetrap_cancel(Dog), - ?line stop_spawner(), + start_spawner(), + Dog = test_server:timetrap(test_server:seconds(10)), + process_flag(trap_exit, true), + test_server:do_times(10, fun trap_exit_badarg/0), + test_server:timetrap_cancel(Dog), + stop_spawner(), ok. trap_exit_badarg() -> - ?line Pid = fun_spawn(fun() -> bad_guy(kb_128()) end), - ?line Garbage = kb_128(), - ?line receive + Pid = fun_spawn(fun() -> bad_guy(kb_128()) end), + Garbage = kb_128(), + receive {'EXIT',Pid,{badarg,[{erlang,abs,[Garbage],Loc1}, {?MODULE,bad_guy,1,Loc2}|_]}} when is_list(Loc1), is_list(Loc2) -> ok; Other -> - ?line ok = io:format("Bad EXIT message: ~P", [Other, 30]), - ?line test_server:fail(bad_exit_message) + ok = io:format("Bad EXIT message: ~P", [Other, 30]), + test_server:fail(bad_exit_message) end. bad_guy(Arg) -> - ?line abs(Arg). + abs(Arg). 
kb_128() -> @@ -281,11 +281,11 @@ kb_128() -> eight_kb() -> B64 = lists:seq(1, 64), - ?line B512 = {<<1>>,B64,<<2,3>>,B64,make_unaligned_sub_binary(<<4,5,6,7,8,9>>), + B512 = {<<1>>,B64,<<2,3>>,B64,make_unaligned_sub_binary(<<4,5,6,7,8,9>>), B64,make_sub_binary([1,2,3,4,5,6]), B64,make_sub_binary(lists:seq(1, ?heap_binary_size+1)), B64,B64,B64,B64,big_binary()}, - ?line lists:duplicate(8, {B512,B512}). + lists:duplicate(8, {B512,B512}). big_binary() -> big_binary(10, [42]). @@ -296,19 +296,19 @@ big_binary(N, Acc) -> %% Test receiving an EXIT message when spawning a BIF with bad arguments. trap_exit_badarg_in_bif(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(10)), - ?line process_flag(trap_exit, true), - ?line test_server:do_times(10, fun trap_exit_badarg_bif/0), - ?line test_server:timetrap_cancel(Dog), + Dog = test_server:timetrap(test_server:seconds(10)), + process_flag(trap_exit, true), + test_server:do_times(10, fun trap_exit_badarg_bif/0), + test_server:timetrap_cancel(Dog), ok. trap_exit_badarg_bif() -> - ?line Pid = spawn_link(erlang, node, [1]), - ?line receive + Pid = spawn_link(erlang, node, [1]), + receive {'EXIT', Pid, {badarg, _}} -> ok; Other -> - ?line test_server:fail({unexpected, Other}) + test_server:fail({unexpected, Other}) end. %% The following sequences of events have crasched Beam. @@ -321,27 +321,27 @@ trap_exit_badarg_bif() -> %% 3) The process will crash the next time it executes 'receive'. exit_and_timeout(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(20)), + Dog = test_server:timetrap(test_server:seconds(20)), - ?line process_flag(trap_exit, true), - ?line Parent = self(), - ?line Low = fun_spawn(fun() -> eat_low(Parent) end), - ?line High = fun_spawn(fun() -> eat_high(Low) end), - ?line eat_wait_for(Low, High), + process_flag(trap_exit, true), + Parent = self(), + Low = fun_spawn(fun() -> eat_low(Parent) end), + High = fun_spawn(fun() -> eat_high(Low) end), + eat_wait_for(Low, High), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. eat_wait_for(Low, High) -> - ?line receive - {'EXIT', Low, {you, are, dead}} -> - ok; - {'EXIT', High, normal} -> - eat_wait_for(Low, High); - Other -> - test_server:fail({bad_message, Other}) - end. + receive + {'EXIT', Low, {you, are, dead}} -> + ok; + {'EXIT', High, normal} -> + eat_wait_for(Low, High); + Other -> + test_server:fail({bad_message, Other}) + end. eat_low(_Parent) -> receive @@ -374,27 +374,27 @@ loop(_, _) -> %% Tries to send two different exit messages to a process. %% (The second one should be ignored.) exit_twice(Config) when is_list(Config) -> - ?line Dog = test_server:timetrap(test_server:seconds(20)), + Dog = test_server:timetrap(test_server:seconds(20)), - ?line process_flag(trap_exit, true), - ?line Low = fun_spawn(fun etwice_low/0), - ?line High = fun_spawn(fun() -> etwice_high(Low) end), - ?line etwice_wait_for(Low, High), + process_flag(trap_exit, true), + Low = fun_spawn(fun etwice_low/0), + High = fun_spawn(fun() -> etwice_high(Low) end), + etwice_wait_for(Low, High), - ?line test_server:timetrap_cancel(Dog), + test_server:timetrap_cancel(Dog), ok. etwice_wait_for(Low, High) -> - ?line receive - {'EXIT', Low, first} -> - ok; - {'EXIT', Low, Other} -> - test_server:fail({wrong_exit_reason, Other}); - {'EXIT', High, normal} -> - etwice_wait_for(Low, High); - Other -> - test_server:fail({bad_message, Other}) - end. 
+ receive + {'EXIT', Low, first} -> + ok; + {'EXIT', Low, Other} -> + test_server:fail({wrong_exit_reason, Other}); + {'EXIT', High, normal} -> + etwice_wait_for(Low, High); + Other -> + test_server:fail({bad_message, Other}) + end. etwice_low() -> etwice_low(). @@ -406,15 +406,15 @@ etwice_high(Low) -> %% Tests the process_info/2 BIF. t_process_info(Config) when is_list(Config) -> - ?line [] = process_info(self(), registered_name), - ?line register(my_name, self()), - ?line {registered_name, my_name} = process_info(self(), registered_name), - ?line {status, running} = process_info(self(), status), - ?line {min_heap_size, 233} = process_info(self(), min_heap_size), - ?line {min_bin_vheap_size, 46368} = process_info(self(), min_bin_vheap_size), - ?line {current_function,{?MODULE,t_process_info,1}} = + [] = process_info(self(), registered_name), + register(my_name, self()), + {registered_name, my_name} = process_info(self(), registered_name), + {status, running} = process_info(self(), status), + {min_heap_size, 233} = process_info(self(), min_heap_size), + {min_bin_vheap_size,46422} = process_info(self(), min_bin_vheap_size), + {current_function,{?MODULE,t_process_info,1}} = process_info(self(), current_function), - ?line {current_function,{?MODULE,t_process_info,1}} = + {current_function,{?MODULE,t_process_info,1}} = apply(erlang, process_info, [self(),current_function]), %% current_location and current_stacktrace @@ -425,9 +425,9 @@ t_process_info(Config) when is_list(Config) -> verify_loc(Line2, Res2), pi_stacktrace([{?MODULE,t_process_info,1,?LINE}]), - ?line Gleader = group_leader(), - ?line {group_leader, Gleader} = process_info(self(), group_leader), - ?line {'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')), + Gleader = group_leader(), + {group_leader, Gleader} = process_info(self(), group_leader), + {'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')), ok. pi_stacktrace(Expected0) -> @@ -509,50 +509,50 @@ process_info_looper(Parent) -> %% Tests the process_info/1 BIF on another process with messages. process_info_other_msg(Config) when is_list(Config) -> Self = self(), - ?line Pid = spawn_link(fun() -> other_process(Self) end), + Pid = spawn_link(fun() -> other_process(Self) end), receive {go_ahead,Pid} -> ok end, - ?line Own = {my,own,message}, + Own = {my,own,message}, - ?line {messages,[Own]} = process_info(Pid, messages), + {messages,[Own]} = process_info(Pid, messages), - ?line Garbage = kb_128(), - ?line MsgA = {a,Garbage}, - ?line MsgB = {b,Garbage}, - ?line MsgC = {c,Garbage}, - ?line MsgD = {d,Garbage}, - ?line MsgE = {e,Garbage}, - - ?line Pid ! MsgA, - ?line {messages,[Own,MsgA]} = process_info(Pid, messages), - ?line Pid ! MsgB, - ?line {messages,[Own,MsgA,MsgB]} = process_info(Pid, messages), - ?line Pid ! MsgC, - ?line {messages,[Own,MsgA,MsgB,MsgC]} = process_info(Pid, messages), - ?line Pid ! MsgD, - ?line {messages,[Own,MsgA,MsgB,MsgC,MsgD]} = process_info(Pid, messages), - ?line Pid ! MsgE, - ?line {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} = process_info(Pid, messages), - ?line {memory,BytesOther} = process_info(Pid, memory), - ?line {memory,BytesSelf} = process_info(self(), memory), + Garbage = kb_128(), + MsgA = {a,Garbage}, + MsgB = {b,Garbage}, + MsgC = {c,Garbage}, + MsgD = {d,Garbage}, + MsgE = {e,Garbage}, + + Pid ! MsgA, + {messages,[Own,MsgA]} = process_info(Pid, messages), + Pid ! MsgB, + {messages,[Own,MsgA,MsgB]} = process_info(Pid, messages), + Pid ! MsgC, + {messages,[Own,MsgA,MsgB,MsgC]} = process_info(Pid, messages), + Pid ! 
MsgD, + {messages,[Own,MsgA,MsgB,MsgC,MsgD]} = process_info(Pid, messages), + Pid ! MsgE, + {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} = process_info(Pid, messages), + {memory,BytesOther} = process_info(Pid, memory), + {memory,BytesSelf} = process_info(self(), memory), io:format("Memory ~p: ~p\n", [Pid,BytesOther]), io:format("Memory ~p (self): ~p\n", [self(),BytesSelf]), [Own,MsgA,MsgB,MsgC,MsgD,MsgE] = All, - ?line Pid ! {self(),empty}, - ?line receive + Pid ! {self(),empty}, + receive empty -> ok end, - ?line {messages,[]} = process_info(Pid, messages), + {messages,[]} = process_info(Pid, messages), - ?line {min_heap_size, 233} = process_info(Pid, min_heap_size), - ?line {min_bin_vheap_size, 46368} = process_info(Pid, min_bin_vheap_size), + {min_heap_size, 233} = process_info(Pid, min_heap_size), + {min_bin_vheap_size, 46422} = process_info(Pid, min_bin_vheap_size), - ?line Pid ! stop, + Pid ! stop, ok. process_info_other_dist_msg(Config) when is_list(Config) -> @@ -560,52 +560,51 @@ process_info_other_dist_msg(Config) when is_list(Config) -> %% Check that process_info can handle messages that have not been %% decoded yet. %% - ?line {ok, Node} = start_node(Config), - ?line Self = self(), - ?line Pid = spawn_link(fun() -> other_process(Self) end), - ?line receive {go_ahead,Pid} -> ok end, + {ok, Node} = start_node(Config), + Self = self(), + Pid = spawn_link(fun() -> other_process(Self) end), + receive {go_ahead,Pid} -> ok end, - ?line Own = {my,own,message}, + Own = {my,own,message}, - ?line {messages,[Own]} = process_info(Pid, messages), - ?line Garbage = kb_128(), - ?line MsgA = {a,self(),Garbage}, - ?line MsgB = {b,self(),Garbage}, - ?line MsgC = {c,self(),Garbage}, - ?line MsgD = {d,self(),Garbage}, - ?line MsgE = {e,self(),Garbage}, + {messages,[Own]} = process_info(Pid, messages), + Garbage = kb_128(), + MsgA = {a,self(),Garbage}, + MsgB = {b,self(),Garbage}, + MsgC = {c,self(),Garbage}, + MsgD = {d,self(),Garbage}, + MsgE = {e,self(),Garbage}, %% We don't want the other process to decode messages itself %% therefore we suspend it. - ?line true = erlang:suspend_process(Pid), - ?line spawn_link(Node, fun () -> - Pid ! MsgA, - Pid ! MsgB, - Pid ! MsgC, - Self ! check_abc - end), - ?line receive check_abc -> ok end, - ?line [{status,suspended}, - {messages,[Own,MsgA,MsgB,MsgC]}, - {status,suspended}]= process_info(Pid, [status,messages,status]), - ?line spawn_link(Node, fun () -> - Pid ! MsgD, - Pid ! MsgE, - Self ! check_de - end), - ?line receive check_de -> ok end, - ?line {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} - = process_info(Pid, messages), - ?line true = erlang:resume_process(Pid), - ?line Pid ! {self(), get_all_messages}, - ?line receive + true = erlang:suspend_process(Pid), + spawn_link(Node, fun () -> + Pid ! MsgA, + Pid ! MsgB, + Pid ! MsgC, + Self ! check_abc + end), + receive check_abc -> ok end, + [{status,suspended}, + {messages,[Own,MsgA,MsgB,MsgC]}, + {status,suspended}]= process_info(Pid, [status,messages,status]), + spawn_link(Node, fun () -> + Pid ! MsgD, + Pid ! MsgE, + Self ! check_de + end), + receive check_de -> ok end, + {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} = process_info(Pid, messages), + true = erlang:resume_process(Pid), + Pid ! {self(), get_all_messages}, + receive {all_messages, AllMsgs} -> - ?line All = AllMsgs + All = AllMsgs end, - ?line {messages,[]} = process_info(Pid, messages), - ?line Pid ! stop, - ?line stop_node(Node), - ?line ok. + {messages,[]} = process_info(Pid, messages), + Pid ! stop, + stop_node(Node), + ok. 
other_process(Parent) -> @@ -652,38 +651,36 @@ process_info_2_list(doc) -> process_info_2_list(suite) -> []; process_info_2_list(Config) when is_list(Config) -> - ?line Proc = spawn(fun () -> - receive after infinity -> ok end end), + Proc = spawn(fun () -> receive after infinity -> ok end end), register(process_SUITE_process_info_2_list1, self()), register(process_SUITE_process_info_2_list2, Proc), - ?line erts_debug:set_internal_state(available_internal_state,true), - ?line AllArgs = erts_debug:get_internal_state(process_info_args), - ?line A1 = lists:sort(AllArgs) ++ [status] ++ lists:reverse(AllArgs), + erts_debug:set_internal_state(available_internal_state,true), + AllArgs = erts_debug:get_internal_state(process_info_args), + A1 = lists:sort(AllArgs) ++ [status] ++ lists:reverse(AllArgs), %% Verify that argument is accepted as single atom - ?line lists:foreach(fun (A) -> - ?line {A, _} = process_info(Proc, A), - ?line {A, _} = process_info(self(), A) - end, - A1), + lists:foreach(fun (A) -> + {A, _} = process_info(Proc, A), + {A, _} = process_info(self(), A) + end, A1), %% Verify that order is preserved - ?line ok = chk_pi_order(process_info(self(), A1), A1), - ?line ok = chk_pi_order(process_info(Proc, A1), A1), + ok = chk_pi_order(process_info(self(), A1), A1), + ok = chk_pi_order(process_info(Proc, A1), A1), %% Small arg list - ?line A2 = [status, stack_size, trap_exit, priority], - ?line [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}] + A2 = [status, stack_size, trap_exit, priority], + [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}] = process_info(Proc, A2), - ?line [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}] + [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}] = process_info(self(), A2), %% Huge arg list (note values are shared) - ?line A3 = lists:duplicate(5000,backtrace), - ?line V3 = process_info(Proc, A3), - ?line 5000 = length(V3), - ?line lists:foreach(fun ({backtrace, _}) -> ok end, V3), - ?line ok. + A3 = lists:duplicate(5000,backtrace), + V3 = process_info(Proc, A3), + 5000 = length(V3), + lists:foreach(fun ({backtrace, _}) -> ok end, V3), + ok. process_info_lock_reschedule(doc) -> []; @@ -692,43 +689,37 @@ process_info_lock_reschedule(suite) -> process_info_lock_reschedule(Config) when is_list(Config) -> %% We need a process that is running and an item that requires %% process_info to take the main process lock. 
- ?line Target1 = spawn_link(fun tok_loop/0), - ?line Name1 = process_info_lock_reschedule_running, - ?line register(Name1, Target1), - ?line Target2 = spawn_link(fun () -> receive after infinity -> ok end end), - ?line Name2 = process_info_lock_reschedule_waiting, - ?line register(Name2, Target2), - ?line PI = fun(_) -> - ?line erlang:yield(), - ?line [{registered_name, Name1}] - = process_info(Target1, [registered_name]), - ?line [{registered_name, Name2}] - = process_info(Target2, [registered_name]), - ?line erlang:yield(), - ?line {registered_name, Name1} - = process_info(Target1, registered_name), - ?line {registered_name, Name2} - = process_info(Target2, registered_name), - ?line erlang:yield(), - ?line [{registered_name, Name1}| _] - = process_info(Target1), - ?line [{registered_name, Name2}| _] - = process_info(Target2) - end, - ?line lists:foreach(PI, lists:seq(1,1000)), + Target1 = spawn_link(fun tok_loop/0), + Name1 = process_info_lock_reschedule_running, + register(Name1, Target1), + Target2 = spawn_link(fun () -> receive after infinity -> ok end end), + Name2 = process_info_lock_reschedule_waiting, + register(Name2, Target2), + PI = fun(_) -> + erlang:yield(), + [{registered_name, Name1}] = process_info(Target1, [registered_name]), + [{registered_name, Name2}] = process_info(Target2, [registered_name]), + erlang:yield(), + {registered_name, Name1} = process_info(Target1, registered_name), + {registered_name, Name2} = process_info(Target2, registered_name), + erlang:yield(), + [{registered_name, Name1}| _] = process_info(Target1), + [{registered_name, Name2}| _] = process_info(Target2) + end, + lists:foreach(PI, lists:seq(1,1000)), %% Make sure Target1 still is willing to "tok loop" - ?line case process_info(Target1, status) of - {status, OkStatus} when OkStatus == runnable; - OkStatus == running; - OkStatus == garbage_collecting -> - ?line unlink(Target1), - ?line unlink(Target2), - ?line exit(Target1, bang), - ?line exit(Target2, bang), - ?line OkStatus; - {status, BadStatus} -> - ?line ?t:fail(BadStatus) - end. + case process_info(Target1, status) of + {status, OkStatus} when OkStatus == runnable; + OkStatus == running; + OkStatus == garbage_collecting -> + unlink(Target1), + unlink(Target2), + exit(Target1, bang), + exit(Target2, bang), + OkStatus; + {status, BadStatus} -> + ?t:fail(BadStatus) + end. pi_loop(_Name, _Pid, 0) -> ok; @@ -741,50 +732,50 @@ process_info_lock_reschedule2(doc) -> process_info_lock_reschedule2(suite) -> []; process_info_lock_reschedule2(Config) when is_list(Config) -> - ?line Parent = self(), - ?line Fun = fun () -> - receive {go, Name, Pid} -> ok end, - pi_loop(Name, Pid, 10000), - Parent ! {done, self()}, - receive after infinity -> ok end - end, - ?line P1 = spawn_link(Fun), - ?line N1 = process_info_lock_reschedule2_1, - ?line true = register(N1, P1), - ?line P2 = spawn_link(Fun), - ?line N2 = process_info_lock_reschedule2_2, - ?line true = register(N2, P2), - ?line P3 = spawn_link(Fun), - ?line N3 = process_info_lock_reschedule2_3, - ?line true = register(N3, P3), - ?line P4 = spawn_link(Fun), - ?line N4 = process_info_lock_reschedule2_4, - ?line true = register(N4, P4), - ?line P5 = spawn_link(Fun), - ?line N5 = process_info_lock_reschedule2_5, - ?line true = register(N5, P5), - ?line P6 = spawn_link(Fun), - ?line N6 = process_info_lock_reschedule2_6, - ?line true = register(N6, P6), - ?line P1 ! {go, N2, P2}, - ?line P2 ! {go, N1, P1}, - ?line P3 ! {go, N1, P1}, - ?line P4 ! {go, N1, P1}, - ?line P5 ! {go, N6, P6}, - ?line P6 ! 
{go, N5, P5}, - ?line receive {done, P1} -> ok end, - ?line receive {done, P2} -> ok end, - ?line receive {done, P3} -> ok end, - ?line receive {done, P4} -> ok end, - ?line receive {done, P5} -> ok end, - ?line receive {done, P6} -> ok end, - ?line unlink(P1), exit(P1, bang), - ?line unlink(P2), exit(P2, bang), - ?line unlink(P3), exit(P3, bang), - ?line unlink(P4), exit(P4, bang), - ?line unlink(P5), exit(P5, bang), - ?line unlink(P6), exit(P6, bang), - ?line ok. + Parent = self(), + Fun = fun () -> + receive {go, Name, Pid} -> ok end, + pi_loop(Name, Pid, 10000), + Parent ! {done, self()}, + receive after infinity -> ok end + end, + P1 = spawn_link(Fun), + N1 = process_info_lock_reschedule2_1, + true = register(N1, P1), + P2 = spawn_link(Fun), + N2 = process_info_lock_reschedule2_2, + true = register(N2, P2), + P3 = spawn_link(Fun), + N3 = process_info_lock_reschedule2_3, + true = register(N3, P3), + P4 = spawn_link(Fun), + N4 = process_info_lock_reschedule2_4, + true = register(N4, P4), + P5 = spawn_link(Fun), + N5 = process_info_lock_reschedule2_5, + true = register(N5, P5), + P6 = spawn_link(Fun), + N6 = process_info_lock_reschedule2_6, + true = register(N6, P6), + P1 ! {go, N2, P2}, + P2 ! {go, N1, P1}, + P3 ! {go, N1, P1}, + P4 ! {go, N1, P1}, + P5 ! {go, N6, P6}, + P6 ! {go, N5, P5}, + receive {done, P1} -> ok end, + receive {done, P2} -> ok end, + receive {done, P3} -> ok end, + receive {done, P4} -> ok end, + receive {done, P5} -> ok end, + receive {done, P6} -> ok end, + unlink(P1), exit(P1, bang), + unlink(P2), exit(P2, bang), + unlink(P3), exit(P3, bang), + unlink(P4), exit(P4, bang), + unlink(P5), exit(P5, bang), + unlink(P6), exit(P6, bang), + ok. many_args(0,_B,_C,_D,_E,_F,_G,_H,_I,_J) -> ok; @@ -802,120 +793,115 @@ process_info_lock_reschedule3(suite) -> process_info_lock_reschedule3(Config) when is_list(Config) -> %% We need a process that is running and an item that requires %% process_info to take the main process lock. - ?line Target1 = spawn_link(fun tok_loop/0), - ?line Name1 = process_info_lock_reschedule_running, - ?line register(Name1, Target1), - ?line Target2 = spawn_link(fun () -> receive after infinity -> ok end end), - ?line Name2 = process_info_lock_reschedule_waiting, - ?line register(Name2, Target2), - ?line PI = fun(N) -> - case N rem 10 of - 0 -> erlang:yield(); - _ -> ok - end, - ?line do_pi_msg_len({proc, Target1}, - {arg, message_queue_len}) - end, - ?line many_args(100000,1,2,3,4,5,6,7,8,9), - ?line lists:foreach(PI, lists:seq(1,1000000)), + Target1 = spawn_link(fun tok_loop/0), + Name1 = process_info_lock_reschedule_running, + register(Name1, Target1), + Target2 = spawn_link(fun () -> receive after infinity -> ok end end), + Name2 = process_info_lock_reschedule_waiting, + register(Name2, Target2), + PI = fun(N) -> + case N rem 10 of + 0 -> erlang:yield(); + _ -> ok + end, + do_pi_msg_len({proc, Target1}, + {arg, message_queue_len}) + end, + many_args(100000,1,2,3,4,5,6,7,8,9), + lists:foreach(PI, lists:seq(1,1000000)), %% Make sure Target1 still is willing to "tok loop" - ?line case process_info(Target1, status) of + case process_info(Target1, status) of {status, OkStatus} when OkStatus == runnable; OkStatus == running; OkStatus == garbage_collecting -> - ?line unlink(Target1), - ?line unlink(Target2), - ?line exit(Target1, bang), - ?line exit(Target2, bang), - ?line OkStatus; + unlink(Target1), + unlink(Target2), + exit(Target1, bang), + exit(Target2, bang), + OkStatus; {status, BadStatus} -> - ?line ?t:fail(BadStatus) + ?t:fail(BadStatus) end. 
process_status_exiting(Config) when is_list(Config) -> %% Make sure that erts_debug:get_internal_state({process_status,P}) %% returns exiting if it is in status P_EXITING. - ?line erts_debug:set_internal_state(available_internal_state,true), - ?line Prio = process_flag(priority, max), - ?line P = spawn_opt(fun () -> receive after infinity -> ok end end, + erts_debug:set_internal_state(available_internal_state,true), + Prio = process_flag(priority, max), + P = spawn_opt(fun () -> receive after infinity -> ok end end, [{priority, normal}]), - ?line erlang:yield(), + erlang:yield(), %% The tok_loop processes are here to make it hard for the exiting %% process to be scheduled in for exit... - ?line TokLoops = lists:map(fun (_) -> - spawn_opt(fun tok_loop/0, - [link,{priority, high}]) - end, - lists:seq(1, erlang:system_info(schedulers_online))), - ?line exit(P, boom), - ?line wait_until( - fun () -> - exiting =:= erts_debug:get_internal_state({process_status,P}) - end), - ?line lists:foreach(fun (Tok) -> unlink(Tok), exit(Tok,bang) end, TokLoops), - ?line process_flag(priority, Prio), - ?line ok. + TokLoops = lists:map(fun (_) -> + spawn_opt(fun tok_loop/0, + [link,{priority, high}]) + end, lists:seq(1, erlang:system_info(schedulers_online))), + exit(P, boom), + wait_until(fun() -> + exiting =:= erts_debug:get_internal_state({process_status,P}) + end), + lists:foreach(fun (Tok) -> unlink(Tok), exit(Tok,bang) end, TokLoops), + process_flag(priority, Prio), + ok. otp_4725(Config) when is_list(Config) -> - ?line Tester = self(), - ?line Ref1 = make_ref(), - ?line Pid1 = spawn_opt(fun () -> - Tester ! {Ref1, process_info(self())}, - receive - Ref1 -> bye - end - end, - [link, - {priority, max}, - {fullsweep_after, 600}]), - ?line receive - {Ref1, ProcInfo1A} -> - ?line ProcInfo1B = process_info(Pid1), - ?line Pid1 ! Ref1, - ?line check_proc_infos(ProcInfo1A, ProcInfo1B) - end, - ?line Ref2 = make_ref(), - ?line Pid2 = spawn_opt(fun () -> - Tester ! {Ref2, process_info(self())}, - receive - Ref2 -> bye - end - end, - []), - ?line receive - {Ref2, ProcInfo2A} -> - ?line ProcInfo2B = process_info(Pid2), - ?line Pid2 ! Ref2, - ?line check_proc_infos(ProcInfo2A, ProcInfo2B) - end, - ?line ok. + Tester = self(), + Ref1 = make_ref(), + Pid1 = spawn_opt(fun () -> + Tester ! {Ref1, process_info(self())}, + receive + Ref1 -> bye + end + end, [link, {priority, max}, {fullsweep_after, 600}]), + receive + {Ref1, ProcInfo1A} -> + ProcInfo1B = process_info(Pid1), + Pid1 ! Ref1, + check_proc_infos(ProcInfo1A, ProcInfo1B) + end, + Ref2 = make_ref(), + Pid2 = spawn_opt(fun () -> + Tester ! {Ref2, process_info(self())}, + receive + Ref2 -> bye + end + end, + []), + receive + {Ref2, ProcInfo2A} -> + ProcInfo2B = process_info(Pid2), + Pid2 ! Ref2, + check_proc_infos(ProcInfo2A, ProcInfo2B) + end, + ok. 
check_proc_infos(A, B) -> - ?line IC = lists:keysearch(initial_call, 1, A), - ?line IC = lists:keysearch(initial_call, 1, B), + IC = lists:keysearch(initial_call, 1, A), + IC = lists:keysearch(initial_call, 1, B), - ?line L = lists:keysearch(links, 1, A), - ?line L = lists:keysearch(links, 1, B), + L = lists:keysearch(links, 1, A), + L = lists:keysearch(links, 1, B), - ?line D = lists:keysearch(dictionary, 1, A), - ?line D = lists:keysearch(dictionary, 1, B), + D = lists:keysearch(dictionary, 1, A), + D = lists:keysearch(dictionary, 1, B), - ?line TE = lists:keysearch(trap_exit, 1, A), - ?line TE = lists:keysearch(trap_exit, 1, B), + TE = lists:keysearch(trap_exit, 1, A), + TE = lists:keysearch(trap_exit, 1, B), - ?line EH = lists:keysearch(error_handler, 1, A), - ?line EH = lists:keysearch(error_handler, 1, B), + EH = lists:keysearch(error_handler, 1, A), + EH = lists:keysearch(error_handler, 1, B), - ?line P = lists:keysearch(priority, 1, A), - ?line P = lists:keysearch(priority, 1, B), + P = lists:keysearch(priority, 1, A), + P = lists:keysearch(priority, 1, B), - ?line GL = lists:keysearch(group_leader, 1, A), - ?line GL = lists:keysearch(group_leader, 1, B), + GL = lists:keysearch(group_leader, 1, A), + GL = lists:keysearch(group_leader, 1, B), - ?line GC = lists:keysearch(garbage_collection, 1, A), - ?line GC = lists:keysearch(garbage_collection, 1, B), + GC = lists:keysearch(garbage_collection, 1, A), + GC = lists:keysearch(garbage_collection, 1, B), - ?line ok. + ok. %% Dummies. @@ -928,18 +914,18 @@ stop_spawner() -> %% Tests erlang:bump_reductions/1. bump_reductions(Config) when is_list(Config) -> - ?line erlang:garbage_collect(), - ?line receive after 1 -> ok end, % Clear reductions. - ?line {reductions,R1} = process_info(self(), reductions), - ?line true = erlang:bump_reductions(100), - ?line {reductions,R2} = process_info(self(), reductions), - ?line case R2-R1 of + erlang:garbage_collect(), + receive after 1 -> ok end, % Clear reductions. + {reductions,R1} = process_info(self(), reductions), + true = erlang:bump_reductions(100), + {reductions,R2} = process_info(self(), reductions), + case R2-R1 of Diff when Diff < 100 -> - ?line ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]), - ?line test_server:fail({small_diff, Diff}); + ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]), + test_server:fail({small_diff, Diff}); Diff when Diff > 110 -> - ?line ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]), - ?line test_server:fail({big_diff, Diff}); + ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]), + test_server:fail({big_diff, Diff}); Diff -> io:format("~p\n", [Diff]), ok @@ -949,11 +935,11 @@ bump_reductions(Config) when is_list(Config) -> bump_big(R2, 16#08000000). bump_big(Prev, Limit) -> - ?line true = erlang:bump_reductions(100000), %Limited to CONTEXT_REDUCTIONS. - ?line case process_info(self(), reductions) of + true = erlang:bump_reductions(100000), %Limited to CONTEXT_REDUCTIONS. 
+ case process_info(self(), reductions) of {reductions,Big} when is_integer(Big), Big > Limit -> - ?line erlang:garbage_collect(), - ?line io:format("~p\n", [Big]); + erlang:garbage_collect(), + io:format("~p\n", [Big]); {reductions,R} when is_integer(R), R > Prev -> bump_big(R, Limit) end, @@ -964,34 +950,34 @@ bump_big(Prev, Limit) -> low_prio(Config) when is_list(Config) -> case erlang:system_info(schedulers_online) of 1 -> - ?line ok = low_prio_test(Config); + ok = low_prio_test(Config); _ -> - ?line erlang:system_flag(multi_scheduling, block), - ?line ok = low_prio_test(Config), - ?line erlang:system_flag(multi_scheduling, unblock), - ?line {comment, + erlang:system_flag(multi_scheduling, block), + ok = low_prio_test(Config), + erlang:system_flag(multi_scheduling, unblock), + {comment, "Test not written for SMP runtime system. " "Multi scheduling blocked during test."} end. low_prio_test(Config) when is_list(Config) -> - ?line process_flag(trap_exit, true), - ?line S = spawn_link(?MODULE, prio_server, [0, 0]), - ?line PCs = spawn_prio_clients(S, erlang:system_info(schedulers_online)), - ?line timer:sleep(2000), - ?line lists:foreach(fun (P) -> exit(P, kill) end, PCs), - ?line S ! exit, - ?line receive {'EXIT', S, {A, B}} -> check_prio(A, B) end, + process_flag(trap_exit, true), + S = spawn_link(?MODULE, prio_server, [0, 0]), + PCs = spawn_prio_clients(S, erlang:system_info(schedulers_online)), + timer:sleep(2000), + lists:foreach(fun (P) -> exit(P, kill) end, PCs), + S ! exit, + receive {'EXIT', S, {A, B}} -> check_prio(A, B) end, ok. check_prio(A, B) -> - ?line Prop = A/B, - ?line ok = io:format("Low=~p, High=~p, Prop=~p\n", [A, B, Prop]), + Prop = A/B, + ok = io:format("Low=~p, High=~p, Prop=~p\n", [A, B, Prop]), %% It isn't 1/8, it's more like 0.3, but let's check that %% the low-prio processes get some little chance to run at all. - ?line true = (Prop < 1.0), - ?line true = (Prop > 1/32). + true = (Prop < 1.0), + true = (Prop > 1/32). prio_server(A, B) -> receive @@ -1051,25 +1037,25 @@ yield(Config) when is_list(Config) -> end. yield_test() -> - ?line erlang:garbage_collect(), - ?line receive after 1 -> ok end, % Clear reductions. - ?line SC = schedcnt(start), - ?line {reductions, R1} = process_info(self(), reductions), - ?line {ok, true} = call_yield(middle), - ?line true = call_yield(final), - ?line true = call_yield(), - ?line true = apply(erlang, yield, []), - ?line {reductions, R2} = process_info(self(), reductions), - ?line Schedcnt = schedcnt(stop, SC), - ?line case {R2-R1, Schedcnt} of - {Diff, 4} when Diff < 30 -> - ?line ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w", - [R1, R2, Schedcnt]); - {Diff, _} -> - ?line ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w", - [R1, R2, Schedcnt]), - ?line test_server:fail({measurement_error, Diff, Schedcnt}) - end. + erlang:garbage_collect(), + receive after 1 -> ok end, % Clear reductions. + SC = schedcnt(start), + {reductions, R1} = process_info(self(), reductions), + {ok, true} = call_yield(middle), + true = call_yield(final), + true = call_yield(), + true = apply(erlang, yield, []), + {reductions, R2} = process_info(self(), reductions), + Schedcnt = schedcnt(stop, SC), + case {R2-R1, Schedcnt} of + {Diff, 4} when Diff < 30 -> + ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w", + [R1, R2, Schedcnt]); + {Diff, _} -> + ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w", + [R1, R2, Schedcnt]), + test_server:fail({measurement_error, Diff, Schedcnt}) + end. call_yield() -> erlang:yield(). 
@@ -1108,61 +1094,61 @@ schedcnt(stop, {Ref, Pid}) when is_reference(Ref), is_pid(Pid) -> yield2(doc) -> []; yield2(suite) -> []; yield2(Config) when is_list(Config) -> - ?line Me = self(), - ?line Go = make_ref(), - ?line RedDiff = make_ref(), - ?line Done = make_ref(), - ?line P = spawn(fun () -> - receive Go -> ok end, - {reductions, R1} = process_info(self(), reductions), - {ok, true} = call_yield(middle), - true = call_yield(final), - true = call_yield(), - true = apply(erlang, yield, []), - {reductions, R2} = process_info(self(), reductions), - Me ! {RedDiff, R2 - R1}, - exit(Done) - end), - ?line erlang:yield(), - - ?line 1 = erlang:trace(P, true, [running, procs, {tracer, self()}]), - - ?line P ! Go, + Me = self(), + Go = make_ref(), + RedDiff = make_ref(), + Done = make_ref(), + P = spawn(fun () -> + receive Go -> ok end, + {reductions, R1} = process_info(self(), reductions), + {ok, true} = call_yield(middle), + true = call_yield(final), + true = call_yield(), + true = apply(erlang, yield, []), + {reductions, R2} = process_info(self(), reductions), + Me ! {RedDiff, R2 - R1}, + exit(Done) + end), + erlang:yield(), + + 1 = erlang:trace(P, true, [running, procs, {tracer, self()}]), + + P ! Go, %% receive Go -> ok end, - ?line {trace, P, in, _} = next_tmsg(P), + {trace, P, in, _} = next_tmsg(P), %% {ok, true} = call_yield(middle), - ?line {trace, P, out, _} = next_tmsg(P), - ?line {trace, P, in, _} = next_tmsg(P), + {trace, P, out, _} = next_tmsg(P), + {trace, P, in, _} = next_tmsg(P), %% true = call_yield(final), - ?line {trace, P, out, _} = next_tmsg(P), - ?line {trace, P, in, _} = next_tmsg(P), + {trace, P, out, _} = next_tmsg(P), + {trace, P, in, _} = next_tmsg(P), %% true = call_yield(), - ?line {trace, P, out, _} = next_tmsg(P), - ?line {trace, P, in, _} = next_tmsg(P), + {trace, P, out, _} = next_tmsg(P), + {trace, P, in, _} = next_tmsg(P), %% true = apply(erlang, yield, []), - ?line {trace, P, out, _} = next_tmsg(P), - ?line {trace, P, in, _} = next_tmsg(P), + {trace, P, out, _} = next_tmsg(P), + {trace, P, in, _} = next_tmsg(P), %% exit(Done) - ?line {trace, P, exit, Done} = next_tmsg(P), + {trace, P, exit, Done} = next_tmsg(P), - ?line receive + receive {RedDiff, Reductions} when Reductions < 30, Reductions > 0 -> io:format("Reductions = ~p~n", [Reductions]), - ?line ok; + ok; {RedDiff, Reductions} -> - ?line ?t:fail({unexpected_reduction_count, Reductions}) + ?t:fail({unexpected_reduction_count, Reductions}) end, - ?line none = next_tmsg(P), + none = next_tmsg(P), - ?line ok. + ok. next_tmsg(Pid) -> receive @@ -1178,19 +1164,19 @@ next_tmsg(Pid) -> bad_register(Config) when is_list(Config) -> Name = a_long_and_unused_name, - ?line {'EXIT',{badarg,_}} = (catch register({bad,name}, self())), - ?line fail_register(undefined, self()), - ?line fail_register([bad,name], self()), + {'EXIT',{badarg,_}} = (catch register({bad,name}, self())), + fail_register(undefined, self()), + fail_register([bad,name], self()), - ?line {Dead,Mref} = spawn_monitor(fun() -> true end), + {Dead,Mref} = spawn_monitor(fun() -> true end), receive {'DOWN',Mref,process,Dead,_} -> ok end, - ?line fail_register(Name, Dead), - ?line fail_register(Name, make_ref()), - ?line fail_register(Name, []), - ?line fail_register(Name, {bad,process}), - ?line fail_register(Name, <<>>), + fail_register(Name, Dead), + fail_register(Name, make_ref()), + fail_register(Name, []), + fail_register(Name, {bad,process}), + fail_register(Name, <<>>), ok. 
fail_register(Name, Process) -> @@ -1201,50 +1187,50 @@ fail_register(Name, Process) -> garbage_collect(doc) -> []; garbage_collect(suite) -> []; garbage_collect(Config) when is_list(Config) -> - ?line Prio = process_flag(priority, high), - ?line true = erlang:garbage_collect(), - ?line TokLoopers = lists:map(fun (_) -> - spawn_opt(fun tok_loop/0, - [{priority, low}, link]) - end, - lists:seq(1, 10)), - ?line lists:foreach(fun (Pid) -> - ?line Mon = erlang:monitor(process, Pid), - ?line DownBefore = receive - {'DOWN', Mon, _, _, _} -> - ?line true - after 0 -> - ?line false - end, - ?line GC = erlang:garbage_collect(Pid), - ?line DownAfter = receive - {'DOWN', Mon, _, _, _} -> - ?line true - after 0 -> - ?line false - end, - ?line true = erlang:demonitor(Mon), - ?line case {DownBefore, DownAfter} of - {true, _} -> ?line false = GC; - {false, false} -> ?line true = GC; - _ -> ?line GC - end - end, - processes()), - ?line lists:foreach(fun (Pid) -> - unlink(Pid), - exit(Pid, bang) - end, TokLoopers), - ?line process_flag(priority, Prio), - ?line ok. + Prio = process_flag(priority, high), + true = erlang:garbage_collect(), + + TokLoopers = lists:map(fun (_) -> + spawn_opt(fun tok_loop/0, [{priority, low}, link]) + end, lists:seq(1, 10)), + + lists:foreach(fun (Pid) -> + Mon = erlang:monitor(process, Pid), + DownBefore = receive + {'DOWN', Mon, _, _, _} -> + true + after 0 -> + false + end, + GC = erlang:garbage_collect(Pid), + DownAfter = receive + {'DOWN', Mon, _, _, _} -> + true + after 0 -> + false + end, + true = erlang:demonitor(Mon), + case {DownBefore, DownAfter} of + {true, _} -> false = GC; + {false, false} -> true = GC; + _ -> GC + end + end, processes()), + + lists:foreach(fun (Pid) -> + unlink(Pid), + exit(Pid, bang) + end, TokLoopers), + process_flag(priority, Prio), + ok. process_info_messages(doc) -> ["This used to cause the nofrag emulator to dump core"]; process_info_messages(suite) -> []; process_info_messages(Config) when is_list(Config) -> - ?line process_info_messages_test(), - ?line ok. + process_info_messages_test(), + ok. process_info_messages_loop(0) -> ok; process_info_messages_loop(N) -> process_info_messages_loop(N-1). @@ -1259,43 +1245,42 @@ process_info_messages_send_my_msgs_to(Rcvr) -> end. process_info_messages_test() -> - ?line Go = make_ref(), - ?line Done = make_ref(), - ?line Rcvr = self(), - ?line Rcvr2 = spawn_link(fun () -> - receive {Go, Rcvr} -> ok end, - garbage_collect(), - Rcvr ! {Done, self()} - end), - ?line Sndrs = lists:map( - fun (_) -> - spawn_link(fun () -> - Rcvr ! {Go, self()}, - receive {Go, Rcvr} -> ok end, - BigData = lists:seq(1, 1000), - Rcvr ! BigData, - Rcvr ! BigData, - Rcvr ! BigData, - Rcvr ! {Done, self()} - end) - end, - lists:seq(1, 10)), - ?line lists:foreach(fun (Sndr) -> receive {Go, Sndr} -> ok end end, + Go = make_ref(), + Done = make_ref(), + Rcvr = self(), + Rcvr2 = spawn_link(fun () -> + receive {Go, Rcvr} -> ok end, + garbage_collect(), + Rcvr ! {Done, self()} + end), + Sndrs = lists:map( + fun (_) -> + spawn_link(fun () -> + Rcvr ! {Go, self()}, + receive {Go, Rcvr} -> ok end, + BigData = lists:seq(1, 1000), + Rcvr ! BigData, + Rcvr ! BigData, + Rcvr ! BigData, + Rcvr ! {Done, self()} + end) + end, lists:seq(1, 10)), + lists:foreach(fun (Sndr) -> receive {Go, Sndr} -> ok end end, Sndrs), - ?line garbage_collect(), - ?line erlang:yield(), - ?line lists:foreach(fun (Sndr) -> Sndr ! 
{Go, self()} end, Sndrs), - ?line process_info_messages_loop(100000000), - ?line Msgs = process_info(self(), messages), - ?line lists:foreach(fun (Sndr) -> receive {Done, Sndr} -> ok end end, + garbage_collect(), + erlang:yield(), + lists:foreach(fun (Sndr) -> Sndr ! {Go, self()} end, Sndrs), + process_info_messages_loop(100000000), + Msgs = process_info(self(), messages), + lists:foreach(fun (Sndr) -> receive {Done, Sndr} -> ok end end, Sndrs), - ?line garbage_collect(), - ?line Rcvr2 ! Msgs, - ?line process_info_messages_send_my_msgs_to(Rcvr2), - ?line Rcvr2 ! {Go, self()}, - ?line garbage_collect(), - ?line receive {Done, Rcvr2} -> ok end, - ?line Msgs. + garbage_collect(), + Rcvr2 ! Msgs, + process_info_messages_send_my_msgs_to(Rcvr2), + Rcvr2 ! {Go, self()}, + garbage_collect(), + receive {Done, Rcvr2} -> ok end, + Msgs. chk_badarg(Fun) -> try Fun(), exit(no_badarg) catch error:badarg -> ok end. @@ -1305,76 +1290,72 @@ process_flag_badarg(doc) -> process_flag_badarg(suite) -> []; process_flag_badarg(Config) when is_list(Config) -> - ?line chk_badarg(fun () -> process_flag(gurka, banan) end), - ?line chk_badarg(fun () -> process_flag(trap_exit, gurka) end), - ?line chk_badarg(fun () -> process_flag(error_handler, 1) end), - ?line chk_badarg(fun () -> process_flag(min_heap_size, gurka) end), - ?line chk_badarg(fun () -> process_flag(min_bin_vheap_size, gurka) end), - ?line chk_badarg(fun () -> process_flag(min_bin_vheap_size, -1) end), - ?line chk_badarg(fun () -> process_flag(priority, 4711) end), - ?line chk_badarg(fun () -> process_flag(save_calls, hmmm) end), - ?line P= spawn_link(fun () -> receive die -> ok end end), - ?line chk_badarg(fun () -> process_flag(P, save_calls, hmmm) end), - ?line chk_badarg(fun () -> process_flag(gurka, save_calls, hmmm) end), - ?line P ! die, - ?line ok. + chk_badarg(fun () -> process_flag(gurka, banan) end), + chk_badarg(fun () -> process_flag(trap_exit, gurka) end), + chk_badarg(fun () -> process_flag(error_handler, 1) end), + chk_badarg(fun () -> process_flag(min_heap_size, gurka) end), + chk_badarg(fun () -> process_flag(min_bin_vheap_size, gurka) end), + chk_badarg(fun () -> process_flag(min_bin_vheap_size, -1) end), + chk_badarg(fun () -> process_flag(priority, 4711) end), + chk_badarg(fun () -> process_flag(save_calls, hmmm) end), + P= spawn_link(fun () -> receive die -> ok end end), + chk_badarg(fun () -> process_flag(P, save_calls, hmmm) end), + chk_badarg(fun () -> process_flag(gurka, save_calls, hmmm) end), + P ! die, + ok. -include_lib("stdlib/include/ms_transform.hrl"). otp_6237(doc) -> []; otp_6237(suite) -> []; otp_6237(Config) when is_list(Config) -> - ?line Slctrs = lists:map(fun (_) -> - spawn_link(fun () -> - otp_6237_select_loop() - end) - end, - lists:seq(1,5)), - ?line lists:foreach(fun (_) -> otp_6237_test() end, lists:seq(1, 100)), - ?line lists:foreach(fun (S) -> unlink(S),exit(S, kill) end, Slctrs), - ?line ok. + Slctrs = lists:map(fun (_) -> + spawn_link(fun () -> + otp_6237_select_loop() + end) + end, + lists:seq(1,5)), + lists:foreach(fun (_) -> otp_6237_test() end, lists:seq(1, 100)), + lists:foreach(fun (S) -> unlink(S),exit(S, kill) end, Slctrs), + ok. 
otp_6237_test() -> - ?line Parent = self(), - ?line Inited = make_ref(), - ?line Die = make_ref(), - ?line Pid = spawn_link(fun () -> - register(otp_6237,self()), - otp_6237 = ets:new(otp_6237, - [named_table, - ordered_set]), - ets:insert(otp_6237, - [{I,I} - || I <- lists:seq(1, 100)]), - %% Inserting a lot of bif timers - %% increase the possibility that - %% the test will fail when the - %% original cleanup order is used - lists:foreach( - fun (_) -> - erlang:send_after(1000000, - self(), - {a,b,c}) - end, - lists:seq(1,1000)), - Parent ! Inited, - receive Die -> bye end - end), - ?line receive - Inited -> ?line ok - end, - ?line Pid ! Die, + Parent = self(), + Inited = make_ref(), + Die = make_ref(), + Pid = spawn_link(fun () -> + register(otp_6237,self()), + otp_6237 = ets:new(otp_6237, + [named_table, + ordered_set]), + ets:insert(otp_6237, + [{I,I} + || I <- lists:seq(1, 100)]), + %% Inserting a lot of bif timers + %% increase the possibility that + %% the test will fail when the + %% original cleanup order is used + lists:foreach( fun (_) -> + erlang:send_after(1000000, self(), {a,b,c}) + end, lists:seq(1,1000)), + Parent ! Inited, + receive Die -> bye end + end), + receive + Inited -> ok + end, + Pid ! Die, otp_6237_whereis_loop(). otp_6237_whereis_loop() -> - ?line case whereis(otp_6237) of + case whereis(otp_6237) of undefined -> - ?line otp_6237 = ets:new(otp_6237, + otp_6237 = ets:new(otp_6237, [named_table,ordered_set]), - ?line ets:delete(otp_6237), - ?line ok; + ets:delete(otp_6237), + ok; _ -> - ?line otp_6237_whereis_loop() + otp_6237_whereis_loop() end. otp_6237_select_loop() -> @@ -1382,9 +1363,8 @@ otp_6237_select_loop() -> otp_6237_select_loop(). - -define(NoTestProcs, 10000). --record(processes_bif_info, {min_start_reds, +-record(ptab_list_bif_info, {min_start_reds, tab_chunks, tab_chunks_size, tab_indices_per_red, @@ -1399,89 +1379,86 @@ processes_large_tab(doc) -> processes_large_tab(suite) -> []; processes_large_tab(Config) when is_list(Config) -> - ?line enable_internal_state(), - ?line MaxDbgLvl = 20, - ?line MinProcTabSize = 2*(1 bsl 15), - ?line ProcTabSize0 = 1000000, - ?line ProcTabSize1 = case {erlang:system_info(schedulers_online), - erlang:system_info(logical_processors)} of - {Schdlrs, Cpus} when is_integer(Cpus), - Schdlrs =< Cpus -> - ProcTabSize0; - _ -> - ProcTabSize0 div 4 - end, - ?line ProcTabSize2 = case erlang:system_info(debug_compiled) of - true -> ProcTabSize1 - 500000; - false -> ProcTabSize1 - end, + enable_internal_state(), + MaxDbgLvl = 20, + MinProcTabSize = 2*(1 bsl 15), + ProcTabSize0 = 1000000, + ProcTabSize1 = case {erlang:system_info(schedulers_online), + erlang:system_info(logical_processors)} of + {Schdlrs, Cpus} when is_integer(Cpus), + Schdlrs =< Cpus -> + ProcTabSize0; + _ -> + ProcTabSize0 div 4 + end, + ProcTabSize2 = case erlang:system_info(debug_compiled) of + true -> ProcTabSize1 - 500000; + false -> ProcTabSize1 + end, %% With high debug levels this test takes so long time that %% the connection times out; therefore, shrink the test on %% high debug levels. 
- ?line DbgLvl = case erts_debug:get_internal_state(processes_bif_info) of - #processes_bif_info{debug_level = Lvl} when Lvl > MaxDbgLvl -> + DbgLvl = case erts_debug:get_internal_state(processes_bif_info) of + #ptab_list_bif_info{debug_level = Lvl} when Lvl > MaxDbgLvl -> 20; - #processes_bif_info{debug_level = Lvl} when Lvl < 0 -> - ?line ?t:fail({debug_level, Lvl}); - #processes_bif_info{debug_level = Lvl} -> + #ptab_list_bif_info{debug_level = Lvl} when Lvl < 0 -> + ?t:fail({debug_level, Lvl}); + #ptab_list_bif_info{debug_level = Lvl} -> Lvl end, - ?line ProcTabSize3 = ProcTabSize2 - (1300000 * DbgLvl div MaxDbgLvl), - ?line ProcTabSize = case ProcTabSize3 < MinProcTabSize of + ProcTabSize3 = ProcTabSize2 - (1300000 * DbgLvl div MaxDbgLvl), + ProcTabSize = case ProcTabSize3 < MinProcTabSize of true -> MinProcTabSize; false -> ProcTabSize3 end, - ?line {ok, LargeNode} = start_node(Config, + {ok, LargeNode} = start_node(Config, "+P " ++ integer_to_list(ProcTabSize)), - ?line Res = rpc:call(LargeNode, ?MODULE, processes_bif_test, []), - ?line case rpc:call(LargeNode, + Res = rpc:call(LargeNode, ?MODULE, processes_bif_test, []), + case rpc:call(LargeNode, erts_debug, get_internal_state, [processes_bif_info]) of - #processes_bif_info{tab_chunks = Chunks} when is_integer(Chunks), + #ptab_list_bif_info{tab_chunks = Chunks} when is_integer(Chunks), Chunks > 1 -> ok; PBInfo -> ?t:fail(PBInfo) end, - ?line stop_node(LargeNode), - ?line chk_processes_bif_test_res(Res). + stop_node(LargeNode), + chk_processes_bif_test_res(Res). processes_default_tab(doc) -> []; processes_default_tab(suite) -> []; processes_default_tab(Config) when is_list(Config) -> - ?line {ok, DefaultNode} = start_node(Config, ""), - ?line Res = rpc:call(DefaultNode, ?MODULE, processes_bif_test, []), - ?line stop_node(DefaultNode), - ?line chk_processes_bif_test_res(Res). + {ok, DefaultNode} = start_node(Config, ""), + Res = rpc:call(DefaultNode, ?MODULE, processes_bif_test, []), + stop_node(DefaultNode), + chk_processes_bif_test_res(Res). processes_small_tab(doc) -> []; processes_small_tab(suite) -> []; processes_small_tab(Config) when is_list(Config) -> - ?line {ok, SmallNode} = start_node(Config, "+P 500"), - ?line Res = rpc:call(SmallNode, ?MODULE, processes_bif_test, []), - ?line PBInfo = rpc:call(SmallNode, - erts_debug, - get_internal_state, - [processes_bif_info]), - ?line stop_node(SmallNode), - ?line 1 = PBInfo#processes_bif_info.tab_chunks, - ?line chk_processes_bif_test_res(Res). + {ok, SmallNode} = start_node(Config, "+P 1024"), + Res = rpc:call(SmallNode, ?MODULE, processes_bif_test, []), + PBInfo = rpc:call(SmallNode, erts_debug, get_internal_state, [processes_bif_info]), + stop_node(SmallNode), + true = PBInfo#ptab_list_bif_info.tab_chunks < 10, + chk_processes_bif_test_res(Res). processes_this_tab(doc) -> []; processes_this_tab(suite) -> []; processes_this_tab(Config) when is_list(Config) -> - ?line chk_processes_bif_test_res(processes_bif_test()). + chk_processes_bif_test_res(processes_bif_test()). chk_processes_bif_test_res(ok) -> ok; chk_processes_bif_test_res({comment, _} = Comment) -> Comment; chk_processes_bif_test_res(Failure) -> ?t:fail(Failure). -print_processes_bif_info(#processes_bif_info{min_start_reds = MinStartReds, +print_processes_bif_info(#ptab_list_bif_info{min_start_reds = MinStartReds, tab_chunks = TabChunks, tab_chunks_size = TabChunksSize, tab_indices_per_red = TabIndPerRed, @@ -1575,26 +1552,26 @@ do_processes(WantReds) -> processes(). 
processes_bif_test() -> - ?line Tester = self(), - ?line enable_internal_state(), - ?line PBInfo = erts_debug:get_internal_state(processes_bif_info), - ?line print_processes_bif_info(PBInfo), - ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10, - ?line WillTrap = case PBInfo of - #processes_bif_info{tab_chunks = 1} -> - false; - #processes_bif_info{tab_chunks = Chunks, - tab_chunks_size = ChunksSize, - tab_indices_per_red = IndiciesPerRed - } -> - Chunks*ChunksSize >= IndiciesPerRed*WantReds - end, - ?line Processes = fun () -> - erts_debug:set_internal_state(reds_left,WantReds), - processes() - end, + Tester = self(), + enable_internal_state(), + PBInfo = erts_debug:get_internal_state(processes_bif_info), + print_processes_bif_info(PBInfo), + WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10, + WillTrap = case PBInfo of + #ptab_list_bif_info{tab_chunks = Chunks} when Chunks < 10 -> + false; %% Skip for small tables + #ptab_list_bif_info{tab_chunks = Chunks, + tab_chunks_size = ChunksSize, + tab_indices_per_red = IndiciesPerRed + } -> + Chunks*ChunksSize >= IndiciesPerRed*WantReds + end, + Processes = fun () -> + erts_debug:set_internal_state(reds_left,WantReds), + processes() + end, - ?line ok = do_processes_bif_test(WantReds, WillTrap, Processes), + ok = do_processes_bif_test(WantReds, WillTrap, Processes), case WillTrap of false -> @@ -1602,8 +1579,8 @@ processes_bif_test() -> true -> %% Do it again with a process suspended while %% in the processes/0 bif. - ?line erlang:system_flag(multi_scheduling, block), - ?line Suspendee = spawn_link(fun () -> + erlang:system_flag(multi_scheduling, block), + Suspendee = spawn_link(fun () -> Tester ! {suspend_me, self()}, Tester ! {self(), done, @@ -1613,179 +1590,160 @@ processes_bif_test() -> ok end end), - ?line receive {suspend_me, Suspendee} -> ok end, - ?line erlang:suspend_process(Suspendee), - ?line erlang:system_flag(multi_scheduling, unblock), + receive {suspend_me, Suspendee} -> ok end, + erlang:suspend_process(Suspendee), + erlang:system_flag(multi_scheduling, unblock), - ?line [{status,suspended}, - {current_function,{erlang,processes_trap,2}}] - = process_info(Suspendee, [status, current_function]), + [{status,suspended},{current_function,{erlang,ptab_list_continue,2}}] = + process_info(Suspendee, [status, current_function]), - ?line ok = do_processes_bif_test(WantReds, WillTrap, Processes), + ok = do_processes_bif_test(WantReds, WillTrap, Processes), - ?line erlang:resume_process(Suspendee), - ?line receive {Suspendee, done, _} -> ok end, - ?line unlink(Suspendee), - ?line exit(Suspendee, bang) + erlang:resume_process(Suspendee), + receive {Suspendee, done, _} -> ok end, + unlink(Suspendee), + exit(Suspendee, bang) end, case get(processes_bif_testcase_comment) of - undefined -> ?line ok; - Comment -> ?line {comment, Comment} + undefined -> ok; + Comment -> {comment, Comment} end. do_processes_bif_test(WantReds, DieTest, Processes) -> - ?line Tester = self(), - ?line SpawnProcesses = fun (Prio) -> - spawn_opt(?MODULE, - do_processes, - [WantReds], - [link, {priority, Prio}]) - end, - ?line Cleaner = spawn_link(fun () -> - process_flag(trap_exit, true), - Tester ! {cleaner_alive, self()}, - processes_bif_cleaner() - end), - ?line receive {cleaner_alive, Cleaner} -> ok end, + Tester = self(), + SpawnProcesses = fun (Prio) -> + spawn_opt(?MODULE, do_processes, [WantReds], [link, {priority, Prio}]) + end, + Cleaner = spawn_link(fun () -> + process_flag(trap_exit, true), + Tester ! 
{cleaner_alive, self()}, + processes_bif_cleaner() + end), + receive {cleaner_alive, Cleaner} -> ok end, try - ?line DoIt = make_ref(), - ?line GetGoing = make_ref(), - ?line {NoTestProcs, TestProcs} = spawn_initial_hangarounds(Cleaner), - ?line ?t:format("Testing with ~p processes~n", [NoTestProcs]), - ?line SpawnHangAround = fun () -> - spawn(?MODULE, - hangaround, - [Cleaner, new_hangaround]) - end, - ?line Killer = spawn_opt(fun () -> - Splt = NoTestProcs div 10, - {TP1, TP23} = lists:split(Splt, - TestProcs), - {TP2, TP3} = lists:split(Splt, TP23), - erlang:system_flag(multi_scheduling, - block), - Tester ! DoIt, - receive GetGoing -> ok end, - erlang:system_flag(multi_scheduling, - unblock), - SpawnProcesses(high), - lists:foreach( - fun (P) -> - SpawnHangAround(), - exit(P, bang) - end, - TP1), - SpawnProcesses(high), - erlang:yield(), - lists:foreach( - fun (P) -> - SpawnHangAround(), - exit(P, bang) - end, - TP2), - SpawnProcesses(high), - lists:foreach( - fun (P) -> - SpawnHangAround(), - exit(P, bang) - end, - TP3) - end, - [{priority, high}, link]), - ?line receive DoIt -> ok end, - ?line process_flag(priority, low), - ?line SpawnProcesses(low), - ?line erlang:yield(), - ?line process_flag(priority, normal), - ?line CorrectProcs0 = erts_debug:get_internal_state(processes), - ?line Killer ! GetGoing, - ?line erts_debug:set_internal_state(reds_left, WantReds), - ?line Procs0 = processes(), - ?line Procs = lists:sort(Procs0), - ?line CorrectProcs = lists:sort(CorrectProcs0), - ?line LengthCorrectProcs = length(CorrectProcs), - ?line ?t:format("~p = length(CorrectProcs)~n", [LengthCorrectProcs]), - ?line true = LengthCorrectProcs > NoTestProcs, - ?line case CorrectProcs =:= Procs of - true -> - ?line ok; - false -> - ?line processes_unexpected_result(CorrectProcs, Procs) - end, - ?line unlink(Killer), - ?line exit(Killer, bang) + DoIt = make_ref(), + GetGoing = make_ref(), + {NoTestProcs, TestProcs} = spawn_initial_hangarounds(Cleaner), + ?t:format("Testing with ~p processes~n", [NoTestProcs]), + SpawnHangAround = fun () -> + spawn(?MODULE, hangaround, [Cleaner, new_hangaround]) + end, + Killer = spawn_opt(fun () -> + Splt = NoTestProcs div 10, + {TP1, TP23} = lists:split(Splt, TestProcs), + {TP2, TP3} = lists:split(Splt, TP23), + erlang:system_flag(multi_scheduling, block), + Tester ! DoIt, + receive GetGoing -> ok end, + erlang:system_flag(multi_scheduling, unblock), + SpawnProcesses(high), + lists:foreach( fun (P) -> + SpawnHangAround(), + exit(P, bang) + end, TP1), + SpawnProcesses(high), + erlang:yield(), + lists:foreach( fun (P) -> + SpawnHangAround(), + exit(P, bang) + end, TP2), + SpawnProcesses(high), + lists:foreach( + fun (P) -> + SpawnHangAround(), + exit(P, bang) + end, TP3) + end, [{priority, high}, link]), + receive DoIt -> ok end, + process_flag(priority, low), + SpawnProcesses(low), + erlang:yield(), + process_flag(priority, normal), + CorrectProcs0 = erts_debug:get_internal_state(processes), + Killer ! GetGoing, + erts_debug:set_internal_state(reds_left, WantReds), + Procs0 = processes(), + Procs = lists:sort(Procs0), + CorrectProcs = lists:sort(CorrectProcs0), + LengthCorrectProcs = length(CorrectProcs), + ?t:format("~p = length(CorrectProcs)~n", [LengthCorrectProcs]), + true = LengthCorrectProcs > NoTestProcs, + case CorrectProcs =:= Procs of + true -> + ok; + false -> + processes_unexpected_result(CorrectProcs, Procs) + end, + unlink(Killer), + exit(Killer, bang) after unlink(Cleaner), exit(Cleaner, kill), %% Wait for the system to recover to a normal state... 
wait_until_system_recover() end, - ?line do_processes_bif_die_test(DieTest, Processes), - ?line ok. + do_processes_bif_die_test(DieTest, Processes), + ok. do_processes_bif_die_test(false, _Processes) -> - ?line ?t:format("Skipping test killing process executing processes/0~n",[]), - ?line ok; + ?t:format("Skipping test killing process executing processes/0~n",[]), + ok; do_processes_bif_die_test(true, Processes) -> - ?line do_processes_bif_die_test(5, Processes); + do_processes_bif_die_test(5, Processes); do_processes_bif_die_test(N, Processes) -> - ?line ?t:format("Doing test killing process executing processes/0~n",[]), + ?t:format("Doing test killing process executing processes/0~n",[]), try - ?line Tester = self(), - ?line Oooh_Nooooooo = make_ref(), - ?line {_, DieWhileDoingMon} = erlang:spawn_monitor( - fun () -> - Victim = self(), - spawn_opt( - fun () -> - exit(Victim, got_him) - end, - [link, - {priority, max}]), - Tester ! {Oooh_Nooooooo, - hd(Processes())}, - exit(ohhhh_nooooo) - end), - ?line receive - {'DOWN', DieWhileDoingMon, _, _, Reason} -> - case Reason of - got_him -> ok; - _ -> throw({kill_in_trap, Reason}) - end - end, - ?line receive - {Oooh_Nooooooo, _} -> - ?line throw({kill_in_trap, 'Oooh_Nooooooo'}) - after 0 -> - ?line ok - end, - ?line PrcsCllrsSeqLen = 2*erlang:system_info(schedulers_online), - ?line PrcsCllrsSeq = lists:seq(1, PrcsCllrsSeqLen), - ?line ProcsCallers = lists:map( - fun (_) -> - spawn_link( - fun () -> - Tester ! hd(Processes()) - end) - end, - PrcsCllrsSeq), - ?line erlang:yield(), + Tester = self(), + Oooh_Nooooooo = make_ref(), + {_, DieWhileDoingMon} = erlang:spawn_monitor( fun () -> + Victim = self(), + spawn_opt( + fun () -> + exit(Victim, got_him) + end, + [link, {priority, max}]), + Tester ! {Oooh_Nooooooo, + hd(Processes())}, + exit(ohhhh_nooooo) + end), + receive + {'DOWN', DieWhileDoingMon, _, _, Reason} -> + case Reason of + got_him -> ok; + _ -> throw({kill_in_trap, Reason}) + end + end, + receive + {Oooh_Nooooooo, _} -> + throw({kill_in_trap, 'Oooh_Nooooooo'}) + after 0 -> + ok + end, + PrcsCllrsSeqLen = 2*erlang:system_info(schedulers_online), + PrcsCllrsSeq = lists:seq(1, PrcsCllrsSeqLen), + ProcsCallers = lists:map( fun (_) -> + spawn_link( + fun () -> + Tester ! hd(Processes()) + end) + end, PrcsCllrsSeq), + erlang:yield(), {ProcsCallers1, ProcsCallers2} = lists:split(PrcsCllrsSeqLen div 2, ProcsCallers), - ?line process_flag(priority, high), - ?line lists:foreach( + process_flag(priority, high), + lists:foreach( fun (P) -> unlink(P), exit(P, bang) end, lists:reverse(ProcsCallers2) ++ ProcsCallers1), - ?line process_flag(priority, normal), - ?line ok + process_flag(priority, normal), + ok catch throw:{kill_in_trap, R} when N > 0 -> ?t:format("Failed to kill in trap: ~p~n", [R]), - ?t:format("Trying again~p~n", []), + ?t:format("Trying again~n", []), do_processes_bif_die_test(N-1, Processes) end. 
@@ -1844,23 +1802,23 @@ processes_last_call_trap(doc) -> processes_last_call_trap(suite) -> []; processes_last_call_trap(Config) when is_list(Config) -> - ?line enable_internal_state(), - ?line Processes = fun () -> processes() end, - ?line PBInfo = erts_debug:get_internal_state(processes_bif_info), - ?line print_processes_bif_info(PBInfo), - ?line WantReds = case PBInfo#processes_bif_info.min_start_reds of - R when R > 10 -> R - 1; - _R -> 9 - end, - ?line lists:foreach(fun (_) -> - ?line erts_debug:set_internal_state(reds_left, - WantReds), - Processes(), - ?line erts_debug:set_internal_state(reds_left, - WantReds), - my_processes() - end, - lists:seq(1,100)). + enable_internal_state(), + Processes = fun () -> processes() end, + PBInfo = erts_debug:get_internal_state(processes_bif_info), + print_processes_bif_info(PBInfo), + WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of + R when R > 10 -> R - 1; + _R -> 9 + end, + lists:foreach(fun (_) -> + erts_debug:set_internal_state(reds_left, + WantReds), + Processes(), + erts_debug:set_internal_state(reds_left, + WantReds), + my_processes() + end, + lists:seq(1,100)). my_processes() -> processes(). @@ -1870,108 +1828,106 @@ processes_apply_trap(doc) -> processes_apply_trap(suite) -> []; processes_apply_trap(Config) when is_list(Config) -> - ?line enable_internal_state(), - ?line PBInfo = erts_debug:get_internal_state(processes_bif_info), - ?line print_processes_bif_info(PBInfo), - ?line WantReds = case PBInfo#processes_bif_info.min_start_reds of - R when R > 10 -> R - 1; - _R -> 9 - end, - ?line lists:foreach(fun (_) -> - ?line erts_debug:set_internal_state(reds_left, - WantReds), - ?line apply(erlang, processes, []) - end, - lists:seq(1,100)). + enable_internal_state(), + PBInfo = erts_debug:get_internal_state(processes_bif_info), + print_processes_bif_info(PBInfo), + WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of + R when R > 10 -> R - 1; + _R -> 9 + end, + lists:foreach(fun (_) -> + erts_debug:set_internal_state(reds_left, + WantReds), + apply(erlang, processes, []) + end, lists:seq(1,100)). processes_gc_trap(doc) -> []; processes_gc_trap(suite) -> []; processes_gc_trap(Config) when is_list(Config) -> - ?line Tester = self(), - ?line enable_internal_state(), - ?line PBInfo = erts_debug:get_internal_state(processes_bif_info), - ?line print_processes_bif_info(PBInfo), - ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10, - ?line Processes = fun () -> - erts_debug:set_internal_state(reds_left,WantReds), - processes() - end, + Tester = self(), + enable_internal_state(), + PBInfo = erts_debug:get_internal_state(processes_bif_info), + print_processes_bif_info(PBInfo), + WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10, + Processes = fun () -> + erts_debug:set_internal_state(reds_left,WantReds), + processes() + end, - ?line erlang:system_flag(multi_scheduling, block), - ?line Suspendee = spawn_link(fun () -> + erlang:system_flag(multi_scheduling, block), + Suspendee = spawn_link(fun () -> Tester ! {suspend_me, self()}, Tester ! 
{self(), done, hd(Processes())}, receive after infinity -> ok end end), - ?line receive {suspend_me, Suspendee} -> ok end, - ?line erlang:suspend_process(Suspendee), - ?line erlang:system_flag(multi_scheduling, unblock), + receive {suspend_me, Suspendee} -> ok end, + erlang:suspend_process(Suspendee), + erlang:system_flag(multi_scheduling, unblock), - ?line [{status,suspended}, {current_function,{erlang,processes_trap,2}}] + [{status,suspended}, {current_function,{erlang,ptab_list_continue,2}}] = process_info(Suspendee, [status, current_function]), - ?line erlang:garbage_collect(Suspendee), - ?line erlang:garbage_collect(Suspendee), + erlang:garbage_collect(Suspendee), + erlang:garbage_collect(Suspendee), - ?line erlang:resume_process(Suspendee), - ?line receive {Suspendee, done, _} -> ok end, - ?line erlang:garbage_collect(Suspendee), - ?line erlang:garbage_collect(Suspendee), + erlang:resume_process(Suspendee), + receive {Suspendee, done, _} -> ok end, + erlang:garbage_collect(Suspendee), + erlang:garbage_collect(Suspendee), - ?line unlink(Suspendee), - ?line exit(Suspendee, bang), - ?line ok. + unlink(Suspendee), + exit(Suspendee, bang), + ok. process_flag_heap_size(doc) -> []; process_flag_heap_size(suite) -> []; process_flag_heap_size(Config) when is_list(Config) -> - HSize = 2584, % must be gc fib number - VHSize = 317811, % must be gc fib number - ?line OldHmin = erlang:process_flag(min_heap_size, HSize), - ?line {min_heap_size, HSize} = erlang:process_info(self(), min_heap_size), - ?line OldVHmin = erlang:process_flag(min_bin_vheap_size, VHSize), - ?line {min_bin_vheap_size, VHSize} = erlang:process_info(self(), min_bin_vheap_size), - ?line HSize = erlang:process_flag(min_heap_size, OldHmin), - ?line VHSize = erlang:process_flag(min_bin_vheap_size, OldVHmin), - ?line ok. + HSize = 2586, % must be gc fib+ number + VHSize = 318187, % must be gc fib+ number + OldHmin = erlang:process_flag(min_heap_size, HSize), + {min_heap_size, HSize} = erlang:process_info(self(), min_heap_size), + OldVHmin = erlang:process_flag(min_bin_vheap_size, VHSize), + {min_bin_vheap_size, VHSize} = erlang:process_info(self(), min_bin_vheap_size), + HSize = erlang:process_flag(min_heap_size, OldHmin), + VHSize = erlang:process_flag(min_bin_vheap_size, OldVHmin), + ok. spawn_opt_heap_size(doc) -> []; spawn_opt_heap_size(suite) -> []; spawn_opt_heap_size(Config) when is_list(Config) -> - HSize = 987, % must be gc fib number - VHSize = 46368, % must be gc fib number - ?line Pid = spawn_opt(fun () -> receive stop -> ok end end, + HSize = 987, % must be gc fib+ number + VHSize = 46422, % must be gc fib+ number + Pid = spawn_opt(fun () -> receive stop -> ok end end, [{min_heap_size, HSize},{ min_bin_vheap_size, VHSize}]), - ?line {min_heap_size, HSize} = process_info(Pid, min_heap_size), - ?line {min_bin_vheap_size, VHSize} = process_info(Pid, min_bin_vheap_size), - ?line Pid ! stop, - ?line ok. + {min_heap_size, HSize} = process_info(Pid, min_heap_size), + {min_bin_vheap_size, VHSize} = process_info(Pid, min_bin_vheap_size), + Pid ! stop, + ok. processes_term_proc_list(doc) -> []; processes_term_proc_list(suite) -> []; processes_term_proc_list(Config) when is_list(Config) -> - ?line Tester = self(), - ?line as_expected = processes_term_proc_list_test(false), - ?line {ok, Node} = start_node(Config, "+Mis true"), - ?line RT = spawn_link(Node, - fun () -> - receive after 1000 -> ok end, - processes_term_proc_list_test(false), - Tester ! 
{it_worked, self()} - end), - ?line receive {it_worked, RT} -> ok end, - ?line stop_node(Node), - ?line ok. + Tester = self(), + as_expected = processes_term_proc_list_test(false), + {ok, Node} = start_node(Config, "+Mis true"), + RT = spawn_link(Node, fun () -> + receive after 1000 -> ok end, + processes_term_proc_list_test(false), + Tester ! {it_worked, self()} + end), + receive {it_worked, RT} -> ok end, + stop_node(Node), + ok. -define(CHK_TERM_PROC_LIST(MC, XB), chk_term_proc_list(?LINE, MC, XB)). @@ -1982,8 +1938,8 @@ chk_term_proc_list(Line, MustChk, ExpectBlks) -> not_enabled; {_, MS} -> {value, - {processes_term_proc_el, - DL}} = lists:keysearch(processes_term_proc_el, 1, MS), + {ptab_list_deleted_el, + DL}} = lists:keysearch(ptab_list_deleted_el, 1, MS), case lists:keysearch(blocks, 1, DL) of {value, {blocks, ExpectBlks, _, _}} -> ok; @@ -1997,35 +1953,34 @@ chk_term_proc_list(Line, MustChk, ExpectBlks) -> ok. processes_term_proc_list_test(MustChk) -> - ?line Tester = self(), - ?line enable_internal_state(), - ?line PBInfo = erts_debug:get_internal_state(processes_bif_info), - ?line print_processes_bif_info(PBInfo), - ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10, - ?line #processes_bif_info{tab_chunks = Chunks, - tab_chunks_size = ChunksSize, - tab_indices_per_red = IndiciesPerRed - } = PBInfo, - ?line true = Chunks > 1, - ?line true = Chunks*ChunksSize >= IndiciesPerRed*WantReds, - ?line Processes = fun () -> - erts_debug:set_internal_state(reds_left, - WantReds), - processes() - end, - ?line Exit = fun (P) -> - unlink(P), - exit(P, bang), - wait_until( - fun () -> - not lists:member( - P, - erts_debug:get_internal_state( - processes)) - end) - end, - ?line SpawnSuspendProcessesProc - = fun () -> + Tester = self(), + enable_internal_state(), + PBInfo = erts_debug:get_internal_state(processes_bif_info), + print_processes_bif_info(PBInfo), + WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10, + #ptab_list_bif_info{tab_chunks = Chunks, + tab_chunks_size = ChunksSize, + tab_indices_per_red = IndiciesPerRed + } = PBInfo, + true = Chunks > 1, + true = Chunks*ChunksSize >= IndiciesPerRed*WantReds, + Processes = fun () -> + erts_debug:set_internal_state(reds_left, + WantReds), + processes() + end, + Exit = fun (P) -> + unlink(P), + exit(P, bang), + wait_until( + fun () -> + not lists:member( + P, + erts_debug:get_internal_state( + processes)) + end) + end, + SpawnSuspendProcessesProc = fun () -> erlang:system_flag(multi_scheduling, block), P = spawn_link(fun () -> Tester ! 
{suspend_me, self()}, @@ -2038,76 +1993,76 @@ processes_term_proc_list_test(MustChk) -> erlang:suspend_process(P), erlang:system_flag(multi_scheduling, unblock), [{status,suspended}, - {current_function,{erlang,processes_trap,2}}] + {current_function,{erlang,ptab_list_continue,2}}] = process_info(P, [status, current_function]), P end, - ?line ResumeProcessesProc = fun (P) -> + ResumeProcessesProc = fun (P) -> erlang:resume_process(P), receive {P, done, _} -> ok end end, - ?line ?CHK_TERM_PROC_LIST(MustChk, 0), - ?line HangAround = fun () -> receive after infinity -> ok end end, - ?line HA1 = spawn_link(HangAround), - ?line HA2 = spawn_link(HangAround), - ?line HA3 = spawn_link(HangAround), - ?line S1 = SpawnSuspendProcessesProc(), - ?line ?CHK_TERM_PROC_LIST(MustChk, 1), - ?line Exit(HA1), - ?line ?CHK_TERM_PROC_LIST(MustChk, 2), - ?line S2 = SpawnSuspendProcessesProc(), - ?line ?CHK_TERM_PROC_LIST(MustChk, 3), - ?line S3 = SpawnSuspendProcessesProc(), - ?line ?CHK_TERM_PROC_LIST(MustChk, 4), - ?line Exit(HA2), - ?line ?CHK_TERM_PROC_LIST(MustChk, 5), - ?line S4 = SpawnSuspendProcessesProc(), - ?line ?CHK_TERM_PROC_LIST(MustChk, 6), - ?line Exit(HA3), - ?line ?CHK_TERM_PROC_LIST(MustChk, 7), - ?line ResumeProcessesProc(S1), - ?line ?CHK_TERM_PROC_LIST(MustChk, 5), - ?line ResumeProcessesProc(S3), - ?line ?CHK_TERM_PROC_LIST(MustChk, 4), - ?line ResumeProcessesProc(S4), - ?line ?CHK_TERM_PROC_LIST(MustChk, 3), - ?line ResumeProcessesProc(S2), - ?line ?CHK_TERM_PROC_LIST(MustChk, 0), - ?line Exit(S1), - ?line Exit(S2), - ?line Exit(S3), - ?line Exit(S4), - - - ?line HA4 = spawn_link(HangAround), - ?line HA5 = spawn_link(HangAround), - ?line HA6 = spawn_link(HangAround), - ?line S5 = SpawnSuspendProcessesProc(), - ?line ?CHK_TERM_PROC_LIST(MustChk, 1), - ?line Exit(HA4), - ?line ?CHK_TERM_PROC_LIST(MustChk, 2), - ?line S6 = SpawnSuspendProcessesProc(), - ?line ?CHK_TERM_PROC_LIST(MustChk, 3), - ?line Exit(HA5), - ?line ?CHK_TERM_PROC_LIST(MustChk, 4), - ?line S7 = SpawnSuspendProcessesProc(), - ?line ?CHK_TERM_PROC_LIST(MustChk, 5), - ?line Exit(HA6), - ?line ?CHK_TERM_PROC_LIST(MustChk, 6), - ?line S8 = SpawnSuspendProcessesProc(), - ?line ?CHK_TERM_PROC_LIST(MustChk, 7), - - ?line erlang:system_flag(multi_scheduling, block), - ?line Exit(S8), - ?line ?CHK_TERM_PROC_LIST(MustChk, 7), - ?line Exit(S5), - ?line ?CHK_TERM_PROC_LIST(MustChk, 6), - ?line Exit(S7), - ?line ?CHK_TERM_PROC_LIST(MustChk, 6), - ?line Exit(S6), - ?line ?CHK_TERM_PROC_LIST(MustChk, 0), - ?line erlang:system_flag(multi_scheduling, unblock), - ?line as_expected. 
+ ?CHK_TERM_PROC_LIST(MustChk, 0), + HangAround = fun () -> receive after infinity -> ok end end, + HA1 = spawn_link(HangAround), + HA2 = spawn_link(HangAround), + HA3 = spawn_link(HangAround), + S1 = SpawnSuspendProcessesProc(), + ?CHK_TERM_PROC_LIST(MustChk, 1), + Exit(HA1), + ?CHK_TERM_PROC_LIST(MustChk, 2), + S2 = SpawnSuspendProcessesProc(), + ?CHK_TERM_PROC_LIST(MustChk, 3), + S3 = SpawnSuspendProcessesProc(), + ?CHK_TERM_PROC_LIST(MustChk, 4), + Exit(HA2), + ?CHK_TERM_PROC_LIST(MustChk, 5), + S4 = SpawnSuspendProcessesProc(), + ?CHK_TERM_PROC_LIST(MustChk, 6), + Exit(HA3), + ?CHK_TERM_PROC_LIST(MustChk, 7), + ResumeProcessesProc(S1), + ?CHK_TERM_PROC_LIST(MustChk, 5), + ResumeProcessesProc(S3), + ?CHK_TERM_PROC_LIST(MustChk, 4), + ResumeProcessesProc(S4), + ?CHK_TERM_PROC_LIST(MustChk, 3), + ResumeProcessesProc(S2), + ?CHK_TERM_PROC_LIST(MustChk, 0), + Exit(S1), + Exit(S2), + Exit(S3), + Exit(S4), + + + HA4 = spawn_link(HangAround), + HA5 = spawn_link(HangAround), + HA6 = spawn_link(HangAround), + S5 = SpawnSuspendProcessesProc(), + ?CHK_TERM_PROC_LIST(MustChk, 1), + Exit(HA4), + ?CHK_TERM_PROC_LIST(MustChk, 2), + S6 = SpawnSuspendProcessesProc(), + ?CHK_TERM_PROC_LIST(MustChk, 3), + Exit(HA5), + ?CHK_TERM_PROC_LIST(MustChk, 4), + S7 = SpawnSuspendProcessesProc(), + ?CHK_TERM_PROC_LIST(MustChk, 5), + Exit(HA6), + ?CHK_TERM_PROC_LIST(MustChk, 6), + S8 = SpawnSuspendProcessesProc(), + ?CHK_TERM_PROC_LIST(MustChk, 7), + + erlang:system_flag(multi_scheduling, block), + Exit(S8), + ?CHK_TERM_PROC_LIST(MustChk, 7), + Exit(S5), + ?CHK_TERM_PROC_LIST(MustChk, 6), + Exit(S7), + ?CHK_TERM_PROC_LIST(MustChk, 6), + Exit(S6), + ?CHK_TERM_PROC_LIST(MustChk, 0), + erlang:system_flag(multi_scheduling, unblock), + as_expected. otp_7738_waiting(doc) -> @@ -2115,88 +2070,88 @@ otp_7738_waiting(doc) -> otp_7738_waiting(suite) -> []; otp_7738_waiting(Config) when is_list(Config) -> - ?line otp_7738_test(waiting). + otp_7738_test(waiting). otp_7738_suspended(doc) -> []; otp_7738_suspended(suite) -> []; otp_7738_suspended(Config) when is_list(Config) -> - ?line otp_7738_test(suspended). + otp_7738_test(suspended). otp_7738_resume(doc) -> []; otp_7738_resume(suite) -> []; otp_7738_resume(Config) when is_list(Config) -> - ?line otp_7738_test(resume). + otp_7738_test(resume). otp_7738_test(Type) -> - ?line T = self(), - ?line S = spawn_link(fun () -> - receive - {suspend, Suspendee} -> - erlang:suspend_process(Suspendee), - T ! {suspended, Suspendee}, - receive - after 10 -> - erlang:resume_process(Suspendee), - Suspendee ! wake_up - end; - {send, To, Msg} -> - receive after 10 -> ok end, - To ! Msg - end - end), - ?line R = spawn_link(fun () -> - X = lists:seq(1, 20000000), - T ! {initialized, self()}, - ?line case Type of - _ when Type == suspended; - Type == waiting -> - receive _ -> ok end; - _ when Type == resume -> - Receive = fun (F) -> - receive - _ -> - ok - after 0 -> - F(F) - end - end, - Receive(Receive) - end, - T ! {woke_up, self()}, - id(X) - end), - ?line receive {initialized, R} -> ok end, - ?line receive after 10 -> ok end, - ?line case Type of + T = self(), + S = spawn_link(fun () -> + receive + {suspend, Suspendee} -> + erlang:suspend_process(Suspendee), + T ! {suspended, Suspendee}, + receive + after 10 -> + erlang:resume_process(Suspendee), + Suspendee ! wake_up + end; + {send, To, Msg} -> + receive after 10 -> ok end, + To ! Msg + end + end), + R = spawn_link(fun () -> + X = lists:seq(1, 20000000), + T ! 
{initialized, self()}, + case Type of + _ when Type == suspended; + Type == waiting -> + receive _ -> ok end; + _ when Type == resume -> + Receive = fun (F) -> + receive + _ -> + ok + after 0 -> + F(F) + end + end, + Receive(Receive) + end, + T ! {woke_up, self()}, + id(X) + end), + receive {initialized, R} -> ok end, + receive after 10 -> ok end, + case Type of suspended -> - ?line erlang:suspend_process(R), - ?line S ! {send, R, wake_up}; + erlang:suspend_process(R), + S ! {send, R, wake_up}; waiting -> - ?line S ! {send, R, wake_up}; + S ! {send, R, wake_up}; resume -> - ?line S ! {suspend, R}, - ?line receive {suspended, R} -> ok end + S ! {suspend, R}, + receive {suspended, R} -> ok end end, - ?line erlang:garbage_collect(R), - ?line case Type of + erlang:garbage_collect(R), + case Type of suspended -> - ?line erlang:resume_process(R); + erlang:resume_process(R); _ -> - ?line ok + ok end, - ?line receive + receive {woke_up, R} -> - ?line ok + ok after 2000 -> - ?line I = process_info(R, [status, message_queue_len]), - ?line ?t:format("~p~n", [I]), - ?line ?t:fail(no_progress) + I = process_info(R, [status, message_queue_len]), + ?t:format("~p~n", [I]), + ?t:fail(no_progress) end, - ?line ok. + ok. gor(Reds, Stop) -> receive @@ -2210,28 +2165,28 @@ gor(Reds, Stop) -> end. garb_other_running(Config) when is_list(Config) -> - ?line Stop = make_ref(), - ?line {Pid, Mon} = spawn_monitor(fun () -> gor(0, Stop) end), - ?line Reds = lists:foldl(fun (_, OldReds) -> - ?line erlang:garbage_collect(Pid), - ?line receive after 1 -> ok end, - ?line Pid ! {self(), reds}, - ?line receive + Stop = make_ref(), + {Pid, Mon} = spawn_monitor(fun () -> gor(0, Stop) end), + Reds = lists:foldl(fun (_, OldReds) -> + erlang:garbage_collect(Pid), + receive after 1 -> ok end, + Pid ! {self(), reds}, + receive {reds, NewReds, Pid} -> - ?line true = (NewReds > OldReds), - ?line NewReds + true = (NewReds > OldReds), + NewReds end end, 0, lists:seq(1, 10000)), - ?line receive after 1 -> ok end, - ?line Pid ! {self(), Stop}, - ?line receive + receive after 1 -> ok end, + Pid ! {self(), Stop}, + receive {stopped, Stop, StopReds, Pid} -> - ?line true = (StopReds > Reds) + true = (StopReds > Reds) end, - ?line receive {'DOWN', Mon, process, Pid, normal} -> ok end, - ?line ok. + receive {'DOWN', Mon, process, Pid, normal} -> ok end, + ok. %% Internal functions @@ -2255,9 +2210,9 @@ start_node(Config) -> start_node(Config, ""). start_node(Config, Args) when is_list(Config) -> - ?line Pa = filename:dirname(code:which(?MODULE)), - ?line {A, B, C} = now(), - ?line Name = list_to_atom(atom_to_list(?MODULE) + Pa = filename:dirname(code:which(?MODULE)), + {A, B, C} = now(), + Name = list_to_atom(atom_to_list(?MODULE) ++ "-" ++ atom_to_list(?config(testcase, Config)) ++ "-" @@ -2266,7 +2221,7 @@ start_node(Config, Args) when is_list(Config) -> ++ integer_to_list(B) ++ "-" ++ integer_to_list(C)), - ?line ?t:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]). + ?t:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]). stop_node(Node) -> ?t:stop_node(Node). 
diff --git a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c index b3feca79f0..f8613487b0 100644 --- a/erts/emulator/test/send_term_SUITE_data/send_term_drv.c +++ b/erts/emulator/test/send_term_SUITE_data/send_term_drv.c @@ -664,7 +664,7 @@ static void send_term_drv_run(ErlDrvData port, char *buf, ErlDrvSizeT count) /* Signal end of test case */ msg[0] = ERL_DRV_NIL; - driver_output_term(erlang_port, msg, 1); + erl_drv_output_term(driver_mk_port(erlang_port), msg, 1); return; } break; @@ -687,14 +687,14 @@ static void send_term_drv_run(ErlDrvData port, char *buf, ErlDrvSizeT count) static void output_term(ErlDrvTermData* msg, int len) { - if (driver_output_term(erlang_port, msg, len) <= 0) { - driver_failure_atom(erlang_port, "driver_output_term_failed"); + if (erl_drv_output_term(driver_mk_port(erlang_port), msg, len) <= 0) { + driver_failure_atom(erlang_port, "erl_drv_output_term_failed"); } } static void fail_term(ErlDrvTermData* msg, int len, int line) { - int status = driver_output_term(erlang_port, msg, len); + int status = erl_drv_output_term(driver_mk_port(erlang_port), msg, len); if (status == 1) { char buf[1024]; diff --git a/erts/emulator/test/tuple_SUITE.erl b/erts/emulator/test/tuple_SUITE.erl index bfc3910742..a3b2764a5d 100644 --- a/erts/emulator/test/tuple_SUITE.erl +++ b/erts/emulator/test/tuple_SUITE.erl @@ -20,6 +20,7 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, t_size/1, t_tuple_size/1, t_element/1, t_setelement/1, + t_insert_element/1, t_delete_element/1, t_list_to_tuple/1, t_tuple_to_list/1, t_make_tuple_2/1, t_make_tuple_3/1, t_append_element/1, build_and_match/1, tuple_with_case/1, tuple_in_guard/1]). @@ -41,6 +42,7 @@ all() -> [build_and_match, t_size, t_tuple_size, t_list_to_tuple, t_tuple_to_list, t_element, t_setelement, t_make_tuple_2, t_make_tuple_3, t_append_element, + t_insert_element, t_delete_element, tuple_with_case, tuple_in_guard]. groups() -> @@ -60,40 +62,40 @@ end_per_group(_GroupName, Config) -> build_and_match(Config) when is_list(Config) -> - ?line {} = id({}), - ?line {1} = id({1}), - ?line {1, 2} = id({1, 2}), - ?line {1, 2, 3} = id({1, 2, 3}), - ?line {1, 2, 3, 4} = id({1, 2, 3, 4}), - ?line {1, 2, 3, 4, 5} = id({1, 2, 3, 4, 5}), - ?line {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}), - ?line {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}), - ?line {1, 2, 3, 4, 5, 6, 7} = id({1, 2, 3, 4, 5, 6, 7}), - ?line {1, 2, 3, 4, 5, 6, 7, 8} = id({1, 2, 3, 4, 5, 6, 7, 8}), + {} = id({}), + {1} = id({1}), + {1, 2} = id({1, 2}), + {1, 2, 3} = id({1, 2, 3}), + {1, 2, 3, 4} = id({1, 2, 3, 4}), + {1, 2, 3, 4, 5} = id({1, 2, 3, 4, 5}), + {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}), + {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}), + {1, 2, 3, 4, 5, 6, 7} = id({1, 2, 3, 4, 5, 6, 7}), + {1, 2, 3, 4, 5, 6, 7, 8} = id({1, 2, 3, 4, 5, 6, 7, 8}), ok. %% Tests size(Tuple). t_size(Config) when is_list(Config) -> - ?line 0 = size({}), - ?line 1 = size({a}), - ?line 1 = size({{a}}), - ?line 2 = size({{a}, {b}}), - ?line 3 = size({1, 2, 3}), + 0 = size({}), + 1 = size({a}), + 1 = size({{a}}), + 2 = size({{a}, {b}}), + 3 = size({1, 2, 3}), ok. 
t_tuple_size(Config) when is_list(Config) -> - ?line 0 = tuple_size(id({})), - ?line 1 = tuple_size(id({a})), - ?line 1 = tuple_size(id({{a}})), - ?line 2 = tuple_size(id({{a},{b}})), - ?line 3 = tuple_size(id({1,2,3})), + 0 = tuple_size(id({})), + 1 = tuple_size(id({a})), + 1 = tuple_size(id({{a}})), + 2 = tuple_size(id({{a},{b}})), + 3 = tuple_size(id({1,2,3})), %% Error cases. - ?line {'EXIT',{badarg,_}} = (catch tuple_size([])), - ?line {'EXIT',{badarg,_}} = (catch tuple_size(<<1,2,3>>)), - ?line error = ludicrous_tuple_size({a,b,c}), - ?line error = ludicrous_tuple_size([a,b,c]), + {'EXIT',{badarg,_}} = (catch tuple_size([])), + {'EXIT',{badarg,_}} = (catch tuple_size(<<1,2,3>>)), + error = ludicrous_tuple_size({a,b,c}), + error = ludicrous_tuple_size([a,b,c]), ok. @@ -104,44 +106,44 @@ ludicrous_tuple_size(_) -> error. %% Tests element/2. t_element(Config) when is_list(Config) -> - ?line a = element(1, {a}), - ?line a = element(1, {a, b}), + a = element(1, {a}), + a = element(1, {a, b}), - ?line List = lists:seq(1, 4096), - ?line Tuple = list_to_tuple(lists:seq(1, 4096)), - ?line get_elements(List, Tuple, 1), + List = lists:seq(1, 4096), + Tuple = list_to_tuple(lists:seq(1, 4096)), + get_elements(List, Tuple, 1), - ?line {'EXIT', {badarg, _}} = (catch element(0, id({a,b}))), - ?line {'EXIT', {badarg, _}} = (catch element(3, id({a,b}))), - ?line {'EXIT', {badarg, _}} = (catch element(1, id({}))), - ?line {'EXIT', {badarg, _}} = (catch element(1, id([a,b]))), - ?line {'EXIT', {badarg, _}} = (catch element(1, id(42))), - ?line {'EXIT', {badarg, _}} = (catch element(id(1.5), id({a,b}))), + {'EXIT', {badarg, _}} = (catch element(0, id({a,b}))), + {'EXIT', {badarg, _}} = (catch element(3, id({a,b}))), + {'EXIT', {badarg, _}} = (catch element(1, id({}))), + {'EXIT', {badarg, _}} = (catch element(1, id([a,b]))), + {'EXIT', {badarg, _}} = (catch element(1, id(42))), + {'EXIT', {badarg, _}} = (catch element(id(1.5), id({a,b}))), ok. get_elements([Element|Rest], Tuple, Pos) -> - ?line Element = element(Pos, Tuple), - ?line get_elements(Rest, Tuple, Pos+1); + Element = element(Pos, Tuple), + get_elements(Rest, Tuple, Pos+1); get_elements([], _Tuple, _Pos) -> ok. %% Tests set_element/3. t_setelement(Config) when is_list(Config) -> - ?line {x} = setelement(1, id({1}), x), - ?line {x,2} = setelement(1, id({1,2}), x), - ?line {1,x} = setelement(2, id({1,2}), x), + {x} = setelement(1, id({1}), x), + {x,2} = setelement(1, id({1,2}), x), + {1,x} = setelement(2, id({1,2}), x), - ?line Tuple = list_to_tuple(lists:duplicate(2048, x)), - ?line NewTuple = set_all_elements(Tuple, 1), - ?line NewTuple = list_to_tuple(lists:seq(1+7, 2048+7)), + Tuple = list_to_tuple(lists:duplicate(2048, x)), + NewTuple = set_all_elements(Tuple, 1), + NewTuple = list_to_tuple(lists:seq(1+7, 2048+7)), - ?line {'EXIT', {badarg, _}} = (catch setelement(0, {a, b}, x)), - ?line {'EXIT', {badarg, _}} = (catch setelement(3, {a, b}, x)), - ?line {'EXIT', {badarg, _}} = (catch setelement(1, {}, x)), - ?line {'EXIT', {badarg, _}} = (catch setelement(1, [a, b], x)), - ?line {'EXIT', {badarg, _}} = (catch setelement(1.5, {a, b}, x)), + {'EXIT', {badarg, _}} = (catch setelement(0, {a, b}, x)), + {'EXIT', {badarg, _}} = (catch setelement(3, {a, b}, x)), + {'EXIT', {badarg, _}} = (catch setelement(1, {}, x)), + {'EXIT', {badarg, _}} = (catch setelement(1, [a, b], x)), + {'EXIT', {badarg, _}} = (catch setelement(1.5, {a, b}, x)), %% Nested setelement with literals. 
AnotherTuple = id({0,0,a,b,c}), @@ -159,52 +161,68 @@ set_all_elements(Tuple, Pos) when Pos > size(Tuple) -> %% Tests list_to_tuple/1. t_list_to_tuple(Config) when is_list(Config) -> - ?line {} = list_to_tuple([]), - ?line {a} = list_to_tuple([a]), - ?line {a, b} = list_to_tuple([a, b]), - ?line {a, b, c} = list_to_tuple([a, b, c]), - ?line {a, b, c, d} = list_to_tuple([a, b, c, d]), - ?line {a, b, c, d, e} = list_to_tuple([a, b, c, d, e]), - - ?line Size = 4096, - ?line Tuple = list_to_tuple(lists:seq(1, Size)), - ?line Size = size(Tuple), - - ?line {'EXIT', {badarg, _}} = (catch list_to_tuple(id({a,b}))), - ?line {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))), - ?line {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))), - + {} = list_to_tuple([]), + {a} = list_to_tuple([a]), + {a, b} = list_to_tuple([a, b]), + {a, b, c} = list_to_tuple([a, b, c]), + {a, b, c, d} = list_to_tuple([a, b, c, d]), + {a, b, c, d, e} = list_to_tuple([a, b, c, d, e]), + + Size = 4096, + Tuple = list_to_tuple(lists:seq(1, Size)), + Size = size(Tuple), + + {'EXIT', {badarg, _}} = (catch list_to_tuple(id({a,b}))), + {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))), + {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))), + + % test upper boundary, 16777215 elements + MaxSize = 1 bsl 24 - 1, + MaxTuple = list_to_tuple(lists:seq(1, MaxSize)), + MaxSize = size(MaxTuple), + + {'EXIT', {badarg,_}} = (catch list_to_tuple(lists:seq(1, 1 bsl 24))), ok. %% Tests tuple_to_list/1. t_tuple_to_list(Config) when is_list(Config) -> - ?line [] = tuple_to_list({}), - ?line [a] = tuple_to_list({a}), - ?line [a, b] = tuple_to_list({a, b}), - ?line [a, b, c] = tuple_to_list({a, b, c}), - ?line [a, b, c, d] = tuple_to_list({a, b, c, d}), - ?line [a, b, c, d] = tuple_to_list({a, b, c, d}), - - ?line Size = 4096, - ?line List = lists:seq(1, Size), - ?line Tuple = list_to_tuple(List), - ?line Size = size(Tuple), - ?line List = tuple_to_list(Tuple), - - ?line {'EXIT', {badarg,_}} = (catch tuple_to_list(id(a))), - ?line {'EXIT', {badarg,_}} = (catch tuple_to_list(id(42))), + [] = tuple_to_list({}), + [a] = tuple_to_list({a}), + [a, b] = tuple_to_list({a, b}), + [a, b, c] = tuple_to_list({a, b, c}), + [a, b, c, d] = tuple_to_list({a, b, c, d}), + [a, b, c, d] = tuple_to_list({a, b, c, d}), + + Size = 4096, + List = lists:seq(1, Size), + Tuple = list_to_tuple(List), + Size = size(Tuple), + List = tuple_to_list(Tuple), + + {'EXIT', {badarg,_}} = (catch tuple_to_list(id(a))), + {'EXIT', {badarg,_}} = (catch tuple_to_list(id(42))), ok. %% Tests the make_tuple/2 BIF. t_make_tuple_2(Config) when is_list(Config) -> - ?line t_make_tuple1([]), - ?line t_make_tuple1(42), - ?line t_make_tuple1(a), - ?line t_make_tuple1({}), - ?line t_make_tuple1({a}), - ?line t_make_tuple1(erlang:make_tuple(400, [])), + t_make_tuple1([]), + t_make_tuple1(42), + t_make_tuple1(a), + t_make_tuple1({}), + t_make_tuple1({a}), + t_make_tuple1(erlang:make_tuple(400, [])), + + % test upper boundary, 16777215 elements + t_make_tuple(1 bsl 24 - 1, a), + {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 24, a)), + + {'EXIT', {badarg,_}} = (catch erlang:make_tuple(-1, a)), + % 26 bits is the total header arity room (for now) + {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 26 + 3, a)), + % bignum + {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 65 + 3, a)), ok. t_make_tuple1(Element) -> @@ -222,29 +240,82 @@ t_make_tuple(Size, Element) -> %% Tests the erlang:make_tuple/3 BIF. 
t_make_tuple_3(Config) when is_list(Config) -> - ?line {} = erlang:make_tuple(0, def, []), - ?line {def} = erlang:make_tuple(1, def, []), - ?line {a} = erlang:make_tuple(1, def, [{1,a}]), - ?line {a,def,c,def,e} = erlang:make_tuple(5, def, [{5,e},{1,a},{3,c}]), - ?line {a,def,c,def,e} = erlang:make_tuple(5, def, - [{1,blurf},{5,e},{3,blurf}, - {1,a},{3,c}]), + {} = erlang:make_tuple(0, def, []), + {def} = erlang:make_tuple(1, def, []), + {a} = erlang:make_tuple(1, def, [{1,a}]), + + {a,def,c,def,e} = erlang:make_tuple(5, def, [{5,e},{1,a},{3,c}]), + {a,def,c,def,e} = erlang:make_tuple(5, def, [{1,blurf},{5,e},{3,blurf},{1,a},{3,c}]), + MaxSize = 1 bsl 16 - 1, + MaxTuple = erlang:make_tuple(MaxSize, def, [{1,blurf},{5,e},{3,blurf},{1,a},{3,c}]), + MaxSize = size(MaxTuple), + + %% Error cases. + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(0, def, [{1,a}])), + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{-1,a}])), + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{0,a}])), + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{6,z}])), + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(a, def, [{6,z}])), + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{1,a}|b])), + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [42])), + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [[a,b,c]])), + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, non_list)), + {'EXIT',{badarg,_}} = (catch erlang:make_tuple(1 bsl 24, def, [{5,e},{1,a},{3,c}])), + + ok. + +%% Tests the erlang:insert_element/3 BIF. +t_insert_element(Config) when is_list(Config) -> + {a} = erlang:insert_element(1, {}, a), + {{b,b},a} = erlang:insert_element(1, {a}, {b,b}), + {a,b} = erlang:insert_element(2, {a}, b), + [b,def|_] = tuple_to_list(erlang:insert_element(1, erlang:make_tuple(1 bsl 20, def), b)), + [def,b|_] = tuple_to_list(erlang:insert_element(2, erlang:make_tuple(1 bsl 20, def), b)), + [def,b|_] = lists:reverse(tuple_to_list(erlang:insert_element(1 bsl 20, erlang:make_tuple(1 bsl 20, def), b))), + [b,def|_] = lists:reverse(tuple_to_list(erlang:insert_element((1 bsl 20) + 1, erlang:make_tuple(1 bsl 20, def), b))), + + %% Error cases. + {'EXIT',{badarg,_}} = (catch erlang:insert_element(1, [], a)), + {'EXIT',{badarg,_}} = (catch erlang:insert_element(1, a, a)), + {'EXIT',{badarg,_}} = (catch erlang:insert_element(0, {}, a)), + {'EXIT',{badarg,_}} = (catch erlang:insert_element(0, {b,b,b,b,b}, a)), + {'EXIT',{badarg,_}} = (catch erlang:insert_element(-1, {}, a)), + {'EXIT',{badarg,_}} = (catch erlang:insert_element(2, {}, a)), + {'EXIT',{badarg,_}} = (catch erlang:insert_element(6, {b,b,b,b}, a)), + {'EXIT',{badarg,_}} = (catch erlang:insert_element(1 bsl 20, {b,b,b,b}, a)), + ok. + +%% Tests the erlang:delete_element/2 BIF. +t_delete_element(Config) when is_list(Config) -> + {} = erlang:delete_element(1, {a}), + {{b,b},c} = erlang:delete_element(1, {a,{b,b},c}), + {a,b} = erlang:delete_element(2, {a,c,b}), + [2,3|_] = tuple_to_list(erlang:delete_element(1, list_to_tuple(lists:seq(1, 1 bsl 20)))), + [1,3|_] = tuple_to_list(erlang:delete_element(2, list_to_tuple(lists:seq(1, 1 bsl 20)))), + [(1 bsl 20) - 1, (1 bsl 20) - 2 |_] = lists:reverse(tuple_to_list(erlang:delete_element(1 bsl 20, list_to_tuple(lists:seq(1, 1 bsl 20))))), + [(1 bsl 20), (1 bsl 20) - 2 |_] = lists:reverse(tuple_to_list(erlang:delete_element((1 bsl 20) - 1, list_to_tuple(lists:seq(1, 1 bsl 20))))), %% Error cases. 
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(0, def, [{1,a}])), - ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{-1,a}])), - ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{0,a}])), - ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{6,z}])), - ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(a, def, [{6,z}])), - ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{1,a}|b])), - ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [42])), - ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [[a,b,c]])), - ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, non_list)), + {'EXIT',{badarg,_}} = (catch erlang:delete_element(1, [])), + {'EXIT',{badarg,_}} = (catch erlang:delete_element(1, a)), + {'EXIT',{badarg,_}} = (catch erlang:delete_element(0, {})), + {'EXIT',{badarg,_}} = (catch erlang:delete_element(-1, {})), + {'EXIT',{badarg,_}} = (catch erlang:delete_element(1, {})), + {'EXIT',{badarg,_}} = (catch erlang:delete_element(0, {b,b,b,b,b})), + {'EXIT',{badarg,_}} = (catch erlang:delete_element(5, {b,b,b,b})), + {'EXIT',{badarg,_}} = (catch erlang:delete_element(1 bsl 20, {b,c,b,b,b})), ok. + %% Tests the append_element/2 BIF. t_append_element(Config) when is_list(Config) -> - t_append_element({}, 2048, 2048). + ok = t_append_element({}, 2048, 2048), + + % test upper boundary, 16777215 elements + MaxSize = 1 bsl 24 - 1, + MaxTuple = list_to_tuple(lists:seq(1, MaxSize)), + {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)), + ok. t_append_element(_Tuple, 0, _High) -> ok; t_append_element(Tuple, N, High) -> @@ -261,7 +332,7 @@ verify_seq([High|T], High, Lower) -> %% (This is known to crash earlier versions of BEAM.) tuple_with_case(Config) when is_list(Config) -> - ?line {reply, true} = tuple_with_case(), + {reply, true} = tuple_with_case(), ok. tuple_with_case() -> @@ -280,21 +351,21 @@ foo() -> ignored. %% Test to build a tuple in a guard. tuple_in_guard(Config) when is_list(Config) -> - ?line Tuple1 = id({a,b}), - ?line Tuple2 = id({a,b,c}), - ?line if - Tuple1 == {element(1, Tuple2),element(2, Tuple2)} -> - ok; - true -> - ?line test_server:fail() - end, - ?line if - Tuple2 == {element(1, Tuple2),element(2, Tuple2), - element(3, Tuple2)} -> - ok; - true -> - ?line test_server:fail() - end, + Tuple1 = id({a,b}), + Tuple2 = id({a,b,c}), + if + Tuple1 == {element(1, Tuple2),element(2, Tuple2)} -> + ok; + true -> + test_server:fail() + end, + if + Tuple2 == {element(1, Tuple2),element(2, Tuple2), + element(3, Tuple2)} -> + ok; + true -> + test_server:fail() + end, ok. %% Use this function to avoid compile-time evaluation of an expression. diff --git a/erts/emulator/zlib/zlib.mk b/erts/emulator/zlib/zlib.mk index fa1f159fae..ff5ffa5328 100644 --- a/erts/emulator/zlib/zlib.mk +++ b/erts/emulator/zlib/zlib.mk @@ -63,12 +63,12 @@ endif # gcov ifeq ($(TARGET), win32) $(ZLIB_LIBRARY): $(ZLIB_OBJS) - $(AR) -out:$@ $(ZLIB_OBJS) + $(V_AR) -out:$@ $(ZLIB_OBJS) else $(ZLIB_LIBRARY): $(ZLIB_OBJS) - $(AR) $(ARFLAGS) $@ $(ZLIB_OBJS) + $(V_AR) $(ARFLAGS) $@ $(ZLIB_OBJS) -@ ($(RANLIB) $@ || true) 2>/dev/null endif $(ZLIB_OBJDIR)/%.o: zlib/%.c - $(CC) -c $(ZLIB_CFLAGS) -o $@ $< + $(V_CC) -c $(ZLIB_CFLAGS) -o $@ $<
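The $(V_AR) and $(V_CC) prefixes introduced in the zlib.mk rules above rely on a "silent rules" convention defined elsewhere in the OTP build system; those definitions are not part of this diff. Below is a minimal sketch of the pattern they assume, for GNU make. The helper names (V, v_AR_0, v_CC_0) and the echoed tags are illustrative assumptions, not the actual OTP definitions.

# Sketch only (assumed, not from this commit): V=0 (default) prints a short
# tag per recipe, V=1 echoes the full command line.
V ?= 0

# When V=0, prepend an "@echo" that prints a one-line tag and suppresses
# echoing of the real command; when V=1, expand to nothing.
v_AR_0 = @echo "  AR      $@";
v_AR_1 =
V_AR = $(v_AR_$(V)) $(AR)

v_CC_0 = @echo "  CC      $@";
v_CC_1 =
V_CC = $(v_CC_$(V)) $(CC)

With definitions along these lines, the archive rule above would print a short "  AR      $(ZLIB_LIBRARY)" tag during a default build and show the full archiver command when the tree is built with "make V=1".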