Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/Makefile.in | 66
-rw-r--r--  erts/emulator/beam/atom.names | 9
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 792
-rw-r--r--  erts/emulator/beam/beam_bp.c | 2004
-rw-r--r--  erts/emulator/beam/beam_bp.h | 233
-rw-r--r--  erts/emulator/beam/beam_catches.c | 136
-rw-r--r--  erts/emulator/beam/beam_catches.h | 11
-rw-r--r--  erts/emulator/beam/beam_debug.c | 30
-rw-r--r--  erts/emulator/beam/beam_emu.c | 421
-rw-r--r--  erts/emulator/beam/beam_load.c | 600
-rw-r--r--  erts/emulator/beam/beam_load.h | 49
-rw-r--r--  erts/emulator/beam/beam_ranges.c | 349
-rw-r--r--  erts/emulator/beam/bif.c | 815
-rw-r--r--  erts/emulator/beam/bif.h | 39
-rw-r--r--  erts/emulator/beam/bif.tab | 309
-rw-r--r--  erts/emulator/beam/binary.c | 6
-rw-r--r--  erts/emulator/beam/break.c | 198
-rw-r--r--  erts/emulator/beam/code_ix.c | 168
-rw-r--r--  erts/emulator/beam/code_ix.h | 141
-rw-r--r--  erts/emulator/beam/copy.c | 2
-rw-r--r--  erts/emulator/beam/dist.c | 350
-rw-r--r--  erts/emulator/beam/dist.h | 14
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.c | 11
-rw-r--r--  erts/emulator/beam/erl_afit_alloc.h | 5
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 51
-rw-r--r--  erts/emulator/beam/erl_alloc.h | 2
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 49
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c | 920
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h | 55
-rw-r--r--  erts/emulator/beam/erl_ao_firstfit_alloc.c | 16
-rw-r--r--  erts/emulator/beam/erl_async.c | 18
-rw-r--r--  erts/emulator/beam/erl_bestfit_alloc.c | 45
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 69
-rw-r--r--  erts/emulator/beam/erl_bif_chksum.c | 13
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 200
-rwxr-xr-x  erts/emulator/beam/erl_bif_info.c | 424
-rw-r--r--  erts/emulator/beam/erl_bif_op.c | 5
-rw-r--r--  erts/emulator/beam/erl_bif_os.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_port.c | 867
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 27
-rw-r--r--  erts/emulator/beam/erl_bif_timer.c | 22
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 819
-rw-r--r--  erts/emulator/beam/erl_bits.c | 18
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c | 5
-rw-r--r--  erts/emulator/beam/erl_db.c | 179
-rw-r--r--  erts/emulator/beam/erl_db.h | 10
-rw-r--r--  erts/emulator/beam/erl_db_tree.c | 12
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 63
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 8
-rw-r--r--  erts/emulator/beam/erl_debug.c | 28
-rw-r--r--  erts/emulator/beam/erl_driver.h | 53
-rw-r--r--  erts/emulator/beam/erl_fun.h | 2
-rw-r--r--  erts/emulator/beam/erl_gc.c | 98
-rw-r--r--  erts/emulator/beam/erl_goodfit_alloc.c | 13
-rw-r--r--  erts/emulator/beam/erl_init.c | 205
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 27
-rw-r--r--  erts/emulator/beam/erl_lock_check.h | 5
-rw-r--r--  erts/emulator/beam/erl_message.c | 279
-rw-r--r--  erts/emulator/beam/erl_message.h | 29
-rw-r--r--  erts/emulator/beam/erl_monitors.c | 13
-rw-r--r--  erts/emulator/beam/erl_monitors.h | 2
-rw-r--r--  erts/emulator/beam/erl_mtrace.c | 2
-rw-r--r--  erts/emulator/beam/erl_nif.c | 123
-rw-r--r--  erts/emulator/beam/erl_nif.h | 6
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 4
-rw-r--r--  erts/emulator/beam/erl_node_container_utils.h | 67
-rw-r--r--  erts/emulator/beam/erl_node_tables.c | 77
-rw-r--r--  erts/emulator/beam/erl_node_tables.h | 9
-rw-r--r--  erts/emulator/beam/erl_port.h | 944
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 2202
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 169
-rw-r--r--  erts/emulator/beam/erl_printf_term.c | 5
-rw-r--r--  erts/emulator/beam/erl_process.c | 4925
-rw-r--r--  erts/emulator/beam/erl_process.h | 719
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 6
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 33
-rw-r--r--  erts/emulator/beam/erl_process_lock.c | 547
-rw-r--r--  erts/emulator/beam/erl_process_lock.h | 419
-rw-r--r--  erts/emulator/beam/erl_ptab.c | 1566
-rw-r--r--  erts/emulator/beam/erl_ptab.h | 472
-rw-r--r--  erts/emulator/beam/erl_resolv_dns.c | 23
-rw-r--r--  erts/emulator/beam/erl_resolv_nodns.c | 23
-rw-r--r--  erts/emulator/beam/erl_smp.h | 28
-rw-r--r--  erts/emulator/beam/erl_sys_driver.h | 1
-rw-r--r--  erts/emulator/beam/erl_term.c | 6
-rw-r--r--  erts/emulator/beam/erl_term.h | 36
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c | 288
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h | 73
-rw-r--r--  erts/emulator/beam/erl_threads.h | 329
-rw-r--r--  erts/emulator/beam/erl_time.h | 4
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 85
-rw-r--r--  erts/emulator/beam/erl_trace.c | 740
-rw-r--r--  erts/emulator/beam/erl_trace.h | 141
-rw-r--r--  erts/emulator/beam/erl_unicode.c | 79
-rw-r--r--  erts/emulator/beam/erl_utils.h | 215
-rw-r--r--  erts/emulator/beam/erlang_dtrace.d | 6
-rw-r--r--  erts/emulator/beam/export.c | 354
-rw-r--r--  erts/emulator/beam/export.h | 48
-rw-r--r--  erts/emulator/beam/external.c | 6
-rwxr-xr-x  erts/emulator/beam/global.h | 897
-rw-r--r--  erts/emulator/beam/index.c | 25
-rw-r--r--  erts/emulator/beam/index.h | 14
-rw-r--r--  erts/emulator/beam/io.c | 4514
-rw-r--r--  erts/emulator/beam/module.c | 159
-rw-r--r--  erts/emulator/beam/module.h | 64
-rw-r--r--  erts/emulator/beam/ops.tab | 18
-rw-r--r--  erts/emulator/beam/register.c | 61
-rw-r--r--  erts/emulator/beam/register.h | 19
-rw-r--r--  erts/emulator/beam/sys.h | 214
-rw-r--r--  erts/emulator/beam/utils.c | 328
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c | 23
-rw-r--r--  erts/emulator/drivers/common/gzio.c | 5
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 402
-rw-r--r--  erts/emulator/drivers/unix/unix_efile.c | 581
-rw-r--r--  erts/emulator/drivers/win32/registry_drv.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_amd64_bifs.m4 | 6
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c | 14
-rw-r--r--  erts/emulator/hipe/hipe_bif2.c | 7
-rw-r--r--  erts/emulator/hipe/hipe_bif2.tab | 1
-rw-r--r--  erts/emulator/hipe/hipe_bif_list.m4 | 6
-rw-r--r--  erts/emulator/hipe/hipe_debug.c | 13
-rw-r--r--  erts/emulator/hipe/hipe_mkliterals.c | 97
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c | 10
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.h | 3
-rw-r--r--  erts/emulator/hipe/hipe_primops.h | 1
-rw-r--r--  erts/emulator/hipe/hipe_stack.c | 10
-rw-r--r--  erts/emulator/hipe/hipe_stack.h | 4
-rw-r--r--  erts/emulator/hipe/hipe_x86.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_x86_bifs.m4 | 12
-rw-r--r--  erts/emulator/hipe/hipe_x86_gc.h | 5
-rw-r--r--  erts/emulator/hipe/hipe_x86_glue.h | 3
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c | 58
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 1134
-rw-r--r--  erts/emulator/sys/common/erl_mseg.h | 31
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 55
-rw-r--r--  erts/emulator/sys/common/erl_poll.h | 2
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys_ddll.c | 4
-rw-r--r--  erts/emulator/sys/unix/sys.c | 127
-rw-r--r--  erts/emulator/sys/unix/sys_float.c | 8
-rw-r--r--  erts/emulator/sys/vxworks/driver_int.h | 30
-rw-r--r--  erts/emulator/sys/vxworks/erl_main.c | 45
-rw-r--r--  erts/emulator/sys/vxworks/erl_vxworks_sys.h | 184
-rw-r--r--  erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c | 253
-rw-r--r--  erts/emulator/sys/vxworks/sys.c | 2610
-rw-r--r--  erts/emulator/sys/win32/erl_win32_sys_ddll.c | 4
-rw-r--r--  erts/emulator/sys/win32/erl_win_dyn_driver.h | 8
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h | 5
-rwxr-xr-x  erts/emulator/sys/win32/sys.c | 169
-rw-r--r--  erts/emulator/sys/win32/sys_float.c | 8
-rw-r--r--  erts/emulator/sys/win32/sys_time.c | 6
-rw-r--r--  erts/emulator/test/Makefile | 9
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/allocator_test.h | 8
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/basic.c | 2
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/bucket_mask.c | 65
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/coalesce.c | 6
-rw-r--r--  erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c | 4
-rw-r--r--  erts/emulator/test/beam_SUITE.erl | 77
-rw-r--r--  erts/emulator/test/bif_SUITE.erl | 283
-rw-r--r--  erts/emulator/test/binary_SUITE.erl | 26
-rw-r--r--  erts/emulator/test/bs_bit_binaries_SUITE.erl | 45
-rw-r--r--  erts/emulator/test/bs_construct_SUITE.erl | 91
-rw-r--r--  erts/emulator/test/busy_port_SUITE.erl | 30
-rw-r--r--  erts/emulator/test/call_trace_SUITE.erl | 133
-rw-r--r--  erts/emulator/test/call_trace_SUITE_data/my_upgrade_test.erl | 26
-rw-r--r--  erts/emulator/test/code_parallel_load_SUITE.erl | 198
-rw-r--r--  erts/emulator/test/ddll_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/distribution_SUITE.erl | 21
-rw-r--r--  erts/emulator/test/driver_SUITE.erl | 67
-rw-r--r--  erts/emulator/test/driver_SUITE_data/Makefile.src | 3
-rw-r--r--  erts/emulator/test/driver_SUITE_data/chkio_drv.c | 2
-rw-r--r--  erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c | 2
-rw-r--r--  erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c | 2
-rw-r--r--  erts/emulator/test/driver_SUITE_data/missing_callback_drv.c | 2
-rw-r--r--  erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c | 2
-rw-r--r--  erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c | 178
-rw-r--r--  erts/emulator/test/driver_SUITE_data/timer_drv.c | 12
-rw-r--r--  erts/emulator/test/emulator.spec.vxworks | 26
-rw-r--r--  erts/emulator/test/emulator_bench.spec | 1
-rw-r--r--  erts/emulator/test/estone_SUITE.erl | 16
-rw-r--r--  erts/emulator/test/estone_SUITE_data/estone_cat.c | 4
-rw-r--r--  erts/emulator/test/fun_SUITE.erl | 4
-rw-r--r--  erts/emulator/test/gc_SUITE.erl | 59
-rw-r--r--  erts/emulator/test/match_spec_SUITE.erl | 16
-rw-r--r--  erts/emulator/test/mtx_SUITE_data/Makefile.src | 7
-rw-r--r--  erts/emulator/test/nofrag_SUITE.erl | 26
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 1244
-rw-r--r--  erts/emulator/test/port_SUITE_data/dead_port.c | 24
-rw-r--r--  erts/emulator/test/port_SUITE_data/port_test.c | 35
-rw-r--r--  erts/emulator/test/port_SUITE_data/reclaim.h | 60
-rw-r--r--  erts/emulator/test/port_bif_SUITE_data/port_test.c | 37
-rw-r--r--  erts/emulator/test/port_bif_SUITE_data/reclaim.h | 60
-rw-r--r--  erts/emulator/test/process_SUITE.erl | 2077
-rw-r--r--  erts/emulator/test/save_calls_SUITE.erl | 27
-rw-r--r--  erts/emulator/test/smoke_test_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/timer_bif_SUITE.erl | 67
-rw-r--r--  erts/emulator/test/trace_local_SUITE.erl | 186
-rw-r--r--  erts/emulator/test/trace_port_SUITE.erl | 7
-rw-r--r--  erts/emulator/test/tuple_SUITE.erl | 305
-rwxr-xr-x  erts/emulator/utils/beam_makeops | 21
-rwxr-xr-x  erts/emulator/utils/make_tables | 2
-rw-r--r--  erts/emulator/valgrind/suppress.halfword | 56
-rw-r--r--  erts/emulator/valgrind/suppress.patched.3.6.0 | 20
-rw-r--r--  erts/emulator/valgrind/suppress.standard | 12
203 files changed, 24205 insertions, 19944 deletions
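A recurring pattern in the hunks below is a two-copy "active/staging" scheme for code and breakpoint data: readers only ever follow the active index, while a single writer prepares the staging copy and publishes it by swapping the two indices (see erts_start_staging_code_ix()/erts_commit_staging_code_ix() in beam_bif_load.c and erts_commit_staged_bp() in beam_bp.c). The following is a minimal standalone sketch of that publish-by-index-swap idea in plain C11; it is not ERTS code, every name in it is invented for illustration, and it uses acquire/release atomics where ERTS instead combines relaxed ("nob") atomics with its thread-progress machinery.

#include <stdatomic.h>

typedef struct {
    int flags;                 /* stands in for the per-copy code/breakpoint data */
} copy_t;

static copy_t data[2];
static atomic_int active_ix  = 0;   /* copy read by running schedulers          */
static atomic_int staging_ix = 1;   /* copy being prepared by the single writer */

/* Reader side: lock-free, just follow the currently active index. */
static copy_t *active_copy(void)
{
    return &data[atomic_load_explicit(&active_ix, memory_order_acquire)];
}

/* Writer side: mutate data[staging_ix], then publish by swapping the indices.
 * ERTS additionally waits for all schedulers to pass a thread-progress point
 * before reusing the old copy; that step is omitted in this sketch. */
static void commit_staged(void)
{
    int staging = atomic_load(&staging_ix);
    int active  = atomic_load(&active_ix);

    atomic_store_explicit(&active_ix, staging, memory_order_release);
    atomic_store(&staging_ix, active);
}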
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 985ef72517..89c948cc00 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -61,7 +61,7 @@ else
ifeq ($(TYPE),purify)
PURIFY = purify $(PURIFY_BUILD_OPTIONS)
TYPEMARKER = .purify
-TYPE_FLAGS = $(DEBUG_CFLAGS) -DPURIFY -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS
+TYPE_FLAGS = $(DEBUG_CFLAGS) -DPURIFY -DNO_JUMP_TABLE
ENABLE_ALLOC_TYPE_VARS += purify
else
@@ -92,7 +92,7 @@ else
ifeq ($(TYPE),valgrind)
PURIFY =
TYPEMARKER = .valgrind
-TYPE_FLAGS = $(DEBUG_CFLAGS) -DVALGRIND -DNO_JUMP_TABLE -DERTS_MSEG_FAKE_SEGMENTS
+TYPE_FLAGS = $(DEBUG_CFLAGS) -DVALGRIND -DNO_JUMP_TABLE
ENABLE_ALLOC_TYPE_VARS += valgrind
else
@@ -332,15 +332,11 @@ LIBS += $(THR_LIBS)
ifneq ($(findstring erts_internal_r, $(THR_LIBS)),erts_internal_r)
-ifeq ($(findstring vxworks,$(TARGET)),vxworks)
-ERTS_INTERNAL_LIB=erts_internal
-else
ifneq ($(strip $(THR_LIB_NAME)),)
ERTS_INTERNAL_LIB=erts_internal_r
else
ERTS_INTERNAL_LIB=erts_internal
endif
-endif
LIBS += -l$(ERTS_INTERNAL_LIB)$(TYPEMARKER)
@@ -463,8 +459,6 @@ _create_dirs := $(shell mkdir -p $(CREATE_DIRS))
GENERATE =
HIPE_ASM =
-ifeq ($(findstring vxworks,$(TARGET)),vxworks)
-else
ifdef HIPE_ENABLED
HIPE_ASM += $(TTF_DIR)/hipe_x86_asm.h \
$(TTF_DIR)/hipe_amd64_asm.h \
@@ -476,7 +470,6 @@ GENERATE += $(HIPE_ASM) \
$(TTF_DIR)/hipe_literals.h \
$(BINDIR)/hipe_mkliterals$(TF_MARKER)
endif
-endif
ifdef DTRACE_ENABLED
# global.h causes problems by including dtrace-wrapper.h which includes
@@ -567,7 +560,8 @@ $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/zlib.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \
$(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erlang.beam
+ $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam
LANG=C $(PERL) utils/make_preload $(MAKE_PRELOAD_EXTRA) -rc $^ > $@
else
PRELOAD_OBJ = $(OBJDIR)/preload.o
@@ -579,7 +573,8 @@ $(PRELOAD_SRC): $(ERL_TOP)/erts/preloaded/ebin/otp_ring0.beam \
$(ERL_TOP)/erts/preloaded/ebin/zlib.beam \
$(ERL_TOP)/erts/preloaded/ebin/prim_zip.beam \
$(ERL_TOP)/erts/preloaded/ebin/erl_prim_loader.beam \
- $(ERL_TOP)/erts/preloaded/ebin/erlang.beam
+ $(ERL_TOP)/erts/preloaded/ebin/erlang.beam \
+ $(ERL_TOP)/erts/preloaded/ebin/erts_internal.beam
LANG=C $(PERL) utils/make_preload -old $^ > $@
endif
@@ -616,11 +611,6 @@ ifdef PERFCTR_PATH
INCLUDES += -I$(PERFCTR_PATH)/usr.lib -I$(PERFCTR_PATH)/linux/include
endif
-# Need to include etc dir on VxWorks
-ifeq ($(findstring vxworks,$(TARGET)),vxworks)
-INCLUDES += -I$(ERL_TOP)/erts/etc/vxworks
-endif
-
ifeq ($(TARGET),win32)
$(OBJDIR)/dll_sys.o: sys/$(ERLANG_OSTYPE)/sys.c
$(CC) $(CFLAGS) -DERL_RUN_SHARED_LIB=1 $(INCLUDES) -c $< -o $@
@@ -663,12 +653,6 @@ $(OBJDIR)/%.o: drivers/common/%.c
$(OBJDIR)/%.o: drivers/$(ERLANG_OSTYPE)/%.c
$(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -Idrivers/$(ERLANG_OSTYPE) -I../etc/$(ERLANG_OSTYPE) -c $< -o $@
-# VxWorks uses unix drivers too...
-ifeq ($(findstring vxworks,$(TARGET)),vxworks)
-$(OBJDIR)/%.o: drivers/unix/%.c
- $(CC) $(CFLAGS) $(INCLUDES) -Idrivers/common -c $< -o $@
-endif
-
# ----------------------------------------------------------------------
# Specials
#
@@ -709,7 +693,9 @@ EMU_OBJS = \
$(OBJDIR)/beam_emu.o $(OBJDIR)/beam_opcodes.o \
$(OBJDIR)/beam_load.o $(OBJDIR)/beam_bif_load.o \
$(OBJDIR)/beam_debug.o $(OBJDIR)/beam_bp.o \
- $(OBJDIR)/beam_catches.o
+ $(OBJDIR)/beam_catches.o \
+ $(OBJDIR)/code_ix.o \
+ $(OBJDIR)/beam_ranges.o
RUN_OBJS = \
$(OBJDIR)/erl_pbifs.o $(OBJDIR)/benchmark.o \
@@ -751,7 +737,8 @@ RUN_OBJS = \
$(OBJDIR)/packet_parser.o $(OBJDIR)/safe_hash.o \
$(OBJDIR)/erl_zlib.o $(OBJDIR)/erl_nif.o \
$(OBJDIR)/erl_bif_binary.o $(OBJDIR)/erl_ao_firstfit_alloc.o \
- $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o
+ $(OBJDIR)/erl_thr_queue.o $(OBJDIR)/erl_sched_spec_pre_alloc.o \
+ $(OBJDIR)/erl_ptab.o
ifeq ($(TARGET),win32)
DRV_OBJS = \
@@ -779,12 +766,8 @@ OS_OBJS = \
$(OBJDIR)/gzio.o \
$(OBJDIR)/elib_memmove.o
-ifeq ($(findstring vxworks,$(TARGET)),vxworks)
- OS_OBJS += $(OBJDIR)/int64.o
-else
OS_OBJS += $(OBJDIR)/sys_float.o \
$(OBJDIR)/sys_time.o
-endif
DRV_OBJS = \
$(OBJDIR)/efile_drv.o \
$(OBJDIR)/inet_drv.o \
@@ -792,9 +775,7 @@ DRV_OBJS = \
$(OBJDIR)/ram_file_drv.o
endif
-ifneq ($(findstring vxworks,$(TARGET)),vxworks)
DRV_OBJS += $(OBJDIR)/ttsl_drv.o
-endif
ifeq ($(ERTS_ENABLE_KERNEL_POLL),yes)
OS_OBJS += $(OBJDIR)/erl_poll.kp.o \
@@ -932,31 +913,6 @@ $(OBJDIR)/hipe_arm.o: hipe/hipe_arm.c
# end of HiPE section
########################################
-ifeq ($(findstring vxworks,$(TARGET)),vxworks)
-########################################
-# Extract what we need from libgcc.a
-########################################
-GCCLIBFLAGS=@GCCLIBFLAGS@
-STRIP=@STRIP@
-SYMPREFIX=@SYMPREFIX@
-
-NEEDFUNCTIONS=__divdi3 __moddi3 __udivdi3
-KEEPSYMS=$(NEEDFUNCTIONS:%=-K $(SYMPREFIX)%)
-
-$(OBJDIR)/int64.o: $(TARGET)/int64.c
- $(CC) -o $(OBJDIR)/int64tmp.o -c $(TARGET)/int64.c
- $(LD) -o $(OBJDIR)/int64.o $(OBJDIR)/int64tmp.o $(LDFLAGS) $(GCCLIBFLAGS)
- $(STRIP) $(KEEPSYMS) $(OBJDIR)/int64.o
-
-$(TARGET)/int64.c:
- echo 'void dummy(void); void dummy(void) {' > $(TARGET)/int64.c
- for x in $(NEEDFUNCTIONS); do echo 'extern void '$$x'();' \
- >> $(TARGET)/int64.c; done
- for x in $(NEEDFUNCTIONS); do echo $$x'();' >> $(TARGET)/int64.c; done
- echo '}' >> $(TARGET)/int64.c
-
-endif
-
# ----------------------------------------------------------------------
# The emulator itself
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index afcbd732df..c47a608215 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -94,6 +94,7 @@ atom asynchronous
atom atom
atom atom_used
atom attributes
+atom await_port_send_result
atom await_proc_exit
atom await_sched_wall_time_modifications
atom awaiting_load
@@ -152,6 +153,7 @@ atom connection_closed
atom cons
atom const
atom context_switches
+atom control
atom copy
atom cpu
atom cpu_timestamp
@@ -204,6 +206,7 @@ atom erlang
atom ERROR='ERROR'
atom error_handler
atom error_logger
+atom erts_internal
atom ets
atom ETS_TRANSFER='ETS-TRANSFER'
atom event
@@ -237,6 +240,7 @@ atom gc_end
atom gc_start
atom Ge='>='
atom generational
+atom get_data
atom get_seq_token
atom get_tcw
atom getenv
@@ -408,6 +412,7 @@ atom overlapped_io
atom owner
atom packet
atom packet_size
+atom parallelism
atom Plus='+'
atom pause
atom pending
@@ -419,12 +424,12 @@ atom pid
atom port
atom ports
atom port_count
+atom port_limit
atom print
atom priority
atom private
atom process
atom processes
-atom processes_trap
atom processes_used
atom process_count
atom process_display
@@ -434,6 +439,7 @@ atom procs
atom profile
atom protected
atom protection
+atom ptab_list_continue
atom public
atom purify
atom quantify
@@ -481,6 +487,7 @@ atom sequential_trace_token
atom serial
atom set
atom set_cpu_topology
+atom set_data
atom set_on_first_link
atom set_on_first_spawn
atom set_on_link
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 02f37bf9bc..e0a4f86d2d 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -37,25 +37,83 @@
static void set_default_trace_pattern(Eterm module);
static Eterm check_process_code(Process* rp, Module* modp);
-static void delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp);
-static void delete_export_references(Eterm module);
-static int purge_module(int module);
+static void delete_code(Module* modp);
static void decrement_refc(BeamInstr* code);
static int is_native(BeamInstr* code);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
static int any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
-static void remove_from_address_table(BeamInstr* code);
-Eterm
-load_module_2(BIF_ALIST_2)
+
+
+BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
{
- Eterm reason;
- Eterm* hp;
- int sz;
- byte* code;
+ Module* modp;
Eterm res;
+ ErtsCodeIndex code_ix;
+
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+ code_ix = erts_active_code_ix();
+ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
+ return am_undefined;
+ }
+ erts_rlock_old_code(code_ix);
+ res = ((modp->curr.code && is_native(modp->curr.code)) ||
+ (modp->old.code != 0 && is_native(modp->old.code))) ?
+ am_true : am_false;
+ erts_runlock_old_code(code_ix);
+ return res;
+}
+
+BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
+{
+ Module* modp;
+ Eterm res;
+
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD3(bif_export[BIF_code_make_stub_module_3],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+
+ modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
+
+ if (modp && modp->curr.num_breakpoints > 0) {
+ ASSERT(modp->curr.code != NULL);
+ erts_clear_module_break(modp);
+ ASSERT(modp->curr.num_breakpoints == 0);
+ }
+
+ erts_start_staging_code_ix();
+
+ res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+
+ if (res == BIF_ARG_1) {
+ erts_end_staging_code_ix();
+ erts_commit_staging_code_ix();
+ }
+ else {
+ erts_abort_staging_code_ix();
+ }
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_release_code_write_permission();
+ return res;
+}
+
+BIF_RETTYPE
+prepare_loading_2(BIF_ALIST_2)
+{
byte* temp_alloc = NULL;
- struct LoaderState* stp;
+ byte* code;
+ Uint sz;
+ Binary* magic;
+ Eterm reason;
+ Eterm* hp;
+ Eterm res;
if (is_not_atom(BIF_ARG_1)) {
error:
@@ -65,108 +123,307 @@ load_module_2(BIF_ALIST_2)
if ((code = erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc)) == NULL) {
goto error;
}
- hp = HAlloc(BIF_P, 3);
- /*
- * Read the BEAM file and prepare the module for loading.
- */
- stp = erts_alloc_loader_state();
+ magic = erts_alloc_loader_state();
sz = binary_size(BIF_ARG_2);
- reason = erts_prepare_loading(stp, BIF_P, BIF_P->group_leader,
+ reason = erts_prepare_loading(magic, BIF_P, BIF_P->group_leader,
&BIF_ARG_1, code, sz);
erts_free_aligned_binary_bytes(temp_alloc);
if (reason != NIL) {
+ hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_error, reason);
BIF_RET(res);
}
+ hp = HAlloc(BIF_P, PROC_BIN_SIZE);
+ res = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), magic);
+ erts_refc_dec(&magic->refc, 1);
+ BIF_RET(res);
+}
+
+struct m {
+ Binary* code;
+ Eterm module;
+ Module* modp;
+ Uint exception;
+};
+
+static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int);
+#ifdef ERTS_SMP
+static void smp_code_ix_commiter(void*);
+
+static struct /* Protected by code_write_permission */
+{
+ Process* stager;
+ ErtsThrPrgrLaterOp lop;
+}commiter_state;
+#endif
+
+static Eterm
+exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
+{
+ Eterm* hp = HAlloc(p, 3 + 2*exceptions);
+ Eterm res = NIL;
+
+ mp += exceptions - 1;
+ while (exceptions > 0) {
+ if (mp->exception) {
+ res = CONS(hp, mp->module, res);
+ hp += 2;
+ exceptions--;
+ }
+ mp--;
+ }
+ return TUPLE2(hp, tag, res);
+}
+
+
+BIF_RETTYPE
+finish_loading_1(BIF_ALIST_1)
+{
+ int i;
+ int n;
+ struct m* p = NULL;
+ Uint exceptions;
+ Eterm res;
+ int is_blocking = 0;
+ int do_commit = 0;
+
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_finish_loading_1], BIF_P, BIF_ARG_1);
+ }
/*
- * Stop all other processes and finish the loading of the module.
+ * Validate the argument before we start loading; it must be a
+ * proper list where each element is a magic binary containing
+ * prepared (not previously loaded) code.
+ *
+ * First count the number of elements and allocate an array
+ * to keep the elements in.
*/
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- reason = erts_finish_loading(stp, BIF_P, 0, &BIF_ARG_1);
- if (reason != NIL) {
- res = TUPLE2(hp, am_error, reason);
- } else {
- set_default_trace_pattern(BIF_ARG_1);
- res = TUPLE2(hp, am_module, BIF_ARG_1);
+ n = list_length(BIF_ARG_1);
+ if (n == -1) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+ goto done;
}
+ p = erts_alloc(ERTS_ALC_T_LOADER_TMP, n*sizeof(struct m));
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(res);
-}
+ /*
+ * We now know that the argument is a proper list. Validate
+ * and collect the binaries into the array.
+ */
-BIF_RETTYPE purge_module_1(BIF_ALIST_1)
-{
- int purge_res;
+ for (i = 0; i < n; i++) {
+ Eterm* cons = list_val(BIF_ARG_1);
+ Eterm term = CAR(cons);
+ ProcBin* pb;
- if (is_not_atom(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ if (!ERTS_TERM_IS_MAGIC_BINARY(term)) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+ goto done;
+ }
+ pb = (ProcBin*) binary_val(term);
+ p[i].code = pb->val;
+ p[i].module = erts_module_for_prepared_code(p[i].code);
+ if (p[i].module == NIL) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+ goto done;
+ }
+ BIF_ARG_1 = CDR(cons);
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ /*
+ * Since we cannot handle atomic loading of a group of modules
+ * if one or more of them uses on_load, we will only allow one
+ * element in the list. This limitation is intended to be
+ * lifted in the future.
+ */
- erts_export_consolidate();
- purge_res = purge_module(atom_val(BIF_ARG_1));
+ if (n > 1) {
+ ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT);
+ goto done;
+ }
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ /*
+ * All types are correct. There cannot be a BADARG from now on.
+ * Before we can start loading, we must check whether any of
+ * the modules already has old code. To avoid a race, we must
+ * not allow other process to initiate a code loading operation
+ * from now on.
+ */
- if (purge_res < 0) {
- BIF_ERROR(BIF_P, BADARG);
+ res = am_ok;
+ erts_start_staging_code_ix();
+
+ for (i = 0; i < n; i++) {
+ p[i].modp = erts_put_module(p[i].module);
+ }
+ for (i = 0; i < n; i++) {
+ if (p[i].modp->curr.num_breakpoints > 0 ||
+ p[i].modp->curr.num_traced_exports > 0 ||
+ erts_is_default_trace_enabled()) {
+ /* tracing involved, fallback with thread blocking */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+ is_blocking = 1;
+ break;
+ }
}
- BIF_RET(am_true);
-}
-BIF_RETTYPE code_is_module_native_1(BIF_ALIST_1)
-{
- Module* modp;
+ if (is_blocking) {
+ for (i = 0; i < n; i++) {
+ if (p[i].modp->curr.num_breakpoints) {
+ erts_clear_module_break(p[i].modp);
+ ASSERT(p[i].modp->curr.num_breakpoints == 0);
+ }
+ }
+ }
- if (is_not_atom(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
+ exceptions = 0;
+ for (i = 0; i < n; i++) {
+ p[i].exception = 0;
+ if (p[i].modp->curr.code && p[i].modp->old.code) {
+ p[i].exception = 1;
+ exceptions++;
+ }
}
- if ((modp = erts_get_module(BIF_ARG_1)) == NULL) {
- return am_undefined;
+
+ if (exceptions) {
+ res = exception_list(BIF_P, am_not_purged, p, exceptions);
+ } else {
+ /*
+ * Now we can load all code. This can't fail.
+ */
+
+ exceptions = 0;
+ for (i = 0; i < n; i++) {
+ Eterm mod;
+ Eterm retval;
+
+ erts_refc_inc(&p[i].code->refc, 1);
+ retval = erts_finish_loading(p[i].code, BIF_P, 0, &mod);
+ ASSERT(retval == NIL || retval == am_on_load);
+ if (retval == am_on_load) {
+ p[i].exception = 1;
+ exceptions++;
+ }
+ }
+
+ /*
+ * Check whether any module has an on_load_handler.
+ */
+
+ if (exceptions) {
+ res = exception_list(BIF_P, am_on_load, p, exceptions);
+ }
+ do_commit = 1;
}
- return ((modp->code && is_native(modp->code)) ||
- (modp->old_code != 0 && is_native(modp->old_code))) ?
- am_true : am_false;
+
+done:
+ return staging_epilogue(BIF_P, do_commit, res, is_blocking, p, n);
}
-BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3)
-{
- Eterm res;
+static Eterm
+staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
+ struct m* loaded, int nloaded)
+{
+#ifdef ERTS_SMP
+ if (is_blocking || !commit)
+#endif
+ {
+ if (commit) {
+ erts_end_staging_code_ix();
+ erts_commit_staging_code_ix();
+ if (loaded) {
+ int i;
+ for (i=0; i < nloaded; i++) {
+ set_default_trace_pattern(loaded[i].module);
+ }
+ }
+ }
+ else {
+ erts_abort_staging_code_ix();
+ }
+ if (loaded) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ }
+ if (is_blocking) {
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ erts_release_code_write_permission();
+ return res;
+ }
+#ifdef ERTS_SMP
+ else {
+ ASSERT(is_value(res));
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ if (loaded) {
+ erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
+ }
+ erts_end_staging_code_ix();
+ /*
+ * Now we must wait for all schedulers to do a memory barrier before
+ * we can commit and let them access the new staged code. This allows
+ * schedulers to read active code_ix in a safe way while executing
+ * without any memory barriers at all.
+ */
+ ASSERT(commiter_state.stager == NULL);
+ commiter_state.stager = c_p;
+ erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL, &commiter_state.lop);
+ erts_smp_proc_inc_refc(c_p);
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ /*
+ * smp_code_ix_commiter() will do the rest "later"
+ * and resume this process to return 'res'.
+ */
+ ERTS_BIF_YIELD_RETURN(c_p, res);
+ }
+#endif
+}
- erts_export_consolidate();
- res = erts_make_stub_module(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- return res;
+#ifdef ERTS_SMP
+static void smp_code_ix_commiter(void* null)
+{
+ Process* p = commiter_state.stager;
+
+ erts_commit_staging_code_ix();
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_dec_refc(p);
+#ifdef DEBUG
+ commiter_state.stager = NULL;
+#endif
+ erts_release_code_write_permission();
}
+#endif /* ERTS_SMP */
+
+
BIF_RETTYPE
check_old_code_1(BIF_ALIST_1)
{
+ ErtsCodeIndex code_ix;
Module* modp;
+ Eterm res = am_false;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
- modp = erts_get_module(BIF_ARG_1);
- if (modp == NULL) { /* Doesn't exist. */
- BIF_RET(am_false);
- } else if (modp->old_code == NULL) { /* No old code. */
- BIF_RET(am_false);
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(BIF_ARG_1, code_ix);
+ if (modp != NULL) {
+ erts_rlock_old_code(code_ix);
+ if (modp->old.code != NULL) {
+ res = am_true;
+ }
+ erts_runlock_old_code(code_ix);
}
- BIF_RET(am_true);
+ BIF_RET(res);
}
Eterm
@@ -180,14 +437,19 @@ check_process_code_2(BIF_ALIST_2)
}
if (is_internal_pid(BIF_ARG_1)) {
Eterm res;
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- goto error;
- modp = erts_get_module(BIF_ARG_2);
+ ErtsCodeIndex code_ix;
+
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(BIF_ARG_2, code_ix);
if (modp == NULL) { /* Doesn't exist. */
return am_false;
- } else if (modp->old_code == NULL) { /* No old code. */
+ }
+ erts_rlock_old_code(code_ix);
+ if (modp->old.code == NULL) { /* No old code. */
+ erts_runlock_old_code(code_ix);
return am_false;
}
+ erts_runlock_old_code(code_ix);
#ifdef ERTS_SMP
rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
@@ -202,7 +464,14 @@ check_process_code_2(BIF_ALIST_2)
ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
BIF_ARG_1, BIF_ARG_2);
}
- res = check_process_code(rp, modp);
+ erts_rlock_old_code(code_ix);
+ if (modp->old.code != NULL) { /* must check again */
+ res = check_process_code(rp, modp);
+ }
+ else {
+ res = am_false;
+ }
+ erts_runlock_old_code(code_ix);
#ifdef ERTS_SMP
if (BIF_P != rp) {
erts_resume(rp, ERTS_PROC_LOCK_MAIN);
@@ -223,56 +492,71 @@ check_process_code_2(BIF_ALIST_2)
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
{
- int res;
+ ErtsCodeIndex code_ix;
+ Module* modp;
+ int is_blocking = 0;
+ int success = 0;
+ Eterm res = NIL;
- if (is_not_atom(BIF_ARG_1))
- goto badarg;
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_delete_module_1], BIF_P, BIF_ARG_1);
+ }
{
- Module *modp = erts_get_module(BIF_ARG_1);
+ erts_start_staging_code_ix();
+ code_ix = erts_staging_code_ix();
+ modp = erts_get_module(BIF_ARG_1, code_ix);
if (!modp) {
res = am_undefined;
}
- else if (modp->old_code != 0) {
+ else if (modp->old.code != 0) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
BIF_ARG_1);
erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
- res = am_badarg;
+ ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
}
else {
- delete_export_references(BIF_ARG_1);
- delete_code(BIF_P, 0, modp);
+ if (modp->curr.num_breakpoints > 0 ||
+ modp->curr.num_traced_exports > 0) {
+ /* we have tracing, retry single threaded */
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+ is_blocking = 1;
+ if (modp->curr.num_breakpoints) {
+ erts_clear_module_break(modp);
+ ASSERT(modp->curr.num_breakpoints == 0);
+ }
+ }
+ delete_code(modp);
res = am_true;
+ success = 1;
}
}
-
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-
- if (res == am_badarg) {
- badarg:
- BIF_ERROR(BIF_P, BADARG);
- }
- BIF_RET(res);
+ return staging_epilogue(BIF_P, success, res, is_blocking, NULL, 0);
}
BIF_RETTYPE module_loaded_1(BIF_ALIST_1)
{
Module* modp;
+ ErtsCodeIndex code_ix;
+ Eterm res = am_false;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
- if ((modp = erts_get_module(BIF_ARG_1)) == NULL ||
- modp->code == NULL ||
- modp->code[MI_ON_LOAD_FUNCTION_PTR] != 0) {
- BIF_RET(am_false);
+ code_ix = erts_active_code_ix();
+ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) != NULL) {
+ if (modp->curr.code != NULL
+ && modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] == 0) {
+ res = am_true;
+ }
}
- BIF_RET(am_true);
+ BIF_RET(res);
}
BIF_RETTYPE pre_loaded_0(BIF_ALIST_0)
@@ -282,27 +566,28 @@ BIF_RETTYPE pre_loaded_0(BIF_ALIST_0)
BIF_RETTYPE loaded_0(BIF_ALIST_0)
{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ Module* modp;
Eterm previous = NIL;
Eterm* hp;
int i;
int j = 0;
-
- for (i = 0; i < module_code_size(); i++) {
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
+
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ if ((modp = module_code(i, code_ix)) != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
j++;
}
}
if (j > 0) {
hp = HAlloc(BIF_P, j*2);
- for (i = 0; i < module_code_size(); i++) {
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
- previous = CONS(hp, make_atom(module_code(i)->module),
- previous);
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ if ((modp=module_code(i,code_ix)) != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
+ previous = CONS(hp, make_atom(modp->module), previous);
hp += 2;
}
}
@@ -312,54 +597,65 @@ BIF_RETTYPE loaded_0(BIF_ALIST_0)
BIF_RETTYPE call_on_load_function_1(BIF_ALIST_1)
{
- Module* modp = erts_get_module(BIF_ARG_1);
- Eterm on_load;
+ Module* modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
- if (!modp || modp->code == 0) {
- error:
- BIF_ERROR(BIF_P, BADARG);
+ if (modp && modp->curr.code) {
+ BIF_TRAP_CODE_PTR_0(BIF_P, modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]);
}
- if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
- goto error;
+ else {
+ BIF_ERROR(BIF_P, BADARG);
}
- BIF_TRAP_CODE_PTR_0(BIF_P, on_load);
}
BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
{
- Module* modp = erts_get_module(BIF_ARG_1);
+ ErtsCodeIndex code_ix;
+ Module* modp;
Eterm on_load;
- if (!modp || modp->code == 0) {
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_finish_after_on_load_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
+ /* ToDo: Use code_ix staging instead of thread blocking */
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(BIF_ARG_1, code_ix);
+
+ if (!modp || modp->curr.code == 0) {
error:
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_release_code_write_permission();
BIF_ERROR(BIF_P, BADARG);
}
- if ((on_load = modp->code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
+ if ((on_load = modp->curr.code[MI_ON_LOAD_FUNCTION_PTR]) == 0) {
goto error;
}
if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
goto error;
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
-
if (BIF_ARG_2 == am_true) {
int i;
/*
* The on_load function succeded. Fix up export entries.
*/
- for (i = 0; i < export_list_size(); i++) {
- Export *ep = export_list(i);
+ for (i = 0; i < export_list_size(code_ix); i++) {
+ Export *ep = export_list(i,code_ix);
if (ep != NULL &&
ep->code[0] == BIF_ARG_1 &&
ep->code[4] != 0) {
- ep->address = (void *) ep->code[4];
+ ep->addressv[code_ix] = (void *) ep->code[4];
ep->code[4] = 0;
}
}
- modp->code[MI_ON_LOAD_FUNCTION_PTR] = 0;
+ modp->curr.code[MI_ON_LOAD_FUNCTION_PTR] = 0;
set_default_trace_pattern(BIF_ARG_1);
} else if (BIF_ARG_2 == am_false) {
BeamInstr* code;
@@ -370,19 +666,21 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
* This is an combination of delete and purge. We purge
* the current code; the old code is not touched.
*/
- erts_total_code_size -= modp->code_length;
- code = modp->code;
- end = (BeamInstr *)((char *)code + modp->code_length);
+ erts_total_code_size -= modp->curr.code_length;
+ code = modp->curr.code;
+ end = (BeamInstr *)((char *)code + modp->curr.code_length);
erts_cleanup_funs_on_purge(code, end);
- beam_catches_delmod(modp->catches, code, modp->code_length);
+ beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length,
+ erts_active_code_ix());
erts_free(ERTS_ALC_T_CODE, (void *) code);
- modp->code = NULL;
- modp->code_length = 0;
- modp->catches = BEAM_CATCHES_NIL;
- remove_from_address_table(code);
+ modp->curr.code = NULL;
+ modp->curr.code_length = 0;
+ modp->curr.catches = BEAM_CATCHES_NIL;
+ erts_remove_from_ranges(code);
}
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_release_code_write_permission();
BIF_RET(am_true);
}
@@ -403,11 +701,11 @@ set_default_trace_pattern(Eterm module)
if (trace_pattern_is_on) {
Eterm mfa[1];
mfa[0] = module;
- (void) erts_set_trace_pattern(mfa, 1,
+ (void) erts_set_trace_pattern(0, mfa, 1,
match_spec,
meta_match_spec,
1, trace_pattern_flags,
- meta_tracer_pid);
+ meta_tracer_pid, 1);
}
}
@@ -427,10 +725,10 @@ check_process_code(Process* rp, Module* modp)
/*
* Pick up limits for the module.
*/
- start = modp->old_code;
- end = (BeamInstr *)((char *)start + modp->old_code_length);
+ start = modp->old.code;
+ end = (BeamInstr *)((char *)start + modp->old.code_length);
mod_start = (char *) start;
- mod_size = modp->old_code_length;
+ mod_size = modp->old.code_length;
/*
* Check if current instruction or continuation pointer points into module.
@@ -562,10 +860,10 @@ check_process_code(Process* rp, Module* modp)
done_gc = 1;
FLAGS(rp) |= F_NEED_FULLSWEEP;
(void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
- literals = (Eterm *) modp->old_code[MI_LITERALS_START];
- lit_size = (Eterm *) modp->old_code[MI_LITERALS_END] - literals;
+ literals = (Eterm *) modp->old.code[MI_LITERALS_START];
+ lit_size = (Eterm *) modp->old.code[MI_LITERALS_END] - literals;
oh = (struct erl_off_heap_header *)
- modp->old_code[MI_LITERALS_OFF_HEAP];
+ modp->old.code[MI_LITERALS_OFF_HEAP];
erts_garbage_collect_literals(rp, literals, lit_size, oh);
}
}
@@ -624,53 +922,82 @@ any_heap_refs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size)
#undef in_area
-
-static int
-purge_module(int module)
+BIF_RETTYPE purge_module_1(BIF_ALIST_1)
{
+ ErtsCodeIndex code_ix;
BeamInstr* code;
BeamInstr* end;
Module* modp;
+ int is_blocking = 0;
+ Eterm ret;
- /*
- * Correct module?
- */
-
- if ((modp = erts_get_module(make_atom(module))) == NULL) {
- return -2;
+ if (is_not_atom(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
}
- /*
- * Any code to purge?
- */
- if (modp->old_code == 0) {
- return -1;
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD1(bif_export[BIF_purge_module_1], BIF_P, BIF_ARG_1);
}
+ code_ix = erts_active_code_ix();
+
/*
- * Unload any NIF library
+ * Correct module?
*/
- if (modp->old_nif != NULL) {
- erts_unload_nif(modp->old_nif);
- modp->old_nif = NULL;
+
+ if ((modp = erts_get_module(BIF_ARG_1, code_ix)) == NULL) {
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
}
+ else {
+ erts_rwlock_old_code(code_ix);
- /*
- * Remove the old code.
- */
- ASSERT(erts_total_code_size >= modp->old_code_length);
- erts_total_code_size -= modp->old_code_length;
- code = modp->old_code;
- end = (BeamInstr *)((char *)code + modp->old_code_length);
- erts_cleanup_funs_on_purge(code, end);
- beam_catches_delmod(modp->old_catches, code, modp->old_code_length);
- decrement_refc(code);
- erts_free(ERTS_ALC_T_CODE, (void *) code);
- modp->old_code = NULL;
- modp->old_code_length = 0;
- modp->old_catches = BEAM_CATCHES_NIL;
- remove_from_address_table(code);
- return 0;
+ /*
+ * Any code to purge?
+ */
+ if (modp->old.code == 0) {
+ ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
+ }
+ else {
+ /*
+ * Unload any NIF library
+ */
+ if (modp->old.nif != NULL) {
+ /* ToDo: Do unload nif without blocking */
+ erts_rwunlock_old_code(code_ix);
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+ is_blocking = 1;
+ erts_rwlock_old_code(code_ix);
+ erts_unload_nif(modp->old.nif);
+ modp->old.nif = NULL;
+ }
+
+ /*
+ * Remove the old code.
+ */
+ ASSERT(erts_total_code_size >= modp->old.code_length);
+ erts_total_code_size -= modp->old.code_length;
+ code = modp->old.code;
+ end = (BeamInstr *)((char *)code + modp->old.code_length);
+ erts_cleanup_funs_on_purge(code, end);
+ beam_catches_delmod(modp->old.catches, code, modp->old.code_length,
+ code_ix);
+ decrement_refc(code);
+ erts_free(ERTS_ALC_T_CODE, (void *) code);
+ modp->old.code = NULL;
+ modp->old.code_length = 0;
+ modp->old.catches = BEAM_CATCHES_NIL;
+ erts_remove_from_ranges(code);
+ ERTS_BIF_PREP_RET(ret, am_true);
+ }
+ erts_rwunlock_old_code(code_ix);
+ }
+ if (is_blocking) {
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ }
+ erts_release_code_write_permission();
+ return ret;
}
static void
@@ -690,92 +1017,48 @@ decrement_refc(BeamInstr* code)
}
}
-static void
-remove_from_address_table(BeamInstr* code)
-{
- int i;
-
- for (i = 0; i < num_loaded_modules; i++) {
- if (modules[i].start == code) {
- num_loaded_modules--;
- while (i < num_loaded_modules) {
- modules[i] = modules[i+1];
- i++;
- }
- mid_module = &modules[num_loaded_modules/2];
- return;
- }
- }
- ASSERT(0); /* Not found? */
-}
-
-
/*
- * Move code from current to old.
+ * Move code from current to old and null all export entries for the module
*/
-static void
-delete_code(Process *c_p, ErtsProcLocks c_p_locks, Module* modp)
-{
-#ifdef ERTS_ENABLE_LOCK_CHECK
-#ifdef ERTS_SMP
- if (c_p && c_p_locks)
- erts_proc_lc_chk_only_proc_main(c_p);
- else
-#endif
- erts_lc_check_exact(NULL, 0);
-#endif
-
- /*
- * Clear breakpoints if any
- */
- if (modp->code != NULL && modp->code[MI_NUM_BREAKPOINTS] > 0) {
- if (c_p && c_p_locks)
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
- erts_clear_module_break(modp);
- modp->code[MI_NUM_BREAKPOINTS] = 0;
- erts_smp_thr_progress_unblock();
- if (c_p && c_p_locks)
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
- }
- modp->old_code = modp->code;
- modp->old_code_length = modp->code_length;
- modp->old_catches = modp->catches;
- modp->old_nif = modp->nif;
- modp->code = NULL;
- modp->code_length = 0;
- modp->catches = BEAM_CATCHES_NIL;
- modp->nif = NULL;
-}
-
-
-/* null all references on the export table for the module called with the
- atom index below */
-
static void
-delete_export_references(Eterm module)
+delete_code(Module* modp)
{
+ ErtsCodeIndex code_ix = erts_staging_code_ix();
+ Eterm module = make_atom(modp->module);
int i;
- ASSERT(is_atom(module));
-
- for (i = 0; i < export_list_size(); i++) {
- Export *ep = export_list(i);
+ for (i = 0; i < export_list_size(code_ix); i++) {
+ Export *ep = export_list(i, code_ix);
if (ep != NULL && (ep->code[0] == module)) {
- if (ep->address == ep->code+3 &&
- (ep->code[3] == (BeamInstr) em_apply_bif)) {
- continue;
+ if (ep->addressv[code_ix] == ep->code+3) {
+ if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ continue;
+ }
+ else if (ep->code[3] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(modp->curr.num_traced_exports > 0);
+ erts_clear_export_break(modp, ep->code+3);
+ }
+ else ASSERT(ep->code[3] == (BeamInstr) em_call_error_handler
+ || !erts_initialized);
}
- ep->address = ep->code+3;
+ ep->addressv[code_ix] = ep->code+3;
ep->code[3] = (BeamInstr) em_call_error_handler;
ep->code[4] = 0;
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
}
}
+
+ ASSERT(modp->curr.num_breakpoints == 0);
+ ASSERT(modp->curr.num_traced_exports == 0);
+ modp->old = modp->curr;
+ modp->curr.code = NULL;
+ modp->curr.code_length = 0;
+ modp->curr.catches = BEAM_CATCHES_NIL;
+ modp->curr.nif = NULL;
}
-
+
Eterm
beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
@@ -787,11 +1070,10 @@ beam_make_current_old(Process *c_p, ErtsProcLocks c_p_locks, Eterm module)
* if not, delete old code; error if old code already exists.
*/
- if (modp->code != NULL && modp->old_code != NULL) {
+ if (modp->curr.code != NULL && modp->old.code != NULL) {
return am_not_purged;
- } else if (modp->old_code == NULL) { /* Make the current version old. */
- delete_code(c_p, c_p_locks, modp);
- delete_export_references(module);
+ } else if (modp->old.code == NULL) { /* Make the current version old. */
+ delete_code(modp);
}
return NIL;
}
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index d772bea02f..c1e11f6448 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -44,67 +44,34 @@
#define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SZ))
#define Free(P) erts_free(ERTS_ALC_T_BPD, (P))
-/*
-** Doubly linked ring macros
-*/
-
-#define BpInit(a,i) \
-do { \
- (a)->orig_instr = (i); \
- (a)->next = (a); \
- (a)->prev = (a); \
-} while (0)
-
-#define BpSpliceNext(a,b) \
-do { \
- register BpData *c = (a), *d = (b), *e; \
- e = c->next->prev; \
- c->next->prev = d->next->prev; \
- d->next->prev = e; \
- e = c->next; \
- c->next = d->next; \
- d->next = e; \
-} while (0)
-
-#define BpSplicePrev(a,b) \
-do { \
- register BpData *c = (a), *d = (b), *e; \
- e = c->prev->next; \
- c->prev->next = d->prev->next; \
- d->prev->next = e; \
- e = c->prev; \
- c->prev = d->prev; \
- d->prev = e; \
-} while (0)
-
-#ifdef DEBUG
-# define BpSingleton(a) ((a)->next == (a) && (a)->prev == (a))
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
#else
-# define BpSingleton(a) ((a)->next == (a))
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
#endif
-#define BpInitAndSpliceNext(a,i,b) \
-do { \
- (a)->orig_instr = (i); \
- (a)->prev = (b); \
- (b)->next->prev = (a); \
- (a)->next = (b)->next; \
- (b)->next = (a); \
-} while (0)
+#define ERTS_BPF_LOCAL_TRACE 0x01
+#define ERTS_BPF_META_TRACE 0x02
+#define ERTS_BPF_COUNT 0x04
+#define ERTS_BPF_COUNT_ACTIVE 0x08
+#define ERTS_BPF_DEBUG 0x10
+#define ERTS_BPF_TIME_TRACE 0x20
+#define ERTS_BPF_TIME_TRACE_ACTIVE 0x40
+#define ERTS_BPF_GLOBAL_TRACE 0x80
-#define BpInitAndSplicePrev(a,i,b) \
-do { \
- (a)->orig_instr = (i); \
- (a)->next = (b); \
- (b)->prev->next = (a); \
- (a)->prev = (b)->prev; \
- (b)->prev = (a); \
-} while (0)
+#define ERTS_BPF_ALL 0xFF
+extern BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern BeamInstr beam_exception_trace[1]; /* OpCode(i_exception_trace) */
+extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
-#define BREAK_IS_BIF (1)
-#define BREAK_IS_ERL (0)
-
+erts_smp_atomic32_t erts_active_bp_index;
+erts_smp_atomic32_t erts_staging_bp_index;
/* *************************************************************************
** Local prototypes
@@ -113,26 +80,30 @@ do { \
/*
** Helpers
*/
-
-static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-static int set_function_break(Module *modp, BeamInstr *pc, int bif,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid);
-
-static int clear_break(Eterm mfa[3], int specified,
- BeamInstr break_op);
-static int clear_module_break(Module *modp, Eterm mfa[3], int specified,
- BeamInstr break_op);
-static int clear_function_break(Module *modp, BeamInstr *pc, int bif,
- BeamInstr break_op);
-
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op);
-static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op);
+static Eterm do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, Eterm tracer_pid);
+static void set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid);
+static void set_function_break(BeamInstr *pc,
+ Binary *match_spec,
+ Uint break_flags,
+ enum erts_break_op count_op,
+ Eterm tracer_pid);
+
+static void clear_break(BpFunctions* f, Uint break_flags);
+static int clear_function_break(BeamInstr *pc, Uint break_flags);
+
+static BpDataTime* get_time_break(BeamInstr *pc);
+static GenericBpData* check_break(BeamInstr *pc, Uint break_flags);
+static void bp_time_diff(bp_data_time_item_t *item,
+ process_breakpoint_time_t *pbt,
+ Uint ms, Uint s, Uint us);
+
+static void bp_meta_unref(BpMetaPid* bmp);
+static void bp_count_unref(BpCount* bcp);
+static void bp_time_unref(BpDataTime* bdt);
+static void consolidate_bp_data(Module* modp, BeamInstr* pc, int local);
+static void uninstall_breakpoint(BeamInstr* pc);
/* bp_hash */
#define BP_TIME_ADD(pi0, pi1) \
@@ -152,240 +123,996 @@ static ERTS_INLINE bp_data_time_item_t * bp_hash_get(bp_time_hash_t *hash, bp_da
static ERTS_INLINE bp_data_time_item_t * bp_hash_put(bp_time_hash_t *hash, bp_data_time_item_t *sitem);
static void bp_hash_delete(bp_time_hash_t *hash);
-
/* *************************************************************************
** External interfaces
*/
-erts_smp_spinlock_t erts_bp_lock;
-
void
erts_bp_init(void) {
- erts_smp_spinlock_init(&erts_bp_lock, "breakpoints");
+ erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
+ erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
}
-int
-erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, match_spec,
- (BeamInstr) BeamOp(op_i_trace_breakpoint), 0, tracer_pid);
+
+void
+erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified)
+{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ Uint max_funcs = 0;
+ int current;
+ int max_modules = module_code_size(code_ix);
+ int num_modules = 0;
+ Module* modp;
+ Module** module;
+ Uint i;
+
+ module = (Module **) Alloc(max_modules*sizeof(Module *));
+ num_modules = 0;
+ for (current = 0; current < max_modules; current++) {
+ modp = module_code(current, code_ix);
+ if (modp->curr.code) {
+ max_funcs += modp->curr.code[MI_NUM_FUNCTIONS];
+ module[num_modules++] = modp;
+ }
+ }
+
+ f->matching = (BpFunction *) Alloc(max_funcs*sizeof(BpFunction));
+ i = 0;
+ for (current = 0; current < num_modules; current++) {
+ BeamInstr** code_base = (BeamInstr **) module[current]->curr.code;
+ BeamInstr* code;
+ Uint num_functions = (Uint)(UWord) code_base[MI_NUM_FUNCTIONS];
+ Uint fi;
+
+ if (specified > 0) {
+ if (mfa[0] != make_atom(module[current]->module)) {
+ /* Wrong module name */
+ continue;
+ }
+ }
+
+ for (fi = 0; fi < num_functions; fi++) {
+ BeamInstr* pc;
+ int wi;
+
+ code = code_base[MI_FUNCTIONS+fi];
+ ASSERT(code[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ pc = code+5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ if (is_nil(code[3])) { /* Ignore BIF stub */
+ continue;
+ }
+ for (wi = 0;
+ wi < specified && (Eterm) code[2+wi] == mfa[wi];
+ wi++) {
+ /* Empty loop body */
+ }
+ if (wi == specified) {
+ /* Store match */
+ f->matching[i].pc = pc;
+ f->matching[i].mod = module[current];
+ i++;
+ }
+ }
+ }
+ f->matched = i;
+ Free(module);
}
-int
-erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, match_spec,
- (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+void
+erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
+{
+ ErtsCodeIndex code_ix = erts_active_code_ix();
+ int i;
+ int num_exps = export_list_size(code_ix);
+ int ne;
+
+ f->matching = (BpFunction *) Alloc(num_exps*sizeof(BpFunction));
+ ne = 0;
+ for (i = 0; i < num_exps; i++) {
+ Export* ep = export_list(i, code_ix);
+ BeamInstr* pc;
+ int j;
+
+ for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
+ /* Empty loop body */
+ }
+ if (j < specified) {
+ continue;
+ }
+ pc = ep->code+3;
+ if (ep->addressv[code_ix] == pc) {
+ if ((*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) em_call_error_handler)) {
+ continue;
+ }
+ ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ } else if (erts_is_native_break(ep->addressv[code_ix])) {
+ continue;
+ }
+
+ f->matching[ne].pc = pc;
+ f->matching[ne].mod = erts_get_module(ep->code[0], code_ix);
+ ne++;
+
+ }
+ f->matched = ne;
}
-/* set breakpoint data for on exported bif entry */
+void
+erts_bp_free_matched_functions(BpFunctions* f)
+{
+ Free(f->matching);
+}
void
-erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- set_function_break(NULL, pc, BREAK_IS_BIF, match_spec, (BeamInstr) BeamOp(op_i_mtrace_breakpoint), 0, tracer_pid);
+erts_consolidate_bp_data(BpFunctions* f, int local)
+{
+ BpFunction* fs = f->matching;
+ Uint i;
+ Uint n = f->matched;
+
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+
+ for (i = 0; i < n; i++) {
+ consolidate_bp_data(fs[i].mod, fs[i].pc, local);
+ }
}
-void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op) {
- set_function_break(NULL, pc, BREAK_IS_BIF, NULL, (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+void
+erts_consolidate_bif_bp_data(void)
+{
+ int i;
+
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ for (i = 0; i < BIF_SIZE; i++) {
+ Export *ep = bif_export[i];
+ consolidate_bp_data(0, ep->code+3, 0);
+ }
}
-void erts_clear_time_trace_bif(BeamInstr *pc) {
- clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_time_breakpoint));
+static void
+consolidate_bp_data(Module* modp, BeamInstr* pc, int local)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
+ GenericBpData* src;
+ GenericBpData* dst;
+ Uint flags;
+
+ if (g == 0) {
+ return;
+ }
+
+ src = &g->data[erts_active_bp_ix()];
+ dst = &g->data[erts_staging_bp_ix()];
+
+ /*
+ * The contents of the staging area may be out of date.
+ * Decrement all reference pointers.
+ */
+
+ flags = dst->flags;
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(dst->local_ms);
+ }
+ if (flags & ERTS_BPF_META_TRACE) {
+ bp_meta_unref(dst->meta_pid);
+ MatchSetUnref(dst->meta_ms);
+ }
+ if (flags & ERTS_BPF_COUNT) {
+ bp_count_unref(dst->count);
+ }
+ if (flags & ERTS_BPF_TIME_TRACE) {
+ bp_time_unref(dst->time);
+ }
+
+ /*
+ * If all flags are zero, deallocate all breakpoint data.
+ */
+
+ flags = dst->flags = src->flags;
+ if (flags == 0) {
+ if (modp) {
+ if (local) {
+ modp->curr.num_breakpoints--;
+ } else {
+ modp->curr.num_traced_exports--;
+ }
+ ASSERT(modp->curr.num_breakpoints >= 0);
+ ASSERT(modp->curr.num_traced_exports >= 0);
+ ASSERT(*pc != (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ }
+ pc[-4] = 0;
+ Free(g);
+ return;
+ }
+
+ /*
+ * Copy the active data to the staging area (making it ready
+ * for the next time it will be used).
+ */
+
+ if (flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ dst->local_ms = src->local_ms;
+ MatchSetRef(dst->local_ms);
+ }
+ if (flags & ERTS_BPF_META_TRACE) {
+ dst->meta_pid = src->meta_pid;
+ erts_refc_inc(&dst->meta_pid->refc, 1);
+ dst->meta_ms = src->meta_ms;
+ MatchSetRef(dst->meta_ms);
+ }
+ if (flags & ERTS_BPF_COUNT) {
+ dst->count = src->count;
+ erts_refc_inc(&dst->count->refc, 1);
+ }
+ if (flags & ERTS_BPF_TIME_TRACE) {
+ dst->time = src->time;
+ erts_refc_inc(&dst->time->refc, 1);
+ ASSERT(dst->time->hash);
+ }
}
-int
-erts_set_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_debug_breakpoint), 0, NIL);
+void
+erts_commit_staged_bp(void)
+{
+ ErtsBpIndex staging = erts_staging_bp_ix();
+ ErtsBpIndex active = erts_active_bp_ix();
+
+ erts_smp_atomic32_set_nob(&erts_active_bp_index, staging);
+ erts_smp_atomic32_set_nob(&erts_staging_bp_index, active);
}
-int
-erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_count_breakpoint), count_op, NIL);
+void
+erts_install_breakpoints(BpFunctions* f)
+{
+ Uint i;
+ Uint n = f->matched;
+ BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (*pc != br && g) {
+ Module* modp = f->matching[i].mod;
+
+ /*
+ * The breakpoint must be disabled in the active data
+ * (it will enabled later by switching bp indices),
+ * and enabled in the staging data.
+ */
+ ASSERT(g->data[erts_active_bp_ix()].flags == 0);
+ ASSERT(g->data[erts_staging_bp_ix()].flags != 0);
+
+ /*
+ * The following write is not protected by any lock. We
+	     * assume that the hardware guarantees that an aligned
+	     * word-sized (or half-word-sized) write is atomic
+ * (i.e. that other processes executing this code will not
+ * see a half pointer).
+ */
+ *pc = br;
+ modp->curr.num_breakpoints++;
+ }
+ }
}
-int
-erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op count_op) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return set_break(mfa, specified, NULL,
- (BeamInstr) BeamOp(op_i_time_breakpoint), count_op, NIL);
+void
+erts_uninstall_breakpoints(BpFunctions* f)
+{
+ Uint i;
+ Uint n = f->matched;
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ uninstall_breakpoint(pc);
+ }
}
-int
-erts_clear_trace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_trace_breakpoint));
+static void
+uninstall_breakpoint(BeamInstr* pc)
+{
+ if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (g->data[erts_active_bp_ix()].flags == 0) {
+ /*
+ * The following write is not protected by any lock. We
+	     * assume that the hardware guarantees that an aligned
+	     * word-sized (or half-word-sized) write is atomic
+ * (i.e. that other processes executing this code will not
+ * see a half pointer).
+ */
+ *pc = g->orig_instr;
+ }
+ }
}
-int
-erts_clear_mtrace_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+void
+erts_set_trace_break(BpFunctions* f, Binary *match_spec)
+{
+ set_break(f, match_spec, ERTS_BPF_LOCAL_TRACE, 0, am_true);
}
void
-erts_clear_mtrace_bif(BeamInstr *pc) {
- clear_function_break(NULL, pc, BREAK_IS_BIF, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+erts_set_mtrace_break(BpFunctions* f, Binary *match_spec, Eterm tracer_pid)
+{
+ set_break(f, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
-int
-erts_clear_debug_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_debug_breakpoint));
+void
+erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local)
+{
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+
+ set_function_break(pc, match_spec, flags, 0, NIL);
}
-int
-erts_clear_count_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_count_breakpoint));
+void
+erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec, Eterm tracer_pid)
+{
+ set_function_break(pc, match_spec, ERTS_BPF_META_TRACE, 0, tracer_pid);
}
-int
-erts_clear_time_break(Eterm mfa[3], int specified) {
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified,
- (BeamInstr) BeamOp(op_i_time_breakpoint));
+void
+erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op count_op)
+{
+ set_function_break(pc, NULL,
+ ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
+ count_op, NIL);
}
-int
-erts_clear_break(Eterm mfa[3], int specified) {
+void
+erts_clear_time_trace_bif(BeamInstr *pc) {
+ clear_function_break(pc, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+}
+
+void
+erts_set_debug_break(BpFunctions* f) {
+ set_break(f, NULL, ERTS_BPF_DEBUG, 0, NIL);
+}
+
+void
+erts_set_count_break(BpFunctions* f, enum erts_break_op count_op)
+{
+ set_break(f, 0, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE,
+ count_op, NIL);
+}
+
+void
+erts_set_time_break(BpFunctions* f, enum erts_break_op count_op)
+{
+ set_break(f, 0, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE,
+ count_op, NIL);
+}
+
+void
+erts_clear_trace_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_LOCAL_TRACE);
+}
+
+void
+erts_clear_call_trace_bif(BeamInstr *pc, int local)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
+
+ if (g) {
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ if (g->data[erts_staging_bp_ix()].flags & flags) {
+ clear_function_break(pc, flags);
+ }
+ }
+}
+
+void
+erts_clear_mtrace_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_META_TRACE);
+}
+
+void
+erts_clear_mtrace_bif(BeamInstr *pc)
+{
+ clear_function_break(pc, ERTS_BPF_META_TRACE);
+}
+
+void
+erts_clear_debug_break(BpFunctions* f)
+{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- return clear_break(mfa, specified, 0);
+ clear_break(f, ERTS_BPF_DEBUG);
+}
+
+void
+erts_clear_count_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_COUNT|ERTS_BPF_COUNT_ACTIVE);
+}
+
+void
+erts_clear_time_break(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_TIME_TRACE|ERTS_BPF_TIME_TRACE_ACTIVE);
+}
+
+void
+erts_clear_all_breaks(BpFunctions* f)
+{
+ clear_break(f, ERTS_BPF_ALL);
}
int
erts_clear_module_break(Module *modp) {
+ BeamInstr** code_base;
+ Uint n;
+ Uint i;
+
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
ASSERT(modp);
- return clear_module_break(modp, NULL, 0, 0);
+ code_base = (BeamInstr **) modp->curr.code;
+ if (code_base == NULL) {
+ return 0;
+ }
+ n = (Uint)(UWord) code_base[MI_NUM_FUNCTIONS];
+ for (i = 0; i < n; ++i) {
+ BeamInstr* pc;
+
+ pc = code_base[MI_FUNCTIONS+i] + 5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ clear_function_break(pc, ERTS_BPF_ALL);
+ }
+
+ erts_commit_staged_bp();
+
+ for (i = 0; i < n; ++i) {
+ BeamInstr* pc;
+
+ pc = code_base[MI_FUNCTIONS+i] + 5;
+ if (erts_is_native_break(pc)) {
+ continue;
+ }
+ uninstall_breakpoint(pc);
+ consolidate_bp_data(modp, pc, 1);
+ ASSERT(pc[-4] == 0);
+ }
+ return n;
}
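/*
 * erts_clear_module_break() above illustrates the required ordering: first
 * stage "no breakpoints", then commit the generation swap, and only then
 * patch the instructions back and free the data. A schematic sketch of that
 * ordering, where every demo_* helper is a placeholder for the real call.
 */
static void demo_clear_staged_flags(void *pc)   { (void) pc; }
static void demo_commit_generations(void)       { }
static void demo_restore_instruction(void *pc)  { (void) pc; }
static void demo_consolidate(void *pc)          { (void) pc; }

static void demo_clear_all(void **pcs, unsigned n)
{
    unsigned i;

    for (i = 0; i < n; i++)         /* 1: stage the cleared state            */
        demo_clear_staged_flags(pcs[i]);

    demo_commit_generations();      /* 2: make the cleared state active      */

    for (i = 0; i < n; i++) {       /* 3: now unpatching and freeing is safe */
        demo_restore_instruction(pcs[i]);
        demo_consolidate(pcs[i]);
    }
}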
-int
-erts_clear_function_break(Module *modp, BeamInstr *pc) {
+void
+erts_clear_export_break(Module* modp, BeamInstr* pc)
+{
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
- ASSERT(modp);
- return clear_function_break(modp, pc, BREAK_IS_ERL, 0);
+
+ clear_function_break(pc, ERTS_BPF_ALL);
+ erts_commit_staged_bp();
+ *pc = (BeamInstr) 0;
+ consolidate_bp_data(modp, pc, 0);
+ ASSERT(pc[-4] == 0);
}
+BeamInstr
+erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
+{
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint bp_flags;
+ ErtsBpIndex ix = erts_active_bp_ix();
+
+ g = (GenericBp *) I[-4];
+ bp = &g->data[ix];
+ bp_flags = bp->flags;
+ ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
+ ERTS_BPF_TIME_TRACE_ACTIVE) &&
+ !IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
+ bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
+ ERTS_BPF_GLOBAL_TRACE|
+ ERTS_BPF_TIME_TRACE|
+ ERTS_BPF_TIME_TRACE_ACTIVE);
+ if (bp_flags == 0) { /* Quick exit */
+ return g->orig_instr;
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
+ ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
+ (void) do_call_trace(c_p, I, reg, 1, bp->local_ms, am_true);
+ } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
+ (void) do_call_trace(c_p, I, reg, 0, bp->local_ms, am_true);
+ }
+
+ if (bp_flags & ERTS_BPF_META_TRACE) {
+ Eterm old_pid;
+ Eterm new_pid;
+
+ old_pid = (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ new_pid = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_pid);
+ if (new_pid != old_pid) {
+ erts_smp_atomic_set_nob(&bp->meta_pid->pid, new_pid);
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
+ erts_smp_atomic_inc_nob(&bp->count->acount);
+ }
+
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
+ Eterm w;
+ erts_trace_time_call(c_p, I, bp->time);
+ w = (BeamInstr) *c_p->cp;
+ if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
+ w == (BeamInstr) BeamOp(op_return_trace) ||
+ w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) {
+ Eterm* E = c_p->stop;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - 2 < c_p->htop) {
+ (void) erts_garbage_collect(c_p, 2, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ }
+ E = c_p->stop;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+
+ E -= 2;
+ E[0] = make_cp(I);
+ E[1] = make_cp(c_p->cp); /* original return address */
+ c_p->cp = beam_return_time_trace;
+ c_p->stop = E;
+ }
+ }
+
+ if (bp_flags & ERTS_BPF_DEBUG) {
+ return (BeamInstr) BeamOp(op_i_debug_breakpoint);
+ } else {
+ return g->orig_instr;
+ }
+}
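/*
 * erts_generic_breakpoint() above replaces the old one-instruction-per-
 * breakpoint-type scheme with a single handler that dispatches on a flag
 * word. A condensed sketch of that dispatch shape; the DEMO_BPF_* flags and
 * helpers are illustrative, not the real ERTS_BPF_* values.
 */
enum {
    DEMO_BPF_TRACE = 1u << 0,
    DEMO_BPF_COUNT = 1u << 1,
    DEMO_BPF_TIME  = 1u << 2
};

static unsigned long demo_call_count;

static void demo_do_trace(void) { /* call-trace action would go here    */ }
static void demo_do_time(void)  { /* call-time accounting would go here */ }

/* Returns the displaced instruction word so the caller can resume with it. */
static unsigned long demo_breakpoint(unsigned flags, unsigned long orig_instr)
{
    if (flags & DEMO_BPF_TRACE) demo_do_trace();
    if (flags & DEMO_BPF_COUNT) demo_call_count++;
    if (flags & DEMO_BPF_TIME)  demo_do_time();
    return orig_instr;
}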
/*
- * SMP NOTE: Process p may have become exiting on return!
+ * Entry point called by the trace wrap functions in erl_bif_wrap.c
+ *
+ * The trace wrap functions are themselves called through the export
+ * entries instead of the original BIF functions.
*/
-BeamInstr
-erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
- Uint32 *ret_flags, Eterm *tracer_pid) {
- Eterm tpid1, tpid2;
- BpData **bds = (BpData **) (pc)[-4];
- BpDataTrace *bdt = NULL;
+Eterm
+erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
+{
+ Eterm result;
+ Eterm (*func)(Process*, Eterm*, BeamInstr*);
+ Export* ep = bif_export[bif_index];
+ Uint32 flags = 0, flags_meta = 0;
+ Eterm meta_tracer_pid = NIL;
+ int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
+ * is actually in the
+ * export entry */
+ BeamInstr *cp = p->cp;
+ GenericBp* g;
+ GenericBpData* bp = NULL;
+ Uint bp_flags = 0;
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ bp = &g->data[erts_active_bp_ix()];
+ bp_flags = bp->flags;
+ }
- ASSERT(bds);
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- bdt = (BpDataTrace *) bds[bp_sched2ix_proc(p)];
- ASSERT(bdt);
- bdt = (BpDataTrace *) bdt->next;
- ASSERT(bdt);
- ASSERT(ret_flags);
- ASSERT(tracer_pid);
-
- ErtsSmpBPLock(bdt);
- tpid1 = tpid2 = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
-
- *ret_flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
- 1, &tpid2);
- *tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- ErtsSmpBPLock(bdt);
- bdt->tracer_pid = tpid2;
- ErtsSmpBPUnlock(bdt);
- }
- bds[bp_sched2ix_proc(p)] = (BpData *) bdt;
- return bdt->orig_instr;
+ /*
+     * Make the continuation pointer correct; it is not during direct
+     * BIF calls, but it is during apply of a BIF.
+ */
+ if (!applying) {
+ p->cp = I;
+ }
+ if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
+ flags = erts_call_trace(p, ep->code, bp->local_ms, args,
+ local, &ERTS_TRACER_PROC(p));
+ }
+ if (bp_flags & ERTS_BPF_META_TRACE) {
+ Eterm tpid1, tpid2;
+
+ tpid1 = tpid2 =
+ (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
+ flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
+ 0, &tpid2);
+ meta_tracer_pid = tpid2;
+ if (tpid1 != tpid2) {
+ erts_smp_atomic_set_nob(&bp->meta_pid->pid, tpid2);
+ }
+ }
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
+ BeamInstr *pc = (BeamInstr *)ep->code+3;
+ erts_trace_time_call(p, pc, bp->time);
+ }
+
+ /* Restore original continuation pointer (if changed). */
+ p->cp = cp;
+
+ func = bif_table[bif_index].f;
+
+ result = func(p, args, I);
+
+ if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
+ BeamInstr i_return_trace = beam_return_trace[0];
+ BeamInstr i_return_to_trace = beam_return_to_trace[0];
+ BeamInstr i_return_time_trace = beam_return_time_trace[0];
+ Eterm *cpp;
+ /* Maybe advance cp to skip trace stack frames */
+ for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
+ if (*cp == i_return_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 2; /* Skip return_trace parameters */
+ } else if (*cp == i_return_time_trace) {
+ /* Skip stack frame variables */
+ while (is_not_CP(*cpp)) cpp++;
+ cpp += 1; /* Skip return_time_trace parameters */
+ } else if (*cp == i_return_to_trace) {
+ /* A return_to trace message is going to be generated
+ * by normal means, so we do not have to.
+ */
+ cp = NULL;
+ break;
+ } else break;
+ }
+ }
+
+ /* Try to get these in the order
+ * they usually appear in normal code... */
+ if (is_non_value(result)) {
+ Uint reason = p->freason;
+ if (reason != TRAP) {
+ Eterm class;
+ Eterm value = p->fvalue;
+ DeclareTmpHeapNoproc(nocatch,3);
+ UseTmpHeapNoproc(3);
+ /* Expand error value like in handle_error() */
+ if (reason & EXF_ARGLIST) {
+ Eterm *tp;
+ ASSERT(is_tuple(value));
+ tp = tuple_val(value);
+ value = tp[1];
+ }
+ if ((reason & EXF_THROWN) && (p->catches <= 0)) {
+ value = TUPLE2(nocatch, am_nocatch, value);
+ reason = EXC_ERROR;
+ }
+ /* Note: expand_error_value() could theoretically
+ * allocate on the heap, but not for any error
+ * returned by a BIF, and it would do no harm,
+ * just be annoying.
+ */
+ value = expand_error_value(p, reason, value);
+ class = exception_tag[GET_EXC_CLASS(reason)];
+
+ if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &meta_tracer_pid);
+ }
+ if (flags & MATCH_SET_EXCEPTION_TRACE) {
+ erts_trace_exception(p, ep->code, class, value,
+ &ERTS_TRACER_PROC(p));
+ }
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
+ /* can only happen if(local)*/
+ Eterm *ptr = p->stop;
+ ASSERT(is_CP(*ptr));
+ ASSERT(ptr <= STACK_START(p));
+ /* Search the nearest stack frame for a catch */
+ while (++ptr < STACK_START(p)) {
+ if (is_CP(*ptr)) break;
+ if (is_catch(*ptr)) {
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into
+ * calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ }
+ UnUseTmpHeapNoproc(3);
+ if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ }
+ } else {
+ if (flags_meta & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &meta_tracer_pid);
+ }
+ /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
+ if (flags & MATCH_SET_RX_TRACE) {
+ erts_trace_return(p, ep->code, result, &ERTS_TRACER_PROC(p));
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE) {
+ /* can only happen if(local)*/
+ if (applying) {
+ /* Apply of BIF, cp is in calling function */
+ if (cp) erts_trace_return_to(p, cp);
+ } else {
+ /* Direct bif call, I points into calling function */
+ erts_trace_return_to(p, I);
+ }
+ }
+ }
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ return result;
}
+static Eterm
+do_call_trace(Process* c_p, BeamInstr* I, Eterm* reg,
+ int local, Binary* ms, Eterm tracer_pid)
+{
+ Eterm* cpp;
+ int return_to_trace = 0;
+ BeamInstr w;
+ BeamInstr *cp_save;
+ Uint32 flags;
+ Uint need = 0;
+ Eterm* E = c_p->stop;
+
+ w = *c_p->cp;
+ if (w == (BeamInstr) BeamOp(op_return_trace)) {
+ cpp = &E[2];
+ } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
+ return_to_trace = 1;
+ cpp = &E[0];
+ } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
+ cpp = &E[0];
+ } else {
+ cpp = NULL;
+ }
+ if (cpp) {
+ for (;;) {
+ BeamInstr w = *cp_val(*cpp);
+ if (w == (BeamInstr) BeamOp(op_return_trace)) {
+ cpp += 3;
+ } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) {
+ return_to_trace = 1;
+ cpp += 1;
+ } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) {
+ cpp += 2;
+ } else {
+ break;
+ }
+ }
+ cp_save = c_p->cp;
+ c_p->cp = (BeamInstr *) cp_val(*cpp);
+ ASSERT(is_CP(*cpp));
+ }
+ ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
+ flags = erts_call_trace(c_p, I-3, ms, reg, local, &tracer_pid);
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ if (cpp) {
+ c_p->cp = cp_save;
+ }
+
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
+ need += 1;
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ need += 3;
+ }
+ if (need) {
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ if (E - need < c_p->htop) {
+ (void) erts_garbage_collect(c_p, need, reg, I[-1]);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ E = c_p->stop;
+ }
+ }
+ if (flags & MATCH_SET_RETURN_TO_TRACE && !return_to_trace) {
+ E -= 1;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ E[0] = make_cp(c_p->cp);
+ c_p->cp = beam_return_to_trace;
+ }
+ if (flags & MATCH_SET_RX_TRACE) {
+ E -= 3;
+ ASSERT(c_p->htop <= E && E <= c_p->hend);
+ ASSERT(is_CP((Eterm) (UWord) (I - 3)));
+ ASSERT(am_true == tracer_pid ||
+ is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
+ E[2] = make_cp(c_p->cp);
+ E[1] = tracer_pid;
+ E[0] = make_cp(I - 3); /* We ARE at the beginning of an
+ instruction,
+				  the funcinfo is above I. */
+ c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ?
+ beam_exception_trace : beam_return_trace;
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE;
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ }
+ c_p->stop = E;
+ return tracer_pid;
+}
+void
+erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
+{
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
-/*
- * SMP NOTE: Process p may have become exiting on return!
- */
-Uint32
-erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args, int local,
- Eterm *tracer_pid) {
- BpData **bds = (BpData **) (pc)[-4];
- BpDataTrace *bdt = NULL;
+ ASSERT(c_p);
+ ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING);
+ /* get previous timestamp and breakpoint
+ * from the process psd */
- ASSERT(tracer_pid);
- if (bds) {
- Eterm tpid1, tpid2;
- Uint32 flags;
- bdt = (BpDataTrace *)bds[bp_sched2ix_proc(p)];
+ pbt = ERTS_PROC_GET_CALL_TIME(c_p);
+ get_sys_now(&ms, &s, &us);
- ErtsSmpBPLock(bdt);
- tpid1 = tpid2 = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ /* get pbt
+ * timestamp = t0
+ * lookup bdt from code
+ * set ts0 to pbt
+ * add call count here?
+ */
+ if (pbt == 0) {
+ /* First call of process to instrumented function */
+ pbt = Alloc(sizeof(process_breakpoint_time_t));
+ (void *) ERTS_PROC_SET_CALL_TIME(c_p, ERTS_PROC_LOCK_MAIN, pbt);
+ } else {
+ ASSERT(pbt->pc);
+ /* add time to previous code */
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = c_p->common.id;
+ sitem.count = 0;
- flags = erts_call_trace(p, pc-3/*mfa*/, bdt->match_spec, args,
- local, &tpid2);
- *tracer_pid = tpid2;
- if (tpid1 != tpid2) {
- ErtsSmpBPLock(bdt);
- bdt->tracer_pid = tpid2;
- ErtsSmpBPUnlock(bdt);
+ /* previous breakpoint */
+ pbdt = get_time_break(pbt->pc);
+
+ /* if null then the breakpoint was removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(c_p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
}
- return flags;
}
- *tracer_pid = NIL;
- return 0;
+
+ /* Add count to this code */
+ sitem.pid = c_p->common.id;
+ sitem.count = 1;
+ sitem.s_time = 0;
+ sitem.us_time = 0;
+
+ /* this breakpoint */
+ ASSERT(bdt);
+ h = &(bdt->hash[bp_sched2ix_proc(c_p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+
+ pbt->pc = I;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
}
+void
+erts_trace_time_return(Process *p, BeamInstr *pc)
+{
+ Uint ms,s,us;
+ process_breakpoint_time_t *pbt = NULL;
+ bp_data_time_item_t sitem, *item = NULL;
+ bp_time_hash_t *h = NULL;
+ BpDataTime *pbdt = NULL;
+
+ ASSERT(p);
+ ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING);
+ /* get previous timestamp and breakpoint
+ * from the process psd */
+
+ pbt = ERTS_PROC_GET_CALL_TIME(p);
+ get_sys_now(&ms,&s,&us);
+
+ /* get pbt
+ * lookup bdt from code
+ * timestamp = t1
+ * get ts0 from pbt
+ * get item from bdt->hash[bp_hash(p->id)]
+     * add diff (t1, t0) to item
+ */
+
+ if (pbt) {
+ /* might have been removed due to
+ * trace_pattern(false)
+ */
+ ASSERT(pbt->pc);
+
+ bp_time_diff(&sitem, pbt, ms, s, us);
+ sitem.pid = p->common.id;
+ sitem.count = 0;
+
+ /* previous breakpoint */
+ pbdt = get_time_break(pbt->pc);
+
+ /* beware, the trace_pattern might have been removed */
+ if (pbdt) {
+ h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ ASSERT(h);
+ ASSERT(h->item);
+
+ item = bp_hash_get(h, &sitem);
+ if (!item) {
+ item = bp_hash_put(h, &sitem);
+ } else {
+ BP_TIME_ADD(item, &sitem);
+ }
+ }
+
+ pbt->pc = pc;
+ pbt->ms = ms;
+ pbt->s = s;
+ pbt->us = us;
+ }
+}
int
-erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_trace_breakpoint));
-
- if (bdt) {
+erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local)
+{
+ Uint flags = local ? ERTS_BPF_LOCAL_TRACE : ERTS_BPF_GLOBAL_TRACE;
+ GenericBpData* bp = check_break(pc, flags);
+
+ if (bp) {
if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
+ *match_spec_ret = bp->local_ms;
}
- if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
- }
- return !0;
+ return 1;
}
return 0;
}
int
-erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret, Eterm *tracer_pid_ret) {
- BpDataTrace *bdt =
- (BpDataTrace *) is_break(pc, (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
+erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
+ Eterm *tracer_pid_ret)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_META_TRACE);
- if (bdt) {
+ if (bp) {
if (match_spec_ret) {
- *match_spec_ret = bdt->match_spec;
+ *match_spec_ret = bp->meta_ms;
}
if (tracer_pid_ret) {
- ErtsSmpBPLock(bdt);
- *tracer_pid_ret = bdt->tracer_pid;
- ErtsSmpBPUnlock(bdt);
+ *tracer_pid_ret =
+ (Eterm) erts_smp_atomic_read_nob(&bp->meta_pid->pid);
}
- return !0;
+ return 1;
}
return 0;
}
@@ -402,15 +1129,15 @@ erts_is_native_break(BeamInstr *pc) {
}
int
-erts_is_count_break(BeamInstr *pc, Sint *count_ret) {
- BpDataCount *bdc =
- (BpDataCount *) is_break(pc, (BeamInstr) BeamOp(op_i_count_breakpoint));
+erts_is_count_break(BeamInstr *pc, Uint *count_ret)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_COUNT);
- if (bdc) {
+ if (bp) {
if (count_ret) {
- *count_ret = (Sint) erts_smp_atomic_read_nob(&bdc->acount);
+ *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount);
}
- return !0;
+ return 1;
}
return 0;
}
@@ -421,7 +1148,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
Uint size;
Eterm *hp, t;
bp_data_time_item_t *item = NULL;
- BpDataTime *bdt = (BpDataTime *) is_break(pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ BpDataTime *bdt = get_time_break(pc);
if (bdt) {
if (retval) {
@@ -464,7 +1191,7 @@ int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *retval) {
}
bp_hash_delete(&hash);
}
- return !0;
+ return 1;
}
return 0;
@@ -478,15 +1205,16 @@ erts_find_local_func(Eterm mfa[3]) {
BeamInstr* code_ptr;
Uint i,n;
- if ((modp = erts_get_module(mfa[0])) == NULL)
+ if ((modp = erts_get_module(mfa[0], erts_active_code_ix())) == NULL)
return NULL;
- if ((code_base = (BeamInstr **) modp->code) == NULL)
+ if ((code_base = (BeamInstr **) modp->curr.code) == NULL)
return NULL;
n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
for (i = 0; i < n; ++i) {
code_ptr = code_base[MI_FUNCTIONS+i];
ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == code_ptr[0]);
- ASSERT(mfa[0] == ((Eterm) code_ptr[2]));
+ ASSERT(mfa[0] == ((Eterm) code_ptr[2]) ||
+ is_nil((Eterm) code_ptr[2]));
if (mfa[1] == ((Eterm) code_ptr[3]) &&
((BeamInstr) mfa[2]) == code_ptr[4]) {
return code_ptr + 5;
@@ -654,11 +1382,11 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
* the previous breakpoint.
*/
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+ pbdt = get_time_break(pbt->pc);
if (pbdt) {
get_sys_now(&ms,&s,&us);
bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->id;
+ sitem.pid = p->common.id;
sitem.count = 0;
h = &(pbdt->hash[bp_sched2ix_proc(p)]);
@@ -692,669 +1420,259 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
} /* pbt */
}
-/* call_time breakpoint
- * Accumulated times are added to the previous bp,
- * not the current one. The current one is saved
- * for future reference.
- * The previous breakpoint is stored in the process it self, the psd.
- * We do not need to store in a stack frame.
- * There is no need for locking, each thread has its own
- * area in each bp to save data.
- * Since we need to diffrentiate between processes for each bp,
- * every bp has a hash (per thread) to process-bp statistics.
- * - egil
- */
-
-void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type) {
- Uint ms,s,us;
- process_breakpoint_time_t *pbt = NULL;
- bp_data_time_item_t sitem, *item = NULL;
- bp_time_hash_t *h = NULL;
- BpDataTime *pbdt = NULL;
-
- ASSERT(p);
- ASSERT(p->status == P_RUNNING);
-
- /* get previous timestamp and breakpoint
- * from the process psd */
-
- pbt = ERTS_PROC_GET_CALL_TIME(p);
- get_sys_now(&ms,&s,&us);
-
- switch(type) {
- /* get pbt
- * timestamp = t0
- * lookup bdt from code
- * set ts0 to pbt
- * add call count here?
- */
- case ERTS_BP_CALL_TIME_CALL:
- case ERTS_BP_CALL_TIME_TAIL_CALL:
-
- if (pbt) {
- ASSERT(pbt->pc);
- /* add time to previous code */
- bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->id;
- sitem.count = 0;
-
- /* previous breakpoint */
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
-
- /* if null then the breakpoint was removed */
- if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
- }
-
- } else {
- /* first call of process to instrumented function */
- pbt = Alloc(sizeof(process_breakpoint_time_t));
- (void *) ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCK_MAIN, pbt);
- }
- /* add count to this code */
- sitem.pid = p->id;
- sitem.count = 1;
- sitem.s_time = 0;
- sitem.us_time = 0;
-
- /* this breakpoint */
- ASSERT(bdt);
- h = &(bdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
-
- pbt->pc = pc;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
- break;
-
- case ERTS_BP_CALL_TIME_RETURN:
- /* get pbt
- * lookup bdt from code
- * timestamp = t1
- * get ts0 from pbt
- * get item from bdt->hash[bp_hash(p->id)]
- * ack diff (t1, t0) to item
- */
-
- if(pbt) {
- /* might have been removed due to
- * trace_pattern(false)
- */
- ASSERT(pbt->pc);
-
- bp_time_diff(&sitem, pbt, ms, s, us);
- sitem.pid = p->id;
- sitem.count = 0;
-
- /* previous breakpoint */
- pbdt = (BpDataTime *) get_break(p, pbt->pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
-
- /* beware, the trace_pattern might have been removed */
- if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
-
- ASSERT(h);
- ASSERT(h->item);
-
- item = bp_hash_get(h, &sitem);
- if (!item) {
- item = bp_hash_put(h, &sitem);
- } else {
- BP_TIME_ADD(item, &sitem);
- }
- }
-
- pbt->pc = pc;
- pbt->ms = ms;
- pbt->s = s;
- pbt->us = us;
- }
- break;
- default :
- ASSERT(0);
- /* will never happen */
- break;
- }
-}
-
-
/* *************************************************************************
** Local helpers
*/
-static int set_break(Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid)
+static void
+set_break(BpFunctions* f, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid)
{
- Module *modp;
- int num_processed = 0;
- if (!specified) {
- /* Find and process all modules in the system... */
- int current;
- int last = module_code_size();
- for (current = 0; current < last; current++) {
- modp = module_code(current);
- ASSERT(modp != NULL);
- num_processed +=
- set_module_break(modp, mfa, specified,
- match_spec, break_op, count_op,
- tracer_pid);
- }
- } else {
- /* Process a single module */
- if ((modp = erts_get_module(mfa[0])) != NULL) {
- num_processed +=
- set_module_break(modp, mfa, specified,
- match_spec, break_op, count_op,
- tracer_pid);
- }
- }
- return num_processed;
-}
-
-static int set_module_break(Module *modp, Eterm mfa[3], int specified,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid) {
- BeamInstr** code_base;
- BeamInstr* code_ptr;
- int num_processed = 0;
- Uint i,n;
+ Uint i;
+ Uint n;
- ASSERT(break_op);
- ASSERT(modp);
- code_base = (BeamInstr **) modp->code;
- if (code_base == NULL) {
- return 0;
- }
- n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
- for (i = 0; i < n; ++i) {
- code_ptr = code_base[MI_FUNCTIONS+i];
- ASSERT(code_ptr[0] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
- (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr+5;
-
- num_processed +=
- set_function_break(modp, pc, BREAK_IS_ERL, match_spec,
- break_op, count_op, tracer_pid);
- }
+ n = f->matched;
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ set_function_break(pc, match_spec, break_flags,
+ count_op, tracer_pid);
}
- return num_processed;
}
-static int set_function_break(Module *modp, BeamInstr *pc, int bif,
- Binary *match_spec, BeamInstr break_op,
- enum erts_break_op count_op, Eterm tracer_pid) {
-
- BeamInstr **code_base = NULL;
- BpData *bd, **r, ***rs;
- size_t size;
- Uint ix = 0;
-
- if (bif == BREAK_IS_ERL) {
- code_base = (BeamInstr **)modp->code;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (modp->code_length/sizeof(BeamInstr *)));
- } else {
- ASSERT(*pc == (BeamInstr) em_apply_bif);
- ASSERT(modp == NULL);
+static void
+set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
+ enum erts_break_op count_op, Eterm tracer_pid)
+{
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint common;
+ ErtsBpIndex ix = erts_staging_bp_ix();
+
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ g = (GenericBp *) pc[-4];
+ if (g == 0) {
+ int i;
+ if (count_op == erts_break_reset || count_op == erts_break_stop) {
+ /* Do not insert a new breakpoint */
+ return;
+ }
+ g = Alloc(sizeof(GenericBp));
+ g->orig_instr = *pc;
+ for (i = 0; i < ERTS_NUM_BP_IX; i++) {
+ g->data[i].flags = 0;
+ }
+ pc[-4] = (BeamInstr) g;
}
+ bp = &g->data[ix];
/*
- * Currently no trace support for native code.
+ * If we are changing an existing breakpoint, clean up old data.
*/
- if (erts_is_native_break(pc)) {
- return 0;
- }
- /* Do not allow two breakpoints of the same kind */
- if ( (bd = is_break(pc, break_op))) {
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint)
- || break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
- Binary *old_match_spec;
-
- /* Update match spec and tracer */
- MatchSetRef(match_spec);
- ErtsSmpBPLock(bdt);
- old_match_spec = bdt->match_spec;
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- ErtsSmpBPUnlock(bdt);
- MatchSetUnref(old_match_spec);
- } else {
- BpDataCount *bdc = (BpDataCount *) bd;
- erts_aint_t count = 0;
- erts_aint_t res = 0;
-
- ASSERT(! match_spec);
- ASSERT(is_nil(tracer_pid));
-
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_stop) {
- count = erts_smp_atomic_read_nob(&bdc->acount);
- if (count >= 0) {
- while(1) {
- res = erts_smp_atomic_cmpxchg_nob(&bdc->acount, -count - 1, count);
- if ((res == count) || count < 0) break;
- count = res;
- }
- }
- } else {
- /* Reset call counter */
- erts_smp_atomic_set_nob(&bdc->acount, 0);
- }
-
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
-
- ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
-
- if (count_op == erts_break_stop) {
- bdt->pause = 1;
- } else {
- bdt->pause = 0;
- for (i = 0; i < bdt->n; i++) {
- bp_hash_delete(&(bdt->hash[i]));
- bp_hash_init(&(bdt->hash[i]), 32);
- }
- }
- } else {
- ASSERT (! count_op);
- }
- }
- return 1;
- }
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
- size = sizeof(BpDataTrace);
- } else {
- ASSERT(! match_spec);
- ASSERT(is_nil(tracer_pid));
- if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
- /* Do not insert a new breakpoint */
- return 1;
- }
- size = sizeof(BpDataCount);
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
- /* Do not insert a new breakpoint */
- return 1;
- }
- size = sizeof(BpDataTime);
+ common = break_flags & bp->flags;
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(bp->local_ms);
+ } else if (common & ERTS_BPF_META_TRACE) {
+ MatchSetUnref(bp->meta_ms);
+ bp_meta_unref(bp->meta_pid);
+ } else if (common & ERTS_BPF_COUNT) {
+ if (count_op == erts_break_stop) {
+ bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
} else {
- ASSERT(! count_op);
- ASSERT(break_op == (BeamInstr) BeamOp(op_i_debug_breakpoint));
- size = sizeof(BpDataDebug);
+ bp->flags |= ERTS_BPF_COUNT_ACTIVE;
+ erts_smp_atomic_set_nob(&bp->count->acount, 0);
}
- }
- rs = (BpData ***) (pc-4);
- if (! *rs) {
- size_t ssize = sizeof(BeamInstr) * erts_no_schedulers;
- *rs = (BpData **) Alloc(ssize);
- sys_memzero(*rs, ssize);
- }
-
- r = &((*rs)[0]);
-
- if (! *r) {
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_trace_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_mtrace_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_debug_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_count_breakpoint));
- ASSERT(*pc != (BeamInstr) BeamOp(op_i_time_breakpoint));
- /* First breakpoint; create singleton ring */
- bd = Alloc(size);
- BpInit(bd, *pc);
- *r = bd;
- if (bif == BREAK_IS_ERL) {
- *pc = break_op;
- }
- } else {
- ASSERT(*pc == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_mtrace_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_debug_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_time_breakpoint) ||
- *pc == (BeamInstr) BeamOp(op_i_count_breakpoint) ||
- *pc == (BeamInstr) em_apply_bif);
- if (*pc == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
- /* Debug bp must be last, so if it is also first;
- * it must be singleton. */
- ASSERT(BpSingleton(*r));
- /* Insert new bp first in the ring, i.e second to last. */
- bd = Alloc(size);
- BpInitAndSpliceNext(bd, *pc, *r);
- if (bif == BREAK_IS_ERL) {
- *pc = break_op;
- }
- } else if ((*r)->prev->orig_instr
- == (BeamInstr) BeamOp(op_i_debug_breakpoint)) {
- /* Debug bp last in the ring; insert new second to last. */
- bd = Alloc(size);
- BpInitAndSplicePrev(bd, (*r)->prev->orig_instr, *r);
- (*r)->prev->orig_instr = break_op;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return;
+ } else if (common & ERTS_BPF_TIME_TRACE) {
+ BpDataTime* bdt = bp->time;
+ Uint i = 0;
+
+ if (count_op == erts_break_stop) {
+ bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE;
} else {
- /* Just insert last in the ring */
- bd = Alloc(size);
- BpInitAndSpliceNext(bd, (*r)->orig_instr, *r);
- (*r)->orig_instr = break_op;
- *r = bd;
+ bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE;
+ for (i = 0; i < bdt->n; i++) {
+ bp_hash_delete(&(bdt->hash[i]));
+ bp_hash_init(&(bdt->hash[i]), 32);
+ }
}
- }
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- (*rs)[ix] = (*rs)[0];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return;
}
- bd->this_instr = break_op;
- /* Init the bp type specific data */
- if (break_op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- break_op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
-
- MatchSetRef(match_spec);
- bdt->match_spec = match_spec;
- bdt->tracer_pid = tracer_pid;
- } else if (break_op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
-
- bdt->pause = 0;
- bdt->n = erts_no_schedulers;
- bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
+ /*
+ * Initialize the new breakpoint data.
+ */
+ if (break_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetRef(match_spec);
+ bp->local_ms = match_spec;
+ } else if (break_flags & ERTS_BPF_META_TRACE) {
+ BpMetaPid* bmp;
+ MatchSetRef(match_spec);
+ bp->meta_ms = match_spec;
+ bmp = Alloc(sizeof(BpMetaPid));
+ erts_refc_init(&bmp->refc, 1);
+ erts_smp_atomic_init_nob(&bmp->pid, tracer_pid);
+ bp->meta_pid = bmp;
+ } else if (break_flags & ERTS_BPF_COUNT) {
+ BpCount* bcp;
+
+ ASSERT((bp->flags & ERTS_BPF_COUNT) == 0);
+ bcp = Alloc(sizeof(BpCount));
+ erts_refc_init(&bcp->refc, 1);
+ erts_smp_atomic_init_nob(&bcp->acount, 0);
+ bp->count = bcp;
+ } else if (break_flags & ERTS_BPF_TIME_TRACE) {
+ BpDataTime* bdt;
+ int i;
+
+ ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
+ bdt = Alloc(sizeof(BpDataTime));
+ erts_refc_init(&bdt->refc, 1);
+ bdt->n = erts_no_schedulers;
+ bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
}
- } else if (break_op == (BeamInstr) BeamOp(op_i_count_breakpoint)) {
- BpDataCount *bdc = (BpDataCount *) bd;
- erts_smp_atomic_init_nob(&bdc->acount, 0);
+ bp->time = bdt;
}
- if (bif == BREAK_IS_ERL) {
- ++(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
+ bp->flags |= break_flags;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+}
+
+static void
+clear_break(BpFunctions* f, Uint break_flags)
+{
+ Uint i;
+ Uint n;
+
+ n = f->matched;
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = f->matching[i].pc;
+ clear_function_break(pc, break_flags);
}
- return 1;
}
-static int clear_break(Eterm mfa[3], int specified, BeamInstr break_op)
+static int
+clear_function_break(BeamInstr *pc, Uint break_flags)
{
- int num_processed = 0;
- Module *modp;
+ GenericBp* g;
+ GenericBpData* bp;
+ Uint common;
+ ErtsBpIndex ix = erts_staging_bp_ix();
- if (!specified) {
- /* Iterate over all modules */
- int current;
- int last = module_code_size();
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
- for (current = 0; current < last; current++) {
- modp = module_code(current);
- ASSERT(modp != NULL);
- num_processed += clear_module_break(modp, mfa, specified, break_op);
- }
- } else {
- /* Process a single module */
- if ((modp = erts_get_module(mfa[0])) != NULL) {
- num_processed +=
- clear_module_break(modp, mfa, specified, break_op);
- }
+ if ((g = (GenericBp *) pc[-4]) == 0) {
+ return 1;
}
- return num_processed;
-}
-static int clear_module_break(Module *m, Eterm mfa[3], int specified,
- BeamInstr break_op) {
- BeamInstr** code_base;
- BeamInstr* code_ptr;
- int num_processed = 0;
- Uint i;
- BeamInstr n;
-
- ASSERT(m);
- code_base = (BeamInstr **) m->code;
- if (code_base == NULL) {
- return 0;
+ bp = &g->data[ix];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ common = bp->flags & break_flags;
+ bp->flags &= ~break_flags;
+ if (common & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE)) {
+ MatchSetUnref(bp->local_ms);
}
- n = (BeamInstr) code_base[MI_NUM_FUNCTIONS];
- for (i = 0; i < n; ++i) {
- code_ptr = code_base[MI_FUNCTIONS+i];
- if ((specified < 2 || mfa[1] == ((Eterm) code_ptr[3])) &&
- (specified < 3 || ((int) mfa[2]) == ((int) code_ptr[4]))) {
- BeamInstr *pc = code_ptr + 5;
-
- num_processed +=
- clear_function_break(m, pc, BREAK_IS_ERL, break_op);
- }
+ if (common & ERTS_BPF_META_TRACE) {
+ MatchSetUnref(bp->meta_ms);
+ }
+ if (common & ERTS_BPF_COUNT) {
+ ASSERT((bp->flags & ERTS_BPF_COUNT_ACTIVE) == 0);
+ bp_count_unref(bp->count);
+ }
+ if (common & ERTS_BPF_TIME_TRACE) {
+ ASSERT((bp->flags & ERTS_BPF_TIME_TRACE_ACTIVE) == 0);
+ bp_time_unref(bp->time);
}
- return num_processed;
-}
-static int clear_function_break(Module *m, BeamInstr *pc, int bif, BeamInstr break_op) {
- BpData *bd;
- Uint ix = 0;
- BeamInstr **code_base = NULL;
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ return 1;
+}
- if (bif == BREAK_IS_ERL) {
- code_base = (BeamInstr **)m->code;
- ASSERT(code_base);
- ASSERT(code_base <= (BeamInstr **)pc);
- ASSERT((BeamInstr **)pc < code_base + (m->code_length/sizeof(BeamInstr *)));
- } else {
- ASSERT(*pc == (BeamInstr) em_apply_bif);
- ASSERT(m == NULL);
+static void
+bp_meta_unref(BpMetaPid* bmp)
+{
+ if (erts_refc_dectest(&bmp->refc, 0) <= 0) {
+ Free(bmp);
}
+}
- /*
- * Currently no trace support for native code.
- */
- if (erts_is_native_break(pc)) {
- return 0;
+static void
+bp_count_unref(BpCount* bcp)
+{
+ if (erts_refc_dectest(&bcp->refc, 0) <= 0) {
+ Free(bcp);
}
+}
- while ( (bd = is_break(pc, break_op))) {
- /* Remove all breakpoints of this type.
- * There should be only one of each type,
- * but break_op may be 0 which matches any type.
+static void
+bp_time_unref(BpDataTime* bdt)
+{
+ if (erts_refc_dectest(&bdt->refc, 0) <= 0) {
+ Uint i = 0;
+ Uint j = 0;
+ Process *h_p = NULL;
+ bp_data_time_item_t* item = NULL;
+ process_breakpoint_time_t* pbt = NULL;
+
+ /* remove all psd associated with the hash
+ * and then delete the hash.
+ * ... sigh ...
*/
- BeamInstr op;
- BpData ***rs = (BpData ***) (pc - 4);
- BpData **r = NULL;
-
-#ifdef DEBUG
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- ASSERT((*rs)[ix] == (*rs)[0]);
- }
-#endif
-
- r = &((*rs)[0]);
-
- ASSERT(*r);
- /* Find opcode for this breakpoint */
- if (break_op) {
- op = break_op;
- } else {
- if (bd == (*r)->next) {
- /* First breakpoint in ring */
- op = *pc;
- } else {
- op = bd->prev->orig_instr;
- }
- }
- if (BpSingleton(bd)) {
- ASSERT(*r == bd);
- /* Only one breakpoint to remove */
- if (bif == BREAK_IS_ERL) {
- *pc = bd->orig_instr;
- }
- Free(*rs);
- *rs = NULL;
- } else {
- BpData *bd_prev = bd->prev;
-
- BpSpliceNext(bd, bd_prev);
- ASSERT(BpSingleton(bd));
- if (bd == *r) {
- /* We removed the last breakpoint in the ring */
- *r = bd_prev;
- bd_prev->orig_instr = bd->orig_instr;
- } else if (bd_prev == *r) {
- /* We removed the first breakpoint in the ring */
- if (bif == BREAK_IS_ERL) {
- *pc = bd->orig_instr;
- }
- } else {
- bd_prev->orig_instr = bd->orig_instr;
- }
- }
- if (op == (BeamInstr) BeamOp(op_i_trace_breakpoint) ||
- op == (BeamInstr) BeamOp(op_i_mtrace_breakpoint)) {
-
- BpDataTrace *bdt = (BpDataTrace *) bd;
- MatchSetUnref(bdt->match_spec);
- }
- if (op == (BeamInstr) BeamOp(op_i_time_breakpoint)) {
- BpDataTime *bdt = (BpDataTime *) bd;
- Uint i = 0;
- Uint j = 0;
- Process *h_p = NULL;
- bp_data_time_item_t *item = NULL;
- process_breakpoint_time_t *pbt = NULL;
-
- /* remove all psd associated with the hash
- * and then delete the hash.
- * ... sigh ...
- */
- for( i = 0; i < bdt->n; ++i) {
- if (bdt->hash[i].used) {
- for (j = 0; j < bdt->hash[i].n; ++j) {
- item = &(bdt->hash[i].item[j]);
- if (item->pid != NIL) {
- h_p = process_tab[internal_pid_index(item->pid)];
- if (h_p) {
- pbt = ERTS_PROC_SET_CALL_TIME(h_p, ERTS_PROC_LOCK_MAIN, NULL);
- if (pbt) {
- Free(pbt);
- }
+ for (i = 0; i < bdt->n; ++i) {
+ if (bdt->hash[i].used) {
+ for (j = 0; j < bdt->hash[i].n; ++j) {
+ item = &(bdt->hash[i].item[j]);
+ if (item->pid != NIL) {
+ h_p = erts_pid2proc(NULL, 0, item->pid,
+ ERTS_PROC_LOCK_MAIN);
+ if (h_p) {
+ pbt = ERTS_PROC_SET_CALL_TIME(h_p,
+ ERTS_PROC_LOCK_MAIN,
+ NULL);
+ if (pbt) {
+ Free(pbt);
}
+ erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN);
}
}
}
- bp_hash_delete(&(bdt->hash[i]));
}
- Free(bdt->hash);
- bdt->hash = NULL;
- bdt->n = 0;
+ bp_hash_delete(&(bdt->hash[i]));
}
- Free(bd);
- if (bif == BREAK_IS_ERL) {
- ASSERT(((BeamInstr) code_base[MI_NUM_BREAKPOINTS]) > 0);
- --(*(BeamInstr*)&code_base[MI_NUM_BREAKPOINTS]);
- }
- if (*rs) {
- for (ix = 1; ix < erts_no_schedulers; ++ix) {
- (*rs)[ix] = (*rs)[0];
- }
- }
- } /* while bd != NULL */
- return 1;
+ Free(bdt->hash);
+ Free(bdt);
+ }
}
-
-
-/*
-** Searches (linear forward) the breakpoint ring for a specified opcode
-** and returns a pointer to the breakpoint data structure or NULL if
-** not found. If the specified opcode is 0, the last breakpoint is
-** returned. The program counter must point to the first executable
-** (breakpoint) instruction of the function.
-*/
-
-BpData *erts_get_time_break(Process *p, BeamInstr *pc) {
- return get_break(p, pc, (BeamInstr) BeamOp(op_i_time_breakpoint));
+static BpDataTime*
+get_time_break(BeamInstr *pc)
+{
+ GenericBpData* bp = check_break(pc, ERTS_BPF_TIME_TRACE);
+ return bp ? bp->time : 0;
}
-static BpData *get_break(Process *p, BeamInstr *pc, BeamInstr break_op) {
- ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- if (! erts_is_native_break(pc)) {
- BpData **rs = (BpData **) pc[-4];
- BpData *bd = NULL, *ebd = NULL;
-
- if (! rs) {
- return NULL;
- }
-
- bd = ebd = rs[bp_sched2ix_proc(p)];
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- return bd;
- }
-
- bd = bd->next;
- while (bd != ebd) {
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- ASSERT(bd);
- return bd;
- }
- bd = bd->next;
- }
- }
- return NULL;
-}
+static GenericBpData*
+check_break(BeamInstr *pc, Uint break_flags)
+{
+ GenericBp* g = (GenericBp *) pc[-4];
-static BpData *is_break(BeamInstr *pc, BeamInstr break_op) {
- BpData **rs;
- BpData *bd = NULL, *ebd = NULL;
ASSERT(pc[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
-
if (erts_is_native_break(pc)) {
- return NULL;
- }
- rs = (BpData **) pc[-4];
- if (! rs) {
- return NULL;
- }
-
- bd = ebd = rs[erts_bp_sched2ix()];
- ASSERT(bd);
- if ( (break_op == 0) || (bd->this_instr == break_op)) {
- return bd;
+ return 0;
}
-
- bd = bd->next;
- while (bd != ebd) {
- ASSERT(bd);
- if (bd->this_instr == break_op) {
- ASSERT(bd);
- return bd;
+ if (g) {
+ GenericBpData* bp = &g->data[erts_active_bp_ix()];
+ ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0);
+ if (bp->flags & break_flags) {
+ return bp;
}
- bd = bd->next;
}
- return NULL;
+ return 0;
}
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 167069552f..28aaaa462a 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -25,77 +25,6 @@
#include "erl_vm.h"
#include "global.h"
-
-
-/* A couple of gotchas:
- *
- * The breakpoint structure from BeamInstr,
- * In beam_emu where the instruction counter pointer, I (or pc),
- * points to the *current* instruction. At that time, if the instruction
- * is a breakpoint instruction the pc looks like the following,
- *
- * I[-5] | op_i_func_info_IaaI | scheduler specific entries
- * I[-4] | BpData** bpa | --> | BpData * bdas1 | ... | BpData * bdasN |
- * I[-3] | Tagged Module | | |
- * I[-2] | Tagged Function | V V
- * I[-1] | Arity | BpData -> BpData -> BpData -> BpData
- * I[0] | The bp instruction | ^ * the bp wheel * |
- * |------------------------------
- *
- * Common struct to all bp_data_*
- *
- * 1) The type of bp_data structure in the ring is deduced from the
- * orig_instr field of the structure _before_ in the ring, except for
- * the first structure in the ring that has its instruction in
- * pc[0] of the code to execute.
- * This is valid as long as you don't search for the function while it is
- * being executed by something else. Or is in the middle of its rotation for
- * any other reason.
- * A key, the bp beam instruction, is included for this reason.
- *
- * 2) pc[-4][sched_id - 1] points to the _last_ structure in the ring before the
- * breakpoints are being executed.
- *
- * So, as an example, when a breakpointed function starts to execute,
- * the first instruction that is a breakpoint instruction at pc[0] finds
- * its data at ((BpData **) pc[-4][sched_id - 1])->next and has to cast that pointer
- * to the correct bp_data type.
-*/
-
-typedef struct bp_data {
- struct bp_data *next; /* Doubly linked ring pointers */
- struct bp_data *prev; /* -"- */
- BeamInstr orig_instr; /* The original instruction to execute */
- BeamInstr this_instr; /* key */
-} BpData;
-/*
-** All the following bp_data_.. structs must begin the same way
-*/
-
-typedef struct bp_data_trace {
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- Binary *match_spec;
- Eterm tracer_pid;
-} BpDataTrace;
-
-typedef struct bp_data_debug {
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
-} BpDataDebug;
-
-typedef struct bp_data_count { /* Call count */
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- erts_smp_atomic_t acount;
-} BpDataCount;
-
typedef struct {
Eterm pid;
Sint count;
@@ -110,13 +39,9 @@ typedef struct {
} bp_time_hash_t;
typedef struct bp_data_time { /* Call time */
- struct bp_data *next;
- struct bp_data *prev;
- BeamInstr orig_instr;
- BeamInstr this_instr; /* key */
- Uint pause;
- Uint n;
- bp_time_hash_t *hash;
+ Uint n;
+ bp_time_hash_t *hash;
+ erts_refc_t refc;
} BpDataTime;
typedef struct {
@@ -126,64 +51,42 @@ typedef struct {
BeamInstr *pc;
} process_breakpoint_time_t; /* used within psd */
-extern erts_smp_spinlock_t erts_bp_lock;
+typedef struct {
+ erts_smp_atomic_t acount;
+ erts_refc_t refc;
+} BpCount;
+
+typedef struct {
+ erts_smp_atomic_t pid;
+ erts_refc_t refc;
+} BpMetaPid;
+
+typedef struct generic_bp_data {
+ Uint flags;
+ Binary* local_ms; /* Match spec for local call trace */
+ Binary* meta_ms; /* Match spec for meta trace */
+ BpMetaPid* meta_pid; /* Meta trace pid */
+ BpCount* count; /* For call count */
+ BpDataTime* time; /* For time trace */
+} GenericBpData;
+
+#define ERTS_NUM_BP_IX 2
+
+typedef struct generic_bp {
+ BeamInstr orig_instr;
+ GenericBpData data[ERTS_NUM_BP_IX];
+} GenericBp;
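/*
 * The BpCount/BpMetaPid/BpDataTime members above carry a refc field because
 * the same sub-structure may be referenced from both generations in data[].
 * A single-threaded sketch of that share/release pattern (the real
 * erts_refc_t operations are atomic; demo_* names are illustrative only).
 */
#include <stdlib.h>

typedef struct { long refc; unsigned long acount; } demo_count_t;

static demo_count_t *demo_share(demo_count_t *c)
{
    c->refc++;               /* another generation now references it */
    return c;
}

static void demo_release(demo_count_t *c)
{
    if (--c->refc <= 0) {    /* last reference gone: free the data */
        free(c);
    }
}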
#define ERTS_BP_CALL_TIME_SCHEDULE_IN (0)
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#define ERTS_BP_CALL_TIME_CALL (0)
-#define ERTS_BP_CALL_TIME_RETURN (1)
-#define ERTS_BP_CALL_TIME_TAIL_CALL (2)
-
-#ifdef ERTS_SMP
-#define ErtsSmpBPLock(BDC) erts_smp_spin_lock(&erts_bp_lock)
-#define ErtsSmpBPUnlock(BDC) erts_smp_spin_unlock(&erts_bp_lock)
-#else
-#define ErtsSmpBPLock(BDC)
-#define ErtsSmpBPUnlock(BDC)
-#endif
-
#ifdef ERTS_SMP
#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
#else
#define bp_sched2ix_proc(p) (0)
#endif
-#define ErtsCountBreak(p, pc,instr_result) \
-do { \
- BpData **bds = (BpData **) (pc)[-4]; \
- BpDataCount *bdc = NULL; \
- Uint ix = bp_sched2ix_proc( (p) ); \
- erts_aint_t count = 0; \
- \
- ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bds); \
- bdc = (BpDataCount *) bds[ix]; \
- bdc = (BpDataCount *) bdc->next; \
- ASSERT(bdc); \
- bds[ix] = (BpData *) bdc; \
- count = erts_smp_atomic_read_nob(&bdc->acount); \
- if (count >= 0) erts_smp_atomic_inc_nob(&bdc->acount); \
- *(instr_result) = bdc->orig_instr; \
-} while (0)
-
-#define ErtsBreakSkip(p, pc,instr_result) \
-do { \
- BpData **bds = (BpData **) (pc)[-4]; \
- BpData *bd = NULL; \
- Uint ix = bp_sched2ix_proc( (p) ); \
- \
- ASSERT((pc)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI)); \
- ASSERT(bds); \
- bd = bds[ix]; \
- ASSERT(bd); \
- bd = bd->next; \
- ASSERT(bd); \
- bds[ix] = bd; \
- *(instr_result) = bd->orig_instr; \
-} while (0)
-
enum erts_break_op{
erts_break_nop = 0, /* Must be false */
erts_break_set = !0, /* Must be true */
@@ -191,7 +94,17 @@ enum erts_break_op{
erts_break_stop
};
+typedef Uint32 ErtsBpIndex;
+typedef struct {
+ BeamInstr* pc;
+ Module* mod;
+} BpFunction;
+
+typedef struct {
+ Uint matched; /* Number matched */
+ BpFunction* matching; /* Matching functions */
+} BpFunctions;
/*
** Function interface exported from beam_bp.c
@@ -199,49 +112,66 @@ enum erts_break_op{
void erts_bp_init(void);
-int erts_set_trace_break(Eterm mfa[3], int specified, Binary *match_spec,
- Eterm tracer_pid);
-int erts_clear_trace_break(Eterm mfa[3], int specified);
-int erts_set_mtrace_break(Eterm mfa[3], int specified, Binary *match_spec,
+void erts_prepare_bp_staging(void);
+void erts_commit_staged_bp(void);
+
+ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void);
+ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void);
+
+void erts_bp_match_functions(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified);
+void erts_bp_free_matched_functions(BpFunctions* f);
+
+void erts_install_breakpoints(BpFunctions* f);
+void erts_uninstall_breakpoints(BpFunctions* f);
+void erts_consolidate_bp_data(BpFunctions* f, int local);
+void erts_consolidate_bif_bp_data(void);
+
+void erts_set_trace_break(BpFunctions *f, Binary *match_spec);
+void erts_clear_trace_break(BpFunctions *f);
+
+void erts_set_call_trace_bif(BeamInstr *pc, Binary *match_spec, int local);
+void erts_clear_call_trace_bif(BeamInstr *pc, int local);
+
+void erts_set_mtrace_break(BpFunctions *f, Binary *match_spec,
Eterm tracer_pid);
-int erts_clear_mtrace_break(Eterm mfa[3], int specified);
+void erts_clear_mtrace_break(BpFunctions *f);
void erts_set_mtrace_bif(BeamInstr *pc, Binary *match_spec,
Eterm tracer_pid);
void erts_clear_mtrace_bif(BeamInstr *pc);
-int erts_set_debug_break(Eterm mfa[3], int specified);
-int erts_clear_debug_break(Eterm mfa[3], int specified);
-int erts_set_count_break(Eterm mfa[3], int specified, enum erts_break_op);
-int erts_clear_count_break(Eterm mfa[3], int specified);
+void erts_set_debug_break(BpFunctions *f);
+void erts_clear_debug_break(BpFunctions *f);
+void erts_set_count_break(BpFunctions *f, enum erts_break_op);
+void erts_clear_count_break(BpFunctions *f);
-int erts_clear_break(Eterm mfa[3], int specified);
+
+void erts_clear_all_breaks(BpFunctions* f);
int erts_clear_module_break(Module *modp);
-int erts_clear_function_break(Module *modp, BeamInstr *pc);
+void erts_clear_export_break(Module *modp, BeamInstr* pc);
+BeamInstr erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg);
BeamInstr erts_trace_break(Process *p, BeamInstr *pc, Eterm *args,
Uint32 *ret_flags, Eterm *tracer_pid);
-Uint32 erts_bif_mtrace(Process *p, BeamInstr *pc, Eterm *args,
- int local, Eterm *tracer_pid);
-int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret,
- Eterm *tracer_pid_ret);
+int erts_is_trace_break(BeamInstr *pc, Binary **match_spec_ret, int local);
int erts_is_mtrace_break(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_rte);
int erts_is_mtrace_bif(BeamInstr *pc, Binary **match_spec_ret,
Eterm *tracer_pid_ret);
int erts_is_native_break(BeamInstr *pc);
-int erts_is_count_break(BeamInstr *pc, Sint *count_ret);
+int erts_is_count_break(BeamInstr *pc, Uint *count_ret);
int erts_is_time_break(Process *p, BeamInstr *pc, Eterm *call_time);
-void erts_trace_time_break(Process *p, BeamInstr *pc, BpDataTime *bdt, Uint type);
+void erts_trace_time_call(Process* c_p, BeamInstr* pc, BpDataTime* bdt);
+void erts_trace_time_return(Process* c_p, BeamInstr* pc);
void erts_schedule_time_break(Process *p, Uint out);
-int erts_set_time_break(Eterm mfa[3], int specified, enum erts_break_op);
-int erts_clear_time_break(Eterm mfa[3], int specified);
+void erts_set_time_break(BpFunctions *f, enum erts_break_op);
+void erts_clear_time_break(BpFunctions *f);
int erts_is_time_trace_bif(Process *p, BeamInstr *pc, Eterm *call_time);
void erts_set_time_trace_bif(BeamInstr *pc, enum erts_break_op);
void erts_clear_time_trace_bif(BeamInstr *pc);
-BpData *erts_get_time_break(Process *p, BeamInstr *pc);
BeamInstr *erts_find_local_func(Eterm mfa[3]);
@@ -258,6 +188,19 @@ ERTS_GLB_INLINE Uint erts_bp_sched2ix(void)
return 0;
#endif
}
+
+extern erts_smp_atomic32_t erts_active_bp_index;
+extern erts_smp_atomic32_t erts_staging_bp_index;
+
+ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&erts_active_bp_index);
+}
+
+ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&erts_staging_bp_index);
+}
#endif
#endif /* _BEAM_BP_H */
diff --git a/erts/emulator/beam/beam_catches.c b/erts/emulator/beam/beam_catches.c
index 406ef1db5f..92f7ffe5a2 100644
--- a/erts/emulator/beam/beam_catches.c
+++ b/erts/emulator/beam/beam_catches.c
@@ -31,78 +31,144 @@ typedef struct {
unsigned cdr;
} beam_catch_t;
-static int free_list;
-static unsigned high_mark;
-static unsigned tabsize;
-static beam_catch_t *beam_catches;
+#ifdef DEBUG
+# define IF_DEBUG(x) x
+#else
+# define IF_DEBUG(x)
+#endif
+
+struct bc_pool {
+ int free_list;
+ unsigned high_mark;
+ unsigned tabsize;
+ beam_catch_t *beam_catches;
+ /*
+ * Note that the 'beam_catches' area is shared by pools. Used slots
+     * are read-only as long as the module is not purgeable. The free-list is
+ * protected by the code_ix lock.
+ */
+
+ IF_DEBUG(int is_staging;)
+};
+
+static struct bc_pool bccix[ERTS_NUM_CODE_IX];
void beam_catches_init(void)
{
- tabsize = DEFAULT_TABSIZE;
- free_list = -1;
- high_mark = 0;
+ int i;
+
+ bccix[0].tabsize = DEFAULT_TABSIZE;
+ bccix[0].free_list = -1;
+ bccix[0].high_mark = 0;
+ bccix[0].beam_catches = erts_alloc(ERTS_ALC_T_CODE,
+ sizeof(beam_catch_t)*DEFAULT_TABSIZE);
+ IF_DEBUG(bccix[0].is_staging = 0);
+ for (i=1; i<ERTS_NUM_CODE_IX; i++) {
+ bccix[i] = bccix[i-1];
+ }
+ /* For initial load: */
+ IF_DEBUG(bccix[erts_staging_code_ix()].is_staging = 1);
+}
- beam_catches = erts_alloc(ERTS_ALC_T_CODE, sizeof(beam_catch_t)*DEFAULT_TABSIZE);
+
+static void gc_old_vec(beam_catch_t* vec)
+{
+ int i;
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ if (bccix[i].beam_catches == vec) {
+ return;
+ }
+ }
+ erts_free(ERTS_ALC_T_CODE, vec);
+}
+
+
+void beam_catches_start_staging(void)
+{
+ ErtsCodeIndex dst = erts_staging_code_ix();
+ ErtsCodeIndex src = erts_active_code_ix();
+ beam_catch_t* prev_vec = bccix[dst].beam_catches;
+
+ ASSERT(!bccix[src].is_staging && !bccix[dst].is_staging);
+
+ bccix[dst] = bccix[src];
+ gc_old_vec(prev_vec);
+ IF_DEBUG(bccix[dst].is_staging = 1);
+}
+
+void beam_catches_end_staging(int commit)
+{
+ IF_DEBUG(bccix[erts_staging_code_ix()].is_staging = 0);
}
unsigned beam_catches_cons(BeamInstr *cp, unsigned cdr)
{
int i;
+ struct bc_pool* p = &bccix[erts_staging_code_ix()];
+ ASSERT(p->is_staging);
/*
* Allocate from free_list while it is non-empty.
* If free_list is empty, allocate at high_mark.
- *
- * This avoids the need to initialise the free list in
- * beam_catches_init(), which would cost O(TABSIZ) time.
*/
- if( free_list >= 0 ) {
- i = free_list;
- free_list = beam_catches[i].cdr;
- } else if( high_mark < tabsize ) {
- i = high_mark;
- high_mark++;
- } else {
- /* No free slots and table is full: realloc table */
- tabsize = 2*tabsize;
- beam_catches = erts_realloc(ERTS_ALC_T_CODE, beam_catches, sizeof(beam_catch_t)*tabsize);
- i = high_mark;
- high_mark++;
+ if (p->free_list >= 0) {
+ i = p->free_list;
+ p->free_list = p->beam_catches[i].cdr;
+ }
+ else {
+ if (p->high_mark >= p->tabsize) {
+ /* No free slots and table is full: realloc table */
+ beam_catch_t* prev_vec = p->beam_catches;
+ unsigned newsize = p->tabsize*2;
+
+ p->beam_catches = erts_alloc(ERTS_ALC_T_CODE,
+ newsize*sizeof(beam_catch_t));
+ sys_memcpy(p->beam_catches, prev_vec,
+ p->tabsize*sizeof(beam_catch_t));
+ gc_old_vec(prev_vec);
+ p->tabsize = newsize;
+ }
+ i = p->high_mark++;
}
- beam_catches[i].cp = cp;
- beam_catches[i].cdr = cdr;
+ p->beam_catches[i].cp = cp;
+ p->beam_catches[i].cdr = cdr;
return i;
}
BeamInstr *beam_catches_car(unsigned i)
{
- if( i >= tabsize ) {
+ struct bc_pool* p = &bccix[erts_active_code_ix()];
+
+ if (i >= p->tabsize ) {
	erl_exit(1, "beam_catches_car: index %#x is out of range\r\n", i);
}
- return beam_catches[i].cp;
+ return p->beam_catches[i].cp;
}
-void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes)
+void beam_catches_delmod(unsigned head, BeamInstr *code, unsigned code_bytes,
+ ErtsCodeIndex code_ix)
{
+ struct bc_pool* p = &bccix[code_ix];
unsigned i, cdr;
+ ASSERT((code_ix == erts_active_code_ix()) != bccix[erts_staging_code_ix()].is_staging);
for(i = head; i != (unsigned)-1;) {
- if( i >= tabsize ) {
+ if (i >= p->tabsize) {
erl_exit(1, "beam_catches_delmod: index %#x is out of range\r\n", i);
}
- if( (char*)beam_catches[i].cp - (char*)code >= code_bytes ) {
+	if ((char*)p->beam_catches[i].cp - (char*)code >= code_bytes) {
erl_exit(1,
"beam_catches_delmod: item %#x has cp %#lx which is not "
"in module's range [%#lx,%#lx[\r\n",
- i, (long)beam_catches[i].cp,
+ i, (long)p->beam_catches[i].cp,
(long)code, (long)((char*)code + code_bytes));
}
- beam_catches[i].cp = 0;
- cdr = beam_catches[i].cdr;
- beam_catches[i].cdr = free_list;
- free_list = i;
+ p->beam_catches[i].cp = 0;
+ cdr = p->beam_catches[i].cdr;
+ p->beam_catches[i].cdr = p->free_list;
+ p->free_list = i;
i = cdr;
}
}
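A rough stand-alone model of the pool sharing introduced in this file: every code index starts out pointing at the same catch vector, a staged pool gets its own grown copy when it has to, and gc_old_vec() only releases a vector once no pool refers to it any more. NUM_CODE_IX, struct pool and the malloc'ed int vectors are simplifications for the sketch, not the emulator's allocator or types.

    #include <stdlib.h>
    #include <string.h>
    #include <assert.h>

    #define NUM_CODE_IX 3                        /* stand-in for ERTS_NUM_CODE_IX */

    struct pool { unsigned tabsize; int *vec; };
    static struct pool pools[NUM_CODE_IX];

    static void gc_old_vec(int *vec)             /* free only when nobody shares it */
    {
        int i;
        for (i = 0; i < NUM_CODE_IX; i++)
            if (pools[i].vec == vec)
                return;
        free(vec);
    }

    int main(void)
    {
        int i, active = 0, staging = 1;
        int *first = malloc(4 * sizeof(int));
        int *grown;

        for (i = 0; i < NUM_CODE_IX; i++) {      /* like beam_catches_init(): all */
            pools[i].tabsize = 4;                /* code indices share one vector */
            pools[i].vec = first;
        }

        grown = malloc(8 * sizeof(int));         /* staged pool outgrows the table */
        memcpy(grown, pools[active].vec, 4 * sizeof(int));
        pools[staging].tabsize = 8;
        pools[staging].vec = grown;

        gc_old_vec(first);                       /* kept: other indices still use it */
        assert(pools[active].vec == first);

        free(grown);
        free(first);
        return 0;
    }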
diff --git a/erts/emulator/beam/beam_catches.h b/erts/emulator/beam/beam_catches.h
index 6223427f0d..b2bd2351a5 100644
--- a/erts/emulator/beam/beam_catches.h
+++ b/erts/emulator/beam/beam_catches.h
@@ -20,12 +20,21 @@
#ifndef __BEAM_CATCHES_H
#define __BEAM_CATCHES_H
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#include "sys.h"
+#include "code_ix.h"
+
#define BEAM_CATCHES_NIL (-1)
void beam_catches_init(void);
+void beam_catches_start_staging(void);
+void beam_catches_end_staging(int commit);
unsigned beam_catches_cons(BeamInstr* cp, unsigned cdr);
BeamInstr *beam_catches_car(unsigned i);
-void beam_catches_delmod(unsigned head, BeamInstr* code, unsigned code_bytes);
+void beam_catches_delmod(unsigned head, BeamInstr* code, unsigned code_bytes,
+ ErtsCodeIndex);
#define catch_pc(x) beam_catches_car(catch_val((x)))
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 8041c92162..a609ed8c71 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -84,6 +84,7 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
int i;
int specified = 0;
Eterm res;
+ BpFunctions f;
if (bool != am_true && bool != am_false)
goto error;
@@ -114,18 +115,30 @@ erts_debug_breakpoint_2(BIF_ALIST_2)
mfa[2] = signed_val(mfa[2]);
}
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
+ erts_bp_match_functions(&f, mfa, specified);
if (bool == am_true) {
- res = make_small(erts_set_debug_break(mfa, specified));
+ erts_set_debug_break(&f);
+ erts_install_breakpoints(&f);
+ erts_commit_staged_bp();
} else {
- res = make_small(erts_clear_debug_break(mfa, specified));
+ erts_clear_debug_break(&f);
+ erts_commit_staged_bp();
+ erts_uninstall_breakpoints(&f);
}
+ erts_consolidate_bp_data(&f, 1);
+ res = make_small(f.matched);
+ erts_bp_free_matched_functions(&f);
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
-
+ erts_release_code_write_permission();
return res;
error:
@@ -207,6 +220,7 @@ erts_debug_disassemble_1(BIF_ALIST_1)
BIF_RET(am_false);
}
} else if (is_tuple(addr)) {
+ ErtsCodeIndex code_ix;
Module* modp;
Eterm mod;
Eterm name;
@@ -225,14 +239,14 @@ erts_debug_disassemble_1(BIF_ALIST_1)
goto error;
}
arity = signed_val(tp[3]);
- modp = erts_get_module(mod);
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(mod, code_ix);
/*
* Try the export entry first to allow disassembly of special functions
* such as erts_debug:apply/4. Then search for it in the module.
*/
-
- if ((ep = erts_find_function(mod, name, arity)) != NULL) {
+ if ((ep = erts_find_function(mod, name, arity, code_ix)) != NULL) {
/* XXX: add "&& ep->address != ep->code+3" condition?
* Consider a traced function.
* Its ep will have ep->address == ep->code+3.
@@ -241,9 +255,9 @@ erts_debug_disassemble_1(BIF_ALIST_1)
* But this code_ptr will point to the start of the Export,
* not the function's func_info instruction. BOOM !?
*/
- code_ptr = ((BeamInstr *) ep->address) - 5;
+ code_ptr = ((BeamInstr *) ep->addressv[code_ix]) - 5;
funcinfo = code_ptr+2;
- } else if (modp == NULL || (code_base = modp->code) == NULL) {
+ } else if (modp == NULL || (code_base = modp->curr.code) == NULL) {
BIF_RET(am_undef);
} else {
n = code_base[MI_NUM_FUNCTIONS];
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 6d3b15cd46..0e9d140908 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -63,11 +63,7 @@
# define PROCESS_MAIN_CHK_LOCKS(P) \
do { \
if ((P)) { \
- erts_pix_lock_t *pix_lock__ = ERTS_PIX2PIXLOCK(internal_pid_index((P)->id));\
erts_proc_lc_chk_only_proc_main((P)); \
- erts_pix_lock(pix_lock__); \
- ASSERT(0 < (P)->lock.refc && (P)->lock.refc < erts_no_schedulers*5);\
- erts_pix_unlock(pix_lock__); \
} \
else \
erts_lc_check_exact(NULL, 0); \
@@ -221,7 +217,6 @@ BeamInstr beam_continue_exit[1];
BeamInstr* em_call_error_handler;
BeamInstr* em_apply_bif;
-BeamInstr* em_call_traced_function;
/* NOTE These should be the only variables containing trace instructions.
@@ -236,11 +231,6 @@ BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/*
- * We should warn only once for tuple funs.
- */
-static erts_smp_atomic_t warned_for_tuple_funs;
-
-/*
* All Beam instructions in numerical order.
*/
@@ -495,7 +485,7 @@ extern int count_instructions;
do { \
if (FCALLS > 0) { \
Eterm* dis_next; \
- SET_I(((Export *) Arg(0))->address); \
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
dis_next = (Eterm *) *I; \
FCALLS--; \
CHECK_ARGS(I); \
@@ -504,7 +494,7 @@ extern int count_instructions;
&& FCALLS > neg_o_reds) { \
goto save_calls1; \
} else { \
- SET_I(((Export *) Arg(0))->address); \
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \
CHECK_ARGS(I); \
goto context_switch; \
} \
@@ -526,7 +516,7 @@ extern int count_instructions;
# define Dispatchfun() DispatchMacroFun()
#endif
-#define Self(R) R = c_p->id
+#define Self(R) R = c_p->common.id
#define Node(R) R = erts_this_node->sysname
#define Arg(N) I[(N)+1]
@@ -965,17 +955,9 @@ static void save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg,
static struct StackTrace * get_trace_from_exc(Eterm exc);
static Eterm make_arglist(Process* c_p, Eterm* reg, int a);
-#if defined(VXWORKS)
-static int init_done;
-#endif
-
void
init_emulator(void)
{
-#if defined(VXWORKS)
- init_done = 0;
-#endif
- erts_smp_atomic_init_nob(&warned_for_tuple_funs, (erts_aint_t) 0);
process_main();
}
@@ -1092,11 +1074,11 @@ init_emulator(void)
void
dtrace_drvport_str(ErlDrvPort drvport, char *port_buf)
{
- Port *port = erts_drvport2port(drvport);
+ Port *port = erts_drvport2port(drvport, NULL);
erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
- port_channel_no(port->id),
- port_number(port->id));
+ port_channel_no(port->common.id),
+ port_number(port->common.id));
}
#endif
/*
@@ -1107,9 +1089,7 @@ dtrace_drvport_str(ErlDrvPort drvport, char *port_buf)
*/
void process_main(void)
{
-#if !defined(VXWORKS)
static int init_done = 0;
-#endif
Process* c_p = NULL;
int reds_used;
#ifdef DEBUG
@@ -1215,7 +1195,7 @@ void process_main(void)
c_p = schedule(c_p, reds_used);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
#ifdef DEBUG
- pid = c_p->id; /* Save for debugging purpouses */
+ pid = c_p->common.id; /* Save for debugging purposes */
#endif
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -1247,7 +1227,7 @@ void process_main(void)
reds = c_p->fcalls;
if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
- && (c_p->trace_flags & F_SENSITIVE) == 0) {
+ && (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE) == 0) {
neg_o_reds = -reds;
FCALLS = REDS_IN(c_p) = 0;
} else {
@@ -1507,7 +1487,7 @@ void process_main(void)
*/
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address);
+ BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]);
DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
}
#endif
@@ -1522,7 +1502,7 @@ void process_main(void)
SET_CP(c_p, I+2);
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address);
+ BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]);
DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
}
#endif
@@ -1535,7 +1515,7 @@ void process_main(void)
OpCase(i_call_ext_only_e):
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->address);
+ BeamInstr* fp = (BeamInstr *) (((Export *) Arg(0))->addressv[erts_active_code_ix()]);
DTRACE_GLOBAL_CALL(c_p, (Eterm)fp[-3], (Eterm)fp[-2], fp[-1]);
}
#endif
@@ -1611,6 +1591,7 @@ void process_main(void)
reg[0] = r(0);
result = erl_send(c_p, r(0), x(1));
PreFetch(0, next);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
if (c_p->mbuf || MSO(c_p).overhead >= BIN_VHEAP_SZ(c_p)) {
@@ -1826,13 +1807,12 @@ void process_main(void)
msgp = PEEK_MESSAGE(c_p);
if (msgp)
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
- else {
+ else
#endif
+ {
SET_I((BeamInstr *) Arg(0));
Goto(*I); /* Jump to a wait or wait_timeout instruction */
-#ifdef ERTS_SMP
}
-#endif
}
ErtsMoveMsgAttachmentIntoProc(msgp, c_p, E, HTOP, FCALLS,
{
@@ -1887,14 +1867,14 @@ void process_main(void)
erts_fprintf(stderr,
"Dtrace -> (%T) stop spreading "
"tag %T with message %T\r\n",
- c_p->id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
+ c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
#endif
} else {
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) kill tag %T with "
"message %T\r\n",
- c_p->id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
+ c_p->common.id,DT_UTAG(c_p),ERL_MESSAGE_TERM(msgp));
#endif
DT_UTAG(c_p) = NIL;
SEQ_TRACE_TOKEN(c_p) = NIL;
@@ -1919,7 +1899,7 @@ void process_main(void)
erts_fprintf(stderr,
"Dtrace -> (%T) receive tag (%T) "
"with message %T\r\n",
- c_p->id, DT_UTAG(c_p), ERL_MESSAGE_TERM(msgp));
+ c_p->common.id, DT_UTAG(c_p), ERL_MESSAGE_TERM(msgp));
#endif
} else {
#endif
@@ -1935,7 +1915,7 @@ void process_main(void)
}
msg = ERL_MESSAGE_TERM(msgp);
seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE,
- c_p->id, c_p);
+ c_p->common.id, c_p);
#ifdef USE_VM_PROBES
}
#endif
@@ -2061,11 +2041,11 @@ void process_main(void)
OpCase(wait_f):
wait2: {
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
c_p->i = (BeamInstr *) Arg(0); /* L1 */
SWAPOUT;
c_p->arity = 0;
- c_p->status = P_WAITING;
+ erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
c_p->current = NULL;
goto do_schedule;
@@ -2588,6 +2568,7 @@ void process_main(void)
reg[0] = r(0);
result = (*bf)(c_p, reg, I);
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result));
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ERTS_HOLE_CHECK(c_p);
ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -3144,10 +3125,6 @@ void process_main(void)
c_p->arg_reg[0] = r(0);
SWAPOUT;
c_p->i = I;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
- if (c_p->status != P_SUSPENDED)
- erts_add_to_runq(c_p);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
goto do_schedule1;
}
@@ -3326,7 +3303,6 @@ void process_main(void)
PROCESS_MAIN_CHK_LOCKS(c_p);
bif_nif_arity = I[-1];
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
{
@@ -3371,7 +3347,6 @@ void process_main(void)
bif_nif_arity = I[-1];
ASSERT(bif_nif_arity <= 3);
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
reg[0] = r(0);
{
Eterm (*bf)(Process*, Eterm*, BeamInstr*) = vbf;
@@ -4580,64 +4555,6 @@ void process_main(void)
* Trace and debugging support.
*/
- /*
- * At this point, I points to the code[3] in the export entry for
- * a trace-enabled function.
- *
- * code[0]: Module
- * code[1]: Function
- * code[2]: Arity
- * code[3]: &&call_traced_function
- * code[4]: Address of function.
- */
- OpCase(call_traced_function): {
- if (IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- unsigned offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
- Export* ep = (Export *) (((char *)I)-offset);
- Uint32 flags;
-
- SWAPOUT;
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- flags = erts_call_trace(c_p, ep->code, ep->match_prog_set, reg,
- 0, &c_p->tracer_proc);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
- SWAPIN;
-
- if (flags & MATCH_SET_RX_TRACE) {
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - 3 < HTOP) {
- /* SWAPOUT, SWAPIN was done and r(0) was saved above */
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 3, reg, ep->code[2]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- }
- E -= 3;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((BeamInstr)(ep->code)));
- ASSERT(is_internal_pid(c_p->tracer_proc) ||
- is_internal_port(c_p->tracer_proc));
- E[2] = make_cp(c_p->cp); /* Code in lower range on halfword */
- E[1] = am_true; /* Process tracer */
- E[0] = make_cp(ep->code);
- c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE)
- ? beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- SET_I((BeamInstr *)Arg(0));
- Dispatch();
- }
-
OpCase(return_trace): {
BeamInstr* code = (BeamInstr *) (UWord) E[0];
@@ -4652,80 +4569,22 @@ void process_main(void)
Goto(*I);
}
- OpCase(i_count_breakpoint): {
+ OpCase(i_generic_breakpoint): {
BeamInstr real_I;
-
- ErtsCountBreak(c_p, (BeamInstr *) I, &real_I);
+ ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
+ SWAPOUT;
+ reg[0] = r(0);
+ real_I = erts_generic_breakpoint(c_p, I, reg);
+ r(0) = reg[0];
+ SWAPIN;
ASSERT(VALID_INSTR(real_I));
Goto(real_I);
}
- /* need to send mfa instead of bdt pointer
- * the pointer might be deallocated.
- */
-
- OpCase(i_time_breakpoint): {
- BeamInstr real_I;
- BpData **bds = (BpData **) (I)[-4];
- BpDataTime *bdt = NULL;
- Uint ix = 0;
-#ifdef ERTS_SMP
- ix = c_p->scheduler_data->no - 1;
-#else
- ix = 0;
-#endif
- bdt = (BpDataTime *)bds[ix];
-
- ASSERT((I)[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- ASSERT(bdt);
- bdt = (BpDataTime *) bdt->next;
- ASSERT(bdt);
- bds[ix] = (BpData *) bdt;
- real_I = bdt->orig_instr;
- ASSERT(VALID_INSTR(real_I));
-
- if (IS_TRACED_FL(c_p, F_TRACE_CALLS) && !(bdt->pause)) {
- if ( (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) ||
- (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) ||
- (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace))) {
- /* This _IS_ a tail recursive call */
- SWAPOUT;
- erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_TAIL_CALL);
- SWAPIN;
- } else {
- SWAPOUT;
- erts_trace_time_break(c_p, I, bdt, ERTS_BP_CALL_TIME_CALL);
-
- /* r register needs to be copied to the array
- * for the garbage collector
- */
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - 2 < HTOP) {
- reg[0] = r(0);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, 2, reg, I[-1]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- }
- SWAPIN;
-
- ASSERT(c_p->htop <= E && E <= c_p->hend);
-
- E -= 2;
- E[0] = make_cp(I);
- E[1] = make_cp(c_p->cp); /* original return address */
- c_p->cp = beam_return_time_trace;
- }
- }
-
- Goto(real_I);
- }
-
OpCase(i_return_time_trace): {
BeamInstr *pc = (BeamInstr *) (UWord) E[0];
SWAPOUT;
- erts_trace_time_break(c_p, pc, NULL, ERTS_BP_CALL_TIME_RETURN);
+ erts_trace_time_return(c_p, pc);
SWAPIN;
c_p->cp = NULL;
SET_I((BeamInstr *) cp_val(E[1]));
@@ -4733,114 +4592,6 @@ void process_main(void)
Goto(*I);
}
- OpCase(i_trace_breakpoint):
- if (! IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
- BeamInstr real_I;
-
- ErtsBreakSkip(c_p, (BeamInstr *) I, &real_I);
- Goto(real_I);
- }
- /* Fall through to next case */
- OpCase(i_mtrace_breakpoint): {
- BeamInstr real_I;
- Uint32 flags;
- Eterm tracer_pid;
- Uint* cpp;
- int return_to_trace = 0, need = 0;
- flags = 0;
- SWAPOUT;
- reg[0] = r(0);
-
- if (*(c_p->cp) == (BeamInstr) OpCode(return_trace)) {
- cpp = &E[2];
- } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_to_trace)) {
- return_to_trace = !0;
- cpp = &E[0];
- } else if (*(c_p->cp) == (BeamInstr) OpCode(i_return_time_trace)) {
- return_to_trace = !0;
- cpp = &E[0];
- } else {
- cpp = NULL;
- }
- if (cpp) {
- /* This _IS_ a tail recursive call, if there are
- * return_trace and/or i_return_to_trace stackframes
- * on the stack, they are not intermixed with y registers
- */
- BeamInstr *cp_save = c_p->cp;
- for (;;) {
- ASSERT(is_CP(*cpp));
- if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) {
- cpp += 3;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) {
- return_to_trace = !0;
- cpp += 1;
- } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_time_trace)) {
- cpp += 2;
- } else
- break;
- }
- c_p->cp = (BeamInstr *) cp_val(*cpp);
- ASSERT(is_CP(*cpp));
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN; /* Needed by shared heap. */
- c_p->cp = cp_save;
- } else {
- ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
- real_I = erts_trace_break(c_p, I, reg, &flags, &tracer_pid);
- ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
- SWAPIN; /* Needed by shared heap. */
- }
-
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
-
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
- need += 1;
- }
- if (flags & MATCH_SET_RX_TRACE) {
- need += 3;
- }
- if (need) {
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- if (E - need < HTOP) {
- /* SWAPOUT was done and r(0) was saved above */
- PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect(c_p, need, reg, I[-1]);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- PROCESS_MAIN_CHK_LOCKS(c_p);
- r(0) = reg[0];
- SWAPIN;
- }
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && !return_to_trace) {
- E -= 1;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- E[0] = make_cp(c_p->cp);
- c_p->cp = (BeamInstr *) beam_return_to_trace;
- }
- if (flags & MATCH_SET_RX_TRACE) {
- E -= 3;
- ASSERT(c_p->htop <= E && E <= c_p->hend);
- ASSERT(is_CP((Eterm) (UWord) (I - 3)));
- ASSERT(am_true == tracer_pid ||
- is_internal_pid(tracer_pid) || is_internal_port(tracer_pid));
- E[2] = make_cp(c_p->cp);
- E[1] = tracer_pid;
- E[0] = make_cp(I - 3); /* We ARE at the beginning of an
- instruction,
- the funcinfo is above i. */
- c_p->cp =
- (flags & MATCH_SET_EXCEPTION_TRACE)
- ? beam_exception_trace : beam_return_trace;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- Goto(real_I);
- }
-
OpCase(i_return_to_trace): {
if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) {
Uint *cpp = (Uint*) E;
@@ -5110,9 +4861,6 @@ void process_main(void)
c_p->arity = 1; /* One living register (the 'true' return value) */
SWAPOUT;
c_p->i = I + 1; /* Next instruction */
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
- erts_add_to_runq(c_p);
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
c_p->current = NULL;
goto do_schedule;
}
@@ -5193,7 +4941,6 @@ void process_main(void)
#endif /* NO_JUMP_TABLE */
em_call_error_handler = OpCode(call_error_handler);
- em_call_traced_function = OpCode(call_traced_function);
em_apply_bif = OpCode(apply_bif);
beam_apply[0] = (BeamInstr) OpCode(i_apply);
@@ -5234,7 +4981,7 @@ void process_main(void)
save_calls(c_p, (Export *) Arg(0));
- SET_I(((Export *) Arg(0))->address);
+ SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]);
dis_next = (Eterm *) *I;
FCALLS--;
@@ -5510,7 +5257,7 @@ terminate_proc(Process* c_p, Eterm Value)
/* EXF_LOG is a primary exception flag */
if (c_p->freason & EXF_LOG) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Error in process %T ", c_p->id);
+ erts_dsprintf(dsbufp, "Error in process %T ", c_p->common.id);
if (erts_is_alive)
erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
erts_dsprintf(dsbufp,"with exit value: %0.*T\n", display_items, Value);
@@ -5911,7 +5658,8 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
/*
* Search for the error_handler module.
*/
- ep = erts_find_function(erts_proc_get_error_handler(p), func, 3);
+ ep = erts_find_function(erts_proc_get_error_handler(p), func, 3,
+ erts_active_code_ix());
if (ep == NULL) { /* No error handler */
p->current = fi;
p->freason = EXC_UNDEF;
@@ -5941,7 +5689,7 @@ call_error_handler(Process* p, BeamInstr* fi, Eterm* reg, Eterm func)
reg[0] = fi[0];
reg[1] = fi[1];
reg[2] = args;
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
}
@@ -5955,7 +5703,7 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
* there is no error handler module.
*/
- if ((ep = erts_find_export_entry(erts_proc_get_error_handler(p),
+ if ((ep = erts_active_export_entry(erts_proc_get_error_handler(p),
am_undefined_function, 3)) == NULL) {
return NULL;
} else {
@@ -6062,7 +5810,7 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
* Note: All BIFs have export entries; thus, no special case is needed.
*/
- if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
+ if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL) goto error;
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
@@ -6070,11 +5818,11 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr *fptr = (BeamInstr *) ep->address;
+ BeamInstr *fptr = (BeamInstr *) ep->addressv[erts_active_code_ix()];
DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]);
}
#endif
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
}
static BeamInstr*
@@ -6116,7 +5864,7 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
* Note: All BIFs have export entries; thus, no special case is needed.
*/
- if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
+ if ((ep = erts_active_export_entry(module, function, arity)) == NULL) {
if ((ep = apply_setup_error_handler(p, module, function, arity, reg)) == NULL)
goto error;
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
@@ -6125,11 +5873,11 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
#ifdef USE_VM_CALL_PROBES
if (DTRACE_ENABLED(global_function_entry)) {
- BeamInstr *fptr = (BeamInstr *) ep->address;
+ BeamInstr *fptr = (BeamInstr *) ep->addressv[erts_active_code_ix()];
DTRACE_GLOBAL_CALL(p, (Eterm)fptr[-3], (Eterm)fptr[-2], (Uint)fptr[-1]);
}
#endif
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
}
int
@@ -6206,9 +5954,7 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
*/
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- if (c_p->msg.len > 0) {
- erts_add_to_runq(c_p);
- } else {
+ if (!c_p->msg.len) {
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
c_p->fvalue = NIL;
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -6216,14 +5962,12 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- ASSERT(!ERTS_PROC_IS_EXITING(c_p));
#ifdef ERTS_SMP
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
- if (c_p->msg.len > 0)
- erts_add_to_runq(c_p);
- else
+ if (!c_p->msg.len)
#endif
- c_p->status = P_WAITING;
+ erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
c_p->current = bif_export[BIF_hibernate_3]->code;
@@ -6240,7 +5984,6 @@ call_fun(Process* p, /* Current process. */
Eterm fun = reg[arity];
Eterm hdr;
int i;
- Eterm function;
Eterm* hp;
if (!is_boxed(fun)) {
@@ -6311,7 +6054,7 @@ call_fun(Process* p, /* Current process. */
Export* ep;
Module* modp;
Eterm module;
-
+ ErtsCodeIndex code_ix = erts_active_code_ix();
/*
* No arity. There is no module loaded that defines the fun,
@@ -6319,9 +6062,9 @@ call_fun(Process* p, /* Current process. */
* representation (the module has never been loaded),
* or the module defining the fun has been unloaded.
*/
-
module = fe->module;
- if ((modp = erts_get_module(module)) != NULL && modp->code != NULL) {
+ if ((modp = erts_get_module(module, code_ix)) != NULL
+ && modp->curr.code != NULL) {
/*
* There is a module loaded, but obviously the fun is not
* defined in it. We must not call the error_handler
@@ -6336,7 +6079,7 @@ call_fun(Process* p, /* Current process. */
*/
ep = erts_find_function(erts_proc_get_error_handler(p),
- am_undefined_lambda, 3);
+ am_undefined_lambda, 3, code_ix);
if (ep == NULL) { /* No error handler */
p->current = NULL;
p->freason = EXC_UNDEF;
@@ -6346,7 +6089,7 @@ call_fun(Process* p, /* Current process. */
reg[1] = fun;
reg[2] = args;
reg[3] = NIL;
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
}
}
} else if (is_export_header(hdr)) {
@@ -6358,7 +6101,7 @@ call_fun(Process* p, /* Current process. */
if (arity == actual_arity) {
DTRACE_GLOBAL_CALL(p, ep->code[0], ep->code[1], (Uint)ep->code[2]);
- return ep->address;
+ return ep->addressv[erts_active_code_ix()];
} else {
/*
* Wrong arity. First build a list of the arguments.
@@ -6378,63 +6121,6 @@ call_fun(Process* p, /* Current process. */
p->fvalue = TUPLE2(hp, fun, args);
return NULL;
}
- } else if (hdr == make_arityval(2)) {
- Eterm* tp;
- Export* ep;
- Eterm module;
-
- tp = tuple_val(fun);
- module = tp[1];
- function = tp[2];
- if (!is_atom(module) || !is_atom(function)) {
- goto badfun;
- }
-
- /*
- * If this is the first time a tuple fun is used,
- * send a warning to the logger.
- */
- if (erts_smp_atomic_xchg_nob(&warned_for_tuple_funs,
- (erts_aint_t) 1) == 0) {
- erts_dsprintf_buf_t* dsbufp;
-
- dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Call to tuple fun {%T,%T}.\n\n"
- "Tuple funs are deprecated and will be removed "
- "in R16. Use \"fun M:F/A\" instead, for example "
- "\"fun %T:%T/%d\".\n\n"
- "(This warning will only be shown the first time "
- "a tuple fun is called.)\n",
- module, function, module, function, arity);
- erts_send_warning_to_logger(p->group_leader, dsbufp);
- }
-
- if ((ep = erts_find_export_entry(module, function, arity)) == NULL) {
- ep = erts_find_export_entry(erts_proc_get_error_handler(p),
- am_undefined_function, 3);
- if (ep == NULL) {
- p->freason = EXC_UNDEF;
- return 0;
- }
- if (is_non_value(args)) {
- Uint sz = 2 * arity;
- if (HeapWordsLeft(p) < sz) {
- erts_garbage_collect(p, sz, reg, arity);
- }
- hp = HEAP_TOP(p);
- HEAP_TOP(p) += sz;
- args = NIL;
- while (arity-- > 0) {
- args = CONS(hp, reg[arity], args);
- hp += 2;
- }
- }
- reg[0] = module;
- reg[1] = function;
- reg[2] = args;
- }
- DTRACE_GLOBAL_CALL(p, module, function, arity);
- return ep->address;
} else {
badfun:
p->current = NULL;
@@ -6500,7 +6186,7 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free)
MSO(p).first = (struct erl_off_heap_header*) funp;
funp->fe = fe;
funp->num_free = num_free;
- funp->creator = p->id;
+ funp->creator = p->common.id;
#ifdef HIPE
funp->native_address = fe->native_address;
#endif
@@ -6537,7 +6223,8 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity)
if ((ep = export_get(&e)) == NULL) {
return 0;
}
- return ep->address == ep->code+3 && (ep->code[3] == (BeamInstr) em_apply_bif);
+ return ep->addressv[erts_active_code_ix()] == ep->code+3
+ && (ep->code[3] == (BeamInstr) em_apply_bif);
}
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index dd788df6e4..b51f076a5d 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -352,27 +352,6 @@ typedef struct LoaderState {
int loc_size; /* Size of location info in bytes (2/4) */
} LoaderState;
-/*
- * Layout of the line table.
- */
-
-#define MI_LINE_FNAME_PTR 0
-#define MI_LINE_LOC_TAB 1
-#define MI_LINE_LOC_SIZE 2
-#define MI_LINE_FUNC_TAB 3
-
-#define LINE_INVALID_LOCATION (0)
-
-/*
- * Macros for manipulating locations.
- */
-
-#define IS_VALID_LOCATION(File, Line) \
- ((unsigned) (File) < 255 && (unsigned) (Line) < ((1 << 24) - 1))
-#define MAKE_LOCATION(File, Line) (((File) << 24) | (Line))
-#define LOC_FILE(Loc) ((Loc) >> 24)
-#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
-
#define GetTagAndValue(Stp, Tag, Val) \
do { \
BeamInstr __w; \
@@ -496,7 +475,8 @@ typedef struct LoaderState {
} while (0)
-static void free_state(LoaderState* stp);
+static void free_loader_state(Binary* magic);
+static void loader_state_dtor(Binary* magic);
static Eterm insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
Eterm group_leader, Eterm module,
BeamInstr* code, Uint size);
@@ -507,6 +487,7 @@ static int verify_chunks(LoaderState* stp);
static int load_atom_table(LoaderState* stp);
static int load_import_table(LoaderState* stp);
static int read_export_table(LoaderState* stp);
+static int is_bif(Eterm mod, Eterm func, unsigned arity);
static int read_lambda_table(LoaderState* stp);
static int read_literal_table(LoaderState* stp);
static int read_line_table(LoaderState* stp);
@@ -548,22 +529,9 @@ static Eterm native_addresses(Process* p, Eterm mod);
int patch_funentries(Eterm Patchlist);
int patch(Eterm Addresses, Uint fe);
static int safe_mul(UWord a, UWord b, UWord* resp);
-static void lookup_loc(FunctionInfo* fi, BeamInstr* pc,
- BeamInstr* modp, int idx);
-
static int must_swap_floats;
-/*
- * The following variables keep a sorted list of address ranges for
- * each module. It allows us to quickly find a function given an
- * instruction pointer.
- */
-Range* modules = NULL; /* Sorted lists of module addresses. */
-int num_loaded_modules; /* Number of loaded modules. */
-int allocated_modules; /* Number of slots allocated. */
-Range* mid_module = NULL; /* Cached search start point */
-
Uint erts_total_code_size;
/**********************************************************************/
@@ -579,11 +547,7 @@ void init_load(void)
f.fd = 1.0;
must_swap_floats = (f.fw[0] == 0);
- allocated_modules = 128;
- modules = (Range *) erts_alloc(ERTS_ALC_T_MODULE_REFS,
- allocated_modules*sizeof(Range));
- mid_module = modules;
- num_loaded_modules = 0;
+ erts_init_ranges();
}
static void
@@ -595,7 +559,7 @@ define_file(LoaderState* stp, char* name, int idx)
}
Eterm
-erts_load_module(Process *c_p,
+erts_preload_module(Process *c_p,
ErtsProcLocks c_p_locks,
Eterm group_leader, /* Group leader or NIL if none. */
Eterm* modp, /*
@@ -605,15 +569,16 @@ erts_load_module(Process *c_p,
byte* code, /* Points to the code to load */
Uint size) /* Size of code to load. */
{
- LoaderState* stp = erts_alloc_loader_state();
+ Binary* magic = erts_alloc_loader_state();
Eterm retval;
- retval = erts_prepare_loading(stp, c_p, group_leader, modp,
+ ASSERT(!erts_initialized);
+ retval = erts_prepare_loading(magic, c_p, group_leader, modp,
code, size);
if (retval != NIL) {
return retval;
}
- return erts_finish_loading(stp, c_p, c_p_locks, modp);
+ return erts_finish_loading(magic, c_p, c_p_locks, modp);
}
/* #define LOAD_MEMORY_HARD_DEBUG 1*/
@@ -629,11 +594,13 @@ extern void check_allocated_block(Uint type, void *blk);
#endif
Eterm
-erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
+erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader,
Eterm* modp, byte* code, Uint unloaded_size)
{
Eterm retval = am_badfile;
+ LoaderState* stp;
+ stp = ERTS_MAGIC_BIN_DATA(magic);
stp->module = *modp;
stp->group_leader = group_leader;
@@ -666,7 +633,7 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
/*
* Initialize code area.
*/
- stp->code_buffer_size = erts_next_heap_size(2048 + stp->num_functions, 0);
+ stp->code_buffer_size = 2048 + stp->num_functions;
stp->code = (BeamInstr *) erts_alloc(ERTS_ALC_T_CODE,
sizeof(BeamInstr) * stp->code_buffer_size);
@@ -679,8 +646,6 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
stp->code[MI_COMPILE_PTR] = 0;
stp->code[MI_COMPILE_SIZE] = 0;
stp->code[MI_COMPILE_SIZE_ON_HEAP] = 0;
- stp->code[MI_NUM_BREAKPOINTS] = 0;
-
/*
* Read the atom table.
@@ -774,23 +739,24 @@ erts_prepare_loading(LoaderState* stp, Process *c_p, Eterm group_leader,
load_error:
if (retval != NIL) {
- free_state(stp);
+ free_loader_state(magic);
}
return retval;
}
Eterm
-erts_finish_loading(LoaderState* stp, Process* c_p,
+erts_finish_loading(Binary* magic, Process* c_p,
ErtsProcLocks c_p_locks, Eterm* modp)
{
Eterm retval;
+ LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
/*
* No other process may run since we will update the export
* table which is not protected by any locks.
*/
- ERTS_SMP_LC_ASSERT(erts_initialized == 0 ||
+ ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() ||
erts_smp_thr_progress_is_blocking());
/*
@@ -811,7 +777,6 @@ erts_finish_loading(LoaderState* stp, Process* c_p,
* exported and imported functions. This can't fail.
*/
- erts_export_consolidate();
CHKBLK(ERTS_ALC_T_CODE,stp->code);
final_touch(stp);
@@ -837,16 +802,20 @@ erts_finish_loading(LoaderState* stp, Process* c_p,
}
load_error:
- free_state(stp);
+ free_loader_state(magic);
return retval;
}
-LoaderState*
+Binary*
erts_alloc_loader_state(void)
{
LoaderState* stp;
+ Binary* magic;
- stp = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LoaderState));
+ magic = erts_create_magic_binary(sizeof(LoaderState),
+ loader_state_dtor);
+ erts_refc_inc(&magic->refc, 1);
+ stp = ERTS_MAGIC_BIN_DATA(magic);
stp->bin = NULL;
stp->function = THE_NON_VALUE; /* Function not known yet */
stp->arity = 0;
@@ -875,76 +844,123 @@ erts_alloc_loader_state(void)
stp->line_instr = 0;
stp->func_line = 0;
stp->fname = 0;
- return stp;
+ return magic;
+}
+
+/*
+ * Return the module name (a tagged atom) for the prepared code
+ * in the magic binary, or NIL if the binary does not contain
+ * prepared code.
+ */
+Eterm
+erts_module_for_prepared_code(Binary* magic)
+{
+ LoaderState* stp;
+
+ if (ERTS_MAGIC_BIN_DESTRUCTOR(magic) != loader_state_dtor) {
+ return NIL;
+ }
+ stp = ERTS_MAGIC_BIN_DATA(magic);
+ if (stp->code != 0) {
+ return stp->module;
+ } else {
+ return NIL;
+ }
}
static void
-free_state(LoaderState* stp)
+free_loader_state(Binary* magic)
{
+ loader_state_dtor(magic);
+ if (erts_refc_dectest(&magic->refc, 0) == 0) {
+ erts_bin_free(magic);
+ }
+}
+
+/*
+ * This destructor function can safely be called multiple times.
+ */
+static void
+loader_state_dtor(Binary* magic)
+{
+ LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
+
if (stp->bin != 0) {
driver_free_binary(stp->bin);
+ stp->bin = 0;
}
if (stp->code != 0) {
erts_free(ERTS_ALC_T_CODE, stp->code);
+ stp->code = 0;
}
- if (stp->labels != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->labels);
+ if (stp->labels != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels);
+ stp->labels = 0;
}
- if (stp->atom != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->atom);
+ if (stp->atom != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->atom);
+ stp->atom = 0;
}
- if (stp->import != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->import);
+ if (stp->import != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->import);
+ stp->import = 0;
}
- if (stp->export != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->export);
+ if (stp->export != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->export);
+ stp->export = 0;
}
if (stp->lambdas != stp->def_lambdas) {
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->lambdas);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->lambdas);
+ stp->lambdas = stp->def_lambdas;
}
- if (stp->literals != NULL) {
+ if (stp->literals != 0) {
int i;
for (i = 0; i < stp->num_literals; i++) {
- if (stp->literals[i].heap != NULL) {
- erts_free(ERTS_ALC_T_LOADER_TMP,
+ if (stp->literals[i].heap != 0) {
+ erts_free(ERTS_ALC_T_PREPARED_CODE,
(void *) stp->literals[i].heap);
+ stp->literals[i].heap = 0;
}
}
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literals);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literals);
+ stp->literals = 0;
}
- while (stp->literal_patches != NULL) {
+ while (stp->literal_patches != 0) {
LiteralPatch* next = stp->literal_patches->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->literal_patches);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->literal_patches);
stp->literal_patches = next;
}
- while (stp->string_patches != NULL) {
+ while (stp->string_patches != 0) {
StringPatch* next = stp->string_patches->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->string_patches);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->string_patches);
stp->string_patches = next;
}
- while (stp->genop_blocks) {
- GenOpBlock* next = stp->genop_blocks->next;
- erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->genop_blocks);
- stp->genop_blocks = next;
- }
if (stp->line_item != 0) {
- erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_item);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, stp->line_item);
+ stp->line_item = 0;
}
if (stp->line_instr != 0) {
- erts_free(ERTS_ALC_T_LOADER_TMP, stp->line_instr);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, stp->line_instr);
+ stp->line_instr = 0;
}
if (stp->func_line != 0) {
- erts_free(ERTS_ALC_T_LOADER_TMP, stp->func_line);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, stp->func_line);
+ stp->func_line = 0;
}
if (stp->fname != 0) {
- erts_free(ERTS_ALC_T_LOADER_TMP, stp->fname);
+ erts_free(ERTS_ALC_T_PREPARED_CODE, stp->fname);
+ stp->fname = 0;
}
- erts_free(ERTS_ALC_T_LOADER_TMP, stp);
+ /*
+ * The following data items should have been freed earlier.
+ */
+
+ ASSERT(stp->genop_blocks == 0);
}
static Eterm
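This hunk embeds the LoaderState in a reference-counted "magic" object whose destructor may run several times, so every branch that frees something also nulls the pointer it freed. A compact model of that ownership pattern follows; struct magic, state_dtor and release are illustrative stand-ins, not the erts_create_magic_binary API.

    #include <stdlib.h>

    struct magic {                            /* stand-in for a magic binary      */
        int refc;
        void (*dtor)(struct magic *);
        char data[64];                        /* the loader state would live here */
    };

    struct state { int *labels; };            /* one freeable field is enough here */

    static void state_dtor(struct magic *m)   /* safe to call more than once       */
    {
        struct state *stp = (struct state *) m->data;
        if (stp->labels) {
            free(stp->labels);
            stp->labels = NULL;               /* a second call becomes a no-op     */
        }
    }

    static void release(struct magic *m)      /* like free_loader_state()          */
    {
        m->dtor(m);
        if (--m->refc == 0)
            free(m);
    }

    int main(void)
    {
        struct magic *m = malloc(sizeof *m);
        struct state *stp = (struct state *) m->data;

        m->refc = 1;
        m->dtor = state_dtor;
        stp->labels = malloc(16 * sizeof(int));

        state_dtor(m);                        /* early cleanup, e.g. after an error   */
        release(m);                           /* final release; dtor runs again safely */
        return 0;
    }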
@@ -954,7 +970,6 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
{
Module* modp;
Eterm retval;
- int i;
if ((retval = beam_make_current_old(c_p, c_p_locks, module)) != NIL) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
@@ -971,30 +986,15 @@ insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
erts_total_code_size += size;
modp = erts_put_module(module);
- modp->code = code;
- modp->code_length = size;
- modp->catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
+ modp->curr.code = code;
+ modp->curr.code_length = size;
+ modp->curr.catches = BEAM_CATCHES_NIL; /* Will be filled in later. */
/*
- * Update address table (used for finding a function from a PC value).
+ * Update ranges (used for finding a function from a PC value).
*/
- if (num_loaded_modules == allocated_modules) {
- allocated_modules *= 2;
- modules = (Range *) erts_realloc(ERTS_ALC_T_MODULE_REFS,
- (void *) modules,
- allocated_modules * sizeof(Range));
- }
- for (i = num_loaded_modules; i > 0; i--) {
- if (code > modules[i-1].start) {
- break;
- }
- modules[i] = modules[i-1];
- }
- modules[i].start = code;
- modules[i].end = (BeamInstr *) (((byte *)code) + size);
- num_loaded_modules++;
- mid_module = &modules[num_loaded_modules/2];
+ erts_update_ranges(code, size);
return NIL;
}
@@ -1217,9 +1217,8 @@ load_atom_table(LoaderState* stp)
GetInt(stp, 4, stp->num_atoms);
stp->num_atoms++;
- stp->atom = erts_alloc(ERTS_ALC_T_LOADER_TMP,
- erts_next_heap_size((stp->num_atoms*sizeof(Eterm)),
- 0));
+ stp->atom = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ stp->num_atoms*sizeof(Eterm));
/*
* Read all atoms.
@@ -1263,10 +1262,8 @@ load_import_table(LoaderState* stp)
int i;
GetInt(stp, 4, stp->num_imports);
- stp->import = erts_alloc(ERTS_ALC_T_LOADER_TMP,
- erts_next_heap_size((stp->num_imports *
- sizeof(ImportEntry)),
- 0));
+ stp->import = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ stp->num_imports * sizeof(ImportEntry));
for (i = 0; i < stp->num_imports; i++) {
int n;
Eterm mod;
@@ -1296,7 +1293,7 @@ load_import_table(LoaderState* stp)
* If the export entry refers to a BIF, get the pointer to
* the BIF function.
*/
- if ((e = erts_find_export_entry(mod, func, arity)) != NULL) {
+ if ((e = erts_active_export_entry(mod, func, arity)) != NULL) {
if (e->code[3] == (BeamInstr) em_apply_bif) {
stp->import[i].bf = (BifFunction) e->code[4];
if (func == am_load_nif && mod == am_erlang && arity == 2) {
@@ -1315,16 +1312,8 @@ load_import_table(LoaderState* stp)
static int
read_export_table(LoaderState* stp)
{
- static struct {
- Eterm mod;
- Eterm func;
- int arity;
- } allow_redef[] = {
- /* The BIFs that are allowed to be redefined by Erlang code */
- {am_erlang,am_apply,2},
- {am_erlang,am_apply,3},
- };
int i;
+ BeamInstr* address;
GetInt(stp, 4, stp->num_exps);
if (stp->num_exps > stp->num_functions) {
@@ -1332,7 +1321,7 @@ read_export_table(LoaderState* stp)
stp->num_exps, stp->num_functions);
}
stp->export
- = (ExportEntry *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ = (ExportEntry *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
(stp->num_exps * sizeof(ExportEntry)));
for (i = 0; i < stp->num_exps; i++) {
@@ -1340,7 +1329,6 @@ read_export_table(LoaderState* stp)
Uint value;
Eterm func;
Uint arity;
- Export* e;
GetInt(stp, 4, n);
GetAtom(stp, n, func);
@@ -1358,29 +1346,34 @@ read_export_table(LoaderState* stp)
if (value == 0) {
LoadError2(stp, "export table entry %d: label %d not resolved", i, n);
}
- stp->export[i].address = stp->code + value;
+ stp->export[i].address = address = stp->code + value;
/*
- * Check that we are not redefining a BIF (except the ones allowed to
- * redefine).
+ * Find out if there is a BIF with the same name.
*/
- if ((e = erts_find_export_entry(stp->module, func, arity)) != NULL) {
- if (e->code[3] == (BeamInstr) em_apply_bif) {
- int j;
- for (j = 0; j < sizeof(allow_redef)/sizeof(allow_redef[0]); j++) {
- if (stp->module == allow_redef[j].mod &&
- func == allow_redef[j].func &&
- arity == allow_redef[j].arity) {
- break;
- }
- }
- if (j == sizeof(allow_redef)/sizeof(allow_redef[0])) {
- LoadError2(stp, "exported function %T/%d redefines BIF",
- func, arity);
- }
- }
+ if (!is_bif(stp->module, func, arity)) {
+ continue;
+ }
+
+ /*
+ * This is a stub for a BIF.
+ *
+ * It should not be exported, and the information in its
+ * func_info instruction should be invalidated so that it
+ * can be filtered out by module_info(functions) and by
+ * any other functions that walk through all local functions.
+ */
+
+ if (stp->labels[n].patches) {
+ LoadError3(stp, "there are local calls to the stub for "
+ "the BIF %T:%T/%d",
+ stp->module, func, arity);
}
+ stp->export[i].address = NULL;
+ address[-1] = 0;
+ address[-2] = NIL;
+ address[-3] = NIL;
}
return 1;
@@ -1388,15 +1381,39 @@ read_export_table(LoaderState* stp)
return 0;
}
+
+static int
+is_bif(Eterm mod, Eterm func, unsigned arity)
+{
+ Export* e = erts_active_export_entry(mod, func, arity);
+ if (e == NULL) {
+ return 0;
+ }
+ if (e->code[3] != (BeamInstr) em_apply_bif) {
+ return 0;
+ }
+ if (mod == am_erlang && func == am_apply && arity == 3) {
+ /*
+ * erlang:apply/3 is a special case -- it is implemented
+ * as an instruction and it is OK to redefine it.
+ */
+ return 0;
+ }
+ return 1;
+}
+
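The stub handling above rests on a simple convention: an unexported BIF stub gets its func_info name cleared to [] so that later walks over the function table (module_info(functions), native_addresses and the like) can skip it. A tiny self-contained illustration of that filter, with plain integers standing in for Eterm atoms and NIL as an assumed placeholder value:

    #include <stdio.h>

    #define NIL 0UL                            /* stands in for the [] term       */

    struct func_info { unsigned long name; int arity; };

    int main(void)
    {
        struct func_info funcs[] = {
            { 'f', 1 },                        /* ordinary local function         */
            { NIL, 0 },                        /* BIF stub, invalidated by loader */
            { 'g', 2 },
        };
        int i, listed = 0;

        for (i = 0; i < 3; i++) {
            if (funcs[i].name == NIL)          /* same idea as the is_nil() check */
                continue;
            listed++;
        }
        printf("%d functions listed\n", listed);   /* prints 2 */
        return 0;
    }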
static int
read_lambda_table(LoaderState* stp)
{
int i;
GetInt(stp, 4, stp->num_lambdas);
- stp->lambdas_allocated = stp->num_lambdas;
- stp->lambdas = (Lambda *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
- stp->num_lambdas * sizeof(Lambda));
+ if (stp->num_lambdas > stp->lambdas_allocated) {
+ ASSERT(stp->lambdas == stp->def_lambdas);
+ stp->lambdas_allocated = stp->num_lambdas;
+ stp->lambdas = (Lambda *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ stp->num_lambdas * sizeof(Lambda));
+ }
for (i = 0; i < stp->num_lambdas; i++) {
Uint n;
Uint32 Index;
@@ -1446,7 +1463,7 @@ read_literal_table(LoaderState* stp)
stp->file_p = uncompressed;
stp->file_left = (unsigned) uncompressed_sz;
GetInt(stp, 4, stp->num_literals);
- stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_literals * sizeof(Literal));
stp->allocated_literals = stp->num_literals;
@@ -1466,7 +1483,7 @@ read_literal_table(LoaderState* stp)
if ((heap_size = erts_decode_ext_size(p, sz)) < 0) {
LoadError1(stp, "literal %d: bad external format", i);
}
- hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ hp = stp->literals[i].heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
heap_size*sizeof(Eterm));
stp->literals[i].off_heap.first = 0;
stp->literals[i].off_heap.overhead = 0;
@@ -1538,7 +1555,7 @@ read_line_table(LoaderState* stp)
*/
num_line_items++;
- lp = (BeamInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ lp = (BeamInstr *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
num_line_items * sizeof(BeamInstr));
stp->line_item = lp;
stp->num_line_items = num_line_items;
@@ -1594,7 +1611,7 @@ read_line_table(LoaderState* stp)
*/
if (stp->num_fnames != 0) {
- stp->fname = (Eterm *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->fname = (Eterm *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_fnames *
sizeof(Eterm));
for (i = 0; i < stp->num_fnames; i++) {
@@ -1610,11 +1627,11 @@ read_line_table(LoaderState* stp)
/*
* Allocate the arrays to be filled while code is being loaded.
*/
- stp->line_instr = (LineInstr *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->line_instr = (LineInstr *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_line_instrs *
sizeof(LineInstr));
stp->current_li = 0;
- stp->func_line = (int *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->func_line = (int *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_functions *
sizeof(int));
@@ -1677,7 +1694,7 @@ read_code_header(LoaderState* stp)
* Initialize label table.
*/
- stp->labels = (Label *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->labels = (Label *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_labels * sizeof(Label));
for (i = 0; i < stp->num_labels; i++) {
stp->labels[i].value = 0;
@@ -1703,9 +1720,9 @@ read_code_header(LoaderState* stp)
#define CodeNeed(w) do { \
ASSERT(ci <= code_buffer_size); \
if (code_buffer_size < ci+(w)) { \
- code_buffer_size = erts_next_heap_size(ci+(w), 0); \
- stp->code = code \
- = (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, \
+ code_buffer_size = 2*ci+(w); \
+ stp->code = code = \
+ (BeamInstr *) erts_realloc(ERTS_ALC_T_CODE, \
(void *) code, \
code_buffer_size * sizeof(BeamInstr)); \
} \
@@ -1731,6 +1748,7 @@ load_code(LoaderState* stp)
GenOp* last_op = NULL;
GenOp** last_op_next = NULL;
int arity;
+ int retval = 1;
/*
* The size of the loaded func_info instruction is needed
@@ -2457,7 +2475,11 @@ load_code(LoaderState* stp)
case op_int_code_end:
stp->code_buffer_size = code_buffer_size;
stp->ci = ci;
- return 1;
+ stp->function = THE_NON_VALUE;
+ stp->genop = NULL;
+ stp->specific_op = -1;
+ retval = 1;
+ goto cleanup;
}
/*
@@ -2471,9 +2493,20 @@ load_code(LoaderState* stp)
}
}
-
load_error:
- return 0;
+ retval = 0;
+
+ cleanup:
+ /*
+ * Clean up everything that is not needed any longer.
+ */
+
+ while (stp->genop_blocks) {
+ GenOpBlock* next = stp->genop_blocks->next;
+ erts_free(ERTS_ALC_T_LOADER_TMP, (void *) stp->genop_blocks);
+ stp->genop_blocks = next;
+ }
+ return retval;
}
@@ -4265,24 +4298,31 @@ final_touch(LoaderState* stp)
index = next;
}
modp = erts_put_module(stp->module);
- modp->catches = catches;
+ modp->curr.catches = catches;
/*
* Export functions.
*/
for (i = 0; i < stp->num_exps; i++) {
- Export* ep = erts_export_put(stp->module, stp->export[i].function,
- stp->export[i].arity);
+ Export* ep;
+ BeamInstr* address = stp->export[i].address;
+
+ if (address == NULL) {
+ /* Skip stub for a BIF */
+ continue;
+ }
+ ep = erts_export_put(stp->module, stp->export[i].function,
+ stp->export[i].arity);
if (!on_load) {
- ep->address = stp->export[i].address;
+ ep->addressv[erts_staging_code_ix()] = address;
} else {
/*
* Don't make any of the exported functions
* callable yet.
*/
- ep->address = ep->code+3;
- ep->code[4] = (BeamInstr) stp->export[i].address;
+ ep->addressv[erts_staging_code_ix()] = ep->code+3;
+ ep->code[4] = (BeamInstr) address;
}
}
@@ -4926,7 +4966,7 @@ new_label(LoaderState* stp)
int num = stp->num_labels;
stp->num_labels++;
- stp->labels = (Label *) erts_realloc(ERTS_ALC_T_LOADER_TMP,
+ stp->labels = (Label *) erts_realloc(ERTS_ALC_T_PREPARED_CODE,
(void *) stp->labels,
stp->num_labels * sizeof(Label));
stp->labels[num].value = 0;
@@ -4937,7 +4977,8 @@ new_label(LoaderState* stp)
static void
new_literal_patch(LoaderState* stp, int pos)
{
- LiteralPatch* p = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(LiteralPatch));
+ LiteralPatch* p = erts_alloc(ERTS_ALC_T_PREPARED_CODE,
+ sizeof(LiteralPatch));
p->pos = pos;
p->next = stp->literal_patches;
stp->literal_patches = p;
@@ -4946,7 +4987,7 @@ new_literal_patch(LoaderState* stp, int pos)
static void
new_string_patch(LoaderState* stp, int pos)
{
- StringPatch* p = erts_alloc(ERTS_ALC_T_LOADER_TMP, sizeof(StringPatch));
+ StringPatch* p = erts_alloc(ERTS_ALC_T_PREPARED_CODE, sizeof(StringPatch));
p->pos = pos;
p->next = stp->string_patches;
stp->string_patches = p;
@@ -4964,14 +5005,14 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size)
ASSERT(stp->num_literals == 0);
stp->allocated_literals = 8;
need = stp->allocated_literals * sizeof(Literal);
- stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ stp->literals = (Literal *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
need);
} else if (stp->allocated_literals <= stp->num_literals) {
Uint need;
stp->allocated_literals *= 2;
need = stp->allocated_literals * sizeof(Literal);
- stp->literals = (Literal *) erts_realloc(ERTS_ALC_T_LOADER_TMP,
+ stp->literals = (Literal *) erts_realloc(ERTS_ALC_T_PREPARED_CODE,
(void *) stp->literals,
need);
}
@@ -4980,7 +5021,7 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size)
lit = stp->literals + stp->num_literals;
lit->offset = 0;
lit->heap_size = heap_size;
- lit->heap = erts_alloc(ERTS_ALC_T_LOADER_TMP, heap_size*sizeof(Eterm));
+ lit->heap = erts_alloc(ERTS_ALC_T_PREPARED_CODE, heap_size*sizeof(Eterm));
lit->term = make_boxed(lit->heap);
lit->off_heap.first = 0;
lit->off_heap.overhead = 0;
@@ -4999,7 +5040,7 @@ erts_module_info_0(Process* p, Eterm module)
return THE_NON_VALUE;
}
- if (erts_get_module(module) == NULL) {
+ if (erts_get_module(module, erts_active_code_ix()) == NULL) {
return THE_NON_VALUE;
}
@@ -5054,32 +5095,43 @@ functions_in_module(Process* p, /* Process whose heap to use. */
BeamInstr* code;
int i;
Uint num_functions;
+ Uint need;
Eterm* hp;
+ Eterm* hp_end;
Eterm result = NIL;
if (is_not_atom(mod)) {
return THE_NON_VALUE;
}
- modp = erts_get_module(mod);
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp == NULL) {
return THE_NON_VALUE;
}
- code = modp->code;
+ code = modp->curr.code;
num_functions = code[MI_NUM_FUNCTIONS];
- hp = HAlloc(p, 5*num_functions);
+ need = 5*num_functions;
+ hp = HAlloc(p, need);
+ hp_end = hp + need;
for (i = num_functions-1; i >= 0 ; i--) {
BeamInstr* func_info = (BeamInstr *) code[MI_FUNCTIONS+i];
Eterm name = (Eterm) func_info[3];
int arity = (int) func_info[4];
Eterm tuple;
- ASSERT(is_atom(name));
- tuple = TUPLE2(hp, name, make_small(arity));
- hp += 3;
- result = CONS(hp, tuple, result);
- hp += 2;
+ /*
+ * If the function name is [], this entry is a stub for
+ * a BIF that should be ignored.
+ */
+ ASSERT(is_atom(name) || is_nil(name));
+ if (is_atom(name)) {
+ tuple = TUPLE2(hp, name, make_small(arity));
+ hp += 3;
+ result = CONS(hp, tuple, result);
+ hp += 2;
+ }
}
+ HRelease(p, hp_end, hp);
return result;
}
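functions_in_module() now allocates for the worst case (five heap words per function) and hands back the unused tail with HRelease() after the BIF stubs have been skipped. The fragment below sketches that over-allocate/trim pattern with malloc/realloc in place of the process-heap primitives; the 10% stub ratio is invented for the example.

    #include <stdlib.h>

    int main(void)
    {
        unsigned num_functions = 100, i, kept = 0;
        size_t word = sizeof(long);
        /* worst case: every function needs a 2-tuple (3 words) + a cons (2 words) */
        long *heap = malloc(5 * num_functions * word);

        for (i = 0; i < num_functions; i++) {
            if (i % 10 == 0)
                continue;                 /* pretend these are skipped BIF stubs */
            kept++;                       /* would build the tuple and cons here */
        }

        /* like HRelease(p, hp_end, hp): give back the words never used */
        heap = realloc(heap, 5 * kept * word);
        free(heap);
        return 0;
    }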
@@ -5106,12 +5158,12 @@ native_addresses(Process* p, Eterm mod)
return THE_NON_VALUE;
}
- modp = erts_get_module(mod);
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp == NULL) {
return THE_NON_VALUE;
}
- code = modp->code;
+ code = modp->curr.code;
num_functions = code[MI_NUM_FUNCTIONS];
need = (6+BIG_UINT_HEAP_SIZE)*num_functions;
hp = HAlloc(p, need);
@@ -5122,9 +5174,11 @@ native_addresses(Process* p, Eterm mod)
int arity = (int) func_info[4];
Eterm tuple;
- ASSERT(is_atom(name));
+ ASSERT(is_atom(name) || is_nil(name)); /* [] if BIF stub */
if (func_info[1] != 0) {
- Eterm addr = erts_bld_uint(&hp, NULL, func_info[1]);
+ Eterm addr;
+ ASSERT(is_atom(name));
+ addr = erts_bld_uint(&hp, NULL, func_info[1]);
tuple = erts_bld_tuple(&hp, NULL, 3, name, make_small(arity), addr);
result = erts_bld_cons(&hp, NULL, tuple, result);
}
@@ -5149,18 +5203,20 @@ exported_from_module(Process* p, /* Process whose heap to use. */
Eterm* hp = NULL;
Eterm* hend = NULL;
Eterm result = NIL;
+ ErtsCodeIndex code_ix;
if (is_not_atom(mod)) {
return THE_NON_VALUE;
}
- for (i = 0; i < export_list_size(); i++) {
- Export* ep = export_list(i);
+ code_ix = erts_active_code_ix();
+ for (i = 0; i < export_list_size(code_ix); i++) {
+ Export* ep = export_list(i,code_ix);
if (ep->code[0] == mod) {
Eterm tuple;
- if (ep->address == ep->code+3 &&
+ if (ep->addressv[code_ix] == ep->code+3 &&
ep->code[3] == (BeamInstr) em_call_error_handler) {
/* There is a call to the function, but it does not exist. */
continue;
@@ -5204,11 +5260,11 @@ attributes_for_module(Process* p, /* Process whose heap to use. */
return THE_NON_VALUE;
}
- modp = erts_get_module(mod);
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp == NULL) {
return THE_NON_VALUE;
}
- code = modp->code;
+ code = modp->curr.code;
ext = (byte *) code[MI_ATTR_PTR];
if (ext != NULL) {
hp = HAlloc(p, code[MI_ATTR_SIZE_ON_HEAP]);
@@ -5244,11 +5300,11 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
return THE_NON_VALUE;
}
- modp = erts_get_module(mod);
+ modp = erts_get_module(mod, erts_active_code_ix());
if (modp == NULL) {
return THE_NON_VALUE;
}
- code = modp->code;
+ code = modp->curr.code;
ext = (byte *) code[MI_COMPILE_PTR];
if (ext != NULL) {
hp = HAlloc(p, code[MI_COMPILE_SIZE_ON_HEAP]);
@@ -5263,113 +5319,6 @@ compilation_info_for_module(Process* p, /* Process whose heap to use. */
}
/*
- * Find a function from the given pc and fill information in
- * the FunctionInfo struct. If the full_info is non-zero, fill
- * in all available information (including location in the
- * source code). If no function is found, the 'current' field
- * will be set to NULL.
- */
-
-void
-erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
-{
- Range* low = modules;
- Range* high = low + num_loaded_modules;
- Range* mid = mid_module;
-
- fi->current = NULL;
- fi->needed = 5;
- fi->loc = LINE_INVALID_LOCATION;
- while (low < high) {
- if (pc < mid->start) {
- high = mid;
- } else if (pc > mid->end) {
- low = mid + 1;
- } else {
- BeamInstr** low1 = (BeamInstr **) (mid->start + MI_FUNCTIONS);
- BeamInstr** high1 = low1 + mid->start[MI_NUM_FUNCTIONS];
- BeamInstr** mid1;
-
- while (low1 < high1) {
- mid1 = low1 + (high1-low1) / 2;
- if (pc < mid1[0]) {
- high1 = mid1;
- } else if (pc < mid1[1]) {
- mid_module = mid;
- fi->current = mid1[0]+2;
- if (full_info) {
- BeamInstr** fp = (BeamInstr **) (mid->start +
- MI_FUNCTIONS);
- int idx = mid1 - fp;
- lookup_loc(fi, pc, mid->start, idx);
- }
- return;
- } else {
- low1 = mid1 + 1;
- }
- }
- return;
- }
- mid = low + (high-low) / 2;
- }
-}
-
-static void
-lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx)
-{
- Eterm* line = (Eterm *) modp[MI_LINE_TABLE];
- Eterm* low;
- Eterm* high;
- Eterm* mid;
- Eterm pc;
-
- if (line == 0) {
- return;
- }
-
- pc = (Eterm) (BeamInstr) orig_pc;
- fi->fname_ptr = (Eterm *) (BeamInstr) line[MI_LINE_FNAME_PTR];
- low = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx];
- high = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx+1];
- while (high > low) {
- mid = low + (high-low) / 2;
- if (pc < mid[0]) {
- high = mid;
- } else if (pc < mid[1]) {
- int file;
- int index = mid - (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB];
-
- if (line[MI_LINE_LOC_SIZE] == 2) {
- Uint16* loc_table =
- (Uint16 *) (BeamInstr) line[MI_LINE_LOC_TAB];
- fi->loc = loc_table[index];
- } else {
- Uint32* loc_table =
- (Uint32 *) (BeamInstr) line[MI_LINE_LOC_TAB];
- ASSERT(line[MI_LINE_LOC_SIZE] == 4);
- fi->loc = loc_table[index];
- }
- if (fi->loc == LINE_INVALID_LOCATION) {
- return;
- }
- fi->needed += 3+2+3+2;
- file = LOC_FILE(fi->loc);
- if (file == 0) {
- /* Special case: Module name with ".erl" appended */
- Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
- fi->needed += 2*(mod_atom->len+4);
- } else {
- Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
- fi->needed += 2*ap->len;
- }
- return;
- } else {
- low = mid + 1;
- }
- }
-}
-
-/*
 * Build a single {M,F,A,Location} item to be part of
* a stack trace.
*/
@@ -5449,6 +5398,7 @@ code_get_chunk_2(BIF_ALIST_2)
Process* p = BIF_P;
Eterm Bin = BIF_ARG_1;
Eterm Chunk = BIF_ARG_2;
+ Binary* magic = 0;
LoaderState* stp;
Uint chunk = 0;
ErlSubBin* sb;
@@ -5461,12 +5411,13 @@ code_get_chunk_2(BIF_ALIST_2)
Eterm real_bin;
byte* temp_alloc = NULL;
- stp = erts_alloc_loader_state();
+ magic = erts_alloc_loader_state();
+ stp = ERTS_MAGIC_BIN_DATA(magic);
if ((start = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
error:
erts_free_aligned_binary_bytes(temp_alloc);
- if (stp) {
- free_state(stp);
+ if (magic) {
+ free_loader_state(magic);
}
BIF_ERROR(p, BADARG);
}
@@ -5511,7 +5462,7 @@ code_get_chunk_2(BIF_ALIST_2)
done:
erts_free_aligned_binary_bytes(temp_alloc);
- free_state(stp);
+ free_loader_state(magic);
return res;
}
@@ -5524,14 +5475,16 @@ code_module_md5_1(BIF_ALIST_1)
{
Process* p = BIF_P;
Eterm Bin = BIF_ARG_1;
+ Binary* magic;
LoaderState* stp;
byte* bytes;
byte* temp_alloc = NULL;
Eterm res;
- stp = erts_alloc_loader_state();
+ magic = erts_alloc_loader_state();
+ stp = ERTS_MAGIC_BIN_DATA(magic);
if ((bytes = erts_get_aligned_binary_bytes(Bin, &temp_alloc)) == NULL) {
- free_state(stp);
+ free_loader_state(magic);
BIF_ERROR(p, BADARG);
}
stp->module = THE_NON_VALUE; /* Suppress diagnostics */
@@ -5545,7 +5498,7 @@ code_module_md5_1(BIF_ALIST_1)
done:
erts_free_aligned_binary_bytes(temp_alloc);
- free_state(stp);
+ free_loader_state(magic);
return res;
}
@@ -5574,7 +5527,8 @@ stub_copy_info(LoaderState* stp,
int chunk, /* Chunk: ATTR_CHUNK or COMPILE_CHUNK */
byte* info, /* Where to store info. */
BeamInstr* ptr_word, /* Where to store pointer into info. */
- BeamInstr* size_word) /* Where to store size of info. */
+ BeamInstr* size_word, /* Where to store size of info. */
+ BeamInstr* size_on_heap_word) /* Where to store size on heap. */
{
Sint decoded_size;
Uint size = stp->chunks[chunk].size;
@@ -5585,7 +5539,8 @@ stub_copy_info(LoaderState* stp,
if (decoded_size < 0) {
return 0;
}
- *size_word = decoded_size;
+ *size_word = (BeamInstr) size;
+ *size_on_heap_word = decoded_size;
}
return info + size;
}
@@ -5601,7 +5556,7 @@ stub_read_export_table(LoaderState* stp)
stp->num_exps, stp->num_functions);
}
stp->export
- = (ExportEntry *) erts_alloc(ERTS_ALC_T_LOADER_TMP,
+ = (ExportEntry *) erts_alloc(ERTS_ALC_T_PREPARED_CODE,
stp->num_exps * sizeof(ExportEntry));
for (i = 0; i < stp->num_exps; i++) {
@@ -5627,20 +5582,29 @@ stub_final_touch(LoaderState* stp, BeamInstr* fp)
{
int i;
int n = stp->num_exps;
+ Eterm mod = fp[2];
Eterm function = fp[3];
int arity = fp[4];
#ifdef HIPE
Lambda* lp;
#endif
+ if (is_bif(mod, function, arity)) {
+ fp[1] = 0;
+ fp[2] = 0;
+ fp[3] = 0;
+ fp[4] = 0;
+ return;
+ }
+
/*
* Test if the function should be exported.
*/
for (i = 0; i < n; i++) {
if (stp->export[i].function == function && stp->export[i].arity == arity) {
- Export* ep = erts_export_put(fp[2], function, arity);
- ep->address = fp+5;
+ Export* ep = erts_export_put(mod, function, arity);
+ ep->addressv[erts_staging_code_ix()] = fp+5;
return;
}
}
@@ -5819,6 +5783,7 @@ patch_funentries(Eterm Patchlist)
Eterm
erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
{
+ Binary* magic;
LoaderState* stp;
BeamInstr Funcs;
BeamInstr Patchlist;
@@ -5840,7 +5805,8 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Must initialize stp->lambdas here because the error handling code
* at label 'error' uses it.
*/
- stp = erts_alloc_loader_state();
+ magic = erts_alloc_loader_state();
+ stp = ERTS_MAGIC_BIN_DATA(magic);
if (is_not_atom(Mod)) {
goto error;
@@ -5920,7 +5886,6 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
code[MI_COMPILE_PTR] = 0;
code[MI_COMPILE_SIZE] = 0;
code[MI_COMPILE_SIZE_ON_HEAP] = 0;
- code[MI_NUM_BREAKPOINTS] = 0;
code[MI_LITERALS_START] = 0;
code[MI_LITERALS_END] = 0;
code[MI_LITERALS_OFF_HEAP] = 0;
@@ -5997,12 +5962,16 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
info = (byte *) fp;
info = stub_copy_info(stp, ATTR_CHUNK, info,
- code+MI_ATTR_PTR, code+MI_ATTR_SIZE_ON_HEAP);
+ code+MI_ATTR_PTR,
+ code+MI_ATTR_SIZE,
+ code+MI_ATTR_SIZE_ON_HEAP);
if (info == NULL) {
goto error;
}
info = stub_copy_info(stp, COMPILE_CHUNK, info,
- code+MI_COMPILE_PTR, code+MI_COMPILE_SIZE_ON_HEAP);
+ code+MI_COMPILE_PTR,
+ code+MI_COMPILE_SIZE,
+ code+MI_COMPILE_SIZE_ON_HEAP);
if (info == NULL) {
goto error;
}
@@ -6028,13 +5997,13 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
if (patch_funentries(Patchlist)) {
erts_free_aligned_binary_bytes(temp_alloc);
- free_state(stp);
+ free_loader_state(magic);
return Mod;
}
error:
erts_free_aligned_binary_bytes(temp_alloc);
- free_state(stp);
+ free_loader_state(magic);
BIF_ERROR(p, BADARG);
}
@@ -6051,3 +6020,4 @@ static int safe_mul(UWord a, UWord b, UWord* resp)
return (res / b) == a;
}
}
+
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index 997ba197db..1048f258a5 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -49,11 +49,6 @@ extern void** beam_ops;
extern BeamInstr beam_debug_apply[];
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_apply_bif;
-extern BeamInstr* em_call_traced_function;
-typedef struct {
- BeamInstr* start; /* Pointer to start of module. */
- BeamInstr* end; /* Points one word beyond last function in module. */
-} Range;
/*
* The following variables keep a sorted list of address ranges for
@@ -61,11 +56,6 @@ typedef struct {
* instruction pointer.
*/
-extern Range* modules;
-extern int num_loaded_modules;
-extern int allocated_modules;
-extern Range* mid_module;
-
/* Total code size in bytes */
extern Uint erts_total_code_size;
/*
@@ -94,27 +84,22 @@ extern Uint erts_total_code_size;
#define MI_COMPILE_SIZE_ON_HEAP 6
/*
- * Number of breakpoints in module is stored in this word
- */
-#define MI_NUM_BREAKPOINTS 7
-
-/*
* Literal area (constant pool).
*/
-#define MI_LITERALS_START 8
-#define MI_LITERALS_END 9
-#define MI_LITERALS_OFF_HEAP 10
+#define MI_LITERALS_START 7
+#define MI_LITERALS_END 8
+#define MI_LITERALS_OFF_HEAP 9
/*
* Pointer to the on_load function (or NULL if none).
*/
-#define MI_ON_LOAD_FUNCTION_PTR 11
+#define MI_ON_LOAD_FUNCTION_PTR 10
/*
* Pointer to the line table (or NULL if none).
*/
-#define MI_LINE_TABLE 12
+#define MI_LINE_TABLE 11
/*
* Start of function pointer table. This table contains pointers to
@@ -125,5 +110,27 @@ extern Uint erts_total_code_size;
* this table.
*/
-#define MI_FUNCTIONS 13
+#define MI_FUNCTIONS 12
+
+/*
+ * Layout of the line table.
+ */
+
+#define MI_LINE_FNAME_PTR 0
+#define MI_LINE_LOC_TAB 1
+#define MI_LINE_LOC_SIZE 2
+#define MI_LINE_FUNC_TAB 3
+
+#define LINE_INVALID_LOCATION (0)
+
+/*
+ * Macros for manipulating locations.
+ */
+
+#define IS_VALID_LOCATION(File, Line) \
+ ((unsigned) (File) < 255 && (unsigned) (Line) < ((1 << 24) - 1))
+#define MAKE_LOCATION(File, Line) (((File) << 24) | (Line))
+#define LOC_FILE(Loc) ((Loc) >> 24)
+#define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))
+
#endif /* _BEAM_LOAD_H */
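
The location macros introduced above pack a file index and a line number into one word: the file index occupies the top 8 bits and the line number the low 24 bits. A self-contained check of that packing (the macros are copied from the header; main() exists only for illustration):

    #include <assert.h>
    #include <stdio.h>

    #define IS_VALID_LOCATION(File, Line) \
        ((unsigned) (File) < 255 && (unsigned) (Line) < ((1 << 24) - 1))
    #define MAKE_LOCATION(File, Line) (((File) << 24) | (Line))
    #define LOC_FILE(Loc) ((Loc) >> 24)
    #define LOC_LINE(Loc) ((Loc) & ((1 << 24)-1))

    int main(void)
    {
        unsigned loc = MAKE_LOCATION(2, 417);       /* file #2, line 417 */
        assert(IS_VALID_LOCATION(2, 417));
        assert(LOC_FILE(loc) == 2);
        assert(LOC_LINE(loc) == 417);
        printf("loc=0x%08x file=%u line=%u\n", loc, LOC_FILE(loc), LOC_LINE(loc));
        return 0;
    }
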
diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c
new file mode 100644
index 0000000000..0f2d5d0c2a
--- /dev/null
+++ b/erts/emulator/beam/beam_ranges.c
@@ -0,0 +1,349 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "sys.h"
+#include "erl_vm.h"
+#include "global.h"
+#include "beam_load.h"
+
+typedef struct {
+ BeamInstr* start; /* Pointer to start of module. */
+ erts_smp_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */
+} Range;
+
+/* Range 'end' needs to be atomic as we purge module
+ by setting end=start in active code_ix */
+#define RANGE_END(R) ((BeamInstr*)erts_smp_atomic_read_nob(&(R)->end))
+
+static Range* find_range(BeamInstr* pc);
+static void lookup_loc(FunctionInfo* fi, BeamInstr* pc,
+ BeamInstr* modp, int idx);
+
+/*
+ * The following variables keep a sorted list of address ranges for
+ * each module. It allows us to quickly find a function given an
+ * instruction pointer.
+ */
+struct ranges {
+ Range* modules; /* Sorted lists of module addresses. */
+ Sint n; /* Number of range entries. */
+ Sint allocated; /* Number of allocated entries. */
+ erts_smp_atomic_t mid; /* Cached search start point */
+};
+static struct ranges r[ERTS_NUM_CODE_IX];
+static erts_smp_atomic_t mem_used;
+
+#ifdef HARD_DEBUG
+static void check_consistency(struct ranges* p)
+{
+ int i;
+
+ ASSERT(p->n <= p->allocated);
+ ASSERT((Uint)(p->mid - p->modules) < p->n ||
+ (p->mid == p->modules && p->n == 0));
+ for (i = 0; i < p->n; i++) {
+ ASSERT(p->modules[i].start <= RANGE_END(&p->modules[i]));
+ ASSERT(!i || RANGE_END(&p->modules[i-1]) < p->modules[i].start);
+ }
+}
+# define CHECK(r) check_consistency(r)
+#else
+# define CHECK(r)
+#endif /* HARD_DEBUG */
+
+
+void
+erts_init_ranges(void)
+{
+ Sint i;
+
+ erts_smp_atomic_init_nob(&mem_used, 0);
+ for (i = 0; i < ERTS_NUM_CODE_IX; i++) {
+ r[i].modules = 0;
+ r[i].n = 0;
+ r[i].allocated = 0;
+ erts_smp_atomic_init_nob(&r[i].mid, 0);
+ }
+}
+
+void
+erts_start_staging_ranges(void)
+{
+ ErtsCodeIndex dst = erts_staging_code_ix();
+
+ if (r[dst].modules) {
+ erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated);
+ erts_free(ERTS_ALC_T_MODULE_REFS, r[dst].modules);
+ r[dst].modules = NULL;
+ }
+}
+
+void
+erts_end_staging_ranges(int commit)
+{
+ ErtsCodeIndex dst = erts_staging_code_ix();
+
+ if (commit && r[dst].modules == NULL) {
+ Sint i;
+ Sint n;
+
+ /* No modules added, just clone src and remove purged code. */
+ ErtsCodeIndex src = erts_active_code_ix();
+
+ erts_smp_atomic_add_nob(&mem_used, r[src].n);
+ r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS,
+ r[src].n * sizeof(Range));
+ r[dst].allocated = r[src].n;
+ n = 0;
+ for (i = 0; i < r[src].n; i++) {
+ Range* rp = r[src].modules+i;
+ if (rp->start < RANGE_END(rp)) {
+ /* Only insert a module that has not been purged. */
+ r[dst].modules[n] = *rp;
+ n++;
+ }
+ }
+ r[dst].n = n;
+ erts_smp_atomic_set_nob(&r[dst].mid,
+ (erts_aint_t) (r[dst].modules + n / 2));
+ }
+}
+
+void
+erts_update_ranges(BeamInstr* code, Uint size)
+{
+ ErtsCodeIndex dst = erts_staging_code_ix();
+ ErtsCodeIndex src = erts_active_code_ix();
+ Sint i;
+ Sint n;
+ Sint need;
+
+ if (src == dst) {
+ ASSERT(!erts_initialized);
+
+ /*
+ * During start-up of system, the indices are the same.
+ * Handle this by faking a source area.
+ */
+ src = (src+1) % ERTS_NUM_CODE_IX;
+ if (r[src].modules) {
+ erts_smp_atomic_add_nob(&mem_used, -r[src].allocated);
+ erts_free(ERTS_ALC_T_MODULE_REFS, r[src].modules);
+ }
+ r[src] = r[dst];
+ r[dst].modules = 0;
+ }
+
+ CHECK(&r[src]);
+
+ ASSERT(r[dst].modules == NULL);
+ need = r[dst].allocated = r[src].n + 1;
+ erts_smp_atomic_add_nob(&mem_used, need);
+ r[dst].modules = (Range *) erts_alloc(ERTS_ALC_T_MODULE_REFS,
+ need * sizeof(Range));
+ n = 0;
+ for (i = 0; i < r[src].n; i++) {
+ Range* rp = r[src].modules+i;
+ if (code < rp->start) {
+ r[dst].modules[n].start = code;
+ erts_smp_atomic_init_nob(&r[dst].modules[n].end,
+ (erts_aint_t)(((byte *)code) + size));
+ ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
+ n++;
+ break;
+ }
+ if (rp->start < RANGE_END(rp)) {
+ /* Only insert a module that has not been purged. */
+ r[dst].modules[n].start = rp->start;
+ erts_smp_atomic_init_nob(&r[dst].modules[n].end,
+ (erts_aint_t)(RANGE_END(rp)));
+ ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
+ n++;
+ }
+ }
+
+ while (i < r[src].n) {
+ Range* rp = r[src].modules+i;
+ if (rp->start < RANGE_END(rp)) {
+ /* Only insert a module that has not been purged. */
+ r[dst].modules[n].start = rp->start;
+ erts_smp_atomic_init_nob(&r[dst].modules[n].end,
+ (erts_aint_t)(RANGE_END(rp)));
+ ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < rp->start);
+ n++;
+ }
+ i++;
+ }
+
+ if (n == 0 || code > r[dst].modules[n-1].start) {
+ r[dst].modules[n].start = code;
+ erts_smp_atomic_init_nob(&r[dst].modules[n].end,
+ (erts_aint_t)(((byte *)code) + size));
+ ASSERT(!n || RANGE_END(&r[dst].modules[n-1]) < code);
+ n++;
+ }
+
+ ASSERT(n <= r[src].n+1);
+ r[dst].n = n;
+ erts_smp_atomic_set_nob(&r[dst].mid,
+ (erts_aint_t) (r[dst].modules + n / 2));
+
+ CHECK(&r[dst]);
+ CHECK(&r[src]);
+}
+
+void
+erts_remove_from_ranges(BeamInstr* code)
+{
+ Range* rp = find_range(code);
+ erts_smp_atomic_set_nob(&rp->end, (erts_aint_t)rp->start);
+}
+
+UWord
+erts_ranges_sz(void)
+{
+ return erts_smp_atomic_read_nob(&mem_used) * sizeof(Range);
+}
+
+/*
+ * Find a function from the given pc and fill information in
+ * the FunctionInfo struct. If the full_info is non-zero, fill
+ * in all available information (including location in the
+ * source code). If no function is found, the 'current' field
+ * will be set to NULL.
+ */
+
+void
+erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info)
+{
+ BeamInstr** low;
+ BeamInstr** high;
+ BeamInstr** mid;
+ Range* rp;
+
+ fi->current = NULL;
+ fi->needed = 5;
+ fi->loc = LINE_INVALID_LOCATION;
+ rp = find_range(pc);
+ if (rp == 0) {
+ return;
+ }
+
+ low = (BeamInstr **) (rp->start + MI_FUNCTIONS);
+ high = low + rp->start[MI_NUM_FUNCTIONS];
+ while (low < high) {
+ mid = low + (high-low) / 2;
+ if (pc < mid[0]) {
+ high = mid;
+ } else if (pc < mid[1]) {
+ fi->current = mid[0]+2;
+ if (full_info) {
+ BeamInstr** fp = (BeamInstr **) (rp->start +
+ MI_FUNCTIONS);
+ int idx = mid - fp;
+ lookup_loc(fi, pc, rp->start, idx);
+ }
+ return;
+ } else {
+ low = mid + 1;
+ }
+ }
+}
+
+static Range*
+find_range(BeamInstr* pc)
+{
+ ErtsCodeIndex active = erts_active_code_ix();
+ Range* low = r[active].modules;
+ Range* high = low + r[active].n;
+ Range* mid = (Range *) erts_smp_atomic_read_nob(&r[active].mid);
+
+ CHECK(&r[active]);
+ while (low < high) {
+ if (pc < mid->start) {
+ high = mid;
+ } else if (pc > RANGE_END(mid)) {
+ low = mid + 1;
+ } else {
+ erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid);
+ return mid;
+ }
+ mid = low + (high-low) / 2;
+ }
+ return 0;
+}
+
+static void
+lookup_loc(FunctionInfo* fi, BeamInstr* orig_pc, BeamInstr* modp, int idx)
+{
+ Eterm* line = (Eterm *) modp[MI_LINE_TABLE];
+ Eterm* low;
+ Eterm* high;
+ Eterm* mid;
+ Eterm pc;
+
+ if (line == 0) {
+ return;
+ }
+
+ pc = (Eterm) (BeamInstr) orig_pc;
+ fi->fname_ptr = (Eterm *) (BeamInstr) line[MI_LINE_FNAME_PTR];
+ low = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx];
+ high = (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB+idx+1];
+ while (high > low) {
+ mid = low + (high-low) / 2;
+ if (pc < mid[0]) {
+ high = mid;
+ } else if (pc < mid[1]) {
+ int file;
+ int index = mid - (Eterm *) (BeamInstr) line[MI_LINE_FUNC_TAB];
+
+ if (line[MI_LINE_LOC_SIZE] == 2) {
+ Uint16* loc_table =
+ (Uint16 *) (BeamInstr) line[MI_LINE_LOC_TAB];
+ fi->loc = loc_table[index];
+ } else {
+ Uint32* loc_table =
+ (Uint32 *) (BeamInstr) line[MI_LINE_LOC_TAB];
+ ASSERT(line[MI_LINE_LOC_SIZE] == 4);
+ fi->loc = loc_table[index];
+ }
+ if (fi->loc == LINE_INVALID_LOCATION) {
+ return;
+ }
+ fi->needed += 3+2+3+2;
+ file = LOC_FILE(fi->loc);
+ if (file == 0) {
+ /* Special case: Module name with ".erl" appended */
+ Atom* mod_atom = atom_tab(atom_val(fi->current[0]));
+ fi->needed += 2*(mod_atom->len+4);
+ } else {
+ Atom* ap = atom_tab(atom_val((fi->fname_ptr)[file-1]));
+ fi->needed += 2*ap->len;
+ }
+ return;
+ } else {
+ low = mid + 1;
+ }
+ }
+}
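
The new beam_ranges.c keeps one sorted Range table per code index and marks a purged module by setting end equal to start, so the binary search in find_range() can never land inside it. A simplified, standalone version of that idea, using half-open intervals and plain integers instead of atomics and BeamInstr pointers (all names below are invented for the sketch, not part of the patch):

    #include <assert.h>
    #include <stddef.h>

    typedef struct { size_t start; size_t end; } SimpleRange;

    /* Binary search over ranges sorted by start; an entry with end == start
     * is empty, so it is skipped without any extra test. */
    static const SimpleRange* simple_find_range(const SimpleRange* tab, int n, size_t pc)
    {
        int lo = 0, hi = n;
        while (lo < hi) {
            int mid = lo + (hi - lo) / 2;
            if (pc < tab[mid].start)
                hi = mid;
            else if (pc >= tab[mid].end)
                lo = mid + 1;
            else
                return &tab[mid];
        }
        return NULL;
    }

    int main(void)
    {
        SimpleRange tab[] = { {100, 200}, {300, 400}, {500, 600} };
        assert(simple_find_range(tab, 3, 350) == &tab[1]);
        tab[1].end = tab[1].start;                  /* "purge" the middle module */
        assert(simple_find_range(tab, 3, 350) == NULL);
        return 0;
    }
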
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index fc00b42454..97c8114437 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -37,10 +37,13 @@
#include "erl_db_util.h"
#include "register.h"
#include "erl_thr_progress.h"
+#define ERTS_PTAB_WANT_BIF_IMPL__
+#include "erl_ptab.h"
static Export* flush_monitor_message_trap = NULL;
static Export* set_cpu_topology_trap = NULL;
static Export* await_proc_exit_trap = NULL;
+static Export* await_port_send_result_trap = NULL;
Export* erts_format_cpu_topology_trap = NULL;
static Export *await_sched_wall_time_mod_trap;
@@ -83,8 +86,10 @@ static int insert_internal_link(Process* p, Eterm rpid)
ASSERT(is_internal_pid(rpid));
#ifdef ERTS_SMP
- if (IS_TRACED(p) && (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)))
+ if (IS_TRACED(p)
+ && (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1))) {
rp_locks = ERTS_PROC_LOCKS_ALL;
+ }
erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
#endif
@@ -100,27 +105,27 @@ static int insert_internal_link(Process* p, Eterm rpid)
}
if (p != rp) {
- erts_add_link(&(p->nlinks), LINK_PID, rp->id);
- erts_add_link(&(rp->nlinks), LINK_PID, p->id);
+ erts_add_link(&ERTS_P_LINKS(p), LINK_PID, rp->common.id);
+ erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, p->common.id);
- ASSERT(is_nil(p->tracer_proc)
- || is_internal_pid(p->tracer_proc)
- || is_internal_port(p->tracer_proc));
+ ASSERT(is_nil(ERTS_TRACER_PROC(p))
+ || is_internal_pid(ERTS_TRACER_PROC(p))
+ || is_internal_port(ERTS_TRACER_PROC(p)));
if (IS_TRACED(p)) {
- if (p->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) {
- rp->trace_flags |= (p->trace_flags & TRACEE_FLAGS);
- rp->tracer_proc = p->tracer_proc; /* maybe steal */
+ if (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ ERTS_TRACE_FLAGS(rp) |= (ERTS_TRACE_FLAGS(p) & TRACEE_FLAGS);
+ ERTS_TRACER_PROC(rp) = ERTS_TRACER_PROC(p); /* maybe steal */
- if (p->trace_flags & F_TRACE_SOL1) { /* maybe override */
- rp->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- p->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ if (ERTS_TRACE_FLAGS(p) & F_TRACE_SOL1) { /* maybe override */
+ ERTS_TRACE_FLAGS(rp) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
}
}
}
}
if (IS_TRACED_FL(rp, F_TRACE_PROCS))
- trace_proc(p, rp, am_getting_linked, p->id);
+ trace_proc(p, rp, am_getting_linked, p->common.id);
if (p == rp)
erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN);
@@ -144,10 +149,6 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
/* check that the pid or port which is our argument is OK */
if (is_internal_pid(BIF_ARG_1)) {
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
if (insert_internal_link(BIF_P, BIF_ARG_1)) {
BIF_RET(am_true);
}
@@ -157,19 +158,37 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
}
if (is_internal_port(BIF_ARG_1)) {
- Port *pt = erts_id2port(BIF_ARG_1, BIF_P, ERTS_PROC_LOCK_MAIN);
- if (!pt) {
+ int send_link_signal = 0;
+ Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (!prt) {
goto res_no_proc;
}
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
- if (erts_add_link(&(BIF_P->nlinks), LINK_PID, BIF_ARG_1) >= 0)
- erts_add_link(&(pt->nlinks), LINK_PID, BIF_P->id);
+ if (erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1) >= 0)
+ send_link_signal = 1;
/* else: already linked */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- erts_smp_port_unlock(pt);
+
+ if (send_link_signal) {
+ Eterm ref;
+ Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
+
+ switch (erts_port_link(BIF_P, prt, BIF_P->common.id, refp)) {
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ goto res_no_proc;
+ case ERTS_PORT_OP_SCHEDULED:
+ if (refp) {
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
+ }
+ default:
+ break;
+ }
+ }
BIF_RET(am_true);
}
else if (is_external_port(BIF_ARG_1)
@@ -182,7 +201,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
/* We may earn time by checking first that we're not linked already */
- if (erts_lookup_link(BIF_P->nlinks, BIF_ARG_1) != NULL) {
+ if (erts_lookup_link(ERTS_P_LINKS(BIF_P), BIF_ARG_1) != NULL) {
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
}
@@ -209,10 +228,10 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
erts_smp_de_links_lock(dep);
- erts_add_link(&(BIF_P->nlinks), LINK_PID, BIF_ARG_1);
+ erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1);
lnk = erts_add_or_lookup_link(&(dep->nlinks),
LINK_PID,
- BIF_P->id);
+ BIF_P->common.id);
ASSERT(lnk != NULL);
erts_add_link(&ERTS_LINK_ROOT(lnk), LINK_PID, BIF_ARG_1);
@@ -220,7 +239,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
erts_smp_de_runlock(dep);
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- code = erts_dsig_send_link(&dsd, BIF_P->id, BIF_ARG_1);
+ code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
BIF_RET(am_true);
@@ -233,15 +252,17 @@ BIF_RETTYPE link_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
- res_no_proc:
- if (BIF_P->flags & F_TRAPEXIT) {
- ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
- erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL);
- erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks);
- BIF_RET(am_true);
+res_no_proc: {
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&BIF_P->state);
+ if (state & ERTS_PSFLG_TRAP_EXIT) {
+ ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;
+ erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL);
+ erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks);
+ BIF_RET(am_true);
+ }
+ else
+ BIF_ERROR(BIF_P, EXC_NOPROC);
}
- else
- BIF_ERROR(BIF_P, EXC_NOPROC);
}
#define ERTS_DEMONITOR_FALSE 2
@@ -287,7 +308,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
if (dmon)
erts_destroy_monitor(dmon);
}
- mon = erts_remove_monitor(&c_p->monitors, ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
res = ERTS_DEMONITOR_TRUE;
@@ -296,7 +317,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
case ERTS_DSIG_PREP_CONNECTED:
erts_smp_de_links_lock(dep);
- mon = erts_remove_monitor(&c_p->monitors, ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
dmon = erts_remove_monitor(&dep->monitors, ref);
erts_smp_de_links_unlock(dep);
erts_smp_de_runlock(dep);
@@ -323,7 +344,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
* the atom is stored there. Yield if necessary.
*/
code = erts_dsig_send_demonitor(&dsd,
- c_p->id,
+ c_p->common.id,
(mon->name != NIL
? mon->name
: mon->pid),
@@ -385,7 +406,7 @@ static int demonitor(Process *c_p, Eterm ref)
goto done; /* Cannot be this monitor's ref */
}
- mon = erts_lookup_monitor(c_p->monitors, ref);
+ mon = erts_lookup_monitor(ERTS_P_MONITORS(c_p), ref);
if (!mon) {
res = ERTS_DEMONITOR_FALSE;
goto done;
@@ -424,7 +445,7 @@ static int demonitor(Process *c_p, Eterm ref)
to,
ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
- mon = erts_remove_monitor(&c_p->monitors, ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
#ifndef ERTS_SMP
ASSERT(mon);
#else
@@ -438,7 +459,7 @@ static int demonitor(Process *c_p, Eterm ref)
}
if (rp) {
ErtsMonitor *rmon;
- rmon = erts_remove_monitor(&(rp->monitors), ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
if (rp != c_p)
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon != NULL)
@@ -580,7 +601,7 @@ local_pid_monitor(Process *p, Eterm target)
mon_ref = erts_make_ref(p);
ERTS_BIF_PREP_RET(ret, mon_ref);
- if (target == p->id) {
+ if (target == p->common.id) {
return ret;
}
@@ -597,8 +618,8 @@ local_pid_monitor(Process *p, Eterm target)
else {
ASSERT(rp != p);
- erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, target, NIL);
- erts_add_monitor(&(rp->monitors), MON_TARGET, mon_ref, p->id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, target, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, NIL);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
@@ -633,9 +654,9 @@ local_name_monitor(Process *p, Eterm target_name)
UnUseTmpHeap(3,p);
}
else if (rp != p) {
- erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, rp->id,
+ erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, rp->common.id,
target_name);
- erts_add_monitor(&(rp->monitors), MON_TARGET, mon_ref, p->id,
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id,
target_name);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
@@ -687,16 +708,16 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2,
erts_smp_de_links_lock(dep);
- erts_add_monitor(&(p->monitors), MON_ORIGIN, mon_ref, p_trgt,
+ erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, p_trgt,
p_name);
- erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->id,
+ erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->common.id,
d_name);
erts_smp_de_links_unlock(dep);
erts_smp_de_runlock(dep);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
- code = erts_dsig_send_monitor(&dsd, p->id, target, mon_ref);
+ code = erts_dsig_send_monitor(&dsd, p->common.id, target, mon_ref);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_PREP_YIELD_RETURN(ret, p, mon_ref);
else
@@ -939,36 +960,39 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
}
if (is_internal_port(BIF_ARG_1)) {
- Port *pt = erts_id2port_sflgs(BIF_ARG_1,
- BIF_P,
- ERTS_PROC_LOCK_MAIN,
- ERTS_PORT_SFLGS_DEAD);
-
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
#ifdef ERTS_SMP
- if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
- if (pt)
- erts_smp_port_unlock(pt);
+ if (ERTS_PROC_PENDING_EXIT(BIF_P))
goto handle_pending_exit;
- }
#endif
- l = erts_remove_link(&BIF_P->nlinks, BIF_ARG_1);
-
- ASSERT(pt || !l);
-
- if (pt) {
- rl = erts_remove_link(&pt->nlinks, BIF_P->id);
- erts_smp_port_unlock(pt);
- if (rl)
- erts_destroy_link(rl);
- }
+ l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
- if (l)
+ if (l) {
+ Port *prt;
+
erts_destroy_link(l);
+ /* Send unlink signal */
+ prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_DEAD);
+ if (prt) {
+ ErtsPortOpResult res;
+ Eterm ref;
+ Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
+#ifdef DEBUG
+ ref = NIL;
+#endif
+ res = erts_port_unlink(BIF_P, prt, BIF_P->common.id, refp);
+
+ if (refp && res == ERTS_PORT_OP_SCHEDULED) {
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
+ }
+ }
+ }
+
BIF_RET(am_true);
}
else if (is_external_port(BIF_ARG_1)
@@ -991,7 +1015,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
if (ERTS_PROC_PENDING_EXIT(BIF_P))
goto handle_pending_exit;
#endif
- l = erts_remove_link(&BIF_P->nlinks,BIF_ARG_1);
+ l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
erts_smp_proc_unlock(BIF_P,
ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
@@ -1020,8 +1044,8 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
#endif
case ERTS_DSIG_PREP_CONNECTED:
- erts_remove_dist_link(&dld, BIF_P->id, BIF_ARG_1, dep);
- code = erts_dsig_send_unlink(&dsd, BIF_P->id, BIF_ARG_1);
+ erts_remove_dist_link(&dld, BIF_P->common.id, BIF_ARG_1, dep);
+ code = erts_dsig_send_unlink(&dsd, BIF_P->common.id, BIF_ARG_1);
erts_destroy_dist_link(&dld);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
@@ -1035,10 +1059,6 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
/* Internal pid... */
- /* process ok ? */
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- BIF_ERROR(BIF_P, BADARG);
-
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
/* get process struct */
@@ -1057,7 +1077,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
#endif
/* unlink and ignore errors */
- l = erts_remove_link(&BIF_P->nlinks,BIF_ARG_1);
+ l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
if (l != NULL)
erts_destroy_link(l);
@@ -1065,12 +1085,12 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
}
else {
- rl = erts_remove_link(&(rp->nlinks),BIF_P->id);
+ rl = erts_remove_link(&ERTS_P_LINKS(rp), BIF_P->common.id);
if (rl != NULL)
erts_destroy_link(rl);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) {
- trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->id);
+ trace_proc(BIF_P, rp, am_getting_unlinked, BIF_P->common.id);
}
if (rp != BIF_P)
@@ -1103,8 +1123,9 @@ BIF_RETTYPE hibernate_3(BIF_ALIST_3)
if (erts_hibernate(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, reg)) {
/*
- * If hibernate succeeded, TRAP. The process will be suspended
- * if status is P_WAITING or continue (if any message was in the queue).
+ * If hibernate succeeded, TRAP. The process will be wait in a
+ * hibernated state if its state is inactive (!ERTS_PSFLG_ACTIVE);
+ * otherwise, continue executing (if any message was in the queue).
*/
BIF_TRAP_CODE_PTR_(BIF_P, BIF_P->i);
}
@@ -1342,15 +1363,28 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
*/
if (is_internal_port(BIF_ARG_1)) {
- Port *prt;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- prt = erts_id2port(BIF_ARG_1, NULL, 0);
+ Port *prt = erts_port_lookup(BIF_ARG_1, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
if (prt) {
- erts_do_exit_port(prt, BIF_P->id, BIF_ARG_2);
- erts_port_release(prt);
+ Eterm ref;
+ Eterm *refp = erts_port_synchronous_ops ? &ref : NULL;
+ ErtsPortOpResult res;
+
+#ifdef DEBUG
+ ref = NIL;
+#endif
+
+ res = erts_port_exit(BIF_P, 0, prt, BIF_P->common.id, BIF_ARG_2, refp);
+
+ ERTS_BIF_CHK_EXITED(BIF_P);
+
+ if (refp && res == ERTS_PORT_OP_SCHEDULED) {
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, BIF_P, ref, am_true, am_true);
+ }
+
}
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- ERTS_BIF_CHK_EXITED(BIF_P);
+
BIF_RET(am_true);
}
else if(is_external_port(BIF_ARG_1)
@@ -1376,7 +1410,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
case ERTS_DSIG_PREP_NOT_CONNECTED:
BIF_TRAP2(dexit_trap, BIF_P, BIF_ARG_1, BIF_ARG_2);
case ERTS_DSIG_PREP_CONNECTED:
- code = erts_dsig_send_exit2(&dsd, BIF_P->id, BIF_ARG_1, BIF_ARG_2);
+ code = erts_dsig_send_exit2(&dsd, BIF_P->common.id, BIF_ARG_1, BIF_ARG_2);
if (code == ERTS_DSIG_SEND_YIELD)
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
BIF_RET(am_true);
@@ -1394,18 +1428,15 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
*/
ErtsProcLocks rp_locks;
- if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- BIF_ERROR(BIF_P, BADARG);
- if (BIF_ARG_1 == BIF_P->id) {
+ if (BIF_ARG_1 == BIF_P->common.id) {
rp_locks = ERTS_PROC_LOCKS_ALL;
rp = BIF_P;
erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR);
}
else {
rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- rp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, rp_locks,
- ERTS_P2P_FLG_SMP_INC_REFC);
+ rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, rp_locks);
if (!rp) {
BIF_RET(am_true);
}
@@ -1415,7 +1446,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
* Send an exit signal.
*/
erts_send_exit_signal(BIF_P,
- BIF_P->id,
+ BIF_P->common.id,
rp,
&rp_locks,
BIF_ARG_2,
@@ -1427,8 +1458,6 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
- if (rp != BIF_P)
- erts_smp_proc_dec_refc(rp);
#endif
/*
* We may have exited ourselves and may have to take action.
@@ -1502,14 +1531,13 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
BIF_RET(old_value);
}
else if (BIF_ARG_1 == am_priority) {
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
old_value = erts_set_process_priority(BIF_P, BIF_ARG_2);
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
if (old_value == THE_NON_VALUE)
goto error;
BIF_RET(old_value);
}
else if (BIF_ARG_1 == am_trap_exit) {
+ erts_aint32_t state;
Uint trap_exit;
if (BIF_ARG_2 == am_true) {
trap_exit = 1;
@@ -1520,63 +1548,58 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
}
/*
* NOTE: It is important that we check for pending exit signals
- * and handle them before flag trap_exit is set to true.
- * For more info, see implementation of erts_send_exit_signal().
+ * and handle them before returning if trap_exit is set to
+ * true. For more info, see implementation of
+ * erts_send_exit_signal().
*/
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- old_value = ERTS_PROC_IS_TRAPPING_EXITS(BIF_P) ? am_true : am_false;
- if (trap_exit) {
- ERTS_PROC_SET_TRAP_EXIT(BIF_P);
- } else {
- ERTS_PROC_UNSET_TRAP_EXIT(BIF_P);
+ if (trap_exit)
+ state = erts_smp_atomic32_read_bor_mb(&BIF_P->state,
+ ERTS_PSFLG_TRAP_EXIT);
+ else
+ state = erts_smp_atomic32_read_band_mb(&BIF_P->state,
+ ~ERTS_PSFLG_TRAP_EXIT);
+#ifdef ERTS_SMP
+ if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
+ erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
+ ERTS_BIF_EXITED(BIF_P);
}
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
+#endif
+
+ old_value = (state & ERTS_PSFLG_TRAP_EXIT) ? am_true : am_false;
BIF_RET(old_value);
}
else if (BIF_ARG_1 == am_scheduler) {
- int yield;
- ErtsRunQueue *old;
- ErtsRunQueue *new;
+ ErtsRunQueue *old, *new, *curr;
Sint sched;
+ erts_aint32_t state;
+
if (!is_small(BIF_ARG_2))
goto error;
sched = signed_val(BIF_ARG_2);
if (sched < 0 || erts_no_schedulers < sched)
goto error;
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
- old = BIF_P->bound_runq;
-#ifdef ERTS_SMP
- ASSERT(!old || old == BIF_P->run_queue);
-#endif
- new = !sched ? NULL : erts_schedid2runq(sched);
-#ifndef ERTS_SMP
- yield = 0;
-#else
- if (new == old)
- yield = 0;
+
+ if (sched == 0) {
+ new = NULL;
+ state = erts_smp_atomic32_read_band_mb(&BIF_P->state,
+ ~ERTS_PSFLG_BOUND);
+ }
else {
- ErtsRunQueue *curr = BIF_P->run_queue;
- if (!new)
- erts_smp_runq_lock(curr);
- else
- erts_smp_runqs_lock(curr, new);
- yield = new && BIF_P->run_queue != new;
-#endif
- BIF_P->bound_runq = new;
+ new = erts_schedid2runq(sched);
#ifdef ERTS_SMP
- if (new)
- BIF_P->run_queue = new;
- if (!new)
- erts_smp_runq_unlock(curr);
- else
- erts_smp_runqs_unlock(curr, new);
- }
+ erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new);
#endif
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ state = erts_smp_atomic32_read_bor_mb(&BIF_P->state,
+ ERTS_PSFLG_BOUND);
+ }
+
+ curr = ERTS_GET_SCHEDULER_DATA_FROM_PROC(BIF_P)->run_queue;
+ old = (ERTS_PSFLG_BOUND & state) ? curr : NULL;
+
+ ASSERT(!old || old == curr);
+
old_value = old ? make_small(old->ix+1) : make_small(0);
- if (yield)
+ if (new && new != curr)
ERTS_BIF_YIELD_RETURN_X(BIF_P, old_value, am_scheduler);
else
BIF_RET(old_value);
@@ -1625,11 +1648,13 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
goto error;
}
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
- old_value = BIF_P->trace_flags & F_SENSITIVE ? am_true : am_false;
+ old_value = (ERTS_TRACE_FLAGS(BIF_P) & F_SENSITIVE
+ ? am_true
+ : am_false);
if (is_sensitive) {
- BIF_P->trace_flags |= F_SENSITIVE;
+ ERTS_TRACE_FLAGS(BIF_P) |= F_SENSITIVE;
} else {
- BIF_P->trace_flags &= ~F_SENSITIVE;
+ ERTS_TRACE_FLAGS(BIF_P) &= ~F_SENSITIVE;
}
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
BIF_RET(old_value);
@@ -1755,8 +1780,9 @@ ebif_bang_2(BIF_ALIST_2)
#define SEND_BADARG (-4)
#define SEND_USER_ERROR (-5)
#define SEND_INTERNAL_ERROR (-6)
+#define SEND_AWAIT_RESULT (-7)
-Sint do_send(Process *p, Eterm to, Eterm msg, int suspend);
+Sint do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp);
static Sint remote_send(Process *p, DistEntry *dep,
Eterm to, Eterm full_to, Eterm msg, int suspend)
@@ -1810,7 +1836,7 @@ static Sint remote_send(Process *p, DistEntry *dep,
}
Sint
-do_send(Process *p, Eterm to, Eterm msg, int suspend) {
+do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
Eterm portid;
Port *pt;
Process* rp;
@@ -1822,17 +1848,10 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
-
- if (internal_pid_index(to) >= erts_max_processes)
- return SEND_BADARG;
- rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN,
- to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
-
- if (!rp) {
- ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
+ rp = erts_proc_lookup_raw(to);
+ if (!rp)
return 0;
- }
} else if (is_external_pid(to)) {
dep = external_pid_dist_entry(to);
if(dep == erts_this_dist_entry) {
@@ -1841,7 +1860,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
"Discarding message %T from %T to %T in an old "
"incarnation (%d) of this node (%d)\n",
msg,
- p->id,
+ p->common.id,
to,
external_pid_creation(to),
erts_this_node->creation);
@@ -1850,45 +1869,24 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
}
return remote_send(p, dep, to, to, msg, suspend);
} else if (is_atom(to)) {
-
- /* Need to virtual schedule out sending process
- * because of lock wait. This is only necessary
- * for internal port calling but the lock is bundled
- * with name lookup.
- */
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_out);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_inactive);
- }
- erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
- to,
- &rp, 0, ERTS_P2P_FLG_SMP_INC_REFC,
- &pt);
+ Eterm id = erts_whereis_name_to_id(p, to);
+
+ rp = erts_proc_lookup(id);
+ if (rp)
+ goto send_message;
+ pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
if (pt) {
- portid = pt->id;
+ portid = id;
goto port_common;
}
-
- /* Not a port virtually schedule the process back in */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_active);
- }
if (IS_TRACED(p))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
- if (!rp) {
- return SEND_BADARG;
- }
+ return SEND_BADARG;
} else if (is_external_port(to)
&& (external_port_dist_entry(to)
== erts_this_dist_entry)) {
@@ -1897,50 +1895,56 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
"Discarding message %T from %T to %T in an old "
"incarnation (%d) of this node (%d)\n",
msg,
- p->id,
+ p->common.id,
to,
external_port_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
return 0;
} else if (is_internal_port(to)) {
+ int ret_val;
portid = to;
- /* schedule out calling process, waiting for lock*/
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_out);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_inactive);
- }
- pt = erts_id2port(to, p, ERTS_PROC_LOCK_MAIN);
+
+ pt = erts_port_lookup(portid, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+
port_common:
- ERTS_SMP_LC_ASSERT(!pt || erts_lc_is_port_locked(pt));
+ ret_val = 0;
- /* We have waited for locks, trace schedule ports */
- if (pt && IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(pt, am_in, am_command);
- }
- if (pt && erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) {
- profile_runnable_port(pt, am_active);
- }
-
- /* XXX let port_command handle the busy stuff !!! */
- if (pt && (pt->status & ERTS_PORT_SFLG_PORT_BUSY)) {
- if (suspend) {
- erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
- if (erts_system_monitor_flags.busy_port) {
- monitor_generic(p, am_busy_port, portid);
+ if (pt) {
+ int ps_flags = suspend ? 0 : ERTS_PORT_SIG_FLG_NOSUSPEND;
+ *refp = NIL;
+
+ switch (erts_port_command(p, ps_flags, pt, msg, refp)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ /* We are exiting... */
+ return SEND_USER_ERROR;
+ case ERTS_PORT_OP_BUSY:
+ /* Nothing has been sent */
+ if (suspend)
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
+ return SEND_YIELD;
+ case ERTS_PORT_OP_BUSY_SCHEDULED:
+ /* Message was sent */
+ if (suspend) {
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, pt);
+ ret_val = SEND_YIELD_RETURN;
+ break;
}
+ /* Fall through */
+ case ERTS_PORT_OP_SCHEDULED:
+ if (is_not_nil(*refp)) {
+ ASSERT(is_internal_ref(*refp));
+ ret_val = SEND_AWAIT_RESULT;
+ }
+ break;
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DONE:
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_command() result");
+ break;
}
- /* Virtually schedule out the port before releasing */
- if (IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(pt, am_out, am_command);
- }
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) {
- profile_runnable_port(pt, am_inactive);
- }
- erts_port_release(pt);
- return SEND_YIELD;
}
if (IS_TRACED(p)) /* trace once only !! */
@@ -1958,30 +1962,11 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
SEQ_TRACE_SEND, portid, p);
}
- /* XXX NO GC in port command */
- erts_port_command(p, p->id, pt, msg);
- if (pt) {
- /* Virtually schedule out the port before releasing */
- if (IS_TRACED_FL(pt, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(pt, am_out, am_command);
- }
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(pt)) {
- profile_runnable_port(pt, am_inactive);
- }
- erts_port_release(pt);
- }
- /* Virtually schedule in process */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_active);
- }
if (ERTS_PROC_IS_EXITING(p)) {
KILL_CATCHES(p); /* Must exit */
return SEND_USER_ERROR;
}
- return 0;
+ return ret_val;
} else if (is_tuple(to)) { /* Remote send */
int ret;
tp = tuple_val(to);
@@ -1997,47 +1982,24 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
dep = erts_sysname_to_connected_dist_entry(tp[2]);
if (dep == erts_this_dist_entry) {
+ Eterm id;
erts_deref_dist_entry(dep);
if (IS_TRACED(p))
trace_send(p, to, msg);
if (ERTS_PROC_GET_SAVED_CALLS_BUF(p))
save_calls(p, &exp_send);
-
- /* Need to virtual schedule out sending process
- * because of lock wait. This is only necessary
- * for internal port calling but the lock is bundled.
- */
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_out);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_inactive);
- }
- erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
- tp[1],
- &rp, 0, ERTS_P2P_FLG_SMP_INC_REFC,
- &pt);
+ id = erts_whereis_name_to_id(p, tp[1]);
+
+ rp = erts_proc_lookup_raw(id);
+ if (rp)
+ goto send_message;
+ pt = erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
if (pt) {
- portid = pt->id;
+ portid = id;
goto port_common;
}
- /* Port lookup failed, virtually schedule the process
- * back in.
- */
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(p, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(p, am_active);
- }
-
- if (!rp) {
- return 0;
- }
- goto send_message;
+ return 0;
}
ret = remote_send(p, dep, tp[1], to, msg, suspend);
@@ -2060,23 +2022,15 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
rp_locks |= ERTS_PROC_LOCK_MAIN;
#endif
/* send to local process */
- erts_send_message(p, rp, &rp_locks, msg, 0);
- if (!erts_use_sender_punish)
+ res = erts_send_message(p, rp, &rp_locks, msg, 0);
+ if (erts_use_sender_punish)
+ res *= 4;
+ else
res = 0;
- else {
-#ifdef ERTS_SMP
- res = rp->msg_inq.len*4;
- if (ERTS_PROC_LOCK_MAIN & rp_locks)
- res += rp->msg.len*4;
-#else
- res = rp->msg.len*4;
-#endif
- }
erts_smp_proc_unlock(rp,
p == rp
? (rp_locks & ~ERTS_PROC_LOCK_MAIN)
: rp_locks);
- erts_smp_proc_dec_refc(rp);
return res;
}
}
@@ -2084,6 +2038,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend) {
BIF_RETTYPE send_3(BIF_ALIST_3)
{
+ Eterm ref;
Process *p = BIF_P;
Eterm to = BIF_ARG_1;
Eterm msg = BIF_ARG_2;
@@ -2107,12 +2062,18 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
if(!is_nil(l)) {
BIF_ERROR(p, BADARG);
}
-
- result = do_send(p, to, msg, suspend);
+
+#ifdef DEBUG
+ ref = NIL;
+#endif
+
+ result = do_send(p, to, msg, suspend, &ref);
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
BIF_RET(am_ok);
- } else switch (result) {
+ }
+
+ switch (result) {
case 0:
BIF_RET(am_ok);
break;
@@ -2135,6 +2096,9 @@ BIF_RETTYPE send_3(BIF_ALIST_3)
ERTS_BIF_YIELD_RETURN(p, am_ok);
else
BIF_RET(am_nosuspend);
+ case SEND_AWAIT_RESULT:
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, p, ref, am_nosuspend, am_ok);
case SEND_BADARG:
BIF_ERROR(p, BADARG);
break;
@@ -2159,12 +2123,21 @@ BIF_RETTYPE send_2(BIF_ALIST_2)
Eterm erl_send(Process *p, Eterm to, Eterm msg)
{
- Sint result = do_send(p, to, msg, !0);
+ Eterm ref;
+ Sint result;
+
+#ifdef DEBUG
+ ref = NIL;
+#endif
+
+ result = do_send(p, to, msg, !0, &ref);
if (result > 0) {
ERTS_VBUMP_REDS(p, result);
BIF_RET(msg);
- } else switch (result) {
+ }
+
+ switch (result) {
case 0:
BIF_RET(msg);
break;
@@ -2176,6 +2149,9 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg)
break;
case SEND_YIELD_RETURN:
ERTS_BIF_YIELD_RETURN(p, msg);
+ case SEND_AWAIT_RESULT:
+ ASSERT(is_internal_ref(ref));
+ BIF_TRAP3(await_port_send_result_trap, p, ref, msg, msg);
case SEND_BADARG:
BIF_ERROR(p, BADARG);
break;
@@ -2445,9 +2421,7 @@ BIF_RETTYPE setelement_3(BIF_ALIST_3)
/* copy the tuple */
resp = hp;
- while (size--) { /* XXX use memcpy? */
- *hp++ = *ptr++;
- }
+ sys_memcpy(hp, ptr, sizeof(Eterm)*size);
resp[ix] = BIF_ARG_3;
BIF_RET(make_tuple(resp));
}
@@ -2460,7 +2434,7 @@ BIF_RETTYPE make_tuple_2(BIF_ALIST_2)
Eterm* hp;
Eterm res;
- if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0) {
+ if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0 || n > ERTS_MAX_TUPLE_SIZE) {
BIF_ERROR(BIF_P, BADARG);
}
hp = HAlloc(BIF_P, n+1);
@@ -2481,7 +2455,7 @@ BIF_RETTYPE make_tuple_3(BIF_ALIST_3)
Eterm list = BIF_ARG_3;
Eterm* tup;
- if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0) {
+ if (is_not_small(BIF_ARG_1) || (n = signed_val(BIF_ARG_1)) < 0 || n > ERTS_MAX_TUPLE_SIZE) {
error:
BIF_ERROR(BIF_P, BADARG);
}
@@ -2533,11 +2507,16 @@ BIF_RETTYPE append_element_2(BIF_ALIST_2)
Eterm res;
if (is_not_tuple(BIF_ARG_1)) {
+ error:
BIF_ERROR(BIF_P, BADARG);
}
- ptr = tuple_val(BIF_ARG_1);
+ ptr = tuple_val(BIF_ARG_1);
arity = arityval(*ptr);
- hp = HAlloc(BIF_P, arity + 2);
+
+ if (arity + 1 > ERTS_MAX_TUPLE_SIZE)
+ goto error;
+
+ hp = HAlloc(BIF_P, arity + 2);
res = make_tuple(hp);
*hp = make_arityval(arity+1);
while (arity--) {
@@ -2547,6 +2526,78 @@ BIF_RETTYPE append_element_2(BIF_ALIST_2)
BIF_RET(res);
}
+BIF_RETTYPE insert_element_3(BIF_ALIST_3)
+{
+ Eterm* ptr;
+ Eterm* hp;
+ Uint arity;
+ Eterm res;
+ Sint ix;
+
+ if (is_not_tuple(BIF_ARG_2) || is_not_small(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ptr = tuple_val(BIF_ARG_2);
+ arity = arityval(*ptr);
+ ix = signed_val(BIF_ARG_1);
+
+ if ((ix < 1) || (ix > (arity + 1))) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ hp = HAlloc(BIF_P, arity + 1 + 1);
+ res = make_tuple(hp);
+ *hp = make_arityval(arity + 1);
+
+ ix--;
+ arity -= ix;
+
+ while (ix--) { *++hp = *++ptr; }
+
+ *++hp = BIF_ARG_3;
+
+ while(arity--) { *++hp = *++ptr; }
+
+ BIF_RET(res);
+}
+
+BIF_RETTYPE delete_element_2(BIF_ALIST_3)
+{
+ Eterm* ptr;
+ Eterm* hp;
+ Uint arity;
+ Eterm res;
+ Sint ix;
+
+ if (is_not_tuple(BIF_ARG_2) || is_not_small(BIF_ARG_1)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ ptr = tuple_val(BIF_ARG_2);
+ arity = arityval(*ptr);
+ ix = signed_val(BIF_ARG_1);
+
+ if ((ix < 1) || (ix > arity) || (arity == 0)) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ hp = HAlloc(BIF_P, arity + 1 - 1);
+ res = make_tuple(hp);
+ *hp = make_arityval(arity - 1);
+
+ ix--;
+ arity -= ix;
+
+ while (ix--) { *++hp = *++ptr; }
+
+ ++ptr;
+
+ while(arity--) { *++hp = *++ptr; }
+
+ BIF_RET(res);
+}
+
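
insert_element/3 and delete_element/2 above build the new tuple by splitting the copy around the index: the words before it, the inserted (or skipped) slot, then the rest. The same split in a standalone sketch, with plain ints standing in for Eterm words (insert_at is an invented helper, not the BIF itself):

    #include <assert.h>
    #include <string.h>

    /* Copy the n-element array src into dst while inserting val at
     * 1-based position ix (dst must hold n+1 elements). */
    static void insert_at(const int* src, int n, int ix, int val, int* dst)
    {
        memcpy(dst, src, (ix - 1) * sizeof(int));                    /* slots before ix */
        dst[ix - 1] = val;                                           /* the new slot */
        memcpy(dst + ix, src + ix - 1, (n - ix + 1) * sizeof(int));  /* the rest */
    }

    int main(void)
    {
        int t[] = { 1, 2, 3 };
        int out[4];
        insert_at(t, 3, 2, 99, out);   /* like erlang:insert_element(2, {1,2,3}, 99) */
        assert(out[0] == 1 && out[1] == 99 && out[2] == 2 && out[3] == 3);
        return 0;
    }
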
/**********************************************************************/
/* convert an atom to a list of ascii integer */
@@ -2861,7 +2912,7 @@ BIF_RETTYPE float_to_list_1(BIF_ALIST_1)
if (is_not_float(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
GET_DOUBLE(BIF_ARG_1, f);
- if ((i = sys_double_to_chars(f.fd, fbuf)) <= 0)
+ if ((i = sys_double_to_chars(f.fd, fbuf, sizeof(fbuf))) <= 0)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
need = i*2;
hp = HAlloc(BIF_P, need);
@@ -3121,7 +3172,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1)
Eterm* hp;
int len;
- if ((len = list_length(list)) < 0) {
+ if ((len = list_length(list)) < 0 || len > ERTS_MAX_TUPLE_SIZE) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -3143,7 +3194,7 @@ BIF_RETTYPE list_to_tuple_1(BIF_ALIST_1)
BIF_RETTYPE self_0(BIF_ALIST_0)
{
- BIF_RET(BIF_P->id);
+ BIF_RET(BIF_P->common.id);
}
/**********************************************************************/
@@ -3180,11 +3231,9 @@ static erts_smp_spinlock_t make_ref_lock;
static erts_smp_mtx_t ports_snapshot_mtx;
erts_smp_atomic_t erts_dead_ports_ptr; /* To store dying ports during snapshot */
-Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
+void
+erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS])
{
- Eterm* hp = buffer;
- Uint32 ref0, ref1, ref2;
-
erts_smp_spin_lock(&make_ref_lock);
reference0++;
@@ -3196,24 +3245,36 @@ Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
}
}
- ref0 = reference0;
- ref1 = reference1;
- ref2 = reference2;
+ ref[0] = reference0;
+ ref[1] = reference1;
+ ref[2] = reference2;
erts_smp_spin_unlock(&make_ref_lock);
+}
+
+Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE])
+{
+ Eterm* hp = buffer;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
- write_ref_thing(hp, ref0, ref1, ref2);
+ erts_make_ref_in_array(ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
return make_internal_ref(hp);
}
Eterm erts_make_ref(Process *p)
{
Eterm* hp;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
hp = HAlloc(p, REF_THING_SIZE);
- return erts_make_ref_in_buffer(hp);
+
+ erts_make_ref_in_array(ref);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+
+ return make_internal_ref(hp);
}
BIF_RETTYPE make_ref_0(BIF_ALIST_0)
@@ -3477,7 +3538,7 @@ BIF_RETTYPE garbage_collect_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
- if (BIF_P->id == BIF_ARG_1)
+ if (BIF_P->common.id == BIF_ARG_1)
rp = BIF_P;
else {
#ifdef ERTS_SMP
@@ -3486,7 +3547,7 @@ BIF_RETTYPE garbage_collect_1(BIF_ALIST_1)
if (rp == ERTS_PROC_LOCK_BUSY)
ERTS_BIF_YIELD1(bif_export[BIF_garbage_collect_1], BIF_P, BIF_ARG_1);
#else
- rp = erts_pid2proc(BIF_P, 0, BIF_ARG_1, 0);
+ rp = erts_proc_lookup(BIF_ARG_1);
#endif
if (!rp)
BIF_RET(am_false);
@@ -3517,71 +3578,23 @@ BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
}
/**********************************************************************/
-/* Return a list of active ports */
+/*
+ * The erlang:processes/0 BIF.
+ */
-BIF_RETTYPE ports_0(BIF_ALIST_0)
+BIF_RETTYPE processes_0(BIF_ALIST_0)
{
- Eterm res = NIL;
- Eterm* port_buf = erts_alloc(ERTS_ALC_T_TMP,
- sizeof(Eterm)*erts_max_ports);
- Eterm* pp = port_buf;
- Eterm* dead_ports;
- int alive, dead;
- Uint32 next_ss;
- int i;
-
- /* To get a consistent snapshot...
- * We add alive ports from start of the buffer
- * while dying ports are added from the other end by the killing threads.
- */
-
- erts_smp_mtx_lock(&ports_snapshot_mtx); /* One snapshot at a time */
-
- erts_smp_atomic_set_nob(&erts_dead_ports_ptr,
- (erts_aint_t) (port_buf + erts_max_ports));
-
- next_ss = erts_smp_atomic32_inc_read_relb(&erts_ports_snapshot);
-
- for (i = erts_max_ports-1; i >= 0; i--) {
- Port* prt = &erts_port[i];
- erts_smp_port_state_lock(prt);
- if (!(prt->status & ERTS_PORT_SFLGS_DEAD)
- && prt->snapshot != next_ss) {
- ASSERT(prt->snapshot == next_ss - 1);
- *pp++ = prt->id;
- prt->snapshot = next_ss; /* Consumed by this snapshot */
- }
- erts_smp_port_state_unlock(prt);
- }
-
- dead_ports = (Eterm*)erts_smp_atomic_xchg_nob(&erts_dead_ports_ptr,
- (erts_aint_t) NULL);
- erts_smp_mtx_unlock(&ports_snapshot_mtx);
-
- ASSERT(pp <= dead_ports);
-
- alive = pp - port_buf;
- dead = port_buf + erts_max_ports - dead_ports;
-
- ASSERT((alive+dead) <= erts_max_ports);
-
- if (alive+dead > 0) {
- erts_aint_t i;
- Eterm *hp = HAlloc(BIF_P, (alive+dead)*2);
-
- for (i = 0; i < alive; i++) {
- res = CONS(hp, port_buf[i], res);
- hp += 2;
- }
- for (i = 0; i < dead; i++) {
- res = CONS(hp, dead_ports[i], res);
- hp += 2;
- }
- }
+ return erts_ptab_list(BIF_P, &erts_proc);
+}
- erts_free(ERTS_ALC_T_TMP, port_buf);
+/**********************************************************************/
+/*
+ * The erlang:ports/0 BIF.
+ */
- BIF_RET(res);
+BIF_RETTYPE ports_0(BIF_ALIST_0)
+{
+ return erts_ptab_list(BIF_P, &erts_port);
}
/**********************************************************************/
@@ -3780,7 +3793,8 @@ BIF_RETTYPE function_exported_3(BIF_ALIST_3)
is_not_small(BIF_ARG_3)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3)) == NULL) {
+ if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3),
+ erts_active_code_ix()) == NULL) {
BIF_RET(am_false);
}
BIF_RET(am_true);
@@ -4210,14 +4224,15 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_RET(old_value);
}
} else if (BIF_ARG_1 == make_small(1)) {
- Uint i;
+ int i, max;
ErlMessage* mp;
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
- for (i = 0; i < erts_max_processes; i++) {
- if (process_tab[i] != (Process*) 0) {
- Process* p = process_tab[i];
+ max = erts_ptab_max(&erts_proc);
+ for (i = 0; i < max; i++) {
+ Process *p = erts_pix2proc(i);
+ if (p) {
#ifdef USE_VM_PROBES
p->seq_trace_token = (p->dt_utag != NIL) ? am_have_dt_utag : NIL;
#else
@@ -4513,6 +4528,21 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Export bif_return_trap_export;
+void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
+ Eterm (*bif)(BIF_ALIST_0))
+{
+ int i;
+ sys_memset((void *) ep, 0, sizeof(Export));
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ ep->addressv[i] = &ep->code[3];
+ }
+ ep->code[0] = m;
+ ep->code[1] = f;
+ ep->code[2] = a;
+ ep->code[3] = (BeamInstr) em_apply_bif;
+ ep->code[4] = (BeamInstr) bif;
+}
+
void erts_init_bif(void)
{
reference0 = 0;
@@ -4528,17 +4558,13 @@ void erts_init_bif(void)
* yield the calling process traps to. The only thing it does:
* return the value passed as argument.
*/
- sys_memset((void *) &bif_return_trap_export, 0, sizeof(Export));
- bif_return_trap_export.address = &bif_return_trap_export.code[3];
- bif_return_trap_export.code[0] = am_erlang;
- bif_return_trap_export.code[1] = am_bif_return_trap;
+ erts_init_trap_export(&bif_return_trap_export, am_erlang, am_bif_return_trap,
#ifdef DEBUG
- bif_return_trap_export.code[2] = 2;
+ 2
#else
- bif_return_trap_export.code[2] = 1;
+ 1
#endif
- bif_return_trap_export.code[3] = (BeamInstr) em_apply_bif;
- bif_return_trap_export.code[4] = (BeamInstr) &bif_return_trap;
+ , &bif_return_trap);
flush_monitor_message_trap = erts_export_put(am_erlang,
am_flush_monitor_message,
@@ -4551,6 +4577,8 @@ void erts_init_bif(void)
am_format_cpu_topology,
1);
await_proc_exit_trap = erts_export_put(am_erlang,am_await_proc_exit,3);
+ await_port_send_result_trap
+ = erts_export_put(am_erts_internal, am_await_port_send_result, 3);
await_sched_wall_time_mod_trap
= erts_export_put(am_erlang, am_await_sched_wall_time_modifications, 2);
erts_smp_atomic32_init_nob(&sched_wall_time, 0);
@@ -4566,19 +4594,18 @@ bif erlang:send_to_logger/2
BIF_RETTYPE send_to_logger_2(BIF_ALIST_2)
{
byte *buf;
- int len;
+ ErlDrvSizeT len;
if (!is_atom(BIF_ARG_1) || !(is_list(BIF_ARG_2) ||
is_nil(BIF_ARG_1))) {
BIF_ERROR(BIF_P,BADARG);
}
- len = io_list_len(BIF_ARG_2);
- if (len < 0)
+ if (erts_iolist_size(BIF_ARG_2, &len) != 0)
BIF_ERROR(BIF_P,BADARG);
else if (len == 0)
buf = "";
else {
#ifdef DEBUG
- int len2;
+ ErlDrvSizeT len2;
#endif
buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, len+1);
#ifdef DEBUG
@@ -4586,7 +4613,7 @@ BIF_RETTYPE send_to_logger_2(BIF_ALIST_2)
#else
(void)
#endif
- io_list_to_buf(BIF_ARG_2, buf, len);
+ erts_iolist_to_buf(BIF_ARG_2, buf, len);
ASSERT(len2 == len);
buf[len] = '\0';
switch (BIF_ARG_1) {
@@ -4680,7 +4707,6 @@ BIF_RETTYPE dt_prepend_vm_tag_data_1(BIF_ALIST_1)
#ifdef USE_VM_PROBES
Eterm b;
Eterm *hp;
- hp = HAlloc(BIF_P,2);
if (is_binary((DT_UTAG(BIF_P)))) {
Uint sz = binary_size(DT_UTAG(BIF_P));
int i;
@@ -4697,6 +4723,7 @@ BIF_RETTYPE dt_prepend_vm_tag_data_1(BIF_ALIST_1)
} else {
b = new_binary(BIF_P,(byte *)"\0",1);
}
+ hp = HAlloc(BIF_P,2);
BIF_RET(CONS(hp,b,BIF_ARG_1));
#else
BIF_RET(BIF_ARG_1);
@@ -4707,7 +4734,6 @@ BIF_RETTYPE dt_append_vm_tag_data_1(BIF_ALIST_1)
#ifdef USE_VM_PROBES
Eterm b;
Eterm *hp;
- hp = HAlloc(BIF_P,2);
if (is_binary((DT_UTAG(BIF_P)))) {
Uint sz = binary_size(DT_UTAG(BIF_P));
int i;
@@ -4724,6 +4750,7 @@ BIF_RETTYPE dt_append_vm_tag_data_1(BIF_ALIST_1)
} else {
b = new_binary(BIF_P,(byte *)"\0",1);
}
+ hp = HAlloc(BIF_P,2);
BIF_RET(CONS(hp,BIF_ARG_1,b));
#else
BIF_RET(BIF_ARG_1);
@@ -4747,14 +4774,14 @@ BIF_RETTYPE dt_spread_tag_1(BIF_ALIST_1)
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) start spreading tag %T\r\n",
- BIF_P->id,DT_UTAG(BIF_P));
+ BIF_P->common.id,DT_UTAG(BIF_P));
#endif
} else {
DT_UTAG_FLAGS(BIF_P) &= ~DT_UTAG_SPREADING;
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) stop spreading tag %T\r\n",
- BIF_P->id,DT_UTAG(BIF_P));
+ BIF_P->common.id,DT_UTAG(BIF_P));
#endif
}
}
@@ -4780,7 +4807,7 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1)
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) restore Killing tag!\r\n",
- BIF_P->id);
+ BIF_P->common.id);
#endif
}
DT_UTAG(BIF_P) = NIL;
@@ -4797,12 +4824,12 @@ BIF_RETTYPE dt_restore_tag_1(BIF_ALIST_1)
erts_fprintf(stderr,
"Dtrace -> (%T) restore stop spreading "
"tag %T\r\n",
- BIF_P->id, tpl[2]);
+ BIF_P->common.id, tpl[2]);
} else if ((x & DT_UTAG_SPREADING) &&
!(DT_UTAG_FLAGS(BIF_P) & DT_UTAG_SPREADING)) {
erts_fprintf(stderr,
"Dtrace -> (%T) restore start spreading "
- "tag %T\r\n",BIF_P->id,tpl[2]);
+ "tag %T\r\n",BIF_P->common.id,tpl[2]);
}
#endif
DT_UTAG_FLAGS(BIF_P) = x;
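The send_to_logger_2 hunk above replaces the old io_list_len/io_list_to_buf pair with erts_iolist_size and erts_iolist_to_buf, carrying the length as ErlDrvSizeT. A minimal sketch of that pattern follows; the helper name iolist_to_tmp_buf and its error handling are illustrative only, while erts_iolist_size(), erts_iolist_to_buf() and ERTS_ALC_T_TMP are as used in the patch.

    /* Sketch only: the iolist-to-temporary-buffer pattern used in
     * send_to_logger_2 above.  Only the erts_iolist_* calls and the
     * ERTS_ALC_T_TMP allocator type come from the patch itself. */
    static byte *iolist_to_tmp_buf(Eterm iolist, ErlDrvSizeT *lenp)
    {
        ErlDrvSizeT len;
        byte *buf;

        if (erts_iolist_size(iolist, &len) != 0)
            return NULL;                          /* not a valid iolist */
        buf = (byte *) erts_alloc(ERTS_ALC_T_TMP, len + 1);
        erts_iolist_to_buf(iolist, (char *) buf, len);
        buf[len] = '\0';                          /* NUL-terminate for logging */
        *lenp = len;
        return buf;    /* caller frees with erts_free(ERTS_ALC_T_TMP, buf) */
    }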
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index d20089a9fb..71f232035d 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -125,7 +125,7 @@ do { \
#define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \
do { \
(Proc)->arity = 0; \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -135,7 +135,7 @@ do { \
Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
(Proc)->arity = 1; \
reg[0] = (Eterm) (A0); \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -146,7 +146,7 @@ do { \
(Proc)->arity = 2; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -158,7 +158,7 @@ do { \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
(Ret) = THE_NON_VALUE; \
} while (0)
@@ -170,13 +170,13 @@ do { \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
reg[2] = (Eterm) (A2); \
- (Proc)->i = (BeamInstr*) ((Trap)->address); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
(Proc)->freason = TRAP; \
} while (0)
#define BIF_TRAP0(p, Trap_) do { \
(p)->arity = 0; \
- (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -185,7 +185,7 @@ do { \
Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
(p)->arity = 1; \
reg[0] = (A0); \
- (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -195,7 +195,7 @@ do { \
(p)->arity = 2; \
reg[0] = (A0); \
reg[1] = (A1); \
- (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -206,7 +206,7 @@ do { \
reg[0] = (A0); \
reg[1] = (A1); \
reg[2] = (A2); \
- (p)->i = (BeamInstr*) ((Trap_)->address); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
(p)->freason = TRAP; \
return THE_NON_VALUE; \
} while(0)
@@ -322,27 +322,6 @@ do { \
ERTS_BIF_EXITED((PROC)); \
} while (0)
-#ifdef ERTS_SMP
-#define ERTS_SMP_BIF_CHK_PENDING_EXIT(P, L) \
-do { \
- ERTS_SMP_LC_ASSERT((L) == erts_proc_lc_my_proc_locks((P))); \
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & (L)); \
- if (!((L) & ERTS_PROC_LOCK_STATUS)) \
- erts_smp_proc_lock((P), ERTS_PROC_LOCK_STATUS); \
- if (ERTS_PROC_PENDING_EXIT((P))) { \
- erts_handle_pending_exit((P), (L)|ERTS_PROC_LOCK_STATUS); \
- erts_smp_proc_unlock((P), \
- (((L)|ERTS_PROC_LOCK_STATUS) \
- & ~ERTS_PROC_LOCK_MAIN)); \
- ERTS_BIF_EXITED((P)); \
- } \
- if (!((L) & ERTS_PROC_LOCK_STATUS)) \
- erts_smp_proc_unlock((P), ERTS_PROC_LOCK_STATUS); \
-} while (0)
-#else
-#define ERTS_SMP_BIF_CHK_PENDING_EXIT(P, L)
-#endif
-
/*
 * The ERTS_BIF_*_AWAIT_X_*_TRAP macros either exit the caller, or
* sets up a trap to erlang:await_proc_exit/3.
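The macro changes above all replace the single Export address field with an addressv array indexed by the currently active code index, matching the erts_init_trap_export change in bif.c. A rough sketch of that lookup follows, using a deliberately simplified structure: only the addressv/code fields and ERTS_NUM_CODE_IX are taken from the patch, everything else is illustrative.

    /* Simplified sketch, not the real Export definition: each of the
     * ERTS_NUM_CODE_IX logical code copies gets its own entry point, so a
     * trap target is always resolved through the active copy. */
    typedef struct {
        BeamInstr *addressv[ERTS_NUM_CODE_IX]; /* one entry per code index */
        BeamInstr code[5];   /* module, function, arity, em_apply_bif, bif */
    } ExportSketch;

    static BeamInstr *trap_entry(ExportSketch *ep)
    {
        /* the same lookup the rewritten trap macros above perform */
        return ep->addressv[erts_active_code_ix()];
    }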
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 797bce43ab..59a91cd40c 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -31,434 +31,233 @@
#
# Important: Use "ubif" for guard BIFs and operators; use "bif" for ordinary BIFs.
#
-# Add new BIFs to the end of the file. Do not bother adding a "packaged BIF name"
-# (such as 'erl.lang.number'); if/when packages will be supported we will add
-# all those names.
+# Add new BIFs to the end of the file.
#
# Note: Guard BIFs require special support in the compiler (to be able to actually
# call them from within a guard).
#
ubif erlang:abs/1
-ubif 'erl.lang.number':abs/1 ebif_abs_1
bif erlang:adler32/1
-bif 'erl.util.crypt.adler32':sum/1 ebif_adler32_1
bif erlang:adler32/2
-bif 'erl.util.crypt.adler32':sum/2 ebif_adler32_2
bif erlang:adler32_combine/3
-bif 'erl.util.crypt.adler32':combine/3 ebif_adler32_combine_3
bif erlang:apply/3
-bif 'erl.lang':apply/3 ebif_apply_3
bif erlang:atom_to_list/1
-bif 'erl.lang.atom':to_string/1 ebif_atom_to_string_1 atom_to_list_1
bif erlang:binary_to_list/1
-bif 'erl.lang.binary':to_list/1 ebif_binary_to_list_1
bif erlang:binary_to_list/3
-bif 'erl.lang.binary':to_list/3 ebif_binary_to_list_3
bif erlang:binary_to_term/1
-bif 'erl.lang.binary':to_term/1 ebif_binary_to_term_1
bif erlang:check_process_code/2
-bif 'erl.system.code':check_process/2 ebif_check_process_code_2
bif erlang:crc32/1
-bif 'erl.util.crypt.crc32':sum/1 ebif_crc32_1
bif erlang:crc32/2
-bif 'erl.util.crypt.crc32':sum/2 ebif_crc32_2
bif erlang:crc32_combine/3
-bif 'erl.util.crypt.crc32':combine/3 ebif_crc32_combine_3
bif erlang:date/0
-bif 'erl.util.date':today/0 ebif_date_0
bif erlang:delete_module/1
-bif 'erl.system.code':delete/1 ebif_delete_module_1
bif erlang:display/1
-bif 'erl.system.debug':display/1 ebif_display_1
bif erlang:display_string/1
-bif 'erl.system.debug':display_string/1 ebif_display_string_1
bif erlang:display_nl/0
-bif 'erl.system.debug':display_nl/0 ebif_display_nl_0
ubif erlang:element/2
-ubif 'erl.lang.tuple':element/2 ebif_element_2
bif erlang:erase/0
-bif 'erl.lang.proc.pdict':erase/0 ebif_erase_0
bif erlang:erase/1
-bif 'erl.lang.proc.pdict':erase/1 ebif_erase_1
bif erlang:exit/1
-bif 'erl.lang':exit/1 ebif_exit_1
bif erlang:exit/2
-bif 'erl.lang.proc':signal/2 ebif_signal_2 exit_2
bif erlang:external_size/1
-bif 'erl.lang.term':external_size/1 ebif_external_size_1
bif erlang:external_size/2
-bif 'erl.lang.term':external_size/2 ebif_external_size_2
ubif erlang:float/1
-ubif 'erl.lang.number':to_float/1 ebif_to_float_1 float_1
bif erlang:float_to_list/1
-bif 'erl.lang.float':to_string/1 ebif_float_to_string_1 float_to_list_1
bif erlang:fun_info/2
-bif 'erl.lang.function':info/2 ebif_fun_info_2
bif erlang:garbage_collect/0
-bif 'erl.system':garbage_collect/0 ebif_garbage_collect_0
bif erlang:garbage_collect/1
-bif 'erl.system':garbage_collect/1 ebif_garbage_collect_1
bif erlang:get/0
-bif 'erl.lang.proc.pdict':get/0 ebif_get_0
bif erlang:get/1
-bif 'erl.lang.proc.pdict':get/1 ebif_get_1
bif erlang:get_keys/1
-bif 'erl.lang.proc.pdict':get_keys/1 ebif_get_keys_1
bif erlang:group_leader/0
-bif 'erl.lang.proc':group_leader/0 ebif_group_leader_0
bif erlang:group_leader/2
-bif 'erl.lang.proc':set_group_leader/2 ebif_group_leader_2
bif erlang:halt/0
-bif 'erl.lang.system':halt/0 ebif_halt_0
bif erlang:halt/1
-bif 'erl.lang.system':halt/1 ebif_halt_1
bif erlang:halt/2
-bif 'erl.lang.system':halt/2 ebif_halt_2
bif erlang:phash/2
bif erlang:phash2/1
bif erlang:phash2/2
-bif 'erl.lang.term':hash/1 ebif_phash2_1
-bif 'erl.lang.term':hash/2 ebif_phash2_2
ubif erlang:hd/1
-ubif 'erl.lang.list':hd/1 ebif_hd_1
bif erlang:integer_to_list/1
-bif 'erl.lang.integer':to_string/1 ebif_integer_to_string_1 integer_to_list_1
bif erlang:is_alive/0
-bif 'erl.lang.node':is_alive/0 ebif_is_alive_0
ubif erlang:length/1
-ubif 'erl.lang.list':length/1 ebif_length_1
bif erlang:link/1
-bif 'erl.lang.proc':link/1 ebif_link_1
bif erlang:list_to_atom/1
-bif 'erl.lang.atom':from_string/1 ebif_string_to_atom_1 list_to_atom_1
bif erlang:list_to_binary/1
-bif 'erl.lang.binary':from_list/1 ebif_list_to_binary_1
bif erlang:list_to_float/1
-bif 'erl.lang.float':from_string/1 ebif_string_to_float_1 list_to_float_1
bif erlang:list_to_integer/1
-bif 'erl.lang.integer':from_string/1 ebif_string_to_integer_1 list_to_integer_1
bif erlang:list_to_pid/1
-bif 'erl.lang.proc':string_to_pid/1 ebif_string_to_pid_1 list_to_pid_1
bif erlang:list_to_tuple/1
-bif 'erl.lang.tuple':from_list/1 ebif_list_to_tuple_1
-bif erlang:load_module/2
-bif 'erl.system.code':load/2 ebif_load_module_2
bif erlang:loaded/0
-bif 'erl.system.code':loaded/0 ebif_loaded_0
bif erlang:localtime/0
-bif 'erl.util.date':local/0 ebif_localtime_0
bif erlang:localtime_to_universaltime/2
-bif 'erl.util.date':local_to_utc/2 ebif_localtime_to_universaltime_2
bif erlang:make_ref/0
-bif 'erl.lang.ref':new/0 ebif_make_ref_0
bif erlang:md5/1
-bif 'erl.util.crypt.md5':digest/1 ebif_md5_1
bif erlang:md5_init/0
-bif 'erl.util.crypt.md5':init/0 ebif_md5_init_0
bif erlang:md5_update/2
-bif 'erl.util.crypt.md5':update/2 ebif_md5_update_2
bif erlang:md5_final/1
-bif 'erl.util.crypt.md5':final/1 ebif_md5_final_1
bif erlang:module_loaded/1
-bif 'erl.system.code':is_loaded/1 ebif_is_loaded_1 module_loaded_1
bif erlang:function_exported/3
-bif 'erl.system.code':is_loaded/3 ebif_is_loaded_3 function_exported_3
bif erlang:monitor_node/2
-bif 'erl.lang.node':monitor/2 ebif_monitor_node_2
bif erlang:monitor_node/3
-bif 'erl.lang.node':monitor/3 ebif_monitor_node_3
ubif erlang:node/1
-ubif 'erl.lang.node':node/1 ebif_node_1
ubif erlang:node/0
-ubif 'erl.lang.node':node/0 ebif_node_0
bif erlang:nodes/1
-bif 'erl.lang.node':nodes/1 ebif_nodes_1
bif erlang:now/0
-bif 'erl.system':now/0 ebif_now_0
bif erlang:open_port/2
-bif 'erl.lang.port':open/2 ebif_open_port_2 open_port_2
bif erlang:pid_to_list/1
-bif 'erl.lang.proc':pid_to_string/1 ebif_pid_to_string_1 pid_to_list_1
-bif erlang:port_info/1
-bif 'erl.lang.port':info/1 ebif_port_info_1
-bif erlang:port_info/2
-bif 'erl.lang.port':info/2 ebif_port_info_2
bif erlang:ports/0
-bif 'erl.lang.node':ports/0 ebif_ports_0
bif erlang:pre_loaded/0
-bif 'erl.system.code':preloaded/0 ebif_pre_loaded_0
bif erlang:process_flag/2
-bif 'erl.lang.proc':set_flag/2 ebif_process_flag_2
bif erlang:process_flag/3
-bif 'erl.lang.proc':set_flag/3 ebif_process_flag_3
bif erlang:process_info/1
-bif 'erl.lang.proc':info/1 ebif_process_info_1
bif erlang:process_info/2
-bif 'erl.lang.proc':info/2 ebif_process_info_2
bif erlang:processes/0
-bif 'erl.lang.node':processes/0 ebif_processes_0
bif erlang:purge_module/1
-bif 'erl.system.code':purge/1 ebif_purge_module_1
bif erlang:put/2
-bif 'erl.lang.proc.pdict':put/2 ebif_put_2
bif erlang:register/2
-bif 'erl.lang.node':register/2 ebif_register_2
bif erlang:registered/0
-bif 'erl.lang.node':registered/0 ebif_registered_0
ubif erlang:round/1
-ubif 'erl.lang.number':round/1 ebif_round_1
ubif erlang:self/0
-ubif 'erl.lang.proc':self/0 ebif_self_0
bif erlang:setelement/3
-bif 'erl.lang.tuple':setelement/3 ebif_setelement_3
ubif erlang:size/1
-ubif 'erl.lang.term':size/1 ebif_size_1
bif erlang:spawn/3
-bif 'erl.lang.proc':spawn/3 ebif_spawn_3
bif erlang:spawn_link/3
-bif 'erl.lang.proc':spawn_link/3 ebif_spawn_link_3
bif erlang:split_binary/2
-bif 'erl.lang.binary':split/2 ebif_split_binary_2
bif erlang:statistics/1
-bif 'erl.system':statistics/1 ebif_statistics_1
bif erlang:term_to_binary/1
-bif 'erl.lang.binary':from_term/1 ebif_term_to_binary_1
bif erlang:term_to_binary/2
-bif 'erl.lang.binary':from_term/2 ebif_term_to_binary_2
bif erlang:throw/1
-bif 'erl.lang':throw/1 ebif_throw_1
bif erlang:time/0
-bif 'erl.util.date':time_of_day/0 ebif_time_0
ubif erlang:tl/1
-ubif 'erl.lang.list':tl/1 ebif_tl_1
ubif erlang:trunc/1
-ubif 'erl.lang.number':trunc/1 ebif_trunc_1
bif erlang:tuple_to_list/1
-bif 'erl.lang.tuple':to_list/1 ebif_tuple_to_list_1
bif erlang:universaltime/0
-bif 'erl.util.date':utc/0 ebif_universaltime_0
bif erlang:universaltime_to_localtime/1
-bif 'erl.util.date':utc_to_local/1 ebif_universaltime_to_localtime_1
bif erlang:unlink/1
-bif 'erl.lang.proc':unlink/1 ebif_unlink_1
bif erlang:unregister/1
-bif 'erl.lang.node':unregister/1 ebif_unregister_1
bif erlang:whereis/1
-bif 'erl.lang.node':whereis/1 ebif_whereis_1
bif erlang:spawn_opt/1
-bif 'erl.lang.proc':spawn_opt/1 ebif_spawn_opt_1
bif erlang:setnode/2
bif erlang:setnode/3
bif erlang:dist_exit/3
-bif erlang:port_call/2
-bif 'erl.lang.port':call/2 ebif_port_call_2
-bif erlang:port_call/3
-bif 'erl.lang.port':call/3 ebif_port_call_3
-bif erlang:port_command/2
-bif 'erl.lang.port':command/2 ebif_port_command_2
-bif erlang:port_command/3
-bif 'erl.lang.port':command/3 ebif_port_command_3
-bif erlang:port_control/3
-bif 'erl.lang.port':control/3 ebif_port_control_3
-bif erlang:port_close/1
-bif 'erl.lang.port':close/1 ebif_port_close_1
-bif erlang:port_connect/2
-bif 'erl.lang.port':connect/2 ebif_port_connect_2
-bif erlang:port_set_data/2
-bif 'erl.lang.port':set_data/2 ebif_port_set_data_2
-bif erlang:port_get_data/1
-bif 'erl.lang.port':get_data/1 ebif_port_get_data_1
+# Static native functions in erts_internal
+bif erts_internal:port_info/1
+bif erts_internal:port_info/2
+bif erts_internal:port_call/3
+bif erts_internal:port_command/3
+bif erts_internal:port_control/3
+bif erts_internal:port_close/1
+bif erts_internal:port_connect/2
+bif erts_internal:port_set_data/2
+bif erts_internal:port_get_data/1
# Tracing & debugging.
bif erlang:trace_pattern/2
-bif 'erl.system.debug':trace_pattern/2 ebif_trace_pattern_2
bif erlang:trace_pattern/3
-bif 'erl.system.debug':trace_pattern/3 ebif_trace_pattern_3
bif erlang:trace/3
-bif 'erl.system.debug':trace/3 ebif_trace_3
bif erlang:trace_info/2
-bif 'erl.system.debug':trace_info/2 ebif_trace_info_2
bif erlang:trace_delivered/1
-bif 'erl.system.debug':trace_delivered/1 ebif_trace_delivered_1
bif erlang:seq_trace/2
-bif 'erl.system.debug':seq_trace/2 ebif_seq_trace_2
bif erlang:seq_trace_info/1
-bif 'erl.system.debug':seq_trace_info/1 ebif_seq_trace_info_1
bif erlang:seq_trace_print/1
-bif 'erl.system.debug':seq_trace_print/1 ebif_seq_trace_print_1
bif erlang:seq_trace_print/2
-bif 'erl.system.debug':seq_trace_print/2 ebif_seq_trace_print_2
bif erlang:suspend_process/2
-bif 'erl.system.debug':suspend_process/2 ebif_suspend_process_2
bif erlang:resume_process/1
-bif 'erl.system.debug':resume_process/1 ebif_resume_process_1
bif erlang:process_display/2
-bif 'erl.system.debug':process_display/2 ebif_process_display_2
bif erlang:bump_reductions/1
-bif 'erl.lang.proc':bump_reductions/1 ebif_bump_reductions_1
bif math:cos/1
-bif 'erl.lang.math':cos/1 ebif_math_cos_1
bif math:cosh/1
-bif 'erl.lang.math':cosh/1 ebif_math_cosh_1
bif math:sin/1
-bif 'erl.lang.math':sin/1 ebif_math_sin_1
bif math:sinh/1
-bif 'erl.lang.math':sinh/1 ebif_math_sinh_1
bif math:tan/1
-bif 'erl.lang.math':tan/1 ebif_math_tan_1
bif math:tanh/1
-bif 'erl.lang.math':tanh/1 ebif_math_tanh_1
bif math:acos/1
-bif 'erl.lang.math':acos/1 ebif_math_acos_1
bif math:acosh/1
-bif 'erl.lang.math':acosh/1 ebif_math_acosh_1
bif math:asin/1
-bif 'erl.lang.math':asin/1 ebif_math_asin_1
bif math:asinh/1
-bif 'erl.lang.math':asinh/1 ebif_math_asinh_1
bif math:atan/1
-bif 'erl.lang.math':atan/1 ebif_math_atan_1
bif math:atanh/1
-bif 'erl.lang.math':atanh/1 ebif_math_atanh_1
bif math:erf/1
-bif 'erl.lang.math':erf/1 ebif_math_erf_1
bif math:erfc/1
-bif 'erl.lang.math':erfc/1 ebif_math_erfc_1
bif math:exp/1
-bif 'erl.lang.math':exp/1 ebif_math_exp_1
bif math:log/1
-bif 'erl.lang.math':log/1 ebif_math_log_1
bif math:log10/1
-bif 'erl.lang.math':log10/1 ebif_math_log10_1
bif math:sqrt/1
-bif 'erl.lang.math':sqrt/1 ebif_math_sqrt_1
bif math:atan2/2
-bif 'erl.lang.math':atan2/2 ebif_math_atan2_2
bif math:pow/2
-bif 'erl.lang.math':pow/2 ebif_math_pow_2
bif erlang:start_timer/3
-bif 'erl.lang.timer':start/3 ebif_start_timer_3
bif erlang:send_after/3
-bif 'erl.lang.timer':send_after/3 ebif_send_after_3
bif erlang:cancel_timer/1
-bif 'erl.lang.timer':cancel/1 ebif_cancel_timer_1
bif erlang:read_timer/1
-bif 'erl.lang.timer':read/1 ebif_read_timer_1
bif erlang:make_tuple/2
-bif 'erl.lang.tuple':make/2 ebif_make_tuple_2
bif erlang:append_element/2
-bif 'erl.lang.tuple':append_element/2 ebif_append_element_2
bif erlang:make_tuple/3
bif erlang:system_flag/2
-bif 'erl.system':set_flag/2 ebif_system_flag_2
bif erlang:system_info/1
-bif 'erl.system':info/1 ebif_system_info_1
# New in R9C
bif erlang:system_monitor/0
-bif 'erl.system':monitor/0 ebif_system_monitor_0
bif erlang:system_monitor/1
-bif 'erl.system':monitor/1 ebif_system_monitor_1
bif erlang:system_monitor/2
-bif 'erl.system':monitor/2 ebif_system_monitor_2
# Added 2006-11-07
bif erlang:system_profile/2
-bif 'erl.system':profile/2 ebif_system_profile_2
# End Added 2006-11-07
# Added 2007-01-17
bif erlang:system_profile/0
-bif 'erl.system':profile/0 ebif_system_profile_0
# End Added 2007-01-17
bif erlang:ref_to_list/1
-bif 'erl.lang.ref':to_string/1 ebif_ref_to_string_1 ref_to_list_1
bif erlang:port_to_list/1
-bif 'erl.lang.port':to_string/1 ebif_port_to_string_1 port_to_list_1
bif erlang:fun_to_list/1
-bif 'erl.lang.function':to_string/1 ebif_fun_to_string_1 fun_to_list_1
bif erlang:monitor/2
-bif 'erl.lang.proc':monitor/2 ebif_monitor_2
bif erlang:demonitor/1
-bif 'erl.lang.proc':demonitor/1 ebif_demonitor_1
bif erlang:demonitor/2
-bif 'erl.lang.proc':demonitor/2 ebif_demonitor_2
bif erlang:is_process_alive/1
-bif 'erl.lang.proc':is_alive/1 ebif_proc_is_alive_1 is_process_alive_1
bif erlang:error/1 error_1
-bif 'erl.lang':error/1 ebif_error_1 error_1
bif erlang:error/2 error_2
-bif 'erl.lang':error/2 ebif_error_2 error_2
bif erlang:raise/3 raise_3
-bif 'erl.lang':raise/3 ebif_raise_3 raise_3
bif erlang:get_stacktrace/0
-bif 'erl.lang.proc':get_stacktrace/0 ebif_get_stacktrace_0
bif erlang:is_builtin/3
-bif 'erl.system.code':is_builtin/3 ebif_is_builtin_3
ubif erlang:'and'/2
-ubif 'erl.lang.bool':'and'/2 ebif_and_2
ubif erlang:'or'/2
-ubif 'erl.lang.bool':'or'/2 ebif_or_2
ubif erlang:'xor'/2
-ubif 'erl.lang.bool':'xor'/2 ebif_xor_2
ubif erlang:'not'/1
-ubif 'erl.lang.bool':'not'/1 ebif_not_1
ubif erlang:'>'/2 sgt_2
-ubif 'erl.lang.term':greater/2 ebif_gt_2 sgt_2
ubif erlang:'>='/2 sge_2
-ubif 'erl.lang.term':greater_or_equal/2 ebif_ge_2 sge_2
ubif erlang:'<'/2 slt_2
-ubif 'erl.lang.term':less/2 ebif_lt_2 slt_2
ubif erlang:'=<'/2 sle_2
-ubif 'erl.lang.term':less_or_equal/2 ebif_le_2 sle_2
ubif erlang:'=:='/2 seq_2
-ubif 'erl.lang.term':equal/2 ebif_eq_2 seq_2
ubif erlang:'=='/2 seqeq_2
-ubif 'erl.lang.term':arith_equal/2 ebif_areq_2 seqeq_2
ubif erlang:'=/='/2 sneq_2
-ubif 'erl.lang.term':not_equal/2 ebif_neq_2 sneq_2
ubif erlang:'/='/2 sneqeq_2
-ubif 'erl.lang.term':not_arith_equal/2 ebif_nareq_2 sneqeq_2
ubif erlang:'+'/2 splus_2
-ubif 'erl.lang.number':plus/2 ebif_plus_2 splus_2
ubif erlang:'-'/2 sminus_2
-ubif 'erl.lang.number':minus/2 ebif_minus_2 sminus_2
ubif erlang:'*'/2 stimes_2
-ubif 'erl.lang.number':multiply/2 ebif_multiply_2 stimes_2
ubif erlang:'/'/2 div_2
-ubif 'erl.lang.number':divide/2 ebif_divide_2 div_2
ubif erlang:'div'/2 intdiv_2
-ubif 'erl.lang.integer':'div'/2 ebif_intdiv_2
ubif erlang:'rem'/2
-ubif 'erl.lang.integer':'rem'/2 ebif_rem_2
ubif erlang:'bor'/2
-ubif 'erl.lang.integer':'bor'/2 ebif_bor_2
ubif erlang:'band'/2
-ubif 'erl.lang.integer':'band'/2 ebif_band_2
ubif erlang:'bxor'/2
-ubif 'erl.lang.integer':'bxor'/2 ebif_bxor_2
ubif erlang:'bsl'/2
-ubif 'erl.lang.integer':'bsl'/2 ebif_bsl_2
ubif erlang:'bsr'/2
-ubif 'erl.lang.integer':'bsr'/2 ebif_bsr_2
ubif erlang:'bnot'/1
-ubif 'erl.lang.integer':'bnot'/1 ebif_bnot_1
ubif erlang:'-'/1 sminus_1
-ubif 'erl.lang.number':minus/1 ebif_minus_1 sminus_1
ubif erlang:'+'/1 splus_1
-ubif 'erl.lang.number':plus/1 ebif_plus_1 splus_1
# New operators in R8. These were the only operators missing.
# erlang:send/2, erlang:append/2 and erlang:subtract/2 are now also
@@ -466,45 +265,27 @@ ubif 'erl.lang.number':plus/1 ebif_plus_1 splus_1
# internal references have been updated to the new ebif_... entries.
bif erlang:'!'/2 ebif_bang_2
-bif 'erl.lang.proc':send/2 ebif_send_2 send_2
bif erlang:send/2
-bif 'erl.lang':send/3 ebif_send_3 send_3
bif erlang:send/3
bif erlang:'++'/2 ebif_plusplus_2
-bif 'erl.lang.list':append/2 ebif_append_2 ebif_plusplus_2
bif erlang:append/2
bif erlang:'--'/2 ebif_minusminus_2
-bif 'erl.lang.list':subtract/2 ebif_list_subtract_2 ebif_minusminus_2
bif erlang:subtract/2
ubif erlang:is_atom/1
-ubif 'erl.lang.term':is_atom/1 ebif_is_atom_1
ubif erlang:is_list/1
-ubif 'erl.lang.term':is_list/1 ebif_is_list_1
ubif erlang:is_tuple/1
-ubif 'erl.lang.term':is_tuple/1 ebif_is_tuple_1
ubif erlang:is_float/1
-ubif 'erl.lang.term':is_float/1 ebif_is_float_1
ubif erlang:is_integer/1
-ubif 'erl.lang.term':is_integer/1 ebif_is_integer_1
ubif erlang:is_number/1
-ubif 'erl.lang.term':is_number/1 ebif_is_number_1
ubif erlang:is_pid/1
-ubif 'erl.lang.term':is_pid/1 ebif_is_pid_1
ubif erlang:is_port/1
-ubif 'erl.lang.term':is_port/1 ebif_is_port_1
ubif erlang:is_reference/1
-ubif 'erl.lang.term':is_reference/1 ebif_is_reference_1
ubif erlang:is_binary/1
-ubif 'erl.lang.term':is_binary/1 ebif_is_binary_1
ubif erlang:is_function/1
-ubif 'erl.lang.term':is_function/1 ebif_is_function_1
ubif erlang:is_function/2
-ubif 'erl.lang.term':is_function/2 ebif_is_function_2
ubif erlang:is_record/2
-ubif 'erl.lang.term':is_record/2 ebif_is_record_2
ubif erlang:is_record/3
-ubif 'erl.lang.term':is_record/3 ebif_is_record_3
bif erlang:match_spec_test/3
@@ -513,96 +294,53 @@ bif erlang:match_spec_test/3
#
bif ets:all/0
-bif 'erl.lang.ets':all/0 ebif_ets_all_0
bif ets:new/2
-bif 'erl.lang.ets':new/2 ebif_ets_new_2
bif ets:delete/1
-bif 'erl.lang.ets':delete/1 ebif_ets_delete_1
bif ets:delete/2
-bif 'erl.lang.ets':delete/2 ebif_ets_delete_2
bif ets:delete_all_objects/1
-bif 'erl.lang.ets':delete_all_objects/1 ebif_ets_delete_all_objects_1
bif ets:delete_object/2
-bif 'erl.lang.ets':delete_object/2 ebif_ets_delete_object_2
bif ets:first/1
-bif 'erl.lang.ets':first/1 ebif_ets_first_1
bif ets:is_compiled_ms/1
-bif 'erl.lang.ets':is_compiled_ms/1 ebif_ets_is_compiled_ms_1
bif ets:lookup/2
-bif 'erl.lang.ets':lookup/2 ebif_ets_lookup_2
bif ets:lookup_element/3
-bif 'erl.lang.ets':lookup_element/3 ebif_ets_lookup_element_3
bif ets:info/1
-bif 'erl.lang.ets':info/1 ebif_ets_info_1
bif ets:info/2
-bif 'erl.lang.ets':info/2 ebif_ets_info_2
bif ets:last/1
-bif 'erl.lang.ets':last/1 ebif_ets_last_1
bif ets:match/1
-bif 'erl.lang.ets':match/1 ebif_ets_match_1
bif ets:match/2
-bif 'erl.lang.ets':match/2 ebif_ets_match_2
bif ets:match/3
-bif 'erl.lang.ets':match/3 ebif_ets_match_3
bif ets:match_object/1
-bif 'erl.lang.ets':match_object/1 ebif_ets_match_object_1
bif ets:match_object/2
-bif 'erl.lang.ets':match_object/2 ebif_ets_match_object_2
bif ets:match_object/3
-bif 'erl.lang.ets':match_object/3 ebif_ets_match_object_3
bif ets:member/2
-bif 'erl.lang.ets':is_key/2 ebif_ets_member_2
bif ets:next/2
-bif 'erl.lang.ets':next/2 ebif_ets_next_2
bif ets:prev/2
-bif 'erl.lang.ets':prev/2 ebif_ets_prev_2
bif ets:insert/2
-bif 'erl.lang.ets':insert/2 ebif_ets_insert_2
bif ets:insert_new/2
-bif 'erl.lang.ets':insert_new/2 ebif_ets_insert_new_2
bif ets:rename/2
-bif 'erl.lang.ets':rename/2 ebif_ets_rename_2
bif ets:safe_fixtable/2
-bif 'erl.lang.ets':fixtable/2 ebif_ets_safe_fixtable_2
bif ets:slot/2
-bif 'erl.lang.ets':slot/2 ebif_ets_slot_2
bif ets:update_counter/3
-bif 'erl.lang.ets':update_counter/3 ebif_ets_update_counter_3
bif ets:select/1
-bif 'erl.lang.ets':select/1 ebif_ets_select_1
bif ets:select/2
-bif 'erl.lang.ets':select/2 ebif_ets_select_2
bif ets:select/3
-bif 'erl.lang.ets':select/3 ebif_ets_select_3
bif ets:select_count/2
-bif 'erl.lang.ets':select/2 ebif_ets_select_count_2
bif ets:select_reverse/1
-bif 'erl.lang.ets':select_reverse/1 ebif_ets_select_reverse_1
bif ets:select_reverse/2
-bif 'erl.lang.ets':select_reverse/2 ebif_ets_select_reverse_2
bif ets:select_reverse/3
-bif 'erl.lang.ets':select_reverse/3 ebif_ets_select_reverse_3
bif ets:select_delete/2
-bif 'erl.lang.ets':select_delete/2 ebif_ets_select_delete_2
bif ets:match_spec_compile/1
-bif 'erl.lang.ets':match_spec_compile/1 ebif_ets_match_spec_compile_1
bif ets:match_spec_run_r/3
-bif 'erl.lang.ets':match_spec_run_r/3 ebif_ets_match_spec_run_r_3
#
# Bifs in os module.
#
bif os:putenv/2
-bif 'erl.system.os':setenv/2 ebif_os_setenv_2 os_putenv_2
bif os:getenv/0
-bif 'erl.system.os':getenv/0 ebif_os_getenv_0
bif os:getenv/1
-bif 'erl.system.os':getenv/1 ebif_os_getenv_1
bif os:getpid/0
-bif 'erl.system.os':pid/0 ebif_os_pid_0 os_getpid_0
bif os:timestamp/0
-bif 'erl.system.os':timestamp/0 ebif_os_timestamp_0 os_timestamp_0
#
# Bifs in the erl_ddll module (the module actually does not exist)
@@ -629,13 +367,9 @@ bif re:run/3
#
bif lists:member/2
-bif 'erl.lang.list':is_element/2 ebif_list_is_element_2 lists_member_2
bif lists:reverse/2
-bif 'erl.lang.list':reverse/2 ebif_list_reverse_2 lists_reverse_2
bif lists:keymember/3
-bif 'erl.lang.list.keylist':is_element/3 ebif_keylist_is_element_3 lists_keymember_3
bif lists:keysearch/3
-bif 'erl.lang.list.keylist':search/3 ebif_keylist_search_3 lists_keysearch_3
bif lists:keyfind/3
#
@@ -643,21 +377,13 @@ bif lists:keyfind/3
#
bif erts_debug:disassemble/1
-bif 'erl.system.debug':disassemble/1 ebif_erts_debug_disassemble_1
bif erts_debug:breakpoint/2
-bif 'erl.system.debug':breakpoint/2 ebif_erts_debug_breakpoint_2
bif erts_debug:same/2
-bif 'erl.system.debug':same/2 ebif_erts_debug_same_2
bif erts_debug:flat_size/1
-bif 'erl.system.debug':flat_size/1 ebif_erts_debug_flat_size_1
bif erts_debug:get_internal_state/1
-bif 'erl.system.debug':get_internal_state/1 ebif_erts_debug_get_internal_state_1
bif erts_debug:set_internal_state/2
-bif 'erl.system.debug':set_internal_state/2 ebif_erts_debug_set_internal_state_2
bif erts_debug:display/1
-bif 'erl.system.debug':display/1 ebif_erts_debug_display_1
bif erts_debug:dist_ext_to_term/2
-bif 'erl.system.debug':dist_ext_to_term/2 ebif_erts_debug_dist_ext_to_term_2
bif erts_debug:instructions/0
#
@@ -677,13 +403,9 @@ bif erts_debug:lock_counters/1
#
bif code:get_chunk/2
-bif 'erl.system.code':get_chunk/2 ebif_code_get_chunk_2
bif code:module_md5/1
-bif 'erl.system.code':module_md5/1 ebif_code_module_md5_1
bif code:make_stub_module/3
-bif 'erl.system.code':make_stub_module/3 ebif_code_make_stub_module_3
bif code:is_module_native/1
-bif 'erl.system.code':is_native/1 ebif_code_is_native_1 code_is_module_native_1
#
# New Bifs in R9C.
@@ -829,6 +551,15 @@ bif erlang:dt_restore_tag/1
bif erlang:dt_prepend_vm_tag_data/1
bif erlang:dt_append_vm_tag_data/1
+
+#
+# New in R16B.
+#
+bif erlang:prepare_loading/2
+bif erlang:finish_loading/1
+bif erlang:insert_element/3
+bif erlang:delete_element/2
+
#
# Obsolete
#
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 3d2725e239..4441dab181 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -355,10 +355,10 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1)
BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
{
Eterm bin;
- Uint size;
+ ErlDrvSizeT size;
byte* bytes;
#ifdef DEBUG
- int offset;
+ ErlDrvSizeT offset;
#endif
if (is_nil(arg)) {
@@ -377,7 +377,7 @@ BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg)
#ifdef DEBUG
offset =
#endif
- io_list_to_buf(arg, (char*) bytes, size);
+ erts_iolist_to_buf(arg, (char*) bytes, size);
ASSERT(offset == 0);
BIF_RET(bin);
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index cf66f4e6b6..9aa1e5f30d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -61,19 +61,23 @@ extern char* erts_system_version[];
static void
port_info(int to, void *to_arg)
{
- int i;
- for (i = 0; i < erts_max_ports; i++)
- print_port_info(to, to_arg, i);
+ int i, max = erts_ptab_max(&erts_port);
+ for (i = 0; i < max; i++) {
+ Port *p = erts_pix2port(i);
+ if (p)
+ print_port_info(p, to, to_arg);
+ }
}
void
process_info(int to, void *to_arg)
{
- int i;
- for (i = 0; i < erts_max_processes; i++) {
- if ((process_tab[i] != NULL) && (process_tab[i]->i != ENULL)) {
- if (process_tab[i]->status != P_EXITING)
- print_process_info(to, to_arg, process_tab[i]);
+ int i, max = erts_ptab_max(&erts_proc);
+ for (i = 0; i < max; i++) {
+ Process *p = erts_pix2proc(i);
+ if (p && p->i != ENULL) {
+ if (!ERTS_PROC_IS_EXITING(p))
+ print_process_info(to, to_arg, p);
}
}
@@ -83,13 +87,14 @@ process_info(int to, void *to_arg)
static void
process_killer(void)
{
- int i, j;
+ int i, j, max = erts_ptab_max(&erts_proc);
Process* rp;
erts_printf("\n\nProcess Information\n\n");
erts_printf("--------------------------------------------------\n");
- for (i = erts_max_processes-1; i >= 0; i--) {
- if (((rp = process_tab[i]) != NULL) && rp->i != ENULL) {
+ for (i = max-1; i >= 0; i--) {
+ rp = erts_pix2proc(i);
+ if (rp && rp->i != ENULL) {
int br;
print_process_info(ERTS_PRINT_STDOUT, NULL, rp);
erts_printf("(k)ill (n)ext (r)eturn:\n");
@@ -97,11 +102,20 @@ process_killer(void)
if ((j = sys_get_key(0)) <= 0)
erl_exit(0, "");
switch(j) {
- case 'k':
- if (rp->status == P_WAITING) {
- ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- erts_smp_proc_inc_refc(rp);
- erts_smp_proc_lock(rp, rp_locks);
+ case 'k': {
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ erts_aint32_t state;
+ erts_smp_proc_inc_refc(rp);
+ erts_smp_proc_lock(rp, rp_locks);
+ state = erts_smp_atomic32_read_acqb(&rp->state);
+ if (state & (ERTS_PSFLG_FREE
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_IN_RUNQ
+ | ERTS_PSFLG_RUNNING)) {
+ erts_printf("Can only kill WAITING processes this way\n");
+ }
+ else {
(void) erts_send_exit_signal(NULL,
NIL,
rp,
@@ -110,12 +124,10 @@ process_killer(void)
NIL,
NULL,
0);
- erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
}
- else
- erts_printf("Can only kill WAITING processes this way\n");
-
+ erts_smp_proc_unlock(rp, rp_locks);
+ erts_smp_proc_dec_refc(rp);
+ }
case 'n': br = 1; break;
case 'r': return;
default: return;
@@ -180,49 +192,45 @@ static void doit_print_monitor(ErtsMonitor *mon, void *vpcontext)
void
print_process_info(int to, void *to_arg, Process *p)
{
+ time_t approx_started;
int garbing = 0;
int running = 0;
- time_t tmp_t;
struct saved_calls *scb;
+ erts_aint32_t state;
/* display the PID */
- erts_print(to, to_arg, "=proc:%T\n", p->id);
+ erts_print(to, to_arg, "=proc:%T\n", p->common.id);
/* Display the state */
erts_print(to, to_arg, "State: ");
- switch (p->status) {
- case P_FREE:
+
+ state = erts_smp_atomic32_read_acqb(&p->state);
+ if (state & ERTS_PSFLG_FREE)
erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */
- break;
- case P_RUNABLE:
- erts_print(to, to_arg, "Scheduled\n");
- break;
- case P_WAITING:
- erts_print(to, to_arg, "Waiting\n");
- break;
- case P_SUSPENDED:
- erts_print(to, to_arg, "Suspended\n");
- break;
- case P_RUNNING:
- erts_print(to, to_arg, "Running\n");
- running = 1;
- break;
- case P_EXITING:
+ else if (state & ERTS_PSFLG_EXITING)
erts_print(to, to_arg, "Exiting\n");
- break;
- case P_GARBING:
- erts_print(to, to_arg, "Garbing\n");
+ else if (state & ERTS_PSFLG_GC) {
garbing = 1;
running = 1;
- break;
+ erts_print(to, to_arg, "Garbing\n");
}
+ else if (state & ERTS_PSFLG_SUSPENDED)
+ erts_print(to, to_arg, "Suspended\n");
+ else if (state & ERTS_PSFLG_RUNNING) {
+ running = 1;
+ erts_print(to, to_arg, "Running\n");
+ }
+ else if (state & ERTS_PSFLG_ACTIVE)
+ erts_print(to, to_arg, "Scheduled\n");
+ else
+ erts_print(to, to_arg, "Waiting\n");
/*
* If the process is registered as a global process, display the
* registered name
*/
- if (p->reg != NULL)
- erts_print(to, to_arg, "Name: %T\n", p->reg->name);
+ if (p->common.u.alive.reg)
+ erts_print(to, to_arg, "Name: %T\n", p->common.u.alive.reg->name);
/*
* Display the initial function name
@@ -245,8 +253,8 @@ print_process_info(int to, void *to_arg, Process *p)
}
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
- tmp_t = p->started.tv_sec;
- erts_print(to, to_arg, "Started: %s", ctime(&tmp_t));
+ approx_started = (time_t) p->approx_started;
+ erts_print(to, to_arg, "Started: %s", ctime(&approx_started));
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
erts_print(to, to_arg, "Message queue length: %d\n", p->msg.len);
@@ -296,11 +304,11 @@ print_process_info(int to, void *to_arg, Process *p)
}
/* display the links only if there are any*/
- if (p->nlinks != NULL || p->monitors != NULL) {
+ if (ERTS_P_LINKS(p) || ERTS_P_MONITORS(p)) {
PrintMonitorContext context = {1,to};
erts_print(to, to_arg,"Link list: [");
- erts_doforall_links(p->nlinks, &doit_print_link, &context);
- erts_doforall_monitors(p->monitors, &doit_print_monitor, &context);
+ erts_doforall_links(ERTS_P_LINKS(p), &doit_print_link, &context);
+ erts_doforall_monitors(ERTS_P_MONITORS(p), &doit_print_monitor, &context);
erts_print(to, to_arg,"]\n");
}
@@ -377,17 +385,22 @@ loaded(int to, void *to_arg)
int old = 0;
int cur = 0;
BeamInstr* code;
+ Module* modp;
+ ErtsCodeIndex code_ix;
+
+ code_ix = erts_active_code_ix();
+ erts_rlock_old_code(code_ix);
/*
* Calculate and print totals.
*/
- for (i = 0; i < module_code_size(); i++) {
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
- cur += module_code(i)->code_length;
- if (module_code(i)->old_code_length != 0) {
- old += module_code(i)->old_code_length;
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ if ((modp = module_code(i, code_ix)) != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
+ cur += modp->curr.code_length;
+ if (modp->old.code_length != 0) {
+ old += modp->old.code_length;
}
}
}
@@ -398,21 +411,22 @@ loaded(int to, void *to_arg)
* Print one line per module.
*/
- for (i = 0; i < module_code_size(); i++) {
+ for (i = 0; i < module_code_size(code_ix); i++) {
+ modp = module_code(i, code_ix);
if (!ERTS_IS_CRASH_DUMPING) {
/*
* Interactive dump; keep it brief.
*/
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
- erts_print(to, to_arg, "%T", make_atom(module_code(i)->module));
- cur += module_code(i)->code_length;
- erts_print(to, to_arg, " %d", module_code(i)->code_length );
- if (module_code(i)->old_code_length != 0) {
+ if (modp != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
+ erts_print(to, to_arg, "%T", make_atom(modp->module));
+ cur += modp->curr.code_length;
+ erts_print(to, to_arg, " %d", modp->curr.code_length );
+ if (modp->old.code_length != 0) {
erts_print(to, to_arg, " (%d old)",
- module_code(i)->old_code_length );
- old += module_code(i)->old_code_length;
+ modp->old.code_length );
+ old += modp->old.code_length;
}
erts_print(to, to_arg, "\n");
}
@@ -420,15 +434,15 @@ loaded(int to, void *to_arg)
/*
* To crash dump; make it parseable.
*/
- if (module_code(i) != NULL &&
- ((module_code(i)->code_length != 0) ||
- (module_code(i)->old_code_length != 0))) {
+ if (modp != NULL &&
+ ((modp->curr.code_length != 0) ||
+ (modp->old.code_length != 0))) {
erts_print(to, to_arg, "=mod:");
- erts_print(to, to_arg, "%T", make_atom(module_code(i)->module));
+ erts_print(to, to_arg, "%T", make_atom(modp->module));
erts_print(to, to_arg, "\n");
erts_print(to, to_arg, "Current size: %d\n",
- module_code(i)->code_length);
- code = module_code(i)->code;
+ modp->curr.code_length);
+ code = modp->curr.code;
if (code != NULL && code[MI_ATTR_PTR]) {
erts_print(to, to_arg, "Current attributes: ");
dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR],
@@ -440,9 +454,9 @@ loaded(int to, void *to_arg)
code[MI_COMPILE_SIZE]);
}
- if (module_code(i)->old_code_length != 0) {
- erts_print(to, to_arg, "Old size: %d\n", module_code(i)->old_code_length);
- code = module_code(i)->old_code;
+ if (modp->old.code_length != 0) {
+ erts_print(to, to_arg, "Old size: %d\n", modp->old.code_length);
+ code = modp->old.code;
if (code[MI_ATTR_PTR]) {
erts_print(to, to_arg, "Old attributes: ");
dump_attributes(to, to_arg, (byte *) code[MI_ATTR_PTR],
@@ -457,6 +471,7 @@ loaded(int to, void *to_arg)
}
}
}
+ erts_runlock_old_code(code_ix);
}
@@ -613,16 +628,17 @@ bin_check(void)
{
Process *rp;
struct erl_off_heap_header* hdr;
- int i, printed = 0;
+ int i, printed = 0, max = erts_ptab_max(&erts_proc);
- for (i=0; i < erts_max_processes; i++) {
- if ((rp = process_tab[i]) == NULL)
+ for (i=0; i < max; i++) {
+ rp = erts_pix2proc(i);
+ if (!rp)
continue;
for (hdr = rp->off_heap.first; hdr; hdr = hdr->next) {
if (hdr->thing_word == HEADER_PROC_BIN) {
ProcBin *bp = (ProcBin*) hdr;
if (!printed) {
- erts_printf("Process %T holding binary data \n", rp->id);
+ erts_printf("Process %T holding binary data \n", rp->common.id);
printed = 1;
}
erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
@@ -657,6 +673,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
char dumpnamebuf[MAXPATHLEN];
char* dumpname;
int secs;
+ int env_erl_crash_dump_seconds_set = 1;
if (ERTS_SOMEONE_IS_CRASH_DUMPING)
return;
@@ -681,6 +698,8 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
envsz = sizeof(env);
/* ERL_CRASH_DUMP_SECONDS not set
+ * if we have a heart port, break immediately
+ * otherwise write the crash dump with no time limit (until it completes)
* same as ERL_CRASH_DUMP_SECONDS = 0
* - do not write dump
* - do not set an alarm
@@ -702,8 +721,10 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
*/
if (erts_sys_getenv__("ERL_CRASH_DUMP_SECONDS", env, &envsz) != 0) {
- return; /* break immediately */
+ env_erl_crash_dump_seconds_set = 0;
+ secs = -1;
} else {
+ env_erl_crash_dump_seconds_set = 1;
secs = atoi(env);
}
@@ -711,7 +732,16 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
return;
}
- erts_sys_prepare_crash_dump(secs);
+ /* erts_sys_prepare_crash_dump returns 1 if a heart port is found, otherwise 0.
+ * If we do not find a heart port (0) and ERL_CRASH_DUMP_SECONDS is not set,
+ * we should continue writing the dump.
+ *
+ * Beware: secs == -1 means no alarm.
+ */
+
+ if (erts_sys_prepare_crash_dump(secs) && !env_erl_crash_dump_seconds_set ) {
+ return;
+ }
if (erts_sys_getenv__("ERL_CRASH_DUMP",&dumpnamebuf[0],&dumpnamebufsize) != 0)
dumpname = "erl_crash.dump";
@@ -739,7 +769,7 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_print_nif_taints(fd, NULL);
erts_fdprintf(fd, "Atoms: %d\n", atom_table_size());
info(fd, NULL); /* General system info */
- if (process_tab != NULL) /* XXX true at init */
+ if (erts_ptab_initialized(&erts_proc))
process_info(fd, NULL); /* Info about each process and port */
db_info(fd, NULL, 0);
erts_print_bif_timer_info(fd, NULL);
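break.c now walks the process and port tables through the ptab interface instead of the removed global arrays; the same pattern appears in port_info, process_info, process_killer and bin_check above. A condensed sketch of that walk follows; the callback is hypothetical, while erts_ptab_max, erts_pix2proc and ERTS_PROC_IS_EXITING are as used in the hunks.

    /* Sketch of the table-walk pattern used by the break.c hunks above. */
    static void for_each_live_process(void (*fn)(Process *, void *), void *arg)
    {
        int i, max = erts_ptab_max(&erts_proc);   /* table size, not a live count */
        for (i = 0; i < max; i++) {
            Process *p = erts_pix2proc(i);        /* NULL for unused slots */
            if (p && !ERTS_PROC_IS_EXITING(p))
                fn(p, arg);
        }
    }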
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
new file mode 100644
index 0000000000..c66d5a2f05
--- /dev/null
+++ b/erts/emulator/beam/code_ix.c
@@ -0,0 +1,168 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "code_ix.h"
+#include "global.h"
+#include "beam_catches.h"
+
+
+
+#if 0
+# define CIX_TRACE(text) erts_fprintf(stderr, "CIX_TRACE: " text " act=%u load=%u\r\n", erts_active_code_ix(), erts_staging_code_ix())
+#else
+# define CIX_TRACE(text)
+#endif
+
+erts_smp_atomic32_t the_active_code_index;
+erts_smp_atomic32_t the_staging_code_index;
+
+static Process* code_writing_process = NULL;
+struct code_write_queue_item {
+ Process *p;
+ struct code_write_queue_item* next;
+};
+static struct code_write_queue_item* code_write_queue = NULL;
+static erts_smp_mtx_t code_write_permission_mtx;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+static erts_tsd_key_t has_code_write_permission;
+#endif
+
+void erts_code_ix_init(void)
+{
+ /* We start the emulator by initializing preloaded modules
+ * single threaded, with both the active and staging indexes set to zero.
+ * Preloading is finished by a commit that will set things straight.
+ */
+ erts_smp_atomic32_init_nob(&the_active_code_index, 0);
+ erts_smp_atomic32_init_nob(&the_staging_code_index, 0);
+ erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission");
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_key_create(&has_code_write_permission);
+#endif
+ CIX_TRACE("init");
+}
+
+void erts_start_staging_code_ix(void)
+{
+ beam_catches_start_staging();
+ export_start_staging();
+ module_start_staging();
+ erts_start_staging_ranges();
+ CIX_TRACE("start");
+}
+
+
+void erts_end_staging_code_ix(void)
+{
+ beam_catches_end_staging(1);
+ export_end_staging(1);
+ module_end_staging(1);
+ erts_end_staging_ranges(1);
+ CIX_TRACE("end");
+}
+
+void erts_commit_staging_code_ix(void)
+{
+ ErtsCodeIndex ix;
+ /* We need this lock as we are now making the staging export table active */
+ export_staging_lock();
+ ix = erts_staging_code_ix();
+ erts_smp_atomic32_set_nob(&the_active_code_index, ix);
+ ix = (ix + 1) % ERTS_NUM_CODE_IX;
+ erts_smp_atomic32_set_nob(&the_staging_code_index, ix);
+ export_staging_unlock();
+ CIX_TRACE("activate");
+}
+
+void erts_abort_staging_code_ix(void)
+{
+ beam_catches_end_staging(0);
+ export_end_staging(0);
+ module_end_staging(0);
+ erts_end_staging_ranges(0);
+ CIX_TRACE("abort");
+}
+
+
+/*
+ * Caller _must_ yield if we return 0
+ */
+int erts_try_seize_code_write_permission(Process* c_p)
+{
+ int success;
+#ifdef ERTS_SMP
+ ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
+#endif
+ ASSERT(c_p != NULL);
+
+ erts_smp_mtx_lock(&code_write_permission_mtx);
+ success = (code_writing_process == NULL);
+ if (success) {
+ code_writing_process = c_p;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_set(has_code_write_permission, (void *) 1);
+#endif
+ }
+ else { /* Already locked */
+ struct code_write_queue_item* qitem;
+ ASSERT(code_writing_process != c_p);
+ qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
+ qitem->p = c_p;
+ erts_smp_proc_inc_refc(c_p);
+ qitem->next = code_write_queue;
+ code_write_queue = qitem;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ }
+ erts_smp_mtx_unlock(&code_write_permission_mtx);
+ return success;
+}
+
+void erts_release_code_write_permission(void)
+{
+ erts_smp_mtx_lock(&code_write_permission_mtx);
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+ while (code_write_queue != NULL) { /* unleash the entire herd */
+ struct code_write_queue_item* qitem = code_write_queue;
+ erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(qitem->p)) {
+ erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
+ code_write_queue = qitem->next;
+ erts_smp_proc_dec_refc(qitem->p);
+ erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem);
+ }
+ code_writing_process = NULL;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_tsd_set(has_code_write_permission, (void *) 0);
+#endif
+ erts_smp_mtx_unlock(&code_write_permission_mtx);
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+int erts_has_code_write_permission(void)
+{
+ return (code_writing_process != NULL) && erts_tsd_get(has_code_write_permission);
+}
+#endif
diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h
new file mode 100644
index 0000000000..e2bd0da112
--- /dev/null
+++ b/erts/emulator/beam/code_ix.h
@@ -0,0 +1,141 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/* Description:
+ * This is the interface that facilitates changing the beam code
+ * (load,upgrade,delete) while allowing executing Erlang processes to
+ * access the code without any locks or other expensive memory barriers.
+ *
+ * The basic idea is to maintain several "logical copies" of the code. These
+ * copies are identified by a global 'code index', an integer that is 0, 1 or 2.
+ * The code index is used as argument to code access structures like
+ * export, module, beam_catches, beam_ranges.
+ *
+ * The current 'active' code index is used to access the current running
+ * code. The 'staging' code index is used by the process that performs
+ * a code change operation. When a code change operation completes
+ * successfully, the staging code index becomes the new active code index.
+ *
+ * The third code index is not explicitly used. It can be thought of as
+ * the "previous active" or the "next staging" index. It is needed to make
+ * sure that we do not reuse a code index for staging until we are sure
+ * that no executing BIFs are still referencing it.
+ * We could get by with only two (0 and 1), but that would require us to
+ * wait for all schedulers to re-schedule before each code change
+ * operation can start staging.
+ *
+ * Note that the 'code index' is very loosely coupled to the concept of
+ * 'current' and 'old' module versions. You can almost say that they are
+ * orthogonal to each other. The code index is an emulator-global concept,
+ * while 'current' and 'old' are specific to each module.
+ */
+
+#ifndef __CODE_IX_H__
+#define __CODE_IX_H__
+
+#ifndef __SYS_H__
+# ifdef HAVE_CONFIG_H
+# include "config.h"
+# endif
+# include "sys.h"
+#endif
+struct process;
+
+
+#define ERTS_NUM_CODE_IX 3
+typedef unsigned ErtsCodeIndex;
+
+
+/* Called once at emulator initialization.
+ */
+void erts_code_ix_init(void);
+
+/* Return active code index.
+ * Is guaranteed to be valid until the calling BIF returns.
+ * To get a consistent view of the code, only one call to erts_active_code_ix()
+ * should be made and the returned ix reused within the same BIF call.
+ */
+ERTS_GLB_INLINE
+ErtsCodeIndex erts_active_code_ix(void);
+
+/* Return staging code ix.
+ * Only used by a process performing code loading/upgrading/deleting/purging.
+ * code_ix must be locked.
+ */
+ERTS_GLB_INLINE
+ErtsCodeIndex erts_staging_code_ix(void);
+
+/* Try seize exclusive code write permission. Needed for code staging.
+ * Main process lock (only) must be held.
+ * System thread progress must not be blocked.
+ * Caller is suspended and *must* yield if 0 is returned.
+ */
+int erts_try_seize_code_write_permission(struct process*);
+
+/* Release code write permission.
+ * Will resume any suspended waiters.
+ */
+void erts_release_code_write_permission(void);
+
+/* Prepare the "staging area" to be a complete copy of the active code.
+ * Code write permission must have been seized.
+ * Must be followed by calls to either "end" and "commit" or "abort" before
+ * code write permission can be released.
+ */
+void erts_start_staging_code_ix(void);
+
+/* End the staging.
+ * Preceded by "start" and must be followed by "commit".
+ */
+void erts_end_staging_code_ix(void);
+
+/* Set staging code index as new active code index.
+ * Preceded by "end".
+ */
+void erts_commit_staging_code_ix(void);
+
+/* Abort the staging.
+ * Preceded by "start".
+ */
+void erts_abort_staging_code_ix(void);
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+int erts_has_code_write_permission(void);
+#endif
+
+
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+extern erts_smp_atomic32_t the_active_code_index;
+extern erts_smp_atomic32_t the_staging_code_index;
+
+ERTS_GLB_INLINE ErtsCodeIndex erts_active_code_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&the_active_code_index);
+}
+ERTS_GLB_INLINE ErtsCodeIndex erts_staging_code_ix(void)
+{
+ return erts_smp_atomic32_read_nob(&the_staging_code_index);
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* !__CODE_IX_H__ */
+
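Taken together, the comments in code_ix.h describe a fixed calling sequence for a code-changing operation: seize write permission, stage, then either commit or abort, and finally release. The sketch below is assembled from those comments; stage_changes_ok is a placeholder for the actual load/upgrade/purge work, and the yield handling is only indicated by a comment.

    /* Sketch only: the staging sequence documented in code_ix.h above. */
    static int stage_changes_ok(void) { return 1; }  /* placeholder for real work */

    static void code_change_sketch(Process *c_p)
    {
        if (!erts_try_seize_code_write_permission(c_p)) {
            /* c_p was suspended and queued; the caller must yield (trap)
             * and redo the operation once it is resumed. */
            return;
        }
        erts_start_staging_code_ix();          /* staging = copy of active code */
        if (stage_changes_ok()) {
            erts_end_staging_code_ix();
            erts_commit_staging_code_ix();     /* staging becomes the active index */
        } else {
            erts_abort_staging_code_ix();      /* discard the staged copy */
        }
        erts_release_code_write_permission();  /* resumes any queued waiters */
    }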
diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c
index 36eda04de2..23c0fca6aa 100644
--- a/erts/emulator/beam/copy.c
+++ b/erts/emulator/beam/copy.c
@@ -47,7 +47,7 @@ copy_object(Eterm obj, Process* to)
if (DTRACE_ENABLED(copy_object)) {
DTRACE_CHARBUF(proc_name, 64);
- erts_snprintf(proc_name, sizeof(proc_name), "%T", to->id);
+ erts_snprintf(proc_name, sizeof(proc_name), "%T", to->common.id);
DTRACE2(copy_object, proc_name, size);
}
#endif
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 025258e8de..64cd93a100 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -67,7 +67,7 @@ dist_msg_dbg(ErtsDistExternal *edep, char *what, byte *buf, int sz)
{
byte *extp = edep->extp;
Eterm msg;
- Sint size = erts_decode_dist_ext_size(edep, 0);
+ Sint size = erts_decode_dist_ext_size(edep);
if (size < 0) {
erts_fprintf(stderr,
"DIST MSG DEBUG: erts_decode_dist_ext_size(%s) failed:\n",
@@ -124,6 +124,13 @@ static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm);
static void init_nodes_monitors(void);
static erts_smp_atomic_t no_caches;
+static erts_smp_atomic_t no_nodes;
+
+struct {
+ Eterm reason;
+ ErlHeapFragment *bp;
+} nodedown;
+
static void
delete_cache(ErtsAtomCache *cache)
@@ -144,7 +151,7 @@ create_cache(DistEntry *dep)
ERTS_SMP_LC_ASSERT(
is_internal_port(dep->cid)
- && erts_lc_is_port_locked(&erts_port[internal_port_index(dep->cid)]));
+ && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
ASSERT(!dep->cache);
dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE,
@@ -171,11 +178,10 @@ get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs)
return NULL;
}
else {
- ErtsProcList *plp;
- plp = dep->suspended.first;
- dep->suspended.first = NULL;
- dep->suspended.last = NULL;
- return plp;
+ ErtsProcList *suspended = dep->suspended;
+ dep->suspended = NULL;
+ erts_proclist_fetch(&suspended, NULL);
+ return suspended;
}
}
@@ -252,7 +258,7 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
if (mon->type == MON_ORIGIN) {
 /* local pid is being monitored */
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); nope, can happen during process exit */
if (rmon != NULL) {
erts_destroy_monitor(rmon);
@@ -262,7 +268,7 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
Eterm watched;
UseTmpHeapNoproc(3);
ASSERT(mon->type == MON_TARGET);
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
/* ASSERT(rmon != NULL); can happen during process exit */
if (rmon != NULL) {
ASSERT(is_atom(rmon->name) || is_nil(rmon->name));
@@ -311,7 +317,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp)
goto done;
}
- rlnk = erts_remove_link(&(rp->nlinks), sublnk->pid);
+ rlnk = erts_remove_link(&ERTS_P_LINKS(rp), sublnk->pid);
xres = erts_send_exit_signal(NULL,
sublnk->pid,
rp,
@@ -370,7 +376,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
if (!rp) {
goto done;
}
- rlnk = erts_remove_link(&(rp->nlinks), name);
+ rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name);
if (rlnk != NULL) {
ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE));
erts_destroy_link(rlnk);
@@ -394,6 +400,47 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
erts_destroy_link(lnk);
}
+static void
+set_node_not_alive(void *unused)
+{
+ ErlHeapFragment *bp;
+ Eterm nodename = erts_this_dist_entry->sysname;
+
+ ASSERT(erts_smp_atomic_read_nob(&no_nodes) == 0);
+
+ erts_smp_thr_progress_block();
+ erts_set_this_node(am_Noname, 0);
+ erts_is_alive = 0;
+ send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nodedown.reason);
+ nodedown.reason = NIL;
+ bp = nodedown.bp;
+ nodedown.bp = NULL;
+ erts_smp_thr_progress_unblock();
+ if (bp)
+ free_message_buffer(bp);
+}
+
+static ERTS_INLINE void
+dec_no_nodes(void)
+{
+ erts_aint_t no = erts_smp_atomic_dec_read_mb(&no_nodes);
+ ASSERT(no >= 0);
+ ASSERT(erts_get_scheduler_id()); /* Need to be a scheduler */
+ if (no == 0)
+ erts_schedule_misc_aux_work(erts_get_scheduler_id(),
+ set_node_not_alive,
+ NULL);
+}
+
+static ERTS_INLINE void
+inc_no_nodes(void)
+{
+#ifdef DEBUG
+ erts_aint_t no = erts_smp_atomic_read_nob(&no_nodes);
+ ASSERT(erts_is_alive ? no > 0 : no == 0);
+#endif
+ erts_smp_atomic_inc_mb(&no_nodes);
+}
/*
* proc is currently running or exiting process.
@@ -403,47 +450,76 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
Eterm nodename;
if (dep == erts_this_dist_entry) { /* Net kernel has died (clean up!!) */
+ DistEntry *tdep;
+ int no_dist_port = 0;
Eterm nd_reason = (reason == am_no_network
? am_no_network
: am_net_kernel_terminated);
erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+ for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next)
+ no_dist_port++;
+ for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next)
+ no_dist_port++;
+
/* KILL all port controllers */
- while(erts_visible_dist_entries || erts_hidden_dist_entries) {
- DistEntry *tdep;
- Eterm prt_id;
- Port *prt;
- if(erts_hidden_dist_entries)
- tdep = erts_hidden_dist_entries;
- else
- tdep = erts_visible_dist_entries;
- prt_id = tdep->cid;
- ASSERT(is_internal_port(prt_id));
+ if (no_dist_port == 0)
erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ else {
+ Eterm def_buf[128];
+ int i = 0;
+ Eterm *dist_port;
- prt = erts_id2port(prt_id, NULL, 0);
- if (prt) {
- ASSERT(prt->status & ERTS_PORT_SFLG_DISTRIBUTION);
- ASSERT(prt->dist_entry);
- /* will call do_net_exists !!! */
- erts_do_exit_port(prt, prt_id, nd_reason);
- erts_port_release(prt);
+ if (no_dist_port <= sizeof(def_buf)/sizeof(def_buf[0]))
+ dist_port = &def_buf[0];
+ else
+ dist_port = erts_alloc(ERTS_ALC_T_TMP,
+ sizeof(Eterm)*no_dist_port);
+ for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) {
+ ASSERT(is_internal_port(tdep->cid));
+ dist_port[i++] = tdep->cid;
}
+ for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) {
+ ASSERT(is_internal_port(tdep->cid));
+ dist_port[i++] = tdep->cid;
+ }
+ erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
- erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
- }
+ for (i = 0; i < no_dist_port; i++) {
+ Port *prt = erts_port_lookup(dist_port[i],
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (!prt)
+ continue;
+ ASSERT(erts_atomic32_read_nob(&prt->state)
+ & ERTS_PORT_SFLG_DISTRIBUTION);
- erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+ erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED,
+ prt, dist_port[i], nd_reason, NULL);
+ }
- nodename = erts_this_dist_entry->sysname;
- erts_smp_thr_progress_block();
- erts_set_this_node(am_Noname, 0);
- erts_is_alive = 0;
- send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nd_reason);
- erts_smp_thr_progress_unblock();
+ if (dist_port != &def_buf[0])
+ erts_free(ERTS_ALC_T_TMP, dist_port);
+ }
+ /*
+ * When the last dist port exits, the node is taken
+ * from alive to not alive.
+ */
+ ASSERT(is_nil(nodedown.reason) && !nodedown.bp);
+ if (is_immed(nd_reason))
+ nodedown.reason = nd_reason;
+ else {
+ Eterm *hp;
+ Uint sz = size_object(nd_reason);
+ nodedown.bp = new_message_buffer(sz);
+ hp = nodedown.bp->mem;
+ nodedown.reason = copy_struct(nd_reason,
+ sz,
+ &hp,
+ &nodedown.bp->off_heap);
+ }
}
- else { /* recursive call via erts_do_exit_port() will end up here */
+ else { /* Call from distribution port */
NetExitsContext nec = {dep};
ErtsLink *nlinks;
ErtsLink *node_links;
@@ -454,10 +530,10 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
erts_smp_de_rwlock(dep);
ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid)
- && erts_lc_is_port_locked(&erts_port[internal_port_index(dep->cid)]));
+ && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid)));
if (erts_port_task_is_scheduled(&dep->dist_cmd))
- erts_port_task_abort(dep->cid, &dep->dist_cmd);
+ erts_port_task_abort(&dep->dist_cmd);
if (dep->status & ERTS_DE_SFLG_EXITING) {
#ifdef DEBUG
@@ -503,6 +579,9 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason)
clear_dist_entry(dep);
}
+
+ dec_no_nodes();
+
return 1;
}
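
The reworked net-kernel shutdown path above first snapshots the distribution port ids while holding the dist-table rwlock, then delivers the port exits with the lock released. A rough sketch of that snapshot-then-act pattern, including the small on-stack buffer that spills to the heap; this is plain C with invented types, not the erts allocator or port API:

#include <stdlib.h>
#include <stddef.h>

typedef unsigned long PortId;                       /* stand-in for Eterm port ids */
struct entry { PortId cid; struct entry *next; };

/* Copy the port ids of all entries into a buffer; use def_buf when it is
 * large enough, otherwise fall back to malloc() (assumed to succeed in this
 * sketch).  The caller holds the table lock only for the duration of the call
 * and counts the entries (n) beforehand. */
static PortId *snapshot_ids(struct entry *list, size_t n,
                            PortId *def_buf, size_t def_len, int *heap_used)
{
    PortId *ids = (n <= def_len) ? def_buf : malloc(n * sizeof(PortId));
    size_t i = 0;
    *heap_used = (ids != def_buf);
    for (; list; list = list->next)
        ids[i++] = list->cid;
    return ids;
}

After the lock is dropped, each saved id is looked up again (erts_port_lookup() in the patch) and may legitimately be gone by then, which is why a NULL lookup result is simply skipped.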
@@ -516,6 +595,10 @@ void init_dist(void)
{
init_nodes_monitors();
+ nodedown.reason = NIL;
+ nodedown.bp = NULL;
+
+ erts_smp_atomic_init_nob(&no_nodes, 0);
erts_smp_atomic_init_nob(&no_caches, 0);
/* Lookup/Install all references to trap functions */
@@ -769,7 +852,7 @@ erts_dsig_send_msg(ErtsDSigData *dsdp, Eterm remote, Eterm message)
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(receiver_name), "%T", remote);
msize = size_object(message);
if (token != NIL && token != am_have_dt_utag) {
@@ -826,7 +909,7 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
*node_name = *sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send) || DTRACE_ENABLED(message_send_remote)) {
erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
erts_snprintf(receiver_name, sizeof(receiver_name),
"{%T,%s}", remote_name, node_name);
msize = size_object(message);
@@ -840,10 +923,10 @@ erts_dsig_send_reg_msg(ErtsDSigData *dsdp, Eterm remote_name, Eterm message)
if (token != NIL)
ctl = TUPLE5(&ctl_heap[0], make_small(DOP_REG_SEND_TT),
- sender->id, am_Cookie, remote_name, token);
+ sender->common.id, am_Cookie, remote_name, token);
else
ctl = TUPLE4(&ctl_heap[0], make_small(DOP_REG_SEND),
- sender->id, am_Cookie, remote_name);
+ sender->common.id, am_Cookie, remote_name);
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
DTRACE7(message_send_remote, sender_name, node_name, receiver_name,
@@ -889,7 +972,7 @@ erts_dsig_send_exit_tt(ErtsDSigData *dsdp, Eterm local, Eterm remote,
*node_name = *sender_name = *remote_name = '\0';
if (DTRACE_ENABLED(process_exit_signal_remote)) {
erts_snprintf(node_name, sizeof(node_name), "%T", dsdp->dep->sysname);
- erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->id);
+ erts_snprintf(sender_name, sizeof(sender_name), "%T", sender->common.id);
erts_snprintf(remote_name, sizeof(remote_name),
"{%T,%s}", remote, node_name);
erts_snprintf(reason_str, sizeof(reason), "%T", reason);
@@ -1141,7 +1224,7 @@ int erts_net_message(Port *prt,
}
erts_smp_de_links_lock(dep);
- res = erts_add_link(&(rp->nlinks), LINK_PID, from);
+ res = erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, from);
if (res < 0) {
/* It was already there! Lets skip the rest... */
@@ -1149,7 +1232,7 @@ int erts_net_message(Port *prt,
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
break;
}
- lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->id);
+ lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->common.id);
erts_add_link(&(ERTS_LINK_ROOT(lnk)), LINK_PID, from);
erts_smp_de_links_unlock(dep);
@@ -1176,7 +1259,7 @@ int erts_net_message(Port *prt,
if (!rp)
break;
- lnk = erts_remove_link(&(rp->nlinks), from);
+ lnk = erts_remove_link(&ERTS_P_LINKS(rp), from);
if (IS_TRACED_FL(rp, F_TRACE_PROCS) && lnk != NULL) {
trace_proc(NULL, rp, am_getting_unlinked, from);
@@ -1233,10 +1316,10 @@ int erts_net_message(Port *prt,
}
else {
if (is_atom(watched))
- watched = rp->id;
+ watched = rp->common.id;
erts_smp_de_links_lock(dep);
erts_add_monitor(&(dep->monitors), MON_ORIGIN, ref, watched, name);
- erts_add_monitor(&(rp->monitors), MON_TARGET, ref, watcher, name);
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, watcher, name);
erts_smp_de_links_unlock(dep);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
}
@@ -1275,7 +1358,7 @@ int erts_net_message(Port *prt,
if (!rp) {
break;
}
- mon = erts_remove_monitor(&(rp->monitors),ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
ASSERT(mon != NULL);
if (mon == NULL) {
@@ -1312,7 +1395,7 @@ int erts_net_message(Port *prt,
if (is_not_pid(from) || is_not_atom(to)){
goto invalid_message;
}
- rp = erts_whereis_process(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ rp = erts_whereis_process(NULL, 0, to, 0, 0);
if (rp) {
Uint xsize = (type == DOP_REG_SEND
? 0
@@ -1338,7 +1421,6 @@ int erts_net_message(Port *prt,
erts_queue_dist_message(rp, &locks, ede_copy, token);
if (locks)
erts_smp_proc_unlock(rp, locks);
- erts_smp_proc_dec_refc(rp);
}
break;
@@ -1364,7 +1446,7 @@ int erts_net_message(Port *prt,
if (is_not_pid(to)) {
goto invalid_message;
}
- rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ rp = erts_proc_lookup(to);
if (rp) {
Uint xsize = type == DOP_SEND ? 0 : ERTS_HEAP_FRAG_SIZE(token_size);
ErtsProcLocks locks = 0;
@@ -1388,7 +1470,6 @@ int erts_net_message(Port *prt,
erts_queue_dist_message(rp, &locks, ede_copy, token);
if (locks)
erts_smp_proc_unlock(rp, locks);
- erts_smp_proc_dec_refc(rp);
}
break;
@@ -1434,7 +1515,7 @@ int erts_net_message(Port *prt,
erts_destroy_monitor(mon);
- mon = erts_remove_monitor(&(rp->monitors),ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
if (mon == NULL) {
erts_smp_proc_unlock(rp, rp_locks);
@@ -1485,7 +1566,7 @@ int erts_net_message(Port *prt,
if (!rp)
lnk = NULL;
else {
- lnk = erts_remove_link(&(rp->nlinks), from);
+ lnk = erts_remove_link(&ERTS_P_LINKS(rp), from);
/* If lnk == NULL, we have unlinked on this side, i.e.
* ignore exit.
@@ -1544,8 +1625,7 @@ int erts_net_message(Port *prt,
if (is_not_pid(from) || is_not_internal_pid(to)) {
goto invalid_message;
}
- rp = erts_pid2proc_opt(NULL, 0, to, rp_locks,
- ERTS_P2P_FLG_SMP_INC_REFC);
+ rp = erts_pid2proc(NULL, 0, to, rp_locks);
if (rp) {
(void) erts_send_exit_signal(NULL,
from,
@@ -1556,7 +1636,6 @@ int erts_net_message(Port *prt,
NULL,
0);
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
}
break;
}
@@ -1601,7 +1680,7 @@ int erts_net_message(Port *prt,
erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl);
}
UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE);
- erts_do_exit_port(prt, dep->cid, am_killed);
+ erts_deliver_port_exit(prt, dep->cid, am_killed, 0);
ERTS_SMP_CHK_NO_PROC_LOCKS;
return -1;
}
@@ -1697,7 +1776,6 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
erts_smp_mtx_unlock(&dep->qlock);
plp = erts_proclist_create(c_p);
- plp->next = NULL;
erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
suspended = 1;
erts_smp_mtx_lock(&dep->qlock);
@@ -1730,11 +1808,7 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
else {
/* Enqueue suspended process on dist entry */
ASSERT(plp);
- if (dep->suspended.last)
- dep->suspended.last->next = plp;
- else
- dep->suspended.first = plp;
- dep->suspended.last = plp;
+ erts_proclist_store_last(&dep->suspended, plp);
}
}
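
erts_proclist_store_last() replaces the open-coded first/last pointer juggling the old code used for the dist entry's suspended queue, which is also why the explicit plp->next = NULL above could be dropped. A generic sketch of such an O(1) append helper on a list with a tail pointer; the field names are made up and do not reflect the actual ErtsProcList layout:

struct plist_node { struct plist_node *next; /* payload omitted */ };
struct plist { struct plist_node *first, *last; };

/* Append at the tail; handles both the empty and the non-empty case. */
static void plist_store_last(struct plist *q, struct plist_node *n)
{
    n->next = NULL;
    if (q->last)
        q->last->next = n;
    else
        q->first = n;
    q->last = n;
}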
@@ -1783,7 +1857,7 @@ dsig_send(ErtsDSigData *dsdp, Eterm ctl, Eterm msg, int force_busy)
erts_snprintf(port_str, sizeof(port_str), "%T", cid);
erts_snprintf(remote_str, sizeof(remote_str), "%T", dep->sysname);
- erts_snprintf(pid_str, sizeof(pid_str), "%T", c_p->id);
+ erts_snprintf(pid_str, sizeof(pid_str), "%T", c_p->common.id);
DTRACE4(dist_port_busy, erts_this_node_sysname,
port_str, remote_str, pid_str);
}
@@ -1816,7 +1890,7 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
erts_snprintf(remote_str, sizeof(remote_str),
"%T", prt->dist_entry->sysname);
DTRACE4(dist_output, erts_this_node_sysname, port_str,
@@ -1870,7 +1944,7 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
erts_snprintf(remote_str, sizeof(remote_str),
"%T", prt->dist_entry->sysname);
DTRACE4(dist_outputv, erts_this_node_sysname, port_str,
@@ -1907,13 +1981,13 @@ int
erts_dist_command(Port *prt, int reds_limit)
{
Sint reds = ERTS_PORT_REDS_DIST_CMD_START;
- int prt_busy;
Uint32 status;
Uint32 flags;
Sint obufsize = 0;
ErtsDistOutputQueue oq, foq;
DistEntry *dep = prt->dist_entry;
Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
+ erts_aint32_t sched_flags;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
@@ -1929,7 +2003,7 @@ erts_dist_command(Port *prt, int reds_limit)
erts_smp_de_runlock(dep);
if (status & ERTS_DE_SFLG_EXITING) {
- erts_do_exit_port(prt, prt->id, am_killed);
+ erts_deliver_port_exit(prt, prt->common.id, am_killed, 0);
erts_deref_dist_entry(dep);
return reds + ERTS_PORT_REDS_DIST_CMD_EXIT;
}
@@ -1956,12 +2030,12 @@ erts_dist_command(Port *prt, int reds_limit)
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+
if (reds > reds_limit)
goto preempted;
- prt_busy = (int) (prt->status & ERTS_PORT_SFLG_PORT_BUSY);
-
- if (!prt_busy && foq.first) {
+ if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) && foq.first) {
int preempt = 0;
do {
Uint size;
@@ -1973,15 +2047,15 @@ erts_dist_command(Port *prt, int reds_limit)
bw(foq.first->extp, size);
#endif
reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ erts_smp_atomic_add_nob(&erts_bytes_out, size);
fob = foq.first;
obufsize += size_obuf(fob);
foq.first = foq.first->next;
free_dist_obuf(fob);
- preempt = reds > reds_limit || (prt->status & ERTS_PORT_SFLGS_DEAD);
- if (prt->status & ERTS_PORT_SFLG_PORT_BUSY) {
- prt_busy = 1;
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT);
+ if (sched_flags & ERTS_PTS_FLG_BUSY_PORT)
break;
- }
} while (foq.first && !preempt);
if (!foq.first)
foq.last = NULL;
@@ -1989,7 +2063,7 @@ erts_dist_command(Port *prt, int reds_limit)
goto preempted;
}
- if (prt_busy) {
+ if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) {
if (oq.first) {
ErtsDistOutputBuf *ob;
int preempt;
@@ -2056,16 +2130,15 @@ erts_dist_command(Port *prt, int reds_limit)
bw(oq.first->extp, size);
#endif
reds += ERTS_PORT_REDS_DIST_CMD_DATA(size);
+ erts_smp_atomic_add_nob(&erts_bytes_out, size);
fob = oq.first;
obufsize += size_obuf(fob);
oq.first = oq.first->next;
free_dist_obuf(fob);
- preempt = reds > reds_limit || (prt->status & ERTS_PORT_SFLGS_DEAD);
- if (prt->status & ERTS_PORT_SFLG_PORT_BUSY) {
- prt_busy = 1;
- if (oq.first && !preempt)
- goto finalize_only;
- }
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT);
+ if ((sched_flags & ERTS_PTS_FLG_BUSY_PORT) && oq.first && !preempt)
+ goto finalize_only;
}
ASSERT(!oq.first || preempt);
@@ -2093,7 +2166,7 @@ erts_dist_command(Port *prt, int reds_limit)
ASSERT(dep->qsize >= obufsize);
dep->qsize -= obufsize;
obufsize = 0;
- if (!prt_busy
+ if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)
&& (dep->qflgs & ERTS_DE_QFLG_BUSY)
&& dep->qsize < erts_dist_buf_busy_limit) {
ErtsProcList *suspendees;
@@ -2139,11 +2212,15 @@ erts_dist_command(Port *prt, int reds_limit)
return reds;
preempted:
+ /*
+ * Here we assume that state has been read
+ * since last call to driver.
+ */
ASSERT(oq.first || !oq.last);
ASSERT(!oq.first || oq.last);
- if (prt->status & ERTS_PORT_SFLGS_DEAD) {
+ if (sched_flags & ERTS_PTS_FLG_EXIT) {
/*
* Port died during port command; clean up 'oq'
* and 'foq'. Things buffered in dist entry after
@@ -2201,7 +2278,7 @@ erts_dist_port_not_busy(Port *prt)
DTRACE_CHARBUF(port_str, 64);
DTRACE_CHARBUF(remote_str, 64);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
erts_snprintf(remote_str, sizeof(remote_str),
"%T", prt->dist_entry->sysname);
DTRACE3(dist_port_not_busy, erts_this_node_sysname,
@@ -2242,8 +2319,8 @@ static void doit_print_monitor_info(ErtsMonitor *mon, void *vptdp)
void *arg = ((struct print_to_data *) vptdp)->arg;
Process *rp;
ErtsMonitor *rmon;
- rp = erts_pid2proc_unlocked(mon->pid);
- if (!rp || (rmon = erts_lookup_monitor(rp->monitors, mon->ref)) == NULL) {
+ rp = erts_proc_lookup(mon->pid);
+ if (!rp || (rmon = erts_lookup_monitor(ERTS_P_MONITORS(rp), mon->ref)) == NULL) {
erts_print(to, arg, "Warning, stray monitor for: %T\n", mon->pid);
} else if (mon->type == MON_ORIGIN) {
/* Local pid is being monitored */
@@ -2281,7 +2358,7 @@ static void doit_print_link_info2(ErtsLink *lnk, void *vpplc)
static void doit_print_link_info(ErtsLink *lnk, void *vptdp)
{
- if (is_internal_pid(lnk->pid) && erts_pid2proc_unlocked(lnk->pid)) {
+ if (is_internal_pid(lnk->pid) && erts_proc_lookup(lnk->pid)) {
PrintLinkContext plc = {(struct print_to_data *) vptdp, lnk->pid};
erts_doforall_links(ERTS_LINK_ROOT(lnk), &doit_print_link_info2, &plc);
}
@@ -2303,7 +2380,7 @@ static void doit_print_nodelink_info(ErtsLink *lnk, void *vpcontext)
{
PrintNodeLinkContext *pcontext = vpcontext;
- if (is_internal_pid(lnk->pid) && erts_pid2proc_unlocked(lnk->pid))
+ if (is_internal_pid(lnk->pid) && erts_proc_lookup(lnk->pid))
erts_print(pcontext->ptd.to, pcontext->ptd.arg,
"Remote monitoring: %T %T\n", lnk->pid, pcontext->sysname);
}
@@ -2451,15 +2528,15 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
goto error;
/* Check that all trap functions are defined !! */
- if (dsend2_trap->address == NULL ||
- dsend3_trap->address == NULL ||
+ if (dsend2_trap->addressv[0] == NULL ||
+ dsend3_trap->addressv[0] == NULL ||
/* dsend_nosuspend_trap->address == NULL ||*/
- dlink_trap->address == NULL ||
- dunlink_trap->address == NULL ||
- dmonitor_node_trap->address == NULL ||
- dgroup_leader_trap->address == NULL ||
- dmonitor_p_trap->address == NULL ||
- dexit_trap->address == NULL) {
+ dlink_trap->addressv[0] == NULL ||
+ dunlink_trap->addressv[0] == NULL ||
+ dmonitor_node_trap->addressv[0] == NULL ||
+ dgroup_leader_trap->addressv[0] == NULL ||
+ dmonitor_p_trap->addressv[0] == NULL ||
+ dexit_trap->addressv[0] == NULL) {
goto error;
}
@@ -2488,6 +2565,7 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
+ inc_no_nodes();
erts_set_this_node(BIF_ARG_1, (Uint32) creation);
erts_is_alive = 1;
send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL);
@@ -2556,9 +2634,9 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
/* DFLAG_EXTENDED_REFERENCES is compulsory from R9 and forward */
if (!(DFLAG_EXTENDED_REFERENCES & flags)) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "%T", BIF_P->id);
- if (BIF_P->reg)
- erts_dsprintf(dsbufp, " (%T)", BIF_P->reg->name);
+ erts_dsprintf(dsbufp, "%T", BIF_P->common.id);
+ if (BIF_P->common.u.alive.reg)
+ erts_dsprintf(dsbufp, " (%T)", BIF_P->common.u.alive.reg->name);
erts_dsprintf(dsbufp,
" attempted to enable connection to node %T "
"which is not able to handle extended references.\n",
@@ -2578,10 +2656,14 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
else if (!dep)
goto system_limit; /* Should never happen!!! */
- pp = erts_id2port(BIF_ARG_2, BIF_P, ERTS_PROC_LOCK_MAIN);
+ pp = erts_id2port_sflgs(BIF_ARG_2,
+ BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
erts_smp_de_rwlock(dep);
- if (!pp || (pp->status & ERTS_PORT_SFLG_EXITING))
+ if (!pp || (erts_atomic32_read_nob(&pp->state)
+ & ERTS_PORT_SFLG_EXITING))
goto badarg;
if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
@@ -2596,11 +2678,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
plp->next = NULL;
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
erts_smp_mtx_lock(&dep->qlock);
- if (dep->suspended.last)
- dep->suspended.last->next = plp;
- else
- dep->suspended.first = plp;
- dep->suspended.last = plp;
+ erts_proclist_store_last(&dep->suspended, plp);
erts_smp_mtx_unlock(&dep->qlock);
goto yield;
}
@@ -2610,7 +2688,16 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
if (pp->dist_entry || is_not_nil(dep->cid))
goto badarg;
- erts_port_status_bor_set(pp, ERTS_PORT_SFLG_DISTRIBUTION);
+ erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
+
+ /*
+ * Dist-ports do not use the "busy port message queue" functionality, but
+ * instead use "busy dist entry" functionality.
+ */
+ {
+ ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
+ erl_drv_busy_msgq_limits((ErlDrvPort) pp, &disable, NULL);
+ }
pp->dist_entry = dep;
@@ -2642,6 +2729,8 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
erts_smp_de_rwunlock(dep);
dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+ inc_no_nodes();
+
send_nodes_mon_msgs(BIF_P,
am_nodeup,
BIF_ARG_1,
@@ -2655,7 +2744,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
}
if (pp)
- erts_smp_port_unlock(pp);
+ erts_port_release(pp);
return ret;
@@ -2699,16 +2788,15 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
if (is_internal_pid(local)) {
Process *lp;
ErtsProcLocks lp_locks;
- if (BIF_P->id == local) {
+ if (BIF_P->common.id == local) {
lp_locks = ERTS_PROC_LOCKS_ALL;
lp = BIF_P;
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR);
}
else {
lp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- lp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN,
- local, lp_locks,
- ERTS_P2P_FLG_SMP_INC_REFC);
+ lp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ local, lp_locks);
if (!lp) {
BIF_RET(am_true); /* ignore */
}
@@ -2727,14 +2815,18 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
lp_locks &= ~ERTS_PROC_LOCK_MAIN;
#endif
erts_smp_proc_unlock(lp, lp_locks);
- if (lp != BIF_P)
- erts_smp_proc_dec_refc(lp);
- else {
+ if (lp == BIF_P) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&BIF_P->state);
/*
* We may have exited current process and may have to take action.
*/
- ERTS_BIF_CHK_EXITED(BIF_P);
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
+#ifdef ERTS_SMP
+ if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
+#endif
+ ERTS_BIF_EXITED(BIF_P);
+ }
}
}
else if (is_external_pid(local)
@@ -2932,23 +3024,23 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options)
if (Bool == am_true) {
ASSERT(dep->cid != NIL);
lnk = erts_add_or_lookup_link(&(dep->node_links), LINK_NODE,
- p->id);
+ p->common.id);
++ERTS_LINK_REFC(lnk);
- lnk = erts_add_or_lookup_link(&(p->nlinks), LINK_NODE, Node);
+ lnk = erts_add_or_lookup_link(&ERTS_P_LINKS(p), LINK_NODE, Node);
++ERTS_LINK_REFC(lnk);
}
else {
- lnk = erts_lookup_link(dep->node_links, p->id);
+ lnk = erts_lookup_link(dep->node_links, p->common.id);
if (lnk != NULL) {
if ((--ERTS_LINK_REFC(lnk)) == 0) {
erts_destroy_link(erts_remove_link(&(dep->node_links),
- p->id));
+ p->common.id));
}
}
- lnk = erts_lookup_link(p->nlinks, Node);
+ lnk = erts_lookup_link(ERTS_P_LINKS(p), Node);
if (lnk != NULL) {
if ((--ERTS_LINK_REFC(lnk)) == 0) {
- erts_destroy_link(erts_remove_link(&(p->nlinks),
+ erts_destroy_link(erts_remove_link(&ERTS_P_LINKS(p),
Node));
}
}
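
monitor_node/3 above keeps a per-link reference count so that repeated monitor_node(Node, true) calls stack, and the link is only torn down when the count drops back to zero. A compact sketch of that idiom in plain C with hypothetical names:

#include <stdlib.h>

struct node_link { long refc; /* key, tree pointers, ... omitted */ };

/* monitor_node(Node, true): create on first use, then just bump the count. */
static void link_ref(struct node_link **slot)
{
    if (!*slot)
        *slot = calloc(1, sizeof(struct node_link));
    (*slot)->refc++;
}

/* monitor_node(Node, false): drop one reference; destroy only at zero. */
static void link_unref(struct node_link **slot)
{
    if (*slot && --(*slot)->refc == 0) {
        free(*slot);
        *slot = NULL;
    }
}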
@@ -3510,7 +3602,7 @@ erts_processes_monitoring_nodes(Process *c_p)
olist = erts_bld_cons(hpp, szp, am_nodedown_reason, olist);
res = erts_bld_cons(hpp, szp,
erts_bld_tuple(hpp, szp, 2,
- nmp->proc->id,
+ nmp->proc->common.id,
olist),
res);
}
diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h
index 845151c895..2bc3d9c881 100644
--- a/erts/emulator/beam/dist.h
+++ b/erts/emulator/beam/dist.h
@@ -187,11 +187,12 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
if (prt) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ASSERT((erts_port_status_get(prt) & ERTS_PORT_SFLGS_DEAD) == 0);
+ ASSERT((erts_atomic32_read_nob(&prt->state)
+ & ERTS_PORT_SFLGS_DEAD) == 0);
ASSERT(prt->dist_entry);
dep = prt->dist_entry;
- id = prt->id;
+ id = prt->common.id;
}
else {
ASSERT(dist_entry);
@@ -203,13 +204,8 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
id = dep->cid;
}
- if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) {
- (void) erts_port_task_schedule(id,
- &dep->dist_cmd,
- ERTS_PORT_TASK_DIST_CMD,
- (ErlDrvEvent) -1,
- NULL);
- }
+ if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1))
+ erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD);
}
#endif
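
erts_schedule_dist_command() keeps using dist_cmd_scheduled as a once-guard: an atomic exchange to 1 returns the previous value, so only the first caller between executions actually schedules the port task. A sketch with C11 atomics, where atomic_exchange stands in for erts_smp_atomic_xchg_mb():

#include <stdatomic.h>

static atomic_int dist_cmd_scheduled_model = 0;

static void schedule_dist_cmd_task(void) { /* enqueue the port task */ }

static void schedule_dist_command_model(void)
{
    /* Old value 0 means "not yet scheduled"; exactly one caller sees it. */
    if (!atomic_exchange(&dist_cmd_scheduled_model, 1))
        schedule_dist_cmd_task();
}

/* The scheduled task resets the flag to 0 around its run, so a later
 * enqueue attempt can schedule the command again. */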
diff --git a/erts/emulator/beam/erl_afit_alloc.c b/erts/emulator/beam/erl_afit_alloc.c
index 570cc59be2..306b32691c 100644
--- a/erts/emulator/beam/erl_afit_alloc.c
+++ b/erts/emulator/beam/erl_afit_alloc.c
@@ -37,6 +37,12 @@
#define GET_ERL_AF_ALLOC_IMPL
#include "erl_afit_alloc.h"
+struct AFFreeBlock_t_ {
+ Block_t block_head;
+ AFFreeBlock_t *prev;
+ AFFreeBlock_t *next;
+};
+#define AF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->block_head)
#define MIN_MBC_SZ (16*1024)
#define MIN_MBC_FIRST_FREE_SZ (4*1024)
@@ -80,7 +86,6 @@ erts_afalc_start(AFAllctr_t *afallctr,
init->sbmbct = 0; /* Small mbc not supported by afit */
- allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = sizeof(AFFreeBlock_t);
@@ -118,7 +123,7 @@ get_free_block(Allctr_t *allctr, Uint size, Block_t *cand_blk, Uint cand_size,
ASSERT(!cand_blk || cand_size >= size);
- if (afallctr->free_list && BLK_SZ(afallctr->free_list) >= size) {
+ if (afallctr->free_list && AF_BLK_SZ(afallctr->free_list) >= size) {
AFFreeBlock_t *res = afallctr->free_list;
afallctr->free_list = res->next;
if (res->next)
@@ -135,7 +140,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
AFFreeBlock_t *blk = (AFFreeBlock_t *) block;
AFAllctr_t *afallctr = (AFAllctr_t *) allctr;
- if (afallctr->free_list && BLK_SZ(afallctr->free_list) > BLK_SZ(blk)) {
+ if (afallctr->free_list && AF_BLK_SZ(afallctr->free_list) > AF_BLK_SZ(blk)) {
blk->next = afallctr->free_list->next;
blk->prev = afallctr->free_list;
afallctr->free_list->next = blk;
diff --git a/erts/emulator/beam/erl_afit_alloc.h b/erts/emulator/beam/erl_afit_alloc.h
index ea408a7194..87caccac20 100644
--- a/erts/emulator/beam/erl_afit_alloc.h
+++ b/erts/emulator/beam/erl_afit_alloc.h
@@ -49,11 +49,6 @@ Allctr_t *erts_afalc_start(AFAllctr_t *, AFAllctrInit_t *, AllctrInit_t *);
#include "erl_alloc_util.h"
typedef struct AFFreeBlock_t_ AFFreeBlock_t;
-struct AFFreeBlock_t_ {
- Block_t block_head;
- AFFreeBlock_t *prev;
- AFFreeBlock_t *next;
-};
struct AFAllctr_t_ {
Allctr_t allctr; /* Has to be first! */
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 6fce032f9d..5da8c69c03 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -310,9 +310,9 @@ set_default_ll_alloc_opts(struct au_init *ip)
ip->init.util.name_prefix = "ll_";
ip->init.util.alloc_no = ERTS_ALC_A_LONG_LIVED;
#ifndef SMALL_MEMORY
- ip->init.util.mmbcs = 2*1024*1024; /* Main carrier size */
+ ip->init.util.mmbcs = 2*1024*1024 - 40; /* Main carrier size */
#else
- ip->init.util.mmbcs = 1*1024*1024; /* Main carrier size */
+ ip->init.util.mmbcs = 1*1024*1024 - 40; /* Main carrier size */
#endif
ip->init.util.ts = ERTS_ALC_MTA_LONG_LIVED;
ip->init.util.asbcst = 0;
@@ -1173,6 +1173,11 @@ handle_au_arg(struct au_init *auip,
break;
case 'e':
auip->enable = get_bool_value(sub_param+1, argv, ip);
+#if !HAVE_ERTS_SBMBC
+ if (auip->init.util.alloc_no == ERTS_ALC_A_SBMBC) {
+ auip->enable = 0;
+ }
+#endif
break;
case 'l':
if (has_prefix("lmbcs", sub_param)) {
@@ -1233,10 +1238,16 @@ handle_au_arg(struct au_init *auip,
auip->init.util.sbct = get_kb_value(sub_param + 4, argv, ip);
}
else if (has_prefix("sbmbcs", sub_param)) {
- auip->init.util.sbmbcs = get_byte_value(sub_param + 6, argv, ip);
+#if HAVE_ERTS_SBMBC
+ auip->init.util.sbmbcs =
+#endif
+ get_byte_value(sub_param + 6, argv, ip);
}
else if (has_prefix("sbmbct", sub_param)) {
- auip->init.util.sbmbct = get_byte_value(sub_param + 6, argv, ip);
+#if HAVE_ERTS_SBMBC
+ auip->init.util.sbmbct =
+#endif
+ get_byte_value(sub_param + 6, argv, ip);
}
else if (has_prefix("smbcs", sub_param)) {
auip->default_.smbcs = 0;
@@ -1403,6 +1414,9 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init)
else if (strcmp("max", arg) == 0) {
for (a = 0; a < aui_sz; a++)
aui[a]->enable = 1;
+#if !HAVE_ERTS_SBMBC
+ init->sbmbc_alloc.enable = 0;
+#endif
}
else if (strcmp("config", arg) == 0) {
init->erts_alloc_config = 1;
@@ -1675,7 +1689,7 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
t_str = type_no_str(n);
if (!t_str) {
- sprintf(buf, "%d", (int) n);
+ erts_snprintf(buf, sizeof(buf), "%d", (int) n);
t_str = buf;
}
@@ -2128,6 +2142,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (want_tot_or_sys || want.processes || want.processes_used) {
+ int max_processes = erts_ptab_max(&erts_proc);
UWord tmp;
if (ERTS_MEM_NEED_ALL_ALCU)
@@ -2137,7 +2152,7 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
fi, ERTS_ALC_NO_FIXED_SIZES);
tmp = alcu_size(ERTS_ALC_A_EHEAP, NULL, 0);
}
- tmp += erts_max_processes*sizeof(Process*);
+ tmp += max_processes*sizeof(erts_smp_atomic_t);
tmp += erts_bif_timer_memory_size();
tmp += erts_tot_link_lh_size();
@@ -2182,9 +2197,9 @@ erts_memory(int *print_to_p, void *print_to_arg, void *proc, Eterm earg)
if (want.code) {
size.code = module_table_sz();
size.code += export_table_sz();
- size.code += export_list_size() * sizeof(Export);
+ size.code += export_entries_sz();
size.code += erts_fun_table_sz();
- size.code += allocated_modules*sizeof(Range);
+ size.code += erts_ranges_sz();
size.code += erts_total_code_size;
}
@@ -2268,6 +2283,8 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
Eterm res = THE_NON_VALUE;
int i, length;
Uint reserved_atom_space, atom_space;
+ int max_processes = erts_ptab_max(&erts_proc);
+ int max_ports = erts_ptab_max(&erts_port);
if (proc) {
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN
@@ -2299,12 +2316,8 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "static";
values[i].ui[0] =
- erts_max_ports*sizeof(Port) /* Port table */
- + erts_timer_wheel_memory_size() /* Timer wheel */
-#ifdef SYS_TMP_BUF_SIZE
- + SYS_TMP_BUF_SIZE /* tmp_buf in sys on vxworks & ose */
-#endif
- ;
+ max_ports*sizeof(erts_smp_atomic_t) /* Port table */
+ + erts_timer_wheel_memory_size(); /* Timer wheel */
i++;
erts_atom_get_text_space_sizes(&reserved_atom_space, &atom_space);
@@ -2332,7 +2345,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "export_list";
- values[i].ui[0] = export_list_size() * sizeof(Export);
+ values[i].ui[0] = export_entries_sz();
i++;
values[i].arity = 2;
@@ -2347,7 +2360,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "module_refs";
- values[i].ui[0] = allocated_modules*sizeof(Range);
+ values[i].ui[0] = erts_ranges_sz();
i++;
values[i].arity = 2;
@@ -2382,7 +2395,7 @@ erts_allocated_areas(int *print_to_p, void *print_to_arg, void *proc)
values[i].arity = 2;
values[i].name = "process_table";
- values[i].ui[0] = erts_max_processes*sizeof(Process*);
+ values[i].ui[0] = max_processes*sizeof(Process*);
i++;
values[i].arity = 2;
@@ -3576,12 +3589,12 @@ check_memory_fence(void *ptr, Uint *size, ErtsAlcType_t n, int func)
ftype = type_no_str(found_type);
if (!ftype) {
- sprintf(fbuf, "%d", (int) found_type);
+ erts_snprintf(fbuf, sizeof(fbuf), "%d", (int) found_type);
ftype = fbuf;
}
otype = type_no_str(n);
if (!otype) {
- sprintf(obuf, "%d", (int) n);
+ erts_snprintf(obuf, sizeof(obuf), "%d", (int) n);
otype = obuf;
}
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index e475f9d8a2..ba5ec9c367 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -267,6 +267,8 @@ typedef void (*erts_alloc_verify_func_t)(Allctr_t *);
erts_alloc_verify_func_t
erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr);
+#define ERTS_ALC_DATA_ALIGN_SIZE(SZ) \
+ (((((SZ) - 1) / 8) + 1) * 8)
#define ERTS_ALC_CACHE_LINE_ALIGN_SIZE(SZ) \
(((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE)
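
The new ERTS_ALC_DATA_ALIGN_SIZE macro rounds a size up to the next multiple of 8 with the usual ((SZ - 1) / 8 + 1) * 8 trick, just as the cache-line variant rounds up to ERTS_CACHE_LINE_SIZE. A tiny self-check of the same arithmetic:

#include <assert.h>

#define DATA_ALIGN_SIZE(SZ) (((((SZ) - 1) / 8) + 1) * 8)

int main(void)
{
    assert(DATA_ALIGN_SIZE(1)  == 8);
    assert(DATA_ALIGN_SIZE(8)  == 8);
    assert(DATA_ALIGN_SIZE(13) == 16);
    assert(DATA_ALIGN_SIZE(16) == 16);
    return 0;
}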
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 4aa8fa82fb..d4de0d076a 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -146,6 +146,7 @@ class SYSTEM system_data
type SBMBC SBMBC SYSTEM small_block_mbc
type PROC FIXED_SIZE PROCESSES proc
+type PORT DRIVER SYSTEM port
type ATOM LONG_LIVED ATOM atom_entry
type MODULE LONG_LIVED CODE module_entry
type REG_PROC STANDARD PROCESSES reg_proc
@@ -164,6 +165,7 @@ type MSG_REF FIXED_SIZE PROCESSES msg_ref
type MSG_ROOTS TEMPORARY PROCESSES msg_roots
type ROOTSET TEMPORARY PROCESSES root_set
type LOADER_TMP TEMPORARY CODE loader_tmp
+type PREPARED_CODE SHORT_LIVED CODE prepared_code
type BIF_TIMER_TABLE LONG_LIVED SYSTEM bif_timer_table
type SL_BIF_TIMER SHORT_LIVED PROCESSES bif_timer_sl
type LL_BIF_TIMER STANDARD PROCESSES bif_timer_ll
@@ -188,7 +190,10 @@ type PORT_TABLE LONG_LIVED SYSTEM port_tab
type TIMER_WHEEL LONG_LIVED SYSTEM timer_wheel
type DRV DRIVER SYSTEM drv_internal
type DRV_BINARY BINARY BINARIES drv_binary
-type DRIVER STANDARD SYSTEM driver
+type DRIVER DRIVER SYSTEM driver
+type DRV_CMD_DATA DRIVER SYSTEM driver_command_data
+type DRV_CTRL_DATA DRIVER SYSTEM driver_control_data
+type DRV_CALL_DATA DRIVER SYSTEM driver_call_data
type NIF DRIVER SYSTEM nif_internal
type BINARY BINARY BINARIES binary
type NBIF_TABLE SYSTEM SYSTEM nbif_tab
@@ -196,14 +201,12 @@ type ARG_REG STANDARD PROCESSES arg_reg
type PROC_DICT STANDARD PROCESSES proc_dict
type CALLS_BUF STANDARD PROCESSES calls_buf
type BPD STANDARD SYSTEM bpd
-type PORT_NAME STANDARD SYSTEM port_name
type LINEBUF STANDARD SYSTEM line_buf
type IOQ STANDARD SYSTEM io_queue
type BITS_BUF STANDARD SYSTEM bits_buf
type TMP_DIST_BUF TEMPORARY SYSTEM tmp_dist_buf
type ASYNC_DATA LONG_LIVED SYSTEM internal_async_data
type ESTACK TEMPORARY SYSTEM estack
-type PORT_CALL_BUF TEMPORARY SYSTEM port_call_buf
type DB_TABLE ETS ETS db_tab
type DB_FIXATION SHORT_LIVED ETS db_fixation
type DB_FIX_DEL SHORT_LIVED ETS fixed_del
@@ -233,14 +236,14 @@ type DDLL_HANDLE STANDARD SYSTEM ddll_handle
type DDLL_ERRCODES LONG_LIVED SYSTEM ddll_errcodes
type DDLL_TMP_BUF TEMPORARY SYSTEM ddll_tmp_buf
type PORT_TASK SHORT_LIVED SYSTEM port_task
-type PORT_TASKQ SHORT_LIVED SYSTEM port_task_queue
+type PT_HNDL_LIST SHORT_LIVED SYSTEM port_task_handle_list
type MISC_OP_LIST SHORT_LIVED SYSTEM misc_op_list
type PORT_NAMES SHORT_LIVED SYSTEM port_names
-type PORT_DATA_LOCK STANDARD SYSTEM port_data_lock
+type PORT_DATA_LOCK DRIVER SYSTEM port_data_lock
type NODES_MON STANDARD PROCESSES nodes_monitor
-type PROCS_TPROC_EL SHORT_LIVED PROCESSES processes_term_proc_el
-type PROCS_CNKINF SHORT_LIVED PROCESSES processes_chunk_info
-type PROCS_PIDS SHORT_LIVED PROCESSES processes_pids
+type PTAB_LIST_DEL SHORT_LIVED PROCESSES ptab_list_deleted_el
+type PTAB_LIST_CNKI SHORT_LIVED PROCESSES ptab_list_chunk_info
+type PTAB_LIST_PIDS SHORT_LIVED PROCESSES ptab_list_pids
type RE_TMP_BUF TEMPORARY SYSTEM re_tmp_buf
type RE_SUBJECT SHORT_LIVED SYSTEM re_subject
type RE_HEAP STANDARD SYSTEM re_heap
@@ -263,6 +266,10 @@ type ZLIB STANDARD SYSTEM zlib
type CPU_GRPS_MAP LONG_LIVED SYSTEM cpu_groups_map
type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts
type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
+type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q
+type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
+type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
+type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
@@ -321,6 +328,8 @@ type SL_PTIMER SHORT_LIVED SYSTEM ptimer_sl
type LL_PTIMER STANDARD SYSTEM ptimer_ll
type SYS_MSG_Q SHORT_LIVED PROCESSES system_messages_queue
type FP_EXCEPTION LONG_LIVED SYSTEM fp_exception
+type LL_MPATHS LONG_LIVED SYSTEM ll_migration_paths
+type SL_MPATHS SHORT_LIVED SYSTEM sl_migration_paths
+endif
+if hipe
@@ -414,28 +423,4 @@ type CON_VPRINTF_BUF TEMPORARY SYSTEM con_vprintf_buf
+endif
-+if vxworks
-
-type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf
-type PEND_DATA SYSTEM SYSTEM pending_data
-type FD_TAB LONG_LIVED SYSTEM fd_tab
-type FD_ENTRY_BUF SYSTEM SYSTEM fd_entry_buf
-
-+endif
-
-+if ose
-
-type SYS_TMP_BUF LONG_LIVED SYSTEM sys_tmp_buf
-type PUTENV_STR SYSTEM SYSTEM putenv_string
-type GETENV_STR SYSTEM SYSTEM getenv_string
-type GETENV_STATE SYSTEM SYSTEM getenv_state
-type SIG_ENTRY SYSTEM SYSTEM sig_entry
-type DRIVER_DATA SYSTEM SYSTEM driver_data
-type PGM_TAB SYSTEM SYSTEM pgm_tab
-type PGM_ENTRY SYSTEM SYSTEM pgm_entry
-type PRT_TAB SYSTEM SYSTEM prt_tab
-type PRT_ENTRY SYSTEM SYSTEM prt_entry
-
-+endif
-
# ----------------------------------------------------------------------------
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 97ba306a79..3d6761339b 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -78,10 +78,12 @@ int erts_have_sbmbc_alloc;
#if HAVE_ERTS_MSEG
-#define INV_MSEG_UNIT_MASK ((UWord) (mseg_unit_size - 1))
-#define MSEG_UNIT_MASK (~INV_MSEG_UNIT_MASK)
+#define MSEG_UNIT_SHIFT MSEG_ALIGN_BITS
+#define MSEG_UNIT_SZ (1 << MSEG_UNIT_SHIFT)
+#define MSEG_UNIT_MASK ((~(UWord)0) << MSEG_UNIT_SHIFT)
+
#define MSEG_UNIT_FLOOR(X) ((X) & MSEG_UNIT_MASK)
-#define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + INV_MSEG_UNIT_MASK)
+#define MSEG_UNIT_CEILING(X) MSEG_UNIT_FLOOR((X) + ~MSEG_UNIT_MASK)
#endif
@@ -104,7 +106,6 @@ int erts_have_sbmbc_alloc;
static Uint sys_alloc_carrier_size;
#if HAVE_ERTS_MSEG
static Uint max_mseg_carriers;
-static Uint mseg_unit_size;
#endif
#define ONE_GIGA (1000000000)
@@ -117,16 +118,47 @@ static Uint mseg_unit_size;
? ((CC).giga_no--, (CC).no = ONE_GIGA - 1) \
: (CC).no--)
-/* ... */
+/* Multi block carrier (MBC) memory layout in R16:
+
+Empty MBC:
+[Carrier_t|pad|Block_t L0T|fhdr| free... ]
+
+MBC after allocating first block:
+[Carrier_t|pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ]
+
+MBC after allocating second block:
+[Carrier_t|pad|Block_t 000| udata |pad|Block_t 000| udata |pad|Block_t L0T|fhdr| free... ]
+
+MBC after deallocating first block:
+[Carrier_t|pad|Block_t 00T|fhdr| free |FreeBlkFtr_t|Block_t 0P0| udata |pad|Block_t L0T|fhdr| free... ]
+
+
+ udata = Allocated user data
+ pad = Padding to ensure correct alignment for user data
+ fhdr = Allocator specific header to keep track of free block
+ free = Unused free memory
+ T = This block is free (THIS_FREE_BLK_HDR_FLG)
+ P = Previous block is free (PREV_FREE_BLK_HDR_FLG)
+ L = Last block in carrier (LAST_BLK_HDR_FLG)
+*/
+
+/* Single block carrier (SBC):
+[Carrier_t|pad|Block_t 111| udata... ]
+*/
+
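
The layout comment relies on block sizes being multiples of the allocation unit, which leaves the low bits of the header word free for the T/P/L flags (THIS_FREE, PREV_FREE, LAST). A minimal model of packing and unpacking such a header word; the real Block_t additionally packs a carrier offset on some configurations, which is omitted here:

#include <assert.h>
typedef unsigned long UWord_model;

#define THIS_FREE  (((UWord_model) 1) << 0)
#define PREV_FREE  (((UWord_model) 1) << 1)
#define LAST_BLK   (((UWord_model) 1) << 2)
#define FLAG_MASK  (THIS_FREE | PREV_FREE | LAST_BLK)

static UWord_model mk_hdr(UWord_model sz, UWord_model flags)
{
    assert((sz & FLAG_MASK) == 0);      /* size is unit-aligned, low bits free */
    return sz | flags;
}

static UWord_model hdr_size(UWord_model hdr)  { return hdr & ~FLAG_MASK; }
static UWord_model hdr_flags(UWord_model hdr) { return hdr &  FLAG_MASK; }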
/* Blocks ... */
-#define SBC_BLK_FTR_FLG (((UWord) 1) << 0)
+#define UNUSED0_BLK_FTR_FLG (((UWord) 1) << 0)
#define UNUSED1_BLK_FTR_FLG (((UWord) 1) << 1)
#define UNUSED2_BLK_FTR_FLG (((UWord) 1) << 2)
-#define ABLK_HDR_SZ (sizeof(Block_t))
-#define FBLK_FTR_SZ (sizeof(UWord))
+#if MBC_ABLK_OFFSET_BITS
+# define ABLK_HDR_SZ (offsetof(Block_t,u))
+#else
+# define ABLK_HDR_SZ (sizeof(Block_t))
+#endif
+#define FBLK_FTR_SZ (sizeof(FreeBlkFtr_t))
#define UMEMSZ2BLKSZ(AP, SZ) \
(ABLK_HDR_SZ + (SZ) <= (AP)->min_block_size \
@@ -136,88 +168,181 @@ static Uint mseg_unit_size;
#define UMEM2BLK(P) ((Block_t *) (((char *) (P)) - ABLK_HDR_SZ))
#define BLK2UMEM(P) ((void *) (((char *) (P)) + ABLK_HDR_SZ))
-#define PREV_BLK_SZ(B) \
- ((UWord) (*(((UWord *) (B)) - 1) & SZ_MASK))
+#define PREV_BLK_SZ(B) ((UWord) (((FreeBlkFtr_t *)(B))[-1]))
#define SET_BLK_SZ_FTR(B, SZ) \
- (*((UWord *) (((char *) (B)) + (SZ) - sizeof(UWord))) = (SZ))
+ (((FreeBlkFtr_t *) (((char *) (B)) + (SZ)))[-1] = (SZ))
#define THIS_FREE_BLK_HDR_FLG (((UWord) 1) << 0)
#define PREV_FREE_BLK_HDR_FLG (((UWord) 1) << 1)
#define LAST_BLK_HDR_FLG (((UWord) 1) << 2)
-#define SET_BLK_SZ(B, SZ) \
+/* Special flag combo for (allocated) SBC blocks
+*/
+#define SBC_BLK_HDR_FLG (THIS_FREE_BLK_HDR_FLG | PREV_FREE_BLK_HDR_FLG | LAST_BLK_HDR_FLG)
+
+#define SET_MBC_ABLK_SZ(B, SZ) \
(ASSERT(((SZ) & FLG_MASK) == 0), \
- (*((Block_t *) (B)) = ((*((Block_t *) (B)) & FLG_MASK) | (SZ))))
-#define SET_BLK_FREE(B) \
- (*((Block_t *) (B)) |= THIS_FREE_BLK_HDR_FLG)
-#define SET_BLK_ALLOCED(B) \
- (*((Block_t *) (B)) &= ~THIS_FREE_BLK_HDR_FLG)
-#define SET_PREV_BLK_FREE(B) \
- (*((Block_t *) (B)) |= PREV_FREE_BLK_HDR_FLG)
+ (B)->bhdr = (((B)->bhdr) & ~MBC_ABLK_SZ_MASK) | (SZ))
+#define SET_MBC_FBLK_SZ(B, SZ) \
+ (ASSERT(((SZ) & FLG_MASK) == 0), \
+ (B)->bhdr = (((B)->bhdr) & ~MBC_FBLK_SZ_MASK) | (SZ))
+#define SET_SBC_BLK_SZ(B, SZ) \
+ (ASSERT(((SZ) & FLG_MASK) == 0), \
+ (B)->bhdr = (((B)->bhdr) & ~SBC_BLK_SZ_MASK) | (SZ))
+#define SET_PREV_BLK_FREE(AP,B) \
+ (ASSERT(!IS_MBC_FIRST_BLK(AP,B)), \
+ ASSERT(!IS_FREE_BLK(B)), \
+ (B)->bhdr |= PREV_FREE_BLK_HDR_FLG)
#define SET_PREV_BLK_ALLOCED(B) \
- (*((Block_t *) (B)) &= ~PREV_FREE_BLK_HDR_FLG)
+ ((B)->bhdr &= ~PREV_FREE_BLK_HDR_FLG)
#define SET_LAST_BLK(B) \
- (*((Block_t *) (B)) |= LAST_BLK_HDR_FLG)
+ ((B)->bhdr |= LAST_BLK_HDR_FLG)
#define SET_NOT_LAST_BLK(B) \
- (*((Block_t *) (B)) &= ~LAST_BLK_HDR_FLG)
+ ((B)->bhdr &= ~LAST_BLK_HDR_FLG)
#define SBH_THIS_FREE THIS_FREE_BLK_HDR_FLG
-#define SBH_THIS_ALLOCED ((UWord) 0)
#define SBH_PREV_FREE PREV_FREE_BLK_HDR_FLG
-#define SBH_PREV_ALLOCED ((UWord) 0)
#define SBH_LAST_BLK LAST_BLK_HDR_FLG
-#define SBH_NOT_LAST_BLK ((UWord) 0)
-#define SET_BLK_HDR(B, Sz, F) \
- (ASSERT(((Sz) & FLG_MASK) == 0), *((Block_t *) (B)) = ((Sz) | (F)))
+
+#if MBC_ABLK_OFFSET_BITS
+
+# define MBC_SZ_MAX_LIMIT ((((UWord)1 << MBC_ABLK_OFFSET_BITS) - 1) << MSEG_ALIGN_BITS)
+
+# define BLK_CARRIER_OFFSET(B, C) (((char*)(B) - (char*)(C)) >> MSEG_UNIT_SHIFT)
+
+# define SET_MBC_ABLK_HDR(B, Sz, F, C) \
+ (ASSERT(((Sz) & ~MBC_ABLK_SZ_MASK) == 0), \
+ ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
+ (B)->bhdr = ((Sz) | (F) | (BLK_CARRIER_OFFSET(B,C) << MBC_ABLK_OFFSET_SHIFT)))
+
+# define SET_MBC_FBLK_HDR(B, Sz, F, C) \
+ (ASSERT(((Sz) & ~MBC_FBLK_SZ_MASK) == 0), \
+ ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
+ (B)->bhdr = ((Sz) | (F)), \
+ (B)->u.carrier = (C))
+
+# define ABLK_TO_MBC(B) \
+ (ASSERT(IS_MBC_BLK(B) && IS_ALLOCED_BLK(B)), \
+ (Carrier_t*)((MSEG_UNIT_FLOOR((UWord)(B)) - \
+ (((B)->bhdr >> MBC_ABLK_OFFSET_SHIFT) << MSEG_UNIT_SHIFT))))
+
+# define FBLK_TO_MBC(B) \
+ (ASSERT(IS_MBC_BLK(B) && IS_FREE_BLK(B)), \
+ (B)->u.carrier)
+
+# define BLK_TO_MBC(B) (IS_FREE_BLK(B) ? FBLK_TO_MBC(B) : ABLK_TO_MBC(B))
+
+# define IS_MBC_FIRST_ABLK(AP,B) \
+ ((((UWord)(B) & ~MSEG_UNIT_MASK) == MBC_HEADER_SIZE(AP)) \
+ && ((B)->bhdr & MBC_ABLK_OFFSET_MASK) == 0)
+
+# define IS_MBC_FIRST_FBLK(AP,B) \
+ ((char*)(B) == (char*)((B)->u.carrier) + MBC_HEADER_SIZE(AP))
+
+# define IS_MBC_FIRST_BLK(AP,B) \
+ (IS_FREE_BLK(B) ? IS_MBC_FIRST_FBLK(AP,B) : IS_MBC_FIRST_ABLK(AP,B))
+
+# define SET_BLK_FREE(B) \
+ (ASSERT(!IS_PREV_BLK_FREE(B)), \
+ (B)->u.carrier = ABLK_TO_MBC(B), \
+ (B)->bhdr |= THIS_FREE_BLK_HDR_FLG, \
+ (B)->bhdr &= (MBC_ABLK_SZ_MASK|FLG_MASK))
+
+# define SET_BLK_ALLOCED(B) \
+ (ASSERT(((B)->bhdr & (MBC_ABLK_OFFSET_MASK|THIS_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
+ (B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG, \
+ (B)->bhdr |= (BLK_CARRIER_OFFSET(B,(B)->u.carrier) << MBC_ABLK_OFFSET_SHIFT))
+
+#else /* !MBC_ABLK_OFFSET_BITS */
+
+# define MBC_SZ_MAX_LIMIT ((UWord)~0)
+
+# define SET_MBC_ABLK_HDR(B, Sz, F, C) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), \
+ ASSERT(!((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG))), \
+ ASSERT((UWord)(F) < SBC_BLK_HDR_FLG), \
+ (B)->bhdr = ((Sz) | (F)), \
+ (B)->carrier = (C))
+
+# define SET_MBC_FBLK_HDR(B, Sz, F, C) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), \
+ ASSERT(((UWord)(F) & (~FLG_MASK|THIS_FREE_BLK_HDR_FLG|PREV_FREE_BLK_HDR_FLG)) == THIS_FREE_BLK_HDR_FLG), \
+ (B)->bhdr = ((Sz) | (F)), \
+ (B)->carrier = (C))
+
+# define BLK_TO_MBC(B) ((B)->carrier)
+# define ABLK_TO_MBC(B) BLK_TO_MBC(B)
+# define FBLK_TO_MBC(B) BLK_TO_MBC(B)
+
+# define IS_MBC_FIRST_BLK(AP,B) \
+ ((char*)(B) == (char*)((B)->carrier) + MBC_HEADER_SIZE(AP))
+# define IS_MBC_FIRST_ABLK(AP,B) IS_MBC_FIRST_BLK(AP,B)
+# define IS_MBC_FIRST_FBLK(AP,B) IS_MBC_FIRST_BLK(AP,B)
+
+# define SET_BLK_FREE(B) \
+ (ASSERT(!IS_PREV_BLK_FREE(B)), \
+ (B)->bhdr |= THIS_FREE_BLK_HDR_FLG)
+
+# define SET_BLK_ALLOCED(B) \
+ ((B)->bhdr &= ~THIS_FREE_BLK_HDR_FLG)
+
+#endif /* !MBC_ABLK_OFFSET_BITS */
+
+#define SET_SBC_BLK_HDR(B, Sz) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), (B)->bhdr = ((Sz) | (SBC_BLK_HDR_FLG)))
+
#define BLK_UMEM_SZ(B) \
(BLK_SZ(B) - (ABLK_HDR_SZ))
#define IS_PREV_BLK_FREE(B) \
- (*((Block_t *) (B)) & PREV_FREE_BLK_HDR_FLG)
+ ((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
#define IS_PREV_BLK_ALLOCED(B) \
(!IS_PREV_BLK_FREE((B)))
#define IS_FREE_BLK(B) \
- (*((Block_t *) (B)) & THIS_FREE_BLK_HDR_FLG)
+ (ASSERT(!IS_SBC_BLK(B)), (B)->bhdr & THIS_FREE_BLK_HDR_FLG)
#define IS_ALLOCED_BLK(B) \
(!IS_FREE_BLK((B)))
#define IS_LAST_BLK(B) \
- (*((Block_t *) (B)) & LAST_BLK_HDR_FLG)
+ ((B)->bhdr & LAST_BLK_HDR_FLG)
#define IS_NOT_LAST_BLK(B) \
(!IS_LAST_BLK((B)))
#define GET_LAST_BLK_HDR_FLG(B) \
- (*((Block_t*) (B)) & LAST_BLK_HDR_FLG)
+ ((B)->bhdr & LAST_BLK_HDR_FLG)
#define GET_THIS_FREE_BLK_HDR_FLG(B) \
- (*((Block_t*) (B)) & THIS_FREE_BLK_HDR_FLG)
+ ((B)->bhdr & THIS_FREE_BLK_HDR_FLG)
#define GET_PREV_FREE_BLK_HDR_FLG(B) \
- (*((Block_t*) (B)) & PREV_FREE_BLK_HDR_FLG)
+ ((B)->bhdr & PREV_FREE_BLK_HDR_FLG)
#define GET_BLK_HDR_FLGS(B) \
- (*((Block_t*) (B)) & FLG_MASK)
-
-#define IS_FIRST_BLK(B) \
- (IS_PREV_BLK_FREE((B)) && (PREV_BLK_SZ((B)) == 0))
-#define IS_NOT_FIRST_BLK(B) \
- (!IS_FIRST_BLK((B)))
-
-#define SET_SBC_BLK_FTR(FTR) \
- ((FTR) = (0 | SBC_BLK_FTR_FLG))
-#define SET_MBC_BLK_FTR(FTR) \
- ((FTR) = 0)
+ ((B)->bhdr & FLG_MASK)
#define IS_SBC_BLK(B) \
- (IS_PREV_BLK_FREE((B)) && (((UWord *) (B))[-1] & SBC_BLK_FTR_FLG))
+ (((B)->bhdr & FLG_MASK) == SBC_BLK_HDR_FLG)
#define IS_MBC_BLK(B) \
(!IS_SBC_BLK((B)))
+#define MBC_BLK_SZ(B) (IS_FREE_BLK(B) ? MBC_FBLK_SZ(B) : MBC_ABLK_SZ(B))
+
#define NXT_BLK(B) \
- ((Block_t *) (((char *) (B)) + BLK_SZ((B))))
+ (ASSERT(IS_MBC_BLK(B)), \
+ (Block_t *) (((char *) (B)) + MBC_BLK_SZ((B))))
#define PREV_BLK(B) \
((Block_t *) (((char *) (B)) - PREV_BLK_SZ((B))))
+#define BLK_AFTER(B,Sz) \
+ ((Block_t *) (((char *) (B)) + (Sz)))
+
+#define BLK_SZ(B) ((B)->bhdr & (((B)->bhdr & THIS_FREE_BLK_HDR_FLG) ? MBC_FBLK_SZ_MASK : MBC_ABLK_SZ_MASK))
+
/* Carriers ... */
+#define SBC_HEADER_SIZE (UNIT_CEILING(sizeof(Carrier_t) + ABLK_HDR_SZ) \
+ - ABLK_HDR_SZ)
+#define MBC_HEADER_SIZE(AP) SBC_HEADER_SIZE
+
+
#define MSEG_CARRIER_HDR_FLAG (((UWord) 1) << 0)
#define SBC_CARRIER_HDR_FLAG (((UWord) 1) << 1)
@@ -226,20 +351,20 @@ static Uint mseg_unit_size;
#define SCH_MBC 0
#define SCH_SBC SBC_CARRIER_HDR_FLAG
-#define SET_CARRIER_HDR(C, Sz, F) \
- (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)))
+#define SET_CARRIER_HDR(C, Sz, F, AP) \
+ (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), (C)->allctr = (AP))
-#define BLK2SBC(AP, B) \
- ((Carrier_t *) (((char *) (B)) - (AP)->sbc_header_size))
-#define FBLK2MBC(AP, B) \
- ((Carrier_t *) (((char *) (B)) - (AP)->mbc_header_size))
+#define BLK_TO_SBC(B) \
+ ((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE))
+#define FIRST_BLK_TO_MBC(AP, B) \
+ ((Carrier_t *) (((char *) (B)) - MBC_HEADER_SIZE(AP)))
-#define MBC2FBLK(AP, P) \
- ((Block_t *) (((char *) (P)) + (AP)->mbc_header_size))
+#define MBC_TO_FIRST_BLK(AP, P) \
+ ((Block_t *) (((char *) (P)) + MBC_HEADER_SIZE(AP)))
#define SBC2BLK(AP, P) \
- ((Block_t *) (((char *) (P)) + (AP)->sbc_header_size))
+ ((Block_t *) (((char *) (P)) + SBC_HEADER_SIZE))
#define SBC2UMEM(AP, P) \
- ((void *) (((char *) (P)) + ((AP)->sbc_header_size + ABLK_HDR_SZ)))
+ ((void *) (((char *) (P)) + (SBC_HEADER_SIZE + ABLK_HDR_SZ)))
#define IS_MSEG_CARRIER(C) \
((C)->chdr & MSEG_CARRIER_HDR_FLAG)
@@ -250,15 +375,6 @@ static Uint mseg_unit_size;
#define IS_MB_CARRIER(C) \
(!IS_SB_CARRIER((C)))
-#define SET_MSEG_CARRIER(C) \
- ((C)->chdr |= MSEG_CARRIER_HDR_FLAG)
-#define SET_SYS_ALLOC_CARRIER(C) \
- ((C)->chdr &= ~MSEG_CARRIER_HDR_FLAG)
-#define SET_SB_CARRIER(C) \
- ((C)->chdr |= SBC_CARRIER_HDR_FLAG)
-#define SET_MB_CARRIER(C) \
- ((C)->chdr &= ~SBC_CARRIER_HDR_FLAG)
-
#define SET_CARRIER_SZ(C, SZ) \
(ASSERT(((SZ) & FLG_MASK) == 0), \
((C)->chdr = ((C)->chdr & FLG_MASK) | (SZ)))
@@ -506,11 +622,11 @@ static void mbc_free(Allctr_t *allctr, void *p);
#if HAVE_ERTS_MSEG
static ERTS_INLINE void *
-alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p)
+alcu_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags)
{
void *res;
- res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, &allctr->mseg_opt);
+ res = erts_mseg_alloc_opt(allctr->alloc_no, size_p, flags, &allctr->mseg_opt);
INC_CC(allctr->calls.mseg_alloc);
return res;
}
@@ -521,7 +637,7 @@ alcu_mseg_realloc(Allctr_t *allctr, void *seg, Uint old_size, Uint *new_size_p)
void *res;
res = erts_mseg_realloc_opt(allctr->alloc_no, seg, old_size, new_size_p,
- &allctr->mseg_opt);
+ ERTS_MSEG_FLG_NONE, &allctr->mseg_opt);
INC_CC(allctr->calls.mseg_realloc);
return res;
}
@@ -765,13 +881,9 @@ erts_alcu_fix_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
#define ERTS_ALCU_DD_FIX_TYPE_OFFS \
((sizeof(ErtsAllctrDDBlock_t)-1)/sizeof(UWord) + 1)
-#define ERTS_AU_PREF_ALLOC_IX_MASK \
- ((((UWord) 1) << ERTS_AU_PREF_ALLOC_BITS) - 1)
-#define ERTS_AU_PREF_ALLOC_SIZE_MASK \
- ((((UWord) 1) << (sizeof(UWord)*8 - ERTS_AU_PREF_ALLOC_BITS)) - 1)
-static ERTS_INLINE int
-get_pref_allctr(void *extra, Allctr_t **allctr)
+static ERTS_INLINE Allctr_t*
+get_pref_allctr(void *extra)
{
ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
int pref_ix;
@@ -781,34 +893,33 @@ get_pref_allctr(void *extra, Allctr_t **allctr)
ASSERT(sizeof(UWord) == sizeof(Allctr_t *));
ASSERT(0 <= pref_ix && pref_ix < tspec->size);
- *allctr = tspec->allctr[pref_ix];
- return pref_ix;
+ return tspec->allctr[pref_ix];
}
-static ERTS_INLINE void *
-get_used_allctr(void *extra, void *p, Allctr_t **allctr, UWord *sizep)
+/* SMP note:
+ * get_used_allctr() must be safe WITHOUT locking the allocator while
+ * concurrent threads may be updating adjacent blocks.
+ * We rely on getting a consistent result (without atomic op) when reading
+ * the block header word even if a concurrent thread is updating
+ * the "PREV_FREE" flag bit.
+ */
+static ERTS_INLINE Allctr_t*
+get_used_allctr(void *extra, void *p, UWord *sizep)
{
- ErtsAllocatorThrSpec_t *tspec = (ErtsAllocatorThrSpec_t *) extra;
- void *ptr = (void *) (((char *) p) - sizeof(UWord));
- UWord ainfo = *((UWord *) ptr);
- int aix = (int) (ainfo & ERTS_AU_PREF_ALLOC_IX_MASK);
- *allctr = tspec->allctr[aix];
- if (sizep)
- *sizep = ((ainfo >> ERTS_AU_PREF_ALLOC_BITS)
- & ERTS_AU_PREF_ALLOC_SIZE_MASK);
- return ptr;
-}
+ Block_t* blk = UMEM2BLK(p);
+ Carrier_t* crr;
-static ERTS_INLINE void *
-put_used_allctr(void *p, int ix, UWord size)
-{
- UWord ainfo = (size >= ERTS_AU_PREF_ALLOC_SIZE_MASK
- ? ERTS_AU_PREF_ALLOC_SIZE_MASK
- : size);
- ainfo <<= ERTS_AU_PREF_ALLOC_BITS;
- ainfo |= (UWord) ix;
- *((UWord *) p) = ainfo;
- return (void *) (((char *) p) + sizeof(UWord));
+ if (IS_SBC_BLK(blk)) {
+ crr = BLK_TO_SBC(blk);
+ if (sizep)
+ *sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ;
+ }
+ else {
+ crr = ABLK_TO_MBC(blk);
+ if (sizep)
+ *sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ;
+ }
+ return crr->allctr;
}
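
get_used_allctr() above no longer stores an allocator index in front of every user block; it derives the owning carrier from the block itself (a fixed header offset for SBC blocks, a stored carrier offset for allocated MBC blocks) and then reads allctr from the carrier header. Roughly, for the offset-bearing configuration, the carrier start is recovered like this; the unit size below is a placeholder, not the actual MSEG geometry:

/* Sketch, assuming super-aligned carriers: floor the block address to the
 * alignment unit, then subtract the unit offset taken from the block header. */
#define UNIT_SHIFT_MODEL 18                                   /* e.g. 256 KB units */
#define UNIT_MASK_MODEL  ((~(unsigned long) 0) << UNIT_SHIFT_MODEL)

static void *block_to_carrier(void *blk, unsigned long hdr_offset_units)
{
    unsigned long floored = (unsigned long) blk & UNIT_MASK_MODEL;
    return (void *) (floored - (hdr_offset_units << UNIT_SHIFT_MODEL));
}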
static void
@@ -1209,10 +1320,8 @@ mbc_alloc_block(Allctr_t *allctr, Uint size, Uint *blk_szp, Uint32 *alcu_flgsp)
if ((*alcu_flgsp) & ERTS_ALCU_FLG_SBMBC)
blk = create_sbmbc(allctr, get_blk_sz);
else {
-#if HALFWORD_HEAP
- blk = create_carrier(allctr, get_blk_sz, CFLG_MBC|CFLG_FORCE_MSEG);
-#else
blk = create_carrier(allctr, get_blk_sz, CFLG_MBC);
+#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS
if (!blk) {
/* Emergency! We couldn't create the carrier as we wanted.
Try to place it in a sys_alloced sbc. */
@@ -1242,6 +1351,7 @@ mbc_alloc_finalize(Allctr_t *allctr,
Block_t *blk,
Uint org_blk_sz,
UWord flags,
+ Carrier_t *crr,
Uint want_blk_sz,
int valid_blk_info,
Uint32 alcu_flgs)
@@ -1262,22 +1372,18 @@ mbc_alloc_finalize(Allctr_t *allctr,
/* Shrink block... */
blk_sz = want_blk_sz;
nxt_blk_sz = org_blk_sz - blk_sz;
- SET_BLK_HDR(blk,
- blk_sz,
- SBH_THIS_ALLOCED|SBH_NOT_LAST_BLK|prev_free_flg);
+ SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr);
- nxt_blk = NXT_BLK(blk);
- SET_BLK_HDR(nxt_blk,
- nxt_blk_sz,
- (SBH_THIS_FREE
- | SBH_PREV_ALLOCED
- | (flags & LAST_BLK_HDR_FLG)));
+ nxt_blk = BLK_AFTER(blk, blk_sz);
+ SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz,
+ SBH_THIS_FREE|(flags & LAST_BLK_HDR_FLG),
+ crr);
if (!(flags & LAST_BLK_HDR_FLG)) {
SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
if (!valid_blk_info) {
- Block_t *nxt_nxt_blk = NXT_BLK(nxt_blk);
- SET_PREV_BLK_FREE(nxt_nxt_blk);
+ Block_t *nxt_nxt_blk = BLK_AFTER(nxt_blk, nxt_blk_sz);
+ SET_PREV_BLK_FREE(allctr, nxt_nxt_blk);
}
}
(*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
@@ -1291,40 +1397,40 @@ mbc_alloc_finalize(Allctr_t *allctr,
|| nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
ASSERT((flags & LAST_BLK_HDR_FLG)
|| IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
- ASSERT(nxt_blk_sz == BLK_SZ(nxt_blk));
+ ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
ASSERT(nxt_blk_sz >= allctr->min_block_size);
+ ASSERT(ABLK_TO_MBC(blk) == crr);
+ ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
}
else {
+ ASSERT(org_blk_sz <= MBC_ABLK_SZ_MASK);
blk_sz = org_blk_sz;
if (flags & LAST_BLK_HDR_FLG) {
if (valid_blk_info)
SET_BLK_ALLOCED(blk);
else
- SET_BLK_HDR(blk,
- blk_sz,
- SBH_THIS_ALLOCED|SBH_LAST_BLK|prev_free_flg);
+ SET_MBC_ABLK_HDR(blk, blk_sz, SBH_LAST_BLK|prev_free_flg, crr);
}
else {
if (valid_blk_info)
SET_BLK_ALLOCED(blk);
else
- SET_BLK_HDR(blk,
- blk_sz,
- SBH_THIS_ALLOCED|SBH_NOT_LAST_BLK|prev_free_flg);
- nxt_blk = NXT_BLK(blk);
+ SET_MBC_ABLK_HDR(blk, blk_sz, prev_free_flg, crr);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
SET_PREV_BLK_ALLOCED(nxt_blk);
}
ASSERT((flags & LAST_BLK_HDR_FLG)
? IS_LAST_BLK(blk)
: IS_NOT_LAST_BLK(blk));
+ ASSERT(ABLK_TO_MBC(blk) == crr);
}
STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
- ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz == MBC_BLK_SZ(blk));
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(blk_sz >= allctr->min_block_size);
ASSERT(blk_sz >= want_blk_sz);
@@ -1348,8 +1454,9 @@ mbc_alloc(Allctr_t *allctr, Uint size)
if (IS_MBC_BLK(blk))
mbc_alloc_finalize(allctr,
blk,
- BLK_SZ(blk),
+ MBC_FBLK_SZ(blk),
GET_BLK_HDR_FLGS(blk),
+ FBLK_TO_MBC(blk),
blk_sz,
1,
alcu_flgs);
@@ -1370,7 +1477,7 @@ mbc_free(Allctr_t *allctr, void *p)
ASSERT(p);
blk = UMEM2BLK(p);
- blk_sz = BLK_SZ(blk);
+ blk_sz = MBC_ABLK_SZ(blk);
if (blk_sz < allctr->sbmbc_threshold)
alcu_flgs |= ERTS_ALCU_FLG_SBMBC;
@@ -1381,17 +1488,18 @@ mbc_free(Allctr_t *allctr, void *p)
STAT_MBC_BLK_FREE(allctr, blk_sz, alcu_flgs);
- is_first_blk = IS_FIRST_BLK(blk);
+ is_first_blk = IS_MBC_FIRST_ABLK(allctr, blk);
is_last_blk = IS_LAST_BLK(blk);
- if (!is_first_blk && IS_PREV_BLK_FREE(blk)) {
+ if (IS_PREV_BLK_FREE(blk)) {
+ ASSERT(!is_first_blk);
/* Coalesce with previous block... */
blk = PREV_BLK(blk);
(*allctr->unlink_free_block)(allctr, blk, alcu_flgs);
- blk_sz += BLK_SZ(blk);
- is_first_blk = IS_FIRST_BLK(blk);
- SET_BLK_SZ(blk, blk_sz);
+ blk_sz += MBC_FBLK_SZ(blk);
+ is_first_blk = IS_MBC_FIRST_FBLK(allctr, blk);
+ SET_MBC_FBLK_SZ(blk, blk_sz);
}
else {
SET_BLK_FREE(blk);
@@ -1400,12 +1508,12 @@ mbc_free(Allctr_t *allctr, void *p)
if (is_last_blk)
SET_LAST_BLK(blk);
else {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
if (IS_FREE_BLK(nxt_blk)) {
/* Coalesce with next block... */
(*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
- blk_sz += BLK_SZ(nxt_blk);
- SET_BLK_SZ(blk, blk_sz);
+ blk_sz += MBC_FBLK_SZ(nxt_blk);
+ SET_MBC_FBLK_SZ(blk, blk_sz);
is_last_blk = IS_LAST_BLK(nxt_blk);
if (is_last_blk)
@@ -1416,26 +1524,26 @@ mbc_free(Allctr_t *allctr, void *p)
}
}
else {
- SET_PREV_BLK_FREE(nxt_blk);
+ SET_PREV_BLK_FREE(allctr, nxt_blk);
SET_NOT_LAST_BLK(blk);
SET_BLK_SZ_FTR(blk, blk_sz);
}
}
- ASSERT(is_last_blk ? IS_LAST_BLK(blk) : IS_NOT_LAST_BLK(blk));
- ASSERT(is_first_blk ? IS_FIRST_BLK(blk) : IS_NOT_FIRST_BLK(blk));
ASSERT(IS_FREE_BLK(blk));
+ ASSERT(!is_last_blk == !IS_LAST_BLK(blk));
+ ASSERT(!is_first_blk == !IS_MBC_FIRST_FBLK(allctr, blk));
ASSERT(is_first_blk || IS_PREV_BLK_ALLOCED(blk));
ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(blk)));
- ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz == MBC_BLK_SZ(blk));
ASSERT(is_last_blk || blk == PREV_BLK(NXT_BLK(blk)));
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(IS_MBC_BLK(blk));
if (is_first_blk
&& is_last_blk
- && allctr->main_carrier != FBLK2MBC(allctr, blk)) {
+ && allctr->main_carrier != FIRST_BLK_TO_MBC(allctr, blk)) {
if (alcu_flgs & ERTS_ALCU_FLG_SBMBC)
destroy_sbmbc(allctr, blk);
else
@@ -1472,7 +1580,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
ASSERT(size < allctr->sbc_threshold);
blk = (Block_t *) UMEM2BLK(p);
- old_blk_sz = BLK_SZ(blk);
+ old_blk_sz = MBC_ABLK_SZ(blk);
ASSERT(old_blk_sz >= allctr->min_block_size);
@@ -1497,6 +1605,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
return p;
else if (blk_sz < old_blk_sz) {
/* Shrink block... */
+ Carrier_t* crr;
Block_t *nxt_nxt_blk;
Uint diff_sz_val = old_blk_sz - blk_sz;
Uint old_blk_sz_val = old_blk_sz;
@@ -1516,16 +1625,18 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
return NULL;
cand_blk_sz = old_blk_sz;
- if (!IS_PREV_BLK_FREE(blk) || IS_FIRST_BLK(blk))
+ if (!IS_PREV_BLK_FREE(blk)) {
cand_blk = blk;
+ }
else {
+ ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk));
cand_blk = PREV_BLK(blk);
cand_blk_sz += PREV_BLK_SZ(blk);
}
if (!is_last_blk) {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, old_blk_sz);
if (IS_FREE_BLK(nxt_blk))
- cand_blk_sz += BLK_SZ(nxt_blk);
+ cand_blk_sz += MBC_FBLK_SZ(nxt_blk);
}
new_blk = (*allctr->get_free_block)(allctr,
@@ -1541,52 +1652,48 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
nxt_blk_sz = old_blk_sz - blk_sz;
- if ((is_last_blk || IS_ALLOCED_BLK(NXT_BLK(blk)))
+ if ((is_last_blk || IS_ALLOCED_BLK(BLK_AFTER(blk,old_blk_sz)))
&& (nxt_blk_sz < allctr->min_block_size))
return p;
HARD_CHECK_BLK_CARRIER(allctr, blk);
- SET_BLK_SZ(blk, blk_sz);
+ nxt_nxt_blk = BLK_AFTER(blk, old_blk_sz);
+
+ SET_MBC_ABLK_SZ(blk, blk_sz);
SET_NOT_LAST_BLK(blk);
- nxt_blk = NXT_BLK(blk);
- SET_BLK_HDR(nxt_blk,
- nxt_blk_sz,
- SBH_THIS_FREE|SBH_PREV_ALLOCED|SBH_NOT_LAST_BLK);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
STAT_MBC_BLK_ALLOC(allctr, blk_sz, alcu_flgs);
- ASSERT(BLK_SZ(blk) >= allctr->min_block_size);
+ ASSERT(MBC_BLK_SZ(blk) >= allctr->min_block_size);
- if (is_last_blk)
- SET_LAST_BLK(nxt_blk);
- else {
- nxt_nxt_blk = NXT_BLK(nxt_blk);
+ if (!is_last_blk) {
if (IS_FREE_BLK(nxt_nxt_blk)) {
/* Coalesce with next free block... */
- nxt_blk_sz += BLK_SZ(nxt_nxt_blk);
+ nxt_blk_sz += MBC_FBLK_SZ(nxt_nxt_blk);
(*allctr->unlink_free_block)(allctr, nxt_nxt_blk, alcu_flgs);
- SET_BLK_SZ(nxt_blk, nxt_blk_sz);
- is_last_blk = IS_LAST_BLK(nxt_nxt_blk);
- if (is_last_blk)
- SET_LAST_BLK(nxt_blk);
- else
- SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
+ is_last_blk = GET_LAST_BLK_HDR_FLG(nxt_nxt_blk);
}
else {
- SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
- SET_PREV_BLK_FREE(nxt_nxt_blk);
+ SET_PREV_BLK_FREE(allctr, nxt_nxt_blk);
}
+ SET_BLK_SZ_FTR(nxt_blk, nxt_blk_sz);
}
+ crr = ABLK_TO_MBC(blk);
+ SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz,
+ SBH_THIS_FREE | (is_last_blk ? SBH_LAST_BLK : 0),
+ crr);
+
(*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
ASSERT(IS_ALLOCED_BLK(blk));
- ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz == MBC_BLK_SZ(blk));
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(blk_sz >= allctr->min_block_size);
ASSERT(blk_sz >= size + ABLK_HDR_SZ);
@@ -1594,14 +1701,15 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
ASSERT(IS_FREE_BLK(nxt_blk));
ASSERT(IS_PREV_BLK_ALLOCED(nxt_blk));
- ASSERT(nxt_blk_sz == BLK_SZ(nxt_blk));
+ ASSERT(nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
ASSERT(nxt_blk_sz % sizeof(Unit_t) == 0);
ASSERT(nxt_blk_sz >= allctr->min_block_size);
ASSERT(IS_MBC_BLK(nxt_blk));
ASSERT(is_last_blk ? IS_LAST_BLK(nxt_blk) : IS_NOT_LAST_BLK(nxt_blk));
ASSERT(is_last_blk || nxt_blk == PREV_BLK(NXT_BLK(nxt_blk)));
ASSERT(is_last_blk || IS_PREV_BLK_FREE(NXT_BLK(nxt_blk)));
-
+ ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
+
HARD_CHECK_BLK_CARRIER(allctr, blk);
return p;
@@ -1610,8 +1718,8 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
/* Need larger block... */
if (!is_last_blk) {
- nxt_blk = NXT_BLK(blk);
- nxt_blk_sz = BLK_SZ(nxt_blk);
+ nxt_blk = BLK_AFTER(blk, old_blk_sz);
+ nxt_blk_sz = MBC_BLK_SZ(nxt_blk);
if (IS_FREE_BLK(nxt_blk) && get_blk_sz <= old_blk_sz + nxt_blk_sz) {
/* Grow into next block... */
@@ -1624,7 +1732,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (nxt_blk_sz < allctr->min_block_size) {
blk_sz += nxt_blk_sz;
- SET_BLK_SZ(blk, blk_sz);
+ SET_MBC_ABLK_SZ(blk, blk_sz);
if (is_last_blk) {
SET_LAST_BLK(blk);
@@ -1633,21 +1741,20 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
#endif
}
else {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
SET_PREV_BLK_ALLOCED(nxt_blk);
#ifdef DEBUG
is_last_blk = IS_LAST_BLK(nxt_blk);
- nxt_blk_sz = BLK_SZ(nxt_blk);
+ nxt_blk_sz = MBC_BLK_SZ(nxt_blk);
#endif
}
}
else {
- SET_BLK_SZ(blk, blk_sz);
+ Carrier_t* crr = ABLK_TO_MBC(blk);
+ SET_MBC_ABLK_SZ(blk, blk_sz);
- nxt_blk = NXT_BLK(blk);
- SET_BLK_HDR(nxt_blk,
- nxt_blk_sz,
- SBH_THIS_FREE|SBH_PREV_ALLOCED|SBH_NOT_LAST_BLK);
+ nxt_blk = BLK_AFTER(blk, blk_sz);
+ SET_MBC_FBLK_HDR(nxt_blk, nxt_blk_sz, SBH_THIS_FREE, crr);
if (is_last_blk)
SET_LAST_BLK(nxt_blk);
@@ -1657,6 +1764,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
(*allctr->link_free_block)(allctr, nxt_blk, alcu_flgs);
ASSERT(IS_FREE_BLK(nxt_blk));
+ ASSERT(FBLK_TO_MBC(nxt_blk) == crr);
}
STAT_MBC_BLK_FREE(allctr, old_blk_sz, alcu_flgs);
@@ -1664,14 +1772,14 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
ASSERT(IS_ALLOCED_BLK(blk));
- ASSERT(blk_sz == BLK_SZ(blk));
+ ASSERT(blk_sz == MBC_BLK_SZ(blk));
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(blk_sz >= allctr->min_block_size);
ASSERT(blk_sz >= size + ABLK_HDR_SZ);
ASSERT(IS_MBC_BLK(blk));
ASSERT(!nxt_blk || IS_PREV_BLK_ALLOCED(nxt_blk));
- ASSERT(!nxt_blk || nxt_blk_sz == BLK_SZ(nxt_blk));
+ ASSERT(!nxt_blk || nxt_blk_sz == MBC_BLK_SZ(nxt_blk));
ASSERT(!nxt_blk || nxt_blk_sz % sizeof(Unit_t) == 0);
ASSERT(!nxt_blk || nxt_blk_sz >= allctr->min_block_size);
ASSERT(!nxt_blk || IS_MBC_BLK(nxt_blk));
@@ -1696,18 +1804,19 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
/* Need to grow in another block */
- if (!IS_PREV_BLK_FREE(blk) || IS_FIRST_BLK(blk)) {
+ if (!IS_PREV_BLK_FREE(blk)) {
cand_blk = NULL;
cand_blk_sz = 0;
}
else {
+ ASSERT(!IS_MBC_FIRST_ABLK(allctr, blk));
cand_blk = PREV_BLK(blk);
cand_blk_sz = old_blk_sz + PREV_BLK_SZ(blk);
if (!is_last_blk) {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, old_blk_sz);
if (IS_FREE_BLK(nxt_blk))
- cand_blk_sz += BLK_SZ(nxt_blk);
+ cand_blk_sz += MBC_FBLK_SZ(nxt_blk);
}
}
@@ -1743,8 +1852,9 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (new_blk) {
mbc_alloc_finalize(allctr,
new_blk,
- BLK_SZ(new_blk),
+ MBC_FBLK_SZ(new_blk),
GET_BLK_HDR_FLGS(new_blk),
+ FBLK_TO_MBC(new_blk),
blk_sz,
1,
alcu_flgs);
@@ -1754,6 +1864,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
return new_p;
}
else {
+ Carrier_t* crr;
Uint new_blk_sz;
UWord new_blk_flgs;
Uint prev_blk_sz;
@@ -1774,10 +1885,10 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
if (is_last_blk)
new_blk_flgs |= LAST_BLK_HDR_FLG;
else {
- nxt_blk = NXT_BLK(blk);
+ nxt_blk = BLK_AFTER(blk, old_blk_sz);
if (IS_FREE_BLK(nxt_blk)) {
new_blk_flgs |= GET_LAST_BLK_HDR_FLG(nxt_blk);
- new_blk_sz += BLK_SZ(nxt_blk);
+ new_blk_sz += MBC_FBLK_SZ(nxt_blk);
(*allctr->unlink_free_block)(allctr, nxt_blk, alcu_flgs);
}
}
@@ -1790,6 +1901,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
new_p = BLK2UMEM(new_blk);
blk_cpy_sz = MIN(blk_sz, old_blk_sz);
+ crr = FBLK_TO_MBC(new_blk);
if (prev_blk_sz >= blk_cpy_sz)
sys_memcpy(new_p, p, blk_cpy_sz - ABLK_HDR_SZ);
@@ -1800,6 +1912,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
new_blk,
new_blk_sz,
new_blk_flgs,
+ crr,
blk_sz,
0,
alcu_flgs);
@@ -1815,35 +1928,40 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs)
#ifdef DEBUG
#if HAVE_ERTS_MSEG
-#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % mseg_unit_size == 0)
+#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ) ASSERT((CSZ) % MSEG_UNIT_SZ == 0)
#else
#define ASSERT_MSEG_UNIT_SIZE_MULTIPLE(CSZ)
#endif
-#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ) \
-do { \
- ASSERT(IS_FIRST_BLK((B))); \
- ASSERT(IS_LAST_BLK((B))); \
- ASSERT((CSZ) == CARRIER_SZ((C))); \
- ASSERT((BSZ) == BLK_SZ((B))); \
- ASSERT((BSZ) % sizeof(Unit_t) == 0); \
- if ((SBC)) { \
- ASSERT(IS_SBC_BLK((B))); \
- ASSERT(IS_SB_CARRIER((C))); \
- } \
- else { \
- ASSERT(IS_MBC_BLK((B))); \
- ASSERT(IS_MB_CARRIER((C))); \
- } \
- if ((MSEGED)) { \
- ASSERT(IS_MSEG_CARRIER((C))); \
- ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ)); \
- } \
- else { \
- ASSERT(IS_SYS_ALLOC_CARRIER((C))); \
- ASSERT((CSZ) % sizeof(Unit_t) == 0); \
- } \
-} while (0)
+static void CHECK_1BLK_CARRIER(Allctr_t* A, int SBC, int MSEGED, Carrier_t* C,
+ UWord CSZ, Block_t* B, UWord BSZ)
+{
+ ASSERT(IS_LAST_BLK((B)));
+ ASSERT((CSZ) == CARRIER_SZ((C)));
+ ASSERT((BSZ) % sizeof(Unit_t) == 0);
+ if ((SBC)) {
+ ASSERT((BSZ) == SBC_BLK_SZ((B)));
+ ASSERT((char*)B == (char*)C + SBC_HEADER_SIZE);
+ ASSERT(IS_SBC_BLK((B)));
+ ASSERT(IS_SB_CARRIER((C)));
+ }
+ else {
+ ASSERT(IS_FREE_BLK(B));
+ ASSERT((BSZ) == MBC_FBLK_SZ((B)));
+ ASSERT(IS_MBC_FIRST_FBLK(A, (B)));
+ ASSERT(IS_MBC_BLK((B)));
+ ASSERT(IS_MB_CARRIER((C)));
+ ASSERT(FBLK_TO_MBC(B) == (C));
+ }
+ if ((MSEGED)) {
+ ASSERT(IS_MSEG_CARRIER((C)));
+ ASSERT_MSEG_UNIT_SIZE_MULTIPLE((CSZ));
+ }
+ else {
+ ASSERT(IS_SYS_ALLOC_CARRIER((C)));
+ ASSERT((CSZ) % sizeof(Unit_t) == 0);
+ }
+}
#else
#define CHECK_1BLK_CARRIER(A, SBC, MSEGED, C, CSZ, B, BSZ)
@@ -1865,37 +1983,18 @@ create_sbmbc(Allctr_t *allctr, Uint umem_sz)
crr = erts_alloc(ERTS_ALC_T_SBMBC, crr_sz);
INC_CC(allctr->calls.sbmbc_alloc);
- SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC);
-
- blk = MBC2FBLK(allctr, crr);
+ SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz -= sizeof(UWord);
-#endif
+ blk = MBC_TO_FIRST_BLK(allctr, crr);
- blk_sz = UNIT_FLOOR(crr_sz - allctr->mbc_header_size);
+ blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr));
- SET_MBC_BLK_FTR(((UWord *) blk)[-1]);
- SET_BLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_PREV_FREE|SBH_LAST_BLK);
-
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- *((Carrier_t **) NXT_BLK(blk)) = crr;
-#endif
+ SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr);
link_carrier(&allctr->sbmbc_list, crr);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz += sizeof(UWord);
-#endif
-
STAT_SBMBC_ALLOC(allctr, crr_sz);
CHECK_1BLK_CARRIER(allctr, 0, 0, crr, crr_sz, blk, blk_sz);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz -= sizeof(UWord);
-#endif
if (allctr->creating_mbc)
(*allctr->creating_mbc)(allctr, crr, ERTS_ALCU_FLG_SBMBC);
@@ -1909,11 +2008,10 @@ destroy_sbmbc(Allctr_t *allctr, Block_t *blk)
Uint crr_sz;
Carrier_t *crr;
- ASSERT(IS_FIRST_BLK(blk));
-
ASSERT(IS_MBC_BLK(blk));
+ ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
- crr = FBLK2MBC(allctr, blk);
+ crr = FIRST_BLK_TO_MBC(allctr, blk);
crr_sz = CARRIER_SZ(crr);
#ifdef DEBUG
@@ -1952,14 +2050,25 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
Uint blk_sz, bcrr_sz, crr_sz;
#if HAVE_ERTS_MSEG
int have_tried_sys_alloc = 0, have_tried_mseg = 0;
+ Uint mseg_flags;
#endif
#ifdef DEBUG
int is_mseg = 0;
#endif
+#if HALFWORD_HEAP
+ flags |= CFLG_FORCE_MSEG;
+#elif HAVE_SUPER_ALIGNED_MB_CARRIERS
+ if (flags & CFLG_MBC) {
+ flags |= CFLG_FORCE_MSEG;
+ }
+#endif
+
ASSERT((flags & CFLG_SBC && !(flags & CFLG_MBC))
|| (flags & CFLG_MBC && !(flags & CFLG_SBC)));
+ ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC));
+
blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
#if HAVE_ERTS_MSEG
@@ -1974,29 +2083,27 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
if (allctr->sbcs.curr.norm.mseg.no >= allctr->max_mseg_sbcs)
goto try_sys_alloc;
}
+#if !HAVE_SUPER_ALIGNED_MB_CARRIERS
else {
if (allctr->mbcs.curr.norm.mseg.no >= allctr->max_mseg_mbcs)
goto try_sys_alloc;
}
+#endif
try_mseg:
if (flags & CFLG_SBC) {
- crr_sz = blk_sz + allctr->sbc_header_size;
+ crr_sz = blk_sz + SBC_HEADER_SIZE;
+ mseg_flags = ERTS_MSEG_FLG_NONE;
}
else {
crr_sz = (*allctr->get_next_mbc_size)(allctr);
- if (crr_sz < allctr->mbc_header_size + blk_sz)
- crr_sz = allctr->mbc_header_size + blk_sz;
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz += sizeof(UWord);
-#endif
+ if (crr_sz < MBC_HEADER_SIZE(allctr) + blk_sz)
+ crr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
+ mseg_flags = ERTS_MSEG_FLG_2POW;
}
- crr_sz = MSEG_UNIT_CEILING(crr_sz);
- ASSERT(crr_sz % mseg_unit_size == 0);
- crr = (Carrier_t *) alcu_mseg_alloc(allctr, &crr_sz);
+ crr = (Carrier_t *) alcu_mseg_alloc(allctr, &crr_sz, mseg_flags);
if (!crr) {
have_tried_mseg = 1;
if (!(have_tried_sys_alloc || flags & CFLG_FORCE_MSEG))
@@ -2008,32 +2115,31 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
is_mseg = 1;
#endif
if (flags & CFLG_SBC) {
- SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_SBC);
+ SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_SBC, allctr);
STAT_MSEG_SBC_ALLOC(allctr, crr_sz, blk_sz);
goto sbc_final_touch;
}
else {
- SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_MBC);
+#ifndef ARCH_64
+ ASSERT(crr_sz <= MBC_SZ_MAX_LIMIT);
+#endif
+ SET_CARRIER_HDR(crr, crr_sz, SCH_MSEG|SCH_MBC, allctr);
STAT_MSEG_MBC_ALLOC(allctr, crr_sz);
goto mbc_final_touch;
}
try_sys_alloc:
+
#endif /* #if HAVE_ERTS_MSEG */
if (flags & CFLG_SBC) {
- bcrr_sz = blk_sz + allctr->sbc_header_size;
+ bcrr_sz = blk_sz + SBC_HEADER_SIZE;
}
else {
- bcrr_sz = allctr->mbc_header_size + blk_sz;
+ bcrr_sz = MBC_HEADER_SIZE(allctr) + blk_sz;
if (!(flags & CFLG_MAIN_CARRIER)
&& bcrr_sz < allctr->smallest_mbc_size)
bcrr_sz = allctr->smallest_mbc_size;
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- bcrr_sz += sizeof(UWord);
-#endif
-
}
crr_sz = (flags & CFLG_FORCE_SIZE
@@ -2057,7 +2163,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
}
}
if (flags & CFLG_SBC) {
- SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_SBC);
+ SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_SBC, allctr);
STAT_SYS_ALLOC_SBC_ALLOC(allctr, crr_sz, blk_sz);
#if HAVE_ERTS_MSEG
@@ -2066,8 +2172,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
blk = SBC2BLK(allctr, crr);
- SET_SBC_BLK_FTR(((UWord *) blk)[-1]);
- SET_BLK_HDR(blk, blk_sz, SBH_THIS_ALLOCED|SBH_PREV_FREE|SBH_LAST_BLK);
+ SET_SBC_BLK_HDR(blk, blk_sz);
link_carrier(&allctr->sbc_list, crr);
@@ -2075,28 +2180,18 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
}
else {
- SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC);
+ SET_CARRIER_HDR(crr, crr_sz, SCH_SYS_ALLOC|SCH_MBC, allctr);
STAT_SYS_ALLOC_MBC_ALLOC(allctr, crr_sz);
#if HAVE_ERTS_MSEG
mbc_final_touch:
#endif
- blk = MBC2FBLK(allctr, crr);
-
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz -= sizeof(UWord);
-#endif
-
- blk_sz = UNIT_FLOOR(crr_sz - allctr->mbc_header_size);
+ blk = MBC_TO_FIRST_BLK(allctr, crr);
- SET_MBC_BLK_FTR(((UWord *) blk)[-1]);
- SET_BLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_PREV_FREE|SBH_LAST_BLK);
+ blk_sz = UNIT_FLOOR(crr_sz - MBC_HEADER_SIZE(allctr));
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- *((Carrier_t **) NXT_BLK(blk)) = crr;
-#endif
+ SET_MBC_FBLK_HDR(blk, blk_sz, SBH_THIS_FREE|SBH_LAST_BLK, crr);
if (flags & CFLG_MAIN_CARRIER) {
ASSERT(!allctr->main_carrier);
@@ -2105,15 +2200,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
link_carrier(&allctr->mbc_list, crr);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz += sizeof(UWord);
-#endif
CHECK_1BLK_CARRIER(allctr, 0, is_mseg, crr, crr_sz, blk, blk_sz);
-#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
- if (allctr->mbc_header_size % sizeof(Unit_t) == 0)
- crr_sz -= sizeof(UWord);
-#endif
if (allctr->creating_mbc)
(*allctr->creating_mbc)(allctr, crr, 0);
@@ -2142,8 +2229,8 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
HARD_CHECK_BLK_CARRIER(allctr, old_blk);
- old_blk_sz = BLK_SZ(old_blk);
- old_crr = BLK2SBC(allctr, old_blk);
+ old_blk_sz = SBC_BLK_SZ(old_blk);
+ old_crr = BLK_TO_SBC(old_blk);
old_crr_sz = CARRIER_SZ(old_crr);
ASSERT(IS_SB_CARRIER(old_crr));
ASSERT(IS_SBC_BLK(old_blk));
@@ -2157,7 +2244,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
if (!(flags & CFLG_FORCE_SYS_ALLOC)) {
- new_crr_sz = new_blk_sz + allctr->sbc_header_size;
+ new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
new_crr_sz = MSEG_UNIT_CEILING(new_crr_sz);
new_crr = (Carrier_t *) alcu_mseg_realloc(allctr,
old_crr,
@@ -2166,7 +2253,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
if (new_crr) {
SET_CARRIER_SZ(new_crr, new_crr_sz);
new_blk = SBC2BLK(allctr, new_crr);
- SET_BLK_SZ(new_blk, new_blk_sz);
+ SET_SBC_BLK_SZ(new_blk, new_blk_sz);
STAT_MSEG_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
relink_carrier(&allctr->sbc_list, new_crr);
CHECK_1BLK_CARRIER(allctr, 1, 1, new_crr, new_crr_sz,
@@ -2174,6 +2261,11 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
DEBUG_SAVE_ALIGNMENT(new_crr);
return new_blk;
}
+#if HALFWORD_HEAP
+ /* Old carrier unchanged; restore stat */
+ STAT_MSEG_SBC_ALLOC(allctr, old_crr_sz, old_blk_sz);
+ return NULL;
+#endif
create_flags |= CFLG_FORCE_SYS_ALLOC; /* since mseg_realloc()
failed */
}
@@ -2196,7 +2288,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
else {
if (!(flags & CFLG_FORCE_MSEG)) {
#endif /* #if HAVE_ERTS_MSEG */
- new_bcrr_sz = new_blk_sz + allctr->sbc_header_size;
+ new_bcrr_sz = new_blk_sz + SBC_HEADER_SIZE;
new_crr_sz = (flags & CFLG_FORCE_SIZE
? UNIT_CEILING(new_bcrr_sz)
: SYS_ALLOC_CARRIER_CEILING(new_bcrr_sz));
@@ -2208,7 +2300,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
sys_realloc_success:
SET_CARRIER_SZ(new_crr, new_crr_sz);
new_blk = SBC2BLK(allctr, new_crr);
- SET_BLK_SZ(new_blk, new_blk_sz);
+ SET_SBC_BLK_SZ(new_blk, new_blk_sz);
STAT_SYS_ALLOC_SBC_FREE(allctr, old_crr_sz, old_blk_sz);
STAT_SYS_ALLOC_SBC_ALLOC(allctr, new_crr_sz, new_blk_sz);
relink_carrier(&allctr->sbc_list, new_crr);
@@ -2218,7 +2310,7 @@ resize_carrier(Allctr_t *allctr, Block_t *old_blk, Uint umem_sz, UWord flags)
return new_blk;
}
else if (new_crr_sz > UNIT_CEILING(new_bcrr_sz)) {
- new_crr_sz = new_blk_sz + allctr->sbc_header_size;
+ new_crr_sz = new_blk_sz + SBC_HEADER_SIZE;
new_crr_sz = UNIT_CEILING(new_crr_sz);
new_crr = (Carrier_t *) alcu_sys_realloc(allctr,
(void *) old_crr,
@@ -2262,11 +2354,9 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
Uint is_mseg = 0;
#endif
- ASSERT(IS_FIRST_BLK(blk));
-
if (IS_SBC_BLK(blk)) {
- Uint blk_sz = BLK_SZ(blk);
- crr = BLK2SBC(allctr, blk);
+ Uint blk_sz = SBC_BLK_SZ(blk);
+ crr = BLK_TO_SBC(blk);
crr_sz = CARRIER_SZ(crr);
ASSERT(IS_LAST_BLK(blk));
@@ -2276,7 +2366,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr)) {
is_mseg++;
- ASSERT(crr_sz % mseg_unit_size == 0);
+ ASSERT(crr_sz % MSEG_UNIT_SZ == 0);
STAT_MSEG_SBC_FREE(allctr, crr_sz, blk_sz);
}
else
@@ -2287,7 +2377,8 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
}
else {
- crr = FBLK2MBC(allctr, blk);
+ ASSERT(IS_MBC_FIRST_FBLK(allctr, blk));
+ crr = FIRST_BLK_TO_MBC(allctr, blk);
crr_sz = CARRIER_SZ(crr);
#ifdef DEBUG
@@ -2305,7 +2396,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk)
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(crr)) {
is_mseg++;
- ASSERT(crr_sz % mseg_unit_size == 0);
+ ASSERT(crr_sz % MSEG_UNIT_SZ == 0);
STAT_MSEG_MBC_FREE(allctr, crr_sz);
}
else
@@ -3430,12 +3521,7 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
if (allctr->dd.use)
ERTS_ALCU_HANDLE_DD_IN_OP(allctr, 1);
#endif
-#if HALFWORD_HEAP
- blk = create_carrier(allctr, size,
- CFLG_SBC | CFLG_FORCE_MSEG);
-#else
blk = create_carrier(allctr, size, CFLG_SBC);
-#endif
res = blk ? BLK2UMEM(blk) : NULL;
}
else
@@ -3506,24 +3592,20 @@ erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
void *
erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
{
- int pref_ix;
Allctr_t *pref_allctr;
void *res;
- pref_ix = get_pref_allctr(extra, &pref_allctr);
+ pref_allctr = get_pref_allctr(extra);
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);
- res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
+ res = do_erts_alcu_alloc(type, pref_allctr, size);
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
- if (res)
- res = put_used_allctr(res, pref_ix, size);
-
DEBUG_CHECK_ALIGNMENT(res);
@@ -3644,21 +3726,20 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
{
if (p) {
Allctr_t *pref_allctr, *used_allctr;
- void *ptr;
- get_pref_allctr(extra, &pref_allctr);
- ptr = get_used_allctr(extra, p, &used_allctr, NULL);
+ pref_allctr = get_pref_allctr(extra);
+ used_allctr = get_used_allctr(extra, p, NULL);
if (pref_allctr != used_allctr)
enqueue_dealloc_other_instance(type,
used_allctr,
- ptr,
+ p,
(used_allctr->dd.ix
- pref_allctr->dd.ix));
else {
if (used_allctr->thread_safe)
erts_mtx_lock(&used_allctr->mutex);
ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
- do_erts_alcu_free(type, used_allctr, ptr);
+ do_erts_alcu_free(type, used_allctr, p);
if (used_allctr->thread_safe)
erts_mtx_unlock(&used_allctr->mutex);
}
@@ -3739,13 +3820,13 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
if (IS_MBC_BLK(blk))
res = mbc_realloc(allctr, p, size, alcu_flgs);
else {
- Uint used_sz = allctr->sbc_header_size + ABLK_HDR_SZ + size;
+ Uint used_sz = SBC_HEADER_SIZE + ABLK_HDR_SZ + size;
Uint crr_sz;
Uint diff_sz_val;
Uint crr_sz_val;
#if HAVE_ERTS_MSEG
- if (IS_SYS_ALLOC_CARRIER(BLK2SBC(allctr, blk)))
+ if (IS_SYS_ALLOC_CARRIER(BLK_TO_SBC(blk)))
#endif
crr_sz = SYS_ALLOC_CARRIER_CEILING(used_sz);
#if HAVE_ERTS_MSEG
@@ -3775,7 +3856,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
if (res) {
sys_memcpy((void*) res,
(void*) p,
- MIN(BLK_SZ(blk) - ABLK_HDR_SZ, size));
+ MIN(SBC_BLK_SZ(blk) - ABLK_HDR_SZ, size));
destroy_carrier(allctr, blk);
}
}
@@ -3798,16 +3879,12 @@ do_erts_alcu_realloc(ErtsAlcType_t type,
else if (alcu_flgs & ERTS_ALCU_FLG_FAIL_REALLOC_MOVE)
return NULL;
else {
-#if HALFWORD_HEAP
- new_blk = create_carrier(allctr, size, CFLG_SBC | CFLG_FORCE_MSEG);
-#else
new_blk = create_carrier(allctr, size, CFLG_SBC);
-#endif
if (new_blk) {
res = BLK2UMEM(new_blk);
sys_memcpy((void *) res,
(void *) p,
- MIN(BLK_SZ(blk) - ABLK_HDR_SZ, size));
+ MIN(MBC_ABLK_SZ(blk) - ABLK_HDR_SZ, size));
mbc_free(allctr, p);
}
else
@@ -3966,16 +4043,15 @@ static ERTS_INLINE void *
realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
int force_move)
{
- int pref_ix;
- void *ptr, *res;
+ void *res;
Allctr_t *pref_allctr, *used_allctr;
UWord old_user_size;
if (!p)
return erts_alcu_alloc_thr_pref(type, extra, size);
- pref_ix = get_pref_allctr(extra, &pref_allctr);
- ptr = get_used_allctr(extra, p, &used_allctr, &old_user_size);
+ pref_allctr = get_pref_allctr(extra);
+ used_allctr = get_used_allctr(extra, p, &old_user_size);
ASSERT(used_allctr && pref_allctr);
@@ -3985,56 +4061,33 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
ERTS_ALCU_DBG_CHK_THR_ACCESS(used_allctr);
res = do_erts_alcu_realloc(type,
used_allctr,
- ptr,
- size + sizeof(UWord),
+ p,
+ size,
0);
if (used_allctr->thread_safe)
erts_mtx_unlock(&used_allctr->mutex);
- if (res)
- res = put_used_allctr(res, pref_ix, size);
}
else {
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
- res = do_erts_alcu_alloc(type, pref_allctr, size + sizeof(UWord));
- if (pref_allctr->thread_safe && (!force_move
- || used_allctr != pref_allctr))
+ res = do_erts_alcu_alloc(type, pref_allctr, size);
+ if (pref_allctr->thread_safe && used_allctr != pref_allctr) {
erts_mtx_unlock(&pref_allctr->mutex);
+ }
if (res) {
- Block_t *blk;
- size_t cpy_size;
-
- res = put_used_allctr(res, pref_ix, size);
-
DEBUG_CHECK_ALIGNMENT(res);
- blk = UMEM2BLK(ptr);
- if (old_user_size != ERTS_AU_PREF_ALLOC_SIZE_MASK)
- cpy_size = old_user_size;
- else {
- if (used_allctr->thread_safe && (!force_move
- || used_allctr != pref_allctr))
- erts_mtx_lock(&used_allctr->mutex);
- ERTS_SMP_LC_ASSERT(!used_allctr->thread_safe ||
- erts_lc_mtx_is_locked(&used_allctr->mutex));
- cpy_size = BLK_SZ(blk);
- if (used_allctr->thread_safe && (!force_move
- || used_allctr != pref_allctr))
- erts_mtx_unlock(&used_allctr->mutex);
- cpy_size -= ABLK_HDR_SZ + sizeof(UWord);
- }
- if (cpy_size > size)
- cpy_size = size;
- sys_memcpy(res, p, cpy_size);
+ sys_memcpy(res, p, MIN(size,old_user_size));
- if (!force_move || used_allctr != pref_allctr)
+ if (used_allctr != pref_allctr) {
enqueue_dealloc_other_instance(type,
used_allctr,
- ptr,
+ p,
(used_allctr->dd.ix
- pref_allctr->dd.ix));
+ }
else {
- do_erts_alcu_free(type, used_allctr, ptr);
+ do_erts_alcu_free(type, used_allctr, p);
ASSERT(pref_allctr == used_allctr);
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
@@ -4111,7 +4164,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->ramv = init->ramv;
allctr->main_carrier_size = init->mmbcs;
- allctr->sbc_threshold = init->sbct;
+
#if HAVE_ERTS_MSEG
allctr->mseg_opt.abs_shrink_th = init->asbcst;
allctr->mseg_opt.rel_shrink_th = init->rsbcst;
@@ -4120,20 +4173,29 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->mbc_move_threshold = init->rmbcmt;
#if HAVE_ERTS_MSEG
allctr->max_mseg_sbcs = init->mmsbc;
+# if HAVE_SUPER_ALIGNED_MB_CARRIERS
+ allctr->max_mseg_mbcs = ~(Uint)0;
+# else
allctr->max_mseg_mbcs = init->mmmbc;
+# endif
#endif
allctr->largest_mbc_size = MAX(init->lmbcs, init->smbcs);
+#ifndef ARCH_64
+ if (allctr->largest_mbc_size > MBC_SZ_MAX_LIMIT) {
+ allctr->largest_mbc_size = MBC_SZ_MAX_LIMIT;
+ }
+#endif
allctr->smallest_mbc_size = init->smbcs;
allctr->mbc_growth_stages = MAX(1, init->mbcgs);
if (allctr->min_block_size < ABLK_HDR_SZ)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
- + sizeof(UWord));
+ + sizeof(FreeBlkFtr_t));
#if ERTS_SMP
if (init->tpref) {
- Uint sz = sizeof(Block_t);
+ Uint sz = ABLK_HDR_SZ;
sz += ERTS_ALCU_DD_FIX_TYPE_OFFS*sizeof(UWord);
if (init->fix)
sz += sizeof(UWord);
@@ -4143,6 +4205,23 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
}
#endif
+ allctr->sbc_threshold = init->sbct;
+#ifndef ARCH_64
+ if (allctr->sbc_threshold > 0) {
+ Uint max_mbc_block_sz = UNIT_CEILING(allctr->sbc_threshold - 1 + ABLK_HDR_SZ);
+ if (max_mbc_block_sz + UNIT_FLOOR(allctr->min_block_size - 1) > MBC_ABLK_SZ_MASK
+ || max_mbc_block_sz < allctr->sbc_threshold) { /* wrap around */
+ /*
+ * By limiting sbc_threshold to (hard limit - min_block_size)
+ * we avoid having to split off free "residue blocks"
+ * smaller than min_block_size.
+ */
+ max_mbc_block_sz = MBC_ABLK_SZ_MASK - UNIT_FLOOR(allctr->min_block_size - 1);
+ allctr->sbc_threshold = max_mbc_block_sz - ABLK_HDR_SZ + 1;
+ }
+ }
+#endif
+
allctr->sbmbc_threshold = init->sbmbct;
@@ -4158,7 +4237,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->sbmbc_size = init->sbmbcs;
min_size = allctr->sbmbc_threshold;
min_size += allctr->min_block_size;
- min_size += allctr->mbc_header_size;
+ min_size += MBC_HEADER_SIZE(allctr);
if (allctr->sbmbc_size < min_size)
allctr->sbmbc_size = min_size;
}
@@ -4203,59 +4282,26 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (!allctr->get_next_mbc_size)
allctr->get_next_mbc_size = get_next_mbc_size;
- if (allctr->mbc_header_size < sizeof(Carrier_t))
- goto error;
#ifdef ERTS_SMP
allctr->dd.use = 0;
if (init->tpref) {
- allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
- + FBLK_FTR_SZ
- + ABLK_HDR_SZ
- + sizeof(UWord))
- - ABLK_HDR_SZ
- - sizeof(UWord));
- allctr->sbc_header_size = (UNIT_CEILING(sizeof(Carrier_t)
- + FBLK_FTR_SZ
- + ABLK_HDR_SZ
- + sizeof(UWord))
- - ABLK_HDR_SZ
- - sizeof(UWord));
-
allctr->dd.use = 1;
init_dd_queue(&allctr->dd.q);
allctr->dd.ix = init->ix;
}
- else
#endif
- {
- allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
- + FBLK_FTR_SZ
- + ABLK_HDR_SZ)
- - ABLK_HDR_SZ);
- allctr->sbc_header_size = (UNIT_CEILING(sizeof(Carrier_t)
- + FBLK_FTR_SZ
- + ABLK_HDR_SZ)
- - ABLK_HDR_SZ);
- }
if (allctr->main_carrier_size) {
Block_t *blk;
-#if HALFWORD_HEAP
- blk = create_carrier(allctr,
- allctr->main_carrier_size,
- CFLG_MBC
- | CFLG_FORCE_SIZE
- | CFLG_FORCE_MSEG
- | CFLG_MAIN_CARRIER);
-#else
blk = create_carrier(allctr,
allctr->main_carrier_size,
CFLG_MBC
| CFLG_FORCE_SIZE
+#if !HALFWORD_HEAP && !HAVE_SUPER_ALIGNED_MB_CARRIERS
| CFLG_FORCE_SYS_ALLOC
- | CFLG_MAIN_CARRIER);
#endif
+ | CFLG_MAIN_CARRIER);
if (!blk)
goto error;
@@ -4303,9 +4349,9 @@ erts_alcu_stop(Allctr_t *allctr)
while (allctr->sbc_list.first)
destroy_carrier(allctr, SBC2BLK(allctr, allctr->sbc_list.first));
while (allctr->mbc_list.first)
- destroy_carrier(allctr, MBC2FBLK(allctr, allctr->mbc_list.first));
+ destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first));
while (allctr->sbmbc_list.first)
- destroy_sbmbc(allctr, MBC2FBLK(allctr, allctr->sbmbc_list.first));
+ destroy_sbmbc(allctr, MBC_TO_FIRST_BLK(allctr, allctr->sbmbc_list.first));
#ifdef USE_THREADS
if (allctr->thread_safe)
@@ -4319,17 +4365,9 @@ erts_alcu_stop(Allctr_t *allctr)
void
erts_alcu_init(AlcUInit_t *init)
{
-
+ ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
- mseg_unit_size = erts_mseg_unit_size();
-
- if (mseg_unit_size % sizeof(Unit_t)) /* A little paranoid... */
- erl_exit(-1,
- "Mseg unit size (%d) not evenly divideble by "
- "internal unit size of alloc_util (%d)\n",
- mseg_unit_size,
- sizeof(Unit_t));
-
+ ASSERT(erts_mseg_unit_size() == MSEG_UNIT_SZ);
max_mseg_carriers = init->mmc;
sys_alloc_carrier_size = MSEG_UNIT_CEILING(init->ycs);
#else /* #if HAVE_ERTS_MSEG */
@@ -4372,11 +4410,10 @@ erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2)
case 0x00b: return (unsigned long) CARRIER_SZ((Carrier_t *) a1);
case 0x00c: return (unsigned long) SBC2BLK((Allctr_t *) a1,
(Carrier_t *) a2);
- case 0x00d: return (unsigned long) BLK2SBC((Allctr_t *) a1,
- (Block_t *) a2);
- case 0x00e: return (unsigned long) MBC2FBLK((Allctr_t *) a1,
+ case 0x00d: return (unsigned long) BLK_TO_SBC((Block_t *) a2);
+ case 0x00e: return (unsigned long) MBC_TO_FIRST_BLK((Allctr_t *) a1,
(Carrier_t *) a2);
- case 0x00f: return (unsigned long) FBLK2MBC((Allctr_t *) a1,
+ case 0x00f: return (unsigned long) FIRST_BLK_TO_MBC((Allctr_t *) a1,
(Block_t *) a2);
case 0x010: return (unsigned long) ((Allctr_t *) a1)->mbc_list.first;
case 0x011: return (unsigned long) ((Allctr_t *) a1)->mbc_list.last;
@@ -4388,7 +4425,7 @@ erts_alcu_test(unsigned long op, unsigned long a1, unsigned long a2)
case 0x017: return (unsigned long) ((Allctr_t *) a1)->min_block_size;
case 0x018: return (unsigned long) NXT_BLK((Block_t *) a1);
case 0x019: return (unsigned long) PREV_BLK((Block_t *) a1);
- case 0x01a: return (unsigned long) IS_FIRST_BLK((Block_t *) a1);
+ case 0x01a: return (unsigned long) IS_MBC_FIRST_BLK((Allctr_t*)a1, (Block_t *) a2);
case 0x01b: return (unsigned long) sizeof(Unit_t);
default: ASSERT(0); return ~((unsigned long) 0);
}
@@ -4432,6 +4469,13 @@ erts_alcu_verify_unused_ts(Allctr_t *allctr)
#endif
}
+#ifdef DEBUG
+int is_sbc_blk(Block_t* blk)
+{
+ return IS_SBC_BLK(blk);
+}
+#endif
+
#ifdef ERTS_ALLOC_UTIL_HARD_DEBUG
static void
@@ -4441,34 +4485,37 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
CarrierList_t *cl;
if (IS_SBC_BLK(iblk)) {
- Carrier_t *sbc = BLK2SBC(allctr, iblk);
+ Carrier_t *sbc = BLK_TO_SBC(iblk);
ASSERT(SBC2BLK(allctr, sbc) == iblk);
- ASSERT(IS_ALLOCED_BLK(iblk));
- ASSERT(IS_FIRST_BLK(iblk));
- ASSERT(IS_LAST_BLK(iblk));
- ASSERT(CARRIER_SZ(sbc) - allctr->sbc_header_size >= BLK_SZ(iblk));
+ ASSERT(CARRIER_SZ(sbc) - SBC_HEADER_SIZE >= SBC_BLK_SZ(iblk));
#if HAVE_ERTS_MSEG
if (IS_MSEG_CARRIER(sbc)) {
- ASSERT(CARRIER_SZ(sbc) % mseg_unit_size == 0);
+ ASSERT(CARRIER_SZ(sbc) % MSEG_UNIT_SZ == 0);
}
#endif
crr = sbc;
cl = &allctr->sbc_list;
}
else {
- Carrier_t *mbc = NULL;
Block_t *prev_blk = NULL;
Block_t *blk;
char *carrier_end;
Uint is_free_blk;
Uint tot_blk_sz;
Uint blk_sz;
+ int has_wrapped_around = 0;
blk = iblk;
tot_blk_sz = 0;
+ crr = BLK_TO_MBC(blk);
+ ASSERT(IS_MB_CARRIER(crr));
+ /* Step around the carrier one whole lap starting at 'iblk'
+ */
while (1) {
+ ASSERT(IS_MBC_BLK(blk));
+ ASSERT(BLK_TO_MBC(blk) == crr);
if (prev_blk) {
ASSERT(NXT_BLK(prev_blk) == blk);
@@ -4481,18 +4528,16 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
}
}
- if (mbc) {
+ if (has_wrapped_around) {
+ ASSERT(((Block_t *) crr) < blk);
if (blk == iblk)
break;
- ASSERT(((Block_t *) mbc) < blk && blk < iblk);
+ ASSERT(blk < iblk);
}
else
ASSERT(blk >= iblk);
-
- ASSERT(IS_MBC_BLK(blk));
-
- blk_sz = BLK_SZ(blk);
+ blk_sz = MBC_BLK_SZ(blk);
ASSERT(blk_sz % sizeof(Unit_t) == 0);
ASSERT(blk_sz >= allctr->min_block_size);
@@ -4500,44 +4545,40 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
tot_blk_sz += blk_sz;
is_free_blk = (int) IS_FREE_BLK(blk);
- if(is_free_blk) {
- if (IS_NOT_LAST_BLK(blk))
- ASSERT(*((UWord *) (((char *) blk)+blk_sz-sizeof(UWord)))
- == blk_sz);
- }
+ ASSERT(!is_free_blk
+ || IS_LAST_BLK(blk)
+ || PREV_BLK_SZ(((char *) blk)+blk_sz) == blk_sz);
if (allctr->check_block)
(*allctr->check_block)(allctr, blk, (int) is_free_blk);
if (IS_LAST_BLK(blk)) {
carrier_end = ((char *) NXT_BLK(blk));
- mbc = *((Carrier_t **) NXT_BLK(blk));
+ has_wrapped_around = 1;
prev_blk = NULL;
- blk = MBC2FBLK(allctr, mbc);
- ASSERT(IS_FIRST_BLK(blk));
+ blk = MBC_TO_FIRST_BLK(allctr, crr);
+ ASSERT(IS_MBC_FIRST_BLK(allctr,blk));
}
else {
prev_blk = blk;
blk = NXT_BLK(blk);
}
}
-
- ASSERT(IS_MB_CARRIER(mbc));
- ASSERT((((char *) mbc)
- + allctr->mbc_header_size
+
+ ASSERT((((char *) crr)
+ + MBC_HEADER_SIZE(allctr)
+ tot_blk_sz) == carrier_end);
- ASSERT(((char *) mbc) + CARRIER_SZ(mbc) - sizeof(Unit_t) <= carrier_end
- && carrier_end <= ((char *) mbc) + CARRIER_SZ(mbc));
+ ASSERT(((char *) crr) + CARRIER_SZ(crr) - sizeof(Unit_t) <= carrier_end
+ && carrier_end <= ((char *) crr) + CARRIER_SZ(crr));
if (allctr->check_mbc)
- (*allctr->check_mbc)(allctr, mbc);
+ (*allctr->check_mbc)(allctr, crr);
#if HAVE_ERTS_MSEG
- if (IS_MSEG_CARRIER(mbc)) {
- ASSERT(CARRIER_SZ(mbc) % mseg_unit_size == 0);
+ if (IS_MSEG_CARRIER(crr)) {
+ ASSERT(CARRIER_SZ(crr) % MSEG_UNIT_SZ == 0);
}
#endif
- crr = mbc;
cl = &allctr->mbc_list;
}
@@ -4559,4 +4600,5 @@ check_blk_carrier(Allctr_t *allctr, Block_t *iblk)
#endif
}
-#endif
+#endif /* ERTS_ALLOC_UTIL_HARD_DEBUG */
+
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index cedf4ccf85..e0754e7f69 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -216,16 +216,40 @@ erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#define UNIT_FLOOR(X) ((X) & UNIT_MASK)
#define UNIT_CEILING(X) UNIT_FLOOR((X) + INV_UNIT_MASK)
+#define FLG_MASK INV_UNIT_MASK
+#define SBC_BLK_SZ_MASK UNIT_MASK
+#define MBC_FBLK_SZ_MASK UNIT_MASK
+#define CARRIER_SZ_MASK UNIT_MASK
-#define SZ_MASK (~((UWord) 0) << 3)
-#define FLG_MASK (~(SZ_MASK))
+#if HAVE_ERTS_MSEG
+# ifdef ARCH_64
+# define MBC_ABLK_OFFSET_BITS 24
+# elif HAVE_SUPER_ALIGNED_MB_CARRIERS
+# define MBC_ABLK_OFFSET_BITS 9
+ /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
+# endif
+#endif
+#ifndef MBC_ABLK_OFFSET_BITS
+# define MBC_ABLK_OFFSET_BITS 0 /* no carrier offset in block header */
+#endif
+
+#if MBC_ABLK_OFFSET_BITS
+# define MBC_ABLK_OFFSET_SHIFT (sizeof(UWord)*8 - MBC_ABLK_OFFSET_BITS)
+# define MBC_ABLK_OFFSET_MASK (~((UWord)0) << MBC_ABLK_OFFSET_SHIFT)
+# define MBC_ABLK_SZ_MASK (~MBC_ABLK_OFFSET_MASK & ~FLG_MASK)
+# define HAVE_ERTS_SBMBC 0
+#else
+# define MBC_ABLK_SZ_MASK (~FLG_MASK)
+# define HAVE_ERTS_SBMBC 1
+#endif
-#define BLK_SZ(B) \
- (*((Block_t *) (B)) & SZ_MASK)
+#define MBC_ABLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_ABLK_SZ_MASK)
+#define MBC_FBLK_SZ(B) (ASSERT_EXPR(!is_sbc_blk(B)), (B)->bhdr & MBC_FBLK_SZ_MASK)
+#define SBC_BLK_SZ(B) (ASSERT_EXPR(is_sbc_blk(B)), (B)->bhdr & SBC_BLK_SZ_MASK)
#define CARRIER_SZ(C) \
- ((C)->chdr & SZ_MASK)
+ ((C)->chdr & CARRIER_SZ_MASK)
extern int erts_have_sbmbc_alloc;
@@ -236,6 +260,7 @@ struct Carrier_t_ {
UWord chdr;
Carrier_t *next;
Carrier_t *prev;
+ Allctr_t *allctr;
};
typedef struct {
@@ -243,8 +268,19 @@ typedef struct {
Carrier_t *last;
} CarrierList_t;
-typedef UWord Block_t;
-typedef UWord FreeBlkFtr_t;
+typedef struct {
+ UWord bhdr;
+#if !MBC_ABLK_OFFSET_BITS
+ Carrier_t *carrier;
+#else
+ union {
+ Carrier_t *carrier; /* if free */
+ char udata__[1]; /* if allocated */
+ }u;
+#endif
+} Block_t;
+
+typedef UWord FreeBlkFtr_t; /* Footer of a free block */
typedef struct {
UWord giga_no;
@@ -381,8 +417,6 @@ struct Allctr_t_ {
#endif
/* */
- Uint mbc_header_size;
- Uint sbc_header_size;
Uint min_mbc_size;
Uint min_mbc_first_free_size;
Uint min_block_size;
@@ -469,6 +503,9 @@ void erts_alcu_verify_unused_ts(Allctr_t *allctr);
unsigned long erts_alcu_test(unsigned long, unsigned long, unsigned long);
+#ifdef DEBUG
+int is_sbc_blk(Block_t*);
+#endif
#endif /* #if defined(GET_ERL_ALLOC_UTIL_IMPL)
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 5bdb752d3a..86b4696d8f 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -91,6 +91,7 @@ struct AOFF_RBTree_t_ {
AOFF_RBTree_t *right;
Uint max_sz; /* of all blocks in this sub-tree */
};
+#define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
#ifdef HARD_DEBUG
static AOFF_RBTree_t * check_tree(AOFF_RBTree_t* root, Uint);
@@ -102,7 +103,7 @@ static AOFF_RBTree_t * check_tree(AOFF_RBTree_t* root, Uint);
*/
static ERTS_INLINE Uint node_max_size(AOFF_RBTree_t *x)
{
- Uint sz = BLK_SZ(x);
+ Uint sz = AOFF_BLK_SZ(x);
if (x->left && x->left->max_sz > sz) {
sz = x->left->max_sz;
}
@@ -183,7 +184,6 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
sys_memcpy((void *) alc, (void *) &zero.allctr, sizeof(AOFFAllctr_t));
- allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = sizeof(AOFF_RBTree_t);
@@ -587,7 +587,7 @@ aoff_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
AOFF_RBTree_t *blk = (AOFF_RBTree_t *) block;
AOFF_RBTree_t **root = ((flags & ERTS_ALCU_FLG_SBMBC)
? &alc->sbmbc_root : &alc->mbc_root);
- Uint blk_sz = BLK_SZ(blk);
+ Uint blk_sz = AOFF_BLK_SZ(blk);
#ifdef HARD_DEBUG
check_tree(*root, 0);
@@ -659,7 +659,7 @@ aoff_get_free_block(Allctr_t *allctr, Uint size,
if (x->left && x->left->max_sz >= size) {
x = x->left;
}
- else if (BLK_SZ(x) >= size) {
+ else if (AOFF_BLK_SZ(x) >= size) {
blk = x;
break;
}
@@ -910,12 +910,12 @@ check_tree(AOFF_RBTree_t* root, Uint size)
ASSERT(x->right > x);
ASSERT(x->right->max_sz <= x->max_sz);
}
- ASSERT(x->max_sz >= BLK_SZ(x));
- ASSERT(x->max_sz == BLK_SZ(x)
+ ASSERT(x->max_sz >= AOFF_BLK_SZ(x));
+ ASSERT(x->max_sz == AOFF_BLK_SZ(x)
|| x->max_sz == (x->left ? x->left->max_sz : 0)
|| x->max_sz == (x->right ? x->right->max_sz : 0));
- if (size && BLK_SZ(x) >= size) {
+ if (size && AOFF_BLK_SZ(x) >= size) {
if (!res || x < res) {
res = x;
}
@@ -956,7 +956,7 @@ print_tree_aux(AOFF_RBTree_t *x, int indent)
}
fprintf(stderr, "%s: sz=%lu addr=0x%lx max_size=%lu\r\n",
IS_BLACK(x) ? "BLACK" : "RED",
- BLK_SZ(x), (Uint)x, x->max_sz);
+ AOFF_BLK_SZ(x), (Uint)x, x->max_sz);
print_tree_aux(x->left, indent + INDENT_STEP);
}
}
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index f308039baf..f2ca193ace 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -122,6 +122,8 @@ typedef struct {
#endif
} ErtsAsyncData;
+#if defined(USE_THREADS) && defined(USE_VM_PROBES)
+
/*
* Some compilers, e.g. GCC 4.2.1 and -O3, will optimize away DTrace
* calls if they're the last thing in the function. :-(
@@ -129,6 +131,7 @@ typedef struct {
* https://github.com/memcached/memcached/commit/6298b3978687530bc9d219b6ac707a1b681b2a46
*/
static unsigned gcc_optimizer_hack = 0;
+#endif
int erts_async_max_threads; /* Initialized by erl_init.c */
int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */
@@ -281,8 +284,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
len = -1;
DTRACE2(aio_pool_add, port_str, len);
}
-#endif
gcc_optimizer_hack++;
+#endif
}
static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
@@ -379,10 +382,15 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
static ERTS_INLINE void call_async_ready(ErtsAsync *a)
{
+#if ERTS_USE_ASYNC_READY_Q
Port *p = erts_id2port_sflgs(a->port,
NULL,
0,
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+#else
+ Port *p = erts_thr_id2port_sflgs(a->port,
+ ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+#endif
if (!p) {
if (a->async_free)
a->async_free(a->async_data);
@@ -392,7 +400,11 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a)
if (a->async_free)
a->async_free(a->async_data);
}
+#if ERTS_USE_ASYNC_READY_Q
erts_port_release(p);
+#else
+ erts_thr_port_release(p);
+#endif
}
if (a->pdl)
driver_pdl_dec_refc(a->pdl);
@@ -600,7 +612,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
sched_id = 1;
#endif
- prt = erts_drvport2port(ix);
+ prt = erts_drvport2port(ix, NULL);
if (!prt)
return -1;
@@ -612,7 +624,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
a->sched_id = sched_id;
#endif
a->hndl = (DE_Handle*)prt->drv_ptr->handle;
- a->port = prt->id;
+ a->port = prt->common.id;
a->pdl = NULL;
a->async_data = async_data;
a->async_invoke = async_invoke;
diff --git a/erts/emulator/beam/erl_bestfit_alloc.c b/erts/emulator/beam/erl_bestfit_alloc.c
index c50fdeb4e8..743cbd93c9 100644
--- a/erts/emulator/beam/erl_bestfit_alloc.c
+++ b/erts/emulator/beam/erl_bestfit_alloc.c
@@ -73,6 +73,8 @@
#define SET_RED(N) (((RBTree_t *) (N))->flags |= RED_FLG)
#define SET_BLACK(N) (((RBTree_t *) (N))->flags &= ~RED_FLG)
+#define BF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
+
#undef ASSERT
#define ASSERT ASSERT_EXPR
@@ -177,7 +179,6 @@ erts_bfalc_start(BFAllctr_t *bfallctr,
bfallctr->address_order = bfinit->ao;
- allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = (bfinit->ao
@@ -592,7 +593,7 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
? &bfallctr->sbmbc_root
: &bfallctr->mbc_root);
RBTree_t *blk = (RBTree_t *) block;
- Uint blk_sz = BLK_SZ(blk);
+ Uint blk_sz = BF_BLK_SZ(blk);
@@ -610,7 +611,7 @@ aobf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
while (1) {
Uint size;
- size = BLK_SZ(x);
+ size = BF_BLK_SZ(x);
if (blk_sz < size || (blk_sz == size && blk < x)) {
if (!x->left) {
@@ -668,7 +669,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
ASSERT(!cand_blk || cand_size >= size);
while (x) {
- blk_sz = BLK_SZ(x);
+ blk_sz = BF_BLK_SZ(x);
if (blk_sz < size) {
x = x->right;
}
@@ -686,7 +687,7 @@ aobf_get_free_block(Allctr_t *allctr, Uint size,
#endif
if (cand_blk) {
- blk_sz = BLK_SZ(blk);
+ blk_sz = BF_BLK_SZ(blk);
if (cand_size < blk_sz)
return NULL; /* cand_blk was better */
if (cand_size == blk_sz && ((void *) cand_blk) < ((void *) blk))
@@ -711,7 +712,7 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
? &bfallctr->sbmbc_root
: &bfallctr->mbc_root);
RBTree_t *blk = (RBTree_t *) block;
- Uint blk_sz = BLK_SZ(blk);
+ Uint blk_sz = BF_BLK_SZ(blk);
SET_TREE_NODE(blk);
@@ -730,7 +731,7 @@ bf_link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
while (1) {
Uint size;
- size = BLK_SZ(x);
+ size = BF_BLK_SZ(x);
if (blk_sz == size) {
@@ -796,7 +797,7 @@ bf_unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
else if (LIST_NEXT(x)) {
/* Replace tree node by next element in list... */
- ASSERT(BLK_SZ(LIST_NEXT(x)) == BLK_SZ(x));
+ ASSERT(BF_BLK_SZ(LIST_NEXT(x)) == BF_BLK_SZ(x));
ASSERT(IS_TREE_NODE(x));
ASSERT(IS_LIST_ELEM(LIST_NEXT(x)));
@@ -834,7 +835,7 @@ bf_get_free_block(Allctr_t *allctr, Uint size,
ASSERT(!cand_blk || cand_size >= size);
while (x) {
- blk_sz = BLK_SZ(x);
+ blk_sz = BF_BLK_SZ(x);
if (blk_sz < size) {
x = x->right;
}
@@ -855,11 +856,11 @@ bf_get_free_block(Allctr_t *allctr, Uint size,
#ifdef HARD_DEBUG
{
RBTree_t *ct_blk = check_tree(root, 0, size);
- ASSERT(BLK_SZ(ct_blk) == BLK_SZ(blk));
+ ASSERT(BF_BLK_SZ(ct_blk) == BF_BLK_SZ(blk));
}
#endif
- if (cand_blk && cand_size <= BLK_SZ(blk))
+ if (cand_blk && cand_size <= BF_BLK_SZ(blk))
return NULL; /* cand_blk was better */
/* Use next block if it exist in order to avoid replacing
@@ -1093,36 +1094,36 @@ check_tree(RBTree_t *root, int ao, Uint size)
if (x->left) {
ASSERT(x->left->parent == x);
if (ao) {
- ASSERT(BLK_SZ(x->left) < BLK_SZ(x)
- || (BLK_SZ(x->left) == BLK_SZ(x) && x->left < x));
+ ASSERT(BF_BLK_SZ(x->left) < BF_BLK_SZ(x)
+ || (BF_BLK_SZ(x->left) == BF_BLK_SZ(x) && x->left < x));
}
else {
ASSERT(IS_TREE_NODE(x->left));
- ASSERT(BLK_SZ(x->left) < BLK_SZ(x));
+ ASSERT(BF_BLK_SZ(x->left) < BF_BLK_SZ(x));
}
}
if (x->right) {
ASSERT(x->right->parent == x);
if (ao) {
- ASSERT(BLK_SZ(x->right) > BLK_SZ(x)
- || (BLK_SZ(x->right) == BLK_SZ(x) && x->right > x));
+ ASSERT(BF_BLK_SZ(x->right) > BF_BLK_SZ(x)
+ || (BF_BLK_SZ(x->right) == BF_BLK_SZ(x) && x->right > x));
}
else {
ASSERT(IS_TREE_NODE(x->right));
- ASSERT(BLK_SZ(x->right) > BLK_SZ(x));
+ ASSERT(BF_BLK_SZ(x->right) > BF_BLK_SZ(x));
}
}
- if (size && BLK_SZ(x) >= size) {
+ if (size && BF_BLK_SZ(x) >= size) {
if (ao) {
if (!res
- || BLK_SZ(x) < BLK_SZ(res)
- || (BLK_SZ(x) == BLK_SZ(res) && x < res))
+ || BF_BLK_SZ(x) < BF_BLK_SZ(res)
+ || (BF_BLK_SZ(x) == BF_BLK_SZ(res) && x < res))
res = x;
}
else {
- if (!res || BLK_SZ(x) < BLK_SZ(res))
+ if (!res || BF_BLK_SZ(x) < BF_BLK_SZ(res))
res = x;
}
}
@@ -1168,7 +1169,7 @@ print_tree_aux(RBTree_t *x, int indent)
}
fprintf(stderr, "%s: sz=%lu addr=0x%lx\r\n",
IS_BLACK(x) ? "BLACK" : "RED",
- BLK_SZ(x),
+ BF_BLK_SZ(x),
(Uint) x);
print_tree_aux(x->left, indent + INDENT_STEP);
}
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index cc4f2be8eb..474151d454 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -72,52 +72,29 @@ binary_matches(Process *p, Eterm arg1, Eterm arg2, Eterm arg3);
void erts_init_bif_binary(void)
{
- sys_memset((void *) &binary_match_trap_export, 0, sizeof(Export));
- binary_match_trap_export.address = &binary_match_trap_export.code[3];
- binary_match_trap_export.code[0] = am_erlang;
- binary_match_trap_export.code[1] = am_binary_match_trap;
- binary_match_trap_export.code[2] = 3;
- binary_match_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_match_trap_export.code[4] = (BeamInstr) &binary_match_trap;
-
- sys_memset((void *) &binary_matches_trap_export, 0, sizeof(Export));
- binary_matches_trap_export.address = &binary_matches_trap_export.code[3];
- binary_matches_trap_export.code[0] = am_erlang;
- binary_matches_trap_export.code[1] = am_binary_matches_trap;
- binary_matches_trap_export.code[2] = 3;
- binary_matches_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_matches_trap_export.code[4] = (BeamInstr) &binary_matches_trap;
-
- sys_memset((void *) &binary_longest_prefix_trap_export, 0, sizeof(Export));
- binary_longest_prefix_trap_export.address = &binary_longest_prefix_trap_export.code[3];
- binary_longest_prefix_trap_export.code[0] = am_erlang;
- binary_longest_prefix_trap_export.code[1] = am_binary_longest_prefix_trap;
- binary_longest_prefix_trap_export.code[2] = 3;
- binary_longest_prefix_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_longest_prefix_trap_export.code[4] = (BeamInstr) &binary_longest_prefix_trap;
-
- sys_memset((void *) &binary_longest_suffix_trap_export, 0, sizeof(Export));
- binary_longest_suffix_trap_export.address = &binary_longest_suffix_trap_export.code[3];
- binary_longest_suffix_trap_export.code[0] = am_erlang;
- binary_longest_suffix_trap_export.code[1] = am_binary_longest_suffix_trap;
- binary_longest_suffix_trap_export.code[2] = 3;
- binary_longest_suffix_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_longest_suffix_trap_export.code[4] = (BeamInstr) &binary_longest_suffix_trap;
-
- sys_memset((void *) &binary_bin_to_list_trap_export, 0, sizeof(Export));
- binary_bin_to_list_trap_export.address = &binary_bin_to_list_trap_export.code[3];
- binary_bin_to_list_trap_export.code[0] = am_erlang;
- binary_bin_to_list_trap_export.code[1] = am_binary_bin_to_list_trap;
- binary_bin_to_list_trap_export.code[2] = 3;
- binary_bin_to_list_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_bin_to_list_trap_export.code[4] = (BeamInstr) &binary_bin_to_list_trap;
- sys_memset((void *) &binary_copy_trap_export, 0, sizeof(Export));
- binary_copy_trap_export.address = &binary_copy_trap_export.code[3];
- binary_copy_trap_export.code[0] = am_erlang;
- binary_copy_trap_export.code[1] = am_binary_copy_trap;
- binary_copy_trap_export.code[2] = 2;
- binary_copy_trap_export.code[3] = (BeamInstr) em_apply_bif;
- binary_copy_trap_export.code[4] = (BeamInstr) &binary_copy_trap;
+ erts_init_trap_export(&binary_match_trap_export,
+ am_erlang, am_binary_match_trap, 3,
+ &binary_match_trap);
+
+ erts_init_trap_export(&binary_matches_trap_export,
+ am_erlang, am_binary_matches_trap, 3,
+ &binary_matches_trap);
+
+ erts_init_trap_export(&binary_longest_prefix_trap_export,
+ am_erlang, am_binary_longest_prefix_trap, 3,
+ &binary_longest_prefix_trap);
+
+ erts_init_trap_export(&binary_longest_suffix_trap_export,
+ am_erlang, am_binary_longest_suffix_trap, 3,
+ &binary_longest_suffix_trap);
+
+ erts_init_trap_export(&binary_bin_to_list_trap_export,
+ am_erlang, am_binary_bin_to_list_trap, 3,
+ &binary_bin_to_list_trap);
+
+ erts_init_trap_export(&binary_copy_trap_export,
+ am_erlang, am_binary_copy_trap, 2,
+ &binary_copy_trap);
max_loop_limit = 0;
return;
diff --git a/erts/emulator/beam/erl_bif_chksum.c b/erts/emulator/beam/erl_bif_chksum.c
index 06b7ffdf32..ff5ce3cc7a 100644
--- a/erts/emulator/beam/erl_bif_chksum.c
+++ b/erts/emulator/beam/erl_bif_chksum.c
@@ -42,16 +42,9 @@ static Export chksum_md5_2_exp;
void erts_init_bif_chksum(void)
{
/* Non visual BIF to trap to. */
- memset(&chksum_md5_2_exp, 0, sizeof(Export));
- chksum_md5_2_exp.address =
- &chksum_md5_2_exp.code[3];
- chksum_md5_2_exp.code[0] = am_erlang;
- chksum_md5_2_exp.code[1] = am_atom_put("md5_trap",8);
- chksum_md5_2_exp.code[2] = 2;
- chksum_md5_2_exp.code[3] =
- (BeamInstr) em_apply_bif;
- chksum_md5_2_exp.code[4] =
- (BeamInstr) &md5_2;
+ erts_init_trap_export(&chksum_md5_2_exp,
+ am_erlang, am_atom_put("md5_trap",8), 2,
+ &md5_2);
}
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index c338ee1c4b..7cbea55eac 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -104,16 +104,49 @@ static void dereference_all_processes(DE_Handle *dh);
static void restore_process_references(DE_Handle *dh);
static void ddll_no_more_references(void *vdh);
-#define lock_drv_list() erts_smp_mtx_lock(&erts_driver_list_lock)
-#define unlock_drv_list() erts_smp_mtx_unlock(&erts_driver_list_lock)
+#define lock_drv_list() erts_smp_rwmtx_rwlock(&erts_driver_list_lock)
+#define unlock_drv_list() erts_smp_rwmtx_rwunlock(&erts_driver_list_lock)
#define assert_drv_list_locked() \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&erts_driver_list_lock))
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
+ || erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
+#define assert_drv_list_rwlocked() \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock))
+#define assert_drv_list_rlocked() \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define assert_drv_list_not_locked() \
- ERTS_SMP_LC_ASSERT(!erts_smp_lc_mtx_is_locked(&erts_driver_list_lock))
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \
+ && !erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock))
#define FREE_PORT_FLAGS (ERTS_PORT_SFLGS_DEAD & (~ERTS_PORT_SFLG_INITIALIZING))
+static void
+kill_ports_driver_unloaded(DE_Handle *dh)
+{
+ int ix, max = erts_ptab_max(&erts_port);
+
+ for (ix = 0; ix < max; ix++) {
+ erts_aint32_t state;
+ Port* prt = erts_pix2port(ix);
+ if (!prt)
+ continue;
+
+ ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & FREE_PORT_FLAGS)
+ continue;
+
+ erts_smp_port_lock(prt);
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if (!(state & ERTS_PORT_SFLGS_DEAD) && prt->drv_ptr->handle == dh)
+ driver_failure_atom((ErlDrvPort) prt, "driver_unloaded");
+
+ erts_port_release(prt);
+ }
+}
+
/*
* try_load(Path, Name, OptionList) -> {ok,Status} |
* {ok, PendingStatus, Ref} |
@@ -149,7 +182,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
Eterm name_term = BIF_ARG_2;
Eterm options = BIF_ARG_3;
char *path = NULL;
- Uint path_len;
+ ErlDrvSizeT path_len;
char *name = NULL;
DE_Handle *dh;
erts_driver_t *drv;
@@ -228,7 +261,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
goto error;
}
path = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, path_len + 1 /* might need path separator */ + sys_strlen(name) + 1);
- if (io_list_to_buf(path_term, path, path_len) != 0) {
+ if (erts_iolist_to_buf(path_term, path, path_len) != 0) {
goto error;
}
while (path_len > 0 && (path[path_len-1] == '\\' || path[path_len-1] == '/')) {
@@ -356,42 +389,16 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
ok_term = mkatom("loaded");
}
}
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
if (kill_ports) {
- int j;
- /* Avoid closing the driver by referencing it */
+ /* Avoid closing the driver by referencing it */
erts_ddll_reference_driver(dh);
ASSERT(dh->status == ERL_DE_RELOAD);
dh->status = ERL_DE_FORCE_RELOAD;
#if DDLL_SMP
unlock_drv_list();
#endif
- for (j = 0; j < erts_max_ports; j++) {
- Port* prt = &erts_port[j];
-#ifdef DDLL_SMP
- erts_smp_port_state_lock(prt);
-#endif
- if (!(prt->status & FREE_PORT_FLAGS) &&
- prt->drv_ptr->handle == dh) {
-#if DDLL_SMP
- erts_smp_atomic_inc_nob(&prt->refc);
- /* Extremely rare spinlock */
- while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
- erts_smp_port_state_unlock(prt);
- erts_smp_port_state_lock(prt);
- }
- erts_smp_port_state_unlock(prt);
- erts_smp_mtx_lock(prt->lock);
- if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) {
- driver_failure_atom(j, "driver_unloaded");
- }
-#else
- driver_failure_atom(j, "driver_unloaded");
-#endif
- erts_port_release(prt);
- }
- else erts_smp_port_state_unlock(prt);
- }
+ kill_ports_driver_unloaded(dh);
/* Dereference, eventually causing driver destruction */
#if DDLL_SMP
lock_drv_list();
@@ -581,47 +588,21 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
dh->reload_full_path = dh->reload_driver_name = NULL;
dh->reload_flags = 0;
}
- if (dh->port_count > 0) {
+ if (erts_smp_atomic32_read_nob(&dh->port_count) > 0) {
++kill_ports;
}
dh->status = ERL_DE_UNLOAD;
ok_term = am_pending_driver;
done:
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
if (kill_ports > 1) {
- int j;
/* Avoid closing the driver by referencing it */
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
#if DDLL_SMP
unlock_drv_list();
#endif
- for (j = 0; j < erts_max_ports; j++) {
- Port* prt = &erts_port[j];
-#if DDLL_SMP
- erts_smp_port_state_lock(prt);
-#endif
- if (!(prt->status & FREE_PORT_FLAGS)
- && prt->drv_ptr->handle == dh) {
-#if DDLL_SMP
- erts_smp_atomic_inc_nob(&prt->refc);
- /* Extremely rare spinlock */
- while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
- erts_smp_port_state_unlock(prt);
- erts_smp_port_state_lock(prt);
- }
- erts_smp_port_state_unlock(prt);
- erts_smp_mtx_lock(prt->lock);
- if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) {
- driver_failure_atom(j, "driver_unloaded");
- }
-#else
- driver_failure_atom(j, "driver_unloaded");
-#endif
- erts_port_release(prt);
- }
- else erts_smp_port_state_unlock(prt);
- }
+ kill_ports_driver_unloaded(dh);
#if DDLL_SMP
lock_drv_list();
#endif
@@ -791,7 +772,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
} else if (drv->handle->status == ERL_DE_PERMANENT) {
res = am_permanent;
} else {
- res = make_small(drv->handle->port_count);
+ res = make_small(erts_smp_atomic32_read_nob(&drv->handle->port_count));
}
goto done;
case am_linked_in_driver:
@@ -1049,40 +1030,16 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
}
dh->status = ERL_DE_UNLOAD;
}
- if (!left && drv->handle->port_count > 0) {
+ if (!left
+ && erts_smp_atomic32_read_nob(&drv->handle->port_count) > 0) {
if (kill_ports) {
- int j;
DE_Handle *dh = drv->handle;
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
#if DDLL_SMP
unlock_drv_list();
#endif
- for (j = 0; j < erts_max_ports; j++) {
- Port* prt = &erts_port[j];
-#if DDLL_SMP
- erts_smp_port_state_lock(prt);
-#endif
- if (!(prt->status & FREE_PORT_FLAGS) &&
- prt->drv_ptr->handle == dh) {
-#if DDLL_SMP
- erts_smp_atomic_inc_nob(&prt->refc);
- while(prt->status & ERTS_PORT_SFLG_INITIALIZING) {
- erts_smp_port_state_unlock(prt);
- erts_smp_port_state_lock(prt);
- }
- erts_smp_port_state_unlock(prt);
- erts_smp_mtx_lock(prt->lock);
- if (!(prt->status & ERTS_PORT_SFLGS_DEAD)) {
- driver_failure_atom(j, "driver_unloaded");
- }
-#else
- driver_failure_atom(j, "driver_unloaded");
-#endif
- erts_port_release(prt);
- }
- else erts_smp_port_state_unlock(prt);
- }
+ kill_ports_driver_unloaded(dh);
#if DDLL_SMP
lock_drv_list(); /* Needed for future list operations */
#endif
@@ -1104,7 +1061,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
void erts_ddll_lock_driver(DE_Handle *dh, char *name)
{
DE_ProcEntry *p,*q;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
notify_all(dh, name,
ERL_DE_PROC_AWAIT_LOAD, am_UP, am_permanent);
notify_all(dh, name,
@@ -1127,19 +1084,22 @@ void erts_ddll_lock_driver(DE_Handle *dh, char *name)
void erts_ddll_increment_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
- dh->port_count++;
+ erts_smp_atomic32_inc_nob(&dh->port_count);
}
void erts_ddll_decrement_port_count(DE_Handle *dh)
{
assert_drv_list_locked();
- ASSERT(dh->port_count > 0);
- dh->port_count--;
+#if DEBUG
+ ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0);
+#else
+ erts_smp_atomic32_dec_nob(&dh->port_count);
+#endif
}
static void first_ddll_reference(DE_Handle *dh)
{
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
erts_refc_init(&(dh->refc),1);
}
@@ -1167,7 +1127,7 @@ void erts_ddll_dereference_driver(DE_Handle *dh)
static void dereference_all_processes(DE_Handle *dh)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
for(p = dh->procs;p != NULL; p = p->next) {
if (p->awaiting_status == ERL_DE_PROC_LOADED) {
ASSERT(!(p->flags & ERL_DE_FL_DEREFERENCED));
@@ -1180,7 +1140,7 @@ static void dereference_all_processes(DE_Handle *dh)
static void restore_process_references(DE_Handle *dh)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
ASSERT(erts_refc_read(&(dh->refc),0) == 0);
for(p = dh->procs;p != NULL; p = p->next) {
if (p->awaiting_status == ERL_DE_PROC_LOADED) {
@@ -1408,7 +1368,7 @@ static int is_last_user(DE_Handle *dh, Process *proc) {
DE_ProcEntry *p = dh->procs;
int found = 0;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->proc == proc && p->awaiting_status == ERL_DE_PROC_LOADED) {
@@ -1429,7 +1389,7 @@ static DE_ProcEntry *find_proc_entry(DE_Handle *dh, Process *proc, Uint status)
{
DE_ProcEntry *p = dh->procs;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->proc == proc && p->awaiting_status == status) {
@@ -1456,7 +1416,7 @@ static int num_procs(DE_Handle *dh, Uint status) {
DE_ProcEntry *p = dh->procs;
int i = 0;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->awaiting_status == status) {
@@ -1471,7 +1431,7 @@ static int num_entries(DE_Handle *dh, Process *proc, Uint status) {
DE_ProcEntry *p = dh->procs;
int i = 0;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->awaiting_status == status && p->proc == proc) {
++i;
@@ -1484,7 +1444,7 @@ static int num_entries(DE_Handle *dh, Process *proc, Uint status) {
static void add_proc_loaded(DE_Handle *dh, Process *proc)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
p->proc = proc;
p->flags = 0;
@@ -1496,7 +1456,7 @@ static void add_proc_loaded(DE_Handle *dh, Process *proc)
static void add_proc_loaded_deref(DE_Handle *dh, Process *proc)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
p->proc = proc;
p->awaiting_status = ERL_DE_PROC_LOADED;
@@ -1516,7 +1476,7 @@ static void add_proc_waiting(DE_Handle *dh, Process *proc,
Uint status, Eterm ref)
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
p->proc = proc;
p->flags = 0;
@@ -1530,7 +1490,7 @@ static Eterm add_monitor(Process *p, DE_Handle *dh, Uint status)
{
Eterm r;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
r = erts_make_ref(p);
add_proc_waiting(dh, p, status, r);
return r;
@@ -1541,7 +1501,7 @@ static void set_driver_reloading(DE_Handle *dh, Process *proc, char *path, char
{
DE_ProcEntry *p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = erts_alloc(ERTS_ALC_T_DDLL_PROCESS, sizeof(DE_ProcEntry));
p->proc = proc;
p->awaiting_status = ERL_DE_OK;
@@ -1562,7 +1522,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
int res;
ErlDrvEntry *dp;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
if ((res = erts_sys_ddll_open(path, &(dh->handle))) != ERL_DE_NO_ERROR) {
return res;
@@ -1600,7 +1560,7 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
goto error;
}
erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0);
- dh->port_count = 0;
+ erts_smp_atomic32_init_nob(&dh->port_count, 0);
dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
sys_strcpy(dh->full_path, path);
dh->flags = 0;
@@ -1626,7 +1586,7 @@ static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
{
erts_driver_t *q, *p = driver_list;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
while (p != NULL) {
if (p->handle == dh) {
@@ -1666,11 +1626,11 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
int res;
DE_Handle *dh = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sizeof(DE_Handle));
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
dh->handle = NULL;
dh->procs = NULL;
- dh->port_count = 0;
+ erts_smp_atomic32_init_nob(&dh->port_count, 0);
erts_refc_init(&(dh->refc), (erts_aint_t) 0);
dh->status = -1;
dh->reload_full_path = NULL;
@@ -1704,7 +1664,7 @@ static int reload_driver_entry(DE_Handle *dh)
int loadres;
Uint flags = dh->reload_flags;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
dh->reload_full_path = NULL;
dh->reload_driver_name = NULL;
@@ -1742,7 +1702,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
ErtsProcLocks rp_locks = 0;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
if (errcode != 0) {
int need = load_error_need(errcode);
Eterm e;
@@ -1775,7 +1735,7 @@ static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Ete
{
DE_ProcEntry **p;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
p = &(dh->procs);
while (*p != NULL) {
@@ -1881,7 +1841,7 @@ static Eterm mkatom(char *str)
static char *pick_list_or_atom(Eterm name_term)
{
char *name = NULL;
- Uint name_len;
+ ErlDrvSizeT name_len;
if (is_atom(name_term)) {
Atom *ap = atom_tab(atom_val(name_term));
if (ap->len == 0) {
@@ -1897,7 +1857,7 @@ static char *pick_list_or_atom(Eterm name_term)
goto error;
}
name = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, name_len + 1);
- if (io_list_to_buf(name_term, name, name_len) != 0) {
+ if (erts_iolist_to_buf(name_term, name, name_len) != 0) {
goto error;
}
name[name_len] = '\0';
@@ -1918,10 +1878,10 @@ static int build_proc_info(DE_Handle *dh, ProcEntryInfo **out_pei, Uint filter)
int i;
DE_ProcEntry *pe;
- assert_drv_list_locked();
+ assert_drv_list_rwlocked();
for (pe = dh->procs; pe != NULL; pe = pe->next) {
- Eterm id = pe->proc->id;
+ Eterm id = pe->proc->common.id;
Uint stat = pe->awaiting_status;
if (stat == ERL_DE_PROC_AWAIT_UNLOAD_ONLY) {
stat = ERL_DE_PROC_AWAIT_UNLOAD;
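
Note: the DE_Handle port_count field above changes from a plain integer protected by the driver-list lock into an erts_smp_atomic32_t, and the decrement keeps its sanity check only in debug builds. A stand-alone C11 sketch of that idiom follows (illustrative only; drv_handle_t, port_count_inc and port_count_dec are hypothetical names, not ERTS API):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct {
        atomic_int_fast32_t port_count;   /* number of open ports for a driver */
    } drv_handle_t;

    static void port_count_inc(drv_handle_t *dh)
    {
        atomic_fetch_add_explicit(&dh->port_count, 1, memory_order_relaxed);
    }

    static void port_count_dec(drv_handle_t *dh)
    {
    #ifdef DEBUG
        /* fetch_sub returns the previous value; it must have been > 0 */
        int_fast32_t prev = atomic_fetch_sub_explicit(&dh->port_count, 1,
                                                      memory_order_relaxed);
        assert(prev > 0);
    #else
        atomic_fetch_sub_explicit(&dh->port_count, 1, memory_order_relaxed);
    #endif
    }

    int main(void)
    {
        drv_handle_t dh;
        atomic_init(&dh.port_count, 0);
        port_count_inc(&dh);
        port_count_dec(&dh);
        return 0;
    }
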
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 45dc5fb11c..fabddffc68 100755
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -40,6 +40,8 @@
#include "erl_cpu_topology.h"
#include "erl_async.h"
#include "erl_thr_progress.h"
+#define ERTS_PTAB_WANT_DEBUG_FUNCS__
+#include "erl_ptab.h"
#ifdef HIPE
#include "hipe_arch.h"
#endif
@@ -128,8 +130,6 @@ static char erts_system_version[] = ("Erlang " ERLANG_OTP_RELEASE
static Eterm os_type_tuple;
static Eterm os_version_tuple;
-static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item);
-
static Eterm
current_function(Process* p, Process* rp, Eterm** hpp, int full_info);
static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp);
@@ -873,8 +873,7 @@ BIF_RETTYPE process_info_1(BIF_ALIST_1)
&& external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
BIF_RET(am_undefined);
- if (is_not_internal_pid(BIF_ARG_1)
- || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ if (is_not_internal_pid(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -909,8 +908,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2)
&& external_pid_dist_entry(pid) == erts_this_dist_entry)
BIF_RET(am_undefined);
- if (is_not_internal_pid(pid)
- || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ if (is_not_internal_pid(pid)) {
BIF_ERROR(BIF_P, BADARG);
}
@@ -1002,9 +1000,9 @@ process_info_aux(Process *BIF_P,
switch (item) {
case am_registered_name:
- if (rp->reg != NULL) {
+ if (rp->common.u.alive.reg) {
hp = HAlloc(BIF_P, 3);
- res = rp->reg->name;
+ res = rp->common.u.alive.reg->name;
} else {
if (always_wrap) {
hp = HAlloc(BIF_P, 3);
@@ -1050,7 +1048,7 @@ process_info_aux(Process *BIF_P,
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
n = rp->msg.len;
- if (n == 0 || rp->trace_flags & F_SENSITIVE) {
+ if (n == 0 || ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
hp = HAlloc(BIF_P, 3);
} else {
int remove_bad_messages = 0;
@@ -1209,7 +1207,7 @@ process_info_aux(Process *BIF_P,
INIT_MONITOR_INFOS(mic);
- erts_doforall_links(rp->nlinks,&collect_one_link,&mic);
+ erts_doforall_links(ERTS_P_LINKS(rp),&collect_one_link,&mic);
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
@@ -1227,7 +1225,7 @@ process_info_aux(Process *BIF_P,
int i;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(rp->monitors,&collect_one_origin_monitor,&mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_origin_monitor,&mic);
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
@@ -1264,7 +1262,7 @@ process_info_aux(Process *BIF_P,
Eterm item;
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(rp->monitors,&collect_one_target_monitor,&mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(rp),&collect_one_target_monitor,&mic);
hp = HAlloc(BIF_P, 3 + mic.sz);
res = NIL;
@@ -1330,7 +1328,7 @@ process_info_aux(Process *BIF_P,
}
case am_dictionary:
- if (rp->trace_flags & F_SENSITIVE) {
+ if (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
res = NIL;
} else {
res = erts_dictionary_copy(BIF_P, rp->dictionary);
@@ -1338,13 +1336,15 @@ process_info_aux(Process *BIF_P,
hp = HAlloc(BIF_P, 3);
break;
- case am_trap_exit:
+ case am_trap_exit: {
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
hp = HAlloc(BIF_P, 3);
- if (rp->flags & F_TRAPEXIT)
+ if (state & ERTS_PSFLG_TRAP_EXIT)
res = am_true;
else
res = am_false;
break;
+ }
case am_error_handler:
hp = HAlloc(BIF_P, 3);
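
Note: in the trap_exit branch above, the flag moves from rp->flags into the atomically read process state word. A minimal, self-contained analogue of testing such a flag bit (PSFLG_TRAP_EXIT below is a stand-in value, not the real ERTS constant):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PSFLG_TRAP_EXIT (1u << 5)   /* stand-in bit, for illustration only */

    static bool traps_exits(atomic_uint *state)
    {
        /* one relaxed load of the state word, then test the flag bit */
        return (atomic_load_explicit(state, memory_order_relaxed)
                & PSFLG_TRAP_EXIT) != 0;
    }

    int main(void)
    {
        atomic_uint state;
        atomic_init(&state, PSFLG_TRAP_EXIT);
        printf("%s\n", traps_exits(&state) ? "true" : "false");
        return 0;
    }
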
@@ -1424,8 +1424,8 @@ process_info_aux(Process *BIF_P,
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp);
- erts_doforall_links(rp->nlinks, &one_link_size, &size);
- erts_doforall_monitors(rp->monitors, &one_mon_size, &size);
+ erts_doforall_links(ERTS_P_LINKS(rp), &one_link_size, &size);
+ erts_doforall_monitors(ERTS_P_MONITORS(rp), &one_mon_size, &size);
size += (rp->heap_sz + rp->mbuf_sz) * sizeof(Eterm);
if (rp->old_hend && rp->old_heap)
size += (rp->old_hend - rp->old_heap) * sizeof(Eterm);
@@ -1498,7 +1498,7 @@ process_info_aux(Process *BIF_P,
case am_trace:
hp = HAlloc(BIF_P, 3);
- res = make_small(rp->trace_flags & TRACEE_FLAGS);
+ res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
break;
case am_binary: {
@@ -1603,7 +1603,7 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
}
}
- if (BIF_P->id == rp->id) {
+ if (BIF_P == rp) {
FunctionInfo fi2;
/*
@@ -1835,17 +1835,17 @@ info_1_tuple(Process* BIF_P, /* Pointer to current process. */
# define ERTS_ERROR_CHECKER_PRINTF_XML VALGRIND_PRINTF_XML
# endif
#endif
- Uint buf_size = 8*1024; /* Try with 8KB first */
+ ErlDrvSizeT buf_size = 8*1024; /* Try with 8KB first */
char *buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
- int r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
- if (r < 0) {
+ ErlDrvSizeT r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
+ if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
erts_free(ERTS_ALC_T_TMP, (void *) buf);
if (erts_iolist_size(*tp, &buf_size)) {
goto badarg;
}
buf_size++;
buf = erts_alloc(ERTS_ALC_T_TMP, buf_size);
- r = io_list_to_buf(*tp, (char*) buf, buf_size - 1);
+ r = erts_iolist_to_buf(*tp, (char*) buf, buf_size - 1);
ASSERT(r == buf_size - 1);
}
buf[buf_size - 1 - r] = '\0';
@@ -2157,9 +2157,13 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
BIF_RET(res);
} else if (BIF_ARG_1 == am_process_count) {
- BIF_RET(make_small(erts_process_count()));
+ BIF_RET(make_small(erts_ptab_count(&erts_proc)));
} else if (BIF_ARG_1 == am_process_limit) {
- BIF_RET(make_small(erts_max_processes));
+ BIF_RET(make_small(erts_ptab_max(&erts_proc)));
+ } else if (BIF_ARG_1 == am_port_count) {
+ BIF_RET(make_small(erts_ptab_count(&erts_port)));
+ } else if (BIF_ARG_1 == am_port_limit) {
+ BIF_RET(make_small(erts_ptab_max(&erts_port)));
} else if (BIF_ARG_1 == am_info
|| BIF_ARG_1 == am_procs
|| BIF_ARG_1 == am_loaded
@@ -2532,6 +2536,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
res = make_small(erts_no_run_queues);
BIF_RET(res);
+ } else if (ERTS_IS_ATOM_STR("port_parallelism", BIF_ARG_1)) {
+ res = erts_port_parallelism ? am_true : am_false;
+ BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("c_compiler_used", BIF_ARG_1)) {
Eterm *hp = NULL;
Uint sz = 0;
@@ -2699,66 +2706,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE
-port_info_1(BIF_ALIST_1)
-{
- Process* p = BIF_P;
- Eterm pid = BIF_ARG_1;
- static Eterm keys[] = {
- am_name,
- am_links,
- am_id,
- am_connected,
- am_input,
- am_output,
- am_os_pid
- };
- Eterm items[ASIZE(keys)];
- Eterm result = NIL;
- Eterm reg_name;
- Eterm* hp;
- Uint need;
- int i;
-
- /*
- * Collect all information about the port.
- */
-
- for (i = 0; i < ASIZE(keys); i++) {
- Eterm item;
-
- item = port_info(p, pid, keys[i]);
- if (is_non_value(item)) {
- return THE_NON_VALUE;
- }
- if (item == am_undefined) {
- return am_undefined;
- }
- items[i] = item;
- }
- reg_name = port_info(p, pid, am_registered_name);
-
- /*
- * Build the resulting list.
- */
-
- need = 2*ASIZE(keys);
- if (is_tuple(reg_name)) {
- need += 2;
- }
- hp = HAlloc(p, need);
- for (i = ASIZE(keys) - 1; i >= 0; i--) {
- result = CONS(hp, items[i], result);
- hp += 2;
- }
- if (is_tuple(reg_name)) {
- result = CONS(hp, reg_name, result);
- }
-
- return result;
-}
-
-
/**********************************************************************/
/* Return information on ports */
/* Info:
@@ -2771,38 +2718,20 @@ port_info_1(BIF_ALIST_1)
** os_pid The child's process ID
*/
-BIF_RETTYPE port_info_2(BIF_ALIST_2)
+Eterm
+erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, Eterm item)
{
- return port_info(BIF_P, BIF_ARG_1, BIF_ARG_2);
-}
+ Eterm res = THE_NON_VALUE;
-static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
-{
- BIF_RETTYPE ret;
- Port *prt;
- Eterm res;
- Eterm* hp;
- int count;
-
- if (is_internal_port(portid))
- prt = erts_id2port(portid, p, ERTS_PROC_LOCK_MAIN);
- else if (is_atom(portid))
- erts_whereis_name(p, ERTS_PROC_LOCK_MAIN,
- portid, NULL, 0, 0, &prt);
- else if (is_external_port(portid)
- && external_port_dist_entry(portid) == erts_this_dist_entry)
- BIF_RET(am_undefined);
- else {
- BIF_ERROR(p, BADARG);
- }
-
- if (!prt) {
- BIF_RET(am_undefined);
- }
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if (item == am_id) {
- hp = HAlloc(p, 3);
- res = make_small(internal_port_number(portid));
+ if (hpp)
+ res = make_small(internal_port_index(prt->common.id));
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_links) {
MonitorInfoCollection mic;
@@ -2811,17 +2740,26 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
INIT_MONITOR_INFOS(mic);
- erts_doforall_links(prt->nlinks, &collect_one_link, &mic);
+ erts_doforall_links(ERTS_P_LINKS(prt), &collect_one_link, &mic);
- hp = HAlloc(p, 3 + mic.sz);
- res = NIL;
- for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
- res = CONS(hp, item, res);
- hp += 2;
+ if (szp)
+ *szp += mic.sz;
+
+ if (hpp) {
+ res = NIL;
+ for (i = 0; i < mic.mi_i; i++) {
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ res = CONS(*hpp, item, res);
+ *hpp += 2;
+ }
}
+
DESTROY_MONITOR_INFOS(mic);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_monitors) {
MonitorInfoCollection mic;
@@ -2830,79 +2768,96 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
INIT_MONITOR_INFOS(mic);
- erts_doforall_monitors(prt->monitors, &collect_one_origin_monitor, &mic);
+ erts_doforall_monitors(ERTS_P_MONITORS(prt), &collect_one_origin_monitor, &mic);
- hp = HAlloc(p, 3 + mic.sz);
- res = NIL;
- for (i = 0; i < mic.mi_i; i++) {
- Eterm t;
- item = STORE_NC(&hp, &MSO(p), mic.mi[i].entity);
- t = TUPLE2(hp, am_process, item);
- hp += 3;
- res = CONS(hp, t, res);
- hp += 2;
+ if (szp)
+ *szp += mic.sz;
+
+ if (hpp) {
+ res = NIL;
+ for (i = 0; i < mic.mi_i; i++) {
+ Eterm t;
+ item = STORE_NC(hpp, ohp, mic.mi[i].entity);
+ t = TUPLE2(*hpp, am_process, item);
+ *hpp += 3;
+ res = CONS(*hpp, t, res);
+ *hpp += 2;
+ }
}
+
DESTROY_MONITOR_INFOS(mic);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_name) {
- count = sys_strlen(prt->name);
+ int count = sys_strlen(prt->name);
+
+ if (hpp)
+ res = buf_to_intlist(hpp, prt->name, count, NIL);
- hp = HAlloc(p, 3 + 2*count);
- res = buf_to_intlist(&hp, prt->name, count, NIL);
+ if (szp) {
+ *szp += 2*count;
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_connected) {
- hp = HAlloc(p, 3);
- res = prt->connected; /* internal pid */
+ if (hpp)
+ res = ERTS_PORT_GET_CONNECTED(prt); /* internal pid */
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_input) {
- Uint hsz = 3;
- Uint n = prt->bytes_in;
- (void) erts_bld_uint(NULL, &hsz, n);
- hp = HAlloc(p, hsz);
- res = erts_bld_uint(&hp, NULL, n);
+ res = erts_bld_uint(hpp, szp, prt->bytes_in);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_output) {
- Uint hsz = 3;
- Uint n = prt->bytes_out;
- (void) erts_bld_uint(NULL, &hsz, n);
- hp = HAlloc(p, hsz);
- res = erts_bld_uint(&hp, NULL, n);
+ res = erts_bld_uint(hpp, szp, prt->bytes_out);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_os_pid) {
- if (prt->os_pid >= 0) {
- Uint hsz = 3;
- UWord n = prt->os_pid;
- (void) erts_bld_uword(NULL, &hsz, n);
- hp = HAlloc(p, hsz);
- res = erts_bld_uword(&hp, NULL, n);
- } else {
- hp = HAlloc(p, 3);
- res = am_undefined;
- }
+ res = (prt->os_pid < 0
+ ? am_undefined
+ : erts_bld_uword(hpp, szp, (UWord) prt->os_pid));
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_registered_name) {
- RegProc *reg;
- reg = prt->reg;
- if (reg == NULL) {
- ERTS_BIF_PREP_RET(ret, NIL);
- goto done;
- } else {
- hp = HAlloc(p, 3);
+ RegProc *reg = prt->common.u.alive.reg;
+ if (reg) {
res = reg->name;
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
+ }
+ else {
+ if (szp)
+ return am_undefined;
+ return NIL;
}
}
else if (item == am_memory) {
/* All memory consumed in bytes (the Port struct should not be
included though).
*/
- Uint hsz = 3;
Uint size = 0;
ErlHeapFragment* bp;
- hp = HAlloc(p, 3);
-
- erts_doforall_links(prt->nlinks, &one_link_size, &size);
+ erts_doforall_links(ERTS_P_LINKS(prt), &one_link_size, &size);
for (bp = prt->bp; bp; bp = bp->next)
size += sizeof(ErlHeapFragment) + (bp->alloc_size - 1)*sizeof(Eterm);
@@ -2916,51 +2871,72 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item)
/* All memory allocated by the driver should be included, but it is
hard to retrieve... */
- (void) erts_bld_uint(NULL, &hsz, size);
- hp = HAlloc(p, hsz);
- res = erts_bld_uint(&hp, NULL, size);
+ res = erts_bld_uint(hpp, szp, size);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (item == am_queue_size) {
Uint ioq_size = erts_port_ioq_size(prt);
- Uint hsz = 3;
- (void) erts_bld_uint(NULL, &hsz, ioq_size);
- hp = HAlloc(p, hsz);
- res = erts_bld_uint(&hp, NULL, ioq_size);
+ res = erts_bld_uint(hpp, szp, ioq_size);
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
}
else if (ERTS_IS_ATOM_STR("locking", item)) {
- hp = HAlloc(p, 3);
+ if (hpp) {
#ifndef ERTS_SMP
- res = am_false;
+ res = am_false;
#else
- if (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
- DECL_AM(port_level);
- ASSERT(prt->drv_ptr->flags
- & ERL_DRV_FLAG_USE_PORT_LOCKING);
- res = AM_port_level;
+ if (erts_atomic32_read_nob(&prt->state)
+ & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
+ DECL_AM(port_level);
+ ASSERT(prt->drv_ptr->flags
+ & ERL_DRV_FLAG_USE_PORT_LOCKING);
+ res = AM_port_level;
+ }
+ else {
+ DECL_AM(driver_level);
+ ASSERT(!(prt->drv_ptr->flags
+ & ERL_DRV_FLAG_USE_PORT_LOCKING));
+ res = AM_driver_level;
+ }
+#endif
}
- else {
- DECL_AM(driver_level);
- ASSERT(!(prt->drv_ptr->flags
- & ERL_DRV_FLAG_USE_PORT_LOCKING));
- res = AM_driver_level;
+ if (szp) {
+ res = am_true;
+ goto done;
}
-#endif
+ }
+ else if (item == am_parallelism) {
+ if (szp) {
+ res = am_true;
+ goto done;
+ }
+ res = ((ERTS_PTS_FLG_PARALLELISM &
+ erts_smp_atomic32_read_nob(&prt->sched.flags))
+ ? am_true
+ : am_false);
}
else {
- ERTS_BIF_PREP_ERROR(ret, p, BADARG);
- goto done;
+ if (szp)
+ return am_false;
+ return THE_NON_VALUE;
}
- ERTS_BIF_PREP_RET(ret, TUPLE2(hp, item, res));
-
- done:
-
- erts_smp_port_unlock(prt);
+done:
+ if (szp)
+ *szp += 3;
+ if (hpp) {
+ res = TUPLE2(*hpp, item, res);
+ *hpp += 3;
+ }
- return ret;
+ return res;
}
-
BIF_RETTYPE
fun_info_2(BIF_ALIST_2)
{
@@ -3092,21 +3068,16 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
if(is_internal_pid(BIF_ARG_1)) {
Process *rp;
- if (BIF_ARG_1 == BIF_P->id)
+ if (BIF_ARG_1 == BIF_P->common.id)
BIF_RET(am_true);
- if(internal_pid_index(BIF_ARG_1) >= erts_max_processes)
- BIF_ERROR(BIF_P, BADARG);
-
- rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_STATUS);
+ rp = erts_proc_lookup(BIF_ARG_1);
if (!rp) {
BIF_RET(am_false);
}
else {
- int have_pending_exit = ERTS_PROC_PENDING_EXIT(rp);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- if (have_pending_exit)
+ if (erts_smp_atomic32_read_acqb(&rp->state)
+ & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING))
ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false);
else
BIF_RET(am_true);
@@ -3317,10 +3288,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
/* Used by node_container_SUITE (emulator) */
Eterm res;
if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
- res = erts_test_next_pid(0, 0);
- else {
- res = erts_test_next_port(0, 0);
- }
+ res = erts_ptab_test_next_id(&erts_proc, 0, 0);
+ else
+ res = erts_ptab_test_next_id(&erts_port, 0, 0);
if (res < 0)
BIF_RET(am_false);
BIF_RET(erts_make_integer(res, BIF_P));
@@ -3356,11 +3326,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
}
else if (ERTS_IS_ATOM_STR("processes", BIF_ARG_1)) {
/* Used by process_SUITE (emulator) */
- BIF_RET(erts_debug_processes(BIF_P));
+ BIF_RET(erts_debug_ptab_list(BIF_P, &erts_proc));
}
else if (ERTS_IS_ATOM_STR("processes_bif_info", BIF_ARG_1)) {
/* Used by process_SUITE (emulator) */
- BIF_RET(erts_debug_processes_bif_info(BIF_P));
+ BIF_RET(erts_debug_ptab_list_bif_info(BIF_P, &erts_proc));
}
else if (ERTS_IS_ATOM_STR("max_atom_out_cache_index", BIF_ARG_1)) {
/* Used by distribution_SUITE (emulator) */
@@ -3421,17 +3391,20 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
- res = make_link_list(BIF_P, p->nlinks, NIL);
+ res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
BIF_RET(res);
}
else if(is_internal_port(tp[2])) {
Eterm res;
- Port *p = erts_id2port(tp[2], BIF_P, ERTS_PROC_LOCK_MAIN);
+ Port *p = erts_id2port_sflgs(tp[2],
+ BIF_P,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
if(!p)
BIF_RET(am_undefined);
- res = make_link_list(BIF_P, p->nlinks, NIL);
- erts_smp_port_unlock(p);
+ res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL);
+ erts_port_release(p);
BIF_RET(res);
}
else if(is_node_name_atom(tp[2])) {
@@ -3463,7 +3436,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
BIF_RET(am_undefined);
}
- res = make_monitor_list(BIF_P, p->monitors);
+ res = make_monitor_list(BIF_P, ERTS_P_MONITORS(p));
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK);
BIF_RET(res);
} else if(is_node_name_atom(tp[2])) {
@@ -3606,7 +3579,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on);
if (on) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Process %T ", BIF_P->id);
+ erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id);
if (erts_is_alive)
erts_dsprintf(dsbufp, "on node %T ", erts_this_node->sysname);
erts_dsprintf(dsbufp,
@@ -3674,10 +3647,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
Eterm res;
if (ERTS_IS_ATOM_STR("next_pid", BIF_ARG_1))
- res = erts_test_next_pid(1, next);
- else {
- res = erts_test_next_port(1, next);
- }
+ res = erts_ptab_test_next_id(&erts_proc, 1, next);
+ else
+ res = erts_ptab_test_next_id(&erts_port, 1, next);
if (res < 0)
BIF_RET(am_false);
BIF_RET(erts_make_integer(res, BIF_P));
@@ -3710,9 +3682,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
&& is_internal_pid(tp[2])) {
int xres;
ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- Process *rp = erts_pid2proc_opt(BIF_P, ERTS_PROC_LOCK_MAIN,
- tp[2], rp_locks,
- ERTS_P2P_FLG_SMP_INC_REFC);
+ Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
+ tp[2], rp_locks);
if (!rp) {
DECL_AM(dead);
BIF_RET(AM_dead);
@@ -3738,7 +3709,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
#endif
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
if (xres > 1) {
DECL_AM(message);
BIF_RET(AM_message);
@@ -3949,9 +3919,9 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
id = am_atom_put(ltype, strlen(ltype));
} else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
/* use registered names as id's for process locks if available */
- proc = erts_pid2proc_unlocked(lock->id);
- if (proc && proc->reg) {
- id = proc->reg->name;
+ proc = erts_proc_lookup(lock->id);
+ if (proc && proc->common.u.alive.reg) {
+ id = proc->common.u.alive.reg->name;
} else {
/* otherwise use process id */
id = lock->id;
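
Note: the port_info rework above replaces direct HAlloc calls with the two-pass hpp/szp building convention: called with only szp, erts_bld_port_info accumulates the heap words needed; called with hpp, it writes the term and advances the heap pointer. A self-contained sketch of that convention using plain integers instead of Eterms (bld_pair is a hypothetical helper, not ERTS API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* When hpp is NULL, only accumulate the number of words needed into *szp;
     * when hpp is non-NULL, emit the words and advance the cursor. */
    static uintptr_t *bld_pair(uintptr_t **hpp, size_t *szp,
                               uintptr_t key, uintptr_t val)
    {
        uintptr_t *res = NULL;
        if (szp)
            *szp += 3;                 /* header word + two elements */
        if (hpp) {
            res = *hpp;
            res[0] = 2;                /* arity */
            res[1] = key;
            res[2] = val;
            *hpp += 3;
        }
        return res;
    }

    int main(void)
    {
        size_t sz = 0;
        bld_pair(NULL, &sz, 0, 0);                         /* pass 1: size only */

        uintptr_t *heap = malloc(sz * sizeof(*heap));
        if (!heap)
            return 1;
        uintptr_t *hp = heap;
        uintptr_t *tuple = bld_pair(&hp, NULL, 42, 4711);  /* pass 2: build */

        printf("arity=%lu key=%lu val=%lu\n",
               (unsigned long) tuple[0], (unsigned long) tuple[1],
               (unsigned long) tuple[2]);
        free(heap);
        return 0;
    }
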
diff --git a/erts/emulator/beam/erl_bif_op.c b/erts/emulator/beam/erl_bif_op.c
index 13f8b1f63c..d4cd9c89b3 100644
--- a/erts/emulator/beam/erl_bif_op.c
+++ b/erts/emulator/beam/erl_bif_op.c
@@ -261,11 +261,6 @@ Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2)
if (exp->code[2] == (Uint) arity) {
BIF_RET(am_true);
}
- } else if (is_tuple(arg1)) {
- Eterm* tp = tuple_val(arg1);
- if (tp[0] == make_arityval(2) && is_atom(tp[1]) && is_atom(tp[2])) {
- BIF_RET(am_true);
- }
}
BIF_RET(am_false);
}
diff --git a/erts/emulator/beam/erl_bif_os.c b/erts/emulator/beam/erl_bif_os.c
index 831e05493a..1062d4379b 100644
--- a/erts/emulator/beam/erl_bif_os.c
+++ b/erts/emulator/beam/erl_bif_os.c
@@ -58,7 +58,7 @@ BIF_RETTYPE os_getpid_0(BIF_ALIST_0)
char pid_string[21]; /* enough for a 64 bit number */
int n;
Eterm* hp;
- sys_get_pid(pid_string); /* In sys.c */
+ sys_get_pid(pid_string, sizeof(pid_string)); /* In sys.c */
n = sys_strlen(pid_string);
hp = HAlloc(BIF_P, n*2);
BIF_RET(buf_to_intlist(&hp, pid_string, n, NIL));
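
Note: the sys_get_pid change above adds the destination buffer size to the call. A trivial POSIX illustration of the same shape (get_pid_string is a hypothetical helper, not the ERTS sys_get_pid):

    #include <stdio.h>
    #include <unistd.h>

    /* The callee is told how large the destination is instead of trusting
     * a fixed-size buffer. */
    static void get_pid_string(char *buf, size_t size)
    {
        snprintf(buf, size, "%ld", (long) getpid());
    }

    int main(void)
    {
        char pid_string[21];                /* enough for a 64 bit number */
        get_pid_string(pid_string, sizeof(pid_string));
        puts(pid_string);
        return 0;
    }
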
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index f9009166c0..81146e38d7 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -42,28 +42,26 @@
#include "erl_bits.h"
#include "dtrace-wrapper.h"
-static int open_port(Process* p, Eterm name, Eterm settings, int *err_nump);
+static Port *open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump);
static byte* convert_environment(Process* p, Eterm env);
static char **convert_args(Eterm);
static void free_args(char **);
char *erts_default_arg0 = "default";
-static BIF_RETTYPE
-port_call(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
-
BIF_RETTYPE open_port_2(BIF_ALIST_2)
{
- int port_num;
- Eterm port_val;
+ Port *port;
+ Eterm port_id;
char *str;
- int err_num;
+ int err_type, err_num;
- if ((port_num = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_num)) < 0) {
- if (port_num == -3) {
+ port = open_port(BIF_P, BIF_ARG_1, BIF_ARG_2, &err_type, &err_num);
+ if (!port) {
+ if (err_type == -3) {
ASSERT(err_num == BADARG || err_num == SYSTEM_LIMIT);
BIF_ERROR(BIF_P, err_num);
- } else if (port_num == -2) {
+ } else if (err_type == -2) {
str = erl_errno_id(err_num);
} else {
str = "einval";
@@ -74,546 +72,408 @@ BIF_RETTYPE open_port_2(BIF_ALIST_2)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
- port_val = erts_port[port_num].id;
- erts_add_link(&(erts_port[port_num].nlinks), LINK_PID, BIF_P->id);
- erts_add_link(&(BIF_P->nlinks), LINK_PID, port_val);
+ port_id = port->common.id;
+ erts_add_link(&ERTS_P_LINKS(port), LINK_PID, BIF_P->common.id);
+ erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, port_id);
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
- erts_port_release(&erts_port[port_num]);
+ erts_port_release(port);
- BIF_RET(port_val);
+ BIF_RET(port_id);
}
-/****************************************************************************
-
- PORT BIFS:
-
- port_command/2 -- replace Port ! {..., {command, Data}}
- port_command(Port, Data) -> true
- when port(Port), io-list(Data)
-
- port_control/3 -- new port_control(Port, Ctl, Data) -> Reply
- port_control(Port, Ctl, Data) -> Reply
- where integer(Ctl), io-list(Data), io-list(Reply)
-
- port_close/1 -- replace Port ! {..., close}
- port_close(Port) -> true
- when port(Port)
-
- port_connect/2 -- replace Port ! {..., {connect, Pid}}
- port_connect(Port, Pid)
- when port(Port), pid(Pid)
-
- ***************************************************************************/
-
-static Port*
-id_or_name2port(Process *c_p, Eterm id)
+static ERTS_INLINE Port *
+lookup_port(Process *c_p, Eterm id_or_name)
{
- Port *port;
- if (is_not_atom(id))
- port = erts_id2port(id, c_p, ERTS_PROC_LOCK_MAIN);
+ /* TODO: Implement nicer lookup in register... */
+ Eterm id;
+ if (is_atom(id_or_name))
+ id = erts_whereis_name_to_id(c_p, id_or_name);
else
- erts_whereis_name(c_p, ERTS_PROC_LOCK_MAIN, id, NULL, 0, 0, &port);
- return port;
+ id = id_or_name;
+ return erts_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
}
-#define ERTS_PORT_COMMAND_FLAG_FORCE (((Uint32) 1) << 0)
-#define ERTS_PORT_COMMAND_FLAG_NOSUSPEND (((Uint32) 1) << 1)
+/*
+ * erts_internal:port_command/3 is used by the
+ * erlang:port_command/2 and erlang:port_command/3
+ * BIFs.
+ */
-static BIF_RETTYPE
-do_port_command(Process *BIF_P, Eterm arg1, Eterm arg2, Eterm arg3,
- Uint32 flags)
+BIF_RETTYPE erts_internal_port_command_3(BIF_ALIST_3)
{
BIF_RETTYPE res;
- Port *p;
-
- /* Trace sched out before lock check wait */
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_out);
- }
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_inactive);
- }
-
- p = id_or_name2port(BIF_P, arg1);
- if (!p) {
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
+ Port *prt;
+ int flags = 0;
+ Eterm ref;
+
+ if (is_not_nil(BIF_ARG_3)) {
+ Eterm l = BIF_ARG_3;
+ while (is_list(l)) {
+ Eterm* cons = list_val(l);
+ Eterm car = CAR(cons);
+ if (car == am_force)
+ flags |= ERTS_PORT_SIG_FLG_FORCE;
+ else if (car == am_nosuspend)
+ flags |= ERTS_PORT_SIG_FLG_NOSUSPEND;
+ else
+ BIF_RET(am_badarg);
+ l = CDR(cons);
}
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
- }
- BIF_ERROR(BIF_P, BADARG);
+ if (!is_nil(l))
+ BIF_RET(am_badarg);
}
-
- /* Trace port in, id_or_name2port causes wait */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_in, am_command);
- }
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_active);
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ if (flags & ERTS_PORT_SIG_FLG_FORCE) {
+ if (!(prt->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY))
+ BIF_RET(am_notsup);
}
- ERTS_BIF_PREP_RET(res, am_true);
+#ifdef DEBUG
+ ref = NIL;
+#endif
- if ((flags & ERTS_PORT_COMMAND_FLAG_FORCE)
- && !(p->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY)) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_NOTSUP);
- }
- else if (!(flags & ERTS_PORT_COMMAND_FLAG_FORCE)
- && p->status & ERTS_PORT_SFLG_PORT_BUSY) {
- if (flags & ERTS_PORT_COMMAND_FLAG_NOSUSPEND) {
+ switch (erts_port_output(BIF_P, flags, prt, prt->common.id, BIF_ARG_2, &ref)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ ERTS_BIF_PREP_RET(res, am_badarg);
+ break;
+ case ERTS_PORT_OP_BUSY:
+ ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
+ if (flags & ERTS_PORT_SIG_FLG_NOSUSPEND)
ERTS_BIF_PREP_RET(res, am_false);
- }
else {
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, p);
- if (erts_system_monitor_flags.busy_port) {
- monitor_generic(BIF_P, am_busy_port, p->id);
- }
- ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_port_command_3], BIF_P,
- arg1, arg2, arg3);
- }
- } else {
- int wres;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- wres = erts_write_to_port(BIF_P->id, p, arg2);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- if (wres != 0) {
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, prt);
+ ERTS_BIF_PREP_YIELD3(res, bif_export[BIF_erts_internal_port_command_3],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
}
- }
-
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_out, am_command);
- }
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_inactive);
+ break;
+ case ERTS_PORT_OP_BUSY_SCHEDULED:
+ ASSERT(!(flags & ERTS_PORT_SIG_FLG_FORCE));
+ /* Fall through... */
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(ref));
+ ERTS_BIF_PREP_RET(res, ref);
+ break;
+ case ERTS_PORT_OP_DONE:
+ ERTS_BIF_PREP_RET(res, am_true);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_output() result");
+ break;
}
- erts_port_release(p);
- /* Trace sched in after port release */
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
- }
-
if (ERTS_PROC_IS_EXITING(BIF_P)) {
KILL_CATCHES(BIF_P); /* Must exit */
ERTS_BIF_PREP_ERROR(res, BIF_P, EXC_ERROR);
}
- return res;
-}
-BIF_RETTYPE port_command_2(BIF_ALIST_2)
-{
- return do_port_command(BIF_P, BIF_ARG_1, BIF_ARG_2, NIL, 0);
+ return res;
}
-BIF_RETTYPE port_command_3(BIF_ALIST_3)
+BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
{
- Eterm l = BIF_ARG_3;
- Uint32 flags = 0;
- while (is_list(l)) {
- Eterm* cons = list_val(l);
- Eterm car = CAR(cons);
- if (car == am_force) {
- flags |= ERTS_PORT_COMMAND_FLAG_FORCE;
- } else if (car == am_nosuspend) {
- flags |= ERTS_PORT_COMMAND_FLAG_NOSUSPEND;
- } else {
- BIF_ERROR(BIF_P, BADARG);
- }
- l = CDR(cons);
- }
- if(!is_nil(l)) {
- BIF_ERROR(BIF_P, BADARG);
+ Port* prt;
+ Eterm retval;
+ Uint uint_op;
+ unsigned int op;
+ erts_aint32_t state;
+
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ if (!term_to_Uint(BIF_ARG_2, &uint_op))
+ BIF_RET(am_badarg);
+
+ if (uint_op > (Uint) UINT_MAX)
+ BIF_RET(am_badarg);
+
+ op = (unsigned int) uint_op;
+
+ switch (erts_port_call(BIF_P, prt, op, BIF_ARG_3, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_DROPPED:
+ case ERTS_PORT_OP_BADARG:
+ retval = am_badarg;
+ break;
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ break;
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_call() result");
+ retval = am_internal_error;
+ break;
}
- return do_port_command(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, flags);
-}
-BIF_RETTYPE port_call_2(BIF_ALIST_2)
-{
- return port_call(BIF_P,BIF_ARG_1, make_small(0), BIF_ARG_2);
-}
+ state = erts_smp_atomic32_read_acqb(&BIF_P->state);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
+#ifdef ERTS_SMP
+ if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
+#endif
+ ERTS_BIF_EXITED(BIF_P);
+ }
-BIF_RETTYPE port_call_3(BIF_ALIST_3)
-{
- return port_call(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ BIF_RET(retval);
}
-static BIF_RETTYPE
-port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3)
+BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
{
- Uint op;
- Port *p;
- Uint size;
- byte *bytes;
- byte *endp;
- ErlDrvSizeT real_size;
- erts_driver_t *drv;
- byte port_input[256]; /* Default input buffer to encode in */
- byte port_result[256]; /* Buffer for result from port. */
- byte* port_resp; /* Pointer to result buffer. */
- char *prc;
- ErlDrvSSizeT ret;
- Eterm res;
- Sint result_size;
- Eterm *hp;
- Eterm *hp_end;
- unsigned ret_flags = 0U;
- int fpe_was_unmasked;
-
- bytes = &port_input[0];
- port_resp = port_result;
- /* trace of port scheduling with virtual process descheduling
- * lock wait
- */
- if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(c_p, am_out);
- }
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(c_p, am_inactive);
+ Port* prt;
+ Eterm retval;
+ Uint uint_op;
+ unsigned int op;
+ erts_aint32_t state;
+
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ if (!term_to_Uint(BIF_ARG_2, &uint_op))
+ BIF_RET(am_badarg);
+
+ if (uint_op > (Uint) UINT_MAX)
+ BIF_RET(am_badarg);
+
+ op = (unsigned int) uint_op;
+
+ switch (erts_port_control(BIF_P, prt, op, BIF_ARG_3, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ retval = am_badarg;
+ break;
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ break;
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_control() result");
+ retval = am_internal_error;
+ break;
}
- p = id_or_name2port(c_p, arg1);
- if (!p) {
- error:
- if (port_resp != port_result &&
- !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
- driver_free(port_resp);
- }
- if (bytes != &port_input[0])
- erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
- /* Need to virtual schedule in the process if there
- * was an error.
- */
- if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(c_p, am_in);
- }
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(c_p, am_active);
- }
-
- if (p)
- erts_port_release(p);
+ state = erts_smp_atomic32_read_acqb(&BIF_P->state);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
-#else
- ERTS_BIF_CHK_EXITED(c_p);
+ if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
#endif
- BIF_ERROR(c_p, BADARG);
- }
-
- if ((drv = p->drv_ptr) == NULL) {
- goto error;
- }
- if (drv->call == NULL) {
- goto error;
- }
- if (!term_to_Uint(arg2, &op)) {
- goto error;
- }
- p->caller = c_p->id;
-
- /* Lock taken, virtual schedule of port */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_in, am_call);
- }
-
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_active);
+ ERTS_BIF_EXITED(BIF_P);
}
- size = erts_encode_ext_size(arg3);
- if (size > sizeof(port_input))
- bytes = erts_alloc(ERTS_ALC_T_PORT_CALL_BUF, size);
- endp = bytes;
- erts_encode_ext(arg3, &endp);
+ BIF_RET(retval);
+}
- real_size = endp - bytes;
- if (real_size > size) {
- erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
- __FILE__, __LINE__, endp - (bytes + size));
- }
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(driver_call)) {
- DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+/*
+ * erts_internal:port_close/1 is used by the
+ * erlang:port_close/1 BIF.
+ */
+BIF_RETTYPE erts_internal_port_close_1(BIF_ALIST_1)
+{
+ Eterm ref;
+ Port *prt;
- dtrace_pid_str(p->connected, process_str);
- dtrace_port_str(p, port_str);
- DTRACE5(driver_call, process_str, port_str, p->name, op, real_size);
- }
-#endif
- prc = (char *) port_resp;
- fpe_was_unmasked = erts_block_fpe();
- ret = drv->call((ErlDrvData)p->drv_data,
- (unsigned) op,
- (char *) bytes,
- (int) real_size,
- &prc,
- (int) sizeof(port_result),
- &ret_flags);
- erts_unblock_fpe(fpe_was_unmasked);
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_out, am_call);
- }
-
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_inactive);
- }
-
- port_resp = (byte *) prc;
- p->caller = NIL;
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
-#ifdef HARDDEBUG
- {
- ErlDrvSizeT z;
- printf("real_size = %ld,%d, ret = %ld,%d\r\n", (unsigned long) real_size,
- (int) real_size, (unsigned long)ret, (int) ret);
- printf("[");
- for(z = 0; z < real_size; ++z) {
- printf("%d, ",(int) bytes[z]);
- }
- printf("]\r\n");
- printf("[");
- for(z = 0; z < ret; ++z) {
- printf("%d, ",(int) port_resp[z]);
- }
- printf("]\r\n");
- }
-#endif
- if (ret <= 0 || port_resp[0] != VERSION_MAGIC) {
- /* Error or a binary without magic/ with wrong magic */
- goto error;
- }
- result_size = erts_decode_ext_size(port_resp, ret);
- if (result_size < 0) {
- goto error;
- }
- hp = HAlloc(c_p, result_size);
- hp_end = hp + result_size;
- endp = port_resp;
- res = erts_decode_ext(&hp, &MSO(c_p), &endp);
- if (res == THE_NON_VALUE) {
- goto error;
- }
- HRelease(c_p, hp_end, hp);
- if (port_resp != port_result && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
- driver_free(port_resp);
- }
- if (bytes != &port_input[0])
- erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
- if (p)
- erts_port_release(p);
-#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
-#else
- ERTS_BIF_CHK_EXITED(c_p);
+#ifdef DEBUG
+ ref = NIL;
#endif
- if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(c_p, am_in);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(c_p, am_active);
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+
+ switch (erts_port_exit(BIF_P, 0, prt, prt->common.id, am_normal, &ref)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(ref));
+ BIF_RET(ref);
+ case ERTS_PORT_OP_DONE:
+ BIF_RET(am_true);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_exit() result");
+ BIF_RET(am_internal_error);
}
-
- return res;
}
-
-BIF_RETTYPE port_control_3(BIF_ALIST_3)
-{
- Port* p;
- Uint op;
- Eterm res = THE_NON_VALUE;
-
- /* Virtual schedule out calling process before lock wait */
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_out);
- }
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_inactive);
- }
+/*
+ * erts_internal:port_connect/2 is used by the
+ * erlang:port_connect/2 BIF.
+ */
+BIF_RETTYPE erts_internal_port_connect_2(BIF_ALIST_2)
+{
+ Eterm ref;
+ Port* prt;
- p = id_or_name2port(BIF_P, BIF_ARG_1);
- if (!p) {
- /* Schedule the process before exiting */
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
- }
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
- }
-
- BIF_ERROR(BIF_P, BADARG);
- }
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
- /* Trace the port for scheduling in */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_in, am_control);
- }
-
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_active);
- }
+#ifdef DEBUG
+ ref = NIL;
+#endif
- if (term_to_Uint(BIF_ARG_2, &op))
- res = erts_port_control(BIF_P, p, op, BIF_ARG_3);
-
- /* Trace the port for scheduling out */
- if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports_where(p, am_out, am_control);
+ switch (erts_port_connect(BIF_P, 0, prt, prt->common.id, BIF_ARG_2, &ref)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(ref));
+ BIF_RET(ref);
+ break;
+ case ERTS_PORT_OP_DONE:
+ BIF_RET(am_true);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_connect() result");
+ BIF_RET(am_internal_error);
}
+}
- if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
- profile_runnable_port(p, am_inactive);
- }
+BIF_RETTYPE erts_internal_port_info_1(BIF_ALIST_1)
+{
+ Eterm retval;
+ Port* prt;
- erts_port_release(p);
-#ifdef ERTS_SMP
- ERTS_SMP_BIF_CHK_PENDING_EXIT(BIF_P, ERTS_PROC_LOCK_MAIN);
-#else
- ERTS_BIF_CHK_EXITED(BIF_P);
-#endif
-
- if (IS_TRACED_FL(BIF_P, F_TRACE_SCHED_PROCS)) {
- trace_virtual_sched(BIF_P, am_in);
+ if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) {
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_undefined);
}
-
- if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
- profile_runnable_proc(BIF_P, am_active);
+ else if (is_external_port(BIF_ARG_1)) {
+ if (external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ BIF_RET(am_undefined);
+ else
+ BIF_RET(am_badarg);
}
-
- if (is_non_value(res)) {
- BIF_ERROR(BIF_P, BADARG);
+ else {
+ BIF_RET(am_badarg);
+ }
+
+ switch (erts_port_info(BIF_P, prt, THE_NON_VALUE, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_undefined);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ BIF_RET(retval);
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ BIF_RET(retval);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_info() result");
+ BIF_RET(am_internal_error);
}
- BIF_RET(res);
}
-BIF_RETTYPE port_close_1(BIF_ALIST_1)
-{
- Port* p;
- erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- p = id_or_name2port(NULL, BIF_ARG_1);
- if (!p) {
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_ERROR(BIF_P, BADARG);
- }
- erts_do_exit_port(p, p->connected, am_normal);
- /* if !ERTS_SMP: since we terminate port with reason normal
- we SHOULD never get an exit signal ourselves
- */
- erts_port_release(p);
- erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(am_true);
-}
-BIF_RETTYPE port_connect_2(BIF_ALIST_2)
+BIF_RETTYPE erts_internal_port_info_2(BIF_ALIST_2)
{
+ Eterm retval;
Port* prt;
- Process* rp;
- Eterm pid = BIF_ARG_2;
- if (is_not_internal_pid(pid)) {
- error:
- BIF_ERROR(BIF_P, BADARG);
+ if (is_internal_port(BIF_ARG_1) || is_atom(BIF_ARG_1)) {
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_undefined);
}
- prt = id_or_name2port(BIF_P, BIF_ARG_1);
- if (!prt) {
- goto error;
+ else if (is_external_port(BIF_ARG_1)) {
+ if (external_port_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
+ BIF_RET(am_undefined);
+ else
+ BIF_RET(am_badarg);
}
-
- rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- erts_smp_port_unlock(prt);
- ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P);
- goto error;
- }
-
- erts_add_link(&(rp->nlinks), LINK_PID, prt->id);
- erts_add_link(&(prt->nlinks), LINK_PID, pid);
-
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-
- prt->connected = pid; /* internal pid */
- erts_smp_port_unlock(prt);
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(port_connect)) {
- DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);
-
- dtrace_pid_str(prt->connected, process_str);
- erts_snprintf(port_str, sizeof(port_str), "%T", prt->id);
- dtrace_proc_str(rp, newprocess_str);
- DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
+ else {
+ BIF_RET(am_badarg);
+ }
+
+ switch (erts_port_info(BIF_P, prt, BIF_ARG_2, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_undefined);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ BIF_RET(retval);
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ BIF_RET(retval);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_info() result");
+ BIF_RET(am_internal_error);
}
-#endif
- BIF_RET(am_true);
}
-BIF_RETTYPE port_set_data_2(BIF_ALIST_2)
+
+BIF_RETTYPE erts_internal_port_set_data_2(BIF_ALIST_2)
{
+ Eterm ref;
Port* prt;
- Eterm portid = BIF_ARG_1;
- Eterm data = BIF_ARG_2;
- prt = id_or_name2port(BIF_P, portid);
- if (!prt) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (prt->bp != NULL) {
- free_message_buffer(prt->bp);
- prt->bp = NULL;
- }
- if (IS_CONST(data)) {
- prt->data = data;
- } else {
- Uint size;
- ErlHeapFragment* bp;
- Eterm* hp;
-
- size = size_object(data);
- prt->bp = bp = new_message_buffer(size);
- hp = bp->mem;
- prt->data = copy_struct(data, size, &hp, &bp->off_heap);
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ switch (erts_port_set_data(BIF_P, prt, BIF_ARG_2, &ref)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(ref));
+ BIF_RET(ref);
+ case ERTS_PORT_OP_DONE:
+ BIF_RET(am_true);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_set_data() result");
+ BIF_RET(am_internal_error);
}
- erts_smp_port_unlock(prt);
- BIF_RET(am_true);
}
-BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
+BIF_RETTYPE erts_internal_port_get_data_1(BIF_ALIST_1)
{
- BIF_RETTYPE res;
+ Eterm retval;
Port* prt;
- Eterm portid = BIF_ARG_1;
- prt = id_or_name2port(BIF_P, portid);
- if (!prt) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (prt->bp == NULL) { /* MUST be CONST! */
- res = prt->data;
- } else {
- Eterm* hp = HAlloc(BIF_P, prt->bp->used_size);
- res = copy_struct(prt->data, prt->bp->used_size, &hp, &MSO(BIF_P));
+ prt = lookup_port(BIF_P, BIF_ARG_1);
+ if (!prt)
+ BIF_RET(am_badarg);
+
+ switch (erts_port_get_data(BIF_P, prt, &retval)) {
+ case ERTS_PORT_OP_CALLER_EXIT:
+ case ERTS_PORT_OP_BADARG:
+ case ERTS_PORT_OP_DROPPED:
+ BIF_RET(am_badarg);
+ case ERTS_PORT_OP_SCHEDULED:
+ ASSERT(is_internal_ref(retval));
+ BIF_RET(retval);
+ case ERTS_PORT_OP_DONE:
+ ASSERT(is_not_internal_ref(retval));
+ BIF_RET(retval);
+ default:
+ ERTS_INTERNAL_ERROR("Unexpected erts_port_get_data() result");
+ BIF_RET(am_internal_error);
}
- erts_smp_port_unlock(prt);
- BIF_RET(res);
}
/*
@@ -625,11 +485,10 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1)
* either BADARG or SYSTEM_LIMIT).
*/
-static int
-open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
+static Port *
+open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump)
{
-#define OPEN_PORT_ERROR(VAL) do { port_num = (VAL); goto do_return; } while (0)
- int i, port_num;
+ int i;
Eterm option;
Uint arity;
Eterm* tp;
@@ -637,11 +496,11 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
erts_driver_t* driver;
char* name_buf = NULL;
SysDriverOpts opts;
- int binary_io;
- int soft_eof;
Sint linebuf;
Eterm edir = NIL;
byte dir[MAXPATHLEN];
+ erts_aint32_t sflgs = 0;
+ Port *port;
/* These are the defaults */
opts.packet_bytes = 0;
@@ -655,8 +514,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
opts.overlapped_io = 0;
opts.spawn_type = ERTS_SPAWN_ANY;
opts.argv = NULL;
- binary_io = 0;
- soft_eof = 0;
+ opts.parallelism = erts_port_parallelism;
linebuf = 0;
*err_nump = 0;
@@ -734,6 +592,13 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
}
} else if (option == am_cd) {
edir = *tp;
+ } else if (option == am_parallelism) {
+ if (*tp == am_true)
+ opts.parallelism = 1;
+ else if (*tp == am_false)
+ opts.parallelism = 0;
+ else
+ goto badarg;
} else {
goto badarg;
}
@@ -748,13 +613,13 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
} else if (*nargs == am_nouse_stdio) {
opts.use_stdio = 0;
} else if (*nargs == am_binary) {
- binary_io = 1;
+ sflgs |= ERTS_PORT_SFLG_BINARY_IO;
} else if (*nargs == am_in) {
opts.read_write |= DO_READ;
} else if (*nargs == am_out) {
opts.read_write |= DO_WRITE;
} else if (*nargs == am_eof) {
- soft_eof = 1;
+ sflgs |= ERTS_PORT_SFLG_SOFT_EOF;
} else if (*nargs == am_hide) {
opts.hide_window = 1;
} else if (*nargs == am_exit_status) {
@@ -902,9 +767,9 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
heap[2] = make_small(0);
heap[3] = NIL;
iolist = make_list(heap);
- r = io_list_to_buf(iolist, (char*) dir, MAXPATHLEN);
+ r = erts_iolist_to_buf(iolist, (char*) dir, MAXPATHLEN);
UnUseTmpHeap(4,p);
- if (r < 0) {
+ if (ERTS_IOLIST_TO_BUF_FAILED(r)) {
goto badarg;
}
opts.wd = (char *) dir;
@@ -926,44 +791,40 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- port_num = erts_open_driver(driver, p->id, name_buf, &opts, err_nump);
+ port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump);
#ifdef USE_VM_PROBES
- if (port_num >= 0 && DTRACE_ENABLED(port_open)) {
+ if (port && DTRACE_ENABLED(port_open)) {
DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
dtrace_proc_str(p, process_str);
- erts_snprintf(port_str, sizeof(port_str), "%T", erts_port[port_num].id);
+ erts_snprintf(port_str, sizeof(port_str), "%T", port->common.id);
DTRACE3(port_open, process_str, name_buf, port_str);
}
#endif
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- if (port_num < 0) {
- DEBUGF(("open_driver returned %d(%d)\n", port_num, *err_nump));
+ if (!port) {
+ DEBUGF(("open_driver returned (%d:%d)\n",
+ err_typep ? *err_typep : 4711,
+ err_nump ? *err_nump : 4711));
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
trace_virtual_sched(p, am_in);
}
- OPEN_PORT_ERROR(port_num);
+ goto do_return;
}
if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) {
trace_virtual_sched(p, am_in);
}
- if (binary_io) {
- erts_port_status_bor_set(&erts_port[port_num],
- ERTS_PORT_SFLG_BINARY_IO);
- }
- if (soft_eof) {
- erts_port_status_bor_set(&erts_port[port_num],
- ERTS_PORT_SFLG_SOFT_EOF);
- }
- if (linebuf && erts_port[port_num].linebuf == NULL){
- erts_port[port_num].linebuf = allocate_linebuf(linebuf);
- erts_port_status_bor_set(&erts_port[port_num],
- ERTS_PORT_SFLG_LINEBUF_IO);
+ if (linebuf && port->linebuf == NULL){
+ port->linebuf = allocate_linebuf(linebuf);
+ sflgs |= ERTS_PORT_SFLG_LINEBUF_IO;
}
+
+ if (sflgs)
+ erts_atomic32_read_bor_relb(&port->state, sflgs);
do_return:
if (name_buf)
@@ -974,13 +835,15 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_nump)
if (opts.wd && opts.wd != ((char *)dir)) {
erts_free(ERTS_ALC_T_TMP, (void *) opts.wd);
}
- return port_num;
+ return port;
badarg:
- *err_nump = BADARG;
- OPEN_PORT_ERROR(-3);
+ if (err_typep)
+ *err_typep = -3;
+ if (err_nump)
+ *err_nump = BADARG;
+ port = NULL;
goto do_return;
-#undef OPEN_PORT_ERROR
}
 /* Arguments can be given in unicode and as raw binaries, convert filename is used to convert */
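
Note: the erts_internal:port_* BIFs above all funnel the port-operation outcome through the same switch: bad or dropped operations become badarg, scheduled ones return the reference that will identify the asynchronous reply, and completed ones return the result directly. A compact stand-alone analogue of that dispatch (the enum and names are illustrative, not the ERTS definitions):

    #include <stdio.h>

    typedef enum {
        OP_BADARG,      /* bad argument or caller already exiting */
        OP_DROPPED,     /* port disappeared before the signal was delivered */
        OP_SCHEDULED,   /* queued; the reply arrives later as a message */
        OP_DONE         /* completed synchronously */
    } op_result_t;

    static const char *dispatch(op_result_t res)
    {
        switch (res) {
        case OP_BADARG:
        case OP_DROPPED:
            return "badarg";        /* caller turns this into an error */
        case OP_SCHEDULED:
            return "ref";           /* async: wait for the tagged reply */
        case OP_DONE:
            return "true";
        default:
            return "internal_error";
        }
    }

    int main(void)
    {
        printf("%s\n", dispatch(OP_SCHEDULED));
        return 0;
    }
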
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index 6b843d2e08..88f980d19f 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -71,14 +71,9 @@ void erts_init_bif_re(void)
erts_pcre_stack_free = &erts_erts_pcre_stack_free;
default_table = NULL; /* ISO8859-1 default, forced into pcre */
max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
-
- sys_memset((void *) &re_exec_trap_export, 0, sizeof(Export));
- re_exec_trap_export.address = &re_exec_trap_export.code[3];
- re_exec_trap_export.code[0] = am_erlang;
- re_exec_trap_export.code[1] = am_re_run_trap;
- re_exec_trap_export.code[2] = 3;
- re_exec_trap_export.code[3] = (BeamInstr) em_apply_bif;
- re_exec_trap_export.code[4] = (BeamInstr) &re_exec_trap;
+
+ erts_init_trap_export(&re_exec_trap_export, am_erlang, am_re_run_trap, 3,
+ &re_exec_trap);
grun_trap_exportp = erts_export_put(am_re,am_grun,3);
urun_trap_exportp = erts_export_put(am_re,am_urun,3);
@@ -418,7 +413,7 @@ build_compile_result(Process *p, Eterm error_tag, pcre *result, int errcode, con
static BIF_RETTYPE
re_compile(Process* p, Eterm arg1, Eterm arg2)
{
- Uint slen;
+ ErlDrvSizeT slen;
char *expr;
pcre *result;
int errcode = 0;
@@ -449,7 +444,7 @@ re_compile(Process* p, Eterm arg1, Eterm arg2)
BIF_ERROR(p,BADARG);
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (io_list_to_buf(arg1, expr, slen) != 0) {
+ if (erts_iolist_to_buf(arg1, expr, slen) != 0) {
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
BIF_ERROR(p,BADARG);
}
@@ -802,7 +797,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
memcpy(tmpb,ap->name,ap->len);
tmpb[ap->len] = '\0';
} else {
- Uint slen;
+ ErlDrvSizeT slen;
if (erts_iolist_size(val, &slen)) {
goto error;
}
@@ -814,7 +809,7 @@ build_capture(Eterm capture_spec[CAPSPEC_SIZE], const pcre *code)
(tmpbsiz = slen + 1));
}
}
- if (io_list_to_buf(val, tmpb, slen) != 0) {
+ if (erts_iolist_to_buf(val, tmpb, slen) != 0) {
goto error;
}
tmpb[slen] = '\0';
@@ -858,7 +853,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
const pcre *code_tmp;
RestartContext restart;
byte *temp_alloc = NULL;
- Uint slength;
+ ErlDrvSizeT slength;
int startoffset = 0;
int options = 0, comp_options = 0;
int ovsize;
@@ -882,7 +877,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
if (is_not_tuple(arg2) || (arityval(*tuple_val(arg2)) != 4)) {
if (is_binary(arg2) || is_list(arg2) || is_nil(arg2)) {
/* Compile from textual RE */
- Uint slen;
+ ErlDrvSizeT slen;
char *expr;
pcre *result;
int errcode = 0;
@@ -901,7 +896,7 @@ re_run(Process *p, Eterm arg1, Eterm arg2, Eterm arg3)
}
expr = erts_alloc(ERTS_ALC_T_RE_TMP_BUF, slen + 1);
- if (io_list_to_buf(arg2, expr, slen) != 0) {
+ if (erts_iolist_to_buf(arg2, expr, slen) != 0) {
erts_free(ERTS_ALC_T_RE_TMP_BUF, expr);
BIF_ERROR(p,BADARG);
}
@@ -1044,7 +1039,7 @@ handle_iolist:
}
restart.subject = erts_alloc(ERTS_ALC_T_RE_SUBJECT, slength);
- if (io_list_to_buf(arg1, restart.subject, slength) != 0) {
+ if (erts_iolist_to_buf(arg1, restart.subject, slength) != 0) {
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.ovector);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.code);
erts_free(ERTS_ALC_T_RE_SUBJECT, restart.subject);
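Several files in this change (here, and later erl_db.c and erl_db_tree.c) drop the repeated memset-plus-field-assignment blocks that used to build trap Exports by hand and call erts_init_trap_export() instead. A rough standalone sketch of that consolidation, with made-up types and field layout rather than the real Export structure:

#include <string.h>

typedef unsigned long Instr;
typedef int (*TrapFn)(void);

/* Simplified stand-in for an export-like entry: three name words, a small
 * code array, and a pointer to where execution starts. */
typedef struct {
    unsigned long module;
    unsigned long function;
    int arity;
    Instr code[5];
    Instr *address;
    TrapFn trap;      /* kept as a separate field here to avoid casts */
} FakeExport;

/* One helper instead of repeating memset() plus five assignments per trap. */
static void
init_trap_export(FakeExport *ep, unsigned long mod, unsigned long fun,
                 int arity, TrapFn bif)
{
    memset(ep, 0, sizeof(*ep));
    ep->module = mod;
    ep->function = fun;
    ep->arity = arity;
    ep->code[3] = (Instr) 0xB1F;     /* stands in for the apply-bif opcode */
    ep->trap = bif;                  /* the C function to trap into */
    ep->address = &ep->code[3];      /* entry point is the opcode slot */
}

static int my_trap(void) { return 0; }

int main(void)
{
    FakeExport re_exec_trap;
    init_trap_export(&re_exec_trap, 1 /* 'erlang' */, 2 /* 're_run_trap' */,
                     3, my_trap);
    return re_exec_trap.address == &re_exec_trap.code[3] ? 0 : 1;
}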
diff --git a/erts/emulator/beam/erl_bif_timer.c b/erts/emulator/beam/erl_bif_timer.c
index d806be0704..d67695e533 100644
--- a/erts/emulator/beam/erl_bif_timer.c
+++ b/erts/emulator/beam/erl_bif_timer.c
@@ -265,10 +265,10 @@ link_proc(Process *p, ErtsBifTimer* btm)
{
btm->receiver.proc.ess = p;
btm->receiver.proc.prev = NULL;
- btm->receiver.proc.next = p->bif_timers;
- if (p->bif_timers)
- p->bif_timers->receiver.proc.prev = btm;
- p->bif_timers = btm;
+ btm->receiver.proc.next = p->u.bif_timers;
+ if (p->u.bif_timers)
+ p->u.bif_timers->receiver.proc.prev = btm;
+ p->u.bif_timers = btm;
}
static ERTS_INLINE void
@@ -277,7 +277,7 @@ unlink_proc(ErtsBifTimer* btm)
if (btm->receiver.proc.prev)
btm->receiver.proc.prev->receiver.proc.next = btm->receiver.proc.next;
else
- btm->receiver.proc.ess->bif_timers = btm->receiver.proc.next;
+ btm->receiver.proc.ess->u.bif_timers = btm->receiver.proc.next;
if (btm->receiver.proc.next)
btm->receiver.proc.next->receiver.proc.prev = btm->receiver.proc.prev;
}
@@ -324,10 +324,9 @@ bif_timer_timeout(ErtsBifTimer* btm)
ASSERT(!erts_get_current_process());
if (btm->flags & BTM_FLG_BYNAME)
- rp = erts_whereis_process(NULL,0,btm->receiver.name,0,ERTS_P2P_FLG_SMP_INC_REFC);
+ rp = erts_whereis_process(NULL, 0, btm->receiver.name, 0, 0);
else {
rp = btm->receiver.proc.ess;
- erts_smp_proc_inc_refc(rp);
unlink_proc(btm);
}
@@ -379,7 +378,6 @@ bif_timer_timeout(ErtsBifTimer* btm)
#endif
);
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
}
}
@@ -615,7 +613,7 @@ erts_print_bif_timer_info(int to, void *to_arg)
for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
Eterm receiver = (btm->flags & BTM_FLG_BYNAME
? btm->receiver.name
- : btm->receiver.proc.ess->id);
+ : btm->receiver.proc.ess->common.id);
erts_print(to, to_arg, "=timer:%T\n", receiver);
erts_print(to, to_arg, "Message: %T\n", btm->message);
erts_print(to, to_arg, "Time left: %u ms\n",
@@ -639,7 +637,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
erts_smp_proc_lock(p, plocks);
}
- btm = p->bif_timers;
+ btm = p->u.bif_timers;
while (btm) {
ErtsBifTimer *tmp_btm;
ASSERT(!(btm->flags & BTM_FLG_CANCELED));
@@ -649,7 +647,7 @@ erts_cancel_bif_timers(Process *p, ErtsProcLocks plocks)
erts_cancel_timer(&tmp_btm->tm);
}
- p->bif_timers = NULL;
+ p->u.bif_timers = NULL;
erts_smp_btm_rwunlock();
}
@@ -698,7 +696,7 @@ erts_bif_timer_foreach(void (*func)(Eterm, Eterm, ErlHeapFragment *, void *),
for (btm = bif_timer_tab[i]; btm; btm = btm->tab.next) {
(*func)((btm->flags & BTM_FLG_BYNAME
? btm->receiver.name
- : btm->receiver.proc.ess->id),
+ : btm->receiver.proc.ess->common.id),
btm->message,
btm->bp,
arg);
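link_proc()/unlink_proc() above keep each BIF timer on an intrusive doubly-linked list headed at p->u.bif_timers, so cancellation never has to search the list. A small self-contained sketch of the same prev/next bookkeeping, using hypothetical struct names rather than the ERTS types:

#include <assert.h>
#include <stddef.h>

struct timer;

struct proc {
    struct timer *bif_timers;    /* head of this process's timer list */
};

struct timer {
    struct proc  *owner;
    struct timer *prev;
    struct timer *next;
};

/* Push a timer at the head of its owner's list (mirrors link_proc()). */
static void link_timer(struct proc *p, struct timer *t)
{
    t->owner = p;
    t->prev = NULL;
    t->next = p->bif_timers;
    if (p->bif_timers)
        p->bif_timers->prev = t;
    p->bif_timers = t;
}

/* Unlink a timer from wherever it sits (mirrors unlink_proc()). */
static void unlink_timer(struct timer *t)
{
    if (t->prev)
        t->prev->next = t->next;
    else
        t->owner->bif_timers = t->next;
    if (t->next)
        t->next->prev = t->prev;
}

int main(void)
{
    struct proc p = { NULL };
    struct timer a = {0}, b = {0};

    link_timer(&p, &a);
    link_timer(&p, &b);          /* list is now b -> a */
    unlink_timer(&a);            /* remove from the tail */
    assert(p.bif_timers == &b && b.next == NULL);
    unlink_timer(&b);
    assert(p.bif_timers == NULL);
    return 0;
}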
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 80f774523c..99a4394666 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -42,14 +42,33 @@
#define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1)
const struct trace_pattern_flags erts_trace_pattern_flags_off = {0, 0, 0, 0, 0};
+
+/*
+ * The following variables are protected by code write permission.
+ */
static int erts_default_trace_pattern_is_on;
static Binary *erts_default_match_spec;
static Binary *erts_default_meta_match_spec;
static struct trace_pattern_flags erts_default_trace_pattern_flags;
static Eterm erts_default_meta_tracer_pid;
+static struct { /* Protected by code write permission */
+ int current;
+ int install;
+ int local;
+ BpFunctions f; /* Local functions */
+ BpFunctions e; /* Export entries */
+#ifdef ERTS_SMP
+ Process* stager;
+ ErtsThrPrgrLaterOp lop;
+#endif
+} finish_bp;
+
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
+#ifdef ERTS_SMP
+static void smp_bp_finisher(void* arg);
+#endif
static BIF_RETTYPE
system_monitor(Process *p, Eterm monitor_pid, Eterm list);
@@ -60,12 +79,11 @@ static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
-static int setup_func_trace(Export* ep, void* match_prog);
-static int reset_func_trace(Export* ep);
-static void reset_bif_trace(int bif_index);
-static void setup_bif_trace(int bif_index);
-static void set_trace_bif(int bif_index, void* match_prog);
-static void clear_trace_bif(int bif_index);
+static void reset_bif_trace(void);
+static void setup_bif_trace(void);
+static void install_exp_breakpoints(BpFunctions* f);
+static void uninstall_exp_breakpoints(BpFunctions* f);
+static void clean_export_entries(BpFunctions* f);
void
erts_bif_trace_init(void)
@@ -98,7 +116,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
{
DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
int i;
- int matches = 0;
+ int matches = -1;
int specified = 0;
enum erts_break_op on;
Binary* match_prog_set;
@@ -106,10 +124,12 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
struct trace_pattern_flags flags = erts_trace_pattern_flags_off;
int is_global;
Process *meta_tracer_proc = p;
- Eterm meta_tracer_pid = p->id;
+ Eterm meta_tracer_pid = p->common.id;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- erts_smp_thr_progress_block();
+ if (!erts_try_seize_code_write_permission(p)) {
+ ERTS_BIF_YIELD3(bif_export[BIF_trace_pattern_3], p, MFA, Pattern, flaglist);
+ }
+ finish_bp.current = -1;
UseTmpHeap(3,p);
/*
@@ -151,14 +171,12 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
} else if (is_internal_port(meta_tracer_pid)) {
Port *meta_tracer_port;
- meta_tracer_proc = NULL;
- if (internal_port_index(meta_tracer_pid) >= erts_max_ports)
- goto error;
- meta_tracer_port =
- &erts_port[internal_port_index(meta_tracer_pid)];
- if (INVALID_TRACER_PORT(meta_tracer_port, meta_tracer_pid)) {
+ meta_tracer_proc = NULL;
+ meta_tracer_port = (erts_port_lookup(
+ meta_tracer_pid,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP));
+ if (!meta_tracer_port)
goto error;
- }
} else {
goto error;
}
@@ -234,14 +252,13 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
MatchSetRef(erts_default_meta_match_spec);
erts_default_meta_tracer_pid = meta_tracer_pid;
if (meta_tracer_proc) {
- meta_tracer_proc->trace_flags |= F_TRACER;
+ ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
}
} else if (! flags.breakpoint) {
MatchSetUnref(erts_default_meta_match_spec);
erts_default_meta_match_spec = NULL;
erts_default_meta_tracer_pid = NIL;
}
- MatchSetUnref(match_prog_set);
if (erts_default_trace_pattern_flags.breakpoint &&
flags.breakpoint) {
/* Breakpoint trace -> breakpoint trace */
@@ -297,8 +314,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
erts_default_trace_pattern_is_on = !!flags.breakpoint;
}
}
-
- goto done;
+ matches = 0;
} else if (is_tuple(MFA)) {
Eterm *tp = tuple_val(MFA);
if (tp[0] != make_arityval(3)) {
@@ -322,36 +338,64 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
if (is_small(mfa[2])) {
mfa[2] = signed_val(mfa[2]);
}
- } else {
- goto error;
- }
- if (meta_tracer_proc) {
- meta_tracer_proc->trace_flags |= F_TRACER;
- }
+ if (meta_tracer_proc) {
+ ERTS_TRACE_FLAGS(meta_tracer_proc) |= F_TRACER;
+ }
+ matches = erts_set_trace_pattern(p, mfa, specified,
+ match_prog_set, match_prog_set,
+ on, flags, meta_tracer_pid, 0);
+ }
- matches = erts_set_trace_pattern(mfa, specified,
- match_prog_set, match_prog_set,
- on, flags, meta_tracer_pid);
+ error:
MatchSetUnref(match_prog_set);
-
- done:
UnUseTmpHeap(3,p);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- return make_small(matches);
+#ifdef ERTS_SMP
+ if (finish_bp.current >= 0) {
+ ASSERT(matches >= 0);
+ ASSERT(finish_bp.stager == NULL);
+ finish_bp.stager = p;
+ erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop);
+ erts_smp_proc_inc_refc(p);
+ erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
+ ERTS_BIF_YIELD_RETURN(p, make_small(matches));
+ }
+#endif
- error:
+ erts_release_code_write_permission();
- MatchSetUnref(match_prog_set);
+ if (matches >= 0) {
+ return make_small(matches);
+ }
+ else {
+ BIF_ERROR(p, BADARG);
+ }
+}
- UnUseTmpHeap(3,p);
- erts_smp_thr_progress_unblock();
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- BIF_ERROR(p, BADARG);
+#ifdef ERTS_SMP
+static void smp_bp_finisher(void* null)
+{
+ if (erts_finish_breakpointing()) { /* Not done */
+ /* Arrange for being called again */
+ erts_schedule_thr_prgr_later_op(smp_bp_finisher, NULL, &finish_bp.lop);
+ }
+ else { /* Done */
+ Process* p = finish_bp.stager;
+#ifdef DEBUG
+ finish_bp.stager = NULL;
+#endif
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (!ERTS_PROC_IS_EXITING(p)) {
+ erts_resume(p, ERTS_PROC_LOCK_STATUS);
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_dec_refc(p);
+ erts_release_code_write_permission();
+ }
}
+#endif /* ERTS_SMP */
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
@@ -360,6 +404,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
struct trace_pattern_flags *trace_pattern_flags,
Eterm *meta_tracer_pid)
{
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
+ erts_smp_thr_progress_is_blocking());
if (trace_pattern_is_on)
*trace_pattern_is_on = erts_default_trace_pattern_is_on;
if (match_spec)
@@ -372,7 +418,12 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on,
*meta_tracer_pid = erts_default_meta_tracer_pid;
}
-
+int erts_is_default_trace_enabled(void)
+{
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() ||
+ erts_smp_thr_progress_is_blocking());
+ return erts_default_trace_pattern_is_on;
+}
Uint
erts_trace_flag2bit(Eterm flag)
@@ -466,27 +517,31 @@ Eterm trace_3(BIF_ALIST_3)
BIF_ERROR(p, BADARG);
}
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ ERTS_BIF_YIELD3(bif_export[BIF_trace_3], BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ }
+
if (is_nil(tracer) || is_internal_pid(tracer)) {
Process *tracer_proc = erts_pid2proc(p,
ERTS_PROC_LOCK_MAIN,
- is_nil(tracer) ? p->id : tracer,
+ is_nil(tracer) ? p->common.id : tracer,
ERTS_PROC_LOCKS_ALL);
if (!tracer_proc)
goto error;
- tracer_proc->trace_flags |= F_TRACER;
+ ERTS_TRACE_FLAGS(tracer_proc) |= F_TRACER;
erts_smp_proc_unlock(tracer_proc,
(tracer_proc == p
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
} else if (is_internal_port(tracer)) {
- Port *tracer_port = erts_id2port(tracer, p, ERTS_PROC_LOCK_MAIN);
- if (!erts_is_valid_tracer_port(tracer)) {
- if (tracer_port)
- erts_smp_port_unlock(tracer_port);
+ Port *tracer_port = erts_id2port_sflgs(tracer,
+ p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
+ if (!tracer_port)
goto error;
- }
- tracer_port->trace_flags |= F_TRACER;
- erts_smp_port_unlock(tracer_port);
+ ERTS_TRACE_FLAGS(tracer_port) |= F_TRACER;
+ erts_port_release(tracer_port);
} else
goto error;
@@ -497,7 +552,7 @@ Eterm trace_3(BIF_ALIST_3)
case am_true:
on = 1;
if (is_nil(tracer))
- tracer = p->id;
+ tracer = p->common.id;
break;
default:
goto error;
@@ -519,26 +574,29 @@ Eterm trace_3(BIF_ALIST_3)
if (pid_spec == tracer)
goto error;
- tracee_port = erts_id2port(pid_spec, p, ERTS_PROC_LOCK_MAIN);
+ tracee_port = erts_id2port_sflgs(pid_spec,
+ p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
if (!tracee_port)
goto error;
if (tracer != NIL && port_already_traced(p, tracee_port, tracer)) {
- erts_smp_port_unlock(tracee_port);
+ erts_port_release(tracee_port);
goto already_traced;
}
if (on)
- tracee_port->trace_flags |= mask;
+ ERTS_TRACE_FLAGS(tracee_port) |= mask;
else
- tracee_port->trace_flags &= ~mask;
+ ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
- if (!tracee_port->trace_flags)
- tracee_port->tracer_proc = NIL;
+ if (!ERTS_TRACE_FLAGS(tracee_port))
+ ERTS_TRACER_PROC(tracee_port) = NIL;
else if (tracer != NIL)
- tracee_port->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_port) = tracer;
- erts_smp_port_unlock(tracee_port);
+ erts_port_release(tracee_port);
matches = 1;
} else if (is_pid(pid_spec)) {
@@ -570,14 +628,14 @@ Eterm trace_3(BIF_ALIST_3)
}
if (on)
- tracee_p->trace_flags |= mask;
+ ERTS_TRACE_FLAGS(tracee_p) |= mask;
else
- tracee_p->trace_flags &= ~mask;
+ ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
- if ((tracee_p->trace_flags & TRACEE_FLAGS) == 0)
- tracee_p->tracer_proc = NIL;
+ if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS) == 0)
+ ERTS_TRACER_PROC(tracee_p) = NIL;
else if (tracer != NIL)
- tracee_p->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_p) = tracer;
erts_smp_proc_unlock(tracee_p,
(tracee_p == p
@@ -651,48 +709,56 @@ Eterm trace_3(BIF_ALIST_3)
ok = 1;
if (procs || mods) {
+ int max = erts_ptab_max(&erts_proc);
/* tracing of processes */
- for (i = 0; i < erts_max_processes; i++) {
- Process* tracee_p = process_tab[i];
-
+ for (i = 0; i < max; i++) {
+ Process* tracee_p = erts_pix2proc(i);
if (! tracee_p)
continue;
if (tracer != NIL) {
- if (tracee_p->id == tracer)
+ if (tracee_p->common.id == tracer)
continue;
if (already_traced(NULL, tracee_p, tracer))
continue;
}
if (on) {
- tracee_p->trace_flags |= mask;
+ ERTS_TRACE_FLAGS(tracee_p) |= mask;
} else {
- tracee_p->trace_flags &= ~mask;
+ ERTS_TRACE_FLAGS(tracee_p) &= ~mask;
}
- if(!(tracee_p->trace_flags & TRACEE_FLAGS)) {
- tracee_p->tracer_proc = NIL;
+ if(!(ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)) {
+ ERTS_TRACER_PROC(tracee_p) = NIL;
} else if (tracer != NIL) {
- tracee_p->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_p) = tracer;
}
matches++;
}
}
if (ports || mods) {
+ int max = erts_ptab_max(&erts_port);
/* tracing of ports */
- for (i = 0; i < erts_max_ports; i++) {
- Port *tracee_port = &erts_port[i];
- if (tracee_port->status & ERTS_PORT_SFLGS_DEAD) continue;
+ for (i = 0; i < max; i++) {
+ erts_aint32_t state;
+ Port *tracee_port = erts_pix2port(i);
+ if (!tracee_port)
+ continue;
+ state = erts_atomic32_read_nob(&tracee_port->state);
+ if (state & ERTS_PORT_SFLGS_DEAD)
+ continue;
if (tracer != NIL) {
- if (tracee_port->id == tracer) continue;
- if (port_already_traced(NULL, tracee_port, tracer)) continue;
+ if (tracee_port->common.id == tracer)
+ continue;
+ if (port_already_traced(NULL, tracee_port, tracer))
+ continue;
}
- if (on) tracee_port->trace_flags |= mask;
- else tracee_port->trace_flags &= ~mask;
+ if (on) ERTS_TRACE_FLAGS(tracee_port) |= mask;
+ else ERTS_TRACE_FLAGS(tracee_port) &= ~mask;
- if (!(tracee_port->trace_flags & TRACEE_FLAGS)) {
- tracee_port->tracer_proc = NIL;
+ if (!(ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)) {
+ ERTS_TRACER_PROC(tracee_port) = NIL;
} else if (tracer != NIL) {
- tracee_port->tracer_proc = tracer;
+ ERTS_TRACER_PROC(tracee_port) = tracer;
}
/* matches are not counted for ports since it would violate compatibility */
/* This could be a reason to modify this function or make a new one. */
@@ -730,6 +796,7 @@ Eterm trace_3(BIF_ALIST_3)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
+ erts_release_code_write_permission();
BIF_RET(make_small(matches));
@@ -745,6 +812,7 @@ Eterm trace_3(BIF_ALIST_3)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
#endif
+ erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
}
@@ -759,21 +827,20 @@ static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
* * main lock is held on c_p
* * all locks are held on port tracee_p
*/
- if ((tracee_port->trace_flags & TRACEE_FLAGS)
- && tracee_port->tracer_proc != tracer) {
+ if ((ERTS_TRACE_FLAGS(tracee_port) & TRACEE_FLAGS)
+ && ERTS_TRACER_PROC(tracee_port) != tracer) {
/* This tracee is already being traced, and not by the
* tracer to be */
- if (is_internal_port(tracee_port->tracer_proc)) {
- if (!erts_is_valid_tracer_port(tracee_port->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(tracee_port))) {
+ if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_port))) {
/* Current trace port now invalid
* - discard it and approve the new. */
goto remove_tracer;
} else
return 1;
}
- else if(is_internal_pid(tracee_port->tracer_proc)) {
- Process *tracer_p = erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
- tracee_port->tracer_proc, 0);
+ else if(is_internal_pid(ERTS_TRACER_PROC(tracee_port))) {
+ Process *tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_port));
if (!tracer_p) {
/* Current trace process now invalid
* - discard it and approve the new. */
@@ -783,8 +850,8 @@ static int port_already_traced(Process *c_p, Port *tracee_port, Eterm tracer)
}
else {
remove_tracer:
- tracee_port->trace_flags &= ~TRACEE_FLAGS;
- tracee_port->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(tracee_port) &= ~TRACEE_FLAGS;
+ ERTS_TRACER_PROC(tracee_port) = NIL;
}
}
return 0;
@@ -800,21 +867,22 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
* * main lock is held on c_p
* all locks are held on tracee_p
*/
- if ((tracee_p->trace_flags & TRACEE_FLAGS)
- && tracee_p->tracer_proc != tracer) {
+ if ((ERTS_TRACE_FLAGS(tracee_p) & TRACEE_FLAGS)
+ && ERTS_TRACER_PROC(tracee_p) != tracer) {
/* This tracee is already being traced, and not by the
* tracer to be */
- if (is_internal_port(tracee_p->tracer_proc)) {
- if (!erts_is_valid_tracer_port(tracee_p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(tracee_p))) {
+ if (!erts_is_valid_tracer_port(ERTS_TRACER_PROC(tracee_p))) {
/* Current trace port now invalid
* - discard it and approve the new. */
goto remove_tracer;
} else
return 1;
}
- else if(is_internal_pid(tracee_p->tracer_proc)) {
- Process *tracer_p = erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
- tracee_p->tracer_proc, 0);
+ else if(is_internal_pid(ERTS_TRACER_PROC(tracee_p))) {
+ Process *tracer_p;
+
+ tracer_p = erts_proc_lookup(ERTS_TRACER_PROC(tracee_p));
if (!tracer_p) {
/* Current trace process now invalid
* - discard it and approve the new. */
@@ -824,8 +892,8 @@ static int already_traced(Process *c_p, Process *tracee_p, Eterm tracer)
}
else {
remove_tracer:
- tracee_p->trace_flags &= ~TRACEE_FLAGS;
- tracee_p->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(tracee_p) &= ~TRACEE_FLAGS;
+ ERTS_TRACER_PROC(tracee_p) = NIL;
}
}
return 0;
@@ -841,6 +909,11 @@ Eterm trace_info_2(BIF_ALIST_2)
Eterm What = BIF_ARG_1;
Eterm Key = BIF_ARG_2;
Eterm res;
+
+ if (!erts_try_seize_code_write_permission(p)) {
+ ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, What, Key);
+ }
+
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
} else if (is_atom(What) || is_pid(What)) {
@@ -848,8 +921,10 @@ Eterm trace_info_2(BIF_ALIST_2)
} else if (is_tuple(What)) {
res = trace_info_func(p, What, Key);
} else {
+ erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
}
+ erts_release_code_write_permission();
BIF_RET(res);
}
@@ -862,8 +937,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
if (pid_spec == am_new) {
erts_get_default_tracing(&trace_flags, &tracer);
- } else if (is_internal_pid(pid_spec)
- && internal_pid_index(pid_spec) < erts_max_processes) {
+ } else if (is_internal_pid(pid_spec)) {
Process *tracee;
tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
pid_spec, ERTS_PROC_LOCKS_ALL);
@@ -871,16 +945,16 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
if (!tracee) {
return am_undefined;
} else {
- tracer = tracee->tracer_proc;
- trace_flags = tracee->trace_flags;
+ tracer = ERTS_TRACER_PROC(tracee);
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
}
if (is_internal_pid(tracer)) {
- if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, tracer, 0)) {
+ if (!erts_proc_lookup(tracer)) {
reset_tracer:
- tracee->trace_flags &= ~TRACEE_FLAGS;
- trace_flags = tracee->trace_flags;
- tracer = tracee->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(tracee) &= ~TRACEE_FLAGS;
+ trace_flags = ERTS_TRACE_FLAGS(tracee);
+ tracer = ERTS_TRACER_PROC(tracee) = NIL;
}
}
else if (is_internal_port(tracer)) {
@@ -977,64 +1051,54 @@ static int function_is_traced(Process *p,
Binary **ms, /* out */
Binary **ms_meta, /* out */
Eterm *tracer_pid_meta, /* out */
- Sint *count, /* out */
+ Uint *count, /* out */
Eterm *call_time) /* out */
{
Export e;
Export* ep;
- int i;
- BeamInstr *code;
+ BeamInstr* pc;
/* First look for an export entry */
e.code[0] = mfa[0];
e.code[1] = mfa[1];
e.code[2] = mfa[2];
if ((ep = export_get(&e)) != NULL) {
- if (ep->address == ep->code+3 &&
- ep->code[3] != (BeamInstr) em_call_error_handler) {
- if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- *ms = ep->match_prog_set;
+ pc = ep->code+3;
+ if (ep->addressv[erts_active_code_ix()] == pc &&
+ *pc != (BeamInstr) em_call_error_handler) {
+
+ int r = 0;
+
+ ASSERT(*pc == (BeamInstr) em_apply_bif ||
+ *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint));
+
+ if (erts_is_trace_break(pc, ms, 0)) {
return FUNC_TRACE_GLOBAL_TRACE;
}
- if (ep->code[3] == (BeamInstr) em_apply_bif) {
- for (i = 0; i < BIF_SIZE; ++i) {
- if (bif_export[i] == ep) {
- int r = 0;
-
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- *ms = ep->match_prog_set;
- return FUNC_TRACE_GLOBAL_TRACE;
- } else {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- r |= FUNC_TRACE_LOCAL_TRACE;
- *ms = ep->match_prog_set;
- }
- if (erts_is_mtrace_break(ep->code+3, ms_meta,
- tracer_pid_meta)) {
- r |= FUNC_TRACE_META_TRACE;
- }
- if (erts_is_time_break(p, ep->code+3, call_time)) {
- r |= FUNC_TRACE_TIME_TRACE;
- }
- }
- return r ? r : FUNC_TRACE_UNTRACED;
- }
- }
- erl_exit(1,"Impossible ghost bif encountered in trace_info.");
+
+ if (erts_is_trace_break(pc, ms, 1)) {
+ r |= FUNC_TRACE_LOCAL_TRACE;
+ }
+ if (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)) {
+ r |= FUNC_TRACE_META_TRACE;
}
+ if (erts_is_time_break(p, pc, call_time)) {
+ r |= FUNC_TRACE_TIME_TRACE;
+ }
+ return r ? r : FUNC_TRACE_UNTRACED;
}
}
/* OK, now look for breakpoint tracing */
- if ((code = erts_find_local_func(mfa)) != NULL) {
+ if ((pc = erts_find_local_func(mfa)) != NULL) {
int r =
- (erts_is_trace_break(code, ms, NULL)
+ (erts_is_trace_break(pc, ms, 1)
? FUNC_TRACE_LOCAL_TRACE : 0)
- | (erts_is_mtrace_break(code, ms_meta, tracer_pid_meta)
+ | (erts_is_mtrace_break(pc, ms_meta, tracer_pid_meta)
? FUNC_TRACE_META_TRACE : 0)
- | (erts_is_count_break(code, count)
+ | (erts_is_count_break(pc, count)
? FUNC_TRACE_COUNT_TRACE : 0)
- | (erts_is_time_break(p, code, call_time)
+ | (erts_is_time_break(p, pc, call_time)
? FUNC_TRACE_TIME_TRACE : 0);
return r ? r : FUNC_TRACE_UNTRACED;
@@ -1049,7 +1113,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
Eterm* hp;
DeclareTmpHeap(mfa,3,p); /* Not really heap here, but might be when setting pattern */
Binary *ms = NULL, *ms_meta = NULL;
- Sint count = 0;
+ Uint count = 0;
Eterm traced = am_false;
Eterm match_spec = am_false;
Eterm retval = am_false;
@@ -1137,9 +1201,7 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
break;
case am_call_count:
if (r & FUNC_TRACE_COUNT_TRACE) {
- retval = count < 0 ?
- erts_make_integer(-count-1, p) :
- erts_make_integer(count, p);
+ retval = erts_make_integer(count, p);
}
break;
case am_call_time:
@@ -1328,38 +1390,53 @@ trace_info_on_load(Process* p, Eterm key)
#undef FUNC_TRACE_LOCAL_TRACE
int
-erts_set_trace_pattern(Eterm* mfa, int specified,
+erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
Binary* match_prog_set, Binary *meta_match_prog_set,
int on, struct trace_pattern_flags flags,
- Eterm meta_tracer_pid)
+ Eterm meta_tracer_pid, int is_blocking)
{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
int matches = 0;
int i;
+ int n;
+ BpFunction* fp;
/*
* First work on normal functions (not real BIFs).
*/
-
- for (i = 0; i < export_list_size(); i++) {
- Export* ep = export_list(i);
- int j;
-
- if (ExportIsBuiltIn(ep)) {
- continue;
- }
-
- for (j = 0; j < specified && mfa[j] == ep->code[j]; j++) {
- /* Empty loop body */
- }
- if (j == specified) {
- if (on) {
- if (! flags.breakpoint)
- matches += setup_func_trace(ep, match_prog_set);
- else
- reset_func_trace(ep);
- } else if (! flags.breakpoint) {
- matches += reset_func_trace(ep);
+ erts_bp_match_export(&finish_bp.e, mfa, specified);
+ fp = finish_bp.e.matching;
+ n = finish_bp.e.matched;
+
+ for (i = 0; i < n; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code));
+
+ if (on && !flags.breakpoint) {
+ /* Turn on global call tracing */
+ if (ep->addressv[code_ix] != pc) {
+ fp[i].mod->curr.num_traced_exports++;
+#ifdef DEBUG
+ pc[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+#endif
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
+ pc[1] = (BeamInstr) ep->addressv[code_ix];
+ }
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
+ if (ep->addressv[code_ix] != pc) {
+ pc[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint);
+ }
+ } else if (!on && flags.breakpoint) {
+ /* Turn off breakpoint tracing -- nothing to do here. */
+ } else {
+ /*
+ * Turn off global tracing, either explicitly or implicitly
+ * before turning on breakpoint tracing.
+ */
+ erts_clear_call_trace_bif(pc, 0);
+ if (pc[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ pc[0] = (BeamInstr) BeamOp(op_jump_f);
}
}
}
@@ -1384,26 +1461,15 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/* Empty loop body */
}
if (j == specified) {
+ BeamInstr* pc = (BeamInstr *)bif_export[i]->code + 3;
+
if (! flags.breakpoint) { /* Export entry call trace */
if (on) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- ASSERT(ExportIsBuiltIn(bif_export[i]));
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_GLOBAL;
- setup_bif_trace(i);
+ erts_clear_call_trace_bif(pc, 1);
+ erts_clear_mtrace_bif(pc);
+ erts_set_call_trace_bif(pc, match_prog_set, 0);
} else { /* off */
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_GLOBAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
- }
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
+ erts_clear_call_trace_bif(pc, 0);
}
matches++;
} else { /* Breakpoint call trace */
@@ -1411,52 +1477,33 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
if (on) {
if (flags.local) {
- set_trace_bif(i, match_prog_set);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_LOCAL;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_clear_call_trace_bif(pc, 0);
+ erts_set_call_trace_bif(pc, match_prog_set, 1);
m = 1;
}
if (flags.meta) {
- erts_set_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3,
- meta_match_prog_set, meta_tracer_pid);
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_META;
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_GLOBAL;
+ erts_set_mtrace_bif(pc, meta_match_prog_set,
+ meta_tracer_pid);
m = 1;
}
if (flags.call_time) {
- erts_set_time_trace_bif(bif_export[i]->code + 3, on);
+ erts_set_time_trace_bif(pc, on);
/* I don't want to remove any other tracers */
- erts_bif_trace_flags[i] |= BIF_TRACE_AS_CALL_TIME;
m = 1;
}
- if (erts_bif_trace_flags[i]) {
- setup_bif_trace(i);
- }
} else { /* off */
if (flags.local) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_LOCAL) {
- clear_trace_bif(i);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_LOCAL;
- }
+ erts_clear_call_trace_bif(pc, 1);
m = 1;
}
if (flags.meta) {
- if (erts_bif_trace_flags[i] & BIF_TRACE_AS_META) {
- erts_clear_mtrace_bif
- ((BeamInstr *)bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_META;
- }
+ erts_clear_mtrace_bif(pc);
m = 1;
}
if (flags.call_time) {
- erts_clear_time_trace_bif(bif_export[i]->code + 3);
- erts_bif_trace_flags[i] &= ~BIF_TRACE_AS_CALL_TIME;
+ erts_clear_time_trace_bif(pc);
m = 1;
}
- if (! erts_bif_trace_flags[i]) {
- reset_bif_trace(i);
- }
}
matches += m;
}
@@ -1466,176 +1513,241 @@ erts_set_trace_pattern(Eterm* mfa, int specified,
/*
** So, now for breakpoint tracing
*/
+ erts_bp_match_functions(&finish_bp.f, mfa, specified);
if (on) {
if (! flags.breakpoint) {
- erts_clear_trace_break(mfa, specified);
- erts_clear_mtrace_break(mfa, specified);
- erts_clear_count_break(mfa, specified);
- erts_clear_time_break(mfa, specified);
+ erts_clear_all_breaks(&finish_bp.f);
} else {
- int m = 0;
if (flags.local) {
- m = erts_set_trace_break(mfa, specified, match_prog_set,
- am_true);
+ erts_set_trace_break(&finish_bp.f, match_prog_set);
}
if (flags.meta) {
- m = erts_set_mtrace_break(mfa, specified, meta_match_prog_set,
- meta_tracer_pid);
+ erts_set_mtrace_break(&finish_bp.f, meta_match_prog_set,
+ meta_tracer_pid);
}
if (flags.call_count) {
- m = erts_set_count_break(mfa, specified, on);
+ erts_set_count_break(&finish_bp.f, on);
}
if (flags.call_time) {
- m = erts_set_time_break(mfa, specified, on);
+ erts_set_time_break(&finish_bp.f, on);
}
- /* All assignments to 'm' above should give the same value,
- * so just use the last */
- matches += m;
}
} else {
- int m = 0;
if (flags.local) {
- m = erts_clear_trace_break(mfa, specified);
+ erts_clear_trace_break(&finish_bp.f);
}
if (flags.meta) {
- m = erts_clear_mtrace_break(mfa, specified);
+ erts_clear_mtrace_break(&finish_bp.f);
}
if (flags.call_count) {
- m = erts_clear_count_break(mfa, specified);
+ erts_clear_count_break(&finish_bp.f);
}
if (flags.call_time) {
- m = erts_clear_time_break(mfa, specified);
+ erts_clear_time_break(&finish_bp.f);
+ }
+ }
+
+ finish_bp.current = 0;
+ finish_bp.install = on;
+ finish_bp.local = flags.breakpoint;
+
+#ifdef ERTS_SMP
+ if (is_blocking) {
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+#endif
+ while (erts_finish_breakpointing()) {
+ /* Empty loop body */
}
- /* All assignments to 'm' above should give the same value,
- * so just use the last */
- matches += m;
+#ifdef ERTS_SMP
+ finish_bp.current = -1;
}
+#endif
+ if (flags.breakpoint) {
+ matches += finish_bp.f.matched;
+ } else {
+ matches += finish_bp.e.matched;
+ }
return matches;
}
-/*
- * Setup function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
-
-static int
-setup_func_trace(Export* ep, void* match_prog)
+int
+erts_finish_breakpointing(void)
{
- if (ep->address == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
- }
- }
-
+ ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
+
/*
- * Currently no trace support for native code.
+ * Memory barriers will be issued for all processes *before*
+ * each of the stages below. (Unless the other schedulers
+ * are blocked, in which case memory barriers will be issued
+ * when they are awakened.)
*/
- if (erts_is_native_break(ep->address)) {
+ switch (finish_bp.current++) {
+ case 0:
+ /*
+ * At this point, in all functions that are to be breakpointed,
+ * a pointer to a GenericBp struct has already been added.
+ *
+ * Insert the new breakpoints (if any) into the
+ * code. Different schedulers may see the breakpoint instructions
+ * at different times, but it does not matter since the newly
+ * added breakpoints are disabled.
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ erts_install_breakpoints(&finish_bp.f);
+ } else {
+ install_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ setup_bif_trace();
+ return 1;
+ case 1:
+ /*
+ * Switch index for the breakpoint data, activating the staged
+ * data. (Depending on the changes in the breakpoint data,
+ * that could either activate breakpoints or disable
+ * breakpoints.)
+ */
+ erts_commit_staged_bp();
+ return 1;
+ case 2:
+ /*
+ * Remove breakpoint instructions for disabled breakpoints
+ * (if any).
+ */
+ if (finish_bp.install) {
+ if (finish_bp.local) {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ } else {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ }
+ } else {
+ if (finish_bp.local) {
+ erts_uninstall_breakpoints(&finish_bp.f);
+ } else {
+ uninstall_exp_breakpoints(&finish_bp.e);
+ }
+ }
+ reset_bif_trace();
+ return 1;
+ case 3:
+ /*
+ * Now all breakpoints have either been inserted or removed.
+ * For all updated breakpoints, copy the active breakpoint
+ * data to the staged breakpoint data to make them equal
+ * (simplifying for the next time breakpoints are to be
+ * updated). If any breakpoints have been totally disabled,
+ * deallocate the GenericBp structs for them.
+ */
+ erts_consolidate_bif_bp_data();
+ clean_export_entries(&finish_bp.e);
+ erts_consolidate_bp_data(&finish_bp.e, 0);
+ erts_consolidate_bp_data(&finish_bp.f, 1);
+ erts_bp_free_matched_functions(&finish_bp.e);
+ erts_bp_free_matched_functions(&finish_bp.f);
return 0;
+ default:
+ ASSERT(0);
}
-
- ep->code[3] = (BeamInstr) em_call_traced_function;
- ep->code[4] = (BeamInstr) ep->address;
- ep->address = ep->code+3;
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
- return 1;
+ return 0;
}
-static void setup_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ep->code[4] = (BeamInstr) bif_table[bif_index].traced;
-}
+static void
+install_exp_breakpoints(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
-static void set_trace_bif(int bif_index, void* match_prog) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "set_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = match_prog;
- MatchSetRef(ep->match_prog_set);
-}
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
-/*
- * Reset function tracing for the given exported function.
- *
- * Return Value: 1 if entry refers to a BIF or loaded function,
- * 0 if the entry refers to a function not loaded.
- */
+ ep->addressv[code_ix] = pc;
+ }
+}
-static int
-reset_func_trace(Export* ep)
+static void
+uninstall_exp_breakpoints(BpFunctions* f)
{
- if (ep->address == ep->code+3) {
- if (ep->code[3] == (BeamInstr) em_call_error_handler) {
- return 0;
- } else if (ep->code[3] == (BeamInstr) em_call_traced_function) {
- ep->address = (Uint *) ep->code[4];
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
- return 1;
- } else {
- /*
- * We ignore apply/3 and anything else.
- */
- return 0;
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
+
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
+
+ if (ep->addressv[code_ix] != pc) {
+ continue;
}
+ ASSERT(*pc == (BeamInstr) BeamOp(op_jump_f));
+ ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
}
-
- /*
- * Currently no trace support for native code.
- */
- if (erts_is_native_break(ep->address)) {
- return 0;
- }
-
- /*
- * Nothing to do, but the export entry matches.
- */
+}
- return 1;
+static void
+clean_export_entries(BpFunctions* f)
+{
+ const ErtsCodeIndex code_ix = erts_active_code_ix();
+ BpFunction* fp = f->matching;
+ Uint ne = f->matched;
+ Uint i;
+ Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
+
+ for (i = 0; i < ne; i++) {
+ BeamInstr* pc = fp[i].pc;
+ Export* ep = (Export *) (((char *)pc)-offset);
+
+ if (ep->addressv[code_ix] == pc) {
+ continue;
+ }
+ if (*pc == (BeamInstr) BeamOp(op_jump_f)) {
+ ep->code[3] = (BeamInstr) 0;
+ ep->code[4] = (BeamInstr) 0;
+ }
+ }
}
-static void reset_bif_trace(int bif_index) {
- Export *ep = bif_export[bif_index];
-
- ASSERT(ExportIsBuiltIn(ep));
- ASSERT(ep->code[4]);
- ASSERT(! ep->match_prog_set);
- ASSERT(! erts_is_mtrace_break((BeamInstr *)ep->code+3, NULL, NULL));
- ep->code[4] = (BeamInstr) bif_table[bif_index].f;
+static void
+setup_bif_trace(void)
+{
+ int i;
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ GenericBp* g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
+ if (g) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].traced;
+ }
+ }
+ }
}
-static void clear_trace_bif(int bif_index) {
- Export *ep = bif_export[bif_index];
-
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "clear_trace_bif: %T:%T/%bpu\n",
- ep->code[0], ep->code[1], ep->code[2]);
-#endif
- ASSERT(ExportIsBuiltIn(ep));
- MatchSetUnref(ep->match_prog_set);
- ep->match_prog_set = NULL;
+static void
+reset_bif_trace(void)
+{
+ int i;
+ ErtsBpIndex active = erts_active_bp_ix();
+
+ for (i = 0; i < BIF_SIZE; ++i) {
+ Export *ep = bif_export[i];
+ BeamInstr* pc = ep->code+3;
+ GenericBp* g = (GenericBp *) pc[-4];
+ if (g && g->data[active].flags == 0) {
+ if (ExportIsBuiltIn(ep)) {
+ ASSERT(ep->code[4]);
+ ep->code[4] = (BeamInstr) bif_table[i].f;
+ }
+ }
+ }
}
/*
@@ -1776,7 +1888,7 @@ new_seq_trace_token(Process* p)
SEQ_TRACE_TOKEN(p) = TUPLE5(hp, make_small(0), /* Flags */
make_small(0), /* Label */
make_small(0), /* Serial */
- p->id, /* Internal pid */ /* From */
+ p->common.id, /* Internal pid */ /* From */
make_small(p->seq_trace_lastcnt));
}
}
@@ -2142,13 +2254,15 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2)
/* Check if valid process, no locks are taken */
if (is_internal_pid(profiler)) {
- if (internal_pid_index(profiler) >= erts_max_processes) goto error;
- profiler_p = process_tab[internal_pid_index(profiler)];
- if (INVALID_PID(profiler_p, profiler)) goto error;
+ profiler_p = erts_proc_lookup(profiler);
+ if (!profiler_p)
+ goto error;
} else if (is_internal_port(profiler)) {
- if (internal_port_index(profiler) >= erts_max_ports) goto error;
- profiler_port = &erts_port[internal_port_index(profiler)];
- if (INVALID_TRACER_PORT(profiler_port, profiler)) goto error;
+ profiler_port = (erts_port_lookup(
+ profiler,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP));
+ if (!profiler_port)
+ goto error;
} else {
goto error;
}
@@ -2212,8 +2326,7 @@ trace_delivered_1(BIF_ALIST_1)
p = NULL;
} else if (! (p = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
BIF_ARG_1, ERTS_PROC_LOCKS_ALL))) {
- if (is_not_internal_pid(BIF_ARG_1)
- || internal_pid_index(BIF_ARG_1) >= erts_max_processes) {
+ if (is_not_internal_pid(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
}
@@ -2232,7 +2345,7 @@ trace_delivered_1(BIF_ALIST_1)
msg = TUPLE3(hp, AM_trace_delivered, BIF_ARG_1, msg_ref);
#ifdef ERTS_SMP
- erts_send_sys_msg_proc(BIF_P->id, BIF_P->id, msg, bp);
+ erts_send_sys_msg_proc(BIF_P->common.id, BIF_P->common.id, msg, bp);
if (p)
erts_smp_proc_unlock(p,
(BIF_P == p
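The trace_pattern changes above stage breakpoint updates and, on SMP, suspend the caller while smp_bp_finisher() keeps rescheduling itself as a thread-progress later-op until erts_finish_breakpointing() has walked through all of its stages (install, commit, uninstall, consolidate). The toy sketch below shows only that control shape; the stage names are paraphrased from the comments above, real thread-progress scheduling is replaced by a plain loop, and none of this is the actual emulator code.

#include <stdio.h>

/* Stages roughly corresponding to the cases in erts_finish_breakpointing(). */
enum { STAGE_INSTALL, STAGE_COMMIT, STAGE_UNINSTALL, STAGE_CONSOLIDATE };

static struct {
    int current;      /* next stage to run; -1 means nothing staged */
} finish_bp = { -1 };

/* Runs one stage.  Returns 1 if it must be called again after the next
 * "thread progress" point, 0 when everything is finished. */
static int finish_breakpointing(void)
{
    switch (finish_bp.current++) {
    case STAGE_INSTALL:
        puts("stage 0: insert (still disabled) breakpoint instructions");
        return 1;
    case STAGE_COMMIT:
        puts("stage 1: flip the active breakpoint-data index");
        return 1;
    case STAGE_UNINSTALL:
        puts("stage 2: remove instructions for disabled breakpoints");
        return 1;
    case STAGE_CONSOLIDATE:
        puts("stage 3: consolidate staged/active data, free dead structs");
        return 0;
    default:
        return 0;
    }
}

/* Stand-in for smp_bp_finisher(): in the emulator this is re-scheduled as a
 * thread-progress later-op; here we simply loop until the state machine is
 * done, then "resume" the suspended caller. */
static void bp_finisher(void)
{
    while (finish_breakpointing())
        ;                        /* would be: reschedule and return */
    puts("done: resume the process that called trace_pattern/3");
}

int main(void)
{
    finish_bp.current = STAGE_INSTALL;   /* staged by the set-pattern step */
    bp_finisher();
    return 0;
}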
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 6bc227eeda..3753b618e1 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -1247,6 +1247,12 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
*/
erts_bin_offset = 8*sb->size + sb->bitsize;
+ if (unit > 1) {
+ if ((unit == 8 && (erts_bin_offset & 7) != 0) ||
+ (erts_bin_offset % unit) != 0) {
+ goto badarg;
+ }
+ }
used_size_in_bits = erts_bin_offset + build_size_in_bits;
sb->is_writable = 0; /* Make sure that no one else can write. */
pb->size = NBYTES(used_size_in_bits);
@@ -1316,6 +1322,12 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
*/
ERTS_GET_BINARY_BYTES(bin, src_bytes, bitoffs, bitsize);
erts_bin_offset = 8*binary_size(bin) + bitsize;
+ if (unit > 1) {
+ if ((unit == 8 && (erts_bin_offset & 7) != 0) ||
+ (erts_bin_offset % unit) != 0) {
+ goto badarg;
+ }
+ }
used_size_in_bits = erts_bin_offset + build_size_in_bits;
used_size_in_bytes = NBYTES(used_size_in_bits);
bin_size = 2*used_size_in_bytes;
@@ -1363,12 +1375,6 @@ erts_bs_append(Process* c_p, Eterm* reg, Uint live, Eterm build_size_term,
/*
* Now copy the data into the binary.
*/
- if (unit > 1) {
- if ((unit == 8 && (erts_bin_offset & 7) != 0) ||
- (erts_bin_offset % unit) != 0) {
- return THE_NON_VALUE;
- }
- }
copy_binary_to_buffer(erts_current_bin, 0, src_bytes, bitoffs, erts_bin_offset);
return make_binary(sb);
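The erl_bits.c hunks move the segment-unit alignment test so it runs before erts_bs_append() commits to reusing or reallocating the binary, turning a misaligned offset into a clean badarg instead of a late failure. The test itself is just "the bit offset must be a multiple of the unit", with a mask fast path for unit == 8; a standalone sketch follows (helper name invented):

#include <assert.h>

typedef unsigned long Uint;

/* Returns nonzero when a bit offset is NOT a multiple of the segment unit.
 * unit == 8 is the common case and can be tested with a mask instead of a
 * division, like the check added in erts_bs_append(). */
static int misaligned(Uint bit_offset, Uint unit)
{
    if (unit <= 1)
        return 0;                       /* unit 0/1 accepts any offset */
    if (unit == 8)
        return (bit_offset & 7) != 0;   /* fast path: whole bytes only */
    return (bit_offset % unit) != 0;
}

int main(void)
{
    assert(!misaligned(16, 8));   /* 16 bits = 2 bytes, ok for unit 8 */
    assert( misaligned(12, 8));   /* 12 bits is not byte aligned */
    assert(!misaligned(12, 4));   /* but it is a multiple of 4 */
    assert( misaligned(10, 3));
    return 0;
}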
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index fe3693d0ca..3f90f34736 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -486,7 +486,7 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
erts_thr_set_main_status(1, (int) esdp->no);
/* Make sure we check if we should bind to a cpu or not... */
- esdp->run_queue->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ (void) ERTS_RUNQ_FLGS_SET(esdp->run_queue, ERTS_RUNQ_FLG_CHK_CPU_BIND);
}
#endif
@@ -498,9 +498,6 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_cpu_groups_map_t *cgm;
erts_cpu_groups_callback_list_t *cgcl;
erts_cpu_groups_callback_call_t *cgcc;
-#ifdef ERTS_SMP
- esdp->run_queue->flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
-#endif
erts_smp_runq_unlock(esdp->run_queue);
erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx);
cpu_id = scheduler2cpu_map[esdp->no].bind_id;
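Setting ERTS_RUNQ_FLG_CHK_CPU_BIND now goes through ERTS_RUNQ_FLGS_SET(), i.e. an atomic OR on the run-queue flag word, rather than a plain |= that is only safe under the run-queue lock. A minimal sketch of the difference using C11 atomics; the flag value and names are placeholders, not the ERTS macros:

#include <stdatomic.h>
#include <stdio.h>

#define RUNQ_FLG_CHK_CPU_BIND (1u << 3)   /* made-up bit for the example */

struct run_queue {
    atomic_uint flags;
};

/* Atomic counterpart of "rq->flags |= flg": safe even when other threads
 * update the same word concurrently, and it returns the previous value,
 * as ERTS_RUNQ_FLGS_SET() does. */
static unsigned runq_flags_set(struct run_queue *rq, unsigned flg)
{
    return atomic_fetch_or_explicit(&rq->flags, flg, memory_order_relaxed);
}

int main(void)
{
    struct run_queue rq;
    atomic_init(&rq.flags, 0);

    unsigned old = runq_flags_set(&rq, RUNQ_FLG_CHK_CPU_BIND);
    printf("old=%#x new=%#x\n", old, atomic_load(&rq.flags));
    return 0;
}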
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 51bdf53823..48a95cdf32 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -224,8 +224,9 @@ Export ets_select_continue_exp;
static Export ets_delete_continue_exp;
static void
-free_dbtable(DbTable* tb)
+free_dbtable(void *vtb)
{
+ DbTable *tb = (DbTable *) vtb;
#ifdef HARDDEBUG
if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) {
erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n",
@@ -250,21 +251,8 @@ free_dbtable(DbTable* tb)
#endif
ASSERT(is_immed(tb->common.heir_data));
erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
- ERTS_SMP_MEMORY_BARRIER;
}
-#ifdef ERTS_SMP
-static void
-chk_free_dbtable(void *vtb)
-{
- DbTable * tb = (DbTable *) vtb;
- ERTS_THR_MEMORY_BARRIER;
- if (erts_refc_dectest(&tb->common.ref, 0) == 0)
- free_dbtable(tb);
-}
-#endif
-
static void schedule_free_dbtable(DbTable* tb)
{
/*
@@ -275,15 +263,10 @@ static void schedule_free_dbtable(DbTable* tb)
* need to unlock the table lock after this
* function has returned).
*/
-#ifdef ERTS_SMP
- int scheds = erts_get_max_no_executing_schedulers();
- ASSERT(scheds >= 1);
ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
- erts_refc_init(&tb->common.ref, scheds);
- erts_schedule_multi_misc_aux_work(0, scheds, chk_free_dbtable, tb);
-#else
- free_dbtable(tb);
-#endif
+ erts_schedule_thr_prgr_later_op(free_dbtable,
+ (void *) tb,
+ &tb->release.data);
}
static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
@@ -442,7 +425,8 @@ DbTable* db_get_table_aux(Process *p,
if (tb) {
db_lock(tb, kind);
if (tb->common.id != id
- || ((tb->common.status & what) == 0 && p->id != tb->common.owner)) {
+ || ((tb->common.status & what) == 0
+ && p->common.id != tb->common.owner)) {
db_unlock(tb, kind);
tb = NULL;
}
@@ -542,10 +526,6 @@ static int remove_named_tab(DbTable *tb, int have_lock)
&rwlock);
#ifdef ERTS_SMP
if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
- /*
- * We keep our increased refc over this op in order to
- * prevent the table from disapearing.
- */
db_unlock(tb, LCK_WRITE);
erts_smp_rwmtx_rwlock(rwlock);
db_lock(tb, LCK_WRITE);
@@ -636,7 +616,7 @@ BIF_RETTYPE ets_safe_fixtable_2(BIF_ALIST_2)
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:safe_fixtable(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
- BIF_ARG_1, BIF_ARG_2, BIF_P->id,
+ BIF_ARG_1, BIF_ARG_2, BIF_P->common.id,
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
kind = (BIF_ARG_2 == am_true) ? LCK_READ : LCK_WRITE_REC;
@@ -1215,7 +1195,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:rename(%T,%T); Process: %T, initial: %T:%T/%bpu\n",
- BIF_ARG_1, BIF_ARG_2, BIF_P->id,
+ BIF_ARG_1, BIF_ARG_2, BIF_P->common.id,
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
@@ -1443,7 +1423,6 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0);
tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb, sizeof(DbTable));
- ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
erts_smp_atomic_init_nob(&tb->common.memory_size,
erts_smp_atomic_read_nob(&init_tb.common.memory_size));
}
@@ -1459,7 +1438,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ),
"db_tab", "db_tab_fix");
tb->common.keypos = keypos;
- tb->common.owner = BIF_P->id;
+ tb->common.owner = BIF_P->common.id;
set_heir(BIF_P, tb, heir, heir_data);
erts_smp_atomic_init_nob(&tb->common.nitems, 0);
@@ -1481,7 +1460,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
"** Too many db tables **\n");
free_heir_data(tb);
tb->common.meth->db_free_table(tb);
- free_dbtable(tb);
+ free_dbtable((void *) tb);
BIF_ERROR(BIF_P, SYSTEM_LIMIT);
}
@@ -1528,7 +1507,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:new(%T,%T)=%T; Process: %T, initial: %T:%T/%bpu\n",
- BIF_ARG_1, BIF_ARG_2, ret, BIF_P->id,
+ BIF_ARG_1, BIF_ARG_2, ret, BIF_P->common.id,
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
erts_fprintf(stderr, "ets: new: meta_pid_to_tab common.memory_size = %ld\n",
erts_smp_atomic_read_nob(&meta_pid_to_tab->common.memory_size));
@@ -1540,7 +1519,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
db_meta_lock(meta_pid_to_tab, LCK_WRITE_REC);
if (db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple, BIF_P->id, make_small(slot)),
+ TUPLE2(meta_tuple,
+ BIF_P->common.id,
+ make_small(slot)),
0) != DB_ERROR_NONE) {
erl_exit(1,"Could not update ets metadata.");
}
@@ -1659,7 +1640,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
#ifdef HARDDEBUG
erts_fprintf(stderr,
"ets:delete(%T); Process: %T, initial: %T:%T/%bpu\n",
- BIF_ARG_1, BIF_P->id,
+ BIF_ARG_1, BIF_P->common.id,
BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
#endif
@@ -1676,7 +1657,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
tb->common.status |= DB_DELETE;
- if (tb->common.owner != BIF_P->id) {
+ if (tb->common.owner != BIF_P->common.id) {
DeclareTmpHeap(meta_tuple,3,BIF_P);
/*
@@ -1691,10 +1672,12 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
make_small(tb->common.slot));
BIF_P->flags |= F_USING_DB;
- tb->common.owner = BIF_P->id;
+ tb->common.owner = BIF_P->common.id;
db_put_hash(meta_pid_to_tab,
- TUPLE2(meta_tuple,BIF_P->id,make_small(tb->common.slot)),
+ TUPLE2(meta_tuple,
+ BIF_P->common.id,
+ make_small(tb->common.slot)),
0);
db_meta_unlock(meta_pid_to_tab, LCK_WRITE_REC);
UnUseTmpHeap(3,BIF_P);
@@ -1770,7 +1753,7 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
}
if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->id) {
+ || tb->common.owner != BIF_P->common.id) {
goto badarg;
}
from_pid = tb->common.owner;
@@ -1793,7 +1776,10 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
db_unlock(tb,LCK_WRITE);
erts_send_message(BIF_P, to_proc, &to_locks,
- TUPLE4(buf, am_ETS_TRANSFER, tb->common.id, from_pid, BIF_ARG_3),
+ TUPLE4(buf, am_ETS_TRANSFER,
+ tb->common.id,
+ from_pid,
+ BIF_ARG_3),
0);
erts_smp_proc_unlock(to_proc, to_locks);
UnUseTmpHeap(5,BIF_P);
@@ -1855,7 +1841,7 @@ BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
if (tail != NIL
|| (tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->id) {
+ || tb->common.owner != BIF_P->common.id) {
goto badarg;
}
@@ -2669,7 +2655,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
*/
/* If/when we implement lockless private tables:
- if ((tb->common.status & DB_PRIVATE) && owner != BIF_P->id) {
+ if ((tb->common.status & DB_PRIVATE) && owner != BIF_P->common.id) {
db_unlock(tb, LCK_READ);
rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
owner, ERTS_PROC_LOCK_MAIN);
@@ -2816,7 +2802,6 @@ void init_db(void)
{
DbTable init_tb;
int i;
- extern BeamInstr* em_apply_bif;
Eterm *hp;
unsigned bits;
size_t size;
@@ -2850,7 +2835,7 @@ void init_db(void)
else
db_max_tabs = user_requested_db_max_tabs;
- bits = erts_fit_in_bits(db_max_tabs-1);
+ bits = erts_fit_in_bits_int32(db_max_tabs-1);
if (bits > SMALL_BITS) {
erl_exit(1,"Max limit for ets tabled too high %u (max %u).",
db_max_tabs, ((Uint)1)<<SMALL_BITS);
@@ -2888,7 +2873,6 @@ void init_db(void)
meta_pid_to_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb,
sizeof(DbTable));
- ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
erts_smp_atomic_init_nob(&meta_pid_to_tab->common.memory_size,
erts_smp_atomic_read_nob(&init_tb.common.memory_size));
@@ -2920,7 +2904,6 @@ void init_db(void)
meta_pid_to_fixed_tab = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE,
&init_tb,
sizeof(DbTable));
- ERTS_ETS_MISC_MEM_ADD(sizeof(DbTable));
erts_smp_atomic_init_nob(&meta_pid_to_fixed_tab->common.memory_size,
erts_smp_atomic_read_nob(&init_tb.common.memory_size));
@@ -2949,49 +2932,24 @@ void init_db(void)
}
/* Non visual BIF to trap to. */
- memset(&ets_select_delete_continue_exp, 0, sizeof(Export));
- ets_select_delete_continue_exp.address =
- &ets_select_delete_continue_exp.code[3];
- ets_select_delete_continue_exp.code[0] = am_ets;
- ets_select_delete_continue_exp.code[1] = am_atom_put("delete_trap",11);
- ets_select_delete_continue_exp.code[2] = 1;
- ets_select_delete_continue_exp.code[3] =
- (BeamInstr) em_apply_bif;
- ets_select_delete_continue_exp.code[4] =
- (BeamInstr) &ets_select_delete_1;
+ erts_init_trap_export(&ets_select_delete_continue_exp,
+ am_ets, am_atom_put("delete_trap",11), 1,
+ &ets_select_delete_1);
/* Non visual BIF to trap to. */
- memset(&ets_select_count_continue_exp, 0, sizeof(Export));
- ets_select_count_continue_exp.address =
- &ets_select_count_continue_exp.code[3];
- ets_select_count_continue_exp.code[0] = am_ets;
- ets_select_count_continue_exp.code[1] = am_atom_put("count_trap",11);
- ets_select_count_continue_exp.code[2] = 1;
- ets_select_count_continue_exp.code[3] =
- (BeamInstr) em_apply_bif;
- ets_select_count_continue_exp.code[4] =
- (BeamInstr) &ets_select_count_1;
+ erts_init_trap_export(&ets_select_count_continue_exp,
+ am_ets, am_atom_put("count_trap",11), 1,
+ &ets_select_count_1);
/* Non visual BIF to trap to. */
- memset(&ets_select_continue_exp, 0, sizeof(Export));
- ets_select_continue_exp.address =
- &ets_select_continue_exp.code[3];
- ets_select_continue_exp.code[0] = am_ets;
- ets_select_continue_exp.code[1] = am_atom_put("select_trap",11);
- ets_select_continue_exp.code[2] = 1;
- ets_select_continue_exp.code[3] =
- (BeamInstr) em_apply_bif;
- ets_select_continue_exp.code[4] =
- (BeamInstr) &ets_select_trap_1;
+ erts_init_trap_export(&ets_select_continue_exp,
+ am_ets, am_atom_put("select_trap",11), 1,
+ &ets_select_trap_1);
/* Non visual BIF to trap to. */
- memset(&ets_delete_continue_exp, 0, sizeof(Export));
- ets_delete_continue_exp.address = &ets_delete_continue_exp.code[3];
- ets_delete_continue_exp.code[0] = am_ets;
- ets_delete_continue_exp.code[1] = am_atom_put("delete_trap",11);
- ets_delete_continue_exp.code[2] = 1;
- ets_delete_continue_exp.code[3] = (BeamInstr) em_apply_bif;
- ets_delete_continue_exp.code[4] = (BeamInstr) &ets_delete_trap;
+ erts_init_trap_export(&ets_delete_continue_exp,
+ am_ets, am_atom_put("delete_trap",11), 1,
+ &ets_delete_trap);
hp = ms_delete_all_buff;
ms_delete_all = CONS(hp, am_true, NIL);
@@ -3089,9 +3047,9 @@ static int give_away_to_heir(Process* p, DbTable* tb)
Eterm to_pid;
UWord heir_data;
- ASSERT(tb->common.owner == p->id);
+ ASSERT(tb->common.owner == p->common.id);
ASSERT(is_internal_pid(tb->common.heir));
- ASSERT(tb->common.heir != p->id);
+ ASSERT(tb->common.heir != p->common.id);
retry:
to_pid = tb->common.heir;
to_proc = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN,
@@ -3104,7 +3062,7 @@ retry:
db_lock(tb,LCK_WRITE);
ASSERT(tb != NULL);
- if (tb->common.owner != p->id) {
+ if (tb->common.owner != p->common.id) {
if (to_proc != NULL ) {
erts_smp_proc_unlock(to_proc, to_locks);
}
@@ -3115,7 +3073,7 @@ retry:
if (to_proc != NULL ) {
erts_smp_proc_unlock(to_proc, to_locks);
}
- if (to_pid == p->id || to_pid == am_none) {
+ if (to_pid == p->common.id || to_pid == am_none) {
return 0; /* no real heir, table still mine */
}
goto retry;
@@ -3124,7 +3082,8 @@ retry:
if (to_proc == NULL) {
return 0; /* heir not alive, table still mine */
}
- if (erts_cmp_timeval(&to_proc->started, &tb->common.heir_started) != 0) {
+ if (to_proc->common.u.alive.started_interval
+ != tb->common.heir_started_interval) {
erts_smp_proc_unlock(to_proc, to_locks);
return 0; /* heir dead and pid reused, table still mine */
}
@@ -3149,7 +3108,11 @@ retry:
heir_data = tpv[1];
}
erts_send_message(p, to_proc, &to_locks,
- TUPLE4(buf, am_ETS_TRANSFER, tb->common.id, p->id, heir_data),
+ TUPLE4(buf,
+ am_ETS_TRANSFER,
+ tb->common.id,
+ p->common.id,
+ heir_data),
0);
erts_smp_proc_unlock(to_proc, to_locks);
return !0;
@@ -3158,7 +3121,7 @@ retry:
/*
* erts_db_process_exiting() is called when a process terminates.
* It returns 0 when completely done, and !0 when it wants to
- * yield. c_p->u.exit_data can hold a pointer to a state while
+ * yield. c_p->u.terminate can hold a pointer to a state while
* yielding.
*/
#define ERTS_DB_INTERNAL_ERROR(LSTR) \
@@ -3168,8 +3131,8 @@ retry:
int
erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
{
- ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.exit_data;
- Eterm pid = c_p->id;
+ ErtsDbProcCleanupState *state = (ErtsDbProcCleanupState *) c_p->u.terminate;
+ Eterm pid = c_p->common.id;
ErtsDbProcCleanupState default_state;
int ret;
@@ -3350,7 +3313,7 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
if (state != &default_state)
erts_free(ERTS_ALC_T_DB_PROC_CLEANUP, state);
- c_p->u.exit_data = NULL;
+ c_p->u.terminate = NULL;
return 0;
default:
@@ -3371,13 +3334,13 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks)
break;
}
- ASSERT(c_p->u.exit_data == (void *) state
+ ASSERT(c_p->u.terminate == (void *) state
|| state == &default_state);
if (state == &default_state) {
- c_p->u.exit_data = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
+ c_p->u.terminate = erts_alloc(ERTS_ALC_T_DB_PROC_CLEANUP,
sizeof(ErtsDbProcCleanupState));
- sys_memcpy(c_p->u.exit_data,
+ sys_memcpy(c_p->u.terminate,
(void*) state,
sizeof(ErtsDbProcCleanupState));
}
@@ -3403,7 +3366,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
}
else {
for (; fix != NULL; fix = fix->next) {
- if (fix->pid == p->id) {
+ if (fix->pid == p->common.id) {
++(fix->counter);
#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
@@ -3415,7 +3378,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
fix = (DbFixation *) erts_db_alloc(ERTS_ALC_T_DB_FIXATION,
tb, sizeof(DbFixation));
ERTS_ETS_MISC_MEM_ADD(sizeof(DbFixation));
- fix->pid = p->id;
+ fix->pid = p->common.id;
fix->counter = 1;
fix->next = tb->common.fixations;
tb->common.fixations = fix;
@@ -3426,7 +3389,9 @@ static void fix_table_locked(Process* p, DbTable* tb)
UseTmpHeap(3,p);
db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
if (db_put_hash(meta_pid_to_fixed_tab,
- TUPLE2(meta_tuple, p->id, make_small(tb->common.slot)),
+ TUPLE2(meta_tuple,
+ p->common.id,
+ make_small(tb->common.slot)),
0) != DB_ERROR_NONE) {
UnUseTmpHeap(3,p);
erl_exit(1,"Could not insert ets metadata in safe_fixtable.");
@@ -3446,7 +3411,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
erts_smp_mtx_lock(&tb->common.fixlock);
#endif
for (pp = &tb->common.fixations; *pp != NULL; pp = &(*pp)->next) {
- if ((*pp)->pid == p->id) {
+ if ((*pp)->pid == p->common.id) {
DbFixation* fix = *pp;
erts_refc_dec(&tb->common.ref,0);
--(fix->counter);
@@ -3460,7 +3425,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
#endif
db_meta_lock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
db_erase_bag_exact2(meta_pid_to_fixed_tab,
- p->id, make_small(tb->common.slot));
+ p->common.id, make_small(tb->common.slot));
db_meta_unlock(meta_pid_to_fixed_tab, LCK_WRITE_REC);
erts_db_free(ERTS_ALC_T_DB_FIXATION,
tb, (void *) fix, sizeof(DbFixation));
@@ -3519,15 +3484,15 @@ static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data)
if (heir == am_none) {
return;
}
- if (heir == me->id) {
- tb->common.heir_started = me->started;
+ if (heir == me->common.id) {
+ erts_ensure_later_proc_interval(me->common.u.alive.started_interval);
+ tb->common.heir_started_interval = me->common.u.alive.started_interval;
}
else {
- Process* heir_proc= erts_pid2proc_opt(me, ERTS_PROC_LOCK_MAIN, heir,
- 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ Process* heir_proc= erts_proc_lookup(heir);
if (heir_proc != NULL) {
- tb->common.heir_started = heir_proc->started;
- erts_smp_proc_dec_refc(heir_proc);
+ erts_ensure_later_proc_interval(heir_proc->common.u.alive.started_interval);
+ tb->common.heir_started_interval = heir_proc->common.u.alive.started_interval;
} else {
tb->common.heir = am_none;
}
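The two hunks above replace the old timeval-based heir identification with the process start interval: both the table transfer path and set_heir() now record common.u.alive.started_interval and later compare it against the table's heir_started_interval, so a reused pid no longer passes as the original heir. A minimal sketch of that reuse check, assuming only the field names visible in the diff (the structs and lookup result below are illustrative stand-ins, not the ERTS definitions):

    #include <stdint.h>

    /* Illustrative stand-ins for the fields referenced in the diff above. */
    typedef struct {
        uint64_t started_interval;       /* common.u.alive.started_interval   */
    } proc_sketch;

    typedef struct {
        uint64_t heir_started_interval;  /* tb->common.heir_started_interval  */
    } table_sketch;

    /* Returns nonzero only if the process found for the heir pid is the same
     * incarnation that was recorded when the heir was set; a reused pid gets
     * a later start interval and therefore fails the comparison. */
    static int heir_still_valid(const proc_sketch *heir_proc,
                                const table_sketch *tb)
    {
        if (heir_proc == NULL)
            return 0;  /* heir not alive */
        return heir_proc->started_interval == tb->heir_started_interval;
    }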
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index 2e5deaf338..e9a661efbc 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -27,6 +27,10 @@
#define __DB_H__
#include "sys.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_thr_progress.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
#include "bif.h"
#include "erl_db_util.h" /* Flags */
@@ -36,6 +40,11 @@
Uint erts_get_ets_misc_mem_size(void);
+typedef struct {
+ DbTableCommon common;
+ ErtsThrPrgrLaterOp data;
+} DbTableRelease;
+
/*
* So, the structure for a database table, NB this is only
* interesting in db.c.
@@ -44,6 +53,7 @@ union db_table {
DbTableCommon common; /* Any type of db table */
DbTableHash hash; /* Linear hash array specific data */
DbTableTree tree; /* AVL tree specific data */
+ DbTableRelease release;
/*TT*/
};
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 312050b931..faa7f31d99 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -452,16 +452,8 @@ DbTableMethod db_tree =
void db_initialize_tree(void)
{
- memset(&ets_select_reverse_exp, 0, sizeof(Export));
- ets_select_reverse_exp.address =
- &ets_select_reverse_exp.code[3];
- ets_select_reverse_exp.code[0] = am_ets;
- ets_select_reverse_exp.code[1] = am_reverse;
- ets_select_reverse_exp.code[2] = 3;
- ets_select_reverse_exp.code[3] =
- (BeamInstr) em_apply_bif;
- ets_select_reverse_exp.code[4] =
- (BeamInstr) &ets_select_reverse;
+ erts_init_trap_export(&ets_select_reverse_exp, am_ets, am_reverse, 3,
+ &ets_select_reverse);
return;
};
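db_initialize_tree() now calls erts_init_trap_export() instead of filling in the Export record by hand. Judging only from the removed lines, the helper records module, function and arity, points the entry at an apply-BIF trampoline, and stores the BIF's C address after it. A hedged sketch of that shape, using the field layout shown in the removed code (this is not the real OTP definition of erts_init_trap_export):

    #include <stdint.h>
    #include <string.h>

    typedef uintptr_t BeamInstrSketch;

    /* Field layout mirrors the removed open-coded setup above:
     * code[0] = module, code[1] = function, code[2] = arity,
     * code[3] = apply-BIF instruction, code[4] = BIF C address,
     * and the export entry point is &code[3]. */
    typedef struct {
        BeamInstrSketch *address;
        BeamInstrSketch  code[5];
    } export_sketch;

    static void init_trap_export_sketch(export_sketch *ep,
                                        BeamInstrSketch mod,
                                        BeamInstrSketch fun,
                                        int arity,
                                        BeamInstrSketch apply_bif_instr,
                                        void *bif_c_function)
    {
        memset(ep, 0, sizeof(*ep));
        ep->code[0] = mod;
        ep->code[1] = fun;
        ep->code[2] = (BeamInstrSketch) arity;
        ep->code[3] = apply_bif_instr;                 /* em_apply_bif      */
        ep->code[4] = (BeamInstrSketch) bif_c_function;/* the BIF itself    */
        ep->address = &ep->code[3];                    /* entry is the trap */
    }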
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 42907e2e84..bb08762b26 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -138,21 +138,23 @@ set_tracee_flags(Process *tracee_p, Eterm tracer, Uint d_flags, Uint e_flags) {
Uint flags;
if (tracer == NIL) {
- flags = tracee_p->trace_flags & ~TRACEE_FLAGS;
+ flags = ERTS_TRACE_FLAGS(tracee_p) & ~TRACEE_FLAGS;
} else {
- flags = ((tracee_p->trace_flags & ~d_flags) | e_flags);
+ flags = ((ERTS_TRACE_FLAGS(tracee_p) & ~d_flags) | e_flags);
if (! flags) tracer = NIL;
}
- ret = tracee_p->tracer_proc != tracer || tracee_p->trace_flags != flags
- ? am_true : am_false;
- tracee_p->tracer_proc = tracer;
- tracee_p->trace_flags = flags;
+ ret = ((ERTS_TRACER_PROC(tracee_p) != tracer
+ || ERTS_TRACE_FLAGS(tracee_p) != flags)
+ ? am_true
+ : am_false);
+ ERTS_TRACER_PROC(tracee_p) = tracer;
+ ERTS_TRACE_FLAGS(tracee_p) = flags;
return ret;
}
/*
** Assuming all locks on tracee_p on entry
**
-** Changes tracee_p->trace_flags and tracee_p->tracer_proc
+** Changes ERTS_TRACE_FLAGS(tracee_p) and ERTS_TRACER_PROC(tracee_p)
** according to input disable/enable flags and tracer.
**
** Returns am_true|am_false on success, am_true if value changed,
@@ -173,17 +175,20 @@ set_match_trace(Process *tracee_p, Eterm fail_term, Eterm tracer,
tracer, ERTS_PROC_LOCKS_ALL))) {
if (tracee_p != tracer_p) {
ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- tracer_p->trace_flags |= tracee_p->trace_flags ? F_TRACER : 0;
+ ERTS_TRACE_FLAGS(tracer_p) |= (ERTS_TRACE_FLAGS(tracee_p)
+ ? F_TRACER
+ : 0);
erts_smp_proc_unlock(tracer_p, ERTS_PROC_LOCKS_ALL);
}
} else if (is_internal_port(tracer)) {
Port *tracer_port =
- erts_id2port(tracer, tracee_p, ERTS_PROC_LOCKS_ALL);
+ erts_id2port_sflgs(tracer,
+ tracee_p,
+ ERTS_PROC_LOCKS_ALL,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
if (tracer_port) {
- if (! INVALID_TRACER_PORT(tracer_port, tracer)) {
- ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
- }
- erts_smp_port_unlock(tracer_port);
+ ret = set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
+ erts_port_release(tracer_port);
}
} else {
ASSERT(is_nil(tracer));
@@ -2174,7 +2179,7 @@ restart:
pc += n;
break;
case matchSelf:
- *esp++ = c_p->id;
+ *esp++ = c_p->common.id;
break;
case matchWaste:
--esp;
@@ -2261,7 +2266,7 @@ restart:
case matchEnableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, c_p->tracer_proc, 0, n);
+ set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), 0, n);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2274,7 +2279,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, c_p->tracer_proc, 0, n);
+ set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), 0, n);
esp[-1] = am_true;
}
}
@@ -2282,7 +2287,7 @@ restart:
case matchDisableTrace:
if ( (n = erts_trace_flag2bit(esp[-1]))) {
BEGIN_ATOMIC_TRACE(c_p);
- set_tracee_flags(c_p, c_p->tracer_proc, n, 0);
+ set_tracee_flags(c_p, ERTS_TRACER_PROC(c_p), n, 0);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
@@ -2295,7 +2300,7 @@ restart:
BEGIN_ATOMIC_TRACE(c_p);
if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
/* Always take over the tracer of the current process */
- set_tracee_flags(tmpp, c_p->tracer_proc, n, 0);
+ set_tracee_flags(tmpp, ERTS_TRACER_PROC(c_p), n, 0);
esp[-1] = am_true;
}
}
@@ -2316,12 +2321,12 @@ restart:
--esp;
if (*esp == am_true) {
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags |= F_TRACE_SILENT;
+ ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
else if (*esp == am_false) {
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- c_p->trace_flags &= ~F_TRACE_SILENT;
+ ERTS_TRACE_FLAGS(c_p) &= ~F_TRACE_SILENT;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
break;
@@ -2329,11 +2334,11 @@ restart:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = c_p->tracer_proc;
+ Eterm tracer = ERTS_TRACER_PROC(c_p);
/* XXX Atomicity note: Not fully atomic. Default tracer
* is sampled from current process but applied to
* tracee and tracer later after releasing main
- * locks on current process, so c_p->tracer_proc
+ * locks on current process, so ERTS_TRACER_PROC(c_p)
* may actually have changed when tracee and tracer
* get updated. I do not think anybody will notice.
* It is just the default value that is not fully atomic.
@@ -2358,7 +2363,7 @@ restart:
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
- Eterm tracer = c_p->tracer_proc;
+ Eterm tracer = ERTS_TRACER_PROC(c_p);
/* XXX Atomicity note. Not fully atomic. See above.
* Above it could possibly be solved, but not here.
*/
@@ -2480,7 +2485,7 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei)
vnum = tmp->variable;
}
if (vnum >= 0)
- sprintf(buff,tmp->error_string, vnum);
+ erts_snprintf(buff,sizeof(buff)+20,tmp->error_string, vnum);
else
strcpy(buff,tmp->error_string);
sl = strlen(buff);
@@ -4485,7 +4490,9 @@ static DMCRet dmc_fun(DMCContext *context,
if (context->err_info != NULL) {
/* Ugly, should define a better RETURN_TERM_ERROR interface... */
char buff[100];
- sprintf(buff, "Function %%T/%d does_not_exist.", (int)a - 1);
+ erts_snprintf(buff, sizeof(buff),
+ "Function %%T/%d does_not_exist.",
+ (int)a - 1);
RETURN_TERM_ERROR(buff, p[1], context, *constant);
} else {
return retFail;
@@ -4500,7 +4507,7 @@ static DMCRet dmc_fun(DMCContext *context,
if (context->err_info != NULL) {
/* Ugly, should define a better RETURN_TERM_ERROR interface... */
char buff[100];
- sprintf(buff,
+ erts_snprintf(buff, sizeof(buff),
"Function %%T/%d cannot be called in this context.",
(int)a - 1);
RETURN_TERM_ERROR(buff, p[1], context, *constant);
@@ -4764,7 +4771,7 @@ static int match_compact(ErlHeapFragment *expr, DMCErrInfo *err_info)
for (j = 0; j < x && DMC_PEEK(heap,j) != n; ++j)
;
ASSERT(j < x);
- sprintf(buff+1,"%u", (unsigned) j);
+ erts_snprintf(buff+1, sizeof(buff) - 1, "%u", (unsigned) j);
/* Yes, writing directly into terms, they ARE off heap */
*p = am_atom_put(buff, strlen(buff));
}
@@ -5002,7 +5009,7 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
static Eterm seq_trace_fake(Process *p, Eterm arg1)
{
Eterm result = erl_seq_trace_info(p, arg1);
- if (is_tuple(result) && *tuple_val(result) == 2) {
+ if (!is_non_value(result) && is_tuple(result) && *tuple_val(result) == 2) {
return (tuple_val(result))[2];
}
return result;
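Several hunks above swap sprintf() for erts_snprintf() so the formatted text can never run past the destination buffer. The same pattern with the standard snprintf(), which bounds the write in the same way (erts_snprintf is ERTS's portable wrapper; the buffer size and arity below are illustrative):

    #include <stdio.h>

    /* Bounded formatting: at most buff_size - 1 characters plus the
     * terminating NUL are written, unlike with sprintf(). */
    static void format_missing_fun(char *buff, size_t buff_size, int arity)
    {
        snprintf(buff, buff_size,
                 "Function %%T/%d does_not_exist.", arity - 1);
    }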
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 6a96e174e1..d8f6e40d2e 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -209,7 +209,7 @@ typedef struct db_fixation {
*/
typedef struct db_table_common {
- erts_refc_t ref; /* fixation counter and delete counter */
+ erts_refc_t ref; /* fixation counter */
#ifdef ERTS_SMP
erts_smp_rwmtx_t rwlock; /* rw lock on table */
erts_smp_mtx_t fixlock; /* Protects fixations,megasec,sec,microsec */
@@ -219,7 +219,7 @@ typedef struct db_table_common {
Eterm owner; /* Pid of the creator */
Eterm heir; /* Pid of the heir */
UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */
- SysTimeval heir_started; /* To further identify the heir */
+ Uint64 heir_started_interval; /* To further identify the heir */
Eterm the_name; /* an atom */
Eterm id; /* atom | integer */
DbTableMethod* meth; /* table methods */
@@ -320,10 +320,10 @@ ERTS_GLB_INLINE int db_eq(DbTableCommon* tb, Eterm a, DbTerm* b)
#define DB_INFO (DB_PROTECTED|DB_PUBLIC|DB_PRIVATE)
#define ONLY_WRITER(P,T) (((T)->common.status & (DB_PRIVATE|DB_PROTECTED)) \
- && (T)->common.owner == (P)->id)
+ && (T)->common.owner == (P)->common.id)
#define ONLY_READER(P,T) (((T)->common.status & DB_PRIVATE) && \
-(T)->common.owner == (P)->id)
+(T)->common.owner == (P)->common.id)
/* Function prototypes */
BIF_RETTYPE db_get_trace_control_word(Process* p);
diff --git a/erts/emulator/beam/erl_debug.c b/erts/emulator/beam/erl_debug.c
index 22e873afc6..b90d00f236 100644
--- a/erts/emulator/beam/erl_debug.c
+++ b/erts/emulator/beam/erl_debug.c
@@ -252,16 +252,16 @@ void erts_check_stack(Process *p)
if (p->stop > stack_start)
erl_exit(1,
"<%lu.%lu.%lu>: Stack underflow\n",
- internal_pid_channel_no(p->id),
- internal_pid_number(p->id),
- internal_pid_serial(p->id));
+ internal_pid_channel_no(p->common.id),
+ internal_pid_number(p->common.id),
+ internal_pid_serial(p->common.id));
if (p->stop < stack_end)
erl_exit(1,
"<%lu.%lu.%lu>: Stack overflow\n",
- internal_pid_channel_no(p->id),
- internal_pid_number(p->id),
- internal_pid_serial(p->id));
+ internal_pid_channel_no(p->common.id),
+ internal_pid_number(p->common.id),
+ internal_pid_serial(p->common.id));
for (elemp = p->stop; elemp < stack_start; elemp++) {
int in_mbuf = 0;
@@ -284,9 +284,9 @@ void erts_check_stack(Process *p)
erl_exit(1,
"<%lu.%lu.%lu>: Wild stack pointer\n",
- internal_pid_channel_no(p->id),
- internal_pid_number(p->id),
- internal_pid_serial(p->id));
+ internal_pid_channel_no(p->common.id),
+ internal_pid_number(p->common.id),
+ internal_pid_serial(p->common.id));
}
}
@@ -387,16 +387,16 @@ void verify_process(Process *p)
#define VERIFY_AREA(name,ptr,sz) { \
int n = (sz); \
while (n--) if(!verify_eterm(p,*(ptr+n))) \
- erl_exit(1,"Wild pointer found in " name " of %T!\n",p->id); }
+ erl_exit(1,"Wild pointer found in " name " of %T!\n",p->common.id); }
#define VERIFY_ETERM(name,eterm) { \
if(!verify_eterm(p,eterm)) \
- erl_exit(1,"Wild pointer found in " name " of %T!\n",p->id); }
+ erl_exit(1,"Wild pointer found in " name " of %T!\n",p->common.id); }
ErlMessage* mp = p->msg.first;
- VERBOSE(DEBUG_MEMORY,("Verify process: %T...\n",p->id));
+ VERBOSE(DEBUG_MEMORY,("Verify process: %T...\n",p->common.id));
while (mp != NULL) {
VERIFY_ETERM("message term",ERL_MESSAGE_TERM(mp));
@@ -516,7 +516,7 @@ static void print_process_memory(Process *p)
ErlHeapFragment* bp = MBUF(p);
erts_printf("==============================\n");
- erts_printf("|| Memory info for %T ||\n",p->id);
+ erts_printf("|| Memory info for %T ||\n",p->common.id);
erts_printf("==============================\n");
erts_printf("-- %-*s ---%s-%s-%s-%s--\n",
@@ -601,7 +601,7 @@ void print_memory_info(Process *p)
{
if (p != NULL) {
erts_printf("======================================\n");
- erts_printf("|| Memory info for %-12T ||\n",p->id);
+ erts_printf("|| Memory info for %-12T ||\n",p->common.id);
erts_printf("======================================\n");
erts_printf("+- local heap ----%s-%s-%s-%s-+\n",
dashes,dashes,dashes,dashes);
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 1ae9a211d7..046b46513f 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -87,10 +87,7 @@
#include <stdlib.h>
#include <string.h> /* ssize_t on Mac OS X */
-#if defined(VXWORKS)
-# include <ioLib.h>
-typedef struct iovec SysIOVec;
-#elif defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)
+#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)
#ifndef STATIC_ERLANG_DRIVER
/* Windows dynamic drivers, everything is different... */
#define ERL_DRIVER_TYPES_ONLY
@@ -136,7 +133,7 @@ typedef struct {
#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
#define ERL_DRV_EXTENDED_MAJOR_VERSION 2
-#define ERL_DRV_EXTENDED_MINOR_VERSION 0
+#define ERL_DRV_EXTENDED_MINOR_VERSION 1
/*
* The emulator will refuse to load a driver with different major
@@ -157,6 +154,7 @@ typedef struct {
#define ERL_DRV_FLAG_USE_PORT_LOCKING (1 << 0)
#define ERL_DRV_FLAG_SOFT_BUSY (1 << 1)
+#define ERL_DRV_FLAG_NO_BUSY_MSGQ (1 << 2)
/*
* Integer types
@@ -210,8 +208,8 @@ typedef struct erl_drv_binary {
typedef struct _erl_drv_data* ErlDrvData; /* Data to be used by the driver itself. */
#ifndef ERL_SYS_DRV
typedef struct _erl_drv_event* ErlDrvEvent; /* An event to be selected on. */
-typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */
#endif
+typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */
typedef struct _erl_drv_port* ErlDrvThreadData; /* Thread data. */
#if !defined(__WIN32__) && !defined(_WIN32) && !defined(_WIN32_) && !defined(USE_SELECT)
@@ -370,11 +368,7 @@ typedef struct erl_drv_entry {
/* For windows dynamic drivers */
#ifndef ERL_DRIVER_TYPES_ONLY
-#if defined(VXWORKS)
-# define DRIVER_INIT(DRIVER_NAME) \
- ErlDrvEntry* DRIVER_NAME ## _init(void); \
- ErlDrvEntry* DRIVER_NAME ## _init(void)
-#elif defined(__WIN32__)
+#if defined(__WIN32__)
# define DRIVER_INIT(DRIVER_NAME) \
__declspec(dllexport) ErlDrvEntry* driver_init(void); \
__declspec(dllexport) ErlDrvEntry* driver_init(void)
@@ -384,9 +378,18 @@ typedef struct erl_drv_entry {
ErlDrvEntry* driver_init(void)
#endif
+#define ERL_DRV_BUSY_MSGQ_DISABLED (~((ErlDrvSizeT) 0))
+#define ERL_DRV_BUSY_MSGQ_READ_ONLY ((ErlDrvSizeT) 0)
+#define ERL_DRV_BUSY_MSGQ_LIM_MAX (ERL_DRV_BUSY_MSGQ_DISABLED - 1)
+#define ERL_DRV_BUSY_MSGQ_LIM_MIN ((ErlDrvSizeT) 1)
+
/*
* These are the functions available for driver writers.
*/
+EXTERN void erl_drv_busy_msgq_limits(ErlDrvPort port,
+ ErlDrvSizeT *low,
+ ErlDrvSizeT *high);
+
EXTERN int driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on);
EXTERN int driver_event(ErlDrvPort port, ErlDrvEvent event,
ErlDrvEventData event_data);
@@ -601,11 +604,33 @@ EXTERN ErlDrvPort driver_create_port(ErlDrvPort creator_port,
ErlDrvData drv_data);
+/*
+ * driver_output_term() is deprecated, and scheduled for removal in
+ * OTP-R17. Use erl_drv_output_term() instead. For more information
+ * see the erl_driver(3) documentation.
+ */
+EXTERN int driver_output_term(ErlDrvPort ix,
+ ErlDrvTermData* data,
+ int len) ERL_DRV_DEPRECATED_FUNC;
+/*
+ * driver_send_term() is deprecated, and scheduled for removal in
+ * OTP-R17. Use erl_drv_send_term() instead. For more information
+ * see the erl_driver(3) documentation.
+ */
+EXTERN int driver_send_term(ErlDrvPort ix,
+ ErlDrvTermData to,
+ ErlDrvTermData* data,
+ int len) ERL_DRV_DEPRECATED_FUNC;
+
/* output term data to the port owner */
-EXTERN int driver_output_term(ErlDrvPort ix, ErlDrvTermData* data, int len);
+EXTERN int erl_drv_output_term(ErlDrvTermData port,
+ ErlDrvTermData* data,
+ int len);
/* output term data to a specific process */
-EXTERN int driver_send_term(ErlDrvPort ix, ErlDrvTermData to,
- ErlDrvTermData* data, int len);
+EXTERN int erl_drv_send_term(ErlDrvTermData port,
+ ErlDrvTermData to,
+ ErlDrvTermData* data,
+ int len);
/* Async IO functions */
EXTERN long driver_async(ErlDrvPort ix,
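The erl_driver.h changes bump the minor driver API version, add a busy message-queue limit API plus ERL_DRV_FLAG_NO_BUSY_MSGQ, and deprecate driver_output_term()/driver_send_term() in favour of erl_drv_output_term()/erl_drv_send_term(). A hedged usage sketch of the new limits call, based only on the declaration and macros visible above; that the pointed-to values both request new limits and return the old ones is an assumption here, and the numbers are illustrative:

    #include "erl_driver.h"

    /* Called from driver code that holds a valid ErlDrvPort. */
    static void tune_busy_msgq(ErlDrvPort port)
    {
        ErlDrvSizeT low  = 1024;   /* requested low-water mark (illustrative)  */
        ErlDrvSizeT high = 8192;   /* requested high-water mark (illustrative) */

        /* Assumption: the call installs the requested limits and writes the
         * previously active ones back through the pointers. Passing
         * ERL_DRV_BUSY_MSGQ_DISABLED would turn the mechanism off,
         * per the macros defined above. */
        erl_drv_busy_msgq_limits(port, &low, &high);
    }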
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index 5c66a0bf73..b673ef6b3c 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -77,8 +77,6 @@ ErlFunEntry* erts_get_fun_entry(Eterm mod, int uniq, int index);
ErlFunEntry* erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
byte* uniq, int index, int arity);
-ErlFunEntry* erts_get_fun_entry2(Eterm mod, int old_uniq, int old_index,
- byte* uniq, int index, int arity);
void erts_erase_fun_entry(ErlFunEntry* fe);
void erts_cleanup_funs(ErlFunThing* funp);
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 52a6e52e6c..a33085315a 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -59,7 +59,7 @@ static Uint reclaimed; /* no of words reclaimed in GCs */
erts_fprintf(stderr, "htop=%p\n", (p)->htop); \
erts_fprintf(stderr, "heap=%p\n", (p)->heap); \
erl_exit(ERTS_ABORT_EXIT, "%s, line %d: %T: Overrun stack and heap\n", \
- __FILE__,__LINE__,(P)->id); \
+ __FILE__,__LINE__,(P)->common.id); \
}
#ifdef DEBUG
@@ -129,7 +129,7 @@ static void disallow_heap_frag_ref(Process* p, Eterm* n_htop, Eterm* objv, int n
#if defined(ARCH_64) && !HALFWORD_HEAP
# define MAX_HEAP_SIZES 154
#else
-# define MAX_HEAP_SIZES 55
+# define MAX_HEAP_SIZES 59
#endif
static Sint heap_sizes[MAX_HEAP_SIZES]; /* Suitable heap sizes. */
@@ -144,6 +144,7 @@ void
erts_init_gc(void)
{
int i = 0;
+ Sint max_heap_size = 0;
ASSERT(offsetof(ProcBin,thing_word) == offsetof(struct erl_off_heap_header,thing_word));
ASSERT(offsetof(ProcBin,thing_word) == offsetof(ErlFunThing,thing_word));
@@ -168,16 +169,30 @@ erts_init_gc(void)
* we really don't want that growth when the heaps are that big.
*/
- heap_sizes[0] = 34;
- heap_sizes[1] = 55;
- for (i = 2; i < 23; i++) {
- heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-2];
+ /* Growth stage 1 - Fibonacci + 1 */
+ /* 12,38 will hit size 233, the old default */
+
+ heap_sizes[0] = 12;
+ heap_sizes[1] = 38;
+
+ for(i = 2; i < 23; i++) {
+ /* one extra word for block header */
+ heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-2] + 1;
}
+
+ /* for 32 bit we want max_heap_size to be MAX(32bit) / 4 [words] (and halfword)
+ * for 64 bit we want max_heap_size to be MAX(52bit) / 8 [words]
+ */
+
+ max_heap_size = sizeof(Eterm) < 8 ? (Sint)((~(Uint)0)/(sizeof(Eterm))) :
+ (Sint)(((Uint64)1 << 53)/sizeof(Eterm));
+
+ /* Growth stage 2 - 20% growth */
/* At 1.3 mega words heap, we start to slow down. */
for (i = 23; i < ALENGTH(heap_sizes); i++) {
- heap_sizes[i] = 5*(heap_sizes[i-1]/4);
- if (heap_sizes[i] < 0) {
+ heap_sizes[i] = heap_sizes[i-1] + heap_sizes[i-1]/5;
+ if ((heap_sizes[i] < 0) || heap_sizes[i] > max_heap_size) {
/* Size turned negative or passed the max heap size. Discard this last size. */
i--;
break;
@@ -336,6 +351,19 @@ erts_gc_after_bif_call(Process* p, Eterm result, Eterm* regs, Uint arity)
return result;
}
+static ERTS_INLINE void reset_active_writer(Process *p)
+{
+ struct erl_off_heap_header* ptr;
+ ptr = MSO(p).first;
+ while (ptr) {
+ if (ptr->thing_word == HEADER_PROC_BIN) {
+ ProcBin *pbp = (ProcBin*) ptr;
+ pbp->flags &= ~PB_ACTIVE_WRITER;
+ }
+ ptr = ptr->next;
+ }
+}
+
/*
* Garbage collect a process.
*
@@ -357,11 +385,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
trace_gc(p, am_gc_start);
}
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- p->gcstatus = p->status;
- p->status = P_GARBING;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-
+ erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
if (erts_system_monitor_long_gc != 0) {
get_now(&ms1, &s1, &us1);
}
@@ -395,6 +419,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
}
}
+ reset_active_writer(p);
/*
* Finish.
@@ -404,9 +429,8 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
ErtsGcQuickSanityCheck(p);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- p->status = p->gcstatus;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_end);
}
@@ -490,10 +514,7 @@ erts_garbage_collect_hibernate(Process* p)
/*
* Preliminaries.
*/
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- p->gcstatus = p->status;
- p->status = P_GARBING;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
ErtsGcQuickSanityCheck(p);
ASSERT(p->mbuf_sz == 0);
ASSERT(p->mbuf == 0);
@@ -604,9 +625,7 @@ erts_garbage_collect_hibernate(Process* p)
ErtsGcQuickSanityCheck(p);
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- p->status = p->gcstatus;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
}
@@ -630,10 +649,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
/*
* Set GC state.
*/
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- p->gcstatus = p->status;
- p->status = P_GARBING;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
/*
* We assume that the caller has already done a major collection
@@ -775,9 +791,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
/*
* Restore status.
*/
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- p->status = p->gcstatus;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
}
static int
@@ -869,14 +883,12 @@ minor_collection(Process* p, int need, Eterm* objv, int nobj, Uint *recl)
}
}
- if (wanted < MIN_HEAP_SIZE(p)) {
- wanted = MIN_HEAP_SIZE(p);
- } else {
- wanted = next_heap_size(p, wanted, 0);
- }
+ wanted = wanted < MIN_HEAP_SIZE(p) ? MIN_HEAP_SIZE(p)
+ : next_heap_size(p, wanted, 0);
if (wanted < HEAP_SIZE(p)) {
shrink_new_heap(p, wanted, objv, nobj);
}
+
ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
return 1; /* We are done. */
}
@@ -1435,11 +1447,10 @@ adjust_after_fullsweep(Process *p, Uint size_before, int need, Eterm *objv, int
I think this is better as fullsweep is used mainly on
small memory systems, but I could be wrong... */
wanted = 2 * need_after;
- if (wanted < p->min_heap_size) {
- sz = p->min_heap_size;
- } else {
- sz = next_heap_size(p, wanted, 0);
- }
+
+ sz = wanted < p->min_heap_size ? p->min_heap_size
+ : next_heap_size(p, wanted, 0);
+
if (sz < HEAP_SIZE(p)) {
shrink_new_heap(p, sz, objv, nobj);
}
@@ -1947,9 +1958,9 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
n++;
}
#endif
- ASSERT(is_nil(p->tracer_proc) ||
- is_internal_pid(p->tracer_proc) ||
- is_internal_port(p->tracer_proc));
+ ASSERT(is_nil(ERTS_TRACER_PROC(p)) ||
+ is_internal_pid(ERTS_TRACER_PROC(p)) ||
+ is_internal_port(ERTS_TRACER_PROC(p)));
ASSERT(is_pid(follow_moved(p->group_leader)));
if (is_not_immed(p->group_leader)) {
@@ -2181,7 +2192,6 @@ link_live_proc_bin(struct shrink_cand_data *shrink,
if (pbp->flags & PB_ACTIVE_WRITER) {
- pbp->flags &= ~PB_ACTIVE_WRITER;
shrink->no_of_active++;
}
else { /* inactive */
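erts_init_gc() now builds the table of candidate heap sizes in two stages: a Fibonacci-plus-one series (12, 38, 51, 90, ... reaching 233, the old first size) for the first 23 entries, then 20% growth per step until the value overflows or passes a ceiling derived from the word width. The following standalone program mirrors that computation so the resulting sizes can be inspected; it uses sizeof(void *) as a stand-in for sizeof(Eterm) and is a sketch, not the ERTS code:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_HEAP_SIZES 59   /* 32-bit table length from the diff */

    int main(void)
    {
        int64_t sizes[MAX_HEAP_SIZES];
        int64_t max_heap_size;
        int i, last;

        /* Stage 1: Fibonacci + 1 (one extra word per step for the block
         * header); starting at 12 and 38 makes the series hit 233. */
        sizes[0] = 12;
        sizes[1] = 38;
        for (i = 2; i < 23; i++)
            sizes[i] = sizes[i - 1] + sizes[i - 2] + 1;

        /* Ceiling: roughly 2^32/4 words on 32-bit builds and 2^53/8 words
         * on 64-bit builds, as in the hunk above. */
        max_heap_size = sizeof(void *) < 8
            ? (int64_t) (UINT32_MAX / 4)
            : (int64_t) (((uint64_t) 1 << 53) / 8);

        /* Stage 2: 20% growth per step, stopping at the ceiling. */
        last = 22;
        for (i = 23; i < MAX_HEAP_SIZES; i++) {
            sizes[i] = sizes[i - 1] + sizes[i - 1] / 5;
            if (sizes[i] < 0 || sizes[i] > max_heap_size)
                break;
            last = i;
        }

        printf("%d heap sizes, largest %lld words\n",
               last + 1, (long long) sizes[last]);
        return 0;
    }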
diff --git a/erts/emulator/beam/erl_goodfit_alloc.c b/erts/emulator/beam/erl_goodfit_alloc.c
index e7d4ac2b67..f98d377fd6 100644
--- a/erts/emulator/beam/erl_goodfit_alloc.c
+++ b/erts/emulator/beam/erl_goodfit_alloc.c
@@ -205,7 +205,6 @@ erts_gfalc_start(GFAllctr_t *gfallctr,
init->sbmbct = 0; /* Small mbc not yet supported by goodfit */
- allctr->mbc_header_size = sizeof(Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
allctr->min_block_size = sizeof(GFFreeBlock_t);
@@ -363,7 +362,7 @@ search_bucket(Allctr_t *allctr, int ix, Uint size)
blk && i < max_blk_search;
blk = blk->next, i++) {
- blk_sz = BLK_SZ(blk);
+ blk_sz = MBC_FBLK_SZ(&blk->block_head);
blk_on_lambc = (((char *) blk) < gfallctr->last_aux_mbc_end
&& gfallctr->last_aux_mbc_start <= ((char *) blk));
@@ -402,7 +401,7 @@ get_free_block(Allctr_t *allctr, Uint size,
if (min_bi == unsafe_bi) {
blk = search_bucket(allctr, min_bi, size);
if (blk) {
- if (cand_blk && cand_size <= BLK_SZ(blk))
+ if (cand_blk && cand_size <= MBC_FBLK_SZ(blk))
return NULL; /* cand_blk was better */
unlink_free_block(allctr, blk, flags);
return blk;
@@ -422,7 +421,7 @@ get_free_block(Allctr_t *allctr, Uint size,
/* We are guaranteed to find a block that fits in this bucket */
blk = search_bucket(allctr, min_bi, size);
ASSERT(blk);
- if (cand_blk && cand_size <= BLK_SZ(blk))
+ if (cand_blk && cand_size <= MBC_FBLK_SZ(blk))
return NULL; /* cand_blk was better */
unlink_free_block(allctr, blk, flags);
return blk;
@@ -435,7 +434,7 @@ link_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
- Uint sz = BLK_SZ(blk);
+ Uint sz = MBC_FBLK_SZ(&blk->block_head);
int i = BKT_IX(gfallctr, sz);
ASSERT(sz >= MIN_BLK_SZ);
@@ -456,7 +455,7 @@ unlink_free_block(Allctr_t *allctr, Block_t *block, Uint32 flags)
{
GFAllctr_t *gfallctr = (GFAllctr_t *) allctr;
GFFreeBlock_t *blk = (GFFreeBlock_t *) block;
- Uint sz = BLK_SZ(blk);
+ Uint sz = MBC_FBLK_SZ(&blk->block_head);
int i = BKT_IX(gfallctr, sz);
if (!blk->prev) {
@@ -618,7 +617,7 @@ check_block(Allctr_t *allctr, Block_t * blk, int free_block)
GFFreeBlock_t *fblk;
if(free_block) {
- Uint blk_sz = BLK_SZ(blk);
+ Uint blk_sz = is_sbc_blk(blk) ? SBC_BLK_SZ(blk) : MBC_BLK_SZ(blk);
bi = BKT_IX(gfallctr, blk_sz);
ASSERT(gfallctr->bucket_mask.main & (((UWord) 1) << IX2SMIX(bi)));
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 1eb3dba240..8cdf954dd2 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -44,6 +44,7 @@
#include "erl_thr_progress.h"
#include "erl_thr_queue.h"
#include "erl_async.h"
+#include "erl_ptab.h"
#ifdef HIPE
#include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */
@@ -55,6 +56,66 @@
#endif
/*
+ * The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands
+ * only. Do not remove even though they aren't used elsewhere in the emulator!
+ */
+#ifdef ERTS_SMP
+const int etp_smp_compiled = 1;
+#else
+const int etp_smp_compiled = 0;
+#endif
+#ifdef USE_THREADS
+const int etp_thread_compiled = 1;
+#else
+const int etp_thread_compiled = 0;
+#endif
+const char etp_erts_version[] = ERLANG_VERSION;
+const char etp_otp_release[] = ERLANG_OTP_RELEASE;
+const char etp_compile_date[] = ERLANG_COMPILE_DATE;
+const char etp_arch[] = ERLANG_ARCHITECTURE;
+#ifdef ERTS_ENABLE_KERNEL_POLL
+const int etp_kernel_poll_support = 1;
+#else
+const int etp_kernel_poll_support = 0;
+#endif
+#if defined(ARCH_64)
+const int etp_arch_bits = 64;
+#elif defined(ARCH_32)
+const int etp_arch_bits = 32;
+#else
# error "Neither 64-bit nor 32-bit arch"
+#endif
+#if HALFWORD_HEAP
+const int etp_halfword = 1;
+#else
+const int etp_halfword = 0;
+#endif
+#ifdef HIPE
+const int etp_hipe = 1;
+#else
+const int etp_hipe = 0;
+#endif
+#ifdef DEBUG
+const int etp_debug_compiled = 1;
+#else
+const int etp_debug_compiled = 0;
+#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+const int etp_lock_count = 1;
+#else
+const int etp_lock_count = 0;
+#endif
+#ifdef ERTS_ENABLE_LOCK_CHECK
+const int etp_lock_check = 1;
+#else
+const int etp_lock_check = 0;
+#endif
+#ifdef WORDS_BIGENDIAN
+const int etp_big_endian = 1;
+#else
+const int etp_big_endian = 0;
+#endif
+/*
* Note about VxWorks: All variables must be initialized by executable code,
* not by an initializer. Otherwise a new instance of the emulator will
* inherit previous values.
@@ -66,9 +127,10 @@ extern void ConNormalExit(void);
extern void ConWaitForExit(void);
#endif
-static void erl_init(int ncpu);
-
-#define ERTS_MIN_COMPAT_REL 7
+static void erl_init(int ncpu,
+ int proc_tab_sz,
+ int port_tab_sz,
+ int port_tab_sz_ignore_files);
static erts_atomic_t exiting;
@@ -151,8 +213,6 @@ ErtsModifiedTimings erts_modified_timings[] = {
Export *erts_delay_trap = NULL;
-int erts_use_r9_pids_ports;
-
int ignore_break;
int replace_intr;
@@ -216,12 +276,18 @@ void
erts_short_init(void)
{
int ncpu = early_init(NULL, NULL);
- erl_init(ncpu);
+ erl_init(ncpu,
+ ERTS_DEFAULT_MAX_PROCESSES,
+ ERTS_DEFAULT_MAX_PORTS,
+ 0);
erts_initialized = 1;
}
static void
-erl_init(int ncpu)
+erl_init(int ncpu,
+ int proc_tab_sz,
+ int port_tab_sz,
+ int port_tab_sz_ignore_files)
{
init_benchmarking();
@@ -229,7 +295,7 @@ erl_init(int ncpu)
erts_init_gc();
erts_init_time();
erts_init_sys_common_misc();
- erts_init_process(ncpu);
+ erts_init_process(ncpu, proc_tab_sz);
erts_init_scheduling(no_schedulers,
no_schedulers_online);
erts_init_cpu_topology(); /* Must be after init_scheduling */
@@ -241,6 +307,7 @@ erl_init(int ncpu)
erts_init_trace();
erts_init_binary();
erts_init_bits();
+ erts_code_ix_init();
erts_init_fun_table();
init_atom_table();
init_export_table();
@@ -250,6 +317,7 @@ erl_init(int ncpu)
erts_bif_info_init();
erts_ddll_init();
init_emulator();
+ erts_ptab_init(); /* Must be after init_emulator() */
erts_bp_init();
init_db(); /* Must be after init_emulator */
erts_bif_timer_init();
@@ -257,7 +325,7 @@ erl_init(int ncpu)
init_dist();
erl_drv_thr_init();
erts_init_async();
- init_io();
+ erts_init_io(port_tab_sz, port_tab_sz_ignore_files);
init_load();
erts_init_bif();
erts_init_bif_chksum();
@@ -289,7 +357,8 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
Eterm env;
start_mod = am_atom_put(modname, sys_strlen(modname));
- if (erts_find_function(start_mod, am_start, 2) == NULL) {
+ if (erts_find_function(start_mod, am_start, 2,
+ erts_active_code_ix()) == NULL) {
erl_exit(5, "No function %s:start/2\n", modname);
}
@@ -388,7 +457,7 @@ load_preloaded(void)
if ((code = sys_preload_begin(&preload_p[i])) == 0)
erl_exit(1, "Failed to find preloaded code for module %s\n",
name);
- res = erts_load_module(NULL, 0, NIL, &module_name, code, length);
+ res = erts_preload_module(NULL, 0, NIL, &module_name, code, length);
sys_preload_end(&preload_p[i]);
if (res != NIL)
erl_exit(1,"Failed loading preloaded module %s (%T)\n",
@@ -400,6 +469,7 @@ load_preloaded(void)
/* be helpful (or maybe downright rude:-) */
void erts_usage(void)
{
+ int this_rel = this_rel_num();
erts_fprintf(stderr, "Usage: %s [flags] [ -- [init_args] ]\n", progname(program));
erts_fprintf(stderr, "The flags are:\n\n");
@@ -433,16 +503,20 @@ void erts_usage(void)
/* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */
erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n");
-
+ erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n");
+ erts_fprintf(stderr, " Note that this flag is deprecated!\n");
erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n");
erts_fprintf(stderr, " see the erts_alloc(3) documentation for more info.\n");
erts_fprintf(stderr, "-P number set maximum number of processes on this node,\n");
erts_fprintf(stderr, " valid range is [%d-%d]\n",
- ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES);
+ ERTS_MIN_PROCESSES, ERTS_MAX_PROCESSES);
+ erts_fprintf(stderr, "-Q number set maximum number of ports on this node,\n");
+ erts_fprintf(stderr, " valid range is [%d-%d]\n",
+ ERTS_MIN_PORTS, ERTS_MAX_PORTS);
erts_fprintf(stderr, "-R number set compatibility release number,\n");
erts_fprintf(stderr, " valid range [%d-%d]\n",
- ERTS_MIN_COMPAT_REL, this_rel_num());
+ this_rel-2, this_rel);
erts_fprintf(stderr, "-r force ets memory block to be moved on realloc\n");
erts_fprintf(stderr, "-rg amount set reader groups limit\n");
@@ -462,6 +536,7 @@ void erts_usage(void)
erts_fprintf(stderr, " valid range is [%d-%d]\n",
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
ERTS_SCHED_THREAD_MAX_STACK_SIZE);
+ erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
erts_fprintf(stderr, " schedulers online (n2), valid range for both\n");
erts_fprintf(stderr, " numbers are [1-%d]\n",
@@ -583,8 +658,6 @@ early_init(int *argc, char **argv) /*
erts_compat_rel = this_rel_num();
- erts_use_r9_pids_ports = 0;
-
erts_sys_pre_init();
erts_atomic_init_nob(&exiting, 0);
#ifdef ERTS_SMP
@@ -839,11 +912,13 @@ erl_start(int argc, char **argv)
{
int i = 1;
char* arg=NULL;
- char* Parg = NULL;
int have_break_handler = 1;
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
int ncpu = early_init(&argc, argv);
+ int proc_tab_sz = ERTS_DEFAULT_MAX_PROCESSES;
+ int port_tab_sz = ERTS_DEFAULT_MAX_PORTS;
+ int port_tab_sz_ignore_files = 0;
envbufsz = sizeof(envbuf);
if (erts_sys_getenv_raw(ERL_MAX_ETS_TABLES_ENV, envbuf, &envbufsz) == 0)
@@ -914,6 +989,7 @@ erl_start(int argc, char **argv)
break;
case 'a':
erts_set_user_requested_filename_encoding(ERL_FILENAME_UNKNOWN);
+ break;
default:
erts_fprintf(stderr, "bad filename encoding %s, can be (l,u or a)\n", arg);
erts_usage();
@@ -1094,12 +1170,53 @@ erl_start(int argc, char **argv)
arg);
break;
- case 'P':
- /* set maximum number of processes */
- Parg = get_arg(argv[i]+2, argv[i+1], &i);
- erts_max_processes = atoi(Parg);
- /* Check of result is delayed until later. This is because +R
- may be given after +P. */
+ case 'n':
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ switch (arg[0]) {
+ case 's': /* synchronous */
+ erts_port_synchronous_ops = 1;
+ erts_port_schedule_all_ops = 0;
+ break;
+ case 'a': /* asynchronous */
+ erts_port_synchronous_ops = 0;
+ erts_port_schedule_all_ops = 1;
+ break;
+ case 'd': /* Default - schedule on conflict (asynchronous) */
+ erts_port_synchronous_ops = 0;
+ erts_port_schedule_all_ops = 0;
+ break;
+ default:
+ bad_n_option:
+ erts_fprintf(stderr, "bad -n option %s\n", arg);
+ erts_usage();
+ }
+ if (arg[1] != '\0')
+ goto bad_n_option;
+ break;
+
+ case 'P': /* set maximum number of processes */
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ errno = 0;
+ proc_tab_sz = strtol(arg, NULL, 10);
+ if (errno != 0
+ || proc_tab_sz < ERTS_MIN_PROCESSES
+ || ERTS_MAX_PROCESSES < proc_tab_sz) {
+ erts_fprintf(stderr, "bad number of processes %s\n", arg);
+ erts_usage();
+ }
+ break;
+
+ case 'Q': /* set maximum number of ports */
+ arg = get_arg(argv[i]+2, argv[i+1], &i);
+ errno = 0;
+ port_tab_sz = strtol(arg, NULL, 10);
+ if (errno != 0
+ || port_tab_sz < ERTS_MIN_PROCESSES
+ || ERTS_MAX_PROCESSES < port_tab_sz) {
+ erts_fprintf(stderr, "bad number of ports %s\n", arg);
+ erts_usage();
+ }
+ port_tab_sz_ignore_files = 1;
break;
case 'S' : /* Was handled in early_init() just read past it */
@@ -1201,6 +1318,19 @@ erl_start(int argc, char **argv)
erts_usage();
}
}
+ else if (has_prefix("pp", sub_param)) {
+ arg = get_arg(sub_param+2, argv[i+1], &i);
+ if (sys_strcmp(arg, "true") == 0)
+ erts_port_parallelism = 1;
+ else if (sys_strcmp(arg, "false") == 0)
+ erts_port_parallelism = 0;
+ else {
+ erts_fprintf(stderr,
+ "bad port parallelism scheduling hint %s\n",
+ arg);
+ erts_usage();
+ }
+ }
else if (sys_strcmp("nsp", sub_param) == 0)
erts_use_sender_punish = 0;
else if (sys_strcmp("wt", sub_param) == 0) {
@@ -1282,22 +1412,19 @@ erl_start(int argc, char **argv)
case 'R': {
/* set compatibility release */
+ int this_rel;
arg = get_arg(argv[i]+2, argv[i+1], &i);
erts_compat_rel = atoi(arg);
- if (erts_compat_rel < ERTS_MIN_COMPAT_REL
- || erts_compat_rel > this_rel_num()) {
+ this_rel = this_rel_num();
+ if (erts_compat_rel < this_rel - 2 || this_rel < erts_compat_rel) {
erts_fprintf(stderr, "bad compatibility release number %s\n", arg);
erts_usage();
}
- ASSERT(ERTS_MIN_COMPAT_REL >= 7);
switch (erts_compat_rel) {
- case 7:
- case 8:
- case 9:
- erts_use_r9_pids_ports = 1;
+ /* Currently no compat features... */
default:
break;
}
@@ -1339,8 +1466,6 @@ erl_start(int argc, char **argv)
}
break;
}
- case 'n': /* XXX obsolete */
- break;
case 'c':
if (argv[i][2] == 0) { /* -c: documented option */
erts_disable_tolerant_timeofday = 1;
@@ -1395,15 +1520,6 @@ erl_start(int argc, char **argv)
i++;
}
- /* Delayed check of +P flag */
- if (erts_max_processes < ERTS_MIN_PROCESSES
- || erts_max_processes > ERTS_MAX_PROCESSES
- || (erts_use_r9_pids_ports
- && erts_max_processes > ERTS_MAX_R9_PROCESSES)) {
- erts_fprintf(stderr, "bad number of processes %s\n", Parg);
- erts_usage();
- }
-
/* Restart will not reinstall the break handler */
#ifdef __WIN32__
if (ignore_break)
@@ -1424,9 +1540,14 @@ erl_start(int argc, char **argv)
boot_argc = argc - i; /* Number of arguments to init */
boot_argv = &argv[i];
- erl_init(ncpu);
+ erl_init(ncpu,
+ proc_tab_sz,
+ port_tab_sz,
+ port_tab_sz_ignore_files);
load_preloaded();
+ erts_end_staging_code_ix();
+ erts_commit_staging_code_ix();
erts_initialized = 1;
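The +P handling above no longer defers its sanity check to after argument parsing: the value is read with strtol() under an explicit errno reset and range-checked on the spot, and the new +Q flag follows the same shape. A minimal sketch of that parse-and-validate idiom (the limits and message below are illustrative, not the ERTS constants):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Returns 0 and stores the parsed value on success, -1 on a parse
     * error or an out-of-range value. */
    static int parse_table_size(const char *arg, long min, long max,
                                long *out)
    {
        long val;

        errno = 0;
        val = strtol(arg, NULL, 10);
        if (errno != 0 || val < min || max < val) {
            fprintf(stderr, "bad table size %s\n", arg);
            return -1;
        }
        *out = val;
        return 0;
    }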
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 1f388c1796..69bb4be717 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -82,8 +82,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#ifdef ERTS_SMP
{ "bif_timers", NULL },
{ "reg_tab", NULL },
- { "migration_info_update", NULL },
{ "proc_main", "pid" },
+ { "old_code", "address" },
#ifdef HIPE
{ "hipe_mfait_lock", NULL },
#endif
@@ -93,8 +93,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "proc_msgq", "pid" },
{ "dist_entry", "address" },
{ "dist_entry_links", "address" },
+ { "code_write_permission", NULL },
{ "proc_status", "pid" },
- { "proc_tab", NULL },
{ "ports_snapshot", NULL },
{ "meta_name_tab", "address" },
{ "meta_main_tab_slot", "address" },
@@ -114,9 +114,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
#if defined(ENABLE_CHILD_WAITER_THREAD) || defined(ERTS_SMP)
{ "child_status", NULL },
#endif
-#ifdef __WIN32__
- { "sys_driver_data_lock", NULL },
-#endif
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
@@ -124,7 +121,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "removed_fd_pre_alloc_lock", "address" },
{ "state_prealloc", NULL },
{ "schdlr_sspnd", NULL },
+ { "migration_info_update", NULL },
{ "run_queue", "address" },
+ { "process_table", NULL },
{ "cpu_info", NULL },
{ "pollset", "address" },
#ifdef __WIN32__
@@ -155,12 +154,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pmmap", NULL },
#endif
#ifdef ERTS_SMP
+ { "port_sched_lock", "port_id" },
{ "port_task_pre_alloc_lock", "address" },
- { "port_taskq_pre_alloc_lock", "address" },
{ "proclist_pre_alloc_lock", "address" },
- { "port_tasks_lock", NULL },
- { "get_free_port", NULL },
- { "port_state", "address" },
+ { "port_table", NULL },
{ "xports_list_pre_alloc_lock", "address" },
{ "inet_buffer_stack_lock", NULL },
{ "gc_info", NULL },
@@ -245,6 +242,7 @@ typedef struct {
typedef struct erts_lc_locked_locks_t_ erts_lc_locked_locks_t;
struct erts_lc_locked_locks_t_ {
char *thread_name;
+ int emu_thread;
erts_tid_t tid;
erts_lc_locked_locks_t *next;
erts_lc_locked_locks_t *prev;
@@ -362,6 +360,7 @@ create_locked_locks(char *thread_name)
if (!l_lcks->thread_name)
lc_abort();
+ l_lcks->emu_thread = 0;
l_lcks->tid = erts_thr_self();
l_lcks->required.first = NULL;
l_lcks->required.last = NULL;
@@ -669,7 +668,7 @@ erts_lc_set_thread_name(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
if (!l_lcks)
- (void) create_locked_locks(thread_name);
+ l_lcks = create_locked_locks(thread_name);
else {
ASSERT(l_lcks->thread_name);
free((void *) l_lcks->thread_name);
@@ -677,6 +676,14 @@ erts_lc_set_thread_name(char *thread_name)
if (!l_lcks->thread_name)
lc_abort();
}
+ l_lcks->emu_thread = 1;
+}
+
+int
+erts_lc_is_emu_thr(void)
+{
+ erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ return l_lcks->emu_thread;
}
int
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index b67f36fa06..068340abe7 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2005-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2005-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -102,9 +102,10 @@ void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, Uint16 op_flags);
void erts_lc_require_lock(erts_lc_lock_t *lck);
void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
+int erts_lc_is_emu_thr(void);
#define ERTS_LC_ASSERT(A) \
- ((void) ((A) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
+ ((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
#ifdef ERTS_SMP
#define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A)
#else
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 919567ab27..325d77e911 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -296,36 +296,6 @@ erts_msg_distext2heap(Process *pp,
return THE_NON_VALUE;
}
-static ERTS_INLINE void
-notify_new_message(Process *receiver)
-{
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- & erts_proc_lc_my_proc_locks(receiver));
-
- switch (receiver->status) {
- case P_GARBING:
- switch (receiver->gcstatus) {
- case P_SUSPENDED:
- goto suspended;
- case P_WAITING:
- goto waiting;
- default:
- break;
- }
- break;
- case P_SUSPENDED:
- suspended:
- receiver->rstatus = P_RUNABLE;
- break;
- case P_WAITING:
- waiting:
- erts_add_to_runq(receiver);
- break;
- default:
- break;
- }
-}
-
void
erts_queue_dist_message(Process *rcvr,
ErtsProcLocks *rcvr_locks,
@@ -339,7 +309,7 @@ erts_queue_dist_message(Process *rcvr,
Sint tok_serial = 0;
#endif
#ifdef ERTS_SMP
- ErtsProcLocks need_locks;
+ erts_aint_t state;
#endif
ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
@@ -347,20 +317,21 @@ erts_queue_dist_message(Process *rcvr,
mp = message_alloc();
#ifdef ERTS_SMP
- need_locks = ~(*rcvr_locks) & (ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- if (need_locks) {
- *rcvr_locks |= need_locks;
- if (erts_smp_proc_trylock(rcvr, need_locks) == EBUSY) {
- if (need_locks == ERTS_PROC_LOCK_MSGQ) {
+ if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
+ if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+ ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
+ if (*rcvr_locks & ERTS_PROC_LOCK_STATUS) {
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
- need_locks = (ERTS_PROC_LOCK_MSGQ
- | ERTS_PROC_LOCK_STATUS);
+ need_locks |= ERTS_PROC_LOCK_STATUS;
}
erts_smp_proc_lock(rcvr, need_locks);
}
}
- if (rcvr->is_exiting || ERTS_PROC_PENDING_EXIT(rcvr)) {
+ state = erts_smp_atomic32_read_acqb(&rcvr->state);
+ if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
+ if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
+ erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
/* Drop message if receiver is exiting or has a pending exit ... */
if (is_not_nil(token)) {
ErlHeapFragment *heap_frag;
@@ -376,6 +347,8 @@ erts_queue_dist_message(Process *rcvr,
/* Ahh... need to decode it in order to trace it... */
ErlHeapFragment *mbuf;
Eterm msg;
+ if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
+ erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
message_free(mp);
msg = erts_msg_distext2heap(rcvr, rcvr_locks, &mbuf, &token, dist_ext);
if (is_value(msg))
@@ -437,26 +410,33 @@ erts_queue_dist_message(Process *rcvr,
mp->data.dist_ext = dist_ext;
LINK_MESSAGE(rcvr, mp);
- notify_new_message(rcvr);
+ if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
+ erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+
+ erts_proc_notify_new_message(rcvr);
}
}
/* Add a message last in message queue */
-void
-erts_queue_message(Process* receiver,
- ErtsProcLocks *receiver_locks,
- ErlHeapFragment* bp,
- Eterm message,
- Eterm seq_trace_token
+static Sint
+queue_message(Process *c_p,
+ Process* receiver,
+ ErtsProcLocks *receiver_locks,
+ erts_aint32_t *receiver_state,
+ ErlHeapFragment* bp,
+ Eterm message,
+ Eterm seq_trace_token
#ifdef USE_VM_PROBES
, Eterm dt_utag
#endif
-)
+ )
{
+ Sint res;
ErlMessage* mp;
-#ifdef ERTS_SMP
- ErtsProcLocks need_locks;
-#else
+ int locked_msgq = 0;
+ erts_aint_t state;
+
+#ifndef ERTS_SMP
ASSERT(bp != NULL || receiver->mbuf == NULL);
#endif
@@ -464,31 +444,45 @@ erts_queue_message(Process* receiver,
mp = message_alloc();
+ if (receiver_state)
+ state = *receiver_state;
+ else
+ state = erts_smp_atomic32_read_acqb(&receiver->state);
+
#ifdef ERTS_SMP
- need_locks = ~(*receiver_locks) & (ERTS_PROC_LOCK_MSGQ
- | ERTS_PROC_LOCK_STATUS);
- if (need_locks) {
- *receiver_locks |= need_locks;
- if (erts_smp_proc_trylock(receiver, need_locks) == EBUSY) {
- if (need_locks == ERTS_PROC_LOCK_MSGQ) {
+
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
+ goto exiting;
+
+ if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
+ if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+ ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
+ if (*receiver_locks & ERTS_PROC_LOCK_STATUS) {
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
- need_locks = (ERTS_PROC_LOCK_MSGQ
- | ERTS_PROC_LOCK_STATUS);
+ need_locks |= ERTS_PROC_LOCK_STATUS;
}
erts_smp_proc_lock(receiver, need_locks);
}
+ locked_msgq = 1;
+ state = erts_smp_atomic32_read_nob(&receiver->state);
+ if (receiver_state)
+ *receiver_state = state;
}
- if (receiver->is_exiting || ERTS_PROC_PENDING_EXIT(receiver)) {
- /* Drop message if receiver is exiting or has a pending
- * exit ...
- */
+#endif
+
+ if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
+#ifdef ERTS_SMP
+ exiting:
+#endif
+ /* Drop message if receiver is exiting or has a pending exit... */
+ if (locked_msgq)
+ erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
if (bp)
free_message_buffer(bp);
message_free(mp);
- return;
+ return 0;
}
-#endif
ERL_MESSAGE_TERM(mp) = message;
ERL_MESSAGE_TOKEN(mp) = seq_trace_token;
@@ -498,7 +492,10 @@ erts_queue_message(Process* receiver,
mp->next = NULL;
mp->data.heap_frag = bp;
-#ifdef ERTS_SMP
+#ifndef ERTS_SMP
+ res = receiver->msg.len;
+#else
+ res = receiver->msg_inq.len;
if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
/*
* We move 'in queue' to 'private queue' and place
@@ -508,15 +505,15 @@ erts_queue_message(Process* receiver,
* we don't need to include the 'in queue' in
* the root set when garbage collecting.
*/
+ res += receiver->msg.len;
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
LINK_MESSAGE_PRIVQ(receiver, mp);
}
- else {
+ else
+#endif
+ {
LINK_MESSAGE(receiver, mp);
}
-#else
- LINK_MESSAGE(receiver, mp);
-#endif
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(message_queued)) {
@@ -536,15 +533,43 @@ erts_queue_message(Process* receiver,
tok_label, tok_lastcnt, tok_serial);
}
#endif
- notify_new_message(receiver);
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
+ if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE))
trace_receive(receiver, message);
- }
+
+ if (locked_msgq)
+ erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+
+ erts_proc_notify_new_message(receiver);
#ifndef ERTS_SMP
ERTS_HOLE_CHECK(receiver);
#endif
+ return res;
+}
+
+void
+erts_queue_message(Process* receiver,
+ ErtsProcLocks *receiver_locks,
+ ErlHeapFragment* bp,
+ Eterm message,
+ Eterm seq_trace_token
+#ifdef USE_VM_PROBES
+ , Eterm dt_utag
+#endif
+ )
+{
+ queue_message(NULL,
+ receiver,
+ receiver_locks,
+ NULL,
+ bp,
+ message,
+ seq_trace_token
+#ifdef USE_VM_PROBES
+ , dt_utag
+#endif
+ );
}
void
@@ -576,9 +601,7 @@ erts_move_msg_mbuf_to_heap(Eterm** hpp, ErlOffHeap* off_heap, ErlMessage *msg)
#endif
#ifdef HARD_DEBUG
- ProcBin *dbg_mso_start = off_heap->mso;
- ErlFunThing *dbg_fun_start = off_heap->funs;
- ExternalThing *dbg_external_start = off_heap->externals;
+ struct erl_off_heap_header* dbg_oh_start = off_heap->first;
Eterm dbg_term, dbg_token;
ErlHeapFragment *dbg_bp;
Uint *dbg_hp, *dbg_thp_start;
@@ -752,48 +775,16 @@ copy_done:
int i, j;
ErlHeapFragment* frag;
{
- ProcBin *mso = off_heap->mso;
+ struct erl_off_heap_header* dbg_oh = off_heap->first;
i = j = 0;
- while (mso != dbg_mso_start) {
- mso = mso->next;
+ while (dbg_oh != dbg_oh_start) {
+ dbg_oh = dbg_oh->next;
i++;
}
for (frag=bp; frag; frag=frag->next) {
- mso = frag->off_heap.mso;
- while (mso) {
- mso = mso->next;
- j++;
- }
- }
- ASSERT(i == j);
- }
- {
- ErlFunThing *fun = off_heap->funs;
- i = j = 0;
- while (fun != dbg_fun_start) {
- fun = fun->next;
- i++;
- }
- for (frag=bp; frag; frag=frag->next) {
- fun = frag->off_heap.funs;
- while (fun) {
- fun = fun->next;
- j++;
- }
- }
- ASSERT(i == j);
- }
- {
- ExternalThing *external = off_heap->externals;
- i = j = 0;
- while (external != dbg_external_start) {
- external = external->next;
- i++;
- }
- for (frag=bp; frag; frag=frag->next) {
- external = frag->off_heap.externals;
- while (external) {
- external = external->next;
+ dbg_oh = frag->off_heap.first;
+ while (dbg_oh) {
+ dbg_oh = dbg_oh->next;
j++;
}
}
@@ -878,7 +869,7 @@ erts_move_msg_attached_data_to_heap(Eterm **hpp, ErlOffHeap *ohp, ErlMessage *ms
* Send a local message when sender & receiver processes are known.
*/
-void
+Sint
erts_send_message(Process* sender,
Process* receiver,
ErtsProcLocks *receiver_locks,
@@ -888,6 +879,7 @@ erts_send_message(Process* sender,
Uint msize;
ErlHeapFragment* bp = NULL;
Eterm token = NIL;
+ Sint res = 0;
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(sender_name, 64);
DTRACE_CHARBUF(receiver_name, 64);
@@ -902,8 +894,8 @@ erts_send_message(Process* sender,
#ifdef USE_VM_PROBES
*sender_name = *receiver_name = '\0';
if (DTRACE_ENABLED(message_send)) {
- erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->id);
- erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->id);
+ erts_snprintf(sender_name, sizeof(DTRACE_CHARBUF_NAME(sender_name)), "%T", sender->common.id);
+ erts_snprintf(receiver_name, sizeof(DTRACE_CHARBUF_NAME(receiver_name)), "%T", receiver->common.id);
}
#endif
if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
@@ -925,7 +917,7 @@ erts_send_message(Process* sender,
seq_trace_update_send(sender);
seq_trace_output(stoken, message, SEQ_TRACE_SEND,
- receiver->id, sender);
+ receiver->common.id, sender);
seq_trace_size = 6; /* TUPLE5 */
#ifdef USE_VM_PROBES
}
@@ -956,7 +948,7 @@ erts_send_message(Process* sender,
#ifdef DTRACE_TAG_HARDDEBUG
erts_fprintf(stderr,
"Dtrace -> (%T) Spreading tag (%T) with "
- "message %T!\r\n",sender->id, utag, message);
+ "message %T!\r\n",sender->common.id, utag, message);
#endif
}
#endif
@@ -974,15 +966,17 @@ erts_send_message(Process* sender,
msize, tok_label, tok_lastcnt, tok_serial);
}
#endif
- erts_queue_message(receiver,
- receiver_locks,
- bp,
- message,
- token
+ res = queue_message(NULL,
+ receiver,
+ receiver_locks,
+ NULL,
+ bp,
+ message,
+ token
#ifdef USE_VM_PROBES
- , utag
+ , utag
#endif
- );
+ );
BM_SWAP_TIMER(send,system);
} else if (sender == receiver) {
/* Drop message if receiver has a pending exit ... */
@@ -1026,31 +1020,45 @@ erts_send_message(Process* sender,
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
LINK_MESSAGE_PRIVQ(receiver, mp);
+ res = receiver->msg.len;
+
if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
trace_receive(receiver, message);
}
}
BM_SWAP_TIMER(send,system);
- return;
} else {
#ifdef ERTS_SMP
ErlOffHeap *ohp;
Eterm *hp;
+ erts_aint32_t state;
+
BM_SWAP_TIMER(send,size);
msize = size_object(message);
BM_SWAP_TIMER(size,send);
- hp = erts_alloc_message_heap(msize,&bp,&ohp,receiver,receiver_locks);
+ hp = erts_alloc_message_heap_state(msize,
+ &bp,
+ &ohp,
+ receiver,
+ receiver_locks,
+ &state);
BM_SWAP_TIMER(send,copy);
message = copy_struct(message, msize, &hp, ohp);
BM_MESSAGE_COPIED(msz);
BM_SWAP_TIMER(copy,send);
DTRACE6(message_send, sender_name, receiver_name,
msize, tok_label, tok_lastcnt, tok_serial);
- erts_queue_message(receiver, receiver_locks, bp, message, token
+ res = queue_message(sender,
+ receiver,
+ receiver_locks,
+ &state,
+ bp,
+ message,
+ token
#ifdef USE_VM_PROBES
- , NIL
+ , NIL
#endif
- );
+ );
BM_SWAP_TIMER(send,system);
#else
ErlMessage* mp = message_alloc();
@@ -1080,19 +1088,16 @@ erts_send_message(Process* sender,
mp->next = NULL;
mp->data.attached = NULL;
LINK_MESSAGE(receiver, mp);
+ res = receiver->msg.len;
+ erts_proc_notify_new_message(receiver);
- if (receiver->status == P_WAITING) {
- erts_add_to_runq(receiver);
- } else if (receiver->status == P_SUSPENDED) {
- receiver->rstatus = P_RUNABLE;
- }
if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
trace_receive(receiver, message);
}
BM_SWAP_TIMER(send,system);
#endif /* #ifndef ERTS_SMP */
- return;
}
+ return res;
}
/*
@@ -1131,7 +1136,7 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
save = TUPLE3(hp, am_EXIT, from_copy, mess);
hp += 4;
/* the trace token must in this case be updated by the caller */
- seq_trace_output(token, save, SEQ_TRACE_SEND, to->id, NULL);
+ seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL);
temptoken = copy_struct(token, sz_token, &hp, &bp->off_heap);
erts_queue_message(to, to_locksp, bp, save, temptoken
#ifdef USE_VM_PROBES
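The reworked queue_message()/erts_queue_dist_message() above only acquire ERTS_PROC_LOCK_MSGQ when the caller does not already hold it; if the trylock fails while the status lock is held, the status lock is released and both locks are retaken in the defined order, and the message is dropped if the receiver turns out to be exiting. The shape of that trylock-then-reorder step, sketched with pthread mutexes standing in for the per-process lock flags (not the ERTS lock implementation):

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t msgq;    /* stands in for ERTS_PROC_LOCK_MSGQ   */
        pthread_mutex_t status;  /* stands in for ERTS_PROC_LOCK_STATUS */
    } proc_locks_sketch;

    /* Acquire the message-queue lock when the caller may already hold the
     * status lock. msgq is ordered before status, so on a failed trylock
     * the status lock is dropped and both are retaken in order. */
    static void lock_msgq_sketch(proc_locks_sketch *p, int have_status)
    {
        if (pthread_mutex_trylock(&p->msgq) != 0) {
            if (have_status)
                pthread_mutex_unlock(&p->status);
            pthread_mutex_lock(&p->msgq);
            if (have_status)
                pthread_mutex_lock(&p->status);
        }
    }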
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 3e9a24ee81..771eba431f 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -90,7 +90,7 @@ typedef struct {
ErlMessage* first;
ErlMessage** last; /* point to the last next pointer */
ErlMessage** save;
- int len; /* queue length */
+ Sint len; /* queue length */
/*
* The following two fields are used by the recv_mark/1 and
@@ -105,7 +105,7 @@ typedef struct {
typedef struct {
ErlMessage* first;
ErlMessage** last; /* point to the last next pointer */
- int len; /* queue length */
+ Sint len; /* queue length */
} ErlMessageInQueue;
#endif
@@ -125,16 +125,16 @@ typedef struct {
#ifdef ERTS_SMP
/* Move in message queue to end of private message queue */
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \
-do { \
- if ((P)->msg_inq.first) { \
- *(P)->msg.last = (P)->msg_inq.first; \
- (P)->msg.last = (P)->msg_inq.last; \
- (P)->msg.len += (P)->msg_inq.len; \
- (P)->msg_inq.first = NULL; \
- (P)->msg_inq.last = &(P)->msg_inq.first; \
- (P)->msg_inq.len = 0; \
- } \
+#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(P) \
+do { \
+ if ((P)->msg_inq.first) { \
+ *(P)->msg.last = (P)->msg_inq.first; \
+ (P)->msg.last = (P)->msg_inq.last; \
+ (P)->msg.len += (P)->msg_inq.len; \
+ (P)->msg_inq.first = NULL; \
+ (P)->msg_inq.last = &(P)->msg_inq.first; \
+ (P)->msg_inq.len = 0; \
+ } \
} while (0)
/* Add message last in message queue */
@@ -234,7 +234,7 @@ void erts_queue_message(Process*, ErtsProcLocks*, ErlHeapFragment*, Eterm, Eterm
#endif
);
void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
-void erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
+Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
void erts_move_msg_mbuf_to_heap(Eterm**, ErlOffHeap*, ErlMessage *);
@@ -245,6 +245,9 @@ void erts_move_msg_attached_data_to_heap(Eterm **, ErlOffHeap *, ErlMessage *);
Eterm erts_msg_distext2heap(Process *, ErtsProcLocks *, ErlHeapFragment **,
Eterm *, ErtsDistExternal *);
+void erts_cleanup_offheap(ErlOffHeap *offheap);
+
+
ERTS_GLB_INLINE Uint erts_msg_used_frag_sz(const ErlMessage *msg);
ERTS_GLB_INLINE Uint erts_msg_attached_data_size(ErlMessage *msg);
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index 1a84950120..63175c44d6 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -971,7 +971,7 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1)
}
} else {
erts_printf("Dumping pid monitors--------------------\n");
- erts_dump_monitors(rp->monitors,0);
+ erts_dump_monitors(ERTS_P_MONITORS(rp),0);
erts_printf("Monitors dumped-------------------------\n");
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
@@ -985,12 +985,15 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1)
Process *rp;
DistEntry *dep;
if (is_internal_port(pid)) {
- Port *rport = erts_id2port(pid, p, ERTS_PROC_LOCK_MAIN);
+ Port *rport = erts_id2port_sflgs(pid,
+ p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
if (rport) {
erts_printf("Dumping port links----------------------\n");
- erts_dump_links(rport->nlinks,0);
+ erts_dump_links(ERTS_P_LINKS(rport), 0);
erts_printf("Links dumped----------------------------\n");
- erts_smp_port_unlock(rport);
+ erts_port_release(rport);
BIF_RET(am_true);
} else {
BIF_ERROR(p,BADARG);
@@ -1014,7 +1017,7 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1)
} else {
erts_printf("Dumping pid links-----------------------\n");
- erts_dump_links(rp->nlinks,0);
+ erts_dump_links(ERTS_P_LINKS(rp), 0);
erts_printf("Links dumped----------------------------\n");
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
BIF_RET(am_true);
diff --git a/erts/emulator/beam/erl_monitors.h b/erts/emulator/beam/erl_monitors.h
index d3f6d410dd..a7fa4e0145 100644
--- a/erts/emulator/beam/erl_monitors.h
+++ b/erts/emulator/beam/erl_monitors.h
@@ -137,8 +137,6 @@ typedef struct erts_suspend_monitor {
#define ERTS_LINK_ROOT(Linkp) ((Linkp)->shared.root)
#define ERTS_LINK_REFC(Linkp) ((Linkp)->shared.refc)
-#define ERTS_LINK_ROOT_AS_UINT(Linkp) (*((Uint *) &((Linkp)->root)))
-
Uint erts_tot_link_lh_size(void);
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index 358c67bf20..5a6fb8589f 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -611,7 +611,7 @@ void erts_mtrace_init(char *receiver, char *nodename)
if (erts_sock_gethostname(hostname, MAXHOSTNAMELEN) != 0)
hostname[0] = '\0';
hostname[MAXHOSTNAMELEN-1] = '\0';
- sys_get_pid(pid);
+ sys_get_pid(pid, sizeof(pid));
write_trace_header(nodename ? nodename : "", pid, hostname);
erts_mtrace_update_heap_size();
}
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 4109c20fa7..1bd2d933b2 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -263,7 +263,7 @@ ErlNifEnv* enif_alloc_env(void)
HEAP_LIMIT(&msg_env->phony_proc) = phony_heap;
HEAP_END(&msg_env->phony_proc) = phony_heap;
MBUF(&msg_env->phony_proc) = NULL;
- msg_env->phony_proc.id = ERTS_INVALID_PID;
+ msg_env->phony_proc.common.id = ERTS_INVALID_PID;
#ifdef FORCE_HEAP_FRAGS
msg_env->phony_proc.space_verified = 0;
msg_env->phony_proc.space_verified_from = NULL;
@@ -287,7 +287,7 @@ void enif_clear_env(ErlNifEnv* env)
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)env;
Process* p = &menv->phony_proc;
ASSERT(p == menv->env.proc);
- ASSERT(p->id == ERTS_INVALID_PID);
+ ASSERT(p->common.id == ERTS_INVALID_PID);
ASSERT(MBUF(p) == menv->env.heap_frag);
if (MBUF(p) != NULL) {
erts_cleanup_offheap(&MSO(p));
@@ -315,10 +315,11 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#endif
Eterm receiver = to_pid->pid;
int flush_me = 0;
+ int scheduler = erts_get_scheduler_id() != 0;
if (env != NULL) {
c_p = env->proc;
- if (receiver == c_p->id) {
+ if (receiver == c_p->common.id) {
rp_locks = ERTS_PROC_LOCK_MAIN;
flush_me = 1;
}
@@ -334,10 +335,13 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
rp_had_locks = rp_locks;
#endif
- rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
- receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC);
+
+ rp = (scheduler
+ ? erts_proc_lookup(receiver)
+ : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
+ receiver, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC));
if (rp == NULL) {
- ASSERT(env == NULL || receiver != c_p->id);
+ ASSERT(env == NULL || receiver != c_p->common.id);
return 0;
}
flush_env(msg_env);
@@ -362,12 +366,12 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
, NIL
#endif
);
- if (rp_locks) {
- ERTS_SMP_LC_ASSERT(rp_locks == (rp_had_locks | (ERTS_PROC_LOCK_MSGQ |
- ERTS_PROC_LOCK_STATUS)));
- erts_smp_proc_unlock(rp, (ERTS_PROC_LOCK_MSGQ | ERTS_PROC_LOCK_STATUS));
- }
- erts_smp_proc_dec_refc(rp);
+ if (c_p == rp)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ if (!scheduler)
+ erts_smp_proc_dec_refc(rp);
if (flush_me) {
cache_env(env);
}
@@ -393,7 +397,7 @@ static int is_offheap(const ErlOffHeap* oh)
ErlNifPid* enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)
{
- pid->pid = caller_env->proc->id;
+ pid->pid = caller_env->proc->common.id;
return pid;
}
int enif_get_local_pid(ErlNifEnv* env, ERL_NIF_TERM term, ErlNifPid* pid)
@@ -501,7 +505,7 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
{
struct enif_tmp_obj_t* tobj;
ErtsAlcType_t allocator;
- Uint sz;
+ ErlDrvSizeT sz;
if (is_binary(term)) {
return enif_inspect_binary(env,term,bin);
}
@@ -527,7 +531,7 @@ int enif_inspect_iolist_as_binary(ErlNifEnv* env, Eterm term, ErlNifBinary* bin)
bin->size = sz;
bin->bin_term = THE_NON_VALUE;
bin->ref_bin = NULL;
- io_list_to_buf(term, (char*) bin->data, sz);
+ erts_iolist_to_buf(term, (char*) bin->data, sz);
ADD_READONLY_CHECK(env, bin->data, bin->size);
return 1;
}
@@ -1392,6 +1396,44 @@ size_t enif_sizeof_resource(void* obj)
return ERTS_MAGIC_BIN_DATA_SIZE(bin) - offsetof(ErlNifResource,data);
}
+
+void* enif_dlopen(const char* lib,
+ void (*err_handler)(void*,const char*), void* err_arg)
+{
+ ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
+ void* handle;
+ void* init_func;
+ if (erts_sys_ddll_open2(lib, &handle, &errdesc) == ERL_DE_NO_ERROR) {
+ if (erts_sys_ddll_load_nif_init(handle, &init_func, &errdesc) == ERL_DE_NO_ERROR) {
+ erts_sys_ddll_call_nif_init(init_func);
+ }
+ }
+ else {
+ if (err_handler != NULL) {
+ (*err_handler)(err_arg, errdesc.str);
+ }
+ handle = NULL;
+ }
+ erts_sys_ddll_free_error(&errdesc);
+ return handle;
+}
+
+void* enif_dlsym(void* handle, const char* symbol,
+ void (*err_handler)(void*,const char*), void* err_arg)
+{
+ ErtsSysDdllError errdesc = ERTS_SYS_DDLL_ERROR_INIT;
+ void* ret;
+ if (erts_sys_ddll_sym2(handle, symbol, &ret, &errdesc) != ERL_DE_NO_ERROR) {
+ if (err_handler != NULL) {
+ (*err_handler)(err_arg, errdesc.str);
+ }
+ erts_sys_ddll_free_error(&errdesc);
+ return NULL;
+ }
+ return ret;
+}
+
+
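
[Editor's note] enif_dlopen() and enif_dlsym() are new in this patch; only their signatures above are from the source. The library name, symbol name, and helper functions below are made up for illustration, showing how a NIF's load callback might pull in an optional native dependency through the error-callback style these functions use:

    #include <stdio.h>
    #include "erl_nif.h"

    /* Matches the err_handler type: gets err_arg plus a textual reason. */
    static void dl_err(void *arg, const char *reason)
    {
        fprintf(stderr, "%s: %s\n", (const char *) arg, reason);
    }

    /* Hypothetical helper; "libcrypto_extra.so" and "extra_init" are examples. */
    static int try_load_extra(void)
    {
        void *handle = enif_dlopen("libcrypto_extra.so", dl_err, (void *) "dlopen");
        if (handle == NULL)
            return 0;                      /* dl_err has already reported why */
        void (*extra_init)(void) =
            (void (*)(void)) enif_dlsym(handle, "extra_init", dl_err, (void *) "dlsym");
        if (extra_init == NULL)
            return 0;
        extra_init();
        return 1;
    }
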
/***************************************************************************
** load_nif/2 **
***************************************************************************/
@@ -1524,6 +1566,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
if (len < 0) {
BIF_ERROR(BIF_P, BADARG);
}
+
lib_name = (char *) erts_alloc(ERTS_ALC_T_TMP, len + 1);
if (intlist_to_buf(BIF_ARG_1, lib_name, len) != len) {
@@ -1532,6 +1575,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
lib_name[len] = '\0';
+ if (!erts_try_seize_code_write_permission(BIF_P)) {
+ erts_free(ERTS_ALC_T_TMP, lib_name);
+ ERTS_BIF_YIELD2(bif_export[BIF_load_nif_2],
+ BIF_P, BIF_ARG_1, BIF_ARG_2);
+ }
+
/* Block system (is this the right place to do it?) */
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
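
[Editor's note] The new guard makes load_nif/2 back off instead of proceeding when another process holds the code-write permission: ERTS_BIF_YIELD2 re-schedules the BIF with the same arguments, and the temporary lib_name buffer is freed first so nothing leaks across the retry. A rough standalone sketch of the same "try to seize, otherwise release temporaries and retry later" shape using a C11 atomic flag; the real erts_try_seize_code_write_permission() takes the calling process and can presumably queue it rather than spin, which this toy does not:

    #include <stdatomic.h>
    #include <sched.h>

    static atomic_flag code_write_permission = ATOMIC_FLAG_INIT;

    /* 1 if seized, 0 if the caller should back off and retry ("yield"). */
    static int try_seize_code_write_permission(void)
    {
        return !atomic_flag_test_and_set_explicit(&code_write_permission,
                                                  memory_order_acquire);
    }

    static void release_code_write_permission(void)
    {
        atomic_flag_clear_explicit(&code_write_permission, memory_order_release);
    }

    static void do_code_change(void)
    {
        while (!try_seize_code_write_permission())
            sched_yield();        /* a real scheduler would suspend the process */
        /* ... patch the code ... */
        release_code_write_permission();
    }
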
@@ -1545,11 +1594,11 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ASSERT(caller != NULL);
mod_atom = caller[0];
ASSERT(is_atom(mod_atom));
- mod=erts_get_module(mod_atom);
+ mod=erts_get_module(mod_atom, erts_active_code_ix());
ASSERT(mod != NULL);
- if (!in_area(caller, mod->code, mod->code_length)) {
- ASSERT(in_area(caller, mod->old_code, mod->old_code_length));
+ if (!in_area(caller, mod->curr.code, mod->curr.code_length)) {
+ ASSERT(in_area(caller, mod->old.code, mod->old.code_length));
ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
"module '%T' not allowed", mod_atom);
@@ -1595,7 +1644,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
BeamInstr** code_pp;
ErlNifFunc* f = &entry->funcs[i];
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom)
- || (code_pp = get_func_pp(mod->code, f_atom, f->arity))==NULL) {
+ || (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
}
@@ -1624,18 +1673,18 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_refc_init(&lib->rt_dtor_cnt, 0);
lib->mod = mod;
env.mod_nif = lib;
- if (mod->nif != NULL) { /* Reload */
+ if (mod->curr.nif != NULL) { /* Reload */
int k;
- lib->priv_data = mod->nif->priv_data;
+ lib->priv_data = mod->curr.nif->priv_data;
- ASSERT(mod->nif->entry != NULL);
+ ASSERT(mod->curr.nif->entry != NULL);
if (entry->reload == NULL) {
ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library.");
goto error;
}
/* Check that no NIF is removed */
- for (k=0; k < mod->nif->entry->num_of_funcs; k++) {
- ErlNifFunc* old_func = &mod->nif->entry->funcs[k];
+ for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) {
+ ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k];
for (i=0; i < entry->num_of_funcs; i++) {
if (old_func->arity == entry->funcs[i].arity
&& sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
@@ -1656,24 +1705,24 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ret = load_nif_error(BIF_P, reload, "Library reload-call unsuccessful.");
}
else {
- mod->nif->entry = NULL; /* to prevent 'unload' callback */
- erts_unload_nif(mod->nif);
+ mod->curr.nif->entry = NULL; /* to prevent 'unload' callback */
+ erts_unload_nif(mod->curr.nif);
reload_warning = 1;
}
}
else {
lib->priv_data = NULL;
- if (mod->old_nif != NULL) { /* Upgrade */
- void* prev_old_data = mod->old_nif->priv_data;
+ if (mod->old.nif != NULL) { /* Upgrade */
+ void* prev_old_data = mod->old.nif->priv_data;
if (entry->upgrade == NULL) {
ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
goto error;
}
erts_pre_nif(&env, BIF_P, lib);
- veto = entry->upgrade(&env, &lib->priv_data, &mod->old_nif->priv_data, BIF_ARG_2);
+ veto = entry->upgrade(&env, &lib->priv_data, &mod->old.nif->priv_data, BIF_ARG_2);
erts_post_nif(&env);
if (veto) {
- mod->old_nif->priv_data = prev_old_data;
+ mod->old.nif->priv_data = prev_old_data;
ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful.");
}
/*else if (mod->old_nif->priv_data != prev_old_data) {
@@ -1693,20 +1742,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/*
** Everything ok, patch the beam code with op_call_nif
*/
- mod->nif = lib;
+ mod->curr.nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
BeamInstr* code_ptr;
erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom);
- code_ptr = *get_func_pp(mod->code, f_atom, entry->funcs[i].arity);
+ code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity);
if (code_ptr[1] == 0) {
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
else { /* Function traced, patch the original instruction word */
- BpData** bps = (BpData**) code_ptr[1];
- BpData* bp = (BpData*) bps[erts_bp_sched2ix()];
- bp->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ GenericBp* g = (GenericBp *) code_ptr[1];
+ ASSERT(code_ptr[5+0] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint));
+ g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
code_ptr[5+2] = (BeamInstr) lib;
@@ -1726,6 +1776,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_release_code_write_permission();
erts_free(ERTS_ALC_T_TMP, lib_name);
if (reload_warning) {
@@ -1793,7 +1844,7 @@ void erl_nif_init()
#ifdef USE_VM_PROBES
void dtrace_nifenv_str(ErlNifEnv *env, char *process_buf)
{
- dtrace_pid_str(env->proc->id, process_buf);
+ dtrace_pid_str(env->proc->common.id, process_buf);
}
#endif
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 50f99c90c4..93e56332e1 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -187,11 +187,7 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
#else
# define ERL_NIF_INIT_GLOB
# define ERL_NIF_INIT_BODY
-# if defined(VXWORKS)
-# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _init(void)
-# else
-# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
-# endif
+# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* nif_init(void)
#endif
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index 6396af09d0..51ff1eaa48 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -138,6 +138,8 @@ ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_make_uint64,(ErlNifEnv*, ErlNifUInt64));
ERL_NIF_API_FUNC_DECL(int,enif_is_exception,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int,enif_make_reverse_list,(ErlNifEnv*, ERL_NIF_TERM term, ERL_NIF_TERM *list));
ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
+ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg));
+ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg));
/*
** Add new entries here to keep compatibility on Windows!!!
@@ -260,6 +262,8 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
+# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen)
+# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym)
/*
** Add new entries here
diff --git a/erts/emulator/beam/erl_node_container_utils.h b/erts/emulator/beam/erl_node_container_utils.h
index 329a2204cc..667bda255b 100644
--- a/erts/emulator/beam/erl_node_container_utils.h
+++ b/erts/emulator/beam/erl_node_container_utils.h
@@ -20,7 +20,7 @@
#ifndef ERL_NODE_CONTAINER_UTILS_H__
#define ERL_NODE_CONTAINER_UTILS_H__
-#include "erl_term.h"
+#include "erl_ptab.h"
/*
* Note regarding node containers:
@@ -29,9 +29,6 @@
* the emulator) for the Erlang data types that contain a reference
* to a node, i.e. pids, ports, and references.
*
- * Observe! The layouts of the node container data types have been
- * changed in R9.
- *
* Node containers are divided into internal and external node containers.
* An internal node container refers to the current incarnation of the
* node which it resides on. An external node container refers to
@@ -52,13 +49,6 @@
* reference is a boxed data type. An internal node container has an
* implicit reference to the 'erts_this_node' element in the node table.
*
- * Due to the R9 changes in layouts of node containers there are room to
- * store more data than previously. Today (R9) this extra space is unused,
- * but it is planned to be used in the future. For example only 18 bits
- * are used for data in a pid but there is room for 28 bits of data (on a
- * 32-bit machine). Some preparations have been made in the emulator for
- * usage of this extra space.
- *
* OBSERVE! Pids don't use fixed size 'serial' and 'number' fields any
* more. Previously the 15 bit 'number' field of a pid was used as index
* into the process table, and the 3 bit 'serial' field was used as a
@@ -104,8 +94,6 @@
#define internal_dist_entry(x) (erts_this_node->dist_entry)
#define external_dist_entry(x) (external_node((x))->dist_entry)
-extern int erts_use_r9_pids_ports;
-
/*
* For this node (and previous incarnations of this node), 0 is used as
* channel no. For other nodes, the atom index of the atom corresponding
@@ -128,8 +116,20 @@ extern int erts_use_r9_pids_ports;
* Pids *
\* */
-#define internal_pid_index(x) (internal_pid_data((x)) \
- & erts_process_tab_index_mask)
+extern ErtsPTab erts_proc;
+
+#define make_internal_pid(D) erts_ptab_make_id(&erts_proc, \
+ (D), \
+ _TAG_IMMED1_PID)
+
+#define internal_pid_index(PID) (ASSERT_EXPR(is_internal_pid((PID))), \
+ erts_ptab_id2pix(&erts_proc, (PID)))
+
+#define internal_pid_data(PID) (ASSERT_EXPR(is_internal_pid((PID))), \
+ erts_ptab_id2data(&erts_proc, (PID)))
+
+#define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x)))
+#define internal_pid_serial(x) _GET_PID_SER(internal_pid_data((x)))
#define internal_pid_node_name(x) (internal_pid_node((x))->sysname)
#define external_pid_node_name(x) (external_pid_node((x))->sysname)
@@ -169,34 +169,37 @@ extern int erts_use_r9_pids_ports;
|| is_external_pid((x)))
#define is_not_pid(x) (!is_pid(x))
-#define ERTS_MAX_R9_PROCESSES (1 << ERTS_R9_PROC_BITS)
-
/*
* Maximum number of processes. We want the number to fit in a SMALL on
* 32-bit CPU.
*/
-#define ERTS_MAX_PROCESSES ((SWORD_CONSTANT(1) << 27)-1)
-#if (ERTS_MAX_PROCESSES > MAX_SMALL)
-# error "The maximum number of processes must fit in a SMALL."
-#endif
-
+#define ERTS_MAX_PROCESSES (ERTS_PTAB_MAX_SIZE-1)
#define ERTS_MAX_PID_DATA ((1 << _PID_DATA_SIZE) - 1)
#define ERTS_MAX_PID_NUMBER ((1 << _PID_NUM_SIZE) - 1)
#define ERTS_MAX_PID_SERIAL ((1 << _PID_SER_SIZE) - 1)
-#define ERTS_MAX_PID_R9_SERIAL ((1 << _PID_R9_SER_SIZE) - 1)
-#define ERTS_R9_PROC_BITS (_PID_R9_SER_SIZE + _PID_NUM_SIZE)
#define ERTS_PROC_BITS (_PID_SER_SIZE + _PID_NUM_SIZE)
-#define ERTS_INVALID_PID make_internal_pid(ERTS_MAX_PID_DATA)
+#define ERTS_INVALID_PID ERTS_PTAB_INVALID_ID(_TAG_IMMED1_PID)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Ports *
\* */
-#define internal_port_index(x) (internal_port_data((x)) \
- & erts_port_tab_index_mask)
+extern ErtsPTab erts_port;
+
+#define make_internal_port(D) erts_ptab_make_id(&erts_port, \
+ (D), \
+ _TAG_IMMED1_PORT)
+
+#define internal_port_index(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \
+ erts_ptab_id2pix(&erts_port, (PRT)))
+
+#define internal_port_data(PRT) (ASSERT_EXPR(is_internal_port((PRT))), \
+ erts_ptab_id2data(&erts_port, (PRT)))
+
+#define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x)))
#define internal_port_node_name(x) (internal_port_node((x))->sysname)
#define external_port_node_name(x) (external_port_node((x))->sysname)
@@ -235,18 +238,18 @@ extern int erts_use_r9_pids_ports;
#define is_not_port(x) (!is_port(x))
/* Highest port-ID part in a term of type Port
- Not necessarily the same as the variable erts_max_ports
+ Not necessarily the same as the current maximum port table size
which defines the maximum number of simultaneous Ports
in the Erlang node. ERTS_MAX_PORTS is a hard upper limit.
*/
-#define ERTS_MAX_R9_PORTS (1 << ERTS_R9_PORTS_BITS)
-#define ERTS_MAX_PORTS (1 << ERTS_PORTS_BITS)
-
+#define ERTS_MAX_PORTS (ERTS_PTAB_MAX_SIZE-1)
#define ERTS_MAX_PORT_DATA ((1 << _PORT_DATA_SIZE) - 1)
#define ERTS_MAX_PORT_NUMBER ((1 << _PORT_NUM_SIZE) - 1)
-#define ERTS_R9_PORTS_BITS (_PORT_R9_NUM_SIZE)
#define ERTS_PORTS_BITS (_PORT_NUM_SIZE)
+
+#define ERTS_INVALID_PORT ERTS_PTAB_INVALID_ID(_TAG_IMMED1_PORT)
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Refs *
\* */
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index 5574cb0ac4..ebfba065d1 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -116,8 +116,7 @@ dist_table_alloc(void *dep_tmpl)
dep->qsize = 0;
dep->out_queue.first = NULL;
dep->out_queue.last = NULL;
- dep->suspended.first = NULL;
- dep->suspended.last = NULL;
+ dep->suspended = NULL;
dep->finalized_out_queue.first = NULL;
dep->finalized_out_queue.last = NULL;
@@ -769,8 +768,7 @@ void erts_init_node_tables(void)
erts_this_dist_entry->qsize = 0;
erts_this_dist_entry->out_queue.first = NULL;
erts_this_dist_entry->out_queue.last = NULL;
- erts_this_dist_entry->suspended.first = NULL;
- erts_this_dist_entry->suspended.last = NULL;
+ erts_this_dist_entry->suspended = NULL;
erts_this_dist_entry->finalized_out_queue.first = NULL;
erts_this_dist_entry->finalized_out_queue.last = NULL;
@@ -1268,7 +1266,7 @@ setup_reference_table(void)
ErlHeapFragment *hfp;
DistEntry *dep;
HashInfo hi;
- int i;
+ int i, max;
DeclareTmpHeapNoproc(heap,3);
inserted_bins = NULL;
@@ -1297,22 +1295,24 @@ setup_reference_table(void)
UnUseTmpHeapNoproc(3);
+ max = erts_ptab_max(&erts_proc);
/* Insert all processes */
- for (i = 0; i < erts_max_processes; i++)
- if (process_tab[i]) {
+ for (i = 0; i < max; i++) {
+ Process *proc = erts_pix2proc(i);
+ if (proc) {
ErlMessage *msg;
/* Insert Heap */
- insert_offheap(&(process_tab[i]->off_heap),
+ insert_offheap(&(proc->off_heap),
HEAP_REF,
- process_tab[i]->id);
+ proc->common.id);
/* Insert message buffers */
- for(hfp = process_tab[i]->mbuf; hfp; hfp = hfp->next)
+ for(hfp = proc->mbuf; hfp; hfp = hfp->next)
insert_offheap(&(hfp->off_heap),
HEAP_REF,
- process_tab[i]->id);
+ proc->common.id);
/* Insert msg msg buffers */
- for (msg = process_tab[i]->msg.first; msg; msg = msg->next) {
+ for (msg = proc->msg.first; msg; msg = msg->next) {
ErlHeapFragment *heap_frag = NULL;
if (msg->data.attached) {
if (is_value(ERL_MESSAGE_TERM(msg)))
@@ -1320,7 +1320,7 @@ setup_reference_table(void)
else {
if (msg->data.dist_ext->dep)
insert_dist_entry(msg->data.dist_ext->dep,
- HEAP_REF, process_tab[i]->id, 0);
+ HEAP_REF, proc->common.id, 0);
if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
}
@@ -1328,10 +1328,10 @@ setup_reference_table(void)
if (heap_frag)
insert_offheap(&(heap_frag->off_heap),
HEAP_REF,
- process_tab[i]->id);
+ proc->common.id);
}
#ifdef ERTS_SMP
- for (msg = process_tab[i]->msg_inq.first; msg; msg = msg->next) {
+ for (msg = proc->msg_inq.first; msg; msg = msg->next) {
ErlHeapFragment *heap_frag = NULL;
if (msg->data.attached) {
if (is_value(ERL_MESSAGE_TERM(msg)))
@@ -1339,7 +1339,7 @@ setup_reference_table(void)
else {
if (msg->data.dist_ext->dep)
insert_dist_entry(msg->data.dist_ext->dep,
- HEAP_REF, process_tab[i]->id, 0);
+ HEAP_REF, proc->common.id, 0);
if (is_not_nil(ERL_MESSAGE_TOKEN(msg)))
heap_frag = erts_dist_ext_trailer(msg->data.dist_ext);
}
@@ -1347,42 +1347,55 @@ setup_reference_table(void)
if (heap_frag)
insert_offheap(&(heap_frag->off_heap),
HEAP_REF,
- process_tab[i]->id);
+ proc->common.id);
}
#endif
/* Insert links */
- if(process_tab[i]->nlinks)
- insert_links(process_tab[i]->nlinks, process_tab[i]->id);
- if(process_tab[i]->monitors)
- insert_monitors(process_tab[i]->monitors, process_tab[i]->id);
+ if (ERTS_P_LINKS(proc))
+ insert_links(ERTS_P_LINKS(proc), proc->common.id);
+ if (ERTS_P_MONITORS(proc))
+ insert_monitors(ERTS_P_MONITORS(proc), proc->common.id);
/* Insert controller */
{
- DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(process_tab[i]);
+ DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
if (dep)
- insert_dist_entry(dep, CTRL_REF, process_tab[i]->id, 0);
+ insert_dist_entry(dep, CTRL_REF, proc->common.id, 0);
}
}
+ }
#ifdef ERTS_SMP
erts_foreach_sys_msg_in_q(insert_sys_msg);
#endif
/* Insert all ports */
- for (i = 0; i < erts_max_ports; i++) {
- if (erts_port[i].status & ERTS_PORT_SFLGS_DEAD)
+ max = erts_ptab_max(&erts_port);
+ for (i = 0; i < max; i++) {
+ erts_aint32_t state;
+ Port *prt;
+
+ prt = erts_pix2port(i);
+ if (!prt)
+ continue;
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_DEAD)
continue;
/* Insert links */
- if(erts_port[i].nlinks)
- insert_links(erts_port[i].nlinks, erts_port[i].id);
+ if (ERTS_P_LINKS(prt))
+ insert_links(ERTS_P_LINKS(prt), prt->common.id);
+ /* Insert monitors */
+ if (ERTS_P_MONITORS(prt))
+ insert_monitors(ERTS_P_MONITORS(prt), prt->common.id);
/* Insert port data */
- for(hfp = erts_port[i].bp; hfp; hfp = hfp->next)
- insert_offheap(&(hfp->off_heap), HEAP_REF, erts_port[i].id);
+ for(hfp = prt->bp; hfp; hfp = hfp->next)
+ insert_offheap(&(hfp->off_heap), HEAP_REF, prt->common.id);
/* Insert controller */
- if (erts_port[i].dist_entry)
- insert_dist_entry(erts_port[i].dist_entry,
+ if (prt->dist_entry)
+ insert_dist_entry(prt->dist_entry,
CTRL_REF,
- erts_port[i].id,
+ prt->common.id,
0);
}
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index 4a015bdef9..af60071ea5 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -84,10 +84,6 @@ typedef struct {
} ErtsDistOutputQueue;
struct ErtsProcList_;
-typedef struct {
- struct ErtsProcList_ *first;
- struct ErtsProcList_ *last;
-} ErtsDistSuspended;
/*
* Lock order:
@@ -100,7 +96,6 @@ typedef struct {
*/
struct erl_link;
-struct port;
typedef struct dist_entry_ {
HashBucket hash_bucket; /* Hash bucket */
@@ -135,13 +130,13 @@ typedef struct dist_entry_ {
Uint32 qflgs;
Sint qsize;
ErtsDistOutputQueue out_queue;
- ErtsDistSuspended suspended;
+ struct ErtsProcList_ *suspended;
ErtsDistOutputQueue finalized_out_queue;
erts_smp_atomic_t dist_cmd_scheduled;
ErtsPortTaskHandle dist_cmd;
- Uint (*send)(struct port *prt, ErtsDistOutputBuf *obuf);
+ Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
struct cache* cache; /* The atom cache */
} DistEntry;
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
new file mode 100644
index 0000000000..fb07f3d5bc
--- /dev/null
+++ b/erts/emulator/beam/erl_port.h
@@ -0,0 +1,944 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_PORT_TYPE__
+#define ERL_PORT_TYPE__
+typedef struct _erl_drv_port Port;
+typedef struct ErtsProc2PortSigData_ ErtsProc2PortSigData;
+#endif
+
+#if !defined(ERL_PORT_H__) && !defined(ERL_PORT_GET_PORT_TYPE_ONLY__)
+#define ERL_PORT_H__
+
+#include "erl_port_task.h"
+#include "erl_ptab.h"
+#include "erl_thr_progress.h"
+#include "erl_trace.h"
+
+#define ERTS_DEFAULT_MAX_PORTS (1 << 16)
+#define ERTS_MIN_PORTS 1024
+
+extern int erts_port_synchronous_ops;
+extern int erts_port_schedule_all_ops;
+extern int erts_port_parallelism;
+
+typedef struct erts_driver_t_ erts_driver_t;
+
+#define ERTS_INVALID_ERL_DRV_PORT ((ErlDrvPort) (SWord) -1)
+#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
+
+typedef struct {
+ ErlDrvSizeT size; /* total size in bytes */
+
+ SysIOVec* v_start;
+ SysIOVec* v_end;
+ SysIOVec* v_head;
+ SysIOVec* v_tail;
+ SysIOVec v_small[SMALL_IO_QUEUE];
+
+ ErlDrvBinary** b_start;
+ ErlDrvBinary** b_end;
+ ErlDrvBinary** b_head;
+ ErlDrvBinary** b_tail;
+ ErlDrvBinary* b_small[SMALL_IO_QUEUE];
+} ErlIOQueue;
+
+typedef struct line_buf { /* Buffer used in line oriented I/O */
+ ErlDrvSizeT bufsiz; /* Size of character buffer */
+ ErlDrvSizeT ovlen; /* Length of overflow data */
+ ErlDrvSizeT ovsiz; /* Actual size of overflow buffer */
+ char data[1]; /* Starting point of buffer data,
+ data[0] is a flag indicating an unprocessed CR,
+ the rest is the overflow buffer. */
+} LineBuf;
+
+/*
+ * Items part of erlang:port_info/1 result. Note am_registered_name
+ * *needs* to be first.
+ */
+
+#define ERTS_PORT_INFO_1_ITEMS \
+ { am_registered_name, /* Needs to be first */ \
+ am_name, \
+ am_links, \
+ am_id, \
+ am_connected, \
+ am_input, \
+ am_output, \
+ am_os_pid }
+
+/*
+ * Port Specific Data.
+ *
+ * Only use PrtSD for very rarely used data.
+ */
+
+#define ERTS_PRTSD_SCHED_ID 0
+
+#define ERTS_PRTSD_SIZE 1
+
+typedef struct {
+ void *data[ERTS_PRTSD_SIZE];
+} ErtsPrtSD;
+
+#ifdef ERTS_SMP
+typedef struct ErtsXPortsList_ ErtsXPortsList;
+#endif
+
+/*
+ * Port locking:
+ *
+ * Locking is done either driver specific or port specific. When
+ * driver specific locking is used, all instances of the driver,
+ * i.e. ports running the driver, share the same lock. When port
+ * specific locking is used each instance has its own lock.
+ *
+ * Most fields in the Port structure are protected by the lock
+ * referred to by the 'lock' field. This lock is shared between
+ * all ports running the same driver when driver specific locking
+ * is used.
+ *
+ * The 'sched' field is protected by the run queue lock that the
+ * port currently is assigned to.
+ *
+ */
+
+struct _erl_drv_port {
+ ErtsPTabElementCommon common; /* *Need* to be first in struct */
+
+ ErtsPortTaskSched sched;
+ ErtsPortTaskHandle timeout_task;
+#ifdef ERTS_SMP
+ erts_mtx_t *lock;
+ ErtsXPortsList *xports;
+ erts_smp_atomic_t run_queue;
+#else
+ erts_atomic32_t refc;
+ int cleanup;
+#endif
+ erts_atomic_t connected; /* A connected process */
+ Eterm caller; /* Current caller. */
+ Eterm data; /* Data associated with port. */
+ ErlHeapFragment* bp; /* Heap fragment holding data (NULL if imm data). */
+ Uint bytes_in; /* Number of bytes read */
+ Uint bytes_out; /* Number of bytes written */
+
+ ErlIOQueue ioq; /* driver accessible i/o queue */
+ DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
+ char *name; /* String used in the open */
+ erts_driver_t* drv_ptr;
+ UWord drv_data;
+ SWord os_pid; /* Child process ID */
+ ErtsProcList *suspended; /* List of suspended processes. */
+ LineBuf *linebuf; /* Buffer to hold data not ready for
+ process to get (line oriented I/O)*/
+ erts_atomic32_t state; /* Status and type flags */
+ int control_flags; /* Flags for port_control() */
+ ErlDrvPDL port_data_lock;
+
+ ErtsPrtSD *psd; /* Port specific data */
+};
+
+#define ERTS_PORT_GET_CONNECTED(PRT) \
+ ((Eterm) erts_atomic_read_nob(&(PRT)->connected))
+#define ERTS_PORT_SET_CONNECTED(PRT, PID) \
+ erts_atomic_set_relb(&(PRT)->connected, (erts_aint_t) (PID))
+#define ERTS_PORT_INIT_CONNECTED(PRT, PID) \
+ erts_atomic_init_nob(&(PRT)->connected, (erts_aint_t) (PID))
+
+
+struct erl_drv_port_data_lock {
+ erts_mtx_t mtx;
+ erts_atomic_t refc;
+ Port *prt;
+};
+
+ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_port_runq(Port *prt)
+{
+#ifdef ERTS_SMP
+ ErtsRunQueue *rq1, *rq2;
+ rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
+ if (!rq1)
+ return NULL;
+ while (1) {
+ erts_smp_runq_lock(rq1);
+ rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
+ if (rq1 == rq2)
+ return rq1;
+ erts_smp_runq_unlock(rq1);
+ rq1 = rq2;
+ if (!rq1)
+ return NULL;
+ }
+#else
+ return ERTS_RUNQ_IX(0);
+#endif
+}
+
+#endif
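
[Editor's note] erts_port_runq() above has to lock a run queue that the port may migrate away from at any moment, so it reads the pointer, locks that queue, and re-reads to confirm the port still belongs to it, retrying otherwise. A standalone pthread sketch of the same "read, lock, validate, retry" loop, with toy types instead of the ERTS atomics and run-queue locks:

    #include <pthread.h>
    #include <stdatomic.h>

    typedef struct { pthread_mutex_t lock; /* ... */ } RunQueue;
    typedef struct { _Atomic(RunQueue *) run_queue; /* ... */ } PortLike;

    /* Lock and return the run queue the port is currently assigned to,
     * or NULL if it has none. The assignment can change until we hold
     * the queue's lock, so validate after locking and chase on mismatch. */
    static RunQueue *lock_port_runq(PortLike *prt)
    {
        RunQueue *rq = atomic_load_explicit(&prt->run_queue, memory_order_relaxed);
        while (rq) {
            RunQueue *rq2;
            pthread_mutex_lock(&rq->lock);
            rq2 = atomic_load_explicit(&prt->run_queue, memory_order_relaxed);
            if (rq == rq2)
                return rq;            /* still the owner: locked and validated */
            pthread_mutex_unlock(&rq->lock);
            rq = rq2;                 /* port migrated: follow the new queue   */
        }
        return NULL;
    }
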
+
+
+ERTS_GLB_INLINE void *erts_prtsd_get(Port *p, int ix);
+ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void *
+erts_prtsd_get(Port *prt, int ix)
+{
+ return prt->psd ? prt->psd->data[ix] : NULL;
+}
+
+ERTS_GLB_INLINE void *
+erts_prtsd_set(Port *prt, int ix, void *data)
+{
+ if (prt->psd) {
+ void *old = prt->psd->data[ix];
+ prt->psd->data[ix] = data;
+ return old;
+ }
+ else {
+ prt->psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD));
+ prt->psd->data[ix] = data;
+ return NULL;
+ }
+}
+
+#endif
+
+extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */
+extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
+
+
+/* port status flags */
+
+#define ERTS_PORT_SFLG_CONNECTED ((Uint32) (1 << 0))
+/* Port has begun exiting */
+#define ERTS_PORT_SFLG_EXITING ((Uint32) (1 << 1))
+/* Distribution port */
+#define ERTS_PORT_SFLG_DISTRIBUTION ((Uint32) (1 << 2))
+#define ERTS_PORT_SFLG_BINARY_IO ((Uint32) (1 << 3))
+#define ERTS_PORT_SFLG_SOFT_EOF ((Uint32) (1 << 4))
+/* Flow control */
+/* Port is closing (no i/o accepted) */
+#define ERTS_PORT_SFLG_CLOSING ((Uint32) (1 << 5))
+/* Send a closed message when terminating */
+#define ERTS_PORT_SFLG_SEND_CLOSED ((Uint32) (1 << 6))
+/* Line oriented I/O on port */
+#define ERTS_PORT_SFLG_LINEBUF_IO ((Uint32) (1 << 7))
+/* Immortal port (only certain system ports) */
+#define ERTS_PORT_SFLG_FREE ((Uint32) (1 << 8))
+#define ERTS_PORT_SFLG_INITIALIZING ((Uint32) (1 << 9))
+/* Port uses port specific locking (opposed to driver specific locking) */
+#define ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK ((Uint32) (1 << 10))
+#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 11))
+/* Last port to terminate halts the emulator */
+#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 12))
+#ifdef DEBUG
+/* Only debug: make sure all flags aren't cleared unintentionally */
+#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31))
+#endif
+
+/* Combinations of port status flags */
+#define ERTS_PORT_SFLGS_DEAD \
+ (ERTS_PORT_SFLG_FREE | ERTS_PORT_SFLG_INITIALIZING)
+#define ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
+ (ERTS_PORT_SFLGS_DEAD | ERTS_PORT_SFLG_INVALID)
+#define ERTS_PORT_SFLGS_INVALID_LOOKUP \
+ (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
+ | ERTS_PORT_SFLG_EXITING \
+ | ERTS_PORT_SFLG_CLOSING)
+#define ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP \
+ (ERTS_PORT_SFLGS_INVALID_LOOKUP \
+ | ERTS_PORT_SFLG_DISTRIBUTION)
+
+
+/*
+ * Costs in reductions for some port operations.
+ */
+#define ERTS_PORT_REDS_EXECUTE 10
+#define ERTS_PORT_REDS_FREE 100
+#define ERTS_PORT_REDS_TIMEOUT 400
+#define ERTS_PORT_REDS_INPUT 400
+#define ERTS_PORT_REDS_OUTPUT 400
+#define ERTS_PORT_REDS_EVENT 400
+#define ERTS_PORT_REDS_CMD_OUTPUTV 400
+#define ERTS_PORT_REDS_CMD_OUTPUT 400
+#define ERTS_PORT_REDS_EXIT 300
+#define ERTS_PORT_REDS_CONNECT 40
+#define ERTS_PORT_REDS_UNLINK 40
+#define ERTS_PORT_REDS_LINK 40
+#define ERTS_PORT_REDS_BADSIG 40
+#define ERTS_PORT_REDS_CONTROL 400
+#define ERTS_PORT_REDS_CALL 400
+#define ERTS_PORT_REDS_INFO 100
+#define ERTS_PORT_REDS_SET_DATA 40
+#define ERTS_PORT_REDS_GET_DATA 40
+#define ERTS_PORT_REDS_TERMINATE 200
+
+void print_port_info(Port *, int, void *);
+void erts_port_free(Port *);
+#ifndef ERTS_SMP
+void erts_port_cleanup(Port *);
+#endif
+void erts_fire_port_monitor(Port *prt, Eterm ref);
+#ifdef ERTS_SMP
+int erts_port_handle_xports(Port *);
+#endif
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int erts_lc_is_port_locked(Port *);
+#endif
+
+ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt);
+ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt);
+ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc);
+
+ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt);
+ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt);
+ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void erts_port_inc_refc(Port *prt)
+{
+#ifdef ERTS_SMP
+ erts_ptab_inc_refc(&prt->common);
+#else
+ erts_atomic32_inc_nob(&prt->refc);
+#endif
+}
+
+ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt)
+{
+#ifdef ERTS_SMP
+ int referred = erts_ptab_dec_test_refc(&prt->common);
+ if (!referred)
+ erts_port_free(prt);
+#else
+ int refc = erts_atomic32_dec_read_nob(&prt->refc);
+ if (refc == 0)
+ erts_port_free(prt);
+#endif
+}
+
+ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc)
+{
+#ifdef ERTS_SMP
+ int referred = erts_ptab_add_test_refc(&prt->common, add_refc);
+ if (!referred)
+ erts_port_free(prt);
+#else
+ int refc = erts_atomic32_add_read_nob(&prt->refc, add_refc);
+ if (refc == 0)
+ erts_port_free(prt);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_smp_port_trylock(Port *prt)
+{
+#ifdef ERTS_SMP
+ /* *Need* to be a managed thread */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ return erts_mtx_trylock(prt->lock);
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_port_lock(Port *prt)
+{
+#ifdef ERTS_SMP
+ /* *Need* to be a managed thread */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ erts_mtx_lock(prt->lock);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_smp_port_unlock(Port *prt)
+{
+#ifdef ERTS_SMP
+ /* *Need* to be a managed thread */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+ erts_mtx_unlock(prt->lock);
+#endif
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+#define ERTS_INVALID_PORT_OPT(PP, ID, FLGS) \
+ (!(PP) \
+ || (erts_atomic32_read_nob(&(PP)->state) & (FLGS)) \
+ || (PP)->common.id != (ID))
+
+/* port lookup */
+
+#define INVALID_PORT(PP, ID) \
+ ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_LOOKUP)
+
+/* Invalidate trace port if anything suspicious, for instance
+ * that the port is a distribution port or it is busy.
+ */
+#define INVALID_TRACER_PORT(PP, ID) \
+ ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)
+
+#define ERTS_PORT_SCHED_ID(P, ID) \
+ ((Uint) (UWord) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (UWord) (ID)))
+
+extern const Port erts_invalid_port;
+#define ERTS_PORT_LOCK_BUSY ((Port *) &erts_invalid_port)
+
+int erts_is_port_ioq_empty(Port *);
+void erts_terminate_port(Port *);
+
+#ifdef ERTS_SMP
+Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
+#endif
+
+ERTS_GLB_INLINE Port *erts_pix2port(int);
+ERTS_GLB_INLINE Port *erts_port_lookup_raw(Eterm);
+ERTS_GLB_INLINE Port *erts_port_lookup(Eterm, Uint32);
+ERTS_GLB_INLINE Port*erts_id2port(Eterm id);
+ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
+ERTS_GLB_INLINE void erts_port_release(Port *);
+#ifdef ERTS_SMP
+ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs);
+ERTS_GLB_INLINE void erts_thr_port_release(Port *prt);
+#endif
+ERTS_GLB_INLINE Port *erts_thr_drvport2port_raw(ErlDrvPort, int);
+ERTS_GLB_INLINE Port *erts_drvport2port_raw(ErlDrvPort drvport);
+ERTS_GLB_INLINE Port *erts_drvport2port(ErlDrvPort, erts_aint32_t *);
+ERTS_GLB_INLINE Port *erts_drvportid2port(Eterm);
+ERTS_GLB_INLINE Eterm erts_drvport2id(ErlDrvPort);
+ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm);
+ERTS_GLB_INLINE int erts_is_port_alive(Eterm);
+ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm);
+ERTS_GLB_INLINE int erts_port_driver_callback_epilogue(Port *, erts_aint32_t *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Port *erts_pix2port(int ix)
+{
+ Port *prt;
+ ASSERT(0 <= ix && ix < erts_ptab_max(&erts_port));
+ prt = (Port *) erts_ptab_pix2intptr_nob(&erts_port, ix);
+ return prt == ERTS_PORT_LOCK_BUSY ? NULL : prt;
+}
+
+ERTS_GLB_INLINE Port *
+erts_port_lookup_raw(Eterm id)
+{
+ Port *prt;
+
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+ return prt && prt->common.id == id ? prt : NULL;
+}
+
+ERTS_GLB_INLINE Port *
+erts_port_lookup(Eterm id, Uint32 invalid_sflgs)
+{
+ Port *prt = erts_port_lookup_raw(id);
+ return (!prt
+ ? NULL
+ : ((invalid_sflgs & erts_atomic32_read_nob(&prt->state))
+ ? NULL
+ : prt));
+}
+
+
+ERTS_GLB_INLINE Port*
+erts_id2port(Eterm id)
+{
+ erts_aint32_t state;
+ Port *prt;
+
+ /* Only allowed to be called from managed threads */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+
+ if (!prt || prt->common.id != id)
+ return NULL;
+
+ erts_smp_port_lock(prt);
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) {
+ erts_smp_port_unlock(prt);
+ return NULL;
+ }
+
+ return prt;
+}
+
+
+ERTS_GLB_INLINE Port*
+erts_id2port_sflgs(Eterm id,
+ Process *c_p, ErtsProcLocks c_p_locks,
+ Uint32 invalid_sflgs)
+{
+#ifdef ERTS_SMP
+ int no_proc_locks = !c_p || !c_p_locks;
+#endif
+ erts_aint32_t state;
+ Port *prt;
+
+ /* Only allowed to be called from managed threads */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+
+ if (!prt || prt->common.id != id)
+ return NULL;
+
+#ifdef ERTS_SMP
+ if (no_proc_locks)
+ erts_smp_port_lock(prt);
+ else if (erts_smp_port_trylock(prt) == EBUSY) {
+ /* Unlock process locks, and acquire locks in lock order... */
+ erts_smp_proc_unlock(c_p, c_p_locks);
+ erts_smp_port_lock(prt);
+ erts_smp_proc_lock(c_p, c_p_locks);
+ }
+#endif
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & invalid_sflgs) {
+#ifdef ERTS_SMP
+ erts_smp_port_unlock(prt);
+#endif
+ return NULL;
+ }
+
+ return prt;
+}
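
[Editor's note] erts_id2port_sflgs() above may be called while process locks are already held, yet the port lock ranks before them in the lock order. It therefore tries a trylock first and, only on EBUSY, drops the process locks, takes the port lock blocking, and re-acquires the process locks; anything read under the dropped locks must then be revalidated (the code re-reads the port state after locking). A stripped-down pthread sketch of that fallback, with generic locks A (taken first in the order) and B:

    #include <errno.h>
    #include <pthread.h>

    /* Caller already holds B but also needs A, and the global order is
     * A before B. Try out of order first; on EBUSY back out and redo
     * the acquisition in the correct order. */
    static void lock_a_while_holding_b(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if (pthread_mutex_trylock(a) == EBUSY) {
            pthread_mutex_unlock(b);   /* abandon the out-of-order attempt */
            pthread_mutex_lock(a);     /* take A first, as the order requires */
            pthread_mutex_lock(b);     /* then re-acquire B */
        }
        /* both A and B are held here; revalidate state guarded by B */
    }
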
+
+ERTS_GLB_INLINE void
+erts_port_release(Port *prt)
+{
+ /* Only allowed to be called from managed threads */
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+#ifdef ERTS_SMP
+ erts_smp_port_unlock(prt);
+#else
+ if (prt->cleanup) {
+ prt->cleanup = 0;
+ erts_port_cleanup(prt);
+ }
+#endif
+}
+
+#ifdef ERTS_SMP
+
+/*
+ * erts_thr_id2port_sflgs() and erts_thr_port_release() can
+ * be used by unmanaged threads in the SMP case.
+ */
+ERTS_GLB_INLINE Port *
+erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs)
+{
+ Port *prt;
+ ErtsThrPrgrDelayHandle dhndl;
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+
+ if (!prt || prt->common.id != id) {
+ erts_thr_progress_unmanaged_continue(dhndl);
+ prt = NULL;
+ }
+ else {
+ erts_aint32_t state;
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_port_inc_refc(prt);
+ erts_thr_progress_unmanaged_continue(dhndl);
+ }
+
+ erts_mtx_lock(prt->lock);
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & invalid_sflgs) {
+ erts_mtx_unlock(prt->lock);
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_port_dec_refc(prt);
+ prt = NULL;
+ }
+ }
+
+ return prt;
+}
+
+ERTS_GLB_INLINE void
+erts_thr_port_release(Port *prt)
+{
+ erts_mtx_unlock(prt->lock);
+#ifdef ERTS_SMP
+ if (!erts_thr_progress_is_managed_thread())
+ erts_port_dec_refc(prt);
+#endif
+}
+
+#endif
+
+ERTS_GLB_INLINE Port*
+erts_thr_drvport2port_raw(ErlDrvPort drvport, int lock_pdl)
+{
+#if ERTS_ENABLE_LOCK_CHECK
+ int emu_thread = erts_lc_is_emu_thr();
+#endif
+ if (drvport == ERTS_INVALID_ERL_DRV_PORT)
+ return NULL;
+ else {
+ Port *prt = (Port *) drvport;
+ if (lock_pdl && prt->port_data_lock)
+ driver_pdl_lock(prt->port_data_lock);
+#if ERTS_ENABLE_LOCK_CHECK
+ if (!ERTS_IS_CRASH_DUMPING) {
+ if (emu_thread) {
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ERTS_LC_ASSERT(!prt->port_data_lock
+ || erts_lc_mtx_is_locked(&prt->port_data_lock->mtx));
+ }
+ else {
+ ERTS_LC_ASSERT(prt->port_data_lock);
+ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&prt->port_data_lock->mtx));
+ }
+ }
+#endif
+ return prt;
+ }
+}
+
+ERTS_GLB_INLINE Port*
+erts_drvport2port_raw(ErlDrvPort drvport)
+{
+ ERTS_LC_ASSERT(erts_lc_is_emu_thr());
+ if (drvport == ERTS_INVALID_ERL_DRV_PORT)
+ return NULL;
+ else {
+ Port *prt = (Port *) drvport;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
+ return prt;
+ }
+}
+
+ERTS_GLB_INLINE Port*
+erts_drvport2port(ErlDrvPort drvport, erts_aint32_t *statep)
+{
+ Port *prt = erts_drvport2port_raw(drvport);
+ erts_aint32_t state;
+ if (!prt)
+ return NULL;
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ return NULL;
+ if (statep)
+ *statep = state;
+ return prt;
+}
+
+ERTS_GLB_INLINE Port*
+erts_drvportid2port(Eterm id)
+{
+ Port *prt;
+ erts_aint32_t state;
+ if (is_not_internal_port(id))
+ return NULL;
+ prt = (Port *) erts_ptab_pix2intptr_nob(&erts_port,
+ internal_port_index(id));
+ if (!prt)
+ return NULL;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
+ if (prt->common.id != id)
+ return NULL;
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ return NULL;
+ return prt;
+}
+
+ERTS_GLB_INLINE Eterm
+erts_drvport2id(ErlDrvPort drvport)
+{
+ Port *prt = erts_drvport2port_raw(drvport);
+ if (!prt)
+ return am_undefined;
+ else
+ return prt->common.id;
+}
+
+ERTS_GLB_INLINE Uint32
+erts_portid2status(Eterm id)
+{
+ Port *prt = erts_port_lookup_raw(id);
+ if (prt)
+ return (Uint32) erts_atomic32_read_acqb(&prt->state);
+ else
+ return ERTS_PORT_SFLG_INVALID;
+}
+
+ERTS_GLB_INLINE int
+erts_is_port_alive(Eterm id)
+{
+ return !(erts_portid2status(id) & (ERTS_PORT_SFLG_INVALID
+ | ERTS_PORT_SFLGS_DEAD));
+}
+
+ERTS_GLB_INLINE int
+erts_is_valid_tracer_port(Eterm id)
+{
+ return !(erts_portid2status(id) & ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
+}
+
+ERTS_GLB_INLINE int
+erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep)
+{
+ int reds = 0;
+ erts_aint32_t state;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(prt)) {
+ reds += ERTS_PORT_REDS_TERMINATE;
+ erts_terminate_port(prt);
+ state = erts_atomic32_read_nob(&prt->state);
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ }
+
+#ifdef ERTS_SMP
+ if (prt->xports) {
+ reds += erts_port_handle_xports(prt);
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ ASSERT(!prt->xports);
+ }
+#endif
+
+ if (statep)
+ *statep = state;
+
+ return reds;
+}
+
+#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+void erts_port_resume_procs(Port *);
+
+struct binary;
+
+#define ERTS_P2P_SIG_TYPE_BAD 0
+#define ERTS_P2P_SIG_TYPE_OUTPUT 1
+#define ERTS_P2P_SIG_TYPE_OUTPUTV 2
+#define ERTS_P2P_SIG_TYPE_CONNECT 3
+#define ERTS_P2P_SIG_TYPE_EXIT 4
+#define ERTS_P2P_SIG_TYPE_CONTROL 5
+#define ERTS_P2P_SIG_TYPE_CALL 6
+#define ERTS_P2P_SIG_TYPE_INFO 7
+#define ERTS_P2P_SIG_TYPE_LINK 8
+#define ERTS_P2P_SIG_TYPE_UNLINK 9
+#define ERTS_P2P_SIG_TYPE_SET_DATA 10
+#define ERTS_P2P_SIG_TYPE_GET_DATA 11
+
+#define ERTS_P2P_SIG_TYPE_BITS 4
+#define ERTS_P2P_SIG_TYPE_MASK \
+ ((1 << ERTS_P2P_SIG_TYPE_BITS) - 1)
+
+#define ERTS_P2P_SIG_DATA_FLG(N) \
+ (1 << (ERTS_P2P_SIG_TYPE_BITS + (N)))
+#define ERTS_P2P_SIG_DATA_FLG_BANG_OP ERTS_P2P_SIG_DATA_FLG(0)
+#define ERTS_P2P_SIG_DATA_FLG_REPLY ERTS_P2P_SIG_DATA_FLG(1)
+#define ERTS_P2P_SIG_DATA_FLG_NOSUSPEND ERTS_P2P_SIG_DATA_FLG(2)
+#define ERTS_P2P_SIG_DATA_FLG_FORCE ERTS_P2P_SIG_DATA_FLG(3)
+#define ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT ERTS_P2P_SIG_DATA_FLG(4)
+#define ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK ERTS_P2P_SIG_DATA_FLG(5)
+#define ERTS_P2P_SIG_DATA_FLG_SCHED ERTS_P2P_SIG_DATA_FLG(6)
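
[Editor's note] A proc-to-port signal packs its type into the low ERTS_P2P_SIG_TYPE_BITS (4) bits of 'flags' and keeps boolean flags in the bits above, so a single word carries both; the inline helpers further down mask with ERTS_P2P_SIG_TYPE_MASK to recover the type. A tiny self-contained illustration of the same encoding, with generic names rather than the ERTS macros:

    #include <assert.h>

    #define TYPE_BITS 4
    #define TYPE_MASK ((1 << TYPE_BITS) - 1)
    #define FLG(n)    (1 << (TYPE_BITS + (n)))

    #define TYPE_OUTPUT 1
    #define FLG_BANG_OP FLG(0)
    #define FLG_REPLY   FLG(1)

    int main(void)
    {
        int flags = TYPE_OUTPUT | FLG_BANG_OP;       /* pack type plus one flag */
        assert((flags & TYPE_MASK) == TYPE_OUTPUT);  /* recover the type        */
        assert(flags & FLG_BANG_OP);                 /* test a flag             */
        assert(!(flags & FLG_REPLY));
        return 0;
    }
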
+
+struct ErtsProc2PortSigData_ {
+ int flags;
+ Eterm caller;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ union {
+ struct {
+ Eterm from;
+ ErlIOVec *evp;
+ ErlDrvBinary *cbinp;
+ } outputv;
+ struct {
+ Eterm from;
+ char *bufp;
+ ErlDrvSizeT size;
+ } output;
+ struct {
+ Eterm from;
+ Eterm connected;
+ } connect;
+ struct {
+ Eterm from;
+ Eterm reason;
+ ErlHeapFragment *bp;
+ } exit;
+ struct {
+ struct binary *binp;
+ unsigned int command;
+ char *bufp;
+ ErlDrvSizeT size;
+ } control;
+ struct {
+ unsigned int command;
+ char *bufp;
+ ErlDrvSizeT size;
+ } call;
+ struct {
+ Eterm item;
+ } info;
+ struct {
+ Eterm port;
+ Eterm to;
+ } link;
+ struct {
+ Eterm from;
+ } unlink;
+ struct {
+ ErlHeapFragment *bp;
+ Eterm data;
+ } set_data;
+ } u;
+} ;
+
+ERTS_GLB_INLINE int
+erts_proc2port_sig_is_command_op(ErtsProc2PortSigData *sigdp);
+ERTS_GLB_INLINE ErlDrvSizeT
+erts_proc2port_sig_command_data_size(ErtsProc2PortSigData *sigdp);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_proc2port_sig_is_command_op(ErtsProc2PortSigData *sigdp)
+{
+ switch (sigdp->flags & ERTS_P2P_SIG_TYPE_MASK) {
+ case ERTS_P2P_SIG_TYPE_OUTPUT: return !0;
+ case ERTS_P2P_SIG_TYPE_OUTPUTV: return !0;
+ default: return 0;
+ }
+}
+
+ERTS_GLB_INLINE ErlDrvSizeT
+erts_proc2port_sig_command_data_size(ErtsProc2PortSigData *sigdp)
+{
+ switch (sigdp->flags & ERTS_P2P_SIG_TYPE_MASK) {
+ case ERTS_P2P_SIG_TYPE_OUTPUT: return sigdp->u.output.size;
+ case ERTS_P2P_SIG_TYPE_OUTPUTV: return sigdp->u.outputv.evp->size;
+ default: return (ErlDrvSizeT) 0;
+ }
+}
+
+#endif
+
+#define ERTS_PROC2PORT_SIG_EXEC 0
+#define ERTS_PROC2PORT_SIG_ABORT 1
+#define ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND 2
+#define ERTS_PROC2PORT_SIG_ABORT_CLOSED 3
+
+typedef int (*ErtsProc2PortSigCallback)(Port *,
+ erts_aint32_t,
+ int,
+ ErtsProc2PortSigData *);
+
+typedef enum {
+ ERTS_PORT_OP_BADARG,
+ ERTS_PORT_OP_CALLER_EXIT,
+ ERTS_PORT_OP_BUSY,
+ ERTS_PORT_OP_BUSY_SCHEDULED,
+ ERTS_PORT_OP_SCHEDULED,
+ ERTS_PORT_OP_DROPPED,
+ ERTS_PORT_OP_DONE
+} ErtsPortOpResult;
+
+ErtsPortOpResult
+erts_schedule_proc2port_signal(Process *,
+ Port *,
+ Eterm,
+ Eterm *,
+ ErtsProc2PortSigData *,
+ int,
+ ErtsProc2PortSigCallback);
+
+int erts_deliver_port_exit(Port *, Eterm, Eterm, int);
+
+/*
+ * Port signal flags
+ */
+#define ERTS_PORT_SIG_FLG_BANG_OP ERTS_P2P_SIG_DATA_FLG_BANG_OP
+#define ERTS_PORT_SIG_FLG_NOSUSPEND ERTS_P2P_SIG_DATA_FLG_NOSUSPEND
+#define ERTS_PORT_SIG_FLG_FORCE ERTS_P2P_SIG_DATA_FLG_FORCE
+#define ERTS_PORT_SIG_FLG_BROKEN_LINK ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK
+#define ERTS_PORT_SIG_FLG_BAD_OUTPUT ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT
+#define ERTS_PORT_SIG_FLG_FORCE_SCHED ERTS_P2P_SIG_DATA_FLG_SCHED
+/* ERTS_PORT_SIG_FLG_FORCE_IMM_CALL only when crash dumping... */
+#define ERTS_PORT_SIG_FLG_FORCE_IMM_CALL ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT
+
+/*
+ * Port ! {Owner, {command, Data}}
+ * Port ! {Owner, {connect, NewOwner}}
+ * Port ! {Owner, close}
+ */
+ErtsPortOpResult erts_port_command(Process *, int, Port *, Eterm, Eterm *);
+
+/*
+ * Signals from processes to ports.
+ */
+ErtsPortOpResult erts_port_output(Process *, int, Port *, Eterm, Eterm, Eterm *);
+ErtsPortOpResult erts_port_exit(Process *, int, Port *, Eterm, Eterm, Eterm *);
+ErtsPortOpResult erts_port_connect(Process *, int, Port *, Eterm, Eterm, Eterm *);
+ErtsPortOpResult erts_port_link(Process *, Port *, Eterm, Eterm *);
+ErtsPortOpResult erts_port_unlink(Process *, Port *, Eterm, Eterm *);
+ErtsPortOpResult erts_port_control(Process *, Port *, unsigned int, Eterm, Eterm *);
+ErtsPortOpResult erts_port_call(Process *, Port *, unsigned int, Eterm, Eterm *);
+ErtsPortOpResult erts_port_info(Process *, Port *, Eterm, Eterm *);
+ErtsPortOpResult erts_port_set_data(Process *, Port *, Eterm, Eterm *);
+ErtsPortOpResult erts_port_get_data(Process *, Port *, Eterm *);
+
+#endif
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 0f1a0d441a..8ceadcdb8c 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -33,42 +33,29 @@
#include "erl_port_task.h"
#include "dist.h"
#include "dtrace-wrapper.h"
+#include <stdarg.h>
#if defined(DEBUG) && 0
-#define HARD_DEBUG
+#define ERTS_HARD_DEBUG_TASK_QUEUES
+#else
+#undef ERTS_HARD_DEBUG_TASK_QUEUES
#endif
-/*
- * Costs in reductions for some port operations.
- */
-#define ERTS_PORT_REDS_EXECUTE 0
-#define ERTS_PORT_REDS_FREE 50
-#define ERTS_PORT_REDS_TIMEOUT 200
-#define ERTS_PORT_REDS_INPUT 200
-#define ERTS_PORT_REDS_OUTPUT 200
-#define ERTS_PORT_REDS_EVENT 200
-#define ERTS_PORT_REDS_TERMINATE 100
-
-
-#define ERTS_PORT_TASK_INVALID_PORT(P, ID) \
- ((erts_port_status_get((P)) & ERTS_PORT_SFLGS_DEAD) || (P)->id != (ID))
-
-#define ERTS_PORT_IS_IN_RUNQ(RQ, P) \
- ((P)->sched.next || (P)->sched.prev || (RQ)->ports.start == (P))
-
-#define ERTS_PORT_NOT_IN_RUNQ(P) \
-do { \
- (P)->sched.prev = NULL; \
- (P)->sched.next = NULL; \
-} while (0)
+#ifdef ERTS_HARD_DEBUG_TASK_QUEUES
+static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue);
+#define ERTS_PT_DBG_CHK_TASK_QS(PP, EQ, PBQ) \
+ chk_task_queues((PP), (EQ), (PBQ))
+#else
+#define ERTS_PT_DBG_CHK_TASK_QS(PP, EQ, PBQ)
+#endif
#ifdef USE_VM_PROBES
#define DTRACE_DRIVER(PROBE_NAME, PP) \
- if (DTRACE_ENABLED(driver_ready_input)) { \
+ if (DTRACE_ENABLED(PROBE_NAME)) { \
DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE); \
DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE); \
\
- dtrace_pid_str(PP->connected, process_str); \
+ dtrace_pid_str(ERTS_PORT_GET_CONNECTED(PP), process_str); \
dtrace_port_str(PP, port_str); \
DTRACE3(PROBE_NAME, process_str, port_str, PP->name); \
}
@@ -78,83 +65,768 @@ do { \
erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
-struct ErtsPortTaskQueue_ {
- ErtsPortTask *first;
- ErtsPortTask *last;
- Port *port;
-};
+#define ERTS_PT_STATE_SCHEDULED 0
+#define ERTS_PT_STATE_ABORTED 1
+#define ERTS_PT_STATE_EXECUTING 2
+
+typedef union {
+ struct { /* I/O tasks */
+ ErlDrvEvent event;
+ ErlDrvEventData event_data;
+ } io;
+ struct {
+ ErtsProc2PortSigCallback callback;
+ ErtsProc2PortSigData data;
+ } psig;
+} ErtsPortTaskTypeData;
struct ErtsPortTask_ {
- ErtsPortTask *prev;
- ErtsPortTask *next;
- ErtsPortTaskQueue *queue;
- ErtsPortTaskHandle *handle;
+ erts_smp_atomic32_t state;
ErtsPortTaskType type;
- ErlDrvEvent event;
- ErlDrvEventData event_data;
+ union {
+ struct {
+ ErtsPortTask *next;
+ ErtsPortTaskHandle *handle;
+ int flags;
+ Uint32 ref[ERTS_MAX_REF_NUMBERS];
+ ErtsPortTaskTypeData td;
+ } alive;
+ ErtsThrPrgrLaterOp release;
+ } u;
};
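
[Editor's note] Each port task now carries an atomic state word (ERTS_PT_STATE_SCHEDULED, ABORTED, EXECUTING). The transition rules are not shown in this part of the patch, but a natural use of such a word is to make aborts race-free with compare-and-swap: an abort succeeds only while the task is still merely scheduled, and the executor claims the task the same way. A sketch of that idea with C11 atomics; the transitions here are an assumption, not code taken from the patch:

    #include <stdatomic.h>

    enum { PT_STATE_SCHEDULED = 0, PT_STATE_ABORTED = 1, PT_STATE_EXECUTING = 2 };

    /* Abort succeeds only if the task is still just scheduled. */
    static int try_abort(atomic_int *state)
    {
        int expected = PT_STATE_SCHEDULED;
        return atomic_compare_exchange_strong(state, &expected, PT_STATE_ABORTED);
    }

    /* Executor claims the task before running it; a task aborted in the
     * meantime is simply skipped. */
    static int try_begin_execute(atomic_int *state)
    {
        int expected = PT_STATE_SCHEDULED;
        return atomic_compare_exchange_strong(state, &expected, PT_STATE_EXECUTING);
    }
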
-#ifdef HARD_DEBUG
-#define ERTS_PT_CHK_PORTQ(RQ) check_port_queue((RQ), NULL, 0)
-#define ERTS_PT_CHK_PRES_PORTQ(RQ, PP) check_port_queue((RQ), (PP), -1)
-#define ERTS_PT_CHK_IN_PORTQ(RQ, PP) check_port_queue((RQ), (PP), 1)
-#define ERTS_PT_CHK_NOT_IN_PORTQ(RQ, PP) check_port_queue((RQ), (PP), 0)
-#define ERTS_PT_CHK_TASKQ(Q) check_task_queue((Q), NULL, 0)
-#define ERTS_PT_CHK_IN_TASKQ(Q, T) check_task_queue((Q), (T), 1)
-#define ERTS_PT_CHK_NOT_IN_TASKQ(Q, T) check_task_queue((Q), (T), 0)
-static void
-check_port_queue(Port *chk_pp, int inq);
-static void
-check_task_queue(ErtsPortTaskQueue *ptqp,
- ErtsPortTask *chk_ptp,
- int inq);
-#else
-#define ERTS_PT_CHK_PORTQ(RQ)
-#define ERTS_PT_CHK_PRES_PORTQ(RQ, PP)
-#define ERTS_PT_CHK_IN_PORTQ(RQ, PP)
-#define ERTS_PT_CHK_NOT_IN_PORTQ(RQ, PP)
-#define ERTS_PT_CHK_TASKQ(Q)
-#define ERTS_PT_CHK_IN_TASKQ(Q, T)
-#define ERTS_PT_CHK_NOT_IN_TASKQ(Q, T)
+struct ErtsPortTaskHandleList_ {
+ ErtsPortTaskHandle handle;
+ union {
+ ErtsPortTaskHandleList *next;
+#ifdef ERTS_SMP
+ ErtsThrPrgrLaterOp release;
#endif
+ } u;
+};
+
+typedef struct ErtsPortTaskBusyCaller_ ErtsPortTaskBusyCaller;
+struct ErtsPortTaskBusyCaller_ {
+ ErtsPortTaskBusyCaller *next;
+ Eterm caller;
+ SWord count;
+ ErtsPortTask *last;
+};
+
+#define ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS 17
+struct ErtsPortTaskBusyCallerTable_ {
+ ErtsPortTaskBusyCaller *bucket[ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS];
+ ErtsPortTaskBusyCaller pre_alloc_busy_caller;
+};
+
-static void handle_remaining_tasks(ErtsRunQueue *runq, Port *pp);
+static void begin_port_cleanup(Port *pp,
+ ErtsPortTask **execq,
+ int *processing_busy_q_p);
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_task,
ErtsPortTask,
- 200,
+ 1000,
ERTS_ALC_T_PORT_TASK)
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_taskq,
- ErtsPortTaskQueue,
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(busy_caller_table,
+ ErtsPortTaskBusyCallerTable,
50,
- ERTS_ALC_T_PORT_TASKQ)
+ ERTS_ALC_T_BUSY_CALLER_TAB)
+
+#ifdef ERTS_SMP
+static void
+call_port_task_free(void *vptp)
+{
+ port_task_free((ErtsPortTask *) vptp);
+}
+#endif
+
+static ERTS_INLINE void
+schedule_port_task_free(ErtsPortTask *ptp)
+{
+#ifdef ERTS_SMP
+ erts_schedule_thr_prgr_later_op(call_port_task_free,
+ (void *) ptp,
+ &ptp->u.release);
+#else
+ port_task_free(ptp);
+#endif
+}
+
+static ERTS_INLINE ErtsPortTask *
+p2p_sig_data_to_task(ErtsProc2PortSigData *sigdp)
+{
+ ErtsPortTask *ptp;
+ char *ptr = (char *) sigdp;
+ ptr -= offsetof(ErtsPortTask, u.alive.td.psig.data);
+ ptp = (ErtsPortTask *) ptr;
+ ASSERT(ptp->type == ERTS_PORT_TASK_PROC_SIG);
+ return ptp;
+}
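
[Editor's note] p2p_sig_data_to_task() above recovers the enclosing ErtsPortTask from a pointer to its embedded ErtsProc2PortSigData by subtracting the member offset, the usual container_of idiom; that is why erts_port_task_alloc_p2p_sig_data() can hand out just the signal-data pointer and still get the task back later. A standalone illustration with toy types:

    #include <assert.h>
    #include <stddef.h>

    #define CONTAINER_OF(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    struct sig_data { int caller; };
    struct task     { int state; struct sig_data data; };

    int main(void)
    {
        struct task t;
        struct sig_data *dp = &t.data;                 /* only the member is handed out */
        struct task *tp = CONTAINER_OF(dp, struct task, data);
        assert(tp == &t);                              /* the owner is recovered */
        return 0;
    }
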
+
+ErtsProc2PortSigData *
+erts_port_task_alloc_p2p_sig_data(void)
+{
+ ErtsPortTask *ptp = port_task_alloc();
+
+ ptp->type = ERTS_PORT_TASK_PROC_SIG;
+ ptp->u.alive.flags = ERTS_PT_FLG_SIG_DEP;
+ erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
+
+ ASSERT(ptp == p2p_sig_data_to_task(&ptp->u.alive.td.psig.data));
+
+ return &ptp->u.alive.td.psig.data;
+}
+
+static ERTS_INLINE Eterm
+task_caller(ErtsPortTask *ptp)
+{
+ Eterm caller;
+
+ ASSERT(ptp->type == ERTS_PORT_TASK_PROC_SIG);
+
+ caller = ptp->u.alive.td.psig.data.caller;
+
+ ASSERT(is_internal_pid(caller) || is_internal_port(caller));
+
+ return caller;
+}
+
+/*
+ * Busy queue management
+ */
+
+static ERTS_INLINE int
+caller2bix(Eterm caller)
+{
+ ASSERT(is_internal_pid(caller) || is_internal_port(caller));
+ return (int) (_GET_PID_DATA(caller) % ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS);
+}
+
+
+static void
+popped_from_busy_queue(Port *pp, ErtsPortTask *ptp, int last)
+{
+ ErtsPortTaskBusyCaller **prev_bcpp = NULL, *bcp;
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ Eterm caller = task_caller(ptp);
+ int bix = caller2bix(caller);
+
+ ASSERT(is_internal_pid(caller));
+
+ ASSERT(tabp);
+ bcp = tabp->bucket[bix];
+ prev_bcpp = &tabp->bucket[bix];
+ ASSERT(bcp);
+ while (bcp->caller != caller) {
+ prev_bcpp = &bcp->next;
+ bcp = bcp->next;
+ ASSERT(bcp);
+ }
+ ASSERT(bcp->count > 0);
+ if (--bcp->count != 0) {
+ ASSERT(!last);
+ }
+ else {
+ *prev_bcpp = bcp->next;
+ if (bcp == &tabp->pre_alloc_busy_caller)
+ bcp->caller = am_undefined;
+ else
+ erts_free(ERTS_ALC_T_BUSY_CALLER, bcp);
+ if (last) {
+#ifdef DEBUG
+ erts_aint32_t flags =
+#endif
+ erts_smp_atomic32_read_band_nob(
+ &pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+#ifdef DEBUG
+ for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) {
+ ASSERT(!tabp->bucket[bix]);
+ }
+#endif
+ busy_caller_table_free(tabp);
+ pp->sched.taskq.local.busy.first = NULL;
+ pp->sched.taskq.local.busy.last = NULL;
+ pp->sched.taskq.local.busy.table = NULL;
+ }
+ }
+}
+
+static void
+busy_wait_move_to_busy_queue(Port *pp, ErtsPortTask *ptp)
+{
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ Eterm caller = task_caller(ptp);
+ ErtsPortTaskBusyCaller *bcp;
+ int bix;
+
+ ASSERT(is_internal_pid(caller));
+ /*
+ * Port is busy and this task type needs to wait until not busy.
+ */
+
+ ASSERT(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY);
+
+ ptp->u.alive.next = NULL;
+ if (pp->sched.taskq.local.busy.last) {
+ ASSERT(pp->sched.taskq.local.busy.first);
+ pp->sched.taskq.local.busy.last->u.alive.next = ptp;
+ }
+ else {
+ int i;
+ erts_aint32_t flags;
+
+ pp->sched.taskq.local.busy.first = ptp;
+ flags = erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+ ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(!(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
+
+ ASSERT(!tabp);
+
+ tabp = busy_caller_table_alloc();
+ pp->sched.taskq.local.busy.table = tabp;
+ for (i = 0; i < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; i++)
+ tabp->bucket[i] = NULL;
+ tabp->pre_alloc_busy_caller.caller = am_undefined;
+ }
+ pp->sched.taskq.local.busy.last = ptp;
+
+ bix = caller2bix(caller);
+ ASSERT(tabp);
+ bcp = tabp->bucket[bix];
+
+ while (bcp && bcp->caller != caller)
+ bcp = bcp->next;
+
+ if (bcp)
+ bcp->count++;
+ else {
+ if (tabp->pre_alloc_busy_caller.caller == am_undefined)
+ bcp = &tabp->pre_alloc_busy_caller;
+ else
+ bcp = erts_alloc(ERTS_ALC_T_BUSY_CALLER,
+ sizeof(ErtsPortTaskBusyCaller));
+ bcp->caller = caller;
+ bcp->count = 1;
+ bcp->next = tabp->bucket[bix];
+ tabp->bucket[bix] = bcp;
+ }
+
+ bcp->last = ptp;
+}
+
+static ERTS_INLINE int
+check_sig_dep_move_to_busy_queue(Port *pp, ErtsPortTask *ptp)
+{
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ ErtsPortTask *last_ptp;
+ ErtsPortTaskBusyCaller *bcp;
+ int bix;
+ Eterm caller;
+
+ ASSERT(ptp->u.alive.flags & ERTS_PT_FLG_SIG_DEP);
+ ASSERT(pp->sched.taskq.local.busy.last);
+ ASSERT(tabp);
+
+    /*
+     * We are either not busy, or the task does not imply waiting on a
+     * busy port.
+ * However, due to the signaling order requirements the task might depend
+ * on other tasks in the busy queue.
+ */
+
+ caller = task_caller(ptp);
+ bix = caller2bix(caller);
+ bcp = tabp->bucket[bix];
+ while (bcp && bcp->caller != caller)
+ bcp = bcp->next;
+
+ if (!bcp)
+ return 0;
+
+ /*
+ * There are other tasks that we depend on in the busy queue;
+ * move into busy queue.
+ */
+
+ bcp->count++;
+ last_ptp = bcp->last;
+ ptp->u.alive.next = last_ptp->u.alive.next;
+ if (!ptp->u.alive.next) {
+ ASSERT(pp->sched.taskq.local.busy.last == last_ptp);
+ pp->sched.taskq.local.busy.last = ptp;
+ }
+ last_ptp->u.alive.next = ptp;
+ bcp->last = ptp;
+
+ return 1;
+}
+
+static void
+no_sig_dep_move_from_busyq(Port *pp)
+{
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ ErtsPortTask *first_ptp, *last_ptp, *ptp;
+ ErtsPortTaskBusyCaller **prev_bcpp = NULL, *bcp = NULL;
+
+ /*
+ * Move tasks at the head of the busy queue that no longer
+     * have any dependencies on busy-wait tasks into the ordinary
+ * queue.
+ */
+
+ first_ptp = ptp = pp->sched.taskq.local.busy.first;
+
+ ASSERT(ptp && !(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY));
+ ASSERT(tabp);
+
+ do {
+ Eterm caller = task_caller(ptp);
+
+ if (!bcp || bcp->caller != caller) {
+ int bix = caller2bix(caller);
+
+ prev_bcpp = &tabp->bucket[bix];
+ bcp = tabp->bucket[bix];
+ ASSERT(bcp);
+ while (bcp->caller != caller) {
+ ASSERT(bcp);
+ prev_bcpp = &bcp->next;
+ bcp = bcp->next;
+ }
+ }
+
+ ASSERT(bcp->caller == caller);
+ ASSERT(bcp->count > 0);
+
+ if (--bcp->count == 0) {
+ *prev_bcpp = bcp->next;
+ if (bcp == &tabp->pre_alloc_busy_caller)
+ bcp->caller = am_undefined;
+ else
+ erts_free(ERTS_ALC_T_BUSY_CALLER, bcp);
+ }
+
+ last_ptp = ptp;
+ ptp = ptp->u.alive.next;
+ } while (ptp && !(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY));
+
+ pp->sched.taskq.local.busy.first = last_ptp->u.alive.next;
+ if (!pp->sched.taskq.local.busy.first) {
+#ifdef DEBUG
+ int bix;
+ erts_aint32_t flags =
+#endif
+ erts_smp_atomic32_read_band_nob(
+ &pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+#ifdef DEBUG
+ for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) {
+ ASSERT(!tabp->bucket[bix]);
+ }
+#endif
+ busy_caller_table_free(tabp);
+ pp->sched.taskq.local.busy.last = NULL;
+ pp->sched.taskq.local.busy.table = NULL;
+ }
+ last_ptp->u.alive.next = pp->sched.taskq.local.first;
+ pp->sched.taskq.local.first = first_ptp;
+}
+
+#ifdef ERTS_HARD_DEBUG_TASK_QUEUES
+
+static void
+chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue)
+{
+ Sint tot_count, tot_table_count;
+ int bix;
+ ErtsPortTask *ptp, *last;
+ ErtsPortTask *first = processing_busy_queue ? execq : pp->sched.taskq.local.busy.first;
+ ErtsPortTask *nb_task_queue = processing_busy_queue ? pp->sched.taskq.local.first : execq;
+ ErtsPortTaskBusyCallerTable *tabp = pp->sched.taskq.local.busy.table;
+ ErtsPortTaskBusyCaller *bcp;
+
+ if (!first) {
+ ASSERT(!tabp);
+ ASSERT(!pp->sched.taskq.local.busy.last);
+ ASSERT(!(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
+ return;
+ }
+
+ ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+ ASSERT(tabp);
+
+ tot_count = 0;
+ ptp = first;
+ while (ptp) {
+ Sint count = 0;
+ Eterm caller = task_caller(ptp);
+ int bix = caller2bix(caller);
+ for (bcp = tabp->bucket[bix]; bcp; bcp = bcp->next)
+ if (bcp->caller == caller)
+ break;
+ ASSERT(bcp && bcp->caller == caller);
+
+ ASSERT(bcp->last);
+ while (1) {
+ ErtsPortTask *ptp2;
+
+ ASSERT(caller == task_caller(ptp));
+ count++;
+ tot_count++;
+ last = ptp;
+
+ for (ptp2 = nb_task_queue; ptp2; ptp2 = ptp2->u.alive.next) {
+ ASSERT(ptp != ptp2);
+ }
+
+ if (ptp == bcp->last)
+ break;
+ ptp = ptp->u.alive.next;
+ }
+
+ ASSERT(count == bcp->count);
+ ptp = ptp->u.alive.next;
+ }
+
+ tot_table_count = 0;
+ for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) {
+ for (bcp = tabp->bucket[bix]; bcp; bcp = bcp->next)
+ tot_table_count += bcp->count;
+ }
+
+ ASSERT(tot_count == tot_table_count);
+
+ ASSERT(last == pp->sched.taskq.local.busy.last);
+}
+
+#endif /* ERTS_HARD_DEBUG_TASK_QUEUES */
/*
* Task handle manipulation.
*/
+static ERTS_INLINE void
+reset_port_task_handle(ErtsPortTaskHandle *pthp)
+{
+ erts_smp_atomic_set_relb(pthp, (erts_aint_t) NULL);
+}
+
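+/*
+ * Task handles are written with release barriers (set_handle() and
+ * reset_port_task_handle()) and read with an acquire barrier
+ * (handle2task()), so a thread that obtains a task pointer through a
+ * handle also observes the task fields written before the handle was
+ * published.
+ */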
static ERTS_INLINE ErtsPortTask *
handle2task(ErtsPortTaskHandle *pthp)
{
- return (ErtsPortTask *) erts_smp_atomic_read_nob(pthp);
+ return (ErtsPortTask *) erts_smp_atomic_read_acqb(pthp);
}
static ERTS_INLINE void
reset_handle(ErtsPortTask *ptp)
{
- if (ptp->handle) {
- ASSERT(ptp == handle2task(ptp->handle));
- erts_smp_atomic_set_nob(ptp->handle, (erts_aint_t) NULL);
+ if (ptp->u.alive.handle) {
+ ASSERT(ptp == handle2task(ptp->u.alive.handle));
+ reset_port_task_handle(ptp->u.alive.handle);
}
}
static ERTS_INLINE void
set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
- ptp->handle = pthp;
+ ptp->u.alive.handle = pthp;
if (pthp) {
- erts_smp_atomic_set_nob(pthp, (erts_aint_t) ptp);
- ASSERT(ptp == handle2task(ptp->handle));
+ erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp);
+ ASSERT(ptp == handle2task(ptp->u.alive.handle));
+ }
+}
+
+
+/*
+ * Busy port queue management
+ */
+
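+/*
+ * check_unset_busy_port_q() is called when
+ * ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q is set. If the amount of queued
+ * command data has dropped below the low limit, the busy port queue
+ * state is cleared and, unless the port is also busy for another
+ * reason (ERTS_PTS_FLG_BUSY_PORT), suspended senders are resumed.
+ */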
+static erts_aint32_t
+check_unset_busy_port_q(Port *pp,
+ erts_aint32_t flags,
+ ErtsPortTaskBusyPortQ *bpq)
+{
+ ErlDrvSizeT qsize, low;
+ int resume_procs = 0;
+
+ ASSERT(bpq);
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+
+ erts_port_task_sched_lock(&pp->sched);
+ qsize = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
+ low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+ if (qsize < low) {
+ erts_aint32_t mask = ~(ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
+ | ERTS_PTS_FLG_BUSY_PORT_Q);
+ flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, mask);
+ if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
+ resume_procs = 1;
+ }
+ else if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) {
+ flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags,
+ ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
+ flags &= ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
+ }
+ erts_port_task_sched_unlock(&pp->sched);
+ if (resume_procs)
+ erts_port_resume_procs(pp);
+
+ return flags;
+}
+
+static ERTS_INLINE void
+aborted_proc2port_data(Port *pp, ErlDrvSizeT size)
+{
+ ErtsPortTaskBusyPortQ *bpq;
+ erts_aint32_t flags;
+ ErlDrvSizeT qsz;
+
+ ASSERT(pp->sched.taskq.bpq);
+
+ if (size == 0)
+ return;
+
+ bpq = pp->sched.taskq.bpq;
+
+ qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+ (erts_aint_t) -size);
+ ASSERT(qsz + size > qsz);
+ flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ ASSERT(pp->sched.taskq.bpq);
+ if ((flags & (ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
+ | ERTS_PTS_FLG_BUSY_PORT_Q)) != ERTS_PTS_FLG_BUSY_PORT_Q)
+ return;
+ if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low))
+ erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+ ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
+}
+
+static ERTS_INLINE void
+dequeued_proc2port_data(Port *pp, ErlDrvSizeT size)
+{
+ ErtsPortTaskBusyPortQ *bpq;
+ erts_aint32_t flags;
+ ErlDrvSizeT qsz;
+
+ ASSERT(pp->sched.taskq.bpq);
+
+ if (size == 0)
+ return;
+
+ bpq = pp->sched.taskq.bpq;
+
+ qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+ (erts_aint_t) -size);
+ ASSERT(qsz + size > qsz);
+ flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q))
+ return;
+ if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->low))
+ check_unset_busy_port_q(pp, flags, bpq);
+}
+
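+/*
+ * enqueue_proc2port_data() accounts the command data size of a
+ * proc-to-port signal against the busy port queue; when the total
+ * passes the high limit the port is marked ERTS_PTS_FLG_BUSY_PORT_Q.
+ * dequeued_proc2port_data() and aborted_proc2port_data() subtract the
+ * size again and trigger a re-check against the low limit.
+ */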
+static ERTS_INLINE erts_aint32_t
+enqueue_proc2port_data(Port *pp,
+ ErtsProc2PortSigData *sigdp,
+ erts_aint32_t flags)
+{
+ ErtsPortTaskBusyPortQ *bpq = pp->sched.taskq.bpq;
+ if (sigdp && bpq) {
+ ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp);
+ if (size) {
+ erts_aint_t asize = erts_smp_atomic_add_read_acqb(&bpq->size,
+ (erts_aint_t) size);
+ ErlDrvSizeT qsz = (ErlDrvSizeT) asize;
+
+ ASSERT(qsz - size < qsz);
+
+ if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q) && qsz > bpq->high) {
+ flags = erts_smp_atomic32_read_bor_acqb(&pp->sched.flags,
+ ERTS_PTS_FLG_BUSY_PORT_Q);
+ flags |= ERTS_PTS_FLG_BUSY_PORT_Q;
+ qsz = (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->size);
+ if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) {
+ flags = (erts_smp_atomic32_read_bor_relb(
+ &pp->sched.flags,
+ ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q));
+ flags |= ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
+ }
+ }
+ ASSERT(!(flags & ERTS_PTS_FLG_EXIT));
+ }
+ }
+ return flags;
+}
+
+/*
+ * erl_drv_busy_msgq_limits() is called by drivers either reading or
+ * writing the limits.
+ *
+ * A limit of zero is interpreted as a read-only request (using a
+ * limit of zero would not be useful). Other values are interpreted
+ * as a write-read request.
+ */
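+
+/*
+ * Illustrative usage sketch (the limit values and the 'port' handle
+ * are examples only): a driver that wants the port marked busy once
+ * more than 8 KB of unprocessed command data is queued, and non-busy
+ * again when it drops below 4 KB, could call:
+ *
+ *     ErlDrvSizeT low = 4096, high = 8192;
+ *     erl_drv_busy_msgq_limits(port, &low, &high);
+ *
+ * Passing 0 for a limit leaves that limit unchanged; on return *lowp
+ * and *highp hold the current (possibly adjusted) limits.
+ */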
+
+void
+erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp)
+{
+ Port *pp = erts_drvport2port(dport, NULL);
+ ErtsPortTaskBusyPortQ *bpq = pp->sched.taskq.bpq;
+ int written = 0, resume_procs = 0;
+ ErlDrvSizeT low, high;
+
+ if (!pp || !bpq) {
+ if (lowp)
+ *lowp = ERL_DRV_BUSY_MSGQ_DISABLED;
+ if (highp)
+ *highp = ERL_DRV_BUSY_MSGQ_DISABLED;
+ return;
+ }
+
+ low = lowp ? *lowp : 0;
+ high = highp ? *highp : 0;
+
+ erts_port_task_sched_lock(&pp->sched);
+
+ if (low == ERL_DRV_BUSY_MSGQ_DISABLED
+ || high == ERL_DRV_BUSY_MSGQ_DISABLED) {
+ /* Disable busy msgq feature */
+ erts_aint32_t flags;
+ pp->sched.taskq.bpq = NULL;
+ flags = ~(ERTS_PTS_FLG_BUSY_PORT_Q|ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
+ flags = erts_smp_atomic32_read_band_acqb(&pp->sched.flags, flags);
+ if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
+ resume_procs = 1;
+ }
+ else {
+
+ if (!low)
+ low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+ else {
+ if (bpq->high < low)
+ bpq->high = low;
+ erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+ written = 1;
+ }
+
+ if (!high)
+ high = bpq->high;
+ else {
+ if (low > high) {
+ low = high;
+ erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+ }
+ bpq->high = high;
+ written = 1;
+ }
+
+ if (written) {
+ ErlDrvSizeT size = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
+ if (size > high)
+ erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ ERTS_PTS_FLG_BUSY_PORT_Q);
+ else if (size < low)
+ erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
+ }
+ }
+
+ erts_port_task_sched_unlock(&pp->sched);
+
+ if (resume_procs)
+ erts_port_resume_procs(pp);
+ if (lowp)
+ *lowp = low;
+ if (highp)
+ *highp = high;
+}
+
+/*
+ * No-suspend handles.
+ */
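+
+/*
+ * Proc-to-port signals scheduled with ERTS_PT_FLG_NOSUSPEND keep
+ * their task handles on the port's nosuspend list. If the port is,
+ * or becomes, busy such tasks are aborted with
+ * ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND instead of suspending the
+ * calling process.
+ */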
+
+#ifdef ERTS_SMP
+static void
+free_port_task_handle_list(void *vpthlp)
+{
+ erts_free(ERTS_ALC_T_PT_HNDL_LIST, vpthlp);
+}
+#endif
+
+static void
+schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
+{
+#ifdef ERTS_SMP
+ erts_schedule_thr_prgr_later_op(free_port_task_handle_list,
+ (void *) pthlp,
+ &pthlp->u.release);
+#else
+ erts_free(ERTS_ALC_T_PT_HNDL_LIST, pthlp);
+#endif
+}
+
+static ERTS_INLINE void
+abort_nosuspend_task(Port *pp,
+ ErtsPortTaskType type,
+ ErtsPortTaskTypeData *tdp)
+{
+
+ ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
+
+ if (!pp->sched.taskq.bpq)
+ tdp->psig.callback(NULL,
+ ERTS_PORT_SFLG_INVALID,
+ ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ &tdp->psig.data);
+ else {
+ ErlDrvSizeT size = erts_proc2port_sig_command_data_size(&tdp->psig.data);
+ tdp->psig.callback(NULL,
+ ERTS_PORT_SFLG_INVALID,
+ ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
+ &tdp->psig.data);
+ aborted_proc2port_data(pp, size);
+ }
+}
+
+static ErtsPortTaskHandleList *
+get_free_nosuspend_handles(Port *pp)
+{
+ ErtsPortTaskHandleList *nshp, *last_nshp = NULL;
+
+ ERTS_SMP_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched));
+
+ nshp = pp->sched.taskq.local.busy.nosuspend;
+
+ while (nshp && !erts_port_task_is_scheduled(&nshp->handle)) {
+ last_nshp = nshp;
+ nshp = nshp->u.next;
+ }
+
+ if (!last_nshp)
+ nshp = NULL;
+ else {
+ nshp = pp->sched.taskq.local.busy.nosuspend;
+ pp->sched.taskq.local.busy.nosuspend = last_nshp->u.next;
+ last_nshp->u.next = NULL;
+ if (!pp->sched.taskq.local.busy.nosuspend)
+ erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_NS_TASKS);
+ }
+ return nshp;
+}
+
+static void
+free_nosuspend_handles(ErtsPortTaskHandleList *free_nshp)
+{
+ while (free_nshp) {
+ ErtsPortTaskHandleList *nshp = free_nshp;
+ free_nshp = free_nshp->u.next;
+ schedule_port_task_handle_list_free(nshp);
}
}
@@ -167,7 +839,6 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
pp->sched.next = NULL;
- pp->sched.prev = runq->ports.end;
if (runq->ports.end) {
ASSERT(runq->ports.start);
runq->ports.end->sched.next = pp;
@@ -177,39 +848,10 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
runq->ports.start = pp;
}
- runq->ports.info.len++;
- if (runq->ports.info.max_len < runq->ports.info.len)
- runq->ports.info.max_len = runq->ports.info.len;
- runq->len++;
- if (runq->max_len < runq->len)
- runq->max_len = runq->len;
runq->ports.end = pp;
ASSERT(runq->ports.start && runq->ports.end);
-}
-
-static ERTS_INLINE void
-dequeue_port(ErtsRunQueue *runq, Port *pp)
-{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- if (pp->sched.next)
- pp->sched.next->sched.prev = pp->sched.prev;
- else {
- ASSERT(runq->ports.end == pp);
- runq->ports.end = pp->sched.prev;
- }
- if (pp->sched.prev)
- pp->sched.prev->sched.next = pp->sched.next;
- else {
- ASSERT(runq->ports.start == pp);
- runq->ports.start = pp->sched.next;
- }
- ASSERT(runq->ports.info.len > 0);
- runq->ports.info.len--;
- ASSERT(runq->len > 0);
- runq->len--;
- ASSERT(runq->ports.start || !runq->ports.end);
- ASSERT(runq->ports.end || !runq->ports.start);
+ erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
}
static ERTS_INLINE Port *
@@ -222,16 +864,11 @@ pop_port(ErtsRunQueue *runq)
}
else {
runq->ports.start = runq->ports.start->sched.next;
- if (runq->ports.start)
- runq->ports.start->sched.prev = NULL;
- else {
+ if (!runq->ports.start) {
ASSERT(runq->ports.end == pp);
runq->ports.end = NULL;
}
- ASSERT(runq->ports.info.len > 0);
- runq->ports.info.len--;
- ASSERT(runq->len > 0);
- runq->len--;
+ erts_smp_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
}
ASSERT(runq->ports.start || !runq->ports.end);
@@ -239,294 +876,423 @@ pop_port(ErtsRunQueue *runq)
return pp;
}
+/*
+ * Task queue operations
+ */
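+
+/*
+ * Each port has three task queues: an "in" queue that other threads
+ * fill under the port task scheduler lock, a local queue that the
+ * executing scheduler works through without that lock, and a local
+ * busy queue holding tasks that must wait until the port is no longer
+ * busy (together with later tasks from the same callers that must
+ * stay ordered behind them).
+ */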
-#ifdef HARD_DEBUG
+static ERTS_INLINE int
+enqueue_task(Port *pp,
+ ErtsPortTask *ptp,
+ ErtsProc2PortSigData *sigdp,
+ ErtsPortTaskHandleList *ns_pthlp,
+ erts_aint32_t *flagsp)
-static void
-check_port_queue(ErtsRunQueue *runq, Port *chk_pp, int inq)
{
- Port *pp;
- Port *last_pp;
- Port *first_pp = runq->ports.start;
- int no_forward = 0, no_backward = 0;
- int found_forward = 0, found_backward = 0;
- if (!first_pp) {
- ASSERT(!runq->ports.end);
- }
+ int res;
+ erts_aint32_t fail_flags = ERTS_PTS_FLG_EXIT;
+ erts_aint32_t flags;
+ ptp->u.alive.next = NULL;
+ if (ns_pthlp)
+ fail_flags |= ERTS_PTS_FLG_BUSY_PORT;
+ erts_port_task_sched_lock(&pp->sched);
+ flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ if (flags & fail_flags)
+ res = 0;
else {
- ASSERT(!first_pp->sched.prev);
- for (pp = first_pp; pp; pp = pp->sched.next) {
- ASSERT(pp->sched.taskq);
- if (pp->sched.taskq->first)
- no_forward++;
- if (chk_pp == pp)
- found_forward = 1;
- if (!pp->sched.prev) {
- ASSERT(first_pp == pp);
- }
- if (!pp->sched.next) {
- ASSERT(runq->ports.end == pp);
- last_pp = pp;
- }
+ if (ns_pthlp) {
+ ns_pthlp->u.next = pp->sched.taskq.local.busy.nosuspend;
+ pp->sched.taskq.local.busy.nosuspend = ns_pthlp;
}
- for (pp = last_pp; pp; pp = pp->sched.prev) {
- ASSERT(pp->sched.taskq);
- if (pp->sched.taskq->last)
- no_backward++;
- if (chk_pp == pp)
- found_backward = 1;
- if (!pp->sched.prev) {
- ASSERT(first_pp == pp);
- }
- if (!pp->sched.next) {
- ASSERT(runq->ports.end == pp);
- }
- check_task_queue(pp->sched.taskq, NULL, 0);
- }
- ASSERT(no_forward == no_backward);
- }
- ASSERT(no_forward == runq->ports.info.len);
- if (chk_pp) {
- if (chk_pp->sched.taskq || chk_pp->sched.exe_taskq) {
- ASSERT(chk_pp->sched.taskq != chk_pp->sched.exe_taskq);
- }
- ASSERT(!chk_pp->sched.taskq || chk_pp->sched.taskq->first);
- if (inq < 0)
- inq = chk_pp->sched.taskq && !chk_pp->sched.exe_taskq;
- if (inq) {
- ASSERT(found_forward && found_backward);
+ if (pp->sched.taskq.in.last) {
+ ASSERT(pp->sched.taskq.in.first);
+ ASSERT(!pp->sched.taskq.in.last->u.alive.next);
+
+ pp->sched.taskq.in.last->u.alive.next = ptp;
}
else {
- ASSERT(!found_forward && !found_backward);
- }
- }
-}
-
-#endif
+ ASSERT(!pp->sched.taskq.in.first);
-/*
- * Task queue operations
- */
-
-static ERTS_INLINE ErtsPortTaskQueue *
-port_taskq_init(ErtsPortTaskQueue *ptqp, Port *pp)
-{
- if (ptqp) {
- ptqp->first = NULL;
- ptqp->last = NULL;
- ptqp->port = pp;
+ pp->sched.taskq.in.first = ptp;
+ }
+ pp->sched.taskq.in.last = ptp;
+ flags = enqueue_proc2port_data(pp, sigdp, flags);
+ res = 1;
}
- return ptqp;
+ erts_port_task_sched_unlock(&pp->sched);
+ *flagsp = flags;
+ return res;
}
static ERTS_INLINE void
-enqueue_task(ErtsPortTaskQueue *ptqp, ErtsPortTask *ptp)
+prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
{
- ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp);
- ptp->next = NULL;
- ptp->prev = ptqp->last;
- ptp->queue = ptqp;
- if (ptqp->last) {
- ASSERT(ptqp->first);
- ptqp->last->next = ptp;
+ erts_aint32_t act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+
+ if (!pp->sched.taskq.local.busy.first || (act & ERTS_PTS_FLG_BUSY_PORT)) {
+ *execqp = pp->sched.taskq.local.first;
+ *processing_busy_q_p = 0;
}
else {
- ASSERT(!ptqp->first);
- ptqp->first = ptp;
+ *execqp = pp->sched.taskq.local.busy.first;
+ *processing_busy_q_p = 1;
+ }
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
+ while (1) {
+ erts_aint32_t new, exp;
+
+ new = exp = act;
+
+ new &= ~ERTS_PTS_FLG_IN_RUNQ;
+ new |= ERTS_PTS_FLG_EXEC;
+
+ act = erts_smp_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp);
+
+ ASSERT(act & ERTS_PTS_FLG_IN_RUNQ);
+
+ if (exp == act)
+ break;
}
- ptqp->last = ptp;
- ERTS_PT_CHK_IN_TASKQ(ptqp, ptp);
}
-static ERTS_INLINE void
-push_task(ErtsPortTaskQueue *ptqp, ErtsPortTask *ptp)
+/* finalize_exec() returns != 0 if the port should remain active */
+static ERTS_INLINE int
+finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
{
- ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp);
- ptp->next = ptqp->first;
- ptp->prev = NULL;
- ptp->queue = ptqp;
- if (ptqp->first) {
- ASSERT(ptqp->last);
- ptqp->first->prev = ptp;
- }
+ erts_aint32_t act;
+
+ if (!processing_busy_q)
+ pp->sched.taskq.local.first = *execq;
else {
- ASSERT(!ptqp->last);
- ptqp->last = ptp;
+ pp->sched.taskq.local.busy.first = *execq;
+ ASSERT(*execq);
+ }
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execq, processing_busy_q);
+
+ *execq = NULL;
+
+ act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+ if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
+ act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
+
+ while (1) {
+ erts_aint32_t new, exp;
+
+ new = exp = act;
+
+ new &= ~ERTS_PTS_FLG_EXEC;
+ if (act & ERTS_PTS_FLG_HAVE_TASKS)
+ new |= ERTS_PTS_FLG_IN_RUNQ;
+
+ act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
+
+ ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+
+ if (exp == act)
+ break;
}
- ptqp->first = ptp;
- ERTS_PT_CHK_IN_TASKQ(ptqp, ptp);
+
+ return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0;
}
-static ERTS_INLINE void
-dequeue_task(ErtsPortTask *ptp)
+static ERTS_INLINE erts_aint32_t
+select_queue_for_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
{
- ASSERT(ptp);
- ASSERT(ptp->queue);
- ERTS_PT_CHK_IN_TASKQ(ptp->queue, ptp);
- if (ptp->next)
- ptp->next->prev = ptp->prev;
- else {
- ASSERT(ptp->queue->last == ptp);
- ptp->queue->last = ptp->prev;
+ erts_aint32_t flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+
+ if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
+ flags = check_unset_busy_port_q(pp, flags, pp->sched.taskq.bpq);
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
+ if (flags & ERTS_PTS_FLG_BUSY_PORT) {
+ if (*processing_busy_q_p) {
+ ErtsPortTask *ptp;
+
+ ptp = pp->sched.taskq.local.busy.first = *execqp;
+ if (!ptp)
+ pp->sched.taskq.local.busy.last = NULL;
+ else if (!(ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY))
+ no_sig_dep_move_from_busyq(pp);
+
+ *execqp = pp->sched.taskq.local.first;
+ *processing_busy_q_p = 0;
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+ }
+
+ return flags;
}
- if (ptp->prev)
- ptp->prev->next = ptp->next;
- else {
- ASSERT(ptp->queue->first == ptp);
- ptp->queue->first = ptp->next;
+
+ /* Not busy */
+
+ if (!*processing_busy_q_p && pp->sched.taskq.local.busy.first) {
+ pp->sched.taskq.local.first = *execqp;
+ *execqp = pp->sched.taskq.local.busy.first;
+ *processing_busy_q_p = 1;
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
}
- ASSERT(ptp->queue->first || !ptp->queue->last);
- ASSERT(ptp->queue->last || !ptp->queue->first);
- ERTS_PT_CHK_NOT_IN_TASKQ(ptp->queue, ptp);
+ return flags;
}
-static ERTS_INLINE ErtsPortTask *
-pop_task(ErtsPortTaskQueue *ptqp)
+/*
+ * check_task_for_exec() returns non-zero if the task
+ * is ok to execute; otherwise 0.
+ */
+static ERTS_INLINE int
+check_task_for_exec(Port *pp,
+ erts_aint32_t flags,
+ ErtsPortTask **execqp,
+ int *processing_busy_q_p,
+ ErtsPortTask *ptp)
{
- ErtsPortTask *ptp = ptqp->first;
- if (!ptp) {
- ASSERT(!ptqp->last);
+
+ if (!*processing_busy_q_p) {
+ /* Processing normal queue */
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, ptp, *processing_busy_q_p);
+
+ if ((flags & ERTS_PTS_FLG_BUSY_PORT)
+ && (ptp->u.alive.flags & ERTS_PT_FLG_WAIT_BUSY)) {
+
+ busy_wait_move_to_busy_queue(pp, ptp);
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
+ return 0;
+ }
+
+ if (pp->sched.taskq.local.busy.last
+ && (ptp->u.alive.flags & ERTS_PT_FLG_SIG_DEP)) {
+
+ int res = !check_sig_dep_move_to_busy_queue(pp, ptp);
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
+ return res;
+ }
+
}
else {
- ERTS_PT_CHK_IN_TASKQ(ptqp, ptp);
- ASSERT(!ptp->prev);
- ptqp->first = ptp->next;
- if (ptqp->first)
- ptqp->first->prev = NULL;
- else {
- ASSERT(ptqp->last == ptp);
- ptqp->last = NULL;
+ /* Processing busy queue */
+
+ ASSERT(!(flags & ERTS_PTS_FLG_BUSY_PORT));
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, ptp, *processing_busy_q_p);
+
+ popped_from_busy_queue(pp, ptp, !*execqp);
+
+ if (!*execqp) {
+ *execqp = pp->sched.taskq.local.first;
+ *processing_busy_q_p = 0;
}
- ASSERT(ptp->queue->first || !ptp->queue->last);
- ASSERT(ptp->queue->last || !ptp->queue->first);
+
+ ERTS_PT_DBG_CHK_TASK_QS(pp, *execqp, *processing_busy_q_p);
+
}
- ERTS_PT_CHK_NOT_IN_TASKQ(ptqp, ptp);
- return ptp;
+
+ return 1;
}
-#ifdef HARD_DEBUG
+static ErtsPortTask *
+fetch_in_queue(Port *pp, ErtsPortTask **execqp)
+{
+ ErtsPortTask *ptp;
+ ErtsPortTaskHandleList *free_nshp = NULL;
-static void
-check_task_queue(ErtsPortTaskQueue *ptqp,
- ErtsPortTask *chk_ptp,
- int inq)
+ erts_port_task_sched_lock(&pp->sched);
+
+ ptp = pp->sched.taskq.in.first;
+ pp->sched.taskq.in.first = NULL;
+ pp->sched.taskq.in.last = NULL;
+ if (ptp)
+ *execqp = ptp->u.alive.next;
+ else
+ erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_TASKS);
+
+ if (pp->sched.taskq.local.busy.nosuspend)
+ free_nshp = get_free_nosuspend_handles(pp);
+
+ erts_port_task_sched_unlock(&pp->sched);
+
+ if (free_nshp)
+ free_nosuspend_handles(free_nshp);
+
+ return ptp;
+}
+
+static ERTS_INLINE ErtsPortTask *
+select_task_for_exec(Port *pp,
+ ErtsPortTask **execqp,
+ int *processing_busy_q_p)
{
ErtsPortTask *ptp;
- ErtsPortTask *last_ptp;
- ErtsPortTask *first_ptp = ptqp->first;
- int found_forward = 0, found_backward = 0;
- if (!first_ptp) {
- ASSERT(!ptqp->last);
- }
- else {
- ASSERT(!first_ptp->prev);
- for (ptp = first_ptp; ptp; ptp = ptp->next) {
- ASSERT(ptp->queue == ptqp);
- if (chk_ptp == ptp)
- found_forward = 1;
- if (!ptp->prev) {
- ASSERT(first_ptp == ptp);
- }
- if (!ptp->next) {
- ASSERT(ptqp->last == ptp);
- last_ptp = ptp;
- }
- }
- for (ptp = last_ptp; ptp; ptp = ptp->prev) {
- ASSERT(ptp->queue == ptqp);
- if (chk_ptp == ptp)
- found_backward = 1;
- if (!ptp->prev) {
- ASSERT(first_ptp == ptp);
- }
- if (!ptp->next) {
- ASSERT(ptqp->last == ptp);
- }
- }
- }
- if (chk_ptp) {
- if (inq) {
- ASSERT(found_forward && found_backward);
- }
+ erts_aint32_t flags;
+
+ flags = select_queue_for_exec(pp, execqp, processing_busy_q_p);
+
+ while (1) {
+ ptp = *execqp;
+ if (ptp)
+ *execqp = ptp->u.alive.next;
else {
- ASSERT(!found_forward && !found_backward);
+ ptp = fetch_in_queue(pp, execqp);
+ if (!ptp)
+ return NULL;
}
+ if (check_task_for_exec(pp, flags, execqp, processing_busy_q_p, ptp))
+ return ptp;
}
}
-#endif
/*
* Abort a scheduled task.
*/
int
-erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp)
+erts_port_task_abort(ErtsPortTaskHandle *pthp)
{
- ErtsRunQueue *runq;
- ErtsPortTaskQueue *ptqp;
+ int res;
ErtsPortTask *ptp;
- Port *pp;
- int port_is_dequeued = 0;
-
- pp = &erts_port[internal_port_index(id)];
- runq = erts_port_runq(pp);
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
+#endif
ptp = handle2task(pthp);
+ if (!ptp)
+ res = -1;
+ else {
+ erts_aint32_t old_state;
+
+#ifdef DEBUG
+ ErtsPortTaskHandle *saved_pthp = ptp->u.alive.handle;
+ ERTS_SMP_READ_MEMORY_BARRIER;
+ old_state = erts_smp_atomic32_read_nob(&ptp->state);
+ if (old_state == ERTS_PT_STATE_SCHEDULED) {
+ ASSERT(saved_pthp == pthp);
+ }
+#endif
- if (!ptp) {
- erts_smp_runq_unlock(runq);
- return 1;
+ old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ ERTS_PT_STATE_ABORTED,
+ ERTS_PT_STATE_SCHEDULED);
+ if (old_state != ERTS_PT_STATE_SCHEDULED)
+	    res = -1; /* Task already aborted, executing, or executed */
+ else {
+
+ reset_port_task_handle(pthp);
+
+ switch (ptp->type) {
+ case ERTS_PORT_TASK_INPUT:
+ case ERTS_PORT_TASK_OUTPUT:
+ case ERTS_PORT_TASK_EVENT:
+ ASSERT(erts_smp_atomic_read_nob(
+ &erts_port_task_outstanding_io_tasks) > 0);
+ erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
+ break;
+ case ERTS_PORT_TASK_PROC_SIG:
+ ERTS_INTERNAL_ERROR("Aborted process to port signal");
+ break;
+ default:
+ break;
+ }
+
+ res = 0;
+ }
}
- ASSERT(ptp->handle == pthp);
- ptqp = ptp->queue;
- ASSERT(pp == ptqp->port);
+#ifdef ERTS_SMP
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- ASSERT(ptqp);
- ASSERT(ptqp->first);
+ return res;
+}
- dequeue_task(ptp);
- reset_handle(ptp);
+void
+erts_port_task_abort_nosuspend_tasks(Port *pp)
+{
+ erts_aint32_t flags;
+ ErtsPortTaskHandleList *abort_list;
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl = ERTS_THR_PRGR_DHANDLE_INVALID;
+#endif
- switch (ptp->type) {
- case ERTS_PORT_TASK_INPUT:
- case ERTS_PORT_TASK_OUTPUT:
- case ERTS_PORT_TASK_EVENT:
- ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) > 0);
- erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
- break;
- default:
- break;
- }
+ erts_port_task_sched_lock(&pp->sched);
+ flags = erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ ~ERTS_PTS_FLG_HAVE_NS_TASKS);
+ abort_list = pp->sched.taskq.local.busy.nosuspend;
+ pp->sched.taskq.local.busy.nosuspend = NULL;
+ erts_port_task_sched_unlock(&pp->sched);
- ASSERT(ptqp == pp->sched.taskq || ptqp == pp->sched.exe_taskq);
+ while (abort_list) {
+#ifdef DEBUG
+ ErtsPortTaskHandle *saved_pthp;
+#endif
+ ErtsPortTaskType type;
+ ErtsPortTaskTypeData td;
+ ErtsPortTaskHandle *pthp;
+ ErtsPortTask *ptp;
+ ErtsPortTaskHandleList *pthlp;
+ erts_aint32_t old_state;
- if (ptqp->first || pp->sched.taskq != ptqp)
- ptqp = NULL;
- else {
- pp->sched.taskq = NULL;
- if (!pp->sched.exe_taskq) {
- dequeue_port(runq, pp);
- ERTS_PORT_NOT_IN_RUNQ(pp);
- port_is_dequeued = 1;
+ pthlp = abort_list;
+ abort_list = pthlp->u.next;
+
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ dhndl = erts_thr_progress_unmanaged_delay();
+#endif
+
+ pthp = &pthlp->handle;
+ ptp = handle2task(pthp);
+ if (!ptp) {
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
+ schedule_port_task_handle_list_free(pthlp);
+ continue;
}
- }
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
+#ifdef DEBUG
+ saved_pthp = ptp->u.alive.handle;
+ ERTS_SMP_READ_MEMORY_BARRIER;
+ old_state = erts_smp_atomic32_read_nob(&ptp->state);
+ if (old_state == ERTS_PT_STATE_SCHEDULED) {
+ ASSERT(saved_pthp == pthp);
+ }
+#endif
- erts_smp_runq_unlock(runq);
-
- if (erts_system_profile_flags.runnable_ports && port_is_dequeued) {
- profile_runnable_port(pp, am_inactive);
- }
+ old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ ERTS_PT_STATE_ABORTED,
+ ERTS_PT_STATE_SCHEDULED);
+ if (old_state != ERTS_PT_STATE_SCHEDULED) {
+ /* Task already aborted, executing, or executed */
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
+ schedule_port_task_handle_list_free(pthlp);
+ continue;
+ }
- port_task_free(ptp);
- if (ptqp)
- port_taskq_free(ptqp);
+ reset_port_task_handle(pthp);
- return 0;
+ type = ptp->type;
+ td = ptp->u.alive.td;
+
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
+ schedule_port_task_handle_list_free(pthlp);
+
+ abort_nosuspend_task(pp, type, &td);
+ }
}
/*
@@ -537,243 +1303,264 @@ int
erts_port_task_schedule(Eterm id,
ErtsPortTaskHandle *pthp,
ErtsPortTaskType type,
- ErlDrvEvent event,
- ErlDrvEventData event_data)
+ ...)
{
+ ErtsProc2PortSigData *sigdp = NULL;
+ ErtsPortTaskHandleList *ns_pthlp = NULL;
+#ifdef ERTS_SMP
+ ErtsRunQueue *xrunq;
+ ErtsThrPrgrDelayHandle dhndl;
+#endif
ErtsRunQueue *runq;
Port *pp;
- ErtsPortTask *ptp;
- int enq_port = 0;
-
- /*
- * NOTE: We might not have the port lock here. We are only
- * allowed to access the 'sched', 'tab_status',
- * and 'id' fields of the port struct while
- * tasks_lock is held.
- */
+ ErtsPortTask *ptp = NULL;
+ erts_aint32_t act, add_flags;
if (pthp && erts_port_task_is_scheduled(pthp)) {
ASSERT(0);
- erts_port_task_abort(id, pthp);
+ erts_port_task_abort(pthp);
}
- ptp = port_task_alloc();
-
ASSERT(is_internal_port(id));
- pp = &erts_port[internal_port_index(id)];
- runq = erts_port_runq(pp);
-
- if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) {
- if (runq)
- erts_smp_runq_unlock(runq);
- return -1;
- }
- ASSERT(!erts_port_task_is_scheduled(pthp));
+#ifdef ERTS_SMP
+ dhndl = erts_thr_progress_unmanaged_delay();
+#endif
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
-
- if (!pp->sched.taskq) {
- pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp);
- enq_port = !pp->sched.exe_taskq;
- }
+ pp = erts_port_lookup_raw(id);
#ifdef ERTS_SMP
- if (enq_port) {
- ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
- if (xrunq) {
- /* Port emigrated ... */
- erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
- erts_smp_runq_unlock(runq);
- runq = xrunq;
- }
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ if (pp)
+ erts_port_inc_refc(pp);
+ erts_thr_progress_unmanaged_continue(dhndl);
}
#endif
- ASSERT(!enq_port || !(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));
+ if (!pp)
+ goto fail;
- ASSERT(pp->sched.taskq);
- ASSERT(ptp);
+ if (type != ERTS_PORT_TASK_PROC_SIG) {
+ ptp = port_task_alloc();
- ptp->type = type;
- ptp->event = event;
- ptp->event_data = event_data;
+ ptp->type = type;
+ ptp->u.alive.flags = 0;
- set_handle(ptp, pthp);
+ erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
+
+ set_handle(ptp, pthp);
+ }
switch (type) {
- case ERTS_PORT_TASK_FREE:
- erl_exit(ERTS_ABORT_EXIT,
- "erts_port_task_schedule(): Cannot schedule free task\n");
- break;
case ERTS_PORT_TASK_INPUT:
- case ERTS_PORT_TASK_OUTPUT:
- case ERTS_PORT_TASK_EVENT:
+ case ERTS_PORT_TASK_OUTPUT: {
+ va_list argp;
+ va_start(argp, type);
+ ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
+ va_end(argp);
+ erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
+ break;
+ }
+ case ERTS_PORT_TASK_EVENT: {
+ va_list argp;
+ va_start(argp, type);
+ ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent);
+ ptp->u.alive.td.io.event_data = va_arg(argp, ErlDrvEventData);
+ va_end(argp);
erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
- /* Fall through... */
+ break;
+ }
+ case ERTS_PORT_TASK_PROC_SIG: {
+ va_list argp;
+ ASSERT(!pthp);
+ va_start(argp, type);
+ sigdp = va_arg(argp, ErtsProc2PortSigData *);
+ ptp = p2p_sig_data_to_task(sigdp);
+ ptp->u.alive.td.psig.callback = va_arg(argp, ErtsProc2PortSigCallback);
+ ptp->u.alive.flags |= va_arg(argp, int);
+ va_end(argp);
+ if (!(ptp->u.alive.flags & ERTS_PT_FLG_NOSUSPEND))
+ set_handle(ptp, pthp);
+ else {
+ ns_pthlp = erts_alloc(ERTS_ALC_T_PT_HNDL_LIST,
+ sizeof(ErtsPortTaskHandleList));
+ set_handle(ptp, &ns_pthlp->handle);
+ }
+ break;
+ }
default:
- enqueue_task(pp->sched.taskq, ptp);
break;
}
-#ifndef ERTS_SMP
- /*
- * When (!enq_port && !pp->sched.exe_taskq) is true in the smp case,
- * the port might not be in the run queue. If this is the case, another
- * thread is in the process of enqueueing the port. This very seldom
- * occur, but do occur and is a valid scenario. Debug info showing this
- * enqueue in progress must be introduced before we can enable (modified
- * versions of these) assertions in the smp case again.
- */
-#if defined(HARD_DEBUG)
- if (pp->sched.exe_taskq || enq_port)
- ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp);
- else
- ERTS_PT_CHK_IN_PORTQ(runq, pp);
-#elif defined(DEBUG)
- if (!enq_port && !pp->sched.exe_taskq) {
- /* We should be in port run q */
- ASSERT(pp->sched.prev || runq->ports.start == pp);
+ if (!enqueue_task(pp, ptp, sigdp, ns_pthlp, &act)) {
+ reset_handle(ptp);
+ if (ns_pthlp && !(act & ERTS_PTS_FLG_EXIT))
+ goto abort_nosuspend;
+ else
+ goto fail;
}
-#endif
-#endif
- if (!enq_port) {
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- erts_smp_runq_unlock(runq);
- }
- else {
- enqueue_port(runq, pp);
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
-
- if (erts_system_profile_flags.runnable_ports) {
- profile_runnable_port(pp, am_active);
+ add_flags = ERTS_PTS_FLG_HAVE_TASKS;
+ if (ns_pthlp)
+ add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS;
+
+ while (1) {
+ erts_aint32_t new, exp;
+
+ if ((act & add_flags) == add_flags
+ && (act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ goto done; /* Done */
+
+ new = exp = act;
+ new |= add_flags;
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ new |= ERTS_PTS_FLG_IN_RUNQ;
+
+ act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
+
+ if (exp == act) {
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ break; /* Need to enqueue port */
+ goto done; /* Done */
}
+ if (act & ERTS_PTS_FLG_EXIT)
+ goto done; /* Died after our task insert... */
+ }
+
+ /* Enqueue port on run-queue */
+
+ runq = erts_port_runq(pp);
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+
+#ifdef ERTS_SMP
+ xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ if (xrunq) {
+ /* Port emigrated ... */
+ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
+ runq = erts_port_runq(pp);
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ }
+#endif
- erts_smp_notify_inc_runq(runq);
+ enqueue_port(runq, pp);
+
+ if (erts_system_profile_flags.runnable_ports) {
+ profile_runnable_port(pp, am_active);
}
+
+ erts_smp_runq_unlock(runq);
+
+ erts_smp_notify_inc_runq(runq);
+
+done:
+
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_port_dec_refc(pp);
+#endif
+
+ return 0;
+
+abort_nosuspend:
+
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_port_dec_refc(pp);
+#endif
+
+ abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td);
+
+ ASSERT(ns_pthlp);
+ erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
+ if (ptp)
+ port_task_free(ptp);
+
return 0;
+
+fail:
+
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_port_dec_refc(pp);
+#endif
+
+ if (ns_pthlp)
+ erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
+
+ if (ptp)
+ port_task_free(ptp);
+
+ return -1;
}
void
erts_port_task_free_port(Port *pp)
{
+ ErtsProcList *suspended;
+ erts_aint32_t flags;
ErtsRunQueue *runq;
- int port_is_dequeued = 0;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
- ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
+ ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
+
runq = erts_port_runq(pp);
- ASSERT(runq);
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- if (pp->sched.exe_taskq) {
- /* I (this thread) am currently executing this port, free it
- when scheduled out... */
- ErtsPortTask *ptp = port_task_alloc();
- erts_smp_port_state_lock(pp);
- pp->status &= ~ERTS_PORT_SFLG_CLOSING;
- pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
- erts_may_save_closed_port(pp);
- erts_smp_port_state_unlock(pp);
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 1);
- ptp->type = ERTS_PORT_TASK_FREE;
- ptp->event = (ErlDrvEvent) -1;
- ptp->event_data = NULL;
- set_handle(ptp, NULL);
- push_task(pp->sched.exe_taskq, ptp);
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- erts_smp_runq_unlock(runq);
- }
- else {
- ErtsPortTaskQueue *ptqp = pp->sched.taskq;
- if (ptqp) {
- dequeue_port(runq, pp);
- ERTS_PORT_NOT_IN_RUNQ(pp);
- port_is_dequeued = 1;
- }
- erts_smp_port_state_lock(pp);
- pp->status &= ~ERTS_PORT_SFLG_CLOSING;
- pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
- erts_may_save_closed_port(pp);
- erts_smp_port_state_unlock(pp);
-#ifdef ERTS_SMP
- erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
-#endif
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
- handle_remaining_tasks(runq, pp); /* May release runq lock */
- ASSERT(!pp->sched.exe_taskq && (!ptqp || !ptqp->first));
- pp->sched.taskq = NULL;
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
-#ifndef ERTS_SMP
- ASSERT(pp->status & ERTS_PORT_SFLG_PORT_DEBUG);
- erts_port_status_set(pp, ERTS_PORT_SFLG_FREE);
-#endif
- erts_smp_runq_unlock(runq);
+ if (!runq)
+ ERTS_INTERNAL_ERROR("Missing run-queue");
+ erts_port_task_sched_lock(&pp->sched);
+ flags = erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+ ERTS_PTS_FLG_EXIT);
+ suspended = pp->suspended;
+ pp->suspended = NULL;
+ erts_port_task_sched_unlock(&pp->sched);
+ erts_atomic32_read_bset_relb(&pp->state,
+ (ERTS_PORT_SFLG_CLOSING
+ | ERTS_PORT_SFLG_FREE),
+ ERTS_PORT_SFLG_FREE);
- if (erts_system_profile_flags.runnable_ports && port_is_dequeued) {
- profile_runnable_port(pp, am_inactive);
- }
+ erts_smp_runq_unlock(runq);
- if (ptqp)
- port_taskq_free(ptqp);
- }
-}
+ if (erts_proclist_fetch(&suspended, NULL))
+ erts_resume_processes(suspended);
-typedef struct {
- ErtsRunQueue *runq;
- int *resp;
-} ErtsPortTaskExeBlockData;
+ if (!(flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ begin_port_cleanup(pp, NULL, NULL);
+}
/*
- * Run all scheduled tasks for the first port in run queue. If
- * new tasks appear while running reschedule port (free task is
- * an exception; it is always handled instantly).
+ * Execute scheduled tasks of a port.
*
* erts_port_task_execute() is called by scheduler threads between
- * scheduleing of processes. Sched lock should be held by caller.
+ * scheduling of processes. Run-queue lock should be held by caller.
*/
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
- int port_was_enqueued = 0;
Port *pp;
- ErtsPortTaskQueue *ptqp;
- ErtsPortTask *ptp;
+ ErtsPortTask *execq;
+ int processing_busy_q;
int res = 0;
int reds = ERTS_PORT_REDS_EXECUTE;
erts_aint_t io_tasks_executed = 0;
int fpe_was_unmasked;
+ erts_aint32_t state;
+ int active;
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- ERTS_PT_CHK_PORTQ(runq);
-
pp = pop_port(runq);
if (!pp) {
res = 0;
goto done;
}
- ERTS_PORT_NOT_IN_RUNQ(pp);
+ erts_smp_runq_unlock(runq);
*curr_port_pp = pp;
-
- ASSERT(pp->sched.taskq);
- ASSERT(pp->sched.taskq->first);
- ptqp = pp->sched.taskq;
- pp->sched.taskq = NULL;
-
- ASSERT(!pp->sched.exe_taskq);
- pp->sched.exe_taskq = ptqp;
-
- if (erts_smp_port_trylock(pp) == EBUSY) {
- erts_smp_runq_unlock(runq);
- erts_smp_port_lock(pp);
- erts_smp_runq_lock(runq);
- }
if (erts_sched_stat.enabled) {
ErtsSchedulerData *esdp = erts_get_scheduler_data();
@@ -790,81 +1577,94 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_spin_unlock(&erts_sched_stat.lock);
}
+ prepare_exec(pp, &execq, &processing_busy_q);
+
+ erts_smp_port_lock(pp);
+
/* trace port scheduling, in */
if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
trace_sched_ports(pp, am_in);
}
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ fpe_was_unmasked = erts_block_fpe();
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
- ptp = pop_task(ptqp);
+ state = erts_atomic32_read_nob(&pp->state);
+ goto begin_handle_tasks;
- fpe_was_unmasked = erts_block_fpe();
+ while (1) {
+ erts_aint32_t task_state;
+ ErtsPortTask *ptp;
- while (ptp) {
- ASSERT(pp->sched.taskq != pp->sched.exe_taskq);
+ ptp = select_task_for_exec(pp, &execq, &processing_busy_q);
+ if (!ptp)
+ break;
+
+ task_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ ERTS_PT_STATE_EXECUTING,
+ ERTS_PT_STATE_SCHEDULED);
+ if (task_state != ERTS_PT_STATE_SCHEDULED) {
+ ASSERT(task_state == ERTS_PT_STATE_ABORTED);
+ goto aborted_port_task;
+ }
reset_handle(ptp);
- erts_smp_runq_unlock(runq);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
ERTS_SMP_CHK_NO_PROC_LOCKS;
ASSERT(pp->drv_ptr);
switch (ptp->type) {
- case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
- reds += ERTS_PORT_REDS_FREE;
- erts_smp_runq_lock(runq);
-
- erts_unblock_fpe(fpe_was_unmasked);
- ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
- if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
- handle_remaining_tasks(runq, pp);
- ASSERT(!ptqp->first
- && (!pp->sched.taskq || !pp->sched.taskq->first));
-#ifdef ERTS_SMP
- erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
-#else
- erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);
-#endif
-
- port_task_free(ptp);
- if (pp->sched.taskq)
- port_taskq_free(pp->sched.taskq);
- pp->sched.taskq = NULL;
-
- goto tasks_done;
case ERTS_PORT_TASK_TIMEOUT:
reds += ERTS_PORT_REDS_TIMEOUT;
- if (!(pp->status & ERTS_PORT_SFLGS_DEAD)) {
+ if (!(state & ERTS_PORT_SFLGS_DEAD)) {
DTRACE_DRIVER(driver_timeout, pp);
(*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
}
break;
case ERTS_PORT_TASK_INPUT:
reds += ERTS_PORT_REDS_INPUT;
- ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_ready_input, pp);
/* NOTE some windows drivers use ->ready_input for input and output */
- (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event);
+ (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
+ ptp->u.alive.td.io.event);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_OUTPUT:
reds += ERTS_PORT_REDS_OUTPUT;
- ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_ready_output, pp);
- (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event);
+ (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
+ ptp->u.alive.td.io.event);
io_tasks_executed++;
break;
case ERTS_PORT_TASK_EVENT:
reds += ERTS_PORT_REDS_EVENT;
- ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
+ ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
DTRACE_DRIVER(driver_event, pp);
- (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data);
+ (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
+ ptp->u.alive.td.io.event,
+ ptp->u.alive.td.io.event_data);
io_tasks_executed++;
break;
+ case ERTS_PORT_TASK_PROC_SIG: {
+ ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
+ ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0);
+ if (!pp->sched.taskq.bpq)
+ reds += ptp->u.alive.td.psig.callback(pp,
+ state,
+ ERTS_PROC2PORT_SIG_EXEC,
+ sigdp);
+ else {
+ ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp);
+ reds += ptp->u.alive.td.psig.callback(pp,
+ state,
+ ERTS_PROC2PORT_SIG_EXEC,
+ sigdp);
+ dequeued_proc2port_data(pp, size);
+ }
+ break;
+ }
case ERTS_PORT_TASK_DIST_CMD:
reds += erts_dist_command(pp, CONTEXT_REDS-reds);
break;
@@ -875,33 +1675,30 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
break;
}
- if ((pp->status & ERTS_PORT_SFLG_CLOSING)
- && erts_is_port_ioq_empty(pp)) {
- reds += ERTS_PORT_REDS_TERMINATE;
- erts_terminate_port(pp);
- }
+ reds += erts_port_driver_callback_epilogue(pp, &state);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ aborted_port_task:
+ schedule_port_task_free(ptp);
-#ifdef ERTS_SMP
- if (pp->xports)
- erts_smp_xports_unlock(pp);
- ASSERT(!pp->xports);
-#endif
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
-
- port_task_free(ptp);
+ begin_handle_tasks:
+ if (state & ERTS_PORT_SFLG_FREE) {
+ reds += ERTS_PORT_REDS_FREE;
+ begin_port_cleanup(pp, &execq, &processing_busy_q);
- erts_smp_runq_lock(runq);
+ break;
+ }
- ptp = pop_task(ptqp);
+ if (reds >= CONTEXT_REDS)
+ break;
}
- tasks_done:
-
erts_unblock_fpe(fpe_was_unmasked);
+ /* trace port scheduling, out */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ trace_sched_ports(pp, am_out);
+ }
+
if (io_tasks_executed) {
ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
>= io_tasks_executed);
@@ -909,203 +1706,264 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
-1*io_tasks_executed);
}
- *curr_port_pp = NULL;
-
#ifdef ERTS_SMP
ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif
- if (!pp->sched.taskq) {
- ASSERT(pp->sched.exe_taskq);
- pp->sched.exe_taskq = NULL;
+ active = finalize_exec(pp, &execq, processing_busy_q);
+
+ erts_port_release(pp);
+
+ *curr_port_pp = NULL;
+
+ erts_smp_runq_lock(runq);
+
+ if (!active) {
+ if (erts_system_profile_flags.runnable_ports)
+ profile_runnable_port(pp, am_inactive);
}
else {
#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
#endif
- ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
- ASSERT(pp->sched.taskq->first);
+ ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
if (!xrunq) {
#endif
enqueue_port(runq, pp);
- ASSERT(pp->sched.exe_taskq);
- pp->sched.exe_taskq = NULL;
/* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
}
else {
/* Port emigrated ... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
+ erts_smp_runq_unlock(runq);
+
+ xrunq = erts_port_runq(pp);
+ ASSERT(xrunq);
enqueue_port(xrunq, pp);
- ASSERT(pp->sched.exe_taskq);
- pp->sched.exe_taskq = NULL;
erts_smp_runq_unlock(xrunq);
erts_smp_notify_inc_runq(xrunq);
+
+ erts_smp_runq_lock(runq);
}
#endif
- port_was_enqueued = 1;
}
+ done:
res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
!= (erts_aint_t) 0);
- ERTS_PT_CHK_PRES_PORTQ(runq, pp);
-
- port_taskq_free(ptqp);
-
- if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
- profile_runnable_port(pp, am_inactive);
- }
+ runq->scheduler->reductions += reds;
- /* trace port scheduling, out */
- if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports(pp, am_out);
- }
-#ifndef ERTS_SMP
- erts_port_release(pp);
-#else
- {
- erts_aint_t refc;
- erts_smp_mtx_unlock(pp->lock);
- refc = erts_smp_atomic_dec_read_nob(&pp->refc);
- ASSERT(refc >= 0);
- if (refc == 0) {
- erts_smp_runq_unlock(runq);
- erts_port_cleanup(pp); /* Might aquire runq lock */
- erts_smp_runq_lock(runq);
- res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
- != (erts_aint_t) 0);
- }
- }
-#endif
-
- done:
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
-
ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);
return res;
}
-/*
- * Handle remaining tasks after a free task.
- */
+#ifdef ERTS_SMP
+static void
+release_port(void *vport)
+{
+ erts_port_dec_refc((Port *) vport);
+}
+#endif
static void
-handle_remaining_tasks(ErtsRunQueue *runq, Port *pp)
+begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
{
- int i;
- ErtsPortTask *ptp;
- ErtsPortTaskQueue *ptqps[] = {pp->sched.exe_taskq, pp->sched.taskq};
+ int i, max;
+ ErtsPortTaskBusyCallerTable *tabp;
+ ErtsPortTask *qs[3];
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
- for (i = 0; i < sizeof(ptqps)/sizeof(ErtsPortTaskQueue *); i++) {
- if (!ptqps[i])
- continue;
- ptp = pop_task(ptqps[i]);
- while (ptp) {
+ /*
+ * Abort remaining tasks...
+ *
+     * We want to process the queues in the following order so as
+     * to preserve signal ordering guarantees:
+ * 1. Local busy queue
+ * 2. Local queue
+ * 3. In queue
+ */
+
+ max = 0;
+ if (!execqp) {
+ if (pp->sched.taskq.local.busy.first)
+ qs[max++] = pp->sched.taskq.local.busy.first;
+ if (pp->sched.taskq.local.first)
+ qs[max++] = pp->sched.taskq.local.first;
+ }
+ else {
+ if (*processing_busy_q_p) {
+ if (*execqp)
+ qs[max++] = *execqp;
+ if (pp->sched.taskq.local.first)
+ qs[max++] = pp->sched.taskq.local.first;
+ }
+ else {
+ if (pp->sched.taskq.local.busy.first)
+ qs[max++] = pp->sched.taskq.local.busy.first;
+ if (*execqp)
+ qs[max++] = *execqp;
+ }
+ *execqp = NULL;
+ *processing_busy_q_p = 0;
+ }
+ pp->sched.taskq.local.busy.first = NULL;
+ pp->sched.taskq.local.busy.last = NULL;
+ pp->sched.taskq.local.first = NULL;
+ tabp = pp->sched.taskq.local.busy.table;
+ if (tabp) {
+ int bix;
+ for (bix = 0; bix < ERTS_PORT_TASK_BUSY_CALLER_TABLE_BUCKETS; bix++) {
+ ErtsPortTaskBusyCaller *bcp = tabp->bucket[bix];
+ while (bcp) {
+ ErtsPortTaskBusyCaller *free_bcp = bcp;
+ bcp = bcp->next;
+ if (free_bcp != &tabp->pre_alloc_busy_caller)
+ erts_free(ERTS_ALC_T_BUSY_CALLER, free_bcp);
+ }
+ }
+
+ busy_caller_table_free(tabp);
+ pp->sched.taskq.local.busy.table = NULL;
+ }
+
+ erts_port_task_sched_lock(&pp->sched);
+ qs[max] = pp->sched.taskq.in.first;
+ pp->sched.taskq.in.first = NULL;
+ pp->sched.taskq.in.last = NULL;
+ erts_port_task_sched_unlock(&pp->sched);
+ if (qs[max])
+ max++;
+
+ for (i = 0; i < max; i++) {
+ while (1) {
+ erts_aint32_t state;
+ ErtsPortTask *ptp = qs[i];
+ if (!ptp)
+ break;
+
+ qs[i] = ptp->u.alive.next;
+
+ /* Normal case here is aborted tasks... */
+ state = erts_smp_atomic32_read_nob(&ptp->state);
+ if (state == ERTS_PT_STATE_ABORTED)
+ goto aborted_port_task;
+
+ state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+ ERTS_PT_STATE_EXECUTING,
+ ERTS_PT_STATE_SCHEDULED);
+ if (state != ERTS_PT_STATE_SCHEDULED) {
+ ASSERT(state == ERTS_PT_STATE_ABORTED);
+ goto aborted_port_task;
+ }
+
reset_handle(ptp);
- erts_smp_runq_unlock(runq);
switch (ptp->type) {
- case ERTS_PORT_TASK_FREE:
case ERTS_PORT_TASK_TIMEOUT:
break;
case ERTS_PORT_TASK_INPUT:
- erts_stale_drv_select(pp->id, ptp->event, DO_READ, 1);
+ erts_stale_drv_select(pp->common.id,
+ ptp->u.alive.td.io.event,
+ DO_READ,
+ 1);
break;
case ERTS_PORT_TASK_OUTPUT:
- erts_stale_drv_select(pp->id, ptp->event, DO_WRITE, 1);
+ erts_stale_drv_select(pp->common.id,
+ ptp->u.alive.td.io.event,
+ DO_WRITE,
+ 1);
break;
case ERTS_PORT_TASK_EVENT:
- erts_stale_drv_select(pp->id, ptp->event, 0, 1);
+ erts_stale_drv_select(pp->common.id,
+ ptp->u.alive.td.io.event,
+ 0,
+ 1);
break;
case ERTS_PORT_TASK_DIST_CMD:
break;
+ case ERTS_PORT_TASK_PROC_SIG: {
+ ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data;
+ if (!pp->sched.taskq.bpq)
+ ptp->u.alive.td.psig.callback(NULL,
+ ERTS_PORT_SFLG_INVALID,
+ ERTS_PROC2PORT_SIG_ABORT_CLOSED,
+ sigdp);
+ else {
+ ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp);
+ ptp->u.alive.td.psig.callback(NULL,
+ ERTS_PORT_SFLG_INVALID,
+ ERTS_PROC2PORT_SIG_ABORT_CLOSED,
+ sigdp);
+ aborted_proc2port_data(pp, size);
+ }
+ break;
+ }
default:
erl_exit(ERTS_ABORT_EXIT,
"Invalid port task type: %d\n",
(int) ptp->type);
}
- port_task_free(ptp);
-
- erts_smp_runq_lock(runq);
- ptp = pop_task(ptqps[i]);
+ aborted_port_task:
+ schedule_port_task_free(ptp);
}
}
- ASSERT(!pp->sched.taskq || !pp->sched.taskq->first);
+ erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+ ~(ERTS_PTS_FLG_HAVE_BUSY_TASKS
+ |ERTS_PTS_FLG_HAVE_TASKS));
+
+ /*
+ * Schedule cleanup of port structure...
+ */
+#ifdef ERTS_SMP
+ erts_schedule_thr_prgr_later_op(release_port,
+ (void *) pp,
+ &pp->common.u.release);
+#else
+ pp->cleanup = 1;
+#endif
}
int
erts_port_is_scheduled(Port *pp)
{
- int res;
- ErtsRunQueue *runq = erts_port_runq(pp);
- res = pp->sched.taskq || pp->sched.exe_taskq;
- erts_smp_runq_unlock(runq);
- return res;
+ erts_aint32_t flags = erts_smp_atomic32_read_acqb(&pp->sched.flags);
+ return (flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)) != 0;
}
#ifdef ERTS_SMP
-ErtsMigrateResult
-erts_port_migrate(Port *prt, int *prt_locked,
- ErtsRunQueue *from_rq, int *from_locked,
- ErtsRunQueue *to_rq, int *to_locked)
+void
+erts_enqueue_port(ErtsRunQueue *rq, Port *pp)
{
- ERTS_SMP_LC_ASSERT(*from_locked);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
-
- if (!*from_locked || !*to_locked) {
- if (from_rq < to_rq) {
- if (!*to_locked) {
- if (!*from_locked)
- erts_smp_runq_lock(from_rq);
- erts_smp_runq_lock(to_rq);
- }
- else if (erts_smp_runq_trylock(from_rq) == EBUSY) {
- erts_smp_runq_unlock(to_rq);
- erts_smp_runq_lock(from_rq);
- erts_smp_runq_lock(to_rq);
- }
- }
- else {
- if (!*from_locked) {
- if (!*to_locked)
- erts_smp_runq_lock(to_rq);
- erts_smp_runq_lock(from_rq);
- }
- else if (erts_smp_runq_trylock(to_rq) == EBUSY) {
- erts_smp_runq_unlock(from_rq);
- erts_smp_runq_lock(to_rq);
- erts_smp_runq_lock(from_rq);
- }
- }
- *to_locked = *from_locked = 1;
- }
- ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
-
- /* Refuse to migrate to a suspended run queue */
- if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED;
- if (from_rq != (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue))
- return ERTS_MIGRATE_FAILED_RUNQ_CHANGED;
- if (!ERTS_PORT_IS_IN_RUNQ(from_rq, prt))
- return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
- dequeue_port(from_rq, prt);
- erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) to_rq);
- enqueue_port(to_rq, prt);
- return ERTS_MIGRATE_SUCCESS;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ ASSERT(rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
+ ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ);
+ enqueue_port(rq, pp);
+}
+
+Port *
+erts_dequeue_port(ErtsRunQueue *rq)
+{
+ Port *pp;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+ pp = pop_port(rq);
+ ASSERT(!pp
+ || rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
+ ASSERT(!pp || (erts_smp_atomic32_read_nob(&pp->sched.flags)
+ & ERTS_PTS_FLG_IN_RUNQ));
+ return pp;
}
#endif
@@ -1119,5 +1977,5 @@ erts_port_task_init(void)
erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks,
(erts_aint_t) 0);
init_port_task_alloc();
- init_port_taskq_alloc();
+ init_busy_caller_table_alloc();
}
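
The hunks above replace the old run-queue-locked port task queues with per-port task lists whose tasks carry an atomic state word. A task is only executed (or, as here when a port is being freed, cleaned up) by whoever wins the SCHEDULED -> EXECUTING compare-and-swap; a task already marked ABORTED is simply skipped. A minimal sketch of that claim pattern, using C11 atomics as a stand-in for the erts_smp_atomic32_* wrappers (the names below are illustrative only, not part of the patch):

    #include <stdatomic.h>

    enum { PT_STATE_SCHEDULED, PT_STATE_ABORTED, PT_STATE_EXECUTING };

    /* Returns 1 if the caller now owns the task, 0 if it was (or became) aborted. */
    static int claim_task(atomic_int *state)
    {
        int expected = PT_STATE_SCHEDULED;
        if (atomic_load(state) == PT_STATE_ABORTED)
            return 0;                                   /* aborted before we looked */
        if (atomic_compare_exchange_strong(state, &expected, PT_STATE_EXECUTING))
            return 1;                                   /* we won the race */
        return 0;                                       /* lost to a concurrent abort */
    }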
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index d7104e1143..ae6cd69ae2 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2006-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2006-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -27,6 +27,9 @@
#define ERTS_PORT_TASK_H_BASIC_TYPES__
#include "erl_sys_driver.h"
#include "erl_smp.h"
+#define ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_port.h"
+#undef ERL_PORT_GET_PORT_TYPE_ONLY__
typedef erts_smp_atomic_t ErtsPortTaskHandle;
#endif
@@ -43,13 +46,19 @@ typedef erts_smp_atomic_t ErtsPortTaskHandle;
#define ERTS_INCLUDE_SCHEDULER_INTERNALS
#endif
+#define ERTS_PT_FLG_WAIT_BUSY (1 << 0)
+#define ERTS_PT_FLG_SIG_DEP (1 << 1)
+#define ERTS_PT_FLG_NOSUSPEND (1 << 2)
+#define ERTS_PT_FLG_REF (1 << 3)
+#define ERTS_PT_FLG_BAD_OUTPUT (1 << 4)
+
typedef enum {
- ERTS_PORT_TASK_FREE,
ERTS_PORT_TASK_INPUT,
ERTS_PORT_TASK_OUTPUT,
ERTS_PORT_TASK_EVENT,
ERTS_PORT_TASK_TIMEOUT,
- ERTS_PORT_TASK_DIST_CMD
+ ERTS_PORT_TASK_DIST_CMD,
+ ERTS_PORT_TASK_PROC_SIG
} ErtsPortTaskType;
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
@@ -57,19 +66,76 @@ typedef enum {
extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#endif
+#define ERTS_PTS_FLG_IN_RUNQ (((erts_aint32_t) 1) << 0)
+#define ERTS_PTS_FLG_EXEC (((erts_aint32_t) 1) << 1)
+#define ERTS_PTS_FLG_HAVE_TASKS (((erts_aint32_t) 1) << 2)
+#define ERTS_PTS_FLG_EXIT (((erts_aint32_t) 1) << 3)
+#define ERTS_PTS_FLG_BUSY_PORT (((erts_aint32_t) 1) << 4)
+#define ERTS_PTS_FLG_BUSY_PORT_Q (((erts_aint32_t) 1) << 5)
+#define ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q (((erts_aint32_t) 1) << 6)
+#define ERTS_PTS_FLG_HAVE_BUSY_TASKS (((erts_aint32_t) 1) << 7)
+#define ERTS_PTS_FLG_HAVE_NS_TASKS (((erts_aint32_t) 1) << 8)
+#define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9)
+#define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10)
+
+#define ERTS_PTS_FLGS_BUSY \
+ (ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q)
+
+#define ERTS_PTS_FLGS_FORCE_SCHEDULE_OP \
+ (ERTS_PTS_FLG_EXIT \
+ | ERTS_PTS_FLG_HAVE_BUSY_TASKS \
+ | ERTS_PTS_FLG_HAVE_TASKS \
+ | ERTS_PTS_FLG_EXEC \
+ | ERTS_PTS_FLG_FORCE_SCHED)
+
+#define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH 8192
+#define ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW 4096
+
+typedef struct {
+ ErlDrvSizeT high;
+ erts_smp_atomic_t low;
+ erts_smp_atomic_t size;
+} ErtsPortTaskBusyPortQ;
+
typedef struct ErtsPortTask_ ErtsPortTask;
-typedef struct ErtsPortTaskQueue_ ErtsPortTaskQueue;
+typedef struct ErtsPortTaskBusyCallerTable_ ErtsPortTaskBusyCallerTable;
+typedef struct ErtsPortTaskHandleList_ ErtsPortTaskHandleList;
typedef struct {
Port *next;
- Port *prev;
- ErtsPortTaskQueue *taskq;
- ErtsPortTaskQueue *exe_taskq;
+ struct {
+ struct {
+ struct {
+ ErtsPortTask *first;
+ ErtsPortTask *last;
+ ErtsPortTaskBusyCallerTable *table;
+ ErtsPortTaskHandleList *nosuspend;
+ } busy;
+ ErtsPortTask *first;
+ } local;
+ struct {
+ ErtsPortTask *first;
+ ErtsPortTask *last;
+ } in;
+ ErtsPortTaskBusyPortQ *bpq;
+ } taskq;
+ erts_smp_atomic32_t flags;
+#ifdef ERTS_SMP
+ erts_mtx_t mtx;
+#endif
} ErtsPortTaskSched;
ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp);
ERTS_GLB_INLINE int erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp);
-ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp);
+ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
+ ErtsPortTaskBusyPortQ *bpq);
+ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp,
+ Eterm id);
+ERTS_GLB_INLINE void erts_port_task_fini_sched(ErtsPortTaskSched *ptsp);
+ERTS_GLB_INLINE void erts_port_task_sched_lock(ErtsPortTaskSched *ptsp);
+ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp);
+ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp);
+
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void);
#endif
@@ -88,13 +154,75 @@ erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp)
return ((void *) erts_smp_atomic_read_nob(pthp)) != NULL;
}
+ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
+ ErtsPortTaskBusyPortQ *bpq)
+{
+ if (bpq) {
+ erts_aint_t low = (erts_aint_t) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW;
+ erts_smp_atomic_init_nob(&bpq->low, low);
+ bpq->high = (ErlDrvSizeT) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH;
+ erts_smp_atomic_init_nob(&bpq->size, (erts_aint_t) 0);
+ }
+ ptsp->taskq.bpq = bpq;
+}
+
ERTS_GLB_INLINE void
-erts_port_task_init_sched(ErtsPortTaskSched *ptsp)
+erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
{
+#ifdef ERTS_SMP
+ char *lock_str = "port_sched_lock";
+#endif
ptsp->next = NULL;
- ptsp->prev = NULL;
- ptsp->taskq = NULL;
- ptsp->exe_taskq = NULL;
+ ptsp->taskq.local.busy.first = NULL;
+ ptsp->taskq.local.busy.last = NULL;
+ ptsp->taskq.local.busy.table = NULL;
+ ptsp->taskq.local.busy.nosuspend = NULL;
+ ptsp->taskq.local.first = NULL;
+ ptsp->taskq.in.first = NULL;
+ ptsp->taskq.in.last = NULL;
+ erts_smp_atomic32_init_nob(&ptsp->flags, 0);
+#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
+ lock_str = NULL;
+#endif
+ erts_mtx_init_x(&ptsp->mtx, lock_str, instr_id);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_port_task_sched_lock(ErtsPortTaskSched *ptsp)
+{
+#ifdef ERTS_SMP
+ erts_mtx_lock(&ptsp->mtx);
+#endif
+}
+
+ERTS_GLB_INLINE void
+erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp)
+{
+#ifdef ERTS_SMP
+ erts_mtx_unlock(&ptsp->mtx);
+#endif
+}
+
+ERTS_GLB_INLINE int
+erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp)
+{
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ return erts_lc_mtx_is_locked(&ptsp->mtx);
+#else
+ return 0;
+#endif
+}
+
+
+ERTS_GLB_INLINE void
+erts_port_task_fini_sched(ErtsPortTaskSched *ptsp)
+{
+#ifdef ERTS_SMP
+ erts_mtx_destroy(&ptsp->mtx);
+#endif
}
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
@@ -115,21 +243,20 @@ int erts_port_task_execute(ErtsRunQueue *, Port **);
void erts_port_task_init(void);
#endif
-int erts_port_task_abort(Eterm id, ErtsPortTaskHandle *);
+int erts_port_task_abort(ErtsPortTaskHandle *);
+void erts_port_task_abort_nosuspend_tasks(Port *);
+
int erts_port_task_schedule(Eterm,
ErtsPortTaskHandle *,
ErtsPortTaskType,
- ErlDrvEvent,
- ErlDrvEventData);
+ ...);
void erts_port_task_free_port(Port *);
int erts_port_is_scheduled(Port *);
+ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data(void);
+
#ifdef ERTS_SMP
-ErtsMigrateResult erts_port_migrate(Port *,
- int *,
- ErtsRunQueue *,
- int *,
- ErtsRunQueue *,
- int *);
+void erts_enqueue_port(ErtsRunQueue *rq, Port *pp);
+Port *erts_dequeue_port(ErtsRunQueue *rq);
#endif
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#endif /* ERL_PORT_TASK_H__ */
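
The new ErtsPortTaskBusyPortQ above gives each port a byte-counted signal queue with high and low watermarks (8192 and 4096 bytes by default), so the busy state is entered and left with hysteresis instead of toggling on every message. The accounting code itself lives in erl_port_task.c outside these hunks, so the sketch below only illustrates the intended behaviour; the type and helper names are hypothetical:

    #include <stddef.h>

    typedef struct {
        size_t high;   /* e.g. 8192: mark the queue busy at or above this many bytes */
        size_t low;    /* e.g. 4096: clear the busy state below this many bytes */
        size_t size;   /* bytes of enqueued command data */
        int    busy;   /* would correspond to ERTS_PTS_FLG_BUSY_PORT_Q */
    } busy_q_sketch;

    static void busy_q_enqueued(busy_q_sketch *q, size_t bytes)
    {
        q->size += bytes;
        if (!q->busy && q->size >= q->high)
            q->busy = 1;           /* callers sending to the port start suspending */
    }

    static void busy_q_dequeued(busy_q_sketch *q, size_t bytes)
    {
        q->size -= bytes;
        if (q->busy && q->size < q->low)
            q->busy = 0;           /* suspended callers can be resumed */
    }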
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index 34da9cab84..2320b64295 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -437,7 +437,10 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount,
}
break;
case BINARY_DEF:
- {
+ if (header_is_bin_matchstate(*boxed_val(wobj))) {
+ PRINT_STRING(res, fn, arg, "#MatchState");
+ }
+ else {
ProcBin* pb = (ProcBin *) binary_val(wobj);
if (pb->size == 1)
PRINT_STRING(res, fn, arg, "<<1 byte>>");
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 0fa2def5af..aaca4b5f59 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -42,6 +42,10 @@
#include "erl_thr_queue.h"
#include "erl_async.h"
#include "dtrace-wrapper.h"
+#include "erl_ptab.h"
+
+#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
+#define ERTS_DELAYED_WAKEUP_REDUCTIONS ((Uint64) CONTEXT_REDS/2)
#define ERTS_RUNQ_CHECK_BALANCE_REDS_PER_SCHED (2000*CONTEXT_REDS)
#define ERTS_RUNQ_CALL_CHECK_BALANCE_REDS \
@@ -94,34 +98,55 @@
#define HIGH_BIT (1 << PRIORITY_HIGH)
#define NORMAL_BIT (1 << PRIORITY_NORMAL)
#define LOW_BIT (1 << PRIORITY_LOW)
+#define PORT_BIT (1 << ERTS_PORT_PRIO_LEVEL)
-#define ERTS_MAYBE_SAVE_TERMINATING_PROCESS(P) \
-do { \
- ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx)); \
- if (saved_term_procs.end) \
- save_terminating_process((P)); \
-} while (0)
+#define ERTS_EMPTY_RUNQ(RQ) \
+ ((ERTS_RUNQ_FLGS_GET_NOB((RQ)) & ERTS_RUNQ_FLGS_QMASK) == 0 \
+ && (RQ)->misc.start == NULL)
-#define ERTS_EMPTY_RUNQ(RQ) \
- ((RQ)->len == 0 && (RQ)->misc.start == NULL)
+#undef RUNQ_READ_RQ
+#undef RUNQ_SET_RQ
+#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_smp_atomic_read_nob((X)))
+#define RUNQ_SET_RQ(X, RQ) erts_smp_atomic_set_nob((X), (erts_aint_t) (RQ))
+
+#ifdef DEBUG
+# if defined(ARCH_64) && !HALFWORD_HEAP
+# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
+    (RUNQ_SET_RQ((RQP), (0xdeadbeefdead0003LL | ((N) << 4))))
+# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
+do { \
+ ASSERT((RQP) != NULL); \
+ ASSERT(((((Uint) (RQP)) & ((Uint) 0x3))) == ((Uint) 0)); \
+ ASSERT((((Uint) (RQP)) & ~((Uint) 0xffff)) != ((Uint) 0xdeadbeefdead0000LL));\
+} while (0)
+# else
+# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
+ (RUNQ_SET_RQ((RQP), (0xdead0003 | ((N) << 4))))
+# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
+do { \
+ ASSERT((RQP) != NULL); \
+ ASSERT(((((UWord) (RQP)) & ((UWord) 1))) == ((UWord) 0)); \
+ ASSERT((((UWord) (RQP)) & ~((UWord) 0xffff)) != ((UWord) 0xdead0000)); \
+} while (0)
+# endif
+#else
+# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N)
+# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP)
+#endif
#define ERTS_EMPTY_RUNQ_PORTS(RQ) \
- ((RQ)->ports.info.len == 0 && (RQ)->misc.start == NULL)
+ (RUNQ_READ_LEN(&(RQ)->ports.info.len) == 0 && (RQ)->misc.start == NULL)
+
+const Process erts_invalid_process = {{ERTS_INVALID_PID}};
extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-static Sint p_last;
-static Sint p_next;
-static Sint p_serial;
-static Uint p_serial_mask;
-static Uint p_serial_shift;
-
int erts_sched_compact_load;
Uint erts_no_schedulers;
-Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES;
-Uint erts_process_tab_index_mask;
+
+ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
int erts_sched_thread_suggested_stack_size = -1;
@@ -188,7 +213,7 @@ static struct {
struct {
int active_runqs;
int reds;
- int max_len;
+ erts_aint32_t max_len;
} prev_rise;
Uint n;
} balance_info;
@@ -208,8 +233,6 @@ erts_sched_stat_t erts_sched_stat;
static erts_tsd_key_t sched_data_key;
#endif
-static erts_smp_mtx_t proc_tab_mtx;
-
static erts_smp_atomic32_t function_calls;
#ifdef ERTS_SMP
@@ -231,7 +254,6 @@ typedef union {
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
-Process** process_tab;
static Uint last_reductions;
static Uint last_exact_reductions;
Uint erts_default_process_flags;
@@ -247,29 +269,6 @@ struct erts_system_profile_flags_t erts_system_profile_flags;
#if ERTS_MAX_PROCESSES > 0x7fffffff
#error "Need to store process_count in another type"
#endif
-static erts_smp_atomic32_t process_count;
-
-typedef struct ErtsTermProcElement_ ErtsTermProcElement;
-struct ErtsTermProcElement_ {
- ErtsTermProcElement *next;
- ErtsTermProcElement *prev;
- int ix;
- union {
- struct {
- Eterm pid;
- SysTimeval spawned;
- SysTimeval exited;
- } process;
- struct {
- SysTimeval time;
- } bif_invocation;
- } u;
-};
-
-static struct {
- ErtsTermProcElement *start;
- ErtsTermProcElement *end;
-} saved_term_procs;
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_op_list,
ErtsMiscOpList,
@@ -331,8 +330,6 @@ do { \
* Local functions.
*/
-static void init_processes_bif(void);
-static void save_terminating_process(Process *p);
static void exec_misc_ops(ErtsRunQueue *);
static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
static int stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp,
@@ -363,6 +360,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_DD;
valid |= ERTS_SSI_AUX_WORK_DD_THR_PRGR;
+ valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
#endif
#if HAVE_ERTS_MSEG
valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
@@ -399,6 +397,31 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq)
}
#endif
+
+static ERTS_INLINE Uint64
+ensure_later_proc_interval(Uint64 interval)
+{
+ return erts_smp_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval);
+}
+
+Uint64
+erts_get_proc_interval(void)
+{
+ return erts_smp_current_interval_nob(erts_ptab_interval(&erts_proc));
+}
+
+Uint64
+erts_ensure_later_proc_interval(Uint64 interval)
+{
+ return ensure_later_proc_interval(interval);
+}
+
+Uint64
+erts_step_proc_interval(void)
+{
+ return erts_smp_step_interval_nob(erts_ptab_interval(&erts_proc));
+}
+
void
erts_pre_init_process(void)
{
@@ -444,11 +467,18 @@ erts_pre_init_process(void)
#endif
}
+#ifdef ERTS_SMP
+static void
+release_process(void *vproc)
+{
+ erts_smp_proc_dec_refc((Process *) vproc);
+}
+#endif
+
/* initialize the scheduler */
void
-erts_init_process(int ncpu)
+erts_init_process(int ncpu, int proc_tab_size)
{
- Uint proc_bits = ERTS_PROC_BITS;
#ifdef ERTS_SMP
erts_disable_proc_not_running_opt = 0;
@@ -457,25 +487,17 @@ erts_init_process(int ncpu)
init_proclist_alloc();
- erts_smp_atomic32_init_nob(&process_count, 0);
-
- if (erts_use_r9_pids_ports) {
- proc_bits = ERTS_R9_PROC_BITS;
- ASSERT(erts_max_processes <= (1 << ERTS_R9_PROC_BITS));
- }
-
- process_tab = (Process**) erts_alloc(ERTS_ALC_T_PROC_TABLE,
- erts_max_processes*sizeof(Process*));
- sys_memzero(process_tab, erts_max_processes * sizeof(Process*));
-
- erts_smp_mtx_init(&proc_tab_mtx, "proc_tab");
- p_last = -1;
- p_next = 0;
- p_serial = 0;
+ erts_ptab_init_table(&erts_proc,
+ ERTS_ALC_T_PROC_TABLE,
+#ifdef ERTS_SMP
+ release_process,
+#else
+ NULL,
+#endif
+ (ErtsPTabElementCommon *) &erts_invalid_process.common,
+ proc_tab_size,
+ "process_table");
- p_serial_shift = erts_fit_in_bits(erts_max_processes - 1);
- p_serial_mask = ((~(~((Uint) 0) << proc_bits)) >> p_serial_shift);
- erts_process_tab_index_mask = ~(~((Uint) 0) << p_serial_shift);
last_reductions = 0;
last_exact_reductions = 0;
erts_default_process_flags = 0;
@@ -485,7 +507,6 @@ void
erts_late_init_process(void)
{
int ix;
- init_processes_bif();
erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat");
for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) {
@@ -733,8 +754,9 @@ static ERTS_INLINE ErtsProcList *
proclist_create(Process *p)
{
ErtsProcList *plp = proclist_alloc();
- plp->pid = p->id;
- plp->started = p->started;
+ ensure_later_proc_interval(p->common.u.alive.started_interval);
+ plp->pid = p->common.id;
+ plp->started_interval = p->common.u.alive.started_interval;
return plp;
}
@@ -744,13 +766,6 @@ proclist_destroy(ErtsProcList *plp)
proclist_free(plp);
}
-static ERTS_INLINE int
-proclist_same(ErtsProcList *plp, Process *p)
-{
- return (plp->pid == p->id
- && erts_cmp_timeval(&plp->started, &p->started) == 0);
-}
-
ErtsProcList *
erts_proclist_create(Process *p)
{
@@ -763,12 +778,6 @@ erts_proclist_destroy(ErtsProcList *plp)
proclist_destroy(plp);
}
-int
-erts_proclist_same(ErtsProcList *plp, Process *p)
-{
- return proclist_same(plp, p);
-}
-
void *
erts_psd_set_init(Process *p, ErtsProcLocks plocks, int ix, void *data)
{
@@ -932,9 +941,15 @@ haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp)
}
static ERTS_INLINE erts_aint32_t
-handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
int jix, max_jix;
+
+ ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY);
+
+ if (!waiting && awdp->delayed_wakeup.next > awdp->esdp->reductions)
+ return aux_work;
+
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP);
ERTS_THR_MEMORY_BARRIER;
@@ -950,11 +965,14 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(sched-1),
aux_work);
}
+ awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY;
return aux_work & ~ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP;
}
static ERTS_INLINE void
-schedule_aux_work_wakeup(ErtsAuxWorkData *awdp, int sched, erts_aint32_t aux_work)
+schedule_aux_work_wakeup(ErtsAuxWorkData *awdp,
+ int sched,
+ erts_aint32_t aux_work)
{
int jix = awdp->delayed_wakeup.sched2jix[sched];
if (jix >= 0) {
@@ -967,7 +985,20 @@ schedule_aux_work_wakeup(ErtsAuxWorkData *awdp, int sched, erts_aint32_t aux_wor
awdp->delayed_wakeup.job[jix].sched = sched;
awdp->delayed_wakeup.job[jix].aux_work = aux_work;
}
- set_aux_work_flags_wakeup_nob(awdp->ssi, ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP);
+
+ if (awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY) {
+ ASSERT(erts_atomic32_read_nob(&awdp->ssi->aux_work)
+ & ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP);
+ }
+ else {
+ awdp->delayed_wakeup.next = (awdp->esdp->reductions
+ + ERTS_DELAYED_WAKEUP_REDUCTIONS);
+
+ ASSERT(!(erts_atomic32_read_nob(&awdp->ssi->aux_work)
+ & ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP));
+ set_aux_work_flags_wakeup_nob(awdp->ssi,
+ ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP);
+ }
}
#endif
@@ -1046,7 +1077,8 @@ misc_aux_work_clean(ErtsThrQ_t *q,
static ERTS_INLINE erts_aint32_t
handle_misc_aux_work(ErtsAuxWorkData *awdp,
- erts_aint32_t aux_work)
+ erts_aint32_t aux_work,
+ int waiting)
{
ErtsThrQ_t *q = &misc_aux_work_queues[awdp->sched_id].q;
@@ -1066,7 +1098,8 @@ handle_misc_aux_work(ErtsAuxWorkData *awdp,
static ERTS_INLINE erts_aint32_t
handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
- erts_aint32_t aux_work)
+ erts_aint32_t aux_work,
+ int waiting)
{
if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->misc.thr_prgr))
@@ -1145,7 +1178,8 @@ erts_notify_check_async_ready_queue(void *vno)
static ERTS_INLINE erts_aint32_t
handle_async_ready(ErtsAuxWorkData *awdp,
- erts_aint32_t aux_work)
+ erts_aint32_t aux_work,
+ int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY);
@@ -1167,7 +1201,8 @@ handle_async_ready(ErtsAuxWorkData *awdp,
static ERTS_INLINE erts_aint32_t
handle_async_ready_clean(ErtsAuxWorkData *awdp,
- erts_aint32_t aux_work)
+ erts_aint32_t aux_work,
+ int waiting)
{
void *thr_prgr_p;
@@ -1200,10 +1235,11 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
}
}
-#endif
+#endif /* ERTS_USE_ASYNC_READY_Q */
+
static ERTS_INLINE erts_aint32_t
-handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
erts_aint32_t res;
@@ -1237,7 +1273,7 @@ erts_alloc_notify_delayed_dealloc(int ix)
}
static ERTS_INLINE erts_aint32_t
-handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
ErtsSchedulerSleepInfo *ssi = awdp->ssi;
int need_thr_progress = 0;
@@ -1275,7 +1311,7 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
}
static ERTS_INLINE erts_aint32_t
-handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
ErtsSchedulerSleepInfo *ssi;
int need_thr_progress;
@@ -1319,6 +1355,77 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR;
}
+/*
+ * Handle scheduled thread progress later operations.
+ */
+#define ERTS_MAX_THR_PRGR_LATER_OPS 50
+
+static ERTS_INLINE erts_aint32_t
+handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ int lops;
+ ErtsThrPrgrVal current = haw_thr_prgr_current(awdp);
+
+ for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
+ ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+ if (!erts_thr_progress_has_reached_this(current, lop->later))
+ return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
+ awdp->later_op.first = lop->next;
+ if (!awdp->later_op.first) {
+ awdp->later_op.last = NULL;
+ }
+ lop->func(lop->data);
+ if (!awdp->later_op.first) {
+ awdp->later_op.last = NULL;
+ unset_aux_work_flags(awdp->ssi,
+ ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP);
+ return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
+ }
+ }
+
+ return aux_work;
+}
+
+#endif /* ERTS_SMP */
+
+void
+erts_schedule_thr_prgr_later_op(void (*later_func)(void *),
+ void *later_data,
+ ErtsThrPrgrLaterOp *lop)
+{
+#ifndef ERTS_SMP
+ later_func(later_data);
+#else
+ ErtsSchedulerData *esdp;
+ ErtsAuxWorkData *awdp;
+ int request_wakeup = 1;
+
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+ awdp = &esdp->aux_work_data;
+
+ lop->func = later_func;
+ lop->data = later_data;
+ lop->later = erts_thr_progress_later(esdp);
+ lop->next = NULL;
+ if (!awdp->later_op.last)
+ awdp->later_op.first = lop;
+ else {
+ ErtsThrPrgrLaterOp *last = awdp->later_op.last;
+ last->next = lop;
+ if (erts_thr_progress_equal(last->later, lop->later))
+ request_wakeup = 0;
+ }
+ awdp->later_op.last = lop;
+ set_aux_work_flags_wakeup_nob(awdp->ssi,
+ ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP);
+ if (request_wakeup)
+ erts_thr_progress_wakeup(esdp, lop->later);
+#endif
+}
+
+#ifdef ERTS_SMP
+
static erts_atomic32_t completed_dealloc_count;
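
The two hunks above add the thread-progress "later op" machinery: erts_schedule_thr_prgr_later_op() appends an operation to the calling scheduler's list and handle_thr_prgr_later_op() runs it once all managed threads have passed the recorded progress value, draining at most ERTS_MAX_THR_PRGR_LATER_OPS per call. This is what the port code earlier in the patch uses to defer release_port(). A hedged usage sketch; only erts_schedule_thr_prgr_later_op and ErtsThrPrgrLaterOp come from the patch, the struct and cleanup functions are invented for illustration:

    /* Defer reclaiming a structure until no scheduler can still reference it.
     * Must run on a scheduler thread (erts_get_scheduler_data() is asserted
     * non-NULL above). */
    typedef struct {
        ErtsThrPrgrLaterOp lop;   /* embedded, so no extra allocation is needed */
        /* ... payload ... */
    } my_thing;                   /* hypothetical */

    static void free_my_thing(void *vp)
    {
        my_thing *tp = (my_thing *) vp;
        /* Runs from handle_thr_prgr_later_op() once thread progress has passed;
           it is now safe to free or reuse tp. */
        release_my_thing(tp);     /* hypothetical cleanup */
    }

    static void retire_my_thing(my_thing *tp)
    {
        /* On non-SMP builds the callback is invoked immediately (see above). */
        erts_schedule_thr_prgr_later_op(free_my_thing, tp, &tp->lop);
    }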
static void
@@ -1403,7 +1510,7 @@ erts_smp_notify_check_children_needed(void)
}
static ERTS_INLINE erts_aint32_t
-handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
erts_check_children();
@@ -1426,42 +1533,37 @@ erts_smp_atomic32_t erts_halt_progress;
int erts_halt_code;
static ERTS_INLINE erts_aint32_t
-handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS);
awdp->esdp->run_queue->halt_in_progress = 1;
if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) {
- int i;
+ int i, max = erts_ptab_max(&erts_port);
erts_smp_atomic32_set_nob(&erts_halt_progress, 1);
- for (i = 0; i < erts_max_ports; i++) {
- Port *prt = &erts_port[i];
- erts_smp_port_state_lock(prt);
- if ((prt->status & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
- | ERTS_PORT_SFLG_HALT))) {
- erts_smp_port_state_unlock(prt);
+ for (i = 0; i < max; i++) {
+ erts_aint32_t state;
+ Port *prt = erts_pix2port(i);
+ if (!prt)
continue;
- }
- /* We need to set the halt flag - get the port lock */
-#ifdef ERTS_SMP
- erts_smp_atomic_inc_nob(&prt->refc);
-#endif
- erts_smp_port_state_unlock(prt);
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(prt->lock);
-#endif
- if ((prt->status & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
- | ERTS_PORT_SFLG_HALT))) {
- erts_port_release(prt);
- continue;
- }
- erts_port_status_bor_set(prt, ERTS_PORT_SFLG_HALT);
- erts_smp_atomic32_inc_nob(&erts_halt_progress);
- if (prt->status & (ERTS_PORT_SFLG_EXITING
- | ERTS_PORT_SFLG_CLOSING)) {
- erts_port_release(prt);
+ state = erts_atomic32_read_acqb(&prt->state);
+ if (state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ | ERTS_PORT_SFLG_HALT))
continue;
+
+ /* We need to set the halt flag - get the port lock */
+
+ erts_smp_port_lock(prt);
+
+ state = erts_atomic32_read_nob(&prt->state);
+ if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ | ERTS_PORT_SFLG_HALT))) {
+ state = erts_atomic32_read_bor_relb(&prt->state,
+ ERTS_PORT_SFLG_HALT);
+ erts_smp_atomic32_inc_nob(&erts_halt_progress);
+ if (!(state & (ERTS_PORT_SFLG_EXITING|ERTS_PORT_SFLG_CLOSING)))
+ erts_deliver_port_exit(prt, prt->common.id, am_killed, 0);
}
- erts_do_exit_port(prt, prt->id, am_killed);
+
erts_port_release(prt);
}
if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) {
@@ -1474,7 +1576,7 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
#if HAVE_ERTS_MSEG
static ERTS_INLINE erts_aint32_t
-handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK);
erts_mseg_cache_check();
@@ -1484,7 +1586,7 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
#endif
static ERTS_INLINE erts_aint32_t
-handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work)
+handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO);
setup_aux_work_timer();
@@ -1498,7 +1600,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
#define HANDLE_AUX_WORK(FLG, HNDLR) \
ignore |= FLG; \
if (aux_work & FLG) { \
- aux_work = HNDLR(awdp, aux_work); \
+ aux_work = HNDLR(awdp, aux_work, waiting); \
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
if (!(aux_work & ~ignore)) { \
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
@@ -1544,6 +1646,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
| ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
handle_fix_alloc);
+#ifdef ERTS_SMP
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP,
+ handle_thr_prgr_later_op);
+#endif
+
#if ERTS_USE_ASYNC_READY_Q
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY,
handle_async_ready);
@@ -1734,8 +1841,8 @@ sched_waiting_sys(Uint no, ErtsRunQueue *rq)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
ASSERT(rq->waiting >= 0);
- rq->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
+ (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
rq->waiting++;
rq->waiting *= -1;
rq->woken = 0;
@@ -1811,8 +1918,8 @@ static ERTS_INLINE void
sched_waiting(Uint no, ErtsRunQueue *rq)
{
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- rq->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
+ (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
if (rq->waiting < 0)
rq->waiting--;
else
@@ -1844,9 +1951,8 @@ ongoing_multi_scheduling_block(void)
static ERTS_INLINE void
empty_runq(ErtsRunQueue *rq)
{
- erts_aint32_t oifls = erts_smp_atomic32_read_band_nob(&rq->info_flags,
- ~ERTS_RUNQ_IFLG_NONEMPTY);
- if (oifls & ERTS_RUNQ_IFLG_NONEMPTY) {
+ Uint32 old_flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_NONEMPTY|ERTS_RUNQ_FLG_PROTECTED);
+ if (old_flags & ERTS_RUNQ_FLG_NONEMPTY) {
#ifdef DEBUG
erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
/*
@@ -1862,9 +1968,8 @@ empty_runq(ErtsRunQueue *rq)
static ERTS_INLINE void
non_empty_runq(ErtsRunQueue *rq)
{
- erts_aint32_t oifls = erts_smp_atomic32_read_bor_nob(&rq->info_flags,
- ERTS_RUNQ_IFLG_NONEMPTY);
- if (!(oifls & ERTS_RUNQ_IFLG_NONEMPTY)) {
+ Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY);
+ if (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY)) {
#ifdef DEBUG
erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues);
/*
@@ -2479,19 +2584,16 @@ try_inc_no_active_runqs(int active)
static ERTS_INLINE int
chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
{
- erts_aint32_t iflgs;
+ Uint32 flags;
ErtsRunQueue *wrq;
if (crq->ix == ix)
return 0;
wrq = ERTS_RUNQ_IX(ix);
- iflgs = erts_smp_atomic32_read_nob(&wrq->info_flags);
- if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
+ flags = ERTS_RUNQ_FLGS_GET(wrq);
+ if (!(flags & (ERTS_RUNQ_FLG_SUSPENDED|ERTS_RUNQ_FLG_NONEMPTY))) {
if (activate) {
- if (try_inc_no_active_runqs(ix+1)) {
- erts_smp_xrunq_lock(crq, wrq);
- wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
- erts_smp_xrunq_unlock(crq, wrq);
- }
+ if (try_inc_no_active_runqs(ix+1))
+ (void) ERTS_RUNQ_FLGS_UNSET(wrq, ERTS_RUNQ_FLG_INACTIVE);
}
wake_scheduler(wrq, 0);
return 1;
@@ -2558,9 +2660,7 @@ erts_sched_notify_check_cpu_bind(void)
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- rq->flags |= ERTS_RUNQ_FLG_CHK_CPU_BIND;
- erts_smp_runq_unlock(rq);
+ (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
wake_scheduler(rq, 0);
}
#else
@@ -2569,409 +2669,547 @@ erts_sched_notify_check_cpu_bind(void)
}
-#ifdef ERTS_SMP
+static ERTS_INLINE void
+enqueue_process(ErtsRunQueue *runq, int prio, Process *p)
+{
+ ErtsRunPrioQueue *rpq;
-ErtsRunQueue *
-erts_prepare_emigrate(ErtsRunQueue *c_rq, ErtsRunQueueInfo *c_rqi, int prio)
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+
+ erts_smp_inc_runq_len(runq, &runq->procs.prio_info[prio], prio);
+
+ if (prio == PRIORITY_LOW) {
+ p->schedule_count = RESCHEDULE_LOW;
+ rpq = &runq->procs.prio[PRIORITY_NORMAL];
+ }
+ else {
+ p->schedule_count = 1;
+ rpq = &runq->procs.prio[prio];
+ }
+
+ p->next = NULL;
+ if (rpq->last)
+ rpq->last->next = p;
+ else
+ rpq->first = p;
+ rpq->last = p;
+}
+
+
+static ERTS_INLINE void
+unqueue_process(ErtsRunQueue *runq,
+ ErtsRunPrioQueue *rpq,
+ ErtsRunQueueInfo *rqi,
+ int prio,
+ Process *prev_proc,
+ Process *proc)
{
- ASSERT(ERTS_CHK_RUNQ_FLG_EMIGRATE(c_rq->flags, prio));
- ASSERT(ERTS_CHK_RUNQ_FLG_EVACUATE(c_rq->flags, prio)
- || c_rqi->len >= c_rqi->migrate.limit.this);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- while (1) {
- ErtsRunQueue *n_rq = c_rqi->migrate.runq;
- ERTS_DBG_VERIFY_VALID_RUNQP(n_rq);
- erts_smp_xrunq_lock(c_rq, n_rq);
-
- /*
- * erts_smp_xrunq_lock() may release lock on c_rq! We have
- * to check that we still want to emigrate and emigrate
- * to the same run queue as before.
- */
+ if (prev_proc)
+ prev_proc->next = proc->next;
+ else
+ rpq->first = proc->next;
+ if (!proc->next)
+ rpq->last = prev_proc;
- if (ERTS_CHK_RUNQ_FLG_EMIGRATE(c_rq->flags, prio)) {
- Uint32 force = (ERTS_CHK_RUNQ_FLG_EVACUATE(c_rq->flags, prio)
- | (c_rq->flags & ERTS_RUNQ_FLG_INACTIVE));
- if (force || c_rqi->len > c_rqi->migrate.limit.this) {
- ErtsRunQueueInfo *n_rqi;
- /* We still want to emigrate */
-
- if (n_rq != c_rqi->migrate.runq) {
- /* Ahh... run queue changed; need to do it all over again... */
- erts_smp_runq_unlock(n_rq);
- continue;
- }
- else {
+ if (!rpq->first)
+ rpq->last = NULL;
- if (prio == ERTS_PORT_PRIO_LEVEL)
- n_rqi = &n_rq->ports.info;
- else
- n_rqi = &n_rq->procs.prio_info[prio];
+ erts_smp_dec_runq_len(runq, rqi, prio);
+}
- if (force || (n_rqi->len < c_rqi->migrate.limit.other)) {
- /* emigrate ... */
- return n_rq;
- }
- }
- }
- }
- ASSERT(n_rq != c_rq);
- erts_smp_runq_unlock(n_rq);
- if (!(c_rq->flags & ERTS_RUNQ_FLG_INACTIVE)) {
- /* No more emigrations to this runq */
- ERTS_UNSET_RUNQ_FLG_EMIGRATE(c_rq->flags, prio);
- ERTS_DBG_SET_INVALID_RUNQP(c_rqi->migrate.runq, 0x3);
- }
+static ERTS_INLINE Process *
+dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
+{
+ erts_aint32_t state;
+ int prio;
+ ErtsRunPrioQueue *rpq;
+ ErtsRunQueueInfo *rqi;
+ Process *p;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+
+ ASSERT(PRIORITY_NORMAL == prio_q
+ || PRIORITY_HIGH == prio_q
+ || PRIORITY_MAX == prio_q);
+
+ rpq = &runq->procs.prio[prio_q];
+ p = rpq->first;
+ if (!p)
return NULL;
+
+ ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+
+ state = erts_smp_atomic32_read_nob(&p->state);
+ if (statep)
+ *statep = state;
+
+ prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+
+ rqi = &runq->procs.prio_info[prio];
+
+ if (p)
+ unqueue_process(runq, rpq, rqi, prio, NULL, p);
+
+ return p;
+}
+
+static ERTS_INLINE int
+check_requeue_process(ErtsRunQueue *rq, int prio_q)
+{
+ ErtsRunPrioQueue *rpq = &rq->procs.prio[prio_q];
+ Process *p = rpq->first;
+ if (--p->schedule_count > 0 && p != rpq->last) {
+ /* reschedule */
+ rpq->first = p->next;
+ rpq->last->next = p;
+ rpq->last = p;
+ p->next = NULL;
+ return 1;
}
+ return 0;
+}
+
+#ifdef ERTS_SMP
+
+static ErtsRunQueue *
+check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio)
+{
+ int len;
+ Uint32 f_flags, f_rq_flags;
+ ErtsRunQueue *f_rq;
+
+ f_flags = mp->prio[prio].flags;
+
+ ASSERT(ERTS_CHK_RUNQ_FLG_IMMIGRATE(mp->flags, prio));
+
+ f_rq = mp->prio[prio].runq;
+ if (!f_rq)
+ return NULL;
+
+ f_rq_flags = ERTS_RUNQ_FLGS_GET(f_rq);
+ if (f_rq_flags & ERTS_RUNQ_FLG_PROTECTED)
+ return NULL;
+
+ if (ERTS_CHK_RUNQ_FLG_EVACUATE(f_flags, prio))
+ return f_rq;
+
+ if (f_rq_flags & ERTS_RUNQ_FLG_INACTIVE)
+ return f_rq;
+
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ len = RUNQ_READ_LEN(&c_rq->ports.info.len);
+ else
+ len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len);
+
+ if (len < mp->prio[prio].limit.this) {
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ len = RUNQ_READ_LEN(&f_rq->ports.info.len);
+ else
+ len = RUNQ_READ_LEN(&f_rq->procs.prio_info[prio].len);
+
+ if (len > mp->prio[prio].limit.other)
+ return f_rq;
+ }
+ return NULL;
}
static void
-immigrate(ErtsRunQueue *rq)
+immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
{
- int prio;
+ Uint32 iflags, iflag;
+ erts_smp_runq_unlock(c_rq);
- ASSERT(rq->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK);
+ ASSERT(erts_thr_progress_is_managed_thread());
- for (prio = 0; prio < ERTS_NO_PRIO_LEVELS; prio++) {
- if (ERTS_CHK_RUNQ_FLG_IMMIGRATE(rq->flags, prio)) {
- ErtsRunQueueInfo *rqi = (prio == ERTS_PORT_PRIO_LEVEL
- ? &rq->ports.info
- : &rq->procs.prio_info[prio]);
- ErtsRunQueue *from_rq = rqi->migrate.runq;
- int rq_locked, from_rq_locked;
+ iflags = mp->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
- ERTS_DBG_VERIFY_VALID_RUNQP(from_rq);
+ iflag = iflags & -iflags;
- rq_locked = 1;
- from_rq_locked = 1;
- erts_smp_xrunq_lock(rq, from_rq);
- /*
- * erts_smp_xrunq_lock() may release lock on rq! We have
- * to check that we still want to immigrate from the same
- * run queue as before.
- */
- if (ERTS_CHK_RUNQ_FLG_IMMIGRATE(rq->flags, prio)
- && from_rq == rqi->migrate.runq) {
- ErtsRunQueueInfo *from_rqi = (prio == ERTS_PORT_PRIO_LEVEL
- ? &from_rq->ports.info
- : &from_rq->procs.prio_info[prio]);
- if ((ERTS_CHK_RUNQ_FLG_EVACUATE(rq->flags, prio)
- && ERTS_CHK_RUNQ_FLG_EVACUATE(from_rq->flags, prio)
- && from_rqi->len)
- || (from_rqi->len > rqi->migrate.limit.other
- && rqi->len < rqi->migrate.limit.this)) {
- if (prio == ERTS_PORT_PRIO_LEVEL) {
- Port *prt = from_rq->ports.start;
- if (prt) {
- int prt_locked = 0;
- (void) erts_port_migrate(prt, &prt_locked,
- from_rq, &from_rq_locked,
- rq, &rq_locked);
- if (prt_locked)
- erts_smp_port_unlock(prt);
- }
- }
- else {
- Process *proc;
- ErtsRunPrioQueue *from_rpq;
- from_rpq = (prio == PRIORITY_LOW
- ? &from_rq->procs.prio[PRIORITY_NORMAL]
- : &from_rq->procs.prio[prio]);
- for (proc = from_rpq->first; proc; proc = proc->next)
- if (proc->prio == prio && !proc->bound_runq)
- break;
- if (proc) {
- ErtsProcLocks proc_locks = 0;
- (void) erts_proc_migrate(proc, &proc_locks,
- from_rq, &from_rq_locked,
- rq, &rq_locked);
- if (proc_locks)
- erts_smp_proc_unlock(proc, proc_locks);
- }
+ while (iflag) {
+ ErtsRunQueue *rq;
+ int prio;
+
+ switch (iflag) {
+ case (MAX_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT):
+ prio = PRIORITY_MAX;
+ break;
+ case (HIGH_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT):
+ prio = PRIORITY_HIGH;
+ break;
+ case (NORMAL_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT):
+ prio = PRIORITY_NORMAL;
+ break;
+ case (LOW_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT):
+ prio = PRIORITY_LOW;
+ break;
+ case (PORT_BIT << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT):
+ prio = ERTS_PORT_PRIO_LEVEL;
+ break;
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Invalid immigrate queue mask",
+ __FILE__, __LINE__, __func__);
+ prio = 0;
+ break;
+ }
+
+ iflags &= ~iflag;
+ iflag = iflags & -iflags;
+
+ rq = check_immigration_need(c_rq, mp, prio);
+ if (rq) {
+ erts_smp_runq_lock(rq);
+ if (prio == ERTS_PORT_PRIO_LEVEL) {
+ Port *prt;
+ prt = erts_dequeue_port(rq);
+ if (prt)
+ RUNQ_SET_RQ(&prt->run_queue, c_rq);
+ erts_smp_runq_unlock(rq);
+ if (prt) {
+ /* port might terminate while we have no lock... */
+ rq = erts_port_runq(prt);
+ if (rq) {
+ if (rq != c_rq)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s(): Internal error",
+ __FILE__, __LINE__, __func__);
+ erts_enqueue_port(c_rq, prt);
+ if (!iflag)
+ return; /* done */
+ erts_smp_runq_unlock(c_rq);
}
}
- else {
- ERTS_UNSET_RUNQ_FLG_IMMIGRATE(rq->flags, prio);
- ERTS_DBG_SET_INVALID_RUNQP(rqi->migrate.runq, 0x1);
+ }
+ else {
+ ErtsRunPrioQueue *rpq = &rq->procs.prio[prio == PRIORITY_LOW
+ ? PRIORITY_NORMAL
+ : prio];
+ Process *prev_proc = NULL;
+ Process *proc = rpq->first;
+ int rq_locked = 1;
+
+ while (proc) {
+ erts_aint32_t state;
+ state = erts_smp_atomic32_read_acqb(&proc->state);
+ if (!(ERTS_PSFLG_BOUND & state)
+ && (prio == (int) (ERTS_PSFLG_PRIO_MASK & state))) {
+ ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio];
+ unqueue_process(rq, rpq, rqi, prio, prev_proc, proc);
+ erts_smp_runq_unlock(rq);
+ RUNQ_SET_RQ(&proc->run_queue, c_rq);
+ rq_locked = 0;
+
+ erts_smp_runq_lock(c_rq);
+ enqueue_process(c_rq, prio, proc);
+ if (!iflag)
+ return; /* done */
+ erts_smp_runq_unlock(c_rq);
+ break;
+ }
+ prev_proc = proc;
+ proc = proc->next;
}
+ if (rq_locked)
+ erts_smp_runq_unlock(rq);
}
- if (from_rq_locked)
- erts_smp_runq_unlock(from_rq);
- if (!rq_locked)
- erts_smp_runq_lock(rq);
}
}
+
+ erts_smp_runq_lock(c_rq);
+}
+
+static ERTS_INLINE void
+suspend_run_queue(ErtsRunQueue *rq)
+{
+ erts_smp_atomic32_read_bor_nob(&rq->scheduler->ssi->flags,
+ ERTS_SSI_FLG_SUSPENDED);
+ (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED);
+
+ wake_scheduler(rq, 0);
+}
+
+static void scheduler_ix_resume_wake(Uint ix);
+
+static ERTS_INLINE void
+resume_run_queue(ErtsRunQueue *rq)
+{
+ int pix;
+
+ erts_smp_runq_lock(rq);
+
+ (void) ERTS_RUNQ_FLGS_READ_BSET(rq,
+ (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_SUSPENDED),
+ (ERTS_RUNQ_FLG_OUT_OF_WORK
+ | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK));
+
+ rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
+ for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
+ rq->procs.prio_info[pix].max_len = 0;
+ rq->procs.prio_info[pix].reds = 0;
+ }
+ rq->ports.info.max_len = 0;
+ rq->ports.info.reds = 0;
+ rq->max_len = 0;
+
+ erts_smp_runq_unlock(rq);
+
+ scheduler_ix_resume_wake(rq->ix);
}
+typedef struct {
+ Process *first;
+ Process *last;
+} ErtsStuckBoundProcesses;
+
static void
-evacuate_run_queue(ErtsRunQueue *evac_rq, ErtsRunQueue *rq)
+schedule_bound_processes(ErtsRunQueue *rq,
+ ErtsStuckBoundProcesses *sbpp)
{
- Port *prt;
- int notify_to_rq = 0;
- int prio;
- int prt_locked = 0;
- int rq_locked = 0;
- int evac_rq_locked = 1;
- ErtsMigrateResult mres;
+ Process *proc, *next;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- erts_smp_runq_lock(evac_rq);
+ proc = sbpp->first;
+ while (proc) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
+ next = proc->next;
+ enqueue_process(rq, (int) (ERTS_PSFLG_PRIO_MASK & state), proc);
+ proc = next;
+ }
+}
- erts_smp_atomic32_read_bor_nob(&evac_rq->scheduler->ssi->flags,
- ERTS_SSI_FLG_SUSPENDED);
+static void
+evacuate_run_queue(ErtsRunQueue *rq,
+ ErtsStuckBoundProcesses *sbpp)
+{
+ int prio_q;
+ ErtsRunQueue *to_rq;
+ ErtsMigrationPaths *mps;
+ ErtsMigrationPath *mp;
- evac_rq->flags &= ~ERTS_RUNQ_FLGS_IMMIGRATE_QMASK;
- evac_rq->flags |= (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
- | ERTS_RUNQ_FLGS_EVACUATE_QMASK
- | ERTS_RUNQ_FLG_SUSPENDED);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- erts_smp_atomic32_read_bor_nob(&evac_rq->info_flags, ERTS_RUNQ_IFLG_SUSPENDED);
- /*
- * Need to set up evacuation paths first since we
- * may release the run queue lock on evac_rq
- * when evacuating.
- */
- evac_rq->misc.evac_runq = rq;
- evac_rq->ports.info.migrate.runq = rq;
- for (prio = 0; prio < ERTS_NO_PROC_PRIO_LEVELS; prio++)
- evac_rq->procs.prio_info[prio].migrate.runq = rq;
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
/* Evacuate scheduled misc ops */
- if (evac_rq->misc.start) {
- rq_locked = 1;
- erts_smp_xrunq_lock(evac_rq, rq);
- if (rq->misc.end)
- rq->misc.end->next = evac_rq->misc.start;
+ if (rq->misc.start) {
+ ErtsMiscOpList *start, *end;
+
+ to_rq = mp->misc_evac_runq;
+ if (!to_rq)
+ return;
+
+ start = rq->misc.start;
+ end = rq->misc.end;
+ rq->misc.start = NULL;
+ rq->misc.end = NULL;
+ erts_smp_runq_unlock(rq);
+
+ erts_smp_runq_lock(to_rq);
+ if (to_rq->misc.end)
+ to_rq->misc.end->next = start;
else
- rq->misc.start = evac_rq->misc.start;
- rq->misc.end = evac_rq->misc.end;
- evac_rq->misc.start = NULL;
- evac_rq->misc.end = NULL;
+ to_rq->misc.start = start;
+
+ to_rq->misc.end = end;
+ erts_smp_runq_unlock(to_rq);
+ smp_notify_inc_runq(to_rq);
+ erts_smp_runq_lock(to_rq);
}
- /* Evacuate scheduled ports */
- prt = evac_rq->ports.start;
- while (prt) {
- mres = erts_port_migrate(prt, &prt_locked,
- evac_rq, &evac_rq_locked,
- rq, &rq_locked);
- if (mres == ERTS_MIGRATE_SUCCESS)
- notify_to_rq = 1;
- if (prt_locked)
- erts_smp_port_unlock(prt);
- if (!evac_rq_locked) {
- evac_rq_locked = 1;
- erts_smp_runq_lock(evac_rq);
+ if (rq->ports.start) {
+ Port *prt;
+
+ to_rq = mp->prio[ERTS_PORT_PRIO_LEVEL].runq;
+ if (!to_rq)
+ return;
+
+ /* Evacuate scheduled ports */
+ prt = rq->ports.start;
+ while (prt) {
+ ErtsRunQueue *prt_rq;
+ prt = erts_dequeue_port(rq);
+ RUNQ_SET_RQ(&prt->run_queue, to_rq);
+ erts_smp_runq_unlock(rq);
+ /*
+ * The port might terminate while
+ * we have no lock on it...
+ */
+ prt_rq = erts_port_runq(prt);
+ if (prt_rq) {
+ if (prt_rq != to_rq)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s() internal error\n",
+ __FILE__, __LINE__, __func__);
+ erts_enqueue_port(to_rq, prt);
+ erts_smp_runq_unlock(to_rq);
+ }
+ erts_smp_runq_lock(rq);
+ prt = rq->ports.start;
}
- prt = evac_rq->ports.start;
+ smp_notify_inc_runq(to_rq);
}
/* Evacuate scheduled processes */
- for (prio = 0; prio < ERTS_NO_PROC_PRIO_LEVELS; prio++) {
+ for (prio_q = 0; prio_q < ERTS_NO_PROC_PRIO_QUEUES; prio_q++) {
+ erts_aint32_t state;
Process *proc;
+ int notify = 0;
+ to_rq = NULL;
- switch (prio) {
- case PRIORITY_MAX:
- case PRIORITY_HIGH:
- case PRIORITY_NORMAL:
- proc = evac_rq->procs.prio[prio].first;
- while (proc) {
- ErtsProcLocks proc_locks = 0;
-
- /* Bound processes are stuck... */
- while (proc->bound_runq) {
- proc = proc->next;
- if (!proc)
- goto end_of_proc;
- }
-
- mres = erts_proc_migrate(proc, &proc_locks,
- evac_rq, &evac_rq_locked,
- rq, &rq_locked);
- if (mres == ERTS_MIGRATE_SUCCESS)
- notify_to_rq = 1;
- if (proc_locks)
- erts_smp_proc_unlock(proc, proc_locks);
- if (!evac_rq_locked) {
- erts_smp_runq_lock(evac_rq);
- evac_rq_locked = 1;
- }
+ if (!mp->prio[prio_q].runq)
+ return;
+ if (prio_q == PRIORITY_NORMAL && !mp->prio[PRIORITY_LOW].runq)
+ return;
- proc = evac_rq->procs.prio[prio].first;
+ proc = dequeue_process(rq, prio_q, &state);
+ while (proc) {
+ if (ERTS_PSFLG_BOUND & state) {
+ /* Bound processes get stuck here... */
+ proc->next = NULL;
+ if (sbpp->last)
+ sbpp->last->next = proc;
+ else
+ sbpp->first = proc;
+ sbpp->last = proc;
}
+ else {
+ int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+ erts_smp_runq_unlock(rq);
- end_of_proc:
+ to_rq = mp->prio[prio].runq;
+ RUNQ_SET_RQ(&proc->run_queue, to_rq);
-#ifdef DEBUG
- for (proc = evac_rq->procs.prio[prio].first;
- proc;
- proc = proc->next) {
- ASSERT(proc->bound_runq);
+ erts_smp_runq_lock(to_rq);
+ enqueue_process(to_rq, prio, proc);
+ erts_smp_runq_unlock(to_rq);
+ notify = 1;
+
+ erts_smp_runq_lock(rq);
}
-#endif
- break;
- case PRIORITY_LOW:
- break;
- default:
- ASSERT(!"Invalid process priority");
- break;
+ proc = dequeue_process(rq, prio_q, &state);
}
+ if (notify)
+ smp_notify_inc_runq(to_rq);
}
- if (rq_locked)
- erts_smp_runq_unlock(rq);
-
- if (evac_rq_locked)
- erts_smp_runq_unlock(evac_rq);
-
- if (notify_to_rq)
- smp_notify_inc_runq(rq);
-
- wake_scheduler(evac_rq, 0);
+ if (ERTS_EMPTY_RUNQ(rq))
+ empty_runq(rq);
}
static int
-try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq)
+try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, Uint32 flags)
{
- Process *proc;
- int vrq_locked;
+ Uint32 procs_qmask = flags & ERTS_RUNQ_FLGS_PROCS_QMASK;
+ int max_prio_bit;
+ ErtsRunPrioQueue *rpq;
- if (*rq_lockedp)
- erts_smp_xrunq_lock(rq, vrq);
- else
- erts_smp_runq_lock(vrq);
- vrq_locked = 1;
+ if (*rq_lockedp) {
+ erts_smp_runq_unlock(rq);
+ *rq_lockedp = 0;
+ }
+
+ ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq));
- ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked);
+ erts_smp_runq_lock(vrq);
if (rq->halt_in_progress)
- goto try_steal_port;
+ goto no_procs;
/*
* Check for a runnable process to steal...
*/
- switch (vrq->flags & ERTS_RUNQ_FLGS_PROCS_QMASK) {
- case MAX_BIT:
- case MAX_BIT|HIGH_BIT:
- case MAX_BIT|NORMAL_BIT:
- case MAX_BIT|LOW_BIT:
- case MAX_BIT|HIGH_BIT|NORMAL_BIT:
- case MAX_BIT|HIGH_BIT|LOW_BIT:
- case MAX_BIT|NORMAL_BIT|LOW_BIT:
- case MAX_BIT|HIGH_BIT|NORMAL_BIT|LOW_BIT:
- for (proc = vrq->procs.prio[PRIORITY_MAX].last;
- proc;
- proc = proc->prev) {
- if (!proc->bound_runq)
- break;
- }
- if (proc)
+ while (procs_qmask) {
+ Process *prev_proc;
+ Process *proc;
+
+ max_prio_bit = procs_qmask & -procs_qmask;
+ switch (max_prio_bit) {
+ case MAX_BIT:
+ rpq = &vrq->procs.prio[PRIORITY_MAX];
break;
- case HIGH_BIT:
- case HIGH_BIT|NORMAL_BIT:
- case HIGH_BIT|LOW_BIT:
- case HIGH_BIT|NORMAL_BIT|LOW_BIT:
- for (proc = vrq->procs.prio[PRIORITY_HIGH].last;
- proc;
- proc = proc->prev) {
- if (!proc->bound_runq)
- break;
- }
- if (proc)
+ case HIGH_BIT:
+ rpq = &vrq->procs.prio[PRIORITY_HIGH];
break;
- case NORMAL_BIT:
- case LOW_BIT:
- case NORMAL_BIT|LOW_BIT:
- for (proc = vrq->procs.prio[PRIORITY_NORMAL].last;
- proc;
- proc = proc->prev) {
- if (!proc->bound_runq)
- break;
- }
- if (proc)
+ case NORMAL_BIT:
+ case LOW_BIT:
+ rpq = &vrq->procs.prio[PRIORITY_NORMAL];
break;
- case 0:
- proc = NULL;
- break;
- default:
- ASSERT(!"Invalid queue mask");
- proc = NULL;
- break;
- }
+ case 0:
+ goto no_procs;
+ default:
+ ASSERT(!"Invalid queue mask");
+ goto no_procs;
+ }
- if (proc) {
- ErtsProcLocks proc_locks = 0;
- int res;
- ErtsMigrateResult mres;
- mres = erts_proc_migrate(proc, &proc_locks,
- vrq, &vrq_locked,
- rq, rq_lockedp);
- if (proc_locks)
- erts_smp_proc_unlock(proc, proc_locks);
- res = !0;
- switch (mres) {
- case ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED:
- res = 0;
- case ERTS_MIGRATE_SUCCESS:
- if (vrq_locked)
+ prev_proc = NULL;
+ proc = rpq->first;
+
+ while (proc) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
+ if (!(ERTS_PSFLG_BOUND & state)) {
+ /* Steal process */
+ int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+ ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio];
+ unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc);
erts_smp_runq_unlock(vrq);
- return res;
- default: /* Other failures */
- break;
- }
- }
+ RUNQ_SET_RQ(&proc->run_queue, rq);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked);
+ erts_smp_runq_lock(rq);
+ *rq_lockedp = 1;
+ enqueue_process(rq, prio, proc);
+ return !0;
+ }
+ prev_proc = proc;
+ proc = proc->next;
+ }
- if (!vrq_locked) {
- if (*rq_lockedp)
- erts_smp_xrunq_lock(rq, vrq);
- else
- erts_smp_runq_lock(vrq);
- vrq_locked = 1;
+ procs_qmask &= ~max_prio_bit;
}
- try_steal_port:
+no_procs:
- ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, *rq_lockedp);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(vrq, vrq_locked);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(vrq));
/*
* Check for a runnable port to steal...
*/
- if (vrq->ports.info.len) {
- Port *prt = vrq->ports.end;
- int prt_locked = 0;
- int res;
- ErtsMigrateResult mres;
-
- mres = erts_port_migrate(prt, &prt_locked,
- vrq, &vrq_locked,
- rq, rq_lockedp);
- if (prt_locked)
- erts_smp_port_unlock(prt);
- res = !0;
- switch (mres) {
- case ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED:
- res = 0;
- case ERTS_MIGRATE_SUCCESS:
- if (vrq_locked)
- erts_smp_runq_unlock(vrq);
- return res;
- default: /* Other failures */
- break;
+ if (vrq->ports.start) {
+ ErtsRunQueue *prt_rq;
+ Port *prt = erts_dequeue_port(vrq);
+ RUNQ_SET_RQ(&prt->run_queue, rq);
+ erts_smp_runq_unlock(vrq);
+
+ /*
+ * The port might terminate while
+ * we have no lock on it...
+ */
+
+ prt_rq = erts_port_runq(prt);
+ if (!prt_rq)
+ return 0;
+ else {
+ if (prt_rq != rq)
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:%s() internal error\n",
+ __FILE__, __LINE__, __func__);
+ *rq_lockedp = 1;
+ erts_enqueue_port(rq, prt);
+ return !0;
}
}
- if (vrq_locked)
- erts_smp_runq_unlock(vrq);
+ erts_smp_runq_unlock(vrq);
return 0;
}
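
try_steal_task_from_victim() above walks the victim's priority queues highest priority first by repeatedly isolating the lowest set bit of the queue mask: with MAX_BIT = 1, HIGH_BIT = 2, NORMAL_BIT = 4 and LOW_BIT = 8, the lowest set bit is always the most urgent queue that still has work. A small self-contained illustration of the qmask & -qmask idiom (the demo values are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned qmask = 4 | 8;                       /* normal and low queues non-empty */
        while (qmask) {
            unsigned max_prio_bit = qmask & -qmask;   /* lowest set bit == highest prio */
            printf("visit queue for priority bit 0x%x\n", max_prio_bit);
            qmask &= ~max_prio_bit;                   /* nothing stolen, try next prio */
        }
        return 0;
    }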
@@ -2981,9 +3219,10 @@ static ERTS_INLINE int
check_possible_steal_victim(ErtsRunQueue *rq, int *rq_lockedp, int vix)
{
ErtsRunQueue *vrq = ERTS_RUNQ_IX(vix);
- erts_aint32_t iflgs = erts_smp_atomic32_read_nob(&vrq->info_flags);
- if (iflgs & ERTS_RUNQ_IFLG_NONEMPTY)
- return try_steal_task_from_victim(rq, rq_lockedp, vrq);
+ Uint32 flags = ERTS_RUNQ_FLGS_GET(vrq);
+ if ((flags & (ERTS_RUNQ_FLG_NONEMPTY
+ | ERTS_RUNQ_FLG_PROTECTED)) == ERTS_RUNQ_FLG_NONEMPTY)
+ return try_steal_task_from_victim(rq, rq_lockedp, vrq, flags);
else
return 0;
}
@@ -2993,15 +3232,12 @@ static int
try_steal_task(ErtsRunQueue *rq)
{
int res, rq_locked, vix, active_rqs, blnc_rqs;
+ Uint32 flags;
- /*
- * We are not allowed to steal jobs to this run queue
- * if it is suspended. Note that it might get suspended
- * at any time when we don't have the lock on the run
- * queue.
- */
- if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- return 0;
+ /* Protect jobs we steal from getting stolen from us... */
+ flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_PROTECTED);
+ if (flags & ERTS_RUNQ_FLG_SUSPENDED)
+ return 0; /* go suspend instead... */
res = 0;
rq_locked = 1;
@@ -3120,12 +3356,123 @@ do { \
ASSERT(sum__ == (RQ)->full_reds_history_sum); \
} while (0);
+#define ERTS_PRE_ALLOCED_MPATHS 8
+
+erts_atomic_t erts_migration_paths;
+
+static struct {
+ size_t size;
+ ErtsMigrationPaths *freelist;
+ struct {
+ ErtsMigrationPaths *first;
+ ErtsMigrationPaths *last;
+ } retired;
+} mpaths;
+
+static void
+init_migration_paths(void)
+{
+ int qix, i;
+ char *p;
+ ErtsMigrationPaths *mps;
+
+ mpaths.size = sizeof(ErtsMigrationPaths);
+ mpaths.size += sizeof(ErtsMigrationPath)*(erts_no_schedulers-1);
+ mpaths.size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(mpaths.size);
+
+ p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_LL_MPATHS,
+ (mpaths.size
+ * ERTS_PRE_ALLOCED_MPATHS));
+ mpaths.freelist = NULL;
+ for (i = 0; i < ERTS_PRE_ALLOCED_MPATHS-1; i++) {
+ mps = (ErtsMigrationPaths *) p;
+ mps->next = mpaths.freelist;
+ mpaths.freelist = mps;
+ p += mpaths.size;
+ }
+
+ mps = (ErtsMigrationPaths *) p;
+ mps->block = NULL;
+ for (qix = 0; qix < erts_no_run_queues; qix++) {
+ int pix;
+ mps->mpath[qix].flags = 0;
+ mps->mpath[qix].misc_evac_runq = NULL;
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ mps->mpath[qix].prio[pix].limit.this = -1;
+ mps->mpath[qix].prio[pix].limit.other = -1;
+ mps->mpath[qix].prio[pix].runq = NULL;
+ mps->mpath[qix].prio[pix].flags = 0;
+ }
+ }
+ erts_atomic_init_wb(&erts_migration_paths, (erts_aint_t) mps);
+}
+
+static ERTS_INLINE ErtsMigrationPaths *
+alloc_mpaths(void)
+{
+ void *block;
+ ErtsMigrationPaths *res;
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx));
+
+ res = mpaths.freelist;
+ if (res) {
+ mpaths.freelist = res->next;
+ res->block = NULL;
+ return res;
+ }
+ res = erts_alloc(ERTS_ALC_T_SL_MPATHS,
+ mpaths.size+ERTS_CACHE_LINE_SIZE);
+ block = (void *) res;
+ if (((UWord) res) & ERTS_CACHE_LINE_MASK)
+ res = (ErtsMigrationPaths *) ((((UWord) res) & ~ERTS_CACHE_LINE_MASK)
+ + ERTS_CACHE_LINE_SIZE);
+ res->block = block;
+ return res;
+}
+
+static ERTS_INLINE void
+retire_mpaths(ErtsMigrationPaths *mps)
+{
+ ErtsThrPrgrVal current;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx));
+
+ current = erts_thr_progress_current();
+
+ while (mpaths.retired.first) {
+ ErtsMigrationPaths *tmp = mpaths.retired.first;
+ if (!erts_thr_progress_has_reached_this(current, tmp->thr_prgr))
+ break;
+ mpaths.retired.first = tmp->next;
+ if (tmp->block) {
+ erts_free(ERTS_ALC_T_SL_MPATHS, tmp->block);
+ }
+ else {
+ tmp->next = mpaths.freelist;
+ mpaths.freelist = tmp;
+ }
+ }
+
+ if (!mpaths.retired.first)
+ mpaths.retired.last = NULL;
+
+ mps->thr_prgr = erts_thr_progress_later(NULL);
+ mps->next = NULL;
+
+ if (mpaths.retired.last)
+ mpaths.retired.last->next = mps;
+ else
+ mpaths.retired.first = mps;
+ mpaths.retired.last = mps;
+}
+
static void
check_balance(ErtsRunQueue *c_rq)
{
#if ERTS_MAX_PROCESSES >= (1 << 27)
# error check_balance() assumes ERTS_MAX_PROCESSES < (1 << 27)
#endif
+ ErtsMigrationPaths *new_mpaths, *old_mpaths;
ErtsRunQueueBalance avg = {0};
Sint64 scheds_reds, full_scheds_reds;
int forced, active, current_active, oowc, half_full_scheds, full_scheds,
@@ -3151,9 +3498,9 @@ check_balance(ErtsRunQueue *c_rq)
ERTS_FOREACH_RUNQ(rq,
{
if (rq->waiting)
- rq->flags |= ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK;
+ (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
else
- rq->flags &= ~ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK;
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK);
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
});
@@ -3195,7 +3542,7 @@ check_balance(ErtsRunQueue *c_rq)
ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
erts_smp_runq_lock(rq);
- run_queue_info[qix].flags = rq->flags;
+ run_queue_info[qix].flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
run_queue_info[qix].prio[pix].max_len
= rq->procs.prio_info[pix].max_len;
@@ -3529,47 +3876,27 @@ erts_fprintf(stderr, "--------------------------------\n");
set_no_active_runqs(active);
balance_info.halftime = 1;
- erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+ new_mpaths = alloc_mpaths();
+
+ /* Write migration paths */
- /* Write migration paths and reset balance statistics in all queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
int mqix;
- Uint32 flags;
- ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
- ErtsRunQueueInfo *rqi;
- flags = run_queue_info[qix].flags;
- erts_smp_runq_lock(rq);
- flags |= (rq->flags & ~ERTS_RUNQ_FLGS_MIGRATION_INFO);
- ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK));
- if (rq->waiting)
- flags |= ERTS_RUNQ_FLG_OUT_OF_WORK;
+ Uint32 flags = run_queue_info[qix].flags;
+ ErtsMigrationPath *mp = &new_mpaths->mpath[qix];
- rq->full_reds_history_sum
- = run_queue_info[qix].full_reds_history_sum;
- rq->full_reds_history[freds_hist_ix]
- = run_queue_info[qix].full_reds_history_change;
+ mp->flags = flags;
+ mp->misc_evac_runq = NULL;
- ERTS_DBG_CHK_FULL_REDS_HISTORY(rq);
-
- rq->out_of_work_count = 0;
- rq->flags = flags;
- rq->max_len = rq->len;
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
- rqi = (pix == ERTS_PORT_PRIO_LEVEL
- ? &rq->ports.info
- : &rq->procs.prio_info[pix]);
- rqi->max_len = rqi->len;
- rqi->reds = 0;
if (!(ERTS_CHK_RUNQ_FLG_EMIGRATE(flags, pix)
| ERTS_CHK_RUNQ_FLG_IMMIGRATE(flags, pix))) {
ASSERT(run_queue_info[qix].prio[pix].immigrate_from < 0);
ASSERT(run_queue_info[qix].prio[pix].emigrate_to < 0);
-#ifdef DEBUG
- rqi->migrate.limit.this = -1;
- rqi->migrate.limit.other = -1;
- ERTS_DBG_SET_INVALID_RUNQP(rqi->migrate.runq, 0x2);
-#endif
-
+ mp->prio[pix].limit.this = -1;
+ mp->prio[pix].limit.other = -1;
+ mp->prio[pix].runq = NULL;
+ mp->prio[pix].flags = 0;
}
else if (ERTS_CHK_RUNQ_FLG_EMIGRATE(flags, pix)) {
ASSERT(!ERTS_CHK_RUNQ_FLG_IMMIGRATE(flags, pix));
@@ -3577,11 +3904,12 @@ erts_fprintf(stderr, "--------------------------------\n");
ASSERT(run_queue_info[qix].prio[pix].emigrate_to >= 0);
mqix = run_queue_info[qix].prio[pix].emigrate_to;
- rqi->migrate.limit.this
+ mp->prio[pix].limit.this
= run_queue_info[qix].prio[pix].migration_limit;
- rqi->migrate.limit.other
+ mp->prio[pix].limit.other
= run_queue_info[mqix].prio[pix].migration_limit;
- rqi->migrate.runq = ERTS_RUNQ_IX(mqix);
+ mp->prio[pix].runq = ERTS_RUNQ_IX(mqix);
+ mp->prio[pix].flags = run_queue_info[mqix].flags;
}
else {
ASSERT(ERTS_CHK_RUNQ_FLG_IMMIGRATE(flags, pix));
@@ -3589,24 +3917,142 @@ erts_fprintf(stderr, "--------------------------------\n");
ASSERT(run_queue_info[qix].prio[pix].immigrate_from >= 0);
mqix = run_queue_info[qix].prio[pix].immigrate_from;
- rqi->migrate.limit.this
+ mp->prio[pix].limit.this
= run_queue_info[qix].prio[pix].migration_limit;
- rqi->migrate.limit.other
+ mp->prio[pix].limit.other
= run_queue_info[mqix].prio[pix].migration_limit;
- rqi->migrate.runq = ERTS_RUNQ_IX(mqix);
+ mp->prio[pix].runq = ERTS_RUNQ_IX(mqix);
+ mp->prio[pix].flags = run_queue_info[mqix].flags;
}
}
+ }
+
+ old_mpaths = erts_get_migration_paths_managed();
+
+ /* Keep offline run-queues as is */
+ for (qix = blnc_no_rqs; qix < erts_no_schedulers; qix++) {
+ ErtsMigrationPath *nmp = &new_mpaths->mpath[qix];
+ ErtsMigrationPath *omp = &old_mpaths->mpath[qix];
+
+ nmp->flags = omp->flags;
+ nmp->misc_evac_runq = omp->misc_evac_runq;
+
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ nmp->prio[pix].limit.this = omp->prio[pix].limit.this;
+ nmp->prio[pix].limit.other = omp->prio[pix].limit.other;
+ nmp->prio[pix].runq = omp->prio[pix].runq;
+ nmp->prio[pix].flags = omp->prio[pix].flags;
+ }
+ }
+
+
+ /* Publish new migration paths... */
+ erts_atomic_set_wb(&erts_migration_paths, (erts_aint_t) new_mpaths);
+
+ /* Reset balance statistics in all online queues */
+ for (qix = 0; qix < blnc_no_rqs; qix++) {
+ Uint32 flags = run_queue_info[qix].flags;
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
+
+ erts_smp_runq_lock(rq);
+ ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK));
+ if (rq->waiting)
+ flags |= ERTS_RUNQ_FLG_OUT_OF_WORK;
+
+ rq->full_reds_history_sum
+ = run_queue_info[qix].full_reds_history_sum;
+ rq->full_reds_history[freds_hist_ix]
+ = run_queue_info[qix].full_reds_history_change;
+
+ ERTS_DBG_CHK_FULL_REDS_HISTORY(rq);
+
+ rq->out_of_work_count = 0;
+ (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
+
+ rq->max_len = rq->len;
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ ErtsRunQueueInfo *rqi;
+ rqi = (pix == ERTS_PORT_PRIO_LEVEL
+ ? &rq->ports.info
+ : &rq->procs.prio_info[pix]);
+ erts_smp_reset_max_len(rq, rqi);
+ rqi->reds = 0;
+ }
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
erts_smp_runq_unlock(rq);
}
+ erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0);
+
balance_info.n++;
+ retire_mpaths(old_mpaths);
erts_smp_mtx_unlock(&balance_info.update_mtx);
erts_smp_runq_lock(c_rq);
}
+static void
+change_no_used_runqs(int used)
+{
+ ErtsMigrationPaths *new_mpaths, *old_mpaths;
+ int active, qix;
+ erts_smp_mtx_lock(&balance_info.update_mtx);
+ get_no_runqs(&active, NULL);
+ set_no_used_runqs(used);
+
+ old_mpaths = erts_get_migration_paths_managed();
+ new_mpaths = alloc_mpaths();
+
+ /* Write migration paths... */
+
+ for (qix = 0; qix < used; qix++) {
+ int pix;
+ ErtsMigrationPath *omp = &old_mpaths->mpath[qix];
+ ErtsMigrationPath *nmp = &new_mpaths->mpath[qix];
+
+ nmp->flags = omp->flags & ~ERTS_RUNQ_FLGS_MIGRATION_QMASKS;
+ nmp->misc_evac_runq = NULL;
+
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ nmp->prio[pix].limit.this = -1;
+ nmp->prio[pix].limit.other = -1;
+ nmp->prio[pix].runq = NULL;
+ nmp->prio[pix].flags = 0;
+ }
+ }
+ for (qix = used; qix < erts_no_run_queues; qix++) {
+ int pix;
+ ErtsRunQueue *to_rq = ERTS_RUNQ_IX(qix % used);
+ ErtsMigrationPath *nmp = &new_mpaths->mpath[qix];
+
+ nmp->flags = (ERTS_RUNQ_FLGS_EMIGRATE_QMASK
+ | ERTS_RUNQ_FLGS_EVACUATE_QMASK);
+ nmp->misc_evac_runq = to_rq;
+ for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
+ nmp->prio[pix].limit.this = -1;
+ nmp->prio[pix].limit.other = -1;
+ nmp->prio[pix].runq = to_rq;
+ nmp->prio[pix].flags = 0;
+ }
+ }
+
+ /* ... and publish them. */
+ erts_atomic_set_wb(&erts_migration_paths, (erts_aint_t) new_mpaths);
+
+ retire_mpaths(old_mpaths);
+
+ /* Make sure that we balance soon... */
+ balance_info.forced_check_balance = 1;
+
+ erts_smp_mtx_unlock(&balance_info.update_mtx);
+
+ erts_smp_runq_lock(ERTS_RUNQ_IX(0));
+ ERTS_RUNQ_IX(0)->check_balance_reds = 0;
+ erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
+}
+
+
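change_no_used_runqs above points every run queue at index used and beyond to one of the remaining queues via qix % used, spreading evacuated work round-robin instead of piling it all onto queue 0. A tiny standalone illustration of that mapping (queue counts invented):

    #include <stdio.h>

    /* Where does work go when 8 run queues shrink to 3 in use? */
    int main(void)
    {
        int total = 8, used = 3;        /* counts invented for the example */
        for (int qix = used; qix < total; qix++)
            printf("run queue %d evacuates to run queue %d\n", qix, qix % used);
        /* Prints 3->0, 4->1, 5->2, 6->0, 7->1: round-robin over the used queues. */
        return 0;
    }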
#endif /* #ifdef ERTS_SMP */
Uint
@@ -3674,11 +4120,11 @@ static struct {
int limit;
int dec_shift;
int dec_mask;
- void (*check)(ErtsRunQueue *rq);
+ void (*check)(ErtsRunQueue *rq, Uint32 flags);
} wakeup_other;
static void
-wakeup_other_check(ErtsRunQueue *rq)
+wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
{
int wo_reds = rq->wakeup_other_reds;
if (wo_reds) {
@@ -3696,6 +4142,8 @@ wakeup_other_check(ErtsRunQueue *rq)
if (rq->wakeup_other > wakeup_other.limit) {
int empty_rqs =
erts_smp_atomic32_read_acqb(&no_empty_run_queues);
+ if (flags & ERTS_RUNQ_FLG_PROTECTED)
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
if (empty_rqs != 0)
wake_scheduler_on_empty_runq(rq);
rq->wakeup_other = 0;
@@ -3740,18 +4188,21 @@ wakeup_other_set_limit(void)
}
static void
-wakeup_other_check_legacy(ErtsRunQueue *rq)
+wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags)
{
int wo_reds = rq->wakeup_other_reds;
if (wo_reds) {
- if (rq->len < 2) {
+ erts_aint32_t len = rq->len;
+ if (len < 2) {
rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds;
if (rq->wakeup_other < 0)
rq->wakeup_other = 0;
}
else if (rq->wakeup_other < wakeup_other.limit)
- rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY;
+ rq->wakeup_other += len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY;
else {
+ if (flags & ERTS_RUNQ_FLG_PROTECTED)
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
wake_scheduler_on_empty_runq(rq);
rq->wakeup_other = 0;
@@ -3908,6 +4359,8 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->dd.completed_callback = NULL;
awdp->dd.completed_arg = NULL;
+ awdp->later_op.first = NULL;
+ awdp->later_op.last = NULL;
#endif
#ifdef ERTS_USE_ASYNC_READY_Q
#ifdef ERTS_SMP
@@ -3917,6 +4370,7 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
awdp->async_ready.queue = NULL;
#endif
#ifdef ERTS_SMP
+ awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY;
if (!dawwp) {
awdp->delayed_wakeup.job = NULL;
awdp->delayed_wakeup.sched2jix = NULL;
@@ -3971,7 +4425,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
rq->ix = ix;
- erts_smp_atomic32_init_nob(&rq->info_flags, ERTS_RUNQ_IFLG_NONEMPTY);
/* make sure that the "extra" id corresponds to the scheduler's
* id if the esdp->no <-> ix+1 mapping changes.
@@ -3982,7 +4435,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
rq->waiting = 0;
rq->woken = 0;
- rq->flags = 0;
+ ERTS_RUNQ_FLGS_INIT(rq, ERTS_RUNQ_FLG_NONEMPTY);
rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
rq->full_reds_history_sum = 0;
for (rix = 0; rix < ERTS_FULL_REDS_HISTORY_SIZE; rix++) {
@@ -3996,19 +4449,14 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
rq->wakeup_other_reds = 0;
rq->halt_in_progress = 0;
- rq->procs.len = 0;
rq->procs.pending_exiters = NULL;
rq->procs.context_switches = 0;
rq->procs.reductions = 0;
for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
- rq->procs.prio_info[pix].len = 0;
+ erts_smp_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0);
rq->procs.prio_info[pix].max_len = 0;
rq->procs.prio_info[pix].reds = 0;
- rq->procs.prio_info[pix].migrate.limit.this = 0;
- rq->procs.prio_info[pix].migrate.limit.other = 0;
- ERTS_DBG_SET_INVALID_RUNQP(rq->procs.prio_info[pix].migrate.runq,
- 0x0);
if (pix < ERTS_NO_PROC_PRIO_LEVELS - 1) {
rq->procs.prio[pix].first = NULL;
rq->procs.prio[pix].last = NULL;
@@ -4017,14 +4465,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
rq->misc.start = NULL;
rq->misc.end = NULL;
- rq->misc.evac_runq = NULL;
- rq->ports.info.len = 0;
+ erts_smp_atomic32_init_nob(&rq->ports.info.len, 0);
rq->ports.info.max_len = 0;
rq->ports.info.reds = 0;
- rq->ports.info.migrate.limit.this = 0;
- rq->ports.info.migrate.limit.other = 0;
- rq->ports.info.migrate.runq = NULL;
rq->ports.start = NULL;
rq->ports.end = NULL;
}
@@ -4121,6 +4565,9 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
#ifdef ERTS_SMP
daww_ptr += daww_sz;
#endif
+
+ esdp->reductions = 0;
+
init_sched_wall_time(&esdp->sched_wall_time);
}
@@ -4159,10 +4606,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
balance_info.prev_rise.reds = 0;
balance_info.n = 0;
+ init_migration_paths();
+
if (no_schedulers_online < no_schedulers) {
+ change_no_used_runqs(no_schedulers_online);
for (ix = no_schedulers_online; ix < erts_no_run_queues; ix++)
- evacuate_run_queue(ERTS_RUNQ_IX(ix),
- ERTS_RUNQ_IX(ix % no_schedulers_online));
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
}
schdlr_sspnd.wait_curr_online = no_schedulers_online;
@@ -4204,6 +4653,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
erts_smp_atomic32_init_relb(&erts_halt_progress, -1);
erts_halt_code = 0;
+
+#if !defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+ erts_lc_set_thread_name("scheduler 1");
+#endif
+
}
ErtsRunQueue *
@@ -4225,91 +4679,197 @@ erts_get_scheduler_data(void)
#endif
-static int remove_proc_from_runq(ErtsRunQueue *rq, Process *p, int to_inactive);
+/*
+ * schedule_out_process() returns with c_rq locked.
+ */
+static ERTS_INLINE int
+schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p)
+{
+ erts_aint32_t a, e, n;
+ int res = 0;
+
+ a = state;
+
+ while (1) {
+ n = e = a;
+
+ ASSERT(a & ERTS_PSFLG_RUNNING);
+ ASSERT(!(a & ERTS_PSFLG_IN_RUNQ));
+
+ n &= ~ERTS_PSFLG_RUNNING;
+ if ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE)
+ n |= ERTS_PSFLG_IN_RUNQ;
+ a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (a == e)
+ break;
+ }
+
+ if (!(n & ERTS_PSFLG_IN_RUNQ)) {
+ if (erts_system_profile_flags.runnable_procs)
+ profile_runnable_proc(p, am_inactive);
+ }
+ else {
+ int prio = (int) (ERTS_PSFLG_PRIO_MASK & n);
+ ErtsRunQueue *runq = erts_get_runq_proc(p);
+
+ ASSERT(!(n & ERTS_PSFLG_SUSPENDED));
+
+#ifdef ERTS_SMP
+ if (!(ERTS_PSFLG_BOUND & n)) {
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio);
+ if (new_runq) {
+ RUNQ_SET_RQ(&p->run_queue, new_runq);
+ runq = new_runq;
+ }
+ }
+#endif
+ ASSERT(runq);
+ res = 1;
+
+ erts_smp_runq_lock(runq);
+
+ /* Enqueue the process */
+ enqueue_process(runq, prio, p);
+
+ if (runq == c_rq)
+ return res;
+ erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(runq);
+ }
+ erts_smp_runq_lock(c_rq);
+ return res;
+}
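schedule_out_process clears the RUNNING bit and decides, in the same atomic step, whether the process should be re-enqueued, retrying the compare-and-swap until the state word has not changed between read and write. A stripped-down sketch of that pattern using C11 atomics follows; the flag values are invented and do not match the real ERTS_PSFLG_* encoding.

    #include <stdatomic.h>
    #include <stdint.h>

    /* Invented flag bits for illustration only. */
    #define FLG_RUNNING    (1u << 0)
    #define FLG_ACTIVE     (1u << 1)
    #define FLG_SUSPENDED  (1u << 2)
    #define FLG_IN_RUNQ    (1u << 3)

    /* Clear RUNNING; set IN_RUNQ only if the process is active and not
     * suspended. Returns the new state word so the caller can enqueue. */
    uint32_t sketch_schedule_out(_Atomic uint32_t *state)
    {
        uint32_t expected = atomic_load(state);
        uint32_t desired;
        do {
            desired = expected & ~FLG_RUNNING;
            if ((expected & (FLG_ACTIVE | FLG_SUSPENDED)) == FLG_ACTIVE)
                desired |= FLG_IN_RUNQ;
            /* On failure 'expected' is refreshed with the current value. */
        } while (!atomic_compare_exchange_weak(state, &expected, desired));
        return desired;
    }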
static ERTS_INLINE void
-suspend_process(ErtsRunQueue *rq, Process *p)
+add2runq(Process *p, erts_aint32_t state)
{
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- p->rcount++; /* count number of suspend */
-#ifdef ERTS_SMP
- ASSERT(!(p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING)
- || p == erts_get_current_process());
- ASSERT(p->status != P_RUNNING
- || p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING);
- if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ)
- goto runable;
-#endif
- switch(p->status) {
- case P_SUSPENDED:
- break;
- case P_RUNABLE:
+ int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+ ErtsRunQueue *runq = erts_get_runq_proc(p);
+
#ifdef ERTS_SMP
- runable:
- if (!ERTS_PROC_PENDING_EXIT(p))
+ if (!(ERTS_PSFLG_BOUND & state)) {
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio);
+ if (new_runq) {
+ RUNQ_SET_RQ(&p->run_queue, new_runq);
+ runq = new_runq;
+ }
+ }
#endif
- remove_proc_from_runq(rq, p, 1);
- /* else:
- * leave process in schedq so it will discover the pending exit
- */
- p->rstatus = P_RUNABLE; /* wakeup as runnable */
- break;
- case P_RUNNING:
- p->rstatus = P_RUNABLE; /* wakeup as runnable */
- break;
- case P_WAITING:
- p->rstatus = P_WAITING; /* wakeup as waiting */
- break;
- case P_EXITING:
- return; /* ignore this */
- case P_GARBING:
- case P_FREE:
- erl_exit(1, "bad state in suspend_process()\n");
+ ASSERT(runq);
+
+ erts_smp_runq_lock(runq);
+
+ /* Enqueue the process */
+ enqueue_process(runq, prio, p);
+
+ erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(runq);
+
+}
+
+static ERTS_INLINE void
+schedule_process(Process *p, erts_aint32_t state, int active_enq)
+{
+ erts_aint32_t a = state, n;
+
+ while (1) {
+ erts_aint32_t e;
+ n = e = a;
+
+ if (a & ERTS_PSFLG_FREE)
+ return; /* We don't want to schedule free processes... */
+ n |= ERTS_PSFLG_ACTIVE;
+ if (!(a & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_RUNNING)))
+ n |= ERTS_PSFLG_IN_RUNQ;
+ a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (a == e)
+ break;
+ if (!active_enq && (a & ERTS_PSFLG_ACTIVE))
+ return; /* Someone else activated process ... */
}
- if ((erts_system_profile_flags.runnable_procs) && (p->rcount == 1) && (p->status != P_WAITING)) {
- profile_runnable_proc(p, am_inactive);
+ if (erts_system_profile_flags.runnable_procs
+ && !(a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) {
+ profile_runnable_proc(p, am_active);
}
- p->status = P_SUSPENDED;
-
+ if ((n & ERTS_PSFLG_IN_RUNQ) && !(a & ERTS_PSFLG_IN_RUNQ))
+ add2runq(p, n);
}
-static ERTS_INLINE void
-resume_process(Process *p)
+void
+erts_schedule_process(Process *p, erts_aint32_t state)
{
- Uint32 *statusp;
+ schedule_process(p, state, 0);
+}
+
+static ERTS_INLINE int
+suspend_process(Process *c_p, Process *p)
+{
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ int suspended = 0;
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- switch (p->status) {
- case P_SUSPENDED:
- statusp = &p->status;
- break;
- case P_GARBING:
- if (p->gcstatus == P_SUSPENDED) {
- statusp = &p->gcstatus;
- break;
+
+ if ((state & ERTS_PSFLG_SUSPENDED))
+ suspended = -1;
+ else {
+ if (c_p == p) {
+ state = erts_smp_atomic32_read_bor_relb(&p->state,
+ ERTS_PSFLG_SUSPENDED);
+ state |= ERTS_PSFLG_SUSPENDED;
+ ASSERT(state & ERTS_PSFLG_RUNNING);
+ suspended = 1;
+ }
+ else {
+ while (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_EXITING))) {
+ erts_aint32_t e, n;
+ n = e = state;
+ n |= ERTS_PSFLG_SUSPENDED;
+ state = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e);
+ if (state == e) {
+ state = n;
+ suspended = 1;
+ break;
+ }
+ }
}
- /* Fall through */
- default:
- return;
}
+ if (state & ERTS_PSFLG_SUSPENDED) {
+
+ ASSERT(!(ERTS_PSFLG_RUNNING & state)
+ || p == erts_get_current_process());
+
+ if (erts_system_profile_flags.runnable_procs
+ && (p->rcount == 0)
+ && (state & ERTS_PSFLG_ACTIVE)) {
+ profile_runnable_proc(p, am_inactive);
+ }
+
+ p->rcount++; /* count number of suspends */
+ }
+ return suspended;
+}
+
+static ERTS_INLINE void
+resume_process(Process *p)
+{
+ erts_aint32_t state;
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
ASSERT(p->rcount > 0);
if (--p->rcount > 0) /* multiple suspend */
return;
- switch(p->rstatus) {
- case P_RUNABLE:
- erts_add_to_runq(p);
- break;
- case P_WAITING:
- *statusp = P_WAITING;
- break;
- default:
- erl_exit(1, "bad state in resume_process()\n");
+
+ state = erts_smp_atomic32_read_band_mb(&p->state, ~ERTS_PSFLG_SUSPENDED);
+ state &= ~ERTS_PSFLG_SUSPENDED;
+ if ((state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_IN_RUNQ
+ | ERTS_PSFLG_RUNNING)) == ERTS_PSFLG_ACTIVE) {
+ schedule_process(p, state, 1);
}
- p->rstatus = P_FREE;
}
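Suspension nests: each successful suspend_process increments rcount, and resume_process only clears ERTS_PSFLG_SUSPENDED (and possibly reschedules) once the count drops back to zero. Here is a hedged miniature of that counting discipline, with an invented process record and flag encoding:

    #include <stdatomic.h>
    #include <assert.h>

    /* Invented miniature process record; not the ERTS layout. */
    struct mini_proc {
        _Atomic unsigned flags;   /* bit 0 = SUSPENDED (invented encoding) */
        int rcount;               /* suspend nesting depth, status lock assumed */
    };

    #define MINI_SUSPENDED 1u

    void mini_suspend(struct mini_proc *p)
    {
        if (p->rcount++ == 0)                         /* first suspend sets the flag */
            atomic_fetch_or(&p->flags, MINI_SUSPENDED);
    }

    /* Returns nonzero when the process may be scheduled again. */
    int mini_resume(struct mini_proc *p)
    {
        assert(p->rcount > 0);
        if (--p->rcount > 0)                          /* still nested: stay put */
            return 0;
        atomic_fetch_and(&p->flags, ~MINI_SUSPENDED); /* last resume clears flag */
        return 1;
    }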
int
@@ -4433,6 +4993,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
int wake = 0;
erts_aint32_t aux_work;
int thr_prgr_active = 1;
+ ErtsStuckBoundProcesses sbp = {NULL, NULL};
/*
* Schedulers may be suspended in two different ways:
@@ -4447,6 +5008,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ASSERT(no != 1);
+ evacuate_run_queue(esdp->run_queue, &sbp);
+
erts_smp_runq_unlock(esdp->run_queue);
erts_sched_check_cpu_bind_prep_suspend(esdp);
@@ -4510,19 +5073,28 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
while (1) {
+ erts_aint32_t qmask;
erts_aint32_t flgs;
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work) {
+ if (aux_work|qmask) {
if (!thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
- aux_work = handle_aux_work(&esdp->aux_work_data,
- aux_work,
- 1);
+ if (aux_work)
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
+ if (qmask) {
+ erts_smp_runq_lock(esdp->run_queue);
+ evacuate_run_queue(esdp->run_queue, &sbp);
+ erts_smp_runq_unlock(esdp->run_queue);
+ }
}
if (!aux_work) {
@@ -4592,56 +5164,11 @@ suspend_scheduler(ErtsSchedulerData *esdp)
erts_smp_runq_lock(esdp->run_queue);
non_empty_runq(esdp->run_queue);
+ schedule_bound_processes(esdp->run_queue, &sbp);
+
erts_sched_check_cpu_bind_post_suspend(esdp);
}
-#define ERTS_RUNQ_RESET_SUSPEND_INFO(RQ, DBG_ID) \
-do { \
- int pix__; \
- (RQ)->misc.evac_runq = NULL; \
- (RQ)->ports.info.migrate.runq = NULL; \
- (RQ)->flags &= ~(ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
- | ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
- | ERTS_RUNQ_FLGS_EVACUATE_QMASK \
- | ERTS_RUNQ_FLG_SUSPENDED); \
- (RQ)->flags |= (ERTS_RUNQ_FLG_OUT_OF_WORK \
- | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK); \
- (RQ)->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; \
- erts_smp_atomic32_read_band_nob(&(RQ)->info_flags, ~ERTS_RUNQ_IFLG_SUSPENDED);\
- for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) { \
- (RQ)->procs.prio_info[pix__].max_len = 0; \
- (RQ)->procs.prio_info[pix__].reds = 0; \
- ERTS_DBG_SET_INVALID_RUNQP((RQ)->procs.prio_info[pix__].migrate.runq,\
- (DBG_ID)); \
- } \
- (RQ)->ports.info.max_len = 0; \
- (RQ)->ports.info.reds = 0; \
-} while (0)
-
-#define ERTS_RUNQ_RESET_MIGRATION_PATHS__(RQ) \
-do { \
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked((RQ))); \
- (RQ)->misc.evac_runq = NULL; \
- (RQ)->ports.info.migrate.runq = NULL; \
- (RQ)->flags &= ~(ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
- | ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
- | ERTS_RUNQ_FLGS_EVACUATE_QMASK); \
-} while (0)
-
-#ifdef DEBUG
-#define ERTS_RUNQ_RESET_MIGRATION_PATHS(RQ, DBG_ID) \
-do { \
- int pix__; \
- ERTS_RUNQ_RESET_MIGRATION_PATHS__((RQ)); \
- for (pix__ = 0; pix__ < ERTS_NO_PROC_PRIO_LEVELS; pix__++) \
- ERTS_DBG_SET_INVALID_RUNQP((RQ)->procs.prio_info[pix__].migrate.runq,\
- (DBG_ID)); \
-} while (0)
-#else
-#define ERTS_RUNQ_RESET_MIGRATION_PATHS(RQ, DBG_ID) \
- ERTS_RUNQ_RESET_MIGRATION_PATHS__((RQ))
-#endif
-
ErtsSchedSuspendResult
erts_schedulers_state(Uint *total,
Uint *online,
@@ -4711,27 +5238,13 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_smp_mtx_lock(&balance_info.update_mtx);
- for (ix = online; ix < no; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x5);
- erts_smp_runq_unlock(rq);
- scheduler_ix_resume_wake(ix);
- }
- /*
- * Spread evacuation paths among all online
- * run queues.
- */
- for (ix = no; ix < erts_no_run_queues; ix++) {
- ErtsRunQueue *from_rq = ERTS_RUNQ_IX(ix);
- ErtsRunQueue *to_rq = ERTS_RUNQ_IX(ix % no);
- evacuate_run_queue(from_rq, to_rq);
- }
- set_no_used_runqs(no);
- erts_smp_mtx_unlock(&balance_info.update_mtx);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ change_no_used_runqs(no);
+
+ for (ix = online; ix < no; ix++)
+ resume_run_queue(ERTS_RUNQ_IX(ix));
+
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
}
res = ERTS_SCHDLR_SSPND_DONE;
}
@@ -4758,25 +5271,11 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_smp_mtx_lock(&balance_info.update_mtx);
-
- for (ix = 0; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x6);
- erts_smp_runq_unlock(rq);
- }
- /*
- * Evacutation order important! Newly suspended run queues
- * has to be evacuated last.
- */
- for (ix = erts_no_run_queues-1; ix >= no; ix--)
- evacuate_run_queue(ERTS_RUNQ_IX(ix),
- ERTS_RUNQ_IX(ix % no));
- set_no_used_runqs(no);
- erts_smp_mtx_unlock(&balance_info.update_mtx);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+
+ change_no_used_runqs(no);
+ for (ix = no; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
+
for (ix = no; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
wake_scheduler(rq, 0);
@@ -4836,8 +5335,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
else if (on) { /* ------ BLOCK ------ */
if (schdlr_sspnd.msb.procs) {
plp = proclist_create(p);
- plp->next = schdlr_sspnd.msb.procs;
- schdlr_sspnd.msb.procs = plp;
+ erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp);
p->flags |= F_HAVE_BLCKD_MSCHED;
ASSERT(erts_smp_atomic32_read_nob(&schdlr_sspnd.active) == 1);
ASSERT(p->scheduler_data->no == 1);
@@ -4873,25 +5371,14 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
schdlr_sspnd.msb.wait_active = 2;
}
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- erts_smp_mtx_lock(&balance_info.update_mtx);
- set_no_used_runqs(1);
- for (ix = 0; ix < online; ix++) {
+ change_no_used_runqs(1);
+ for (ix = 1; ix < erts_no_run_queues; ix++)
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
+
+ for (ix = 1; ix < online; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
- ERTS_RUNQ_RESET_MIGRATION_PATHS(rq, 0x7);
- erts_smp_runq_unlock(rq);
+ wake_scheduler(rq, 0);
}
- /*
- * Evacuate all activities in all other run queues
- * into the first run queue. Note order is important,
- * online run queues has to be evacuated last.
- */
- for (ix = erts_no_run_queues-1; ix >= 1; ix--)
- evacuate_run_queue(ERTS_RUNQ_IX(ix), ERTS_RUNQ_IX(0));
- erts_smp_mtx_unlock(&balance_info.update_mtx);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
if (erts_smp_atomic32_read_nob(&schdlr_sspnd.active)
!= schdlr_sspnd.msb.wait_active) {
@@ -4933,17 +5420,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
~ERTS_SCHDLR_SSPND_CHNG_WAITER);
}
plp = proclist_create(p);
- plp->next = schdlr_sspnd.msb.procs;
- schdlr_sspnd.msb.procs = plp;
-#ifdef DEBUG
- ERTS_FOREACH_RUNQ(srq,
- {
- if (srq != ERTS_RUNQ_IX(0)) {
- ASSERT(ERTS_EMPTY_RUNQ(srq));
- ASSERT(srq->flags & ERTS_RUNQ_FLG_SUSPENDED);
- }
- });
-#endif
+ erts_proclist_store_last(&schdlr_sspnd.msb.procs, plp);
ASSERT(p->scheduler_data);
}
}
@@ -4954,20 +5431,16 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
}
else { /* ------ UNBLOCK ------ */
if (p->flags & F_HAVE_BLCKD_MSCHED) {
- ErtsProcList **plpp = &schdlr_sspnd.msb.procs;
- plp = schdlr_sspnd.msb.procs;
+ ErtsProcList *plp = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
while (plp) {
- if (!proclist_same(plp, p)){
- plpp = &plp->next;
- plp = plp->next;
- }
- else {
- *plpp = plp->next;
- proclist_destroy(plp);
+ ErtsProcList *tmp_plp = plp;
+ plp = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp);
+ if (erts_proclist_same(tmp_plp, p)) {
+ erts_proclist_remove(&schdlr_sspnd.msb.procs, tmp_plp);
+ proclist_destroy(tmp_plp);
if (!all)
break;
- plp = *plpp;
}
}
}
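The unblock path above swaps hand-rolled pointer surgery for the erts_proclist_* helpers: append with store_last, walk with peek_first/peek_next, unlink with remove. Below is a minimal sketch of an equivalent intrusive doubly linked list in plain C; the struct and function names are stand-ins, not the ERTS implementation (in particular the real peek_next also takes the list so it can detect the circular wrap-around).

    #include <stddef.h>

    /* Invented stand-ins for the erts_proclist_* helpers; not the ERTS code. */
    struct plist_node {
        struct plist_node *prev, *next;
        int pid;                               /* payload, just an int here */
    };

    struct plist {
        struct plist_node *first, *last;
    };

    void plist_store_last(struct plist *l, struct plist_node *n)
    {
        n->next = NULL;
        n->prev = l->last;
        if (l->last)
            l->last->next = n;
        else
            l->first = n;
        l->last = n;
    }

    struct plist_node *plist_peek_first(struct plist *l)     { return l->first; }
    struct plist_node *plist_peek_next(struct plist_node *n) { return n->next; }

    void plist_remove(struct plist *l, struct plist_node *n)
    {
        if (n->prev) n->prev->next = n->next; else l->first = n->next;
        if (n->next) n->next->prev = n->prev; else l->last = n->prev;
        n->prev = n->next = NULL;
    }

    /* Walk the list and unlink every node matching 'pid', as the unblock
     * path does: advance the cursor before possibly removing the node. */
    void plist_remove_pid(struct plist *l, int pid)
    {
        struct plist_node *n = plist_peek_first(l);
        while (n) {
            struct plist_node *cur = n;
            n = plist_peek_next(n);
            if (cur->pid == pid)
                plist_remove(l, cur);
        }
    }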
@@ -4975,27 +5448,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else {
ERTS_SCHDLR_SSPND_CHNG_SET(ERTS_SCHDLR_SSPND_CHNG_MSB, 0);
-#ifdef DEBUG
- ERTS_FOREACH_RUNQ(rq,
- {
- if (rq != p->scheduler_data->run_queue) {
- if (!ERTS_EMPTY_RUNQ(rq)) {
- Process *rp;
- int pix;
- ASSERT(rq->ports.info.len == 0);
- for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) {
- for (rp = rq->procs.prio[pix].first;
- rp;
- rp = rp->next) {
- ASSERT(rp->bound_runq);
- }
- }
- }
-
- ASSERT(rq->flags & ERTS_RUNQ_FLG_SUSPENDED);
- }
- });
-#endif
p->flags &= ~F_HAVE_BLCKD_MSCHED;
schdlr_sspnd.msb.ongoing = 0;
if (schdlr_sspnd.online == 1) {
@@ -5005,35 +5457,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
}
else {
int online = schdlr_sspnd.online;
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
if (plocks) {
have_unlocked_plocks = 1;
erts_smp_proc_unlock(p, plocks);
}
- erts_smp_mtx_lock(&balance_info.update_mtx);
+
+ change_no_used_runqs(online);
/* Resume all online run queues */
- for (ix = 1; ix < online; ix++) {
- ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
- erts_smp_runq_lock(rq);
- ERTS_RUNQ_RESET_SUSPEND_INFO(rq, 0x4);
- erts_smp_runq_unlock(rq);
- scheduler_ix_resume_wake(ix);
- }
+ for (ix = 1; ix < online; ix++)
+ resume_run_queue(ERTS_RUNQ_IX(ix));
- /* Spread evacuation paths among all online run queues */
for (ix = online; ix < erts_no_run_queues; ix++)
- evacuate_run_queue(ERTS_RUNQ_IX(ix),
- ERTS_RUNQ_IX(ix % online));
-
- set_no_used_runqs(online);
- /* Make sure that we balance soon... */
- balance_info.forced_check_balance = 1;
- erts_smp_runq_lock(ERTS_RUNQ_IX(0));
- ERTS_RUNQ_IX(0)->check_balance_reds = 0;
- erts_smp_runq_unlock(ERTS_RUNQ_IX(0));
- erts_smp_mtx_unlock(&balance_info.update_mtx);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
+ suspend_run_queue(ERTS_RUNQ_IX(ix));
}
res = ERTS_SCHDLR_SSPND_DONE;
}
@@ -5073,23 +5509,25 @@ erts_multi_scheduling_blockers(Process *p)
Eterm res = NIL;
erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- if (schdlr_sspnd.msb.procs) {
+ if (!erts_proclist_is_empty(schdlr_sspnd.msb.procs)) {
Eterm *hp, *hp_end;
ErtsProcList *plp1, *plp2;
- Uint max_size;
- ASSERT(schdlr_sspnd.msb.procs);
- for (max_size = 0, plp1 = schdlr_sspnd.msb.procs;
+ Uint max_size = 0;
+
+ for (plp1 = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
plp1;
- plp1 = plp1->next) {
+ plp1 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp1)) {
max_size += 2;
}
ASSERT(max_size);
hp = HAlloc(p, max_size);
hp_end = hp + max_size;
- for (plp1 = schdlr_sspnd.msb.procs; plp1; plp1 = plp1->next) {
- for (plp2 = schdlr_sspnd.msb.procs;
+ for (plp1 = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
+ plp1;
+ plp1 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp1)) {
+ for (plp2 = erts_proclist_peek_first(schdlr_sspnd.msb.procs);
plp2->pid != plp1->pid;
- plp2 = plp2->next);
+ plp2 = erts_proclist_peek_next(schdlr_sspnd.msb.procs, plp2));
if (plp2 == plp1) {
res = CONS(hp, plp1->pid, res);
hp += 2;
@@ -5340,11 +5778,8 @@ handle_pend_sync_suspend(Process *suspendee,
if (suspender) {
ASSERT(is_nil(suspender->suspendee));
if (suspendee_alive) {
- ErtsRunQueue *rq = erts_get_runq_proc(suspendee);
- erts_smp_runq_lock(rq);
- suspend_process(rq, suspendee);
- erts_smp_runq_unlock(rq);
- suspender->suspendee = suspendee->id;
+ erts_suspend(suspendee, suspendee_locks, NULL);
+ suspender->suspendee = suspendee->common.id;
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
@@ -5365,7 +5800,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ERTS_SMP_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN);
ERTS_SMP_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS));
- if (c_p->id == pid)
+ if (c_p->common.id == pid)
return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
if (c_p_locks & ERTS_PROC_LOCK_STATUS)
@@ -5386,10 +5821,9 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
resume_process(rp);
}
else {
- ErtsRunQueue *cp_rq, *rp_rq;
rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, ERTS_PROC_LOCK_STATUS);
+ pid, pid_locks|ERTS_PROC_LOCK_STATUS);
if (!rp) {
c_p->flags &= ~F_P2PNR_RESCHED;
@@ -5398,58 +5832,36 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT(!(c_p->flags & F_P2PNR_RESCHED));
- cp_rq = erts_get_runq_proc(c_p);
- rp_rq = erts_get_runq_proc(rp);
- erts_smp_runqs_lock(cp_rq, rp_rq);
- if (rp->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
- running:
- /* Phiu... */
-
- /*
- * If we got pending suspenders and suspend ourselves waiting
- * to suspend another process we might deadlock.
- * In this case we have to yield, be suspended by
- * someone else and then do it all over again.
- */
- if (!c_p->pending_suspenders) {
- /* Mark rp pending for suspend by c_p */
- add_pend_suspend(rp, c_p->id, handle_pend_sync_suspend);
- ASSERT(is_nil(c_p->suspendee));
-
- /* Suspend c_p; when rp is suspended c_p will be resumed. */
- suspend_process(cp_rq, c_p);
- c_p->flags |= F_P2PNR_RESCHED;
- }
- /* Yield (caller is assumed to yield immediately in bif). */
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = ERTS_PROC_LOCK_BUSY;
+ if (suspend) {
+ if (suspend_process(c_p, rp))
+ goto done;
}
else {
- ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
- if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
- erts_smp_runqs_unlock(cp_rq, rp_rq);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, pid_locks|ERTS_PROC_LOCK_STATUS);
- if (!rp)
- goto done;
- /* run-queues may have changed */
- cp_rq = erts_get_runq_proc(c_p);
- rp_rq = erts_get_runq_proc(rp);
- erts_smp_runqs_lock(cp_rq, rp_rq);
- if (rp->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
- /* Ahh... */
- erts_smp_proc_unlock(rp,
- pid_locks & ~ERTS_PROC_LOCK_STATUS);
- goto running;
- }
- }
+ if (!(ERTS_PSFLG_RUNNING & erts_smp_atomic32_read_acqb(&rp->state)))
+ goto done;
+
+ }
+
+ /* Other process running */
- /* rp is not running and we got the locks we want... */
- if (suspend)
- suspend_process(rp_rq, rp);
+ /*
+ * If we have pending suspenders and suspend ourselves while
+ * waiting to suspend another process, we might deadlock.
+ * In that case we have to yield, be suspended by
+ * someone else, and then do it all over again.
+ */
+ if (!c_p->pending_suspenders) {
+ /* Mark rp pending for suspend by c_p */
+ add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
+ ASSERT(is_nil(c_p->suspendee));
+
+ /* Suspend c_p; when rp is suspended c_p will be resumed. */
+ suspend_process(c_p, c_p);
+ c_p->flags |= F_P2PNR_RESCHED;
}
- erts_smp_runqs_unlock(cp_rq, rp_rq);
+ /* Yield (caller is assumed to yield immediately in bif). */
+ erts_smp_proc_unlock(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
+ rp = ERTS_PROC_LOCK_BUSY;
}
done:
@@ -5506,36 +5918,26 @@ erts_pid2proc_nropt(Process *c_p, ErtsProcLocks c_p_locks,
return erts_pid2proc_not_running(c_p, c_p_locks, pid, pid_locks);
}
-static ERTS_INLINE void
-do_bif_suspend_process(ErtsSuspendMonitor *smon,
- Process *suspendee,
- ErtsRunQueue *locked_runq)
+static ERTS_INLINE int
+do_bif_suspend_process(Process *c_p,
+ ErtsSuspendMonitor *smon,
+ Process *suspendee)
{
ASSERT(suspendee);
- ASSERT(!suspendee->is_exiting);
+ ASSERT(!ERTS_PROC_IS_EXITING(suspendee));
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
& erts_proc_lc_my_proc_locks(suspendee));
if (smon) {
if (!smon->active) {
- ErtsRunQueue *rq;
-
- if (locked_runq)
- rq = locked_runq;
- else {
- rq = erts_get_runq_proc(suspendee);
- erts_smp_runq_lock(rq);
- }
-
- suspend_process(rq, suspendee);
-
- if (!locked_runq)
- erts_smp_runq_unlock(rq);
+ if (!suspend_process(c_p, suspendee))
+ return 0;
}
smon->active += smon->pending;
ASSERT(smon->active);
smon->pending = 0;
+ return 1;
}
-
+ return 0;
}
static void
@@ -5556,13 +5958,20 @@ handle_pend_bif_sync_suspend(Process *suspendee,
ASSERT(is_nil(suspender->suspendee));
if (!suspendee_alive)
erts_delete_suspend_monitor(&suspender->suspend_monitors,
- suspendee->id);
+ suspendee->common.id);
else {
+#ifdef DEBUG
+ int res;
+#endif
ErtsSuspendMonitor *smon;
smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
- suspendee->id);
- do_bif_suspend_process(smon, suspendee, NULL);
- suspender->suspendee = suspendee->id;
+ suspendee->common.id);
+#ifdef DEBUG
+ res =
+#endif
+ do_bif_suspend_process(suspendee, smon, suspendee);
+ ASSERT(!smon || res != 0);
+ suspender->suspendee = suspendee->common.id;
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
@@ -5591,12 +6000,19 @@ handle_pend_bif_async_suspend(Process *suspendee,
ASSERT(is_nil(suspender->suspendee));
if (!suspendee_alive)
erts_delete_suspend_monitor(&suspender->suspend_monitors,
- suspendee->id);
+ suspendee->common.id);
else {
+#ifdef DEBUG
+ int res;
+#endif
ErtsSuspendMonitor *smon;
smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
- suspendee->id);
- do_bif_suspend_process(smon, suspendee, NULL);
+ suspendee->common.id);
+#ifdef DEBUG
+ res =
+#endif
+ do_bif_suspend_process(suspendee, smon, suspendee);
+ ASSERT(!smon || res != 0);
}
erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK);
}
@@ -5636,7 +6052,7 @@ suspend_process_2(BIF_ALIST_2)
int unless_suspending = 0;
- if (BIF_P->id == BIF_ARG_1)
+ if (BIF_P->common.id == BIF_ARG_1)
goto badarg; /* We are not allowed to suspend ourselves */
if (is_not_nil(BIF_ARG_2)) {
@@ -5681,7 +6097,8 @@ suspend_process_2(BIF_ALIST_2)
/* This is really a piece of cake without SMP support... */
if (!smon->active) {
- suspend_process(ERTS_RUNQ_IX(0), suspendee);
+ erts_smp_atomic32_read_bor_nob(&suspendee->state, ERTS_PSFLG_SUSPENDED);
+ suspend_process(BIF_P, suspendee);
smon->active++;
res = am_true;
}
@@ -5724,21 +6141,15 @@ suspend_process_2(BIF_ALIST_2)
if (smon->pending && unless_suspending)
res = am_false;
else {
- ErtsRunQueue *rq;
if (smon->pending == INT_MAX)
goto system_limit;
smon->pending++;
- rq = erts_get_runq_proc(suspendee);
- erts_smp_runq_lock(rq);
- if (suspendee->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING)
+ if (!do_bif_suspend_process(BIF_P, smon, suspendee))
add_pend_suspend(suspendee,
- BIF_P->id,
+ BIF_P->common.id,
handle_pend_bif_async_suspend);
- else
- do_bif_suspend_process(smon, suspendee, rq);
- erts_smp_runq_unlock(rq);
res = am_true;
}
@@ -5775,7 +6186,6 @@ suspend_process_2(BIF_ALIST_2)
/* done */
}
else {
- ErtsRunQueue *cp_rq, *s_rq;
/* We haven't got any active suspends on the suspendee */
/*
@@ -5792,12 +6202,7 @@ suspend_process_2(BIF_ALIST_2)
if (!unless_suspending || smon->pending == 0)
smon->pending++;
- cp_rq = erts_get_runq_proc(BIF_P);
- s_rq = erts_get_runq_proc(suspendee);
- erts_smp_runqs_lock(cp_rq, s_rq);
- if (!(suspendee->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING)) {
- do_bif_suspend_process(smon, suspendee, s_rq);
- erts_smp_runqs_unlock(cp_rq, s_rq);
+ if (do_bif_suspend_process(BIF_P, smon, suspendee)) {
res = (!unless_suspending || smon->active == 1
? am_true
: am_false);
@@ -5806,7 +6211,7 @@ suspend_process_2(BIF_ALIST_2)
else {
/* Mark suspendee pending for suspend by BIF_P */
add_pend_suspend(suspendee,
- BIF_P->id,
+ BIF_P->common.id,
handle_pend_bif_sync_suspend);
ASSERT(is_nil(BIF_P->suspendee));
@@ -5817,8 +6222,7 @@ suspend_process_2(BIF_ALIST_2)
* This time with BIF_P->suspendee == BIF_ARG_1 (see
* above).
*/
- suspend_process(cp_rq, BIF_P);
- erts_smp_runqs_unlock(cp_rq, s_rq);
+ suspend_process(BIF_P, BIF_P);
goto yield;
}
}
@@ -5826,9 +6230,15 @@ suspend_process_2(BIF_ALIST_2)
}
#endif /* ERTS_SMP */
-
- ASSERT(suspendee->status == P_SUSPENDED || (asynchronous && smon->pending));
- ASSERT(suspendee->status == P_SUSPENDED || !smon->active);
+#ifdef DEBUG
+ {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&suspendee->state);
+ ASSERT((state & ERTS_PSFLG_SUSPENDED)
+ || (asynchronous && smon->pending));
+ ASSERT((state & ERTS_PSFLG_SUSPENDED)
+ || !smon->active);
+ }
+#endif
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(BIF_P, xlocks);
@@ -5875,7 +6285,7 @@ resume_process_1(BIF_ALIST_1)
Process *suspendee;
int is_active;
- if (BIF_P->id == BIF_ARG_1)
+ if (BIF_P->common.id == BIF_ARG_1)
BIF_ERROR(BIF_P, BADARG);
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
@@ -5923,9 +6333,8 @@ resume_process_1(BIF_ALIST_1)
if (!suspendee)
goto no_suspendee;
- ASSERT(suspendee->status == P_SUSPENDED
- || (suspendee->status == P_GARBING
- && suspendee->gcstatus == P_SUSPENDED));
+ ASSERT(ERTS_PSFLG_SUSPENDED
+ & erts_smp_atomic32_read_nob(&suspendee->state));
resume_process(suspendee);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
@@ -5954,449 +6363,46 @@ erts_run_queues_len(Uint *qlen)
Uint len = 0;
ERTS_ATOMIC_FOREACH_RUNQ(rq,
{
- if (qlen)
- qlen[i++] = rq->procs.len;
- len += rq->procs.len;
+ Sint pqlen = 0;
+ int pix;
+ for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++)
+ pqlen += RUNQ_READ_LEN(&rq->procs.prio_info[pix].len);
+
+ if (pqlen < 0)
+ pqlen = 0;
+ if (qlen)
+ qlen[i++] = pqlen;
+ len += pqlen;
}
);
return len;
}
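With queue lengths now held in per-priority atomic counters, erts_run_queues_len sums them without taking the run-queue lock; since the reads are unsynchronized the sum can be transiently negative, hence the clamp to zero. A hedged miniature of the same summation (names and priority count are illustrative only):

    #include <stdatomic.h>

    #define NPRIO 4   /* max, high, normal, low -- count is illustrative */

    struct mini_runq {
        _Atomic long len[NPRIO];   /* one counter per priority level */
    };

    /* Sum the per-priority counters without a lock; clamp a transiently
     * negative total (caused by racing increments/decrements) to zero. */
    long mini_runq_len(struct mini_runq *rq)
    {
        long total = 0;
        for (int pix = 0; pix < NPRIO; pix++)
            total += atomic_load_explicit(&rq->len[pix], memory_order_relaxed);
        return total < 0 ? 0 : total;
    }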
-#ifdef HARDDEBUG_RUNQS
-static void
-check_procs_runq(ErtsRunQueue *runq, Process *p_in_q, Process *p_not_in_q)
-{
- int len[ERTS_NO_PROC_PRIO_LEVELS] = {0};
- int tot_len;
- int prioq, prio;
- int found_p_in_q;
- Process *p, *prevp;
-
- found_p_in_q = 0;
- for (prioq = 0; prioq < ERTS_NO_PROC_PRIO_LEVELS - 1; prioq++) {
- prevp = NULL;
- for (p = runq->procs.prio[prioq].first; p; p = p->next) {
- ASSERT(p != p_not_in_q);
- if (p == p_in_q)
- found_p_in_q = 1;
- switch (p->prio) {
- case PRIORITY_MAX:
- case PRIORITY_HIGH:
- case PRIORITY_NORMAL:
- ASSERT(prioq == p->prio);
- break;
- case PRIORITY_LOW:
- ASSERT(prioq == PRIORITY_NORMAL);
- break;
- default:
- ASSERT(!"Bad prio on process");
- }
- len[p->prio]++;
- ASSERT(prevp == p->prev);
- if (p->prev) {
- ASSERT(p->prev->next == p);
- }
- else {
- ASSERT(runq->procs.prio[prioq].first == p);
- }
- if (p->next) {
- ASSERT(p->next->prev == p);
- }
- else {
- ASSERT(runq->procs.prio[prioq].last == p);
- }
- ASSERT(p->run_queue == runq);
- prevp = p;
- }
- }
-
- ASSERT(!p_in_q || found_p_in_q);
-
- tot_len = 0;
- for (prio = 0; prio < ERTS_NO_PROC_PRIO_LEVELS; prio++) {
- ASSERT(len[prio] == runq->procs.prio_info[prio].len);
- if (len[prio]) {
- ASSERT(runq->flags & (1 << prio));
- }
- else {
- ASSERT(!(runq->flags & (1 << prio)));
- }
- tot_len += len[prio];
- }
- ASSERT(runq->procs.len == tot_len);
-}
-# define ERTS_DBG_CHK_PROCS_RUNQ(RQ) check_procs_runq((RQ), NULL, NULL)
-# define ERTS_DBG_CHK_PROCS_RUNQ_PROC(RQ, P) check_procs_runq((RQ), (P), NULL)
-# define ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(RQ, P) check_procs_runq((RQ), NULL, (P))
-#else
-# define ERTS_DBG_CHK_PROCS_RUNQ(RQ)
-# define ERTS_DBG_CHK_PROCS_RUNQ_PROC(RQ, P)
-# define ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(RQ, P)
-#endif
-
-
-static ERTS_INLINE void
-enqueue_process(ErtsRunQueue *runq, Process *p)
-{
- ErtsRunPrioQueue *rpq;
- ErtsRunQueueInfo *rqi;
-
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
-
- ASSERT(p->bound_runq || !(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));
-
- rqi = &runq->procs.prio_info[p->prio];
- rqi->len++;
- if (rqi->max_len < rqi->len)
- rqi->max_len = rqi->len;
-
- runq->procs.len++;
- runq->len++;
- if (runq->max_len < runq->len)
- runq->max_len = runq->len;
-
- runq->flags |= (1 << p->prio);
-
- rpq = (p->prio == PRIORITY_LOW
- ? &runq->procs.prio[PRIORITY_NORMAL]
- : &runq->procs.prio[p->prio]);
-
- p->next = NULL;
- p->prev = rpq->last;
- if (rpq->last)
- rpq->last->next = p;
- else
- rpq->first = p;
- rpq->last = p;
-
- switch (p->status) {
- case P_EXITING:
- break;
- case P_GARBING:
- p->gcstatus = P_RUNABLE;
- break;
- default:
- p->status = P_RUNABLE;
- break;
- }
-
-#ifdef ERTS_SMP
- p->status_flags |= ERTS_PROC_SFLG_INRUNQ;
-#endif
-
- ERTS_DBG_CHK_PROCS_RUNQ_PROC(runq, p);
-}
-
-
-static ERTS_INLINE int
-dequeue_process(ErtsRunQueue *runq, Process *p)
-{
- ErtsRunPrioQueue *rpq;
- int res = 1;
-
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
-
- ERTS_DBG_CHK_PROCS_RUNQ(runq);
-
- rpq = &runq->procs.prio[p->prio == PRIORITY_LOW ? PRIORITY_NORMAL : p->prio];
- if (p->prev) {
- p->prev->next = p->next;
- }
- else if (rpq->first == p) {
- rpq->first = p->next;
- }
- else {
- res = 0;
- }
- if (p->next) {
- p->next->prev = p->prev;
- }
- else if (rpq->last == p) {
- rpq->last = p->prev;
- }
- else {
- ASSERT(res == 0);
- }
-
- if (res) {
-
- if (--runq->procs.prio_info[p->prio].len == 0)
- runq->flags &= ~(1 << p->prio);
- runq->procs.len--;
- runq->len--;
-
-#ifdef ERTS_SMP
- p->status_flags &= ~ERTS_PROC_SFLG_INRUNQ;
-#endif
- }
-
- ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
- return res;
-}
-
-/* schedule a process */
-static ERTS_INLINE ErtsRunQueue *
-internal_add_to_runq(ErtsRunQueue *runq, Process *p)
-{
- Uint32 prev_status = p->status;
- ErtsRunQueue *add_runq;
-#ifdef ERTS_SMP
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
-
- if (p->status_flags & ERTS_PROC_SFLG_INRUNQ)
- return NULL;
- else if (p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING) {
- ASSERT(ERTS_PROC_IS_EXITING(p) || p->rcount == 0);
- ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
- p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- return NULL;
- }
- ASSERT(!p->scheduler_data);
-#endif
-
- ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(runq, p);
-#ifndef ERTS_SMP
- /* Never schedule a suspended process (ok in smp case) */
- ASSERT(ERTS_PROC_IS_EXITING(p) || p->rcount == 0);
- add_runq = runq;
-#else
- ASSERT(!p->bound_runq || p->bound_runq == p->run_queue);
- if (p->bound_runq) {
- if (p->bound_runq == runq)
- add_runq = runq;
- else {
- add_runq = p->bound_runq;
- erts_smp_xrunq_lock(runq, add_runq);
- }
- }
- else {
- add_runq = erts_check_emigration_need(runq, p->prio);
- if (!add_runq)
- add_runq = runq;
- else /* Process emigrated */
- p->run_queue = add_runq;
- }
-#endif
-
- /* Enqueue the process */
- enqueue_process(add_runq, p);
-
- if ((erts_system_profile_flags.runnable_procs)
- && (prev_status == P_WAITING
- || prev_status == P_SUSPENDED)) {
- profile_runnable_proc(p, am_active);
- }
-
- if (add_runq != runq)
- erts_smp_runq_unlock(add_runq);
-
- return add_runq;
-}
-
-
-void
-erts_add_to_runq(Process *p)
-{
- ErtsRunQueue *notify_runq;
- ErtsRunQueue *runq = erts_get_runq_proc(p);
- erts_smp_runq_lock(runq);
- notify_runq = internal_add_to_runq(runq, p);
- erts_smp_runq_unlock(runq);
- smp_notify_inc_runq(notify_runq);
-
-}
-
-/* Possibly remove a scheduled process we need to suspend */
-
-static int
-remove_proc_from_runq(ErtsRunQueue *rq, Process *p, int to_inactive)
-{
- int res;
-
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
-
-#ifdef ERTS_SMP
- if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) {
- p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- ASSERT(!remove_proc_from_runq(rq, p, 0));
- return 1;
- }
-#endif
-
- res = dequeue_process(rq, p);
-
- if (res && erts_system_profile_flags.runnable_procs && to_inactive)
- profile_runnable_proc(p, am_inactive);
-
-#ifdef ERTS_SMP
- ASSERT(!(p->status_flags & ERTS_PROC_SFLG_INRUNQ));
-#endif
-
- return res;
-}
-
-#ifdef ERTS_SMP
-
-ErtsMigrateResult
-erts_proc_migrate(Process *p, ErtsProcLocks *plcks,
- ErtsRunQueue *from_rq, int *from_locked,
- ErtsRunQueue *to_rq, int *to_locked)
-{
- ERTS_SMP_LC_ASSERT(*plcks == erts_proc_lc_my_proc_locks(p));
- ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_STATUS & *plcks)
- || from_locked);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
-
- /*
- * If we have the lock on the run queue to migrate to,
- * check that it isn't suspended. If it is suspended,
- * we will refuse to migrate to it anyway.
- */
- if (*to_locked && (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED))
- return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED;
-
- /* We need status lock on process and locks on both run queues */
-
- if (!(ERTS_PROC_LOCK_STATUS & *plcks)) {
- if (erts_smp_proc_trylock(p, ERTS_PROC_LOCK_STATUS) == EBUSY) {
- ErtsProcLocks lcks = *plcks;
- Eterm pid = p->id;
- Process *proc = *plcks ? p : NULL;
-
- if (*from_locked) {
- *from_locked = 0;
- erts_smp_runq_unlock(from_rq);
- }
- if (*to_locked) {
- *to_locked = 0;
- erts_smp_runq_unlock(to_rq);
- }
-
- proc = erts_pid2proc_opt(proc,
- lcks,
- pid,
- lcks|ERTS_PROC_LOCK_STATUS,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- if (!proc) {
- *plcks = 0;
- return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
- }
- ASSERT(proc == p);
- }
- *plcks |= ERTS_PROC_LOCK_STATUS;
- }
-
- ASSERT(!p->bound_runq);
-
- ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
-
- if (p->run_queue != from_rq)
- return ERTS_MIGRATE_FAILED_RUNQ_CHANGED;
-
- if (!*from_locked || !*to_locked) {
- if (from_rq < to_rq) {
- if (!*to_locked) {
- if (!*from_locked)
- erts_smp_runq_lock(from_rq);
- erts_smp_runq_lock(to_rq);
- }
- else if (erts_smp_runq_trylock(from_rq) == EBUSY) {
- erts_smp_runq_unlock(to_rq);
- erts_smp_runq_lock(from_rq);
- erts_smp_runq_lock(to_rq);
- }
- }
- else {
- if (!*from_locked) {
- if (!*to_locked)
- erts_smp_runq_lock(to_rq);
- erts_smp_runq_lock(from_rq);
- }
- else if (erts_smp_runq_trylock(to_rq) == EBUSY) {
- erts_smp_runq_unlock(from_rq);
- erts_smp_runq_lock(to_rq);
- erts_smp_runq_lock(from_rq);
- }
- }
- *to_locked = *from_locked = 1;
- }
-
- ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
- ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);
-
- /* Ok we now got all locks we need; do it... */
-
- /* Refuse to migrate to a suspended run queue */
- if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
- return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED;
-
- if ((p->runq_flags & ERTS_PROC_RUNQ_FLG_RUNNING)
- || !(p->status_flags & ERTS_PROC_SFLG_INRUNQ))
- return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
-
- dequeue_process(from_rq, p);
- p->run_queue = to_rq;
- enqueue_process(to_rq, p);
-
- return ERTS_MIGRATE_SUCCESS;
-}
-#endif /* ERTS_SMP */
-
Eterm
erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
Process *rp, Eterm rpid)
{
Eterm res = am_undefined;
- Process *p;
-
- if (rp) {
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- & erts_proc_lc_my_proc_locks(rp));
- p = rp;
- }
- else {
- p = erts_pid2proc_opt(c_p, c_p_locks,
- rpid, ERTS_PROC_LOCK_STATUS,
- ERTS_P2P_FLG_ALLOW_OTHER_X);
- }
+ Process *p = rp ? rp : erts_proc_lookup_raw(rpid);
if (p) {
- switch (p->status) {
- case P_RUNABLE:
- res = am_runnable;
- break;
- case P_WAITING:
- res = am_waiting;
- break;
- case P_RUNNING:
- res = am_running;
- break;
- case P_EXITING:
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ if (state & ERTS_PSFLG_FREE)
+ res = am_free;
+ else if (state & ERTS_PSFLG_EXITING)
res = am_exiting;
- break;
- case P_GARBING:
+ else if (state & ERTS_PSFLG_GC)
res = am_garbage_collecting;
- break;
- case P_SUSPENDED:
+ else if (state & ERTS_PSFLG_SUSPENDED)
res = am_suspended;
- break;
- case P_FREE: /* We cannot look up a process in P_FREE... */
- default: /* Not a valid status... */
- erl_exit(1, "Bad status (%b32u) found for process %T\n",
- p->status, p->id);
- break;
- }
-
-#ifdef ERTS_SMP
- if (!rp && (p != c_p || !(ERTS_PROC_LOCK_STATUS & c_p_locks)))
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ else if (state & ERTS_PSFLG_RUNNING)
+ res = am_running;
+ else if (state & ERTS_PSFLG_ACTIVE)
+ res = am_runnable;
+ else
+ res = am_waiting;
}
+#ifdef ERTS_SMP
else {
int i;
ErtsSchedulerData *esdp;
@@ -6404,50 +6410,53 @@ erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
for (i = 0; i < erts_no_schedulers; i++) {
esdp = ERTS_SCHEDULER_IX(i);
erts_smp_runq_lock(esdp->run_queue);
- if (esdp->free_process && esdp->free_process->id == rpid) {
+ if (esdp->free_process
+ && esdp->free_process->common.id == rpid) {
res = am_free;
erts_smp_runq_unlock(esdp->run_queue);
break;
}
erts_smp_runq_unlock(esdp->run_queue);
}
-
-#endif
-
}
-
+#endif
return res;
}
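erts_process_status now derives the status atom purely from the atomic state word, testing the most specific flags first and falling back to waiting. A small sketch of that decode order; the bit assignments below are invented, not the real ERTS_PSFLG_* values.

    #include <stdint.h>

    /* Invented bit assignments; the real ERTS_PSFLG_* values differ. */
    #define PS_FREE       (1u << 0)
    #define PS_EXITING    (1u << 1)
    #define PS_GC         (1u << 2)
    #define PS_SUSPENDED  (1u << 3)
    #define PS_RUNNING    (1u << 4)
    #define PS_ACTIVE     (1u << 5)

    /* Most specific flag wins; anything else is a plain waiting process. */
    const char *sketch_status(uint32_t state)
    {
        if (state & PS_FREE)       return "free";
        if (state & PS_EXITING)    return "exiting";
        if (state & PS_GC)         return "garbage_collecting";
        if (state & PS_SUSPENDED)  return "suspended";
        if (state & PS_RUNNING)    return "running";
        if (state & PS_ACTIVE)     return "runnable";
        return "waiting";
    }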
/*
-** Suspend a process
+** Suspend a currently executing process
** If we are to suspend on a port, busy_port is that port;
** otherwise busy_port is NIL.
*/
void
-erts_suspend(Process* process, ErtsProcLocks process_locks, Port *busy_port)
+erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port)
{
- ErtsRunQueue *rq;
+ int suspend;
- ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
- if (!(process_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
-
- rq = erts_get_runq_proc(process);
-
- erts_smp_runq_lock(rq);
-
- suspend_process(rq, process);
-
- erts_smp_runq_unlock(rq);
+ ASSERT(c_p == erts_get_current_process());
+ ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
+ if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
if (busy_port)
- erts_wake_process_later(busy_port, process);
+ suspend = erts_save_suspend_process_on_port(busy_port, c_p);
+ else
+ suspend = 1;
- if (!(process_locks & ERTS_PROC_LOCK_STATUS))
- erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
+ if (suspend) {
+#ifdef DEBUG
+ int res =
+#endif
+ suspend_process(c_p, c_p);
+ ASSERT(res);
+ }
+
+ if (!(c_p_locks & ERTS_PROC_LOCK_STATUS))
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+ if (suspend && busy_port && erts_system_monitor_flags.busy_port)
+ monitor_generic(c_p, am_busy_port, busy_port->common.id);
}
void
@@ -6462,16 +6471,19 @@ erts_resume(Process* process, ErtsProcLocks process_locks)
}
int
-erts_resume_processes(ErtsProcList *plp)
+erts_resume_processes(ErtsProcList *list)
{
+ /* 'list' is expected to have been fetched (i.e. not a ring anymore) */
int nresumed = 0;
+ ErtsProcList *plp = list;
+
while (plp) {
Process *proc;
ErtsProcList *fplp;
ASSERT(is_internal_pid(plp->pid));
proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
if (proc) {
- if (proclist_same(plp, proc)) {
+ if (erts_proclist_same(plp, proc)) {
resume_process(proc);
nresumed++;
}
@@ -6487,57 +6499,54 @@ erts_resume_processes(ErtsProcList *plp)
Eterm
erts_get_process_priority(Process *p)
{
- ErtsRunQueue *rq;
- Eterm value;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- rq = erts_get_runq_proc(p);
- erts_smp_runq_lock(rq);
- switch(p->prio) {
- case PRIORITY_MAX: value = am_max; break;
- case PRIORITY_HIGH: value = am_high; break;
- case PRIORITY_NORMAL: value = am_normal; break;
- case PRIORITY_LOW: value = am_low; break;
- default: ASSERT(0); value = am_undefined; break;
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ switch (state & ERTS_PSFLG_PRIO_MASK) {
+ case PRIORITY_MAX: return am_max;
+ case PRIORITY_HIGH: return am_high;
+ case PRIORITY_NORMAL: return am_normal;
+ case PRIORITY_LOW: return am_low;
+ default: ASSERT(0); return am_undefined;
}
- erts_smp_runq_unlock(rq);
- return value;
}
Eterm
-erts_set_process_priority(Process *p, Eterm new_value)
+erts_set_process_priority(Process *p, Eterm value)
{
- ErtsRunQueue *rq;
- Eterm old_value;
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- rq = erts_get_runq_proc(p);
-#ifdef ERTS_SMP
- ASSERT(!(p->status_flags & ERTS_PROC_SFLG_INRUNQ));
-#endif
- erts_smp_runq_lock(rq);
- switch(p->prio) {
- case PRIORITY_MAX: old_value = am_max; break;
- case PRIORITY_HIGH: old_value = am_high; break;
- case PRIORITY_NORMAL: old_value = am_normal; break;
- case PRIORITY_LOW: old_value = am_low; break;
- default: ASSERT(0); old_value = am_undefined; break;
- }
- switch (new_value) {
- case am_max: p->prio = PRIORITY_MAX; break;
- case am_high: p->prio = PRIORITY_HIGH; break;
- case am_normal: p->prio = PRIORITY_NORMAL; break;
- case am_low: p->prio = PRIORITY_LOW; break;
- default: old_value = THE_NON_VALUE; break;
+ erts_aint32_t a, oprio, nprio;
+
+ switch (value) {
+ case am_max: nprio = (erts_aint32_t) PRIORITY_MAX; break;
+ case am_high: nprio = (erts_aint32_t) PRIORITY_HIGH; break;
+ case am_normal: nprio = (erts_aint32_t) PRIORITY_NORMAL; break;
+ case am_low: nprio = (erts_aint32_t) PRIORITY_LOW; break;
+ default: return THE_NON_VALUE; break;
}
- erts_smp_runq_unlock(rq);
- return old_value;
-}
-/* note that P_RUNNING is only set so that we don't try to remove
-** running processes from the schedule queue if they exit - a running
-** process not being in the schedule queue!!
-** Schedule for up to INPUT_REDUCTIONS context switches,
-** return 1 if more to do.
-*/
+ a = erts_smp_atomic32_read_nob(&p->state);
+ if (nprio == (a & ERTS_PSFLG_PRIO_MASK))
+ oprio = nprio;
+ else {
+ erts_aint32_t e, n;
+ do {
+ oprio = a & ERTS_PSFLG_PRIO_MASK;
+ n = e = a;
+
+ ASSERT(!(a & ERTS_PSFLG_IN_RUNQ));
+
+ n &= ~ERTS_PSFLG_PRIO_MASK;
+ n |= nprio;
+ a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ } while (a != e);
+ }
+
+ switch (oprio) {
+ case PRIORITY_MAX: return am_max;
+ case PRIORITY_HIGH: return am_high;
+ case PRIORITY_NORMAL: return am_normal;
+ case PRIORITY_LOW: return am_low;
+ default: ASSERT(0); return am_undefined;
+ }
+}
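The priority now lives in the low bits of the same atomic state word, so erts_set_process_priority performs a masked read-modify-write that is retried with cmpxchg until no concurrent writer interferes. A sketch of such a field update with C11 atomics; the mask value is illustrative only.

    #include <stdatomic.h>
    #include <stdint.h>

    #define PRIO_MASK 0x3u   /* two low bits hold the priority -- illustration only */

    /* Swap in a new priority (0..3) and return the one it replaced. */
    uint32_t sketch_set_priority(_Atomic uint32_t *state, uint32_t nprio)
    {
        uint32_t expected = atomic_load(state);
        uint32_t desired;
        do {
            if ((expected & PRIO_MASK) == nprio)
                return nprio;                          /* nothing to change */
            desired = (expected & ~PRIO_MASK) | nprio;
        } while (!atomic_compare_exchange_weak(state, &expected, desired));
        return expected & PRIO_MASK;                   /* previous priority */
    }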
/*
* schedule() is called from BEAM (process_main()) or HiPE
@@ -6560,7 +6569,6 @@ erts_set_process_priority(Process *p, Eterm new_value)
Process *schedule(Process *p, int calls)
{
ErtsRunQueue *rq;
- ErtsRunPrioQueue *rpq;
erts_aint_t dt;
ErtsSchedulerData *esdp;
int context_reds;
@@ -6568,6 +6576,8 @@ Process *schedule(Process *p, int calls)
int input_reductions;
int actual_reds;
int reds;
+ Uint32 flags;
+ erts_aint32_t state = 0; /* Suppress warning... */
#ifdef USE_VM_PROBES
if (p != NULL && DTRACE_ENABLED(process_unscheduled)) {
@@ -6623,92 +6633,61 @@ Process *schedule(Process *p, int calls)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- if ((erts_system_profile_flags.runnable_procs)
- && (p->status == P_WAITING)) {
- profile_runnable_proc(p, am_inactive);
- }
+ state = erts_smp_atomic32_read_acqb(&p->state);
if (IS_TRACED(p)) {
- if (IS_TRACED_FL(p, F_TRACE_CALLS) && p->status != P_FREE) {
+ if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_OUT);
- }
- switch (p->status) {
- case P_EXITING:
- if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, am_out_exiting);
- break;
- case P_FREE:
+ if (state & (ERTS_PSFLG_FREE|ERTS_PSFLG_EXITING)) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
- trace_sched(p, am_out_exited);
- break;
- default:
+ trace_sched(p, ((state & ERTS_PSFLG_FREE)
+ ? am_out_exited
+ : am_out_exiting));
+ }
+ else {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
trace_sched(p, am_out);
else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(p, am_out);
- break;
}
- }
-
-#ifdef ERTS_SMP
- if (ERTS_PROC_PENDING_EXIT(p)) {
- erts_handle_pending_exit(p,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- p->status_flags |= ERTS_PROC_SFLG_PENDADD2SCHEDQ;
}
- if (p->pending_suspenders) {
- handle_pending_suspend(p,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- ASSERT(!(p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ)
- || p->rcount == 0);
- }
+#ifdef ERTS_SMP
+ if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS));
+ if (p->pending_suspenders)
+ handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS));
#endif
- erts_smp_runq_lock(rq);
- ERTS_PROC_REDUCTIONS_EXECUTED(rq, p->prio, reds, actual_reds);
+ esdp->reductions += reds;
+
+ schedule_out_process(rq, state, p); /* Returns with rq locked! */
+
+ ERTS_PROC_REDUCTIONS_EXECUTED(rq,
+ (int) (state & ERTS_PSFLG_PRIO_MASK),
+ reds,
+ actual_reds);
esdp->current_process = NULL;
#ifdef ERTS_SMP
p->scheduler_data = NULL;
- p->runq_flags &= ~ERTS_PROC_RUNQ_FLG_RUNNING;
- p->status_flags &= ~ERTS_PROC_SFLG_RUNNING;
-
- if (p->status_flags & ERTS_PROC_SFLG_PENDADD2SCHEDQ) {
- ErtsRunQueue *notify_runq;
- p->status_flags &= ~ERTS_PROC_SFLG_PENDADD2SCHEDQ;
- notify_runq = internal_add_to_runq(rq, p);
- if (notify_runq != rq)
- smp_notify_inc_runq(notify_runq);
- }
#endif
- if (p->status == P_FREE) {
+ if (state & ERTS_PSFLG_FREE) {
#ifdef ERTS_SMP
ASSERT(esdp->free_process == p);
esdp->free_process = NULL;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- erts_smp_proc_dec_refc(p);
#else
erts_free_proc(p);
#endif
- } else {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
}
-#ifdef ERTS_SMP
- {
- ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
- rq->procs.pending_exiters = NULL;
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- if (pnd_xtrs) {
- erts_smp_runq_unlock(rq);
- handle_pending_exiters(pnd_xtrs);
- erts_smp_runq_lock(rq);
- }
-
- }
+#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
#endif
ASSERT(!esdp->current_process);
@@ -6728,8 +6707,22 @@ Process *schedule(Process *p, int calls)
ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
check_activities_to_run: {
+#ifdef ERTS_SMP
+ ErtsMigrationPaths *mps;
+ ErtsMigrationPath *mp;
#ifdef ERTS_SMP
+ {
+ ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
+ if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
+ rq->procs.pending_exiters = NULL;
+ erts_smp_runq_unlock(rq);
+ handle_pending_exiters(pnd_xtrs);
+ erts_smp_runq_lock(rq);
+ }
+
+ }
+#endif
if (rq->check_balance_reds <= 0)
check_balance(rq);
@@ -6737,20 +6730,28 @@ Process *schedule(Process *p, int calls)
ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
- if (rq->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK)
- immigrate(rq);
+ mps = erts_get_migration_paths_managed();
+ mp = &mps->mpath[rq->ix];
+
+ if (mp->flags & ERTS_RUNQ_FLGS_IMMIGRATE_QMASK)
+ immigrate(rq, mp);
- continue_check_activities_to_run:
+ continue_check_activities_to_run:
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ continue_check_activities_to_run_known_flags:
- if (rq->flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND
- | ERTS_RUNQ_FLG_SUSPENDED)) {
- if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) {
- ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED);
+
+ if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
+
+ if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
suspend_scheduler(esdp);
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
}
- if (rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
+ if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) {
+ flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
+ flags &= ~ ERTS_RUNQ_FLG_CHK_CPU_BIND;
erts_sched_check_cpu_bind(esdp);
+ }
}
{
@@ -6779,14 +6780,12 @@ Process *schedule(Process *p, int calls)
}
#endif /* ERTS_SMP */
- ASSERT(rq->len == rq->procs.len + rq->ports.info.len);
-
- if ((rq->len == 0 && !rq->misc.start)
- || (rq->halt_in_progress
- && rq->ports.info.len == 0 && !rq->misc.start)) {
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
+ || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
+ /* Prepare for scheduler wait */
#ifdef ERTS_SMP
-
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
rq->wakeup_other = 0;
@@ -6794,21 +6793,27 @@ Process *schedule(Process *p, int calls)
empty_runq(rq);
- if (rq->flags & ERTS_RUNQ_FLG_SUSPENDED) {
- ASSERT(erts_smp_atomic32_read_nob(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED);
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
non_empty_runq(rq);
- goto continue_check_activities_to_run;
+ goto continue_check_activities_to_run_known_flags;
}
- else if (!(rq->flags & ERTS_RUNQ_FLG_INACTIVE)) {
+ else if (!(flags & ERTS_RUNQ_FLG_INACTIVE)) {
+ if (try_steal_task(rq)) {
+ non_empty_runq(rq);
+ goto continue_check_activities_to_run;
+ }
+
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+
/*
* Check for ERTS_RUNQ_FLG_SUSPENDED has to be done
* after trying to steal a task.
*/
- if (try_steal_task(rq)
- || (rq->flags & ERTS_RUNQ_FLG_SUSPENDED)) {
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
non_empty_runq(rq);
- goto continue_check_activities_to_run;
+ goto continue_check_activities_to_run_known_flags;
}
}
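[Editor's note] Before an empty run queue is parked, the scheduler first tries to steal work from a sibling queue and only afterwards re-reads the flags (the comment above explains why the SUSPENDED check must come after the steal attempt). A rough, hedged sketch of a steal pass; the queue layout, locking scheme and names are assumptions, not the ERTS implementation:

#include <pthread.h>
#include <stddef.h>

struct task { struct task *next; };

typedef struct run_queue {
    pthread_mutex_t lock;
    struct task *first;              /* runnable tasks */
} run_queue_t;

extern run_queue_t queues[];         /* hypothetical per-scheduler queues */
extern int nqueues;

/* Try to move one task from a sibling queue onto `rq`.
 * Called with no locks held; returns nonzero on success. */
static int try_steal_task(run_queue_t *rq, int my_ix)
{
    for (int i = 1; i < nqueues; i++) {
        run_queue_t *vq = &queues[(my_ix + i) % nqueues];
        struct task *t;

        pthread_mutex_lock(&vq->lock);
        t = vq->first;
        if (t)
            vq->first = t->next;
        pthread_mutex_unlock(&vq->lock);

        if (t) {
            pthread_mutex_lock(&rq->lock);
            t->next = rq->first;     /* enqueue locally */
            rq->first = t;
            pthread_mutex_unlock(&rq->lock);
            return 1;
        }
    }
    return 0;
}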
@@ -6839,6 +6844,7 @@ Process *schedule(Process *p, int calls)
erl_sys_schedule(1);
dt = erts_do_time_read_and_reset();
if (dt) erts_bump_timer(dt);
+
#ifdef ERTS_SMP
erts_smp_runq_lock(rq);
clear_sys_scheduling();
@@ -6852,14 +6858,14 @@ Process *schedule(Process *p, int calls)
exec_misc_ops(rq);
#ifdef ERTS_SMP
- wakeup_other.check(rq);
+ wakeup_other.check(rq, flags);
#endif
/*
* Find a new port to run.
*/
- if (rq->ports.info.len) {
+ if (RUNQ_READ_LEN(&rq->ports.info.len)) {
int have_outstanding_io;
have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port);
if ((have_outstanding_io && fcalls > 2*input_reductions)
@@ -6886,167 +6892,130 @@ Process *schedule(Process *p, int calls)
/*
* Find a new process to run.
*/
- pick_next_process:
-
- ERTS_DBG_CHK_PROCS_RUNQ(rq);
-
- switch (rq->flags & ERTS_RUNQ_FLGS_PROCS_QMASK) {
- case MAX_BIT:
- case MAX_BIT|HIGH_BIT:
- case MAX_BIT|NORMAL_BIT:
- case MAX_BIT|LOW_BIT:
- case MAX_BIT|HIGH_BIT|NORMAL_BIT:
- case MAX_BIT|HIGH_BIT|LOW_BIT:
- case MAX_BIT|NORMAL_BIT|LOW_BIT:
- case MAX_BIT|HIGH_BIT|NORMAL_BIT|LOW_BIT:
- rpq = &rq->procs.prio[PRIORITY_MAX];
- break;
- case HIGH_BIT:
- case HIGH_BIT|NORMAL_BIT:
- case HIGH_BIT|LOW_BIT:
- case HIGH_BIT|NORMAL_BIT|LOW_BIT:
- rpq = &rq->procs.prio[PRIORITY_HIGH];
- break;
- case NORMAL_BIT:
- rpq = &rq->procs.prio[PRIORITY_NORMAL];
- break;
- case LOW_BIT:
- rpq = &rq->procs.prio[PRIORITY_NORMAL];
- break;
- case NORMAL_BIT|LOW_BIT:
- rpq = &rq->procs.prio[PRIORITY_NORMAL];
- ASSERT(rpq->first != NULL);
- p = rpq->first;
- if (p->prio == PRIORITY_LOW) {
- if (p == rpq->last || p->skipped >= RESCHEDULE_LOW-1)
- p->skipped = 0;
- else {
- /* skip it */
- p->skipped++;
- rpq->first = p->next;
- rpq->first->prev = NULL;
- rpq->last->next = p;
- p->prev = rpq->last;
- p->next = NULL;
- rpq->last = p;
+ pick_next_process: {
+ int prio_q;
+ int qmask;
+
+ flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ qmask = (int) (flags & ERTS_RUNQ_FLGS_PROCS_QMASK);
+ switch (qmask & -qmask) {
+ case MAX_BIT:
+ prio_q = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ prio_q = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ case LOW_BIT:
+ prio_q = PRIORITY_NORMAL;
+ if (check_requeue_process(rq, PRIORITY_NORMAL))
goto pick_next_process;
- }
+ break;
+ case 0: /* No process at all */
+ default:
+ ASSERT(qmask == 0);
+ goto check_activities_to_run;
}
- break;
- case 0: /* No process at all */
- default:
- ASSERT((rq->flags & ERTS_RUNQ_FLGS_PROCS_QMASK) == 0);
- ASSERT(rq->procs.len == 0);
- goto check_activities_to_run;
- }
-
- BM_START_TIMER(system);
- /*
- * Take the chosen process out of the queue.
- */
- ASSERT(rpq->first); /* Wrong qmask in rq->flags? */
- p = rpq->first;
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(rq == p->run_queue);
-#endif
- rpq->first = p->next;
- if (!rpq->first)
- rpq->last = NULL;
- else
- rpq->first->prev = NULL;
+ BM_START_TIMER(system);
- p->next = p->prev = NULL;
+ /*
+ * Take the chosen process out of the queue.
+ */
+ p = dequeue_process(rq, prio_q, &state);
- if (--rq->procs.prio_info[p->prio].len == 0)
- rq->flags &= ~(1 << p->prio);
- ASSERT(rq->procs.len > 0);
- rq->procs.len--;
- ASSERT(rq->len > 0);
- rq->len--;
+ ASSERT(p); /* Wrong qmask in rq->flags? */
- {
- Uint32 ee_flgs = (ERTS_RUNQ_FLG_EVACUATE(p->prio)
- | ERTS_RUNQ_FLG_EMIGRATE(p->prio));
-
- if ((rq->flags & (ERTS_RUNQ_FLG_SUSPENDED|ee_flgs)) == ee_flgs)
- ERTS_UNSET_RUNQ_FLG_EVACUATE(rq->flags, p->prio);
- }
+ while (1) {
+ erts_aint32_t exp, new, tmp;
+ tmp = new = exp = state;
+ new &= ~ERTS_PSFLG_IN_RUNQ;
+ tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT);
+ if (tmp != ERTS_PSFLG_SUSPENDED)
+ new |= ERTS_PSFLG_RUNNING;
+ state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp);
+ if (state == exp) {
+ tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT);
+ if (tmp == ERTS_PSFLG_SUSPENDED)
+ goto pick_next_process;
+ state = new;
+ break;
+ }
+ }
- ERTS_DBG_CHK_PROCS_RUNQ_NOPROC(rq, p);
+ rq->procs.context_switches++;
- rq->procs.context_switches++;
+ esdp->current_process = p;
- esdp->current_process = p;
+ }
#ifdef ERTS_SMP
- p->runq_flags |= ERTS_PROC_RUNQ_FLG_RUNNING;
erts_smp_runq_unlock(rq);
+ if (flags & ERTS_RUNQ_FLG_PROTECTED)
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
+
ERTS_SMP_CHK_NO_PROC_LOCKS;
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
if (erts_sched_stat.enabled) {
+ int prio;
UWord old = ERTS_PROC_SCHED_ID(p,
(ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_STATUS),
(UWord) esdp->no);
int migrated = old && old != esdp->no;
+ prio = (int) (state & ERTS_PSFLG_PRIO_MASK);
+
erts_smp_spin_lock(&erts_sched_stat.lock);
- erts_sched_stat.prio[p->prio].total_executed++;
- erts_sched_stat.prio[p->prio].executed++;
+ erts_sched_stat.prio[prio].total_executed++;
+ erts_sched_stat.prio[prio].executed++;
if (migrated) {
- erts_sched_stat.prio[p->prio].total_migrated++;
- erts_sched_stat.prio[p->prio].migrated++;
+ erts_sched_stat.prio[prio].total_migrated++;
+ erts_sched_stat.prio[prio].migrated++;
}
erts_smp_spin_unlock(&erts_sched_stat.lock);
}
- p->status_flags |= ERTS_PROC_SFLG_RUNNING;
- p->status_flags &= ~ERTS_PROC_SFLG_INRUNQ;
if (ERTS_PROC_PENDING_EXIT(p)) {
erts_handle_pending_exit(p,
ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ state = erts_smp_atomic32_read_nob(&p->state);
}
ASSERT(!p->scheduler_data);
p->scheduler_data = esdp;
-
#endif
- ASSERT(p->status != P_SUSPENDED); /* Never run a suspended process */
+ /* Never run a suspended process */
+ ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
reds = context_reds;
if (IS_TRACED(p)) {
- switch (p->status) {
- case P_EXITING:
+ if (state & ERTS_PSFLG_EXITING) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
trace_sched(p, am_in_exiting);
- break;
- default:
+ }
+ else {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED))
trace_sched(p, am_in);
else if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(p, am_in);
- break;
}
if (IS_TRACED_FL(p, F_TRACE_CALLS)) {
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_IN);
}
}
- if (p->status != P_EXITING)
- p->status = P_RUNNING;
-
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#ifdef ERTS_SMP
- if (is_not_nil(p->tracer_proc))
+ if (is_not_nil(ERTS_TRACER_PROC(p)))
erts_check_my_tracer_proc(p);
#endif
- if (!ERTS_PROC_IS_EXITING(p)
+ if (!(state & ERTS_PSFLG_EXITING)
&& ((FLAGS(p) & F_FORCE_GC)
|| (MSO(p).overhead > BIN_VHEAP_SZ(p)))) {
reds -= erts_garbage_collect(p, 0, p->arg_reg, p->arity);
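[Editor's note] In the pick_next_process block above, the priority queue is chosen by isolating the lowest set bit of the queue mask (qmask & -qmask); MAX occupies the least significant bit, so the lowest set bit is always the best non-empty priority. A small standalone illustration of the trick; the bit assignments are assumed to mirror MAX_BIT/HIGH_BIT/NORMAL_BIT/LOW_BIT but are not the real values:

#include <stdio.h>

enum { Q_MAX = 1 << 0, Q_HIGH = 1 << 1, Q_NORMAL = 1 << 2, Q_LOW = 1 << 3 };

static const char *pick_prio(unsigned qmask)
{
    switch (qmask & -qmask) {        /* lowest set bit == best priority */
    case Q_MAX:    return "max";
    case Q_HIGH:   return "high";
    case Q_NORMAL:
    case Q_LOW:    return "normal";  /* low shares the normal queue */
    default:       return "empty";
    }
}

int main(void)
{
    printf("%s\n", pick_prio(Q_NORMAL | Q_LOW));  /* -> "normal" */
    printf("%s\n", pick_prio(Q_HIGH | Q_LOW));    /* -> "high"   */
    return 0;
}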
@@ -7137,17 +7106,19 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsRunQueue *rq = esdp ? esdp->run_queue : ERTS_RUNQ_IX(0);
ErtsMiscOpList *molp = misc_op_list_alloc();
+#ifdef ERTS_SMP
+ ErtsMigrationPaths *mpaths = erts_get_migration_paths();
- erts_smp_runq_lock(rq);
-
- while (rq->misc.evac_runq) {
- ErtsRunQueue *tmp_rq = rq->misc.evac_runq;
- erts_smp_runq_unlock(rq);
- rq = tmp_rq;
- erts_smp_runq_lock(rq);
+ if (!mpaths)
+ rq = ERTS_RUNQ_IX(0);
+ else {
+ ErtsRunQueue *erq = mpaths->mpath[rq->ix].misc_evac_runq;
+ if (erq)
+ rq = erq;
}
+#endif
- ASSERT(!(rq->flags & ERTS_RUNQ_FLG_SUSPENDED));
+ erts_smp_runq_lock(rq);
molp->next = NULL;
molp->func = func;
@@ -7157,7 +7128,9 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
else
rq->misc.start = molp;
rq->misc.end = molp;
+
erts_smp_runq_unlock(rq);
+
smp_notify_inc_runq(rq);
}
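[Editor's note] erts_schedule_misc_op appends a {func, arg} node to a run queue's misc list (possibly redirected by the migration path) and the scheduler loop later drains it via exec_misc_ops. A schematic of that queue-and-drain pattern; all names, the mutex-based locking and the allocation policy here are invented for illustration:

#include <pthread.h>
#include <stdlib.h>

typedef struct misc_op {
    struct misc_op *next;
    void (*func)(void *);
    void *arg;
} misc_op_t;

typedef struct {
    pthread_mutex_t lock;
    misc_op_t *start, *end;
} misc_queue_t;

static void misc_enqueue(misc_queue_t *q, void (*func)(void *), void *arg)
{
    misc_op_t *op = malloc(sizeof *op);
    if (!op)
        return;                      /* sketch: silently drop on OOM */
    op->next = NULL; op->func = func; op->arg = arg;
    pthread_mutex_lock(&q->lock);
    if (q->end) q->end->next = op; else q->start = op;
    q->end = op;
    pthread_mutex_unlock(&q->lock);
}

/* Called from the scheduler loop (cf. exec_misc_ops()). */
static void misc_drain(misc_queue_t *q)
{
    pthread_mutex_lock(&q->lock);
    misc_op_t *op = q->start;
    q->start = q->end = NULL;
    pthread_mutex_unlock(&q->lock);
    while (op) {
        misc_op_t *next = op->next;
        op->func(op->arg);
        free(op);
        op = next;
    }
}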
@@ -7241,156 +7214,75 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-/*
- * erts_test_next_pid() is only used for testing.
- */
-Sint
-erts_test_next_pid(int set, Uint next)
+void
+erts_free_proc(Process *p)
{
- Sint res;
- Sint p_prev;
-
-
- erts_smp_mtx_lock(&proc_tab_mtx);
-
- if (!set) {
- res = p_next < 0 ? -1 : (p_serial << p_serial_shift | p_next);
- }
- else {
-
- p_serial = (Sint) ((next >> p_serial_shift) & p_serial_mask);
- p_next = (Sint) (erts_process_tab_index_mask & next);
-
- if (p_next >= erts_max_processes) {
- p_next = 0;
- p_serial++;
- p_serial &= p_serial_mask;
- }
-
- p_prev = p_next;
-
- do {
- if (!process_tab[p_next])
- break;
- p_next++;
- if(p_next >= erts_max_processes) {
- p_next = 0;
- p_serial++;
- p_serial &= p_serial_mask;
- }
- } while (p_prev != p_next);
-
- res = process_tab[p_next] ? -1 : (p_serial << p_serial_shift | p_next);
-
- }
+#ifdef ERTS_SMP
+ erts_proc_lock_fin(p);
+#endif
+ erts_free(ERTS_ALC_T_PROC, (void *) p);
+}
- erts_smp_mtx_unlock(&proc_tab_mtx);
+typedef struct {
+ Process *proc;
+ erts_aint32_t state;
+ ErtsRunQueue *run_queue;
+} ErtsEarlyProcInit;
- return res;
+static void early_init_process_struct(void *varg, Eterm data)
+{
+ ErtsEarlyProcInit *arg = (ErtsEarlyProcInit *) varg;
+ Process *proc = arg->proc;
-}
+ proc->common.id = make_internal_pid(data);
+ erts_smp_atomic32_init_relb(&proc->state, arg->state);
-Uint erts_process_count(void)
-{
- erts_aint32_t res = erts_smp_atomic32_read_nob(&process_count);
- ASSERT(res >= 0);
- return (Uint) res;
-}
+#ifdef ERTS_SMP
+ RUNQ_SET_RQ(&proc->run_queue, arg->run_queue);
-void
-erts_free_proc(Process *p)
-{
-#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
- erts_lcnt_proc_lock_destroy(p);
+ erts_proc_lock_init(proc); /* All locks locked */
#endif
- erts_free(ERTS_ALC_T_PROC, (void *) p);
-}
+}
/*
** Allocate process and find out where to place next process.
*/
static Process*
-alloc_process(void)
+alloc_process(ErtsRunQueue *rq, erts_aint32_t state)
{
-#ifdef ERTS_SMP
- erts_pix_lock_t *pix_lock;
-#endif
- Process* p;
- int p_prev;
-
- erts_smp_mtx_lock(&proc_tab_mtx);
-
- if (p_next == -1) {
- p = NULL;
- goto error; /* Process table full! */
- }
+ ErtsEarlyProcInit init_arg;
+ Process *p;
- p = (Process*) erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process));
+ p = erts_alloc_fnf(ERTS_ALC_T_PROC, sizeof(Process));
if (!p)
- goto error; /* ENOMEM */
+ return NULL;
- p_last = p_next;
+ init_arg.proc = (Process *) p;
+ init_arg.run_queue = rq;
+ init_arg.state = state;
- erts_get_emu_time(&p->started);
+ ASSERT(((char *) p) == ((char *) &p->common));
-#ifdef ERTS_SMP
- pix_lock = ERTS_PIX2PIXLOCK(p_next);
- erts_pix_lock(pix_lock);
-#endif
- ASSERT(!process_tab[p_next]);
-
- process_tab[p_next] = p;
- erts_smp_atomic32_inc_nob(&process_count);
- p->id = make_internal_pid(p_serial << p_serial_shift | p_next);
- if (p->id == ERTS_INVALID_PID) {
- /* Do not use the invalid pid; change serial */
- p_serial++;
- p_serial &= p_serial_mask;
- p->id = make_internal_pid(p_serial << p_serial_shift | p_next);
- ASSERT(p->id != ERTS_INVALID_PID);
+ if (!erts_ptab_new_element(&erts_proc,
+ &p->common,
+ (void *) &init_arg,
+ early_init_process_struct)) {
+ erts_free(ERTS_ALC_T_PROC, p);
+ return NULL;
}
- ASSERT(internal_pid_serial(p->id) <= (erts_use_r9_pids_ports
- ? ERTS_MAX_PID_R9_SERIAL
- : ERTS_MAX_PID_SERIAL));
-
-#ifdef ERTS_SMP
- erts_proc_lock_init(p); /* All locks locked */
- erts_pix_unlock(pix_lock);
-#endif
- p->rstatus = P_FREE;
+ ASSERT(internal_pid_serial(p->common.id) <= ERTS_MAX_PID_SERIAL);
+
+ p->approx_started = erts_get_approx_time();
p->rcount = 0;
- /*
- * set p_next to the next available slot
- */
-
- p_prev = p_next;
-
- while (1) {
- p_next++;
- if(p_next >= erts_max_processes) {
- p_serial++;
- p_serial &= p_serial_mask;
- p_next = 0;
- }
-
- if (p_prev == p_next) {
- p_next = -1;
- break; /* Table full! */
- }
-
- if (!process_tab[p_next])
- break; /* found a free slot */
- }
-
- error:
- erts_smp_mtx_unlock(&proc_tab_mtx);
+ ASSERT(p == (Process *) (erts_ptab_pix2intptr_nob(
+ &erts_proc,
+ internal_pid_index(p->common.id))));
return p;
-
}
Eterm
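[Editor's note] alloc_process now hands the half-initialized struct to the process table together with a callback (early_init_process_struct) that runs while the slot is being claimed, so the pid, the atomic state word, the run queue and the locks are in place before any lookup can see the process. A schematic version of that insert-with-init-callback pattern; the table API below is hypothetical:

#include <stdint.h>
#include <stdlib.h>

typedef struct proc {
    uint64_t id;                     /* set by the init callback */
    uint32_t state;
} proc_t;

/* Hypothetical table API: inserts `elem` and calls init(elem, slot_data)
 * before the entry becomes visible to lookups; returns 0 when full. */
extern int table_new_element(void *elem,
                             void (*init)(void *elem, uint64_t slot_data));

static void early_init(void *velem, uint64_t slot_data)
{
    proc_t *p = velem;
    p->id = slot_data;               /* pid derived from the claimed slot */
    p->state = 0;                    /* initial state flags */
}

static proc_t *alloc_proc(void)
{
    proc_t *p = malloc(sizeof *p);
    if (!p)
        return NULL;
    if (!table_new_element(p, early_init)) {   /* table full */
        free(p);
        return NULL;
    }
    return p;
}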
@@ -7400,13 +7292,15 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
- ErtsRunQueue *rq, *notify_runq;
+ ErtsRunQueue *rq = NULL;
Process *p;
Sint arity; /* Number of arguments. */
Uint arg_size; /* Size of arguments. */
Uint sz; /* Needed words on heap. */
Uint heap_need; /* Size needed on heap. */
Eterm res = THE_NON_VALUE;
+ erts_aint32_t state = 0;
+ erts_aint32_t prio = (erts_aint32_t) PRIORITY_NORMAL;
#ifdef ERTS_SMP
erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -7420,8 +7314,24 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
so->error_code = BADARG;
goto error;
}
- p = alloc_process(); /* All proc locks are locked by this thread
- on success */
+
+ if (so->flags & SPO_USE_ARGS) {
+ if (so->scheduler) {
+ int ix = so->scheduler-1;
+ ASSERT(0 <= ix && ix < erts_no_run_queues);
+ rq = ERTS_RUNQ_IX(ix);
+ state |= ERTS_PSFLG_BOUND;
+ }
+ prio = (erts_aint32_t) so->priority;
+ }
+
+ state |= (prio & ERTS_PSFLG_PRIO_MASK);
+
+ if (!rq)
+ rq = erts_get_runq_proc(parent);
+
+ p = alloc_process(rq, state); /* All proc locks are locked by this thread
+ on success */
if (!p) {
erts_send_error_to_logger_str(parent->group_leader,
"Too many processes\n");
@@ -7441,22 +7351,16 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->flags = erts_default_process_flags;
- /* Scheduler queue mutex should be locked when changeing
- * prio. In this case we don't have to lock it, since
- * noone except us has access to the process.
- */
if (so->flags & SPO_USE_ARGS) {
p->min_heap_size = so->min_heap_size;
p->min_vheap_size = so->min_vheap_size;
- p->prio = so->priority;
p->max_gen_gcs = so->max_gen_gcs;
} else {
p->min_heap_size = H_MIN_SIZE;
p->min_vheap_size = BIN_VH_MIN_SIZE;
- p->prio = PRIORITY_NORMAL;
p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
}
- p->skipped = 0;
+ p->schedule_count = 0;
ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0));
p->initial[INITIAL_MOD] = mod;
@@ -7525,21 +7429,21 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->reds = 0;
#ifdef ERTS_SMP
- p->u.ptimer = NULL;
+ p->common.u.alive.ptimer = NULL;
#else
- sys_memset(&p->u.tm, 0, sizeof(ErlTimer));
+ sys_memset(&p->common.u.alive.tm, 0, sizeof(ErlTimer));
#endif
- p->reg = NULL;
- p->nlinks = NULL;
- p->monitors = NULL;
+ p->common.u.alive.reg = NULL;
+ ERTS_P_LINKS(p) = NULL;
+ ERTS_P_MONITORS(p) = NULL;
p->nodes_monitors = NULL;
p->suspend_monitors = NULL;
ASSERT(is_pid(parent->group_leader));
if (parent->group_leader == ERTS_INVALID_PID)
- p->group_leader = p->id;
+ p->group_leader = p->common.id;
else {
/* Needs to be done after the heap has been set up */
p->group_leader =
@@ -7548,7 +7452,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
: STORE_NC(&p->htop, &p->off_heap, parent->group_leader);
}
- erts_get_default_tracing(&p->trace_flags, &p->tracer_proc);
+ erts_get_default_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER_PROC(p));
p->msg.first = NULL;
p->msg.last = &p->msg.first;
@@ -7558,9 +7462,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->msg_inq.first = NULL;
p->msg_inq.last = &p->msg_inq.first;
p->msg_inq.len = 0;
- p->bound_runq = NULL;
#endif
- p->bif_timers = NULL;
+ p->u.bif_timers = NULL;
p->mbuf = NULL;
p->mbuf_sz = 0;
p->psd = NULL;
@@ -7572,7 +7475,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
DT_UTAG(p) = NIL;
DT_UTAG_FLAGS(p) = 0;
#endif
- p->parent = parent->id == ERTS_INVALID_PID ? NIL : parent->id;
+ p->parent = (parent->common.id == ERTS_INVALID_PID
+ ? NIL
+ : parent->common.id);
INIT_HOLE_CHECK(p);
#ifdef DEBUG
@@ -7580,18 +7485,19 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#endif
if (IS_TRACED(parent)) {
- if (parent->trace_flags & F_TRACE_SOS) {
- p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
- p->tracer_proc = parent->tracer_proc;
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS) {
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
+ ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
}
if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) {
- trace_proc_spawn(parent, p->id, mod, func, args);
+ trace_proc_spawn(parent, p->common.id, mod, func, args);
}
- if (parent->trace_flags & F_TRACE_SOS1) { /* Overrides TRACE_CHILDREN */
- p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
- p->tracer_proc = parent->tracer_proc;
- p->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
- parent->trace_flags &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOS1) {
+ /* Overrides TRACE_CHILDREN */
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent) & TRACEE_FLAGS);
+ ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent);
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
+ ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOS1 | F_TRACE_SOS);
}
}
@@ -7604,27 +7510,27 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
int ret;
#endif
if (IS_TRACED_FL(parent, F_TRACE_PROCS)) {
- trace_proc(parent, parent, am_link, p->id);
+ trace_proc(parent, parent, am_link, p->common.id);
}
#ifdef DEBUG
- ret = erts_add_link(&(parent->nlinks), LINK_PID, p->id);
+ ret = erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
ASSERT(ret == 0);
- ret = erts_add_link(&(p->nlinks), LINK_PID, parent->id);
+ ret = erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
ASSERT(ret == 0);
#else
- erts_add_link(&(parent->nlinks), LINK_PID, p->id);
- erts_add_link(&(p->nlinks), LINK_PID, parent->id);
+ erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
+ erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
#endif
if (IS_TRACED(parent)) {
- if (parent->trace_flags & (F_TRACE_SOL|F_TRACE_SOL1)) {
- p->trace_flags |= (parent->trace_flags & TRACEE_FLAGS);
- p->tracer_proc = parent->tracer_proc; /* maybe steal */
+ if (ERTS_TRACE_FLAGS(parent) & (F_TRACE_SOL|F_TRACE_SOL1)) {
+ ERTS_TRACE_FLAGS(p) |= (ERTS_TRACE_FLAGS(parent)&TRACEE_FLAGS);
+ ERTS_TRACER_PROC(p) = ERTS_TRACER_PROC(parent); /*maybe steal*/
- if (parent->trace_flags & F_TRACE_SOL1) { /* maybe override */
- p ->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
- parent->trace_flags &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ if (ERTS_TRACE_FLAGS(parent) & F_TRACE_SOL1) {/*maybe override*/
+ ERTS_TRACE_FLAGS(p) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
+ ERTS_TRACE_FLAGS(parent) &= ~(F_TRACE_SOL1 | F_TRACE_SOL);
}
}
}
@@ -7637,16 +7543,13 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm mref;
mref = erts_make_ref(parent);
- erts_add_monitor(&(parent->monitors), MON_ORIGIN, mref, p->id, NIL);
- erts_add_monitor(&(p->monitors), MON_TARGET, mref, parent->id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(parent), MON_ORIGIN, mref, p->common.id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(p), MON_TARGET, mref, parent->common.id, NIL);
so->mref = mref;
}
#ifdef ERTS_SMP
p->scheduler_data = NULL;
- p->is_exiting = 0;
- p->status_flags = 0;
- p->runq_flags = 0;
p->suspendee = NIL;
p->pending_suspenders = NULL;
p->pending_exit.reason = THE_NON_VALUE;
@@ -7657,36 +7560,17 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->fp_exception = 0;
#endif
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+
+ res = p->common.id;
+
/*
* Schedule process for execution.
*/
- if (!((so->flags & SPO_USE_ARGS) && so->scheduler))
- rq = erts_get_runq_proc(parent);
- else {
- int ix = so->scheduler-1;
- ASSERT(0 <= ix && ix < erts_no_run_queues);
- rq = ERTS_RUNQ_IX(ix);
- p->bound_runq = rq;
- }
-
- erts_smp_runq_lock(rq);
-
-#ifdef ERTS_SMP
- p->run_queue = rq;
-#endif
-
- p->status = P_WAITING;
- notify_runq = internal_add_to_runq(rq, p);
-
- erts_smp_runq_unlock(rq);
-
- smp_notify_inc_runq(notify_runq);
+ schedule_process(p, state, 0);
- res = p->id;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
-
- VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->id));
+ VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id));
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_spawn)) {
@@ -7720,15 +7604,11 @@ void erts_init_empty_process(Process *p)
p->max_gen_gcs = 0;
p->min_heap_size = 0;
p->min_vheap_size = 0;
- p->status = P_RUNABLE;
- p->gcstatus = P_RUNABLE;
- p->rstatus = P_RUNABLE;
p->rcount = 0;
- p->id = ERTS_INVALID_PID;
- p->prio = PRIORITY_NORMAL;
+ p->common.id = ERTS_INVALID_PID;
p->reds = 0;
- p->tracer_proc = NIL;
- p->trace_flags = F_INITIAL_TRACE_FLAGS;
+ ERTS_TRACER_PROC(p) = NIL;
+ ERTS_TRACE_FLAGS(p) = F_INITIAL_TRACE_FLAGS;
p->group_leader = ERTS_INVALID_PID;
p->flags = 0;
p->fvalue = NIL;
@@ -7741,15 +7621,14 @@ void erts_init_empty_process(Process *p)
p->bin_old_vheap = 0;
p->bin_vheap_mature = 0;
#ifdef ERTS_SMP
- p->u.ptimer = NULL;
- p->bound_runq = NULL;
+ p->common.u.alive.ptimer = NULL;
#else
- memset(&(p->u.tm), 0, sizeof(ErlTimer));
+ memset(&(p->common.u.alive.tm), 0, sizeof(ErlTimer));
#endif
p->next = NULL;
p->off_heap.first = NULL;
p->off_heap.overhead = 0;
- p->reg = NULL;
+ p->common.u.alive.reg = NULL;
p->heap_sz = 0;
p->high_water = NULL;
p->old_hend = NULL;
@@ -7758,15 +7637,15 @@ void erts_init_empty_process(Process *p)
p->mbuf = NULL;
p->mbuf_sz = 0;
p->psd = NULL;
- p->monitors = NULL;
- p->nlinks = NULL; /* List of links */
+ ERTS_P_MONITORS(p) = NULL;
+ ERTS_P_LINKS(p) = NULL; /* List of links */
p->nodes_monitors = NULL;
p->suspend_monitors = NULL;
p->msg.first = NULL;
p->msg.last = &p->msg.first;
p->msg.save = &p->msg.first;
p->msg.len = 0;
- p->bif_timers = NULL;
+ p->u.bif_timers = NULL;
p->dictionary = NULL;
p->seq_trace_clock = 0;
p->seq_trace_lastcnt = 0;
@@ -7793,8 +7672,8 @@ void erts_init_empty_process(Process *p)
p->def_arg_reg[5] = 0;
p->parent = NIL;
- p->started.tv_sec = 0;
- p->started.tv_usec = 0;
+ p->approx_started = 0;
+ p->common.u.alive.started_interval = 0;
#ifdef HIPE
hipe_init_process(&p->hipe);
@@ -7808,12 +7687,10 @@ void erts_init_empty_process(Process *p)
p->last_old_htop = NULL;
#endif
+ erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
#ifdef ERTS_SMP
p->scheduler_data = NULL;
- p->is_exiting = 0;
- p->status_flags = 0;
- p->runq_flags = 0;
p->msg_inq.first = NULL;
p->msg_inq.last = &p->msg_inq.first;
p->msg_inq.len = 0;
@@ -7823,7 +7700,7 @@ void erts_init_empty_process(Process *p)
p->pending_exit.bp = NULL;
erts_proc_lock_init(p);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
- p->run_queue = ERTS_RUNQ_IX(0);
+ RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0));
#endif
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
@@ -7842,25 +7719,25 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->stop == NULL);
ASSERT(p->hend == NULL);
ASSERT(p->heap == NULL);
- ASSERT(p->id == ERTS_INVALID_PID);
- ASSERT(p->tracer_proc == NIL);
- ASSERT(p->trace_flags == F_INITIAL_TRACE_FLAGS);
+ ASSERT(p->common.id == ERTS_INVALID_PID);
+ ASSERT(ERTS_TRACER_PROC(p) == NIL);
+ ASSERT(ERTS_TRACE_FLAGS(p) == F_INITIAL_TRACE_FLAGS);
ASSERT(p->group_leader == ERTS_INVALID_PID);
ASSERT(p->next == NULL);
- ASSERT(p->reg == NULL);
+ ASSERT(p->common.u.alive.reg == NULL);
ASSERT(p->heap_sz == 0);
ASSERT(p->high_water == NULL);
ASSERT(p->old_hend == NULL);
ASSERT(p->old_htop == NULL);
ASSERT(p->old_heap == NULL);
- ASSERT(p->monitors == NULL);
- ASSERT(p->nlinks == NULL);
+ ASSERT(ERTS_P_MONITORS(p) == NULL);
+ ASSERT(ERTS_P_LINKS(p) == NULL);
ASSERT(p->nodes_monitors == NULL);
ASSERT(p->suspend_monitors == NULL);
ASSERT(p->msg.first == NULL);
ASSERT(p->msg.len == 0);
- ASSERT(p->bif_timers == NULL);
+ ASSERT(p->u.bif_timers == NULL);
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
ASSERT(p->cp == NULL);
@@ -7901,8 +7778,8 @@ erts_cleanup_empty_process(Process* p)
free_message_buffer(p->mbuf);
p->mbuf = NULL;
}
-#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
- erts_lcnt_proc_lock_destroy(p);
+#ifdef ERTS_SMP
+ erts_proc_lock_fin(p);
#endif
#ifdef DEBUG
erts_debug_verify_clean_empty_process(p);
@@ -7917,7 +7794,7 @@ delete_process(Process* p)
{
ErlMessage* mp;
- VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->id));
+ VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id));
/* Cleanup psd */
@@ -7991,34 +7868,40 @@ delete_process(Process* p)
mp = next_mp;
}
- ASSERT(!p->monitors);
- ASSERT(!p->nlinks);
ASSERT(!p->nodes_monitors);
ASSERT(!p->suspend_monitors);
p->fvalue = NIL;
}
+static ERTS_INLINE erts_aint32_t
+set_proc_exiting_state(Process *p, erts_aint32_t state)
+{
+ erts_aint32_t a, n, e;
+ a = state;
+ while (1) {
+ n = e = a;
+ n &= ~(ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT);
+ n |= ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE;
+ if (!(a & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING)))
+ n |= ERTS_PSFLG_IN_RUNQ;
+ a = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e);
+ if (a == e)
+ break;
+ }
+ return a;
+}
+
static ERTS_INLINE void
-set_proc_exiting(Process *p, Eterm reason, ErlHeapFragment *bp)
+set_proc_exiting(Process *p,
+ erts_aint32_t state,
+ Eterm reason,
+ ErlHeapFragment *bp)
{
-#ifdef ERTS_SMP
- erts_pix_lock_t *pix_lock = ERTS_PID2PIXLOCK(p->id);
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL);
- /*
- * You are required to have all proc locks and the pix lock when going
- * to status P_EXITING. This makes it is enough to take any lock when
- * looking up a process (pid2proc()) to prevent the looked up process
- * from exiting until the lock has been released.
- */
- erts_pix_lock(pix_lock);
- p->is_exiting = 1;
-#endif
- p->status = P_EXITING;
-#ifdef ERTS_SMP
- erts_pix_unlock(pix_lock);
-#endif
+ state = set_proc_exiting_state(p, state);
+
p->fvalue = reason;
if (bp)
erts_link_mbuf_to_proc(p, bp);
@@ -8031,6 +7914,14 @@ set_proc_exiting(Process *p, Eterm reason, ErlHeapFragment *bp)
KILL_CATCHES(p);
cancel_timer(p);
p->i = (BeamInstr *) beam_exit;
+
+ if (erts_system_profile_flags.runnable_procs
+ && !(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) {
+ profile_runnable_proc(p, am_active);
+ }
+
+ if (!(state & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING)))
+ add2runq(p, state);
}
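[Editor's note] set_proc_exiting_state above is a classic compare-and-swap retry loop: compute the desired flag word from the last observed value and retry until the exchange succeeds, claiming the in-runq bit only if the process is neither enqueued nor running. A reduced sketch with C11 atomics; the flag names and values are illustrative only:

#include <stdatomic.h>
#include <stdint.h>

#define F_SUSPENDED (1u << 0)
#define F_EXITING   (1u << 1)
#define F_ACTIVE    (1u << 2)
#define F_IN_RUNQ   (1u << 3)
#define F_RUNNING   (1u << 4)

/* Returns the state observed before the update, like the real function. */
static uint32_t set_exiting(_Atomic uint32_t *state)
{
    uint32_t old = atomic_load_explicit(state, memory_order_relaxed);
    for (;;) {
        uint32_t new = old;
        new &= ~F_SUSPENDED;
        new |= F_EXITING | F_ACTIVE;
        if (!(old & (F_IN_RUNQ | F_RUNNING)))
            new |= F_IN_RUNQ;        /* caller will enqueue it itself */
        if (atomic_compare_exchange_weak_explicit(state, &old, new,
                                                  memory_order_release,
                                                  memory_order_relaxed))
            return old;
        /* `old` was refreshed by the failed CAS; loop and retry. */
    }
}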
@@ -8043,8 +7934,8 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
ASSERT(is_value(c_p->pending_exit.reason));
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks);
ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_LC_ASSERT(c_p->status != P_EXITING);
- ERTS_SMP_LC_ASSERT(c_p->status != P_FREE);
+ ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)
+ & erts_smp_atomic32_read_nob(&c_p->state)));
/* Ensure that all locks on c_p are locked before proceeding... */
if (locks == ERTS_PROC_LOCKS_ALL)
@@ -8057,7 +7948,10 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
}
}
- set_proc_exiting(c_p, c_p->pending_exit.reason, c_p->pending_exit.bp);
+ set_proc_exiting(c_p,
+ erts_smp_atomic32_read_acqb(&c_p->state),
+ c_p->pending_exit.reason,
+ c_p->pending_exit.bp);
c_p->pending_exit.reason = THE_NON_VALUE;
c_p->pending_exit.bp = NULL;
@@ -8068,16 +7962,19 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
static void
handle_pending_exiters(ErtsProcList *pnd_xtrs)
{
+ /* 'list' is expected to have been fetched (i.e. not a ring anymore) */
ErtsProcList *plp = pnd_xtrs;
- ErtsProcList *free_plp;
+
while (plp) {
+ ErtsProcList *free_plp;
Process *p = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCKS_ALL);
if (p) {
- if (proclist_same(plp, p)
- && !(p->status_flags & ERTS_PROC_SFLG_RUNNING)) {
- ASSERT(p->status_flags & ERTS_PROC_SFLG_INRUNQ);
- ASSERT(ERTS_PROC_PENDING_EXIT(p));
- erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
+ if (erts_proclist_same(plp, p)) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ if (!(state & ERTS_PSFLG_RUNNING)) {
+ ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
+ erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
+ }
}
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
}
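[Editor's note] handle_pending_exiters works on a list that was detached from the run queue in one step; each entry is then re-validated (erts_proclist_same) because the target may already be gone, or its slot reused, by the time the walk reaches it. A schematic of that validate-then-act walk; the lookup helpers and list node are invented for illustration:

#include <stdint.h>
#include <stdlib.h>

typedef struct pend_exit {
    struct pend_exit *next;
    uint64_t pid;                    /* saved identity of the target */
} pend_exit_t;

/* Hypothetical helpers. */
extern void *pid_lookup_and_lock(uint64_t pid);   /* NULL if gone     */
extern uint64_t proc_id(void *proc);              /* current identity */
extern int proc_is_running(void *proc);
extern void deliver_pending_exit(void *proc);
extern void proc_unlock(void *proc);

static void handle_pending(pend_exit_t *list)
{
    while (list) {
        pend_exit_t *next = list->next;
        void *p = pid_lookup_and_lock(list->pid);
        if (p) {
            /* Same process (slot not reused) and not currently running? */
            if (proc_id(p) == list->pid && !proc_is_running(p))
                deliver_pending_exit(p);
            proc_unlock(p);
        }
        free(list);
        list = next;
    }
}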
@@ -8101,11 +7998,10 @@ save_pending_exiter(Process *p)
erts_smp_runq_lock(rq);
- plp->next = rq->procs.pending_exiters;
- rq->procs.pending_exiters = plp;
+ erts_proclist_store_last(&rq->procs.pending_exiters, plp);
erts_smp_runq_unlock(rq);
-
+ wake_scheduler(rq, 1);
}
#endif
@@ -8149,7 +8045,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
hp = bp->mem;
mess = copy_struct(exit_term, term_size, &hp, &bp->off_heap);
/* the trace token must in this case be updated by the caller */
- seq_trace_output(token, mess, SEQ_TRACE_SEND, to->id, NULL);
+ seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, NULL);
temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap);
erts_queue_message(to, to_locksp, bp, mess, temp_token
#ifdef USE_VM_PROBES
@@ -8167,7 +8063,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
* SMP emulator). When the signal is received the receiver receives an
* 'EXIT' message if it is trapping exits; otherwise, it will either
* ignore the signal if the exit reason is normal, or go into an
- * exiting state (status P_EXITING). When a process has gone into the
+ * exiting state (ERTS_PSFLG_EXITING). When a process has gone into the
* exiting state it will not execute any more Erlang code, but it might
* take a while before it actually exits. The exit signal is being
* received when the 'EXIT' message is put in the message queue, the
@@ -8240,6 +8136,7 @@ send_exit_signal(Process *c_p, /* current process if and only
Uint32 flags /* flags */
)
{
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
Eterm rsn = reason == am_kill ? am_killed : reason;
ERTS_SMP_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp));
@@ -8261,7 +8158,7 @@ send_exit_signal(Process *c_p, /* current process if and only
}
#endif
- if (ERTS_PROC_IS_TRAPPING_EXITS(rp)
+ if ((state & ERTS_PSFLG_TRAP_EXIT)
&& (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
if (is_not_nil(token)
#ifdef USE_VM_PROBES
@@ -8277,9 +8174,7 @@ send_exit_signal(Process *c_p, /* current process if and only
}
else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) {
#ifdef ERTS_SMP
- if (!ERTS_PROC_PENDING_EXIT(rp) && !rp->is_exiting) {
- ASSERT(rp->status != P_EXITING);
- ASSERT(rp->status != P_FREE);
+ if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) {
ASSERT(!rp->pending_exit.bp);
if (rp == c_p && (*rp_locks & ERTS_PROC_LOCK_MAIN)) {
@@ -8295,9 +8190,9 @@ send_exit_signal(Process *c_p, /* current process if and only
}
*rp_locks = ERTS_PROC_LOCKS_ALL;
}
- set_proc_exiting(c_p, rsn, NULL);
+ set_proc_exiting(c_p, state, rsn, NULL);
}
- else if (!(rp->status_flags & ERTS_PROC_SFLG_RUNNING)) {
+ else if (!(state & ERTS_PSFLG_RUNNING)) {
/* Process not running ... */
ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
if (need_locks
@@ -8314,6 +8209,7 @@ send_exit_signal(Process *c_p, /* current process if and only
/* ...and we have all locks on it... */
*rp_locks = ERTS_PROC_LOCKS_ALL;
set_proc_exiting(rp,
+ state,
(is_immed(rsn)
? rsn
: copy_object(rsn, rp)),
@@ -8343,11 +8239,9 @@ send_exit_signal(Process *c_p, /* current process if and only
&bp->off_heap);
rp->pending_exit.bp = bp;
}
- ASSERT(ERTS_PROC_PENDING_EXIT(rp));
+ erts_smp_atomic32_read_bor_relb(&rp->state,
+ ERTS_PSFLG_PENDING_EXIT);
}
- if (!(rp->status_flags
- & (ERTS_PROC_SFLG_INRUNQ|ERTS_PROC_SFLG_RUNNING)))
- erts_add_to_runq(rp);
}
/* else:
*
@@ -8359,17 +8253,14 @@ send_exit_signal(Process *c_p, /* current process if and only
* exit or by itself before seeing the pending exit.
*/
#else /* !ERTS_SMP */
- if (c_p == rp) {
- rp->status = P_EXITING;
- c_p->fvalue = rsn;
- }
- else if (rp->status != P_EXITING) { /* No recursive process exits /PaN */
- Eterm old_status = rp->status;
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
+ if (!(state & ERTS_PSFLG_EXITING)) {
set_proc_exiting(rp,
- is_immed(rsn) ? rsn : copy_object(rsn, rp),
+ state,
+ (is_immed(rsn) || c_p == rp
+ ? rsn
+ : copy_object(rsn, rp)),
NULL);
- if (old_status != P_RUNABLE && old_status != P_RUNNING)
- erts_add_to_runq(rp);
}
#endif
return -1; /* Receiver will exit */
@@ -8445,7 +8336,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
if (!rp) {
goto done;
}
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon == NULL) {
goto done;
@@ -8480,7 +8371,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
ASSERT(mon->type == MON_TARGET);
ASSERT(is_pid(mon->pid) || is_internal_port(mon->pid));
if (is_internal_port(mon->pid)) {
- Port *prt = erts_id2port(mon->pid, NULL, 0);
+ Port *prt = erts_id2port(mon->pid);
if (prt == NULL) {
goto done;
}
@@ -8496,13 +8387,13 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
goto done;
}
UseTmpHeapNoproc(3);
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
if (rmon) {
erts_destroy_monitor(rmon);
watched = (is_atom(mon->name)
? TUPLE2(lhp, mon->name,
erts_this_dist_entry->sysname)
- : pcontext->p->id);
+ : pcontext->p->common.id);
erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
watched, pcontext->reason);
}
@@ -8567,21 +8458,22 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
switch(lnk->type) {
case LINK_PID:
if(is_internal_port(item)) {
- Port *prt = erts_id2port(item, NULL, 0);
- if (prt) {
- rlnk = erts_remove_link(&prt->nlinks, p->id);
- if (rlnk)
- erts_destroy_link(rlnk);
- erts_do_exit_port(prt, p->id, reason);
- erts_port_release(prt);
- }
+ Port *prt = erts_port_lookup(item, ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (prt)
+ erts_port_exit(NULL,
+ (ERTS_PORT_SIG_FLG_FORCE_SCHED
+ | ERTS_PORT_SIG_FLG_BROKEN_LINK),
+ prt,
+ p->common.id,
+ reason,
+ NULL);
}
else if(is_external_port(item)) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Erroneous link between %T and external port %T "
"found\n",
- p->id,
+ p->common.id,
item);
erts_send_error_to_logger_nogl(dsbufp);
ASSERT(0); /* It isn't possible to setup such a link... */
@@ -8591,14 +8483,14 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
| ERTS_PROC_LOCKS_XSIG_SEND);
rp = erts_pid2proc(NULL, 0, item, rp_locks);
if (rp) {
- rlnk = erts_remove_link(&(rp->nlinks), p->id);
+ rlnk = erts_remove_link(&ERTS_P_LINKS(rp), p->common.id);
/* If rlnk == NULL, we got unlinked while exiting,
i.e., do nothing... */
if (rlnk) {
int xres;
erts_destroy_link(rlnk);
xres = send_exit_signal(NULL,
- p->id,
+ p->common.id,
rp,
&rp_locks,
reason,
@@ -8610,7 +8502,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
/* We didn't exit the process and it is traced */
if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- trace_proc(p, rp, am_getting_unlinked, p->id);
+ trace_proc(p, rp, am_getting_unlinked, p->common.id);
}
}
}
@@ -8624,12 +8516,12 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
ErtsDSigData dsd;
int code;
ErtsDistLinkData dld;
- erts_remove_dist_link(&dld, p->id, item, dep);
+ erts_remove_dist_link(&dld, p->common.id, item, dep);
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0);
if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_exit_tt(&dsd, p->id, item, reason,
- SEQ_TRACE_TOKEN(p));
+ code = erts_dsig_send_exit_tt(&dsd, p->common.id, item,
+ reason, SEQ_TRACE_TOKEN(p));
ASSERT(code == ERTS_DSIG_SEND_OK);
}
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
@@ -8644,7 +8536,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
/* dist entries have node links in a separate structure to
avoid confusion */
erts_smp_de_links_lock(dep);
- rlnk = erts_remove_link(&(dep->node_links), p->id);
+ rlnk = erts_remove_link(&(dep->node_links), p->common.id);
erts_smp_de_links_unlock(dep);
if (rlnk)
erts_destroy_link(rlnk);
@@ -8672,22 +8564,14 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
erts_destroy_suspend_monitor(smon);
}
-static void
-continue_exit_process(Process *p
-#ifdef ERTS_SMP
- , erts_pix_lock_t *pix_lock
-#endif
- );
-
/* this function finishes a process and propagates exit messages - called
by process_main when a process dies */
void
erts_do_exit_process(Process* p, Eterm reason)
{
#ifdef ERTS_SMP
- erts_pix_lock_t *pix_lock = ERTS_PID2PIXLOCK(p->id);
+ erts_aint32_t state;
#endif
-
p->arity = 0; /* No live registers */
p->fvalue = reason;
@@ -8705,27 +8589,17 @@ erts_do_exit_process(Process* p, Eterm reason)
#ifdef ERTS_SMP
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* By locking all locks (main lock is already locked) when going
- to status P_EXITING, it is enough to take any lock when
+ to exiting state (ERTS_PSFLG_EXITING), it is enough to take any lock when
looking up a process (erts_pid2proc()) to prevent the looked up
process from exiting until the lock has been released. */
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
-
- if (erts_system_profile_flags.runnable_procs && (p->status != P_WAITING)) {
- profile_runnable_proc(p, am_inactive);
- }
-
-#ifdef ERTS_SMP
- erts_pix_lock(pix_lock);
- p->is_exiting = 1;
-#endif
-
- p->status = P_EXITING;
-#ifdef ERTS_SMP
- erts_pix_unlock(pix_lock);
-
- if (ERTS_PROC_PENDING_EXIT(p)) {
+#ifndef ERTS_SMP
+ set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state));
+#else
+ state = set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state));
+ if (state & ERTS_PSFLG_PENDING_EXIT) {
/* Process exited before pending exit was received... */
p->pending_exit.reason = THE_NON_VALUE;
if (p->pending_exit.bp) {
@@ -8747,46 +8621,30 @@ erts_do_exit_process(Process* p, Eterm reason)
trace_proc(p, p, am_exit, reason);
}
- erts_trace_check_exiting(p->id);
+ erts_trace_check_exiting(p->common.id);
- ASSERT((p->trace_flags & F_INITIAL_TRACE_FLAGS) == F_INITIAL_TRACE_FLAGS);
+ ASSERT((ERTS_TRACE_FLAGS(p) & F_INITIAL_TRACE_FLAGS)
+ == F_INITIAL_TRACE_FLAGS);
cancel_timer(p); /* Always cancel timer just in case */
- /*
- * The timer of this process can *not* be used anymore. The field used
- * for the timer is now used for misc exiting data.
- */
- p->u.exit_data = NULL;
-
- if (p->bif_timers)
+ if (p->u.bif_timers)
erts_cancel_bif_timers(p, ERTS_PROC_LOCKS_ALL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#ifdef ERTS_SMP
- continue_exit_process(p, pix_lock);
-#else
- continue_exit_process(p);
-#endif
-}
+ /*
+ * The p->u.bif_timers of this process can *not* be used anymore;
+     * it will be overwritten by misc termination data.
+ */
+ p->u.terminate = NULL;
-void
-erts_continue_exit_process(Process *c_p)
-{
-#ifdef ERTS_SMP
- continue_exit_process(c_p, ERTS_PID2PIXLOCK(c_p->id));
-#else
- continue_exit_process(c_p);
-#endif
+
+ erts_continue_exit_process(p);
}
-static void
-continue_exit_process(Process *p
-#ifdef ERTS_SMP
- , erts_pix_lock_t *pix_lock
-#endif
- )
+void
+erts_continue_exit_process(Process *p)
{
ErtsLink* lnk;
ErtsMonitor *mon;
@@ -8802,11 +8660,7 @@ continue_exit_process(Process *p
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
-#ifdef DEBUG
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- ASSERT(p->status == P_EXITING);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-#endif
+ ASSERT(ERTS_PROC_IS_EXITING(p));
#ifdef ERTS_SMP
if (p->flags & F_HAVE_BLCKD_MSCHED) {
@@ -8857,9 +8711,9 @@ continue_exit_process(Process *p
* The registered name *should* be the last "erlang resource" to
* cleanup.
*/
- if (p->reg) {
+ if (p->common.u.alive.reg) {
(void) erts_unregister_name(p, ERTS_PROC_LOCK_MAIN, NULL, THE_NON_VALUE);
- ASSERT(!p->reg);
+ ASSERT(!p->common.u.alive.reg);
}
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -8873,49 +8727,33 @@ continue_exit_process(Process *p
yield_allowed = 0;
#endif
+ /*
+ * Note! The monitor and link fields will be overwritten
+ * by erts_ptab_delete_element() below.
+ */
+ mon = ERTS_P_MONITORS(p);
+ lnk = ERTS_P_LINKS(p);
+
{
- int pix;
/* Do *not* use erts_get_runq_proc() */
ErtsRunQueue *rq;
rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p));
- ASSERT(internal_pid_index(p->id) < erts_max_processes);
- pix = internal_pid_index(p->id);
-
- erts_smp_mtx_lock(&proc_tab_mtx);
erts_smp_runq_lock(rq);
#ifdef ERTS_SMP
- erts_pix_lock(pix_lock);
-
ASSERT(p->scheduler_data);
ASSERT(p->scheduler_data->current_process == p);
ASSERT(p->scheduler_data->free_process == NULL);
p->scheduler_data->current_process = NULL;
p->scheduler_data->free_process = p;
- p->status_flags = 0;
#endif
- process_tab[pix] = NULL; /* Time of death! */
- ASSERT(erts_smp_atomic32_read_nob(&process_count) > 0);
- erts_smp_atomic32_dec_nob(&process_count);
-#ifdef ERTS_SMP
- erts_pix_unlock(pix_lock);
-#endif
- erts_smp_runq_unlock(rq);
+ /* Time of death! */
+ erts_ptab_delete_element(&erts_proc, &p->common);
- if (p_next < 0) {
- if (p_last >= p_next) {
- p_serial++;
- p_serial &= p_serial_mask;
- }
- p_next = pix;
- }
-
- ERTS_MAYBE_SAVE_TERMINATING_PROCESS(p);
-
- erts_smp_mtx_unlock(&proc_tab_mtx);
+ erts_smp_runq_unlock(rq);
}
/*
@@ -8925,12 +8763,20 @@ continue_exit_process(Process *p
* when the monitors and/or links hit.
*/
- mon = p->monitors;
- p->monitors = NULL; /* to avoid recursive deletion during traversal */
+ {
+ /* Inactivate and notify free */
+ erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state);
+ while (1) {
+ n = e = a;
+ ASSERT(a & ERTS_PSFLG_EXITING);
+ n |= ERTS_PSFLG_FREE;
+ n &= ~ERTS_PSFLG_ACTIVE;
+ a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (a == e)
+ break;
+ }
+ }
- lnk = p->nlinks;
- p->nlinks = NULL;
- p->status = P_FREE;
dep = ((p->flags & F_DISTRIBUTION)
? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL)
: NULL);
@@ -8960,7 +8806,7 @@ continue_exit_process(Process *p
UseTmpHeap(4,p);
hp = &tmp_heap[0];
- exit_tuple = TUPLE3(hp, am_EXIT, p->id, reason);
+ exit_tuple = TUPLE3(hp, am_EXIT, p->common.id, reason);
exit_tuple_sz = size_object(exit_tuple);
@@ -8985,8 +8831,10 @@ continue_exit_process(Process *p
delete_process(p);
+#ifdef ERTS_SMP
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+#endif
return;
@@ -8999,8 +8847,6 @@ continue_exit_process(Process *p
ERTS_SMP_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p));
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks);
- ASSERT(p->status == P_EXITING);
-
p->i = (BeamInstr *) beam_continue_exit;
if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) {
@@ -9008,8 +8854,6 @@ continue_exit_process(Process *p
curr_locks |= ERTS_PROC_LOCK_STATUS;
}
- erts_add_to_runq(p);
-
if (curr_locks != ERTS_PROC_LOCK_MAIN)
erts_smp_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks);
@@ -9021,33 +8865,15 @@ continue_exit_process(Process *p
static void
timeout_proc(Process* p)
{
+ erts_aint32_t state;
BeamInstr** pi = (BeamInstr **) p->def_arg_reg;
p->i = *pi;
p->flags |= F_TIMO;
p->flags &= ~F_INSLPQUEUE;
- switch (p->status) {
- case P_GARBING:
- switch (p->gcstatus) {
- case P_SUSPENDED:
- goto suspended;
- case P_WAITING:
- goto waiting;
- default:
- break;
- }
- break;
- case P_WAITING:
- waiting:
- erts_add_to_runq(p);
- break;
- case P_SUSPENDED:
- suspended:
- p->rstatus = P_RUNABLE; /* MUST set resume status to runnable */
- break;
- default:
- break;
- }
+ state = erts_smp_atomic32_read_acqb(&p->state);
+ if (!(state & ERTS_PSFLG_ACTIVE))
+ schedule_process(p, state, 0);
}
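[Editor's note] The reworked timeout_proc no longer switches on a status enum; it reads the state word and enqueues the process only when it is not already marked active. In sketch form, with assumed names and flag values:

#include <stdatomic.h>
#include <stdint.h>

#define F_ACTIVE (1u << 2)                       /* illustrative flag */

extern void enqueue_process(void *p, uint32_t seen_state);  /* hypothetical */

static void on_timeout(void *p, _Atomic uint32_t *state)
{
    uint32_t s = atomic_load_explicit(state, memory_order_acquire);
    if (!(s & F_ACTIVE))
        enqueue_process(p, s);       /* cf. schedule_process(p, state, 0) */
}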
@@ -9057,9 +8883,9 @@ cancel_timer(Process* p)
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));
p->flags &= ~(F_INSLPQUEUE|F_TIMO);
#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(p->u.ptimer);
+ erts_cancel_smp_ptimer(p->common.u.alive.ptimer);
#else
- erts_cancel_timer(&p->u.tm);
+ erts_cancel_timer(&p->common.u.alive.tm);
#endif
}
@@ -9080,12 +8906,12 @@ set_timer(Process* p, Uint timeout)
p->flags &= ~F_TIMO;
#ifdef ERTS_SMP
- erts_create_smp_ptimer(&p->u.ptimer,
- p->id,
+ erts_create_smp_ptimer(&p->common.u.alive.ptimer,
+ p->common.id,
(ErlTimeoutProc) timeout_proc,
timeout);
#else
- erts_set_timer(&p->u.tm,
+ erts_set_timer(&p->common.u.alive.tm,
(ErlTimeoutProc) timeout_proc,
NULL,
(void*) p,
@@ -9103,7 +8929,7 @@ erts_stack_dump(int to, void *to_arg, Process *p)
Eterm* sp;
int yreg = -1;
- if (p->trace_flags & F_SENSITIVE) {
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
return;
}
erts_program_counter_info(to, to_arg, p);
@@ -9115,6 +8941,7 @@ erts_stack_dump(int to, void *to_arg, Process *p)
void
erts_program_counter_info(int to, void *to_arg, Process *p)
{
+ erts_aint32_t state;
int i;
erts_print(to, to_arg, "Program counter: %p (", p->i);
@@ -9123,7 +8950,8 @@ erts_program_counter_info(int to, void *to_arg, Process *p)
erts_print(to, to_arg, "CP: %p (", p->cp);
print_function_from_pc(to, to_arg, p->cp);
erts_print(to, to_arg, ")\n");
- if (!((p->status == P_RUNNING) || (p->status == P_GARBING))) {
+ state = erts_smp_atomic32_read_acqb(&p->state);
+ if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_GC))) {
erts_print(to, to_arg, "arity = %d\n",p->arity);
if (!ERTS_IS_CRASH_DUMPING) {
/*
@@ -9169,7 +8997,7 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
erts_print(to, to_arg, "\n%p ", sp);
} else {
char sbuf[16];
- sprintf(sbuf, "y(%d)", yreg);
+ erts_snprintf(sbuf, sizeof(sbuf), "y(%d)", yreg);
erts_print(to, to_arg, "%-8s ", sbuf);
yreg++;
}
@@ -9189,1085 +9017,6 @@ stack_element_dump(int to, void *to_arg, Process* p, Eterm* sp, int yreg)
return yreg;
}
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
- * The processes/0 BIF implementation. *
-\* */
-
-
-#define ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED 25
-#define ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE 1000
-#define ERTS_PROCESSES_BIF_MIN_START_REDS \
- (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \
- / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED)
-
-#define ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS 1
-
-#define ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED 10
-
-#define ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS \
- (ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE \
- / ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED)
-
-
-#define ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED 75
-
-#define ERTS_PROCS_DBG_DO_TRACE 0
-
-#ifdef DEBUG
-# define ERTS_PROCESSES_BIF_DEBUGLEVEL 100
-#else
-# define ERTS_PROCESSES_BIF_DEBUGLEVEL 0
-#endif
-
-#define ERTS_PROCS_DBGLVL_CHK_HALLOC 1
-#define ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS 5
-#define ERTS_PROCS_DBGLVL_CHK_PIDS 10
-#define ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST 20
-#define ERTS_PROCS_DBGLVL_CHK_RESLIST 20
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0
-# define ERTS_PROCS_ASSERT(EXP)
-#else
-# define ERTS_PROCS_ASSERT(EXP) \
- ((void) ((EXP) \
- ? 1 \
- : (debug_processes_assert_error(#EXP, __FILE__, __LINE__), 0)))
-#endif
-
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC
-# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ) \
-do { \
- ERTS_PROCS_ASSERT(!(PBDP)->debug.heap); \
- ERTS_PROCS_ASSERT(!(PBDP)->debug.heap_size); \
- (PBDP)->debug.heap = (HP); \
- (PBDP)->debug.heap_size = (SZ); \
-} while (0)
-# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP) \
-do { \
- ERTS_PROCS_ASSERT((PBDP)->debug.heap); \
- ERTS_PROCS_ASSERT((PBDP)->debug.heap_size); \
- ERTS_PROCS_ASSERT((PBDP)->debug.heap + (PBDP)->debug.heap_size == (HP));\
- (PBDP)->debug.heap = NULL; \
- (PBDP)->debug.heap_size = 0; \
-} while (0)
-# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP) \
-do { \
- (PBDP)->debug.heap = NULL; \
- (PBDP)->debug.heap_size = 0; \
-} while (0)
-#else
-# define ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(PBDP, HP, SZ)
-# define ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(PBDP, HP)
-# define ERTS_PROCS_DBG_HEAP_ALLOC_INIT(PBDP)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
-# define ERTS_PROCS_DBG_CHK_RESLIST(R) debug_processes_check_res_list((R))
-#else
-# define ERTS_PROCS_DBG_CHK_RESLIST(R)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
-# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP) debug_processes_save_all_pids((PBDP))
-# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP) \
-do { \
- if (!(PBDP)->debug.correct_pids_verified) \
- debug_processes_verify_all_pids((PBDP)); \
-} while (0)
-# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP) \
-do { \
- if ((PBDP)->debug.correct_pids) { \
- erts_free(ERTS_ALC_T_PROCS_PIDS, \
- (PBDP)->debug.correct_pids); \
- (PBDP)->debug.correct_pids = NULL; \
- } \
-} while(0)
-# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP) \
-do { \
- (PBDP)->debug.correct_pids_verified = 0; \
- (PBDP)->debug.correct_pids = NULL; \
-} while (0)
-#else
-# define ERTS_PROCS_DBG_SAVE_PIDS(PBDP)
-# define ERTS_PROCS_DBG_VERIFY_PIDS(PBDP)
-# define ERTS_PROCS_DBG_CLEANUP_CHK_PIDS(PBDP)
-# define ERTS_PROCS_DBG_CHK_PIDS_INIT(PBDP)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
-# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, TVP) \
- debug_processes_check_found_pid((PBDP), (PID), (TVP), 1)
-# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, TVP) \
- debug_processes_check_found_pid((PBDP), (PID), (TVP), 0)
-#else
-# define ERTS_PROCS_DBG_CHK_PID_FOUND(PBDP, PID, TVP)
-# define ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(PBDP, PID, TVP)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
-# define ERTS_PROCS_DBG_CHK_TPLIST() \
- debug_processes_check_term_proc_list()
-# define ERTS_PROCS_DBG_CHK_FREELIST(FL) \
- debug_processes_check_term_proc_free_list(FL)
-#else
-# define ERTS_PROCS_DBG_CHK_TPLIST()
-# define ERTS_PROCS_DBG_CHK_FREELIST(FL)
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL == 0
-#if ERTS_PROCS_DBG_DO_TRACE
-# define ERTS_PROCS_DBG_INIT(P, PBDP) (PBDP)->debug.caller = (P)->id
-# else
-# define ERTS_PROCS_DBG_INIT(P, PBDP)
-# endif
-# define ERTS_PROCS_DBG_CLEANUP(PBDP)
-#else
-# define ERTS_PROCS_DBG_INIT(P, PBDP) \
-do { \
- (PBDP)->debug.caller = (P)->id; \
- ERTS_PROCS_DBG_HEAP_ALLOC_INIT((PBDP)); \
- ERTS_PROCS_DBG_CHK_PIDS_INIT((PBDP)); \
-} while (0)
-# define ERTS_PROCS_DBG_CLEANUP(PBDP) \
-do { \
- ERTS_PROCS_DBG_CLEANUP_CHK_PIDS((PBDP)); \
-} while (0)
-#endif
-
-#if ERTS_PROCS_DBG_DO_TRACE
-# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT) \
- erts_fprintf(stderr, "%T %s:%d:%s(): %s\n", \
- (PID), __FILE__, __LINE__, #FUNC, #WHAT)
-#else
-# define ERTS_PROCS_DBG_TRACE(PID, FUNC, WHAT)
-#endif
-
-static Uint processes_bif_tab_chunks;
-static Export processes_trap_export;
-
-typedef struct {
- SysTimeval time;
-} ErtsProcessesBifChunkInfo;
-
-typedef enum {
- INITIALIZING,
- INSPECTING_TABLE,
- INSPECTING_TERMINATED_PROCESSES,
- BUILDING_RESULT,
- RETURN_RESULT
-} ErtsProcessesBifState;
-
-typedef struct {
- ErtsProcessesBifState state;
- Eterm caller;
- ErtsProcessesBifChunkInfo *chunk;
- int tix;
- int pid_ix;
- int pid_sz;
- Eterm *pid;
- ErtsTermProcElement *bif_invocation; /* Only used when > 1 chunk */
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0 || ERTS_PROCS_DBG_DO_TRACE
- struct {
- Eterm caller;
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- SysTimeval *pid_started;
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_HALLOC
- Eterm *heap;
- Uint heap_size;
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
- int correct_pids_verified;
- Eterm *correct_pids;
-#endif
- } debug;
-#endif
-
-} ErtsProcessesBifData;
-
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0
-static void debug_processes_assert_error(char* expr, char* file, int line);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
-static void debug_processes_check_res_list(Eterm list);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
-static void debug_processes_save_all_pids(ErtsProcessesBifData *pbdp);
-static void debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
-static void debug_processes_check_found_pid(ErtsProcessesBifData *pbdp,
- Eterm pid,
- SysTimeval *started,
- int pid_should_be_found);
-#endif
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
-static SysTimeval debug_tv_start;
-static void debug_processes_check_term_proc_list(void);
-static void debug_processes_check_term_proc_free_list(ErtsTermProcElement *tpep);
-#endif
-
-static void
-save_terminating_process(Process *p)
-{
- ErtsTermProcElement *tpep = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL,
- sizeof(ErtsTermProcElement));
- ERTS_PROCS_ASSERT(saved_term_procs.start && saved_term_procs.end);
- ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx));
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- tpep->prev = saved_term_procs.end;
- tpep->next = NULL;
- tpep->ix = internal_pid_index(p->id);
- tpep->u.process.pid = p->id;
- tpep->u.process.spawned = p->started;
- erts_get_emu_time(&tpep->u.process.exited);
-
- saved_term_procs.end->next = tpep;
- saved_term_procs.end = tpep;
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- ERTS_PROCS_ASSERT((tpep->prev->ix >= 0
- ? erts_cmp_timeval(&tpep->u.process.exited,
- &tpep->prev->u.process.exited)
- : erts_cmp_timeval(&tpep->u.process.exited,
- &tpep->prev->u.bif_invocation.time)) > 0);
-}
-
-static void
-cleanup_processes_bif_data(Binary *bp)
-{
- ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(bp);
-
- ERTS_PROCS_DBG_TRACE(pbdp->debug.caller, cleanup_processes_bif_data, call);
-
- if (pbdp->state != INITIALIZING) {
-
- if (pbdp->chunk) {
- erts_free(ERTS_ALC_T_PROCS_CNKINF, pbdp->chunk);
- pbdp->chunk = NULL;
- }
- if (pbdp->pid) {
- erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->pid);
- pbdp->pid = NULL;
- }
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- if (pbdp->debug.pid_started) {
- erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.pid_started);
- pbdp->debug.pid_started = NULL;
- }
-#endif
-
- if (pbdp->bif_invocation) {
- ErtsTermProcElement *tpep;
-
- erts_smp_mtx_lock(&proc_tab_mtx);
-
- ERTS_PROCS_DBG_TRACE(pbdp->debug.caller,
- cleanup_processes_bif_data,
- term_proc_cleanup);
-
- tpep = pbdp->bif_invocation;
- pbdp->bif_invocation = NULL;
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- if (tpep->prev) {
- /*
- * Only remove this bif invokation when we
- * have preceding invokations.
- */
- tpep->prev->next = tpep->next;
- if (tpep->next)
- tpep->next->prev = tpep->prev;
- else {
- /*
- * At the time of writing this branch cannot be
- * reached. I don't want to remove this code though
- * since it may be possible to reach this line
- * in the future if the cleanup order in
- * erts_do_exit_process() is changed. The ASSERT(0)
- * is only here to make us aware that the reorder
- * has happened. /rickard
- */
- ASSERT(0);
- saved_term_procs.end = tpep->prev;
- }
- erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep);
- }
- else {
- /*
- * Free all elements until the next bif invocation
- * is found.
- */
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- do {
- ErtsTermProcElement *ftpep = tpep;
- tpep = tpep->next;
- erts_free(ERTS_ALC_T_PROCS_TPROC_EL, ftpep);
- } while (tpep && tpep->ix >= 0);
- saved_term_procs.start = tpep;
- if (tpep)
- tpep->prev = NULL;
- else
- saved_term_procs.end = NULL;
- }
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- erts_smp_mtx_unlock(&proc_tab_mtx);
-
- }
- }
-
- ERTS_PROCS_DBG_TRACE(pbdp->debug.caller,
- cleanup_processes_bif_data,
- return);
- ERTS_PROCS_DBG_CLEANUP(pbdp);
-}
-
-static int
-processes_bif_engine(Process *p, Eterm *res_accp, Binary *mbp)
-{
- ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp);
- int have_reds;
- int reds;
- int locked = 0;
-
- do {
- switch (pbdp->state) {
- case INITIALIZING:
- pbdp->chunk = erts_alloc(ERTS_ALC_T_PROCS_CNKINF,
- (sizeof(ErtsProcessesBifChunkInfo)
- * processes_bif_tab_chunks));
- pbdp->tix = 0;
- pbdp->pid_ix = 0;
-
- erts_smp_mtx_lock(&proc_tab_mtx);
- locked = 1;
-
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, init);
-
- pbdp->pid_sz = erts_process_count();
- pbdp->pid = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
- sizeof(Eterm)*pbdp->pid_sz);
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
- sizeof(SysTimeval)*pbdp->pid_sz);
-#endif
-
- ERTS_PROCS_DBG_SAVE_PIDS(pbdp);
-
- if (processes_bif_tab_chunks == 1)
- pbdp->bif_invocation = NULL;
- else {
- /*
- * We will have to access the table multiple times,
- * releasing the table lock in between chunks.
- */
- pbdp->bif_invocation = erts_alloc(ERTS_ALC_T_PROCS_TPROC_EL,
- sizeof(ErtsTermProcElement));
- pbdp->bif_invocation->ix = -1;
- erts_get_emu_time(&pbdp->bif_invocation->u.bif_invocation.time);
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- pbdp->bif_invocation->next = NULL;
- if (saved_term_procs.end) {
- pbdp->bif_invocation->prev = saved_term_procs.end;
- saved_term_procs.end->next = pbdp->bif_invocation;
- ERTS_PROCS_ASSERT(saved_term_procs.start);
- }
- else {
- pbdp->bif_invocation->prev = NULL;
- saved_term_procs.start = pbdp->bif_invocation;
- }
- saved_term_procs.end = pbdp->bif_invocation;
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- }
-
- pbdp->state = INSPECTING_TABLE;
- /* Fall through */
-
- case INSPECTING_TABLE: {
- int ix = pbdp->tix;
- int indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- int cix = ix / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- int end_ix = ix + indices;
- SysTimeval *invocation_timep;
-
- invocation_timep = (pbdp->bif_invocation
- ? &pbdp->bif_invocation->u.bif_invocation.time
- : NULL);
-
- ERTS_PROCS_ASSERT(is_nil(*res_accp));
- if (!locked) {
- erts_smp_mtx_lock(&proc_tab_mtx);
- locked = 1;
- }
-
- ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx));
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_table);
-
- if (cix != 0)
- erts_get_emu_time(&pbdp->chunk[cix].time);
- else if (pbdp->bif_invocation)
- pbdp->chunk[0].time = *invocation_timep;
- /* else: Time is irrelevant */
-
- if (end_ix >= erts_max_processes) {
- ERTS_PROCS_ASSERT(cix+1 == processes_bif_tab_chunks);
- end_ix = erts_max_processes;
- indices = end_ix - ix;
- /* What to do when done with this chunk */
- pbdp->state = (processes_bif_tab_chunks == 1
- ? BUILDING_RESULT
- : INSPECTING_TERMINATED_PROCESSES);
- }
-
- for (; ix < end_ix; ix++) {
- Process *rp = process_tab[ix];
- if (rp
- && (!invocation_timep
- || erts_cmp_timeval(&rp->started,
- invocation_timep) < 0)) {
- ERTS_PROCS_ASSERT(is_internal_pid(rp->id));
- pbdp->pid[pbdp->pid_ix] = rp->id;
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started[pbdp->pid_ix] = rp->started;
-#endif
-
- pbdp->pid_ix++;
- ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz);
- }
- }
-
- pbdp->tix = end_ix;
-
- erts_smp_mtx_unlock(&proc_tab_mtx);
- locked = 0;
-
- reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED;
- BUMP_REDS(p, reds);
-
- have_reds = ERTS_BIF_REDS_LEFT(p);
-
- if (have_reds && pbdp->state == INSPECTING_TABLE) {
- ix = pbdp->tix;
- indices = ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- end_ix = ix + indices;
- if (end_ix > erts_max_processes) {
- end_ix = erts_max_processes;
- indices = end_ix - ix;
- }
-
- reds = indices/ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED;
-
- /* Pretend we have no reds left if we haven't got enough
- reductions to complete the next chunk */
- if (reds > have_reds)
- have_reds = 0;
- }
-
- break;
- }
-
- case INSPECTING_TERMINATED_PROCESSES: {
- int i;
- int max_reds;
- int free_term_procs = 0;
- SysTimeval *invocation_timep;
- ErtsTermProcElement *tpep;
- ErtsTermProcElement *free_list = NULL;
-
- tpep = pbdp->bif_invocation;
- ERTS_PROCS_ASSERT(tpep);
- invocation_timep = &tpep->u.bif_invocation.time;
-
- max_reds = have_reds = ERTS_BIF_REDS_LEFT(p);
- if (max_reds > ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS)
- max_reds = ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS;
-
- reds = 0;
- erts_smp_mtx_lock(&proc_tab_mtx);
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, insp_term_procs);
-
- ERTS_PROCS_DBG_CHK_TPLIST();
-
- if (tpep->prev)
- tpep->prev->next = tpep->next;
- else {
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- saved_term_procs.start = tpep->next;
-
- if (saved_term_procs.start && saved_term_procs.start->ix >= 0) {
- free_list = saved_term_procs.start;
- free_term_procs = 1;
- }
- }
-
- if (tpep->next)
- tpep->next->prev = tpep->prev;
- else
- saved_term_procs.end = tpep->prev;
-
- tpep = tpep->next;
-
- i = 0;
- while (reds < max_reds && tpep) {
- if (tpep->ix < 0) {
- if (free_term_procs) {
- ERTS_PROCS_ASSERT(free_list);
- ERTS_PROCS_ASSERT(tpep->prev);
-
- tpep->prev->next = NULL; /* end of free_list */
- saved_term_procs.start = tpep;
- tpep->prev = NULL;
- free_term_procs = 0;
- }
- }
- else {
- int cix = tpep->ix/ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE;
- SysTimeval *chunk_timep = &pbdp->chunk[cix].time;
- Eterm pid = tpep->u.process.pid;
- ERTS_PROCS_ASSERT(is_internal_pid(pid));
-
- if (erts_cmp_timeval(&tpep->u.process.spawned,
- invocation_timep) < 0) {
- if (erts_cmp_timeval(&tpep->u.process.exited,
- chunk_timep) < 0) {
- ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp,
- pid,
- &tpep->u.process.spawned);
- pbdp->pid[pbdp->pid_ix] = pid;
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started[pbdp->pid_ix] = tpep->u.process.spawned;
-#endif
- pbdp->pid_ix++;
- ERTS_PROCS_ASSERT(pbdp->pid_ix <= pbdp->pid_sz);
- }
- else {
- ERTS_PROCS_DBG_CHK_PID_FOUND(pbdp,
- pid,
- &tpep->u.process.spawned);
- }
- }
- else {
- ERTS_PROCS_DBG_CHK_PID_NOT_FOUND(pbdp,
- pid,
- &tpep->u.process.spawned);
- }
-
- i++;
- if (i == ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED) {
- reds++;
- i = 0;
- }
- if (free_term_procs)
- reds += ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS;
- }
- tpep = tpep->next;
- }
-
- if (free_term_procs) {
- ERTS_PROCS_ASSERT(free_list);
- saved_term_procs.start = tpep;
- if (!tpep)
- saved_term_procs.end = NULL;
- else {
- ERTS_PROCS_ASSERT(tpep->prev);
- tpep->prev->next = NULL; /* end of free_list */
- tpep->prev = NULL;
- }
- }
-
- if (!tpep) {
- /* Done */
- ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz);
- pbdp->state = BUILDING_RESULT;
- pbdp->bif_invocation->next = free_list;
- free_list = pbdp->bif_invocation;
- pbdp->bif_invocation = NULL;
- }
- else {
- /* Link in bif_invocation again where we left off */
- pbdp->bif_invocation->prev = tpep->prev;
- pbdp->bif_invocation->next = tpep;
- tpep->prev = pbdp->bif_invocation;
- if (pbdp->bif_invocation->prev)
- pbdp->bif_invocation->prev->next = pbdp->bif_invocation;
- else {
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- saved_term_procs.start = pbdp->bif_invocation;
- }
- }
-
- ERTS_PROCS_DBG_CHK_TPLIST();
- ERTS_PROCS_DBG_CHK_FREELIST(free_list);
- erts_smp_mtx_unlock(&proc_tab_mtx);
-
- /*
- * We do the actual free of term proc structures now when we
- * have released the table lock instead of when we encountered
- * them. We do this since free() isn't for free, and we don't want to
- * unnecessarily block other schedulers.
- */
- while (free_list) {
- tpep = free_list;
- free_list = tpep->next;
- erts_free(ERTS_ALC_T_PROCS_TPROC_EL, tpep);
- }
-
- have_reds -= reds;
- if (have_reds < 0)
- have_reds = 0;
- BUMP_REDS(p, reds);
- break;
- }
-
- case BUILDING_RESULT: {
- int conses, ix, min_ix;
- Eterm *hp;
- Eterm res = *res_accp;
-
- ERTS_PROCS_DBG_VERIFY_PIDS(pbdp);
- ERTS_PROCS_DBG_CHK_RESLIST(res);
-
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, begin_build_res);
-
- have_reds = ERTS_BIF_REDS_LEFT(p);
- conses = ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED*have_reds;
- min_ix = pbdp->pid_ix - conses;
- if (min_ix < 0) {
- min_ix = 0;
- conses = pbdp->pid_ix;
- }
-
- hp = HAlloc(p, conses*2);
- ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, conses*2);
-
- for (ix = pbdp->pid_ix - 1; ix >= min_ix; ix--) {
- ERTS_PROCS_ASSERT(is_internal_pid(pbdp->pid[ix]));
- res = CONS(hp, pbdp->pid[ix], res);
- hp += 2;
- }
-
- ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp);
-
- pbdp->pid_ix = min_ix;
- if (min_ix == 0)
- pbdp->state = RETURN_RESULT;
- else {
- pbdp->pid_sz = min_ix;
- pbdp->pid = erts_realloc(ERTS_ALC_T_PROCS_PIDS,
- pbdp->pid,
- sizeof(Eterm)*pbdp->pid_sz);
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
- pbdp->debug.pid_started = erts_realloc(ERTS_ALC_T_PROCS_PIDS,
- pbdp->debug.pid_started,
- sizeof(SysTimeval)*pbdp->pid_sz);
-#endif
- }
- reds = conses/ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED;
- BUMP_REDS(p, reds);
- have_reds -= reds;
-
- ERTS_PROCS_DBG_CHK_RESLIST(res);
- ERTS_PROCS_DBG_TRACE(p->id, processes_bif_engine, end_build_res);
- *res_accp = res;
- break;
- }
- case RETURN_RESULT:
- cleanup_processes_bif_data(mbp);
- return 1;
-
- default:
- erl_exit(ERTS_ABORT_EXIT,
- "erlang:processes/0: Invalid state: %d\n",
- (int) pbdp->state);
- }
-
-
- } while (have_reds || pbdp->state == RETURN_RESULT);
-
- return 0;
-}
-
-/*
- * processes_trap/2 is a hidden BIF that processes/0 traps to.
- */
-
-static BIF_RETTYPE processes_trap(BIF_ALIST_2)
-{
- Eterm res_acc;
- Binary *mbp;
-
- /*
- * This bif cannot be called from erlang code. It can only be
- * trapped to from processes/0; therefore, a bad argument
- * is a processes/0 internal error.
- */
-
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, call);
- ERTS_PROCS_ASSERT(is_nil(BIF_ARG_1) || is_list(BIF_ARG_1));
-
- res_acc = BIF_ARG_1;
-
- ERTS_PROCS_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
-
- mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
-
- ERTS_PROCS_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
- == cleanup_processes_bif_data);
- ERTS_PROCS_ASSERT(
- ((ErtsProcessesBifData *) ERTS_MAGIC_BIN_DATA(mbp))->debug.caller
- == BIF_P->id);
-
- if (processes_bif_engine(BIF_P, &res_acc, mbp)) {
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, return);
- BIF_RET(res_acc);
- }
- else {
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_trap, trap);
- ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, BIF_ARG_2);
- }
-}
-
-
-
-/*
- * The actual processes/0 BIF.
- */
-
-BIF_RETTYPE processes_0(BIF_ALIST_0)
-{
- /*
- * A requirement: The list of pids returned should be a consistent
- * snapshot of all processes existing at some point
- * in time during the execution of processes/0. Since
- * processes might terminate while processes/0 is
- * executing, we have to keep track of terminated
- * processes and add them to the result. We also
- * ignore processes created after processes/0 has
- * begun executing.
- */
- Eterm res_acc = NIL;
- Binary *mbp = erts_create_magic_binary(sizeof(ErtsProcessesBifData),
- cleanup_processes_bif_data);
- ErtsProcessesBifData *pbdp = ERTS_MAGIC_BIN_DATA(mbp);
-
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, call);
- pbdp->state = INITIALIZING;
- ERTS_PROCS_DBG_INIT(BIF_P, pbdp);
-
- if (ERTS_BIF_REDS_LEFT(BIF_P) >= ERTS_PROCESSES_BIF_MIN_START_REDS
- && processes_bif_engine(BIF_P, &res_acc, mbp)) {
- erts_bin_free(mbp);
- ERTS_PROCS_DBG_CHK_RESLIST(res_acc);
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, return);
- BIF_RET(res_acc);
- }
- else {
- Eterm *hp;
- Eterm magic_bin;
- ERTS_PROCS_DBG_CHK_RESLIST(res_acc);
- hp = HAlloc(BIF_P, PROC_BIN_SIZE);
- ERTS_PROCS_DBG_SAVE_HEAP_ALLOC(pbdp, hp, PROC_BIN_SIZE);
- magic_bin = erts_mk_magic_binary_term(&hp, &MSO(BIF_P), mbp);
- ERTS_PROCS_DBG_VERIFY_HEAP_ALLOC_USED(pbdp, hp);
- ERTS_PROCS_DBG_TRACE(BIF_P->id, processes_0, trap);
- ERTS_BIF_YIELD2(&processes_trap_export, BIF_P, res_acc, magic_bin);
- }
-}
-
-static void
-init_processes_bif(void)
-{
- saved_term_procs.start = NULL;
- saved_term_procs.end = NULL;
- processes_bif_tab_chunks = (((erts_max_processes - 1)
- / ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE)
- + 1);
-
- /* processes_trap/2 is a hidden BIF that the processes/0 BIF traps to. */
- sys_memset((void *) &processes_trap_export, 0, sizeof(Export));
- processes_trap_export.address = &processes_trap_export.code[3];
- processes_trap_export.code[0] = am_erlang;
- processes_trap_export.code[1] = am_processes_trap;
- processes_trap_export.code[2] = 2;
- processes_trap_export.code[3] = (BeamInstr) em_apply_bif;
- processes_trap_export.code[4] = (BeamInstr) &processes_trap;
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
- erts_get_emu_time(&debug_tv_start);
-#endif
-
-}
-
-/*
- * Debug stuff
- */
-
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-int
-erts_dbg_check_halloc_lock(Process *p)
-{
- if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
- return 1;
- if (p->id == ERTS_INVALID_PID)
- return 1;
- if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
- return 1;
- if (erts_thr_progress_is_blocking())
- return 1;
- return 0;
-}
-#endif
-
-Eterm
-erts_debug_processes(Process *c_p)
-{
- /* This is the old processes/0 BIF. */
- int i;
- Uint need;
- Eterm res;
- Eterm* hp;
- Process *p;
-#ifdef DEBUG
- Eterm *hp_end;
-#endif
-
- erts_smp_mtx_lock(&proc_tab_mtx);
-
- res = NIL;
- need = erts_process_count() * 2;
- hp = HAlloc(c_p, need); /* we need two heap words for each pid */
-#ifdef DEBUG
- hp_end = hp + need;
-#endif
-
- /* make the list by scanning backwards */
-
-
- for (i = erts_max_processes-1; i >= 0; i--) {
- if ((p = process_tab[i]) != NULL) {
- res = CONS(hp, process_tab[i]->id, res);
- hp += 2;
- }
- }
- ASSERT(hp == hp_end);
-
- erts_smp_mtx_unlock(&proc_tab_mtx);
-
- return res;
-}
-
-Eterm
-erts_debug_processes_bif_info(Process *c_p)
-{
- ERTS_DECL_AM(processes_bif_info);
- Eterm elements[] = {
- AM_processes_bif_info,
- make_small((Uint) ERTS_PROCESSES_BIF_MIN_START_REDS),
- make_small((Uint) processes_bif_tab_chunks),
- make_small((Uint) ERTS_PROCESSES_BIF_TAB_CHUNK_SIZE),
- make_small((Uint) ERTS_PROCESSES_BIF_TAB_INSPECT_INDICES_PER_RED),
- make_small((Uint) ERTS_PROCESSES_BIF_TAB_FREE_TERM_PROC_REDS),
- make_small((Uint) ERTS_PROCESSES_BIF_INSPECT_TERM_PROC_PER_RED),
- make_small((Uint) ERTS_PROCESSES_INSPECT_TERM_PROC_MAX_REDS),
- make_small((Uint) ERTS_PROCESSES_BIF_BUILD_RESULT_CONSES_PER_RED),
- make_small((Uint) ERTS_PROCESSES_BIF_DEBUGLEVEL)
- };
- Uint sz = 0;
- Eterm *hp;
- (void) erts_bld_tuplev(NULL, &sz, sizeof(elements)/sizeof(Eterm), elements);
- hp = HAlloc(c_p, sz);
- return erts_bld_tuplev(&hp, NULL, sizeof(elements)/sizeof(Eterm), elements);
-}
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_FOUND_PIDS
-static void
-debug_processes_check_found_pid(ErtsProcessesBifData *pbdp,
- Eterm pid,
- SysTimeval *tvp,
- int pid_should_be_found)
-{
- int i;
- for (i = 0; i < pbdp->pid_ix; i++) {
- if (pbdp->pid[i] == pid
- && pbdp->debug.pid_started[i].tv_sec == tvp->tv_sec
- && pbdp->debug.pid_started[i].tv_usec == tvp->tv_usec) {
- ERTS_PROCS_ASSERT(pid_should_be_found);
- return;
- }
- }
- ERTS_PROCS_ASSERT(!pid_should_be_found);
-}
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_RESLIST
-static void
-debug_processes_check_res_list(Eterm list)
-{
- while (is_list(list)) {
- Eterm* consp = list_val(list);
- Eterm hd = CAR(consp);
- ERTS_PROCS_ASSERT(is_internal_pid(hd));
- list = CDR(consp);
- }
-
- ERTS_PROCS_ASSERT(is_nil(list));
-}
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS
-
-static void
-debug_processes_save_all_pids(ErtsProcessesBifData *pbdp)
-{
- int ix, tix, cpix;
- pbdp->debug.correct_pids_verified = 0;
- pbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PROCS_PIDS,
- sizeof(Eterm)*pbdp->pid_sz);
-
- for (tix = 0, cpix = 0; tix < erts_max_processes; tix++) {
- Process *rp = process_tab[tix];
- if (rp) {
- ERTS_PROCS_ASSERT(is_internal_pid(rp->id));
- pbdp->debug.correct_pids[cpix++] = rp->id;
- ERTS_PROCS_ASSERT(cpix <= pbdp->pid_sz);
- }
- }
- ERTS_PROCS_ASSERT(cpix == pbdp->pid_sz);
-
- for (ix = 0; ix < pbdp->pid_sz; ix++)
- pbdp->pid[ix] = make_small(ix);
-}
-
-static void
-debug_processes_verify_all_pids(ErtsProcessesBifData *pbdp)
-{
- int ix, cpix;
-
- ERTS_PROCS_ASSERT(pbdp->pid_ix == pbdp->pid_sz);
-
- for (ix = 0; ix < pbdp->pid_sz; ix++) {
- int found = 0;
- Eterm pid = pbdp->pid[ix];
- ERTS_PROCS_ASSERT(is_internal_pid(pid));
- for (cpix = ix; cpix < pbdp->pid_sz; cpix++) {
- if (pbdp->debug.correct_pids[cpix] == pid) {
- pbdp->debug.correct_pids[cpix] = NIL;
- found = 1;
- break;
- }
- }
- if (!found) {
- for (cpix = 0; cpix < ix; cpix++) {
- if (pbdp->debug.correct_pids[cpix] == pid) {
- pbdp->debug.correct_pids[cpix] = NIL;
- found = 1;
- break;
- }
- }
- }
- ERTS_PROCS_ASSERT(found);
- }
- pbdp->debug.correct_pids_verified = 1;
-
- erts_free(ERTS_ALC_T_PROCS_PIDS, pbdp->debug.correct_pids);
- pbdp->debug.correct_pids = NULL;
-}
-#endif /* ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_PIDS */
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL >= ERTS_PROCS_DBGLVL_CHK_TERM_PROC_LIST
-static void
-debug_processes_check_term_proc_list(void)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&proc_tab_mtx));
- if (!saved_term_procs.start)
- ERTS_PROCS_ASSERT(!saved_term_procs.end);
- else {
- SysTimeval tv_now;
- SysTimeval *prev_xtvp = NULL;
- ErtsTermProcElement *tpep;
- erts_get_emu_time(&tv_now);
-
- for (tpep = saved_term_procs.start; tpep; tpep = tpep->next) {
- if (!tpep->prev)
- ERTS_PROCS_ASSERT(saved_term_procs.start == tpep);
- else
- ERTS_PROCS_ASSERT(tpep->prev->next == tpep);
- if (!tpep->next)
- ERTS_PROCS_ASSERT(saved_term_procs.end == tpep);
- else
- ERTS_PROCS_ASSERT(tpep->next->prev == tpep);
- if (tpep->ix < 0) {
- SysTimeval *tvp = &tpep->u.bif_invocation.time;
- ERTS_PROCS_ASSERT(erts_cmp_timeval(&debug_tv_start, tvp) < 0
- && erts_cmp_timeval(tvp, &tv_now) < 0);
- }
- else {
- SysTimeval *stvp = &tpep->u.process.spawned;
- SysTimeval *xtvp = &tpep->u.process.exited;
-
- ERTS_PROCS_ASSERT(erts_cmp_timeval(&debug_tv_start,
- stvp) < 0);
- ERTS_PROCS_ASSERT(erts_cmp_timeval(stvp, xtvp) < 0);
- if (prev_xtvp)
- ERTS_PROCS_ASSERT(erts_cmp_timeval(prev_xtvp, xtvp) < 0);
- prev_xtvp = xtvp;
- ERTS_PROCS_ASSERT(is_internal_pid(tpep->u.process.pid));
- ERTS_PROCS_ASSERT(tpep->ix
- == internal_pid_index(tpep->u.process.pid));
- }
- }
-
- }
-}
-
-static void
-debug_processes_check_term_proc_free_list(ErtsTermProcElement *free_list)
-{
- if (saved_term_procs.start) {
- ErtsTermProcElement *ftpep;
- ErtsTermProcElement *tpep;
-
- for (ftpep = free_list; ftpep; ftpep = ftpep->next) {
- for (tpep = saved_term_procs.start; tpep; tpep = tpep->next)
- ERTS_PROCS_ASSERT(ftpep != tpep);
- }
- }
-}
-
-#endif
-
-#if ERTS_PROCESSES_BIF_DEBUGLEVEL != 0
-
-static void
-debug_processes_assert_error(char* expr, char* file, int line)
-{
- fflush(stdout);
- erts_fprintf(stderr, "%s:%d: Assertion failed: %s\n", file, line, expr);
- fflush(stderr);
- abort();
-}
-
-#endif
-
-/* *\
- * End of the processes/0 BIF implementation. *
-\* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
/*
 * A nice system halt closing all open ports goes as follows:
* 1) This function schedules the aux work ERTS_SSI_AUX_WORK_REAP_PORTS
@@ -10294,3 +9043,19 @@ void erl_halt(int code)
notify_reap_ports_relb();
}
}
+
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int
+erts_dbg_check_halloc_lock(Process *p)
+{
+ if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
+ return 1;
+ if (p->common.id == ERTS_INVALID_PID)
+ return 1;
+ if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
+ return 1;
+ if (erts_thr_progress_is_blocking())
+ return 1;
+ return 0;
+}
+#endif
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 9e7a5a5c74..6d1032c292 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -42,6 +42,9 @@ typedef struct process Process;
#include "erl_process_lock.h" /* Only pull out important types... */
#undef ERTS_PROCESS_LOCK_ONLY_PROC_LOCK_TYPE__
+#define ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_port.h"
+#undef ERL_PORT_GET_PORT_TYPE_ONLY__
#include "erl_vm.h"
#include "erl_smp.h"
#include "erl_message.h"
@@ -66,11 +69,10 @@ typedef struct process Process;
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
struct ErtsNodesMonitor_;
-struct port;
#define ERTS_MAX_NO_OF_SCHEDULERS 1024
-#define ERTS_DEFAULT_MAX_PROCESSES (1 << 15)
+#define ERTS_DEFAULT_MAX_PROCESSES (1 << 18)
#define ERTS_HEAP_ALLOC(Type, Size) \
erts_alloc((Type), (Size))
@@ -112,28 +114,29 @@ extern int erts_sched_thread_suggested_stack_size;
#define PRIORITY_NORMAL 2
#define PRIORITY_LOW 3
#define ERTS_NO_PROC_PRIO_LEVELS 4
+#define ERTS_NO_PROC_PRIO_QUEUES 3
#define ERTS_PORT_PRIO_LEVEL ERTS_NO_PROC_PRIO_LEVELS
+#define ERTS_NO_PRIO_LEVELS (ERTS_NO_PROC_PRIO_LEVELS + 1)
#define ERTS_RUNQ_FLGS_PROCS_QMASK \
((((Uint32) 1) << ERTS_NO_PROC_PRIO_LEVELS) - 1)
-#define ERTS_NO_PRIO_LEVELS (ERTS_NO_PROC_PRIO_LEVELS + 1)
-#define ERTS_RUNQ_FLGS_MIGRATE_QMASK \
+#define ERTS_RUNQ_FLGS_QMASK \
((((Uint32) 1) << ERTS_NO_PRIO_LEVELS) - 1)
#define ERTS_RUNQ_FLGS_EMIGRATE_SHFT \
- ERTS_NO_PROC_PRIO_LEVELS
+ ERTS_NO_PRIO_LEVELS
#define ERTS_RUNQ_FLGS_IMMIGRATE_SHFT \
(ERTS_RUNQ_FLGS_EMIGRATE_SHFT + ERTS_NO_PRIO_LEVELS)
#define ERTS_RUNQ_FLGS_EVACUATE_SHFT \
(ERTS_RUNQ_FLGS_IMMIGRATE_SHFT + ERTS_NO_PRIO_LEVELS)
#define ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
- (ERTS_RUNQ_FLGS_MIGRATE_QMASK << ERTS_RUNQ_FLGS_EMIGRATE_SHFT)
+ (ERTS_RUNQ_FLGS_QMASK << ERTS_RUNQ_FLGS_EMIGRATE_SHFT)
#define ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
- (ERTS_RUNQ_FLGS_MIGRATE_QMASK << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)
+ (ERTS_RUNQ_FLGS_QMASK << ERTS_RUNQ_FLGS_IMMIGRATE_SHFT)
#define ERTS_RUNQ_FLGS_EVACUATE_QMASK \
- (ERTS_RUNQ_FLGS_MIGRATE_QMASK << ERTS_RUNQ_FLGS_EVACUATE_SHFT)
+ (ERTS_RUNQ_FLGS_QMASK << ERTS_RUNQ_FLGS_EVACUATE_SHFT)
#define ERTS_RUNQ_FLG_BASE2 \
(ERTS_RUNQ_FLGS_EVACUATE_SHFT + ERTS_NO_PRIO_LEVELS)
@@ -148,14 +151,18 @@ extern int erts_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 3))
#define ERTS_RUNQ_FLG_INACTIVE \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 4))
+#define ERTS_RUNQ_FLG_NONEMPTY \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 5))
+#define ERTS_RUNQ_FLG_PROTECTED \
+ (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 6))
#define ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
(ERTS_RUNQ_FLGS_EMIGRATE_QMASK \
| ERTS_RUNQ_FLGS_IMMIGRATE_QMASK \
| ERTS_RUNQ_FLGS_EVACUATE_QMASK)
+
#define ERTS_RUNQ_FLGS_MIGRATION_INFO \
- (ERTS_RUNQ_FLGS_MIGRATION_QMASKS \
- | ERTS_RUNQ_FLG_INACTIVE \
+ (ERTS_RUNQ_FLG_INACTIVE \
| ERTS_RUNQ_FLG_OUT_OF_WORK \
| ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)
@@ -186,34 +193,24 @@ extern int erts_sched_thread_suggested_stack_size;
#define ERTS_UNSET_RUNQ_FLG_EVACUATE(FLGS, PRIO) \
((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO)))
-#define ERTS_RUNQ_IFLG_SUSPENDED (((erts_aint32_t) 1) << 0)
-#define ERTS_RUNQ_IFLG_NONEMPTY (((erts_aint32_t) 1) << 1)
-
-
-#ifdef DEBUG
-# if defined(ARCH_64) && !HALFWORD_HEAP
-# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
- (*((char **) &(RQP)) = (char *) (0xdeadbeefdead0003 | ((N) << 4)))
-# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
-do { \
- ASSERT((RQP) != NULL); \
- ASSERT(((((Uint) (RQP)) & ((Uint) 0x3))) == ((Uint) 0)); \
- ASSERT((((Uint) (RQP)) & ~((Uint) 0xffff)) != ((Uint) 0xdeadbeefdead0000));\
-} while (0)
-# else
-# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
- (*((char **) &(RQP)) = (char *) (0xdead0003 | ((N) << 4)))
-# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
-do { \
- ASSERT((RQP) != NULL); \
- ASSERT(((((UWord) (RQP)) & ((UWord) 1))) == ((UWord) 0)); \
- ASSERT((((UWord) (RQP)) & ~((UWord) 0xffff)) != ((UWord) 0xdead0000)); \
-} while (0)
-# endif
-#else
-# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N)
-# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP)
-#endif
+#define ERTS_RUNQ_FLGS_INIT(RQ, INIT) \
+ erts_smp_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT))
+#define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \
+ ((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \
+ (erts_aint32_t) (FLGS)))
+#define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \
+ ((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \
+ (erts_aint32_t) ~(FLGS)))
+#define ERTS_RUNQ_FLGS_GET(RQ) \
+ ((Uint32) erts_smp_atomic32_read_acqb(&(RQ)->flags))
+#define ERTS_RUNQ_FLGS_GET_NOB(RQ) \
+ ((Uint32) erts_smp_atomic32_read_nob(&(RQ)->flags))
+#define ERTS_RUNQ_FLGS_GET_MB(RQ) \
+ ((Uint32) erts_smp_atomic32_read_mb(&(RQ)->flags))
+#define ERTS_RUNQ_FLGS_READ_BSET(RQ, MSK, FLGS) \
+ ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \
+ (erts_aint32_t) (MSK), \
+ (erts_aint32_t) (FLGS)))
typedef enum {
ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED,
@@ -258,14 +255,15 @@ typedef enum {
#define ERTS_SSI_AUX_WORK_DD_THR_PRGR (((erts_aint32_t) 1) << 2)
#define ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC (((erts_aint32_t) 1) << 3)
#define ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM (((erts_aint32_t) 1) << 4)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 5)
-#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 6)
-#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 7)
-#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 8)
-#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 9)
-#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 10)
-#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 11)
-#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 12)
+#define ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP (((erts_aint32_t) 1) << 5)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY (((erts_aint32_t) 1) << 6)
+#define ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN (((erts_aint32_t) 1) << 7)
+#define ERTS_SSI_AUX_WORK_MISC_THR_PRGR (((erts_aint32_t) 1) << 8)
+#define ERTS_SSI_AUX_WORK_MISC (((erts_aint32_t) 1) << 9)
+#define ERTS_SSI_AUX_WORK_CHECK_CHILDREN (((erts_aint32_t) 1) << 10)
+#define ERTS_SSI_AUX_WORK_SET_TMO (((erts_aint32_t) 1) << 11)
+#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK (((erts_aint32_t) 1) << 12)
+#define ERTS_SSI_AUX_WORK_REAP_PORTS (((erts_aint32_t) 1) << 13)
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
@@ -291,8 +289,9 @@ struct ErtsSchedulerSleepInfo_ {
typedef struct ErtsProcList_ ErtsProcList;
struct ErtsProcList_ {
Eterm pid;
- SysTimeval started;
+ Uint64 started_interval;
ErtsProcList* next;
+ ErtsProcList* prev;
};
typedef struct ErtsMiscOpList_ ErtsMiscOpList;
@@ -312,21 +311,39 @@ typedef struct ErtsSchedulerData_ ErtsSchedulerData;
typedef struct ErtsRunQueue_ ErtsRunQueue;
typedef struct {
- int len;
- int max_len;
+ erts_smp_atomic32_t len;
+ erts_aint32_t max_len;
int reds;
+} ErtsRunQueueInfo;
+
+#ifdef ERTS_SMP
+
+typedef struct {
+ Uint32 flags;
+ ErtsRunQueue *misc_evac_runq;
struct {
struct {
int this;
int other;
} limit;
ErtsRunQueue *runq;
- } migrate;
-} ErtsRunQueueInfo;
+ Uint32 flags;
+ } prio[ERTS_NO_PRIO_LEVELS];
+} ErtsMigrationPath;
+
+typedef struct ErtsMigrationPaths_ ErtsMigrationPaths;
+
+struct ErtsMigrationPaths_ {
+ void *block;
+ ErtsMigrationPaths *next;
+ ErtsThrPrgrVal thr_prgr;
+ ErtsMigrationPath mpath[1];
+};
+
+#endif /* ERTS_SMP */
struct ErtsRunQueue_ {
int ix;
- erts_smp_atomic32_t info_flags;
erts_smp_mtx_t mtx;
erts_smp_cnd_t cnd;
@@ -334,19 +351,18 @@ struct ErtsRunQueue_ {
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
int woken;
- Uint32 flags;
+ erts_smp_atomic32_t flags;
int check_balance_reds;
int full_reds_history_sum;
int full_reds_history[ERTS_FULL_REDS_HISTORY_SIZE];
int out_of_work_count;
- int max_len;
- int len;
+ erts_aint32_t max_len;
+ erts_aint32_t len;
int wakeup_other;
int wakeup_other_reds;
int halt_in_progress;
struct {
- int len;
ErtsProcList *pending_exiters;
Uint context_switches;
Uint reductions;
@@ -361,13 +377,13 @@ struct ErtsRunQueue_ {
struct {
ErtsMiscOpList *start;
ErtsMiscOpList *end;
- ErtsRunQueue *evac_runq;
+ erts_smp_atomic_t evac_runq;
} misc;
struct {
ErtsRunQueueInfo info;
- struct port *start;
- struct port *end;
+ Port *start;
+ Port *end;
} ports;
};
@@ -381,7 +397,7 @@ extern ErtsAlignedRunQueue *erts_aligned_run_queues;
#define ERTS_PROC_REDUCTIONS_EXECUTED(RQ, PRIO, REDS, AREDS) \
do { \
(RQ)->procs.reductions += (AREDS); \
- (RQ)->procs.prio_info[p->prio].reds += (REDS); \
+ (RQ)->procs.prio_info[(PRIO)].reds += (REDS); \
(RQ)->check_balance_reds -= (REDS); \
(RQ)->wakeup_other_reds += (AREDS); \
} while (0)
@@ -427,6 +443,10 @@ typedef struct {
void (*completed_callback)(void *);
void (*completed_arg)(void *);
} dd;
+ struct {
+ ErtsThrPrgrLaterOp *first;
+ ErtsThrPrgrLaterOp *last;
+ } later_op;
#endif
#ifdef ERTS_USE_ASYNC_READY_Q
struct {
@@ -439,6 +459,7 @@ typedef struct {
#endif
#ifdef ERTS_SMP
struct {
+ Uint64 next;
int *sched2jix;
int jix;
ErtsDelayedAuxWorkWakeupJob *job;
@@ -472,7 +493,7 @@ struct ErtsSchedulerData_ {
ErtsSchedulerSleepInfo *ssi;
Process *current_process;
Uint no; /* Scheduler number */
- struct port *current_port;
+ Port *current_port;
ErtsRunQueue *run_queue;
int virtual_reds;
int cpu_id; /* >= 0 when bound */
@@ -481,6 +502,7 @@ struct ErtsSchedulerData_ {
ErtsSchedAllocData alloc_data;
+ Uint64 reductions;
ErtsSchedWallTime sched_wall_time;
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
@@ -500,6 +522,90 @@ extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
extern ErtsSchedulerData *erts_scheduler_data;
#endif
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
+#endif
+
+#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+
+/*
+ * Run queue locked during modifications. We use atomic ops since
+ * other threads peek at values without the run queue lock.
+ */
+
+ERTS_GLB_INLINE void erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
+ERTS_GLB_INLINE void erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio);
+ERTS_GLB_INLINE void erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE void
+erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
+{
+ erts_aint32_t len;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+
+ len = erts_smp_atomic32_read_nob(&rqi->len);
+ ASSERT(len >= 0);
+ if (len == 0) {
+ ASSERT((erts_smp_atomic32_read_nob(&rq->flags)
+ & ((erts_aint32_t) (1 << prio))) == 0);
+ erts_smp_atomic32_read_bor_nob(&rq->flags,
+ (erts_aint32_t) (1 << prio));
+ }
+ len++;
+ if (rqi->max_len < len)
+ rqi->max_len = len;
+
+ erts_smp_atomic32_set_relb(&rqi->len, len);
+
+ rq->len++;
+ if (rq->max_len < rq->len)
+ rq->max_len = len;
+ ASSERT(rq->len > 0);
+}
+
+ERTS_GLB_INLINE void
+erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
+{
+ erts_aint32_t len;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+
+ len = erts_smp_atomic32_read_nob(&rqi->len);
+ len--;
+ ASSERT(len >= 0);
+ if (len == 0) {
+ ASSERT((erts_smp_atomic32_read_nob(&rq->flags)
+ & ((erts_aint32_t) (1 << prio))));
+ erts_smp_atomic32_read_band_nob(&rq->flags,
+ ~((erts_aint32_t) (1 << prio)));
+ }
+ erts_smp_atomic32_set_relb(&rqi->len, len);
+
+ rq->len--;
+ ASSERT(rq->len >= 0);
+}
+
+ERTS_GLB_INLINE void
+erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
+{
+ erts_aint32_t len;
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
+
+ len = erts_smp_atomic32_read_nob(&rqi->len);
+ ASSERT(rqi->max_len >= len);
+ rqi->max_len = len;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#define RUNQ_READ_LEN(X) erts_smp_atomic32_read_nob((X))
+
+#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */
+
/*
* Process Specific Data.
*
@@ -612,6 +718,8 @@ struct ErtsPendingSuspend_ {
# define BIN_OLD_VHEAP(p) (p)->bin_old_vheap
struct process {
+ ErtsPTabElementCommon common; /* *Need* to be first in struct */
+
/* All fields in the PCB that differs between different heap
* architectures, have been moved to the end of this struct to
* make sure that as few offsets as possible differ. Different
@@ -654,17 +762,9 @@ struct process {
* Number of reductions left to execute.
* Only valid for the current process.
*/
- Uint32 status; /* process STATE */
- Uint32 gcstatus; /* process gc STATE */
- Uint32 rstatus; /* process resume STATE */
Uint32 rcount; /* suspend count */
- Eterm id; /* The pid of this process */
- int prio; /* Priority of process */
- int skipped; /* Times a low prio process has been rescheduled */
+ int schedule_count; /* Times left to reschedule a low prio process */
Uint reds; /* No of reductions for this process */
- Eterm tracer_proc; /* If proc is traced, this is the tracer
- (can NOT be boxed) */
- Uint trace_flags; /* Trace flags (used to be in flags) */
Eterm group_leader; /* Pid in charge
(can be boxed) */
Uint flags; /* Trap exit, etc (no trace flags anymore) */
@@ -673,11 +773,6 @@ struct process {
Eterm ftrace; /* Latest exception stack trace dump */
Process *next; /* Pointer to next process in run queue */
- Process *prev; /* Pointer to prev process in run queue */
-
- struct reg_proc *reg; /* NULL iff not registered */
- ErtsLink *nlinks;
- ErtsMonitor *monitors; /* The process monitors, both ends */
struct ErtsNodesMonitor_ *nodes_monitors;
@@ -687,7 +782,10 @@ struct process {
ErlMessageQueue msg; /* Message queue */
- ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
+ union {
+ ErtsBifTimer *bif_timers; /* Bif timers aiming at this process */
+ void *terminate;
+ } u;
ProcDict *dictionary; /* Process dictionary, may be NULL */
@@ -711,8 +809,7 @@ struct process {
* Information mainly for post-mortem use (erl crash dump).
*/
Eterm parent; /* Pid of process that created this process. */
- SysTimeval started; /* Time when started. */
-
+ erts_approx_time_t approx_started; /* Time when started. */
/* This is the place, where all fields that differs between memory
* architectures, have gone to.
@@ -734,28 +831,16 @@ struct process {
Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
- union {
-#ifdef ERTS_SMP
- ErtsSmpPTimer *ptimer;
-#else
- ErlTimer tm; /* Timer entry */
-#endif
- void *exit_data; /* Misc data referred during termination */
- } u;
-
- ErtsRunQueue *bound_runq;
+ erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
#ifdef ERTS_SMP
+ ErlMessageInQueue msg_inq;
+ ErtsPendExit pending_exit;
erts_proc_lock_t lock;
ErtsSchedulerData *scheduler_data;
- int is_exiting;
- Uint32 runq_flags;
- Uint32 status_flags;
- ErlMessageInQueue msg_inq;
Eterm suspendee;
ErtsPendingSuspend *pending_suspenders;
- ErtsPendExit pending_exit;
- ErtsRunQueue *run_queue;
+ erts_smp_atomic_t run_queue;
#ifdef HIPE
struct hipe_process_state_smp hipe_smp;
#endif
@@ -780,6 +865,8 @@ struct process {
#endif
};
+extern const Process erts_invalid_process;
+
#ifdef CHECK_FOR_HOLES
# define INIT_HOLE_CHECK(p) \
do { \
@@ -809,6 +896,29 @@ void erts_check_for_holes(Process* p);
#define SEQ_TRACE_TOKEN(p) ((p)->seq_trace_token)
+#if ERTS_NO_PROC_PRIO_LEVELS > 4
+# error "Need to increase ERTS_PSFLG_PRIO_SHIFT"
+#endif
+
+#define ERTS_PSFLG_PRIO_SHIFT 2
+
+#define ERTS_PSFLG_BIT(N) \
+ (((erts_aint32_t) 1) << (ERTS_PSFLG_PRIO_SHIFT + (N)))
+
+#define ERTS_PSFLG_PRIO_MASK (ERTS_PSFLG_BIT(0) - 1)
+
+#define ERTS_PSFLG_FREE ERTS_PSFLG_BIT(0)
+#define ERTS_PSFLG_EXITING ERTS_PSFLG_BIT(1)
+#define ERTS_PSFLG_PENDING_EXIT ERTS_PSFLG_BIT(2)
+#define ERTS_PSFLG_ACTIVE ERTS_PSFLG_BIT(3)
+#define ERTS_PSFLG_IN_RUNQ ERTS_PSFLG_BIT(4)
+#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(5)
+#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(6)
+#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(7)
+#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(8)
+#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(9)
+
+
/* The sequential tracing token is a tuple of size 5:
*
* {Flags, Label, Serial, Sender}
@@ -881,9 +991,6 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra);
Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz);
#endif
-extern Process** process_tab;
-extern Uint erts_max_processes;
-extern Uint erts_process_tab_index_mask;
extern Uint erts_default_process_flags;
extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx;
/* If any of the erts_system_monitor_* variables are set (enabled),
@@ -912,16 +1019,8 @@ struct erts_system_profile_flags_t {
};
extern struct erts_system_profile_flags_t erts_system_profile_flags;
-#define INVALID_PID(p, pid) ((p) == NULL \
- || (p)->id != (pid) \
- || (p)->status == P_EXITING)
-
-#define IS_TRACED(p) ( (p)->tracer_proc != NIL )
-#define ARE_TRACE_FLAGS_ON(p,tf) ( ((p)->trace_flags & (tf|F_SENSITIVE)) == (tf) )
-#define IS_TRACED_FL(p,tf) ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) )
-
/* process flags */
-#define F_TRAPEXIT (1 << 0)
+#define F_HIBERNATE_SCHED (1 << 0) /* Schedule out after hibernate op */
#define F_INSLPQUEUE (1 << 1) /* Set if in timer queue */
#define F_TIMO (1 << 2) /* Set if timeout */
#define F_HEAP_GROW (1 << 3)
@@ -932,7 +1031,6 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define F_HAVE_BLCKD_MSCHED (1 << 8) /* Process has blocked multi-scheduling */
#define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
-#define F_HIBERNATE_SCHED (1 << 11) /* Schedule out after hibernate op */
/* process trace_flags */
#define F_SENSITIVE (1 << 0)
@@ -999,67 +1097,10 @@ extern struct erts_system_profile_flags_t erts_system_profile_flags;
#define DT_UTAG_FLAGS(P) ((P)->dt_utag_flags)
#endif
-
-#ifdef ERTS_SMP
-/* Status flags ... */
-#define ERTS_PROC_SFLG_PENDADD2SCHEDQ (((Uint32) 1) << 0) /* Pending
- add to
- schedule q */
-#define ERTS_PROC_SFLG_INRUNQ (((Uint32) 1) << 1) /* Process is
- in run q */
-#define ERTS_PROC_SFLG_TRAPEXIT (((Uint32) 1) << 2) /* Process is
- trapping
- exit */
-#define ERTS_PROC_SFLG_RUNNING (((Uint32) 1) << 3) /* Process is
- running */
-/* Scheduler flags in process struct... */
-#define ERTS_PROC_RUNQ_FLG_RUNNING (((Uint32) 1) << 0) /* Process is
- running */
-
-#endif
-
-
-#ifdef ERTS_SMP
-#define ERTS_PROC_IS_TRAPPING_EXITS(P) \
- (ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((P)) \
- & ERTS_PROC_LOCK_STATUS), \
- (P)->status_flags & ERTS_PROC_SFLG_TRAPEXIT)
-
-#define ERTS_PROC_SET_TRAP_EXIT(P) \
- (ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS) \
- & erts_proc_lc_my_proc_locks((P))) \
- == (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)), \
- (P)->status_flags |= ERTS_PROC_SFLG_TRAPEXIT, \
- (P)->flags |= F_TRAPEXIT, \
- 1)
-
-#define ERTS_PROC_UNSET_TRAP_EXIT(P) \
- (ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS) \
- & erts_proc_lc_my_proc_locks((P))) \
- == (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)), \
- (P)->status_flags &= ~ERTS_PROC_SFLG_TRAPEXIT, \
- (P)->flags &= ~F_TRAPEXIT, \
- 0)
-#else
-#define ERTS_PROC_IS_TRAPPING_EXITS(P) ((P)->flags & F_TRAPEXIT)
-#define ERTS_PROC_SET_TRAP_EXIT(P) ((P)->flags |= F_TRAPEXIT, 1)
-#define ERTS_PROC_UNSET_TRAP_EXIT(P) ((P)->flags &= ~F_TRAPEXIT, 0)
-#endif
-
/* Option flags to erts_send_exit_signal() */
#define ERTS_XSIG_FLG_IGN_KILL (((Uint32) 1) << 0)
#define ERTS_XSIG_FLG_NO_IGN_NORMAL (((Uint32) 1) << 1)
-
-/* Process status values */
-#define P_FREE 0
-#define P_RUNABLE 1
-#define P_WAITING 2
-#define P_RUNNING 3
-#define P_EXITING 4
-#define P_GARBING 5
-#define P_SUSPENDED 6
-
#define CANCEL_TIMER(p) \
do { \
if ((p)->flags & (F_INSLPQUEUE)) \
@@ -1081,15 +1122,187 @@ void erts_early_init_scheduling(int);
void erts_init_scheduling(int, int);
Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable);
+Uint64 erts_get_proc_interval(void);
+Uint64 erts_ensure_later_proc_interval(Uint64);
+Uint64 erts_step_proc_interval(void);
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
-int erts_proclist_same(ErtsProcList *, Process *);
+
+ERTS_GLB_INLINE int erts_proclist_same(ErtsProcList *, Process *);
+ERTS_GLB_INLINE void erts_proclist_store_first(ErtsProcList **, ErtsProcList *);
+ERTS_GLB_INLINE void erts_proclist_store_last(ErtsProcList **, ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_first(ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_last(ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_next(ErtsProcList *, ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_prev(ErtsProcList *, ErtsProcList *);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **);
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_last(ErtsProcList **);
+ERTS_GLB_INLINE int erts_proclist_fetch(ErtsProcList **, ErtsProcList **);
+ERTS_GLB_INLINE void erts_proclist_remove(ErtsProcList **, ErtsProcList *);
+ERTS_GLB_INLINE int erts_proclist_is_empty(ErtsProcList *);
+ERTS_GLB_INLINE int erts_proclist_is_first(ErtsProcList *, ErtsProcList *);
+ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *, ErtsProcList *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int
+erts_proclist_same(ErtsProcList *plp, Process *p)
+{
+ return (plp->pid == p->common.id
+ && (plp->started_interval
+ == p->common.u.alive.started_interval));
+}
+
+ERTS_GLB_INLINE void erts_proclist_store_first(ErtsProcList **list,
+ ErtsProcList *element)
+{
+ if (!*list)
+ element->next = element->prev = element;
+ else {
+ element->prev = (*list)->prev;
+ element->next = *list;
+ element->prev->next = element;
+ element->next->prev = element;
+ }
+ *list = element;
+}
+
+ERTS_GLB_INLINE void erts_proclist_store_last(ErtsProcList **list,
+ ErtsProcList *element)
+{
+ if (!*list) {
+ element->next = element->prev = element;
+ *list = element;
+ }
+ else {
+ element->prev = (*list)->prev;
+ element->next = *list;
+ element->prev->next = element;
+ element->next->prev = element;
+ }
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_first(ErtsProcList *list)
+{
+ return list;
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_last(ErtsProcList *list)
+{
+ if (!list)
+ return NULL;
+ else
+ return list->prev;
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_next(ErtsProcList *list,
+ ErtsProcList *element)
+{
+ ErtsProcList *next;
+ ASSERT(list && element);
+ next = element->next;
+ return list == next ? NULL : next;
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_peek_prev(ErtsProcList *list,
+ ErtsProcList *element)
+{
+ ErtsProcList *prev;
+ ASSERT(list && element);
+ prev = element->prev;
+ return list == element ? NULL : prev;
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
+{
+ if (!*list)
+ return NULL;
+ else {
+ ErtsProcList *res = *list;
+ if (res == res->next)
+ *list = NULL;
+ else
+ *list = res->next;
+ res->next->prev = res->prev;
+ res->prev->next = res->next;
+ return res;
+ }
+}
+
+ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_last(ErtsProcList **list)
+{
+ if (!*list)
+ return NULL;
+ else {
+ ErtsProcList *res = (*list)->prev;
+ if (res == *list)
+ *list = NULL;
+ res->next->prev = res->prev;
+ res->prev->next = res->next;
+ return res;
+ }
+}
+
+ERTS_GLB_INLINE int erts_proclist_fetch(ErtsProcList **list_first,
+ ErtsProcList **list_last)
+{
+ if (!*list_first) {
+ if (list_last)
+ *list_last = NULL;
+ return 0;
+ }
+ else {
+ if (list_last)
+ *list_last = (*list_first)->prev;
+ (*list_first)->prev->next = NULL;
+ (*list_first)->prev = NULL;
+ return !0;
+ }
+}
+
+ERTS_GLB_INLINE void erts_proclist_remove(ErtsProcList **list,
+ ErtsProcList *element)
+{
+ ASSERT(list && *list);
+ if (*list == element) {
+ *list = element->next;
+ if (*list == element)
+ *list = NULL;
+ }
+ element->next->prev = element->prev;
+ element->prev->next = element->next;
+}
+
+ERTS_GLB_INLINE int erts_proclist_is_empty(ErtsProcList *list)
+{
+ return list == NULL;
+}
+
+ERTS_GLB_INLINE int erts_proclist_is_first(ErtsProcList *list,
+ ErtsProcList *element)
+{
+ ASSERT(list && element);
+ return list == element;
+}
+
+ERTS_GLB_INLINE int erts_proclist_is_last(ErtsProcList *list,
+ ErtsProcList *element)
+{
+ ASSERT(list && element);
+ return list->prev == element;
+}
+
+#endif
int erts_sched_set_wakeup_other_thresold(char *str);
int erts_sched_set_wakeup_other_type(char *str);
int erts_sched_set_busy_wait_threshold(char *str);
+void erts_schedule_thr_prgr_later_op(void (*)(void *),
+ void *,
+ ErtsThrPrgrLaterOp *);
+
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_dbg_check_halloc_lock(Process *p);
#endif
@@ -1116,6 +1329,10 @@ void erts_smp_notify_check_children_needed(void);
#if ERTS_USE_ASYNC_READY_Q
void erts_notify_check_async_ready_queue(void *);
#endif
+#ifdef ERTS_SMP
+void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later);
+void erts_notify_finish_breakpointing(Process* p);
+#endif
void erts_schedule_misc_aux_work(int sched_id,
void (*func)(void *),
void *arg);
@@ -1126,7 +1343,7 @@ void erts_schedule_multi_misc_aux_work(int ignore_self,
erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
-void erts_init_process(int);
+void erts_init_process(int, int);
Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm);
Uint erts_run_queues_len(Uint *);
void erts_add_to_runq(Process *);
@@ -1136,14 +1353,6 @@ Eterm erts_get_schedulers_binds(Process *c_p);
Eterm erts_set_cpu_topology(Process *c_p, Eterm term);
Eterm erts_bind_schedulers(Process *c_p, Eterm how);
ErtsRunQueue *erts_schedid2runq(Uint);
-#ifdef ERTS_SMP
-ErtsMigrateResult erts_proc_migrate(Process *,
- ErtsProcLocks *,
- ErtsRunQueue *,
- int *,
- ErtsRunQueue *,
- int *);
-#endif
Process *schedule(Process*, int);
void erts_schedule_misc_op(void (*)(void *), void *);
Eterm erl_create_process(Process*, Eterm, Eterm, Eterm, ErlSpawnOpts*);
@@ -1153,7 +1362,6 @@ void set_timer(Process*, Uint);
void cancel_timer(Process*);
/* Begin System profile */
Uint erts_runnable_process_count(void);
-Uint erts_process_count(void);
/* End System profile */
void erts_init_empty_process(Process *p);
void erts_cleanup_empty_process(Process* p);
@@ -1177,7 +1385,7 @@ Eterm erts_sched_stat_term(Process *p, int total);
void erts_free_proc(Process *);
-void erts_suspend(Process*, ErtsProcLocks, struct port*);
+void erts_suspend(Process*, ErtsProcLocks, Port*);
void erts_resume(Process*, ErtsProcLocks);
int erts_resume_processes(ErtsProcList *);
@@ -1192,8 +1400,7 @@ int erts_send_exit_signal(Process *,
#ifdef ERTS_SMP
void erts_handle_pending_exit(Process *, ErtsProcLocks);
#define ERTS_PROC_PENDING_EXIT(P) \
- (ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((P)) & ERTS_PROC_LOCK_STATUS),\
- (P)->pending_exit.reason != THE_NON_VALUE)
+ (ERTS_PSFLG_PENDING_EXIT & erts_smp_atomic32_read_acqb(&(P)->state))
#else
#define ERTS_PROC_PENDING_EXIT(P) 0
#endif
@@ -1203,9 +1410,6 @@ void erts_deep_process_dump(int, void *);
Eterm erts_get_reader_groups_map(Process *c_p);
Eterm erts_debug_reader_groups_map(Process *c_p, int groups);
-Sint erts_test_next_pid(int, Uint);
-Eterm erts_debug_processes(Process *c_p);
-Eterm erts_debug_processes_bif_info(Process *c_p);
Uint erts_debug_nbalance(void);
int erts_debug_wait_deallocations(Process *c_p);
@@ -1245,13 +1449,26 @@ ErtsSchedulerData *erts_get_scheduler_data(void)
#endif
#endif
+void erts_schedule_process(Process *, erts_aint32_t);
+
+ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE void
+erts_proc_notify_new_message(Process *p)
+{
+ /* No barrier needed, due to msg lock */
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ if (!(state & ERTS_PSFLG_ACTIVE))
+ erts_schedule_process(p, state);
+}
+#endif
+
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
#define ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__
#include "erl_process_lock.h"
#undef ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__
-int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) \
do { \
if ((L)) \
@@ -1377,13 +1594,91 @@ erts_proc_set_error_handler(Process *p, ErtsProcLocks plocks, Eterm handler)
#endif
+#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
+
#ifdef ERTS_SMP
-ErtsRunQueue *erts_prepare_emigrate(ErtsRunQueue *c_rq,
- ErtsRunQueueInfo *c_rqi,
- int prio);
+#include "erl_thr_progress.h"
+
+extern erts_atomic_t erts_migration_paths;
+
+ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths_managed(void);
+ERTS_GLB_INLINE ErtsMigrationPaths *erts_get_migration_paths(void);
ERTS_GLB_INLINE ErtsRunQueue *erts_check_emigration_need(ErtsRunQueue *c_rq,
int prio);
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE ErtsMigrationPaths *
+erts_get_migration_paths_managed(void)
+{
+ return (ErtsMigrationPaths *) erts_atomic_read_ddrb(&erts_migration_paths);
+}
+
+ERTS_GLB_INLINE ErtsMigrationPaths *
+erts_get_migration_paths(void)
+{
+ if (erts_thr_progress_is_managed_thread())
+ return erts_get_migration_paths_managed();
+ else
+ return NULL;
+}
+
+ERTS_GLB_INLINE ErtsRunQueue *
+erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
+{
+ ErtsMigrationPaths *mps = erts_get_migration_paths();
+ ErtsMigrationPath *mp;
+ Uint32 flags;
+
+ if (!mps)
+ return NULL;
+
+ mp = &mps->mpath[c_rq->ix];
+ flags = mp->flags;
+
+ if (ERTS_CHK_RUNQ_FLG_EMIGRATE(flags, prio)) {
+ int len;
+
+ if (ERTS_CHK_RUNQ_FLG_EVACUATE(flags, prio)) {
+ /* force emigration */
+ return mp->prio[prio].runq;
+ }
+
+ if (flags & ERTS_RUNQ_FLG_INACTIVE) {
+ /*
+ * Run queue was inactive at last balance. Verify that
+ * it still is before forcing emigration.
+ */
+ if (ERTS_RUNQ_FLGS_GET(c_rq) & ERTS_RUNQ_FLG_INACTIVE)
+ return mp->prio[prio].runq;
+ }
+
+
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ len = RUNQ_READ_LEN(&c_rq->ports.info.len);
+ else
+ len = RUNQ_READ_LEN(&c_rq->procs.prio_info[prio].len);
+
+ if (len > mp->prio[prio].limit.this) {
+ ErtsRunQueue *n_rq = mp->prio[prio].runq;
+ if (n_rq) {
+ if (prio == ERTS_PORT_PRIO_LEVEL)
+ len = RUNQ_READ_LEN(&n_rq->ports.info.len);
+ else
+ len = RUNQ_READ_LEN(&n_rq->procs.prio_info[prio].len);
+
+ if (len < mp->prio[prio].limit.other)
+ return n_rq;
+ }
+ }
+ }
+ return NULL;
+}
+
+#endif
+
+#endif
+
#endif
ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp);
@@ -1406,29 +1701,6 @@ ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ERTS_SMP
-ERTS_GLB_INLINE ErtsRunQueue *
-erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
-{
- ErtsRunQueueInfo *c_rqi;
-
- if (!ERTS_CHK_RUNQ_FLG_EMIGRATE(c_rq->flags, prio))
- return NULL;
-
- if (prio == ERTS_PORT_PRIO_LEVEL)
- c_rqi = &c_rq->ports.info;
- else
- c_rqi = &c_rq->procs.prio_info[prio];
-
- if (!ERTS_CHK_RUNQ_FLG_EVACUATE(c_rq->flags, prio)
- && !(c_rq->flags & ERTS_RUNQ_FLG_INACTIVE)
- && c_rqi->len <= c_rqi->migrate.limit.this)
- return NULL;
-
- return erts_prepare_emigrate(c_rq, c_rqi, prio);
-}
-#endif
-
ERTS_GLB_INLINE
int erts_is_scheduler_bound(ErtsSchedulerData *esdp)
{
@@ -1449,7 +1721,7 @@ ERTS_GLB_INLINE
Eterm erts_get_current_pid(void)
{
Process *proc = erts_get_current_process();
- return proc ? proc->id : THE_NON_VALUE;
+ return proc ? proc->common.id : THE_NON_VALUE;
}
ERTS_GLB_INLINE
@@ -1466,10 +1738,9 @@ Uint erts_get_scheduler_id(void)
ERTS_GLB_INLINE ErtsRunQueue *
erts_get_runq_proc(Process *p)
{
- ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
#ifdef ERTS_SMP
- ASSERT(p->run_queue);
- return p->run_queue;
+ ASSERT(ERTS_AINT_NULL != erts_atomic_read_nob(&p->run_queue));
+ return (ErtsRunQueue *) erts_atomic_read_nob(&p->run_queue);
#else
return ERTS_RUNQ_IX(0);
#endif
@@ -1640,25 +1911,13 @@ extern int erts_disable_proc_not_running_opt;
#ifdef DEBUG
#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) \
- do { ASSERT(!(P)->is_exiting); } while (0)
+ do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0)
#else
#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
#endif
-/* NOTE: At least one process lock has to be held on P! */
-#ifdef ERTS_ENABLE_LOCK_CHECK
-#define ERTS_PROC_IS_EXITING(P) \
- (ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((P)) != 0 \
- || erts_lc_pix_lock_is_locked(ERTS_PID2PIXLOCK((P)->id))),\
- (P)->is_exiting)
-#else
-#define ERTS_PROC_IS_EXITING(P) ((P)->is_exiting)
-#endif
-
#else /* !ERTS_SMP */
-#define ERTS_PROC_IS_EXITING(P) ((P)->status == P_EXITING)
-
#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
#define erts_pid2proc_not_running erts_pid2proc
@@ -1666,11 +1925,15 @@ extern int erts_disable_proc_not_running_opt;
#endif
+#define ERTS_PROC_IS_EXITING(P) \
+ (ERTS_PSFLG_EXITING & erts_smp_atomic32_read_acqb(&(P)->state))
+
+
/* Minimum NUMBER of processes for a small system to start */
-#ifdef ERTS_SMP
+#define ERTS_MIN_PROCESSES 1024
+#if defined(ERTS_SMP) && ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS
+#undef ERTS_MIN_PROCESSES
#define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS
-#else
-#define ERTS_MIN_PROCESSES 16
#endif
void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index 93466da3aa..bf384c66e1 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -360,7 +360,7 @@ static void pd_hash_erase(Process *p, Eterm id, Eterm *ret)
erts_fprintf(stderr,
"Process dictionary for process %T is broken, trying to "
"display term found in line %d:\n"
- "%T\n", p->id, __LINE__, old);
+ "%T\n", p->common.id, __LINE__, old);
#endif
erl_exit(1, "Damaged process dictionary found during erase/1.");
}
@@ -405,7 +405,7 @@ Eterm erts_pd_hash_get(Process *p, Eterm id)
erts_fprintf(stderr,
"Process dictionary for process %T is broken, trying to "
"display term found in line %d:\n"
- "%T\n", p->id, __LINE__, tmp);
+ "%T\n", p->common.id, __LINE__, tmp);
#endif
erl_exit(1, "Damaged process dictionary found during get/1.");
}
@@ -614,7 +614,7 @@ static Eterm pd_hash_put(Process *p, Eterm id, Eterm value)
erts_fprintf(stderr,
"Process dictionary for process %T is broken, trying to "
"display term found in line %d:\n"
- "%T\n", p->id, __LINE__, old);
+ "%T\n", p->common.id, __LINE__, old);
#endif
erl_exit(1, "Damaged process dictionary found during put/2.");
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 3550f1396c..ba74dfd6a1 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -60,19 +60,16 @@ extern BeamInstr beam_continue_exit[];
void
erts_deep_process_dump(int to, void *to_arg)
{
- int i;
+ int i, max = erts_ptab_max(&erts_proc);
all_binaries = NULL;
- for (i = 0; i < erts_max_processes; i++) {
- if ((process_tab[i] != NULL) && (process_tab[i]->i != ENULL)) {
- if (process_tab[i]->status != P_EXITING) {
- Process* p = process_tab[i];
-
- if (p->status != P_GARBING) {
- dump_process_info(to, to_arg, p);
- }
- }
+ for (i = 0; i < max; i++) {
+ Process *p = erts_pix2proc(i);
+ if (p && p->i != ENULL) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC)))
+ dump_process_info(to, to_arg, p);
}
}
@@ -88,8 +85,8 @@ dump_process_info(int to, void *to_arg, Process *p)
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
- if ((p->trace_flags & F_SENSITIVE) == 0 && p->msg.first) {
- erts_print(to, to_arg, "=proc_messages:%T\n", p->id);
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) {
+ erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id);
for (mp = p->msg.first; mp != NULL; mp = mp->next) {
Eterm mesg = ERL_MESSAGE_TERM(mp);
if (is_value(mesg))
@@ -103,21 +100,21 @@ dump_process_info(int to, void *to_arg, Process *p)
}
}
- if ((p->trace_flags & F_SENSITIVE) == 0) {
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
if (p->dictionary) {
- erts_print(to, to_arg, "=proc_dictionary:%T\n", p->id);
+ erts_print(to, to_arg, "=proc_dictionary:%T\n", p->common.id);
erts_deep_dictionary_dump(to, to_arg,
p->dictionary, dump_element_nl);
}
}
- if ((p->trace_flags & F_SENSITIVE) == 0) {
- erts_print(to, to_arg, "=proc_stack:%T\n", p->id);
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
+ erts_print(to, to_arg, "=proc_stack:%T\n", p->common.id);
for (sp = p->stop; sp < STACK_START(p); sp++) {
yreg = stack_element_dump(to, to_arg, p, sp, yreg);
}
- erts_print(to, to_arg, "=proc_heap:%T\n", p->id);
+ erts_print(to, to_arg, "=proc_heap:%T\n", p->common.id);
for (sp = p->stop; sp < STACK_START(p); sp++) {
Eterm term = *sp;
@@ -326,7 +323,7 @@ heap_dump(int to, void *to_arg, Eterm x)
int i;
GET_DOUBLE_DATA((ptr+1), f);
- i = sys_double_to_chars(f.fd, (char*) sbuf);
+ i = sys_double_to_chars(f.fd, (char*) sbuf, sizeof(sbuf));
sys_memset(sbuf+i, 0, 31-i);
erts_print(to, to_arg, "F%X:%s\n", i, sbuf);
*ptr = OUR_NIL;
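The dump loop switches from the fixed process_tab[] array to the new table accessors and to an atomic state check. A sketch of that iteration idiom, using only the calls that appear in the hunk above; the callback is a placeholder:

    /* Visit every live process that is neither exiting nor being
     * garbage collected. */
    static void
    for_each_live_process(void (*visit)(Process *))
    {
        int ix, max = erts_ptab_max(&erts_proc);
        for (ix = 0; ix < max; ix++) {
            Process *p = erts_pix2proc(ix);
            if (p && p->i != ENULL) {
                erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
                if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC)))
                    visit(p);
            }
        }
    }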
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index f7900317cc..2db5df06b4 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -66,11 +66,12 @@
#endif
#include "erl_process.h"
-
-const Process erts_proc_lock_busy;
+#include "erl_thr_progress.h"
#ifdef ERTS_SMP
+#if ERTS_PROC_LOCK_OWN_IMPL
+
#define ERTS_PROC_LOCK_SPIN_COUNT_MAX 2000
#define ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC 32
#define ERTS_PROC_LOCK_SPIN_COUNT_BASE 1000
@@ -90,6 +91,13 @@ static void check_queue(erts_proc_lock_t *lck);
#error "The size of the 'uflgs' field of the erts_tse_t type is too small"
#endif
+static int proc_lock_spin_count;
+static int aux_thr_proc_lock_spin_count;
+
+static void cleanup_tse(void);
+
+#endif /* ERTS_PROC_LOCK_OWN_IMPL */
+
#ifdef ERTS_ENABLE_LOCK_CHECK
static struct {
Sint16 proc_lock_main;
@@ -101,10 +109,6 @@ static struct {
erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
-static int proc_lock_spin_count;
-static int aux_thr_proc_lock_spin_count;
-
-static void cleanup_tse(void);
void
erts_init_proc_lock(int cpus)
@@ -118,13 +122,8 @@ erts_init_proc_lock(int cpus)
erts_mtx_init(&erts_pix_locks[i].u.mtx, "pix_lock");
#endif
}
+#if ERTS_PROC_LOCK_OWN_IMPL
erts_thr_install_exit_handler(cleanup_tse);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
- lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
- lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
- lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
-#endif
if (cpus > 1) {
proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_BASE;
proc_lock_spin_count += (ERTS_PROC_LOCK_SPIN_COUNT_SCHED_INC
@@ -141,8 +140,17 @@ erts_init_proc_lock(int cpus)
}
if (proc_lock_spin_count > ERTS_PROC_LOCK_SPIN_COUNT_MAX)
proc_lock_spin_count = ERTS_PROC_LOCK_SPIN_COUNT_MAX;
+#endif
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ lc_id.proc_lock_main = erts_lc_get_lock_order_id("proc_main");
+ lc_id.proc_lock_link = erts_lc_get_lock_order_id("proc_link");
+ lc_id.proc_lock_msgq = erts_lc_get_lock_order_id("proc_msgq");
+ lc_id.proc_lock_status = erts_lc_get_lock_order_id("proc_status");
+#endif
}
+#if ERTS_PROC_LOCK_OWN_IMPL
+
#ifdef ERTS_ENABLE_LOCK_CHECK
#define CHECK_UNUSED_TSE(W) ERTS_LC_ASSERT((W)->uflgs == 0)
#else
@@ -164,13 +172,6 @@ tse_return(erts_tse_t *tse)
erts_tse_return(tse);
}
-void
-erts_proc_lock_prepare_proc_lock_waiter(void)
-{
- tse_return(tse_fetch(NULL));
-}
-
-
static void
cleanup_tse(void)
{
@@ -397,7 +398,7 @@ wait_for_locks(Process *p,
ErtsProcLocks need_locks,
ErtsProcLocks olflgs)
{
- erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
erts_tse_t *wtr;
/* Acquire a waiter object on which this thread can wait. */
@@ -551,7 +552,7 @@ erts_proc_unlock_failed(Process *p,
erts_pix_lock_t *pixlck,
ErtsProcLocks wait_locks)
{
- erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
+ erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lock);
@@ -560,6 +561,16 @@ erts_proc_unlock_failed(Process *p,
transfer_locks(p, wait_locks, pix_lock, 1); /* unlocks pix_lock */
}
+#endif /* ERTS_PROC_LOCK_OWN_IMPL */
+
+void
+erts_proc_lock_prepare_proc_lock_waiter(void)
+{
+#if ERTS_PROC_LOCK_OWN_IMPL
+ tse_return(tse_fetch(NULL));
+#endif
+}
+
/*
* proc_safelock() locks process locks on two processes. In order
* to avoid a deadlock, proc_safelock() unlocks those locks that
@@ -568,12 +579,11 @@ erts_proc_unlock_failed(Process *p,
*/
static void
-proc_safelock(Process *a_proc,
- erts_pix_lock_t *a_pix_lck,
+proc_safelock(int is_managed,
+ Process *a_proc,
ErtsProcLocks a_have_locks,
ErtsProcLocks a_need_locks,
Process *b_proc,
- erts_pix_lock_t *b_pix_lck,
ErtsProcLocks b_have_locks,
ErtsProcLocks b_need_locks)
{
@@ -581,7 +591,6 @@ proc_safelock(Process *a_proc,
#ifdef ERTS_ENABLE_LOCK_CHECK
Eterm pid1, pid2;
#endif
- erts_pix_lock_t *pix_lck1, *pix_lck2;
ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2;
ErtsProcLocks unlock_mask;
int lock_no, refc1 = 0, refc2 = 0;
@@ -593,53 +602,47 @@ proc_safelock(Process *a_proc,
* Locks with the same lock order should be locked on p1 before p2.
*/
if (a_proc) {
- if (a_proc->id < b_proc->id) {
+ if (a_proc->common.id < b_proc->common.id) {
p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = a_proc->id;
+ pid1 = a_proc->common.id;
#endif
- pix_lck1 = a_pix_lck;
need_locks1 = a_need_locks;
have_locks1 = a_have_locks;
p2 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid2 = b_proc->id;
+ pid2 = b_proc->common.id;
#endif
- pix_lck2 = b_pix_lck;
need_locks2 = b_need_locks;
have_locks2 = b_have_locks;
}
- else if (a_proc->id > b_proc->id) {
+ else if (a_proc->common.id > b_proc->common.id) {
p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = b_proc->id;
+ pid1 = b_proc->common.id;
#endif
- pix_lck1 = b_pix_lck;
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
p2 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid2 = a_proc->id;
+ pid2 = a_proc->common.id;
#endif
- pix_lck2 = a_pix_lck;
need_locks2 = a_need_locks;
have_locks2 = a_have_locks;
}
else {
ERTS_LC_ASSERT(a_proc == b_proc);
- ERTS_LC_ASSERT(a_proc->id == b_proc->id);
+ ERTS_LC_ASSERT(a_proc->common.id == b_proc->common.id);
p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = a_proc->id;
+ pid1 = a_proc->common.id;
#endif
- pix_lck1 = a_pix_lck;
need_locks1 = a_need_locks | b_need_locks;
have_locks1 = a_have_locks | b_have_locks;
p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = 0;
#endif
- pix_lck2 = NULL;
need_locks2 = 0;
have_locks2 = 0;
}
@@ -647,16 +650,14 @@ proc_safelock(Process *a_proc,
else {
p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
- pid1 = b_proc->id;
+ pid1 = b_proc->common.id;
#endif
- pix_lck1 = b_pix_lck;
need_locks1 = b_need_locks;
have_locks1 = b_have_locks;
p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
pid2 = 0;
#endif
- pix_lck2 = NULL;
need_locks2 = 0;
have_locks2 = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -704,21 +705,21 @@ proc_safelock(Process *a_proc,
if (unlock_locks) {
have_locks1 &= ~unlock_locks;
need_locks1 |= unlock_locks;
- if (!have_locks1) {
+ if (!is_managed && !have_locks1) {
refc1 = 1;
erts_smp_proc_inc_refc(p1);
}
- erts_smp_proc_unlock__(p1, pix_lck1, unlock_locks);
+ erts_smp_proc_unlock(p1, unlock_locks);
}
unlock_locks = unlock_mask & have_locks2;
if (unlock_locks) {
have_locks2 &= ~unlock_locks;
need_locks2 |= unlock_locks;
- if (!have_locks2) {
+ if (!is_managed && !have_locks2) {
refc2 = 1;
erts_smp_proc_inc_refc(p2);
}
- erts_smp_proc_unlock__(p2, pix_lck2, unlock_locks);
+ erts_smp_proc_unlock(p2, unlock_locks);
}
}
@@ -749,7 +750,7 @@ proc_safelock(Process *a_proc,
if (need_locks2 & lock)
lock_no--;
locks = need_locks1 & lock_mask;
- erts_smp_proc_lock__(p1, pix_lck1, locks);
+ erts_smp_proc_lock(p1, locks);
have_locks1 |= locks;
need_locks1 &= ~locks;
}
@@ -760,7 +761,7 @@ proc_safelock(Process *a_proc,
lock = (1 << ++lock_no);
}
locks = need_locks2 & lock_mask;
- erts_smp_proc_lock__(p2, pix_lck2, locks);
+ erts_smp_proc_lock(p2, locks);
have_locks2 |= locks;
need_locks2 &= ~locks;
}
@@ -795,10 +796,12 @@ proc_safelock(Process *a_proc,
}
#endif
- if (refc1)
- erts_smp_proc_dec_refc(p1);
- if (refc2)
- erts_smp_proc_dec_refc(p2);
+ if (!is_managed) {
+ if (refc1)
+ erts_smp_proc_dec_refc(p1);
+ if (refc2)
+ erts_smp_proc_dec_refc(p2);
+ }
}
void
@@ -809,77 +812,196 @@ erts_proc_safelock(Process *a_proc,
ErtsProcLocks b_have_locks,
ErtsProcLocks b_need_locks)
{
- proc_safelock(a_proc,
- a_proc ? ERTS_PID2PIXLOCK(a_proc->id) : NULL,
+ proc_safelock(erts_get_scheduler_id() != 0,
+ a_proc,
a_have_locks,
a_need_locks,
b_proc,
- b_proc ? ERTS_PID2PIXLOCK(b_proc->id) : NULL,
b_have_locks,
b_need_locks);
}
-/*
- * erts_pid2proc_safelock() is called from erts_pid2proc_opt() when
- * it wasn't possible to trylock all locks needed.
- * c_p - current process
- * c_p_have_locks - locks held on c_p
- * pid - process id of process we are looking up
- * proc - process struct of process we are looking
- * up (both in and out argument)
- * need_locks - all locks we need (including have_locks)
- * pix_lock - pix lock for process we are looking up
- * flags - option flags
- */
-void
-erts_pid2proc_safelock(Process *c_p,
- ErtsProcLocks c_p_have_locks,
- Process **proc,
- ErtsProcLocks need_locks,
- erts_pix_lock_t *pix_lock,
- int flags)
+Process *
+erts_pid2proc_opt(Process *c_p,
+ ErtsProcLocks c_p_have_locks,
+ Eterm pid,
+ ErtsProcLocks pid_need_locks,
+ int flags)
{
- Process *p = *proc;
- ERTS_LC_ASSERT(p->lock.refc > 0);
- ERTS_LC_ASSERT(process_tab[internal_pid_index(p->id)] == p);
- p->lock.refc++;
- erts_pix_unlock(pix_lock);
-
- proc_safelock(c_p,
- c_p ? ERTS_PID2PIXLOCK(c_p->id) : NULL,
- c_p_have_locks,
- c_p_have_locks,
- p,
- pix_lock,
- 0,
- need_locks);
+ Process *dec_refc_proc = NULL;
+ ErtsThrPrgrDelayHandle dhndl;
+ ErtsProcLocks need_locks;
+ Uint pix;
+ Process *proc;
+#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
+ ErtsProcLocks lcnt_locks;
+#endif
- erts_pix_lock(pix_lock);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ if (c_p) {
+ ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
+ if (might_unlock)
+ erts_proc_lc_might_unlock(c_p, might_unlock);
+ }
+#endif
- if (!p->is_exiting
- || ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
- && process_tab[internal_pid_index(p->id)] == p)) {
- ERTS_LC_ASSERT(p->lock.refc > 1);
- p->lock.refc--;
+ if (is_not_internal_pid(pid))
+ return NULL;
+ pix = internal_pid_index(pid);
+
+ ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
+ need_locks = pid_need_locks;
+
+ if (c_p && c_p->common.id == pid) {
+ ASSERT(c_p->common.id != ERTS_INVALID_PID);
+ ASSERT(c_p == erts_pix2proc(pix));
+
+ if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
+ && ERTS_PROC_IS_EXITING(c_p))
+ return NULL;
+ need_locks &= ~c_p_have_locks;
+ if (!need_locks) {
+ if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ erts_smp_proc_inc_refc(c_p);
+ return c_p;
+ }
}
- else {
- /* No proc. Note, we need to keep refc until after process unlock */
- erts_pix_unlock(pix_lock);
- erts_smp_proc_unlock__(p, pix_lock, need_locks);
- *proc = NULL;
- erts_pix_lock(pix_lock);
- ERTS_LC_ASSERT(p->lock.refc > 0);
- if (--p->lock.refc == 0) {
- erts_pix_unlock(pix_lock);
- erts_free_proc(p);
- erts_pix_lock(pix_lock);
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+
+ proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);
+
+ if (proc) {
+ if (proc->common.id != pid)
+ proc = NULL;
+ else if (!need_locks) {
+ if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ erts_smp_proc_inc_refc(proc);
}
+ else {
+ int busy;
+
+#if ERTS_PROC_LOCK_OWN_IMPL
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ lcnt_locks = need_locks;
+ if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
+ erts_lcnt_proc_lock(&proc->lock, need_locks);
+ }
+#endif
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ /* Make sure erts_pid2proc_safelock() is enough to handle
+ a potential lock order violation situation... */
+ busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
+ if (!busy)
+#endif
+#endif /* ERTS_PROC_LOCK_OWN_IMPL */
+ {
+ /* Try a quick trylock to grab all the locks we need. */
+ busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);
+
+#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
+ erts_proc_lc_trylock(proc, need_locks, !busy);
+#endif
+#ifdef ERTS_PROC_LOCK_DEBUG
+ if (!busy)
+ erts_proc_lock_op_debug(proc, need_locks, 1);
+#endif
+ }
+
+#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
+ if (flags & ERTS_P2P_FLG_TRY_LOCK)
+ erts_lcnt_proc_trylock(&proc->lock, need_locks,
+ busy ? EBUSY : 0);
+#endif
+
+ if (!busy) {
+ if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ erts_smp_proc_inc_refc(proc);
+
+#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
+ /* all is great */
+ if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
+ erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
+ __FILE__, __LINE__);
+#endif
+
+ }
+ else {
+ if (flags & ERTS_P2P_FLG_TRY_LOCK)
+ proc = ERTS_PROC_LOCK_BUSY;
+ else {
+ int managed;
+ if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ erts_smp_proc_inc_refc(proc);
+
+#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
+ erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
+#endif
+
+ managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
+ if (!managed) {
+ erts_smp_proc_inc_refc(proc);
+ erts_thr_progress_unmanaged_continue(dhndl);
+ dec_refc_proc = proc;
+
+ /*
+ * We don't want to call
+ * erts_thr_progress_unmanaged_continue()
+ * again.
+ */
+ dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
+ }
+
+ proc_safelock(managed,
+ c_p,
+ c_p_have_locks,
+ c_p_have_locks,
+ proc,
+ 0,
+ need_locks);
+ }
+ }
+ }
+ }
+
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+
+ if (need_locks
+ && proc
+ && proc != ERTS_PROC_LOCK_BUSY
+ && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
+ ? ERTS_PROC_IS_EXITING(proc)
+ : (proc
+ != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {
+
+ erts_smp_proc_unlock(proc, need_locks);
+
+ if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
+ dec_refc_proc = proc;
+ proc = NULL;
+
}
+
+ if (dec_refc_proc)
+ erts_smp_proc_dec_refc(dec_refc_proc);
+
+#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
+ ERTS_LC_ASSERT(!proc
+ || proc == ERTS_PROC_LOCK_BUSY
+ || (pid_need_locks ==
+ (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
+ & pid_need_locks)));
+#endif
+
+ return proc;
}
void
erts_proc_lock_init(Process *p)
{
+#if ERTS_PROC_LOCK_OWN_IMPL
int i;
/* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
@@ -890,32 +1012,67 @@ erts_proc_lock_init(Process *p)
#endif
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
p->lock.queue[i] = NULL;
- p->lock.refc = 1;
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_proc_lock_init(p);
- erts_lcnt_proc_lock(&(p->lock), ERTS_PROC_LOCKS_ALL);
- erts_lcnt_proc_lock_post_x(&(p->lock), ERTS_PROC_LOCKS_ALL, __FILE__, __LINE__);
-#endif
-
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
#endif
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_mtx_init_x(&p->lock.main, "proc_main", p->common.id);
+ ethr_mutex_lock(&p->lock.main.mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &p->lock.main.lc);
+#endif
+ erts_mtx_init_x(&p->lock.link, "proc_link", p->common.id);
+ ethr_mutex_lock(&p->lock.link.mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &p->lock.link.lc);
+#endif
+ erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->common.id);
+ ethr_mutex_lock(&p->lock.msgq.mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &p->lock.msgq.lc);
+#endif
+ erts_mtx_init_x(&p->lock.status, "proc_status", p->common.id);
+ ethr_mutex_lock(&p->lock.status.mtx);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_lc_trylock(1, &p->lock.status.lc);
+#endif
+#endif
+ erts_atomic32_init_nob(&p->lock.refc, 1);
#ifdef ERTS_PROC_LOCK_DEBUG
for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
#endif
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ erts_lcnt_proc_lock_init(p);
+ erts_lcnt_proc_lock(&(p->lock), ERTS_PROC_LOCKS_ALL);
+ erts_lcnt_proc_lock_post_x(&(p->lock), ERTS_PROC_LOCKS_ALL, __FILE__, __LINE__);
+#endif
+}
+
+void
+erts_proc_lock_fin(Process *p)
+{
+#if ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_mtx_destroy(&p->lock.main);
+ erts_mtx_destroy(&p->lock.link);
+ erts_mtx_destroy(&p->lock.msgq);
+ erts_mtx_destroy(&p->lock.status);
+#endif
+#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
+ erts_lcnt_proc_lock_destroy(p);
+#endif
}
/* --- Process lock counting ----------------------------------------------- */
-#ifdef ERTS_ENABLE_LOCK_COUNT
+#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_proc_lock_init(Process *p) {
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) {
- if (p->id != ERTS_INVALID_PID) {
- erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->id);
- erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->id);
+ if (p->common.id != ERTS_INVALID_PID) {
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK, p->common.id);
+ erts_lcnt_init_lock_x(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK, p->common.id);
} else {
erts_lcnt_init_lock(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK);
erts_lcnt_init_lock(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK);
@@ -1023,11 +1180,12 @@ void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res
}
-void erts_lcnt_enable_proc_lock_count(int enable) {
- int i;
+void erts_lcnt_enable_proc_lock_count(int enable)
+{
+ int i, max = erts_ptab_max(&erts_proc);
- for (i = 0; i < erts_max_processes; ++i) {
- Process* p = process_tab[i];
+ for (i = 0; i < max; ++i) {
+ Process* p = erts_pix2proc(i);
if (p) {
if (enable) {
if (!ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) {
@@ -1049,11 +1207,13 @@ void erts_lcnt_enable_proc_lock_count(int enable) {
#ifdef ERTS_ENABLE_LOCK_CHECK
+#if ERTS_PROC_LOCK_OWN_IMPL
+
void
erts_proc_lc_lock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
@@ -1077,7 +1237,7 @@ void
erts_proc_lc_trylock(Process *p, ErtsProcLocks locks, int locked)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
@@ -1101,7 +1261,7 @@ void
erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
{
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
@@ -1121,11 +1281,14 @@ erts_proc_lc_unlock(Process *p, ErtsProcLocks locks)
}
}
+#endif /* ERTS_PROC_LOCK_OWN_IMPL */
+
void
erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
{
+#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
@@ -1143,13 +1306,24 @@ erts_proc_lc_might_unlock(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_main;
erts_lc_might_unlock(&lck);
}
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ erts_lc_might_unlock(&p->lock.main.lc);
+ if (locks & ERTS_PROC_LOCK_LINK)
+ erts_lc_might_unlock(&p->lock.link.lc);
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ erts_lc_might_unlock(&p->lock.msgq.lc);
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ erts_lc_might_unlock(&p->lock.status.lc);
+#endif
}
void
erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
{
+#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN) {
lck.id = lc_id.proc_lock_main;
@@ -1167,13 +1341,24 @@ erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_status;
erts_lc_require_lock(&lck);
}
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ erts_lc_require_lock(&p->lock.main.lc);
+ if (locks & ERTS_PROC_LOCK_LINK)
+ erts_lc_require_lock(&p->lock.link.lc);
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ erts_lc_require_lock(&p->lock.msgq.lc);
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ erts_lc_require_lock(&p->lock.status.lc);
+#endif
}
void
erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
{
+#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_STATUS) {
lck.id = lc_id.proc_lock_status;
@@ -1191,15 +1376,26 @@ erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks)
lck.id = lc_id.proc_lock_main;
erts_lc_unrequire_lock(&lck);
}
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ erts_lc_unrequire_lock(&p->lock.main.lc);
+ if (locks & ERTS_PROC_LOCK_LINK)
+ erts_lc_unrequire_lock(&p->lock.link.lc);
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ erts_lc_unrequire_lock(&p->lock.msgq.lc);
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ erts_lc_unrequire_lock(&p->lock.status.lc);
+#endif
}
+#if ERTS_PROC_LOCK_OWN_IMPL
int
erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
{
if (locks & ERTS_PROC_LOCKS_ALL) {
erts_lc_lock_t lck = ERTS_LC_LOCK_INIT(-1,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
if (locks & ERTS_PROC_LOCK_MAIN)
@@ -1218,42 +1414,61 @@ erts_proc_lc_trylock_force_busy(Process *p, ErtsProcLocks locks)
return 0;
}
+#endif /* ERTS_PROC_LOCK_OWN_IMPL */
+
void erts_proc_lc_chk_only_proc_main(Process *p)
{
+#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t proc_main = ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK);
erts_lc_check_exact(&proc_main, 1);
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_lc_check_exact(&p->lock.main.lc, 1);
+#endif
}
+#if ERTS_PROC_LOCK_OWN_IMPL
#define ERTS_PROC_LC_EMPTY_LOCK_INIT \
ERTS_LC_LOCK_INIT(-1, THE_NON_VALUE, ERTS_LC_FLG_LT_PROCLOCK)
+#endif /* ERTS_PROC_LOCK_OWN_IMPL */
void
erts_proc_lc_chk_have_proc_locks(Process *p, ErtsProcLocks locks)
{
int have_locks_len = 0;
+#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT};
if (locks & ERTS_PROC_LOCK_MAIN) {
have_locks[have_locks_len].id = lc_id.proc_lock_main;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_LINK) {
have_locks[have_locks_len].id = lc_id.proc_lock_link;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
- have_locks[have_locks_len++].extra = p->id;
- }
-
+ have_locks[have_locks_len++].extra = p->common.id;
+ }
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_lc_lock_t have_locks[4];
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ have_locks[have_locks_len++] = p->lock.main.lc;
+ if (locks & ERTS_PROC_LOCK_LINK)
+ have_locks[have_locks_len++] = p->lock.link.lc;
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ have_locks[have_locks_len++] = p->lock.msgq.lc;
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ have_locks[have_locks_len++] = p->lock.status.lc;
+#endif
erts_lc_check(have_locks, have_locks_len, NULL, 0);
}
@@ -1262,6 +1477,7 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
{
int have_locks_len = 0;
int have_not_locks_len = 0;
+#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t have_locks[4] = {ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
ERTS_PROC_LC_EMPTY_LOCK_INIT,
@@ -1273,36 +1489,57 @@ erts_proc_lc_chk_proc_locks(Process *p, ErtsProcLocks locks)
if (locks & ERTS_PROC_LOCK_MAIN) {
have_locks[have_locks_len].id = lc_id.proc_lock_main;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_main;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_LINK) {
have_locks[have_locks_len].id = lc_id.proc_lock_link;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_link;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_MSGQ) {
have_locks[have_locks_len].id = lc_id.proc_lock_msgq;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_msgq;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
if (locks & ERTS_PROC_LOCK_STATUS) {
have_locks[have_locks_len].id = lc_id.proc_lock_status;
- have_locks[have_locks_len++].extra = p->id;
+ have_locks[have_locks_len++].extra = p->common.id;
}
else {
have_not_locks[have_not_locks_len].id = lc_id.proc_lock_status;
- have_not_locks[have_not_locks_len++].extra = p->id;
+ have_not_locks[have_not_locks_len++].extra = p->common.id;
}
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_lc_lock_t have_locks[4];
+ erts_lc_lock_t have_not_locks[4];
+
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ have_locks[have_locks_len++] = p->lock.main.lc;
+ else
+ have_not_locks[have_not_locks_len++] = p->lock.main.lc;
+ if (locks & ERTS_PROC_LOCK_LINK)
+ have_locks[have_locks_len++] = p->lock.link.lc;
+ else
+ have_not_locks[have_not_locks_len++] = p->lock.link.lc;
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ have_locks[have_locks_len++] = p->lock.msgq.lc;
+ else
+ have_not_locks[have_not_locks_len++] = p->lock.msgq.lc;
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ have_locks[have_locks_len++] = p->lock.status.lc;
+ else
+ have_not_locks[have_not_locks_len++] = p->lock.status.lc;
+#endif
erts_lc_check(have_locks, have_locks_len,
have_not_locks, have_not_locks_len);
@@ -1312,20 +1549,26 @@ ErtsProcLocks
erts_proc_lc_my_proc_locks(Process *p)
{
int resv[4];
+ ErtsProcLocks res = 0;
+#if ERTS_PROC_LOCK_OWN_IMPL
erts_lc_lock_t locks[4] = {ERTS_LC_LOCK_INIT(lc_id.proc_lock_main,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_link,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_msgq,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK),
ERTS_LC_LOCK_INIT(lc_id.proc_lock_status,
- p->id,
+ p->common.id,
ERTS_LC_FLG_LT_PROCLOCK)};
-
- ErtsProcLocks res = 0;
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_lc_lock_t locks[4] = {p->lock.main.lc,
+ p->lock.link.lc,
+ p->lock.msgq.lc,
+ p->lock.status.lc};
+#endif
erts_lc_have_locks(resv, locks, 4);
if (resv[0])
@@ -1358,7 +1601,7 @@ erts_proc_lc_chk_no_proc_locks(char *file, int line)
#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
-#ifdef ERTS_PROC_LOCK_HARD_DEBUG
+#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_HARD_DEBUG)
void
check_queue(erts_proc_lock_t *lck)
{
@@ -1391,4 +1634,4 @@ check_queue(erts_proc_lock_t *lck)
}
#endif
-#endif /* ERTS_SMP (the whole file) */
+#endif /* ERTS_SMP */
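With this rewrite, erts_pid2proc_opt() first attempts a raw trylock of all requested locks and only falls back to proc_safelock() when that fails; the fallback may temporarily release locks already held on the calling process, and unmanaged threads take an extra reference and leave the thread-progress delay before they block. A minimal caller sketch, assuming the lock flags from erl_process_lock.h below; the helper name is hypothetical:

    /* Take the peer's main lock while already holding our own main
     * lock; a NULL result means the peer does not exist or is exiting. */
    static int
    with_peer_main_lock(Process *c_p, Eterm pid)
    {
        Process *peer = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
                                          pid, ERTS_PROC_LOCK_MAIN, 0);
        if (!peer)
            return 0;
        /* ... both main locks are held here ... */
        erts_smp_proc_unlock(peer, ERTS_PROC_LOCK_MAIN);
        return 1;
    }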
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 062b8366d3..9dd503f3cb 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -37,10 +37,21 @@
#include "erl_smp.h"
+#if defined(VALGRIND) || defined(ETHR_DISABLE_NATIVE_IMPLS)
+# define ERTS_PROC_LOCK_OWN_IMPL 0
+#else
+# define ERTS_PROC_LOCK_OWN_IMPL 1
+#endif
+
#define ERTS_PROC_LOCK_ATOMIC_IMPL 0
#define ERTS_PROC_LOCK_SPINLOCK_IMPL 0
#define ERTS_PROC_LOCK_MUTEX_IMPL 0
+#if !ERTS_PROC_LOCK_OWN_IMPL
+#define ERTS_PROC_LOCK_RAW_MUTEX_IMPL 1
+#else
+#define ERTS_PROC_LOCK_RAW_MUTEX_IMPL 0
+
#if defined(ETHR_HAVE_32BIT_NATIVE_ATOMIC_OPS)
# undef ERTS_PROC_LOCK_ATOMIC_IMPL
# define ERTS_PROC_LOCK_ATOMIC_IMPL 1
@@ -52,27 +63,38 @@
# define ERTS_PROC_LOCK_MUTEX_IMPL 1
#endif
+#endif
+
#define ERTS_PROC_LOCK_MAX_BIT 3
typedef erts_aint32_t ErtsProcLocks;
typedef struct erts_proc_lock_t_ {
+#if ERTS_PROC_LOCK_OWN_IMPL
#if ERTS_PROC_LOCK_ATOMIC_IMPL
erts_smp_atomic32_t flags;
#else
ErtsProcLocks flags;
#endif
erts_tse_t *queue[ERTS_PROC_LOCK_MAX_BIT+1];
- Sint32 refc;
-#ifdef ERTS_PROC_LOCK_DEBUG
- erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
-#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_t lcnt_main;
erts_lcnt_lock_t lcnt_link;
erts_lcnt_lock_t lcnt_msgq;
erts_lcnt_lock_t lcnt_status;
#endif
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ erts_mtx_t main;
+ erts_mtx_t link;
+ erts_mtx_t msgq;
+ erts_mtx_t status;
+#else
+# error "no implementation"
+#endif
+ erts_atomic32_t refc;
+#ifdef ERTS_PROC_LOCK_DEBUG
+ erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1];
+#endif
} erts_proc_lock_t;
/* Process lock flags */
@@ -105,11 +127,9 @@ typedef struct erts_proc_lock_t_ {
/*
* Status lock:
* Protects the following fields in the process structure:
- * * status
- * * rstatus
- * * status_flags
* * pending_suspenders
* * suspendee
+ * * ...
*/
#define ERTS_PROC_LOCK_STATUS (((ErtsProcLocks) 1) << ERTS_PROC_LOCK_MAX_BIT)
@@ -141,14 +161,11 @@ typedef struct erts_proc_lock_t_ {
* Other rules regarding process locking:
*
* Exiting processes:
- * When changing status to P_EXITING on a process, you are required
- * to take all process locks (ERTS_PROC_LOCKS_ALL). Thus, by holding
- * at least one process lock (whichever one doesn't matter) you
- * are guaranteed that the process won't exit until the lock you are
- * holding has been released. Appart from all process locks also
- * the pix lock corresponding to the process has to be held.
- * At the same time as status is changed to P_EXITING, also the
- * field 'is_exiting' in the process structure is set to a value != 0.
+ * When changing state to exiting (ERTS_PSFLG_EXITING) on a process,
+ * you are required to take all process locks (ERTS_PROC_LOCKS_ALL).
+ * Thus, by holding at least one process lock (whichever one doesn't
+ * matter) you are guaranteed that the process won't exit until the
+ * lock you are holding has been released.
*
* Lock order:
* Process locks with low numeric values has to be locked before
@@ -159,8 +176,8 @@ typedef struct erts_proc_lock_t_ {
* on multiple processes, locks on processes with low process ids
* have to be locked before locks on processes with high process
* ids. E.g., if the main and the message queue locks are to be
- * locked on processes p1 and p2 and p1->id < p2->id, then locks
- * should be locked in the following order:
+ * locked on processes p1 and p2 and p1->common.id < p2->common.id,
+ * then locks should be locked in the following order:
* 1. main lock on p1
* 2. main lock on p2
* 3. message queue lock on p1
@@ -186,7 +203,7 @@ typedef struct erts_proc_lock_t_ {
& ~ERTS_PROC_LOCK_MAIN)
-#define ERTS_PIX_LOCKS_BITS 8
+#define ERTS_PIX_LOCKS_BITS 10
#define ERTS_NO_OF_PIX_LOCKS (1 << ERTS_PIX_LOCKS_BITS)
@@ -260,12 +277,10 @@ typedef struct {
} u;
} erts_pix_lock_t;
-#define ERTS_PIX2PIXLOCKIX(PIX) \
- ((PIX) & ((1 << ERTS_PIX_LOCKS_BITS) - 1))
-#define ERTS_PIX2PIXLOCK(PIX) \
- (&erts_pix_locks[ERTS_PIX2PIXLOCKIX((PIX))])
#define ERTS_PID2PIXLOCK(PID) \
- ERTS_PIX2PIXLOCK(internal_pid_data((PID)))
+ (&erts_pix_locks[(internal_pid_data((PID)) & ((1 << ERTS_PIX_LOCKS_BITS) - 1))])
+
+#if ERTS_PROC_LOCK_OWN_IMPL
#if ERTS_PROC_LOCK_ATOMIC_IMPL
@@ -335,11 +350,13 @@ erts_proc_lock_flags_cmpxchg(erts_proc_lock_t *lck, ErtsProcLocks new,
#define ERTS_PROC_LOCK_FLGS_READ_(L) ((L)->flags)
#endif /* end no opt atomic ops */
+#endif /* ERTS_PROC_LOCK_OWN_IMPL */
extern erts_pix_lock_t erts_pix_locks[ERTS_NO_OF_PIX_LOCKS];
void erts_init_proc_lock(int cpus);
void erts_proc_lock_prepare_proc_lock_waiter(void);
+#if ERTS_PROC_LOCK_OWN_IMPL
void erts_proc_lock_failed(Process *,
erts_pix_lock_t *,
ErtsProcLocks,
@@ -347,6 +364,7 @@ void erts_proc_lock_failed(Process *,
void erts_proc_unlock_failed(Process *,
erts_pix_lock_t *,
ErtsProcLocks);
+#endif
ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *);
ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *);
@@ -410,6 +428,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck)
ERTS_GLB_INLINE ErtsProcLocks
erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
{
+#if ERTS_PROC_LOCK_OWN_IMPL
ErtsProcLocks expct_lflgs = 0;
while (1) {
@@ -429,8 +448,38 @@ erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks)
/* cmpxchg failed, try again (should be rare). */
expct_lflgs = lflgs;
}
-}
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ if (erts_mtx_trylock(&p->lock.main) == EBUSY)
+ goto busy_main;
+ if (locks & ERTS_PROC_LOCK_LINK)
+ if (erts_mtx_trylock(&p->lock.link) == EBUSY)
+ goto busy_link;
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ if (erts_mtx_trylock(&p->lock.msgq) == EBUSY)
+ goto busy_msgq;
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ if (erts_mtx_trylock(&p->lock.status) == EBUSY)
+ goto busy_status;
+
+ return 0;
+
+busy_status:
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ erts_mtx_unlock(&p->lock.msgq);
+busy_msgq:
+ if (locks & ERTS_PROC_LOCK_LINK)
+ erts_mtx_unlock(&p->lock.link);
+busy_link:
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ erts_mtx_unlock(&p->lock.main);
+busy_main:
+
+ return EBUSY;
+#endif
+}
ERTS_GLB_INLINE void
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -444,10 +493,13 @@ erts_smp_proc_lock__(Process *p,
ErtsProcLocks locks)
#endif
{
+#if ERTS_PROC_LOCK_OWN_IMPL
+
ErtsProcLocks old_lflgs;
#if !ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lck);
#endif
+
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_proc_lock(&(p->lock), locks);
#endif
@@ -471,12 +523,14 @@ erts_smp_proc_lock__(Process *p,
erts_pix_unlock(pix_lck);
}
#endif
+
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_proc_lock_post_x(&(p->lock), locks, file, line);
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_proc_lc_lock(p, locks);
#endif
+
#ifdef ERTS_PROC_LOCK_DEBUG
erts_proc_lock_op_debug(p, locks, 1);
#endif
@@ -484,6 +538,22 @@ erts_smp_proc_lock__(Process *p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
ETHR_COMPILER_BARRIER;
#endif
+
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ erts_mtx_lock(&p->lock.main);
+ if (locks & ERTS_PROC_LOCK_LINK)
+ erts_mtx_lock(&p->lock.link);
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ erts_mtx_lock(&p->lock.msgq);
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ erts_mtx_lock(&p->lock.status);
+
+#ifdef ERTS_PROC_LOCK_DEBUG
+ erts_proc_lock_op_debug(p, locks, 1);
+#endif
+
+#endif
}
ERTS_GLB_INLINE void
@@ -491,6 +561,7 @@ erts_smp_proc_unlock__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks)
{
+#if ERTS_PROC_LOCK_OWN_IMPL
ErtsProcLocks old_lflgs;
#if ERTS_PROC_LOCK_ATOMIC_IMPL
@@ -555,6 +626,23 @@ erts_smp_proc_unlock__(Process *p,
break;
}
+
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+
+#ifdef ERTS_PROC_LOCK_DEBUG
+ erts_proc_lock_op_debug(p, locks, 0);
+#endif
+
+ if (locks & ERTS_PROC_LOCK_STATUS)
+ erts_mtx_unlock(&p->lock.status);
+ if (locks & ERTS_PROC_LOCK_MSGQ)
+ erts_mtx_unlock(&p->lock.msgq);
+ if (locks & ERTS_PROC_LOCK_LINK)
+ erts_mtx_unlock(&p->lock.link);
+ if (locks & ERTS_PROC_LOCK_MAIN)
+ erts_mtx_unlock(&p->lock.main);
+#endif
+
}
ERTS_GLB_INLINE int
@@ -562,6 +650,7 @@ erts_smp_proc_trylock__(Process *p,
erts_pix_lock_t *pix_lck,
ErtsProcLocks locks)
{
+#if ERTS_PROC_LOCK_OWN_IMPL
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -573,6 +662,7 @@ erts_smp_proc_trylock__(Process *p,
else
#endif
{
+
#if !ERTS_PROC_LOCK_ATOMIC_IMPL
erts_pix_lock(pix_lck);
#endif
@@ -611,8 +701,18 @@ erts_smp_proc_trylock__(Process *p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
ETHR_COMPILER_BARRIER;
#endif
-
return res;
+
+#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
+ if (erts_smp_proc_raw_trylock__(p, locks) != 0)
+ return EBUSY;
+ else {
+#ifdef ERTS_PROC_LOCK_DEBUG
+ erts_proc_lock_op_debug(p, locks, 1);
+#endif
+ return 0;
+ }
+#endif
}
#ifdef ERTS_PROC_LOCK_DEBUG
@@ -667,7 +767,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks, file, line);
#elif defined(ERTS_SMP)
@@ -675,7 +775,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks);
#endif /*ERTS_SMP*/
@@ -689,7 +789,7 @@ erts_smp_proc_unlock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
#endif
@@ -705,50 +805,34 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
#else
- ERTS_PID2PIXLOCK(p->id),
+ ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
#endif
}
-
ERTS_GLB_INLINE void erts_smp_proc_inc_refc(Process *p)
{
#ifdef ERTS_SMP
- erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id);
- erts_pix_lock(pixlck);
- ERTS_LC_ASSERT(p->lock.refc > 0);
- p->lock.refc++;
- erts_pix_unlock(pixlck);
+ erts_ptab_inc_refc(&p->common);
#endif
}
ERTS_GLB_INLINE void erts_smp_proc_dec_refc(Process *p)
{
#ifdef ERTS_SMP
- Process *fp;
- erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id);
- erts_pix_lock(pixlck);
- ERTS_LC_ASSERT(p->lock.refc > 0);
- fp = --p->lock.refc == 0 ? p : NULL;
- erts_pix_unlock(pixlck);
- if (fp)
- erts_free_proc(fp);
+ int referred = erts_ptab_dec_test_refc(&p->common);
+ if (!referred)
+ erts_free_proc(p);
#endif
}
-ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 refc)
+ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 add_refc)
{
#ifdef ERTS_SMP
- Process *fp;
- erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id);
- erts_pix_lock(pixlck);
- ERTS_LC_ASSERT(p->lock.refc > 0);
- p->lock.refc += refc;
- fp = p->lock.refc == 0 ? p : NULL;
- erts_pix_unlock(pixlck);
- if (fp)
- erts_free_proc(fp);
+ int referred = erts_ptab_add_test_refc(&p->common, add_refc);
+ if (!referred)
+ erts_free_proc(p);
#endif
}
@@ -756,6 +840,7 @@ ERTS_GLB_INLINE void erts_smp_proc_add_refc(Process *p, Sint32 refc)
#ifdef ERTS_SMP
void erts_proc_lock_init(Process *);
+void erts_proc_lock_fin(Process *);
void erts_proc_safelock(Process *a_proc,
ErtsProcLocks a_have_locks,
ErtsProcLocks a_need_locks,
@@ -782,217 +867,71 @@ void erts_proc_safelock(Process *a_proc,
#define ERTS_P2P_FLG_TRY_LOCK (1 << 1)
#define ERTS_P2P_FLG_SMP_INC_REFC (1 << 2)
-#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_proc_lock_busy)
-extern const Process erts_proc_lock_busy;
+#define ERTS_PROC_LOCK_BUSY ((Process *) &erts_invalid_process)
#define erts_pid2proc(PROC, HL, PID, NL) \
erts_pid2proc_opt((PROC), (HL), (PID), (NL), 0)
-ERTS_GLB_INLINE Process *
-erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int);
-#ifdef ERTS_SMP
-void
-erts_pid2proc_safelock(Process *c_p,
- ErtsProcLocks c_p_have_locks,
- Process **proc,
- ErtsProcLocks need_locks,
- erts_pix_lock_t *pix_lock,
- int flags);
-ERTS_GLB_INLINE Process *erts_pid2proc_unlocked_opt(Eterm pid, int flags);
-#define erts_pid2proc_unlocked(PID) erts_pid2proc_unlocked_opt((PID), 0)
-#else
-#define erts_pid2proc_unlocked_opt(PID, FLGS) \
- erts_pid2proc_opt(NULL, 0, (PID), 0, FLGS)
-#define erts_pid2proc_unlocked(PID) erts_pid2proc_opt(NULL, 0, (PID), 0, 0)
+ERTS_GLB_INLINE Process *erts_pix2proc(int ix);
+ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid);
+ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid);
+
+#ifndef ERTS_SMP
+ERTS_GLB_INLINE
#endif
+Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-ERTS_GLB_INLINE Process *
-#ifdef ERTS_SMP
-erts_pid2proc_unlocked_opt(Eterm pid, int flags)
-#else
-erts_pid2proc_opt(Process *c_p_unused,
- ErtsProcLocks c_p_have_locks_unused,
- Eterm pid,
- ErtsProcLocks pid_need_locks_unused,
- int flags)
-#endif
+ERTS_GLB_INLINE Process *erts_pix2proc(int ix)
{
- Uint pix;
Process *proc;
+ ASSERT(0 <= ix && ix < erts_ptab_max(&erts_proc));
+ proc = (Process *) erts_ptab_pix2intptr_nob(&erts_proc, ix);
+ return proc == ERTS_PROC_LOCK_BUSY ? NULL : proc;
+}
+
+ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid)
+{
+ Process *proc;
+
+ ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
if (is_not_internal_pid(pid))
return NULL;
- pix = internal_pid_index(pid);
- if(pix >= erts_max_processes)
+
+ proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
+ internal_pid_index(pid));
+ if (proc && proc->common.id != pid)
return NULL;
- proc = process_tab[pix];
- if (proc) {
- if (proc->id != pid
- || (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
- && proc->status == P_EXITING))
- proc = NULL;
- }
return proc;
}
-#ifdef ERTS_SMP
+ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid)
+{
+ Process *proc = erts_proc_lookup_raw(pid);
+ if (proc && ERTS_PROC_IS_EXITING(proc))
+ return NULL;
+ return proc;
+}
+#ifndef ERTS_SMP
ERTS_GLB_INLINE Process *
-erts_pid2proc_opt(Process *c_p,
- ErtsProcLocks c_p_have_locks,
+erts_pid2proc_opt(Process *c_p_unused,
+ ErtsProcLocks c_p_have_locks_unused,
Eterm pid,
- ErtsProcLocks pid_need_locks,
+ ErtsProcLocks pid_need_locks_unused,
int flags)
{
- erts_pix_lock_t *pix_lock;
- ErtsProcLocks need_locks;
- Uint pix;
- Process *proc;
-#ifdef ERTS_ENABLE_LOCK_COUNT
- ErtsProcLocks lcnt_locks;
-#endif
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- if (c_p) {
- ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
- if (might_unlock)
- erts_proc_lc_might_unlock(c_p, might_unlock);
- }
-#endif
- if (is_not_internal_pid(pid)) {
- proc = NULL;
- goto done;
- }
- pix = internal_pid_index(pid);
- if(pix >= erts_max_processes) {
- proc = NULL;
- goto done;
- }
-
- ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
- need_locks = pid_need_locks;
-
- pix_lock = ERTS_PIX2PIXLOCK(pix);
-
- if (c_p && c_p->id == pid) {
- ASSERT(c_p->id != ERTS_INVALID_PID);
- ASSERT(c_p == process_tab[pix]);
- if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) && c_p->is_exiting) {
- proc = NULL;
- goto done;
- }
- need_locks &= ~c_p_have_locks;
- if (!need_locks) {
- proc = c_p;
- erts_pix_lock(pix_lock);
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- proc->lock.refc++;
- erts_pix_unlock(pix_lock);
- goto done;
- }
- }
-
- erts_pix_lock(pix_lock);
-
- proc = process_tab[pix];
- if (proc) {
- if (proc->id != pid || (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
- && ERTS_PROC_IS_EXITING(proc))) {
- proc = NULL;
- }
- else if (!need_locks) {
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- proc->lock.refc++;
- }
- else {
- int busy;
-
-#ifdef ERTS_ENABLE_LOCK_COUNT
- lcnt_locks = need_locks;
- if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
- erts_lcnt_proc_lock(&proc->lock, need_locks);
- }
-#endif
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- /* Make sure erts_pid2proc_safelock() is enough to handle
- a potential lock order violation situation... */
- busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
- if (!busy)
-#endif
- {
- /* Try a quick trylock to grab all the locks we need. */
- busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);
-#ifdef ERTS_ENABLE_LOCK_CHECK
- erts_proc_lc_trylock(proc, need_locks, !busy);
-#endif
-#ifdef ERTS_PROC_LOCK_DEBUG
- if (!busy)
- erts_proc_lock_op_debug(proc, need_locks, 1);
-#endif
- }
-
-#ifdef ERTS_ENABLE_LOCK_COUNT
- if (flags & ERTS_P2P_FLG_TRY_LOCK) {
- if (busy) {
- erts_lcnt_proc_trylock(&proc->lock, need_locks, EBUSY);
- } else {
- erts_lcnt_proc_trylock(&proc->lock, need_locks, 0);
- }
- }
-#endif
- if (!busy) {
- if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
- proc->lock.refc++;
-#ifdef ERTS_ENABLE_LOCK_COUNT
- /* all is great */
- if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
- erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks, __FILE__, __LINE__);
- }
-#endif
- }
- else {
- if (flags & ERTS_P2P_FLG_TRY_LOCK)
- proc = ERTS_PROC_LOCK_BUSY;
- else {
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
-#endif
- erts_pid2proc_safelock(c_p,
- c_p_have_locks,
- &proc,
- pid_need_locks,
- pix_lock,
- flags);
- if (proc && (flags & ERTS_P2P_FLG_SMP_INC_REFC))
- proc->lock.refc++;
- }
- }
- }
- }
-
- erts_pix_unlock(pix_lock);
-#ifdef ERTS_PROC_LOCK_DEBUG
- ERTS_LC_ASSERT(!proc
- || proc == ERTS_PROC_LOCK_BUSY
- || (pid_need_locks ==
- (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
- & pid_need_locks)));
-#endif
-
-
- done:
-
-#if ERTS_PROC_LOCK_ATOMIC_IMPL
- ETHR_COMPILER_BARRIER;
-#endif
-
- return proc;
+ Process *proc = erts_proc_lookup_raw(pid);
+ return ((!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
+ && proc
+ && ERTS_PROC_IS_EXITING(proc))
+ ? NULL
+ : proc);
}
-#endif /* ERTS_SMP */
+#endif /* !ERTS_SMP */
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
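The pid-order rule in the comments above (the same lock type must be taken on the lower pid first, now compared via p->common.id) is what erts_proc_safelock() automates. A hand-rolled sketch of the same rule for the main lock only, assuming no process locks are held on entry; the helper is hypothetical:

    /* Lock the main lock on two (possibly identical) processes in the
     * documented order: lower pid first. */
    static void
    lock_both_main(Process *pa, Process *pb)
    {
        Process *p1 = pa, *p2 = pb;
        if (p1->common.id > p2->common.id) {
            p1 = pb;
            p2 = pa;
        }
        erts_smp_proc_lock(p1, ERTS_PROC_LOCK_MAIN);
        if (p1 != p2)
            erts_smp_proc_lock(p2, ERTS_PROC_LOCK_MAIN);
    }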
diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c
new file mode 100644
index 0000000000..87beeafa1a
--- /dev/null
+++ b/erts/emulator/beam/erl_ptab.c
@@ -0,0 +1,1566 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Process/Port table implementation.
+ *
+ * Author: Rickard Green
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+#define ERTS_PTAB_WANT_BIF_IMPL__
+#define ERTS_PTAB_WANT_DEBUG_FUNCS__
+#include "erl_ptab.h"
+#include "global.h"
+#include "erl_binary.h"
+
+typedef struct ErtsPTabListBifData_ ErtsPTabListBifData;
+
+#define ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED 25
+#define ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE 1000
+#define ERTS_PTAB_LIST_BIF_MIN_START_REDS \
+ (ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE \
+ / ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED)
+
+#define ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS 1
+
+#define ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED 10
+
+#define ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS \
+ (ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE \
+ / ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED)
+
+
+#define ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED 75
+
+#define ERTS_PTAB_LIST_DBG_DO_TRACE 0
+
+#ifdef DEBUG
+# define ERTS_PTAB_LIST_BIF_DEBUGLEVEL 100
+#else
+# define ERTS_PTAB_LIST_BIF_DEBUGLEVEL 0
+#endif
+
+#define ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC 1
+#define ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS 5
+#define ERTS_PTAB_LIST_DBGLVL_CHK_PIDS 10
+#define ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST 20
+#define ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST 20
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL == 0
+# define ERTS_PTAB_LIST_ASSERT(EXP)
+#else
+# define ERTS_PTAB_LIST_ASSERT(EXP) \
+ ((void) ((EXP) \
+ ? 1 \
+ : (debug_ptab_list_assert_error(#EXP, \
+ __FILE__, \
+ __LINE__, \
+ __func__), \
+ 0)))
+#endif
+
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC
+# define ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(PTLBDP, HP, SZ) \
+do { \
+ ERTS_PTAB_LIST_ASSERT(!(PTLBDP)->debug.heap); \
+ ERTS_PTAB_LIST_ASSERT(!(PTLBDP)->debug.heap_size); \
+ (PTLBDP)->debug.heap = (HP); \
+ (PTLBDP)->debug.heap_size = (SZ); \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(PTLBDP, HP) \
+do { \
+ ERTS_PTAB_LIST_ASSERT((PTLBDP)->debug.heap); \
+ ERTS_PTAB_LIST_ASSERT((PTLBDP)->debug.heap_size); \
+ ERTS_PTAB_LIST_ASSERT(((PTLBDP)->debug.heap \
+ + (PTLBDP)->debug.heap_size) \
+ == (HP)); \
+ (PTLBDP)->debug.heap = NULL; \
+ (PTLBDP)->debug.heap_size = 0; \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT(PTLBDP) \
+do { \
+ (PTLBDP)->debug.heap = NULL; \
+ (PTLBDP)->debug.heap_size = 0; \
+} while (0)
+#else
+# define ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(PTLBDP, HP, SZ)
+# define ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(PTLBDP, HP)
+# define ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT(PTLBDP)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST
+# define ERTS_PTAB_LIST_DBG_CHK_RESLIST(R) \
+ debug_ptab_list_check_res_list((R))
+#else
+# define ERTS_PTAB_LIST_DBG_CHK_RESLIST(R)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+# define ERTS_PTAB_LIST_DBG_SAVE_PIDS(PTLBDP) \
+ debug_ptab_list_save_all_pids((PTLBDP))
+# define ERTS_PTAB_LIST_DBG_VERIFY_PIDS(PTLBDP) \
+do { \
+ if (!(PTLBDP)->debug.correct_pids_verified) \
+ debug_ptab_list_verify_all_pids((PTLBDP)); \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS(PTLBDP) \
+do { \
+ if ((PTLBDP)->debug.correct_pids) { \
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, \
+ (PTLBDP)->debug.correct_pids); \
+ (PTLBDP)->debug.correct_pids = NULL; \
+ } \
+} while(0)
+# define ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT(PTLBDP) \
+do { \
+ (PTLBDP)->debug.correct_pids_verified = 0; \
+ (PTLBDP)->debug.correct_pids = NULL; \
+} while (0)
+#else
+# define ERTS_PTAB_LIST_DBG_SAVE_PIDS(PTLBDP)
+# define ERTS_PTAB_LIST_DBG_VERIFY_PIDS(PTLBDP)
+# define ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS(PTLBDP)
+# define ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT(PTLBDP)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+# define ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(PTLBDP, PID, IC) \
+ debug_ptab_list_check_found_pid((PTLBDP), (PID), (IC), 1)
+# define ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(PTLBDP, PID, IC) \
+ debug_ptab_list_check_found_pid((PTLBDP), (PID), (IC), 0)
+#else
+# define ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(PTLBDP, PID, IC)
+# define ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(PTLBDP, PID, IC)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST
+# define ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(PTab) \
+ debug_ptab_list_check_del_list((PTab))
+# define ERTS_PTAB_LIST_DBG_CHK_FREELIST(PTab, FL) \
+ debug_ptab_list_check_del_free_list((PTab), (FL))
+#else
+# define ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(PTab)
+# define ERTS_PTAB_LIST_DBG_CHK_FREELIST(PTab, FL)
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL == 0
+#if ERTS_PTAB_LIST_DBG_DO_TRACE
+# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP) \
+ (PTLBDP)->debug.caller = (P)->common.id
+# else
+# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP)
+# endif
+# define ERTS_PTAB_LIST_DBG_CLEANUP(PTLBDP)
+#else
+# define ERTS_PTAB_LIST_DBG_INIT(P, PTLBDP) \
+do { \
+ (PTLBDP)->debug.caller = (P)->common.id; \
+ ERTS_PTAB_LIST_DBG_HEAP_ALLOC_INIT((PTLBDP)); \
+ ERTS_PTAB_LIST_DBG_CHK_PIDS_INIT((PTLBDP)); \
+} while (0)
+# define ERTS_PTAB_LIST_DBG_CLEANUP(PTLBDP) \
+do { \
+ ERTS_PTAB_LIST_DBG_CLEANUP_CHK_PIDS((PTLBDP)); \
+} while (0)
+#endif
+
+#if ERTS_PTAB_LIST_DBG_DO_TRACE
+# define ERTS_PTAB_LIST_DBG_TRACE(PID, WHAT) \
+ erts_fprintf(stderr, "%T %s:%d:%s(): %s\n", \
+ (PID), __FILE__, __LINE__, __func__, #WHAT)
+#else
+# define ERTS_PTAB_LIST_DBG_TRACE(PID, WHAT)
+#endif
+
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0
+static void debug_ptab_list_assert_error(char* expr,
+ const char* file,
+ int line,
+ const char *func);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST
+static void debug_ptab_list_check_res_list(Eterm list);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+static void debug_ptab_list_save_all_pids(ErtsPTabListBifData *ptlbdp);
+static void debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+static void debug_ptab_list_check_found_pid(ErtsPTabListBifData *ptlbdp,
+ Eterm pid,
+ Uint64 ic,
+ int pid_should_be_found);
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST
+static void debug_ptab_list_check_del_list(ErtsPTab *ptab);
+static void debug_ptab_list_check_del_free_list(ErtsPTab *ptab,
+ ErtsPTabDeletedElement *ptdep);
+#endif
+
+struct ErtsPTabDeletedElement_ {
+ ErtsPTabDeletedElement *next;
+ ErtsPTabDeletedElement *prev;
+ int ix;
+ union {
+ struct {
+ Eterm id;
+ Uint64 inserted;
+ Uint64 deleted;
+ } element;
+ struct {
+ Uint64 interval;
+ } bif_invocation;
+ } u;
+};
+
+static Export ptab_list_continue_export;
+
+typedef struct {
+ Uint64 interval;
+} ErtsPTabListBifChunkInfo;
+
+typedef enum {
+ INITIALIZING,
+ INSPECTING_TABLE,
+ INSPECTING_DELETED,
+ BUILDING_RESULT,
+ RETURN_RESULT
+} ErtsPTabListBifState;
+
+struct ErtsPTabListBifData_ {
+ ErtsPTab *ptab;
+ ErtsPTabListBifState state;
+ Eterm caller;
+ ErtsPTabListBifChunkInfo *chunk;
+ int tix;
+ int pid_ix;
+ int pid_sz;
+ Eterm *pid;
+ ErtsPTabDeletedElement *bif_invocation; /* Only used when > 1 chunk */
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0 || ERTS_PTAB_LIST_DBG_DO_TRACE
+ struct {
+ Eterm caller;
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ Uint64 *pid_started;
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_HALLOC
+ Eterm *heap;
+ Uint heap_size;
+#endif
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+ int correct_pids_verified;
+ Eterm *correct_pids;
+#endif
+ } debug;
+#endif
+
+};
+
+#ifdef ARCH_32
+
+static ERTS_INLINE Uint64
+dw_aint_to_uint64(erts_dw_aint_t *dw)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (Uint64) dw->dw_sint;
+#else
+ Uint64 res;
+ res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
+ return res;
+#endif
+}
+
+static void
+unint64_to_dw_aint(erts_dw_aint_t *dw, Uint64 val)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ dw->dw_sint = (ETHR_SU_DW_NAINT_T__) val;
+#else
+ dw->sint[ERTS_DW_AINT_LOW_WORD] = (erts_aint_t) (val & 0xffffffff);
+ dw->sint[ERTS_DW_AINT_HIGH_WORD] = (erts_aint_t) ((val >> 32) & 0xffffffff);
+#endif
+}
+
+static ERTS_INLINE void
+last_data_init_nob(ErtsPTab *ptab, Uint64 val)
+{
+ erts_dw_aint_t dw;
+ unint64_to_dw_aint(&dw, val);
+ erts_smp_dw_atomic_init_nob(&ptab->vola.tile.last_data, &dw);
+}
+
+static ERTS_INLINE void
+last_data_set_relb(ErtsPTab *ptab, Uint64 val)
+{
+ erts_dw_aint_t dw;
+ unint64_to_dw_aint(&dw, val);
+ erts_smp_dw_atomic_set_relb(&ptab->vola.tile.last_data, &dw);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_nob(ErtsPTab *ptab)
+{
+ erts_dw_aint_t dw;
+ erts_smp_dw_atomic_read_nob(&ptab->vola.tile.last_data, &dw);
+ return dw_aint_to_uint64(&dw);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_acqb(ErtsPTab *ptab)
+{
+ erts_dw_aint_t dw;
+ erts_smp_dw_atomic_read_acqb(&ptab->vola.tile.last_data, &dw);
+ return dw_aint_to_uint64(&dw);
+}
+
+static ERTS_INLINE Uint64
+last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
+{
+ erts_dw_aint_t dw_new, dw_xchg;
+
+ unint64_to_dw_aint(&dw_new, new);
+ unint64_to_dw_aint(&dw_xchg, exp);
+
+ if (erts_smp_dw_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
+ &dw_new,
+ &dw_xchg))
+ return exp;
+ else
+ return dw_aint_to_uint64(&dw_xchg);
+}
+
+#elif defined(ARCH_64)
+
+union {
+ erts_smp_atomic_t pid_data;
+ char align[ERTS_CACHE_LINE_SIZE];
+} last erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static ERTS_INLINE void
+last_data_init_nob(ErtsPTab *ptab, Uint64 val)
+{
+ erts_smp_atomic_init_nob(&ptab->vola.tile.last_data, (erts_aint_t) val);
+}
+
+static ERTS_INLINE void
+last_data_set_relb(ErtsPTab *ptab, Uint64 val)
+{
+ erts_smp_atomic_set_relb(&ptab->vola.tile.last_data, (erts_aint_t) val);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_nob(ErtsPTab *ptab)
+{
+ return (Uint64) erts_smp_atomic_read_nob(&ptab->vola.tile.last_data);
+}
+
+static ERTS_INLINE Uint64
+last_data_read_acqb(ErtsPTab *ptab)
+{
+ return (Uint64) erts_smp_atomic_read_acqb(&ptab->vola.tile.last_data);
+}
+
+static ERTS_INLINE Uint64
+last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp)
+{
+ return (Uint64) erts_smp_atomic_cmpxchg_relb(&ptab->vola.tile.last_data,
+ (erts_aint_t) new,
+ (erts_aint_t) exp);
+}
+
+#else
+# error "Not 64-bit, nor 32-bit architecture..."
+#endif
+
+static ERTS_INLINE int
+last_data_cmp(Uint64 ld1, Uint64 ld2)
+{
+ Uint64 ld1_wrap;
+
+ if (ld1 == ld2)
+ return 0;
+
+ ld1_wrap = ld1 + (((Uint64) 1) << 63);
+
+ if (ld1 < ld1_wrap)
+ return (ld1 < ld2 && ld2 < ld1_wrap) ? -1 : 1;
+ else
+ return (ld1_wrap <= ld2 && ld2 < ld1) ? 1 : -1;
+}
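last_data_cmp() orders the 64-bit 'last data' counters modulo 2^64: ld2 is considered newer than ld1 whenever it lies in the half-range ahead of ld1, so a counter that has wrapped past zero still compares as newer than one near the maximum. A small worked illustration:

    /* ld1 is close to the 64-bit maximum, ld2 has wrapped past zero;
     * last_data_cmp(ld1, ld2) returns -1, i.e. ld1 orders before ld2. */
    Uint64 ld1 = ~((Uint64) 0) - 1;   /* 0xFFFFFFFFFFFFFFFE */
    Uint64 ld2 = 3;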
+
+#define ERTS_PTAB_LastData2EtermData(LD) \
+ ((Eterm) ((LD) & ~(~((Uint64) 0) << ERTS_PTAB_ID_DATA_SIZE)))
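+
+/*
+ * Example: with ERTS_PTAB_ID_DATA_SIZE == 28 (see erl_ptab.h) the macro
+ * keeps only the low 28 bits of the 64-bit counter, e.g.
+ * ERTS_PTAB_LastData2EtermData(0x123456789) == 0x03456789.
+ */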
+
+void
+erts_ptab_init_table(ErtsPTab *ptab,
+ ErtsAlcType_t atype,
+ void (*release_element)(void *),
+ ErtsPTabElementCommon *invalid_element,
+ int size,
+ char *name)
+{
+ size_t tab_sz;
+ int bits;
+ char *tab_end;
+ erts_smp_atomic_t *tab_entry;
+ erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name);
+ erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0);
+ last_data_init_nob(ptab, ~((Uint64) 0));
+
+ /* A table size that is a power of 2 is preferable for performance */
+ bits = erts_fit_in_bits_int32(size-1);
+ size = 1 << bits;
+ if (size > ERTS_PTAB_MAX_SIZE) {
+ size = ERTS_PTAB_MAX_SIZE;
+ bits = erts_fit_in_bits_int32((Sint32) size - 1);
+ }
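+ /*
+ * Example (assuming erts_fit_in_bits_int32(x) returns the number of
+ * bits needed to represent x): a requested size of 100000 gives
+ * bits = 17 and an actual table size of 1 << 17 = 131072.
+ */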
+
+ ptab->r.o.max = size;
+
+ tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t));
+ ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, tab_sz);
+ tab_end = ((char *) ptab->r.o.tab) + tab_sz;
+ tab_entry = ptab->r.o.tab;
+ while (tab_end > ((char *) tab_entry)) {
+ erts_smp_atomic_init_nob(tab_entry, ERTS_AINT_NULL);
+ tab_entry++;
+ }
+
+ ptab->r.o.tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE;
+ ptab->r.o.pix_per_cache_line = (ERTS_CACHE_LINE_SIZE
+ / sizeof(erts_smp_atomic_t));
+ ASSERT((ptab->r.o.max & (ptab->r.o.max - 1)) == 0); /* power of 2 */
+ ASSERT((ptab->r.o.pix_per_cache_line
+ & (ptab->r.o.pix_per_cache_line - 1)) == 0); /* power of 2 */
+ ASSERT((ptab->r.o.tab_cache_lines
+ & (ptab->r.o.tab_cache_lines - 1)) == 0); /* power of 2 */
+
+ ptab->r.o.pix_mask
+ = (1 << bits) - 1;
+ ptab->r.o.pix_cl_mask
+ = ptab->r.o.tab_cache_lines-1;
+ ptab->r.o.pix_cl_shift
+ = erts_fit_in_bits_int32(ptab->r.o.pix_per_cache_line-1);
+ ptab->r.o.pix_cli_shift
+ = erts_fit_in_bits_int32(ptab->r.o.pix_cl_mask);
+ ptab->r.o.pix_cli_mask
+ = (1 << (bits - ptab->r.o.pix_cli_shift)) - 1;
+
+ ASSERT(ptab->r.o.pix_cl_shift + ptab->r.o.pix_cli_shift == bits);
+
+ ptab->r.o.invalid_element = invalid_element;
+ ptab->r.o.invalid_data = erts_ptab_id2data(ptab, invalid_element->id);
+ ptab->r.o.release_element = release_element;
+
+ erts_smp_interval_init(&ptab->list.data.interval);
+ ptab->list.data.deleted.start = NULL;
+ ptab->list.data.deleted.end = NULL;
+ ptab->list.data.chunks = (((ptab->r.o.max - 1)
+ / ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE)
+ + 1);
+
+ if (size == ERTS_PTAB_MAX_SIZE) {
+ int pix;
+ /*
+ * We want a table size that is a power of 2, which ERTS_PTAB_MAX_SIZE
+ * is. However, we only have ERTS_PTAB_MAX_SIZE-1 unique identifiers
+ * and we don't want to shrink the size to ERTS_PTAB_MAX_SIZE/2.
+ *
+ * In order to fix this, we insert a pointer from the table
+ * to the invalid_element, which will be interpreted as a
+ * slot currently being modified. This way we will be able to
+ * have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while
+ * still having a table size that is a power of 2.
+ */
+ erts_smp_atomic32_inc_nob(&ptab->vola.tile.count);
+ pix = erts_ptab_data2pix(ptab, ptab->r.o.invalid_data);
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix],
+ (erts_aint_t) ptab->r.o.invalid_element);
+ }
+
+}
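+
+/*
+ * Hypothetical usage sketch (illustration only; the table, element and
+ * allocator-type names below are assumptions and not part of this
+ * change). The invalid element's id must be set before the call, since
+ * erts_ptab_init_table() derives invalid_data from it.
+ *
+ *   static ErtsPTabElementCommon my_invalid_el; // id assigned elsewhere
+ *   static ErtsPTab my_tab;
+ *
+ *   erts_ptab_init_table(&my_tab,
+ *                        ERTS_ALC_T_PROC_TABLE, // assumed allocator type
+ *                        NULL,                  // no release callback
+ *                        &my_invalid_el,
+ *                        1 << 15,               // requested size
+ *                        "my_tab");
+ */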
+
+int
+erts_ptab_initialized(ErtsPTab *ptab)
+{
+ return ptab->r.o.tab != NULL;
+}
+
+int
+erts_ptab_new_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el,
+ void *init_arg,
+ void (*init_ptab_el)(void *, Eterm))
+{
+ int pix;
+ Uint64 ld, exp_ld;
+ Eterm data;
+ erts_aint32_t count;
+ erts_aint_t invalid = (erts_aint_t) ptab->r.o.invalid_element;
+
+ erts_ptab_rlock(ptab);
+
+ count = erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.count);
+ if (count > ptab->r.o.max) {
+ while (1) {
+ erts_aint32_t act_count;
+
+ act_count = erts_smp_atomic32_cmpxchg_relb(&ptab->vola.tile.count,
+ count-1,
+ count);
+ if (act_count == count) {
+ erts_ptab_runlock(ptab);
+ return 0;
+ }
+ count = act_count;
+ if (count <= ptab->r.o.max)
+ break;
+ }
+ }
+
+ ptab_el->u.alive.started_interval
+ = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+
+ ld = last_data_read_acqb(ptab);
+
+ /* Reserve slot */
+ while (1) {
+ ld++;
+ pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld));
+ if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix]) == ERTS_AINT_NULL) {
+ erts_aint_t val;
+ val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix],
+ invalid,
+ ERTS_AINT_NULL);
+
+ if (ERTS_AINT_NULL == val)
+ break;
+ }
+ }
+
+ data = ERTS_PTAB_LastData2EtermData(ld);
+
+ if (data == ptab->r.o.invalid_data) {
+ /* Do not use invalid data; fix it... */
+ ld += ptab->r.o.max;
+ ASSERT(pix == erts_ptab_data2pix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ ASSERT(data != ptab->r.o.invalid_data);
+ }
+
+ exp_ld = last_data_read_nob(ptab);
+
+ /* Move last data forward */
+ while (1) {
+ Uint64 act_ld;
+ if (last_data_cmp(ld, exp_ld) < 0)
+ break;
+ act_ld = last_data_cmpxchg_relb(ptab, ld, exp_ld);
+ if (act_ld == exp_ld)
+ break;
+ exp_ld = act_ld;
+ }
+
+ init_ptab_el(init_arg, data);
+
+#ifdef ERTS_SMP
+ erts_smp_atomic32_init_nob(&ptab_el->refc, 1);
+#endif
+
+ /* Move into slot reserved */
+#ifdef DEBUG
+ ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix],
+ (erts_aint_t) ptab_el));
+#else
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el);
+#endif
+
+ erts_ptab_runlock(ptab);
+
+ return 1;
+}
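+
+/*
+ * Hypothetical caller sketch (illustration only; the element type and
+ * callback name are assumptions, not part of this change). The init
+ * callback receives the data part of the new identifier; the full id
+ * is built with erts_ptab_make_id() from erl_ptab.h.
+ *
+ *   static void init_my_element(void *arg, Eterm data)
+ *   {
+ *       MyElement *el = (MyElement *) arg;
+ *       el->common.id = erts_ptab_make_id(&my_tab, data, _TAG_IMMED1_PID);
+ *   }
+ *
+ *   // ...
+ *   if (!erts_ptab_new_element(&my_tab, &el->common, el, init_my_element)) {
+ *       // table is full; report the system_limit condition to the caller
+ *   }
+ */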
+
+static void
+save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el)
+{
+ ErtsPTabDeletedElement *ptdep = erts_alloc(ERTS_ALC_T_PTAB_LIST_DEL,
+ sizeof(ErtsPTabDeletedElement));
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start
+ && ptab->list.data.deleted.end);
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ ptdep->prev = ptab->list.data.deleted.end;
+ ptdep->next = NULL;
+ ptdep->ix = erts_ptab_id2pix(ptab, ptab_el->id);
+ ptdep->u.element.id = ptab_el->id;
+ ptdep->u.element.inserted = ptab_el->u.alive.started_interval;
+ ptdep->u.element.deleted =
+ erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+
+ ptab->list.data.deleted.end->next = ptdep;
+ ptab->list.data.deleted.end = ptdep;
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev->ix >= 0
+ ? (ptdep->u.element.deleted
+ >= ptdep->prev->u.element.deleted)
+ : (ptdep->u.element.deleted
+ >= ptdep->prev->u.bif_invocation.interval));
+}
+
+void
+erts_ptab_delete_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el)
+{
+ int maybe_save;
+ int pix = erts_ptab_id2pix(ptab, ptab_el->id);
+
+ ASSERT(erts_get_scheduler_id()); /* *Need* to be a scheduler */
+
+ erts_ptab_rlock(ptab);
+ maybe_save = ptab->list.data.deleted.end != NULL;
+ if (maybe_save) {
+ erts_ptab_runlock(ptab);
+ erts_ptab_rwlock(ptab);
+ }
+
+ erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL);
+
+ ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0);
+ erts_smp_atomic32_dec_relb(&ptab->vola.tile.count);
+
+ if (!maybe_save)
+ erts_ptab_runlock(ptab);
+ else {
+ if (ptab->list.data.deleted.end)
+ save_deleted_element(ptab, ptab_el);
+ erts_ptab_rwunlock(ptab);
+ }
+
+ if (ptab->r.o.release_element)
+ erts_schedule_thr_prgr_later_op(ptab->r.o.release_element,
+ (void *) ptab_el,
+ &ptab_el->u.release);
+}
+
+/*
+ * erts_ptab_list() implements BIFs listing the content of the table,
+ * e.g. erlang:processes/0.
+ */
+static void cleanup_ptab_list_bif_data(Binary *bp);
+static int ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp);
+
+
+BIF_RETTYPE
+erts_ptab_list(Process *c_p, ErtsPTab *ptab)
+{
+ /*
+ * A requirement: The list of identifiers returned should be a
+ * consistent snapshot of all elements existing
+ * in the table at some point in time during the
+ * execution of the BIF calling this function.
+ * Since elements might be deleted while the BIF
+ * is executing, we have to keep track of all
+ * deleted elements and add them to the result.
+ * We also ignore elements created after the BIF
+ * has begun executing.
+ */
+ BIF_RETTYPE ret_val;
+ Eterm res_acc = NIL;
+ Binary *mbp = erts_create_magic_binary(sizeof(ErtsPTabListBifData),
+ cleanup_ptab_list_bif_data);
+ ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(mbp);
+
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, call);
+ ptlbdp->ptab = ptab;
+ ptlbdp->state = INITIALIZING;
+ ERTS_PTAB_LIST_DBG_INIT(c_p, ptlbdp);
+
+ if (ERTS_BIF_REDS_LEFT(c_p) >= ERTS_PTAB_LIST_BIF_MIN_START_REDS
+ && ptab_list_bif_engine(c_p, &res_acc, mbp)) {
+ erts_bin_free(mbp);
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, return);
+ ERTS_BIF_PREP_RET(ret_val, res_acc);
+ }
+ else {
+ Eterm *hp;
+ Eterm magic_bin;
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res_acc);
+ hp = HAlloc(c_p, PROC_BIN_SIZE);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, PROC_BIN_SIZE);
+ magic_bin = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp);
+ ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, trap);
+ ERTS_BIF_PREP_YIELD2(ret_val,
+ &ptab_list_continue_export,
+ c_p,
+ res_acc,
+ magic_bin);
+ }
+ return ret_val;
+}
+
+static void
+cleanup_ptab_list_bif_data(Binary *bp)
+{
+ ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(bp);
+ ErtsPTab *ptab = ptlbdp->ptab;
+
+ ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, call);
+
+ if (ptlbdp->state != INITIALIZING) {
+
+ if (ptlbdp->chunk) {
+ erts_free(ERTS_ALC_T_PTAB_LIST_CNKI, ptlbdp->chunk);
+ ptlbdp->chunk = NULL;
+ }
+ if (ptlbdp->pid) {
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->pid);
+ ptlbdp->pid = NULL;
+ }
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ if (ptlbdp->debug.pid_started) {
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->debug.pid_started);
+ ptlbdp->debug.pid_started = NULL;
+ }
+#endif
+
+ if (ptlbdp->bif_invocation) {
+ ErtsPTabDeletedElement *ptdep;
+
+ erts_ptab_rwlock(ptab);
+
+ ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, deleted_cleanup);
+
+ ptdep = ptlbdp->bif_invocation;
+ ptlbdp->bif_invocation = NULL;
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ if (ptdep->prev) {
+ /*
+ * Only remove this bif invocation when we
+ * have preceding invocations.
+ */
+ ptdep->prev->next = ptdep->next;
+ if (ptdep->next)
+ ptdep->next->prev = ptdep->prev;
+ else {
+ /*
+ * At the time of writing this branch cannot be
+ * reached. I don't want to remove this code though
+ * since it may be possible to reach this line
+ * in the future if the cleanup order in
+ * erts_do_exit_process() is changed. The ASSERT(0)
+ * is only here to make us aware that the reorder
+ * has happened. /rickard
+ */
+ ASSERT(0);
+ ptab->list.data.deleted.end = ptdep->prev;
+ }
+ erts_free(ERTS_ALC_T_PTAB_LIST_DEL, ptdep);
+ }
+ else {
+ /*
+ * Free all elements until the next bif invocation
+ * is found.
+ */
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep);
+ do {
+ ErtsPTabDeletedElement *fptdep = ptdep;
+ ptdep = ptdep->next;
+ erts_free(ERTS_ALC_T_PTAB_LIST_DEL, fptdep);
+ } while (ptdep && ptdep->ix >= 0);
+ ptab->list.data.deleted.start = ptdep;
+ if (ptdep)
+ ptdep->prev = NULL;
+ else
+ ptab->list.data.deleted.end = NULL;
+ }
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ erts_ptab_rwunlock(ptab);
+
+ }
+ }
+
+ ERTS_PTAB_LIST_DBG_TRACE(ptlbdp->debug.caller, return);
+ ERTS_PTAB_LIST_DBG_CLEANUP(ptlbdp);
+}
+
+static int
+ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp)
+{
+ ErtsPTabListBifData *ptlbdp = ERTS_MAGIC_BIN_DATA(mbp);
+ ErtsPTab *ptab = ptlbdp->ptab;
+ int have_reds;
+ int reds;
+ int locked = 0;
+
+ do {
+ switch (ptlbdp->state) {
+ case INITIALIZING:
+ ptlbdp->chunk = erts_alloc(ERTS_ALC_T_PTAB_LIST_CNKI,
+ (sizeof(ErtsPTabListBifChunkInfo)
+ * ptab->list.data.chunks));
+ ptlbdp->tix = 0;
+ ptlbdp->pid_ix = 0;
+
+ erts_ptab_rwlock(ptab);
+ locked = 1;
+
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, init);
+
+ ptlbdp->pid_sz = erts_ptab_count(ptab);
+ ptlbdp->pid = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ sizeof(Eterm)*ptlbdp->pid_sz);
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started
+ = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ sizeof(Uint64)*ptlbdp->pid_sz);
+#endif
+
+ ERTS_PTAB_LIST_DBG_SAVE_PIDS(ptlbdp);
+
+ if (ptab->list.data.chunks == 1)
+ ptlbdp->bif_invocation = NULL;
+ else {
+ /*
+ * We will have to access the table multiple times
+ * releasing the table lock in between chunks.
+ */
+ ptlbdp->bif_invocation
+ = erts_alloc(ERTS_ALC_T_PTAB_LIST_DEL,
+ sizeof(ErtsPTabDeletedElement));
+ ptlbdp->bif_invocation->ix = -1;
+ ptlbdp->bif_invocation->u.bif_invocation.interval
+ = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ ptlbdp->bif_invocation->next = NULL;
+ if (ptab->list.data.deleted.end) {
+ ptlbdp->bif_invocation->prev = ptab->list.data.deleted.end;
+ ptab->list.data.deleted.end->next = ptlbdp->bif_invocation;
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start);
+ }
+ else {
+ ptlbdp->bif_invocation->prev = NULL;
+ ptab->list.data.deleted.start = ptlbdp->bif_invocation;
+ }
+ ptab->list.data.deleted.end = ptlbdp->bif_invocation;
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ }
+
+ ptlbdp->state = INSPECTING_TABLE;
+ /* Fall through */
+
+ case INSPECTING_TABLE: {
+ int ix = ptlbdp->tix;
+ int indices = ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ int cix = ix / ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ int end_ix = ix + indices;
+ Uint64 *invocation_interval_p;
+ ErtsPTabElementCommon *invalid_element;
+
+ invocation_interval_p
+ = (ptlbdp->bif_invocation
+ ? &ptlbdp->bif_invocation->u.bif_invocation.interval
+ : NULL);
+
+ ERTS_PTAB_LIST_ASSERT(is_nil(*res_accp));
+ if (!locked) {
+ erts_ptab_rwlock(ptab);
+ locked = 1;
+ }
+
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, insp_table);
+
+ if (cix != 0)
+ ptlbdp->chunk[cix].interval
+ = erts_smp_step_interval_nob(erts_ptab_interval(ptab));
+ else if (ptlbdp->bif_invocation)
+ ptlbdp->chunk[0].interval = *invocation_interval_p;
+ /* else: interval is irrelevant */
+
+ if (end_ix >= ptab->r.o.max) {
+ ERTS_PTAB_LIST_ASSERT(cix+1 == ptab->list.data.chunks);
+ end_ix = ptab->r.o.max;
+ indices = end_ix - ix;
+ /* What to do when done with this chunk */
+ ptlbdp->state = (ptab->list.data.chunks == 1
+ ? BUILDING_RESULT
+ : INSPECTING_DELETED);
+ }
+
+ invalid_element = ptab->r.o.invalid_element;
+ for (; ix < end_ix; ix++) {
+ ErtsPTabElementCommon *el;
+ el = (ErtsPTabElementCommon *) erts_ptab_pix2intptr_nob(ptab,
+ ix);
+ if (el
+ && el != invalid_element
+ && (!invocation_interval_p
+ || el->u.alive.started_interval < *invocation_interval_p)) {
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(el->id));
+ ptlbdp->pid[ptlbdp->pid_ix] = el->id;
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started[ptlbdp->pid_ix]
+ = el->u.alive.started_interval;
+#endif
+
+ ptlbdp->pid_ix++;
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix <= ptlbdp->pid_sz);
+ }
+ }
+
+ ptlbdp->tix = end_ix;
+
+ erts_ptab_rwunlock(ptab);
+ locked = 0;
+
+ reds = indices/ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED;
+ BUMP_REDS(c_p, reds);
+
+ have_reds = ERTS_BIF_REDS_LEFT(c_p);
+
+ if (have_reds && ptlbdp->state == INSPECTING_TABLE) {
+ ix = ptlbdp->tix;
+ indices = ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ end_ix = ix + indices;
+ if (end_ix > ptab->r.o.max) {
+ end_ix = ptab->r.o.max;
+ indices = end_ix - ix;
+ }
+
+ reds = indices/ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED;
+
+ /* Pretend we have no reds left if we haven't got enough
+ reductions to complete the next chunk */
+ if (reds > have_reds)
+ have_reds = 0;
+ }
+
+ break;
+ }
+
+ case INSPECTING_DELETED: {
+ int i;
+ int max_reds;
+ int free_deleted = 0;
+ Uint64 invocation_interval;
+ ErtsPTabDeletedElement *ptdep;
+ ErtsPTabDeletedElement *free_list = NULL;
+
+ ptdep = ptlbdp->bif_invocation;
+ ERTS_PTAB_LIST_ASSERT(ptdep);
+ invocation_interval = ptdep->u.bif_invocation.interval;
+
+ max_reds = have_reds = ERTS_BIF_REDS_LEFT(c_p);
+ if (max_reds > ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS)
+ max_reds = ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS;
+
+ reds = 0;
+ erts_ptab_rwlock(ptab);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, insp_term_procs);
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+
+ if (ptdep->prev)
+ ptdep->prev->next = ptdep->next;
+ else {
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep);
+ ptab->list.data.deleted.start = ptdep->next;
+
+ if (ptab->list.data.deleted.start
+ && ptab->list.data.deleted.start->ix >= 0) {
+ free_list = ptab->list.data.deleted.start;
+ free_deleted = 1;
+ }
+ }
+
+ if (ptdep->next)
+ ptdep->next->prev = ptdep->prev;
+ else
+ ptab->list.data.deleted.end = ptdep->prev;
+
+ ptdep = ptdep->next;
+
+ i = 0;
+ while (reds < max_reds && ptdep) {
+ if (ptdep->ix < 0) {
+ if (free_deleted) {
+ ERTS_PTAB_LIST_ASSERT(free_list);
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev);
+
+ ptdep->prev->next = NULL; /* end of free_list */
+ ptab->list.data.deleted.start = ptdep;
+ ptdep->prev = NULL;
+ free_deleted = 0;
+ }
+ }
+ else {
+ int cix = ptdep->ix/ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE;
+ Uint64 chunk_interval = ptlbdp->chunk[cix].interval;
+ Eterm pid = ptdep->u.element.id;
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(pid));
+
+ if (ptdep->u.element.inserted < invocation_interval) {
+ if (ptdep->u.element.deleted < chunk_interval) {
+ ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(
+ ptlbdp,
+ pid,
+ ptdep->u.element.inserted);
+ ptlbdp->pid[ptlbdp->pid_ix] = pid;
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started[ptlbdp->pid_ix]
+ = ptdep->u.element.inserted;
+#endif
+ ptlbdp->pid_ix++;
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix
+ <= ptlbdp->pid_sz);
+ }
+ else {
+ ERTS_PTAB_LIST_DBG_CHK_PID_FOUND(
+ ptlbdp,
+ pid,
+ ptdep->u.element.inserted);
+ }
+ }
+ else {
+ ERTS_PTAB_LIST_DBG_CHK_PID_NOT_FOUND(
+ ptlbdp,
+ pid,
+ ptdep->u.element.inserted);
+ }
+
+ i++;
+ if (i == ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED) {
+ reds++;
+ i = 0;
+ }
+ if (free_deleted)
+ reds += ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS;
+ }
+ ptdep = ptdep->next;
+ }
+
+ if (free_deleted) {
+ ERTS_PTAB_LIST_ASSERT(free_list);
+ ptab->list.data.deleted.start = ptdep;
+ if (!ptdep)
+ ptab->list.data.deleted.end = NULL;
+ else {
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev);
+ ptdep->prev->next = NULL; /* end of free_list */
+ ptdep->prev = NULL;
+ }
+ }
+
+ if (!ptdep) {
+ /* Done */
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix == ptlbdp->pid_sz);
+ ptlbdp->state = BUILDING_RESULT;
+ ptlbdp->bif_invocation->next = free_list;
+ free_list = ptlbdp->bif_invocation;
+ ptlbdp->bif_invocation = NULL;
+ }
+ else {
+ /* Link in bif_invocation again where we left off */
+ ptlbdp->bif_invocation->prev = ptdep->prev;
+ ptlbdp->bif_invocation->next = ptdep;
+ ptdep->prev = ptlbdp->bif_invocation;
+ if (ptlbdp->bif_invocation->prev)
+ ptlbdp->bif_invocation->prev->next = ptlbdp->bif_invocation;
+ else {
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start
+ == ptdep);
+ ptab->list.data.deleted.start = ptlbdp->bif_invocation;
+ }
+ }
+
+ ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab);
+ ERTS_PTAB_LIST_DBG_CHK_FREELIST(ptab, free_list);
+ erts_ptab_rwunlock(ptab);
+
+ /*
+ * We do the actual free of the deleted structures now, after we
+ * have released the table lock, instead of when we encountered
+ * them. This is because free() isn't free of cost, and we don't
+ * want to unnecessarily block other schedulers.
+ */
+ while (free_list) {
+ ptdep = free_list;
+ free_list = ptdep->next;
+ erts_free(ERTS_ALC_T_PTAB_LIST_DEL, ptdep);
+ }
+
+ have_reds -= reds;
+ if (have_reds < 0)
+ have_reds = 0;
+ BUMP_REDS(c_p, reds);
+ break;
+ }
+
+ case BUILDING_RESULT: {
+ int conses, ix, min_ix;
+ Eterm *hp;
+ Eterm res = *res_accp;
+
+ ERTS_PTAB_LIST_DBG_VERIFY_PIDS(ptlbdp);
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res);
+
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, begin_build_res);
+
+ have_reds = ERTS_BIF_REDS_LEFT(c_p);
+ conses = ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED*have_reds;
+ min_ix = ptlbdp->pid_ix - conses;
+ if (min_ix < 0) {
+ min_ix = 0;
+ conses = ptlbdp->pid_ix;
+ }
+
+ if (conses) {
+ hp = HAlloc(c_p, conses*2);
+ ERTS_PTAB_LIST_DBG_SAVE_HEAP_ALLOC(ptlbdp, hp, conses*2);
+
+ for (ix = ptlbdp->pid_ix - 1; ix >= min_ix; ix--) {
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(ptlbdp->pid[ix]));
+ res = CONS(hp, ptlbdp->pid[ix], res);
+ hp += 2;
+ }
+
+ ERTS_PTAB_LIST_DBG_VERIFY_HEAP_ALLOC_USED(ptlbdp, hp);
+ }
+
+ ptlbdp->pid_ix = min_ix;
+ if (min_ix == 0)
+ ptlbdp->state = RETURN_RESULT;
+ else {
+ ptlbdp->pid_sz = min_ix;
+ ptlbdp->pid = erts_realloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ ptlbdp->pid,
+ sizeof(Eterm)*ptlbdp->pid_sz);
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+ ptlbdp->debug.pid_started
+ = erts_realloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ ptlbdp->debug.pid_started,
+ sizeof(Uint64) * ptlbdp->pid_sz);
+#endif
+ }
+ reds = conses/ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED;
+ BUMP_REDS(c_p, reds);
+ have_reds -= reds;
+
+ ERTS_PTAB_LIST_DBG_CHK_RESLIST(res);
+ ERTS_PTAB_LIST_DBG_TRACE(c_p->common.id, end_build_res);
+ *res_accp = res;
+ break;
+ }
+ case RETURN_RESULT:
+ cleanup_ptab_list_bif_data(mbp);
+ return 1;
+
+ default:
+ erl_exit(ERTS_ABORT_EXIT,
+ "%s:%d:ptab_list_bif_engine(): Invalid state: %d\n",
+ __FILE__, __LINE__, (int) ptlbdp->state);
+ }
+
+
+ } while (have_reds || ptlbdp->state == RETURN_RESULT);
+
+ return 0;
+}
+
+/*
+ * ptab_list_continue/2 is a hidden BIF that the original BIF traps to
+ * if there is too much work to do in one go.
+ */
+
+static BIF_RETTYPE ptab_list_continue(BIF_ALIST_2)
+{
+ Eterm res_acc;
+ Binary *mbp;
+
+ /*
+ * This bif cannot be called from erlang code. It can only be
+ * trapped to from other BIFs; therefore, a bad argument
+ * is an internal error and should never occur...
+ */
+
+ ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, call);
+ ERTS_PTAB_LIST_ASSERT(is_nil(BIF_ARG_1) || is_list(BIF_ARG_1));
+
+ res_acc = BIF_ARG_1;
+
+ ERTS_PTAB_LIST_ASSERT(ERTS_TERM_IS_MAGIC_BINARY(BIF_ARG_2));
+
+ mbp = ((ProcBin *) binary_val(BIF_ARG_2))->val;
+
+ ERTS_PTAB_LIST_ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp)
+ == cleanup_ptab_list_bif_data);
+ ERTS_PTAB_LIST_ASSERT(
+ ((ErtsPTabListBifData *) ERTS_MAGIC_BIN_DATA(mbp))->debug.caller
+ == BIF_P->common.id);
+
+ if (ptab_list_bif_engine(BIF_P, &res_acc, mbp)) {
+ ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, return);
+ BIF_RET(res_acc);
+ }
+ else {
+ ERTS_PTAB_LIST_DBG_TRACE(BIF_P->common.id, trap);
+ ERTS_BIF_YIELD2(&ptab_list_continue_export, BIF_P, res_acc, BIF_ARG_2);
+ }
+}
+
+void
+erts_ptab_init(void)
+{
+ /* ptab_list_continue/2 is a hidden BIF that the original BIF traps to. */
+ erts_init_trap_export(&ptab_list_continue_export,
+ am_erlang, am_ptab_list_continue, 2,
+ &ptab_list_continue);
+
+}
+
+/*
+ * Debug stuff
+ */
+
+Sint
+erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next)
+{
+ Uint64 ld;
+ Sint res;
+ Eterm data;
+ int first_pix = -1;
+
+ erts_ptab_rwlock(ptab);
+
+ if (!set)
+ ld = last_data_read_nob(ptab);
+ else {
+
+ ld = (Uint64) next;
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ if (ptab->r.o.invalid_data == data) {
+ ld += ptab->r.o.max;
+ ASSERT(erts_ptab_data2pix(ptab, data)
+ == erts_ptab_data2pix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ }
+ last_data_set_relb(ptab, ld);
+ }
+
+ while (1) {
+ int pix;
+ ld++;
+ pix = (int) (ld % ptab->r.o.max);
+ if (first_pix < 0)
+ first_pix = pix;
+ else if (pix == first_pix) {
+ res = -1;
+ break;
+ }
+ if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) {
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ if (ptab->r.o.invalid_data == data) {
+ ld += ptab->r.o.max;
+ ASSERT(erts_ptab_data2pix(ptab, data)
+ == erts_ptab_data2pix(ptab,
+ ERTS_PTAB_LastData2EtermData(ld)));
+ data = ERTS_PTAB_LastData2EtermData(ld);
+ }
+ res = data;
+ break;
+ }
+ }
+
+ erts_ptab_rwunlock(ptab);
+
+ return res;
+}
+
+static ERTS_INLINE ErtsPTabElementCommon *
+ptab_pix2el(ErtsPTab *ptab, int ix)
+{
+ ErtsPTabElementCommon *ptab_el;
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ ptab_el = (ErtsPTabElementCommon *) erts_ptab_pix2intptr_nob(ptab, ix);
+ if (ptab_el == ptab->r.o.invalid_element)
+ return NULL;
+ else
+ return ptab_el;
+}
+
+Eterm
+erts_debug_ptab_list(Process *c_p, ErtsPTab *ptab)
+{
+ int i;
+ Uint need;
+ Eterm res;
+ Eterm* hp;
+ Eterm *hp_end;
+
+ erts_ptab_rwlock(ptab);
+
+ res = NIL;
+ need = erts_ptab_count(ptab) * 2;
+ hp = HAlloc(c_p, need); /* we need two heap words for each id */
+ hp_end = hp + need;
+
+ /* make the list by scanning backwards */
+
+ for (i = ptab->r.o.max-1; i >= 0; i--) {
+ ErtsPTabElementCommon *el = ptab_pix2el(ptab, i);
+ if (el) {
+ res = CONS(hp, el->id, res);
+ hp += 2;
+ }
+ }
+
+ erts_ptab_rwunlock(ptab);
+
+ HRelease(c_p, hp_end, hp);
+
+ return res;
+}
+
+Eterm
+erts_debug_ptab_list_bif_info(Process *c_p, ErtsPTab *ptab)
+{
+ ERTS_DECL_AM(ptab_list_bif_info);
+ Eterm elements[] = {
+ AM_ptab_list_bif_info,
+ make_small((Uint) ERTS_PTAB_LIST_BIF_MIN_START_REDS),
+ make_small((Uint) ptab->list.data.chunks),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_CHUNK_SIZE),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_INSPECT_INDICES_PER_RED),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_TAB_FREE_DELETED_REDS),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_INSPECT_DELETED_PER_RED),
+ make_small((Uint) ERTS_PTAB_LIST_INSPECT_DELETED_MAX_REDS),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_BUILD_RESULT_CONSES_PER_RED),
+ make_small((Uint) ERTS_PTAB_LIST_BIF_DEBUGLEVEL)
+ };
+ Uint sz = 0;
+ Eterm *hp;
+ (void) erts_bld_tuplev(NULL, &sz, sizeof(elements)/sizeof(Eterm), elements);
+ hp = HAlloc(c_p, sz);
+ return erts_bld_tuplev(&hp, NULL, sizeof(elements)/sizeof(Eterm), elements);
+}
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_FOUND_PIDS
+static void
+debug_ptab_list_check_found_pid(ErtsPTabListBifData *ptlbdp,
+ Eterm pid,
+ Uint64 ic,
+ int pid_should_be_found)
+{
+ int i;
+ for (i = 0; i < ptlbdp->pid_ix; i++) {
+ if (ptlbdp->pid[i] == pid && ptlbdp->debug.pid_started[i] == ic) {
+ ERTS_PTAB_LIST_ASSERT(pid_should_be_found);
+ return;
+ }
+ }
+ ERTS_PTAB_LIST_ASSERT(!pid_should_be_found);
+}
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_RESLIST
+static void
+debug_ptab_list_check_res_list(Eterm list)
+{
+ while (is_list(list)) {
+ Eterm* consp = list_val(list);
+ Eterm hd = CAR(consp);
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(hd));
+ list = CDR(consp);
+ }
+
+ ERTS_PTAB_LIST_ASSERT(is_nil(list));
+}
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS
+
+static void
+debug_ptab_list_save_all_pids(ErtsPTabListBifData *ptlbdp)
+{
+ int ix, tix, cpix;
+ ErtsPTab *ptab = ptlbdp->ptab;
+ ptlbdp->debug.correct_pids_verified = 0;
+ ptlbdp->debug.correct_pids = erts_alloc(ERTS_ALC_T_PTAB_LIST_PIDS,
+ sizeof(Eterm)*ptlbdp->pid_sz);
+
+ for (tix = 0, cpix = 0; tix < ptab->r.o.max; tix++) {
+ ErtsPTabElementCommon *el = ptab_pix2el(ptab, tix);
+ if (el) {
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(el->id));
+ ptlbdp->debug.correct_pids[cpix++] = el->id;
+ ERTS_PTAB_LIST_ASSERT(cpix <= ptlbdp->pid_sz);
+ }
+ }
+ ERTS_PTAB_LIST_ASSERT(cpix == ptlbdp->pid_sz);
+
+ for (ix = 0; ix < ptlbdp->pid_sz; ix++)
+ ptlbdp->pid[ix] = make_small(ix);
+}
+
+static void
+debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp)
+{
+ int ix, cpix;
+
+ ERTS_PTAB_LIST_ASSERT(ptlbdp->pid_ix == ptlbdp->pid_sz);
+
+ for (ix = 0; ix < ptlbdp->pid_sz; ix++) {
+ int found = 0;
+ Eterm pid = ptlbdp->pid[ix];
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_is_valid_id(pid));
+ for (cpix = ix; cpix < ptlbdp->pid_sz; cpix++) {
+ if (ptlbdp->debug.correct_pids[cpix] == pid) {
+ ptlbdp->debug.correct_pids[cpix] = NIL;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ for (cpix = 0; cpix < ix; cpix++) {
+ if (ptlbdp->debug.correct_pids[cpix] == pid) {
+ ptlbdp->debug.correct_pids[cpix] = NIL;
+ found = 1;
+ break;
+ }
+ }
+ }
+ ERTS_PTAB_LIST_ASSERT(found);
+ }
+ ptlbdp->debug.correct_pids_verified = 1;
+
+ erts_free(ERTS_ALC_T_PTAB_LIST_PIDS, ptlbdp->debug.correct_pids);
+ ptlbdp->debug.correct_pids = NULL;
+}
+#endif /* ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_PIDS */
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL >= ERTS_PTAB_LIST_DBGLVL_CHK_DEL_LIST
+static void
+debug_ptab_list_check_del_list(ErtsPTab *ptab)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab));
+ if (!ptab->list.data.deleted.start)
+ ERTS_PTAB_LIST_ASSERT(!ptab->list.data.deleted.end);
+ else {
+ Uint64 curr_interval = erts_smp_current_interval_nob(erts_ptab_interval(ptab));
+ Uint64 *prev_x_interval_p = NULL;
+ ErtsPTabDeletedElement *ptdep;
+
+ for (ptdep = ptab->list.data.deleted.start;
+ ptdep;
+ ptdep = ptdep->next) {
+ if (!ptdep->prev)
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start == ptdep);
+ else
+ ERTS_PTAB_LIST_ASSERT(ptdep->prev->next == ptdep);
+ if (!ptdep->next)
+ ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.end == ptdep);
+ else
+ ERTS_PTAB_LIST_ASSERT(ptdep->next->prev == ptdep);
+ if (ptdep->ix < 0) {
+ Uint64 interval = ptdep->u.bif_invocation.interval;
+ ERTS_PTAB_LIST_ASSERT(interval <= curr_interval);
+ }
+ else {
+ Uint64 s_interval = ptdep->u.element.inserted;
+ Uint64 x_interval = ptdep->u.element.deleted;
+
+ ERTS_PTAB_LIST_ASSERT(s_interval <= x_interval);
+ if (prev_x_interval_p)
+ ERTS_PTAB_LIST_ASSERT(*prev_x_interval_p <= x_interval);
+ prev_x_interval_p = &ptdep->u.element.deleted;
+ ERTS_PTAB_LIST_ASSERT(
+ erts_ptab_is_valid_id(ptdep->u.element.id));
+ ERTS_PTAB_LIST_ASSERT(erts_ptab_id2pix(ptab,
+ ptdep->u.element.id)
+ == ptdep->ix);
+
+ }
+ }
+
+ }
+}
+
+static void
+debug_ptab_list_check_del_free_list(ErtsPTab *ptab,
+ ErtsPTabDeletedElement *free_list)
+{
+ if (ptab->list.data.deleted.start) {
+ ErtsPTabDeletedElement *fptdep;
+ ErtsPTabDeletedElement *ptdep;
+
+ for (fptdep = free_list; fptdep; fptdep = fptdep->next) {
+ for (ptdep = ptab->list.data.deleted.start;
+ ptdep;
+ ptdep = ptdep->next) {
+ ERTS_PTAB_LIST_ASSERT(fptdep != ptdep);
+ }
+ }
+ }
+}
+
+#endif
+
+#if ERTS_PTAB_LIST_BIF_DEBUGLEVEL != 0
+
+static void
+debug_ptab_list_assert_error(char* expr, const char* file, int line, const char *func)
+{
+ fflush(stdout);
+ erts_fprintf(stderr, "%s:%d:%s(): Assertion failed: %s\n",
+ (char *) file, line, (char *) func, expr);
+ fflush(stderr);
+ abort();
+}
+
+#endif
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
new file mode 100644
index 0000000000..8a130f42a3
--- /dev/null
+++ b/erts/emulator/beam/erl_ptab.h
@@ -0,0 +1,472 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Description: Process/Port table implementation.
+ *
+ * Author: Rickard Green
+ */
+
+#ifndef ERL_PTAB_H__
+#define ERL_PTAB_H__
+
+#include "sys.h"
+#include "erl_term.h"
+#include "erl_time.h"
+#include "erl_utils.h"
+#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_thr_progress.h"
+#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
+#include "erl_alloc.h"
+#include "erl_monitors.h"
+
+#define ERTS_TRACER_PROC(P) ((P)->common.tracer_proc)
+#define ERTS_TRACE_FLAGS(P) ((P)->common.trace_flags)
+
+#define ERTS_P_LINKS(P) ((P)->common.u.alive.links)
+#define ERTS_P_MONITORS(P) ((P)->common.u.alive.monitors)
+
+#define IS_TRACED(p) \
+ (ERTS_TRACER_PROC((p)) != NIL)
+#define ARE_TRACE_FLAGS_ON(p,tf) \
+ ((ERTS_TRACE_FLAGS((p)) & (tf|F_SENSITIVE)) == (tf))
+#define IS_TRACED_FL(p,tf) \
+ ( IS_TRACED(p) && ARE_TRACE_FLAGS_ON(p,tf) )
+
+typedef struct {
+ Eterm id;
+#ifdef ERTS_SMP
+ erts_atomic32_t refc;
+#endif
+ Eterm tracer_proc;
+ Uint trace_flags;
+ union {
+ /* --- While being alive --- */
+ struct {
+ Uint64 started_interval;
+ struct reg_proc *reg;
+ ErtsLink *links;
+ ErtsMonitor *monitors;
+#ifdef ERTS_SMP
+ ErtsSmpPTimer *ptimer;
+#else
+ ErlTimer tm;
+#endif
+ } alive;
+
+ /* --- While being released --- */
+ ErtsThrPrgrLaterOp release;
+ } u;
+} ErtsPTabElementCommon;
+
+typedef struct ErtsPTabDeletedElement_ ErtsPTabDeletedElement;
+
+typedef struct {
+ erts_smp_rwmtx_t rwmtx;
+ erts_interval_t interval;
+ struct {
+ ErtsPTabDeletedElement *start;
+ ErtsPTabDeletedElement *end;
+ } deleted;
+ int chunks;
+} ErtsPTabListData;
+
+typedef struct {
+#ifdef ARCH_32
+ erts_smp_dw_atomic_t last_data;
+#else
+ erts_smp_atomic_t last_data;
+#endif
+ erts_smp_atomic32_t count;
+} ErtsPTabVolatileData;
+
+typedef struct {
+ erts_smp_atomic_t *tab;
+ Uint32 max;
+ Uint32 tab_cache_lines;
+ Uint32 pix_per_cache_line;
+ Uint32 pix_mask;
+ Uint32 pix_cl_mask;
+ Uint32 pix_cl_shift;
+ Uint32 pix_cli_mask;
+ Uint32 pix_cli_shift;
+ ErtsPTabElementCommon *invalid_element;
+ Eterm invalid_data;
+ void (*release_element)(void *);
+} ErtsPTabReadOnlyData;
+
+typedef struct {
+ /*
+ * Data mainly modified when someone is listing
+ * the content of the table.
+ */
+ union {
+ ErtsPTabListData data;
+ char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabListData))];
+ } list;
+
+ /*
+ * Frequently modified data.
+ */
+ union {
+ ErtsPTabVolatileData tile;
+ char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabVolatileData))];
+ } vola;
+
+ /*
+ * Read only data.
+ */
+ union {
+ ErtsPTabReadOnlyData o;
+ char algn[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsPTabReadOnlyData))];
+ } r;
+} ErtsPTab;
+
+#define ERTS_PTAB_ID_DATA_SIZE 28
+#define ERTS_PTAB_ID_DATA_SHIFT (_TAG_IMMED1_SIZE)
+/* ERTS_PTAB_MAX_SIZE must be a power of 2 */
+#define ERTS_PTAB_MAX_SIZE (SWORD_CONSTANT(1) << 27)
+#if (ERTS_PTAB_MAX_SIZE-1) > MAX_SMALL
+# error "The maximum number of processes/ports must fit in a SMALL."
+#endif
+
+
+/*
+ * Currently pids and ports are allowed.
+ */
+#if _PID_DATA_SIZE != ERTS_PTAB_ID_DATA_SIZE
+# error "Unexpected pid data size"
+#endif
+#if _PID_DATA_SHIFT != ERTS_PTAB_ID_DATA_SHIFT
+# error "Unexpected pid tag size"
+#endif
+#if _PORT_DATA_SIZE != ERTS_PTAB_ID_DATA_SIZE
+# error "Unexpected port data size"
+#endif
+#if _PORT_DATA_SHIFT != ERTS_PTAB_ID_DATA_SHIFT
+# error "Unexpected port tag size"
+#endif
+
+#define ERTS_PTAB_INVALID_ID(TAG) \
+ ((Eterm) \
+ ((((1 << ERTS_PTAB_ID_DATA_SIZE) - 1) << ERTS_PTAB_ID_DATA_SHIFT) \
+ | (TAG)))
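+
+/*
+ * Example (assuming _TAG_IMMED1_SIZE == 4): the invalid id has all 28
+ * data bits set, i.e. ERTS_PTAB_INVALID_ID(TAG) == (0x0FFFFFFF << 4) | TAG,
+ * a data value that erts_ptab_new_element() never hands out.
+ */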
+
+#define erts_ptab_is_valid_id(ID) \
+ (is_internal_pid((ID)) || is_internal_port((ID)))
+
+void erts_ptab_init(void);
+void erts_ptab_init_table(ErtsPTab *ptab,
+ ErtsAlcType_t atype,
+ void (*release_element)(void *),
+ ErtsPTabElementCommon *invalid_element,
+ int size,
+ char *name);
+int erts_ptab_new_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el,
+ void *init_arg,
+ void (*init_ptab_el)(void *, Eterm));
+void erts_ptab_delete_element(ErtsPTab *ptab,
+ ErtsPTabElementCommon *ptab_el);
+int erts_ptab_initialized(ErtsPTab *ptab);
+
+ERTS_GLB_INLINE erts_interval_t *erts_ptab_interval(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_max(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_count(ErtsPTab *ptab);
+ERTS_GLB_INLINE Uint erts_ptab_pixdata2data(ErtsPTab *ptab, Eterm pixdata);
+ERTS_GLB_INLINE Uint32 erts_ptab_pixdata2pix(ErtsPTab *ptab, Eterm pixdata);
+ERTS_GLB_INLINE Uint32 erts_ptab_data2pix(ErtsPTab *ptab, Eterm data);
+ERTS_GLB_INLINE Uint erts_ptab_data2pixdata(ErtsPTab *ptab, Eterm data);
+ERTS_GLB_INLINE Eterm erts_ptab_make_id(ErtsPTab *ptab, Eterm data, Eterm tag);
+ERTS_GLB_INLINE int erts_ptab_id2pix(ErtsPTab *ptab, Eterm id);
+ERTS_GLB_INLINE Uint erts_ptab_id2data(ErtsPTab *ptab, Eterm id);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix);
+ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el);
+ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint32 add_refc);
+ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab);
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE erts_interval_t *
+erts_ptab_interval(ErtsPTab *ptab)
+{
+ return &ptab->list.data.interval;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_max(ErtsPTab *ptab)
+{
+ int max = ptab->r.o.max;
+ return max == ERTS_PTAB_MAX_SIZE ? max - 1 : max;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_count(ErtsPTab *ptab)
+{
+ int max = ptab->r.o.max;
+ erts_aint32_t res = erts_smp_atomic32_read_nob(&ptab->vola.tile.count);
+ if (max == ERTS_PTAB_MAX_SIZE) {
+ max--;
+ res--;
+ }
+ if (res > max)
+ return max;
+ ASSERT(res >= 0);
+ return (int) res;
+
+}
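+
+/*
+ * Worked example (follows from erts_ptab_init_table() in erl_ptab.c):
+ * when the table is created with the maximum size, one slot is
+ * permanently occupied by the invalid element, so a table of size
+ * 1 << 27 = 134217728 can hold at most 134217727 elements;
+ * erts_ptab_max() reports this reduced capacity and erts_ptab_count()
+ * compensates for the reserved slot in the same way.
+ */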
+
+ERTS_GLB_INLINE Uint erts_ptab_pixdata2data(ErtsPTab *ptab, Eterm pixdata)
+{
+ Uint32 data = ((Uint32) pixdata) & ~ptab->r.o.pix_mask;
+ data |= (pixdata >> ptab->r.o.pix_cl_shift) & ptab->r.o.pix_cl_mask;
+ data |= (pixdata & ptab->r.o.pix_cli_mask) << ptab->r.o.pix_cli_shift;
+ return data;
+}
+
+ERTS_GLB_INLINE Uint32 erts_ptab_pixdata2pix(ErtsPTab *ptab, Eterm pixdata)
+{
+ return ((Uint32) pixdata) & ptab->r.o.pix_mask;
+}
+
+ERTS_GLB_INLINE Uint32 erts_ptab_data2pix(ErtsPTab *ptab, Eterm data)
+{
+ Uint32 n, pix;
+ n = (Uint32) data;
+ pix = ((n & ptab->r.o.pix_cl_mask) << ptab->r.o.pix_cl_shift);
+ pix += ((n >> ptab->r.o.pix_cli_shift) & ptab->r.o.pix_cli_mask);
+ ASSERT(0 <= pix && pix < ptab->r.o.max);
+ return pix;
+}
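+
+/*
+ * Illustrative example (the sizes are assumptions, not from this change):
+ * with 64-byte cache lines and 8-byte table entries, pix_per_cache_line
+ * is 8; for a table of 1024 entries tab_cache_lines is 128. The low bits
+ * of 'data' then select the cache line and the high bits the slot within
+ * it, so consecutively created ids are spread over different cache lines:
+ *
+ *   data = 0 -> pix = 0,  data = 1 -> pix = 8,  data = 2 -> pix = 16, ...
+ */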
+
+ERTS_GLB_INLINE Uint erts_ptab_data2pixdata(ErtsPTab *ptab, Eterm data)
+{
+ Uint pixdata = data & ~((Uint) ptab->r.o.pix_mask);
+ pixdata |= (Uint) erts_ptab_data2pix(ptab, data);
+ ASSERT(data == erts_ptab_pixdata2data(ptab, pixdata));
+ return pixdata;
+}
+
+#if ERTS_SIZEOF_TERM == 8
+
+ERTS_GLB_INLINE Eterm
+erts_ptab_make_id(ErtsPTab *ptab, Eterm data, Eterm tag)
+{
+ HUint huint;
+ Uint32 low_data = (Uint32) data;
+ low_data &= (1 << ERTS_PTAB_ID_DATA_SIZE) - 1;
+ low_data <<= ERTS_PTAB_ID_DATA_SHIFT;
+ huint.hval[ERTS_HUINT_HVAL_HIGH] = erts_ptab_data2pix(ptab, data);
+ huint.hval[ERTS_HUINT_HVAL_LOW] = low_data | ((Uint32) tag);
+ return (Eterm) huint.val;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_id2pix(ErtsPTab *ptab, Eterm id)
+{
+ HUint huint;
+ huint.val = id;
+ return (int) huint.hval[ERTS_HUINT_HVAL_HIGH];
+}
+
+ERTS_GLB_INLINE Uint
+erts_ptab_id2data(ErtsPTab *ptab, Eterm id)
+{
+ HUint huint;
+ huint.val = id;
+ return (Uint) (huint.hval[ERTS_HUINT_HVAL_LOW] >> ERTS_PTAB_ID_DATA_SHIFT);
+}
+
+#elif ERTS_SIZEOF_TERM == 4
+
+ERTS_GLB_INLINE Eterm
+erts_ptab_make_id(ErtsPTab *ptab, Eterm data, Eterm tag)
+{
+ Eterm id;
+ data &= ((1 << ERTS_PTAB_ID_DATA_SIZE) - 1);
+ id = (Eterm) erts_ptab_data2pixdata(ptab, data);
+ return (id << ERTS_PTAB_ID_DATA_SHIFT) | tag;
+}
+
+ERTS_GLB_INLINE int
+erts_ptab_id2pix(ErtsPTab *ptab, Eterm id)
+{
+ Uint pixdata = (Uint) id;
+ pixdata >>= ERTS_PTAB_ID_DATA_SHIFT;
+ return (int) erts_ptab_pixdata2pix(ptab, pixdata);
+}
+
+ERTS_GLB_INLINE Uint
+erts_ptab_id2data(ErtsPTab *ptab, Eterm id)
+{
+ Uint pixdata = (Uint) id;
+ pixdata >>= ERTS_PTAB_ID_DATA_SHIFT;
+ return erts_ptab_pixdata2data(ptab, pixdata);
+}
+
+#else
+#error "Unsupported size of term"
+#endif
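+
+/*
+ * Illustrative sketch (not part of the original change): regardless of
+ * term size, an id is the pixdata combined with a tag, so the accessors
+ * above round-trip:
+ *
+ *   Eterm id = erts_ptab_make_id(ptab, data, tag);
+ *   // erts_ptab_id2data(ptab, id) == (data & ((1 << ERTS_PTAB_ID_DATA_SIZE) - 1))
+ *   // erts_ptab_id2pix(ptab, id)  == erts_ptab_data2pix(ptab, data)
+ */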
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_ddrb(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_rb(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix)
+{
+ ASSERT(0 <= ix && ix < ptab->r.o.max);
+ return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]);
+}
+
+ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el)
+{
+#ifdef ERTS_SMP
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ erts_aint32_t refc = erts_atomic32_inc_read_nob(&ptab_el->refc);
+ ERTS_SMP_LC_ASSERT(refc > 1);
+#else
+ erts_atomic32_inc_nob(&ptab_el->refc);
+#endif
+#endif
+}
+
+ERTS_GLB_INLINE int erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el)
+{
+#ifdef ERTS_SMP
+ erts_aint32_t refc = erts_atomic32_dec_read_nob(&ptab_el->refc);
+ ERTS_SMP_LC_ASSERT(refc >= 0);
+ return (int) refc;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE int erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el,
+ Sint32 add_refc)
+{
+#ifdef ERTS_SMP
+ erts_aint32_t refc;
+
+#ifndef ERTS_ENABLE_LOCK_CHECK
+ if (add_refc >= 0) {
+ erts_atomic32_add_nob(&ptab_el->refc,
+ (erts_aint32_t) add_refc);
+ return 1;
+ }
+#endif
+
+ refc = erts_atomic32_add_read_nob(&ptab_el->refc,
+ (erts_aint32_t) add_refc);
+ ERTS_SMP_LC_ASSERT(refc >= 0);
+ return (int) refc;
+#else
+ return 0;
+#endif
+}
+
+ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_rlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab)
+{
+ return erts_smp_rwmtx_tryrlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_runlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_rwlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab)
+{
+ return erts_smp_rwmtx_tryrwlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab)
+{
+ erts_smp_rwmtx_rwunlock(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab)
+{
+ return erts_smp_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx);
+}
+
+ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab)
+{
+ return erts_smp_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx);
+}
+
+#endif
+
+#endif
+
+#if defined(ERTS_PTAB_WANT_BIF_IMPL__) && !defined(ERTS_PTAB_LIST__)
+#define ERTS_PTAB_LIST__
+
+#include "erl_process.h"
+#include "bif.h"
+
+BIF_RETTYPE erts_ptab_list(struct process *c_p, ErtsPTab *ptab);
+
+#endif
+
+#if defined(ERTS_PTAB_WANT_DEBUG_FUNCS__) && !defined(ERTS_PTAB_DEBUG_FUNCS__)
+#define ERTS_PTAB_DEBUG_FUNCS__
+#include "erl_process.h"
+
+/* Debug functions */
+Sint erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next);
+Eterm erts_debug_ptab_list(Process *c_p, ErtsPTab *ptab);
+Eterm erts_debug_ptab_list_bif_info(Process *c_p, ErtsPTab *ptab);
+
+#endif
diff --git a/erts/emulator/beam/erl_resolv_dns.c b/erts/emulator/beam/erl_resolv_dns.c
deleted file mode 100644
index 9d76fa89f8..0000000000
--- a/erts/emulator/beam/erl_resolv_dns.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
- * Set this to non-zero value if DNS should be used.
- */
-int erl_use_resolver = 1;
diff --git a/erts/emulator/beam/erl_resolv_nodns.c b/erts/emulator/beam/erl_resolv_nodns.c
deleted file mode 100644
index f14ab68e27..0000000000
--- a/erts/emulator/beam/erl_resolv_nodns.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
- * Set this to non-zero value if DNS should be used.
- */
-int erl_use_resolver = 0;
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index a32e9d9d7c..34c90c0bda 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -274,6 +274,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob
#define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob
#define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob
+#define erts_smp_atomic_read_bset_nob erts_atomic_read_bset_nob
#define erts_smp_atomic_init_mb erts_atomic_init_mb
#define erts_smp_atomic_set_mb erts_atomic_set_mb
@@ -288,6 +289,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb
#define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb
#define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb
+#define erts_smp_atomic_read_bset_mb erts_atomic_read_bset_mb
#define erts_smp_atomic_init_acqb erts_atomic_init_acqb
#define erts_smp_atomic_set_acqb erts_atomic_set_acqb
@@ -302,6 +304,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb
#define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb
#define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb
+#define erts_smp_atomic_read_bset_acqb erts_atomic_read_bset_acqb
#define erts_smp_atomic_init_relb erts_atomic_init_relb
#define erts_smp_atomic_set_relb erts_atomic_set_relb
@@ -316,6 +319,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb
#define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb
#define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb
+#define erts_smp_atomic_read_bset_relb erts_atomic_read_bset_relb
#define erts_smp_atomic_init_ddrb erts_atomic_init_ddrb
#define erts_smp_atomic_set_ddrb erts_atomic_set_ddrb
@@ -330,6 +334,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_ddrb erts_atomic_read_band_ddrb
#define erts_smp_atomic_xchg_ddrb erts_atomic_xchg_ddrb
#define erts_smp_atomic_cmpxchg_ddrb erts_atomic_cmpxchg_ddrb
+#define erts_smp_atomic_read_bset_ddrb erts_atomic_read_bset_ddrb
#define erts_smp_atomic_init_rb erts_atomic_init_rb
#define erts_smp_atomic_set_rb erts_atomic_set_rb
@@ -344,6 +349,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb
#define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb
#define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb
+#define erts_smp_atomic_read_bset_rb erts_atomic_read_bset_rb
#define erts_smp_atomic_init_wb erts_atomic_init_wb
#define erts_smp_atomic_set_wb erts_atomic_set_wb
@@ -358,6 +364,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb
#define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb
#define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb
+#define erts_smp_atomic_read_bset_wb erts_atomic_read_bset_wb
/* 32-bit atomics */
@@ -374,6 +381,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob
#define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob
#define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob
+#define erts_smp_atomic32_read_bset_nob erts_atomic32_read_bset_nob
#define erts_smp_atomic32_init_mb erts_atomic32_init_mb
#define erts_smp_atomic32_set_mb erts_atomic32_set_mb
@@ -388,6 +396,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb
#define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb
#define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb
+#define erts_smp_atomic32_read_bset_mb erts_atomic32_read_bset_mb
#define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb
#define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb
@@ -402,6 +411,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb
#define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb
#define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb
+#define erts_smp_atomic32_read_bset_acqb erts_atomic32_read_bset_acqb
#define erts_smp_atomic32_init_relb erts_atomic32_init_relb
#define erts_smp_atomic32_set_relb erts_atomic32_set_relb
@@ -416,6 +426,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb
#define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb
#define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb
+#define erts_smp_atomic32_read_bset_relb erts_atomic32_read_bset_relb
#define erts_smp_atomic32_init_ddrb erts_atomic32_init_ddrb
#define erts_smp_atomic32_set_ddrb erts_atomic32_set_ddrb
@@ -430,6 +441,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_ddrb erts_atomic32_read_band_ddrb
#define erts_smp_atomic32_xchg_ddrb erts_atomic32_xchg_ddrb
#define erts_smp_atomic32_cmpxchg_ddrb erts_atomic32_cmpxchg_ddrb
+#define erts_smp_atomic32_read_bset_ddrb erts_atomic32_read_bset_ddrb
#define erts_smp_atomic32_init_rb erts_atomic32_init_rb
#define erts_smp_atomic32_set_rb erts_atomic32_set_rb
@@ -444,6 +456,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb
#define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb
#define erts_smp_atomic32_cmpxchg_rb erts_atomic32_cmpxchg_rb
+#define erts_smp_atomic32_read_bset_rb erts_atomic32_read_bset_rb
#define erts_smp_atomic32_init_wb erts_atomic32_init_wb
#define erts_smp_atomic32_set_wb erts_atomic32_set_wb
@@ -458,6 +471,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb
#define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb
#define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb
+#define erts_smp_atomic32_read_bset_wb erts_atomic32_read_bset_wb
#else /* !ERTS_SMP */
@@ -513,6 +527,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_nob erts_no_atomic_read_band
#define erts_smp_atomic_xchg_nob erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_nob erts_no_atomic_read_bset
#define erts_smp_atomic_init_mb erts_no_atomic_set
#define erts_smp_atomic_set_mb erts_no_atomic_set
@@ -527,6 +542,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_mb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_mb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_mb erts_no_atomic_read_bset
#define erts_smp_atomic_init_acqb erts_no_atomic_set
#define erts_smp_atomic_set_acqb erts_no_atomic_set
@@ -541,6 +557,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_acqb erts_no_atomic_read_bset
#define erts_smp_atomic_init_relb erts_no_atomic_set
#define erts_smp_atomic_set_relb erts_no_atomic_set
@@ -555,6 +572,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_relb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_relb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_relb erts_no_atomic_read_bset
#define erts_smp_atomic_init_ddrb erts_no_atomic_set
#define erts_smp_atomic_set_ddrb erts_no_atomic_set
@@ -569,6 +587,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_ddrb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_ddrb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_ddrb erts_no_atomic_read_bset
#define erts_smp_atomic_init_rb erts_no_atomic_set
#define erts_smp_atomic_set_rb erts_no_atomic_set
@@ -583,6 +602,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_rb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_rb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_rb erts_no_atomic_read_bset
#define erts_smp_atomic_init_wb erts_no_atomic_set
#define erts_smp_atomic_set_wb erts_no_atomic_set
@@ -597,6 +617,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic_read_band_wb erts_no_atomic_read_band
#define erts_smp_atomic_xchg_wb erts_no_atomic_xchg
#define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
+#define erts_smp_atomic_read_bset_wb erts_no_atomic_read_bset
/* 32-bit atomics */
@@ -613,6 +634,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_nob erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_mb erts_no_atomic32_set
#define erts_smp_atomic32_set_mb erts_no_atomic32_set
@@ -627,6 +649,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_mb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_acqb erts_no_atomic32_set
#define erts_smp_atomic32_set_acqb erts_no_atomic32_set
@@ -641,6 +664,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_acqb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_relb erts_no_atomic32_set
#define erts_smp_atomic32_set_relb erts_no_atomic32_set
@@ -655,6 +679,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_relb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_ddrb erts_no_atomic32_set
#define erts_smp_atomic32_set_ddrb erts_no_atomic32_set
@@ -669,6 +694,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_ddrb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_ddrb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_rb erts_no_atomic32_set
#define erts_smp_atomic32_set_rb erts_no_atomic32_set
@@ -683,6 +709,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_rb erts_no_atomic32_read_bset
#define erts_smp_atomic32_init_wb erts_no_atomic32_set
#define erts_smp_atomic32_set_wb erts_no_atomic32_set
@@ -697,6 +724,7 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band
#define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg
#define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
+#define erts_smp_atomic32_read_bset_wb erts_no_atomic32_read_bset
#endif /* !ERTS_SMP */
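For context, a minimal sketch of how the new read_bset operation is meant to be used, assuming erl_smp.h is included; the EX_* flag names, the ex_state variable and the helper function are made up for illustration:

#include "erl_smp.h"

/* Hypothetical flag bits and state word, for illustration only. */
#define EX_FLAG_BUSY    ((erts_aint32_t) 1 << 0)
#define EX_FLAG_EXITING ((erts_aint32_t) 1 << 1)

static erts_smp_atomic32_t ex_state;

static void
ex_mark_busy_clear_exiting(void)
{
    erts_aint32_t old;
    /* Atomically replace only the bits selected by the mask with the
     * corresponding bits of the set argument; bits outside the mask are
     * left untouched.  The old value is returned, like the other read_*
     * operations.  In a non-SMP build this maps to
     * erts_no_atomic32_read_bset() via the defines above. */
    old = erts_smp_atomic32_read_bset_nob(&ex_state,
                                          EX_FLAG_BUSY | EX_FLAG_EXITING,
                                          EX_FLAG_BUSY);
    (void) old;
}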
diff --git a/erts/emulator/beam/erl_sys_driver.h b/erts/emulator/beam/erl_sys_driver.h
index d429d0ce96..b991a2840c 100644
--- a/erts/emulator/beam/erl_sys_driver.h
+++ b/erts/emulator/beam/erl_sys_driver.h
@@ -31,7 +31,6 @@
#define ERL_SYS_DRV
typedef long ErlDrvEvent; /* An event to be selected on. */
-typedef long ErlDrvPort; /* A port descriptor. */
/* typedef struct _SysDriverOpts SysDriverOpts; defined in sys.h */
diff --git a/erts/emulator/beam/erl_term.c b/erts/emulator/beam/erl_term.c
index f77e8b798f..4587cd84d1 100644
--- a/erts/emulator/beam/erl_term.c
+++ b/erts/emulator/beam/erl_term.c
@@ -105,7 +105,7 @@ unsigned tag_val_def(Wterm x)
break;
}
}
- sprintf(msg, "tag_val_def: %#lx", (unsigned long) x);
+ erts_snprintf(msg, sizeof(msg), "tag_val_def: %#lx", (unsigned long) x);
et_abort(msg, file, line);
#undef file
#undef line
@@ -133,7 +133,7 @@ ET_DEFINE_CHECKED(Uint,unsigned_val,Eterm,is_small);
ET_DEFINE_CHECKED(Sint,signed_val,Eterm,is_small);
ET_DEFINE_CHECKED(Uint,atom_val,Eterm,is_atom);
ET_DEFINE_CHECKED(Uint,header_arity,Eterm,is_header);
-ET_DEFINE_CHECKED(Uint,arityval,Eterm,is_arity_value);
+ET_DEFINE_CHECKED(Uint,arityval,Eterm,is_sane_arity_value);
ET_DEFINE_CHECKED(Uint,thing_arityval,Eterm,is_thing);
ET_DEFINE_CHECKED(Uint,thing_subtag,Eterm,is_thing);
ET_DEFINE_CHECKED(Eterm*,binary_val,Wterm,is_binary);
@@ -144,9 +144,7 @@ ET_DEFINE_CHECKED(Uint,bignum_header_arity,Eterm,_is_bignum_header);
ET_DEFINE_CHECKED(Eterm*,big_val,Wterm,is_big);
ET_DEFINE_CHECKED(Eterm*,float_val,Wterm,is_float);
ET_DEFINE_CHECKED(Eterm*,tuple_val,Wterm,is_tuple);
-ET_DEFINE_CHECKED(Uint,internal_pid_data,Eterm,is_internal_pid);
ET_DEFINE_CHECKED(struct erl_node_*,internal_pid_node,Eterm,is_internal_pid);
-ET_DEFINE_CHECKED(Uint,internal_port_data,Eterm,is_internal_port);
ET_DEFINE_CHECKED(struct erl_node_*,internal_port_node,Eterm,is_internal_port);
ET_DEFINE_CHECKED(Eterm*,internal_ref_val,Wterm,is_internal_ref);
ET_DEFINE_CHECKED(Uint,internal_ref_data_words,Wterm,is_internal_ref);
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index c270d13365..fb3ef9cd6c 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -300,8 +300,17 @@ _ET_DECLARE_CHECKED(Uint,header_arity,Eterm)
#define header_arity(x) _ET_APPLY(header_arity,(x))
/* arityval access methods */
+/* The Erlang specification (4.7.3) defines the max arity as 65535.
+ * We will, however, enforce a max arity of 16777215 (24 bits),
+ * checked in BIFs and asserted in debug builds.
+ */
+#define MAX_ARITYVAL ((((Uint)1) << 24) - 1)
+#define ERTS_MAX_TUPLE_SIZE MAX_ARITYVAL
+
#define make_arityval(sz) _make_header((sz),_TAG_HEADER_ARITYVAL)
#define is_arity_value(x) (((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL)
+#define is_sane_arity_value(x) ((((x) & _TAG_HEADER_MASK) == _TAG_HEADER_ARITYVAL) && \
+ (((x) >> _HEADER_ARITY_OFFS) <= MAX_ARITYVAL))
#define is_not_arity_value(x) (!is_arity_value((x)))
#define _unchecked_arityval(x) _unchecked_header_arity((x))
_ET_DECLARE_CHECKED(Uint,arityval,Eterm)
@@ -542,12 +551,6 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm)
#define _GETBITS(X,Pos,Size) (((X) >> (Pos)) & ~(~((Uint) 0) << (Size)))
/*
- * Observe! New layout for pids, ports and references in R9 (see also note
- * in erl_node_container_utils.h).
- */
-
-
-/*
* Creation in node specific data (pids, ports, refs)
*/
@@ -584,7 +587,6 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm)
*
*/
-#define _PID_R9_SER_SIZE 3
#define _PID_SER_SIZE (_PID_DATA_SIZE - _PID_NUM_SIZE)
#define _PID_NUM_SIZE 15
@@ -598,23 +600,13 @@ _ET_DECLARE_CHECKED(Eterm*,tuple_val,Wterm)
#define make_pid_data(Ser, Num) \
((Uint) ((Ser) << _PID_NUM_SIZE | (Num)))
-#define make_internal_pid(X) \
- ((Eterm) (((X) << _PID_DATA_SHIFT) | _TAG_IMMED1_PID))
-
#define is_internal_pid(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_PID)
#define is_not_internal_pid(x) (!is_internal_pid((x)))
-#define _unchecked_internal_pid_data(x) _GET_PID_DATA((x))
-_ET_DECLARE_CHECKED(Uint,internal_pid_data,Eterm)
-#define internal_pid_data(x) _ET_APPLY(internal_pid_data,(x))
-
#define _unchecked_internal_pid_node(x) erts_this_node
_ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm)
#define internal_pid_node(x) _ET_APPLY(internal_pid_node,(x))
-#define internal_pid_number(x) _GET_PID_NUM(internal_pid_data((x)))
-#define internal_pid_serial(x) _GET_PID_SER(internal_pid_data((x)))
-
#define internal_pid_data_words(x) (1)
/*
@@ -644,7 +636,6 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm)
* N : node number
*
*/
-#define _PORT_R9_NUM_SIZE 18
#define _PORT_NUM_SIZE _PORT_DATA_SIZE
#define _PORT_DATA_SIZE 28
@@ -654,18 +645,9 @@ _ET_DECLARE_CHECKED(struct erl_node_*,internal_pid_node,Eterm)
#define _GET_PORT_NUM(X) _GETBITS((X), 0, _PORT_NUM_SIZE)
-#define make_internal_port(X) \
- ((Eterm) (((X) << _PORT_DATA_SHIFT) | _TAG_IMMED1_PORT))
-
#define is_internal_port(x) (((x) & _TAG_IMMED1_MASK) == _TAG_IMMED1_PORT)
#define is_not_internal_port(x) (!is_internal_port(x))
-#define _unchecked_internal_port_data(x) _GET_PORT_DATA((x))
-_ET_DECLARE_CHECKED(Uint,internal_port_data,Eterm)
-#define internal_port_data(x) _ET_APPLY(internal_port_data,(x))
-
-#define internal_port_number(x) _GET_PORT_NUM(internal_port_data((x)))
-
#define _unchecked_internal_port_node(x) erts_this_node
_ET_DECLARE_CHECKED(struct erl_node_*,internal_port_node,Eterm)
#define internal_port_node(x) _ET_APPLY(internal_port_node,(x))
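A minimal sketch of how a BIF might apply the new tuple-size limit, assuming erl_term.h is included; ex_check_tuple_size and its caller conventions are hypothetical:

#include "erl_term.h"

static int
ex_check_tuple_size(Uint size)
{
    /* BIFs are expected to reject anything wider than the 24-bit
     * MAX_ARITYVAL limit; debug builds additionally assert it through
     * the tightened is_sane_arity_value() check in arityval(). */
    return size <= ERTS_MAX_TUPLE_SIZE;
}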
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 88524bdd4c..9678d7e08b 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -96,17 +96,14 @@
#define ERTS_THR_PRGR_LFLG_BLOCK (((erts_aint32_t) 1) << 31)
#define ERTS_THR_PRGR_LFLG_NO_LEADER (((erts_aint32_t) 1) << 30)
-#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \
- | ERTS_THR_PRGR_LFLG_BLOCK))
+#define ERTS_THR_PRGR_LFLG_WAITING_UM (((erts_aint32_t) 1) << 29)
+#define ERTS_THR_PRGR_LFLG_ACTIVE_MASK (~(ERTS_THR_PRGR_LFLG_NO_LEADER \
+ | ERTS_THR_PRGR_LFLG_BLOCK \
+ | ERTS_THR_PRGR_LFLG_WAITING_UM))
-#define ERTS_THR_PRGR_LFLGS_ACTIVE(LFLGS) \
+#define ERTS_THR_PRGR_LFLGS_ACTIVE(LFLGS) \
((LFLGS) & ERTS_THR_PRGR_LFLG_ACTIVE_MASK)
-#define ERTS_THR_PRGR_LFLGS_ALL_WAITING(LFLGS) \
- (((LFLGS) & (ERTS_THR_PRGR_LFLG_NO_LEADER \
- |ERTS_THR_PRGR_LFLG_ACTIVE_MASK)) \
- == ERTS_THR_PRGR_LFLG_NO_LEADER)
-
/*
* We use a 64-bit value for thread progress. By this wrapping of
* the thread progress will more or less never occur.
@@ -262,6 +259,11 @@ typedef struct {
erts_atomic32_t managed_count;
erts_atomic32_t managed_id;
erts_atomic32_t unmanaged_id;
+ int chk_next_ix;
+ struct {
+ int waiting;
+ erts_atomic32_t current;
+ } umrefc_ix;
} ErtsThrPrgrMiscData;
typedef struct {
@@ -276,12 +278,18 @@ typedef union {
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsThrPrgrElement))];
} ErtsThrPrgrArray;
+typedef union {
+ erts_atomic_t refc;
+ char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic_t))];
+} ErtsThrPrgrUnmanagedRefc;
+
typedef struct {
union {
ErtsThrPrgrMiscData data;
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
sizeof(ErtsThrPrgrMiscData))];
} misc;
+ ErtsThrPrgrUnmanagedRefc umrefc[2];
ErtsThrPrgrArray *thr;
struct {
int no;
@@ -346,7 +354,9 @@ init_tmp_thr_prgr_data(ErtsThrPrgrData *tpd)
tpd->is_managed = 0;
tpd->is_blocking = 0;
tpd->is_temporary = 1;
-
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ tpd->is_delaying = 0;
+#endif
erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
}
@@ -461,6 +471,12 @@ erts_thr_progress_init(int no_schedulers, int managed, int unmanaged)
erts_atomic32_init_nob(&intrnl->misc.data.managed_count, 0);
erts_atomic32_init_nob(&intrnl->misc.data.managed_id, no_schedulers);
erts_atomic32_init_nob(&intrnl->misc.data.unmanaged_id, -1);
+ intrnl->misc.data.chk_next_ix = 0;
+ intrnl->misc.data.umrefc_ix.waiting = -1;
+ erts_atomic32_init_nob(&intrnl->misc.data.umrefc_ix.current, 0);
+
+ erts_atomic_init_nob(&intrnl->umrefc[0].refc, (erts_aint_t) 0);
+ erts_atomic_init_nob(&intrnl->umrefc[1].refc, (erts_aint_t) 0);
intrnl->thr = (ErtsThrPrgrArray *) ptr;
ptr += thr_arr_sz;
@@ -547,6 +563,9 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
tpd->is_managed = 0;
tpd->is_blocking = is_blocking;
tpd->is_temporary = 0;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ tpd->is_delaying = 0;
+#endif
ASSERT(tpd->id >= 0);
if (tpd->id >= intrnl->unmanaged.no)
erl_exit(ERTS_ABORT_EXIT,
@@ -600,6 +619,9 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
tpd->is_managed = 1;
tpd->is_blocking = is_blocking;
tpd->is_temporary = 0;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ tpd->is_delaying = 1;
+#endif
init_wakeup_request_array(&tpd->wakeup_request[0]);
@@ -607,8 +629,8 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
tpd->leader = 0;
tpd->active = 1;
- tpd->previous.local = 0;
- tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING;
+ tpd->confirmed = 0;
+ tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
erts_tsd_set(erts_thr_prgr_data_key__, (void *) tpd);
erts_atomic32_inc_nob(&intrnl->misc.data.lflgs);
@@ -651,60 +673,113 @@ leader_update(ErtsThrPrgrData *tpd)
block_thread(tpd);
}
else {
+ ErtsThrPrgrVal current;
+ int ix, chk_next_ix, umrefc_ix, my_ix, no_managed, waiting_unmanaged;
erts_aint32_t lflgs;
ErtsThrPrgrVal next;
- int ix, sz, make_progress;
+ erts_aint_t refc;
- if (tpd->previous.current == ERTS_THR_PRGR_VAL_WAITING) {
- /* Took over as leader from another thread */
- tpd->previous.current = read_acqb(&erts_thr_prgr__.current);
- tpd->previous.next = tpd->previous.current;
- tpd->previous.next++;
- if (tpd->previous.next == ERTS_THR_PRGR_VAL_WAITING)
- tpd->previous.next = 0;
- }
+ my_ix = tpd->id;
- if (tpd->previous.local == tpd->previous.current) {
- ErtsThrPrgrVal val = tpd->previous.current + 1;
- if (val == ERTS_THR_PRGR_VAL_WAITING)
- val = 0;
- tpd->previous.local = val;
- set_mb(&intrnl->thr[tpd->id].data.current, val);
+ if (tpd->leader_state.current == ERTS_THR_PRGR_VAL_WAITING) {
+ /* Took over as leader from another thread */
+ tpd->leader_state.current = read_nob(&erts_thr_prgr__.current);
+ tpd->leader_state.next = tpd->leader_state.current;
+ tpd->leader_state.next++;
+ if (tpd->leader_state.next == ERTS_THR_PRGR_VAL_WAITING)
+ tpd->leader_state.next = 0;
+ tpd->leader_state.chk_next_ix = intrnl->misc.data.chk_next_ix;
+ tpd->leader_state.umrefc_ix.waiting = intrnl->misc.data.umrefc_ix.waiting;
+ tpd->leader_state.umrefc_ix.current =
+ (int) erts_atomic32_read_nob(&intrnl->misc.data.umrefc_ix.current);
+
+ if (tpd->confirmed == tpd->leader_state.current) {
+ ErtsThrPrgrVal val = tpd->leader_state.current + 1;
+ if (val == ERTS_THR_PRGR_VAL_WAITING)
+ val = 0;
+ tpd->confirmed = val;
+ set_mb(&intrnl->thr[my_ix].data.current, val);
+ }
}
- next = tpd->previous.next;
- make_progress = 1;
- sz = intrnl->managed.no;
- for (ix = 0; ix < sz; ix++) {
- ErtsThrPrgrVal tmp;
- tmp = read_nob(&intrnl->thr[ix].data.current);
- if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
- make_progress = 0;
- ASSERT(erts_thr_progress_has_passed__(next, tmp));
- break;
+ next = tpd->leader_state.next;
+
+ waiting_unmanaged = 0;
+ umrefc_ix = -1; /* Shut up annoying warning */
+
+ chk_next_ix = tpd->leader_state.chk_next_ix;
+ no_managed = intrnl->managed.no;
+ ASSERT(0 <= chk_next_ix && chk_next_ix <= no_managed);
+    /* Check managed threads */
+ if (chk_next_ix < no_managed) {
+ for (ix = chk_next_ix; ix < no_managed; ix++) {
+ ErtsThrPrgrVal tmp;
+ if (ix == my_ix)
+ continue;
+ tmp = read_nob(&intrnl->thr[ix].data.current);
+ if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
+ tpd->leader_state.chk_next_ix = ix;
+ ASSERT(erts_thr_progress_has_passed__(next, tmp));
+ goto done;
+ }
}
}
- if (make_progress) {
- ErtsThrPrgrVal current = next;
+    /* Check unmanaged threads */
+ waiting_unmanaged = tpd->leader_state.umrefc_ix.waiting != -1;
+ umrefc_ix = (waiting_unmanaged
+ ? tpd->leader_state.umrefc_ix.waiting
+ : tpd->leader_state.umrefc_ix.current);
+ refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
+ ASSERT(refc >= 0);
+ if (refc != 0) {
+ int new_umrefc_ix;
+
+ if (waiting_unmanaged)
+ goto done;
+
+ new_umrefc_ix = (umrefc_ix + 1) & 0x1;
+ tpd->leader_state.umrefc_ix.waiting = umrefc_ix;
+ tpd->leader_state.chk_next_ix = no_managed;
+ erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
+ (erts_aint32_t) new_umrefc_ix);
+ ETHR_MEMBAR(ETHR_StoreLoad);
+ refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
+ ASSERT(refc >= 0);
+ waiting_unmanaged = 1;
+ if (refc != 0)
+ goto done;
+ }
- next++;
- if (next == ERTS_THR_PRGR_VAL_WAITING)
- next = 0;
+ /* Make progress */
+ current = next;
- set_nob(&intrnl->thr[tpd->id].data.current, next);
- set_mb(&erts_thr_prgr__.current, current);
- tpd->previous.local = next;
- tpd->previous.next = next;
- tpd->previous.current = current;
+ next++;
+ if (next == ERTS_THR_PRGR_VAL_WAITING)
+ next = 0;
+
+ set_nob(&intrnl->thr[my_ix].data.current, next);
+ set_mb(&erts_thr_prgr__.current, current);
+ tpd->confirmed = next;
+ tpd->leader_state.next = next;
+ tpd->leader_state.current = current;
#if ERTS_THR_PRGR_PRINT_VAL
- if (current % 1000 == 0)
- erts_fprintf(stderr, "%b64u\n", current);
+ if (current % 1000 == 0)
+ erts_fprintf(stderr, "%b64u\n", current);
#endif
- handle_wakeup_requests(current);
+ handle_wakeup_requests(current);
+
+ if (waiting_unmanaged) {
+ waiting_unmanaged = 0;
+ tpd->leader_state.umrefc_ix.waiting = -1;
+ erts_atomic32_read_band_nob(&intrnl->misc.data.lflgs,
+ ~ERTS_THR_PRGR_LFLG_WAITING_UM);
}
+ tpd->leader_state.chk_next_ix = 0;
+
+ done:
if (tpd->active) {
lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
@@ -712,20 +787,44 @@ leader_update(ErtsThrPrgrData *tpd)
(void) block_thread(tpd);
}
else {
+ int force_wakeup_check = 0;
+ erts_aint32_t set_flags = ERTS_THR_PRGR_LFLG_NO_LEADER;
tpd->leader = 0;
- tpd->previous.current = ERTS_THR_PRGR_VAL_WAITING;
+ tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
#if ERTS_THR_PRGR_PRINT_LEADER
erts_fprintf(stderr, "L <- %d\n", tpd->id);
#endif
ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);
+ if (waiting_unmanaged)
+ set_flags |= ERTS_THR_PRGR_LFLG_WAITING_UM;
+
lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
- ERTS_THR_PRGR_LFLG_NO_LEADER);
+ set_flags);
+ lflgs |= set_flags;
if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
lflgs = block_thread(tpd);
- if (ERTS_THR_PRGR_LFLGS_ACTIVE(lflgs) == 0 && got_sched_wakeups())
+
+ if (waiting_unmanaged) {
+ /* Need to check umrefc again */
+ ETHR_MEMBAR(ETHR_StoreLoad);
+ refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
+ if (refc == 0) {
+ /* Need to force wakeup check */
+ force_wakeup_check = 1;
+ }
+ }
+
+ if ((force_wakeup_check
+ || ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
+ | ERTS_THR_PRGR_LFLG_WAITING_UM
+ | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
+ == ERTS_THR_PRGR_LFLG_NO_LEADER))
+ && got_sched_wakeups()) {
+    /* Someone needs to make progress */
wakeup_managed(0);
+ }
}
}
@@ -744,11 +843,11 @@ update(ErtsThrPrgrData *tpd)
erts_aint32_t lflgs;
res = 0;
val = read_acqb(&erts_thr_prgr__.current);
- if (tpd->previous.local == val) {
+ if (tpd->confirmed == val) {
val++;
if (val == ERTS_THR_PRGR_VAL_WAITING)
val = 0;
- tpd->previous.local = val;
+ tpd->confirmed = val;
set_mb(&intrnl->thr[tpd->id].data.current, val);
}
@@ -801,12 +900,19 @@ erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp)
block_count_dec();
- tpd->previous.local = ERTS_THR_PRGR_VAL_WAITING;
+ tpd->confirmed = ERTS_THR_PRGR_VAL_WAITING;
set_mb(&intrnl->thr[tpd->id].data.current, ERTS_THR_PRGR_VAL_WAITING);
lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
- if (ERTS_THR_PRGR_LFLGS_ALL_WAITING(lflgs) && got_sched_wakeups())
- wakeup_managed(0); /* Someone need to make progress */
+
+ if ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
+ | ERTS_THR_PRGR_LFLG_WAITING_UM
+ | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
+ == ERTS_THR_PRGR_LFLG_NO_LEADER
+ && got_sched_wakeups()) {
+    /* Someone needs to make progress */
+ wakeup_managed(0);
+ }
}
void
@@ -828,7 +934,7 @@ erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp)
val++;
if (val == ERTS_THR_PRGR_VAL_WAITING)
val = 0;
- tpd->previous.local = val;
+ tpd->confirmed = val;
set_mb(&intrnl->thr[tpd->id].data.current, val);
val = read_acqb(&erts_thr_prgr__.current);
if (current == val)
@@ -875,6 +981,68 @@ erts_thr_progress_active(ErtsSchedulerData *esdp, int on)
}
+static ERTS_INLINE void
+unmanaged_continue(ErtsThrPrgrDelayHandle handle)
+{
+ int umrefc_ix = (int) handle;
+ erts_aint_t refc;
+
+ ASSERT(umrefc_ix == 0 || umrefc_ix == 1);
+ refc = erts_atomic_dec_read_relb(&intrnl->umrefc[umrefc_ix].refc);
+ ASSERT(refc >= 0);
+ if (refc == 0) {
+ erts_aint_t lflgs;
+ ERTS_THR_READ_MEMORY_BARRIER;
+ lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
+ if ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
+ | ERTS_THR_PRGR_LFLG_WAITING_UM
+ | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
+ == (ERTS_THR_PRGR_LFLG_NO_LEADER|ERTS_THR_PRGR_LFLG_WAITING_UM)
+ && got_sched_wakeups()) {
+ /* Others waiting for us... */
+ wakeup_managed(0);
+ }
+ }
+}
+
+void
+erts_thr_progress_unmanaged_continue__(ErtsThrPrgrDelayHandle handle)
+{
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
+ ERTS_LC_ASSERT(tpd && tpd->is_delaying);
+ tpd->is_delaying = 0;
+ return_tmp_thr_prgr_data(tpd);
+#endif
+ ASSERT(!erts_thr_progress_is_managed_thread());
+
+ unmanaged_continue(handle);
+}
+
+ErtsThrPrgrDelayHandle
+erts_thr_progress_unmanaged_delay__(void)
+{
+ int umrefc_ix;
+ ASSERT(!erts_thr_progress_is_managed_thread());
+ umrefc_ix = (int) erts_atomic32_read_acqb(&intrnl->misc.data.umrefc_ix.current);
+ while (1) {
+ int tmp_ix;
+ erts_atomic_inc_acqb(&intrnl->umrefc[umrefc_ix].refc);
+ tmp_ix = (int) erts_atomic32_read_acqb(&intrnl->misc.data.umrefc_ix.current);
+ if (tmp_ix == umrefc_ix)
+ break;
+ unmanaged_continue(umrefc_ix);
+ umrefc_ix = tmp_ix;
+ }
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ {
+ ErtsThrPrgrData *tpd = tmp_thr_prgr_data(NULL);
+ tpd->is_delaying = 1;
+ }
+#endif
+ return (ErtsThrPrgrDelayHandle) umrefc_ix;
+}
+
static ERTS_INLINE int
has_reached_wakeup(ErtsThrPrgrVal wakeup)
{
@@ -931,7 +1099,7 @@ request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
*/
ASSERT(tpd->is_managed);
- ASSERT(tpd->previous.local != ERTS_THR_PRGR_VAL_WAITING);
+ ASSERT(tpd->confirmed != ERTS_THR_PRGR_VAL_WAITING);
if (has_reached_wakeup(value)) {
wakeup_managed(tpd->id);
@@ -946,7 +1114,7 @@ request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
tpd->wakeup_request[wix]));
- if (tpd->previous.local == value) {
+ if (tpd->confirmed == value) {
/*
* We have already confirmed this value. We need to request
* wakeup for a value later than our latest confirmed value in
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index 89486b065b..1416aa6166 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -53,9 +53,22 @@ typedef Uint64 ErtsThrPrgrVal;
#define ERTS_THR_PRGR_WAKEUP_DATA_SIZE 4 /* Need to be an even power of 2. */
typedef struct {
+ ErtsThrPrgrVal next;
+ ErtsThrPrgrVal current;
+ int chk_next_ix;
+ struct {
+ int current;
+ int waiting;
+ } umrefc_ix;
+} ErtsThrPrgrLeaderState;
+
+typedef struct {
int id;
int is_managed;
int is_blocking;
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ int is_delaying; /* managed is always delaying */
+#endif
int is_temporary;
/* --- Part below only for registered threads --- */
@@ -66,11 +79,8 @@ typedef struct {
int leader; /* Needs to be first in the managed threads part */
int active;
- struct {
- ErtsThrPrgrVal local;
- ErtsThrPrgrVal next;
- ErtsThrPrgrVal current;
- } previous;
+ ErtsThrPrgrVal confirmed;
+ ErtsThrPrgrLeaderState leader_state;
} ErtsThrPrgrData;
void erts_thr_progress_fatal_error_block(SWord timeout,
@@ -78,6 +88,16 @@ void erts_thr_progress_fatal_error_block(SWord timeout,
#endif /* ERTS_SMP */
+typedef struct ErtsThrPrgrLaterOp_ ErtsThrPrgrLaterOp;
+struct ErtsThrPrgrLaterOp_ {
+#ifdef ERTS_SMP
+ ErtsThrPrgrVal later;
+#endif
+ void (*func)(void *);
+ void *data;
+ ErtsThrPrgrLaterOp *next;
+};
+
#endif
#if !defined(ERL_THR_PROGRESS_H__) && !defined(ERL_THR_PROGRESS_TSD_TYPE_ONLY)
@@ -111,6 +131,11 @@ typedef struct {
ERTS_THR_PRGR_ATOMIC current;
} ErtsThrPrgr;
+typedef int ErtsThrPrgrDelayHandle;
+#define ERTS_THR_PRGR_DHANDLE_MANAGED ((ErtsThrPrgrDelayHandle) -1)
+/* ERTS_THR_PRGR_DHANDLE_MANAGED implies managed thread */
+#define ERTS_THR_PRGR_DHANDLE_INVALID ((ErtsThrPrgrDelayHandle) -2)
+
extern ErtsThrPrgr erts_thr_prgr__;
void erts_thr_progress_pre_init(void);
@@ -126,6 +151,8 @@ int erts_thr_progress_update(ErtsSchedulerData *esdp);
int erts_thr_progress_leader_update(ErtsSchedulerData *esdp);
void erts_thr_progress_prepare_wait(ErtsSchedulerData *esdp);
void erts_thr_progress_finalize_wait(ErtsSchedulerData *esdp);
+ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay__(void);
+void erts_thr_progress_unmanaged_continue__(int umrefc_ix);
void erts_thr_progress_dbg_print_state(void);
@@ -138,6 +165,11 @@ ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_acqb__(ERTS_THR_PRGR_ATOMIC *a
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_prgr_read_mb__(ERTS_THR_PRGR_ATOMIC *atmc);
ERTS_GLB_INLINE int erts_thr_progress_is_managed_thread(void);
+ERTS_GLB_INLINE ErtsThrPrgrDelayHandle erts_thr_progress_unmanaged_delay(void);
+ERTS_GLB_INLINE void erts_thr_progress_unmanaged_continue(ErtsThrPrgrDelayHandle handle);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ERTS_GLB_INLINE int erts_thr_progress_lc_is_delaying(void);
+#endif
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current_to_later__(ErtsThrPrgrVal val);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_later(ErtsSchedulerData *);
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_progress_current(void);
@@ -219,6 +251,35 @@ erts_thr_progress_is_managed_thread(void)
return tpd && tpd->is_managed;
}
+ERTS_GLB_INLINE ErtsThrPrgrDelayHandle
+erts_thr_progress_unmanaged_delay(void)
+{
+ if (erts_thr_progress_is_managed_thread())
+ return ERTS_THR_PRGR_DHANDLE_MANAGED; /* Nothing to do */
+ else
+ return erts_thr_progress_unmanaged_delay__();
+}
+
+ERTS_GLB_INLINE void
+erts_thr_progress_unmanaged_continue(ErtsThrPrgrDelayHandle handle)
+{
+ ASSERT(handle != ERTS_THR_PRGR_DHANDLE_MANAGED
+ || erts_thr_progress_is_managed_thread());
+ if (handle != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue__(handle);
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+
+ERTS_GLB_INLINE int
+erts_thr_progress_lc_is_delaying(void)
+{
+ ErtsThrPrgrData *tpd = erts_tsd_get(erts_thr_prgr_data_key__);
+ return tpd && tpd->is_delaying;
+}
+
+#endif
+
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_progress_current_to_later__(ErtsThrPrgrVal val)
{
@@ -238,7 +299,7 @@ erts_thr_progress_later(ErtsSchedulerData *esdp)
if (esdp) {
tpd = &esdp->thr_progress_data;
managed_thread:
- val = tpd->previous.local;
+ val = tpd->confirmed;
ERTS_THR_MEMORY_BARRIER;
}
else {
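A minimal sketch of the intended delay/continue usage pattern, assuming erl_thr_progress.h is included; lookup_entry and ex_safe_lookup are placeholders for any lock-free read that must not race with thread-progress driven deallocation:

#include "erl_thr_progress.h"

/* Hypothetical lookup; stands in for any lock-free table read. */
void *lookup_entry(Eterm id);

static void *
ex_safe_lookup(Eterm id)
{
    void *res;
    /* Managed threads get ERTS_THR_PRGR_DHANDLE_MANAGED back and the
     * continue call becomes a no-op; unmanaged threads increment the
     * active umrefc counter, which keeps the leader from completing a
     * thread-progress tick until continue is called. */
    ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
    res = lookup_entry(id);
    erts_thr_progress_unmanaged_continue(dhndl);
    return res;
}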
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index ee47c98009..1dc3ffeb3c 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -258,10 +258,6 @@
#include "sys.h"
-typedef struct { SWord sint[2]; } erts_no_dw_atomic_t;
-typedef SWord erts_no_atomic_t;
-typedef Sint32 erts_no_atomic32_t;
-
#ifdef USE_THREADS
#define ETHR_TRY_INLINE_FUNCS
@@ -411,12 +407,15 @@ typedef struct {
typedef int erts_rwmtx_t;
typedef int erts_tsd_key_t;
typedef int erts_tse_t;
-#define erts_dw_aint_t erts_no_dw_atomic_t
-#define erts_dw_atomic_t erts_no_dw_atomic_t
-#define erts_aint_t SWord
-#define erts_atomic_t erts_no_atomic_t
-#define erts_aint32_t Sint32
-#define erts_atomic32_t erts_no_atomic32_t
+
+typedef struct { SWord sint[2]; } erts_dw_aint_t;
+typedef SWord erts_aint_t;
+typedef Sint32 erts_aint32_t;
+
+#define erts_dw_atomic_t erts_dw_aint_t
+#define erts_atomic_t erts_aint_t
+#define erts_atomic32_t erts_aint32_t
+
#if __GNUC__ > 2
typedef struct { } erts_spinlock_t;
typedef struct { } erts_rwlock_t;
@@ -425,6 +424,14 @@ typedef struct { int gcc_is_buggy; } erts_spinlock_t;
typedef struct { int gcc_is_buggy; } erts_rwlock_t;
#endif
+#ifdef WORDS_BIGENDIAN
+#define ERTS_DW_AINT_LOW_WORD 1
+#define ERTS_DW_AINT_HIGH_WORD 0
+#else
+#define ERTS_DW_AINT_LOW_WORD 0
+#define ERTS_DW_AINT_HIGH_WORD 1
+#endif
+
#define ERTS_MTX_INITER 0
#define ERTS_CND_INITER 0
#define ERTS_THR_INIT_DATA_DEF_INITER 0
@@ -433,6 +440,10 @@ typedef struct { int gcc_is_buggy; } erts_rwlock_t;
#endif /* #ifdef USE_THREADS */
+#define erts_no_dw_atomic_t erts_dw_aint_t
+#define erts_no_atomic_t erts_aint_t
+#define erts_no_atomic32_t erts_aint32_t
+
#define ERTS_AINT_NULL ((erts_aint_t) NULL)
#define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1)))
@@ -522,6 +533,9 @@ ERTS_GLB_INLINE erts_aint_t erts_no_atomic_xchg(erts_no_atomic_t *xchgp,
ERTS_GLB_INLINE erts_aint_t erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
erts_aint_t new,
erts_aint_t expected);
+ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bset(erts_no_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
ERTS_GLB_INLINE void erts_no_atomic32_set(erts_no_atomic32_t *var,
erts_aint32_t i);
ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read(erts_no_atomic32_t *var);
@@ -542,6 +556,9 @@ ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp,
ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
erts_aint32_t new,
erts_aint32_t expected);
+ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
ERTS_GLB_INLINE void erts_spinlock_init_x_opt(erts_spinlock_t *lock,
char *name,
@@ -601,6 +618,78 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#ifdef USE_THREADS
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_nob(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_ddrb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_rb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_wb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_acqb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_relb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_mb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_nob(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_ddrb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_rb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_wb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_acqb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_relb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_mb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+#define ERTS_ATOMIC_BSET_IMPL__(Type, ReadOp, CmpxchgOp, VarP, Mask, Set) \
+do { \
+ Type act = ReadOp((VarP)); \
+ while (1) { \
+ Type exp = act; \
+ Type new = exp & ~(Mask); \
+ new |= ((Mask) & (Set)); \
+ act = CmpxchgOp((VarP), new, exp); \
+ if (act == exp) \
+ return act; \
+ } \
+} while (0)
+#endif
+
/*
* See "Documentation of atomics and memory barriers" at the top
* of this file for info on atomics.
@@ -659,6 +748,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_nob ethr_atomic_xchg
#define erts_atomic_cmpxchg_nob ethr_atomic_cmpxchg
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_nob(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_mb ethr_atomic_init_mb
#define erts_atomic_set_mb ethr_atomic_set_mb
#define erts_atomic_read_mb ethr_atomic_read_mb
@@ -673,6 +775,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_mb ethr_atomic_xchg_mb
#define erts_atomic_cmpxchg_mb ethr_atomic_cmpxchg_mb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_mb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_mb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_acqb ethr_atomic_init_acqb
#define erts_atomic_set_acqb ethr_atomic_set_acqb
#define erts_atomic_read_acqb ethr_atomic_read_acqb
@@ -687,6 +802,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_acqb ethr_atomic_xchg_acqb
#define erts_atomic_cmpxchg_acqb ethr_atomic_cmpxchg_acqb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_acqb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_acqb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_relb ethr_atomic_init_relb
#define erts_atomic_set_relb ethr_atomic_set_relb
#define erts_atomic_read_relb ethr_atomic_read_relb
@@ -701,6 +829,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_relb ethr_atomic_xchg_relb
#define erts_atomic_cmpxchg_relb ethr_atomic_cmpxchg_relb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_relb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_relb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_ddrb ethr_atomic_init_ddrb
#define erts_atomic_set_ddrb ethr_atomic_set_ddrb
#define erts_atomic_read_ddrb ethr_atomic_read_ddrb
@@ -715,6 +856,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_ddrb ethr_atomic_xchg_ddrb
#define erts_atomic_cmpxchg_ddrb ethr_atomic_cmpxchg_ddrb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_ddrb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_ddrb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_rb ethr_atomic_init_rb
#define erts_atomic_set_rb ethr_atomic_set_rb
#define erts_atomic_read_rb ethr_atomic_read_rb
@@ -729,6 +883,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_rb ethr_atomic_xchg_rb
#define erts_atomic_cmpxchg_rb ethr_atomic_cmpxchg_rb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_rb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_rb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic_init_wb ethr_atomic_init_wb
#define erts_atomic_set_wb ethr_atomic_set_wb
#define erts_atomic_read_wb ethr_atomic_read_wb
@@ -743,6 +910,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_xchg_wb ethr_atomic_xchg_wb
#define erts_atomic_cmpxchg_wb ethr_atomic_cmpxchg_wb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint_t
+erts_atomic_read_bset_wb(erts_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint_t,
+ ethr_atomic_read,
+ ethr_atomic_cmpxchg_wb,
+ var, mask, set);
+}
+#endif
+
/* 32-bit atomics */
#define erts_atomic32_init_nob ethr_atomic32_init
@@ -759,6 +939,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_nob ethr_atomic32_xchg
#define erts_atomic32_cmpxchg_nob ethr_atomic32_cmpxchg
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_nob(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_mb ethr_atomic32_init_mb
#define erts_atomic32_set_mb ethr_atomic32_set_mb
#define erts_atomic32_read_mb ethr_atomic32_read_mb
@@ -773,6 +966,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_mb ethr_atomic32_xchg_mb
#define erts_atomic32_cmpxchg_mb ethr_atomic32_cmpxchg_mb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_mb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_mb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_acqb ethr_atomic32_init_acqb
#define erts_atomic32_set_acqb ethr_atomic32_set_acqb
#define erts_atomic32_read_acqb ethr_atomic32_read_acqb
@@ -787,6 +993,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_acqb ethr_atomic32_xchg_acqb
#define erts_atomic32_cmpxchg_acqb ethr_atomic32_cmpxchg_acqb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_acqb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_acqb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_relb ethr_atomic32_init_relb
#define erts_atomic32_set_relb ethr_atomic32_set_relb
#define erts_atomic32_read_relb ethr_atomic32_read_relb
@@ -801,6 +1020,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_relb ethr_atomic32_xchg_relb
#define erts_atomic32_cmpxchg_relb ethr_atomic32_cmpxchg_relb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_relb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_relb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_ddrb ethr_atomic32_init_ddrb
#define erts_atomic32_set_ddrb ethr_atomic32_set_ddrb
#define erts_atomic32_read_ddrb ethr_atomic32_read_ddrb
@@ -815,6 +1047,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_ddrb ethr_atomic32_xchg_ddrb
#define erts_atomic32_cmpxchg_ddrb ethr_atomic32_cmpxchg_ddrb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_ddrb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_ddrb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_rb ethr_atomic32_init_rb
#define erts_atomic32_set_rb ethr_atomic32_set_rb
#define erts_atomic32_read_rb ethr_atomic32_read_rb
@@ -829,6 +1074,19 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_rb ethr_atomic32_xchg_rb
#define erts_atomic32_cmpxchg_rb ethr_atomic32_cmpxchg_rb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_rb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_rb,
+ var, mask, set);
+}
+#endif
+
#define erts_atomic32_init_wb ethr_atomic32_init_wb
#define erts_atomic32_set_wb ethr_atomic32_set_wb
#define erts_atomic32_read_wb ethr_atomic32_read_wb
@@ -843,6 +1101,21 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_xchg_wb ethr_atomic32_xchg_wb
#define erts_atomic32_cmpxchg_wb ethr_atomic32_cmpxchg_wb
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE erts_aint32_t
+erts_atomic32_read_bset_wb(erts_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ ERTS_ATOMIC_BSET_IMPL__(erts_aint32_t,
+ ethr_atomic32_read,
+ ethr_atomic32_cmpxchg_wb,
+ var, mask, set);
+}
+#endif
+
+#undef ERTS_ATOMIC_BSET_IMPL__
+
#else /* !USE_THREADS */
/* Double word size atomics */
@@ -897,6 +1170,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_nob erts_no_atomic_read_band
#define erts_atomic_xchg_nob erts_no_atomic_xchg
#define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_nob erts_no_atomic_read_bset
#define erts_atomic_init_mb erts_no_atomic_set
#define erts_atomic_set_mb erts_no_atomic_set
@@ -911,6 +1185,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_mb erts_no_atomic_read_band
#define erts_atomic_xchg_mb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_mb erts_no_atomic_read_bset
#define erts_atomic_init_acqb erts_no_atomic_set
#define erts_atomic_set_acqb erts_no_atomic_set
@@ -925,6 +1200,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_acqb erts_no_atomic_read_band
#define erts_atomic_xchg_acqb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_acqb erts_no_atomic_read_bset
#define erts_atomic_init_relb erts_no_atomic_set
#define erts_atomic_set_relb erts_no_atomic_set
@@ -939,6 +1215,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_relb erts_no_atomic_read_band
#define erts_atomic_xchg_relb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_relb erts_no_atomic_read_bset
#define erts_atomic_init_ddrb erts_no_atomic_set
#define erts_atomic_set_ddrb erts_no_atomic_set
@@ -953,6 +1230,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_ddrb erts_no_atomic_read_band
#define erts_atomic_xchg_ddrb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_ddrb erts_no_atomic_read_bset
#define erts_atomic_init_rb erts_no_atomic_set
#define erts_atomic_set_rb erts_no_atomic_set
@@ -967,6 +1245,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_rb erts_no_atomic_read_band
#define erts_atomic_xchg_rb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_rb erts_no_atomic_read_bset
#define erts_atomic_init_wb erts_no_atomic_set
#define erts_atomic_set_wb erts_no_atomic_set
@@ -981,6 +1260,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic_read_band_wb erts_no_atomic_read_band
#define erts_atomic_xchg_wb erts_no_atomic_xchg
#define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
+#define erts_atomic_read_bset_wb erts_no_atomic_read_bset
/* 32-bit atomics */
@@ -997,6 +1277,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_nob erts_no_atomic32_read_band
#define erts_atomic32_xchg_nob erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_nob erts_no_atomic32_read_bset
#define erts_atomic32_init_mb erts_no_atomic32_set
#define erts_atomic32_set_mb erts_no_atomic32_set
@@ -1011,6 +1292,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_mb erts_no_atomic32_read_band
#define erts_atomic32_xchg_mb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_mb erts_no_atomic32_read_bset
#define erts_atomic32_init_acqb erts_no_atomic32_set
#define erts_atomic32_set_acqb erts_no_atomic32_set
@@ -1025,6 +1307,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_acqb erts_no_atomic32_read_band
#define erts_atomic32_xchg_acqb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_acqb erts_no_atomic32_read_bset
#define erts_atomic32_init_relb erts_no_atomic32_set
#define erts_atomic32_set_relb erts_no_atomic32_set
@@ -1039,6 +1322,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_relb erts_no_atomic32_read_band
#define erts_atomic32_xchg_relb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_relb erts_no_atomic32_read_bset
#define erts_atomic32_init_ddrb erts_no_atomic32_set
#define erts_atomic32_set_ddrb erts_no_atomic32_set
@@ -1053,6 +1337,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_ddrb erts_no_atomic32_read_band
#define erts_atomic32_xchg_ddrb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
#define erts_atomic32_init_rb erts_no_atomic32_set
#define erts_atomic32_set_rb erts_no_atomic32_set
@@ -1067,6 +1352,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_rb erts_no_atomic32_read_band
#define erts_atomic32_xchg_rb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_rb erts_no_atomic32_read_bset
#define erts_atomic32_init_wb erts_no_atomic32_set
#define erts_atomic32_set_wb erts_no_atomic32_set
@@ -1081,6 +1367,7 @@ ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
#define erts_atomic32_read_band_wb erts_no_atomic32_read_band
#define erts_atomic32_xchg_wb erts_no_atomic32_xchg
#define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
+#define erts_atomic32_read_bset_wb erts_no_atomic32_read_bset
#endif /* !USE_THREADS */
@@ -1845,6 +2132,17 @@ erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp,
return old;
}
+ERTS_GLB_INLINE erts_aint_t
+erts_no_atomic_read_bset(erts_no_atomic_t *var,
+ erts_aint_t mask,
+ erts_aint_t set)
+{
+ erts_aint_t old = *var;
+ *var &= ~mask;
+ *var |= (mask & set);
+ return old;
+}
+
/* atomic32 */
ERTS_GLB_INLINE void
@@ -1932,6 +2230,17 @@ erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp,
return old;
}
+ERTS_GLB_INLINE erts_aint32_t
+erts_no_atomic32_read_bset(erts_no_atomic32_t *var,
+ erts_aint32_t mask,
+ erts_aint32_t set)
+{
+ erts_aint32_t old = *var;
+ *var &= ~mask;
+ *var |= (mask & set);
+ return old;
+}
+
/* spinlock */
ERTS_GLB_INLINE void
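For readability, a sketch of what the ERTS_ATOMIC_BSET_IMPL__ retry loop above amounts to for the nob variant, written out as a plain function; ex_read_bset_nob is illustrative only and assumes a threaded (USE_THREADS) build:

#include "erl_threads.h"

static erts_aint_t
ex_read_bset_nob(erts_atomic_t *var, erts_aint_t mask, erts_aint_t set)
{
    erts_aint_t act = ethr_atomic_read(var);
    while (1) {
        erts_aint_t exp = act;
        erts_aint_t new = (exp & ~mask) | (mask & set);
        /* Retry until the compare-and-swap succeeds; on failure cmpxchg
         * returns the fresh value, so no extra read is needed before the
         * next attempt. */
        act = ethr_atomic_cmpxchg(var, new, exp);
        if (act == exp)
            return act;  /* old value of the variable */
    }
}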
diff --git a/erts/emulator/beam/erl_time.h b/erts/emulator/beam/erl_time.h
index 6c6e193818..4bbdcaa3e3 100644
--- a/erts/emulator/beam/erl_time.h
+++ b/erts/emulator/beam/erl_time.h
@@ -118,9 +118,11 @@ ERTS_GLB_INLINE void erts_do_time_add(erts_short_time_t elapsed)
void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec);
#endif
+typedef UWord erts_approx_time_t;
+erts_approx_time_t erts_get_approx_time(void);
+
void erts_get_timeval(SysTimeval *tv);
erts_time_t erts_get_time(void);
-void erts_get_emu_time(SysTimeval *);
ERTS_GLB_INLINE int erts_cmp_timeval(SysTimeval *t1p, SysTimeval *t2p);
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 04147408d5..3272a5326d 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -91,6 +91,41 @@ static SysTimeval then; /* Used in get_now */
static SysTimeval last_emu_time; /* Used in erts_get_emu_time() */
SysTimeval erts_first_emu_time; /* Used in erts_get_emu_time() */
+union {
+ erts_smp_atomic_t time;
+ char align[ERTS_CACHE_LINE_SIZE];
+} approx erts_align_attribute(ERTS_CACHE_LINE_SIZE);
+
+static void
+init_approx_time(void)
+{
+ erts_smp_atomic_init_nob(&approx.time, 0);
+}
+
+static ERTS_INLINE erts_approx_time_t
+get_approx_time(void)
+{
+ return (erts_approx_time_t) erts_smp_atomic_read_nob(&approx.time);
+}
+
+static ERTS_INLINE void
+update_approx_time(SysTimeval *tv)
+{
+ erts_approx_time_t new_secs = (erts_approx_time_t) tv->tv_sec;
+ erts_approx_time_t old_secs = get_approx_time();
+ if (old_secs != new_secs)
+ erts_smp_atomic_set_nob(&approx.time, new_secs);
+}
+
+/*
+ * erts_get_approx_time() returns an *approximate* time
+ * in seconds. NOTE that this time may jump backwards!!!
+ */
+erts_approx_time_t
+erts_get_approx_time(void)
+{
+ return get_approx_time();
+}
#ifdef HAVE_GETHRTIME
@@ -398,6 +433,8 @@ erts_init_time_sup(void)
{
erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday");
+ init_approx_time();
+
last_emu_time.tv_sec = 0;
last_emu_time.tv_usec = 0;
@@ -417,7 +454,7 @@ erts_init_time_sup(void)
gtv = inittv;
then.tv_sec = then.tv_usec = 0;
- erts_get_emu_time(&erts_first_emu_time);
+ erts_deliver_time();
return CLOCK_RESOLUTION;
}
@@ -883,6 +920,8 @@ get_now(Uint* megasec, Uint* sec, Uint* microsec)
*megasec = (Uint) (now.tv_sec / 1000000);
*sec = (Uint) (now.tv_sec % 1000000);
*microsec = (Uint) (now.tv_usec);
+
+ update_approx_time(&now);
}
void
@@ -895,6 +934,8 @@ get_sys_now(Uint* megasec, Uint* sec, Uint* microsec)
*megasec = (Uint) (now.tv_sec / 1000000);
*sec = (Uint) (now.tv_sec % 1000000);
*microsec = (Uint) (now.tv_usec);
+
+ update_approx_time(&now);
}
@@ -911,6 +952,8 @@ void erts_deliver_time(void) {
do_erts_deliver_time(&now);
erts_smp_mtx_unlock(&erts_timeofday_mtx);
+
+ update_approx_time(&now);
}
/* get *real* time (not ticks) remaining until next timeout - if there
@@ -959,6 +1002,7 @@ void erts_get_timeval(SysTimeval *tv)
erts_smp_mtx_lock(&erts_timeofday_mtx);
get_tolerant_timeofday(tv);
erts_smp_mtx_unlock(&erts_timeofday_mtx);
+ update_approx_time(tv);
}
erts_time_t
@@ -971,7 +1015,9 @@ erts_get_time(void)
get_tolerant_timeofday(&sys_tv);
erts_smp_mtx_unlock(&erts_timeofday_mtx);
-
+
+ update_approx_time(&sys_tv);
+
return sys_tv.tv_sec;
}
@@ -987,38 +1033,3 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) {
*sec = (Uint)(tp.tv_sec % 1000000);
}
#endif
-
-
-/*
- * erts_get_emu_time() is similar to get_now(). You will
- * always get different times from erts_get_emu_time(), but they
- * may equal a time from get_now().
- *
- * erts_get_emu_time() is only used internally in the emulator in
- * order to order emulator internal events.
- */
-
-void
-erts_get_emu_time(SysTimeval *this_emu_time_p)
-{
- erts_smp_mtx_lock(&erts_timeofday_mtx);
-
- get_tolerant_timeofday(this_emu_time_p);
-
- /* Make sure time is later than last */
- if (last_emu_time.tv_sec > this_emu_time_p->tv_sec ||
- (last_emu_time.tv_sec == this_emu_time_p->tv_sec
- && last_emu_time.tv_usec >= this_emu_time_p->tv_usec)) {
- *this_emu_time_p = last_emu_time;
- this_emu_time_p->tv_usec++;
- }
- /* Check for carry from above + general reasonability */
- if (this_emu_time_p->tv_usec >= 1000000) {
- this_emu_time_p->tv_usec = 0;
- this_emu_time_p->tv_sec++;
- }
-
- last_emu_time = *this_emu_time_p;
-
- erts_smp_mtx_unlock(&erts_timeofday_mtx);
-}
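A minimal sketch of a consumer of the new approximate-time cache, assuming erl_time.h is included; ex_entry_is_stale and the 60 second threshold are made up:

#include "erl_time.h"

static int
ex_entry_is_stale(erts_approx_time_t created_sec)
{
    /* erts_get_approx_time() reads a cache-line aligned atomic that the
     * ordinary time functions refresh as a side effect, so this costs no
     * lock.  The value is approximate and may jump backwards, so treat
     * it as a hint, never as a monotonic clock. */
    return erts_get_approx_time() > created_sec + 60;
}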
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 009ca1eb52..848877d43e 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -44,9 +44,9 @@
#undef DEBUG_PRINTOUTS
#endif
-extern Eterm beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
-extern Eterm beam_return_trace[1]; /* OpCode(i_return_trace) */
-extern Eterm beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
+extern BeamInstr beam_return_to_trace[1]; /* OpCode(i_return_to_trace) */
+extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */
+extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
/* Pseudo export entries. Never filled in with data, only used to
yield unique pointers of the correct type. */
@@ -64,7 +64,7 @@ int erts_cpu_timestamp;
#endif
static erts_smp_mtx_t smq_mtx;
-static erts_smp_mtx_t sys_trace_mtx;
+static erts_smp_rwmtx_t sys_trace_rwmtx;
enum ErtsSysMsgType {
SYS_MSG_TYPE_UNDEFINED,
@@ -91,7 +91,12 @@ static void init_sys_msg_dispatcher(void);
#endif
void erts_init_trace(void) {
- erts_smp_mtx_init(&sys_trace_mtx, "sys_tracers");
+ erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+
+ erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers");
+
#ifdef HAVE_ERTS_NOW_CPU
erts_cpu_timestamp = 0;
#endif
@@ -151,8 +156,8 @@ do { (RES) = (TPID); } while(0)
#define ERTS_TRACER_REF_TYPE Process *
#define ERTS_GET_TRACER_REF(RES, TPID, TRACEE_FLGS) \
do { \
- (RES) = process_tab[internal_pid_index((TPID))]; \
- if (INVALID_PID((RES), (TPID)) || !((RES)->trace_flags & F_TRACER)) { \
+ (RES) = erts_proc_lookup((TPID)); \
+ if (!(RES) || !(ERTS_TRACE_FLAGS((RES)) & F_TRACER)) { \
(TPID) = NIL; \
(TRACEE_FLGS) &= ~TRACEE_FLAGS; \
return; \
@@ -169,10 +174,10 @@ erts_system_profile_setup_active_schedulers(void)
active_sched = erts_active_schedulers();
}
-void
-erts_trace_check_exiting(Eterm exiting)
+static void
+exiting_reset(Eterm exiting)
{
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
if (exiting == default_tracer) {
default_tracer = NIL;
default_trace_flags &= TRACEE_FLAGS;
@@ -202,29 +207,49 @@ erts_trace_check_exiting(Eterm exiting)
erts_system_profile_clear(NULL);
#endif
}
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
+}
+
+void
+erts_trace_check_exiting(Eterm exiting)
+{
+ int reset = 0;
+ erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
+ if (exiting == default_tracer)
+ reset = 1;
+ else if (exiting == system_seq_tracer)
+ reset = 1;
+ else if (exiting == system_monitor)
+ reset = 1;
+ else if (exiting == system_profile)
+ reset = 1;
+ erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
+ if (reset)
+ exiting_reset(exiting);
+}
+
+static ERTS_INLINE int
+is_valid_tracer(Eterm tracer)
+{
+ return erts_proc_lookup(tracer) || erts_is_valid_tracer_port(tracer);
}
Eterm
erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, Eterm new)
{
- Eterm old = THE_NON_VALUE;
+ Eterm old;
- if (new != am_false) {
- if (!erts_pid2proc(c_p, c_p_locks, new, 0)
- && !erts_is_valid_tracer_port(new)) {
- return old;
- }
- }
+ if (new != am_false && !is_valid_tracer(new))
+ return THE_NON_VALUE;
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
old = system_seq_tracer;
system_seq_tracer = new;
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old);
#endif
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
return old;
}
@@ -232,12 +257,12 @@ Eterm
erts_get_system_seq_tracer(void)
{
Eterm st;
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
st = system_seq_tracer;
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "get seq tracer %T\n", st);
#endif
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
return st;
}
@@ -250,7 +275,7 @@ get_default_tracing(Uint *flagsp, Eterm *tracerp)
if (is_nil(default_tracer)) {
default_trace_flags &= ~TRACEE_FLAGS;
} else if (is_internal_pid(default_tracer)) {
- if (!erts_pid2proc(NULL, 0, default_tracer, 0)) {
+ if (!erts_proc_lookup(default_tracer)) {
reset_tracer:
default_trace_flags &= ~TRACEE_FLAGS;
default_tracer = NIL;
@@ -270,7 +295,7 @@ get_default_tracing(Uint *flagsp, Eterm *tracerp)
void
erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp)
{
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
if (flagsp) {
if (setflags)
default_trace_flags |= *flagsp;
@@ -280,48 +305,48 @@ erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp)
if (tracerp)
default_tracer = *tracerp;
get_default_tracing(flagsp, tracerp);
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
}
void
erts_get_default_tracing(Uint *flagsp, Eterm *tracerp)
{
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
get_default_tracing(flagsp, tracerp);
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
}
void
erts_set_system_monitor(Eterm monitor)
{
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
system_monitor = monitor;
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
}
Eterm
erts_get_system_monitor(void)
{
Eterm monitor;
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
monitor = system_monitor;
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
return monitor;
}
/* Performance monitoring */
void erts_set_system_profile(Eterm profile) {
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
system_profile = profile;
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
}
Eterm
erts_get_system_profile(void) {
Eterm profile;
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rlock(&sys_trace_rwmtx);
profile = system_profile;
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
return profile;
}
@@ -384,13 +409,9 @@ WRITE_SYS_MSG_TO_PORT(Eterm unused_to,
}
#ifndef ERTS_SMP
- if (!INVALID_TRACER_PORT(trace_port, trace_port->id)) {
+ if (!INVALID_TRACER_PORT(trace_port, trace_port->common.id))
#endif
erts_raw_port_command(trace_port, buffer, ptr-buffer);
-#ifndef ERTS_SMP
- erts_port_release(trace_port);
- }
-#endif
erts_free(ERTS_ALC_T_TMP, (void *) buffer);
}
@@ -420,7 +441,7 @@ do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
message = TUPLE5(hp, am_trace_ts, pid, am_out, mfarity, timestamp);
/* Note, hp is deliberately NOT incremented since it will be reused */
- do_send_to_port(trace_port->id,
+ do_send_to_port(trace_port->common.id,
trace_port,
pid,
SYS_MSG_TYPE_UNDEFINED,
@@ -430,7 +451,7 @@ do_send_schedfix_to_port(Port *trace_port, Eterm pid, Eterm timestamp) {
hp += 5;
hp = patch_ts(message, hp);
- do_send_to_port(trace_port->id,
+ do_send_to_port(trace_port->common.id,
trace_port,
pid,
SYS_MSG_TYPE_UNDEFINED,
@@ -465,13 +486,13 @@ send_to_port(Process *c_p, Eterm message,
trace_port = NULL;
#else
- if (is_not_internal_port(*tracer_pid))
- goto invalid_tracer_port;
- trace_port = &erts_port[internal_port_index(*tracer_pid)];
+ trace_port = erts_id2port_sflgs(*tracer_pid,
+ NULL,
+ 0,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (INVALID_TRACER_PORT(trace_port, *tracer_pid)) {
- invalid_tracer_port:
+ if (!trace_port) {
*tracee_flags &= ~TRACEE_FLAGS;
*tracer_pid = NIL;
return;
@@ -487,10 +508,11 @@ send_to_port(Process *c_p, Eterm message,
#endif
do_send_to_port(*tracer_pid,
trace_port,
- c_p ? c_p->id : NIL,
+ c_p ? c_p->common.id : NIL,
SYS_MSG_TYPE_TRACE,
message);
#ifndef ERTS_SMP
+ erts_port_release(trace_port);
return;
}
@@ -521,7 +543,7 @@ send_to_port(Process *c_p, Eterm message,
trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
do_send_to_port(*tracer_pid,
trace_port,
- c_p ? c_p->id : NIL,
+ c_p ? c_p->common.id : NIL,
SYS_MSG_TYPE_TRACE,
message);
@@ -535,8 +557,11 @@ send_to_port(Process *c_p, Eterm message,
* just after writning the real trace message, and now gets scheduled
* in again.
*/
- do_send_schedfix_to_port(trace_port, c_p->id, ts);
+ do_send_schedfix_to_port(trace_port, c_p->common.id, ts);
}
+
+ erts_port_release(trace_port);
+
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#endif
@@ -566,23 +591,27 @@ profile_send(Eterm from, Eterm message) {
Port *profiler_port = NULL;
/* not smp */
-
-
- profiler_port = &erts_port[internal_port_index(profiler)];
-
- do_send_to_port(profiler,
- profiler_port,
- NIL, /* or current process->id */
- SYS_MSG_TYPE_SYSPROF,
- message);
+
+ profiler_port = erts_id2port_sflgs(profiler,
+ NULL,
+ 0,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
+ if (profiler_port) {
+ do_send_to_port(profiler,
+ profiler_port,
+ NIL, /* or current process->common.id */
+ SYS_MSG_TYPE_SYSPROF,
+ message);
+ erts_port_release(profiler_port);
+ }
} else {
- ASSERT(is_internal_pid(profiler)
- && internal_pid_index(profiler) < erts_max_processes);
+ ASSERT(is_internal_pid(profiler));
- profile_p = process_tab[internal_pid_index(profiler)];
+ profile_p = erts_proc_lookup(profiler);
- if (INVALID_PID(profile_p, profiler)) return;
+ if (!profile_p)
+ return;
sz = size_object(message);
hp = erts_alloc_message_heap(sz, &bp, &off_heap, profile_p, 0);
@@ -626,13 +655,11 @@ seq_trace_send_to_port(Process *c_p,
trace_port = NULL;
#else
- if (is_not_internal_port(seq_tracer))
- goto invalid_tracer_port;
-
- trace_port = &erts_port[internal_port_index(seq_tracer)];
-
- if (INVALID_TRACER_PORT(trace_port, seq_tracer)) {
- invalid_tracer_port:
+ trace_port = erts_id2port_sflgs(seq_tracer,
+ NULL,
+ 0,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
+ if (!trace_port) {
system_seq_tracer = am_false;
#ifndef ERTS_SMP
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -645,11 +672,12 @@ seq_trace_send_to_port(Process *c_p,
#endif
do_send_to_port(seq_tracer,
trace_port,
- c_p ? c_p->id : NIL,
+ c_p ? c_p->common.id : NIL,
SYS_MSG_TYPE_SEQTRACE,
message);
#ifndef ERTS_SMP
+ erts_port_release(trace_port);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
return;
}
@@ -675,7 +703,7 @@ seq_trace_send_to_port(Process *c_p,
trace_port->control_flags &= ~PORT_CONTROL_FLAG_HEAVY;
do_send_to_port(seq_tracer,
trace_port,
- c_p ? c_p->id : NIL,
+ c_p ? c_p->common.id : NIL,
SYS_MSG_TYPE_SEQTRACE,
message);
@@ -689,15 +717,20 @@ seq_trace_send_to_port(Process *c_p,
* just after writing the real trace message, and now gets scheduled
* in again.
*/
- do_send_schedfix_to_port(trace_port, c_p->id, ts);
+ do_send_schedfix_to_port(trace_port, c_p->common.id, ts);
}
+
+ erts_port_release(trace_port);
+
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#endif
}
#define TS_HEAP_WORDS 5
-#define TS_SIZE(p) (((p)->trace_flags & F_TIMESTAMP) ? TS_HEAP_WORDS : 0)
+#define TS_SIZE(p) ((ERTS_TRACE_FLAGS((p)) & F_TIMESTAMP) \
+ ? TS_HEAP_WORDS \
+ : 0)
/*
* Patch a timestamp into a tuple. The tuple must be the last thing
@@ -732,17 +765,17 @@ send_to_tracer(Process *tracee,
erts_smp_mtx_lock(&smq_mtx);
- if (tracee->trace_flags & F_TIMESTAMP)
+ if (ERTS_TRACE_FLAGS(tracee) & F_TIMESTAMP)
*hpp = patch_ts(msg, *hpp);
- if (is_internal_pid(tracee->tracer_proc))
- ERTS_ENQ_TRACE_MSG(tracee->id, tracer_ref, msg, bp);
+ if (is_internal_pid(ERTS_TRACER_PROC(tracee)))
+ ERTS_ENQ_TRACE_MSG(tracee->common.id, tracer_ref, msg, bp);
else {
- ASSERT(is_internal_port(tracee->tracer_proc));
+ ASSERT(is_internal_port(ERTS_TRACER_PROC(tracee)));
send_to_port(no_fake_sched ? NULL : tracee,
msg,
- &tracee->tracer_proc,
- &tracee->trace_flags);
+ &ERTS_TRACER_PROC(tracee),
+ &ERTS_TRACE_FLAGS(tracee));
}
erts_smp_mtx_unlock(&smq_mtx);
@@ -760,7 +793,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
ERTS_TRACER_REF_TYPE tracer_ref = ERTS_NULL_TRACER_REF;
int sched_no, curr_func, to_port, no_fake_sched;
- if (is_nil(p->tracer_proc))
+ if (is_nil(ERTS_TRACER_PROC(p)))
return;
no_fake_sched = never_fake_sched;
@@ -780,22 +813,18 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
}
sched_no = IS_TRACED_FL(p, F_TRACE_SCHED_NO);
- to_port = is_internal_port(p->tracer_proc);
+ to_port = is_internal_port(ERTS_TRACER_PROC(p));
if (!to_port) {
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
}
- if (ERTS_PROC_IS_EXITING(p)
-#ifndef ERTS_SMP
- || p->status == P_FREE
-#endif
- ) {
+ if (ERTS_PROC_IS_EXITING(p))
curr_func = 0;
- }
else {
if (!p->current)
p->current = find_function_from_pc(p->i);
@@ -824,7 +853,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
}
if (!sched_no) {
- mess = TUPLE4(hp, am_trace, p->id, what, tmp);
+ mess = TUPLE4(hp, am_trace, p->common.id, what, tmp);
hp += 5;
}
else {
@@ -833,7 +862,7 @@ trace_sched_aux(Process *p, Eterm what, int never_fake_sched)
#else
Eterm sched_id = make_small(1);
#endif
- mess = TUPLE5(hp, am_trace, p->id, what, sched_id, tmp);
+ mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, tmp);
hp += 6;
}
@@ -874,7 +903,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
operation = am_send;
if (is_internal_pid(to)) {
- if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, to, 0))
+ if (!erts_proc_lookup(to))
goto send_to_non_existing_process;
}
else if(is_external_pid(to)
@@ -885,19 +914,19 @@ trace_send(Process *p, Eterm to, Eterm msg)
operation = am_atom_put(s, sys_strlen(s));
}
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (11)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE5(hp, am_trace, p->id, operation, msg, to);
+ mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -907,10 +936,11 @@ trace_send(Process *p, Eterm to, Eterm msg)
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
sz_msg = size_object(msg);
sz_to = size_object(to);
@@ -926,16 +956,16 @@ trace_send(Process *p, Eterm to, Eterm msg)
sz_msg,
&hp,
off_heap);
- mess = TUPLE5(hp, am_trace, p->id/* Local pid */, operation, msg, to);
+ mess = TUPLE5(hp, am_trace, p->common.id, operation, msg, to);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -950,19 +980,19 @@ trace_receive(Process *rp, Eterm msg)
size_t sz_msg;
Eterm* hp;
- if (is_internal_port(rp->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(rp))) {
#define LOCAL_HEAP_SIZE (10)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE4(hp, am_trace, rp->id, am_receive, msg);
+ mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (rp->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- send_to_port(rp, mess, &rp->tracer_proc, &rp->trace_flags);
+ send_to_port(rp, mess, &ERTS_TRACER_PROC(rp), &ERTS_TRACE_FLAGS(rp));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -972,10 +1002,11 @@ trace_receive(Process *rp, Eterm msg)
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(rp->tracer_proc)
- && internal_pid_index(rp->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(rp)));
- ERTS_GET_TRACER_REF(tracer_ref, rp->tracer_proc, rp->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(rp),
+ ERTS_TRACE_FLAGS(rp));
sz_msg = size_object(msg);
@@ -984,16 +1015,16 @@ trace_receive(Process *rp, Eterm msg)
hp = ERTS_ALLOC_SYSMSG_HEAP(hsz, &bp, &off_heap, tracer_ref);
msg = copy_struct(msg, sz_msg, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, rp->id/* Local pid */, am_receive, msg);
+ mess = TUPLE4(hp, am_trace, rp->common.id, am_receive, msg);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (rp->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(rp) & F_TIMESTAMP) {
patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(rp->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(rp->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -1003,14 +1034,14 @@ seq_trace_update_send(Process *p)
{
Eterm seq_tracer = erts_get_system_seq_tracer();
ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
- if ( (p->id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL)
+ if ( (p->common.id == seq_tracer) || (SEQ_TRACE_TOKEN(p) == NIL)
#ifdef USE_VM_PROBES
|| (SEQ_TRACE_TOKEN(p) == am_have_dt_utag)
#endif
) {
return 0;
}
- SEQ_TRACE_TOKEN_SENDER(p) = p->id; /* Internal pid */
+ SEQ_TRACE_TOKEN_SENDER(p) = p->common.id;
SEQ_TRACE_TOKEN_SERIAL(p) =
make_small(++(p -> seq_trace_clock));
SEQ_TRACE_TOKEN_LASTCNT(p) =
@@ -1047,7 +1078,7 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
ASSERT(is_tuple(token) || is_nil(token));
if (SEQ_TRACE_T_SENDER(token) == seq_tracer || token == NIL ||
- (process && process->trace_flags & F_SENSITIVE)) {
+ (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE)) {
return;
}
@@ -1111,13 +1142,12 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
Uint sz_label, sz_lastcnt_serial, sz_msg, sz_ts, sz_sender,
sz_exitfrom, sz_receiver;
- ASSERT(is_internal_pid(seq_tracer)
- && internal_pid_index(seq_tracer) < erts_max_processes);
+ ASSERT(is_internal_pid(seq_tracer));
#ifndef ERTS_SMP
- tracer = process_tab[internal_pid_index(seq_tracer)];
- if (INVALID_PID(tracer, tracer->id)) {
+ tracer = erts_proc_lookup(seq_tracer);
+ if (!tracer) {
system_seq_tracer = am_false;
return; /* no need to send anything */
}
@@ -1226,17 +1256,17 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
hp += 4;
}
- mess = TUPLE4(hp, am_trace, p->id, am_return_to, mfa);
+ mess = TUPLE4(hp, am_trace, p->common.id, am_return_to, mfa);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- if (is_internal_port(p->tracer_proc)) {
- send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
+ send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
} else {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
@@ -1246,10 +1276,11 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
/*
* Find the tracer.
*/
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
size = size_object(mess);
@@ -1259,7 +1290,7 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
* Copy the trace message into the buffer and enqueue it.
*/
mess = copy_struct(mess, size, &hp, off_heap);
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
}
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
@@ -1288,25 +1319,25 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &p->tracer_proc;
+ tracer_pid = &ERTS_TRACER_PROC(p);
}
if (is_nil(*tracer_pid)) {
/* Trace disabled */
return;
}
ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->id) {
+ if (*tracer_pid == p->common.id) {
/* Do not generate trace messages to oneself */
return;
}
- if (tracer_pid == &p->tracer_proc) {
+ if (tracer_pid == &ERTS_TRACER_PROC(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
- tracee_flags = &p->trace_flags;
+ tracee_flags = &ERTS_TRACE_FLAGS(p);
#ifdef ERTS_SMP
- tracee = p->id;
+ tracee = p->common.id;
#endif
} else {
/* Tracer not specified in process structure =>
@@ -1335,7 +1366,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
hp = local_heap;
mfa = TUPLE3(hp, mod, name, make_small(arity));
hp += 4;
- mess = TUPLE5(hp, am_trace, p->id, am_return_from, mfa, retval);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
if (*tracee_flags & F_TIMESTAMP) {
@@ -1355,8 +1386,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
Eterm* limit;
#endif
- ASSERT(is_internal_pid(*tracer_pid)
- && internal_pid_index(*tracer_pid) < erts_max_processes);
+ ASSERT(is_internal_pid(*tracer_pid));
ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
@@ -1378,7 +1408,7 @@ erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid)
mfa = TUPLE3(hp, mod, name, make_small(arity));
hp += 4;
retval = copy_struct(retval, retval_size, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->id/* Local pid */, am_return_from, mfa, retval);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_return_from, mfa, retval);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
@@ -1419,25 +1449,25 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &p->tracer_proc;
+ tracer_pid = &ERTS_TRACER_PROC(p);
}
if (is_nil(*tracer_pid)) {
/* Trace disabled */
return;
}
ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->id) {
+ if (*tracer_pid == p->common.id) {
/* Do not generate trace messages to oneself */
return;
}
- if (tracer_pid == &p->tracer_proc) {
+ if (tracer_pid == &ERTS_TRACER_PROC(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
- tracee_flags = &p->trace_flags;
+ tracee_flags = &ERTS_TRACE_FLAGS(p);
#ifdef ERTS_SMP
- tracee = p->id;
+ tracee = p->common.id;
#endif
if (! (*tracee_flags & F_TRACE_CALLS)) {
return;
@@ -1465,7 +1495,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
hp += 4;
cv = TUPLE2(hp, class, value);
hp += 3;
- mess = TUPLE5(hp, am_trace, p->id, am_exception_from, mfa_tuple, cv);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_exception_from, mfa_tuple, cv);
hp += 6;
ASSERT((hp - local_heap) <= LOCAL_HEAP_SIZE);
erts_smp_mtx_lock(&smq_mtx);
@@ -1487,8 +1517,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
Eterm* limit;
#endif
- ASSERT(is_internal_pid(*tracer_pid)
- && internal_pid_index(*tracer_pid) < erts_max_processes);
+ ASSERT(is_internal_pid(*tracer_pid));
ERTS_GET_TRACER_REF(tracer_ref, *tracer_pid, *tracee_flags);
@@ -1512,7 +1541,7 @@ erts_trace_exception(Process* p, BeamInstr mfa[3], Eterm class, Eterm value,
value = copy_struct(value, value_size, &hp, off_heap);
cv = TUPLE2(hp, class, value);
hp += 3;
- mess = TUPLE5(hp, am_trace, p->id/* Local pid */,
+ mess = TUPLE5(hp, am_trace, p->common.id,
am_exception_from, mfa_tuple, cv);
hp += 6;
@@ -1566,25 +1595,25 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
/* Breakpoint trace enabled without specifying tracer =>
* use process tracer and flags
*/
- tracer_pid = &p->tracer_proc;
+ tracer_pid = &ERTS_TRACER_PROC(p);
}
if (is_nil(*tracer_pid)) {
/* Trace disabled */
return 0;
}
ASSERT(is_internal_pid(*tracer_pid) || is_internal_port(*tracer_pid));
- if (*tracer_pid == p->id) {
+ if (*tracer_pid == p->common.id) {
/* Do not generate trace messages to oneself */
return 0;
}
- if (tracer_pid == &p->tracer_proc) {
+ if (tracer_pid == &ERTS_TRACER_PROC(p)) {
/* Tracer specified in process structure =>
* non-breakpoint trace =>
* use process flags
*/
- tracee_flags = &p->trace_flags;
+ tracee_flags = &ERTS_TRACE_FLAGS(p);
#ifdef ERTS_SMP
- tracee = p->id;
+ tracee = p->common.id;
#endif
} else {
/* Tracer not specified in process structure =>
@@ -1592,7 +1621,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* meta trace =>
* use fixed flag set instead of process flags
*/
- if (p->trace_flags & F_SENSITIVE) {
+ if (ERTS_TRACE_FLAGS(p) & F_SENSITIVE) {
/* No trace messages for sensitive processes. */
return 0;
}
@@ -1650,7 +1679,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
if (!erts_is_valid_tracer_port(*tracer_pid)) {
#ifdef ERTS_SMP
- ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc);
+ ASSERT(is_nil(tracee) || tracer_pid == &ERTS_TRACER_PROC(p));
if (is_not_nil(tracee))
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
@@ -1752,7 +1781,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* Build the trace tuple and send it to the port.
*/
- mess = TUPLE4(hp, am_trace, p->id, am_call, mfa_tuple);
+ mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
hp += 5;
if (pam_result != am_true) {
hp[-5] = make_arityval(5);
@@ -1787,21 +1816,21 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
Eterm* limit;
#endif
- ASSERT(is_internal_pid(*tracer_pid)
- && internal_pid_index(*tracer_pid) < erts_max_processes);
+ ASSERT(is_internal_pid(*tracer_pid));
tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
*tracer_pid, ERTS_PROC_LOCK_STATUS);
if (!tracer)
invalid_tracer = 1;
else {
- invalid_tracer = (tracer->trace_flags & F_TRACER) == 0;
+ invalid_tracer = !(ERTS_TRACE_FLAGS(tracer) & F_TRACER);
erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
}
if (invalid_tracer) {
#ifdef ERTS_SMP
- ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc);
+ ASSERT(is_nil(tracee)
+ || tracer_pid == &ERTS_TRACER_PROC(p));
if (is_not_nil(tracee))
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
@@ -1925,7 +1954,7 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* Build the trace tuple and enqueue it.
*/
- mess = TUPLE4(hp, am_trace, p->id/* Local pid */, am_call, mfa_tuple);
+ mess = TUPLE4(hp, am_trace, p->common.id, am_call, mfa_tuple);
hp += 5;
if (pam_result != am_true) {
hp[-5] = make_arityval(5);
@@ -1963,17 +1992,17 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
ERTS_SMP_LC_ASSERT((erts_proc_lc_my_proc_locks(t_p) != 0)
|| erts_thr_progress_is_blocking());
- if (is_internal_port(t_p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->id, what, data);
+ mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
send_to_port(
@@ -1984,7 +2013,9 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
/* Fake schedule out and in are never sent when smp enabled */
c_p,
#endif
- mess, &t_p->tracer_proc, &t_p->trace_flags);
+ mess,
+ &ERTS_TRACER_PROC(t_p),
+ &ERTS_TRACE_FLAGS(t_p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -1995,10 +2026,11 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
ERTS_TRACER_REF_TYPE tracer_ref;
size_t sz_data;
- ASSERT(is_internal_pid(t_p->tracer_proc)
- && internal_pid_index(t_p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
- ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(t_p),
+ ERTS_TRACE_FLAGS(t_p));
sz_data = size_object(data);
@@ -2007,16 +2039,16 @@ trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
hp = ERTS_ALLOC_SYSMSG_HEAP(need, &bp, &off_heap, tracer_ref);
tmp = copy_struct(data, sz_data, &hp, off_heap);
- mess = TUPLE4(hp, am_trace, t_p->id/* Local pid */, what, tmp);
+ mess = TUPLE4(hp, am_trace, t_p->common.id, what, tmp);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(t_p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -2037,7 +2069,7 @@ trace_proc_spawn(Process *p, Eterm pid,
Eterm mess;
Eterm* hp;
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (4+6+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2045,13 +2077,13 @@ trace_proc_spawn(Process *p, Eterm pid,
hp = local_heap;
mfa = TUPLE3(hp, mod, func, args);
hp += 4;
- mess = TUPLE5(hp, am_trace, p->id, am_spawn, pid, mfa);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, pid, mfa);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(p, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2063,10 +2095,11 @@ trace_proc_spawn(Process *p, Eterm pid,
size_t sz_args, sz_pid;
Uint need;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
sz_args = size_object(args);
sz_pid = size_object(pid);
@@ -2078,16 +2111,16 @@ trace_proc_spawn(Process *p, Eterm pid,
mfa = TUPLE3(hp, mod, func, tmp);
hp += 4;
tmp = copy_struct(pid, sz_pid, &hp, off_heap);
- mess = TUPLE5(hp, am_trace, p->id, am_spawn, tmp, mfa);
+ mess = TUPLE5(hp, am_trace, p->common.id, am_spawn, tmp, mfa);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -2107,187 +2140,6 @@ void save_calls(Process *p, Export *e)
}
}
-/*
- * Entry point called by the trace wrap functions in erl_bif_wrap.c
- *
- * The trace wrap functions are themselves called through the export
- * entries instead of the original BIF functions.
- */
-Eterm
-erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
-{
- Eterm result;
- int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);
-
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-
- if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_CALLS) && (! meta)) {
- /* Warning! This is an Optimization.
- *
- * If neither meta trace is active nor process trace flags then
- * no tracing will occur. Doing the whole else branch will
- * also do nothing, only slower.
- */
- Eterm (*func)(Process*, Eterm*, BeamInstr*) = bif_table[bif_index].f;
- result = func(p, args, I);
- } else {
- Eterm (*func)(Process*, Eterm*, BeamInstr*);
- Export* ep = bif_export[bif_index];
- Uint32 flags = 0, flags_meta = 0;
- int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
- int local = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
- int time = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_CALL_TIME);
- Eterm meta_tracer_pid = NIL;
- int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
- * is actually in the
- * export entry */
- BeamInstr *cp = p->cp;
-
- /*
- * Make continuation pointer OK, it is not during direct BIF calls,
- * but it is correct during apply of bif.
- */
- if (!applying) {
- p->cp = I;
- }
- if (global || local) {
- flags = erts_call_trace(p, ep->code, ep->match_prog_set, args,
- local, &p->tracer_proc);
- }
- if (meta) {
- flags_meta = erts_bif_mtrace(p, ep->code+3, args, local,
- &meta_tracer_pid);
- }
- if (time) {
- BpDataTime *bdt = NULL;
- BeamInstr *pc = (BeamInstr *)ep->code+3;
-
- bdt = (BpDataTime *) erts_get_time_break(p, pc);
- ASSERT(bdt);
-
- if (!bdt->pause) {
- erts_trace_time_break(p, pc, bdt, ERTS_BP_CALL_TIME_CALL);
- }
- }
- /* Restore original continuation pointer (if changed). */
- p->cp = cp;
-
- func = bif_table[bif_index].f;
-
- result = func(p, args, I);
-
- if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
- BeamInstr i_return_trace = beam_return_trace[0];
- BeamInstr i_return_to_trace = beam_return_to_trace[0];
- BeamInstr i_return_time_trace = beam_return_time_trace[0];
- Eterm *cpp;
- /* Maybe advance cp to skip trace stack frames */
- for (cpp = p->stop; ; cp = cp_val(*cpp++)) {
- if (*cp == i_return_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 2; /* Skip return_trace parameters */
- } else if (*cp == i_return_time_trace) {
- /* Skip stack frame variables */
- while (is_not_CP(*cpp)) cpp++;
- cpp += 1; /* Skip return_time_trace parameters */
- } else if (*cp == i_return_to_trace) {
- /* A return_to trace message is going to be generated
- * by normal means, so we do not have to.
- */
- cp = NULL;
- break;
- } else break;
- }
- }
-
- /* Try to get these in the order
- * they usually appear in normal code... */
- if (is_non_value(result)) {
- Uint reason = p->freason;
- if (reason != TRAP) {
- Eterm class;
- Eterm value = p->fvalue;
- DeclareTmpHeapNoproc(nocatch,3);
- UseTmpHeapNoproc(3);
- /* Expand error value like in handle_error() */
- if (reason & EXF_ARGLIST) {
- Eterm *tp;
- ASSERT(is_tuple(value));
- tp = tuple_val(value);
- value = tp[1];
- }
- if ((reason & EXF_THROWN) && (p->catches <= 0)) {
- value = TUPLE2(nocatch, am_nocatch, value);
- reason = EXC_ERROR;
- }
- /* Note: expand_error_value() could theoretically
- * allocate on the heap, but not for any error
- * returned by a BIF, and it would do no harm,
- * just be annoying.
- */
- value = expand_error_value(p, reason, value);
- class = exception_tag[GET_EXC_CLASS(reason)];
-
- if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &meta_tracer_pid);
- }
- if (flags & MATCH_SET_EXCEPTION_TRACE) {
- erts_trace_exception(p, ep->code, class, value,
- &p->tracer_proc);
- }
- if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
- /* can only happen if(local)*/
- Eterm *ptr = p->stop;
- ASSERT(is_CP(*ptr));
- ASSERT(ptr <= STACK_START(p));
- /* Search the nearest stack frame for a catch */
- while (++ptr < STACK_START(p)) {
- if (is_CP(*ptr)) break;
- if (is_catch(*ptr)) {
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into
- * calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- UnUseTmpHeapNoproc(3);
- if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
- erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- p->trace_flags |= F_EXCEPTION_TRACE;
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
- } else {
- if (flags_meta & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &meta_tracer_pid);
- }
- /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
- if (flags & MATCH_SET_RX_TRACE) {
- erts_trace_return(p, ep->code, result, &p->tracer_proc);
- }
- if (flags & MATCH_SET_RETURN_TO_TRACE) {
- /* can only happen if(local)*/
- if (applying) {
- /* Apply of BIF, cp is in calling function */
- if (cp) erts_trace_return_to(p, cp);
- } else {
- /* Direct bif call, I points into calling function */
- erts_trace_return_to(p, I);
- }
- }
- }
- }
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- return result;
-}
-
/* Sends trace message:
* {trace_ts, Pid, What, Msg, Timestamp}
* or {trace, Pid, What, Msg}
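When F_TIMESTAMP is set on the tracee, TS_SIZE() (redefined earlier in this diff in terms of ERTS_TRACE_FLAGS) reserves TS_HEAP_WORDS == 5 extra heap words for exactly this trace_ts form. A breakdown of those five words, with the widening step being an assumption about what patch_ts() does:

/*   4 words: ts = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)),  */
/*            the {Ms, S, Us} value obtained via GET_NOW()                     */
/*   1 word : the extra element slot needed when {trace, Pid, What, Msg} is    */
/*            widened into {trace_ts, Pid, What, Msg, Ts} (assumed patch_ts()  */
/*            behaviour; its definition is not part of this hunk)              */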
@@ -2358,7 +2210,7 @@ trace_gc(Process *p, Eterm what)
UseTmpHeap(LOCAL_HEAP_SIZE,p);
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
hp = local_heap;
#ifdef DEBUG
size = 0;
@@ -2370,10 +2222,11 @@ trace_gc(Process *p, Eterm what)
size += 5/*4-tuple*/ + TS_SIZE(p);
#endif
} else {
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
size = 0;
(void) erts_bld_atom_uint_2tup_list(NULL,
@@ -2397,19 +2250,19 @@ trace_gc(Process *p, Eterm what)
tags,
values);
- msg = TUPLE4(hp, am_trace, p->id/* Local pid */, what, msg);
+ msg = TUPLE4(hp, am_trace, p->common.id, what, msg);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(msg, hp);
}
ASSERT(hp == limit);
- if (is_internal_port(p->tracer_proc))
- send_to_port(p, msg, &p->tracer_proc, &p->trace_flags);
+ if (is_internal_port(ERTS_TRACER_PROC(p)))
+ send_to_port(p, msg, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
else
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, msg, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, msg, bp);
erts_smp_mtx_unlock(&smq_mtx);
UnUseTmpHeap(LOCAL_HEAP_SIZE,p);
#undef LOCAL_HEAP_SIZE
@@ -2449,12 +2302,10 @@ monitor_long_gc(Process *p, Uint time) {
#endif
#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor)
- && internal_pid_index(system_monitor) < erts_max_processes);
- monitor_p = process_tab[internal_pid_index(system_monitor)];
- if (INVALID_PID(monitor_p, system_monitor) || p == monitor_p) {
+ ASSERT(is_internal_pid(system_monitor));
+ monitor_p = erts_proc_lookup(system_monitor);
+ if (!monitor_p || p == monitor_p)
return;
- }
#endif
hsz = 0;
@@ -2476,7 +2327,7 @@ monitor_long_gc(Process *p, Uint time) {
sizeof(values)/sizeof(Uint),
tags,
values);
- msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, am_long_gc, list);
+ msg = TUPLE4(hp, am_monitor, p->common.id, am_long_gc, list);
#ifdef DEBUG
hp += 5 /* 4-tuple */;
@@ -2484,7 +2335,7 @@ monitor_long_gc(Process *p, Uint time) {
#endif
#ifdef ERTS_SMP
- enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
#else
erts_queue_message(monitor_p, NULL, bp, msg, NIL
#ifdef USE_VM_PROBES
@@ -2525,10 +2376,9 @@ monitor_large_heap(Process *p) {
#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor)
- && internal_pid_index(system_monitor) < erts_max_processes);
- monitor_p = process_tab[internal_pid_index(system_monitor)];
- if (INVALID_PID(monitor_p, system_monitor) || p == monitor_p) {
+ ASSERT(is_internal_pid(system_monitor));
+ monitor_p = erts_proc_lookup(system_monitor);
+    if (!monitor_p || p == monitor_p) {
return;
}
#endif
@@ -2552,7 +2402,7 @@ monitor_large_heap(Process *p) {
sizeof(values)/sizeof(Uint),
tags,
values);
- msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, am_large_heap, list);
+ msg = TUPLE4(hp, am_monitor, p->common.id, am_large_heap, list);
#ifdef DEBUG
hp += 5 /* 4-tuple */;
@@ -2560,7 +2410,7 @@ monitor_large_heap(Process *p) {
#endif
#ifdef ERTS_SMP
- enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
#else
erts_queue_message(monitor_p, NULL, bp, msg, NIL
#ifdef USE_VM_PROBES
@@ -2580,21 +2430,19 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
Eterm *hp, msg;
#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor)
- && internal_pid_index(system_monitor) < erts_max_processes);
- monitor_p = process_tab[internal_pid_index(system_monitor)];
- if (INVALID_PID(monitor_p, system_monitor) || p == monitor_p) {
+ ASSERT(is_internal_pid(system_monitor));
+ monitor_p = erts_proc_lookup(system_monitor);
+ if (!monitor_p || p == monitor_p)
return;
- }
#endif
hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p);
- msg = TUPLE4(hp, am_monitor, p->id/* Local pid */, type, spec);
+ msg = TUPLE4(hp, am_monitor, p->common.id, type, spec);
hp += 5;
#ifdef ERTS_SMP
- enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->id, NIL, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
#else
erts_queue_message(monitor_p, NULL, bp, msg, NIL
#ifdef USE_VM_PROBES
@@ -2716,21 +2564,21 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
Eterm mess;
Eterm* hp;
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (5+6)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name);
+ mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
/* No fake schedule */
- send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2740,25 +2588,26 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
size_t sz_data;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
sz_data = 6 + TS_SIZE(p);
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
- mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->id, drv_name);
+ mess = TUPLE5(hp, am_trace, calling_pid, am_open, p->common.id, drv_name);
hp += 6;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -2779,20 +2628,20 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p)
|| erts_thr_progress_is_blocking());
- if (is_internal_port(t_p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(t_p))) {
#define LOCAL_HEAP_SIZE (5+5)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
hp = local_heap;
- mess = TUPLE4(hp, am_trace, t_p->id, what, data);
+ mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
/* No fake schedule */
- send_to_port(NULL, mess, &t_p->tracer_proc, &t_p->trace_flags);
+ send_to_port(NULL,mess,&ERTS_TRACER_PROC(t_p),&ERTS_TRACE_FLAGS(t_p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2802,25 +2651,26 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
size_t sz_data;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(t_p->tracer_proc)
- && internal_pid_index(t_p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(t_p)));
sz_data = 5 + TS_SIZE(t_p);
- ERTS_GET_TRACER_REF(tracer_ref, t_p->tracer_proc, t_p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(t_p),
+ ERTS_TRACE_FLAGS(t_p));
hp = ERTS_ALLOC_SYSMSG_HEAP(sz_data, &bp, &off_heap, tracer_ref);
- mess = TUPLE4(hp, am_trace, t_p->id, what, data);
+ mess = TUPLE4(hp, am_trace, t_p->common.id, what, data);
hp += 5;
erts_smp_mtx_lock(&smq_mtx);
- if (t_p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(t_p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(t_p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(t_p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -2845,7 +2695,7 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
int ws = 5;
Eterm sched_id = am_undefined;
- if (is_internal_port(p->tracer_proc)) {
+ if (is_internal_port(ERTS_TRACER_PROC(p))) {
#define LOCAL_HEAP_SIZE (5+6)
DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
@@ -2860,21 +2710,21 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
#else
sched_id = make_small(1);
#endif
- mess = TUPLE5(hp, am_trace, p->id, what, sched_id, where);
+ mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
ws = 6;
} else {
- mess = TUPLE4(hp, am_trace, p->id, what, where);
+ mess = TUPLE4(hp, am_trace, p->common.id, what, where);
ws = 5;
}
hp += ws;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
/* No fake scheduling */
- send_to_port(NULL, mess, &p->tracer_proc, &p->trace_flags);
+ send_to_port(NULL, mess, &ERTS_TRACER_PROC(p), &ERTS_TRACE_FLAGS(p));
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
erts_smp_mtx_unlock(&smq_mtx);
@@ -2883,12 +2733,13 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
ErlOffHeap *off_heap;
ERTS_TRACER_REF_TYPE tracer_ref;
- ASSERT(is_internal_pid(p->tracer_proc)
- && internal_pid_index(p->tracer_proc) < erts_max_processes);
+ ASSERT(is_internal_pid(ERTS_TRACER_PROC(p)));
     if (IS_TRACED_FL(p, F_TRACE_SCHED_NO)) ws = 6; /* Make room for scheduler id */
- ERTS_GET_TRACER_REF(tracer_ref, p->tracer_proc, p->trace_flags);
+ ERTS_GET_TRACER_REF(tracer_ref,
+ ERTS_TRACER_PROC(p),
+ ERTS_TRACE_FLAGS(p));
hp = ERTS_ALLOC_SYSMSG_HEAP(ws+TS_SIZE(p), &bp, &off_heap, tracer_ref);
@@ -2900,19 +2751,19 @@ trace_sched_ports_where(Port *p, Eterm what, Eterm where) {
#else
sched_id = make_small(1);
#endif
- mess = TUPLE5(hp, am_trace, p->id, what, sched_id, where);
+ mess = TUPLE5(hp, am_trace, p->common.id, what, sched_id, where);
} else {
- mess = TUPLE4(hp, am_trace, p->id, what, where);
+ mess = TUPLE4(hp, am_trace, p->common.id, what, where);
}
hp += ws;
erts_smp_mtx_lock(&smq_mtx);
- if (p->trace_flags & F_TIMESTAMP) {
+ if (ERTS_TRACE_FLAGS(p) & F_TIMESTAMP) {
hp = patch_ts(mess, hp);
}
- ERTS_ENQ_TRACE_MSG(p->id, tracer_ref, mess, bp);
+ ERTS_ENQ_TRACE_MSG(p->common.id, tracer_ref, mess, bp);
erts_smp_mtx_unlock(&smq_mtx);
}
}
@@ -2948,14 +2799,14 @@ profile_runnable_port(Port *p, Eterm status) {
GET_NOW(&Ms, &s, &us);
timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE5(hp, am_profile, p->id, status, count, timestamp); hp += 6;
+ msg = TUPLE5(hp, am_profile, p->common.id, status, count, timestamp); hp += 6;
#ifndef ERTS_SMP
- profile_send(p->id, msg);
+ profile_send(p->common.id, msg);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#else
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->id, NIL, msg, bp);
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
#endif
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -3002,13 +2853,13 @@ profile_runnable_proc(Process *p, Eterm status){
GET_NOW(&Ms, &s, &us);
timestamp = TUPLE3(hp, make_small(Ms), make_small(s), make_small(us)); hp += 4;
- msg = TUPLE5(hp, am_profile, p->id, status, where, timestamp); hp += 6;
+ msg = TUPLE5(hp, am_profile, p->common.id, status, where, timestamp); hp += 6;
#ifndef ERTS_SMP
- profile_send(p->id, msg);
+ profile_send(p->common.id, msg);
UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
#undef LOCAL_HEAP_SIZE
#else
- enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->id, NIL, msg, bp);
+ enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
#endif
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -3021,16 +2872,19 @@ profile_runnable_proc(Process *p, Eterm status){
void
erts_check_my_tracer_proc(Process *p)
{
- if (is_internal_pid(p->tracer_proc)) {
- Process *tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- p->tracer_proc, ERTS_PROC_LOCK_STATUS);
- int invalid_tracer = !tracer || !(tracer->trace_flags & F_TRACER);
+ if (is_internal_pid(ERTS_TRACER_PROC(p))) {
+ Process *tracer = erts_pid2proc(p,
+ ERTS_PROC_LOCK_MAIN,
+ ERTS_TRACER_PROC(p),
+ ERTS_PROC_LOCK_STATUS);
+ int invalid_tracer = (!tracer
+ || !(ERTS_TRACE_FLAGS(tracer) & F_TRACER));
if (tracer)
erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
if (invalid_tracer) {
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- p->trace_flags &= ~TRACEE_FLAGS;
- p->tracer_proc = NIL;
+ ERTS_TRACE_FLAGS(p) &= ~TRACEE_FLAGS;
+ ERTS_TRACER_PROC(p) = NIL;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
}
}
@@ -3149,10 +3003,10 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
break;
case SYS_MSG_TYPE_SEQTRACE:
/* Reset seq_tracer if it hasn't changed */
- erts_smp_mtx_lock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
if (system_seq_tracer == receiver)
system_seq_tracer = am_false;
- erts_smp_mtx_unlock(&sys_trace_mtx);
+ erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
break;
case SYS_MSG_TYPE_SYSMON:
if (receiver == NIL
@@ -3374,7 +3228,7 @@ sys_msg_dispatcher_func(void *unused)
proc = erts_pid2proc(NULL, 0, receiver, proc_locks);
if (!proc
|| (smqp->type == SYS_MSG_TYPE_TRACE
- && !(proc->trace_flags & F_TRACER))) {
+ && !(ERTS_TRACE_FLAGS(proc) & F_TRACER))) {
/* Bad tracer */
#ifdef DEBUG_PRINTOUTS
if (smqp->type == SYS_MSG_TYPE_TRACE && proc)
@@ -3401,18 +3255,16 @@ sys_msg_dispatcher_func(void *unused)
proc = erts_whereis_process(NULL,0,receiver,proc_locks,0);
if (!proc)
goto failure;
- else if (smqp->from == proc->id)
+ else if (smqp->from == proc->common.id)
goto drop_sys_msg;
else
goto queue_proc_msg;
}
else if (is_internal_port(receiver)) {
- port = erts_id2port(receiver, NULL, 0);
- if (INVALID_TRACER_PORT(port, receiver)) {
- if (port)
- erts_port_release(port);
+ port = erts_thr_id2port_sflgs(receiver,
+ ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
+ if (!port)
goto failure;
- }
else {
write_sys_msg_to_port(receiver,
port,
@@ -3424,7 +3276,7 @@ sys_msg_dispatcher_func(void *unused)
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "delivered\n");
#endif
- erts_port_release(port);
+ erts_thr_port_release(port);
if (smqp->bp)
free_message_buffer(smqp->bp);
}
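In the dispatcher hunks above, port lookup moves from erts_id2port()/erts_port_release() to erts_thr_id2port_sflgs()/erts_thr_port_release(), presumably because the sys-message dispatcher runs on its own thread rather than on a scheduler; the _sflgs lookup also rejects invalid tracer ports itself, which is why the separate INVALID_TRACER_PORT() check disappears. The resulting lookup/use/release shape is:

    Port *port = erts_thr_id2port_sflgs(receiver,
                                        ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
    if (!port)
        goto failure;                      /* invalid or dead tracer port */
    /* write_sys_msg_to_port() delivers the buffered system message here */
    erts_thr_port_release(port);           /* matching thread-safe release */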
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
new file mode 100644
index 0000000000..50fb27aab0
--- /dev/null
+++ b/erts/emulator/beam/erl_trace.h
@@ -0,0 +1,141 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+
+#ifndef ERL_TRACE_H__
+#define ERL_TRACE_H__
+
+struct binary;
+
+/* erl_bif_trace.c */
+Eterm erl_seq_trace_info(Process *p, Eterm arg1);
+void erts_system_monitor_clear(Process *c_p);
+void erts_system_profile_clear(Process *c_p);
+
+/* erl_trace.c */
+void erts_init_trace(void);
+void erts_trace_check_exiting(Eterm exiting);
+Eterm erts_set_system_seq_tracer(Process *c_p,
+ ErtsProcLocks c_p_locks,
+ Eterm new);
+Eterm erts_get_system_seq_tracer(void);
+void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
+void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
+void erts_set_system_monitor(Eterm monitor);
+Eterm erts_get_system_monitor(void);
+
+#ifdef ERTS_SMP
+void erts_check_my_tracer_proc(Process *);
+void erts_block_sys_msg_dispatcher(void);
+void erts_release_sys_msg_dispatcher(void);
+void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
+ Eterm,
+ Eterm,
+ ErlHeapFragment *));
+void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
+#endif
+
+void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
+void trace_send(Process*, Eterm, Eterm);
+void trace_receive(Process*, Eterm);
+Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec, Eterm* args,
+ int local, Eterm *tracer_pid);
+void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid);
+void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
+ Eterm *tracer);
+void erts_trace_return_to(Process *p, BeamInstr *pc);
+void trace_sched(Process*, Eterm);
+void trace_proc(Process*, Process*, Eterm, Eterm);
+void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args);
+void save_calls(Process *p, Export *);
+void trace_gc(Process *p, Eterm what);
+/* port tracing */
+void trace_virtual_sched(Process*, Eterm);
+void trace_sched_ports(Port *pp, Eterm);
+void trace_sched_ports_where(Port *pp, Eterm, Eterm);
+void trace_port(Port *, Eterm what, Eterm data);
+void trace_port_open(Port *, Eterm calling_pid, Eterm drv_name);
+
+/* system_profile */
+void erts_set_system_profile(Eterm profile);
+Eterm erts_get_system_profile(void);
+void profile_scheduler(Eterm scheduler_id, Eterm);
+void profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint Ms, Uint s, Uint us);
+void profile_runnable_proc(Process* p, Eterm status);
+void profile_runnable_port(Port* p, Eterm status);
+void erts_system_profile_setup_active_schedulers(void);
+
+/* system_monitor */
+void monitor_long_gc(Process *p, Uint time);
+void monitor_large_heap(Process *p);
+void monitor_generic(Process *p, Eterm type, Eterm spec);
+Uint erts_trace_flag2bit(Eterm flag);
+int erts_trace_flags(Eterm List,
+ Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
+Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
+
+#ifdef ERTS_SMP
+void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
+#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \
+do { \
+ if ((ESDP)->pending_trace_msgs) \
+ erts_send_pending_trace_msgs((ESDP)); \
+} while (0)
+#else
+#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP)
+#endif
+
+#define seq_trace_output(token, msg, type, receiver, process) \
+seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
+#define seq_trace_output_exit(token, msg, type, receiver, exitfrom) \
+seq_trace_output_generic((token), (msg), (type), (receiver), NULL, (exitfrom))
+void seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
+ Eterm receiver, Process *process, Eterm exitfrom);
+
+int seq_trace_update_send(Process *process);
+
+Eterm erts_seq_trace(Process *process,
+ Eterm atom_type, Eterm atom_true_or_false,
+ int build_result);
+
+struct trace_pattern_flags {
+ unsigned int breakpoint : 1; /* Set if any other is set */
+ unsigned int local : 1; /* Local call trace breakpoint */
+ unsigned int meta : 1; /* Metadata trace breakpoint */
+ unsigned int call_count : 1; /* Fast call count breakpoint */
+ unsigned int call_time : 1; /* Fast call time breakpoint */
+};
+extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
+extern int erts_call_time_breakpoint_tracing;
+int erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
+ struct binary* match_prog_set,
+ struct binary *meta_match_prog_set,
+ int on, struct trace_pattern_flags,
+ Eterm meta_tracer_pid, int is_blocking);
+void
+erts_get_default_trace_pattern(int *trace_pattern_is_on,
+ struct binary **match_spec,
+ struct binary **meta_match_spec,
+ struct trace_pattern_flags *trace_pattern_flags,
+ Eterm *meta_tracer_pid);
+int erts_is_default_trace_enabled(void);
+void erts_bif_trace_init(void);
+int erts_finish_breakpointing(void);
+
+#endif /* ERL_TRACE_H__ */
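The seq_trace_output convenience macros declared in the new header are thin wrappers around seq_trace_output_generic(); expanded, the two call forms read:

/* seq_trace_output(token, msg, type, receiver, process) expands to: */
seq_trace_output_generic(token, msg, type, receiver, process, NIL);

/* seq_trace_output_exit(token, msg, type, receiver, exitfrom) expands to: */
seq_trace_output_generic(token, msg, type, receiver, NULL, exitfrom);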
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index 26ac231ddb..51559aea1c 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -77,66 +77,25 @@ void erts_init_unicode(void)
{
max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
/* Non visual BIFs to trap to. */
- memset(&characters_to_utf8_trap_exp, 0, sizeof(Export));
- characters_to_utf8_trap_exp.address =
- &characters_to_utf8_trap_exp.code[3];
- characters_to_utf8_trap_exp.code[0] = am_erlang;
- characters_to_utf8_trap_exp.code[1] =
- am_atom_put("characters_to_utf8_trap",23);
- characters_to_utf8_trap_exp.code[2] = 3;
- characters_to_utf8_trap_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_utf8_trap_exp.code[4] =
- (BeamInstr) &characters_to_utf8_trap;
-
- memset(&characters_to_list_trap_1_exp, 0, sizeof(Export));
- characters_to_list_trap_1_exp.address =
- &characters_to_list_trap_1_exp.code[3];
- characters_to_list_trap_1_exp.code[0] = am_erlang;
- characters_to_list_trap_1_exp.code[1] =
- am_atom_put("characters_to_list_trap_1",25);
- characters_to_list_trap_1_exp.code[2] = 3;
- characters_to_list_trap_1_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_list_trap_1_exp.code[4] =
- (BeamInstr) &characters_to_list_trap_1;
-
- memset(&characters_to_list_trap_2_exp, 0, sizeof(Export));
- characters_to_list_trap_2_exp.address =
- &characters_to_list_trap_2_exp.code[3];
- characters_to_list_trap_2_exp.code[0] = am_erlang;
- characters_to_list_trap_2_exp.code[1] =
- am_atom_put("characters_to_list_trap_2",25);
- characters_to_list_trap_2_exp.code[2] = 3;
- characters_to_list_trap_2_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_list_trap_2_exp.code[4] =
- (BeamInstr) &characters_to_list_trap_2;
-
-
- memset(&characters_to_list_trap_3_exp, 0, sizeof(Export));
- characters_to_list_trap_3_exp.address =
- &characters_to_list_trap_3_exp.code[3];
- characters_to_list_trap_3_exp.code[0] = am_erlang;
- characters_to_list_trap_3_exp.code[1] =
- am_atom_put("characters_to_list_trap_3",25);
- characters_to_list_trap_3_exp.code[2] = 3;
- characters_to_list_trap_3_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_list_trap_3_exp.code[4] =
- (BeamInstr) &characters_to_list_trap_3;
-
- memset(&characters_to_list_trap_4_exp, 0, sizeof(Export));
- characters_to_list_trap_4_exp.address =
- &characters_to_list_trap_4_exp.code[3];
- characters_to_list_trap_4_exp.code[0] = am_erlang;
- characters_to_list_trap_4_exp.code[1] =
- am_atom_put("characters_to_list_trap_4",25);
- characters_to_list_trap_4_exp.code[2] = 1;
- characters_to_list_trap_4_exp.code[3] =
- (BeamInstr) em_apply_bif;
- characters_to_list_trap_4_exp.code[4] =
- (BeamInstr) &characters_to_list_trap_4;
+ erts_init_trap_export(&characters_to_utf8_trap_exp,
+ am_erlang, am_atom_put("characters_to_utf8_trap",23), 3,
+ &characters_to_utf8_trap);
+
+ erts_init_trap_export(&characters_to_list_trap_1_exp,
+ am_erlang, am_atom_put("characters_to_list_trap_1",25), 3,
+ &characters_to_list_trap_1);
+
+ erts_init_trap_export(&characters_to_list_trap_2_exp,
+ am_erlang, am_atom_put("characters_to_list_trap_2",25), 3,
+ &characters_to_list_trap_2);
+
+ erts_init_trap_export(&characters_to_list_trap_3_exp,
+ am_erlang, am_atom_put("characters_to_list_trap_3",25), 3,
+ &characters_to_list_trap_3);
+
+ erts_init_trap_export(&characters_to_list_trap_4_exp,
+ am_erlang, am_atom_put("characters_to_list_trap_4",25), 1,
+ &characters_to_list_trap_4);
c_to_b_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_binary_int,2);
c_to_l_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_list_int,2);
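The erl_unicode.c hunk above collapses five identical blocks of Export initialization into calls to erts_init_trap_export(). Its definition is not shown in this diff; the sketch below is an assumption reconstructed from the fields the removed blocks set by hand (module, function, arity, the em_apply_bif instruction and the BIF address), and its name is deliberately different from the real helper:

static void init_trap_export_sketch(Export* ep, Eterm m, Eterm f, Uint arity,
                                    void *bif_address)
{
    memset((void *) ep, 0, sizeof(Export));
    ep->address = &ep->code[3];            /* entry point: the em_apply_bif slot */
    ep->code[0] = m;                       /* module                             */
    ep->code[1] = f;                       /* function                           */
    ep->code[2] = arity;                   /* arity                              */
    ep->code[3] = (BeamInstr) em_apply_bif;
    ep->code[4] = (BeamInstr) bif_address; /* C BIF to trap into                 */
}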
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
new file mode 100644
index 0000000000..a2064bd8a3
--- /dev/null
+++ b/erts/emulator/beam/erl_utils.h
@@ -0,0 +1,215 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#ifndef ERL_UTILS_H__
+#define ERL_UTILS_H__
+
+#include "sys.h"
+#include "erl_smp.h"
+#include "erl_printf.h"
+
+typedef struct {
+#ifdef DEBUG
+ int smp_api;
+#endif
+ union {
+ Uint64 not_atomic;
+#ifdef ARCH_64
+ erts_atomic_t atomic;
+#else
+ erts_dw_atomic_t atomic;
+#endif
+ } counter;
+} erts_interval_t;
+
+void erts_interval_init(erts_interval_t *);
+void erts_smp_interval_init(erts_interval_t *);
+Uint64 erts_step_interval_nob(erts_interval_t *);
+Uint64 erts_step_interval_relb(erts_interval_t *);
+Uint64 erts_smp_step_interval_nob(erts_interval_t *);
+Uint64 erts_smp_step_interval_relb(erts_interval_t *);
+Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64);
+Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64);
+Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64);
+Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64);
+#ifdef ARCH_32
+ERTS_GLB_INLINE Uint64 erts_interval_dw_aint_to_val__(erts_dw_aint_t *);
+#endif
+ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *);
+ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+#ifdef ARCH_32
+
+ERTS_GLB_INLINE Uint64
+erts_interval_dw_aint_to_val__(erts_dw_aint_t *dw)
+{
+#ifdef ETHR_SU_DW_NAINT_T__
+ return (Uint64) dw->dw_sint;
+#else
+ Uint64 res;
+ res = (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_HIGH_WORD]);
+ res <<= 32;
+ res |= (Uint64) ((Uint32) dw->sint[ERTS_DW_AINT_LOW_WORD]);
+ return res;
+#endif
+}
+
+#endif
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_nob__(erts_interval_t *icp)
+{
+#ifdef ARCH_64
+ return (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
+#else
+ erts_dw_aint_t dw;
+ erts_dw_atomic_read_nob(&icp->counter.atomic, &dw);
+ return erts_interval_dw_aint_to_val__(&dw);
+#endif
+}
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_acqb__(erts_interval_t *icp)
+{
+#ifdef ARCH_64
+ return (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
+#else
+ erts_dw_aint_t dw;
+ erts_dw_atomic_read_acqb(&icp->counter.atomic, &dw);
+ return erts_interval_dw_aint_to_val__(&dw);
+#endif
+}
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_nob(erts_interval_t *icp)
+{
+ ASSERT(!icp->smp_api);
+ return erts_current_interval_nob__(icp);
+}
+
+ERTS_GLB_INLINE Uint64
+erts_current_interval_acqb(erts_interval_t *icp)
+{
+ ASSERT(!icp->smp_api);
+ return erts_current_interval_acqb__(icp);
+}
+
+ERTS_GLB_INLINE Uint64
+erts_smp_current_interval_nob(erts_interval_t *icp)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return erts_current_interval_nob__(icp);
+#else
+ return icp->counter.not_atomic;
+#endif
+}
+
+ERTS_GLB_INLINE Uint64
+erts_smp_current_interval_acqb(erts_interval_t *icp)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return erts_current_interval_acqb__(icp);
+#else
+ return icp->counter.not_atomic;
+#endif
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+/*
+ * To be used to silence unused result warnings, but do not abuse it.
+ */
+void erts_silence_warn_unused_result(long unused);
+
+
+int erts_fit_in_bits_int64(Sint64);
+int erts_fit_in_bits_int32(Sint32);
+int list_length(Eterm);
+int erts_is_builtin(Eterm, Eterm, int);
+Uint32 make_broken_hash(Eterm);
+Uint32 block_hash(byte *, unsigned, Uint32);
+Uint32 make_hash2(Eterm);
+Uint32 make_hash(Eterm);
+
+
+Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str);
+Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
+Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
+Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
+Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
+Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
+Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
+Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
+Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
+#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
+Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
+Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm terms1[], Uint terms2[]);
+Eterm
+erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
+ Sint length, Eterm atoms[], Uint uints[]);
+Eterm
+erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
+ Eterm atoms[], Uint uints1[], Uint uints2[]);
+
+void erts_init_utils(void);
+void erts_init_utils_mem(void);
+
+erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint);
+void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *);
+
+#if HALFWORD_HEAP
+int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base);
+# define eq(A,B) eq_rel(A,NULL,B,NULL)
+#else
+int eq(Eterm, Eterm);
+# define eq_rel(A,A_BASE,B,B_BASE) eq(A,B)
+#endif
+
+#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
+
+#if HALFWORD_HEAP
+Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*);
+#define CMP(A,B) cmp_rel(A,NULL,B,NULL)
+#else
+Sint cmp(Eterm, Eterm);
+#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B)
+#define CMP(A,B) cmp(A,B)
+#endif
+#define cmp_lt(a,b) (CMP((a),(b)) < 0)
+#define cmp_le(a,b) (CMP((a),(b)) <= 0)
+#define cmp_eq(a,b) (CMP((a),(b)) == 0)
+#define cmp_ne(a,b) (CMP((a),(b)) != 0)
+#define cmp_ge(a,b) (CMP((a),(b)) >= 0)
+#define cmp_gt(a,b) (CMP((a),(b)) > 0)
+
+#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
+#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
+#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
+#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
+
+#endif
diff --git a/erts/emulator/beam/erlang_dtrace.d b/erts/emulator/beam/erlang_dtrace.d
index bdfde58845..e3ebbb84f4 100644
--- a/erts/emulator/beam/erlang_dtrace.d
+++ b/erts/emulator/beam/erlang_dtrace.d
@@ -639,9 +639,9 @@ provider erlang {
* Entry into the efile_drv.c file I/O driver
*
* For a list of command numbers used by this driver, see the section
- * "Guide to probe arguments" in ../../../README.md. That section
- * also contains explanation of the various integer and string
- * arguments that may be present when any particular probe fires.
+ * "Guide to efile_drv.c probe arguments" in ../../../HOWTO/DTRACE.md.
+ * That section also contains explanation of the various integer and
+ * string arguments that may be present when any particular probe fires.
*
* NOTE: Not all Linux platforms (using SystemTap) can support
* arguments beyond arg9.
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index fb0ee99119..6b5121f917 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -32,98 +32,162 @@
#define EXPORT_HASH(m,f,a) ((m)*(f)+(a))
-static IndexTable export_table; /* Not locked. */
-static Hash secondary_export_table; /* Locked. */
+#ifdef DEBUG
+# define IF_DEBUG(x) x
+#else
+# define IF_DEBUG(x)
+#endif
-#include "erl_smp.h"
+static IndexTable export_tables[ERTS_NUM_CODE_IX]; /* Active not locked */
-static erts_smp_rwmtx_t export_table_lock; /* Locks the secondary export table. */
+static erts_smp_atomic_t total_entries_bytes;
-#define export_read_lock() erts_smp_rwmtx_rlock(&export_table_lock)
-#define export_read_unlock() erts_smp_rwmtx_runlock(&export_table_lock)
-#define export_write_lock() erts_smp_rwmtx_rwlock(&export_table_lock)
-#define export_write_unlock() erts_smp_rwmtx_rwunlock(&export_table_lock)
+#include "erl_smp.h"
+
+/* This lock protects the staging export table from concurrent access
+ * AND it protects the staging table from becoming active.
+ */
+erts_smp_mtx_t export_staging_lock;
extern BeamInstr* em_call_error_handler;
extern BeamInstr* em_call_traced_function;
+struct export_entry
+{
+ IndexSlot slot; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
+ Export* ep;
+};
+
+/* Helper struct that brings things together in one allocation
+*/
+struct export_blob
+{
+ Export exp;
+ struct export_entry entryv[ERTS_NUM_CODE_IX];
+ /* Note that entryv is not indexed by "code_ix".
+ */
+};
+
+/* Helper struct only used as a template
+*/
+struct export_templ
+{
+ struct export_entry entry;
+ Export exp;
+};
+
+static struct export_blob* entry_to_blob(struct export_entry* ee)
+{
+ return (struct export_blob*)
+ ((char*)ee->ep - offsetof(struct export_blob,exp));
+}
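
entry_to_blob() above is the classic container-of idiom: given a pointer to a member, offsetof() recovers the start of the enclosing allocation. A minimal standalone sketch of the same idiom (the struct and names here are illustrative, not the ERTS types):

    #include <stddef.h>
    #include <stdio.h>

    struct blob {
        int slots[3];
        int payload;
    };

    /* Recover the enclosing blob from a pointer to one of its members. */
    static struct blob *payload_to_blob(int *payload)
    {
        return (struct blob *)((char *)payload - offsetof(struct blob, payload));
    }

    int main(void)
    {
        struct blob b = { {0, 0, 0}, 42 };
        printf("%d\n", payload_to_blob(&b.payload)->payload);   /* prints 42 */
        return 0;
    }
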
+
void
export_info(int to, void *to_arg)
{
#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
- export_read_lock();
+ export_staging_lock();
#endif
- index_info(to, to_arg, &export_table);
- hash_info(to, to_arg, &secondary_export_table);
+ index_info(to, to_arg, &export_tables[erts_active_code_ix()]);
+ hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable);
#ifdef ERTS_SMP
if (lock)
- export_read_unlock();
+ export_staging_unlock();
#endif
}
static HashValue
-export_hash(Export* x)
+export_hash(struct export_entry* ee)
{
+ Export* x = ee->ep;
return EXPORT_HASH(x->code[0], x->code[1], x->code[2]);
}
static int
-export_cmp(Export* tmpl, Export* obj)
+export_cmp(struct export_entry* tmpl_e, struct export_entry* obj_e)
{
+ Export* tmpl = tmpl_e->ep;
+ Export* obj = obj_e->ep;
return !(tmpl->code[0] == obj->code[0] &&
tmpl->code[1] == obj->code[1] &&
tmpl->code[2] == obj->code[2]);
}
-static Export*
-export_alloc(Export* tmpl)
+static struct export_entry*
+export_alloc(struct export_entry* tmpl_e)
{
- Export* obj = (Export*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(Export));
-
- obj->fake_op_func_info_for_hipe[0] = 0;
- obj->fake_op_func_info_for_hipe[1] = 0;
- obj->code[0] = tmpl->code[0];
- obj->code[1] = tmpl->code[1];
- obj->code[2] = tmpl->code[2];
- obj->slot.index = -1;
- obj->address = obj->code+3;
- obj->code[3] = (BeamInstr) em_call_error_handler;
- obj->code[4] = 0;
- obj->match_prog_set = NULL;
- return obj;
+ struct export_blob* blob;
+ unsigned ix;
+
+ if (tmpl_e->slot.index == -1) { /* Template, allocate blob */
+ Export* tmpl = tmpl_e->ep;
+ Export* obj;
+
+ blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob));
+ erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob));
+ obj = &blob->exp;
+ obj->fake_op_func_info_for_hipe[0] = 0;
+ obj->fake_op_func_info_for_hipe[1] = 0;
+ obj->code[0] = tmpl->code[0];
+ obj->code[1] = tmpl->code[1];
+ obj->code[2] = tmpl->code[2];
+ obj->code[3] = (BeamInstr) em_call_error_handler;
+ obj->code[4] = 0;
+
+ for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) {
+ obj->addressv[ix] = obj->code+3;
+
+ blob->entryv[ix].slot.index = -1;
+ blob->entryv[ix].ep = &blob->exp;
+ }
+ ix = 0;
+ }
+ else { /* Existing entry in another table, use free entry in blob */
+ blob = entry_to_blob(tmpl_e);
+ for (ix = 0; blob->entryv[ix].slot.index >= 0; ix++) {
+ ASSERT(ix < ERTS_NUM_CODE_IX);
+ }
+ }
+ return &blob->entryv[ix];
}
-
-static void
-export_free(Export* obj)
+static void
+export_free(struct export_entry* obj)
{
- erts_free(ERTS_ALC_T_EXPORT, (void*) obj);
+ struct export_blob* blob = entry_to_blob(obj);
+ int i;
+ obj->slot.index = -1;
+ for (i=0; i < ERTS_NUM_CODE_IX; i++) {
+ if (blob->entryv[i].slot.index >= 0) {
+ return;
+ }
+ }
+ erts_free(ERTS_ALC_T_EXPORT, blob);
+ erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob));
}
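
export_free() only reclaims the shared blob once every per-code-index entry in it has been released. A hedged, self-contained sketch of that "free on last slot" pattern (NUM_TABLES and the types are stand-ins, not the ERTS definitions):

    #include <stdlib.h>

    #define NUM_TABLES 3

    /* One shared allocation referenced by up to NUM_TABLES per-table slots;
     * slot_index[i] == -1 means table i no longer uses it. */
    struct blob {
        int slot_index[NUM_TABLES];
    };

    void release_slot(struct blob *b, int table)
    {
        int i;
        b->slot_index[table] = -1;
        for (i = 0; i < NUM_TABLES; i++)
            if (b->slot_index[i] >= 0)
                return;            /* some table still references the blob */
        free(b);                   /* last reference gone: reclaim the blob */
    }
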
-
void
init_export_table(void)
{
HashFunctions f;
- erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
- rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
- rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+ int i;
- erts_smp_rwmtx_init_opt(&export_table_lock, &rwmtx_opt, "export_tab");
+ erts_smp_mtx_init(&export_staging_lock, "export_tab");
+ erts_smp_atomic_init_nob(&total_entries_bytes, 0);
f.hash = (H_FUN) export_hash;
f.cmp = (HCMP_FUN) export_cmp;
f.alloc = (HALLOC_FUN) export_alloc;
f.free = (HFREE_FUN) export_free;
- erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_table, "export_list",
- EXPORT_INITIAL_SIZE, EXPORT_LIMIT, f);
- hash_init(ERTS_ALC_T_EXPORT_TABLE, &secondary_export_table,
- "secondary_export_table", 50, f);
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ erts_index_init(ERTS_ALC_T_EXPORT_TABLE, &export_tables[i], "export_list",
+ EXPORT_INITIAL_SIZE, EXPORT_LIMIT, f);
+ }
}
/*
@@ -138,29 +202,42 @@ init_export_table(void)
* called through such an export entry.
* 3) This function is suitable for the implementation of erlang:apply/3.
*/
+extern Export* /* inline-helper */
+erts_find_export_entry(Eterm m, Eterm f, unsigned int a,ErtsCodeIndex code_ix);
Export*
-erts_find_export_entry(Eterm m, Eterm f, unsigned int a)
+erts_find_export_entry(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
{
HashValue hval = EXPORT_HASH((BeamInstr) m, (BeamInstr) f, (BeamInstr) a);
int ix;
HashBucket* b;
- ix = hval % export_table.htable.size;
- b = export_table.htable.bucket[ix];
+ ix = hval % export_tables[code_ix].htable.size;
+ b = export_tables[code_ix].htable.bucket[ix];
/*
* Note: We have inlined the code from hash.c for speed.
*/
while (b != (HashBucket*) 0) {
- Export* ep = (Export *) b;
+ Export* ep = ((struct export_entry*) b)->ep;
if (ep->code[0] == m && ep->code[1] == f && ep->code[2] == a) {
- break;
+ return ep;
}
b = b->next;
}
- return (Export*)b;
+ return NULL;
+}
+
+static struct export_entry* init_template(struct export_templ* templ,
+ Eterm m, Eterm f, unsigned a)
+{
+ templ->entry.ep = &templ->exp;
+ templ->entry.slot.index = -1;
+ templ->exp.code[0] = m;
+ templ->exp.code[1] = f;
+ templ->exp.code[2] = a;
+ return &templ->entry;
}
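
The lookup above inlines the generic hash walk for speed: hash the MFA, pick a bucket, and chase the chain. A self-contained sketch of that shape (types, table size and hash formula are illustrative only):

    #include <stddef.h>

    /* Chained hash lookup: hash (m,f,a), pick a bucket, walk the chain. */
    struct bucket {
        struct bucket *next;
        unsigned m, f, a;
    };

    struct bucket *find_mfa(struct bucket *buckets[], unsigned nbuckets,
                            unsigned m, unsigned f, unsigned a)
    {
        unsigned h = m * f + a;                 /* same shape as EXPORT_HASH */
        struct bucket *b = buckets[h % nbuckets];

        while (b != NULL) {
            if (b->m == m && b->f == f && b->a == a)
                return b;
            b = b->next;
        }
        return NULL;
    }
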
@@ -176,47 +253,42 @@ erts_find_export_entry(Eterm m, Eterm f, unsigned int a)
*/
Export*
-erts_find_function(Eterm m, Eterm f, unsigned int a)
+erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix)
{
- Export e;
- Export* ep;
-
- e.code[0] = m;
- e.code[1] = f;
- e.code[2] = a;
-
- ep = hash_get(&export_table.htable, (void*) &e);
- if (ep != NULL && ep->address == ep->code+3 &&
- ep->code[3] != (BeamInstr) em_call_traced_function) {
- ep = NULL;
+ struct export_templ templ;
+ struct export_entry* ee;
+
+ ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a));
+ if (ee == NULL ||
+ (ee->ep->addressv[code_ix] == ee->ep->code+3 &&
+ ee->ep->code[3] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) {
+ return NULL;
}
- return ep;
+ return ee->ep;
}
/*
* Returns a pointer to an existing export entry for a MFA,
* or creates a new one and returns the pointer.
*
- * This function provides unlocked write access to the main export
- * table. It should only be used during start up or when
- * all other threads are blocked.
+ * This function acts on the staging export table. It should only be used
+ * to load new code.
*/
Export*
erts_export_put(Eterm mod, Eterm func, unsigned int arity)
{
- Export e;
- int ix;
+ ErtsCodeIndex code_ix = erts_staging_code_ix();
+ struct export_templ templ;
+ struct export_entry* ee;
- ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_smp_thr_progress_is_blocking());
ASSERT(is_atom(mod));
ASSERT(is_atom(func));
- e.code[0] = mod;
- e.code[1] = func;
- e.code[2] = arity;
- ix = index_put(&export_table, (void*) &e);
- return (Export*) erts_index_lookup(&export_table, ix);
+ export_staging_lock();
+ ee = (struct export_entry*) index_put_entry(&export_tables[code_ix],
+ init_template(&templ, mod, func, arity));
+ export_staging_unlock();
+ return ee->ep;
}
/*
@@ -224,77 +296,117 @@ erts_export_put(Eterm mod, Eterm func, unsigned int arity)
* export entry (making a call through it will cause the error_handler to
* be called).
*
- * Stub export entries will be placed in the secondary export table.
- * erts_export_consolidate() will move all stub export entries into the
- * main export table (will be done the next time code is loaded).
+ * Stub export entries will be placed in the loader export table.
*/
Export*
erts_export_get_or_make_stub(Eterm mod, Eterm func, unsigned int arity)
{
- Export e;
+ ErtsCodeIndex code_ix;
Export* ep;
+ IF_DEBUG(int retrying = 0;)
ASSERT(is_atom(mod));
ASSERT(is_atom(func));
-
- e.code[0] = mod;
- e.code[1] = func;
- e.code[2] = arity;
- ep = erts_find_export_entry(mod, func, arity);
- if (ep == 0) {
- /*
- * The code is not loaded (yet). Put the export in the secondary
- * export table, to avoid having to lock the main export table.
- */
- export_write_lock();
- ep = (Export *) hash_put(&secondary_export_table, (void*) &e);
- export_write_unlock();
- }
+
+ do {
+ code_ix = erts_active_code_ix();
+ ep = erts_find_export_entry(mod, func, arity, code_ix);
+ if (ep == 0) {
+ /*
+ * The code is not loaded (yet). Put the export in the staging
+ * export table, to avoid having to lock the active export table.
+ */
+ export_staging_lock();
+ if (erts_active_code_ix() == code_ix) {
+ struct export_templ templ;
+ struct export_entry* entry;
+
+ IndexTable* tab = &export_tables[erts_staging_code_ix()];
+ init_template(&templ, mod, func, arity);
+ entry = (struct export_entry *) index_put_entry(tab, &templ.entry);
+ ep = entry->ep;
+ ASSERT(ep);
+ }
+ else { /* race */
+ ASSERT(!retrying);
+ IF_DEBUG(retrying = 1);
+ }
+ export_staging_unlock();
+ }
+ } while (!ep);
return ep;
}
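
The retry loop above is an optimistic lookup: read the active code index, search without a lock, and if an insert is needed, re-check the index under the staging lock and loop again when a code activation raced in between. A hedged sketch of that pattern using pthreads; the three helper functions are assumptions for illustration, not ERTS APIs:

    #include <pthread.h>
    #include <stddef.h>

    /* Assumed helpers, not part of ERTS: */
    int   current_version(void);       /* reads an atomic version/index      */
    void *lookup(int version);         /* lock-free lookup against a version */
    void *insert_locked(void);         /* insert; caller holds staging_lock  */

    static pthread_mutex_t staging_lock = PTHREAD_MUTEX_INITIALIZER;

    void *get_or_insert(void)
    {
        void *obj;

        do {
            int v = current_version();
            obj = lookup(v);
            if (obj == NULL) {
                pthread_mutex_lock(&staging_lock);
                if (current_version() == v)   /* nobody activated new code */
                    obj = insert_locked();
                /* else: raced with an activation; loop and try again */
                pthread_mutex_unlock(&staging_lock);
            }
        } while (obj == NULL);
        return obj;
    }
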
-/*
- * To be called before loading code (with other threads blocked).
- * This function will move all export entries from the secondary
- * export table into the primary.
- */
-void
-erts_export_consolidate(void)
+Export *export_list(int i, ErtsCodeIndex code_ix)
{
-#ifdef DEBUG
- HashInfo hi;
-#endif
-
- ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_smp_thr_progress_is_blocking());
-
- export_write_lock();
- erts_index_merge(&secondary_export_table, &export_table);
- erts_hash_merge(&secondary_export_table, &export_table.htable);
- export_write_unlock();
-#ifdef DEBUG
- hash_get_info(&hi, &export_table.htable);
- ASSERT(export_table.entries == hi.objs);
-#endif
+ return ((struct export_entry*) erts_index_lookup(&export_tables[code_ix], i))->ep;
}
-Export *export_list(int i)
+int export_list_size(ErtsCodeIndex code_ix)
{
- return (Export*) erts_index_lookup(&export_table, i);
+ return export_tables[code_ix].entries;
}
-int export_list_size(void)
+int export_table_sz(void)
+{
+ int i, bytes = 0;
+
+ export_staging_lock();
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ bytes += index_table_sz(&export_tables[i]);
+ }
+ export_staging_unlock();
+ return bytes;
+}
+int export_entries_sz(void)
{
- return export_table.entries;
+ return erts_smp_atomic_read_nob(&total_entries_bytes);
}
+Export *export_get(Export *e)
+{
+ struct export_entry ee;
+ struct export_entry* entry;
-int export_table_sz(void)
+ ee.ep = e;
+ entry = (struct export_entry*)hash_get(&export_tables[erts_active_code_ix()].htable, &ee);
+ return entry ? entry->ep : NULL;
+}
+
+IF_DEBUG(static ErtsCodeIndex debug_start_load_ix = 0;)
+
+
+void export_start_staging(void)
{
- return index_table_sz(&export_table);
+ ErtsCodeIndex dst_ix = erts_staging_code_ix();
+ ErtsCodeIndex src_ix = erts_active_code_ix();
+ IndexTable* dst = &export_tables[dst_ix];
+ IndexTable* src = &export_tables[src_ix];
+ struct export_entry* src_entry;
+ struct export_entry* dst_entry;
+ int i;
+
+ ASSERT(dst_ix != src_ix);
+ ASSERT(debug_start_load_ix == -1);
+
+ export_staging_lock();
+ /*
+ * Insert all entries in src into dst table
+ */
+ for (i = 0; i < src->entries; i++) {
+ src_entry = (struct export_entry*) erts_index_lookup(src, i);
+ src_entry->ep->addressv[dst_ix] = src_entry->ep->addressv[src_ix];
+ dst_entry = (struct export_entry*) index_put_entry(dst, src_entry);
+ ASSERT(entry_to_blob(src_entry) == entry_to_blob(dst_entry));
+ }
+ export_staging_unlock();
+
+ IF_DEBUG(debug_start_load_ix = dst_ix);
}
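
export_start_staging() copies the active table into the staging table so a loader can modify it off-line; activation later happens by switching the code index that readers consult. A simplified, self-contained C11 sketch of that publish-by-index-flip scheme (two tables instead of ERTS_NUM_CODE_IX, and without the copy-everything step shown above):

    #include <stdatomic.h>

    #define NTABLES 2
    #define NSLOTS  16

    static int tables[NTABLES][NSLOTS];        /* stand-in for the real tables */
    static _Atomic int active_ix;              /* which table readers use      */

    int read_entry(int slot)                   /* hot path: no lock taken      */
    {
        int ix = atomic_load_explicit(&active_ix, memory_order_acquire);
        return tables[ix][slot];
    }

    void stage_and_activate(int slot, int value)  /* loader, serialized elsewhere */
    {
        int staging = 1 - atomic_load_explicit(&active_ix, memory_order_relaxed);
        tables[staging][slot] = value;            /* prepare off-line            */
        atomic_store_explicit(&active_ix, staging, memory_order_release);
    }
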
-Export *export_get(Export *e)
+void export_end_staging(int commit)
{
- return hash_get(&export_table.htable, e);
+ ASSERT(debug_start_load_ix == erts_staging_code_ix());
+ IF_DEBUG(debug_start_load_ix = -1);
}
+
diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h
index c604fdf7c3..ee06e69aff 100644
--- a/erts/emulator/beam/export.h
+++ b/erts/emulator/beam/export.h
@@ -28,14 +28,15 @@
#include "index.h"
#endif
+#include "code_ix.h"
+
/*
** Export entry
*/
+
typedef struct export
{
- IndexSlot slot; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
- void* address; /* Pointer to code for function. */
- struct binary* match_prog_set; /* Match program for tracing. */
+ void* addressv[ERTS_NUM_CODE_IX]; /* Pointer to code for function. */
BeamInstr fake_op_func_info_for_hipe[2]; /* MUST be just before code[] */
/*
@@ -44,12 +45,12 @@ typedef struct export
* code[2]: Arity (untagged integer).
* code[3]: This entry is 0 unless the 'address' field points to it.
* Threaded code instruction to load function
- * (em_call_error_handler), execute BIF (em_apply_bif,
- * em_apply_apply), or call a traced function
- * (em_call_traced_function).
- * code[4]: Function pointer to BIF function (for BIFs only)
+ * (em_call_error_handler), execute BIF (em_apply_bif),
+ * or a breakpoint instruction (op_i_generic_breakpoint).
+ * code[4]: Function pointer to BIF function (for BIFs only),
* or pointer to threaded code if the module has an
- * on_load function that has not been run yet.
+ * on_load function that has not been run yet, or pointer
+ * to the function's code if code[3] is a breakpoint instruction.
* Otherwise: 0.
*/
BeamInstr code[5];
@@ -59,21 +60,38 @@ typedef struct export
void init_export_table(void);
void export_info(int, void *);
-Export* erts_find_export_entry(Eterm m, Eterm f, unsigned int a);
+ERTS_GLB_INLINE Export* erts_active_export_entry(Eterm m, Eterm f, unsigned a);
Export* erts_export_put(Eterm mod, Eterm func, unsigned int arity);
-
Export* erts_export_get_or_make_stub(Eterm, Eterm, unsigned);
-void erts_export_consolidate(void);
-Export *export_list(int);
-int export_list_size(void);
+Export *export_list(int,ErtsCodeIndex);
+int export_list_size(ErtsCodeIndex);
int export_table_sz(void);
+int export_entries_sz(void);
Export *export_get(Export*);
+void export_start_staging(void);
+void export_end_staging(int commit);
+
+extern erts_smp_mtx_t export_staging_lock;
+#define export_staging_lock() erts_smp_mtx_lock(&export_staging_lock)
+#define export_staging_unlock() erts_smp_mtx_unlock(&export_staging_lock)
#include "beam_load.h" /* For em_* extern declarations */
#define ExportIsBuiltIn(EntryPtr) \
-(((EntryPtr)->address == (EntryPtr)->code + 3) && \
+(((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->code + 3) && \
((EntryPtr)->code[3] == (BeamInstr) em_apply_bif))
-#endif
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Export*
+erts_active_export_entry(Eterm m, Eterm f, unsigned int a)
+{
+ extern Export* erts_find_export_entry(Eterm m, Eterm f, unsigned a, ErtsCodeIndex);
+ return erts_find_export_entry(m, f, a, erts_active_code_ix());
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+#endif /* __EXPORT_H__ */
+
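
ExportIsBuiltIn and erts_find_function above both rely on the same trick: an export entry whose dispatch address still points into its own code[] array is a stub or BIF rather than loaded module code. A hedged stand-alone illustration (the struct layout is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct entry {
        void     *address;      /* where calls to this MFA are dispatched     */
        uintptr_t code[5];      /* slot 3 holds a trampoline/BIF instruction  */
    };

    /* True when the entry still dispatches into its own code[] area,
     * i.e. no module code has been loaded for it yet. */
    bool points_at_own_code(const struct entry *e)
    {
        return e->address == (const void *)&e->code[3];
    }
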
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 52f45b924f..ab1065aaa1 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1850,8 +1850,8 @@ enc_term(ErtsAtomCacheMap *acmp, Eterm obj, byte* ep, Uint32 dflags,
} else {
*ep++ = FLOAT_EXT;
- /* now the sprintf which does the work */
- i = sys_double_to_chars(f.fd, (char*) ep);
+ /* now the erts_snprintf which does the work */
+ i = sys_double_to_chars(f.fd, (char*) ep, (size_t)31);
/* Don't leave garbage after the float! (Bad practice in general,
* and Purify complains.)
@@ -2564,7 +2564,7 @@ dec_term_atom_common:
goto error;
}
if (edep && (edep->flags & ERTS_DIST_EXT_BTT_SAFE)) {
- if (!erts_find_export_entry(mod, name, arity))
+ if (!erts_active_export_entry(mod, name, arity))
goto error;
}
*objp = make_export(hp);
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 2c20e3da3b..298241618f 100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -28,6 +28,7 @@
#include "hash.h"
#include "index.h"
#include "atom.h"
+#include "code_ix.h"
#include "export.h"
#include "module.h"
#include "register.h"
@@ -38,38 +39,8 @@
#include "erl_sys_driver.h"
#include "erl_debug.h"
#include "error.h"
-
-typedef struct port Port;
-#include "erl_port_task.h"
-
-typedef struct erts_driver_t_ erts_driver_t;
-
-#define SMALL_IO_QUEUE 5 /* Number of fixed elements */
-
-typedef struct {
- ErlDrvSizeT size; /* total size in bytes */
-
- SysIOVec* v_start;
- SysIOVec* v_end;
- SysIOVec* v_head;
- SysIOVec* v_tail;
- SysIOVec v_small[SMALL_IO_QUEUE];
-
- ErlDrvBinary** b_start;
- ErlDrvBinary** b_end;
- ErlDrvBinary** b_head;
- ErlDrvBinary** b_tail;
- ErlDrvBinary* b_small[SMALL_IO_QUEUE];
-} ErlIOQueue;
-
-typedef struct line_buf { /* Buffer used in line oriented I/O */
- ErlDrvSizeT bufsiz; /* Size of character buffer */
- ErlDrvSizeT ovlen; /* Length of overflow data */
- ErlDrvSizeT ovsiz; /* Actual size of overflow buffer */
- char data[1]; /* Starting point of buffer data,
- data[0] is a flag indicating an unprocess CR,
- The rest is the overflow buffer. */
-} LineBuf;
+#include "erl_utils.h"
+#include "erl_port.h"
struct enif_environment_t /* ErlNifEnv */
{
@@ -89,158 +60,6 @@ extern void erts_print_nif_taints(int to, void* to_arg);
void erts_unload_nif(struct erl_module_nif* nif);
extern void erl_nif_init(void);
-/*
- * Port Specific Data.
- *
- * Only use PrtSD for very rarely used data.
- */
-
-#define ERTS_PRTSD_SCHED_ID 0
-
-#define ERTS_PRTSD_SIZE 1
-
-typedef struct {
- void *data[ERTS_PRTSD_SIZE];
-} ErtsPrtSD;
-
-#ifdef ERTS_SMP
-typedef struct ErtsXPortsList_ ErtsXPortsList;
-#endif
-
-/*
- * Port locking:
- *
- * Locking is done either driver specific or port specific. When
- * driver specific locking is used, all instances of the driver,
- * i.e. ports running the driver, share the same lock. When port
- * specific locking is used each instance have its own lock.
- *
- * Most fields in the Port structure are protected by the lock
- * referred to by the lock field. I'v called it the port lock.
- * This lock is shared between all ports running the same driver
- * when driver specific locking is used.
- *
- * The 'sched' field is protected by the port tasks lock
- * (see erl_port_tasks.c)
- *
- * The 'status' field is protected by a combination of the port lock,
- * the port tasks lock, and the state_lck. It may be read if
- * the state_lck, or the port lock is held. It may only be
- * modified if both the port lock and the state_lck is held
- * (with one exception; see below). When changeing status from alive
- * to dead or vice versa, also the port task lock has to be held.
- * This in order to guarantee that tasks are scheduled only for
- * ports that are alive.
- *
- * The status field may be modified with only the state_lck
- * held when status is changed from dead to alive. This since no
- * threads can have any references to the port other than via the
- * port table.
- *
- * /rickard
- */
-
-struct port {
- ErtsPortTaskSched sched;
- ErtsPortTaskHandle timeout_task;
-#ifdef ERTS_SMP
- erts_smp_atomic_t refc;
- erts_smp_mtx_t *lock;
- ErtsXPortsList *xports;
- erts_smp_atomic_t run_queue;
- erts_smp_spinlock_t state_lck; /* protects: id, status, snapshot */
-#endif
- Eterm id; /* The Port id of this port */
- Eterm connected; /* A connected process */
- Eterm caller; /* Current caller. */
- Eterm data; /* Data associated with port. */
- ErlHeapFragment* bp; /* Heap fragment holding data (NULL if imm data). */
- ErtsLink *nlinks;
- ErtsMonitor *monitors; /* Only MON_ORIGIN monitors of pid's */
- Uint bytes_in; /* Number of bytes read */
- Uint bytes_out; /* Number of bytes written */
-#ifdef ERTS_SMP
- ErtsSmpPTimer *ptimer;
-#else
- ErlTimer tm; /* Timer entry */
-#endif
-
- Eterm tracer_proc; /* If the port is traced, this is the tracer */
- Uint trace_flags; /* Trace flags */
-
- ErlIOQueue ioq; /* driver accessible i/o queue */
- DistEntry *dist_entry; /* Dist entry used in DISTRIBUTION */
- char *name; /* String used in the open */
- erts_driver_t* drv_ptr;
- UWord drv_data;
- SWord os_pid; /* Child process ID */
- ErtsProcList *suspended; /* List of suspended processes. */
- LineBuf *linebuf; /* Buffer to hold data not ready for
- process to get (line oriented I/O)*/
- Uint32 status; /* Status and type flags */
- int control_flags; /* Flags for port_control() */
- erts_aint32_t snapshot; /* Next snapshot that port should be part of */
- struct reg_proc *reg;
- ErlDrvPDL port_data_lock;
-
- ErtsPrtSD *psd; /* Port specific data */
-};
-
-
-ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE ErtsRunQueue *
-erts_port_runq(Port *prt)
-{
-#ifdef ERTS_SMP
- ErtsRunQueue *rq1, *rq2;
- rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
- while (1) {
- erts_smp_runq_lock(rq1);
- rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
- if (rq1 == rq2)
- return rq1;
- erts_smp_runq_unlock(rq1);
- rq1 = rq2;
- }
-#else
- return ERTS_RUNQ_IX(0);
-#endif
-}
-
-#endif
-
-
-ERTS_GLB_INLINE void *erts_prtsd_get(Port *p, int ix);
-ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void *
-erts_prtsd_get(Port *prt, int ix)
-{
- return prt->psd ? prt->psd->data[ix] : NULL;
-}
-
-ERTS_GLB_INLINE void *
-erts_prtsd_set(Port *prt, int ix, void *data)
-{
- if (prt->psd) {
- void *old = prt->psd->data[ix];
- prt->psd->data[ix] = data;
- return old;
- }
- else {
- prt->psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD));
- prt->psd->data[ix] = data;
- return NULL;
- }
-}
-
-#endif
-
/* Driver handle (wrapper for old plain handle) */
#define ERL_DE_OK 0
#define ERL_DE_UNLOAD 1
@@ -292,7 +111,7 @@ typedef struct {
or that wait for it to change state */
erts_refc_t refc; /* Number of ports/processes having
references to the driver */
- Uint port_count; /* Number of ports using the driver */
+ erts_smp_atomic32_t port_count; /* Number of ports using the driver */
Uint flags; /* ERL_DE_FL_KILL_PORTS */
int status; /* ERL_DE_xxx */
char *full_path; /* Full path of the driver */
@@ -344,7 +163,7 @@ struct erts_driver_t_ {
};
extern erts_driver_t *driver_list;
-extern erts_smp_mtx_t erts_driver_list_lock;
+extern erts_smp_rwmtx_t erts_driver_list_lock;
extern void erts_ddll_init(void);
extern void erts_ddll_lock_driver(DE_Handle *dh, char *name);
@@ -524,40 +343,9 @@ union erl_off_heap_ptr {
void* voidp;
};
-/* arrays that get malloced at startup */
-extern Port* erts_port;
-
-extern Uint erts_max_ports;
-extern Uint erts_port_tab_index_mask;
-extern erts_smp_atomic32_t erts_ports_snapshot;
-extern erts_smp_atomic_t erts_dead_ports_ptr;
-
-ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
-{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_spinlock_is_locked(&prt->state_lck));
- if (prt->snapshot != erts_smp_atomic32_read_acqb(&erts_ports_snapshot)) {
- /* Dead ports are added from the end of the snapshot buffer */
- Eterm* tombstone;
- tombstone = (Eterm*) erts_smp_atomic_add_read_nob(&erts_dead_ports_ptr,
- -(erts_aint_t)sizeof(Eterm));
- ASSERT(tombstone+1 != NULL);
- ASSERT(prt->snapshot == erts_smp_atomic32_read_nob(&erts_ports_snapshot) - 1);
- *tombstone = prt->id;
- }
- /*else no ongoing snapshot or port was already included or created after snapshot */
-}
-
-#endif
-
/* controls warning mapping in error_logger */
extern Eterm node_cookie;
-extern erts_smp_atomic_t erts_bytes_out; /* no bytes written out */
-extern erts_smp_atomic_t erts_bytes_in; /* no bytes sent into the system */
extern Uint display_items; /* no of items to display in traces etc */
extern int erts_backtrace_depth;
@@ -695,54 +483,6 @@ do { \
#define WSTACK_ISEMPTY(s) (WSTK_CONCAT(s,_sp) == WSTK_CONCAT(s,_start))
#define WSTACK_POP(s) (*(--WSTK_CONCAT(s,_sp)))
-
-/* port status flags */
-
-#define ERTS_PORT_SFLG_CONNECTED ((Uint32) (1 << 0))
-/* Port have begun exiting */
-#define ERTS_PORT_SFLG_EXITING ((Uint32) (1 << 1))
-/* Distribution port */
-#define ERTS_PORT_SFLG_DISTRIBUTION ((Uint32) (1 << 2))
-#define ERTS_PORT_SFLG_BINARY_IO ((Uint32) (1 << 3))
-#define ERTS_PORT_SFLG_SOFT_EOF ((Uint32) (1 << 4))
-/* Flow control */
-#define ERTS_PORT_SFLG_PORT_BUSY ((Uint32) (1 << 5))
-/* Port is closing (no i/o accepted) */
-#define ERTS_PORT_SFLG_CLOSING ((Uint32) (1 << 6))
-/* Send a closed message when terminating */
-#define ERTS_PORT_SFLG_SEND_CLOSED ((Uint32) (1 << 7))
-/* Line orinted io on port */
-#define ERTS_PORT_SFLG_LINEBUF_IO ((Uint32) (1 << 8))
-/* Immortal port (only certain system ports) */
-#define ERTS_PORT_SFLG_IMMORTAL ((Uint32) (1 << 9))
-#define ERTS_PORT_SFLG_FREE ((Uint32) (1 << 10))
-#define ERTS_PORT_SFLG_FREE_SCHEDULED ((Uint32) (1 << 11))
-#define ERTS_PORT_SFLG_INITIALIZING ((Uint32) (1 << 12))
-/* Port uses port specific locking (opposed to driver specific locking) */
-#define ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK ((Uint32) (1 << 13))
-#define ERTS_PORT_SFLG_INVALID ((Uint32) (1 << 14))
-/* Last port to terminate halts the emulator */
-#define ERTS_PORT_SFLG_HALT ((Uint32) (1 << 15))
-#ifdef DEBUG
-/* Only debug: make sure all flags aren't cleared unintentionally */
-#define ERTS_PORT_SFLG_PORT_DEBUG ((Uint32) (1 << 31))
-#endif
-
-/* Combinations of port status flags */
-#define ERTS_PORT_SFLGS_DEAD \
- (ERTS_PORT_SFLG_FREE \
- | ERTS_PORT_SFLG_FREE_SCHEDULED \
- | ERTS_PORT_SFLG_INITIALIZING)
-#define ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
- (ERTS_PORT_SFLGS_DEAD | ERTS_PORT_SFLG_INVALID)
-#define ERTS_PORT_SFLGS_INVALID_LOOKUP \
- (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP \
- | ERTS_PORT_SFLG_CLOSING)
-#define ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP \
- (ERTS_PORT_SFLGS_INVALID_LOOKUP \
- | ERTS_PORT_SFLG_PORT_BUSY \
- | ERTS_PORT_SFLG_DISTRIBUTION)
-
/* binary.c */
void erts_emasculate_writable_binary(ProcBin* pb);
@@ -753,17 +493,43 @@ Eterm erts_realloc_binary(Eterm bin, size_t size);
/* erl_bif_info.c */
+Eterm
+erts_bld_port_info(Eterm **hpp,
+ ErlOffHeap *ohp,
+ Uint *szp,
+ Port *prt,
+ Eterm item);
+
void erts_bif_info_init(void);
/* bif.c */
Eterm erts_make_ref(Process *);
Eterm erts_make_ref_in_buffer(Eterm buffer[REF_THING_SIZE]);
+void erts_make_ref_in_array(Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS]);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Eterm
+erts_proc_store_ref(Process *c_p, Uint32 ref[ERTS_MAX_REF_NUMBERS])
+{
+ Eterm *hp = HAlloc(c_p, REF_THING_SIZE);
+ write_ref_thing(hp, ref[0], ref[1], ref[2]);
+ return make_internal_ref(hp);
+}
+
+#endif
+
void erts_queue_monitor_message(Process *,
ErtsProcLocks*,
Eterm,
Eterm,
Eterm,
Eterm);
+void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a,
+ Eterm (*bif)(Process*,Eterm*));
void erts_init_bif(void);
Eterm erl_send(Process *p, Eterm to, Eterm msg);
@@ -771,13 +537,6 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg);
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
-/* erl_bif_port.c */
-
-/* erl_bif_trace.c */
-Eterm erl_seq_trace_info(Process *p, Eterm arg1);
-void erts_system_monitor_clear(Process *c_p);
-void erts_system_profile_clear(Process *c_p);
-
/* beam_load.c */
typedef struct {
BeamInstr* current; /* Pointer to: Mod, Name, Arity */
@@ -786,24 +545,33 @@ typedef struct {
Eterm* fname_ptr; /* Pointer to fname table */
} FunctionInfo;
-struct LoaderState* erts_alloc_loader_state(void);
-Eterm erts_prepare_loading(struct LoaderState*, Process *c_p,
+Binary* erts_alloc_loader_state(void);
+Eterm erts_module_for_prepared_code(Binary* magic);
+Eterm erts_prepare_loading(Binary* loader_state, Process *c_p,
Eterm group_leader, Eterm* modp,
byte* code, Uint size);
-Eterm erts_finish_loading(struct LoaderState* stp, Process* c_p,
+Eterm erts_finish_loading(Binary* loader_state, Process* c_p,
ErtsProcLocks c_p_locks, Eterm* modp);
-Eterm erts_load_module(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm* mod, byte* code, Uint size);
+Eterm erts_preload_module(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm* mod, byte* code, Uint size);
void init_load(void);
BeamInstr* find_function_from_pc(BeamInstr* pc);
Eterm* erts_build_mfa_item(FunctionInfo* fi, Eterm* hp,
Eterm args, Eterm* mfa_p);
-void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info);
void erts_set_current_function(FunctionInfo* fi, BeamInstr* current);
Eterm erts_module_info_0(Process* p, Eterm module);
Eterm erts_module_info_1(Process* p, Eterm module, Eterm what);
Eterm erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info);
+/* beam_ranges.c */
+void erts_init_ranges(void);
+void erts_start_staging_ranges(void);
+void erts_end_staging_ranges(int commit);
+void erts_update_ranges(BeamInstr* code, Uint size);
+void erts_remove_from_ranges(BeamInstr* code);
+UWord erts_ranges_sz(void);
+void erts_lookup_function_info(FunctionInfo* fi, BeamInstr* pc, int full_info);
+
/* break.c */
void init_break_handler(void);
void erts_set_ignore_break(void);
@@ -944,11 +712,6 @@ void erts_free_heap_frags(Process* p);
/* io.c */
-struct erl_drv_port_data_lock {
- erts_mtx_t mtx;
- erts_atomic_t refc;
-};
-
typedef struct {
char *name;
char *driver_name;
@@ -957,363 +720,33 @@ typedef struct {
#define ERTS_SPAWN_DRIVER 1
#define ERTS_SPAWN_EXECUTABLE 2
#define ERTS_SPAWN_ANY (ERTS_SPAWN_DRIVER | ERTS_SPAWN_EXECUTABLE)
-
int erts_add_driver_entry(ErlDrvEntry *drv, DE_Handle *handle, int driver_list_locked);
void erts_destroy_driver(erts_driver_t *drv);
-void erts_wake_process_later(Port*, Process*);
-int erts_open_driver(erts_driver_t*, Eterm, char*, SysDriverOpts*, int *);
-int erts_is_port_ioq_empty(Port *);
-void erts_terminate_port(Port *);
-void close_port(Eterm);
-void init_io(void);
-void cleanup_io(void);
-void erts_do_exit_port(Port *, Eterm, Eterm);
-void erts_port_command(Process *, Eterm, Port *, Eterm);
-Eterm erts_port_control(Process*, Port*, Uint, Eterm);
-int erts_write_to_port(Eterm caller_id, Port *p, Eterm list);
-void print_port_info(int, void *, int);
+int erts_save_suspend_process_on_port(Port*, Process*);
+Port *erts_open_driver(erts_driver_t*, Eterm, char*, SysDriverOpts*, int *, int *);
+void erts_init_io(int, int);
void erts_raw_port_command(Port*, byte*, Uint);
-void driver_report_exit(int, int);
+void driver_report_exit(ErlDrvPort, int);
LineBuf* allocate_linebuf(int);
int async_ready(Port *, void*);
-Sint erts_test_next_port(int, Uint);
ErtsPortNames *erts_get_port_names(Eterm);
void erts_free_port_names(ErtsPortNames *);
Uint erts_port_ioq_size(Port *pp);
void erts_stale_drv_select(Eterm, ErlDrvEvent, int, int);
-void erts_port_cleanup(Port *);
-void erts_fire_port_monitor(Port *prt, Eterm ref);
Port *erts_get_heart_port(void);
-#ifdef ERTS_SMP
-void erts_smp_xports_unlock(Port *);
-#endif
-
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT)
void erts_lcnt_enable_io_lock_count(int enable);
#endif
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-int erts_lc_is_port_locked(Port *);
-#endif
-
-ERTS_GLB_INLINE void erts_smp_port_state_lock(Port*);
-ERTS_GLB_INLINE void erts_smp_port_state_unlock(Port*);
-
-ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE void
-erts_smp_port_state_lock(Port* prt)
-{
-#ifdef ERTS_SMP
- erts_smp_spin_lock(&prt->state_lck);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_port_state_unlock(Port *prt)
-{
-#ifdef ERTS_SMP
- erts_smp_spin_unlock(&prt->state_lck);
-#endif
-}
-
-
-ERTS_GLB_INLINE int
-erts_smp_port_trylock(Port *prt)
-{
-#ifdef ERTS_SMP
- int res;
-
- ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0);
- erts_smp_atomic_inc_nob(&prt->refc);
- res = erts_smp_mtx_trylock(prt->lock);
- if (res == EBUSY) {
- erts_smp_atomic_dec_nob(&prt->refc);
- }
-
- return res;
-#else /* !ERTS_SMP */
- return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_port_lock(Port *prt)
-{
-#ifdef ERTS_SMP
- ASSERT(erts_smp_atomic_read_nob(&prt->refc) > 0);
- erts_smp_atomic_inc_nob(&prt->refc);
- erts_smp_mtx_lock(prt->lock);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_port_unlock(Port *prt)
-{
-#ifdef ERTS_SMP
- erts_aint_t refc;
- erts_smp_mtx_unlock(prt->lock);
- refc = erts_smp_atomic_dec_read_nob(&prt->refc);
- ASSERT(refc >= 0);
- if (refc == 0)
- erts_port_cleanup(prt);
-#endif
-}
-
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
-
-#define ERTS_INVALID_PORT_OPT(PP, ID, FLGS) \
- (!(PP) || ((PP)->status & (FLGS)) || (PP)->id != (ID))
-
-/* port lookup */
-
-#define INVALID_PORT(PP, ID) \
- ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_LOOKUP)
-
-/* Invalidate trace port if anything suspicious, for instance
- * that the port is a distribution port or it is busy.
- */
-#define INVALID_TRACER_PORT(PP, ID) \
- ERTS_INVALID_PORT_OPT((PP), (ID), ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP)
-
-#define ERTS_PORT_SCHED_ID(P, ID) \
- ((Uint) (UWord) erts_prtsd_set((P), ERTS_PSD_SCHED_ID, (void *) (UWord) (ID)))
-
-#ifdef ERTS_SMP
-Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
-#endif
-
-#define erts_id2port(ID, P, PL) \
- erts_id2port_sflgs((ID), (P), (PL), ERTS_PORT_SFLGS_INVALID_LOOKUP)
-
-ERTS_GLB_INLINE Port*erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
-ERTS_GLB_INLINE void erts_port_release(Port *);
-ERTS_GLB_INLINE Port*erts_drvport2port(ErlDrvPort);
-ERTS_GLB_INLINE Port*erts_drvportid2port(Eterm);
-ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm id);
-ERTS_GLB_INLINE int erts_is_port_alive(Eterm id);
-ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm id);
-ERTS_GLB_INLINE void erts_port_status_bandor_set(Port *, Uint32, Uint32);
-ERTS_GLB_INLINE void erts_port_status_band_set(Port *, Uint32);
-ERTS_GLB_INLINE void erts_port_status_bor_set(Port *, Uint32);
-ERTS_GLB_INLINE void erts_port_status_set(Port *, Uint32);
-ERTS_GLB_INLINE Uint32 erts_port_status_get(Port *);
-
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE Port*
-erts_id2port_sflgs(Eterm id, Process *c_p, ErtsProcLocks c_p_locks, Uint32 sflgs)
-{
-#ifdef ERTS_SMP
- int no_proc_locks = !c_p || !c_p_locks;
-#endif
- Port *prt;
-
- if (is_not_internal_port(id))
- return NULL;
-
- prt = &erts_port[internal_port_index(id)];
-
- erts_smp_port_state_lock(prt);
- if (ERTS_INVALID_PORT_OPT(prt, id, sflgs)) {
- erts_smp_port_state_unlock(prt);
- prt = NULL;
- }
-#ifdef ERTS_SMP
- else {
- erts_smp_atomic_inc_nob(&prt->refc);
- erts_smp_port_state_unlock(prt);
-
- if (no_proc_locks)
- erts_smp_mtx_lock(prt->lock);
- else if (erts_smp_mtx_trylock(prt->lock) == EBUSY) {
- /* Unlock process locks, and acquire locks in lock order... */
- erts_smp_proc_unlock(c_p, c_p_locks);
- erts_smp_mtx_lock(prt->lock);
- erts_smp_proc_lock(c_p, c_p_locks);
- }
-
- /* The id may not have changed... */
- ERTS_SMP_LC_ASSERT(prt->id == id);
- /* ... but status may have... */
- if (prt->status & sflgs) {
- erts_smp_port_unlock(prt); /* Also decrements refc... */
- prt = NULL;
- }
- }
-#endif
-
- return prt;
-}
-
-ERTS_GLB_INLINE void
-erts_port_release(Port *prt)
-{
-#ifdef ERTS_SMP
- erts_smp_port_unlock(prt);
-#else
- if (prt->status & ERTS_PORT_SFLGS_DEAD)
- erts_port_cleanup(prt);
-#endif
-}
-
-ERTS_GLB_INLINE Port*
-erts_drvport2port(ErlDrvPort drvport)
-{
- int ix = (int) drvport;
- if (ix < 0 || erts_max_ports <= ix)
- return NULL;
- if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- return NULL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix]));
- return &erts_port[ix];
-}
-
-ERTS_GLB_INLINE Port*
-erts_drvportid2port(Eterm id)
-{
- int ix;
- if (is_not_internal_port(id))
- return NULL;
- ix = (int) internal_port_index(id);
- if (erts_max_ports <= ix)
- return NULL;
- if (erts_port[ix].status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- return NULL;
- if (erts_port[ix].id != id)
- return NULL;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix]));
- return &erts_port[ix];
-}
-
-ERTS_GLB_INLINE Uint32
-erts_portid2status(Eterm id)
-{
- if (is_not_internal_port(id))
- return ERTS_PORT_SFLG_INVALID;
- else {
- Uint32 status;
- int ix = internal_port_index(id);
- if (erts_max_ports <= ix)
- return ERTS_PORT_SFLG_INVALID;
- erts_smp_port_state_lock(&erts_port[ix]);
- if (erts_port[ix].id == id)
- status = erts_port[ix].status;
- else
- status = ERTS_PORT_SFLG_INVALID;
- erts_smp_port_state_unlock(&erts_port[ix]);
- return status;
- }
-}
-
-ERTS_GLB_INLINE int
-erts_is_port_alive(Eterm id)
-{
- return !(erts_portid2status(id) & (ERTS_PORT_SFLG_INVALID
- | ERTS_PORT_SFLGS_DEAD));
-}
-
-ERTS_GLB_INLINE int
-erts_is_valid_tracer_port(Eterm id)
-{
- return !(erts_portid2status(id) & ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
-}
-
-ERTS_GLB_INLINE void erts_port_status_bandor_set(Port *prt,
- Uint32 band_status,
- Uint32 bor_status)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_port_state_lock(prt);
- prt->status &= band_status;
- prt->status |= bor_status;
- erts_smp_port_state_unlock(prt);
-}
-
-ERTS_GLB_INLINE void erts_port_status_band_set(Port *prt, Uint32 status)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_port_state_lock(prt);
- prt->status &= status;
- erts_smp_port_state_unlock(prt);
-}
-
-ERTS_GLB_INLINE void erts_port_status_bor_set(Port *prt, Uint32 status)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_port_state_lock(prt);
- prt->status |= status;
- erts_smp_port_state_unlock(prt);
-}
-
-ERTS_GLB_INLINE void erts_port_status_set(Port *prt, Uint32 status)
-{
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- erts_smp_port_state_lock(prt);
- prt->status = status;
- erts_smp_port_state_unlock(prt);
-}
-
-ERTS_GLB_INLINE Uint32 erts_port_status_get(Port *prt)
-{
- Uint32 res;
- erts_smp_port_state_lock(prt);
- res = prt->status;
- erts_smp_port_state_unlock(prt);
- return res;
-}
-#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-
/* erl_drv_thread.c */
void erl_drv_thr_init(void);
-/* time.c */
-
/* utils.c */
-
-/*
- * To be used to silence unused result warnings, but do not abuse it.
- */
-void erts_silence_warn_unused_result(long unused);
-
void erts_cleanup_offheap(ErlOffHeap *offheap);
-Uint erts_fit_in_bits(Uint);
-int list_length(Eterm);
-Export* erts_find_function(Eterm, Eterm, unsigned int);
-int erts_is_builtin(Eterm, Eterm, int);
-Uint32 make_broken_hash(Eterm);
-Uint32 block_hash(byte *, unsigned, Uint32);
-Uint32 make_hash2(Eterm);
-Uint32 make_hash(Eterm);
-
-
-Eterm erts_bld_atom(Uint **hpp, Uint *szp, char *str);
-Eterm erts_bld_uint(Uint **hpp, Uint *szp, Uint ui);
-Eterm erts_bld_uword(Uint **hpp, Uint *szp, UWord uw);
-Eterm erts_bld_uint64(Uint **hpp, Uint *szp, Uint64 ui64);
-Eterm erts_bld_sint64(Uint **hpp, Uint *szp, Sint64 si64);
-Eterm erts_bld_cons(Uint **hpp, Uint *szp, Eterm car, Eterm cdr);
-Eterm erts_bld_tuple(Uint **hpp, Uint *szp, Uint arity, ...);
-Eterm erts_bld_tuplev(Uint **hpp, Uint *szp, Uint arity, Eterm terms[]);
-Eterm erts_bld_string_n(Uint **hpp, Uint *szp, const char *str, Sint len);
-#define erts_bld_string(hpp,szp,str) erts_bld_string_n(hpp,szp,str,strlen(str))
-Eterm erts_bld_list(Uint **hpp, Uint *szp, Sint length, Eterm terms[]);
-Eterm erts_bld_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm terms1[], Uint terms2[]);
-Eterm
-erts_bld_atom_uint_2tup_list(Uint **hpp, Uint *szp,
- Sint length, Eterm atoms[], Uint uints[]);
-Eterm
-erts_bld_atom_2uint_3tup_list(Uint **hpp, Uint *szp, Sint length,
- Eterm atoms[], Uint uints1[], Uint uints2[]);
+Export* erts_find_function(Eterm, Eterm, unsigned int, ErtsCodeIndex);
Eterm store_external_or_ref_in_proc_(Process *, Eterm);
Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
@@ -1328,42 +761,6 @@ Eterm store_external_or_ref_(Uint **, ErlOffHeap*, Eterm);
(ASSERT_EXPR(is_node_container((NC))), \
IS_CONST((NC)) ? (NC) : store_external_or_ref_in_proc_((Pp), (NC)))
-void erts_init_utils(void);
-void erts_init_utils_mem(void);
-
-erts_dsprintf_buf_t *erts_create_tmp_dsbuf(Uint);
-void erts_destroy_tmp_dsbuf(erts_dsprintf_buf_t *);
-
-#if HALFWORD_HEAP
-int eq_rel(Eterm a, Eterm* a_base, Eterm b, Eterm* b_base);
-# define eq(A,B) eq_rel(A,NULL,B,NULL)
-#else
-int eq(Eterm, Eterm);
-# define eq_rel(A,A_BASE,B,B_BASE) eq(A,B)
-#endif
-
-#define EQ(x,y) (((x) == (y)) || (is_not_both_immed((x),(y)) && eq((x),(y))))
-
-#if HALFWORD_HEAP
-Sint cmp_rel(Eterm, Eterm*, Eterm, Eterm*);
-#define CMP(A,B) cmp_rel(A,NULL,B,NULL)
-#else
-Sint cmp(Eterm, Eterm);
-#define cmp_rel(A,A_BASE,B,B_BASE) cmp(A,B)
-#define CMP(A,B) cmp(A,B)
-#endif
-#define cmp_lt(a,b) (CMP((a),(b)) < 0)
-#define cmp_le(a,b) (CMP((a),(b)) <= 0)
-#define cmp_eq(a,b) (CMP((a),(b)) == 0)
-#define cmp_ne(a,b) (CMP((a),(b)) != 0)
-#define cmp_ge(a,b) (CMP((a),(b)) >= 0)
-#define cmp_gt(a,b) (CMP((a),(b)) > 0)
-
-#define CMP_LT(a,b) ((a) != (b) && cmp_lt((a),(b)))
-#define CMP_GE(a,b) ((a) == (b) || cmp_ge((a),(b)))
-#define CMP_EQ(a,b) ((a) == (b) || cmp_eq((a),(b)))
-#define CMP_NE(a,b) ((a) != (b) && cmp_ne((a),(b)))
-
/* duplicates from big.h */
int term_to_Uint(Eterm term, Uint *up);
int term_to_UWord(Eterm, UWord*);
@@ -1400,79 +797,6 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes);
#define ERTS_UTF8_ERROR 2
#define ERTS_UTF8_ANALYZE_MORE 3
-/* erl_trace.c */
-void erts_init_trace(void);
-void erts_trace_check_exiting(Eterm exiting);
-Eterm erts_set_system_seq_tracer(Process *c_p,
- ErtsProcLocks c_p_locks,
- Eterm new);
-Eterm erts_get_system_seq_tracer(void);
-void erts_change_default_tracing(int setflags, Uint *flagsp, Eterm *tracerp);
-void erts_get_default_tracing(Uint *flagsp, Eterm *tracerp);
-void erts_set_system_monitor(Eterm monitor);
-Eterm erts_get_system_monitor(void);
-
-#ifdef ERTS_SMP
-void erts_check_my_tracer_proc(Process *);
-void erts_block_sys_msg_dispatcher(void);
-void erts_release_sys_msg_dispatcher(void);
-void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
- Eterm,
- Eterm,
- ErlHeapFragment *));
-void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
-#endif
-
-void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
-void trace_send(Process*, Eterm, Eterm);
-void trace_receive(Process*, Eterm);
-Uint32 erts_call_trace(Process *p, BeamInstr mfa[], Binary *match_spec, Eterm* args,
- int local, Eterm *tracer_pid);
-void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval, Eterm *tracer_pid);
-void erts_trace_exception(Process* p, BeamInstr mfa[], Eterm class, Eterm value,
- Eterm *tracer);
-void erts_trace_return_to(Process *p, BeamInstr *pc);
-void trace_sched(Process*, Eterm);
-void trace_proc(Process*, Process*, Eterm, Eterm);
-void trace_proc_spawn(Process*, Eterm pid, Eterm mod, Eterm func, Eterm args);
-void save_calls(Process *p, Export *);
-void trace_gc(Process *p, Eterm what);
-/* port tracing */
-void trace_virtual_sched(Process*, Eterm);
-void trace_sched_ports(Port *pp, Eterm);
-void trace_sched_ports_where(Port *pp, Eterm, Eterm);
-void trace_port(Port *, Eterm what, Eterm data);
-void trace_port_open(Port *, Eterm calling_pid, Eterm drv_name);
-
-/* system_profile */
-void erts_set_system_profile(Eterm profile);
-Eterm erts_get_system_profile(void);
-void profile_scheduler(Eterm scheduler_id, Eterm);
-void profile_scheduler_q(Eterm scheduler_id, Eterm state, Eterm no_schedulers, Uint Ms, Uint s, Uint us);
-void profile_runnable_proc(Process* p, Eterm status);
-void profile_runnable_port(Port* p, Eterm status);
-void erts_system_profile_setup_active_schedulers(void);
-
-/* system_monitor */
-void monitor_long_gc(Process *p, Uint time);
-void monitor_large_heap(Process *p);
-void monitor_generic(Process *p, Eterm type, Eterm spec);
-Uint erts_trace_flag2bit(Eterm flag);
-int erts_trace_flags(Eterm List,
- Uint *pMask, Eterm *pTracer, int *pCpuTimestamp);
-Eterm erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr *I);
-
-#ifdef ERTS_SMP
-void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
-#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \
-do { \
- if ((ESDP)->pending_trace_msgs) \
- erts_send_pending_trace_msgs((ESDP)); \
-} while (0)
-#else
-#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP)
-#endif
-
void bin_write(int, void*, byte*, size_t);
int intlist_to_buf(Eterm, char*, int); /* most callers pass plain char*'s */
@@ -1490,9 +814,16 @@ char* Sint_to_buf(Sint, struct Sint_buf*);
#define ERTS_IOLIST_TYPE 2
Eterm buf_to_intlist(Eterm**, char*, size_t, Eterm); /* most callers pass plain char*'s */
-int io_list_to_buf(Eterm, char*, int);
-int io_list_to_buf2(Eterm, char*, int);
-int erts_iolist_size(Eterm, Uint *);
+
+#define ERTS_IOLIST_TO_BUF_OVERFLOW (~((ErlDrvSizeT) 0))
+#define ERTS_IOLIST_TO_BUF_TYPE_ERROR (~((ErlDrvSizeT) 1))
+#define ERTS_IOLIST_TO_BUF_FAILED(R) \
+ (((R) & (~((ErlDrvSizeT) 1))) == (~((ErlDrvSizeT) 1)))
+#define ERTS_IOLIST_TO_BUF_SUCCEEDED(R) \
+ (!ERTS_IOLIST_TO_BUF_FAILED((R)))
+
+ErlDrvSizeT erts_iolist_to_buf(Eterm, char*, ErlDrvSizeT);
+int erts_iolist_size(Eterm, ErlDrvSizeT *);
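
The macros above reserve the two largest ErlDrvSizeT values as error codes, so one masked comparison covers both failure cases. A small self-contained check of that encoding, using size_t in place of ErlDrvSizeT:

    #include <assert.h>
    #include <stddef.h>

    #define BUF_OVERFLOW   (~((size_t)0))      /* all ones               */
    #define BUF_TYPE_ERROR (~((size_t)1))      /* all ones except bit 0  */
    #define BUF_FAILED(r)  (((r) & ~((size_t)1)) == ~((size_t)1))

    int main(void)
    {
        assert(BUF_FAILED(BUF_OVERFLOW));
        assert(BUF_FAILED(BUF_TYPE_ERROR));
        assert(!BUF_FAILED((size_t)4096));     /* ordinary byte counts succeed */
        return 0;
    }
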
int is_string(Eterm);
void erl_at_exit(void (*) (void*), void*);
Eterm collect_memory(Process *);
@@ -1537,39 +868,6 @@ Uint erts_current_reductions(Process* current, Process *p);
int erts_print_system_version(int to, void *arg, Process *c_p);
int erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg);
-#define seq_trace_output(token, msg, type, receiver, process) \
-seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
-#define seq_trace_output_exit(token, msg, type, receiver, exitfrom) \
-seq_trace_output_generic((token), (msg), (type), (receiver), NULL, (exitfrom))
-void seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
- Eterm receiver, Process *process, Eterm exitfrom);
-
-int seq_trace_update_send(Process *process);
-
-Eterm erts_seq_trace(Process *process,
- Eterm atom_type, Eterm atom_true_or_false,
- int build_result);
-
-struct trace_pattern_flags {
- unsigned int breakpoint : 1; /* Set if any other is set */
- unsigned int local : 1; /* Local call trace breakpoint */
- unsigned int meta : 1; /* Metadata trace breakpoint */
- unsigned int call_count : 1; /* Fast call count breakpoint */
- unsigned int call_time : 1; /* Fast call time breakpoint */
-};
-extern const struct trace_pattern_flags erts_trace_pattern_flags_off;
-extern int erts_call_time_breakpoint_tracing;
-int erts_set_trace_pattern(Eterm* mfa, int specified,
- Binary* match_prog_set, Binary *meta_match_prog_set,
- int on, struct trace_pattern_flags,
- Eterm meta_tracer_pid);
-void
-erts_get_default_trace_pattern(int *trace_pattern_is_on,
- Binary **match_spec,
- Binary **meta_match_spec,
- struct trace_pattern_flags *trace_pattern_flags,
- Eterm *meta_tracer_pid);
-void erts_bif_trace_init(void);
/*
** Call_trace uses this API for the parameter matching functions
@@ -1615,20 +913,19 @@ extern void erts_match_prog_foreach_offheap(Binary *b,
breakpoint functions */
#define MATCH_SET_EXCEPTION_TRACE (0x4) /* exception trace requested */
#define MATCH_SET_RX_TRACE (MATCH_SET_RETURN_TRACE|MATCH_SET_EXCEPTION_TRACE)
-/*
- * Flag values when tracing bif
- * Future note: flag field is 8 bits
- */
-#define BIF_TRACE_AS_LOCAL (0x1)
-#define BIF_TRACE_AS_GLOBAL (0x2)
-#define BIF_TRACE_AS_META (0x4)
-#define BIF_TRACE_AS_CALL_TIME (0x8)
extern erts_driver_t vanilla_driver;
extern erts_driver_t spawn_driver;
extern erts_driver_t fd_driver;
/* Should maybe be placed in erl_message.h, but then we get an include mess. */
+ERTS_GLB_INLINE Eterm *
+erts_alloc_message_heap_state(Uint size,
+ ErlHeapFragment **bpp,
+ ErlOffHeap **ohpp,
+ Process *receiver,
+ ErtsProcLocks *receiver_locks,
+ erts_aint32_t *statep);
ERTS_GLB_INLINE Eterm *
erts_alloc_message_heap(Uint size,
@@ -1651,16 +948,22 @@ erts_alloc_message_heap(Uint size,
*/
ERTS_GLB_INLINE Eterm *
-erts_alloc_message_heap(Uint size,
- ErlHeapFragment **bpp,
- ErlOffHeap **ohpp,
- Process *receiver,
- ErtsProcLocks *receiver_locks)
+erts_alloc_message_heap_state(Uint size,
+ ErlHeapFragment **bpp,
+ ErlOffHeap **ohpp,
+ Process *receiver,
+ ErtsProcLocks *receiver_locks,
+ erts_aint32_t *statep)
{
Eterm *hp;
+ erts_aint32_t state;
#ifdef ERTS_SMP
int locked_main = 0;
- ErtsProcLocks ulocks = *receiver_locks & ERTS_PROC_LOCKS_MSG_SEND;
+ state = erts_smp_atomic32_read_acqb(&receiver->state);
+ if (statep)
+ *statep = state;
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
+ goto allocate_in_mbuf;
#endif
if (size > (Uint) INT_MAX)
@@ -1676,20 +979,19 @@ erts_alloc_message_heap(Uint size,
#ifdef ERTS_SMP
try_allocate_on_heap:
#endif
- if (ERTS_PROC_IS_EXITING(receiver)
+ state = erts_smp_atomic32_read_nob(&receiver->state);
+ if (statep)
+ *statep = state;
+ if ((state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
|| HEAP_LIMIT(receiver) - HEAP_TOP(receiver) <= size) {
#ifdef ERTS_SMP
- if (locked_main)
- ulocks |= ERTS_PROC_LOCK_MAIN;
+ if (locked_main) {
+ *receiver_locks &= ~ERTS_PROC_LOCK_MAIN;
+ erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MAIN);
+ }
#endif
goto allocate_in_mbuf;
}
-#ifdef ERTS_SMP
- if (ulocks) {
- erts_smp_proc_unlock(receiver, ulocks);
- *receiver_locks &= ~ulocks;
- }
-#endif
hp = HEAP_TOP(receiver);
HEAP_TOP(receiver) = hp + size;
*bpp = NULL;
@@ -1705,12 +1007,6 @@ erts_alloc_message_heap(Uint size,
else {
ErlHeapFragment *bp;
allocate_in_mbuf:
-#ifdef ERTS_SMP
- if (ulocks) {
- *receiver_locks &= ~ulocks;
- erts_smp_proc_unlock(receiver, ulocks);
- }
-#endif
bp = new_message_buffer(size);
hp = bp->mem;
*bpp = bp;
@@ -1720,6 +1016,17 @@ erts_alloc_message_heap(Uint size,
return hp;
}
+ERTS_GLB_INLINE Eterm *
+erts_alloc_message_heap(Uint size,
+ ErlHeapFragment **bpp,
+ ErlOffHeap **ohpp,
+ Process *receiver,
+ ErtsProcLocks *receiver_locks)
+{
+ return erts_alloc_message_heap_state(size, bpp, ohpp, receiver,
+ receiver_locks, NULL);
+}
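
erts_alloc_message_heap_state() tries the receiver's own heap only when the receiver is not exiting and has room, and otherwise falls back to a separately allocated heap fragment (after dropping the main lock if it had to be taken). A simplified sketch of that policy; the struct and names are stand-ins for the ERTS ones:

    #include <stdbool.h>
    #include <stdlib.h>

    struct heap {
        long *top, *limit;         /* bump-allocated process heap            */
        bool  exiting;             /* stand-in for the EXITING state flags   */
    };

    /* Allocate 'need' words for an incoming message: use the receiver's own
     * heap when it is alive and has room, otherwise a separate fragment. */
    long *alloc_for_message(struct heap *h, size_t need, long **fragment)
    {
        if (!h->exiting && (size_t)(h->limit - h->top) > need) {
            long *hp = h->top;     /* fast path: bump the heap top pointer   */
            h->top += need;
            *fragment = NULL;
            return hp;
        }
        *fragment = malloc(need * sizeof(long));   /* slow path: heap fragment */
        return *fragment;
    }
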
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
#if !HEAP_ON_C_STACK
@@ -1802,15 +1109,15 @@ dtrace_pid_str(Eterm pid, char *process_buf)
ERTS_GLB_INLINE void
dtrace_proc_str(Process *process, char *process_buf)
{
- dtrace_pid_str(process->id, process_buf);
+ dtrace_pid_str(process->common.id, process_buf);
}
ERTS_GLB_INLINE void
dtrace_port_str(Port *port, char *port_buf)
{
erts_snprintf(port_buf, DTRACE_TERM_BUF_SIZE, "#Port<%lu.%lu>",
- port_channel_no(port->id),
- port_number(port->id));
+ port_channel_no(port->common.id),
+ port_number(port->common.id));
}
ERTS_GLB_INLINE void
diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c
index ad4672c3de..79c3ecf1b3 100644
--- a/erts/emulator/beam/index.c
+++ b/erts/emulator/beam/index.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2009. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -68,14 +68,14 @@ erts_index_init(ErtsAlcType_t type, IndexTable* t, char* name,
return t;
}
-int
-index_put(IndexTable* t, void* tmpl)
+IndexSlot*
+index_put_entry(IndexTable* t, void* tmpl)
{
int ix;
IndexSlot* p = (IndexSlot*) hash_put(&t->htable, tmpl);
if (p->index >= 0) {
- return p->index;
+ return p;
}
ix = t->entries;
@@ -93,7 +93,7 @@ index_put(IndexTable* t, void* tmpl)
t->entries++;
p->index = ix;
t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK] = p;
- return ix;
+ return p;
}
int index_get(IndexTable* t, void* tmpl)
@@ -136,3 +136,18 @@ void erts_index_merge(Hash* src, IndexTable* dst)
}
}
}
+
+void index_erase_latest_from(IndexTable* t, Uint from_ix)
+{
+ if(from_ix < (Uint)t->entries) {
+ int ix;
+ for (ix = from_ix; ix < t->entries; ix++) {
+ IndexSlot* obj = t->seg_table[ix>>INDEX_PAGE_SHIFT][ix&INDEX_PAGE_MASK];
+ hash_erase(&t->htable, obj);
+ }
+ t->entries = from_ix;
+ }
+ else {
+ ASSERT(from_ix == t->entries);
+ }
+}
diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h
index 4eb9b1f992..3afe48d007 100644
--- a/erts/emulator/beam/index.h
+++ b/erts/emulator/beam/index.h
@@ -55,12 +55,24 @@ void index_info(int, void *, IndexTable*);
int index_table_sz(IndexTable *);
int index_get(IndexTable*, void*);
-int index_put(IndexTable*, void*);
+
+IndexSlot* index_put_entry(IndexTable*, void*);
void erts_index_merge(Hash*, IndexTable*);
+/* Erase all entries with index 'ix' and higher
+*/
+void index_erase_latest_from(IndexTable*, Uint ix);
+
+ERTS_GLB_INLINE int index_put(IndexTable*, void*);
ERTS_GLB_INLINE IndexSlot* erts_index_lookup(IndexTable*, Uint);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE int index_put(IndexTable* t, void* tmpl)
+{
+ return index_put_entry(t, tmpl)->index;
+}
+
ERTS_GLB_INLINE IndexSlot*
erts_index_lookup(IndexTable* t, Uint ix)
{
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 609fe9f5fb..e466f0e299 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -43,6 +43,8 @@
#include "erl_version.h"
#include "error.h"
#include "erl_async.h"
+#define ERTS_WANT_EXTERNAL_TAGS
+#include "external.h"
#include "dtrace-wrapper.h"
extern ErlDrvEntry fd_driver_entry;
@@ -51,34 +53,40 @@ extern ErlDrvEntry spawn_driver_entry;
extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */
erts_driver_t *driver_list; /* List of all drivers, static and dynamic. */
-erts_smp_mtx_t erts_driver_list_lock; /* Mutex for driver list */
+erts_smp_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */
static erts_smp_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling
driver init */
static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a
per thread basis (for BC interfaces) */
-Port* erts_port; /* The port table */
+ErtsPTab erts_port erts_align_attribute(ERTS_CACHE_LINE_SIZE); /* The port table */
erts_smp_atomic_t erts_bytes_out; /* No bytes sent out of the system */
erts_smp_atomic_t erts_bytes_in; /* No bytes gotten into the system */
-Uint erts_max_ports;
-Uint erts_port_tab_index_mask;
-
const ErlDrvTermData driver_term_nil = (ErlDrvTermData)NIL;
+const Port erts_invalid_port = {{ERTS_INVALID_PORT}};
+
erts_driver_t vanilla_driver;
erts_driver_t spawn_driver;
erts_driver_t fd_driver;
+int erts_port_synchronous_ops = 0;
+int erts_port_schedule_all_ops = 0;
+int erts_port_parallelism = 0;
+
+static void deliver_result(Eterm sender, Eterm pid, Eterm res);
static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *);
static void terminate_port(Port *p);
static void pdl_init(void);
#ifdef ERTS_SMP
static void driver_monitor_lock_pdl(Port *p);
static void driver_monitor_unlock_pdl(Port *p);
+#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port_raw((Port), 1)
#define DRV_MONITOR_LOCK_PDL(Port) driver_monitor_lock_pdl(Port)
#define DRV_MONITOR_UNLOCK_PDL(Port) driver_monitor_unlock_pdl(Port)
#else
+#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port_raw((Port), 0)
#define DRV_MONITOR_LOCK_PDL(Port) /* nothing */
#define DRV_MONITOR_UNLOCK_PDL(Port) /* nothing */
#endif
@@ -89,36 +97,12 @@ static void driver_monitor_unlock_pdl(Port *p);
static ERTS_INLINE ErlIOQueue*
drvport2ioq(ErlDrvPort drvport)
{
- int ix = (int) drvport;
- Uint32 status;
-
- if (ix < 0 || erts_max_ports <= ix)
+ Port *prt = erts_thr_drvport2port_raw(drvport, 0);
+ erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
return NULL;
-
- if (erts_get_scheduler_data()) {
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[ix]));
- ERTS_LC_ASSERT(!erts_port[ix].port_data_lock
- || erts_lc_mtx_is_locked(
- &erts_port[ix].port_data_lock->mtx));
-
- status = erts_port[ix].status;
- }
- else {
- erts_smp_port_state_lock(&erts_port[ix]);
- status = erts_port[ix].status;
- erts_smp_port_state_unlock(&erts_port[ix]);
-
- ERTS_LC_ASSERT((status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- || erts_port[ix].port_data_lock);
- ERTS_LC_ASSERT(!erts_port[ix].port_data_lock
- || erts_lc_mtx_is_locked(
- &erts_port[ix].port_data_lock->mtx));
-
- }
-
- return ((status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
- ? NULL
- : &erts_port[ix].ioq);
+ else
+ return &prt->ioq;
}
static ERTS_INLINE int
@@ -196,27 +180,13 @@ typedef struct line_buf_context {
dtrace_port_str((PORT), port_str);
#endif
-/* The 'number' field in a port now has two parts: the lowest bits
- contain the index in the port table, and the higher bits are a counter
- which is incremented each time we look for a free port and start from
- the beginning of the table. erts_max_ports is the number of file descriptors,
- rounded up to a power of 2.
- To get the index from a port, use the macro 'internal_port_index';
- 'port_number' returns the whole number field.
-*/
-
-static erts_smp_spinlock_t get_free_port_lck;
-static Uint last_port_num;
-static Uint port_num_mask;
-erts_smp_atomic32_t erts_ports_snapshot; /* Identifies the _next_ snapshot (not the ongoing) */
-
-
static ERTS_INLINE void
kill_port(Port *pp)
{
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+ erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */
erts_port_task_free_port(pp);
- ASSERT(pp->status & ERTS_PORT_SFLGS_DEAD);
+ /* In the non-smp case the port structure may have been deallocated by now */
}
#ifdef ERTS_SMP
@@ -227,148 +197,280 @@ erts_lc_is_port_locked(Port *prt)
{
if (!prt)
return 0;
+ ERTS_SMP_LC_ASSERT(prt->lock);
return erts_smp_lc_mtx_is_locked(prt->lock);
}
#endif
#endif /* #ifdef ERTS_SMP */
-static int
-get_free_port(void)
-{
- Uint num;
- Uint tries = erts_max_ports;
- Port* port;
+static void initq(Port* prt);
- erts_smp_spin_lock(&get_free_port_lck);
- num = last_port_num + 1;
- for (;; ++num) {
- port = &erts_port[num & erts_port_tab_index_mask];
+#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
+#define ERTS_PORT_INIT_INSTR_NEED_ID 1
+#else
+#define ERTS_PORT_INIT_INSTR_NEED_ID 0
+#endif
- erts_smp_port_state_lock(port);
- if (port->status & ERTS_PORT_SFLG_FREE) {
- last_port_num = num;
- erts_smp_spin_unlock(&get_free_port_lck);
- break;
- }
- erts_smp_port_state_unlock(port);
+static ERTS_INLINE void port_init_instr(Port *prt
+#if ERTS_PORT_INIT_INSTR_NEED_ID
+ , Eterm id
+#endif
+ )
+{
+#if !ERTS_PORT_INIT_INSTR_NEED_ID
+ Eterm id = NIL; /* Not used */
+#endif
- if (--tries == 0) {
- erts_smp_spin_unlock(&get_free_port_lck);
- return -1;
- }
+ /*
+ * Stuff that needs to be initialized with the port id
+ * in the instrumented case, but not in the normal case.
+ */
+#ifdef ERTS_SMP
+ ASSERT(prt->drv_ptr && prt->lock);
+ if (!prt->drv_ptr->lock) {
+ char *lock_str = "port_lock";
+#ifdef ERTS_ENABLE_LOCK_COUNT
+ if (!(erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK))
+ lock_str = NULL;
+#endif
+ erts_mtx_init_locked_x(prt->lock, lock_str, id);
}
- port->status = ERTS_PORT_SFLG_INITIALIZING;
+#endif
+ erts_port_task_init_sched(&prt->sched, id);
+}
+
+#if !ERTS_PORT_INIT_INSTR_NEED_ID
+static ERTS_INLINE void port_init_instr_abort(Port *prt)
+{
#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 0);
- erts_smp_atomic_set_nob(&port->refc, 2); /* Port alive + lock */
-#endif
- erts_smp_port_state_unlock(port);
- return num & port_num_mask;
+ ASSERT(prt->drv_ptr && prt->lock);
+ if (!prt->drv_ptr->lock) {
+ erts_mtx_unlock(prt->lock);
+ erts_mtx_destroy(prt->lock);
+ }
+#endif
+ erts_port_task_fini_sched(&prt->sched);
}
+#endif
-/*
- * erts_test_next_port() is only used for testing.
- */
-Sint
-erts_test_next_port(int set, Uint next)
+static void insert_port_struct(void *vprt, Eterm data)
+{
+ Port *prt = (Port *) vprt;
+ Eterm id = make_internal_port(data);
+#if ERTS_PORT_INIT_INSTR_NEED_ID
+ /*
+ * This cannot be done earlier in the instrumented
+ * case since we don't know 'id' until now.
+ */
+ port_init_instr(prt, id);
+#endif
+ prt->common.id = id;
+ erts_atomic32_init_relb(&prt->state, ERTS_PORT_SFLG_INITIALIZING);
+}
+
+#define ERTS_CREATE_PORT_FLAG_PARALLELISM (1 << 0)
+
+static Port *create_port(char *name,
+ erts_driver_t *driver,
+ erts_mtx_t *driver_lock,
+ int create_flags,
+ Eterm pid,
+ int *enop)
{
- Uint i, num;
- Sint res = -1;
+ ErtsPortTaskBusyPortQ *busy_port_queue;
+ Port *prt;
+ char *p;
+ size_t port_size, busy_port_queue_size, size;
+ erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED;
+ erts_aint32_t x_pts_flgs = 0;
+#ifdef DEBUG
+ /* Make sure the debug flags survives until port is freed */
+ state |= ERTS_PORT_SFLG_PORT_DEBUG;
+#endif
- erts_smp_spin_lock(&get_free_port_lck);
- if (set) {
- last_port_num = (next - 1) & port_num_mask;
+#ifdef ERTS_SMP
+ if (!driver_lock) {
+ /* Align size for mutex following port struct */
+ port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
+ size += sizeof(erts_mtx_t);
}
- num = last_port_num + 1;
+ else
+#endif
+ port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
- for (i=0; i < erts_max_ports && res<0; ++i, ++num) {
-
- Port* port = &erts_port[num & erts_port_tab_index_mask];
+ busy_port_queue_size
+ = ((driver->flags & ERL_DRV_FLAG_NO_BUSY_MSGQ)
+ ? 0
+ : ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ)));
+ size += busy_port_queue_size;
- erts_smp_port_state_lock(port);
+ size += sys_strlen(name) + 1;
- if (port->status & ERTS_PORT_SFLG_FREE) {
- last_port_num = num - 1;
- res = num & port_num_mask;
- }
- erts_smp_port_state_unlock(port);
+ p = erts_alloc_fnf(ERTS_ALC_T_PORT, size);
+ if (!p) {
+ if (enop)
+ *enop = ENOMEM;
+ return NULL;
}
- erts_smp_spin_unlock(&get_free_port_lck);
- return res;
-}
+ prt = (Port *) p;
+ p += port_size;
-static void port_cleanup(Port *prt);
+ if (!busy_port_queue_size)
+ busy_port_queue = NULL;
+ else {
+ busy_port_queue = (ErtsPortTaskBusyPortQ *) p;
+ p += busy_port_queue_size;
+ }
#ifdef ERTS_SMP
-
-static void
-sched_port_cleanup(void *vprt)
-{
- Port *prt = (Port *) vprt;
- erts_smp_mtx_lock(prt->lock);
- port_cleanup(prt);
-}
-
+ if (driver_lock) {
+ prt->lock = driver_lock;
+ erts_mtx_lock(driver_lock);
+ }
+ else {
+ prt->lock = (erts_mtx_t *) p;
+ p += sizeof(erts_mtx_t);
+ state |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
+ }
+ erts_smp_atomic_set_nob(&prt->run_queue,
+ (erts_aint_t) erts_get_runq_current(NULL));
+ prt->xports = NULL;
+#else
+ erts_atomic32_init_nob(&prt->refc, 1);
+ prt->cleanup = 0;
#endif
+
+ erts_port_task_pre_init_sched(&prt->sched, busy_port_queue);
-void
-erts_port_cleanup(Port *prt)
-{
+ prt->name = p;
+ sys_strcpy(p, name);
+ prt->drv_ptr = driver;
+ ERTS_P_LINKS(prt) = NULL;
+ ERTS_P_MONITORS(prt) = NULL;
+ prt->linebuf = NULL;
+ prt->bp = NULL;
+ prt->suspended = NULL;
+ prt->data = am_undefined;
+ prt->port_data_lock = NULL;
+ prt->control_flags = 0;
+ prt->bytes_in = 0;
+ prt->bytes_out = 0;
+ prt->dist_entry = NULL;
+ ERTS_PORT_INIT_CONNECTED(prt, pid);
+ prt->common.u.alive.reg = NULL;
#ifdef ERTS_SMP
- if (erts_smp_mtx_trylock(prt->lock) == EBUSY)
- erts_schedule_misc_op(sched_port_cleanup, (void *) prt);
- else
+ prt->common.u.alive.ptimer = NULL;
+#else
+ sys_memset(&prt->common.u.alive.tm, 0, sizeof(ErlTimer));
#endif
- port_cleanup(prt);
-}
+ erts_port_task_handle_init(&prt->timeout_task);
+ prt->psd = NULL;
+ prt->drv_data = (SWord) 0;
+ prt->os_pid = -1;
-void
-port_cleanup(Port *prt)
-{
+ /* Set default tracing */
+ erts_get_default_tracing(&ERTS_TRACE_FLAGS(prt), &ERTS_TRACER_PROC(prt));
+
+ ASSERT(((char *) prt) == ((char *) &prt->common));
+
+#if !ERTS_PORT_INIT_INSTR_NEED_ID
+ /*
+ * When 'id' isn't needed (the normal case), it is better to
+ * do the initialization here, avoiding unnecessary contention
+ * on the table...
+ */
+ port_init_instr(prt);
+#endif
+
+ if (!erts_ptab_new_element(&erts_port,
+ &prt->common,
+ (void *) prt,
+ insert_port_struct)) {
+
+#if !ERTS_PORT_INIT_INSTR_NEED_ID
+ port_init_instr_abort(prt);
+#endif
#ifdef ERTS_SMP
- Uint32 port_specific;
- erts_smp_mtx_t *mtx;
+ if (driver_lock)
+ erts_mtx_unlock(driver_lock);
#endif
- erts_driver_t *driver;
+ if (enop)
+ *enop = 0;
+ return NULL;
+ }
- erts_smp_port_state_lock(prt);
+ ASSERT(prt == (Port *) (erts_ptab_pix2intptr_nob(
+ &erts_port,
+ internal_port_index(prt->common.id))));
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- driver = prt->drv_ptr;
- prt->drv_ptr = NULL;
- ASSERT(driver);
+ initq(prt);
-#ifdef ERTS_SMP
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ASSERT(prt->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&prt->refc) == 0);
+ if (erts_port_schedule_all_ops)
+ x_pts_flgs |= ERTS_PTS_FLG_FORCE_SCHED;
- port_specific = (prt->status & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK);
+ if (create_flags & ERTS_CREATE_PORT_FLAG_PARALLELISM)
+ x_pts_flgs |= ERTS_PTS_FLG_PARALLELISM;
- mtx = prt->lock;
- ASSERT(mtx);
+ if (x_pts_flgs)
+ erts_smp_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs);
- prt->lock = NULL;
+ erts_atomic32_set_relb(&prt->state, state);
+ return prt;
+}
- ASSERT(prt->status & ERTS_PORT_SFLG_PORT_DEBUG);
- ASSERT(!(prt->status & ERTS_PORT_SFLG_FREE));
- prt->status = ERTS_PORT_SFLG_FREE;
+#ifndef ERTS_SMP
+void
+erts_port_cleanup(Port *prt)
+{
+ if (prt->drv_ptr && prt->drv_ptr->handle)
+ erts_ddll_dereference_driver(prt->drv_ptr->handle);
+ prt->drv_ptr = NULL;
+ erts_port_dec_refc(prt);
+}
+#endif
- erts_smp_port_state_unlock(prt);
- erts_smp_mtx_unlock(mtx);
+void
+erts_port_free(Port *prt)
+{
+#if defined(ERTS_SMP) || defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
+ erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
+#endif
+ ERTS_LC_ASSERT(state & (ERTS_PORT_SFLG_INITIALIZING
+ | ERTS_PORT_SFLG_FREE));
+ ASSERT(state & ERTS_PORT_SFLG_PORT_DEBUG);
- if (port_specific) {
- erts_smp_mtx_destroy(mtx);
- erts_free(ERTS_ALC_T_PORT_LOCK, mtx);
- }
+#ifdef ERTS_SMP
+ ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->common.refc) == 0);
+#else
+ ERTS_LC_ASSERT(erts_atomic32_read_nob(&prt->refc) == 0);
#endif
- if (driver->handle)
- erts_ddll_dereference_driver(driver->handle);
-}
+ erts_port_task_fini_sched(&prt->sched);
+#ifdef ERTS_SMP
+ ASSERT(prt->lock);
+ if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
+ erts_mtx_destroy(prt->lock);
+
+ /*
+ * We cannot dereference a driver using driver
+ * locking until here in the smp case. Otherwise,
+ * the driver lock may still be in use by others.
+ *
+ * In the non-smp case we cannot do it here since
+ * this function may be called by non-scheduler
+ * threads. This is done in erts_port_cleanup()
+ * in the non-smp case.
+ */
+ if (prt->drv_ptr->handle)
+ erts_ddll_dereference_driver(prt->drv_ptr->handle);
+#endif
+ erts_free(ERTS_ALC_T_PORT, prt);
+}
/*
** Initialize v_start to point to the small fixed vector.
@@ -416,94 +518,21 @@ static void stopq(Port* prt)
if (prt->port_data_lock) {
driver_pdl_unlock(prt->port_data_lock);
driver_pdl_dec_refc(prt->port_data_lock);
- prt->port_data_lock = NULL;
- }
-}
-
-
-
-static void
-setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
- ErlDrvData drv_data, char *name, Uint32 xstatus)
-{
- ErtsRunQueue *runq = erts_get_runq_current(NULL);
- char *new_name, *old_name;
-#ifdef DEBUG
- /* Make sure the debug flags survives until port is freed */
- xstatus |= ERTS_PORT_SFLG_PORT_DEBUG;
-#endif
- ASSERT(runq);
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
-
- new_name = (char*) erts_alloc(ERTS_ALC_T_PORT_NAME, sys_strlen(name)+1);
- sys_strcpy(new_name, name);
- erts_smp_runq_lock(runq);
- erts_smp_port_state_lock(prt);
- prt->os_pid = -1;
- prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus;
- prt->snapshot = erts_smp_atomic32_read_nob(&erts_ports_snapshot);
- old_name = prt->name;
- prt->name = new_name;
-#ifdef ERTS_SMP
- erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
-#endif
- ASSERT(!prt->drv_ptr);
- prt->drv_ptr = driver;
- erts_smp_port_state_unlock(prt);
- erts_smp_runq_unlock(runq);
-#ifdef ERTS_SMP
- ASSERT(!prt->xports);
-#endif
- if (old_name) {
- erts_free(ERTS_ALC_T_PORT_NAME, (void *) old_name);
}
-
- prt->control_flags = 0;
- prt->connected = pid;
- prt->drv_data = (SWord) drv_data;
- prt->bytes_in = 0;
- prt->bytes_out = 0;
- prt->dist_entry = NULL;
- prt->reg = NULL;
-#ifdef ERTS_SMP
- prt->ptimer = NULL;
-#else
- sys_memset(&prt->tm, 0, sizeof(ErlTimer));
-#endif
- erts_port_task_handle_init(&prt->timeout_task);
- prt->suspended = NULL;
- sys_strcpy(prt->name, name);
- prt->nlinks = NULL;
- prt->monitors = NULL;
- prt->linebuf = NULL;
- prt->bp = NULL;
- prt->data = am_undefined;
- /* Set default tracing */
- erts_get_default_tracing(&(prt->trace_flags), &(prt->tracer_proc));
-
- prt->psd = NULL;
-
- initq(prt);
}
-void
-erts_wake_process_later(Port *prt, Process *process)
+int
+erts_save_suspend_process_on_port(Port *prt, Process *process)
{
- ErtsProcList** p;
- ErtsProcList* new_p;
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- if (prt->status & ERTS_PORT_SFLGS_DEAD)
- return;
-
- for (p = &(prt->suspended); *p != NULL; p = &((*p)->next))
- /* Empty loop body */;
-
- new_p = erts_proclist_create(process);
- new_p->next = NULL;
- *p = new_p;
+ int saved;
+ erts_aint32_t flags;
+ erts_port_task_sched_lock(&prt->sched);
+ flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ saved = (flags & ERTS_PTS_FLGS_BUSY) && !(flags & ERTS_PTS_FLG_EXIT);
+ if (saved)
+ erts_proclist_store_last(&prt->suspended, erts_proclist_create(process));
+ erts_port_task_sched_unlock(&prt->sched);
+ return saved;
}
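
    The old erts_wake_process_later() queued the process unconditionally; the
    replacement only records the suspension while the port is still busy and not
    exiting, and reports whether it did so. A minimal sketch of the assumed caller
    pattern follows; the erts_suspend() call is illustrative and not taken from this
    hunk, and prt/c_p are assumed to be the port and the calling process.

        if (erts_save_suspend_process_on_port(prt, c_p)) {
            /* Process was queued on the port's suspended list; suspend it
             * until the port signals that it is no longer busy. */
            erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, prt);
        }
        /* else: the port stopped being busy (or is exiting) in the meantime,
         * so the caller continues without suspending. */
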
/*
@@ -515,47 +544,44 @@ erts_wake_process_later(Port *prt, Process *process)
(*error_number_ptr must contain either BADARG or SYSTEM_LIMIT).
The driver start function must obey the same conventions.
*/
-int
+Port *
erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
Eterm pid, /* Current process. */
char* name, /* Driver name. */
SysDriverOpts* opts, /* Options. */
- int *error_number_ptr) /* errno in case -2 is returned */
+ int *error_type_ptr, /* error type */
+ int *error_number_ptr) /* errno in case of error type -2 */
{
- int port_num;
- int port_ix;
+
+#undef ERTS_OPEN_DRIVER_RET
+#define ERTS_OPEN_DRIVER_RET(Prt, EType, ENo) \
+ do { \
+ if (error_type_ptr) \
+ *error_type_ptr = (EType); \
+ if (error_number_ptr) \
+ *error_number_ptr = (ENo); \
+ return (Prt); \
+ } while (0)
+
ErlDrvData drv_data = 0;
- Uint32 xstatus = 0;
Port *port;
int fpe_was_unmasked;
-
- if (error_number_ptr)
- *error_number_ptr = 0;
+ int error_type, error_number;
+ int port_errno = 0;
+ erts_mtx_t *driver_lock = NULL;
+ int cprt_flgs = 0;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- if ((port_num = get_free_port()) < 0) {
- if (error_number_ptr) {
- *error_number_ptr = SYSTEM_LIMIT;
- }
- return -3;
- }
-
- port_ix = port_num & erts_port_tab_index_mask;
- port = &erts_port[port_ix];
- port->id = make_internal_port(port_num);
-
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
if (!driver) {
for (driver = driver_list; driver; driver = driver->next) {
if (sys_strcmp(driver->name, name) == 0)
break;
}
if (!driver) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- if (error_number_ptr)
- *error_number_ptr = BADARG;
- return -3;
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG);
}
}
if (driver == &spawn_driver) {
@@ -599,61 +625,49 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
}
if (driver == NULL || (driver != &spawn_driver && opts->exit_status)) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- if (error_number_ptr) {
- *error_number_ptr = BADARG;
- }
- /* Need to mark the port as free again */
- erts_smp_port_state_lock(port);
- port->status = ERTS_PORT_SFLG_FREE;
-#ifdef ERTS_SMP
- ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&port->refc) == 2);
- erts_smp_atomic_set_nob(&port->refc, 0);
-#endif
- erts_smp_port_state_unlock(port);
- return -3;
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG);
}
- /*
- * We'll set up the port before calling the start function,
- * to allow message sending and setting timers in the start function.
- */
-
#ifdef ERTS_SMP
- ASSERT(!port->lock);
- port->lock = driver->lock;
- if (!port->lock) {
- port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK,
- sizeof(erts_smp_mtx_t));
- erts_smp_mtx_init_x(port->lock,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_lock" : NULL,
-#else
- "port_lock",
-#endif
- port->id);
- xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
- }
+ driver_lock = driver->lock;
#endif
if (driver->handle != NULL) {
erts_ddll_increment_port_count(driver->handle);
erts_ddll_reference_driver(driver->handle);
}
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
-#ifdef ERTS_SMP
- erts_smp_mtx_lock(port->lock);
-#endif
+ /*
+ * We'll set up the port before calling the start function,
+ * to allow message sending and setting timers in the start function.
+ */
- setup_port(port, pid, driver, drv_data, name, xstatus);
+ if (opts->parallelism)
+ cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM;
+
+ port = create_port(name, driver, driver_lock, cprt_flgs, pid, &port_errno);
+ if (!port) {
+ if (driver->handle) {
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_ddll_decrement_port_count(driver->handle);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_ddll_dereference_driver(driver->handle);
+ }
+ if (port_errno)
+ ERTS_OPEN_DRIVER_RET(NULL, -2, port_errno);
+ else
+ ERTS_OPEN_DRIVER_RET(NULL, -3, SYSTEM_LIMIT);
+ }
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
trace_port_open(port,
pid,
am_atom_put(port->name, strlen(port->name)));
}
-
+
+ error_number = error_type = 0;
if (driver->start) {
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(port, am_in, am_start);
@@ -666,56 +680,63 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
}
#endif
fpe_was_unmasked = erts_block_fpe();
- drv_data = (*driver->start)((ErlDrvPort)(port_ix),
- name, opts);
+ drv_data = (*driver->start)((ErlDrvPort) port, name, opts);
+ if (((SWord) drv_data) == -1)
+ error_type = -1;
+ else if (((SWord) drv_data) == -2) {
+ /*
+ * We need to save errno quickly after the
+ * call to the 'start' callback before
+ * something else modify it.
+ */
+ error_type = -2;
+ error_number = errno;
+ }
+ else if (((SWord) drv_data) == -3) {
+ error_type = -3;
+ error_number = BADARG;
+ }
+
erts_unblock_fpe(fpe_was_unmasked);
port->caller = NIL;
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(port, am_out, am_start);
}
- if (error_number_ptr && ((SWord) drv_data) == (SWord) -2)
- *error_number_ptr = errno;
#ifdef ERTS_SMP
if (port->xports)
- erts_smp_xports_unlock(port);
+ erts_port_handle_xports(port);
ASSERT(!port->xports);
#endif
}
- if (((SWord)drv_data) == -1 ||
- ((SWord)drv_data) == -2 ||
- ((SWord)drv_data) == -3) {
- int res = (int) ((SWord) drv_data);
-
- if (res == -3 && error_number_ptr) {
- *error_number_ptr = BADARG;
- }
-
+ if (error_type) {
/*
* Must clean up the port.
*/
#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(port->ptimer);
+ erts_cancel_smp_ptimer(port->common.u.alive.ptimer);
#else
- erts_cancel_timer(&(port->tm));
+ erts_cancel_timer(&(port->common.u.alive.tm));
#endif
stopq(port);
- kill_port(port);
if (port->linebuf != NULL) {
erts_free(ERTS_ALC_T_LINEBUF,
(void *) port->linebuf);
port->linebuf = NULL;
}
if (driver->handle != NULL) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(driver->handle);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
}
+ kill_port(port);
erts_port_release(port);
- return res;
+ ERTS_OPEN_DRIVER_RET(NULL, error_type, error_number);
}
- port->drv_data = (SWord) drv_data;
- return port_ix;
+ port->drv_data = (UWord) drv_data;
+ ERTS_OPEN_DRIVER_RET(port, 0, 0);
+
+#undef ERTS_OPEN_DRIVER_RET
}
#ifdef ERTS_SMP
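
    With this change erts_open_driver() no longer returns a port index or a negative
    error code; it returns the Port pointer (or NULL) and classifies failures through
    the two out-parameters. A hedged sketch of the new calling convention, where
    "example_drv", opts and c_p are placeholders, not names from this patch:

        int err_type = 0, err_num = 0;
        Port *prt = erts_open_driver(NULL, c_p->common.id, "example_drv",
                                     &opts, &err_type, &err_num);
        if (!prt) {
            if (err_type == -2) {
                /* the driver's start callback failed; err_num holds the errno
                 * saved immediately after the callback returned */
            } else {
                /* err_type == -3: err_num is BADARG or SYSTEM_LIMIT */
            }
        }
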
@@ -740,102 +761,122 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
char* name, /* Driver name */
ErlDrvData drv_data) /* Driver data */
{
+ int cprt_flgs = 0;
Port *creator_port;
Port* port;
erts_driver_t *driver;
Process *rp;
- int port_num;
- Eterm port_id;
- Uint32 xstatus = 0;
+ erts_mtx_t *driver_lock = NULL;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- creator_port = erts_drvport2port(creator_port_ix);
+ /* Need to be called from a scheduler thread */
+ if (!erts_get_scheduler_id())
+ return ERTS_INVALID_ERL_DRV_PORT;
+
+ creator_port = erts_drvport2port(creator_port_ix, NULL);
if (!creator_port)
- return (ErlDrvTermData) -1;
+ return ERTS_INVALID_ERL_DRV_PORT;
+
+ rp = erts_proc_lookup(pid);
+ if (!rp)
+ return ERTS_INVALID_ERL_DRV_PORT;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(creator_port));
driver = creator_port->drv_ptr;
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
if (!erts_ddll_driver_ok(driver->handle)) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- return (ErlDrvTermData) -1;
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ return ERTS_INVALID_ERL_DRV_PORT;
}
- rp = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- return (ErlDrvTermData) -1; /* pid does not exist */
+ if (driver->handle != NULL) {
+ erts_ddll_increment_port_count(driver->handle);
+ erts_ddll_reference_referenced_driver(driver->handle);
+ }
+
+#ifdef ERTS_SMP
+ driver_lock = driver->lock;
+#endif
+
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+
+ /* Inherit parallelism flag from parent */
+ if (ERTS_PTS_FLG_PARALLELISM &
+ erts_smp_atomic32_read_nob(&creator_port->sched.flags))
+ cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM;
+ port = create_port(name, driver, driver_lock, cprt_flgs, pid, NULL);
+ if (!port) {
+ if (driver->handle) {
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_ddll_decrement_port_count(driver->handle);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ erts_ddll_dereference_driver(driver->handle);
+ }
+ return ERTS_INVALID_ERL_DRV_PORT;
}
- if ((port_num = get_free_port()) < 0) {
- errno = SYSTEM_LIMIT;
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
+
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
- return (ErlDrvTermData) -1;
+ if (driver->handle) {
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
+ erts_ddll_decrement_port_count(driver->handle);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
+ }
+ kill_port(port);
+ erts_port_release(port);
+ return ERTS_INVALID_ERL_DRV_PORT;
}
- port_id = make_internal_port(port_num);
- port = &erts_port[port_num & erts_port_tab_index_mask];
+ erts_add_link(&ERTS_P_LINKS(port), LINK_PID, pid);
+ erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, port->common.id);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
#ifdef ERTS_SMP
- ASSERT(!port->lock);
- port->lock = driver->lock;
- if (!port->lock) {
+ if (!driver_lock) {
ErtsXPortsList *xplp = xports_list_alloc();
xplp->port = port;
xplp->next = creator_port->xports;
creator_port->xports = xplp;
- port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK,
- sizeof(erts_smp_mtx_t));
- erts_smp_mtx_init_locked_x(port->lock,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_lock" : NULL,
-#else
- "port_lock",
-#endif
- port_id);
- xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK;
}
-
#endif
- if (driver->handle != NULL) {
- erts_ddll_increment_port_count(driver->handle);
- erts_ddll_reference_referenced_driver(driver->handle);
- }
- erts_smp_mtx_unlock(&erts_driver_list_lock);
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
-
- setup_port(port, pid, driver, drv_data, name, xstatus);
- port->id = port_id;
+ port->drv_data = (UWord) drv_data;
- erts_add_link(&(port->nlinks), LINK_PID, pid);
- erts_add_link(&(rp->nlinks), LINK_PID, port_id);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- return port_num & erts_port_tab_index_mask;
+ return (ErlDrvPort) port;
}
#ifdef ERTS_SMP
-void
-erts_smp_xports_unlock(Port *prt)
+int erts_port_handle_xports(Port *prt)
{
+ int reds = 0;
ErtsXPortsList *xplp;
ASSERT(prt);
xplp = prt->xports;
ASSERT(xplp);
while (xplp) {
+ Port *rprt = xplp->port;
ErtsXPortsList *free_xplp;
- if (xplp->port->xports)
- erts_smp_xports_unlock(xplp->port);
- erts_port_release(xplp->port);
+ erts_aint32_t state;
+ if (rprt->xports)
+ reds += erts_port_handle_xports(rprt);
+ state = erts_atomic32_read_nob(&rprt->state);
+ if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(rprt)) {
+ terminate_port(rprt);
+ reds += ERTS_PORT_REDS_TERMINATE;
+ }
+ erts_port_release(rprt);
free_xplp = xplp;
xplp = xplp->next;
xports_list_free(free_xplp);
+ reds++;
}
prt->xports = NULL;
+ return reds;
}
#endif
@@ -870,8 +911,8 @@ io_list_to_vec(Eterm obj, /* io-list */
DECLARE_ESTACK(s);
Eterm* objp;
char *buf = cbin->orig_bytes;
- ErlDrvSizeT len = cbin->orig_size;
- ErlDrvSizeT csize = 0;
+ Uint len = cbin->orig_size;
+ Uint csize = 0;
int vlen = 0;
char* cptr = buf;
@@ -986,7 +1027,7 @@ io_list_to_vec(Eterm obj, /* io-list */
#define IO_LIST_VEC_COUNT(obj) \
do { \
- ErlDrvSizeT _size = binary_size(obj); \
+ Uint _size = binary_size(obj); \
Eterm _real; \
ERTS_DECLARE_DUMMY(Uint _offset); \
int _bitoffs; \
@@ -1037,8 +1078,9 @@ do { \
*/
static int
-io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize,
- Uint* pvsize, Uint* pcsize, Uint* total_size)
+io_list_vec_len(Eterm obj, int* vsize, Uint* csize,
+ Uint* pvsize, Uint* pcsize,
+ ErlDrvSizeT* total_size)
{
DECLARE_ESTACK(s);
Eterm* objp;
@@ -1049,7 +1091,7 @@ io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize,
Uint p_v_size = 0;
Uint p_c_size = 0;
Uint p_in_clist = 0;
- Uint total;
+ Uint total; /* Uint due to halfword emulator */
goto L_jump_start; /* avoid a push */
@@ -1109,7 +1151,7 @@ io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize,
if (total < c_size) {
goto L_overflow_error;
}
- *total_size = total;
+ *total_size = (ErlDrvSizeT) total;
DESTROY_ESTACK(s);
*vsize = v_size;
@@ -1124,56 +1166,724 @@ io_list_vec_len(Eterm obj, Uint* vsize, Uint* csize,
return 1;
}
-/* write data to a port */
-int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
-{
- char *buf;
- erts_driver_t *drv = p->drv_ptr;
- Uint size;
+typedef enum {
+ ERTS_TRY_IMM_DRV_CALL_OK,
+ ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK,
+ ERTS_TRY_IMM_DRV_CALL_INVALID_PORT,
+ ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS
+} ErtsTryImmDrvCallResult;
+
+typedef struct {
+ Process *c_p; /* Currently executing process (unlocked) */
+ Port *port; /* Port to operate on */
+ Eterm port_op; /* port operation as an atom */
+ erts_aint32_t state; /* in: invalid state; out: read state (if read) */
+ erts_aint32_t sched_flags; /* in: invalid flags; out: read flags (if read) */
+ int async; /* Asynchronous operation */
+ int pre_chk_sched_flags; /* Check sched flags before lock? */
int fpe_was_unmasked;
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p) || ERTS_IS_CRASH_DUMPING);
+} ErtsTryImmDrvCallState;
+
+#define ERTS_INIT_TRY_IMM_DRV_CALL_STATE(C_P, PRT, SFLGS, PTS_FLGS, A, PRT_OP) \
+ {(C_P), (PRT), (PRT_OP), (SFLGS), (PTS_FLGS), (A), 1, 0}
+
+/*
+ * Try doing an immediate driver callback call from a process. If
+ * this fails, the operation should be scheduled in the normal case...
+ *
+ */
+static ERTS_INLINE ErtsTryImmDrvCallResult
+try_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ ErtsTryImmDrvCallResult res;
+ erts_aint32_t invalid_state, invalid_sched_flags;
+ Port *prt = sp->port;
+ Process *c_p = sp->c_p;
+
+ ASSERT(is_atom(sp->port_op));
+
+ invalid_sched_flags = ERTS_PTS_FLGS_FORCE_SCHEDULE_OP;
+ invalid_sched_flags |= sp->sched_flags;
+ if (sp->async)
+ invalid_sched_flags |= ERTS_PTS_FLG_PARALLELISM;
+
+ if (sp->pre_chk_sched_flags) {
+ sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sp->sched_flags & invalid_sched_flags)
+ return ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ }
+
+ if (erts_smp_port_trylock(prt) == EBUSY)
+ return ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK;
+
+ invalid_state = sp->state;
+ sp->state = erts_atomic32_read_nob(&prt->state);
+ if (sp->state & invalid_state) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_PORT;
+ goto locked_fail;
+ }
+
+ sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sp->sched_flags & invalid_sched_flags) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ goto locked_fail;
+ }
+
+ if (c_p) {
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
+ trace_virtual_sched(c_p, am_out);
+ if (erts_system_profile_flags.runnable_procs
+ && erts_system_profile_flags.exclusive)
+ profile_runnable_proc(c_p, am_inactive);
+
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+
ERTS_SMP_CHK_NO_PROC_LOCKS;
- p->caller = caller_id;
- if (drv->outputv != NULL) {
- Uint vsize;
- Uint csize;
- Uint pvsize;
- Uint pcsize;
- ErlDrvSizeT blimit;
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_in, sp->port_op);
+ if (erts_system_profile_flags.runnable_ports
+ && !erts_port_is_scheduled(prt))
+ profile_runnable_port(prt, am_active);
+
+ sp->fpe_was_unmasked = erts_block_fpe();
+
+ return ERTS_TRY_IMM_DRV_CALL_OK;
+
+locked_fail:
+ erts_port_release(prt);
+ return res;
+}
+
+static ERTS_INLINE void
+finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ Port *prt = sp->port;
+ Process *c_p = sp->c_p;
+
+ erts_port_driver_callback_epilogue(prt, NULL);
+
+ erts_unblock_fpe(sp->fpe_was_unmasked);
+
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_out, sp->port_op);
+ if (erts_system_profile_flags.runnable_ports
+ && !erts_port_is_scheduled(prt))
+ profile_runnable_port(prt, am_inactive);
+
+ erts_port_release(prt);
+
+ if (c_p) {
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
+ trace_virtual_sched(c_p, am_in);
+ if (erts_system_profile_flags.runnable_procs
+ && erts_system_profile_flags.exclusive)
+ profile_runnable_proc(c_p, am_active);
+ }
+}
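
    The try/finalize pair above is the skeleton reused by the signal senders later in
    this patch (bad_port_signal(), erts_port_output(), erts_port_exit()): attempt the
    driver callback synchronously while holding the port lock, and otherwise fall back
    to scheduling a proc-to-port signal. A condensed sketch of that pattern, using
    names introduced by the patch; c_p, prt and refp are assumed caller variables.

        ErtsTryImmDrvCallState st
            = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p, prt,
                                               ERTS_PORT_SFLGS_INVALID_LOOKUP,
                                               0,      /* no extra sched flags  */
                                               !refp,  /* async if no reply ref */
                                               am_command);

        switch (try_imm_drv_call(&st)) {
        case ERTS_TRY_IMM_DRV_CALL_OK:
            /* ... invoke the driver callback directly ... */
            finalize_imm_drv_call(&st);
            break;
        case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
            /* port is dead or closing; the operation is dropped */
            break;
        default:
            /* busy port lock or scheduling flags set: queue a proc-to-port
             * signal via erts_schedule_proc2port_signal() instead */
            break;
        }
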
+
+/*
+ * force_imm_drv_call()/finalize_force_imm_drv_call() should *only*
+ * be used while crash dumping...
+ */
+static ErtsTryImmDrvCallResult
+force_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ erts_aint32_t invalid_state;
+ Port *prt = sp->port;
+
+ ASSERT(ERTS_IS_CRASH_DUMPING);
+ ASSERT(is_atom(sp->port_op));
+
+ invalid_state = sp->state;
+ sp->state = erts_atomic32_read_nob(&prt->state);
+ if (sp->state & invalid_state)
+ return ERTS_TRY_IMM_DRV_CALL_INVALID_PORT;
+
+ sp->fpe_was_unmasked = erts_block_fpe();
+
+ return ERTS_TRY_IMM_DRV_CALL_OK;
+}
+
+static void
+finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp)
+{
+ erts_unblock_fpe(sp->fpe_was_unmasked);
+}
+
+#define ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE (REF_THING_SIZE + 3)
+
+static ERTS_INLINE void
+queue_port_sched_op_reply(Process *rp,
+ ErtsProcLocks *rp_locksp,
+ Eterm *hp_start,
+ Eterm *hp,
+ Uint h_size,
+ ErlHeapFragment* bp,
+ Uint32 *ref_num,
+ Eterm msg)
+{
+ Eterm ref = make_internal_ref(hp);
+ write_ref_thing(hp, ref_num[0], ref_num[1], ref_num[2]);
+ hp += REF_THING_SIZE;
+
+ msg = TUPLE2(hp, ref, msg);
+ hp += 3;
+
+ if (!bp) {
+ HRelease(rp, hp_start + h_size, hp);
+ }
+ else {
+ Uint used_h_size = hp - hp_start;
+ ASSERT(h_size >= used_h_size);
+ if (h_size > used_h_size)
+ bp = erts_resize_message_buffer(bp, used_h_size, &msg, 1);
+ }
+
+ erts_queue_message(rp,
+ rp_locksp,
+ bp,
+ msg,
+ NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+}
+
+static void
+port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg)
+{
+ Process *rp = erts_proc_lookup_raw(to);
+ if (rp) {
+ ErlOffHeap *ohp;
+ ErlHeapFragment* bp;
+ Eterm msg_copy;
+ Uint hsz, msg_sz;
+ Eterm *hp, *hp_start;
+ ErtsProcLocks rp_locks = 0;
+
+ hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+ if (is_immed(msg))
+ msg_sz = 0;
+ else {
+ msg_sz = size_object(msg);
+ hsz += msg_sz;
+ }
+
+ hp_start = hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+ if (is_immed(msg))
+ msg_copy = msg;
+ else
+ msg_copy = copy_struct(msg, msg_sz, &hp, ohp);
+
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ ref_num,
+ msg_copy);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+}
+
+
+ErtsPortOpResult
+erts_schedule_proc2port_signal(Process *c_p,
+ Port *prt,
+ Eterm caller,
+ Eterm *refp,
+ ErtsProc2PortSigData *sigdp,
+ int task_flags,
+ ErtsProc2PortSigCallback callback)
+{
+ int sched_res;
+ if (!refp) {
+ if (c_p)
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ else {
+ ASSERT(c_p);
+ sigdp->flags |= ERTS_P2P_SIG_DATA_FLG_REPLY;
+ erts_make_ref_in_array(sigdp->ref);
+ *refp = erts_proc_store_ref(c_p, sigdp->ref);
+
+ /*
+ * Caller needs to wait for a message containing
+ * the ref that we just created. No such message
+ * can exist in the caller's message queue at this time.
+ * We therefore move the save pointer of the
+ * caller's message queue to the end of the queue.
+ *
+ * NOTE: It is of vital importance that the caller
+ * immediately does a receive unconditionally
+ * waiting for the message with the reference;
+ * otherwise, the next receive will *not* work
+ * as expected!
+ */
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+
+ if (ERTS_PROC_PENDING_EXIT(c_p)) {
+ /* need to exit caller instead */
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+ KILL_CATCHES(c_p);
+ c_p->freason = EXC_EXIT;
+ return ERTS_PORT_OP_CALLER_EXIT;
+ }
+
+ ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+ c_p->msg.save = c_p->msg.last;
+
+ erts_smp_proc_unlock(c_p,
+ (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCKS_MSG_RECEIVE));
+ }
+
+
+ sigdp->caller = caller;
+
+ /* Schedule the signal for later execution... */
+ sched_res = erts_port_task_schedule(prt->common.id,
+ NULL,
+ ERTS_PORT_TASK_PROC_SIG,
+ sigdp,
+ callback,
+ task_flags);
+
+ if (c_p)
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+
+ if (sched_res != 0) {
+ if (refp)
+ *refp = NIL;
+ return ERTS_PORT_OP_DROPPED;
+ }
+ return ERTS_PORT_OP_SCHEDULED;
+}
+
+static ERTS_INLINE void
+send_badsig(Port *prt)
+{
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
+ Process* rp;
+ Eterm connected = ERTS_PORT_GET_CONNECTED(prt);
+
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+ ERTS_LC_ASSERT(erts_get_scheduler_id());
+
+ ASSERT(is_internal_pid(connected));
+
+ rp = erts_proc_lookup_raw(connected);
+ if (rp) {
+ erts_smp_proc_lock(rp, rp_locks);
+ if (!ERTS_PROC_IS_EXITING(rp))
+ (void) erts_send_exit_signal(NULL,
+ prt->common.id,
+ rp,
+ &rp_locks,
+ am_badsig,
+ NIL,
+ NULL,
+ 0);
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+}
+
+static void
+badsig_received(int bang_op,
+ Port *prt,
+ erts_aint32_t state,
+ int bad_output_value)
+{
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! Something" operation
+ * else
+ * we are part of a call to a port BIF
+ * behave accordingly...
+ */
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP)) {
+ if (bad_output_value) {
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ erts_dsprintf(dsbufp, "Bad value on output port '%s'\n", prt->name);
+ erts_send_error_to_logger_nogl(dsbufp);
+ }
+ if (bang_op)
+ send_badsig(prt);
+ }
+}
+
+static int
+port_badsig(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ if (op == ERTS_PROC2PORT_SIG_EXEC)
+ badsig_received(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP,
+ prt,
+ state,
+ sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT);
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ return ERTS_PORT_REDS_BADSIG;
+}
+
+
+/*
+ * bad_port_signal() will
+ * - preserve signal order of signals.
+ * - send a 'badsig' exit signal to connected process if 'from' is an
+ * internal pid and the port is alive when the bad signal reaches
+ * it.
+ */
+static ErtsPortOpResult
+bad_port_signal(Process *c_p,
+ int flags,
+ Port *prt,
+ Eterm from,
+ Eterm *refp,
+ Eterm port_op)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ port_op);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ badsig_received(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ prt,
+ try_call_state.state,
+ flags & ERTS_PORT_SIG_FLG_BAD_OUTPUT);
+ finalize_imm_drv_call(&try_call_state);
+ return ERTS_PORT_OP_BADARG;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule badsig() call instead... */
+ break;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = (flags & ~ERTS_P2P_SIG_TYPE_MASK) | ERTS_P2P_SIG_TYPE_BAD;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ refp,
+ sigdp,
+ 0,
+ port_badsig);
+}
+
+
+/*
+ * Driver outputv() callback
+ */
+
+static ERTS_INLINE void
+call_driver_outputv(int bang_op,
+ Eterm caller,
+ Eterm from,
+ Port *prt,
+ erts_driver_t *drv,
+ ErlIOVec *evp)
+{
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! {From, {command, Data}}" operation
+ * else
+ * we are part of a call to port_command/[2,3]
+ * behave accordingly...
+ */
+ if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt))
+ send_badsig(prt);
+ else {
+ ErlDrvSizeT size = evp->size;
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_outputv)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt);
+ DTRACE4(driver_outputv, process_str, port_str, prt->name, size);
+ }
+#endif
+
+ prt->caller = caller;
+ (*drv->outputv)((ErlDrvData) prt->drv_data, evp);
+ prt->caller = NIL;
+
+ prt->bytes_out += size;
+ erts_smp_atomic_add_nob(&erts_bytes_out, size);
+ }
+}
+
+static ERTS_INLINE void
+cleanup_scheduled_outputv(ErlIOVec *ev, ErlDrvBinary *cbinp)
+{
+ int i;
+ /* Need to free all binaries */
+ for (i = 1; i < ev->vsize; i++)
+ if (ev->binv[i])
+ driver_free_binary(ev->binv[i]);
+ if (cbinp)
+ driver_free_binary(cbinp);
+ erts_free(ERTS_ALC_T_DRV_CMD_DATA, ev);
+}
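
    When an outputv request cannot be executed immediately it is queued as a signal,
    so the binaries referenced by the I/O vector must stay alive until the signal runs
    or is aborted: the patch takes an extra reference on each binary at scheduling time
    and releases it in cleanup_scheduled_outputv(). A condensed sketch of that pairing
    (both halves appear later in this patch; evp, cbin and i are assumed in scope):

        /* At scheduling time (see erts_port_output() below): take a reference
         * on every binary that the queued ErlIOVec points at. */
        for (i = 1; i < evp->vsize; i++)
            if (evp->binv[i])
                driver_binary_inc_refc(evp->binv[i]);

        /* When the signal is executed or aborted (port_sig_outputv()): release
         * the references again and free the vector itself. */
        cleanup_scheduled_outputv(evp, cbin);
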
+
+static int
+port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ Eterm reply;
+
+ switch (op) {
+ case ERTS_PROC2PORT_SIG_EXEC:
+ /* Execution of a scheduled outputv() call */
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ reply = am_badarg;
+ else {
+ call_driver_outputv(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP,
+ sigdp->caller,
+ sigdp->u.outputv.from,
+ prt,
+ prt->drv_ptr,
+ sigdp->u.outputv.evp);
+ reply = am_true;
+ }
+ break;
+ case ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND:
+ reply = am_false;
+ break;
+ default:
+ reply = am_badarg;
+ break;
+ }
+
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, reply);
+
+ cleanup_scheduled_outputv(sigdp->u.outputv.evp,
+ sigdp->u.outputv.cbinp);
+
+ return ERTS_PORT_REDS_CMD_OUTPUTV;
+}
+
+/*
+ * Driver output() callback
+ */
+
+static ERTS_INLINE void
+call_driver_output(int bang_op,
+ Eterm caller,
+ Eterm from,
+ Port *prt,
+ erts_driver_t *drv,
+ char *bufp,
+ ErlDrvSizeT size)
+{
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! {From, {command, Data}}" operation
+ * else
+ * we are part of a call to port_command/[2,3]
+ * behave accordingly...
+ */
+ if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt))
+ send_badsig(prt);
+ else {
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+ || ERTS_IS_CRASH_DUMPING);
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(driver_output)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt);
+ DTRACE4(driver_output, process_str, port_str, prt->name, size);
+ }
+#endif
+
+ prt->caller = caller;
+ (*drv->output)((ErlDrvData) prt->drv_data, bufp, size);
+ prt->caller = NIL;
+
+ prt->bytes_out += size;
+ erts_smp_atomic_add_nob(&erts_bytes_out, size);
+ }
+}
+
+static ERTS_INLINE void
+cleanup_scheduled_output(char *bufp)
+{
+ erts_free(ERTS_ALC_T_DRV_CMD_DATA, bufp);
+}
+
+static int
+port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ Eterm reply;
+
+ switch (op) {
+ case ERTS_PROC2PORT_SIG_EXEC:
+ /* Execution of a scheduled output() call */
+
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ reply = am_badarg;
+ else {
+ call_driver_output(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP,
+ sigdp->caller,
+ sigdp->u.output.from,
+ prt,
+ prt->drv_ptr,
+ sigdp->u.output.bufp,
+ sigdp->u.output.size);
+ reply = am_true;
+ }
+ break;
+ case ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND:
+ reply = am_false;
+ break;
+ default:
+ reply = am_badarg;
+ break;
+ }
+
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, reply);
+
+ cleanup_scheduled_output(sigdp->u.output.bufp);
+
+ return ERTS_PORT_REDS_CMD_OUTPUT;
+}
+
+ErtsPortOpResult
+erts_port_output(Process *c_p,
+ int flags,
+ Port *prt,
+ Eterm from,
+ Eterm list,
+ Eterm *refp)
+{
+ ErtsPortOpResult res;
+ ErtsProc2PortSigData *sigdp;
+ erts_driver_t *drv = prt->drv_ptr;
+ size_t size;
+ int try_call;
+ erts_aint32_t sched_flags, busy_flgs, invalid_flags;
+ int task_flags;
+ ErtsProc2PortSigCallback port_sig_callback;
+ ErlDrvBinary *cbin = NULL;
+ ErlIOVec *evp = NULL;
+ char *buf = NULL;
+ int force_immediate_call = (flags & ERTS_PORT_SIG_FLG_FORCE_IMM_CALL);
+
+ ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP
+ | ERTS_PORT_SIG_FLG_NOSUSPEND
+ | ERTS_PORT_SIG_FLG_FORCE
+ | ERTS_PORT_SIG_FLG_FORCE_IMM_CALL)) == 0);
+
+ busy_flgs = ((flags & ERTS_PORT_SIG_FLG_FORCE)
+ ? ((erts_aint32_t) 0)
+ : ERTS_PTS_FLGS_BUSY);
+ invalid_flags = busy_flgs;
+ if (!refp)
+ invalid_flags |= ERTS_PTS_FLG_PARALLELISM;
+
+ /*
+ * Assumes caller have checked that port is valid...
+ */
+
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT))
+ return ((sched_flags & ERTS_PTS_FLG_EXIT)
+ ? ERTS_PORT_OP_DROPPED
+ : ERTS_PORT_OP_BUSY);
+
+ try_call = (force_immediate_call /* crash dumping */
+ || !(sched_flags & (invalid_flags
+ | ERTS_PTS_FLGS_FORCE_SCHEDULE_OP)));
+
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(port_command)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(c_p ? c_p->common.id : ERTS_INVALID_PID, prt);
+ DTRACE4(port_command, process_str, port_str, prt->name, "command");
+ }
+#endif
+
+ if (drv->outputv) {
+ ErlIOVec ev;
SysIOVec iv[SMALL_WRITE_VEC];
ErlDrvBinary* bv[SMALL_WRITE_VEC];
SysIOVec* ivp;
ErlDrvBinary** bvp;
- ErlDrvBinary* cbin;
- ErlIOVec ev;
+ int vsize;
+ Uint csize;
+ Uint pvsize;
+ Uint pcsize;
+ Uint blimit;
+ size_t iov_offset, binv_offset, alloc_size;
- if (io_list_vec_len(list, &vsize, &csize,
- &pvsize, &pcsize, &size)) {
+ if (io_list_vec_len(list, &vsize, &csize, &pvsize, &pcsize, &size))
goto bad_value;
+
+ iov_offset = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErlIOVec));
+ binv_offset = iov_offset;
+ binv_offset += ERTS_ALC_DATA_ALIGN_SIZE((vsize+1)*sizeof(SysIOVec));
+ alloc_size = binv_offset;
+ alloc_size += (vsize+1)*sizeof(ErlDrvBinary *);
+
+ if (try_call && vsize < SMALL_WRITE_VEC) {
+ ivp = ev.iov = iv;
+ bvp = ev.binv = bv;
+ evp = &ev;
}
+ else {
+ char *ptr = erts_alloc((try_call
+ ? ERTS_ALC_T_TMP
+ : ERTS_ALC_T_DRV_CMD_DATA), alloc_size);
+
+ evp = (ErlIOVec *) ptr;
+ ivp = evp->iov = (SysIOVec *) (ptr + iov_offset);
+ bvp = evp->binv = (ErlDrvBinary **) (ptr + binv_offset);
+ }
+
/* To pack or not to pack (small binaries) ...? */
- vsize++;
- if (vsize <= SMALL_WRITE_VEC) {
+ if (vsize < SMALL_WRITE_VEC) {
/* Do NOT pack */
blimit = 0;
- } else {
+ }
+ else {
/* Do pack */
vsize = pvsize + 1;
csize = pcsize;
blimit = ERL_SMALL_IO_BIN_LIMIT;
}
/* Use vsize and csize from now on */
- if (vsize <= SMALL_WRITE_VEC) {
- ivp = iv;
- bvp = bv;
- } else {
- ivp = (SysIOVec *) erts_alloc(ERTS_ALC_T_TMP,
- vsize * sizeof(SysIOVec));
- bvp = (ErlDrvBinary**) erts_alloc(ERTS_ALC_T_TMP,
- vsize * sizeof(ErlDrvBinary*));
- }
+
cbin = driver_alloc_binary(csize);
if (!cbin)
erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, ERTS_SIZEOF_Binary(csize));
@@ -1182,210 +1892,761 @@ int erts_write_to_port(Eterm caller_id, Port *p, Eterm list)
ivp[0].iov_base = NULL;
ivp[0].iov_len = 0;
bvp[0] = NULL;
- ev.vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
- if (ev.vsize < 0) {
- if (ivp != iv) {
- erts_free(ERTS_ALC_T_TMP, (void *) ivp);
- }
- if (bvp != bv) {
- erts_free(ERTS_ALC_T_TMP, (void *) bvp);
- }
+ evp->vsize = io_list_to_vec(list, ivp+1, bvp+1, cbin, blimit);
+ if (evp->vsize < 0) {
+ if (evp != &ev)
+ erts_free(try_call ? ERTS_ALC_T_TMP : ERTS_ALC_T_DRV_CMD_DATA,
+ evp);
driver_free_binary(cbin);
goto bad_value;
}
- ev.vsize++;
#if 0
/* This assertion may say something useful, but it can
be falsified during the emulator test suites. */
- ASSERT(ev.vsize == vsize);
-#endif
- ev.size = size; /* total size */
- ev.iov = ivp;
- ev.binv = bvp;
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(driver_outputv)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
- DTRACE4(driver_outputv, process_str, port_str, p->name, size);
- }
+ ASSERT(evp->vsize == vsize);
#endif
- fpe_was_unmasked = erts_block_fpe();
- (*drv->outputv)((ErlDrvData)p->drv_data, &ev);
- erts_unblock_fpe(fpe_was_unmasked);
- if (ivp != iv) {
- erts_free(ERTS_ALC_T_TMP, (void *) ivp);
+ evp->vsize++;
+ evp->size = size; /* total size */
+
+ if (!try_call) {
+ int i;
+ /* Need to increase refc on all binaries */
+ for (i = 1; i < evp->vsize; i++)
+ if (bvp[i])
+ driver_binary_inc_refc(bvp[i]);
}
- if (bvp != bv) {
- erts_free(ERTS_ALC_T_TMP, (void *) bvp);
- }
- driver_free_binary(cbin);
- } else {
- int r;
-
- /* Try with an 8KB buffer first (will often be enough I guess). */
- size = 8*1024;
- /* See below why the extra byte is added. */
- buf = erts_alloc(ERTS_ALC_T_TMP, size+1);
- r = io_list_to_buf(list, buf, size);
+ else {
+ int i;
+ ErlIOVec *new_evp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ invalid_flags,
+ !refp,
+ am_command);
+
+ try_call_state.pre_chk_sched_flags = 0; /* already checked */
+ if (force_immediate_call)
+ try_call_res = force_imm_drv_call(&try_call_state);
+ else
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ call_driver_outputv(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ c_p ? c_p->common.id : ERTS_INVALID_PID,
+ from,
+ prt,
+ drv,
+ evp);
+ if (force_immediate_call)
+ finalize_force_imm_drv_call(&try_call_state);
+ else
+ finalize_imm_drv_call(&try_call_state);
+ /* Fall through... */
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ driver_free_binary(cbin);
+ if (evp != &ev)
+ erts_free(ERTS_ALC_T_TMP, evp);
+ if (try_call_res == ERTS_TRY_IMM_DRV_CALL_OK)
+ return ERTS_PORT_OP_DONE;
+ else
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ sched_flags = try_call_state.sched_flags;
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule outputv() call instead... */
+ break;
+ }
-#ifdef USE_VM_PROBES
- if(DTRACE_ENABLED(port_command)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
- DTRACE4(port_command, process_str, port_str, p->name, "command");
- }
+ /* Need to increase refc on all binaries */
+ for (i = 1; i < evp->vsize; i++)
+ if (bvp[i])
+ driver_binary_inc_refc(bvp[i]);
+
+ new_evp = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, alloc_size);
+
+ if (evp != &ev) {
+ sys_memcpy((void *) new_evp, (void *) evp, alloc_size);
+ new_evp->iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ bvp = new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
+ + binv_offset);
+
+#ifdef DEBUG
+ ASSERT(new_evp->vsize == evp->vsize);
+ ASSERT(new_evp->size == evp->size);
+ for (i = 0; i < evp->vsize; i++) {
+ ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
+ ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
+ ASSERT(new_evp->binv[i] == evp->binv[i]);
+ }
#endif
- if (r >= 0) {
- size -= r;
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(driver_output)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
- DTRACE4(driver_output, process_str, port_str, p->name, size);
- }
+ erts_free(ERTS_ALC_T_TMP, evp);
+ }
+ else { /* from stack allocated structure; offsets may differ */
+
+ sys_memcpy((void *) new_evp, (void *) evp, sizeof(ErlIOVec));
+ new_evp->iov = (SysIOVec *) (((char *) new_evp)
+ + iov_offset);
+ sys_memcpy((void *) new_evp->iov,
+ (void *) evp->iov,
+ evp->vsize * sizeof(SysIOVec));
+ new_evp->binv = (ErlDrvBinary **) (((char *) new_evp)
+ + binv_offset);
+ sys_memcpy((void *) new_evp->binv,
+ (void *) evp->binv,
+ evp->vsize * sizeof(ErlDrvBinary *));
+
+#ifdef DEBUG
+ ASSERT(new_evp->vsize == evp->vsize);
+ ASSERT(new_evp->size == evp->size);
+ for (i = 0; i < evp->vsize; i++) {
+ ASSERT(new_evp->iov[i].iov_len == evp->iov[i].iov_len);
+ ASSERT(new_evp->iov[i].iov_base == evp->iov[i].iov_base);
+ ASSERT(new_evp->binv[i] == evp->binv[i]);
+ }
#endif
- fpe_was_unmasked = erts_block_fpe();
- (*drv->output)((ErlDrvData)p->drv_data, buf, size);
- erts_unblock_fpe(fpe_was_unmasked);
- erts_free(ERTS_ALC_T_TMP, buf);
+
+ }
+
+ evp = new_evp;
}
- else if (r == -2) {
- erts_free(ERTS_ALC_T_TMP, buf);
- goto bad_value;
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUTV;
+ sigdp->u.outputv.from = from;
+ sigdp->u.outputv.evp = evp;
+ sigdp->u.outputv.cbinp = cbin;
+ port_sig_callback = port_sig_outputv;
+ }
+ else {
+ ErlDrvSizeT r;
+
+ /*
+ * Apparently there exists code that writes 1 byte too
+ * much into the buffer. Where it resides I don't know, but
+ * we can live with one byte extra allocated...
+ */
+
+ if (!try_call) {
+ if (erts_iolist_size(list, &size))
+ goto bad_value;
+
+ buf = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, size + 1);
+
+ r = erts_iolist_to_buf(list, buf, size);
+ ASSERT(ERTS_IOLIST_TO_BUF_SUCCEEDED(r));
}
else {
- ASSERT(r == -1); /* Overflow */
- erts_free(ERTS_ALC_T_TMP, buf);
- if (erts_iolist_size(list, &size)) {
- goto bad_value;
+ char *new_buf;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ invalid_flags,
+ !refp,
+ am_command);
+
+ /* Try with an 8KB buffer first (will often be enough I guess). */
+ size = 8*1024;
+
+ buf = erts_alloc(ERTS_ALC_T_TMP, size + 1);
+ r = erts_iolist_to_buf(list, buf, size);
+
+ if (ERTS_IOLIST_TO_BUF_SUCCEEDED(r)) {
+ ASSERT(r <= size);
+ size -= r;
+ }
+ else {
+ erts_free(ERTS_ALC_T_TMP, buf);
+ if (r == ERTS_IOLIST_TO_BUF_TYPE_ERROR)
+ goto bad_value;
+ ASSERT(r == ERTS_IOLIST_TO_BUF_OVERFLOW);
+ if (erts_iolist_size(list, &size))
+ goto bad_value;
+ buf = erts_alloc(ERTS_ALC_T_TMP, size + 1);
+ r = erts_iolist_to_buf(list, buf, size);
+ ASSERT(ERTS_IOLIST_TO_BUF_SUCCEEDED(r));
}
- /*
- * I know drivers that pad space with '\0' this is clearly
- * incorrect but I don't feel like fixing them now, insted
- * add ONE extra byte.
- */
- buf = erts_alloc(ERTS_ALC_T_TMP, size+1);
- r = io_list_to_buf(list, buf, size);
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(driver_output)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(caller_id, p)
- DTRACE4(driver_output, process_str, port_str, p->name, size);
- }
-#endif
- fpe_was_unmasked = erts_block_fpe();
- (*drv->output)((ErlDrvData)p->drv_data, buf, size);
- erts_unblock_fpe(fpe_was_unmasked);
+ try_call_state.pre_chk_sched_flags = 0; /* already checked */
+ if (force_immediate_call)
+ try_call_res = force_imm_drv_call(&try_call_state);
+ else
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ call_driver_output(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ c_p ? c_p->common.id : ERTS_INVALID_PID,
+ from,
+ prt,
+ drv,
+ buf,
+ size);
+ if (force_immediate_call)
+ finalize_force_imm_drv_call(&try_call_state);
+ else
+ finalize_imm_drv_call(&try_call_state);
+ /* Fall through... */
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ erts_free(ERTS_ALC_T_TMP, buf);
+ if (try_call_res == ERTS_TRY_IMM_DRV_CALL_OK)
+ return ERTS_PORT_OP_DONE;
+ else
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ sched_flags = try_call_state.sched_flags;
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule output() call instead... */
+ break;
+ }
+
+ new_buf = erts_alloc(ERTS_ALC_T_DRV_CMD_DATA, size + 1);
+ sys_memcpy(new_buf, buf, size);
erts_free(ERTS_ALC_T_TMP, buf);
+ buf = new_buf;
}
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_OUTPUT;
+ sigdp->u.output.from = from;
+ sigdp->u.output.bufp = buf;
+ sigdp->u.output.size = size;
+ port_sig_callback = port_sig_output;
+ }
+
+ task_flags = ERTS_PT_FLG_WAIT_BUSY;
+ sigdp->flags |= flags;
+ if (flags & (ERTS_P2P_SIG_DATA_FLG_FORCE|ERTS_P2P_SIG_DATA_FLG_NOSUSPEND)) {
+ task_flags = 0;
+ if (flags & ERTS_P2P_SIG_DATA_FLG_FORCE)
+ sigdp->flags &= ~ERTS_P2P_SIG_DATA_FLG_NOSUSPEND;
+ else if (flags & ERTS_P2P_SIG_DATA_FLG_NOSUSPEND)
+ task_flags = ERTS_PT_FLG_NOSUSPEND;
+ }
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : ERTS_INVALID_PID,
+ refp,
+ sigdp,
+ task_flags,
+ port_sig_callback);
+
+ if (res != ERTS_PORT_OP_SCHEDULED) {
+ if (drv->outputv)
+ cleanup_scheduled_outputv(evp, cbin);
+ else
+ cleanup_scheduled_output(buf);
+ return res;
}
- p->bytes_out += size;
- erts_smp_atomic_add_nob(&erts_bytes_out, size);
-#ifdef ERTS_SMP
- if (p->xports)
- erts_smp_xports_unlock(p);
- ASSERT(!p->xports);
+ if (!(sched_flags & ERTS_PTS_FLG_EXIT) && (sched_flags & busy_flgs))
+ return ERTS_PORT_OP_BUSY_SCHEDULED;
+
+ return res;
+
+bad_value:
+
+ flags |= ERTS_PORT_SIG_FLG_BAD_OUTPUT;
+ return bad_port_signal(c_p, flags, prt, from, refp, am_command);
+}
+
+static ERTS_INLINE ErtsPortOpResult
+call_deliver_port_exit(int bang_op,
+ Eterm from,
+ Port *prt,
+ erts_aint32_t state,
+ Eterm reason,
+ int broken_link)
+{
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! {From, close}" operation
+ * else
+ * we are part of a call to port_close(Port)
+ * behave accordingly...
+ */
+
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ return ERTS_PORT_OP_DROPPED;
+
+ if (bang_op && from != ERTS_PORT_GET_CONNECTED(prt)) {
+ send_badsig(prt);
+ return ERTS_PORT_OP_DROPPED;
+ }
+
+ if (broken_link) {
+ ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from);
+ if (lnk)
+ erts_destroy_link(lnk);
+ else
+ return ERTS_PORT_OP_DROPPED;
+ }
+
+ if (!erts_deliver_port_exit(prt, from, reason, bang_op))
+ return ERTS_PORT_OP_DROPPED;
+
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(port_command) && bang_op) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(from, prt);
+ DTRACE4(port_command, process_str, port_str, prt->name, "close");
+ }
#endif
- p->caller = NIL;
- return 0;
- bad_value:
- p->caller = NIL;
- {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Bad value on output port '%s'\n", p->name);
- erts_send_error_to_logger_nogl(dsbufp);
- return 1;
+ return ERTS_PORT_OP_DONE;
+}
+
+static int
+port_sig_exit(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ Eterm msg = am_badarg;
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ ErtsPortOpResult res;
+ int bang_op = sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP;
+ int broken_link = sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BROKEN_LINK;
+ res = call_deliver_port_exit(bang_op,
+ sigdp->u.exit.from,
+ prt,
+ state,
+ sigdp->u.exit.reason,
+ broken_link);
+
+ if (res == ERTS_PORT_OP_DONE)
+ msg = am_true;
+ }
+ if (sigdp->u.exit.bp)
+ free_message_buffer(sigdp->u.exit.bp);
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, msg);
+
+ return ERTS_PORT_REDS_EXIT;
+}
+
+ErtsPortOpResult
+erts_port_exit(Process *c_p,
+ int flags,
+ Port *prt,
+ Eterm from,
+ Eterm reason,
+ Eterm *refp)
+{
+ ErtsPortOpResult res;
+ ErtsProc2PortSigData *sigdp;
+ ErlHeapFragment *bp = NULL;
+
+ ASSERT((flags & ~(ERTS_PORT_SIG_FLG_BANG_OP
+ | ERTS_PORT_SIG_FLG_BROKEN_LINK
+ | ERTS_PORT_SIG_FLG_FORCE_SCHED)) == 0);
+
+ if (!(flags & ERTS_PORT_SIG_FLG_FORCE_SCHED)) {
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ am_exit);
+
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ res = call_deliver_port_exit(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ from,
+ prt,
+ try_call_state.state,
+ reason,
+ flags & ERTS_PORT_SIG_FLG_BROKEN_LINK);
+ finalize_imm_drv_call(&try_call_state);
+ return res;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ default:
+ /* Schedule call instead... */
+ break;
+ }
}
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_EXIT | flags;
+ sigdp->u.exit.from = from;
+
+ if (is_immed(reason)) {
+ sigdp->u.exit.reason = reason;
+ sigdp->u.exit.bp = NULL;
+ }
+ else {
+ Eterm *hp;
+ Uint hsz = size_object(reason);
+ bp = new_message_buffer(hsz);
+ sigdp->u.exit.bp = bp;
+ hp = bp->mem;
+ sigdp->u.exit.reason = copy_struct(reason,
+ hsz,
+ &hp,
+ &bp->off_heap);
+ }
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : from,
+ refp,
+ sigdp,
+ 0,
+ port_sig_exit);
+
+ if (res == ERTS_PORT_OP_DROPPED) {
+ if (bp)
+ free_message_buffer(bp);
+ }
+
+ return res;
}
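+
+/*
+ * Note that a non-immediate exit reason is copied into a heap fragment
+ * (sigdp->u.exit.bp) before the signal is scheduled. The signal may be
+ * executed long after the caller has returned, possibly after the
+ * calling process has garbage collected or terminated, so the reason
+ * term cannot be left on the caller's heap. port_sig_exit() frees the
+ * fragment once the signal has been executed or dropped.
+ */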
-/* initialize the port array */
-void init_io(void)
+static ErtsPortOpResult
+set_port_connected(int bang_op,
+ Eterm from,
+ Port *prt,
+ erts_aint32_t state,
+ Eterm connect)
{
- int i;
- ErlDrvEntry** dp;
- char maxports[21]; /* enough for any 64-bit integer */
- size_t maxportssize = sizeof(maxports);
- Uint ports_bits = ERTS_PORTS_BITS;
- Sint port_extra_shift;
+ /*
+ * if (bang_op)
+ * we are part of a "Prt ! {From, {connect, Connect}}" operation
+ * else
+ * we are part of a call to port_connect(Port, Connect)
+ * behave accordingly...
+ */
-#ifdef ERTS_SMP
- init_xports_list_alloc();
+ if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP)
+ return ERTS_PORT_OP_DROPPED;
+
+ if (bang_op) { /* Bang operation */
+ if (is_not_internal_pid(connect) || ERTS_PORT_GET_CONNECTED(prt) != from) {
+ send_badsig(prt);
+ return ERTS_PORT_OP_DROPPED;
+ }
+
+ ERTS_PORT_SET_CONNECTED(prt, connect);
+ deliver_result(prt->common.id, from, am_connected);
+
+#ifdef USE_VM_PROBES
+ if(DTRACE_ENABLED(port_command)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(from, prt);
+ DTRACE4(port_command, process_str, port_str, prt->name, "connect");
+ }
#endif
+ }
+ else { /* Port BIF operation */
+ Process *rp = erts_proc_lookup_raw(connect);
+ if (!rp)
+ return ERTS_PORT_OP_DROPPED;
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+ if (ERTS_PROC_IS_EXITING(rp)) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+ return ERTS_PORT_OP_DROPPED;
+ }
- pdl_init();
+ erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, prt->common.id);
+ erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, connect);
+
+ ERTS_PORT_SET_CONNECTED(prt, connect);
- if (erts_sys_getenv_raw("ERL_MAX_PORTS", maxports, &maxportssize) == 0)
- erts_max_ports = atoi(maxports);
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_connect)) {
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(newprocess_str, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_pid_str(connect, process_str);
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
+ dtrace_proc_str(rp, newprocess_str);
+ DTRACE4(port_connect, process_str, port_str, prt->name, newprocess_str);
+ }
+#endif
+ }
+
+ return ERTS_PORT_OP_DONE;
+}
+
+static int
+port_sig_connect(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ Eterm msg = am_badarg;
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ ErtsPortOpResult res;
+ res = set_port_connected(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BANG_OP,
+ sigdp->u.connect.from,
+ prt,
+ state,
+ sigdp->u.connect.connected);
+ if (res == ERTS_PORT_OP_DONE)
+ msg = am_true;
+ }
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, msg);
+ return ERTS_PORT_REDS_CONNECT;
+}
+
+ErtsPortOpResult
+erts_port_connect(Process *c_p,
+ int flags,
+ Port *prt,
+ Eterm from,
+ Eterm connect,
+ Eterm *refp)
+{
+ ErtsProc2PortSigData *sigdp;
+ Eterm connect_id;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ am_connect);
+
+ ASSERT((flags & ~ERTS_PORT_SIG_FLG_BANG_OP) == 0);
+
+ if (is_not_internal_pid(connect))
+ connect_id = NIL; /* Fail in op (for signal order) */
else
- erts_max_ports = sys_max_files();
+ connect_id = connect;
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ ErtsPortOpResult res;
+ res = set_port_connected(flags & ERTS_PORT_SIG_FLG_BANG_OP,
+ from,
+ prt,
+ try_call_state.state,
+ connect_id);
+ finalize_imm_drv_call(&try_call_state);
+ return res;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ default:
+ /* Schedule call instead... */
+ break;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_CONNECT | flags;
- if (erts_max_ports > ERTS_MAX_PORTS)
- erts_max_ports = ERTS_MAX_PORTS;
- if (erts_max_ports < 1024)
- erts_max_ports = 1024;
+ sigdp->u.connect.from = from;
+ sigdp->u.connect.connected = connect_id;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ refp,
+ sigdp,
+ 0,
+ port_sig_connect);
+}
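+
+/*
+ * A connect request with a non-pid argument is not rejected up front;
+ * it is passed on with connect_id = NIL and rejected inside
+ * set_port_connected() when the operation is executed. This keeps the
+ * resulting badarg in signal order relative to other signals sent to
+ * the port.
+ */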
- if (erts_use_r9_pids_ports) {
- ports_bits = ERTS_R9_PORTS_BITS;
- if (erts_max_ports > ERTS_MAX_R9_PORTS)
- erts_max_ports = ERTS_MAX_R9_PORTS;
+static void
+port_unlink(Port *prt, Eterm from)
+{
+ ErtsLink *lnk = erts_remove_link(&ERTS_P_LINKS(prt), from);
+ if (lnk)
+ erts_destroy_link(lnk);
+}
+
+static int
+port_sig_unlink(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ if (op == ERTS_PROC2PORT_SIG_EXEC)
+ port_unlink(prt, sigdp->u.unlink.from);
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ return ERTS_PORT_REDS_UNLINK;
+}
+
+ErtsPortOpResult
+erts_port_unlink(Process *c_p, Port *prt, Eterm from, Eterm *refp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
+ prt,
+ ERTS_PORT_SFLGS_DEAD,
+ 0,
+ !refp,
+ am_unlink);
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ port_unlink(prt, from);
+ finalize_imm_drv_call(&try_call_state);
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ default:
+ /* Schedule call instead... */
+ break;
}
- port_extra_shift = erts_fit_in_bits(erts_max_ports - 1);
- port_num_mask = (1 << ports_bits) - 1;
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_UNLINK;
+ sigdp->u.unlink.from = from;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : from,
+ refp,
+ sigdp,
+ 0,
+ port_sig_unlink);
+}
- erts_port_tab_index_mask = ~(~((Uint) 0) << port_extra_shift);
- erts_max_ports = 1 << port_extra_shift;
+static void
+port_link_failure(Eterm port_id, Eterm linker)
+{
+ Process *rp;
+ ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCKS_XSIG_SEND;
+ ASSERT(is_internal_pid(linker));
+ rp = erts_pid2proc(NULL, 0, linker, rp_locks);
+ if (rp) {
+ ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), port_id);
+ if (rlnk) {
+ int xres = erts_send_exit_signal(NULL,
+ port_id,
+ rp,
+ &rp_locks,
+ am_noproc,
+ NIL,
+ NULL,
+ 0);
+	    if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
+		/* We didn't exit the process and it is traced */
+		trace_proc(NULL, rp, am_getting_unlinked, port_id);
+	    }
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ }
+}
- erts_smp_mtx_init(&erts_driver_list_lock,"driver_list");
- driver_list = NULL;
- erts_smp_tsd_key_create(&driver_list_lock_status_key);
- erts_smp_tsd_key_create(&driver_list_last_error_key);
+static void
+port_link(Port *prt, erts_aint32_t state, Eterm to)
+{
+ if (!(state & ERTS_PORT_SFLGS_INVALID_LOOKUP))
+ erts_add_link(&ERTS_P_LINKS(prt), LINK_PID, to);
+ else
+ port_link_failure(prt->common.id, to);
+}
- if (erts_max_ports * sizeof(Port) <= erts_max_ports) {
- /* More memory needed than the whole address space. */
- erts_alloc_enomem(ERTS_ALC_T_PORT_TABLE, ~((Uint) 0));
+static int
+port_sig_link(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigdp)
+{
+ if (op == ERTS_PROC2PORT_SIG_EXEC)
+ port_link(prt, state, sigdp->u.link.to);
+ else
+ port_link_failure(sigdp->u.link.port, sigdp->u.link.to);
+ if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ return ERTS_PORT_REDS_LINK;
+}
+
+ErtsPortOpResult
+erts_port_link(Process *c_p, Port *prt, Eterm to, Eterm *refp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ am_link);
+
+ switch (try_imm_drv_call(&try_call_state)) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ port_link(prt, try_call_state.state, to);
+ finalize_imm_drv_call(&try_call_state);
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_BADARG;
+ default:
+ /* Schedule call instead... */
+ break;
}
- erts_port = (Port *) erts_alloc(ERTS_ALC_T_PORT_TABLE,
- erts_max_ports * sizeof(Port));
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_LINK;
+ sigdp->u.link.port = prt->common.id;
+ sigdp->u.link.to = to;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p ? c_p->common.id : to,
+ refp,
+ sigdp,
+ 0,
+ port_sig_link);
+}
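+
+/*
+ * If the port turns out to be invalid when the link signal executes
+ * (or the signal never executes), port_link_failure() sends a 'noproc'
+ * exit signal to the linking process and removes its half of the link,
+ * mirroring what link/1 does towards a nonexistent process.
+ */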
- erts_smp_atomic_init_nob(&erts_bytes_out, 0);
- erts_smp_atomic_init_nob(&erts_bytes_in, 0);
+void erts_init_io(int port_tab_size,
+ int port_tab_size_ignore_files)
+{
+ ErlDrvEntry** dp;
+ erts_smp_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+ drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
+ drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED;
- for (i = 0; i < erts_max_ports; i++) {
- erts_port_task_init_sched(&erts_port[i].sched);
#ifdef ERTS_SMP
- erts_smp_atomic_init_nob(&erts_port[i].refc, 0);
- erts_port[i].lock = NULL;
- erts_port[i].xports = NULL;
- erts_smp_spinlock_init_x(&erts_port[i].state_lck,
-#ifdef ERTS_ENABLE_LOCK_COUNT
- (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_state" : NULL,
-#else
- "port_state",
-#endif
- make_small(0));
+ init_xports_list_alloc();
#endif
- erts_port[i].tracer_proc = NIL;
- erts_port[i].trace_flags = 0;
- erts_port[i].drv_ptr = NULL;
- erts_port[i].status = ERTS_PORT_SFLG_FREE;
- erts_port[i].name = NULL;
- erts_port[i].nlinks = NULL;
- erts_port[i].monitors = NULL;
- erts_port[i].linebuf = NULL;
- erts_port[i].port_data_lock = NULL;
+ pdl_init();
+
+ if (!port_tab_size_ignore_files) {
+ int max_files = sys_max_files();
+ if (port_tab_size < max_files)
+ port_tab_size = max_files;
}
- erts_smp_atomic32_init_nob(&erts_ports_snapshot, (erts_aint32_t) 0);
- last_port_num = 0;
- erts_smp_spinlock_init(&get_free_port_lck, "get_free_port");
+ if (port_tab_size > ERTS_MAX_PORTS)
+ port_tab_size = ERTS_MAX_PORTS;
+ else if (port_tab_size < ERTS_MIN_PORTS)
+ port_tab_size = ERTS_MIN_PORTS;
+
+ erts_smp_rwmtx_init_opt(&erts_driver_list_lock,
+ &drv_list_rwmtx_opts,
+ "driver_list");
+ driver_list = NULL;
+ erts_smp_tsd_key_create(&driver_list_lock_status_key);
+ erts_smp_tsd_key_create(&driver_list_last_error_key);
+
+ erts_ptab_init_table(&erts_port,
+ ERTS_ALC_T_PORT_TABLE,
+ NULL,
+ (ErtsPTabElementCommon *) &erts_invalid_port.common,
+ port_tab_size,
+ "port_table");
+
+ erts_smp_atomic_init_nob(&erts_bytes_out, 0);
+ erts_smp_atomic_init_nob(&erts_bytes_in, 0);
sys_init_io();
erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1);
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
init_driver(&fd_driver, &fd_driver_entry, NULL);
init_driver(&vanilla_driver, &vanilla_driver_entry, NULL);
@@ -1394,28 +2655,64 @@ void init_io(void)
erts_add_driver_entry(*dp, NULL, 1);
erts_smp_tsd_set(driver_list_lock_status_key, NULL);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
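+
+/*
+ * Port table sizing: unless port_tab_size_ignore_files is set, the
+ * requested size is first raised to at least sys_max_files(), and the
+ * result is then clamped to [ERTS_MIN_PORTS, ERTS_MAX_PORTS]. For
+ * example, a request of 1024 on a system allowing 4096 open file
+ * descriptors yields a table of 4096 entries, assuming 4096 lies
+ * within those bounds.
+ */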
#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP)
-void erts_lcnt_enable_io_lock_count(int enable) {
- int i;
- for (i = 0; i < erts_max_ports; i++) {
- Port* p = &erts_port[i];
- if (enable) {
- erts_lcnt_init_lock_x(&p->state_lck.lcnt, "port_state", ERTS_LCNT_LT_SPINLOCK, make_small(i));
- if (p->lock) {
- erts_lcnt_init_lock_x(&p->lock->lcnt, "port_lock", ERTS_LCNT_LT_MUTEX, make_small(i));
- }
- } else {
- erts_lcnt_destroy_lock(&p->state_lck.lcnt);
- if (p->lock) {
- erts_lcnt_destroy_lock(&p->lock->lcnt);
- }
- }
+static ERTS_INLINE void lcnt_enable_drv_lock_count(erts_driver_t *dp, int enable)
+{
+ if (dp->lock) {
+ if (enable)
+ erts_lcnt_init_lock_x(&dp->lock->lcnt,
+ "driver_lock",
+ ERTS_LCNT_LT_MUTEX,
+ am_atom_put(dp->name,
+ sys_strlen(dp->name)));
+ else
+ erts_lcnt_destroy_lock(&dp->lock->lcnt);
+
+ }
+}
+
+static ERTS_INLINE void lcnt_enable_port_lock_count(Port *prt, int enable)
+{
+ erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
+ if (!enable) {
+ erts_lcnt_destroy_lock(&prt->sched.mtx.lcnt);
+ if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
+ erts_lcnt_destroy_lock(&prt->lock->lcnt);
+ }
+ else {
+ erts_lcnt_init_lock_x(&prt->sched.mtx.lcnt,
+ "port_sched_lock",
+ ERTS_LCNT_LT_MUTEX,
+ prt->common.id);
+ if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
+ erts_lcnt_init_lock_x(&prt->lock->lcnt,
+ "port_lock",
+ ERTS_LCNT_LT_MUTEX,
+ prt->common.id);
}
}
+
+void erts_lcnt_enable_io_lock_count(int enable)
+{
+ erts_driver_t *dp;
+ int i, max = erts_ptab_max(&erts_port);
+
+ for (i = 0; i < max; i++) {
+ Port *prt = erts_pix2port(i);
+ if (prt)
+ lcnt_enable_port_lock_count(prt, enable);
+ }
+
+ lcnt_enable_drv_lock_count(&vanilla_driver, enable);
+ lcnt_enable_drv_lock_count(&spawn_driver, enable);
+ lcnt_enable_drv_lock_count(&fd_driver, enable);
+ for (dp = driver_list; dp; dp = dp->next)
+ lcnt_enable_drv_lock_count(dp, enable);
+}
#endif
/*
@@ -1594,14 +2891,15 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
{
Process *rp;
ErtsProcLocks rp_locks = 0;
+ int scheduler = erts_get_scheduler_id() != 0;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- ASSERT(is_internal_port(sender)
- && is_internal_pid(pid)
- && internal_pid_index(pid) < erts_max_processes);
+ ASSERT(is_internal_port(sender) && is_internal_pid(pid));
- rp = erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ rp = (scheduler
+ ? erts_proc_lookup(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC));
if (rp) {
Eterm tuple;
@@ -1609,17 +2907,22 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
ErlOffHeap *ohp;
Eterm* hp;
Uint sz_res;
- sz_res = size_object(res);
- hp = erts_alloc_message_heap(sz_res + 3, &bp, &ohp, rp, &rp_locks);
- res = copy_struct(res, sz_res, &hp, ohp);
- tuple = TUPLE2(hp, sender, res);
+
+ sz_res = size_object(res);
+ hp = erts_alloc_message_heap(sz_res + 3, &bp, &ohp, rp, &rp_locks);
+ res = copy_struct(res, sz_res, &hp, ohp);
+ tuple = TUPLE2(hp, sender, res);
erts_queue_message(rp, &rp_locks, bp, tuple, NIL
#ifdef USE_VM_PROBES
, NIL
#endif
);
- erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ if (!scheduler)
+ erts_smp_proc_dec_refc(rp);
+
}
}
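+
+/*
+ * The lookup above depends on the calling context: on a scheduler
+ * thread (erts_get_scheduler_id() != 0) a plain erts_proc_lookup() is
+ * sufficient, since process structures are not freed until all
+ * schedulers have made thread progress. From a non-scheduler thread
+ * the reference count must be bumped (ERTS_P2P_FLG_SMP_INC_REFC) and
+ * dropped again once the message has been queued. The same pattern is
+ * used by deliver_read_message() and deliver_vec_message() below.
+ */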
@@ -1632,7 +2935,7 @@ deliver_result(Eterm sender, Eterm pid, Eterm res)
* len -- length of data
*/
-static void deliver_read_message(Port* prt, Eterm to,
+static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
char *hbuf, ErlDrvSizeT hlen,
char *buf, ErlDrvSizeT len, int eol)
{
@@ -1644,28 +2947,33 @@ static void deliver_read_message(Port* prt, Eterm to,
ErlHeapFragment *bp;
ErlOffHeap *ohp;
ErtsProcLocks rp_locks = 0;
+ int scheduler = erts_get_scheduler_id() != 0;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ERTS_SMP_CHK_NO_PROC_LOCKS;
need = 3 + 3 + 2*hlen;
- if (prt->status & ERTS_PORT_SFLG_LINEBUF_IO) {
+
+ if (state & ERTS_PORT_SFLG_LINEBUF_IO) {
need += 3;
}
- if (prt->status & ERTS_PORT_SFLG_BINARY_IO && buf != NULL) {
+ if ((state & ERTS_PORT_SFLG_BINARY_IO) && buf != NULL) {
need += PROC_BIN_SIZE;
} else {
need += 2*len;
}
- rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+ rp = (scheduler
+ ? erts_proc_lookup(to)
+ : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC));
+
if (!rp)
return;
hp = erts_alloc_message_heap(need, &bp, &ohp, rp, &rp_locks);
listp = NIL;
- if ((prt->status & ERTS_PORT_SFLG_BINARY_IO) == 0) {
+ if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) {
listp = buf_to_intlist(&hp, buf, len, listp);
} else if (buf != NULL) {
ProcBin* pb;
@@ -1696,14 +3004,14 @@ static void deliver_read_message(Port* prt, Eterm to,
listp = buf_to_intlist(&hp, hbuf, hlen, listp);
}
- if (prt->status & ERTS_PORT_SFLG_LINEBUF_IO){
+ if (state & ERTS_PORT_SFLG_LINEBUF_IO){
listp = TUPLE2(hp, (eol) ? am_eol : am_noeol, listp);
hp += 3;
}
tuple = TUPLE2(hp, am_data, listp);
hp += 3;
- tuple = TUPLE2(hp, prt->id, tuple);
+ tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
@@ -1711,15 +3019,18 @@ static void deliver_read_message(Port* prt, Eterm to,
, NIL
#endif
);
- erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ if (!scheduler)
+ erts_smp_proc_dec_refc(rp);
}
/*
* Deliver all lines in a line buffer, repeats calls to
* deliver_read_message, and takes the same parameters.
*/
-static void deliver_linebuf_message(Port* prt, Eterm to,
+static void deliver_linebuf_message(Port* prt, erts_aint_t state,
+ Eterm to,
char* hbuf, ErlDrvSizeT hlen,
char *buf, ErlDrvSizeT len)
{
@@ -1728,7 +3039,7 @@ static void deliver_linebuf_message(Port* prt, Eterm to,
if(init_linebuf_context(&lc,&(prt->linebuf), buf, len) < 0)
return;
while((ret = read_linebuf(&lc)) > LINEBUF_EMPTY)
- deliver_read_message(prt, to, hbuf, hlen, LINEBUF_DATA(lc),
+ deliver_read_message(prt, state, to, hbuf, hlen, LINEBUF_DATA(lc),
LINEBUF_DATALEN(lc), (ret == LINEBUF_EOL));
}
@@ -1739,20 +3050,25 @@ static void deliver_linebuf_message(Port* prt, Eterm to,
* Parameters:
* prt - Pointer to a Port structure for this port.
*/
-static void flush_linebuf_messages(Port *prt)
+static void flush_linebuf_messages(Port *prt, erts_aint32_t state)
{
LineBufContext lc;
int ret;
ERTS_SMP_LC_ASSERT(!prt || erts_lc_is_port_locked(prt));
- if(prt == NULL || !(prt->status & ERTS_PORT_SFLG_LINEBUF_IO))
+
+ if (!prt)
+ return;
+
+ if (!(state & ERTS_PORT_SFLG_LINEBUF_IO))
return;
if(init_linebuf_context(&lc,&(prt->linebuf), NULL, 0) < 0)
return;
while((ret = flush_linebuf(&lc)) > LINEBUF_EMPTY)
deliver_read_message(prt,
- prt->connected,
+ state,
+ ERTS_PORT_GET_CONNECTED(prt),
NULL,
0,
LINEBUF_DATA(lc),
@@ -1779,6 +3095,8 @@ deliver_vec_message(Port* prt, /* Port */
ErlHeapFragment *bp;
ErlOffHeap *ohp;
ErtsProcLocks rp_locks = 0;
+ int scheduler = erts_get_scheduler_id() != 0;
+ erts_aint32_t state;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -1787,16 +3105,20 @@ deliver_vec_message(Port* prt, /* Port */
* Check arguments for validity.
*/
- rp = erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+
+ rp = (scheduler
+ ? erts_proc_lookup(to)
+ : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC));
if (!rp)
return;
+ state = erts_atomic32_read_nob(&prt->state);
/*
* Calculate the exact number of heap words needed.
*/
need = 3 + 3; /* Heap space for two tuples */
- if (prt->status & ERTS_PORT_SFLG_BINARY_IO) {
+ if (state & ERTS_PORT_SFLG_BINARY_IO) {
need += (2+PROC_BIN_SIZE)*vsize - 2 + hlen*2;
} else {
need += (hlen+csize)*2;
@@ -1807,7 +3129,7 @@ deliver_vec_message(Port* prt, /* Port */
listp = NIL;
iov += vsize;
- if ((prt->status & ERTS_PORT_SFLG_BINARY_IO) == 0) {
+ if ((state & ERTS_PORT_SFLG_BINARY_IO) == 0) {
Eterm* thp = hp;
while (vsize--) {
iov--;
@@ -1860,7 +3182,7 @@ deliver_vec_message(Port* prt, /* Port */
tuple = TUPLE2(hp, am_data, listp);
hp += 3;
- tuple = TUPLE2(hp, prt->id, tuple);
+ tuple = TUPLE2(hp, prt->common.id, tuple);
hp += 3;
erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
@@ -1869,7 +3191,8 @@ deliver_vec_message(Port* prt, /* Port */
#endif
);
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ if (!scheduler)
+ erts_smp_proc_dec_refc(rp);
}
@@ -1892,7 +3215,7 @@ static void deliver_bin_message(Port* prt, /* port */
/*
* Note.
*
- * The test for (p->status & ERTS_PORT_SFLGS_DEAD) == 0 is important since the
+ * The test for ERTS_PORT_SFLGS_DEAD is important since the
* driver's flush function might call driver_async, which when using no
* threads and being short circuited will notice that the io queue is empty
* (after calling the driver's async_ready) and recursively call
@@ -1908,7 +3231,7 @@ static void flush_port(Port *p)
if (p->drv_ptr->flush != NULL) {
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_flush)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(p->connected, p)
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p)
DTRACE3(driver_flush, process_str, port_str, p->name);
}
#endif
@@ -1923,11 +3246,12 @@ static void flush_port(Port *p)
}
#ifdef ERTS_SMP
if (p->xports)
- erts_smp_xports_unlock(p);
+ erts_port_handle_xports(p);
ASSERT(!p->xports);
#endif
}
- if ((p->status & ERTS_PORT_SFLGS_DEAD) == 0 && is_port_ioq_empty(p)) {
+ if ((erts_atomic32_read_nob(&p->state) & ERTS_PORT_SFLGS_DEAD) == 0
+ && is_port_ioq_empty(p)) {
terminate_port(p);
}
}
@@ -1939,29 +3263,29 @@ terminate_port(Port *prt)
Eterm send_closed_port_id;
Eterm connected_id = NIL /* Initialize to silence compiler */;
erts_driver_t *drv;
- int halt;
+ erts_aint32_t state;
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- ASSERT(!prt->nlinks);
- ASSERT(!prt->monitors);
+ ASSERT(!ERTS_P_LINKS(prt));
+ ASSERT(!ERTS_P_MONITORS(prt));
- /* prt->status may be altered by kill_port()below */
- halt = (prt->status & ERTS_PORT_SFLG_HALT) != 0;
- if (prt->status & ERTS_PORT_SFLG_SEND_CLOSED) {
- erts_port_status_band_set(prt, ~ERTS_PORT_SFLG_SEND_CLOSED);
- send_closed_port_id = prt->id;
- connected_id = prt->connected;
+ /* state may be altered by kill_port() below */
+ state = erts_atomic32_read_band_nob(&prt->state,
+ ~ERTS_PORT_SFLG_SEND_CLOSED);
+ if (state & ERTS_PORT_SFLG_SEND_CLOSED) {
+ send_closed_port_id = prt->common.id;
+ connected_id = ERTS_PORT_GET_CONNECTED(prt);
}
else {
send_closed_port_id = NIL;
}
#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(prt->ptimer);
+ erts_cancel_smp_ptimer(prt->common.u.alive.ptimer);
#else
- erts_cancel_timer(&prt->tm);
+ erts_cancel_timer(&prt->common.u.alive.tm);
#endif
drv = prt->drv_ptr;
@@ -1969,7 +3293,7 @@ terminate_port(Port *prt)
int fpe_was_unmasked = erts_block_fpe();
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_stop)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(prt->connected, prt)
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(connected_id, prt)
DTRACE3(driver_stop, process_str, drv->name, port_str);
}
#endif
@@ -1977,14 +3301,14 @@ terminate_port(Port *prt)
erts_unblock_fpe(fpe_was_unmasked);
#ifdef ERTS_SMP
if (prt->xports)
- erts_smp_xports_unlock(prt);
+ erts_port_handle_xports(prt);
ASSERT(!prt->xports);
#endif
}
if(drv->handle != NULL) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rlock(&erts_driver_list_lock);
erts_ddll_decrement_port_count(drv->handle);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_runlock(&erts_driver_list_lock);
}
stopq(prt); /* clear queue memory */
if(prt->linebuf != NULL){
@@ -2000,20 +3324,21 @@ terminate_port(Port *prt)
if (prt->psd)
erts_free(ERTS_ALC_T_PRTSD, prt->psd);
+ ASSERT(prt->dist_entry == NULL);
+
kill_port(prt);
/*
* We don't want to send the closed message until after the
* port has been removed from the port table (in kill_port()).
*/
- if (halt && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) {
- erts_smp_port_unlock(prt); /* We will exit and never return */
+ if ((state & ERTS_PORT_SFLG_HALT)
+ && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) {
+ erts_port_release(prt); /* We will exit and never return */
erl_exit_flush_async(erts_halt_code, "");
}
if (is_internal_port(send_closed_port_id))
deliver_result(send_closed_port_id, connected_id, am_closed);
-
- ASSERT(prt->dist_entry == NULL);
}
void
@@ -2033,7 +3358,7 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc)
if (!rp) {
goto done;
}
- rmon = erts_remove_monitor(&(rp->monitors),mon->ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon == NULL) {
goto done;
@@ -2087,7 +3412,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
ASSERT(is_internal_pid(lnk->pid));
rp = erts_pid2proc(NULL, 0, lnk->pid, rp_locks);
if (rp) {
- ErtsLink *rlnk = erts_remove_link(&(rp->nlinks), psc->port);
+ ErtsLink *rlnk = erts_remove_link(&ERTS_P_LINKS(rp), psc->port);
if (rlnk) {
int xres = erts_send_exit_signal(NULL,
@@ -2123,11 +3448,13 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc)
* that is to kill a port till reason kill. Then the port is stopped.
*
*/
-void
-erts_do_exit_port(Port *p, Eterm from, Eterm reason)
+
+int
+erts_deliver_port_exit(Port *p, Eterm from, Eterm reason, int send_closed)
{
ErtsLink *lnk;
Eterm rreason;
+ erts_aint32_t state;
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
@@ -2147,66 +3474,76 @@ erts_do_exit_port(Port *p, Eterm from, Eterm reason)
}
#endif
- if ((p->status & (ERTS_PORT_SFLGS_DEAD
- | ERTS_PORT_SFLG_EXITING
- | ERTS_PORT_SFLG_IMMORTAL))
- || ((reason == am_normal) &&
- ((from != p->connected) && (from != p->id)))) {
- return;
- }
+ state = erts_atomic32_read_nob(&p->state);
+ if (state & (ERTS_PORT_SFLGS_DEAD
+ | ERTS_PORT_SFLG_EXITING
+ | ERTS_PORT_SFLG_CLOSING))
+ return 0;
+
+ if (reason == am_normal && from != ERTS_PORT_GET_CONNECTED(p) && from != p->common.id)
+ return 0;
+
+ if (send_closed)
+ erts_atomic32_read_bor_relb(&p->state,
+ ERTS_PORT_SFLG_SEND_CLOSED);
if (IS_TRACED_FL(p, F_TRACE_PORTS)) {
trace_port(p, am_closed, reason);
}
- erts_trace_check_exiting(p->id);
+ erts_trace_check_exiting(p->common.id);
/*
* Setting the port to not busy here, frees the list of pending
* processes and makes them runnable.
*/
- set_busy_port((ErlDrvPort)internal_port_index(p->id), 0);
+ set_busy_port((ErlDrvPort) p, 0);
- if (p->reg != NULL)
- (void) erts_unregister_name(NULL, 0, p, p->reg->name);
+ if (p->common.u.alive.reg != NULL)
+ (void) erts_unregister_name(NULL, 0, p, p->common.u.alive.reg->name);
- erts_port_status_bor_set(p, ERTS_PORT_SFLG_EXITING);
+ state = erts_atomic32_read_bor_relb(&p->state, ERTS_PORT_SFLG_EXITING);
{
- SweepContext sc = {p->id, rreason};
- lnk = p->nlinks;
- p->nlinks = NULL;
+ SweepContext sc = {p->common.id, rreason};
+ lnk = ERTS_P_LINKS(p);
+ ERTS_P_LINKS(p) = NULL;
erts_sweep_links(lnk, &sweep_one_link, &sc);
}
DRV_MONITOR_LOCK_PDL(p);
{
- ErtsMonitor *moni = p->monitors;
- p->monitors = NULL;
+ ErtsMonitor *moni = ERTS_P_MONITORS(p);
+ ERTS_P_MONITORS(p) = NULL;
erts_sweep_monitors(moni, &sweep_one_monitor, NULL);
}
DRV_MONITOR_UNLOCK_PDL(p);
- if ((p->status & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) {
+ if ((state & ERTS_PORT_SFLG_DISTRIBUTION) && p->dist_entry) {
erts_do_net_exits(p->dist_entry, rreason);
erts_deref_dist_entry(p->dist_entry);
- p->dist_entry = NULL;
- erts_port_status_band_set(p, ~ERTS_PORT_SFLG_DISTRIBUTION);
+ p->dist_entry = NULL;
+ erts_atomic32_read_band_relb(&p->state,
+ ~ERTS_PORT_SFLG_DISTRIBUTION);
}
if ((reason != am_kill) && !is_port_ioq_empty(p)) {
- erts_port_status_bandor_set(p,
- ~ERTS_PORT_SFLG_EXITING, /* must turn it off */
- ERTS_PORT_SFLG_CLOSING);
+ /* must turn exiting flag off */
+ erts_atomic32_read_bset_relb(&p->state,
+ (ERTS_PORT_SFLG_EXITING
+ | ERTS_PORT_SFLG_CLOSING),
+ ERTS_PORT_SFLG_CLOSING);
flush_port(p);
}
else {
terminate_port(p);
}
+
+ return 1;
}
/* About the states ERTS_PORT_SFLG_EXITING and ERTS_PORT_SFLG_CLOSING used above.
**
-** ERTS_PORT_SFLG_EXITING is a recursion protection for erts_do_exit_port().
+** ERTS_PORT_SFLG_EXITING is a recursion protection for erts_deliver_port_exit().
** It is unclear whether this state is necessary or not, it might be possible
** to merge it with ERTS_PORT_SFLG_CLOSING. ERTS_PORT_SFLG_EXITING only persists
** over a section of sequential (but highly recursive) code.
@@ -2222,234 +3559,1108 @@ erts_do_exit_port(Port *p, Eterm from, Eterm reason)
** {PID, close}
** {PID, {command, io-list}}
** {PID, {connect, New_PID}}
-**
-**
*/
-void erts_port_command(Process *proc,
- Eterm caller_id,
- Port *port,
- Eterm command)
+ErtsPortOpResult
+erts_port_command(Process *c_p,
+ int flags,
+ Port *port,
+ Eterm command,
+ Eterm *refp)
{
Eterm *tp;
- Eterm pid;
- if (!port)
- return;
+ ASSERT(port);
- erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
- ASSERT(!INVALID_PORT(port, port->id));
+ flags |= ERTS_PORT_SIG_FLG_BANG_OP;
if (is_tuple_arity(command, 2)) {
+ Eterm cntd;
tp = tuple_val(command);
- if ((pid = port->connected) == tp[1]) {
- /* PID must be connected */
+ cntd = tp[1];
+ if (is_internal_pid(cntd)) {
if (tp[2] == am_close) {
- erts_port_status_bor_set(port, ERTS_PORT_SFLG_SEND_CLOSED);
- erts_do_exit_port(port, pid, am_normal);
-
-#ifdef USE_VM_PROBES
- if(DTRACE_ENABLED(port_command)) {
- DTRACE_FORMAT_COMMON_PROC_AND_PORT(proc, port)
- DTRACE4(port_command, process_str, port_str, port->name, "close");
- }
-#endif
- goto done;
+ if (!erts_port_synchronous_ops)
+ refp = NULL;
+ flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND;
+ return erts_port_exit(c_p, flags, port, cntd, am_normal, refp);
} else if (is_tuple_arity(tp[2], 2)) {
tp = tuple_val(tp[2]);
if (tp[1] == am_command) {
- if (erts_write_to_port(caller_id, port, tp[2]) == 0)
- goto done;
- } else if ((tp[1] == am_connect) && is_internal_pid(tp[2])) {
-#ifdef USE_VM_PROBES
- if(DTRACE_ENABLED(port_command)) {
- DTRACE_FORMAT_COMMON_PROC_AND_PORT(proc, port)
- DTRACE4(port_command, process_str, port_str, port->name, "connect");
- }
-#endif
- port->connected = tp[2];
- deliver_result(port->id, pid, am_connected);
- goto done;
+ if (!(flags & ERTS_PORT_SIG_FLG_NOSUSPEND)
+ && !erts_port_synchronous_ops)
+ refp = NULL;
+ return erts_port_output(c_p, flags, port, cntd, tp[2], refp);
+ }
+ else if (tp[1] == am_connect) {
+ if (!erts_port_synchronous_ops)
+ refp = NULL;
+ flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND;
+ return erts_port_connect(c_p, flags, port, cntd, tp[2], refp);
}
}
}
}
- {
- ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
- Process* rp = erts_pid2proc_opt(NULL, 0,
- port->connected, rp_locks,
- ERTS_P2P_FLG_SMP_INC_REFC);
- if (rp) {
- (void) erts_send_exit_signal(NULL,
- port->id,
- rp,
- &rp_locks,
- am_badsig,
- NIL,
- NULL,
- 0);
- erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
- }
+ /* badsig */
+ if (!erts_port_synchronous_ops)
+ refp = NULL;
+ flags &= ~ERTS_PORT_SIG_FLG_NOSUSPEND;
+ return bad_port_signal(c_p, flags, port, c_p->common.id, refp, am_command);
+}
+
+static ERTS_INLINE ErtsPortOpResult
+call_driver_control(Eterm caller,
+ Port *prt,
+ unsigned int command,
+ char *bufp,
+ ErlDrvSizeT size,
+ char **resp_bufp,
+ ErlDrvSizeT *from_size)
+{
+ ErlDrvSSizeT cres;
+
+ if (!prt->drv_ptr->control)
+ return ERTS_PORT_OP_BADARG;
+
+#ifdef USE_VM_PROBES
+ if (DTRACE_ENABLED(port_control) || DTRACE_ENABLED(driver_control)) {
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(caller, prt);
+ DTRACE4(port_control, process_str, port_str, prt->name, command);
+ DTRACE5(driver_control, process_str, port_str, prt->name,
+ command, size);
}
- done:
- erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+#endif
+
+ prt->caller = caller;
+ cres = prt->drv_ptr->control((ErlDrvData) prt->drv_data,
+ command,
+ bufp,
+ size,
+ resp_bufp,
+ *from_size);
+ prt->caller = NIL;
+
+ if (cres < 0)
+ return ERTS_PORT_OP_BADARG;
+
+ *from_size = (ErlDrvSizeT) cres;
+
+ return ERTS_PORT_OP_DONE;
}
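+
+/*
+ * The control callback follows the erl_driver convention: it returns
+ * the number of bytes placed in the response buffer, or a negative
+ * value to signal badarg. The pointer in *resp_bufp initially refers
+ * to a caller-provided buffer of ERL_ONHEAP_BIN_LIMIT bytes; a driver
+ * may leave it alone, repoint it at driver_alloc()'ed memory (list
+ * mode), or at an ErlDrvBinary (when PORT_CONTROL_FLAG_BINARY is set).
+ * A minimal hypothetical callback for a driver that has not set
+ * PORT_CONTROL_FLAG_BINARY could look like (sketch only):
+ *
+ *   static ErlDrvSSizeT my_control(ErlDrvData d, unsigned int cmd,
+ *                                  char *buf, ErlDrvSizeT len,
+ *                                  char **rbuf, ErlDrvSizeT rlen)
+ *   {
+ *       if (cmd != 1)
+ *           return -1;                     (reported as badarg)
+ *       if (rlen < 2)
+ *           *rbuf = driver_alloc(2);
+ *       (*rbuf)[0] = 'o'; (*rbuf)[1] = 'k';
+ *       return 2;
+ *   }
+ */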
-/*
- * Control a port synchronously.
- * Returns either a list or a binary.
- */
-Eterm
-erts_port_control(Process* p, Port* prt, Uint command, Eterm iolist)
-{
- byte* to_port = NULL; /* Buffer to write to port. */
- /* Initialization is for shutting up
- warning about use before set. */
- Uint to_len = 0; /* Length of buffer. */
- int must_free = 0; /* True if the buffer should be freed. */
- char port_result[ERL_ONHEAP_BIN_LIMIT]; /* Default buffer for result from port. */
- char* port_resp; /* Pointer to result buffer. */
- ErlDrvSSizeT n;
- ErlDrvSSizeT (*control)
- (ErlDrvData, unsigned, char*, ErlDrvSizeT, char**, ErlDrvSizeT);
- int fpe_was_unmasked;
+static void
+cleanup_scheduled_control(Binary *binp, char *bufp)
+{
+ if (binp) {
+ if (erts_refc_dectest(&binp->refc, 0) == 0)
+ erts_bin_free(binp);
+ }
+ else {
+ if (bufp)
+ erts_free(ERTS_ALC_T_DRV_CTRL_DATA, bufp);
+ }
+}
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if ((control = prt->drv_ptr->control) == NULL) {
- return THE_NON_VALUE;
+static ERTS_INLINE Uint
+port_control_result_size(int control_flags,
+ char *resp_bufp,
+ ErlDrvSizeT *resp_size,
+ char *pre_alloc_buf)
+{
+ if (!resp_bufp)
+ return (Uint) 0;
+
+ if (control_flags & PORT_CONTROL_FLAG_BINARY) {
+ if (resp_bufp != pre_alloc_buf) {
+ ErlDrvBinary *dbin = (ErlDrvBinary *) resp_bufp;
+ *resp_size = dbin->orig_size;
+ if (*resp_size > ERL_ONHEAP_BIN_LIMIT)
+ return PROC_BIN_SIZE;
+ }
+ ASSERT(*resp_size <= ERL_ONHEAP_BIN_LIMIT);
+ return (Uint) heap_bin_size((*resp_size));
}
- /*
- * Convert the iolist to a buffer, pointed to by to_port,
- * and with its length in to_len.
- */
- if (is_binary(iolist) && binary_bitoffset(iolist) == 0) {
+ return (Uint) 2*(*resp_size);
+}
+
+static ERTS_INLINE Eterm
+write_port_control_result(int control_flags,
+ char *resp_bufp,
+ ErlDrvSizeT resp_size,
+ char *pre_alloc_buf,
+ Eterm **hpp,
+ ErlHeapFragment *bp,
+ ErlOffHeap *ohp)
+{
+ Eterm res;
+ if (!resp_bufp)
+ return NIL;
+ if (control_flags & PORT_CONTROL_FLAG_BINARY) {
+ /* Binary result */
+ ErlDrvBinary *dbin;
+ ErlHeapBin *hbin;
+
+ if (resp_bufp == pre_alloc_buf)
+ dbin = NULL;
+ else {
+ dbin = (ErlDrvBinary *) resp_bufp;
+ if (dbin->orig_size > ERL_ONHEAP_BIN_LIMIT) {
+ ProcBin* pb = (ProcBin *) *hpp;
+ *hpp += PROC_BIN_SIZE;
+ pb->thing_word = HEADER_PROC_BIN;
+ pb->size = dbin->orig_size;
+ pb->next = ohp->first;
+ ohp->first = (struct erl_off_heap_header *) pb;
+ pb->val = ErlDrvBinary2Binary(dbin);
+ pb->bytes = (byte*) dbin->orig_bytes;
+ pb->flags = 0;
+ OH_OVERHEAD(ohp, dbin->orig_size / sizeof(Eterm));
+ return make_binary(pb);
+ }
+ resp_bufp = dbin->orig_bytes;
+ resp_size = dbin->orig_size;
+ }
+
+ hbin = (ErlHeapBin *) *hpp;
+ *hpp += heap_bin_size(resp_size);
+ ASSERT(resp_size <= ERL_ONHEAP_BIN_LIMIT);
+ hbin->thing_word = header_heap_bin(resp_size);
+ hbin->size = resp_size;
+ sys_memcpy(hbin->data, resp_bufp, resp_size);
+ if (dbin)
+ driver_free_binary(dbin);
+ return make_binary(hbin);
+ }
+
+ /* List result */
+ res = buf_to_intlist(hpp, resp_bufp, resp_size, NIL);
+ if (resp_bufp != pre_alloc_buf)
+ driver_free(resp_bufp);
+ return res;
+}
+
+static int
+port_sig_control(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ char resp_buf[ERL_ONHEAP_BIN_LIMIT];
+ ErlDrvSizeT resp_size = sizeof(resp_buf);
+ char *resp_bufp = &resp_buf[0];
+ ErtsPortOpResult res;
+
+ res = call_driver_control(sigdp->caller,
+ prt,
+ sigdp->u.control.command,
+ sigdp->u.control.bufp,
+ sigdp->u.control.size,
+ &resp_bufp,
+ &resp_size);
+
+ if (res == ERTS_PORT_OP_DONE) {
+ Eterm msg;
+ Eterm *hp, *hp_start;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+ Uint hsz;
+ int control_flags;
+
+ rp = erts_proc_lookup_raw(sigdp->caller);
+ if (!rp)
+ goto done;
+
+ control_flags = prt->control_flags;
+
+ hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+ hsz += port_control_result_size(control_flags,
+ resp_bufp,
+ &resp_size,
+ &resp_buf[0]);
+
+ hp_start = hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+
+ msg = write_port_control_result(control_flags,
+ resp_bufp,
+ resp_size,
+ &resp_buf[0],
+ &hp,
+ bp,
+ ohp);
+
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ sigdp->ref,
+ msg);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ goto done;
+ }
+ }
+
+ /* failure */
+
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+
+done:
+
+ cleanup_scheduled_control(sigdp->u.control.binp,
+ sigdp->u.control.bufp);
+
+ return ERTS_PORT_REDS_CONTROL;
+}
+
+
+ErtsPortOpResult
+erts_port_control(Process* c_p,
+ Port *prt,
+ unsigned int command,
+ Eterm data,
+ Eterm *retvalp)
+{
+ ErtsPortOpResult res;
+ char *bufp = NULL;
+ ErlDrvSizeT size = 0;
+ int try_call;
+ int tmp_alloced = 0;
+ erts_aint32_t sched_flags;
+ Binary *binp;
+ int copy;
+ ErtsProc2PortSigData *sigdp;
+
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sched_flags & ERTS_PTS_FLG_EXIT)
+ return ERTS_PORT_OP_BADARG;
+
+ try_call = !(sched_flags & ERTS_PTS_FLGS_FORCE_SCHEDULE_OP);
+
+ if (is_binary(data) && binary_bitoffset(data) == 0) {
+ byte *bytep;
ERTS_DECLARE_DUMMY(Uint bitoffs);
ERTS_DECLARE_DUMMY(Uint bitsize);
- ERTS_GET_BINARY_BYTES(iolist, to_port, bitoffs, bitsize);
- to_len = binary_size(iolist);
+ ERTS_GET_BINARY_BYTES(data, bytep, bitoffs, bitsize);
+ bufp = (char *) bytep;
+ size = binary_size(data);
} else {
int r;
- /* Try with an 8KB buffer first (will often be enough I guess). */
- to_len = 8*1024;
- to_port = erts_alloc(ERTS_ALC_T_TMP, to_len);
- must_free = 1;
+ if (!try_call) {
+ if (erts_iolist_size(data, &size))
+ return ERTS_PORT_OP_BADARG;
+ bufp = erts_alloc(ERTS_ALC_T_DRV_CTRL_DATA, size);
+ r = erts_iolist_to_buf(data, bufp, size);
+ ASSERT(r == 0);
+ }
+ else {
+ /* Try with an 8KB buffer first (will often be enough I guess). */
+ size = 8*1024;
+ bufp = erts_alloc(ERTS_ALC_T_TMP, size);
+ tmp_alloced = 1;
+
+ r = erts_iolist_to_buf(data, bufp, size);
+ if (ERTS_IOLIST_TO_BUF_SUCCEEDED(r)) {
+ size -= r;
+ } else {
+ if (r == ERTS_IOLIST_TO_BUF_TYPE_ERROR) { /* Type error */
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ return ERTS_PORT_OP_BADARG;
+ }
+ else {
+ ASSERT(r == ERTS_IOLIST_TO_BUF_OVERFLOW); /* Overflow */
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ if (erts_iolist_size(data, &size))
+ return ERTS_PORT_OP_BADARG; /* Type error */
+ }
+ bufp = erts_alloc(ERTS_ALC_T_TMP, size);
+ r = erts_iolist_to_buf(data, bufp, size);
+ ASSERT(r == 0);
+ }
+ }
+ }
- /*
- * In versions before R10B, we used to reserve random
- * amounts of extra memory. From R10B, we allocate the
- * exact amount.
- */
- r = io_list_to_buf(iolist, (char*) to_port, to_len);
- if (r >= 0) {
- to_len -= r;
- } else if (r == -2) { /* Type error */
- erts_free(ERTS_ALC_T_TMP, (void *) to_port);
- return THE_NON_VALUE;
- } else {
- ASSERT(r == -1); /* Overflow */
- erts_free(ERTS_ALC_T_TMP, (void *) to_port);
- if (erts_iolist_size(iolist, &to_len)) { /* Type error */
- return THE_NON_VALUE;
+ if (try_call) {
+ char resp_buf[ERL_ONHEAP_BIN_LIMIT];
+ char* resp_bufp = &resp_buf[0];
+ ErlDrvSizeT resp_size = sizeof(resp_buf);
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ 0,
+ am_control);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ Eterm *hp;
+ Uint hsz;
+ int control_flags;
+
+ res = call_driver_control(c_p->common.id,
+ prt,
+ command,
+ bufp,
+ size,
+ &resp_bufp,
+ &resp_size);
+ finalize_imm_drv_call(&try_call_state);
+ if (tmp_alloced)
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ if (res == ERTS_PORT_OP_BADARG) {
+ return ERTS_PORT_OP_BADARG;
}
- must_free = 1;
- to_port = erts_alloc(ERTS_ALC_T_TMP, to_len);
- r = io_list_to_buf(iolist, (char*) to_port, to_len);
- ASSERT(r == 0);
+
+ control_flags = prt->control_flags;
+
+ hsz = port_control_result_size(control_flags,
+ resp_bufp,
+ &resp_size,
+ &resp_buf[0]);
+ hp = HAlloc(c_p, hsz);
+ *retvalp = write_port_control_result(control_flags,
+ resp_bufp,
+ resp_size,
+ &resp_buf[0],
+ &hp,
+ NULL,
+ &c_p->off_heap);
+ return ERTS_PORT_OP_DONE;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ if (tmp_alloced)
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ return ERTS_PORT_OP_BADARG;
+ default:
+ /* Schedule control() call instead... */
+ break;
}
}
- prt->caller = p->id; /* Internal pid */
+ /* Convert data into something that can be scheduled */
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- ERTS_SMP_CHK_NO_PROC_LOCKS;
+ copy = tmp_alloced;
+
+ binp = NULL;
+
+ if (is_binary(data) && binary_bitoffset(data) == 0) {
+ Eterm *ebinp = binary_val_rel(data, NULL);
+ ASSERT(!tmp_alloced);
+ if (*ebinp == HEADER_SUB_BIN)
+ ebinp = binary_val_rel(((ErlSubBin *) ebinp)->orig, NULL);
+ if (*ebinp != HEADER_PROC_BIN)
+ copy = 1;
+ else {
+ binp = ((ProcBin *) ebinp)->val;
+ ASSERT(bufp < bufp + size);
+ ASSERT(binp->orig_bytes <= bufp
+ && bufp + size <= binp->orig_bytes + binp->orig_size);
+ erts_refc_inc(&binp->refc, 1);
+ }
+ }
+
+ if (copy) {
+ char *old_bufp = bufp;
+ bufp = erts_alloc(ERTS_ALC_T_DRV_CTRL_DATA, size);
+ sys_memcpy(bufp, old_bufp, size);
+ if (tmp_alloced)
+ erts_free(ERTS_ALC_T_TMP, old_bufp);
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_CONTROL;
+ sigdp->u.control.binp = binp;
+ sigdp->u.control.command = command;
+ sigdp->u.control.bufp = bufp;
+ sigdp->u.control.size = size;
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ retvalp,
+ sigdp,
+ 0,
+ port_sig_control);
+ if (res != ERTS_PORT_OP_SCHEDULED) {
+ cleanup_scheduled_control(binp, bufp);
+ return ERTS_PORT_OP_BADARG;
+ }
+ return res;
+}
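+
+/*
+ * When the control request has to be scheduled, the input is made
+ * self-contained first: data living in an off-heap (ProcBin) binary is
+ * not copied; only the referenced Binary's refc is incremented and
+ * remembered in sigdp->u.control.binp so that it can be released in
+ * cleanup_scheduled_control(). Everything else (heap binaries, sub
+ * binaries over heap binaries, iolists) is copied into
+ * ERTS_ALC_T_DRV_CTRL_DATA memory that survives until the signal runs.
+ */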
+
+static ERTS_INLINE ErtsPortOpResult
+call_driver_call(Eterm caller,
+ Port *prt,
+ unsigned int command,
+ char *bufp,
+ ErlDrvSizeT size,
+ char **resp_bufp,
+ ErlDrvSizeT *from_size,
+ unsigned *ret_flagsp)
+{
+ ErlDrvSSizeT cres;
+
+ if (!prt->drv_ptr->call)
+ return ERTS_PORT_OP_BADARG;
#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(port_control) || DTRACE_ENABLED(driver_control)) {
- DTRACE_FORMAT_COMMON_PROC_AND_PORT(p, prt);
- DTRACE4(port_control, process_str, port_str, prt->name, command);
- DTRACE5(driver_control, process_str, port_str, prt->name,
- command, to_len);
+ if (DTRACE_ENABLED(driver_call)) {
+ DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
+ DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);
+
+ dtrace_pid_str(caller, process_str);
+ dtrace_port_str(prt, port_str);
+ DTRACE5(driver_call, process_str, port_str, prt->name, command, size);
}
#endif
- /*
- * Call the port's control routine.
- */
+ prt->caller = caller;
+ cres = prt->drv_ptr->call((ErlDrvData) prt->drv_data,
+ command,
+ bufp,
+ size,
+ resp_bufp,
+ *from_size,
+ ret_flagsp);
+ prt->caller = NIL;
- port_resp = port_result;
- fpe_was_unmasked = erts_block_fpe();
- n = control((ErlDrvData)prt->drv_data, command, (char*)to_port, to_len,
- &port_resp, sizeof(port_result));
- erts_unblock_fpe(fpe_was_unmasked);
- if (must_free) {
- erts_free(ERTS_ALC_T_TMP, (void *) to_port);
+ if (cres <= 0
+ || ((byte) (*resp_bufp)[0]) != VERSION_MAGIC)
+ return ERTS_PORT_OP_BADARG;
+
+ *from_size = (ErlDrvSizeT) cres;
+
+ return ERTS_PORT_OP_DONE;
+}
+
+
+static
+void cleanup_scheduled_call(char *bufp)
+{
+ if (bufp)
+ erts_free(ERTS_ALC_T_DRV_CALL_DATA, bufp);
+}
+
+static int
+port_sig_call(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ char resp_buf[256];
+ ErlDrvSizeT resp_size = sizeof(resp_buf);
+ char *resp_bufp = &resp_buf[0];
+ unsigned ret_flags = 0U;
+
+
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ ErtsPortOpResult res;
+
+ res = call_driver_call(sigdp->caller,
+ prt,
+ sigdp->u.call.command,
+ sigdp->u.call.bufp,
+ sigdp->u.call.size,
+ &resp_bufp,
+ &resp_size,
+ &ret_flags);
+
+ if (res == ERTS_PORT_OP_DONE) {
+ Eterm msg;
+ Eterm *hp;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+	    Sint hsz; /* erts_decode_ext_size() returns a negative value on failure */
+
+ rp = erts_proc_lookup_raw(sigdp->caller);
+ if (!rp)
+ goto done;
+
+ hsz = erts_decode_ext_size((byte *) resp_bufp, resp_size);
+ if (hsz >= 0) {
+ Eterm *hp_start;
+ byte *endp;
+
+ hsz += 3; /* ok tuple */
+ hsz += ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+
+ hp_start = hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+ endp = (byte *) resp_bufp;
+ msg = erts_decode_ext(&hp, ohp, &endp);
+ if (is_value(msg)) {
+ msg = TUPLE2(hp, am_ok, msg);
+ hp += 3;
+
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ sigdp->ref,
+ msg);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ goto done;
+ }
+ if (bp)
+ free_message_buffer(bp);
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ }
}
- prt->caller = NIL;
-#ifdef ERTS_SMP
- if (prt->xports)
- erts_smp_xports_unlock(prt);
- ASSERT(!prt->xports);
-#endif
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- /*
- * Handle the result.
- */
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+
+done:
+
+ if (resp_bufp != &resp_buf[0] && !(ret_flags & DRIVER_CALL_KEEP_BUFFER))
+ driver_free(resp_bufp);
+
+ cleanup_scheduled_call(sigdp->u.call.bufp);
+
+ return ERTS_PORT_REDS_CALL;
+}
+
+
+ErtsPortOpResult
+erts_port_call(Process* c_p,
+ Port *prt,
+ unsigned int command,
+ Eterm data,
+ Eterm *retvalp)
+{
+ ErtsPortOpResult res;
+ char input_buf[256];
+ char *bufp;
+ byte *endp;
+ ErlDrvSizeT size;
+ int try_call;
+ erts_aint32_t sched_flags;
+ ErtsProc2PortSigData *sigdp;
- if (n < 0) {
- return THE_NON_VALUE;
+ sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ if (sched_flags & ERTS_PTS_FLG_EXIT) {
+ return ERTS_PORT_OP_BADARG;
}
- if ((prt->control_flags & PORT_CONTROL_FLAG_BINARY) == 0) { /* List result */
- Eterm ret;
- Eterm* hp = HAlloc(p, 2*n);
- ret = buf_to_intlist(&hp, port_resp, n, NIL);
- if (port_resp != port_result) {
- driver_free(port_resp);
+ try_call = !(sched_flags & ERTS_PTS_FLGS_FORCE_SCHEDULE_OP);
+
+ size = erts_encode_ext_size(data);
+
+ if (!try_call)
+ bufp = erts_alloc(ERTS_ALC_T_DRV_CALL_DATA, size);
+ else if (size <= sizeof(input_buf))
+ bufp = &input_buf[0];
+ else
+ bufp = erts_alloc(ERTS_ALC_T_TMP, size);
+
+ endp = (byte *) bufp;
+ erts_encode_ext(data, &endp);
+
+ if (endp - (byte *) bufp > size)
+ ERTS_INTERNAL_ERROR("erts_internal:port_call() - Buffer overflow");
+
+ size = endp - (byte *) bufp;
+
+ if (try_call) {
+ char resp_buf[255];
+ char* resp_bufp = &resp_buf[0];
+ ErlDrvSizeT resp_size = sizeof(resp_buf);
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ 0,
+ am_call);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ Eterm *hp, *hp_end;
+	    Sint hsz; /* negative if the driver's reply fails to decode */
+ unsigned ret_flags = 0U;
+ Eterm term;
+
+ res = call_driver_call(c_p->common.id,
+ prt,
+ command,
+ bufp,
+ size,
+ &resp_bufp,
+ &resp_size,
+ &ret_flags);
+
+ finalize_imm_drv_call(&try_call_state);
+ if (bufp != &input_buf[0])
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ if (res == ERTS_PORT_OP_BADARG)
+ return ERTS_PORT_OP_BADARG;
+ hsz = erts_decode_ext_size((byte *) resp_bufp, resp_size);
+ if (hsz < 0)
+ return ERTS_PORT_OP_BADARG;
+ hsz += 3;
+ hp = HAlloc(c_p, hsz);
+ hp_end = hp + hsz;
+ endp = (byte *) resp_bufp;
+ term = erts_decode_ext(&hp, &MSO(c_p), &endp);
+ if (term == THE_NON_VALUE)
+ return ERTS_PORT_OP_BADARG;
+ *retvalp = TUPLE2(hp, am_ok, term);
+ hp += 3;
+ HRelease(c_p, hp_end, hp);
+	    if (resp_bufp != &resp_buf[0]
+		&& !(ret_flags & DRIVER_CALL_KEEP_BUFFER))
+		driver_free(resp_bufp);
+ return ERTS_PORT_OP_DONE;
}
- return ret;
- }
- else if (port_resp == NULL) {
- return NIL;
- }
- else { /* Binary result */
- ErlDrvBinary *dbin;
- ErlHeapBin *hbin;
- if (port_resp != port_result) {
- dbin = (ErlDrvBinary *) port_resp;
- if (dbin->orig_size > ERL_ONHEAP_BIN_LIMIT) {
- ProcBin* pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
- pb->thing_word = HEADER_PROC_BIN;
- pb->size = dbin->orig_size;
- pb->next = MSO(p).first;
- MSO(p).first = (struct erl_off_heap_header*)pb;
- pb->val = ErlDrvBinary2Binary(dbin);
- pb->bytes = (byte*) dbin->orig_bytes;
- pb->flags = 0;
- OH_OVERHEAD(&(MSO(p)), dbin->orig_size / sizeof(Eterm));
- return make_binary(pb);
- }
- port_resp = dbin->orig_bytes;
- n = dbin->orig_size;
- } else {
- dbin = NULL;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ if (bufp != &input_buf[0])
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ return ERTS_PORT_OP_BADARG;
+ default:
+ /* Schedule call() call instead... */
+ break;
}
- hbin = (ErlHeapBin*) HAlloc(p, heap_bin_size(n));
- ASSERT(n <= ERL_ONHEAP_BIN_LIMIT);
- hbin->thing_word = header_heap_bin(n);
- hbin->size = n;
- sys_memcpy(hbin->data, port_resp, n);
- if (dbin != NULL) {
- driver_free_binary(dbin);
+ }
+
+ /* Convert data into something that can be scheduled */
+
+ if (bufp == &input_buf[0] || try_call) {
+ char *new_bufp = erts_alloc(ERTS_ALC_T_DRV_CALL_DATA, size);
+ sys_memcpy(new_bufp, bufp, size);
+ if (bufp != &input_buf[0])
+ erts_free(ERTS_ALC_T_TMP, bufp);
+ bufp = new_bufp;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_CALL;
+ sigdp->u.call.command = command;
+ sigdp->u.call.bufp = bufp;
+ sigdp->u.call.size = size;
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ retvalp,
+ sigdp,
+ 0,
+ port_sig_call);
+ if (res != ERTS_PORT_OP_SCHEDULED) {
+ cleanup_scheduled_call(bufp);
+ return ERTS_PORT_OP_BADARG;
+ }
+ return res;
+}
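+
+/*
+ * port_call uses the external term format in both directions: the
+ * argument term is encoded with erts_encode_ext() into the buffer
+ * handed to the driver's call() callback, and the driver must answer
+ * with external-format bytes (starting with VERSION_MAGIC) which are
+ * decoded with erts_decode_ext() and returned to the caller wrapped in
+ * an {ok, Term} tuple. Anything else is treated as badarg.
+ */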
+
+static Eterm
+make_port_info_term(Eterm **hpp_start,
+ Eterm **hpp,
+ Uint *hszp,
+ ErlHeapFragment **bpp,
+ Port *prt,
+ Eterm item)
+{
+ ErlOffHeap *ohp;
+
+ if (is_value(item)) {
+ if (erts_bld_port_info(NULL, NULL, hszp, prt, item) == am_false)
+ return THE_NON_VALUE;
+ if (*hszp) {
+ *bpp = new_message_buffer(*hszp);
+ *hpp_start = *hpp = (*bpp)->mem;
+ ohp = &(*bpp)->off_heap;
}
- return make_binary(hbin);
+ else {
+ *bpp = NULL;
+ *hpp_start = *hpp = NULL;
+ ohp = NULL;
+ }
+ return erts_bld_port_info(hpp, ohp, NULL, prt, item);
+ }
+ else {
+ int i;
+ int len;
+ int start;
+ static Eterm item[] = ERTS_PORT_INFO_1_ITEMS;
+ static Eterm value[sizeof(item)/sizeof(item[0])];
+
+ start = 0;
+ len = sizeof(item)/sizeof(item[0]);
+
+ for (i = start; i < sizeof(item)/sizeof(item[0]); i++) {
+ ASSERT(is_atom(item[i]));
+ value[i] = erts_bld_port_info(NULL, NULL, hszp, prt, item[i]);
+ }
+
+ if (value[0] == am_undefined) {
+ start++;
+ len--;
+ }
+
+ erts_bld_list(NULL, hszp, len, &value[start]);
+
+ *bpp = new_message_buffer(*hszp);
+ *hpp_start = *hpp = (*bpp)->mem;
+ ohp = &(*bpp)->off_heap;
+
+ for (i = start; i < sizeof(item)/sizeof(item[0]); i++)
+ value[i] = erts_bld_port_info(hpp, ohp, NULL, prt, item[i]);
+
+ return erts_bld_list(hpp, NULL, len, &value[start]);
+ }
+}
+
+static int
+port_sig_info(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+ if (op != ERTS_PROC2PORT_SIG_EXEC)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_undefined);
+ else {
+ Eterm *hp, *hp_start;
+ Uint hsz;
+ ErlHeapFragment *bp;
+ Eterm value;
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+
+ rp = erts_proc_lookup_raw(sigdp->caller);
+ if (!rp)
+ return ERTS_PORT_REDS_INFO;
+
+ hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+ value = make_port_info_term(&hp_start,
+ &hp,
+ &hsz,
+ &bp,
+ prt,
+ sigdp->u.info.item);
+ if (is_value(value)) {
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ sigdp->ref,
+ value);
+ }
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ return ERTS_PORT_REDS_INFO;
+}
+
+ErtsPortOpResult
+erts_port_info(Process* c_p,
+ Port *prt,
+ Eterm item,
+ Eterm *retvalp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ 0,
+ am_info);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ Eterm *hp, *hp_start;
+ ErlHeapFragment *bp;
+ Uint hsz = 0;
+ Eterm value = make_port_info_term(&hp_start, &hp, &hsz, &bp, prt, item);
+ finalize_imm_drv_call(&try_call_state);
+ if (is_non_value(value))
+ return ERTS_PORT_OP_BADARG;
+ else if (is_immed(value))
+ *retvalp = value;
+ else {
+ Uint used_h_size = hp - hp_start;
+ hp = HAlloc(c_p, used_h_size);
+ *retvalp = copy_struct(value, used_h_size, &hp, &MSO(c_p));
+ free_message_buffer(bp);
+ }
+ return ERTS_PORT_OP_DONE;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule call instead... */
+ break;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_INFO;
+ sigdp->u.info.item = item;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ retvalp,
+ sigdp,
+ 0,
+ port_sig_info);
+}
+
+static int
+port_sig_set_data(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+
+ if (op == ERTS_PROC2PORT_SIG_EXEC) {
+ if (prt->bp)
+ free_message_buffer(prt->bp);
+ prt->bp = sigdp->u.set_data.bp;
+ prt->data = sigdp->u.set_data.data;
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ }
+ else {
+ if (sigdp->u.set_data.bp)
+ free_message_buffer(sigdp->u.set_data.bp);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ }
+ return ERTS_PORT_REDS_SET_DATA;
+}
+
+ErtsPortOpResult
+erts_port_set_data(Process* c_p,
+ Port *prt,
+ Eterm data,
+ Eterm *refp)
+{
+ ErtsPortOpResult res;
+ Eterm set_data;
+ ErlHeapFragment *bp;
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ !refp,
+ am_set_data);
+
+ if (is_immed(data)) {
+ set_data = data;
+ bp = NULL;
}
+ else {
+ Eterm *hp;
+ Uint sz = size_object(data);
+ bp = new_message_buffer(sz);
+ hp = bp->mem;
+ set_data = copy_struct(data, sz, &hp, &bp->off_heap);
+ }
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK:
+ if (prt->bp)
+ free_message_buffer(prt->bp);
+ prt->bp = bp;
+ prt->data = set_data;
+ finalize_imm_drv_call(&try_call_state);
+ return ERTS_PORT_OP_DONE;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule call instead... */
+ break;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_SET_DATA;
+ sigdp->u.set_data.data = set_data;
+ sigdp->u.set_data.bp = bp;
+
+ res = erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ refp,
+ sigdp,
+ 0,
+ port_sig_set_data);
+ if (res != ERTS_PORT_OP_SCHEDULED && bp)
+ free_message_buffer(bp);
+ return res;
+}
+
+static int
+port_sig_get_data(Port *prt,
+ erts_aint32_t state,
+ int op,
+ ErtsProc2PortSigData *sigdp)
+{
+ ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
+ if (op != ERTS_PROC2PORT_SIG_EXEC)
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ else {
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+
+ rp = erts_proc_lookup_raw(sigdp->caller);
+ if (rp) {
+ Uint hsz;
+ Eterm *hp, *hp_start;
+ Eterm data, msg;
+ ErlHeapFragment *bp;
+ ErlOffHeap *ohp;
+
+ hsz = ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE;
+ hsz += 3;
+ if (prt->bp)
+ hsz += prt->bp->used_size;
+
+ hp_start = hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+
+ if (is_immed(prt->data))
+ data = prt->data;
+ else
+ data = copy_struct(prt->data,
+ prt->bp->used_size,
+ &hp,
+ &bp->off_heap);
+
+
+
+ msg = TUPLE2(hp, am_ok, data);
+ hp += 3;
+
+ queue_port_sched_op_reply(rp,
+ &rp_locks,
+ hp_start,
+ hp,
+ hsz,
+ bp,
+ sigdp->ref,
+ msg);
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+ }
+ return ERTS_PORT_REDS_GET_DATA;
+}
+
+ErtsPortOpResult
+erts_port_get_data(Process* c_p,
+ Port *prt,
+ Eterm *retvalp)
+{
+ ErtsProc2PortSigData *sigdp;
+ ErtsTryImmDrvCallResult try_call_res;
+ ErtsTryImmDrvCallState try_call_state
+ = ERTS_INIT_TRY_IMM_DRV_CALL_STATE(
+ c_p,
+ prt,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP,
+ 0,
+ 0,
+ am_get_data);
+
+ try_call_res = try_imm_drv_call(&try_call_state);
+ switch (try_call_res) {
+ case ERTS_TRY_IMM_DRV_CALL_OK: {
+ Eterm *hp;
+ Eterm data;
+ ErlHeapFragment *bp;
+ Uint sz;
+ if (is_immed(prt->data)) {
+ bp = NULL;
+ data = prt->data;
+ }
+ else {
+ bp = new_message_buffer(prt->bp->used_size);
+ data = copy_struct(prt->data,
+ prt->bp->used_size,
+ &hp,
+ &bp->off_heap);
+ }
+ finalize_imm_drv_call(&try_call_state);
+ if (is_immed(data))
+ sz = 0;
+ else
+ sz = bp->used_size;
+
+ hp = HAlloc(c_p, sz + 3);
+ if (is_not_immed(data)) {
+ data = copy_struct(data, bp->used_size, &hp, &MSO(c_p));
+ free_message_buffer(bp);
+ }
+ *retvalp = TUPLE2(hp, am_ok, data);
+ return ERTS_PORT_OP_DONE;
+ }
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_PORT:
+ return ERTS_PORT_OP_DROPPED;
+ case ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS:
+ case ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK:
+ /* Schedule call instead... */
+ break;
+ }
+
+ sigdp = erts_port_task_alloc_p2p_sig_data();
+ sigdp->flags = ERTS_P2P_SIG_TYPE_GET_DATA;
+
+ return erts_schedule_proc2port_signal(c_p,
+ prt,
+ c_p->common.id,
+ retvalp,
+ sigdp,
+ 0,
+ port_sig_get_data);
}
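
Editorial note, not part of the patch: erts_port_info(), erts_port_set_data() and erts_port_get_data() above all follow the same proc-to-port signal contract. A minimal, hedged sketch of that contract is shown here; port_sig_example and ERTS_PORT_REDS_EXAMPLE are illustrative placeholders (not symbols defined by this commit), and the snippet assumes the file-local helpers visible above (port_sched_op_reply etc.).

/* Sketch of a port_sig_* callback: when op == ERTS_PROC2PORT_SIG_EXEC the
 * port is alive and locked, so the operation is performed and a reply is
 * queued; for any other op the signal was aborted and a failure reply is
 * sent instead. The return value is the reduction cost charged to the port. */
static int
port_sig_example(Port *prt, erts_aint32_t state, int op,
                 ErtsProc2PortSigData *sigdp)
{
    ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
    if (op == ERTS_PROC2PORT_SIG_EXEC) {
        /* ... perform the scheduled operation on 'prt' here ... */
        port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
    }
    else {
        /* Port died or the signal was dropped before execution. */
        port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
    }
    return ERTS_PORT_REDS_EXAMPLE; /* hypothetical reduction count */
}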
typedef struct {
@@ -2470,39 +4681,39 @@ static void prt_one_lnk(ErtsLink *lnk, void *vprtd)
}
void
-print_port_info(int to, void *arg, int i)
+print_port_info(Port *p, int to, void *arg)
{
- Port* p = &erts_port[i];
+ erts_aint32_t state = erts_atomic32_read_nob(&p->state);
- if (p->status & ERTS_PORT_SFLGS_DEAD)
+ if (state & ERTS_PORT_SFLGS_DEAD)
return;
- erts_print(to, arg, "=port:%T\n", p->id);
- erts_print(to, arg, "Slot: %d\n", i);
- if (p->status & ERTS_PORT_SFLG_CONNECTED) {
- erts_print(to, arg, "Connected: %T", p->connected);
+ erts_print(to, arg, "=port:%T\n", p->common.id);
+ erts_print(to, arg, "Slot: %d\n", internal_port_index(p->common.id));
+ if (state & ERTS_PORT_SFLG_CONNECTED) {
+ erts_print(to, arg, "Connected: %T", ERTS_PORT_GET_CONNECTED(p));
erts_print(to, arg, "\n");
}
- if (p->nlinks != NULL) {
+ if (ERTS_P_LINKS(p)) {
prt_one_lnk_data prtd;
prtd.to = to;
prtd.arg = arg;
erts_print(to, arg, "Links: ");
- erts_doforall_links(p->nlinks, &prt_one_lnk, &prtd);
+ erts_doforall_links(ERTS_P_LINKS(p), &prt_one_lnk, &prtd);
erts_print(to, arg, "\n");
}
- if (p->monitors != NULL) {
+ if (ERTS_P_MONITORS(p)) {
prt_one_lnk_data prtd;
prtd.to = to;
prtd.arg = arg;
erts_print(to, arg, "Monitors: ");
- erts_doforall_monitors(p->monitors, &prt_one_monitor, &prtd);
+ erts_doforall_monitors(ERTS_P_MONITORS(p), &prt_one_monitor, &prtd);
erts_print(to, arg, "\n");
}
- if (p->reg != NULL)
- erts_print(to, arg, "Registered as: %T\n", p->reg->name);
+ if (p->common.u.alive.reg != NULL)
+ erts_print(to, arg, "Registered as: %T\n", p->common.u.alive.reg->name);
if (p->drv_ptr == &fd_driver) {
erts_print(to, arg, "Port is UNIX fd not opened by emulator: %s\n", p->name);
@@ -2516,109 +4727,143 @@ print_port_info(int to, void *arg, int i)
}
void
-set_busy_port(ErlDrvPort port_num, int on)
+set_busy_port(ErlDrvPort dprt, int on)
{
+ Port *prt;
+ erts_aint32_t flags;
+
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(port_str, 16);
#endif
ERTS_SMP_CHK_NO_PROC_LOCKS;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[port_num]));
+ prt = erts_drvport2port_raw(dprt);
+ if (!prt)
+ return;
if (on) {
- erts_port_status_bor_set(&erts_port[port_num],
- ERTS_PORT_SFLG_PORT_BUSY);
+ flags = erts_smp_atomic32_read_bor_acqb(&prt->sched.flags,
+ ERTS_PTS_FLG_BUSY_PORT);
+ if (flags & ERTS_PTS_FLG_BUSY_PORT)
+ return; /* Already busy */
+
+ if (flags & ERTS_PTS_FLG_HAVE_NS_TASKS)
+ erts_port_task_abort_nosuspend_tasks(prt);
+
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_busy)) {
erts_snprintf(port_str, sizeof(port_str),
- "%T", erts_port[port_num].id);
+ "%T", prt->common.id);
DTRACE1(port_busy, port_str);
}
#endif
} else {
- ErtsProcList* plp = erts_port[port_num].suspended;
- erts_port_status_band_set(&erts_port[port_num],
- ~ERTS_PORT_SFLG_PORT_BUSY);
- erts_port[port_num].suspended = NULL;
+ flags = erts_smp_atomic32_read_band_acqb(&prt->sched.flags,
+ ~ERTS_PTS_FLG_BUSY_PORT);
+ if (!(flags & ERTS_PTS_FLG_BUSY_PORT))
+ return; /* Already non-busy */
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(port_not_busy)) {
erts_snprintf(port_str, sizeof(port_str),
- "%T", erts_port[port_num].id);
+ "%T", prt->common.id);
DTRACE1(port_not_busy, port_str);
}
#endif
- if (erts_port[port_num].dist_entry) {
+ if (prt->dist_entry) {
/*
* Processes suspended on distribution ports are
* normally queued on the dist entry.
*/
- erts_dist_port_not_busy(&erts_port[port_num]);
+ erts_dist_port_not_busy(prt);
}
- /*
- * Resume, in a round-robin fashion, all processes waiting on the port.
- *
- * This version submitted by Tony Rogvall. The earlier version used
- * to resume the processes in order, which caused starvation of all but
- * the first process.
- */
+ if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q))
+ erts_port_resume_procs(prt);
+ }
+}
+
+void
+erts_port_resume_procs(Port *prt)
+{
+ /*
+ * Resume, in a round-robin fashion, all processes waiting on the port.
+ *
+ * This version submitted by Tony Rogvall. The earlier version used
+ * to resume the processes in order, which caused starvation of all but
+ * the first process.
+ */
+ ErtsProcList *plp;
+
+ erts_port_task_sched_lock(&prt->sched);
+ plp = prt->suspended;
+ prt->suspended = NULL;
+ erts_port_task_sched_unlock(&prt->sched);
+
+ if (erts_proclist_fetch(&plp, NULL)) {
- if (plp) {
#ifdef USE_VM_PROBES
- /*
- * Hrm, for blocked dist ports, plp always seems to be NULL.
- * That's not so fun.
- * Well, another way to get the same info is using a D
- * script to correlate an earlier process-port_blocked+pid
- * event with a later process-scheduled event. That's
- * subject to the multi-CPU races with how events are
- * handled, but hey, that way works most of the time.
- */
- if (DTRACE_ENABLED(process_port_unblocked)) {
- DTRACE_CHARBUF(pid_str, 16);
- ErtsProcList* plp2 = plp;
-
- erts_snprintf(port_str, sizeof(port_str),
- "%T", erts_port[port_num]);
- while (plp2 != NULL) {
- erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid);
- DTRACE2(process_port_unblocked, pid_str, port_str);
- }
- }
-#endif
- /* First proc should be resumed last */
- if (plp->next) {
- erts_resume_processes(plp->next);
- plp->next = NULL;
+ /*
+ * Hrm, for blocked dist ports, plp always seems to be NULL.
+ * That's not so fun.
+ * Well, another way to get the same info is using a D
+ * script to correlate an earlier process-port_blocked+pid
+ * event with a later process-scheduled event. That's
+ * subject to the multi-CPU races with how events are
+ * handled, but hey, that way works most of the time.
+ */
+ if (DTRACE_ENABLED(process_port_unblocked)) {
+ DTRACE_CHARBUF(port_str, 16);
+ DTRACE_CHARBUF(pid_str, 16);
+ ErtsProcList* plp2 = plp;
+
+ erts_snprintf(port_str, sizeof(port_str), "%T", prt->common.id);
+ while (plp2 != NULL) {
+ erts_snprintf(pid_str, sizeof(pid_str), "%T", plp2->pid);
+ DTRACE2(process_port_unblocked, pid_str, port_str);
}
- erts_resume_processes(plp);
- }
+ }
+#endif
+
+ /* First proc should be resumed last */
+ if (plp->next) {
+ plp->next->prev = NULL;
+ erts_resume_processes(plp->next);
+ plp->next = NULL;
+ }
+ erts_resume_processes(plp);
}
}
void set_port_control_flags(ErlDrvPort port_num, int flags)
{
-
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(&erts_port[port_num]));
-
- erts_port[port_num].control_flags = flags;
+ Port *prt = erts_drvport2port_raw(port_num);
+ if (prt)
+ prt->control_flags = flags;
}
-int get_port_flags(ErlDrvPort ix) {
- Port* prt = erts_drvport2port(ix);
+int get_port_flags(ErlDrvPort ix)
+{
+ int flags;
+ Port *prt;
+ erts_aint32_t state;
+
+ prt = erts_drvport2port(ix, &state);
+ if (!prt)
+ return 0;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (prt == NULL)
- return 0;
+ flags = 0;
+ if (state & ERTS_PORT_SFLG_BINARY_IO)
+ flags |= PORT_FLAG_BINARY;
+ if (state & ERTS_PORT_SFLG_LINEBUF_IO)
+ flags |= PORT_FLAG_LINE;
- return (prt->status & ERTS_PORT_SFLG_BINARY_IO ? PORT_FLAG_BINARY : 0)
- | (prt->status & ERTS_PORT_SFLG_LINEBUF_IO ? PORT_FLAG_LINE : 0);
+ return flags;
}
-
void erts_raw_port_command(Port* p, byte* buf, Uint len)
{
int fpe_was_unmasked;
@@ -2655,25 +4900,18 @@ int async_ready(Port *p, void* data)
if (p) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
- ASSERT(!(p->status & ERTS_PORT_SFLGS_DEAD));
if (p->drv_ptr->ready_async != NULL) {
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_ready_async)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(p->connected, p)
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(p), p)
DTRACE3(driver_ready_async, process_str, port_str, p->name);
}
#endif
(*p->drv_ptr->ready_async)((ErlDrvData)p->drv_data, data);
need_free = 0;
-#ifdef ERTS_SMP
- if (p->xports)
- erts_smp_xports_unlock(p);
- ASSERT(!p->xports);
-#endif
- }
- if ((p->status & ERTS_PORT_SFLG_CLOSING) && is_port_ioq_empty(p)) {
- terminate_port(p);
+
}
+ erts_port_driver_callback_epilogue(p, NULL);
}
return need_free;
}
@@ -2681,12 +4919,12 @@ int async_ready(Port *p, void* data)
static void
report_missing_drv_callback(Port *p, char *drv_type, char *callback)
{
- ErtsPortNames *pnp = erts_get_port_names(p->id);
+ ErtsPortNames *pnp = erts_get_port_names(p->common.id);
char *unknown = "<unknown>";
char *drv_name = pnp->driver_name ? pnp->driver_name : unknown;
char *prt_name = pnp->name ? pnp->name : unknown;
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "%T: %s driver '%s' ", p->id, drv_type, drv_name);
+ erts_dsprintf(dsbufp, "%T: %s driver '%s' ", p->common.id, drv_type, drv_name);
if (sys_strcmp(drv_name, prt_name) != 0)
erts_dsprintf(dsbufp, "(%s) ", prt_name);
erts_dsprintf(dsbufp, "does not implement the %s callback!\n", callback);
@@ -2701,7 +4939,7 @@ erts_stale_drv_select(Eterm port,
int deselect)
{
char *type;
- ErlDrvPort drv_port = internal_port_index(port);
+ ErlDrvPort drv_port = (ErlDrvPort) erts_port_lookup_raw(port);
ErtsPortNames *pnp = erts_get_port_names(port);
erts_dsprintf_buf_t *dsbufp;
@@ -2741,16 +4979,16 @@ erts_stale_drv_select(Eterm port,
ErtsPortNames *
erts_get_port_names(Eterm id)
{
+ Port *prt = erts_port_lookup_raw(id);
ErtsPortNames *pnp;
ASSERT(is_nil(id) || is_internal_port(id));
-
- if (is_not_internal_port(id)) {
+
+ if (!prt) {
pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, sizeof(ErtsPortNames));
pnp->name = NULL;
pnp->driver_name = NULL;
}
else {
- Port* prt = &erts_port[internal_port_index(id)];
int do_realloc = 1;
int len = -1;
size_t pnp_len = sizeof(ErtsPortNames);
@@ -2766,17 +5004,10 @@ erts_get_port_names(Eterm id)
pnp_len = sizeof(ErtsPortNames) + len;
pnp = erts_alloc(ERTS_ALC_T_PORT_NAMES, pnp_len);
}
- erts_smp_port_state_lock(prt);
- if (id != prt->id) {
- len = nlen = 0;
- name = driver_name = NULL;
- }
- else {
- name = prt->name;
- len = nlen = name ? sys_strlen(name) + 1 : 0;
- driver_name = (prt->drv_ptr ? prt->drv_ptr->name : NULL);
- len += driver_name ? sys_strlen(driver_name) + 1 : 0;
- }
+ name = prt->name;
+ len = nlen = name ? sys_strlen(name) + 1 : 0;
+ driver_name = (prt->drv_ptr ? prt->drv_ptr->name : NULL);
+ len += driver_name ? sys_strlen(driver_name) + 1 : 0;
if (len <= pnp_len - sizeof(ErtsPortNames)) {
if (!name)
pnp->name = NULL;
@@ -2794,7 +5025,6 @@ erts_get_port_names(Eterm id)
}
do_realloc = 0;
}
- erts_smp_port_state_unlock(prt);
} while (do_realloc);
}
return pnp;
@@ -2819,11 +5049,9 @@ static void schedule_port_timeout(Port *p)
* /Rickard
*/
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p));
- (void) erts_port_task_schedule(p->id,
- &p->timeout_task,
- ERTS_PORT_TASK_TIMEOUT,
- (ErlDrvEvent) -1,
- NULL);
+ erts_port_task_schedule(p->common.id,
+ &p->timeout_task,
+ ERTS_PORT_TASK_TIMEOUT);
}
ErlDrvTermData driver_mk_term_nil(void)
@@ -2831,9 +5059,9 @@ ErlDrvTermData driver_mk_term_nil(void)
return driver_term_nil;
}
-void driver_report_exit(int ix, int status)
+void driver_report_exit(ErlDrvPort ix, int status)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
Eterm* hp;
Eterm tuple;
Process *rp;
@@ -2841,13 +5069,17 @@ void driver_report_exit(int ix, int status)
ErlHeapFragment *bp = NULL;
ErlOffHeap *ohp;
ErtsProcLocks rp_locks = 0;
+ int scheduler = erts_get_scheduler_id() != 0;
ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- pid = prt->connected;
+ pid = ERTS_PORT_GET_CONNECTED(prt);
ASSERT(is_internal_pid(pid));
- rp = erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC);
+
+ rp = (scheduler
+ ? erts_proc_lookup(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0, ERTS_P2P_FLG_SMP_INC_REFC));
if (!rp)
return;
@@ -2855,7 +5087,7 @@ void driver_report_exit(int ix, int status)
tuple = TUPLE2(hp, am_exit_status, make_small(status));
hp += 3;
- tuple = TUPLE2(hp, prt->id, tuple);
+ tuple = TUPLE2(hp, prt->common.id, tuple);
erts_queue_message(rp, &rp_locks, bp, tuple, am_undefined
#ifdef USE_VM_PROBES
@@ -2864,29 +5096,8 @@ void driver_report_exit(int ix, int status)
);
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
-}
-
-
-static ERTS_INLINE int
-deliver_term_check_port(ErlDrvPort drvport)
-{
- int res;
- int ix = (int) drvport;
- if (ix < 0 || erts_max_ports <= ix)
- res = -1; /* invalid */
- else {
- Port* prt = &erts_port[ix];
- erts_smp_port_state_lock(prt);
- if (!(prt->status & ERTS_PORT_SFLGS_INVALID_LOOKUP))
- res = 1; /* ok */
- else if (prt->status & ERTS_PORT_SFLG_CLOSING)
- res = 0; /* closing */
- else
- res = -1; /* invalid (dead) */
- erts_smp_port_state_unlock(prt);
- }
- return res;
+ if (!scheduler)
+ erts_smp_proc_dec_refc(rp);
}
#define ERTS_B2T_STATES_DEF_STATES_SZ 5
@@ -2976,10 +5187,7 @@ cleanup_b2t_states(struct b2t_states__ *b2tsp)
*/
static int
-driver_deliver_term(ErlDrvPort port,
- Eterm to,
- ErlDrvTermData* data,
- int len)
+driver_deliver_term(Eterm to, ErlDrvTermData* data, int len)
{
#define ERTS_DDT_FAIL do { res = -1; goto done; } while (0)
Uint need = 0;
@@ -2995,6 +5203,7 @@ driver_deliver_term(ErlDrvPort port,
ErlOffHeap *ohp;
ErtsProcLocks rp_locks = 0;
struct b2t_states__ b2t;
+ int scheduler = 1; /* Silence erroneous warning... */
init_b2t_states(&b2t);
@@ -3180,13 +5389,16 @@ driver_deliver_term(ErlDrvPort port,
b2t.ix = 0;
/*
- * The term is OK. Go ahead and validate the port and process.
+ * The term is OK. Go ahead and validate the process.
*/
- res = deliver_term_check_port(port);
- if (res <= 0)
- goto done;
- rp = erts_pid2proc_opt(NULL, 0, to, rp_locks, ERTS_P2P_FLG_SMP_INC_REFC);
+ /*
+ * Increase refc on proc if done from a non-scheduler thread.
+ */
+ scheduler = erts_get_scheduler_id() != 0;
+ rp = (scheduler
+ ? erts_proc_lookup(to)
+ : erts_pid2proc_opt(NULL, 0, to, 0, ERTS_P2P_FLG_SMP_INC_REFC));
if (!rp) {
res = 0;
goto done;
@@ -3438,7 +5650,8 @@ driver_deliver_term(ErlDrvPort port,
if (rp) {
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
- erts_smp_proc_dec_refc(rp);
+ if (!scheduler)
+ erts_smp_proc_dec_refc(rp);
}
#endif
cleanup_b2t_states(&b2t);
@@ -3447,25 +5660,115 @@ driver_deliver_term(ErlDrvPort port,
#undef ERTS_DDT_FAIL
}
+static ERTS_INLINE int
+deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p)
+{
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
+#endif
+ Port *prt = erts_port_lookup_raw((Eterm) port_id);
+ erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
+ if (connected_p) {
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ ETHR_MEMBAR(ETHR_LoadLoad);
+#endif
+ *connected_p = ERTS_PORT_GET_CONNECTED(prt);
+ }
+#ifdef ERTS_SMP
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
+ erts_thr_progress_unmanaged_continue(dhndl);
+ ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
+ }
+#endif
+ ERTS_SMP_LC_ASSERT(dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED
+ ? erts_lc_is_port_locked(prt)
+ : !erts_lc_is_port_locked(prt));
+ return ((state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ ? -1
+ : ((state & ERTS_PORT_SFLG_CLOSING) ? 0 : 1));
+}
+
+int erl_drv_output_term(ErlDrvTermData port_id, ErlDrvTermData* data, int len)
+{
+ /* May be called from arbitrary thread */
+ Eterm connected;
+ int res = deliver_term_check_port(port_id, &connected);
+ if (res <= 0)
+ return res;
+ return driver_deliver_term(connected, data, len);
+}
+/*
+ * driver_output_term() is deprecated, and has been scheduled for
+ * removal in OTP-R17. It is replaced by erl_drv_output_term()
+ * above.
+ */
int
-driver_output_term(ErlDrvPort ix, ErlDrvTermData* data, int len)
+driver_output_term(ErlDrvPort drvport, ErlDrvTermData* data, int len)
{
- Port* prt = erts_drvport2port(ix);
+ erts_aint32_t state;
+ Port* prt;
ERTS_SMP_CHK_NO_PROC_LOCKS;
+ /* NOTE! It is *not* safe to access 'drvport' from unmanaged threads. */
+ prt = erts_drvport2port(drvport, &state);
+ if (!prt)
+ return -1; /* invalid (dead) */
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- if (prt == NULL)
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
return -1;
- return driver_deliver_term(ix, prt->connected, data, len);
+ else if (state & ERTS_PORT_SFLG_CLOSING)
+ return 0;
+
+ return driver_deliver_term(ERTS_PORT_GET_CONNECTED(prt), data, len);
}
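
Editorial note, not part of the patch: a minimal, hedged sketch of how a driver might move from the deprecated driver_output_term() to the new erl_drv_output_term(). The names send_counter and my_driver are made up for illustration; the port term is assumed to have been captured with driver_mk_port() while running in a port callback.

#include "erl_driver.h"

/* Builds the term {my_driver, Port, Counter} and delivers it to whatever
 * process is currently connected to the port. Because erl_drv_output_term()
 * takes the port as an ErlDrvTermData, it may be called from any thread. */
static void send_counter(ErlDrvTermData port_term, int counter)
{
    ErlDrvTermData spec[] = {
        ERL_DRV_ATOM,  driver_mk_atom("my_driver"),
        ERL_DRV_PORT,  port_term,
        ERL_DRV_INT,   (ErlDrvTermData) counter,
        ERL_DRV_TUPLE, 3
    };
    erl_drv_output_term(port_term, spec, (int) (sizeof(spec) / sizeof(spec[0])));
}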
+int erl_drv_send_term(ErlDrvTermData port_id,
+ ErlDrvTermData to,
+ ErlDrvTermData* data,
+ int len)
+{
+ /* May be called from arbitrary thread */
+ int res = deliver_term_check_port(port_id, NULL);
+ if (res <= 0)
+ return res;
+ return driver_deliver_term(to, data, len);
+}
+/*
+ * driver_send_term() is deprecated, and has been scheduled for
+ * removal in OTP-R17. It is replaced by erl_drv_send_term() above.
+ */
int
-driver_send_term(ErlDrvPort ix, ErlDrvTermData to, ErlDrvTermData* data, int len)
+driver_send_term(ErlDrvPort drvport,
+ ErlDrvTermData to,
+ ErlDrvTermData* data,
+ int len)
{
- return driver_deliver_term(ix, to, data, len);
+ /*
+ * NOTE! It is *not* safe to access the 'drvport' parameter
+ * from unmanaged threads. Also note that it is impossible
+ * to make this access safe without using a less efficient
+ * internal data representation for ErlDrvPort.
+ */
+ ERTS_SMP_CHK_NO_PROC_LOCKS;
+#ifdef ERTS_SMP
+ if (erts_thr_progress_is_managed_thread())
+#endif
+ {
+ erts_aint32_t state;
+ Port* prt = erts_drvport2port(drvport, &state);
+ if (!prt)
+ return -1; /* invalid (dead) */
+ ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP)
+ return -1;
+ else if (state & ERTS_PORT_SFLG_CLOSING)
+ return 0;
+ }
+ return driver_deliver_term(to, data, len);
}
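
Editorial note, not part of the patch: the corresponding hedged sketch for erl_drv_send_term(), which replaces the deprecated driver_send_term(). reply_ok is an illustrative name; the caller pid is assumed to have been captured with driver_caller() inside a port callback (a scheduler thread), after which the reply may be sent from any thread.

#include "erl_driver.h"

/* Sends {ok, Port} to a specific process rather than to the connected one. */
static void reply_ok(ErlDrvTermData port_term, ErlDrvTermData caller)
{
    ErlDrvTermData spec[] = {
        ERL_DRV_ATOM,  driver_mk_atom("ok"),
        ERL_DRV_PORT,  port_term,
        ERL_DRV_TUPLE, 2
    };
    erl_drv_send_term(port_term, caller, spec, (int) (sizeof(spec) / sizeof(spec[0])));
}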
@@ -3477,26 +5780,27 @@ driver_send_term(ErlDrvPort ix, ErlDrvTermData to, ErlDrvTermData* data, int len
int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
ErlDrvBinary* bin, ErlDrvSizeT offs, ErlDrvSizeT len)
{
- Port* prt = erts_drvport2port(ix);
+ erts_aint32_t state;
+ Port* prt = erts_drvport2port(ix, &state);
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
prt->bytes_in += (hlen + len);
erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
- if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
+ if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
return erts_net_message(prt,
prt->dist_entry,
(byte*) hbuf, hlen,
(byte*) (bin->orig_bytes+offs), len);
}
else
- deliver_bin_message(prt, prt->connected,
+ deliver_bin_message(prt, ERTS_PORT_GET_CONNECTED(prt),
hbuf, hlen, bin, offs, len);
return 0;
}
@@ -3511,7 +5815,8 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
char* buf, ErlDrvSizeT len)
{
- Port* prt = erts_drvport2port(ix);
+ erts_aint32_t state;
+ Port* prt = erts_drvport2port(ix, &state);
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -3520,12 +5825,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
prt->bytes_in += (hlen + len);
erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + len));
- if (prt->status & ERTS_PORT_SFLG_DISTRIBUTION) {
+ if (state & ERTS_PORT_SFLG_DISTRIBUTION) {
if (len == 0)
return erts_net_message(prt,
prt->dist_entry,
@@ -3537,10 +5842,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
(byte*) hbuf, hlen,
(byte*) buf, len);
}
- else if(prt->status & ERTS_PORT_SFLG_LINEBUF_IO)
- deliver_linebuf_message(prt, prt->connected, hbuf, hlen, buf, len);
+ else if (state & ERTS_PORT_SFLG_LINEBUF_IO)
+ deliver_linebuf_message(prt, state, ERTS_PORT_GET_CONNECTED(prt),
+ hbuf, hlen, buf, len);
else
- deliver_read_message(prt, prt->connected, hbuf, hlen, buf, len, 0);
+ deliver_read_message(prt, state, ERTS_PORT_GET_CONNECTED(prt),
+ hbuf, hlen, buf, len, 0);
return 0;
}
@@ -3561,6 +5868,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
SysIOVec* iov;
ErlDrvBinary** binv;
Port* prt;
+ erts_aint32_t state;
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -3573,13 +5881,13 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
if (hlen < 0)
hlen = 0;
- prt = erts_drvport2port(ix);
+ prt = erts_drvport2port(ix, &state);
if (prt == NULL)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- if (prt->status & ERTS_PORT_SFLG_CLOSING)
+ if (state & ERTS_PORT_SFLG_CLOSING)
return 0;
/* size > 0 ! */
@@ -3604,7 +5912,8 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen,
/* XXX handle distribution !!! */
prt->bytes_in += (hlen + size);
erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) (hlen + size));
- deliver_vec_message(prt, prt->connected, hbuf, hlen, binv, iov, n, size);
+ deliver_vec_message(prt, ERTS_PORT_GET_CONNECTED(prt), hbuf, hlen,
+ binv, iov, n, size);
return 0;
}
@@ -3715,8 +6024,7 @@ ErlDrvBinary* driver_realloc_binary(ErlDrvBinary* bin, ErlDrvSizeT size)
}
-void driver_free_binary(dbin)
-ErlDrvBinary* dbin;
+void driver_free_binary(ErlDrvBinary* dbin)
{
Binary *bin;
if (!dbin) {
@@ -3812,6 +6120,7 @@ static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl)
{
ERTS_LC_ASSERT(driver_pdl_get_refc(pdl) == 0);
erts_mtx_destroy(&pdl->mtx);
+ erts_port_dec_refc(pdl->prt);
erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl);
}
@@ -3849,16 +6158,18 @@ ErlDrvPDL
driver_pdl_create(ErlDrvPort dp)
{
ErlDrvPDL pdl;
- Port *pp = erts_drvport2port(dp);
+ Port *pp = erts_drvport2port(dp, NULL);
if (!pp || pp->port_data_lock)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
erts_mtx_init(&pdl->mtx, "port_data_lock");
pdl_init_refc(pdl);
+ erts_port_inc_refc(pp);
+ pdl->prt = pp;
pp->port_data_lock = pdl;
#ifdef HARDDEBUG
- erts_fprintf(stderr, "driver_pdl_create(%T) -> 0x%08X\r\n",pp->id,(unsigned) pdl);
+ erts_fprintf(stderr, "driver_pdl_create(%T) -> 0x%08X\r\n",pp->common.id,(unsigned) pdl);
#endif
return pdl;
}
@@ -4296,33 +6607,33 @@ static ERTS_INLINE void
drv_cancel_timer(Port *prt)
{
#ifdef ERTS_SMP
- erts_cancel_smp_ptimer(prt->ptimer);
+ erts_cancel_smp_ptimer(prt->common.u.alive.ptimer);
#else
- erts_cancel_timer(&prt->tm);
+ erts_cancel_timer(&prt->common.u.alive.tm);
#endif
if (erts_port_task_is_scheduled(&prt->timeout_task))
- erts_port_task_abort(prt->id, &prt->timeout_task);
+ erts_port_task_abort(&prt->timeout_task);
}
int driver_set_timer(ErlDrvPort ix, unsigned long t)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+
if (prt->drv_ptr->timeout == NULL)
return -1;
drv_cancel_timer(prt);
#ifdef ERTS_SMP
- erts_create_smp_ptimer(&prt->ptimer,
- prt->id,
+ erts_create_smp_ptimer(&prt->common.u.alive.ptimer,
+ prt->common.id,
(ErlTimeoutProc) schedule_port_timeout,
t);
#else
- erts_set_timer(&prt->tm,
+ erts_set_timer(&prt->common.u.alive.tm,
(ErlTimeoutProc) schedule_port_timeout,
NULL,
prt,
@@ -4333,7 +6644,7 @@ int driver_set_timer(ErlDrvPort ix, unsigned long t)
int driver_cancel_timer(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
if (prt == NULL)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
@@ -4345,7 +6656,7 @@ int driver_cancel_timer(ErlDrvPort ix)
int
driver_read_timer(ErlDrvPort ix, unsigned long* t)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -4353,9 +6664,11 @@ driver_read_timer(ErlDrvPort ix, unsigned long* t)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
#ifdef ERTS_SMP
- *t = prt->ptimer ? erts_time_left(&prt->ptimer->timer.tm) : 0;
+ *t = (prt->common.u.alive.ptimer
+ ? erts_time_left(&prt->common.u.alive.ptimer->timer.tm)
+ : 0);
#else
- *t = erts_time_left(&prt->tm);
+ *t = erts_time_left(&prt->common.u.alive.tm);
#endif
return 0;
}
@@ -4406,8 +6719,8 @@ static int do_driver_monitor_process(Port *prt,
}
ref = erts_make_ref_in_buffer(buf);
- erts_add_monitor(&(prt->monitors), MON_ORIGIN, ref, rp->id, NIL);
- erts_add_monitor(&(rp->monitors), MON_TARGET, ref, prt->id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(prt), MON_ORIGIN, ref, rp->common.id, NIL);
+ erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
ref_to_driver_monitor(ref,monitor);
@@ -4417,31 +6730,22 @@ static int do_driver_monitor_process(Port *prt,
/*
* This can be called from a non scheduler thread iff a port_data_lock exists
*/
-int driver_monitor_process(ErlDrvPort port,
+int driver_monitor_process(ErlDrvPort drvport,
ErlDrvTermData process,
ErlDrvMonitor *monitor)
{
Port *prt;
int ret;
- Uint32 status;
+ erts_aint32_t state;
+#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK))
ErtsSchedulerData *sched = erts_get_scheduler_data();
- int ix = (int) port;
- if (ix < 0 || erts_max_ports <= ix) {
- return -1;
- }
- prt = &erts_port[ix];
+#endif
- DRV_MONITOR_LOCK_PDL(prt);
+ prt = DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(drvport);
- if (sched) {
- status = erts_port[ix].status;
- } else {
- erts_smp_port_state_lock(prt);
- status = erts_port[ix].status;
- erts_smp_port_state_unlock(prt);
- }
+ state = erts_atomic32_read_nob(&prt->state);
- if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
DRV_MONITOR_UNLOCK_PDL(prt);
return -1;
}
@@ -4479,7 +6783,7 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf,
memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
ref = make_internal_ref(buf);
- mon = erts_lookup_monitor(prt->monitors, ref);
+ mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return 1;
}
@@ -4491,13 +6795,13 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf,
to,
ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
- mon = erts_remove_monitor(&(prt->monitors), ref);
+ mon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref);
if (mon) {
erts_destroy_monitor(mon);
}
if (rp) {
ErtsMonitor *rmon;
- rmon = erts_remove_monitor(&(rp->monitors), ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
if (rmon != NULL) {
erts_destroy_monitor(rmon);
@@ -4506,30 +6810,21 @@ static int do_driver_demonitor_process(Port *prt, Eterm *buf,
return 0;
}
-int driver_demonitor_process(ErlDrvPort port,
+int driver_demonitor_process(ErlDrvPort drvport,
const ErlDrvMonitor *monitor)
{
Port *prt;
int ret;
- Uint32 status;
+ erts_aint32_t state;
+#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK))
ErtsSchedulerData *sched = erts_get_scheduler_data();
- int ix = (int) port;
- if (ix < 0 || erts_max_ports <= ix) {
- return -1;
- }
- prt = &erts_port[ix];
+#endif
- DRV_MONITOR_LOCK_PDL(prt);
+ prt = DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(drvport);
- if (sched) {
- status = erts_port[ix].status;
- } else {
- erts_smp_port_state_lock(prt);
- status = erts_port[ix].status;
- erts_smp_port_state_unlock(prt);
- }
+ state = erts_atomic32_read_nob(&prt->state);
- if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
DRV_MONITOR_UNLOCK_PDL(prt);
return -1;
}
@@ -4565,7 +6860,7 @@ static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf,
memcpy(buf,monitor,sizeof(Eterm)*REF_THING_SIZE);
ref = make_internal_ref(buf);
- mon = erts_lookup_monitor(prt->monitors, ref);
+ mon = erts_lookup_monitor(ERTS_P_MONITORS(prt), ref);
if (mon == NULL) {
return driver_term_nil;
}
@@ -4576,30 +6871,20 @@ static ErlDrvTermData do_driver_get_monitored_process(Port *prt, Eterm *buf,
}
-ErlDrvTermData driver_get_monitored_process(ErlDrvPort port,
+ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport,
const ErlDrvMonitor *monitor)
{
Port *prt;
ErlDrvTermData ret;
- Uint32 status;
+ erts_aint32_t state;
+#if !HEAP_ON_C_STACK || (defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK))
ErtsSchedulerData *sched = erts_get_scheduler_data();
- int ix = (int) port;
- if (ix < 0 || erts_max_ports <= ix) {
- return driver_term_nil;
- }
- prt = &erts_port[ix];
-
- DRV_MONITOR_LOCK_PDL(prt);
+#endif
- if (sched) {
- status = erts_port[ix].status;
- } else {
- erts_smp_port_state_lock(prt);
- status = erts_port[ix].status;
- erts_smp_port_state_unlock(prt);
- }
+ prt = DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(drvport);
- if (status & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
+ state = erts_atomic32_read_nob(&prt->state);
+ if (state & ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP) {
DRV_MONITOR_UNLOCK_PDL(prt);
return driver_term_nil;
}
@@ -4644,7 +6929,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(prt->drv_ptr != NULL);
DRV_MONITOR_LOCK_PDL(prt);
- if (erts_lookup_monitor(prt->monitors,ref) == NULL) {
+ if (erts_lookup_monitor(ERTS_P_MONITORS(prt), ref) == NULL) {
DRV_MONITOR_UNLOCK_PDL(prt);
return;
}
@@ -4654,7 +6939,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
DRV_MONITOR_UNLOCK_PDL(prt);
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(driver_process_exit)) {
- DTRACE_FORMAT_COMMON_PID_AND_PORT(prt->connected, prt)
+ DTRACE_FORMAT_COMMON_PID_AND_PORT(ERTS_PORT_GET_CONNECTED(prt), prt)
DTRACE3(driver_process_exit, process_str, port_str, prt->name);
}
#endif
@@ -4663,7 +6948,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
erts_unblock_fpe(fpe_was_unmasked);
DRV_MONITOR_LOCK_PDL(prt);
/* remove monitor *after* callback */
- rmon = erts_remove_monitor(&(prt->monitors),ref);
+ rmon = erts_remove_monitor(&ERTS_P_MONITORS(prt), ref);
DRV_MONITOR_UNLOCK_PDL(prt);
if (rmon) {
erts_destroy_monitor(rmon);
@@ -4674,7 +6959,8 @@ void erts_fire_port_monitor(Port *prt, Eterm ref)
static int
driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
{
- Port* prt = erts_drvport2port(ix);
+ erts_aint32_t state;
+ Port* prt = erts_drvport2port(ix, &state);
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -4682,19 +6968,19 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
return -1;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if (eof)
- flush_linebuf_messages(prt);
- if (prt->status & ERTS_PORT_SFLG_CLOSING) {
+ flush_linebuf_messages(prt, state);
+ if (state & ERTS_PORT_SFLG_CLOSING) {
terminate_port(prt);
- } else if (eof && (prt->status & ERTS_PORT_SFLG_SOFT_EOF)) {
- deliver_result(prt->id, prt->connected, am_eof);
+ } else if (eof && (state & ERTS_PORT_SFLG_SOFT_EOF)) {
+ deliver_result(prt->common.id, ERTS_PORT_GET_CONNECTED(prt), am_eof);
} else {
- /* XXX UGLY WORK AROUND, Let do_exit_port terminate the port */
+ /* XXX UGLY WORK AROUND, Let erts_deliver_port_exit() terminate the port */
if (prt->port_data_lock)
driver_pdl_lock(prt->port_data_lock);
prt->ioq.size = 0;
if (prt->port_data_lock)
driver_pdl_unlock(prt->port_data_lock);
- erts_do_exit_port(prt, prt->id, eof ? am_normal : term);
+ erts_deliver_port_exit(prt, prt->common.id, eof ? am_normal : term, 0);
}
return 0;
}
@@ -4707,23 +6993,23 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof)
*/
int driver_exit(ErlDrvPort ix, int err)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
Process* rp;
ErtsLink *lnk, *rlnk = NULL;
+ Eterm connected;
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return -1;
- ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
-
- rp = erts_pid2proc(NULL, 0, prt->connected, ERTS_PROC_LOCK_LINK);
+ connected = ERTS_PORT_GET_CONNECTED(prt);
+ rp = erts_pid2proc(NULL, 0, connected, ERTS_PROC_LOCK_LINK);
if (rp) {
- rlnk = erts_remove_link(&(rp->nlinks),prt->id);
+ rlnk = erts_remove_link(&ERTS_P_LINKS(rp),prt->common.id);
}
- lnk = erts_remove_link(&(prt->nlinks),prt->connected);
+ lnk = erts_remove_link(&ERTS_P_LINKS(prt), connected);
#ifdef ERTS_SMP
if (rp)
@@ -4780,24 +7066,24 @@ ErlDrvTermData driver_mk_atom(char* string)
ErlDrvTermData driver_mk_port(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- return (ErlDrvTermData) prt->id;
+ return (ErlDrvTermData) prt->common.id;
}
ErlDrvTermData driver_connected(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return NIL;
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
- return prt->connected;
+ return ERTS_PORT_GET_CONNECTED(prt);
}
ErlDrvTermData driver_caller(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (prt == NULL)
return NIL;
@@ -4807,25 +7093,25 @@ ErlDrvTermData driver_caller(ErlDrvPort ix)
int driver_lock_driver(ErlDrvPort ix)
{
- Port* prt = erts_drvport2port(ix);
+ Port* prt = erts_drvport2port(ix, NULL);
DE_Handle* dh;
ERTS_SMP_CHK_NO_PROC_LOCKS;
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
if (prt == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
return -1;
}
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
return -1;
}
erts_ddll_lock_driver(dh, prt->drv_ptr->name);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
return 0;
}
@@ -4835,7 +7121,7 @@ static int maybe_lock_driver_list(void)
void *rec_lock;
rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
if (rec_lock == 0) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
return 1;
}
return 0;
@@ -4843,7 +7129,7 @@ static int maybe_lock_driver_list(void)
static void maybe_unlock_driver_list(int doit)
{
if (doit) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
}
/*
@@ -5012,7 +7298,7 @@ no_event_callback(ErlDrvData drv_data, ErlDrvEvent event, ErlDrvEventData event_
{
Port *prt = get_current_port();
report_missing_drv_callback(prt, "Event", "event()");
- driver_event((ErlDrvPort) internal_port_index(prt->id), event, NULL);
+ driver_event((ErlDrvPort) prt, event, NULL);
}
static void
@@ -5020,7 +7306,7 @@ no_ready_input_callback(ErlDrvData drv_data, ErlDrvEvent event)
{
Port *prt = get_current_port();
report_missing_drv_callback(prt, "Input", "ready_input()");
- driver_select((ErlDrvPort) internal_port_index(prt->id), event,
+ driver_select((ErlDrvPort) prt, event,
(ERL_DRV_READ | ERL_DRV_USE_NO_CALLBACK), 0);
}
@@ -5029,7 +7315,7 @@ no_ready_output_callback(ErlDrvData drv_data, ErlDrvEvent event)
{
Port *prt = get_current_port();
report_missing_drv_callback(prt, "Output", "ready_output()");
- driver_select((ErlDrvPort) internal_port_index(prt->id), event,
+ driver_select((ErlDrvPort) prt, event,
(ERL_DRV_WRITE | ERL_DRV_USE_NO_CALLBACK), 0);
}
@@ -5064,13 +7350,13 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->lock = NULL;
else {
drv->lock = erts_alloc(ERTS_ALC_T_DRIVER_LOCK,
- sizeof(erts_smp_mtx_t));
- erts_smp_mtx_init_x(drv->lock,
- "driver_lock",
+ sizeof(erts_mtx_t));
+ erts_mtx_init_x(drv->lock,
+ "driver_lock",
#if defined(ERTS_ENABLE_LOCK_CHECK) || defined(ERTS_ENABLE_LOCK_COUNT)
- am_atom_put(drv->name, sys_strlen(drv->name))
+ am_atom_put(drv->name, sys_strlen(drv->name))
#else
- NIL
+ NIL
#endif
);
}
@@ -5142,7 +7428,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo
int res;
if (!driver_list_locked) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
}
dp->next = driver_list;
@@ -5171,7 +7457,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo
if (!driver_list_locked) {
erts_smp_tsd_set(driver_list_lock_status_key, NULL);
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
return res;
}
@@ -5184,7 +7470,7 @@ int remove_driver_entry(ErlDrvEntry *drv)
rec_lock = erts_smp_tsd_get(driver_list_lock_status_key);
if (rec_lock == NULL) {
- erts_smp_mtx_lock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwlock(&erts_driver_list_lock);
}
dp = driver_list;
while (dp && dp->entry != drv)
@@ -5192,7 +7478,7 @@ int remove_driver_entry(ErlDrvEntry *drv)
if (dp) {
if (dp->handle) {
if (rec_lock == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
return -1;
}
@@ -5206,12 +7492,12 @@ int remove_driver_entry(ErlDrvEntry *drv)
}
erts_destroy_driver(dp);
if (rec_lock == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
return 1;
}
if (rec_lock == NULL) {
- erts_smp_mtx_unlock(&erts_driver_list_lock);
+ erts_smp_rwmtx_rwunlock(&erts_driver_list_lock);
}
return 0;
}
@@ -5241,18 +7527,22 @@ erl_drv_getenv(char *key, char *value, size_t *value_size)
* - uses the fact that heart_port is registered when starting heart
*/
-Port *erts_get_heart_port() {
+Port *erts_get_heart_port(void)
+{
+ int ix, max = erts_ptab_max(&erts_port);
- Port* port;
- Uint ix;
+ for (ix = 0; ix < max; ix++) {
+ struct reg_proc *reg;
+ Port *port = erts_pix2port(ix);
- for(ix = 0; ix < erts_max_ports; ix++) {
- port = &erts_port[ix];
+ if (!port)
+ continue;
/* only examine undead or alive ports */
- if (port->status & ERTS_PORT_SFLGS_DEAD)
+ if (erts_atomic32_read_nob(&port->state) & ERTS_PORT_SFLGS_DEAD)
continue;
/* immediate atom compare */
- if (port->reg && port->reg->name == am_heart_port) {
+ reg = port->common.u.alive.reg;
+ if (reg && reg->name == am_heart_port) {
return port;
}
}
diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c
index b93b1ad09a..01856007ee 100644
--- a/erts/emulator/beam/module.c
+++ b/erts/emulator/beam/module.c
@@ -26,21 +26,32 @@
#include "global.h"
#include "module.h"
+#ifdef DEBUG
+# define IF_DEBUG(x) x
+#else
+# define IF_DEBUG(x)
+#endif
+
#define MODULE_SIZE 50
#define MODULE_LIMIT (64*1024)
-static IndexTable module_table;
+static IndexTable module_tables[ERTS_NUM_CODE_IX];
-/*
- * SMP note: We don't need to look accesses to the module table because
- * there is one only scheduler thread when we update it.
+erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
+
+static erts_smp_atomic_t tot_module_bytes;
+
+/* SMP note: Active module table lookup and current module instance can be
+ * read without any locks. Old module instances are protected by
+ * "the_old_code_rwlocks" as purging is done on active module table.
+ * Staging table is protected by the "code_ix lock".
*/
#include "erl_smp.h"
void module_info(int to, void *to_arg)
{
- index_info(to, to_arg, &module_table);
+ index_info(to, to_arg, &module_tables[erts_active_code_ix()]);
}
@@ -59,45 +70,67 @@ static int module_cmp(Module* tmpl, Module* obj)
static Module* module_alloc(Module* tmpl)
{
Module* obj = (Module*) erts_alloc(ERTS_ALC_T_MODULE, sizeof(Module));
+ erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module));
obj->module = tmpl->module;
- obj->code = 0;
- obj->old_code = 0;
- obj->code_length = 0;
- obj->old_code_length = 0;
+ obj->curr.code = 0;
+ obj->old.code = 0;
+ obj->curr.code_length = 0;
+ obj->old.code_length = 0;
obj->slot.index = -1;
- obj->nif = NULL;
- obj->old_nif = NULL;
+ obj->curr.nif = NULL;
+ obj->old.nif = NULL;
+ obj->curr.num_breakpoints = 0;
+ obj->old.num_breakpoints = 0;
+ obj->curr.num_traced_exports = 0;
+ obj->old.num_traced_exports = 0;
return obj;
}
+static void module_free(Module* mod)
+{
+ erts_free(ERTS_ALC_T_MODULE, mod);
+ erts_smp_atomic_add_nob(&tot_module_bytes, -sizeof(Module));
+}
void init_module_table(void)
{
HashFunctions f;
+ int i;
f.hash = (H_FUN) module_hash;
f.cmp = (HCMP_FUN) module_cmp;
f.alloc = (HALLOC_FUN) module_alloc;
- f.free = 0;
+ f.free = (HFREE_FUN) module_free;
+
+ for (i = 0; i < ERTS_NUM_CODE_IX; i++) {
+ erts_index_init(ERTS_ALC_T_MODULE_TABLE, &module_tables[i], "module_code",
+ MODULE_SIZE, MODULE_LIMIT, f);
+ }
- erts_index_init(ERTS_ALC_T_MODULE_TABLE, &module_table, "module_code",
- MODULE_SIZE, MODULE_LIMIT, f);
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ erts_smp_rwmtx_init_x(&the_old_code_rwlocks[i], "old_code", make_small(i));
+ }
+ erts_smp_atomic_init_nob(&tot_module_bytes, 0);
}
Module*
-erts_get_module(Eterm mod)
+erts_get_module(Eterm mod, ErtsCodeIndex code_ix)
{
Module e;
int index;
+ IndexTable* mod_tab;
ASSERT(is_atom(mod));
+
+ mod_tab = &module_tables[code_ix];
+
e.module = atom_val(mod);
- index = index_get(&module_table, (void*) &e);
+ index = index_get(mod_tab, (void*) &e);
if (index == -1) {
return NULL;
} else {
- return (Module*) erts_index_lookup(&module_table, index);
+ return (Module*) erts_index_lookup(mod_tab, index);
}
}
@@ -105,27 +138,101 @@ Module*
erts_put_module(Eterm mod)
{
Module e;
- int index;
+ IndexTable* mod_tab;
+ int oldsz, newsz;
+ Module* res;
ASSERT(is_atom(mod));
ERTS_SMP_LC_ASSERT(erts_initialized == 0
- || erts_smp_thr_progress_is_blocking());
+ || erts_has_code_write_permission());
+
+ mod_tab = &module_tables[erts_staging_code_ix()];
e.module = atom_val(mod);
- index = index_put(&module_table, (void*) &e);
- return (Module*) erts_index_lookup(&module_table, index);
+ oldsz = index_table_sz(mod_tab);
+ res = (Module*) index_put_entry(mod_tab, (void*) &e);
+ newsz = index_table_sz(mod_tab);
+ erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ return res;
}
-Module *module_code(int i)
+Module *module_code(int i, ErtsCodeIndex code_ix)
{
- return (Module*) erts_index_lookup(&module_table, i);
+ return (Module*) erts_index_lookup(&module_tables[code_ix], i);
}
-int module_code_size(void)
+int module_code_size(ErtsCodeIndex code_ix)
{
- return module_table.entries;
+ return module_tables[code_ix].entries;
}
int module_table_sz(void)
{
- return index_table_sz(&module_table);
+ return erts_smp_atomic_read_nob(&tot_module_bytes);
+}
+
+#ifdef DEBUG
+static ErtsCodeIndex dbg_load_code_ix = 0;
+#endif
+
+static int entries_at_start_staging = 0;
+
+void module_start_staging(void)
+{
+ IndexTable* src = &module_tables[erts_active_code_ix()];
+ IndexTable* dst = &module_tables[erts_staging_code_ix()];
+ Module* src_mod;
+ Module* dst_mod;
+ int i, oldsz, newsz;
+
+ ASSERT(dbg_load_code_ix == -1);
+ ASSERT(dst->entries <= src->entries);
+
+ /*
+ * Make sure our existing modules are up-to-date
+ */
+ for (i = 0; i < dst->entries; i++) {
+ src_mod = (Module*) erts_index_lookup(src, i);
+ dst_mod = (Module*) erts_index_lookup(dst, i);
+ ASSERT(src_mod->module == dst_mod->module);
+
+ dst_mod->curr = src_mod->curr;
+ dst_mod->old = src_mod->old;
+ }
+
+ /*
+ * Copy all new modules from active table
+ */
+ oldsz = index_table_sz(dst);
+ for (i = dst->entries; i < src->entries; i++) {
+ src_mod = (Module*) erts_index_lookup(src, i);
+ dst_mod = (Module*) index_put_entry(dst, src_mod);
+ ASSERT(dst_mod != src_mod);
+
+ dst_mod->curr = src_mod->curr;
+ dst_mod->old = src_mod->old;
+ }
+ newsz = index_table_sz(dst);
+ erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+
+ entries_at_start_staging = dst->entries;
+ IF_DEBUG(dbg_load_code_ix = erts_staging_code_ix());
+}
+
+void module_end_staging(int commit)
+{
+ ASSERT(dbg_load_code_ix == erts_staging_code_ix());
+
+ if (!commit) { /* abort */
+ IndexTable* tab = &module_tables[erts_staging_code_ix()];
+ int oldsz, newsz;
+
+ ASSERT(entries_at_start_staging <= tab->entries);
+ oldsz = index_table_sz(tab);
+ index_erase_latest_from(tab, entries_at_start_staging);
+ newsz = index_table_sz(tab);
+ erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz));
+ }
+
+ IF_DEBUG(dbg_load_code_ix = -1);
}
+
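
Editorial note, not part of the patch: a hedged sketch of the staging sequence the new module table supports. The surrounding machinery (taking code write permission and switching the staging code index to active) is elided and lives elsewhere in the loader; stage_one_module_sketch is an illustrative name only.

/* Stage, mutate, then commit or abort the staging module table. */
static void stage_one_module_sketch(Eterm module_atom, int commit)
{
    Module* modp;

    module_start_staging();               /* copy active entries to staging  */
    modp = erts_put_module(module_atom);  /* insert into the staging table   */
    /* ... install freshly loaded code via modp->curr ... */
    (void) modp;
    module_end_staging(commit);           /* commit == 0 erases new entries  */
}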
diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h
index 694e4ab72f..6e123a0ffe 100644
--- a/erts/emulator/beam/module.h
+++ b/erts/emulator/beam/module.h
@@ -24,28 +24,72 @@
#include "index.h"
#endif
+struct erl_module_instance {
+ BeamInstr* code;
+ int code_length; /* Length of loaded code in bytes. */
+ unsigned catches;
+ struct erl_module_nif* nif;
+ int num_breakpoints;
+ int num_traced_exports;
+};
typedef struct erl_module {
IndexSlot slot; /* Must be located at top of struct! */
int module; /* Atom index for module (not tagged). */
- BeamInstr* code;
- BeamInstr* old_code;
- int code_length; /* Length of loaded code in bytes. */
- int old_code_length; /* Length of old loaded code in bytes */
- unsigned catches, old_catches;
- struct erl_module_nif* nif;
- struct erl_module_nif* old_nif;
+ struct erl_module_instance curr;
+ struct erl_module_instance old; /* protected by "old_code" rwlock */
} Module;
-Module* erts_get_module(Eterm mod);
+Module* erts_get_module(Eterm mod, ErtsCodeIndex code_ix);
Module* erts_put_module(Eterm mod);
void init_module_table(void);
+void module_start_staging(void);
+void module_end_staging(int commit);
void module_info(int, void *);
-Module *module_code(int);
-int module_code_size(void);
+Module *module_code(int, ErtsCodeIndex);
+int module_code_size(ErtsCodeIndex);
int module_table_sz(void);
+ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex);
+ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex);
+ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex);
+ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex);
+#ifdef ERTS_ENABLE_LOCK_CHECK
+int erts_is_old_code_rlocked(ErtsCodeIndex);
+#endif
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+extern erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX];
+
+ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex code_ix)
+{
+ erts_smp_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]);
+}
+ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex code_ix)
+{
+ erts_smp_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]);
+}
+ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex code_ix)
+{
+ erts_smp_rwmtx_rlock(&the_old_code_rwlocks[code_ix]);
+}
+ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex code_ix)
+{
+ erts_smp_rwmtx_runlock(&the_old_code_rwlocks[code_ix]);
+}
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ERTS_GLB_INLINE int erts_is_old_code_rlocked(ErtsCodeIndex code_ix)
+{
+ return erts_smp_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]);
+}
#endif
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
+
+#endif /* !__MODULE_H__ */
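
Editorial note, not part of the patch: a hedged sketch of the locking convention described by the SMP note in module.c using the inline helpers declared above. The current instance of a module can be read lock-free from the active table, while the old instance must be read under the old-code read lock for the same code index; old_code_of is an illustrative name only.

static BeamInstr* old_code_of(Eterm module_atom)
{
    ErtsCodeIndex code_ix = erts_active_code_ix();
    Module* modp = erts_get_module(module_atom, code_ix);
    BeamInstr* old_code = NULL;

    if (modp) {
        erts_rlock_old_code(code_ix);
        old_code = modp->old.code;   /* protected by the old_code rwlock */
        erts_runlock_old_code(code_ix);
    }
    return old_code;
}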
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 9b168889dd..6764e88c81 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -62,11 +62,8 @@ label L
i_func_info I a a I
int_code_end
-i_trace_breakpoint
-i_mtrace_breakpoint
+i_generic_breakpoint
i_debug_breakpoint
-i_count_breakpoint
-i_time_breakpoint
i_return_time_trace
i_return_to_trace
i_yield
@@ -522,7 +519,6 @@ apply_bif
call_nif
call_error_handler
error_action_code
-call_traced_function
return_trace
#
@@ -829,16 +825,20 @@ call_ext_only Ar=u==2 Bif=u$bif:erlang:load_nif/2 => allocate u Ar | i_call_ext
#
-# The apply/2 and apply/3 BIFs are instructions.
+# apply/2 is an instruction, not a BIF.
#
call_ext u==2 u$func:erlang:apply/2 => i_apply_fun
call_ext_last u==2 u$func:erlang:apply/2 D => i_apply_fun_last D
call_ext_only u==2 u$func:erlang:apply/2 => i_apply_fun_only
-call_ext u==3 u$func:erlang:apply/3 => i_apply
-call_ext_last u==3 u$func:erlang:apply/3 D => i_apply_last D
-call_ext_only u==3 u$func:erlang:apply/3 => i_apply_only
+#
+# The apply/3 BIF is an instruction.
+#
+
+call_ext u==3 u$bif:erlang:apply/3 => i_apply
+call_ext_last u==3 u$bif:erlang:apply/3 D => i_apply_last D
+call_ext_only u==3 u$bif:erlang:apply/3 => i_apply_only
#
# The exit/1 and throw/1 BIFs never execute the instruction following them;
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index 26d64887d0..757e2800e6 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -93,19 +93,14 @@ reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks)
reg_write_lock();
}
+#endif
+
static ERTS_INLINE int
is_proc_alive(Process *p)
{
- int res;
- erts_pix_lock_t *pixlck = ERTS_PID2PIXLOCK(p->id);
- erts_pix_lock(pixlck);
- res = !p->is_exiting;
- erts_pix_unlock(pixlck);
- return res;
+ return !ERTS_PROC_IS_EXITING(p);
}
-#endif
-
void register_info(int to, void *to_arg)
{
int lock = !ERTS_IS_CRASH_DUMPING;
@@ -180,14 +175,14 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
if (is_not_atom(name) || name == am_undefined)
return res;
- if (c_p->id == id) /* A very common case I think... */
+ if (c_p->common.id == id) /* A very common case I think... */
proc = c_p;
else {
if (is_not_internal_pid(id) && is_not_internal_port(id))
return res;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
if (is_internal_port(id)) {
- port = erts_id2port(id, NULL, 0);
+ port = erts_id2port(id);
if (!port)
goto done;
}
@@ -209,7 +204,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
r.p = proc;
if (!proc)
goto done;
- if (proc->reg)
+ if (proc->common.u.alive.reg)
goto done;
r.pt = NULL;
}
@@ -217,7 +212,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
ASSERT(!INVALID_PORT(port, id));
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
r.pt = port;
- if (r.pt->reg)
+ if (r.pt->common.u.alive.reg)
goto done;
r.p = NULL;
}
@@ -229,23 +224,24 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
if (IS_TRACED_FL(proc, F_TRACE_PROCS)) {
trace_proc(c_p, proc, am_register, name);
}
- proc->reg = rp;
+ proc->common.u.alive.reg = rp;
}
else if (port && rp->pt == port) {
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
trace_port(port, am_register, name);
}
- port->reg = rp;
+ port->common.u.alive.reg = rp;
}
- if ((rp->p && rp->p->id == id) || (rp->pt && rp->pt->id == id)) {
+ if ((rp->p && rp->p->common.id == id)
+ || (rp->pt && rp->pt->common.id == id)) {
res = 1;
}
done:
reg_write_unlock();
if (port)
- erts_smp_port_unlock(port);
+ erts_port_release(port);
if (c_p != proc) {
if (proc)
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
@@ -296,9 +292,9 @@ erts_whereis_name_to_id(Process *c_p, Eterm name)
* is read only.
*/
if (rp->p)
- res = rp->p->id;
+ res = rp->p->common.id;
else if (rp->pt)
- res = rp->pt->id;
+ res = rp->pt->common.id;
break;
}
b = b->next;
@@ -389,8 +385,7 @@ erts_whereis_name(Process *c_p,
}
#else
if (rp->p
- && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
- || rp->p->status != P_EXITING))
+ && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p)))
*proc = rp->p;
else
*proc = NULL;
@@ -409,19 +404,19 @@ erts_whereis_name(Process *c_p,
if (pending_port) {
/* Ahh! Registered port changed while reg lock
was unlocked... */
- erts_smp_port_unlock(pending_port);
+ erts_port_release(pending_port);
pending_port = NULL;
}
if (erts_smp_port_trylock(rp->pt) == EBUSY) {
- Eterm id = rp->pt->id; /* id read only... */
+ Eterm id = rp->pt->common.id; /* id read only... */
/* Unlock all locks, acquire port lock, and restart... */
if (current_c_p_locks) {
erts_smp_proc_unlock(c_p, current_c_p_locks);
current_c_p_locks = 0;
}
reg_read_unlock();
- pending_port = erts_id2port(id, NULL, 0);
+ pending_port = erts_id2port(id);
goto restart;
}
}
@@ -435,7 +430,7 @@ erts_whereis_name(Process *c_p,
if (c_p && !current_c_p_locks)
erts_smp_proc_lock(c_p, c_p_locks);
if (pending_port)
- erts_smp_port_unlock(pending_port);
+ erts_port_release(pending_port);
#endif
reg_read_unlock();
@@ -497,8 +492,8 @@ int erts_unregister_name(Process *c_p,
current_c_p_locks = c_p_locks;
}
#endif
- if (c_p->reg) {
- r.name = c_p->reg->name;
+ if (c_p->common.u.alive.reg) {
+ r.name = c_p->common.u.alive.reg->name;
} else {
/* Name got unregistered while main lock was released */
res = 0;
@@ -511,20 +506,20 @@ int erts_unregister_name(Process *c_p,
if (port != rp->pt) {
#ifdef ERTS_SMP
if (port) {
- ERTS_SMP_LC_ASSERT(port != c_prt);
- erts_smp_port_unlock(port);
+ ASSERT(port != c_prt);
+ erts_port_release(port);
port = NULL;
}
if (erts_smp_port_trylock(rp->pt) == EBUSY) {
- Eterm id = rp->pt->id; /* id read only... */
+ Eterm id = rp->pt->common.id; /* id read only... */
/* Unlock all locks, acquire port lock, and restart... */
if (current_c_p_locks) {
erts_smp_proc_unlock(c_p, current_c_p_locks);
current_c_p_locks = 0;
}
reg_write_unlock();
- port = erts_id2port(id, NULL, 0);
+ port = erts_id2port(id);
goto restart;
}
#endif
@@ -534,7 +529,7 @@ int erts_unregister_name(Process *c_p,
ASSERT(rp->pt == port);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port));
- rp->pt->reg = NULL;
+ rp->pt->common.u.alive.reg = NULL;
if (IS_TRACED_FL(port, F_TRACE_PORTS)) {
trace_port(port, am_unregister, r.name);
@@ -551,7 +546,7 @@ int erts_unregister_name(Process *c_p,
ERTS_PROC_LOCK_MAIN);
current_c_p_locks = c_p_locks;
#endif
- rp->p->reg = NULL;
+ rp->p->common.u.alive.reg = NULL;
if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
trace_proc(c_p, rp->p, am_unregister, r.name);
}
@@ -570,7 +565,7 @@ int erts_unregister_name(Process *c_p,
reg_write_unlock();
if (c_prt != port) {
if (port) {
- erts_smp_port_unlock(port);
+ erts_port_release(port);
}
if (c_prt) {
erts_smp_port_lock(c_prt);
diff --git a/erts/emulator/beam/register.h b/erts/emulator/beam/register.h
index 38e8cfbf28..7170463375 100644
--- a/erts/emulator/beam/register.h
+++ b/erts/emulator/beam/register.h
@@ -24,26 +24,19 @@
#ifndef __REGPROC_H__
#define __REGPROC_H__
-#ifndef __SYS_H__
#include "sys.h"
-#endif
-
-#ifndef __HASH_H__
#include "hash.h"
-#endif
-
-#ifndef __PROCESS_H__
#include "erl_process.h"
-#endif
-
-struct port;
+#define ERL_PORT_GET_PORT_TYPE_ONLY__
+#include "erl_port.h"
+#undef ERL_PORT_GET_PORT_TYPE_ONLY__
typedef struct reg_proc
{
HashBucket bucket; /* MUST BE LOCATED AT TOP OF STRUCT!!! */
Process *p; /* The process registered (only one of this and
'pt' is non-NULL */
- struct port *pt; /* The port registered */
+ Port *pt; /* The port registered */
Eterm name; /* Atom name */
} RegProc;
@@ -55,12 +48,12 @@ int erts_register_name(Process *, Eterm, Eterm);
Eterm erts_whereis_name_to_id(Process *, Eterm);
void erts_whereis_name(Process *, ErtsProcLocks,
Eterm, Process**, ErtsProcLocks, int,
- struct port**);
+ Port**);
Process *erts_whereis_process(Process *,
ErtsProcLocks,
Eterm,
ErtsProcLocks,
int);
-int erts_unregister_name(Process *, ErtsProcLocks, struct port *, Eterm);
+int erts_unregister_name(Process *, ErtsProcLocks, Port *, Eterm);
#endif
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 9dd8341520..898a30b010 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -25,11 +25,6 @@
# define NO_FPE_SIGNALS
#endif
-/* xxxP __VXWORKS__ */
-#ifdef VXWORKS
-#include <vxWorks.h>
-#endif
-
#ifdef DISABLE_CHILD_WAITER_THREAD
#undef ENABLE_CHILD_WAITER_THREAD
#endif
@@ -39,10 +34,10 @@
#define ENABLE_CHILD_WAITER_THREAD 1
#endif
+#define ERTS_I64_LITERAL(X) X##LL
+
#if defined (__WIN32__)
# include "erl_win_sys.h"
-#elif defined (VXWORKS)
-# include "erl_vxworks_sys.h"
#else
# include "erl_unix_sys.h"
#ifndef UNIX
@@ -91,14 +86,22 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# endif
#endif
-#ifdef __GNUC__
-# if __GNUC__ < 3 && (__GNUC__ != 2 || __GNUC_MINOR__ < 96)
-# define ERTS_LIKELY(BOOL) (BOOL)
-# define ERTS_UNLIKELY(BOOL) (BOOL)
-# else
-# define ERTS_LIKELY(BOOL) __builtin_expect((BOOL), !0)
-# define ERTS_UNLIKELY(BOOL) __builtin_expect((BOOL), 0)
-# endif
+#if !defined(__GNUC__)
+# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0
+#elif !defined(__GNUC_MINOR__)
+# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ ((__GNUC__ << 24) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#elif !defined(__GNUC_PATCHLEVEL__)
+# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12)) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#else
+# define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \
+ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
+#endif
+
+#if ERTS_AT_LEAST_GCC_VSN__(2, 96, 0)
+# define ERTS_LIKELY(BOOL) __builtin_expect((BOOL), !0)
+# define ERTS_UNLIKELY(BOOL) __builtin_expect((BOOL), 0)
#else
# define ERTS_LIKELY(BOOL) (BOOL)
# define ERTS_UNLIKELY(BOOL) (BOOL)
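The old two-branch __GNUC__ test is replaced by ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL), which packs major/minor/patchlevel into one integer (major above bit 24, minor in bits 12..23, patchlevel in bits 0..11) so a complete compiler version can be compared with a single >=; the __builtin_expect branch hints are then enabled for gcc >= 2.96.0. A minimal standalone sketch of the same packing (hypothetical helper, not the ERTS macro):

    /* Mirrors the (MAJ << 24) | (MIN << 12) | PL encoding used above. */
    static int at_least_vsn(long maj, long min, long pl,
                            long want_maj, long want_min, long want_pl)
    {
        long have = (maj << 24) | (min << 12) | pl;
        long want = (want_maj << 24) | (want_min << 12) | want_pl;
        return have >= want;   /* e.g. 4.7.2 >= 2.96.0 is true */
    }
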
@@ -113,6 +116,16 @@ typedef ERTS_SYS_FD_TYPE ErtsSysFdType;
# define ERTS_DECLARE_DUMMY(X) X
#endif
+#if !defined(__func__)
+# if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L
+# if !defined(__GNUC__) || __GNUC__ < 2
+# define __func__ "[unknown_function]"
+# else
+# define __func__ __FUNCTION__
+# endif
+# endif
+#endif
+
#if defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
# undef ERTS_CAN_INLINE
# define ERTS_CAN_INLINE 0
@@ -172,10 +185,16 @@ void erl_assert_error(char* expr, char* file, int line);
# define const
#endif
-#ifdef VXWORKS
-/* Replace VxWorks' printf with a real one that does fprintf(stdout, ...) */
-int real_printf(const char *fmt, ...);
-# define printf real_printf
+#undef __deprecated
+#if ERTS_AT_LEAST_GCC_VSN__(3, 0, 0)
+# define __deprecated __attribute__((deprecated))
+#else
+# define __deprecated
+#endif
+#if ERTS_AT_LEAST_GCC_VSN__(3, 0, 4)
+# define erts_align_attribute(SZ) __attribute__ ((aligned (SZ)))
+#else
+# define erts_align_attribute(SZ)
#endif
/* In VC++, noreturn is a declspec that has to be before the types,
@@ -185,12 +204,6 @@ int real_printf(const char *fmt, ...);
#if __GNUC__
# define __decl_noreturn
# define __noreturn __attribute__((noreturn))
-# undef __deprecated
-# if __GNUC__ >= 3
-# define __deprecated __attribute__((deprecated))
-# else
-# define __deprecated
-# endif
#else
# if defined(__WIN32__) && defined(_MSC_VER)
# define __noreturn
@@ -199,7 +212,6 @@ int real_printf(const char *fmt, ...);
# define __noreturn
# define __decl_noreturn
# endif
-# define __deprecated
#endif
/*
@@ -229,9 +241,11 @@ int real_printf(const char *fmt, ...);
#if SIZEOF_VOID_P == 8
#undef ARCH_32
#define ARCH_64
+#define ERTS_SIZEOF_TERM 8
#elif SIZEOF_VOID_P == 4
#define ARCH_32
#undef ARCH_64
+#define ERTS_SIZEOF_TERM 4
#else
#error Neither 32 nor 64 bit architecture
#endif
@@ -239,6 +253,8 @@ int real_printf(const char *fmt, ...);
# define HALFWORD_HEAP 1
# define HALFWORD_ASSERT 0
# define ASSERT_HALFWORD(COND) ASSERT(COND)
+# undef ERTS_SIZEOF_TERM
+# define ERTS_SIZEOF_TERM 4
#else
# define HALFWORD_HEAP 0
# define HALFWORD_ASSERT 0
@@ -365,6 +381,27 @@ typedef unsigned char byte;
#error 64-bit architecture, but no appropriate type to use for Uint64 and Sint64 found
#endif
+#ifdef WORDS_BIGENDIAN
+# define ERTS_HUINT_HVAL_HIGH 0
+# define ERTS_HUINT_HVAL_LOW 1
+#else
+# define ERTS_HUINT_HVAL_HIGH 1
+# define ERTS_HUINT_HVAL_LOW 0
+#endif
+#if ERTS_SIZEOF_TERM == 8
+typedef union {
+ Uint val;
+ Uint32 hval[2];
+} HUint;
+#elif ERTS_SIZEOF_TERM == 4
+typedef union {
+ Uint val;
+ Uint16 hval[2];
+} HUint;
+#else
+#error "Unsupported size of term"
+#endif
+
# define ERTS_EXTRA_DATA_ALIGN_SZ(X) \
(((size_t) 8) - (((size_t) (X)) & ((size_t) 7)))
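The new HUint union overlays a term-sized Uint with two half-width words, and ERTS_HUINT_HVAL_HIGH/ERTS_HUINT_HVAL_LOW select which hval index holds the most and least significant half for the host byte order (index 0 is the high half on big-endian machines and the low half on little-endian ones). A small standalone illustration for the 64-bit case, using standard fixed-width types rather than the ERTS ones:

    #include <stdint.h>
    #include <stdio.h>

    typedef union { uint64_t val; uint32_t hval[2]; } huint64;

    int main(void)
    {
        huint64 u;
        u.val = 0x1122334455667788ULL;
        /* Little-endian: hval[0] == 0x55667788 (low), hval[1] == 0x11223344 (high);
           big-endian is the reverse -- exactly what the HIGH/LOW indices encode. */
        printf("%08x %08x\n", (unsigned) u.hval[0], (unsigned) u.hval[1]);
        return 0;
    }
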
@@ -471,38 +508,28 @@ static unsigned long zero_value = 0, one_value = 1;
# define SET_NONBLOCKING(fd) ioctlsocket((fd), FIONBIO, &one_value)
# else
-# ifdef VXWORKS
-# include <fcntl.h> /* xxxP added for O_WRONLY etc ... macro:s ... */
-# include <ioLib.h>
-static const int zero_value = 0, one_value = 1;
-# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, (int)&zero_value)
-# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, (int)&one_value)
-# define ERRNO_BLOCK EWOULDBLOCK
-
-# else
-# ifdef NB_FIONBIO /* Old BSD */
-# include <sys/ioctl.h>
+# ifdef NB_FIONBIO /* Old BSD */
+# include <sys/ioctl.h>
static const int zero_value = 0, one_value = 1;
-# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, &zero_value)
-# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, &one_value)
-# define ERRNO_BLOCK EWOULDBLOCK
-# else /* !NB_FIONBIO */
-# include <fcntl.h>
-# ifdef NB_O_NDELAY /* Nothing needs this? */
-# define NB_FLAG O_NDELAY
-# ifndef ERRNO_BLOCK /* allow override (e.g. EAGAIN) via Makefile */
-# define ERRNO_BLOCK EWOULDBLOCK
-# endif
-# else /* !NB_O_NDELAY */ /* The True Way - POSIX!:-) */
-# define NB_FLAG O_NONBLOCK
-# define ERRNO_BLOCK EAGAIN
-# endif /* !NB_O_NDELAY */
-# define SET_BLOCKING(fd) fcntl((fd), F_SETFL, \
- fcntl((fd), F_GETFL, 0) & ~NB_FLAG)
-# define SET_NONBLOCKING(fd) fcntl((fd), F_SETFL, \
- fcntl((fd), F_GETFL, 0) | NB_FLAG)
-# endif /* !NB_FIONBIO */
-# endif /* _WXWORKS_ */
+# define SET_BLOCKING(fd) ioctl((fd), FIONBIO, &zero_value)
+# define SET_NONBLOCKING(fd) ioctl((fd), FIONBIO, &one_value)
+# define ERRNO_BLOCK EWOULDBLOCK
+# else /* !NB_FIONBIO */
+# include <fcntl.h>
+# ifdef NB_O_NDELAY /* Nothing needs this? */
+# define NB_FLAG O_NDELAY
+# ifndef ERRNO_BLOCK /* allow override (e.g. EAGAIN) via Makefile */
+# define ERRNO_BLOCK EWOULDBLOCK
+# endif
+# else /* !NB_O_NDELAY */ /* The True Way - POSIX!:-) */
+# define NB_FLAG O_NONBLOCK
+# define ERRNO_BLOCK EAGAIN
+# endif /* !NB_O_NDELAY */
+# define SET_BLOCKING(fd) fcntl((fd), F_SETFL, \
+ fcntl((fd), F_GETFL, 0) & ~NB_FLAG)
+# define SET_NONBLOCKING(fd) fcntl((fd), F_SETFL, \
+ fcntl((fd), F_GETFL, 0) | NB_FLAG)
+# endif /* !NB_FIONBIO */
# endif /* !__WIN32__ */
#endif /* WANT_NONBLOCKING */
@@ -513,6 +540,10 @@ __decl_noreturn void __noreturn erl_exit(int n, char*, ...);
#define ERTS_ABORT_EXIT (INT_MIN + 1) /* no crash dump; only abort() */
#define ERTS_DUMP_EXIT (INT_MIN + 2) /* crash dump; then exit() */
+#define ERTS_INTERNAL_ERROR(What) \
+ erl_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Internal error: %s\n", \
+ __FILE__, __LINE__, __func__, What)
+
Eterm erts_check_io_info(void *p);
/* Size of misc memory allocated from system dependent code */
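ERTS_INTERNAL_ERROR(What) gives the emulator a one-line way to abort with the file, line and function name attached, going through erl_exit(ERTS_ABORT_EXIT, ...) so no crash dump is written; the __func__ fallback added a few hunks above exists so this macro also compiles on pre-C99 compilers. A plain-C sketch of the same shape, using abort() in place of erl_exit (illustration only):

    #include <stdio.h>
    #include <stdlib.h>

    #define INTERNAL_ERROR(What)                                     \
        do {                                                         \
            fprintf(stderr, "%s:%d:%s(): Internal error: %s\n",      \
                    __FILE__, __LINE__, __func__, (What));           \
            abort();                                                 \
        } while (0)
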
@@ -587,6 +618,7 @@ typedef struct _SysDriverOpts {
char *wd; /* Working directory. */
unsigned spawn_type; /* Bitfield of ERTS_SPAWN_DRIVER |
ERTS_SPAWN_EXTERNAL | both*/
+ int parallelism; /* Optimize for parallelism */
} SysDriverOpts;
extern char *erts_default_arg0;
@@ -626,7 +658,7 @@ typedef struct {
#define ERTS_SYS_DDLL_ERROR_INIT {NULL}
extern void erts_sys_ddll_free_error(ErtsSysDdllError*);
extern void erl_sys_ddll_init(void); /* to initialize mutexes etc */
-extern int erts_sys_ddll_open2(char *path, void **handle, ErtsSysDdllError*);
+extern int erts_sys_ddll_open2(const char *path, void **handle, ErtsSysDdllError*);
#define erts_sys_ddll_open(P,H) erts_sys_ddll_open2(P,H,NULL)
extern int erts_sys_ddll_open_noext(char *path, void **handle, ErtsSysDdllError*);
extern int erts_sys_ddll_load_driver_init(void *handle, void **function);
@@ -635,7 +667,7 @@ extern int erts_sys_ddll_close2(void *handle, ErtsSysDdllError*);
#define erts_sys_ddll_close(H) erts_sys_ddll_close2(H,NULL)
extern void *erts_sys_ddll_call_init(void *function);
extern void *erts_sys_ddll_call_nif_init(void *function);
-extern int erts_sys_ddll_sym2(void *handle, char *name, void **function, ErtsSysDdllError*);
+extern int erts_sys_ddll_sym2(void *handle, const char *name, void **function, ErtsSysDdllError*);
#define erts_sys_ddll_sym(H,N,F) erts_sys_ddll_sym2(H,N,F,NULL)
extern char *erts_sys_ddll_error(int code);
@@ -652,7 +684,7 @@ void erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec);
void erts_sys_main_thread(void);
#endif
-extern void erts_sys_prepare_crash_dump(int secs);
+extern int erts_sys_prepare_crash_dump(int secs);
extern void erts_sys_pre_init(void);
extern void erl_sys_init(void);
extern void erl_sys_args(int *argc, char **argv);
@@ -699,8 +731,8 @@ void fini_getenv_state(GETENV_STATE *);
/* xxxP */
void init_sys_float(void);
int sys_chars_to_double(char*, double*);
-int sys_double_to_chars(double, char*);
-void sys_get_pid(char *);
+int sys_double_to_chars(double, char*, size_t);
+void sys_get_pid(char *, size_t);
/* erts_sys_putenv() returns, 0 on success and a value != 0 on failure. */
int erts_sys_putenv(char *key, char *value);
@@ -858,13 +890,6 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val)
extern int erts_use_kernel_poll;
#endif
-#if defined(VXWORKS)
-/* NOTE! sys_calloc2 does not exist on other
- platforms than VxWorks and OSE */
-void* sys_calloc2(Uint, Uint);
-#endif /* VXWORKS || OSE */
-
-
#define sys_memcpy(s1,s2,n) memcpy(s1,s2,n)
#define sys_memmove(s1,s2,n) memmove(s1,s2,n)
#define sys_memcmp(s1,s2,n) memcmp(s1,s2,n)
@@ -971,43 +996,6 @@ void erl_bin_write(unsigned char *, int, int);
# define DEBUGF(x)
#endif
-
-#ifdef VXWORKS
-/* This includes redefines of malloc etc
- this should be done after sys_alloc, etc, above */
-# include "reclaim.h"
-/*********************Malloc and friends************************
- * There is a problem with the naming of malloc and friends,
- * malloc is used throughout sys.c and the resolver to mean save_alloc,
- * but it should actually mean either sys_alloc or sys_alloc2,
- * so the definitions from reclaim_master.h are not any
- * good, i redefine the malloc family here, although it's quite
- * ugly, actually it would be preferrable to use the
- * names sys_alloc and so on throughout the offending code, but
- * that will be saved as an later exercise...
- * I also add an own calloc, to make the BSD resolver source happy.
- ***************************************************************/
-/* Undefine malloc and friends */
-# ifdef malloc
-# undef malloc
-# endif
-# ifdef calloc
-# undef calloc
-# endif
-# ifdef realloc
-# undef realloc
-# endif
-# ifdef free
-# undef free
-# endif
-/* Redefine malloc and friends */
-# define malloc sys_alloc
-# define calloc sys_calloc
-# define realloc sys_realloc
-# define free sys_free
-
-#endif
-
#ifdef __WIN32__
#ifdef ARCH_64
#define ERTS_ALLOC_ALIGN_BYTES 16
@@ -1023,23 +1011,20 @@ void erl_bin_write(unsigned char *, int, int);
#ifdef __WIN32__
-
void call_break_handler(void);
char* last_error(void);
char* win32_errorstr(int);
-
-
#endif
/************************************************************************
* Find out the native filename encoding of the process (look at locale of
* Unix processes and just do UTF16 on windows
************************************************************************/
-#define ERL_FILENAME_UNKNOWN 0
-#define ERL_FILENAME_LATIN1 1
-#define ERL_FILENAME_UTF8 2
-#define ERL_FILENAME_UTF8_MAC 3
-#define ERL_FILENAME_WIN_WCHAR 4
+#define ERL_FILENAME_UNKNOWN (0)
+#define ERL_FILENAME_LATIN1 (1)
+#define ERL_FILENAME_UTF8 (2)
+#define ERL_FILENAME_UTF8_MAC (3)
+#define ERL_FILENAME_WIN_WCHAR (4)
int erts_get_native_filename_encoding(void);
/* The set function is only to be used by erl_init! */
@@ -1049,4 +1034,3 @@ int erts_get_user_requested_filename_encoding(void);
void erts_init_sys_common_misc(void);
#endif
-
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index bd708ceee6..5261effef9 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -46,6 +46,7 @@
#include "erl_thr_queue.h"
#include "erl_sched_spec_pre_alloc.h"
#include "beam_bp.h"
+#include "erl_ptab.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
@@ -268,16 +269,42 @@ list_length(Eterm list)
return i;
}
-Uint erts_fit_in_bits(Uint n)
+static const struct {
+ Sint64 mask;
+ int bits;
+} fib_data[] = {{ERTS_I64_LITERAL(0x2), 1},
+ {ERTS_I64_LITERAL(0xc), 2},
+ {ERTS_I64_LITERAL(0xf0), 4},
+ {ERTS_I64_LITERAL(0xff00), 8},
+ {ERTS_I64_LITERAL(0xffff0000), 16},
+ {ERTS_I64_LITERAL(0xffffffff00000000), 32}};
+
+static ERTS_INLINE int
+fit_in_bits(Sint64 value, int start)
{
- Uint i;
+ int bits = 0;
+ int i;
- i = 0;
- while (n > 0) {
- i++;
- n >>= 1;
- }
- return i;
+ for (i = start; i >= 0; i--) {
+ if (value & fib_data[i].mask) {
+ value >>= fib_data[i].bits;
+ bits |= fib_data[i].bits;
+ }
+ }
+
+ bits++;
+
+ return bits;
+}
+
+int erts_fit_in_bits_int64(Sint64 value)
+{
+ return fit_in_bits(value, 5);
+}
+
+int erts_fit_in_bits_int32(Sint32 value)
+{
+ return fit_in_bits((Sint64) (Uint32) value, 4);
}
int
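The rewritten bit-counting code replaces the old one-bit-at-a-time loop in erts_fit_in_bits: fit_in_bits() walks the fib_data table from the widest mask downwards, and whenever the upper half selected by a mask is non-zero it shifts the value right by that width and adds the width to the running count, so a 64-bit value is classified in at most six mask tests. A standalone sketch of the same technique for a plain uint64_t (hypothetical helper, not the ERTS code):

    #include <stdint.h>

    /* Number of bits needed to represent v; returns 1 for v == 0,
     * like the new fit_in_bits(). */
    static int bits_needed(uint64_t v)
    {
        static const struct { uint64_t mask; int bits; } tab[] = {
            {0xffffffff00000000ULL, 32}, {0xffff0000ULL, 16}, {0xff00ULL, 8},
            {0xf0ULL, 4}, {0xcULL, 2}, {0x2ULL, 1}
        };
        int bits = 0, i;
        for (i = 0; i < 6; i++) {
            if (v & tab[i].mask) {
                v >>= tab[i].bits;
                bits |= tab[i].bits;
            }
        }
        return bits + 1;   /* bits_needed(5) == 3, bits_needed(255) == 8 */
    }
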
@@ -1640,12 +1667,20 @@ static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
}
#ifndef ERTS_SMP
- if (
#ifdef USE_THREADS
- !erts_get_scheduler_data() || /* Must be scheduler thread */
+ p = NULL;
+ if (erts_get_scheduler_data()) /* Must be scheduler thread */
#endif
- (p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0)) == NULL
- || p->status == P_RUNNING) {
+ {
+ p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
+ if (p) {
+ erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ if (state & ERTS_PSFLG_RUNNING)
+ p = NULL;
+ }
+ }
+
+ if (!p) {
/* buf *always* points to a null terminated string */
erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
tag, buf);
@@ -2982,12 +3017,13 @@ buf_to_intlist(Eterm** hpp, char *buf, size_t len, Eterm tail)
** ;
**
** Return remaining bytes in buffer on success
-** -1 on overflow
-** -2 on type error (including that result would not be a whole number of bytes)
+** ERTS_IOLIST_TO_BUF_OVERFLOW on overflow
+** ERTS_IOLIST_TO_BUF_TYPE_ERROR on type error (including that result would not be a whole number of bytes)
*/
-int io_list_to_buf(Eterm obj, char* buf, int len)
+ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len)
{
+ ErlDrvSizeT len = (ErlDrvSizeT) alloced_len;
Eterm* objp;
DECLARE_ESTACK(s);
goto L_again;
@@ -3080,20 +3116,20 @@ int io_list_to_buf(Eterm obj, char* buf, int len)
L_type_error:
DESTROY_ESTACK(s);
- return -2;
+ return ERTS_IOLIST_TO_BUF_TYPE_ERROR;
L_overflow:
DESTROY_ESTACK(s);
- return -1;
+ return ERTS_IOLIST_TO_BUF_OVERFLOW;
}
/*
* Return 0 if successful, and non-zero if unsuccessful.
*/
-int erts_iolist_size(Eterm obj, Uint* sizep)
+int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep)
{
Eterm* objp;
- Uint size = 0;
+ Uint size = 0; /* Intentionally Uint due to halfword heap */
DECLARE_ESTACK(s);
goto L_again;
@@ -3145,7 +3181,7 @@ int erts_iolist_size(Eterm obj, Uint* sizep)
#undef SAFE_ADD
DESTROY_ESTACK(s);
- *sizep = size;
+ *sizep = (ErlDrvSizeT) size;
return ERTS_IOLIST_OK;
L_overflow_error:
@@ -3240,7 +3276,7 @@ ptimer_timeout(ErtsSmpPTimer *ptimer)
ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
ERTS_P2P_FLG_ALLOW_OTHER_X);
if (p) {
- if (!p->is_exiting
+ if (!ERTS_PROC_IS_EXITING(p)
&& !(ptimer->timer.flags & ERTS_PTMR_FLG_CANCELLED)) {
ASSERT(*ptimer->timer.timer_ref == ptimer);
*ptimer->timer.timer_ref = NULL;
@@ -3436,6 +3472,254 @@ void erts_silence_warn_unused_result(long unused)
}
+/*
+ * Interval counts
+ */
+void
+erts_interval_init(erts_interval_t *icp)
+{
+#ifdef ARCH_64
+ erts_atomic_init_nob(&icp->counter.atomic, 0);
+#else
+ erts_dw_aint_t dw;
+#ifdef ETHR_SU_DW_NAINT_T__
+ dw.dw_sint = 0;
+#else
+ dw.sint[ERTS_DW_AINT_HIGH_WORD] = 0;
+ dw.sint[ERTS_DW_AINT_LOW_WORD] = 0;
+#endif
+ erts_dw_atomic_init_nob(&icp->counter.atomic, &dw);
+
+#endif
+#ifdef DEBUG
+ icp->smp_api = 0;
+#endif
+}
+
+void
+erts_smp_interval_init(erts_interval_t *icp)
+{
+#ifdef ERTS_SMP
+ erts_interval_init(icp);
+#else
+ icp->counter.not_atomic = 0;
+#endif
+#ifdef DEBUG
+ icp->smp_api = 1;
+#endif
+}
+
+static ERTS_INLINE Uint64
+step_interval_nob(erts_interval_t *icp)
+{
+#ifdef ARCH_64
+ return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic);
+#else
+ erts_dw_aint_t exp;
+
+ erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
+ while (1) {
+ erts_dw_aint_t new = exp;
+
+#ifdef ETHR_SU_DW_NAINT_T__
+ new.dw_sint++;
+#else
+ new.sint[ERTS_DW_AINT_LOW_WORD]++;
+ if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
+ new.sint[ERTS_DW_AINT_HIGH_WORD]++;
+#endif
+
+ if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp))
+ return erts_interval_dw_aint_to_val__(&new);
+
+ }
+#endif
+}
+
+static ERTS_INLINE Uint64
+step_interval_relb(erts_interval_t *icp)
+{
+#ifdef ARCH_64
+ return (Uint64) erts_atomic_inc_read_relb(&icp->counter.atomic);
+#else
+ erts_dw_aint_t exp;
+
+ erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
+ while (1) {
+ erts_dw_aint_t new = exp;
+
+#ifdef ETHR_SU_DW_NAINT_T__
+ new.dw_sint++;
+#else
+ new.sint[ERTS_DW_AINT_LOW_WORD]++;
+ if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
+ new.sint[ERTS_DW_AINT_HIGH_WORD]++;
+#endif
+
+ if (erts_dw_atomic_cmpxchg_relb(&icp->counter.atomic, &new, &exp))
+ return erts_interval_dw_aint_to_val__(&new);
+
+ }
+#endif
+}
+
+
+static ERTS_INLINE Uint64
+ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
+{
+ Uint64 curr_ic;
+#ifdef ARCH_64
+ curr_ic = (Uint64) erts_atomic_read_nob(&icp->counter.atomic);
+ if (curr_ic > ic)
+ return curr_ic;
+ return (Uint64) erts_atomic_inc_read_nob(&icp->counter.atomic);
+#else
+ erts_dw_aint_t exp;
+
+ erts_dw_atomic_read_nob(&icp->counter.atomic, &exp);
+ curr_ic = erts_interval_dw_aint_to_val__(&exp);
+ if (curr_ic > ic)
+ return curr_ic;
+
+ while (1) {
+ erts_dw_aint_t new = exp;
+
+#ifdef ETHR_SU_DW_NAINT_T__
+ new.dw_sint++;
+#else
+ new.sint[ERTS_DW_AINT_LOW_WORD]++;
+ if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
+ new.sint[ERTS_DW_AINT_HIGH_WORD]++;
+#endif
+
+ if (erts_dw_atomic_cmpxchg_nob(&icp->counter.atomic, &new, &exp))
+ return erts_interval_dw_aint_to_val__(&new);
+
+ curr_ic = erts_interval_dw_aint_to_val__(&exp);
+ if (curr_ic > ic)
+ return curr_ic;
+ }
+#endif
+}
+
+
+static ERTS_INLINE Uint64
+ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
+{
+ Uint64 curr_ic;
+#ifdef ARCH_64
+ curr_ic = (Uint64) erts_atomic_read_acqb(&icp->counter.atomic);
+ if (curr_ic > ic)
+ return curr_ic;
+ return (Uint64) erts_atomic_inc_read_acqb(&icp->counter.atomic);
+#else
+ erts_dw_aint_t exp;
+
+ erts_dw_atomic_read_acqb(&icp->counter.atomic, &exp);
+ curr_ic = erts_interval_dw_aint_to_val__(&exp);
+ if (curr_ic > ic)
+ return curr_ic;
+
+ while (1) {
+ erts_dw_aint_t new = exp;
+
+#ifdef ETHR_SU_DW_NAINT_T__
+ new.dw_sint++;
+#else
+ new.sint[ERTS_DW_AINT_LOW_WORD]++;
+ if (new.sint[ERTS_DW_AINT_LOW_WORD] == 0)
+ new.sint[ERTS_DW_AINT_HIGH_WORD]++;
+#endif
+
+ if (erts_dw_atomic_cmpxchg_acqb(&icp->counter.atomic, &new, &exp))
+ return erts_interval_dw_aint_to_val__(&new);
+
+ curr_ic = erts_interval_dw_aint_to_val__(&exp);
+ if (curr_ic > ic)
+ return curr_ic;
+ }
+#endif
+}
+
+Uint64
+erts_step_interval_nob(erts_interval_t *icp)
+{
+ ASSERT(!icp->smp_api);
+ return step_interval_nob(icp);
+}
+
+Uint64
+erts_step_interval_relb(erts_interval_t *icp)
+{
+ ASSERT(!icp->smp_api);
+ return step_interval_relb(icp);
+}
+
+Uint64
+erts_smp_step_interval_nob(erts_interval_t *icp)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return step_interval_nob(icp);
+#else
+ return ++icp->counter.not_atomic;
+#endif
+}
+
+Uint64
+erts_smp_step_interval_relb(erts_interval_t *icp)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return step_interval_relb(icp);
+#else
+ return ++icp->counter.not_atomic;
+#endif
+}
+
+Uint64
+erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
+{
+ ASSERT(!icp->smp_api);
+ return ensure_later_interval_nob(icp, ic);
+}
+
+Uint64
+erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
+{
+ ASSERT(!icp->smp_api);
+ return ensure_later_interval_acqb(icp, ic);
+}
+
+Uint64
+erts_smp_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return ensure_later_interval_nob(icp, ic);
+#else
+ if (icp->counter.not_atomic > ic)
+ return icp->counter.not_atomic;
+ else
+ return ++icp->counter.not_atomic;
+#endif
+}
+
+Uint64
+erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
+{
+ ASSERT(icp->smp_api);
+#ifdef ERTS_SMP
+ return ensure_later_interval_acqb(icp, ic);
+#else
+ if (icp->counter.not_atomic > ic)
+ return icp->counter.not_atomic;
+ else
+ return ++icp->counter.not_atomic;
+#endif
+}
+
+
#ifdef DEBUG
/*
* Handy functions when using a debugger - don't use in the code!
@@ -3468,7 +3752,7 @@ Process *p;
void ppi(Eterm pid)
{
- pp(erts_pid2proc_unlocked(pid));
+ pp(erts_proc_lookup(pid));
}
void td(Eterm x)
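Earlier in this utils.c diff, the new erts_interval_* family maintains a monotonically increasing 64-bit interval count. On 64-bit builds it is a single atomic word; on 32-bit builds it is a double-word atomic advanced by a compare-and-swap loop that increments the low word and carries into the high word, and the ensure_later_interval_* variants only perform the increment when the counter has not already passed the requested value. A rough standalone sketch of the retry structure using C11 atomics (illustration only; ERTS uses its own erts_dw_atomic API and memory-order variants):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t interval;   /* hypothetical global counter */

    static uint64_t step_interval(void)
    {
        uint64_t exp = atomic_load_explicit(&interval, memory_order_relaxed);
        for (;;) {
            uint64_t next = exp + 1;
            if (atomic_compare_exchange_weak_explicit(&interval, &exp, next,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                return next;
            /* exp was refreshed with the current value; retry */
        }
    }
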
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index 912f5d3d8b..2ac7f169af 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -439,6 +439,7 @@ struct t_data
Efile_error errInfo;
int flags;
SWord fd;
+ int is_fd_unused;
/**/
Efile_info info;
EFILE_DIR_HANDLE dir_handle; /* Handle to open directory. */
@@ -781,11 +782,6 @@ file_start(ErlDrvPort port, char* command)
return (ErlDrvData) desc;
}
-static void free_data(void *data)
-{
- EF_FREE(data);
-}
-
static void do_close(int flags, SWord fd) {
if (flags & EFILE_COMPRESSED) {
erts_gzclose((gzFile)(fd));
@@ -803,6 +799,17 @@ static void invoke_close(void *data)
DTRACE_INVOKE_RETURN(FILE_CLOSE);
}
+static void free_data(void *data)
+{
+ struct t_data *d = (struct t_data *) data;
+
+ if (d->command == FILE_OPEN && d->is_fd_unused && d->fd != FILE_FD_INVALID) {
+ do_close(d->flags, d->fd);
+ }
+
+ EF_FREE(data);
+}
+
/*********************************************************************
* Driver entry point -> stop
*/
@@ -1862,6 +1869,9 @@ static void invoke_open(void *data)
}
d->result_ok = status;
+ if (!status) {
+ d->fd = FILE_FD_INVALID;
+ }
DTRACE_INVOKE_RETURN(FILE_OPEN);
}
@@ -2373,8 +2383,10 @@ file_async_ready(ErlDrvData e, ErlDrvThreadData data)
if (!d->result_ok) {
reply_error(desc, &d->errInfo);
} else {
+ ASSERT(d->is_fd_unused);
desc->fd = d->fd;
desc->flags = d->flags;
+ d->is_fd_unused = 0;
reply_Uint(desc, d->fd);
}
free_data(data);
@@ -2745,6 +2757,7 @@ file_output(ErlDrvData e, char* buf, ErlDrvSizeT count)
d->invoke = invoke_open;
d->free = free_data;
d->level = 2;
+ d->is_fd_unused = 1;
goto done;
}
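The efile_drv changes close a descriptor leak: free_data() is moved below do_close() and a new is_fd_unused flag records whether the fd produced by an asynchronous FILE_OPEN was ever handed over to the port (file_async_ready() clears the flag when it stores d->fd into desc->fd, and invoke_open() marks a failed open with FILE_FD_INVALID). If the request is discarded before the hand-off, free_data() now closes the orphaned fd itself. A condensed sketch of the ownership hand-off pattern, with hypothetical names:

    #include <stdlib.h>
    #include <unistd.h>

    struct open_req { int fd; int fd_unused; };

    /* completion path: the port takes ownership of the descriptor */
    static void complete(struct open_req *r, int *port_fd)
    {
        *port_fd = r->fd;
        r->fd_unused = 0;
    }

    /* cleanup path: close only if ownership was never transferred */
    static void destroy(struct open_req *r)
    {
        if (r->fd_unused && r->fd >= 0)
            close(r->fd);
        free(r);
    }
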
diff --git a/erts/emulator/drivers/common/gzio.c b/erts/emulator/drivers/common/gzio.c
index a9303d55bc..ca6d25adb4 100644
--- a/erts/emulator/drivers/common/gzio.c
+++ b/erts/emulator/drivers/common/gzio.c
@@ -21,11 +21,6 @@
#include "erl_driver.h"
#include "sys.h"
-#ifdef VXWORKS
-/* pull in FOPEN from zutil.h instead */
-#undef F_OPEN
-#endif
-
#ifdef __WIN32__
#ifndef HAVE_CONFLICTING_FREAD_DECLARATION
#define HAVE_CONFLICTING_FREAD_DECLARATION
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 8f4fff0f40..236b8710fb 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -284,27 +284,15 @@ static unsigned long one_value = 1;
#else
-#ifdef VXWORKS
-#include <sockLib.h>
-#include <sys/times.h>
-#include <iosLib.h>
-#include <taskLib.h>
-#include <selectLib.h>
-#include <ioLib.h>
-#else
#include <sys/time.h>
#ifdef NETDB_H_NEEDS_IN_H
#include <netinet/in.h>
#endif
#include <netdb.h>
-#endif
#include <sys/socket.h>
#include <netinet/in.h>
-#ifdef VXWORKS
-#include <rpc/rpctypes.h>
-#endif
#ifdef DEF_INADDR_LOOPBACK_IN_RPC_TYPES_H
#include <rpc/types.h>
#endif
@@ -312,12 +300,10 @@ static unsigned long one_value = 1;
#include <netinet/tcp.h>
#include <arpa/inet.h>
-#if (!defined(VXWORKS))
#include <sys/param.h>
#ifdef HAVE_ARPA_NAMESER_H
#include <arpa/nameser.h>
#endif
-#endif
#ifdef HAVE_SYS_SOCKIO_H
#include <sys/sockio.h>
@@ -331,7 +317,7 @@ static unsigned long one_value = 1;
/* SCTP support -- currently for UNIX platforms only: */
#undef HAVE_SCTP
-#if (!defined(VXWORKS) && !defined(__WIN32__) && defined(HAVE_SCTP_H))
+#if (!defined(__WIN32__) && defined(HAVE_SCTP_H))
#include <netinet/sctp.h>
@@ -478,15 +464,8 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define sock_connect(s, addr, len) connect((s), (addr), (len))
#define sock_listen(s, b) listen((s), (b))
#define sock_bind(s, addr, len) bind((s), (addr), (len))
-#ifdef VXWORKS
-#define sock_getopt(s,t,n,v,l) wrap_sockopt(&getsockopt,\
- s,t,n,v,(unsigned int)(l))
-#define sock_setopt(s,t,n,v,l) wrap_sockopt(&setsockopt,\
- s,t,n,v,(unsigned int)(l))
-#else
#define sock_getopt(s,t,n,v,l) getsockopt((s),(t),(n),(v),(l))
#define sock_setopt(s,t,n,v,l) setsockopt((s),(t),(n),(v),(l))
-#endif
#define sock_name(s, addr, len) getsockname((s), (addr), (len))
#define sock_peer(s, addr, len) getpeername((s), (addr), (len))
#define sock_ntohs(x) ntohs((x))
@@ -535,6 +514,12 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#endif /* __WIN32__ */
+#ifdef HAVE_SOCKLEN_T
+# define SOCKLEN_T socklen_t
+#else
+# define SOCKLEN_T int
+#endif
+
#include "packet_parser.h"
#define get_int24(s) ((((unsigned char*) (s))[0] << 16) | \
@@ -675,6 +660,7 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define UDP_OPT_MULTICAST_LOOP 13 /* set/get IP multicast loopback */
#define UDP_OPT_ADD_MEMBERSHIP 14 /* add an IP group membership */
#define UDP_OPT_DROP_MEMBERSHIP 15 /* drop an IP group membership */
+#define INET_OPT_IPV6_V6ONLY 16 /* IPv6 only socket, no mapped v4 addrs */
/* LOPT is local options */
#define INET_LOPT_BUFFER 20 /* min buffer size hint */
#define INET_LOPT_HEADER 21 /* list header size */
@@ -685,13 +671,15 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_LOPT_EXITONCLOSE 26 /* exit port on active close or not ! */
#define INET_LOPT_TCP_HIWTRMRK 27 /* set local high watermark */
#define INET_LOPT_TCP_LOWTRMRK 28 /* set local low watermark */
-#define INET_LOPT_BIT8 29 /* set 8 bit detection */
+ /* 29 unused */
#define INET_LOPT_TCP_SEND_TIMEOUT 30 /* set send timeout */
#define INET_LOPT_TCP_DELAY_SEND 31 /* Delay sends until next poll */
#define INET_LOPT_PACKET_SIZE 32 /* Max packet size */
#define INET_LOPT_UDP_READ_PACKETS 33 /* Number of packets to read */
#define INET_OPT_RAW 34 /* Raw socket options */
#define INET_LOPT_TCP_SEND_TIMEOUT_CLOSE 35 /* auto-close on send timeout or not */
+#define INET_LOPT_TCP_MSGQ_HIWTRMRK 36 /* set local high watermark */
+#define INET_LOPT_TCP_MSGQ_LOWTRMRK 37 /* set local low watermark */
/* SCTP options: a separate range, from 100: */
#define SCTP_OPT_RTOINFO 100
#define SCTP_OPT_ASSOCINFO 101
@@ -720,12 +708,6 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_IFOPT_FLAGS 6
#define INET_IFOPT_HWADDR 7
-/* INET_LOPT_BIT8 options */
-#define INET_BIT8_CLEAR 0
-#define INET_BIT8_SET 1
-#define INET_BIT8_ON 2
-#define INET_BIT8_OFF 3
-
/* INET_REQ_GETSTAT enumeration */
#define INET_STAT_RECV_CNT 1
#define INET_STAT_RECV_MAX 2
@@ -808,6 +790,8 @@ static int my_strncasecmp(const char *s1, const char *s2, size_t n)
#define INET_HIGH_WATERMARK (1024*8) /* 8k pending high => busy */
#define INET_LOW_WATERMARK (1024*4) /* 4k pending => allow more */
+#define INET_HIGH_MSGQ_WATERMARK (1024*8) /* 8k pending high => busy */
+#define INET_LOW_MSGQ_WATERMARK (1024*4) /* 4k pending => allow more */
#define INET_INFINITY 0xffffffff /* infinity value */
@@ -899,7 +883,7 @@ typedef struct subs_list_ {
#define NO_PROCESS 0
#define NO_SUBSCRIBERS(SLP) ((SLP)->subscriber == NO_PROCESS)
-static void send_to_subscribers(ErlDrvPort, subs_list *, int,
+static void send_to_subscribers(ErlDrvTermData, subs_list *, int,
ErlDrvTermData [], int);
static void free_subscribers(subs_list*);
static int save_subscriber(subs_list *, ErlDrvTermData);
@@ -921,7 +905,6 @@ typedef struct {
int mode; /* BINARY | LIST
(affect how to interpret hsz) */
int exitf; /* exit port on close or not */
- int bit8f; /* check if data has bit number 7 set */
int deliver; /* Delivery mode, TERM or PORT */
ErlDrvTermData caller; /* recipient of sync reply */
@@ -940,8 +923,6 @@ typedef struct {
int sfamily; /* address family */
enum PacketParseType htype; /* header type (TCP only?) */
unsigned int psize; /* max packet size (TCP only?) */
- int bit8; /* set if bit8f==true and data some data
- seen had the 7th bit set */
inet_address remote; /* remote address for connected sockets */
inet_address peer_addr; /* fake peer address */
inet_address name_addr; /* fake local address */
@@ -1191,6 +1172,7 @@ static ErlDrvTermData am_reuseaddr;
static ErlDrvTermData am_dontroute;
static ErlDrvTermData am_priority;
static ErlDrvTermData am_tos;
+static ErlDrvTermData am_ipv6_v6only;
#endif
/* speical errors for bad ports and sequences */
@@ -1895,8 +1877,7 @@ static int deq_async(inet_descriptor* desc, int* ap, ErlDrvTermData* cp, int* rp
** {inet_async, Port, Ref, ok}
*/
static int
-send_async_ok(ErlDrvPort port, ErlDrvTermData Port, int Ref,
- ErlDrvTermData recipient)
+send_async_ok(ErlDrvTermData Port, int Ref,ErlDrvTermData recipient)
{
ErlDrvTermData spec[2*LOAD_ATOM_CNT + LOAD_PORT_CNT +
LOAD_INT_CNT + LOAD_TUPLE_CNT];
@@ -1910,14 +1891,14 @@ send_async_ok(ErlDrvPort port, ErlDrvTermData Port, int Ref,
ASSERT(i == sizeof(spec)/sizeof(*spec));
- return driver_send_term(port, recipient, spec, i);
+ return erl_drv_send_term(Port, recipient, spec, i);
}
/* send message:
** {inet_async, Port, Ref, {ok,Port2}}
*/
static int
-send_async_ok_port(ErlDrvPort port, ErlDrvTermData Port, int Ref,
+send_async_ok_port(ErlDrvTermData Port, int Ref,
ErlDrvTermData recipient, ErlDrvTermData Port2)
{
ErlDrvTermData spec[2*LOAD_ATOM_CNT + 2*LOAD_PORT_CNT +
@@ -1936,14 +1917,14 @@ send_async_ok_port(ErlDrvPort port, ErlDrvTermData Port, int Ref,
ASSERT(i == sizeof(spec)/sizeof(*spec));
- return driver_send_term(port, recipient, spec, i);
+ return erl_drv_send_term(Port, recipient, spec, i);
}
/* send message:
** {inet_async, Port, Ref, {error,Reason}}
*/
static int
-send_async_error(ErlDrvPort port, ErlDrvTermData Port, int Ref,
+send_async_error(ErlDrvTermData Port, int Ref,
ErlDrvTermData recipient, ErlDrvTermData Reason)
{
ErlDrvTermData spec[3*LOAD_ATOM_CNT + LOAD_PORT_CNT +
@@ -1961,7 +1942,7 @@ send_async_error(ErlDrvPort port, ErlDrvTermData Port, int Ref,
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i == sizeof(spec)/sizeof(*spec));
DEBUGF(("send_async_error %ld %ld\r\n", recipient, Reason));
- return driver_send_term(port, recipient, spec, i);
+ return erl_drv_send_term(Port, recipient, spec, i);
}
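Throughout inet_drv.c the reply helpers now identify the sending port by its ErlDrvTermData (the term-level id from driver_mk_port()) instead of the ErlDrvPort handle, and call erl_drv_send_term()/erl_drv_output_term() in place of the older driver_send_term()/driver_output_term(). A minimal sketch of the pattern for a driver that still holds an ErlDrvPort (hypothetical helper; the term spec layout is illustrative):

    #include "erl_driver.h"

    /* Send {ok, Port} to `caller` using the term-level port id. */
    static int reply_ok(ErlDrvPort prt, ErlDrvTermData caller)
    {
        ErlDrvTermData dport = driver_mk_port(prt);
        ErlDrvTermData spec[] = {
            ERL_DRV_ATOM,  driver_mk_atom((char *) "ok"),
            ERL_DRV_PORT,  dport,
            ERL_DRV_TUPLE, 2
        };
        return erl_drv_send_term(dport, caller, spec,
                                 sizeof(spec)/sizeof(spec[0]));
    }
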
@@ -1973,7 +1954,7 @@ static int async_ok(inet_descriptor* desc)
if (deq_async(desc, &aid, &caller, &req) < 0)
return -1;
- return send_async_ok(desc->port, desc->dport, aid, caller);
+ return send_async_ok(desc->dport, aid, caller);
}
static int async_ok_port(inet_descriptor* desc, ErlDrvTermData Port2)
@@ -1984,7 +1965,7 @@ static int async_ok_port(inet_descriptor* desc, ErlDrvTermData Port2)
if (deq_async(desc, &aid, &caller, &req) < 0)
return -1;
- return send_async_ok_port(desc->port, desc->dport, aid, caller, Port2);
+ return send_async_ok_port(desc->dport, aid, caller, Port2);
}
static int async_error_am(inet_descriptor* desc, ErlDrvTermData reason)
@@ -1995,8 +1976,7 @@ static int async_error_am(inet_descriptor* desc, ErlDrvTermData reason)
if (deq_async(desc, &aid, &caller, &req) < 0)
return -1;
- return send_async_error(desc->port, desc->dport, aid, caller,
- reason);
+ return send_async_error(desc->dport, aid, caller, reason);
}
/* dequeue all operations */
@@ -2007,8 +1987,7 @@ static int async_error_am_all(inet_descriptor* desc, ErlDrvTermData reason)
ErlDrvTermData caller;
while (deq_async(desc, &aid, &caller, &req) == 0) {
- send_async_error(desc->port, desc->dport, aid, caller,
- reason);
+ send_async_error(desc->dport, aid, caller, reason);
}
return 0;
}
@@ -2036,7 +2015,7 @@ static int inet_reply_ok(inet_descriptor* desc)
ASSERT(i == sizeof(spec)/sizeof(*spec));
desc->caller = 0;
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
#ifdef HAVE_SCTP
@@ -2055,7 +2034,7 @@ static int inet_reply_ok_port(inet_descriptor* desc, ErlDrvTermData dport)
ASSERT(i == sizeof(spec)/sizeof(*spec));
desc->caller = 0;
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
#endif
@@ -2078,7 +2057,7 @@ static int inet_reply_error_am(inet_descriptor* desc, ErlDrvTermData reason)
desc->caller = 0;
DEBUGF(("inet_reply_error_am %ld %ld\r\n", caller, reason));
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
/* send:
@@ -2187,12 +2166,12 @@ static int http_response_inetdrv(void *arg, int major, int minor,
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i<=27);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i<=27);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2284,12 +2263,12 @@ http_request_inetdrv(void* arg, const http_atom_t* meth, const char* meth_ptr,
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 43);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 43);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2338,12 +2317,12 @@ http_header_inetdrv(void* arg, const http_atom_t* name, const char* name_ptr,
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 26);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 26);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2369,7 +2348,7 @@ static int http_eoh_inetdrv(void* arg)
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 14);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
/* {http, S, http_eoh} */
@@ -2378,7 +2357,7 @@ static int http_eoh_inetdrv(void* arg)
i = LOAD_ATOM(spec, i, am_http_eoh);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 14);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2406,7 +2385,7 @@ static int http_error_inetdrv(void* arg, const char* buf, int len)
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 19);
- return driver_send_term(desc->inet.port, caller, spec, i);
+ return erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
/* {http, S, {http_error,Line} */
@@ -2417,7 +2396,7 @@ static int http_error_inetdrv(void* arg, const char* buf, int len)
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 19);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
}
@@ -2470,11 +2449,11 @@ int ssl_tls_inetdrv(void* arg, unsigned type, unsigned major, unsigned minor,
i = LOAD_TUPLE(spec, i, 2);
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 28);
- ret = driver_send_term(desc->inet.port, caller, spec, i);
+ ret = erl_drv_send_term(desc->inet.dport, caller, spec, i);
}
else {
ASSERT(i <= 28);
- ret = driver_output_term(desc->inet.port, spec, i);
+ ret = erl_drv_output_term(desc->inet.dport, spec, i);
}
done:
driver_free_binary(bin);
@@ -2524,7 +2503,7 @@ static int inet_async_data(inet_descriptor* desc, const char* buf, int len)
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i == 15);
desc->caller = 0;
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
else {
/* INET_MODE_BINARY => [H1,H2,...HSz | Binary] */
@@ -2538,7 +2517,7 @@ static int inet_async_data(inet_descriptor* desc, const char* buf, int len)
i = LOAD_TUPLE(spec, i, 4);
ASSERT(i <= 20);
desc->caller = 0;
- code = driver_send_term(desc->port, caller, spec, i);
+ code = erl_drv_send_term(desc->dport, caller, spec, i);
return code;
}
}
@@ -3131,7 +3110,7 @@ inet_async_binary_data
ASSERT(i <= PACKET_ERL_DRV_TERM_DATA_LEN);
desc->caller = 0;
- return driver_send_term(desc->port, caller, spec, i);
+ return erl_drv_send_term(desc->dport, caller, spec, i);
}
/*
@@ -3154,7 +3133,7 @@ static int tcp_message(inet_descriptor* desc, const char* buf, int len)
i = LOAD_STRING(spec, i, buf, len); /* => [H1,H2,...Hn] */
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 20);
- return driver_output_term(desc->port, spec, i);
+ return erl_drv_output_term(desc->dport, spec, i);
}
else {
/* INET_MODE_BINARY => [H1,H2,...HSz | Binary] */
@@ -3166,7 +3145,7 @@ static int tcp_message(inet_descriptor* desc, const char* buf, int len)
i = LOAD_STRING_CONS(spec, i, buf, hsz);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 20);
- code = driver_output_term(desc->port, spec, i);
+ code = erl_drv_output_term(desc->dport, spec, i);
return code;
}
}
@@ -3201,7 +3180,7 @@ tcp_binary_message(inet_descriptor* desc, ErlDrvBinary* bin, int offs, int len)
}
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 20);
- return driver_output_term(desc->port, spec, i);
+ return erl_drv_output_term(desc->dport, spec, i);
}
/*
@@ -3220,7 +3199,7 @@ static int tcp_closed_message(tcp_descriptor* desc)
i = LOAD_PORT(spec, i, desc->inet.dport);
i = LOAD_TUPLE(spec, i, 2);
ASSERT(i <= 6);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
return 0;
}
@@ -3241,7 +3220,7 @@ static int tcp_error_message(tcp_descriptor* desc, int err)
i = LOAD_ATOM(spec, i, am_err);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i <= 8);
- return driver_output_term(desc->inet.port, spec, i);
+ return erl_drv_output_term(desc->inet.dport, spec, i);
}
/*
@@ -3332,7 +3311,7 @@ static int packet_binary_message
/* Close up the outer 5-tuple: */
i = LOAD_TUPLE(spec, i, 5);
ASSERT(i <= PACKET_ERL_DRV_TERM_DATA_LEN);
- return driver_output_term(desc->port, spec, i);
+ return erl_drv_output_term(desc->dport, spec, i);
}
/*
@@ -3359,21 +3338,10 @@ static int packet_error_message(udp_descriptor* udesc, int err)
i = LOAD_ATOM(spec, i, am_err);
i = LOAD_TUPLE(spec, i, 3);
ASSERT(i == sizeof(spec)/sizeof(*spec));
- return driver_output_term(desc->port, spec, i);
+ return erl_drv_output_term(desc->dport, spec, i);
}
-/* scan buffer for bit 7 */
-static void scanbit8(inet_descriptor* desc, const char* buf, int len)
-{
- int c;
-
- if (!desc->bit8f || desc->bit8) return;
- c = 0;
- while(len--) c |= *buf++;
- desc->bit8 = ((c & 0x80) != 0);
-}
-
/*
** active=TRUE:
** (NOTE! distribution MUST use active=TRUE, deliver=PORT)
@@ -3391,8 +3359,6 @@ static int tcp_reply_data(tcp_descriptor* desc, char* buf, int len)
packet_get_body(desc->inet.htype, &body, &bodylen);
- scanbit8(INETP(desc), body, bodylen);
-
if (desc->inet.deliver == INET_DELIVER_PORT) {
code = inet_port_data(INETP(desc), body, bodylen);
}
@@ -3424,8 +3390,6 @@ tcp_reply_binary_data(tcp_descriptor* desc, ErlDrvBinary* bin, int offs, int len
packet_get_body(desc->inet.htype, &body, &bodylen);
offs = body - bin->orig_bytes; /* body offset now */
- scanbit8(INETP(desc), body, bodylen);
-
if (desc->inet.deliver == INET_DELIVER_PORT)
code = inet_port_binary_data(INETP(desc), bin, offs, bodylen);
else if ((code=packet_parse(desc->inet.htype, buf, len, &desc->http_state,
@@ -3451,8 +3415,6 @@ packet_reply_binary_data(inet_descriptor* desc, unsigned int hsz,
{
int code;
- scanbit8(desc, bin->orig_bytes+offs, len);
-
if (desc->active == INET_PASSIVE)
/* "inet" is actually for both UDP and SCTP, as well as TCP! */
return inet_async_binary_data(desc, hsz, bin, offs, len, extra);
@@ -3527,6 +3489,7 @@ static void inet_init_sctp(void) {
INIT_ATOM(dontroute);
INIT_ATOM(priority);
INIT_ATOM(tos);
+ INIT_ATOM(ipv6_v6only);
/* Option names */
INIT_ATOM(sctp_rtoinfo);
@@ -5313,50 +5276,6 @@ static ErlDrvSSizeT inet_ctl_getifaddrs(inet_descriptor* desc_p,
#endif
-
-
-#ifdef VXWORKS
-/*
-** THIS is a terrible creature, a bug in the TCP part
-** of the old VxWorks stack (non SENS) created a race.
-** If (and only if?) a socket got closed from the other
-** end and we tried a set/getsockopt on the TCP level,
-** the task would generate a bus error...
-*/
-static STATUS wrap_sockopt(STATUS (*function)() /* Yep, no parameter
- check */,
- int s, int level, int optname,
- char *optval, unsigned int optlen
- /* optlen is a pointer if function
- is getsockopt... */)
-{
- fd_set rs;
- struct timeval timeout;
- int to_read;
- int ret;
-
- FD_ZERO(&rs);
- FD_SET(s,&rs);
- memset(&timeout,0,sizeof(timeout));
- if (level == IPPROTO_TCP) {
- taskLock();
- if (select(s+1,&rs,NULL,NULL,&timeout)) {
- if (ioctl(s,FIONREAD,(int)&to_read) == ERROR ||
- to_read == 0) { /* End of file, other end closed? */
- sock_errno() = EBADF;
- taskUnlock();
- return ERROR;
- }
- }
- ret = (*function)(s,level,optname,optval,optlen);
- taskUnlock();
- } else {
- ret = (*function)(s,level,optname,optval,optlen);
- }
- return ret;
-}
-#endif
-
/* Per H @ Tail-f: The original code here had problems that possibly
only occur if you abuse it for non-INET sockets, but anyway:
a) If the getsockopt for SO_PRIORITY or IP_TOS failed, the actual
@@ -5382,13 +5301,8 @@ static int setopt_prio_tos_trick
int res;
int res_prio;
int res_tos;
-#ifdef HAVE_SOCKLEN_T
- socklen_t
-#else
- int
-#endif
- tmp_arg_sz_prio = sizeof(tmp_ival_prio),
- tmp_arg_sz_tos = sizeof(tmp_ival_tos);
+ SOCKLEN_T tmp_arg_sz_prio = sizeof(tmp_ival_prio);
+ SOCKLEN_T tmp_arg_sz_tos = sizeof(tmp_ival_tos);
res_prio = sock_getopt(fd, SOL_SOCKET, SO_PRIORITY,
(char *) &tmp_ival_prio, &tmp_arg_sz_prio);
@@ -5532,29 +5446,6 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
desc->exitf = ival;
continue;
- case INET_LOPT_BIT8:
- DEBUGF(("inet_set_opts(%ld): s=%d, BIT8=%d\r\n",
- (long)desc->port, desc->s, ival));
- switch(ival) {
- case INET_BIT8_ON:
- desc->bit8f = 1;
- desc->bit8 = 0;
- break;
- case INET_BIT8_OFF:
- desc->bit8f = 0;
- desc->bit8 = 0;
- break;
- case INET_BIT8_CLEAR:
- desc->bit8f = 1;
- desc->bit8 = 0;
- break;
- case INET_BIT8_SET:
- desc->bit8f = 1;
- desc->bit8 = 1;
- break;
- }
- continue;
-
case INET_LOPT_TCP_HIWTRMRK:
if (desc->stype == SOCK_STREAM) {
tcp_descriptor* tdesc = (tcp_descriptor*) desc;
@@ -5575,6 +5466,28 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
}
continue;
+ case INET_LOPT_TCP_MSGQ_HIWTRMRK:
+ if (desc->stype == SOCK_STREAM) {
+ ErlDrvSizeT high;
+ if (ival < ERL_DRV_BUSY_MSGQ_LIM_MIN
+ || ERL_DRV_BUSY_MSGQ_LIM_MAX < ival)
+ return -1;
+ high = (ErlDrvSizeT) ival;
+ erl_drv_busy_msgq_limits(desc->port, NULL, &high);
+ }
+ continue;
+
+ case INET_LOPT_TCP_MSGQ_LOWTRMRK:
+ if (desc->stype == SOCK_STREAM) {
+ ErlDrvSizeT low;
+ if (ival < ERL_DRV_BUSY_MSGQ_LIM_MIN
+ || ERL_DRV_BUSY_MSGQ_LIM_MAX < ival)
+ return -1;
+ low = (ErlDrvSizeT) ival;
+ erl_drv_busy_msgq_limits(desc->port, &low, NULL);
+ }
+ continue;
+
case INET_LOPT_TCP_SEND_TIMEOUT:
if (desc->stype == SOCK_STREAM) {
tcp_descriptor* tdesc = (tcp_descriptor*) desc;
@@ -5636,23 +5549,11 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
case INET_OPT_SNDBUF: type = SO_SNDBUF;
DEBUGF(("inet_set_opts(%ld): s=%d, SO_SNDBUF=%d\r\n",
(long)desc->port, desc->s, ival));
- /*
- * Setting buffer sizes in VxWorks gives unexpected results
- * our workaround is to leave it at default.
- */
-#ifdef VXWORKS
- goto skip_os_setopt;
-#else
break;
-#endif
case INET_OPT_RCVBUF: type = SO_RCVBUF;
DEBUGF(("inet_set_opts(%ld): s=%d, SO_RCVBUF=%d\r\n",
(long)desc->port, desc->s, ival));
-#ifdef VXWORKS
- goto skip_os_setopt;
-#else
break;
-#endif
case INET_OPT_LINGER: type = SO_LINGER;
if (len < 4)
return -1;
@@ -5743,6 +5644,23 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
#endif /* HAVE_MULTICAST_SUPPORT */
+ case INET_OPT_IPV6_V6ONLY:
+#if HAVE_DECL_IPV6_V6ONLY
+ proto = IPPROTO_IPV6;
+ type = IPV6_V6ONLY;
+ propagate = 1;
+ DEBUGF(("inet_set_opts(%ld): s=%d, IPV6_V6ONLY=%d\r\n",
+ (long)desc->port, desc->s, ival));
+ break;
+#elif defined(__WIN32__) && defined(HAVE_IN6) && defined(AF_INET6)
+ /* Fake a'la OpenBSD; set to 'true' is fine but 'false' invalid. */
+ if (ival != 0) continue;
+ else return -1;
+ break;
+#else
+ continue;
+#endif
+
case INET_OPT_RAW:
if (len < 8) {
return -1;
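INET_OPT_IPV6_V6ONLY (option code 16 above) maps onto the standard IPPROTO_IPV6/IPV6_V6ONLY socket option when HAVE_DECL_IPV6_V6ONLY is set, is faked as always-true on Windows IPv6 builds (as on OpenBSD), and is silently ignored elsewhere. The underlying call the emulator ends up making is an ordinary setsockopt; a standalone POSIX example (not driver code):

    #include <sys/socket.h>
    #include <netinet/in.h>

    /* Restrict an AF_INET6 socket to IPv6 only (no v4-mapped addresses). */
    static int set_v6only(int fd, int enable)
    {
        return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY,
                          &enable, sizeof(enable));
    }
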
@@ -5774,9 +5692,6 @@ static int inet_set_opts(inet_descriptor* desc, char* ptr, int len)
}
DEBUGF(("inet_set_opts(%ld): s=%d returned %d\r\n",
(long)desc->port, desc->s, res));
-#ifdef VXWORKS
-skip_os_setopt:
-#endif
if (type == SO_RCVBUF) {
/* make sure we have desc->bufsz >= SO_RCVBUF */
if (ival > desc->bufsz)
@@ -6075,6 +5990,22 @@ static int sctp_set_opts(inet_descriptor* desc, char* ptr, int len)
continue; /* Option not supported -- ignore it */
# endif
+ case INET_OPT_IPV6_V6ONLY:
+# if HAVE_DECL_IPV6_V6ONLY
+ {
+ arg.ival= get_int32 (curr); curr += 4;
+ proto = IPPROTO_IPV6;
+ type = IPV6_V6ONLY;
+ arg_ptr = (char*) (&arg.ival);
+ arg_sz = sizeof ( arg.ival);
+ break;
+ }
+# elif defined(__WIN32__) && defined(HAVE_IN6) && defined(AF_INET6)
+# error Here is a fix for Win IPv6 SCTP missing
+# else
+ continue; /* Option not supported -- ignore it */
+# endif
+
case SCTP_OPT_AUTOCLOSE:
{
arg.ival= get_int32 (curr); curr += 4;
@@ -6437,15 +6368,6 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
put_int32(desc->exitf, ptr);
continue;
- case INET_LOPT_BIT8:
- *ptr++ = opt;
- if (desc->bit8f) {
- put_int32(desc->bit8, ptr);
- } else {
- put_int32(INET_BIT8_OFF, ptr);
- }
- continue;
-
case INET_LOPT_TCP_HIWTRMRK:
if (desc->stype == SOCK_STREAM) {
*ptr++ = opt;
@@ -6466,6 +6388,32 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
}
continue;
+ case INET_LOPT_TCP_MSGQ_HIWTRMRK:
+ if (desc->stype == SOCK_STREAM) {
+ ErlDrvSizeT high = ERL_DRV_BUSY_MSGQ_READ_ONLY;
+ *ptr++ = opt;
+ erl_drv_busy_msgq_limits(desc->port, NULL, &high);
+ ival = high > INT_MAX ? INT_MAX : (int) high;
+ put_int32(ival, ptr);
+ }
+ else {
+ TRUNCATE_TO(0,ptr);
+ }
+ continue;
+
+ case INET_LOPT_TCP_MSGQ_LOWTRMRK:
+ if (desc->stype == SOCK_STREAM) {
+ ErlDrvSizeT low = ERL_DRV_BUSY_MSGQ_READ_ONLY;
+ *ptr++ = opt;
+ erl_drv_busy_msgq_limits(desc->port, &low, NULL);
+ ival = low > INT_MAX ? INT_MAX : (int) low;
+ put_int32(ival, ptr);
+ }
+ else {
+ TRUNCATE_TO(0,ptr);
+ }
+ continue;
+
case INET_LOPT_TCP_SEND_TIMEOUT:
if (desc->stype == SOCK_STREAM) {
*ptr++ = opt;
@@ -6572,6 +6520,22 @@ static ErlDrvSSizeT inet_fill_opts(inet_descriptor* desc,
break;
#endif /* HAVE_MULTICAST_SUPPORT */
+ case INET_OPT_IPV6_V6ONLY:
+#if HAVE_DECL_IPV6_V6ONLY
+ proto = IPPROTO_IPV6;
+ type = IPV6_V6ONLY;
+ break;
+#elif defined(__WIN32__) && defined(HAVE_IN6) && defined(AF_INET6)
+ /* Fake reading 'true' */
+ *ptr++ = opt;
+ put_int32(1, ptr);
+ ptr += 4;
+ continue;
+#else
+ TRUNCATE_TO(0,ptr);
+ continue; /* skip - no result */
+#endif
+
case INET_OPT_RAW:
{
int data_provided;
@@ -6876,6 +6840,7 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
case INET_OPT_DONTROUTE:
case INET_OPT_PRIORITY :
case INET_OPT_TOS :
+ case INET_OPT_IPV6_V6ONLY:
case SCTP_OPT_AUTOCLOSE:
case SCTP_OPT_MAXSEG :
/* The following options return true or false: */
@@ -6948,6 +6913,20 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
continue;
# endif
}
+ case INET_OPT_IPV6_V6ONLY:
+# if HAVE_DECL_IPV6_V6ONLY
+ {
+ proto = IPPROTO_IPV6;
+ type = IPV6_V6ONLY;
+ tag = am_ipv6_v6only;
+ break;
+ }
+# elif defined(__WIN32__) && defined(HAVE_IN6) && defined(AF_INET6)
+# error Here is a fix for Win IPv6 SCTP needed
+# else
+ /* Not supported -- ignore */
+ continue;
+# endif
case SCTP_OPT_AUTOCLOSE:
{
proto = IPPROTO_SCTP;
@@ -7348,7 +7327,7 @@ static ErlDrvSSizeT sctp_fill_opts(inet_descriptor* desc,
i = LOAD_TUPLE(spec, i, 3);
/* Now, convert "spec" into the returnable term: */
- driver_send_term(desc->port, driver_caller(desc->port), spec, i);
+ erl_drv_send_term(desc->dport, driver_caller(desc->port), spec, i);
FREE(spec);
(*dest)[0] = INET_REP;
@@ -7430,7 +7409,7 @@ send_empty_out_q_msgs(inet_descriptor* desc)
ASSERT(msg_len == sizeof(msg)/sizeof(*msg));
- send_to_subscribers(desc->port,
+ send_to_subscribers(desc->dport,
&desc->empty_out_q_subs,
1,
msg,
@@ -7504,8 +7483,6 @@ static ErlDrvData inet_start(ErlDrvPort port, int size, int protocol)
desc->mode = INET_MODE_LIST; /* list mode */
desc->exitf = 1; /* exit port when close on active
socket */
- desc->bit8f = 0;
- desc->bit8 = 0;
desc->deliver = INET_DELIVER_TERM; /* standard term format */
desc->active = INET_PASSIVE; /* start passive */
desc->oph = NULL;
@@ -7813,8 +7790,8 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf,
desc->state = INET_STATE_BOUND;
if ((port = inet_address_port(&local)) == 0) {
- len = sizeof(local);
- sock_name(desc->s, (struct sockaddr*) &local, (unsigned int*)&len);
+ SOCKLEN_T adrlen = sizeof(local);
+ sock_name(desc->s, &local.sa, &adrlen);
port = inet_address_port(&local);
}
port = sock_ntohs(port);
@@ -7849,8 +7826,6 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf,
return ctl_reply(INET_REP_OK, NULL, 0, rbuf, rsize);
}
-#ifndef VXWORKS
-
case INET_REQ_GETSERVBYNAME: { /* L1 Name-String L2 Proto-String */
char namebuf[256];
char protobuf[256];
@@ -7901,8 +7876,6 @@ static ErlDrvSSizeT inet_ctl(inet_descriptor* desc, int cmd, char* buf,
return ctl_reply(INET_REP_OK, srv->s_name, len, rbuf, rsize);
}
-#endif /* !VXWORKS */
-
default:
return ctl_xerror(EXBADPORT, rbuf, rsize);
}
@@ -8083,6 +8056,7 @@ static int tcp_inet_init(void)
static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args)
{
+ ErlDrvSizeT q_low, q_high;
tcp_descriptor* desc;
DEBUGF(("tcp_inet_start(%ld) {\r\n", (long)port));
@@ -8092,6 +8066,17 @@ static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args)
return ERL_DRV_ERROR_ERRNO;
desc->high = INET_HIGH_WATERMARK;
desc->low = INET_LOW_WATERMARK;
+ q_high = INET_HIGH_MSGQ_WATERMARK;
+ q_low = INET_LOW_MSGQ_WATERMARK;
+ if (q_low < ERL_DRV_BUSY_MSGQ_LIM_MIN)
+ q_low = ERL_DRV_BUSY_MSGQ_LIM_MIN;
+ else if (q_low > ERL_DRV_BUSY_MSGQ_LIM_MAX)
+ q_low = ERL_DRV_BUSY_MSGQ_LIM_MAX;
+ if (q_high < ERL_DRV_BUSY_MSGQ_LIM_MIN)
+ q_high = ERL_DRV_BUSY_MSGQ_LIM_MIN;
+ else if (q_high > ERL_DRV_BUSY_MSGQ_LIM_MAX)
+ q_high = ERL_DRV_BUSY_MSGQ_LIM_MAX;
+ erl_drv_busy_msgq_limits(port, &q_low, &q_high);
desc->send_timeout = INET_INFINITY;
desc->send_timeout_close = 0;
desc->busy_on_send = 0;
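tcp_inet_start() now also configures the driver-level busy message queue: the INET_HIGH_MSGQ_WATERMARK/INET_LOW_MSGQ_WATERMARK defaults are clamped into [ERL_DRV_BUSY_MSGQ_LIM_MIN, ERL_DRV_BUSY_MSGQ_LIM_MAX] and installed with erl_drv_busy_msgq_limits(), the same call the new INET_LOPT_TCP_MSGQ_HIWTRMRK/LOWTRMRK options go through (a NULL argument leaves that limit untouched, and passing ERL_DRV_BUSY_MSGQ_READ_ONLY reads a limit back without changing it). A condensed sketch of the clamp-then-install step for an already opened port (hypothetical helper):

    #include "erl_driver.h"

    static void install_msgq_limits(ErlDrvPort port,
                                    ErlDrvSizeT low, ErlDrvSizeT high)
    {
        /* keep both limits inside the range the runtime accepts */
        if (low  < ERL_DRV_BUSY_MSGQ_LIM_MIN) low  = ERL_DRV_BUSY_MSGQ_LIM_MIN;
        if (low  > ERL_DRV_BUSY_MSGQ_LIM_MAX) low  = ERL_DRV_BUSY_MSGQ_LIM_MAX;
        if (high < ERL_DRV_BUSY_MSGQ_LIM_MIN) high = ERL_DRV_BUSY_MSGQ_LIM_MIN;
        if (high > ERL_DRV_BUSY_MSGQ_LIM_MAX) high = ERL_DRV_BUSY_MSGQ_LIM_MAX;
        erl_drv_busy_msgq_limits(port, &low, &high);
    }
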
@@ -8115,6 +8100,7 @@ static ErlDrvData tcp_inet_start(ErlDrvPort port, char* args)
static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
ErlDrvTermData owner, int* err)
{
+ ErlDrvSizeT q_low, q_high;
ErlDrvPort port = desc->inet.port;
tcp_descriptor* copy_desc;
@@ -8133,7 +8119,6 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
/* Some flags must be inherited at this point */
copy_desc->inet.mode = desc->inet.mode;
copy_desc->inet.exitf = desc->inet.exitf;
- copy_desc->inet.bit8f = desc->inet.bit8f;
copy_desc->inet.deliver = desc->inet.deliver;
copy_desc->inet.htype = desc->inet.htype;
copy_desc->inet.psize = desc->inet.psize;
@@ -8153,6 +8138,13 @@ static tcp_descriptor* tcp_inet_copy(tcp_descriptor* desc,SOCKET s,
FREE(copy_desc);
return NULL;
}
+
+ /* Read busy msgq limits of parent */
+ q_low = q_high = ERL_DRV_BUSY_MSGQ_READ_ONLY;
+ erl_drv_busy_msgq_limits(desc->inet.port, &q_low, &q_high);
+ /* Write same busy msgq limits to child */
+ erl_drv_busy_msgq_limits(port, &q_low, &q_high);
+
copy_desc->inet.port = port;
copy_desc->inet.dport = driver_mk_port(port);
*err = 0;
@@ -8185,7 +8177,7 @@ static void tcp_close_check(tcp_descriptor* desc)
desc->inet.state = INET_STATE_LISTENING;
while (deq_multi_op(desc,&id,&req,&caller,NULL,&monitor) == 0) {
driver_demonitor_process(desc->inet.port, &monitor);
- send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_closed);
+ send_async_error(desc->inet.dport, id, caller, am_closed);
}
clean_multi_timers(&(desc->mtd), desc->inet.port);
}
@@ -8609,7 +8601,7 @@ static void tcp_inet_multi_timeout(ErlDrvData e, ErlDrvTermData caller)
sock_select(INETP(desc),FD_ACCEPT,0);
desc->inet.state = INET_STATE_LISTENING; /* restore state */
}
- send_async_error(desc->inet.port, desc->inet.dport, id, caller, am_timeout);
+ send_async_error(desc->inet.dport, id, caller, am_timeout);
}
@@ -9350,7 +9342,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
if (s == INVALID_SOCKET) { /* Not ERRNO_BLOCK, that's handled right away */
- ret = send_async_error(desc->inet.port, desc->inet.dport,
+ ret = send_async_error(desc->inet.dport,
id, caller, error_atom(sock_errno()));
goto done;
}
@@ -9360,7 +9352,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
if ((accept_desc = tcp_inet_copy(desc,s,caller,&err)) == NULL) {
sock_close(s);
- ret = send_async_error(desc->inet.port, desc->inet.dport,
+ ret = send_async_error(desc->inet.dport,
id, caller, error_atom(err));
goto done;
}
@@ -9371,7 +9363,7 @@ static int tcp_inet_input(tcp_descriptor* desc, HANDLE event)
ERL_DRV_READ, 1);
#endif
accept_desc->inet.state = INET_STATE_CONNECTED;
- ret = send_async_ok_port(desc->inet.port, desc->inet.dport,
+ ret = send_async_ok_port(desc->inet.dport,
id, caller, accept_desc->inet.dport);
}
}
@@ -9838,7 +9830,6 @@ static udp_descriptor* sctp_inet_copy(udp_descriptor* desc, SOCKET s, int* err)
/* Some flags must be inherited at this point */
copy_desc->inet.mode = desc->inet.mode;
copy_desc->inet.exitf = desc->inet.exitf;
- copy_desc->inet.bit8f = desc->inet.bit8f;
copy_desc->inet.deliver = desc->inet.deliver;
copy_desc->inet.htype = desc->inet.htype;
copy_desc->inet.psize = desc->inet.psize;
@@ -10240,6 +10231,7 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf,
}
new_udesc->inet.state = INET_STATE_CONNECTED;
new_udesc->inet.stype = SOCK_STREAM;
+ SET_NONBLOCKING(new_udesc->inet.s);
inet_reply_ok_port(desc, new_udesc->inet.dport);
(*rbuf)[0] = INET_REP;
@@ -11013,7 +11005,7 @@ subs_list *subs;
static void send_to_subscribers
(
- ErlDrvPort port,
+ ErlDrvTermData port,
subs_list *subs,
int free_subs,
ErlDrvTermData msg[],
@@ -11030,7 +11022,7 @@ static void send_to_subscribers
this = subs;
while(this) {
- (void) driver_send_term(port, this->subscriber, msg, msg_len);
+ (void) erl_drv_send_term(port, this->subscriber, msg, msg_len);
if(free_subs && !first) {
next = this->next;
diff --git a/erts/emulator/drivers/unix/unix_efile.c b/erts/emulator/drivers/unix/unix_efile.c
index b250bac4dc..cf7af71b92 100644
--- a/erts/emulator/drivers/unix/unix_efile.c
+++ b/erts/emulator/drivers/unix/unix_efile.c
@@ -45,23 +45,6 @@
#include <fcntl.h>
#endif /* DARWIN */
-#ifdef VXWORKS
-#include <ioLib.h>
-#include <dosFsLib.h>
-#include <nfsLib.h>
-#include <sys/stat.h>
-/*
-** Not nice to include usrLib.h as MANY normal variable names get reported
-** as shadowing globals, like 'i' for example.
-** Instead we declare the only function we use here
-*/
-/*
- * #include <usrLib.h>
- */
-extern STATUS copy(char *, char *);
-#include <errno.h>
-#endif
-
#ifdef SUNOS4
# define getcwd(buf, size) getwd(buf)
#endif
@@ -93,276 +76,27 @@ extern STATUS copy(char *, char *);
#define DIR_MODE 0777
#endif
-#ifdef VXWORKS /* Currently only used on vxworks */
-
-#define EF_ALLOC(S) driver_alloc((S))
-#define EF_REALLOC(P, S) driver_realloc((P), (S))
-#define EF_SAFE_ALLOC(S) ef_safe_alloc((S))
-#define EF_SAFE_REALLOC(P, S) ef_safe_realloc((P), (S))
-#define EF_FREE(P) do { if((P)) driver_free((P)); } while(0)
-
-void erl_exit(int n, char *fmt, ...);
-
-static void *ef_safe_alloc(Uint s)
-{
- void *p = EF_ALLOC(s);
- if (!p) erl_exit(1,
- "unix efile drv: Can't allocate %lu bytes of memory\n",
- (unsigned long)s);
- return p;
-}
-
-#if 0 /* Currently not used */
-
-static void *ef_safe_realloc(void *op, Uint s)
-{
- void *p = EF_REALLOC(op, s);
- if (!p) erl_exit(1,
- "unix efile drv: Can't reallocate %lu bytes of memory\n",
- (unsigned long)s);
- return p;
-}
-
-#endif /* #if 0 */
-#endif /* #ifdef VXWORKS */
-
#define IS_DOT_OR_DOTDOT(s) \
(s[0] == '.' && (s[1] == '\0' || (s[1] == '.' && s[2] == '\0')))
-#ifdef VXWORKS
-static int vxworks_to_posix(int vx_errno);
-#endif
-
-/*
-** VxWorks (not) strikes again. Too long RESULTING paths
-** may give the infamous bus error. Have to check ALL
-** filenames and pathnames. No wonder the emulator is slow on
-** these cards...
-*/
-#ifdef VXWORKS
-#define CHECK_PATHLEN(Name, ErrInfo) \
- if (path_size(Name) > PATH_MAX) { \
- errno = ENAMETOOLONG; \
- return check_error(-1, ErrInfo); \
- }
-#else
-#define CHECK_PATHLEN(X,Y) /* Nothing */
-#endif
-
static int check_error(int result, Efile_error* errInfo);
static int
check_error(int result, Efile_error *errInfo)
{
if (result < 0) {
-#ifdef VXWORKS
- errInfo->posix_errno = errInfo->os_errno = vxworks_to_posix(errno);
-#else
errInfo->posix_errno = errInfo->os_errno = errno;
-#endif
return 0;
}
return 1;
}
-#ifdef VXWORKS
-
-/*
- * VxWorks has different error codes for different file systems.
- * We map those to POSIX ones.
- */
-static int
-vxworks_to_posix(int vx_errno)
-{
- DEBUGF(("[vxworks_to_posix] vx_errno: %08x\n", vx_errno));
- switch (vx_errno) {
- /* dosFsLib mapping */
-#ifdef S_dosFsLib_FILE_ALREADY_EXISTS
- case S_dosFsLib_FILE_ALREADY_EXISTS: return EEXIST;
-#else
- case S_dosFsLib_FILE_EXISTS: return EEXIST;
-#endif
-#ifdef S_dosFsLib_BAD_DISK
- case S_dosFsLib_BAD_DISK: return EIO;
-#endif
-#ifdef S_dosFsLib_CANT_CHANGE_ROOT
- case S_dosFsLib_CANT_CHANGE_ROOT: return EINVAL;
-#endif
-#ifdef S_dosFsLib_NO_BLOCK_DEVICE
- case S_dosFsLib_NO_BLOCK_DEVICE: return ENOTBLK;
-#endif
-#ifdef S_dosFsLib_BAD_SEEK
- case S_dosFsLib_BAD_SEEK: return ESPIPE;
-#endif
- case S_dosFsLib_VOLUME_NOT_AVAILABLE: return ENXIO;
- case S_dosFsLib_DISK_FULL: return ENOSPC;
- case S_dosFsLib_FILE_NOT_FOUND: return ENOENT;
- case S_dosFsLib_NO_FREE_FILE_DESCRIPTORS: return ENFILE;
- case S_dosFsLib_INVALID_NUMBER_OF_BYTES: return EINVAL;
- case S_dosFsLib_ILLEGAL_NAME: return EINVAL;
- case S_dosFsLib_CANT_DEL_ROOT: return EACCES;
- case S_dosFsLib_NOT_FILE: return EISDIR;
- case S_dosFsLib_NOT_DIRECTORY: return ENOTDIR;
- case S_dosFsLib_NOT_SAME_VOLUME: return EXDEV;
- case S_dosFsLib_READ_ONLY: return EACCES;
- case S_dosFsLib_ROOT_DIR_FULL: return ENOSPC;
- case S_dosFsLib_DIR_NOT_EMPTY: return EEXIST;
- case S_dosFsLib_NO_LABEL: return ENXIO;
- case S_dosFsLib_INVALID_PARAMETER: return EINVAL;
- case S_dosFsLib_NO_CONTIG_SPACE: return ENOSPC;
- case S_dosFsLib_FD_OBSOLETE: return EBADF;
- case S_dosFsLib_DELETED: return EINVAL;
- case S_dosFsLib_INTERNAL_ERROR: return EIO;
- case S_dosFsLib_WRITE_ONLY: return EACCES;
- /* nfsLib mapping - is needed since Windriver has used */
- /* inconsistent error codes (errno.h/nfsLib.h). */
- case S_nfsLib_NFS_OK: return 0;
- case S_nfsLib_NFSERR_PERM: return EPERM;
- case S_nfsLib_NFSERR_NOENT: return ENOENT;
- case S_nfsLib_NFSERR_IO: return EIO;
- case S_nfsLib_NFSERR_NXIO: return ENXIO;
-#ifdef S_nfsLib_NFSERR_ACCES
- case S_nfsLib_NFSERR_ACCES: return EACCES;
-#else
- case S_nfsLib_NFSERR_ACCESS: return EACCES;
-#endif
- case S_nfsLib_NFSERR_EXIST: return EEXIST;
- case S_nfsLib_NFSERR_NODEV: return ENODEV;
- case S_nfsLib_NFSERR_NOTDIR: return ENOTDIR;
- case S_nfsLib_NFSERR_ISDIR: return EISDIR;
- case S_nfsLib_NFSERR_FBIG: return EFBIG;
- case S_nfsLib_NFSERR_NOSPC: return ENOSPC;
- case S_nfsLib_NFSERR_ROFS: return EROFS;
- case S_nfsLib_NFSERR_NAMETOOLONG: return ENAMETOOLONG;
- case S_nfsLib_NFSERR_NOTEMPTY: return EEXIST;
- case S_nfsLib_NFSERR_DQUOT: return ENOSPC;
- case S_nfsLib_NFSERR_STALE: return EINVAL;
- case S_nfsLib_NFSERR_WFLUSH: return ENXIO;
- /* And sometimes (...) the error codes are from ioLib (as in the */
- /* case of the (for nfsLib) unimplemented rename function) */
- case S_ioLib_DISK_NOT_PRESENT: return EIO;
-#if S_ioLib_DISK_NOT_PRESENT != S_ioLib_NO_DRIVER
- case S_ioLib_NO_DRIVER: return ENXIO;
-#endif
- case S_ioLib_UNKNOWN_REQUEST: return ENOSYS;
- case S_ioLib_DEVICE_TIMEOUT: return EIO;
-#ifdef S_ioLib_UNFORMATED
- /* Added (VxWorks 5.2 -> 5.3.1) */
- #if S_ioLib_UNFORMATED != S_ioLib_DEVICE_TIMEOUT
- case S_ioLib_UNFORMATED: return EIO;
- #endif
-#endif
-#if S_ioLib_DEVICE_TIMEOUT != S_ioLib_DEVICE_ERROR
- case S_ioLib_DEVICE_ERROR: return ENXIO;
-#endif
- case S_ioLib_WRITE_PROTECTED: return EACCES;
- case S_ioLib_NO_FILENAME: return EINVAL;
- case S_ioLib_CANCELLED: return EINTR;
- case S_ioLib_NO_DEVICE_NAME_IN_PATH: return EINVAL;
- case S_ioLib_NAME_TOO_LONG: return ENAMETOOLONG;
-#ifdef S_objLib_OBJ_UNAVAILABLE
- case S_objLib_OBJ_UNAVAILABLE: return ENOENT;
-#endif
-
- /* Temporary workaround for a weird error in passFs
- (VxWorks Simsparc only). File operation fails because of
- ENOENT, but errno is not set. */
-#ifdef SIMSPARCSOLARIS
- case 0: return ENOENT;
-#endif
-
- }
- /* If the error code matches none of the above, assume */
- /* it is a POSIX one already. The upper bits (>=16) are */
- /* cleared since VxWorks uses those bits to indicate in */
- /* what module the error occured. */
- return vx_errno & 0xffff;
-}
-
-static int
-vxworks_enotsup(Efile_error *errInfo)
-{
- errInfo->posix_errno = errInfo->os_errno = ENOTSUP;
- return 0;
-}
-
-static int
-count_path_length(char *pathname, char *pathname2)
-{
- static int stack[PATH_MAX / 2 + 1];
- int sp = 0;
- char *tmp;
- char *cpy = NULL;
- int i;
- int sum;
- for(i = 0;i < 2;++i) {
- if (!i) {
- cpy = EF_SAFE_ALLOC(strlen(pathname)+1);
- strcpy(cpy, pathname);
- } else if (pathname2 != NULL) {
- EF_FREE(cpy);
- cpy = EF_SAFE_ALLOC(strlen(pathname2)+1);
- strcpy(cpy, pathname2);
- } else
- break;
-
- for (tmp = strtok(cpy,"/"); tmp != NULL; tmp = strtok(NULL,"/")) {
- if (!strcmp(tmp,"..") && sp > 0)
- --sp;
- else if (strcmp(tmp,"."))
- stack[sp++] = strlen(tmp);
- }
- }
- if (cpy != NULL)
- EF_FREE(cpy);
- sum = 0;
- for(i = 0;i < sp; ++i)
- sum += stack[i]+1;
- return (sum) ? sum : 1;
-}
-
-static int
-path_size(char *pathname)
-{
- static char currdir[PATH_MAX+2];
- if (*pathname == '/')
- return count_path_length(pathname,NULL);
- ioDefPathGet(currdir);
- strcat(currdir,"/");
- return count_path_length(currdir,pathname);
-}
-
-#endif /* VXWORKS */
-
int
efile_mkdir(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of directory to create. */
{
- CHECK_PATHLEN(name,errInfo);
#ifdef NO_MKDIR_MODE
-#ifdef VXWORKS
- /* This is a VxWorks/nfs workaround for erl_tar to create
- * non-existant directories. (of some reason (...) VxWorks
- * returns, the *non-module-prefixed*, 0xd code when
- * trying to create a directory in a directory that doesn't exist).
- * (see efile_openfile)
- */
- if (mkdir(name) < 0) {
- struct stat sb;
- if (name[0] == '\0') {
- /* Return the correct error code enoent */
- errno = S_nfsLib_NFSERR_NOENT;
- } else if (stat(name, &sb) == OK) {
- errno = S_nfsLib_NFSERR_EXIST;
- } else if((strchr(name, '/') != NULL) && (errno == 0xd)) {
- /* Return the correct error code enoent */
- errno = S_nfsLib_NFSERR_NOENT;
- }
- return check_error(-1, errInfo);
- } else return 1;
-#else
return check_error(mkdir(name), errInfo);
-#endif
#else
return check_error(mkdir(name, DIR_MODE), errInfo);
#endif
@@ -372,16 +106,9 @@ int
efile_rmdir(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of directory to delete. */
{
- CHECK_PATHLEN(name, errInfo);
if (rmdir(name) == 0) {
return 1;
}
-#ifdef VXWORKS
- if (name[0] == '\0') {
- /* Return the correct error code enoent */
- errno = S_nfsLib_NFSERR_NOENT;
- }
-#else
if (errno == ENOTEMPTY) {
errno = EEXIST;
}
@@ -401,7 +128,6 @@ efile_rmdir(Efile_error* errInfo, /* Where to return error codes. */
}
errno = saved_errno;
}
-#endif
return check_error(-1, errInfo);
}
@@ -409,7 +135,6 @@ int
efile_delete_file(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of file to delete. */
{
- CHECK_PATHLEN(name,errInfo);
if (unlink(name) == 0) {
return 1;
}
@@ -457,32 +182,13 @@ efile_rename(Efile_error* errInfo, /* Where to return error codes. */
char* src, /* Original name. */
char* dst) /* New name. */
{
- CHECK_PATHLEN(src,errInfo);
- CHECK_PATHLEN(dst,errInfo);
-#ifdef VXWORKS
-
- /* First check if src == dst, if so, just return. */
- /* VxWorks dos file system destroys the file otherwise, */
- /* VxWorks nfs file system rename doesn't work at all. */
- if(strcmp(src, dst) == 0)
- return 1;
-#endif
if (rename(src, dst) == 0) {
return 1;
}
-#ifdef VXWORKS
- /* nfs for VxWorks doesn't support rename. We try to emulate it */
- /* (by first copying src to dst and then deleting src). */
- if(errno == S_ioLib_UNKNOWN_REQUEST && /* error code returned
- by ioLib (!) */
- copy(src, dst) == OK &&
- unlink(src) == OK)
- return 1;
-#endif
if (errno == ENOTEMPTY) {
errno = EEXIST;
}
-#if defined (sparc) && !defined(VXWORKS)
+#if defined (sparc)
/*
* SunOS 4.1.4 reports overwriting a non-empty directory with a
* directory as EINVAL instead of EEXIST (first rule out the correct
@@ -543,7 +249,6 @@ int
efile_chdir(Efile_error* errInfo, /* Where to return error codes. */
char* name) /* Name of directory to make current. */
{
- CHECK_PATHLEN(name, errInfo);
return check_error(chdir(name), errInfo);
}
@@ -600,8 +305,6 @@ efile_readdir(Efile_error* errInfo, /* Where to return error codes. */
* If this is the first call, we must open the directory.
*/
- CHECK_PATHLEN(name, errInfo);
-
if (*p_dir_handle == NULL) {
dp = opendir(name);
if (dp == NULL)
@@ -641,26 +344,8 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */
struct stat statbuf;
int fd;
int mode; /* Open mode. */
-#ifdef VXWORKS
- char pathbuff[PATH_MAX+2];
- char sbuff[PATH_MAX*2];
- char *totbuff = sbuff;
- int nameneed;
-#endif
-
-
- CHECK_PATHLEN(name, errInfo);
-
-#ifdef VXWORKS
- /* Have to check that it's not a directory. */
- if (stat(name,&statbuf) != ERROR && ISDIR(statbuf)) {
- errno = EISDIR;
- return check_error(-1, errInfo);
- }
-#endif
if (stat(name, &statbuf) >= 0 && !ISREG(statbuf)) {
-#if !defined(VXWORKS) && !defined(OSE)
/*
* For UNIX only, here is some ugly code to allow
* /dev/null to be opened as a file.
@@ -677,12 +362,9 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */
}
}
if (!(dev_null_ino && statbuf.st_ino == dev_null_ino)) {
-#endif
errno = EISDIR;
return check_error(-1, errInfo);
-#if !defined(VXWORKS) && !defined(OSE)
}
-#endif
}
switch (flags & (EFILE_MODE_READ|EFILE_MODE_WRITE)) {
@@ -706,49 +388,14 @@ efile_openfile(Efile_error* errInfo, /* Where to return error codes. */
if (flags & EFILE_MODE_APPEND) {
mode &= ~O_TRUNC;
-#ifndef VXWORKS
- mode |= O_APPEND; /* Dont make VxWorks think things it shouldn't */
-#endif
+ mode |= O_APPEND;
}
if (flags & EFILE_MODE_EXCL) {
mode |= O_EXCL;
}
-#ifdef VXWORKS
- if (*name != '/') {
- /* Make sure it is an absolute pathname, because ftruncate needs it */
- ioDefPathGet(pathbuff);
- strcat(pathbuff,"/");
- nameneed = strlen(pathbuff) + strlen(name) + 1;
- if (nameneed > PATH_MAX*2)
- totbuff = EF_SAFE_ALLOC(nameneed);
- strcpy(totbuff,pathbuff);
- strcat(totbuff,name);
- fd = open(totbuff, mode, FILE_MODE);
- if (totbuff != sbuff)
- EF_FREE(totbuff);
- } else {
- fd = open(name, mode, FILE_MODE);
- }
-#else
fd = open(name, mode, FILE_MODE);
-#endif
-
-#ifdef VXWORKS
-
- /* This is a VxWorks/nfs workaround for erl_tar to create
- * non-existant directories. (of some reason (...) VxWorks
- * returns, the *non-module-prefixed*, 0xd code when
- * trying to write a file in a directory that doesn't exist).
- * (see efile_mkdir)
- */
- if ((fd < 0) && (strchr(name, '/') != NULL) && (errno == 0xd)) {
- /* Return the correct error code enoent */
- errno = S_nfsLib_NFSERR_NOENT;
- return check_error(-1, errInfo);
- }
-#endif
if (!check_error(fd, errInfo))
return 0;
@@ -797,11 +444,7 @@ efile_fsync(Efile_error *errInfo, /* Where to return error codes. */
int fd) /* File descriptor for file to sync. */
{
#ifdef NO_FSYNC
-#ifdef VXWORKS
- return check_error(ioctl(fd, FIOSYNC, 0), errInfo);
-#else
- undefined fsync
-#endif /* VXWORKS */
+ undefined fsync /* XXX: Really? */
#else
#if defined(DARWIN) && defined(F_FULLFSYNC)
return check_error(fcntl(fd, F_FULLFSYNC), errInfo);
@@ -818,21 +461,8 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
struct stat statbuf; /* Information about the file */
int result;
-#ifdef VXWORKS
- if (*name == '\0') {
- errInfo->posix_errno = errInfo->os_errno = ENOENT;
- return 0;
- }
-#endif
-
- CHECK_PATHLEN(name, errInfo);
-
if (info_for_link) {
-#if (defined(VXWORKS))
- result = stat(name, &statbuf);
-#else
result = lstat(name, &statbuf);
-#endif
} else {
result = stat(name, &statbuf);
}
@@ -849,19 +479,9 @@ efile_fileinfo(Efile_error* errInfo, Efile_info* pInfo,
#ifdef NO_ACCESS
/* Just look at read/write access for owner. */
-#ifdef VXWORKS
-
- pInfo->access = FA_NONE;
- if(statbuf.st_mode & S_IRUSR)
- pInfo->access |= FA_READ;
- if(statbuf.st_mode & S_IWUSR)
- pInfo->access |= FA_WRITE;
-
-#else
pInfo->access = ((statbuf.st_mode >> 6) & 07) >> 1;
-#endif /* VXWORKS */
#else
pInfo->access = FA_NONE;
if (access(name, R_OK) == 0)
@@ -902,35 +522,6 @@ efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name)
{
struct utimbuf tval;
- CHECK_PATHLEN(name, errInfo);
-
-#ifdef VXWORKS
-
- if (pInfo->mode != -1) {
- int fd;
- struct stat statbuf;
-
- fd = open(name, O_RDONLY, 0);
- if (!check_error(fd, errInfo))
- return 0;
- if (fstat(fd, &statbuf) < 0) {
- close(fd);
- return check_error(-1, errInfo);
- }
- if (pInfo->mode & S_IWUSR) {
- /* clear read only bit */
- statbuf.st_attrib &= ~DOS_ATTR_RDONLY;
- } else {
- /* set read only bit */
- statbuf.st_attrib |= DOS_ATTR_RDONLY;
- }
- /* This should work for dos files but not for nfs ditos, so don't
- * report errors (to avoid problems when running e.g. erl_tar)
- */
- ioctl(fd, FIOATTRIBSET, statbuf.st_attrib);
- close(fd);
- }
-#else
/*
* On some systems chown will always fail for a non-root user unless
* POSIX_CHOWN_RESTRICTED is not set. Others will succeed as long as
@@ -952,20 +543,10 @@ efile_write_info(Efile_error *errInfo, Efile_info *pInfo, char *name)
}
}
-#endif /* !VXWORKS */
-
tval.actime = pInfo->accessTime;
tval.modtime = pInfo->modifyTime;
-#ifdef VXWORKS
- /* VxWorks' utime doesn't work when the file is a nfs mounted
- * one, don't report error if utime fails.
- */
- utime(name, &tval);
- return 1;
-#else
return check_error(utime(name, &tval), errInfo);
-#endif
}
@@ -979,11 +560,6 @@ efile_write(Efile_error* errInfo, /* Where to return error codes. */
{
ssize_t written; /* Bytes written in last operation. */
-#ifdef VXWORKS
- if (flags & EFILE_MODE_APPEND) {
- lseek(fd, 0, SEEK_END); /* Naive append emulation on VXWORKS */
- }
-#endif
while (count > 0) {
if ((written = write(fd, buf, count)) < 0) {
if (errno != EINTR)
@@ -1012,12 +588,6 @@ efile_writev(Efile_error* errInfo, /* Where to return error codes */
ASSERT(iovcnt >= 0);
-#ifdef VXWORKS
- if (flags & EFILE_MODE_APPEND) {
- lseek(fd, 0, SEEK_END); /* Naive append emulation on VXWORKS */
- }
-#endif
-
while (cnt < iovcnt) {
if ((! iov[cnt].iov_base) || (iov[cnt].iov_len <= 0)) {
/* Empty buffer - skip */
@@ -1226,118 +796,6 @@ efile_seek(Efile_error* errInfo, /* Where to return error codes. */
int
efile_truncate_file(Efile_error* errInfo, int *fd, int flags)
{
-#ifdef VXWORKS
- off_t offset;
- char namebuf[PATH_MAX+1];
- char namebuf2[PATH_MAX+10];
- int new;
- int dummy;
- int i;
- int left;
- static char buff[1024];
- struct stat st;
- Efile_error tmperr;
-
- if ((offset = lseek(*fd, 0, 1)) < 0) {
- return check_error((int) offset,errInfo);
- }
- if (ftruncate(*fd, offset) < 0) {
- if (vxworks_to_posix(errno) != EINVAL) {
- return check_error(-1, errInfo);
- }
- /*
- ** Kludge
- */
- if(ioctl(*fd,FIOGETNAME,(int) namebuf) < 0) {
- return check_error(-1, errInfo);
- }
- for(i=0;i<1000;++i) {
- sprintf(namebuf2,"%s%d",namebuf,i);
- CHECK_PATHLEN(namebuf2,errInfo);
- if (stat(namebuf2,&st) < 0) {
- break;
- }
- }
- if (i > 1000) {
- errno = EINVAL;
- return check_error(-1, errInfo);
- }
- if (close(*fd) < 0) {
- return check_error(-1, errInfo);
- }
- if (efile_rename(&tmperr,namebuf,namebuf2) < 0) {
- i = check_error(-1,&tmperr);
- if (!efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE,
- fd,&dummy)) {
- *fd = -1;
- } else {
- *errInfo = tmperr;
- }
- return i;
- }
- if ((*fd = open(namebuf2, O_RDONLY, 0)) < 0) {
- i = check_error(-1,errInfo);
- efile_rename(&tmperr,namebuf2,namebuf); /* at least try */
- if (!efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE,
- fd,&dummy)) {
- *fd = -1;
- } else {
- lseek(*fd,offset,SEEK_SET);
- }
- return i;
- }
- /* Point of no return... */
-
- if ((new = open(namebuf,O_RDWR | O_CREAT, FILE_MODE)) < 0) {
- close(*fd);
- *fd = -1;
- return 0;
- }
- left = offset;
-
- while (left) {
- if ((i = read(*fd,buff,(left > 1024) ? 1024 : left)) < 0) {
- i = check_error(-1,errInfo);
- close(new);
- close(*fd);
- unlink(namebuf);
- efile_rename(&tmperr,namebuf2,namebuf); /* at least try */
- if (!efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE,
- fd,&dummy)) {
- *fd = -1;
- } else {
- lseek(*fd,offset,SEEK_SET);
- }
- return i;
- }
- left -= i;
- if (write(new,buff,i) < 0) {
- i = check_error(-1,errInfo);
- close(new);
- close(*fd);
- unlink(namebuf);
- rename(namebuf2,namebuf); /* at least try */
- if (!efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE,
- fd,&dummy)) {
- *fd = -1;
- } else {
- lseek(*fd,offset,SEEK_SET);
- }
- return i;
- }
- }
- close(*fd);
- unlink(namebuf2);
- close(new);
- i = efile_openfile(errInfo,namebuf,flags | EFILE_NO_TRUNCATE,fd,
- &dummy);
- if (i) {
- lseek(*fd,offset,SEEK_SET);
- }
- return i;
- }
- return 1;
-#else
#ifndef NO_FTRUNCATE
off_t offset;
@@ -1347,15 +805,11 @@ efile_truncate_file(Efile_error* errInfo, int *fd, int flags)
#else
return 1;
#endif
-#endif
}
int
efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
{
-#ifdef VXWORKS
- return vxworks_enotsup(errInfo);
-#else
int len;
ASSERT(size > 0);
len = readlink(name, buffer, size-1);
@@ -1364,7 +818,6 @@ efile_readlink(Efile_error* errInfo, char* name, char* buffer, size_t size)
}
buffer[len] = '\0';
return 1;
-#endif
}
int
@@ -1377,21 +830,13 @@ efile_altname(Efile_error* errInfo, char* name, char* buffer, size_t size)
int
efile_link(Efile_error* errInfo, char* old, char* new)
{
-#ifdef VXWORKS
- return vxworks_enotsup(errInfo);
-#else
return check_error(link(old, new), errInfo);
-#endif
}
int
efile_symlink(Efile_error* errInfo, char* old, char* new)
{
-#ifdef VXWORKS
- return vxworks_enotsup(errInfo);
-#else
return check_error(symlink(old, new), errInfo);
-#endif
}
int
@@ -1406,8 +851,8 @@ efile_fadvise(Efile_error* errInfo, int fd, Sint64 offset,
}
#ifdef HAVE_SENDFILE
-// For some reason the maximum size_t cannot be used as the max size
-// 3GB seems to work on all platforms
+/* For some reason the maximum size_t cannot be used as the max size
+ 3GB seems to work on all platforms */
#define SENDFILE_CHUNK_SIZE ((1UL << 30) -1)
/*
@@ -1444,7 +889,7 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd,
#if defined(__linux__)
ssize_t retval;
do {
- // check if *nbytes is 0 or greater than chunk size
+ /* check if *nbytes is 0 or greater than chunk size */
if (*nbytes == 0 || *nbytes > SENDFILE_CHUNK_SIZE)
retval = sendfile(out_fd, in_fd, offset, SENDFILE_CHUNK_SIZE);
else
@@ -1455,7 +900,7 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd,
}
} while (retval == SENDFILE_CHUNK_SIZE);
if (written != 0) {
- // -1 is not returned by the linux API so we have to simulate it
+ /* -1 is not returned by the linux API so we have to simulate it */
retval = -1;
errno = EAGAIN;
}
@@ -1468,23 +913,29 @@ efile_sendfile(Efile_error* errInfo, int in_fd, int out_fd,
do {
fdrec.sfv_off = *offset;
len = 0;
- // check if *nbytes is 0 or greater than chunk size
+ /* check if *nbytes is 0 or greater than chunk size */
if (*nbytes == 0 || *nbytes > SENDFILE_CHUNK_SIZE)
fdrec.sfv_len = SENDFILE_CHUNK_SIZE;
else
fdrec.sfv_len = *nbytes;
retval = sendfilev(out_fd, &fdrec, 1, &len);
- if (retval != -1 || errno == EAGAIN || errno == EINTR) {
+
+ /* Sometimes sendfilev can return -1 and still send data.
+ When that happens we just pretend that no error happened. */
+ if (retval != -1 || errno == EAGAIN || errno == EINTR ||
+ len != 0) {
*offset += len;
*nbytes -= len;
written += len;
+ if (errno != EAGAIN && errno != EINTR && len != 0)
+ retval = len;
}
} while (len == SENDFILE_CHUNK_SIZE);
#elif defined(DARWIN)
int retval;
off_t len;
do {
- // check if *nbytes is 0 or greater than chunk size
+ /* check if *nbytes is 0 or greater than chunk size */
if(*nbytes > SENDFILE_CHUNK_SIZE)
len = SENDFILE_CHUNK_SIZE;
else
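Each platform branch above caps a single kernel call at SENDFILE_CHUNK_SIZE and loops while full chunks keep going out. A minimal Linux-flavoured sketch of that chunking pattern, assuming only the standard sendfile(2) signature (the in-tree code additionally updates *offset and *nbytes for the caller and defers EAGAIN/EINTR to the driver's retry machinery):

    #include <sys/types.h>
    #include <sys/sendfile.h>

    #define CHUNK ((size_t)((1UL << 30) - 1))  /* same cap as SENDFILE_CHUNK_SIZE */

    /* Sketch: push up to 'count' bytes (0 means "until EOF"), never asking
     * the kernel for more than CHUNK bytes per call. */
    static ssize_t send_chunked(int out_fd, int in_fd, off_t *offset, size_t count)
    {
        size_t written = 0;
        ssize_t r;
        do {
            size_t want = (count == 0 || count - written > CHUNK)
                              ? CHUNK : count - written;
            r = sendfile(out_fd, in_fd, offset, want);
            if (r > 0)
                written += (size_t)r;
        } while (r == (ssize_t)CHUNK && (count == 0 || written < count));
        return (r < 0 && written == 0) ? -1 : (ssize_t)written;
    }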
diff --git a/erts/emulator/drivers/win32/registry_drv.c b/erts/emulator/drivers/win32/registry_drv.c
index 1fad34e380..5b200ebd32 100644
--- a/erts/emulator/drivers/win32/registry_drv.c
+++ b/erts/emulator/drivers/win32/registry_drv.c
@@ -344,7 +344,7 @@ fix_value_result(RegPort* rp, LONG result, DWORD type,
#ifdef DEBUG
if (ok != ERROR_SUCCESS) {
char buff[256];
- sprintf(buff,"Failure in registry_drv line %d, error = %d",
+ erts_snprintf(buff, sizeof(buff), "Failure in registry_drv line %d, error = %d",
__LINE__, GetLastError());
MessageBox(NULL, buff, "Internal error", MB_OK);
ASSERT(ok == ERROR_SUCCESS);
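The debug popup in fix_value_result() now formats through erts_snprintf() rather than sprintf(), so an unexpectedly long message can no longer overrun the 256-byte stack buffer. The same bounded behaviour can be shown with plain C99 snprintf(), which truncates and always NUL-terminates (erts_snprintf is the emulator's internal counterpart):

    #include <stdio.h>

    /* Sketch: format into a fixed-size buffer without risking overflow. */
    static void format_error(char *buf, size_t buflen, int line, long err)
    {
        /* Never writes more than buflen bytes, including the terminating NUL. */
        snprintf(buf, buflen, "Failure in registry_drv line %d, error = %ld",
                 line, err);
    }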
diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4
index ec25c0b9b7..0de69a617f 100644
--- a/erts/emulator/hipe/hipe_amd64_bifs.m4
+++ b/erts/emulator/hipe/hipe_amd64_bifs.m4
@@ -2,7 +2,7 @@ changecom(`/*', `*/')dnl
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -30,12 +30,12 @@ include(`hipe/hipe_amd64_asm.m4')
#define TEST_GOT_EXN cmpq $THE_NON_VALUE, %rax
#endif'
-define(TEST_GOT_MBUF,`movq P_MBUF(P), %rdx # `TEST_GOT_MBUF'
+define(TEST_GOT_MBUF,`movq P_MBUF(P), %rdx /* `TEST_GOT_MBUF' */
testq %rdx, %rdx
jnz 3f
2:')
define(HANDLE_GOT_MBUF,`
-3: call nbif_$1_gc_after_bif # `HANDLE_GOT_MBUF'
+3: call nbif_$1_gc_after_bif /* `HANDLE_GOT_MBUF' */
jmp 2b')
`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index 23ced284bf..1562748f2d 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -609,8 +609,8 @@ static Uint *hipe_find_emu_address(Eterm mod, Eterm name, unsigned int arity)
Uint *code_base;
int i, n;
- modp = erts_get_module(mod);
- if (modp == NULL || (code_base = modp->code) == NULL)
+ modp = erts_get_module(mod, erts_active_code_ix());
+ if (modp == NULL || (code_base = modp->curr.code) == NULL)
return NULL;
n = code_base[MI_NUM_FUNCTIONS];
for (i = 0; i < n; ++i) {
@@ -648,7 +648,7 @@ static void *hipe_get_emu_address(Eterm m, Eterm f, unsigned int arity, int is_r
/* if not found, stub it via the export entry */
/* no lock needed around erts_export_get_or_make_stub() */
Export *export_entry = erts_export_get_or_make_stub(m, f, arity);
- address = export_entry->address;
+ address = export_entry->addressv[erts_active_code_ix()];
}
return address;
}
@@ -1583,14 +1583,6 @@ BIF_RETTYPE hipe_nonclosure_address(BIF_ALIST_2)
goto badfun;
m = ep->code[0];
f = ep->code[1];
- } else if (hdr == make_arityval(2)) {
- Eterm *tp = tuple_val(BIF_ARG_1);
- m = tp[1];
- f = tp[2];
- if (is_not_atom(m) || is_not_atom(f))
- goto badfun;
- if (!erts_find_export_entry(m, f, BIF_ARG_2))
- goto badfun;
} else
goto badfun;
address = hipe_get_na_nofail(m, f, BIF_ARG_2, 1);
diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c
index 7eab1ec2ad..e09988e2c5 100644
--- a/erts/emulator/hipe/hipe_bif2.c
+++ b/erts/emulator/hipe/hipe_bif2.c
@@ -173,3 +173,10 @@ BIF_RETTYPE hipe_debug_bif_wrapper(BIF_ALIST_1)
#endif /* ERTS_ENABLE_LOCK_CHECK && ERTS_SMP */
+
+BIF_RETTYPE hipe_bifs_debug_native_called_2(BIF_ALIST_2)
+{
+ erts_printf("hipe_debug_native_called: %T(%T)\r\n", BIF_ARG_1, BIF_ARG_2);
+ BIF_RET(am_ok);
+}
+
diff --git a/erts/emulator/hipe/hipe_bif2.tab b/erts/emulator/hipe/hipe_bif2.tab
index c71b02fbeb..45a395bf57 100644
--- a/erts/emulator/hipe/hipe_bif2.tab
+++ b/erts/emulator/hipe/hipe_bif2.tab
@@ -29,3 +29,4 @@ bif hipe_bifs:show_term/1
bif hipe_bifs:in_native/0
bif hipe_bifs:modeswitch_debug_on/0
bif hipe_bifs:modeswitch_debug_off/0
+bif hipe_bifs:debug_native_called/2
diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4
index 942fa0c5cb..764b8d180c 100644
--- a/erts/emulator/hipe/hipe_bif_list.m4
+++ b/erts/emulator/hipe/hipe_bif_list.m4
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2004-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2004-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -145,6 +145,7 @@
* Zero-arity BIFs that can fail.
*/
standard_bif_interface_0(nbif_processes_0, processes_0)
+standard_bif_interface_0(nbif_ports_0, ports_0)
/*
* BIFs and primops that may do a GC (change heap limit and walk the native stack).
@@ -165,6 +166,7 @@ gc_bif_interface_2(nbif_put_2, put_2)
gc_bif_interface_1(nbif_hipe_bifs_show_nstack_1, hipe_show_nstack_1)
gc_bif_interface_1(nbif_hipe_bifs_show_pcb_1, hipe_bifs_show_pcb_1)
gc_bif_interface_0(nbif_hipe_bifs_nstack_used_size_0, hipe_bifs_nstack_used_size_0)
+gc_bif_interface_2(nbif_hipe_bifs_debug_native_called, hipe_bifs_debug_native_called_2)
/*
* Arithmetic operators called indirectly by the HiPE compiler.
@@ -245,7 +247,7 @@ noproc_primop_interface_5(nbif_bs_put_big_integer, hipe_bs_put_big_integer)
gc_bif_interface_0(nbif_check_get_msg, hipe_check_get_msg)
-#ifdef NO_FPE_SIGNALS
+#`ifdef' NO_FPE_SIGNALS
nocons_nofail_primop_interface_0(nbif_emulate_fpe, hipe_emulate_fpe)
#endif
diff --git a/erts/emulator/hipe/hipe_debug.c b/erts/emulator/hipe/hipe_debug.c
index 7ca11f8c6c..f2e9d03607 100644
--- a/erts/emulator/hipe/hipe_debug.c
+++ b/erts/emulator/hipe/hipe_debug.c
@@ -188,14 +188,11 @@ void hipe_print_pcb(Process *p)
U("old_htop ", old_htop);
U("old_head ", old_heap);
U("min_heap_..", min_heap_size);
- U("status ", status);
- U("rstatus ", rstatus);
U("rcount ", rcount);
- U("id ", id);
- U("prio ", prio);
+ U("id ", common.id);
U("reds ", reds);
- U("tracer_pr..", tracer_proc);
- U("trace_fla..", trace_flags);
+ U("tracer_pr..", common.tracer_proc);
+ U("trace_fla..", common.trace_flags);
U("group_lea..", group_leader);
U("flags ", flags);
U("fvalue ", fvalue);
@@ -204,8 +201,8 @@ void hipe_print_pcb(Process *p)
/*XXX: ErlTimer tm; */
U("next ", next);
/*XXX: ErlOffHeap off_heap; */
- U("reg ", reg);
- U("nlinks ", nlinks);
+ U("reg ", common.u.alive.reg);
+ U("nlinks ", common.u.alive.links);
/*XXX: ErlMessageQueue msg; */
U("mbuf ", mbuf);
U("mbuf_sz ", mbuf_sz);
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index cbbf1db2e5..0e287908b1 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -262,47 +262,6 @@ static const struct literal {
const char *name;
int value;
} literals[] = {
- /* Field offsets in a process struct */
- { "P_HP", offsetof(struct process, htop) },
- { "P_HP_LIMIT", offsetof(struct process, stop) },
- { "P_OFF_HEAP_FIRST", offsetof(struct process, off_heap.first) },
- { "P_MBUF", offsetof(struct process, mbuf) },
- { "P_ID", offsetof(struct process, id) },
- { "P_FLAGS", offsetof(struct process, flags) },
- { "P_FVALUE", offsetof(struct process, fvalue) },
- { "P_FREASON", offsetof(struct process, freason) },
- { "P_FTRACE", offsetof(struct process, ftrace) },
- { "P_FCALLS", offsetof(struct process, fcalls) },
- { "P_BEAM_IP", offsetof(struct process, i) },
- { "P_ARITY", offsetof(struct process, arity) },
- { "P_ARG0", offsetof(struct process, def_arg_reg[0]) },
- { "P_ARG1", offsetof(struct process, def_arg_reg[1]) },
- { "P_ARG2", offsetof(struct process, def_arg_reg[2]) },
- { "P_ARG3", offsetof(struct process, def_arg_reg[3]) },
- { "P_ARG4", offsetof(struct process, def_arg_reg[4]) },
- { "P_ARG5", offsetof(struct process, def_arg_reg[5]) },
-#ifdef HIPE
- { "P_NSP", offsetof(struct process, hipe.nsp) },
- { "P_NCALLEE", offsetof(struct process, hipe.ncallee) },
- { "P_CLOSURE", offsetof(struct process, hipe.closure) },
-#if defined(__i386__) || defined(__x86_64__)
- { "P_NSP_LIMIT", offsetof(struct process, hipe.nstack) },
- { "P_CSP", offsetof(struct process, hipe.ncsp) },
-#elif defined(__sparc__) || defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__)
- { "P_NSP_LIMIT", offsetof(struct process, hipe.nstack) },
- { "P_NRA", offsetof(struct process, hipe.nra) },
-#endif
- { "P_NARITY", offsetof(struct process, hipe.narity) },
- { "P_FLOAT_RESULT",
-# ifdef NO_FPE_SIGNALS
- offsetof(struct process, hipe.float_result)
-# endif
- },
-# if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
- { "P_BIF_CALLEE", offsetof(struct process, hipe.bif_callee) },
-# endif
-#endif /* HIPE */
-
/* process flags bits */
{ "F_TIMO", F_TIMO },
@@ -380,8 +339,6 @@ static const struct literal {
{ "MS_SAVEOFFSET_SIZE", field_sizeof(struct erl_bin_match_struct, save_offset)},
/* messages */
- { "P_MSG_FIRST", offsetof(struct process, msg.first) },
- { "P_MSG_SAVE", offsetof(struct process, msg.save) },
{ "MSG_NEXT", offsetof(struct erl_mesg, next) },
/* ARM */
@@ -460,12 +417,14 @@ static const struct atom_literal {
* These depend on configuration options such as heap architecture.
* The compiler accesses these through hipe_bifs:get_rts_param/1.
*/
-static const struct rts_param {
+struct rts_param {
unsigned int nr;
const char *name;
unsigned int is_defined;
int value;
-} rts_params[] = {
+};
+
+static const struct rts_param rts_params[] = {
{ 1, "P_OFF_HEAP_FUNS",
1, offsetof(struct process, off_heap.first)
},
@@ -518,7 +477,53 @@ static const struct rts_param {
{ 19, "MSG_MESSAGE",
1, offsetof(struct erl_mesg, m[0])
},
- /* highest entry ever used == 21 */
+
+ /* Field offsets in a process struct */
+ { 22, "P_HP", 1, offsetof(struct process, htop) },
+ { 23, "P_HP_LIMIT", 1, offsetof(struct process, stop) },
+ { 24, "P_OFF_HEAP_FIRST", 1, offsetof(struct process, off_heap.first) },
+ { 25, "P_MBUF", 1, offsetof(struct process, mbuf) },
+ { 26, "P_ID", 1, offsetof(struct process, common.id) },
+ { 27, "P_FLAGS", 1, offsetof(struct process, flags) },
+ { 28, "P_FVALUE", 1, offsetof(struct process, fvalue) },
+ { 29, "P_FREASON", 1, offsetof(struct process, freason) },
+ { 30, "P_FTRACE", 1, offsetof(struct process, ftrace) },
+ { 31, "P_FCALLS", 1, offsetof(struct process, fcalls) },
+ { 32, "P_BEAM_IP", 1, offsetof(struct process, i) },
+ { 33, "P_ARITY", 1, offsetof(struct process, arity) },
+ { 34, "P_ARG0", 1, offsetof(struct process, def_arg_reg[0]) },
+ { 35, "P_ARG1", 1, offsetof(struct process, def_arg_reg[1]) },
+ { 36, "P_ARG2", 1, offsetof(struct process, def_arg_reg[2]) },
+ { 37, "P_ARG3", 1, offsetof(struct process, def_arg_reg[3]) },
+ { 38, "P_ARG4", 1, offsetof(struct process, def_arg_reg[4]) },
+ { 39, "P_ARG5", 1, offsetof(struct process, def_arg_reg[5]) },
+ { 40, "P_NSP", 1, offsetof(struct process, hipe.nsp) },
+ { 41, "P_NCALLEE", 1, offsetof(struct process, hipe.ncallee) },
+ { 42, "P_CLOSURE", 1, offsetof(struct process, hipe.closure) },
+ { 43, "P_NSP_LIMIT", 1, offsetof(struct process, hipe.nstack) },
+ { 44, "P_CSP",
+#if defined(__i386__) || defined(__x86_64__)
+ 1, offsetof(struct process, hipe.ncsp)
+#endif
+ },
+ { 45, "P_NRA",
+#if defined(__sparc__) || defined(__powerpc__) || defined(__ppc__) || defined(__powerpc64__) || defined(__arm__)
+ 1, offsetof(struct process, hipe.nra)
+#endif
+ },
+ { 46, "P_NARITY", 1, offsetof(struct process, hipe.narity) },
+ { 47, "P_FLOAT_RESULT",
+#ifdef NO_FPE_SIGNALS
+ 1, offsetof(struct process, hipe.float_result)
+#endif
+ },
+ { 48, "P_BIF_CALLEE",
+#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP)
+ 1, offsetof(struct process, hipe.bif_callee)
+#endif
+ },
+ { 49, "P_MSG_FIRST", 1, offsetof(struct process, msg.first) },
+ { 50, "P_MSG_SAVE", 1, offsetof(struct process, msg.save) },
};
#define NR_PARAMS ARRAY_SIZE(rts_params)
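The process-struct offsets that hipe_mkliterals used to emit as compile-time constants are now published as numbered runtime parameters (entries 22-50 above); the HiPE compiler reads them through hipe_bifs:get_rts_param/1, and a parameter whose surrounding #if is not compiled in is left with is_defined at 0. A small hedged sketch of the lookup such a table supports (the function and struct name are illustrative, not the emulator's):

    /* Sketch: find a runtime parameter by number in a {nr, name, is_defined,
     * value} table shaped like rts_params[] and report whether it is defined
     * in this build. */
    struct rts_param_ex { unsigned int nr; const char *name;
                          unsigned int is_defined; int value; };

    static int lookup_rts_param(const struct rts_param_ex *tab, unsigned n,
                                unsigned int nr, int *value_out)
    {
        unsigned i;
        for (i = 0; i < n; i++) {
            if (tab[i].nr == nr) {
                if (!tab[i].is_defined)
                    return 0;        /* present, but unused in this build */
                *value_out = tab[i].value;
                return 1;
            }
        }
        return 0;                    /* unknown parameter number */
    }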
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index 6a3ce5608f..3c782b2f56 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -360,7 +360,8 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
p->flags &= ~F_HIBERNATE_SCHED;
goto do_schedule;
}
- if (p->status == P_WAITING) {
+
+ if (!(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_ACTIVE)) {
for (i = 0; i < p->arity; ++i)
p->arg_reg[i] = reg[i];
goto do_schedule;
@@ -451,10 +452,6 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
case HIPE_MODE_SWITCH_RES_SUSPEND: {
p->i = hipe_beam_pc_resume;
p->arity = 0;
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- if (p->status != P_SUSPENDED)
- erts_add_to_runq(p);
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
goto do_schedule;
}
case HIPE_MODE_SWITCH_RES_WAIT:
@@ -470,7 +467,8 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
#endif
p->i = hipe_beam_pc_resume;
p->arity = 0;
- p->status = P_WAITING;
+ erts_smp_atomic32_read_band_relb(&p->state,
+ ~ERTS_PSFLG_ACTIVE);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
do_schedule:
{
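hipe_mode_switch() no longer reads or writes p->status: a process is parked by clearing the ERTS_PSFLG_ACTIVE bit in the atomic p->state word with release ordering (read_band_relb), and "may this process keep running?" becomes an acquire read (read_acqb) of the same word. The same pattern sketched with C11 atomics, under the assumption that the flags fit a 32-bit word (the bit value below is illustrative, not ERTS's):

    #include <stdatomic.h>
    #include <stdint.h>

    #define PSFLG_ACTIVE (1u << 3)   /* illustrative bit, not the real ERTS_PSFLG_ACTIVE */

    /* Park a process: clear ACTIVE with release ordering. */
    static void mark_waiting(_Atomic uint32_t *state)
    {
        atomic_fetch_and_explicit(state, ~PSFLG_ACTIVE, memory_order_release);
    }

    /* Check whether it is still runnable: acquire read of the flag word. */
    static int is_active(_Atomic uint32_t *state)
    {
        return (atomic_load_explicit(state, memory_order_acquire) & PSFLG_ACTIVE) != 0;
    }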
diff --git a/erts/emulator/hipe/hipe_native_bif.h b/erts/emulator/hipe/hipe_native_bif.h
index 9e3a156fbc..3f460a5a5c 100644
--- a/erts/emulator/hipe/hipe_native_bif.h
+++ b/erts/emulator/hipe/hipe_native_bif.h
@@ -110,6 +110,9 @@ int hipe_bs_put_big_integer(Eterm, Uint, byte*, unsigned, unsigned);
AEXTERN(Eterm,nbif_check_get_msg,(Process*));
Eterm hipe_check_get_msg(Process*);
+AEXTERN(BIF_RETTYPE,nbif_hipe_bifs_debug_native_called,(Process*,Eterm,Eterm));
+BIF_RETTYPE hipe_bifs_debug_native_called_2(BIF_ALIST_2);
+
/*
* SMP-specific stuff
*/
diff --git a/erts/emulator/hipe/hipe_primops.h b/erts/emulator/hipe/hipe_primops.h
index 38509c105b..52b4681cfe 100644
--- a/erts/emulator/hipe/hipe_primops.h
+++ b/erts/emulator/hipe/hipe_primops.h
@@ -80,6 +80,7 @@ PRIMOP_LIST(am_fclearerror_error, &nbif_fclearerror_error)
#ifdef NO_FPE_SIGNALS
PRIMOP_LIST(am_emulate_fpe, &nbif_emulate_fpe)
#endif
+PRIMOP_LIST(am_debug_native_called, &nbif_hipe_bifs_debug_native_called)
#if defined(__sparc__)
#include "hipe_sparc_primops.h"
diff --git a/erts/emulator/hipe/hipe_stack.c b/erts/emulator/hipe/hipe_stack.c
index da462a64e1..53c316ba52 100644
--- a/erts/emulator/hipe/hipe_stack.c
+++ b/erts/emulator/hipe/hipe_stack.c
@@ -130,7 +130,7 @@ struct sdesc *hipe_decode_sdesc(Eterm arg)
struct sdesc *sdesc;
if (is_not_tuple(arg) ||
- (tuple_val(arg))[0] != make_arityval(5) ||
+ (tuple_val(arg))[0] != make_arityval(6) ||
term_to_Uint((tuple_val(arg))[1], &ra) == 0 ||
term_to_Uint((tuple_val(arg))[2], &exnra) == 0 ||
is_not_small((tuple_val(arg))[3]) ||
@@ -183,5 +183,13 @@ struct sdesc *hipe_decode_sdesc(Eterm arg)
off = unsigned_val(live[i]);
sdesc->livebits[off / 32] |= (1 << (off & 31));
}
+#ifdef DEBUG
+ {
+ Eterm mfa_tpl = tuple_val(arg)[6];
+ sdesc->dbg_M = tuple_val(mfa_tpl)[1];
+ sdesc->dbg_F = tuple_val(mfa_tpl)[2];
+ sdesc->dbg_A = tuple_val(mfa_tpl)[3];
+ }
+#endif
return sdesc;
}
diff --git a/erts/emulator/hipe/hipe_stack.h b/erts/emulator/hipe/hipe_stack.h
index c4f2aacd8c..66f9f04c73 100644
--- a/erts/emulator/hipe/hipe_stack.h
+++ b/erts/emulator/hipe/hipe_stack.h
@@ -35,6 +35,10 @@ struct sdesc {
struct sdesc *next; /* hash collision chain */
} bucket;
unsigned int summary; /* frame size, exn handler presence flag, arity */
+#ifdef DEBUG
+ Eterm dbg_M, dbg_F;
+ unsigned dbg_A;
+#endif
unsigned int livebits[1]; /* size depends on arch & data in summary field */
};
diff --git a/erts/emulator/hipe/hipe_x86.c b/erts/emulator/hipe/hipe_x86.c
index 24d232c968..4281730ae2 100644
--- a/erts/emulator/hipe/hipe_x86.c
+++ b/erts/emulator/hipe/hipe_x86.c
@@ -265,7 +265,7 @@ void *hipe_make_native_stub(void *beamAddress, unsigned int beamArity)
void hipe_arch_print_pcb(struct hipe_process_state *p)
{
#define U(n,x) \
- printf(" % 4d | %s | 0x%08x | |\r\n", offsetof(struct hipe_process_state,x), n, (unsigned)p->x)
+ printf(" % 4d | %s | 0x%08x | |\r\n", (int)offsetof(struct hipe_process_state,x), n, (unsigned)p->x)
U("ncsp ", ncsp);
U("narity ", narity);
#undef U
diff --git a/erts/emulator/hipe/hipe_x86_bifs.m4 b/erts/emulator/hipe/hipe_x86_bifs.m4
index 3cb7d67be0..dd6980f555 100644
--- a/erts/emulator/hipe/hipe_x86_bifs.m4
+++ b/erts/emulator/hipe/hipe_x86_bifs.m4
@@ -2,7 +2,7 @@ changecom(`/*', `*/')dnl
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2001-2011. All Rights Reserved.
+ * Copyright Ericsson AB 2001-2012. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -35,12 +35,12 @@ include(`hipe/hipe_x86_asm.m4')
# define CALL_BIF(F) call CSYM(F)
#endif'
-define(TEST_GOT_MBUF,`movl P_MBUF(P), %edx # `TEST_GOT_MBUF'
+define(TEST_GOT_MBUF,`movl P_MBUF(P), %edx /* `TEST_GOT_MBUF' */
testl %edx, %edx
jnz 3f
2:')
define(HANDLE_GOT_MBUF,`
-3: call nbif_$1_gc_after_bif # `HANDLE_GOT_MBUF'
+3: call nbif_$1_gc_after_bif /* `HANDLE_GOT_MBUF' */
jmp 2b')
/*
@@ -70,7 +70,7 @@ ASYM($1):
NBIF_ARG_REG(0,P)
NBIF_ARG(2,1,0)
lea 8(%esp), %eax
- NBIF_ARG_REG(1,%eax) # BIF__ARGS
+ NBIF_ARG_REG(1,%eax) /* BIF__ARGS */
CALL_BIF($2)
TEST_GOT_MBUF
@@ -105,7 +105,7 @@ ASYM($1):
NBIF_ARG(2,2,0)
NBIF_ARG(3,2,1)
lea 8(%esp), %eax
- NBIF_ARG_REG(1,%eax) # BIF__ARGS
+ NBIF_ARG_REG(1,%eax) /* BIF__ARGS */
CALL_BIF($2)
TEST_GOT_MBUF
@@ -141,7 +141,7 @@ ASYM($1):
NBIF_ARG(3,3,1)
NBIF_ARG(4,3,2)
lea 8(%esp), %eax
- NBIF_ARG_REG(1,%eax) # BIF__ARGS
+ NBIF_ARG_REG(1,%eax) /* BIF__ARGS */
CALL_BIF($2)
TEST_GOT_MBUF
diff --git a/erts/emulator/hipe/hipe_x86_gc.h b/erts/emulator/hipe/hipe_x86_gc.h
index e4607ad27d..aa4abb6f59 100644
--- a/erts/emulator/hipe/hipe_x86_gc.h
+++ b/erts/emulator/hipe/hipe_x86_gc.h
@@ -69,6 +69,11 @@ nstack_walk_init_sdesc(const Process *p, struct nstack_walk_state *state)
nstkarity = 0;
state->sdesc0[0].summary = (0 << 9) | (0 << 8) | nstkarity;
state->sdesc0[0].livebits[0] = 0;
+# ifdef DEBUG
+ state->sdesc0[0].dbg_M = 0;
+ state->sdesc0[0].dbg_F = am_init;
+ state->sdesc0[0].dbg_A = 0;
+# endif
/* XXX: this appears to prevent a gcc-4.1.1 bug on x86 */
__asm__ __volatile__("" : : "m"(*state) : "memory");
return &state->sdesc0[0];
diff --git a/erts/emulator/hipe/hipe_x86_glue.h b/erts/emulator/hipe/hipe_x86_glue.h
index b0db93267c..63ad250d60 100644
--- a/erts/emulator/hipe/hipe_x86_glue.h
+++ b/erts/emulator/hipe/hipe_x86_glue.h
@@ -62,6 +62,9 @@ static __inline__ void hipe_arch_glue_init(void)
.sdesc = {
.bucket = { .hvalue = (unsigned long)nbif_return },
.summary = (1<<8),
+ #ifdef DEBUG
+ .dbg_F = am_return,
+ #endif
},
};
hipe_init_sdesc_table(&nbif_return_sdesc.sdesc);
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index c1336c60d9..474408ae7c 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -39,7 +39,6 @@
#include "dtrace-wrapper.h"
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
-# define ERTS_DRV_EV_STATE_EXTRA_SIZE 128
#else
# include "safe_hash.h"
# define DRV_EV_STATE_HTAB_SIZE 1024
@@ -201,17 +200,6 @@ static void event_large_fd_error(ErlDrvPort, ErtsSysFdType, ErlDrvEventData);
#endif
static void steal_pending_stop_select(erts_dsprintf_buf_t*, ErlDrvPort,
ErtsDrvEventState*, int mode, int on);
-static ERTS_INLINE Eterm
-drvport2id(ErlDrvPort dp)
-{
- Port *pp = erts_drvport2port(dp);
- if (pp)
- return pp->id;
- else {
- ASSERT(0);
- return am_undefined;
- }
-}
#ifdef ERTS_SMP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_FD_LIST)
@@ -334,7 +322,9 @@ static void
grow_drv_ev_state(int min_ix)
{
int i;
- int new_len = min_ix + 1 + ERTS_DRV_EV_STATE_EXTRA_SIZE;
+ int new_len;
+
+ new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_ix + 1);
if (new_len > max_fds)
new_len = max_fds;
@@ -377,7 +367,7 @@ abort_task(Eterm id, ErtsPortTaskHandle *pthp, EventStateType type)
|| !erts_port_task_is_scheduled(pthp));
}
else if (erts_port_task_is_scheduled(pthp)) {
- erts_port_task_abort(id, pthp);
+ erts_port_task_abort(pthp);
ASSERT(erts_is_port_alive(id));
}
}
@@ -491,7 +481,7 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
int on)
{
void (*stop_select_fn)(ErlDrvEvent, void*) = NULL;
- Eterm id = drvport2id(ix);
+ Eterm id = erts_drvport2id(ix);
ErtsSysFdType fd = (ErtsSysFdType) e;
ErtsPollEvents ctl_events = (ErtsPollEvents) 0;
ErtsPollEvents new_events, old_events;
@@ -502,8 +492,8 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
DTRACE_CHARBUF(name, 64);
#endif
- ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
- && erts_lc_is_port_locked(erts_drvport2port(ix)));
+ ERTS_SMP_LC_ASSERT(erts_drvport2port(ix, NULL)
+ && erts_lc_is_port_locked(erts_drvport2port(ix, NULL)));
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
@@ -529,9 +519,9 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
if (!on && (mode&ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
if (IS_FD_UNKNOWN(state)) {
/* fast track to stop_select callback */
- stop_select_fn = erts_drvport2port(ix)->drv_ptr->stop_select;
+ stop_select_fn = erts_drvport2port(ix, NULL)->drv_ptr->stop_select;
#ifdef USE_VM_PROBES
- strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1);
+ strncpy(name, erts_drvport2port(ix, NULL)->drv_ptr->name, sizeof(name)-1);
name[sizeof(name)-1] = '\0';
#endif
ret = 0;
@@ -664,14 +654,14 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
}
}
if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
- erts_driver_t* drv_ptr = erts_drvport2port(ix)->drv_ptr;
+ erts_driver_t* drv_ptr = erts_drvport2port(ix, NULL)->drv_ptr;
ASSERT(new_events==0);
if (state->remove_cnt == 0 || !wake_poller) {
/* Safe to close fd now as it is not in pollset
or there was no need to eject fd (kernel poll) */
stop_select_fn = drv_ptr->stop_select;
#ifdef USE_VM_PROBES
- strncpy(name, erts_drvport2port(ix)->drv_ptr->name, sizeof(name)-1);
+ strncpy(name, erts_drvport2port(ix, NULL)->drv_ptr->name, sizeof(name)-1);
name[sizeof(name)-1] = '\0';
#endif
}
@@ -718,13 +708,13 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
ErtsPollEvents events;
ErtsPollEvents add_events;
ErtsPollEvents remove_events;
- Eterm id = drvport2id(ix);
+ Eterm id = erts_drvport2id(ix);
ErtsDrvEventState *state;
int do_wake = 0;
int ret;
- ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
- && erts_lc_is_port_locked(erts_drvport2port(ix)));
+ ERTS_SMP_LC_ASSERT(erts_drvport2port(ix, NULL)
+ && erts_lc_is_port_locked(erts_drvport2port(ix, NULL)));
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
@@ -959,7 +949,7 @@ static void
print_select_op(erts_dsprintf_buf_t *dsbufp,
ErlDrvPort ix, ErtsSysFdType fd, int mode, int on)
{
- Port *pp = erts_drvport2port(ix);
+ Port *pp = erts_drvport2port(ix, NULL);
erts_dsprintf(dsbufp,
"driver_select(%p, %d,%s%s%s%s, %d) "
"by ",
@@ -970,8 +960,8 @@ print_select_op(erts_dsprintf_buf_t *dsbufp,
mode & ERL_DRV_USE ? " ERL_DRV_USE" : "",
mode & (ERL_DRV_USE_NO_CALLBACK & ~ERL_DRV_USE) ? "_NO_CALLBACK" : "",
on);
- print_driver_name(dsbufp, pp->id);
- erts_dsprintf(dsbufp, "driver %T ", pp ? pp->id : NIL);
+ print_driver_name(dsbufp, pp->common.id);
+ erts_dsprintf(dsbufp, "driver %T ", pp ? pp->common.id : NIL);
}
static void
@@ -1030,7 +1020,7 @@ steal_pending_stop_select(erts_dsprintf_buf_t *dsbufp, ErlDrvPort ix,
state->driver.drv_ptr = NULL;
}
else if ((mode & ERL_DRV_USE_NO_CALLBACK) == ERL_DRV_USE) {
- erts_driver_t* drv_ptr = erts_drvport2port(ix)->drv_ptr;
+ erts_driver_t* drv_ptr = erts_drvport2port(ix, NULL)->drv_ptr;
if (drv_ptr != state->driver.drv_ptr) {
/* Some other driver wants the stop_select callback */
if (state->driver.drv_ptr->handle) {
@@ -1052,7 +1042,7 @@ static void
print_event_op(erts_dsprintf_buf_t *dsbufp,
ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data)
{
- Port *pp = erts_drvport2port(ix);
+ Port *pp = erts_drvport2port(ix, NULL);
erts_dsprintf(dsbufp, "driver_event(%p, %d, ", ix, (int) fd);
if (!event_data)
erts_dsprintf(dsbufp, "NULL");
@@ -1061,8 +1051,8 @@ print_event_op(erts_dsprintf_buf_t *dsbufp,
(unsigned int) event_data->events,
(unsigned int) event_data->revents);
erts_dsprintf(dsbufp, ") by ");
- print_driver_name(dsbufp, pp->id);
- erts_dsprintf(dsbufp, "driver %T ", pp ? pp->id : NIL);
+ print_driver_name(dsbufp, pp->common.id);
+ erts_dsprintf(dsbufp, "driver %T ", pp ? pp->common.id : NIL);
}
static void
@@ -1099,8 +1089,7 @@ iready(Eterm id, ErtsDrvEventState *state)
if (erts_port_task_schedule(id,
&state->driver.select->intask,
ERTS_PORT_TASK_INPUT,
- (ErlDrvEvent) state->fd,
- NULL) != 0) {
+ (ErlDrvEvent) state->fd) != 0) {
stale_drv_select(id, state, ERL_DRV_READ);
}
}
@@ -1111,8 +1100,7 @@ oready(Eterm id, ErtsDrvEventState *state)
if (erts_port_task_schedule(id,
&state->driver.select->outtask,
ERTS_PORT_TASK_OUTPUT,
- (ErlDrvEvent) state->fd,
- NULL) != 0) {
+ (ErlDrvEvent) state->fd) != 0) {
stale_drv_select(id, state, ERL_DRV_WRITE);
}
}
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index db2854fa40..94f9f76a20 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -57,26 +57,48 @@
/* Implement some other way to get the real page size if needed! */
#endif
-#define MAX_CACHE_SIZE 30
-
#undef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#undef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
-#undef PAGE_MASK
-#define INV_PAGE_MASK ((Uint) (page_size - 1))
-#define PAGE_MASK (~INV_PAGE_MASK)
-#define PAGE_FLOOR(X) ((X) & PAGE_MASK)
-#define PAGE_CEILING(X) PAGE_FLOOR((X) + INV_PAGE_MASK)
-#define PAGES(X) ((X) >> page_shift)
+#define INV_ALIGNED_MASK ((UWord) ((MSEG_ALIGNED_SIZE) - 1))
+#define ALIGNED_MASK (~INV_ALIGNED_MASK)
+#define ALIGNED_FLOOR(X) (((UWord)(X)) & ALIGNED_MASK)
+#define ALIGNED_CEILING(X) ALIGNED_FLOOR((X) + INV_ALIGNED_MASK)
+#define MAP_IS_ALIGNED(X) (((UWord)(X) & (MSEG_ALIGNED_SIZE - 1)) == 0)
+
+#define IS_2POW(X) ((X) && !((X) & ((X) - 1)))
+static ERTS_INLINE Uint ceil_2pow(Uint x) {
+ int i = 1 << (4 + (sizeof(Uint) != 4 ? 1 : 0));
+ x--;
+ do { x |= x >> i; } while(i >>= 1);
+ return x + 1;
+}
+static const int debruijn[32] = {
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
+};
+
+#define LOG2(X) (debruijn[((Uint32)(((X) & -(X)) * 0x077CB531U)) >> 27])
+
+#define CACHE_AREAS (32 - MSEG_ALIGN_BITS)
+
+#define SIZE_TO_CACHE_AREA_IDX(S) (LOG2((S)) - MSEG_ALIGN_BITS)
+#define MAX_CACHE_SIZE (30)
+
+#define MSEG_FLG_IS_2POW(X) ((X) & ERTS_MSEG_FLG_2POW)
+
+#ifdef DEBUG
+#define DBG(F,...) fprintf(stderr, (F), __VA_ARGS__ )
+#else
+#define DBG(F,...) do{}while(0)
+#endif
static int atoms_initialized;
typedef struct mem_kind_t MemKind;
-static void mseg_clear_cache(MemKind*);
-
#if HALFWORD_HEAP
static int initialize_pmmap(void);
static void *pmmap(size_t size);
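The rewritten cache keys segments by power-of-two size: IS_2POW tests for one, ceil_2pow rounds up, and LOG2 recovers the exponent with a de Bruijn multiply ((X) & -(X) isolates the lowest set bit, which for a power of two is the value itself), so SIZE_TO_CACHE_AREA_IDX maps a segment of 2^(MSEG_ALIGN_BITS+i) bytes to cache area i. A self-contained check of that mapping; MSEG_ALIGN_BITS is assumed to be 18 here purely for the example:

    #include <stdio.h>
    #include <stdint.h>

    #define IS_2POW(X) ((X) && !((X) & ((X) - 1)))
    static const int debruijn[32] = {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
        31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };
    #define LOG2(X) (debruijn[((uint32_t)(((X) & -(X)) * 0x077CB531U)) >> 27])

    #define ALIGN_BITS 18                    /* assumed value, for illustration */
    #define AREA_IDX(S) (LOG2(S) - ALIGN_BITS)

    int main(void)
    {
        uint32_t sz;
        for (sz = 1u << ALIGN_BITS; sz <= (1u << 22); sz <<= 1)
            printf("size %u: pow2=%d area=%d\n",
                   (unsigned)sz, IS_2POW(sz), AREA_IDX(sz));
        return 0;
    }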
@@ -116,15 +138,6 @@ static int mmap_fd;
#error "Not supported"
#endif /* #if HAVE_MMAP */
-#if defined(ERTS_MSEG_FAKE_SEGMENTS) && HALFWORD_HEAP
-# warning "ERTS_MSEG_FAKE_SEGMENTS will only be used for high memory segments"
-#endif
-
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
-#undef CAN_PARTLY_DESTROY
-#define CAN_PARTLY_DESTROY 0
-#endif
-
const ErtsMsegOpt_t erts_mseg_default_opt = {
1, /* Use cache */
1, /* Preserv data */
@@ -137,26 +150,17 @@ const ErtsMsegOpt_t erts_mseg_default_opt = {
};
-typedef struct cache_desc_t_ {
- void *seg;
- Uint size;
- struct cache_desc_t_ *next;
- struct cache_desc_t_ *prev;
-} cache_desc_t;
-
typedef struct {
Uint32 giga_no;
Uint32 no;
} CallCounter;
-static Uint page_size;
-static Uint page_shift;
-
typedef struct {
CallCounter alloc;
CallCounter dealloc;
CallCounter realloc;
CallCounter create;
+ CallCounter create_resize;
CallCounter destroy;
#if HAVE_MSEG_RECREATE
CallCounter recreate;
@@ -165,17 +169,25 @@ typedef struct {
CallCounter check_cache;
} ErtsMsegCalls;
+typedef struct cache_t_ cache_t;
+
+struct cache_t_ {
+ Uint size;
+ void *seg;
+ cache_t *next;
+};
+
+
typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t;
struct mem_kind_t {
- cache_desc_t cache_descs[MAX_CACHE_SIZE];
- cache_desc_t *free_cache_descs;
- cache_desc_t *cache;
- cache_desc_t *cache_end;
-
- Uint cache_size;
- Uint min_cached_seg_size;
- Uint max_cached_seg_size;
+
+ cache_t cache[MAX_CACHE_SIZE];
+ cache_t *cache_unpowered;
+ cache_t *cache_area[CACHE_AREAS];
+ cache_t *cache_free;
+
+ Sint cache_size;
Uint cache_hits;
struct {
@@ -320,8 +332,7 @@ static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */
static ERTS_INLINE void
-schedule_cache_check(ErtsMsegAllctr_t *ma)
-{
+schedule_cache_check(ErtsMsegAllctr_t *ma) {
if (!ma->is_cache_check_scheduled && ma->is_init_done) {
erts_set_aux_work_timeout(ma->ix,
@@ -331,12 +342,45 @@ schedule_cache_check(ErtsMsegAllctr_t *ma)
}
}
+/* remove ErtsMsegAllctr_t from arguments?
+ * only used for statistics
+ */
+static ERTS_INLINE void *
+mmap_align(ErtsMsegAllctr_t *ma, void *addr, size_t length, int prot, int flags, int fd, off_t offset) {
+
+ void *p, *q;
+ UWord d;
+
+ p = mmap(addr, length, prot, flags, fd, offset);
+
+ if (MAP_IS_ALIGNED(p) || p == MAP_FAILED)
+ return p;
+
+ if (ma)
+ INC_CC(ma, create_resize);
+
+ munmap(p, length);
+
+ if ((p = mmap(addr, length + MSEG_ALIGNED_SIZE, prot, flags, fd, offset)) == MAP_FAILED)
+ return MAP_FAILED;
+
+ q = (void *)ALIGNED_CEILING(p);
+ d = q - p;
+
+ if (d > 0)
+ munmap(p, d);
+
+ if (MSEG_ALIGNED_SIZE - d > 0)
+ munmap((void *) (q + length), MSEG_ALIGNED_SIZE - d);
+
+ return q;
+}
+
static ERTS_INLINE void *
mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
{
void *seg;
-
- ASSERT(size % page_size == 0);
+ ASSERT(size % MSEG_ALIGNED_SIZE == 0);
#if HALFWORD_HEAP
if (mk == &ma->low_mem) {
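mmap_align() first tries a plain mmap; if the result happens to be MSEG_ALIGNED_SIZE-aligned it is used as is, otherwise the mapping is dropped, length + MSEG_ALIGNED_SIZE is mapped instead, and the slack before and after the aligned sub-range is munmapped. A standalone sketch of that over-allocate-and-trim step, with the alignment as a parameter and MAP_ANONYMOUS as on Linux (the in-tree version also counts the retry in create_resize and propagates MAP_FAILED):

    #include <sys/mman.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: map 'length' bytes starting at a multiple of 'align'
     * (a power of two) by over-allocating and trimming both ends. */
    static void *mmap_aligned(size_t length, size_t align)
    {
        char *p, *q;
        size_t pre, post;

        p = mmap(NULL, length + align, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return NULL;

        q = (char *)(((uintptr_t)p + (align - 1)) & ~(uintptr_t)(align - 1));
        pre = (size_t)(q - p);            /* slack before the aligned start */
        post = align - pre;               /* slack after q + length         */

        if (pre)
            munmap(p, pre);
        if (post)
            munmap(q + length, post);
        return q;
    }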
@@ -345,18 +389,17 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
return NULL;
}
- }
- else
+ } else
#endif
{
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
- seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
-#elif HAVE_MMAP
+#if HAVE_MMAP
{
- seg = (void *) mmap((void *) 0, (size_t) size,
+ seg = (void *) mmap_align(ma, (void *) 0, (size_t) size,
MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
if (seg == (void *) MAP_FAILED)
seg = NULL;
+
+ ASSERT(MAP_IS_ALIGNED(seg) || !seg);
}
#else
# error "Missing mseg_create() implementation"
@@ -369,38 +412,24 @@ mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
}
static ERTS_INLINE void
-mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size)
-{
-#ifdef DEBUG
- int res;
-#endif
+mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) {
+ ERTS_DECLARE_DUMMY(int res);
#if HALFWORD_HEAP
if (mk == &ma->low_mem) {
-#ifdef DEBUG
- res =
-#endif
- pmunmap((void *) seg, size);
+ res = pmunmap((void *) seg, size);
}
else
#endif
{
-#ifdef ERTS_MSEG_FAKE_SEGMENTS
- erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
-#ifdef DEBUG
- res = 0;
-#endif
-#elif HAVE_MMAP
-#ifdef DEBUG
- res =
-#endif
- munmap((void *) seg, size);
+#ifdef HAVE_MMAP
+ res = munmap((void *) seg, size);
#else
# error "Missing mseg_destroy() implementation"
#endif
}
- ASSERT(size % page_size == 0);
+ ASSERT(size % MSEG_ALIGNED_SIZE == 0);
ASSERT(res == 0);
INC_CC(ma, destroy);
@@ -408,14 +437,36 @@ mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size)
}
#if HAVE_MSEG_RECREATE
+#if defined(__NetBSD__)
+#define MREMAP_FLAGS (0)
+#else
+#define MREMAP_FLAGS (MREMAP_MAYMOVE)
+#endif
+
+
+/* mseg_recreate
+ * May return *unaligned* segments, i.e. addresses that are not aligned to
+ * MSEG_ALIGNMENT (they are still page aligned)
+ *
+ * This is fine for single block carriers as long as we don't cache misaligned
+ * segments (since multiblock carriers may use them)
+ *
+ * For multiblock carriers we *need* MSEG_ALIGNMENT but mbc's will never be
+ * reallocated.
+ *
+ * This should probably be fixed the following way:
+ * 1) Use an option to segment allocation - NEED_ALIGNMENT
+ * 2) Add mremap_align which takes care of aligning a newly mremapped area
+ * 3) Fix the cache to handle both aligned and unaligned segments
+ */
static ERTS_INLINE void *
mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
{
void *new_seg;
- ASSERT(old_size % page_size == 0);
- ASSERT(new_size % page_size == 0);
+ ASSERT(old_size % MSEG_ALIGNED_SIZE == 0);
+ ASSERT(new_size % MSEG_ALIGNED_SIZE == 0);
#if HALFWORD_HEAP
if (mk == &ma->low_mem) {
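MREMAP_FLAGS hides the difference between the two mremap() interfaces used below: Linux takes (old, old_size, new_size, flags) and needs MREMAP_MAYMOVE to be allowed to relocate the mapping, while NetBSD takes (old, old_size, new, new_size, flags) and is passed 0 here. A minimal Linux-side sketch of growing (or moving) a mapping, assuming _GNU_SOURCE for the prototype:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <stddef.h>

    /* Sketch: resize a mapping, letting the kernel relocate it when the
     * region cannot be extended in place (MREMAP_MAYMOVE). */
    static void *grow_mapping(void *old, size_t old_size, size_t new_size)
    {
        void *p = mremap(old, old_size, new_size, MREMAP_MAYMOVE);
        return (p == MAP_FAILED) ? NULL : p;
    }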
@@ -426,22 +477,12 @@ mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, U
else
#endif
{
-#if defined(ERTS_MSEG_FAKE_SEGMENTS)
- new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
-#elif HAVE_MREMAP
-
- #if defined(__NetBSD__)
- new_seg = (void *) mremap((void *) old_seg,
- (size_t) old_size,
- NULL,
- (size_t) new_size,
- 0);
- #else
- new_seg = (void *) mremap((void *) old_seg,
- (size_t) old_size,
- (size_t) new_size,
- MREMAP_MAYMOVE);
- #endif
+#if HAVE_MREMAP
+#if defined(__NetBSD__)
+ new_seg = mremap(old_seg, (size_t)old_size, NULL, new_size, MREMAP_FLAGS);
+#else
+ new_seg = mremap(old_seg, (size_t)old_size, (size_t)new_size, MREMAP_FLAGS);
+#endif
if (new_seg == (void *) MAP_FAILED)
new_seg = NULL;
#else
@@ -475,151 +516,265 @@ do { \
#define ERTS_DBG_MK_CHK_THR_ACCESS(MK)
#endif
-static ERTS_INLINE cache_desc_t *
-alloc_cd(MemKind* mk)
-{
- cache_desc_t *cd = mk->free_cache_descs;
+/* NEW CACHE interface */
+
+static ERTS_INLINE cache_t *mseg_cache_alloc_descriptor(MemKind *mk) {
+ cache_t *c = mk->cache_free;
+
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- if (cd)
- mk->free_cache_descs = cd->next;
- return cd;
+ if (c)
+ mk->cache_free = c->next;
+
+ return c;
}
-static ERTS_INLINE void
-free_cd(MemKind* mk, cache_desc_t *cd)
-{
+static ERTS_INLINE void mseg_cache_free_descriptor(MemKind *mk, cache_t *c) {
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- cd->next = mk->free_cache_descs;
- mk->free_cache_descs = cd;
+ ASSERT(c);
+
+ c->seg = NULL;
+ c->size = 0;
+ c->next = mk->cache_free;
+ mk->cache_free = c;
}
+static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, Uint size) {
-static ERTS_INLINE void
-link_cd(MemKind* mk, cache_desc_t *cd)
-{
+ cache_t *c;
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- if (mk->cache)
- mk->cache->prev = cd;
- cd->next = mk->cache;
- cd->prev = NULL;
- mk->cache = cd;
-
- if (!mk->cache_end) {
- ASSERT(!cd->next);
- mk->cache_end = cd;
+
+ if (mk->cache_free && MAP_IS_ALIGNED(seg)) {
+ if (IS_2POW(size)) {
+ int ix = SIZE_TO_CACHE_AREA_IDX(size);
+
+ ASSERT(ix < CACHE_AREAS);
+ ASSERT((1 << (ix + MSEG_ALIGN_BITS)) == size);
+
+ /* unlink from free cache list */
+ c = mseg_cache_alloc_descriptor(mk);
+
+ /* link to cache area */
+ c->seg = seg;
+ c->size = size;
+ c->next = mk->cache_area[ix];
+
+ mk->cache_area[ix] = c;
+ mk->cache_size++;
+
+ ASSERT(mk->cache_size <= mk->ma->max_cache_size);
+
+ return 1;
+ } else {
+ /* unlink from free cache list */
+ c = mseg_cache_alloc_descriptor(mk);
+
+ /* link to cache area */
+ c->seg = seg;
+ c->size = size;
+ c->next = mk->cache_unpowered;
+
+ mk->cache_unpowered = c;
+ mk->cache_size++;
+
+ ASSERT(mk->cache_size <= mk->ma->max_cache_size);
+
+ return 1;
+ }
}
- mk->cache_size++;
+ return 0;
}
-#if CAN_PARTLY_DESTROY
-static ERTS_INLINE void
-end_link_cd(MemKind* mk, cache_desc_t *cd)
-{
+static ERTS_INLINE void *cache_get_segment(MemKind *mk, Uint *size_p) {
+
+ Uint size = *size_p;
+
ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- if (mk->cache_end)
- mk->cache_end->next = cd;
- cd->next = NULL;
- cd->prev = mk->cache_end;
- mk->cache_end = cd;
-
- if (!mk->cache) {
- ASSERT(!cd->prev);
- mk->cache = cd;
- }
+ if (IS_2POW(size)) {
+
+ int i, ix = SIZE_TO_CACHE_AREA_IDX(size);
+ void *seg;
+ cache_t *c;
+ Uint csize;
+
+ for( i = ix; i < CACHE_AREAS; i++) {
+
+ if ((c = mk->cache_area[i]) == NULL)
+ continue;
+
+ ASSERT(IS_2POW(c->size));
+
+ /* unlink from cache area */
+ csize = c->size;
+ seg = c->seg;
+ mk->cache_area[i] = c->next;
+ c->next = NULL;
+ mk->cache_size--;
+ mk->cache_hits++;
+
+ /* link to free cache list */
+ mseg_cache_free_descriptor(mk, c);
+
+ ASSERT(!(mk->cache_size < 0));
+
+ /* divvy up the cache - if needed */
+ while( i > ix) {
+ csize = csize >> 1;
+ /* try to cache half of it */
+ if (!cache_bless_segment(mk, (char *)seg + csize, csize)) {
+ /* could not be cached; destroy it instead */
+ mseg_destroy(mk->ma, mk, (char *)seg + csize, csize);
+ }
+ i--;
+ }
+ ASSERT(csize == size);
+ return seg;
+ }
+ }
+ else if (mk->cache_unpowered) {
+ void *seg;
+ cache_t *c, *pc;
+ Uint csize;
+ Uint bad_max_abs = mk->ma->abs_max_cache_bad_fit;
+ Uint bad_max_rel = mk->ma->rel_max_cache_bad_fit;
+
+ c = mk->cache_unpowered;
+ pc = c;
+
+ while (c) {
+ csize = c->size;
+ if (csize >= size &&
+ ((csize - size)*100 < bad_max_rel*size) &&
+ (csize - size) < bad_max_abs ) {
+
+ /* unlink from cache area */
+ seg = c->seg;
+
+ if (pc == c) {
+ mk->cache_unpowered = c->next;
+ } else {
+ pc->next = c->next;
+ }
+
+ c->next = NULL;
+ mk->cache_size--;
+ mk->cache_hits++;
+
+ /* link to free cache list */
+ mseg_cache_free_descriptor(mk, c);
+ *size_p = csize;
+
+ return seg;
+ }
- mk->cache_size++;
+ pc = c;
+ c = c->next;
+ }
+ }
+ return NULL;
}
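
    A minimal sketch of the "divvy up" step in cache_get_segment() above: when a hit
    comes from a larger power-of-two area, the segment is halved repeatedly and each
    upper half is offered back to the cache until only the requested size remains.
    cache_half() is a hypothetical stand-in for cache_bless_segment() with its
    mseg_destroy() fallback:

    #include <stdio.h>

    static void cache_half(char *seg, unsigned long size)
    {
        /* real code: cache_bless_segment(), falling back to mseg_destroy() */
        printf("re-cache %lu bytes at %p\n", size, (void *)seg);
    }

    static char *split_down(char *seg, unsigned long have, unsigned long want)
    {
        while (have > want) {
            have >>= 1;                     /* halve the segment           */
            cache_half(seg + have, have);   /* give the upper half back    */
        }
        return seg;                         /* lower 'want' bytes are used */
    }

    int main(void)
    {
        static char arena[1 << 20];                 /* pretend 1 MiB segment */
        split_down(arena, 1UL << 20, 1UL << 18);    /* want 256 KiB          */
        return 0;
    }
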
-#endif
-static ERTS_INLINE void
-unlink_cd(MemKind* mk, cache_desc_t *cd)
-{
- ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- if (cd->next)
- cd->next->prev = cd->prev;
- else
- mk->cache_end = cd->prev;
-
- if (cd->prev)
- cd->prev->next = cd->next;
- else
- mk->cache = cd->next;
- ASSERT(mk->cache_size > 0);
+/* *_mseg_check_*_cache
+ * Slowly remove segments cached in the allocator by
+ * using callbacks from aux-work in the scheduler.
+ */
+
+static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, cache_t **head) {
+ cache_t *c = NULL;
+
+ c = *head;
+
+ ASSERT( c != NULL );
+
+ *head = c->next;
+
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
+
+ mseg_destroy(mk->ma, mk, c->seg, c->size);
+ mseg_cache_free_descriptor(mk, c);
+
+ mk->segments.current.watermark--;
mk->cache_size--;
-}
-static ERTS_INLINE void
-check_cache_limits(MemKind* mk)
-{
- cache_desc_t *cd;
- ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- mk->max_cached_seg_size = 0;
- mk->min_cached_seg_size = ~((Uint) 0);
- for (cd = mk->cache; cd; cd = cd->next) {
- if (cd->size < mk->min_cached_seg_size)
- mk->min_cached_seg_size = cd->size;
- if (cd->size > mk->max_cached_seg_size)
- mk->max_cached_seg_size = cd->size;
- }
+ ASSERT( mk->cache_size >= 0 );
+
+ return mk->cache_size;
}
-static ERTS_INLINE void
-adjust_cache_size(MemKind* mk, int force_check_limits)
-{
- cache_desc_t *cd;
- int check_limits = force_check_limits;
- Sint max_cached = ((Sint) mk->segments.current.watermark
- - (Sint) mk->segments.current.no);
- ERTS_DBG_MK_CHK_THR_ACCESS(mk);
- while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) {
- ASSERT(mk->cache_end);
- cd = mk->cache_end;
- if (!check_limits &&
- !(mk->min_cached_seg_size < cd->size
- && cd->size < mk->max_cached_seg_size)) {
- check_limits = 1;
- }
+static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, cache_t **head) {
+ cache_t *c = NULL, *next = NULL;
+
+ c = *head;
+ ASSERT( c != NULL );
+
+ while (c) {
+
+ next = c->next;
+
if (erts_mtrace_enabled)
- erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(mk->ma, mk, cd->seg, cd->size);
- unlink_cd(mk,cd);
- free_cd(mk,cd);
- }
+ erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg);
- if (check_limits)
- check_cache_limits(mk);
-}
+ mseg_destroy(mk->ma, mk, c->seg, c->size);
+ mseg_cache_free_descriptor(mk, c);
-static Uint
-check_one_cache(MemKind* mk)
-{
- if (mk->segments.current.watermark > mk->segments.current.no)
mk->segments.current.watermark--;
- adjust_cache_size(mk, 0);
+ mk->cache_size--;
+
+ c = next;
+ }
+
+ *head = NULL;
+
+ ASSERT( mk->cache_size >= 0 );
- if (mk->cache_size)
- schedule_cache_check(mk->ma);
return mk->cache_size;
}
-static void do_cache_check(ErtsMsegAllctr_t *ma)
-{
- int empty_cache = 1;
+/* mseg_check_memkind_cache
+ * - Check if we can empty some cached segments in this
+ * MemKind.
+ */
+
+
+static Uint mseg_check_memkind_cache(MemKind *mk) {
+ int i;
+
+ ERTS_DBG_MK_CHK_THR_ACCESS(mk);
+
+ for (i = 0; i < CACHE_AREAS; i++) {
+ if (mk->cache_area[i] != NULL)
+ return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_area[i]));
+ }
+
+ if (mk->cache_unpowered)
+ return mseg_drop_one_memkind_cache_size(mk, &(mk->cache_unpowered));
+
+ return 0;
+}
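
    The aux-work driven eviction above removes at most one cached segment per check:
    the power-of-two areas are scanned in order and the first non-empty list gives up
    its head, with the remaining count returned to the caller. A stand-alone sketch of
    that one-at-a-time policy (drop_one() stands in for
    mseg_drop_one_memkind_cache_size()):

    #include <stdio.h>

    #define AREAS 4

    static int area_len[AREAS] = { 0, 2, 0, 1 };   /* pretend cached segments   */

    static int drop_one(int *len)
    {
        --*len;                                    /* destroy one segment       */
        return *len;
    }

    static int check_cache(void)
    {
        int i;
        for (i = 0; i < AREAS; i++)
            if (area_len[i] > 0)
                return drop_one(&area_len[i]);     /* at most one per check     */
        return 0;                                  /* nothing left to purge     */
    }

    int main(void)
    {
        int i;
        for (i = 0; i < 5; i++)                    /* five periodic checks      */
            printf("check %d: left in hit area = %d\n", i, check_cache());
        return 0;
    }
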
+
+/* mseg_cache_check
+ * - Check if we have some cache we can purge
+ * in any of the memkinds.
+ */
+
+static void mseg_cache_check(ErtsMsegAllctr_t *ma) {
MemKind* mk;
+ Uint empty_cache = 1;
ERTS_MSEG_LOCK(ma);
- for (mk=ma->mk_list; mk; mk=mk->next) {
- if (check_one_cache(mk))
+ for (mk = ma->mk_list; mk; mk = mk->next) {
+ if (mseg_check_memkind_cache(mk))
empty_cache = 0;
}
+ /* If all MemKind caches are empty,
+ * remove the aux-work callback.
+ */
if (empty_cache) {
ma->is_cache_check_scheduled = 0;
- erts_set_aux_work_timeout(ma->ix,
- ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
- 0);
+ erts_set_aux_work_timeout(ma->ix, ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK, 0);
}
INC_CC(ma, check_cache);
@@ -627,27 +782,65 @@ static void do_cache_check(ErtsMsegAllctr_t *ma)
ERTS_MSEG_UNLOCK(ma);
}
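
    A minimal sketch of the schedule/unschedule dance above, assuming a hypothetical
    set_timeout(ms) helper in place of erts_set_aux_work_timeout(): the check keeps
    running as long as the timeout stays armed, and disarms it once every MemKind
    cache is empty.

    #include <stdio.h>

    static int is_scheduled = 0;

    static void set_timeout(unsigned ms) { printf("timeout := %u ms\n", ms); }

    static void schedule_cache_check(void)
    {
        if (!is_scheduled) {
            set_timeout(1000);        /* arm periodic aux work */
            is_scheduled = 1;
        }
    }

    static void cache_check(int caches_left)
    {
        if (caches_left == 0) {       /* nothing left to purge */
            set_timeout(0);           /* disarm aux work       */
            is_scheduled = 0;
        }
    }

    int main(void)
    {
        schedule_cache_check();
        cache_check(3);               /* still work to do      */
        cache_check(0);               /* all empty: unschedule */
        return 0;
    }
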
-void erts_mseg_cache_check(void)
-{
- do_cache_check(ERTS_MSEG_ALLCTR_SS());
+/* erts_mseg_cache_check
+ * - This is a callback that is scheduled as aux-work from the
+ * schedulers and is called at some interval as long as this
+ * mseg allocator has cached segments in any memkind.
+ * - Purpose: empty the cache slowly so we don't accumulate mapped
+ * areas and bloat memory.
+ */
+
+void erts_mseg_cache_check(void) {
+ mseg_cache_check(ERTS_MSEG_ALLCTR_SS());
}
-static void
-mseg_clear_cache(MemKind* mk)
-{
- mk->segments.current.watermark = 0;
- adjust_cache_size(mk, 1);
+/* *_mseg_clear_*_cache
+ * Remove cached segments from the allocator completely
+ */
+
+static void mseg_clear_memkind_cache(MemKind *mk) {
+ int i;
+
+ /* drop pow2 caches */
+ for (i = 0; i < CACHE_AREAS; i++) {
+ if (mk->cache_area[i] == NULL)
+ continue;
+
+ mseg_drop_memkind_cache_size(mk, &(mk->cache_area[i]));
+ ASSERT(mk->cache_area[i] == NULL);
+ }
+ /* drop varied caches */
+ if(mk->cache_unpowered)
+ mseg_drop_memkind_cache_size(mk, &(mk->cache_unpowered));
+
+ ASSERT(mk->cache_unpowered == NULL);
+ ASSERT(mk->cache_size == 0);
+}
+
+static void mseg_clear_cache(ErtsMsegAllctr_t *ma) {
+ MemKind* mk;
+
+ ERTS_MSEG_LOCK(ma);
+ ERTS_DBG_MA_CHK_THR_ACCESS(ma);
+
- ASSERT(!mk->cache);
- ASSERT(!mk->cache_end);
- ASSERT(!mk->cache_size);
+ for (mk = ma->mk_list; mk; mk = mk->next) {
+ mseg_clear_memkind_cache(mk);
+ }
- mk->segments.current.watermark = mk->segments.current.no;
+ INC_CC(ma, clear_cache);
- INC_CC(mk->ma, clear_cache);
+ ERTS_MSEG_UNLOCK(ma);
}
+void erts_mseg_clear_cache(void) {
+ mseg_clear_cache(ERTS_MSEG_ALLCTR_SS());
+ mseg_clear_cache(ERTS_MSEG_ALLCTR_IX(0));
+}
+
+
+
static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
const ErtsMsegOpt_t *opt)
{
@@ -660,116 +853,40 @@ static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
static void *
mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p,
- const ErtsMsegOpt_t *opt)
+ Uint flags, const ErtsMsegOpt_t *opt)
{
- Uint max, min, diff_size, size;
- cache_desc_t *cd, *cand_cd;
+ Uint size;
void *seg;
MemKind* mk = memkind(ma, opt);
INC_CC(ma, alloc);
- size = PAGE_CEILING(*size_p);
+ /* Carrier align */
+ size = ALIGNED_CEILING(*size_p);
+
+ /* Cache optim (if applicable) */
+ if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(size))
+ size = ceil_2pow(size);
#if CAN_PARTLY_DESTROY
if (size < ma->min_seg_size)
ma->min_seg_size = size;
#endif
+
+ if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size)) != NULL)
+ goto done;
- if (!opt->cache) {
- create_seg:
- adjust_cache_size(mk,0);
- seg = mseg_create(ma, mk, size);
- if (!seg) {
- mseg_clear_cache(mk);
- seg = mseg_create(ma, mk, size);
- if (!seg)
- size = 0;
- }
-
- *size_p = size;
- if (seg) {
- if (erts_mtrace_enabled)
- erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
- ERTS_MSEG_ALLOC_STAT(mk,size);
- }
- return seg;
- }
-
- if (size > mk->max_cached_seg_size)
- goto create_seg;
-
- if (size < mk->min_cached_seg_size) {
-
- diff_size = mk->min_cached_seg_size - size;
-
- if (diff_size > ma->abs_max_cache_bad_fit)
- goto create_seg;
-
- if (100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size))
- goto create_seg;
-
- }
-
- max = 0;
- min = ~((Uint) 0);
- cand_cd = NULL;
-
- for (cd = mk->cache; cd; cd = cd->next) {
- if (cd->size >= size) {
- if (!cand_cd) {
- cand_cd = cd;
- continue;
- }
- else if (cd->size < cand_cd->size) {
- if (max < cand_cd->size)
- max = cand_cd->size;
- if (min > cand_cd->size)
- min = cand_cd->size;
- cand_cd = cd;
- continue;
- }
- }
- if (max < cd->size)
- max = cd->size;
- if (min > cd->size)
- min = cd->size;
- }
-
- mk->min_cached_seg_size = min;
- mk->max_cached_seg_size = max;
-
- if (!cand_cd)
- goto create_seg;
-
- diff_size = cand_cd->size - size;
-
- if (diff_size > ma->abs_max_cache_bad_fit
- || 100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) {
- if (mk->max_cached_seg_size < cand_cd->size)
- mk->max_cached_seg_size = cand_cd->size;
- if (mk->min_cached_seg_size > cand_cd->size)
- mk->min_cached_seg_size = cand_cd->size;
- goto create_seg;
- }
-
- mk->cache_hits++;
-
- size = cand_cd->size;
- seg = cand_cd->seg;
-
- unlink_cd(mk,cand_cd);
- free_cd(mk,cand_cd);
+ if ((seg = mseg_create(ma, mk, size)) == NULL)
+ size = 0;
+done:
*size_p = size;
+ if (seg) {
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size);
- if (erts_mtrace_enabled) {
- erts_mtrace_crr_free(SEGTYPE, SEGTYPE, seg);
- erts_mtrace_crr_alloc(seg, atype, SEGTYPE, size);
- }
-
- if (seg)
ERTS_MSEG_ALLOC_STAT(mk,size);
+ }
return seg;
}
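
    A stand-alone sketch of the size shaping at the top of the new mseg_alloc(): the
    request is first rounded up to the carrier alignment and, when the
    ERTS_MSEG_FLG_2POW flag is set, further up to the next power of two so it fits a
    cache area. align_ceiling()/ceil_pow2() are illustrative stand-ins for
    ALIGNED_CEILING()/ceil_2pow(), assuming 256 KiB alignment:

    #include <stdio.h>

    #define ALIGNED_SIZE (1UL << 18)                 /* assume 256 KiB carriers */

    static unsigned long align_ceiling(unsigned long sz)
    {
        return (sz + ALIGNED_SIZE - 1) & ~(ALIGNED_SIZE - 1);
    }

    static unsigned long ceil_pow2(unsigned long sz)
    {
        unsigned long p = ALIGNED_SIZE;
        while (p < sz)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned long req = 700UL * 1024;            /* 700 KiB request         */
        unsigned long sz  = align_ceiling(req);      /* -> 768 KiB              */
        printf("aligned: %lu KiB, 2-pow: %lu KiB\n",
               sz / 1024, ceil_pow2(sz) / 1024);     /* -> 768 KiB, 1024 KiB    */
        return 0;
    }
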
@@ -780,73 +897,42 @@ mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size,
const ErtsMsegOpt_t *opt)
{
MemKind* mk = memkind(ma, opt);
- cache_desc_t *cd;
+
ERTS_MSEG_DEALLOC_STAT(mk,size);
- if (!opt->cache || ma->max_cache_size == 0) {
- if (erts_mtrace_enabled)
- erts_mtrace_crr_free(atype, SEGTYPE, seg);
- mseg_destroy(ma, mk, seg, size);
+ if (opt->cache && cache_bless_segment(mk, seg, size)) {
+ schedule_cache_check(ma);
+ goto done;
}
- else {
- int check_limits = 0;
-
- if (size < mk->min_cached_seg_size)
- mk->min_cached_seg_size = size;
- if (size > mk->max_cached_seg_size)
- mk->max_cached_seg_size = size;
-
- if (!mk->free_cache_descs) {
- cd = mk->cache_end;
- if (!(mk->min_cached_seg_size < cd->size
- && cd->size < mk->max_cached_seg_size)) {
- check_limits = 1;
- }
- if (erts_mtrace_enabled)
- erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
- mseg_destroy(ma, mk, cd->seg, cd->size);
- unlink_cd(mk,cd);
- free_cd(mk,cd);
- }
- cd = alloc_cd(mk);
- ASSERT(cd);
- cd->seg = seg;
- cd->size = size;
- link_cd(mk,cd);
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_free(atype, SEGTYPE, seg);
- if (erts_mtrace_enabled) {
- erts_mtrace_crr_free(atype, SEGTYPE, seg);
- erts_mtrace_crr_alloc(seg, SEGTYPE, SEGTYPE, size);
- }
-
- /* ASSERT(segments.current.watermark >= segments.current.no + cache_size); */
-
- if (check_limits)
- check_cache_limits(mk);
+ mseg_destroy(ma, mk, seg, size);
- schedule_cache_check(ma);
-
- }
+done:
INC_CC(ma, dealloc);
}
static void *
mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
- Uint old_size, Uint *new_size_p, const ErtsMsegOpt_t *opt)
+ Uint old_size, Uint *new_size_p, Uint flags, const ErtsMsegOpt_t *opt)
{
MemKind* mk;
void *new_seg;
Uint new_size;
+ /* Just allocate a new segment if we didn't have one before */
if (!seg || !old_size) {
- new_seg = mseg_alloc(ma, atype, new_size_p, opt);
+ new_seg = mseg_alloc(ma, atype, new_size_p, flags, opt);
DEC_CC(ma, alloc);
return new_seg;
}
+
+ /* Dealloc old segment if new segment is of size 0 */
if (!(*new_size_p)) {
mseg_dealloc(ma, atype, seg, old_size, opt);
DEC_CC(ma, dealloc);
@@ -855,7 +941,13 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
mk = memkind(ma, opt);
new_seg = seg;
- new_size = PAGE_CEILING(*new_size_p);
+
+ /* Carrier align */
+ new_size = ALIGNED_CEILING(*new_size_p);
+
+ /* Cache optim (if applicable) */
+ if (MSEG_FLG_IS_2POW(flags) && !IS_2POW(new_size))
+ new_size = ceil_2pow(new_size);
if (new_size == old_size)
;
@@ -866,53 +958,27 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
if (new_size < ma->min_seg_size)
ma->min_seg_size = new_size;
#endif
-
+ /* +M<S>rsbcst <ratio> */
if (shrink_sz < opt->abs_shrink_th
- && 100*PAGES(shrink_sz) < opt->rel_shrink_th*PAGES(old_size)) {
+ && 100*shrink_sz < opt->rel_shrink_th*old_size) {
new_size = old_size;
}
else {
#if CAN_PARTLY_DESTROY
- if (shrink_sz > ma->min_seg_size
- && mk->free_cache_descs
- && opt->cache) {
- cache_desc_t *cd;
-
- cd = alloc_cd(mk);
- ASSERT(cd);
- cd->seg = ((char *) seg) + new_size;
- cd->size = shrink_sz;
- end_link_cd(mk,cd);
-
- if (erts_mtrace_enabled) {
- erts_mtrace_crr_realloc(new_seg,
- atype,
- SEGTYPE,
- seg,
- new_size);
- erts_mtrace_crr_alloc(cd->seg, SEGTYPE, SEGTYPE, cd->size);
- }
- schedule_cache_check(ma);
- }
- else {
- if (erts_mtrace_enabled)
- erts_mtrace_crr_realloc(new_seg,
- atype,
- SEGTYPE,
- seg,
- new_size);
- mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz);
- }
+ if (erts_mtrace_enabled)
+ erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
-#elif HAVE_MSEG_RECREATE
+ mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz);
+#elif HAVE_MSEG_RECREATE
goto do_recreate;
-
#else
+ new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
+
+ ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
- new_seg = mseg_alloc(ma, atype, &new_size, opt);
if (!new_seg)
new_size = old_size;
else {
@@ -921,16 +987,15 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
MIN(new_size, old_size));
mseg_dealloc(ma, atype, seg, old_size, opt);
}
-
#endif
-
}
}
else {
if (!opt->preserv) {
mseg_dealloc(ma, atype, seg, old_size, opt);
- new_seg = mseg_alloc(ma, atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
+ ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
}
else {
#if HAVE_MSEG_RECREATE
@@ -938,18 +1003,23 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
do_recreate:
#endif
new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size);
+ /* ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
+ * The result will not always be aligned, and that is OK for now.
+ */
+
if (erts_mtrace_enabled)
erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size);
if (!new_seg)
new_size = old_size;
#else
- new_seg = mseg_alloc(ma, atype, &new_size, opt);
+ new_seg = mseg_alloc(ma, atype, &new_size, flags, opt);
+
+ ASSERT(MAP_IS_ALIGNED(new_seg) || !new_seg);
+
if (!new_seg)
new_size = old_size;
else {
- sys_memcpy(((char *) new_seg),
- ((char *) seg),
- MIN(new_size, old_size));
+ sys_memcpy(((char *) new_seg), ((char *) seg), MIN(new_size, old_size));
mseg_dealloc(ma, atype, seg, old_size, opt);
}
#endif
@@ -958,6 +1028,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg,
INC_CC(ma, realloc);
+ ASSERT(!MSEG_FLG_IS_2POW(flags) || IS_2POW(new_size));
*new_size_p = new_size;
ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size);
@@ -990,6 +1061,7 @@ static struct {
Eterm mseg_dealloc;
Eterm mseg_realloc;
Eterm mseg_create;
+ Eterm mseg_create_resize;
Eterm mseg_destroy;
#if HAVE_MSEG_RECREATE
Eterm mseg_recreate;
@@ -1046,6 +1118,7 @@ init_atoms(ErtsMsegAllctr_t *ma)
AM_INIT(mseg_dealloc);
AM_INIT(mseg_realloc);
AM_INIT(mseg_create);
+ AM_INIT(mseg_create_resize);
AM_INIT(mseg_destroy);
#if HAVE_MSEG_RECREATE
AM_INIT(mseg_recreate);
@@ -1065,14 +1138,12 @@ init_atoms(ErtsMsegAllctr_t *ma)
erts_mtx_unlock(&init_atoms_mutex);
}
-
#define bld_uint erts_bld_uint
#define bld_cons erts_bld_cons
#define bld_tuple erts_bld_tuple
#define bld_string erts_bld_string
#define bld_2tup_list erts_bld_2tup_list
-
/*
* bld_unstable_uint() (instead of bld_uint()) is used when values may
* change between size check and actual build. This because a value
@@ -1116,6 +1187,7 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp,
*lp = bld_cons(hpp, szp, bld_tuple(hpp, szp, 4, el1, el2, el3, el4), *lp);
}
+
static Eterm
info_options(ErtsMsegAllctr_t *ma,
char *prefix,
@@ -1176,6 +1248,7 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp
PRINT_CC(to, arg, dealloc);
PRINT_CC(to, arg, realloc);
PRINT_CC(to, arg, create);
+ PRINT_CC(to, arg, create_resize);
PRINT_CC(to, arg, destroy);
#if HAVE_MSEG_RECREATE
PRINT_CC(to, arg, recreate);
@@ -1215,6 +1288,10 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp
bld_unstable_uint(hpp, szp, ma->calls.create.giga_no),
bld_unstable_uint(hpp, szp, ma->calls.create.no));
+ add_3tup(hpp, szp, &res,
+ am.mseg_create_resize,
+ bld_unstable_uint(hpp, szp, ma->calls.create_resize.giga_no),
+ bld_unstable_uint(hpp, szp, ma->calls.create_resize.no));
add_3tup(hpp, szp, &res,
am.mseg_realloc,
@@ -1401,21 +1478,21 @@ erts_mseg_info(int ix,
}
void *
-erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
+erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, Uint flags, const ErtsMsegOpt_t *opt)
{
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *seg;
ERTS_MSEG_LOCK(ma);
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- seg = mseg_alloc(ma, atype, size_p, opt);
+ seg = mseg_alloc(ma, atype, size_p, flags, opt);
ERTS_MSEG_UNLOCK(ma);
return seg;
}
void *
-erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p)
+erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p, Uint flags)
{
- return erts_mseg_alloc_opt(atype, size_p, &erts_mseg_default_opt);
+ return erts_mseg_alloc_opt(atype, size_p, flags, &erts_mseg_default_opt);
}
void
@@ -1438,44 +1515,24 @@ erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size)
void *
erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg,
Uint old_size, Uint *new_size_p,
+ Uint flags,
const ErtsMsegOpt_t *opt)
{
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
void *new_seg;
ERTS_MSEG_LOCK(ma);
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, opt);
+ new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, flags, opt);
ERTS_MSEG_UNLOCK(ma);
return new_seg;
}
void *
erts_mseg_realloc(ErtsAlcType_t atype, void *seg,
- Uint old_size, Uint *new_size_p)
+ Uint old_size, Uint *new_size_p, Uint flags)
{
return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p,
- &erts_mseg_default_opt);
-}
-
-void
-erts_mseg_clear_cache(void)
-{
- ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS();
- MemKind* mk;
-
-start:
-
- ERTS_MSEG_LOCK(ma);
- ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- for (mk=ma->mk_list; mk; mk=mk->next) {
- mseg_clear_cache(mk);
- }
- ERTS_MSEG_UNLOCK(ma);
-
- if (ma->ix != 0) {
- ma = ERTS_MSEG_ALLCTR_IX(0);
- goto start;
- }
+ flags, &erts_mseg_default_opt);
}
Uint
@@ -1496,28 +1553,32 @@ erts_mseg_no(const ErtsMsegOpt_t *opt)
Uint
erts_mseg_unit_size(void)
{
- return page_size;
+ return MSEG_ALIGNED_SIZE;
}
static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name)
{
- unsigned i;
+ int i;
- mk->cache = NULL;
- mk->cache_end = NULL;
- mk->max_cached_seg_size = 0;
- mk->min_cached_seg_size = ~((Uint) 0);
- mk->cache_size = 0;
- mk->cache_hits = 0;
+ for (i = 0; i < CACHE_AREAS; i++) {
+ mk->cache_area[i] = NULL;
+ }
+
+ mk->cache_free = NULL;
- if (ma->max_cache_size > 0) {
- for (i = 0; i < ma->max_cache_size - 1; i++)
- mk->cache_descs[i].next = &mk->cache_descs[i + 1];
- mk->cache_descs[ma->max_cache_size - 1].next = NULL;
- mk->free_cache_descs = &mk->cache_descs[0];
+ ASSERT(ma->max_cache_size <= MAX_CACHE_SIZE);
+
+ for (i = 0; i < ma->max_cache_size; i++) {
+ mk->cache[i].seg = NULL;
+ mk->cache[i].size = 0;
+ mk->cache[i].next = mk->cache_free;
+ mk->cache_free = &(mk->cache[i]);
}
- else
- mk->free_cache_descs = NULL;
+
+ mk->cache_unpowered = NULL;
+
+ mk->cache_size = 0;
+ mk->cache_hits = 0;
mk->segments.current.watermark = 0;
mk->segments.current.no = 0;
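
    mem_kind_init() above now threads the static cache[] array into a single
    free-descriptor list instead of the old prev/next cache chain. A stand-alone
    sketch of that initialization pattern (the struct and sizes are illustrative, not
    the real cache_t/MAX_CACHE_SIZE):

    #include <stdio.h>

    #define MAX_CACHE 64

    typedef struct cache { void *seg; unsigned long size; struct cache *next; } cache_t;

    static cache_t cache[MAX_CACHE];
    static cache_t *cache_free;

    static void init_free_list(int max_cache_size)
    {
        int i;
        cache_free = NULL;
        for (i = 0; i < max_cache_size; i++) {   /* push each slot onto the free list */
            cache[i].seg  = NULL;
            cache[i].size = 0;
            cache[i].next = cache_free;
            cache_free    = &cache[i];
        }
    }

    int main(void)
    {
        int n = 0;
        cache_t *c;
        init_free_list(10);
        for (c = cache_free; c; c = c->next)
            n++;
        printf("%d free descriptors\n", n);      /* -> 10 */
        return 0;
    }
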
@@ -1570,15 +1631,10 @@ erts_mseg_init(ErtsMsegInit_t *init)
initialize_pmmap();
#endif
- page_size = GET_PAGE_SIZE;
+ if (!IS_2POW(GET_PAGE_SIZE))
+ erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE);
- page_shift = 1;
- while ((page_size >> page_shift) != 1) {
- if ((page_size & (1 << (page_shift - 1))) != 0)
- erl_exit(ERTS_ABORT_EXIT,
- "erts_mseg: Unexpected page_size %beu\n", page_size);
- page_shift++;
- }
+ ASSERT((MSEG_ALIGNED_SIZE % GET_PAGE_SIZE) == 0);
for (i = 0; i < no_mseg_allocators; i++) {
ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(i);
@@ -1663,7 +1719,7 @@ erts_mseg_test(unsigned long op,
case 0x400: /* Have erts_mseg */
return (unsigned long) 1;
case 0x401:
- return (unsigned long) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1);
+ return (unsigned long) erts_mseg_alloc(ERTS_ALC_A_INVALID, (Uint *) a1, (Uint) 0);
case 0x402:
erts_mseg_dealloc(ERTS_ALC_A_INVALID, (void *) a1, (Uint) a2);
return (unsigned long) 0;
@@ -1671,7 +1727,8 @@ erts_mseg_test(unsigned long op,
return (unsigned long) erts_mseg_realloc(ERTS_ALC_A_INVALID,
(void *) a1,
(Uint) a2,
- (Uint *) a3);
+ (Uint *) a3,
+ (Uint) 0);
case 0x404:
erts_mseg_clear_cache();
return (unsigned long) 0;
@@ -1707,7 +1764,40 @@ erts_mseg_test(unsigned long op,
* mapping tricks.
*/
-/*#define HARDDEBUG 1*/
+/* #define HARDDEBUG 1 */
+
+#ifdef HARDDEBUG
+static void dump_freelist(void)
+{
+ FreeBlock *p = first;
+
+ while (p) {
+ fprintf(stderr, "p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n",
+ (void *) p, (unsigned long) p->num, (void *) p->next);
+ p = p->next;
+ }
+}
+
+#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) \
+ fprintf(stderr,"Mapping of address %p with size %ld " \
+ "does not map complete pages (%s:%d)\r\n", \
+ (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__)
+
+#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) \
+ fprintf(stderr,"Mapping of address %p with size %ld " \
+ "is not page aligned (%s:%d)\r\n", \
+ (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__)
+
+#define HARDDEBUG_MAP_FAILED(PTR, SZ) \
+ fprintf(stderr, "Could not actually map memory " \
+ "at address %p with size %ld (%s:%d) ..\r\n", \
+ (void *) (PTR), (unsigned long) (SZ),__FILE__, __LINE__)
+#else
+#define HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(PTR, SZ) do{}while(0)
+#define HARDDEBUG_HW_UNALIGNED_ALIGNMENT(PTR, SZ) do{}while(0)
+#define HARDDEBUG_MAP_FAILED(PTR, SZ) do{}while(0)
+#endif
+
#ifdef __APPLE__
#define MAP_ANONYMOUS MAP_ANON
@@ -1726,49 +1816,20 @@ typedef struct _free_block {
struct _free_block *next;
} FreeBlock;
-/* Assigned once and for all */
-static size_t pagsz;
-
/* Protect with lock */
static FreeBlock *first;
-static size_t round_up_to_pagesize(size_t size)
-{
- size_t x = size / pagsz;
-
- if ((size % pagsz)) {
- ++x;
- }
-
- return pagsz * x;
-}
-
-static size_t round_down_to_pagesize(size_t size)
-{
- size_t x = size / pagsz;
-
- return pagsz * x;
-}
-
static void *do_map(void *ptr, size_t sz)
{
void *res;
- if (round_up_to_pagesize(sz) != sz) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld "
- "does not map complete pages\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ if (ALIGNED_CEILING(sz) != sz) {
+ HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz);
return NULL;
}
- if (((unsigned long) ptr) % pagsz) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld "
- "is not page aligned\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) {
+ HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz);
return NULL;
}
@@ -1782,10 +1843,7 @@ static void *do_map(void *ptr, size_t sz)
#endif
if (res == MAP_FAILED) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ HARDDEBUG_MAP_FAILED(ptr, sz);
return NULL;
}
@@ -1796,35 +1854,22 @@ static int do_unmap(void *ptr, size_t sz)
{
void *res;
- if (round_up_to_pagesize(sz) != sz) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld "
- "does not map complete pages\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ if (ALIGNED_CEILING(sz) != sz) {
+ HARDDEBUG_HW_INCOMPLETE_ALIGNMENT(ptr, sz);
return 1;
}
- if (((unsigned long) ptr) % pagsz) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld "
- "is not page aligned\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ if (((unsigned long) ptr) % MSEG_ALIGNED_SIZE) {
+ HARDDEBUG_HW_UNALIGNED_ALIGNMENT(ptr, sz);
return 1;
}
-
res = mmap(ptr, sz,
- PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE
- | MAP_FIXED,
+ PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-1 , 0);
if (res == MAP_FAILED) {
-#ifdef HARDDEBUG
- fprintf(stderr,"Mapping of address %p with size %ld failed!\r\n",
- (void *) ptr, (unsigned long) sz);
-#endif
+ HARDDEBUG_MAP_FAILED(ptr, sz);
return 1;
}
@@ -1862,8 +1907,6 @@ static int initialize_pmmap(void)
size_t rsz;
FreeBlock *initial;
-
- pagsz = getpagesize();
SET_RANGE_MIN();
if (sizeof(void *) != 8) {
erl_exit(1,"Halfword emulator cannot be run in 32bit mode");
@@ -1872,15 +1915,15 @@ static int initialize_pmmap(void)
p = (char *) RANGE_MIN;
q = (char *) RANGE_MAX;
- rsz = round_down_to_pagesize(q - p);
+ rsz = ALIGNED_FLOOR(q - p);
- rptr = mmap((void *) p, rsz,
+ rptr = mmap_align(NULL, (void *) p, rsz,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS |
MAP_NORESERVE | EXTRA_MAP_FLAGS,
-1 , 0);
#ifdef HARDDEBUG
printf("p=%p, rsz = %ld, pages = %ld, got range = %p -> %p\r\n",
- p, (unsigned long) rsz, (unsigned long) (rsz / pagsz),
+ p, (unsigned long) rsz, (unsigned long) (rsz / MSEG_ALIGNED_SIZE),
(void *) rptr, (void*)(rptr + rsz));
#endif
if ((UWord)(rptr + rsz) > RANGE_MAX) {
@@ -1892,39 +1935,27 @@ static int initialize_pmmap(void)
munmap((void*)RANGE_MAX, rsz - rsz_trunc);
rsz = rsz_trunc;
}
- if (!do_map(rptr,pagsz)) {
+ if (!do_map(rptr, MSEG_ALIGNED_SIZE)) {
erl_exit(1,"Could not actually mmap first page for halfword emulator...\n");
}
initial = (FreeBlock *) rptr;
- initial->num = (rsz / pagsz);
+ initial->num = (rsz / MSEG_ALIGNED_SIZE);
initial->next = NULL;
first = initial;
INIT_LOCK();
return 0;
}
-#ifdef HARDDEBUG
-static void dump_freelist(void)
-{
- FreeBlock *p = first;
-
- while (p) {
- printf("p = %p\r\np->num = %ld\r\np->next = %p\r\n\r\n",
- (void *) p, (unsigned long) p->num, (void *) p->next);
- p = p->next;
- }
-}
-#endif
-
-
static void *pmmap(size_t size)
{
- size_t real_size = round_up_to_pagesize(size);
- size_t num_pages = real_size / pagsz;
+ size_t real_size = ALIGNED_CEILING(size);
+ size_t num_pages = real_size / MSEG_ALIGNED_SIZE;
FreeBlock **block;
FreeBlock *tail;
FreeBlock *res;
+
TAKE_LOCK();
+
for (block = &first;
*block != NULL && (*block)->num < num_pages;
block = &((*block)->next))
@@ -1935,29 +1966,25 @@ static void *pmmap(size_t size)
}
if ((*block)->num == num_pages) {
/* nice, perfect fit */
- res = *block;
+ res = *block;
*block = (*block)->next;
} else {
tail = (FreeBlock *) (((char *) ((void *) (*block))) + real_size);
- if (!do_map(tail,pagsz)) {
-#ifdef HARDDEBUG
- fprintf(stderr, "Could not actually allocate page at %p...\r\n",
- (void *) tail);
-#endif
+ if (!do_map(tail, MSEG_ALIGNED_SIZE)) {
+ HARDDEBUG_MAP_FAILED(tail, MSEG_ALIGNED_SIZE);
RELEASE_LOCK();
return NULL;
}
- tail->num = (*block)->num - num_pages;
+ tail->num = (*block)->num - num_pages;
tail->next = (*block)->next;
res = *block;
*block = tail;
}
+
RELEASE_LOCK();
- if (!do_map(res,real_size)) {
-#ifdef HARDDEBUG
- fprintf(stderr, "Could not actually allocate %ld at %p...\r\n",
- (unsigned long) real_size, (void *) res);
-#endif
+
+ if (!do_map(res, real_size)) {
+ HARDDEBUG_MAP_FAILED(res, real_size);
return NULL;
}
@@ -1966,15 +1993,17 @@ static void *pmmap(size_t size)
static int pmunmap(void *p, size_t size)
{
- size_t real_size = round_up_to_pagesize(size);
- size_t num_pages = real_size / pagsz;
+ size_t real_size = ALIGNED_CEILING(size);
+ size_t num_pages = real_size / MSEG_ALIGNED_SIZE;
+
FreeBlock *block;
FreeBlock *last;
FreeBlock *nb = (FreeBlock *) p;
ASSERT(((unsigned long)p & CHECK_POINTER_MASK)==0);
- if (real_size > pagsz) {
- if (do_unmap(((char *) p) + pagsz,real_size - pagsz)) {
+
+ if (real_size > MSEG_ALIGNED_SIZE) {
+ if (do_unmap(((char *) p) + MSEG_ALIGNED_SIZE, real_size - MSEG_ALIGNED_SIZE)) {
return 1;
}
}
@@ -1993,7 +2022,7 @@ static int pmunmap(void *p, size_t size)
/* Merge new free block with following */
nb->num = block->num + num_pages;
nb->next = block->next;
- if (do_unmap(block,pagsz)) {
+ if (do_unmap(block, MSEG_ALIGNED_SIZE)) {
RELEASE_LOCK();
return 1;
}
@@ -2003,11 +2032,11 @@ static int pmunmap(void *p, size_t size)
nb->next = block;
}
if (last != NULL) {
- if (p == ((void *) (((char *) last) + (last->num * pagsz)))) {
+ if (p == ((void *) (((char *) last) + (last->num * MSEG_ALIGNED_SIZE)))) {
/* Merge with previous */
last->num += nb->num;
last->next = nb->next;
- if (do_unmap(nb,pagsz)) {
+ if (do_unmap(nb, MSEG_ALIGNED_SIZE)) {
RELEASE_LOCK();
return 1;
}
@@ -2024,10 +2053,10 @@ static int pmunmap(void *p, size_t size)
static void *pmremap(void *old_address, size_t old_size,
size_t new_size)
{
- size_t new_real_size = round_up_to_pagesize(new_size);
- size_t new_num_pages = new_real_size / pagsz;
- size_t old_real_size = round_up_to_pagesize(old_size);
- size_t old_num_pages = old_real_size / pagsz;
+ size_t new_real_size = ALIGNED_CEILING(new_size);
+ size_t new_num_pages = new_real_size / MSEG_ALIGNED_SIZE;
+ size_t old_real_size = ALIGNED_CEILING(old_size);
+ size_t old_num_pages = old_real_size / MSEG_ALIGNED_SIZE;
if (new_num_pages == old_num_pages) {
return old_address;
} else if (new_num_pages < old_num_pages) { /* Shrink */
@@ -2045,8 +2074,8 @@ static void *pmremap(void *old_address, size_t old_size,
(*block) > ((FreeBlock *)(((char *) vnfb) + nfb_real_size))) {
/* Normal link in */
if (nfb_pages > 1) {
- if (do_unmap((void *)(((char *) vnfb) + pagsz),
- (nfb_pages - 1)*pagsz)) {
+ if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE),
+ (nfb_pages - 1)*MSEG_ALIGNED_SIZE)) {
return NULL;
}
}
@@ -2058,8 +2087,8 @@ static void *pmremap(void *old_address, size_t old_size,
nfb->num = nfb_pages + (*block)->num;
/* unmap also the first page of the next freeblock */
(*block) = nfb;
- if (do_unmap((void *)(((char *) vnfb) + pagsz),
- nfb_pages*pagsz)) {
+ if (do_unmap((void *)(((char *) vnfb) + MSEG_ALIGNED_SIZE),
+ nfb_pages*MSEG_ALIGNED_SIZE)) {
return NULL;
}
}
@@ -2094,9 +2123,9 @@ static void *pmremap(void *old_address, size_t old_size,
size_t remaining_pages = (*block)->num -
(new_num_pages - old_num_pages);
if (!remaining_pages) {
- void *p = (void *) (((char *) (*block)) + pagsz);
+ void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE);
void *n = (*block)->next;
- size_t x = ((*block)->num - 1) * pagsz;
+ size_t x = ((*block)->num - 1) * MSEG_ALIGNED_SIZE;
if (x > 0) {
if (do_map(p,x) == NULL) {
RELEASE_LOCK();
@@ -2108,7 +2137,7 @@ static void *pmremap(void *old_address, size_t old_size,
FreeBlock *nfb = (FreeBlock *) ((void *)
(((char *) old_address) +
new_real_size));
- void *p = (void *) (((char *) (*block)) + pagsz);
+ void *p = (void *) (((char *) (*block)) + MSEG_ALIGNED_SIZE);
if (do_map(p,new_real_size - old_real_size) == NULL) {
RELEASE_LOCK();
return NULL;
@@ -2122,5 +2151,4 @@ static void *pmremap(void *old_address, size_t old_size,
}
}
}
-
#endif /* HALFWORD_HEAP */
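
    For the halfword pseudo-mmap above, a stand-alone sketch of the first-fit walk
    that pmmap() performs over its free list: blocks are kept as (length-in-pages,
    next) headers inside the reserved range; an exact fit is unlinked, a larger block
    is split and its tail re-linked. This mirrors the list logic only, not the
    do_map()/PROT_NONE bookkeeping.

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_BYTES 4096

    typedef struct free_block {
        size_t num;                        /* length in pages */
        struct free_block *next;
    } FreeBlock;

    static FreeBlock *first_fit(FreeBlock **head, size_t num_pages)
    {
        FreeBlock **block, *res, *tail;

        for (block = head; *block && (*block)->num < num_pages; block = &(*block)->next)
            ;
        if (*block == NULL)
            return NULL;                                   /* nothing big enough   */
        if ((*block)->num == num_pages) {                  /* perfect fit          */
            res = *block;
            *block = res->next;
        } else {                                           /* split, keep the tail */
            tail = (FreeBlock *)((char *)*block + num_pages * PAGE_BYTES);
            tail->num = (*block)->num - num_pages;
            tail->next = (*block)->next;
            res = *block;
            *block = tail;
        }
        return res;
    }

    int main(void)
    {
        static union { char bytes[16 * PAGE_BYTES]; FreeBlock fb; } arena;
        FreeBlock *head = &arena.fb, *res;

        head->num = 16;
        head->next = NULL;

        res = first_fit(&head, 4);                         /* take 4 pages         */
        printf("block %p taken, free head %p has %zu pages left\n",
               (void *)res, (void *)head, head->num);
        return 0;
    }
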
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 741080fb78..6f373f13f9 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -32,12 +32,35 @@
#if HAVE_MMAP
# define HAVE_ERTS_MSEG 1
+# define HAVE_SUPER_ALIGNED_MB_CARRIERS 1
#else
# define HAVE_ERTS_MSEG 0
+# define HAVE_SUPER_ALIGNED_MB_CARRIERS 0
+#endif
+
+#if HAVE_SUPER_ALIGNED_MB_CARRIERS
+# define MSEG_ALIGN_BITS (18)
+ /* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
+#else
+/* If we don't use super-aligned multiblock carriers
+ * we will mmap with page-size alignment (and thus use the
+ * corresponding align bits).
+ *
+ * The current implementation needs this to be a constant, and this
+ * configuration is only used for development testing, so assuming a
+ * page size of 4096 bytes (12 bits) is fine.
+ */
+# define MSEG_ALIGN_BITS (12)
#endif
#if HAVE_ERTS_MSEG
+#define MSEG_ALIGNED_SIZE (1 << MSEG_ALIGN_BITS)
+
+#define ERTS_MSEG_FLG_NONE ((Uint)(0))
+#define ERTS_MSEG_FLG_2POW ((Uint)(1 << 0))
+
+
#define ERTS_MSEG_VSN_STR "0.9"
typedef struct {
@@ -68,13 +91,13 @@ typedef struct {
extern const ErtsMsegOpt_t erts_mseg_default_opt;
-void *erts_mseg_alloc(ErtsAlcType_t, Uint *);
-void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, const ErtsMsegOpt_t *);
+void *erts_mseg_alloc(ErtsAlcType_t, Uint *, Uint);
+void *erts_mseg_alloc_opt(ErtsAlcType_t, Uint *, Uint, const ErtsMsegOpt_t *);
void erts_mseg_dealloc(ErtsAlcType_t, void *, Uint);
void erts_mseg_dealloc_opt(ErtsAlcType_t, void *, Uint, const ErtsMsegOpt_t *);
-void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *);
+void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *, Uint);
void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *,
- const ErtsMsegOpt_t *);
+ Uint, const ErtsMsegOpt_t *);
void erts_mseg_clear_cache(void);
void erts_mseg_cache_check(void);
Uint erts_mseg_no( const ErtsMsegOpt_t *);
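
    As a quick check of the alignment constants introduced above: 18 align bits give
    1 << 18 = 262144-byte (256 KiB) carriers, the page-aligned fallback gives
    1 << 12 = 4096 bytes. IS_MSEG_ALIGNED below is only a hypothetical illustration
    of what a MAP_IS_ALIGNED-style test amounts to, not the emulator's macro:

    #include <stdio.h>
    #include <stdint.h>

    #define MSEG_ALIGN_BITS 18                         /* super-aligned case     */
    #define MSEG_ALIGNED_SIZE (1UL << MSEG_ALIGN_BITS) /* 262144 bytes = 256 KiB */

    #define IS_MSEG_ALIGNED(p) ((((uintptr_t)(p)) & (MSEG_ALIGNED_SIZE - 1)) == 0)

    int main(void)
    {
        printf("carrier alignment: %lu bytes\n", (unsigned long)MSEG_ALIGNED_SIZE);
        printf("0x40000 aligned: %d, 0x41000 aligned: %d\n",
               IS_MSEG_ALIGNED((void *)0x40000), IS_MSEG_ALIGNED((void *)0x41000));
        return 0;
    }
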
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 50a888323a..a523d67158 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -55,17 +55,12 @@
# ifdef SYS_SELECT_H
# include <sys/select.h>
# endif
-# ifdef VXWORKS
-# include <selectLib.h>
-# endif
#endif
-#ifndef VXWORKS
-# ifdef NO_SYSCONF
-# if ERTS_POLL_USE_SELECT
-# include <sys/param.h>
-# else
-# include <limits.h>
-# endif
+#ifdef NO_SYSCONF
+# if ERTS_POLL_USE_SELECT
+# include <sys/param.h>
+# else
+# include <limits.h>
# endif
#endif
#include "erl_thr_progress.h"
@@ -105,8 +100,8 @@
#define ERTS_POLL_COALESCE_KP_RES (ERTS_POLL_USE_KQUEUE || ERTS_POLL_USE_EPOLL)
-#define FDS_STATUS_EXTRA_FREE_SIZE 128
-#define POLL_FDS_EXTRA_FREE_SIZE 128
+#define ERTS_EV_TABLE_MIN_LENGTH 1024
+#define ERTS_EV_TABLE_EXP_THRESHOLD (2048*1024)
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
# define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 1
@@ -563,6 +558,28 @@ free_update_requests_block(ErtsPollSet ps,
* --- Growing poll set structures -------------------------------------------
*/
+int
+ERTS_POLL_EXPORT(erts_poll_get_table_len) (int new_len)
+{
+ if (new_len < ERTS_EV_TABLE_MIN_LENGTH) {
+ new_len = ERTS_EV_TABLE_MIN_LENGTH;
+ } else if (new_len < ERTS_EV_TABLE_EXP_THRESHOLD) {
+ /* find next power of 2 */
+ --new_len;
+ new_len |= new_len >> 1;
+ new_len |= new_len >> 2;
+ new_len |= new_len >> 4;
+ new_len |= new_len >> 8;
+ new_len |= new_len >> 16;
+ ++new_len;
+ } else {
+ /* grow incrementally */
+ new_len += ERTS_EV_TABLE_EXP_THRESHOLD;
+ }
+ return new_len;
+}
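
    Worked values for the growth policy above: lengths below ERTS_EV_TABLE_EXP_THRESHOLD
    are rounded up to the next power of two with the usual bit-smearing trick, larger
    tables grow linearly by the threshold. A stand-alone rendition with the same
    constants:

    #include <stdio.h>

    #define MIN_LEN        1024
    #define EXP_THRESHOLD  (2048*1024)

    static int table_len(int new_len)
    {
        if (new_len < MIN_LEN) {
            new_len = MIN_LEN;
        } else if (new_len < EXP_THRESHOLD) {
            --new_len;                    /* round up to next power of 2 */
            new_len |= new_len >> 1;
            new_len |= new_len >> 2;
            new_len |= new_len >> 4;
            new_len |= new_len >> 8;
            new_len |= new_len >> 16;
            ++new_len;
        } else {
            new_len += EXP_THRESHOLD;     /* grow linearly past threshold */
        }
        return new_len;
    }

    int main(void)
    {
        int probes[] = { 10, 1500, 70000, 3000000 };
        int i;
        for (i = 0; i < 4; i++)           /* -> 1024, 2048, 131072, 5097152 */
            printf("%8d -> %d\n", probes[i], table_len(probes[i]));
        return 0;
    }
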
+
+
#if ERTS_POLL_USE_KERNEL_POLL
static void
grow_res_events(ErtsPollSet ps, int new_len)
@@ -575,7 +592,7 @@ grow_res_events(ErtsPollSet ps, int new_len)
#elif ERTS_POLL_USE_KQUEUE
struct kevent
#endif
- )*new_len;
+ ) * ERTS_POLL_EXPORT(erts_poll_get_table_len)(new_len);
/* We do not need to save previously stored data */
if (ps->res_events)
erts_free(ERTS_ALC_T_POLL_RES_EVS, ps->res_events);
@@ -589,7 +606,7 @@ static void
grow_poll_fds(ErtsPollSet ps, int min_ix)
{
int i;
- int new_len = min_ix + 1 + POLL_FDS_EXTRA_FREE_SIZE;
+ int new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_ix + 1);
if (new_len > max_fds)
new_len = max_fds;
ps->poll_fds = (ps->poll_fds_len
@@ -611,7 +628,7 @@ static void
grow_fds_status(ErtsPollSet ps, int min_fd)
{
int i;
- int new_len = min_fd + 1 + FDS_STATUS_EXTRA_FREE_SIZE;
+ int new_len = ERTS_POLL_EXPORT(erts_poll_get_table_len)(min_fd + 1);
ASSERT(min_fd < max_fds);
if (new_len > max_fds)
new_len = max_fds;
@@ -2200,10 +2217,6 @@ ERTS_POLL_EXPORT(erts_poll_max_fds)(void)
* --- Initialization --------------------------------------------------------
*/
-#ifdef VXWORKS
-extern int erts_vxworks_max_files;
-#endif
-
void
ERTS_POLL_EXPORT(erts_poll_init)(void)
{
@@ -2212,9 +2225,7 @@ ERTS_POLL_EXPORT(erts_poll_init)(void)
errno = 0;
-#if defined(VXWORKS)
- max_fds = erts_vxworks_max_files;
-#elif !defined(NO_SYSCONF)
+#if !defined(NO_SYSCONF)
max_fds = sysconf(_SC_OPEN_MAX);
#elif ERTS_POLL_USE_SELECT
max_fds = NOFILE;
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index 8dde619105..502290e4bb 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -246,4 +246,6 @@ void ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet,
ErtsPollEvents [],
int);
+int ERTS_POLL_EXPORT(erts_poll_get_table_len)(int);
+
#endif /* #ifndef ERL_POLL_H__ */
diff --git a/erts/emulator/sys/unix/erl_unix_sys_ddll.c b/erts/emulator/sys/unix/erl_unix_sys_ddll.c
index 336d9586c4..a35aec560a 100644
--- a/erts/emulator/sys/unix/erl_unix_sys_ddll.c
+++ b/erts/emulator/sys/unix/erl_unix_sys_ddll.c
@@ -101,7 +101,7 @@ void erl_sys_ddll_init(void) {
/*
* Open a shared object
*/
-int erts_sys_ddll_open2(char *full_name, void **handle, ErtsSysDdllError* err)
+int erts_sys_ddll_open2(const char *full_name, void **handle, ErtsSysDdllError* err)
{
#if defined(HAVE_DLOPEN)
char* dlname;
@@ -153,7 +153,7 @@ int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err)
/*
* Find a symbol in the shared object
*/
-int erts_sys_ddll_sym2(void *handle, char *func_name, void **function,
+int erts_sys_ddll_sym2(void *handle, const char *func_name, void **function,
ErtsSysDdllError* err)
{
#if defined(HAVE_DLOPEN)
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index c485a4eece..0b96eded76 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -123,7 +123,8 @@ struct ErtsSysReportExit_ {
/* This data is shared by these drivers - initialized by spawn_init() */
static struct driver_data {
- int port_num, ofd, packet_bytes;
+ ErlDrvPort port_num;
+ int ofd, packet_bytes;
ErtsSysReportExit *report_exit;
int pid;
int alive;
@@ -577,7 +578,7 @@ erl_sys_init(void)
+ 1);
child_setup_prog = erts_alloc(ERTS_ALC_T_CS_PROG_PATH, csp_path_sz);
erts_smp_atomic_add_nob(&sys_misc_mem_sz, csp_path_sz);
- sprintf(child_setup_prog,
+ erts_snprintf(child_setup_prog, csp_path_sz,
"%s%c%s",
bindir,
DIR_SEPARATOR_CHAR,
@@ -686,7 +687,7 @@ static RETSIGTYPE break_handler(int sig)
}
#endif /* 0 */
-static ERTS_INLINE void
+static ERTS_INLINE int
prepare_crash_dump(int secs)
{
#define NUFBUF (3)
@@ -698,24 +699,41 @@ prepare_crash_dump(int secs)
Eterm *hp = heap;
Eterm list = NIL;
int heart_fd[2] = {-1,-1};
+ int has_heart = 0;
UseTmpHeapNoproc(NUFBUF);
if (ERTS_PREPARED_CRASH_DUMP)
- return; /* We have already been called */
+ return 0; /* We have already been called */
heart_port = erts_get_heart_port();
+
+ /* Positive secs means an alarm must be set;
+ * 0 or negative means no alarm.
+ *
+ * Set the alarm before we try to write to a port:
+ * we don't want to hang on a port write with
+ * no alarm set.
+ */
+
+ if (secs >= 0) {
+ alarm((unsigned int)secs);
+ }
+
if (heart_port) {
/* hearts input fd
* We "know" drv_data is the in_fd since the port is started with read|write
*/
heart_fd[0] = (int)heart_port->drv_data;
heart_fd[1] = (int)driver_data[heart_fd[0]].ofd;
+ has_heart = 1;
list = CONS(hp, make_small(8), list); hp += 2;
/* send to heart port, CMD = 8, i.e. prepare crash dump =o */
- erts_write_to_port(ERTS_INVALID_PID, heart_port, list);
+ erts_port_output(NULL, ERTS_PORT_SIG_FLG_FORCE_IMM_CALL, heart_port,
+ heart_port->common.id, list, NULL);
}
/* Make sure we unregister at epmd (unknown fd) and get at least
@@ -752,20 +770,14 @@ prepare_crash_dump(int secs)
erts_silence_warn_unused_result(nice(nice_val));
}
- /* Positive secs means an alarm must be set
- * 0 or negative means no alarm
- */
- if (secs > 0) {
- alarm((unsigned int)secs);
- }
UnUseTmpHeapNoproc(NUFBUF);
#undef NUFBUF
+ return has_heart;
}
-void
-erts_sys_prepare_crash_dump(int secs)
+int erts_sys_prepare_crash_dump(int secs)
{
- prepare_crash_dump(secs);
+ return prepare_crash_dump(secs);
}
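
    A minimal sketch of the "arm the alarm before any potentially blocking write"
    ordering introduced above, assuming plain POSIX alarm()/SIGALRM semantics; a
    blocking read() stands in for the write to the heart port:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <signal.h>
    #include <errno.h>

    static void on_alarm(int sig) { (void)sig; }   /* just interrupt the syscall */

    int main(void)
    {
        char buf[1];
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_alarm;                  /* no SA_RESTART: read() fails */
        sigaction(SIGALRM, &sa, NULL);

        alarm(2);                                  /* arm the timeout first       */
        if (read(STDIN_FILENO, buf, 1) < 0 && errno == EINTR)
            fprintf(stderr, "timed out, continue with the crash dump\n");
        alarm(0);                                  /* disarm                      */
        return 0;
    }
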
static ERTS_INLINE void
@@ -802,12 +814,22 @@ static RETSIGTYPE request_break(int signum)
static ERTS_INLINE void
sigusr1_exit(void)
{
- /* We do this at interrupt level, since the main reason for
- wanting to generate a crash dump in this way is that the emulator
- is hung somewhere, so it won't be able to poll any flag we set here.
- */
+ char env[21]; /* enough to hold any 64-bit integer */
+ size_t envsz;
+ int i, secs = -1;
+
+ /* We do this at interrupt level, since the main reason for
+ * wanting to generate a crash dump in this way is that the emulator
+ * is hung somewhere, so it won't be able to poll any flag we set here.
+ */
ERTS_SET_GOT_SIGUSR1;
- prepare_crash_dump((int)0);
+
+ envsz = sizeof(env);
+ if ((i = erts_sys_getenv_raw("ERL_CRASH_DUMP_SECONDS", env, &envsz)) >= 0) {
+ secs = i != 0 ? 0 : atoi(env);
+ }
+
+ prepare_crash_dump(secs);
erl_exit(1, "Received SIGUSR1\n");
}
@@ -1162,7 +1184,7 @@ static RETSIGTYPE onchld(int signum)
#endif
}
-static int set_driver_data(int port_num,
+static int set_driver_data(ErlDrvPort port_num,
int ifd,
int ofd,
int packet_bytes,
@@ -1170,6 +1192,7 @@ static int set_driver_data(int port_num,
int exit_status,
int pid)
{
+ Port *prt;
ErtsSysReportExit *report_exit;
if (!exit_status)
@@ -1178,7 +1201,7 @@ static int set_driver_data(int port_num,
report_exit = erts_alloc(ERTS_ALC_T_PRT_REP_EXIT,
sizeof(ErtsSysReportExit));
report_exit->next = report_exit_list;
- report_exit->port = erts_port[port_num].id;
+ report_exit->port = erts_drvport2id(port_num);
report_exit->pid = pid;
report_exit->ifd = read_write & DO_READ ? ifd : -1;
report_exit->ofd = read_write & DO_WRITE ? ofd : -1;
@@ -1188,7 +1211,9 @@ static int set_driver_data(int port_num,
report_exit_list = report_exit;
}
- erts_port[port_num].os_pid = pid;
+ prt = erts_drvport2port(port_num, NULL);
+ if (prt)
+ prt->os_pid = pid;
if (read_write & DO_READ) {
driver_data[ifd].packet_bytes = packet_bytes;
@@ -1261,7 +1286,7 @@ static void close_pipes(int ifd[2], int ofd[2], int read_write)
}
}
-static void init_fd_data(int fd, int prt)
+static void init_fd_data(int fd, ErlDrvPort port_num)
{
fd_data[fd].buf = NULL;
fd_data[fd].cpos = NULL;
@@ -1557,12 +1582,13 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op
}
#if !DISABLE_VFORK
}
+#define ENOUGH_BYTES (44)
else { /* Use vfork() */
char **cs_argv= erts_alloc(ERTS_ALC_T_TMP,(CS_ARGV_NO_OF_ARGS + 1)*
sizeof(char *));
- char fd_close_range[44]; /* 44 bytes are enough to */
- char dup2_op[CS_ARGV_NO_OF_DUP2_OPS][44]; /* hold any "%d:%d" string */
- /* on a 64-bit machine. */
+ char fd_close_range[ENOUGH_BYTES]; /* 44 bytes are enough to */
+ char dup2_op[CS_ARGV_NO_OF_DUP2_OPS][ENOUGH_BYTES]; /* hold any "%d:%d" string */
+ /* on a 64-bit machine. */
/* Setup argv[] for the child setup program (implemented in
erl_child_setup.c) */
@@ -1570,23 +1596,23 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op
if (opts->use_stdio) {
if (opts->read_write & DO_READ){
/* stdout for process */
- sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 1);
+ erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ifd[1], 1);
if(opts->redir_stderr)
/* stderr for process */
- sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 2);
+ erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ifd[1], 2);
}
if (opts->read_write & DO_WRITE)
/* stdin for process */
- sprintf(&dup2_op[i++][0], "%d:%d", ofd[0], 0);
+ erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ofd[0], 0);
} else { /* XXX will fail if ofd[0] == 4 (unlikely..) */
if (opts->read_write & DO_READ)
- sprintf(&dup2_op[i++][0], "%d:%d", ifd[1], 4);
+ erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ifd[1], 4);
if (opts->read_write & DO_WRITE)
- sprintf(&dup2_op[i++][0], "%d:%d", ofd[0], 3);
+ erts_snprintf(&dup2_op[i++][0], ENOUGH_BYTES, "%d:%d", ofd[0], 3);
}
for (; i < CS_ARGV_NO_OF_DUP2_OPS; i++)
strcpy(&dup2_op[i][0], "-");
- sprintf(fd_close_range, "%d:%d", opts->use_stdio ? 3 : 5, max_files-1);
+ erts_snprintf(fd_close_range, ENOUGH_BYTES, "%d:%d", opts->use_stdio ? 3 : 5, max_files-1);
cs_argv[CS_ARGV_PROGNAME_IX] = child_setup_prog;
cs_argv[CS_ARGV_WD_IX] = opts->wd ? opts->wd : ".";
@@ -1637,6 +1663,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* op
}
erts_free(ERTS_ALC_T_TMP,cs_argv);
}
+#undef ENOUGH_BYTES
#endif
erts_sched_bind_atfork_parent(unbind);
@@ -1949,7 +1976,7 @@ static void clear_fd_data(int fd)
fd_data[fd].psz = 0;
}
-static void nbio_stop_fd(int prt, int fd)
+static void nbio_stop_fd(ErlDrvPort prt, int fd)
{
driver_select(prt,fd,DO_READ|DO_WRITE,0);
clear_fd_data(fd);
@@ -1997,7 +2024,8 @@ static ErlDrvData vanilla_start(ErlDrvPort port_num, char* name,
static void stop(ErlDrvData fd)
{
- int prt, ofd;
+ ErlDrvPort prt;
+ int ofd;
prt = driver_data[(int)(long)fd].port_num;
nbio_stop_fd(prt, (int)(long)fd);
@@ -2010,7 +2038,7 @@ static void stop(ErlDrvData fd)
CHLD_STAT_LOCK;
- /* Mark as unused. Maybe resetting the 'port_num' slot is better? */
+ /* Mark as unused. */
driver_data[(int)(long)fd].pid = -1;
CHLD_STAT_UNLOCK;
@@ -2026,7 +2054,7 @@ static void stop(ErlDrvData fd)
static void outputv(ErlDrvData e, ErlIOVec* ev)
{
int fd = (int)(long)e;
- int ix = driver_data[fd].port_num;
+ ErlDrvPort ix = driver_data[fd].port_num;
int pb = driver_data[fd].packet_bytes;
int ofd = driver_data[fd].ofd;
ssize_t n;
@@ -2076,7 +2104,7 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
static void output(ErlDrvData e, char* buf, ErlDrvSizeT len)
{
int fd = (int)(long)e;
- int ix = driver_data[fd].port_num;
+ ErlDrvPort ix = driver_data[fd].port_num;
int pb = driver_data[fd].packet_bytes;
int ofd = driver_data[fd].ofd;
ssize_t n;
@@ -2127,7 +2155,7 @@ static void output(ErlDrvData e, char* buf, ErlDrvSizeT len)
return; /* 0; */
}
-static int port_inp_failure(int port_num, int ready_fd, int res)
+static int port_inp_failure(ErlDrvPort port_num, int ready_fd, int res)
/* Result: 0 (eof) or -1 (error) */
{
int err = errno;
@@ -2177,7 +2205,7 @@ static int port_inp_failure(int port_num, int ready_fd, int res)
static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
{
int fd = (int)(long)e;
- int port_num;
+ ErlDrvPort port_num;
int packet_bytes;
int res;
Uint h;
@@ -2300,7 +2328,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
static void ready_output(ErlDrvData e, ErlDrvEvent ready_fd)
{
int fd = (int)(long)e;
- int ix = driver_data[fd].port_num;
+ ErlDrvPort ix = driver_data[fd].port_num;
int n;
struct iovec* iv;
int vsize;
@@ -2380,10 +2408,10 @@ void erts_do_break_handling(void)
** no interpretation of this should be done by the rest of the
** emulator. The buffer should be at least 21 bytes long.
*/
-void sys_get_pid(char *buffer){
+void sys_get_pid(char *buffer, size_t buffer_size){
pid_t p = getpid();
/* Assume the pid is scalar and can rest in an unsigned long... */
- sprintf(buffer,"%lu",(unsigned long) p);
+ erts_snprintf(buffer, buffer_size, "%lu",(unsigned long) p);
}
int
@@ -2609,19 +2637,20 @@ report_exit_status(ErtsSysReportExit *rep, int status)
Port *pp;
#ifdef ERTS_SMP
CHLD_STAT_UNLOCK;
-#endif
+ pp = erts_thr_id2port_sflgs(rep->port,
+ ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
+ CHLD_STAT_LOCK;
+#else
pp = erts_id2port_sflgs(rep->port,
NULL,
0,
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
-#ifdef ERTS_SMP
- CHLD_STAT_LOCK;
#endif
if (pp) {
if (rep->ifd >= 0) {
driver_data[rep->ifd].alive = 0;
driver_data[rep->ifd].status = status;
- (void) driver_select((ErlDrvPort) internal_port_index(pp->id),
+ (void) driver_select((ErlDrvPort) pp,
rep->ifd,
(ERL_DRV_READ|ERL_DRV_USE),
1);
@@ -2629,12 +2658,16 @@ report_exit_status(ErtsSysReportExit *rep, int status)
if (rep->ofd >= 0) {
driver_data[rep->ofd].alive = 0;
driver_data[rep->ofd].status = status;
- (void) driver_select((ErlDrvPort) internal_port_index(pp->id),
+ (void) driver_select((ErlDrvPort) pp,
rep->ofd,
(ERL_DRV_WRITE|ERL_DRV_USE),
1);
}
+#ifdef ERTS_SMP
+ erts_thr_port_release(pp);
+#else
erts_port_release(pp);
+#endif
}
erts_free(ERTS_ALC_T_PRT_REP_EXIT, rep);
}
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
index 8ec7b31ce0..3fcb4d88dc 100644
--- a/erts/emulator/sys/unix/sys_float.c
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -745,18 +745,18 @@ void erts_sys_unblock_fpe(int unmasked)
*/
int
-sys_double_to_chars(double fp, char *buf)
+sys_double_to_chars(double fp, char *buffer, size_t buffer_size)
{
- char *s = buf;
+ char *s = buffer;
- (void) sprintf(buf, "%.20e", fp);
+ (void) erts_snprintf(buffer, buffer_size, "%.20e", fp);
/* Search upto decimal point */
if (*s == '+' || *s == '-') s++;
while (ISDIGIT(*s)) s++;
if (*s == ',') *s++ = '.'; /* Replace ',' with '.' */
/* Scan to end of string */
while (*s) s++;
- return s-buf; /* i.e strlen(buf) */
+ return s-buffer; /* i.e strlen(buffer) */
}
/* Float conversion */
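
    An illustrative stand-alone version of the conversion above: print with "%.20e",
    normalize a locale-dependent decimal comma to a dot, and return the string length,
    as the new sys_double_to_chars() does with its bounded buffer:

    #include <stdio.h>
    #include <ctype.h>

    static int double_to_chars(double fp, char *buf, size_t bufsz)
    {
        char *s = buf;
        snprintf(buf, bufsz, "%.20e", fp);
        if (*s == '+' || *s == '-') s++;            /* skip sign           */
        while (isdigit((unsigned char)*s)) s++;     /* skip integer digits */
        if (*s == ',') *s = '.';                    /* ',' -> '.'          */
        while (*s) s++;                             /* find the terminator */
        return (int)(s - buf);                      /* i.e. strlen(buf)    */
    }

    int main(void)
    {
        char buf[64];
        int n = double_to_chars(3.14, buf, sizeof buf);
        printf("%d: %s\n", n, buf);
        return 0;
    }
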
diff --git a/erts/emulator/sys/vxworks/driver_int.h b/erts/emulator/sys/vxworks/driver_int.h
deleted file mode 100644
index f6bc71a799..0000000000
--- a/erts/emulator/sys/vxworks/driver_int.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/*----------------------------------------------------------------------
-** Purpose : System dependant driver declarations
-**---------------------------------------------------------------------- */
-
-#ifndef __DRIVER_INT_H__
-#define __DRIVER_INT_H__
-
-#include <ioLib.h>
-
-typedef struct iovec SysIOVec;
-
-#endif
diff --git a/erts/emulator/sys/vxworks/erl_main.c b/erts/emulator/sys/vxworks/erl_main.c
deleted file mode 100644
index c9b44a635a..0000000000
--- a/erts/emulator/sys/vxworks/erl_main.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2000-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-#include "sys.h"
-#include "erl_vm.h"
-
-#if defined(__GNUC__)
-/*
- * The generated assembler does the usual trick (relative
- * branch-and-link to next instruction) to get a copy of the
- * instruction ptr. Instead of branching to an explicit zero offset,
- * it branches to the symbol `__eabi' --- which is expected to be
- * undefined and thus zero (if it is defined as non-zero, things will
- * be interesting --- as in the Chinese curse). To shut up the VxWorks
- * linker, we define `__eabi' as zero.
- *
- * This is just a work around. It's really Wind River's GCC's code
- * generator that should be fixed.
- */
-__asm__(".equ __eabi, 0");
-#endif
-
-void
-erl_main(int argc, char **argv)
-{
- erl_start(argc, argv);
-}
diff --git a/erts/emulator/sys/vxworks/erl_vxworks_sys.h b/erts/emulator/sys/vxworks/erl_vxworks_sys.h
deleted file mode 100644
index 3d53238ea6..0000000000
--- a/erts/emulator/sys/vxworks/erl_vxworks_sys.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2011. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-#ifndef __ERL_VXWORKS_SYS_H__
-#define __ERL_VXWORKS_SYS_H__
-
-/* stdarg.h don't work without this one... */
-#include <vxWorks.h>
-
-#include <stdio.h>
-#include <math.h>
-#include <limits.h>
-#include <stdlib.h>
-#define index StringIndexFunctionThatIDontWantDeclared
-#include <string.h>
-#undef index
-
-
-
-#include <sys/times.h>
-#include <time.h>/* xxxP */
-
-#include <dirent.h>
-#include <sys/stat.h>
-
-/* xxxP from unix_sys.h begin */
-
-/*
- * Make sure that MAXPATHLEN is defined.
- */
-
-#ifndef MAXPATHLEN
-# ifdef PATH_MAX
-# define MAXPATHLEN PATH_MAX
-# else
-# define MAXPATHLEN 2048
-# endif
-#endif
-
-/* xxxP end */
-
-
-/* Unimplemented math functions */
-#define NO_ASINH
-#define NO_ACOSH
-#define NO_ATANH
-#define NO_ERF
-#define NO_ERFC
-
-/* Stuff that is useful for port programs, drivers, etc */
-#ifndef VXWORKS
-#define VXWORKS
-#endif
-
-#define DONT_USE_MAIN
-#define NO_FSYNC
-#define NO_MKDIR_MODE
-#define NO_UMASK
-#define NO_SYMBOLIC_LINKS
-#define NO_DEVICE_FILES
-#define NO_UID
-#define NO_ACCESS
-#define NO_FCNTL
-#define NO_SYSLOG
-#define NO_SYSCONF
-#define NO_PWD /* XXX Means what? */
-#define NO_DAEMON
-/* This chooses ~250 reductions instead of 500 in config.h */
-#if (CPU == CPU32)
-#define SLOW_PROCESSOR
-#endif
-
-/*
- * Even though we does not always have small memories on VxWorks
- * we certainly does not have virtual memory.
- */
-#if !defined(LARGE_MEMORY)
-#define SMALL_MEMORY
-#endif
-
-/*************** Floating point exception handling ***************/
-
-/* There are no known ways to customize the handling of invalid floating
- point operations, such as matherr() or ieee_handler(), in VxWorks 5.1. */
-
-#if (CPU == MC68040 || CPU == CPU32 || CPU == PPC860 || CPU == PPC32 || \
- CPU == PPC603 || CPU == PPC604 || CPU == SIMSPARCSOLARIS)
-
-/* VxWorks 5.1 on Motorola 68040 never generates SIGFPE, but sets the
- result of invalid floating point ops to Inf and NaN - unfortunately
- the way to test for those values is undocumented and hidden in a
- "private" include file... */
-/* Haven't found any better way, as of yet, for ppc860 xxxP*/
-
-#include <private/mathP.h>
-#define NO_FPE_SIGNALS
-#define erts_get_current_fp_exception() NULL
-#define __ERTS_FP_CHECK_INIT(fpexnp) do {} while (0)
-#define __ERTS_FP_ERROR(fpexnp, f, Action) if (isInf(f) || isNan(f)) { Action; } else {}
-#define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) __ERTS_FP_ERROR(fpexnp, f, Action)
-#define __ERTS_SAVE_FP_EXCEPTION(fpexnp)
-#define __ERTS_RESTORE_FP_EXCEPTION(fpexnp)
-
-#define ERTS_FP_CHECK_INIT(p) __ERTS_FP_CHECK_INIT(&(p)->fp_exception)
-#define ERTS_FP_ERROR(p, f, A) __ERTS_FP_ERROR(&(p)->fp_exception, f, A)
-#define ERTS_SAVE_FP_EXCEPTION(p) __ERTS_SAVE_FP_EXCEPTION(&(p)->fp_exception)
-#define ERTS_RESTORE_FP_EXCEPTION(p) __ERTS_RESTORE_FP_EXCEPTION(&(p)->fp_exception)
-#define ERTS_FP_ERROR_THOROUGH(p, f, A) __ERTS_FP_ERROR_THOROUGH(&(p)->fp_exception, f, A)
-
-#define erts_sys_block_fpe() 0
-#define erts_sys_unblock_fpe(x) do{}while(0)
-
-#if (CPU == PPC603)
-/* Need fppLib to change the Floating point registers
- (fix_registers in sys.c)*/
-
-#include <fppLib.h>
-
-#endif /* PPC603 */
-
-#else
-
-Unsupported CPU value !
-
-#endif
-
-typedef void *GETENV_STATE;
-
-#define HAVE_GETHRTIME
-
-extern int erts_clock_rate;
-
-#define SYS_CLK_TCK (erts_clock_rate)
-
-#define SYS_CLOCK_RESOLUTION 1
-
-typedef struct _vxworks_tms {
- clock_t tms_utime;
- clock_t tms_stime;
- clock_t tms_cutime;
- clock_t tms_cstime;
-} SysTimes;
-
-typedef long long SysHrTime;
-
-typedef time_t erts_time_t;
-typedef struct timeval SysTimeval;
-
-extern int sys_init_hrtime(void);
-extern SysHrTime sys_gethrtime(void);
-extern void sys_gettimeofday(SysTimeval *tvp);
-extern clock_t sys_times(SysTimes *t);
-
-#define SIZEOF_SHORT 2
-#define SIZEOF_INT 4
-#define SIZEOF_LONG 4
-#define SIZEOF_VOID_P 4
-#define SIZEOF_SIZE_T 4
-#define SIZEOF_OFF_T 4
-
-/*
- * Temporary buffer *only* used in sys code.
- */
-#define SYS_TMP_BUF_SIZE 65536
-
-/* Need to be able to interrupt erts_poll_wait() from signal handler */
-#define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-
-#endif /* __ERL_VXWORKS_SYS_H__ */
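A minimal stand-alone sketch of the polling-style check implemented by the __ERTS_FP_ERROR macro above: since these CPUs deliver no SIGFPE, the result of a floating point operation is inspected for Inf/NaN after the fact. The standard C99 isinf()/isnan() stand in here for the private VxWorks isInf()/isNan() from <private/mathP.h>, and the fp_exception bookkeeping argument is omitted; this is illustrative only, not the original macro.

#include <math.h>
#include <stdio.h>

/* Polling-style FP error check: no signal is delivered, so inspect the
 * result after the operation.  isinf()/isnan() are the C99 substitutes
 * for the private VxWorks isInf()/isNan(). */
#define FP_ERROR_CHECK(f, Action) \
    do { if (isinf(f) || isnan(f)) { Action; } } while (0)

int main(void)
{
    volatile double zero = 0.0;
    double r = 1.0 / zero;      /* quietly yields +Inf, no SIGFPE on such systems */

    FP_ERROR_CHECK(r, { printf("fp error detected: %g\n", r); return 1; });
    printf("result: %g\n", r);
    return 0;
}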
diff --git a/erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c b/erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c
deleted file mode 100644
index c56c633b2f..0000000000
--- a/erts/emulator/sys/vxworks/erl_vxworks_sys_ddll.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2006-2009. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-
-/*
- * Interface functions to the dynamic linker using dl* functions.
- * (As far as I know it works on SunOS 4, 5, Linux and FreeBSD. /Seb)
- */
-
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-#include <vxWorks.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdarg.h>
-#include <a_out.h>
-#include <symLib.h>
-#include <loadLib.h>
-#include <unldLib.h>
-#include <moduleLib.h>
-#include <sysSymTbl.h>
-#include "sys.h"
-#include "global.h"
-#include "erl_alloc.h"
-#include "erl_driver.h"
-
-#define EXT_LEN 4
-#define FILE_EXT ".eld"
-#define ALT_FILE_EXT ".o"
-/* ALT_FILE_EXT must not be longer than FILE_EXT */
-#define DRIVER_INIT_SUFFIX "_init"
-
-static MODULE_ID get_mid(char *);
-static FUNCPTR lookup(char *);
-
-typedef enum {
- NoError,
- ModuleNotFound,
- ModuleNotUnloadable,
- UnknownError
-} FakeSytemError;
-
-static char *errcode_tab[] = {
- "No error",
- "Module/file not found",
- "Module cannot be unloaded",
- "Unknown error"
-};
-
-void erl_sys_ddll_init(void) {
- return;
-}
-/*
- * Open a shared object
- */
-int erts_sys_ddll_open2(char *full_name, void **handle, ErtsSysDdllError* err)
-{
- int len;
-
- if (erts_sys_ddll_open_noext(full_name, handle, err) == ERL_DE_NO_ERROR) {
- return ERL_DE_NO_ERROR;
- }
- if ((len = sys_strlen(full_name)) > PATH_MAX-EXT_LEN) {
- return ERL_DE_LOAD_ERROR_NAME_TO_LONG;
- } else {
- static char dlname[PATH_MAX + 1];
-
- sys_strcpy(dlname, full_name);
- sys_strcpy(dlname+len, FILE_EXT);
- if (erts_sys_ddll_open_noext(dlname, handle, err) == ERL_DE_NO_ERROR) {
- return ERL_DE_NO_ERROR;
- }
- sys_strcpy(dlname+len, ALT_FILE_EXT);
- return erts_sys_ddll_open_noext(dlname, handle, err);
- }
-}
-int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err)
-{
- MODULE_ID mid;
-
- if((mid = get_mid(dlname)) == NULL) {
- return ERL_DE_DYNAMIC_ERROR_OFFSET - ((int) ModuleNotFound);
- }
- *handle = (void *) mid;
- return ERL_DE_NO_ERROR;
-}
-
-/*
- * Find a symbol in the shared object
- */
-#define PREALLOC_BUFFER_SIZE 256
-int erts_sys_ddll_sym2(void *handle, char *func_name, void **function, ErtsSysDdllError* err)
-{
- FUNCPTR proc;
- static char statbuf[PREALLOC_BUFFER_SIZE];
- char *buf = statbuf;
- int need;
-
- if ((proc = lookup(func_name)) == NULL) {
- if ((need = strlen(func_name)+2) > PREALLOC_BUFFER_SIZE) {
- buf = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF,need);
- }
- buf[0] = '_';
- sys_strcpy(buf+1,func_name);
- proc = lookup(buf);
- if (buf != statbuf) {
- erts_free(ERTS_ALC_T_DDLL_TMP_BUF, buf);
- }
- if (proc == NULL) {
- return ERL_DE_LOOKUP_ERROR_NOT_FOUND;
- }
- }
- *function = (void *) proc;
- return ERL_DE_NO_ERROR;
-}
-
-/* XXX:PaN These two will be changed with new driver interface! */
-
-/*
- * Load the driver init function; it might appear under different names depending on the object arch...
- */
-
-int erts_sys_ddll_load_driver_init(void *handle, void **function)
-{
- MODULE_ID mid = (MODULE_ID) handle;
- char *modname;
- char *cp;
- static char statbuf[PREALLOC_BUFFER_SIZE];
- char *fname = statbuf;
- int len;
- int res;
- void *func;
- int need;
-
- if((modname = moduleNameGet(mid)) == NULL) {
- return ERL_DE_DYNAMIC_ERROR_OFFSET - ((int) ModuleNotFound);
- }
-
- if((cp = strrchr(modname, '.')) == NULL) {
- len = strlen(modname);
- } else {
- len = cp - modname;
- }
-
- need = len + strlen(DRIVER_INIT_SUFFIX) + 1;
- if (need > PREALLOC_BUFFER_SIZE) {
- fname = erts_alloc(ERTS_ALC_T_DDLL_TMP_BUF, need); /* erts_alloc exits on failure */
- }
- sys_strncpy(fname, modname, len);
- fname[len] = '\0';
- sys_strcat(fname, DRIVER_INIT_SUFFIX);
- res = erts_sys_ddll_sym(handle, fname, &func);
- if (fname != statbuf) {
- erts_free(ERTS_ALC_T_DDLL_TMP_BUF, fname);
- }
- if ( res != ERL_DE_NO_ERROR) {
- return res;
- }
- *function = func;
- return ERL_DE_NO_ERROR;
-}
-
-int erts_sys_ddll_load_nif_init(void *handle, void **function, ErtsSysDdllError* err)
-{
- /* NIFs not implemented for vxworks */
- return ERL_DE_ERROR_NO_DDLL_FUNCTIONALITY;
-}
-
-/*
- * Call the driver_init function, whatever it's really called, simple on unix...
-*/
-void *erts_sys_ddll_call_init(void *function) {
- void *(*initfn)(void) = function;
- return (*initfn)();
-}
-void *erts_sys_ddll_call_nif_init(void *function) {
- return erts_sys_ddll_call_init(function);
-}
-
-
-/*
- * Close a shared object
- */
-int erts_sys_ddll_close2(void *handle, ErtsSysDdllError* err)
-{
- MODULE_ID mid = (MODULE_ID) handle;
- if (unld(mid, 0) < 0) {
- return ERL_DE_DYNAMIC_ERROR_OFFSET - ((int) ModuleNotUnloadable);
- }
- return ERL_DE_NO_ERROR;
-}
-
-/*
- * Return string that describes the (current) error
- */
-char *erts_sys_ddll_error(int code)
-{
- int actual_code;
- if (code > ERL_DE_DYNAMIC_ERROR_OFFSET) {
- return "Unspecified error";
- }
- actual_code = -1*(code - ERL_DE_DYNAMIC_ERROR_OFFSET);
- if (actual_code > ((int) UnknownError)) {
- actual_code = UnknownError;
- }
- return errcode_tab[actual_code];
-}
-
-static FUNCPTR lookup(char *sym)
-{
- FUNCPTR entry;
- SYM_TYPE type;
-
- if (symFindByNameAndType(sysSymTbl, sym, (char **)&entry,
- &type, N_EXT | N_TEXT, N_EXT | N_TEXT) != OK) {
- return NULL ;
- }
- return entry;
-}
-
-static MODULE_ID get_mid(char* name)
-{
- int fd;
- MODULE_ID mid = NULL;
-
- if((fd = open(name, O_RDONLY, 0664)) >= 0) {
- mid = loadModule(fd, GLOBAL_SYMBOLS);
- close(fd);
- }
- return mid;
-}
-
-void erts_sys_ddll_free_error(ErtsSysDdllError* err)
-{
- /* NYI */
-}
-
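A small self-contained sketch of the error-code round trip used by erts_sys_ddll_open_noext() and erts_sys_ddll_error() above: an enum error value is folded into a single return code relative to ERL_DE_DYNAMIC_ERROR_OFFSET and later unfolded to index errcode_tab[]. The offset value below is a stand-in chosen for this sketch (the real constant comes from the ddll headers), and the out-of-range handling is simplified to the "Unknown error" entry.

#include <stdio.h>

#define ERL_DE_DYNAMIC_ERROR_OFFSET (-10)   /* stand-in value for this sketch only */

typedef enum { NoError, ModuleNotFound, ModuleNotUnloadable, UnknownError } FakeError;

static const char *errcode_tab[] = {
    "No error",
    "Module/file not found",
    "Module cannot be unloaded",
    "Unknown error"
};

/* Fold an error into a return code, as erts_sys_ddll_open_noext() does. */
static int encode(FakeError e)
{
    return ERL_DE_DYNAMIC_ERROR_OFFSET - (int) e;
}

/* Unfold a return code back to a message, as erts_sys_ddll_error() does
 * (out-of-range codes are simplified to "Unknown error" here). */
static const char *decode(int code)
{
    int actual = -1 * (code - ERL_DE_DYNAMIC_ERROR_OFFSET);
    if (code > ERL_DE_DYNAMIC_ERROR_OFFSET || actual > (int) UnknownError)
        actual = (int) UnknownError;
    return errcode_tab[actual];
}

int main(void)
{
    int code = encode(ModuleNotFound);
    printf("code %d -> \"%s\"\n", code, decode(code));
    return 0;
}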
diff --git a/erts/emulator/sys/vxworks/sys.c b/erts/emulator/sys/vxworks/sys.c
deleted file mode 100644
index 739b026fb1..0000000000
--- a/erts/emulator/sys/vxworks/sys.c
+++ /dev/null
@@ -1,2610 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 1997-2011. All Rights Reserved.
- *
- * The contents of this file are subject to the Erlang Public License,
- * Version 1.1, (the "License"); you may not use this file except in
- * compliance with the License. You should have received a copy of the
- * Erlang Public License along with this software. If not, it can be
- * retrieved online at http://www.erlang.org/.
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and limitations
- * under the License.
- *
- * %CopyrightEnd%
- */
-/*
- * system-dependent functions
- *
- */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
-#include <vxWorks.h>
-#include <version.h>
-#include <string.h>
-#include <types.h>
-#include <sigLib.h>
-#include <ioLib.h>
-#include <iosLib.h>
-#include <envLib.h>
-#include <fioLib.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <errno.h>
-#include <symLib.h>
-#include <sysLib.h>
-#include <sysSymTbl.h>
-#include <loadLib.h>
-#include <taskLib.h>
-#include <taskVarLib.h>
-#include <taskHookLib.h>
-#include <tickLib.h>
-#include <time.h>
-#include <rngLib.h>
-#include <semLib.h>
-#include <selectLib.h>
-#include <sockLib.h>
-#include <a_out.h>
-#include <wdLib.h>
-#include <timers.h>
-#include <ctype.h>
-#include <sys/stat.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <stdarg.h>
-
-
-#ifndef WANT_NONBLOCKING
-#define WANT_NONBLOCKING
-#endif
-
-#include "sys.h"
-#include "erl_alloc.h"
-
-/* don't need global.h, but bif_table.h (included by bif.h) won't compile otherwise */
-#include "global.h"
-#include "bif.h"
-
-#include "erl_sys_driver.h"
-
-#include "elib_stat.h"
-
-#include "reclaim_private.h" /* Some more or less private reclaim facilities */
-
-#ifndef RETSIGTYPE
-#define RETSIGTYPE void
-#endif
-
-EXTERN_FUNCTION(void, erl_start, (int, char**));
-EXTERN_FUNCTION(void, erl_exit, (int n, char*, _DOTS_));
-EXTERN_FUNCTION(void, erl_error, (char*, va_list));
-EXTERN_FUNCTION(int, driver_interrupt, (int, int));
-EXTERN_FUNCTION(void, increment_time, (int));
-EXTERN_FUNCTION(int, erts_next_time, (_VOID_));
-EXTERN_FUNCTION(void, set_reclaim_free_function, (FreeFunction));
-EXTERN_FUNCTION(int, erl_mem_info_get, (MEM_PART_STATS *));
-EXTERN_FUNCTION(void, erl_crash_dump, (char* file, int line, char* fmt, ...));
-
-#define ISREG(st) (((st).st_mode&S_IFMT) == S_IFREG)
-
-/* these are defined in usrLib.c */
-extern int spTaskPriority, spTaskOptions;
-
-/* forward declarations */
-static FUNCTION(FUNCPTR, lookup, (char*));
-static FUNCTION(int, read_fill, (int, char*, int));
-#if (CPU == SPARC)
-static FUNCTION(RETSIGTYPE, fpe_sig_handler, (int)); /*where is this fun? */
-#elif (CPU == PPC603)
-static FUNCTION(void, fix_registers, (void));
-#endif
-static FUNCTION(void, close_pipes, (int*, int*, int));
-static FUNCTION(void, delete_hook, (void));
-static FUNCTION(void, initialize_allocation, (void));
-
-FUNCTION(STATUS, uxPipeDrv, (void));
-FUNCTION(STATUS, pipe, (int*));
-FUNCTION(void, uxPipeShow, (int));
-
-void erl_main(int argc, char **argv);
-void argcall(char *args);
-
-/* Malloc-related functions called from the VxWorks shell */
-EXTERN_FUNCTION(int, erl_set_memory_block,
- (int, int, int, int, int, int, int, int, int, int));
-EXTERN_FUNCTION(int, erl_memory_show,
- (int, int, int, int, int, int, int, int, int, int));
-
-#define DEFAULT_PORT_STACK_SIZE 100000
-static int port_stack_size;
-
-static int erlang_id = 0; /* Inited at loading, set/reset at each run */
-
-/* interval time reported to emulator */
-static int sys_itime;
-
-/* XXX - This is defined in .../config/all/configAll.h (NUM_FILES),
- and not easily accessible at compile or run time - however,
- in VxWorks 5.1 it is stored in the (undocumented?) maxFiles variable;
- probably shouldn't depend on it, but we try to pick it up... */
-static int max_files = 50; /* default configAll.h */
-
-int erts_vxworks_max_files;
-
-/*
- * used by the break handler (set by signal handler on ctl-c)
- */
-volatile int erts_break_requested;
-
-/********************* General functions ****************************/
-
-/*
- * Reset the terminal to the original settings on exit
- * (nothing to do for VxWorks).
- */
-void sys_tty_reset(int exit_code)
-{
-}
-
-Uint
-erts_sys_misc_mem_sz(void)
-{
- Uint res = erts_check_io_size();
- /* res += FIXME */
- return res;
-}
-
-/*
- * XXX This declaration should not be here.
- */
-void erl_sys_schedule_loop(void);
-
-#ifdef SOFTDEBUG
-static void do_trace(int line, char *file, char *format, ...)
-{
- va_list va;
- int tid = taskIdSelf();
- char buff[512];
-
- va_start(va, format);
- sprintf(buff,"Trace: Task: 0x%08x, %s:%d - ",
- tid, file, line);
- vsprintf(buff + strlen(buff), format, va);
- va_end(va);
- strcat(buff,"\r\n");
- write(2,buff,strlen(buff));
-}
-
-#define TRACE() do_trace(__LINE__, __FILE__,"")
-#define TRACEF(Args...) do_trace(__LINE__,__FILE__, ## Args)
-#endif
-
-void
-erts_sys_pre_init(void)
-{
- if (erlang_id != 0) {
- /* NOTE: This particular case must *not* call erl_exit() */
- erts_fprintf(stderr, "Sorry, erlang is already running (as task %d)\n",
- erlang_id);
- exit(1);
- }
-
- /* This must be done as early as possible... */
- if(!reclaim_init())
- fprintf(stderr, "Warning : reclaim facility should be initiated before "
- "erlang is started!\n");
- erts_vxworks_max_files = max_files = reclaim_max_files();
-
- /* Floating point exceptions */
-#if (CPU == SPARC)
- sys_sigset(SIGFPE, fpe_sig_handler);
-#elif (CPU == PPC603)
- fix_registers();
-#endif
-
- /* register the private delete hook in reclaim */
- save_delete_hook((FUNCPTR)delete_hook, (caddr_t)0);
- erlang_id = taskIdSelf();
-#ifdef DEBUG
- printf("emulator task id = 0x%x\n", erlang_id);
-#endif
-}
-
-void erts_sys_alloc_init(void)
-{
- initialize_allocation();
-}
-
-void
-erl_sys_init(void)
-{
- setvbuf(stdout, (char *)NULL, _IOLBF, BUFSIZ);
- /* XXX Bug in VxWorks stdio loses fputch()'ed output after the
- setvbuf() but before a *printf(), and possibly worse (malloc
- errors, crash?) - so let's give it a *printf().... */
- fprintf(stdout, "%s","");
-}
-
-void
-erl_sys_args(int* argc, char** argv)
-{
- erts_init_check_io();
- max_files = erts_check_io_max_files();
- ASSERT(max_files <= erts_vxworks_max_files);
-}
-
-void
-erts_sys_schedule_interrupt(int set)
-{
- erts_check_io_interrupt(set);
-}
-
-/*
- * Called from schedule() when it runs out of runnable processes,
- * or when Erlang code has performed INPUT_REDUCTIONS reduction
- * steps. runnable == 0 iff there are no runnable Erlang processes.
- */
-void
-erl_sys_schedule(int runnable)
-{
- erts_check_io(!runnable);
-}
-
-void erts_do_break_handling(void)
-{
- SET_BLOCKING(0);
- /* call the break handling function, reset the flag */
- do_break();
- erts_break_requested = 0;
- SET_NONBLOCKING(0);
-}
-
-/* signal handling */
-RETSIGTYPE (*sys_sigset(sig, func))()
- int sig;
- RETSIGTYPE (*func)();
-{
- struct sigaction act, oact;
-
- sigemptyset(&act.sa_mask);
- act.sa_flags = 0;
- act.sa_handler = func;
- sigaction(sig, &act, &oact);
- return(oact.sa_handler);
-}
-
-void sys_sigblock(int sig)
-{
- sigset_t mask;
-
- sigemptyset(&mask);
- sigaddset(&mask, sig);
- sigprocmask(SIG_BLOCK, &mask, (sigset_t *)NULL);
-}
-
-void sys_sigrelease(int sig)
-{
- sigset_t mask;
-
- sigemptyset(&mask);
- sigaddset(&mask, sig);
- sigprocmask(SIG_UNBLOCK, &mask, (sigset_t *)NULL);
-}
-
-void
-erts_sys_prepare_crash_dump(void)
-{
-
-}
-
-/* register signal handlers XXX - they don't work, need to find out why... */
-/* set up signal handlers for break and quit */
-static void request_break(void)
-{
- /* just set a flag - checked for and handled
- * in main thread (not signal handler).
- * see check_io()
- */
-#ifdef DEBUG
- fprintf(stderr,"break!\n");
-#endif
- erts_break_requested = 1;
- erts_check_io_async_sig_interrupt(1); /* Make sure we don't sleep in erts_poll_wait */
-}
-
-static void do_quit(void)
-{
- halt_0(0);
-}
-
-void erts_set_ignore_break(void) {
-}
-
-void init_break_handler(void)
-{
- sys_sigset(SIGINT, request_break);
- sys_sigset(SIGQUIT, do_quit);
-}
-
-void erts_replace_intr(void) {
-}
-
-int sys_max_files(void)
-{
- return(max_files);
-}
-
-/******************* Routines for time measurement *********************/
-
-int sys_init_time(void)
-{
- erts_clock_rate = sysClkRateGet();
- /*
- ** One could imagine that it would be better returning
- ** a resolution more near the clock rate, like in:
- ** return 1000 / erts_clock_rate;
- ** but tests show that such isn't the case (rounding errors?)
- ** Well, we go for the Unix variant of returning 1
- ** as a constant virtual clock rate.
- */
- return SYS_CLOCK_RESOLUTION;
-}
-
-int erts_clock_rate;
-static volatile int ticks_inuse;
-static volatile unsigned long ticks_collected; /* will wrap */
-static WDOG_ID watchdog_id;
-static ULONG user_time;
-static int this_task_id, sys_itime;
-static SysHrTime hrtime_wrap;
-static unsigned long last_tick_count;
-
-static void tolerant_time_clockint(int count)
-{
- if (watchdog_id != NULL) {
- if (taskIsReady(this_task_id))
- user_time += 1;
- ++count;
- if (!ticks_inuse) {
- ticks_collected += count;
- count = 0;
- }
- wdStart(watchdog_id, 1, (FUNCPTR)tolerant_time_clockint, count);
- }
-}
-
-int sys_init_hrtime(void)
-{
- this_task_id = taskIdSelf(); /* OK, this only works for one single task
- in the system... */
- user_time = 0;
-
- ticks_inuse = 0;
- ticks_collected = 0;
- hrtime_wrap = 0;
- last_tick_count = 0;
-
- sys_itime = 1000 / erts_clock_rate;
- watchdog_id = wdCreate();
- wdStart(watchdog_id, 1, (FUNCPTR) tolerant_time_clockint, 0);
- return 0;
-}
-
-SysHrTime sys_gethrtime(void)
-{
- SysHrTime ticks;
-
- ++ticks_inuse;
- ticks = (SysHrTime) (ticks_collected & 0x7FFFFFFF);
- ticks_inuse = 0;
- if (ticks < (SysHrTime) last_tick_count) {
- hrtime_wrap += 1UL << 31;
- }
- last_tick_count = ticks;
- return (ticks + hrtime_wrap) * ((SysHrTime) (1000000000UL /
- erts_clock_rate));
-}
-
-void sys_gettimeofday(SysTimeval *tvp)
-{
- struct timespec now;
-
- clock_gettime(CLOCK_REALTIME, &now);
- tvp->tv_sec = now.tv_sec;
- tvp->tv_usec = now.tv_nsec / 1000;
-}
-
-clock_t sys_times(SysTimes *t)
-{
- t->tms_stime = t->tms_cutime = t->tms_cstime = 0;
- ++ticks_inuse;
- t->tms_utime = user_time;
- ticks_inuse = 0;
- return tickGet(); /* The best we can do... */
-}
-
-/* This is called when *this task* is deleted */
-static void delete_hook(void)
-{
- if (watchdog_id != NULL) {
- wdDelete(watchdog_id);
- watchdog_id = NULL;
- }
- erlang_id = 0;
- this_task_id = 0;
-}
-
-/************************** OS info *******************************/
-
-/* Used by erlang:info/1. */
-/* (This code was formerly in drv.XXX/XXX_os_drv.c) */
-
-#define MAX_VER_STR 9 /* Number of characters to
- consider in version string */
-
-static FUNCTION(int, get_number, (char** str_ptr));
-
-char os_type[] = "vxworks";
-
-static int
-get_number(char **str_ptr)
-{
- char* s = *str_ptr; /* Pointer to beginning of string. */
- char* dot; /* Pointer to dot in string or NULL. */
-
- if (!isdigit(*s))
- return 0;
- if ((dot = strchr(s, '.')) == NULL) {
- *str_ptr = s+strlen(s);
- return atoi(s);
- } else {
- *dot = '\0';
- *str_ptr = dot+1;
- return atoi(s);
- }
-}
-
-/* namebuf; Where to return the name. */
-/* size; Size of name buffer. */
-void
-os_flavor(char *namebuf, unsigned size)
-{
- strcpy(namebuf, "-");
-}
-
-/* int* pMajor; Pointer to major version. */
-/* int* pMinor; Pointer to minor version. */
-/* int* pBuild; Pointer to build number. */
-void
-os_version(int *pMajor, int *pMinor, int *pBuild)
-{
- char os_ver[MAX_VER_STR+2];
- char* release; /* Pointer to the release string:
- * X.Y or X.Y.Z.
- */
- strncpy(os_ver, vxWorksVersion, MAX_VER_STR);
- release = os_ver;
- *pMajor = get_number(&release);
- *pMinor = get_number(&release);
- *pBuild = get_number(&release);
-}
-
-void init_getenv_state(GETENV_STATE *state)
-{
- *state = NULL;
-}
-
-char *getenv_string(GETENV_STATE *state0)
-{
- return NULL;
-}
-
-void fini_getenv_state(GETENV_STATE *state)
-{
- *state = NULL;
-}
-
-/************************** Port I/O *******************************/
-
-
-/* I. Common stuff */
-
-#define TMP_BUF_MAX (tmp_buf_size - 1024)
-static byte *tmp_buf;
-static Uint tmp_buf_size;
-
-/* II. The spawn/fd/vanilla drivers */
-
-/* This data is shared by these drivers - initialized by spawn_init() */
-static struct driver_data {
- int port_num, ofd, packet_bytes, report_exit;
- int exitcode, exit_reported; /* For returning of exit codes. */
-} *driver_data; /* indexed by fd */
-
-/*
- * Locking only for exitcodes and exit_reported, one global sem for all
- * spawn ports as this is rare.
- */
-static SEM_ID driver_data_sem = NULL;
-/*
- * Also locking when looking up entries in the load table
- */
-static SEM_ID entry_data_sem = NULL;
-
-/* We maintain a linked fifo queue of these structs in order */
-/* to manage unfinished reads and writes on different fds */
-
-typedef struct pend {
- char *cpos;
- int fd;
- int remain;
- struct pend *next;
- char buf[1]; /* this is a trick to be able to malloc one chunk */
-} Pend;
-
-static struct fd_data {
- int inport, outport;
- char *buf, *cpos;
- int sz, remain; /* for input on fd */
- Pend* pending; /* pending outputs */
-
-} *fd_data; /* indexed by fd */
-
-
-/* Driver interfaces */
-static ErlDrvData spawn_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts);
-static ErlDrvData fd_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts);
-static ErlDrvData vanilla_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts);
-static int spawn_init(void);
-static void fd_stop(ErlDrvData);
-static void stop(ErlDrvData);
-static void ready_input(ErlDrvData fd, ErlDrvEvent ready_fd);
-static void ready_output(ErlDrvData fd, ErlDrvEvent ready_fd);
-static void output(ErlDrvData fd, char *buf, ErlDrvSizeT len);
-static void stop_select(ErlDrvEvent, void*);
-
-struct erl_drv_entry spawn_driver_entry = {
- spawn_init,
- spawn_start,
- stop,
- output,
- ready_input,
- ready_output,
- "spawn",
- NULL, /* finish */
- NULL, /* handle */
- NULL, /* control */
- NULL, /* timeout */
- NULL, /* outputv */
- NULL, /* ready_async */
- NULL, /* flush */
- NULL, /* call */
- NULL, /* event */
- ERL_DRV_EXTENDED_MARKER,
- ERL_DRV_EXTENDED_MAJOR_VERSION,
- ERL_DRV_EXTENDED_MINOR_VERSION,
- 0, /* ERL_DRV_FLAGs */
- NULL, /* handle2 */
- NULL, /* process_exit */
- stop_select
-
-};
-struct erl_drv_entry fd_driver_entry = {
- NULL,
- fd_start,
- fd_stop,
- output,
- ready_input,
- ready_output,
- "fd",
- NULL, /* finish */
- NULL, /* handle */
- NULL, /* control */
- NULL, /* timeout */
- NULL, /* outputv */
- NULL, /* ready_async */
- NULL, /* flush */
- NULL, /* call */
- NULL, /* event */
- ERL_DRV_EXTENDED_MARKER,
- ERL_DRV_EXTENDED_MAJOR_VERSION,
- ERL_DRV_EXTENDED_MINOR_VERSION,
- 0, /* ERL_DRV_FLAGs */
- NULL, /* handle2 */
- NULL, /* process_exit */
- stop_select
-};
-struct erl_drv_entry vanilla_driver_entry = {
- NULL,
- vanilla_start,
- stop,
- output,
- ready_input,
- ready_output,
- "vanilla",
- NULL, /* finish */
- NULL, /* handle */
- NULL, /* control */
- NULL, /* timeout */
- NULL, /* outputv */
- NULL, /* ready_async */
- NULL, /* flush */
- NULL, /* call */
- NULL, /* event */
- ERL_DRV_EXTENDED_MARKER,
- ERL_DRV_EXTENDED_MAJOR_VERSION,
- ERL_DRV_EXTENDED_MINOR_VERSION,
- 0, /* ERL_DRV_FLAGs */
- NULL, /* handle2 */
- NULL, /* process_exit */
- stop_select
-};
-
-/*
-** Set up enough of the driver_data structure to be able to report exit status.
-** Some fields may be initialized again later, but that is no real problem.
-*/
-static int pre_set_driver_data(int ifd, int ofd,
- int read_write, int report_exit) {
- if (read_write & DO_READ) {
- driver_data[ifd].report_exit = report_exit;
- driver_data[ifd].exitcode = 0;
- driver_data[ifd].exit_reported = 0;
- if (read_write & DO_WRITE) {
- driver_data[ifd].ofd = ofd;
- if (ifd != ofd) {
- driver_data[ofd] = driver_data[ifd];
- driver_data[ofd].report_exit = 0;
- }
- } else { /* DO_READ only */
- driver_data[ifd].ofd = -1;
- }
- return(ifd);
- } else { /* DO_WRITE only */
- driver_data[ofd].report_exit = 0;
- driver_data[ofd].exitcode = 0;
- driver_data[ofd].exit_reported = 0;
- driver_data[ofd].ofd = ofd;
- return(ofd);
- }
-}
-
-/*
-** Set up the driver_data structure; it may have been partly initialized
-** by the function above, but we don't care.
-*/
-static int set_driver_data(int port_num, int ifd, int ofd,
- int packet_bytes, int read_write,
- int report_exit)
-{
- if (read_write & DO_READ) {
- driver_data[ifd].packet_bytes = packet_bytes;
- driver_data[ifd].port_num = port_num;
- driver_data[ifd].report_exit = report_exit;
- if (read_write & DO_WRITE) {
- driver_data[ifd].ofd = ofd;
- if (ifd != ofd) {
- driver_data[ofd] = driver_data[ifd];
- driver_data[ofd].report_exit = 0;
- }
- } else { /* DO_READ only */
- driver_data[ifd].ofd = -1;
- }
- (void) driver_select(port_num, ifd, ERL_DRV_READ|ERL_DRV_USE, 1);
- return(ifd);
- } else { /* DO_WRITE only */
- driver_data[ofd].packet_bytes = packet_bytes;
- driver_data[ofd].port_num = port_num;
- driver_data[ofd].report_exit = 0;
- driver_data[ofd].ofd = ofd;
- return(ofd);
- }
-}
-
-static int need_new_sems = 1;
-
-static int spawn_init(void)
-{
- char *stackenv;
- int size;
- driver_data = (struct driver_data *)
- erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
- if (need_new_sems) {
- driver_data_sem = semMCreate
- (SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE);
- entry_data_sem = semMCreate
- (SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE);
- }
- if (driver_data_sem == NULL || entry_data_sem == NULL) {
- erl_exit(1,"Could not allocate driver locking semaphore.");
- }
- need_new_sems = 0;
-
- (void)uxPipeDrv(); /* Install pipe driver */
-
- if ((stackenv = getenv("ERLPORTSTACKSIZE")) != NULL &&
- (size = atoi(stackenv)) > 0)
- port_stack_size = size;
- else
- port_stack_size = DEFAULT_PORT_STACK_SIZE;
- return 0;
-}
-
-/* Argv has to be built with the save_xxx routines, not with whatever
- sys_xxx2 has in mind... */
-#define argv_alloc save_malloc
-#define argv_realloc save_realloc
-#define argv_free save_free
-/* Build argv, return argc or -1 on failure */
-static int build_argv(char *name, char ***argvp)
-{
- int argvsize = 10, argc = 0;
- char *args, *arglast = NULL, *argp;
- char **argv;
-
-#ifdef DEBUG
- fdprintf(2, "Building argv, %s =>\n", name);
-#endif
- if ((argv = (char **)argv_alloc(argvsize * sizeof(char *))) == NULL)
- return(-1);
- if ((args = argv_alloc(strlen(name) + 1)) == NULL)
- return(-1);
- strcpy(args, name);
- argp = strtok_r(args, " \t", &arglast);
- while (argp != NULL) {
- if (argc + 1 >= argvsize) {
- argvsize += 10;
- argv = (char **)argv_realloc((char *)argv, argvsize*sizeof(char *));
- if (argv == NULL) {
- argv_free(args);
- return(-1);
- }
- }
-#ifdef DEBUG
- fdprintf(2, "%s\n", argp);
-#endif
- argv[argc++] = argp;
- argp = strtok_r((char *)NULL, " \t", &arglast);
- }
- argv[argc] = NULL;
- *argvp = argv;
- return(argc);
-}
-#undef argv_alloc
-#undef argv_realloc
-#undef argv_free
-
-
-/* Look up and return a global text symbol, or NULL on failure
- Symbol name is null-terminated and without the leading '_' */
-static FUNCPTR
-lookup(char *sym)
-{
- char buf[256];
- char *symname = buf;
- int len;
- FUNCPTR entry;
- SYM_TYPE type;
-
- len = strlen(sym);
- if (len > 254 && (symname = malloc(len+2)) == NULL)
- return(NULL);
-#if defined _ARCH_PPC || defined SIMSPARCSOLARIS
- /* GCC for PPC and SIMSPARC doesn't add a leading _ to symbols */
- strcpy(symname, sym);
-#else
- sprintf(symname, "_%s", sym);
-#endif
- if (symFindByNameAndType(sysSymTbl, symname, (char **)&entry,
- &type, N_EXT | N_TEXT, N_EXT | N_TEXT) != OK)
- entry = NULL;
- if (symname != buf)
- free(symname);
- return(entry);
-}
-
-/* This function is spawned to build argc, argv, lookup the symbol to call,
- connect and set up file descriptors, and make the actual call.
- N.B. 'name' was allocated by the Erlang task (through plain_malloc) and
- is freed by this port program task.
- Note: 'name' may be a path containing '/'. */
-
-static void call_proc(char *name, int ifd, int ofd, int read_write,
- int redir_stderr, int driver_index,
- int p6, int p7, int p8, int p9)
-{
- int argc;
- char **argv, *bname;
- FUNCPTR entry;
- int ret = -1;
-
- /* Must consume 'name' */
- argc = build_argv(name, &argv);
- plain_free(name);
- /* Find basename of path */
- if ((bname = strrchr(argv[0], '/')) != NULL) {
- bname++;
- } else {
- bname = argv[0];
- }
-#ifdef DEBUG
- fdprintf(2, "Port program name: %s\n", bname);
-#endif
- semTake(entry_data_sem, WAIT_FOREVER);
-
- if (argc > 0) {
- if ((entry = lookup(bname)) == NULL) {
- int fd;
- char *fn;
- /* NOTE: We don't check the return value of loadModule,
- since that was incompatibly changed from 5.0.2b to 5.1,
- but rather do a repeated lookup(). */
- if ((fd = open(argv[0], O_RDONLY)) > 0) {
- (void) loadModule(fd, GLOBAL_SYMBOLS);
- close(fd);
- entry = lookup(bname);
- }
- if (entry == NULL) {
- /* filename == func failed, try func.o */
- if ((fn = malloc(strlen(argv[0]) + 3)) != NULL) { /* ".o\0" */
- strcpy(fn, argv[0]);
- strcat(fn, ".o");
- if ((fd = open(fn, O_RDONLY)) > 0) {
- (void) loadModule(fd, GLOBAL_SYMBOLS);
- close(fd);
- entry = lookup(bname);
- }
- free(fn);
- }
- }
- }
- } else {
- entry = NULL;
- }
- semGive(entry_data_sem);
-
- if (read_write & DO_READ) { /* emulator read */
- save_fd(ofd);
- ioTaskStdSet(0, 1, ofd); /* stdout for process */
- if(redir_stderr)
- ioTaskStdSet(0, 2, ofd);/* stderr for process */
- }
- if (read_write & DO_WRITE) { /* emulator write */
- save_fd(ifd);
- ioTaskStdSet(0, 0, ifd); /* stdin for process */
- }
- if (entry != NULL) {
- ret = (*entry)(argc, argv, (char **)NULL); /* NULL for envp */
- } else {
- fdprintf(2, "Could not exec \"%s\"\n", argv[0]);
- ret = -1;
- }
- if (driver_data[driver_index].report_exit) {
- semTake(driver_data_sem, WAIT_FOREVER);
- driver_data[driver_index].exitcode = ret;
- driver_data[driver_index].exit_reported = 1;
- semGive(driver_data_sem);
- }
- /* We *don't* want to close the pipes here, but let the delete
- hook take care of it - it might want to flush stdout and there'd
- better be an open descriptor to flush to... */
- exit(ret);
-}
-
-static void close_pipes(int ifd[2], int ofd[2], int read_write)
-{
- if (read_write & DO_READ) {
- (void) close(ifd[0]);
- (void) close(ifd[1]);
- }
- if (read_write & DO_WRITE) {
- (void) close(ofd[0]);
- (void) close(ofd[1]);
- }
-}
-
-static void init_fd_data(int fd, int port_unused_argument)
-{
- SET_NONBLOCKING(fd);
- fd_data[fd].pending = NULL;
- fd_data[fd].buf = fd_data[fd].cpos = NULL;
- fd_data[fd].remain = fd_data[fd].sz = 0;
-}
-
-static ErlDrvData spawn_start(ErlDrvPort port_num, char *name,SysDriverOpts* opts)
-{
- int ifd[2], ofd[2], len, nl, id;
- char taskname[11], *progname, *bname;
- char *space_in_command;
- int packet_bytes = opts->packet_bytes;
- int read_write = opts->read_write;
- int use_stdio = opts->use_stdio;
- int redir_stderr = opts->redir_stderr;
- int driver_index;
-
- if (!use_stdio){
- return (ErlDrvData) -3;
- }
-
- /* Create pipes and set the Erlang task as owner of its
- * read and write ends (through save_fd()).
- */
- switch (read_write) {
- case DO_READ:
- if (pipe(ifd) < 0){
- return (ErlDrvData) -2;
- }
- if (ifd[0] >= max_files) {
- close_pipes(ifd, ofd, read_write);
- errno = ENFILE;
- return (ErlDrvData) -2;
- }
- save_fd(ifd[0]);
- break;
- case DO_WRITE:
- if (pipe(ofd) < 0) {
- return (ErlDrvData) -2;
- }
- if (ofd[1] >= max_files) {
- close_pipes(ifd, ofd, read_write);
- errno = ENFILE;
- return (ErlDrvData) -2;
- }
- save_fd(ofd[1]);
- break;
- case DO_READ|DO_WRITE:
- if (pipe(ifd) < 0){
- return (ErlDrvData) -2;
- }
- if (ifd[0] >= max_files || pipe(ofd) < 0) {
- close_pipes(ifd, ofd, DO_READ);
- errno = ENFILE;
- return (ErlDrvData) -2;
- }
- if (ofd[1] >= max_files) {
- close_pipes(ifd, ofd, read_write);
- errno = ENFILE;
- return (ErlDrvData) -2;
- }
- save_fd(ifd[0]);
- save_fd(ofd[1]);
- break;
- default:
- return (ErlDrvData) -1;
- }
-
- /* Allocate space for program name to be freed by the
- * spawned task. We use plain_malloc so that the allocated
- * space is not owned by the Erlang task.
- */
-
- if ((progname = plain_malloc(strlen(name) + 1)) == NULL) {
- close_pipes(ifd, ofd, read_write);
- errno = ENOMEM;
- return (ErlDrvData) -2;
- }
- strcpy(progname, name);
-
- /* Check if name contains a space
- * (e.g "port_test -o/home/gandalf/tornado/wind/target/erlang")
- */
- if ((space_in_command = strrchr(progname, ' ')) != NULL) {
- *space_in_command = '\0';
- }
-
- /* resulting in "port_test" */
- if ((bname = strrchr(progname, '/')) != NULL)
- bname++;
- else
- bname = progname;
-
- /* resulting in "port_test" */
- len = strlen(bname);
- nl = len > 10 ? 10 : len;
- strncpy(taskname, bname, nl);
- taskname[nl] = '\0';
- if (space_in_command != NULL)
- *space_in_command = ' ';
- driver_index = pre_set_driver_data(ifd[0], ofd[1],
- read_write, opts->exit_status);
-
- /* resetting to "port_test -o/home/gandalf/tornado/wind/target/erlang" */
- if ((id = taskSpawn(taskname, spTaskPriority, spTaskOptions,
- port_stack_size, (FUNCPTR)call_proc, (int)progname,
- ofd[0], ifd[1], read_write, redir_stderr, driver_index,
- 0,0,0,0))
- == ERROR) {
- close_pipes(ifd, ofd, read_write);
- plain_free(progname); /* only when spawn fails */
- errno = ENOMEM;
- return (ErlDrvData) -2;
- }
-#ifdef DEBUG
- fdprintf(2, "Spawned %s as %s[0x%x]\n", name, taskname, id);
-#endif
- if (read_write & DO_READ)
- init_fd_data(ifd[0], port_num);
- if (read_write & DO_WRITE)
- init_fd_data(ofd[1], port_num);
- return (ErlDrvData) (set_driver_data(port_num, ifd[0], ofd[1],
- packet_bytes,read_write,
- opts->exit_status));
-}
-
-static ErlDrvData fd_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts)
-{
- if (((opts->read_write & DO_READ) && opts->ifd >= max_files) ||
- ((opts->read_write & DO_WRITE) && opts->ofd >= max_files)) {
- return (ErlDrvData) -1;
- }
-
- if (opts->read_write & DO_READ)
- init_fd_data(opts->ifd, port_num);
- if (opts->read_write & DO_WRITE)
- init_fd_data(opts->ofd, port_num);
- return (ErlDrvData) (set_driver_data(port_num, opts->ifd, opts->ofd,
- opts->packet_bytes, opts->read_write, 0));
-}
-
-static void clear_fd_data(int fd)
-{
-
- if (fd_data[fd].sz > 0)
- erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fd_data[fd].buf);
- fd_data[fd].buf = NULL;
- fd_data[fd].sz = 0;
- fd_data[fd].remain = 0;
- fd_data[fd].cpos = NULL;
-}
-
-static void nbio_stop_fd(int port_num, int fd)
-{
- Pend *p, *p1;
-
- driver_select(port_num, fd, ERL_DRV_READ|ERL_DRV_WRITE, 0);
- clear_fd_data(fd);
- p = fd_data[fd].pending;
- SET_BLOCKING(fd);
- while (p) {
- p1 = p->next;
- free(p);
- p = p1;
- }
- fd_data[fd].pending = NULL;
-}
-
-static void fd_stop(ErlDrvData drv_data)
-{
- int ofd;
- int fd = (int) drv_data;
-
- nbio_stop_fd(driver_data[fd].port_num, (int)fd);
- ofd = driver_data[fd].ofd;
- if (ofd != fd && ofd != -1)
- nbio_stop_fd(driver_data[fd].port_num, (int)ofd); /* XXX fd = ofd? */
-}
-
-static ErlDrvData
-vanilla_start(ErlDrvPort port_num, char *name, SysDriverOpts* opts)
-{
- int flags, fd;
- struct stat statbuf;
-
- DEBUGF(("vanilla_start, name: %s [r=%1i w=%1i]\n", name,
- opts->read_write & DO_READ,
- opts->read_write & DO_WRITE));
-
- flags = (opts->read_write == DO_READ ? O_RDONLY :
- opts->read_write == DO_WRITE ? O_WRONLY|O_CREAT|O_TRUNC :
- O_RDWR|O_CREAT);
- if ((fd = open(name, flags, 0666)) < 0){
- errno = ENFILE;
- return (ErlDrvData) -2;
- }
- if (fd >= max_files) {
- close(fd);
- errno = ENFILE;
- return (ErlDrvData) -2;
- }
- if (fstat(fd, &statbuf) < 0) {
- close(fd);
- errno = ENFILE;
- return (ErlDrvData) -2;
- }
-
- /* Return error for reading regular files (doesn't work) */
- if (ISREG(statbuf) && ((opts->read_write) & DO_READ)) {
- close(fd);
- return (ErlDrvData) -3;
- }
- init_fd_data(fd, port_num);
- return (ErlDrvData) (set_driver_data(port_num, fd, fd,
- opts->packet_bytes, opts->read_write, 0));
-}
-
-/* Note that driver_data[fd].ifd == fd if the port was opened for reading, */
-/* otherwise (i.e. write only) driver_data[fd].ofd = fd. */
-
-static void stop(ErlDrvData drv_data)
-{
- int port_num, ofd;
- int fd = (int) drv_data;
-
- port_num = driver_data[fd].port_num;
- nbio_stop_fd(port_num, fd);
- driver_select(port_num, fd, ERL_DRV_USE, 0); /* close(fd) */
-
- ofd = driver_data[fd].ofd;
- if (ofd != fd && ofd != -1) {
- nbio_stop_fd(port_num, ofd);
- driver_select(port_num, ofd, ERL_DRV_USE, 0); /* close(fd) */
- }
-}
-
-static int sched_write(int port_num,int fd, char *buf, int len, int pb)
-{
- Pend *p, *p2, *p3;
- int p_bytes = len;
-
- p = (Pend*) erts_alloc_fnf(ERTS_ALC_T_PEND_DATA, pb + len + sizeof(Pend));
- if (!p) {
- driver_failure(port_num, -1);
- return(-1);
- }
-
- switch(pb) {
- case 4: put_int32(len, p->buf); break;
- case 2: put_int16(len, p->buf); break;
- case 1: put_int8(len, p->buf); break;
- case 0: break; /* Handles this case too */
- }
- sys_memcpy(p->buf + pb, buf, len);
- driver_select(port_num, fd, ERL_DRV_WRITE|ERL_DRV_USE, 1);
- p->cpos = p->buf;
- p->fd = fd;
- p->next = NULL;
- p->remain = len + pb;
- p2 = fd_data[fd].pending;
- if (p2 == NULL)
- fd_data[fd].pending = p;
- else {
- p3 = p2->next;
- while(p3) {
- p_bytes += p2->remain;
- p2 = p2->next;
- p3 = p3->next;
- }
- p2->next = p;
- }
- if (p_bytes > (1 << 13)) /* More than 8 k pending */
- set_busy_port(port_num, 1);
- return(0);
-}
-
-/* Fd is the value returned as drv_data by the start func */
-static void output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len)
-{
- int buf_done, port_num, wval, pb, ofd;
- byte lb[4];
- struct iovec iv[2];
- int fd = (int) drv_data;
-
- pb = driver_data[fd].packet_bytes;
- port_num = driver_data[fd].port_num;
-
- if ((ofd = driver_data[fd].ofd) == -1) {
- return;
- }
-
- if (fd_data[ofd].pending) {
- sched_write(port_num, ofd, buf, len, pb);
- return;
- }
-
- if ((pb == 2 && len > 65535) || (pb == 1 && len > 255)) {
- driver_failure_posix(port_num, EINVAL);
- return;
- }
- if (pb == 0) {
- wval = write(ofd, buf, len);
- } else {
- lb[0] = (len >> 24) & 255; /* MSB */
- lb[1] = (len >> 16) & 255;
- lb[2] = (len >> 8) & 255;
- lb[3] = len & 255; /* LSB */
- iv[0].iov_base = (char*) lb + (4 - pb);
- iv[0].iov_len = pb;
- iv[1].iov_base = buf;
- iv[1].iov_len = len;
- wval = writev(ofd, iv, 2);
- }
- if (wval == pb + len ) {
- return;
- }
- if (wval < 0) {
- if ((errno == EINTR) || (errno == ERRNO_BLOCK)) {
- if (pb) {
- sched_write(port_num, ofd, buf ,len, pb);
- } else if (pb == 0) {
- sched_write(port_num, ofd, buf ,len, 0);
- }
- return;
- }
- driver_failure_posix(driver_data[fd].port_num, EINVAL);
- return;
- }
- if (wval < pb) {
- sched_write(port_num, ofd, (lb +4 -pb) + wval, pb-wval, 0);
- sched_write(port_num, ofd, buf ,len, 0);
- return;
- }
-
- /* we now know that wval < (pb + len) */
- buf_done = wval - pb;
- sched_write(port_num, ofd, buf + buf_done, len - buf_done,0);
-}
-
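For readers unfamiliar with the packet framing performed by output() and ready_input() above, the sketch below shows the framing step in isolation: a 1-, 2- or 4-byte big-endian length header is placed in front of the payload, mirroring what the put_int8/put_int16/put_int32 macros and the writev() call do above. The helper name frame_header() is invented for this example.

#include <stdio.h>
#include <string.h>

/* Write a pb-byte big-endian length header into hdr; return the header size. */
static int frame_header(unsigned char *hdr, int pb, size_t len)
{
    int i;
    for (i = 0; i < pb; i++)
        hdr[i] = (unsigned char) ((len >> (8 * (pb - 1 - i))) & 0xff);
    return pb;
}

int main(void)
{
    const char *payload = "hello";
    unsigned char frame[4 + 16];
    int pb = 2;                               /* {packet, 2}-style framing */
    int hlen = frame_header(frame, pb, strlen(payload));

    memcpy(frame + hlen, payload, strlen(payload));
    printf("header: %02x %02x, frame length: %zu bytes\n",
           (unsigned) frame[0], (unsigned) frame[1],
           hlen + strlen(payload));
    return 0;
}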
-static void stop_select(ErlDrvEvent fd, void* _)
-{
- close((int)fd);
-}
-
-static int ensure_header(int fd,char *buf,int packet_size, int sofar)
-{
- int res = 0;
- int remaining = packet_size - sofar;
-
- SET_BLOCKING(fd);
- if (read_fill(fd, buf+sofar, remaining) != remaining)
- return -1;
- switch (packet_size) {
- case 1: res = get_int8(buf); break;
- case 2: res = get_int16(buf); break;
- case 4: res = get_int32(buf); break;
- }
- SET_NONBLOCKING(fd);
- return(res);
-}
-
-static int port_inp_failure(int port_num, int ready_fd, int res)
-{
- (void) driver_select(port_num, ready_fd, ERL_DRV_READ|ERL_DRV_WRITE, 0);
- clear_fd_data(ready_fd);
- if (res == 0) {
- if (driver_data[ready_fd].report_exit) {
- int tmpexit = 0;
- int reported;
- /* Lock the driver_data structure */
- semTake(driver_data_sem, WAIT_FOREVER);
- if ((reported = driver_data[ready_fd].exit_reported))
- tmpexit = driver_data[ready_fd].exitcode;
- semGive(driver_data_sem);
- if (reported) {
- erts_fprintf(stderr,"Exitcode %d reported\r\n", tmpexit);
- driver_report_exit(port_num, tmpexit);
- }
- }
- driver_failure_eof(port_num);
- } else {
- driver_failure(port_num, res);
- }
- return 0;
-}
-
-/* fd is the drv_data that is returned from the */
-/* initial start routine */
-/* ready_fd is the descriptor that is ready to read */
-
-static void ready_input(ErlDrvData drv_data, ErlDrvEvent drv_event)
-{
- int port_num, packet_bytes, res;
- Uint h = 0;
- char *buf;
- int fd = (int) drv_data;
- int ready_fd = (int) drv_event;
-
- port_num = driver_data[fd].port_num;
- packet_bytes = driver_data[fd].packet_bytes;
-
- if (packet_bytes == 0) {
- if ((res = read(ready_fd, tmp_buf, tmp_buf_size)) > 0) {
- driver_output(port_num, (char*)tmp_buf, res);
- return;
- }
- port_inp_failure(port_num, ready_fd, res);
- return;
- }
-
- if (fd_data[ready_fd].remain > 0) { /* We try to read the remainder */
- /* space is allocated in buf */
- res = read(ready_fd, fd_data[ready_fd].cpos,
- fd_data[ready_fd].remain);
- if (res < 0) {
- if ((errno == EINTR) || (errno == ERRNO_BLOCK)) {
- ;
- } else {
- port_inp_failure(port_num, ready_fd, res);
- }
- } else if (res == 0) {
- port_inp_failure(port_num, ready_fd, res);
- } else if (res == fd_data[ready_fd].remain) { /* we're done */
- driver_output(port_num, fd_data[ready_fd].buf,
- fd_data[ready_fd].sz);
- clear_fd_data(ready_fd);
- } else { /* if (res < fd_data[ready_fd].remain) */
- fd_data[ready_fd].cpos += res;
- fd_data[ready_fd].remain -= res;
- }
- return;
- }
-
-
- if (fd_data[ready_fd].remain == 0) { /* clean fd */
- /* We make one read attempt and see what happens */
- res = read(ready_fd, tmp_buf, tmp_buf_size);
- if (res < 0) {
- if ((errno == EINTR) || (errno == ERRNO_BLOCK))
- return;
- port_inp_failure(port_num, ready_fd, res);
- return;
- }
- else if (res == 0) { /* eof */
- port_inp_failure(port_num, ready_fd, res);
- return;
- }
- else if (res < packet_bytes) { /* Ugly case... get at least */
- if ((h = ensure_header(ready_fd, tmp_buf, packet_bytes, res))==-1) {
- port_inp_failure(port_num, ready_fd, -1);
- return;
- }
- buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
- if (!buf) {
- port_inp_failure(port_num, ready_fd, -1);
- return;
- }
- fd_data[ready_fd].buf = buf;
- fd_data[ready_fd].sz = h;
- fd_data[ready_fd].remain = h;
- fd_data[ready_fd].cpos = buf;
- return;
- }
- else { /* if (res >= packet_bytes) */
- unsigned char* cpos = tmp_buf;
- int bytes_left = res;
- while (1) { /* driver_output as many as possible */
- if (bytes_left == 0) {
- clear_fd_data(ready_fd);
- return;
- }
- if (bytes_left < packet_bytes) { /* Yet an ugly case */
- if((h=ensure_header(ready_fd, cpos,
- packet_bytes, bytes_left))==-1) {
- port_inp_failure(port_num, ready_fd, -1);
- return;
- }
- buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
- if (!buf)
- port_inp_failure(port_num, ready_fd, -1);
- fd_data[ready_fd].buf = buf;
- fd_data[ready_fd].sz = h;
- fd_data[ready_fd].remain = h;
- fd_data[ready_fd].cpos = buf;
- return;
- }
- switch (packet_bytes) {
- case 1: h = get_int8(cpos); cpos += 1; break;
- case 2: h = get_int16(cpos); cpos += 2; break;
- case 4: h = get_int32(cpos); cpos += 4; break;
- }
- bytes_left -= packet_bytes;
- /* we've got the header, now check if we've got the data */
- if (h <= (bytes_left)) {
- driver_output(port_num, (char*) cpos, h);
- cpos += h;
- bytes_left -= h;
- continue;
- }
- else { /* The last message we got was split */
- buf = erts_alloc_fnf(ERTS_ALC_T_FD_ENTRY_BUF, h);
- if (!buf) {
- port_inp_failure(port_num, ready_fd, -1);
- }
- sys_memcpy(buf, cpos, bytes_left);
- fd_data[ready_fd].buf = buf;
- fd_data[ready_fd].sz = h;
- fd_data[ready_fd].remain = h - bytes_left;
- fd_data[ready_fd].cpos = buf + bytes_left;
- return;
- }
- }
- return;
- }
- }
- fprintf(stderr, "remain %d \n", fd_data[ready_fd].remain);
- port_inp_failure(port_num, ready_fd, -1);
-}
-
-
-/* fd is the drv_data that is returned from the */
-/* initial start routine */
-/* ready_fd is the descriptor that is ready to write */
-
-static void ready_output(ErlDrvData drv_data, ErlDrvEvent drv_event)
-{
- Pend *p;
- int wval;
-
- int fd = (int) drv_data;
- int ready_fd = (int) drv_event;
-
- while(1) {
- if ((p = fd_data[ready_fd].pending) == NULL) {
- driver_select(driver_data[fd].port_num, ready_fd,
- ERL_DRV_WRITE, 0);
- return;
- }
- wval = write(p->fd, p->cpos, p->remain);
- if (wval == p->remain) {
- fd_data[ready_fd].pending = p->next;
- erts_free(ERTS_ALC_T_PEND_DATA, p);
- if (fd_data[ready_fd].pending == NULL) {
- driver_select(driver_data[fd].port_num, ready_fd,
- ERL_DRV_WRITE, 0);
- set_busy_port(driver_data[fd].port_num, 0);
- return;
- }
- else
- continue;
- }
- else if (wval < 0) {
- if (errno == ERRNO_BLOCK || errno == EINTR)
- return;
- else {
- driver_select(driver_data[fd].port_num, ready_fd,
- ERL_DRV_WRITE, 0);
- driver_failure(driver_data[fd].port_num, -1);
- return;
- }
- }
- else if (wval < p->remain) {
- p->cpos += wval;
- p->remain -= wval;
- return;
- }
- }
-}
-
-/* Fills in the system's representation of the jam/beam process identifier.
-** The Pid is put in STRING representation in the supplied buffer;
-** no interpretation of this should be done by the rest of the
-** emulator. The buffer should be at least 21 bytes long.
-*/
-void sys_get_pid(char *buffer){
- int p = taskIdSelf(); /* Hmm, may be negative??? requires some GB of
- memory to make the TCB address convert to a
- negative value. */
- sprintf(buffer,"%d", p);
-}
-
-int
-erts_sys_putenv(char *buffer, int sep_ix)
-{
- return putenv(buffer);
-}
-
-int
-erts_sys_getenv(char *key, char *value, size_t *size)
-{
- char *orig_value;
- int res;
- orig_value = getenv(key);
- if (!orig_value)
- res = -1;
- else {
- size_t len = sys_strlen(orig_value);
- if (len >= *size) {
- *size = len + 1;
- res = 1;
- }
- else {
- *size = len;
- sys_memcpy((void *) value, (void *) orig_value, len+1);
- res = 0;
- }
- }
- return res;
-}
-
-int
-erts_sys_getenv__(char *key, char *value, size_t *size)
-{
- return erts_sys_getenv(key, value, size);
-}
-
-void
-sys_init_io(void)
-{
- tmp_buf = (byte *) erts_alloc(ERTS_ALC_T_SYS_TMP_BUF, SYS_TMP_BUF_SIZE);
- tmp_buf_size = SYS_TMP_BUF_SIZE;
- fd_data = (struct fd_data *)
- erts_alloc(ERTS_ALC_T_FD_TAB, max_files * sizeof(struct fd_data));
-}
-
-
-/* Fill buffer, return buffer length, 0 for EOF, < 0 for error. */
-
-static int read_fill(int fd, char *buf, int len)
-{
- int i, got = 0;
- do {
- if ((i = read(fd, buf+got, len-got)) <= 0) {
- return i;
- }
- got += i;
- } while (got < len);
- return (len);
-}
-
-
-/************************** Misc... *******************************/
-
-extern const char pre_loaded_code[];
-extern char* const pre_loaded[];
-
-
-/* Float conversion */
-
-int sys_chars_to_double(char *buf, double *fp)
-{
- char *s = buf;
-
- /* The following check is incorporated from the Vee machine */
-
-#define ISDIGIT(d) ((d) >= '0' && (d) <= '9')
-
- /* Robert says that something like this is what he really wanted:
- *
- * 7 == sscanf(Tbuf, "%[+-]%[0-9].%[0-9]%[eE]%[+-]%[0-9]%s", ....);
- * if (*s2 == 0 || *s3 == 0 || *s4 == 0 || *s6 == 0 || *s7)
- * break;
- */
-
- /* Scan string to check syntax. */
- if (*s == '+' || *s == '-')
- s++;
-
- if (!ISDIGIT(*s)) /* Leading digits. */
- return -1;
- while (ISDIGIT(*s)) s++;
- if (*s++ != '.') /* Decimal part. */
- return -1;
- if (!ISDIGIT(*s))
- return -1;
- while (ISDIGIT(*s)) s++;
- if (*s == 'e' || *s == 'E') {
- /* There is an exponent. */
- s++;
- if (*s == '+' || *s == '-')
- s++;
- if (!ISDIGIT(*s))
- return -1;
- while (ISDIGIT(*s)) s++;
- }
- if (*s) /* That should be it */
- return -1;
-
- if (sscanf(buf, "%lf", fp) != 1)
- return -1;
- return 0;
-}
-
-/*
- ** Convert a double to ascii format 0.dddde[+|-]ddd
- ** return number of characters converted
- */
-
-int sys_double_to_chars(double fp, char *buf)
-{
- (void) sprintf(buf, "%.20e", fp);
- return strlen(buf);
-}
-
-
-/* Floating point exceptions */
-
-#if (CPU == SPARC)
-jmp_buf fpe_jmp;
-
-RETSIGTYPE fpe_sig_handler(int sig)
-{
- longjmp(fpe_jmp, 1);
-}
-
-#elif (CPU == PPC603)
-static void fix_registers(void){
- FP_CONTEXT fpcontext;
- fppSave(&fpcontext);
- fpcontext.fpcsr &= ~(_PPC_FPSCR_INIT);
- fppRestore(&fpcontext);
-}
-#endif
-
-
-/* Return a pointer to a vector of names of preloaded modules */
-
-Preload* sys_preloaded(void)
-{
- return (Preload *) pre_loaded;
-}
-
-/* Return a pointer to preloaded code for module "module" */
-unsigned char* sys_preload_begin(Preload *pp)
-{
- return pp->code;
-}
-
-/* Clean up if allocated */
-void sys_preload_end(Preload *pp)
-{
- /* Nothing */
-}
-
-/* Read a key from console (?) */
-
-int sys_get_key(int fd)
-{
- int c;
- unsigned char rbuf[64];
-
- fflush(stdout); /* Flush query ??? */
-
- if ((c = read(fd,rbuf,64)) <= 0)
- return c;
- return rbuf[0];
-}
-
-
-/* A real printf that does the equivalent of fprintf(stdout, ...) */
-
-/* ARGSUSED */
-static STATUS
-stdio_write(char *buf, int nchars, int fp)
-{
- if (fwrite(buf, sizeof(char), nchars, (FILE *)fp) == 0)
- return(ERROR);
- return(OK);
-}
-
-int real_printf(const char *fmt, ...)
-{
- va_list ap;
- int err;
-
- va_start(ap, fmt);
- err = fioFormatV(fmt, ap, stdio_write, (int)stdout);
- va_end(ap);
- return(err);
-}
-
-
-/*
- * Little function to do argc, argv calls from (e.g.) VxWorks shell
- * The arguments should be in the form of a single ""-enclosed string
- * NOTE: This isn't really part of the emulator, just included here
- * so we can use the handy functions and memory reclamation.
- */
-void argcall(char *args)
-{
- int argc;
- char **argv;
- FUNCPTR entry;
-
- if (args != NULL) {
- if ((argc = build_argv(args, &argv)) > 0) {
- if ((entry = lookup(argv[0])) != NULL)
- (*entry)(argc, argv, (char **)NULL); /* NULL for envp */
- else
- fprintf(stderr, "Couldn't find %s\n", argv[0]);
- } else
- fprintf(stderr, "Failed to build argv!\n");
- } else
- fprintf(stderr, "No argument list!\n");
-}
-
-
-/* That concludes the Erlang stuff - now we just need to implement an OS...
- - Just kidding, but resource reclamation isn't the strength of VxWorks */
-#undef calloc
-#undef free
-#undef cfree
-#undef malloc
-#undef realloc
-#undef open
-#undef creat
-#undef socket
-#undef accept
-#undef close
-#undef fopen
-#undef fdopen
-#undef freopen
-#undef fclose
-
-/********************* Using elib_malloc ****************************/
-/* This gives us yet another level of malloc wrappers. The purpose  */
-/* is to be able to select between different varieties of memory */
-/* allocation without recompiling. */
-/* Maybe the performance is somewhat degraded by this, but */
-/* on the other hand, performance may be much better if the most */
-/* suitable malloc is used (not to mention the much lower */
-/* fragmentation). */
-/* /Patrik N */
-/********************************************************************/
-
-/*
- * I don't want to include the whole elib header, especially
- * as it uses char * for generic pointers. Let's fool ANSI C instead.
- */
-extern void *elib_malloc(size_t);
-extern void *elib_realloc(void *, size_t);
-extern void elib_free(void *);
-extern void elib_init(void *, int);
-extern void elib_force_init(void *, int);
-extern size_t elib_sizeof(void *);
-
-/* Flags */
-#define USING_ELIB_MALLOC 1 /* We are using the elib_malloc */
-#define WARN_MALLOC_MIX 2 /* Warn if plain malloc or save_malloc
- is mixed with sys_free2 or
- sys_realloc2 */
-#define REALLOC_MOVES 4 /* Always move on realloc
- (less fragmentation) */
-#define USER_POOL 8 /* The user supplied the memory
- pool, it was not save_alloced. */
-#define RECLAIM_USER_POOL 16 /* Use the reclaim mechanism in the
- user pool. */
-#define NEW_USER_POOL 32 /* The user pool is newly supplied,
- any old pool should be discarded */
-
-
-#define ELIB_LOCK \
-if(alloc_flags & USING_ELIB_MALLOC) \
- semTake(elib_malloc_sem, WAIT_FOREVER)
-
-#define ELIB_UNLOCK \
-if(alloc_flags & USING_ELIB_MALLOC) \
- semGive(elib_malloc_sem)
-
-#define USER_RECLAIM() ((alloc_flags & USING_ELIB_MALLOC) && \
- (alloc_flags & USER_POOL) && \
- (alloc_flags & RECLAIM_USER_POOL))
-
-/*
- * Global state
- * The use of function pointers for the malloc/realloc/free functions
- * is actually only useful in the malloc case; we must know what kind of
- * realloc/free we are going to use anyway, so we could call elib_xxx directly.
- * However, as the overhead is small and this construction makes it
- * fairly easy to add another malloc algorithm, the function pointers
- * are used in realloc/free too.
- */
-static MallocFunction actual_alloc = &save_malloc;
-static ReallocFunction actual_realloc = &save_realloc;
-static FreeFunction actual_free = &save_free;
-static int alloc_flags = 0;
-static int alloc_pool_size = 0;
-static void *alloc_pool_ptr = NULL;
-static SEM_ID elib_malloc_sem = NULL;
-
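The block comment above describes selecting an allocator through function pointers so that another malloc algorithm can be slotted in without recompiling. The toy sketch below shows the same idea in isolation: pool_malloc()/pool_free() are a dummy bump allocator invented for this example (standing in for elib_malloc/elib_free), with the default pointers aimed at the system malloc()/free().

#include <stdio.h>
#include <stdlib.h>

typedef void *(*MallocFn)(size_t);
typedef void  (*FreeFn)(void *);

static unsigned char pool[1024];   /* dummy fixed pool */
static size_t pool_used;

/* Dummy bump allocator standing in for elib_malloc; never reclaims. */
static void *pool_malloc(size_t n)
{
    void *p = NULL;
    if (pool_used + n <= sizeof(pool)) {
        p = pool + pool_used;
        pool_used += n;
    }
    return p;
}

static void pool_free(void *p)
{
    (void) p;                      /* no-op in this toy allocator */
}

static MallocFn actual_alloc = malloc;   /* default strategy */
static FreeFn   actual_free  = free;

int main(void)
{
    void *a, *b;

    a = actual_alloc(32);          /* routed to the system malloc() */
    actual_free(a);

    actual_alloc = pool_malloc;    /* switch strategy at run time */
    actual_free  = pool_free;
    b = actual_alloc(32);          /* now routed to the pool */
    printf("pool allocation %s\n", b ? "succeeded" : "failed");
    actual_free(b);
    return 0;
}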
-/*
- * Decide if we should use save_free instead of elib_free or,
- * in the case of the free used in a delete hook, if we should
- * use plain free instead of elib_free.
- */
-static int use_save_free(void *ptr){
- register int diff = ((char *) ptr) - ((char *) alloc_pool_ptr);
- /*
- * Hmmm... should it be save_free even if diff is exactly 0?
- * The answer is Yes if the whole area is save_alloced and No if not,
- * so reclaim_free_hook is NOT run in the case of one save_alloced area.
- */
- return (!(alloc_flags & USING_ELIB_MALLOC) ||
- (diff < 0 || diff >= alloc_pool_size));
-}
-
-/*
- * A free function used by the task deletion hook for the save_xxx functions.
- * Set with the set_reclaim_free_function function.
- */
-static void reclaim_free_hook(void *ptr){
- if(use_save_free(ptr)){
- free(ptr);
- } else {
- ELIB_LOCK;
- (*actual_free)(ptr);
- ELIB_UNLOCK;
- }
-}
-
-
-/*
- * Initialize: set the values of the pointers based on
- * either nothing (the default) or what's set previously by the
- * erl_set_memory_block function.
- */
-static void initialize_allocation(void){
- set_reclaim_free_function(NULL);
- if(alloc_pool_size == 0){
- actual_alloc = (void *(*)(size_t))&save_malloc;
- actual_realloc = (void *(*)(void *, size_t))&save_realloc;
- actual_free = &save_free;
- alloc_flags &= ~(USING_ELIB_MALLOC | USER_POOL | RECLAIM_USER_POOL);
- } else {
- if(elib_malloc_sem == NULL)
- elib_malloc_sem = semMCreate
- (SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE);
- if(elib_malloc_sem == NULL)
- erl_exit(1,"Could not create mutex semaphore for elib_malloc");
- if(!(alloc_flags & USER_POOL)){
- if((alloc_pool_ptr = save_malloc(alloc_pool_size)) == NULL)
- erl_exit(1,"Erlang set to allocate a %d byte block initially;"
- " not enough memory available.", alloc_pool_size);
- elib_force_init(alloc_pool_ptr, alloc_pool_size);
- } else if(alloc_flags & NEW_USER_POOL){
- elib_force_init(alloc_pool_ptr, alloc_pool_size);
- }
- actual_alloc=&elib_malloc;
- actual_realloc=&elib_realloc;
- actual_free=&elib_free;
- alloc_flags |= USING_ELIB_MALLOC;
- /* We MUST see to that the right free function is used
- otherwise we'll get a very nasty crash! */
- if(USER_RECLAIM())
- set_reclaim_free_function(&reclaim_free_hook);
- }
- alloc_flags &= ~(NEW_USER_POOL); /* It's never new after initialization*/
-}
-
-/* This does not exist on other platforms, we just use it in sys.c
- and the BSD resolver */
-void *sys_calloc2(Uint nelem, Uint elsize){
- void *ptr = erts_alloc_fnf(ERTS_ALC_T_UNDEF, nelem*elsize);
- if(ptr != NULL)
- memset(ptr,0,nelem*elsize);
- return ptr;
-}
-
-/*
- * The malloc wrapper
- */
-void *
-erts_sys_alloc(ErtsAlcType_t type, void *extra, Uint size)
-{
- register void *ret;
- ELIB_LOCK;
- if(USER_RECLAIM())
- ret = save_malloc2((size_t)size,actual_alloc);
- else
- ret = (*actual_alloc)((size_t)size);
- ELIB_UNLOCK;
- return ret;
-}
-
-/*
- * The realloc wrapper, may respond to the "realloc-always-moves" flag
- * if the area is initially allocated with elib_malloc.
- */
-void *
-erts_sys_realloc(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
-{
- register void *ret;
- if(use_save_free(ptr)){
- if((alloc_flags & WARN_MALLOC_MIX) &&
- (alloc_flags & USING_ELIB_MALLOC))
- erts_fprintf(stderr,"Warning, save_malloced data realloced "
- "by sys_realloc2\n");
- return save_realloc(ptr, (size_t) size);
- } else {
- ELIB_LOCK;
- if((alloc_flags & REALLOC_MOVES) &&
- (alloc_flags & USING_ELIB_MALLOC)){
- size_t osz = elib_sizeof(ptr);
- if(USER_RECLAIM())
- ret = save_malloc2((size_t) size, actual_alloc);
- else
- ret = (*actual_alloc)((size_t) size);
- if(ret != NULL){
- memcpy(ret,ptr,(((size_t)size) < osz) ? ((size_t)size) : osz);
- if(USER_RECLAIM())
- save_free2(ptr,actual_free);
- else
- (*actual_free)(ptr);
- }
- } else {
- if(USER_RECLAIM())
- ret = save_realloc2(ptr,(size_t)size,actual_realloc);
- else
- ret = (*actual_realloc)(ptr,(size_t)size);
- }
- ELIB_UNLOCK;
- return ret;
- }
-}
-
-/*
- * Wrapped free().
- */
-void
-erts_sys_free(ErtsAlcType_t type, void *extra, void *ptr)
-{
- if(use_save_free(ptr)){
- /*
- * This might happen when linked in drivers use save_malloc etc
- * directly.
- */
- if((alloc_flags & WARN_MALLOC_MIX) &&
- (alloc_flags & USING_ELIB_MALLOC))
- erts_fprintf(stderr,"Warning, save_malloced data freed by "
- "sys_free2\n");
- save_free(ptr);
- } else {
- ELIB_LOCK;
- if(USER_RECLAIM())
- save_free2(ptr,actual_free);
- else
- (*actual_free)(ptr);
- ELIB_UNLOCK;
- }
-}
-
-/*
- * External interface to be called before erlang is started
- * Parameters:
- * isize: The size of the memory block where erlang should malloc().
- * iptr: (optional) A pointer to a user supplied memory block of
- * size isize.
- * warn_save: Instructs sys_free2 and sys_realloc2 to warn if
- * memory allocation/reallocation/freeing is mixed between
- * pure malloc/save_malloc/sys_alloc2 routines (only
- * warns if elib is actually used in the sys_alloc2 routines).
- * realloc_moves: Always allocate a fresh memory block on reallocation
- * (less fragmentation).
- * reclaim_in_supplied: Use memory reclaim mechanisms inside the user
- * supplied area; this makes one area reusable between
- * starts of erlang and might be nice for drivers etc.
- */
-
-int erl_set_memory_block(int isize, int iptr, int warn_save,
- int realloc_moves, int reclaim_in_supplied, int p5,
- int p6, int p7, int p8, int p9){
- if(erlang_id != 0){
- erts_fprintf(stderr,"Error, cannot set erlang memory block while an "
- "erlang task is running!\n");
- return 1;
- }
- if(isize < 8 * 1024 *1024)
- erts_fprintf(stderr,
- "Warning, the memory pool of %dMb may be too small to "
- "run erlang in!\n", isize / (1024 * 1024));
- alloc_pool_size = (size_t) isize;
- alloc_pool_ptr = (void *) iptr;
- alloc_flags = 0;
- /* USING_ELIB_MALLOC gets set by the initialization routine */
- if((void *)iptr != NULL)
- alloc_flags |= (USER_POOL | NEW_USER_POOL);
- if(realloc_moves)
- alloc_flags |= REALLOC_MOVES;
- if(warn_save)
- alloc_flags |= WARN_MALLOC_MIX;
- if((void *)iptr != NULL && reclaim_in_supplied)
- alloc_flags |= RECLAIM_USER_POOL;
- return 0;
-}
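A minimal caller sketch for the interface documented above (hypothetical, not part of this patch): the pool buffer, its 32 MB size, and the wrapper name are invented for illustration, and the trailing zeros are the unused p5..p9 shell-padding arguments.

/* Hypothetical usage sketch: hand Erlang a user-supplied 32 MB pool,
 * with mixed-malloc warnings on and in-pool reclaim enabled. */
static char erl_pool[32 * 1024 * 1024];

static int setup_erlang_memory(void)
{
    /* isize, iptr, warn_save, realloc_moves, reclaim_in_supplied, p5..p9 */
    return erl_set_memory_block((int) sizeof(erl_pool), (int) erl_pool,
                                1, 0, 1, 0, 0, 0, 0, 0);
}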
-
-/* External statistics interface */
-int erl_memory_show(int p0, int p1, int p2, int p3, int p4, int p5,
- int p6, int p7, int p8, int p9){
- struct elib_stat statistics;
- if(!(alloc_flags & USING_ELIB_MALLOC) && erlang_id != 0){
- erts_printf("Using plain save_alloc, use memShow instead.\n");
- return 1;
- }
- if(erlang_id == 0 && !((alloc_flags & USER_POOL) &&
- !(alloc_flags & NEW_USER_POOL))){
- erts_printf("Sorry, no allocation statistics until erlang "
- "is started.\n");
- return 1;
- }
- erts_printf("Allocation settings:\n");
- erts_printf("Using elib_malloc with memory pool size of %lu bytes.\n",
- (unsigned long) alloc_pool_size);
- erts_printf("Realloc-always-moves is %s\n",
- (alloc_flags & REALLOC_MOVES) ? "on" : "off");
- erts_printf("Warnings about mixed malloc/free's are %s\n",
- (alloc_flags & WARN_MALLOC_MIX) ? "on" : "off");
- if(alloc_flags & USER_POOL){
- erts_printf("The memory block used by elib is user supplied "
- "at 0x%08x.\n", (unsigned int) alloc_pool_ptr);
- if(alloc_flags & RECLAIM_USER_POOL)
- erts_printf("Allocated memory within the user supplied pool\n"
- " will be automatically reclaimed at task exit.\n");
- } else {
- erts_printf("The memory block used by elib is save_malloc'ed "
- "at 0x%08x.\n", (unsigned int) alloc_pool_ptr);
- }
- erts_printf("Statistics from elib_malloc:\n");
- ELIB_LOCK;
-
- elib_stat(&statistics);
- ELIB_UNLOCK;
- erts_printf("Type Size (bytes) Number of blocks\n");
- erts_printf("============= ============ ================\n");
- erts_printf("Total: %12lu %16lu\n",
- (unsigned long) statistics.mem_total*4,
- (unsigned long) statistics.mem_blocks);
- erts_printf("Allocated: %12lu %16lu\n",
- (unsigned long) statistics.mem_alloc*4,
- (unsigned long) statistics.mem_blocks-statistics.free_blocks);
- erts_printf("Free: %12lu %16lu\n",
- (unsigned long) statistics.mem_free*4,
- (unsigned long) statistics.free_blocks);
- erts_printf("Largest free: %12lu -\n\n",
- (unsigned long) statistics.max_free*4);
- return 0;
-}
-
-
-/*
-** More programmer friendly (as opposed to user friendly ;-) interface
-** to the memory statistics. Resembles the VxWorks memPartInfoGet but
-** does not take a partition id as parameter...
-*/
-int erl_mem_info_get(MEM_PART_STATS *stats){
- struct elib_stat statistics;
- if(!(alloc_flags & USING_ELIB_MALLOC))
- return -1;
- ELIB_LOCK;
- elib_stat(&statistics);
- ELIB_UNLOCK;
- stats->numBytesFree = statistics.mem_free*4;
- stats->numBlocksFree = statistics.free_blocks;
- stats->maxBlockSizeFree = statistics.max_free*4;
- stats->numBytesAlloc = statistics.mem_alloc*4;
- stats->numBlocksAlloc = statistics.mem_blocks-statistics.free_blocks;
- return 0;
-}
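A small sketch of how a caller might read back the statistics filled in above (hypothetical helper; assumes the VxWorks MEM_PART_STATS type is in scope and uses erts_printf as elsewhere in this file). The byte fields are sizes, the block fields are counts.

/* Hypothetical usage sketch: print elib allocation statistics. */
static void show_elib_stats(void)
{
    MEM_PART_STATS s;

    if (erl_mem_info_get(&s) != 0)
        return;                       /* -1: elib_malloc not in use */
    erts_printf("alloc: %lu bytes in %lu blocks, "
                "free: %lu bytes in %lu blocks (largest free %lu)\n",
                s.numBytesAlloc, s.numBlocksAlloc,
                s.numBytesFree, s.numBlocksFree, s.maxBlockSizeFree);
}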
-
-/********************* Pipe driver **********************************/
-/*
- * Purpose: Pipe driver with Unix (unnamed) pipe semantics.
- * Author: Peter Hogfeldt ([email protected]) from an outline
- * by Per Hedeland ([email protected]).
- *
- * Note: This driver must *not* use the reclaim facilities, hence it
- * is placed here (after the #undefs of open, malloc, etc.).
- *
- * This driver supports select() and non-blocking I/O via
- * ioctl(fd, FIONBIO, val).
- *
- * 1997-03-21 Peter Hogfeldt
- * Added non-blocking I/O.
- *
- */
-
-/*
- * SEMAPHORES
- *
- * Each end of a pipe has two semaphores: semExcl for serialising access to
- * the pipe end, and semBlock for blocking I/O.
- *
- * reader->semBlock is available (full) if and only if the pipe is
- * not empty, or the write end is closed. Otherwise
- * it is unavailable (empty). It is initially
- * unavailable.
- *
- * writer->semBlock is available (full) if and only if the pipe is
- * not full, or if the reader end is closed.
- * Otherwise it is unavailable. It is initially
- * available.
- */
-
-#define UXPIPE_SIZE 4096
-
-/* Forward declaration */
-typedef struct uxPipeDev UXPIPE_DEV;
-
-/*
- * Pipe descriptor (one for each open pipe).
- */
-typedef struct {
- int drvNum;
- UXPIPE_DEV *reader, *writer;
- RING_ID ringId;
-} UXPIPE;
-
-/*
- * Device descriptor (one for each of the read and write
- * ends of an open pipe).
- */
-struct uxPipeDev {
- UXPIPE *pipe;
- int blocking;
- SEL_WAKEUP_LIST wakeupList;
- SEM_ID semExcl;
- SEM_ID semBlock;
-};
-
-int uxPipeDrvNum = 0; /* driver number of pipe driver */
-
-#define PIPE_NAME "/uxpipe" /* only used internally */
-#define PIPE_READ "/r" /* ditto */
-#define PIPE_WRITE "/w" /* ditto */
-
-LOCAL char pipeRead[64], pipeWrite[64];
-LOCAL DEV_HDR devHdr;
-LOCAL UXPIPE *newPipe; /* communicate between open()s in pipe() */
-LOCAL SEM_ID pipeSem; /* mutual exclusion in pipe() */
-
-/* forward declarations */
-LOCAL int uxPipeOpen(DEV_HDR *pDv, char *name, int mode);
-LOCAL int uxPipeClose(UXPIPE_DEV *pDev);
-LOCAL int uxPipeRead(UXPIPE_DEV *pDev, char *buffer, int maxbytes);
-LOCAL int uxPipeWrite(UXPIPE_DEV *pDev, char *buffer, int nbytes);
-LOCAL STATUS uxPipeIoctl(FAST UXPIPE_DEV *pDev, FAST int function, int arg);
-
-
-/***************************************************************************
- *
- * uxPipeDrv - install Unix pipe driver
- *
- * This routine initializes the Unix pipe driver. It must be called
- * before any other routine in this driver.
- *
- * RETURNS:
- * OK, or ERROR if I/O system is unable to install driver.
- */
-
-STATUS
-uxPipeDrv(void)
-{
- if (uxPipeDrvNum > 0)
- return (OK); /* driver already installed */
- if ((uxPipeDrvNum = iosDrvInstall((FUNCPTR) NULL, (FUNCPTR) NULL,
- uxPipeOpen, uxPipeClose, uxPipeRead,
- uxPipeWrite, uxPipeIoctl)) == ERROR)
- return (ERROR);
- if (iosDevAdd(&devHdr, PIPE_NAME, uxPipeDrvNum) == ERROR)
- return (ERROR);
- strcpy(pipeRead, PIPE_NAME);
- strcat(pipeRead, PIPE_READ);
- strcpy(pipeWrite, PIPE_NAME);
- strcat(pipeWrite, PIPE_WRITE);
- if ((pipeSem = semMCreate(SEM_Q_PRIORITY | SEM_DELETE_SAFE)) == NULL)
- return (ERROR);
- return (OK);
-}
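Per the note above, uxPipeDrv() must run before any other routine in this driver; a hypothetical start-up hook (the hook name is invented) might install it like this.

/* Hypothetical start-up hook: install the pipe driver once. */
static STATUS init_ux_pipes(void)
{
    if (uxPipeDrv() == ERROR) {
        erts_fprintf(stderr, "uxPipeDrv: could not install pipe driver\n");
        return ERROR;
    }
    return OK;
}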
-
-/***************************************************************************
- *
- * uxPipeOpen - open a pipe
- *
- * RETURNS: Pointer to device descriptor, or ERROR if memory cannot be
- * allocated (errno = ENOMEM), or if an argument is invalid (errno = EINVAL).
- */
-
-/*
- * DEV_HDR *pDv; pointer to device header (dummy)
- * char *name; name of pipe to open ("/r" or "/w")
- * int mode; access mode (O_RDONLY or O_WRONLY)
- */
-LOCAL int
-uxPipeOpen(DEV_HDR *pDv, char *name, int mode)
-{
- UXPIPE_DEV *reader, *writer;
-
- if (mode == O_RDONLY && strcmp(name, PIPE_READ) == 0) {
- /* reader open */
- if ((newPipe = (UXPIPE *) malloc(sizeof(UXPIPE))) != NULL) {
- if ((newPipe->ringId = rngCreate(UXPIPE_SIZE)) != NULL) {
- if ((reader = (UXPIPE_DEV *) malloc(sizeof(UXPIPE_DEV))) != NULL) {
- if ((reader->semExcl = semBCreate(SEM_Q_FIFO, SEM_FULL)) != NULL) {
- if ((reader->semBlock = semBCreate(SEM_Q_FIFO, SEM_EMPTY)) != NULL) {
- reader->pipe = newPipe;
- reader->blocking = 1;
- selWakeupListInit(&reader->wakeupList);
- newPipe->reader = reader;
- newPipe->writer = NULL;
- newPipe->drvNum = uxPipeDrvNum;
- return ((int) reader);
- }
- semDelete(reader->semExcl);
- }
- free(reader);
- }
- rngDelete(newPipe->ringId);
- }
- free(newPipe);
- newPipe = NULL;
- errno = ENOMEM;
- }
- } else if (mode == O_WRONLY && strcmp(name, PIPE_WRITE) == 0) {
- /* writer open */
- if (newPipe != NULL &&
- (writer = (UXPIPE_DEV *) malloc(sizeof(UXPIPE_DEV))) != NULL) {
- if ((writer->semExcl = semBCreate(SEM_Q_FIFO, SEM_FULL)) != NULL) {
- if ((writer->semBlock = semBCreate(SEM_Q_FIFO, SEM_FULL)) != NULL) {
- writer->blocking = 1;
- writer->pipe = newPipe;
- selWakeupListInit(&writer->wakeupList);
- newPipe->writer = writer;
- newPipe = NULL;
- return ((int) writer);
- }
- semDelete(writer->semExcl);
- }
- free(writer);
- }
- if (newPipe != NULL)
- free(newPipe);
- newPipe = NULL;
- errno = ENOMEM;
- } else {
- errno = EINVAL;
- }
- return (ERROR);
-}
-
-/***************************************************************************
- *
- * uxPipeClose - close read or write end of a pipe.
- *
- * RETURNS:
- * OK, or ERROR if device descriptor does not refer to an open read or
- * write end of a pipe (errno = EBADF).
- */
-
-LOCAL int
-uxPipeClose(UXPIPE_DEV *pDev)
-{
- UXPIPE *pajp = pDev->pipe;
-
- taskLock();
- if (pDev == pajp->reader) {
- /* Close this end */
- semDelete(pDev->semExcl);
- semDelete(pDev->semBlock);
- free(pDev);
- pajp->reader = NULL;
- /* Inform the other end */
- if (pajp->writer != NULL) {
- selWakeupAll(&pajp->writer->wakeupList, SELWRITE);
- semGive(pajp->writer->semBlock);
- }
- } else if (pDev == pajp->writer) {
- /* Close this end */
- semDelete(pDev->semExcl);
- semDelete(pDev->semBlock);
- free(pDev);
- pajp->writer = NULL;
- /* Inform the other end */
- if (pajp->reader != NULL) {
- selWakeupAll(&pajp->reader->wakeupList, SELREAD);
- semGive(pajp->reader->semBlock);
- }
- } else {
- errno = EBADF;
- taskUnlock();
- return (ERROR);
- }
- if (pajp->reader == NULL && pajp->writer == NULL) {
- rngDelete(pajp->ringId);
- pajp->drvNum = 0;
- free(pajp);
- }
- taskUnlock();
- return (OK);
-}
-/***************************************************************************
- *
- * uxPipeRead - read from a pipe.
- *
- * Reads at most maxbytes bytes from the pipe. Blocks if blocking mode is
- * set and the pipe is empty.
- *
- * RETURNS:
- * number of bytes read, 0 on EOF, or ERROR if device descriptor does
- * not refer to an open read end of a pipe (errno = EBADF), or if
- * non-blocking mode is set and the pipe is empty (errno = EWOULDBLOCK).
- */
-
-LOCAL int
-uxPipeRead(UXPIPE_DEV *pDev, char *buffer, int maxbytes)
-{
- UXPIPE *pajp = pDev->pipe;
- int nbytes = 0;
-
- if (pDev != pajp->reader) {
- errno = EBADF;
- return (ERROR);
- }
- if (maxbytes == 0)
- return (0);
- semTake(pDev->semExcl, WAIT_FOREVER);
- /*
- * Note that semBlock may be full, although there is nothing to read.
- * This happens e.g. after the following sequence of operations: a
- * reader task blocks, a writer task writes two times (the first
- * write unblocks the reader task, the second write makes semBlock
- * full).
- */
- while (nbytes == 0) {
- if (pDev->blocking)
- semTake(pDev->semBlock, WAIT_FOREVER);
- /*
- * Reading and updating of the write end must not be interleaved
- * with a write from another task - hence we lock this task.
- */
- taskLock();
- nbytes = rngBufGet(pajp->ringId, buffer, maxbytes);
- if (nbytes > 0) {
- /* Give own semaphore if bytes remain or if write end is closed */
- if ((!rngIsEmpty(pajp->ringId) || pajp->writer == NULL) &&
- pDev->blocking)
- semGive(pDev->semBlock);
- /* Inform write end */
- if (pajp->writer != NULL) {
- if (pajp->writer->blocking)
- semGive(pajp->writer->semBlock);
- selWakeupAll(&pajp->writer->wakeupList, SELWRITE);
- }
- } else if (pajp->writer == NULL) {
- nbytes = 0; /* EOF */
- /* Give semaphore when write end is closed */
- if (pDev->blocking)
- semGive(pDev->semBlock);
- taskUnlock();
- semGive(pDev->semExcl);
- return (nbytes);
- } else if (!pDev->blocking) {
- taskUnlock();
- semGive(pDev->semExcl);
- errno = EWOULDBLOCK;
- return (ERROR);
- }
- taskUnlock();
- }
- semGive(pDev->semExcl);
- return (nbytes);
-}
-
-/***************************************************************************
- *
- * uxPipeWrite - write to a pipe.
- *
- * Writes nbytes bytes to the pipe. Blocks if blocking mode is set, and if
- * the pipe is full.
- *
- * RETURNS:
- * number of bytes written, or ERROR if the device descriptor does not
- * refer to an open write end of a pipe (errno = EBADF); or if the read end
- * of the pipe is closed (errno = EPIPE); or if non-blocking mode is set
- * and the pipe is full (errno = EWOULDBLOCK).
- *
- */
-
-LOCAL int
-uxPipeWrite(UXPIPE_DEV *pDev, char *buffer, int nbytes)
-{
-
- UXPIPE *pajp = pDev->pipe;
- int sofar = 0, written;
-
- if (pDev != pajp->writer) {
- errno = EBADF;
- return (ERROR);
- }
- if (pajp->reader == NULL) {
- errno = EPIPE;
- return (ERROR);
- }
- if (nbytes == 0)
- return (0);
- semTake(pDev->semExcl, WAIT_FOREVER);
- while (sofar < nbytes) {
- if (pDev->blocking)
- semTake(pDev->semBlock, WAIT_FOREVER);
- if (pajp->reader == NULL) {
- errno = EPIPE;
- semGive(pDev->semBlock);
- semGive(pDev->semExcl);
- return (ERROR);
- }
- /* Writing and updating of the read end must not be interleaved
- * with a read from another task - hence we lock this task.
- */
- taskLock();
- written = rngBufPut(pajp->ringId, buffer + sofar, nbytes - sofar);
- sofar += written;
- /* Inform the read end if we really wrote something */
- if (written > 0 && pajp->reader != NULL) {
- selWakeupAll(&pajp->reader->wakeupList, SELREAD);
- if (pajp->reader->blocking)
- semGive(pajp->reader->semBlock);
- }
- taskUnlock();
- if (!pDev->blocking) {
- if (sofar == 0) {
- errno = EWOULDBLOCK;
- sofar = ERROR;
- }
- break;
- }
- }
- /* Give own semaphore if space remains */
- if (!rngIsFull(pajp->ringId) && pDev->blocking)
- semGive(pDev->semBlock);
- semGive(pDev->semExcl);
- return (sofar);
-}
-
-/***************************************************************************
- *
- * uxPipeIoctl - do device specific I/O control
- *
- * RETURNS:
- * OK or ERROR.
- */
-
-LOCAL STATUS
-uxPipeIoctl(FAST UXPIPE_DEV *pDev, FAST int function, int arg)
-
-{
- UXPIPE *pajp = pDev->pipe;
- int status = OK;
-
- switch (function) {
- case FIONBIO:
- pDev->blocking = (*(int *)arg) ? 0 : 1;
- break;
- case FIOSELECT:
- taskLock();
- selNodeAdd(&pDev->wakeupList, (SEL_WAKEUP_NODE *) arg);
- if (selWakeupType((SEL_WAKEUP_NODE *) arg) == SELREAD &&
- pDev == pajp->reader &&
- (!rngIsEmpty(pajp->ringId) || pajp->writer == NULL))
- selWakeup((SEL_WAKEUP_NODE *) arg);
- if (selWakeupType((SEL_WAKEUP_NODE *) arg) == SELWRITE &&
- pDev == pajp->writer &&
- (!rngIsFull(pajp->ringId) || pajp->reader == NULL))
- selWakeup((SEL_WAKEUP_NODE *) arg);
- taskUnlock();
- break;
- case FIOUNSELECT:
- selNodeDelete(&pDev->wakeupList, (SEL_WAKEUP_NODE *) arg);
- break;
- default:
- status = ERROR;
- break;
- }
- return (status);
-}
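The FIONBIO case above reads the flag through the pointer passed as arg, so a caller hands in the address of an int cast to the ioctl argument; a hypothetical helper (name invented) could look like this.

/* Hypothetical helper: switch a ux pipe descriptor to non-blocking mode.
 * A non-zero value selects non-blocking, zero restores blocking mode. */
static STATUS set_pipe_nonblocking(int fd, int on)
{
    return ioctl(fd, FIONBIO, (int) &on);
}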
-
-/***************************************************************************
- *
- * pipe - create an intertask channel
- *
- * Creates a pipe. fd[0] (fd[1]) is the read (write) file descriptor.
- *
- * RETURNS:
- * OK or ERROR, if the pipe could not be created.
- */
-
-STATUS
-pipe(int fd[2])
-{
- semTake(pipeSem, WAIT_FOREVER);
- if ((fd[0] = open(pipeRead, O_RDONLY, 0)) != ERROR) {
- if ((fd[1] = open(pipeWrite, O_WRONLY, 0)) != ERROR) {
- semGive(pipeSem);
- return (OK);
- }
- (void) close(fd[0]);
- }
- errno &= 0xFFFF;
- if((errno & 0xFFFF) == EINTR) /* Why on earth EINTR??? */
- errno = ENFILE; /* It means we are out of file descriptors...*/
- semGive(pipeSem);
- return (ERROR);
-}
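A usage sketch of the call above (hypothetical test code, not from the patch): fd[0] is the read end and fd[1] the write end, and per uxPipeRead a read returns 0 (EOF) once the write end is closed and the buffer is drained.

/* Hypothetical usage sketch: pass one message through a pipe, then
 * observe EOF after the write end has been closed. */
static void pipe_smoke_test(void)
{
    int fd[2];
    char msg[] = "ping";
    char buf[16];
    int n;

    if (pipe(fd) == ERROR)
        return;
    (void) write(fd[1], msg, sizeof(msg) - 1);
    n = read(fd[0], buf, sizeof(buf));   /* n == 4 */
    (void) close(fd[1]);                 /* close the write end ...    */
    n = read(fd[0], buf, sizeof(buf));   /* ... read now returns 0     */
    (void) n;
    (void) close(fd[0]);
}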
-
-/***************************************************************************
- *
- * uxPipeShow - display pipe information
- *
- * RETURNS:
- * N/A.
- */
-
-void
-uxPipeShow(int fd)
-{
- UXPIPE_DEV *pDev;
- UXPIPE *pajp;
- int drvValue;
-
- if ((drvValue = iosFdValue(fd)) == ERROR) {
- erts_fprintf(stderr, "Error: file descriptor invalid\n");
- return;
- }
- pDev = (UXPIPE_DEV *)drvValue;
- pajp = pDev->pipe;
- if (pajp->drvNum != uxPipeDrvNum) {
- erts_fprintf(stderr, "Error: Not a ux pipe device\n");
- return;
- }
- erts_fprintf(stderr, "Device : 0x%x\n", (int) pDev);
- erts_fprintf(stderr, "Buffer size : %d\n", UXPIPE_SIZE);
- erts_fprintf(stderr, "Bytes in buffer : %d\n\n", rngNBytes(pajp->ringId));
- erts_fprintf(stderr, "READ END\n\n");
- if (pajp->reader != NULL) {
- erts_fprintf(stderr, "Mode : ");
- erts_fprintf(stderr, "%s\n",
- (pajp->reader->blocking) ? "blocking" : "non-blocking");
- }
- erts_fprintf(stderr, "Status : ");
- if (pajp->reader != NULL) {
- erts_fprintf(stderr, "OPEN\n");
- erts_fprintf(stderr, "Wake-up list : %d\n\n",
- selWakeupListLen(&pajp->reader->wakeupList));
- erts_fprintf(stderr, "Exclusion Semaphore\n");
- semShow(pajp->reader->semExcl, 1);
- erts_fprintf(stderr, "Blocking Semaphore\n");
- semShow(pajp->reader->semBlock, 1);
- } else
- erts_fprintf(stderr, "CLOSED\n\n");
- erts_fprintf(stderr, "WRITE END\n\n");
- if (pajp->writer != NULL) {
- erts_fprintf(stderr, "Mode : ");
- erts_fprintf(stderr, "%s\n",
- (pajp->writer->blocking) ? "blocking" : "non-blocking");
- }
- erts_fprintf(stderr, "Status : ");
- if (pajp->writer != NULL) {
- erts_fprintf(stderr, "OPEN\n");
- erts_fprintf(stderr, "Wake-up list : %d\n\n",
- selWakeupListLen(&pajp->writer->wakeupList));
- erts_fprintf(stderr, "Exclusion Semaphore\n");
- semShow(pajp->writer->semExcl, 1);
- erts_fprintf(stderr, "Blocking Semaphore\n");
- semShow(pajp->writer->semBlock, 1);
- } else
- erts_fprintf(stderr, "CLOSED\n\n");
-}
-
-#ifdef DEBUG
-void
-erl_assert_error(char* expr, char* file, int line)
-{
- fflush(stdout);
- fprintf(stderr, "Assertion failed: %s in %s, line %d\n",
- expr, file, line);
- fflush(stderr);
- erl_crash_dump(file, line, "Assertion failed: %s\n", expr);
- abort();
-}
-void
-erl_debug(char* fmt, ...)
-{
- char sbuf[1024]; /* Temporary buffer. */
- va_list va;
-
- va_start(va, fmt);
- vsprintf(sbuf, fmt, va);
- va_end(va);
- fprintf(stderr, "%s\n", sbuf);
-}
-#endif
diff --git a/erts/emulator/sys/win32/erl_win32_sys_ddll.c b/erts/emulator/sys/win32/erl_win32_sys_ddll.c
index d00eed932b..54991a610c 100644
--- a/erts/emulator/sys/win32/erl_win32_sys_ddll.c
+++ b/erts/emulator/sys/win32/erl_win32_sys_ddll.c
@@ -58,7 +58,7 @@ void erl_sys_ddll_init(void) {
/*
* Open a shared object
*/
-int erts_sys_ddll_open2(char *full_name, void **handle, ErtsSysDdllError* err)
+int erts_sys_ddll_open2(const char *full_name, void **handle, ErtsSysDdllError* err)
{
int len;
char dlname[MAXPATHLEN + 1];
@@ -92,7 +92,7 @@ int erts_sys_ddll_open_noext(char *dlname, void **handle, ErtsSysDdllError* err)
/*
* Find a symbol in the shared object
*/
-int erts_sys_ddll_sym2(void *handle, char *func_name, void **function,
+int erts_sys_ddll_sym2(void *handle, const char *func_name, void **function,
ErtsSysDdllError* err)
{
FARPROC proc;
diff --git a/erts/emulator/sys/win32/erl_win_dyn_driver.h b/erts/emulator/sys/win32/erl_win_dyn_driver.h
index ec5141838a..8b6be2b2f1 100644
--- a/erts/emulator/sys/win32/erl_win_dyn_driver.h
+++ b/erts/emulator/sys/win32/erl_win_dyn_driver.h
@@ -74,7 +74,9 @@ WDD_TYPEDEF(ErlDrvTermData, driver_mk_port,(ErlDrvPort));
WDD_TYPEDEF(ErlDrvTermData, driver_connected,(ErlDrvPort));
WDD_TYPEDEF(ErlDrvTermData, driver_caller,(ErlDrvPort));
WDD_TYPEDEF(ErlDrvTermData, driver_mk_term_nil,(void));
+WDD_TYPEDEF(int, erl_drv_output_term, (ErlDrvTermData, ErlDrvTermData*, int));
WDD_TYPEDEF(int, driver_output_term, (ErlDrvPort, ErlDrvTermData*, int));
+WDD_TYPEDEF(int, erl_drv_send_term, (ErlDrvTermData, ErlDrvTermData, ErlDrvTermData*, int));
WDD_TYPEDEF(int, driver_send_term, (ErlDrvPort, ErlDrvTermData, ErlDrvTermData*, int));
WDD_TYPEDEF(long, driver_async, (ErlDrvPort,unsigned int*,void (*)(void*),void*,void (*)(void*)));
WDD_TYPEDEF(int, driver_async_cancel, (unsigned int));
@@ -187,7 +189,9 @@ typedef struct {
WDD_FTYPE(driver_connected) *driver_connected;
WDD_FTYPE(driver_caller) *driver_caller;
WDD_FTYPE(driver_mk_term_nil) *driver_mk_term_nil;
+ WDD_FTYPE(erl_drv_output_term) *erl_drv_output_term;
WDD_FTYPE(driver_output_term) *driver_output_term;
+ WDD_FTYPE(erl_drv_send_term) *erl_drv_send_term;
WDD_FTYPE(driver_send_term) *driver_send_term;
WDD_FTYPE(driver_async) *driver_async;
WDD_FTYPE(driver_async_cancel) *driver_async_cancel;
@@ -294,7 +298,9 @@ extern TWinDynDriverCallbacks WinDynDriverCallbacks;
#define driver_connected (WinDynDriverCallbacks.driver_connected)
#define driver_caller (WinDynDriverCallbacks.driver_caller)
#define driver_mk_term_nil (WinDynDriverCallbacks.driver_mk_term_nil)
+#define erl_drv_output_term (WinDynDriverCallbacks.erl_drv_output_term)
#define driver_output_term (WinDynDriverCallbacks.driver_output_term)
+#define erl_drv_send_term (WinDynDriverCallbacks.erl_drv_send_term)
#define driver_send_term (WinDynDriverCallbacks.driver_send_term)
#define driver_async (WinDynDriverCallbacks.driver_async)
#define driver_async_cancel (WinDynDriverCallbacks.driver_async_cancel)
@@ -425,7 +431,9 @@ do { \
((W).driver_connected) = driver_connected; \
((W).driver_caller) = driver_caller; \
((W).driver_mk_term_nil) = driver_mk_term_nil; \
+((W).erl_drv_output_term) = erl_drv_output_term; \
((W).driver_output_term) = driver_output_term; \
+((W).erl_drv_send_term) = erl_drv_send_term; \
((W).driver_send_term) = driver_send_term; \
((W).driver_async) = driver_async; \
((W).driver_async_cancel) = driver_async_cancel; \
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
index b6f11209ba..5ce1a61303 100644
--- a/erts/emulator/sys/win32/erl_win_sys.h
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -90,6 +90,11 @@
#define strncasecmp _strnicmp
+#ifndef __GNUC__
+# undef ERTS_I64_LITERAL
+# define ERTS_I64_LITERAL(X) X##i64
+#endif
+
/*
 * Practical Windows-specific macros.
*/
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index 50f6219374..1cd9072cea 100755
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -87,9 +87,6 @@ static erts_smp_tsd_key_t win32_errstr_key;
static erts_smp_atomic_t pipe_creation_counter;
-static erts_smp_mtx_t sys_driver_data_lock;
-
-
/* Results from application_type(_w) is one of */
#define APPL_NONE 0
#define APPL_DOS 1
@@ -97,7 +94,6 @@ static erts_smp_mtx_t sys_driver_data_lock;
#define APPL_WIN32 3
static int driver_write(long, HANDLE, byte*, int);
-static void common_stop(int);
static int create_file_thread(struct async_io* aio, int mode);
#ifdef ERTS_SMP
static void close_active_handle(ErlDrvPort, HANDLE handle);
@@ -115,9 +111,6 @@ BOOL WINAPI ctrl_handler(DWORD dwCtrlType);
#define PORT_BUFSIZ 4096
-#define PORT_FREE (-1)
-#define PORT_EXITING (-2)
-
#define DRV_BUF_ALLOC(SZ) \
erts_alloc_fnf(ERTS_ALC_T_DRV_DATA_BUF, (SZ))
#define DRV_BUF_REALLOC(P, SZ) \
@@ -255,8 +248,7 @@ void erl_sys_args(int* argc, char** argv)
#endif
}
-void
-erts_sys_prepare_crash_dump(int secs)
+int erts_sys_prepare_crash_dump(int secs)
{
Port *heart_port;
Eterm heap[3];
@@ -270,11 +262,16 @@ erts_sys_prepare_crash_dump(int secs)
list = CONS(hp, make_small(8), list); hp += 2;
/* send to heart port, CMD = 8, i.e. prepare crash dump =o */
- erts_write_to_port(NIL, heart_port, list);
+ erts_port_output(NULL, ERTS_PORT_SIG_FLG_FORCE_IMM_CALL, heart_port,
+ heart_port->common.id, list, NULL);
+
+ return 1;
}
/* Windows - free file descriptors are hopefully available */
/* Alarm not used on windows */
+
+ return 0;
}
static void
@@ -471,7 +468,7 @@ typedef struct driver_data {
byte *inbuf; /* Buffer to use for overlapped read. */
int outBufSize; /* Size of output buffer. */
byte *outbuf; /* Buffer to use for overlapped write. */
- ErlDrvPort port_num; /* The port number. */
+ ErlDrvPort port_num; /* The port handle. */
 int packet_bytes; /* 0: continuous stream, 1, 2, or 4: the number
* of bytes in the packet header.
*/
@@ -481,8 +478,6 @@ typedef struct driver_data {
int report_exit; /* Do report exit status for the port */
} DriverData;
-static DriverData* driver_data; /* Pointer to array of driver data. */
-
/* Driver interfaces */
static ErlDrvData spawn_start(ErlDrvPort, char*, SysDriverOpts*);
static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
@@ -594,67 +589,53 @@ struct erl_drv_entry vanilla_driver_entry = {
*/
static DriverData*
-new_driver_data(int port_num, int packet_bytes, int wait_objs_required, int use_threads)
+new_driver_data(ErlDrvPort port_num, int packet_bytes, int wait_objs_required, int use_threads)
{
DriverData* dp;
-
- erts_smp_mtx_lock(&sys_driver_data_lock);
- DEBUGF(("new_driver_data(port_num %d, pb %d)\n",
- port_num, packet_bytes));
+ DEBUGF(("new_driver_data(%p, pb %d)\n", port_num, packet_bytes));
+ dp = driver_alloc(sizeof(DriverData));
+ if (!dp)
+ return NULL;
/*
 * We used to first test that there is enough room in the
* array used by WaitForMultipleObjects(), but that is not necessary
* any more, since driver_select() can't fail.
*/
- /*
- * Search for a free slot.
- */
+ dp->bytesInBuffer = 0;
+ dp->totalNeeded = packet_bytes;
+ dp->inBufSize = PORT_BUFSIZ;
+ dp->inbuf = DRV_BUF_ALLOC(dp->inBufSize);
+ if (dp->inbuf == NULL)
+ goto buf_alloc_error;
+ erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize);
+ dp->outBufSize = 0;
+ dp->outbuf = NULL;
+ dp->port_num = port_num;
+ dp->packet_bytes = packet_bytes;
+ dp->port_pid = INVALID_HANDLE_VALUE;
+ if (init_async_io(&dp->in, use_threads) == -1)
+ goto async_io_error1;
+ if (init_async_io(&dp->out, use_threads) == -1)
+ goto async_io_error2;
- for (dp = driver_data; dp < driver_data+max_files; dp++) {
- if (dp->port_num == PORT_FREE) {
- dp->bytesInBuffer = 0;
- dp->totalNeeded = packet_bytes;
- dp->inBufSize = PORT_BUFSIZ;
- dp->inbuf = DRV_BUF_ALLOC(dp->inBufSize);
- if (dp->inbuf == NULL) {
- erts_smp_mtx_unlock(&sys_driver_data_lock);
- return NULL;
- }
- erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize);
- dp->outBufSize = 0;
- dp->outbuf = NULL;
- dp->port_num = port_num;
- dp->packet_bytes = packet_bytes;
- dp->port_pid = INVALID_HANDLE_VALUE;
- if (init_async_io(&dp->in, use_threads) == -1)
- break;
- if (init_async_io(&dp->out, use_threads) == -1)
- break;
- erts_smp_mtx_unlock(&sys_driver_data_lock);
- return dp;
- }
- }
+ return dp;
- /*
- * Error or no free driver data.
- */
+async_io_error2:
+ release_async_io(&dp->in, dp->port_num);
+async_io_error1:
+ release_async_io(&dp->out, dp->port_num);
- if (dp < driver_data+max_files) {
- release_async_io(&dp->in, dp->port_num);
- release_async_io(&dp->out, dp->port_num);
- }
- erts_smp_mtx_unlock(&sys_driver_data_lock);
+buf_alloc_error:
+ driver_free(dp);
return NULL;
}
static void
release_driver_data(DriverData* dp)
{
- erts_smp_mtx_lock(&sys_driver_data_lock);
-
#ifdef ERTS_SMP
#ifdef USE_CANCELIOEX
if (fpCancelIoEx != NULL) {
@@ -738,8 +719,7 @@ release_driver_data(DriverData* dp)
* the exit thread.
*/
- dp->port_num = PORT_FREE;
- erts_smp_mtx_unlock(&sys_driver_data_lock);
+ driver_free(dp);
}
#ifdef ERTS_SMP
@@ -834,7 +814,6 @@ threaded_handle_closer(LPVOID param)
static ErlDrvData
set_driver_data(DriverData* dp, HANDLE ifd, HANDLE ofd, int read_write, int report_exit)
{
- int index = dp - driver_data;
int result;
dp->in.fd = ifd;
@@ -853,13 +832,12 @@ set_driver_data(DriverData* dp, HANDLE ifd, HANDLE ofd, int read_write, int repo
ERL_DRV_WRITE|ERL_DRV_USE, 1);
ASSERT(result != -1);
}
- return (ErlDrvData)index;
+ return (ErlDrvData) dp;
}
static ErlDrvData
reuse_driver_data(DriverData *dp, HANDLE ifd, HANDLE ofd, int read_write, ErlDrvPort port_num)
{
- int index = dp - driver_data;
int result;
dp->port_num = port_num;
@@ -878,7 +856,7 @@ reuse_driver_data(DriverData *dp, HANDLE ifd, HANDLE ofd, int read_write, ErlDrv
ERL_DRV_WRITE|ERL_DRV_USE, 1);
ASSERT(result != -1);
}
- return (ErlDrvData)index;
+ return (ErlDrvData) dp;
}
/*
@@ -1151,12 +1129,6 @@ spawn_init(void)
((module != NULL) ? GetProcAddress(module,"CancelIoEx") : NULL);
DEBUGF(("fpCancelIoEx = %p\r\n", fpCancelIoEx));
#endif
- driver_data = (struct driver_data *)
- erts_alloc(ERTS_ALC_T_DRV_TAB, max_files * sizeof(struct driver_data));
- erts_smp_atomic_add_nob(&sys_misc_mem_sz,
- max_files*sizeof(struct driver_data));
- for (i = 0; i < max_files; i++)
- driver_data[i].port_num = PORT_FREE;
return 0;
}
@@ -1287,9 +1259,12 @@ spawn_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
#endif
retval = set_driver_data(dp, hFromChild, hToChild, opts->read_write,
opts->exit_status);
- if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO)
- /* We assume that this cannot generate a negative number */
- erts_port[port_num].os_pid = (SWord) pid;
+ if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO) {
+ Port *prt = erts_drvport2port_raw(port_num);
+ /* We assume that this cannot generate a negative number */
+ ASSERT(prt);
+ prt->os_pid = (SWord) pid;
+ }
}
if (retval != ERL_DRV_ERROR_GENERAL && retval != ERL_DRV_ERROR_ERRNO)
@@ -2278,12 +2253,10 @@ fd_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
**/
if (!create_file_thread(&dp->in, DO_READ)) {
- dp->port_num = PORT_FREE;
return ERL_DRV_ERROR_GENERAL;
}
if (!create_file_thread(&dp->out, DO_WRITE)) {
- dp->port_num = PORT_FREE;
return ERL_DRV_ERROR_GENERAL;
}
@@ -2303,10 +2276,9 @@ fd_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
}
}
-static void fd_stop(ErlDrvData d)
+static void fd_stop(ErlDrvData data)
{
- int fd = (int)d;
- DriverData* dp = driver_data+fd;
+ DriverData * dp = (DriverData *) data;
/*
* There's no way we can terminate an fd port in a consistent way.
* Instead we let it live until it's opened again (which it is,
@@ -2369,16 +2341,10 @@ vanilla_start(ErlDrvPort port_num, char* name, SysDriverOpts* opts)
}
static void
-stop(ErlDrvData index)
+stop(ErlDrvData data)
{
- common_stop((int)index);
-}
-
-static void common_stop(int index)
-{
- DriverData* dp = driver_data+index;
-
- DEBUGF(("common_stop(%d)\n", index));
+ DriverData *dp = (DriverData *) data;
+ DEBUGF(("stop(%p)\n", dp));
if (dp->in.ov.hEvent != NULL) {
(void) driver_select(dp->port_num,
@@ -2400,7 +2366,6 @@ static void common_stop(int index)
*/
HANDLE thread;
DWORD tid;
- dp->port_num = PORT_EXITING;
thread = (HANDLE *) _beginthreadex(NULL, 0, threaded_exiter, dp, 0, &tid);
CloseHandle(thread);
}
@@ -2525,22 +2490,17 @@ threaded_exiter(LPVOID param)
static void
output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
-/* long drv_data; /* The slot to use in the driver data table.
+/* ErlDrvData drv_data; /* The slot to use in the driver data table.
* For Windows NT, this is *NOT* a file handle.
* The handle is found in the driver data.
*/
/* char *buf; /* Pointer to data to write to the port program. */
/* ErlDrvSizeT len; /* Number of bytes to write. */
{
- DriverData* dp;
+ DriverData* dp = (DriverData *) drv_data;
int pb; /* The header size for this port. */
- int port_num; /* The actual port number (for diagnostics). */
char* current;
- dp = driver_data + (int)drv_data;
- if ((port_num = dp->port_num) == -1)
- return ; /*-1;*/
-
pb = dp->packet_bytes;
if ((pb+len) == 0)
@@ -2551,7 +2511,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
*/
if ((pb == 2 && len > 65535) || (pb == 1 && len > 255)) {
- driver_failure_posix(port_num, EINVAL);
+ driver_failure_posix(dp->port_num, EINVAL);
return ; /* -1; */
}
@@ -2565,7 +2525,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
ASSERT(!dp->outbuf);
dp->outbuf = DRV_BUF_ALLOC(pb+len);
if (!dp->outbuf) {
- driver_failure_posix(port_num, ENOMEM);
+ driver_failure_posix(dp->port_num, ENOMEM);
return ; /* -1; */
}
@@ -2595,7 +2555,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
memcpy(current, buf, len);
if (!async_write_file(&dp->out, dp->outbuf, pb+len)) {
- set_busy_port(port_num, 1);
+ set_busy_port(dp->port_num, 1);
} else {
dp->out.ov.Offset += pb+len; /* For vanilla driver. */
/* XXX OffsetHigh should be changed too. */
@@ -2630,10 +2590,9 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event)
{
int error = 0; /* The error code (assume initially no errors). */
DWORD bytesRead; /* Number of bytes read. */
- DriverData* dp;
+ DriverData* dp = (DriverData *) drv_data;
int pb;
- dp = driver_data+(int)drv_data;
pb = dp->packet_bytes;
#ifdef ERTS_SMP
if(dp->in.thread == (HANDLE) -1) {
@@ -2801,7 +2760,7 @@ static void
ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
{
DWORD bytesWritten;
- DriverData* dp = driver_data + (int)drv_data;
+ DriverData *dp = (DriverData *) drv_data;
int error;
#ifdef ERTS_SMP
@@ -2809,7 +2768,7 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
dp->out.async_io_active = 0;
}
#endif
- DEBUGF(("ready_output(%d, 0x%x)\n", drv_data, ready_event));
+ DEBUGF(("ready_output(%p, 0x%x)\n", drv_data, ready_event));
set_busy_port(dp->port_num, 0);
if (!(dp->outbuf)) {
 /* Happens because events sometimes get signalled during a successful
@@ -2850,10 +2809,10 @@ static void stop_select(ErlDrvEvent e, void* _)
** no interpretation of this should be done by the rest of the
** emulator. The buffer should be at least 21 bytes long.
*/
-void sys_get_pid(char *buffer){
+void sys_get_pid(char *buffer, size_t buffer_size){
DWORD p = GetCurrentProcessId();
/* The pid is scalar and is an unsigned long. */
- sprintf(buffer,"%lu",(unsigned long) p);
+ erts_snprintf(buffer, buffer_size, "%lu",(unsigned long) p);
}
void
@@ -2864,7 +2823,7 @@ sys_init_io(void)
can change our view of the number of open files possible.
 We estimate the number at twice the number of ports.
 We really don't know on Windows, do we? */
- max_files = 2*erts_max_ports;
+ max_files = 2*erts_ptab_max(&erts_port);
}
#ifdef ERTS_SMP
@@ -3193,7 +3152,8 @@ erl_assert_error(char* expr, char* file, int line)
{
char message[1024];
- sprintf(message, "File %hs, line %d: %hs", file, line, expr);
+ erts_snprintf(message, sizeof(message),
+ "File %hs, line %d: %hs", file, line, expr);
MessageBox(GetActiveWindow(), message, "Assertion failed",
MB_OK | MB_ICONERROR);
#if 0
@@ -3318,9 +3278,6 @@ void erl_sys_init(void)
noinherit_std_handle(STD_INPUT_HANDLE);
noinherit_std_handle(STD_ERROR_HANDLE);
-
- erts_smp_mtx_init(&sys_driver_data_lock, "sys_driver_data_lock");
-
#ifdef ERTS_SMP
erts_smp_tsd_key_create(&win32_errstr_key);
InitializeCriticalSection(&htbc_lock);
diff --git a/erts/emulator/sys/win32/sys_float.c b/erts/emulator/sys/win32/sys_float.c
index 6558ad2d99..09dad89140 100644
--- a/erts/emulator/sys/win32/sys_float.c
+++ b/erts/emulator/sys/win32/sys_float.c
@@ -118,18 +118,18 @@ sys_chars_to_double(char *buf, double *fp)
*/
int
-sys_double_to_chars(double fp, char *buf)
+sys_double_to_chars(double fp, char *buffer, size_t buffer_size)
{
- char *s = buf;
+ char *s = buffer;
- (void) sprintf(buf, "%.20e", fp);
+ (void) erts_snprintf(buffer, buffer_size, "%.20e", fp);
 /* Search up to decimal point */
if (*s == '+' || *s == '-') s++;
while (isdigit(*s)) s++;
if (*s == ',') *s++ = '.'; /* Replace ',' with '.' */
/* Scan to end of string */
while (*s) s++;
- return s-buf; /* i.e strlen(buf) */
+ return s-buffer; /* i.e strlen(buffer) */
}
int
diff --git a/erts/emulator/sys/win32/sys_time.c b/erts/emulator/sys/win32/sys_time.c
index b5123dc45d..2f2dfc8197 100644
--- a/erts/emulator/sys/win32/sys_time.c
+++ b/erts/emulator/sys/win32/sys_time.c
@@ -26,11 +26,7 @@
#include "sys.h"
#include "assert.h"
-#ifdef __GNUC__
-#define LL_LITERAL(X) X##LL
-#else
-#define LL_LITERAL(X) X##i64
-#endif
+#define LL_LITERAL(X) ERTS_I64_LITERAL(X)
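A hypothetical example of the consolidated macro (the constant name is invented; 11644473600 is the number of seconds between 1601-01-01 and 1970-01-01): MSVC builds pick up the i64 suffix through ERTS_I64_LITERAL, while GCC-style builds presumably keep the default LL form.

/* Hypothetical usage: a 64-bit literal written portably for both
 * MSVC (i64 suffix) and GCC-style compilers. */
#define SECONDS_1601_TO_1970 LL_LITERAL(11644473600)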
/******************* Routines for time measurement *********************/
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index a7c8bd2cd2..9594ab48b1 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -47,6 +47,7 @@ MODULES= \
busy_port_SUITE \
call_trace_SUITE \
code_SUITE \
+ code_parallel_load_SUITE \
crypto_SUITE \
ddll_SUITE \
decode_packet_SUITE \
@@ -137,10 +138,10 @@ TARGET_FILES = $(MODULES:%=$(EBIN)/%.$(EMULATOR))
EMAKEFILE=Emakefile
-TEST_SPEC_FILES = emulator.spec \
- emulator.spec.win \
- emulator.spec.vxworks \
- emulator.spec.ose
+TEST_SPEC_FILES= emulator.spec \
+ emulator.spec.win \
+ emulator_bench.spec
+
# ----------------------------------------------------
# Release directory specification
# ----------------------------------------------------
diff --git a/erts/emulator/test/alloc_SUITE_data/allocator_test.h b/erts/emulator/test/alloc_SUITE_data/allocator_test.h
index cd4a91d34a..c0396ddb61 100644
--- a/erts/emulator/test/alloc_SUITE_data/allocator_test.h
+++ b/erts/emulator/test/alloc_SUITE_data/allocator_test.h
@@ -60,9 +60,9 @@ typedef void* erts_cond;
#define IS_MMAP_C(C) ((Ulong) ALC_TEST1(0x00a, (C)))
#define C_SZ(C) ((Ulong) ALC_TEST1(0x00b, (C)))
#define SBC2BLK(A, C) ((Block_t *) ALC_TEST2(0x00c, (A), (C)))
-#define BLK2SBC(A, B) ((Carrier_t *) ALC_TEST2(0x00d, (A), (B)))
-#define MBC2FBLK(A, C) ((Block_t *) ALC_TEST2(0x00e, (A), (C)))
-#define FBLK2MBC(A, B) ((Carrier_t *) ALC_TEST2(0x00f, (A), (B)))
+#define BLK_TO_SBC(A, B) ((Carrier_t *) ALC_TEST2(0x00d, (A), (B)))
+#define MBC_TO_FIRST_BLK(A, C) ((Block_t *) ALC_TEST2(0x00e, (A), (C)))
+#define FIRST_BLK_TO_MBC(A, B) ((Carrier_t *) ALC_TEST2(0x00f, (A), (B)))
#define FIRST_MBC(A) ((Carrier_t *) ALC_TEST1(0x010, (A)))
#define LAST_MBC(A) ((Carrier_t *) ALC_TEST1(0x011, (A)))
#define FIRST_SBC(A) ((Carrier_t *) ALC_TEST1(0x012, (A)))
@@ -73,7 +73,7 @@ typedef void* erts_cond;
#define MIN_BLK_SZ(A) ((Ulong) ALC_TEST1(0x017, (A)))
#define NXT_BLK(B) ((Block_t *) ALC_TEST1(0x018, (B)))
#define PREV_BLK(B) ((Block_t *) ALC_TEST1(0x019, (B)))
-#define IS_FIRST_BLK(B) ((Ulong) ALC_TEST1(0x01a, (B)))
+#define IS_MBC_FIRST_BLK(A,B) ((Ulong) ALC_TEST2(0x01a, (A), (B)))
#define UNIT_SZ ((Ulong) ALC_TEST0(0x01b))
/* From erl_goodfit_alloc.c */
diff --git a/erts/emulator/test/alloc_SUITE_data/basic.c b/erts/emulator/test/alloc_SUITE_data/basic.c
index 4a5e888161..0c27665712 100644
--- a/erts/emulator/test/alloc_SUITE_data/basic.c
+++ b/erts/emulator/test/alloc_SUITE_data/basic.c
@@ -44,7 +44,7 @@ testcase_run(TestCaseState_t *tcs)
c = FIRST_MBC(a);
ASSERT(tcs, !NEXT_C(c));
- blk = MBC2FBLK(a, c);
+ blk = MBC_TO_FIRST_BLK(a, c);
ASSERT(tcs, IS_LAST_BLK(blk));
ASSERT(tcs, IS_FREE_BLK(blk));
diff --git a/erts/emulator/test/alloc_SUITE_data/bucket_mask.c b/erts/emulator/test/alloc_SUITE_data/bucket_mask.c
index b214f87e4a..34979cacf1 100644
--- a/erts/emulator/test/alloc_SUITE_data/bucket_mask.c
+++ b/erts/emulator/test/alloc_SUITE_data/bucket_mask.c
@@ -22,7 +22,7 @@
#include "allocator_test.h"
#include <stdio.h>
-#ifdef __WIN32__ && SIZEOF_VOID_P == 8
+#if defined(__WIN32__) && SIZEOF_VOID_P == 8
/* Use larger threshold for win64 as block alignment
is 16 bytes and not 8 */
#define SBCT ((1024*1024))
@@ -48,10 +48,16 @@ testcase_cleanup(TestCaseState_t *tcs)
void
testcase_run(TestCaseState_t *tcs)
{
- void *tmp;
- void **fence;
+ typedef struct linked_block {
+ struct linked_block* next;
+ }Linked;
+ Linked* link;
+ Linked* fence_list;
+ Linked* pad_list;
+ void* tmp;
void **blk;
Ulong sz;
+ Ulong residue;
Ulong smbcs;
int i;
int bi;
@@ -73,7 +79,7 @@ testcase_run(TestCaseState_t *tcs)
ASSERT(tcs, a);
min_blk_sz = MIN_BLK_SZ(a);
- smbcs = 2*(no_bkts*sizeof(void *) + min_blk_sz) + min_blk_sz;
+ smbcs = (no_bkts*sizeof(void *) + min_blk_sz) + min_blk_sz;
for (i = 0; i < no_bkts; i++) {
sz = BKT_MIN_SZ(a, i);
if (sz >= sbct)
@@ -98,26 +104,42 @@ testcase_run(TestCaseState_t *tcs)
tcs->extra = (void *) a;
ASSERT(tcs, a);
+
blk = (void **) ALLOC(a, no_bkts*sizeof(void *));
- fence = (void **) ALLOC(a, no_bkts*sizeof(void *));
- ASSERT(tcs, blk && fence);
+ ASSERT(tcs, blk);
+ fence_list = NULL;
testcase_printf(tcs, "Allocating blocks and fences\n");
for (i = 0; i < bi_tests; i++) {
sz = BKT_MIN_SZ(a, i);
blk[i] = ALLOC(a, sz - ablk_hdr_sz);
- fence[i] = ALLOC(a, 1);
- ASSERT(tcs, blk[i] && fence[i]);
+ link = (Linked*) ALLOC(a, sizeof(Linked));
+ ASSERT(tcs, blk[i] && link);
+ link->next = fence_list;
+ fence_list = link;
}
- tmp = (void *) UMEM2BLK(fence[bi_tests - 1]);
- tmp = (void *) NXT_BLK((Block_t *) tmp);
- ASSERT(tcs, IS_LAST_BLK(tmp));
- sz = BLK_SZ((Block_t *) tmp);
- testcase_printf(tcs, "Allocating leftover size = %lu\n", sz);
- tmp = ALLOC(a, sz - ablk_hdr_sz);
- ASSERT(tcs, tmp);
+ pad_list = 0;
+ do {
+ tmp = (void *) UMEM2BLK(link); /* last allocated */
+ tmp = (void *) NXT_BLK((Block_t *) tmp);
+ ASSERT(tcs, IS_LAST_BLK(tmp));
+ sz = BLK_SZ((Block_t *) tmp);
+ if (sz >= sbct) {
+ residue = sz;
+ sz = sbct - min_blk_sz;
+ residue -= sz;
+ }
+ else {
+ residue = 0;
+ }
+ testcase_printf(tcs, "Allocating leftover size = %lu, residue = %lu\n", sz, residue);
+ link = (Linked*) ALLOC(a, sz - ablk_hdr_sz);
+ ASSERT(tcs, link);
+ link->next = pad_list;
+ pad_list = link;
+ } while (residue);
bi = FIND_BKT(a, 0);
ASSERT(tcs, bi < 0);
@@ -135,16 +157,23 @@ testcase_run(TestCaseState_t *tcs)
for (i = 0; i < bi_tests; i++) {
FREE(a, blk[i]);
- FREE(a, fence[i]);
+ }
+ while (fence_list) {
+ link = fence_list;
+ fence_list = link->next;
+ FREE(a, link);
}
FREE(a, (void *) blk);
- FREE(a, (void *) fence);
bi = FIND_BKT(a, 0);
ASSERT(tcs, bi == no_bkts - 1);
- FREE(a, tmp);
+ while (pad_list) {
+ link = pad_list;
+ pad_list = link->next;
+ FREE(a, link);
+ }
bi = FIND_BKT(a, 0);
ASSERT(tcs, bi < 0);
diff --git a/erts/emulator/test/alloc_SUITE_data/coalesce.c b/erts/emulator/test/alloc_SUITE_data/coalesce.c
index 6f35d3279b..981fa6d43e 100644
--- a/erts/emulator/test/alloc_SUITE_data/coalesce.c
+++ b/erts/emulator/test/alloc_SUITE_data/coalesce.c
@@ -54,7 +54,7 @@ setup_sequence(TestCaseState_t *tcs, Allctr_t *a, Ulong bsz, int no,
no, bsz);
c = FIRST_MBC(a);
ASSERT(tcs, !NEXT_C(c));
- blk = MBC2FBLK(a, c);
+ blk = MBC_TO_FIRST_BLK(a, c);
ASSERT(tcs, IS_LAST_BLK(blk));
for (i = 0; i < no; i++)
@@ -266,7 +266,7 @@ testcase_name(void)
void
testcase_run(TestCaseState_t *tcs)
{
- char *argv_org[] = {"-tmmbcs1024", "-tsbct2048", "-trmbcmt100", "-tas", NULL, NULL};
+ char *argv_org[] = {"-tsmbcs511","-tmmbcs511", "-tsbct512", "-trmbcmt100", "-tas", NULL, NULL};
char *alg[] = {"af", "gf", "bf", "aobf", "aoff", NULL};
int i;
@@ -276,7 +276,7 @@ testcase_run(TestCaseState_t *tcs)
char *argv[sizeof(argv_org)/sizeof(argv_org[0])];
memcpy((void *) argv, (void *) argv_org, sizeof(argv_org));
- argv[4] = alg[i];
+ argv[5] = alg[i];
testcase_printf(tcs, " *** Starting \"%s\" allocator *** \n", alg[i]);
a = START_ALC("coalesce_", 0, argv);
ASSERT(tcs, a);
diff --git a/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c b/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c
index 0277616bd0..7d5608f890 100644
--- a/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c
+++ b/erts/emulator/test/alloc_SUITE_data/mseg_clear_cache.c
@@ -52,10 +52,10 @@ testcase_run(TestCaseState_t *tcs)
tcs->extra = &seg[0];
for (i = 0; i < MAX_SEGS; i++) {
- seg[i].size = 1000;
+ seg[i].size = 1 << 18;
seg[i].ptr = MSEG_ALLOC(&seg[i].size);
ASSERT(tcs, seg[i].ptr);
- ASSERT(tcs, seg[i].size >= 1000);
+ ASSERT(tcs, seg[i].size >= (1 << 18));
}
n = MSEG_NO();
diff --git a/erts/emulator/test/beam_SUITE.erl b/erts/emulator/test/beam_SUITE.erl
index 02c6e19686..3197a4c137 100644
--- a/erts/emulator/test/beam_SUITE.erl
+++ b/erts/emulator/test/beam_SUITE.erl
@@ -54,7 +54,7 @@ end_per_group(_GroupName, Config) ->
%% Verify that apply(M, F, A) is really tail recursive.
apply_last(Config) when is_list(Config) ->
- Pid=spawn(?MODULE, applied, [self(), 10000]),
+ Pid = spawn(?MODULE, applied, [self(), 10000]),
Size =
receive
{Pid, finished} ->
@@ -94,32 +94,32 @@ apply_last_bif(Config) when is_list(Config) ->
%% Test three high register numbers in a put_list instruction
%% (to test whether packing works properly).
packed_registers(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Mod = packed_regs,
- ?line Name = filename:join(PrivDir, atom_to_list(Mod) ++ ".erl"),
+ PrivDir = ?config(priv_dir, Config),
+ Mod = packed_regs,
+ Name = filename:join(PrivDir, atom_to_list(Mod) ++ ".erl"),
%% Generate a module which generates a list of tuples.
%% put_list(A) -> [{A, 600}, {A, 999}, ... {A, 0}].
- ?line Code = gen_packed_regs(600, ["-module("++atom_to_list(Mod)++").\n",
+ Code = gen_packed_regs(600, ["-module("++atom_to_list(Mod)++").\n",
"-export([put_list/1]).\n",
"put_list(A) ->\n["]),
- ?line ok = file:write_file(Name, Code),
+ ok = file:write_file(Name, Code),
%% Compile the module.
- ?line io:format("Compiling: ~s\n", [Name]),
- ?line CompRc = compile:file(Name, [{outdir, PrivDir}, report]),
- ?line io:format("Result: ~p\n",[CompRc]),
- ?line {ok, Mod} = CompRc,
+ io:format("Compiling: ~s\n", [Name]),
+ CompRc = compile:file(Name, [{outdir, PrivDir}, report]),
+ io:format("Result: ~p\n",[CompRc]),
+ {ok, Mod} = CompRc,
%% Load it.
- ?line io:format("Loading...\n",[]),
- ?line LoadRc = code:load_abs(filename:join(PrivDir, atom_to_list(Mod))),
- ?line {module,_Module} = LoadRc,
+ io:format("Loading...\n",[]),
+ LoadRc = code:load_abs(filename:join(PrivDir, atom_to_list(Mod))),
+ {module,_Module} = LoadRc,
%% Call it and verify result.
- ?line Term = {a, b},
- ?line L = Mod:put_list(Term),
- ?line verify_packed_regs(L, Term, 600),
+ Term = {a, b},
+ L = Mod:put_list(Term),
+ verify_packed_regs(L, Term, 600),
ok.
gen_packed_regs(0, Acc) ->
@@ -131,11 +131,11 @@ verify_packed_regs([], _, -1) -> ok;
verify_packed_regs([{Term, N}| T], Term, N) ->
verify_packed_regs(T, Term, N-1);
verify_packed_regs(L, Term, N) ->
- ?line ok = io:format("Expected [{~p, ~p}|T]; got\n~p\n", [Term, N, L]),
- ?line test_server:fail().
+ ok = io:format("Expected [{~p, ~p}|T]; got\n~p\n", [Term, N, L]),
+ test_server:fail().
buildo_mucho(Config) when is_list(Config) ->
- ?line buildo_mucho_1(),
+ buildo_mucho_1(),
ok.
buildo_mucho_1() ->
@@ -206,20 +206,27 @@ buildo_mucho_1() ->
{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1},{<<>>,1}].
heap_sizes(Config) when is_list(Config) ->
- ?line Sizes = erlang:system_info(heap_sizes),
- ?line io:format("~p heap sizes\n", [length(Sizes)]),
- ?line io:format("~p\n", [Sizes]),
+ Sizes = erlang:system_info(heap_sizes),
+ io:format("~p heap sizes\n", [length(Sizes)]),
+ io:format("~p\n", [Sizes]),
%% Verify that heap sizes increase monotonically.
- ?line Largest = lists:foldl(fun(E, P) when is_integer(P), E > P -> E;
+ Largest = lists:foldl(fun(E, P) when is_integer(P), E > P -> E;
(E, []) -> E
end, [], Sizes),
- %% Verify that the largest heap size consists of 31 or 63 bits.
- ?line
- case Largest bsr (erlang:system_info(wordsize)*8-2) of
- R when R > 0 -> ok
- end,
+ %% Verify that the largest heap size consists of
+ %% - 31 bits of bytes on a 32-bit arch
+ %% - at least 52 bits of bytes (48 is the maximum virtual address)
+ %%   and at most 63 bits on a 64-bit arch
+ %% heap sizes are in words
+ case erlang:system_info(wordsize) of
+ 8 ->
+ 0 = (Largest*8) bsr 63,
+ true = (Largest*8) > (1 bsl 52);
+ 4 ->
+ 1 = (Largest*4) bsr 31
+ end,
ok.
%% Thanks to Igor Goryachev.
@@ -302,10 +309,10 @@ b() ->
end.
fconv(Config) when is_list(Config) ->
- ?line do_fconv(atom),
- ?line do_fconv(nil),
- ?line do_fconv(tuple_literal),
- ?line 3.0 = do_fconv(1.0, 2.0),
+ do_fconv(atom),
+ do_fconv(nil),
+ do_fconv(tuple_literal),
+ 3.0 = do_fconv(1.0, 2.0),
ok.
do_fconv(Type) ->
@@ -325,9 +332,9 @@ do_fconv(tuple_literal, Float) when is_float(Float) ->
Float + {a,b}.
select_val(Config) when is_list(Config) ->
- ?line zero = do_select_val(0),
- ?line big = do_select_val(1 bsl 64),
- ?line integer = do_select_val(42),
+ zero = do_select_val(0),
+ big = do_select_val(1 bsl 64),
+ integer = do_select_val(42),
ok.
do_select_val(X) ->
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index 99ed8f1748..e2442861c7 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -25,7 +25,9 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
display/1, display_huge/0,
- types/1,
+ erl_bif_types/1,guard_bifs_in_erl_bif_types/1,
+ shadow_comments/1,
+ specs/1,improper_bif_stubs/1,auto_imports/1,
t_list_to_existing_atom/1,os_env/1,otp_7526/1,
binary_to_atom/1,binary_to_existing_atom/1,
atom_to_binary/1,min_max/1, erlang_halt/1]).
@@ -33,7 +35,9 @@
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
- [types, t_list_to_existing_atom, os_env, otp_7526,
+ [erl_bif_types, guard_bifs_in_erl_bif_types, shadow_comments,
+ specs, improper_bif_stubs, auto_imports,
+ t_list_to_existing_atom, os_env, otp_7526,
display,
atom_to_binary, binary_to_atom, binary_to_existing_atom,
min_max, erlang_halt].
@@ -86,33 +90,20 @@ deeep(N,Acc) ->
deeep(N) ->
deeep(N,[hello]).
+erl_bif_types(Config) when is_list(Config) ->
+ ensure_erl_bif_types_compiled(),
-types(Config) when is_list(Config) ->
- c:l(erl_bif_types),
- case erlang:function_exported(erl_bif_types, module_info, 0) of
- false ->
- %% Fail cleanly.
- ?line ?t:fail("erl_bif_types not compiled");
- true ->
- types_1()
- end.
-
-types_1() ->
- ?line List0 = erlang:system_info(snifs),
+ List0 = erlang:system_info(snifs),
%% Ignore missing type information for hipe BIFs.
- ?line List = [MFA || {M,_,_}=MFA <- List0, M =/= hipe_bifs],
+ List = [MFA || {M,_,_}=MFA <- List0, M =/= hipe_bifs],
- case [MFA || MFA <- List, not known_types(MFA)] of
- [] ->
- types_2(List);
- BadTypes ->
- io:put_chars("No type information:\n"),
- io:format("~p\n", [lists:sort(BadTypes)]),
- ?line ?t:fail({length(BadTypes),bifs_without_types})
- end.
+ KnownTypes = [MFA || MFA <- List, known_types(MFA)],
+ io:format("There are ~p BIFs with type information in erl_bif_types.",
+ [length(KnownTypes)]),
+ erl_bif_types_2(KnownTypes).
-types_2(List) ->
+erl_bif_types_2(List) ->
BadArity = [MFA || {M,F,A}=MFA <- List,
begin
Types = erl_bif_types:arg_types(M, F, A),
@@ -120,14 +111,14 @@ types_2(List) ->
end],
case BadArity of
[] ->
- types_3(List);
+ erl_bif_types_3(List);
[_|_] ->
io:put_chars("Bifs with bad arity\n"),
io:format("~p\n", [BadArity]),
?line ?t:fail({length(BadArity),bad_arity})
end.
-types_3(List) ->
+erl_bif_types_3(List) ->
BadSmokeTest = [MFA || {M,F,A}=MFA <- List,
begin
try erl_bif_types:type(M, F, A) of
@@ -151,9 +142,220 @@ types_3(List) ->
?line ?t:fail({length(BadSmokeTest),bad_smoke_test})
end.
+guard_bifs_in_erl_bif_types(_Config) ->
+ ensure_erl_bif_types_compiled(),
+
+ List0 = erlang:system_info(snifs),
+ List = [{F,A} || {erlang,F,A} <- List0,
+ erl_internal:guard_bif(F, A)],
+ Not = [FA || {F,A}=FA <- List,
+ not erl_bif_types:is_known(erlang, F, A)],
+ case Not of
+ [] ->
+ ok;
+ [_|_] ->
+ io:put_chars(
+ ["Dialyzer requires that all guard BIFs "
+ "have type information in erl_bif_types.\n\n"
+ "The following guard BIFs have no type information "
+ "in erl_bif_types:\n\n",
+ [io_lib:format(" ~p/~p\n", [F,A]) || {F,A} <- Not]]),
+ ?t:fail()
+ end.
+
+shadow_comments(_Config) ->
+ ensure_erl_bif_types_compiled(),
+
+ List0 = erlang:system_info(snifs),
+ List1 = [MFA || {M,_,_}=MFA <- List0, M =/= hipe_bifs],
+ List = [MFA || MFA <- List1, not is_operator(MFA)],
+ HasTypes = [MFA || {M,F,A}=MFA <- List,
+ erl_bif_types:is_known(M, F, A)],
+ Path = get_code_path(),
+ BifRel = sofs:relation(HasTypes, [{m,f,a}]),
+ BifModules = sofs:to_external(sofs:projection(1, BifRel)),
+ AbstrByModule = [extract_abstract(Mod, Path) || Mod <- BifModules],
+ Specs0 = [extract_specs(Mod, Abstr) ||
+ {Mod,Abstr} <- AbstrByModule],
+ Specs = lists:append(Specs0),
+ SpecFuns0 = [F || {F,_} <- Specs],
+ SpecFuns = sofs:relation(SpecFuns0, [{m,f,a}]),
+ HasTypesAndSpecs = sofs:intersection(BifRel, SpecFuns),
+ Commented0 = lists:append([extract_comments(Mod, Path) ||
+ Mod <- BifModules]),
+ Commented = sofs:relation(Commented0, [{m,f,a}]),
+ {NoComments0,_,NoBifSpecs0} =
+ sofs:symmetric_partition(HasTypesAndSpecs, Commented),
+ NoComments = sofs:to_external(NoComments0),
+ NoBifSpecs = sofs:to_external(NoBifSpecs0),
+
+ case NoComments of
+ [] ->
+ ok;
+ [_|_] ->
+ io:put_chars(
+ ["If a BIF stub has both a spec and has type information in "
+ "erl_bif_types, there *must*\n"
+ "be a comment in the source file to make that immediately "
+ "obvious.\n\nThe following comments are missing:\n\n",
+ [io_lib:format("%% Shadowed by erl_bif_types: ~p:~p/~p\n",
+ [M,F,A]) || {M,F,A} <- NoComments]]),
+ ?t:fail()
+ end,
+
+ case NoBifSpecs of
+ [] ->
+ ok;
+ [_|_] ->
+ io:put_chars(
+ ["The following functions have \"shadowed\" comments "
+ "claiming that there is type information in erl_bif_types,\n"
+ "but actually there is no such type information.\n\n"
+ "Therefore, the following comments should be removed:\n\n",
+ [io_lib:format("%% Shadowed by erl_bif_types: ~p:~p/~p\n",
+ [M,F,A]) || {M,F,A} <- NoBifSpecs]]),
+ ?t:fail()
+ end.
+
+extract_comments(Mod, Path) ->
+ Beam = which(Mod, Path),
+ SrcDir = filename:join(filename:dirname(filename:dirname(Beam)), "src"),
+ Src = filename:join(SrcDir, atom_to_list(Mod) ++ ".erl"),
+ {ok,Bin} = file:read_file(Src),
+ Lines0 = binary:split(Bin, <<"\n">>, [global]),
+ Lines1 = [T || <<"%% Shadowed by erl_bif_types: ",T/binary>> <- Lines0],
+ {ok,ReMFA} = re:compile("([^:]*):([^/]*)/(\\d*)"),
+ Lines = [L || L <- Lines1, re:run(L, ReMFA, [{capture,[]}]) =:= match],
+ [begin
+ {match,[M,F,A]} = re:run(L, ReMFA, [{capture,all_but_first,list}]),
+ {list_to_atom(M),list_to_atom(F),list_to_integer(A)}
+ end || L <- Lines].
+
+ensure_erl_bif_types_compiled() ->
+ c:l(erl_bif_types),
+ case erlang:function_exported(erl_bif_types, module_info, 0) of
+ false ->
+ %% Fail cleanly.
+ ?t:fail("erl_bif_types not compiled");
+ true ->
+ ok
+ end.
+
known_types({M,F,A}) ->
erl_bif_types:is_known(M, F, A).
+specs(_) ->
+ List0 = erlang:system_info(snifs),
+
+ %% Ignore missing type information for hipe BIFs.
+ List1 = [MFA || {M,_,_}=MFA <- List0, M =/= hipe_bifs],
+
+ %% Ignore all operators.
+ List = [MFA || MFA <- List1, not is_operator(MFA)],
+
+ %% Extract specs from the abstract code for all BIFs.
+ Path = get_code_path(),
+ BifRel = sofs:relation(List, [{m,f,a}]),
+ BifModules = sofs:to_external(sofs:projection(1, BifRel)),
+ AbstrByModule = [extract_abstract(Mod, Path) || Mod <- BifModules],
+ Specs0 = [extract_specs(Mod, Abstr) ||
+ {Mod,Abstr} <- AbstrByModule],
+ Specs = lists:append(Specs0),
+ BifSet = sofs:set(List, [function]),
+ SpecRel0 = sofs:relation(Specs, [{function,spec}]),
+ SpecRel = sofs:restriction(SpecRel0, BifSet),
+
+ %% Find BIFs without specs.
+ NoSpecs0 = sofs:difference(BifSet, sofs:domain(SpecRel)),
+ NoSpecs = sofs:to_external(NoSpecs0),
+ case NoSpecs of
+ [] ->
+ ok;
+ [_|_] ->
+ io:put_chars("The following BIFs don't have specs:\n"),
+ [print_mfa(MFA) || MFA <- NoSpecs],
+ ?t:fail()
+ end.
+
+is_operator({erlang,F,A}) ->
+ erl_internal:arith_op(F, A) orelse
+ erl_internal:bool_op(F, A) orelse
+ erl_internal:comp_op(F, A) orelse
+ erl_internal:list_op(F, A) orelse
+ erl_internal:send_op(F, A);
+is_operator(_) -> false.
+
+extract_specs(M, Abstr) ->
+ [{make_mfa(M, Name),Spec} || {attribute,_,spec,{Name,Spec}} <- Abstr].
+
+make_mfa(M, {F,A}) -> {M,F,A};
+make_mfa(M, {M,_,_}=MFA) -> MFA.
+
+improper_bif_stubs(_) ->
+ Bifs0 = erlang:system_info(snifs),
+ Bifs = [MFA || {M,_,_}=MFA <- Bifs0, M =/= hipe_bifs],
+ Path = get_code_path(),
+ BifRel = sofs:relation(Bifs, [{m,f,a}]),
+ BifModules = sofs:to_external(sofs:projection(1, BifRel)),
+ AbstrByModule = [extract_abstract(Mod, Path) || Mod <- BifModules],
+ Funcs0 = [extract_functions(Mod, Abstr) ||
+ {Mod,Abstr} <- AbstrByModule],
+ Funcs = lists:append(Funcs0),
+ BifSet = sofs:set(Bifs, [function]),
+ FuncRel0 = sofs:relation(Funcs, [{function,code}]),
+ FuncRel = sofs:restriction(FuncRel0, BifSet),
+ [check_stub(MFA, Body) || {MFA,Body} <- sofs:to_external(FuncRel)],
+ ok.
+
+auto_imports(_Config) ->
+ Path = get_code_path(),
+ {erlang,Abstr} = extract_abstract(erlang, Path),
+ SpecFuns = [Name || {attribute,_,spec,{Name,_}} <- Abstr],
+ auto_imports(SpecFuns, 0).
+
+auto_imports([{F,A}|T], Errors) ->
+ case erl_internal:bif(F, A) of
+ false ->
+ io:format("~p/~p: not auto-imported, but spec claims it "
+ "is auto-imported", [F,A]),
+ auto_imports(T, Errors+1);
+ true ->
+ auto_imports(T, Errors)
+ end;
+auto_imports([{erlang,F,A}|T], Errors) ->
+ case erl_internal:bif(F, A) of
+ false ->
+ auto_imports(T, Errors);
+ true ->
+ io:format("~p/~p: auto-imported, but "
+ "spec claims it is *not* auto-imported", [F,A]),
+ auto_imports(T, Errors+1)
+ end;
+auto_imports([], 0) ->
+ ok;
+auto_imports([], Errors) ->
+ ?t:fail({Errors,inconsistencies}).
+
+extract_functions(M, Abstr) ->
+ [{{M,F,A},Body} || {function,_,F,A,Body} <- Abstr].
+
+check_stub({erlang,apply,3}, _) ->
+ ok;
+check_stub({_,F,A}, B) ->
+ try
+ [{clause,_,Args,[],Body}] = B,
+ A = length(Args),
+ [{call,_,{remote,_,{atom,_,erlang},{atom,_,nif_error}},[_]}] = Body
+ catch
+ _:_ ->
+ io:put_chars("Invalid body for the following BIF stub:\n"),
+ Func = {function,0,F,A,B},
+ io:put_chars(erl_pp:function(Func)),
+ io:nl(),
+ io:put_chars("The body should be: erlang:nif_error(undef)"),
+ ?t:fail()
+ end.
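+
+%% For reference, a well-formed BIF stub looks like this (the function
+%% name below is only an illustration):
+%%
+%%     frobnicate(_Term) ->
+%%         erlang:nif_error(undef).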
+
t_list_to_existing_atom(Config) when is_list(Config) ->
?line all = list_to_existing_atom("all"),
?line ?MODULE = list_to_existing_atom(?MODULE_STRING),
@@ -483,6 +685,35 @@ erlang_halt(Config) when is_list(Config) ->
id(I) -> I.
+%% Get code path, including the path for the erts application.
+get_code_path() ->
+ case code:lib_dir(erts) of
+ {error,bad_name} ->
+ Erts = filename:join([code:root_dir(),"erts","preloaded","ebin"]),
+ [Erts|code:get_path()];
+ _ ->
+ code:get_path()
+ end.
+
+which(Mod, Path) ->
+ which_1(atom_to_list(Mod) ++ ".beam", Path).
+
+which_1(Base, [D|Ds]) ->
+ Path = filename:join(D, Base),
+ case filelib:is_regular(Path) of
+ true -> Path;
+ false -> which_1(Base, Ds)
+ end.
+print_mfa({M,F,A}) ->
+ io:format("~p:~p/~p", [M,F,A]).
+
+extract_abstract(Mod, Path) ->
+ Beam = which(Mod, Path),
+ {ok,{Mod,[{abstract_code,{raw_abstract_v1,Abstr}}]}} =
+ beam_lib:chunks(Beam, [abstract_code]),
+ {Mod,Abstr}.
+
+
hostname() ->
hostname(atom_to_list(node())).
diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl
index 58e0cb4096..babdb3363f 100644
--- a/erts/emulator/test/binary_SUITE.erl
+++ b/erts/emulator/test/binary_SUITE.erl
@@ -626,8 +626,32 @@ safe_binary_to_term2(Config) when is_list(Config) ->
bad_terms(suite) -> [];
bad_terms(Config) when is_list(Config) ->
?line test_terms(fun corrupter/1).
-
+
+corrupter(Term) when is_function(Term);
+ is_function(hd(Term));
+ is_function(element(2,element(2,element(2,Term)))) ->
+ %% Check whether lists is native-compiled. If it is, we do not try to
+ %% corrupt funs, as this can create some very strange behaviour.
+ %% To show the error, print `Byte` in the foreach fun in corrupter/2.
+ case erlang:system_info(hipe_architecture) of
+ undefined ->
+ corrupter0(Term);
+ Architecture ->
+ {lists, ListsBinary, _ListsFilename} = code:get_object_code(lists),
+ ChunkName = hipe_unified_loader:chunk_name(Architecture),
+ NativeChunk = beam_lib:chunks(ListsBinary, [ChunkName]),
+ case NativeChunk of
+ {ok,{_,[{_,Bin}]}} when is_binary(Bin) ->
+ S = io_lib:format("Skipping corruption of: ~P", [Term,12]),
+ io:put_chars(S);
+ {error, beam_lib, _} ->
+ corrupter0(Term)
+ end
+ end;
corrupter(Term) ->
+ corrupter0(Term).
+
+corrupter0(Term) ->
?line try
S = io_lib:format("About to corrupt: ~P", [Term,12]),
io:put_chars(S)
diff --git a/erts/emulator/test/bs_bit_binaries_SUITE.erl b/erts/emulator/test/bs_bit_binaries_SUITE.erl
index ff1088118d..7f2cd1e881 100644
--- a/erts/emulator/test/bs_bit_binaries_SUITE.erl
+++ b/erts/emulator/test/bs_bit_binaries_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2006-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2006-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -177,14 +177,55 @@ append(Config) when is_list(Config) ->
cs_init(),
?line <<(-1):256/signed-unit:8>> = cs(do_append(id(<<>>), 256*8)),
?line <<(-1):256/signed-unit:8>> = cs(do_append2(id(<<>>), 256*4)),
+ <<(-1):256/signed-unit:8>> = cs(do_append3(id(<<>>), 256*8)),
cs_end().
do_append(Bin, N) when N > 0 -> do_append(<<Bin/bits,1:1>>, N-1);
do_append(Bin, 0) -> Bin.
-do_append2(Bin, N) when N > 0 -> do_append2(<<Bin/bits,3:2>>, N-1);
+do_append2(Bin, N) when N > 0 -> do_append2(<<Bin/binary-unit:2,3:2>>, N-1);
do_append2(Bin, 0) -> Bin.
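+%% Append one bit at a time, matching the accumulated bitstring with the
+%% smallest unit (2 through 17) that divides its current size, falling
+%% back to unit 1.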
+do_append3(Bin, N) when N > 0 ->
+ Bits = bit_size(Bin),
+ if
+ Bits rem 2 =:= 0 ->
+ do_append3(<<Bin/binary-unit:2,1:1>>, N-1);
+ Bits rem 3 =:= 0 ->
+ do_append3(<<Bin/binary-unit:3,1:1>>, N-1);
+ Bits rem 4 =:= 0 ->
+ do_append3(<<Bin/binary-unit:4,1:1>>, N-1);
+ Bits rem 5 =:= 0 ->
+ do_append3(<<Bin/binary-unit:5,1:1>>, N-1);
+ Bits rem 6 =:= 0 ->
+ do_append3(<<Bin/binary-unit:6,1:1>>, N-1);
+ Bits rem 7 =:= 0 ->
+ do_append3(<<Bin/binary-unit:7,1:1>>, N-1);
+ Bits rem 8 =:= 0 ->
+ do_append3(<<Bin/binary-unit:8,1:1>>, N-1);
+ Bits rem 9 =:= 0 ->
+ do_append3(<<Bin/binary-unit:9,1:1>>, N-1);
+ Bits rem 10 =:= 0 ->
+ do_append3(<<Bin/binary-unit:10,1:1>>, N-1);
+ Bits rem 11 =:= 0 ->
+ do_append3(<<Bin/binary-unit:11,1:1>>, N-1);
+ Bits rem 12 =:= 0 ->
+ do_append3(<<Bin/binary-unit:12,1:1>>, N-1);
+ Bits rem 13 =:= 0 ->
+ do_append3(<<Bin/binary-unit:13,1:1>>, N-1);
+ Bits rem 14 =:= 0 ->
+ do_append3(<<Bin/binary-unit:14,1:1>>, N-1);
+ Bits rem 15 =:= 0 ->
+ do_append3(<<Bin/binary-unit:15,1:1>>, N-1);
+ Bits rem 16 =:= 0 ->
+ do_append3(<<Bin/binary-unit:16,1:1>>, N-1);
+ Bits rem 17 =:= 0 ->
+ do_append3(<<Bin/binary-unit:17,1:1>>, N-1);
+ true ->
+ do_append3(<<Bin/binary-unit:1,1:1>>, N-1)
+ end;
+do_append3(Bin, 0) -> Bin.
+
cs_init() ->
erts_debug:set_internal_state(available_internal_state, true),
ok.
diff --git a/erts/emulator/test/bs_construct_SUITE.erl b/erts/emulator/test/bs_construct_SUITE.erl
index 8b5c82d968..123952d01d 100644
--- a/erts/emulator/test/bs_construct_SUITE.erl
+++ b/erts/emulator/test/bs_construct_SUITE.erl
@@ -28,7 +28,7 @@
mem_leak/1, coerce_to_float/1, bjorn/1,
huge_float_field/1, huge_binary/1, system_limit/1, badarg/1,
copy_writable_binary/1, kostis/1, dynamic/1, bs_add/1,
- otp_7422/1, zero_width/1]).
+ otp_7422/1, zero_width/1, bad_append/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -38,7 +38,8 @@ all() ->
[test1, test2, test3, test4, test5, testf, not_used,
in_guard, mem_leak, coerce_to_float, bjorn,
huge_float_field, huge_binary, system_limit, badarg,
- copy_writable_binary, kostis, dynamic, bs_add, otp_7422, zero_width].
+ copy_writable_binary, kostis, dynamic, bs_add, otp_7422, zero_width,
+ bad_append].
groups() ->
[].
@@ -546,11 +547,47 @@ huge_float_check({'EXIT',{badarg,_}}) -> ok.
huge_binary(Config) when is_list(Config) ->
?line 16777216 = size(<<0:(id(1 bsl 26)),(-1):(id(1 bsl 26))>>),
?line garbage_collect(),
- ?line id(<<0:((1 bsl 32)-1)>>),
+ {Shift,Return} = case free_mem() of
+ undefined -> {32,ok};
+ Mb when Mb > 600 -> {32,ok};
+ Mb when Mb > 300 -> {31,"Limit huge binaries to 256 Mb"};
+ _ -> {30,"Limit huge binary to 128 Mb"}
+ end,
?line garbage_collect(),
- ?line id(<<0:(id((1 bsl 32)-1))>>),
+ ?line id(<<0:((1 bsl Shift)-1)>>),
?line garbage_collect(),
- ok.
+ ?line id(<<0:(id((1 bsl Shift)-1))>>),
+ ?line garbage_collect(),
+ case Return of
+ ok -> ok;
+ Comment -> {comment, Comment}
+ end.
+
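+%% Return the amount of free memory in Mb on Linux, or undefined on other
+%% systems or when the output of 'free' cannot be parsed. The parsed
+%% output is assumed to look roughly like this (illustrative):
+%%
+%%               total       used       free     shared    buffers     cached
+%%   Mem:      2048000    1024000    1024000          0      50000     200000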
+free_mem() ->
+ Cmd = "uname; free",
+ Output = string:tokens(os:cmd(Cmd), "\n"),
+ io:format("Output from command ~p\n~p\n",[Cmd,Output]),
+ case Output of
+ [OS, ColumnNames, Values | _] ->
+ case string:str(OS,"Linux") of
+ 0 ->
+ io:format("Unknown OS\n",[]),
+ undefined;
+ _ ->
+ case {string:tokens(ColumnNames, " \t"),
+ string:tokens(Values, " \t")} of
+ {[_,_,"free"|_],["Mem:",_,_,FreeKb|_]} ->
+ list_to_integer(FreeKb) div 1024;
+ _ ->
+ io:format("Failed to parse output from 'free':\n",[]),
+ undefined
+ end
+ end;
+ _ ->
+ io:format("Too few lines in output\n",[]),
+ undefined
+ end.
+
system_limit(Config) when is_list(Config) ->
WordSize = erlang:system_info(wordsize),
@@ -827,5 +864,49 @@ zero_width(Config) when is_list(Config) ->
?line {'EXIT',{badarg,_}} = (catch <<(id(not_a_number)):0>>),
ok.
+
+bad_append(_) ->
+ do_bad_append(<<127:1>>, fun append_unit_3/1),
+ do_bad_append(<<127:2>>, fun append_unit_3/1),
+ do_bad_append(<<127:17>>, fun append_unit_3/1),
+
+ do_bad_append(<<127:3>>, fun append_unit_4/1),
+ do_bad_append(<<127:5>>, fun append_unit_4/1),
+ do_bad_append(<<127:7>>, fun append_unit_4/1),
+ do_bad_append(<<127:199>>, fun append_unit_4/1),
+
+ do_bad_append(<<127:7>>, fun append_unit_8/1),
+ do_bad_append(<<127:9>>, fun append_unit_8/1),
+
+ do_bad_append(<<0:8>>, fun append_unit_16/1),
+ do_bad_append(<<0:15>>, fun append_unit_16/1),
+ do_bad_append(<<0:17>>, fun append_unit_16/1),
+ ok.
+
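+%% Each bitstring passed to do_bad_append/2 has a size that is not a
+%% multiple of the unit used by the appender fun, so every append is
+%% expected to fail with badarg, also for sub-binaries and writable
+%% binaries.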
+do_bad_append(Bin0, Appender) ->
+ {'EXIT',{badarg,_}} = (catch Appender(Bin0)),
+
+ Bin1 = id(<<0:3,Bin0/bitstring>>),
+ <<_:3,Bin2/bitstring>> = Bin1,
+ {'EXIT',{badarg,_}} = (catch Appender(Bin2)),
+
+ %% Create a writable binary.
+ Empty = id(<<>>),
+ Bin3 = <<Empty/bitstring,Bin0/bitstring>>,
+ {'EXIT',{badarg,_}} = (catch Appender(Bin3)),
+ ok.
+
+append_unit_3(Bin) ->
+ <<Bin/binary-unit:3,0:1>>.
+
+append_unit_4(Bin) ->
+ <<Bin/binary-unit:4,0:1>>.
+
+append_unit_8(Bin) ->
+ <<Bin/binary,0:1>>.
+
+append_unit_16(Bin) ->
+ <<Bin/binary-unit:16,0:1>>.
+
id(I) -> I.
diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl
index 3a29fd4d68..32e907ca69 100644
--- a/erts/emulator/test/busy_port_SUITE.erl
+++ b/erts/emulator/test/busy_port_SUITE.erl
@@ -148,9 +148,9 @@ message_order(Config) when is_list(Config) ->
send_to_busy_1(Parent) ->
{Owner, Slave} = get_slave(),
- Slave ! {Owner, {command, "set_me_busy"}},
- Slave ! {Owner, {command, "hello"}},
- Slave ! {Owner, {command, "hello again"}},
+ (catch port_command(Slave, "set_me_busy")),
+ (catch port_command(Slave, "hello")),
+ (catch port_command(Slave, "hello again")),
receive
Message ->
Parent ! {self(), Message}
@@ -193,10 +193,10 @@ system_monitor(Config) when is_list(Config) ->
?line Busy =
spawn_link(
fun() ->
- Slave ! {Owner,{command,"set busy"}},
+ (catch port_command(Slave, "set busy")),
receive {Parent,alpha} -> ok end,
- Slave ! {Owner,{command,"busy"}},
- Slave ! {Owner,{command,"free"}},
+ (catch port_command(Slave, "busy")),
+ (catch port_command(Slave, "free")),
Parent ! {self(),alpha},
command(lock),
receive {Parent,beta} -> ok end,
@@ -212,7 +212,7 @@ system_monitor(Config) when is_list(Config) ->
?line Void = rec(Void),
?line Busy ! {self(), beta},
?line {monitor,Owner,busy_port,Slave} = rec(Void),
- ?line Master ! {Owner, {command, "u"}},
+ ?line port_command(Master, "u"),
?line {Busy,beta} = rec(Void),
?line Void = rec(Void),
?line _NewMonitor = erlang:system_monitor(OldMonitor),
@@ -296,9 +296,9 @@ no_trap_exit_process(ResultTo, Link, Config) ->
linked -> ok;
unlink -> unlink(Slave)
end,
- ?line Slave ! {self(), {command, "lock port"}},
+ ?line (catch port_command(Slave, "lock port")),
?line ResultTo ! {self(), port_created, Slave},
- ?line Slave ! {self(), {command, "suspend me"}},
+ ?line (catch port_command(Slave, "suspend me")),
ok.
%% Assuming the following scenario,
@@ -339,9 +339,9 @@ busy_port_exit_process(ResultTo, Config) ->
?line load_busy_driver(Config),
?line _Master = open_port({spawn, "busy_drv master"}, [eof]),
?line Slave = open_port({spawn, "busy_drv slave"}, [eof]),
- ?line Slave ! {self(), {command, "lock port"}},
+ ?line (catch port_command(Slave, "lock port")),
?line ResultTo ! {self(), port_created, Slave},
- ?line Slave ! {self(), {command, "suspend me"}},
+ ?line (catch port_command(Slave, "suspend me")),
receive
{'EXIT', Slave, die} ->
ResultTo ! {self(), ok};
@@ -383,8 +383,8 @@ multiple_writers(Config) when is_list(Config) ->
quick_writer() ->
{Owner, Port} = get_slave(),
- Port ! {Owner, {command, "port to busy"}},
- Port ! {Owner, {command, "lock me"}},
+ (catch port_command(Port, "port to busy")),
+ (catch port_command(Port, "lock me")),
ok.
hard_busy_driver(Config) when is_list(Config) ->
@@ -644,11 +644,11 @@ loop(Master, Slave) ->
Pid ! {busy_drv_reply, {self(), Slave}},
loop(Master, Slave);
{Pid, unlock} ->
- Master ! {self(), {command, "u"}},
+ port_command(Master, "u"),
Pid ! {busy_drv_reply, ok},
loop(Master, Slave);
{Pid, lock} ->
- Master ! {self(), {command, "l"}},
+ port_command(Master, "l"),
Pid ! {busy_drv_reply, ok},
loop(Master, Slave);
{Pid, {port_command,Data}} ->
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index 9d80b01748..eaecd32f95 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -25,6 +25,7 @@
init_per_testcase/2,end_per_testcase/2,
hipe/1,process_specs/1,basic/1,flags/1,errors/1,pam/1,change_pam/1,
return_trace/1,exception_trace/1,on_load/1,deep_exception/1,
+ upgrade/1,
exception_nocatch/1,bit_syntax/1]).
%% Helper functions.
@@ -46,6 +47,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
Common = [errors, on_load],
NotHipe = [process_specs, basic, flags, pam, change_pam,
+ upgrade,
return_trace, exception_trace, deep_exception,
exception_nocatch, bit_syntax],
Hipe = [hipe],
@@ -76,7 +78,13 @@ init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
end_per_testcase(_Func, Config) ->
Dog = ?config(watchdog, Config),
- ?t:timetrap_cancel(Dog).
+ ?t:timetrap_cancel(Dog),
+
+ %% Reloading the module will clear all trace patterns and, in a
+ %% debug-compiled emulator, run assertions on the counters for the
+ %% number of traced exported functions in this module.
+
+ c:l(?MODULE).
hipe(Config) when is_list(Config) ->
?line 0 = erlang:trace_pattern({?MODULE,worker_foo,1}, true),
@@ -185,8 +193,13 @@ basic() ->
%% Trace some functions...
?line trace_func({lists,'_','_'}, []),
+
+ %% Make sure that tracing the same functions more than once
+ %% does not cause any problems.
+ ?line 3 = trace_func({?MODULE,foo,'_'}, true),
?line 3 = trace_func({?MODULE,foo,'_'}, true),
?line 1 = trace_func({?MODULE,bar,0}, true),
+ ?line 1 = trace_func({?MODULE,bar,0}, true),
?line {traced,global} = trace_info({?MODULE,bar,0}, traced),
?line 1 = trace_func({erlang,list_to_integer,1}, true),
?line {traced,global} = trace_info({erlang,list_to_integer,1}, traced),
@@ -267,6 +280,118 @@ foo() -> foo0.
foo(X) -> X+1.
foo(X, Y) -> X+Y.
+
+%% Note that the semantics verified by this test case are not explicitly
+%% specified in the documentation (at least not in what I could find in R15B).
+%% This test case was written to verify that we do not change any behaviour
+%% with the introduction of "block-free" upgrade in R16.
+%% In short: do not refer to this test case as an authority on how it must work.
+upgrade(doc) ->
+ "Test tracing on module being upgraded";
+upgrade(Config) when is_list(Config) ->
+ V1 = compile_version(my_upgrade_test, 1, Config),
+ V2 = compile_version(my_upgrade_test, 2, Config),
+ start_tracer(),
+ upgrade_do(V1, V2, false),
+ upgrade_do(V1, V2, true).
+
+upgrade_do(V1, V2, TraceLocalVersion) ->
+ {module,my_upgrade_test} = erlang:load_module(my_upgrade_test, V1),
+
+
+ %% Test that trace is cleared after load_module
+
+ trace_func({my_upgrade_test,'_','_'}, [], [global]),
+ case TraceLocalVersion of
+ true -> trace_func({my_upgrade_test,local_version,0}, [], [local]);
+ _ -> ok
+ end,
+ 1 = my_upgrade_test:version(),
+ 1 = my_upgrade_test:do_local(),
+ 1 = my_upgrade_test:do_real_local(),
+ put('F1_exp', my_upgrade_test:make_fun_exp()),
+ put('F1_loc', my_upgrade_test:make_fun_local()),
+ 1 = (get('F1_exp'))(),
+ 1 = (get('F1_loc'))(),
+
+ Self = self(),
+ expect({trace,Self,call,{my_upgrade_test,version,[]}}),
+ expect({trace,Self,call,{my_upgrade_test,do_local,[]}}),
+ expect({trace,Self,call,{my_upgrade_test,do_real_local,[]}}),
+ case TraceLocalVersion of
+ true -> expect({trace,Self,call,{my_upgrade_test,local_version,[]}});
+ _ -> ok
+ end,
+ expect({trace,Self,call,{my_upgrade_test,make_fun_exp,[]}}),
+ expect({trace,Self,call,{my_upgrade_test,make_fun_local,[]}}),
+ expect({trace,Self,call,{my_upgrade_test,version,[]}}), % F1_exp
+ case TraceLocalVersion of
+ true -> expect({trace,Self,call,{my_upgrade_test,local_version,[]}}); % F1_loc
+ _ -> ok
+ end,
+
+ {module,my_upgrade_test} = erlang:load_module(my_upgrade_test, V2),
+ 2 = my_upgrade_test:version(),
+ put('F2_exp', my_upgrade_test:make_fun_exp()),
+ put('F2_loc', my_upgrade_test:make_fun_local()),
+ 2 = (get('F1_exp'))(),
+ 1 = (get('F1_loc'))(),
+ 2 = (get('F2_exp'))(),
+ 2 = (get('F2_loc'))(),
+ expect(),
+
+ put('F1_exp', undefined),
+ put('F1_loc', undefined),
+ erlang:garbage_collect(),
+ erlang:purge_module(my_upgrade_test),
+
+ % Test that trace is cleared after delete_module
+
+ trace_func({my_upgrade_test,'_','_'}, [], [global]),
+ case TraceLocalVersion of
+ true -> trace_func({my_upgrade_test,local_version,0}, [], [local]);
+ _ -> ok
+ end,
+ 2 = my_upgrade_test:version(),
+ 2 = my_upgrade_test:do_local(),
+ 2 = my_upgrade_test:do_real_local(),
+ 2 = (get('F2_exp'))(),
+ 2 = (get('F2_loc'))(),
+
+ expect({trace,Self,call,{my_upgrade_test,version,[]}}),
+ expect({trace,Self,call,{my_upgrade_test,do_local,[]}}),
+ expect({trace,Self,call,{my_upgrade_test,do_real_local,[]}}),
+ case TraceLocalVersion of
+ true -> expect({trace,Self,call,{my_upgrade_test,local_version,[]}});
+ _ -> ok
+ end,
+ expect({trace,Self,call,{my_upgrade_test,version,[]}}), % F2_exp
+ case TraceLocalVersion of
+ true -> expect({trace,Self,call,{my_upgrade_test,local_version,[]}}); % F2_loc
+ _ -> ok
+ end,
+
+ true = erlang:delete_module(my_upgrade_test),
+ {'EXIT',{undef,_}} = (catch my_upgrade_test:version()),
+ {'EXIT',{undef,_}} = (catch ((get('F2_exp'))())),
+ 2 = (get('F2_loc'))(),
+ expect(),
+
+ put('F2_exp', undefined),
+ put('F2_loc', undefined),
+ erlang:garbage_collect(),
+ erlang:purge_module(my_upgrade_test),
+ ok.
+
+compile_version(Module, Version, Config) ->
+ Data = ?config(data_dir, Config),
+ File = filename:join(Data, atom_to_list(Module)),
+ {ok,Module,Bin} = compile:file(File, [{d,'VERSION',Version},
+ binary,report]),
+ Bin.
+
+
+
%% Test flags (arity, timestamp) for call_trace/3.
%% Also, test the '{tracer,Pid}' option.
flags(_Config) ->
@@ -1151,11 +1276,13 @@ trace_info(What, Key) ->
Res.
trace_func(MFA, MatchSpec) ->
- get(tracer) ! {apply,self(),{erlang,trace_pattern,[MFA, MatchSpec]}},
+ trace_func(MFA, MatchSpec, []).
+trace_func(MFA, MatchSpec, Flags) ->
+ get(tracer) ! {apply,self(),{erlang,trace_pattern,[MFA, MatchSpec, Flags]}},
Res = receive
{apply_result,Result} -> Result
end,
- ok = io:format("trace_pattern(~p, ~p) -> ~p", [MFA,MatchSpec,Res]),
+ ok = io:format("trace_pattern(~p, ~p, ~p) -> ~p", [MFA,MatchSpec,Flags,Res]),
Res.
trace_pid(Pid, On, Flags) ->
diff --git a/erts/emulator/test/call_trace_SUITE_data/my_upgrade_test.erl b/erts/emulator/test/call_trace_SUITE_data/my_upgrade_test.erl
new file mode 100644
index 0000000000..11b8a95209
--- /dev/null
+++ b/erts/emulator/test/call_trace_SUITE_data/my_upgrade_test.erl
@@ -0,0 +1,26 @@
+-module(my_upgrade_test).
+
+-export([version/0]).
+-export([do_local/0]).
+-export([do_real_local/0]).
+-export([make_fun_exp/0]).
+-export([make_fun_local/0]).
+
+
+version() ->
+ ?VERSION.
+
+do_local() ->
+ version().
+
+do_real_local() ->
+ local_version().
+
+local_version() ->
+ ?VERSION.
+
+make_fun_exp() ->
+ fun() -> ?MODULE:version() end.
+
+make_fun_local() ->
+ fun() -> local_version() end.
diff --git a/erts/emulator/test/code_parallel_load_SUITE.erl b/erts/emulator/test/code_parallel_load_SUITE.erl
new file mode 100644
index 0000000000..aa9e4c96c6
--- /dev/null
+++ b/erts/emulator/test/code_parallel_load_SUITE.erl
@@ -0,0 +1,198 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% Author: Björn-Egil Dahlberg
+
+-module(code_parallel_load_SUITE).
+-export([
+ all/0,
+ suite/0,
+ init_per_suite/1,
+ end_per_suite/1,
+ init_per_testcase/2,
+ end_per_testcase/2
+ ]).
+
+-export([
+ multiple_load_check_purge_repeat/1,
+ many_load_distributed_only_once/1
+ ]).
+
+-define(model, code_parallel_load_SUITE_model).
+-define(interval, 50).
+-define(number_of_processes, 160).
+-define(passes, 4).
+
+
+-include_lib("test_server/include/test_server.hrl").
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ multiple_load_check_purge_repeat,
+ many_load_distributed_only_once
+ ].
+
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
+ Dog=?t:timetrap(?t:minutes(3)),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(_Func, Config) ->
+ SConf = ?config(save_config, Config),
+ Pids = proplists:get_value(purge_pids, SConf),
+
+ case check_old_code(?model) of
+ true -> check_and_purge_processes_code(Pids, ?model);
+ _ -> ok
+ end,
+ case erlang:delete_module(?model) of
+ true -> check_and_purge_processes_code(Pids, ?model);
+ _ -> ok
+ end,
+ Dog=?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog).
+
+
+multiple_load_check_purge_repeat(_Conf) ->
+ Ts = [v1,v2,v3,v4,v5,v6],
+
+ %% Generate code that receives a token, switches to the new code, and
+ %% then matches the received token against a token embedded as a literal
+ %% in the new code; the two should be identical.
+ %% (Smoke test for parallel code loading.)
+ Codes = [{T, generate(?model, [], [
+ format("check(T) -> receive {_Pid, change, T1} -> "
+ " ~w:check(T1)\n"
+ " after 0 -> T = f(), check(T) end.\n", [?model]),
+ format("f() -> ~w.~n", [T])
+ ])} || T <- Ts],
+
+ Pids = setup_code_changer(Codes),
+ {save_config, [{purge_pids,Pids}]}.
+
+setup_code_changer([{Token,Code}|Cs] = Codes) ->
+ {module, ?model} = erlang:load_module(?model,Code),
+ Pids = setup_checkers(Token,?number_of_processes),
+ code_changer(Cs, Codes, ?interval,Pids,?passes),
+ Pids.
+
+code_changer(_, _, _, Pids, 0) ->
+ [unlink(Pid) || Pid <- Pids],
+ [exit(Pid, die) || Pid <- Pids],
+ io:format("done~n"),
+ ok;
+code_changer([], Codes, T, Pids, Ps) ->
+ code_changer(Codes, Codes, T, Pids, Ps - 1);
+code_changer([{Token,Code}|Cs], Codes, T, Pids, Ps) ->
+ receive after T ->
+ io:format("load code with token ~4w : pass ~4w~n", [Token, Ps]),
+ {module, ?model} = erlang:load_module(?model, Code),
+ % This is the second time we call load_module for this module,
+ % so it should have old code.
+ [Pid ! {self(), change, Token} || Pid <- Pids],
+ % Should we wait a moment or just blatantly try to check and purge repeatedly?
+ receive after 1 -> ok end,
+ ok = check_and_purge_processes_code(Pids, ?model),
+ code_changer(Cs, Codes, T, Pids, Ps)
+ end.
+
+
+
+many_load_distributed_only_once(_Conf) ->
+ Ts = [<<"first version">>, <<"second version">>],
+
+ [{Token1,Code1},{Token2, Code2}] = [{T, generate(?model, [], [
+ "check({<<\"second version\">> = V, Pid}) -> V = f(), Pid ! {self(), completed, V}, ok;\n" ++
+ format("check(T) -> receive {Pid, change, T1, B} -> "
+ " Res = erlang:load_module(~w, B), Pid ! {self(), change, Res},\n"
+ " ~w:check({T1, Pid})\n"
+ " after 0 -> T = f(), check(T) end.\n", [?model, ?model]),
+ format("f() -> ~w.~n", [T])
+ ])} || T <- Ts],
+
+
+ {module, ?model} = erlang:load_module(?model, Code1),
+ Pids = setup_checkers(Token1,?number_of_processes),
+
+ receive after 1000 -> ok end, % give 'em some time to spin up
+ [Pid ! {self(), change, Token2, Code2} || Pid <- Pids],
+ Loads = [receive {Pid, change, Res} -> Res end || Pid <- Pids],
+ [receive {Pid, completed, Token2} -> ok end || Pid <- Pids],
+
+ ok = ensure_only_one_load(Loads, 0),
+ {save_config, [{purge_pids,Pids}]}.
+
+ensure_only_one_load([], 1) -> ok;
+ensure_only_one_load([], _) -> too_many_loads;
+ensure_only_one_load([{module, ?model}|Loads], N) ->
+ ensure_only_one_load(Loads, N + 1);
+ensure_only_one_load([{error, not_purged}|Loads], N) ->
+ ensure_only_one_load(Loads, N).
+% no other return values are allowed from load_module
+
+
+%% aux
+
+setup_checkers(_,0) -> [];
+setup_checkers(T,N) -> [spawn_link(fun() -> ?model:check(T) end) | setup_checkers(T, N-1)].
+
+check_and_purge_processes_code(Pids, M) ->
+ check_and_purge_processes_code(Pids, M, []).
+check_and_purge_processes_code([], M, []) ->
+ erlang:purge_module(M),
+ ok;
+check_and_purge_processes_code([], M, Pending) ->
+ io:format("Processes ~w are still executing old code - retrying.~n", [Pending]),
+ check_and_purge_processes_code(Pending, M, []);
+check_and_purge_processes_code([Pid|Pids], M, Pending) ->
+ case erlang:check_process_code(Pid, M) of
+ false ->
+ check_and_purge_processes_code(Pids, M, Pending);
+ true ->
+ check_and_purge_processes_code(Pids, M, [Pid|Pending])
+ end.
+
+
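+%% Compile a module from a list of function definitions given as strings.
+%% Illustrative use (module name and function string are hypothetical):
+%%
+%%     Bin = generate(foo, [], ["bar() -> ok.\n"]),
+%%     {module,foo} = erlang:load_module(foo, Bin).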
+generate(Module, Attributes, FunStrings) ->
+ FunForms = function_forms(FunStrings),
+ Forms = [
+ {attribute,1,module,Module},
+ {attribute,2,export,[FA || {FA,_} <- FunForms]}
+ ] ++ [{attribute, 3, A, V}|| {A, V} <- Attributes] ++
+ [ Function || {_, Function} <- FunForms],
+ {ok, Module, Bin} = compile:forms(Forms),
+ Bin.
+
+
+function_forms([]) -> [];
+function_forms([S|Ss]) ->
+ {ok, Ts,_} = erl_scan:string(S),
+ {ok, Form} = erl_parse:parse_form(Ts),
+ Fun = element(3, Form),
+ Arity = element(4, Form),
+ [{{Fun,Arity}, Form}|function_forms(Ss)].
+
+format(F,Ts) -> lists:flatten(io_lib:format(F, Ts)).
diff --git a/erts/emulator/test/ddll_SUITE.erl b/erts/emulator/test/ddll_SUITE.erl
index 6e15c228cd..4675cab15c 100644
--- a/erts/emulator/test/ddll_SUITE.erl
+++ b/erts/emulator/test/ddll_SUITE.erl
@@ -136,8 +136,8 @@ delayed_unload_with_ports(Config) when is_list(Config) ->
?line {ok,pending_driver,Ref} = erl_ddll:try_unload(echo_drv,[{monitor, pending_driver}]),
?line ok = receive _ -> false after 0 -> ok end,
?line Port ! {self(), close},
- ?line 1 = erl_ddll:info(echo_drv, port_count),
?line ok = receive {Port,closed} -> ok after 1000 -> false end,
+ ?line 1 = erl_ddll:info(echo_drv, port_count),
?line Port2 ! {self(), close},
?line ok = receive {Port2,closed} -> ok after 1000 -> false end,
?line ok = receive {'DOWN', Ref, driver, echo_drv, unloaded} -> ok after 1000 -> false end,
diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl
index a833e357cf..f3a177faf2 100644
--- a/erts/emulator/test/distribution_SUITE.erl
+++ b/erts/emulator/test/distribution_SUITE.erl
@@ -98,19 +98,6 @@ end_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
Dog=?config(watchdog, Config),
?t:timetrap_cancel(Dog).
-%%% Don't be too hard on vxworks, the cross server gets nodedown
-%%% cause the card is too busy if we don't sleep a little between pings.
-sleep() ->
- case os:type() of
- vxworks ->
- receive
- after 10 ->
- ok
- end;
- _ ->
- ok
- end.
-
ping(doc) ->
["Tests pinging a node in different ways."];
ping(Config) when is_list(Config) ->
@@ -122,23 +109,21 @@ ping(Config) when is_list(Config) ->
?line Host = hostname(),
?line BadName = list_to_atom("__pucko__@" ++ Host),
?line io:format("Pinging ~s (assumed to not exist)", [BadName]),
- ?line test_server:do_times(Times,
- fun() -> pang = net_adm:ping(BadName),
- sleep()
+ ?line test_server:do_times(Times, fun() -> pang = net_adm:ping(BadName)
end),
%% Pings another node.
?line {ok, OtherNode} = start_node(distribution_SUITE_other),
?line io:format("Pinging ~s (assumed to exist)", [OtherNode]),
- ?line test_server:do_times(Times, fun() -> pong = net_adm:ping(OtherNode),sleep() end),
+ ?line test_server:do_times(Times, fun() -> pong = net_adm:ping(OtherNode) end),
?line stop_node(OtherNode),
%% Pings our own node many times.
?line Node = node(),
?line io:format("Pinging ~s (the same node)", [Node]),
- ?line test_server:do_times(Times, fun() -> pong = net_adm:ping(Node),sleep() end),
+ ?line test_server:do_times(Times, fun() -> pong = net_adm:ping(Node) end),
ok.
diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl
index 643357263c..13f18b4563 100644
--- a/erts/emulator/test/driver_SUITE.erl
+++ b/erts/emulator/test/driver_SUITE.erl
@@ -77,7 +77,8 @@
thread_mseg_alloc_cache_clean/1,
otp_9302/1,
thr_free_drv/1,
- async_blast/1]).
+ async_blast/1,
+ thr_msg_blast/1]).
-export([bin_prefix/2]).
@@ -147,7 +148,8 @@ all() ->
thread_mseg_alloc_cache_clean,
otp_9302,
thr_free_drv,
- async_blast].
+ async_blast,
+ thr_msg_blast].
groups() ->
[{timer, [],
@@ -1136,7 +1138,9 @@ check_driver_system_info_result(Result) ->
{{1, 1}, _} ->
?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
-- ?EXPECTED_SYSTEM_INFO_NAMES2),
- ?line ExpNs = lists:sort(Ns)
+ ?line ExpNs = lists:sort(Ns);
+ {{2, 0}, _} ->
+ ?line [] = Ns
end.
chk_sis(SIs, Ns) ->
@@ -2010,7 +2014,64 @@ async_blast(Config) when is_list(Config) ->
?line erlang:display({async_blast_time, AsyncBlastTime}),
?line ok.
+thr_msg_blast_receiver(_Port, N, N) ->
+ ok;
+thr_msg_blast_receiver(Port, N, Max) ->
+ receive
+ {Port, hi} ->
+ thr_msg_blast_receiver(Port, N+1, Max)
+ end.
+
+thr_msg_blast_receiver_proc(Port, Max, Parent, Done) ->
+ case port_control(Port, 0, "") of
+ "receiver" ->
+ spawn(fun () ->
+ thr_msg_blast_receiver_proc(Port, Max+1, Parent, Done)
+ end),
+ thr_msg_blast_receiver(Port, 0, Max);
+ "done" ->
+ Parent ! Done
+ end.
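+
+%% The driver answers port_control with "receiver" until
+%% THR_MSG_BLAST_NO_PROCS processes (including the port owner) have
+%% registered, and with "done" after that; every registered process is
+%% then expected to receive {Port, hi} messages sent from the driver
+%% threads.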
+thr_msg_blast(Config) when is_list(Config) ->
+ case erlang:system_info(smp_support) of
+ false ->
+ {skipped, "Non-SMP emulator; nothing to test..."};
+ true ->
+ Path = ?config(data_dir, Config),
+ erl_ddll:start(),
+ ok = load_driver(Path, thr_msg_blast_drv),
+ MemBefore = driver_alloc_size(),
+ Start = os:timestamp(),
+ Port = open_port({spawn, thr_msg_blast_drv}, []),
+ true = is_port(Port),
+ Done = make_ref(),
+ Me = self(),
+ spawn(fun () ->
+ thr_msg_blast_receiver_proc(Port, 1, Me, Done)
+ end),
+ receive
+ Done -> ok
+ end,
+ ok = thr_msg_blast_receiver(Port, 0, 32*10000),
+ port_close(Port),
+ End = os:timestamp(),
+ receive
+ Garbage ->
+ ?t:fail({received_garbage, Port, Garbage})
+ after 2000 ->
+ ok
+ end,
+ MemAfter = driver_alloc_size(),
+ io:format("MemBefore=~p, MemAfter=~p~n",
+ [MemBefore, MemAfter]),
+ ThrMsgBlastTime = timer:now_diff(End,Start)/1000000,
+ io:format("ThrMsgBlastTime=~p~n", [ThrMsgBlastTime]),
+ MemBefore = MemAfter,
+ Res = {thr_msg_blast_time, ThrMsgBlastTime},
+ erlang:display(Res),
+ Res
+ end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Utilities
diff --git a/erts/emulator/test/driver_SUITE_data/Makefile.src b/erts/emulator/test/driver_SUITE_data/Makefile.src
index 9cc107cc66..b667dff6b6 100644
--- a/erts/emulator/test/driver_SUITE_data/Makefile.src
+++ b/erts/emulator/test/driver_SUITE_data/Makefile.src
@@ -14,7 +14,8 @@ MISC_DRVS = outputv_drv@dll@ \
thr_alloc_drv@dll@ \
otp_9302_drv@dll@ \
thr_free_drv@dll@ \
- async_blast_drv@dll@
+ async_blast_drv@dll@ \
+ thr_msg_blast_drv@dll@
SYS_INFO_DRVS = sys_info_base_drv@dll@ \
sys_info_prev_drv@dll@ \
diff --git a/erts/emulator/test/driver_SUITE_data/chkio_drv.c b/erts/emulator/test/driver_SUITE_data/chkio_drv.c
index 40f1ad4fea..faf1040276 100644
--- a/erts/emulator/test/driver_SUITE_data/chkio_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/chkio_drv.c
@@ -17,7 +17,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(VXWORKS)
+#if !defined(__WIN32__)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c b/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c
index e6a3edcd74..1e107309df 100644
--- a/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/io_ready_exit_drv.c
@@ -17,7 +17,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(VXWORKS)
+#if !defined(__WIN32__)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
index b2cc1e785a..d174771629 100644
--- a/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/ioq_exit_drv.c
@@ -29,7 +29,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(VXWORKS)
+#if !defined(__WIN32__)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c b/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c
index e7d9a294fa..851f2c745b 100644
--- a/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/missing_callback_drv.c
@@ -17,7 +17,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(VXWORKS)
+#if !defined(__WIN32__)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c
index 8e203f74ec..0c86a26604 100644
--- a/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/peek_non_existing_queue_drv.c
@@ -28,7 +28,7 @@
*/
#ifndef UNIX
-#if !defined(__WIN32__) && !defined(VXWORKS)
+#if !defined(__WIN32__)
#define UNIX 1
#endif
#endif
diff --git a/erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c b/erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c
new file mode 100644
index 0000000000..1070678d7b
--- /dev/null
+++ b/erts/emulator/test/driver_SUITE_data/thr_msg_blast_drv.c
@@ -0,0 +1,178 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2012. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+#include "erl_driver.h"
+
+#define THR_MSG_BLAST_NO_PROCS 10
+#define THR_MSG_BLAST_NO_SENDS_PER_PROC 10000
+
+#define THR_MSG_BLAST_THREADS 32
+
+static void stop(ErlDrvData drv_data);
+static ErlDrvData start(ErlDrvPort port,
+ char *command);
+static ErlDrvSSizeT control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen);
+
+static ErlDrvEntry thr_msg_blast_drv_entry = {
+ NULL /* init */,
+ start,
+ stop,
+ NULL /* output */,
+ NULL /* ready_input */,
+ NULL /* ready_output */,
+ "thr_msg_blast_drv",
+ NULL /* finish */,
+ NULL /* handle */,
+ control,
+ NULL /* timeout */,
+ NULL /* outputv */,
+ NULL /* ready_async */,
+ NULL /* flush */,
+ NULL /* call */,
+ NULL /* event */,
+ ERL_DRV_EXTENDED_MARKER,
+ ERL_DRV_EXTENDED_MAJOR_VERSION,
+ ERL_DRV_EXTENDED_MINOR_VERSION,
+ ERL_DRV_FLAG_USE_PORT_LOCKING,
+ NULL /* handle2 */,
+ NULL /* handle_monitor */
+};
+
+typedef struct {
+ ErlDrvPort port;
+ ErlDrvTermData td_port;
+ ErlDrvTermData hi;
+ ErlDrvTid tid[THR_MSG_BLAST_THREADS];
+ int no_thrs;
+ ErlDrvTermData proc[THR_MSG_BLAST_NO_PROCS];
+ int no_procs;
+} thr_msg_blast_data_t;
+
+
+DRIVER_INIT(thr_msg_blast_drv)
+{
+ return &thr_msg_blast_drv_entry;
+}
+
+static void stop(ErlDrvData drv_data)
+{
+ int i;
+ thr_msg_blast_data_t *tmbd = (thr_msg_blast_data_t *) drv_data;
+ for (i = 0; i < tmbd->no_thrs; i++)
+ erl_drv_thread_join(tmbd->tid[i], NULL);
+ driver_free((void *) tmbd);
+}
+
+static ErlDrvData start(ErlDrvPort port,
+ char *command)
+{
+ thr_msg_blast_data_t *tmbd;
+
+ tmbd = driver_alloc(sizeof(thr_msg_blast_data_t));
+ if (!tmbd)
+ return ERL_DRV_ERROR_GENERAL;
+
+ tmbd->port = port;
+ tmbd->td_port = driver_mk_port(port);
+ tmbd->hi = driver_mk_atom("hi");
+ tmbd->no_thrs = 0;
+ tmbd->no_procs = 1;
+ tmbd->proc[0] = driver_caller(port);
+
+ return (ErlDrvData) tmbd;
+}
+
+static void *thread(void *);
+
+static ErlDrvSSizeT control(ErlDrvData drv_data,
+ unsigned int command,
+ char *buf, ErlDrvSizeT len,
+ char **rbuf, ErlDrvSizeT rlen)
+{
+ thr_msg_blast_data_t *tmbd = (thr_msg_blast_data_t *) drv_data;
+ char *res_str = "error";
+
+ if (tmbd->no_procs >= THR_MSG_BLAST_NO_PROCS) {
+ int i;
+ for (i = 0; i < tmbd->no_thrs; i++)
+ erl_drv_thread_join(tmbd->tid[i], NULL);
+ tmbd->no_thrs = 0;
+ res_str = "done";
+ }
+ else {
+
+ tmbd->proc[tmbd->no_procs++] = driver_caller(tmbd->port);
+
+ if (tmbd->no_procs == THR_MSG_BLAST_NO_PROCS) {
+ for (tmbd->no_thrs = 0;
+ tmbd->no_thrs < THR_MSG_BLAST_THREADS;
+ tmbd->no_thrs++) {
+ int res = erl_drv_thread_create("test",
+ &tmbd->tid[tmbd->no_thrs],
+ thread,
+ tmbd,
+ NULL);
+ if (res != 0) {
+ driver_failure_posix(tmbd->port, res);
+ goto done;
+ }
+ }
+ }
+
+ res_str = "receiver";
+ }
+
+ done: {
+ ErlDrvSSizeT res_len = strlen(res_str);
+ if (res_len > rlen) {
+ char *abuf = driver_alloc(sizeof(char)*res_len);
+ if (!abuf)
+ return 0;
+ *rbuf = abuf;
+ }
+
+ memcpy((void *) *rbuf, (void *) res_str, res_len);
+
+ return res_len;
+ }
+}
+
+static void *thread(void *varg)
+{
+ int s, p;
+ thr_msg_blast_data_t *tmbd = (thr_msg_blast_data_t *) varg;
+ ErlDrvTermData spec[] = {
+ ERL_DRV_PORT, tmbd->td_port,
+ ERL_DRV_ATOM, tmbd->hi,
+ ERL_DRV_TUPLE, 2
+ };
+
+ for (s = 0; s < THR_MSG_BLAST_NO_SENDS_PER_PROC; s++) {
+ for (p = 0; p < THR_MSG_BLAST_NO_PROCS; p++) {
+ int res = driver_send_term(tmbd->port, tmbd->proc[p],
+ spec, sizeof(spec)/sizeof(spec[0]));
+ if (p == 0 && res <= 0)
+ abort(); /* Could not send to creator */
+ }
+ }
+ return NULL;
+}
diff --git a/erts/emulator/test/driver_SUITE_data/timer_drv.c b/erts/emulator/test/driver_SUITE_data/timer_drv.c
index 8c3f203a64..57538e0d57 100644
--- a/erts/emulator/test/driver_SUITE_data/timer_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/timer_drv.c
@@ -1,11 +1,3 @@
-#ifdef VXWORKS
-#include <vxWorks.h>
-#include <taskVarLib.h>
-#include <taskLib.h>
-#include <sysLib.h>
-#include <string.h>
-#include <ioLib.h>
-#endif
#include <stdio.h>
#include "erl_driver.h"
@@ -84,12 +76,8 @@ static void timer_read(ErlDrvData p, char *buf, ErlDrvSizeT len)
driver_output(port, reply, 1);
} else if (buf[0] == DELAY_START_TIMER) {
#ifndef __WIN32__
-#ifdef VXWORKS
- taskDelay(sysClkRateGet());
-#else
sleep(1);
#endif
-#endif
driver_set_timer(port, get_int32(buf + 1));
}
}
diff --git a/erts/emulator/test/emulator.spec.vxworks b/erts/emulator/test/emulator.spec.vxworks
deleted file mode 100644
index 55675bdc29..0000000000
--- a/erts/emulator/test/emulator.spec.vxworks
+++ /dev/null
@@ -1,26 +0,0 @@
-{topcase, {dir, "../emulator_test"}}.
-
-% Added since R11
-{skip,{distribution_SUITE,link_to_dead_new_node,"Does not work in distributed test environments"}}.
-{skip,{binary_SUITE,terms_float,"Floats, VxWorks, PPC = Floating points never equal..."}}.
-{skip,{system_info_SUITE,process_count,"Fix-allocs starving VxWorks cards"}}.
-{skip,{monitor_SUITE,mixer,"Fix-allocs starving VxWorks cards"}}.
-
-{skip,{node_container_SUITE,"Too memory consuming..."}}.
-
-{skip,{trace_SUITE,system_monitor_long_gc_1,"Too memory consuming..."}}.
-{skip,{trace_SUITE,system_monitor_long_gc_2,"Too memory consuming..."}}.
-{skip,{trace_SUITE,system_monitor_large_heap_1,"Too memory consuming..."}}.
-{skip,{trace_SUITE,system_monitor_large_heap_2,"Too memory consuming..."}}.
-% End added since R11
-
-{skip, {distribution_SUITE,stop_dist,"Not written to work on VxWorks."}}.
-{skip, {distribution_SUITE,dist_auto_connect_never,
- "Not written to work on VxWorks."}}.
-{skip, {distribution_SUITE,dist_auto_connect_once,
- "Not written to work on VxWorks."}}.
-{skip, {trace_SUITE,system_monitor_long_gc,
- "Too memory consuming for VxWorks cards."}}.
-{skip, {trace_meta_SUITE,stack_grow,
- "Too memory consuming for VxWorks cards."}}.
-{skip, {obsolete_SUITE, "Not on vxworks"}}.
diff --git a/erts/emulator/test/emulator_bench.spec b/erts/emulator/test/emulator_bench.spec
new file mode 100644
index 0000000000..f709d913b7
--- /dev/null
+++ b/erts/emulator/test/emulator_bench.spec
@@ -0,0 +1 @@
+{groups,"../emulator_test",estone_SUITE,[estone_bench]}.
diff --git a/erts/emulator/test/estone_SUITE.erl b/erts/emulator/test/estone_SUITE.erl
index 2417d4bcfe..21834bfa62 100644
--- a/erts/emulator/test/estone_SUITE.erl
+++ b/erts/emulator/test/estone_SUITE.erl
@@ -19,7 +19,7 @@
-module(estone_SUITE).
%% Test functions
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
- init_per_group/2,end_per_group/2,estone/1]).
+ init_per_group/2,end_per_group/2,estone/1,estone_bench/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
%% Internal exports for EStone tests
@@ -46,6 +46,7 @@
-include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct_event.hrl").
%% Test suite defines
-define(default_timeout, ?t:minutes(10)).
@@ -80,7 +81,7 @@ all() ->
[estone].
groups() ->
- [].
+ [{estone_bench, [{repeat,50}],[estone_bench]}].
init_per_suite(Config) ->
Config.
@@ -108,6 +109,17 @@ estone(Config) when is_list(Config) ->
?line {comment,Mhz ++ " MHz, " ++
integer_to_list(Stones) ++ " ESTONES"}.
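+
+%% Report each benchmark result to common_test as a benchmark_data event,
+%% using the title of the benchmark as name and its ESTONES as value.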
+estone_bench(Config) ->
+ DataDir = ?config(data_dir,Config),
+ L = ?MODULE:macro(?MODULE:micros(),DataDir),
+ [ct_event:notify(
+ #event{name = benchmark_data,
+ data = [{name,proplists:get_value(title,Mark)},
+ {value,proplists:get_value(estones,Mark)}]})
+ || Mark <- L],
+ L.
+
+
%%
%% Calculate CPU speed
%%
diff --git a/erts/emulator/test/estone_SUITE_data/estone_cat.c b/erts/emulator/test/estone_SUITE_data/estone_cat.c
index 8ed9f8375b..a34bda4384 100644
--- a/erts/emulator/test/estone_SUITE_data/estone_cat.c
+++ b/erts/emulator/test/estone_SUITE_data/estone_cat.c
@@ -12,11 +12,7 @@
#include <fcntl.h>
#include <errno.h>
-#ifdef VXWORKS
-estone_cat(argc, argv)
-#else
main(argc, argv)
-#endif
int argc;
char *argv[];
{
diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl
index ef06845cf2..36ba4e0f48 100644
--- a/erts/emulator/test/fun_SUITE.erl
+++ b/erts/emulator/test/fun_SUITE.erl
@@ -726,8 +726,8 @@ t_arity(Config) when is_list(Config) ->
ok.
t_is_function2(Config) when is_list(Config) ->
- ?line true = is_function({a,b}, 0),
- ?line true = is_function({a,b}, 234343434333433433),
+ false = is_function(id({a,b}), 0),
+ false = is_function(id({a,b}), 234343434333433433),
?line true = is_function(fun() -> ok end, 0),
?line true = is_function(fun(_) -> ok end, 1),
?line false = is_function(fun(_) -> ok end, 0),
diff --git a/erts/emulator/test/gc_SUITE.erl b/erts/emulator/test/gc_SUITE.erl
index 771d2c9a7a..19fa433a53 100644
--- a/erts/emulator/test/gc_SUITE.erl
+++ b/erts/emulator/test/gc_SUITE.erl
@@ -54,17 +54,12 @@ grow_heap(doc) -> ["Produce a growing list of elements, ",
"for X calls, then drop one item per call",
"until the list is empty."];
grow_heap(Config) when is_list(Config) ->
- ?line Dog=test_server:timetrap(test_server:minutes(40)),
- ?line ok=grow_heap1(256),
- case os:type() of
- vxworks ->
- stop_here;
- _ ->
- ?line ok=grow_heap1(512),
- ?line ok=grow_heap1(1024),
- ?line ok=grow_heap1(2048)
- end,
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:minutes(40)),
+ ok = grow_heap1(256),
+ ok = grow_heap1(512),
+ ok = grow_heap1(1024),
+ ok = grow_heap1(2048),
+ test_server:timetrap_cancel(Dog),
ok.
grow_heap1(Len) ->
@@ -82,10 +77,10 @@ grow_heap1(List, MaxLen, CurLen, up) ->
grow_heap1([], _MaxLen, _, down) ->
ok;
grow_heap1([_|List], MaxLen, CurLen, down) ->
- ?line {_,_,C}=erlang:now(),
- ?line Num=C rem (length(List))+1,
- ?line Elem=lists:nth(Num, List),
- ?line NewList=lists:delete(Elem, List),
+ {_,_,C} = erlang:now(),
+ Num = C rem (length(List))+1,
+ Elem = lists:nth(Num, List),
+ NewList = lists:delete(Elem, List),
grow_heap1(NewList, MaxLen, CurLen-1, down).
@@ -93,16 +88,11 @@ grow_heap1([_|List], MaxLen, CurLen, down) ->
grow_stack(doc) -> ["Increase and decrease stack size, and ",
"drop off some garbage from time to time."];
grow_stack(Config) when is_list(Config) ->
- ?line Dog=test_server:timetrap(test_server:minutes(80)),
+ Dog = test_server:timetrap(test_server:minutes(80)),
show_heap("before:"),
- case os:type() of
- vxworks ->
- ?line grow_stack1(25, 0);
- _ ->
- ?line grow_stack1(200, 0)
- end,
+ grow_stack1(200, 0),
show_heap("after:"),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
grow_stack1(0, _) ->
@@ -123,16 +113,11 @@ grow_stack_heap(doc) -> ["While growing the heap, bounces the size ",
"of the stack, and while reducing the heap",
"bounces the stack usage."];
grow_stack_heap(Config) when is_list(Config) ->
- case os:type() of
- vxworks ->
- {comment, "Takes too long to run on VxWorks/cpu32"};
- _ ->
- ?line Dog=test_server:timetrap(test_server:minutes(40)),
- ?line grow_stack_heap1(16),
- ?line grow_stack_heap1(32),
- ?line test_server:timetrap_cancel(Dog),
- ok
- end.
+ Dog = test_server:timetrap(test_server:minutes(40)),
+ grow_stack_heap1(16),
+ grow_stack_heap1(32),
+ test_server:timetrap_cancel(Dog),
+ ok.
grow_stack_heap1(MaxLen) ->
io:format("~ngrow_stack_heap with ~p items.",[MaxLen]),
@@ -151,10 +136,10 @@ grow_stack_heap1(List, MaxLen, CurLen, up) ->
grow_stack_heap1([], _MaxLen, _, down) -> ok;
grow_stack_heap1([_|List], MaxLen, CurLen, down) ->
grow_stack1(CurLen*2,0),
- ?line {_,_,C}=erlang:now(),
- ?line Num=C rem (length(List))+1,
- ?line Elem=lists:nth(Num, List),
- ?line NewList=lists:delete(Elem, List),
+ {_,_,C}=erlang:now(),
+ Num=C rem (length(List))+1,
+ Elem=lists:nth(Num, List),
+ NewList=lists:delete(Elem, List),
grow_stack_heap1(NewList, MaxLen, CurLen-1, down),
ok.
diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl
index 461773114e..d5cb4ee1b7 100644
--- a/erts/emulator/test/match_spec_SUITE.erl
+++ b/erts/emulator/test/match_spec_SUITE.erl
@@ -28,7 +28,7 @@
unary_plus/1, unary_minus/1, moving_labels/1]).
-export([fpe/1]).
-export([otp_9422/1]).
-
+-export([faulty_seq_trace/1, do_faulty_seq_trace/0]).
-export([runner/2, loop_runner/3]).
-export([f1/1, f2/2, f3/2, fn/1, fn/2, fn/3]).
-export([do_boxed_and_small/0]).
@@ -59,6 +59,7 @@ all() ->
ms_trace3, boxed_and_small, destructive_in_test_bif,
guard_exceptions, unary_plus, unary_minus, fpe,
moving_labels,
+ faulty_seq_trace,
otp_9422];
true -> [not_run]
end.
@@ -726,6 +727,19 @@ do_boxed_and_small() ->
{ok, false, _, _} = erlang:match_spec_test({0,3},[{{make_ref(),'_'},[],['$_']}],table),
ok.
+faulty_seq_trace(doc) ->
+ ["Test that faulty seq_trace_call does not crash emulator"];
+faulty_seq_trace(suite) -> [];
+faulty_seq_trace(Config) when is_list(Config) ->
+ ?line {ok, Node} = start_node(match_spec_suite_other),
+ ?line ok = rpc:call(Node,?MODULE,do_faulty_seq_trace,[]),
+ ?line stop_node(Node),
+ ok.
+
+do_faulty_seq_trace() ->
+ {ok,'EXIT',_,_} = erlang:match_spec_test([],[{'_',[],[{message,{set_seq_token,yxa,true}}]}],trace),
+ ok.
+
errchk(Pat) ->
case catch erlang:trace_pattern({?MODULE, f2, 2}, Pat) of
{'EXIT', {badarg, _}} ->
diff --git a/erts/emulator/test/mtx_SUITE_data/Makefile.src b/erts/emulator/test/mtx_SUITE_data/Makefile.src
index b6c843269c..37eb1daa72 100644
--- a/erts/emulator/test/mtx_SUITE_data/Makefile.src
+++ b/erts/emulator/test/mtx_SUITE_data/Makefile.src
@@ -27,4 +27,11 @@ LIBS = @ERTS_LIBS@
all: $(NIF_LIBS)
+mtx_SUITE.c: force_rebuild
+ touch mtx_SUITE.c
+
+force_rebuild:
+ echo "Force rebuild to compensate for emulator type dependencies"
+
+
@SHLIB_RULES@
diff --git a/erts/emulator/test/nofrag_SUITE.erl b/erts/emulator/test/nofrag_SUITE.erl
index 6b6ac28e2e..71567ed0cb 100644
--- a/erts/emulator/test/nofrag_SUITE.erl
+++ b/erts/emulator/test/nofrag_SUITE.erl
@@ -26,7 +26,6 @@
init_per_testcase/2,end_per_testcase/2,
error_handler/1,error_handler_apply/1,
error_handler_fixed_apply/1,error_handler_fun/1,
- error_handler_tuple_fun/1,
debug_breakpoint/1]).
%% Exported functions for an error_handler module.
@@ -37,7 +36,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[error_handler, error_handler_apply,
error_handler_fixed_apply, error_handler_fun,
- error_handler_tuple_fun, debug_breakpoint].
+ debug_breakpoint].
groups() ->
[].
@@ -178,29 +177,6 @@ collect_fun(N, Fun) ->
undefined_lambda(foobarblurf, Fun, Args) when is_function(Fun) ->
Args.
-error_handler_tuple_fun(Config) when is_list(Config) ->
- ?line process_flag(error_handler, ?MODULE),
- ?line Term = collect_tuple_fun(1024, {?MODULE,very_undefined_function}),
- ?line Term = binary_to_term(term_to_binary(Term)),
- ?line 1024 = length(Term),
- ?line [[{foo,bar},42.0,[e,f,g]]] = lists:usort(Term),
- ok.
-
-collect_tuple_fun(0, _) ->
- [];
-collect_tuple_fun(N, Fun) ->
- %% The next line calls the error handle function, which is
- %% ?MODULE:undefined_function/3 (it simply returns the list
- %% of args).
- C = Fun({foo,id(bar)}, 42.0, [e,f,id(g)]),
-
- %% The variable C will be saved onto the stack frame; if C
- %% points into a heap fragment the garbage collector will reach
- %% it and the emulator will crash sooner or later (sooner if
- %% the emulator is debug-compiled).
- Res = collect_tuple_fun(N-1, Fun),
- [C|Res].
-
debug_breakpoint(Config) when is_list(Config) ->
?line process_flag(error_handler, ?MODULE),
?line erts_debug:breakpoint({?MODULE,foobar,5}, true),
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 9fa4df6373..13aa0f4c00 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -90,7 +90,7 @@
mix_up_ports/1, otp_5112/1, otp_5119/1, otp_6224/1,
exit_status_multi_scheduling_block/1, ports/1,
spawn_driver/1, spawn_executable/1, close_deaf_port/1,
- unregister_name/1]).
+ unregister_name/1, parallelism_option/1]).
-export([]).
@@ -114,7 +114,8 @@ all() ->
stderr_to_stdout, otp_3906, otp_4389, win_massive,
mix_up_ports, otp_5112, otp_5119,
exit_status_multi_scheduling_block, ports, spawn_driver,
- spawn_executable, close_deaf_port, unregister_name].
+ spawn_executable, close_deaf_port, unregister_name,
+ parallelism_option].
groups() ->
[{stream, [], [stream_small, stream_big]},
@@ -157,16 +158,16 @@ win_massive(Config) when is_list(Config) ->
end.
do_win_massive() ->
- ?line Dog = test_server:timetrap(test_server:seconds(360)),
- ?line SuiteDir = filename:dirname(code:which(?MODULE)),
- ?line Env = " -env ERL_MAX_PORTS 8192",
- ?line {ok, Node} =
+ Dog = test_server:timetrap(test_server:seconds(360)),
+ SuiteDir = filename:dirname(code:which(?MODULE)),
+ Ports = " +Q 8192",
+ {ok, Node} =
test_server:start_node(win_massive,
slave,
- [{args, " -pa " ++ SuiteDir ++ Env}]),
- ?line ok = rpc:call(Node,?MODULE,win_massive_client,[3000]),
- ?line test_server:stop_node(Node),
- ?line test_server:timetrap_cancel(Dog),
+ [{args, " -pa " ++ SuiteDir ++ Ports}]),
+ ok = rpc:call(Node,?MODULE,win_massive_client,[3000]),
+ test_server:stop_node(Node),
+ test_server:timetrap_cancel(Dog),
ok.
win_massive_client(N) ->
@@ -208,11 +209,11 @@ win_massive_loop(P,N) ->
%% We will send only a small amount of data, to avoid deadlock.
stream_small(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line stream_ping(Config, 512, "", []),
- ?line stream_ping(Config, 1777, "", []),
- ?line stream_ping(Config, 1777, "-s512", []),
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ stream_ping(Config, 512, "", []),
+ stream_ping(Config, 1777, "", []),
+ stream_ping(Config, 1777, "-s512", []),
+ test_server:timetrap_cancel(Dog),
ok.
%% Send big amounts of data (much bigger than the buffer size in port test).
@@ -220,30 +221,22 @@ stream_small(Config) when is_list(Config) ->
%% non-blocking reads and writes.
stream_big(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(180)),
- case os:type() of
- vxworks ->
- %% Don't stress VxWorks too much
- ?line stream_ping(Config, 43755, "", []),
- ?line stream_ping(Config, 51255, "", []),
- ?line stream_ping(Config, 52345, " -s40000", []);
- _ ->
- ?line stream_ping(Config, 43755, "", []),
- ?line stream_ping(Config, 100000, "", []),
- ?line stream_ping(Config, 77777, " -s40000", [])
- end,
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(180)),
+ stream_ping(Config, 43755, "", []),
+ stream_ping(Config, 100000, "", []),
+ stream_ping(Config, 77777, " -s40000", []),
+ test_server:timetrap_cancel(Dog),
ok.
%% Sends packet with header size of 1, 2, and 4, with packets of various
%% sizes.
basic_ping(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(120)),
- ?line ping(Config, sizes(1), 1, "", []),
- ?line ping(Config, sizes(2), 2, "", []),
- ?line ping(Config, sizes(4), 4, "", []),
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(120)),
+ ping(Config, sizes(1), 1, "", []),
+ ping(Config, sizes(2), 2, "", []),
+ ping(Config, sizes(4), 4, "", []),
+ test_server:timetrap_cancel(Dog),
ok.
%% Let the port program insert delays between characters sent back to
@@ -251,30 +244,29 @@ basic_ping(Config) when is_list(Config) ->
%% small chunks rather than all at once.
slow_writes(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
- ?line ping(Config, [8], 4, "-s1", []),
- ?line ping(Config, [10], 2, "-s2", []),
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(20)),
+ ping(Config, [8], 4, "-s1", []),
+ ping(Config, [10], 2, "-s2", []),
+ test_server:timetrap_cancel(Dog),
ok.
bad_packet(doc) ->
["Test that we get {'EXIT', Port, einval} if we try to send a bigger "
"packet than the packet header allows."];
bad_packet(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line PortTest = port_test(Config),
- ?line process_flag(trap_exit, true),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ PortTest = port_test(Config),
+ process_flag(trap_exit, true),
- ?line bad_packet(PortTest, 1, 256),
- ?line bad_packet(PortTest, 1, 257),
- ?line bad_packet(PortTest, 2, 65536),
- ?line bad_packet(PortTest, 2, 65537),
+ bad_packet(PortTest, 1, 256),
+ bad_packet(PortTest, 1, 257),
+ bad_packet(PortTest, 2, 65536),
+ bad_packet(PortTest, 2, 65537),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
bad_packet(PortTest, HeaderSize, PacketSize) ->
- %% Intentionally no ?line macros.
P = open_port({spawn, PortTest}, [{packet, HeaderSize}]),
P ! {self(), {command, make_zero_packet(PacketSize)}},
receive
@@ -292,16 +284,16 @@ make_zero_packet(N) ->
%% Test sending bad messages to a port.
bad_port_messages(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line PortTest = port_test(Config),
- ?line process_flag(trap_exit, true),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ PortTest = port_test(Config),
+ process_flag(trap_exit, true),
- ?line bad_message(PortTest, {a,b}),
- ?line bad_message(PortTest, {a}),
- ?line bad_message(PortTest, {self(),{command,bad_command}}),
- ?line bad_message(PortTest, {self(),{connect,no_pid}}),
+ bad_message(PortTest, {a,b}),
+ bad_message(PortTest, {a}),
+ bad_message(PortTest, {self(),{command,bad_command}}),
+ bad_message(PortTest, {self(),{connect,no_pid}}),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
bad_message(PortTest, Message) ->
@@ -319,108 +311,97 @@ bad_message(PortTest, Message) ->
%% Tests the 'binary' option for a port.
t_binary(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(300)),
+ Dog = test_server:timetrap(test_server:seconds(300)),
%% Packet mode.
- ?line ping(Config, sizes(1), 1, "", [binary]),
- ?line ping(Config, sizes(2), 2, "", [binary]),
- ?line ping(Config, sizes(4), 4, "", [binary]),
+ ping(Config, sizes(1), 1, "", [binary]),
+ ping(Config, sizes(2), 2, "", [binary]),
+ ping(Config, sizes(4), 4, "", [binary]),
%% Stream mode.
- case os:type() of
- vxworks ->
- %% don't stress VxWorks too much
- ?line stream_ping(Config, 435, "", [binary]),
- ?line stream_ping(Config, 43755, "", [binary]),
- ?line stream_ping(Config, 50000, "", [binary]);
- _ ->
- ?line stream_ping(Config, 435, "", [binary]),
- ?line stream_ping(Config, 43755, "", [binary]),
- ?line stream_ping(Config, 100000, "", [binary])
- end,
+ stream_ping(Config, 435, "", [binary]),
+ stream_ping(Config, 43755, "", [binary]),
+ stream_ping(Config, 100000, "", [binary]),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
name1(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(100)),
- ?line PortTest = port_test(Config),
- ?line Command = lists:concat([PortTest, " "]),
- ?line P = open_port({spawn, Command}, []),
- ?line register(myport, P),
- ?line P = whereis(myport),
+ Dog = test_server:timetrap(test_server:seconds(100)),
+ PortTest = port_test(Config),
+ Command = lists:concat([PortTest, " "]),
+ P = open_port({spawn, Command}, []),
+ register(myport, P),
+ P = whereis(myport),
Text = "hej",
- ?line myport ! {self(), {command, Text}},
- ?line receive
- {P, {data, Text}} ->
- ok
- end,
- ?line myport ! {self(), close},
- ?line receive
- {P, closed} -> ok
- end,
- ?line undefined = whereis(myport),
- ?line test_server:timetrap_cancel(Dog),
+ myport ! {self(), {command, Text}},
+ receive
+ {P, {data, Text}} ->
+ ok
+ end,
+ myport ! {self(), close},
+ receive
+ {P, closed} -> ok
+ end,
+ undefined = whereis(myport),
+ test_server:timetrap_cancel(Dog),
ok.
%% Test that the 'eof' option works.
eof(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(100)),
- ?line PortTest = port_test(Config),
- ?line Command = lists:concat([PortTest, " -h0 -q"]),
- ?line P = open_port({spawn, Command}, [eof]),
- ?line receive
- {P, eof} ->
- ok
- end,
- ?line P ! {self(), close},
- ?line receive
- {P, closed} -> ok
- end,
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(100)),
+ PortTest = port_test(Config),
+ Command = lists:concat([PortTest, " -h0 -q"]),
+ P = open_port({spawn, Command}, [eof]),
+ receive
+ {P, eof} ->
+ ok
+ end,
+ P ! {self(), close},
+ receive
+ {P, closed} -> ok
+ end,
+ test_server:timetrap_cancel(Dog),
ok.
%% Tests that the 'in' option for a port works.
input_only(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(300)),
- ?line expect_input(Config, [0, 1, 10, 13, 127, 128, 255], 1, "", [in]),
- ?line expect_input(Config, [0, 1, 255, 2048], 2, "", [in]),
- ?line expect_input(Config, [0, 1, 255, 2048], 4, "", [in]),
- ?line expect_input(Config, [0, 1, 10, 13, 127, 128, 255],
- 1, "", [in, binary]),
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(300)),
+ expect_input(Config, [0, 1, 10, 13, 127, 128, 255], 1, "", [in]),
+ expect_input(Config, [0, 1, 255, 2048], 2, "", [in]),
+ expect_input(Config, [0, 1, 255, 2048], 4, "", [in]),
+ expect_input(Config, [0, 1, 10, 13, 127, 128, 255],
+ 1, "", [in, binary]),
+ test_server:timetrap_cancel(Dog),
ok.
%% Tests that the 'out' option for a port works.
output_only(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(100)),
- ?line Dir = ?config(priv_dir, Config),
- ?line Filename = filename:join(Dir, "output_only_stream"),
- ?line output_and_verify(Config, Filename, "-h0",
- random_packet(35777, "echo")),
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(100)),
+ Dir = ?config(priv_dir, Config),
+ Filename = filename:join(Dir, "output_only_stream"),
+ output_and_verify(Config, Filename, "-h0",
+ random_packet(35777, "echo")),
+ test_server:timetrap_cancel(Dog),
ok.
output_and_verify(Config, Filename, Options, Data) ->
- ?line PortTest = port_test(Config),
- ?line Command = lists:concat([PortTest, " ",
- Options, " -o", Filename]),
- ?line Port = open_port({spawn, Command}, [out]),
- ?line Port ! {self(), {command, Data}},
- ?line Port ! {self(), close},
- ?line receive
- {Port, closed} -> ok
- end,
- Wait_time = case os:type() of
- vxworks -> 5000;
- _ -> 500
- end,
- ?line test_server:sleep(Wait_time),
- ?line {ok, Written} = file:read_file(Filename),
- ?line Data = binary_to_list(Written),
+ PortTest = port_test(Config),
+ Command = lists:concat([PortTest, " ",
+ Options, " -o", Filename]),
+ Port = open_port({spawn, Command}, [out]),
+ Port ! {self(), {command, Data}},
+ Port ! {self(), close},
+ receive
+ {Port, closed} -> ok
+ end,
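+ %% Give the port program time to write the data to the file before it is read back.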
+ Wait_time = 500,
+ test_server:sleep(Wait_time),
+ {ok, Written} = file:read_file(Filename),
+ Data = binary_to_list(Written),
ok.
%% Test that receiving several packages written in the same
@@ -430,19 +411,11 @@ output_and_verify(Config, Filename, Options, Data) ->
%% Basic test of receiving multiple packages, written in
%% one operation by the other end.
mul_basic(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(600)),
- case os:type() of
- vxworks ->
- %% don't stress vxworks too much
- ?line expect_input(Config, [0, 1, 255, 10, 13], 1, "", []),
- ?line expect_input(Config, [0, 10, 13, 1600, 8191, 16383], 2, "", []),
- ?line expect_input(Config, [10, 35000], 4, "", []);
- _ ->
- ?line expect_input(Config, [0, 1, 255, 10, 13], 1, "", []),
- ?line expect_input(Config, [0, 10, 13, 1600, 32767, 65535], 2, "", []),
- ?line expect_input(Config, [10, 70000], 4, "", [])
- end,
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(600)),
+ expect_input(Config, [0, 1, 255, 10, 13], 1, "", []),
+ expect_input(Config, [0, 10, 13, 1600, 32767, 65535], 2, "", []),
+ expect_input(Config, [10, 70000], 4, "", []),
+ test_server:timetrap_cancel(Dog),
ok.
%% Test reading a buffer consisting of several packets, some
@@ -451,9 +424,9 @@ mul_basic(Config) when is_list(Config) ->
%% delays in between.)
mul_slow_writes(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(250)),
- ?line expect_input(Config, [0, 20, 255, 10, 1], 1, "-s64", []),
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(250)),
+ expect_input(Config, [0, 20, 255, 10, 1], 1, "-s64", []),
+ test_server:timetrap_cancel(Dog),
ok.
%% Runs several port tests in parallell. Each individual test
@@ -461,27 +434,27 @@ mul_slow_writes(Config) when is_list(Config) ->
%% should also finish in about 5 seconds.
parallell(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(300)),
- ?line Testers =
- [fun() -> stream_ping(Config, 1007, "-s100", []) end,
- fun() -> stream_ping(Config, 10007, "-s1000", []) end,
- fun() -> stream_ping(Config, 10007, "-s1000", []) end,
-
- fun() -> expect_input(Config, [21, 22, 23, 24, 25], 1,
- "-s10", [in]) end,
-
- fun() -> ping(Config, [10], 1, "-d", []) end,
- fun() -> ping(Config, [20000], 2, "-d", []) end,
- fun() -> ping(Config, [101], 1, "-s10", []) end,
- fun() -> ping(Config, [1001], 2, "-s100", []) end,
- fun() -> ping(Config, [10001], 4, "-s1000", []) end,
-
- fun() -> ping(Config, [501, 501], 2, "-s100", []) end,
- fun() -> ping(Config, [11, 12, 13, 14, 11], 1, "-s5", []) end],
- ?line process_flag(trap_exit, true),
- ?line Pids = lists:map(fun fun_spawn/1, Testers),
- ?line wait_for(Pids),
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(300)),
+ Testers = [
+ fun() -> stream_ping(Config, 1007, "-s100", []) end,
+ fun() -> stream_ping(Config, 10007, "-s1000", []) end,
+ fun() -> stream_ping(Config, 10007, "-s1000", []) end,
+
+ fun() -> expect_input(Config, [21, 22, 23, 24, 25], 1,
+ "-s10", [in]) end,
+
+ fun() -> ping(Config, [10], 1, "-d", []) end,
+ fun() -> ping(Config, [20000], 2, "-d", []) end,
+ fun() -> ping(Config, [101], 1, "-s10", []) end,
+ fun() -> ping(Config, [1001], 2, "-s100", []) end,
+ fun() -> ping(Config, [10001], 4, "-s1000", []) end,
+
+ fun() -> ping(Config, [501, 501], 2, "-s100", []) end,
+ fun() -> ping(Config, [11, 12, 13, 14, 11], 1, "-s5", []) end],
+ process_flag(trap_exit, true),
+ Pids = lists:map(fun fun_spawn/1, Testers),
+ wait_for(Pids),
+ test_server:timetrap_cancel(Dog),
ok.
wait_for([]) ->
@@ -500,29 +473,29 @@ wait_for(Pids) ->
dying_port(suite) -> [];
dying_port(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(150)),
- ?line process_flag(trap_exit, true),
+ Dog = test_server:timetrap(test_server:seconds(150)),
+ process_flag(trap_exit, true),
- ?line P1 = make_dying_port(Config),
- ?line P2 = make_dying_port(Config),
- ?line P3 = make_dying_port(Config),
- ?line P4 = make_dying_port(Config),
- ?line P5 = make_dying_port(Config),
+ P1 = make_dying_port(Config),
+ P2 = make_dying_port(Config),
+ P3 = make_dying_port(Config),
+ P4 = make_dying_port(Config),
+ P5 = make_dying_port(Config),
%% This should be big enough to be sure to block in the write.
- ?line Garbage = random_packet(16384),
+ Garbage = random_packet(16384),
- ?line P1 ! {self(), {command, Garbage}},
- ?line P3 ! {self(), {command, Garbage}},
- ?line P5 ! {self(), {command, Garbage}},
+ P1 ! {self(), {command, Garbage}},
+ P3 ! {self(), {command, Garbage}},
+ P5 ! {self(), {command, Garbage}},
- ?line wait_for_port_exit(P1),
- ?line wait_for_port_exit(P2),
- ?line wait_for_port_exit(P3),
- ?line wait_for_port_exit(P4),
- ?line wait_for_port_exit(P5),
+ wait_for_port_exit(P1),
+ wait_for_port_exit(P2),
+ wait_for_port_exit(P3),
+ wait_for_port_exit(P4),
+ wait_for_port_exit(P5),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
wait_for_port_exit(Port) ->
@@ -549,9 +522,9 @@ make_dying_port(Config) when is_list(Config) ->
port_program_with_path(suite) -> [];
port_program_with_path(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(100)),
- ?line DataDir = ?config(data_dir, Config),
- ?line PrivDir = ?config(priv_dir, Config),
+ Dog = test_server:timetrap(test_server:seconds(100)),
+ DataDir = ?config(data_dir, Config),
+ PrivDir = ?config(priv_dir, Config),
%% Create a copy of the port test program in a directory not
%% included in PATH (i.e. in priv_dir), with the name 'my_port_test.exe'.
@@ -560,36 +533,31 @@ port_program_with_path(Config) when is_list(Config) ->
%% (On Unix, there will be a single file created, which will be
%% a copy of the port program.)
- ?line PortTest = os:find_executable("port_test", DataDir),
+ PortTest = os:find_executable("port_test", DataDir),
io:format("os:find_executable(~p, ~p) returned ~p",
["port_test", DataDir, PortTest]),
- ?line {ok, PortTestPgm} = file:read_file(PortTest),
- ?line NewName = filename:join(PrivDir, filename:basename(PortTest)),
- ?line RedHerring = filename:rootname(NewName),
- ?line ok = file:write_file(RedHerring, "I'm just here to confuse.\n"),
- ?line ok = file:write_file(NewName, PortTestPgm),
- ?line ok = file:write_file_info(NewName, #file_info{mode=8#111}),
- ?line PgmWithPathAndNoExt = filename:rootname(NewName),
+ {ok, PortTestPgm} = file:read_file(PortTest),
+ NewName = filename:join(PrivDir, filename:basename(PortTest)),
+ RedHerring = filename:rootname(NewName),
+ ok = file:write_file(RedHerring, "I'm just here to confuse.\n"),
+ ok = file:write_file(NewName, PortTestPgm),
+ ok = file:write_file_info(NewName, #file_info{mode=8#111}),
+ PgmWithPathAndNoExt = filename:rootname(NewName),
%% Open the port using the path to the copied port test program,
%% but without the .exe extension, and verify that it was started.
%%
%% If the bug is present the open_port call will fail with badarg.
- ?line Command = lists:concat([PgmWithPathAndNoExt, " -h2"]),
- %% allow VxWorks time to write file
- case os:type() of
- vxworks -> test_server:sleep(2500);
- _ -> time
+ Command = lists:concat([PgmWithPathAndNoExt, " -h2"]),
+ P = open_port({spawn, Command}, [{packet, 2}]),
+ Message = "echo back to me",
+ P ! {self(), {command, Message}},
+ receive
+ {P, {data, Message}} ->
+ ok
end,
- ?line P = open_port({spawn, Command}, [{packet, 2}]),
- ?line Message = "echo back to me",
- ?line P ! {self(), {command, Message}},
- ?line receive
- {P, {data, Message}} ->
- ok
- end,
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
@@ -597,56 +565,46 @@ port_program_with_path(Config) when is_list(Config) ->
%% This used to fail on Windows.
open_input_file_port(suite) -> [];
open_input_file_port(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line PrivDir = ?config(priv_dir, Config),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ PrivDir = ?config(priv_dir, Config),
%% Create a file with the file driver and read it back using
%% open_port/2.
- ?line MyFile1 = filename:join(PrivDir, "my_input_file"),
- ?line FileData1 = "An input file",
- ?line ok = file:write_file(MyFile1, FileData1),
- case os:type() of
- vxworks ->
- %% Can't open input file with vanilla driver on VxWorks
- ?line process_flag(trap_exit, true),
- ?line case catch open_port(MyFile1, [in]) of
- {'EXIT', {badarg, _}} ->
- ok
- end;
- _ ->
- ?line case open_port(MyFile1, [in]) of
- InputPort when is_port(InputPort) ->
- ?line receive
- {InputPort, {data, FileData1}} ->
- ok
- end
- end
+ MyFile1 = filename:join(PrivDir, "my_input_file"),
+ FileData1 = "An input file",
+ ok = file:write_file(MyFile1, FileData1),
+ case open_port(MyFile1, [in]) of
+ InputPort when is_port(InputPort) ->
+ receive
+ {InputPort, {data, FileData1}} ->
+ ok
+ end
end,
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
%% Tests that files can be written using open_port(Filename, [out]).
open_output_file_port(suite) -> [];
open_output_file_port(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(100)),
- ?line PrivDir = ?config(priv_dir, Config),
+ Dog = test_server:timetrap(test_server:seconds(100)),
+ PrivDir = ?config(priv_dir, Config),
%% Create a file with open_port/2 and read it back with
%% the file driver.
- ?line MyFile2 = filename:join(PrivDir, "my_output_file"),
- ?line FileData2_0 = "A file created ",
- ?line FileData2_1 = "with open_port/2.\n",
- ?line FileData2 = FileData2_0 ++ FileData2_1,
- ?line OutputPort = open_port(MyFile2, [out]),
- ?line OutputPort ! {self(), {command, FileData2_0}},
- ?line OutputPort ! {self(), {command, FileData2_1}},
- ?line OutputPort ! {self(), close},
- ?line {ok, Bin} = file:read_file(MyFile2),
- ?line FileData2 = binary_to_list(Bin),
-
- ?line test_server:timetrap_cancel(Dog),
+ MyFile2 = filename:join(PrivDir, "my_output_file"),
+ FileData2_0 = "A file created ",
+ FileData2_1 = "with open_port/2.\n",
+ FileData2 = FileData2_0 ++ FileData2_1,
+ OutputPort = open_port(MyFile2, [out]),
+ OutputPort ! {self(), {command, FileData2_0}},
+ OutputPort ! {self(), {command, FileData2_1}},
+ OutputPort ! {self(), close},
+ {ok, Bin} = file:read_file(MyFile2),
+ FileData2 = binary_to_list(Bin),
+
+ test_server:timetrap_cancel(Dog),
ok.
%%
@@ -670,17 +628,17 @@ iter_max_ports(Config) when is_list(Config) ->
iter_max_ports_test(Config) ->
- ?line Dog = test_server:timetrap(test_server:minutes(20)),
- ?line PortTest = port_test(Config),
- ?line Command = lists:concat([PortTest, " -h0 -q"]),
- ?line Iters = case os:type() of
+ Dog = test_server:timetrap(test_server:minutes(20)),
+ PortTest = port_test(Config),
+ Command = lists:concat([PortTest, " -h0 -q"]),
+ Iters = case os:type() of
{win32,_} -> 4;
_ -> 10
end,
- ?line L = do_iter_max_ports(Iters, Command),
+ L = do_iter_max_ports(Iters, Command),
io:format("Result: ~p",[L]),
- ?line all_equal(L),
- ?line test_server:timetrap_cancel(Dog),
+ all_equal(L),
+ test_server:timetrap_cancel(Dog),
{comment, "Max ports: " ++ integer_to_list(hd(L))}.
do_iter_max_ports(N, Command) when N > 0 ->
@@ -695,9 +653,9 @@ all_equal([]) -> ok.
max_ports(Command) ->
test_server:sleep(500),
- ?line Ps = open_ports({spawn, Command}, [eof]),
- ?line N = length(Ps),
- ?line close_ports(Ps),
+ Ps = open_ports({spawn, Command}, [eof]),
+ N = length(Ps),
+ close_ports(Ps),
io:format("Got ~p ports\n",[N]),
N.
@@ -727,105 +685,105 @@ open_ports(Name, Settings) ->
enomem ->
[];
Other ->
- ?line test_server:fail({open_ports, Other})
+ test_server:fail({open_ports, Other})
end;
Other ->
- ?line test_server:fail({open_ports, Other})
+ test_server:fail({open_ports, Other})
end.
%% Tests that exit(Port, Term) works (has been known to crash the emulator).
t_exit(suite) -> [];
t_exit(Config) when is_list(Config) ->
- ?line process_flag(trap_exit, true),
- ?line Pid = fun_spawn(fun suicide_port/1, [Config]),
- ?line receive
- {'EXIT', Pid, die} ->
- ok;
- Other ->
- test_server:fail({bad_message, Other})
- end.
+ process_flag(trap_exit, true),
+ Pid = fun_spawn(fun suicide_port/1, [Config]),
+ receive
+ {'EXIT', Pid, die} ->
+ ok;
+ Other ->
+ test_server:fail({bad_message, Other})
+ end.
suicide_port(Config) when is_list(Config) ->
- ?line Port = port_expect(Config, [], 0, "", []),
- ?line exit(Port, die),
- ?line receive after infinity -> ok end.
+ Port = port_expect(Config, [], 0, "", []),
+ exit(Port, die),
+ receive after infinity -> ok end.
tps_16_bytes(doc) -> "";
tps_16_bytes(suite) -> [];
tps_16_bytes(Config) when is_list(Config) ->
- ?line tps(16, Config).
+ tps(16, Config).
tps_1K(doc) -> "";
tps_1K(suite) -> [];
tps_1K(Config) when is_list(Config) ->
- ?line tps(1024, Config).
+ tps(1024, Config).
tps(Size, Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(300)),
- ?line PortTest = port_test(Config),
- ?line Packet = list_to_binary(random_packet(Size, "e")),
- ?line Port = open_port({spawn, PortTest}, [binary, {packet, 2}]),
- ?line Transactions = 10000,
- ?line {Elapsed, ok} = test_server:timecall(?MODULE, tps,
+ Dog = test_server:timetrap(test_server:seconds(300)),
+ PortTest = port_test(Config),
+ Packet = list_to_binary(random_packet(Size, "e")),
+ Port = open_port({spawn, PortTest}, [binary, {packet, 2}]),
+ Transactions = 10000,
+ {Elapsed, ok} = test_server:timecall(?MODULE, tps,
[Port, Packet, Transactions]),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
{comment, integer_to_list(trunc(Transactions/Elapsed+0.5)) ++ " transactions/s"}.
tps(_Port, _Packet, 0) -> ok;
tps(Port, Packet, N) ->
- ?line port_command(Port, Packet),
- ?line receive
- {Port, {data, Packet}} ->
- ?line tps(Port, Packet, N-1);
- Other ->
- ?line test_server:fail({bad_message, Other})
- end.
+ port_command(Port, Packet),
+ receive
+ {Port, {data, Packet}} ->
+ tps(Port, Packet, N-1);
+ Other ->
+ test_server:fail({bad_message, Other})
+ end.
%% Line I/O test
line(Config) when is_list(Config) ->
- ?line Siz = 110,
- ?line Dog = test_server:timetrap(test_server:seconds(300)),
- ?line Packet1 = random_packet(Siz),
- ?line Packet2 = random_packet(Siz div 2),
+ Siz = 110,
+ Dog = test_server:timetrap(test_server:seconds(300)),
+ Packet1 = random_packet(Siz),
+ Packet2 = random_packet(Siz div 2),
%% Test that packets are split into lines
- ?line port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet2,
+ port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet2,
io_lib:nl()]),
[{eol, Packet1}, {eol, Packet2}]}],
0, "", [{line,Siz}]),
%% Test the same for binaries
- ?line port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet2,
+ port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet2,
io_lib:nl()]),
[{eol, Packet1}, {eol, Packet2}]}],
0, "", [{line,Siz},binary]),
%% Test that too long lines get split
- ?line port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet1,
+ port_expect(Config,[{lists:append([Packet1, io_lib:nl(), Packet1,
Packet2, io_lib:nl()]),
[{eol, Packet1}, {noeol, Packet1},
{eol, Packet2}]}], 0, "", [{line,Siz}]),
%% Test that last output from closing port program gets received.
- ?line L1 = lists:append([Packet1, io_lib:nl(), Packet2]),
- ?line S1 = lists:flatten(io_lib:format("-l~w", [length(L1)])),
+ L1 = lists:append([Packet1, io_lib:nl(), Packet2]),
+ S1 = lists:flatten(io_lib:format("-l~w", [length(L1)])),
io:format("S1 = ~w, L1 = ~w~n", [S1,L1]),
- ?line port_expect(Config,[{L1,
+ port_expect(Config,[{L1,
[{eol, Packet1}, {noeol, Packet2}, eof]}], 0,
S1, [{line,Siz},eof]),
%% Test that a lonely <CR> is not treated as a newline
- ?line port_expect(Config,[{lists:append([Packet1, [13], Packet2,
+ port_expect(Config,[{lists:append([Packet1, [13], Packet2,
io_lib:nl()]),
[{noeol, Packet1}, {eol, [13 |Packet2]}]}],
0, "", [{line,Siz}]),
%% Test that packets get built up to lines (delayed output from
%% port program)
- ?line port_expect(Config,[{Packet2,[]},
+ port_expect(Config,[{Packet2,[]},
{lists:append([Packet2, io_lib:nl(),
Packet1, io_lib:nl()]),
[{eol, lists:append(Packet2, Packet2)},
{eol, Packet1}]}], 0, "-d", [{line,Siz}]),
%% Test that we get badarg if trying both packet and line
- ?line bad_argument(Config, [{packet, 5}, {line, 5}]),
- ?line test_server:timetrap_cancel(Dog),
+ bad_argument(Config, [{packet, 5}, {line, 5}]),
+ test_server:timetrap_cancel(Dog),
ok.
%%% Redirection of stderr test
@@ -834,15 +792,15 @@ stderr_to_stdout(suite) ->
stderr_to_stdout(doc) ->
"Test that redirection of standard error to standard output works.";
stderr_to_stdout(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(60)),
+ Dog = test_server:timetrap(test_server:seconds(60)),
%% See that it works
- ?line Packet = random_packet(10),
- ?line port_expect(Config,[{Packet,[Packet]}], 0, "-e -l10",
+ Packet = random_packet(10),
+ port_expect(Config,[{Packet,[Packet]}], 0, "-e -l10",
[stderr_to_stdout]),
- %% ?line stream_ping(Config, 10, "-e", [stderr_to_stdout]),
+ %% stream_ping(Config, 10, "-e", [stderr_to_stdout]),
%% See that it doesn't always happen (will generate garbage on stderr)
- ?line port_expect(Config,[{Packet,[eof]}], 0, "-e -l10", [line,eof]),
- ?line test_server:timetrap_cancel(Dog),
+ port_expect(Config,[{Packet,[eof]}], 0, "-e -l10", [line,eof]),
+ test_server:timetrap_cancel(Dog),
ok.
@@ -862,47 +820,39 @@ env(suite) ->
env(doc) ->
["Test that the 'env' option works"];
env(Config) when is_list(Config) ->
- case os:type() of
- vxworks ->
- {skipped,"Environments not implemented on VxWorks (could be...)"};
- _ ->
- env2(Config)
- end.
-
-env2(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(60)),
- ?line Priv = ?config(priv_dir, Config),
- ?line Temp = filename:join(Priv, "env_fun.bin"),
+ Dog = test_server:timetrap(test_server:seconds(60)),
+ Priv = ?config(priv_dir, Config),
+ Temp = filename:join(Priv, "env_fun.bin"),
PluppVal = "dirty monkey",
- ?line env_slave(Temp, [{"plupp",PluppVal}]),
+ env_slave(Temp, [{"plupp",PluppVal}]),
Long = "LongAndBoringEnvName",
- ?line os:putenv(Long, "nisse"),
-
- ?line env_slave(Temp, [{"plupp",PluppVal},
- {"DIR_PLUPP","###glurfrik"}],
- fun() ->
- PluppVal = os:getenv("plupp"),
- "###glurfrik" = os:getenv("DIR_PLUPP"),
- "nisse" = os:getenv(Long)
- end),
+ os:putenv(Long, "nisse"),
-
- ?line env_slave(Temp, [{"must_define_something","some_value"},
- {"certainly_not_existing",false},
- {"ends_with_equal", "value="},
- {Long,false},
- {"glurf","a glorfy string"}]),
+ env_slave(Temp, [{"plupp",PluppVal},
+ {"DIR_PLUPP","###glurfrik"}],
+ fun() ->
+ PluppVal = os:getenv("plupp"),
+ "###glurfrik" = os:getenv("DIR_PLUPP"),
+ "nisse" = os:getenv(Long)
+ end),
+
+ env_slave(Temp, [{"must_define_something","some_value"},
+ {"certainly_not_existing",false},
+ {"ends_with_equal", "value="},
+ {Long,false},
+ {"glurf","a glorfy string"}]),
%% A lot of non existing variables (mingled with existing)
NotExistingList = [{lists:flatten(io_lib:format("V~p_not_existing",[X])),false}
|| X <- lists:seq(1,150)],
ExistingList = [{lists:flatten(io_lib:format("V~p_existing",[X])),"a_value"}
|| X <- lists:seq(1,150)],
- ?line env_slave(Temp, lists:sort(ExistingList ++ NotExistingList)),
+ env_slave(Temp, lists:sort(ExistingList ++ NotExistingList)),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
env_slave(File, Env) ->
@@ -946,23 +896,15 @@ env_slave_main([File]) ->
%% 'env' option
%% Test bad environments.
bad_env(Config) when is_list(Config) ->
- case os:type() of
- vxworks ->
- {skipped,"Environments not implemented on VxWorks"};
- _ ->
- bad_env_1()
- end.
-
-bad_env_1() ->
- ?line try_bad_env([abbb]),
- ?line try_bad_env([{"key","value"}|{"another","value"}]),
- ?line try_bad_env([{"key","value","value2"}]),
- ?line try_bad_env([{"key",[a,b,c]}]),
- ?line try_bad_env([{"key",value}]),
- ?line try_bad_env({a,tuple}),
- ?line try_bad_env(42),
- ?line try_bad_env([a|b]),
- ?line try_bad_env(self()),
+ try_bad_env([abbb]),
+ try_bad_env([{"key","value"}|{"another","value"}]),
+ try_bad_env([{"key","value","value2"}]),
+ try_bad_env([{"key",[a,b,c]}]),
+ try_bad_env([{"key",value}]),
+ try_bad_env({a,tuple}),
+ try_bad_env(42),
+ try_bad_env([a|b]),
+ try_bad_env(self()),
ok.
try_bad_env(Env) ->
@@ -979,36 +921,29 @@ cd(suite) ->
cd(doc) ->
["Test that the 'cd' option works"];
cd(Config) when is_list(Config) ->
- case os:type() of
- vxworks ->
- {skipped,"Task specific directories does not exist on VxWorks"};
- _ ->
- cd2(Config)
- end.
-cd2(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(60)),
-
- ?line Program = atom_to_list(lib:progname()),
- ?line DataDir = ?config(data_dir, Config),
- ?line TestDir = filename:join(DataDir, "dir"),
- ?line Cmd = Program ++ " -pz " ++ DataDir ++
- " -noshell -s port_test pwd -s erlang halt",
- ?line _ = open_port({spawn, Cmd},
- [{cd, TestDir},
- {line, 256}]),
- ?line receive
- {_, {data, {eol, String}}} ->
- case filename_equal(String, TestDir) of
- true ->
- ok;
- false ->
- ?line test_server:fail({cd, String})
- end;
- Other2 ->
- ?line test_server:fail({env, Other2})
- end,
+ Dog = test_server:timetrap(test_server:seconds(60)),
- ?line test_server:timetrap_cancel(Dog),
+ Program = atom_to_list(lib:progname()),
+ DataDir = ?config(data_dir, Config),
+ TestDir = filename:join(DataDir, "dir"),
+ Cmd = Program ++ " -pz " ++ DataDir ++
+ " -noshell -s port_test pwd -s erlang halt",
+ _ = open_port({spawn, Cmd},
+ [{cd, TestDir},
+ {line, 256}]),
+ receive
+ {_, {data, {eol, String}}} ->
+ case filename_equal(String, TestDir) of
+ true ->
+ ok;
+ false ->
+ test_server:fail({cd, String})
+ end;
+ Other2 ->
+ test_server:fail({env, Other2})
+ end,
+
+ test_server:timetrap_cancel(Dog),
ok.
filename_equal(A, B) ->
@@ -1059,8 +994,8 @@ otp_3906(Config) when is_list(Config) ->
-define(OTP_3906_MAX_CONC_OSP, 50).
otp_3906(Config, OSName) ->
- ?line DataDir = filename:dirname(proplists:get_value(data_dir,Config)),
- ?line {ok, Variables} = file:consult(
+ DataDir = filename:dirname(proplists:get_value(data_dir,Config)),
+ {ok, Variables} = file:consult(
filename:join([DataDir,"..","..",
"test_server","variables"])),
case lists:keysearch('CC', 1, Variables) of
@@ -1105,7 +1040,7 @@ otp_3906(Config, OSName) ->
succeded ->
ok;
_ ->
- ?line test_server:fail(Result)
+ test_server:fail(Result)
end;
_ ->
{skipped, "No C compiler found"}
@@ -1246,15 +1181,15 @@ otp_4389(doc) -> [];
otp_4389(Config) when is_list(Config) ->
case os:type() of
{unix, _} ->
- ?line Dog = test_server:timetrap(test_server:seconds(240)),
- ?line TCR = self(),
+ Dog = test_server:timetrap(test_server:seconds(240)),
+ TCR = self(),
case get_true_cmd() of
True when is_list(True) ->
- ?line lists:foreach(
+ lists:foreach(
fun (P) ->
- ?line receive
- {P, ok} -> ?line ok;
- {P, Err} -> ?line ?t:fail(Err)
+ receive
+ {P, ok} -> ok;
+ {P, Err} -> ?t:fail(Err)
end
end,
lists:map(
@@ -1283,14 +1218,14 @@ otp_4389(Config) when is_list(Config) ->
end)
end,
lists:duplicate(1000,[]))),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
{comment,
"This test case doesn't always fail when the bug that "
"it tests for is present (it is most likely to fail on"
" a multi processor machine). If the test case fails it"
" will fail by deadlocking the emulator."};
_ ->
- ?line {skipped, "\"true\" command not found"}
+ {skipped, "\"true\" command not found"}
end;
_ ->
{skip,"Only run on Unix"}
@@ -1320,11 +1255,11 @@ exit_status(suite) ->
exit_status(doc) ->
["Test that the 'exit_status' option works"];
exit_status(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(60)),
- ?line port_expect(Config,[{"x",
+ Dog = test_server:timetrap(test_server:seconds(60)),
+ port_expect(Config,[{"x",
[{exit_status, 5}]}],
1, "", [exit_status]),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
spawn_driver(suite) ->
@@ -1332,10 +1267,49 @@ spawn_driver(suite) ->
spawn_driver(doc) ->
["Test spawning a driver specifically"];
spawn_driver(Config) when is_list(Config) ->
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ Path = ?config(data_dir, Config),
+ ok = load_driver(Path, "echo_drv"),
+ Port = erlang:open_port({spawn_driver, "echo_drv"}, []),
+ Port ! {self(), {command, "Hello port!"}},
+ receive
+ {Port, {data, "Hello port!"}} = Msg1 ->
+ io:format("~p~n", [Msg1]),
+ ok;
+ Other ->
+ test_server:fail({unexpected, Other})
+ end,
+ Port ! {self(), close},
+ receive {Port, closed} -> ok end,
+
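+ %% The text after the driver name is passed to the driver on start; echo_drv sends it straight back.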
+ Port2 = erlang:open_port({spawn_driver, "echo_drv -Hello port?"},
+ []),
+ receive
+ {Port2, {data, "Hello port?"}} = Msg2 ->
+ io:format("~p~n", [Msg2]),
+ ok;
+ Other2 ->
+ test_server:fail({unexpected2, Other2})
+ end,
+ Port2 ! {self(), close},
+ receive {Port2, closed} -> ok end,
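+ %% spawn_driver must refuse anything that is not a loaded driver.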
+ {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, "ls"}, [])),
+ {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, "cmd"}, [])),
+ {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, os:find_executable("erl")}, [])),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+parallelism_option(suite) ->
+ [];
+parallelism_option(doc) ->
+ ["Test parallelism option of open_port"];
+parallelism_option(Config) when is_list(Config) ->
?line Dog = test_server:timetrap(test_server:seconds(10)),
?line Path = ?config(data_dir, Config),
?line ok = load_driver(Path, "echo_drv"),
- ?line Port = erlang:open_port({spawn_driver, "echo_drv"}, []),
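+ %% Open the port with the parallelism hint set and verify it through port_info/2.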
+ ?line Port = erlang:open_port({spawn_driver, "echo_drv"},
+ [{parallelism, true}]),
+ ?line {parallelism, true} = erlang:port_info(Port, parallelism),
?line Port ! {self(), {command, "Hello port!"}},
?line receive
{Port, {data, "Hello port!"}} = Msg1 ->
@@ -1348,7 +1322,8 @@ spawn_driver(Config) when is_list(Config) ->
?line receive {Port, closed} -> ok end,
?line Port2 = erlang:open_port({spawn_driver, "echo_drv -Hello port?"},
- []),
+ [{parallelism, false}]),
+ ?line {parallelism, false} = erlang:port_info(Port2, parallelism),
?line receive
{Port2, {data, "Hello port?"}} = Msg2 ->
io:format("~p~n", [Msg2]),
@@ -1358,9 +1333,6 @@ spawn_driver(Config) when is_list(Config) ->
end,
?line Port2 ! {self(), close},
?line receive {Port2, closed} -> ok end,
- ?line {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, "ls"}, [])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, "cmd"}, [])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:open_port({spawn_driver, os:find_executable("erl")}, [])),
?line test_server:timetrap_cancel(Dog),
ok.
@@ -1369,48 +1341,48 @@ spawn_executable(suite) ->
spawn_executable(doc) ->
["Test spawning an executable specifically"];
spawn_executable(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line DataDir = ?config(data_dir, Config),
- ?line EchoArgs1 = filename:join([DataDir,"echo_args"]),
- ?line ExactFile1 = filename:nativename(os:find_executable(EchoArgs1)),
- ?line [ExactFile1] = run_echo_args(DataDir,[]),
- ?line ["echo_args"] = run_echo_args(DataDir,["echo_args"]),
- ?line ["echo_arguments"] = run_echo_args(DataDir,["echo_arguments"]),
- ?line [ExactFile1,"hello world","dlrow olleh"] =
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ DataDir = ?config(data_dir, Config),
+ EchoArgs1 = filename:join([DataDir,"echo_args"]),
+ ExactFile1 = filename:nativename(os:find_executable(EchoArgs1)),
+ [ExactFile1] = run_echo_args(DataDir,[]),
+ ["echo_args"] = run_echo_args(DataDir,["echo_args"]),
+ ["echo_arguments"] = run_echo_args(DataDir,["echo_arguments"]),
+ [ExactFile1,"hello world","dlrow olleh"] =
run_echo_args(DataDir,[ExactFile1,"hello world","dlrow olleh"]),
- ?line [ExactFile1] = run_echo_args(DataDir,[default]),
- ?line [ExactFile1,"hello world","dlrow olleh"] =
+ [ExactFile1] = run_echo_args(DataDir,[default]),
+ [ExactFile1,"hello world","dlrow olleh"] =
run_echo_args(DataDir,[switch_order,ExactFile1,"hello world",
"dlrow olleh"]),
- ?line [ExactFile1,"hello world","dlrow olleh"] =
+ [ExactFile1,"hello world","dlrow olleh"] =
run_echo_args(DataDir,[default,"hello world","dlrow olleh"]),
- ?line [ExactFile1,"hello world","dlrow olleh"] =
+ [ExactFile1,"hello world","dlrow olleh"] =
run_echo_args_2("\""++ExactFile1++"\" "++"\"hello world\" \"dlrow olleh\""),
- ?line PrivDir = ?config(priv_dir, Config),
- ?line SpaceDir =filename:join([PrivDir,"With Spaces"]),
- ?line file:make_dir(SpaceDir),
- ?line Executable = filename:basename(ExactFile1),
- ?line file:copy(ExactFile1,filename:join([SpaceDir,Executable])),
- ?line ExactFile2 = filename:nativename(filename:join([SpaceDir,Executable])),
- ?line chmodplusx(ExactFile2),
+ PrivDir = ?config(priv_dir, Config),
+ SpaceDir =filename:join([PrivDir,"With Spaces"]),
+ file:make_dir(SpaceDir),
+ Executable = filename:basename(ExactFile1),
+ file:copy(ExactFile1,filename:join([SpaceDir,Executable])),
+ ExactFile2 = filename:nativename(filename:join([SpaceDir,Executable])),
+ chmodplusx(ExactFile2),
io:format("|~s|~n",[ExactFile2]),
- ?line [ExactFile2] = run_echo_args(SpaceDir,[]),
- ?line ["echo_args"] = run_echo_args(SpaceDir,["echo_args"]),
- ?line ["echo_arguments"] = run_echo_args(SpaceDir,["echo_arguments"]),
- ?line [ExactFile2,"hello world","dlrow olleh"] =
+ [ExactFile2] = run_echo_args(SpaceDir,[]),
+ ["echo_args"] = run_echo_args(SpaceDir,["echo_args"]),
+ ["echo_arguments"] = run_echo_args(SpaceDir,["echo_arguments"]),
+ [ExactFile2,"hello world","dlrow olleh"] =
run_echo_args(SpaceDir,[ExactFile2,"hello world","dlrow olleh"]),
- ?line [ExactFile2] = run_echo_args(SpaceDir,[default]),
- ?line [ExactFile2,"hello world","dlrow olleh"] =
+ [ExactFile2] = run_echo_args(SpaceDir,[default]),
+ [ExactFile2,"hello world","dlrow olleh"] =
run_echo_args(SpaceDir,[switch_order,ExactFile2,"hello world",
"dlrow olleh"]),
- ?line [ExactFile2,"hello world","dlrow olleh"] =
+ [ExactFile2,"hello world","dlrow olleh"] =
run_echo_args(SpaceDir,[default,"hello world","dlrow olleh"]),
- ?line [ExactFile2,"hello world","dlrow olleh"] =
+ [ExactFile2,"hello world","dlrow olleh"] =
run_echo_args_2("\""++ExactFile2++"\" "++"\"hello world\" \"dlrow olleh\""),
- ?line ExeExt =
+ ExeExt =
case string:to_lower(lists:last(string:tokens(ExactFile2,"."))) of
"exe" ->
".exe";
@@ -1418,47 +1390,47 @@ spawn_executable(Config) when is_list(Config) ->
""
end,
Executable2 = "spoky name"++ExeExt,
- ?line file:copy(ExactFile1,filename:join([SpaceDir,Executable2])),
- ?line ExactFile3 = filename:nativename(filename:join([SpaceDir,Executable2])),
- ?line chmodplusx(ExactFile3),
- ?line [ExactFile3] = run_echo_args(SpaceDir,Executable2,[]),
- ?line ["echo_args"] = run_echo_args(SpaceDir,Executable2,["echo_args"]),
- ?line ["echo_arguments"] = run_echo_args(SpaceDir,Executable2,["echo_arguments"]),
- ?line [ExactFile3,"hello world","dlrow olleh"] =
+ file:copy(ExactFile1,filename:join([SpaceDir,Executable2])),
+ ExactFile3 = filename:nativename(filename:join([SpaceDir,Executable2])),
+ chmodplusx(ExactFile3),
+ [ExactFile3] = run_echo_args(SpaceDir,Executable2,[]),
+ ["echo_args"] = run_echo_args(SpaceDir,Executable2,["echo_args"]),
+ ["echo_arguments"] = run_echo_args(SpaceDir,Executable2,["echo_arguments"]),
+ [ExactFile3,"hello world","dlrow olleh"] =
run_echo_args(SpaceDir,Executable2,[ExactFile3,"hello world","dlrow olleh"]),
- ?line [ExactFile3] = run_echo_args(SpaceDir,Executable2,[default]),
- ?line [ExactFile3,"hello world","dlrow olleh"] =
+ [ExactFile3] = run_echo_args(SpaceDir,Executable2,[default]),
+ [ExactFile3,"hello world","dlrow olleh"] =
run_echo_args(SpaceDir,Executable2,
[switch_order,ExactFile3,"hello world",
"dlrow olleh"]),
- ?line [ExactFile3,"hello world","dlrow olleh"] =
+ [ExactFile3,"hello world","dlrow olleh"] =
run_echo_args(SpaceDir,Executable2,
[default,"hello world","dlrow olleh"]),
- ?line [ExactFile3,"hello world","dlrow olleh"] =
+ [ExactFile3,"hello world","dlrow olleh"] =
run_echo_args_2("\""++ExactFile3++"\" "++"\"hello world\" \"dlrow olleh\""),
- ?line {'EXIT',{enoent,_}} = (catch run_echo_args(SpaceDir,"fnurflmonfi",
+ {'EXIT',{enoent,_}} = (catch run_echo_args(SpaceDir,"fnurflmonfi",
[default,"hello world",
"dlrow olleh"])),
NonExec = "kronxfrt"++ExeExt,
- ?line file:write_file(filename:join([SpaceDir,NonExec]),
+ file:write_file(filename:join([SpaceDir,NonExec]),
<<"Not an executable">>),
- ?line {'EXIT',{eacces,_}} = (catch run_echo_args(SpaceDir,NonExec,
+ {'EXIT',{eacces,_}} = (catch run_echo_args(SpaceDir,NonExec,
[default,"hello world",
"dlrow olleh"])),
- ?line {'EXIT',{enoent,_}} = (catch open_port({spawn_executable,"cmd"},[])),
- ?line {'EXIT',{enoent,_}} = (catch open_port({spawn_executable,"sh"},[])),
+ {'EXIT',{enoent,_}} = (catch open_port({spawn_executable,"cmd"},[])),
+ {'EXIT',{enoent,_}} = (catch open_port({spawn_executable,"sh"},[])),
case os:type() of
{win32,_} ->
test_bat_file(SpaceDir);
{unix,_} ->
test_sh_file(SpaceDir)
end,
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
unregister_name(Config) when is_list(Config) ->
- ?line true = register(crash, open_port({spawn, "sleep 100"}, [])),
- ?line true = unregister(crash).
+ true = register(crash, open_port({spawn, "sleep 100"}, [])),
+ true = unregister(crash).
test_bat_file(Dir) ->
FN = "tf.bat",
@@ -1478,16 +1450,16 @@ test_bat_file(Dir) ->
<<"\r\n">>,
<<":done\r\n">>,
<<"\r\n">>],
- ?line file:write_file(Full,list_to_binary(D)),
- ?line EF = filename:basename(FN),
- ?line [DN,"hello","world"] =
+ file:write_file(Full,list_to_binary(D)),
+ EF = filename:basename(FN),
+ [DN,"hello","world"] =
run_echo_args(Dir,FN,
[default,"hello","world"]),
%% The arg0 argument should be ignored when running batch files
- ?line [DN,"hello","world"] =
+ [DN,"hello","world"] =
run_echo_args(Dir,FN,
["knaskurt","hello","world"]),
- ?line EF = filename:basename(DN),
+ EF = filename:basename(DN),
ok.
test_sh_file(Dir) ->
@@ -1501,20 +1473,20 @@ test_sh_file(Dir) ->
<<" shift\n">>,
<<" i=`expr $i + 1`\n">>,
<<"done\n">>],
- ?line file:write_file(Full,list_to_binary(D)),
- ?line chmodplusx(Full),
- ?line [Full,"hello","world"] =
+ file:write_file(Full,list_to_binary(D)),
+ chmodplusx(Full),
+ [Full,"hello","world"] =
run_echo_args(Dir,FN,
[default,"hello","world"]),
- ?line [Full,"hello","world of spaces"] =
+ [Full,"hello","world of spaces"] =
run_echo_args(Dir,FN,
[default,"hello","world of spaces"]),
- ?line file:write_file(filename:join([Dir,"testfile1"]),<<"testdata1">>),
- ?line file:write_file(filename:join([Dir,"testfile2"]),<<"testdata2">>),
- ?line Pattern = filename:join([Dir,"testfile*"]),
- ?line L = filelib:wildcard(Pattern),
- ?line 2 = length(L),
- ?line [Full,"hello",Pattern] =
+ file:write_file(filename:join([Dir,"testfile1"]),<<"testdata1">>),
+ file:write_file(filename:join([Dir,"testfile2"]),<<"testdata2">>),
+ Pattern = filename:join([Dir,"testfile*"]),
+ L = filelib:wildcard(Pattern),
+ 2 = length(L),
+ [Full,"hello",Pattern] =
run_echo_args(Dir,FN,
[default,"hello",Pattern]),
ok.
@@ -1574,25 +1546,25 @@ mix_up_ports(suite) ->
mix_up_ports(doc) ->
["Test that the emulator does not mix up ports when the port table wraps"];
mix_up_ports(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line Path = ?config(data_dir, Config),
- ?line ok = load_driver(Path, "echo_drv"),
- ?line Port = erlang:open_port({spawn, "echo_drv"}, []),
- ?line Port ! {self(), {command, "Hello port!"}},
- ?line receive
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ Path = ?config(data_dir, Config),
+ ok = load_driver(Path, "echo_drv"),
+ Port = erlang:open_port({spawn, "echo_drv"}, []),
+ Port ! {self(), {command, "Hello port!"}},
+ receive
{Port, {data, "Hello port!"}} = Msg1 ->
io:format("~p~n", [Msg1]),
ok;
Other ->
test_server:fail({unexpected, Other})
end,
- ?line Port ! {self(), close},
- ?line receive {Port, closed} -> ok end,
- ?line loop(start, done,
+ Port ! {self(), close},
+ receive {Port, closed} -> ok end,
+ loop(start, done,
fun(P) ->
- ?line Q =
+ Q =
(catch erlang:open_port({spawn, "echo_drv"}, [])),
-%% ?line io:format("~p ", [Q]),
+%% io:format("~p ", [Q]),
if is_port(Q) ->
Q;
true ->
@@ -1600,14 +1572,14 @@ mix_up_ports(Config) when is_list(Config) ->
done
end
end),
- ?line Port ! {self(), {command, "Hello again port!"}},
- ?line receive
+ Port ! {self(), {command, "Hello again port!"}},
+ receive
Msg2 ->
test_server:fail({unexpected, Msg2})
after 1000 ->
ok
end,
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
loop(Stop, Stop, Fun) when is_function(Fun) ->
@@ -1622,45 +1594,46 @@ otp_5112(doc) ->
["Test that link to connected process is taken away when port calls",
"driver_exit() also when the port index has wrapped"];
otp_5112(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line Path = ?config(data_dir, Config),
- ?line ok = load_driver(Path, "exit_drv"),
- ?line Port = otp_5112_get_wrapped_port(),
- ?line ?t:format("Max ports: ~p~n",[max_ports()]),
- ?line ?t:format("Port: ~p~n",[Port]),
- ?line {links, Links1} = process_info(self(),links),
- ?line ?t:format("Links1: ~p~n",[Links1]),
- ?line true = lists:member(Port, Links1),
- ?line Port ! {self(), {command, ""}},
- ?line {links, Links2} = process_info(self(),links),
- ?line ?t:format("Links2: ~p~n",[Links2]),
- ?line false = lists:member(Port, Links2), %% This used to fail
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ Path = ?config(data_dir, Config),
+ ok = load_driver(Path, "exit_drv"),
+ Port = otp_5112_get_wrapped_port(),
+ ?t:format("Max ports: ~p~n",[max_ports()]),
+ ?t:format("Port: ~p~n",[Port]),
+ {links, Links1} = process_info(self(),links),
+ ?t:format("Links1: ~p~n",[Links1]),
+ true = lists:member(Port, Links1),
+ Port ! {self(), {command, ""}},
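+ %% exit_drv terminates the port via driver_exit(); wait until it is gone before checking the links.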
+ wait_until(fun () -> lists:member(Port, erlang:ports()) == false end),
+ {links, Links2} = process_info(self(),links),
+ ?t:format("Links2: ~p~n",[Links2]),
+ false = lists:member(Port, Links2), %% This used to fail
+ test_server:timetrap_cancel(Dog),
ok.
otp_5112_get_wrapped_port() ->
- ?line P1 = erlang:open_port({spawn, "exit_drv"}, []),
- ?line case port_ix(P1) < max_ports() of
+ P1 = erlang:open_port({spawn, "exit_drv"}, []),
+ case port_ix(P1) < max_ports() of
true ->
- ?line ?t:format("Need to wrap port index (~p)~n", [P1]),
- ?line otp_5112_wrap_port_ix([P1]),
- ?line P2 = erlang:open_port({spawn, "exit_drv"}, []),
- ?line false = port_ix(P2) < max_ports(),
- ?line P2;
+ ?t:format("Need to wrap port index (~p)~n", [P1]),
+ otp_5112_wrap_port_ix([P1]),
+ P2 = erlang:open_port({spawn, "exit_drv"}, []),
+ false = port_ix(P2) < max_ports(),
+ P2;
false ->
- ?line ?t:format("Port index already wrapped (~p)~n", [P1]),
- ?line P1
+ ?t:format("Port index already wrapped (~p)~n", [P1]),
+ P1
end.
otp_5112_wrap_port_ix(Ports) ->
- ?line case (catch erlang:open_port({spawn, "exit_drv"}, [])) of
+ case (catch erlang:open_port({spawn, "exit_drv"}, [])) of
Port when is_port(Port) ->
- ?line otp_5112_wrap_port_ix([Port|Ports]);
+ otp_5112_wrap_port_ix([Port|Ports]);
_ ->
%% Port table now full; empty port table
- ?line lists:foreach(fun (P) -> P ! {self(), close} end,
+ lists:foreach(fun (P) -> P ! {self(), close} end,
Ports),
- ?line ok
+ ok
end.
@@ -1669,106 +1642,75 @@ otp_5119(suite) ->
otp_5119(doc) ->
["Test that port index is not unnecessarily wrapped"];
otp_5119(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line Path = ?config(data_dir, Config),
- ?line ok = load_driver(Path, "exit_drv"),
- ?line PI1 = port_ix(otp_5119_fill_empty_port_tab([])),
- ?line PI2 = port_ix(erlang:open_port({spawn, "exit_drv"}, [])),
- ?line {PortIx1, PortIx2}
- = case PI2 > PI1 of
- true ->
- ?line {PI1, PI2};
- false ->
- ?line {port_ix(otp_5119_fill_empty_port_tab([PI2])),
- port_ix(erlang:open_port({spawn, "exit_drv"}, []))}
- end,
- ?line MaxPorts = max_ports(),
- ?line ?t:format("PortIx1 = ~p ~p~n", [PI1, PortIx1]),
- ?line ?t:format("PortIx2 = ~p ~p~n", [PI2, PortIx2]),
- ?line ?t:format("MaxPorts = ~p~n", [MaxPorts]),
- ?line true = PortIx2 > PortIx1,
- ?line true = PortIx2 =< PortIx1 + MaxPorts,
- ?line test_server:timetrap_cancel(Dog),
- ?line ok.
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ Path = ?config(data_dir, Config),
+ ok = load_driver(Path, "exit_drv"),
+ PI1 = port_ix(otp_5119_fill_empty_port_tab([])),
+ PI2 = port_ix(erlang:open_port({spawn, "exit_drv"}, [])),
+ {PortIx1, PortIx2} = case PI2 > PI1 of
+ true ->
+ {PI1, PI2};
+ false ->
+ {port_ix(otp_5119_fill_empty_port_tab([PI2])),
+ port_ix(erlang:open_port({spawn, "exit_drv"}, []))}
+ end,
+ MaxPorts = max_ports(),
+ ?t:format("PortIx1 = ~p ~p~n", [PI1, PortIx1]),
+ ?t:format("PortIx2 = ~p ~p~n", [PI2, PortIx2]),
+ ?t:format("MaxPorts = ~p~n", [MaxPorts]),
+ true = PortIx2 > PortIx1,
+ true = PortIx2 =< PortIx1 + MaxPorts,
+ test_server:timetrap_cancel(Dog),
+ ok.
otp_5119_fill_empty_port_tab(Ports) ->
- ?line case (catch erlang:open_port({spawn, "exit_drv"}, [])) of
+ case (catch erlang:open_port({spawn, "exit_drv"}, [])) of
Port when is_port(Port) ->
- ?line otp_5119_fill_empty_port_tab([Port|Ports]);
+ otp_5119_fill_empty_port_tab([Port|Ports]);
_ ->
%% Port table now full; empty port table
- ?line lists:foreach(fun (P) -> P ! {self(), close} end,
+ lists:foreach(fun (P) -> P ! {self(), close} end,
Ports),
- ?line [LastPort|_] = Ports,
- ?line LastPort
- end.
-
--define(DEF_MAX_PORTS, 1024).
-
-max_ports_env() ->
- ?line case os:getenv("ERL_MAX_PORTS") of
- EMP when is_list(EMP) ->
- case catch list_to_integer(EMP) of
- Int when is_integer(Int) -> ?line Int;
- _ -> ?line false
- end;
- _ -> ?line false
+ [LastPort|_] = Ports,
+ LastPort
end.
max_ports() ->
- ?line PreMaxPorts
- = case max_ports_env() of
- Env when is_integer(Env) -> ?line Env;
- _ ->
- ?line case os:type() of
- {unix, _} ->
- ?line UlimStr = string:strip(os:cmd("ulimit -n")
- -- "\n"),
- ?line case catch list_to_integer(UlimStr) of
- Ulim when is_integer(Ulim) -> ?line Ulim;
- _ -> ?line ?DEF_MAX_PORTS
- end;
- _ -> ?line ?DEF_MAX_PORTS
- end
- end,
- ?line case PreMaxPorts > ?DEF_MAX_PORTS of
- true -> ?line PreMaxPorts;
- false -> ?line ?DEF_MAX_PORTS
- end.
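+ %% The port limit is now read directly from the runtime system.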
+ erlang:system_info(port_limit).
port_ix(Port) when is_port(Port) ->
- ?line ["#Port",_,PortIxStr] = string:tokens(erlang:port_to_list(Port),
+ ["#Port",_,PortIxStr] = string:tokens(erlang:port_to_list(Port),
"<.>"),
- ?line list_to_integer(PortIxStr).
+ list_to_integer(PortIxStr).
otp_6224(doc) -> ["Check that port command failure doesn't crash the emulator"];
otp_6224(suite) -> [];
otp_6224(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line Path = ?config(data_dir, Config),
- ?line ok = load_driver(Path, "failure_drv"),
- ?line Go = make_ref(),
- ?line Failer = spawn(fun () ->
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ Path = ?config(data_dir, Config),
+ ok = load_driver(Path, "failure_drv"),
+ Go = make_ref(),
+ Failer = spawn(fun () ->
receive Go -> ok end,
- ?line Port = open_port({spawn, "failure_drv"},
+ Port = open_port({spawn, "failure_drv"},
[]),
Port ! {self(), {command, "Fail, please!"}},
otp_6224_loop()
end),
- ?line Mon = erlang:monitor(process, Failer),
- ?line Failer ! Go,
- ?line receive
+ Mon = erlang:monitor(process, Failer),
+ Failer ! Go,
+ receive
{'DOWN', Mon, process, Failer, Reason} ->
- ?line case Reason of
- {driver_failed, _} -> ?line ok;
- driver_failed -> ?line ok;
- _ -> ?line ?t:fail({unexpected_exit_reason,
+ case Reason of
+ {driver_failed, _} -> ok;
+ driver_failed -> ok;
+ _ -> ?t:fail({unexpected_exit_reason,
Reason})
end
end,
- ?line test_server:timetrap_cancel(Dog),
- ?line ok.
+ test_server:timetrap_cancel(Dog),
+ ok.
otp_6224_loop() ->
receive _ -> ok after 0 -> ok end,
@@ -1781,11 +1723,11 @@ otp_6224_loop() ->
exit_status_multi_scheduling_block(doc) -> [];
exit_status_multi_scheduling_block(suite) -> [];
exit_status_multi_scheduling_block(Config) when is_list(Config) ->
- ?line Repeat = 3,
- ?line case ?t:os_type() of
+ Repeat = 3,
+ case ?t:os_type() of
{unix, _} ->
- ?line Dog = ?t:timetrap(test_server:minutes(2*Repeat)),
- ?line SleepSecs = 6,
+ Dog = ?t:timetrap(test_server:minutes(2*Repeat)),
+ SleepSecs = 6,
try
lists:foreach(fun (_) ->
exit_status_msb_test(Config,
@@ -1799,7 +1741,7 @@ exit_status_multi_scheduling_block(Config) when is_list(Config) ->
?t:timetrap_cancel(Dog),
receive after SleepSecs+500 -> ok end
end;
- _ -> ?line {skip, "Not implemented for this OS"}
+ _ -> {skip, "Not implemented for this OS"}
end.
exit_status_msb_test(Config, SleepSecs) when is_list(Config) ->
@@ -1808,22 +1750,22 @@ exit_status_msb_test(Config, SleepSecs) when is_list(Config) ->
%% and we want these port programs to terminate while multi-scheduling
%% is blocked.
%%
- ?line NoSchedsOnln = erlang:system_info(schedulers_online),
- ?line Parent = self(),
- ?line ?t:format("SleepSecs = ~p~n", [SleepSecs]),
- ?line PortProg = "sleep " ++ integer_to_list(SleepSecs),
- ?line Start = now(),
- ?line NoProcs = case NoSchedsOnln of
+ NoSchedsOnln = erlang:system_info(schedulers_online),
+ Parent = self(),
+ ?t:format("SleepSecs = ~p~n", [SleepSecs]),
+ PortProg = "sleep " ++ integer_to_list(SleepSecs),
+ Start = now(),
+ NoProcs = case NoSchedsOnln of
NProcs when NProcs < ?EXIT_STATUS_MSB_MAX_PROCS ->
NProcs;
_ ->
?EXIT_STATUS_MSB_MAX_PROCS
end,
- ?line NoPortsPerProc = case 20*NoProcs of
+ NoPortsPerProc = case 20*NoProcs of
TNPorts when TNPorts < ?EXIT_STATUS_MSB_MAX_PORTS -> 20;
_ -> ?EXIT_STATUS_MSB_MAX_PORTS div NoProcs
end,
- ?line ?t:format("NoProcs = ~p~nNoPortsPerProc = ~p~n",
+ ?t:format("NoProcs = ~p~nNoPortsPerProc = ~p~n",
[NoProcs, NoPortsPerProc]),
ProcFun
= fun () ->
@@ -1873,32 +1815,32 @@ exit_status_msb_test(Config, SleepSecs) when is_list(Config) ->
PrtSIds),
Parent ! {self(), done}
end,
- ?line Procs = lists:map(fun (N) ->
+ Procs = lists:map(fun (N) ->
spawn_opt(ProcFun,
[link,
{scheduler,
(N rem NoSchedsOnln)+1}])
end,
lists:seq(1, NoProcs)),
- ?line SIds = lists:map(fun (P) ->
+ SIds = lists:map(fun (P) ->
receive {P, started, SIds} -> SIds end
end,
Procs),
- ?line StartedTime = timer:now_diff(now(), Start)/1000000,
- ?line ?t:format("StartedTime = ~p~n", [StartedTime]),
- ?line true = StartedTime < SleepSecs,
- ?line erlang:system_flag(multi_scheduling, block),
- ?line lists:foreach(fun (P) -> receive {P, done} -> ok end end, Procs),
- ?line DoneTime = timer:now_diff(now(), Start)/1000000,
- ?line ?t:format("DoneTime = ~p~n", [DoneTime]),
- ?line true = DoneTime > SleepSecs,
- ?line ok = verify_multi_scheduling_blocked(),
- ?line erlang:system_flag(multi_scheduling, unblock),
- ?line case {length(lists:usort(lists:flatten(SIds))), NoSchedsOnln} of
+ StartedTime = timer:now_diff(now(), Start)/1000000,
+ ?t:format("StartedTime = ~p~n", [StartedTime]),
+ true = StartedTime < SleepSecs,
+ erlang:system_flag(multi_scheduling, block),
+ lists:foreach(fun (P) -> receive {P, done} -> ok end end, Procs),
+ DoneTime = timer:now_diff(now(), Start)/1000000,
+ ?t:format("DoneTime = ~p~n", [DoneTime]),
+ true = DoneTime > SleepSecs,
+ ok = verify_multi_scheduling_blocked(),
+ erlang:system_flag(multi_scheduling, unblock),
+ case {length(lists:usort(lists:flatten(SIds))), NoSchedsOnln} of
{N, N} ->
- ?line ok;
+ ok;
{N, M} ->
- ?line ?t:fail("Failed to create ports on all"
+ ?t:fail("Failed to create ports on all"
++ integer_to_list(M) ++ " available"
"schedulers. Only created ports on "
++ integer_to_list(N) ++ " schedulers.")
@@ -1921,18 +1863,18 @@ sid_proc(SIds) ->
end.
verify_multi_scheduling_blocked() ->
- ?line Procs = lists:map(fun (_) ->
+ Procs = lists:map(fun (_) ->
spawn_link(fun () -> sid_proc([]) end)
end,
lists:seq(1, 3*erlang:system_info(schedulers_online))),
- ?line receive after 1000 -> ok end,
- ?line SIds = lists:map(fun (P) ->
+ receive after 1000 -> ok end,
+ SIds = lists:map(fun (P) ->
P ! {self(), want_sids},
receive {P, sids, PSIds} -> PSIds end
end,
Procs),
- ?line 1 = length(lists:usort(lists:flatten(SIds))),
- ?line ok.
+ 1 = length(lists:usort(lists:flatten(SIds))),
+ ok.
%%% Pinging functions.
@@ -1993,30 +1935,30 @@ build_cmd_line(FixedCmdLine, [], Result) ->
port_expect(Config, Actions, HSize, CmdLine, Options0) ->
% io:format("port_expect(~p, ~p, ~p, ~p)",
% [Actions, HSize, CmdLine, Options0]),
- ?line PortTest = port_test(Config),
- ?line Cmd = lists:concat([PortTest, " -h", HSize, " ", CmdLine]),
- ?line PortType =
+ PortTest = port_test(Config),
+ Cmd = lists:concat([PortTest, " -h", HSize, " ", CmdLine]),
+ PortType =
case HSize of
0 -> stream;
_ -> {packet, HSize}
end,
- ?line Options = [PortType|Options0],
- ?line io:format("open_port({spawn, ~p}, ~p)", [Cmd, Options]),
- ?line Port = open_port({spawn, Cmd}, Options),
- ?line port_expect(Port, Actions, Options),
+ Options = [PortType|Options0],
+ io:format("open_port({spawn, ~p}, ~p)", [Cmd, Options]),
+ Port = open_port({spawn, Cmd}, Options),
+ port_expect(Port, Actions, Options),
Port.
port_expect(Port, [{Send, Expects}|Rest], Options) when is_list(Expects) ->
- ?line port_send(Port, Send),
- ?line IsBinaryPort = lists:member(binary, Options),
- ?line Receiver =
+ port_send(Port, Send),
+ IsBinaryPort = lists:member(binary, Options),
+ Receiver =
case {lists:member(stream, Options), line_option(Options)} of
{false, _} -> fun receive_all/2;
{true,false} -> fun stream_receive_all/2;
{_, true} -> fun receive_all/2
end,
- ?line Receiver(Port, maybe_to_binary(Expects, IsBinaryPort)),
- ?line port_expect(Port, Rest, Options);
+ Receiver(Port, maybe_to_binary(Expects, IsBinaryPort)),
+ port_expect(Port, Rest, Options);
port_expect(_, [], _) ->
ok.
@@ -2068,7 +2010,7 @@ receive_all(Port, [Expect|Rest]) ->
_ ->
%%% io:format("Unexpected message: ~s", [format(Other)]),
io:format("Unexpected message: ~w", [Other]),
- ?line test_server:fail(unexpected_message)
+ test_server:fail(unexpected_message)
end
end,
receive_all(Port, Rest);
@@ -2157,15 +2099,8 @@ build_packet(Left, Result, NextChar0) ->
build_packet(Left-1, [NextChar0|Result], NextChar).
sizes() ->
- case os:type() of
- vxworks ->
- % don't stress VxWorks too much
- [10, 13, 64, 127, 128, 255, 256, 1023, 1024,
- 8191, 8192, 16383, 16384];
- _ ->
- [10, 13, 64, 127, 128, 255, 256, 1023, 1024,
- 32767, 32768, 65535, 65536]
- end.
+ [10, 13, 64, 127, 128, 255, 256, 1023, 1024,
+ 32767, 32768, 65535, 65536].
sizes(Header_Size) ->
sizes(Header_Size, sizes(), []).
@@ -2203,18 +2138,18 @@ fun_spawn(Fun, Args) ->
spawn_link(erlang, apply, [Fun, Args]).
port_test(Config) when is_list(Config) ->
- ?line filename:join(?config(data_dir, Config), "port_test").
+ filename:join(?config(data_dir, Config), "port_test").
ports(doc) -> "Test that erlang:ports/0 returns a consistent snapshot of ports";
ports(suite) -> [];
ports(Config) when is_list(Config) ->
- ?line Path = ?config(data_dir, Config),
- ?line ok = load_driver(Path, "exit_drv"),
+ Path = ?config(data_dir, Config),
+ ok = load_driver(Path, "exit_drv"),
receive after 1000 -> ok end, % Wait for other ports to stabilize
- ?line OtherPorts = erlang:ports(),
+ OtherPorts = erlang:ports(),
io:format("Other ports: ~p\n",[OtherPorts]),
MaxPorts = 1024 - length(OtherPorts),
@@ -2222,7 +2157,7 @@ ports(Config) when is_list(Config) ->
ports_snapshots(100, TrafficPid, OtherPorts),
TrafficPid ! {self(),die},
- ?line receive {TrafficPid, dead} -> ok end,
+ receive {TrafficPid, dead} -> ok end,
ok.
ports_snapshots(0, _, _) ->
@@ -2230,12 +2165,12 @@ ports_snapshots(0, _, _) ->
ports_snapshots(Iter, TrafficPid, OtherPorts) ->
TrafficPid ! start,
- ?line receive after 1 -> ok end,
+ receive after 1 -> ok end,
Snapshot = erlang:ports(),
TrafficPid ! {self(), stop},
- ?line receive {TrafficPid, EventList, TrafficPorts} -> ok end,
+ receive {TrafficPid, EventList, TrafficPorts} -> ok end,
%%io:format("Snapshot=~p\n", [Snapshot]),
ports_verify(Snapshot, OtherPorts ++ TrafficPorts, EventList),
@@ -2252,7 +2187,7 @@ ports_traffic_stopped(MaxPorts, {PortList, PortCnt}) ->
%%io:format("Traffic started in ~p\n",[self()]),
ports_traffic_started(MaxPorts, {PortList, PortCnt}, []);
{Pid,die} ->
- ?line lists:foreach(fun(Port)-> erlang:port_close(Port) end,
+ lists:foreach(fun(Port)-> erlang:port_close(Port) end,
PortList),
Pid ! {self(),dead}
end.
@@ -2272,15 +2207,15 @@ ports_traffic_do(MaxPorts, {PortList, PortCnt}, EventList) ->
N = uniform(MaxPorts),
case N > PortCnt of
true -> % Open port
- ?line P = open_port({spawn, "exit_drv"}, []),
+ P = open_port({spawn, "exit_drv"}, []),
%%io:format("Created port ~p\n",[P]),
ports_traffic_started(MaxPorts, {[P|PortList], PortCnt+1},
[{open,P}|EventList]);
false -> % Close port
- ?line P = lists:nth(N, PortList),
+ P = lists:nth(N, PortList),
%%io:format("Close port ~p\n",[P]),
- ?line true = erlang:port_close(P),
+ true = erlang:port_close(P),
ports_traffic_started(MaxPorts, {lists:delete(P,PortList), PortCnt-1},
[{close,P}|EventList])
end.
@@ -2301,7 +2236,7 @@ ports_verify(Ports, PortsAfter, EventList) ->
ports_verify(Ports, [P | PortsAfter], Tail);
[] ->
- ?line test_server:fail("Inconsistent snapshot from erlang:ports()")
+ test_server:fail("Inconsistent snapshot from erlang:ports()")
end
end.
@@ -2318,31 +2253,38 @@ close_deaf_port(doc) -> ["Send data to port program that does not read it, then
"Primary targeting Windows to test threaded_handle_closer in sys.c"];
close_deaf_port(suite) -> [];
close_deaf_port(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(100)),
- ?line DataDir = ?config(data_dir, Config),
- ?line DeadPort = os:find_executable("dead_port", DataDir),
- ?line Port = open_port({spawn,DeadPort++" 60"},[]),
- ?line erlang:port_command(Port,"Hello, can you hear me!?!?"),
- ?line port_close(Port),
+ Dog = test_server:timetrap(test_server:seconds(100)),
+ DataDir = ?config(data_dir, Config),
+ DeadPort = os:find_executable("dead_port", DataDir),
+ Port = open_port({spawn,DeadPort++" 60"},[]),
+ erlang:port_command(Port,"Hello, can you hear me!?!?"),
+ port_close(Port),
Res = close_deaf_port_1(0, DeadPort),
io:format("Waiting for OS procs to terminate...\n"),
receive after 5*1000 -> ok end,
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
Res.
close_deaf_port_1(1000, _) ->
ok;
close_deaf_port_1(N, Cmd) ->
Timeout = integer_to_list(random:uniform(5*1000)),
- ?line try open_port({spawn_executable,Cmd},[{args,[Timeout]}]) of
+ try open_port({spawn_executable,Cmd},[{args,[Timeout]}]) of
Port ->
- ?line erlang:port_command(Port,"Hello, can you hear me!?!?"),
- ?line port_close(Port),
+ erlang:port_command(Port,"Hello, can you hear me!?!?"),
+ port_close(Port),
close_deaf_port_1(N+1, Cmd)
catch
_:eagain ->
{comment, "Could not spawn more than " ++ integer_to_list(N) ++ " OS processes."}
end.
-
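+%% Poll helper: retry Fun() every 100 ms (exceptions swallowed) until it returns true.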
+wait_until(Fun) ->
+ case catch Fun() of
+ true ->
+ ok;
+ _ ->
+ receive after 100 -> ok end,
+ wait_until(Fun)
+ end.
diff --git a/erts/emulator/test/port_SUITE_data/dead_port.c b/erts/emulator/test/port_SUITE_data/dead_port.c
index 68e96fbf14..4dd9ee4cc2 100644
--- a/erts/emulator/test/port_SUITE_data/dead_port.c
+++ b/erts/emulator/test/port_SUITE_data/dead_port.c
@@ -17,15 +17,6 @@
* %CopyrightEnd%
*/
-#ifdef VXWORKS
-#include <vxWorks.h>
-#include <taskVarLib.h>
-#include <taskLib.h>
-#include <sysLib.h>
-#include <string.h>
-#include <ioLib.h>
-#endif
-
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -37,12 +28,7 @@
#ifndef __WIN32__
#include <unistd.h>
-#ifdef VXWORKS
-#include "reclaim.h"
-#include <sys/times.h>
-#else
#include <sys/time.h>
-#endif
#define O_BINARY 0
#define _setmode(fd, mode)
@@ -53,13 +39,7 @@
#include "winbase.h"
#endif
-
-#ifdef VXWORKS
-#define MAIN(argc, argv) port_test(argc, argv)
-#else
#define MAIN(argc, argv) main(argc, argv)
-#endif
-
extern int errno;
@@ -86,9 +66,6 @@ char *argv[];
static void
delay(unsigned ms)
{
-#ifdef VXWORKS
- taskDelay((sysClkRateGet() * ms) / 1000);
-#else
#ifdef __WIN32__
Sleep(ms);
#else
@@ -98,5 +75,4 @@ delay(unsigned ms)
select(0, NULL, NULL, NULL, &t);
#endif
-#endif
}
diff --git a/erts/emulator/test/port_SUITE_data/port_test.c b/erts/emulator/test/port_SUITE_data/port_test.c
index 7b4e386d87..7abefab2e3 100644
--- a/erts/emulator/test/port_SUITE_data/port_test.c
+++ b/erts/emulator/test/port_SUITE_data/port_test.c
@@ -3,15 +3,6 @@
* Purpose: A port program to be used for testing the open_port bif.
*/
-#ifdef VXWORKS
-#include <vxWorks.h>
-#include <taskVarLib.h>
-#include <taskLib.h>
-#include <sysLib.h>
-#include <string.h>
-#include <ioLib.h>
-#endif
-
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -23,12 +14,7 @@
#ifndef __WIN32__
#include <unistd.h>
-#ifdef VXWORKS
-#include "reclaim.h"
-#include <sys/times.h>
-#else
#include <sys/time.h>
-#endif
#define O_BINARY 0
#define _setmode(fd, mode)
@@ -40,22 +26,13 @@
#endif
-#ifdef VXWORKS
-#define REDIR_STDOUT(fd) ioTaskStdSet(0, 1, fd);
-#else
#define REDIR_STDOUT(fd) if (dup2(fd, 1) == -1) { \
fprintf(stderr, "%s: failed to duplicate handle %d to 1: %d\n", \
port_data->progname, fd, errno); \
exit(1); \
}
-#endif
-#ifdef VXWORKS
-#define MAIN(argc, argv) port_test(argc, argv)
-#else
#define MAIN(argc, argv) main(argc, argv)
-#endif
-
extern int errno;
@@ -101,7 +78,6 @@ static void dump(unsigned char* buf, int sz, int max);
static void replace_stdout(char* filename);
static void generate_reply(char* spec);
-#ifndef VXWORKS
#ifndef HAVE_STRERROR
extern int sys_nerr;
#ifndef sys_errlist /* sys_errlist is sometimes defined to
@@ -125,7 +101,6 @@ int err;
return msgstr;
}
#endif
-#endif
MAIN(argc, argv)
@@ -133,12 +108,6 @@ int argc;
char *argv[];
{
int ret;
-#ifdef VXWORKS
- if(taskVarAdd(0, (int *)&port_data) != OK) {
- fprintf(stderr, "Can't do taskVarAdd in port_test\n");
- exit(1);
- }
-#endif
if((port_data = (PORT_TEST_DATA *) malloc(sizeof(PORT_TEST_DATA))) == NULL) {
fprintf(stderr, "Couldn't malloc for port_data");
exit(1);
@@ -511,9 +480,6 @@ dump(buf, sz, max)
static void
delay(unsigned ms)
{
-#ifdef VXWORKS
- taskDelay((sysClkRateGet() * ms) / 1000);
-#else
#ifdef __WIN32__
Sleep(ms);
#else
@@ -523,7 +489,6 @@ delay(unsigned ms)
select(0, NULL, NULL, NULL, &t);
#endif
-#endif
}
/*
diff --git a/erts/emulator/test/port_SUITE_data/reclaim.h b/erts/emulator/test/port_SUITE_data/reclaim.h
deleted file mode 100644
index 1d57dc5b8a..0000000000
--- a/erts/emulator/test/port_SUITE_data/reclaim.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef __RECLAIM_H__
-#define __RECLAIM_H__
-
-
-/* The Erlang release for VxWorks includes a simple mechanism for
- "resource reclamation" at task exit - it allows replacement of the
- functions that open/close "files" and malloc/free memory with versions
- that keep track, to be able to "reclaim" file descriptors and memory
- when a task exits (regardless of *how* it exits).
-
- The interface to this mechanism is made available via this file,
- with the following caveats:
-
- - The interface may change (or perhaps even be removed, though that
- isn't likely until VxWorks itself provides similar functionality)
- in future releases - i.e. you must always use the version of this
- file that comes with the Erlang release you are using.
-
- - Disaster is guaranteed if you use the mechanism incorrectly (see
- below for the correct way), e.g. allocate memory with the "tracking"
- version of malloc() and free it with the "standard" version of free().
-
- - The mechanism (of course) incurs some performance penalty - thus
- for a simple program you may be better off with careful programming,
- making sure that you do whatever close()/free()/etc calls that are
- appropriate at all exit points (though if you need to guard against
- taskDelete() etc, things get messy...).
-
- To use the mechanism, simply program your application normally, i.e.
- use open()/close()/malloc()/free() etc as usual, but #include this
- file before any usage of the relevant functions. NOTE: To avoid the
- "disaster" mentioned above, you *must* #include it in *all* (or none)
- of the files that manipulate a particular file descriptor, allocated
- memory area, etc. Finally, note that you can obviously not load your
- application before the Erlang system when using this interface.
-*/
-
-/* Sorry, no ANSI prototypes yet... */
-extern int save_open(),save_creat(),save_socket(),save_accept(),save_close();
-#define open save_open
-#define creat save_creat
-#define socket save_socket
-#define accept save_accept
-#define close save_close
-extern FILE *save_fopen(), *save_fdopen(), *save_freopen();
-extern int save_fclose();
-#define fopen save_fopen
-#define fdopen save_fdopen
-#define freopen save_freopen
-#define fclose save_fclose
-/* XXX Should do opendir/closedir too... */
-extern char *save_malloc(), *save_calloc(), *save_realloc();
-extern void save_free(), save_cfree();
-#define malloc save_malloc
-#define calloc save_calloc
-#define realloc save_realloc
-#define free save_free
-#define cfree save_cfree
-
-#endif
diff --git a/erts/emulator/test/port_bif_SUITE_data/port_test.c b/erts/emulator/test/port_bif_SUITE_data/port_test.c
index c6b128df66..28324a56a6 100644
--- a/erts/emulator/test/port_bif_SUITE_data/port_test.c
+++ b/erts/emulator/test/port_bif_SUITE_data/port_test.c
@@ -3,15 +3,6 @@
* Purpose: A port program to be used for testing the open_port bif.
*/
-#ifdef VXWORKS
-#include <vxWorks.h>
-#include <taskVarLib.h>
-#include <taskLib.h>
-#include <sysLib.h>
-#include <string.h>
-#include <ioLib.h>
-#endif
-
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -23,12 +14,7 @@
#ifndef __WIN32__
#include <unistd.h>
-#ifdef VXWORKS
-#include "reclaim.h"
-#include <sys/times.h>
-#else
#include <sys/time.h>
-#endif
#define O_BINARY 0
#define _setmode(fd, mode)
@@ -40,22 +26,13 @@
#endif
-#ifdef VXWORKS
-#define REDIR_STDOUT(fd) ioTaskStdSet(0, 1, fd);
-#else
#define REDIR_STDOUT(fd) if (dup2(fd, 1) == -1) { \
fprintf(stderr, "%s: failed to duplicate handle %d to 1: %d\n", \
port_data->progname, fd, errno); \
exit(1); \
}
-#endif
-#ifdef VXWORKS
-#define MAIN(argc, argv) port_test(argc, argv)
-#else
#define MAIN(argc, argv) main(argc, argv)
-#endif
-
extern int errno;
@@ -101,7 +78,6 @@ static void dump(unsigned char* buf, int sz, int max);
static void replace_stdout(char* filename);
static void generate_reply(char* spec);
-#ifndef VXWORKS
#ifndef HAVE_STRERROR
extern int sys_nerr;
#ifndef sys_errlist /* sys_errlist is sometimes defined to
@@ -125,20 +101,13 @@ int err;
return msgstr;
}
#endif
-#endif
-
MAIN(argc, argv)
int argc;
char *argv[];
{
int ret;
-#ifdef VXWORKS
- if(taskVarAdd(0, (int *)&port_data) != OK) {
- fprintf(stderr, "Can't do taskVarAdd in port_test\n");
- exit(1);
- }
-#endif
+
if((port_data = (PORT_TEST_DATA *) malloc(sizeof(PORT_TEST_DATA))) == NULL) {
fprintf(stderr, "Couldn't malloc for port_data");
exit(1);
@@ -508,9 +477,6 @@ dump(buf, sz, max)
static void
delay(unsigned ms)
{
-#ifdef VXWORKS
- taskDelay((sysClkRateGet() * ms) / 1000);
-#else
#ifdef __WIN32__
Sleep(ms);
#else
@@ -520,7 +486,6 @@ delay(unsigned ms)
select(0, NULL, NULL, NULL, &t);
#endif
-#endif
}
/*
diff --git a/erts/emulator/test/port_bif_SUITE_data/reclaim.h b/erts/emulator/test/port_bif_SUITE_data/reclaim.h
deleted file mode 100644
index 1d57dc5b8a..0000000000
--- a/erts/emulator/test/port_bif_SUITE_data/reclaim.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef __RECLAIM_H__
-#define __RECLAIM_H__
-
-
-/* The Erlang release for VxWorks includes a simple mechanism for
- "resource reclamation" at task exit - it allows replacement of the
- functions that open/close "files" and malloc/free memory with versions
- that keep track, to be able to "reclaim" file descriptors and memory
- when a task exits (regardless of *how* it exits).
-
- The interface to this mechanism is made available via this file,
- with the following caveats:
-
- - The interface may change (or perhaps even be removed, though that
- isn't likely until VxWorks itself provides similar functionality)
- in future releases - i.e. you must always use the version of this
- file that comes with the Erlang release you are using.
-
- - Disaster is guaranteed if you use the mechanism incorrectly (see
- below for the correct way), e.g. allocate memory with the "tracking"
- version of malloc() and free it with the "standard" version of free().
-
- - The mechanism (of course) incurs some performance penalty - thus
- for a simple program you may be better off with careful programming,
- making sure that you do whatever close()/free()/etc calls that are
- appropriate at all exit points (though if you need to guard against
- taskDelete() etc, things get messy...).
-
- To use the mechanism, simply program your application normally, i.e.
- use open()/close()/malloc()/free() etc as usual, but #include this
- file before any usage of the relevant functions. NOTE: To avoid the
- "disaster" mentioned above, you *must* #include it in *all* (or none)
- of the files that manipulate a particular file descriptor, allocated
- memory area, etc. Finally, note that you can obviously not load your
- application before the Erlang system when using this interface.
-*/
-
-/* Sorry, no ANSI prototypes yet... */
-extern int save_open(),save_creat(),save_socket(),save_accept(),save_close();
-#define open save_open
-#define creat save_creat
-#define socket save_socket
-#define accept save_accept
-#define close save_close
-extern FILE *save_fopen(), *save_fdopen(), *save_freopen();
-extern int save_fclose();
-#define fopen save_fopen
-#define fdopen save_fdopen
-#define freopen save_freopen
-#define fclose save_fclose
-/* XXX Should do opendir/closedir too... */
-extern char *save_malloc(), *save_calloc(), *save_realloc();
-extern void save_free(), save_cfree();
-#define malloc save_malloc
-#define calloc save_calloc
-#define realloc save_realloc
-#define free save_free
-#define cfree save_cfree
-
-#endif
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index fdc55a4cc5..11dd88413f 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -118,15 +118,15 @@ fun_spawn(Fun) ->
%% (unclear if this test case will actually prove anything on
%% a modern computer with lots of memory).
spawn_with_binaries(Config) when is_list(Config) ->
- ?line L = lists:duplicate(2048, 42),
- ?line TwoMeg = lists:duplicate(1024, L),
- ?line Fun = fun() -> spawn(?MODULE, binary_owner, [list_to_binary(TwoMeg)]),
+ L = lists:duplicate(2048, 42),
+ TwoMeg = lists:duplicate(1024, L),
+ Fun = fun() -> spawn(?MODULE, binary_owner, [list_to_binary(TwoMeg)]),
receive after 1 -> ok end end,
- ?line Iter = case test_server:purify_is_running() of
+ Iter = case test_server:purify_is_running() of
true -> 10;
false -> 150
end,
- ?line test_server:do_times(Iter, Fun),
+ test_server:do_times(Iter, Fun),
ok.
binary_owner(Bin) when is_binary(Bin) ->
@@ -134,87 +134,87 @@ binary_owner(Bin) when is_binary(Bin) ->
%% Tests exit/1 with a big message.
t_exit_1(Config) when is_list(Config) ->
- ?line start_spawner(),
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(10, fun t_exit_1/0),
- ?line test_server:timetrap_cancel(Dog),
- ?line stop_spawner(),
+ start_spawner(),
+ Dog = test_server:timetrap(test_server:seconds(20)),
+ process_flag(trap_exit, true),
+ test_server:do_times(10, fun t_exit_1/0),
+ test_server:timetrap_cancel(Dog),
+ stop_spawner(),
ok.
t_exit_1() ->
- ?line Pid = fun_spawn(fun() -> exit(kb_128()) end),
- ?line Garbage = kb_128(),
- ?line receive
+ Pid = fun_spawn(fun() -> exit(kb_128()) end),
+ Garbage = kb_128(),
+ receive
{'EXIT', Pid, Garbage} -> ok
end.
%% Tests exit/2 with a lot of data in the exit message.
t_exit_2_other(Config) when is_list(Config) ->
- ?line start_spawner(),
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(10, fun t_exit_2_other/0),
- ?line test_server:timetrap_cancel(Dog),
- ?line stop_spawner(),
+ start_spawner(),
+ Dog = test_server:timetrap(test_server:seconds(20)),
+ process_flag(trap_exit, true),
+ test_server:do_times(10, fun t_exit_2_other/0),
+ test_server:timetrap_cancel(Dog),
+ stop_spawner(),
ok.
t_exit_2_other() ->
- ?line Pid = fun_spawn(fun() -> receive x -> ok end end),
- ?line Garbage = kb_128(),
- ?line exit(Pid, Garbage),
- ?line receive
+ Pid = fun_spawn(fun() -> receive x -> ok end end),
+ Garbage = kb_128(),
+ exit(Pid, Garbage),
+ receive
{'EXIT', Pid, Garbage} -> ok
end.
%% Tests that exit(Pid, normal) does not kill another process.
t_exit_2_other_normal(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line Pid = fun_spawn(fun() -> receive x -> ok end end),
- ?line exit(Pid, normal),
- ?line receive
+ Dog = test_server:timetrap(test_server:seconds(20)),
+ process_flag(trap_exit, true),
+ Pid = fun_spawn(fun() -> receive x -> ok end end),
+ exit(Pid, normal),
+ receive
{'EXIT', Pid, Reason} ->
- ?line test_server:fail({process_died, Reason})
+ test_server:fail({process_died, Reason})
after 1000 ->
ok
end,
- ?line case process_info(Pid) of
+ case process_info(Pid) of
undefined ->
test_server:fail(process_died_on_normal);
List when is_list(List) ->
ok
end,
exit(Pid, kill),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
%% Tests that we can trap an exit message sent with exit/2 from
%% the same process.
self_exit(Config) when is_list(Config) ->
- ?line start_spawner(),
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(200, fun self_exit/0),
- ?line test_server:timetrap_cancel(Dog),
- ?line stop_spawner(),
+ start_spawner(),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ process_flag(trap_exit, true),
+ test_server:do_times(200, fun self_exit/0),
+ test_server:timetrap_cancel(Dog),
+ stop_spawner(),
ok.
self_exit() ->
- ?line Garbage = eight_kb(),
- ?line P = self(),
- ?line true = exit(P, Garbage),
- ?line receive
+ Garbage = eight_kb(),
+ P = self(),
+ true = exit(P, Garbage),
+ receive
{'EXIT', P, Garbage} -> ok
end.
%% Tests exit(self(), normal) is equivalent to exit(normal) for a process
%% that doesn't trap exits.
normal_suicide_exit(Config) when is_list(Config) ->
- ?line process_flag(trap_exit, true),
- ?line Pid = fun_spawn(fun() -> exit(self(), normal) end),
- ?line receive
+ process_flag(trap_exit, true),
+ Pid = fun_spawn(fun() -> exit(self(), normal) end),
+ receive
{'EXIT', Pid, normal} -> ok;
Other -> test_server:fail({bad_message, Other})
end.
@@ -222,19 +222,19 @@ normal_suicide_exit(Config) when is_list(Config) ->
%% Tests exit(self(), Term) is equivalent to exit(Term) for a process
%% that doesn't trap exits.
abnormal_suicide_exit(Config) when is_list(Config) ->
- ?line Garbage = eight_kb(),
- ?line process_flag(trap_exit, true),
- ?line Pid = fun_spawn(fun() -> exit(self(), Garbage) end),
- ?line receive
+ Garbage = eight_kb(),
+ process_flag(trap_exit, true),
+ Pid = fun_spawn(fun() -> exit(self(), Garbage) end),
+ receive
{'EXIT', Pid, Garbage} -> ok;
Other -> test_server:fail({bad_message, Other})
end.
%% Tests that exit(self(), die) cannot be caught.
t_exit_2_catch(Config) when is_list(Config) ->
- ?line process_flag(trap_exit, true),
- ?line Pid = fun_spawn(fun() -> catch exit(self(), die) end),
- ?line receive
+ process_flag(trap_exit, true),
+ Pid = fun_spawn(fun() -> catch exit(self(), die) end),
+ receive
{'EXIT', Pid, normal} ->
test_server:fail(catch_worked);
{'EXIT', Pid, die} ->
@@ -246,29 +246,29 @@ t_exit_2_catch(Config) when is_list(Config) ->
%% Tests trapping of an 'EXIT' message generated by a bad argument to
%% the abs/1 bif. The 'EXIT' message will intentionally be very big.
trap_exit_badarg(Config) when is_list(Config) ->
- ?line start_spawner(),
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(10, fun trap_exit_badarg/0),
- ?line test_server:timetrap_cancel(Dog),
- ?line stop_spawner(),
+ start_spawner(),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ process_flag(trap_exit, true),
+ test_server:do_times(10, fun trap_exit_badarg/0),
+ test_server:timetrap_cancel(Dog),
+ stop_spawner(),
ok.
trap_exit_badarg() ->
- ?line Pid = fun_spawn(fun() -> bad_guy(kb_128()) end),
- ?line Garbage = kb_128(),
- ?line receive
+ Pid = fun_spawn(fun() -> bad_guy(kb_128()) end),
+ Garbage = kb_128(),
+ receive
{'EXIT',Pid,{badarg,[{erlang,abs,[Garbage],Loc1},
{?MODULE,bad_guy,1,Loc2}|_]}}
when is_list(Loc1), is_list(Loc2) ->
ok;
Other ->
- ?line ok = io:format("Bad EXIT message: ~P", [Other, 30]),
- ?line test_server:fail(bad_exit_message)
+ ok = io:format("Bad EXIT message: ~P", [Other, 30]),
+ test_server:fail(bad_exit_message)
end.
bad_guy(Arg) ->
- ?line abs(Arg).
+ abs(Arg).
kb_128() ->
@@ -280,20 +280,12 @@ kb_128() ->
big_binary()}.
eight_kb() ->
- %%% This is really much more than eight kb, so vxworks platforms
- %%% gets away with 1/8 of the other platforms (due to limited
- %%% memory resources).
- B64 = case os:type() of
- vxworks ->
- ?line lists:seq(1, 8);
- _ ->
- ?line lists:seq(1, 64)
- end,
- ?line B512 = {<<1>>,B64,<<2,3>>,B64,make_unaligned_sub_binary(<<4,5,6,7,8,9>>),
+ B64 = lists:seq(1, 64),
+ B512 = {<<1>>,B64,<<2,3>>,B64,make_unaligned_sub_binary(<<4,5,6,7,8,9>>),
B64,make_sub_binary([1,2,3,4,5,6]),
B64,make_sub_binary(lists:seq(1, ?heap_binary_size+1)),
B64,B64,B64,B64,big_binary()},
- ?line lists:duplicate(8, {B512,B512}).
+ lists:duplicate(8, {B512,B512}).
big_binary() ->
big_binary(10, [42]).
@@ -304,19 +296,19 @@ big_binary(N, Acc) ->
%% Test receiving an EXIT message when spawning a BIF with bad arguments.
trap_exit_badarg_in_bif(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line process_flag(trap_exit, true),
- ?line test_server:do_times(10, fun trap_exit_badarg_bif/0),
- ?line test_server:timetrap_cancel(Dog),
+ Dog = test_server:timetrap(test_server:seconds(10)),
+ process_flag(trap_exit, true),
+ test_server:do_times(10, fun trap_exit_badarg_bif/0),
+ test_server:timetrap_cancel(Dog),
ok.
trap_exit_badarg_bif() ->
- ?line Pid = spawn_link(erlang, node, [1]),
- ?line receive
+ Pid = spawn_link(erlang, node, [1]),
+ receive
{'EXIT', Pid, {badarg, _}} ->
ok;
Other ->
- ?line test_server:fail({unexpected, Other})
+ test_server:fail({unexpected, Other})
end.
%% The following sequences of events have crashed Beam.
@@ -329,27 +321,27 @@ trap_exit_badarg_bif() ->
%% 3) The process will crash the next time it executes 'receive'.
exit_and_timeout(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
+ Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line Parent = self(),
- ?line Low = fun_spawn(fun() -> eat_low(Parent) end),
- ?line High = fun_spawn(fun() -> eat_high(Low) end),
- ?line eat_wait_for(Low, High),
+ process_flag(trap_exit, true),
+ Parent = self(),
+ Low = fun_spawn(fun() -> eat_low(Parent) end),
+ High = fun_spawn(fun() -> eat_high(Low) end),
+ eat_wait_for(Low, High),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
eat_wait_for(Low, High) ->
- ?line receive
- {'EXIT', Low, {you, are, dead}} ->
- ok;
- {'EXIT', High, normal} ->
- eat_wait_for(Low, High);
- Other ->
- test_server:fail({bad_message, Other})
- end.
+ receive
+ {'EXIT', Low, {you, are, dead}} ->
+ ok;
+ {'EXIT', High, normal} ->
+ eat_wait_for(Low, High);
+ Other ->
+ test_server:fail({bad_message, Other})
+ end.
eat_low(_Parent) ->
receive
@@ -382,27 +374,27 @@ loop(_, _) ->
%% Tries to send two different exit messages to a process.
%% (The second one should be ignored.)
exit_twice(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
+ Dog = test_server:timetrap(test_server:seconds(20)),
- ?line process_flag(trap_exit, true),
- ?line Low = fun_spawn(fun etwice_low/0),
- ?line High = fun_spawn(fun() -> etwice_high(Low) end),
- ?line etwice_wait_for(Low, High),
+ process_flag(trap_exit, true),
+ Low = fun_spawn(fun etwice_low/0),
+ High = fun_spawn(fun() -> etwice_high(Low) end),
+ etwice_wait_for(Low, High),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:timetrap_cancel(Dog),
ok.
etwice_wait_for(Low, High) ->
- ?line receive
- {'EXIT', Low, first} ->
- ok;
- {'EXIT', Low, Other} ->
- test_server:fail({wrong_exit_reason, Other});
- {'EXIT', High, normal} ->
- etwice_wait_for(Low, High);
- Other ->
- test_server:fail({bad_message, Other})
- end.
+ receive
+ {'EXIT', Low, first} ->
+ ok;
+ {'EXIT', Low, Other} ->
+ test_server:fail({wrong_exit_reason, Other});
+ {'EXIT', High, normal} ->
+ etwice_wait_for(Low, High);
+ Other ->
+ test_server:fail({bad_message, Other})
+ end.
etwice_low() ->
etwice_low().
@@ -414,15 +406,15 @@ etwice_high(Low) ->
%% Tests the process_info/2 BIF.
t_process_info(Config) when is_list(Config) ->
- ?line [] = process_info(self(), registered_name),
- ?line register(my_name, self()),
- ?line {registered_name, my_name} = process_info(self(), registered_name),
- ?line {status, running} = process_info(self(), status),
- ?line {min_heap_size, 233} = process_info(self(), min_heap_size),
- ?line {min_bin_vheap_size, 46368} = process_info(self(), min_bin_vheap_size),
- ?line {current_function,{?MODULE,t_process_info,1}} =
+ [] = process_info(self(), registered_name),
+ register(my_name, self()),
+ {registered_name, my_name} = process_info(self(), registered_name),
+ {status, running} = process_info(self(), status),
+ {min_heap_size, 233} = process_info(self(), min_heap_size),
+    {min_bin_vheap_size, 46422} = process_info(self(), min_bin_vheap_size),
+ {current_function,{?MODULE,t_process_info,1}} =
process_info(self(), current_function),
- ?line {current_function,{?MODULE,t_process_info,1}} =
+ {current_function,{?MODULE,t_process_info,1}} =
apply(erlang, process_info, [self(),current_function]),
%% current_location and current_stacktrace
@@ -433,9 +425,9 @@ t_process_info(Config) when is_list(Config) ->
verify_loc(Line2, Res2),
pi_stacktrace([{?MODULE,t_process_info,1,?LINE}]),
- ?line Gleader = group_leader(),
- ?line {group_leader, Gleader} = process_info(self(), group_leader),
- ?line {'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')),
+ Gleader = group_leader(),
+ {group_leader, Gleader} = process_info(self(), group_leader),
+ {'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')),
ok.
pi_stacktrace(Expected0) ->
@@ -517,50 +509,50 @@ process_info_looper(Parent) ->
%% Tests the process_info/1 BIF on another process with messages.
process_info_other_msg(Config) when is_list(Config) ->
Self = self(),
- ?line Pid = spawn_link(fun() -> other_process(Self) end),
+ Pid = spawn_link(fun() -> other_process(Self) end),
receive
{go_ahead,Pid} -> ok
end,
- ?line Own = {my,own,message},
+ Own = {my,own,message},
- ?line {messages,[Own]} = process_info(Pid, messages),
+ {messages,[Own]} = process_info(Pid, messages),
- ?line Garbage = kb_128(),
- ?line MsgA = {a,Garbage},
- ?line MsgB = {b,Garbage},
- ?line MsgC = {c,Garbage},
- ?line MsgD = {d,Garbage},
- ?line MsgE = {e,Garbage},
-
- ?line Pid ! MsgA,
- ?line {messages,[Own,MsgA]} = process_info(Pid, messages),
- ?line Pid ! MsgB,
- ?line {messages,[Own,MsgA,MsgB]} = process_info(Pid, messages),
- ?line Pid ! MsgC,
- ?line {messages,[Own,MsgA,MsgB,MsgC]} = process_info(Pid, messages),
- ?line Pid ! MsgD,
- ?line {messages,[Own,MsgA,MsgB,MsgC,MsgD]} = process_info(Pid, messages),
- ?line Pid ! MsgE,
- ?line {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} = process_info(Pid, messages),
- ?line {memory,BytesOther} = process_info(Pid, memory),
- ?line {memory,BytesSelf} = process_info(self(), memory),
+ Garbage = kb_128(),
+ MsgA = {a,Garbage},
+ MsgB = {b,Garbage},
+ MsgC = {c,Garbage},
+ MsgD = {d,Garbage},
+ MsgE = {e,Garbage},
+
+ Pid ! MsgA,
+ {messages,[Own,MsgA]} = process_info(Pid, messages),
+ Pid ! MsgB,
+ {messages,[Own,MsgA,MsgB]} = process_info(Pid, messages),
+ Pid ! MsgC,
+ {messages,[Own,MsgA,MsgB,MsgC]} = process_info(Pid, messages),
+ Pid ! MsgD,
+ {messages,[Own,MsgA,MsgB,MsgC,MsgD]} = process_info(Pid, messages),
+ Pid ! MsgE,
+ {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} = process_info(Pid, messages),
+ {memory,BytesOther} = process_info(Pid, memory),
+ {memory,BytesSelf} = process_info(self(), memory),
io:format("Memory ~p: ~p\n", [Pid,BytesOther]),
io:format("Memory ~p (self): ~p\n", [self(),BytesSelf]),
[Own,MsgA,MsgB,MsgC,MsgD,MsgE] = All,
- ?line Pid ! {self(),empty},
- ?line receive
+ Pid ! {self(),empty},
+ receive
empty -> ok
end,
- ?line {messages,[]} = process_info(Pid, messages),
+ {messages,[]} = process_info(Pid, messages),
- ?line {min_heap_size, 233} = process_info(Pid, min_heap_size),
- ?line {min_bin_vheap_size, 46368} = process_info(Pid, min_bin_vheap_size),
+ {min_heap_size, 233} = process_info(Pid, min_heap_size),
+ {min_bin_vheap_size, 46422} = process_info(Pid, min_bin_vheap_size),
- ?line Pid ! stop,
+ Pid ! stop,
ok.
process_info_other_dist_msg(Config) when is_list(Config) ->
@@ -568,52 +560,51 @@ process_info_other_dist_msg(Config) when is_list(Config) ->
%% Check that process_info can handle messages that have not been
%% decoded yet.
%%
- ?line {ok, Node} = start_node(Config),
- ?line Self = self(),
- ?line Pid = spawn_link(fun() -> other_process(Self) end),
- ?line receive {go_ahead,Pid} -> ok end,
+ {ok, Node} = start_node(Config),
+ Self = self(),
+ Pid = spawn_link(fun() -> other_process(Self) end),
+ receive {go_ahead,Pid} -> ok end,
- ?line Own = {my,own,message},
+ Own = {my,own,message},
- ?line {messages,[Own]} = process_info(Pid, messages),
- ?line Garbage = kb_128(),
- ?line MsgA = {a,self(),Garbage},
- ?line MsgB = {b,self(),Garbage},
- ?line MsgC = {c,self(),Garbage},
- ?line MsgD = {d,self(),Garbage},
- ?line MsgE = {e,self(),Garbage},
+ {messages,[Own]} = process_info(Pid, messages),
+ Garbage = kb_128(),
+ MsgA = {a,self(),Garbage},
+ MsgB = {b,self(),Garbage},
+ MsgC = {c,self(),Garbage},
+ MsgD = {d,self(),Garbage},
+ MsgE = {e,self(),Garbage},
%% We don't want the other process to decode messages itself
%% therefore we suspend it.
- ?line true = erlang:suspend_process(Pid),
- ?line spawn_link(Node, fun () ->
- Pid ! MsgA,
- Pid ! MsgB,
- Pid ! MsgC,
- Self ! check_abc
- end),
- ?line receive check_abc -> ok end,
- ?line [{status,suspended},
- {messages,[Own,MsgA,MsgB,MsgC]},
- {status,suspended}]= process_info(Pid, [status,messages,status]),
- ?line spawn_link(Node, fun () ->
- Pid ! MsgD,
- Pid ! MsgE,
- Self ! check_de
- end),
- ?line receive check_de -> ok end,
- ?line {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All}
- = process_info(Pid, messages),
- ?line true = erlang:resume_process(Pid),
- ?line Pid ! {self(), get_all_messages},
- ?line receive
+ true = erlang:suspend_process(Pid),
+ spawn_link(Node, fun () ->
+ Pid ! MsgA,
+ Pid ! MsgB,
+ Pid ! MsgC,
+ Self ! check_abc
+ end),
+ receive check_abc -> ok end,
+ [{status,suspended},
+ {messages,[Own,MsgA,MsgB,MsgC]},
+ {status,suspended}]= process_info(Pid, [status,messages,status]),
+ spawn_link(Node, fun () ->
+ Pid ! MsgD,
+ Pid ! MsgE,
+ Self ! check_de
+ end),
+ receive check_de -> ok end,
+ {messages,[Own,MsgA,MsgB,MsgC,MsgD,MsgE]=All} = process_info(Pid, messages),
+ true = erlang:resume_process(Pid),
+ Pid ! {self(), get_all_messages},
+ receive
{all_messages, AllMsgs} ->
- ?line All = AllMsgs
+ All = AllMsgs
end,
- ?line {messages,[]} = process_info(Pid, messages),
- ?line Pid ! stop,
- ?line stop_node(Node),
- ?line ok.
+ {messages,[]} = process_info(Pid, messages),
+ Pid ! stop,
+ stop_node(Node),
+ ok.
other_process(Parent) ->
@@ -660,38 +651,36 @@ process_info_2_list(doc) ->
process_info_2_list(suite) ->
[];
process_info_2_list(Config) when is_list(Config) ->
- ?line Proc = spawn(fun () ->
- receive after infinity -> ok end end),
+ Proc = spawn(fun () -> receive after infinity -> ok end end),
register(process_SUITE_process_info_2_list1, self()),
register(process_SUITE_process_info_2_list2, Proc),
- ?line erts_debug:set_internal_state(available_internal_state,true),
- ?line AllArgs = erts_debug:get_internal_state(process_info_args),
- ?line A1 = lists:sort(AllArgs) ++ [status] ++ lists:reverse(AllArgs),
+ erts_debug:set_internal_state(available_internal_state,true),
+ AllArgs = erts_debug:get_internal_state(process_info_args),
+ A1 = lists:sort(AllArgs) ++ [status] ++ lists:reverse(AllArgs),
%% Verify that argument is accepted as single atom
- ?line lists:foreach(fun (A) ->
- ?line {A, _} = process_info(Proc, A),
- ?line {A, _} = process_info(self(), A)
- end,
- A1),
+ lists:foreach(fun (A) ->
+ {A, _} = process_info(Proc, A),
+ {A, _} = process_info(self(), A)
+ end, A1),
%% Verify that order is preserved
- ?line ok = chk_pi_order(process_info(self(), A1), A1),
- ?line ok = chk_pi_order(process_info(Proc, A1), A1),
+ ok = chk_pi_order(process_info(self(), A1), A1),
+ ok = chk_pi_order(process_info(Proc, A1), A1),
%% Small arg list
- ?line A2 = [status, stack_size, trap_exit, priority],
- ?line [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}]
+ A2 = [status, stack_size, trap_exit, priority],
+ [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}]
= process_info(Proc, A2),
- ?line [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}]
+ [{status, _}, {stack_size, _}, {trap_exit, _}, {priority, _}]
= process_info(self(), A2),
%% Huge arg list (note values are shared)
- ?line A3 = lists:duplicate(5000,backtrace),
- ?line V3 = process_info(Proc, A3),
- ?line 5000 = length(V3),
- ?line lists:foreach(fun ({backtrace, _}) -> ok end, V3),
- ?line ok.
+ A3 = lists:duplicate(5000,backtrace),
+ V3 = process_info(Proc, A3),
+ 5000 = length(V3),
+ lists:foreach(fun ({backtrace, _}) -> ok end, V3),
+ ok.
process_info_lock_reschedule(doc) ->
[];
@@ -700,43 +689,37 @@ process_info_lock_reschedule(suite) ->
process_info_lock_reschedule(Config) when is_list(Config) ->
%% We need a process that is running and an item that requires
%% process_info to take the main process lock.
- ?line Target1 = spawn_link(fun tok_loop/0),
- ?line Name1 = process_info_lock_reschedule_running,
- ?line register(Name1, Target1),
- ?line Target2 = spawn_link(fun () -> receive after infinity -> ok end end),
- ?line Name2 = process_info_lock_reschedule_waiting,
- ?line register(Name2, Target2),
- ?line PI = fun(_) ->
- ?line erlang:yield(),
- ?line [{registered_name, Name1}]
- = process_info(Target1, [registered_name]),
- ?line [{registered_name, Name2}]
- = process_info(Target2, [registered_name]),
- ?line erlang:yield(),
- ?line {registered_name, Name1}
- = process_info(Target1, registered_name),
- ?line {registered_name, Name2}
- = process_info(Target2, registered_name),
- ?line erlang:yield(),
- ?line [{registered_name, Name1}| _]
- = process_info(Target1),
- ?line [{registered_name, Name2}| _]
- = process_info(Target2)
- end,
- ?line lists:foreach(PI, lists:seq(1,1000)),
+ Target1 = spawn_link(fun tok_loop/0),
+ Name1 = process_info_lock_reschedule_running,
+ register(Name1, Target1),
+ Target2 = spawn_link(fun () -> receive after infinity -> ok end end),
+ Name2 = process_info_lock_reschedule_waiting,
+ register(Name2, Target2),
+ PI = fun(_) ->
+ erlang:yield(),
+ [{registered_name, Name1}] = process_info(Target1, [registered_name]),
+ [{registered_name, Name2}] = process_info(Target2, [registered_name]),
+ erlang:yield(),
+ {registered_name, Name1} = process_info(Target1, registered_name),
+ {registered_name, Name2} = process_info(Target2, registered_name),
+ erlang:yield(),
+ [{registered_name, Name1}| _] = process_info(Target1),
+ [{registered_name, Name2}| _] = process_info(Target2)
+ end,
+ lists:foreach(PI, lists:seq(1,1000)),
%% Make sure Target1 still is willing to "tok loop"
- ?line case process_info(Target1, status) of
- {status, OkStatus} when OkStatus == runnable;
- OkStatus == running;
- OkStatus == garbage_collecting ->
- ?line unlink(Target1),
- ?line unlink(Target2),
- ?line exit(Target1, bang),
- ?line exit(Target2, bang),
- ?line OkStatus;
- {status, BadStatus} ->
- ?line ?t:fail(BadStatus)
- end.
+ case process_info(Target1, status) of
+ {status, OkStatus} when OkStatus == runnable;
+ OkStatus == running;
+ OkStatus == garbage_collecting ->
+ unlink(Target1),
+ unlink(Target2),
+ exit(Target1, bang),
+ exit(Target2, bang),
+ OkStatus;
+ {status, BadStatus} ->
+ ?t:fail(BadStatus)
+ end.
pi_loop(_Name, _Pid, 0) ->
ok;
@@ -749,50 +732,50 @@ process_info_lock_reschedule2(doc) ->
process_info_lock_reschedule2(suite) ->
[];
process_info_lock_reschedule2(Config) when is_list(Config) ->
- ?line Parent = self(),
- ?line Fun = fun () ->
- receive {go, Name, Pid} -> ok end,
- pi_loop(Name, Pid, 10000),
- Parent ! {done, self()},
- receive after infinity -> ok end
- end,
- ?line P1 = spawn_link(Fun),
- ?line N1 = process_info_lock_reschedule2_1,
- ?line true = register(N1, P1),
- ?line P2 = spawn_link(Fun),
- ?line N2 = process_info_lock_reschedule2_2,
- ?line true = register(N2, P2),
- ?line P3 = spawn_link(Fun),
- ?line N3 = process_info_lock_reschedule2_3,
- ?line true = register(N3, P3),
- ?line P4 = spawn_link(Fun),
- ?line N4 = process_info_lock_reschedule2_4,
- ?line true = register(N4, P4),
- ?line P5 = spawn_link(Fun),
- ?line N5 = process_info_lock_reschedule2_5,
- ?line true = register(N5, P5),
- ?line P6 = spawn_link(Fun),
- ?line N6 = process_info_lock_reschedule2_6,
- ?line true = register(N6, P6),
- ?line P1 ! {go, N2, P2},
- ?line P2 ! {go, N1, P1},
- ?line P3 ! {go, N1, P1},
- ?line P4 ! {go, N1, P1},
- ?line P5 ! {go, N6, P6},
- ?line P6 ! {go, N5, P5},
- ?line receive {done, P1} -> ok end,
- ?line receive {done, P2} -> ok end,
- ?line receive {done, P3} -> ok end,
- ?line receive {done, P4} -> ok end,
- ?line receive {done, P5} -> ok end,
- ?line receive {done, P6} -> ok end,
- ?line unlink(P1), exit(P1, bang),
- ?line unlink(P2), exit(P2, bang),
- ?line unlink(P3), exit(P3, bang),
- ?line unlink(P4), exit(P4, bang),
- ?line unlink(P5), exit(P5, bang),
- ?line unlink(P6), exit(P6, bang),
- ?line ok.
+ Parent = self(),
+ Fun = fun () ->
+ receive {go, Name, Pid} -> ok end,
+ pi_loop(Name, Pid, 10000),
+ Parent ! {done, self()},
+ receive after infinity -> ok end
+ end,
+ P1 = spawn_link(Fun),
+ N1 = process_info_lock_reschedule2_1,
+ true = register(N1, P1),
+ P2 = spawn_link(Fun),
+ N2 = process_info_lock_reschedule2_2,
+ true = register(N2, P2),
+ P3 = spawn_link(Fun),
+ N3 = process_info_lock_reschedule2_3,
+ true = register(N3, P3),
+ P4 = spawn_link(Fun),
+ N4 = process_info_lock_reschedule2_4,
+ true = register(N4, P4),
+ P5 = spawn_link(Fun),
+ N5 = process_info_lock_reschedule2_5,
+ true = register(N5, P5),
+ P6 = spawn_link(Fun),
+ N6 = process_info_lock_reschedule2_6,
+ true = register(N6, P6),
+ P1 ! {go, N2, P2},
+ P2 ! {go, N1, P1},
+ P3 ! {go, N1, P1},
+ P4 ! {go, N1, P1},
+ P5 ! {go, N6, P6},
+ P6 ! {go, N5, P5},
+ receive {done, P1} -> ok end,
+ receive {done, P2} -> ok end,
+ receive {done, P3} -> ok end,
+ receive {done, P4} -> ok end,
+ receive {done, P5} -> ok end,
+ receive {done, P6} -> ok end,
+ unlink(P1), exit(P1, bang),
+ unlink(P2), exit(P2, bang),
+ unlink(P3), exit(P3, bang),
+ unlink(P4), exit(P4, bang),
+ unlink(P5), exit(P5, bang),
+ unlink(P6), exit(P6, bang),
+ ok.
many_args(0,_B,_C,_D,_E,_F,_G,_H,_I,_J) ->
ok;
@@ -810,120 +793,115 @@ process_info_lock_reschedule3(suite) ->
process_info_lock_reschedule3(Config) when is_list(Config) ->
%% We need a process that is running and an item that requires
%% process_info to take the main process lock.
- ?line Target1 = spawn_link(fun tok_loop/0),
- ?line Name1 = process_info_lock_reschedule_running,
- ?line register(Name1, Target1),
- ?line Target2 = spawn_link(fun () -> receive after infinity -> ok end end),
- ?line Name2 = process_info_lock_reschedule_waiting,
- ?line register(Name2, Target2),
- ?line PI = fun(N) ->
- case N rem 10 of
- 0 -> erlang:yield();
- _ -> ok
- end,
- ?line do_pi_msg_len({proc, Target1},
- {arg, message_queue_len})
- end,
- ?line many_args(100000,1,2,3,4,5,6,7,8,9),
- ?line lists:foreach(PI, lists:seq(1,1000000)),
+ Target1 = spawn_link(fun tok_loop/0),
+ Name1 = process_info_lock_reschedule_running,
+ register(Name1, Target1),
+ Target2 = spawn_link(fun () -> receive after infinity -> ok end end),
+ Name2 = process_info_lock_reschedule_waiting,
+ register(Name2, Target2),
+ PI = fun(N) ->
+ case N rem 10 of
+ 0 -> erlang:yield();
+ _ -> ok
+ end,
+ do_pi_msg_len({proc, Target1},
+ {arg, message_queue_len})
+ end,
+ many_args(100000,1,2,3,4,5,6,7,8,9),
+ lists:foreach(PI, lists:seq(1,1000000)),
%% Make sure Target1 still is willing to "tok loop"
- ?line case process_info(Target1, status) of
+ case process_info(Target1, status) of
{status, OkStatus} when OkStatus == runnable;
OkStatus == running;
OkStatus == garbage_collecting ->
- ?line unlink(Target1),
- ?line unlink(Target2),
- ?line exit(Target1, bang),
- ?line exit(Target2, bang),
- ?line OkStatus;
+ unlink(Target1),
+ unlink(Target2),
+ exit(Target1, bang),
+ exit(Target2, bang),
+ OkStatus;
{status, BadStatus} ->
- ?line ?t:fail(BadStatus)
+ ?t:fail(BadStatus)
end.
process_status_exiting(Config) when is_list(Config) ->
%% Make sure that erts_debug:get_internal_state({process_status,P})
%% returns exiting if it is in status P_EXITING.
- ?line erts_debug:set_internal_state(available_internal_state,true),
- ?line Prio = process_flag(priority, max),
- ?line P = spawn_opt(fun () -> receive after infinity -> ok end end,
+ erts_debug:set_internal_state(available_internal_state,true),
+ Prio = process_flag(priority, max),
+ P = spawn_opt(fun () -> receive after infinity -> ok end end,
[{priority, normal}]),
- ?line erlang:yield(),
+ erlang:yield(),
%% The tok_loop processes are here to make it hard for the exiting
%% process to be scheduled in for exit...
- ?line TokLoops = lists:map(fun (_) ->
- spawn_opt(fun tok_loop/0,
- [link,{priority, high}])
- end,
- lists:seq(1, erlang:system_info(schedulers_online))),
- ?line exit(P, boom),
- ?line wait_until(
- fun () ->
- exiting =:= erts_debug:get_internal_state({process_status,P})
- end),
- ?line lists:foreach(fun (Tok) -> unlink(Tok), exit(Tok,bang) end, TokLoops),
- ?line process_flag(priority, Prio),
- ?line ok.
+ TokLoops = lists:map(fun (_) ->
+ spawn_opt(fun tok_loop/0,
+ [link,{priority, high}])
+ end, lists:seq(1, erlang:system_info(schedulers_online))),
+ exit(P, boom),
+ wait_until(fun() ->
+ exiting =:= erts_debug:get_internal_state({process_status,P})
+ end),
+ lists:foreach(fun (Tok) -> unlink(Tok), exit(Tok,bang) end, TokLoops),
+ process_flag(priority, Prio),
+ ok.
otp_4725(Config) when is_list(Config) ->
- ?line Tester = self(),
- ?line Ref1 = make_ref(),
- ?line Pid1 = spawn_opt(fun () ->
- Tester ! {Ref1, process_info(self())},
- receive
- Ref1 -> bye
- end
- end,
- [link,
- {priority, max},
- {fullsweep_after, 600}]),
- ?line receive
- {Ref1, ProcInfo1A} ->
- ?line ProcInfo1B = process_info(Pid1),
- ?line Pid1 ! Ref1,
- ?line check_proc_infos(ProcInfo1A, ProcInfo1B)
- end,
- ?line Ref2 = make_ref(),
- ?line Pid2 = spawn_opt(fun () ->
- Tester ! {Ref2, process_info(self())},
- receive
- Ref2 -> bye
- end
- end,
- []),
- ?line receive
- {Ref2, ProcInfo2A} ->
- ?line ProcInfo2B = process_info(Pid2),
- ?line Pid2 ! Ref2,
- ?line check_proc_infos(ProcInfo2A, ProcInfo2B)
- end,
- ?line ok.
+ Tester = self(),
+ Ref1 = make_ref(),
+ Pid1 = spawn_opt(fun () ->
+ Tester ! {Ref1, process_info(self())},
+ receive
+ Ref1 -> bye
+ end
+ end, [link, {priority, max}, {fullsweep_after, 600}]),
+ receive
+ {Ref1, ProcInfo1A} ->
+ ProcInfo1B = process_info(Pid1),
+ Pid1 ! Ref1,
+ check_proc_infos(ProcInfo1A, ProcInfo1B)
+ end,
+ Ref2 = make_ref(),
+ Pid2 = spawn_opt(fun () ->
+ Tester ! {Ref2, process_info(self())},
+ receive
+ Ref2 -> bye
+ end
+ end,
+ []),
+ receive
+ {Ref2, ProcInfo2A} ->
+ ProcInfo2B = process_info(Pid2),
+ Pid2 ! Ref2,
+ check_proc_infos(ProcInfo2A, ProcInfo2B)
+ end,
+ ok.
check_proc_infos(A, B) ->
- ?line IC = lists:keysearch(initial_call, 1, A),
- ?line IC = lists:keysearch(initial_call, 1, B),
+ IC = lists:keysearch(initial_call, 1, A),
+ IC = lists:keysearch(initial_call, 1, B),
- ?line L = lists:keysearch(links, 1, A),
- ?line L = lists:keysearch(links, 1, B),
+ L = lists:keysearch(links, 1, A),
+ L = lists:keysearch(links, 1, B),
- ?line D = lists:keysearch(dictionary, 1, A),
- ?line D = lists:keysearch(dictionary, 1, B),
+ D = lists:keysearch(dictionary, 1, A),
+ D = lists:keysearch(dictionary, 1, B),
- ?line TE = lists:keysearch(trap_exit, 1, A),
- ?line TE = lists:keysearch(trap_exit, 1, B),
+ TE = lists:keysearch(trap_exit, 1, A),
+ TE = lists:keysearch(trap_exit, 1, B),
- ?line EH = lists:keysearch(error_handler, 1, A),
- ?line EH = lists:keysearch(error_handler, 1, B),
+ EH = lists:keysearch(error_handler, 1, A),
+ EH = lists:keysearch(error_handler, 1, B),
- ?line P = lists:keysearch(priority, 1, A),
- ?line P = lists:keysearch(priority, 1, B),
+ P = lists:keysearch(priority, 1, A),
+ P = lists:keysearch(priority, 1, B),
- ?line GL = lists:keysearch(group_leader, 1, A),
- ?line GL = lists:keysearch(group_leader, 1, B),
+ GL = lists:keysearch(group_leader, 1, A),
+ GL = lists:keysearch(group_leader, 1, B),
- ?line GC = lists:keysearch(garbage_collection, 1, A),
- ?line GC = lists:keysearch(garbage_collection, 1, B),
+ GC = lists:keysearch(garbage_collection, 1, A),
+ GC = lists:keysearch(garbage_collection, 1, B),
- ?line ok.
+ ok.
%% Dummies.
@@ -936,18 +914,18 @@ stop_spawner() ->
%% Tests erlang:bump_reductions/1.
bump_reductions(Config) when is_list(Config) ->
- ?line erlang:garbage_collect(),
- ?line receive after 1 -> ok end, % Clear reductions.
- ?line {reductions,R1} = process_info(self(), reductions),
- ?line true = erlang:bump_reductions(100),
- ?line {reductions,R2} = process_info(self(), reductions),
- ?line case R2-R1 of
+ erlang:garbage_collect(),
+ receive after 1 -> ok end, % Clear reductions.
+ {reductions,R1} = process_info(self(), reductions),
+ true = erlang:bump_reductions(100),
+ {reductions,R2} = process_info(self(), reductions),
+ case R2-R1 of
Diff when Diff < 100 ->
- ?line ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]),
- ?line test_server:fail({small_diff, Diff});
+ ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]),
+ test_server:fail({small_diff, Diff});
Diff when Diff > 110 ->
- ?line ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]),
- ?line test_server:fail({big_diff, Diff});
+ ok = io:format("R1 = ~w, R2 = ~w", [R1, R2]),
+ test_server:fail({big_diff, Diff});
Diff ->
io:format("~p\n", [Diff]),
ok
@@ -957,11 +935,11 @@ bump_reductions(Config) when is_list(Config) ->
bump_big(R2, 16#08000000).
bump_big(Prev, Limit) ->
- ?line true = erlang:bump_reductions(100000), %Limited to CONTEXT_REDUCTIONS.
- ?line case process_info(self(), reductions) of
+ true = erlang:bump_reductions(100000), %Limited to CONTEXT_REDUCTIONS.
+ case process_info(self(), reductions) of
{reductions,Big} when is_integer(Big), Big > Limit ->
- ?line erlang:garbage_collect(),
- ?line io:format("~p\n", [Big]);
+ erlang:garbage_collect(),
+ io:format("~p\n", [Big]);
{reductions,R} when is_integer(R), R > Prev ->
bump_big(R, Limit)
end,
@@ -972,34 +950,34 @@ bump_big(Prev, Limit) ->
low_prio(Config) when is_list(Config) ->
case erlang:system_info(schedulers_online) of
1 ->
- ?line ok = low_prio_test(Config);
+ ok = low_prio_test(Config);
_ ->
- ?line erlang:system_flag(multi_scheduling, block),
- ?line ok = low_prio_test(Config),
- ?line erlang:system_flag(multi_scheduling, unblock),
- ?line {comment,
+ erlang:system_flag(multi_scheduling, block),
+ ok = low_prio_test(Config),
+ erlang:system_flag(multi_scheduling, unblock),
+ {comment,
"Test not written for SMP runtime system. "
"Multi scheduling blocked during test."}
end.
low_prio_test(Config) when is_list(Config) ->
- ?line process_flag(trap_exit, true),
- ?line S = spawn_link(?MODULE, prio_server, [0, 0]),
- ?line PCs = spawn_prio_clients(S, erlang:system_info(schedulers_online)),
- ?line timer:sleep(2000),
- ?line lists:foreach(fun (P) -> exit(P, kill) end, PCs),
- ?line S ! exit,
- ?line receive {'EXIT', S, {A, B}} -> check_prio(A, B) end,
+ process_flag(trap_exit, true),
+ S = spawn_link(?MODULE, prio_server, [0, 0]),
+ PCs = spawn_prio_clients(S, erlang:system_info(schedulers_online)),
+ timer:sleep(2000),
+ lists:foreach(fun (P) -> exit(P, kill) end, PCs),
+ S ! exit,
+ receive {'EXIT', S, {A, B}} -> check_prio(A, B) end,
ok.
check_prio(A, B) ->
- ?line Prop = A/B,
- ?line ok = io:format("Low=~p, High=~p, Prop=~p\n", [A, B, Prop]),
+ Prop = A/B,
+ ok = io:format("Low=~p, High=~p, Prop=~p\n", [A, B, Prop]),
%% It isn't 1/8, it's more like 0.3, but let's check that
%% the low-prio processes get some little chance to run at all.
- ?line true = (Prop < 1.0),
- ?line true = (Prop > 1/32).
+ true = (Prop < 1.0),
+ true = (Prop > 1/32).
prio_server(A, B) ->
receive
@@ -1059,25 +1037,25 @@ yield(Config) when is_list(Config) ->
end.
yield_test() ->
- ?line erlang:garbage_collect(),
- ?line receive after 1 -> ok end, % Clear reductions.
- ?line SC = schedcnt(start),
- ?line {reductions, R1} = process_info(self(), reductions),
- ?line {ok, true} = call_yield(middle),
- ?line true = call_yield(final),
- ?line true = call_yield(),
- ?line true = apply(erlang, yield, []),
- ?line {reductions, R2} = process_info(self(), reductions),
- ?line Schedcnt = schedcnt(stop, SC),
- ?line case {R2-R1, Schedcnt} of
- {Diff, 4} when Diff < 30 ->
- ?line ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w",
- [R1, R2, Schedcnt]);
- {Diff, _} ->
- ?line ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w",
- [R1, R2, Schedcnt]),
- ?line test_server:fail({measurement_error, Diff, Schedcnt})
- end.
+ erlang:garbage_collect(),
+ receive after 1 -> ok end, % Clear reductions.
+ SC = schedcnt(start),
+ {reductions, R1} = process_info(self(), reductions),
+ {ok, true} = call_yield(middle),
+ true = call_yield(final),
+ true = call_yield(),
+ true = apply(erlang, yield, []),
+ {reductions, R2} = process_info(self(), reductions),
+ Schedcnt = schedcnt(stop, SC),
+ case {R2-R1, Schedcnt} of
+ {Diff, 4} when Diff < 30 ->
+ ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w",
+ [R1, R2, Schedcnt]);
+ {Diff, _} ->
+ ok = io:format("R1 = ~w, R2 = ~w, Schedcnt = ~w",
+ [R1, R2, Schedcnt]),
+ test_server:fail({measurement_error, Diff, Schedcnt})
+ end.
call_yield() ->
erlang:yield().
@@ -1116,61 +1094,61 @@ schedcnt(stop, {Ref, Pid}) when is_reference(Ref), is_pid(Pid) ->
yield2(doc) -> [];
yield2(suite) -> [];
yield2(Config) when is_list(Config) ->
- ?line Me = self(),
- ?line Go = make_ref(),
- ?line RedDiff = make_ref(),
- ?line Done = make_ref(),
- ?line P = spawn(fun () ->
- receive Go -> ok end,
- {reductions, R1} = process_info(self(), reductions),
- {ok, true} = call_yield(middle),
- true = call_yield(final),
- true = call_yield(),
- true = apply(erlang, yield, []),
- {reductions, R2} = process_info(self(), reductions),
- Me ! {RedDiff, R2 - R1},
- exit(Done)
- end),
- ?line erlang:yield(),
-
- ?line 1 = erlang:trace(P, true, [running, procs, {tracer, self()}]),
-
- ?line P ! Go,
+ Me = self(),
+ Go = make_ref(),
+ RedDiff = make_ref(),
+ Done = make_ref(),
+ P = spawn(fun () ->
+ receive Go -> ok end,
+ {reductions, R1} = process_info(self(), reductions),
+ {ok, true} = call_yield(middle),
+ true = call_yield(final),
+ true = call_yield(),
+ true = apply(erlang, yield, []),
+ {reductions, R2} = process_info(self(), reductions),
+ Me ! {RedDiff, R2 - R1},
+ exit(Done)
+ end),
+ erlang:yield(),
+
+ 1 = erlang:trace(P, true, [running, procs, {tracer, self()}]),
+
+ P ! Go,
%% receive Go -> ok end,
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% {ok, true} = call_yield(middle),
- ?line {trace, P, out, _} = next_tmsg(P),
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, out, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% true = call_yield(final),
- ?line {trace, P, out, _} = next_tmsg(P),
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, out, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% true = call_yield(),
- ?line {trace, P, out, _} = next_tmsg(P),
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, out, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% true = apply(erlang, yield, []),
- ?line {trace, P, out, _} = next_tmsg(P),
- ?line {trace, P, in, _} = next_tmsg(P),
+ {trace, P, out, _} = next_tmsg(P),
+ {trace, P, in, _} = next_tmsg(P),
%% exit(Done)
- ?line {trace, P, exit, Done} = next_tmsg(P),
+ {trace, P, exit, Done} = next_tmsg(P),
- ?line receive
+ receive
{RedDiff, Reductions} when Reductions < 30, Reductions > 0 ->
io:format("Reductions = ~p~n", [Reductions]),
- ?line ok;
+ ok;
{RedDiff, Reductions} ->
- ?line ?t:fail({unexpected_reduction_count, Reductions})
+ ?t:fail({unexpected_reduction_count, Reductions})
end,
- ?line none = next_tmsg(P),
+ none = next_tmsg(P),
- ?line ok.
+ ok.
next_tmsg(Pid) ->
receive
@@ -1186,19 +1164,19 @@ next_tmsg(Pid) ->
bad_register(Config) when is_list(Config) ->
Name = a_long_and_unused_name,
- ?line {'EXIT',{badarg,_}} = (catch register({bad,name}, self())),
- ?line fail_register(undefined, self()),
- ?line fail_register([bad,name], self()),
+ {'EXIT',{badarg,_}} = (catch register({bad,name}, self())),
+ fail_register(undefined, self()),
+ fail_register([bad,name], self()),
- ?line {Dead,Mref} = spawn_monitor(fun() -> true end),
+ {Dead,Mref} = spawn_monitor(fun() -> true end),
receive
{'DOWN',Mref,process,Dead,_} -> ok
end,
- ?line fail_register(Name, Dead),
- ?line fail_register(Name, make_ref()),
- ?line fail_register(Name, []),
- ?line fail_register(Name, {bad,process}),
- ?line fail_register(Name, <<>>),
+ fail_register(Name, Dead),
+ fail_register(Name, make_ref()),
+ fail_register(Name, []),
+ fail_register(Name, {bad,process}),
+ fail_register(Name, <<>>),
ok.
fail_register(Name, Process) ->
@@ -1209,50 +1187,50 @@ fail_register(Name, Process) ->
garbage_collect(doc) -> [];
garbage_collect(suite) -> [];
garbage_collect(Config) when is_list(Config) ->
- ?line Prio = process_flag(priority, high),
- ?line true = erlang:garbage_collect(),
- ?line TokLoopers = lists:map(fun (_) ->
- spawn_opt(fun tok_loop/0,
- [{priority, low}, link])
- end,
- lists:seq(1, 10)),
- ?line lists:foreach(fun (Pid) ->
- ?line Mon = erlang:monitor(process, Pid),
- ?line DownBefore = receive
- {'DOWN', Mon, _, _, _} ->
- ?line true
- after 0 ->
- ?line false
- end,
- ?line GC = erlang:garbage_collect(Pid),
- ?line DownAfter = receive
- {'DOWN', Mon, _, _, _} ->
- ?line true
- after 0 ->
- ?line false
- end,
- ?line true = erlang:demonitor(Mon),
- ?line case {DownBefore, DownAfter} of
- {true, _} -> ?line false = GC;
- {false, false} -> ?line true = GC;
- _ -> ?line GC
- end
- end,
- processes()),
- ?line lists:foreach(fun (Pid) ->
- unlink(Pid),
- exit(Pid, bang)
- end, TokLoopers),
- ?line process_flag(priority, Prio),
- ?line ok.
+ Prio = process_flag(priority, high),
+ true = erlang:garbage_collect(),
+
+ TokLoopers = lists:map(fun (_) ->
+ spawn_opt(fun tok_loop/0, [{priority, low}, link])
+ end, lists:seq(1, 10)),
+
+ lists:foreach(fun (Pid) ->
+ Mon = erlang:monitor(process, Pid),
+ DownBefore = receive
+ {'DOWN', Mon, _, _, _} ->
+ true
+ after 0 ->
+ false
+ end,
+ GC = erlang:garbage_collect(Pid),
+ DownAfter = receive
+ {'DOWN', Mon, _, _, _} ->
+ true
+ after 0 ->
+ false
+ end,
+ true = erlang:demonitor(Mon),
+ case {DownBefore, DownAfter} of
+ {true, _} -> false = GC;
+ {false, false} -> true = GC;
+ _ -> GC
+ end
+ end, processes()),
+
+ lists:foreach(fun (Pid) ->
+ unlink(Pid),
+ exit(Pid, bang)
+ end, TokLoopers),
+ process_flag(priority, Prio),
+ ok.
process_info_messages(doc) ->
["This used to cause the nofrag emulator to dump core"];
process_info_messages(suite) ->
[];
process_info_messages(Config) when is_list(Config) ->
- ?line process_info_messages_test(),
- ?line ok.
+ process_info_messages_test(),
+ ok.
process_info_messages_loop(0) -> ok;
process_info_messages_loop(N) -> process_info_messages_loop(N-1).
@@ -1267,43 +1245,42 @@ process_info_messages_send_my_msgs_to(Rcvr) ->
end.
process_info_messages_test() ->
- ?line Go = make_ref(),
- ?line Done = make_ref(),
- ?line Rcvr = self(),
- ?line Rcvr2 = spawn_link(fun () ->
- receive {Go, Rcvr} -> ok end,
- garbage_collect(),
- Rcvr ! {Done, self()}
- end),
- ?line Sndrs = lists:map(
- fun (_) ->
- spawn_link(fun () ->
- Rcvr ! {Go, self()},
- receive {Go, Rcvr} -> ok end,
- BigData = lists:seq(1, 1000),
- Rcvr ! BigData,
- Rcvr ! BigData,
- Rcvr ! BigData,
- Rcvr ! {Done, self()}
- end)
- end,
- lists:seq(1, 10)),
- ?line lists:foreach(fun (Sndr) -> receive {Go, Sndr} -> ok end end,
+ Go = make_ref(),
+ Done = make_ref(),
+ Rcvr = self(),
+ Rcvr2 = spawn_link(fun () ->
+ receive {Go, Rcvr} -> ok end,
+ garbage_collect(),
+ Rcvr ! {Done, self()}
+ end),
+ Sndrs = lists:map(
+ fun (_) ->
+ spawn_link(fun () ->
+ Rcvr ! {Go, self()},
+ receive {Go, Rcvr} -> ok end,
+ BigData = lists:seq(1, 1000),
+ Rcvr ! BigData,
+ Rcvr ! BigData,
+ Rcvr ! BigData,
+ Rcvr ! {Done, self()}
+ end)
+ end, lists:seq(1, 10)),
+ lists:foreach(fun (Sndr) -> receive {Go, Sndr} -> ok end end,
Sndrs),
- ?line garbage_collect(),
- ?line erlang:yield(),
- ?line lists:foreach(fun (Sndr) -> Sndr ! {Go, self()} end, Sndrs),
- ?line process_info_messages_loop(100000000),
- ?line Msgs = process_info(self(), messages),
- ?line lists:foreach(fun (Sndr) -> receive {Done, Sndr} -> ok end end,
+ garbage_collect(),
+ erlang:yield(),
+ lists:foreach(fun (Sndr) -> Sndr ! {Go, self()} end, Sndrs),
+ process_info_messages_loop(100000000),
+ Msgs = process_info(self(), messages),
+ lists:foreach(fun (Sndr) -> receive {Done, Sndr} -> ok end end,
Sndrs),
- ?line garbage_collect(),
- ?line Rcvr2 ! Msgs,
- ?line process_info_messages_send_my_msgs_to(Rcvr2),
- ?line Rcvr2 ! {Go, self()},
- ?line garbage_collect(),
- ?line receive {Done, Rcvr2} -> ok end,
- ?line Msgs.
+ garbage_collect(),
+ Rcvr2 ! Msgs,
+ process_info_messages_send_my_msgs_to(Rcvr2),
+ Rcvr2 ! {Go, self()},
+ garbage_collect(),
+ receive {Done, Rcvr2} -> ok end,
+ Msgs.
chk_badarg(Fun) ->
try Fun(), exit(no_badarg) catch error:badarg -> ok end.
@@ -1313,76 +1290,72 @@ process_flag_badarg(doc) ->
process_flag_badarg(suite) ->
[];
process_flag_badarg(Config) when is_list(Config) ->
- ?line chk_badarg(fun () -> process_flag(gurka, banan) end),
- ?line chk_badarg(fun () -> process_flag(trap_exit, gurka) end),
- ?line chk_badarg(fun () -> process_flag(error_handler, 1) end),
- ?line chk_badarg(fun () -> process_flag(min_heap_size, gurka) end),
- ?line chk_badarg(fun () -> process_flag(min_bin_vheap_size, gurka) end),
- ?line chk_badarg(fun () -> process_flag(min_bin_vheap_size, -1) end),
- ?line chk_badarg(fun () -> process_flag(priority, 4711) end),
- ?line chk_badarg(fun () -> process_flag(save_calls, hmmm) end),
- ?line P= spawn_link(fun () -> receive die -> ok end end),
- ?line chk_badarg(fun () -> process_flag(P, save_calls, hmmm) end),
- ?line chk_badarg(fun () -> process_flag(gurka, save_calls, hmmm) end),
- ?line P ! die,
- ?line ok.
+ chk_badarg(fun () -> process_flag(gurka, banan) end),
+ chk_badarg(fun () -> process_flag(trap_exit, gurka) end),
+ chk_badarg(fun () -> process_flag(error_handler, 1) end),
+ chk_badarg(fun () -> process_flag(min_heap_size, gurka) end),
+ chk_badarg(fun () -> process_flag(min_bin_vheap_size, gurka) end),
+ chk_badarg(fun () -> process_flag(min_bin_vheap_size, -1) end),
+ chk_badarg(fun () -> process_flag(priority, 4711) end),
+ chk_badarg(fun () -> process_flag(save_calls, hmmm) end),
+ P = spawn_link(fun () -> receive die -> ok end end),
+ chk_badarg(fun () -> process_flag(P, save_calls, hmmm) end),
+ chk_badarg(fun () -> process_flag(gurka, save_calls, hmmm) end),
+ P ! die,
+ ok.
-include_lib("stdlib/include/ms_transform.hrl").
otp_6237(doc) -> [];
otp_6237(suite) -> [];
otp_6237(Config) when is_list(Config) ->
- ?line Slctrs = lists:map(fun (_) ->
- spawn_link(fun () ->
- otp_6237_select_loop()
- end)
- end,
- lists:seq(1,5)),
- ?line lists:foreach(fun (_) -> otp_6237_test() end, lists:seq(1, 100)),
- ?line lists:foreach(fun (S) -> unlink(S),exit(S, kill) end, Slctrs),
- ?line ok.
+ Slctrs = lists:map(fun (_) ->
+ spawn_link(fun () ->
+ otp_6237_select_loop()
+ end)
+ end,
+ lists:seq(1,5)),
+ lists:foreach(fun (_) -> otp_6237_test() end, lists:seq(1, 100)),
+ lists:foreach(fun (S) -> unlink(S),exit(S, kill) end, Slctrs),
+ ok.
otp_6237_test() ->
- ?line Parent = self(),
- ?line Inited = make_ref(),
- ?line Die = make_ref(),
- ?line Pid = spawn_link(fun () ->
- register(otp_6237,self()),
- otp_6237 = ets:new(otp_6237,
- [named_table,
- ordered_set]),
- ets:insert(otp_6237,
- [{I,I}
- || I <- lists:seq(1, 100)]),
- %% Inserting a lot of bif timers
- %% increase the possibility that
- %% the test will fail when the
- %% original cleanup order is used
- lists:foreach(
- fun (_) ->
- erlang:send_after(1000000,
- self(),
- {a,b,c})
- end,
- lists:seq(1,1000)),
- Parent ! Inited,
- receive Die -> bye end
- end),
- ?line receive
- Inited -> ?line ok
- end,
- ?line Pid ! Die,
+ Parent = self(),
+ Inited = make_ref(),
+ Die = make_ref(),
+ Pid = spawn_link(fun () ->
+ register(otp_6237,self()),
+ otp_6237 = ets:new(otp_6237,
+ [named_table,
+ ordered_set]),
+ ets:insert(otp_6237,
+ [{I,I}
+ || I <- lists:seq(1, 100)]),
+ %% Inserting a lot of bif timers
+ %% increases the possibility that
+ %% the test will fail when the
+ %% original cleanup order is used
+ lists:foreach( fun (_) ->
+ erlang:send_after(1000000, self(), {a,b,c})
+ end, lists:seq(1,1000)),
+ Parent ! Inited,
+ receive Die -> bye end
+ end),
+ receive
+ Inited -> ok
+ end,
+ Pid ! Die,
otp_6237_whereis_loop().
otp_6237_whereis_loop() ->
- ?line case whereis(otp_6237) of
+ case whereis(otp_6237) of
undefined ->
- ?line otp_6237 = ets:new(otp_6237,
+ otp_6237 = ets:new(otp_6237,
[named_table,ordered_set]),
- ?line ets:delete(otp_6237),
- ?line ok;
+ ets:delete(otp_6237),
+ ok;
_ ->
- ?line otp_6237_whereis_loop()
+ otp_6237_whereis_loop()
end.
otp_6237_select_loop() ->
@@ -1390,9 +1363,8 @@ otp_6237_select_loop() ->
otp_6237_select_loop().
-
-define(NoTestProcs, 10000).
--record(processes_bif_info, {min_start_reds,
+-record(ptab_list_bif_info, {min_start_reds,
tab_chunks,
tab_chunks_size,
tab_indices_per_red,
@@ -1407,89 +1379,86 @@ processes_large_tab(doc) ->
processes_large_tab(suite) ->
[];
processes_large_tab(Config) when is_list(Config) ->
- ?line enable_internal_state(),
- ?line MaxDbgLvl = 20,
- ?line MinProcTabSize = 2*(1 bsl 15),
- ?line ProcTabSize0 = 1000000,
- ?line ProcTabSize1 = case {erlang:system_info(schedulers_online),
- erlang:system_info(logical_processors)} of
- {Schdlrs, Cpus} when is_integer(Cpus),
- Schdlrs =< Cpus ->
- ProcTabSize0;
- _ ->
- ProcTabSize0 div 4
- end,
- ?line ProcTabSize2 = case erlang:system_info(debug_compiled) of
- true -> ProcTabSize1 - 500000;
- false -> ProcTabSize1
- end,
+ enable_internal_state(),
+ MaxDbgLvl = 20,
+ MinProcTabSize = 2*(1 bsl 15),
+ ProcTabSize0 = 1000000,
+ ProcTabSize1 = case {erlang:system_info(schedulers_online),
+ erlang:system_info(logical_processors)} of
+ {Schdlrs, Cpus} when is_integer(Cpus),
+ Schdlrs =< Cpus ->
+ ProcTabSize0;
+ _ ->
+ ProcTabSize0 div 4
+ end,
+ ProcTabSize2 = case erlang:system_info(debug_compiled) of
+ true -> ProcTabSize1 - 500000;
+ false -> ProcTabSize1
+ end,
%% With high debug levels this test takes so long time that
%% the connection times out; therefore, shrink the test on
%% high debug levels.
- ?line DbgLvl = case erts_debug:get_internal_state(processes_bif_info) of
- #processes_bif_info{debug_level = Lvl} when Lvl > MaxDbgLvl ->
+ DbgLvl = case erts_debug:get_internal_state(processes_bif_info) of
+ #ptab_list_bif_info{debug_level = Lvl} when Lvl > MaxDbgLvl ->
20;
- #processes_bif_info{debug_level = Lvl} when Lvl < 0 ->
- ?line ?t:fail({debug_level, Lvl});
- #processes_bif_info{debug_level = Lvl} ->
+ #ptab_list_bif_info{debug_level = Lvl} when Lvl < 0 ->
+ ?t:fail({debug_level, Lvl});
+ #ptab_list_bif_info{debug_level = Lvl} ->
Lvl
end,
- ?line ProcTabSize3 = ProcTabSize2 - (1300000 * DbgLvl div MaxDbgLvl),
- ?line ProcTabSize = case ProcTabSize3 < MinProcTabSize of
+ ProcTabSize3 = ProcTabSize2 - (1300000 * DbgLvl div MaxDbgLvl),
+ ProcTabSize = case ProcTabSize3 < MinProcTabSize of
true -> MinProcTabSize;
false -> ProcTabSize3
end,
- ?line {ok, LargeNode} = start_node(Config,
+ {ok, LargeNode} = start_node(Config,
"+P " ++ integer_to_list(ProcTabSize)),
- ?line Res = rpc:call(LargeNode, ?MODULE, processes_bif_test, []),
- ?line case rpc:call(LargeNode,
+ Res = rpc:call(LargeNode, ?MODULE, processes_bif_test, []),
+ case rpc:call(LargeNode,
erts_debug,
get_internal_state,
[processes_bif_info]) of
- #processes_bif_info{tab_chunks = Chunks} when is_integer(Chunks),
+ #ptab_list_bif_info{tab_chunks = Chunks} when is_integer(Chunks),
Chunks > 1 -> ok;
PBInfo -> ?t:fail(PBInfo)
end,
- ?line stop_node(LargeNode),
- ?line chk_processes_bif_test_res(Res).
+ stop_node(LargeNode),
+ chk_processes_bif_test_res(Res).
processes_default_tab(doc) ->
[];
processes_default_tab(suite) ->
[];
processes_default_tab(Config) when is_list(Config) ->
- ?line {ok, DefaultNode} = start_node(Config, ""),
- ?line Res = rpc:call(DefaultNode, ?MODULE, processes_bif_test, []),
- ?line stop_node(DefaultNode),
- ?line chk_processes_bif_test_res(Res).
+ {ok, DefaultNode} = start_node(Config, ""),
+ Res = rpc:call(DefaultNode, ?MODULE, processes_bif_test, []),
+ stop_node(DefaultNode),
+ chk_processes_bif_test_res(Res).
processes_small_tab(doc) ->
[];
processes_small_tab(suite) ->
[];
processes_small_tab(Config) when is_list(Config) ->
- ?line {ok, SmallNode} = start_node(Config, "+P 500"),
- ?line Res = rpc:call(SmallNode, ?MODULE, processes_bif_test, []),
- ?line PBInfo = rpc:call(SmallNode,
- erts_debug,
- get_internal_state,
- [processes_bif_info]),
- ?line stop_node(SmallNode),
- ?line 1 = PBInfo#processes_bif_info.tab_chunks,
- ?line chk_processes_bif_test_res(Res).
+ {ok, SmallNode} = start_node(Config, "+P 1024"),
+ Res = rpc:call(SmallNode, ?MODULE, processes_bif_test, []),
+ PBInfo = rpc:call(SmallNode, erts_debug, get_internal_state, [processes_bif_info]),
+ stop_node(SmallNode),
+ true = PBInfo#ptab_list_bif_info.tab_chunks < 10,
+ chk_processes_bif_test_res(Res).
processes_this_tab(doc) ->
[];
processes_this_tab(suite) ->
[];
processes_this_tab(Config) when is_list(Config) ->
- ?line chk_processes_bif_test_res(processes_bif_test()).
+ chk_processes_bif_test_res(processes_bif_test()).
chk_processes_bif_test_res(ok) -> ok;
chk_processes_bif_test_res({comment, _} = Comment) -> Comment;
chk_processes_bif_test_res(Failure) -> ?t:fail(Failure).
-print_processes_bif_info(#processes_bif_info{min_start_reds = MinStartReds,
+print_processes_bif_info(#ptab_list_bif_info{min_start_reds = MinStartReds,
tab_chunks = TabChunks,
tab_chunks_size = TabChunksSize,
tab_indices_per_red = TabIndPerRed,
@@ -1583,26 +1552,26 @@ do_processes(WantReds) ->
processes().
processes_bif_test() ->
- ?line Tester = self(),
- ?line enable_internal_state(),
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10,
- ?line WillTrap = case PBInfo of
- #processes_bif_info{tab_chunks = 1} ->
- false;
- #processes_bif_info{tab_chunks = Chunks,
- tab_chunks_size = ChunksSize,
- tab_indices_per_red = IndiciesPerRed
- } ->
- Chunks*ChunksSize >= IndiciesPerRed*WantReds
- end,
- ?line Processes = fun () ->
- erts_debug:set_internal_state(reds_left,WantReds),
- processes()
- end,
+ Tester = self(),
+ enable_internal_state(),
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
+ WillTrap = case PBInfo of
+ #ptab_list_bif_info{tab_chunks = Chunks} when Chunks < 10 ->
+ false; %% Skip for small tables
+ #ptab_list_bif_info{tab_chunks = Chunks,
+ tab_chunks_size = ChunksSize,
+ tab_indices_per_red = IndiciesPerRed
+ } ->
+ Chunks*ChunksSize >= IndiciesPerRed*WantReds
+ end,
+ Processes = fun () ->
+ erts_debug:set_internal_state(reds_left,WantReds),
+ processes()
+ end,
- ?line ok = do_processes_bif_test(WantReds, WillTrap, Processes),
+ ok = do_processes_bif_test(WantReds, WillTrap, Processes),
case WillTrap of
false ->
@@ -1610,8 +1579,8 @@ processes_bif_test() ->
true ->
%% Do it again with a process suspended while
%% in the processes/0 bif.
- ?line erlang:system_flag(multi_scheduling, block),
- ?line Suspendee = spawn_link(fun () ->
+ erlang:system_flag(multi_scheduling, block),
+ Suspendee = spawn_link(fun () ->
Tester ! {suspend_me, self()},
Tester ! {self(),
done,
@@ -1621,179 +1590,160 @@ processes_bif_test() ->
ok
end
end),
- ?line receive {suspend_me, Suspendee} -> ok end,
- ?line erlang:suspend_process(Suspendee),
- ?line erlang:system_flag(multi_scheduling, unblock),
+ receive {suspend_me, Suspendee} -> ok end,
+ erlang:suspend_process(Suspendee),
+ erlang:system_flag(multi_scheduling, unblock),
- ?line [{status,suspended},
- {current_function,{erlang,processes_trap,2}}]
- = process_info(Suspendee, [status, current_function]),
+ [{status,suspended},{current_function,{erlang,ptab_list_continue,2}}] =
+ process_info(Suspendee, [status, current_function]),
- ?line ok = do_processes_bif_test(WantReds, WillTrap, Processes),
+ ok = do_processes_bif_test(WantReds, WillTrap, Processes),
- ?line erlang:resume_process(Suspendee),
- ?line receive {Suspendee, done, _} -> ok end,
- ?line unlink(Suspendee),
- ?line exit(Suspendee, bang)
+ erlang:resume_process(Suspendee),
+ receive {Suspendee, done, _} -> ok end,
+ unlink(Suspendee),
+ exit(Suspendee, bang)
end,
case get(processes_bif_testcase_comment) of
- undefined -> ?line ok;
- Comment -> ?line {comment, Comment}
+ undefined -> ok;
+ Comment -> {comment, Comment}
end.
do_processes_bif_test(WantReds, DieTest, Processes) ->
- ?line Tester = self(),
- ?line SpawnProcesses = fun (Prio) ->
- spawn_opt(?MODULE,
- do_processes,
- [WantReds],
- [link, {priority, Prio}])
- end,
- ?line Cleaner = spawn_link(fun () ->
- process_flag(trap_exit, true),
- Tester ! {cleaner_alive, self()},
- processes_bif_cleaner()
- end),
- ?line receive {cleaner_alive, Cleaner} -> ok end,
+ Tester = self(),
+ SpawnProcesses = fun (Prio) ->
+ spawn_opt(?MODULE, do_processes, [WantReds], [link, {priority, Prio}])
+ end,
+ Cleaner = spawn_link(fun () ->
+ process_flag(trap_exit, true),
+ Tester ! {cleaner_alive, self()},
+ processes_bif_cleaner()
+ end),
+ receive {cleaner_alive, Cleaner} -> ok end,
try
- ?line DoIt = make_ref(),
- ?line GetGoing = make_ref(),
- ?line {NoTestProcs, TestProcs} = spawn_initial_hangarounds(Cleaner),
- ?line ?t:format("Testing with ~p processes~n", [NoTestProcs]),
- ?line SpawnHangAround = fun () ->
- spawn(?MODULE,
- hangaround,
- [Cleaner, new_hangaround])
- end,
- ?line Killer = spawn_opt(fun () ->
- Splt = NoTestProcs div 10,
- {TP1, TP23} = lists:split(Splt,
- TestProcs),
- {TP2, TP3} = lists:split(Splt, TP23),
- erlang:system_flag(multi_scheduling,
- block),
- Tester ! DoIt,
- receive GetGoing -> ok end,
- erlang:system_flag(multi_scheduling,
- unblock),
- SpawnProcesses(high),
- lists:foreach(
- fun (P) ->
- SpawnHangAround(),
- exit(P, bang)
- end,
- TP1),
- SpawnProcesses(high),
- erlang:yield(),
- lists:foreach(
- fun (P) ->
- SpawnHangAround(),
- exit(P, bang)
- end,
- TP2),
- SpawnProcesses(high),
- lists:foreach(
- fun (P) ->
- SpawnHangAround(),
- exit(P, bang)
- end,
- TP3)
- end,
- [{priority, high}, link]),
- ?line receive DoIt -> ok end,
- ?line process_flag(priority, low),
- ?line SpawnProcesses(low),
- ?line erlang:yield(),
- ?line process_flag(priority, normal),
- ?line CorrectProcs0 = erts_debug:get_internal_state(processes),
- ?line Killer ! GetGoing,
- ?line erts_debug:set_internal_state(reds_left, WantReds),
- ?line Procs0 = processes(),
- ?line Procs = lists:sort(Procs0),
- ?line CorrectProcs = lists:sort(CorrectProcs0),
- ?line LengthCorrectProcs = length(CorrectProcs),
- ?line ?t:format("~p = length(CorrectProcs)~n", [LengthCorrectProcs]),
- ?line true = LengthCorrectProcs > NoTestProcs,
- ?line case CorrectProcs =:= Procs of
- true ->
- ?line ok;
- false ->
- ?line processes_unexpected_result(CorrectProcs, Procs)
- end,
- ?line unlink(Killer),
- ?line exit(Killer, bang)
+ DoIt = make_ref(),
+ GetGoing = make_ref(),
+ {NoTestProcs, TestProcs} = spawn_initial_hangarounds(Cleaner),
+ ?t:format("Testing with ~p processes~n", [NoTestProcs]),
+ SpawnHangAround = fun () ->
+ spawn(?MODULE, hangaround, [Cleaner, new_hangaround])
+ end,
+ Killer = spawn_opt(fun () ->
+ Splt = NoTestProcs div 10,
+ {TP1, TP23} = lists:split(Splt, TestProcs),
+ {TP2, TP3} = lists:split(Splt, TP23),
+ erlang:system_flag(multi_scheduling, block),
+ Tester ! DoIt,
+ receive GetGoing -> ok end,
+ erlang:system_flag(multi_scheduling, unblock),
+ SpawnProcesses(high),
+ lists:foreach( fun (P) ->
+ SpawnHangAround(),
+ exit(P, bang)
+ end, TP1),
+ SpawnProcesses(high),
+ erlang:yield(),
+ lists:foreach( fun (P) ->
+ SpawnHangAround(),
+ exit(P, bang)
+ end, TP2),
+ SpawnProcesses(high),
+ lists:foreach(
+ fun (P) ->
+ SpawnHangAround(),
+ exit(P, bang)
+ end, TP3)
+ end, [{priority, high}, link]),
+ receive DoIt -> ok end,
+ process_flag(priority, low),
+ SpawnProcesses(low),
+ erlang:yield(),
+ process_flag(priority, normal),
+ CorrectProcs0 = erts_debug:get_internal_state(processes),
+ Killer ! GetGoing,
+ erts_debug:set_internal_state(reds_left, WantReds),
+ Procs0 = processes(),
+ Procs = lists:sort(Procs0),
+ CorrectProcs = lists:sort(CorrectProcs0),
+ LengthCorrectProcs = length(CorrectProcs),
+ ?t:format("~p = length(CorrectProcs)~n", [LengthCorrectProcs]),
+ true = LengthCorrectProcs > NoTestProcs,
+ case CorrectProcs =:= Procs of
+ true ->
+ ok;
+ false ->
+ processes_unexpected_result(CorrectProcs, Procs)
+ end,
+ unlink(Killer),
+ exit(Killer, bang)
after
unlink(Cleaner),
exit(Cleaner, kill),
%% Wait for the system to recover to a normal state...
wait_until_system_recover()
end,
- ?line do_processes_bif_die_test(DieTest, Processes),
- ?line ok.
+ do_processes_bif_die_test(DieTest, Processes),
+ ok.
do_processes_bif_die_test(false, _Processes) ->
- ?line ?t:format("Skipping test killing process executing processes/0~n",[]),
- ?line ok;
+ ?t:format("Skipping test killing process executing processes/0~n",[]),
+ ok;
do_processes_bif_die_test(true, Processes) ->
- ?line do_processes_bif_die_test(5, Processes);
+ do_processes_bif_die_test(5, Processes);
do_processes_bif_die_test(N, Processes) ->
- ?line ?t:format("Doing test killing process executing processes/0~n",[]),
+ ?t:format("Doing test killing process executing processes/0~n",[]),
try
- ?line Tester = self(),
- ?line Oooh_Nooooooo = make_ref(),
- ?line {_, DieWhileDoingMon} = erlang:spawn_monitor(
- fun () ->
- Victim = self(),
- spawn_opt(
- fun () ->
- exit(Victim, got_him)
- end,
- [link,
- {priority, max}]),
- Tester ! {Oooh_Nooooooo,
- hd(Processes())},
- exit(ohhhh_nooooo)
- end),
- ?line receive
- {'DOWN', DieWhileDoingMon, _, _, Reason} ->
- case Reason of
- got_him -> ok;
- _ -> throw({kill_in_trap, Reason})
- end
- end,
- ?line receive
- {Oooh_Nooooooo, _} ->
- ?line throw({kill_in_trap, 'Oooh_Nooooooo'})
- after 0 ->
- ?line ok
- end,
- ?line PrcsCllrsSeqLen = 2*erlang:system_info(schedulers_online),
- ?line PrcsCllrsSeq = lists:seq(1, PrcsCllrsSeqLen),
- ?line ProcsCallers = lists:map(
- fun (_) ->
- spawn_link(
- fun () ->
- Tester ! hd(Processes())
- end)
- end,
- PrcsCllrsSeq),
- ?line erlang:yield(),
+ Tester = self(),
+ Oooh_Nooooooo = make_ref(),
+ {_, DieWhileDoingMon} = erlang:spawn_monitor( fun () ->
+ Victim = self(),
+ spawn_opt(
+ fun () ->
+ exit(Victim, got_him)
+ end,
+ [link, {priority, max}]),
+ Tester ! {Oooh_Nooooooo,
+ hd(Processes())},
+ exit(ohhhh_nooooo)
+ end),
+ receive
+ {'DOWN', DieWhileDoingMon, _, _, Reason} ->
+ case Reason of
+ got_him -> ok;
+ _ -> throw({kill_in_trap, Reason})
+ end
+ end,
+ receive
+ {Oooh_Nooooooo, _} ->
+ throw({kill_in_trap, 'Oooh_Nooooooo'})
+ after 0 ->
+ ok
+ end,
+ PrcsCllrsSeqLen = 2*erlang:system_info(schedulers_online),
+ PrcsCllrsSeq = lists:seq(1, PrcsCllrsSeqLen),
+ ProcsCallers = lists:map( fun (_) ->
+ spawn_link(
+ fun () ->
+ Tester ! hd(Processes())
+ end)
+ end, PrcsCllrsSeq),
+ erlang:yield(),
{ProcsCallers1, ProcsCallers2} = lists:split(PrcsCllrsSeqLen div 2,
ProcsCallers),
- ?line process_flag(priority, high),
- ?line lists:foreach(
+ process_flag(priority, high),
+ lists:foreach(
fun (P) ->
unlink(P),
exit(P, bang)
end,
lists:reverse(ProcsCallers2) ++ ProcsCallers1),
- ?line process_flag(priority, normal),
- ?line ok
+ process_flag(priority, normal),
+ ok
catch
throw:{kill_in_trap, R} when N > 0 ->
?t:format("Failed to kill in trap: ~p~n", [R]),
- ?t:format("Trying again~p~n", []),
+ ?t:format("Trying again~n", []),
do_processes_bif_die_test(N-1, Processes)
end.
@@ -1852,23 +1802,23 @@ processes_last_call_trap(doc) ->
processes_last_call_trap(suite) ->
[];
processes_last_call_trap(Config) when is_list(Config) ->
- ?line enable_internal_state(),
- ?line Processes = fun () -> processes() end,
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = case PBInfo#processes_bif_info.min_start_reds of
- R when R > 10 -> R - 1;
- _R -> 9
- end,
- ?line lists:foreach(fun (_) ->
- ?line erts_debug:set_internal_state(reds_left,
- WantReds),
- Processes(),
- ?line erts_debug:set_internal_state(reds_left,
- WantReds),
- my_processes()
- end,
- lists:seq(1,100)).
+ enable_internal_state(),
+ Processes = fun () -> processes() end,
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
+ R when R > 10 -> R - 1;
+ _R -> 9
+ end,
+ lists:foreach(fun (_) ->
+ erts_debug:set_internal_state(reds_left,
+ WantReds),
+ Processes(),
+ erts_debug:set_internal_state(reds_left,
+ WantReds),
+ my_processes()
+ end,
+ lists:seq(1,100)).
my_processes() ->
processes().
@@ -1878,108 +1828,106 @@ processes_apply_trap(doc) ->
processes_apply_trap(suite) ->
[];
processes_apply_trap(Config) when is_list(Config) ->
- ?line enable_internal_state(),
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = case PBInfo#processes_bif_info.min_start_reds of
- R when R > 10 -> R - 1;
- _R -> 9
- end,
- ?line lists:foreach(fun (_) ->
- ?line erts_debug:set_internal_state(reds_left,
- WantReds),
- ?line apply(erlang, processes, [])
- end,
- lists:seq(1,100)).
+ enable_internal_state(),
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = case PBInfo#ptab_list_bif_info.min_start_reds of
+ R when R > 10 -> R - 1;
+ _R -> 9
+ end,
+ lists:foreach(fun (_) ->
+ erts_debug:set_internal_state(reds_left,
+ WantReds),
+ apply(erlang, processes, [])
+ end, lists:seq(1,100)).
processes_gc_trap(doc) ->
[];
processes_gc_trap(suite) ->
[];
processes_gc_trap(Config) when is_list(Config) ->
- ?line Tester = self(),
- ?line enable_internal_state(),
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10,
- ?line Processes = fun () ->
- erts_debug:set_internal_state(reds_left,WantReds),
- processes()
- end,
+ Tester = self(),
+ enable_internal_state(),
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
+ Processes = fun () ->
+ erts_debug:set_internal_state(reds_left,WantReds),
+ processes()
+ end,
- ?line erlang:system_flag(multi_scheduling, block),
- ?line Suspendee = spawn_link(fun () ->
+ erlang:system_flag(multi_scheduling, block),
+ Suspendee = spawn_link(fun () ->
Tester ! {suspend_me, self()},
Tester ! {self(),
done,
hd(Processes())},
receive after infinity -> ok end
end),
- ?line receive {suspend_me, Suspendee} -> ok end,
- ?line erlang:suspend_process(Suspendee),
- ?line erlang:system_flag(multi_scheduling, unblock),
+ receive {suspend_me, Suspendee} -> ok end,
+ erlang:suspend_process(Suspendee),
+ erlang:system_flag(multi_scheduling, unblock),
- ?line [{status,suspended}, {current_function,{erlang,processes_trap,2}}]
+ [{status,suspended}, {current_function,{erlang,ptab_list_continue,2}}]
= process_info(Suspendee, [status, current_function]),
- ?line erlang:garbage_collect(Suspendee),
- ?line erlang:garbage_collect(Suspendee),
+ erlang:garbage_collect(Suspendee),
+ erlang:garbage_collect(Suspendee),
- ?line erlang:resume_process(Suspendee),
- ?line receive {Suspendee, done, _} -> ok end,
- ?line erlang:garbage_collect(Suspendee),
- ?line erlang:garbage_collect(Suspendee),
+ erlang:resume_process(Suspendee),
+ receive {Suspendee, done, _} -> ok end,
+ erlang:garbage_collect(Suspendee),
+ erlang:garbage_collect(Suspendee),
- ?line unlink(Suspendee),
- ?line exit(Suspendee, bang),
- ?line ok.
+ unlink(Suspendee),
+ exit(Suspendee, bang),
+ ok.
process_flag_heap_size(doc) ->
[];
process_flag_heap_size(suite) ->
[];
process_flag_heap_size(Config) when is_list(Config) ->
- HSize = 2584, % must be gc fib number
- VHSize = 317811, % must be gc fib number
- ?line OldHmin = erlang:process_flag(min_heap_size, HSize),
- ?line {min_heap_size, HSize} = erlang:process_info(self(), min_heap_size),
- ?line OldVHmin = erlang:process_flag(min_bin_vheap_size, VHSize),
- ?line {min_bin_vheap_size, VHSize} = erlang:process_info(self(), min_bin_vheap_size),
- ?line HSize = erlang:process_flag(min_heap_size, OldHmin),
- ?line VHSize = erlang:process_flag(min_bin_vheap_size, OldVHmin),
- ?line ok.
+ HSize = 2586, % must be gc fib+ number
+ VHSize = 318187, % must be gc fib+ number
+ OldHmin = erlang:process_flag(min_heap_size, HSize),
+ {min_heap_size, HSize} = erlang:process_info(self(), min_heap_size),
+ OldVHmin = erlang:process_flag(min_bin_vheap_size, VHSize),
+ {min_bin_vheap_size, VHSize} = erlang:process_info(self(), min_bin_vheap_size),
+ HSize = erlang:process_flag(min_heap_size, OldHmin),
+ VHSize = erlang:process_flag(min_bin_vheap_size, OldVHmin),
+ ok.
spawn_opt_heap_size(doc) ->
[];
spawn_opt_heap_size(suite) ->
[];
spawn_opt_heap_size(Config) when is_list(Config) ->
- HSize = 987, % must be gc fib number
- VHSize = 46368, % must be gc fib number
- ?line Pid = spawn_opt(fun () -> receive stop -> ok end end,
+ HSize = 987, % must be gc fib+ number
+ VHSize = 46422, % must be gc fib+ number
+ Pid = spawn_opt(fun () -> receive stop -> ok end end,
[{min_heap_size, HSize},{ min_bin_vheap_size, VHSize}]),
- ?line {min_heap_size, HSize} = process_info(Pid, min_heap_size),
- ?line {min_bin_vheap_size, VHSize} = process_info(Pid, min_bin_vheap_size),
- ?line Pid ! stop,
- ?line ok.
+ {min_heap_size, HSize} = process_info(Pid, min_heap_size),
+ {min_bin_vheap_size, VHSize} = process_info(Pid, min_bin_vheap_size),
+ Pid ! stop,
+ ok.
processes_term_proc_list(doc) ->
[];
processes_term_proc_list(suite) ->
[];
processes_term_proc_list(Config) when is_list(Config) ->
- ?line Tester = self(),
- ?line as_expected = processes_term_proc_list_test(false),
- ?line {ok, Node} = start_node(Config, "+Mis true"),
- ?line RT = spawn_link(Node,
- fun () ->
- receive after 1000 -> ok end,
- processes_term_proc_list_test(false),
- Tester ! {it_worked, self()}
- end),
- ?line receive {it_worked, RT} -> ok end,
- ?line stop_node(Node),
- ?line ok.
+ Tester = self(),
+ as_expected = processes_term_proc_list_test(false),
+ {ok, Node} = start_node(Config, "+Mis true"),
+ RT = spawn_link(Node, fun () ->
+ receive after 1000 -> ok end,
+ processes_term_proc_list_test(false),
+ Tester ! {it_worked, self()}
+ end),
+ receive {it_worked, RT} -> ok end,
+ stop_node(Node),
+ ok.
-define(CHK_TERM_PROC_LIST(MC, XB),
chk_term_proc_list(?LINE, MC, XB)).
@@ -1990,8 +1938,8 @@ chk_term_proc_list(Line, MustChk, ExpectBlks) ->
not_enabled;
{_, MS} ->
{value,
- {processes_term_proc_el,
- DL}} = lists:keysearch(processes_term_proc_el, 1, MS),
+ {ptab_list_deleted_el,
+ DL}} = lists:keysearch(ptab_list_deleted_el, 1, MS),
case lists:keysearch(blocks, 1, DL) of
{value, {blocks, ExpectBlks, _, _}} ->
ok;
@@ -2005,35 +1953,34 @@ chk_term_proc_list(Line, MustChk, ExpectBlks) ->
ok.
processes_term_proc_list_test(MustChk) ->
- ?line Tester = self(),
- ?line enable_internal_state(),
- ?line PBInfo = erts_debug:get_internal_state(processes_bif_info),
- ?line print_processes_bif_info(PBInfo),
- ?line WantReds = PBInfo#processes_bif_info.min_start_reds + 10,
- ?line #processes_bif_info{tab_chunks = Chunks,
- tab_chunks_size = ChunksSize,
- tab_indices_per_red = IndiciesPerRed
- } = PBInfo,
- ?line true = Chunks > 1,
- ?line true = Chunks*ChunksSize >= IndiciesPerRed*WantReds,
- ?line Processes = fun () ->
- erts_debug:set_internal_state(reds_left,
- WantReds),
- processes()
- end,
- ?line Exit = fun (P) ->
- unlink(P),
- exit(P, bang),
- wait_until(
- fun () ->
- not lists:member(
- P,
- erts_debug:get_internal_state(
- processes))
- end)
- end,
- ?line SpawnSuspendProcessesProc
- = fun () ->
+ Tester = self(),
+ enable_internal_state(),
+ PBInfo = erts_debug:get_internal_state(processes_bif_info),
+ print_processes_bif_info(PBInfo),
+ WantReds = PBInfo#ptab_list_bif_info.min_start_reds + 10,
+ #ptab_list_bif_info{tab_chunks = Chunks,
+ tab_chunks_size = ChunksSize,
+ tab_indices_per_red = IndiciesPerRed
+ } = PBInfo,
+ true = Chunks > 1,
+ true = Chunks*ChunksSize >= IndiciesPerRed*WantReds,
+ Processes = fun () ->
+ erts_debug:set_internal_state(reds_left,
+ WantReds),
+ processes()
+ end,
+ Exit = fun (P) ->
+ unlink(P),
+ exit(P, bang),
+ wait_until(
+ fun () ->
+ not lists:member(
+ P,
+ erts_debug:get_internal_state(
+ processes))
+ end)
+ end,
+ SpawnSuspendProcessesProc = fun () ->
erlang:system_flag(multi_scheduling, block),
P = spawn_link(fun () ->
Tester ! {suspend_me, self()},
@@ -2046,76 +1993,76 @@ processes_term_proc_list_test(MustChk) ->
erlang:suspend_process(P),
erlang:system_flag(multi_scheduling, unblock),
[{status,suspended},
- {current_function,{erlang,processes_trap,2}}]
+ {current_function,{erlang,ptab_list_continue,2}}]
= process_info(P, [status, current_function]),
P
end,
- ?line ResumeProcessesProc = fun (P) ->
+ ResumeProcessesProc = fun (P) ->
erlang:resume_process(P),
receive {P, done, _} -> ok end
end,
- ?line ?CHK_TERM_PROC_LIST(MustChk, 0),
- ?line HangAround = fun () -> receive after infinity -> ok end end,
- ?line HA1 = spawn_link(HangAround),
- ?line HA2 = spawn_link(HangAround),
- ?line HA3 = spawn_link(HangAround),
- ?line S1 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 1),
- ?line Exit(HA1),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 2),
- ?line S2 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 3),
- ?line S3 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 4),
- ?line Exit(HA2),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 5),
- ?line S4 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 6),
- ?line Exit(HA3),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 7),
- ?line ResumeProcessesProc(S1),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 5),
- ?line ResumeProcessesProc(S3),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 4),
- ?line ResumeProcessesProc(S4),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 3),
- ?line ResumeProcessesProc(S2),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 0),
- ?line Exit(S1),
- ?line Exit(S2),
- ?line Exit(S3),
- ?line Exit(S4),
-
-
- ?line HA4 = spawn_link(HangAround),
- ?line HA5 = spawn_link(HangAround),
- ?line HA6 = spawn_link(HangAround),
- ?line S5 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 1),
- ?line Exit(HA4),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 2),
- ?line S6 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 3),
- ?line Exit(HA5),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 4),
- ?line S7 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 5),
- ?line Exit(HA6),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 6),
- ?line S8 = SpawnSuspendProcessesProc(),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 7),
-
- ?line erlang:system_flag(multi_scheduling, block),
- ?line Exit(S8),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 7),
- ?line Exit(S5),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 6),
- ?line Exit(S7),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 6),
- ?line Exit(S6),
- ?line ?CHK_TERM_PROC_LIST(MustChk, 0),
- ?line erlang:system_flag(multi_scheduling, unblock),
- ?line as_expected.
+ ?CHK_TERM_PROC_LIST(MustChk, 0),
+ HangAround = fun () -> receive after infinity -> ok end end,
+ HA1 = spawn_link(HangAround),
+ HA2 = spawn_link(HangAround),
+ HA3 = spawn_link(HangAround),
+ S1 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 1),
+ Exit(HA1),
+ ?CHK_TERM_PROC_LIST(MustChk, 2),
+ S2 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 3),
+ S3 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 4),
+ Exit(HA2),
+ ?CHK_TERM_PROC_LIST(MustChk, 5),
+ S4 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 6),
+ Exit(HA3),
+ ?CHK_TERM_PROC_LIST(MustChk, 7),
+ ResumeProcessesProc(S1),
+ ?CHK_TERM_PROC_LIST(MustChk, 5),
+ ResumeProcessesProc(S3),
+ ?CHK_TERM_PROC_LIST(MustChk, 4),
+ ResumeProcessesProc(S4),
+ ?CHK_TERM_PROC_LIST(MustChk, 3),
+ ResumeProcessesProc(S2),
+ ?CHK_TERM_PROC_LIST(MustChk, 0),
+ Exit(S1),
+ Exit(S2),
+ Exit(S3),
+ Exit(S4),
+
+
+ HA4 = spawn_link(HangAround),
+ HA5 = spawn_link(HangAround),
+ HA6 = spawn_link(HangAround),
+ S5 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 1),
+ Exit(HA4),
+ ?CHK_TERM_PROC_LIST(MustChk, 2),
+ S6 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 3),
+ Exit(HA5),
+ ?CHK_TERM_PROC_LIST(MustChk, 4),
+ S7 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 5),
+ Exit(HA6),
+ ?CHK_TERM_PROC_LIST(MustChk, 6),
+ S8 = SpawnSuspendProcessesProc(),
+ ?CHK_TERM_PROC_LIST(MustChk, 7),
+
+ erlang:system_flag(multi_scheduling, block),
+ Exit(S8),
+ ?CHK_TERM_PROC_LIST(MustChk, 7),
+ Exit(S5),
+ ?CHK_TERM_PROC_LIST(MustChk, 6),
+ Exit(S7),
+ ?CHK_TERM_PROC_LIST(MustChk, 6),
+ Exit(S6),
+ ?CHK_TERM_PROC_LIST(MustChk, 0),
+ erlang:system_flag(multi_scheduling, unblock),
+ as_expected.
otp_7738_waiting(doc) ->
@@ -2123,88 +2070,88 @@ otp_7738_waiting(doc) ->
otp_7738_waiting(suite) ->
[];
otp_7738_waiting(Config) when is_list(Config) ->
- ?line otp_7738_test(waiting).
+ otp_7738_test(waiting).
otp_7738_suspended(doc) ->
[];
otp_7738_suspended(suite) ->
[];
otp_7738_suspended(Config) when is_list(Config) ->
- ?line otp_7738_test(suspended).
+ otp_7738_test(suspended).
otp_7738_resume(doc) ->
[];
otp_7738_resume(suite) ->
[];
otp_7738_resume(Config) when is_list(Config) ->
- ?line otp_7738_test(resume).
+ otp_7738_test(resume).
otp_7738_test(Type) ->
- ?line T = self(),
- ?line S = spawn_link(fun () ->
- receive
- {suspend, Suspendee} ->
- erlang:suspend_process(Suspendee),
- T ! {suspended, Suspendee},
- receive
- after 10 ->
- erlang:resume_process(Suspendee),
- Suspendee ! wake_up
- end;
- {send, To, Msg} ->
- receive after 10 -> ok end,
- To ! Msg
- end
- end),
- ?line R = spawn_link(fun () ->
- X = lists:seq(1, 20000000),
- T ! {initialized, self()},
- ?line case Type of
- _ when Type == suspended;
- Type == waiting ->
- receive _ -> ok end;
- _ when Type == resume ->
- Receive = fun (F) ->
- receive
- _ ->
- ok
- after 0 ->
- F(F)
- end
- end,
- Receive(Receive)
- end,
- T ! {woke_up, self()},
- id(X)
- end),
- ?line receive {initialized, R} -> ok end,
- ?line receive after 10 -> ok end,
- ?line case Type of
+ T = self(),
+ S = spawn_link(fun () ->
+ receive
+ {suspend, Suspendee} ->
+ erlang:suspend_process(Suspendee),
+ T ! {suspended, Suspendee},
+ receive
+ after 10 ->
+ erlang:resume_process(Suspendee),
+ Suspendee ! wake_up
+ end;
+ {send, To, Msg} ->
+ receive after 10 -> ok end,
+ To ! Msg
+ end
+ end),
+ R = spawn_link(fun () ->
+ X = lists:seq(1, 20000000),
+ T ! {initialized, self()},
+ case Type of
+ _ when Type == suspended;
+ Type == waiting ->
+ receive _ -> ok end;
+ _ when Type == resume ->
+ Receive = fun (F) ->
+ receive
+ _ ->
+ ok
+ after 0 ->
+ F(F)
+ end
+ end,
+ Receive(Receive)
+ end,
+ T ! {woke_up, self()},
+ id(X)
+ end),
+ receive {initialized, R} -> ok end,
+ receive after 10 -> ok end,
+ case Type of
suspended ->
- ?line erlang:suspend_process(R),
- ?line S ! {send, R, wake_up};
+ erlang:suspend_process(R),
+ S ! {send, R, wake_up};
waiting ->
- ?line S ! {send, R, wake_up};
+ S ! {send, R, wake_up};
resume ->
- ?line S ! {suspend, R},
- ?line receive {suspended, R} -> ok end
+ S ! {suspend, R},
+ receive {suspended, R} -> ok end
end,
- ?line erlang:garbage_collect(R),
- ?line case Type of
+ erlang:garbage_collect(R),
+ case Type of
suspended ->
- ?line erlang:resume_process(R);
+ erlang:resume_process(R);
_ ->
- ?line ok
+ ok
end,
- ?line receive
+ receive
{woke_up, R} ->
- ?line ok
+ ok
after 2000 ->
- ?line I = process_info(R, [status, message_queue_len]),
- ?line ?t:format("~p~n", [I]),
- ?line ?t:fail(no_progress)
+ I = process_info(R, [status, message_queue_len]),
+ ?t:format("~p~n", [I]),
+ ?t:fail(no_progress)
end,
- ?line ok.
+ ok.
gor(Reds, Stop) ->
receive
@@ -2218,28 +2165,28 @@ gor(Reds, Stop) ->
end.
garb_other_running(Config) when is_list(Config) ->
- ?line Stop = make_ref(),
- ?line {Pid, Mon} = spawn_monitor(fun () -> gor(0, Stop) end),
- ?line Reds = lists:foldl(fun (_, OldReds) ->
- ?line erlang:garbage_collect(Pid),
- ?line receive after 1 -> ok end,
- ?line Pid ! {self(), reds},
- ?line receive
+ Stop = make_ref(),
+ {Pid, Mon} = spawn_monitor(fun () -> gor(0, Stop) end),
+ Reds = lists:foldl(fun (_, OldReds) ->
+ erlang:garbage_collect(Pid),
+ receive after 1 -> ok end,
+ Pid ! {self(), reds},
+ receive
{reds, NewReds, Pid} ->
- ?line true = (NewReds > OldReds),
- ?line NewReds
+ true = (NewReds > OldReds),
+ NewReds
end
end,
0,
lists:seq(1, 10000)),
- ?line receive after 1 -> ok end,
- ?line Pid ! {self(), Stop},
- ?line receive
+ receive after 1 -> ok end,
+ Pid ! {self(), Stop},
+ receive
{stopped, Stop, StopReds, Pid} ->
- ?line true = (StopReds > Reds)
+ true = (StopReds > Reds)
end,
- ?line receive {'DOWN', Mon, process, Pid, normal} -> ok end,
- ?line ok.
+ receive {'DOWN', Mon, process, Pid, normal} -> ok end,
+ ok.
%% Internal functions
@@ -2263,9 +2210,9 @@ start_node(Config) ->
start_node(Config, "").
start_node(Config, Args) when is_list(Config) ->
- ?line Pa = filename:dirname(code:which(?MODULE)),
- ?line {A, B, C} = now(),
- ?line Name = list_to_atom(atom_to_list(?MODULE)
+ Pa = filename:dirname(code:which(?MODULE)),
+ {A, B, C} = now(),
+ Name = list_to_atom(atom_to_list(?MODULE)
++ "-"
++ atom_to_list(?config(testcase, Config))
++ "-"
@@ -2274,7 +2221,7 @@ start_node(Config, Args) when is_list(Config) ->
++ integer_to_list(B)
++ "-"
++ integer_to_list(C)),
- ?line ?t:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]).
+ ?t:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]).
stop_node(Node) ->
?t:stop_node(Node).
diff --git a/erts/emulator/test/save_calls_SUITE.erl b/erts/emulator/test/save_calls_SUITE.erl
index 390b49b604..26ac4f2f7f 100644
--- a/erts/emulator/test/save_calls_SUITE.erl
+++ b/erts/emulator/test/save_calls_SUITE.erl
@@ -21,8 +21,10 @@
-include_lib("test_server/include/test_server.hrl").
--export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
- init_per_group/2,end_per_group/2]).
+-export([all/0, suite/0,groups/0,
+ init_per_suite/1, end_per_suite/1,
+ init_per_group/2,end_per_group/2,
+ init_per_testcase/2,end_per_testcase/2]).
-export([save_calls_1/1,dont_break_reductions/1]).
@@ -48,6 +50,27 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
+init_per_testcase(dont_break_reductions,Config) ->
+ %% Skip on --enable-native-libs as hipe reschedules after each
+ %% function call.
+ case erlang:system_info(hipe_architecture) of
+ undefined ->
+ Config;
+ Architecture ->
+ {lists, ListsBinary, _ListsFilename} = code:get_object_code(lists),
+ ChunkName = hipe_unified_loader:chunk_name(Architecture),
+ NativeChunk = beam_lib:chunks(ListsBinary, [ChunkName]),
+ case NativeChunk of
+ {ok,{_,[{_,Bin}]}} when is_binary(Bin) ->
+ {skip,"Does not work for --enable-native-libs"};
+ {error, beam_lib, _} -> Config
+ end
+ end;
+init_per_testcase(_,Config) ->
+ Config.
+
+end_per_testcase(_,_Config) ->
+ ok.
dont_break_reductions(suite) ->
[];
diff --git a/erts/emulator/test/smoke_test_SUITE.erl b/erts/emulator/test/smoke_test_SUITE.erl
index 71186d4942..6f5c2080c0 100644
--- a/erts/emulator/test/smoke_test_SUITE.erl
+++ b/erts/emulator/test/smoke_test_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2011. All Rights Reserved.
+%% Copyright Ericsson AB 2011-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
diff --git a/erts/emulator/test/timer_bif_SUITE.erl b/erts/emulator/test/timer_bif_SUITE.erl
index 7ff7449ff5..c9533d0748 100644
--- a/erts/emulator/test/timer_bif_SUITE.erl
+++ b/erts/emulator/test/timer_bif_SUITE.erl
@@ -71,38 +71,23 @@ end_per_group(_GroupName, Config) ->
start_timer_1(doc) -> ["Basic start_timer/3 functionality"];
start_timer_1(Config) when is_list(Config) ->
- ?line Ref1 = erlang:start_timer(1000, self(), plopp),
- ?line ok = get(1100, {timeout, Ref1, plopp}),
-
- ?line false = erlang:read_timer(Ref1),
- ?line false = erlang:cancel_timer(Ref1),
- ?line false = erlang:read_timer(Ref1),
-
- ?line Ref2 = erlang:start_timer(1000, self(), plapp),
- ?line Left2 = erlang:cancel_timer(Ref2),
- UpperLimit = case os:type() of
- vxworks ->
- %% The ticks of vxworks have a far lesser granularity
- %% than what is expected in this testcase, in
- %% fact the Left2 variable can get a little more than 1000...
- 1100;
- _ ->
- 1000
- end,
- ?line RetVal = case os:type() of
- vxworks ->
- {comment, "VxWorks behaves slightly unexpected, should be fixed,"};
- _ ->
- ok
- end,
- ?line true = (Left2 > 900) and (Left2 =< UpperLimit),
- ?line empty = get_msg(),
- ?line false = erlang:cancel_timer(Ref2),
-
- ?line Ref3 = erlang:start_timer(1000, self(), plopp),
- ?line no_message = get(900, {timeout, Ref3, plopp}),
-
- RetVal.
+ Ref1 = erlang:start_timer(1000, self(), plopp),
+ ok = get(1100, {timeout, Ref1, plopp}),
+
+ false = erlang:read_timer(Ref1),
+ false = erlang:cancel_timer(Ref1),
+ false = erlang:read_timer(Ref1),
+
+ Ref2 = erlang:start_timer(1000, self(), plapp),
+ Left2 = erlang:cancel_timer(Ref2),
+ UpperLimit = 1000,
+ true = (Left2 > 900) and (Left2 =< UpperLimit),
+ empty = get_msg(),
+ false = erlang:cancel_timer(Ref2),
+
+ Ref3 = erlang:start_timer(1000, self(), plopp),
+ no_message = get(900, {timeout, Ref3, plopp}),
+ ok.
send_after_1(doc) -> ["Basic send_after/3 functionality"];
send_after_1(Config) when is_list(Config) ->
@@ -153,19 +138,11 @@ send_after_2(Config) when is_list(Config) ->
send_after_3(doc) -> ["send_after/3: messages in the right order, worse than send_after_2"];
send_after_3(Config) when is_list(Config) ->
- case os:type() of
- vxworks ->
- {skipped, "VxWorks timer granularity and order is not working good, this is subject to change!"};
- _ ->
- do_send_after_3()
- end.
-
-do_send_after_3() ->
- ?line _ = erlang:send_after(100, self(), b1),
- ?line _ = erlang:send_after(101, self(), b2),
- ?line _ = erlang:send_after(102, self(), b3),
- ?line _ = erlang:send_after(103, self(), last),
- ?line [b1, b2, b3, last] = collect(last),
+ _ = erlang:send_after(100, self(), b1),
+ _ = erlang:send_after(101, self(), b2),
+ _ = erlang:send_after(102, self(), b3),
+ _ = erlang:send_after(103, self(), last),
+ [b1, b2, b3, last] = collect(last),
% This behaviour is not guaranteed:
% ?line _ = erlang:send_after(100, self(), c1),
diff --git a/erts/emulator/test/trace_local_SUITE.erl b/erts/emulator/test/trace_local_SUITE.erl
index 32e2a98e3c..1e0705fabe 100644
--- a/erts/emulator/test/trace_local_SUITE.erl
+++ b/erts/emulator/test/trace_local_SUITE.erl
@@ -70,7 +70,8 @@ config(priv_dir,_) ->
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2, basic/1, bit_syntax/1,
- return/1, on_and_off/1, stack_grow/1,info/1, delete/1,
+ return/1, on_and_off/1, systematic_on_off/1,
+ stack_grow/1,info/1, delete/1,
exception/1, exception_apply/1,
exception_function/1, exception_apply_function/1,
exception_nocatch/1, exception_nocatch_apply/1,
@@ -80,6 +81,7 @@ config(priv_dir,_) ->
exception_meta_nocatch/1, exception_meta_nocatch_apply/1,
exception_meta_nocatch_function/1,
exception_meta_nocatch_apply_function/1,
+ concurrency/1,
init_per_testcase/2, end_per_testcase/2]).
init_per_testcase(_Case, Config) ->
?line Dog=test_server:timetrap(test_server:minutes(2)),
@@ -89,14 +91,23 @@ end_per_testcase(_Case, Config) ->
shutdown(),
Dog=?config(watchdog, Config),
test_server:timetrap_cancel(Dog),
- ok.
+
+ %% Reloading the module will clear all trace patterns and, in a
+ %% debug-compiled emulator, run assertions on the counters for
+ %% the number of functions with breakpoints.
+
+ c:l(?MODULE).
+
+
+
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
case test_server:is_native(trace_local_SUITE) of
true -> [not_run];
false ->
- [basic, bit_syntax, return, on_and_off, stack_grow,
+ [basic, bit_syntax, return, on_and_off, systematic_on_off,
+ stack_grow,
info, delete, exception, exception_apply,
exception_function, exception_apply_function,
exception_nocatch, exception_nocatch_apply,
@@ -106,7 +117,8 @@ all() ->
exception_meta_apply_function, exception_meta_nocatch,
exception_meta_nocatch_apply,
exception_meta_nocatch_function,
- exception_meta_nocatch_apply_function]
+ exception_meta_nocatch_apply_function,
+ concurrency]
end.
groups() ->
@@ -350,7 +362,8 @@ same(A, B) ->
basic_test() ->
?line setup([call]),
- ?line erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
+ NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
+ NumMatches = erlang:trace_pattern({?MODULE,'_','_'},[],[local]),
?line erlang:trace_pattern({?MODULE,slave,'_'},false,[local]),
?line [1,1,1,1] = apply_slave(?MODULE,exported_wrap,[1]),
?line ?CT(?MODULE,exported_wrap,[1]),
@@ -572,7 +585,118 @@ on_and_off_test() ->
end,
?line ?NM,
ok.
-
+
+systematic_on_off(Config) when is_list(Config) ->
+ setup([call]),
+ Local = combinations([local,meta,call_count,call_time]),
+ [systematic_on_off_1(Flags) || Flags <- Local],
+
+ %% Make sure that we don't get any trace messages when trace
+ %% is supposed to be off.
+ receive_no_next(500).
+
+systematic_on_off_1(Local) ->
+ io:format("~p\n", [Local]),
+
+ %% Global off.
+ verify_trace_info(false, []),
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, Local),
+ verify_trace_info(false, Local),
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, [global]),
+ verify_trace_info(false, Local),
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, Local),
+ verify_trace_info(false, []),
+
+ %% Global on.
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, [global]),
+ verify_trace_info(true, []),
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, Local),
+ verify_trace_info(true, []),
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, [global]),
+ verify_trace_info(false, []),
+
+ %% Implicitly turn off global call trace.
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, [global]),
+ verify_trace_info(true, []),
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, Local),
+ verify_trace_info(false, Local),
+
+ %% Implicitly turn off local call trace.
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, true, [global]),
+ verify_trace_info(true, []),
+
+ %% Turn off global call trace. Everything should be off now.
+ 1 = erlang:trace_pattern({?MODULE,exported_wrap,1}, false, [global]),
+ verify_trace_info(false, []),
+
+ ok.
+
+verify_trace_info(Global, Local) ->
+ case erlang:trace_info({?MODULE,exported_wrap,1}, all) of
+ {all,false} ->
+ false = Global,
+ [] = Local;
+ {all,Ps} ->
+ io:format("~p\n", [Ps]),
+ [verify_trace_info(P, Global, Local) || P <- Ps]
+ end,
+ global_call(Global, Local),
+ local_call(Local),
+ ok.
+
+verify_trace_info({traced,global}, true, []) -> ok;
+verify_trace_info({traced,local}, false, _) -> ok;
+verify_trace_info({match_spec,[]}, _, _) -> ok;
+verify_trace_info({meta_match_spec,[]}, _, _) -> ok;
+verify_trace_info({LocalFlag,Bool}, _, Local) when is_boolean(Bool) ->
+ try
+ Bool = lists:member(LocalFlag, Local)
+ catch
+ error:_ ->
+ io:format("Line ~p: {~p,~p}, false, ~p\n",
+ [?LINE,LocalFlag,Bool,Local]),
+ ?t:fail()
+ end;
+verify_trace_info({meta,Pid}, false, Local) when is_pid(Pid) ->
+ true = lists:member(meta, Local);
+verify_trace_info({call_time,_}, false, Local) ->
+ true = lists:member(call_time, Local);
+verify_trace_info({call_count,_}, false, Local) ->
+ true = lists:member(call_count, Local).
+
+global_call(Global, Local) ->
+ apply_slave(?MODULE, exported_wrap, [global_call]),
+ case Global of
+ false ->
+ recv_local_call(Local, [global_call]);
+ true ->
+ ?CT(?MODULE, exported_wrap, [global_call])
+ end.
+
+local_call(Local) ->
+ lambda_slave(fun() -> exported_wrap(local_call) end),
+ recv_local_call(Local, [local_call]).
+
+recv_local_call(Local, Args) ->
+ case lists:member(local, Local) of
+ false ->
+ ok;
+ true ->
+ ?CT(?MODULE, exported_wrap, Args)
+ end,
+ case lists:member(meta, Local) of
+ false ->
+ ok;
+ true ->
+ ?CTT(?MODULE, exported_wrap, Args)
+ end,
+ ok.
+
+combinations([_]=One) ->
+ [One];
+combinations([H|T]) ->
+ Cs = combinations(T),
+ [[H|C] || C <- Cs] ++ Cs.
stack_grow_test() ->
?line setup([call,return_to]),
@@ -703,16 +827,10 @@ exception_test(Opts) ->
?line ok.
exceptions() ->
- ?line Ref = make_ref(),
- ?line N = case os:type() of
- vxworks ->
- ?line 2000; % Limited memory on themachines, not actually
- % VxWorks' fault /PaN
- _ ->
- ?line 200000
- end,
- ?line LiL = seq(1, N-1, N), % Long Improper List
- ?line LL = seq(1, N, []), % Long List
+ Ref = make_ref(),
+ N = 200000,
+ LiL = seq(1, N-1, N), % Long Improper List
+ LL = seq(1, N, []), % Long List
[{{erlang,exit}, [done]},
{{erlang,error}, [1.0]},
{{erlang,error}, [Ref,[]]},
@@ -813,6 +931,42 @@ clean_location({crash,{Reason,Stk0}}) ->
{crash,{Reason,Stk}};
clean_location(Term) -> Term.
+concurrency(_Config) ->
+ N = erlang:system_info(schedulers),
+
+ %% Spawn 2*N processes that spin in a tight infinite loop,
+ %% and one process that will turn on and off local call
+ %% trace on the infinite_loop/0 function. We expect the
+ %% emulator to crash if there is a memory barrier bug or
+ %% if an aligned word-sized write is not atomic.
+
+ Ps0 = [spawn_monitor(fun() -> infinite_loop() end) ||
+ _ <- lists:seq(1, 2*N)],
+ OnAndOff = fun() -> concurrency_on_and_off() end,
+ Ps1 = [spawn_monitor(OnAndOff)|Ps0],
+ ?t:sleep(1000),
+
+ %% Now spawn off N more processes that turn on and off
+ %% a local trace pattern.
+ Ps = [spawn_monitor(OnAndOff) || _ <- lists:seq(1, N)] ++ Ps1,
+ ?t:sleep(1000),
+
+ %% Clean up.
+ [exit(Pid, kill) || {Pid,_} <- Ps],
+ [receive
+ {'DOWN',Ref,process,Pid,killed} -> ok
+ end || {Pid,Ref} <- Ps],
+ erlang:trace_pattern({?MODULE,infinite_loop,0}, false, [local]),
+ ok.
+
+concurrency_on_and_off() ->
+ 1 = erlang:trace_pattern({?MODULE,infinite_loop,0}, true, [local]),
+ 1 = erlang:trace_pattern({?MODULE,infinite_loop,0}, false, [local]),
+ concurrency_on_and_off().
+
+infinite_loop() ->
+ infinite_loop().
+
%%% Tracee target functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
diff --git a/erts/emulator/test/trace_port_SUITE.erl b/erts/emulator/test/trace_port_SUITE.erl
index f81cab3114..cc2eadafbc 100644
--- a/erts/emulator/test/trace_port_SUITE.erl
+++ b/erts/emulator/test/trace_port_SUITE.erl
@@ -472,14 +472,9 @@ default_tracer(Config) when is_list(Config) ->
?line M = N,
ok.
-
%%% Help functions.
-huge_data() ->
- case os:type() of
- vxworks -> huge_data(4711);
- _ -> huge_data(16384)
- end.
+huge_data() -> huge_data(16384).
huge_data(0) -> [];
huge_data(N) when N rem 2 == 0 ->
P = huge_data(N div 2),
diff --git a/erts/emulator/test/tuple_SUITE.erl b/erts/emulator/test/tuple_SUITE.erl
index bfc3910742..a3b2764a5d 100644
--- a/erts/emulator/test/tuple_SUITE.erl
+++ b/erts/emulator/test/tuple_SUITE.erl
@@ -20,6 +20,7 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
t_size/1, t_tuple_size/1, t_element/1, t_setelement/1,
+ t_insert_element/1, t_delete_element/1,
t_list_to_tuple/1, t_tuple_to_list/1,
t_make_tuple_2/1, t_make_tuple_3/1, t_append_element/1,
build_and_match/1, tuple_with_case/1, tuple_in_guard/1]).
@@ -41,6 +42,7 @@ all() ->
[build_and_match, t_size, t_tuple_size, t_list_to_tuple,
t_tuple_to_list, t_element, t_setelement,
t_make_tuple_2, t_make_tuple_3, t_append_element,
+ t_insert_element, t_delete_element,
tuple_with_case, tuple_in_guard].
groups() ->
@@ -60,40 +62,40 @@ end_per_group(_GroupName, Config) ->
build_and_match(Config) when is_list(Config) ->
- ?line {} = id({}),
- ?line {1} = id({1}),
- ?line {1, 2} = id({1, 2}),
- ?line {1, 2, 3} = id({1, 2, 3}),
- ?line {1, 2, 3, 4} = id({1, 2, 3, 4}),
- ?line {1, 2, 3, 4, 5} = id({1, 2, 3, 4, 5}),
- ?line {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}),
- ?line {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}),
- ?line {1, 2, 3, 4, 5, 6, 7} = id({1, 2, 3, 4, 5, 6, 7}),
- ?line {1, 2, 3, 4, 5, 6, 7, 8} = id({1, 2, 3, 4, 5, 6, 7, 8}),
+ {} = id({}),
+ {1} = id({1}),
+ {1, 2} = id({1, 2}),
+ {1, 2, 3} = id({1, 2, 3}),
+ {1, 2, 3, 4} = id({1, 2, 3, 4}),
+ {1, 2, 3, 4, 5} = id({1, 2, 3, 4, 5}),
+ {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}),
+ {1, 2, 3, 4, 5, 6} = id({1, 2, 3, 4, 5, 6}),
+ {1, 2, 3, 4, 5, 6, 7} = id({1, 2, 3, 4, 5, 6, 7}),
+ {1, 2, 3, 4, 5, 6, 7, 8} = id({1, 2, 3, 4, 5, 6, 7, 8}),
ok.
%% Tests size(Tuple).
t_size(Config) when is_list(Config) ->
- ?line 0 = size({}),
- ?line 1 = size({a}),
- ?line 1 = size({{a}}),
- ?line 2 = size({{a}, {b}}),
- ?line 3 = size({1, 2, 3}),
+ 0 = size({}),
+ 1 = size({a}),
+ 1 = size({{a}}),
+ 2 = size({{a}, {b}}),
+ 3 = size({1, 2, 3}),
ok.
t_tuple_size(Config) when is_list(Config) ->
- ?line 0 = tuple_size(id({})),
- ?line 1 = tuple_size(id({a})),
- ?line 1 = tuple_size(id({{a}})),
- ?line 2 = tuple_size(id({{a},{b}})),
- ?line 3 = tuple_size(id({1,2,3})),
+ 0 = tuple_size(id({})),
+ 1 = tuple_size(id({a})),
+ 1 = tuple_size(id({{a}})),
+ 2 = tuple_size(id({{a},{b}})),
+ 3 = tuple_size(id({1,2,3})),
%% Error cases.
- ?line {'EXIT',{badarg,_}} = (catch tuple_size([])),
- ?line {'EXIT',{badarg,_}} = (catch tuple_size(<<1,2,3>>)),
- ?line error = ludicrous_tuple_size({a,b,c}),
- ?line error = ludicrous_tuple_size([a,b,c]),
+ {'EXIT',{badarg,_}} = (catch tuple_size([])),
+ {'EXIT',{badarg,_}} = (catch tuple_size(<<1,2,3>>)),
+ error = ludicrous_tuple_size({a,b,c}),
+ error = ludicrous_tuple_size([a,b,c]),
ok.
@@ -104,44 +106,44 @@ ludicrous_tuple_size(_) -> error.
%% Tests element/2.
t_element(Config) when is_list(Config) ->
- ?line a = element(1, {a}),
- ?line a = element(1, {a, b}),
+ a = element(1, {a}),
+ a = element(1, {a, b}),
- ?line List = lists:seq(1, 4096),
- ?line Tuple = list_to_tuple(lists:seq(1, 4096)),
- ?line get_elements(List, Tuple, 1),
+ List = lists:seq(1, 4096),
+ Tuple = list_to_tuple(lists:seq(1, 4096)),
+ get_elements(List, Tuple, 1),
- ?line {'EXIT', {badarg, _}} = (catch element(0, id({a,b}))),
- ?line {'EXIT', {badarg, _}} = (catch element(3, id({a,b}))),
- ?line {'EXIT', {badarg, _}} = (catch element(1, id({}))),
- ?line {'EXIT', {badarg, _}} = (catch element(1, id([a,b]))),
- ?line {'EXIT', {badarg, _}} = (catch element(1, id(42))),
- ?line {'EXIT', {badarg, _}} = (catch element(id(1.5), id({a,b}))),
+ {'EXIT', {badarg, _}} = (catch element(0, id({a,b}))),
+ {'EXIT', {badarg, _}} = (catch element(3, id({a,b}))),
+ {'EXIT', {badarg, _}} = (catch element(1, id({}))),
+ {'EXIT', {badarg, _}} = (catch element(1, id([a,b]))),
+ {'EXIT', {badarg, _}} = (catch element(1, id(42))),
+ {'EXIT', {badarg, _}} = (catch element(id(1.5), id({a,b}))),
ok.
get_elements([Element|Rest], Tuple, Pos) ->
- ?line Element = element(Pos, Tuple),
- ?line get_elements(Rest, Tuple, Pos+1);
+ Element = element(Pos, Tuple),
+ get_elements(Rest, Tuple, Pos+1);
get_elements([], _Tuple, _Pos) ->
ok.
%% Tests set_element/3.
t_setelement(Config) when is_list(Config) ->
- ?line {x} = setelement(1, id({1}), x),
- ?line {x,2} = setelement(1, id({1,2}), x),
- ?line {1,x} = setelement(2, id({1,2}), x),
+ {x} = setelement(1, id({1}), x),
+ {x,2} = setelement(1, id({1,2}), x),
+ {1,x} = setelement(2, id({1,2}), x),
- ?line Tuple = list_to_tuple(lists:duplicate(2048, x)),
- ?line NewTuple = set_all_elements(Tuple, 1),
- ?line NewTuple = list_to_tuple(lists:seq(1+7, 2048+7)),
+ Tuple = list_to_tuple(lists:duplicate(2048, x)),
+ NewTuple = set_all_elements(Tuple, 1),
+ NewTuple = list_to_tuple(lists:seq(1+7, 2048+7)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(0, {a, b}, x)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(3, {a, b}, x)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(1, {}, x)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(1, [a, b], x)),
- ?line {'EXIT', {badarg, _}} = (catch setelement(1.5, {a, b}, x)),
+ {'EXIT', {badarg, _}} = (catch setelement(0, {a, b}, x)),
+ {'EXIT', {badarg, _}} = (catch setelement(3, {a, b}, x)),
+ {'EXIT', {badarg, _}} = (catch setelement(1, {}, x)),
+ {'EXIT', {badarg, _}} = (catch setelement(1, [a, b], x)),
+ {'EXIT', {badarg, _}} = (catch setelement(1.5, {a, b}, x)),
%% Nested setelement with literals.
AnotherTuple = id({0,0,a,b,c}),
@@ -159,52 +161,68 @@ set_all_elements(Tuple, Pos) when Pos > size(Tuple) ->
%% Tests list_to_tuple/1.
t_list_to_tuple(Config) when is_list(Config) ->
- ?line {} = list_to_tuple([]),
- ?line {a} = list_to_tuple([a]),
- ?line {a, b} = list_to_tuple([a, b]),
- ?line {a, b, c} = list_to_tuple([a, b, c]),
- ?line {a, b, c, d} = list_to_tuple([a, b, c, d]),
- ?line {a, b, c, d, e} = list_to_tuple([a, b, c, d, e]),
-
- ?line Size = 4096,
- ?line Tuple = list_to_tuple(lists:seq(1, Size)),
- ?line Size = size(Tuple),
-
- ?line {'EXIT', {badarg, _}} = (catch list_to_tuple(id({a,b}))),
- ?line {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
- ?line {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
-
+ {} = list_to_tuple([]),
+ {a} = list_to_tuple([a]),
+ {a, b} = list_to_tuple([a, b]),
+ {a, b, c} = list_to_tuple([a, b, c]),
+ {a, b, c, d} = list_to_tuple([a, b, c, d]),
+ {a, b, c, d, e} = list_to_tuple([a, b, c, d, e]),
+
+ Size = 4096,
+ Tuple = list_to_tuple(lists:seq(1, Size)),
+ Size = size(Tuple),
+
+ {'EXIT', {badarg, _}} = (catch list_to_tuple(id({a,b}))),
+ {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
+ {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
+
+ % test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ MaxSize = size(MaxTuple),
+
+ {'EXIT', {badarg,_}} = (catch list_to_tuple(lists:seq(1, 1 bsl 24))),
ok.
%% Tests tuple_to_list/1.
t_tuple_to_list(Config) when is_list(Config) ->
- ?line [] = tuple_to_list({}),
- ?line [a] = tuple_to_list({a}),
- ?line [a, b] = tuple_to_list({a, b}),
- ?line [a, b, c] = tuple_to_list({a, b, c}),
- ?line [a, b, c, d] = tuple_to_list({a, b, c, d}),
- ?line [a, b, c, d] = tuple_to_list({a, b, c, d}),
-
- ?line Size = 4096,
- ?line List = lists:seq(1, Size),
- ?line Tuple = list_to_tuple(List),
- ?line Size = size(Tuple),
- ?line List = tuple_to_list(Tuple),
-
- ?line {'EXIT', {badarg,_}} = (catch tuple_to_list(id(a))),
- ?line {'EXIT', {badarg,_}} = (catch tuple_to_list(id(42))),
+ [] = tuple_to_list({}),
+ [a] = tuple_to_list({a}),
+ [a, b] = tuple_to_list({a, b}),
+ [a, b, c] = tuple_to_list({a, b, c}),
+ [a, b, c, d] = tuple_to_list({a, b, c, d}),
+ [a, b, c, d] = tuple_to_list({a, b, c, d}),
+
+ Size = 4096,
+ List = lists:seq(1, Size),
+ Tuple = list_to_tuple(List),
+ Size = size(Tuple),
+ List = tuple_to_list(Tuple),
+
+ {'EXIT', {badarg,_}} = (catch tuple_to_list(id(a))),
+ {'EXIT', {badarg,_}} = (catch tuple_to_list(id(42))),
ok.
%% Tests the make_tuple/2 BIF.
t_make_tuple_2(Config) when is_list(Config) ->
- ?line t_make_tuple1([]),
- ?line t_make_tuple1(42),
- ?line t_make_tuple1(a),
- ?line t_make_tuple1({}),
- ?line t_make_tuple1({a}),
- ?line t_make_tuple1(erlang:make_tuple(400, [])),
+ t_make_tuple1([]),
+ t_make_tuple1(42),
+ t_make_tuple1(a),
+ t_make_tuple1({}),
+ t_make_tuple1({a}),
+ t_make_tuple1(erlang:make_tuple(400, [])),
+
+ % test upper boundary, 16777215 elements
+ t_make_tuple(1 bsl 24 - 1, a),
+ {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 24, a)),
+
+ {'EXIT', {badarg,_}} = (catch erlang:make_tuple(-1, a)),
+ % 26 bits is the total header arity room (for now)
+ {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 26 + 3, a)),
+ % bignum
+ {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 65 + 3, a)),
ok.
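
As a minimal sketch (not part of the patch; the helper name is hypothetical and it assumes enough memory for a 2^24-1 element tuple), the arity boundary exercised by the added cases above can be summarized as:

%% Hypothetical helper, not in tuple_SUITE: the largest allowed tuple
%% arity is (1 bsl 24) - 1 = 16777215; one element more is a badarg.
max_arity_sketch() ->
    MaxSize = (1 bsl 24) - 1,
    MaxSize = tuple_size(erlang:make_tuple(MaxSize, a)),
    {'EXIT', {badarg, _}} = (catch erlang:make_tuple(1 bsl 24, a)),
    ok.
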
t_make_tuple1(Element) ->
@@ -222,29 +240,82 @@ t_make_tuple(Size, Element) ->
%% Tests the erlang:make_tuple/3 BIF.
t_make_tuple_3(Config) when is_list(Config) ->
- ?line {} = erlang:make_tuple(0, def, []),
- ?line {def} = erlang:make_tuple(1, def, []),
- ?line {a} = erlang:make_tuple(1, def, [{1,a}]),
- ?line {a,def,c,def,e} = erlang:make_tuple(5, def, [{5,e},{1,a},{3,c}]),
- ?line {a,def,c,def,e} = erlang:make_tuple(5, def,
- [{1,blurf},{5,e},{3,blurf},
- {1,a},{3,c}]),
+ {} = erlang:make_tuple(0, def, []),
+ {def} = erlang:make_tuple(1, def, []),
+ {a} = erlang:make_tuple(1, def, [{1,a}]),
+
+ {a,def,c,def,e} = erlang:make_tuple(5, def, [{5,e},{1,a},{3,c}]),
+ {a,def,c,def,e} = erlang:make_tuple(5, def, [{1,blurf},{5,e},{3,blurf},{1,a},{3,c}]),
+ MaxSize = 1 bsl 16 - 1,
+ MaxTuple = erlang:make_tuple(MaxSize, def, [{1,blurf},{5,e},{3,blurf},{1,a},{3,c}]),
+ MaxSize = size(MaxTuple),
+
+ %% Error cases.
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(0, def, [{1,a}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{-1,a}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{0,a}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{6,z}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(a, def, [{6,z}])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{1,a}|b])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [42])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [[a,b,c]])),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, non_list)),
+ {'EXIT',{badarg,_}} = (catch erlang:make_tuple(1 bsl 24, def, [{5,e},{1,a},{3,c}])),
+
+ ok.
+
+%% Tests the erlang:insert_element/3 BIF.
+t_insert_element(Config) when is_list(Config) ->
+ {a} = erlang:insert_element(1, {}, a),
+ {{b,b},a} = erlang:insert_element(1, {a}, {b,b}),
+ {a,b} = erlang:insert_element(2, {a}, b),
+ [b,def|_] = tuple_to_list(erlang:insert_element(1, erlang:make_tuple(1 bsl 20, def), b)),
+ [def,b|_] = tuple_to_list(erlang:insert_element(2, erlang:make_tuple(1 bsl 20, def), b)),
+ [def,b|_] = lists:reverse(tuple_to_list(erlang:insert_element(1 bsl 20, erlang:make_tuple(1 bsl 20, def), b))),
+ [b,def|_] = lists:reverse(tuple_to_list(erlang:insert_element((1 bsl 20) + 1, erlang:make_tuple(1 bsl 20, def), b))),
+
+ %% Error cases.
+ {'EXIT',{badarg,_}} = (catch erlang:insert_element(1, [], a)),
+ {'EXIT',{badarg,_}} = (catch erlang:insert_element(1, a, a)),
+ {'EXIT',{badarg,_}} = (catch erlang:insert_element(0, {}, a)),
+ {'EXIT',{badarg,_}} = (catch erlang:insert_element(0, {b,b,b,b,b}, a)),
+ {'EXIT',{badarg,_}} = (catch erlang:insert_element(-1, {}, a)),
+ {'EXIT',{badarg,_}} = (catch erlang:insert_element(2, {}, a)),
+ {'EXIT',{badarg,_}} = (catch erlang:insert_element(6, {b,b,b,b}, a)),
+ {'EXIT',{badarg,_}} = (catch erlang:insert_element(1 bsl 20, {b,b,b,b}, a)),
+ ok.
+
+%% Tests the erlang:delete_element/2 BIF.
+t_delete_element(Config) when is_list(Config) ->
+ {} = erlang:delete_element(1, {a}),
+ {{b,b},c} = erlang:delete_element(1, {a,{b,b},c}),
+ {a,b} = erlang:delete_element(2, {a,c,b}),
+ [2,3|_] = tuple_to_list(erlang:delete_element(1, list_to_tuple(lists:seq(1, 1 bsl 20)))),
+ [1,3|_] = tuple_to_list(erlang:delete_element(2, list_to_tuple(lists:seq(1, 1 bsl 20)))),
+ [(1 bsl 20) - 1, (1 bsl 20) - 2 |_] = lists:reverse(tuple_to_list(erlang:delete_element(1 bsl 20, list_to_tuple(lists:seq(1, 1 bsl 20))))),
+ [(1 bsl 20), (1 bsl 20) - 2 |_] = lists:reverse(tuple_to_list(erlang:delete_element((1 bsl 20) - 1, list_to_tuple(lists:seq(1, 1 bsl 20))))),
%% Error cases.
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(0, def, [{1,a}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{-1,a}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{0,a}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{6,z}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(a, def, [{6,z}])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [{1,a}|b])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [42])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, [[a,b,c]])),
- ?line {'EXIT',{badarg,_}} = (catch erlang:make_tuple(5, def, non_list)),
+ {'EXIT',{badarg,_}} = (catch erlang:delete_element(1, [])),
+ {'EXIT',{badarg,_}} = (catch erlang:delete_element(1, a)),
+ {'EXIT',{badarg,_}} = (catch erlang:delete_element(0, {})),
+ {'EXIT',{badarg,_}} = (catch erlang:delete_element(-1, {})),
+ {'EXIT',{badarg,_}} = (catch erlang:delete_element(1, {})),
+ {'EXIT',{badarg,_}} = (catch erlang:delete_element(0, {b,b,b,b,b})),
+ {'EXIT',{badarg,_}} = (catch erlang:delete_element(5, {b,b,b,b})),
+ {'EXIT',{badarg,_}} = (catch erlang:delete_element(1 bsl 20, {b,c,b,b,b})),
ok.
+
%% Tests the append_element/2 BIF.
t_append_element(Config) when is_list(Config) ->
- t_append_element({}, 2048, 2048).
+ ok = t_append_element({}, 2048, 2048),
+
+ % test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)),
+ ok.
t_append_element(_Tuple, 0, _High) -> ok;
t_append_element(Tuple, N, High) ->
@@ -261,7 +332,7 @@ verify_seq([High|T], High, Lower) ->
%% (This is known to crash earlier versions of BEAM.)
tuple_with_case(Config) when is_list(Config) ->
- ?line {reply, true} = tuple_with_case(),
+ {reply, true} = tuple_with_case(),
ok.
tuple_with_case() ->
@@ -280,21 +351,21 @@ foo() -> ignored.
%% Test to build a tuple in a guard.
tuple_in_guard(Config) when is_list(Config) ->
- ?line Tuple1 = id({a,b}),
- ?line Tuple2 = id({a,b,c}),
- ?line if
- Tuple1 == {element(1, Tuple2),element(2, Tuple2)} ->
- ok;
- true ->
- ?line test_server:fail()
- end,
- ?line if
- Tuple2 == {element(1, Tuple2),element(2, Tuple2),
- element(3, Tuple2)} ->
- ok;
- true ->
- ?line test_server:fail()
- end,
+ Tuple1 = id({a,b}),
+ Tuple2 = id({a,b,c}),
+ if
+ Tuple1 == {element(1, Tuple2),element(2, Tuple2)} ->
+ ok;
+ true ->
+ test_server:fail()
+ end,
+ if
+ Tuple2 == {element(1, Tuple2),element(2, Tuple2),
+ element(3, Tuple2)} ->
+ ok;
+ true ->
+ test_server:fail()
+ end,
ok.
%% Use this function to avoid compile-time evaluation of an expression.
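
For orientation, a minimal usage sketch (not part of the patch; the helper name is hypothetical) of the two BIFs that the new t_insert_element/1 and t_delete_element/1 cases above exercise:

%% erlang:insert_element(Index, Tuple, Term) returns a tuple one element
%% larger; erlang:delete_element(Index, Tuple) returns one element smaller.
insert_delete_sketch() ->
    {a,b,c} = erlang:insert_element(2, {a,c}, b),
    {a,c}   = erlang:delete_element(2, {a,b,c}),
    ok.
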
diff --git a/erts/emulator/utils/beam_makeops b/erts/emulator/utils/beam_makeops
index 8fe2402ca8..16a949c2a6 100755
--- a/erts/emulator/utils/beam_makeops
+++ b/erts/emulator/utils/beam_makeops
@@ -1,4 +1,4 @@
-#!/usr/bin/env perl
+#!/usr/bin/env perl -W
#
# %CopyrightBegin%
#
@@ -362,7 +362,7 @@ while (<>) {
$gen_to_spec{"$name/$arity"} = undef;
$num_specific{"$name/$arity"} = 0;
$min_window{"$name/$arity"} = 255;
- $obsolete[$op_num] = $obsolete eq '-';
+ $obsolete[$op_num] = defined $obsolete;
} else { # Unnumbered generic operation.
push(@unnumbered_generic, [$name, $arity]);
$unnumbered{$name,$arity} = 1;
@@ -379,7 +379,7 @@ while (<>) {
if @args > $max_spec_operands;
&syntax_check($name, @args);
my $arity = @args;
- if ($obsolete[$gen_opnum{$name,$arity}]) {
+ if (defined $gen_opnum{$name,$arity} and $obsolete[$gen_opnum{$name,$arity}]) {
error("specific instructions may not be specified for obsolete instructions");
}
push(@{$specific_op{"$name/$arity"}}, [$name, $hot, @args]);
@@ -810,8 +810,8 @@ sub compiler_output {
#
# Generate .hrl file.
#
- my($name) = "$outdir/${module}.hrl";
- open(STDOUT, ">$name") || die "Failed to open $name for writing: $!\n";
+ my($hrl_name) = "$outdir/${module}.hrl";
+ open(STDOUT, ">$hrl_name") || die "Failed to open $hrl_name for writing: $!\n";
&comment('erlang');
for ($i = 0; $i < @tag_type && $i < 8; $i++) {
@@ -1251,8 +1251,8 @@ sub compile_transform {
$arity++ unless $list[1] eq '*';
$_ = [ @list ];
}
-
- if ($obsolete[$gen_opnum{$name,$arity}]) {
+
+ if (defined $gen_opnum{$name,$arity} && $obsolete[$gen_opnum{$name,$arity}]) {
error("obsolete function must not be used in transformations");
}
@@ -1704,14 +1704,15 @@ sub tr_gen_to {
#
my($first_ref) = shift(@code);
my($size, $first, $key) = @$first_ref;
- my($dummy, $op, $arity) = @$first;
+ my($dummy, $arity);
+ ($dummy, $op, $arity) = @$first;
my($comment) = "\n/*\n * Line $line:\n * $orig_transform\n */\n\n";
$min_window{$key} = $min_window
if $min_window{$key} > $min_window;
my $prev_last;
$prev_last = pop(@{$gen_transform{$key}})
- if defined @{$gen_transform{$key}}; # Fail
+ if defined $gen_transform{$key}; # Fail
if ($prev_last && !is_instr($prev_last, 'fail')) {
error("Line $line: A previous transformation shadows '$orig_transform'");
@@ -1719,7 +1720,7 @@ sub tr_gen_to {
unless ($cannot_fail) {
unshift(@code, make_op('', 'try_me_else',
tr_code_len(@code)));
- push(@code, make_op(""), make_op("$key", 'fail'));
+ push(@code, make_op("$key", 'fail'));
}
unshift(@code, make_op($comment));
push(@{$gen_transform{$key}}, @code),
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index 91efb4c023..a841f26d6a 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -167,7 +167,6 @@ typedef struct bif_entry {
extern BifEntry bif_table[];
extern Export* bif_export[];
-extern unsigned char erts_bif_trace_flags[];
#define BIF_SIZE $bif_size
@@ -197,7 +196,6 @@ includes("export.h", "sys.h", "erl_vm.h", "erl_process.h", "bif.h",
"erl_bif_table.h", "erl_atom_table.h");
print "\nExport* bif_export[BIF_SIZE];\n";
-print "unsigned char erts_bif_trace_flags[BIF_SIZE];\n\n";
print "BifEntry bif_table[] = {\n";
for ($i = 0; $i < @bif; $i++) {
diff --git a/erts/emulator/valgrind/suppress.halfword b/erts/emulator/valgrind/suppress.halfword
new file mode 100644
index 0000000000..8fe448d897
--- /dev/null
+++ b/erts/emulator/valgrind/suppress.halfword
@@ -0,0 +1,56 @@
+# Extra suppressions specific for the halfword emulator.
+
+# --- Suppress all offheap binaries ---
+# Valgrind's leak check does not recognize pointers that are stored
+# at unaligned addresses. In the halfword emulator we store 64-bit pointers
+# to offheap data on 32-bit aligned heaps.
+# We solve this by suppressing the allocations of all offheap structures
+# that are not referenced from other tables (i.e. binaries).
+
+{
+Halfword erts_bin_nrml_alloc
+Memcheck:Leak
+...
+fun:erts_bin_nrml_alloc
+...
+}
+
+{
+Halfword erts_bin_realloc
+Memcheck:Leak
+...
+fun:erts_bin_realloc
+...
+}
+
+{
+Halfword erts_bin_realloc_fnf
+Memcheck:Leak
+...
+fun:erts_bin_realloc_fnf
+...
+}
+
+{
+Halfword erts_bin_drv_alloc
+Memcheck:Leak
+...
+fun:erts_bin_drv_alloc
+...
+}
+
+{
+Halfword erts_bin_drv_alloc_fnf
+Memcheck:Leak
+...
+fun:erts_bin_drv_alloc_fnf
+...
+}
+
+{
+Halfword erts_create_magic_binary
+Memcheck:Leak
+...
+fun:erts_create_magic_binary
+...
+}
diff --git a/erts/emulator/valgrind/suppress.patched.3.6.0 b/erts/emulator/valgrind/suppress.patched.3.6.0
index 62ba032520..b3507bdba7 100644
--- a/erts/emulator/valgrind/suppress.patched.3.6.0
+++ b/erts/emulator/valgrind/suppress.patched.3.6.0
@@ -133,26 +133,18 @@ fun:pthread_create@@GLIBC_2.2.5
{
zlib; ok according to zlib developers
Memcheck:Cond
-fun:longest_match
+...
fun:deflate_slow
fun:deflate
}
{
zlib; ok according to zlib developers
Memcheck:Cond
-fun:longest_match
+...
fun:deflate_fast
fun:deflate
}
{
-zlib; ok accordnig to zlib (this one popped up with valgrind-3.6.0)
-Memcheck:Cond
-fun:deflate_slow
-fun:deflate
-fun:zlib_deflate
-fun:zlib_ctl
-}
-{
No leak; pointer into block
Memcheck:Leak
fun:malloc
@@ -275,6 +267,14 @@ obj:*/ssleay.*
}
{
+ Harmless assembler bug in openssl
+ Memcheck:Addr8
+ ...
+ fun:AES_cbc_encrypt
+ ...
+}
+
+{
erts_bits_init_state; Why is this needed?
Memcheck:Leak
PossiblyLost
diff --git a/erts/emulator/valgrind/suppress.standard b/erts/emulator/valgrind/suppress.standard
index 5a129bfd10..beecf1a7b5 100644
--- a/erts/emulator/valgrind/suppress.standard
+++ b/erts/emulator/valgrind/suppress.standard
@@ -120,14 +120,14 @@ fun:pthread_create@@GLIBC_2.2.5
{
zlib; ok according to zlib developers
Memcheck:Cond
-fun:longest_match
+...
fun:deflate_slow
fun:deflate
}
{
zlib; ok according to zlib developers
Memcheck:Cond
-fun:longest_match
+...
fun:deflate_fast
fun:deflate
}
@@ -252,6 +252,14 @@ obj:*/ssleay.*
}
{
+ Harmless assembler bug in openssl
+ Memcheck:Addr8
+ ...
+ fun:AES_cbc_encrypt
+ ...
+}
+
+{
Prebuilt constant terms in os_info_init (PossiblyLost)
Memcheck:Leak
fun:malloc