432 files changed, 17177 insertions, 18436 deletions
diff --git a/HOWTO/INSTALL.md b/HOWTO/INSTALL.md
index 36365799e3..f62429a3ff 100644
--- a/HOWTO/INSTALL.md
+++ b/HOWTO/INSTALL.md
@@ -18,9 +18,6 @@ Required Utilities
 
 These are the tools you need in order to unpack and build Erlang/OTP.
 
-> *WARNING*: Please have a look at the [Known platform issues][] chapter
-> before you start.
-
 ### Unpacking ###
 
 * GNU unzip, or a modern uncompress.
@@ -343,12 +340,6 @@ use the `--prefix` argument like this: `./configure --prefix=<Dir>`.
 Some of the available `configure` options are:
 
 * `--prefix=PATH` - Specify installation prefix.
-* `--enable-plain-emulator` - Build a threaded emulator that only
-  uses one scheduler. This emulator type is deprecated and will be
-  removed in a future release.
-* `--disable-threads` - Build a non-threaded emulator. This emulator type
-  is deprecated and will be
-  removed in a future release.
 * `--{enable,disable}-kernel-poll` - Kernel poll support (enabled by
   default if possible)
 * `--{enable,disable}-hipe` - HiPE support (enabled by default on supported
@@ -426,11 +417,6 @@ Some of the available `configure` options are:
   and scalability compared to the default clock sources chosen.
 * `--disable-saved-compile-time` - Disable saving of compile date and time
   in the emulator binary.
-* `--enable-dirty-schedulers` - Enable the **experimental** dirty schedulers
-  functionality. Note that the dirty schedulers functionality is experimental,
-  and **not supported**. This functionality **will** be subject to backward
-  incompatible changes. Note that you should **not** enable the dirty scheduler
-  functionality on production systems. It is only provided for testing.
 
 If you or your system has special requirements please read the `Makefile`
 for additional configuration information.
@@ -575,16 +561,12 @@ as before, but the build process will take a much longer time.
 
 After completing all the normal building steps described above a debug
 enabled runtime system can be built. To do this you have to change
-directory to `$ERL_TOP/erts/emulator`.
-
-In this directory execute:
+directory to `$ERL_TOP/erts/emulator` and execute:
 
-    $ make debug FLAVOR=$FLAVOR
+    $ (cd $ERL_TOP/erts/emulator && make debug)
 
-where `$FLAVOR` is either `plain` or `smp`. The flavor options will
-produce a beam.debug and beam.smp.debug executable respectively. The
-files are installed along side with the normal (opt) versions `beam.smp`
-and `beam`.
+This will produce a beam.smp.debug executable. The
+file are installed along side with the normal (opt) version `beam.smp`.
 
 To start the debug enabled runtime system execute:
 
@@ -598,7 +580,7 @@ using appropriate configure options.
 
 There are other types of runtime systems that can be built as well
 using the similar steps just described.
 
-    $ make $TYPE FLAVOR=$FLAVOR
+    $ (cd $ERL_TOP/erts/emulator && make $TYPE)
 
 where `$TYPE` is `opt`, `gcov`, `gprof`, `debug`, `valgrind`, or `lcnt`.
 These different beam types are useful for debugging and profiling
@@ -794,7 +776,6 @@ Use `hipe:help_options/0` to print out the available options.
[man pages]: http://www.erlang.org/download/otp_doc_man_%OTP-VSN%.tar.gz [the released source tar ball]: http://www.erlang.org/download/otp_src_%OTP-VSN%.tar.gz [System Principles]: ../system_principles/system_principles - [Known platform issues]: #Known-platform-issues [native build]: #How-to-Build-and-Install-ErlangOTP [cross build]: INSTALL-CROSS.md [Required Utilities]: #Required-Utilities diff --git a/Makefile.in b/Makefile.in index 6b5ce8c53f..b3ab11d29a 100644 --- a/Makefile.in +++ b/Makefile.in @@ -316,6 +316,7 @@ endif # The steps to build a working system are: # * build an emulator # * setup the erl and erlc program in bootstrap/bin +# * optionally run pgo and build optimized emulator # * build additional compilers and copy them into bootstrap/lib # * use the bootstrap erl and erlc to build all the libs # @@ -396,7 +397,7 @@ else endif cd $(ERL_TOP)/erts && \ ERL_TOP=$(ERL_TOP) PATH=$(INST_PATH_PREFIX)"$${PATH}" \ - $(MAKE) BUILD_ALL=1 TESTROOT="$(RELEASE_ROOT)" release + $(MAKE) BUILD_ALL=1 PROFILE=$(PROFILE) TESTROOT="$(RELEASE_ROOT)" release ifeq ($(RELEASE_ROOT),) $(INSTALL_DATA) "$(ERL_TOP)/OTP_VERSION" "$(OTP_DEFAULT_RELEASE_PATH)/releases/@OTP_REL@" else @@ -438,10 +439,24 @@ BOOT_BINDIR=$(BOOTSTRAP_ROOT)/bootstrap/erts/bin BEAM_EVM=$(ERL_TOP)/bin/$(TARGET)/beam_evm BOOTSTRAP_COMPILER = $(BOOTSTRAP_TOP)/primary_compiler +# otp.mk is only used to figure out if we are doing PGO or not +include $(ERL_TOP)/make/$(TARGET)/otp.mk + .PHONY: emulator libs kernel stdlib compiler hipe syntax_tools preloaded -emulator: - $(make_verbose)cd erts && ERL_TOP=$(ERL_TOP) $(MAKE) NO_START_SCRIPTS=true $(TYPE) FLAVOR=$(FLAVOR) +ifeq ($(USE_PGO), true) +PROFILE=use +PROFILE_EMU_DEPS=emulator_profile_generate bootstrap_setup +emulator_profile_generate: + $(make_verbose)cd erts && ERL_TOP=$(ERL_TOP) $(MAKE) NO_START_SCRIPTS=true $(TYPE) FLAVOR=$(FLAVOR) PROFILE=generate +else +PROFILE= +PROFILE_EMU_DEPS= +endif + +emulator: $(PROFILE_EMU_DEPS) + $(make_verbose)cd erts && ERL_TOP=$(ERL_TOP) PATH=$(BOOT_PREFIX)"$${PATH}" \ + $(MAKE) NO_START_SCRIPTS=true $(TYPE) FLAVOR=$(FLAVOR) PROFILE=$(PROFILE) libs: ifeq ($(OTP_SMALL_BUILD),true) @@ -1007,7 +1022,7 @@ install-docs: install.emulator: cd erts && \ ERL_TOP=$(ERL_TOP) PATH=$(INST_PATH_PREFIX)"$${PATH}" \ - $(MAKE) TESTROOT="$(ERLANG_LIBDIR)" release + $(MAKE) PROFILE=$(PROFILE) TESTROOT="$(ERLANG_LIBDIR)" release install.libs: ifeq ($(OTP_SMALL_BUILD),true) diff --git a/OTP_VERSION b/OTP_VERSION index b09651c1d0..06d4ac2bfd 100644 --- a/OTP_VERSION +++ b/OTP_VERSION @@ -1 +1 @@ -20.0.5 +21.0-rc0 diff --git a/bootstrap/bin/start.boot b/bootstrap/bin/start.boot Binary files differindex c5fa075a18..0fa43faa1f 100644 --- a/bootstrap/bin/start.boot +++ b/bootstrap/bin/start.boot diff --git a/bootstrap/bin/start_clean.boot b/bootstrap/bin/start_clean.boot Binary files differindex c5fa075a18..0fa43faa1f 100644 --- a/bootstrap/bin/start_clean.boot +++ b/bootstrap/bin/start_clean.boot diff --git a/bootstrap/lib/compiler/ebin/beam_block.beam b/bootstrap/lib/compiler/ebin/beam_block.beam Binary files differindex 84e6e64efc..f2d8c1c51d 100644 --- a/bootstrap/lib/compiler/ebin/beam_block.beam +++ b/bootstrap/lib/compiler/ebin/beam_block.beam diff --git a/bootstrap/lib/compiler/ebin/beam_bs.beam b/bootstrap/lib/compiler/ebin/beam_bs.beam Binary files differindex e9be7763ad..6a5532ea9e 100644 --- a/bootstrap/lib/compiler/ebin/beam_bs.beam +++ b/bootstrap/lib/compiler/ebin/beam_bs.beam diff --git a/bootstrap/lib/compiler/ebin/beam_clean.beam 
b/bootstrap/lib/compiler/ebin/beam_clean.beam Binary files differindex b6c47725c6..0985c13769 100644 --- a/bootstrap/lib/compiler/ebin/beam_clean.beam +++ b/bootstrap/lib/compiler/ebin/beam_clean.beam diff --git a/bootstrap/lib/compiler/ebin/beam_dead.beam b/bootstrap/lib/compiler/ebin/beam_dead.beam Binary files differindex 088898eea3..46e7e4c530 100644 --- a/bootstrap/lib/compiler/ebin/beam_dead.beam +++ b/bootstrap/lib/compiler/ebin/beam_dead.beam diff --git a/bootstrap/lib/compiler/ebin/beam_disasm.beam b/bootstrap/lib/compiler/ebin/beam_disasm.beam Binary files differindex 3b9b0bba18..132b756895 100644 --- a/bootstrap/lib/compiler/ebin/beam_disasm.beam +++ b/bootstrap/lib/compiler/ebin/beam_disasm.beam diff --git a/bootstrap/lib/compiler/ebin/beam_jump.beam b/bootstrap/lib/compiler/ebin/beam_jump.beam Binary files differindex 7b66277f10..6840f1e750 100644 --- a/bootstrap/lib/compiler/ebin/beam_jump.beam +++ b/bootstrap/lib/compiler/ebin/beam_jump.beam diff --git a/bootstrap/lib/compiler/ebin/beam_peep.beam b/bootstrap/lib/compiler/ebin/beam_peep.beam Binary files differindex a26cb84590..2bb2df33a3 100644 --- a/bootstrap/lib/compiler/ebin/beam_peep.beam +++ b/bootstrap/lib/compiler/ebin/beam_peep.beam diff --git a/bootstrap/lib/compiler/ebin/beam_receive.beam b/bootstrap/lib/compiler/ebin/beam_receive.beam Binary files differindex 6e864e4837..1152825b01 100644 --- a/bootstrap/lib/compiler/ebin/beam_receive.beam +++ b/bootstrap/lib/compiler/ebin/beam_receive.beam diff --git a/bootstrap/lib/compiler/ebin/beam_record.beam b/bootstrap/lib/compiler/ebin/beam_record.beam Binary files differindex 7b855184fb..6ca907ac26 100644 --- a/bootstrap/lib/compiler/ebin/beam_record.beam +++ b/bootstrap/lib/compiler/ebin/beam_record.beam diff --git a/bootstrap/lib/compiler/ebin/beam_reorder.beam b/bootstrap/lib/compiler/ebin/beam_reorder.beam Binary files differindex 4b1c7f6d15..1d5f5e3dcd 100644 --- a/bootstrap/lib/compiler/ebin/beam_reorder.beam +++ b/bootstrap/lib/compiler/ebin/beam_reorder.beam diff --git a/bootstrap/lib/compiler/ebin/beam_split.beam b/bootstrap/lib/compiler/ebin/beam_split.beam Binary files differindex 4202961791..12c532b465 100644 --- a/bootstrap/lib/compiler/ebin/beam_split.beam +++ b/bootstrap/lib/compiler/ebin/beam_split.beam diff --git a/bootstrap/lib/compiler/ebin/beam_trim.beam b/bootstrap/lib/compiler/ebin/beam_trim.beam Binary files differindex 1aa648532b..5338e9079f 100644 --- a/bootstrap/lib/compiler/ebin/beam_trim.beam +++ b/bootstrap/lib/compiler/ebin/beam_trim.beam diff --git a/bootstrap/lib/compiler/ebin/beam_type.beam b/bootstrap/lib/compiler/ebin/beam_type.beam Binary files differindex 1a4bdd5c5e..d0e6da02a8 100644 --- a/bootstrap/lib/compiler/ebin/beam_type.beam +++ b/bootstrap/lib/compiler/ebin/beam_type.beam diff --git a/bootstrap/lib/compiler/ebin/beam_utils.beam b/bootstrap/lib/compiler/ebin/beam_utils.beam Binary files differindex 5a14b35026..0f44d8e4d7 100644 --- a/bootstrap/lib/compiler/ebin/beam_utils.beam +++ b/bootstrap/lib/compiler/ebin/beam_utils.beam diff --git a/bootstrap/lib/compiler/ebin/beam_validator.beam b/bootstrap/lib/compiler/ebin/beam_validator.beam Binary files differindex c9455c28e7..0e97168da7 100644 --- a/bootstrap/lib/compiler/ebin/beam_validator.beam +++ b/bootstrap/lib/compiler/ebin/beam_validator.beam diff --git a/bootstrap/lib/compiler/ebin/beam_z.beam b/bootstrap/lib/compiler/ebin/beam_z.beam Binary files differindex 991226cc18..71db427c29 100644 --- a/bootstrap/lib/compiler/ebin/beam_z.beam +++ 
b/bootstrap/lib/compiler/ebin/beam_z.beam diff --git a/bootstrap/lib/compiler/ebin/cerl.beam b/bootstrap/lib/compiler/ebin/cerl.beam Binary files differindex 2af5f13b49..0ce3fbe876 100644 --- a/bootstrap/lib/compiler/ebin/cerl.beam +++ b/bootstrap/lib/compiler/ebin/cerl.beam diff --git a/bootstrap/lib/compiler/ebin/cerl_inline.beam b/bootstrap/lib/compiler/ebin/cerl_inline.beam Binary files differindex 61edb6b3df..0fa0d82191 100644 --- a/bootstrap/lib/compiler/ebin/cerl_inline.beam +++ b/bootstrap/lib/compiler/ebin/cerl_inline.beam diff --git a/bootstrap/lib/compiler/ebin/compile.beam b/bootstrap/lib/compiler/ebin/compile.beam Binary files differindex c50f648238..a92341690d 100644 --- a/bootstrap/lib/compiler/ebin/compile.beam +++ b/bootstrap/lib/compiler/ebin/compile.beam diff --git a/bootstrap/lib/compiler/ebin/compiler.app b/bootstrap/lib/compiler/ebin/compiler.app index 7de02551fe..b2ca1a8cad 100644 --- a/bootstrap/lib/compiler/ebin/compiler.app +++ b/bootstrap/lib/compiler/ebin/compiler.app @@ -58,6 +58,7 @@ core_lib, erl_bifs, rec_env, + sys_core_alias, sys_core_bsm, sys_core_dsetel, sys_core_fold, diff --git a/bootstrap/lib/compiler/ebin/compiler.appup b/bootstrap/lib/compiler/ebin/compiler.appup index bfea67c6dd..277b35faa8 100644 --- a/bootstrap/lib/compiler/ebin/compiler.appup +++ b/bootstrap/lib/compiler/ebin/compiler.appup @@ -16,7 +16,7 @@ %% limitations under the License. %% %% %CopyrightEnd% -{"7.0.4", +{"7.1.1", [{<<".*">>,[{restart_application, compiler}]}], [{<<".*">>,[{restart_application, compiler}]}] }. diff --git a/bootstrap/lib/compiler/ebin/core_lint.beam b/bootstrap/lib/compiler/ebin/core_lint.beam Binary files differindex f7fb759eb0..9c5dee5418 100644 --- a/bootstrap/lib/compiler/ebin/core_lint.beam +++ b/bootstrap/lib/compiler/ebin/core_lint.beam diff --git a/bootstrap/lib/compiler/ebin/core_parse.beam b/bootstrap/lib/compiler/ebin/core_parse.beam Binary files differindex dda2d59d7c..f95a6fca4e 100644 --- a/bootstrap/lib/compiler/ebin/core_parse.beam +++ b/bootstrap/lib/compiler/ebin/core_parse.beam diff --git a/bootstrap/lib/compiler/ebin/core_scan.beam b/bootstrap/lib/compiler/ebin/core_scan.beam Binary files differindex bec935bc5b..e9f74a1d1c 100644 --- a/bootstrap/lib/compiler/ebin/core_scan.beam +++ b/bootstrap/lib/compiler/ebin/core_scan.beam diff --git a/bootstrap/lib/compiler/ebin/erl_bifs.beam b/bootstrap/lib/compiler/ebin/erl_bifs.beam Binary files differindex 6e3aad89df..060c6571af 100644 --- a/bootstrap/lib/compiler/ebin/erl_bifs.beam +++ b/bootstrap/lib/compiler/ebin/erl_bifs.beam diff --git a/bootstrap/lib/compiler/ebin/rec_env.beam b/bootstrap/lib/compiler/ebin/rec_env.beam Binary files differindex 792fdeafc5..17cc7fec75 100644 --- a/bootstrap/lib/compiler/ebin/rec_env.beam +++ b/bootstrap/lib/compiler/ebin/rec_env.beam diff --git a/bootstrap/lib/compiler/ebin/sys_core_alias.beam b/bootstrap/lib/compiler/ebin/sys_core_alias.beam Binary files differnew file mode 100644 index 0000000000..d1fe157419 --- /dev/null +++ b/bootstrap/lib/compiler/ebin/sys_core_alias.beam diff --git a/bootstrap/lib/compiler/ebin/sys_core_bsm.beam b/bootstrap/lib/compiler/ebin/sys_core_bsm.beam Binary files differindex f343655448..d5e06493c5 100644 --- a/bootstrap/lib/compiler/ebin/sys_core_bsm.beam +++ b/bootstrap/lib/compiler/ebin/sys_core_bsm.beam diff --git a/bootstrap/lib/compiler/ebin/sys_core_dsetel.beam b/bootstrap/lib/compiler/ebin/sys_core_dsetel.beam Binary files differindex 121f2ebdd5..afe4cd4517 100644 --- a/bootstrap/lib/compiler/ebin/sys_core_dsetel.beam +++ 
b/bootstrap/lib/compiler/ebin/sys_core_dsetel.beam diff --git a/bootstrap/lib/compiler/ebin/sys_core_fold.beam b/bootstrap/lib/compiler/ebin/sys_core_fold.beam Binary files differindex 38ed7f6fa0..dfedc09f49 100644 --- a/bootstrap/lib/compiler/ebin/sys_core_fold.beam +++ b/bootstrap/lib/compiler/ebin/sys_core_fold.beam diff --git a/bootstrap/lib/compiler/ebin/sys_core_fold_lists.beam b/bootstrap/lib/compiler/ebin/sys_core_fold_lists.beam Binary files differindex 3f72043a3f..092ac1242d 100644 --- a/bootstrap/lib/compiler/ebin/sys_core_fold_lists.beam +++ b/bootstrap/lib/compiler/ebin/sys_core_fold_lists.beam diff --git a/bootstrap/lib/compiler/ebin/v3_codegen.beam b/bootstrap/lib/compiler/ebin/v3_codegen.beam Binary files differindex b2e91c3907..c6c1593790 100644 --- a/bootstrap/lib/compiler/ebin/v3_codegen.beam +++ b/bootstrap/lib/compiler/ebin/v3_codegen.beam diff --git a/bootstrap/lib/compiler/ebin/v3_core.beam b/bootstrap/lib/compiler/ebin/v3_core.beam Binary files differindex 539f5f2e61..59717eae84 100644 --- a/bootstrap/lib/compiler/ebin/v3_core.beam +++ b/bootstrap/lib/compiler/ebin/v3_core.beam diff --git a/bootstrap/lib/compiler/ebin/v3_kernel.beam b/bootstrap/lib/compiler/ebin/v3_kernel.beam Binary files differindex a5e95c8ecc..6143af9050 100644 --- a/bootstrap/lib/compiler/ebin/v3_kernel.beam +++ b/bootstrap/lib/compiler/ebin/v3_kernel.beam diff --git a/bootstrap/lib/kernel/ebin/application.beam b/bootstrap/lib/kernel/ebin/application.beam Binary files differindex 31c8cdb84c..05ecd20859 100644 --- a/bootstrap/lib/kernel/ebin/application.beam +++ b/bootstrap/lib/kernel/ebin/application.beam diff --git a/bootstrap/lib/kernel/ebin/application_controller.beam b/bootstrap/lib/kernel/ebin/application_controller.beam Binary files differindex c82ed7443d..2f480b5e37 100644 --- a/bootstrap/lib/kernel/ebin/application_controller.beam +++ b/bootstrap/lib/kernel/ebin/application_controller.beam diff --git a/bootstrap/lib/kernel/ebin/application_master.beam b/bootstrap/lib/kernel/ebin/application_master.beam Binary files differindex b76b4e4877..59606bc90d 100644 --- a/bootstrap/lib/kernel/ebin/application_master.beam +++ b/bootstrap/lib/kernel/ebin/application_master.beam diff --git a/bootstrap/lib/kernel/ebin/auth.beam b/bootstrap/lib/kernel/ebin/auth.beam Binary files differindex 612c23a653..f1126ac7c8 100644 --- a/bootstrap/lib/kernel/ebin/auth.beam +++ b/bootstrap/lib/kernel/ebin/auth.beam diff --git a/bootstrap/lib/kernel/ebin/code.beam b/bootstrap/lib/kernel/ebin/code.beam Binary files differindex 412d341d9e..418816a351 100644 --- a/bootstrap/lib/kernel/ebin/code.beam +++ b/bootstrap/lib/kernel/ebin/code.beam diff --git a/bootstrap/lib/kernel/ebin/code_server.beam b/bootstrap/lib/kernel/ebin/code_server.beam Binary files differindex f76e90cefb..536da5c692 100644 --- a/bootstrap/lib/kernel/ebin/code_server.beam +++ b/bootstrap/lib/kernel/ebin/code_server.beam diff --git a/bootstrap/lib/kernel/ebin/disk_log.beam b/bootstrap/lib/kernel/ebin/disk_log.beam Binary files differindex 7d4aee71ce..4dc0ef29f2 100644 --- a/bootstrap/lib/kernel/ebin/disk_log.beam +++ b/bootstrap/lib/kernel/ebin/disk_log.beam diff --git a/bootstrap/lib/kernel/ebin/disk_log_1.beam b/bootstrap/lib/kernel/ebin/disk_log_1.beam Binary files differindex 4b5ad17e71..420a4e818b 100644 --- a/bootstrap/lib/kernel/ebin/disk_log_1.beam +++ b/bootstrap/lib/kernel/ebin/disk_log_1.beam diff --git a/bootstrap/lib/kernel/ebin/disk_log_server.beam b/bootstrap/lib/kernel/ebin/disk_log_server.beam Binary files differindex 
68d1be71a7..96bad0104e 100644 --- a/bootstrap/lib/kernel/ebin/disk_log_server.beam +++ b/bootstrap/lib/kernel/ebin/disk_log_server.beam diff --git a/bootstrap/lib/kernel/ebin/dist_ac.beam b/bootstrap/lib/kernel/ebin/dist_ac.beam Binary files differindex ee99f41a29..bcc27081b2 100644 --- a/bootstrap/lib/kernel/ebin/dist_ac.beam +++ b/bootstrap/lib/kernel/ebin/dist_ac.beam diff --git a/bootstrap/lib/kernel/ebin/dist_util.beam b/bootstrap/lib/kernel/ebin/dist_util.beam Binary files differindex 6376d791cf..fd4e4fb5de 100644 --- a/bootstrap/lib/kernel/ebin/dist_util.beam +++ b/bootstrap/lib/kernel/ebin/dist_util.beam diff --git a/bootstrap/lib/kernel/ebin/erl_ddll.beam b/bootstrap/lib/kernel/ebin/erl_ddll.beam Binary files differindex 6137ab9dd7..940a74c1c3 100644 --- a/bootstrap/lib/kernel/ebin/erl_ddll.beam +++ b/bootstrap/lib/kernel/ebin/erl_ddll.beam diff --git a/bootstrap/lib/kernel/ebin/erl_epmd.beam b/bootstrap/lib/kernel/ebin/erl_epmd.beam Binary files differindex 22725cc590..717d7937da 100644 --- a/bootstrap/lib/kernel/ebin/erl_epmd.beam +++ b/bootstrap/lib/kernel/ebin/erl_epmd.beam diff --git a/bootstrap/lib/kernel/ebin/error_logger.beam b/bootstrap/lib/kernel/ebin/error_logger.beam Binary files differindex 2881ba0e9f..4129e6af6e 100644 --- a/bootstrap/lib/kernel/ebin/error_logger.beam +++ b/bootstrap/lib/kernel/ebin/error_logger.beam diff --git a/bootstrap/lib/kernel/ebin/file.beam b/bootstrap/lib/kernel/ebin/file.beam Binary files differindex e75200dbe3..6d45887800 100644 --- a/bootstrap/lib/kernel/ebin/file.beam +++ b/bootstrap/lib/kernel/ebin/file.beam diff --git a/bootstrap/lib/kernel/ebin/file_io_server.beam b/bootstrap/lib/kernel/ebin/file_io_server.beam Binary files differindex 8d34178122..d0279971b0 100644 --- a/bootstrap/lib/kernel/ebin/file_io_server.beam +++ b/bootstrap/lib/kernel/ebin/file_io_server.beam diff --git a/bootstrap/lib/kernel/ebin/file_server.beam b/bootstrap/lib/kernel/ebin/file_server.beam Binary files differindex 5a12e2544f..d609725fa8 100644 --- a/bootstrap/lib/kernel/ebin/file_server.beam +++ b/bootstrap/lib/kernel/ebin/file_server.beam diff --git a/bootstrap/lib/kernel/ebin/gen_tcp.beam b/bootstrap/lib/kernel/ebin/gen_tcp.beam Binary files differindex 99ae2b0e6a..9d9828c266 100644 --- a/bootstrap/lib/kernel/ebin/gen_tcp.beam +++ b/bootstrap/lib/kernel/ebin/gen_tcp.beam diff --git a/bootstrap/lib/kernel/ebin/global.beam b/bootstrap/lib/kernel/ebin/global.beam Binary files differindex 9f2c77e693..ff9a409bf8 100644 --- a/bootstrap/lib/kernel/ebin/global.beam +++ b/bootstrap/lib/kernel/ebin/global.beam diff --git a/bootstrap/lib/kernel/ebin/global_group.beam b/bootstrap/lib/kernel/ebin/global_group.beam Binary files differindex b98a472cac..ef0913f98d 100644 --- a/bootstrap/lib/kernel/ebin/global_group.beam +++ b/bootstrap/lib/kernel/ebin/global_group.beam diff --git a/bootstrap/lib/kernel/ebin/group.beam b/bootstrap/lib/kernel/ebin/group.beam Binary files differindex 99101fecfc..57851fcb6e 100644 --- a/bootstrap/lib/kernel/ebin/group.beam +++ b/bootstrap/lib/kernel/ebin/group.beam diff --git a/bootstrap/lib/kernel/ebin/group_history.beam b/bootstrap/lib/kernel/ebin/group_history.beam Binary files differindex 1def2a5d42..9328355025 100644 --- a/bootstrap/lib/kernel/ebin/group_history.beam +++ b/bootstrap/lib/kernel/ebin/group_history.beam diff --git a/bootstrap/lib/kernel/ebin/hipe_unified_loader.beam b/bootstrap/lib/kernel/ebin/hipe_unified_loader.beam Binary files differindex 06998f8dd5..2682b088ea 100644 --- 
a/bootstrap/lib/kernel/ebin/hipe_unified_loader.beam +++ b/bootstrap/lib/kernel/ebin/hipe_unified_loader.beam diff --git a/bootstrap/lib/kernel/ebin/inet.beam b/bootstrap/lib/kernel/ebin/inet.beam Binary files differindex c4cfa54be3..667c67acb8 100644 --- a/bootstrap/lib/kernel/ebin/inet.beam +++ b/bootstrap/lib/kernel/ebin/inet.beam diff --git a/bootstrap/lib/kernel/ebin/inet6_tcp.beam b/bootstrap/lib/kernel/ebin/inet6_tcp.beam Binary files differindex 2aada7f95b..7a2649e560 100644 --- a/bootstrap/lib/kernel/ebin/inet6_tcp.beam +++ b/bootstrap/lib/kernel/ebin/inet6_tcp.beam diff --git a/bootstrap/lib/kernel/ebin/inet_db.beam b/bootstrap/lib/kernel/ebin/inet_db.beam Binary files differindex a6843431fb..e2b5639720 100644 --- a/bootstrap/lib/kernel/ebin/inet_db.beam +++ b/bootstrap/lib/kernel/ebin/inet_db.beam diff --git a/bootstrap/lib/kernel/ebin/inet_dns.beam b/bootstrap/lib/kernel/ebin/inet_dns.beam Binary files differindex 4cd63fb349..cebe72a7f0 100644 --- a/bootstrap/lib/kernel/ebin/inet_dns.beam +++ b/bootstrap/lib/kernel/ebin/inet_dns.beam diff --git a/bootstrap/lib/kernel/ebin/inet_parse.beam b/bootstrap/lib/kernel/ebin/inet_parse.beam Binary files differindex e6b9d07494..a5b4d7611f 100644 --- a/bootstrap/lib/kernel/ebin/inet_parse.beam +++ b/bootstrap/lib/kernel/ebin/inet_parse.beam diff --git a/bootstrap/lib/kernel/ebin/inet_res.beam b/bootstrap/lib/kernel/ebin/inet_res.beam Binary files differindex 826b5c4030..74b3a11f59 100644 --- a/bootstrap/lib/kernel/ebin/inet_res.beam +++ b/bootstrap/lib/kernel/ebin/inet_res.beam diff --git a/bootstrap/lib/kernel/ebin/inet_tcp.beam b/bootstrap/lib/kernel/ebin/inet_tcp.beam Binary files differindex 6199354874..8453fe15e8 100644 --- a/bootstrap/lib/kernel/ebin/inet_tcp.beam +++ b/bootstrap/lib/kernel/ebin/inet_tcp.beam diff --git a/bootstrap/lib/kernel/ebin/inet_tcp_dist.beam b/bootstrap/lib/kernel/ebin/inet_tcp_dist.beam Binary files differindex f8a95de32c..f25d7689f5 100644 --- a/bootstrap/lib/kernel/ebin/inet_tcp_dist.beam +++ b/bootstrap/lib/kernel/ebin/inet_tcp_dist.beam diff --git a/bootstrap/lib/kernel/ebin/kernel.appup b/bootstrap/lib/kernel/ebin/kernel.appup index d822674939..519e83f7e4 100644 --- a/bootstrap/lib/kernel/ebin/kernel.appup +++ b/bootstrap/lib/kernel/ebin/kernel.appup @@ -18,7 +18,7 @@ %% %CopyrightEnd% {"5.3.1", %% Up from - max one major revision back - [{<<"5\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-19.*, OTP-20.0 + [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.* %% Down to - max one major revision back - [{<<"5\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-19.*, OTP-20.0 + [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.* }. 
diff --git a/bootstrap/lib/kernel/ebin/kernel_config.beam b/bootstrap/lib/kernel/ebin/kernel_config.beam Binary files differindex d669a101a0..c557c45967 100644 --- a/bootstrap/lib/kernel/ebin/kernel_config.beam +++ b/bootstrap/lib/kernel/ebin/kernel_config.beam diff --git a/bootstrap/lib/kernel/ebin/local_tcp.beam b/bootstrap/lib/kernel/ebin/local_tcp.beam Binary files differindex 151ec3cd4e..a21fed9c88 100644 --- a/bootstrap/lib/kernel/ebin/local_tcp.beam +++ b/bootstrap/lib/kernel/ebin/local_tcp.beam diff --git a/bootstrap/lib/kernel/ebin/net_adm.beam b/bootstrap/lib/kernel/ebin/net_adm.beam Binary files differindex 0597590966..f50856393d 100644 --- a/bootstrap/lib/kernel/ebin/net_adm.beam +++ b/bootstrap/lib/kernel/ebin/net_adm.beam diff --git a/bootstrap/lib/kernel/ebin/net_kernel.beam b/bootstrap/lib/kernel/ebin/net_kernel.beam Binary files differindex b3c5f4ba29..3d59085f05 100644 --- a/bootstrap/lib/kernel/ebin/net_kernel.beam +++ b/bootstrap/lib/kernel/ebin/net_kernel.beam diff --git a/bootstrap/lib/kernel/ebin/ram_file.beam b/bootstrap/lib/kernel/ebin/ram_file.beam Binary files differindex c867e48002..8f4e066f3a 100644 --- a/bootstrap/lib/kernel/ebin/ram_file.beam +++ b/bootstrap/lib/kernel/ebin/ram_file.beam diff --git a/bootstrap/lib/kernel/ebin/user.beam b/bootstrap/lib/kernel/ebin/user.beam Binary files differindex cf4e55254c..947bc0f642 100644 --- a/bootstrap/lib/kernel/ebin/user.beam +++ b/bootstrap/lib/kernel/ebin/user.beam diff --git a/bootstrap/lib/kernel/ebin/user_drv.beam b/bootstrap/lib/kernel/ebin/user_drv.beam Binary files differindex b204dced59..b6ac29d5c4 100644 --- a/bootstrap/lib/kernel/ebin/user_drv.beam +++ b/bootstrap/lib/kernel/ebin/user_drv.beam diff --git a/bootstrap/lib/kernel/include/dist.hrl b/bootstrap/lib/kernel/include/dist.hrl index d6bccdf474..db4a5eaebc 100644 --- a/bootstrap/lib/kernel/include/dist.hrl +++ b/bootstrap/lib/kernel/include/dist.hrl @@ -40,3 +40,33 @@ -define(DFLAG_UTF8_ATOMS, 16#10000). -define(DFLAG_MAP_TAG, 16#20000). -define(DFLAG_BIG_CREATION, 16#40000). +-define(DFLAG_SEND_SENDER, 16#80000). + +%% DFLAGs that require strict ordering or:ed together... +-define(DFLAGS_STRICT_ORDER_DELIVERY, + ?DFLAG_DIST_HDR_ATOM_CACHE). + + +%% Also update dflag2str() in ../src/dist_util.erl +%% when adding flags... + +-define(DFLAGS_ALL, + (?DFLAG_PUBLISHED + bor ?DFLAG_ATOM_CACHE + bor ?DFLAG_EXTENDED_REFERENCES + bor ?DFLAG_DIST_MONITOR + bor ?DFLAG_FUN_TAGS + bor ?DFLAG_DIST_MONITOR_NAME + bor ?DFLAG_HIDDEN_ATOM_CACHE + bor ?DFLAG_NEW_FUN_TAGS + bor ?DFLAG_EXTENDED_PIDS_PORTS + bor ?DFLAG_EXPORT_PTR_TAG + bor ?DFLAG_BIT_BINARIES + bor ?DFLAG_NEW_FLOATS + bor ?DFLAG_UNICODE_IO + bor ?DFLAG_DIST_HDR_ATOM_CACHE + bor ?DFLAG_SMALL_ATOM_TAGS + bor ?DFLAG_UTF8_ATOMS + bor ?DFLAG_MAP_TAG + bor ?DFLAG_BIG_CREATION + bor ?DFLAG_SEND_SENDER)). diff --git a/bootstrap/lib/kernel/include/dist_util.hrl b/bootstrap/lib/kernel/include/dist_util.hrl index e3d2fe0eb6..eeb0f8dd43 100644 --- a/bootstrap/lib/kernel/include/dist_util.hrl +++ b/bootstrap/lib/kernel/include/dist_util.hrl @@ -29,9 +29,9 @@ -endif. -ifdef(dist_trace). --define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:timestamp(),node(),lists:flatten(io_lib:format(Fmt, Args))])). +-define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])). 
% Use the one below for config-file (early boot) connection tracing -%-define(trace(Fmt,Args), erlang:display([erlang:now(),node(),lists:flatten(io_lib:format(Fmt, Args))])). +%-define(trace(Fmt,Args), erlang:display([erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])). -define(trace_factor,8). -else. -define(trace(Fmt,Args), ok). @@ -78,7 +78,13 @@ %% New in kernel-5.1 (OTP 19.1): mf_setopts, %% netkernel:setopts on active connection - mf_getopts %% netkernel:getopts on active connection + mf_getopts, %% netkernel:getopts on active connection + + %% New in kernel-6.0 (OTP 21.0) + f_handshake_complete, %% Notify handshake complete + add_flags, %% dflags to add + reject_flags, %% dflags not to use (not all can be rejected) + require_flags %% dflags that are required }). diff --git a/bootstrap/lib/stdlib/ebin/beam_lib.beam b/bootstrap/lib/stdlib/ebin/beam_lib.beam Binary files differindex 3625f9349e..8255fc6f82 100644 --- a/bootstrap/lib/stdlib/ebin/beam_lib.beam +++ b/bootstrap/lib/stdlib/ebin/beam_lib.beam diff --git a/bootstrap/lib/stdlib/ebin/binary.beam b/bootstrap/lib/stdlib/ebin/binary.beam Binary files differindex 64c0538908..70c0873a8e 100644 --- a/bootstrap/lib/stdlib/ebin/binary.beam +++ b/bootstrap/lib/stdlib/ebin/binary.beam diff --git a/bootstrap/lib/stdlib/ebin/c.beam b/bootstrap/lib/stdlib/ebin/c.beam Binary files differindex 0af4164bf5..9e30687b57 100644 --- a/bootstrap/lib/stdlib/ebin/c.beam +++ b/bootstrap/lib/stdlib/ebin/c.beam diff --git a/bootstrap/lib/stdlib/ebin/calendar.beam b/bootstrap/lib/stdlib/ebin/calendar.beam Binary files differindex 62e51ef137..fb7b882218 100644 --- a/bootstrap/lib/stdlib/ebin/calendar.beam +++ b/bootstrap/lib/stdlib/ebin/calendar.beam diff --git a/bootstrap/lib/stdlib/ebin/dets.beam b/bootstrap/lib/stdlib/ebin/dets.beam Binary files differindex ee5786ea39..603110cbf8 100644 --- a/bootstrap/lib/stdlib/ebin/dets.beam +++ b/bootstrap/lib/stdlib/ebin/dets.beam diff --git a/bootstrap/lib/stdlib/ebin/dets_server.beam b/bootstrap/lib/stdlib/ebin/dets_server.beam Binary files differindex a447af9bc4..d67eda05c6 100644 --- a/bootstrap/lib/stdlib/ebin/dets_server.beam +++ b/bootstrap/lib/stdlib/ebin/dets_server.beam diff --git a/bootstrap/lib/stdlib/ebin/dets_utils.beam b/bootstrap/lib/stdlib/ebin/dets_utils.beam Binary files differindex ab06a2b8b2..6840be1e57 100644 --- a/bootstrap/lib/stdlib/ebin/dets_utils.beam +++ b/bootstrap/lib/stdlib/ebin/dets_utils.beam diff --git a/bootstrap/lib/stdlib/ebin/dets_v9.beam b/bootstrap/lib/stdlib/ebin/dets_v9.beam Binary files differindex 6eb576ec50..567018d680 100644 --- a/bootstrap/lib/stdlib/ebin/dets_v9.beam +++ b/bootstrap/lib/stdlib/ebin/dets_v9.beam diff --git a/bootstrap/lib/stdlib/ebin/digraph.beam b/bootstrap/lib/stdlib/ebin/digraph.beam Binary files differindex 0a2e8f20b4..7434821dda 100644 --- a/bootstrap/lib/stdlib/ebin/digraph.beam +++ b/bootstrap/lib/stdlib/ebin/digraph.beam diff --git a/bootstrap/lib/stdlib/ebin/edlin.beam b/bootstrap/lib/stdlib/ebin/edlin.beam Binary files differindex d6a9bd1814..b6f5e62671 100644 --- a/bootstrap/lib/stdlib/ebin/edlin.beam +++ b/bootstrap/lib/stdlib/ebin/edlin.beam diff --git a/bootstrap/lib/stdlib/ebin/epp.beam b/bootstrap/lib/stdlib/ebin/epp.beam Binary files differindex b5c98392d5..36c2b2219e 100644 --- a/bootstrap/lib/stdlib/ebin/epp.beam +++ b/bootstrap/lib/stdlib/ebin/epp.beam diff --git a/bootstrap/lib/stdlib/ebin/erl_anno.beam 
b/bootstrap/lib/stdlib/ebin/erl_anno.beam Binary files differindex 716a2b63df..11bd496e2e 100644 --- a/bootstrap/lib/stdlib/ebin/erl_anno.beam +++ b/bootstrap/lib/stdlib/ebin/erl_anno.beam diff --git a/bootstrap/lib/stdlib/ebin/erl_eval.beam b/bootstrap/lib/stdlib/ebin/erl_eval.beam Binary files differindex 37832662df..9244cff669 100644 --- a/bootstrap/lib/stdlib/ebin/erl_eval.beam +++ b/bootstrap/lib/stdlib/ebin/erl_eval.beam diff --git a/bootstrap/lib/stdlib/ebin/erl_expand_records.beam b/bootstrap/lib/stdlib/ebin/erl_expand_records.beam Binary files differindex 902a5d545a..146f0cc6bd 100644 --- a/bootstrap/lib/stdlib/ebin/erl_expand_records.beam +++ b/bootstrap/lib/stdlib/ebin/erl_expand_records.beam diff --git a/bootstrap/lib/stdlib/ebin/erl_internal.beam b/bootstrap/lib/stdlib/ebin/erl_internal.beam Binary files differindex f58974b75f..beaab5658b 100644 --- a/bootstrap/lib/stdlib/ebin/erl_internal.beam +++ b/bootstrap/lib/stdlib/ebin/erl_internal.beam diff --git a/bootstrap/lib/stdlib/ebin/erl_lint.beam b/bootstrap/lib/stdlib/ebin/erl_lint.beam Binary files differindex f79c57877c..dc4c689263 100644 --- a/bootstrap/lib/stdlib/ebin/erl_lint.beam +++ b/bootstrap/lib/stdlib/ebin/erl_lint.beam diff --git a/bootstrap/lib/stdlib/ebin/erl_parse.beam b/bootstrap/lib/stdlib/ebin/erl_parse.beam Binary files differindex d419485285..ce2c86c6e9 100644 --- a/bootstrap/lib/stdlib/ebin/erl_parse.beam +++ b/bootstrap/lib/stdlib/ebin/erl_parse.beam diff --git a/bootstrap/lib/stdlib/ebin/erl_pp.beam b/bootstrap/lib/stdlib/ebin/erl_pp.beam Binary files differindex 50514737fc..e77e0f9136 100644 --- a/bootstrap/lib/stdlib/ebin/erl_pp.beam +++ b/bootstrap/lib/stdlib/ebin/erl_pp.beam diff --git a/bootstrap/lib/stdlib/ebin/erl_scan.beam b/bootstrap/lib/stdlib/ebin/erl_scan.beam Binary files differindex 38d6c42ba2..86307bc790 100644 --- a/bootstrap/lib/stdlib/ebin/erl_scan.beam +++ b/bootstrap/lib/stdlib/ebin/erl_scan.beam diff --git a/bootstrap/lib/stdlib/ebin/erl_tar.beam b/bootstrap/lib/stdlib/ebin/erl_tar.beam Binary files differindex b48a01a7b7..c2434b26ed 100644 --- a/bootstrap/lib/stdlib/ebin/erl_tar.beam +++ b/bootstrap/lib/stdlib/ebin/erl_tar.beam diff --git a/bootstrap/lib/stdlib/ebin/error_logger_file_h.beam b/bootstrap/lib/stdlib/ebin/error_logger_file_h.beam Binary files differindex f9df6de595..cac19681cc 100644 --- a/bootstrap/lib/stdlib/ebin/error_logger_file_h.beam +++ b/bootstrap/lib/stdlib/ebin/error_logger_file_h.beam diff --git a/bootstrap/lib/stdlib/ebin/error_logger_tty_h.beam b/bootstrap/lib/stdlib/ebin/error_logger_tty_h.beam Binary files differindex 7c9a264e4e..788a7c2fcf 100644 --- a/bootstrap/lib/stdlib/ebin/error_logger_tty_h.beam +++ b/bootstrap/lib/stdlib/ebin/error_logger_tty_h.beam diff --git a/bootstrap/lib/stdlib/ebin/escript.beam b/bootstrap/lib/stdlib/ebin/escript.beam Binary files differindex a80e141a9a..30dea42a21 100644 --- a/bootstrap/lib/stdlib/ebin/escript.beam +++ b/bootstrap/lib/stdlib/ebin/escript.beam diff --git a/bootstrap/lib/stdlib/ebin/ets.beam b/bootstrap/lib/stdlib/ebin/ets.beam Binary files differindex 6345b6b2da..40be147afa 100644 --- a/bootstrap/lib/stdlib/ebin/ets.beam +++ b/bootstrap/lib/stdlib/ebin/ets.beam diff --git a/bootstrap/lib/stdlib/ebin/eval_bits.beam b/bootstrap/lib/stdlib/ebin/eval_bits.beam Binary files differindex 472990bbbb..76ea9a4941 100644 --- a/bootstrap/lib/stdlib/ebin/eval_bits.beam +++ b/bootstrap/lib/stdlib/ebin/eval_bits.beam diff --git a/bootstrap/lib/stdlib/ebin/file_sorter.beam b/bootstrap/lib/stdlib/ebin/file_sorter.beam Binary 
files differindex 84fee9944c..f2a1fd74d0 100644 --- a/bootstrap/lib/stdlib/ebin/file_sorter.beam +++ b/bootstrap/lib/stdlib/ebin/file_sorter.beam diff --git a/bootstrap/lib/stdlib/ebin/filelib.beam b/bootstrap/lib/stdlib/ebin/filelib.beam Binary files differindex 32496b6ceb..1378390c61 100644 --- a/bootstrap/lib/stdlib/ebin/filelib.beam +++ b/bootstrap/lib/stdlib/ebin/filelib.beam diff --git a/bootstrap/lib/stdlib/ebin/filename.beam b/bootstrap/lib/stdlib/ebin/filename.beam Binary files differindex 76e1755ba4..a94acbb852 100644 --- a/bootstrap/lib/stdlib/ebin/filename.beam +++ b/bootstrap/lib/stdlib/ebin/filename.beam diff --git a/bootstrap/lib/stdlib/ebin/gen_event.beam b/bootstrap/lib/stdlib/ebin/gen_event.beam Binary files differindex ec537eb4fb..afeb6775ef 100644 --- a/bootstrap/lib/stdlib/ebin/gen_event.beam +++ b/bootstrap/lib/stdlib/ebin/gen_event.beam diff --git a/bootstrap/lib/stdlib/ebin/gen_fsm.beam b/bootstrap/lib/stdlib/ebin/gen_fsm.beam Binary files differindex fc315592aa..9bb60f185c 100644 --- a/bootstrap/lib/stdlib/ebin/gen_fsm.beam +++ b/bootstrap/lib/stdlib/ebin/gen_fsm.beam diff --git a/bootstrap/lib/stdlib/ebin/gen_server.beam b/bootstrap/lib/stdlib/ebin/gen_server.beam Binary files differindex d84e351fe3..27981c75b2 100644 --- a/bootstrap/lib/stdlib/ebin/gen_server.beam +++ b/bootstrap/lib/stdlib/ebin/gen_server.beam diff --git a/bootstrap/lib/stdlib/ebin/gen_statem.beam b/bootstrap/lib/stdlib/ebin/gen_statem.beam Binary files differindex 56cf9402f4..8123f65a42 100644 --- a/bootstrap/lib/stdlib/ebin/gen_statem.beam +++ b/bootstrap/lib/stdlib/ebin/gen_statem.beam diff --git a/bootstrap/lib/stdlib/ebin/io.beam b/bootstrap/lib/stdlib/ebin/io.beam Binary files differindex f582fa67e9..18f66f5443 100644 --- a/bootstrap/lib/stdlib/ebin/io.beam +++ b/bootstrap/lib/stdlib/ebin/io.beam diff --git a/bootstrap/lib/stdlib/ebin/io_lib.beam b/bootstrap/lib/stdlib/ebin/io_lib.beam Binary files differindex 7e16c0503d..a68d77b7f6 100644 --- a/bootstrap/lib/stdlib/ebin/io_lib.beam +++ b/bootstrap/lib/stdlib/ebin/io_lib.beam diff --git a/bootstrap/lib/stdlib/ebin/io_lib_format.beam b/bootstrap/lib/stdlib/ebin/io_lib_format.beam Binary files differindex 9cffe25cc3..e85995c1de 100644 --- a/bootstrap/lib/stdlib/ebin/io_lib_format.beam +++ b/bootstrap/lib/stdlib/ebin/io_lib_format.beam diff --git a/bootstrap/lib/stdlib/ebin/io_lib_fread.beam b/bootstrap/lib/stdlib/ebin/io_lib_fread.beam Binary files differindex 162bec0220..a1c1e35ff6 100644 --- a/bootstrap/lib/stdlib/ebin/io_lib_fread.beam +++ b/bootstrap/lib/stdlib/ebin/io_lib_fread.beam diff --git a/bootstrap/lib/stdlib/ebin/io_lib_pretty.beam b/bootstrap/lib/stdlib/ebin/io_lib_pretty.beam Binary files differindex 63290e5490..6ed1e3b7cc 100644 --- a/bootstrap/lib/stdlib/ebin/io_lib_pretty.beam +++ b/bootstrap/lib/stdlib/ebin/io_lib_pretty.beam diff --git a/bootstrap/lib/stdlib/ebin/lib.beam b/bootstrap/lib/stdlib/ebin/lib.beam Binary files differindex 2e050a2ab5..6ad7dff7da 100644 --- a/bootstrap/lib/stdlib/ebin/lib.beam +++ b/bootstrap/lib/stdlib/ebin/lib.beam diff --git a/bootstrap/lib/stdlib/ebin/lists.beam b/bootstrap/lib/stdlib/ebin/lists.beam Binary files differindex 9e47a6b1eb..f1c5f21315 100644 --- a/bootstrap/lib/stdlib/ebin/lists.beam +++ b/bootstrap/lib/stdlib/ebin/lists.beam diff --git a/bootstrap/lib/stdlib/ebin/log_mf_h.beam b/bootstrap/lib/stdlib/ebin/log_mf_h.beam Binary files differindex 85feb97748..920286c40e 100644 --- a/bootstrap/lib/stdlib/ebin/log_mf_h.beam +++ b/bootstrap/lib/stdlib/ebin/log_mf_h.beam diff --git 
a/bootstrap/lib/stdlib/ebin/maps.beam b/bootstrap/lib/stdlib/ebin/maps.beam Binary files differindex 70715a5dd2..f07f4f922d 100644 --- a/bootstrap/lib/stdlib/ebin/maps.beam +++ b/bootstrap/lib/stdlib/ebin/maps.beam diff --git a/bootstrap/lib/stdlib/ebin/ms_transform.beam b/bootstrap/lib/stdlib/ebin/ms_transform.beam Binary files differindex cdc1c8d23f..b4c8527161 100644 --- a/bootstrap/lib/stdlib/ebin/ms_transform.beam +++ b/bootstrap/lib/stdlib/ebin/ms_transform.beam diff --git a/bootstrap/lib/stdlib/ebin/orddict.beam b/bootstrap/lib/stdlib/ebin/orddict.beam Binary files differindex a97f19b2cd..9d65872e90 100644 --- a/bootstrap/lib/stdlib/ebin/orddict.beam +++ b/bootstrap/lib/stdlib/ebin/orddict.beam diff --git a/bootstrap/lib/stdlib/ebin/otp_internal.beam b/bootstrap/lib/stdlib/ebin/otp_internal.beam Binary files differindex 620f2ea8f8..912dfba644 100644 --- a/bootstrap/lib/stdlib/ebin/otp_internal.beam +++ b/bootstrap/lib/stdlib/ebin/otp_internal.beam diff --git a/bootstrap/lib/stdlib/ebin/pool.beam b/bootstrap/lib/stdlib/ebin/pool.beam Binary files differindex e43f0d4265..160474fc2a 100644 --- a/bootstrap/lib/stdlib/ebin/pool.beam +++ b/bootstrap/lib/stdlib/ebin/pool.beam diff --git a/bootstrap/lib/stdlib/ebin/proplists.beam b/bootstrap/lib/stdlib/ebin/proplists.beam Binary files differindex f3a49e1a90..176a0e0db5 100644 --- a/bootstrap/lib/stdlib/ebin/proplists.beam +++ b/bootstrap/lib/stdlib/ebin/proplists.beam diff --git a/bootstrap/lib/stdlib/ebin/qlc.beam b/bootstrap/lib/stdlib/ebin/qlc.beam Binary files differindex ea0d5265b8..4a315e8d42 100644 --- a/bootstrap/lib/stdlib/ebin/qlc.beam +++ b/bootstrap/lib/stdlib/ebin/qlc.beam diff --git a/bootstrap/lib/stdlib/ebin/qlc_pt.beam b/bootstrap/lib/stdlib/ebin/qlc_pt.beam Binary files differindex 64b750856e..ac343d8abd 100644 --- a/bootstrap/lib/stdlib/ebin/qlc_pt.beam +++ b/bootstrap/lib/stdlib/ebin/qlc_pt.beam diff --git a/bootstrap/lib/stdlib/ebin/queue.beam b/bootstrap/lib/stdlib/ebin/queue.beam Binary files differindex ab31525e31..fb24f979e1 100644 --- a/bootstrap/lib/stdlib/ebin/queue.beam +++ b/bootstrap/lib/stdlib/ebin/queue.beam diff --git a/bootstrap/lib/stdlib/ebin/rand.beam b/bootstrap/lib/stdlib/ebin/rand.beam Binary files differindex 9141567b43..887a09385c 100644 --- a/bootstrap/lib/stdlib/ebin/rand.beam +++ b/bootstrap/lib/stdlib/ebin/rand.beam diff --git a/bootstrap/lib/stdlib/ebin/re.beam b/bootstrap/lib/stdlib/ebin/re.beam Binary files differindex ad5c7ac20f..da68ad9240 100644 --- a/bootstrap/lib/stdlib/ebin/re.beam +++ b/bootstrap/lib/stdlib/ebin/re.beam diff --git a/bootstrap/lib/stdlib/ebin/shell.beam b/bootstrap/lib/stdlib/ebin/shell.beam Binary files differindex 31437e38e1..a0e2b73018 100644 --- a/bootstrap/lib/stdlib/ebin/shell.beam +++ b/bootstrap/lib/stdlib/ebin/shell.beam diff --git a/bootstrap/lib/stdlib/ebin/slave.beam b/bootstrap/lib/stdlib/ebin/slave.beam Binary files differindex c5c7d79bbc..299c24448d 100644 --- a/bootstrap/lib/stdlib/ebin/slave.beam +++ b/bootstrap/lib/stdlib/ebin/slave.beam diff --git a/bootstrap/lib/stdlib/ebin/sofs.beam b/bootstrap/lib/stdlib/ebin/sofs.beam Binary files differindex 4045afa010..a2cc62d4af 100644 --- a/bootstrap/lib/stdlib/ebin/sofs.beam +++ b/bootstrap/lib/stdlib/ebin/sofs.beam diff --git a/bootstrap/lib/stdlib/ebin/stdlib.appup b/bootstrap/lib/stdlib/ebin/stdlib.appup index 63b5f82103..40ae15f055 100644 --- a/bootstrap/lib/stdlib/ebin/stdlib.appup +++ b/bootstrap/lib/stdlib/ebin/stdlib.appup @@ -18,7 +18,7 @@ %% %CopyrightEnd% {"3.4.1", %% Up from - max one major 
revision back - [{<<"3\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-19.* + [{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.* %% Down to - max one major revision back - [{<<"3\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-19.* + [{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.* }. diff --git a/bootstrap/lib/stdlib/ebin/string.beam b/bootstrap/lib/stdlib/ebin/string.beam Binary files differindex 0292c1dee0..a281027902 100644 --- a/bootstrap/lib/stdlib/ebin/string.beam +++ b/bootstrap/lib/stdlib/ebin/string.beam diff --git a/bootstrap/lib/stdlib/ebin/supervisor.beam b/bootstrap/lib/stdlib/ebin/supervisor.beam Binary files differindex cb8ad2a6b2..f7f11b9b26 100644 --- a/bootstrap/lib/stdlib/ebin/supervisor.beam +++ b/bootstrap/lib/stdlib/ebin/supervisor.beam diff --git a/bootstrap/lib/stdlib/ebin/sys.beam b/bootstrap/lib/stdlib/ebin/sys.beam Binary files differindex ce0895e903..291c750da3 100644 --- a/bootstrap/lib/stdlib/ebin/sys.beam +++ b/bootstrap/lib/stdlib/ebin/sys.beam diff --git a/bootstrap/lib/stdlib/ebin/unicode.beam b/bootstrap/lib/stdlib/ebin/unicode.beam Binary files differindex 23fe5d9c46..1d4a16cd10 100644 --- a/bootstrap/lib/stdlib/ebin/unicode.beam +++ b/bootstrap/lib/stdlib/ebin/unicode.beam diff --git a/bootstrap/lib/stdlib/ebin/unicode_util.beam b/bootstrap/lib/stdlib/ebin/unicode_util.beam Binary files differindex 05cc455f9b..4fcf959fe6 100644 --- a/bootstrap/lib/stdlib/ebin/unicode_util.beam +++ b/bootstrap/lib/stdlib/ebin/unicode_util.beam diff --git a/bootstrap/lib/stdlib/ebin/win32reg.beam b/bootstrap/lib/stdlib/ebin/win32reg.beam Binary files differindex 85a84529e4..49d62a74e1 100644 --- a/bootstrap/lib/stdlib/ebin/win32reg.beam +++ b/bootstrap/lib/stdlib/ebin/win32reg.beam diff --git a/bootstrap/lib/stdlib/ebin/zip.beam b/bootstrap/lib/stdlib/ebin/zip.beam Binary files differindex 7e21ed9f14..e1aa2716cb 100644 --- a/bootstrap/lib/stdlib/ebin/zip.beam +++ b/bootstrap/lib/stdlib/ebin/zip.beam diff --git a/erts/Makefile b/erts/Makefile index 12d2ec57a8..0393ccc759 100644 --- a/erts/Makefile +++ b/erts/Makefile @@ -34,7 +34,7 @@ ERTSDIRS += start_scripts endif .PHONY: all -all: $(FLAVORS) +all: smp .PHONY: docs docs: @@ -44,20 +44,15 @@ docs: debug opt lcnt clean: $(V_at)for d in emulator $(ERTSDIRS); do \ if test -d $$d; then \ - ( cd $$d && $(MAKE) $@ FLAVOR=$(FLAVOR) ) || exit $$? ; \ + ( cd $$d && $(MAKE) $@ ) || exit $$?; \ fi ; \ done (cd preloaded/src && $(MAKE) ../ebin/erts.app) -# ---------------------------------------------------------------------- -# These are "convenience targets", provided as shortcuts for developers -# - don't use them in scripts or assume they will always stay like this! -# - -.PHONY: $(FLAVORS) -$(FLAVORS): +.PHONY: smp +smp: $(V_at)for type in $(TYPES); do \ - ( $(MAKE) $$type FLAVOR=$@ ); \ + ( $(MAKE) $$type ) || exit $$?; \ done # Make erl script and erlc in $(ERL_TOP)/bin which runs the compiled version @@ -112,6 +107,11 @@ local_setup: $(ERL_TOP)/bin/start_clean.script \ $(ERL_TOP)/bin/no_dot_erlang.script +# ---------------------------------------------------------------------- +# These are "convenience targets", provided as shortcuts for developers +# - don't use them in scripts or assume they will always stay like this! 
+# + # Run the configure script .PHONY: configure configure: @@ -129,10 +129,8 @@ makefiles: .PHONY: release release: - $(V_at)for f in $(FLAVORS); do \ - for t in $(TYPES); do \ - ( cd emulator && $(MAKE) release FLAVOR=$$f TYPE=$$t ) \ - done \ + for t in $(TYPES); do \ + ( cd emulator && $(MAKE) release TYPE=$$t ) || exit $$?; \ done $(V_at)for d in $(ERTSDIRS) $(XINSTDIRS); do \ if test -d $$d; then \ diff --git a/erts/aclocal.m4 b/erts/aclocal.m4 index 80bf236188..887babc13f 100644 --- a/erts/aclocal.m4 +++ b/erts/aclocal.m4 @@ -2726,6 +2726,21 @@ AC_DEFUN([LM_TRY_ENABLE_CFLAG], [ fi ]) +AC_DEFUN([LM_CHECK_ENABLE_CFLAG], [ + AC_MSG_CHECKING([whether $CC accepts $1...]) + saved_CFLAGS=$CFLAGS; + CFLAGS="$1 $CFLAGS"; + AC_TRY_COMPILE([],[return 0;],can_enable_flag=true,can_enable_flag=false) + CFLAGS=$saved_CFLAGS; + if test "X$can_enable_flag" = "Xtrue"; then + AS_VAR_SET($2, true) + AC_MSG_RESULT([yes]) + else + AS_VAR_SET($2, false) + AC_MSG_RESULT([no]) + fi +]) + dnl ERL_TRY_LINK_JAVA(CLASSES, FUNCTION-BODY dnl [ACTION_IF_FOUND [, ACTION-IF-NOT-FOUND]]) dnl Freely inspired by AC_TRY_LINK. (Maybe better to create a diff --git a/erts/configure.in b/erts/configure.in index 913315e402..2cb446b470 100644 --- a/erts/configure.in +++ b/erts/configure.in @@ -113,32 +113,14 @@ AS_HELP_STRING([--enable-bootstrap-only], # Disable stuff not necessary in a bootstrap only system in order # to speed up things by reducing the amount of stuff needing to be # built... - enable_threads=no - enable_smp_support=no with_termcap=no with_ssl=no with_ssl_zlib=no enable_hipe=no enable_sctp=no - enable_dirty_schedulers=no - fi + fi ]) -AC_ARG_ENABLE(threads, -AS_HELP_STRING([--enable-threads], [enable async thread support]) -AS_HELP_STRING([--disable-threads], [disable async thread support]), -[ case "$enableval" in - no) enable_threads=no ;; - *) enable_threads=yes ;; - esac ], enable_threads=unknown) - -AC_ARG_ENABLE(dirty-schedulers, -AS_HELP_STRING([--enable-dirty-schedulers], [enable dirty scheduler support]), -[ case "$enableval" in - no) enable_dirty_schedulers=no ;; - *) enable_dirty_schedulers=yes ;; - esac ], enable_dirty_schedulers=default) - AC_ARG_ENABLE(dirty-schedulers-test, AS_HELP_STRING([--enable-dirty-schedulers-test], [enable dirty scheduler test (for debugging purposes)]), [ case "$enableval" in @@ -146,22 +128,6 @@ AS_HELP_STRING([--enable-dirty-schedulers-test], [enable dirty scheduler test (f *) enable_dirty_schedulers_test=no ;; esac ], enable_dirty_schedulers_test=no) -AC_ARG_ENABLE(smp-support, -AS_HELP_STRING([--enable-smp-support], [enable smp support]) -AS_HELP_STRING([--disable-smp-support], [disable smp support]), -[ case "$enableval" in - no) enable_smp_support=no ;; - *) enable_smp_support=yes ;; - esac ], enable_smp_support=unknown) - -AC_ARG_ENABLE(plain-emulator, -AS_HELP_STRING([--enable-plain-emulator], [enable plain emulator]) -AS_HELP_STRING([--disable-plain-emulator], [disable plain emulator]), -[ case "$enableval" in - no) enable_plain_emulator=no ;; - *) enable_plain_emulator=yes ;; - esac ], enable_plain_emulator=unknown) - AC_ARG_ENABLE(smp-require-native-atomics, AS_HELP_STRING([--disable-smp-require-native-atomics], [disable the SMP requirement of a native atomic implementation]), @@ -580,6 +546,94 @@ AC_SUBST(WFLAGS) AC_SUBST(WERRORFLAGS) AC_SUBST(CFLAG_RUNTIME_LIBRARY_PATH) +## Check if we can do profile guided optimization of beam_emu +LM_CHECK_ENABLE_CFLAG([-fprofile-generate -Werror],[PROFILE_GENERATE]) +LM_CHECK_ENABLE_CFLAG([-fprofile-use 
-Werror],[PROFILE_USE]) + +## Check if this is clang +LM_CHECK_ENABLE_CFLAG([-fprofile-instr-generate -Werror],[PROFILE_INSTR_GENERATE]) +if test "X$PROFILE_INSTR_GENERATE" = "Xtrue"; then + # It was clang, now we also have to check if we have llvm-profdata and that + # we can link programs with -fprofile-instr-use + saved_CFLAGS=$CFLAGS; + CFLAGS="-fprofile-instr-generate -Werror $saved_CFLAGS" + AC_RUN_IFELSE([AC_LANG_PROGRAM([],[])], + [AC_CHECK_PROGS([LLVM_PROFDATA], [llvm-profdata]) + AC_CHECK_PROGS([XCRUN], [xcrun]) + if test "X$XCRUN" != "X" -a "X$LLVM_PROFDATA" = "X"; then + AC_MSG_CHECKING([for $XCRUN llvm-profdata]) + if $XCRUN llvm-profdata --help 2>& AS_MESSAGE_LOG_FD >& AS_MESSAGE_LOG_FD; then + LLVM_PROFDATA="$XCRUN llvm-profdata" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + fi + AC_SUBST(LLVM_PROFDATA) + if test "X$LLVM_PROFDATA" != "X"; then + CFLAGS="-fprofile-instr-use=default.profdata -Werror $saved_CFLAGS"; + $LLVM_PROFDATA merge -output=default.profdata *.profraw; + AC_MSG_CHECKING([whether gcc accepts -fprofile-instr-use=default.profdata -Werror]) + AC_COMPILE_IFELSE([], + [AC_MSG_RESULT([yes]) + PROFILE_INSTR_USE=true], + [AC_MSG_RESULT([no]) + PROFILE_INSTR_USE=false]) + rm -f default.profdata + fi], + []) + rm -f *.profraw + CFLAGS=$saved_CFLAGS; +fi + +AC_ARG_ENABLE(pgo, +AS_HELP_STRING([--enable-pgo], + [build erts using PGO (profile guided optimization)]), +[ case "$enableval" in + no) enable_pgo=no ;; + *) enable_pgo=yes ;; + esac +],enable_pgo=default) + +USE_PGO=false +AC_MSG_CHECKING([whether to do PGO of erts]) +if test $enable_pgo = no; then + AC_MSG_RESULT([no, disabled by user]) +elif test $CROSS_COMPILING = yes; then + if $enable_pgo = yes; then + AC_MSG_ERROR(cannot use PGO when cross-compiling) + else + AC_MSG_RESULT([no, cross compiling]) + fi +elif test "X$host" = "Xwin32"; then + AC_MSG_RESULT([no, not supported in windows]) +elif test "X$PROFILE_GENERATE" = "Xtrue" -a "X$PROFILE_USE" = "Xtrue"; then + USE_PGO=true + AC_MSG_RESULT([yes, using -fprofile-generate]) + PROFILE_COMPILER=gcc +# check if $CC accepts -fprofile-correction, if so we can use PGO on multi-threaded files. 
+ LM_CHECK_ENABLE_CFLAG([-fprofile-use -fprofile-correction -Werror],[PROFILE_CORRECTION]) + if test "X$PROFILE_CORRECTION" = "Xtrue"; then + PROFILE_CORRECTION="-fprofile-correction" + else + PROFILE_CORRECTION="" + fi + AC_SUBST(PROFILE_CORRECTION) +elif test "X$PROFILE_INSTR_GENERATE" = "Xtrue" -a "X$PROFILE_INSTR_USE" = "Xtrue"; then + USE_PGO=true + AC_MSG_RESULT([yes, using -fprofile-instr-generate]) + PROFILE_COMPILER=clang +else + if $enable_pgo = yes; then + AC_MSG_ERROR(cannot use PGO with this compiler) + else + AC_MSG_RESULT([no]) + fi +fi + +AC_SUBST(USE_PGO) +AC_SUBST(PROFILE_COMPILER) + AC_CHECK_SIZEOF(void *) # Needed for ARCH and smp checks below if test "x$ac_cv_sizeof_void_p" = x8; then AC_SUBST(EXTERNAL_WORD_SIZE, 64) @@ -640,6 +694,7 @@ case $chk_arch_ in armv7l) ARCH=arm;; armv7hl) ARCH=arm;; tile) ARCH=tile;; + e2k) ARCH=e2k;; *) ARCH=noarch;; esac @@ -996,81 +1051,12 @@ dnl are set by ERL_FIND_ETHR_LIB ERL_FIND_ETHR_LIB if test "X$ETHR_LIB_NAME" = "X"; then - found_threads=no -else - found_threads=yes + AC_MSG_ERROR([cannot build emulator since no thread library was found]) fi -FLAVORS= TYPES=opt -ERTS_BUILD_SMP_EMU=$enable_smp_support -AC_MSG_CHECKING(whether an emulator with smp support should be built) -case $ERTS_BUILD_SMP_EMU in - yes) - AC_MSG_RESULT(yes; enabled by user) - ;; - no) - AC_MSG_RESULT(no; disabled by user) - ;; - unknown) - AC_TRY_COMPILE([],[ - #if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95) - ; - #else - #error old or no gcc - #endif - ], - gcc_smp=okgcc, - gcc_smp=oldornogcc) - ERTS_BUILD_SMP_EMU=yes - case "$enable_threads-$gcc_smp-$found_threads-$host_os" in - - no-*) - AC_MSG_RESULT(no; threads disabled by user) - ERTS_BUILD_SMP_EMU=no - ;; - - *-okgcc-yes-*) - AC_MSG_RESULT(yes) - ERTS_BUILD_SMP_EMU=yes - ;; - - *-win32) - AC_MSG_RESULT(yes) - ERTS_BUILD_SMP_EMU=yes - ;; - - *-oldornogcc-*) - AC_MSG_RESULT(no; old gcc or no gcc found) - ERTS_BUILD_SMP_EMU=no - ;; - - *) - AC_MSG_RESULT(no) - ERTS_BUILD_SMP_EMU=no - ;; - esac - ;; -esac - -AC_MSG_CHECKING(whether dirty schedulers should be enabled) -case $ERTS_BUILD_SMP_EMU-$enable_dirty_schedulers in - yes-yes) - DIRTY_SCHEDULER_SUPPORT=yes;; - yes-default) - DIRTY_SCHEDULER_SUPPORT=yes;; - no-default) - DIRTY_SCHEDULER_SUPPORT=no;; - no-yes) - AC_MSG_ERROR([No smp emulator will be built, but dirty schedulers requested]);; - *) - DIRTY_SCHEDULER_SUPPORT=no;; -esac -AC_MSG_RESULT($DIRTY_SCHEDULER_SUPPORT) -AC_SUBST(DIRTY_SCHEDULER_SUPPORT) DIRTY_SCHEDULER_TEST=$enable_dirty_schedulers_test -test $DIRTY_SCHEDULER_SUPPORT = yes || DIRTY_SCHEDULER_TEST=no AC_SUBST(DIRTY_SCHEDULER_TEST) test $DIRTY_SCHEDULER_TEST != yes || { test -f "$ERL_TOP/erts/CONF_INFO" || echo "" > "$ERL_TOP/erts/CONF_INFO" @@ -1085,26 +1071,15 @@ test $DIRTY_SCHEDULER_TEST != yes || { EOF } -if test $ERTS_BUILD_SMP_EMU = yes; then +test "X$smp_require_native_atomics" = "Xyes" && + AC_DEFINE(ETHR_SMP_REQUIRE_NATIVE_IMPLS, 1, [Define if you want to enable check for native ethread implementations]) - DEFAULT_FLAVOR=smp - FLAVORS="$FLAVORS smp" - - if test $found_threads = no; then - AC_MSG_ERROR([cannot build smp enabled emulator since no thread library was found]) - fi - - AC_DEFINE(ERTS_HAVE_SMP_EMU, 1, [Define if the smp emulator is built]) - - test "X$smp_require_native_atomics" = "Xyes" && - AC_DEFINE(ETHR_SMP_REQUIRE_NATIVE_IMPLS, 1, [Define if you want to enable check for native ethread implementations]) - - case "$ethr_have_native_atomics-$smp_require_native_atomics-$ethr_have_native_spinlock" in - 
yes-*) - if test "$ethr_native_atomic_implementation" = "gcc_sync"; then - test -f "$ERL_TOP/erts/CONF_INFO" || - echo "" > "$ERL_TOP/erts/CONF_INFO" - cat >> $ERL_TOP/erts/CONF_INFO <<EOF +case "$ethr_have_native_atomics-$smp_require_native_atomics-$ethr_have_native_spinlock" in + yes-*) + if test "$ethr_native_atomic_implementation" = "gcc_sync"; then + test -f "$ERL_TOP/erts/CONF_INFO" || + echo "" > "$ERL_TOP/erts/CONF_INFO" + cat >> $ERL_TOP/erts/CONF_INFO <<EOF WARNING: Only gcc's __sync_* builtins available for @@ -1121,18 +1096,18 @@ if test $ERTS_BUILD_SMP_EMU = yes; then more information. EOF - fi - ;; + fi + ;; - no-yes-*) - AC_MSG_ERROR([No native atomic implementation found. See the \"Atomic Memory Operations and the VM\" chapter of \$ERL_TOP/HOWTO/INSTALL.md for more information.]) - ;; + no-yes-*) + AC_MSG_ERROR([No native atomic implementation found. See the \"Atomic Memory Operations and the VM\" chapter of \$ERL_TOP/HOWTO/INSTALL.md for more information.]) + ;; - no-no-yes) + no-no-yes) - test -f "$ERL_TOP/erts/CONF_INFO" || - echo "" > "$ERL_TOP/erts/CONF_INFO" - cat >> $ERL_TOP/erts/CONF_INFO <<EOF + test -f "$ERL_TOP/erts/CONF_INFO" || + echo "" > "$ERL_TOP/erts/CONF_INFO" + cat >> $ERL_TOP/erts/CONF_INFO <<EOF No native atomic implementation available. Fallbacks implemented using spinlocks will be @@ -1141,13 +1116,12 @@ EOF this. EOF - ;; - - no-no-no) + ;; - test -f "$ERL_TOP/erts/CONF_INFO" || - echo "" > "$ERL_TOP/erts/CONF_INFO" - cat >> "$ERL_TOP/erts/CONF_INFO" <<EOF + no-no-no) + test -f "$ERL_TOP/erts/CONF_INFO" || + echo "" > "$ERL_TOP/erts/CONF_INFO" + cat >> "$ERL_TOP/erts/CONF_INFO" <<EOF No native atomic implementation, nor no native spinlock implementation available. Fallbacks @@ -1156,76 +1130,11 @@ EOF will suffer immensely due to this. EOF - ;; - - esac - - enable_threads=force -fi - -AC_SUBST(ERTS_BUILD_SMP_EMU) - -ERTS_BUILD_PLAIN_EMU=$enable_plain_emulator -AC_MSG_CHECKING(whether an emulator without smp support should be built) -case $ERTS_BUILD_PLAIN_EMU in - yes) - AC_MSG_RESULT(yes; enabled by user) - ;; - no) - AC_MSG_RESULT(no; disabled by user) ;; - unknown) - case "$enable_threads-$ERTS_BUILD_SMP_EMU" in - no-*) - ERTS_BUILD_PLAIN_EMU=yes - AC_MSG_RESULT(yes) - ;; - *-no) - ERTS_BUILD_PLAIN_EMU=yes - AC_MSG_RESULT(yes; enabled as smp emulator was disabled) - ;; - *) - ERTS_BUILD_PLAIN_EMU=no - AC_MSG_RESULT(no) - ;; - esac - ;; -esac - -case $ERTS_BUILD_PLAIN_EMU in - yes) - AC_DEFINE(ERTS_HAVE_PLAIN_EMU, 1, [Define if the non-smp emulator is built]) - FLAVORS="$FLAVORS plain" - test -f "$ERL_TOP/erts/CONF_INFO" || echo "" > "$ERL_TOP/erts/CONF_INFO" - cat >> $ERL_TOP/erts/CONF_INFO <<EOF - The PLAIN aka NON-SMP emulator has been enabled. - This is a DEPRECATED feature scheduled for removal - in a future major release. - -EOF - ;; - no) - ;; esac - -AC_SUBST(ERTS_BUILD_PLAIN_EMU) -AC_SUBST(FLAVORS) AC_SUBST(TYPES) -case "$ERTS_BUILD_PLAIN_EMU-$ERTS_BUILD_SMP_EMU" in - no-no) - AC_MSG_ERROR([both smp and non-smp emulators have been disabled, one of them has to be enabled]) - ;; - *-no) - DEFAULT_FLAVOR=plain - ;; - *) - ;; -esac - -AC_SUBST(DEFAULT_FLAVOR) - AC_CHECK_FUNCS([posix_fadvise closefrom]) AC_CHECK_HEADERS([linux/falloc.h]) dnl * Old glibcs have broken fallocate64(). Make sure not to use it. @@ -1285,121 +1194,65 @@ if test $i_cv_posix_fallocate_works = yes; then fi # -# Figure out if the emulator should use threads. The default is set above -# in the enable_threads variable. 
It can have the following values: -# -# no single-threaded emulator requested -# yes multi-threaded emulator requested -# force multi-threaded emulator required -# # EMU_THR_LIB_NAME, EMU_THR_LIBS, EMU_THR_X_LIBS, and EMU_THR_DEFS is # used by the emulator, and can (but should not) be used by applications # that only require thread support when the emulator has thread support. # Other applications should use ETHR_LIB_NAME, ETHR_LIBS, ETHR_X_LIBS, # and ETHR_DEFS. # -AC_MSG_CHECKING(whether the emulator should use threads) EMU_THR_LIB_NAME= EMU_THR_X_LIBS= EMU_THR_LIBS= EMU_THR_DEFS= -emu_threads=no - -case "$enable_threads"-"$host_os" in - *-win32) - # The windows erlang emulator can never run without threads. - # It has to be enabled or the emulator will crash. Until that - # is fixed we force threads on win32. - enable_threads=force ;; - yes-osf*) - # The emulator hang when threads are enabled on osf - AC_MSG_ERROR(unresolved problems exist with threads on this platform) ;; - *) ;; -esac -case "$enable_threads"-"$found_threads" in - force-yes) - emu_threads=yes - AC_MSG_RESULT(yes; thread support required and therefore forced) ;; - yes-yes) - emu_threads=yes - AC_MSG_RESULT(yes; enabled by user) ;; - unknown-yes) - case $host_os in - solaris*|linux*|darwin*|win32) - emu_threads=yes - AC_MSG_RESULT(yes; default on this platform) - ;; - *) - AC_MSG_RESULT(no; default on this platform) - ;; - esac - ;; - no-yes) - AC_MSG_RESULT(no; thread support found but disabled by user) ;; - unknown-no|no-no) - AC_MSG_RESULT(no) ;; - force-no) - AC_MSG_ERROR(thread support required but not found) ;; - yes-no) - AC_MSG_ERROR(thread support enabled by user but not found) ;; - *) - AC_MSG_ERROR(internal error) ;; -esac +# Threads enabled for emulator +EMU_THR_LIB_NAME=$ETHR_LIB_NAME +EMU_THR_X_LIBS=$ETHR_X_LIBS +EMU_THR_LIBS=$ETHR_LIBS +EMU_THR_DEFS=$ETHR_DEFS +ENABLE_ALLOC_TYPE_VARS="$ENABLE_ALLOC_TYPE_VARS threads" +AC_MSG_CHECKING(whether lock checking should be enabled) +AC_MSG_RESULT($enable_lock_check) +if test "x$enable_lock_check" != "xno"; then + EMU_THR_DEFS="$EMU_THR_DEFS -DERTS_ENABLE_LOCK_CHECK" +fi -if test $emu_threads != yes; then - enable_lock_check=no - enable_lock_count=no -else - # Threads enabled for emulator - EMU_THR_LIB_NAME=$ETHR_LIB_NAME - EMU_THR_X_LIBS=$ETHR_X_LIBS - EMU_THR_LIBS=$ETHR_LIBS - EMU_THR_DEFS=$ETHR_DEFS - ENABLE_ALLOC_TYPE_VARS="$ENABLE_ALLOC_TYPE_VARS threads" - AC_MSG_CHECKING(whether lock checking should be enabled) - AC_MSG_RESULT($enable_lock_check) - if test "x$enable_lock_check" != "xno"; then - EMU_THR_DEFS="$EMU_THR_DEFS -DERTS_ENABLE_LOCK_CHECK" - fi +AC_MSG_CHECKING(whether lock counters should be enabled) +AC_MSG_RESULT($enable_lock_count) +if test "x$enable_lock_count" != "xno"; then + TYPES="$TYPES lcnt" +fi - AC_MSG_CHECKING(whether lock counters should be enabled) - AC_MSG_RESULT($enable_lock_count) - if test "x$enable_lock_count" != "xno"; then - TYPES="$TYPES lcnt" +case $host_os in + linux*) + AC_MSG_CHECKING([whether dlopen() needs to be called before first call to dlerror()]) + if test "x$ETHR_THR_LIB_BASE_TYPE" != "xposix_nptl"; then + AC_DEFINE(ERTS_NEED_DLOPEN_BEFORE_DLERROR,[1], + [Define if dlopen() needs to be called before first call to dlerror()]) + AC_MSG_RESULT(yes) + else + AC_MSG_RESULT(no) fi + ;; + *) + ;; +esac - case $host_os in - linux*) - AC_MSG_CHECKING([whether dlopen() needs to be called before first call to dlerror()]) - if test "x$ETHR_THR_LIB_BASE_TYPE" != "xposix_nptl"; then - 
AC_DEFINE(ERTS_NEED_DLOPEN_BEFORE_DLERROR,[1], - [Define if dlopen() needs to be called before first call to dlerror()]) - AC_MSG_RESULT(yes) - else - AC_MSG_RESULT(no) - fi - ;; - *) - ;; - esac - - # Remove -D_WIN32_WINNT*, -DWINVER* and -D_GNU_SOURCE from EMU_THR_DEFS - # (defined in CFLAGS). Note that we want to keep these flags - # in ETHR_DEFS, but not in EMU_THR_DEFS. - new_emu_thr_defs= - for thr_def in $EMU_THR_DEFS; do - case $thr_def in - -D_GNU_SOURCE*|-D_WIN32_WINNT*|-DWINVER*) - ;; - *) - new_emu_thr_defs="$new_emu_thr_defs $thr_def" - ;; - esac - done - EMU_THR_DEFS=$new_emu_thr_defs -fi +# Remove -D_WIN32_WINNT*, -DWINVER* and -D_GNU_SOURCE from EMU_THR_DEFS +# (defined in CFLAGS). Note that we want to keep these flags +# in ETHR_DEFS, but not in EMU_THR_DEFS. +new_emu_thr_defs= +for thr_def in $EMU_THR_DEFS; do + case $thr_def in + -D_GNU_SOURCE*|-D_WIN32_WINNT*|-DWINVER*) + ;; + *) + new_emu_thr_defs="$new_emu_thr_defs $thr_def" + ;; + esac +done +EMU_THR_DEFS=$new_emu_thr_defs AC_SUBST(EMU_THR_LIB_NAME) AC_SUBST(EMU_THR_X_LIBS) @@ -2451,9 +2304,6 @@ extern char end; #elif defined(HAVE__END_SYMBOL) extern char _end; #endif -#ifndef USE_THREADS -#undef ETHR_PTHREADS -#endif #ifdef ETHR_PTHREADS # ifdef ETHR_HAVE_PTHREAD_H @@ -2687,10 +2537,6 @@ extern char _end; # error no 'end' nor '_end' #endif -#ifndef USE_THREADS -#undef ETHR_PTHREADS -#endif - #ifdef ETHR_PTHREADS # ifdef ETHR_HAVE_PTHREAD_H # include <pthread.h> @@ -3483,10 +3329,7 @@ esac fi fi - - - - +AC_SUBST(FPE) dnl @@ -4982,7 +4825,7 @@ AH_BOTTOM([ # endif #endif -#if defined(DEBUG) && defined(USE_THREADS) && !defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(DEBUG) && !defined(ERTS_ENABLE_LOCK_CHECK) #define ERTS_ENABLE_LOCK_CHECK 1 #endif ]) diff --git a/erts/doc/src/Makefile b/erts/doc/src/Makefile index b96cbbce40..444adf4a6e 100644 --- a/erts/doc/src/Makefile +++ b/erts/doc/src/Makefile @@ -173,6 +173,8 @@ release_docs_spec: docs "$(RELSYSDIR)/doc/html" $(INSTALL_DATA) $(ERL_TOP)/erts/example/time_compat.erl \ "$(RELSYSDIR)/doc/html" + $(INSTALL_DATA) $(ERL_TOP)/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl \ + "$(RELSYSDIR)/doc/html" $(INSTALL_DATA) $(INFO_FILE) "$(RELSYSDIR)" $(INSTALL_DIR) "$(RELEASE_PATH)/man/man3" $(INSTALL_DATA) $(MAN3DIR)/* "$(RELEASE_PATH)/man/man3" diff --git a/erts/doc/src/alt_dist.xml b/erts/doc/src/alt_dist.xml index be969a8267..d3731a5391 100644 --- a/erts/doc/src/alt_dist.xml +++ b/erts/doc/src/alt_dist.xml @@ -47,23 +47,30 @@ runs on. The reason the C code is not made portable, is simply readability.</p> - <note> - <p>This section was written a long time ago. Most of it is still - valid, but some things have changed since then. - Most notably is the driver interface. Some updates have been made - to the documentation of the driver presented here, - but more can be done and is planned for the future. - The reader is encouraged to read the - <seealso marker="erl_driver"><c>erl_driver</c></seealso> and - <seealso marker="driver_entry"><c>driver_entry</c></seealso> - documentation also.</p> - </note> - <section> <title>Introduction</title> <p>To implement a new carrier for the Erlang distribution, the main steps are as follows.</p> + <note><p> + As of ERTS version 10.0 support for distribution controller + processes has been introduced. That is, the traffic over a + distribution channel can be managed by a process instead of + only by a port. 
This makes it possible to implement large + parts of the logic in Erlang code, and you perhaps do not + even need a new driver for the protocol. One example could + be Erlang distribution over UDP using <c>gen_udp</c> (your + Erlang code will of course have to take care of retranspissions, + etc in this example). That is, depending on what you want + to do you perhaps do not need to implement a driver at all + and can then skip the driver related sections below. + The <c>gen_tcp_dist</c> example described in the + <seealso marker="#distribution_module">Distribution + Module</seealso> section utilize distribution controller + processes and can be worth having a look at if you want to + use distribution controller processes. + </p></note> + <section> <title>Writing an Erlang Driver</title> <p>First, the protocol must be available to the Erlang machine, which @@ -152,7 +159,712 @@ </section> <section> + <marker id="distribution_module"/> + <title>Distribution Module</title> + <p> + The distribution module expose an API that <c>net_kernel</c> call + in order to manage connections to other nodes. The module name + should have the suffix <c>_dist</c>. + </p> + <p> + The module needs to create some kind of listening entity (process + or port) and an acceptor process that accepts incoming connections + using the listening entity. For each connection, the module at least + needs to create one connection supervisor process, which also is + responsible for the handshake when setting up the connection, and + a distribution controller (process or port) responsible for + transport of data over the connection. The distribution controller + and the connection supervisor process should be linked together + so both of them are cleaned up when the connection is taken down. + </p> + <p> + Note that there need to be exactly one distribution controller + per connection. A process or port can only be distribution + controller for one connection. The registration as distribution + controller cannot be undone. It will stick until the distribution + controller terminates. The distribution controller should not + ignore exit signals. It is allowed to trap exits, but it should + then voluntarily terminate when an exit signal is received. + </p> + <p> + An example implementation of a distribution module can be found + in + <url href="gen_tcp_dist.erl">$ERL_TOP/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl</url>. + It implements the distribution over TCP/IP using the <c>gen_tcp</c> + API with distribution controllers implemented by processes. This + instead of using port distribution controllers as the ordinary TCP/IP + distribution uses. + </p> + + <section> + <marker id="distribution_module_exported_callback_functions"/> + <title>Exported Callback Functions</title> + + <p> + The following functions are mandatory: + </p> + <taglist> + <tag><marker id="listen"/><c>listen(Name) -></c><br/> <c>{ok, {Listen, Address, Creation}} | {error, Error} </c></tag> + <item> + <p> + <c>listen/1</c> is called once in order to listen for incoming + connection requests. The call is made when the distribution is brought + up. The argument <c>Name</c> is the part of the node name before + the <c>@</c> sign in the full node name. It can be either an atom or a + string. 
+ </p> + <p> + The return value consists of a <c>Listen</c> handle (which is + later passed to the <seealso marker="#accept"><c>accept/1</c></seealso> + callback), <c>Address</c> which is a <c>#net_address{}</c> record + with information about the address for the node (the + <c>#net_address{}</c> record is defined in + <c>kernel/include/net_address.hrl</c>), and <c>Creation</c> which + (currently) is an integer <c>1</c>, <c>2</c>, or <c>3</c>. + </p> + <p> + If <seealso marker="erts:epmd"><c>epmd</c></seealso> is to be used + for node discovery, you typically want to use the (unfortunately + undocumented) <c>erl_epmd</c> module (part of the <c>kernel</c> + application) in order to register the listen port with <c>epmd</c> + and retrieve <c>Creation</c> to use. + </p> + </item> + + <tag><marker id="accept"/><c>accept(Listen) -></c><br/> <c>AcceptorPid</c></tag> + <item> + <p> + <c>accept/1</c> should spawn a process that accepts connections. This + process should preferably execute on <c>max</c> priority. The process + identifier of this process should be returned. + </p> + <p> + The <c>Listen</c> argument will be the same as the <c>Listen</c> handle + part of the return value of the + <seealso marker="#listen"><c>listen/1</c></seealso> callback above. + <c>accept/1</c> is called only once when the distribution protocol is + started. + </p> + <p> + The caller of this function is a representative for <c>net_kernel</c> + (this may or may not be the process registered as <c>net_kernel</c>) + and is in this document identified as <c>Kernel</c>. + When a connection has been accepted by the acceptor process, it needs + to inform <c>Kernel</c> about the accepted connection. This is done by + passing a message on the form: + </p> + <code type="none"><![CDATA[Kernel ! {accept, AcceptorPid, DistController, Family, Proto}]]></code> + <p> + <c>DistController</c> is either the process or port identifier + of the distribution controller for the connection. The + distribution controller should be created by the acceptor + processes when a new connection is accepted. Its job is to + dispatch traffic on the connection. + </p> + <c>Kernel</c> responds with one of the following messages: + <taglist> + <tag><c>{Kernel, controller, SupervisorPid}</c></tag> + <item> + <p> + The request was accepted and <c>SupervisorPid</c> is the + process identifier of the connection supervisor process + (which is created in the + <seealso marker="#accept_connection"><c>accept_connection/5</c></seealso> + callback). + </p> + </item> + <tag><c>{Kernel, unsupported_protocol}</c></tag> + <item> + <p> + The request was rejected. This is a fatal error. The acceptor + process should terminate. + </p> + </item> + </taglist> + <p> + When an accept sequence has been completed the acceptor process + is expected to continue accepting further requests. + </p> + </item> + + <tag><marker id="accept_connection"/><c>accept_connection(AcceptorPid, DistCtrl, MyNode, Allowed, SetupTime) -></c><br/> <c>ConnectionSupervisorPid</c></tag> + <item> + <p> + <c>accept_connection/5</c> should spawn a process that will + perform the Erlang distribution handshake for the connection. + If the handshake successfully completes it should continue to + function as a connection supervisor. This process + should preferably execute on <c>max</c> priority. + </p> + <p>The arguments:</p> + <taglist> + <tag><c>AcceptorPid</c></tag> + <item> + <p> + Process identifier of the process created by the + <seealso marker="#accept"><c>accept/1</c></seealso> + callback. 
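As a minimal sketch of the accept/1 callback and acceptor loop described above, assuming a gen_tcp based transport where the Listen handle is a listen socket; start_controller/1 is a hypothetical helper that spawns the distribution controller process for the accepted socket, and the family/protocol atoms depend on the carrier:

    accept(Listen) ->
        Kernel = self(),
        spawn_opt(fun () -> accept_loop(Kernel, Listen) end,
                  [link, {priority, max}]).

    accept_loop(Kernel, Listen) ->
        {ok, Socket} = gen_tcp:accept(Listen),
        DistCtrl = start_controller(Socket),            % hypothetical helper
        Kernel ! {accept, self(), DistCtrl, inet, tcp}, % family/proto depend on the carrier
        receive
            {Kernel, controller, SupervisorPid} ->
                %% Hand the socket over to the distribution controller and
                %% tell it which process is its connection supervisor.
                ok = gen_tcp:controlling_process(Socket, DistCtrl),
                DistCtrl ! {supervisor, SupervisorPid},
                accept_loop(Kernel, Listen);
            {Kernel, unsupported_protocol} ->
                exit(unsupported_protocol)
        end.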
+ </p> + </item> + <tag><c>DistCtrl</c></tag> + <item> + <p>The identifier of the distribution controller identifier + created by the acceptor process. To be passed along to + <c>dist_util:handshake_other_started(HsData)</c>. + </p> + </item> + <tag><c>MyNode</c></tag> + <item> + <p> + Node name of this node. To be passed along to + <c>dist_util:handshake_other_started(HsData)</c>. + </p> + </item> + <tag><c>Allowed</c></tag> + <item> + <p> + To be passed along to + <c>dist_util:handshake_other_started(HsData)</c>. + </p> + </item> + <tag><c>SetupTime</c></tag> + <item> + <p> + Time used for creating a setup timer by a + call to <c>dist_util:start_timer(SetupTime)</c>. + The timer should be passed along to + <c>dist_util:handshake_other_started(HsData)</c>. + </p> + </item> + </taglist> + <p> + The created process should provide callbacks and other + information needed for the handshake in a + <seealso marker="#hs_data_record"><c>#hs_data{}</c></seealso> + record and call <c>dist_util:handshake_other_started(HsData)</c> + with this record. + </p> + <p> + <c>dist_util:handshake_other_started(HsData)</c> will perform + the handshake and if the handshake successfully completes this + process will then continue in a connection supervisor loop + as long as the connection is up. + </p> + </item> + + <tag><marker id="setup"/><c>setup(Node, Type, MyNode, LongOrShortNames, SetupTime) -></c><br/> <c>ConnectionSupervisorPid</c></tag> + <item> + <p> + <c>setup/5</c> should spawn a process that connects to + <c>Node</c>. When connection has been established it should + perform the Erlang distribution handshake for the connection. + If the handshake successfully completes it should continue to + function as a connection supervisor. This process + should preferably execute on <c>max</c> priority. + </p> + <p>The arguments:</p> + <taglist> + <tag><c>Node</c></tag> + <item> + <p> + Node name of remote node. To be passed along to + <c>dist_util:handshake_we_started(HsData)</c>. + </p> + </item> + <tag><c>Type</c></tag> + <item> + <p> + Connection type. To be passed along to + <c>dist_util:handshake_we_started(HsData)</c>. + </p> + </item> + <tag><c>MyNode</c></tag> + <item> + <p> + Node name of this node. To be passed along to + <c>dist_util:handshake_we_started(HsData)</c>. + </p> + </item> + <tag><c>LongOrShortNames</c></tag> + <item> + <p> + Either the atom <c>longnames</c> or + the atom <c>shortnames</c> indicating + whether long or short names is used. + </p> + </item> + <tag><c>SetupTime</c></tag> + <item> + <p> + Time used for creating a setup timer by a + call to <c>dist_util:start_timer(SetupTime)</c>. + The timer should be passed along to + <c>dist_util:handshake_we_started(HsData)</c>. + </p> + </item> + </taglist> + <p> + The caller of this function is a representative for <c>net_kernel</c> + (this may or may not be the process registered as <c>net_kernel</c>) + and is in this document identified as <c>Kernel</c>. + </p> + <p> + This function should, besides spawning the connection supervisor, + also create a distribution controller. The distribution + controller is either a process or a port which is responsible + for dispatching traffic. + </p> + <p> + The created process should provide callbacks and other + information needed for the handshake in a + <seealso marker="#hs_data_record"><c>#hs_data{}</c></seealso> + record and call <c>dist_util:handshake_we_started(HsData)</c> + with this record. 
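A minimal sketch of the overall shape of a process-based setup/5, again assuming a gen_tcp style transport; connect_to/1 and start_controller/1 are hypothetical helpers, the #hs_data{} record comes from kernel/include/dist_util.hrl, and only a few of its fields are shown (the full set is described in the #hs_data{} section below):

    %% Assumes: -include_lib("kernel/include/dist_util.hrl").
    setup(Node, Type, MyNode, _LongOrShortNames, SetupTime) ->
        Kernel = self(),
        spawn_opt(
          fun () ->
                  Timer = dist_util:start_timer(SetupTime),
                  {ok, Socket} = connect_to(Node),       % hypothetical helper
                  DistCtrl = start_controller(Socket),   % hypothetical helper
                  HsData = #hs_data{kernel_pid   = Kernel,
                                    other_node   = Node,
                                    this_node    = MyNode,
                                    socket       = DistCtrl,
                                    timer        = Timer,
                                    request_type = Type
                                    %% ...plus the f_*/mf_* callback funs
                                   },
                  dist_util:handshake_we_started(HsData)
          end,
          [link, {priority, max}]).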
+ </p> + <p> + <c>dist_util:handshake_we_started(HsData)</c> will perform + the handshake and the handshake successfully completes this + process will then continue in a connection supervisor loop + as long as the connection is up. + </p> + </item> + + <tag><marker id="close"/><c>close(Listen) -></c><br/> <c>void()</c></tag> + + <item><p> + Called in order to close the <c>Listen</c> handle + that originally was passed from the + <seealso marker="#listen"><c>listen/1</c></seealso> callback. + </p></item> + + <tag><marker id="select"/><c>select(NodeName) -></c><br/> <c>boolean()</c></tag> + <item> + <p>Return <c>true</c> if the host name part + of the <c>NodeName</c> is valid for use + with this protocol; otherwise, <c>false</c>. + </p> + </item> + + </taglist> + + <p> + There are also two optional functions that may be + exported: + </p> + <taglist> + <tag><marker id="select"/><c>setopts(Listen, Opts) -></c><br/> <c>ok | {error, Error}</c></tag> + <item> + <p> + The argument <c>Listen</c> is the handle originally passed + from the + <seealso marker="#listen"><c>listen/1</c></seealso> callback. + The argument <c>Opts</c> is a list of options to set on future + connections. + </p> + </item> + + <tag><marker id="select"/><c>getopts(Listen, Opts) -></c><br/> <c>{ok, OptionValues} | {error, Error}</c></tag> + <item> + <p> + The argument <c>Listen</c> is the handle originally passed + from the + <seealso marker="#listen"><c>listen/1</c></seealso> callback. + The argument <c>Opts</c> is a list of options to read for future + connections. + </p> + </item> + </taglist> + + </section> + <section> + <marker id="hs_data_record"/> + <title>The #hs_data{} Record</title> + <p> + The <c>dist_util:handshake_we_started/1</c> and + <c>dist_util:handshake_other_started/1</c> functions + takes a <c>#hs_data{}</c> record as argument. There + are quite a lot of fields in this record that you + need to set. The record is defined in + <c>kernel/include/dist_util.hrl</c>. Not documented + fields should not be set, i.e., should be left as + <c>undefined</c>. + </p> + <p> + The following <c>#hs_data{}</c> record fields need + to be set unless otherwise stated:</p> + <taglist> + <tag><marker id="hs_data_kernel_pid"/><c>kernel_pid</c></tag> + <item> + <p> + Process identifier of the <c>Kernel</c> process. That is, + the process that called either + <seealso marker="#setup"><c>setup/5</c></seealso> or + <seealso marker="#accept_connection"><c>accept_connection/5</c></seealso>. + </p> + </item> + + <tag><marker id="hs_data_other_node"/><c>other_node</c></tag> + <item> + <p>Name of the other node. This field is only + mandatory when this node initiates the connection. + That is, when connection is set up via + <seealso marker="#setup"><c>setup/5</c></seealso>. + </p> + </item> + + <tag><marker id="hs_data_this_node"/><c>this_node</c></tag> + <item> + <p> + The node name of this node. + </p> + </item> + + <tag><marker id="hs_data_socket"/><c>socket</c></tag> + <item> + <p> + The identifier of the distribution controller. + </p> + </item> + + <tag><marker id="hs_data_timer"/><c>timer</c></tag> + <item> + <p> + The timer created using <c>dist_util:start_timer/1</c>. + </p> + </item> + + <tag><marker id="hs_data_allowed"/><c>allowed</c></tag> + <item> + <p>Information passed as <c>Allowed</c> to + <c>accept_connection/5</c>. This field is only + mandatory when the remote node initiated the + connection. That is, when the connection is set + up via + <seealso marker="#accept_connection"><c>accept_connection/5</c></seealso>. 
+ </p> + </item> + + <tag><marker id="hs_data_f_send"/><c>f_send</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrlr, Data) -> ok | {error, Error}]]></code> + <p> + where <c>DistCtrlr</c> is the identifier of + the distribution controller and <c>Data</c> + is io data to pass to the other side. + </p> + <p>Only used during handshake phase.</p> + </item> + + <tag><marker id="hs_data_f_recv"/><c>f_recv</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrlr, Length) -> {ok, Packet} | {error, Reason}]]></code> + <p> + where <c>DistCtrlr</c> is the identifier of the distribution + controller. + If <c>Length</c> is <c>0</c>, all available bytes should be + returned. If <c>Length > 0</c>, exactly <c>Length</c> bytes + should be returned, or an error; possibly discarding less + than <c>Length</c> bytes of data when the connection is + closed from the other side. + It is used for passive receive of data from the + other end. + </p> + <p>Only used during handshake phase.</p> + </item> + + <tag><marker id="hs_data_f_setopts_pre_nodeup"/><c>f_setopts_pre_nodeup</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrlr) -> ok | {error, Error}]]></code> + <p> + where <c>DistCtrlr</c> is the identifier of + the distribution controller. Called just + before the distribution channel is taken up + for normal traffic. + </p> + <p>Only used during handshake phase.</p> + </item> + + <tag><marker id="hs_data_f_setopts_post_nodeup"/><c>f_setopts_post_nodeup</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrlr) -> ok | {error, Error}]]></code> + <p> + where <c>DistCtrlr</c> is the identifier of + the distribution controller. Called just + after distribution channel has been taken + up for normal traffic. + </p> + <p>Only used during handshake phase.</p> + </item> + + <tag><marker id="hs_data_f_getll"/><c>f_getll</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrlr) -> ID]]></code> + <p> + where <c>DistCtrlr</c> is the identifier of + the distribution controller and <c>ID</c> is + the identifier of the low level entity that + handles the connection (often <c>DistCtrlr</c> + itself). + </p> + <p>Only used during handshake phase.</p> + </item> + + <tag><marker id="hs_data_f_address"/><c>f_address</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrlr, Node) -> NetAddress]]></code> + <p> + where <c>DistCtrlr</c> is the identifier of + the distribution controller, <c>Node</c> + is the node name of the node on the other end, + and <c>NetAddress</c> is a <c>#net_address{}</c> + record with information about the address + for the <c>Node</c> on the other end of the + connection. The <c>#net_address{}</c> record + is defined in + <c>kernel/include/net_address.hrl</c>. + </p> + <p>Only used during handshake phase.</p> + </item> + + <tag><marker id="hs_data_mf_tick"/><c>mf_tick</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrlr) -> void()]]></code> + <p> + where <c>DistCtrlr</c> is the identifier + of the distribution controller. This + function should send information over + the connection that is not interpreted + by the other end while increasing the + statistics of received packets on the + other end. 
This is usually implemented by + sending an empty packet. + </p> + <note><p> + It is of vital importance that this operation + does not block the caller for a long time. + This since it is called from the connection + supervisor. + </p></note> + <p>Used when connection is up.</p> + </item> + + <tag><marker id="hs_data_mf_getstat"/><c>mf_getstat</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrlr) -> {ok, Received, Sent, PendSend}]]></code> + <p> + where <c>DistCtrlr</c> is the identifier + of the distribution controller, <c>Received</c> + is received packets, <c>Sent</c> is + sent packets, and <c>PendSend</c> is + amount of packets in queue to be sent + or a <c>boolean()</c> indicating whether + there are packets in queue to be sent. + </p> + <note><p> + It is of vital importance that this operation + does not block the caller for a long time. + This since it is called from the connection + supervisor. + </p></note> + <p>Used when connection is up.</p> + </item> + + <tag><marker id="hs_data_request_type"/><c>request_type</c></tag> + <item> + <p> + The request <c>Type</c> as passed to + <seealso marker="#setup"><c>setup/5</c></seealso>. + This is only mandatory when the connection has + been initiated by this node. That is, the connection + is set up via <c>setup/5</c>. + </p> + </item> + + <tag><marker id="hs_data_mf_setopts"/><c>mf_setopts</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrl, Opts) -> ok | {error, Error}]]></code> + <p> + where <c>DistCtrlr</c> is the identifier + of the distribution controller and <c>Opts</c> + is a list of options to set on the connection. + </p> + <p>This function is optional. Used when connection is up.</p> + </item> + + <tag><marker id="hs_data_mf_getopts"/><c>mf_getopts</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrl, Opts) -> {ok, OptionValues} | {error, Error}]]></code> + <p> + where <c>DistCtrlr</c> is the identifier + of the distribution controller and <c>Opts</c> + is a list of options to read for the connection. + </p> + <p>This function is optional. Used when connection is up.</p> + </item> + + <tag><marker id="hs_data_f_handshake_complete"/><c>f_handshake_complete</c></tag> + <item> + <p> + A fun with the following signature: + </p> + <code type="none"><![CDATA[fun (DistCtrlr, Node, DHandle) -> void()]]></code> + <p> + where <c>DistCtrlr</c> is the identifier + of the distribution controller, <c>Node</c> is + the node name of the node connected at the other + end, and <c>DHandle</c> is a distribution handle + needed by a distribution controller process when + calling the following BIFs: + </p> + <list> + <item><p><seealso marker="erts:erlang#dist_ctrl_get_data/1"><c>erlang:dist_ctrl_get_data/1</c></seealso></p></item> + <item><p><seealso marker="erts:erlang#dist_ctrl_get_data_notification/1"><c>erlang:dist_ctrl_get_data_notification/1</c></seealso></p></item> + <item><p><seealso marker="erts:erlang#dist_ctrl_input_handler/2"><c>erlang:dist_ctrl_input_handler/2</c></seealso></p></item> + <item><p><seealso marker="erts:erlang#dist_ctrl_put_data/2"><c>erlang:dist_ctrl_put_data/2</c></seealso></p></item> + </list> + <p> + This function is called when the handshake has + completed and the distribution channel is up. + The distribution controller can begin dispatching + traffic over the channel. This function is optional. 
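Since these BIFs are new, a minimal sketch of the outgoing direction of a process-based distribution controller may help; it runs after f_handshake_complete has delivered DHandle, and send_packet/2 is a hypothetical wrapper around the actual transport send (including whatever packet framing the protocol uses):

    output_loop(DHandle, Socket) ->
        %% Ask to be sent a 'dist_data' message when the VM has data for us...
        erlang:dist_ctrl_get_data_notification(DHandle),
        receive
            dist_data ->
                %% ...then drain everything that is currently available.
                send_available(DHandle, Socket),
                output_loop(DHandle, Socket)
        end.

    send_available(DHandle, Socket) ->
        case erlang:dist_ctrl_get_data(DHandle) of
            none ->
                ok;
            Data ->
                ok = send_packet(Socket, Data),  % hypothetical transport send
                send_available(DHandle, Socket)
        end.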
+ </p> + <p>Only used during handshake phase.</p> + </item> + + <tag><marker id="hs_data_add_flags"/><c>add_flags</c></tag> + <item> + <p> + <seealso marker="erl_dist_protocol#dflags">Distribution flags</seealso> + to add to the connection. Currently all (non obsolete) flags will + automatically be enabled. + </p> + <p> + This flag field is optional. + </p> + </item> + + <tag><marker id="hs_data_reject_flags"/><c>reject_flags</c></tag> + <item> + <p> + <seealso marker="erl_dist_protocol#dflags">Distribution flags</seealso> + to reject. Currently the following distribution flags can be rejected: + </p> + <taglist> + <tag><c>DFLAG_DIST_HDR_ATOM_CACHE</c></tag> + <item>Do not use atom cache over this connection.</item> + <tag><c>DFLAGS_STRICT_ORDER_DELIVERY</c></tag> + <item>Do not use any features that require strict + order delivery.</item> + </taglist> + <p> + This flag field is optional. + </p> + </item> + + <tag><marker id="hs_data_require_flags"/><c>require_flags</c></tag> + <item> + <p> + Require these <seealso marker="erl_dist_protocol#dflags">distribution + flags</seealso> to be used. The connection will be aborted during the + handshake if the other end does not use them. + </p> + <p> + This flag field is optional. + </p> + </item> + + </taglist> + </section> + + <section> + <marker id="distribution_data_delivery"/> + <title>Distribution Data Delivery</title> + <p> + When using the default configuration, the data to pass + over a connection needs to be delivered as is + to the node on the receiving end in the <em>exact same + order</em>, with no loss of data what so ever, as sent + from the sending node. + </p> + <p> + The data delivery order can be relaxed by disabling + features that require strict ordering. This is done by + passing the <c>?DFLAGS_STRICT_ORDER_DELIVERY</c> + <seealso marker="erl_dist_protocol#dflags">distribution + flags</seealso> in the + <seealso marker="alt_dist#hs_data_reject_flags"><c>reject_flags</c></seealso> + field of the <seealso marker="#hs_data_record"><c>#hs_data{}</c></seealso> + record used when setting up the connection. When relaxed + ordering is used, only the order of signals with the same + sender/receiver pair has to be preserved. + However, note that disabling the features that require + strict ordering may have a negative impact on performance, + throughput, and/or latency. + </p> + </section> + + <section> + <marker id="enable_your_distribution_module"/> + <title>Enable Your Distribution Module</title> + + <p>For <c>net_kernel</c> to find out which distribution module to use, + the <c>erl</c> command-line argument <c>-proto_dist</c> is used. It + is followed by one or more distribution module names, with suffix + "_dist" removed. That is, <c>gen_tcp_dist</c> as a distribution module + is specified as <c>-proto_dist gen_tcp</c>.</p> + + <p>If no <c>epmd</c> (TCP port mapper daemon) is used, also command-line + option <c>-no_epmd</c> is to be specified, which makes + Erlang skip the <c>epmd</c> startup, both as an OS process and as an + Erlang ditto.</p> + </section> + + </section> + + <section> <title>The Driver</title> + + <note> + <p>This section was written a long time ago. Most of it is still + valid, but some things have changed since then. Some updates have + been made to the documentation of the driver presented here, + but more can be done and is planned for the future. 
+ The reader is encouraged to read the + <seealso marker="erl_driver"><c>erl_driver</c></seealso> and + <seealso marker="driver_entry"><c>driver_entry</c></seealso> + documentation also.</p> + </note> + <p>Although Erlang drivers in general can be beyond the scope of this section, a brief introduction seems to be in place.</p> diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml index 638e88ca31..71fe08d4e6 100644 --- a/erts/doc/src/erl.xml +++ b/erts/doc/src/erl.xml @@ -538,20 +538,6 @@ <p>Note that a distributed node will fail to start if epmd is not running.</p> </item> - <tag><marker id="smp"/><c><![CDATA[-smp [enable|auto|disable]]]></c></tag> - <item> - <p><c>-smp enable</c> and <c>-smp</c> start the Erlang runtime - system with SMP support enabled. This can fail if no runtime - system with SMP support is available. <c>-smp auto</c> starts - the Erlang runtime system with SMP support enabled if it is - available and more than one logical processor is detected. - <c>-smp disable</c> starts a runtime system without SMP support. - The runtime system without SMP support is deprecated and will - be removed in a future major release.</p> - <note> - <p>See also flag<seealso marker="#+S"><c>+S</c></seealso>.</p> - </note> - </item> <tag><c><![CDATA[-version]]></c> (emulator flag)</tag> <item> <p>Makes the emulator print its version number. The same @@ -902,7 +888,7 @@ <c><![CDATA[+S Schedulers:SchedulerOnline]]></c></tag> <item> <p>Sets the number of scheduler threads to create and scheduler threads - to set online when SMP support has been enabled. The maximum for both + to set online. The maximum for both values is 1024. If the Erlang runtime system is able to determine the number of logical processors configured and logical processors available, <c>Schedulers</c> defaults to logical processors @@ -920,8 +906,6 @@ <p>Specifying value <c>0</c> for <c>Schedulers</c> or <c>SchedulersOnline</c> resets the number of scheduler threads or scheduler threads online, respectively, to its default value.</p> - <p>This option is ignored if the emulator does not have SMP support - enabled (see flag <seealso marker="#smp"><c>-smp</c></seealso>).</p> </item> <tag><marker id="+SP"/><c><![CDATA[+SP SchedulersPercentage:SchedulersOnlinePercentage]]></c></tag> @@ -929,8 +913,8 @@ <p>Similar to <seealso marker="#+S"><c>+S</c></seealso> but uses percentages to set the number of scheduler threads to create, based on logical processors configured, and scheduler threads to set online, - based on logical processors available, when SMP support has been - enabled. Specified values must be > 0. For example, + based on logical processors available. + Specified values must be > 0. For example, <c>+SP 50:25</c> sets the number of scheduler threads to 50% of the logical processors configured, and the number of scheduler threads online to 25% of the logical processors available. @@ -945,15 +929,13 @@ and 8 logical cores available, the combination of the options <c>+S 4:4 +SP 50:25</c> (in either order) results in 2 scheduler threads (50% of 4) and 1 scheduler thread online (25% of 4).</p> - <p>This option is ignored if the emulator does not have SMP support - enabled (see flag <seealso marker="#smp"><c>-smp</c></seealso>).</p> </item> <tag><marker id="+SDcpu"/><c><![CDATA[+SDcpu DirtyCPUSchedulers:DirtyCPUSchedulersOnline]]></c></tag> <item> <p>Sets the number of dirty CPU scheduler threads to create and dirty - CPU scheduler threads to set online when threading support has been - enabled. 
The maximum for both values is 1024, and each value is + CPU scheduler threads to set online. + The maximum for both values is 1024, and each value is further limited by the settings for normal schedulers:</p> <list type="bulleted"> <item>The number of dirty CPU scheduler threads created cannot exceed @@ -977,16 +959,14 @@ executing on ordinary schedulers. If the amount of dirty CPU schedulers was allowed to be unlimited, dirty CPU bound jobs would potentially starve normal jobs.</p> - <p>This option is ignored if the emulator does not have threading - support enabled.</p> </item> <tag><marker id="+SDPcpu"/><c><![CDATA[+SDPcpu DirtyCPUSchedulersPercentage:DirtyCPUSchedulersOnlinePercentage]]></c></tag> <item> <p>Similar to <seealso marker="#+SDcpu"><c>+SDcpu</c></seealso> but uses percentages to set the number of dirty CPU scheduler threads to - create and the number of dirty CPU scheduler threads to set online - when threading support has been enabled. Specified values must be + create and the number of dirty CPU scheduler threads to set online. + Specified values must be > 0. For example, <c>+SDPcpu 50:25</c> sets the number of dirty CPU scheduler threads to 50% of the logical processors configured and the number of dirty CPU scheduler threads online to 25% of the @@ -1003,13 +983,11 @@ the combination of the options <c>+SDcpu 4:4 +SDPcpu 50:25</c> (in either order) results in 2 dirty CPU scheduler threads (50% of 4) and 1 dirty CPU scheduler thread online (25% of 4).</p> - <p>This option is ignored if the emulator does not have threading - support enabled.</p> </item> <tag><marker id="+SDio"/><c><![CDATA[+SDio DirtyIOSchedulers]]></c></tag> <item> - <p>Sets the number of dirty I/O scheduler threads to create when - threading support has been enabled. Valid range is 0-1024. By + <p>Sets the number of dirty I/O scheduler threads to create. + Valid range is 0-1024. By default, the number of dirty I/O scheduler threads created is 10, same as the default number of threads in the <seealso marker="#async_thread_pool_size">async thread pool</seealso>.</p> @@ -1019,8 +997,6 @@ expected to execute on dirty I/O schedulers. If the user should schedule CPU bound jobs on dirty I/O schedulers, these jobs might starve ordinary jobs executing on ordinary schedulers.</p> - <p>This option is ignored if the emulator does not have threading - support enabled.</p> </item> <tag><c><![CDATA[+sFlag Value]]></c></tag> <item> diff --git a/erts/doc/src/erl_dist_protocol.xml b/erts/doc/src/erl_dist_protocol.xml index 610351db6c..a78b13aaa4 100644 --- a/erts/doc/src/erl_dist_protocol.xml +++ b/erts/doc/src/erl_dist_protocol.xml @@ -829,7 +829,31 @@ DiB == gen_digest(ChA, ICA)? <item> <p>The node understand UTF-8 encoded atoms.</p> </item> + <tag><c>-define(DFLAG_MAP_TAG, 16#20000).</c></tag> + <item> + <p>The node understand the map tag.</p> + </item> + <tag><c>-define(DFLAG_BIG_CREATION, 16#40000).</c></tag> + <item> + <p>The node understand big node creation.</p> + </item> + <tag><c>-define(DFLAG_SEND_SENDER, 16#80000).</c></tag> + <item> + <p> + Use the <c>SEND_SENDER</c> + <seealso marker="#control_message">control message</seealso> + instead of the <c>SEND</c> control message and use the + <c>SEND_SENDER_TT</c> control message instead + of the <c>SEND_TT</c> control message. + </p> + </item> </taglist> + <p> + There are also a collection of <c>DFLAG</c>s bitwise or:ed + together in the <c>DFLAGS_STRICT_ORDER_DELIVERY</c> macro. 
+ These flags corresponds to features that require strict + ordering of data over distribution channels. + </p> </section> </section> @@ -922,6 +946,7 @@ DiB == gen_digest(ChA, ICA)? </item> </taglist> + <marker id="control_message"/> <p>The <c>ControlMessage</c> is a tuple, where the first element indicates which distributed operation it encodes:</p> @@ -1028,4 +1053,49 @@ DiB == gen_digest(ChA, ICA)? </item> </taglist> </section> + + <section> + <title>New Ctrlmessages for Erlang/OTP 21</title> + <taglist> + <tag><c>SEND_SENDER</c></tag> + <item> + <p><c>{22, FromPid, ToPid}</c></p> + <p>Followed by <c>Message</c>.</p> + <p> + This control messages replace the <c>SEND</c> control + message and will be sent when the distribution flag + <seealso marker="erl_dist_protocol#dflags"><c>DFLAG_SEND_SENDER</c></seealso> + has been negotiated in the connection setup handshake. + </p> + <note><p> + Messages encoded before the connection has + been set up may still use the <c>SEND</c> control + message. However, once a <c>SEND_SENDER</c> or <c>SEND_SENDER_TT</c> + control message has been sent, no more <c>SEND</c> + control messages will be sent in the same direction + on the connection. + </p></note> + </item> + <tag><c>SEND_SENDER_TT</c></tag> + <item> + <p><c>{23, FromPid, ToPid, TraceToken}</c></p> + <p>Followed by <c>Message</c>.</p> + <p> + This control messages replace the <c>SEND_TT</c> control + message and will be sent when the distribution flag + <seealso marker="erl_dist_protocol#dflags"><c>DFLAG_SEND_SENDER</c></seealso> + has been negotiated in the connection setup handshake. + </p> + <note><p> + Messages encoded before the connection has + been set up may still use the <c>SEND_TT</c> control + message. However, once a <c>SEND_SENDER</c> or <c>SEND_SENDER_TT</c> + control message has been sent, no more <c>SEND_TT</c> + control messages will be sent in the same direction + on the connection. + </p></note> + </item> + </taglist> + </section> + </chapter> diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml index 2465f49581..ca8ba044e7 100644 --- a/erts/doc/src/erlang.xml +++ b/erts/doc/src/erlang.xml @@ -189,6 +189,14 @@ </taglist> </desc> </datatype> + + <datatype> + <name name="dist_handle"></name> + <desc> + <p>An opaque handle identifing a distribution channel.</p> + </desc> + </datatype> + </datatypes> <funcs> @@ -1233,6 +1241,141 @@ end</code> </func> <func> + <name name="dist_ctrl_get_data" arity="1"/> + <fsummary>Get distribution channel data to pass to another node.</fsummary> + <desc> + <p> + Get distribution channel data from the local node that is + to be passed to the remote node. The distribution channel + is identified by <c><anno>DHandle</anno></c>. If no data + is available, the atom <c>none</c> is returned. One + can request to be informed by a message when more + data is available by calling + <seealso marker="erlang#dist_ctrl_get_data_notification/1"><c>erlang:dist_ctrl_get_data_notification(DHandle)</c></seealso>. + </p> + <note><p> + Only the process registered as distribution + controller for the distribution channel identified by + <c><anno>DHandle</anno></c> is allowed to call this + function. + </p></note> + <p> + This function is used when implementing an alternative + distribution carrier using processes as distribution + controllers. <c><anno>DHandle</anno></c> is retrived + via the callback + <seealso marker="erts:alt_dist#hs_data_f_handshake_complete"><c>f_handshake_complete</c></seealso>. 
+ More information can be found in the documentation of + <seealso marker="erts:alt_dist#distribution_module">ERTS + User's Guide ➜ How to implement an Alternative Carrier + for the Erlang Distribution ➜ Distribution Module</seealso>. + </p> + </desc> + </func> + + <func> + <name name="dist_ctrl_get_data_notification" arity="1"/> + <fsummary>Request notification about available outgoing distribution channel data.</fsummary> + <desc> + <p> + Request notification when more data is available to + fetch using + <seealso marker="erlang#dist_ctrl_get_data/1"><c>erlang:dist_ctrl_get_data(DHandle)</c></seealso> + for the distribution channel identified by + <c><anno>DHandle</anno></c>. When more data is present, + the caller will be sent the message <c>dist_data</c>. + Once a <c>dist_data</c> messages has been sent, no + more <c>dist_data</c> messages will be sent until + the <c>dist_ctrl_get_data_notification/1</c> function has been called + again. + </p> + <note><p> + Only the process registered as distribution + controller for the distribution channel identified by + <c><anno>DHandle</anno></c> is allowed to call this + function. + </p></note> + <p> + This function is used when implementing an alternative + distribution carrier using processes as distribution + controllers. <c><anno>DHandle</anno></c> is retrived + via the callback + <seealso marker="erts:alt_dist#hs_data_f_handshake_complete"><c>f_handshake_complete</c></seealso>. + More information can be found in the documentation of + <seealso marker="erts:alt_dist#distribution_module">ERTS + User's Guide ➜ How to implement an Alternative Carrier + for the Erlang Distribution ➜ Distribution Module</seealso>. + </p> + </desc> + </func> + + <func> + <name name="dist_ctrl_input_handler" arity="2"/> + <fsummary>Register distribution channel input handler process.</fsummary> + <desc> + <p> + Register an alternate input handler process for the + distribution channel identified by <c><anno>DHandle</anno></c>. + Once this function has been called, <c><anno>InputHandler</anno></c> + is the only process allowed to call + <seealso marker="erlang#dist_ctrl_put_data/2"><c>erlang:dist_ctrl_put_data(DHandle, Data)</c></seealso> + with the <c><anno>DHandle</anno></c> identifing this distribution + channel. + </p> + <note><p> + Only the process registered as distribution + controller for the distribution channel identified by + <c><anno>DHandle</anno></c> is allowed to call this + function. + </p></note> + <p> + This function is used when implementing an alternative + distribution carrier using processes as distribution + controllers. <c><anno>DHandle</anno></c> is retrived + via the callback + <seealso marker="erts:alt_dist#hs_data_f_handshake_complete"><c>f_handshake_complete</c></seealso>. + More information can be found in the documentation of + <seealso marker="erts:alt_dist#distribution_module">ERTS + User's Guide ➜ How to implement an Alternative Carrier + for the Erlang Distribution ➜ Distribution Module</seealso>. + </p> + </desc> + </func> + + <func> + <name name="dist_ctrl_put_data" arity="2"/> + <fsummary>Pass data into the VM from a distribution channel.</fsummary> + <desc> + <p> + Deliver distribution channel data from a remote node to the + local node. 
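For the incoming direction, a minimal sketch of a loop that feeds data received from the remote node into the VM; recv_packet/1 is a hypothetical blocking receive of one complete packet from the transport, and the loop must run either in the distribution controller itself or in a process previously registered via erlang:dist_ctrl_input_handler/2:

    input_loop(DHandle, Socket) ->
        {ok, Packet} = recv_packet(Socket),      % hypothetical transport receive
        erlang:dist_ctrl_put_data(DHandle, Packet),
        input_loop(DHandle, Socket).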
+ </p> + <note><p> + Only the process registered as distribution + controller for the distribution channel identified by + <c><anno>DHandle</anno></c> is allowed to call this + function unless an alternate input handler process + has been registered using + <seealso marker="erlang#dist_ctrl_input_handler/2"><c>erlang:dist_ctrl_input_handler(DHandle, InputHandler)</c></seealso>. + If an alternate input handler has been registered, only + the registered input handler process is allowed to call + this function. + </p></note> + <p> + This function is used when implementing an alternative + distribution carrier using processes as distribution + controllers. <c><anno>DHandle</anno></c> is retrived + via the callback + <seealso marker="erts:alt_dist#hs_data_f_handshake_complete"><c>f_handshake_complete</c></seealso>. + More information can be found in the documentation of + <seealso marker="erts:alt_dist#distribution_module">ERTS + User's Guide ➜ How to implement an Alternative Carrier + for the Erlang Distribution ➜ Distribution Module</seealso>. + </p> + </desc> + </func> + + <func> <name name="element" arity="2"/> <fsummary>Return the Nth element of a tuple.</fsummary> <type_desc variable="N">1..tuple_size(<anno>Tuple</anno>)</type_desc> @@ -4323,7 +4466,6 @@ RealSystem = system + MissedSystem</code> <desc> <p><c><anno>Locking</anno></c> is one of the following:</p> <list type="bulleted"> - <item><c>false</c> (emulator without SMP support)</item> <item><c>port_level</c> (port-specific locking)</item> <item><c>driver_level</c> (driver-specific locking)</item> </list> @@ -4727,8 +4869,8 @@ RealSystem = system + MissedSystem</code> selected for execution. Notice however that this does <em>not</em> mean that no processes on priority <c>low</c> or <c>normal</c> can run when processes - are running on priority <c>high</c>. On the runtime - system with SMP support, more processes can be running + are running on priority <c>high</c>. When using multiple + schedulers, more processes can be running in parallel than processes on priority <c>high</c>. That is, a <c>low</c> and a <c>high</c> priority process can execute at the same time.</p> @@ -4743,10 +4885,8 @@ RealSystem = system + MissedSystem</code> execution.</p> <note> <p>Do not depend on the scheduling - to remain exactly as it is today. Scheduling, at least on - the runtime system with SMP support, is likely to be - changed in a future release to use available - processor cores better.</p> + to remain exactly as it is today. Scheduling is likely to be + changed in a future release to use available processor cores better.</p> </note> <p>There is <em>no</em> automatic mechanism for avoiding priority inversion, such as priority inheritance @@ -6258,8 +6398,7 @@ true</pre> <p><c>statistics(exact_reductions)</c> is a more expensive operation than <seealso marker="#statistics_reductions"> - statistics(reductions)</seealso>, - especially on an Erlang machine with SMP support.</p> + statistics(reductions)</seealso>.</p> </note> </desc> </func> @@ -6643,8 +6782,8 @@ ok than available logical processors, this value may be greater than <c>1.0</c>.</p> <p>As of ERTS version 9.0, the Erlang runtime system - with SMP support will as default have more schedulers - than logical processors. This due to the dirty schedulers.</p> + will as default have more schedulers than logical processors. + This due to the dirty schedulers.</p> <note> <p><c>scheduler_wall_time</c> is by default disabled. 
To enable it, use @@ -8014,9 +8153,7 @@ ok <taglist> <tag><c>disabled</c></tag> <item> - <p>The emulator has only one scheduler thread. The - emulator does not have SMP support, or have been - started with only one scheduler thread.</p> + <p>The emulator has been started with only one scheduler thread.</p> </item> <tag><c>blocked</c></tag> <item> @@ -8379,8 +8516,7 @@ ok </item> <tag><c>smp_support</c></tag> <item> - <p>Returns <c>true</c> if the emulator has been compiled - with SMP support, otherwise <c>false</c> is returned.</p> + <p>Returns <c>true</c>.</p> </item> <tag><marker id="system_info_start_time"/><c>start_time</c></tag> <item> @@ -8403,8 +8539,7 @@ ok </item> <tag><c>threads</c></tag> <item> - <p>Returns <c>true</c> if the emulator has been compiled - with thread support, otherwise <c>false</c> is returned.</p> + <p>Returns <c>true</c>.</p> </item> <tag><c>thread_pool_size</c></tag> <item> @@ -10523,9 +10658,9 @@ true</pre> <c>receive after 1 -> ok end</c>, except that <c>yield()</c> is faster.</p> <warning> - <p>There is seldom or never any need to use this BIF, - especially in the SMP emulator, as other processes have a - chance to run in another scheduler thread anyway. + <p>There is seldom or never any need to use this BIF + as other processes have a chance to run in another scheduler + thread anyway. Using this BIF without a thorough grasp of how the scheduler works can cause performance degradation.</p> </warning> diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml index 139057adb7..10b963a4e8 100644 --- a/erts/doc/src/notes.xml +++ b/erts/doc/src/notes.xml @@ -624,7 +624,7 @@ marker="erts:erl"><c>erl</c></seealso> command.</p> <p> See <url - href="http://pcre.org/original/changelog.txt"><c>http://pcre.org/original/changelog.txt</c></url> + href="http://pcre.org/original/changelog.txt">http://pcre.org/original/changelog.txt</url> for information about changes made to PCRE between the versions 8.33 and 8.40.</p> <p> @@ -3562,8 +3562,7 @@ <p>The previously introduced "eager check I/O" feature is now enabled by default.</p> <p>Eager check I/O can be disabled using the <c>erl</c> - command line argument: <seealso - marker="erl#+secio"><c>+secio false</c></seealso></p> + command line argument: <c>+secio false</c></p> <p>Characteristics impact compared to previous default:</p> <list> <item>Lower latency and smoother management of externally triggered I/O operations.</item> @@ -4344,8 +4343,7 @@ prioritized to the same extent as when eager check I/O is disabled.</p> <p>Eager check I/O can be enabled using the <c>erl</c> - command line argument: <seealso - marker="erl#+secio"><c>+secio true</c></seealso></p> + command line argument: <c>+secio true</c></p> <p>Characteristics impact when enabled:</p> <list> <item>Lower latency and smoother management of externally triggered I/O operations.</item> <item>A slightly reduced diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in index 1916b97a89..02a9a9a93b 100644 --- a/erts/emulator/Makefile.in +++ b/erts/emulator/Makefile.in @@ -28,10 +28,22 @@ HIPE_ENABLED=@HIPE_ENABLED@ DTRACE_ENABLED=@DTRACE_ENABLED@ DTRACE_ENABLED_2STEP=@DTRACE_ENABLED_2STEP@ USE_VM_PROBES=@USE_VM_PROBES@ +FPE=@FPE@ LIBS = @LIBS@ Z_LIB=@Z_LIB@ NO_INLINE_FUNCTIONS=false -OPCODE_TABLES = $(ERL_TOP)/lib/compiler/src/genop.tab beam/ops.tab +OPCODE_TABLES = $(ERL_TOP)/lib/compiler/src/genop.tab \ +beam/ops.tab \ +beam/macros.tab \ +beam/instrs.tab \ +beam/arith_instrs.tab \ +beam/bif_instrs.tab \ +beam/bs_instrs.tab \ +beam/float_instrs.tab \ 
+beam/map_instrs.tab \ +beam/msg_instrs.tab \ +beam/select_instrs.tab \ +beam/trace_instrs.tab DEBUG_CFLAGS = @DEBUG_CFLAGS@ CONFIGURE_CFLAGS = @CFLAGS@ @@ -51,7 +63,28 @@ ARFLAGS=rc OMIT_OMIT_FP=no TYPE_LIBS= -DIRTY_SCHEDULER_SUPPORT=@DIRTY_SCHEDULER_SUPPORT@ +PROFILE_COMPILER=@PROFILE_COMPILER@ +PROFILE_MARKER= +ifeq ($(PROFILE),generate) +PROFILE_MARKER=_pg +else +ifeq ($(PROFILE),use) +PROFILE_MARKER=_pu +endif +endif + +ifeq ($(PROFILE_COMPILER), gcc) +PROFILE_CORRECTION=@PROFILE_CORRECTION@ +PROFILE_GENERATE=-fprofile-generate +PROFILE_USE=-fprofile-use $(PROFILE_CORRECTION) +PROFILE_USE_DEPS=$(OBJDIR)/%_pu.gcda +endif +ifeq ($(PROFILE_COMPILER), clang) +PROFILE_GENERATE=-fprofile-instr-generate +PROFILE_USE=-fprofile-instr-use=$(OBJDIR)/default.profdata +PROFILE_USE_DEPS=$(OBJDIR)/default.profdata +endif + DIRTY_SCHEDULER_TEST=@DIRTY_SCHEDULER_TEST@ ifeq ($(TYPE),debug) @@ -178,31 +211,10 @@ endif # NOTE: When adding a new type update ERL_BUILD_TYPE_MARKER in sys/unix/sys.c # -FLAVOR=$(DEFAULT_FLAVOR) - -ifeq ($(FLAVOR),plain) - -DS_SUPPORT=no -DS_TEST=no - -FLAVOR_MARKER= -FLAVOR_FLAGS= -ENABLE_ALLOC_TYPE_VARS += nofrag -M4FLAGS += - -else # FLAVOR - -# If flavor isn't one of the above, it *is* smp flavor... override FLAVOR=smp FLAVOR_MARKER=.smp -FLAVOR_FLAGS=-DERTS_SMP -ENABLE_ALLOC_TYPE_VARS += smp nofrag -M4FLAGS += -DERTS_SMP=1 -ifeq ($(DIRTY_SCHEDULER_SUPPORT),yes) -THR_DEFS += -DERTS_DIRTY_SCHEDULERS -DS_SUPPORT=yes - +ENABLE_ALLOC_TYPE_VARS += nofrag ifeq ($(DIRTY_SCHEDULER_TEST),yes) DS_TEST=yes THR_DEFS += -DERTS_DIRTY_SCHEDULERS_TEST @@ -210,13 +222,6 @@ else # DIRTY_SCHEDULER_TEST DS_TEST=no endif # DIRTY_SCHEDULER_TEST -else # DIRTY_SCHEDULER_SUPPORT -DS_SUPPORT=no -DS_TEST=no -endif # DIRTY_SCHEDULER_SUPPORT - -endif # FLAVOR - TF_MARKER=$(TYPEMARKER)$(FLAVOR_MARKER) ifeq ($(TYPE)-@HAVE_VALGRIND@,valgrind-no) @@ -425,9 +430,20 @@ ifeq ($(TARGET), win32) EMULATOR_EXECUTABLE = beam$(TF_MARKER).dll else EMULATOR_EXECUTABLE = beam$(TF_MARKER) +PROFILE_EXECUTABLE = beam.prof$(TF_MARKER) endif CS_EXECUTABLE = erl_child_setup$(TYPEMARKER) +ifeq ($(PROFILE), generate) +EMULATOR_EXECUTABLE = $(PROFILE_EXECUTABLE) +ifeq ($(PROFILE_COMPILER), gcc) +PROFILE_LDFLAGS = -fprofile-generate +endif +ifeq ($(PROFILE_COMPILER), clang) +PROFILE_LDFLAGS = -fprofile-instr-generate +endif +endif + # ---------------------------------------------------------------------- ifeq ($(ERLANG_OSTYPE), unix) @@ -548,10 +564,11 @@ DTRACE_HEADERS = endif ifdef HIPE_ENABLED -OPCODE_TABLES += hipe/hipe_ops.tab +OPCODE_TABLES += hipe/hipe_ops.tab hipe/hipe_instrs.tab endif $(TTF_DIR)/beam_cold.h \ +$(TTF_DIR)/beam_warm.h \ $(TTF_DIR)/beam_hot.h \ $(TTF_DIR)/beam_opcodes.c \ $(TTF_DIR)/beam_opcodes.h \ @@ -563,6 +580,7 @@ $(TTF_DIR)/OPCODES-GENERATED: $(OPCODE_TABLES) utils/beam_makeops -wordsize @EXTERNAL_WORD_SIZE@ \ -outdir $(TTF_DIR) \ -DUSE_VM_PROBES=$(if $(USE_VM_PROBES),1,0) \ + -DNO_FPE_SIGNALS=$(if $filter(unreliable,$(FPE)),1,0) \ -emulator $(OPCODE_TABLES) && echo $? >$(TTF_DIR)/OPCODES-GENERATED GENERATE += $(TTF_DIR)/OPCODES-GENERATED @@ -599,7 +617,7 @@ $(HIPE_NBIF_FILES) \ : $(TTF_DIR)/TABLES-GENERATED $(TTF_DIR)/TABLES-GENERATED: $(ATOMS) $(DIRTY_BIFS) $(BIFS) utils/make_tables $(gen_verbose)LANG=C $(PERL) utils/make_tables -src $(TTF_DIR) -include $(TTF_DIR)\ - -ds $(DS_SUPPORT) -dst $(DS_TEST) -hipe $(HIPE) $(ATOMS) $(DIRTY_BIFS) $(BIFS) && echo $? >$(TTF_DIR)/TABLES-GENERATED + -dst $(DS_TEST) -hipe $(HIPE) $(ATOMS) $(DIRTY_BIFS) $(BIFS) && echo $? 
>$(TTF_DIR)/TABLES-GENERATED GENERATE += $(TTF_DIR)/TABLES-GENERATED $(TTF_DIR)/erl_alloc_types.h: beam/erl_alloc.types utils/make_alloc_types @@ -693,16 +711,33 @@ $(OBJDIR)/beams.$(RES_EXT): $(TARGET)/beams.rc endif -ifneq ($(filter tile-%,$(TARGET)),) -$(OBJDIR)/beam_emu.o: beam/beam_emu.c - $(V_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) \ - $(INCLUDES) -c $< -o $@ -else # Usually the same as the default rule, but certain platforms (e.g. win32) mix # different compilers $(OBJDIR)/beam_emu.o: beam/beam_emu.c $(V_EMU_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ -endif + +$(OBJDIR)/%_pg.o: beam/%.c + $(V_CC) $(PROFILE_GENERATE) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ +$(OBJDIR)/%_pu.o: beam/%.c $(PROFILE_USE_DEPS) + $(V_CC) $(PROFILE_USE) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ + +$(OBJDIR)/PROFILE: $(BINDIR)/$(PROFILE_EXECUTABLE) + $(V_at)echo " PROFILE ${PROFILE_EXECUTABLE}" + $(V_at)rm -f $(OBJDIR)/erl*.profraw + $(V_at)set -e; LLVM_PROFILE_FILE="$(OBJDIR)/erlc-%m.profraw" \ + ERL_FLAGS="-emu_type prof${TYPEMARKER} +S 1" $(ERLC) -DPGO \ + -o $(OBJDIR) test/estone_SUITE.erl > $(OBJDIR)/PROFILE_LOG + $(V_at)set -e; LLVM_PROFILE_FILE="$(OBJDIR)/erl-%m.profraw" \ + ERL_FLAGS="-emu_type prof${TYPEMARKER} +S 1" $(ERL) -pa $(OBJDIR) \ + -noshell -s estone_SUITE pgo -s init stop >> $(OBJDIR)/PROFILE_LOG + $(V_at)touch $@ + +$(OBJDIR)/%_pu.gcda: $(OBJDIR)/PROFILE + $(V_at)mv $(OBJDIR)/$*_pg.gcda $@ + $(V_at)touch $@ + +$(OBJDIR)/default.profdata: $(OBJDIR)/PROFILE + $(V_LLVM_PROFDATA) merge -output $@ $(OBJDIR)/*.profraw $(OBJDIR)/%.o: beam/%.c $(V_CC) $(subst -O2, $(GEN_OPT_FLGS), $(CFLAGS)) $(INCLUDES) -c $< -o $@ @@ -764,15 +799,23 @@ $(ERL_TOP)/lib/%.beam: INIT_OBJS = $(OBJDIR)/erl_main.o $(PRELOAD_OBJ) +# -fprofile-correction is needed in order to use PGO on erl_process +# as multiple threads execute in that file. 
+ifeq ($(PROFILE_CORRECTION),) +PROFILE_OBJS = $(OBJDIR)/beam_emu.o +RUN_OBJS = $(OBJDIR)/erl_process.o +else +PROFILE_OBJS = $(OBJDIR)/beam_emu.o $(OBJDIR)/erl_process.o +endif + EMU_OBJS = \ - $(OBJDIR)/beam_emu.o $(OBJDIR)/beam_opcodes.o \ + $(OBJDIR)/beam_opcodes.o \ $(OBJDIR)/beam_load.o $(OBJDIR)/beam_bif_load.o \ $(OBJDIR)/beam_debug.o $(OBJDIR)/beam_bp.o \ - $(OBJDIR)/beam_catches.o \ - $(OBJDIR)/code_ix.o \ + $(OBJDIR)/beam_catches.o $(OBJDIR)/code_ix.o \ $(OBJDIR)/beam_ranges.o -RUN_OBJS = \ +RUN_OBJS += \ $(OBJDIR)/erl_alloc.o $(OBJDIR)/erl_mtrace.o \ $(OBJDIR)/erl_alloc_util.o $(OBJDIR)/erl_goodfit_alloc.o \ $(OBJDIR)/erl_bestfit_alloc.o $(OBJDIR)/erl_afit_alloc.o \ @@ -788,7 +831,7 @@ RUN_OBJS = \ $(OBJDIR)/utils.o $(OBJDIR)/bif.o \ $(OBJDIR)/io.o $(OBJDIR)/erl_printf_term.o\ $(OBJDIR)/erl_debug.o $(OBJDIR)/erl_md5.o \ - $(OBJDIR)/erl_message.o $(OBJDIR)/erl_process.o \ + $(OBJDIR)/erl_message.o \ $(OBJDIR)/erl_process_dict.o $(OBJDIR)/erl_process_lock.o \ $(OBJDIR)/erl_port_task.o $(OBJDIR)/erl_arith.o \ $(OBJDIR)/time.o $(OBJDIR)/erl_time_sup.o \ @@ -930,21 +973,23 @@ ifdef HIPE_ENABLED EXTRA_BASE_OBJS += $(HIPE_OBJS) endif -BASE_OBJS = $(EMU_OBJS) $(RUN_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS) $(LTTNG_OBJS) +BASE_OBJS = $(EMU_OBJS) $(RUN_OBJS) $(OS_OBJS) $(EXTRA_BASE_OBJS) \ + $(LTTNG_OBJS) $(DRV_OBJS) $(NIF_OBJS) -before_DTrace_OBJS = $(BASE_OBJS) $(DRV_OBJS) $(NIF_OBJS) +PROF_OBJS = $(patsubst %.o,%$(PROFILE_MARKER).o,$(PROFILE_OBJS)) $(BASE_OBJS) + +OBJS = $(PROF_OBJS) -DTRACE_OBJS = ifdef DTRACE_ENABLED_2STEP -DTRACE_OBJS = $(OBJDIR)/erlang_dtrace.o -$(OBJDIR)/erlang_dtrace.o: $(before_DTrace_OBJS) $(TARGET)/erlang_dtrace.h +# The $(PROFILE_MARKER) is placed in the object file name in order to +# make sure we re-compile with the new object files for the profiled emulator +OBJS += $(OBJDIR)/erlang$(PROFILE_MARKER)_dtrace.o +$(OBJDIR)/erlang$(PROFILE_MARKER)_dtrace.o: $(PROF_OBJS) $(TARGET)/erlang_dtrace.h dtrace -G -C -Ibeam \ -s beam/erlang_dtrace.d \ - -o $@ $(before_DTrace_OBJS) + -o $@ $(PROF_OBJS) endif -OBJS = $(before_DTrace_OBJS) $(DTRACE_OBJS) - $(INIT_OBJS): $(TTF_DIR)/GENERATED $(OBJS): $(TTF_DIR)/GENERATED @@ -1036,8 +1081,8 @@ $(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS) else $(BINDIR)/$(EMULATOR_EXECUTABLE): $(INIT_OBJS) $(OBJS) $(DEPLIBS) - $(ld_verbose)$(PURIFY) $(LD) -o $(BINDIR)/$(EMULATOR_EXECUTABLE) \ - $(HIPEBEAMLDFLAGS) $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS) \ + $(ld_verbose)$(PURIFY) $(LD) -o $@ \ + $(HIPEBEAMLDFLAGS) $(PROFILE_LDFLAGS) $(LDFLAGS) $(DEXPORT) $(INIT_OBJS) $(OBJS) \ $(STATIC_NIF_LIBS) $(STATIC_DRIVER_LIBS) $(LIBS) endif diff --git a/erts/emulator/beam/arith_instrs.tab b/erts/emulator/beam/arith_instrs.tab new file mode 100644 index 0000000000..7c9cd47e28 --- /dev/null +++ b/erts/emulator/beam/arith_instrs.tab @@ -0,0 +1,399 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// %CopyrightEnd% +// + +OUTLINED_ARITH_2(Fail, Live, Name, BIF, Op1, Op2, Dst) { + Eterm result; + Uint live = $Live; + HEAVY_SWAPOUT; + reg[live] = $Op1; + reg[live+1] = $Op2; + result = erts_gc_$Name (c_p, reg, live); + HEAVY_SWAPIN; + ERTS_HOLE_CHECK(c_p); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + $BIF_ERROR_ARITY_2($Fail, $BIF, reg[live], reg[live+1]); +} + + +i_plus := plus.fetch.execute; + +plus.head() { + Eterm PlusOp1, PlusOp2; +} + +plus.fetch(Op1, Op2) { + PlusOp1 = $Op1; + PlusOp2 = $Op2; +} + +plus.execute(Fail, Live, Dst) { + if (ERTS_LIKELY(is_both_small(PlusOp1, PlusOp2))) { + Sint i = signed_val(PlusOp1) + signed_val(PlusOp2); + ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i)); + if (ERTS_LIKELY(MY_IS_SSMALL(i))) { + $Dst = make_small(i); + $NEXT0(); + } + } + $OUTLINED_ARITH_2($Fail, $Live, mixed_plus, BIF_splus_2, PlusOp1, PlusOp2, $Dst); +} + +i_minus := minus.fetch.execute; + +minus.head() { + Eterm MinusOp1, MinusOp2; +} + +minus.fetch(Op1, Op2) { + MinusOp1 = $Op1; + MinusOp2 = $Op2; +} + +minus.execute(Fail, Live, Dst) { + if (ERTS_LIKELY(is_both_small(MinusOp1, MinusOp2))) { + Sint i = signed_val(MinusOp1) - signed_val(MinusOp2); + ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i)); + if (ERTS_LIKELY(MY_IS_SSMALL(i))) { + $Dst = make_small(i); + $NEXT0(); + } + } + $OUTLINED_ARITH_2($Fail, $Live, mixed_minus, BIF_sminus_2, MinusOp1, MinusOp2, $Dst); +} + +i_increment := increment.fetch.execute; + +increment.head() { + Eterm increment_reg_val; + Eterm increment_val; + Uint live; + Eterm result; +} + +increment.fetch(Src) { + increment_reg_val = $Src; +} + +increment.execute(IncrementVal, Live, Dst) { + increment_val = $IncrementVal; + if (ERTS_LIKELY(is_small(increment_reg_val))) { + Sint i = signed_val(increment_reg_val) + increment_val; + ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i)); + if (ERTS_LIKELY(MY_IS_SSMALL(i))) { + $Dst = make_small(i); + $NEXT0(); + } + } + live = $Live; + HEAVY_SWAPOUT; + reg[live] = increment_reg_val; + reg[live+1] = make_small(increment_val); + result = erts_gc_mixed_plus(c_p, reg, live); + HEAVY_SWAPIN; + ERTS_HOLE_CHECK(c_p); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue)); + goto find_func_info; +} + +i_times(Fail, Live, Op1, Op2, Dst) { + Eterm op1 = $Op1; + Eterm op2 = $Op2; + $OUTLINED_ARITH_2($Fail, $Live, mixed_times, BIF_stimes_2, op1, op2, $Dst); +} + +i_m_div(Fail, Live, Op1, Op2, Dst) { + Eterm op1 = $Op1; + Eterm op2 = $Op2; + $OUTLINED_ARITH_2($Fail, $Live, mixed_div, BIF_div_2, op1, op2, $Dst); +} + +i_int_div(Fail, Live, Op1, Op2, Dst) { + Eterm op1 = $Op1; + Eterm op2 = $Op2; + if (ERTS_UNLIKELY(op2 == SMALL_ZERO)) { + c_p->freason = BADARITH; + $BIF_ERROR_ARITY_2($Fail, BIF_intdiv_2, op1, op2); + } else if (ERTS_LIKELY(is_both_small(op1, op2))) { + Sint ires = signed_val(op1) / signed_val(op2); + if (ERTS_LIKELY(MY_IS_SSMALL(ires))) { + $Dst = make_small(ires); + $NEXT0(); + } + } + $OUTLINED_ARITH_2($Fail, $Live, int_div, BIF_intdiv_2, op1, op2, $Dst); +} + +i_rem := rem.fetch.execute; + +rem.head() { + Eterm RemOp1, RemOp2; +} + +rem.fetch(Src1, Src2) { + RemOp1 = $Src1; + RemOp2 = $Src2; +} + +rem.execute(Fail, Live, Dst) { + if (ERTS_UNLIKELY(RemOp2 == SMALL_ZERO)) { + c_p->freason = BADARITH; + $BIF_ERROR_ARITY_2($Fail, BIF_rem_2, RemOp1, RemOp2); + } else if (ERTS_LIKELY(is_both_small(RemOp1, RemOp2))) { + $Dst = make_small(signed_val(RemOp1) % signed_val(RemOp2)); + 
$NEXT0(); + } else { + $OUTLINED_ARITH_2($Fail, $Live, int_rem, BIF_rem_2, RemOp1, RemOp2, $Dst); + } +} + +i_band := band.fetch.execute; + +band.head() { + Eterm BandOp1, BandOp2; +} + +band.fetch(Src1, Src2) { + BandOp1 = $Src1; + BandOp2 = $Src2; +} + +band.execute(Fail, Live, Dst) { + if (ERTS_LIKELY(is_both_small(BandOp1, BandOp2))) { + /* + * No need to untag -- TAG & TAG == TAG. + */ + $Dst = BandOp1 & BandOp2; + $NEXT0(); + } + $OUTLINED_ARITH_2($Fail, $Live, band, BIF_band_2, BandOp1, BandOp2, $Dst); +} + +i_bor(Fail, Live, Src1, Src2, Dst) { + if (ERTS_LIKELY(is_both_small($Src1, $Src2))) { + /* + * No need to untag -- TAG | TAG == TAG. + */ + $Dst = $Src1 | $Src2; + $NEXT0(); + } + $OUTLINED_ARITH_2($Fail, $Live, bor, BIF_bor_2, $Src1, $Src2, $Dst); +} + +i_bxor(Fail, Live, Src1, Src2, Dst) { + if (ERTS_LIKELY(is_both_small($Src1, $Src2))) { + /* + * TAG ^ TAG == 0. + * + * Therefore, we perform the XOR operation on the tagged values, + * and OR in the tag bits. + */ + $Dst = ($Src1 ^ $Src2) | make_small(0); + $NEXT0(); + } + $OUTLINED_ARITH_2($Fail, $Live, bxor, BIF_bxor_2, $Src1, $Src2, $Dst); +} + +i_bsl := shift.setup_bsl.execute; +i_bsr := shift.setup_bsr.execute; + +shift.head() { + Eterm Op1, Op2; + Sint shift_left_count; +} + +shift.setup_bsr(Src1, Src2) { + Op1 = $Src1; + Op2 = $Src2; + shift_left_count = 0; + if (ERTS_LIKELY(is_small(Op2))) { + shift_left_count = -signed_val(Op2); + } else if (is_big(Op2)) { + /* + * N bsr NegativeBigNum == N bsl MAX_SMALL + * N bsr PositiveBigNum == N bsl MIN_SMALL + */ + shift_left_count = make_small(bignum_header_is_neg(*big_val(Op2)) ? + MAX_SMALL : MIN_SMALL); + } +} + +shift.setup_bsl(Src1, Src2) { + Op1 = $Src1; + Op2 = $Src2; + shift_left_count = 0; + if (ERTS_LIKELY(is_small(Op2))) { + shift_left_count = signed_val(Op2); + } else if (is_big(Op2)) { + if (bignum_header_is_neg(*big_val(Op2))) { + /* + * N bsl NegativeBigNum is either 0 or -1, depending on + * the sign of N. Since we don't believe this case + * is common, do the calculation with the minimum + * amount of code. + */ + shift_left_count = MIN_SMALL; + } else if (is_integer(Op1)) { + /* + * N bsl PositiveBigNum is too large to represent. + */ + shift_left_count = MAX_SMALL; + } + } +} + +shift.execute(Fail, Live, Dst) { + Uint big_words_needed; + + if (ERTS_LIKELY(is_small(Op1))) { + Sint int_res = signed_val(Op1); + if (ERTS_UNLIKELY(shift_left_count == 0 || int_res == 0)) { + if (ERTS_UNLIKELY(is_not_integer(Op2))) { + goto shift_error; + } + if (int_res == 0) { + $Dst = Op1; + $NEXT0(); + } + } else if (shift_left_count < 0) { /* Right shift */ + Eterm bsr_res; + shift_left_count = -shift_left_count; + if (shift_left_count >= SMALL_BITS-1) { + bsr_res = (int_res < 0) ? SMALL_MINUS_ONE : SMALL_ZERO; + } else { + bsr_res = make_small(int_res >> shift_left_count); + } + $Dst = bsr_res; + $NEXT0(); + } else if (shift_left_count < SMALL_BITS-1) { /* Left shift */ + if ((int_res > 0 && + ((~(Uint)0 << ((SMALL_BITS-1)-shift_left_count)) & int_res) == 0) || + ((~(Uint)0 << ((SMALL_BITS-1)-shift_left_count)) & ~int_res) == 0) { + $Dst = make_small(int_res << shift_left_count); + $NEXT0(); + } + } + big_words_needed = 1; /* big_size(small_to_big(Op1)) */ + goto big_shift; + } else if (is_big(Op1)) { + if (shift_left_count == 0) { + if (is_not_integer(Op2)) { + goto shift_error; + } + $Dst = Op1; + $NEXT0(); + } + big_words_needed = big_size(Op1); + + big_shift: + if (shift_left_count > 0) { /* Left shift. 
*/ + big_words_needed += (shift_left_count / D_EXP); + } else { /* Right shift. */ + if (big_words_needed <= (-shift_left_count / D_EXP)) { + big_words_needed = 3; /* ??? */ + } else { + big_words_needed -= (-shift_left_count / D_EXP); + } + } + { + Eterm tmp_big[2]; + Sint big_need_size = BIG_NEED_SIZE(big_words_needed+1); + + /* + * Slightly conservative check the size to avoid + * allocating huge amounts of memory for bignums that + * clearly would overflow the arity in the header + * word. + */ + if (big_need_size-8 > BIG_ARITY_MAX) { + $SYSTEM_LIMIT($Fail); + } + $GC_TEST_PRESERVE(big_need_size+1, $Live, Op1); + if (is_small(Op1)) { + Op1 = small_to_big(signed_val(Op1), tmp_big); + } + Op1 = big_lshift(Op1, shift_left_count, HTOP); + if (is_big(Op1)) { + HTOP += bignum_header_arity(*HTOP) + 1; + } + HEAP_SPACE_VERIFIED(0); + if (ERTS_UNLIKELY(is_nil(Op1))) { + /* + * This result must have been only slighty larger + * than allowed since it wasn't caught by the + * previous test. + */ + $SYSTEM_LIMIT($Fail); + } + ERTS_HOLE_CHECK(c_p); + $REFRESH_GEN_DEST(); + $Dst = Op1; + $NEXT0(); + } + } + + /* + * One or more non-integer arguments. + */ + shift_error: + c_p->freason = BADARITH; + if ($Fail) { + $FAIL($Fail); + } else { + reg[0] = Op1; + reg[1] = Op2; + SWAPOUT; + if (I[0] == (BeamInstr) OpCode(i_bsl_ssjtd)) { + I = handle_error(c_p, I, reg, &bif_export[BIF_bsl_2]->info.mfa); + } else { + ASSERT(I[0] == (BeamInstr) OpCode(i_bsr_ssjtd)); + I = handle_error(c_p, I, reg, &bif_export[BIF_bsr_2]->info.mfa); + } + goto post_error_handling; + } +} + +i_int_bnot(Fail, Src, Live, Dst) { + Eterm bnot_val = $Src; + if (ERTS_LIKELY(is_small(bnot_val))) { + bnot_val = make_small(~signed_val(bnot_val)); + } else { + Uint live = $Live; + HEAVY_SWAPOUT; + reg[live] = bnot_val; + bnot_val = erts_gc_bnot(c_p, reg, live); + HEAVY_SWAPIN; + ERTS_HOLE_CHECK(c_p); + if (ERTS_UNLIKELY(is_nil(bnot_val))) { + $BIF_ERROR_ARITY_1($Fail, BIF_bnot_1, reg[live]); + } + $REFRESH_GEN_DEST(); + } + $Dst = bnot_val; +} diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c index 38e02c386f..bbe1cb3e11 100644 --- a/erts/emulator/beam/atom.c +++ b/erts/emulator/beam/atom.c @@ -34,20 +34,18 @@ IndexTable erts_atom_table; /* The index table */ -#include "erl_smp.h" +static erts_rwmtx_t atom_table_lock; -static erts_smp_rwmtx_t atom_table_lock; - -#define atom_read_lock() erts_smp_rwmtx_rlock(&atom_table_lock) -#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock) -#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock) -#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock) +#define atom_read_lock() erts_rwmtx_rlock(&atom_table_lock) +#define atom_read_unlock() erts_rwmtx_runlock(&atom_table_lock) +#define atom_write_lock() erts_rwmtx_rwlock(&atom_table_lock) +#define atom_write_unlock() erts_rwmtx_rwunlock(&atom_table_lock) #if 0 #define ERTS_ATOM_PUT_OPS_STAT #endif #ifdef ERTS_ATOM_PUT_OPS_STAT -static erts_smp_atomic_t atom_put_ops; +static erts_atomic_t atom_put_ops; #endif /* Functions for allocating space for the ext of atoms. 
We do not @@ -76,7 +74,7 @@ void atom_info(fmtfn_t to, void *to_arg) index_info(to, to_arg, &erts_atom_table); #ifdef ERTS_ATOM_PUT_OPS_STAT erts_print(to, to_arg, "atom_put_ops: %ld\n", - erts_smp_atomic_read_nob(&atom_put_ops)); + erts_atomic_read_nob(&atom_put_ops)); #endif if (lock) @@ -246,7 +244,7 @@ erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc) int aix; #ifdef ERTS_ATOM_PUT_OPS_STAT - erts_smp_atomic_inc_nob(&atom_put_ops); + erts_atomic_inc_nob(&atom_put_ops); #endif if (tlen < 0) { @@ -359,32 +357,24 @@ am_atom_put(const char* name, int len) int atom_table_size(void) { int ret; -#ifdef ERTS_SMP int lock = !ERTS_IS_CRASH_DUMPING; if (lock) atom_read_lock(); -#endif ret = erts_atom_table.entries; -#ifdef ERTS_SMP if (lock) atom_read_unlock(); -#endif return ret; } int atom_table_sz(void) { int ret; -#ifdef ERTS_SMP int lock = !ERTS_IS_CRASH_DUMPING; if (lock) atom_read_lock(); -#endif ret = index_table_sz(&erts_atom_table); -#ifdef ERTS_SMP if (lock) atom_read_unlock(); -#endif return ret; } @@ -412,19 +402,15 @@ erts_atom_get(const char *name, int len, Eterm* ap, ErtsAtomEncoding enc) void erts_atom_get_text_space_sizes(Uint *reserved, Uint *used) { -#ifdef ERTS_SMP int lock = !ERTS_IS_CRASH_DUMPING; if (lock) atom_read_lock(); -#endif if (reserved) *reserved = reserved_atom_space; if (used) *used = atom_space; -#ifdef ERTS_SMP if (lock) atom_read_unlock(); -#endif } void @@ -433,16 +419,16 @@ init_atom_table(void) HashFunctions f; int i; Atom a; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; #ifdef ERTS_ATOM_PUT_OPS_STAT - erts_smp_atomic_init_nob(&atom_put_ops, 0); + erts_atomic_init_nob(&atom_put_ops, 0); #endif - erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL, + erts_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); f.hash = (H_FUN) atom_hash; @@ -505,4 +491,4 @@ Uint erts_get_atom_limit(void) { return erts_atom_table.limit; -}
\ No newline at end of file +} diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index a44d23b181..fc55b687d4 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -217,6 +217,8 @@ atom discard atom display_items atom dist atom dist_cmd +atom dist_ctrl_put_data +atom dist_data atom Div='/' atom div atom dlink diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 023ee3ef4b..e48415ecc4 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -50,7 +50,7 @@ static struct { Eterm module; - erts_smp_mtx_t mtx; + erts_mtx_t mtx; Export *pending_purge_lambda; Eterm *sprocs; Eterm def_sprocs[10]; @@ -65,12 +65,10 @@ static struct { Process *erts_code_purger = NULL; -#ifdef ERTS_DIRTY_SCHEDULERS Process *erts_dirty_process_code_checker; -#endif -erts_smp_atomic_t erts_copy_literal_area__; +erts_atomic_t erts_copy_literal_area__; #define ERTS_SET_COPY_LITERAL_AREA(LA) \ - erts_smp_atomic_set_nob(&erts_copy_literal_area__, \ + erts_atomic_set_nob(&erts_copy_literal_area__, \ (erts_aint_t) (LA)) Process *erts_literal_area_collector = NULL; @@ -81,7 +79,7 @@ struct ErtsLiteralAreaRef_ { }; struct { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; ErtsLiteralAreaRef *first; ErtsLiteralAreaRef *last; } release_literal_areas; @@ -97,7 +95,7 @@ init_purge_state(void) { purge_state.module = THE_NON_VALUE; - erts_smp_mtx_init(&purge_state.mtx, "purge_state", NIL, + erts_mtx_init(&purge_state.mtx, "purge_state", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); purge_state.pending_purge_lambda = @@ -119,12 +117,12 @@ init_purge_state(void) void erts_beam_bif_load_init(void) { - erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL, + erts_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); release_literal_areas.first = NULL; release_literal_areas.last = NULL; - erts_smp_atomic_init_nob(&erts_copy_literal_area__, + erts_atomic_init_nob(&erts_copy_literal_area__, (erts_aint_t) NULL); init_purge_state(); @@ -172,8 +170,8 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3) BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); modp = erts_get_module(mod, erts_active_code_ix()); @@ -197,8 +195,8 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3) else { erts_abort_staging_code_ix(); } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); return res; #endif @@ -265,7 +263,6 @@ struct m { }; static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int, int); -#ifdef ERTS_SMP static void smp_code_ix_commiter(void*); static struct /* Protected by code_write_permission */ @@ -273,7 +270,6 @@ static struct /* Protected by code_write_permission */ Process* stager; ErtsThrPrgrLaterOp lop; } committer_state; -#endif static Eterm exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions) @@ -401,8 +397,8 @@ finish_loading_1(BIF_ALIST_1) erts_is_default_trace_enabled() || IF_HIPE(hipe_need_blocking(p[i].modp))) { /* tracing or hipe need thread blocking */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, 
ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); is_blocking = 1; break; } @@ -465,9 +461,7 @@ static Eterm staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking, struct m* mods, int nmods, int free_mods) { -#ifdef ERTS_SMP if (is_blocking || !commit) -#endif { if (commit) { int i; @@ -491,13 +485,12 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking, erts_free(ERTS_ALC_T_LOADER_TMP, mods); } if (is_blocking) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } erts_release_code_write_permission(); return res; } -#ifdef ERTS_SMP else { ASSERT(is_value(res)); @@ -522,11 +515,9 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking, */ ERTS_BIF_YIELD_RETURN(c_p, res); } -#endif } -#ifdef ERTS_SMP static void smp_code_ix_commiter(void* null) { Process* p = committer_state.stager; @@ -536,14 +527,13 @@ static void smp_code_ix_commiter(void* null) committer_state.stager = NULL; #endif erts_release_code_write_permission(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(p)) { erts_resume(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); erts_proc_dec_refc(p); } -#endif /* ERTS_SMP */ @@ -613,9 +603,6 @@ badarg: BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2) { -#if !defined(ERTS_DIRTY_SCHEDULERS) - BIF_ERROR(BIF_P, EXC_NOTSUP); -#else Process *rp; int reds = 0; Eterm res; @@ -640,12 +627,11 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2) res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls); if (BIF_P != rp) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); ASSERT(is_value(res)); BIF_RET2(res, reds); -#endif } BIF_RETTYPE delete_module_1(BIF_ALIST_1) @@ -683,8 +669,8 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1) modp->curr.num_traced_exports > 0 || IF_HIPE(hipe_need_blocking(modp))) { /* tracing or hipe need to go single threaded */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); is_blocking = 1; if (modp->curr.num_breakpoints) { erts_clear_module_break(modp); @@ -789,16 +775,16 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) /* ToDo: Use code_ix staging instead of thread blocking */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); code_ix = erts_active_code_ix(); modp = erts_get_module(BIF_ARG_1, code_ix); if (!modp || !modp->on_load || !modp->on_load->code_hdr) { error: - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); BIF_ERROR(BIF_P, BADARG); } @@ -869,8 +855,8 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) ep->beam[1] = 0; } } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); BIF_RET(am_true); } @@ -931,9 +917,9 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed * any other heap than the message it self. 
*/ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); for (msgp = c_p->msg.first; msgp; msgp = msgp->next) { ErlHeapFragment *hf; @@ -1063,10 +1049,8 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed return_ok: -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))) c_p->flags &= ~F_DIRTY_CLA; -#endif return am_ok; @@ -1081,10 +1065,8 @@ literal_gc: *redsp += erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize, oh, fcalls); -#ifdef ERTS_DIRTY_SCHEDULERS if (c_p->flags & F_DIRTY_CLA) return THE_NON_VALUE; -#endif return am_ok; } @@ -1314,7 +1296,6 @@ hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp, } } -#ifdef ERTS_SMP ErtsThrPrgrLaterOp later_literal_area_switch; @@ -1336,13 +1317,12 @@ static void complete_literal_area_switch(void *literal_area) { Process *p = erts_literal_area_collector; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); erts_resume(p, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); if (literal_area) erts_release_literal_area((ErtsLiteralArea *) literal_area); } -#endif BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) { @@ -1352,7 +1332,7 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) if (BIF_P != erts_literal_area_collector) BIF_ERROR(BIF_P, EXC_NOTSUP); - erts_smp_mtx_lock(&release_literal_areas.mtx); + erts_mtx_lock(&release_literal_areas.mtx); la_ref = release_literal_areas.first; if (la_ref) { @@ -1361,14 +1341,13 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) release_literal_areas.last = NULL; } - erts_smp_mtx_unlock(&release_literal_areas.mtx); + erts_mtx_unlock(&release_literal_areas.mtx); unused_la = ERTS_COPY_LITERAL_AREA(); if (!la_ref) { ERTS_SET_COPY_LITERAL_AREA(NULL); if (unused_la) { -#ifdef ERTS_SMP ErtsLaterReleasLiteralArea *lrlap; lrlap = erts_alloc(ERTS_ALC_T_RELEASE_LAREA, sizeof(ErtsLaterReleasLiteralArea)); @@ -1382,9 +1361,6 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) + ((unused_la->end - &unused_la->start[0]) - 1)*(sizeof(Eterm)))); -#else - erts_release_literal_area(unused_la); -#endif } BIF_RET(am_false); } @@ -1393,16 +1369,11 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) erts_free(ERTS_ALC_T_LITERAL_REF, la_ref); -#ifdef ERTS_SMP erts_schedule_thr_prgr_later_op(complete_literal_area_switch, unused_la, &later_literal_area_switch); erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD_RETURN(BIF_P, am_true); -#else - erts_release_literal_area(unused_la); - BIF_RET(am_true); -#endif } @@ -1428,7 +1399,7 @@ erts_purge_state_add_fun(ErlFunEntry *fe) Export * erts_suspend_process_on_pending_purge_lambda(Process *c_p, ErlFunEntry* fe) { - erts_smp_mtx_lock(&purge_state.mtx); + erts_mtx_lock(&purge_state.mtx); if (purge_state.module == fe->module) { /* * The process c_p is about to call a fun in the code @@ -1454,7 +1425,7 @@ erts_suspend_process_on_pending_purge_lambda(Process *c_p, ErlFunEntry* fe) erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); ERTS_VBUMP_ALL_REDS(c_p); } - erts_smp_mtx_unlock(&purge_state.mtx); + erts_mtx_unlock(&purge_state.mtx); return purge_state.pending_purge_lambda; } @@ -1464,9 +1435,9 @@ 
finalize_purge_operation(Process *c_p, int succeded) Uint ix; if (c_p) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_mtx_lock(&purge_state.mtx); + erts_mtx_lock(&purge_state.mtx); ASSERT(purge_state.module != THE_NON_VALUE); @@ -1482,14 +1453,14 @@ finalize_purge_operation(Process *c_p, int succeded) ERTS_PROC_LOCK_STATUS); if (rp) { erts_resume(rp, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); } } - erts_smp_mtx_unlock(&purge_state.mtx); + erts_mtx_unlock(&purge_state.mtx); if (c_p) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); if (purge_state.sprocs != &purge_state.def_sprocs[0]) { erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs); @@ -1508,7 +1479,6 @@ finalize_purge_operation(Process *c_p, int succeded) purge_state.fe_ix = 0; } -#ifdef ERTS_SMP static ErtsThrPrgrLaterOp purger_lop_data; @@ -1516,9 +1486,9 @@ static void resume_purger(void *unused) { Process *p = erts_code_purger; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); erts_resume(p, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } static void @@ -1531,7 +1501,6 @@ finalize_purge_abort(void *unused) resume_purger(NULL); } -#endif /* ERTS_SMP */ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) { @@ -1590,9 +1559,9 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) else { BeamInstr* code; BeamInstr* end; - erts_smp_mtx_lock(&purge_state.mtx); + erts_mtx_lock(&purge_state.mtx); purge_state.module = BIF_ARG_1; - erts_smp_mtx_unlock(&purge_state.mtx); + erts_mtx_unlock(&purge_state.mtx); res = am_true; code = (BeamInstr*) modp->old.code_hdr; end = (BeamInstr *)((char *)code + modp->old.code_length); @@ -1606,9 +1575,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) } } -#ifndef ERTS_SMP - BIF_RET(res); -#else if (res != am_true) BIF_RET(res); else { @@ -1627,7 +1593,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD_RETURN(BIF_P, am_true); } -#endif } case am_abort: { @@ -1641,11 +1606,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix); -#ifndef ERTS_SMP - erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix); - finalize_purge_operation(BIF_P, 0); - BIF_RET(am_false); -#else /* * We need to restore the code addresses of the funs in * two stages in order to ensure that we do not get any @@ -1661,7 +1621,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) &purger_lop_data); erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD_RETURN(BIF_P, am_false); -#endif } case am_complete: { @@ -1713,8 +1672,8 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) || IF_HIPE(hipe_purge_need_blocking(modp))) { /* ToDo: Do unload nif without blocking */ erts_rwunlock_old_code(code_ix); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); is_blocking = 1; erts_rwlock_old_code(code_ix); if (modp->old.nif) { @@ -1752,8 +1711,8 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) erts_rwunlock_old_code(code_ix); } if (is_blocking) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + 
erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } erts_release_code_write_permission(); @@ -1766,7 +1725,7 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) sizeof(ErtsLiteralAreaRef)); ref->literal_area = literals; ref->next = NULL; - erts_smp_mtx_lock(&release_literal_areas.mtx); + erts_mtx_lock(&release_literal_areas.mtx); if (release_literal_areas.last) { release_literal_areas.last->next = ref; release_literal_areas.last = ref; @@ -1775,7 +1734,7 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) release_literal_areas.first = ref; release_literal_areas.last = ref; } - erts_smp_mtx_unlock(&release_literal_areas.mtx); + erts_mtx_unlock(&release_literal_areas.mtx); erts_queue_message(erts_literal_area_collector, 0, erts_alloc_message(0, NULL), @@ -1812,7 +1771,7 @@ delete_code(Module* modp) } else if (ep->beam[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); ASSERT(modp->curr.num_traced_exports > 0); DBG_TRACE_MFA_P(&ep->info.mfa, "export trace cleared, code_ix=%d", code_ix); diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index 950639f7ae..49ec59c989 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -46,15 +46,15 @@ #define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SZ)) #define Free(P) erts_free(ERTS_ALC_T_BPD, (P)) -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ +#if defined(ERTS_ENABLE_LOCK_CHECK) +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\ __FILE__, __LINE__) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) #else -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) +# define ERTS_REQ_PROC_MAIN_LOCK(P) +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) #endif #define ERTS_BPF_LOCAL_TRACE 0x01 @@ -73,11 +73,9 @@ extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */ extern BeamInstr beam_exception_trace[1]; /* OpCode(i_exception_trace) */ extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ -erts_smp_atomic32_t erts_active_bp_index; -erts_smp_atomic32_t erts_staging_bp_index; -#ifdef ERTS_DIRTY_SCHEDULERS -erts_smp_mtx_t erts_dirty_bp_ix_mtx; -#endif +erts_atomic32_t erts_active_bp_index; +erts_atomic32_t erts_staging_bp_index; +erts_mtx_t erts_dirty_bp_ix_mtx; /* * Inlined helpers @@ -94,22 +92,18 @@ acquire_bp_sched_ix(Process *c_p) { ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); ASSERT(esdp); -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { - erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx); + erts_mtx_lock(&erts_dirty_bp_ix_mtx); return (Uint32) erts_no_schedulers; } -#endif return (Uint32) esdp->no - 1; } static ERTS_INLINE void release_bp_sched_ix(Uint32 ix) { -#ifdef ERTS_DIRTY_SCHEDULERS if (ix == (Uint32) erts_no_schedulers) - erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx); -#endif + erts_mtx_unlock(&erts_dirty_bp_ix_mtx); } @@ -162,12 +156,10 @@ static void bp_hash_delete(bp_time_hash_t *hash); void erts_bp_init(void) { - erts_smp_atomic32_init_nob(&erts_active_bp_index, 0); - erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1); -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL, + erts_atomic32_init_nob(&erts_active_bp_index, 0); + 
erts_atomic32_init_nob(&erts_staging_bp_index, 1); + erts_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); -#endif } @@ -306,7 +298,7 @@ erts_consolidate_bp_data(BpFunctions* f, int local) Uint i; Uint n = f->matched; - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); for (i = 0; i < n; i++) { consolidate_bp_data(fs[i].mod, fs[i].ci, local); @@ -318,7 +310,7 @@ erts_consolidate_bif_bp_data(void) { int i; - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); for (i = 0; i < BIF_SIZE; i++) { Export *ep = bif_export[i]; consolidate_bp_data(0, &ep->info, 0); @@ -393,17 +385,17 @@ consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local) } if (flags & ERTS_BPF_META_TRACE) { dst->meta_tracer = src->meta_tracer; - erts_smp_refc_inc(&dst->meta_tracer->refc, 1); + erts_refc_inc(&dst->meta_tracer->refc, 1); dst->meta_ms = src->meta_ms; MatchSetRef(dst->meta_ms); } if (flags & ERTS_BPF_COUNT) { dst->count = src->count; - erts_smp_refc_inc(&dst->count->refc, 1); + erts_refc_inc(&dst->count->refc, 1); } if (flags & ERTS_BPF_TIME_TRACE) { dst->time = src->time; - erts_smp_refc_inc(&dst->time->refc, 1); + erts_refc_inc(&dst->time->refc, 1); ASSERT(dst->time->hash); } } @@ -414,8 +406,8 @@ erts_commit_staged_bp(void) ErtsBpIndex staging = erts_staging_bp_ix(); ErtsBpIndex active = erts_active_bp_ix(); - erts_smp_atomic32_set_nob(&erts_active_bp_index, staging); - erts_smp_atomic32_set_nob(&erts_staging_bp_index, active); + erts_atomic32_set_nob(&erts_active_bp_index, staging); + erts_atomic32_set_nob(&erts_staging_bp_index, active); } void @@ -575,7 +567,7 @@ erts_clear_mtrace_bif(ErtsCodeInfo *ci) void erts_clear_debug_break(BpFunctions* f) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); clear_break(f, ERTS_BPF_DEBUG); } @@ -603,7 +595,7 @@ erts_clear_module_break(Module *modp) { Uint n; Uint i; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); ASSERT(modp); code_hdr = modp->curr.code_hdr; if (!code_hdr) { @@ -633,7 +625,7 @@ erts_clear_module_break(Module *modp) { void erts_clear_export_break(Module* modp, ErtsCodeInfo *ci) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); clear_function_break(ci, ERTS_BPF_ALL); erts_commit_staged_bp(); @@ -679,12 +671,12 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg) if (bp_flags & ERTS_BPF_META_TRACE) { ErtsTracer old_tracer, new_tracer; - old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer); + old_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer); new_tracer = do_call_trace(c_p, info, reg, 1, bp->meta_ms, old_tracer); if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) { - if (old_tracer == erts_smp_atomic_cmpxchg_acqb( + if (old_tracer == erts_atomic_cmpxchg_acqb( &bp->meta_tracer->tracer, (erts_aint_t)new_tracer, (erts_aint_t)old_tracer)) { @@ -696,7 +688,7 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg) } if (bp_flags & ERTS_BPF_COUNT_ACTIVE) { - erts_smp_atomic_inc_nob(&bp->count->acount); + erts_atomic_inc_nob(&bp->count->acount); } if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) { @@ -753,7 +745,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) GenericBpData* bp = NULL; Uint bp_flags = 0; - 
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); g = ep->info.u.gen_bp; if (g) { @@ -777,7 +769,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) if (bp_flags & ERTS_BPF_META_TRACE) { ErtsTracer old_tracer; - meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer); + meta_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer); old_tracer = meta_tracer; flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args, 0, &meta_tracer); @@ -785,7 +777,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) { ErtsTracer new_tracer = erts_tracer_nil; erts_tracer_update(&new_tracer, meta_tracer); - if (old_tracer == erts_smp_atomic_cmpxchg_acqb( + if (old_tracer == erts_atomic_cmpxchg_acqb( &bp->meta_tracer->tracer, (erts_aint_t)new_tracer, (erts_aint_t)old_tracer)) { @@ -912,9 +904,9 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying, } } if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) { - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); } } } else { @@ -937,7 +929,7 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying, } } } - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); return result; } @@ -982,9 +974,9 @@ do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg, c_p->cp = (BeamInstr *) cp_val(*cpp); ASSERT(is_CP(*cpp)); } - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); flags = erts_call_trace(c_p, info, ms, reg, local, &tracer); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); if (cpp) { c_p->cp = cp_save; } @@ -1024,9 +1016,9 @@ do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg, the funcinfo is above i. */ c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ? 
beam_exception_trace : beam_return_trace; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } else c_p->stop = E; return tracer; @@ -1043,7 +1035,7 @@ erts_trace_time_call(Process* c_p, ErtsCodeInfo *info, BpDataTime* bdt) Uint32 six = acquire_bp_sched_ix(c_p); ASSERT(c_p); - ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING + ASSERT(erts_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)); /* get previous timestamp and breakpoint @@ -1124,7 +1116,7 @@ erts_trace_time_return(Process *p, ErtsCodeInfo *ci) Uint32 six = acquire_bp_sched_ix(p); ASSERT(p); - ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING + ASSERT(erts_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)); /* get previous timestamp and breakpoint @@ -1206,7 +1198,7 @@ erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, *match_spec_ret = bp->meta_ms; } if (tracer_ret) { - *tracer_ret = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer); + *tracer_ret = erts_atomic_read_nob(&bp->meta_tracer->tracer); } return 1; } @@ -1220,7 +1212,7 @@ erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret) if (bp) { if (count_ret) { - *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount); + *count_ret = (Uint) erts_atomic_read_nob(&bp->count->acount); } return 1; } @@ -1500,7 +1492,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, Uint common; ErtsBpIndex ix = erts_staging_bp_ix(); - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); g = ci->u.gen_bp; if (g == 0) { int i; @@ -1532,7 +1524,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, bp->flags &= ~ERTS_BPF_COUNT_ACTIVE; } else { bp->flags |= ERTS_BPF_COUNT_ACTIVE; - erts_smp_atomic_set_nob(&bp->count->acount, 0); + erts_atomic_set_nob(&bp->count->acount, 0); } ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); return; @@ -1566,17 +1558,17 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, MatchSetRef(match_spec); bp->meta_ms = match_spec; bmt = Alloc(sizeof(BpMetaTracer)); - erts_smp_refc_init(&bmt->refc, 1); + erts_refc_init(&bmt->refc, 1); erts_tracer_update(&meta_tracer, tracer); /* copy tracer */ - erts_smp_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer); + erts_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer); bp->meta_tracer = bmt; } else if (break_flags & ERTS_BPF_COUNT) { BpCount* bcp; ASSERT((bp->flags & ERTS_BPF_COUNT) == 0); bcp = Alloc(sizeof(BpCount)); - erts_smp_refc_init(&bcp->refc, 1); - erts_smp_atomic_init_nob(&bcp->acount, 0); + erts_refc_init(&bcp->refc, 1); + erts_atomic_init_nob(&bcp->acount, 0); bp->count = bcp; } else if (break_flags & ERTS_BPF_TIME_TRACE) { BpDataTime* bdt; @@ -1584,12 +1576,8 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0); bdt = Alloc(sizeof(BpDataTime)); - erts_smp_refc_init(&bdt->refc, 1); -#ifdef ERTS_DIRTY_SCHEDULERS + erts_refc_init(&bdt->refc, 1); bdt->n = erts_no_schedulers + 1; -#else - bdt->n = erts_no_schedulers; -#endif bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n)); for (i = 0; i < bdt->n; i++) { bp_hash_init(&(bdt->hash[i]), 32); @@ -1621,7 +1609,7 @@ clear_function_break(ErtsCodeInfo *ci, 
Uint break_flags) Uint common; ErtsBpIndex ix = erts_staging_bp_ix(); - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); if ((g = ci->u.gen_bp) == NULL) { return 1; @@ -1654,8 +1642,8 @@ clear_function_break(ErtsCodeInfo *ci, Uint break_flags) static void bp_meta_unref(BpMetaTracer* bmt) { - if (erts_smp_refc_dectest(&bmt->refc, 0) <= 0) { - ErtsTracer trc = erts_smp_atomic_read_nob(&bmt->tracer); + if (erts_refc_dectest(&bmt->refc, 0) <= 0) { + ErtsTracer trc = erts_atomic_read_nob(&bmt->tracer); ERTS_TRACER_CLEAR(&trc); Free(bmt); } @@ -1664,7 +1652,7 @@ bp_meta_unref(BpMetaTracer* bmt) static void bp_count_unref(BpCount* bcp) { - if (erts_smp_refc_dectest(&bcp->refc, 0) <= 0) { + if (erts_refc_dectest(&bcp->refc, 0) <= 0) { Free(bcp); } } @@ -1672,7 +1660,7 @@ bp_count_unref(BpCount* bcp) static void bp_time_unref(BpDataTime* bdt) { - if (erts_smp_refc_dectest(&bdt->refc, 0) <= 0) { + if (erts_refc_dectest(&bdt->refc, 0) <= 0) { Uint i = 0; Uint j = 0; Process *h_p = NULL; @@ -1696,7 +1684,7 @@ bp_time_unref(BpDataTime* bdt) if (pbt) { Free(pbt); } - erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN); } } } diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h index 56fa82b912..a64765822b 100644 --- a/erts/emulator/beam/beam_bp.h +++ b/erts/emulator/beam/beam_bp.h @@ -41,7 +41,7 @@ typedef struct { typedef struct bp_data_time { /* Call time */ Uint n; bp_time_hash_t *hash; - erts_smp_refc_t refc; + erts_refc_t refc; } BpDataTime; typedef struct { @@ -50,13 +50,13 @@ typedef struct { } process_breakpoint_time_t; /* used within psd */ typedef struct { - erts_smp_atomic_t acount; - erts_smp_refc_t refc; + erts_atomic_t acount; + erts_refc_t refc; } BpCount; typedef struct { - erts_smp_atomic_t tracer; - erts_smp_refc_t refc; + erts_atomic_t tracer; + erts_refc_t refc; } BpMetaTracer; typedef struct generic_bp_data { @@ -79,9 +79,7 @@ typedef struct generic_bp { #define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1) #define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2) -#ifdef ERTS_DIRTY_SCHEDULERS -extern erts_smp_mtx_t erts_dirty_bp_ix_mtx; -#endif +extern erts_mtx_t erts_dirty_bp_ix_mtx; enum erts_break_op{ ERTS_BREAK_NOP = 0, /* Must be false */ @@ -173,17 +171,17 @@ ErtsCodeInfo *erts_find_local_func(ErtsCodeMFA *mfa); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -extern erts_smp_atomic32_t erts_active_bp_index; -extern erts_smp_atomic32_t erts_staging_bp_index; +extern erts_atomic32_t erts_active_bp_index; +extern erts_atomic32_t erts_staging_bp_index; ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void) { - return erts_smp_atomic32_read_nob(&erts_active_bp_index); + return erts_atomic32_read_nob(&erts_active_bp_index); } ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void) { - return erts_smp_atomic32_read_nob(&erts_staging_bp_index); + return erts_atomic32_read_nob(&erts_staging_bp_index); } #endif diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index a2060c80de..7819e9907d 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -53,6 +53,8 @@ void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg); static int print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr); static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif); +static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap); +static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap); BIF_RETTYPE erts_debug_same_2(BIF_ALIST_2) @@ 
-157,8 +159,8 @@ erts_debug_breakpoint_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); erts_bp_match_functions(&f, &mfa, specified); if (boolean == am_true) { @@ -174,8 +176,8 @@ erts_debug_breakpoint_2(BIF_ALIST_2) res = make_small(f.matched); erts_bp_free_matched_functions(&f); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); return res; @@ -424,7 +426,9 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) while (start_prog < prog) { prog--; switch (*prog) { + case 'f': case 'g': + case 'q': *ap++ = *--sp; break; case 'i': /* Initialize packing accumulator. */ @@ -489,6 +493,14 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) case 'n': /* Nil */ erts_print(to, to_arg, "[]"); break; + case 'S': /* Register */ + { + Uint reg_type = (*ap & 1) ? 'y' : 'x'; + Uint n = ap[0] / sizeof(Eterm); + erts_print(to, to_arg, "%c(%d)", reg_type, n); + ap++; + break; + } case 's': /* Any source (tagged constant or register) */ tag = loader_tag(*ap); if (tag == LOADER_X_REG) { @@ -522,12 +534,13 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) } ap++; break; - case 'I': /* Untagged integer. */ - case 't': + case 't': /* Untagged integers */ + case 'I': + case 'W': switch (op) { - case op_i_gc_bif1_jIsId: - case op_i_gc_bif2_jIIssd: - case op_i_gc_bif3_jIIssd: + case op_i_gc_bif1_jWstd: + case op_i_gc_bif2_jWtssd: + case op_i_gc_bif3_jWtssd: { const ErtsGcBif* p; BifFunction gcf = (BifFunction) *ap; @@ -549,9 +562,10 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) break; case 'f': /* Destination label */ { - ErtsCodeMFA* cmfa = find_function_from_pc((BeamInstr *)*ap); - if (!cmfa || erts_codemfa_to_code(cmfa) != (BeamInstr *) *ap) { - erts_print(to, to_arg, "f(" HEXF ")", *ap); + BeamInstr* target = f_to_addr(addr, op, ap); + ErtsCodeMFA* cmfa = find_function_from_pc(target); + if (!cmfa || erts_codemfa_to_code(cmfa) != target) { + erts_print(to, to_arg, "f(" HEXF ")", target); } else { erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module, cmfa->function, cmfa->arity); @@ -561,18 +575,18 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) break; case 'p': /* Pointer (to label) */ { - ErtsCodeMFA* cmfa = find_function_from_pc((BeamInstr *)*ap); - if (!cmfa || erts_codemfa_to_code(cmfa) != (BeamInstr *) *ap) { - erts_print(to, to_arg, "p(" HEXF ")", *ap); - } else { - erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module, - cmfa->function, cmfa->arity); - } + BeamInstr* target = f_to_addr(addr, op, ap); + erts_print(to, to_arg, "p(" HEXF ")", target); ap++; } break; case 'j': /* Pointer (to label) */ - erts_print(to, to_arg, "j(" HEXF ")", *ap); + if (*ap == 0) { + erts_print(to, to_arg, "j(0)"); + } else { + BeamInstr* target = f_to_addr(addr, op, ap); + erts_print(to, to_arg, "j(" HEXF ")", target); + } ap++; break; case 'e': /* Export entry */ @@ -615,12 +629,22 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) unpacked = ap; ap = addr + size; + + /* + * In the code below, never use ap[-1], ap[-2], ... + * (will not work if the arguments have been packed). + * + * Instead use unpacked[-1], unpacked[-2], ... 
+ */ switch (op) { case op_i_select_val_lins_xfI: case op_i_select_val_lins_yfI: + case op_i_select_val_bins_xfI: + case op_i_select_val_bins_yfI: { - int n = ap[-1]; + int n = unpacked[-1]; int ix = n; + Sint32* jump_tab = (Sint32 *)(ap + n); while (ix--) { erts_print(to, to_arg, "%T ", (Eterm) ap[0]); @@ -629,30 +653,19 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) } ix = n; while (ix--) { - erts_print(to, to_arg, "f(" HEXF ") ", (Eterm) ap[0]); - ap++; - size++; - } - } - break; - case op_i_select_val_bins_xfI: - case op_i_select_val_bins_yfI: - { - int n = ap[-1]; - - while (n > 0) { - erts_print(to, to_arg, "%T f(" HEXF ") ", (Eterm) ap[0], ap[1]); - ap += 2; - size += 2; - n--; + BeamInstr* target = f_to_addr_packed(addr, op, jump_tab); + erts_print(to, to_arg, "f(" HEXF ") ", target); + jump_tab++; } + size += (n+1) / 2; } break; case op_i_select_tuple_arity_xfI: case op_i_select_tuple_arity_yfI: { - int n = ap[-1]; + int n = unpacked[-1]; int ix = n - 1; /* without sentinel */ + Sint32* jump_tab = (Sint32 *)(ap + n); while (ix--) { Uint arity = arityval(ap[0]); @@ -666,39 +679,62 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) size++; ix = n; while (ix--) { - erts_print(to, to_arg, "f(" HEXF ") ", ap[0]); - ap++; - size++; + BeamInstr* target = f_to_addr_packed(addr, op, jump_tab); + erts_print(to, to_arg, "f(" HEXF ") ", target); + jump_tab++; + } + size += (n+1) / 2; + } + break; + case op_i_select_val2_xfcc: + case op_i_select_val2_yfcc: + case op_i_select_tuple_arity2_xfAA: + case op_i_select_tuple_arity2_yfAA: + { + Sint32* jump_tab = (Sint32 *) ap; + BeamInstr* target; + int i; + + for (i = 0; i < 2; i++) { + target = f_to_addr_packed(addr, op, jump_tab++); + erts_print(to, to_arg, "f(" HEXF ") ", target); } + size += 1; } break; - case op_i_jump_on_val_xfII: - case op_i_jump_on_val_yfII: + case op_i_jump_on_val_xfIW: + case op_i_jump_on_val_yfIW: { - int n; - for (n = ap[-2]; n > 0; n--) { - erts_print(to, to_arg, "f(" HEXF ") ", ap[0]); - ap++; - size++; + int n = unpacked[-2]; + Sint32* jump_tab = (Sint32 *) ap; + + size += (n+1) / 2; + while (n-- > 0) { + BeamInstr* target = f_to_addr_packed(addr, op, jump_tab); + erts_print(to, to_arg, "f(" HEXF ") ", target); + jump_tab++; } } break; case op_i_jump_on_val_zero_xfI: case op_i_jump_on_val_zero_yfI: { - int n; - for (n = ap[-1]; n > 0; n--) { - erts_print(to, to_arg, "f(" HEXF ") ", ap[0]); - ap++; - size++; + int n = unpacked[-1]; + Sint32* jump_tab = (Sint32 *) ap; + + size += (n+1) / 2; + while (n-- > 0) { + BeamInstr* target = f_to_addr_packed(addr, op, jump_tab); + erts_print(to, to_arg, "f(" HEXF ") ", target); + jump_tab++; } } break; case op_i_put_tuple_xI: case op_i_put_tuple_yI: - case op_new_map_dII: - case op_update_map_assoc_jsdII: - case op_update_map_exact_jsdII: + case op_new_map_dtI: + case op_update_map_assoc_sdtI: + case op_update_map_exact_jsdtI: { int n = unpacked[-1]; @@ -718,6 +754,27 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) } } break; + case op_i_new_small_map_lit_dtq: + { + Eterm *tp = tuple_val(unpacked[-1]); + int n = arityval(*tp); + + while (n > 0) { + switch (loader_tag(ap[0])) { + case LOADER_X_REG: + erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0])); + break; + case LOADER_Y_REG: + erts_print(to, to_arg, " x(%d)", loader_y_reg_index(ap[0])); + break; + default: + erts_print(to, to_arg, " %T", (Eterm) ap[0]); + break; + } + ap++, size++, n--; + } + } + break; case op_i_get_map_elements_fsI: { int n = 
unpacked[-1]; @@ -766,6 +823,17 @@ static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif) } } +static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap) +{ + return base - 1 + opc[op].adjust + (Sint32) *ap; +} + +static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap) +{ + return base - 1 + opc[op].adjust + *ap; +} + + /* * Dirty BIF testing. * @@ -774,10 +842,8 @@ static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif) * test suite. */ -#ifdef ERTS_DIRTY_SCHEDULERS static int ms_wait(Process *c_p, Eterm etimeout, int busy); static int dirty_send_message(Process *c_p, Eterm to, Eterm tag); -#endif static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I); /* @@ -806,7 +872,6 @@ erts_debug_dirty_io_2(BIF_ALIST_2) BIF_RETTYPE erts_debug_dirty_3(BIF_ALIST_3) { -#ifdef ERTS_DIRTY_SCHEDULERS Eterm argv[2]; switch (BIF_ARG_1) { case am_normal: @@ -836,9 +901,6 @@ erts_debug_dirty_3(BIF_ALIST_3) default: BIF_ERROR(BIF_P, EXC_BADARG); } -#else - BIF_ERROR(BIF_P, EXC_UNDEF); -#endif } @@ -846,7 +908,6 @@ static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I) { BIF_RETTYPE ret; -#ifdef ERTS_DIRTY_SCHEDULERS if (am_scheduler == arg1) { ErtsSchedulerData *esdp; if (arg2 != am_type) @@ -1032,13 +1093,9 @@ dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I) badarg: ERTS_BIF_PREP_ERROR(ret, c_p, BADARG); } -#else - ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF); -#endif return ret; } -#ifdef ERTS_DIRTY_SCHEDULERS static int dirty_send_message(Process *c_p, Eterm to, Eterm tag) @@ -1075,7 +1132,7 @@ dirty_send_message(Process *c_p, Eterm to, Eterm tag) if (rp == real_c_p) rp_locks &= ~c_p_locks; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); @@ -1125,13 +1182,8 @@ ms_wait(Process *c_p, Eterm etimeout, int busy) return 1; } -#endif /* ERTS_DIRTY_SCHEDULERS */ -#ifdef ERTS_SMP # define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit()) -#else -# define ERTS_STACK_LIMIT ((char *) erts_scheduler_stack_limit) -#endif /* * The below functions is for testing of the stack diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index b4e6c35579..81c4417b1e 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -62,33 +62,27 @@ #endif #ifdef ERTS_ENABLE_LOCK_CHECK -# ifdef ERTS_SMP -# define PROCESS_MAIN_CHK_LOCKS(P) \ -do { \ - if ((P)) \ - erts_proc_lc_chk_only_proc_main((P)); \ - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \ +# define PROCESS_MAIN_CHK_LOCKS(P) \ +do { \ + if ((P)) \ + erts_proc_lc_chk_only_proc_main((P)); \ + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); \ } while (0) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ -do { \ - if ((P)) \ - erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \ - __FILE__, __LINE__); \ +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ +do { \ + if ((P)) \ + erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \ + __FILE__, __LINE__); \ } while (0) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ do { \ if ((P)) \ erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \ } while (0) -# else -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) -# define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0) -# endif #else # define PROCESS_MAIN_CHK_LOCKS(P) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) +# define 
ERTS_REQ_PROC_MAIN_LOCK(P) +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) #endif /* @@ -155,50 +149,7 @@ do { \ * Register target (X or Y register). */ -#define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb(Target-1) : &xb(Target)) -#define REG_TARGET(Target) (*REG_TARGET_PTR(Target)) - -/* - * Store a result into a register given a destination descriptor. - */ - -#define StoreResult(Result, DestDesc) \ - do { \ - Eterm stb_reg; \ - stb_reg = (DestDesc); \ - CHECK_TERM(Result); \ - REG_TARGET(stb_reg) = (Result); \ - } while (0) - -/* - * Store a result into a register and execute the next instruction. - * Dst points to the word with a destination descriptor, which MUST - * be just before the next instruction. - */ - -#define StoreBifResult(Dst, Result) \ - do { \ - BeamInstr* stb_next; \ - Eterm stb_reg; \ - stb_reg = Arg(Dst); \ - I += (Dst) + 2; \ - stb_next = (BeamInstr *) *I; \ - CHECK_TERM(Result); \ - REG_TARGET(stb_reg) = (Result); \ - Goto(stb_next); \ - } while (0) - -#define ClauseFail() goto jump_f - -#define SAVE_CP(X) \ - do { \ - *(X) = make_cp(c_p->cp); \ - c_p->cp = 0; \ - } while(0) - -#define RESTORE_CP(X) SET_CP(c_p, (BeamInstr *) cp_val(*(X))) - -#define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y)) +#define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb((Target)-1) : &xb(Target)) /* * Special Beam instructions. @@ -281,13 +232,16 @@ void** beam_ops; HEAP_TOP((P)) = HTOP; \ (P)->stop = E; \ PROCESS_MAIN_CHK_LOCKS((P)); \ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P)) + ERTS_UNREQ_PROC_MAIN_LOCK((P)) #define db(N) (N) +#define fb(N) ((Sint)(Sint32)(N)) +#define jb(N) ((Sint)(Sint32)(N)) #define tb(N) (N) #define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N))) #define yb(N) (*(Eterm *) (((unsigned char *)E) + (N))) -#define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N))) +#define Sb(N) (*REG_TARGET_PTR(N)) +#define lb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N))) #define Qb(N) (N) #define Ib(N) (N) #define x(N) reg[N] @@ -295,151 +249,6 @@ void** beam_ops; #define r(N) x(N) /* - * Makes sure that there are StackNeed + HeapNeed + 1 words available - * on the combined heap/stack segment, then allocates StackNeed + 1 - * words on the stack and saves CP. - * - * M is number of live registers to preserve during garbage collection - */ - -#define AH(StackNeed, HeapNeed, M) \ - do { \ - int needed; \ - needed = (StackNeed) + 1; \ - if (E - HTOP < (needed + (HeapNeed))) { \ - SWAPOUT; \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - FCALLS -= erts_garbage_collect_nobump(c_p, needed + (HeapNeed), \ - reg, (M), FCALLS); \ - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - SWAPIN; \ - } \ - E -= needed; \ - SAVE_CP(E); \ - } while (0) - -#define Allocate(Ns, Live) AH(Ns, 0, Live) - -#define AllocateZero(Ns, Live) \ - do { Eterm* ptr; \ - int i = (Ns); \ - AH(i, 0, Live); \ - for (ptr = E + i; ptr > E; ptr--) { \ - make_blank(*ptr); \ - } \ - } while (0) - -#define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live) - -#define AllocateHeapZero(Ns, Nh, Live) \ - do { Eterm* ptr; \ - int i = (Ns); \ - AH(i, Nh, Live); \ - for (ptr = E + i; ptr > E; ptr--) { \ - make_blank(*ptr); \ - } \ - } while (0) - -#define AllocateInit(Ns, Live, Y) \ - do { AH(Ns, 0, Live); make_blank(Y); } while (0) - -/* - * Like the AH macro, but allocates no additional heap space. 
- */ - -#define A(StackNeed, M) AH(StackNeed, 0, M) - -#define D(N) \ - RESTORE_CP(E); \ - E += (N) + 1; - - - -#define TestBinVHeap(VNh, Nh, Live) \ - do { \ - unsigned need = (Nh); \ - if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\ - SWAPOUT; \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live), FCALLS); \ - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - SWAPIN; \ - } \ - HEAP_SPACE_VERIFIED(need); \ - } while (0) - - - -/* - * Check if Nh words of heap are available; if not, do a garbage collection. - * Live is number of active argument registers to be preserved. - */ - -#define TestHeap(Nh, Live) \ - do { \ - unsigned need = (Nh); \ - if (E - HTOP < need) { \ - SWAPOUT; \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live), FCALLS); \ - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - SWAPIN; \ - } \ - HEAP_SPACE_VERIFIED(need); \ - } while (0) - -/* - * Check if Nh words of heap are available; if not, do a garbage collection. - * Live is number of active argument registers to be preserved. - * Takes special care to preserve Extra if a garbage collection occurs. - */ - -#define TestHeapPreserve(Nh, Live, Extra) \ - do { \ - unsigned need = (Nh); \ - if (E - HTOP < need) { \ - SWAPOUT; \ - reg[Live] = Extra; \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live)+1, FCALLS); \ - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - Extra = reg[Live]; \ - SWAPIN; \ - } \ - HEAP_SPACE_VERIFIED(need); \ - } while (0) - -#define TestHeapPutList(Need, Reg) \ - do { \ - TestHeap((Need), 1); \ - PutList(Reg, r(0), r(0)); \ - CHECK_TERM(r(0)); \ - } while (0) - -#define Init(N) make_blank(yb(N)) - -#define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0) -#define Init3(Y1, Y2, Y3) \ - do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0) - -#define MakeFun(FunP, NumFree) \ - do { \ - HEAVY_SWAPOUT; \ - r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \ - HEAVY_SWAPIN; \ - } while (0) - -#define PutTuple(Dst, Arity) \ - do { \ - Dst = make_tuple(HTOP); \ - pt_arity = (Arity); \ - } while (0) - -/* * Check that we haven't used the reductions and jump to function pointed to by * the I register. If we are out of reductions, do a context switch. 
*/ @@ -505,20 +314,7 @@ void** beam_ops; # define Dispatchfun() DispatchMacroFun() #endif -#define Self(R) R = c_p->common.id -#define Node(R) R = erts_this_node->sysname - #define Arg(N) I[(N)+1] -#define Next(N) \ - I += (N) + 1; \ - ASSERT(VALID_INSTR(*I)); \ - Goto(*I) - -#define PreFetch(N, Dst) do { Dst = (BeamInstr *) *(I + N + 1); } while (0) -#define NextPF(N, Dst) \ - I += N + 1; \ - ASSERT(VALID_INSTR(Dst)); \ - Goto(Dst) #define GetR(pos, tr) \ do { \ @@ -535,97 +331,20 @@ void** beam_ops; CHECK_TERM(tr); \ } while (0) -#define GetArg1(N, Dst) GetR((N), Dst) - -#define GetArg2(N, Dst1, Dst2) \ - do { \ - GetR(N, Dst1); \ - GetR((N)+1, Dst2); \ - } while (0) - -#define PutList(H, T, Dst) \ - do { \ - HTOP[0] = (H); HTOP[1] = (T); \ - Dst = make_list(HTOP); \ - HTOP += 2; \ - } while (0) - -#define Swap(R1, R2) \ - do { \ - Eterm V = R1; \ - R1 = R2; \ - R2 = V; \ - } while (0) - -#define SwapTemp(R1, R2, Tmp) \ - do { \ - Eterm V = R1; \ - R1 = R2; \ - R2 = Tmp = V; \ - } while (0) - -#define Move(Src, Dst) Dst = (Src) - -#define Move2Par(S1, D1, S2, D2) \ - do { \ - Eterm V1, V2; \ - V1 = (S1); V2 = (S2); D1 = V1; D2 = V2; \ - } while (0) - -#define MoveShift(Src, SD, D) \ - do { \ - Eterm V; \ - V = Src; D = SD; SD = V; \ - } while (0) - -#define MoveDup(Src, D1, D2) \ - do { \ - D1 = D2 = (Src); \ - } while (0) - -#define Move3(S1, D1, S2, D2, S3, D3) D1 = (S1); D2 = (S2); D3 = (S3) - -#define MoveWindow3(S1, S2, S3, D) \ - do { \ - Eterm xt0, xt1, xt2; \ - Eterm *y = &D; \ - xt0 = S1; \ - xt1 = S2; \ - xt2 = S3; \ - y[0] = xt0; \ - y[1] = xt1; \ - y[2] = xt2; \ - } while (0) - -#define MoveWindow4(S1, S2, S3, S4, D) \ - do { \ - Eterm xt0, xt1, xt2, xt3; \ - Eterm *y = &D; \ - xt0 = S1; \ - xt1 = S2; \ - xt2 = S3; \ - xt3 = S4; \ - y[0] = xt0; \ - y[1] = xt1; \ - y[2] = xt2; \ - y[3] = xt3; \ - } while (0) - -#define MoveWindow5(S1, S2, S3, S4, S5, D) \ - do { \ - Eterm xt0, xt1, xt2, xt3, xt4; \ - Eterm *y = &D; \ - xt0 = S1; \ - xt1 = S2; \ - xt2 = S3; \ - xt3 = S4; \ - xt4 = S5; \ - y[0] = xt0; \ - y[1] = xt1; \ - y[2] = xt2; \ - y[3] = xt3; \ - y[4] = xt4; \ - } while (0) +#define PUT_TERM_REG(term, desc) \ +do { \ + switch (loader_tag(desc)) { \ + case LOADER_X_REG: \ + x(loader_x_reg_index(desc)) = (term); \ + break; \ + case LOADER_Y_REG: \ + y(loader_y_reg_index(desc)) = (term); \ + break; \ + default: \ + ASSERT(0); \ + break; \ + } \ +} while(0) #define DispatchReturn \ do { \ @@ -640,409 +359,14 @@ do { \ } \ } while (0) -#define MoveReturn(Src) \ - x(0) = (Src); \ - I = c_p->cp; \ - ASSERT(VALID_INSTR(*c_p->cp)); \ - c_p->cp = 0; \ - CHECK_TERM(r(0)); \ - DispatchReturn - -#define DeallocateReturn(Deallocate) \ - do { \ - int words_to_pop = (Deallocate); \ - SET_I((BeamInstr *) cp_val(*E)); \ - E = ADD_BYTE_OFFSET(E, words_to_pop); \ - CHECK_TERM(r(0)); \ - DispatchReturn; \ - } while (0) - -#define MoveDeallocateReturn(Src, Deallocate) \ - x(0) = (Src); \ - DeallocateReturn(Deallocate) - -#define MoveCall(Src, CallDest, Size) \ - x(0) = (Src); \ - SET_CP(c_p, I+Size+1); \ - SET_I((BeamInstr *) CallDest); \ - Dispatch(); - -#define MoveCallLast(Src, CallDest, Deallocate) \ - x(0) = (Src); \ - RESTORE_CP(E); \ - E = ADD_BYTE_OFFSET(E, (Deallocate)); \ - SET_I((BeamInstr *) CallDest); \ - Dispatch(); - -#define MoveCallOnly(Src, CallDest) \ - x(0) = (Src); \ - SET_I((BeamInstr *) CallDest); \ - Dispatch(); - -#define MoveJump(Src) \ - r(0) = (Src); \ - SET_I((BeamInstr *) Arg(0)); \ - Goto(*I); - -#define GetList(Src, H, T) \ - do { \ - Eterm* tmp_ptr = 
list_val(Src); \ - Eterm hd, tl; \ - hd = CAR(tmp_ptr); \ - tl = CDR(tmp_ptr); \ - H = hd; T = tl; \ - } while (0) - -#define GetTupleElement(Src, Element, Dest) \ - do { \ - Eterm* src; \ - src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \ - (Dest) = *src; \ - } while (0) - -#define GetTupleElement2(Src, Element, Dest) \ - do { \ - Eterm* src; \ - Eterm* dst; \ - Eterm E1, E2; \ - src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \ - dst = &(Dest); \ - E1 = src[0]; \ - E2 = src[1]; \ - dst[0] = E1; \ - dst[1] = E2; \ - } while (0) - -#define GetTupleElement2Y(Src, Element, D1, D2) \ - do { \ - Eterm* src; \ - Eterm E1, E2; \ - src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \ - E1 = src[0]; \ - E2 = src[1]; \ - D1 = E1; \ - D2 = E2; \ - } while (0) - -#define GetTupleElement3(Src, Element, Dest) \ - do { \ - Eterm* src; \ - Eterm* dst; \ - Eterm E1, E2, E3; \ - src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \ - dst = &(Dest); \ - E1 = src[0]; \ - E2 = src[1]; \ - E3 = src[2]; \ - dst[0] = E1; \ - dst[1] = E2; \ - dst[2] = E3; \ - } while (0) - -#define EqualImmed(X, Y, Action) if (X != Y) { Action; } -#define NotEqualImmed(X, Y, Action) if (X == Y) { Action; } -#define EqualExact(X, Y, Action) if (!EQ(X,Y)) { Action; } -#define NotEqualExact(X, Y, Action) if (EQ(X,Y)) { Action; } -#define Equal(X, Y, Action) CMP_EQ_ACTION(X,Y,Action) -#define NotEqual(X, Y, Action) CMP_NE_ACTION(X,Y,Action) -#define IsLessThan(X, Y, Action) CMP_LT_ACTION(X,Y,Action) -#define IsGreaterEqual(X, Y, Action) CMP_GE_ACTION(X,Y,Action) - -#define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; } - -#define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; } - -#define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; } - -#define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; } - -#define IsIntegerAllocate(Src, Need, Alive, Fail) \ - if (is_not_integer(Src)) { Fail; } \ - A(Need, Alive) - -#define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; } - -#define IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; } - -#define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; } - -#define IsNonemptyListAllocate(Src, Need, Alive, Fail) \ - if (is_not_list(Src)) { Fail; } \ - A(Need, Alive) - -#define IsNonemptyListTestHeap(Need, Alive, Fail) \ - if (is_not_list(x(0))) { Fail; } \ - TestHeap(Need, Alive) - -#define IsNonemptyListGetList(Src, H, T, Fail) \ - if (is_not_list(Src)) { \ - Fail; \ - } else { \ - Eterm* tmp_ptr = list_val(Src); \ - Eterm hd, tl; \ - hd = CAR(tmp_ptr); \ - tl = CDR(tmp_ptr); \ - H = hd; T = tl; \ - } - -#define IsTuple(X, Action) if (is_not_tuple(X)) Action - -#define IsArity(Pointer, Arity, Fail) \ - if (*tuple_val(Pointer) != (Arity)) { \ - Fail; \ - } - -#define IsMap(Src, Fail) if (!is_map(Src)) { Fail; } - -#define GetMapElement(Src, Key, Dst, Fail) \ - do { \ - Eterm _res = get_map_element(Src, Key); \ - if (is_non_value(_res)) { \ - Fail; \ - } \ - Dst = _res; \ - } while (0) - -#define GetMapElementHash(Src, Key, Hx, Dst, Fail) \ - do { \ - Eterm _res = get_map_element_hash(Src, Key, Hx); \ - if (is_non_value(_res)) { \ - Fail; \ - } \ - Dst = _res; \ - } while (0) - -#define IsFunction(X, Action) \ - do { \ - if ( !(is_any_fun(X)) ) { \ - Action; \ - } \ - } while (0) - -#define IsFunction2(F, A, Action) \ - do { \ - if (erl_is_function(c_p, F, A) != am_true ) { \ - Action; \ - } \ - } while (0) - #ifdef DEBUG -#define IsTupleOfArity(Src, Arityval, Fail) \ - do { \ - if (!(is_tuple(Src) && *tuple_val(Src) == Arityval)) { \ - Fail; \ - 
} \ - } while (0) +/* Better static type testing by the C compiler */ +# define BEAM_IS_TUPLE(Src) is_tuple(Src) #else -#define IsTupleOfArity(Src, Arityval, Fail) \ - do { \ - if (!(is_boxed(Src) && *tuple_val(Src) == Arityval)) { \ - Fail; \ - } \ - } while (0) +/* Better performance */ +# define BEAM_IS_TUPLE(Src) is_boxed(Src) #endif -#define IsTaggedTuple(Src,Arityval,Tag,Fail) \ - do { \ - if (!(is_tuple(Src) && \ - (tuple_val(Src))[0] == Arityval && \ - (tuple_val(Src))[1] == Tag)) { \ - Fail; \ - } \ - } while (0) - -#define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; } - -#define IsBinary(Src, Fail) \ - if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; } - -#define IsBitstring(Src, Fail) \ - if (is_not_binary(Src)) { Fail; } - -#if defined(ARCH_64) -#define BsSafeMul(A, B, Fail, Target) \ - do { Uint64 _res = (A) * (B); \ - if (_res / B != A) { Fail; } \ - Target = _res; \ - } while (0) -#else -#define BsSafeMul(A, B, Fail, Target) \ - do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \ - if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \ - Target = _res; \ - } while (0) -#endif - -#define BsGetFieldSize(Bits, Unit, Fail, Target) \ - do { \ - Sint _signed_size; Uint _uint_size; \ - Uint temp_bits; \ - if (is_small(Bits)) { \ - _signed_size = signed_val(Bits); \ - if (_signed_size < 0) { Fail; } \ - _uint_size = (Uint) _signed_size; \ - } else { \ - if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \ - _uint_size = temp_bits; \ - } \ - BsSafeMul(_uint_size, Unit, Fail, Target); \ - } while (0) - -#define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \ - do { \ - Sint _signed_size; Uint _uint_size; \ - Uint temp_bits; \ - if (is_small(Bits)) { \ - _signed_size = signed_val(Bits); \ - if (_signed_size < 0) { Fail; } \ - _uint_size = (Uint) _signed_size; \ - } else { \ - if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \ - _uint_size = (Uint) temp_bits; \ - } \ - Target = _uint_size * Unit; \ - } while (0) - -#define BsGetFloat2(Ms, Live, Sz, Flags, Dst, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - Eterm _result; Sint _size; \ - if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \ - _size *= ((Flags) >> 3); \ - TestHeap(FLOAT_SIZE_OBJECT, Live); \ - _mb = ms_matchbuffer(Ms); \ - LIGHT_SWAPOUT; \ - _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \ - LIGHT_SWAPIN; \ - HEAP_SPACE_VERIFIED(0); \ - if (is_non_value(_result)) { Fail; } \ - else { Dst = _result; } \ - } while (0) - -#define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - Eterm _result; \ - TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \ - _mb = ms_matchbuffer(Ms); \ - LIGHT_SWAPOUT; \ - _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \ - LIGHT_SWAPIN; \ - HEAP_SPACE_VERIFIED(0); \ - if (is_non_value(_result)) { Fail; } \ - else { Dst = _result; } \ - } while (0) - -#define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - Eterm _result; Uint _size; \ - BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \ - TestHeap(ERL_SUB_BIN_SIZE, Live); \ - _mb = ms_matchbuffer(Ms); \ - LIGHT_SWAPOUT; \ - _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \ - LIGHT_SWAPIN; \ - HEAP_SPACE_VERIFIED(0); \ - if (is_non_value(_result)) { Fail; } \ - else { Dst = _result; } \ - } while (0) - -#define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - Eterm _result; \ - TestHeap(ERL_SUB_BIN_SIZE, Live); \ - _mb = ms_matchbuffer(Ms); \ - if (((_mb->size - 
_mb->offset) % Unit) == 0) { \ - LIGHT_SWAPOUT; \ - _result = erts_bs_get_binary_all_2(c_p, _mb); \ - LIGHT_SWAPIN; \ - HEAP_SPACE_VERIFIED(0); \ - ASSERT(is_value(_result)); \ - Dst = _result; \ - } else { \ - HEAP_SPACE_VERIFIED(0); \ - Fail; } \ - } while (0) - -#define BsSkipBits2(Ms, Bits, Unit, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - size_t new_offset; \ - Uint _size; \ - _mb = ms_matchbuffer(Ms); \ - BsGetFieldSize(Bits, Unit, Fail, _size); \ - new_offset = _mb->offset + _size; \ - if (new_offset <= _mb->size) { _mb->offset = new_offset; } \ - else { Fail; } \ - } while (0) - -#define BsSkipBitsAll2(Ms, Unit, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - _mb = ms_matchbuffer(Ms); \ - if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \ - else { Fail; } \ - } while (0) - -#define BsSkipBitsImm2(Ms, Bits, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - size_t new_offset; \ - _mb = ms_matchbuffer(Ms); \ - new_offset = _mb->offset + (Bits); \ - if (new_offset <= _mb->size) { _mb->offset = new_offset; } \ - else { Fail; } \ - } while (0) - -#define NewBsPutIntegerImm(Sz, Flags, Src) \ - do { \ - if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \ - } while (0) - -#define NewBsPutInteger(Sz, Flags, Src) \ - do { \ - Sint _size; \ - BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \ - if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \ - { goto badarg; } \ - } while (0) - -#define NewBsPutFloatImm(Sz, Flags, Src) \ - do { \ - if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \ - } while (0) - -#define NewBsPutFloat(Sz, Flags, Src) \ - do { \ - Sint _size; \ - BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \ - if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \ - } while (0) - -#define NewBsPutBinary(Sz, Flags, Src) \ - do { \ - Sint _size; \ - BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \ - if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \ - } while (0) - -#define NewBsPutBinaryImm(Sz, Src) \ - do { \ - if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \ - } while (0) - -#define NewBsPutBinaryAll(Src, Unit) \ - do { \ - if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \ - } while (0) - - -#define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; } -#define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; } -#define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; } - /* * process_main() is already huge, so we want to avoid inlining * into it. Especially functions that are seldom used. 
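For readers following the jump-table changes in the beam_debug.c hunks above: the per-entry absolute failure addresses are replaced by packed 32-bit offsets that are resolved through f_to_addr_packed(), i.e. base - 1 + opc[op].adjust + *ap. The following is a minimal, self-contained sketch of that decoding step only; the typedefs, the function name with the _sketch suffix, and the literal adjust/offset values are stand-ins invented for illustration (in the patch the adjust value comes from the emulator's opcode table), not part of this diff.

    #include <stdio.h>

    typedef unsigned long BeamInstr;   /* stand-in for the emulator's instruction word type */
    typedef int Sint32;                /* stand-in for the emulator's 32-bit signed type */

    /* Resolve one packed jump-table entry to an absolute instruction address.
     * In the patch, 'adjust' is taken from the opcode table (opc[op].adjust);
     * here it is passed in directly to keep the sketch self-contained. */
    static BeamInstr* f_to_addr_packed_sketch(BeamInstr* base, int adjust, Sint32* entry)
    {
        return base - 1 + adjust + *entry;
    }

    int main(void)
    {
        BeamInstr code[16] = {0};
        Sint32 jump_tab[2] = {5, 9};   /* offsets relative to the instruction, illustrative values */

        /* &code[3] - 1 + 0 + 5 == &code[7] */
        BeamInstr* target = f_to_addr_packed_sketch(&code[3], 0, &jump_tab[0]);
        printf("target index: %ld\n", (long)(target - code));
        return 0;
    }

This mirrors how the print loops in the hunks above walk jump_tab and call f_to_addr_packed(addr, op, jump_tab) once per entry while stepping size by (n+1)/2 words for the packed table.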
@@ -1066,20 +390,21 @@ static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa, Eterm* reg, Eterm func) NOINLINE; static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity, BeamInstr *I, Uint offs) NOINLINE; -static BeamInstr* apply(Process* p, Eterm module, Eterm function, - Eterm args, Eterm* reg, - BeamInstr *I, Uint offs) NOINLINE; +static BeamInstr* apply(Process* p, Eterm* reg, + BeamInstr *I, Uint offs) NOINLINE; static BeamInstr* call_fun(Process* p, int arity, Eterm* reg, Eterm args) NOINLINE; static BeamInstr* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg) NOINLINE; static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) NOINLINE; -static Eterm new_map(Process* p, Eterm* reg, BeamInstr* I) NOINLINE; -static Eterm update_map_assoc(Process* p, Eterm* reg, - Eterm map, BeamInstr* I) NOINLINE; -static Eterm update_map_exact(Process* p, Eterm* reg, - Eterm map, BeamInstr* I) NOINLINE; +static Eterm new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr) NOINLINE; +static Eterm new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, + Uint live, BeamInstr* ptr) NOINLINE; +static Eterm update_map_assoc(Process* p, Eterm* reg, Uint live, + Uint n, BeamInstr* new_p) NOINLINE; +static Eterm update_map_exact(Process* p, Eterm* reg, Uint live, + Uint n, Eterm* new_p) NOINLINE; static Eterm get_map_element(Eterm map, Eterm key); static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx); @@ -1111,6 +436,12 @@ init_emulator(void) # define REG_stop asm("%l3") # define REG_I asm("%l4") # define REG_fcalls asm("%l5") +#elif defined(__GNUC__) && defined(__amd64__) && !defined(DEBUG) +# define REG_xregs asm("%r12") +# define REG_htop +# define REG_stop asm("%r13") +# define REG_I asm("%rbx") +# define REG_fcalls asm("%r14") #else # define REG_xregs # define REG_htop @@ -1230,6 +561,13 @@ init_emulator(void) #define ERTS_DBG_CHK_REDS(P, FC) #endif +#ifdef NO_FPE_SIGNALS +# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT +# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR +#else +# define ERTS_NO_FPE_CHECK_INIT(p) +# define ERTS_NO_FPE_ERROR(p, a, b) +#endif /* * process_main() is called twice: @@ -1293,8 +631,6 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) #endif #endif - Eterm pt_arity; /* Used by do_put_tuple */ - Uint64 start_time = 0; /* Monitor long schedule */ BeamInstr* start_time_i = NULL; @@ -1311,7 +647,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) * Note: c_p->arity must be set to reflect the number of useful terms in * c_p->arg_reg before calling the scheduler. */ - if (!init_done) { + if (ERTS_UNLIKELY(!init_done)) { /* This should only be reached during the init phase when only the main * process is running. I.e. there is no race for init_done. 
*/ @@ -1344,16 +680,16 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) } PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); c_p = erts_schedule(NULL, c_p, reds_used); ASSERT(!(c_p->flags & F_HIPE_MODE)); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); start_time = 0; #ifdef DEBUG - pid = c_p->common.id; /* Save for debugging purpouses */ + pid = c_p->common.id; /* Save for debugging purposes */ #endif - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); ERTS_MSACC_UPDATE_CACHE_X(); @@ -1435,1923 +771,8 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) #ifdef NO_JUMP_TABLE switch (Go) { #endif -#include "beam_hot.h" - - { - Eterm increment_reg_val; - Eterm increment_val; - Uint live; - Eterm result; - - OpCase(i_increment_rIId): - increment_reg_val = x(0); - I--; - goto do_increment; - - OpCase(i_increment_xIId): - increment_reg_val = xb(Arg(0)); - goto do_increment; - - OpCase(i_increment_yIId): - increment_reg_val = yb(Arg(0)); - goto do_increment; - - do_increment: - increment_val = Arg(1); - if (is_small(increment_reg_val)) { - Sint i = signed_val(increment_reg_val) + increment_val; - ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i)); - if (MY_IS_SSMALL(i)) { - result = make_small(i); - StoreBifResult(3, result); - } - } - - live = Arg(2); - HEAVY_SWAPOUT; - reg[live] = increment_reg_val; - reg[live+1] = make_small(increment_val); - result = erts_gc_mixed_plus(c_p, reg, live); - HEAVY_SWAPIN; - ERTS_HOLE_CHECK(c_p); - if (is_value(result)) { - StoreBifResult(3, result); - } - ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue)); - goto find_func_info; - } - -#define DO_OUTLINED_ARITH_2(name, Op1, Op2) \ - do { \ - Eterm result; \ - Uint live = Arg(1); \ - \ - HEAVY_SWAPOUT; \ - reg[live] = Op1; \ - reg[live+1] = Op2; \ - result = erts_gc_##name(c_p, reg, live); \ - HEAVY_SWAPIN; \ - ERTS_HOLE_CHECK(c_p); \ - if (is_value(result)) { \ - StoreBifResult(4, result); \ - } \ - goto lb_Cl_error; \ - } while (0) - - { - Eterm PlusOp1, PlusOp2; - Eterm result; - - OpCase(i_plus_jIxxd): - PlusOp1 = xb(Arg(2)); - PlusOp2 = xb(Arg(3)); - goto do_plus; - - OpCase(i_plus_jIxyd): - PlusOp1 = xb(Arg(2)); - PlusOp2 = yb(Arg(3)); - goto do_plus; - - OpCase(i_plus_jIssd): - GetArg2(2, PlusOp1, PlusOp2); - goto do_plus; - - do_plus: - if (is_both_small(PlusOp1, PlusOp2)) { - Sint i = signed_val(PlusOp1) + signed_val(PlusOp2); - ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i)); - if (MY_IS_SSMALL(i)) { - result = make_small(i); - StoreBifResult(4, result); - } - } - DO_OUTLINED_ARITH_2(mixed_plus, PlusOp1, PlusOp2); - } - - { - Eterm MinusOp1, MinusOp2; - Eterm result; - - OpCase(i_minus_jIxxd): - MinusOp1 = xb(Arg(2)); - MinusOp2 = xb(Arg(3)); - goto do_minus; - - OpCase(i_minus_jIssd): - GetArg2(2, MinusOp1, MinusOp2); - goto do_minus; - - do_minus: - if (is_both_small(MinusOp1, MinusOp2)) { - Sint i = signed_val(MinusOp1) - signed_val(MinusOp2); - ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i)); - if (MY_IS_SSMALL(i)) { - result = make_small(i); - StoreBifResult(4, result); - } - } - DO_OUTLINED_ARITH_2(mixed_minus, MinusOp1, MinusOp2); - } - - { - Eterm is_eq_exact_lit_val; - - OpCase(i_is_eq_exact_literal_fxc): - is_eq_exact_lit_val = xb(Arg(1)); - goto do_is_eq_exact_literal; - - OpCase(i_is_eq_exact_literal_fyc): - is_eq_exact_lit_val = yb(Arg(1)); - goto do_is_eq_exact_literal; - - do_is_eq_exact_literal: - if (!eq(Arg(2), is_eq_exact_lit_val)) { - ClauseFail(); - } - 
Next(3); - } - - { - Eterm is_ne_exact_lit_val; - - OpCase(i_is_ne_exact_literal_fxc): - is_ne_exact_lit_val = xb(Arg(1)); - goto do_is_ne_exact_literal; - - OpCase(i_is_ne_exact_literal_fyc): - is_ne_exact_lit_val = yb(Arg(1)); - goto do_is_ne_exact_literal; - - do_is_ne_exact_literal: - if (eq(Arg(2), is_ne_exact_lit_val)) { - ClauseFail(); - } - Next(3); - } - - OpCase(i_move_call_only_fc): { - r(0) = Arg(1); - } - /* FALL THROUGH */ - OpCase(i_call_only_f): { - SET_I((BeamInstr *) Arg(0)); - DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); - Dispatch(); - } - - OpCase(i_move_call_last_fPc): { - r(0) = Arg(2); - } - /* FALL THROUGH */ - OpCase(i_call_last_fP): { - RESTORE_CP(E); - E = ADD_BYTE_OFFSET(E, Arg(1)); - SET_I((BeamInstr *) Arg(0)); - DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); - Dispatch(); - } - - OpCase(i_move_call_cf): { - r(0) = Arg(0); - I++; - } - /* FALL THROUGH */ - OpCase(i_call_f): { - SET_CP(c_p, I+2); - SET_I((BeamInstr *) Arg(0)); - DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); - Dispatch(); - } - - OpCase(i_move_call_ext_last_ePc): { - r(0) = Arg(2); - } - /* FALL THROUGH */ - OpCase(i_call_ext_last_eP): - RESTORE_CP(E); - E = ADD_BYTE_OFFSET(E, Arg(1)); - - /* - * Note: The pointer to the export entry is never NULL; if the module - * is not loaded, it points to code which will invoke the error handler - * (see lb_call_error_handler below). - */ - DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0)); - Dispatchx(); - - OpCase(i_move_call_ext_ce): { - r(0) = Arg(0); - I++; - } - /* FALL THROUGH */ - OpCase(i_call_ext_e): - SET_CP(c_p, I+2); - DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0)); - Dispatchx(); - - OpCase(i_move_call_ext_only_ec): { - r(0) = Arg(1); - } - /* FALL THROUGH */ - OpCase(i_call_ext_only_e): - DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0)); - Dispatchx(); - - OpCase(init_y): { - BeamInstr *next; - - PreFetch(1, next); - make_blank(yb(Arg(0))); - NextPF(1, next); - } - - OpCase(i_trim_I): { - BeamInstr *next; - Uint words; - Uint cp; - - words = Arg(0); - cp = E[0]; - PreFetch(1, next); - E += words; - E[0] = cp; - NextPF(1, next); - } - - OpCase(move_x1_c): { - x(1) = Arg(0); - Next(1); - } - - OpCase(move_x2_c): { - x(2) = Arg(0); - Next(1); - } - - OpCase(return): { - SET_I(c_p->cp); - DTRACE_RETURN_FROM_PC(c_p); - /* - * We must clear the CP to make sure that a stale value do not - * create a false module dependcy preventing code upgrading. - * It also means that we can use the CP in stack backtraces. - */ - c_p->cp = 0; - CHECK_TERM(r(0)); - HEAP_SPACE_VERIFIED(0); - DispatchReturn; - } - - /* - * Send is almost a standard call-BIF with two arguments, except for: - * 1) It cannot be traced. - * 2) There is no pointer to the send_2 function stored in - * the instruction. 
- */ - - OpCase(send): { - BeamInstr *next; - Eterm result; - - if (!(FCALLS > 0 || FCALLS > neg_o_reds)) { - /* If we have run out of reductions, we do a context - switch before calling the bif */ - c_p->arity = 2; - c_p->current = NULL; - goto context_switch3; - } - - PRE_BIF_SWAPOUT(c_p); - c_p->fcalls = FCALLS - 1; - result = erl_send(c_p, r(0), x(1)); - PreFetch(0, next); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - HTOP = HEAP_TOP(c_p); - FCALLS = c_p->fcalls; - if (is_value(result)) { - r(0) = result; - CHECK_TERM(r(0)); - NextPF(0, next); - } else if (c_p->freason == TRAP) { - SET_CP(c_p, I+1); - SET_I(c_p->i); - SWAPIN; - Dispatch(); - } - goto find_func_info; - } - - { - Eterm element_index; - Eterm element_tuple; - - OpCase(i_element_jxsd): - element_tuple = xb(Arg(1)); - goto do_element; - - OpCase(i_element_jysd): - element_tuple = yb(Arg(1)); - goto do_element; - - do_element: - GetArg1(2, element_index); - if (is_small(element_index) && is_tuple(element_tuple)) { - Eterm* tp = tuple_val(element_tuple); - - if ((signed_val(element_index) >= 1) && - (signed_val(element_index) <= arityval(*tp))) { - Eterm result = tp[signed_val(element_index)]; - StoreBifResult(3, result); - } - } - } - /* Fall through */ - - OpCase(badarg_j): - badarg: - c_p->freason = BADARG; - goto lb_Cl_error; - - { - Eterm fast_element_tuple; - - OpCase(i_fast_element_jxId): - fast_element_tuple = xb(Arg(1)); - goto do_fast_element; - - OpCase(i_fast_element_jyId): - fast_element_tuple = yb(Arg(1)); - goto do_fast_element; - - do_fast_element: - if (is_tuple(fast_element_tuple)) { - Eterm* tp = tuple_val(fast_element_tuple); - Eterm pos = Arg(2); /* Untagged integer >= 1 */ - if (pos <= arityval(*tp)) { - Eterm result = tp[pos]; - StoreBifResult(3, result); - } - } - goto badarg; - } - - OpCase(catch_yf): - c_p->catches++; - yb(Arg(0)) = Arg(1); - Next(2); - - OpCase(catch_end_y): { - c_p->catches--; - make_blank(yb(Arg(0))); - if (is_non_value(r(0))) { - c_p->fvalue = NIL; - if (x(1) == am_throw) { - r(0) = x(2); - } else { - if (x(1) == am_error) { - SWAPOUT; - x(2) = add_stacktrace(c_p, x(2), x(3)); - SWAPIN; - } - /* only x(2) is included in the rootset here */ - if (E - HTOP < 3) { - SWAPOUT; - PROCESS_MAIN_CHK_LOCKS(c_p); - FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1, FCALLS); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - SWAPIN; - } - r(0) = TUPLE2(HTOP, am_EXIT, x(2)); - HTOP += 3; - } - } - CHECK_TERM(r(0)); - Next(1); - } - - OpCase(try_end_y): { - c_p->catches--; - make_blank(yb(Arg(0))); - if (is_non_value(r(0))) { - c_p->fvalue = NIL; - r(0) = x(1); - x(1) = x(2); - x(2) = x(3); - } - Next(1); - } - - /* - * Skeleton for receive statement: - * - * recv_mark L1 Optional - * call make_ref/monitor Optional - * ... - * recv_set L1 Optional - * L1: <-------------------+ - * <-----------+ | - * | | - * loop_rec L2 ------+---+ | - * ... | | | - * remove_message | | | - * jump L3 | | | - * ... | | | - * loop_rec_end L1 --+ | | - * L2: <---------------+ | - * wait L1 -----------------+ or wait_timeout - * timeout - * - * L3: Code after receive... - * - * - */ - - OpCase(recv_mark_f): { - /* - * Save the current position in message buffer and the - * the label for the loop_rec/2 instruction for the - * the receive statement. 
- */ - c_p->msg.mark = (BeamInstr *) Arg(0); - c_p->msg.saved_last = c_p->msg.last; - Next(1); - } - - OpCase(i_recv_set): { - /* - * If the mark is valid (points to the loop_rec/2 - * instruction that follows), we know that the saved - * position points to the first message that could - * possibly be matched out. - * - * If the mark is invalid, we do nothing, meaning that - * we will look through all messages in the message queue. - */ - if (c_p->msg.mark == (BeamInstr *) (I+1)) { - c_p->msg.save = c_p->msg.saved_last; - } - I++; - /* Fall through to the loop_rec/2 instruction */ - } - - /* - * Pick up the next message and place it in x(0). - * If no message, jump to a wait or wait_timeout instruction. - */ - OpCase(i_loop_rec_f): - { - BeamInstr *next; - ErtsMessage* msgp; - - /* - * We need to disable GC while matching messages - * in the queue. This since messages with data outside - * the heap will be corrupted by a GC. - */ - ASSERT(!(c_p->flags & F_DELAY_GC)); - c_p->flags |= F_DELAY_GC; - - loop_rec__: - - PROCESS_MAIN_CHK_LOCKS(c_p); - - msgp = PEEK_MESSAGE(c_p); - - if (!msgp) { -#ifdef ERTS_SMP - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - /* Make sure messages wont pass exit signals... */ - if (ERTS_PROC_PENDING_EXIT(c_p)) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - SWAPOUT; - c_p->flags &= ~F_DELAY_GC; - c_p->arity = 0; - goto do_schedule; /* Will be rescheduled for exit */ - } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); - msgp = PEEK_MESSAGE(c_p); - if (msgp) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - else -#endif - { - c_p->flags &= ~F_DELAY_GC; - SET_I((BeamInstr *) Arg(0)); - Goto(*I); /* Jump to a wait or wait_timeout instruction */ - } - } - if (is_non_value(ERL_MESSAGE_TERM(msgp))) { - SWAPOUT; /* erts_decode_dist_message() may write to heap... */ - if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN, msgp, 0)) { - /* - * A corrupt distribution message that we weren't able to decode; - * remove it... - */ - /* No swapin should be needed */ - ASSERT(HTOP == c_p->htop && E == c_p->stop); - /* TODO: Add DTrace probe for this bad message situation? */ - UNLINK_MESSAGE(c_p, msgp); - msgp->next = NULL; - erts_cleanup_messages(msgp); - goto loop_rec__; - } - SWAPIN; - } - PreFetch(1, next); - r(0) = ERL_MESSAGE_TERM(msgp); - NextPF(1, next); - } - - /* - * Remove a (matched) message from the message queue. 
- */ - OpCase(remove_message): { - BeamInstr *next; - ErtsMessage* msgp; - PROCESS_MAIN_CHK_LOCKS(c_p); - - ERTS_CHK_MBUF_SZ(c_p); - - PreFetch(0, next); - msgp = PEEK_MESSAGE(c_p); - - if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) { - save_calls(c_p, &exp_receive); - } - if (ERL_MESSAGE_TOKEN(msgp) == NIL) { -#ifdef USE_VM_PROBES - if (DT_UTAG(c_p) != NIL) { - if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) { - SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag; - } else { - DT_UTAG(c_p) = NIL; - SEQ_TRACE_TOKEN(c_p) = NIL; - } - } else { -#endif - SEQ_TRACE_TOKEN(c_p) = NIL; -#ifdef USE_VM_PROBES - } - DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING; -#endif - } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) { - Eterm msg; - SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp); -#ifdef USE_VM_PROBES - if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) { - if (DT_UTAG(c_p) == NIL) { - DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp); - } - DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING; - } else { -#endif - ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p))); - ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5); - ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p))); - ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p))); - ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p))); - ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p))); - c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p)); - if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) { - c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p)); - } - msg = ERL_MESSAGE_TERM(msgp); - seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE, - c_p->common.id, c_p); -#ifdef USE_VM_PROBES - } -#endif - } -#ifdef USE_VM_PROBES - if (DTRACE_ENABLED(message_receive)) { - Eterm token2 = NIL; - DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE); - Sint tok_label = 0; - Sint tok_lastcnt = 0; - Sint tok_serial = 0; - - dtrace_proc_str(c_p, receiver_name); - token2 = SEQ_TRACE_TOKEN(c_p); - if (have_seqtrace(token2)) { - tok_label = signed_val(SEQ_TRACE_T_LABEL(token2)); - tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2)); - tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2)); - } - DTRACE6(message_receive, - receiver_name, size_object(ERL_MESSAGE_TERM(msgp)), - c_p->msg.len - 1, tok_label, tok_lastcnt, tok_serial); - } -#endif - UNLINK_MESSAGE(c_p, msgp); - JOIN_MESSAGE(c_p); - CANCEL_TIMER(c_p); - - erts_save_message_in_proc(c_p, msgp); - c_p->flags &= ~F_DELAY_GC; - - if (ERTS_IS_GC_DESIRED_INTERNAL(c_p, HTOP, E)) { - /* - * We want to GC soon but we leave a few - * reductions giving the message some time - * to turn into garbage. - */ - ERTS_VBUMP_LEAVE_REDS_INTERNAL(c_p, 5, FCALLS); - } - - ERTS_DBG_CHK_REDS(c_p, FCALLS); - ERTS_CHK_MBUF_SZ(c_p); - - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - NextPF(0, next); - } - - /* - * Advance the save pointer to the next message (the current - * message didn't match), then jump to the loop_rec instruction. - */ - OpCase(loop_rec_end_f): { - - ASSERT(c_p->flags & F_DELAY_GC); - - SET_I((BeamInstr *) Arg(0)); - SAVE_MESSAGE(c_p); - if (FCALLS > 0 || FCALLS > neg_o_reds) { - FCALLS--; - goto loop_rec__; - } - - c_p->flags &= ~F_DELAY_GC; - c_p->i = I; - SWAPOUT; - c_p->arity = 0; - c_p->current = NULL; - goto do_schedule; - } - /* - * Prepare to wait for a message or a timeout, whichever occurs first. - * - * Note: In order to keep the compatibility between 32 and 64 bits - * emulators, only timeout values that can be represented in 32 bits - * (unsigned) or less are allowed. 
- */ - - - OpCase(i_wait_timeout_fs): { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - - /* Fall through */ - } - OpCase(i_wait_timeout_locked_fs): { - Eterm timeout_value; - - /* - * If we have already set the timer, we must NOT set it again. Therefore, - * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag. - */ - if (c_p->flags & (F_INSLPQUEUE | F_TIMO)) { - goto wait2; - } - GetArg1(1, timeout_value); - if (timeout_value != make_small(0)) { - - if (timeout_value == am_infinity) - c_p->flags |= F_TIMO; - else { - int tres = erts_set_proc_timer_term(c_p, timeout_value); - if (tres == 0) { - /* - * The timer routiner will set c_p->i to the value in - * c_p->def_arg_reg[0]. Note that it is safe to use this - * location because there are no living x registers in - * a receive statement. - */ - BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg; - *pi = I+3; - } - else { /* Wrong time */ - OpCase(i_wait_error_locked): { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - /* Fall through */ - } - OpCase(i_wait_error): { - c_p->freason = EXC_TIMEOUT_VALUE; - goto find_func_info; - } - } - } - - /* - * Prepare to wait indefinitely for a new message to arrive - * (or the time set above if falling through from above). - * - * When a new message arrives, control will be transferred - * the loop_rec instruction (at label L1). In case of - * of timeout, control will be transferred to the timeout - * instruction following the wait_timeout instruction. - */ - - OpCase(wait_locked_f): - OpCase(wait_f): - - wait2: { -#ifndef ERTS_SMP - if (ERTS_PROC_IS_EXITING(c_p)) { - /* - * I non smp case: - * - * Currently executing process might be sent an exit - * signal if it is traced by a port that it also is - * linked to, and the port terminates during the - * trace. In this case we do *not* want to clear - * the active flag, which will make the process hang - * in limbo forever. - */ - SWAPOUT; - c_p->arity = 0; - goto do_schedule; - } -#endif - c_p->i = (BeamInstr *) Arg(0); /* L1 */ - SWAPOUT; - c_p->arity = 0; - - if (!ERTS_PTMR_IS_TIMED_OUT(c_p)) - erts_smp_atomic32_read_band_relb(&c_p->state, - ~ERTS_PSFLG_ACTIVE); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - c_p->current = NULL; - goto do_schedule; - } - OpCase(wait_unlocked_f): { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - goto wait2; - } - } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - Next(2); - } - - OpCase(i_wait_timeout_fI): { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - } - - OpCase(i_wait_timeout_locked_fI): - { - /* - * If we have already set the timer, we must NOT set it again. Therefore, - * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag. - */ - if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) { - BeamInstr** p = (BeamInstr **) c_p->def_arg_reg; - *p = I+3; - erts_set_proc_timer_uword(c_p, Arg(1)); - } - goto wait2; - } - - /* - * A timeout has occurred. Reset the save pointer so that the next - * receive statement will examine the first message first. 
- */ - OpCase(timeout_locked): { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - } - - OpCase(timeout): { - BeamInstr *next; - - PreFetch(0, next); - if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) { - trace_receive(c_p, am_clock_service, am_timeout, NULL); - } - if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) { - save_calls(c_p, &exp_timeout); - } - c_p->flags &= ~F_TIMO; - JOIN_MESSAGE(c_p); - NextPF(0, next); - } - - { - Eterm select_val2; - - OpCase(i_select_tuple_arity2_yfAAff): - select_val2 = yb(Arg(0)); - goto do_select_tuple_arity2; - - OpCase(i_select_tuple_arity2_xfAAff): - select_val2 = xb(Arg(0)); - goto do_select_tuple_arity2; - - do_select_tuple_arity2: - if (is_not_tuple(select_val2)) { - goto select_val2_fail; - } - select_val2 = *tuple_val(select_val2); - goto do_select_val2; - - OpCase(i_select_val2_yfccff): - select_val2 = yb(Arg(0)); - goto do_select_val2; - - OpCase(i_select_val2_xfccff): - select_val2 = xb(Arg(0)); - goto do_select_val2; - - do_select_val2: - if (select_val2 == Arg(2)) { - I += 3; - } else if (select_val2 == Arg(3)) { - I += 4; - } - - select_val2_fail: - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - - { - Eterm select_val; - - OpCase(i_select_tuple_arity_xfI): - select_val = xb(Arg(0)); - goto do_select_tuple_arity; - - OpCase(i_select_tuple_arity_yfI): - select_val = yb(Arg(0)); - goto do_select_tuple_arity; - - do_select_tuple_arity: - if (is_tuple(select_val)) { - select_val = *tuple_val(select_val); - goto do_linear_search; - } - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - - OpCase(i_select_val_lins_xfI): - select_val = xb(Arg(0)); - goto do_linear_search; - - OpCase(i_select_val_lins_yfI): - select_val = yb(Arg(0)); - goto do_linear_search; - - do_linear_search: { - BeamInstr *vs = &Arg(3); - int ix = 0; - - for(;;) { - if (vs[ix+0] >= select_val) { ix += 0; break; } - if (vs[ix+1] >= select_val) { ix += 1; break; } - ix += 2; - } - - if (vs[ix] == select_val) { - I += ix + Arg(2) + 2; - } - - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - - OpCase(i_select_val_bins_xfI): - select_val = xb(Arg(0)); - goto do_binary_search; - - OpCase(i_select_val_bins_yfI): - select_val = yb(Arg(0)); - goto do_binary_search; - - do_binary_search: - { - struct Pairs { - BeamInstr val; - BeamInstr* addr; - }; - struct Pairs* low; - struct Pairs* high; - struct Pairs* mid; - int bdiff; /* int not long because the arrays aren't that large */ - - low = (struct Pairs *) &Arg(3); - high = low + Arg(2); - - /* The pointer subtraction (high-low) below must produce - * a signed result, because high could be < low. That - * requires the compiler to insert quite a bit of code. - * - * However, high will be > low so the result will be - * positive. We can use that knowledge to optimise the - * entire sequence, from the initial comparison to the - * computation of mid. 
- * - * -- Mikael Pettersson, Acumem AB - * - * Original loop control code: - * - * while (low < high) { - * mid = low + (high-low) / 2; - * - */ - while ((bdiff = (int)((char*)high - (char*)low)) > 0) { - unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Pairs)-1); - - mid = (struct Pairs*)((char*)low + boffset); - if (select_val < mid->val) { - high = mid; - } else if (select_val > mid->val) { - low = mid + 1; - } else { - SET_I(mid->addr); - Goto(*I); - } - } - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - } - - { - Eterm jump_on_val_zero_index; - - OpCase(i_jump_on_val_zero_yfI): - jump_on_val_zero_index = yb(Arg(0)); - goto do_jump_on_val_zero_index; - - OpCase(i_jump_on_val_zero_xfI): - jump_on_val_zero_index = xb(Arg(0)); - goto do_jump_on_val_zero_index; - - do_jump_on_val_zero_index: - if (is_small(jump_on_val_zero_index)) { - jump_on_val_zero_index = signed_val(jump_on_val_zero_index); - if (jump_on_val_zero_index < Arg(2)) { - SET_I((BeamInstr *) (&Arg(3))[jump_on_val_zero_index]); - Goto(*I); - } - } - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - - { - Eterm jump_on_val_index; - - - OpCase(i_jump_on_val_yfII): - jump_on_val_index = yb(Arg(0)); - goto do_jump_on_val_index; - - OpCase(i_jump_on_val_xfII): - jump_on_val_index = xb(Arg(0)); - goto do_jump_on_val_index; - - do_jump_on_val_index: - if (is_small(jump_on_val_index)) { - jump_on_val_index = (Uint) (signed_val(jump_on_val_index) - Arg(3)); - if (jump_on_val_index < Arg(2)) { - SET_I((BeamInstr *) (&Arg(4))[jump_on_val_index]); - Goto(*I); - } - } - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - - do_put_tuple: { - Eterm* hp = HTOP; - - *hp++ = make_arityval(pt_arity); - - do { - Eterm term = *I++; - switch (loader_tag(term)) { - case LOADER_X_REG: - *hp++ = x(loader_x_reg_index(term)); - break; - case LOADER_Y_REG: - *hp++ = y(loader_y_reg_index(term)); - break; - default: - *hp++ = term; - break; - } - } while (--pt_arity != 0); - HTOP = hp; - Goto(*I); - } - - OpCase(new_map_dII): { - Eterm res; - - HEAVY_SWAPOUT; - res = new_map(c_p, reg, I-1); - HEAVY_SWAPIN; - StoreResult(res, Arg(0)); - Next(3+Arg(2)); - } - -#define PUT_TERM_REG(term, desc) \ -do { \ - switch (loader_tag(desc)) { \ - case LOADER_X_REG: \ - x(loader_x_reg_index(desc)) = (term); \ - break; \ - case LOADER_Y_REG: \ - y(loader_y_reg_index(desc)) = (term); \ - break; \ - default: \ - ASSERT(0); \ - break; \ - } \ -} while(0) - - OpCase(i_get_map_elements_fsI): { - Eterm map; - BeamInstr *fs; - Uint sz, n; - GetArg1(1, map); - - /* this instruction assumes Arg1 is a map, - * i.e. that it follows a test is_map if needed. 
- */ - - n = (Uint)Arg(2) / 3; - fs = &Arg(3); /* pattern fields and target registers */ - - if (is_flatmap(map)) { - flatmap_t *mp; - Eterm *ks; - Eterm *vs; - - mp = (flatmap_t *)flatmap_val(map); - sz = flatmap_get_size(mp); - - if (sz == 0) { - ClauseFail(); - } - - ks = flatmap_get_keys(mp); - vs = flatmap_get_values(mp); - - while(sz) { - if (EQ((Eterm) fs[0], *ks)) { - PUT_TERM_REG(*vs, fs[1]); - n--; - fs += 3; - /* no more values to fetch, we are done */ - if (n == 0) { - I = fs; - Next(-1); - } - } - ks++, sz--, vs++; - } - - ClauseFail(); - } else { - const Eterm *v; - Uint32 hx; - ASSERT(is_hashmap(map)); - while(n--) { - hx = fs[2]; - ASSERT(hx == hashmap_make_hash((Eterm)fs[0])); - if ((v = erts_hashmap_get(hx, (Eterm)fs[0], map)) == NULL) { - ClauseFail(); - } - PUT_TERM_REG(*v, fs[1]); - fs += 3; - } - I = fs; - Next(-1); - } - } -#undef PUT_TERM_REG - - OpCase(update_map_assoc_jsdII): { - Eterm res; - Eterm map; - - GetArg1(1, map); - HEAVY_SWAPOUT; - res = update_map_assoc(c_p, reg, map, I); - HEAVY_SWAPIN; - if (is_value(res)) { - StoreResult(res, Arg(2)); - Next(5+Arg(4)); - } else { - /* - * This can only happen if the code was compiled - * with the compiler in OTP 17. - */ - c_p->freason = BADMAP; - c_p->fvalue = map; - goto lb_Cl_error; - } - } - - OpCase(update_map_exact_jsdII): { - Eterm res; - Eterm map; - - GetArg1(1, map); - HEAVY_SWAPOUT; - res = update_map_exact(c_p, reg, map, I); - HEAVY_SWAPIN; - if (is_value(res)) { - StoreResult(res, Arg(2)); - Next(5+Arg(4)); - } else { - goto lb_Cl_error; - } - } - - - /* - * All guards with zero arguments have special instructions: - * self/0 - * node/0 - * - * All other guard BIFs take one or two arguments. - */ - - /* - * Guard BIF in head. On failure, ignore the error and jump - * to the code for the next clause. We don't support tracing - * of guard BIFs. - */ - - OpCase(bif1_fbsd): - { - ErtsBifFunc bf; - Eterm tmp_reg[1]; - Eterm result; - - GetArg1(2, tmp_reg[0]); - bf = (BifFunction) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, tmp_reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(3, result); - } - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - - /* - * Guard BIF in body. It can fail like any BIF. No trace support. 
- */ - - OpCase(bif1_body_bsd): - { - ErtsBifFunc bf; - - Eterm tmp_reg[1]; - Eterm result; - - GetArg1(1, tmp_reg[0]); - bf = (ErtsBifFunc) Arg(0); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, tmp_reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(2, result); - } - reg[0] = tmp_reg[0]; - SWAPOUT; - I = handle_error(c_p, I, reg, ubif2mfa((void *) bf)); - goto post_error_handling; - } - - OpCase(i_gc_bif1_jIsId): - { - typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); - GcBifFunction bf; - Eterm result; - Uint live = (Uint) Arg(3); - - GetArg1(2, x(live)); - bf = (GcBifFunction) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - SWAPOUT; - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, reg, live); - ERTS_CHK_MBUF_SZ(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - SWAPIN; - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(4, result); - } - if (Arg(0) != 0) { - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - x(0) = x(live); - I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); - goto post_error_handling; - } - - OpCase(i_gc_bif2_jIIssd): /* Note, one less parameter than the i_gc_bif1 - and i_gc_bif3 */ - { - typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); - GcBifFunction bf; - Eterm result; - Uint live = (Uint) Arg(2); - - GetArg2(3, x(live), x(live+1)); - /* - * XXX This calling convention does not make sense. 'live' - * should point out the first argument, not the second - * (i.e. 'live' should not be incremented below). - */ - live++; - bf = (GcBifFunction) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - SWAPOUT; - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, reg, live); - ERTS_CHK_MBUF_SZ(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - SWAPIN; - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(5, result); - } - if (Arg(0) != 0) { - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - live--; - x(0) = x(live); - x(1) = x(live+1); - I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); - goto post_error_handling; - } - - OpCase(i_gc_bif3_jIIssd): - { - typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); - GcBifFunction bf; - Eterm result; - Uint live = (Uint) Arg(2); - - x(live) = x(SCRATCH_X_REG); - GetArg2(3, x(live+1), x(live+2)); - /* - * XXX This calling convention does not make sense. 'live' - * should point out the first argument, not the third - * (i.e. 'live' should not be incremented below). 
- */ - live += 2; - bf = (GcBifFunction) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - SWAPOUT; - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, reg, live); - ERTS_CHK_MBUF_SZ(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - SWAPIN; - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(5, result); - } - if (Arg(0) != 0) { - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - live -= 2; - x(0) = x(live); - x(1) = x(live+1); - x(2) = x(live+2); - I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); - goto post_error_handling; - } - - /* - * Guards bifs and, or, xor in guards. - */ - OpCase(i_bif2_fbssd): - { - Eterm tmp_reg[2]; - ErtsBifFunc bf; - Eterm result; - - GetArg2(2, tmp_reg[0], tmp_reg[1]); - bf = (ErtsBifFunc) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, tmp_reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(4, result); - } - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - - /* - * Guards bifs and, or, xor, relational operators in body. - */ - OpCase(i_bif2_body_bssd): - { - Eterm tmp_reg[2]; - ErtsBifFunc bf; - Eterm result; - - GetArg2(1, tmp_reg[0], tmp_reg[1]); - bf = (ErtsBifFunc) Arg(0); - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, tmp_reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_HOLE_CHECK(c_p); - if (is_value(result)) { - ASSERT(!is_CP(result)); - StoreBifResult(3, result); - } - reg[0] = tmp_reg[0]; - reg[1] = tmp_reg[1]; - SWAPOUT; - I = handle_error(c_p, I, reg, ubif2mfa((void *) bf)); - goto post_error_handling; - } - - /* - * The most general BIF call. The BIF may build any amount of data - * on the heap. The result is always returned in r(0). 
- */ - OpCase(call_bif_e): - { - ErtsBifFunc bf; - Eterm result; - BeamInstr *next; - ErlHeapFragment *live_hf_end; - Export *export = (Export*)Arg(0); - - - if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) { - /* If we have run out of reductions, we do a context - switch before calling the bif */ - c_p->arity = GET_BIF_ARITY(export); - c_p->current = &export->info.mfa; - goto context_switch3; - } - - ERTS_MSACC_SET_BIF_STATE_CACHED_X( - GET_BIF_MODULE(export), GET_BIF_ADDRESS(export)); - - bf = GET_BIF_ADDRESS(export); - - PRE_BIF_SWAPOUT(c_p); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS - 1; - if (FCALLS <= 0) { - save_calls(c_p, export); - } - PreFetch(1, next); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - live_hf_end = c_p->mbuf; - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_HOLE_CHECK(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - if (ERTS_IS_GC_DESIRED(c_p)) { - Uint arity = GET_BIF_ARITY(export); - result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result, reg, arity); - E = c_p->stop; - } - PROCESS_MAIN_CHK_LOCKS(c_p); - HTOP = HEAP_TOP(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - /* We have to update the cache if we are enabled in order - to make sure no book keeping is done after we disabled - msacc. We don't always do this as it is quite expensive. */ - if (ERTS_MSACC_IS_ENABLED_CACHED_X()) - ERTS_MSACC_UPDATE_CACHE_X(); - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); - if (is_value(result)) { - r(0) = result; - CHECK_TERM(r(0)); - NextPF(1, next); - } else if (c_p->freason == TRAP) { - SET_CP(c_p, I+2); - SET_I(c_p->i); - SWAPIN; - Dispatch(); - } - - /* - * Error handling. SWAPOUT is not needed because it was done above. - */ - ASSERT(c_p->stop == E); - I = handle_error(c_p, I, reg, &export->info.mfa); - goto post_error_handling; - } - - /* - * Arithmetic operations. - */ - - OpCase(i_times_jIssd): - { - Eterm Op1, Op2; - GetArg2(2, Op1, Op2); - DO_OUTLINED_ARITH_2(mixed_times, Op1, Op2); - } - - OpCase(i_m_div_jIssd): - { - Eterm Op1, Op2; - GetArg2(2, Op1, Op2); - DO_OUTLINED_ARITH_2(mixed_div, Op1, Op2); - } - - OpCase(i_int_div_jIssd): - { - Eterm Op1, Op2; - - GetArg2(2, Op1, Op2); - if (Op2 == SMALL_ZERO) { - goto badarith; - } else if (is_both_small(Op1, Op2)) { - Sint ires = signed_val(Op1) / signed_val(Op2); - if (MY_IS_SSMALL(ires)) { - Eterm result = make_small(ires); - StoreBifResult(4, result); - } - } - DO_OUTLINED_ARITH_2(int_div, Op1, Op2); - } - - { - Eterm RemOp1, RemOp2; - - OpCase(i_rem_jIxxd): - RemOp1 = xb(Arg(2)); - RemOp2 = xb(Arg(3)); - goto do_rem; - - OpCase(i_rem_jIssd): - GetArg2(2, RemOp1, RemOp2); - goto do_rem; - - do_rem: - if (RemOp2 == SMALL_ZERO) { - goto badarith; - } else if (is_both_small(RemOp1, RemOp2)) { - Eterm result = make_small(signed_val(RemOp1) % signed_val(RemOp2)); - StoreBifResult(4, result); - } else { - DO_OUTLINED_ARITH_2(int_rem, RemOp1, RemOp2); - } - } - - { - Eterm BandOp1, BandOp2; - - OpCase(i_band_jIxcd): - BandOp1 = xb(Arg(2)); - BandOp2 = Arg(3); - goto do_band; - - OpCase(i_band_jIssd): - GetArg2(2, BandOp1, BandOp2); - goto do_band; - - do_band: - if (is_both_small(BandOp1, BandOp2)) { - /* - * No need to untag -- TAG & TAG == TAG. 
- */ - Eterm result = BandOp1 & BandOp2; - StoreBifResult(4, result); - } - DO_OUTLINED_ARITH_2(band, BandOp1, BandOp2); - } - - /* - * An error occurred in an arithmetic operation or test that could - * appear either in a head or in a body. - * In a head, execution should continue at failure address in Arg(0). - * In a body, Arg(0) == 0 and an exception should be raised. - */ - lb_Cl_error: { - if (Arg(0) != 0) { - OpCase(jump_f): { - jump_f: - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - } - ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue)); - goto find_func_info; - } - - OpCase(i_bor_jIssd): - { - Eterm Op1, Op2; - - GetArg2(2, Op1, Op2); - if (is_both_small(Op1, Op2)) { - /* - * No need to untag -- TAG | TAG == TAG. - */ - Eterm result = Op1 | Op2; - StoreBifResult(4, result); - } - DO_OUTLINED_ARITH_2(bor, Op1, Op2); - } - - OpCase(i_bxor_jIssd): - { - Eterm Op1, Op2; - - GetArg2(2, Op1, Op2); - if (is_both_small(Op1, Op2)) { - /* - * TAG ^ TAG == 0. - * - * Therefore, we perform the XOR operation on the tagged values, - * and OR in the tag bits. - */ - Eterm result = (Op1 ^ Op2) | make_small(0); - StoreBifResult(4, result); - } - DO_OUTLINED_ARITH_2(bxor, Op1, Op2); - } - - { - Eterm Op1, Op2; - Sint i; - Sint ires; - Eterm* bigp; - Eterm tmp_big[2]; - - OpCase(i_bsr_jIssd): - GetArg2(2, Op1, Op2); - if (is_small(Op2)) { - i = -signed_val(Op2); - if (is_small(Op1)) { - goto small_shift; - } else if (is_big(Op1)) { - if (i == 0) { - StoreBifResult(4, Op1); - } - ires = big_size(Op1); - goto big_shift; - } - } else if (is_big(Op2)) { - /* - * N bsr NegativeBigNum == N bsl MAX_SMALL - * N bsr PositiveBigNum == N bsl MIN_SMALL - */ - Op2 = make_small(bignum_header_is_neg(*big_val(Op2)) ? - MAX_SMALL : MIN_SMALL); - goto do_bsl; - } - goto badarith; - - OpCase(i_bsl_jIssd): - GetArg2(2, Op1, Op2); - do_bsl: - if (is_small(Op2)) { - i = signed_val(Op2); - - if (is_small(Op1)) { - small_shift: - ires = signed_val(Op1); - - if (i == 0 || ires == 0) { - StoreBifResult(4, Op1); - } else if (i < 0) { /* Right shift */ - i = -i; - if (i >= SMALL_BITS-1) { - Op1 = (ires < 0) ? SMALL_MINUS_ONE : SMALL_ZERO; - } else { - Op1 = make_small(ires >> i); - } - StoreBifResult(4, Op1); - } else if (i < SMALL_BITS-1) { /* Left shift */ - if ((ires > 0 && ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ires) == 0) || - ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ~ires) == 0) { - Op1 = make_small(ires << i); - StoreBifResult(4, Op1); - } - } - ires = 1; /* big_size(small_to_big(Op1)) */ - - big_shift: - if (i > 0) { /* Left shift. */ - ires += (i / D_EXP); - } else { /* Right shift. */ - if (ires <= (-i / D_EXP)) - ires = 3; /* ??? */ - else - ires -= (-i / D_EXP); - } - { - ires = BIG_NEED_SIZE(ires+1); - /* - * Slightly conservative check the size to avoid - * allocating huge amounts of memory for bignums that - * clearly would overflow the arity in the header - * word. - */ - if (ires-8 > BIG_ARITY_MAX) { - c_p->freason = SYSTEM_LIMIT; - goto lb_Cl_error; - } - TestHeapPreserve(ires+1, Arg(1), Op1); - if (is_small(Op1)) { - Op1 = small_to_big(signed_val(Op1), tmp_big); - } - bigp = HTOP; - Op1 = big_lshift(Op1, i, bigp); - if (is_big(Op1)) { - HTOP += bignum_header_arity(*HTOP) + 1; - } - HEAP_SPACE_VERIFIED(0); - if (is_nil(Op1)) { - /* - * This result must have been only slight larger - * than allowed since it wasn't caught by the - * previous test. 
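The i_band, i_bor and i_bxor clauses above lean on the tagging identities called out in their comments (TAG & TAG == TAG, TAG | TAG == TAG, TAG ^ TAG == 0), which is why the operations can run on the tagged words without untagging. A stand-alone sketch of that argument follows; the 4-bit 0xF tag is only an illustration, not the actual Eterm encoding used by ERTS:

    #include <assert.h>
    #include <stdint.h>

    #define TAG        0xFu                          /* low bits marking an immediate small */
    #define make_sm(x) (((uintptr_t)(x) << 4) | TAG)

    int main(void)
    {
        uintptr_t a = make_sm(0x25), b = make_sm(0x13);

        /* TAG & TAG == TAG and TAG | TAG == TAG: the tag survives the operation. */
        assert((a & b) == make_sm(0x25 & 0x13));
        assert((a | b) == make_sm(0x25 | 0x13));

        /* TAG ^ TAG == 0: XOR clears the tag, so it is ORed back in,
         * just like "(Op1 ^ Op2) | make_small(0)" in the clause above. */
        assert(((a ^ b) | make_sm(0)) == make_sm(0x25 ^ 0x13));
        return 0;
    }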
- */ - c_p->freason = SYSTEM_LIMIT; - goto lb_Cl_error; - } - ERTS_HOLE_CHECK(c_p); - StoreBifResult(4, Op1); - } - } else if (is_big(Op1)) { - if (i == 0) { - StoreBifResult(4, Op1); - } - ires = big_size(Op1); - goto big_shift; - } - } else if (is_big(Op2)) { - if (bignum_header_is_neg(*big_val(Op2))) { - /* - * N bsl NegativeBigNum is either 0 or -1, depending on - * the sign of N. Since we don't believe this case - * is common, do the calculation with the minimum - * amount of code. - */ - Op2 = make_small(MIN_SMALL); - goto do_bsl; - } else if (is_small(Op1) || is_big(Op1)) { - /* - * N bsl PositiveBigNum is too large to represent. - */ - c_p->freason = SYSTEM_LIMIT; - goto lb_Cl_error; - } - /* Fall through if the left argument is not an integer. */ - } - /* - * One or more non-integer arguments. - */ - goto badarith; - } - - OpCase(i_int_bnot_jsId): - { - Eterm bnot_val; - - GetArg1(1, bnot_val); - if (is_small(bnot_val)) { - bnot_val = make_small(~signed_val(bnot_val)); - } else { - Uint live = Arg(2); - HEAVY_SWAPOUT; - reg[live] = bnot_val; - bnot_val = erts_gc_bnot(c_p, reg, live); - HEAVY_SWAPIN; - ERTS_HOLE_CHECK(c_p); - if (is_nil(bnot_val)) { - goto lb_Cl_error; - } - } - StoreBifResult(3, bnot_val); - } - - badarith: - c_p->freason = BADARITH; - goto lb_Cl_error; - - OpCase(i_apply): { - BeamInstr *next; - HEAVY_SWAPOUT; - next = apply(c_p, r(0), x(1), x(2), reg, NULL, 0); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, I+1); - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(i_apply_last_P): { - BeamInstr *next; - HEAVY_SWAPOUT; - next = apply(c_p, r(0), x(1), x(2), reg, I, Arg(0)); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, (BeamInstr *) E[0]); - E = ADD_BYTE_OFFSET(E, Arg(0)); - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(i_apply_only): { - BeamInstr *next; - HEAVY_SWAPOUT; - next = apply(c_p, r(0), x(1), x(2), reg, I, 0); - HEAVY_SWAPIN; - if (next != NULL) { - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(apply_I): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = fixed_apply(c_p, reg, Arg(0), NULL, 0); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, I+2); - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(apply_last_IP): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = fixed_apply(c_p, reg, Arg(0), I, Arg(1)); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, (BeamInstr *) E[0]); - E = ADD_BYTE_OFFSET(E, Arg(1)); - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(i_apply_fun): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = apply_fun(c_p, r(0), x(1), reg); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, I+1); - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - - OpCase(i_apply_fun_last_P): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = apply_fun(c_p, r(0), x(1), reg); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, (BeamInstr *) E[0]); - E = ADD_BYTE_OFFSET(E, Arg(0)); - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - - OpCase(i_apply_fun_only): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = apply_fun(c_p, r(0), x(1), reg); - HEAVY_SWAPIN; - 
if (next != NULL) { - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - - OpCase(i_call_fun_I): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, I+2); - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - - OpCase(i_call_fun_last_IP): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, (BeamInstr *) E[0]); - E = ADD_BYTE_OFFSET(E, Arg(1)); - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } +#include "beam_hot.h" #ifdef DEBUG /* @@ -3395,7 +816,7 @@ do { \ Eterm* argp; int i; - if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) { + if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) { c_p->i = beam_exit; c_p->arity = 0; c_p->current = NULL; @@ -3452,64 +873,25 @@ do { \ goto do_schedule1; } - OpCase(set_tuple_element_sdP): { - Eterm element; - Eterm tuple; - BeamInstr *next; - Eterm* p; - - PreFetch(3, next); - GetArg1(0, element); - tuple = REG_TARGET(Arg(1)); - ASSERT(is_tuple(tuple)); - p = (Eterm *) ((unsigned char *) tuple_val(tuple) + Arg(2)); - *p = element; - NextPF(3, next); - } +#include "beam_warm.h" OpCase(normal_exit): { SWAPOUT; c_p->freason = EXC_NORMAL; - c_p->arity = 0; /* In case this process will never be garbed again. */ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + c_p->arity = 0; /* In case this process will ever be garbed again. */ + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); erts_do_exit_process(c_p, am_normal); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); goto do_schedule; } OpCase(continue_exit): { - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); erts_continue_exit_process(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); goto do_schedule; } - OpCase(i_raise): { - Eterm raise_trace = x(2); - Eterm raise_value = x(1); - struct StackTrace *s; - - c_p->fvalue = raise_value; - c_p->ftrace = raise_trace; - s = get_trace_from_exc(raise_trace); - if (s == NULL) { - c_p->freason = EXC_ERROR; - } else { - c_p->freason = PRIMARY_EXCEPTION(s->freason); - } - goto find_func_info; - } - - { - Eterm badmatch_val; - - OpCase(badmatch_x): - badmatch_val = xb(Arg(0)); - c_p->fvalue = badmatch_val; - c_p->freason = BADMATCH; - } - /* Fall through here */ - find_func_info: { SWAPOUT; I = handle_error(c_p, I, reg, NULL); @@ -3550,194 +932,6 @@ do { \ } } - { - Eterm nif_bif_result; - Eterm bif_nif_arity; - - OpCase(call_nif): - { - /* - * call_nif is always first instruction in function: - * - * I[-3]: Module - * I[-2]: Function - * I[-1]: Arity - * I[0]: &&call_nif - * I[1]: Function pointer to NIF function - * I[2]: Pointer to erl_module_nif - * I[3]: Function pointer to dirty NIF - * - * This layout is determined by the NifExport struct - */ - BifFunction vbf; - ErlHeapFragment *live_hf_end; - ErtsCodeMFA *codemfa; - - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF); - - codemfa = erts_code_to_codemfa(I); - - c_p->current = codemfa; /* current and vbf set to please handle_error */ - - DTRACE_NIF_ENTRY(c_p, codemfa); - - HEAVY_SWAPOUT; - - PROCESS_MAIN_CHK_LOCKS(c_p); - bif_nif_arity = codemfa->arity; - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - { - typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]); - NifF* fp = vbf = (NifF*) I[1]; - struct enif_environment_t env; -#ifdef ERTS_SMP - ASSERT(c_p->scheduler_data); -#endif - 
live_hf_end = c_p->mbuf; - ERTS_CHK_MBUF_SZ(c_p); - erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL); - nif_bif_result = (*fp)(&env, bif_nif_arity, reg); - if (env.exception_thrown) - nif_bif_result = THE_NON_VALUE; - erts_post_nif(&env); - ERTS_CHK_MBUF_SZ(c_p); - - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); - ASSERT(!env.exiting); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - } - - DTRACE_NIF_RETURN(c_p, codemfa); - goto apply_bif_or_nif_epilogue; - - OpCase(apply_bif): - /* - * At this point, I points to the code[0] in the export entry for - * the BIF: - * - * code[-3]: Module - * code[-2]: Function - * code[-1]: Arity - * code[0]: &&apply_bif - * code[1]: Function pointer to BIF function - */ - - if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) { - /* If we have run out of reductions, we do a context - switch before calling the bif */ - goto context_switch; - } - - codemfa = erts_code_to_codemfa(I); - - ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)Arg(0)); - - - /* In case we apply process_info/1,2 or load_nif/1 */ - c_p->current = codemfa; - c_p->i = I; /* In case we apply check_process_code/2. */ - c_p->arity = 0; /* To allow garbage collection on ourselves - * (check_process_code/2). - */ - DTRACE_BIF_ENTRY(c_p, codemfa); - - SWAPOUT; - ERTS_DBG_CHK_REDS(c_p, FCALLS - 1); - c_p->fcalls = FCALLS - 1; - vbf = (BifFunction) Arg(0); - PROCESS_MAIN_CHK_LOCKS(c_p); - bif_nif_arity = codemfa->arity; - ASSERT(bif_nif_arity <= 4); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - { - ErtsBifFunc bf = vbf; - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - live_hf_end = c_p->mbuf; - ERTS_CHK_MBUF_SZ(c_p); - nif_bif_result = (*bf)(c_p, reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || - is_non_value(nif_bif_result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - } - /* We have to update the cache if we are enabled in order - to make sure no book keeping is done after we disabled - msacc. We don't always do this as it is quite expensive. */ - if (ERTS_MSACC_IS_ENABLED_CACHED_X()) - ERTS_MSACC_UPDATE_CACHE_X(); - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); - DTRACE_BIF_RETURN(c_p, codemfa); - - apply_bif_or_nif_epilogue: - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - ERTS_HOLE_CHECK(c_p); - if (ERTS_IS_GC_DESIRED(c_p)) { - nif_bif_result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, - nif_bif_result, - reg, bif_nif_arity); - } - SWAPIN; /* There might have been a garbage collection. 
*/ - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(nif_bif_result)) { - r(0) = nif_bif_result; - CHECK_TERM(r(0)); - SET_I(c_p->cp); - c_p->cp = 0; - Goto(*I); - } else if (c_p->freason == TRAP) { - SET_I(c_p->i); - if (c_p->flags & F_HIBERNATE_SCHED) { - c_p->flags &= ~F_HIBERNATE_SCHED; - goto do_schedule; - } - Dispatch(); - } - I = handle_error(c_p, c_p->cp, reg, c_p->current); - goto post_error_handling; - } - } - - OpCase(i_get_sd): - { - Eterm arg; - Eterm result; - - GetArg1(0, arg); - result = erts_pd_hash_get(c_p, arg); - StoreBifResult(1, result); - } - - OpCase(i_get_hash_cId): - { - Eterm arg; - Eterm result; - - GetArg1(0, arg); - result = erts_pd_hash_get_with_hx(c_p, Arg(1), arg); - StoreBifResult(2, result); - } - - { - Eterm case_end_val; - - OpCase(case_end_x): - case_end_val = xb(Arg(0)); - c_p->fvalue = case_end_val; - c_p->freason = EXC_CASE_CLAUSE; - goto find_func_info; - } - - OpCase(if_end): - c_p->freason = EXC_IF_CLAUSE; - goto find_func_info; - OpCase(i_func_info_IaaI): { ErtsCodeInfo *ci = (ErtsCodeInfo*)I; c_p->freason = EXC_FUNCTION_CLAUSE; @@ -3745,1367 +939,8 @@ do { \ goto handle_error; } - OpCase(try_case_end_s): - { - Eterm try_case_end_val; - GetArg1(0, try_case_end_val); - c_p->fvalue = try_case_end_val; - c_p->freason = EXC_TRY_CLAUSE; - goto find_func_info; - } - - /* - * Construction of binaries using new instructions. - */ - { - Eterm new_binary; - Eterm num_bits_term; - Uint num_bits; - Uint alloc; - Uint num_bytes; - - OpCase(i_bs_init_bits_heap_IIId): { - num_bits = Arg(0); - alloc = Arg(1); - I++; - goto do_bs_init_bits_known; - } - - OpCase(i_bs_init_bits_IId): { - num_bits = Arg(0); - alloc = 0; - goto do_bs_init_bits_known; - } - - OpCase(i_bs_init_bits_fail_heap_sIjId): { - GetArg1(0, num_bits_term); - alloc = Arg(1); - I += 2; - goto do_bs_init_bits; - } - - OpCase(i_bs_init_bits_fail_yjId): { - num_bits_term = yb(Arg(0)); - I++; - alloc = 0; - goto do_bs_init_bits; - } - OpCase(i_bs_init_bits_fail_xjId): { - num_bits_term = xb(Arg(0)); - I++; - alloc = 0; - /* FALL THROUGH */ - } - - /* num_bits_term = Term for number of bits to build (small/big) - * alloc = Number of words to allocate on heap - * Operands: Fail Live Dst - */ - - do_bs_init_bits: - if (is_small(num_bits_term)) { - Sint size = signed_val(num_bits_term); - if (size < 0) { - goto badarg; - } - num_bits = (Uint) size; - } else { - Uint bits; - - if (!term_to_Uint(num_bits_term, &bits)) { - c_p->freason = bits; - goto lb_Cl_error; - - } - num_bits = (Eterm) bits; - } - - /* num_bits = Number of bits to build - * alloc = Number of extra words to allocate on heap - * Operands: NotUsed Live Dst - */ - do_bs_init_bits_known: - num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3; - if (num_bits & 7) { - alloc += ERL_SUB_BIN_SIZE; - } - if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) { - alloc += heap_bin_size(num_bytes); - } else { - alloc += PROC_BIN_SIZE; - } - TestHeap(alloc, Arg(1)); - - /* num_bits = Number of bits to build - * num_bytes = Number of bytes to allocate in the binary - * alloc = Total number of words to allocate on heap - * Operands: NotUsed NotUsed Dst - */ - if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) { - ErlHeapBin* hb; - - erts_bin_offset = 0; - erts_writable_bin = 0; - hb = (ErlHeapBin *) HTOP; - HTOP += heap_bin_size(num_bytes); - hb->thing_word = header_heap_bin(num_bytes); - hb->size = num_bytes; - erts_current_bin = (byte *) hb->data; - new_binary = make_binary(hb); - - do_bits_sub_bin: - if (num_bits & 7) { - ErlSubBin* sb; - - sb = (ErlSubBin *) 
HTOP; - HTOP += ERL_SUB_BIN_SIZE; - sb->thing_word = HEADER_SUB_BIN; - sb->size = num_bytes - 1; - sb->bitsize = num_bits & 7; - sb->offs = 0; - sb->bitoffs = 0; - sb->is_writable = 0; - sb->orig = new_binary; - new_binary = make_binary(sb); - } - HEAP_SPACE_VERIFIED(0); - StoreBifResult(2, new_binary); - } else { - Binary* bptr; - ProcBin* pb; - - erts_bin_offset = 0; - erts_writable_bin = 0; - - /* - * Allocate the binary struct itself. - */ - bptr = erts_bin_nrml_alloc(num_bytes); - erts_current_bin = (byte *) bptr->orig_bytes; - - /* - * Now allocate the ProcBin on the heap. - */ - pb = (ProcBin *) HTOP; - HTOP += PROC_BIN_SIZE; - pb->thing_word = HEADER_PROC_BIN; - pb->size = num_bytes; - pb->next = MSO(c_p).first; - MSO(c_p).first = (struct erl_off_heap_header*) pb; - pb->val = bptr; - pb->bytes = (byte*) bptr->orig_bytes; - pb->flags = 0; - OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm)); - new_binary = make_binary(pb); - goto do_bits_sub_bin; - } - } - - { - Eterm BsOp1, BsOp2; - - OpCase(i_bs_init_fail_heap_sIjId): { - GetArg1(0, BsOp1); - BsOp2 = Arg(1); - I += 2; - goto do_bs_init; - } - - OpCase(i_bs_init_fail_yjId): { - BsOp1 = yb(Arg(0)); - BsOp2 = 0; - I++; - goto do_bs_init; - } - - OpCase(i_bs_init_fail_xjId): { - BsOp1 = xb(Arg(0)); - BsOp2 = 0; - I++; - } - /* FALL THROUGH */ - do_bs_init: - if (is_small(BsOp1)) { - Sint size = signed_val(BsOp1); - if (size < 0) { - goto badarg; - } - BsOp1 = (Eterm) size; - } else { - Uint bytes; - - if (!term_to_Uint(BsOp1, &bytes)) { - c_p->freason = bytes; - goto lb_Cl_error; - } - if ((bytes >> (8*sizeof(Uint)-3)) != 0) { - goto system_limit; - } - BsOp1 = (Eterm) bytes; - } - if (BsOp1 <= ERL_ONHEAP_BIN_LIMIT) { - goto do_heap_bin_alloc; - } else { - goto do_proc_bin_alloc; - } - - - OpCase(i_bs_init_heap_IIId): { - BsOp1 = Arg(0); - BsOp2 = Arg(1); - I++; - goto do_proc_bin_alloc; - } - - OpCase(i_bs_init_IId): { - BsOp1 = Arg(0); - BsOp2 = 0; - } - /* FALL THROUGH */ - do_proc_bin_alloc: { - Binary* bptr; - ProcBin* pb; - - erts_bin_offset = 0; - erts_writable_bin = 0; - TestBinVHeap(BsOp1 / sizeof(Eterm), - BsOp2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, Arg(1)); - - /* - * Allocate the binary struct itself. - */ - bptr = erts_bin_nrml_alloc(BsOp1); - erts_current_bin = (byte *) bptr->orig_bytes; - - /* - * Now allocate the ProcBin on the heap. 
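The binary construction clauses above first round the requested bit count up to whole bytes and then pick a representation: a heap binary for small payloads (at most ERL_ONHEAP_BIN_LIMIT bytes), otherwise an off-heap Binary plus a ProcBin reference, with an ErlSubBin wrapper added when the size is not a whole number of bytes. A minimal sketch of just that size arithmetic; the 64-byte limit here is an assumption used for illustration, the real threshold is whatever ERL_ONHEAP_BIN_LIMIT expands to:

    #include <assert.h>
    #include <stdint.h>

    #define ONHEAP_BIN_LIMIT 64   /* assumed value, for illustration only */

    int main(void)
    {
        uint64_t num_bits  = 69;
        uint64_t num_bytes = (num_bits + 7) >> 3;   /* round up to whole bytes    */
        int      trailing  = (int)(num_bits & 7);   /* bits used in the last byte */
        int      on_heap   = num_bytes <= ONHEAP_BIN_LIMIT;

        assert(num_bytes == 9 && trailing == 5 && on_heap);
        return 0;
    }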
- */ - pb = (ProcBin *) HTOP; - HTOP += PROC_BIN_SIZE; - pb->thing_word = HEADER_PROC_BIN; - pb->size = BsOp1; - pb->next = MSO(c_p).first; - MSO(c_p).first = (struct erl_off_heap_header*) pb; - pb->val = bptr; - pb->bytes = (byte*) bptr->orig_bytes; - pb->flags = 0; - - OH_OVERHEAD(&(MSO(c_p)), BsOp1 / sizeof(Eterm)); - - StoreBifResult(2, make_binary(pb)); - } - - OpCase(i_bs_init_heap_bin_heap_IIId): { - BsOp1 = Arg(0); - BsOp2 = Arg(1); - I++; - goto do_heap_bin_alloc; - } - - OpCase(i_bs_init_heap_bin_IId): { - BsOp1 = Arg(0); - BsOp2 = 0; - } - /* Fall through */ - do_heap_bin_alloc: - { - ErlHeapBin* hb; - Uint bin_need; - - bin_need = heap_bin_size(BsOp1); - erts_bin_offset = 0; - erts_writable_bin = 0; - TestHeap(bin_need+BsOp2+ERL_SUB_BIN_SIZE, Arg(1)); - hb = (ErlHeapBin *) HTOP; - HTOP += bin_need; - hb->thing_word = header_heap_bin(BsOp1); - hb->size = BsOp1; - erts_current_bin = (byte *) hb->data; - BsOp1 = make_binary(hb); - StoreBifResult(2, BsOp1); - } - } - - OpCase(bs_add_jssId): { - Eterm Op1, Op2; - Uint Unit = Arg(3); - - GetArg2(1, Op1, Op2); - if (is_both_small(Op1, Op2)) { - Sint Arg1 = signed_val(Op1); - Sint Arg2 = signed_val(Op2); - - if (Arg1 >= 0 && Arg2 >= 0) { - BsSafeMul(Arg2, Unit, goto system_limit, Op1); - Op1 += Arg1; - - store_bs_add_result: - if (Op1 <= MAX_SMALL) { - Op1 = make_small(Op1); - } else { - /* - * May generate a heap fragment, but in this - * particular case it is OK, since the value will be - * stored into an x register (the GC will scan x - * registers for references to heap fragments) and - * there is no risk that value can be stored into a - * location that is not scanned for heap-fragment - * references (such as the heap). - */ - SWAPOUT; - Op1 = erts_make_integer(Op1, c_p); - HTOP = HEAP_TOP(c_p); - } - StoreBifResult(4, Op1); - } - goto badarg; - } else { - Uint a; - Uint b; - Uint c; - - /* - * Now we know that one of the arguments is - * not a small. We must convert both arguments - * to Uints and check for errors at the same time. - * - * Error checking is tricky. - * - * If one of the arguments is not numeric or - * not positive, the error reason is BADARG. - * - * Otherwise if both arguments are numeric, - * but at least one argument does not fit in - * an Uint, the reason is SYSTEM_LIMIT. - */ - - if (!term_to_Uint(Op1, &a)) { - if (a == BADARG) { - goto badarg; - } - if (!term_to_Uint(Op2, &b)) { - c_p->freason = b; - goto lb_Cl_error; - } - goto system_limit; - } else if (!term_to_Uint(Op2, &b)) { - c_p->freason = b; - goto lb_Cl_error; - } - - /* - * The arguments are now correct and stored in a and b. - */ - - BsSafeMul(b, Unit, goto system_limit, c); - Op1 = a + c; - if (Op1 < a) { - /* - * If the result is less than one of the - * arguments, there must have been an overflow. - */ - goto system_limit; - } - goto store_bs_add_result; - } - /* No fallthrough */ - ASSERT(0); - } - - OpCase(bs_put_string_II): - { - BeamInstr *next; - PreFetch(2, next); - erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) Arg(1), Arg(0))); - NextPF(2, next); - } - - /* - * x(SCRATCH_X_REG); - * Operands: Fail ExtraHeap Live Unit Size Dst - */ - - OpCase(i_bs_append_jIIIsd): { - Uint live = Arg(2); - Uint res; - Eterm Size; - - GetArg1(4, Size); - HEAVY_SWAPOUT; - reg[live] = x(SCRATCH_X_REG); - res = erts_bs_append(c_p, reg, live, Size, Arg(1), Arg(3)); - HEAVY_SWAPIN; - if (is_non_value(res)) { - /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). 
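The bs_add clause above checks the unsigned addition for overflow with the comparison explained in its comment: if the wrapped sum is smaller than one of the operands, the addition overflowed. A minimal stand-alone version of that check, with nothing ERTS-specific in it:

    #include <assert.h>
    #include <limits.h>

    /* Store a+b in *out and return 1 when the sum fits, 0 on overflow. */
    static int add_no_overflow(unsigned long a, unsigned long b, unsigned long *out)
    {
        unsigned long sum = a + b;      /* wraps modulo 2^N on overflow              */
        if (sum < a)                    /* wrapped result is smaller than an operand */
            return 0;
        *out = sum;
        return 1;
    }

    int main(void)
    {
        unsigned long r;
        assert(add_no_overflow(1, 2, &r) && r == 3);
        assert(!add_no_overflow(ULONG_MAX, 1, &r));   /* would overflow, rejected */
        return 0;
    }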
*/ - goto lb_Cl_error; - } - StoreBifResult(5, res); - } - - /* - * Operands: Fail Size Src Unit Dst - */ - OpCase(i_bs_private_append_jIssd): { - Eterm res; - Eterm Size, Src; - - GetArg2(2, Size, Src); - res = erts_bs_private_append(c_p, Src, Size, Arg(1)); - if (is_non_value(res)) { - /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */ - goto lb_Cl_error; - } - StoreBifResult(4, res); - } - - OpCase(bs_init_writable): { - HEAVY_SWAPOUT; - r(0) = erts_bs_init_writable(c_p, r(0)); - HEAVY_SWAPIN; - Next(0); - } - - /* - * Calculate the number of bytes needed to encode the source - * operarand to UTF-8. If the source operand is invalid (e.g. wrong - * type or range) we return a nonsense integer result (0 or 4). We - * can get away with that because we KNOW that bs_put_utf8 will do - * full error checking. - */ - OpCase(i_bs_utf8_size_sd): { - Eterm arg; - Eterm result; - - GetArg1(0, arg); - if (arg < make_small(0x80UL)) { - result = make_small(1); - } else if (arg < make_small(0x800UL)) { - result = make_small(2); - } else if (arg < make_small(0x10000UL)) { - result = make_small(3); - } else { - result = make_small(4); - } - StoreBifResult(1, result); - } - - OpCase(i_bs_put_utf8_js): { - Eterm arg; - - GetArg1(1, arg); - if (!erts_bs_put_utf8(ERL_BITS_ARGS_1(arg))) { - goto badarg; - } - Next(2); - } - - /* - * Calculate the number of bytes needed to encode the source - * operarand to UTF-8. If the source operand is invalid (e.g. wrong - * type or range) we return a nonsense integer result (2 or 4). We - * can get away with that because we KNOW that bs_put_utf16 will do - * full error checking. - */ - - OpCase(i_bs_utf16_size_sd): { - Eterm arg; - Eterm result = make_small(2); - - GetArg1(0, arg); - if (arg >= make_small(0x10000UL)) { - result = make_small(4); - } - StoreBifResult(1, result); - } - - OpCase(bs_put_utf16_jIs): { - Eterm arg; - - GetArg1(2, arg); - if (!erts_bs_put_utf16(ERL_BITS_ARGS_2(arg, Arg(1)))) { - goto badarg; - } - Next(3); - } - - /* - * Only used for validating a value about to be stored in a binary. - */ - OpCase(i_bs_validate_unicode_js): { - Eterm val; - - GetArg1(1, val); - - /* - * There is no need to untag the integer, but it IS necessary - * to make sure it is small (if the term is a bignum, it could - * slip through the test, and there is no further test that - * would catch it, since bit syntax construction silently masks - * too big numbers). - */ - if (is_not_small(val) || val > make_small(0x10FFFFUL) || - (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) { - goto badarg; - } - Next(2); - } - - /* - * Only used for validating a value matched out. - */ - OpCase(i_bs_validate_unicode_retract_jss): { - Eterm i; /* Integer to validate */ - - /* - * There is no need to untag the integer, but it IS necessary - * to make sure it is small (a bignum pointer could fall in - * the valid range). - */ - - GetArg1(1, i); - if (is_not_small(i) || i > make_small(0x10FFFFUL) || - (make_small(0xD800UL) <= i && i <= make_small(0xDFFFUL))) { - Eterm ms; /* Match context */ - ErlBinMatchBuffer* mb; - - GetArg1(2, ms); - mb = ms_matchbuffer(ms); - mb->offset -= 32; - goto badarg; - } - Next(3); - } - - /* - * Matching of binaries. 
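The i_bs_utf8_size, i_bs_utf16_size and i_bs_validate_unicode clauses above encode the usual Unicode rules on tagged smalls: 1 to 4 UTF-8 bytes depending on the code point range, 2 or 4 UTF-16 bytes, and validity meaning at most 0x10FFFF and not a surrogate. The same rules restated as plain C on untagged code points, for reference only:

    #include <assert.h>

    /* Bytes needed to encode a valid code point in UTF-8. */
    static int utf8_size(unsigned long cp)
    {
        if (cp < 0x80UL)    return 1;
        if (cp < 0x800UL)   return 2;
        if (cp < 0x10000UL) return 3;
        return 4;
    }

    /* Bytes needed in UTF-16: one code unit below 0x10000, a surrogate pair above. */
    static int utf16_size(unsigned long cp)
    {
        return cp < 0x10000UL ? 2 : 4;
    }

    /* Valid Unicode scalar value: at most 0x10FFFF and not a surrogate. */
    static int valid_unicode(unsigned long cp)
    {
        return cp <= 0x10FFFFUL && !(0xD800UL <= cp && cp <= 0xDFFFUL);
    }

    int main(void)
    {
        assert(utf8_size(0x7F) == 1 && utf8_size(0x80) == 2);
        assert(utf8_size(0x7FF) == 2 && utf8_size(0x800) == 3);
        assert(utf8_size(0xFFFF) == 3 && utf8_size(0x10000) == 4);
        assert(utf16_size(0xFFFF) == 2 && utf16_size(0x10000) == 4);
        assert(valid_unicode(0x10FFFF) && !valid_unicode(0xD800) && !valid_unicode(0x110000));
        return 0;
    }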
- */ - - { - Eterm header; - BeamInstr *next; - Uint slots; - Eterm context; - - do_start_match: - slots = Arg(2); - if (!is_boxed(context)) { - ClauseFail(); - } - PreFetch(4, next); - header = *boxed_val(context); - if (header_is_bin_matchstate(header)) { - ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context); - Uint actual_slots = HEADER_NUM_SLOTS(header); - ms->save_offset[0] = ms->mb.offset; - if (actual_slots < slots) { - ErlBinMatchState* dst; - Uint live = Arg(1); - Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); - - TestHeapPreserve(wordsneeded, live, context); - ms = (ErlBinMatchState *) boxed_val(context); - dst = (ErlBinMatchState *) HTOP; - *dst = *ms; - *HTOP = HEADER_BIN_MATCHSTATE(slots); - HTOP += wordsneeded; - HEAP_SPACE_VERIFIED(0); - StoreResult(make_matchstate(dst), Arg(3)); - } - } else if (is_binary_header(header)) { - Eterm result; - Uint live = Arg(1); - Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); - TestHeapPreserve(wordsneeded, live, context); - HEAP_TOP(c_p) = HTOP; -#ifdef DEBUG - c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */ -#endif - result = erts_bs_start_match_2(c_p, context, slots); - HTOP = HEAP_TOP(c_p); - HEAP_SPACE_VERIFIED(0); - if (is_non_value(result)) { - ClauseFail(); - } else { - StoreResult(result, Arg(3)); - } - } else { - ClauseFail(); - } - NextPF(4, next); - - OpCase(i_bs_start_match2_xfIId): { - context = xb(Arg(0)); - I++; - goto do_start_match; - } - OpCase(i_bs_start_match2_yfIId): { - context = yb(Arg(0)); - I++; - goto do_start_match; - } - } - - OpCase(bs_test_zero_tail2_fx): { - BeamInstr *next; - ErlBinMatchBuffer *_mb; - - PreFetch(2, next); - _mb = (ErlBinMatchBuffer*) ms_matchbuffer(xb(Arg(1))); - if (_mb->size != _mb->offset) { - ClauseFail(); - } - NextPF(2, next); - } - - OpCase(bs_test_tail_imm2_fxI): { - BeamInstr *next; - ErlBinMatchBuffer *_mb; - PreFetch(3, next); - _mb = ms_matchbuffer(xb(Arg(1))); - if (_mb->size - _mb->offset != Arg(2)) { - ClauseFail(); - } - NextPF(3, next); - } - - OpCase(bs_test_unit_fxI): { - BeamInstr *next; - ErlBinMatchBuffer *_mb; - PreFetch(3, next); - _mb = ms_matchbuffer(xb(Arg(1))); - if ((_mb->size - _mb->offset) % Arg(2)) { - ClauseFail(); - } - NextPF(3, next); - } - - OpCase(bs_test_unit8_fx): { - BeamInstr *next; - ErlBinMatchBuffer *_mb; - PreFetch(2, next); - _mb = ms_matchbuffer(xb(Arg(1))); - if ((_mb->size - _mb->offset) & 7) { - ClauseFail(); - } - NextPF(2, next); - } - - { - Eterm bs_get_integer8_context; - - OpCase(i_bs_get_integer_8_xfd): { - ErlBinMatchBuffer *_mb; - Eterm _result; - bs_get_integer8_context = xb(Arg(0)); - I++; - _mb = ms_matchbuffer(bs_get_integer8_context); - if (_mb->size - _mb->offset < 8) { - ClauseFail(); - } - if (BIT_OFFSET(_mb->offset) != 0) { - _result = erts_bs_get_integer_2(c_p, 8, 0, _mb); - } else { - _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]); - _mb->offset += 8; - } - StoreBifResult(1, _result); - } - } - - { - Eterm bs_get_integer_16_context; - - OpCase(i_bs_get_integer_16_xfd): - bs_get_integer_16_context = xb(Arg(0)); - I++; - - { - ErlBinMatchBuffer *_mb; - Eterm _result; - _mb = ms_matchbuffer(bs_get_integer_16_context); - if (_mb->size - _mb->offset < 16) { - ClauseFail(); - } - if (BIT_OFFSET(_mb->offset) != 0) { - _result = erts_bs_get_integer_2(c_p, 16, 0, _mb); - } else { - _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset))); - _mb->offset += 16; - } - StoreBifResult(1, _result); - } - } - - { - Eterm bs_get_integer_32_context; - - OpCase(i_bs_get_integer_32_xfId): - 
bs_get_integer_32_context = xb(Arg(0)); - I++; - - { - ErlBinMatchBuffer *_mb; - Uint32 _integer; - Eterm _result; - _mb = ms_matchbuffer(bs_get_integer_32_context); - if (_mb->size - _mb->offset < 32) { ClauseFail(); } - if (BIT_OFFSET(_mb->offset) != 0) { - _integer = erts_bs_get_unaligned_uint32(_mb); - } else { - _integer = get_int32(_mb->base + _mb->offset/8); - } - _mb->offset += 32; -#if !defined(ARCH_64) - if (IS_USMALL(0, _integer)) { -#endif - _result = make_small(_integer); -#if !defined(ARCH_64) - } else { - TestHeap(BIG_UINT_HEAP_SIZE, Arg(1)); - _result = uint_to_big((Uint) _integer, HTOP); - HTOP += BIG_UINT_HEAP_SIZE; - HEAP_SPACE_VERIFIED(0); - } -#endif - StoreBifResult(2, _result); - } - } - - { - Eterm Ms, Sz; - - /* Operands: x(Reg) Size Live Fail Flags Dst */ - OpCase(i_bs_get_integer_imm_xIIfId): { - Uint wordsneeded; - Ms = xb(Arg(0)); - Sz = Arg(1); - wordsneeded = 1+WSIZE(NBYTES(Sz)); - TestHeapPreserve(wordsneeded, Arg(2), Ms); - I += 3; - /* Operands: Fail Flags Dst */ - goto do_bs_get_integer_imm; - } - - /* Operands: x(Reg) Size Fail Flags Dst */ - OpCase(i_bs_get_integer_small_imm_xIfId): { - Ms = xb(Arg(0)); - Sz = Arg(1); - I += 2; - /* Operands: Fail Flags Dst */ - goto do_bs_get_integer_imm; - } - - /* - * Ms = match context - * Sz = size of field - * Operands: Fail Flags Dst - */ - do_bs_get_integer_imm: { - ErlBinMatchBuffer* mb; - Eterm result; - - mb = ms_matchbuffer(Ms); - LIGHT_SWAPOUT; - result = erts_bs_get_integer_2(c_p, Sz, Arg(1), mb); - LIGHT_SWAPIN; - HEAP_SPACE_VERIFIED(0); - if (is_non_value(result)) { - ClauseFail(); - } - StoreBifResult(2, result); - } - } - - /* - * Operands: Fail Live FlagsAndUnit Ms Sz Dst - */ - OpCase(i_bs_get_integer_fIIssd): { - Uint flags; - Uint size; - Eterm Ms; - Eterm Sz; - ErlBinMatchBuffer* mb; - Eterm result; - - flags = Arg(2); - GetArg2(3, Ms, Sz); - BsGetFieldSize(Sz, (flags >> 3), ClauseFail(), size); - if (size >= SMALL_BITS) { - Uint wordsneeded; - /* Check bits size before potential gc. - * We do not want a gc and then realize we don't need - * the allocated space (i.e. if the op fails). - * - * Remember to re-acquire the matchbuffer after gc. 
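The i_bs_get_integer_8/16/32 clauses above take a fast path when the match offset is byte aligned: the bytes are read directly with get_int16/get_int32 (big-endian, the default endianness of the bit syntax), and only the unaligned case falls back to erts_bs_get_integer_2. A stand-alone illustration of the aligned reads; the buffer contents and the 16-bit offset are made up for the example:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Big-endian reads from a byte buffer, as on the byte-aligned fast path. */
    static uint16_t be16(const unsigned char *p) { return (uint16_t)((p[0] << 8) | p[1]); }
    static uint32_t be32(const unsigned char *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    int main(void)
    {
        const unsigned char buf[] = { 0x12, 0x34, 0x56, 0x78 };
        size_t bit_offset = 16;                 /* byte aligned: a multiple of 8 */

        assert(bit_offset % 8 == 0);
        assert(be16(buf + bit_offset / 8) == 0x5678);
        assert(be32(buf) == 0x12345678UL);
        return 0;
    }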
- */ - - mb = ms_matchbuffer(Ms); - if (mb->size - mb->offset < size) { - ClauseFail(); - } - wordsneeded = 1+WSIZE(NBYTES((Uint) size)); - TestHeapPreserve(wordsneeded, Arg(1), Ms); - } - mb = ms_matchbuffer(Ms); - LIGHT_SWAPOUT; - result = erts_bs_get_integer_2(c_p, size, flags, mb); - LIGHT_SWAPIN; - HEAP_SPACE_VERIFIED(0); - if (is_non_value(result)) { - ClauseFail(); - } - StoreBifResult(5, result); - } - - { - Eterm get_utf8_context; - - /* Operands: MatchContext Fail Dst */ - OpCase(i_bs_get_utf8_xfd): { - get_utf8_context = xb(Arg(0)); - I++; - } - - /* - * get_utf8_context = match_context - * Operands: Fail Dst - */ - - { - Eterm result = erts_bs_get_utf8(ms_matchbuffer(get_utf8_context)); - if (is_non_value(result)) { - ClauseFail(); - } - StoreBifResult(1, result); - } - } - - { - Eterm get_utf16_context; - - /* Operands: MatchContext Fail Flags Dst */ - OpCase(i_bs_get_utf16_xfId): { - get_utf16_context = xb(Arg(0)); - I++; - } - - /* - * get_utf16_context = match_context - * Operands: Fail Flags Dst - */ - { - Eterm result = erts_bs_get_utf16(ms_matchbuffer(get_utf16_context), - Arg(1)); - if (is_non_value(result)) { - ClauseFail(); - } - StoreBifResult(2, result); - } - } - - { - Eterm context_to_binary_context; - ErlBinMatchBuffer* mb; - ErlSubBin* sb; - Uint size; - Uint offs; - Uint orig; - Uint hole_size; - - OpCase(bs_context_to_binary_x): - context_to_binary_context = xb(Arg(0)); - I--; - - if (is_boxed(context_to_binary_context) && - header_is_bin_matchstate(*boxed_val(context_to_binary_context))) { - ErlBinMatchState* ms; - ms = (ErlBinMatchState *) boxed_val(context_to_binary_context); - mb = &ms->mb; - offs = ms->save_offset[0]; - size = mb->size - offs; - goto do_bs_get_binary_all_reuse_common; - } - Next(2); - - OpCase(i_bs_get_binary_all_reuse_xfI): { - context_to_binary_context = xb(Arg(0)); - I++; - } - - mb = ms_matchbuffer(context_to_binary_context); - size = mb->size - mb->offset; - if (size % Arg(1) != 0) { - ClauseFail(); - } - offs = mb->offset; - - do_bs_get_binary_all_reuse_common: - orig = mb->orig; - sb = (ErlSubBin *) boxed_val(context_to_binary_context); - hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE; - sb->thing_word = HEADER_SUB_BIN; - sb->size = BYTE_OFFSET(size); - sb->bitsize = BIT_OFFSET(size); - sb->offs = BYTE_OFFSET(offs); - sb->bitoffs = BIT_OFFSET(offs); - sb->is_writable = 0; - sb->orig = orig; - if (hole_size) { - sb[1].thing_word = make_pos_bignum_header(hole_size-1); - } - Next(2); - } - - { - Eterm match_string_context; - - OpCase(i_bs_match_string_xfII): { - match_string_context = xb(Arg(0)); - I++; - } - - { - BeamInstr *next; - byte* bytes; - Uint bits; - ErlBinMatchBuffer* mb; - Uint offs; - - PreFetch(3, next); - bits = Arg(1); - bytes = (byte *) Arg(2); - mb = ms_matchbuffer(match_string_context); - if (mb->size - mb->offset < bits) { - ClauseFail(); - } - offs = mb->offset & 7; - if (offs == 0 && (bits & 7) == 0) { - if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) { - ClauseFail(); - } - } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) { - ClauseFail(); - } - mb->offset += bits; - NextPF(3, next); - } - } - - OpCase(i_bs_save2_xI): { - BeamInstr *next; - ErlBinMatchState *_ms; - PreFetch(2, next); - _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0))); - _ms->save_offset[Arg(1)] = _ms->mb.offset; - NextPF(2, next); - } - - OpCase(i_bs_restore2_xI): { - BeamInstr *next; - ErlBinMatchState *_ms; - PreFetch(2, next); - _ms = (ErlBinMatchState*) boxed_val((Eterm) 
xb(Arg(0))); - _ms->mb.offset = _ms->save_offset[Arg(1)]; - NextPF(2, next); - } - #include "beam_cold.h" - - /* - * This instruction is probably never used (because it is combined with a - * a return). However, a future compiler might for some reason emit a - * deallocate not followed by a return, and that should work. - */ - OpCase(deallocate_I): { - BeamInstr *next; - - PreFetch(1, next); - D(Arg(0)); - NextPF(1, next); - } - - /* - * Trace and debugging support. - */ - - OpCase(return_trace): { - ErtsCodeMFA* mfa = (ErtsCodeMFA *)(E[0]); - - SWAPOUT; /* Needed for shared heap */ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - erts_trace_return(c_p, mfa, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - SWAPIN; - c_p->cp = NULL; - SET_I((BeamInstr *) cp_val(E[2])); - E += 3; - Goto(*I); - } - - OpCase(i_generic_breakpoint): { - BeamInstr real_I; - HEAVY_SWAPOUT; - real_I = erts_generic_breakpoint(c_p, erts_code_to_codeinfo(I), reg); - HEAVY_SWAPIN; - ASSERT(VALID_INSTR(real_I)); - Goto(real_I); - } - - OpCase(i_return_time_trace): { - BeamInstr *pc = (BeamInstr *) (UWord) E[0]; - SWAPOUT; - erts_trace_time_return(c_p, erts_code_to_codeinfo(pc)); - SWAPIN; - c_p->cp = NULL; - SET_I((BeamInstr *) cp_val(E[1])); - E += 2; - Goto(*I); - } - - OpCase(i_return_to_trace): { - if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) { - Uint *cpp = (Uint*) E; - for(;;) { - ASSERT(is_CP(*cpp)); - if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) { - do ++cpp; while(is_not_CP(*cpp)); - cpp += 2; - } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) { - do ++cpp; while(is_not_CP(*cpp)); - } else break; - } - SWAPOUT; /* Needed for shared heap */ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - erts_trace_return_to(c_p, cp_val(*cpp)); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - SWAPIN; - } - c_p->cp = NULL; - SET_I((BeamInstr *) cp_val(E[0])); - E += 1; - Goto(*I); - } - - /* - * New floating point instructions. 
- */ - - OpCase(fmove_ql): { - Eterm fr = Arg(1); - BeamInstr *next; - - PreFetch(2, next); - GET_DOUBLE(Arg(0), *(FloatDef*)ADD_BYTE_OFFSET(freg, fr)); - NextPF(2, next); - } - - OpCase(fmove_dl): { - Eterm targ1; - Eterm fr = Arg(1); - BeamInstr *next; - - PreFetch(2, next); - targ1 = REG_TARGET(Arg(0)); - /* Arg(0) == HEADER_FLONUM */ - GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr)); - NextPF(2, next); - } - - OpCase(fmove_ld): { - Eterm fr = Arg(0); - Eterm dest = make_float(HTOP); - - PUT_DOUBLE(*(FloatDef*)ADD_BYTE_OFFSET(freg, fr), HTOP); - HTOP += FLOAT_SIZE_OBJECT; - StoreBifResult(1, dest); - } - - OpCase(fconv_dl): { - Eterm targ1; - Eterm fr = Arg(1); - BeamInstr *next; - - targ1 = REG_TARGET(Arg(0)); - PreFetch(2, next); - if (is_small(targ1)) { - fb(fr) = (double) signed_val(targ1); - } else if (is_big(targ1)) { - if (big_to_double(targ1, &fb(fr)) < 0) { - goto fbadarith; - } - } else if (is_float(targ1)) { - GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr)); - } else { - goto fbadarith; - } - NextPF(2, next); - } - -#ifdef NO_FPE_SIGNALS - OpCase(fclearerror): - OpCase(i_fcheckerror): - erts_exit(ERTS_ERROR_EXIT, "fclearerror/i_fcheckerror without fpe signals (beam_emu)"); -# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT -# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR -#else -# define ERTS_NO_FPE_CHECK_INIT(p) -# define ERTS_NO_FPE_ERROR(p, a, b) - - OpCase(fclearerror): { - BeamInstr *next; - - PreFetch(0, next); - ERTS_FP_CHECK_INIT(c_p); - NextPF(0, next); - } - - OpCase(i_fcheckerror): { - BeamInstr *next; - - PreFetch(0, next); - ERTS_FP_ERROR(c_p, freg[0].fd, goto fbadarith); - NextPF(0, next); - } -#endif - - - OpCase(i_fadd_lll): { - BeamInstr *next; - - PreFetch(3, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(2)) = fb(Arg(0)) + fb(Arg(1)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith); - NextPF(3, next); - } - OpCase(i_fsub_lll): { - BeamInstr *next; - - PreFetch(3, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(2)) = fb(Arg(0)) - fb(Arg(1)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith); - NextPF(3, next); - } - OpCase(i_fmul_lll): { - BeamInstr *next; - - PreFetch(3, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(2)) = fb(Arg(0)) * fb(Arg(1)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith); - NextPF(3, next); - } - OpCase(i_fdiv_lll): { - BeamInstr *next; - - PreFetch(3, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(2)) = fb(Arg(0)) / fb(Arg(1)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith); - NextPF(3, next); - } - OpCase(i_fnegate_ll): { - BeamInstr *next; - - PreFetch(2, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(1)) = -fb(Arg(0)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(1)), goto fbadarith); - NextPF(2, next); - - fbadarith: - c_p->freason = BADARITH; - goto find_func_info; - } - -#ifdef HIPE - { -#define HIPE_MODE_SWITCH(Cmd) \ - SWAPOUT; \ - ERTS_DBG_CHK_REDS(c_p, FCALLS); \ - c_p->fcalls = FCALLS; \ - c_p->def_arg_reg[4] = -neg_o_reds; \ - c_p = hipe_mode_switch(c_p, Cmd, reg); \ - goto L_post_hipe_mode_switch - - OpCase(hipe_trap_call): { - /* - * I[-5]: &&lb_i_func_info_IaaI - * I[-4]: Native code callee (inserted by HiPE) - * I[-3]: Module (tagged atom) - * I[-2]: Function (tagged atom) - * I[-1]: Arity (untagged integer) - * I[ 0]: &&lb_hipe_trap_call - * ... 
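The i_fadd/i_fsub/i_fmul/i_fdiv clauses above operate on unboxed doubles and then check for a floating point error, since a float operation that produces an infinity or NaN raises badarith in Erlang (either via FPE signals or, with NO_FPE_SIGNALS, by inspecting the result as ERTS_FP_ERROR does). A rough stand-alone equivalent of that result check, shown for division only:

    #include <assert.h>
    #include <math.h>

    /* Divide and reject results Erlang would treat as badarith (inf or NaN). */
    static int fdiv_checked(double a, double b, double *res)
    {
        double r = a / b;
        if (!isfinite(r))           /* e.g. division by zero, or 0.0/0.0 */
            return 0;               /* caller raises badarith            */
        *res = r;
        return 1;
    }

    int main(void)
    {
        double r;
        assert(fdiv_checked(1.0, 4.0, &r) && r == 0.25);
        assert(!fdiv_checked(1.0, 0.0, &r));    /* +inf -> rejected */
        assert(!fdiv_checked(0.0, 0.0, &r));    /* NaN  -> rejected */
        return 0;
    }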
remainder of original BEAM code - */ - ErtsCodeInfo *ci = erts_code_to_codeinfo(I); - ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI)); - c_p->hipe.u.ncallee = ci->u.ncallee; - ++hipe_trap_count; - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL | (ci->mfa.arity << 8)); - } - OpCase(hipe_trap_call_closure): { - ErtsCodeInfo *ci = erts_code_to_codeinfo(I); - ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI)); - c_p->hipe.u.ncallee = ci->u.ncallee; - ++hipe_trap_count; - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (ci->mfa.arity << 8)); - } - OpCase(hipe_trap_return): { - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RETURN); - } - OpCase(hipe_trap_throw): { - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_THROW); - } - OpCase(hipe_trap_resume): { - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RESUME); - } -#undef HIPE_MODE_SWITCH - - L_post_hipe_mode_switch: -#ifdef DEBUG - pid = c_p->common.id; /* may have switched process... */ -#endif - reg = erts_proc_sched_data(c_p)->x_reg_array; - freg = erts_proc_sched_data(c_p)->f_reg_array; - ERL_BITS_RELOAD_STATEP(c_p); - /* XXX: this abuse of def_arg_reg[] is horrid! */ - neg_o_reds = -c_p->def_arg_reg[4]; - FCALLS = c_p->fcalls; - SWAPIN; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - switch( c_p->def_arg_reg[3] ) { - case HIPE_MODE_SWITCH_RES_RETURN: - ASSERT(is_value(reg[0])); - SET_I(c_p->cp); - c_p->cp = 0; - Goto(*I); - case HIPE_MODE_SWITCH_RES_CALL_EXPORTED: - c_p->i = c_p->hipe.u.callee_exp->addressv[erts_active_code_ix()]; - /*fall through*/ - case HIPE_MODE_SWITCH_RES_CALL_BEAM: - SET_I(c_p->i); - Dispatch(); - case HIPE_MODE_SWITCH_RES_CALL_CLOSURE: - /* This can be used to call any function value, but currently it's - only used to call closures referring to unloaded modules. */ - { - BeamInstr *next; - - next = call_fun(c_p, c_p->arity - 1, reg, THE_NON_VALUE); - HEAVY_SWAPIN; - if (next != NULL) { - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - case HIPE_MODE_SWITCH_RES_THROW: - c_p->cp = NULL; - I = handle_error(c_p, I, reg, NULL); - goto post_error_handling; - default: - erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: result %u\n", c_p->def_arg_reg[3]); - } - } - OpCase(hipe_call_count): { - /* - * I[-5]: &&lb_i_func_info_IaaI - * I[-4]: pointer to struct hipe_call_count (inserted by HiPE) - * I[-3]: Module (tagged atom) - * I[-2]: Function (tagged atom) - * I[-1]: Arity (untagged integer) - * I[ 0]: &&lb_hipe_call_count - * ... remainder of original BEAM code - */ - ErtsCodeInfo *ci = erts_code_to_codeinfo(I); - struct hipe_call_count *hcc = ci->u.hcc; - ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI)); - ASSERT(hcc != NULL); - ASSERT(VALID_INSTR(hcc->opcode)); - ++(hcc->count); - Goto(hcc->opcode); - } -#endif /* HIPE */ - - OpCase(i_yield): - { - /* This is safe as long as REDS_IN(c_p) is never stored - * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5], - * which may be c_p->arg_reg[5], which is close, but no banana. 
- */ - c_p->arg_reg[0] = am_true; - c_p->arity = 1; /* One living register (the 'true' return value) */ - SWAPOUT; - c_p->i = I + 1; /* Next instruction */ - c_p->current = NULL; - goto do_schedule; - } - - OpCase(i_hibernate): { - HEAVY_SWAPOUT; - if (erts_hibernate(c_p, r(0), x(1), x(2), reg)) { - FCALLS = c_p->fcalls; - c_p->flags &= ~F_HIBERNATE_SCHED; - goto do_schedule; - } else { - HEAVY_SWAPIN; - I = handle_error(c_p, I, reg, &bif_export[BIF_hibernate_3]->info.mfa); - goto post_error_handling; - } - } - - /* This is optimised as an instruction because - it has to be very very fast */ - OpCase(i_perf_counter): { - BeamInstr* next; - ErtsSysPerfCounter ts; - PreFetch(0, next); - - ts = erts_sys_perf_counter(); - - if (IS_SSMALL(ts)) { - r(0) = make_small((Sint)ts); - } else { - TestHeap(ERTS_SINT64_HEAP_SIZE(ts),0); - r(0) = make_big(HTOP); -#if defined(ARCH_32) - if (ts >= (((Uint64) 1) << 32)) { - *HTOP = make_pos_bignum_header(2); - BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff)); - BIG_DIGIT(HTOP, 1) = (Uint) ((ts >> 32) & ((Uint) 0xffffffff)); - HTOP += 3; - } - else -#endif - { - *HTOP = make_pos_bignum_header(1); - BIG_DIGIT(HTOP, 0) = (Uint) ts; - HTOP += 2; - } - } - NextPF(0, next); - } - - OpCase(i_debug_breakpoint): { - HEAVY_SWAPOUT; - I = call_error_handler(c_p, erts_code_to_codemfa(I), reg, am_breakpoint); - HEAVY_SWAPIN; - if (I) { - Goto(*I); - } - goto handle_error; - } - - - OpCase(system_limit_j): - system_limit: - c_p->freason = SYSTEM_LIMIT; - goto lb_Cl_error; - - #ifdef ERTS_OPCODE_COUNTER_SUPPORT DEFINE_COUNTING_LABELS; #endif @@ -5201,7 +1036,6 @@ do { \ */ void erts_dirty_process_main(ErtsSchedulerData *esdp) { -#ifdef ERTS_DIRTY_SCHEDULERS Process* c_p = NULL; ErtsMonotonicTime start_time; #ifdef DEBUG @@ -5313,7 +1147,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) } PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); c_p = erts_schedule(esdp, c_p, reds_used); @@ -5327,7 +1161,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) #ifdef DEBUG pid = c_p->common.id; /* Save for debugging purposes */ #endif - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); ASSERT(!(c_p->flags & F_HIPE_MODE)); @@ -5342,7 +1176,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) else c_p->fcalls = CONTEXT_REDS; - if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) { + if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) { erts_execute_dirty_system_task(c_p); goto do_dirty_schedule; } @@ -5413,7 +1247,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) c_p->current = codemfa; SWAPOUT; PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); if (em_apply_bif == (BeamInstr *) *I) { @@ -5427,7 +1261,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) ASSERT(!(c_p->flags & F_HIBERNATE_SCHED)); PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); if (exiting) @@ -5440,7 +1274,6 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) I = c_p->i; goto context_switch; } -#endif /* ERTS_DIRTY_SCHEDULERS */ } static ErtsCodeMFA * @@ -5606,9 +1439,9 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa) } if (c_p->catches > 0) 
erts_exit(ERTS_ERROR_EXIT, "Catch not found"); } - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); terminate_proc(c_p, Value); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); return NULL; } @@ -6344,13 +2177,14 @@ apply_bif_error_adjustment(Process *p, Export *ep, } static BeamInstr* -apply( -Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg, -BeamInstr *I, Uint stack_offset) +apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset) { int arity; Export* ep; - Eterm tmp, this; + Eterm tmp; + Eterm module = reg[0]; + Eterm function = reg[1]; + Eterm args = reg[2]; /* * Check the arguments which should be of the form apply(Module, @@ -6373,20 +2207,8 @@ BeamInstr *I, Uint stack_offset) while (1) { Eterm m, f, a; - /* The module argument may be either an atom or an abstract module - * (currently implemented using tuples, but this might change). - */ - this = THE_NON_VALUE; - if (is_not_atom(module)) { - Eterm* tp; - - if (is_not_tuple(module)) goto error; - tp = tuple_val(module); - if (arityval(tp[0]) < 1) goto error; - this = module; - module = tp[1]; - if (is_not_atom(module)) goto error; - } + + if (is_not_atom(module)) goto error; if (module != am_erlang || function != am_apply) break; @@ -6421,9 +2243,7 @@ BeamInstr *I, Uint stack_offset) } /* * Walk down the 3rd parameter of apply (the argument list) and copy - * the parameters to the x registers (reg[]). If the module argument - * was an abstract module, add 1 to the function arity and put the - * module argument in the n+1st x register as a THIS reference. + * the parameters to the x registers (reg[]). */ tmp = args; @@ -6440,9 +2260,6 @@ BeamInstr *I, Uint stack_offset) if (is_not_nil(tmp)) { /* Must be well-formed list */ goto error; } - if (this != THE_NON_VALUE) { - reg[arity++] = this; - } /* * Get the index into the export table, or failing that the export @@ -6481,22 +2298,12 @@ fixed_apply(Process* p, Eterm* reg, Uint arity, return 0; } - /* The module argument may be either an atom or an abstract module - * (currently implemented using tuples, but this might change). - */ - if (is_not_atom(module)) { - Eterm* tp; - if (is_not_tuple(module)) goto error; - tp = tuple_val(module); - if (arityval(tp[0]) < 1) goto error; - module = tp[1]; - if (is_not_atom(module)) goto error; - ++arity; - } + if (is_not_atom(module)) goto error; /* Handle apply of apply/3... */ - if (module == am_erlang && function == am_apply && arity == 3) - return apply(p, reg[0], reg[1], reg[2], reg, I, stack_offset); + if (module == am_erlang && function == am_apply && arity == 3) { + return apply(p, reg, I, stack_offset); + } /* * Get the index into the export table, or failing that the export @@ -6517,27 +2324,13 @@ fixed_apply(Process* p, Eterm* reg, Uint arity, } int -erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg) +erts_hibernate(Process* c_p, Eterm* reg) { int arity; Eterm tmp; - -#ifndef ERTS_SMP - if (ERTS_PROC_IS_EXITING(c_p)) { - /* - * I non smp case: - * - * Currently executing process might be sent an exit - * signal if it is traced by a port that it also is - * linked to, and the port terminates during the - * trace. In this case we do *not* want to clear - * the active flag, which will make the process hang - * in limbo forever. Get out of here and terminate - * the process... 
- */ - return -1; - } -#endif + Eterm module = reg[0]; + Eterm function = reg[1]; + Eterm args = reg[2]; if (is_not_atom(module) || is_not_atom(function)) { /* @@ -6605,33 +2398,22 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re * If there are no waiting messages, garbage collect and * shrink the heap. */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); if (!c_p->msg.len) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->fvalue = NIL; PROCESS_MAIN_CHK_LOCKS(c_p); erts_garbage_collect_hibernate(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); -#ifndef ERTS_SMP - if (ERTS_PROC_IS_EXITING(c_p)) { - /* - * See comment in the beginning of the function... - * - * This second test is needed since gc might be traced. - */ - return -1; - } -#else /* ERTS_SMP */ - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); if (!c_p->msg.len) -#endif - erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); + erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->current = &bif_export[BIF_hibernate_3]->info.mfa; c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */ return 1; @@ -6721,7 +2503,7 @@ call_fun(Process* p, /* Current process. 
*/ module = fe->module; - ERTS_SMP_READ_MEMORY_BARRIER; + ERTS_THR_READ_MEMORY_BARRIER; if (fe->pend_purge_address) { /* * The system is currently trying to purge the @@ -6852,7 +2634,7 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) p->htop = hp + needed; funp = (ErlFunThing *) hp; hp = funp->env; - erts_smp_refc_inc(&fe->refc, 2); + erts_refc_inc(&fe->refc, 2); funp->thing_word = HEADER_FUN; funp->next = MSO(p).first; MSO(p).first = (struct erl_off_heap_header*) funp; @@ -6955,24 +2737,20 @@ do { \ static Eterm -new_map(Process* p, Eterm* reg, BeamInstr* I) +new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr) { - Uint n = Arg(3); Uint i; Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */; Eterm keys; Eterm *mhp,*thp; Eterm *E; - BeamInstr *ptr; flatmap_t *mp; ErtsHeapFactory factory; - ptr = &Arg(4); - if (n > 2*MAP_SMALL_MAP_LIMIT) { Eterm res; if (HeapWordsLeft(p) < n) { - erts_garbage_collect(p, n, reg, Arg(2)); + erts_garbage_collect(p, n, reg, live); } mhp = p->htop; @@ -6993,7 +2771,7 @@ new_map(Process* p, Eterm* reg, BeamInstr* I) } if (HeapWordsLeft(p) < need) { - erts_garbage_collect(p, need, reg, Arg(2)); + erts_garbage_collect(p, need, reg, live); } thp = p->htop; @@ -7016,9 +2794,42 @@ new_map(Process* p, Eterm* reg, BeamInstr* I) } static Eterm -update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) +new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, Uint live, BeamInstr* ptr) +{ + Eterm* keys = tuple_val(keys_literal); + Uint n = arityval(*keys); + Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */; + Uint i; + flatmap_t *mp; + Eterm *mhp; + Eterm *E; + + ASSERT(n <= MAP_SMALL_MAP_LIMIT); + + if (HeapWordsLeft(p) < need) { + erts_garbage_collect(p, need, reg, live); + } + + mhp = p->htop; + E = p->stop; + + mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ; + mp->thing_word = MAP_HEADER_FLATMAP; + mp->size = n; + mp->keys = keys_literal; + + for (i = 0; i < n; i++) { + GET_TERM(*ptr++, *mhp++); + } + + p->htop = mhp; + + return make_flatmap(mp); +} + +static Eterm +update_map_assoc(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* new_p) { - Uint n; Uint num_old; Uint num_updates; Uint need; @@ -7028,23 +2839,18 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) Eterm* E; Eterm* old_keys; Eterm* old_vals; - BeamInstr* new_p; Eterm new_key; Eterm* kp; + Eterm map; - new_p = &Arg(5); - num_updates = Arg(4) / 2; + num_updates = n / 2; + map = reg[live]; if (is_not_flatmap(map)) { Uint32 hx; Eterm val; - /* apparently the compiler does not emit is_map instructions, - * bad compiler */ - - if (is_not_hashmap(map)) - return THE_NON_VALUE; - + ASSERT(is_hashmap(map)); res = map; E = p->stop; while(num_updates--) { @@ -7068,7 +2874,7 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) */ if (num_old == 0) { - return new_map(p, reg, I+1); + return new_map(p, reg, live, n, new_p); } /* @@ -7078,8 +2884,6 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) need = 2*(num_old+num_updates) + 1 + MAP_HEADER_FLATMAP_SZ; if (HeapWordsLeft(p) < need) { - Uint live = Arg(3); - reg[live] = map; erts_garbage_collect(p, need, reg, live+1); map = reg[live]; old_mp = (flatmap_t *)flatmap_val(map); @@ -7226,9 +3030,8 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) */ static Eterm -update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I) +update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p) { - 
Uint n; Uint i; Uint num_old; Uint need; @@ -7238,12 +3041,12 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I) Eterm* E; Eterm* old_keys; Eterm* old_vals; - BeamInstr* new_p; Eterm new_key; + Eterm map; - new_p = &Arg(5); - n = Arg(4) / 2; /* Number of values to be updated */ + n /= 2; /* Number of values to be updated */ ASSERT(n > 0); + map = reg[live]; if (is_not_flatmap(map)) { Uint32 hx; @@ -7297,8 +3100,6 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I) need = num_old + MAP_HEADER_FLATMAP_SZ; if (HeapWordsLeft(p) < need) { - Uint live = Arg(3); - reg[live] = map; erts_garbage_collect(p, need, reg, live+1); map = reg[live]; old_mp = (flatmap_t *)flatmap_val(map); diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 23258dbe9c..3f9dc2c1aa 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -81,16 +81,28 @@ ErlDrvBinary* erts_gzinflate_buffer(char*, int); #define TE_FAIL (-1) #define TE_SHORT_WINDOW (-2) +/* + * Type for a reference to a label that must be patched. + */ + typedef struct { - Uint value; /* Value of label (NULL if not known yet). */ - Sint patches; /* Index (into code buffer) to first location - * which must be patched with the value of this label. - */ -#ifdef ERTS_SMP + Uint pos; /* Position of label reference to patch. */ + Uint offset; /* Offset from patch location. */ + int packed; /* 0 (not packed), 1 (lsw), 2 (msw) */ +} LabelPatch; + +/* + * Type for a label. + */ + +typedef struct { + Uint value; /* Value of label (0 if not known yet). */ Uint looprec_targeted; /* Non-zero if this label is the target of a loop_rec * instruction. */ -#endif + LabelPatch* patches; /* Array of label patches. */ + Uint num_patches; /* Number of patches in array. */ + Uint num_allocated; /* Number of allocated patches. */ } Label; /* @@ -227,7 +239,7 @@ typedef struct { typedef struct literal_patch LiteralPatch; struct literal_patch { - int pos; /* Position in code */ + Uint pos; /* Position in code */ LiteralPatch* next; }; @@ -307,6 +319,7 @@ typedef struct LoaderState { int on_load; /* Index in the code for the on_load function * (or 0 if there is no on_load function) */ + int otp_20_or_higher; /* Compiled with OTP 20 or higher */ /* * Atom table. @@ -507,6 +520,7 @@ static int read_lambda_table(LoaderState* stp); static int read_literal_table(LoaderState* stp); static int read_line_table(LoaderState* stp); static int read_code_header(LoaderState* stp); +static void init_label(Label* lp); static int load_code(LoaderState* stp); static GenOp* gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index, GenOpArg Tuple, GenOpArg Dst); @@ -537,6 +551,7 @@ static int get_tag_and_value(LoaderState* stp, Uint len_code, static int new_label(LoaderState* stp); static void new_literal_patch(LoaderState* stp, int pos); static void new_string_patch(LoaderState* stp, int pos); +static int find_literal(LoaderState* stp, Eterm needle, Uint *idx); static Uint new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size); static int genopargcompare(GenOpArg* a, GenOpArg* b); static Eterm get_module_info(Process* p, ErtsCodeIndex code_ix, @@ -740,6 +755,13 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader, } /* + * Find out whether the code was compiled with OTP 20 + * or higher. + */ + + stp->otp_20_or_higher = stp->chunks[UTF8_ATOM_CHUNK].size > 0; + + /* * Load the code chunk. 
*/ @@ -795,8 +817,8 @@ erts_finish_loading(Binary* magic, Process* c_p, * table which is not protected by any locks. */ - ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() || - erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() || + erts_thr_progress_is_blocking()); /* * Make current code for the module old and insert the new code * as current. This will fail if there already exists old code @@ -831,7 +853,7 @@ erts_finish_loading(Binary* magic, Process* c_p, continue; } else if (ep->beam[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); ASSERT(mod_tab_p->curr.num_traced_exports > 0); erts_clear_export_break(mod_tab_p, &ep->info); ep->addressv[code_ix] = (BeamInstr *) ep->beam[1]; @@ -1043,6 +1065,10 @@ loader_state_dtor(Binary* magic) stp->codev = 0; } if (stp->labels != 0) { + Uint num; + for (num = 0; num < stp->num_labels; num++) { + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels[num].patches); + } erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels); stp->labels = 0; } @@ -1526,7 +1552,7 @@ read_export_table(LoaderState* stp) * any other functions that walk through all local functions. */ - if (stp->labels[n].patches >= 0) { + if (stp->labels[n].num_patches > 0) { LoadError3(stp, "there are local calls to the stub for " "the BIF %T:%T/%d", stp->module, func, arity); @@ -1872,11 +1898,7 @@ read_code_header(LoaderState* stp) stp->labels = (Label *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, stp->num_labels * sizeof(Label)); for (i = 0; i < stp->num_labels; i++) { - stp->labels[i].value = 0; - stp->labels[i].patches = -1; -#ifdef ERTS_SMP - stp->labels[i].looprec_targeted = 0; -#endif + init_label(&stp->labels[i]); } stp->catches = 0; @@ -1905,12 +1927,43 @@ read_code_header(LoaderState* stp) #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm)))) +static void init_label(Label* lp) +{ + lp->value = 0; + lp->looprec_targeted = 0; + lp->num_patches = 0; + lp->num_allocated = 4; + lp->patches = erts_alloc(ERTS_ALC_T_PREPARED_CODE, + lp->num_allocated * sizeof(LabelPatch)); +} + +static void +register_label_patch(LoaderState* stp, Uint label, Uint ci, Uint offset) +{ + Label* lp; + + ASSERT(label < stp->num_labels); + lp = &stp->labels[label]; + if (lp->num_allocated <= lp->num_patches) { + lp->num_allocated *= 2; + lp->patches = erts_realloc(ERTS_ALC_T_PREPARED_CODE, + (void *) lp->patches, + lp->num_allocated * sizeof(LabelPatch)); + } + lp->patches[lp->num_patches].pos = ci; + lp->patches[lp->num_patches].offset = offset; + lp->patches[lp->num_patches].packed = 0; + lp->num_patches++; + stp->codev[ci] = label; +} + static int load_code(LoaderState* stp) { int i; - int ci; - int last_func_start = 0; /* Needed by nif loading and line instructions */ + Uint ci; + Uint last_instr_start; /* Needed for relative jumps */ + Uint last_func_start = 0; /* Needed by nif loading and line instructions */ char* sign; int arg; /* Number of current argument. */ int num_specific; /* Number of specific ops for current. 
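/*
 * [Editor's note] Illustrative sketch only. register_label_patch() above
 * grows each label's patch array by doubling num_allocated whenever it is
 * full, which is the usual amortized-O(1) dynamic array. A plain struct
 * stands in for LabelPatch, and malloc/realloc stand in for the ERTS
 * allocator calls.
 */
#include <stdio.h>
#include <stdlib.h>

struct patch { unsigned pos; int offset; };

struct patch_vec {
    struct patch *items;
    unsigned num_used;
    unsigned num_allocated;
};

static void patch_vec_init(struct patch_vec *v)
{
    v->num_used = 0;
    v->num_allocated = 4;                      /* same starting size as init_label() */
    v->items = malloc(v->num_allocated * sizeof(*v->items));
    if (!v->items) exit(1);
}

static void patch_vec_push(struct patch_vec *v, unsigned pos, int offset)
{
    if (v->num_used == v->num_allocated) {
        struct patch *p;
        v->num_allocated *= 2;
        p = realloc(v->items, v->num_allocated * sizeof(*v->items));
        if (!p) exit(1);
        v->items = p;
    }
    v->items[v->num_used].pos = pos;
    v->items[v->num_used].offset = offset;
    v->num_used++;
}

int main(void)
{
    struct patch_vec v;
    unsigned i;
    patch_vec_init(&v);
    for (i = 0; i < 10; i++)                   /* forces doubling: 4 -> 8 -> 16 */
        patch_vec_push(&v, i, -(int)i);
    printf("used=%u allocated=%u\n", v.num_used, v.num_allocated);
    free(v.items);
    return 0;
}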
*/ @@ -1923,6 +1976,9 @@ load_code(LoaderState* stp) GenOp** last_op_next = NULL; int arity; int retval = 1; +#if defined(BEAM_WIDE_SHIFT) + int num_trailing_f; /* Number of extra 'f' arguments in a list */ +#endif /* * The size of the loaded func_info instruction is needed @@ -2028,30 +2084,10 @@ load_code(LoaderState* stp) case 0: /* Floating point number. * Not generated by the compiler in R16B and later. + * (The literal pool is used instead.) */ - { - Eterm* hp; -#if !defined(ARCH_64) - Uint high, low; -# endif - last_op->a[arg].val = new_literal(stp, &hp, - FLOAT_SIZE_OBJECT); - hp[0] = HEADER_FLONUM; - last_op->a[arg].type = TAG_q; -#if defined(ARCH_64) - GetInt(stp, 8, hp[1]); -# else - GetInt(stp, 4, high); - GetInt(stp, 4, low); - if (must_swap_floats) { - Uint t = high; - high = low; - low = t; - } - hp[1] = high; - hp[2] = low; -# endif - } + LoadError0(stp, "please re-compile this module with an " + ERLANG_OTP_RELEASE " compiler"); break; case 1: /* List. */ if (arg+1 != arity) { @@ -2286,6 +2322,7 @@ load_code(LoaderState* stp) stp->specific_op = specific; CodeNeed(opc[stp->specific_op].sz+16); /* Extra margin for packing */ + last_instr_start = ci + opc[stp->specific_op].adjust; code[ci++] = BeamOpCode(stp->specific_op); } @@ -2368,7 +2405,8 @@ load_code(LoaderState* stp) break; } break; - case 'd': /* Destination (x(0), x(N), y(N) */ + case 'd': /* Destination (x(N), y(N) */ + case 'S': /* Source (x(N), y(N)) */ switch (tag) { case TAG_x: code[ci++] = tmp_op->a[arg].val * sizeof(Eterm); @@ -2382,11 +2420,29 @@ load_code(LoaderState* stp) break; } break; - case 'I': /* Untagged integer (or pointer). */ - VerifyTag(stp, tag, TAG_u); - code[ci++] = tmp_op->a[arg].val; - break; - case 't': /* Small untagged integer -- can be packed. */ + case 't': /* Small untagged integer (16 bits) -- can be packed. */ + case 'I': /* Untagged integer (32 bits) -- can be packed. */ + case 'W': /* Untagged integer or pointer (machine word). */ +#ifdef DEBUG + switch (*sign) { + case 't': + if (tmp_op->a[arg].val >> 16 != 0) { + load_printf(__LINE__, stp, "value %lu of type 't' does not fit in 16 bits", + tmp_op->a[arg].val); + ASSERT(0); + } + break; +#ifdef ARCH_64 + case 'I': + if (tmp_op->a[arg].val >> 32 != 0) { + load_printf(__LINE__, stp, "value %lu of type 'I' does not fit in 32 bits", + tmp_op->a[arg].val); + ASSERT(0); + } + break; +#endif + } +#endif VerifyTag(stp, tag, TAG_u); code[ci++] = tmp_op->a[arg].val; break; @@ -2396,16 +2452,14 @@ load_code(LoaderState* stp) break; case 'f': /* Destination label */ VerifyTag(stp, tag_to_letter[tag], *sign); - code[ci] = stp->labels[tmp_op->a[arg].val].patches; - stp->labels[tmp_op->a[arg].val].patches = ci; + register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start); ci++; break; case 'j': /* 'f' or 'p' */ if (tag == TAG_p) { code[ci] = 0; } else if (tag == TAG_f) { - code[ci] = stp->labels[tmp_op->a[arg].val].patches; - stp->labels[tmp_op->a[arg].val].patches = ci; + register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start); } else { LoadError3(stp, "bad tag %d; expected %d or %d", tag, TAG_f, TAG_p); @@ -2425,7 +2479,6 @@ load_code(LoaderState* stp) LoadError1(stp, "label %d defined more than once", last_label); } stp->labels[last_label].value = ci; - ASSERT(stp->labels[last_label].patches < ci); break; case 'e': /* Export entry */ VerifyTag(stp, tag, TAG_u); @@ -2471,36 +2524,134 @@ load_code(LoaderState* stp) * The packing engine. */ if (opc[stp->specific_op].pack[0]) { - char* prog; /* Program for packing engine. 
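/*
 * [Editor's note] Sketch, not part of the patch. The 'd' and 'S' operand
 * cases above store "register number * sizeof(Eterm)", i.e. a ready-made
 * byte offset, so at run time the emulator can add the operand straight to
 * the x/y register base pointer without an extra shift. Plain 'long'
 * stands in for Eterm below.
 */
#include <stdio.h>

typedef long term;

/* Loader side: encode register N as a byte offset. */
static unsigned long encode_xreg(unsigned n) { return n * sizeof(term); }

/* Emulator side: a single addition recovers the register slot. */
static term *xreg_slot(term *x_base, unsigned long operand)
{
    return (term *)((char *)x_base + operand);
}

int main(void)
{
    term x[4] = { 11, 22, 33, 44 };
    unsigned long op = encode_xreg(2);          /* operand as stored in the code array */
    printf("x[2] = %ld\n", *xreg_slot(x, op));  /* prints 33 */
    return 0;
}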
*/ - BeamInstr stack[8]; /* Stack. */ - BeamInstr* sp = stack; /* Points to next free position. */ - BeamInstr packed = 0; /* Accumulator for packed operations. */ + char* prog; /* Program for packing engine. */ + struct pack_stack { + BeamInstr instr; + Uint* patch_pos; + } stack[8]; /* Stack. */ + struct pack_stack* sp = stack; /* Points to next free position. */ + BeamInstr packed = 0; /* Accumulator for packed operations. */ + LabelPatch* packed_label = 0; for (prog = opc[stp->specific_op].pack; *prog; prog++) { switch (*prog) { - case 'g': /* Get instruction; push on stack. */ - *sp++ = code[--ci]; - break; + case 'g': /* Get operand and push on stack. */ + ci--; + sp->instr = code[ci]; + sp->patch_pos = 0; + sp++; + break; + case 'f': /* Get possible 'f' operand and push on stack. */ + { + Uint w = code[--ci]; + sp->instr = w; + sp->patch_pos = 0; + + if (w != 0) { + LabelPatch* lbl_p; + int num_patches; + int patch; + + ASSERT(w < stp->num_labels); + lbl_p = stp->labels[w].patches; + num_patches = stp->labels[w].num_patches; + for (patch = num_patches - 1; patch >= 0; patch--) { + if (lbl_p[patch].pos == ci) { + sp->patch_pos = &lbl_p[patch].pos; + break; + } + } + ASSERT(sp->patch_pos); + } + sp++; + } + break; + case 'q': /* Get possible 'q' operand and push on stack. */ + { + LiteralPatch* lp; + + ci--; + sp->instr = code[ci]; + sp->patch_pos = 0; + + for (lp = stp->literal_patches; + lp && lp->pos > ci-MAX_OPARGS; + lp = lp->next) { + if (lp->pos == ci) { + sp->patch_pos = &lp->pos; + break; + } + } + sp++; + } + break; case 'i': /* Initialize packing accumulator. */ packed = code[--ci]; break; case '0': /* Tight shift */ packed = (packed << BEAM_TIGHT_SHIFT) | code[--ci]; + if (packed_label) { + packed_label->packed++; + } break; case '6': /* Shift 16 steps */ packed = (packed << BEAM_LOOSE_SHIFT) | code[--ci]; + if (packed_label) { + packed_label->packed++; + } break; #ifdef ARCH_64 case 'w': /* Shift 32 steps */ - packed = (packed << BEAM_WIDE_SHIFT) | code[--ci]; - break; + { + Uint w = code[--ci]; + + if (packed_label) { + packed_label->packed++; + } + + /* + * 'w' can handle both labels ('f' and 'j'), as well + * as 'I'. Test whether this is a label. + */ + + if (w < stp->num_labels) { + /* + * Probably a label. Look for patch pointing to this + * position. + */ + LabelPatch* lp = stp->labels[w].patches; + int num_patches = stp->labels[w].num_patches; + int patch; + for (patch = num_patches - 1; patch >= 0; patch--) { + if (lp[patch].pos == ci) { + lp[patch].packed = 1; + packed_label = &lp[patch]; + break; + } + } + } + packed = (packed << BEAM_WIDE_SHIFT) | + (code[ci] & BEAM_WIDE_MASK); + } + break; #endif case 'p': /* Put instruction (from stack). */ - code[ci++] = *--sp; + --sp; + code[ci] = sp->instr; + if (sp->patch_pos) { + *sp->patch_pos = ci; + } + ci++; break; case 'P': /* Put packed operands. */ - *sp++ = packed; + sp->instr = packed; + sp->patch_pos = 0; + sp++; packed = 0; + if (packed_label) { + packed_label->pos = ci; + packed_label = 0; + } break; default: ASSERT(0); @@ -2513,7 +2664,17 @@ load_code(LoaderState* stp) * Load any list arguments using the primitive tags. 
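/*
 * [Editor's note] Standalone sketch of the packing accumulator used by the
 * engine above: successive small operands are shifted into one machine word
 * (the '0'/'6'/'w' steps) and the emulator later peels them off with masks.
 * The 16/32-bit field widths are stand-ins for BEAM_LOOSE_SHIFT and
 * BEAM_WIDE_SHIFT; the real engine is driven by a per-instruction pack
 * program string.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack3(uint16_t a, uint16_t b, uint32_t c)
{
    uint64_t packed = 0;
    packed = (packed << 16) | a;    /* like a loose-shift ('6') step */
    packed = (packed << 16) | b;
    packed = (packed << 32) | c;    /* like a wide-shift ('w') step */
    return packed;
}

int main(void)
{
    uint64_t w = pack3(7, 9, 123456);
    /* Unpacking mirrors the shifts in reverse order. */
    uint32_t c = (uint32_t)(w & 0xffffffffu);
    uint16_t b = (uint16_t)((w >> 32) & 0xffff);
    uint16_t a = (uint16_t)((w >> 48) & 0xffff);
    printf("a=%u b=%u c=%u\n", a, b, c);
    return 0;
}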
*/ +#if defined(BEAM_WIDE_SHIFT) + num_trailing_f = 0; +#endif for ( ; arg < tmp_op->arity; arg++) { +#if defined(BEAM_WIDE_SHIFT) + if (tmp_op->a[arg].type == TAG_f) { + num_trailing_f++; + } else { + num_trailing_f = 0; + } +#endif switch (tmp_op->a[arg].type) { case TAG_i: CodeNeed(1); @@ -2527,8 +2688,7 @@ load_code(LoaderState* stp) break; case TAG_f: CodeNeed(1); - code[ci] = stp->labels[tmp_op->a[arg].val].patches; - stp->labels[tmp_op->a[arg].val].patches = ci; + register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start); ci++; break; case TAG_x: @@ -2554,6 +2714,61 @@ load_code(LoaderState* stp) } } + /* + * If all the extra arguments were 'f' operands, + * and the wordsize is 64 bits, pack two 'f' operands + * into each word. + */ + +#if defined(BEAM_WIDE_SHIFT) + if (num_trailing_f >= 1) { + Uint src_index = ci - num_trailing_f; + Uint src_limit = ci; + Uint dst_limit = src_index + (num_trailing_f+1)/2; + + ci = src_index; + while (ci < dst_limit) { + Uint w[2]; + BeamInstr packed = 0; + int wi; + + w[0] = code[src_index]; + if (src_index+1 < src_limit) { + w[1] = code[src_index+1]; + } else { + w[1] = 0; + } + for (wi = 0; wi < 2; wi++) { + Uint lbl = w[wi]; + LabelPatch* lp = stp->labels[lbl].patches; + int num_patches = stp->labels[lbl].num_patches; + +#if defined(WORDS_BIGENDIAN) + packed <<= BEAM_WIDE_SHIFT; + packed |= lbl & BEAM_WIDE_MASK; +#else + packed >>= BEAM_WIDE_SHIFT; + packed |= lbl << BEAM_WIDE_SHIFT; +#endif + while (num_patches-- > 0) { + if (lp->pos == src_index + wi) { + lp->pos = ci; +#if defined(WORDS_BIGENDIAN) + lp->packed = 2 - wi; +#else + lp->packed = wi + 1; +#endif + break; + } + lp++; + } + } + code[ci++] = packed; + src_index += 2; + } + } +#endif + /* * Handle a few special cases. */ @@ -2600,17 +2815,16 @@ load_code(LoaderState* stp) the size of the ops.tab i_func_info instruction is not the same as FUNC_INFO_SZ */ ASSERT(stp->labels[last_label].value == ci - FUNC_INFO_SZ); - stp->hdr->functions[function_number] = (ErtsCodeInfo*) stp->labels[last_label].patches; offset = function_number; - stp->labels[last_label].patches = offset; + register_label_patch(stp, last_label, offset, 0); function_number++; if (stp->arity > MAX_ARG) { LoadError1(stp, "too many arguments: %d", stp->arity); } #ifdef DEBUG - ASSERT(stp->labels[0].patches < 0); /* Should not be referenced. */ + ASSERT(stp->labels[0].num_patches == 0); /* Should not be referenced. */ for (i = 1; i < stp->num_labels; i++) { - ASSERT(stp->labels[i].patches < ci); + ASSERT(stp->labels[i].num_patches <= stp->labels[i].num_allocated); } #endif } @@ -2621,8 +2835,8 @@ load_code(LoaderState* stp) /* Remember offset for the on_load function. */ stp->on_load = ci; break; - case op_bs_put_string_II: - case op_i_bs_match_string_xfII: + case op_bs_put_string_WW: + case op_i_bs_match_string_xfWW: new_string_patch(stp, ci-1); break; @@ -2733,6 +2947,12 @@ load_code(LoaderState* stp) #define never(St) 0 +static int +compiled_with_otp_20_or_higher(LoaderState* stp) +{ + return stp->otp_20_or_higher; +} + /* * Predicate that tests whether a jump table can be used. 
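/*
 * [Editor's note] Sketch of the trailing-'f' packing above: on a 64-bit
 * word size, two 32-bit label operands are stored in one code word, the
 * first in the least significant half and the second in the most
 * significant half (the big-endian build swaps them). Each label patch
 * then records which half it owns ("lsw"/"msw") so freeze_code() can still
 * rewrite it. This shows the little-endian layout only.
 */
#include <stdint.h>
#include <stdio.h>

#define WIDE_SHIFT 32
#define WIDE_MASK  0xffffffffu

static uint64_t pack_pair(uint32_t first, uint32_t second)
{
    return ((uint64_t)second << WIDE_SHIFT) | first;
}

static uint32_t get_lsw(uint64_t w) { return (uint32_t)(w & WIDE_MASK); }
static uint32_t get_msw(uint64_t w) { return (uint32_t)(w >> WIDE_SHIFT); }

int main(void)
{
    uint64_t w = pack_pair(100, 200);   /* two label offsets in one word */
    printf("lsw=%u msw=%u\n", get_lsw(w), get_msw(w));
    return 0;
}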
*/ @@ -2872,17 +3092,18 @@ gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index, op->next = NULL; if (Index.type == TAG_i && Index.val > 0 && + Index.val <= ERTS_MAX_TUPLE_SIZE && (Tuple.type == TAG_x || Tuple.type == TAG_y)) { op->op = genop_i_fast_element_4; - op->a[0] = Fail; - op->a[1] = Tuple; + op->a[0] = Tuple; + op->a[1] = Fail; op->a[2].type = TAG_u; op->a[2].val = Index.val; op->a[3] = Dst; } else { op->op = genop_i_element_4; - op->a[0] = Fail; - op->a[1] = Tuple; + op->a[0] = Tuple; + op->a[1] = Fail; op->a[2] = Index; op->a[3] = Dst; } @@ -2962,13 +3183,14 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live, op->a[0] = Ms; op->a[1] = Fail; op->a[2] = Dst; +#ifdef ARCH_64 } else if (bits == 32 && (Flags.val & BSF_LITTLE) == 0) { - op->op = genop_i_bs_get_integer_32_4; - op->arity = 4; + op->op = genop_i_bs_get_integer_32_3; + op->arity = 3; op->a[0] = Ms; op->a[1] = Fail; - op->a[2] = Live; - op->a[3] = Dst; + op->a[2] = Dst; +#endif } else { generic: if (bits < SMALL_BITS) { @@ -3103,16 +3325,6 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live, } /* - * Predicate to test whether a heap binary should be generated. - */ - -static int -should_gen_heap_bin(LoaderState* stp, GenOpArg Src) -{ - return Src.val <= ERL_ONHEAP_BIN_LIMIT; -} - -/* * Predicate to test whether a binary construction is too big. */ @@ -3384,27 +3596,14 @@ negation_is_small(LoaderState* stp, GenOpArg Int) IS_SSMALL(-((Sint)Int.val)); } - -static int -smp(LoaderState* stp) -{ -#ifdef ERTS_SMP - return 1; -#else - return 0; -#endif -} - /* * Mark this label. */ static int smp_mark_target_label(LoaderState* stp, GenOpArg L) { -#ifdef ERTS_SMP ASSERT(L.type == TAG_f); stp->labels[L.val].looprec_targeted = 1; -#endif return 1; } @@ -3415,12 +3614,8 @@ smp_mark_target_label(LoaderState* stp, GenOpArg L) static int smp_already_locked(LoaderState* stp, GenOpArg L) { -#ifdef ERTS_SMP ASSERT(L.type == TAG_u); return stp->labels[L.val].looprec_targeted; -#else - return 0; -#endif } /* @@ -3434,11 +3629,11 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time) Sint timeout; NEW_GENOP(stp, op); - op->op = genop_i_wait_timeout_2; + op->op = genop_wait_timeout_unlocked_int_2; op->next = NULL; op->arity = 2; - op->a[0] = Fail; - op->a[1].type = TAG_u; + op->a[0].type = TAG_u; + op->a[1] = Fail; if (Time.type == TAG_i && (timeout = Time.val) >= 0 && #if defined(ARCH_64) @@ -3447,7 +3642,7 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time) 1 #endif ) { - op->a[1].val = timeout; + op->a[0].val = timeout; #if !defined(ARCH_64) } else if (Time.type == TAG_q) { Eterm big; @@ -3461,7 +3656,7 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time) } else { Uint u; (void) term_to_Uint(big, &u); - op->a[1].val = (BeamInstr) u; + op->a[0].val = (BeamInstr) u; } #endif } else { @@ -3481,12 +3676,12 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time) Sint timeout; NEW_GENOP(stp, op); - op->op = genop_i_wait_timeout_locked_2; + op->op = genop_wait_timeout_locked_int_2; op->next = NULL; op->arity = 2; - op->a[0] = Fail; - op->a[1].type = TAG_u; - + op->a[0].type = TAG_u; + op->a[1] = Fail; + if (Time.type == TAG_i && (timeout = Time.val) >= 0 && #if defined(ARCH_64) (timeout >> 32) == 0 @@ -3494,7 +3689,7 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time) 1 #endif ) { - op->a[1].val = timeout; + op->a[0].val = timeout; #if !defined(ARCH_64) } else if (Time.type == TAG_q) { 
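/*
 * [Editor's note] Sketch only. gen_literal_timeout() above selects the
 * specialized wait_timeout instruction only when the literal timeout is a
 * non-negative integer that fits in 32 bits (checked with "timeout >> 32"
 * on 64-bit builds); anything else falls back to the generic path. The
 * same guard expressed on plain C integers:
 */
#include <stdint.h>
#include <stdio.h>

static int timeout_fits(int64_t timeout)
{
    return timeout >= 0 && (timeout >> 32) == 0;
}

int main(void)
{
    printf("%d %d %d\n",
           timeout_fits(5000),          /* 1: usable as an immediate */
           timeout_fits(-1),            /* 0: negative */
           timeout_fits(1LL << 40));    /* 0: too large for 32 bits */
    return 0;
}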
Eterm big; @@ -3508,7 +3703,7 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time) } else { Uint u; (void) term_to_Uint(big, &u); - op->a[1].val = (BeamInstr) u; + op->a[0].val = (BeamInstr) u; } #endif } else { @@ -3554,7 +3749,7 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail, if (size == 2) { NEW_GENOP(stp, op); op->next = NULL; - op->op = genop_i_select_tuple_arity2_6; + op->op = genop_i_select_tuple_arity2_4; GENOP_ARITY(op, arity - 1); op->a[0] = S; op->a[1] = Fail; @@ -3844,14 +4039,13 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, int i, j, align = 0; if (size == 2) { - /* * Use a special-cased instruction if there are only two values. */ NEW_GENOP(stp, op); op->next = NULL; - op->op = genop_i_select_val2_6; + op->op = genop_i_select_val2_4; GENOP_ARITY(op, arity - 1); op->a[0] = S; op->a[1] = Fail; @@ -3861,47 +4055,19 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, op->a[5] = Rest[3]; return op; - - } else if (size > 10) { - - /* binary search instruction */ - - NEW_GENOP(stp, op); - op->next = NULL; - op->op = genop_i_select_val_bins_3; - GENOP_ARITY(op, arity); - op->a[0] = S; - op->a[1] = Fail; - op->a[2].type = TAG_u; - op->a[2].val = size; - for (i = 3; i < arity; i++) { - op->a[i] = Rest[i-3]; - } - - /* - * Sort the values to make them useful for a binary search. - */ - - qsort(op->a+3, size, 2*sizeof(GenOpArg), - (int (*)(const void *, const void *)) genopargcompare); -#ifdef DEBUG - for (i = 3; i < arity-2; i += 2) { - ASSERT(op->a[i].val < op->a[i+2].val); - } -#endif - return op; } - /* linear search instruction */ - - align = 1; + if (size <= 10) { + /* Use linear search. Reserve place for a sentinel. */ + align = 1; + } arity += 2*align; size += align; NEW_GENOP(stp, op); op->next = NULL; - op->op = genop_i_select_val_lins_3; + op->op = (align == 0) ? genop_i_select_val_bins_3 : genop_i_select_val_lins_3; GENOP_ARITY(op, arity); op->a[0] = S; op->a[1] = Fail; @@ -3915,7 +4081,7 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, } /* - * Sort the values to make them useful for a sentinel search + * Sort the values to make them useful for a binary or sentinel search. */ qsort(tmp, size - align, 2*sizeof(GenOpArg), @@ -3930,11 +4096,12 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, erts_free(ERTS_ALC_T_LOADER_TMP, (void *) tmp); - /* add sentinel */ - - op->a[j].type = TAG_u; - op->a[j].val = ~((BeamInstr)0); - op->a[j+size] = Fail; + if (align) { + /* Add sentinel for linear search. */ + op->a[j].type = TAG_u; + op->a[j].val = ~((BeamInstr)0); + op->a[j+size] = Fail; + } #ifdef DEBUG for (i = 0; i < size - 1; i++) { @@ -4222,6 +4389,92 @@ literal_is_map(LoaderState* stp, GenOpArg Lit) } /* + * Predicate to test whether all of the given new small map keys are literals + */ +static int +is_small_map_literal_keys(LoaderState* stp, GenOpArg Size, GenOpArg* Rest) +{ + if (Size.val > MAP_SMALL_MAP_LIMIT) { + return 0; + } + + /* + * Operations with non-literals have always only one key. 
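/*
 * [Editor's note] Standalone sketch of the two dispatch strategies chosen
 * by gen_select_val() above: up to ~10 values use a linear scan over sorted
 * keys terminated by an all-ones sentinel, larger tables use binary search
 * over the same sorted pairs. Keys and values are plain integers here
 * instead of GenOpArg pairs.
 */
#include <stdint.h>
#include <stdio.h>

#define SENTINEL UINT64_MAX

/* Linear scan; keys[] is sorted and ends with SENTINEL -> fail value. */
static int select_linear(const uint64_t *keys, const int *vals, uint64_t k, int fail)
{
    int i;
    for (i = 0; keys[i] < k; i++)
        ;
    return keys[i] == k ? vals[i] : fail;
}

/* Binary search over the same sorted keys (no sentinel needed). */
static int select_binary(const uint64_t *keys, const int *vals, int n,
                         uint64_t k, int fail)
{
    int lo = 0, hi = n;
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;
        if (keys[mid] < k) lo = mid + 1;
        else if (keys[mid] > k) hi = mid;
        else return vals[mid];
    }
    return fail;
}

int main(void)
{
    uint64_t keys[] = { 2, 5, 9, SENTINEL };
    int vals[] = { 20, 50, 90, -1 };
    printf("%d %d\n",
           select_linear(keys, vals, 5, -1),       /* 50 */
           select_binary(keys, vals, 3, 7, -1));   /* -1 */
    return 0;
}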
+ */ + if (Size.val != 2) { + return 1; + } + + switch (Rest[0].type) { + case TAG_a: + case TAG_i: + case TAG_n: + case TAG_q: + return 1; + default: + return 0; + } +} + +static GenOp* +gen_new_small_map_lit(LoaderState* stp, GenOpArg Dst, GenOpArg Live, + GenOpArg Size, GenOpArg* Rest) +{ + unsigned size = Size.val; + Uint lit; + unsigned i; + GenOp* op; + GenOpArg* dst; + Eterm* hp; + Eterm* tmp; + Eterm* thp; + Eterm keys; + + NEW_GENOP(stp, op); + GENOP_ARITY(op, 3 + size/2); + op->next = NULL; + op->op = genop_i_new_small_map_lit_3; + + tmp = thp = erts_alloc(ERTS_ALC_T_LOADER_TMP, (1 + size/2) * sizeof(*tmp)); + keys = make_tuple(thp); + *thp++ = make_arityval(size/2); + + dst = op->a+3; + + for (i = 0; i < size; i += 2) { + switch (Rest[i].type) { + case TAG_a: + *thp++ = Rest[i].val; + ASSERT(is_atom(Rest[i].val)); + break; + case TAG_i: + *thp++ = make_small(Rest[i].val); + break; + case TAG_n: + *thp++ = NIL; + break; + case TAG_q: + *thp++ = stp->literals[Rest[i].val].term; + break; + } + *dst++ = Rest[i + 1]; + } + + if (!find_literal(stp, keys, &lit)) { + lit = new_literal(stp, &hp, 1 + size/2); + sys_memcpy(hp, tmp, (1 + size/2) * sizeof(*tmp)); + } + erts_free(ERTS_ALC_T_LOADER_TMP, tmp); + + op->a[0] = Dst; + op->a[1] = Live; + op->a[2].type = TAG_q; + op->a[2].val = lit; + + return op; +} + +/* * Predicate to test whether the given literal is an empty map. */ @@ -4732,21 +4985,57 @@ freeze_code(LoaderState* stp) */ for (i = 0; i < stp->num_labels; i++) { - Sint this_patch; - Sint next_patch; + Uint patch; Uint value = stp->labels[i].value; - - if (value == 0 && stp->labels[i].patches >= 0) { + + if (value == 0 && stp->labels[i].num_patches != 0) { LoadError1(stp, "label %d not resolved", i); } ASSERT(value < stp->ci); - this_patch = stp->labels[i].patches; - while (this_patch >= 0) { - ASSERT(this_patch < stp->ci); - next_patch = codev[this_patch]; - ASSERT(next_patch < stp->ci); - codev[this_patch] = (BeamInstr) (codev + value); - this_patch = next_patch; + for (patch = 0; patch < stp->labels[i].num_patches; patch++) { + LabelPatch* lp = &stp->labels[i].patches[patch]; + Uint pos = lp->pos; + ASSERT(pos < stp->ci); + if (pos < stp->num_functions) { + /* + * This is the array of pointers to the beginning of + * each function. The pointers must remain absolute. + */ + codev[pos] = (BeamInstr) (codev + value); + } else { +#ifdef DEBUG + Uint w; +#endif + Sint32 rel = lp->offset + value; + switch (lp->packed) { + case 0: /* Not packed */ + ASSERT(codev[pos] == i); + codev[pos] = rel; + break; +#ifdef BEAM_WIDE_MASK + case 1: /* Least significant word. */ +#ifdef DEBUG + w = codev[pos] & BEAM_WIDE_MASK; + /* Correct label in least significant word? */ + ASSERT(w == i); +#endif + codev[pos] = (codev[pos] & ~BEAM_WIDE_MASK) | + (rel & BEAM_WIDE_MASK); + break; + case 2: /* Most significant word */ +#ifdef DEBUG + w = (codev[pos] >> BEAM_WIDE_SHIFT) & BEAM_WIDE_MASK; + /* Correct label in most significant word? */ + ASSERT(w == i); +#endif + codev[pos] = ((Uint)rel << BEAM_WIDE_SHIFT) | + (codev[pos] & BEAM_WIDE_MASK); + break; +#endif + default: + ASSERT(0); + } + } } } CHKBLK(ERTS_ALC_T_CODE,code_hdr); @@ -4789,8 +5078,11 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p) catches = BEAM_CATCHES_NIL; while (index != 0) { BeamInstr next = codev[index]; + BeamInstr* abs_addr; codev[index] = BeamOpCode(op_catch_yf); - catches = beam_catches_cons((BeamInstr *)codev[index+2], catches); + /* We must make the address of the label absolute again. 
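/*
 * [Editor's note] Sketch, not part of the patch. gen_new_small_map_lit()
 * above builds the keys tuple in a scratch buffer and calls find_literal()
 * so that identical keys tuples (and, later in this file, equal bignums)
 * share one literal-pool entry. A minimal "pool" with the same
 * search-then-insert behaviour, using strings as stand-ins for terms:
 */
#include <stdio.h>
#include <string.h>

#define POOL_MAX 16

struct pool {
    const char *lit[POOL_MAX];
    int n;
};

/* Search backwards like find_literal(): newest entries are most likely hits. */
static int pool_find(const struct pool *p, const char *needle, int *idx)
{
    int i;
    for (i = p->n - 1; i >= 0; i--) {
        if (strcmp(p->lit[i], needle) == 0) {
            *idx = i;
            return 1;
        }
    }
    return 0;
}

static int pool_intern(struct pool *p, const char *term)
{
    int idx;
    if (pool_find(p, term, &idx))
        return idx;                 /* reuse the existing literal */
    if (p->n == POOL_MAX)
        return -1;                  /* pool full (not an issue in this demo) */
    p->lit[p->n] = term;
    return p->n++;
}

int main(void)
{
    struct pool p = { {0}, 0 };
    int a = pool_intern(&p, "{ok,error}");
    int b = pool_intern(&p, "{ok,error}");   /* same index as 'a' */
    printf("a=%d b=%d size=%d\n", a, b, p.n);
    return 0;
}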
*/ + abs_addr = (BeamInstr *)codev + index + codev[index+2]; + catches = beam_catches_cons(abs_addr, catches); codev[index+2] = make_catch(catches); index = next; } @@ -4861,7 +5153,7 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p) /* * We are hiding a pointer into older code. */ - erts_smp_refc_dec(&fe->refc, 1); + erts_refc_dec(&fe->refc, 1); } fe->address = code_ptr; #ifdef HIPE @@ -5257,12 +5549,15 @@ get_tag_and_value(LoaderState* stp, Uint len_code, { Uint count; Sint val; - byte default_buf[128]; - byte* bigbuf = default_buf; + byte default_byte_buf[128]; + byte* byte_buf = default_byte_buf; + Eterm default_big_buf[128/sizeof(Eterm)]; + Eterm* big_buf = default_big_buf; + Eterm tmp_big; byte* s; int i; int neg = 0; - Uint arity; + Uint words_needed; Eterm* hp; /* @@ -5339,8 +5634,11 @@ get_tag_and_value(LoaderState* stp, Uint len_code, *result = val; return TAG_i; } else { - *result = new_literal(stp, &hp, BIG_UINT_HEAP_SIZE); - (void) small_to_big(val, hp); + tmp_big = small_to_big(val, big_buf); + if (!find_literal(stp, tmp_big, result)) { + *result = new_literal(stp, &hp, BIG_UINT_HEAP_SIZE); + sys_memcpy(hp, big_buf, BIG_UINT_HEAP_SIZE*sizeof(Eterm)); + } return TAG_q; } } @@ -5350,8 +5648,8 @@ get_tag_and_value(LoaderState* stp, Uint len_code, * (including margin). */ - if (count+8 > sizeof(default_buf)) { - bigbuf = erts_alloc(ERTS_ALC_T_LOADER_TMP, count+8); + if (count+8 > sizeof(default_byte_buf)) { + byte_buf = erts_alloc(ERTS_ALC_T_LOADER_TMP, count+8); } /* @@ -5360,20 +5658,20 @@ get_tag_and_value(LoaderState* stp, Uint len_code, GetString(stp, s, count); for (i = 0; i < count; i++) { - bigbuf[count-i-1] = *s++; + byte_buf[count-i-1] = *s++; } /* * Check if the number is negative, and negate it if so. */ - if ((bigbuf[count-1] & 0x80) != 0) { + if ((byte_buf[count-1] & 0x80) != 0) { unsigned carry = 1; neg = 1; for (i = 0; i < count; i++) { - bigbuf[i] = ~bigbuf[i] + carry; - carry = (bigbuf[i] == 0 && carry == 1); + byte_buf[i] = ~byte_buf[i] + carry; + carry = (byte_buf[i] == 0 && carry == 1); } ASSERT(carry == 0); } @@ -5382,33 +5680,52 @@ get_tag_and_value(LoaderState* stp, Uint len_code, * Align to word boundary. */ - if (bigbuf[count-1] == 0) { + if (byte_buf[count-1] == 0) { count--; } - if (bigbuf[count-1] == 0) { + if (byte_buf[count-1] == 0) { LoadError0(stp, "bignum not normalized"); } while (count % sizeof(Eterm) != 0) { - bigbuf[count++] = 0; + byte_buf[count++] = 0; } /* - * Allocate heap space for the bignum and copy it. + * Convert to a bignum. */ - arity = count/sizeof(Eterm); - *result = new_literal(stp, &hp, arity+1); - if (is_nil(bytes_to_big(bigbuf, count, neg, hp))) - goto load_error; + words_needed = count/sizeof(Eterm) + 1; + if (words_needed*sizeof(Eterm) > sizeof(default_big_buf)) { + big_buf = erts_alloc(ERTS_ALC_T_LOADER_TMP, words_needed*sizeof(Eterm)); + } + tmp_big = bytes_to_big(byte_buf, count, neg, big_buf); + if (is_nil(tmp_big)) { + goto load_error; + } - if (bigbuf != default_buf) { - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf); + /* + * Create a literal if there is no previous literal with the same value. 
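/*
 * [Editor's note] Illustrative sketch of the packed-label fix-up performed
 * by freeze_code() above: depending on whether the patch record says the
 * label sits in the least or most significant half of a packed 64-bit code
 * word, only that half is replaced with the resolved (relative) value; an
 * unpacked patch replaces the whole word.
 */
#include <stdint.h>
#include <stdio.h>

#define WIDE_SHIFT 32
#define WIDE_MASK  0xffffffffull

enum packed_kind { NOT_PACKED = 0, PACKED_LSW = 1, PACKED_MSW = 2 };

static uint64_t apply_patch(uint64_t word, enum packed_kind kind, uint32_t rel)
{
    switch (kind) {
    case NOT_PACKED: return rel;                                    /* whole word */
    case PACKED_LSW: return (word & ~WIDE_MASK) | rel;              /* low half   */
    case PACKED_MSW: return ((uint64_t)rel << WIDE_SHIFT) | (word & WIDE_MASK);
    }
    return word;
}

int main(void)
{
    uint64_t w = ((uint64_t)7 << WIDE_SHIFT) | 9;   /* msw=7, lsw=9 */
    w = apply_patch(w, PACKED_LSW, 1234);
    w = apply_patch(w, PACKED_MSW, 5678);
    printf("lsw=%llu msw=%llu\n",
           (unsigned long long)(w & WIDE_MASK),
           (unsigned long long)(w >> WIDE_SHIFT));
    return 0;
}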
+ */ + + if (!find_literal(stp, tmp_big, result)) { + *result = new_literal(stp, &hp, words_needed); + sys_memcpy(hp, big_buf, words_needed*sizeof(Eterm)); + } + + if (byte_buf != default_byte_buf) { + erts_free(ERTS_ALC_T_LOADER_TMP, (void *) byte_buf); + } + if (big_buf != default_big_buf) { + erts_free(ERTS_ALC_T_LOADER_TMP, (void *) big_buf); } return TAG_q; load_error: - if (bigbuf != default_buf) { - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf); + if (byte_buf != default_byte_buf) { + erts_free(ERTS_ALC_T_LOADER_TMP, (void *) byte_buf); + } + if (big_buf != default_big_buf) { + erts_free(ERTS_ALC_T_LOADER_TMP, (void *) big_buf); } return -1; } @@ -5453,8 +5770,7 @@ new_label(LoaderState* stp) stp->labels = (Label *) erts_realloc(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels, stp->num_labels * sizeof(Label)); - stp->labels[num].value = 0; - stp->labels[num].patches = -1; + init_label(&stp->labels[num]); return num; } @@ -5509,6 +5825,24 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size) return stp->num_literals++; } +static int +find_literal(LoaderState* stp, Eterm needle, Uint *idx) +{ + int i; + + /* + * The search is done backwards since the most recent literals + * allocated by the loader itself will be placed at the end + */ + for (i = stp->num_literals - 1; i >= 0; i--) { + if (EQ(needle, stp->literals[i].term)) { + *idx = (Uint) i; + return 1; + } + } + return 0; +} + Eterm erts_module_info_0(Process* p, Eterm module) { @@ -6276,7 +6610,7 @@ patch_funentries(Eterm Patchlist) fe = erts_get_fun_entry(Mod, uniq, index); fe->native_address = (Uint *)native_address; - erts_smp_refc_dec(&fe->refc, 1); + erts_refc_dec(&fe->refc, 1); if (!patch(Addresses, (Uint) fe)) return 0; diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h index c088bdb751..c4a90d3f3a 100644 --- a/erts/emulator/beam/beam_load.h +++ b/erts/emulator/beam/beam_load.h @@ -111,11 +111,7 @@ typedef struct beam_code_header { }BeamCodeHeader; -#ifdef ERTS_DIRTY_SCHEDULERS # define BEAM_NIF_MIN_FUNC_SZ 4 -#else -# define BEAM_NIF_MIN_FUNC_SZ 3 -#endif void erts_release_literal_area(struct ErtsLiteralArea_* literal_area); int erts_is_module_native(BeamCodeHeader* code); diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c index 9b0335e83d..6e373a3480 100644 --- a/erts/emulator/beam/beam_ranges.c +++ b/erts/emulator/beam/beam_ranges.c @@ -29,12 +29,12 @@ typedef struct { BeamInstr* start; /* Pointer to start of module. */ - erts_smp_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */ + erts_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */ } Range; /* Range 'end' needs to be atomic as we purge module by setting end=start in active code_ix */ -#define RANGE_END(R) ((BeamInstr*)erts_smp_atomic_read_nob(&(R)->end)) +#define RANGE_END(R) ((BeamInstr*)erts_atomic_read_nob(&(R)->end)) static Range* find_range(BeamInstr* pc); static void lookup_loc(FunctionInfo* fi, const BeamInstr* pc, @@ -49,10 +49,10 @@ struct ranges { Range* modules; /* Sorted lists of module addresses. */ Sint n; /* Number of range entries. */ Sint allocated; /* Number of allocated entries. 
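/*
 * [Editor's note] Standalone sketch of the sign handling in
 * get_tag_and_value() above: after the external bytes have been reversed
 * into little-endian order, a set top bit means the value is negative, and
 * the buffer is negated in place (two's complement: invert each byte and
 * add one via a carry that ripples upwards) before the bignum is built.
 */
#include <stdio.h>

static int negate_if_negative(unsigned char *buf, int count)
{
    int i, neg = 0;
    if (buf[count - 1] & 0x80) {            /* sign bit of the top byte */
        unsigned carry = 1;
        neg = 1;
        for (i = 0; i < count; i++) {
            buf[i] = (unsigned char)(~buf[i] + carry);
            carry = (buf[i] == 0 && carry == 1);
        }
    }
    return neg;
}

int main(void)
{
    /* -2 as a 2-byte little-endian two's-complement number: fe ff */
    unsigned char buf[2] = { 0xfe, 0xff };
    int neg = negate_if_negative(buf, 2);
    printf("neg=%d magnitude bytes: %02x %02x\n", neg, buf[0], buf[1]); /* 02 00 */
    return 0;
}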
*/ - erts_smp_atomic_t mid; /* Cached search start point */ + erts_atomic_t mid; /* Cached search start point */ }; static struct ranges r[ERTS_NUM_CODE_IX]; -static erts_smp_atomic_t mem_used; +static erts_atomic_t mem_used; static Range* write_ptr; #ifdef HARD_DEBUG @@ -90,12 +90,12 @@ erts_init_ranges(void) { Sint i; - erts_smp_atomic_init_nob(&mem_used, 0); + erts_atomic_init_nob(&mem_used, 0); for (i = 0; i < ERTS_NUM_CODE_IX; i++) { r[i].modules = 0; r[i].n = 0; r[i].allocated = 0; - erts_smp_atomic_init_nob(&r[i].mid, 0); + erts_atomic_init_nob(&r[i].mid, 0); } } @@ -107,12 +107,12 @@ erts_start_staging_ranges(int num_new) Sint need; if (r[dst].modules) { - erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated); + erts_atomic_add_nob(&mem_used, -r[dst].allocated); erts_free(ERTS_ALC_T_MODULE_REFS, r[dst].modules); } need = r[dst].allocated = r[src].n + num_new; - erts_smp_atomic_add_nob(&mem_used, need); + erts_atomic_add_nob(&mem_used, need); write_ptr = erts_alloc(ERTS_ALC_T_MODULE_REFS, need * sizeof(Range)); r[dst].modules = write_ptr; @@ -135,7 +135,7 @@ erts_end_staging_ranges(int commit) if (rp->start < RANGE_END(rp)) { /* Only insert a module that has not been purged. */ write_ptr->start = rp->start; - erts_smp_atomic_init_nob(&write_ptr->end, + erts_atomic_init_nob(&write_ptr->end, (erts_aint_t)(RANGE_END(rp))); write_ptr++; } @@ -161,7 +161,7 @@ erts_end_staging_ranges(int commit) } r[dst].modules = mp; CHECK(&r[dst]); - erts_smp_atomic_set_nob(&r[dst].mid, + erts_atomic_set_nob(&r[dst].mid, (erts_aint_t) (r[dst].modules + r[dst].n / 2)); } @@ -182,7 +182,7 @@ erts_update_ranges(BeamInstr* code, Uint size) */ if (r[dst].modules == NULL) { Sint need = 128; - erts_smp_atomic_add_nob(&mem_used, need); + erts_atomic_add_nob(&mem_used, need); r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS, need * sizeof(Range)); r[dst].allocated = need; @@ -192,7 +192,7 @@ erts_update_ranges(BeamInstr* code, Uint size) ASSERT(r[dst].modules); write_ptr->start = code; - erts_smp_atomic_init_nob(&(write_ptr->end), + erts_atomic_init_nob(&(write_ptr->end), (erts_aint_t)(((byte *)code) + size)); write_ptr++; } @@ -201,13 +201,13 @@ void erts_remove_from_ranges(BeamInstr* code) { Range* rp = find_range(code); - erts_smp_atomic_set_nob(&rp->end, (erts_aint_t)rp->start); + erts_atomic_set_nob(&rp->end, (erts_aint_t)rp->start); } UWord erts_ranges_sz(void) { - return erts_smp_atomic_read_nob(&mem_used) * sizeof(Range); + return erts_atomic_read_nob(&mem_used) * sizeof(Range); } /* @@ -262,7 +262,7 @@ find_range(BeamInstr* pc) ErtsCodeIndex active = erts_active_code_ix(); Range* low = r[active].modules; Range* high = low + r[active].n; - Range* mid = (Range *) erts_smp_atomic_read_nob(&r[active].mid); + Range* mid = (Range *) erts_atomic_read_nob(&r[active].mid); CHECK(&r[active]); while (low < high) { @@ -271,7 +271,7 @@ find_range(BeamInstr* pc) } else if (pc >= RANGE_END(mid)) { low = mid + 1; } else { - erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid); + erts_atomic_set_nob(&r[active].mid, (erts_aint_t) mid); return mid; } mid = low + (high-low) / 2; diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index b6595d2a5d..80f391e91e 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -57,10 +57,10 @@ static Export dsend_continue_trap_export; Export *erts_convert_time_unit_trap = NULL; static Export *await_msacc_mod_trap = NULL; -static erts_smp_atomic32_t msacc; +static erts_atomic32_t msacc; static Export *await_sched_wall_time_mod_trap; -static 
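/*
 * [Editor's note] Sketch only. find_range() in beam_ranges.c above
 * binary-searches the sorted module ranges, starting from a cached midpoint
 * because successive program-counter lookups usually hit the same module,
 * and a purged module never matches because its end has been set equal to
 * its start. The cache is a plain variable here; the real code reads and
 * writes it with erts_atomic operations.
 */
#include <stddef.h>
#include <stdio.h>

struct range { const char *start; const char *end; };   /* [start, end) */

static size_t cached_mid;                                /* hint for the next lookup */

static const struct range *
find_range(const struct range *r, size_t n, const char *pc)
{
    size_t lo = 0, hi = n, mid = cached_mid < n ? cached_mid : n / 2;
    while (lo < hi) {
        if (pc < r[mid].start)      hi = mid;
        else if (pc >= r[mid].end)  lo = mid + 1;
        else { cached_mid = mid; return &r[mid]; }
        mid = lo + (hi - lo) / 2;
    }
    return NULL;
}

int main(void)
{
    static char code[200];
    struct range r[2] = {
        { code, code + 100 },          /* module A */
        { code + 100, code + 100 }     /* module B, purged: end == start */
    };
    printf("%d %d\n",
           find_range(r, 2, code + 10)  != NULL,   /* 1: inside module A */
           find_range(r, 2, code + 150) != NULL);  /* 0: purged module   */
    return 0;
}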
erts_smp_atomic32_t sched_wall_time; +static erts_atomic32_t sched_wall_time; #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) @@ -98,14 +98,12 @@ static int insert_internal_link(Process* p, Eterm rpid) ASSERT(is_internal_pid(rpid)); -#ifdef ERTS_SMP if (IS_TRACED(p) && (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1))) { rp_locks = ERTS_PROC_LOCKS_ALL; } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); -#endif + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); /* get a pointer to the process struct of the linked process */ rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK, @@ -113,7 +111,7 @@ static int insert_internal_link(Process* p, Eterm rpid) ERTS_P2P_FLG_ALLOW_OTHER_X); if (!rp) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); return 0; } @@ -139,10 +137,10 @@ static int insert_internal_link(Process* p, Eterm rpid) rp, am_getting_linked, p->common.id); if (p == rp) - erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN); else { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, rp_locks); } return 1; @@ -178,13 +176,13 @@ BIF_RETTYPE link_1(BIF_ALIST_1) goto res_no_proc; } - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); if (erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1) >= 0) send_link_signal = 1; /* else: already linked */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); if (send_link_signal) { Eterm ref; @@ -212,11 +210,11 @@ BIF_RETTYPE link_1(BIF_ALIST_1) if (is_external_pid(BIF_ARG_1)) { - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); /* We may earn time by checking first that we're not linked already */ if (erts_lookup_link(ERTS_P_LINKS(BIF_P), BIF_ARG_1) != NULL) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); } else { @@ -225,7 +223,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1) ErtsDSigData dsd; dep = external_pid_dist_entry(BIF_ARG_1); if (dep == erts_this_dist_entry) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); goto res_no_proc; } @@ -234,13 +232,13 @@ BIF_RETTYPE link_1(BIF_ALIST_1) case ERTS_DSIG_PREP_NOT_ALIVE: /* Let the dlink trap handle it */ case ERTS_DSIG_PREP_NOT_CONNECTED: - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_TRAP1(dlink_trap, BIF_P, BIF_ARG_1); case ERTS_DSIG_PREP_CONNECTED: /* We are connected. 
Setup link and send link signal */ - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1); lnk = erts_add_or_lookup_link(&(dep->nlinks), @@ -249,9 +247,9 @@ BIF_RETTYPE link_1(BIF_ALIST_1) ASSERT(lnk != NULL); erts_add_link(&ERTS_LINK_ROOT(lnk), LINK_PID, BIF_ARG_1); - erts_smp_de_links_unlock(dep); - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_de_runlock(dep); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1); if (code == ERTS_DSIG_SEND_YIELD) @@ -267,11 +265,11 @@ BIF_RETTYPE link_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); res_no_proc: { - erts_aint32_t state = erts_smp_atomic32_read_nob(&BIF_P->state); + erts_aint32_t state = erts_atomic32_read_nob(&BIF_P->state); if (state & ERTS_PSFLG_TRAP_EXIT) { ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN; erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL); - erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks); + erts_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks); BIF_RET(am_true); } else @@ -290,58 +288,41 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) ErtsMonitor *mon; int code; Eterm res = am_false; -#ifndef ERTS_SMP - int stale_mon = 0; -#endif - ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK) + ERTS_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK) == erts_proc_lc_my_proc_locks(c_p)); code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_DSP_RLOCK, 0); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: case ERTS_DSIG_PREP_NOT_CONNECTED: -#ifndef ERTS_SMP - /* XXX Is this possible? Shouldn't this link - previously have been removed if the node - had previously been disconnected. */ - ASSERT(0); - stale_mon = 1; -#endif /* * In the smp case this is possible if the node goes * down just before the call to demonitor. */ if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); dmon = erts_remove_monitor(&dep->monitors, ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (dmon) erts_destroy_monitor(dmon); } mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); res = am_true; break; case ERTS_DSIG_PREP_CONNECTED: - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); dmon = erts_remove_monitor(&dep->monitors, ref); - erts_smp_de_links_unlock(dep); - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_de_runlock(dep); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); if (!dmon) { -#ifndef ERTS_SMP - /* XXX How is this possible? Shouldn't this link - previously have been removed when the distributed - end was removed. */ - ASSERT(0); - stale_mon = 1; -#endif /* * This is possible when smp support is enabled. * 'DOWN' message just arrived. 
@@ -370,18 +351,6 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) return am_internal_error; } -#ifndef ERTS_SMP - if (stale_mon) { - erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); - erts_dsprintf(dsbufp, "Stale process monitor %T to ", ref); - if (is_atom(to)) - erts_dsprintf(dsbufp, "{%T, %T}", to, dep->sysname); - else - erts_dsprintf(dsbufp, "%T", to); - erts_dsprintf(dsbufp, " found\n"); - erts_send_error_to_logger(c_p->group_leader, dsbufp); - } -#endif /* * We aren't allowed to destroy 'mon' until now, since 'to' @@ -391,7 +360,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) lookup and remove */ erts_destroy_monitor(mon); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); return res; } @@ -405,13 +374,9 @@ demonitor_local_process(Process *c_p, Eterm ref, Eterm to, Eterm *res) ERTS_P2P_FLG_ALLOW_OTHER_X); ErtsMonitor *mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); -#ifndef ERTS_SMP - ASSERT(mon); -#else if (!mon) *res = am_false; else -#endif { *res = am_true; erts_destroy_monitor(mon); @@ -420,12 +385,12 @@ demonitor_local_process(Process *c_p, Eterm ref, Eterm to, Eterm *res) ErtsMonitor *rmon; rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); if (rp != c_p) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon != NULL) erts_destroy_monitor(rmon); } else { - ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p); + ERTS_ASSERT_IS_NOT_EXITING(c_p); } } @@ -438,7 +403,7 @@ demonitor_local_port(Process *origin, Eterm ref, Eterm target) if (!port) { BIF_ERROR(origin, BADARG); } - erts_smp_proc_unlock(origin, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(origin, ERTS_PROC_LOCK_LINK); if (port) { Eterm trap_ref; @@ -458,7 +423,7 @@ demonitor_local_port(Process *origin, Eterm ref, Eterm target) } } else { - ERTS_SMP_ASSERT_IS_NOT_EXITING(origin); + ERTS_ASSERT_IS_NOT_EXITING(origin); } BIF_RET(res); } @@ -472,11 +437,10 @@ BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip) ErtsMonitor *mon = NULL; /* The monitor entry to delete */ Eterm to = NIL; /* Monitor link traget */ DistEntry *dep = NULL; /* Target's distribution entry */ - int deref_de = 0; BIF_RETTYPE res = am_false; int unlock_link = 1; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(c_p, ERTS_PROC_LOCK_LINK); if (is_not_internal_ref(ref)) { res = am_badarg; @@ -502,8 +466,6 @@ BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip) ASSERT(is_node_name_atom(to)); dep = erts_sysname_to_connected_dist_entry(to); ASSERT(dep != erts_this_dist_entry); - if (dep) - deref_de = 1; } else if (is_port(to)) { if (port_dist_entry(to) != erts_this_dist_entry) { goto badarg; @@ -521,11 +483,6 @@ BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip) unlock_link = 0; } else { /* Local monitor */ - if (deref_de) { - deref_de = 0; - erts_deref_dist_entry(dep); - } - dep = NULL; demonitor_local_process(c_p, ref, to, &res); } break; @@ -538,14 +495,9 @@ badarg: done: if (unlock_link) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); - - if (deref_de) { - ASSERT(dep); - erts_deref_dist_entry(dep); - } + erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); BIF_RET(res); } @@ -668,12 +620,12 @@ local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean) return ret; 
} - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); rp = erts_pid2proc_opt(p, p_locks, target, ERTS_PROC_LOCK_LINK, ERTS_P2P_FLG_ALLOW_OTHER_X); if (!rp) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); p_locks &= ~ERTS_PROC_LOCK_LINK; if (boolean) ret = am_false; @@ -690,10 +642,10 @@ local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean) erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, target, NIL); erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, NIL); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } - erts_smp_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN); return ret; } @@ -727,7 +679,7 @@ res_no_proc: break; } } - erts_smp_proc_unlock(origin, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(origin, p_locks & ~ERTS_PROC_LOCK_MAIN); BIF_RET(ref); } @@ -742,7 +694,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) Process *proc = NULL; Port *port = NULL; - erts_smp_proc_lock(self, ERTS_PROC_LOCK_LINK); + erts_proc_lock(self, ERTS_PROC_LOCK_LINK); erts_whereis_name(self, p_locks, target_name, &proc, ERTS_PROC_LOCK_LINK, @@ -761,7 +713,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) Eterm item; UseTmpHeap(3,self); - erts_smp_proc_unlock(self, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(self, ERTS_PROC_LOCK_LINK); p_locks &= ~ERTS_PROC_LOCK_LINK; item = TUPLE2(lhp, target_name, erts_this_dist_entry->sysname); @@ -772,7 +724,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) UnUseTmpHeap(3,self); } else if (port) { - erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); p_locks &= ~ERTS_PROC_LOCK_MAIN; switch (erts_port_monitor(self, port, target_name, &ret)) { @@ -793,16 +745,16 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) proc->common.id, target_name); erts_add_monitor(&ERTS_P_MONITORS(proc), MON_TARGET, ret, self->common.id, target_name); - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(proc, ERTS_PROC_LOCK_LINK); } if (p_locks) { - erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); } BIF_RET(ret); badarg: if (p_locks) { - erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); } BIF_ERROR(self, BADARG); } @@ -815,20 +767,20 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2, BIF_RETTYPE ret; int code; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_RLOCK, 0); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: /* Let the dmonitor_p trap handle it */ case ERTS_DSIG_PREP_NOT_CONNECTED: - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); ERTS_BIF_PREP_TRAP2(ret, dmonitor_p_trap, p, bifarg1, bifarg2); break; case ERTS_DSIG_PREP_CONNECTED: if (!(dep->flags & DFLAG_DIST_MONITOR) || (byname && !(dep->flags & DFLAG_DIST_MONITOR_NAME))) { - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_runlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); ERTS_BIF_PREP_ERROR(ret, p, BADARG); } else { @@ -847,16 +799,16 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2, d_name = NIL; } - 
erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, p_trgt, p_name); erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->common.id, d_name); - erts_smp_de_links_unlock(dep); - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_de_runlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); code = erts_dsig_send_monitor(&dsd, p->common.id, target, mon_ref); if (code == ERTS_DSIG_SEND_YIELD) @@ -879,7 +831,6 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2) Eterm target = BIF_ARG_2; BIF_RETTYPE ret; DistEntry *dep = NULL; - int deref_de = 0; /* Only process monitors are implemented */ switch (BIF_ARG_1) { @@ -889,10 +840,10 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2) goto badarg; } ref = erts_make_ref(BIF_P); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); erts_add_monitor(&ERTS_P_MONITORS(BIF_P), MON_TIME_OFFSET, ref, am_clock_service, NIL); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); erts_monitor_time_offset(BIF_P->common.id, ref); BIF_RET(ref); } @@ -939,21 +890,14 @@ local_port: } dep = erts_sysname_to_connected_dist_entry(remote_node); if (dep == erts_this_dist_entry) { - deref_de = 1; ret = local_name_monitor(BIF_P, BIF_ARG_1, name); } else { - if (dep) - deref_de = 1; ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, name, 1); } } else { badarg: ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG); } - if (deref_de) { - deref_de = 0; - erts_deref_dist_entry(dep); - } return ret; } @@ -1009,7 +953,7 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1) so.max_heap_size = H_MAX_SIZE; so.max_heap_flags = H_MAX_FLAGS; so.priority = PRIORITY_NORMAL; - so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + so.max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs); so.scheduler = 0; /* @@ -1150,15 +1094,13 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) } if (is_internal_port(BIF_ARG_1)) { - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); -#ifdef ERTS_SMP + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (ERTS_PROC_PENDING_EXIT(BIF_P)) goto handle_pending_exit; -#endif l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (l) { Port *prt; @@ -1200,14 +1142,12 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) /* Blind removal, we might have trapped or anything, this leaves us in a state where monitors might be inconsistent, but the dist code should take care of it. */ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); -#ifdef ERTS_SMP + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (ERTS_PROC_PENDING_EXIT(BIF_P)) goto handle_pending_exit; -#endif l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); - erts_smp_proc_unlock(BIF_P, + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (l) @@ -1249,7 +1189,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) /* Internal pid... 
*/ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); cp_locks |= ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS; @@ -1258,13 +1198,11 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) BIF_ARG_1, ERTS_PROC_LOCK_LINK, ERTS_P2P_FLG_ALLOW_OTHER_X); -#ifdef ERTS_SMP if (ERTS_PROC_PENDING_EXIT(BIF_P)) { if (rp && rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); goto handle_pending_exit; } -#endif /* unlink and ignore errors */ l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); @@ -1272,7 +1210,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) erts_destroy_link(l); if (!rp) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); + ERTS_ASSERT_IS_NOT_EXITING(BIF_P); } else { rl = erts_remove_link(&ERTS_P_LINKS(rp), BIF_P->common.id); @@ -1280,29 +1218,27 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) erts_destroy_link(rl); if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); cp_locks &= ~ERTS_PROC_LOCK_STATUS; trace_proc(BIF_P, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK), rp, am_getting_unlinked, BIF_P->common.id); } if (rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } - erts_smp_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); -#ifdef ERTS_SMP handle_pending_exit: erts_handle_pending_exit(BIF_P, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK | ERTS_PROC_LOCK_STATUS)); ASSERT(ERTS_PROC_IS_EXITING(BIF_P)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); ERTS_BIF_EXITED(BIF_P); -#endif } BIF_RETTYPE hibernate_3(BIF_ALIST_3) @@ -1314,7 +1250,11 @@ BIF_RETTYPE hibernate_3(BIF_ALIST_3) */ Eterm reg[3]; - if (erts_hibernate(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, reg)) { + reg[0] = BIF_ARG_1; + reg[1] = BIF_ARG_2; + reg[2] = BIF_ARG_3; + + if (erts_hibernate(BIF_P, reg)) { /* * If hibernate succeeded, TRAP. The process will be wait in a * hibernated state if its state is inactive (!ERTS_PSFLG_ACTIVE); @@ -1635,7 +1575,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) if (BIF_ARG_1 == BIF_P->common.id) { rp_locks = ERTS_PROC_LOCKS_ALL; rp = BIF_P; - erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR); } else { rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; @@ -1657,12 +1597,10 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) NIL, NULL, BIF_P == rp ? ERTS_XSIG_FLG_NO_IGN_NORMAL : 0); -#ifdef ERTS_SMP if (rp == BIF_P) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); -#endif + erts_proc_unlock(rp, rp_locks); /* * We may have exited ourselves and may have to take action. */ @@ -1774,21 +1712,19 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) * true. For more info, see implementation of * erts_send_exit_signal(). 
*/ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); if (trap_exit) - state = erts_smp_atomic32_read_bor_mb(&BIF_P->state, + state = erts_atomic32_read_bor_mb(&BIF_P->state, ERTS_PSFLG_TRAP_EXIT); else - state = erts_smp_atomic32_read_band_mb(&BIF_P->state, + state = erts_atomic32_read_band_mb(&BIF_P->state, ~ERTS_PSFLG_TRAP_EXIT); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) { erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); ERTS_BIF_EXITED(BIF_P); } -#endif old_value = (state & ERTS_PSFLG_TRAP_EXIT) ? am_true : am_false; BIF_RET(old_value); @@ -1806,15 +1742,13 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) if (sched == 0) { new = NULL; - state = erts_smp_atomic32_read_band_mb(&BIF_P->state, + state = erts_atomic32_read_band_mb(&BIF_P->state, ~ERTS_PSFLG_BOUND); } else { new = erts_schedid2runq(sched); -#ifdef ERTS_SMP erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new); -#endif - state = erts_smp_atomic32_read_bor_mb(&BIF_P->state, + state = erts_atomic32_read_bor_mb(&BIF_P->state, ERTS_PSFLG_BOUND); } @@ -1895,7 +1829,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) } else { goto error; } - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); old_value = (ERTS_TRACE_FLAGS(BIF_P) & F_SENSITIVE ? am_true : am_false); @@ -1904,7 +1838,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) } else { ERTS_TRACE_FLAGS(BIF_P) &= ~F_SENSITIVE; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); /* make sure to bump all reds so that we get rescheduled immediately so setting takes effect */ BIF_RET2(old_value, CONTEXT_REDS); @@ -1946,15 +1880,11 @@ BIF_RETTYPE process_flag_3(BIF_ALIST_3) Process *rp; Eterm res; -#ifdef ERTS_SMP rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN, BIF_ARG_1, ERTS_PROC_LOCK_MAIN); if (rp == ERTS_PROC_LOCK_BUSY) ERTS_BIF_YIELD3(bif_export[BIF_process_flag_3], BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); -#else - rp = erts_proc_lookup(BIF_ARG_1); -#endif if (!rp) BIF_ERROR(BIF_P, BADARG); @@ -1962,7 +1892,7 @@ BIF_RETTYPE process_flag_3(BIF_ALIST_3) res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3); if (rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); return res; } @@ -2053,6 +1983,7 @@ static Sint remote_send(Process *p, DistEntry *dep, ASSERT(is_atom(to) || is_external_pid(to)); + ctx->dep = dep; code = erts_dsig_prepare(&ctx->dsd, dep, p, ERTS_DSP_NO_LOCK, !ctx->suspend); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: @@ -2254,7 +2185,6 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) if (dep == erts_this_dist_entry) { Eterm id; - erts_deref_dist_entry(dep); if (IS_TRACED_FL(p, F_TRACE_SEND)) trace_send(p, to, msg); if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) @@ -2277,11 +2207,9 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) } ret = remote_send(p, dep, tp[1], to, msg, ctx); - if (ret != SEND_YIELD_CONTINUE) { - if (dep) { - erts_deref_dist_entry(dep); - } - } else { + if (ret == SEND_YIELD_CONTINUE) { + if (dep) + erts_ref_dist_entry(dep); ctx->dep_to_deref = dep; } return ret; @@ -2296,17 +2224,15 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) send_message: { ErtsProcLocks rp_locks = 0; Sint res; -#ifdef ERTS_SMP if (p == rp) 
rp_locks |= ERTS_PROC_LOCK_MAIN; -#endif /* send to local process */ res = erts_send_message(p, rp, &rp_locks, msg, 0); if (erts_use_sender_punish) res *= 4; else res = 0; - erts_smp_proc_unlock(rp, + erts_proc_unlock(rp, p == rp ? (rp_locks & ~ERTS_PROC_LOCK_MAIN) : rp_locks); @@ -4010,14 +3936,14 @@ BIF_RETTYPE halt_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_halt_2], BIF_P, am_undefined, am_undefined); } else { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_exit(pos_int_code, ""); } } else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) { VERBOSE(DEBUG_SYSTEM, ("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_exit(ERTS_ABORT_EXIT, ""); } else if (is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) { @@ -4033,7 +3959,7 @@ BIF_RETTYPE halt_2(BIF_ALIST_2) halt_msg[written] = '\0'; VERBOSE(DEBUG_SYSTEM, ("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg); } else @@ -4219,7 +4145,6 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1) goto bad; if(dep == erts_this_dist_entry) { - erts_deref_dist_entry(dep); BIF_RET(make_internal_pid(make_pid_data(c, b))); } else { @@ -4239,13 +4164,10 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1) etp->data.ui[0] = make_pid_data(c, b); MSO(BIF_P).first = (struct erl_off_heap_header*) etp; - erts_deref_dist_entry(dep); BIF_RET(make_external_pid(etp)); } bad: - if (dep) - erts_deref_dist_entry(dep); if (buf) erts_free(ERTS_ALC_T_TMP, (void *) buf); BIF_ERROR(BIF_P, BADARG); @@ -4290,7 +4212,6 @@ BIF_RETTYPE list_to_port_1(BIF_ALIST_1) goto bad; if(dep == erts_this_dist_entry) { - erts_deref_dist_entry(dep); BIF_RET(make_internal_port(p)); } else { @@ -4310,13 +4231,10 @@ BIF_RETTYPE list_to_port_1(BIF_ALIST_1) etp->data.ui[0] = p; MSO(BIF_P).first = (struct erl_off_heap_header*) etp; - erts_deref_dist_entry(dep); BIF_RET(make_external_port(etp)); } bad: - if (dep) - erts_deref_dist_entry(dep); BIF_ERROR(BIF_P, BADARG); } @@ -4436,12 +4354,9 @@ BIF_RETTYPE list_to_ref_1(BIF_ALIST_1) res = make_external_ref(etp); } - erts_deref_dist_entry(dep); BIF_RET(res); bad: - if (dep) - erts_deref_dist_entry(dep); BIF_ERROR(BIF_P, BADARG); } @@ -4507,9 +4422,9 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) new_member->group_leader = BIF_ARG_1; else { locks &= ~ERTS_PROC_LOCK_STATUS; - erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS); if (new_member == BIF_P - || !(erts_smp_atomic32_read_nob(&new_member->state) + || !(erts_atomic32_read_nob(&new_member->state) & ERTS_PSFLG_DIRTY_RUNNING)) { new_member->group_leader = STORE_NC_IN_PROC(new_member, BIF_ARG_1); @@ -4538,7 +4453,7 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) if (new_member == BIF_P) locks &= ~ERTS_PROC_LOCK_MAIN; if (locks) - erts_smp_proc_unlock(new_member, locks); + erts_proc_unlock(new_member, locks); if (await_x) { /* Wait for new_member to terminate; then badarg */ @@ -4565,9 +4480,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) if (BIF_ARG_1 == am_multi_scheduling) { if (BIF_ARG_2 == am_block || BIF_ARG_2 == am_unblock || BIF_ARG_2 == am_block_normal || BIF_ARG_2 == am_unblock_normal) { -#ifndef ERTS_SMP - BIF_RET(am_disabled); -#else int block = (BIF_ARG_2 == am_block || BIF_ARG_2 == am_block_normal); int normal = (BIF_ARG_2 == am_block_normal @@ -4603,15 
+4515,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); break; } -#endif } } else if (BIF_ARG_1 == am_schedulers_online) { -#ifndef ERTS_SMP - if (BIF_ARG_2 != make_small(1)) - goto error; - else - BIF_RET(make_small(1)); -#else Sint old_no; if (!is_small(BIF_ARG_2)) goto error; @@ -4635,7 +4540,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); break; } -#endif } else if (BIF_ARG_1 == am_fullsweep_after) { Uint16 nval; Uint oval; @@ -4643,7 +4547,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n); - oval = (Uint) erts_smp_atomic32_xchg_nob(&erts_max_gen_gcs, + oval = (Uint) erts_atomic32_xchg_nob(&erts_max_gen_gcs, (erts_aint32_t) nval); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_min_heap_size) { @@ -4653,13 +4557,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); H_MIN_SIZE = erts_next_heap_size(n, 0); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_min_bin_vheap_size) { @@ -4669,13 +4573,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); BIN_VH_MIN_SIZE = erts_next_heap_size(n, 0); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_max_heap_size) { @@ -4693,14 +4597,14 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) hp = HAlloc(BIF_P, sz); old_value = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); H_MAX_SIZE = max_heap_size; H_MAX_FLAGS = max_heap_flags; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(old_value); } else if (BIF_ARG_1 == am_display_items) { @@ -4754,8 +4658,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) } else if (BIF_ARG_1 == make_small(1)) { int i, max; ErtsMessage* mp; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); max = erts_ptab_max(&erts_proc); for (i = 0; i < max; i++) { @@ -4768,7 +4672,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) #endif p->seq_trace_clock = 0; p->seq_trace_lastcnt = 0; - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); mp = p->msg.first; while(mp != NULL) { #ifdef USE_VM_PROBES @@ -4781,14 +4685,14 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) } } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); } else if (BIF_ARG_1 == am_scheduler_wall_time) { if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) { erts_aint32_t new = BIF_ARG_2 == am_true ? 
1 : 0; - erts_aint32_t old = erts_smp_atomic32_xchg_nob(&sched_wall_time, + erts_aint32_t old = erts_atomic32_xchg_nob(&sched_wall_time, new); Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new, 0, 0); ASSERT(is_value(ref)); @@ -4797,7 +4701,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) ref, old ? am_true : am_false); } -#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS) } else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) { Sint old_no; if (!is_small(BIF_ARG_2)) @@ -4823,13 +4726,12 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); break; } -#endif } else if (BIF_ARG_1 == am_time_offset && ERTS_IS_ATOM_STR("finalize", BIF_ARG_2)) { ErtsTimeOffsetState res; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); res = erts_finalize_time_offset(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); switch (res) { case ERTS_TIME_OFFSET_PRELIMINARY: { DECL_AM(preliminary); @@ -4851,7 +4753,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) Eterm threads; if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) { erts_aint32_t new = BIF_ARG_2 == am_true ? ERTS_MSACC_ENABLE : ERTS_MSACC_DISABLE; - erts_aint32_t old = erts_smp_atomic32_xchg_nob(&msacc, new); + erts_aint32_t old = erts_atomic32_xchg_nob(&msacc, new); Eterm ref = erts_msacc_request(BIF_P, new, &threads); if (is_non_value(ref)) BIF_RET(old ? am_true : am_false); @@ -4862,7 +4764,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) threads); } else if (BIF_ARG_2 == am_reset) { Eterm ref = erts_msacc_request(BIF_P, ERTS_MSACC_RESET, &threads); - erts_aint32_t old = erts_smp_atomic32_read_nob(&msacc); + erts_aint32_t old = erts_atomic32_read_nob(&msacc); ASSERT(is_value(ref)); BIF_TRAP3(await_msacc_mod_trap, BIF_P, @@ -4881,9 +4783,9 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) what = ERTS_SCHED_STAT_MODIFY_CLEAR; else goto error; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_sched_stat_modify(what); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", BIF_ARG_1)) { Eterm res = erts_set_cpu_topology(BIF_P, BIF_ARG_2); @@ -5021,7 +4923,6 @@ static BIF_RETTYPE bif_return_trap(BIF_ALIST_2) Eterm res = BIF_ARG_1; switch (BIF_ARG_2) { -#ifdef ERTS_SMP case am_multi_scheduling: { int msb = erts_is_multi_scheduling_blocked(); if (msb > 0) @@ -5032,7 +4933,6 @@ static BIF_RETTYPE bif_return_trap(BIF_ALIST_2) ERTS_INTERNAL_ERROR("Unexpected multi scheduling block state"); break; } -#endif default: break; } @@ -5057,22 +4957,22 @@ static ERTS_INLINE int skip_current_msgq(Process *c_p) { int res; -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_CHECK) erts_proc_lc_chk_only_proc_main(c_p); #endif - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); if (ERTS_PROC_PENDING_EXIT(c_p)) { KILL_CATCHES(c_p); c_p->freason = EXC_EXIT; res = 0; } else { - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); c_p->msg.save = c_p->msg.last; res = 1; } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); return res; } @@ -5175,8 +5075,8 @@ void erts_init_bif(void) await_msacc_mod_trap = erts_export_put(am_erts_internal, am_await_microstate_accounting_modifications, 3); - erts_smp_atomic32_init_nob(&sched_wall_time, 0); - 
erts_smp_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED()); + erts_atomic32_init_nob(&sched_wall_time, 0); + erts_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED()); } /* @@ -5194,7 +5094,7 @@ schedule(Process *c_p, Process *dirty_shadow_proc, Eterm module, Eterm function, int argc, Eterm *argv) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); (void) erts_nif_export_schedule(c_p, dirty_shadow_proc, mfa, pc, (BeamInstr) em_apply_bif, dfunc, ifunc, @@ -5202,7 +5102,6 @@ schedule(Process *c_p, Process *dirty_shadow_proc, argc, argv); } -#ifdef ERTS_DIRTY_SCHEDULERS static BIF_RETTYPE dirty_bif_result(BIF_ALIST_1) { @@ -5245,7 +5144,6 @@ static BIF_RETTYPE dirty_bif_exception(BIF_ALIST_2) BIF_ERROR(BIF_P, freason); } -#endif /* ERTS_DIRTY_SCHEDULERS */ extern BeamInstr* em_call_bif_e; static BIF_RETTYPE call_bif(Process *c_p, Eterm *reg, BeamInstr *I); @@ -5263,15 +5161,13 @@ erts_schedule_bif(Process *proc, Process *c_p, *dirty_shadow_proc; ErtsCodeMFA *mfa; -#ifdef ERTS_DIRTY_SCHEDULERS if (proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) { dirty_shadow_proc = proc; c_p = proc->next; ASSERT(c_p->common.id == dirty_shadow_proc->common.id); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } else -#endif { dirty_shadow_proc = NULL; c_p = proc; @@ -5287,7 +5183,6 @@ erts_schedule_bif(Process *proc, * ibif - indirect bif */ -#ifdef ERTS_DIRTY_SCHEDULERS erts_aint32_t set, mask; mask = (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC); @@ -5310,11 +5205,7 @@ erts_schedule_bif(Process *proc, break; } - (void) erts_smp_atomic32_read_bset_nob(&c_p->state, mask, set); -#else - dbif = call_bif; - ibif = bif; -#endif + (void) erts_atomic32_read_bset_nob(&c_p->state, mask, set); if (i == NULL) { ERTS_INTERNAL_ERROR("Missing instruction pointer"); @@ -5355,7 +5246,7 @@ erts_schedule_bif(Process *proc, } if (dirty_shadow_proc) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); return THE_NON_VALUE; } @@ -5390,7 +5281,6 @@ call_bif(Process *c_p, Eterm *reg, BeamInstr *I) return ret; } -#ifdef ERTS_DIRTY_SCHEDULERS int erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg) @@ -5405,7 +5295,7 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * erts_aint32_t state; ASSERT(!c_p->scheduler_data); - state = erts_smp_atomic32_read_nob(&c_p->state); + state = erts_atomic32_read_nob(&c_p->state); ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING) && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))); ASSERT(esdp); @@ -5419,7 +5309,7 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * bf = (ErtsBifFunc) I[1]; - erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC + erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC)); dirty_shadow_proc = erts_make_dirty_shadow_proc(esdp, c_p); @@ -5434,11 +5324,11 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * c_p_htop = c_p->htop; #endif - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); result = (*bf)(dirty_shadow_proc, reg, I); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); ASSERT(c_p_htop == c_p->htop); ASSERT(dirty_shadow_proc->static_flags & ERTS_STC_FLG_SHADOW_PROC); @@ -5461,7 +5351,7 @@ 
erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * } else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) { /* Dirty BIF did an ordinary trap... */ - ASSERT(!(erts_smp_atomic32_read_nob(&c_p->state) + ASSERT(!(erts_atomic32_read_nob(&c_p->state) & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))); schedule(c_p, dirty_shadow_proc, NULL, NULL, dirty_bif_trap, (void *) dirty_shadow_proc->i, @@ -5484,7 +5374,6 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * return exiting; } -#endif /* ERTS_DIRTY_SCHEDULERS */ #ifdef HARDDEBUG diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h index 01cca90a7a..a2bc883dbe 100644 --- a/erts/emulator/beam/bif.h +++ b/erts/emulator/beam/bif.h @@ -93,7 +93,7 @@ do { \ #define BUMP_REDS(p, gc) do { \ ASSERT(p); \ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\ + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\ (p)->fcalls -= (gc); \ if ((p)->fcalls < 0) { \ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \ @@ -498,10 +498,8 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p, Eterm args[], int nargs); -#ifdef ERTS_DIRTY_SCHEDULERS int erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg); -#endif BIF_RETTYPE erts_schedule_bif(Process *proc, diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 10ca0b5066..f7b4451890 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -154,6 +154,12 @@ bif erlang:spawn_opt/1 bif erlang:setnode/2 bif erlang:setnode/3 bif erlang:dist_exit/3 +bif erlang:dist_get_stat/1 +bif erlang:dist_ctrl_input_handler/2 +bif erlang:dist_ctrl_put_data/2 +bif erlang:dist_ctrl_get_data/1 +bif erlang:dist_ctrl_get_data_notification/1 + # Static native functions in erts_internal bif erts_internal:port_info/1 diff --git a/erts/emulator/beam/bif_instrs.tab b/erts/emulator/beam/bif_instrs.tab new file mode 100644 index 0000000000..0932b8b985 --- /dev/null +++ b/erts/emulator/beam/bif_instrs.tab @@ -0,0 +1,539 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +// ================================================================ +// All guards with zero arguments have special instructions, +// for example: +// +// self/0 +// node/0 +// +// All other guard BIFs take one or two arguments. 
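For illustration only (this fragment is not taken from the diff; it assumes the emulator's Process, Eterm and BeamInstr types are in scope, e.g. via global.h), the calling convention the guard-BIF instructions below rely on can be sketched as follows; the helper name is hypothetical:

    /* A guard BIF receives its arguments in a small temporary register
     * array and reports failure by returning a non-value; a guard turns
     * that into a jump to the next clause, a body turns it into an error. */
    Eterm call_one_arg_guard_bif_sketch(
        Eterm (*bf)(Process *, Eterm *, BeamInstr *),
        Process *c_p, Eterm arg, BeamInstr *I)
    {
        Eterm tmp_reg[1];
        tmp_reg[0] = arg;
        return (*bf)(c_p, tmp_reg, I);   /* is_value(result) == 0 => failed */
    }

This mirrors the CALL_GUARD_BIF / bif1 pair defined below.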
+// ================================================================ + +CALL_GUARD_BIF(BF, TmpReg, Dst) { + Eterm result; + + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS; + PROCESS_MAIN_CHK_LOCKS(c_p); + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + ERTS_CHK_MBUF_SZ(c_p); + result = (*$BF)(c_p, $TmpReg, I); + ERTS_CHK_MBUF_SZ(c_p); + ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_HOLE_CHECK(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(result))) { + $Dst = result; + $NEXT0(); + } +} + +// Guard BIF in head. On failure, ignore the error and jump +// to the code for the next clause. We don't support tracing +// of guard BIFs. + +bif1(Fail, Bif, Src, Dst) { + ErtsBifFunc bf; + Eterm tmp_reg[1]; + + tmp_reg[0] = $Src; + bf = (BifFunction) $Bif; + $CALL_GUARD_BIF(bf, tmp_reg, $Dst); + + $FAIL($Fail); +} + +// +// Guard BIF in body. It can fail like any BIF. No trace support. +// + +bif1_body(Bif, Src, Dst) { + ErtsBifFunc bf; + Eterm tmp_reg[1]; + + tmp_reg[0] = $Src; + bf = (BifFunction) $Bif; + $CALL_GUARD_BIF(bf, tmp_reg, $Dst); + + reg[0] = tmp_reg[0]; + SWAPOUT; + I = handle_error(c_p, I, reg, ubif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// Guard bif in guard with two arguments ('and'/2, 'or'/2, 'xor'/2). +// + +i_bif2(Fail, Bif, Src1, Src2, Dst) { + Eterm tmp_reg[2]; + ErtsBifFunc bf; + + tmp_reg[0] = $Src1; + tmp_reg[1] = $Src2; + bf = (ErtsBifFunc) $Bif; + $CALL_GUARD_BIF(bf, tmp_reg, $Dst); + $FAIL($Fail); +} + +// +// Guard bif in body with two arguments ('and'/2, 'or'/2, 'xor'/2). +// + +i_bif2_body(Bif, Src1, Src2, Dst) { + Eterm tmp_reg[2]; + ErtsBifFunc bf; + + tmp_reg[0] = $Src1; + tmp_reg[1] = $Src2; + bf = (ErtsBifFunc) $Bif; + $CALL_GUARD_BIF(bf, tmp_reg, $Dst); + reg[0] = tmp_reg[0]; + reg[1] = tmp_reg[1]; + SWAPOUT; + I = handle_error(c_p, I, reg, ubif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// Garbage-collecting BIF with one argument in either guard or body. +// + +i_gc_bif1(Fail, Bif, Src, Live, Dst) { + typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); + GcBifFunction bf; + Eterm result; + Uint live = (Uint) $Live; + + x(live) = $Src; + bf = (GcBifFunction) $Bif; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS; + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_CHK_MBUF_SZ(c_p); + result = (*bf)(c_p, reg, live); + ERTS_CHK_MBUF_SZ(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + ERTS_HOLE_CHECK(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */ + $JUMP($Fail); + } + + /* Handle error in body. */ + x(0) = x(live); + I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// Garbage-collecting BIF with two arguments in either guard or body. +// + +i_gc_bif2(Fail, Bif, Live, Src1, Src2, Dst) { + typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); + GcBifFunction bf; + Eterm result; + Uint live = (Uint) $Live; + + /* + * XXX This calling convention does not make sense. 'live' + * should point out the first argument, not the second + * (i.e. 'live' should not be incremented below). 
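(Aside, illustration only: the reason the operands are parked in x registers is that the register array handed to a garbage-collecting BIF is what the collector scans and updates, so the arguments survive any collection the BIF triggers. The sketch below is hypothetical, assumes the emulator's Process/Eterm/Uint types, and mirrors the GcBifFunction typedef used here; it is not a real BIF.)

    Eterm hypothetical_gc_bif_1(Process *c_p, Eterm *reg, Uint live)
    {
        /* The argument was parked at reg[live] by the instruction. */
        Eterm arg = reg[live];
        /* A collection triggered here would update reg[live]; a stale
         * local copy like 'arg' would then have to be re-read. */
        (void) c_p;
        return arg;
    }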
+ */ + x(live) = $Src1; + x(live+1) = $Src2; + live++; + + bf = (GcBifFunction) $Bif; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS; + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_CHK_MBUF_SZ(c_p); + result = (*bf)(c_p, reg, live); + ERTS_CHK_MBUF_SZ(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + ERTS_HOLE_CHECK(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + + if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */ + $JUMP($Fail); + } + + /* Handle error in body. */ + live--; + x(0) = x(live); + x(1) = x(live+1); + I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// Garbage-collecting BIF with three arguments in either guard or body. +// + +i_gc_bif3(Fail, Bif, Live, Src2, Src3, Dst) { + typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); + GcBifFunction bf; + Eterm result; + Uint live = (Uint) $Live; + + /* + * XXX This calling convention does not make sense. 'live' + * should point out the first argument, not the third + * (i.e. 'live' should not be incremented below). + */ + x(live) = x(SCRATCH_X_REG); + x(live+1) = $Src2; + x(live+2) = $Src3; + live += 2; + + bf = (GcBifFunction) $Bif; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS; + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_CHK_MBUF_SZ(c_p); + result = (*bf)(c_p, reg, live); + ERTS_CHK_MBUF_SZ(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + ERTS_HOLE_CHECK(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + + /* Handle error in guard. */ + if (ERTS_LIKELY($Fail != 0)) { + $JUMP($Fail); + } + + /* Handle error in body. */ + live -= 2; + x(0) = x(live); + x(1) = x(live+1); + x(2) = x(live+2); + I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// The most general BIF call. The BIF may build any amount of data +// on the heap. The result is always returned in r(0). 
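A standalone restatement of the reduction test that call_bif (and send, further down) performs before invoking the BIF may help when reading it. The helper name is hypothetical, Sint is the emulator's signed word type, and the reading of neg_o_reds as the extra negative budget available while call saving is active is an assumption, not something stated in this file:

    /* Context-switch before calling the BIF once the reduction budget,
     * including any allowed negative range, is used up. */
    int reductions_exhausted_sketch(Sint fcalls, Sint neg_o_reds)
    {
        return !((fcalls - 1) > 0 || (fcalls - 1) > neg_o_reds);
    }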
+// +call_bif(Exp) { + ErtsBifFunc bf; + Eterm result; + ErlHeapFragment *live_hf_end; + Export *export = (Export*) $Exp; + + if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the bif */ + c_p->arity = GET_BIF_ARITY(export); + c_p->current = &export->info.mfa; + goto context_switch3; + } + + ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(export), + GET_BIF_ADDRESS(export)); + + bf = GET_BIF_ADDRESS(export); + + PRE_BIF_SWAPOUT(c_p); + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS - 1; + if (FCALLS <= 0) { + save_calls(c_p, export); + } + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + live_hf_end = c_p->mbuf; + ERTS_CHK_MBUF_SZ(c_p); + result = (*bf)(c_p, reg, I); + ERTS_CHK_MBUF_SZ(c_p); + ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_HOLE_CHECK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + if (ERTS_IS_GC_DESIRED(c_p)) { + Uint arity = GET_BIF_ARITY(export); + result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result, + reg, arity); + E = c_p->stop; + } + PROCESS_MAIN_CHK_LOCKS(c_p); + HTOP = HEAP_TOP(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + /* We have to update the cache if we are enabled in order + to make sure no book keeping is done after we disabled + msacc. We don't always do this as it is quite expensive. */ + if (ERTS_MSACC_IS_ENABLED_CACHED_X()) { + ERTS_MSACC_UPDATE_CACHE_X(); + } + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); + if (ERTS_LIKELY(is_value(result))) { + r(0) = result; + CHECK_TERM(r(0)); + $NEXT0(); + } else if (c_p->freason == TRAP) { + SET_CP(c_p, I+2); + SET_I(c_p->i); + SWAPIN; + Dispatch(); + } + + /* + * Error handling. SWAPOUT is not needed because it was done above. + */ + ASSERT(c_p->stop == E); + I = handle_error(c_p, I, reg, &export->info.mfa); + goto post_error_handling; +} + +// +// Send is almost a standard call-BIF with two arguments, except for: +// 1. It cannot be traced. +// 2. There is no pointer to the send_2 function stored in +// the instruction. 
+// + +send() { + Eterm result; + + if (!(FCALLS > 0 || FCALLS > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the bif */ + c_p->arity = 2; + c_p->current = NULL; + goto context_switch3; + } + + PRE_BIF_SWAPOUT(c_p); + c_p->fcalls = FCALLS - 1; + result = erl_send(c_p, r(0), x(1)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + HTOP = HEAP_TOP(c_p); + FCALLS = c_p->fcalls; + if (ERTS_LIKELY(is_value(result))) { + r(0) = result; + CHECK_TERM(r(0)); + } else if (c_p->freason == TRAP) { + SET_CP(c_p, I+1); + SET_I(c_p->i); + SWAPIN; + Dispatch(); + } else { + goto find_func_info; + } +} + +call_nif := nif_bif.call_nif.epilogue; +apply_bif := nif_bif.apply_bif.epilogue; + +nif_bif.head() { + Eterm nif_bif_result; + Eterm bif_nif_arity; + BifFunction vbf; + ErlHeapFragment *live_hf_end; + ErtsCodeMFA *codemfa; +} + +nif_bif.call_nif() { + /* + * call_nif is always first instruction in function: + * + * I[-3]: Module + * I[-2]: Function + * I[-1]: Arity + * I[0]: &&call_nif + * I[1]: Function pointer to NIF function + * I[2]: Pointer to erl_module_nif + * I[3]: Function pointer to dirty NIF + * + * This layout is determined by the NifExport struct + */ + + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF); + + codemfa = erts_code_to_codemfa(I); + + c_p->current = codemfa; /* current and vbf set to please handle_error */ + + DTRACE_NIF_ENTRY(c_p, codemfa); + + HEAVY_SWAPOUT; + + PROCESS_MAIN_CHK_LOCKS(c_p); + bif_nif_arity = codemfa->arity; + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + { + typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]); + NifF* fp = vbf = (NifF*) I[1]; + struct enif_environment_t env; + ASSERT(c_p->scheduler_data); + live_hf_end = c_p->mbuf; + ERTS_CHK_MBUF_SZ(c_p); + erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL); + nif_bif_result = (*fp)(&env, bif_nif_arity, reg); + if (env.exception_thrown) + nif_bif_result = THE_NON_VALUE; + erts_post_nif(&env); + ERTS_CHK_MBUF_SZ(c_p); + + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); + ASSERT(!env.exiting); + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + } + + DTRACE_NIF_RETURN(c_p, codemfa); +} + +nif_bif.apply_bif() { + /* + * At this point, I points to the code[0] in the export entry for + * the BIF: + * + * code[-3]: Module + * code[-2]: Function + * code[-1]: Arity + * code[0]: &&apply_bif + * code[1]: Function pointer to BIF function + */ + + if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the bif */ + goto context_switch; + } + + codemfa = erts_code_to_codemfa(I); + + ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)Arg(0)); + + + /* In case we apply process_info/1,2 or load_nif/1 */ + c_p->current = codemfa; + $SET_CP_I_ABS(I); /* In case we apply check_process_code/2. */ + c_p->arity = 0; /* To allow garbage collection on ourselves + * (check_process_code/2). 
+ */ + DTRACE_BIF_ENTRY(c_p, codemfa); + + SWAPOUT; + ERTS_DBG_CHK_REDS(c_p, FCALLS - 1); + c_p->fcalls = FCALLS - 1; + vbf = (BifFunction) Arg(0); + PROCESS_MAIN_CHK_LOCKS(c_p); + bif_nif_arity = codemfa->arity; + ASSERT(bif_nif_arity <= 4); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + { + ErtsBifFunc bf = vbf; + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + live_hf_end = c_p->mbuf; + ERTS_CHK_MBUF_SZ(c_p); + nif_bif_result = (*bf)(c_p, reg, I); + ERTS_CHK_MBUF_SZ(c_p); + ASSERT(!ERTS_PROC_IS_EXITING(c_p) || + is_non_value(nif_bif_result)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + } + /* We have to update the cache if we are enabled in order + to make sure no book keeping is done after we disabled + msacc. We don't always do this as it is quite expensive. */ + if (ERTS_MSACC_IS_ENABLED_CACHED_X()) + ERTS_MSACC_UPDATE_CACHE_X(); + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); + DTRACE_BIF_RETURN(c_p, codemfa); +} + +nif_bif.epilogue() { + ERTS_REQ_PROC_MAIN_LOCK(c_p); + ERTS_HOLE_CHECK(c_p); + if (ERTS_IS_GC_DESIRED(c_p)) { + nif_bif_result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, + nif_bif_result, + reg, bif_nif_arity); + } + SWAPIN; /* There might have been a garbage collection. */ + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(nif_bif_result))) { + r(0) = nif_bif_result; + CHECK_TERM(r(0)); + SET_I(c_p->cp); + c_p->cp = 0; + Goto(*I); + } else if (c_p->freason == TRAP) { + SET_I(c_p->i); + if (c_p->flags & F_HIBERNATE_SCHED) { + c_p->flags &= ~F_HIBERNATE_SCHED; + goto do_schedule; + } + Dispatch(); + } + I = handle_error(c_p, c_p->cp, reg, c_p->current); + goto post_error_handling; +} diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 76a0c5c716..35b2365655 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -109,8 +109,8 @@ process_killer(void) ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; erts_aint32_t state; erts_proc_inc_refc(rp); - erts_smp_proc_lock(rp, rp_locks); - state = erts_smp_atomic32_read_acqb(&rp->state); + erts_proc_lock(rp, rp_locks); + state = erts_atomic32_read_acqb(&rp->state); if (state & (ERTS_PSFLG_FREE | ERTS_PSFLG_EXITING | ERTS_PSFLG_ACTIVE @@ -132,7 +132,7 @@ process_killer(void) NULL, 0); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); } case 'n': br = 1; break; @@ -219,7 +219,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) /* Display the state */ erts_print(to, to_arg, "State: "); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); erts_dump_process_state(to, to_arg, state); if (state & ERTS_PSFLG_GC) { garbing = 1; @@ -258,7 +258,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) erts_print(to, to_arg, "Spawned by: %T\n", p->parent); approx_started = (time_t) p->approx_started; erts_print(to, to_arg, "Started: %s", ctime(&approx_started)); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); erts_print(to, to_arg, "Message queue length: %d\n", p->msg.len); /* display the message queue only if there is anything in it */ @@ -344,9 +344,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) erts_program_counter_info(to, to_arg, p); } else { erts_print(to, to_arg, "Stack dump:\n"); -#ifdef ERTS_SMP if (!garbing) -#endif erts_stack_dump(to, to_arg, p); } @@ -358,11 +356,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) static void print_garb_info(fmtfn_t 
to, void *to_arg, Process* p) { -#ifdef ERTS_SMP /* ERTS_SMP: A scheduler is probably concurrently doing gc... */ if (!ERTS_IS_CRASH_DUMPING) return; -#endif erts_print(to, to_arg, "New heap start: %bpX\n", p->heap); erts_print(to, to_arg, "New heap top: %bpX\n", p->htop); erts_print(to, to_arg, "Stack top: %bpX\n", p->stop); @@ -512,7 +508,7 @@ do_break(void) erts_free_read_env(mode); #endif /* __WIN32__ */ - ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(erts_thr_progress_is_blocking()); erts_printf("\n" "BREAK: (a)bort (c)ontinue (p)roc info (i)nfo (l)oaded\n" @@ -698,9 +694,7 @@ static int crash_dump_limited_writer(void* vfdp, char* buf, size_t len) void erl_crash_dump_v(char *file, int line, char* fmt, va_list args) { -#ifdef ERTS_SMP ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */ -#endif int fd; size_t envsz; time_t now; @@ -717,7 +711,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) if (ERTS_SOMEONE_IS_CRASH_DUMPING) return; -#ifdef ERTS_SMP /* Order all managed threads to block, this has to be done first to guarantee that this is the only thread to generate crash dump. */ @@ -741,12 +734,9 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) #endif /* Allow us to pass certain places without locking... */ - erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1); - erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1); + erts_atomic32_set_mb(&erts_writing_erl_crash_dump, 1); + erts_tsd_set(erts_is_crash_dumping_key, (void *) 1); -#else /* !ERTS_SMP */ - erts_writing_erl_crash_dump = 1; -#endif /* ERTS_SMP */ envsz = sizeof(env); /* ERL_CRASH_DUMP_SECONDS not set @@ -841,7 +831,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) erts_print_nif_taints(to, to_arg); erts_cbprintf(to, to_arg, "Atoms: %d\n", atom_table_size()); -#ifdef USE_THREADS /* We want to note which thread it was that called erts_exit */ if (erts_get_scheduler_data()) { erts_cbprintf(to, to_arg, "Calling Thread: scheduler:%d\n", @@ -852,9 +841,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) else erts_cbprintf(to, to_arg, "Calling Thread: %p\n", erts_thr_self()); } -#else - erts_cbprintf(to, to_arg, "Calling Thread: scheduler:1\n"); -#endif #if defined(ERTS_HAVE_TRY_CATCH) @@ -873,7 +859,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) } #endif -#ifdef ERTS_SMP #ifdef ERTS_SYS_SUSPEND_SIGNAL @@ -895,7 +880,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) */ erts_thr_progress_fatal_error_wait(60000); /* Either worked or not... */ -#endif #ifndef ERTS_HAVE_TRY_CATCH /* This is safe to call here, as all schedulers are blocked */ diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab new file mode 100644 index 0000000000..a4d4afe7d4 --- /dev/null +++ b/erts/emulator/beam/bs_instrs.tab @@ -0,0 +1,1023 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// %CopyrightEnd% +// + +%if ARCH_64 +BS_SAFE_MUL(A, B, Fail, Dst) { + Uint64 res = ($A) * ($B); + if (res / $B != $A) { + $Fail; + } + $Dst = res; +} +%else +BS_SAFE_MUL(A, B, Fail, Dst) { + Uint64 res = (Uint64)($A) * (Uint64)($B); + if ((res >> (8*sizeof(Uint))) != 0) { + $Fail; + } + $Dst = res; +} +%endif + +BS_GET_FIELD_SIZE(Bits, Unit, Fail, Dst) { + Sint signed_size; + Uint uint_size; + Uint temp_bits; + + if (is_small($Bits)) { + signed_size = signed_val($Bits); + if (signed_size < 0) { + $Fail; + } + uint_size = (Uint) signed_size; + } else { + if (!term_to_Uint($Bits, &temp_bits)) { + $Fail; + } + uint_size = temp_bits; + } + $BS_SAFE_MUL(uint_size, $Unit, $Fail, $Dst); +} + +BS_GET_UNCHECKED_FIELD_SIZE(Bits, Unit, Fail, Dst) { + Sint signed_size; + Uint uint_size; + Uint temp_bits; + + if (is_small($Bits)) { + signed_size = signed_val($Bits); + if (signed_size < 0) { + $Fail; + } + uint_size = (Uint) signed_size; + } else { + if (!term_to_Uint($Bits, &temp_bits)) { + $Fail; + } + uint_size = temp_bits; + } + $Dst = uint_size * $Unit; +} + +TEST_BIN_VHEAP(VNh, Nh, Live) { + Uint need = $Nh; + if (E - HTOP < need || MSO(c_p).overhead + $VNh >= BIN_VHEAP_SZ(c_p)) { + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + } + HEAP_SPACE_VERIFIED(need); +} + +i_bs_get_binary_all2(Fail, Ms, Live, Unit, Dst) { + ErlBinMatchBuffer *_mb; + Eterm _result; + + $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live); + _mb = ms_matchbuffer($Ms); + if (((_mb->size - _mb->offset) % $Unit) == 0) { + LIGHT_SWAPOUT; + _result = erts_bs_get_binary_all_2(c_p, _mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + ASSERT(is_value(_result)); + $Dst = _result; + } else { + HEAP_SPACE_VERIFIED(0); + $FAIL($Fail); + } +} + +i_bs_get_binary2(Fail, Ms, Live, Sz, Flags, Dst) { + ErlBinMatchBuffer *_mb; + Eterm _result; + Uint _size; + $BS_GET_FIELD_SIZE($Sz, (($Flags) >> 3), $FAIL($Fail), _size); + $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live); + _mb = ms_matchbuffer($Ms); + LIGHT_SWAPOUT; + _result = erts_bs_get_binary_2(c_p, _size, $Flags, _mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(_result)) { + $FAIL($Fail); + } else { + $Dst = _result; + } +} + +i_bs_get_binary_imm2(Fail, Ms, Live, Sz, Flags, Dst) { + ErlBinMatchBuffer *_mb; + Eterm _result; + $GC_TEST(0, heap_bin_size(ERL_ONHEAP_BIN_LIMIT), $Live); + _mb = ms_matchbuffer($Ms); + LIGHT_SWAPOUT; + _result = erts_bs_get_binary_2(c_p, $Sz, $Flags, _mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(_result)) { + $FAIL($Fail); + } else { + $Dst = _result; + } +} + +i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) { + ErlBinMatchBuffer *_mb; + Eterm _result; + Sint _size; + + if (!is_small($Sz) || (_size = unsigned_val($Sz)) > 64) { + $FAIL($Fail); + } + _size *= (($Flags) >> 3); + $GC_TEST(0, FLOAT_SIZE_OBJECT, $Live); + _mb = ms_matchbuffer($Ms); + LIGHT_SWAPOUT; + _result = erts_bs_get_float_2(c_p, _size, ($Flags), _mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(_result)) { + $FAIL($Fail); + } else { + $Dst = _result; + } +} + +i_bs_skip_bits2(Fail, Ms, Bits, Unit) { + ErlBinMatchBuffer *_mb; + size_t new_offset; + Uint _size; + + _mb = ms_matchbuffer($Ms); + $BS_GET_FIELD_SIZE($Bits, $Unit, $FAIL($Fail), _size); + new_offset = _mb->offset + _size; + if (new_offset <= _mb->size) { + _mb->offset = new_offset; + } else { + $FAIL($Fail); + } +} + +i_bs_skip_bits_all2(Fail, Ms, Unit) { + 
ErlBinMatchBuffer *_mb; + _mb = ms_matchbuffer($Ms); + if (((_mb->size - _mb->offset) % $Unit) == 0) { + _mb->offset = _mb->size; + } else { + $FAIL($Fail); + } +} + +i_bs_skip_bits_imm2(Fail, Ms, Bits) { + ErlBinMatchBuffer *_mb; + size_t new_offset; + _mb = ms_matchbuffer($Ms); + new_offset = _mb->offset + ($Bits); + if (new_offset <= _mb->size) { + _mb->offset = new_offset; + } else { + $FAIL($Fail); + } +} + +i_new_bs_put_binary(Fail, Sz, Flags, Src) { + Sint _size; + $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size); + if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), _size))) { + $BADARG($Fail); + } +} + +i_new_bs_put_binary_all(Fail, Src, Unit) { + if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2(($Src), ($Unit)))) { + $BADARG($Fail); + } +} + +i_new_bs_put_binary_imm(Fail, Sz, Src) { + if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), ($Sz)))) { + $BADARG($Fail); + } +} + +i_new_bs_put_float(Fail, Sz, Flags, Src) { + Sint _size; + $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size); + if (!erts_new_bs_put_float(c_p, ($Src), _size, ($Flags))) { + $BADARG($Fail); + } +} + +i_new_bs_put_float_imm(Fail, Sz, Flags, Src) { + if (!erts_new_bs_put_float(c_p, ($Src), ($Sz), ($Flags))) { + $BADARG($Fail); + } +} + +i_new_bs_put_integer(Fail, Sz, Flags, Src) { + Sint _size; + $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size); + if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), _size, ($Flags)))) { + $BADARG($Fail); + } +} + +i_new_bs_put_integer_imm(Fail, Sz, Flags, Src) { + if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), ($Sz), ($Flags)))) { + $BADARG($Fail); + } +} + +# +# i_bs_init* +# + +i_bs_init_fail_heap := bs_init.fail_heap.verify.execute; +i_bs_init_fail := bs_init.fail.verify.execute; +i_bs_init := bs_init.plain.execute; +i_bs_init_heap := bs_init.heap.execute; + +bs_init.head() { + Eterm BsOp1; + Eterm BsOp2; +} + +bs_init.fail_heap(Size, HeapAlloc) { + BsOp1 = $Size; + BsOp2 = $HeapAlloc; +} + +bs_init.fail(Size) { + BsOp1 = $Size; + BsOp2 = 0; +} + +bs_init.plain(Size) { + BsOp1 = $Size; + BsOp2 = 0; +} + +bs_init.heap(Size, HeapAlloc) { + BsOp1 = $Size; + BsOp2 = $HeapAlloc; +} + +bs_init.verify(Fail) { + if (is_small(BsOp1)) { + Sint size = signed_val(BsOp1); + if (size < 0) { + $BADARG($Fail); + } + BsOp1 = (Eterm) size; + } else { + Uint bytes; + + if (!term_to_Uint(BsOp1, &bytes)) { + c_p->freason = bytes; + $FAIL_HEAD_OR_BODY($Fail); + } + if ((bytes >> (8*sizeof(Uint)-3)) != 0) { + $SYSTEM_LIMIT($Fail); + } + BsOp1 = (Eterm) bytes; + } +} + +bs_init.execute(Live, Dst) { + if (BsOp1 <= ERL_ONHEAP_BIN_LIMIT) { + ErlHeapBin* hb; + Uint bin_need; + + bin_need = heap_bin_size(BsOp1); + erts_bin_offset = 0; + erts_writable_bin = 0; + $GC_TEST(0, bin_need+BsOp2+ERL_SUB_BIN_SIZE, $Live); + hb = (ErlHeapBin *) HTOP; + HTOP += bin_need; + hb->thing_word = header_heap_bin(BsOp1); + hb->size = BsOp1; + erts_current_bin = (byte *) hb->data; + $Dst = make_binary(hb); + } else { + Binary* bptr; + ProcBin* pb; + + erts_bin_offset = 0; + erts_writable_bin = 0; + $TEST_BIN_VHEAP(BsOp1 / sizeof(Eterm), + BsOp2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, $Live); + + /* + * Allocate the binary struct itself. + */ + bptr = erts_bin_nrml_alloc(BsOp1); + erts_current_bin = (byte *) bptr->orig_bytes; + + /* + * Now allocate the ProcBin on the heap. 
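(Aside on the system_limit guard in bs_init.verify above: the test (bytes >> (8*sizeof(Uint)-3)) != 0 rejects any byte count whose bit count, bytes * 8, would overflow an unsigned machine word. A self-contained restatement with a hypothetical name:)

    #include <stddef.h>

    /* True if 'bytes' can be expressed in bits without overflowing size_t. */
    int byte_count_fits_in_bits(size_t bytes)
    {
        return (bytes >> (8 * sizeof(size_t) - 3)) == 0;  /* bytes * 8 <= SIZE_MAX */
    }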
+ */ + pb = (ProcBin *) HTOP; + HTOP += PROC_BIN_SIZE; + pb->thing_word = HEADER_PROC_BIN; + pb->size = BsOp1; + pb->next = MSO(c_p).first; + MSO(c_p).first = (struct erl_off_heap_header*) pb; + pb->val = bptr; + pb->bytes = (byte*) bptr->orig_bytes; + pb->flags = 0; + + OH_OVERHEAD(&(MSO(c_p)), BsOp1 / sizeof(Eterm)); + + $Dst = make_binary(pb); + } +} + +# +# i_bs_init_bits* +# + +i_bs_init_bits := bs_init_bits.plain.execute; +i_bs_init_bits_heap := bs_init_bits.heap.execute; +i_bs_init_bits_fail := bs_init_bits.fail.verify.execute; +i_bs_init_bits_fail_heap := bs_init_bits.fail_heap.verify.execute; + +bs_init_bits.head() { + Eterm new_binary; + Eterm num_bits_term; + Uint num_bits; + Uint alloc; + Uint num_bytes; +} + +bs_init_bits.plain(NumBits) { + num_bits = $NumBits; + alloc = 0; +} + +bs_init_bits.heap(NumBits, Alloc) { + num_bits = $NumBits; + alloc = $Alloc; +} + +bs_init_bits.fail(NumBitsTerm) { + num_bits_term = $NumBitsTerm; + alloc = 0; +} + +bs_init_bits.fail_heap(NumBitsTerm, Alloc) { + num_bits_term = $NumBitsTerm; + alloc = $Alloc; +} + +bs_init_bits.verify(Fail) { + if (is_small(num_bits_term)) { + Sint size = signed_val(num_bits_term); + if (size < 0) { + $BADARG($Fail); + } + num_bits = (Uint) size; + } else { + Uint bits; + + if (!term_to_Uint(num_bits_term, &bits)) { + c_p->freason = bits; + $FAIL_HEAD_OR_BODY($Fail); + } + num_bits = (Uint) bits; + } +} + +bs_init_bits.execute(Live, Dst) { + num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3; + if (num_bits & 7) { + alloc += ERL_SUB_BIN_SIZE; + } + if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) { + alloc += heap_bin_size(num_bytes); + } else { + alloc += PROC_BIN_SIZE; + } + $test_heap(alloc, $Live); + + /* num_bits = Number of bits to build + * num_bytes = Number of bytes to allocate in the binary + * alloc = Total number of words to allocate on heap + * Operands: NotUsed NotUsed Dst + */ + if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) { + ErlHeapBin* hb; + + erts_bin_offset = 0; + erts_writable_bin = 0; + hb = (ErlHeapBin *) HTOP; + HTOP += heap_bin_size(num_bytes); + hb->thing_word = header_heap_bin(num_bytes); + hb->size = num_bytes; + erts_current_bin = (byte *) hb->data; + new_binary = make_binary(hb); + + do_bits_sub_bin: + if (num_bits & 7) { + ErlSubBin* sb; + + sb = (ErlSubBin *) HTOP; + HTOP += ERL_SUB_BIN_SIZE; + sb->thing_word = HEADER_SUB_BIN; + sb->size = num_bytes - 1; + sb->bitsize = num_bits & 7; + sb->offs = 0; + sb->bitoffs = 0; + sb->is_writable = 0; + sb->orig = new_binary; + new_binary = make_binary(sb); + } + HEAP_SPACE_VERIFIED(0); + $Dst = new_binary; + } else { + Binary* bptr; + ProcBin* pb; + + erts_bin_offset = 0; + erts_writable_bin = 0; + + /* + * Allocate the binary struct itself. + */ + bptr = erts_bin_nrml_alloc(num_bytes); + erts_current_bin = (byte *) bptr->orig_bytes; + + /* + * Now allocate the ProcBin on the heap. 
+ */ + pb = (ProcBin *) HTOP; + HTOP += PROC_BIN_SIZE; + pb->thing_word = HEADER_PROC_BIN; + pb->size = num_bytes; + pb->next = MSO(c_p).first; + MSO(c_p).first = (struct erl_off_heap_header*) pb; + pb->val = bptr; + pb->bytes = (byte*) bptr->orig_bytes; + pb->flags = 0; + OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm)); + new_binary = make_binary(pb); + goto do_bits_sub_bin; + } +} + +bs_add(Fail, Src1, Src2, Unit, Dst) { + Eterm Op1 = $Src1; + Eterm Op2 = $Src2; + Uint unit = $Unit; + + if (is_both_small(Op1, Op2)) { + Sint Arg1 = signed_val(Op1); + Sint Arg2 = signed_val(Op2); + + if (Arg1 >= 0 && Arg2 >= 0) { + $BS_SAFE_MUL(Arg2, unit, $SYSTEM_LIMIT($Fail), Op1); + Op1 += Arg1; + + store_bs_add_result: + if (Op1 <= MAX_SMALL) { + Op1 = make_small(Op1); + } else { + /* + * May generate a heap fragment, but in this + * particular case it is OK, since the value will be + * stored into an x register (the GC will scan x + * registers for references to heap fragments) and + * there is no risk that value can be stored into a + * location that is not scanned for heap-fragment + * references (such as the heap). + */ + SWAPOUT; + Op1 = erts_make_integer(Op1, c_p); + HTOP = HEAP_TOP(c_p); + } + $Dst = Op1; + $NEXT0(); + } + $BADARG($Fail); + } else { + Uint a; + Uint b; + Uint c; + + /* + * Now we know that one of the arguments is + * not a small. We must convert both arguments + * to Uints and check for errors at the same time. + * + * Error checking is tricky. + * + * If one of the arguments is not numeric or + * not positive, the error reason is BADARG. + * + * Otherwise if both arguments are numeric, + * but at least one argument does not fit in + * an Uint, the reason is SYSTEM_LIMIT. + */ + + if (!term_to_Uint(Op1, &a)) { + if (a == BADARG) { + $BADARG($Fail); + } + if (!term_to_Uint(Op2, &b)) { + c_p->freason = b; + $FAIL_HEAD_OR_BODY($Fail); + } + $SYSTEM_LIMIT($Fail); + } else if (!term_to_Uint(Op2, &b)) { + c_p->freason = b; + $FAIL_HEAD_OR_BODY($Fail); + } + + /* + * The arguments are now correct and stored in a and b. + */ + + $BS_SAFE_MUL(b, unit, $SYSTEM_LIMIT($Fail), c); + Op1 = a + c; + if (Op1 < a) { + /* + * If the result is less than one of the + * arguments, there must have been an overflow. + */ + $SYSTEM_LIMIT($Fail); + } + goto store_bs_add_result; + } + /* No fallthrough */ + ASSERT(0); +} + +bs_put_string(Len, Ptr) { + erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) $Ptr, $Len)); +} + +i_bs_append(Fail, ExtraHeap, Live, Unit, Size, Dst) { + Uint live = $Live; + Uint res; + + HEAVY_SWAPOUT; + reg[live] = x(SCRATCH_X_REG); + res = erts_bs_append(c_p, reg, live, $Size, $ExtraHeap, $Unit); + HEAVY_SWAPIN; + if (is_non_value(res)) { + /* c_p->freason is already set (to BADARG or SYSTEM_LIMIT). */ + $FAIL_HEAD_OR_BODY($Fail); + } + $Dst = res; +} + +i_bs_private_append(Fail, Unit, Size, Src, Dst) { + Eterm res; + + res = erts_bs_private_append(c_p, $Src, $Size, $Unit); + if (is_non_value(res)) { + /* c_p->freason is already set (to BADARG or SYSTEM_LIMIT). */ + $FAIL_HEAD_OR_BODY($Fail); + } + $Dst = res; +} + +bs_init_writable() { + HEAVY_SWAPOUT; + r(0) = erts_bs_init_writable(c_p, r(0)); + HEAVY_SWAPIN; +} + +i_bs_utf8_size(Src, Dst) { + Eterm arg = $Src; + Eterm result; + + /* + * Calculate the number of bytes needed to encode the source + * operand to UTF-8. If the source operand is invalid (e.g. wrong + * type or range) we return a nonsense integer result (0 or 4). We + * can get away with that because we KNOW that bs_put_utf8 will do + * full error checking. 
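(Aside on i_bs_utf8_size: the small-integer comparisons that follow are the standard UTF-8 length boundaries. A self-contained restatement on plain integers, with a hypothetical name:)

    #include <stdint.h>

    /* Bytes needed to UTF-8-encode a code point; mirrors the comparisons
     * against 0x80, 0x800 and 0x10000 in i_bs_utf8_size.  Invalid input
     * is deliberately not checked here, just as the comment above notes
     * that full error checking is left to bs_put_utf8. */
    int utf8_encoded_size(uint32_t cp)
    {
        if (cp < 0x80)          return 1;
        else if (cp < 0x800)    return 2;
        else if (cp < 0x10000)  return 3;
        else                    return 4;
    }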
+ */ + + if (arg < make_small(0x80UL)) { + result = make_small(1); + } else if (arg < make_small(0x800UL)) { + result = make_small(2); + } else if (arg < make_small(0x10000UL)) { + result = make_small(3); + } else { + result = make_small(4); + } + $Dst = result; +} + +i_bs_put_utf8(Fail, Src) { + if (!erts_bs_put_utf8(ERL_BITS_ARGS_1($Src))) { + $BADARG($Fail); + } +} + +i_bs_utf16_size(Src, Dst) { + Eterm arg = $Src; + Eterm result = make_small(2); + + /* + * Calculate the number of bytes needed to encode the source + * operarand to UTF-16. If the source operand is invalid (e.g. wrong + * type or range) we return a nonsense integer result (2 or 4). We + * can get away with that because we KNOW that bs_put_utf16 will do + * full error checking. + */ + + if (arg >= make_small(0x10000UL)) { + result = make_small(4); + } + $Dst = result; +} + +bs_put_utf16(Fail, Flags, Src) { + if (!erts_bs_put_utf16(ERL_BITS_ARGS_2($Src, $Flags))) { + $BADARG($Fail); + } +} + +// Validate a value about to be stored in a binary. +i_bs_validate_unicode(Fail, Src) { + Eterm val = $Src; + + /* + * There is no need to untag the integer, but it IS necessary + * to make sure it is small (if the term is a bignum, it could + * slip through the test, and there is no further test that + * would catch it, since bit syntax construction silently masks + * too big numbers). + */ + if (is_not_small(val) || val > make_small(0x10FFFFUL) || + (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) { + $BADARG($Fail); + } +} + +// Validate a value that has been matched out. +i_bs_validate_unicode_retract(Fail, Src, Ms) { + /* + * There is no need to untag the integer, but it IS necessary + * to make sure it is small (a bignum pointer could fall in + * the valid range). + */ + + Eterm i = $Src; + if (is_not_small(i) || i > make_small(0x10FFFFUL) || + (make_small(0xD800UL) <= i && i <= make_small(0xDFFFUL))) { + Eterm ms = $Ms; /* Match context */ + ErlBinMatchBuffer* mb; + + /* Invalid value. Retract the position in the binary. */ + mb = ms_matchbuffer(ms); + mb->offset -= 32; + $BADARG($Fail); + } +} + + +// +// Matching of binaries. +// + +i_bs_start_match2 := bs_start_match.fetch.execute; + +bs_start_match.head() { + Uint slots; + Uint live; + Eterm header; + Eterm context; +} + +bs_start_match.fetch(Src) { + context = $Src; +} + +bs_start_match.execute(Fail, Live, Slots, Dst) { + if (!is_boxed(context)) { + $FAIL($Fail); + } + header = *boxed_val(context); + slots = $Slots; + live = $Live; + if (header_is_bin_matchstate(header)) { + ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context); + Uint actual_slots = HEADER_NUM_SLOTS(header); + ms->save_offset[0] = ms->mb.offset; + if (actual_slots < slots) { + ErlBinMatchState* dst; + Uint live = $Live; + Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); + + $GC_TEST_PRESERVE(wordsneeded, live, context); + ms = (ErlBinMatchState *) boxed_val(context); + dst = (ErlBinMatchState *) HTOP; + *dst = *ms; + *HTOP = HEADER_BIN_MATCHSTATE(slots); + HTOP += wordsneeded; + HEAP_SPACE_VERIFIED(0); + $Dst = make_matchstate(dst); + } + } else if (is_binary_header(header)) { + Eterm result; + Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); + $GC_TEST_PRESERVE(wordsneeded, live, context); + HEAP_TOP(c_p) = HTOP; +#ifdef DEBUG + c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). 
*/ +#endif + result = erts_bs_start_match_2(c_p, context, slots); + HTOP = HEAP_TOP(c_p); + HEAP_SPACE_VERIFIED(0); + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; + } else { + $FAIL($Fail); + } +} + +bs_test_zero_tail2(Fail, Ctx) { + ErlBinMatchBuffer *_mb; + _mb = (ErlBinMatchBuffer*) ms_matchbuffer($Ctx); + if (_mb->size != _mb->offset) { + $FAIL($Fail); + } +} + +bs_test_tail_imm2(Fail, Ctx, Offset) { + ErlBinMatchBuffer *_mb; + _mb = ms_matchbuffer($Ctx); + if (_mb->size - _mb->offset != $Offset) { + $FAIL($Fail); + } +} + +bs_test_unit(Fail, Ctx, Unit) { + ErlBinMatchBuffer *_mb; + _mb = ms_matchbuffer($Ctx); + if ((_mb->size - _mb->offset) % $Unit) { + $FAIL($Fail); + } +} + +bs_test_unit8(Fail, Ctx) { + ErlBinMatchBuffer *_mb; + _mb = ms_matchbuffer($Ctx); + if ((_mb->size - _mb->offset) & 7) { + $FAIL($Fail); + } +} + +i_bs_get_integer_8(Ctx, Fail, Dst) { + Eterm _result; + ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx); + + if (_mb->size - _mb->offset < 8) { + $FAIL($Fail); + } + if (BIT_OFFSET(_mb->offset) != 0) { + _result = erts_bs_get_integer_2(c_p, 8, 0, _mb); + } else { + _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]); + _mb->offset += 8; + } + $Dst = _result; +} + +i_bs_get_integer_16(Ctx, Fail, Dst) { + Eterm _result; + ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx); + + if (_mb->size - _mb->offset < 16) { + $FAIL($Fail); + } + if (BIT_OFFSET(_mb->offset) != 0) { + _result = erts_bs_get_integer_2(c_p, 16, 0, _mb); + } else { + _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset))); + _mb->offset += 16; + } + $Dst = _result; +} + +%if ARCH_64 +i_bs_get_integer_32(Ctx, Fail, Dst) { + Uint32 _integer; + ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx); + + if (_mb->size - _mb->offset < 32) { + $FAIL($Fail); + } + if (BIT_OFFSET(_mb->offset) != 0) { + _integer = erts_bs_get_unaligned_uint32(_mb); + } else { + _integer = get_int32(_mb->base + _mb->offset/8); + } + _mb->offset += 32; + $Dst = make_small(_integer); +} +%endif + +i_bs_get_integer_imm := bs_get_integer.fetch.execute; +i_bs_get_integer_small_imm := bs_get_integer.fetch_small.execute; + +bs_get_integer.head() { + Eterm Ms, Sz; +} + +bs_get_integer.fetch(Ctx, Size, Live) { + Uint wordsneeded; + Ms = $Ctx; + Sz = $Size; + wordsneeded = 1+WSIZE(NBYTES(Sz)); + $GC_TEST_PRESERVE(wordsneeded, $Live, Ms); +} + +bs_get_integer.fetch_small(Ctx, Size) { + Ms = $Ctx; + Sz = $Size; +} + +bs_get_integer.execute(Fail, Flags, Dst) { + ErlBinMatchBuffer* mb; + Eterm result; + + mb = ms_matchbuffer(Ms); + LIGHT_SWAPOUT; + result = erts_bs_get_integer_2(c_p, Sz, $Flags, mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; +} + +i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) { + Uint flags; + Uint size; + Eterm ms; + ErlBinMatchBuffer* mb; + Eterm result; + + flags = $FlagsAndUnit; + ms = $Ms; + $BS_GET_FIELD_SIZE($Sz, (flags >> 3), $FAIL($Fail), size); + if (size >= SMALL_BITS) { + Uint wordsneeded; + /* Check bits size before potential gc. + * We do not want a gc and then realize we don't need + * the allocated space (i.e. if the op fails). + * + * Remember to re-acquire the matchbuffer after gc. 
+ */ + + mb = ms_matchbuffer(ms); + if (mb->size - mb->offset < size) { + $FAIL($Fail); + } + wordsneeded = 1+WSIZE(NBYTES((Uint) size)); + $GC_TEST_PRESERVE(wordsneeded, $Live, ms); + } + mb = ms_matchbuffer(ms); + LIGHT_SWAPOUT; + result = erts_bs_get_integer_2(c_p, size, flags, mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; +} + +i_bs_get_utf8(Ctx, Fail, Dst) { + ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx); + Eterm result = erts_bs_get_utf8(mb); + + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; +} + +i_bs_get_utf16(Ctx, Fail, Flags, Dst) { + ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx); + Eterm result = erts_bs_get_utf16(mb, $Flags); + + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; +} + +bs_context_to_binary := ctx_to_bin.fetch.execute; +i_bs_get_binary_all_reuse := ctx_to_bin.fetch_bin.execute; + +ctx_to_bin.head() { + Eterm context; + ErlBinMatchBuffer* mb; + ErlSubBin* sb; + Uint size; + Uint offs; + Uint orig; + Uint hole_size; +} + +ctx_to_bin.fetch(Src) { + context = $Src; + if (is_boxed(context) && + header_is_bin_matchstate(*boxed_val(context))) { + ErlBinMatchState* ms; + ms = (ErlBinMatchState *) boxed_val(context); + mb = &ms->mb; + offs = ms->save_offset[0]; + size = mb->size - offs; + } else { + $NEXT0(); + } +} + +ctx_to_bin.fetch_bin(Src, Fail, Unit) { + context = $Src; + mb = ms_matchbuffer(context); + size = mb->size - mb->offset; + if (size % $Unit != 0) { + $FAIL($Fail); + } + offs = mb->offset; +} + +ctx_to_bin.execute() { + orig = mb->orig; + sb = (ErlSubBin *) boxed_val(context); + hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE; + sb->thing_word = HEADER_SUB_BIN; + sb->size = BYTE_OFFSET(size); + sb->bitsize = BIT_OFFSET(size); + sb->offs = BYTE_OFFSET(offs); + sb->bitoffs = BIT_OFFSET(offs); + sb->is_writable = 0; + sb->orig = orig; + if (hole_size) { + sb[1].thing_word = make_pos_bignum_header(hole_size-1); + } +} + +i_bs_match_string(Ctx, Fail, Bits, Ptr) { + byte* bytes = (byte *) $Ptr; + Uint bits = $Bits; + ErlBinMatchBuffer* mb; + Uint offs; + + mb = ms_matchbuffer($Ctx); + if (mb->size - mb->offset < bits) { + $FAIL($Fail); + } + offs = mb->offset & 7; + if (offs == 0 && (bits & 7) == 0) { + if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) { + $FAIL($Fail); + } + } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) { + $FAIL($Fail); + } + mb->offset += bits; +} + +i_bs_save2(Src, Slot) { + ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src); + _ms->save_offset[$Slot] = _ms->mb.offset; +} + +i_bs_restore2(Src, Slot) { + ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src); + _ms->mb.offset = _ms->save_offset[$Slot]; +} diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c index 8a3d1b20b4..34e46f5f33 100644 --- a/erts/emulator/beam/code_ix.c +++ b/erts/emulator/beam/code_ix.c @@ -34,8 +34,8 @@ # define CIX_TRACE(text) #endif -erts_smp_atomic32_t the_active_code_index; -erts_smp_atomic32_t the_staging_code_index; +erts_atomic32_t the_active_code_index; +erts_atomic32_t the_staging_code_index; static Process* code_writing_process = NULL; struct code_write_queue_item { @@ -43,7 +43,7 @@ struct code_write_queue_item { struct code_write_queue_item* next; }; static struct code_write_queue_item* code_write_queue = NULL; -static erts_smp_mtx_t code_write_permission_mtx; +static erts_mtx_t code_write_permission_mtx; #ifdef ERTS_ENABLE_LOCK_CHECK static 
erts_tsd_key_t has_code_write_permission; @@ -55,9 +55,9 @@ void erts_code_ix_init(void) * single threaded with active and staging set both to zero. * Preloading is finished by a commit that will set things straight. */ - erts_smp_atomic32_init_nob(&the_active_code_index, 0); - erts_smp_atomic32_init_nob(&the_staging_code_index, 0); - erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL, + erts_atomic32_init_nob(&the_active_code_index, 0); + erts_atomic32_init_nob(&the_staging_code_index, 0); + erts_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); #ifdef ERTS_ENABLE_LOCK_CHECK erts_tsd_key_create(&has_code_write_permission, @@ -91,9 +91,9 @@ void erts_commit_staging_code_ix(void) /* We need to this lock as we are now making the staging export table active */ export_staging_lock(); ix = erts_staging_code_ix(); - erts_smp_atomic32_set_nob(&the_active_code_index, ix); + erts_atomic32_set_nob(&the_active_code_index, ix); ix = (ix + 1) % ERTS_NUM_CODE_IX; - erts_smp_atomic32_set_nob(&the_staging_code_index, ix); + erts_atomic32_set_nob(&the_staging_code_index, ix); export_staging_unlock(); erts_tracer_nif_clear(); CIX_TRACE("activate"); @@ -115,12 +115,10 @@ void erts_abort_staging_code_ix(void) int erts_try_seize_code_write_permission(Process* c_p) { int success; -#ifdef ERTS_SMP - ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */ -#endif + ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */ ASSERT(c_p != NULL); - erts_smp_mtx_lock(&code_write_permission_mtx); + erts_mtx_lock(&code_write_permission_mtx); success = (code_writing_process == NULL); if (success) { code_writing_process = c_p; @@ -138,21 +136,21 @@ int erts_try_seize_code_write_permission(Process* c_p) code_write_queue = qitem; erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); } - erts_smp_mtx_unlock(&code_write_permission_mtx); + erts_mtx_unlock(&code_write_permission_mtx); return success; } void erts_release_code_write_permission(void) { - erts_smp_mtx_lock(&code_write_permission_mtx); - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + erts_mtx_lock(&code_write_permission_mtx); + ERTS_LC_ASSERT(erts_has_code_write_permission()); while (code_write_queue != NULL) { /* unleash the entire herd */ struct code_write_queue_item* qitem = code_write_queue; - erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(qitem->p)) { erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS); code_write_queue = qitem->next; erts_proc_dec_refc(qitem->p); erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem); @@ -161,7 +159,7 @@ void erts_release_code_write_permission(void) #ifdef ERTS_ENABLE_LOCK_CHECK erts_tsd_set(has_code_write_permission, (void *) 0); #endif - erts_smp_mtx_unlock(&code_write_permission_mtx); + erts_mtx_unlock(&code_write_permission_mtx); } #ifdef ERTS_ENABLE_LOCK_CHECK diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h index a28b0cd36e..9e3280cd98 100644 --- a/erts/emulator/beam/code_ix.h +++ b/erts/emulator/beam/code_ix.h @@ -205,16 +205,16 @@ ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I) return mfa; } -extern erts_smp_atomic32_t the_active_code_index; -extern erts_smp_atomic32_t the_staging_code_index; +extern erts_atomic32_t the_active_code_index; +extern erts_atomic32_t the_staging_code_index; 
ERTS_GLB_INLINE ErtsCodeIndex erts_active_code_ix(void) { - return erts_smp_atomic32_read_nob(&the_active_code_index); + return erts_atomic32_read_nob(&the_active_code_index); } ERTS_GLB_INLINE ErtsCodeIndex erts_staging_code_ix(void) { - return erts_smp_atomic32_read_nob(&the_staging_code_index); + return erts_atomic32_read_nob(&the_staging_code_index); } #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index fefde256d7..10bf197405 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -845,7 +845,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint funp = (ErlFunThing *) tp; funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*) funp; - erts_smp_refc_inc(&funp->fe->refc, 2); + erts_refc_inc(&funp->fe->refc, 2); *argp = make_fun(tp); } break; @@ -854,7 +854,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint case EXTERNAL_REF_SUBTAG: { ExternalThing *etp = (ExternalThing *) objp; - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } L_off_heap_node_container_common: { @@ -1531,7 +1531,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, } funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*) funp; - erts_smp_refc_inc(&funp->fe->refc, 2); + erts_refc_inc(&funp->fe->refc, 2); goto cleanup_next; } case MAP_SUBTAG: @@ -1658,7 +1658,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, case EXTERNAL_REF_SUBTAG: { ExternalThing *etp = (ExternalThing *) ptr; - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } off_heap_node_container_common: { @@ -1855,7 +1855,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) case FUN_SUBTAG: { ErlFunThing* funp = (ErlFunThing *) (tp-1); - erts_smp_refc_inc(&funp->fe->refc, 2); + erts_refc_inc(&funp->fe->refc, 2); } goto off_heap_common; case EXTERNAL_PID_SUBTAG: @@ -1863,7 +1863,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) case EXTERNAL_REF_SUBTAG: { ExternalThing* etp = (ExternalThing *) (tp-1); - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } off_heap_common: { diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 09fdb897f5..bc168fc58d 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -121,7 +121,7 @@ Export* dexit_trap = NULL; Export* dmonitor_p_trap = NULL; /* local variables */ - +static Export *dist_ctrl_put_data_trap; /* forward declarations */ @@ -130,8 +130,8 @@ static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy); static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm); static void init_nodes_monitors(void); -static erts_smp_atomic_t no_caches; -static erts_smp_atomic_t no_nodes; +static erts_atomic_t no_caches; +static erts_atomic_t no_nodes; struct { Eterm reason; @@ -144,8 +144,8 @@ delete_cache(ErtsAtomCache *cache) { if (cache) { erts_free(ERTS_ALC_T_DCACHE, (void *) cache); - ASSERT(erts_smp_atomic_read_nob(&no_caches) > 0); - erts_smp_atomic_dec_nob(&no_caches); + ASSERT(erts_atomic_read_nob(&no_caches) > 0); + erts_atomic_dec_nob(&no_caches); } } @@ -156,14 +156,12 @@ create_cache(DistEntry *dep) int i; ErtsAtomCache *cp; - ERTS_SMP_LC_ASSERT( - is_internal_port(dep->cid) - && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); + ERTS_LC_ASSERT(is_nil(dep->cid)); ASSERT(!dep->cache); 
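/*
 * The copy_struct()/copy_shared_perform() hunks above repeatedly call
 * erts_refc_inc(&..., 2); the second argument acts as a debug-checked lower
 * bound on the resulting count (an off-heap object being copied must already
 * be referenced).  A simplified standalone model of that pattern, using C11
 * atomics and plain assert() instead of the ERTS refc type:
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_long counter; } refc_t;

static void refc_init(refc_t *r, long val) {
    atomic_init(&r->counter, val);
}

/* Increment and, in debug builds, check that the new value is at least
 * `min`; with NDEBUG the check compiles away, as in the non-debug emulator. */
static void refc_inc(refc_t *r, long min) {
    long val = atomic_fetch_add(&r->counter, 1) + 1;
    assert(val >= min);
    (void) val;
    (void) min;
}

int main(void) {
    refc_t fe_refc;
    refc_init(&fe_refc, 1);   /* one existing reference */
    refc_inc(&fe_refc, 2);    /* a copy adds another; expect >= 2 afterwards */
    printf("refc=%ld\n", atomic_load(&fe_refc.counter));
    return 0;
}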
dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE, sizeof(ErtsAtomCache)); - erts_smp_atomic_inc_nob(&no_caches); + erts_atomic_inc_nob(&no_caches); for (i = 0; i < sizeof(cp->in_arr)/sizeof(cp->in_arr[0]); i++) { cp->in_arr[i] = THE_NON_VALUE; cp->out_arr[i] = THE_NON_VALUE; @@ -172,15 +170,17 @@ create_cache(DistEntry *dep) Uint erts_dist_cache_size(void) { - return (Uint) erts_smp_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache); + return (Uint) erts_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache); } static ErtsProcList * -get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs) +get_suspended_on_de(DistEntry *dep, erts_aint32_t unset_qflgs) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&dep->qlock)); - dep->qflgs &= ~unset_qflgs; - if (dep->qflgs & ERTS_DE_QFLG_EXIT) { + erts_aint32_t qflgs; + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&dep->qlock)); + qflgs = erts_atomic32_read_band_acqb(&dep->qflgs, ~unset_qflgs); + qflgs &= ~unset_qflgs; + if (qflgs & ERTS_DE_QFLG_EXIT) { /* No resume when exit has been scheduled */ return NULL; } @@ -283,17 +283,15 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp) watched = (is_atom(rmon->name) ? TUPLE2(lhp, rmon->name, dep->sysname) : rmon->u.pid); -#ifdef ERTS_SMP rp_locks |= ERTS_PROC_LOCKS_MSG_SEND; - erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND); -#endif + erts_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND); erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process, watched, am_noconnection); erts_destroy_monitor(rmon); } UnUseTmpHeapNoproc(3); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); done: erts_destroy_monitor(mon); } @@ -342,7 +340,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp) trace_proc(NULL, 0, rp, am_getting_unlinked, sublnk->pid); } } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } done: erts_destroy_link(sublnk); @@ -384,7 +382,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) rp = erts_proc_lookup(lnk->pid); if (!rp) goto done; - erts_smp_proc_lock(rp, rp_locks); + erts_proc_lock(rp, rp_locks); rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name); if (rlnk != NULL) { ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE)); @@ -401,7 +399,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) tup = TUPLE2(hp, am_nodedown, name); erts_queue_message(rp, rp_locks, msgp, tup, am_system); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } done: erts_destroy_link(lnk); @@ -413,16 +411,16 @@ set_node_not_alive(void *unused) ErlHeapFragment *bp; Eterm nodename = erts_this_dist_entry->sysname; - ASSERT(erts_smp_atomic_read_nob(&no_nodes) == 0); + ASSERT(erts_atomic_read_nob(&no_nodes) == 0); - erts_smp_thr_progress_block(); + erts_thr_progress_block(); erts_set_this_node(am_Noname, 0); erts_is_alive = 0; send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nodedown.reason); nodedown.reason = NIL; bp = nodedown.bp; nodedown.bp = NULL; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); if (bp) free_message_buffer(bp); } @@ -430,7 +428,7 @@ set_node_not_alive(void *unused) static ERTS_INLINE void dec_no_nodes(void) { - erts_aint_t no = erts_smp_atomic_dec_read_mb(&no_nodes); + erts_aint_t no = erts_atomic_dec_read_mb(&no_nodes); ASSERT(no >= 0); ASSERT(erts_get_scheduler_id()); /* Need to be a scheduler */ if (no == 0) @@ -443,12 +441,40 @@ static ERTS_INLINE void inc_no_nodes(void) { #ifdef DEBUG - erts_aint_t no = erts_smp_atomic_read_nob(&no_nodes); + 
erts_aint_t no = erts_atomic_read_nob(&no_nodes); ASSERT(erts_is_alive ? no > 0 : no == 0); #endif - erts_smp_atomic_inc_mb(&no_nodes); + erts_atomic_inc_mb(&no_nodes); } - + +static void +kill_dist_ctrl_proc(void *vpid) +{ + Eterm pid = (Eterm) vpid; + ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; + Process *rp = erts_pid2proc(NULL, 0, pid, rp_locks); + if (rp) { + erts_send_exit_signal(NULL, rp->common.id, rp, &rp_locks, + am_kill, NIL, NULL, 0); + if (rp_locks) + erts_proc_unlock(rp, rp_locks); + } +} + +static void +schedule_kill_dist_ctrl_proc(Eterm pid) +{ + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + int sched_id = 1; + if (!esdp || ERTS_SCHEDULER_IS_DIRTY(esdp)) + sched_id = 1; + else + sched_id = (int) esdp->no; + erts_schedule_misc_aux_work(sched_id, + kill_dist_ctrl_proc, + (void *) (UWord) pid); +} + /* * proc is currently running or exiting process. */ @@ -458,58 +484,62 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) if (dep == erts_this_dist_entry) { /* Net kernel has died (clean up!!) */ DistEntry *tdep; - int no_dist_port = 0; + int no_dist_ctrl = 0; Eterm nd_reason = (reason == am_no_network ? am_no_network : am_net_kernel_terminated); - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) - no_dist_port++; + no_dist_ctrl++; for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) - no_dist_port++; + no_dist_ctrl++; /* KILL all port controllers */ - if (no_dist_port == 0) - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + if (no_dist_ctrl == 0) + erts_rwmtx_runlock(&erts_dist_table_rwmtx); else { Eterm def_buf[128]; int i = 0; - Eterm *dist_port; + Eterm *dist_ctrl; - if (no_dist_port <= sizeof(def_buf)/sizeof(def_buf[0])) - dist_port = &def_buf[0]; + if (no_dist_ctrl <= sizeof(def_buf)/sizeof(def_buf[0])) + dist_ctrl = &def_buf[0]; else - dist_port = erts_alloc(ERTS_ALC_T_TMP, - sizeof(Eterm)*no_dist_port); + dist_ctrl = erts_alloc(ERTS_ALC_T_TMP, + sizeof(Eterm)*no_dist_ctrl); for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) { - ASSERT(is_internal_port(tdep->cid)); - dist_port[i++] = tdep->cid; + ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid)); + dist_ctrl[i++] = tdep->cid; } for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) { - ASSERT(is_internal_port(tdep->cid)); - dist_port[i++] = tdep->cid; + ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid)); + dist_ctrl[i++] = tdep->cid; } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); - - for (i = 0; i < no_dist_port; i++) { - Port *prt = erts_port_lookup(dist_port[i], - ERTS_PORT_SFLGS_INVALID_LOOKUP); - if (!prt) - continue; - ASSERT(erts_atomic32_read_nob(&prt->state) - & ERTS_PORT_SFLG_DISTRIBUTION); - - erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED, - prt, dist_port[i], nd_reason, NULL); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); + + for (i = 0; i < no_dist_ctrl; i++) { + if (is_internal_pid(dist_ctrl[i])) + schedule_kill_dist_ctrl_proc(dist_ctrl[i]); + else { + Port *prt = erts_port_lookup(dist_ctrl[i], + ERTS_PORT_SFLGS_INVALID_LOOKUP); + if (prt) { + ASSERT(erts_atomic32_read_nob(&prt->state) + & ERTS_PORT_SFLG_DISTRIBUTION); + + erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED, + prt, dist_ctrl[i], nd_reason, NULL); + } + } } - if (dist_port != &def_buf[0]) - erts_free(ERTS_ALC_T_TMP, dist_port); + if (dist_ctrl != &def_buf[0]) + erts_free(ERTS_ALC_T_TMP, dist_ctrl); } /* - * When last dist port exits, node will be 
taken + * When last dist ctrl exits, node will be taken * from alive to not alive. */ ASSERT(is_nil(nodedown.reason) && !nodedown.bp); @@ -526,52 +556,51 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) &nodedown.bp->off_heap); } } - else { /* Call from distribution port */ + else { /* Call from distribution controller (port/process) */ NetExitsContext nec = {dep}; ErtsLink *nlinks; ErtsLink *node_links; ErtsMonitor *monitors; Uint32 flags; - erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 1); - erts_smp_de_rwlock(dep); + erts_atomic_set_mb(&dep->dist_cmd_scheduled, 1); + erts_de_rwlock(dep); - ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid) - && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); + if (is_internal_port(dep->cid)) { + ERTS_LC_ASSERT(erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); - if (erts_port_task_is_scheduled(&dep->dist_cmd)) - erts_port_task_abort(&dep->dist_cmd); + if (erts_port_task_is_scheduled(&dep->dist_cmd)) + erts_port_task_abort(&dep->dist_cmd); + } if (dep->status & ERTS_DE_SFLG_EXITING) { #ifdef DEBUG - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qflgs & ERTS_DE_QFLG_EXIT); - erts_smp_mtx_unlock(&dep->qlock); + ASSERT(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT); #endif } else { dep->status |= ERTS_DE_SFLG_EXITING; - erts_smp_mtx_lock(&dep->qlock); - ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT)); - dep->qflgs |= ERTS_DE_QFLG_EXIT; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT)); + erts_atomic32_read_bor_relb(&dep->qflgs, ERTS_DE_QFLG_EXIT); + erts_mtx_unlock(&dep->qlock); } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); monitors = dep->monitors; nlinks = dep->nlinks; node_links = dep->node_links; dep->monitors = NULL; dep->nlinks = NULL; dep->node_links = NULL; - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); nodename = dep->sysname; flags = dep->flags; erts_set_dist_entry_not_connected(dep); - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); erts_sweep_monitors(monitors, &doit_monitor_net_exits, (void *) &nec); erts_sweep_links(nlinks, &doit_link_net_exits, (void *) &nec); @@ -605,8 +634,8 @@ void init_dist(void) nodedown.reason = NIL; nodedown.bp = NULL; - erts_smp_atomic_init_nob(&no_nodes, 0); - erts_smp_atomic_init_nob(&no_caches, 0); + erts_atomic_init_nob(&no_nodes, 0); + erts_atomic_init_nob(&no_caches, 0); /* Lookup/Install all references to trap functions */ dsend2_trap = trap_function(am_dsend,2); @@ -618,6 +647,9 @@ void init_dist(void) dgroup_leader_trap = trap_function(am_dgroup_leader,2); dexit_trap = trap_function(am_dexit, 2); dmonitor_p_trap = trap_function(am_dmonitor_p, 2); + dist_ctrl_put_data_trap = erts_export_put(am_erts_internal, + am_dist_ctrl_put_data, + 2); } #define ErtsDistOutputBuf2Binary(OB) \ @@ -659,19 +691,24 @@ static void clear_dist_entry(DistEntry *dep) ErtsProcList *suspendees; ErtsDistOutputBuf *obuf; - erts_smp_de_rwlock(dep); + erts_de_rwlock(dep); + erts_atomic_set_nob(&dep->input_handler, + (erts_aint_t) NIL); cache = dep->cache; dep->cache = NULL; #ifdef DEBUG - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); ASSERT(!dep->nlinks); ASSERT(!dep->node_links); ASSERT(!dep->monitors); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); #endif - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + + erts_atomic64_set_nob(&dep->in, 0); + erts_atomic64_set_nob(&dep->out, 0); if (!dep->out_queue.last) obuf = dep->finalized_out_queue.first; @@ -680,17 
+717,24 @@ static void clear_dist_entry(DistEntry *dep) obuf = dep->out_queue.first; } + if (dep->tmp_out_queue.first) { + dep->tmp_out_queue.last->next = obuf; + obuf = dep->tmp_out_queue.first; + } + dep->out_queue.first = NULL; dep->out_queue.last = NULL; + dep->tmp_out_queue.first = NULL; + dep->tmp_out_queue.last = NULL; dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; dep->status = 0; suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL); - erts_smp_mtx_unlock(&dep->qlock); - erts_smp_atomic_set_nob(&dep->dist_cmd_scheduled, 0); + erts_mtx_unlock(&dep->qlock); + erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0); dep->send = NULL; - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); erts_resume_processes(suspendees); @@ -705,10 +749,11 @@ static void clear_dist_entry(DistEntry *dep) } if (obufsize) { - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize >= obufsize); - dep->qsize -= obufsize; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + ASSERT(erts_atomic_read_nob(&dep->qsize) >= obufsize); + erts_atomic_add_nob(&dep->qsize, + (erts_aint_t) -obufsize); + erts_mtx_unlock(&dep->qlock); } } @@ -813,9 +858,9 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched, watched, watcher, ref, reason); #ifdef DEBUG - erts_smp_de_links_lock(dsdp->dep); + erts_de_links_lock(dsdp->dep); ASSERT(!erts_lookup_monitor(dsdp->dep->monitors, ref)); - erts_smp_de_links_unlock(dsdp->dep); + erts_de_links_unlock(dsdp->dep); #endif res = dsig_send_ctl(dsdp, ctl, 1); @@ -906,11 +951,30 @@ erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx) } #endif - if (token != NIL) - ctl = TUPLE4(&ctx->ctl_heap[0], - make_small(DOP_SEND_TT), am_Empty, remote, token); - else - ctl = TUPLE3(&ctx->ctl_heap[0], make_small(DOP_SEND), am_Empty, remote); + if (token != NIL) { + Eterm el1, el2; + if (ctx->dep->flags & DFLAG_SEND_SENDER) { + el1 = make_small(DOP_SEND_SENDER_TT); + el2 = sender->common.id; + } + else { + el1 = make_small(DOP_SEND_TT); + el2 = am_Empty; + } + ctl = TUPLE4(&ctx->ctl_heap[0], el1, el2, remote, token); + } + else { + Eterm el1, el2; + if (ctx->dep->flags & DFLAG_SEND_SENDER) { + el1 = make_small(DOP_SEND_SENDER); + el2 = sender->common.id; + } + else { + el1 = make_small(DOP_SEND); + el2 = am_Empty; + } + ctl = TUPLE3(&ctx->ctl_heap[0], el1, el2, remote); + } DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, tok_serial); DTRACE7(message_send_remote, sender_name, node_name, receiver_name, @@ -1147,22 +1211,25 @@ int erts_net_message(Port *prt, ErtsLink *lnk; Uint tuple_arity; int res; + Uint32 connection_id; #ifdef ERTS_DIST_MSG_DBG ErlDrvSizeT orig_len = len; #endif UseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); if (!erts_is_alive) { UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); return 0; } - if (hlen != 0) - goto data_error; + + + ASSERT(hlen == 0); + if (len == 0) { /* HANDLE TICK !!! 
*/ UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); return 0; @@ -1181,30 +1248,31 @@ int erts_net_message(Port *prt, len--; } - if (len == 0) { - PURIFY_MSG("data error"); - goto data_error; - } + res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache, &connection_id); - res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache); - - if (res >= 0) - res = ctl_len = erts_decode_dist_ext_size(&ede); - else { + switch (res) { + case ERTS_PREP_DIST_EXT_CLOSED: + return 0; /* Connection not alive; ignore signal... */ + case ERTS_PREP_DIST_EXT_FAILED: #ifdef ERTS_DIST_MSG_DBG erts_fprintf(stderr, "DIST MSG DEBUG: erts_prepare_dist_ext() failed:\n"); bw(buf, orig_len); #endif - ctl_len = 0; - } - - if (res < 0) { + goto data_error; + case ERTS_PREP_DIST_EXT_SUCCESS: + ctl_len = erts_decode_dist_ext_size(&ede); + if (ctl_len < 0) { #ifdef ERTS_DIST_MSG_DBG - erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n"); - bw(buf, orig_len); + erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n"); + bw(buf, orig_len); #endif - PURIFY_MSG("data error"); - goto data_error; + PURIFY_MSG("data error"); + goto data_error; + } + break; + default: + ERTS_INTERNAL_ERROR("Unexpected result from erts_prepare_dist_ext()"); + break; } if (ctl_len > DIST_CTL_DEFAULT_SIZE) { @@ -1235,6 +1303,7 @@ int erts_net_message(Port *prt, } token_size = 0; + token = NIL; switch (type = unsigned_val(tuple[1])) { case DOP_LINK: @@ -1263,23 +1332,23 @@ int erts_net_message(Port *prt, break; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); res = erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, from); if (res < 0) { /* It was already there! Lets skip the rest... */ - erts_smp_de_links_unlock(dep); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); break; } lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->common.id); erts_add_link(&(ERTS_LINK_ROOT(lnk)), LINK_PID, from); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (IS_TRACED_FL(rp, F_TRACE_PROCS)) trace_proc(NULL, 0, rp, am_getting_linked, from); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); break; case DOP_UNLINK: { @@ -1305,7 +1374,7 @@ int erts_net_message(Port *prt, trace_proc(NULL, 0, rp, am_getting_unlinked, from); } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); erts_remove_dist_link(&dld, to, from, dep); erts_destroy_dist_link(&dld); @@ -1357,11 +1426,11 @@ int erts_net_message(Port *prt, else { if (is_atom(watched)) watched = rp->common.id; - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_add_monitor(&(dep->monitors), MON_ORIGIN, ref, watched, name); erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, watcher, name); - erts_smp_de_links_unlock(dep); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } break; @@ -1383,9 +1452,9 @@ int erts_net_message(Port *prt, goto invalid_message; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); mon = erts_remove_monitor(&(dep->monitors),ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); /* ASSERT(mon != NULL); can happen in case of broken dist message */ if (mon == NULL) { break; @@ -1399,7 +1468,7 @@ int erts_net_message(Port *prt, break; } mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, 
ERTS_PROC_LOCK_LINK); ASSERT(mon != NULL); if (mon == NULL) { break; @@ -1460,42 +1529,56 @@ int erts_net_message(Port *prt, erts_queue_dist_message(rp, locks, ede_copy, token, from); if (locks) - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); } break; + case DOP_SEND_SENDER_TT: { + Uint xsize; case DOP_SEND_TT: + if (tuple_arity != 4) { goto invalid_message; } - - token_size = size_object(tuple[4]); - /* Fall through ... */ + + token = tuple[4]; + token_size = size_object(token); + xsize = ERTS_HEAP_FRAG_SIZE(token_size); + goto send_common; + + case DOP_SEND_SENDER: case DOP_SEND: + + token = NIL; + xsize = 0; + if (tuple_arity != 3) + goto invalid_message; + + send_common: + /* - * There is intentionally no testing of the cookie (it is always '') - * from R9B and onwards. + * If DOP_SEND_SENDER or DOP_SEND_SENDER_TT element 2 contains + * the sender pid (i.e. DFLAG_SEND_SENDER is set); otherwise, + * the atom '' (empty cookie). */ + ASSERT((type == DOP_SEND_SENDER || type == DOP_SEND_SENDER_TT) + ? (is_pid(tuple[2]) && (dep->flags & DFLAG_SEND_SENDER)) + : tuple[2] == am_Empty); + #ifdef ERTS_DIST_MSG_DBG dist_msg_dbg(&ede, "MSG", buf, orig_len); #endif - if (type != DOP_SEND_TT && tuple_arity != 3) { - goto invalid_message; - } to = tuple[3]; if (is_not_pid(to)) { goto invalid_message; } rp = erts_proc_lookup(to); if (rp) { - Uint xsize = type == DOP_SEND ? 0 : ERTS_HEAP_FRAG_SIZE(token_size); ErtsProcLocks locks = 0; ErtsDistExternal *ede_copy; ede_copy = erts_make_dist_ext_copy(&ede, xsize); - if (type == DOP_SEND) { - token = NIL; - } else { + if (is_not_nil(token)) { ErlHeapFragment *heap_frag; ErlOffHeap *ohp; ASSERT(xsize); @@ -1503,15 +1586,15 @@ int erts_net_message(Port *prt, ERTS_INIT_HEAP_FRAG(heap_frag, token_size, token_size); hp = heap_frag->mem; ohp = &heap_frag->off_heap; - token = tuple[4]; token = copy_struct(token, token_size, &hp, ohp); } - erts_queue_dist_message(rp, locks, ede_copy, token, tuple[2]); + erts_queue_dist_message(rp, locks, ede_copy, token, am_Empty); if (locks) - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); } break; + } case DOP_MONITOR_P_EXIT: { /* We are monitoring a process on the remote node which dies, we get @@ -1535,7 +1618,7 @@ int erts_net_message(Port *prt, goto invalid_message; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); sysname = dep->sysname; mon = erts_remove_monitor(&(dep->monitors), ref); /* @@ -1544,7 +1627,7 @@ int erts_net_message(Port *prt, * removed info about monitor. In this case, do nothing * and everything will be as it should. 
*/ - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (mon == NULL) { break; } @@ -1558,7 +1641,7 @@ int erts_net_message(Port *prt, mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); if (mon == NULL) { - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); break; } UseTmpHeapNoproc(3); @@ -1569,7 +1652,7 @@ int erts_net_message(Port *prt, erts_queue_monitor_message(rp, &rp_locks, ref, am_process, watched, reason); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_destroy_monitor(mon); UnUseTmpHeapNoproc(3); break; @@ -1631,13 +1714,13 @@ int erts_net_message(Port *prt, if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) { /* We didn't exit the process and it is traced */ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; } trace_proc(NULL, 0, rp, am_getting_unlinked, from); } } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } erts_remove_dist_link(&dld, to, from, dep); if (lnk) @@ -1679,7 +1762,7 @@ int erts_net_message(Port *prt, token, NULL, 0); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } break; } @@ -1697,7 +1780,7 @@ int erts_net_message(Port *prt, if (!rp) break; rp->group_leader = STORE_NC_IN_PROC(rp, from); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); break; default: @@ -1709,7 +1792,7 @@ int erts_net_message(Port *prt, erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl); } UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return 0; invalid_message: { @@ -1725,8 +1808,8 @@ decode_error: } data_error: UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); - erts_deliver_port_exit(prt, dep->cid, am_killed, 0, 1); - ERTS_SMP_CHK_NO_PROC_LOCKS; + erts_kill_dist_connection(dep, connection_id); + ERTS_CHK_NO_PROC_LOCKS; return -1; } @@ -1746,6 +1829,31 @@ static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy) return ret; } +static ERTS_INLINE void +notify_dist_data(Process *c_p, Eterm pid) +{ + Process *rp; + ErtsProcLocks rp_locks; + + ASSERT(erts_get_scheduler_data() + && !ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())); + ASSERT(is_internal_pid(pid)); + + if (c_p && c_p->common.id == pid) { + rp = c_p; + rp_locks = ERTS_PROC_LOCK_MAIN; + } + else { + rp = erts_proc_lookup(pid); + rp_locks = 0; + } + + if (rp) { + ErtsMessage *mp = erts_alloc_message(0, NULL); + erts_queue_message(rp, rp_locks, mp, am_dist_data, am_system); + } +} + int erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) { @@ -1762,7 +1870,7 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) if (!ctx->c_p || dsdp->no_suspend) ctx->force_busy = 1; - ERTS_SMP_LC_ASSERT(!ctx->c_p + ERTS_LC_ASSERT(!ctx->c_p || (ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(ctx->c_p))); @@ -1851,28 +1959,48 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) * and if so enqueue the signal and schedule it for send. */ ctx->obuf->next = NULL; - erts_smp_de_rlock(dep); + erts_de_rlock(dep); cid = dep->cid; if (cid != dsdp->cid || dep->connection_id != dsdp->connection_id || dep->status & ERTS_DE_SFLG_EXITING) { /* Not the same connection as when we started; drop message... 
*/ - erts_smp_de_runlock(dep); + erts_de_runlock(dep); free_dist_obuf(ctx->obuf); } else { + Sint qsize; + erts_aint32_t qflgs; ErtsProcList *plp = NULL; - erts_smp_mtx_lock(&dep->qlock); - dep->qsize += size_obuf(ctx->obuf); - if (dep->qsize >= erts_dist_buf_busy_limit) - dep->qflgs |= ERTS_DE_QFLG_BUSY; - if (!ctx->force_busy && (dep->qflgs & ERTS_DE_QFLG_BUSY)) { - erts_smp_mtx_unlock(&dep->qlock); + Eterm notify_proc = NIL; + Sint obsz = size_obuf(ctx->obuf); + + erts_mtx_lock(&dep->qlock); + qsize = erts_atomic_add_read_nob(&dep->qsize, (erts_aint_t) obsz); + ASSERT(qsize >= obsz); + qflgs = erts_atomic32_read_nob(&dep->qflgs); + if (!(qflgs & ERTS_DE_QFLG_BUSY) && qsize >= erts_dist_buf_busy_limit) { + erts_atomic32_read_bor_relb(&dep->qflgs, ERTS_DE_QFLG_BUSY); + qflgs |= ERTS_DE_QFLG_BUSY; + } + if (qsize == obsz && (qflgs & ERTS_DE_QFLG_REQ_INFO)) { + /* Previously empty queue and info requested... */ + qflgs = erts_atomic32_read_band_mb(&dep->qflgs, + ~ERTS_DE_QFLG_REQ_INFO); + if (qflgs & ERTS_DE_QFLG_REQ_INFO) { + notify_proc = dep->cid; + ASSERT(is_internal_pid(notify_proc)); + } + /* else: requester will send itself the message... */ + qflgs &= ~ERTS_DE_QFLG_REQ_INFO; + } + if (!ctx->force_busy && (qflgs & ERTS_DE_QFLG_BUSY)) { + erts_mtx_unlock(&dep->qlock); plp = erts_proclist_create(ctx->c_p); erts_suspend(ctx->c_p, ERTS_PROC_LOCK_MAIN, NULL); suspended = 1; - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); } /* Enqueue obuf on dist entry */ @@ -1883,7 +2011,8 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) dep->out_queue.last = ctx->obuf; if (!ctx->force_busy) { - if (!(dep->qflgs & ERTS_DE_QFLG_BUSY)) { + qflgs = erts_atomic32_read_nob(&dep->qflgs); + if (!(qflgs & ERTS_DE_QFLG_BUSY)) { if (suspended) resume = 1; /* was busy when we started, but isn't now */ #ifdef USE_VM_PROBES @@ -1907,9 +2036,12 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) } } - erts_smp_mtx_unlock(&dep->qlock); - erts_schedule_dist_command(NULL, dep); - erts_smp_de_runlock(dep); + erts_mtx_unlock(&dep->qlock); + if (is_internal_port(dep->cid)) + erts_schedule_dist_command(NULL, dep); + erts_de_runlock(dep); + if (is_internal_pid(notify_proc)) + notify_dist_data(ctx->c_p, notify_proc); if (resume) { erts_resume(ctx->c_p, ERTS_PROC_LOCK_MAIN); @@ -1963,16 +2095,20 @@ static Uint dist_port_command(Port *prt, ErtsDistOutputBuf *obuf) { int fpe_was_unmasked; - Uint size = obuf->ext_endp - obuf->extp; + ErlDrvSizeT size; + char *bufp; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); - if (size > (Uint) INT_MAX) - erts_exit(ERTS_DUMP_EXIT, - "Absurdly large distribution output data buffer " - "(%beu bytes) passed.\n", - size); + if (!obuf) { + size = 0; + bufp = NULL; + } + else { + size = obuf->ext_endp - obuf->extp; + bufp = (char*) obuf->extp; + } #ifdef USE_VM_PROBES if (DTRACE_ENABLED(dist_output)) { @@ -1987,11 +2123,10 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf) remote_str, size); } #endif + prt->caller = NIL; fpe_was_unmasked = erts_block_fpe(); - (*prt->drv_ptr->output)((ErlDrvData) prt->drv_data, - (char*) obuf->extp, - (int) size); + (*prt->drv_ptr->output)((ErlDrvData) prt->drv_data, bufp, size); erts_unblock_fpe(fpe_was_unmasked); return size; } @@ -2000,33 +2135,41 @@ static Uint dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf) { int fpe_was_unmasked; - Uint size = obuf->ext_endp - obuf->extp; + 
ErlDrvSizeT size; SysIOVec iov[2]; ErlDrvBinary* bv[2]; ErlIOVec eiov; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - - if (size > (Uint) INT_MAX) - erts_exit(ERTS_DUMP_EXIT, - "Absurdly large distribution output data buffer " - "(%beu bytes) passed.\n", - size); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); iov[0].iov_base = NULL; iov[0].iov_len = 0; bv[0] = NULL; - iov[1].iov_base = obuf->extp; - iov[1].iov_len = size; - bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf)); + if (!obuf) { + size = 0; + eiov.vsize = 1; + } + else { + size = obuf->ext_endp - obuf->extp; + eiov.vsize = 2; + + iov[1].iov_base = obuf->extp; + iov[1].iov_len = size; + bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf)); + } - eiov.vsize = 2; eiov.size = size; eiov.iov = iov; eiov.binv = bv; + if (size > (Uint) INT_MAX) + erts_exit(ERTS_DUMP_EXIT, + "Absurdly large distribution output data buffer " + "(%beu bytes) passed.\n", + size); + ASSERT(prt->drv_ptr->outputv); #ifdef USE_VM_PROBES @@ -2074,29 +2217,25 @@ erts_dist_command(Port *prt, int reds_limit) Sint reds = ERTS_PORT_REDS_DIST_CMD_START; Uint32 status; Uint32 flags; - Sint obufsize = 0; + Sint qsize, obufsize = 0; ErtsDistOutputQueue oq, foq; DistEntry *dep = prt->dist_entry; Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf); erts_aint32_t sched_flags; ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - - erts_smp_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be - removed if port command fails */ + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); - erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 0); + erts_atomic_set_mb(&dep->dist_cmd_scheduled, 0); - erts_smp_de_rlock(dep); + erts_de_rlock(dep); flags = dep->flags; status = dep->status; send = dep->send; - erts_smp_de_runlock(dep); + erts_de_runlock(dep); if (status & ERTS_DE_SFLG_EXITING) { erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1); - erts_deref_dist_entry(dep); return reds + ERTS_PORT_REDS_DIST_CMD_EXIT; } @@ -2110,19 +2249,19 @@ erts_dist_command(Port *prt, int reds_limit) * a mess. 
*/ - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); oq.first = dep->out_queue.first; oq.last = dep->out_queue.last; dep->out_queue.first = NULL; dep->out_queue.last = NULL; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); foq.first = dep->finalized_out_queue.first; foq.last = dep->finalized_out_queue.last; dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (reds > reds_limit) goto preempted; @@ -2130,21 +2269,21 @@ erts_dist_command(Port *prt, int reds_limit) if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) && foq.first) { int preempt = 0; do { - Uint size; - ErtsDistOutputBuf *fob; - - size = (*send)(prt, foq.first); - esdp->io.out += (Uint64) size; + Uint size; + ErtsDistOutputBuf *fob; + size = (*send)(prt, foq.first); + erts_atomic64_inc_nob(&dep->out); + esdp->io.out += (Uint64) size; #ifdef ERTS_RAW_DIST_MSG_DBG - erts_fprintf(stderr, ">> "); - bw(foq.first->extp, size); + erts_fprintf(stderr, ">> "); + bw(foq.first->extp, size); #endif - reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); - fob = foq.first; - obufsize += size_obuf(fob); - foq.first = foq.first->next; - free_dist_obuf(fob); - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); + fob = foq.first; + obufsize += size_obuf(fob); + foq.first = foq.first->next; + free_dist_obuf(fob); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) break; @@ -2204,32 +2343,34 @@ erts_dist_command(Port *prt, int reds_limit) } } else { + int de_busy; int preempt = 0; while (oq.first && !preempt) { - ErtsDistOutputBuf *fob; - Uint size; - oq.first->extp - = erts_encode_ext_dist_header_finalize(oq.first->extp, - dep->cache, - flags); - reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; - if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) - *--oq.first->extp = PASS_THROUGH; /* Old node; 'pass through' - needed */ - ASSERT(&oq.first->data[0] <= oq.first->extp - && oq.first->extp < oq.first->ext_endp); - size = (*send)(prt, oq.first); - esdp->io.out += (Uint64) size; + ErtsDistOutputBuf *fob; + Uint size; + oq.first->extp + = erts_encode_ext_dist_header_finalize(oq.first->extp, + dep->cache, + flags); + reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; + if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) + *--oq.first->extp = PASS_THROUGH; /* Old node; 'pass through' + needed */ + ASSERT(&oq.first->data[0] <= oq.first->extp + && oq.first->extp < oq.first->ext_endp); + size = (*send)(prt, oq.first); + erts_atomic64_inc_nob(&dep->out); + esdp->io.out += (Uint64) size; #ifdef ERTS_RAW_DIST_MSG_DBG - erts_fprintf(stderr, ">> "); - bw(oq.first->extp, size); + erts_fprintf(stderr, ">> "); + bw(oq.first->extp, size); #endif - reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); - fob = oq.first; - obufsize += size_obuf(fob); - oq.first = oq.first->next; - free_dist_obuf(fob); - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); + fob = oq.first; + obufsize += size_obuf(fob); + oq.first = oq.first->next; + free_dist_obuf(fob); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); if ((sched_flags & ERTS_PTS_FLG_BUSY_PORT) && oq.first && !preempt) goto finalize_only; @@ -2256,23 +2397,24 @@ erts_dist_command(Port *prt, int reds_limit) 
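/*
 * The erts_dist_command() loops above drain the output queue under a
 * reduction budget: each sent buffer adds to `reds`, and the loop preempts
 * once the budget is exceeded or the port becomes busy.  A small standalone
 * sketch of that drain-with-budget shape (the names and the cost function
 * are invented for the illustration, not ERTS definitions):
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct obuf {
    struct obuf *next;
    size_t size;
} obuf_t;

/* "Send" one buffer; the return value is a rough cost of the work done. */
static int send_buf(const obuf_t *ob) {
    return 1 + (int) (ob->size / 64);
}

/* Drain *q until it is empty, the budget is spent, or `busy` is set.
 * Returns the reductions consumed; unsent buffers are left in *q. */
static int drain(obuf_t **q, int reds_limit, const bool *busy) {
    int reds = 0;
    while (*q && !*busy && reds <= reds_limit) {
        obuf_t *ob = *q;
        reds += send_buf(ob);
        *q = ob->next;        /* the real loop also frees the buffer here */
    }
    return reds;
}

int main(void) {
    obuf_t b2 = { NULL, 256 }, b1 = { &b2, 128 };
    obuf_t *q = &b1;
    bool busy = false;
    printf("used %d reds, %s left\n", drain(&q, 100, &busy),
           q ? "buffers" : "nothing");
    return 0;
}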
* dist entry in a non-busy state and resume suspended * processes. */ - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize >= obufsize); - dep->qsize -= obufsize; + erts_mtx_lock(&dep->qlock); + de_busy = !!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_BUSY); + qsize = (Sint) erts_atomic_add_read_nob(&dep->qsize, + (erts_aint_t) -obufsize); + ASSERT(qsize >= 0); obufsize = 0; if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) - && (dep->qflgs & ERTS_DE_QFLG_BUSY) - && dep->qsize < erts_dist_buf_busy_limit) { + && de_busy && qsize < erts_dist_buf_busy_limit) { ErtsProcList *suspendees; int resumed; suspendees = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY); - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); resumed = erts_resume_processes(suspendees); reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; } else - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); } ASSERT(!oq.first && !oq.last); @@ -2281,10 +2423,15 @@ erts_dist_command(Port *prt, int reds_limit) if (obufsize != 0) { ASSERT(obufsize > 0); - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize >= obufsize); - dep->qsize -= obufsize; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); +#ifdef DEBUG + qsize = (Sint) erts_atomic_add_read_nob(&dep->qsize, + (erts_aint_t) -obufsize); + ASSERT(qsize >= 0); +#else + erts_atomic_add_nob(&dep->qsize, (erts_aint_t) -obufsize); +#endif + erts_mtx_unlock(&dep->qlock); } ASSERT(foq.first || !foq.last); @@ -2301,8 +2448,6 @@ erts_dist_command(Port *prt, int reds_limit) if (reds > INT_MAX/2) reds = INT_MAX/2; - erts_deref_dist_entry(dep); - return reds; preempted: @@ -2338,9 +2483,9 @@ erts_dist_command(Port *prt, int reds_limit) foq.last = NULL; #ifdef DEBUG - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize == obufsize); - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + ASSERT(erts_atomic_read_nob(&dep->qsize) == obufsize); + erts_mtx_unlock(&dep->qlock); #endif } else { @@ -2349,14 +2494,14 @@ erts_dist_command(Port *prt, int reds_limit) * Unhandle buffers need to be put back first * in out_queue. */ - erts_smp_mtx_lock(&dep->qlock); - dep->qsize -= obufsize; + erts_mtx_lock(&dep->qlock); + erts_atomic_add_nob(&dep->qsize, -obufsize); obufsize = 0; oq.last->next = dep->out_queue.first; dep->out_queue.first = oq.first; if (!dep->out_queue.last) dep->out_queue.last = oq.last; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); } erts_schedule_dist_command(prt, NULL); @@ -2364,6 +2509,370 @@ erts_dist_command(Port *prt, int reds_limit) goto done; } +#if 0 + +int +dist_data_finalize(Process *c_p, int reds_limit) +{ + int reds = 5; + DistEntry *dep = ; + ErtsDistOutputQueue oq, foq; + ErtsDistOutputBuf *ob; + int preempt; + + + erts_mtx_lock(&dep->qlock); + flags = dep->flags; + oq.first = dep->out_queue.first; + oq.last = dep->out_queue.last; + dep->out_queue.first = NULL; + dep->out_queue.last = NULL; + erts_mtx_unlock(&dep->qlock); + + if (!oq.first) { + ASSERT(!oq.last); + oq.first = dep->tmp_out_queue.first; + oq.last = dep->tmp_out_queue.last; + } + else { + ErtsDistOutputBuf *f, *l; + ASSERT(oq.last); + if (dep->tmp_out_queue.last) { + dep->tmp_out_queue.last->next = oq.first; + oq.first = dep->tmp_out_queue.first; + } + } + + if (!oq.first) { + /* Nothing to do... 
*/ + ASSERT(!oq.last); + return reds; + } + + foq.first = dep->finalized_out_queue.first; + foq.last = dep->finalized_out_queue.last; + + preempt = 0; + ob = oq.first; + ASSERT(ob); + + do { + ob->extp = erts_encode_ext_dist_header_finalize(ob->extp, + dep->cache, + flags); + if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) + *--ob->extp = PASS_THROUGH; /* Old node; 'pass through' + needed */ + ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp); + reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; + preempt = reds > reds_limit; + if (preempt) + break; + ob = ob->next; + } while (ob); + /* + * At least one buffer was finalized; if we got preempted, + * ob points to the last buffer that we finalized. + */ + if (foq.last) + foq.last->next = oq.first; + else + foq.first = oq.first; + if (!preempt) { + /* All buffers finalized */ + foq.last = oq.last; + oq.first = oq.last = NULL; + } + else { + /* Not all buffers finalized; split oq. */ + foq.last = ob; + oq.first = ob->next; + if (oq.first) + ob->next = NULL; + else + oq.last = NULL; + } + + dep->finalized_out_queue.first = foq.first; + dep->finalized_out_queue.last = foq.last; + dep->tmp_out_queue.first = oq.first; + dep->tmp_out_queue.last = oq.last; + + return reds; +} + +#endif + +BIF_RETTYPE +dist_ctrl_get_data_notification_1(BIF_ALIST_1) +{ + DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P); + erts_aint32_t qflgs; + erts_aint_t qsize; + Eterm receiver = NIL; + + if (!dep) + BIF_ERROR(BIF_P, EXC_NOTSUP); + + if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep) + BIF_ERROR(BIF_P, BADARG); + + /* + * Caller is the only one that can consume from this queue + * and the only one that can set the req-info flag... + */ + + erts_de_rlock(dep); + + ASSERT(dep->cid == BIF_P->common.id); + + qflgs = erts_atomic32_read_acqb(&dep->qflgs); + + if (!(qflgs & ERTS_DE_QFLG_REQ_INFO)) { + qsize = erts_atomic_read_acqb(&dep->qsize); + ASSERT(qsize >= 0); + if (qsize > 0) + receiver = BIF_P->common.id; /* Notify ourselves... */ + else { /* Empty queue; set req-info flag... */ + qflgs = erts_atomic32_read_bor_mb(&dep->qflgs, + ERTS_DE_QFLG_REQ_INFO); + qsize = erts_atomic_read_acqb(&dep->qsize); + ASSERT(qsize >= 0); + if (qsize > 0) { + qflgs = erts_atomic32_read_band_mb(&dep->qflgs, + ~ERTS_DE_QFLG_REQ_INFO); + if (qflgs & ERTS_DE_QFLG_REQ_INFO) + receiver = BIF_P->common.id; /* Notify ourselves... */ + /* else: someone else will notify us... */ + } + /* else: still empty queue... */ + } + } + /* else: Already requested... 
*/ + + erts_de_runlock(dep); + + if (is_internal_pid(receiver)) + notify_dist_data(BIF_P, receiver); + + BIF_RET(am_ok); +} + +BIF_RETTYPE +dist_ctrl_put_data_2(BIF_ALIST_2) +{ + DistEntry *dep; + ErlDrvSizeT size; + Eterm input_handler; + + if (is_binary(BIF_ARG_2)) + size = binary_size(BIF_ARG_2); + else if (is_nil(BIF_ARG_2)) + size = 0; + else if (is_list(BIF_ARG_2)) + BIF_TRAP2(dist_ctrl_put_data_trap, + BIF_P, BIF_ARG_1, BIF_ARG_2); + else + BIF_ERROR(BIF_P, BADARG); + + dep = erts_dhandle_to_dist_entry(BIF_ARG_1); + if (!dep) + BIF_ERROR(BIF_P, BADARG); + + input_handler = (Eterm) erts_atomic_read_nob(&dep->input_handler); + + if (input_handler != BIF_P->common.id) + BIF_ERROR(BIF_P, EXC_NOTSUP); + + erts_atomic64_inc_nob(&dep->in); + + if (size != 0) { + byte *data, *temp_alloc = NULL; + + data = (byte *) erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc); + if (!data) + BIF_ERROR(BIF_P, BADARG); + + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + + (void) erts_net_message(NULL, dep, NULL, 0, data, size); + /* + * We ignore any decode failures. On fatal failures the + * connection will be taken down by killing the + * distribution channel controller... + */ + + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + + BUMP_REDS(BIF_P, 5); + + erts_free_aligned_binary_bytes(temp_alloc); + + } + + BIF_RET(am_ok); +} + +BIF_RETTYPE +dist_get_stat_1(BIF_ALIST_1) +{ + Sint64 read, write, pend; + Eterm res, *hp, **hpp; + Uint sz, *szp; + DistEntry *dep = erts_dhandle_to_dist_entry(BIF_ARG_1); + + if (!dep) + BIF_ERROR(BIF_P, BADARG); + + erts_de_rlock(dep); + + read = (Sint64) erts_atomic64_read_nob(&dep->in); + write = (Sint64) erts_atomic64_read_nob(&dep->out); + pend = (Sint64) erts_atomic_read_nob(&dep->qsize); + + erts_de_runlock(dep); + + sz = 0; + szp = &sz; + hpp = NULL; + + while (1) { + res = erts_bld_tuple(hpp, szp, 4, + am_ok, + erts_bld_sint64(hpp, szp, read), + erts_bld_sint64(hpp, szp, write), + pend ? 
am_true : am_false); + if (hpp) + break; + hp = HAlloc(BIF_P, sz); + hpp = &hp; + szp = NULL; + } + + BIF_RET(res); +} + +BIF_RETTYPE +dist_ctrl_input_handler_2(BIF_ALIST_2) +{ + DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P); + + if (!dep) + BIF_ERROR(BIF_P, EXC_NOTSUP); + + if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep) + BIF_ERROR(BIF_P, BADARG); + + if (is_not_internal_pid(BIF_ARG_2)) + BIF_ERROR(BIF_P, BADARG); + + erts_atomic_set_nob(&dep->input_handler, + (erts_aint_t) BIF_ARG_2); + + BIF_RET(am_ok); +} + +BIF_RETTYPE +dist_ctrl_get_data_1(BIF_ALIST_1) +{ + DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P); + int reds = 1; + ErtsDistOutputBuf *obuf; + Eterm *hp; + ProcBin *pb; + erts_aint_t qsize; + + if (!dep) + BIF_ERROR(BIF_P, EXC_NOTSUP); + + if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep) + BIF_ERROR(BIF_P, BADARG); + + erts_de_rlock(dep); + + if (dep->status & ERTS_DE_SFLG_EXITING) + goto return_none; + + ASSERT(dep->cid == BIF_P->common.id); + +#if 0 + if (dep->finalized_out_queue.first) { + obuf = dep->finalized_out_queue.first; + dep->finalized_out_queue.first = obuf->next; + if (!obuf->next) + dep->finalized_out_queue.last = NULL; + } + else +#endif + { + if (!dep->tmp_out_queue.first) { + ASSERT(!dep->tmp_out_queue.last); + qsize = erts_atomic_read_acqb(&dep->qsize); + if (qsize > 0) { + erts_mtx_lock(&dep->qlock); + dep->tmp_out_queue.first = dep->out_queue.first; + dep->tmp_out_queue.last = dep->out_queue.last; + dep->out_queue.first = NULL; + dep->out_queue.last = NULL; + erts_mtx_unlock(&dep->qlock); + } + } + + if (!dep->tmp_out_queue.first) { + ASSERT(!dep->tmp_out_queue.last); + return_none: + erts_de_runlock(dep); + BIF_RET(am_none); + } + else { + obuf = dep->tmp_out_queue.first; + dep->tmp_out_queue.first = obuf->next; + if (!obuf->next) + dep->tmp_out_queue.last = NULL; + } + + obuf->extp = erts_encode_ext_dist_header_finalize(obuf->extp, + dep->cache, + dep->flags); + reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; + if (!(dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)) + *--obuf->extp = PASS_THROUGH; /* 'pass through' needed */ + ASSERT(&obuf->data[0] <= obuf->extp + && obuf->extp < obuf->ext_endp); + } + + erts_atomic64_inc_nob(&dep->out); + + erts_de_runlock(dep); + + hp = HAlloc(BIF_P, PROC_BIN_SIZE); + pb = (ProcBin *) (char *) hp; + pb->thing_word = HEADER_PROC_BIN; + pb->size = obuf->ext_endp - obuf->extp; + pb->next = MSO(BIF_P).first; + MSO(BIF_P).first = (struct erl_off_heap_header*) pb; + pb->val = ErtsDistOutputBuf2Binary(obuf); + pb->bytes = (byte*) obuf->extp; + pb->flags = 0; + + qsize = erts_atomic_add_read_nob(&dep->qsize, -size_obuf(obuf)); + ASSERT(qsize >= 0); + + if (qsize < erts_dist_buf_busy_limit/2 + && (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY)) { + ErtsProcList *resume_procs = NULL; + erts_mtx_lock(&dep->qlock); + resume_procs = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY); + erts_mtx_unlock(&dep->qlock); + if (resume_procs) { + int resumed = erts_resume_processes(resume_procs); + reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; + } + } + + BIF_RET2(make_binary(pb), reds); +} + void erts_dist_port_not_busy(Port *prt) { @@ -2386,21 +2895,23 @@ erts_dist_port_not_busy(Port *prt) void erts_kill_dist_connection(DistEntry *dep, Uint32 connection_id) { - erts_smp_de_rwlock(dep); - if (is_internal_port(dep->cid) - && connection_id == dep->connection_id + erts_de_rwlock(dep); + if (connection_id == dep->connection_id && !(dep->status & ERTS_DE_SFLG_EXITING)) { dep->status |= ERTS_DE_SFLG_EXITING; - erts_smp_mtx_lock(&dep->qlock); - 
ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT)); - dep->qflgs |= ERTS_DE_QFLG_EXIT; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT)); + erts_atomic32_read_bor_nob(&dep->qflgs, ERTS_DE_QFLG_EXIT); + erts_mtx_unlock(&dep->qlock); - erts_schedule_dist_command(NULL, dep); + if (is_internal_port(dep->cid)) + erts_schedule_dist_command(NULL, dep); + else if (is_internal_pid(dep->cid)) + schedule_kill_dist_ctrl_proc(dep->cid); } - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); } struct print_to_data { @@ -2515,9 +3026,6 @@ info_dist_entry(fmtfn_t to, void *arg, DistEntry *dep, int visible, int connecte } erts_print(to, arg, "Name: %T", dep->sysname); -#ifdef DEBUG - erts_print(to, arg, " (refc=%d)", erts_smp_refc_read(&dep->refc, 0)); -#endif erts_print(to, arg, "\n"); if (!connected && is_nil(dep->cid)) { if (dep->nlinks) { @@ -2637,32 +3145,46 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2) goto error; } - net_kernel = erts_whereis_process(BIF_P, ERTS_PROC_LOCK_MAIN, - am_net_kernel, ERTS_PROC_LOCK_MAIN, 0); - if (!net_kernel) + net_kernel = erts_whereis_process(BIF_P, + ERTS_PROC_LOCK_MAIN, + am_net_kernel, + ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS, + 0); + if (!net_kernel || ERTS_PROC_GET_DIST_ENTRY(net_kernel)) goto error; /* By setting F_DISTRIBUTION on net_kernel, - * do_net_exist will be called when net_kernel is terminated !! */ + * erts_do_net_exits will be called when net_kernel is terminated !! */ net_kernel->flags |= F_DISTRIBUTION; - if (net_kernel != BIF_P) - erts_smp_proc_unlock(net_kernel, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(net_kernel, + (ERTS_PROC_LOCK_STATUS + | ((net_kernel != BIF_P) + ? ERTS_PROC_LOCK_MAIN + : 0))); #ifdef DEBUG - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); ASSERT(!erts_visible_dist_entries && !erts_hidden_dist_entries); - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); #endif - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); inc_no_nodes(); erts_set_this_node(BIF_ARG_1, (Uint32) creation); erts_is_alive = 1; send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + + /* + * Note erts_this_dist_entry is changed by erts_set_this_node(), + * so we *need* to use the new one after erts_set_this_node() + * is called. + */ + erts_ref_dist_entry(erts_this_dist_entry); + ERTS_PROC_SET_DIST_ENTRY(net_kernel, erts_this_dist_entry); BIF_RET(am_true); @@ -2693,18 +3215,18 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) Eterm ic, oc; Eterm *tp; DistEntry *dep = NULL; + ErtsProcLocks proc_unlock = 0; + Process *proc; Port *pp = NULL; - /* Prepare for success */ - ERTS_BIF_PREP_RET(ret, am_true); - /* * Check and pick out arguments */ if (!is_node_name_atom(BIF_ARG_1) || - is_not_internal_port(BIF_ARG_2) || - (erts_this_node->sysname == am_Noname)) { + !(is_internal_port(BIF_ARG_2) + || is_internal_pid(BIF_ARG_2)) + || (erts_this_node->sysname == am_Noname)) { goto badarg; } @@ -2748,77 +3270,124 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) else if (!dep) goto system_limit; /* Should never happen!!! 
*/ - pp = erts_id2port_sflgs(BIF_ARG_2, - BIF_P, - ERTS_PROC_LOCK_MAIN, - ERTS_PORT_SFLGS_INVALID_LOOKUP); - erts_smp_de_rwlock(dep); + if (is_internal_pid(BIF_ARG_2)) { + if (BIF_P->common.id == BIF_ARG_2) { + proc_unlock = 0; + proc = BIF_P; + } + else { + proc_unlock = ERTS_PROC_LOCK_MAIN; + proc = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN, + BIF_ARG_2, proc_unlock); + } + erts_de_rwlock(dep); - if (!pp || (erts_atomic32_read_nob(&pp->state) - & ERTS_PORT_SFLG_EXITING)) - goto badarg; + if (!proc) + goto badarg; + else if (proc == ERTS_PROC_LOCK_BUSY) { + proc_unlock = 0; + goto yield; + } - if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0) - goto badarg; + erts_proc_lock(proc, ERTS_PROC_LOCK_STATUS); + proc_unlock |= ERTS_PROC_LOCK_STATUS; + + if (ERTS_PROC_GET_DIST_ENTRY(proc)) { + if (dep == ERTS_PROC_GET_DIST_ENTRY(proc) + && (proc->flags & F_DISTRIBUTION) + && dep->cid == BIF_ARG_2) { + ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep)); + goto done; + } + goto badarg; + } + + if (is_not_nil(dep->cid)) + goto badarg; + + proc->flags |= F_DISTRIBUTION; + ERTS_PROC_SET_DIST_ENTRY(proc, dep); - if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep) - goto done; /* Already set */ + proc_unlock &= ~ERTS_PROC_LOCK_STATUS; + erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); + + dep->send = NULL; /* Only for distr ports... */ - if (dep->status & ERTS_DE_SFLG_EXITING) { - /* Suspend on dist entry waiting for the exit to finish */ - ErtsProcList *plp = erts_proclist_create(BIF_P); - plp->next = NULL; - erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); - erts_smp_mtx_lock(&dep->qlock); - erts_proclist_store_last(&dep->suspended, plp); - erts_smp_mtx_unlock(&dep->qlock); - goto yield; } + else { - ASSERT(!(dep->status & ERTS_DE_SFLG_EXITING)); + pp = erts_id2port_sflgs(BIF_ARG_2, + BIF_P, + ERTS_PROC_LOCK_MAIN, + ERTS_PORT_SFLGS_INVALID_LOOKUP); + erts_de_rwlock(dep); - if (pp->dist_entry || is_not_nil(dep->cid)) - goto badarg; + if (!pp || (erts_atomic32_read_nob(&pp->state) + & ERTS_PORT_SFLG_EXITING)) + goto badarg; - erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION); + if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0) + goto badarg; - /* - * Dist-ports do not use the "busy port message queue" functionality, but - * instead use "busy dist entry" functionality. - */ - { - ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED; - erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL); - } + if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep) { + ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep)); + goto done; /* Already set */ + } - pp->dist_entry = dep; + if (dep->status & ERTS_DE_SFLG_EXITING) { + /* Suspend on dist entry waiting for the exit to finish */ + ErtsProcList *plp = erts_proclist_create(BIF_P); + plp->next = NULL; + erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); + erts_mtx_lock(&dep->qlock); + erts_proclist_store_last(&dep->suspended, plp); + erts_mtx_unlock(&dep->qlock); + goto yield; + } - dep->version = version; - dep->creation = 0; + ASSERT(!(dep->status & ERTS_DE_SFLG_EXITING)); - ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output); + if (pp->dist_entry || is_not_nil(dep->cid)) + goto badarg; -#if 1 - dep->send = (pp->drv_ptr->outputv - ? 
dist_port_commandv - : dist_port_command); -#else - dep->send = dist_port_command; -#endif - ASSERT(dep->send); + erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION); + + pp->dist_entry = dep; + + ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output); + + dep->send = (pp->drv_ptr->outputv + ? dist_port_commandv + : dist_port_command); + ASSERT(dep->send); + + /* + * Dist-ports do not use the "busy port message queue" functionality, but + * instead use "busy dist entry" functionality. + */ + { + ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED; + erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL); + } + + } + + dep->version = version; + dep->creation = 0; #ifdef DEBUG - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize == 0); - erts_smp_mtx_unlock(&dep->qlock); + ASSERT(erts_atomic_read_nob(&dep->qsize) == 0); #endif - erts_set_dist_entry_connected(dep, BIF_ARG_2, flags); - if (flags & DFLAG_DIST_HDR_ATOM_CACHE) create_cache(dep); - erts_smp_de_rwunlock(dep); + erts_set_dist_entry_connected(dep, BIF_ARG_2, flags); + + erts_de_rwunlock(dep); + + ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep)); + dep = NULL; /* inc of refc transferred to port (dist_entry field) */ inc_no_nodes(); @@ -2831,13 +3400,16 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) done: if (dep && dep != erts_this_dist_entry) { - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); erts_deref_dist_entry(dep); } if (pp) erts_port_release(pp); + if (proc_unlock) + erts_proc_unlock(proc, proc_unlock); + return ret; yield: @@ -2883,7 +3455,7 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) if (BIF_P->common.id == local) { lp_locks = ERTS_PROC_LOCKS_ALL; lp = BIF_P; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); } else { lp_locks = ERTS_PROC_LOCKS_XSIG_SEND; @@ -2902,21 +3474,17 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) NIL, NULL, 0); -#ifdef ERTS_SMP if (lp == BIF_P) lp_locks &= ~ERTS_PROC_LOCK_MAIN; -#endif - erts_smp_proc_unlock(lp, lp_locks); + erts_proc_unlock(lp, lp_locks); if (lp == BIF_P) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&BIF_P->state); + erts_aint32_t state = erts_atomic32_read_acqb(&BIF_P->state); /* * We may have exited current process and may have to take action. 
*/ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif ERTS_BIF_EXITED(BIF_P); } } @@ -3002,7 +3570,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) length = 0; - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); ASSERT(erts_no_of_not_connected_dist_entries > 0); ASSERT(erts_no_of_hidden_dist_entries >= 0); @@ -3019,7 +3587,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) result = NIL; if (length == 0) { - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); goto done; } @@ -3050,7 +3618,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) hp += 2; } ASSERT(endp == hp); - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); done: UnUseTmpHeap(2,BIF_P); @@ -3105,15 +3673,15 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) if (dep == erts_this_dist_entry) goto done; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); - erts_smp_de_rlock(dep); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_de_rlock(dep); if (ERTS_DE_IS_NOT_CONNECTED(dep)) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); - erts_smp_de_runlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_runlock(dep); goto do_trap; } - erts_smp_de_links_lock(dep); - erts_smp_de_runlock(dep); + erts_de_links_lock(dep); + erts_de_runlock(dep); if (Bool == am_true) { ASSERT(dep->cid != NIL); @@ -3140,11 +3708,10 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) } } - erts_smp_de_links_unlock(dep); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); done: - erts_deref_dist_entry(dep); BIF_RET(am_true); } @@ -3173,9 +3740,9 @@ BIF_RETTYPE net_kernel_dflag_unicode_io_1(BIF_ALIST_1) if (de == erts_this_dist_entry) { BIF_RET(am_true); } - erts_smp_de_rlock(de); + erts_de_rlock(de); f = de->flags; - erts_smp_de_runlock(de); + erts_de_runlock(de); BIF_RET(((f & DFLAG_UNICODE_IO) ? 
am_true : am_false)); } @@ -3205,7 +3772,7 @@ struct ErtsNodesMonitor_ { Uint16 no; }; -static erts_smp_mtx_t nodes_monitors_mtx; +static erts_mtx_t nodes_monitors_mtx; static ErtsNodesMonitor *nodes_monitors; static ErtsNodesMonitor *nodes_monitors_end; @@ -3223,7 +3790,7 @@ static ErtsNodesMonitor *nodes_monitors_end; static void init_nodes_monitors(void) { - erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL, + erts_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); nodes_monitors = NULL; nodes_monitors_end = NULL; @@ -3349,10 +3916,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas } #endif - ERTS_SMP_LC_ASSERT(!c_p + ERTS_LC_ASSERT(!c_p || (erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN)); - erts_smp_mtx_lock(&nodes_monitors_mtx); + erts_mtx_lock(&nodes_monitors_mtx); for (nmp = nodes_monitors; nmp; nmp = nmp->next) { int i; @@ -3385,7 +3952,7 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas if (rp) { if (rp == c_p) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } rp = nmp->proc; @@ -3412,10 +3979,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas if (rp) { if (rp == c_p) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); } static Eterm @@ -3425,8 +3992,8 @@ insert_nodes_monitor(Process *c_p, Uint32 opts) Eterm res = am_false; ErtsNodesMonitor *xnmp, *nmp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&nodes_monitors_mtx)); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); xnmp = c_p->nodes_monitors; if (xnmp) { @@ -3510,8 +4077,8 @@ remove_nodes_monitors(Process *c_p, Uint32 opts, int all) Eterm res = am_false; ErtsNodesMonitor *nmp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&nodes_monitors_mtx)); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); nmp = c_p->nodes_monitors; ASSERT(!nmp || !nmp->prev || nmp->prev->proc != c_p); @@ -3553,23 +4120,23 @@ remove_nodes_monitors(Process *c_p, Uint32 opts, int all) void erts_delete_nodes_monitors(Process *c_p, ErtsProcLocks locks) { -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_CHECK) if (c_p) { ErtsProcLocks might_unlock = locks & ~ERTS_PROC_LOCK_MAIN; if (might_unlock) erts_proc_lc_might_unlock(c_p, might_unlock); } #endif - if (erts_smp_mtx_trylock(&nodes_monitors_mtx) == EBUSY) { + if (erts_mtx_trylock(&nodes_monitors_mtx) == EBUSY) { ErtsProcLocks unlock_locks = locks & ~ERTS_PROC_LOCK_MAIN; if (c_p && unlock_locks) - erts_smp_proc_unlock(c_p, unlock_locks); - erts_smp_mtx_lock(&nodes_monitors_mtx); + erts_proc_unlock(c_p, unlock_locks); + erts_mtx_lock(&nodes_monitors_mtx); if (c_p && unlock_locks) - erts_smp_proc_lock(c_p, unlock_locks); + erts_proc_lock(c_p, unlock_locks); } remove_nodes_monitors(c_p, 0, 1); - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); } Eterm @@ -3580,7 +4147,7 @@ erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist) 
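The dist.c hunks above are largely the mechanical erts_smp_*/ERTS_SMP_* to erts_*/ERTS_* renaming that runs through this diff now that only the SMP flavor of the runtime remains, but the functions they pass through (monitor_node, the nodes_monitors list, erts_monitor_nodes) are the bookkeeping behind the documented monitor_node/2 BIF and net_kernel:monitor_nodes/1,2. A minimal usage sketch of those Erlang-level entry points, not part of the diff; it assumes an already distributed node and an arbitrary peer name supplied by the caller:

    -module(node_watch).
    -export([watch/1]).

    %% Sketch only: monitor_node/2 gives per-node down notifications,
    %% net_kernel:monitor_nodes/2 gives global nodeup/nodedown events.
    %% Assumes net_kernel is running (the node was started with -name/-sname).
    watch(Node) ->
        true = monitor_node(Node, true),
        ok = net_kernel:monitor_nodes(true, [{node_type, all}, nodedown_reason]),
        receive
            {nodedown, Node} ->            %% from monitor_node/2
                {peer_down, Node};
            {nodedown, Down, Info} ->      %% from monitor_nodes/2 with options
                {node_down, Down, Info};
            {nodeup, Up, Info} ->
                {node_up, Up, Info}
        end.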
Uint16 opts = (Uint16) 0; ASSERT(c_p); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); if (on != am_true && on != am_false) return THE_NON_VALUE; @@ -3636,14 +4203,14 @@ erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist) return THE_NON_VALUE; } - erts_smp_mtx_lock(&nodes_monitors_mtx); + erts_mtx_lock(&nodes_monitors_mtx); if (on == am_true) res = insert_nodes_monitor(c_p, opts); else res = remove_nodes_monitors(c_p, opts, 0); - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); return res; } @@ -3666,8 +4233,8 @@ erts_processes_monitoring_nodes(Process *c_p) #endif ASSERT(c_p); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); - erts_smp_mtx_lock(&nodes_monitors_mtx); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + erts_mtx_lock(&nodes_monitors_mtx); sz = 0; szp = &sz; @@ -3716,7 +4283,7 @@ erts_processes_monitoring_nodes(Process *c_p) ASSERT(hp == hend); - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); return res; } diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h index 3e17645997..d4765c50b8 100644 --- a/erts/emulator/beam/dist.h +++ b/erts/emulator/beam/dist.h @@ -44,6 +44,7 @@ #define DFLAG_UTF8_ATOMS 0x10000 #define DFLAG_MAP_TAG 0x20000 #define DFLAG_BIG_CREATION 0x40000 +#define DFLAG_SEND_SENDER 0x80000 /* All flags that should be enabled when term_to_binary/1 is used. */ #define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \ @@ -74,6 +75,9 @@ #define DOP_DEMONITOR_P 20 #define DOP_MONITOR_P_EXIT 21 +#define DOP_SEND_SENDER 22 +#define DOP_SEND_SENDER_TT 23 + /* distribution trap functions */ extern Export* dsend2_trap; extern Export* dsend3_trap; @@ -100,7 +104,7 @@ typedef struct { } ErtsDSigData; #define ERTS_DE_IS_NOT_CONNECTED(DEP) \ - (ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \ + (ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \ || erts_lc_rwmtx_is_rwlocked(&(DEP)->rwmtx)), \ (is_nil((DEP)->cid) || ((DEP)->status & ERTS_DE_SFLG_EXITING))) @@ -153,21 +157,18 @@ erts_dsig_prepare(ErtsDSigData *dsdp, if (!dep) return ERTS_DSIG_PREP_NOT_CONNECTED; if (dspl == ERTS_DSP_RWLOCK) - erts_smp_de_rwlock(dep); + erts_de_rwlock(dep); else - erts_smp_de_rlock(dep); + erts_de_rlock(dep); if (ERTS_DE_IS_NOT_CONNECTED(dep)) { failure = ERTS_DSIG_PREP_NOT_CONNECTED; goto fail; } if (no_suspend) { - failure = ERTS_DSIG_PREP_CONNECTED; - erts_smp_mtx_lock(&dep->qlock); - if (dep->qflgs & ERTS_DE_QFLG_BUSY) + if (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY) { failure = ERTS_DSIG_PREP_WOULD_SUSPEND; - erts_smp_mtx_unlock(&dep->qlock); - if (failure == ERTS_DSIG_PREP_WOULD_SUSPEND) goto fail; + } } dsdp->proc = proc; dsdp->dep = dep; @@ -175,14 +176,14 @@ erts_dsig_prepare(ErtsDSigData *dsdp, dsdp->connection_id = dep->connection_id; dsdp->no_suspend = no_suspend; if (dspl == ERTS_DSP_NO_LOCK) - erts_smp_de_runlock(dep); + erts_de_runlock(dep); return ERTS_DSIG_PREP_CONNECTED; fail: if (dspl == ERTS_DSP_RWLOCK) - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); else - erts_smp_de_runlock(dep); + erts_de_runlock(dep); return failure; } @@ -194,7 +195,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) Eterm id; if (prt) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT((erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_DEAD) == 
0); ASSERT(prt->dist_entry); @@ -204,7 +205,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) } else { ASSERT(dist_entry); - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx) || erts_lc_rwmtx_is_rwlocked(&dist_entry->rwmtx)); ASSERT(is_internal_port(dist_entry->cid)); @@ -212,7 +213,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) id = dep->cid; } - if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) + if (!erts_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD); } @@ -238,7 +239,7 @@ erts_remove_dist_link(ErtsDistLinkData *dldp, Eterm rid, DistEntry *dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); dldp->d_lnk = erts_lookup_link(dep->nlinks, lid); if (!dldp->d_lnk) dldp->d_sub_lnk = NULL; @@ -248,7 +249,7 @@ erts_remove_dist_link(ErtsDistLinkData *dldp, ? NULL : erts_remove_link(&dep->nlinks, lid)); } - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); } ERTS_GLB_INLINE int @@ -349,6 +350,7 @@ typedef struct { Eterm ctl_heap[6]; ErtsDSigData dsd; DistEntry* dep_to_deref; + DistEntry *dep; struct erts_dsig_send_context dss; Eterm return_term; diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index c7ab444c96..845cef24c7 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -83,14 +83,6 @@ #define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC #define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC -#ifndef ERTS_SMP -# undef ERTS_ALC_DEFAULT_ACUL -# define ERTS_ALC_DEFAULT_ACUL 0 -# undef ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC -# define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0 -# undef ERTS_ALC_DEFAULT_ACUL_LL_ALLOC -# define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0 -#endif #ifdef DEBUG static Uint install_debug_functions(void); @@ -148,7 +140,7 @@ enum { }; typedef struct { - erts_smp_atomic32_t refc; + erts_atomic32_t refc; int only_sz; int internal; Uint req_sched; @@ -528,7 +520,6 @@ set_default_test_alloc_opts(struct au_init *ip) } -#ifdef ERTS_SMP static void adjust_tpref(struct au_init *ip, int no_sched) @@ -551,7 +542,6 @@ adjust_tpref(struct au_init *ip, int no_sched) } } -#endif static void handle_args(int *, char **, erts_alc_hndl_args_init_t *); @@ -580,7 +570,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size) if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) { int j; -#ifdef ERTS_SMP if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) { int i; ErtsAllocatorThrSpec_t* tspec; @@ -596,7 +585,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size) } } else -#endif { Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra; for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) { @@ -619,7 +607,6 @@ strategy_support_carrier_migration(struct au_init *auip) static ERTS_INLINE void adjust_carrier_migration_support(struct au_init *auip) { -#ifdef ERTS_SMP if (auip->init.util.acul) { auip->thr_spec = -1; /* Need thread preferred */ @@ -633,9 +620,6 @@ adjust_carrier_migration_support(struct au_init *auip) auip->init.aoff.flavor = AOFF_BF; } } -#else - auip->init.util.acul = 0; -#endif } void @@ -668,10 +652,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) = sizeof(ErtsNifSelectDataState); fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)] = sizeof(ErtsMessageRef); -#ifdef ERTS_SMP 
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)] = sizeof(ErtsThrQElement_t); -#endif fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)] = erts_timer_type_size(ERTS_ALC_T_LL_PTIMER); fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)] @@ -734,20 +716,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) #endif } -#ifndef ERTS_SMP - init.sl_alloc.thr_spec = 0; - init.std_alloc.thr_spec = 0; - init.ll_alloc.thr_spec = 0; - init.eheap_alloc.thr_spec = 0; - init.binary_alloc.thr_spec = 0; - init.ets_alloc.thr_spec = 0; - init.driver_alloc.thr_spec = 0; - init.fix_alloc.thr_spec = 0; - init.literal_alloc.thr_spec = 0; -#ifdef ERTS_ALC_A_EXEC - init.exec_alloc.thr_spec = 0; -#endif -#endif /* Make adjustments for carrier migration support */ init.temp_alloc.init.util.acul = 0; @@ -798,7 +766,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) #endif } -#ifdef ERTS_SMP /* Only temp_alloc can use thread specific interface */ if (init.temp_alloc.thr_spec) init.temp_alloc.thr_spec = erts_no_schedulers; @@ -817,10 +784,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) adjust_tpref(&init.exec_alloc, erts_no_schedulers); #endif -#else - /* No thread specific if not smp */ - init.temp_alloc.thr_spec = 0; -#endif /* * The following allocators cannot be run with afit strategy. @@ -839,10 +802,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) refuse_af_strategy(&init.exec_alloc); #endif -#ifdef ERTS_SMP if (!init.temp_alloc.thr_spec) refuse_af_strategy(&init.temp_alloc); -#endif erts_mtrace_pre_init(); #if HAVE_ERTS_MSEG @@ -1006,8 +967,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu) return; } -#ifdef USE_THREADS -#ifdef ERTS_SMP if (init->thr_spec) { if (init->thr_spec > 0) { af->alloc = erts_alcu_alloc_thr_spec; @@ -1037,7 +996,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu) ai->thr_spec = tspec->size; } else -#endif if (init->init.util.ts) { af->alloc = erts_alcu_alloc_ts; if (init->init.util.fix_type_size) @@ -1049,21 +1007,9 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu) af->free = erts_alcu_free_ts; } else -#endif { -#ifdef ERTS_SMP erts_exit(ERTS_ABORT_EXIT, "%salloc is not thread safe\n", init->init.util.name_prefix); -#else - af->alloc = erts_alcu_alloc; - if (init->init.util.fix_type_size) - af->realloc = erts_realloc_fixed_size; - else if (init->init.util.ramv) - af->realloc = erts_alcu_realloc_mv; - else - af->realloc = erts_alcu_realloc; - af->free = erts_alcu_free; -#endif } af->extra = NULL; ai->alloc_util = 1; @@ -1895,9 +1841,7 @@ erts_alloc_register_scheduler(void *vesdp) int ix = (int) esdp->no; int aix; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); -#endif for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) { ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix]; esdp->alloc_data.deallctr[aix] = NULL; @@ -1915,7 +1859,6 @@ erts_alloc_register_scheduler(void *vesdp) } } -#ifdef ERTS_SMP void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp, int *need_thr_progress, @@ -1944,12 +1887,10 @@ erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp, } } } -#endif erts_aint32_t erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs) { -#ifdef ERTS_SMP ErtsAllocatorThrSpec_t *tspec; tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE]; if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled) @@ -1957,11 +1898,6 @@ erts_alloc_fix_alloc_shrink(int ix, 
erts_aint32_t flgs) if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra) return erts_alcu_fix_alloc_shrink( erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs); -#else - if (ix == 1 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra) - return erts_alcu_fix_alloc_shrink( - erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs); -#endif return 0; } @@ -2165,7 +2101,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) int only_one_value = 0; ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}}; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); /* Figure out whats wanted... */ @@ -2338,10 +2274,10 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) if (proc) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(proc)); /* We'll need locks early in the lock order */ - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); } /* Calculate values needed... */ @@ -2499,7 +2435,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) Uint *hp; Uint hsz; - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); if (only_one_value) { ASSERT(length == 1); @@ -2548,11 +2484,11 @@ erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc) Uint reserved_atom_space, atom_space; if (proc) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(proc)); /* We'll need locks early in the lock order */ - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); } i = 0; @@ -2704,7 +2640,7 @@ erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc) Uint hsz; Uint *hszp; - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); hpp = NULL; hsz = 0; @@ -2792,7 +2728,7 @@ erts_allocator_info(fmtfn_t to, void *arg) { ErtsAlcType_t a; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) { int ai; @@ -2845,11 +2781,7 @@ erts_allocator_info(fmtfn_t to, void *arg) #if HAVE_ERTS_MSEG { struct erts_mmap_info_struct emis; -#ifdef ERTS_SMP int max = (int) erts_no_schedulers; -#else - int max = 0; -#endif int i; for (i = 0; i <= max; i++) { erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i); @@ -3310,7 +3242,7 @@ reply_alloc_info(void *vair) case ERTS_ALC_INFO_A_DISABLED_EXEC: break; case ERTS_ALC_INFO_A_MSEG_ALLOC: -#if HAVE_ERTS_MSEG && defined(ERTS_SMP) +#if HAVE_ERTS_MSEG alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc"); ainfo = erts_mseg_info(sched_id, NULL, NULL, hpp != NULL, air->only_sz, hpp, szp); @@ -3364,10 +3296,10 @@ reply_alloc_info(void *vair) if (air->req_sched == sched_id) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); - if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0) { + if (erts_atomic32_dec_read_nob(&air->refc) == 0) { erts_iref_storage_clean(&air->iref); aireq_free(air); } @@ -3446,18 +3378,16 @@ erts_request_alloc_info(struct process *c_p, air->allocs[airix] = ERTS_ALC_A_INVALID; - erts_smp_atomic32_init_nob(&air->refc, + erts_atomic32_init_nob(&air->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(c_p, (Sint) erts_no_schedulers); -#ifdef ERTS_SMP if 
(erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_alloc_info, (void *) air); -#endif reply_alloc_info((void *) air); @@ -3532,35 +3462,29 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) case 0xf: switch (op) { case 0xf00: -#ifdef USE_THREADS if (((Allctr_t *) a1)->thread_safe) return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF, (void *) a1, (Uint) a2); else -#endif return (UWord) erts_alcu_alloc(ERTS_ALC_T_UNDEF, (void *) a1, (Uint) a2); case 0xf01: -#ifdef USE_THREADS if (((Allctr_t *) a1)->thread_safe) return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2, (Uint) a3); else -#endif return (UWord) erts_alcu_realloc(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2, (Uint) a3); case 0xf02: -#ifdef USE_THREADS if (((Allctr_t *) a1)->thread_safe) erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2); else -#endif erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2); return 0; case 0xf03: { @@ -3571,11 +3495,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) init.enable = 1; init.atype = GOODFIT; init.init.util.name_prefix = (char *) a1; -#ifdef ERTS_SMP init.init.util.ts = 1; -#else - init.init.util.ts = a2 ? 1 : 0; -#endif if ((char **) a3) { char **argv = (char **) a3; int i = 0; @@ -3630,7 +3550,6 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) erts_alcu_stop((Allctr_t *) a1); erts_free(ERTS_ALC_T_UNDEF, (void *) a1); break; -#ifdef USE_THREADS case 0xf05: return (UWord) 1; case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe; #ifdef ETHR_NO_FORKSAFETY @@ -3700,12 +3619,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) ethr_thr_exit((void *) a1); ERTS_ALC_TEST_ABORT; break; -#endif /* #ifdef USE_THREADS */ -#ifdef ERTS_SMP case 0xf13: return (UWord) 1; -#else - case 0xf13: return (UWord) 0; -#endif case 0xf14: return (UWord) erts_alloc(ERTS_ALC_T_TEST, (Uint)a1); case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0; @@ -3909,10 +3823,8 @@ void check_allocators(void) ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra; Allctr_t *allctr = real_af->extra; Carrier_t *ct; -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_lock(&allctr->mutex); -#endif if (allctr->check_mbc) { for (ct = allctr->mbc_list.first; ct; ct = ct->next) { @@ -3920,10 +3832,8 @@ void check_allocators(void) allctr->check_mbc(allctr,ct); } } -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); -#endif } } } diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index 97a1cf1308..c661d0b226 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -27,9 +27,7 @@ #include "erl_thr_progress.h" #undef ERL_THR_PROGRESS_TSD_TYPE_ONLY #include "erl_alloc_util.h" -#ifdef USE_THREADS #include "erl_threads.h" -#endif #include "erl_mmap.h" #ifdef DEBUG @@ -154,12 +152,10 @@ void erts_allctr_wrapper_pre_lock(void); void erts_allctr_wrapper_pre_unlock(void); void erts_alloc_register_scheduler(void *vesdp); -#ifdef ERTS_SMP void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp, int *need_thr_progress, ErtsThrPrgrVal *thr_prgr_p, int *more_work); -#endif erts_aint32_t erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs); __decl_noreturn void erts_alloc_enomem(ErtsAlcType_t,Uint) @@ -338,37 +334,10 @@ erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr); (((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE) #define ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ 
-ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \ - (void) 0, (void) 0, (void) 0) - -#define ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -static erts_smp_spinlock_t NAME##_lck; \ -ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \ - erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \ - ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\ - erts_smp_spin_lock(&NAME##_lck), \ - erts_smp_spin_unlock(&NAME##_lck)) - -#ifdef ERTS_SMP - -#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) - -#else /* !ERTS_SMP */ + ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, (void) 0, (void) 0, (void) 0) #define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -static erts_mtx_t NAME##_lck; \ -ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \ - erts_mtx_init(NAME##_lck, #NAME "_alloc_lock", NIL, \ - ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\ - erts_mtx_lock(&NAME##_lck), \ - erts_mtx_unlock(&NAME##_lck)) - - -#endif - -#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \ -ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0) +ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) #define ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) \ static erts_spinlock_t NAME##_lck; \ @@ -378,17 +347,10 @@ ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \ erts_spin_lock(&NAME##_lck), \ erts_spin_unlock(&NAME##_lck)) -#ifdef ERTS_SMP -#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \ +#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \ ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) -#else /* !ERTS_SMP */ - -#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \ - ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) - -#endif #define ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, ILCK, LCK, ULCK) \ ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, ILCK, LCK, ULCK) \ @@ -412,21 +374,11 @@ NAME##_free(TYPE *p) \ erts_free(ALCT, (void *) p); \ } -#ifdef ERTS_SMP #define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \ ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ) -#else -#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \ - ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0) -#endif -#ifdef ERTS_SMP #define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \ ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ) -#else -#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \ -ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, (void) 0, (void) 0, (void) 0) -#endif #define ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \ diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 50a1d97dd5..11884299e2 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -53,15 +53,6 @@ # # IMPORTANT! Only use 7-bit ascii text in this file! 
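The erl_alloc.h macro consolidation above and the erl_alloc.types hunks that follow drop the non-SMP and non-threaded allocator variants, leaving a single set of alloc_util allocator declarations. For orientation, the allocator names declared there are visible at the Erlang level through documented erlang:system_info/1 items; a small sketch, not part of the diff, with configuration-dependent result terms deliberately elided:

    %% binary_alloc is one of the alloc_util allocators declared in
    %% erl_alloc.types; both system_info items below are documented.
    1> lists:member(binary_alloc, erlang:system_info(alloc_util_allocators)).
    true
    2> is_list(erlang:system_info({allocator, binary_alloc})).
    true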
-+if smp -+disable threads_no_smp -+else -+if threads -+enable threads_no_smp -+else -+disable threads_no_smp -+endif -+endif # --- Allocator declarations ------------------------------------------------- # @@ -77,8 +68,6 @@ allocator SYSTEM true sys_alloc -+if smp - allocator TEMPORARY true temp_alloc allocator SHORT_LIVED true sl_alloc allocator STANDARD true std_alloc @@ -91,22 +80,6 @@ allocator LITERAL true literal_alloc allocator EXEC true exec_alloc +endif -+else # Non smp build - -allocator TEMPORARY false temp_alloc -allocator SHORT_LIVED false sl_alloc -allocator STANDARD false std_alloc -allocator LONG_LIVED false ll_alloc -allocator EHEAP false eheap_alloc -allocator ETS false ets_alloc -allocator FIXED_SIZE false fix_alloc -allocator LITERAL false literal_alloc -+if exec_alloc -allocator EXEC false exec_alloc -+endif - -+endif - allocator BINARY true binary_alloc allocator DRIVER true driver_alloc @@ -285,32 +258,18 @@ type MREF_ENT STANDARD SYSTEM magic_ref_entry type MREF_TAB_BKTS STANDARD SYSTEM magic_ref_table_buckets type MREF_TAB LONG_LIVED SYSTEM magic_ref_table type MINDIRECTION FIXED_SIZE SYSTEM magic_indirection +type BINARY_FIND SHORT_LIVED PROCESSES binary_find -+if threads_no_smp -# Need thread safe allocs, but std_alloc and fix_alloc are not; -# use driver_alloc which is... -type THR_Q_EL DRIVER SYSTEM thr_q_element -type THR_Q_EL_SL DRIVER SYSTEM sl_thr_q_element -type MISC_AUX_WORK DRIVER SYSTEM misc_aux_work -+else type THR_Q_EL STANDARD SYSTEM thr_q_element type THR_Q_EL_SL FIXED_SIZE SYSTEM sl_thr_q_element type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work -+endif type THR_Q STANDARD SYSTEM thr_queue type THR_Q_SL SHORT_LIVED SYSTEM short_lived_thr_queue type THR_Q_LL LONG_LIVED SYSTEM long_lived_thr_queue -+if smp type ASYNC SHORT_LIVED SYSTEM async type ZLIB STANDARD SYSTEM zlib -+else -# sl/std_alloc is not thread safe in non smp build; therefore, we use driver_alloc -type ZLIB DRIVER SYSTEM zlib -type ASYNC DRIVER SYSTEM async -+endif -+if smp type PORT_LOCK STANDARD SYSTEM port_lock type DRIVER_LOCK STANDARD SYSTEM driver_lock type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list @@ -320,33 +279,19 @@ type THR_PRGR_IDATA LONG_LIVED SYSTEM thr_prgr_internal_data type THR_PRGR_DATA LONG_LIVED SYSTEM thr_prgr_data type T_THR_PRGR_DATA SHORT_LIVED SYSTEM temp_thr_prgr_data type RELEASE_LAREA SHORT_LIVED SYSTEM release_literal_area -+endif # # Types used for special emulators # -+if threads - type ETHR_STD STANDARD SYSTEM ethread_standard type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived -+endif - -+if shared_heap - -type STACK STANDARD PROCESSES stack -type ACTIVE_PROCS STANDARD PROCESSES active_procs - -+endif - -+if smp type SYS_MSG_Q SHORT_LIVED PROCESSES system_messages_queue type FP_EXCEPTION LONG_LIVED SYSTEM fp_exception type LL_MPATHS LONG_LIVED SYSTEM ll_migration_paths type SL_MPATHS SHORT_LIVED SYSTEM sl_migration_paths -+endif +if hipe @@ -360,8 +305,6 @@ type HIPE_EXEC EXEC CODE hipe_code +endif - - +if heap_frag_elim_test type SSB SHORT_LIVED PROCESSES ssb @@ -431,11 +374,7 @@ type PUTENV_STR SYSTEM SYSTEM putenv_string type PRT_REP_EXIT STANDARD SYSTEM port_report_exit type SYS_BLOCKING STANDARD SYSTEM sys_blocking -+if smp type SYS_WRITE_BUF TEMPORARY SYSTEM sys_write_buf -+else -type SYS_WRITE_BUF BINARY SYSTEM sys_write_buf -+endif +endif diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index af86ad0548..4d4bddb93f 100644 --- 
a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -305,9 +305,6 @@ MBC after deallocating first block: # define ERTS_ALC_CPOOL_DEBUG #endif -#ifndef ERTS_SMP -# undef ERTS_ALC_CPOOL_DEBUG -#endif #ifdef ERTS_ALC_CPOOL_DEBUG # define ERTS_ALC_CPOOL_ASSERT(A) \ @@ -322,13 +319,8 @@ MBC after deallocating first block: # define ERTS_ALC_CPOOL_ASSERT(A) ((void) 1) #endif -#ifdef ERTS_SMP #define ERTS_ALC_IS_CPOOL_ENABLED(A) ((A)->cpool.util_limit) -#else -#define ERTS_ALC_IS_CPOOL_ENABLED(A) (0) -#endif -#ifdef ERTS_SMP #define ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON 1000 #define ERTS_ALC_CPOOL_ALLOC_OP_INC 8 @@ -367,28 +359,16 @@ do { \ } \ } while (0) -#else -#define ERTS_ALC_CPOOL_ALLOC_OP(A) -#define ERTS_ALC_CPOOL_REALLOC_OP(A) -#define ERTS_ALC_CPOOL_FREE_OP(A) -#endif #define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0) #define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1) #define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \ ERTS_CRR_ALCTR_FLG_BUSY) -#ifdef ERTS_SMP #define SBC_HEADER_SIZE \ (UNIT_CEILING(offsetof(Carrier_t, cpool) \ + ABLK_HDR_SZ) \ - ABLK_HDR_SZ) -#else -#define SBC_HEADER_SIZE \ - (UNIT_CEILING(sizeof(Carrier_t) \ - + ABLK_HDR_SZ) \ - - ABLK_HDR_SZ) -#endif #define MBC_HEADER_SIZE(AP) ((AP)->mbc_header_size) @@ -402,7 +382,7 @@ do { \ #define SET_CARRIER_HDR(C, Sz, F, AP) \ (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \ - erts_smp_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP))) + erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP))) #define BLK_TO_SBC(B) \ ((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE)) @@ -598,15 +578,11 @@ do { \ (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size; \ } while (0) -#ifdef ERTS_SMP #define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) \ do { \ (CRR)->cpool.blocks++; \ (CRR)->cpool.blocks_size += (BSZ); \ } while (0) -#else -#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) ((void) (CRR)) /* Get rid of warning */ -#endif #define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \ do { \ @@ -626,7 +602,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr, Carrier_t **busy_pcrr_pp, UWord blksz) { -#ifdef ERTS_SMP ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0); crr->cpool.blocks--; @@ -651,9 +626,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr, #endif return 1; -#else - return 0; -#endif } #define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS) \ @@ -689,12 +661,7 @@ do { \ #endif #ifdef DEBUG -#ifdef USE_THREADS -# ifdef ERTS_SMP # define IS_ACTUALLY_BLOCKING (erts_thr_progress_is_blocking()) -# else -# define IS_ACTUALLY_BLOCKING 0 -# endif #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \ do { \ if (!(A)->thread_safe && !IS_ACTUALLY_BLOCKING) { \ @@ -703,7 +670,7 @@ do { \ (A)->debug.saved_tid = 1; \ } \ else { \ - ERTS_SMP_LC_ASSERT( \ + ERTS_LC_ASSERT( \ ethr_equal_tids((A)->debug.tid, erts_thr_self())); \ } \ } \ @@ -711,9 +678,6 @@ do { \ #else #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) #endif -#else -#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) -#endif static void make_name_atoms(Allctr_t *allctr); @@ -862,7 +826,7 @@ erts_alcu_literal_32_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags) Uint sz = ERTS_SUPERALIGNED_CEILING(*size_p); ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); res = erts_alcu_mseg_alloc(allctr, &sz, flags); if (res) { @@ -880,7 +844,7 @@ erts_alcu_literal_32_mseg_realloc(Allctr_t *allctr, void *seg, Uint new_sz = ERTS_SUPERALIGNED_CEILING(*new_size_p); ERTS_LC_ASSERT(allctr->alloc_no == 
ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); if (seg && old_size) clear_literal_range(seg, old_size); @@ -898,7 +862,7 @@ erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, { ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); erts_alcu_mseg_dealloc(allctr, seg, size, flags); @@ -1058,7 +1022,7 @@ erts_alcu_literal_32_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign) Uint size = ERTS_SUPERALIGNED_CEILING(*size_p); ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); res = erts_alcu_sys_alloc(allctr, &size, 1); if (res) { @@ -1076,7 +1040,7 @@ erts_alcu_literal_32_sys_realloc(Allctr_t *allctr, void *ptr, Uint* size_p, Uint ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); if (ptr && old_size) clear_literal_range(ptr, old_size); @@ -1093,7 +1057,7 @@ erts_alcu_literal_32_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int sup { ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); erts_alcu_sys_dealloc(allctr, ptr, size, 1); @@ -1191,7 +1155,6 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr) } } -#ifdef ERTS_SMP #ifdef DEBUG static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node) @@ -1292,16 +1255,15 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr) erts_aint_t old_val = new_val|ERTS_CRR_ALCTR_FLG_BUSY; ERTS_ALC_CPOOL_ASSERT(old_val - == erts_smp_atomic_xchg_relb(&crr->allctr, + == erts_atomic_xchg_relb(&crr->allctr, new_val)); } #else - erts_smp_atomic_set_relb(&crr->allctr, new_val); + erts_atomic_set_relb(&crr->allctr, new_val); #endif } } -#endif /* ERTS_SMP */ #if 0 #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \ @@ -1325,12 +1287,10 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before) static void *mbc_alloc(Allctr_t *allctr, Uint size); -#ifdef ERTS_SMP typedef struct { ErtsAllctrDDBlock_t ddblock__; /* must be first */ ErtsAlcType_t fix_type; } ErtsAllctrFixDDBlock_t; -#endif #define ERTS_ALC_FIX_NO_UNUSE (((ErtsAlcType_t) 1) << ERTS_ALC_N_BITS) @@ -1341,11 +1301,9 @@ dealloc_fix_block(Allctr_t *allctr, ErtsAlcFixList_t *fix, int dec_cc_on_redirect) { -#ifdef ERTS_SMP /* May be redirected... 
*/ ASSERT((type & ERTS_ALC_FIX_NO_UNUSE) == 0); ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type | ERTS_ALC_FIX_NO_UNUSE; -#endif dealloc_block(allctr, ptr, fix, dec_cc_on_redirect); } @@ -1379,12 +1337,10 @@ fix_cpool_check_shrink(Allctr_t *allctr, fix->u.cpool.shrink_list = 0; else { void *p; -#ifdef ERTS_SMP if (busy_pcrr_pp) { clear_busy_pool_carrier(allctr, *busy_pcrr_pp); *busy_pcrr_pp = NULL; } -#endif fix->u.cpool.shrink_list--; p = fix->list; fix->list = *((void **) p); @@ -1477,10 +1433,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) int ix, o; int flush = flgs == 0; -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_lock(&allctr->mutex); -#endif for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) { ErtsAlcFixList_t *fix = &allctr->fix[ix]; @@ -1520,10 +1474,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) if (all_empty) sched_fix_shrink(allctr, 0); -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); -#endif return res; } @@ -1635,10 +1587,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) int ix, o; int flush = flgs == 0; -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_lock(&allctr->mutex); -#endif for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) { ErtsAlcFixList_t *fix = &allctr->fix[ix]; @@ -1680,10 +1630,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) if (all_empty) sched_fix_shrink(allctr, 0); -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); -#endif return res; } @@ -1709,7 +1657,6 @@ dealloc_mbc(Allctr_t *allctr, Carrier_t *crr) dealloc_carrier(allctr, crr, 1); } -#ifdef ERTS_SMP static ERTS_INLINE Allctr_t* get_pref_allctr(void *extra) @@ -1750,7 +1697,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, crr = BLK_TO_SBC(blk); if (sizep) *sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ; - iallctr = erts_smp_atomic_read_dirty(&crr->allctr); + iallctr = erts_atomic_read_dirty(&crr->allctr); } else { crr = ABLK_TO_MBC(blk); @@ -1758,10 +1705,10 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, if (sizep) *sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ; if (!ERTS_ALC_IS_CPOOL_ENABLED(pref_allctr)) - iallctr = erts_smp_atomic_read_dirty(&crr->allctr); + iallctr = erts_atomic_read_dirty(&crr->allctr); else { int locked_pref_allctr = 0; - iallctr = erts_smp_atomic_read_ddrb(&crr->allctr); + iallctr = erts_atomic_read_ddrb(&crr->allctr); if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock && pref_allctr->thread_safe) { @@ -1777,7 +1724,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, erts_aint_t act; ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY)); - act = erts_smp_atomic_cmpxchg_ddrb(&crr->allctr, + act = erts_atomic_cmpxchg_ddrb(&crr->allctr, iallctr|ERTS_CRR_ALCTR_FLG_BUSY, iallctr); if (act == iallctr) { @@ -2152,10 +2099,10 @@ handle_delayed_dealloc(Allctr_t *allctr, ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr)); ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr); ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) - != (erts_smp_atomic_read_nob(&crr->allctr) + != (erts_atomic_read_nob(&crr->allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); - erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); schedule_dealloc_carrier(allctr, crr); } @@ -2201,9 +2148,7 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type, erts_alloc_notify_delayed_dealloc(allctr->ix); } -#endif -#ifdef ERTS_SMP static void 
set_new_allctr_abandon_limit(Allctr_t *allctr); static void @@ -2265,7 +2210,6 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr, thr_prgr_p, more_work); } -#endif #define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \ handle_delayed_dealloc((Allctr), (Locked), 1, \ @@ -2276,24 +2220,18 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_ { Block_t *blk = UMEM2BLK(ptr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); if (IS_SBC_BLK(blk)) { destroy_carrier(allctr, blk, NULL); -#ifdef ERTS_SMP if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type; if (!(type & ERTS_ALC_FIX_NO_UNUSE)) fix->u.cpool.used--; fix->u.cpool.allocated--; } -#endif } -#ifndef ERTS_SMP - else - mbc_free(allctr, ptr, NULL); -#else else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) mbc_free(allctr, ptr, NULL); else { @@ -2323,7 +2261,6 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_ erts_alloc_notify_delayed_dealloc(used_allctr->ix); } } -#endif } /* Multi block carrier alloc/realloc/free ... */ @@ -2571,9 +2508,7 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp) else { (*allctr->link_free_block)(allctr, blk); HARD_CHECK_BLK_CARRIER(allctr, blk); -#ifdef ERTS_SMP check_abandon_carrier(allctr, blk, busy_pcrr_pp); -#endif } } @@ -2607,10 +2542,8 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, return NULL; #else /* !MBC_REALLOC_ALWAYS_MOVES */ -#ifdef ERTS_SMP if (busy_pcrr_pp && *busy_pcrr_pp) goto realloc_move; /* Don't want to use carrier in pool */ -#endif get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size); @@ -2731,9 +2664,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, HARD_CHECK_BLK_CARRIER(allctr, blk); -#ifdef ERTS_SMP check_abandon_carrier(allctr, nxt_blk, NULL); -#endif return p; } @@ -2845,9 +2776,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, if (cand_blk_sz < get_blk_sz) { /* We wont fit in cand_blk get a new one */ -#ifdef ERTS_SMP realloc_move: -#endif #endif /* !MBC_REALLOC_ALWAYS_MOVES */ new_p = mbc_alloc(allctr, size); @@ -2949,7 +2878,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, #endif /* !MBC_REALLOC_ALWAYS_MOVES */ } -#ifdef ERTS_SMP #define ERTS_ALC_MAX_DEALLOC_CARRIER 10 #define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20 @@ -3120,7 +3048,7 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr) ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ || erts_thr_progress_is_managed_thread()); - ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr) + ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr) == (erts_aint_t) allctr); erts_atomic_add_nob(&allctr->cpool.stat.blocks_size, @@ -3190,7 +3118,7 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr) (erts_aint_t) &crr->cpool, (erts_aint_t) cpd1p); - erts_smp_atomic_set_wb(&crr->allctr, + erts_atomic_set_wb(&crr->allctr, ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL); LTTNG3(carrier_pool_put, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, CARRIER_SZ(crr)); } @@ -3322,11 +3250,11 @@ cpool_fetch(Allctr_t *allctr, UWord size) ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl)); ASSERT(crr->cpool.orig_allctr == allctr); dl = dl->next; - exp = erts_smp_atomic_read_rb(&crr->allctr); + exp = erts_atomic_read_rb(&crr->allctr); if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL && erts_atomic_read_nob(&crr->cpool.max_size) 
>= size) { /* Try to fetch it... */ - act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + act = erts_atomic_cmpxchg_mb(&crr->allctr, (erts_aint_t) allctr, exp); if (act == exp) { @@ -3368,12 +3296,12 @@ cpool_fetch(Allctr_t *allctr, UWord size) ASSERT(dl != &allctr->cpool.pooled_list); ASSERT(crr->cpool.orig_allctr == allctr); dl = dl->next; - exp = erts_smp_atomic_read_rb(&crr->allctr); + exp = erts_atomic_read_rb(&crr->allctr); if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) { if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY) && erts_atomic_read_nob(&crr->cpool.max_size) >= size) { /* Try to fetch it... */ - act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + act = erts_atomic_cmpxchg_mb(&crr->allctr, (erts_aint_t) allctr, exp); if (act == exp) { @@ -3449,12 +3377,12 @@ cpool_fetch(Allctr_t *allctr, UWord size) has_passed_sentinel = 1; } crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool)); - exp = erts_smp_atomic_read_rb(&crr->allctr); + exp = erts_atomic_read_rb(&crr->allctr); if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL) && (erts_atomic_read_nob(&cpdp->max_size) >= size)) { erts_aint_t act; /* Try to fetch it... */ - act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + act = erts_atomic_cmpxchg_mb(&crr->allctr, (erts_aint_t) allctr, exp); if (act == exp) { @@ -3477,11 +3405,11 @@ check_dc_list: Block_t* blk; unlink_carrier(&allctr->cpool.dc_list, crr); #ifdef ERTS_ALC_CPOOL_DEBUG - ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr, + ERTS_ALC_CPOOL_ASSERT(erts_atomic_xchg_nob(&crr->allctr, ((erts_aint_t) allctr)) == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); #else - erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); #endif blk = MBC_TO_FIRST_BLK(allctr, crr); ASSERT(FBLK_TO_MBC(blk) == crr); @@ -3584,7 +3512,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(blk)); ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, blk)); ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) - == (erts_smp_atomic_read_nob(&crr->allctr) + == (erts_atomic_read_nob(&crr->allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(blk), cinit)) @@ -3735,7 +3663,6 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord * } -#endif /* ERTS_SMP */ #ifdef DEBUG @@ -3836,7 +3763,6 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz); } -#ifdef ERTS_SMP allctr->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON; if ((flags & (CFLG_MBC|CFLG_NO_CPOOL)) == CFLG_MBC @@ -3852,7 +3778,6 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) return blk; } } -#endif #if HAVE_ERTS_MSEG @@ -3982,9 +3907,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) allctr->main_carrier = crr; } -#ifdef ERTS_SMP cpool_init_carrier_data(allctr, crr); -#endif link_carrier(&allctr->mbc_list, crr); @@ -4204,19 +4127,17 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) } #endif -#ifdef ERTS_SMP if (busy_pcrr_pp && *busy_pcrr_pp) { ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr); *busy_pcrr_pp = NULL; - ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr) + ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr) == (((erts_aint_t) allctr) | ERTS_CRR_ALCTR_FLG_IN_POOL | ERTS_CRR_ALCTR_FLG_BUSY)); - erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); cpool_delete(allctr, 
allctr, crr); } else -#endif { unlink_carrier(&allctr->mbc_list, crr); #if HAVE_ERTS_MSEG @@ -4247,11 +4168,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) } #endif -#ifdef ERTS_SMP schedule_dealloc_carrier(allctr, crr); -#else - dealloc_mbc(allctr, crr); -#endif } } @@ -4294,9 +4211,7 @@ static struct { Eterm fix_types; Eterm mbcs; -#ifdef ERTS_SMP Eterm mbcs_pool; -#endif Eterm sbcs; Eterm sys_alloc_carriers_size; @@ -4384,9 +4299,7 @@ init_atoms(Allctr_t *allctr) AM_INIT(fix_types); AM_INIT(mbcs); -#ifdef ERTS_SMP AM_INIT(mbcs_pool); -#endif AM_INIT(sbcs); AM_INIT(sys_alloc_carriers_size); @@ -4636,7 +4549,6 @@ sz_info_carriers(Allctr_t *allctr, return res; } -#ifdef ERTS_SMP static Eterm info_cpool(Allctr_t *allctr, @@ -4690,7 +4602,6 @@ info_cpool(Allctr_t *allctr, return res; } -#endif /* ERTS_SMP */ static Eterm info_carriers(Allctr_t *allctr, @@ -4945,11 +4856,7 @@ info_options(Allctr_t *allctr, return res; } -#ifdef ERTS_SMP acul = allctr->cpool.util_limit; -#else - acul = 0; -#endif if (print_to_p) { char topt[21]; /* Enough for any 64-bit integer */ @@ -5132,19 +5039,15 @@ erts_alcu_info_options(Allctr_t *allctr, if (hpp || szp) ensure_atoms_initialized(allctr); -#ifdef USE_THREADS if (allctr->thread_safe) { erts_allctr_wrapper_pre_lock(); erts_mtx_lock(&allctr->mutex); } -#endif res = info_options(allctr, print_to_p, print_to_arg, hpp, szp); -#ifdef USE_THREADS if (allctr->thread_safe) { erts_mtx_unlock(&allctr->mutex); erts_allctr_wrapper_pre_unlock(); } -#endif return res; } @@ -5160,9 +5063,7 @@ erts_alcu_sz_info(Allctr_t *allctr, Uint *szp) { Eterm res, mbcs, sbcs, fix = THE_NON_VALUE; -#ifdef ERTS_SMP Eterm mbcs_pool; -#endif res = THE_NON_VALUE; @@ -5177,12 +5078,10 @@ erts_alcu_sz_info(Allctr_t *allctr, if (hpp || szp) ensure_atoms_initialized(allctr); -#ifdef USE_THREADS if (allctr->thread_safe) { erts_allctr_wrapper_pre_lock(); erts_mtx_lock(&allctr->mutex); } -#endif ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5198,23 +5097,19 @@ erts_alcu_sz_info(Allctr_t *allctr, fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp); mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p, print_to_arg, hpp, szp); -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) mbcs_pool = info_cpool(allctr, 1, "mbcs_pool ", print_to_p, print_to_arg, hpp, szp); else mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... 
*/ -#endif sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p, print_to_arg, hpp, szp); if (hpp || szp) { res = NIL; add_2tup(hpp, szp, &res, am.sbcs, sbcs); -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool); -#endif add_2tup(hpp, szp, &res, am.mbcs, mbcs); add_fix_types(allctr, internal, hpp, szp, &res, fix); } @@ -5225,12 +5120,10 @@ erts_alcu_sz_info(Allctr_t *allctr, } -#ifdef USE_THREADS if (allctr->thread_safe) { erts_mtx_unlock(&allctr->mutex); erts_allctr_wrapper_pre_unlock(); } -#endif return res; } @@ -5246,9 +5139,7 @@ erts_alcu_info(Allctr_t *allctr, Uint *szp) { Eterm res, sett, mbcs, sbcs, calls, fix = THE_NON_VALUE; -#ifdef ERTS_SMP Eterm mbcs_pool; -#endif res = THE_NON_VALUE; @@ -5263,12 +5154,10 @@ erts_alcu_info(Allctr_t *allctr, if (hpp || szp) ensure_atoms_initialized(allctr); -#ifdef USE_THREADS if (allctr->thread_safe) { erts_allctr_wrapper_pre_lock(); erts_mtx_lock(&allctr->mutex); } -#endif ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5293,13 +5182,11 @@ erts_alcu_info(Allctr_t *allctr, fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp); mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p, print_to_arg, hpp, szp); -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) mbcs_pool = info_cpool(allctr, 0, "mbcs_pool ", print_to_p, print_to_arg, hpp, szp); else mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */ -#endif sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p, print_to_arg, hpp, szp); calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp); @@ -5309,10 +5196,8 @@ erts_alcu_info(Allctr_t *allctr, add_2tup(hpp, szp, &res, am.calls, calls); add_2tup(hpp, szp, &res, am.sbcs, sbcs); -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool); -#endif add_2tup(hpp, szp, &res, am.mbcs, mbcs); add_fix_types(allctr, internal, hpp, szp, &res, fix); add_2tup(hpp, szp, &res, am.options, sett); @@ -5328,12 +5213,10 @@ erts_alcu_info(Allctr_t *allctr, } -#ifdef USE_THREADS if (allctr->thread_safe) { erts_mtx_unlock(&allctr->mutex); erts_allctr_wrapper_pre_unlock(); } -#endif return res; } @@ -5343,10 +5226,8 @@ void erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz) { -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_lock(&allctr->mutex); -#endif size->carriers = allctr->mbcs.curr.norm.mseg.size; size->carriers += allctr->mbcs.curr.norm.sys_alloc.size; @@ -5356,14 +5237,12 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t * size->blocks = allctr->mbcs.blocks.curr.size; size->blocks += allctr->sbcs.blocks.curr.size; -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { UWord csz, bsz; cpool_read_stat(allctr, NULL, &csz, NULL, &bsz); size->blocks += bsz; size->carriers += csz; } -#endif if (fi) { int ix; @@ -5385,10 +5264,8 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t * } } -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); -#endif } /* ----------------------------------------------------------------------- */ @@ -5403,7 +5280,7 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) ASSERT(allctr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5436,18 +5313,13 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) 
void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) { void *res; -#ifdef ERTS_SMP ASSERT(!"This is not thread safe"); -#elif defined(USE_THREADS) - ASSERT(erts_equal_tids(erts_main_thread, erts_thr_self())); -#endif res = do_erts_alcu_alloc(type, extra, size); DEBUG_CHECK_ALIGNMENT(res); return res; } -#ifdef USE_THREADS void * erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size) @@ -5463,7 +5335,6 @@ erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size) return res; } -#ifdef ERTS_SMP void * erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size) @@ -5503,21 +5374,17 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size) if (pref_allctr->thread_safe) erts_mtx_lock(&pref_allctr->mutex); -#ifdef ERTS_SMP ASSERT(pref_allctr->dd.use); ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1); -#endif ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr); res = do_erts_alcu_alloc(type, pref_allctr, size); -#ifdef ERTS_SMP if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) { /* Cleaned up a bit more; try one more time... */ res = do_erts_alcu_alloc(type, pref_allctr, size); } -#endif if (pref_allctr->thread_safe) erts_mtx_unlock(&pref_allctr->mutex); @@ -5528,9 +5395,7 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size) return res; } -#endif -#endif /* ------------------------------------------------------------------------- */ @@ -5543,7 +5408,7 @@ do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p, ASSERT(allctr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5573,7 +5438,6 @@ void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p) do_erts_alcu_free(type, extra, p, NULL); } -#ifdef USE_THREADS void erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p) @@ -5584,7 +5448,6 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p) erts_mtx_unlock(&allctr->mutex); } -#ifdef ERTS_SMP void erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p) @@ -5634,9 +5497,7 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p) } } -#endif -#endif /* ------------------------------------------------------------------------- */ @@ -5656,7 +5517,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type, ASSERT(allctr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5785,7 +5646,6 @@ erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size) } -#ifdef USE_THREADS void * erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size) @@ -5824,7 +5684,6 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size) return res; } -#ifdef ERTS_SMP void * erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra, @@ -5905,9 +5764,7 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size, Allctr_t *pref_allctr, *used_allctr; UWord old_user_size; Carrier_t *busy_pcrr_p; -#ifdef ERTS_SMP int retried; -#endif if (!p) return erts_alcu_alloc_thr_pref(type, extra, size); @@ -5917,12 +5774,10 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size, if (pref_allctr->thread_safe) erts_mtx_lock(&pref_allctr->mutex); -#ifdef ERTS_SMP ASSERT(pref_allctr->dd.use); ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1); retried = 0; restart: -#endif used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_NO, p, &old_user_size, 
&busy_pcrr_p); @@ -5938,13 +5793,11 @@ restart: 0, &busy_pcrr_p); clear_busy_pool_carrier(used_allctr, busy_pcrr_p); -#ifdef ERTS_SMP if (!res && !retried && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) { /* Cleaned up a bit more; try one more time... */ retried = 1; goto restart; } -#endif if (pref_allctr->thread_safe) erts_mtx_unlock(&pref_allctr->mutex); } @@ -5999,9 +5852,7 @@ erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra, return realloc_thr_pref(type, extra, p, size, 1); } -#endif -#endif /* ------------------------------------------------------------------------- */ @@ -6022,10 +5873,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) sys_memcpy((void *) &allctr->mseg_opt, (void *) &erts_mseg_default_opt, sizeof(ErtsMsegOpt_t)); -#ifdef ERTS_SMP if (init->tspec || init->tpref) allctr->mseg_opt.sched_spec = 1; -#endif /* ERTS_SMP */ #endif /* HAVE_ERTS_MSEG */ allctr->name_prefix = init->name_prefix; @@ -6083,7 +5932,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) goto error; allctr->min_block_size = UNIT_CEILING(allctr->min_block_size + sizeof(FreeBlkFtr_t)); -#ifdef ERTS_SMP if (init->tpref) { Uint sz = ABLK_HDR_SZ; sz += (init->fix ? @@ -6107,7 +5955,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0); allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT; allctr->cpool.util_limit = init->ts ? 0 : init->acul; -#endif allctr->sbc_threshold = init->sbct; #ifndef ARCH_64 @@ -6131,7 +5978,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100; #endif -#ifdef USE_THREADS if (init->ts) { allctr->thread_safe = 1; @@ -6142,7 +5988,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->debug.saved_tid = 0; #endif } -#endif if(!allctr->get_free_block || !allctr->link_free_block @@ -6155,14 +6000,12 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) if (allctr->mbc_header_size < sizeof(Carrier_t)) goto error; -#ifdef ERTS_SMP allctr->dd.use = 0; if (init->tpref) { allctr->dd.use = 1; init_dd_queue(&allctr->dd.q); allctr->dd.ix = init->ix; } -#endif allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size + ABLK_HDR_SZ) - ABLK_HDR_SZ); @@ -6216,10 +6059,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) | CFLG_NO_CPOOL | CFLG_MAIN_CARRIER); if (!blk) { -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_destroy(&allctr->mutex); -#endif erts_exit(ERTS_ABORT_EXIT, "Failed to create main carrier for %salloc\n", init->name_prefix); @@ -6239,9 +6080,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->fix[i].type_size = init->fix_type_size[i]; allctr->fix[i].list_size = 0; allctr->fix[i].list = NULL; -#ifdef ERTS_SMP ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t)); -#endif if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { allctr->fix[i].u.cpool.min_list_size = 0; allctr->fix[i].u.cpool.shrink_list = 0; @@ -6261,10 +6100,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) error: -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_destroy(&allctr->mutex); -#endif return 0; @@ -6282,10 +6119,8 @@ erts_alcu_stop(Allctr_t *allctr) while (allctr->mbc_list.first) destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first), NULL); -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_destroy(&allctr->mutex); -#endif } @@ -6294,14 +6129,12 @@ erts_alcu_stop(Allctr_t *allctr) void erts_alcu_init(AlcUInit_t *init) { -#ifdef ERTS_SMP int i; for (i = 0; i <= 
ERTS_ALC_A_MAX; i++) { ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel; erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel); erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel); } -#endif ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */ #if HAVE_ERTS_MSEG ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ); @@ -6369,7 +6202,6 @@ erts_alcu_test(UWord op, UWord a1, UWord a2) case 0x01c: return (UWord) BLK_TO_MBC((Block_t*) a1); case 0x01d: ((Allctr_t*) a1)->add_mbc((Allctr_t*)a1, (Carrier_t*)a2); break; case 0x01e: ((Allctr_t*) a1)->remove_mbc((Allctr_t*)a1, (Carrier_t*)a2); break; -#ifdef ERTS_SMP case 0x01f: return (UWord) sizeof(ErtsAlcCrrPool_t); case 0x020: SET_CARRIER_HDR((Carrier_t *) a2, 0, SCH_SYS_ALLOC|SCH_MBC, (Allctr_t *) a1); @@ -6383,14 +6215,6 @@ erts_alcu_test(UWord op, UWord a1, UWord a2) return (UWord) a2; case 0x023: return (UWord) cpool_is_empty((Allctr_t *) a1); case 0x024: return (UWord) cpool_dbg_is_in_pool((Allctr_t *) a1, (Carrier_t *) a2); -#else - case 0x01f: return (UWord) 0; - case 0x020: return (UWord) 0; - case 0x021: return (UWord) 0; - case 0x022: return (UWord) 0; - case 0x023: return (UWord) 0; - case 0x024: return (UWord) 0; -#endif case 0x025: /* UMEM2BLK_TEST*/ #ifdef DEBUG # ifdef HARD_DEBUG @@ -6448,13 +6272,9 @@ erts_alcu_verify_unused(Allctr_t *allctr) void erts_alcu_verify_unused_ts(Allctr_t *allctr) { -#ifdef USE_THREADS erts_mtx_lock(&allctr->mutex); -#endif erts_alcu_verify_unused(allctr); -#ifdef USE_THREADS erts_mtx_unlock(&allctr->mutex); -#endif } #ifdef DEBUG diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index aa13cda9fb..faeb5ef368 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -24,10 +24,8 @@ #define ERTS_ALCU_VSN_STR "3.0" #include "erl_alloc_types.h" -#ifdef USE_THREADS #define ERL_THREADS_EMU_INTERNAL__ #include "erl_threads.h" -#endif #include "erl_mseg.h" #include "lttng-wrapper.h" @@ -162,12 +160,10 @@ void * erts_alcu_alloc(ErtsAlcType_t, void *, Uint); void * erts_alcu_realloc(ErtsAlcType_t, void *, void *, Uint); void * erts_alcu_realloc_mv(ErtsAlcType_t, void *, void *, Uint); void erts_alcu_free(ErtsAlcType_t, void *, void *); -#ifdef USE_THREADS void * erts_alcu_alloc_ts(ErtsAlcType_t, void *, Uint); void * erts_alcu_realloc_ts(ErtsAlcType_t, void *, void *, Uint); void * erts_alcu_realloc_mv_ts(ErtsAlcType_t, void *, void *, Uint); void erts_alcu_free_ts(ErtsAlcType_t, void *, void *); -#ifdef ERTS_SMP void * erts_alcu_alloc_thr_spec(ErtsAlcType_t, void *, Uint); void * erts_alcu_realloc_thr_spec(ErtsAlcType_t, void *, void *, Uint); void * erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t, void *, void *, Uint); @@ -176,8 +172,6 @@ void * erts_alcu_alloc_thr_pref(ErtsAlcType_t, void *, Uint); void * erts_alcu_realloc_thr_pref(ErtsAlcType_t, void *, void *, Uint); void * erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t, void *, void *, Uint); void erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *); -#endif -#endif Eterm erts_alcu_au_info_options(fmtfn_t *, void *, Uint **, Uint *); Eterm erts_alcu_info_options(Allctr_t *, fmtfn_t *, void *, Uint **, Uint *); Eterm erts_alcu_sz_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *); @@ -185,9 +179,7 @@ Eterm erts_alcu_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *); void erts_alcu_init(AlcUInit_t *); void erts_alcu_current_size(Allctr_t *, AllctrSize_t *, ErtsAlcUFixInfo_t *, int); -#ifdef ERTS_SMP void 
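/*
 * Illustrative sketch (condensed from erts_alcu_alloc_thr_pref() earlier in
 * this file; not part of the patch): with the SMP conditionals removed, the
 * thread-preferred allocators always keep a delayed-dealloc queue for blocks
 * freed from other threads, and an allocation drains that queue before
 * giving up:
 *
 *     if (pref_allctr->thread_safe)
 *         erts_mtx_lock(&pref_allctr->mutex);
 *     ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);            // drain delayed frees
 *     res = do_erts_alcu_alloc(type, pref_allctr, size);
 *     if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1))
 *         res = do_erts_alcu_alloc(type, pref_allctr, size); // one retry
 *     if (pref_allctr->thread_safe)
 *         erts_mtx_unlock(&pref_allctr->mutex);
 */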
erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, ErtsThrPrgrVal *, int *); -#endif erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); #ifdef ARCH_32 @@ -304,7 +296,6 @@ void erts_lcnt_update_allocator_locks(int enable); typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t; -#ifdef ERTS_SMP typedef struct ErtsDoubleLink_t_ { struct ErtsDoubleLink_t_ *next; @@ -323,21 +314,18 @@ typedef struct { ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */ } ErtsAlcCPoolData_t; -#endif typedef struct Carrier_t_ Carrier_t; struct Carrier_t_ { UWord chdr; Carrier_t *next; Carrier_t *prev; - erts_smp_atomic_t allctr; -#ifdef ERTS_SMP + erts_atomic_t allctr; ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */ -#endif }; #define ERTS_ALC_CARRIER_TO_ALLCTR(C) \ - ((Allctr_t *) (erts_smp_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) + ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) typedef struct { Carrier_t *first; @@ -430,7 +418,6 @@ typedef struct { } while (0) #endif -#ifdef ERTS_SMP typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t; @@ -473,7 +460,6 @@ typedef struct { } head; } ErtsAllctrDDQueue_t; -#endif typedef struct { size_t type_size; @@ -496,7 +482,6 @@ typedef struct { } ErtsAlcFixList_t; struct Allctr_t_ { -#ifdef ERTS_SMP struct { /* * We want the queue at the beginning of @@ -507,7 +492,6 @@ struct Allctr_t_ { int use; int ix; } dd; -#endif /* Allocator name prefix */ char * name_prefix; @@ -556,7 +540,6 @@ struct Allctr_t_ { /* Carriers */ CarrierList_t mbc_list; CarrierList_t sbc_list; -#ifdef ERTS_SMP struct { /* pooled_list, traitor list and dc_list contain only carriers _created_ by this allocator */ @@ -575,7 +558,6 @@ struct Allctr_t_ { erts_atomic_t no_carriers; } stat; } cpool; -#endif /* Main carrier (if there is one) */ Carrier_t * main_carrier; @@ -618,7 +600,6 @@ struct Allctr_t_ { int fix_shrink_scheduled; ErtsAlcFixList_t *fix; -#ifdef USE_THREADS /* Mutex for this allocator */ erts_mtx_t mutex; int thread_safe; @@ -627,7 +608,6 @@ struct Allctr_t_ { Allctr_t *next; } ts_list; -#endif int atoms_initialized; @@ -650,13 +630,11 @@ struct Allctr_t_ { CarriersStats_t mbcs; #ifdef DEBUG -#ifdef USE_THREADS struct { int saved_tid; erts_tid_t tid; } debug; #endif -#endif }; int erts_alcu_start(Allctr_t *, AllctrInit_t *); diff --git a/erts/emulator/beam/erl_arith.c b/erts/emulator/beam/erl_arith.c index 861532f241..f2a3e411ec 100644 --- a/erts/emulator/beam/erl_arith.c +++ b/erts/emulator/beam/erl_arith.c @@ -276,8 +276,12 @@ shift(Process* p, Eterm arg1, Eterm arg2, int right) goto do_bsl; } else if (is_small(arg1) || is_big(arg1)) { /* - * N bsl PositiveBigNum is too large to represent. + * N bsl PositiveBigNum is too large to represent, + * unless N is 0. */ + if (arg1 == make_small(0)) { + BIF_RET(arg1); + } BIF_ERROR(p, SYSTEM_LIMIT); } /* Fall through if the left argument is not an integer. 
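 *
 * Illustrative behavioural note on the guard added above (not part of the
 * patch): a zero left operand is the only integer whose left shift by a
 * positive bignum is still representable, so
 *
 *     if (arg1 == make_small(0)) {
 *         BIF_RET(arg1);              // 0 bsl PositiveBigNum is 0
 *     }
 *     BIF_ERROR(p, SYSTEM_LIMIT);     // any other integer would overflow
 *
 * means that, for example, 0 bsl (1 bsl 64) now returns 0 where it previously
 * exited with system_limit; non-zero left operands still raise system_limit.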
*/ diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c index 9a93034fcb..3ceb2fd368 100644 --- a/erts/emulator/beam/erl_async.c +++ b/erts/emulator/beam/erl_async.c @@ -34,9 +34,6 @@ #define ERTS_ASYNC_PRINT_JOB 0 -#if !defined(ERTS_SMP) && defined(USE_THREADS) && !ERTS_USE_ASYNC_READY_Q -# error "Need async ready queue in non-smp case" -#endif typedef struct _erl_async { DE_Handle* hndl; /* The DE_Handle is needed when port is gone */ @@ -46,16 +43,13 @@ typedef struct _erl_async { ErlDrvPDL pdl; void (*async_invoke)(void*); void (*async_free)(void*); -#if ERTS_USE_ASYNC_READY_Q Uint sched_id; union { ErtsThrQPrepEnQ_t *prep_enq; ErtsThrQFinDeQ_t fin_deq; } q; -#endif } ErtsAsync; -#if ERTS_USE_ASYNC_READY_Q /* * We can do without the enqueue mutex since it isn't needed for @@ -94,7 +88,6 @@ typedef union { char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncReadyQ))]; } ErtsAlgndAsyncReadyQ; -#endif /* ERTS_USE_ASYNC_READY_Q */ typedef struct { ErtsThrQ_t thr_q; @@ -119,12 +112,10 @@ typedef struct { char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncInit))]; } init; ErtsAlgndAsyncQ *queue; -#if ERTS_USE_ASYNC_READY_Q ErtsAlgndAsyncReadyQ *ready_queue; -#endif } ErtsAsyncData; -#if defined(USE_THREADS) && defined(USE_VM_PROBES) +#if defined(USE_VM_PROBES) /* * Some compilers, e.g. GCC 4.2.1 and -O3, will optimize away DTrace @@ -140,15 +131,6 @@ int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */ static ErtsAsyncData *async; -#ifndef USE_THREADS - -void -erts_init_async(void) -{ - -} - -#else static void *async_main(void *); @@ -158,7 +140,6 @@ async_q(int i) return &async->queue[i].aq; } -#if ERTS_USE_ASYNC_READY_Q static ERTS_INLINE ErtsAsyncReadyQ * async_ready_q(Uint sched_id) @@ -166,16 +147,13 @@ async_ready_q(Uint sched_id) return &async->ready_queue[((int)sched_id)-1].arq; } -#endif void erts_init_async(void) { async = NULL; if (erts_async_max_threads > 0) { -#if ERTS_USE_ASYNC_READY_Q ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT; -#endif erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER; char *ptr, thr_name[16]; size_t tot_size = 0; @@ -183,9 +161,7 @@ erts_init_async(void) tot_size += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData)); tot_size += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads; -#if ERTS_USE_ASYNC_READY_Q tot_size += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers; -#endif ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_ASYNC_DATA, tot_size); @@ -202,7 +178,6 @@ erts_init_async(void) async->queue = (ErtsAlgndAsyncQ *) ptr; ptr += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads; -#if ERTS_USE_ASYNC_READY_Q qinit.live.queue = ERTS_THR_Q_LIVE_LONG; qinit.live.objects = ERTS_THR_Q_LIVE_SHORT; @@ -222,7 +197,6 @@ erts_init_async(void) erts_thr_q_initialize(&arq->thr_q, &qinit); } -#endif /* Create async threads... */ @@ -253,7 +227,6 @@ erts_init_async(void) } } -#if ERTS_USE_ASYNC_READY_Q void * erts_get_async_ready_queue(Uint sched_id) @@ -261,7 +234,6 @@ erts_get_async_ready_queue(Uint sched_id) return (void *) async ? 
async_ready_q(sched_id) : NULL; } -#endif static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q) { @@ -270,10 +242,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q) #endif if (is_internal_port(a->port)) { -#if ERTS_USE_ASYNC_READY_Q ErtsAsyncReadyQ *arq = async_ready_q(a->sched_id); a->q.prep_enq = erts_thr_q_prepare_enqueue(&arq->thr_q); -#endif /* make sure the driver will stay around */ if (a->hndl) erts_ddll_reference_referenced_driver(a->hndl); @@ -309,10 +279,8 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, erts_tse_t *tse, ErtsThrQPrepEnQ_t **prep_enq) { -#if ERTS_USE_ASYNC_READY_Q int saved_fin_deq = 0; ErtsThrQFinDeQ_t fin_deq; -#endif #ifdef USE_VM_PROBES int len; #endif @@ -321,12 +289,10 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q); if (a) { -#if ERTS_USE_ASYNC_READY_Q *prep_enq = a->q.prep_enq; erts_thr_q_get_finalize_dequeue_data(q, &a->q.fin_deq); if (saved_fin_deq) erts_thr_q_append_finalize_dequeue_data(&a->q.fin_deq, &fin_deq); -#endif #ifdef USE_LTTNG_VM_TRACEPOINTS if (LTTNG_ENABLED(aio_pool_get)) { lttng_decl_portbuf(port_str); @@ -354,7 +320,6 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, erts_tse_reset(tse); -#if ERTS_USE_ASYNC_READY_Q chk_fin_deq: if (erts_thr_q_get_finalize_dequeue_data(q, &tmp_fin_deq)) { if (!saved_fin_deq) { @@ -364,13 +329,11 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, erts_thr_q_append_finalize_dequeue_data(&fin_deq, &tmp_fin_deq); } -#endif switch (erts_thr_q_inspect(q, 1)) { case ERTS_THR_Q_DIRTY: break; case ERTS_THR_Q_NEED_THR_PRGR: -#ifdef ERTS_SMP { ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q); erts_thr_progress_wakeup(NULL, prgr); @@ -382,17 +345,14 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, erts_tse_wait(tse); break; } -#endif case ERTS_THR_Q_CLEAN: -#if ERTS_USE_ASYNC_READY_Q if (saved_fin_deq) { if (erts_thr_q_finalize_dequeue(&fin_deq)) goto chk_fin_deq; else saved_fin_deq = 0; } -#endif erts_tse_wait(tse); break; @@ -408,15 +368,10 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, static ERTS_INLINE void call_async_ready(ErtsAsync *a) { -#if ERTS_USE_ASYNC_READY_Q Port *p = erts_id2port_sflgs(a->port, NULL, 0, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); -#else - Port *p = erts_thr_id2port_sflgs(a->port, - ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); -#endif if (!p) { if (a->async_free) { ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_PORT); @@ -432,11 +387,7 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a) ERTS_MSACC_POP_STATE(); } } -#if ERTS_USE_ASYNC_READY_Q erts_port_release(p); -#else - erts_thr_port_release(p); -#endif } if (a->pdl) driver_pdl_dec_refc(a->pdl); @@ -446,7 +397,6 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a) static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq) { -#if ERTS_USE_ASYNC_READY_Q ErtsAsyncReadyQ *arq; #if ERTS_ASYNC_PRINT_JOB @@ -465,12 +415,6 @@ static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq) erts_mtx_unlock(&arq->x.data.enq_mtx); #endif -#else /* ERTS_USE_ASYNC_READY_Q */ - - call_async_ready(a); - erts_free(ERTS_ALC_T_ASYNC, (void *) a); - -#endif /* ERTS_USE_ASYNC_READY_Q */ } @@ -486,7 +430,6 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq) erts_tse_t *tse = erts_tse_fetch(); ERTS_DECLARE_DUMMY(Uint no); -#ifdef ERTS_SMP ErtsThrPrgrCallbacks callbacks; callbacks.arg = (void *) tse; @@ -495,15 +438,12 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq) 
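/*
 * Illustrative sketch of the async job round trip now that the per-scheduler
 * ready queue is unconditional (condensed from driver_async(), async_reply()
 * and erts_check_async_ready() in this file; not part of the patch):
 *
 *     a->sched_id = erts_get_scheduler_id();   // scheduler to report back to
 *     qix = *key % erts_async_max_threads;     // stable queue when a key is given
 *     async_add(a, async_q(qix));              // hand the job to a pool thread
 *
 * A pool thread dequeues the job with async_get(), runs
 * (*a->async_invoke)(a->async_data), and async_reply() then enqueues the
 * finished job on async_ready_q(a->sched_id), i.e. the ready queue of the
 * scheduler that issued it; that scheduler later drains the queue through
 * erts_check_async_ready().
 */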
callbacks.wait = NULL; erts_thr_progress_register_unmanaged_thread(&callbacks); -#endif qinit.live.queue = ERTS_THR_Q_LIVE_LONG; qinit.live.objects = ERTS_THR_Q_LIVE_SHORT; qinit.arg = (void *) tse; qinit.notify = async_wakeup; -#if ERTS_USE_ASYNC_READY_Q qinit.auto_finalize_dequeue = 0; -#endif erts_thr_q_initialize(&aq->thr_q, &qinit); @@ -545,12 +485,10 @@ static void *async_main(void* arg) return NULL; } -#endif /* USE_THREADS */ void erts_exit_flush_async(void) { -#ifdef USE_THREADS int i; ErtsAsync a; a.port = NIL; @@ -564,11 +502,8 @@ erts_exit_flush_async(void) async_add(&a, async_q(i)); for (i = 0; i < erts_async_max_threads; i++) erts_thr_join(async->queue[i].aq.thr_id, NULL); -#endif } -#if defined(USE_THREADS) && ERTS_USE_ASYNC_READY_Q - int erts_check_async_ready(void *varq) { ErtsAsyncReadyQ *arq = (ErtsAsyncReadyQ *) varq; @@ -609,18 +544,15 @@ int erts_async_ready_clean(void *varq, void *val) case ERTS_THR_Q_DIRTY: return ERTS_ASYNC_READY_DIRTY; case ERTS_THR_Q_NEED_THR_PRGR: -#ifdef ERTS_SMP *((ErtsThrPrgrVal *) val) = erts_thr_q_need_thr_progress(&arq->thr_q); return ERTS_ASYNC_READY_NEED_THR_PRGR; -#endif case ERTS_THR_Q_CLEAN: break; } return ERTS_ASYNC_READY_CLEAN; } -#endif /* ** Generate a fair async key prom an ErlDrvPort @@ -658,28 +590,22 @@ long driver_async(ErlDrvPort ix, unsigned int* key, Port* prt; long id; unsigned int qix; -#if ERTS_USE_ASYNC_READY_Q Uint sched_id; ERTS_MSACC_PUSH_STATE(); sched_id = erts_get_scheduler_id(); if (!sched_id) sched_id = 1; -#else - ERTS_MSACC_PUSH_STATE(); -#endif prt = erts_drvport2port(ix); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync)); -#if ERTS_USE_ASYNC_READY_Q a->sched_id = sched_id; -#endif a->hndl = (DE_Handle*)prt->drv_ptr->handle; a->port = prt->common.id; a->pdl = NULL; @@ -709,7 +635,6 @@ long driver_async(ErlDrvPort ix, unsigned int* key, (*key % erts_async_max_threads) : 0; *key = qix; } -#ifdef USE_THREADS if (erts_async_max_threads > 0) { if (prt->port_data_lock) { driver_pdl_inc_refc(prt->port_data_lock); @@ -718,7 +643,6 @@ long driver_async(ErlDrvPort ix, unsigned int* key, async_add(a, async_q(qix)); return id; } -#endif ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT); (*a->async_invoke)(a->async_data); diff --git a/erts/emulator/beam/erl_async.h b/erts/emulator/beam/erl_async.h index 4b470e7679..70ef247e0a 100644 --- a/erts/emulator/beam/erl_async.h +++ b/erts/emulator/beam/erl_async.h @@ -27,39 +27,12 @@ extern int erts_async_max_threads; #define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */ extern int erts_async_thread_suggested_stack_size; - -#ifdef ERTS_SMP -/* - * With smp support we can choose to have, or not to - * have an async ready queue. 
- */ -#define ERTS_USE_ASYNC_READY_Q 1 -#endif - -#ifndef ERTS_SMP -/* In non-smp case we *need* the async ready queue */ -# undef ERTS_USE_ASYNC_READY_Q -# define ERTS_USE_ASYNC_READY_Q 1 -#endif - -#ifndef ERTS_USE_ASYNC_READY_Q -# define ERTS_USE_ASYNC_READY_Q 0 -#endif - -#ifndef USE_THREADS -# undef ERTS_USE_ASYNC_READY_Q -# define ERTS_USE_ASYNC_READY_Q 0 -#endif /* !USE_THREADS */ -#if ERTS_USE_ASYNC_READY_Q int erts_check_async_ready(void *); int erts_async_ready_clean(void *, void *); void *erts_get_async_ready_queue(Uint sched_id); #define ERTS_ASYNC_READY_CLEAN 0 #define ERTS_ASYNC_READY_DIRTY 1 -#ifdef ERTS_SMP #define ERTS_ASYNC_READY_NEED_THR_PRGR 2 -#endif -#endif /* ERTS_USE_ASYNC_READY_Q */ void erts_init_async(void); void erts_exit_flush_async(void); diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c index dcffde5777..4cafa499a9 100644 --- a/erts/emulator/beam/erl_bif_binary.c +++ b/erts/emulator/beam/erl_bif_binary.c @@ -171,6 +171,16 @@ static void *my_alloc(MyAllocator *my, Uint size) #define ALPHABET_SIZE 256 +typedef struct _findall_data { + Uint pos; + Uint len; +#ifdef HARDDEBUG + Uint id; +#endif + Eterm epos; + Eterm elen; +} FindallData; + typedef struct _ac_node { #ifdef HARDDEBUG Uint32 id; /* To identify h pointer targets when @@ -208,6 +218,103 @@ typedef struct _bm_data { Sint badshift[ALPHABET_SIZE]; } BMData; +typedef struct _ac_find_all_state { + ACNode *q; + Uint pos; + Uint len; + Uint m; + Uint allocated; + FindallData *out; +} ACFindAllState; + +typedef struct _ac_find_first_state { + ACNode *q; + Uint pos; + Uint len; + ACNode *candidate; + Uint candidate_start; +} ACFindFirstState; + +typedef struct _bm_find_all_state { + Sint pos; + Sint len; + Uint m; + Uint allocated; + FindallData *out; +} BMFindAllState; + +typedef struct _bm_find_first_state { + Sint pos; + Sint len; +} BMFindFirstState; + +typedef enum _bf_return { + BF_RESTART = -3, + BF_NOT_FOUND, + BF_BADARG, + BF_OK +} BFReturn; + +typedef struct _binary_find_all_context { + ErtsHeapFactory factory; + Eterm term; + Sint head; + Sint tail; + Uint end_pos; + Uint size; + FindallData *data; + union { + ACFindAllState ac; + BMFindAllState bm; + } d; +} BinaryFindAllContext; + +typedef struct _binary_find_first_context { + Uint pos; + Uint len; + union { + ACFindFirstState ac; + BMFindFirstState bm; + } d; +} BinaryFindFirstContext; + +typedef struct _binary_find_context BinaryFindContext; + +typedef struct _binary_find_search { + void (*init) (BinaryFindContext *); + BFReturn (*find) (BinaryFindContext *, byte *); + void (*done) (BinaryFindContext *); +} BinaryFindSearch; + +typedef Eterm (*BinaryFindResult)(Process *, Eterm, BinaryFindContext **); + +typedef enum _binary_find_state { + BFSearch, + BFResult, + BFDone +} BinaryFindState; + +struct _binary_find_context { + Eterm pat_type; + Eterm pat_term; + Binary *pat_bin; + Uint flags; + Uint hsstart; + Uint hsend; + int loop_factor; + int exported; + Uint reds; + BinaryFindState state; + Eterm trap_term; + BinaryFindSearch *search; + BinaryFindResult not_found; + BinaryFindResult found; + union { + BinaryFindAllContext fa; + BinaryFindFirstContext ff; + } u; +}; + #ifdef HARDDEBUG static void dump_bm_data(BMData *bm); static void dump_ac_trie(ACTrie *act); @@ -414,32 +521,25 @@ static void ac_compute_failure_functions(ACTrie *act, ACNode **qbuff) * Basic AC finds the first end before the first start... 
* */ -typedef struct { - ACNode *q; - Uint pos; - Uint len; - ACNode *candidate; - Uint candidate_start; -} ACFindFirstState; - - -static void ac_init_find_first_match(ACFindFirstState *state, ACTrie *act, Sint startpos, Uint len) +static void ac_init_find_first_match(BinaryFindContext *ctx) { + ACFindFirstState *state = &(ctx->u.ff.d.ac); + ACTrie *act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); state->q = act->root; - state->pos = startpos; - state->len = len; + state->pos = ctx->hsstart; + state->len = ctx->hsend; state->candidate = NULL; state->candidate_start = 0; } -#define AC_OK 0 -#define AC_NOT_FOUND -1 -#define AC_RESTART -2 #define AC_LOOP_FACTOR 10 -static int ac_find_first_match(ACFindFirstState *state, byte *haystack, - Uint *mpos, Uint *mlen, Uint *reductions) +static BFReturn ac_find_first_match(BinaryFindContext *ctx, byte *haystack) { + ACFindFirstState *state = &(ctx->u.ff.d.ac); + Uint *mpos = &(ctx->u.ff.pos); + Uint *mlen = &(ctx->u.ff.len); + Uint *reductions = &(ctx->reds); ACNode *q = state->q; Uint i = state->pos; ACNode *candidate = state->candidate, *r; @@ -455,7 +555,7 @@ static int ac_find_first_match(ACFindFirstState *state, byte *haystack, state->len = len; state->candidate = candidate; state->candidate_start = candidate_start; - return AC_RESTART; + return BF_RESTART; } while (q->g[haystack[i]] == NULL && q->h != q) { @@ -485,68 +585,33 @@ static int ac_find_first_match(ACFindFirstState *state, byte *haystack, } *reductions = reds; if (!candidate) { - return AC_NOT_FOUND; + return BF_NOT_FOUND; } #ifdef HARDDEBUG dump_ac_node(candidate,0,'?'); #endif *mpos = candidate_start; *mlen = candidate->d; - return AC_OK; + return BF_OK; } -typedef struct _findall_data { - Uint pos; - Uint len; -#ifdef HARDDEBUG - Uint id; -#endif - Eterm epos; - Eterm elen; -} FindallData; - -typedef struct { - ACNode *q; - Uint pos; - Uint len; - Uint m; - Uint allocated; - FindallData *out; -} ACFindAllState; - -static void ac_init_find_all(ACFindAllState *state, ACTrie *act, Sint startpos, Uint len) +static void ac_init_find_all(BinaryFindContext *ctx) { + ACFindAllState *state = &(ctx->u.fa.d.ac); + ACTrie *act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); state->q = act->root; - state->pos = startpos; - state->len = len; + state->pos = ctx->hsstart; + state->len = ctx->hsend; state->m = 0; state->allocated = 0; state->out = NULL; } -static void ac_restore_find_all(ACFindAllState *state, - const ACFindAllState *src) -{ - memcpy(state, src, sizeof(ACFindAllState)); - if (state->allocated > 0) { - state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * (state->allocated)); - memcpy(state->out, src+1, sizeof(FindallData)*state->m); - } else { - state->out = NULL; - } -} - -static void ac_serialize_find_all(const ACFindAllState *state, - ACFindAllState *dst) -{ - memcpy(dst, state, sizeof(ACFindAllState)); - memcpy(dst+1, state->out, sizeof(FindallData)*state->m); -} - -static void ac_clean_find_all(ACFindAllState *state) +static void ac_clean_find_all(BinaryFindContext *ctx) { + ACFindAllState *state = &(ctx->u.fa.d.ac); if (state->out != NULL) { - erts_free(ERTS_ALC_T_TMP, state->out); + erts_free(ERTS_ALC_T_BINARY_FIND, state->out); } #ifdef HARDDEBUG state->out = NULL; @@ -558,9 +623,10 @@ static void ac_clean_find_all(ACFindAllState *state) * Differs to the find_first function in that it stores all matches and the values * arte returned only in the state. 
*/ -static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, - Uint *reductions) +static BFReturn ac_find_all_non_overlapping(BinaryFindContext *ctx, byte *haystack) { + ACFindAllState *state = &(ctx->u.fa.d.ac); + Uint *reductions = &(ctx->reds); ACNode *q = state->q; Uint i = state->pos; Uint rstart; @@ -571,7 +637,6 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, FindallData *out = state->out; register Uint reds = *reductions; - while (i < len) { if (--reds == 0) { state->q = q; @@ -580,7 +645,7 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, state->m = m; state->allocated = allocated; state->out = out; - return AC_RESTART; + return BF_RESTART; } while (q->g[haystack[i]] == NULL && q->h != q) { q = q->h; @@ -618,11 +683,11 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, if (m >= allocated) { if (!allocated) { allocated = 10; - out = erts_alloc(ERTS_ALC_T_TMP, + out = erts_alloc(ERTS_ALC_T_BINARY_FIND, sizeof(FindallData) * allocated); } else { allocated *= 2; - out = erts_realloc(ERTS_ALC_T_TMP, out, + out = erts_realloc(ERTS_ALC_T_BINARY_FIND, out, sizeof(FindallData) * allocated); } @@ -649,7 +714,7 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, *reductions = reds; state->m = m; state->out = out; - return (m == 0) ? AC_NOT_FOUND : AC_OK; + return (m == 0) ? BF_NOT_FOUND : BF_OK; } /* @@ -736,27 +801,22 @@ static void compute_goodshifts(BMData *bmd) erts_free(ERTS_ALC_T_TMP, suffixes); } -typedef struct { - Sint pos; - Sint len; -} BMFindFirstState; - -#define BM_OK 0 /* used only for find_all */ -#define BM_NOT_FOUND -1 -#define BM_RESTART -2 #define BM_LOOP_FACTOR 10 /* Should we have a higher value? 
*/ -static void bm_init_find_first_match(BMFindFirstState *state, Sint startpos, - Uint len) +static void bm_init_find_first_match(BinaryFindContext *ctx) { - state->pos = startpos; - state->len = (Sint) len; + BMFindFirstState *state = &(ctx->u.ff.d.bm); + state->pos = ctx->hsstart; + state->len = ctx->hsend; } - -static Sint bm_find_first_match(BMFindFirstState *state, BMData *bmd, - byte *haystack, Uint *reductions) +static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack) { + BMFindFirstState *state = &(ctx->u.ff.d.bm); + BMData *bmd = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); + Uint *mpos = &(ctx->u.ff.pos); + Uint *mlen = &(ctx->u.ff.len); + Uint *reductions = &(ctx->reds); Sint blen = bmd->len; Sint len = state->len; Sint *gs = bmd->goodshift; @@ -769,61 +829,37 @@ static Sint bm_find_first_match(BMFindFirstState *state, BMData *bmd, while (j <= len - blen) { if (--reds == 0) { state->pos = j; - return BM_RESTART; + return BF_RESTART; } for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i) ; if (i < 0) { /* found */ *reductions = reds; - return j; + *mpos = (Uint) j; + *mlen = (Uint) blen; + return BF_OK; } j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i); } *reductions = reds; - return BM_NOT_FOUND; + return BF_NOT_FOUND; } -typedef struct { - Sint pos; - Sint len; - Uint m; - Uint allocated; - FindallData *out; -} BMFindAllState; - -static void bm_init_find_all(BMFindAllState *state, Sint startpos, Uint len) +static void bm_init_find_all(BinaryFindContext *ctx) { - state->pos = startpos; - state->len = (Sint) len; + BMFindAllState *state = &(ctx->u.fa.d.bm); + state->pos = ctx->hsstart; + state->len = ctx->hsend; state->m = 0; state->allocated = 0; state->out = NULL; } -static void bm_restore_find_all(BMFindAllState *state, - const BMFindAllState *src) -{ - memcpy(state, src, sizeof(BMFindAllState)); - if (state->allocated > 0) { - state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * - (state->allocated)); - memcpy(state->out, src+1, sizeof(FindallData)*state->m); - } else { - state->out = NULL; - } -} - -static void bm_serialize_find_all(const BMFindAllState *state, - BMFindAllState *dst) -{ - memcpy(dst, state, sizeof(BMFindAllState)); - memcpy(dst+1, state->out, sizeof(FindallData)*state->m); -} - -static void bm_clean_find_all(BMFindAllState *state) +static void bm_clean_find_all(BinaryFindContext *ctx) { + BMFindAllState *state = &(ctx->u.fa.d.bm); if (state->out != NULL) { - erts_free(ERTS_ALC_T_TMP, state->out); + erts_free(ERTS_ALC_T_BINARY_FIND, state->out); } #ifdef HARDDEBUG state->out = NULL; @@ -835,10 +871,11 @@ static void bm_clean_find_all(BMFindAllState *state) * Differs to the find_first function in that it stores all matches and the * values are returned only in the state. 
*/ -static Sint bm_find_all_non_overlapping(BMFindAllState *state, - BMData *bmd, byte *haystack, - Uint *reductions) +static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haystack) { + BMFindAllState *state = &(ctx->u.fa.d.bm); + BMData *bmd = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); + Uint *reductions = &(ctx->reds); Sint blen = bmd->len; Sint len = state->len; Sint *gs = bmd->goodshift; @@ -857,7 +894,7 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state, state->m = m; state->allocated = allocated; state->out = out; - return BM_RESTART; + return BF_RESTART; } for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i) ; @@ -865,10 +902,11 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state, if (m >= allocated) { if (!allocated) { allocated = 10; - out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * allocated); + out = erts_alloc(ERTS_ALC_T_BINARY_FIND, + sizeof(FindallData) * allocated); } else { allocated *= 2; - out = erts_realloc(ERTS_ALC_T_TMP, out, + out = erts_realloc(ERTS_ALC_T_BINARY_FIND, out, sizeof(FindallData) * allocated); } } @@ -883,7 +921,7 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state, state->m = m; state->out = out; *reductions = reds; - return (m == 0) ? BM_NOT_FOUND : BM_OK; + return (m == 0) ? BF_NOT_FOUND : BF_OK; } /* @@ -1009,51 +1047,160 @@ BIF_RETTYPE binary_compile_pattern_1(BIF_ALIST_1) BIF_RET(ret); } -#define DO_BIN_MATCH_OK 0 -#define DO_BIN_MATCH_BADARG -1 -#define DO_BIN_MATCH_RESTART -2 +#define BF_FLAG_GLOBAL 0x01 +#define BF_FLAG_SPLIT_TRIM 0x02 +#define BF_FLAG_SPLIT_TRIM_ALL 0x04 -#define BINARY_FIND_ALL 0x01 -#define BINARY_SPLIT_TRIM 0x02 -#define BINARY_SPLIT_TRIM_ALL 0x04 +static void bf_context_init(BinaryFindContext *ctx, BinaryFindResult not_found, + BinaryFindResult single, BinaryFindResult global, + Binary *pat_bin); +static BinaryFindContext *bf_context_export(Process *p, BinaryFindContext *src); +static int bf_context_destructor(Binary *ctx_bin); +#ifdef HARDDEBUG +static void bf_context_dump(BinaryFindContext *ctx); +#endif -typedef struct BinaryFindState { - Eterm type; - Uint flags; - Uint hsstart; - Uint hsend; - Eterm (*not_found_result) (Process *, Eterm, struct BinaryFindState *); - Eterm (*single_result) (Process *, Eterm, struct BinaryFindState *, Sint, Sint); - Eterm (*global_result) (Process *, Eterm, struct BinaryFindState *, FindallData *, Uint); -} BinaryFindState; +static BinaryFindSearch bf_search_ac_global = { + ac_init_find_all, + ac_find_all_non_overlapping, + ac_clean_find_all +}; + +static BinaryFindSearch bf_search_ac_single = { + ac_init_find_first_match, + ac_find_first_match, + NULL +}; + +static BinaryFindSearch bf_search_bm_global = { + bm_init_find_all, + bm_find_all_non_overlapping, + bm_clean_find_all +}; + +static BinaryFindSearch bf_search_bm_single = { + bm_init_find_first_match, + bm_find_first_match, + NULL +}; + +static void bf_context_init(BinaryFindContext *ctx, BinaryFindResult not_found, + BinaryFindResult single, BinaryFindResult global, + Binary *pat_bin) +{ + ctx->exported = 0; + ctx->state = BFSearch; + ctx->not_found = not_found; + if (ctx->flags & BF_FLAG_GLOBAL) { + ctx->found = global; + if (ctx->pat_type == am_bm) { + ctx->search = &bf_search_bm_global; + ctx->loop_factor = BM_LOOP_FACTOR; + } else if (ctx->pat_type == am_ac) { + ctx->search = &bf_search_ac_global; + ctx->loop_factor = AC_LOOP_FACTOR; + } + } else { + ctx->found = single; + if (ctx->pat_type == am_bm) { + ctx->search = &bf_search_bm_single; + 
ctx->loop_factor = BM_LOOP_FACTOR; + } else if (ctx->pat_type == am_ac) { + ctx->search = &bf_search_ac_single; + ctx->loop_factor = AC_LOOP_FACTOR; + } + } + ctx->trap_term = THE_NON_VALUE; + ctx->pat_bin = pat_bin; + ctx->search->init(ctx); +} -typedef struct BinaryFindState_bignum { - Eterm bignum_hdr; - BinaryFindState bfs; - union { - BMFindFirstState bmffs; - BMFindAllState bmfas; - ACFindFirstState acffs; - ACFindAllState acfas; - } data; -} BinaryFindState_bignum; - -#define SIZEOF_BINARY_FIND_STATE(S) \ - (sizeof(BinaryFindState)+sizeof(S)) - -#define SIZEOF_BINARY_FIND_ALL_STATE(S) \ - (sizeof(BinaryFindState)+sizeof(S)+(sizeof(FindallData)*(S).m)) - -static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs); -static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindState *bfs, - Sint pos, Sint len); -static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindState *bfs, - FindallData *fad, Uint fad_sz); -static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs); -static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState *bfs, - Sint pos, Sint len); -static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState *bfs, - FindallData *fad, Uint fad_sz); +static BinaryFindContext *bf_context_export(Process *p, BinaryFindContext *src) +{ + Binary *ctx_bin; + BinaryFindContext *ctx; + Eterm *hp; + + ASSERT(src->exported == 0); + ctx_bin = erts_create_magic_binary(sizeof(BinaryFindContext), + bf_context_destructor); + ctx = ERTS_MAGIC_BIN_DATA(ctx_bin); + sys_memcpy(ctx, src, sizeof(BinaryFindContext)); + if (ctx->pat_bin != NULL && ctx->pat_term == THE_NON_VALUE) { + hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE * 2); + ctx->pat_term = erts_mk_magic_ref(&hp, &MSO(p), ctx->pat_bin); + } else { + hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE); + } + ctx->trap_term = erts_mk_magic_ref(&hp, &MSO(p), ctx_bin); + ctx->exported = 1; + return ctx; +} + +static int bf_context_destructor(Binary *ctx_bin) +{ + BinaryFindContext *ctx; + + ctx = ERTS_MAGIC_BIN_DATA(ctx_bin); + if (ctx->state != BFDone) { + if (ctx->search->done != NULL) { + ctx->search->done(ctx); + } + ctx->state = BFDone; + } + return 1; +} + +#ifdef HARDDEBUG +static void bf_context_dump(BinaryFindContext *ctx) +{ + if (ctx->pat_type == am_bm) { + BMData *bm; + bm = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); + dump_bm_data(bm); + } else { + ACTrie *act; + act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); + dump_ac_trie(act); + } +} +#endif + +static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp); + +static BFReturn maybe_binary_match_compile(BinaryFindContext *ctx, Eterm arg, Binary **pat_bin) +{ + Eterm *tp; + ctx->pat_term = THE_NON_VALUE; + if (is_tuple(arg)) { + tp = tuple_val(arg); + if (arityval(*tp) != 2 || is_not_atom(tp[1])) { + return BF_BADARG; + } + if (((tp[1] != am_bm) && (tp[1] != am_ac)) || + !is_internal_magic_ref(tp[2])) { + return BF_BADARG; + } + *pat_bin = erts_magic_ref2bin(tp[2]); + if ((tp[1] == am_bm && + 
ERTS_MAGIC_BIN_DESTRUCTOR(*pat_bin) != cleanup_my_data_bm) || + (tp[1] == am_ac && + ERTS_MAGIC_BIN_DESTRUCTOR(*pat_bin) != cleanup_my_data_ac)) { + *pat_bin = NULL; + return BF_BADARG; + } + ctx->pat_type = tp[1]; + ctx->pat_term = tp[2]; + } else if (do_binary_match_compile(arg, &(ctx->pat_type), pat_bin) != 0) { + return BF_BADARG; + } + return BF_OK; +} static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp) { @@ -1134,17 +1281,17 @@ static int parse_split_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp, Uin Uint orig_size; if (is_atom(t)) { if (t == am_global) { - *optp |= BINARY_FIND_ALL; + *optp |= BF_FLAG_GLOBAL; l = CDR(list_val(l)); continue; } if (t == am_trim) { - *optp |= BINARY_SPLIT_TRIM; + *optp |= BF_FLAG_SPLIT_TRIM; l = CDR(list_val(l)); continue; } if (t == am_trim_all) { - *optp |= BINARY_SPLIT_TRIM_ALL; + *optp |= BF_FLAG_SPLIT_TRIM_ALL; l = CDR(list_val(l)); continue; } @@ -1197,266 +1344,160 @@ static int parse_split_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp, Uin } } -static int do_binary_find(Process *p, Eterm subject, BinaryFindState *bfs, Binary *bin, - Eterm state_term, Eterm *res_term) +static BFReturn do_binary_find(Process *p, Eterm subject, BinaryFindContext **ctxp, + Binary *pat_bin, Binary *ctx_bin, Eterm *res_term) { - byte *bytes; - Uint bitoffs, bitsize; - byte *temp_alloc = NULL; - BinaryFindState_bignum *state_ptr = NULL; + BinaryFindContext *ctx; + int is_first_call; + Uint initial_reds; + BFReturn runres; - ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize); - if (bitsize != 0) { - goto badarg; - } - if (bitoffs != 0) { - bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc); - } - if (state_term != NIL) { - state_ptr = (BinaryFindState_bignum *)(big_val(state_term)); - bfs = &(state_ptr->bfs); + if (ctx_bin == NULL) { + is_first_call = 1; + ctx = *ctxp; + } else { + is_first_call = 0; + ctx = ERTS_MAGIC_BIN_DATA(ctx_bin); + ctx->pat_bin = pat_bin; + *ctxp = ctx; } - if (bfs->flags & BINARY_FIND_ALL) { - if (bfs->type == am_bm) { - BMData *bm; - Sint pos; - BMFindAllState state; - Uint reds = get_reds(p, BM_LOOP_FACTOR); - Uint save_reds = reds; + initial_reds = ctx->reds = get_reds(p, ctx->loop_factor); - bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin); -#ifdef HARDDEBUG - dump_bm_data(bm); -#endif - if (state_term == NIL) { - bm_init_find_all(&state, bfs->hsstart, bfs->hsend); - } else { - bm_restore_find_all(&state, &(state_ptr->data.bmfas)); - } + switch (ctx->state) { + case BFSearch: { + byte *bytes; + Uint bitoffs, bitsize; + byte *temp_alloc = NULL; - pos = bm_find_all_non_overlapping(&state, bm, bytes, &reds); - if (pos == BM_NOT_FOUND) { - *res_term = bfs->not_found_result(p, subject, bfs); - } else if (pos == BM_RESTART) { - int x = - (SIZEOF_BINARY_FIND_ALL_STATE(state) / sizeof(Eterm)) + - !!(SIZEOF_BINARY_FIND_ALL_STATE(state) % sizeof(Eterm)); -#ifdef HARDDEBUG - erts_printf("Trap bm!\n"); -#endif - state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1); - state_ptr->bignum_hdr = make_pos_bignum_header(x); - memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState)); - bm_serialize_find_all(&state, &state_ptr->data.bmfas); - *res_term = make_big(&state_ptr->bignum_hdr); - erts_free_aligned_binary_bytes(temp_alloc); - bm_clean_find_all(&state); - return DO_BIN_MATCH_RESTART; - } else { - *res_term = bfs->global_result(p, subject, bfs, state.out, state.m); - } - erts_free_aligned_binary_bytes(temp_alloc); - bm_clean_find_all(&state); - BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR); - return 
DO_BIN_MATCH_OK; - } else if (bfs->type == am_ac) { - ACTrie *act; - int acr; - ACFindAllState state; - Uint reds = get_reds(p, AC_LOOP_FACTOR); - Uint save_reds = reds; - - act = (ACTrie *) ERTS_MAGIC_BIN_DATA(bin); + ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize); + if (bitsize != 0) { + goto badarg; + } + if (bitoffs != 0) { + bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc); + } #ifdef HARDDEBUG - dump_ac_trie(act); + bf_context_dump(ctx); #endif - if (state_term == NIL) { - ac_init_find_all(&state, act, bfs->hsstart, bfs->hsend); - } else { - ac_restore_find_all(&state, &(state_ptr->data.acfas)); - } - acr = ac_find_all_non_overlapping(&state, bytes, &reds); - if (acr == AC_NOT_FOUND) { - *res_term = bfs->not_found_result(p, subject, bfs); - } else if (acr == AC_RESTART) { - int x = - (SIZEOF_BINARY_FIND_ALL_STATE(state) / sizeof(Eterm)) + - !!(SIZEOF_BINARY_FIND_ALL_STATE(state) % sizeof(Eterm)); + runres = ctx->search->find(ctx, bytes); + if (runres == BF_NOT_FOUND) { + *res_term = ctx->not_found(p, subject, &ctx); + *ctxp = ctx; + } else if (runres == BF_RESTART) { #ifdef HARDDEBUG + if (ctx->pat_type == am_ac) { erts_printf("Trap ac!\n"); -#endif - state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1); - state_ptr->bignum_hdr = make_pos_bignum_header(x); - memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState)); - ac_serialize_find_all(&state, &state_ptr->data.acfas); - *res_term = make_big(&state_ptr->bignum_hdr); - erts_free_aligned_binary_bytes(temp_alloc); - ac_clean_find_all(&state); - return DO_BIN_MATCH_RESTART; - } else { - *res_term = bfs->global_result(p, subject, bfs, state.out, state.m); - } - erts_free_aligned_binary_bytes(temp_alloc); - ac_clean_find_all(&state); - BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR); - return DO_BIN_MATCH_OK; - } - } else { - if (bfs->type == am_bm) { - BMData *bm; - Sint pos; - BMFindFirstState state; - Uint reds = get_reds(p, BM_LOOP_FACTOR); - Uint save_reds = reds; - - bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin); -#ifdef HARDDEBUG - dump_bm_data(bm); -#endif - if (state_term == NIL) { - bm_init_find_first_match(&state, bfs->hsstart, bfs->hsend); } else { - memcpy(&state, &state_ptr->data.bmffs, sizeof(BMFindFirstState)); - } - -#ifdef HARDDEBUG - erts_printf("(bm) state->pos = %ld, state->len = %lu\n",state.pos, - state.len); -#endif - pos = bm_find_first_match(&state, bm, bytes, &reds); - if (pos == BM_NOT_FOUND) { - *res_term = bfs->not_found_result(p, subject, bfs); - } else if (pos == BM_RESTART) { - int x = - (SIZEOF_BINARY_FIND_STATE(state) / sizeof(Eterm)) + - !!(SIZEOF_BINARY_FIND_STATE(state) % sizeof(Eterm)); -#ifdef HARDDEBUG erts_printf("Trap bm!\n"); + } #endif - state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1); - state_ptr->bignum_hdr = make_pos_bignum_header(x); - memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState)); - memcpy(&state_ptr->data.acffs, &state, sizeof(BMFindFirstState)); - *res_term = make_big(&state_ptr->bignum_hdr); - erts_free_aligned_binary_bytes(temp_alloc); - return DO_BIN_MATCH_RESTART; - } else { - *res_term = bfs->single_result(p, subject, bfs, pos, bm->len); + if (is_first_call) { + ctx = bf_context_export(p, ctx); + *ctxp = ctx; + erts_set_gc_state(p, 0); } erts_free_aligned_binary_bytes(temp_alloc); - BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR); - return DO_BIN_MATCH_OK; - } else if (bfs->type == am_ac) { - ACTrie *act; - Uint pos, rlen; - int acr; - ACFindFirstState state; - Uint reds = get_reds(p, AC_LOOP_FACTOR); - Uint save_reds = reds; - - act = (ACTrie *) 
ERTS_MAGIC_BIN_DATA(bin); -#ifdef HARDDEBUG - dump_ac_trie(act); -#endif - if (state_term == NIL) { - ac_init_find_first_match(&state, act, bfs->hsstart, bfs->hsend); - } else { - memcpy(&state, &state_ptr->data.acffs, sizeof(ACFindFirstState)); + *res_term = THE_NON_VALUE; + BUMP_ALL_REDS(p); + return BF_RESTART; + } else { + *res_term = ctx->found(p, subject, &ctx); + *ctxp = ctx; + } + erts_free_aligned_binary_bytes(temp_alloc); + if (*res_term == THE_NON_VALUE) { + if (is_first_call) { + erts_set_gc_state(p, 0); } - acr = ac_find_first_match(&state, bytes, &pos, &rlen, &reds); - if (acr == AC_NOT_FOUND) { - *res_term = bfs->not_found_result(p, subject, bfs); - } else if (acr == AC_RESTART) { - int x = - (SIZEOF_BINARY_FIND_STATE(state) / sizeof(Eterm)) + - !!(SIZEOF_BINARY_FIND_STATE(state) % sizeof(Eterm)); -#ifdef HARDDEBUG - erts_printf("Trap ac!\n"); -#endif - state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1); - state_ptr->bignum_hdr = make_pos_bignum_header(x); - memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState)); - memcpy(&state_ptr->data.acffs, &state, sizeof(ACFindFirstState)); - *res_term = make_big(&state_ptr->bignum_hdr); - erts_free_aligned_binary_bytes(temp_alloc); - return DO_BIN_MATCH_RESTART; - } else { - *res_term = bfs->single_result(p, subject, bfs, pos, rlen); + BUMP_ALL_REDS(p); + return BF_RESTART; + } + if (ctx->search->done != NULL) { + ctx->search->done(ctx); + } + ctx->state = BFDone; + if (!is_first_call) { + erts_set_gc_state(p, 1); + } + BUMP_REDS(p, (initial_reds - ctx->reds) / ctx->loop_factor); + return BF_OK; + } + case BFResult: { + *res_term = ctx->found(p, subject, &ctx); + *ctxp = ctx; + if (*res_term == THE_NON_VALUE) { + if (is_first_call) { + erts_set_gc_state(p, 0); } - erts_free_aligned_binary_bytes(temp_alloc); - BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR); - return DO_BIN_MATCH_OK; + BUMP_ALL_REDS(p); + return BF_RESTART; + } + if (ctx->search->done != NULL) { + ctx->search->done(ctx); } + ctx->state = BFDone; + if (!is_first_call) { + erts_set_gc_state(p, 1); + } + BUMP_REDS(p, (initial_reds - ctx->reds) / ctx->loop_factor); + return BF_OK; } - badarg: - return DO_BIN_MATCH_BADARG; + default: + ASSERT(!"Unknown state in do_binary_find"); + } + +badarg: + if (!is_first_call) { + if (ctx->search->done != NULL) { + ctx->search->done(ctx); + } + ctx->state = BFDone; + erts_set_gc_state(p, 1); + } + return BF_BADARG; } static BIF_RETTYPE binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Uint flags) { - BinaryFindState bfs; - Eterm *tp; - Binary *bin; - Eterm bin_term = NIL; + BinaryFindContext c_buff; + BinaryFindContext *ctx = &c_buff; + Binary *pat_bin; int runres; Eterm result; - if (is_not_binary(arg1)) { + if (is_not_binary(arg1) || binary_bitsize(arg1) != 0) { goto badarg; } - bfs.flags = flags; - if (parse_match_opts_list(arg3, arg1, &(bfs.hsstart), &(bfs.hsend))) { + ctx->flags = flags; + if (parse_match_opts_list(arg3, arg1, &(ctx->hsstart), &(ctx->hsend))) { goto badarg; } - if (bfs.hsend == 0) { - BIF_RET(do_match_not_found_result(p, arg1, &bfs)); + if (ctx->hsend == 0) { + result = do_match_not_found_result(p, arg1, &ctx); + BIF_RET(result); } - if (is_tuple(arg2)) { - tp = tuple_val(arg2); - if (arityval(*tp) != 2 || is_not_atom(tp[1])) { - goto badarg; - } - if (((tp[1] != am_bm) && (tp[1] != am_ac)) || - !is_internal_magic_ref(tp[2])) { - goto badarg; - } - bfs.type = tp[1]; - bin = erts_magic_ref2bin(tp[2]); - if (bfs.type == am_bm && - ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) { - goto badarg; - 
} - if (bfs.type == am_ac && - ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) { - goto badarg; - } - bin_term = tp[2]; - } else if (do_binary_match_compile(arg2, &(bfs.type), &bin)) { + if (maybe_binary_match_compile(ctx, arg2, &pat_bin) != BF_OK) { goto badarg; } - bfs.not_found_result = &do_match_not_found_result; - bfs.single_result = &do_match_single_result; - bfs.global_result = &do_match_global_result; - runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result); - if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) { - Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE); - bin_term = erts_mk_magic_ref(&hp, &MSO(p), bin); - } else if (bin_term == NIL) { - erts_bin_free(bin); + bf_context_init(ctx, do_match_not_found_result, do_match_single_result, + do_match_global_result, pat_bin); + runres = do_binary_find(p, arg1, &ctx, pat_bin, NULL, &result); + if (runres == BF_OK && ctx->pat_term == THE_NON_VALUE) { + erts_bin_free(pat_bin); } switch (runres) { - case DO_BIN_MATCH_OK: + case BF_OK: BIF_RET(result); - case DO_BIN_MATCH_RESTART: - BUMP_ALL_REDS(p); - BIF_TRAP3(&binary_find_trap_export, p, arg1, result, bin_term); + case BF_RESTART: + ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result); + BIF_TRAP3(&binary_find_trap_export, p, arg1, ctx->trap_term, ctx->pat_term); default: goto badarg; } - badarg: - BIF_ERROR(p,BADARG); +badarg: + BIF_ERROR(p, BADARG); } BIF_RETTYPE binary_match_2(BIF_ALIST_2) @@ -1471,76 +1512,52 @@ BIF_RETTYPE binary_match_3(BIF_ALIST_3) BIF_RETTYPE binary_matches_2(BIF_ALIST_2) { - return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, THE_NON_VALUE, BINARY_FIND_ALL); + return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, THE_NON_VALUE, BF_FLAG_GLOBAL); } BIF_RETTYPE binary_matches_3(BIF_ALIST_3) { - return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BINARY_FIND_ALL); + return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BF_FLAG_GLOBAL); } static BIF_RETTYPE binary_split(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) { - BinaryFindState bfs; - Eterm *tp; - Binary *bin; - Eterm bin_term = NIL; + BinaryFindContext c_buff; + BinaryFindContext *ctx = &c_buff; + Binary *pat_bin; int runres; Eterm result; - if (is_not_binary(arg1)) { + if (is_not_binary(arg1) || binary_bitsize(arg1) != 0) { goto badarg; } - if (parse_split_opts_list(arg3, arg1, &(bfs.hsstart), &(bfs.hsend), &(bfs.flags))) { + if (parse_split_opts_list(arg3, arg1, &(ctx->hsstart), &(ctx->hsend), &(ctx->flags))) { goto badarg; } - if (bfs.hsend == 0) { - result = do_split_not_found_result(p, arg1, &bfs); + if (ctx->hsend == 0) { + result = do_split_not_found_result(p, arg1, &ctx); BIF_RET(result); } - if (is_tuple(arg2)) { - tp = tuple_val(arg2); - if (arityval(*tp) != 2 || is_not_atom(tp[1])) { - goto badarg; - } - if (((tp[1] != am_bm) && (tp[1] != am_ac)) || - !is_internal_magic_ref(tp[2])) { - goto badarg; - } - bfs.type = tp[1]; - bin = erts_magic_ref2bin(tp[2]); - if (bfs.type == am_bm && - ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) { - goto badarg; - } - if (bfs.type == am_ac && - ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) { - goto badarg; - } - bin_term = tp[2]; - } else if (do_binary_match_compile(arg2, &(bfs.type), &bin)) { + if (maybe_binary_match_compile(ctx, arg2, &pat_bin) != BF_OK) { goto badarg; } - bfs.not_found_result = &do_split_not_found_result; - bfs.single_result = &do_split_single_result; - bfs.global_result = &do_split_global_result; - runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result); - if (runres == 
DO_BIN_MATCH_RESTART && bin_term == NIL) { - Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE); - bin_term = erts_mk_magic_ref(&hp, &MSO(p), bin); - } else if (bin_term == NIL) { - erts_bin_free(bin); - } - switch(runres) { - case DO_BIN_MATCH_OK: + bf_context_init(ctx, do_split_not_found_result, do_split_single_result, + do_split_global_result, pat_bin); + runres = do_binary_find(p, arg1, &ctx, pat_bin, NULL, &result); + if (runres == BF_OK && ctx->pat_term == THE_NON_VALUE) { + erts_bin_free(pat_bin); + } + switch (runres) { + case BF_OK: BIF_RET(result); - case DO_BIN_MATCH_RESTART: - BIF_TRAP3(&binary_find_trap_export, p, arg1, result, bin_term); + case BF_RESTART: + ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result); + BIF_TRAP3(&binary_find_trap_export, p, arg1, ctx->trap_term, ctx->pat_term); default: goto badarg; } - badarg: +badarg: BIF_ERROR(p, BADARG); } @@ -1554,72 +1571,117 @@ BIF_RETTYPE binary_split_3(BIF_ALIST_3) return binary_split(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); } -static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs) +static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { - if (bfs->flags & BINARY_FIND_ALL) { + if ((*ctxp)->flags & BF_FLAG_GLOBAL) { return NIL; } else { return am_nomatch; } } -static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindState *bfs, - Sint pos, Sint len) +static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { + BinaryFindContext *ctx = (*ctxp); + BinaryFindFirstContext *ff = &(ctx->u.ff); Eterm erlen; Eterm *hp; Eterm ret; - erlen = erts_make_integer((Uint)(len), p); - ret = erts_make_integer(pos, p); + erlen = erts_make_integer((Uint)(ff->len), p); + ret = erts_make_integer(ff->pos, p); hp = HAlloc(p, 3); ret = TUPLE2(hp, ret, erlen); return ret; } -static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindState *bfs, - FindallData *fad, Uint fad_sz) +static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { - Sint i; + BinaryFindContext *ctx = (*ctxp); + BinaryFindAllContext *fa = &(ctx->u.fa); + FindallData *fad; Eterm tpl; - Eterm *hp; - Eterm ret; + Sint i; + register Uint reds = ctx->reds; - for (i = 0; i < fad_sz; ++i) { - fad[i].epos = erts_make_integer(fad[i].pos, p); - fad[i].elen = erts_make_integer(fad[i].len, p); + if (ctx->state == BFSearch) { + if (ctx->pat_type == am_ac) { + fa->data = fa->d.ac.out; + fa->size = fa->d.ac.m; + } else { + fa->data = fa->d.bm.out; + fa->size = fa->d.bm.m; + } + fa->tail = fa->size - 1; + fa->head = 0; + fa->end_pos = 0; + fa->term = NIL; + if (ctx->exported == 0 && ((fa->size * 2) >= reds)) { + ctx = bf_context_export(p, ctx); + *ctxp = ctx; + fa = &(ctx->u.fa); + } + erts_factory_proc_prealloc_init(&(fa->factory), p, fa->size * (3 + 2)); + ctx->state = BFResult; + } + + fad = fa->data; + + if (fa->end_pos == 0) { + for (i = fa->head; i < fa->size; ++i) { + if (--reds == 0) { + ASSERT(ctx->exported == 1); + fa->head = i; + ctx->reds = reds; + return THE_NON_VALUE; + } + fad[i].epos = erts_make_integer(fad[i].pos, p); + fad[i].elen = erts_make_integer(fad[i].len, p); + } + fa->end_pos = 1; + fa->head = fa->tail; } - hp = HAlloc(p, fad_sz * (3 + 2)); - ret = NIL; - for (i = fad_sz - 1; i >= 0; --i) { - tpl = TUPLE2(hp, fad[i].epos, fad[i].elen); - hp += 3; - ret = CONS(hp, tpl, ret); - hp += 2; + + for (i = fa->head; i >= 0; --i) { + if (--reds == 0) { + ASSERT(ctx->exported == 1); + 
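/*
 * Illustrative outline of the yielding pattern used by the result builders
 * (condensed from do_match_global_result()/do_split_global_result() and
 * do_binary_find(); not part of the patch): list construction is charged one
 * reduction per element, and progress is parked in the exported context when
 * the budget runs out:
 *
 *     if (--reds == 0) {             // reduction budget for this slice is gone
 *         fa->head = i;              // remember where to resume
 *         ctx->reds = reds;
 *         return THE_NON_VALUE;      // do_binary_find() returns BF_RESTART and
 *     }                              // the BIF traps via binary_find_trap_export
 *
 * While the context is exported and trapped, erts_set_gc_state(p, 0) keeps
 * garbage collection off so the partially built heap data stays valid; it is
 * turned back on once the context reaches BFDone.
 */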
fa->head = i; + ctx->reds = reds; + return THE_NON_VALUE; + } + tpl = TUPLE2(fa->factory.hp, fad[i].epos, fad[i].elen); + fa->factory.hp += 3; + fa->term = CONS(fa->factory.hp, tpl, fa->term); + fa->factory.hp += 2; } + ctx->reds = reds; + erts_factory_close(&(fa->factory)); - return ret; + return fa->term; } -static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs) +static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { + BinaryFindContext *ctx = (*ctxp); Eterm *hp; Eterm ret; - if (bfs->flags & (BINARY_SPLIT_TRIM | BINARY_SPLIT_TRIM_ALL) + if (ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL) && binary_size(subject) == 0) { - return NIL; + return NIL; } hp = HAlloc(p, 2); ret = CONS(hp, subject, NIL); - return ret; } -static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState *bfs, - Sint pos, Sint len) +static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { + BinaryFindContext *ctx = (*ctxp); + BinaryFindFirstContext *ff = &(ctx->u.ff); + Sint pos; + Sint len; size_t orig_size; Eterm orig; Uint offset; @@ -1630,9 +1692,12 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState * Eterm *hp; Eterm ret; + pos = ff->pos; + len = ff->len; + orig_size = binary_size(subject); - if ((bfs->flags & (BINARY_SPLIT_TRIM | BINARY_SPLIT_TRIM_ALL)) && + if ((ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL)) && (orig_size - pos - len) == 0) { if (pos == 0) { ret = NIL; @@ -1653,7 +1718,7 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState * hp += 2; } } else { - if ((bfs->flags & BINARY_SPLIT_TRIM_ALL) && (pos == 0)) { + if ((ctx->flags & BF_FLAG_SPLIT_TRIM_ALL) && (pos == 0)) { hp = HAlloc(p, 1 * (ERL_SUB_BIN_SIZE + 2)); ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size); sb1 = NULL; @@ -1691,39 +1756,60 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState * return ret; } -static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState *bfs, - FindallData *fad, Uint fad_sz) +static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { - size_t orig_size; + BinaryFindContext *ctx = (*ctxp); + BinaryFindAllContext *fa = &(ctx->u.fa); + FindallData *fad; Eterm orig; + size_t orig_size; Uint offset; Uint bit_offset; Uint bit_size; ErlSubBin *sb; + Uint do_trim; Sint i; - Sint tail; - Uint list_size; - Uint end_pos; - Uint do_trim = bfs->flags & (BINARY_SPLIT_TRIM | BINARY_SPLIT_TRIM_ALL); - Eterm *hp; - Eterm *hendp; - Eterm ret; + register Uint reds = ctx->reds; - tail = fad_sz - 1; - list_size = fad_sz + 1; - orig_size = binary_size(subject); - end_pos = (Uint)(orig_size); + if (ctx->state == BFSearch) { + if (ctx->pat_type == am_ac) { + fa->data = fa->d.ac.out; + fa->size = fa->d.ac.m; + } else { + fa->data = fa->d.bm.out; + fa->size = fa->d.bm.m; + } + fa->tail = fa->size - 1; + fa->head = fa->tail; + orig_size = binary_size(subject); + fa->end_pos = (Uint)(orig_size); + fa->term = NIL; + if (ctx->exported == 0 && ((fa->head + 1) >= reds)) { + ctx = bf_context_export(p, ctx); + *ctxp = ctx; + fa = &(ctx->u.fa); + } + erts_factory_proc_prealloc_init(&(fa->factory), p, (fa->size + 1) * (ERL_SUB_BIN_SIZE + 2)); + ctx->state = BFResult; + } - hp = HAlloc(p, list_size * (ERL_SUB_BIN_SIZE + 2)); - hendp = hp + list_size * (ERL_SUB_BIN_SIZE + 2); ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size); ASSERT(bit_size == 
0); + fad = fa->data; + do_trim = ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL); - ret = NIL; - - for (i = tail; i >= 0; --i) { - sb = (ErlSubBin *)(hp); - sb->size = end_pos - (fad[i].pos + fad[i].len); + for (i = fa->head; i >= 0; --i) { + if (--reds == 0) { + ASSERT(ctx->exported == 1); + fa->head = i; + ctx->reds = reds; + if (!do_trim && (ctx->flags & BF_FLAG_SPLIT_TRIM)) { + ctx->flags &= ~BF_FLAG_SPLIT_TRIM; + } + return THE_NON_VALUE; + } + sb = (ErlSubBin *)(fa->factory.hp); + sb->size = fa->end_pos - (fad[i].pos + fad[i].len); if (!(sb->size == 0 && do_trim)) { sb->thing_word = HEADER_SUB_BIN; sb->offs = offset + fad[i].pos + fad[i].len; @@ -1731,15 +1817,18 @@ static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState * sb->bitoffs = bit_offset; sb->bitsize = 0; sb->is_writable = 0; - hp += ERL_SUB_BIN_SIZE; - ret = CONS(hp, make_binary(sb), ret); - hp += 2; - do_trim &= ~BINARY_SPLIT_TRIM; + fa->factory.hp += ERL_SUB_BIN_SIZE; + fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term); + fa->factory.hp += 2; + do_trim &= ~BF_FLAG_SPLIT_TRIM; } - end_pos = fad[i].pos; + fa->end_pos = fad[i].pos; } - sb = (ErlSubBin *)(hp); + fa->head = i; + ctx->reds = reds; + + sb = (ErlSubBin *)(fa->factory.hp); sb->size = fad[0].pos; if (!(sb->size == 0 && do_trim)) { sb->thing_word = HEADER_SUB_BIN; @@ -1748,26 +1837,31 @@ static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState * sb->bitoffs = bit_offset; sb->bitsize = 0; sb->is_writable = 0; - hp += ERL_SUB_BIN_SIZE; - ret = CONS(hp, make_binary(sb), ret); - hp += 2; + fa->factory.hp += ERL_SUB_BIN_SIZE; + fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term); + fa->factory.hp += 2; } - HRelease(p, hendp, hp); - return ret; + erts_factory_close(&(fa->factory)); + + return fa->term; } static BIF_RETTYPE binary_find_trap(BIF_ALIST_3) { int runres; Eterm result; - Binary *bin = erts_magic_ref2bin(BIF_ARG_3); - - runres = do_binary_find(BIF_P, BIF_ARG_1, NULL, bin, BIF_ARG_2, &result); - if (runres == DO_BIN_MATCH_OK) { + Binary *ctx_bin = erts_magic_ref2bin(BIF_ARG_2); + Binary *pat_bin = erts_magic_ref2bin(BIF_ARG_3); + BinaryFindContext *ctx = NULL; + + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == bf_context_destructor); + runres = do_binary_find(BIF_P, BIF_ARG_1, &ctx, pat_bin, ctx_bin, &result); + if (runres == BF_OK) { + ASSERT(result != THE_NON_VALUE); BIF_RET(result); } else { - BUMP_ALL_REDS(BIF_P); - BIF_TRAP3(&binary_find_trap_export, BIF_P, BIF_ARG_1, result, BIF_ARG_3); + ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result); + BIF_TRAP3(&binary_find_trap_export, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); } } diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index e9bfb39035..f673ef3194 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -50,13 +50,6 @@ #include "dtrace-wrapper.h" #include "lttng-wrapper.h" -#ifdef ERTS_SMP -#define DDLL_SMP 1 -#else -#define DDLL_SMP 0 -#endif - - /* * Local types */ @@ -107,18 +100,18 @@ static void dereference_all_processes(DE_Handle *dh); static void restore_process_references(DE_Handle *dh); static void ddll_no_more_references(void *vdh); -#define lock_drv_list() erts_smp_rwmtx_rwlock(&erts_driver_list_lock) -#define unlock_drv_list() erts_smp_rwmtx_rwunlock(&erts_driver_list_lock) +#define lock_drv_list() erts_rwmtx_rwlock(&erts_driver_list_lock) +#define unlock_drv_list() erts_rwmtx_rwunlock(&erts_driver_list_lock) #define 
assert_drv_list_locked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ - || erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ + || erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define assert_drv_list_rwlocked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)) #define assert_drv_list_rlocked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define assert_drv_list_not_locked() \ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ - && !erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(!erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ + && !erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define FREE_PORT_FLAGS (ERTS_PORT_SFLGS_DEAD & (~ERTS_PORT_SFLG_INITIALIZING)) @@ -134,13 +127,13 @@ kill_ports_driver_unloaded(DE_Handle *dh) if (!prt) continue; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; state = erts_atomic32_read_nob(&prt->state); if (state & FREE_PORT_FLAGS) continue; - erts_smp_port_lock(prt); + erts_port_lock(prt); state = erts_atomic32_read_nob(&prt->state); if (!(state & ERTS_PORT_SFLGS_DEAD) && prt->drv_ptr->handle == dh) @@ -280,10 +273,8 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) path[path_len++] = '/'; sys_strcpy(path+path_len,name); -#if DDLL_SMP - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); lock_drv_list(); -#endif if ((drv = lookup_driver(name)) != NULL) { if (drv->handle == NULL) { /* static_driver */ @@ -404,24 +395,18 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) erts_ddll_reference_driver(dh); ASSERT(dh->status == ERL_DE_RELOAD); dh->status = ERL_DE_FORCE_RELOAD; -#if DDLL_SMP unlock_drv_list(); -#endif kill_ports_driver_unloaded(dh); /* Dereference, eventually causing driver destruction */ -#if DDLL_SMP lock_drv_list(); -#endif erts_ddll_dereference_driver(dh); } -#if DDLL_SMP erts_ddll_reference_driver(dh); unlock_drv_list(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); lock_drv_list(); erts_ddll_dereference_driver(dh); -#endif BIF_P->flags |= F_USING_DDLL; if (monitor) { @@ -432,18 +417,14 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) hp = HAlloc(BIF_P, 3); t = TUPLE2(hp, am_ok, ok_term); } -#if DDLL_SMP unlock_drv_list(); -#endif erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); BIF_RET(t); soft_error: -#if DDLL_SMP unlock_drv_list(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); if (do_build_load_error) { soft_error_term = build_load_error(BIF_P, build_this_load_error); } @@ -452,11 +433,11 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) t = TUPLE2(hp, am_error, soft_error_term); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); BIF_RET(t); error: assert_drv_list_not_locked(); - 
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); if (path != NULL) { erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path); } @@ -518,7 +499,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2) Eterm l; int kill_ports = 0; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); for(l = options; is_list(l); l = CDR(list_val(l))) { Eterm opt = CAR(list_val(l)); @@ -551,9 +532,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2) goto error; } -#if DDLL_SMP lock_drv_list(); -#endif if ((drv = lookup_driver(name)) == NULL) { soft_error_term = am_not_loaded; @@ -597,7 +576,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2) dh->reload_full_path = dh->reload_driver_name = NULL; dh->reload_flags = 0; } - if (erts_smp_atomic32_read_nob(&dh->port_count) > 0) { + if (erts_atomic32_read_nob(&dh->port_count) > 0) { ++kill_ports; } dh->status = ERL_DE_UNLOAD; @@ -608,23 +587,17 @@ done: /* Avoid closing the driver by referencing it */ erts_ddll_reference_driver(dh); dh->status = ERL_DE_FORCE_UNLOAD; -#if DDLL_SMP unlock_drv_list(); -#endif kill_ports_driver_unloaded(dh); -#if DDLL_SMP lock_drv_list(); -#endif erts_ddll_dereference_driver(dh); } -#if DDLL_SMP erts_ddll_reference_driver(dh); unlock_drv_list(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); lock_drv_list(); erts_ddll_dereference_driver(dh); -#endif erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); BIF_P->flags |= F_USING_DDLL; if (monitor > 0) { @@ -638,17 +611,13 @@ done: if (kill_ports > 1) { ERTS_BIF_CHK_EXITED(BIF_P); /* May be exited by port killing */ } -#if DDLL_SMP unlock_drv_list(); -#endif BIF_RET(t); soft_error: -#if DDLL_SMP unlock_drv_list(); -#endif erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); hp = HAlloc(BIF_P, 3); t = TUPLE2(hp, am_error, soft_error_term); BIF_RET(t); @@ -658,7 +627,7 @@ soft_error: if (name != NULL) { erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); } - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_ERROR(BIF_P, BADARG); } @@ -697,9 +666,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0) int need = 3; Eterm res = NIL; erts_driver_t *drv; -#if DDLL_SMP lock_drv_list(); -#endif for (drv = driver_list; drv; drv = drv->next) { need += sys_strlen(drv->name)*2+2; } @@ -712,9 +679,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0) } res = TUPLE2(hp,am_ok,res); /* hp += 3 */ -#if DDLL_SMP unlock_drv_list(); -#endif BIF_RET(res); } @@ -736,9 +701,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) Eterm *hp; int i; Uint filter; -#if DDLL_SMP int have_lock = 0; -#endif if ((name = pick_list_or_atom(name_term)) == NULL) { goto error; @@ -748,10 +711,8 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) goto error; } -#if DDLL_SMP lock_drv_list(); have_lock = 1; -#endif if ((drv = lookup_driver(name)) == NULL) { goto error; } @@ -781,7 +742,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) } else if (drv->handle->status == ERL_DE_PERMANENT) { res = am_permanent; } else { - res = make_small(erts_smp_atomic32_read_nob(&drv->handle->port_count)); + res = make_small(erts_atomic32_read_nob(&drv->handle->port_count)); } goto done; case am_linked_in_driver: @@ -827,9 +788,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) hp += 2; } done: -#if DDLL_SMP unlock_drv_list(); -#endif if (pei) 
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, pei); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); @@ -838,11 +797,9 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) if (name != NULL) { erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); } -#if DDLL_SMP if (have_lock) { unlock_drv_list(); } -#endif BIF_ERROR(p,BADARG); } @@ -899,13 +856,9 @@ BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1) if (errdesc_to_code(code_term,&errint) != 0) { goto error; } -#if DDLL_SMP lock_drv_list(); -#endif errstring = erts_ddll_error(errint); -#if DDLL_SMP unlock_drv_list(); -#endif break; } if (errstring == NULL) { @@ -968,7 +921,7 @@ Eterm erts_ddll_monitor_driver(Process *p, void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks) { erts_driver_t *drv; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); lock_drv_list(); drv = driver_list; while (drv != NULL) { @@ -993,7 +946,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks) } done: unlock_drv_list(); - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); } /* @@ -1002,7 +955,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks) void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) { erts_driver_t *drv; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); lock_drv_list(); drv = driver_list; while (drv != NULL) { @@ -1040,18 +993,14 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) dh->status = ERL_DE_UNLOAD; } if (!left - && erts_smp_atomic32_read_nob(&drv->handle->port_count) > 0) { + && erts_atomic32_read_nob(&drv->handle->port_count) > 0) { if (kill_ports) { DE_Handle *dh = drv->handle; erts_ddll_reference_driver(dh); dh->status = ERL_DE_FORCE_UNLOAD; -#if DDLL_SMP unlock_drv_list(); -#endif kill_ports_driver_unloaded(dh); -#if DDLL_SMP lock_drv_list(); /* Needed for future list operations */ -#endif drv = drv->next; /* before allowing destruction */ erts_ddll_dereference_driver(dh); } else { @@ -1065,7 +1014,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) } } unlock_drv_list(); - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); } void erts_ddll_lock_driver(DE_Handle *dh, char *name) { @@ -1093,41 +1042,41 @@ void erts_ddll_lock_driver(DE_Handle *dh, char *name) void erts_ddll_increment_port_count(DE_Handle *dh) { assert_drv_list_locked(); - erts_smp_atomic32_inc_nob(&dh->port_count); + erts_atomic32_inc_nob(&dh->port_count); } void erts_ddll_decrement_port_count(DE_Handle *dh) { assert_drv_list_locked(); #ifdef DEBUG - ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0); + ASSERT(erts_atomic32_dec_read_nob(&dh->port_count) >= 0); #else - erts_smp_atomic32_dec_nob(&dh->port_count); + erts_atomic32_dec_nob(&dh->port_count); #endif } static void first_ddll_reference(DE_Handle *dh) { assert_drv_list_rwlocked(); - erts_smp_refc_init(&(dh->refc),1); + erts_refc_init(&(dh->refc),1); } void erts_ddll_reference_driver(DE_Handle *dh) { assert_drv_list_locked(); - if (erts_smp_refc_inctest(&(dh->refc),1) == 1) { - erts_smp_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */ + if (erts_refc_inctest(&(dh->refc),1) == 1) { + erts_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */ } } void erts_ddll_reference_referenced_driver(DE_Handle *dh) { - erts_smp_refc_inc(&(dh->refc),2); + erts_refc_inc(&(dh->refc),2); } void erts_ddll_dereference_driver(DE_Handle *dh) { - if (erts_smp_refc_dectest(&(dh->refc),0) == 0) { + if (erts_refc_dectest(&(dh->refc),0) == 0) { /* No lock 
here, but if the driver is referenced again, the scheduled deletion is added as a reference too, see above */ erts_schedule_misc_op(ddll_no_more_references, (void *) dh); @@ -1150,11 +1099,11 @@ static void restore_process_references(DE_Handle *dh) { DE_ProcEntry *p; assert_drv_list_rwlocked(); - ASSERT(erts_smp_refc_read(&(dh->refc),0) == 0); + ASSERT(erts_refc_read(&(dh->refc),0) == 0); for(p = dh->procs;p != NULL; p = p->next) { if (p->awaiting_status == ERL_DE_PROC_LOADED) { ASSERT(p->flags & ERL_DE_FL_DEREFERENCED); - erts_smp_refc_inc(&(dh->refc),1); + erts_refc_inc(&(dh->refc),1); p->flags &= ~ERL_DE_FL_DEREFERENCED; } } @@ -1176,9 +1125,9 @@ static void ddll_no_more_references(void *vdh) lock_drv_list(); - x = erts_smp_refc_read(&(dh->refc),0); + x = erts_refc_read(&(dh->refc),0); if (x > 0) { - x = erts_smp_refc_dectest(&(dh->refc),0); /* delete the reference added for me */ + x = erts_refc_dectest(&(dh->refc),0); /* delete the reference added for me */ } @@ -1281,10 +1230,8 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro Eterm immediate_type = NIL; erts_driver_t *drv; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); -#if DDLL_SMP + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); lock_drv_list(); -#endif if ((drv = lookup_driver(name)) == NULL) { immediate_tag = am_unloaded; immediate_type = am_DOWN; @@ -1314,20 +1261,14 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro } p->flags |= F_USING_DDLL; r = add_monitor(p, drv->handle, ERL_DE_PROC_AWAIT_LOAD); -#if DDLL_SMP unlock_drv_list(); -#endif BIF_RET(r); immediate: r = erts_make_ref(p); -#if DDLL_SMP - erts_smp_proc_unlock(p, plocks); -#endif + erts_proc_unlock(p, plocks); notify_proc(p, r, name_term, immediate_type, immediate_tag, 0); -#if DDLL_SMP unlock_drv_list(); - erts_smp_proc_lock(p, plocks); -#endif + erts_proc_lock(p, plocks); BIF_RET(r); } @@ -1338,10 +1279,8 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP Eterm immediate_type = NIL; erts_driver_t *drv; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); -#if DDLL_SMP + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); lock_drv_list(); -#endif if ((drv = lookup_driver(name)) == NULL) { immediate_tag = am_unloaded; immediate_type = am_DOWN; @@ -1355,20 +1294,14 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP p->flags |= F_USING_DDLL; r = add_monitor(p, drv->handle, flag); -#if DDLL_SMP unlock_drv_list(); -#endif BIF_RET(r); immediate: r = erts_make_ref(p); -#if DDLL_SMP - erts_smp_proc_unlock(p, plocks); -#endif + erts_proc_unlock(p, plocks); notify_proc(p, r, name_term, immediate_type, immediate_tag, 0); -#if DDLL_SMP unlock_drv_list(); - erts_smp_proc_lock(p, plocks); -#endif + erts_proc_lock(p, plocks); BIF_RET(r); } @@ -1572,8 +1505,8 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name) res = ERL_DE_LOAD_ERROR_BAD_NAME; goto error; } - erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0); - erts_smp_atomic32_init_nob(&dh->port_count, 0); + erts_atomic_init_nob(&(dh->refc), (erts_aint_t) 0); + erts_atomic32_init_nob(&dh->port_count, 0); dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1); sys_strcpy(dh->full_path, path); dh->flags = 0; @@ -1644,8 +1577,8 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name) dh->handle = NULL; dh->procs = NULL; - erts_smp_atomic32_init_nob(&dh->port_count, 0); - erts_smp_refc_init(&(dh->refc), (erts_aint_t) 0); + 
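The erl_bif_ddll.c changes around this point replace the erts_smp_refc_* and erts_smp_atomic32_* wrappers with the plain erts_refc_*/erts_atomic32_* calls, now that the non-SMP emulator (and with it the DDLL_SMP conditionals) is gone. The handle lifetime rule they implement is ordinary atomic reference counting: whichever caller drops the count to zero is responsible for destruction, or for scheduling it. A minimal standalone C11 sketch of that rule, with invented names (drv_handle, handle_ref, handle_unref) standing in for the ERTS API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        atomic_long refc;
        const char *name;
    } drv_handle;

    static drv_handle *handle_new(const char *name) {
        drv_handle *dh = malloc(sizeof *dh);
        atomic_init(&dh->refc, 1);
        dh->name = name;
        return dh;
    }

    static void handle_ref(drv_handle *dh) {
        atomic_fetch_add_explicit(&dh->refc, 1, memory_order_relaxed);
    }

    static void handle_unref(drv_handle *dh) {
        /* decrement-and-test: only the caller that takes the count to zero
         * reclaims the handle (the real code schedules a misc op instead). */
        if (atomic_fetch_sub_explicit(&dh->refc, 1, memory_order_acq_rel) == 1) {
            printf("destroying %s\n", dh->name);
            free(dh);
        }
    }

    int main(void) {
        drv_handle *dh = handle_new("example_drv");
        handle_ref(dh);     /* e.g. a port starts using the driver    */
        handle_unref(dh);   /* the port closes                        */
        handle_unref(dh);   /* the loader drops its reference: freed  */
        return 0;
    }

erts_ddll_dereference_driver() above does the same zero test but, rather than freeing directly, schedules ddll_no_more_references() so that destruction happens outside the driver-list lock.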
erts_atomic32_init_nob(&dh->port_count, 0); + erts_refc_init(&(dh->refc), (erts_aint_t) 0); dh->status = -1; dh->reload_full_path = NULL; dh->reload_driver_name = NULL; @@ -1683,7 +1616,7 @@ static int reload_driver_entry(DE_Handle *dh) dh->reload_full_path = NULL; dh->reload_driver_name = NULL; - ASSERT(erts_smp_refc_read(&(dh->refc),0) == 0); + ASSERT(erts_refc_read(&(dh->refc),0) == 0); ASSERT(dh->full_path != NULL); erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh->full_path); dh->full_path = NULL; @@ -1714,7 +1647,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type, ErtsMessage *mp; ErtsProcLocks rp_locks = 0; ErlOffHeap *ohp; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; assert_drv_list_rwlocked(); if (errcode != 0) { @@ -1740,8 +1673,8 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type, mess = TUPLE5(hp,type,r,am_driver,driver_name,tag); } erts_queue_message(proc, rp_locks, mp, mess, am_system); - erts_smp_proc_unlock(proc, rp_locks); - ERTS_SMP_CHK_NO_PROC_LOCKS; + erts_proc_unlock(proc, rp_locks); + ERTS_CHK_NO_PROC_LOCKS; } static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Eterm tag) @@ -1813,7 +1746,7 @@ static Eterm build_load_error(Process *p, int code) { int need = load_error_need(code); Eterm *hp = NULL; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); if (need) { hp = HAlloc(p,need); } diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 0547b4d75c..36939d6acc 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -88,18 +88,12 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE #ifdef ARCH_64 " [64-bit]" #endif -#ifdef ERTS_SMP " [smp:%beu:%beu]" -#endif -#ifdef USE_THREADS -#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP) " [ds:%beu:%beu:%beu]" -#endif #if defined(ERTS_DIRTY_SCHEDULERS_TEST) " [dirty-schedulers-TEST]" #endif " [async-threads:%d]" -#endif #ifdef HIPE " [hipe]" #endif @@ -354,14 +348,12 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p) char *rc_str = ""; char rc_buf[100]; char *ov = otp_version; -#ifdef ERTS_SMP Uint total, online, active; Uint dirty_cpu, dirty_cpu_onln, dirty_io; erts_schedulers_state(&total, &online, &active, &dirty_cpu, &dirty_cpu_onln, NULL, &dirty_io, NULL); -#endif for (i = 0; i < sizeof(otp_version)-4; i++) { if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c') rc = atoi(&ov[i+3]); @@ -376,15 +368,9 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p) } return erts_print(to, arg, erts_system_version, rc_str -#ifdef ERTS_SMP , total, online -#ifdef ERTS_DIRTY_SCHEDULERS , dirty_cpu, dirty_cpu_onln, dirty_io -#endif -#endif -#ifdef USE_THREADS , erts_async_max_threads -#endif #ifdef ERTS_ENABLE_KERNEL_POLL , erts_use_kernel_poll ? 
"true" : "false" #endif @@ -764,7 +750,6 @@ process_info_init(void) static ERTS_INLINE Process * pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks) { -#ifdef ERTS_SMP /* * If the main lock is needed, we use erts_pid2proc_not_running() * instead of erts_pid2proc() for two reasons: @@ -782,7 +767,6 @@ pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks) return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN, pid, info_locks); else -#endif return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN, pid, info_locks); } @@ -900,13 +884,13 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap, * is being inspected... */ ASSERT(locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); + ERTS_MSGQ_MV_INQ2PRIVQ(rp); locks &= ~ERTS_PROC_LOCK_MSGQ; unlock_locks |= ERTS_PROC_LOCK_MSGQ; } if (unlock_locks) - erts_smp_proc_unlock(rp, unlock_locks); + erts_proc_unlock(rp, unlock_locks); } @@ -964,7 +948,7 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap, if (c_p == rp) locks &= ~ERTS_PROC_LOCK_MAIN; if (locks && rp) - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); if (res_elem_ix != &def_res_elem_ix_buf[0]) erts_free(ERTS_ALC_T_TMP, res_elem_ix); @@ -1055,7 +1039,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P, BIF_ARG_1, BIF_ARG_2); else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) { - erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS); ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined); } else { @@ -1075,24 +1059,22 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2) * is being inspected... */ ASSERT(info_locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); + ERTS_MSGQ_MV_INQ2PRIVQ(rp); info_locks &= ~ERTS_PROC_LOCK_MSGQ; unlock_locks |= ERTS_PROC_LOCK_MSGQ; } if (unlock_locks) - erts_smp_proc_unlock(rp, unlock_locks); + erts_proc_unlock(rp, unlock_locks); res = process_info_aux(BIF_P, rp, info_locks, pid, BIF_ARG_2, 0); } ASSERT(is_value(res)); -#ifdef ERTS_SMP if (BIF_P == rp) info_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp && info_locks) - erts_smp_proc_unlock(rp, info_locks); -#endif + erts_proc_unlock(rp, info_locks); ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED)); BIF_RET(res); @@ -1379,7 +1361,7 @@ process_info_aux(Process *BIF_P, break; case am_trap_exit: { - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t state = erts_atomic32_read_nob(&rp->state); hp = HAlloc(BIF_P, 3); if (state & ERTS_PSFLG_TRAP_EXIT) res = am_true; @@ -2145,14 +2127,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) ASSERT(erts_compat_rel > 0); BIF_RET(make_small(erts_compat_rel)); } else if (BIF_ARG_1 == am_multi_scheduling) { -#ifndef ERTS_SMP - BIF_RET(am_disabled); -#else -#ifndef ERTS_DIRTY_SCHEDULERS - if (erts_no_schedulers == 1) - BIF_RET(am_disabled); - else -#endif { int msb = erts_is_multi_scheduling_blocked(); BIF_RET(!msb @@ -2161,7 +2135,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) ? 
am_blocked : am_blocked_normal)); } -#endif } else if (BIF_ARG_1 == am_build_type) { #if defined(DEBUG) ERTS_DECL_AM(debug); @@ -2271,7 +2244,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = TUPLE2(hp, am_sequential_tracer, val); BIF_RET(res); } else if (BIF_ARG_1 == am_garbage_collection){ - Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs); Eterm tup; hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2); @@ -2289,7 +2262,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(res); } else if (BIF_ARG_1 == am_fullsweep_after){ - Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs); hp = HAlloc(BIF_P, 3); res = TUPLE2(hp, am_fullsweep_after, make_small(val)); BIF_RET(res); @@ -2322,8 +2295,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0); /* Need to be the only thread running... */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); if (BIF_ARG_1 == am_info) info(ERTS_PRINT_DSBUF, (void *) dsbufp); @@ -2334,8 +2307,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) else distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); ASSERT(dsbufp && dsbufp->str); res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len); @@ -2344,7 +2317,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) { DistEntry *dep; i = 0; - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); for (dep = erts_visible_dist_entries; dep; dep = dep->next) ++i; for (dep = erts_hidden_dist_entries; dep; dep = dep->next) @@ -2367,7 +2340,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = CONS(hp, tpl, res); hp += 2; } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); BIF_RET(res); } else if (BIF_ARG_1 == am_system_version) { erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0); @@ -2393,16 +2366,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(erts_allocator_options((void *) BIF_P)); } else if (BIF_ARG_1 == am_thread_pool_size) { -#ifdef USE_THREADS extern int erts_async_max_threads; -#endif int n; -#ifdef USE_THREADS n = erts_async_max_threads; -#else - n = 0; -#endif BIF_RET(make_small(n)); } else if (BIF_ARG_1 == am_alloc_util_allocators) { @@ -2470,7 +2437,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) #endif BIF_RET(res); -#endif /* #ifndef ERTS_SMP */ +#endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */ } else if (BIF_ARG_1 == am_wordsize) { return make_small(sizeof(Eterm)); } else if (BIF_ARG_1 == am_endian) { @@ -2550,11 +2517,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(res); #endif } else if (BIF_ARG_1 == am_threads) { -#ifdef USE_THREADS return am_true; -#else - return am_false; -#endif } else if (BIF_ARG_1 == am_creation) { return make_small(erts_this_node->creation); } else if (BIF_ARG_1 == am_break_ignored) { @@ -2613,11 +2576,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) hp = HAlloc(BIF_P, 2*n); BIF_RET(buf_to_intlist(&hp, buf, n, NIL)); } else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) { -#ifdef ERTS_SMP BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) { 
BIF_RET(erts_bound_schedulers_term(BIF_P)); } else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) { @@ -2629,11 +2588,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = make_small(erts_no_schedulers); BIF_RET(res); } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) { -#ifndef ERTS_SMP - Eterm *hp = HAlloc(BIF_P, 4); - res = TUPLE3(hp, make_small(1), make_small(1), make_small(1)); - BIF_RET(res); -#else Eterm *hp; Uint total, online, active; erts_schedulers_state(&total, &online, &active, @@ -2644,13 +2598,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) make_small(online), make_small(active)); BIF_RET(res); -#endif } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) { -#ifndef ERTS_SMP - Eterm *hp = HAlloc(BIF_P, 4); - res = TUPLE3(hp, make_small(1), make_small(1), make_small(1)); - BIF_RET(res); -#else Eterm *hp; Uint total, online, active; erts_schedulers_state(&total, &online, &active, @@ -2661,19 +2609,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) make_small(online), make_small(active)); BIF_RET(res); -#endif } else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) { -#ifndef ERTS_SMP - Eterm *hp = HAlloc(BIF_P, 2+5); - res = CONS(hp+5, - TUPLE4(hp, - am_normal, - make_small(1), - make_small(1), - make_small(1)), - NIL); - BIF_RET(res); -#else Eterm *hp, tpl; Uint sz, total, online, active, dirty_cpu_total, dirty_cpu_online, dirty_cpu_active, @@ -2719,46 +2655,25 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) hp += 5; res = CONS(hp, tpl, res); BIF_RET(res); -#endif } else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) { -#ifndef ERTS_SMP - BIF_RET(make_small(1)); -#else Uint online; erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL); BIF_RET(make_small(online)); -#endif } else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) { -#ifndef ERTS_SMP - BIF_RET(make_small(1)); -#else Uint active; erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL); BIF_RET(make_small(active)); -#endif } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) { Uint dirty_cpu; -#ifdef ERTS_DIRTY_SCHEDULERS erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL); -#else - dirty_cpu = 0; -#endif BIF_RET(make_small(dirty_cpu)); } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) { Uint dirty_cpu_onln; -#ifdef ERTS_DIRTY_SCHEDULERS erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL); -#else - dirty_cpu_onln = 0; -#endif BIF_RET(make_small(dirty_cpu_onln)); } else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) { Uint dirty_io; -#ifdef ERTS_DIRTY_SCHEDULERS erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL); -#else - dirty_io = 0; -#endif BIF_RET(make_small(dirty_io)); } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) { res = make_small(erts_no_run_queues); @@ -2805,23 +2720,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) { BIF_RET(erts_check_io_info(BIF_P)); } else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) { -#ifndef ERTS_SMP - BIF_RET(NIL); -#else if (erts_no_schedulers == 1) BIF_RET(NIL); else BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0)); -#endif } else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) { -#ifndef ERTS_SMP - BIF_RET(NIL); -#else if (erts_no_schedulers == 1) BIF_RET(NIL); else BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1)); -#endif } else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) { 
BIF_RET(ERTS_USE_MODIFIED_TIMING() ? make_small(erts_modified_timing_level) @@ -2884,12 +2791,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(am_false); #endif } -#ifdef ERTS_SMP else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) { erts_thr_progress_dbg_print_state(); BIF_RET(am_true); } -#endif else if (BIF_ARG_1 == am_message_queue_data) { switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) { case SPO_OFF_HEAP_MSGQ: @@ -2981,7 +2886,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, { Eterm res = THE_NON_VALUE; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (item == am_id) { if (hpp) @@ -3172,9 +3077,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, } else if (ERTS_IS_ATOM_STR("locking", item)) { if (hpp) { -#ifndef ERTS_SMP - res = am_false; -#else if (erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) { DECL_AM(port_level); @@ -3188,7 +3090,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, & ERL_DRV_FLAG_USE_PORT_LOCKING)); res = AM_driver_level; } -#endif } if (szp) { res = am_true; @@ -3201,7 +3102,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, goto done; } res = ((ERTS_PTS_FLG_PARALLELISM & - erts_smp_atomic32_read_nob(&prt->sched.flags)) + erts_atomic32_read_nob(&prt->sched.flags)) ? am_true : am_false); } @@ -3277,7 +3178,7 @@ fun_info_2(BIF_ALIST_2) } break; case am_refc: - val = erts_make_integer(erts_smp_atomic_read_nob(&funp->fe->refc), p); + val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p); hp = HAlloc(p, 3); break; case am_arity: @@ -3382,7 +3283,7 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1) BIF_RET(am_false); } else { - if (erts_smp_atomic32_read_acqb(&rp->state) + if (erts_atomic32_read_acqb(&rp->state) & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false); else @@ -3417,7 +3318,7 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2) BIF_ARG_1, BIF_ARG_2); if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) { Eterm args[2] = {BIF_ARG_1, BIF_ARG_2}; - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_ALL); ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P, BIF_ARG_1, am_erlang, @@ -3426,11 +3327,9 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2) 2); } erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp); -#ifdef ERTS_SMP - erts_smp_proc_unlock(rp, (BIF_P == rp + erts_proc_unlock(rp, (BIF_P == rp ? ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); -#endif BIF_RET(am_true); } @@ -3608,7 +3507,7 @@ BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0) BIF_RET(erts_error_logger_warnings); } -static erts_smp_atomic_t available_internal_state; +static erts_atomic_t available_internal_state; static int empty_magic_ref_destructor(Binary *bin) { @@ -3621,7 +3520,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) * NOTE: Only supposed to be used for testing, and debugging. 
*/ - if (!erts_smp_atomic_read_nob(&available_internal_state)) { + if (!erts_atomic_read_nob(&available_internal_state)) { BIF_ERROR(BIF_P, EXC_UNDEF); } @@ -3664,9 +3563,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) int no_errors; ErtsCheckIoDebugInfo ciodi = {0}; #ifdef HAVE_ERTS_CHECK_IO_DEBUG - erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN); no_errors = erts_check_io_debug(&ciodi); - erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN); #else no_errors = 0; #endif @@ -3716,9 +3615,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) { Uint n; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); n = erts_debug_nbalance(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(erts_make_integer(n, BIF_P)); } else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) { @@ -3733,11 +3632,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) { Eterm res; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE); - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); BIF_RET(res); } else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) { @@ -3804,11 +3703,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) tp[2], ERTS_PROC_LOCK_LINK); if (!p) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); + ERTS_ASSERT_IS_NOT_EXITING(BIF_P); BIF_RET(am_undefined); } res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); BIF_RET(res); } else if(is_internal_port(tp[2])) { @@ -3827,11 +3726,10 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) DistEntry *dep = erts_find_dist_entry(tp[2]); if(dep) { Eterm subres; - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); subres = make_link_list(BIF_P, dep->nlinks, NIL); subres = make_link_list(BIF_P, dep->node_links, subres); - erts_smp_de_links_unlock(dep); - erts_deref_dist_entry(dep); + erts_de_links_unlock(dep); BIF_RET(subres); } else { BIF_RET(am_undefined); @@ -3849,20 +3747,19 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) tp[2], ERTS_PROC_LOCK_LINK); if (!p) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); + ERTS_ASSERT_IS_NOT_EXITING(BIF_P); BIF_RET(am_undefined); } res = make_monitor_list(BIF_P, ERTS_P_MONITORS(p)); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); BIF_RET(res); } else if(is_node_name_atom(tp[2])) { DistEntry *dep = erts_find_dist_entry(tp[2]); if(dep) { Eterm ml; - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); ml = make_monitor_list(BIF_P, dep->monitors); - erts_smp_de_links_unlock(dep); - erts_deref_dist_entry(dep); + erts_de_links_unlock(dep); BIF_RET(ml); } else { BIF_RET(am_undefined); @@ -3877,7 +3774,6 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) else { Uint cno = dist_entry_channel_no(dep); res = make_small(cno); - erts_deref_dist_entry(dep); } BIF_RET(res); } @@ -3889,7 +3785,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else { Eterm res = ERTS_PROC_PENDING_EXIT(rp) 
? am_true : am_false; - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); BIF_RET(res); } } @@ -3939,15 +3835,14 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) DFLAG_BIT_BINARIES); BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags)); } - else if (ERTS_IS_ATOM_STR("dist_port", tp[1])) { + else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) { Eterm res = am_undefined; DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]); if (dep) { - erts_smp_de_rlock(dep); - if (is_internal_port(dep->cid)) + erts_de_rlock(dep); + if (is_internal_port(dep->cid) || is_internal_pid(dep->cid)) res = dep->cid; - erts_smp_de_runlock(dep); - erts_deref_dist_entry(dep); + erts_de_runlock(dep); } BIF_RET(res); } @@ -4091,7 +3986,7 @@ BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); } -static erts_smp_atomic_t hipe_test_reschedule_flag; +static erts_atomic_t hipe_test_reschedule_flag; #if defined(VALGRIND) && defined(__GNUC__) /* Force noinline for valgrind suppression */ @@ -4115,7 +4010,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1) && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) { erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true); - erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on); + erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on); if (on) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id); @@ -4131,7 +4026,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(prev_on ? am_true : am_false); } - if (!erts_smp_atomic_read_nob(&available_internal_state)) { + if (!erts_atomic_read_nob(&available_internal_state)) { BIF_ERROR(BIF_P, EXC_UNDEF); } @@ -4155,13 +4050,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Sint ms; if (term_to_Sint(BIF_ARG_2, &ms) != 0) { if (ms > 0) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); if (block) - erts_smp_thr_progress_block(); + erts_thr_progress_block(); while (erts_milli_sleep((long) ms) != 0); if (block) - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } BIF_RET(am_true); } @@ -4170,9 +4065,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Sint ms; if (term_to_Sint(BIF_ARG_2, &ms) != 0) { if (ms > 0) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); while (erts_milli_sleep((long) ms) != 0); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } BIF_RET(am_true); } @@ -4240,10 +4135,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(AM_dead); } -#ifdef ERTS_SMP if (BIF_P == rp) rp_locks |= ERTS_PROC_LOCK_MAIN; -#endif xres = erts_send_exit_signal(NULL, /* NULL in order to force a pending exit when we send to our @@ -4255,11 +4148,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) NIL, NULL, 0); -#ifdef ERTS_SMP if (BIF_P == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; -#endif - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (xres > 1) { DECL_AM(message); BIF_RET(AM_message); @@ -4321,14 +4212,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) } else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) { /* Used by hipe 
test suites */ - erts_aint_t flag = erts_smp_atomic_read_nob(&hipe_test_reschedule_flag); + erts_aint_t flag = erts_atomic_read_nob(&hipe_test_reschedule_flag); if (!flag && BIF_ARG_2 != am_false) { - erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, 1); + erts_atomic_set_nob(&hipe_test_reschedule_flag, 1); erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } - erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, !flag); + erts_atomic_set_nob(&hipe_test_reschedule_flag, !flag); BIF_RET(NIL); } else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) { @@ -4339,7 +4230,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) if (rp) { erts_resume(rp, ERTS_PROC_LOCK_STATUS); res = am_true; - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); } BIF_RET(res); } @@ -4356,16 +4247,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(am_false); else { Uint32 con_id; - erts_smp_de_rlock(dep); + erts_de_rlock(dep); con_id = dep->connection_id; - erts_smp_de_runlock(dep); + erts_de_runlock(dep); erts_kill_dist_connection(dep, con_id); - erts_deref_dist_entry(dep); BIF_RET(am_true); } } else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) { -#ifdef ERTS_SMP int old_use_opt, use_opt; switch (BIF_ARG_2) { case am_true: @@ -4378,16 +4267,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); old_use_opt = !erts_disable_proc_not_running_opt; erts_disable_proc_not_running_opt = !use_opt; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(old_use_opt ? am_true : am_false); -#else - BIF_ERROR(BIF_P, EXC_NOTSUP); -#endif } else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) { if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) { @@ -4415,9 +4301,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Sint64 msecs; if (term_to_Sint64(BIF_ARG_2, &msecs)) { /* Negative value restore original value... 
*/ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_debug_test_node_tab_delayed_delete(msecs); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_ok); } } @@ -4865,8 +4751,8 @@ static void os_info_init(void) void erts_bif_info_init(void) { - erts_smp_atomic_init_nob(&available_internal_state, 0); - erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0); + erts_atomic_init_nob(&available_internal_state, 0); + erts_atomic_init_nob(&hipe_test_reschedule_flag, 0); alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1); alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1); diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c index ff03151619..4b73be55c6 100644 --- a/erts/emulator/beam/erl_bif_port.c +++ b/erts/emulator/beam/erl_bif_port.c @@ -86,25 +86,25 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2) erts_make_ref_in_array(port->async_open_port->ref); port->async_open_port->to = BIF_P->common.id; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); if (ERTS_PROC_PENDING_EXIT(BIF_P)) { /* need to exit caller instead */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); KILL_CATCHES(BIF_P); BIF_P->freason = EXC_EXIT; erts_port_release(port); BIF_RET(am_badarg); } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(BIF_P); + ERTS_MSGQ_MV_INQ2PRIVQ(BIF_P); BIF_P->msg.save = BIF_P->msg.last; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE); res = erts_proc_store_ref(BIF_P, port->async_open_port->ref); } else { res = port->common.id; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); } erts_add_link(&ERTS_P_LINKS(port), LINK_PID, BIF_P->common.id); @@ -114,7 +114,7 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2) trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK, BIF_P, am_link, port->common.id); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); erts_port_release(port); @@ -271,12 +271,10 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3) break; } - state = erts_smp_atomic32_read_acqb(&BIF_P->state); + state = erts_atomic32_read_acqb(&BIF_P->state); if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif ERTS_BIF_EXITED(BIF_P); } @@ -321,12 +319,10 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3) break; } - state = erts_smp_atomic32_read_acqb(&BIF_P->state); + state = erts_atomic32_read_acqb(&BIF_P->state); if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif ERTS_BIF_EXITED(BIF_P); } @@ -511,39 +507,35 @@ cleanup_old_port_data(erts_aint_t data) ASSERT(is_immed((Eterm) data)); } else { -#ifdef ERTS_SMP ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data; size_t size; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; size = sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm); erts_schedule_thr_prgr_later_cleanup_op(free_port_data_heap, (void *) pdhp, &pdhp->later_op, size); 
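The erl_bif_port.c hunks above keep prt->data as an atomically exchanged word and, as the retained branch shows, defer freeing of a replaced off-heap block via erts_schedule_thr_prgr_later_cleanup_op() so that no scheduler can still be reading it. Below is a standalone sketch of that swap-then-defer-free pattern using C11 atomics; toy_port and port_data_set/get are invented names, and the thread-progress grace period is only indicated by a comment.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        _Atomic(char *) data;   /* stands in for the atomic prt->data word */
    } toy_port;

    static void port_data_set(toy_port *prt, const char *value)
    {
        char *new_blk = strdup(value);
        /* exchange with release ordering, like erts_atomic_xchg_wb() */
        char *old_blk = atomic_exchange_explicit(&prt->data, new_blk,
                                                 memory_order_release);
        /* The real code hands old_blk to the thread-progress cleanup op so it
         * is freed only after every scheduler has passed a progress point.
         * Single-threaded toy: free immediately (free(NULL) is a no-op). */
        free(old_blk);
    }

    static const char *port_data_get(toy_port *prt)
    {
        return atomic_load_explicit(&prt->data, memory_order_acquire);
    }

    int main(void)
    {
        toy_port prt = { NULL };
        port_data_set(&prt, "first");
        port_data_set(&prt, "second");
        printf("port data: %s\n", port_data_get(&prt));
        free(atomic_exchange(&prt.data, (char *) NULL));
        return 0;
    }

port_set_data_2 additionally checks for a NULL result from the exchange, which signals that the port died under a racing thread; in that case the just-stored value is swapped back out, cleaned up, and the BIF fails with badarg.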
-#else - free_port_data_heap((void *) data); -#endif } } void erts_init_port_data(Port *prt) { - erts_smp_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined); + erts_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined); } void erts_cleanup_port_data(Port *prt) { ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP); - cleanup_old_port_data(erts_smp_atomic_xchg_nob(&prt->data, + cleanup_old_port_data(erts_atomic_xchg_nob(&prt->data, (erts_aint_t) NULL)); } Uint erts_port_data_size(Port *prt) { - erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data); + erts_aint_t data = erts_atomic_read_ddrb(&prt->data); if ((data & 0x3) != 0) { ASSERT(is_immed((Eterm) (UWord) data)); @@ -558,7 +550,7 @@ erts_port_data_size(Port *prt) ErlOffHeap * erts_port_data_offheap(Port *prt) { - erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data); + erts_aint_t data = erts_atomic_read_ddrb(&prt->data); if ((data & 0x3) != 0) { ASSERT(is_immed((Eterm) (UWord) data)); @@ -603,11 +595,11 @@ BIF_RETTYPE port_set_data_2(BIF_ALIST_2) ASSERT((data & 0x3) == 0); } - data = erts_smp_atomic_xchg_wb(&prt->data, data); + data = erts_atomic_xchg_wb(&prt->data, data); if (data == (erts_aint_t)NULL) { /* Port terminated by racing thread */ - data = erts_smp_atomic_xchg_wb(&prt->data, data); + data = erts_atomic_xchg_wb(&prt->data, data); ASSERT(data != (erts_aint_t)NULL); cleanup_old_port_data(data); BIF_ERROR(BIF_P, BADARG); @@ -630,7 +622,7 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1) if (!prt) BIF_ERROR(BIF_P, BADARG); - data = erts_smp_atomic_read_ddrb(&prt->data); + data = erts_atomic_read_ddrb(&prt->data); if (data == (erts_aint_t)NULL) BIF_ERROR(BIF_P, BADARG); /* Port terminated by racing thread */ @@ -925,7 +917,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump); #ifdef USE_VM_PROBES @@ -942,7 +934,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) if (port && IS_TRACED_FL(port, F_TRACE_PORTS)) trace_port(port, am_getting_linked, p->common.id); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in); diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c index ad124fd979..bc819505e7 100644 --- a/erts/emulator/beam/erl_bif_re.c +++ b/erts/emulator/beam/erl_bif_re.c @@ -66,11 +66,7 @@ static void erts_erts_pcre_stack_free(void *ptr) { #define ERTS_PCRE_STACK_MARGIN (10*1024) -#ifdef ERTS_SMP # define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit()) -#else -# define ERTS_STACK_LIMIT ((char *) erts_scheduler_stack_limit) -#endif static int stack_guard_downwards(void) diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c index 45159c4392..3fe089a00e 100644 --- a/erts/emulator/beam/erl_bif_trace.c +++ b/erts/emulator/beam/erl_bif_trace.c @@ -60,10 +60,8 @@ static struct { /* Protected by code write permission */ int local; BpFunctions f; /* Local functions */ BpFunctions e; /* Export entries */ -#ifdef ERTS_SMP Process* stager; ErtsThrPrgrLaterOp lop; -#endif } finish_bp; static Eterm @@ -71,9 +69,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist); static int erts_set_tracing_event_pattern(Eterm event, Binary*, int on); -#ifdef ERTS_SMP static void 
smp_bp_finisher(void* arg); -#endif static BIF_RETTYPE system_monitor(Process *p, Eterm monitor_pid, Eterm list); @@ -345,7 +341,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) ERTS_TRACER_CLEAR(&meta_tracer); -#ifdef ERTS_SMP if (finish_bp.current >= 0) { ASSERT(matches >= 0); ASSERT(finish_bp.stager == NULL); @@ -355,7 +350,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD_RETURN(p, make_small(matches)); } -#endif erts_release_code_write_permission(); @@ -367,7 +361,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) } } -#ifdef ERTS_SMP static void smp_bp_finisher(void* null) { if (erts_finish_breakpointing()) { /* Not done */ @@ -380,15 +373,14 @@ static void smp_bp_finisher(void* null) finish_bp.stager = NULL; #endif erts_release_code_write_permission(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(p)) { erts_resume(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); erts_proc_dec_refc(p); } } -#endif /* ERTS_SMP */ void erts_get_default_trace_pattern(int *trace_pattern_is_on, @@ -397,8 +389,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, struct trace_pattern_flags *trace_pattern_flags, ErtsTracer *meta_tracer) { - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() || - erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_has_code_write_permission() || + erts_thr_progress_is_blocking()); if (trace_pattern_is_on) *trace_pattern_is_on = erts_default_trace_pattern_is_on; if (match_spec) @@ -413,8 +405,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, int erts_is_default_trace_enabled(void) { - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() || - erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_has_code_write_permission() || + erts_thr_progress_is_blocking()); return erts_default_trace_pattern_is_on; } @@ -543,9 +535,7 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) int matches = 0; Uint mask = 0; int cpu_ts = 0; -#ifdef ERTS_SMP int system_blocked = 0; -#endif if (! erts_trace_flags(list, &mask, &tracer, &cpu_ts)) { BIF_ERROR(p, BADARG); @@ -620,13 +610,13 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) goto error; if (start_trace(tracee_p, tracer, &tracee_p->common, on, mask)) { - erts_smp_proc_unlock(tracee_p, + erts_proc_unlock(tracee_p, (tracee_p == p ? ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); goto already_traced; } - erts_smp_proc_unlock(tracee_p, + erts_proc_unlock(tracee_p, (tracee_p == p ? 
ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); @@ -699,11 +689,9 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) mods = 1; } -#ifdef ERTS_SMP - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); system_blocked = 1; -#endif ok = 1; if (procs || mods) { @@ -766,12 +754,10 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) goto error; } -#ifdef ERTS_SMP if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } -#endif erts_release_code_write_permission(); ERTS_TRACER_CLEAR(&tracer); @@ -785,12 +771,10 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) ERTS_TRACER_CLEAR(&tracer); -#ifdef ERTS_SMP if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } -#endif erts_release_code_write_permission(); BIF_ERROR(p, BADARG); @@ -878,7 +862,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) trace_flags = ERTS_TRACE_FLAGS(tracee); if (tracee != p) - erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN); } else if (is_external_pid(pid_spec) && external_pid_dist_entry(pid_spec) == erts_this_dist_entry) { return am_undefined; @@ -1055,28 +1039,20 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key) mfa[1] = tp[2]; mfa[2] = signed_val(tp[3]); -#ifdef ERTS_SMP if ( (key == am_call_time) || (key == am_all)) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); } -#endif -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx); -#endif + erts_mtx_lock(&erts_dirty_bp_ix_mtx); r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time); -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx); -#endif -#ifdef ERTS_SMP + erts_mtx_unlock(&erts_dirty_bp_ix_mtx); if ( (key == am_call_time) || (key == am_all)) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } -#endif switch (r) { case FUNC_TRACE_NOEXIST: @@ -1387,7 +1363,7 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified, #ifdef DEBUG ep->info.op = (BeamInstr) BeamOp(op_i_func_info_IaaI); #endif - ep->beam[0] = (BeamInstr) BeamOp(op_jump_f); + ep->beam[0] = (BeamInstr) BeamOp(op_trace_jump_W); ep->beam[1] = (BeamInstr) ep->addressv[code_ix]; } erts_set_call_trace_bif(ci, match_prog_set, 0); @@ -1403,7 +1379,7 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified, */ erts_clear_call_trace_bif(ci, 0); if (ep->beam[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { - ep->beam[0] = (BeamInstr) BeamOp(op_jump_f); + ep->beam[0] = (BeamInstr) BeamOp(op_trace_jump_W); } } } @@ -1526,17 +1502,13 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified, finish_bp.install = on; finish_bp.local = flags.breakpoint; -#ifdef ERTS_SMP if (is_blocking) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); -#endif + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); while (erts_finish_breakpointing()) { /* Empty loop body */ } -#ifdef ERTS_SMP finish_bp.current = -1; } -#endif if (flags.breakpoint) { matches += finish_bp.f.matched; @@ -1571,11 +1543,6 @@ erts_set_tracing_event_pattern(Eterm event, 
Binary* match_spec, int on) finish_bp.f.matched = 0; finish_bp.f.matching = NULL; -#ifndef ERTS_SMP - while (erts_finish_breakpointing()) { - /* Empty loop body */ - } -#endif return 1; } @@ -1594,7 +1561,7 @@ consolidate_event_tracing(ErtsTracingEvent te[]) int erts_finish_breakpointing(void) { - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); /* * Memory barriers will be issued for all schedulers *before* @@ -1704,7 +1671,7 @@ uninstall_exp_breakpoints(BpFunctions* f) if (ep->addressv[code_ix] != ep->beam) { continue; } - ASSERT(ep->beam[0] == (BeamInstr) BeamOp(op_jump_f)); + ASSERT(ep->beam[0] == (BeamInstr) BeamOp(op_trace_jump_W)); ep->addressv[code_ix] = (BeamInstr *) ep->beam[1]; } } @@ -1723,7 +1690,7 @@ clean_export_entries(BpFunctions* f) if (ep->addressv[code_ix] == ep->beam) { continue; } - if (ep->beam[0] == (BeamInstr) BeamOp(op_jump_f)) { + if (ep->beam[0] == (BeamInstr) BeamOp(op_trace_jump_W)) { ep->beam[0] = (BeamInstr) 0; ep->beam[1] = (BeamInstr) 0; } @@ -2015,24 +1982,20 @@ BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2) } void erts_system_monitor_clear(Process *c_p) { -#ifdef ERTS_SMP if (c_p) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); } -#endif erts_set_system_monitor(NIL); erts_system_monitor_long_gc = 0; erts_system_monitor_long_schedule = 0; erts_system_monitor_large_heap = 0; erts_system_monitor_flags.busy_port = 0; erts_system_monitor_flags.busy_dist_port = 0; -#ifdef ERTS_SMP if (c_p) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } -#endif } @@ -2142,8 +2105,8 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list) int busy_port, busy_dist_port; system_blocked = 1; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0)) goto error; @@ -2182,16 +2145,16 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list) erts_system_monitor_flags.busy_port = !!busy_port; erts_system_monitor_flags.busy_dist_port = !!busy_dist_port; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); BIF_RET(prev); } error: if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } BIF_ERROR(p, BADARG); @@ -2200,23 +2163,19 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list) /* Begin: Trace for System Profiling */ void erts_system_profile_clear(Process *c_p) { -#ifdef ERTS_SMP if (c_p) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); } -#endif erts_set_system_profile(NIL); erts_system_profile_flags.scheduler = 0; erts_system_profile_flags.runnable_procs = 0; erts_system_profile_flags.runnable_ports = 0; erts_system_profile_flags.exclusive = 0; -#ifdef ERTS_SMP if (c_p) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } -#endif } static Eterm system_profile_get(Process *p) { @@ -2278,8 +2237,8 @@ 
BIF_RETTYPE system_profile_2(BIF_ALIST_2) int scheduler, runnable_procs, runnable_ports, exclusive; system_blocked = 1; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); /* Check if valid process, no locks are taken */ @@ -2330,8 +2289,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) erts_system_profile_flags.runnable_procs = !!runnable_procs; erts_system_profile_flags.exclusive = !!exclusive; erts_system_profile_ts_type = ts; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); BIF_RET(prev); @@ -2339,8 +2298,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) error: if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } BIF_ERROR(p, BADARG); @@ -2365,7 +2324,7 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Eterm target; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsTraceDeliveredAll; static void @@ -2373,31 +2332,20 @@ reply_trace_delivered_all(void *vtdarp) { ErtsTraceDeliveredAll *tdarp = (ErtsTraceDeliveredAll *) vtdarp; - if (erts_smp_atomic32_dec_read_nob(&tdarp->refc) == 0) { + if (erts_atomic32_dec_read_nob(&tdarp->refc) == 0) { Eterm ref_copy, msg; Process *rp = tdarp->proc; Eterm *hp = NULL; ErlOffHeap *ohp; -#ifdef ERTS_SMP ErlHeapFragment *bp; bp = new_message_buffer(4 + NC_HEAP_SIZE(tdarp->ref)); hp = &bp->mem[0]; ohp = &bp->off_heap; -#else - ErtsProcLocks rp_locks = 0; - ErtsMessage *mp; - mp = erts_alloc_message_heap( - rp, &rp_locks, 4 + NC_HEAP_SIZE(tdarp->ref), &hp, &ohp); -#endif ref_copy = STORE_NC(&hp, ohp, tdarp->ref); msg = TUPLE3(hp, am_trace_delivered, tdarp->target, ref_copy); -#ifdef ERTS_SMP erts_send_sys_msg_proc(rp->common.id, rp->common.id, msg, bp); -#else - erts_queue_message(rp, rp_locks, mp, msg, am_system); -#endif erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp); erts_proc_dec_refc(rp); @@ -2418,7 +2366,7 @@ trace_delivered_1(BIF_ALIST_1) hp = &tdarp->ref_heap[0]; tdarp->ref = STORE_NC(&hp, NULL, ref); tdarp->target = BIF_ARG_1; - erts_smp_atomic32_init_nob(&tdarp->refc, + erts_atomic32_init_nob(&tdarp->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(BIF_P, 1); erts_schedule_multi_misc_aux_work(0, diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c index 2f8adc87d5..19d46537f9 100644 --- a/erts/emulator/beam/erl_bif_unique.c +++ b/erts/emulator/beam/erl_bif_unique.c @@ -77,11 +77,9 @@ init_reference(void) ref_init_value += (Uint64) tv.tv_usec; #ifdef DEBUG max_thr_id = (Uint32) erts_no_schedulers; -#ifdef ERTS_DIRTY_SCHEDULERS max_thr_id += (Uint32) erts_no_dirty_cpu_schedulers; max_thr_id += (Uint32) erts_no_dirty_io_schedulers; #endif -#endif erts_atomic64_init_nob(&global_reference.count, (erts_aint64_t) ref_init_value); init_magic_ref_tables(); @@ -136,7 +134,7 @@ Eterm erts_make_ref(Process *c_p) Eterm* hp; Uint32 ref[ERTS_REF_NUMBERS]; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); hp = HAlloc(c_p, ERTS_REF_THING_SIZE); @@ -439,10 +437,8 @@ init_unique_integer(void) { int bits; unique_data.r.o.val0_max = (Uint64) erts_no_schedulers; -#ifdef ERTS_DIRTY_SCHEDULERS unique_data.r.o.val0_max += (Uint64) erts_no_dirty_cpu_schedulers; unique_data.r.o.val0_max 
+= (Uint64) erts_no_dirty_io_schedulers; -#endif bits = erts_fit_in_bits_int64(unique_data.r.o.val0_max); unique_data.r.o.left_shift = bits; unique_data.r.o.right_shift = 64 - bits; @@ -803,7 +799,7 @@ BIF_RETTYPE make_ref_0(BIF_ALIST_0) BIF_RETTYPE res; Eterm* hp; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE); diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h index b036b28dbf..05007e864e 100644 --- a/erts/emulator/beam/erl_binary.h +++ b/erts/emulator/beam/erl_binary.h @@ -291,7 +291,7 @@ typedef union { * atomics are used they might * differ in size. */ - erts_smp_atomic_t smp_atomic_word; + erts_atomic_t smp_atomic_word; erts_atomic_t atomic_word; } ErtsMagicIndirectionWord; @@ -326,7 +326,7 @@ ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size, ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size, int (*destructor)(Binary *)); ERTS_GLB_INLINE Binary *erts_create_magic_indirection(int (*destructor)(Binary *)); -ERTS_GLB_INLINE erts_smp_atomic_t *erts_smp_binary_to_magic_indirection(Binary *bp); +ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp); ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -519,16 +519,6 @@ erts_create_magic_indirection(int (*destructor)(Binary *)) but word aligned */ } -ERTS_GLB_INLINE erts_smp_atomic_t * -erts_smp_binary_to_magic_indirection(Binary *bp) -{ - ErtsMagicIndirectionWord *mip; - ASSERT(bp->intern.flags & BIN_FLAG_MAGIC); - ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION); - mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp); - return &mip->smp_atomic_word; -} - ERTS_GLB_INLINE erts_atomic_t * erts_binary_to_magic_indirection(Binary *bp) { @@ -536,7 +526,7 @@ erts_binary_to_magic_indirection(Binary *bp) ASSERT(bp->intern.flags & BIN_FLAG_MAGIC); ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION); mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp); - return &mip->atomic_word; + return &mip->smp_atomic_word; } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c index 51d23a8965..3a16913473 100644 --- a/erts/emulator/beam/erl_bits.c +++ b/erts/emulator/beam/erl_bits.c @@ -55,30 +55,19 @@ static byte get_bit(byte b, size_t a_offs); -#if defined(ERTS_SMP) /* the state resides in the current process' scheduler data */ -#elif defined(ERL_BITS_REENTRANT) -/* reentrant API but with a hidden single global state, for testing only */ -struct erl_bits_state ErlBitsState_; -#else -/* non-reentrant API with a single global state */ -struct erl_bits_state ErlBitsState; -#endif #define byte_buf (ErlBitsState.byte_buf_) #define byte_buf_len (ErlBitsState.byte_buf_len_) -static erts_smp_atomic_t bits_bufs_size; +static erts_atomic_t bits_bufs_size; Uint erts_bits_bufs_size(void) { - return (Uint) erts_smp_atomic_read_nob(&bits_bufs_size); + return (Uint) erts_atomic_read_nob(&bits_bufs_size); } -#if !defined(ERTS_SMP) -static -#endif void erts_bits_init_state(ERL_BITS_PROTO_0) { @@ -88,13 +77,11 @@ erts_bits_init_state(ERL_BITS_PROTO_0) erts_bin_offset = 0; } -#if defined(ERTS_SMP) void erts_bits_destroy_state(ERL_BITS_PROTO_0) { erts_free(ERTS_ALC_T_BITS_BUF, byte_buf); } -#endif void erts_init_bits(void) @@ -104,13 +91,8 @@ erts_init_bits(void) ERTS_CT_ASSERT(offsetof(ErtsBinary,driver.binary.orig_bytes) == 
offsetof(Binary,orig_bytes)); - erts_smp_atomic_init_nob(&bits_bufs_size, 0); -#if defined(ERTS_SMP) + erts_atomic_init_nob(&bits_bufs_size, 0); /* erl_process.c calls erts_bits_init_state() on all state instances */ -#else - ERL_BITS_DECLARE_STATEP; - erts_bits_init_state(ERL_BITS_ARGS_0); -#endif } /***************************************************************** @@ -744,7 +726,7 @@ static void ERTS_INLINE need_byte_buf(ERL_BITS_PROTO_1(int need)) { if (byte_buf_len < need) { - erts_smp_atomic_add_nob(&bits_bufs_size, need - byte_buf_len); + erts_atomic_add_nob(&bits_bufs_size, need - byte_buf_len); byte_buf_len = need; byte_buf = erts_realloc(ERTS_ALC_T_BITS_BUF, byte_buf, byte_buf_len); } diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h index 5da2b28a89..b9d141d585 100644 --- a/erts/emulator/beam/erl_bits.h +++ b/erts/emulator/beam/erl_bits.h @@ -84,31 +84,14 @@ typedef struct erl_bin_match_struct{ #define ms_matchbuffer(_Ms) &(((ErlBinMatchState*) boxed_val(_Ms))->mb) -#if defined(ERTS_SMP) -#define ERL_BITS_REENTRANT -#else -/* uncomment to test the reentrant API in the non-SMP runtime system */ -/* #define ERL_BITS_REENTRANT */ -#endif - -#ifdef ERL_BITS_REENTRANT - /* * Reentrant API with the state passed as a parameter. * (Except when the current Process* already is a parameter.) */ -#ifdef ERTS_SMP /* the state resides in the current process' scheduler data */ #define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS #define ERL_BITS_RELOAD_STATEP(P) do{EBS = &erts_proc_sched_data((P))->erl_bits_state;}while(0) #define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &erts_proc_sched_data((P))->erl_bits_state -#else -/* reentrant API but with a hidden single global state, for testing only */ -extern struct erl_bits_state ErlBitsState_; -#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS = &ErlBitsState_ -#define ERL_BITS_RELOAD_STATEP(P) do{}while(0) -#define ERL_BITS_DEFINE_STATEP(P) ERL_BITS_DECLARE_STATEP -#endif #define ErlBitsState (*EBS) #define ERL_BITS_PROTO_0 struct erl_bits_state *EBS @@ -120,26 +103,6 @@ extern struct erl_bits_state ErlBitsState_; #define ERL_BITS_ARGS_2(ARG1,ARG2) EBS, ARG1, ARG2 #define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) EBS, ARG1, ARG2, ARG3 -#else /* ERL_BITS_REENTRANT */ - -/* - * Non-reentrant API with a single global state. - */ -extern struct erl_bits_state ErlBitsState; -#define ERL_BITS_DECLARE_STATEP /*empty*/ -#define ERL_BITS_RELOAD_STATEP(P) do{}while(0) -#define ERL_BITS_DEFINE_STATEP(P) /*empty*/ - -#define ERL_BITS_PROTO_0 void -#define ERL_BITS_PROTO_1(PARM1) PARM1 -#define ERL_BITS_PROTO_2(PARM1,PARM2) PARM1, PARM2 -#define ERL_BITS_PROTO_3(PARM1,PARM2,PARM3) PARM1, PARM2, PARM3 -#define ERL_BITS_ARGS_0 /*empty*/ -#define ERL_BITS_ARGS_1(ARG1) ARG1 -#define ERL_BITS_ARGS_2(ARG1,ARG2) ARG1, ARG2 -#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) ARG1, ARG2, ARG3 - -#endif /* ERL_BITS_REENTRANT */ #define erts_bin_offset (ErlBitsState.erts_bin_offset_) #define erts_current_bin (ErlBitsState.erts_current_bin_) @@ -158,10 +121,8 @@ extern struct erl_bits_state ErlBitsState; } while (0) void erts_init_bits(void); /* Initialization once. 
*/ -#ifdef ERTS_SMP void erts_bits_init_state(ERL_BITS_PROTO_0); void erts_bits_destroy_state(ERL_BITS_PROTO_0); -#endif /* diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c index f8b2fa744f..49f9beb19f 100644 --- a/erts/emulator/beam/erl_cpu_topology.c +++ b/erts/emulator/beam/erl_cpu_topology.c @@ -60,7 +60,7 @@ static int max_main_threads; static int reader_groups; static ErtsCpuBindData *scheduler2cpu_map; -static erts_smp_rwmtx_t cpuinfo_rwmtx; +static erts_rwmtx_t cpuinfo_rwmtx; typedef enum { ERTS_CPU_BIND_UNDEFINED, @@ -131,13 +131,11 @@ static erts_cpu_groups_map_t *reader_groups_map; #define ERTS_MAX_CPU_TOPOLOGY_ID ((int) 0xffff) -#ifdef ERTS_SMP static void cpu_bind_order_sort(erts_cpu_topology_t *cpudata, int size, ErtsCpuBindOrder bind_order, int mk_seq); static void write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size); -#endif static void reader_groups_callback(int, ErtsSchedulerData *, int, void *); static erts_cpu_groups_map_t *add_cpu_groups(int groups, @@ -434,7 +432,6 @@ processor_order_cmp(const void *vx, const void *vy) return 0; } -#ifdef ERTS_SMP void erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) { @@ -444,7 +441,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) int cgcc_ix; /* Unbind from cpu */ - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); if (scheduler2cpu_map[esdp->no].bound_id >= 0 && erts_unbind_from_cpu(cpuinfo) == 0) { esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1; @@ -463,7 +460,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) } } ASSERT(no_cpu_groups_callbacks == cgcc_ix); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++) cgcc[cgcc_ix].callback(1, @@ -481,7 +478,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) void erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(esdp->run_queue)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue)); if (esdp->no <= max_main_threads) erts_thr_set_main_status(1, (int) esdp->no); @@ -490,7 +487,6 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp) (void) ERTS_RUNQ_FLGS_SET(esdp->run_queue, ERTS_RUNQ_FLG_CHK_CPU_BIND); } -#endif void erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) @@ -499,8 +495,8 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) erts_cpu_groups_map_t *cgm; erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_callback_call_t *cgcc; - erts_smp_runq_unlock(esdp->run_queue); - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_runq_unlock(esdp->run_queue); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); cpu_id = scheduler2cpu_map[esdp->no].bind_id; if (cpu_id >= 0 && cpu_id != scheduler2cpu_map[esdp->no].bound_id) { res = erts_bind_to_cpu(cpuinfo, cpu_id); @@ -543,7 +539,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) } ASSERT(no_cpu_groups_callbacks == cgcc_ix); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++) cgcc[cgcc_ix].callback(0, @@ -553,10 +549,9 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) erts_free(ERTS_ALC_T_TMP, cgcc); - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); } -#ifdef ERTS_SMP void erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp) { @@ -565,7 +560,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData 
*esdp) erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_callback_call_t *cgcc; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); cgcc = erts_alloc(ERTS_ALC_T_TMP, (no_cpu_groups_callbacks @@ -581,7 +576,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp) } ASSERT(no_cpu_groups_callbacks == cgcc_ix); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++) cgcc[cgcc_ix].callback(0, @@ -594,7 +589,6 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp) if (esdp->no <= max_main_threads) erts_thr_set_main_status(1, (int) esdp->no); } -#endif static void write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size) @@ -602,7 +596,7 @@ write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size) int s_ix = 1; int cpu_ix; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); if (cpu_bind_order != ERTS_CPU_BIND_NONE && size) { @@ -702,9 +696,9 @@ Eterm erts_bound_schedulers_term(Process *c_p) { ErtsCpuBindOrder order; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); order = cpu_bind_order; - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return bound_schedulers_term(order); } @@ -717,7 +711,7 @@ erts_bind_schedulers(Process *c_p, Eterm how) int cpudata_size; ErtsCpuBindOrder old_cpu_bind_order; - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP) { if (cpu_bind_order == ERTS_CPU_BIND_NONE @@ -773,7 +767,7 @@ erts_bind_schedulers(Process *c_p, Eterm how) done: - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); if (notify) erts_sched_notify_check_cpu_bind(); @@ -793,9 +787,9 @@ erts_sched_bind_atthrcreate_child(int unbind) { int res = 0; if (unbind) { - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); res = erts_unbind_from_cpu(cpuinfo); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); } return res; } @@ -812,7 +806,7 @@ erts_sched_bind_atfork_prepare(void) ErtsSchedulerData *esdp = erts_get_scheduler_data(); int unbind = esdp != NULL && erts_is_scheduler_bound(esdp); if (unbind) - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); return unbind; } @@ -820,7 +814,7 @@ int erts_sched_bind_atfork_child(int unbind) { if (unbind) { - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) || erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); return erts_unbind_from_cpu(cpuinfo); } @@ -831,7 +825,7 @@ void erts_sched_bind_atfork_parent(int unbind) { if (unbind) - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); } Eterm @@ -865,9 +859,9 @@ erts_fake_scheduler_bindings(Process *p, Eterm how) return res; } - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); create_tmp_cpu_topology_copy(&cpudata, &cpudata_size); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); if (!cpudata || fake_cpu_bind_order == ERTS_CPU_BIND_NONE) ERTS_BIF_PREP_RET(res, am_false); @@ -930,12 +924,12 @@ erts_get_schedulers_binds(Process *c_p) Eterm res = make_tuple(hp); *(hp++) = make_arityval(erts_no_schedulers); - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); for (ix = 1; ix <= erts_no_schedulers; ix++) *(hp++) = 
(scheduler2cpu_map[ix].bound_id >= 0 ? make_small(scheduler2cpu_map[ix].bound_id) : AM_unbound); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return res; } @@ -1346,7 +1340,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term) int cpudata_size = 0; Eterm res; - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); res = get_cpu_topology_term(c_p, ERTS_GET_USED_CPU_TOPOLOGY); if (term == am_undefined) { if (user_cpudata) @@ -1367,7 +1361,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term) } else if (is_not_list(term)) { error: - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); res = THE_NON_VALUE; goto done; } @@ -1461,7 +1455,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term) write_schedulers_bind_change(cpudata, cpudata_size); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); erts_sched_notify_check_cpu_bind(); done: @@ -1615,7 +1609,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which) { Eterm res; int type; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); if (ERTS_IS_ATOM_STR("used", which)) type = ERTS_GET_USED_CPU_TOPOLOGY; else if (ERTS_IS_ATOM_STR("detected", which)) @@ -1628,7 +1622,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which) res = THE_NON_VALUE; else res = get_cpu_topology_term(c_p, type); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return res; } @@ -1646,9 +1640,9 @@ get_logical_processors(int *conf, int *onln, int *avail) void erts_get_logical_processors(int *conf, int *onln, int *avail) { - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); get_logical_processors(conf, onln, avail); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); } void @@ -1706,9 +1700,9 @@ erts_init_cpu_topology(void) { int ix; - erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL, + erts_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); scheduler2cpu_map = erts_alloc(ERTS_ALC_T_CPUDATA, (sizeof(ErtsCpuBindData) @@ -1726,13 +1720,13 @@ erts_init_cpu_topology(void) NULL); if (cpu_bind_order == ERTS_CPU_BIND_NONE) - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); else { erts_cpu_topology_t *cpudata; int cpudata_size; create_tmp_cpu_topology_copy(&cpudata, &cpudata_size); write_schedulers_bind_change(cpudata, cpudata_size); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); erts_sched_notify_check_cpu_bind(); destroy_tmp_cpu_topology_copy(cpudata); } @@ -1742,7 +1736,7 @@ int erts_update_cpu_info(void) { int changed; - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); changed = erts_cpu_info_update(cpuinfo); if (changed) { erts_cpu_topology_t *cpudata; @@ -1775,7 +1769,7 @@ erts_update_cpu_info(void) write_schedulers_bind_change(cpudata, cpudata_size); destroy_tmp_cpu_topology_copy(cpudata); } - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); if (changed) erts_sched_notify_check_cpu_bind(); return changed; @@ -1792,7 +1786,7 @@ reader_groups_callback(int suspending, void *unused) { if (reader_groups && esdp->no <= max_main_threads) - erts_smp_rwmtx_set_reader_group(suspending ? 0 : group+1); + erts_rwmtx_set_reader_group(suspending ? 
0 : group+1); } static Eterm get_cpu_groups_map(Process *c_p, @@ -1821,9 +1815,9 @@ Eterm erts_get_reader_groups_map(Process *c_p) { Eterm res; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); res = get_cpu_groups_map(c_p, reader_groups_map, 1); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return res; } @@ -2203,7 +2197,7 @@ add_cpu_groups(int groups, erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_map_t *cgm; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); if (use_groups > max_main_threads) use_groups = max_main_threads; @@ -2250,7 +2244,7 @@ cpu_groups_lookup(erts_cpu_groups_map_t *map, { int start, logical, ix; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) || erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); if (esdp->cpu_id < 0) @@ -2278,7 +2272,7 @@ static void update_cpu_groups_maps(void) { erts_cpu_groups_map_t *cgm; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); for (cgm = cpu_groups_maps; cgm; cgm = cgm->next) make_cpu_groups_map(cgm, 0); diff --git a/erts/emulator/beam/erl_cpu_topology.h b/erts/emulator/beam/erl_cpu_topology.h index c922214702..88bcad79ab 100644 --- a/erts/emulator/beam/erl_cpu_topology.h +++ b/erts/emulator/beam/erl_cpu_topology.h @@ -60,11 +60,9 @@ int erts_init_scheduler_bind_type_string(char *how); int erts_init_cpu_topology_string(char *topology_str); void erts_sched_check_cpu_bind(ErtsSchedulerData *esdp); -#ifdef ERTS_SMP void erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp); void erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp); void erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp); -#endif int erts_update_cpu_info(void); diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 6d4a895ef6..3ba0886464 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -44,7 +44,7 @@ #include "erl_binary.h" -erts_smp_atomic_t erts_ets_misc_mem_size; +erts_atomic_t erts_ets_misc_mem_size; /* ** Utility macros @@ -61,15 +61,9 @@ enum DbIterSafety { ITER_SAFE_LOCKED, /* Safe while table is locked, not between trap calls */ ITER_SAFE /* No need to fixate at all */ }; -#ifdef ERTS_SMP # define ITERATION_SAFETY(Proc,Tab) \ ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \ : (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED)) -#else -# define ITERATION_SAFETY(Proc,Tab) \ - ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) \ - ? 
ITER_SAFE : ITER_SAFE_LOCKED) -#endif #define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP)) @@ -195,7 +189,7 @@ static void delete_sched_table(Process *c_p, DbTable *tb); static void table_dec_refc(DbTable *tb, erts_aint_t min_val) { - if (erts_smp_refc_dectest(&tb->common.refc, min_val) == 0) + if (erts_refc_dectest(&tb->common.refc, min_val) == 0) schedule_free_dbtable(tb); } @@ -209,21 +203,21 @@ static ERTS_INLINE void make_btid(DbTable *tb) { Binary *btid = erts_create_magic_indirection(db_table_tid_destructor); - erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); - erts_smp_atomic_init_nob(tbref, (erts_aint_t) tb); + erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid); + erts_atomic_init_nob(tbref, (erts_aint_t) tb); tb->common.btid = btid; /* * Table and magic indirection refer eachother, * and table is refered once by being alive... */ - erts_smp_refc_init(&tb->common.refc, 2); + erts_refc_init(&tb->common.refc, 2); erts_refc_inc(&btid->intern.refc, 1); } static ERTS_INLINE DbTable* btid2tab(Binary* btid) { - erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); - return (DbTable *) erts_smp_atomic_read_nob(tbref); + erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid); + return (DbTable *) erts_atomic_read_nob(tbref); } static DbTable * @@ -231,7 +225,7 @@ tid2tab(Eterm tid) { DbTable *tb; Binary *btid; - erts_smp_atomic_t *tbref; + erts_atomic_t *tbref; if (!is_internal_magic_ref(tid)) return NULL; @@ -239,8 +233,8 @@ tid2tab(Eterm tid) if (ERTS_MAGIC_BIN_DESTRUCTOR(btid) != db_table_tid_destructor) return NULL; - tbref = erts_smp_binary_to_magic_indirection(btid); - tb = (DbTable *) erts_smp_atomic_read_nob(tbref); + tbref = erts_binary_to_magic_indirection(btid); + tb = (DbTable *) erts_atomic_read_nob(tbref); ASSERT(!tb || tb->common.btid == btid); @@ -250,11 +244,11 @@ tid2tab(Eterm tid) static ERTS_INLINE int is_table_alive(DbTable *tb) { - erts_smp_atomic_t *tbref; + erts_atomic_t *tbref; DbTable *rtb; - tbref = erts_smp_binary_to_magic_indirection(tb->common.btid); - rtb = (DbTable *) erts_smp_atomic_read_nob(tbref); + tbref = erts_binary_to_magic_indirection(tb->common.btid); + rtb = (DbTable *) erts_atomic_read_nob(tbref); ASSERT(!rtb || rtb == tb); @@ -264,11 +258,7 @@ is_table_alive(DbTable *tb) static ERTS_INLINE int is_table_named(DbTable *tb) { -#ifdef ERTS_SMP return tb->common.type & DB_NAMED_TABLE; -#else - return tb->common.status & DB_NAMED_TABLE; -#endif } @@ -277,8 +267,8 @@ tid_clear(Process *c_p, DbTable *tb) { DbTable *rtb; Binary *btid = tb->common.btid; - erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); - rtb = (DbTable *) erts_smp_atomic_xchg_nob(tbref, (erts_aint_t) NULL); + erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid); + rtb = (DbTable *) erts_atomic_xchg_nob(tbref, (erts_aint_t) NULL); ASSERT(!rtb || tb == rtb); if (rtb) { table_dec_refc(tb, 1); @@ -297,13 +287,11 @@ make_tid(Process *c_p, DbTable *tb) /* ** The meta hash table of all NAMED ets tables */ -#ifdef ERTS_SMP # define META_NAME_TAB_LOCK_CNT 16 union { - erts_smp_rwmtx_t lck; + erts_rwmtx_t lck; byte _cache_line_alignment[64]; }meta_name_tab_rwlocks[META_NAME_TAB_LOCK_CNT]; -#endif static struct meta_name_tab_entry { union { Eterm name_atom; @@ -319,13 +307,11 @@ static unsigned meta_name_tab_mask; static ERTS_INLINE struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name, - erts_smp_rwmtx_t** lockp) + erts_rwmtx_t** lockp) { unsigned bix = atom_val(name) & meta_name_tab_mask; 
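The meta_name_tab_bucket() change spanning this point keeps the existing scheme for named tables: the table name is hashed to a bucket, and the bucket index also selects one of META_NAME_TAB_LOCK_CNT cache-line-padded rwlocks, so lookups on different names rarely contend on the same lock. Below is a minimal standalone sketch of that lock striping using pthreads instead of erts_rwmtx_t; the table size, lock count, padding and hash argument are assumptions for the example, not the ERTS values.

    /* Sketch only: the striping idea behind meta_name_tab_bucket(),
     * not the actual ERTS code.  Sizes and names are assumptions. */
    #include <pthread.h>
    #include <stdint.h>

    #define NAME_TAB_SIZE      256   /* must be a power of two */
    #define NAME_TAB_LOCK_CNT   16   /* number of lock stripes  */

    struct name_entry { void *tb; };

    /* One rwlock per stripe, padded to a cache line to avoid false sharing. */
    static union {
        pthread_rwlock_t lck;
        unsigned char pad[64];
    } name_tab_rwlocks[NAME_TAB_LOCK_CNT];

    static struct name_entry name_tab[NAME_TAB_SIZE];

    /* Map a name hash to its bucket and to the rwlock guarding that bucket. */
    static struct name_entry *
    name_tab_bucket(uint32_t name_hash, pthread_rwlock_t **lockp)
    {
        uint32_t bix = name_hash & (NAME_TAB_SIZE - 1);
        *lockp = &name_tab_rwlocks[bix % NAME_TAB_LOCK_CNT].lck;
        return &name_tab[bix];
    }

A caller read-locks only the stripe returned for its bucket; an operation that touches two names, such as ets_rename_2 further below, takes both stripe locks in address order to avoid deadlock.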
struct meta_name_tab_entry* bucket = &meta_name_tab[bix]; -#ifdef ERTS_SMP *lockp = &meta_name_tab_rwlocks[bix % META_NAME_TAB_LOCK_CNT].lck; -#endif return bucket; } @@ -390,16 +376,14 @@ free_dbtable(void *vtb) { DbTable *tb = (DbTable *) vtb; #ifdef HARDDEBUG - if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) { + if (erts_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) { erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n", - erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable), + erts_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable), tb->common.fixations); } #endif -#ifdef ERTS_SMP - erts_smp_rwmtx_destroy(&tb->common.rwlock); - erts_smp_mtx_destroy(&tb->common.fixlock); -#endif + erts_rwmtx_destroy(&tb->common.rwlock); + erts_mtx_destroy(&tb->common.fixlock); ASSERT(is_immed(tb->common.heir_data)); if (tb->common.btid) @@ -419,8 +403,8 @@ static void schedule_free_dbtable(DbTable* tb) * Caller is *not* allowed to access the specialized part * (hash or tree) of *tb after this function has returned. */ - ASSERT(erts_smp_refc_read(&tb->common.refc, 0) == 0); - ASSERT(erts_smp_refc_read(&tb->common.fix_count, 0) == 0); + ASSERT(erts_refc_read(&tb->common.refc, 0) == 0); + ASSERT(erts_refc_read(&tb->common.fix_count, 0) == 0); erts_schedule_thr_prgr_later_cleanup_op(free_dbtable, (void *) tb, &tb->release.data, @@ -435,7 +419,7 @@ save_sched_table(Process *c_p, DbTable *tb) ASSERT(esdp); esdp->ets_tables.count++; - erts_smp_refc_inc(&tb->common.refc, 1); + erts_refc_inc(&tb->common.refc, 1); first = esdp->ets_tables.clist; if (!first) { @@ -525,11 +509,11 @@ save_owned_table(Process *c_p, DbTable *tb) { DbTable *first; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); first = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES); - erts_smp_refc_inc(&tb->common.refc, 1); + erts_refc_inc(&tb->common.refc, 1); if (!first) { tb->common.owned.next = tb->common.owned.prev = tb; @@ -541,13 +525,13 @@ save_owned_table(Process *c_p, DbTable *tb) tb->common.owned.prev->common.owned.next = tb; first->common.owned.prev = tb; } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); } static ERTS_INLINE void delete_owned_table(Process *p, DbTable *tb) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (tb->common.owned.next == tb) { DbTable* old; ASSERT(tb->common.owned.prev == tb); @@ -570,38 +554,33 @@ delete_owned_table(Process *p, DbTable *tb) if (tb == first) erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, tb->common.owned.next); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); table_dec_refc(tb, 1); } static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock) { -#ifdef ERTS_SMP - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; if (use_frequent_read_lock) - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; if (erts_ets_rwmtx_spin_count >= 0) rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; -#endif -#ifdef ERTS_SMP - erts_smp_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab", + erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB); - erts_smp_mtx_init(&tb->common.fixlock, "db_tab_fix", + erts_mtx_init(&tb->common.fixlock, 
"db_tab_fix", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB); tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED); -#endif } static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind) { -#ifdef ERTS_SMP if (tb->common.type & DB_FINE_LOCKED) { if (kind == LCK_WRITE) { - erts_smp_rwmtx_rwlock(&tb->common.rwlock); + erts_rwmtx_rwlock(&tb->common.rwlock); tb->common.is_thread_safe = 1; } else { - erts_smp_rwmtx_rlock(&tb->common.rwlock); + erts_rwmtx_rlock(&tb->common.rwlock); ASSERT(!tb->common.is_thread_safe); } } @@ -610,14 +589,13 @@ static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind) switch (kind) { case LCK_WRITE: case LCK_WRITE_REC: - erts_smp_rwmtx_rwlock(&tb->common.rwlock); + erts_rwmtx_rwlock(&tb->common.rwlock); break; default: - erts_smp_rwmtx_rlock(&tb->common.rwlock); + erts_rwmtx_rlock(&tb->common.rwlock); } ASSERT(tb->common.is_thread_safe); } -#endif } static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) @@ -627,16 +605,15 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) * DbTable structure. That is, ONLY the SMP case is allowed * to follow the tb pointer! */ -#ifdef ERTS_SMP if (tb->common.type & DB_FINE_LOCKED) { if (kind == LCK_WRITE) { ASSERT(tb->common.is_thread_safe); tb->common.is_thread_safe = 0; - erts_smp_rwmtx_rwunlock(&tb->common.rwlock); + erts_rwmtx_rwunlock(&tb->common.rwlock); } else { ASSERT(!tb->common.is_thread_safe); - erts_smp_rwmtx_runlock(&tb->common.rwlock); + erts_rwmtx_runlock(&tb->common.rwlock); } } else { @@ -644,13 +621,12 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) switch (kind) { case LCK_WRITE: case LCK_WRITE_REC: - erts_smp_rwmtx_rwunlock(&tb->common.rwlock); + erts_rwmtx_rwunlock(&tb->common.rwlock); break; default: - erts_smp_rwmtx_runlock(&tb->common.rwlock); + erts_rwmtx_runlock(&tb->common.rwlock); } } -#endif } static ERTS_INLINE @@ -661,7 +637,7 @@ DbTable* db_get_table_aux(Process *p, int meta_already_locked) { DbTable *tb; - erts_smp_rwmtx_t *mtl = NULL; + erts_rwmtx_t *mtl = NULL; /* * IMPORTANT: Only scheduler threads are allowed @@ -673,9 +649,9 @@ DbTable* db_get_table_aux(Process *p, if (is_atom(id)) { struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl); if (!meta_already_locked) - erts_smp_rwmtx_rlock(mtl); + erts_rwmtx_rlock(mtl); else{ - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl) || erts_lc_rwmtx_is_rwlocked(mtl)); mtl = NULL; } @@ -709,7 +685,7 @@ DbTable* db_get_table_aux(Process *p, } } if (mtl) - erts_smp_rwmtx_runlock(mtl); + erts_rwmtx_runlock(mtl); return tb; } @@ -725,12 +701,12 @@ DbTable* db_get_table(Process *p, static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock) { int ret = 0; - erts_smp_rwmtx_t* rwlock; + erts_rwmtx_t* rwlock; struct meta_name_tab_entry* new_entry; struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom, &rwlock); if (!have_lock) - erts_smp_rwmtx_rwlock(rwlock); + erts_rwmtx_rwlock(rwlock); if (bucket->pu.tb == NULL) { /* empty */ new_entry = bucket; @@ -778,27 +754,25 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock) done: if (!have_lock) - erts_smp_rwmtx_rwunlock(rwlock); + erts_rwmtx_rwunlock(rwlock); return ret; } static int remove_named_tab(DbTable *tb, int have_lock) { int ret = 0; - erts_smp_rwmtx_t* rwlock; + erts_rwmtx_t* rwlock; Eterm name_atom = tb->common.the_name; struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom, &rwlock); 
ASSERT(is_table_named(tb)); -#ifdef ERTS_SMP - if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) { + if (!have_lock && erts_rwmtx_tryrwlock(rwlock) == EBUSY) { db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwlock(rwlock); + erts_rwmtx_rwlock(rwlock); db_lock(tb, LCK_WRITE); } -#endif - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock)); if (bucket->pu.tb == NULL) { goto done; @@ -851,7 +825,7 @@ static int remove_named_tab(DbTable *tb, int have_lock) done: if (!have_lock) - erts_smp_rwmtx_rwunlock(rwlock); + erts_rwmtx_rwunlock(rwlock); return ret; } @@ -860,11 +834,11 @@ done: */ static ERTS_INLINE void local_fix_table(DbTable* tb) { - erts_smp_refc_inc(&tb->common.fix_count, 1); + erts_refc_inc(&tb->common.fix_count, 1); } static ERTS_INLINE void local_unfix_table(DbTable* tb) { - if (erts_smp_refc_dectest(&tb->common.fix_count, 0) == 0) { + if (erts_refc_dectest(&tb->common.fix_count, 0) == 0) { ASSERT(IS_HASH_TABLE(tb->common.status)); db_unfix_table_hash(&(tb->hash)); } @@ -1505,7 +1479,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) DbTable* tb; Eterm ret; Eterm old_name; - erts_smp_rwmtx_t *lck1, *lck2; + erts_rwmtx_t *lck1, *lck2; #ifdef HARDDEBUG erts_fprintf(stderr, @@ -1528,7 +1502,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) if (lck1 == lck2) lck2 = NULL; else if (lck1 > lck2) { - erts_smp_rwmtx_t *tmp = lck1; + erts_rwmtx_t *tmp = lck1; lck1 = lck2; lck2 = tmp; } @@ -1546,9 +1520,9 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) } } - erts_smp_rwmtx_rwlock(lck1); + erts_rwmtx_rwlock(lck1); if (lck2) - erts_smp_rwmtx_rwlock(lck2); + erts_rwmtx_rwlock(lck2); tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1); if (!tb) @@ -1568,16 +1542,16 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) tb->common.the_name = BIF_ARG_2; db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwunlock(lck1); + erts_rwmtx_rwunlock(lck1); if (lck2) - erts_smp_rwmtx_rwunlock(lck2); + erts_rwmtx_rwunlock(lck2); BIF_RET(ret); badarg: if (tb) db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwunlock(lck1); + erts_rwmtx_rwunlock(lck1); if (lck2) - erts_smp_rwmtx_rwunlock(lck2); + erts_rwmtx_rwunlock(lck2); BIF_ERROR(BIF_P, BADARG); } @@ -1598,9 +1572,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) Uint32 status; Sint keypos; int is_named, is_compressed; -#ifdef ERTS_SMP int is_fine_locked, frequent_read; -#endif #ifdef DEBUG int cret; #endif @@ -1616,10 +1588,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) status = DB_SET | DB_PROTECTED; keypos = 1; is_named = 0; -#ifdef ERTS_SMP is_fine_locked = 0; frequent_read = 0; -#endif heir = am_none; heir_data = (UWord) am_undefined; is_compressed = erts_ets_always_compress; @@ -1647,30 +1617,18 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) keypos = signed_val(tp[2]); } else if (tp[1] == am_write_concurrency) { -#ifdef ERTS_SMP if (tp[2] == am_true) { is_fine_locked = 1; } else if (tp[2] == am_false) { is_fine_locked = 0; } else break; -#else - if ((tp[2] != am_true) && (tp[2] != am_false)) { - break; - } -#endif } else if (tp[1] == am_read_concurrency) { -#ifdef ERTS_SMP if (tp[2] == am_true) { frequent_read = 1; } else if (tp[2] == am_false) { frequent_read = 0; } else break; -#else - if ((tp[2] != am_true) && (tp[2] != am_false)) { - break; - } -#endif } else if (tp[1] == am_heir && tp[2] == am_none) { @@ -1712,11 +1670,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) } if (IS_HASH_TABLE(status)) { meth = &db_hash; -#ifdef ERTS_SMP if (is_fine_locked && !(status & DB_PRIVATE)) { status |= DB_FINE_LOCKED; } -#endif } else if (IS_TREE_TABLE(status)) { meth = 
&db_tree; @@ -1725,10 +1681,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); } -#ifdef ERTS_SMP if (frequent_read && !(status & DB_PRIVATE)) status |= DB_FREQ_READ; -#endif /* we create table outside any table lock * and take the unusal cost of destroy table if it @@ -1737,27 +1691,25 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) { DbTable init_tb; - erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0); + erts_atomic_init_nob(&init_tb.common.memory_size, 0); tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, &init_tb, sizeof(DbTable)); - erts_smp_atomic_init_nob(&tb->common.memory_size, - erts_smp_atomic_read_nob(&init_tb.common.memory_size)); + erts_atomic_init_nob(&tb->common.memory_size, + erts_atomic_read_nob(&init_tb.common.memory_size)); } tb->common.meth = meth; tb->common.the_name = BIF_ARG_1; tb->common.status = status; -#ifdef ERTS_SMP tb->common.type = status & ERTS_ETS_TABLE_TYPES; /* Note, 'type' is *read only* from now on... */ -#endif - erts_smp_refc_init(&tb->common.fix_count, 0); + erts_refc_init(&tb->common.fix_count, 0); db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ)); tb->common.keypos = keypos; tb->common.owner = BIF_P->common.id; set_heir(BIF_P, tb, heir, heir_data); - erts_smp_atomic_init_nob(&tb->common.nitems, 0); + erts_atomic_init_nob(&tb->common.nitems, 0); tb->common.fixing_procs = NULL; tb->common.compress = is_compressed; @@ -1940,7 +1892,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) * Process 'rp' might be exiting, but our table lock prevents it * from terminating as it cannot complete erts_db_process_exiting(). */ - ASSERT(!(ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state))); + ASSERT(!(ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state))); delete_owned_table(rp, tb); BIF_P->flags |= F_USING_DB; @@ -2015,12 +1967,12 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3) db_unlock(tb,LCK_WRITE); send_ets_transfer_message(BIF_P, to_proc, &to_locks, tb, BIF_ARG_3); - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); UnUseTmpHeap(5,BIF_P); BIF_RET(am_true); badarg: - if (to_proc != NULL && to_proc != BIF_P) erts_smp_proc_unlock(to_proc, to_locks); + if (to_proc != NULL && to_proc != BIF_P) erts_proc_unlock(to_proc, to_locks); if (tb != NULL) db_unlock(tb, LCK_WRITE); BIF_ERROR(BIF_P, BADARG); } @@ -2244,7 +2196,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2) if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) { BIF_ERROR(BIF_P, BADARG); } - nitems = erts_smp_atomic_read_nob(&tb->common.nitems); + nitems = erts_atomic_read_nob(&tb->common.nitems); tb->common.meth->db_delete_all_objects(BIF_P, tb); db_unlock(tb, LCK_WRITE); BIF_RET(erts_make_integer(nitems,BIF_P)); @@ -2295,7 +2247,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2) */ struct ErtsEtsAllReq_ { - erts_smp_atomic32_t refc; + erts_atomic32_t refc; Process *proc; ErtsOIRefStorage ref; ErtsEtsAllReqList list[1]; /* one per scheduler */ @@ -2428,7 +2380,7 @@ ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp, erts_proc_dec_refc(reqp->proc); - if (erts_smp_atomic32_dec_read_nob(&reqp->refc) == 0) + if (erts_atomic32_dec_read_nob(&reqp->refc) == 0) erts_free(ERTS_ALC_T_ETS_ALL_REQ, reqp); *reqpp = NULL; @@ -2516,19 +2468,17 @@ BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0) Eterm ref = erts_make_ref(BIF_P); ErtsEtsAllReq *req = erts_alloc(ERTS_ALC_T_ETS_ALL_REQ, ERTS_ETS_ALL_REQ_SIZE); - erts_smp_atomic32_init_nob(&req->refc, + erts_atomic32_init_nob(&req->refc, (erts_aint32_t) erts_no_schedulers); 
erts_oiref_storage_save(&req->ref, ref); req->proc = BIF_P; erts_proc_add_refc(BIF_P, (Sint) erts_no_schedulers); -#ifdef ERTS_SMP if (erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, handle_ets_all_request, (void *) req); -#endif handle_ets_all_request((void *) req); BIF_RET(ref); @@ -3212,7 +3162,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1) if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL || tb->common.owner != owner) { if (BIF_P != rp) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) { BIF_RET(am_undefined); } @@ -3226,7 +3176,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1) db_unlock(tb, LCK_READ); /*if (rp != NULL && rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/ + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/ hp = HAlloc(BIF_P, 5*sizeof(fields)/sizeof(Eterm)); res = NIL; @@ -3345,11 +3295,10 @@ void init_db(ErtsDbSpinCount db_spin_count) unsigned bits; size_t size; -#ifdef ERTS_SMP int max_spin_count = (1 << 15) - 1; /* internal limit */ - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; switch (db_spin_count) { case ERTS_DB_SPNCNT_NONE: @@ -3389,13 +3338,12 @@ void init_db(ErtsDbSpinCount db_spin_count) rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) { - erts_smp_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt, + erts_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt, "meta_name_tab", make_small(i), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB); } -#endif - erts_smp_atomic_init_nob(&erts_ets_misc_mem_size, 0); + erts_atomic_init_nob(&erts_ets_misc_mem_size, 0); db_initialize_util(); if (user_requested_db_max_tabs < DB_DEF_MAX_TABS) @@ -3496,14 +3444,14 @@ retry: if (tb->common.owner != p->common.id) { if (to_proc != NULL ) { - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); } db_unlock(tb,LCK_WRITE); return !0; /* ok, someone already gave my table away */ } if (tb->common.heir != to_pid) { /* someone changed the heir */ if (to_proc != NULL ) { - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); } if (to_pid == p->common.id || to_pid == am_none) { return 0; /* no real heir, table still mine */ @@ -3516,7 +3464,7 @@ retry: } if (to_proc->common.u.alive.started_interval != tb->common.heir_started_interval) { - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); return 0; /* heir dead and pid reused, table still mine */ } @@ -3533,7 +3481,7 @@ retry: heir_data = tpv[1]; } send_ets_transfer_message(p, to_proc, &to_locks, tb, heir_data); - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); return !0; } @@ -3584,21 +3532,17 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix) db_lock(tb, LCK_WRITE_REC); if (!(tb->common.status & DB_DELETE)) { erts_aint_t diff; - #ifdef ERTS_SMP - erts_smp_mtx_lock(&tb->common.fixlock); - #endif + erts_mtx_lock(&tb->common.fixlock); ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p)); diff = -((erts_aint_t) fix->counter); - erts_smp_refc_add(&tb->common.fix_count,diff,0); + erts_refc_add(&tb->common.fix_count,diff,0); 
fix->counter = 0; fixing_procs_rbt_delete(&tb->common.fixing_procs, fix); - #ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); - #endif + erts_mtx_unlock(&tb->common.fixlock); if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) { work += db_unfix_table_hash(&(tb->hash)); } @@ -3655,9 +3599,9 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks) switch (state->op) { case GET_OWNED_TABLE: { DbTable* tb; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); tb = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); if (!tb) { /* Done with owned tables; now fixations */ @@ -3748,10 +3692,8 @@ static void fix_table_locked(Process* p, DbTable* tb) { DbFixation *fix; -#ifdef ERTS_SMP - erts_smp_mtx_lock(&tb->common.fixlock); -#endif - erts_smp_refc_inc(&tb->common.fix_count,1); + erts_mtx_lock(&tb->common.fixlock); + erts_refc_inc(&tb->common.fix_count,1); fix = tb->common.fixing_procs; if (fix == NULL) { tb->common.time.monotonic @@ -3764,9 +3706,7 @@ static void fix_table_locked(Process* p, DbTable* tb) ASSERT(fixed_tabs_find(NULL, fix)); ++(fix->counter); -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); return; } } @@ -3779,9 +3719,7 @@ static void fix_table_locked(Process* p, DbTable* tb) fix->counter = 1; fixing_procs_rbt_insert(&tb->common.fixing_procs, fix); -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); p->flags |= F_USING_DB; fixed_tabs_insert(p, fix); @@ -3794,20 +3732,16 @@ static void unfix_table_locked(Process* p, DbTable* tb, { DbFixation* fix; -#ifdef ERTS_SMP - erts_smp_mtx_lock(&tb->common.fixlock); -#endif + erts_mtx_lock(&tb->common.fixlock); fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p); if (fix) { - erts_smp_refc_dec(&tb->common.fix_count,0); + erts_refc_dec(&tb->common.fix_count,0); --(fix->counter); ASSERT(fix->counter >= 0); if (fix->counter == 0) { fixing_procs_rbt_delete(&tb->common.fixing_procs, fix); -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); fixed_tabs_delete(p, fix); erts_refc_dec(&fix->tabs.btid->intern.refc, 1); @@ -3818,22 +3752,18 @@ static void unfix_table_locked(Process* p, DbTable* tb, goto unlocked; } } -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); unlocked: if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status) - && erts_smp_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) { -#ifdef ERTS_SMP + && erts_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) { if (*kind_p == LCK_READ && tb->common.is_thread_safe) { /* Must have write lock while purging pseudo-deleted (OTP-8166) */ - erts_smp_rwmtx_runlock(&tb->common.rwlock); - erts_smp_rwmtx_rwlock(&tb->common.rwlock); + erts_rwmtx_runlock(&tb->common.rwlock); + erts_rwmtx_rwlock(&tb->common.rwlock); *kind_p = LCK_WRITE; if (tb->common.status & DB_DELETE) return; } -#endif db_unfix_table_hash(&(tb->hash)); } } @@ -3855,9 +3785,8 @@ static void free_fixations_op(DbFixation* fix, void* vctx) ASSERT(ctx->tb->common.status & DB_DELETE); diff = -((erts_aint_t) fix->counter); - erts_smp_refc_add(&ctx->tb->common.fix_count, diff, 0); + erts_refc_add(&ctx->tb->common.fix_count, diff, 0); -#ifdef ERTS_SMP if (fix->procs.p != ctx->p) { /* Fixated by other process */ fix->counter = 0; @@ 
-3873,7 +3802,6 @@ static void free_fixations_op(DbFixation* fix, void* vctx) */ } else -#endif { fixed_tabs_delete(fix->procs.p, fix); @@ -3886,7 +3814,6 @@ static void free_fixations_op(DbFixation* fix, void* vctx) ctx->cnt++; } -#ifdef ERTS_SMP int erts_db_execute_free_fixation(Process* p, DbFixation* fix) { ASSERT(fix->counter == 0); @@ -3898,13 +3825,12 @@ int erts_db_execute_free_fixation(Process* p, DbFixation* fix) ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation)); return 1; } -#endif static SWord free_fixations_locked(Process* p, DbTable *tb) { struct free_fixations_ctx ctx; - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); ctx.p = p; ctx.tb = tb; @@ -4047,7 +3973,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) int use_monotonic; if (What == am_size) { - ret = make_small(erts_smp_atomic_read_nob(&tb->common.nitems)); + ret = make_small(erts_atomic_read_nob(&tb->common.nitems)); } else if (What == am_type) { if (tb->common.status & DB_SET) { ret = am_set; @@ -4060,7 +3986,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) ret = am_bag; } } else if (What == am_memory) { - Uint words = (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size) + Uint words = (Uint) ((erts_atomic_read_nob(&tb->common.memory_size) + sizeof(Uint) - 1) / sizeof(Uint)); @@ -4106,9 +4032,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) = ERTS_IS_ATOM_STR("safe_fixed_monotonic_time", What)) || ERTS_IS_ATOM_STR("safe_fixed", What)) { -#ifdef ERTS_SMP - erts_smp_mtx_lock(&tb->common.fixlock); -#endif + erts_mtx_lock(&tb->common.fixlock); if (IS_FIXED(tb)) { Uint need; Eterm *hp; @@ -4150,9 +4074,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) } else { ret = am_false; } -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); } else if (What == am_atom_put("stats",5)) { if (IS_HASH_TABLE(tb->common.status)) { FloatDef f; @@ -4176,7 +4098,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) std_dev_exp = make_float(hp); PUT_DOUBLE(f, hp); hp += FLOAT_SIZE_OBJECT; - ret = TUPLE7(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)), + ret = TUPLE7(hp, make_small(erts_atomic_read_nob(&tb->hash.nactive)), avg, std_dev_real, std_dev_exp, make_small(stats.min_chain_len), make_small(stats.max_chain_len), @@ -4208,9 +4130,9 @@ static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb) tb->common.meth->db_print(to, to_arg, show, tb); - erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read_nob(&tb->common.nitems)); + erts_print(to, to_arg, "Objects: %d\n", (int)erts_atomic_read_nob(&tb->common.nitems)); erts_print(to, to_arg, "Words: %bpu\n", - (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size) + (Uint) ((erts_atomic_read_nob(&tb->common.memory_size) + sizeof(Uint) - 1) / sizeof(Uint))); @@ -4250,9 +4172,9 @@ void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler * Uint erts_get_ets_misc_mem_size(void) { - ERTS_SMP_MEMORY_BARRIER; + ERTS_THR_MEMORY_BARRIER; /* Memory not allocated in ets_alloc */ - return (Uint) erts_smp_atomic_read_nob(&erts_ets_misc_mem_size); + return (Uint) erts_atomic_read_nob(&erts_ets_misc_mem_size); } /* SMP Note: May only be used when system is locked */ @@ -4261,7 +4183,7 @@ erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg) { int ix; - ASSERT(erts_smp_thr_progress_is_blocking()); + 
ASSERT(erts_thr_progress_is_blocking()); for (ix = 0; ix < erts_no_schedulers; ix++) { ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix); @@ -4367,4 +4289,4 @@ void erts_lcnt_update_db_locks(int enable) { &lcnt_update_db_locks_per_sched, (void*)(UWord)enable); } -#endif /* ERTS_ENABLE_LOCK_COUNT */
\ No newline at end of file +#endif /* ERTS_ENABLE_LOCK_COUNT */ diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index d83126b3a2..318e90cb28 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -124,7 +124,7 @@ extern Export ets_select_delete_continue_exp; extern Export ets_select_count_continue_exp; extern Export ets_select_replace_continue_exp; extern Export ets_select_continue_exp; -extern erts_smp_atomic_t erts_ets_misc_mem_size; +extern erts_atomic_t erts_ets_misc_mem_size; Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt); Uint erts_db_get_max_tabs(void); @@ -151,11 +151,11 @@ do { \ erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \ - ((erts_aint_t) (FREE_SZ))); \ ASSERT((TAB)); \ - erts_smp_atomic_add_nob(&(TAB)->common.memory_size, sz__); \ + erts_atomic_add_nob(&(TAB)->common.memory_size, sz__); \ } while (0) #define ERTS_ETS_MISC_MEM_ADD(SZ) \ - erts_smp_atomic_add_nob(&erts_ets_misc_mem_size, (SZ)); + erts_atomic_add_nob(&erts_ets_misc_mem_size, (SZ)); ERTS_GLB_INLINE void *erts_db_alloc(ErtsAlcType_t type, DbTable *tab, @@ -292,7 +292,7 @@ erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size) ERTS_DB_ALC_MEM_UPDATE_(tab, size, 0); ASSERT(((void *) tab) != ptr - || erts_smp_atomic_read_nob(&tab->common.memory_size) == 0); + || erts_atomic_read_nob(&tab->common.memory_size) == 0); erts_free(type, ptr); } diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index cf928a9035..5d49b2ea14 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -109,22 +109,18 @@ #define NSEG_2 256 /* Size of second segment table */ #define NSEG_INC 128 /* Number of segments to grow after that */ -#ifdef ERTS_SMP # define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED) -#else -# define DB_USING_FINE_LOCKING(TB) 0 -#endif #ifdef ETHR_ORDERED_READ_DEPEND -#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)) +#define SEGTAB(tb) ((struct segment**) erts_atomic_read_nob(&(tb)->segtab)) #else #define SEGTAB(tb) \ (DB_USING_FINE_LOCKING(tb) \ - ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \ - : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))) + ? ((struct segment**) erts_atomic_read_ddrb(&(tb)->segtab)) \ + : ((struct segment**) erts_atomic_read_nob(&(tb)->segtab))) #endif -#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive)) -#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) +#define NACTIVE(tb) ((int)erts_atomic_read_nob(&(tb)->nactive)) +#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems)) #define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP) @@ -142,12 +138,12 @@ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval) { Uint mask = (DB_USING_FINE_LOCKING(tb) - ? erts_smp_atomic_read_acqb(&tb->szm) - : erts_smp_atomic_read_nob(&tb->szm)); + ? 
erts_atomic_read_acqb(&tb->szm) + : erts_atomic_read_nob(&tb->szm)); Uint ix = hval & mask; - if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) { + if (ix >= erts_atomic_read_nob(&tb->nactive)) { ix &= mask>>1; - ASSERT(ix < erts_smp_atomic_read_nob(&tb->nactive)); + ASSERT(ix < erts_atomic_read_nob(&tb->nactive)); } return ix; } @@ -166,7 +162,7 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion)); fixd->slot = ix; - was_next = erts_smp_atomic_read_acqb(&tb->fixdel); + was_next = erts_atomic_read_acqb(&tb->fixdel); do { /* Lockless atomic insertion in linked list: */ if (NFIXED(tb) <= fixated_by_me) { erts_db_free(ERTS_ALC_T_DB_FIX_DEL, (DbTable*)tb, @@ -175,7 +171,7 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, } exp_next = was_next; fixd->next = (FixedDeletion*) exp_next; - was_next = erts_smp_atomic_cmpxchg_mb(&tb->fixdel, + was_next = erts_atomic_cmpxchg_mb(&tb->fixdel, (erts_aint_t) fixd, exp_next); }while (was_next != exp_next); @@ -191,62 +187,55 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, ((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \ make_internal_hash(term, 0)) % MAX_HASH) -#ifdef ERTS_SMP # define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1) # define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck) # define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval)) /* Fine grained read lock */ -static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval) +static ERTS_INLINE erts_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval) { if (tb->common.is_thread_safe) { return NULL; } else { - erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval); + erts_rwmtx_t* lck = GET_LOCK(tb,hval); ASSERT(tb->common.type & DB_FINE_LOCKED); - erts_smp_rwmtx_rlock(lck); + erts_rwmtx_rlock(lck); return lck; } } /* Fine grained write lock */ -static ERTS_INLINE erts_smp_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval) +static ERTS_INLINE erts_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval) { if (tb->common.is_thread_safe) { return NULL; } else { - erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval); + erts_rwmtx_t* lck = GET_LOCK(tb,hval); ASSERT(tb->common.type & DB_FINE_LOCKED); - erts_smp_rwmtx_rwlock(lck); + erts_rwmtx_rwlock(lck); return lck; } } -static ERTS_INLINE void RUNLOCK_HASH(erts_smp_rwmtx_t* lck) +static ERTS_INLINE void RUNLOCK_HASH(erts_rwmtx_t* lck) { if (lck != NULL) { - erts_smp_rwmtx_runlock(lck); + erts_rwmtx_runlock(lck); } } -static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck) +static ERTS_INLINE void WUNLOCK_HASH(erts_rwmtx_t* lck) { if (lck != NULL) { - erts_smp_rwmtx_rwunlock(lck); + erts_rwmtx_rwunlock(lck); } } -#else /* ERTS_SMP */ -# define RLOCK_HASH(tb,hval) NULL -# define WLOCK_HASH(tb,hval) NULL -# define RUNLOCK_HASH(lck) ((void)lck) -# define WUNLOCK_HASH(lck) ((void)lck) -#endif /* ERTS_SMP */ #ifdef ERTS_ENABLE_LOCK_CHECK # define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd)) -# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval))) -# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rwlocked(lck)) -# define IS_TAB_WLOCKED(tb) erts_smp_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock) +# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval))) +# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_lc_rwmtx_is_rwlocked(lck)) +# define IS_TAB_WLOCKED(tb) 
erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock) #else # define IS_HASH_RLOCKED(tb,hval) (1) # define IS_HASH_WLOCKED(tb,hval) (1) @@ -259,33 +248,25 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck) ** Slot READ locks updated accordingly, unlocked if EOT. */ static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix, - erts_smp_rwmtx_t** lck_ptr) + erts_rwmtx_t** lck_ptr) { -#ifdef ERTS_SMP ix += DB_HASH_LOCK_CNT; if (ix < NACTIVE(tb)) return ix; RUNLOCK_HASH(*lck_ptr); ix = (ix + 1) & DB_HASH_LOCK_MASK; if (ix != 0) *lck_ptr = RLOCK_HASH(tb,ix); return ix; -#else - return (++ix < NACTIVE(tb)) ? ix : 0; -#endif } /* Same as next_slot but with WRITE locking */ static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix, - erts_smp_rwmtx_t** lck_ptr) + erts_rwmtx_t** lck_ptr) { -#ifdef ERTS_SMP ix += DB_HASH_LOCK_CNT; if (ix < NACTIVE(tb)) return ix; WUNLOCK_HASH(*lck_ptr); ix = (ix + 1) & DB_HASH_LOCK_MASK; if (ix != 0) *lck_ptr = WLOCK_HASH(tb,ix); return ix; -#else - return next_slot(tb,ix,lck_ptr); -#endif } @@ -331,9 +312,7 @@ struct segment { /* An extended segment table */ struct ext_segtab { -#ifdef ERTS_SMP ErtsThrPrgrLaterOp lop; -#endif struct segment** prev_segtab; /* Used when table is shrinking */ int prev_nsegs; /* Size of prev_segtab */ int nsegs; /* Size of this segtab */ @@ -347,9 +326,9 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, struct segment** segtab) { if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); + erts_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); else - erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); + erts_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); } /* Used by select_replace on analyze_pattern */ @@ -361,7 +340,7 @@ typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Ete static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix); static void alloc_seg(DbTableHash *tb); static int free_seg(DbTableHash *tb, int free_records); -static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, +static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr, HashDbTerm *list); static HashDbTerm* search_list(DbTableHash* tb, Eterm key, HashValue hval, HashDbTerm *list); @@ -559,7 +538,7 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel) { /*int tries = 0;*/ DEBUG_WAIT(); - if (erts_smp_atomic_cmpxchg_relb(&tb->fixdel, + if (erts_atomic_cmpxchg_relb(&tb->fixdel, (erts_aint_t) fixdel, (erts_aint_t) NULL) != (erts_aint_t) NULL) { /* Oboy, must join lists */ @@ -568,13 +547,13 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel) erts_aint_t exp_tail; while (last->next != NULL) last = last->next; - was_tail = erts_smp_atomic_read_acqb(&tb->fixdel); + was_tail = erts_atomic_read_acqb(&tb->fixdel); do { /* Lockless atomic list insertion */ exp_tail = was_tail; last->next = (FixedDeletion*) exp_tail; /*++tries;*/ DEBUG_WAIT(); - was_tail = erts_smp_atomic_cmpxchg_relb(&tb->fixdel, + was_tail = erts_atomic_cmpxchg_relb(&tb->fixdel, (erts_aint_t) fixdel, exp_tail); }while (was_tail != exp_tail); @@ -590,18 +569,18 @@ SWord db_unfix_table_hash(DbTableHash *tb) FixedDeletion* fixdel; SWord work = 0; - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock) - || (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock) + || (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock) && !tb->common.is_thread_safe)); restart: - 
fixdel = (FixedDeletion*) erts_smp_atomic_xchg_mb(&tb->fixdel, + fixdel = (FixedDeletion*) erts_atomic_xchg_mb(&tb->fixdel, (erts_aint_t) NULL); while (fixdel != NULL) { FixedDeletion *fx = fixdel; int ix = fx->slot; HashDbTerm **bp; HashDbTerm *b; - erts_smp_rwmtx_t* lck = WLOCK_HASH(tb,ix); + erts_rwmtx_t* lck = WLOCK_HASH(tb,ix); if (IS_FIXED(tb)) { /* interrupted by fixer */ WUNLOCK_HASH(lck); @@ -647,10 +626,10 @@ int db_create_hash(Process *p, DbTable *tbl) { DbTableHash *tb = &tbl->hash; - erts_smp_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK); - erts_smp_atomic_init_nob(&tb->nactive, FIRST_SEGSZ); - erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); - erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); + erts_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK); + erts_atomic_init_nob(&tb->nactive, FIRST_SEGSZ); + erts_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); + erts_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); SET_SEGTAB(tb, tb->first_segtab); tb->nsegs = NSEG_1; tb->nslots = FIRST_SEGSZ; @@ -659,32 +638,30 @@ int db_create_hash(Process *p, DbTable *tbl) SIZEOF_SEGMENT(FIRST_SEGSZ)); sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ)); -#ifdef ERTS_SMP - erts_smp_atomic_init_nob(&tb->is_resizing, 0); + erts_atomic_init_nob(&tb->is_resizing, 0); if (tb->common.type & DB_FINE_LOCKED) { - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; int i; if (tb->common.type & DB_FREQ_READ) - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; if (erts_ets_rwmtx_spin_count >= 0) rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? 
*/ (DbTable *) tb, sizeof(DbTableHashFineLocks)); for (i=0; i<DB_HASH_LOCK_CNT; ++i) { - erts_smp_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt, + erts_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt, "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB); } /* This important property is needed to guarantee the two buckets * involved in a grow/shrink operation it protected by the same lock: */ - ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0); + ASSERT(erts_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0); } else { /* coarse locking */ tb->locks = NULL; } ERTS_THR_MEMORY_BARRIER; -#endif /* ERST_SMP */ return DB_ERROR_NONE; } @@ -692,7 +669,7 @@ static int db_first_hash(Process *p, DbTable *tbl, Eterm *ret) { DbTableHash *tb = &tbl->hash; Uint ix = 0; - erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix); + erts_rwmtx_t* lck = RLOCK_HASH(tb,ix); HashDbTerm* list; for (;;) { @@ -725,7 +702,7 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) HashValue hval; Uint ix; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); lck = RLOCK_HASH(tb,hval); @@ -772,7 +749,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) HashDbTerm** bp; HashDbTerm* b; HashDbTerm* q; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nitems; int ret = DB_ERROR_NONE; @@ -798,7 +775,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) if (tb->common.status & DB_SET) { HashDbTerm* bnext = b->next; if (b->hvalue == INVALID_HASH) { - erts_smp_atomic_inc_nob(&tb->common.nitems); + erts_atomic_inc_nob(&tb->common.nitems); } else if (key_clash_fail) { ret = DB_ERROR_BADKEY; @@ -826,7 +803,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) do { if (db_eq(&tb->common,obj,&q->dbterm)) { if (q->hvalue == INVALID_HASH) { - erts_smp_atomic_inc_nob(&tb->common.nitems); + erts_atomic_inc_nob(&tb->common.nitems); q->hvalue = hval; if (q != b) { /* must move to preserve key insertion order */ *qp = q->next; @@ -847,7 +824,7 @@ Lnew: q->hvalue = hval; q->next = b; *bp = q; - nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems); + nitems = erts_atomic_inc_read_nob(&tb->common.nitems); WUNLOCK_HASH(lck); { int nactive = NACTIVE(tb); @@ -891,7 +868,7 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) HashValue hval; int ix; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); lck = RLOCK_HASH(tb,hval); @@ -917,7 +894,7 @@ static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret) HashValue hval; int ix; HashDbTerm* b1; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); ix = hash_to_ix(tb, hval); @@ -946,7 +923,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl, HashValue hval; int ix; HashDbTerm* b1; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int retval; hval = MAKE_HASH(key); @@ -1011,7 +988,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret) int ix; HashDbTerm** bp; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nitems_diff = 0; hval = MAKE_HASH(key); @@ -1043,7 +1020,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); + erts_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } *ret = am_true; @@ -1060,7 +1037,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) int ix; HashDbTerm** bp; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* 
lck; int nitems_diff = 0; int nkeys = 0; Eterm key; @@ -1101,7 +1078,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); + erts_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } *ret = am_true; @@ -1112,7 +1089,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret) { DbTableHash *tb = &tbl->hash; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; Sint slot; int retval; int nactive; @@ -1227,18 +1204,13 @@ static int match_traverse(Process* p, DbTableHash* tb, unsigned current_list_pos = 0; /* Prefound buckets list index */ Eterm match_res; Sint got = 0; /* Matched terms counter */ - erts_smp_rwmtx_t* lck; /* Slot lock */ + erts_rwmtx_t* lck; /* Slot lock */ int ret_value; -#ifdef ERTS_SMP - erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) + erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) = (lock_for_write ? WLOCK_HASH : RLOCK_HASH); - void (*unlock_hash_function)(erts_smp_rwmtx_t*) + void (*unlock_hash_function)(erts_rwmtx_t*) = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH); -#else - #define lock_hash_function(tb, hval) NULL - #define unlock_hash_function(lck) ((void)lck) -#endif - Sint (*next_slot_function)(DbTableHash*, Uint, erts_smp_rwmtx_t**) + Sint (*next_slot_function)(DbTableHash*, Uint, erts_rwmtx_t**) = (lock_for_write ? next_slot_w : next_slot); if ((ret_value = analyze_pattern(tb, pattern, extra_match_validator, &mpi)) @@ -1356,10 +1328,6 @@ done: } return ret_value; -#ifndef SMP -#undef lock_hash_function -#undef unlock_hash_function -#endif } /* @@ -1386,18 +1354,13 @@ static int match_traverse_continue(Process* p, DbTableHash* tb, */ HashDbTerm* saved_current; /* Helper to avoid double skip on match */ Eterm match_res; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int ret_value; -#ifdef ERTS_SMP - erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) + erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) = (lock_for_write ? WLOCK_HASH : RLOCK_HASH); - void (*unlock_hash_function)(erts_smp_rwmtx_t*) + void (*unlock_hash_function)(erts_rwmtx_t*) = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH); -#else - #define lock_hash_function(tb, hval) NULL - #define unlock_hash_function(lck) ((void)lck) -#endif - Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_smp_rwmtx_t** lck_ptr) + Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_rwmtx_t** lck_ptr) = (lock_for_write ? next_slot_w : next_slot); if (got < 0) { @@ -1472,10 +1435,6 @@ done: */ return ret_value; -#ifndef SMP -#undef lock_hash_function -#undef unlock_hash_function -#endif } @@ -2008,7 +1967,7 @@ static int mtraversal_select_delete_on_match_res(void* context_ptr, Sint slot_ix *current_ptr = (*current_ptr)->next; // replace pointer to term using next free_term(sd_context_ptr->tb, del); } - erts_smp_atomic_dec_nob(&sd_context_ptr->tb->common.nitems); + erts_atomic_dec_nob(&sd_context_ptr->tb->common.nitems); return 1; } @@ -2048,11 +2007,7 @@ static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid, Eterm patt sd_context.tid = tid; sd_context.hp = NULL; sd_context.prev_continuation_tptr = NULL; -#ifdef ERTS_SMP sd_context.fixated_by_me = sd_context.tb->common.is_thread_safe ? 
0 : 1; /* TODO: something nicer */ -#else - sd_context.fixated_by_me = 0; -#endif sd_context.last_pseudo_delete = (Uint) -1; return match_traverse( @@ -2251,7 +2206,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) DbTableHash *tb = &tbl->hash; HashDbTerm **bp, *b; HashValue hval = MAKE_HASH(key); - erts_smp_rwmtx_t *lck = WLOCK_HASH(tb, hval); + erts_rwmtx_t *lck = WLOCK_HASH(tb, hval); int ix = hash_to_ix(tb, hval); int nitems_diff = 0; @@ -2280,7 +2235,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); + erts_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } return DB_ERROR_NONE; @@ -2302,7 +2257,7 @@ int db_mark_all_deleted_hash(DbTable *tbl) HashDbTerm* list; int i; - ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb)); + ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb)); for (i = 0; i < NACTIVE(tb); i++) { if ((list = BUCKET(tb,i)) != NULL) { @@ -2313,7 +2268,7 @@ int db_mark_all_deleted_hash(DbTable *tbl) }while(list != NULL); } } - erts_smp_atomic_set_nob(&tb->common.nitems, 0); + erts_atomic_set_nob(&tb->common.nitems, 0); return DB_ERROR_NONE; } @@ -2327,7 +2282,6 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl) erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb)); -#ifdef ERTS_SMP i = tbl->common.is_thread_safe; /* If crash dumping we set table to thread safe in order to avoid taking any locks */ @@ -2337,9 +2291,6 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl) db_calc_stats_hash(&tbl->hash, &stats); tbl->common.is_thread_safe = i; -#else - db_calc_stats_hash(&tbl->hash, &stats); -#endif erts_print(to, to_arg, "Chain Length Avg: %f\n", stats.avg_chain_len); erts_print(to, to_arg, "Chain Length Max: %d\n", stats.max_chain_len); @@ -2391,8 +2342,8 @@ static int db_free_table_hash(DbTable *tbl) static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) { DbTableHash *tb = &tbl->hash; - FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel); - ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE)); + FixedDeletion* fixdel = (FixedDeletion*) erts_atomic_read_acqb(&tb->fixdel); + ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE)); while (fixdel != NULL) { FixedDeletion *fx = fixdel; @@ -2404,11 +2355,11 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion)); if (--reds < 0) { - erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel); + erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel); return reds; /* Not done */ } } - erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL); + erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL); while(tb->nslots != 0) { reds -= EXT_SEGSZ/64 + free_seg(tb, 1); @@ -2420,7 +2371,6 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) return reds; /* Not done */ } } -#ifdef ERTS_SMP if (tb->locks != NULL) { int i; for (i=0; i<DB_HASH_LOCK_CNT; ++i) { @@ -2430,8 +2380,7 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) (void*)tb->locks, sizeof(DbTableHashFineLocks)); tb->locks = NULL; } -#endif - ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); + ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); return reds; /* Done */ } @@ -2530,7 +2479,7 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern, if 
(!db_has_variable(key)) { /* Bound key */ int ix, search_slot; HashDbTerm** bp; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); lck = RLOCK_HASH(tb,hval); ix = hash_to_ix(tb, hval); @@ -2634,14 +2583,12 @@ static void alloc_seg(DbTableHash *tb) tb->nslots += EXT_SEGSZ; } -#ifdef ERTS_SMP static void dealloc_ext_segtab(void* lop_data) { struct ext_segtab* est = (struct ext_segtab*) lop_data; erts_free(ERTS_ALC_T_DB_SEG, est); } -#endif /* Shrink table by freeing the top segment ** free_records: 1=free any records in segment, 0=assume segment is empty @@ -2680,7 +2627,6 @@ static int free_seg(DbTableHash *tb, int free_records) SET_SEGTAB(tb, est->prev_segtab); tb->nsegs = est->prev_nsegs; -#ifdef ERTS_SMP if (!tb->common.is_thread_safe) { /* * Table is doing a graceful shrink operation and we must avoid @@ -2698,7 +2644,6 @@ static int free_seg(DbTableHash *tb, int free_records) sz); } else -#endif erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est, SIZEOF_EXT_SEGTAB(est->nsegs)); } @@ -2759,22 +2704,18 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2, static ERTS_INLINE int begin_resizing(DbTableHash* tb) { -#ifdef ERTS_SMP if (DB_USING_FINE_LOCKING(tb)) return !erts_atomic_xchg_acqb(&tb->is_resizing, 1); else ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); -#endif return 1; } static ERTS_INLINE void done_resizing(DbTableHash* tb) { -#ifdef ERTS_SMP if (DB_USING_FINE_LOCKING(tb)) erts_atomic_set_relb(&tb->is_resizing, 0); -#endif } /* Grow table with one or more new buckets. @@ -2785,7 +2726,7 @@ static void grow(DbTableHash* tb, int nitems) HashDbTerm** pnext; HashDbTerm** to_pnext; HashDbTerm* p; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nactive; int from_ix, to_ix; int szm; @@ -2807,7 +2748,7 @@ static void grow(DbTableHash* tb, int nitems) } ASSERT(nactive < tb->nslots); - szm = erts_smp_atomic_read_nob(&tb->szm); + szm = erts_atomic_read_nob(&tb->szm); if (nactive <= szm) { from_ix = nactive & (szm >> 1); } else { @@ -2818,7 +2759,7 @@ static void grow(DbTableHash* tb, int nitems) to_ix = nactive; lck = WLOCK_HASH(tb, from_ix); - ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix)); + ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix)); /* Now a final double check (with the from_ix lock held) * that we did not get raced by a table fixer. 
*/ @@ -2826,12 +2767,12 @@ static void grow(DbTableHash* tb, int nitems) WUNLOCK_HASH(lck); goto abort; } - erts_smp_atomic_set_nob(&tb->nactive, ++nactive); + erts_atomic_set_nob(&tb->nactive, ++nactive); if (from_ix == 0) { if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_relb(&tb->szm, szm); + erts_atomic_set_relb(&tb->szm, szm); else - erts_smp_atomic_set_nob(&tb->szm, szm); + erts_atomic_set_nob(&tb->szm, szm); } done_resizing(tb); @@ -2879,7 +2820,7 @@ static void shrink(DbTableHash* tb, int nitems) HashDbTerm** src_bp; HashDbTerm** dst_bp; HashDbTerm** bp; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int src_ix, dst_ix, low_szm; int nactive; int loop_limit = 5; @@ -2892,13 +2833,13 @@ static void shrink(DbTableHash* tb, int nitems) goto abort; /* already done (race) */ } src_ix = nactive - 1; - low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1; + low_szm = erts_atomic_read_nob(&tb->szm) >> 1; dst_ix = src_ix & low_szm; ASSERT(dst_ix < src_ix); ASSERT(nactive > FIRST_SEGSZ); lck = WLOCK_HASH(tb, dst_ix); - ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix)); + ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix)); /* Double check for racing table fixers */ if (IS_FIXED(tb)) { WUNLOCK_HASH(lck); @@ -2927,9 +2868,9 @@ static void shrink(DbTableHash* tb, int nitems) *src_bp = NULL; nactive = src_ix; - erts_smp_atomic_set_nob(&tb->nactive, nactive); + erts_atomic_set_nob(&tb->nactive, nactive); if (dst_ix == 0) { - erts_smp_atomic_set_relb(&tb->szm, low_szm); + erts_atomic_set_relb(&tb->szm, low_szm); } WUNLOCK_HASH(lck); @@ -2964,12 +2905,12 @@ static HashDbTerm* search_list(DbTableHash* tb, Eterm key, /* It return the next live object in a table, NULL if no more */ /* In-bucket: RLOCKED */ /* Out-bucket: RLOCKED unless NULL */ -static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, +static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr, HashDbTerm *list) { int i; - ERTS_SMP_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr)); + ERTS_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr)); for (list = list->next; list != NULL; list = list->next) { if (list->hvalue != INVALID_HASH) @@ -2999,7 +2940,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj, DbTableHash *tb = &tbl->hash; HashValue hval; HashDbTerm **bp, *b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int flags = 0; ASSERT(tb->common.status & DB_SET); @@ -3055,7 +2996,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj, q->next = next; q->hvalue = hval; *bp = b = q; - erts_smp_atomic_inc_nob(&tb->common.nitems); + erts_atomic_inc_nob(&tb->common.nitems); } HRelease(p, hend, htop); @@ -3081,10 +3022,10 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) DbTableHash *tb = &tbl->hash; HashDbTerm **bp = (HashDbTerm **) handle->bp; HashDbTerm *b = *bp; - erts_smp_rwmtx_t* lck = (erts_smp_rwmtx_t*) handle->lck; + erts_rwmtx_t* lck = (erts_rwmtx_t*) handle->lck; HashDbTerm* free_me = NULL; - ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */ + ERTS_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */ ASSERT((&b->dbterm == handle->dbterm) == !(tb->common.compress && handle->flags & DB_MUST_RESIZE)); @@ -3098,7 +3039,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) } WUNLOCK_HASH(lck); - erts_smp_atomic_dec_nob(&tb->common.nitems); + erts_atomic_dec_nob(&tb->common.nitems); try_shrink(tb); } else { if (handle->flags & DB_MUST_RESIZE) { @@ -3107,7 +3048,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* 
handle) } if (handle->flags & DB_INC_TRY_GROW) { int nactive; - int nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems); + int nitems = erts_atomic_inc_read_nob(&tb->common.nitems); WUNLOCK_HASH(lck); nactive = NACTIVE(tb); @@ -3135,7 +3076,7 @@ static int db_delete_all_objects_hash(Process* p, DbTable* tbl) } else { db_free_table_hash(tbl); db_create_hash(p, tbl); - erts_smp_atomic_set_nob(&tbl->hash.common.nitems, 0); + erts_atomic_set_nob(&tbl->hash.common.nitems, 0); } return 0; } @@ -3165,7 +3106,7 @@ void db_foreach_offheap_hash(DbTable *tbl, void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats) { HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int sum = 0; int sq_sum = 0; int kept_items = 0; diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 523ed7860e..7d27609825 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -42,8 +42,8 @@ typedef struct hash_db_term { typedef struct db_table_hash_fine_locks { union { - erts_smp_rwmtx_t lck; - byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_rwmtx_t))]; + erts_rwmtx_t lck; + byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))]; }lck_vec[DB_HASH_LOCK_CNT]; } DbTableHashFineLocks; @@ -51,10 +51,10 @@ typedef struct db_table_hash { DbTableCommon common; /* SMP: szm and nactive are write-protected by is_resizing or table write lock */ - erts_smp_atomic_t szm; /* current size mask. */ - erts_smp_atomic_t nactive; /* Number of "active" slots */ + erts_atomic_t szm; /* current size mask. */ + erts_atomic_t nactive; /* Number of "active" slots */ - erts_smp_atomic_t segtab; /* The segment table (struct segment**) */ + erts_atomic_t segtab; /* The segment table (struct segment**) */ struct segment* first_segtab[1]; /* SMP: nslots and nsegs are protected by is_resizing or table write lock */ @@ -62,11 +62,9 @@ typedef struct db_table_hash { int nsegs; /* Size of segment table */ /* List of slots where elements have been deleted while table was fixed */ - erts_smp_atomic_t fixdel; /* (FixedDeletion*) */ -#ifdef ERTS_SMP - erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ + erts_atomic_t fixdel; /* (FixedDeletion*) */ + erts_atomic_t is_resizing; /* grow/shrink in progress */ DbTableHashFineLocks* locks; -#endif } DbTableHash; diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c index 7c80e92e50..5a276b9d88 100644 --- a/erts/emulator/beam/erl_db_tree.c +++ b/erts/emulator/beam/erl_db_tree.c @@ -50,7 +50,7 @@ #include "erl_db_tree.h" #define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos)) -#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) +#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems)) /* ** A stack of this size is enough for an AVL tree with more than @@ -91,7 +91,7 @@ */ static DbTreeStack* get_static_stack(DbTableTree* tb) { - if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { + if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { return &tb->static_stack; } return NULL; @@ -103,7 +103,7 @@ static DbTreeStack* get_static_stack(DbTableTree* tb) static DbTreeStack* get_any_stack(DbTableTree* tb) { DbTreeStack* stack; - if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { + if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { return &tb->static_stack; } stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb, @@ -117,8 +117,8 @@ static DbTreeStack* get_any_stack(DbTableTree* tb) static void 
release_stack(DbTableTree* tb, DbTreeStack* stack) { if (stack == &tb->static_stack) { - ASSERT(erts_smp_atomic_read_nob(&tb->is_stack_busy) == 1); - erts_smp_atomic_set_relb(&tb->is_stack_busy, 0); + ASSERT(erts_atomic_read_nob(&tb->is_stack_busy) == 1); + erts_atomic_set_relb(&tb->is_stack_busy, 0); } else { erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb, @@ -514,7 +514,7 @@ int db_create_tree(Process *p, DbTable *tbl) sizeof(TreeDbTerm *) * STACK_NEED); tb->static_stack.pos = 0; tb->static_stack.slot = 0; - erts_smp_atomic_init_nob(&tb->is_stack_busy, 0); + erts_atomic_init_nob(&tb->is_stack_busy, 0); tb->deletion = 0; return DB_ERROR_NONE; } @@ -643,8 +643,8 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail) for (;;) if (!*this) { /* Found our place */ state = 1; - if (erts_smp_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) { - erts_smp_atomic_dec_nob(&tb->common.nitems); + if (erts_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) { + erts_atomic_dec_nob(&tb->common.nitems); return DB_ERROR_SYSRES; } *this = new_dbterm(tb, obj); @@ -1605,7 +1605,7 @@ static int db_select_delete_continue_tree(Process *p, sc.max = 1000; sc.keypos = tb->common.keypos; - ASSERT(!erts_smp_atomic_read_nob(&tb->is_stack_busy)); + ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy)); traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc); BUMP_REDS(p, 1000 - sc.max); @@ -2017,7 +2017,7 @@ static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds) (DbTable *) tb, (void *) tb->static_stack.array, sizeof(TreeDbTerm *) * STACK_NEED); - ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) + ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); } return reds; @@ -2027,7 +2027,7 @@ static int db_delete_all_objects_tree(Process* p, DbTable* tbl) { db_free_table_tree(tbl); db_create_tree(p, tbl); - erts_smp_atomic_set_nob(&tbl->tree.common.nitems, 0); + erts_atomic_set_nob(&tbl->tree.common.nitems, 0); return 0; } @@ -2107,7 +2107,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) { tstack[tpos++] = this; state = delsub(this); } - erts_smp_atomic_dec_nob(&tb->common.nitems); + erts_atomic_dec_nob(&tb->common.nitems); break; } } @@ -2174,7 +2174,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb, tstack[tpos++] = this; state = delsub(this); } - erts_smp_atomic_dec_nob(&tb->common.nitems); + erts_atomic_dec_nob(&tb->common.nitems); break; } } diff --git a/erts/emulator/beam/erl_db_tree.h b/erts/emulator/beam/erl_db_tree.h index 72749ead1e..dc1b93d410 100644 --- a/erts/emulator/beam/erl_db_tree.h +++ b/erts/emulator/beam/erl_db_tree.h @@ -41,7 +41,7 @@ typedef struct db_table_tree { /* Tree-specific fields */ TreeDbTerm *root; /* The tree root */ Uint deletion; /* Being deleted */ - erts_smp_atomic_t is_stack_busy; + erts_atomic_t is_stack_busy; DbTreeStack static_stack; } DbTableTree; diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index 13eacaa8a9..e017b9552b 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -170,7 +170,7 @@ static Eterm set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer, Uint d_flags, Uint e_flags) { - ERTS_SMP_LC_ASSERT( + ERTS_LC_ASSERT( ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p) || erts_thr_progress_is_blocking()); @@ -361,11 +361,7 @@ typedef struct { } ErtsMatchPseudoProcess; -#ifdef ERTS_SMP -static erts_smp_tsd_key_t match_pseudo_process_key; -#else -static 
ErtsMatchPseudoProcess *match_pseudo_process; -#endif +static erts_tsd_key_t match_pseudo_process_key; static ERTS_INLINE void cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap) @@ -414,32 +410,27 @@ static ERTS_INLINE ErtsMatchPseudoProcess * get_match_pseudo_process(Process *c_p, Uint heap_size) { ErtsMatchPseudoProcess *mpsp; -#ifdef ERTS_SMP ErtsSchedulerData *esdp; esdp = c_p ? c_p->scheduler_data : erts_get_scheduler_data(); mpsp = esdp ? esdp->match_pseudo_process : - (ErtsMatchPseudoProcess*) erts_smp_tsd_get(match_pseudo_process_key); + (ErtsMatchPseudoProcess*) erts_tsd_get(match_pseudo_process_key); if (mpsp) { - ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key)); + ASSERT(mpsp == erts_tsd_get(match_pseudo_process_key)); ASSERT(mpsp->process.scheduler_data == esdp); cleanup_match_pseudo_process(mpsp, 0); } else { - ASSERT(erts_smp_tsd_get(match_pseudo_process_key) == NULL); + ASSERT(erts_tsd_get(match_pseudo_process_key) == NULL); mpsp = create_match_pseudo_process(); if (esdp) { esdp->match_pseudo_process = (void *) mpsp; } mpsp->process.scheduler_data = esdp; - erts_smp_tsd_set(match_pseudo_process_key, (void *) mpsp); + erts_tsd_set(match_pseudo_process_key, (void *) mpsp); } -#else - mpsp = match_pseudo_process; - cleanup_match_pseudo_process(mpsp, 0); -#endif if (heap_size > ERTS_DEFAULT_MS_HEAP_SIZE*sizeof(Eterm)) { mpsp->u.heap = (Eterm*) erts_alloc(ERTS_ALC_T_DB_MS_RUN_HEAP, heap_size); } @@ -449,31 +440,25 @@ get_match_pseudo_process(Process *c_p, Uint heap_size) return mpsp; } -#ifdef ERTS_SMP static void destroy_match_pseudo_process(void) { ErtsMatchPseudoProcess *mpsp; - mpsp = (ErtsMatchPseudoProcess *)erts_smp_tsd_get(match_pseudo_process_key); + mpsp = (ErtsMatchPseudoProcess *)erts_tsd_get(match_pseudo_process_key); if (mpsp) { cleanup_match_pseudo_process(mpsp, 0); erts_free(ERTS_ALC_T_DB_MS_PSDO_PROC, (void *) mpsp); - erts_smp_tsd_set(match_pseudo_process_key, (void *) NULL); + erts_tsd_set(match_pseudo_process_key, (void *) NULL); } } -#endif static void match_pseudo_process_init(void) { -#ifdef ERTS_SMP - erts_smp_tsd_key_create(&match_pseudo_process_key, + erts_tsd_key_create(&match_pseudo_process_key, "erts_match_pseudo_process_key"); - erts_smp_install_exit_handler(destroy_match_pseudo_process); -#else - match_pseudo_process = create_match_pseudo_process(); -#endif + erts_thr_install_exit_handler(destroy_match_pseudo_process); } void @@ -484,7 +469,7 @@ erts_match_set_release_result(Process* c_p) /* The trace control word. */ -static erts_smp_atomic32_t trace_control_word; +static erts_atomic32_t trace_control_word; /* This needs to be here, before the bif table... 
*/ @@ -923,7 +908,7 @@ static void db_free_tmp_uncompressed(DbTerm* obj); */ BIF_RETTYPE db_get_trace_control_word(Process *p) { - Uint32 tcw = (Uint32) erts_smp_atomic32_read_acqb(&trace_control_word); + Uint32 tcw = (Uint32) erts_atomic32_read_acqb(&trace_control_word); BIF_RET(erts_make_integer((Uint) tcw, p)); } @@ -941,7 +926,7 @@ BIF_RETTYPE db_set_trace_control_word(Process *p, Eterm new) if (val != ((Uint32)val)) BIF_ERROR(p, BADARG); - old_tcw = (Uint32) erts_smp_atomic32_xchg_relb(&trace_control_word, + old_tcw = (Uint32) erts_atomic32_xchg_relb(&trace_control_word, (erts_aint32_t) val); BIF_RET(erts_make_integer((Uint) old_tcw, p)); } @@ -1466,7 +1451,7 @@ void db_initialize_util(void){ sizeof(DMCGuardBif), (int (*)(const void *, const void *)) &cmp_guard_bif); match_pseudo_process_init(); - erts_smp_atomic32_init_nob(&trace_control_word, 0); + erts_atomic32_init_nob(&trace_control_word, 0); } @@ -2528,9 +2513,9 @@ restart: case matchEnableTrace: ASSERT(c_p == self); if ( (n = erts_trace_flag2bit(esp[-1]))) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); set_tracee_flags(c_p, ERTS_TRACER(c_p), 0, n); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); esp[-1] = am_true; } else { esp[-1] = FAIL_TERM; @@ -2545,9 +2530,9 @@ restart: /* Always take over the tracer of the current process */ set_tracee_flags(tmpp, ERTS_TRACER(c_p), 0, n); if (tmpp == c_p) - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); else - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); esp[-1] = am_true; } } @@ -2555,9 +2540,9 @@ restart: case matchDisableTrace: ASSERT(c_p == self); if ( (n = erts_trace_flag2bit(esp[-1]))) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); set_tracee_flags(c_p, ERTS_TRACER(c_p), n, 0); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); esp[-1] = am_true; } else { esp[-1] = FAIL_TERM; @@ -2572,9 +2557,9 @@ restart: /* Always take over the tracer of the current process */ set_tracee_flags(tmpp, ERTS_TRACER(c_p), n, 0); if (tmpp == c_p) - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); else - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); esp[-1] = am_true; } } @@ -2598,14 +2583,14 @@ restart: if (in_flags & ERTS_PAM_IGNORE_TRACE_SILENT) break; if (*esp == am_true) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } else if (*esp == am_false) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(c_p) &= ~F_TRACE_SILENT; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } break; case matchTrace2: @@ -2634,10 +2619,10 @@ restart: ERTS_TRACER_CLEAR(&tracer); break; } - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); (--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer, d_flags, e_flags); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, 
ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACER_CLEAR(&tracer); } break; @@ -2667,13 +2652,13 @@ restart: if (tmpp == c_p) { (--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer, d_flags, e_flags); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } else { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); (--esp)[-1] = set_match_trace(tmpp, FAIL_TERM, tracer, d_flags, e_flags); - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } ERTS_TRACER_CLEAR(&tracer); } @@ -3277,7 +3262,7 @@ void db_cleanup_offheap_comp(DbTerm* obj) break; case FUN_SUBTAG: ASSERT(u.pb != &tmp); - if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) { + if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) { erts_erase_fun_entry(u.fun->fe); } break; diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h index 7ce104a84c..6b126f35d6 100644 --- a/erts/emulator/beam/erl_db_util.h +++ b/erts/emulator/beam/erl_db_util.h @@ -240,16 +240,14 @@ typedef struct { */ typedef struct db_table_common { - erts_smp_refc_t refc; /* reference count of table struct */ - erts_smp_refc_t fix_count;/* fixation counter */ + erts_refc_t refc; /* reference count of table struct */ + erts_refc_t fix_count;/* fixation counter */ DbTableList all; DbTableList owned; -#ifdef ERTS_SMP - erts_smp_rwmtx_t rwlock; /* rw lock on table */ - erts_smp_mtx_t fixlock; /* Protects fixing_procs and time */ + erts_rwmtx_t rwlock; /* rw lock on table */ + erts_mtx_t fixlock; /* Protects fixing_procs and time */ int is_thread_safe; /* No fine locking inside table needed */ Uint32 type; /* table type, *read only* after creation */ -#endif Eterm owner; /* Pid of the creator */ Eterm heir; /* Pid of the heir */ UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */ @@ -257,8 +255,8 @@ typedef struct db_table_common { Eterm the_name; /* an atom */ Binary *btid; DbTableMethod* meth; /* table methods */ - erts_smp_atomic_t nitems; /* Total number of items in table */ - erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */ + erts_atomic_t nitems; /* Total number of items in table */ + erts_atomic_t memory_size;/* Total memory size. NOTE: in bytes! 
*/ struct { /* Last fixation time */ ErtsMonotonicTime monotonic; ErtsMonotonicTime offset; @@ -291,7 +289,7 @@ typedef struct db_table_common { (DB_BAG | DB_SET | DB_DUPLICATE_BAG))) #define IS_TREE_TABLE(Status) (!!((Status) & \ DB_ORDERED_SET)) -#define NFIXED(T) (erts_smp_refc_read(&(T)->common.fix_count,0)) +#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0)) #define IS_FIXED(T) (NFIXED(T) != 0) /* diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c index 742c428f2a..71d4534ef9 100644 --- a/erts/emulator/beam/erl_drv_thread.c +++ b/erts/emulator/beam/erl_drv_thread.c @@ -50,7 +50,6 @@ fatal_error(int err, char *func) #define ERL_DRV_TSD_EXTRA 10 #define ERL_DRV_INVALID_TSD_KEY INT_MAX -#ifdef USE_THREADS struct ErlDrvMutex_ { ethr_mutex mtx; @@ -85,10 +84,6 @@ struct ErlDrvTid_ { static ethr_tsd_key tid_key; -#else /* USE_THREADS */ -static Uint tsd_len; -static void **tsd; -#endif static ErlDrvTSDKey next_tsd_key; static ErlDrvTSDKey max_used_tsd_key; @@ -97,7 +92,6 @@ static char **used_tsd_keys; static erts_mtx_t tsd_mtx; static char *no_name; -#ifdef USE_THREADS static void thread_exit_handler(void) @@ -122,21 +116,15 @@ erl_drv_thread_wrapper(void *vdtid) return (*dtid->func)(dtid->arg); } -#endif void erl_drv_thr_init(void) { int i; -#ifdef USE_THREADS int res = ethr_tsd_key_create(&tid_key,"erts_tid_key"); if (res == 0) res = ethr_install_exit_handler(thread_exit_handler); if (res != 0) fatal_error(res, "erl_drv_thr_init()"); -#else - tsd_len = 0; - tsd = NULL; -#endif no_name = "unknown"; next_tsd_key = 0; @@ -153,13 +141,12 @@ void erl_drv_thr_init(void) /* * These functions implement the driver thread interface in erl_driver.h. * NOTE: Only use this interface from drivers. From within the emulator use - * either the erl_threads.h, the erl_smp.h or the ethread.h interface. + * either the erl_threads.h or the ethread.h interface. */ ErlDrvMutex * erl_drv_mutex_create(char *name) { -#ifdef USE_THREADS ErlDrvMutex *dmtx = erts_alloc_fnf(ERTS_ALC_T_DRV_MTX, (sizeof(ErlDrvMutex) + (name ? sys_strlen(name) + 1 : 0))); @@ -182,15 +169,11 @@ erl_drv_mutex_create(char *name) #endif } return dmtx; -#else - return (ErlDrvMutex *) NULL; -#endif } void erl_drv_mutex_destroy(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_uninstall(&dmtx->lcnt); @@ -199,24 +182,18 @@ erl_drv_mutex_destroy(ErlDrvMutex *dmtx) if (res != 0) fatal_error(res, "erl_drv_mutex_destroy()"); erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx); -#endif } char * erl_drv_mutex_name(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS return dmtx ? 
dmtx->name : NULL; -#else - return NULL; -#endif } int erl_drv_mutex_trylock(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS int res; if (!dmtx) fatal_error(EINVAL, "erl_drv_mutex_trylock()"); @@ -225,22 +202,17 @@ erl_drv_mutex_trylock(ErlDrvMutex *dmtx) erts_lcnt_trylock(&dmtx->lcnt, res); #endif return res; -#else - return 0; -#endif } void erl_drv_mutex_lock(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS if (!dmtx) fatal_error(EINVAL, "erl_drv_mutex_lock()"); #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock(&dmtx->lcnt); #endif ethr_mutex_lock(&dmtx->mtx); -#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post(&dmtx->lcnt); #endif @@ -249,20 +221,17 @@ erl_drv_mutex_lock(ErlDrvMutex *dmtx) void erl_drv_mutex_unlock(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS if (!dmtx) fatal_error(EINVAL, "erl_drv_mutex_unlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_unlock(&dmtx->lcnt); #endif ethr_mutex_unlock(&dmtx->mtx); -#endif } ErlDrvCond * erl_drv_cond_create(char *name) { -#ifdef USE_THREADS ErlDrvCond *dcnd = erts_alloc_fnf(ERTS_ALC_T_DRV_CND, (sizeof(ErlDrvCond) + (name ? sys_strlen(name) + 1 : 0))); @@ -281,57 +250,43 @@ erl_drv_cond_create(char *name) } } return dcnd; -#else - return (ErlDrvCond *) NULL; -#endif } void erl_drv_cond_destroy(ErlDrvCond *dcnd) { -#ifdef USE_THREADS int res = dcnd ? ethr_cond_destroy(&dcnd->cnd) : EINVAL; if (res != 0) fatal_error(res, "erl_drv_cond_destroy()"); erts_free(ERTS_ALC_T_DRV_CND, (void *) dcnd); -#endif } char * erl_drv_cond_name(ErlDrvCond *dcnd) { -#ifdef USE_THREADS return dcnd ? dcnd->name : NULL; -#else - return NULL; -#endif } void erl_drv_cond_signal(ErlDrvCond *dcnd) { -#ifdef USE_THREADS if (!dcnd) fatal_error(EINVAL, "erl_drv_cond_signal()"); ethr_cond_signal(&dcnd->cnd); -#endif } void erl_drv_cond_broadcast(ErlDrvCond *dcnd) { -#ifdef USE_THREADS if (!dcnd) fatal_error(EINVAL, "erl_drv_cond_broadcast()"); ethr_cond_broadcast(&dcnd->cnd); -#endif } void erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx) { -#ifdef USE_THREADS if (!dcnd || !dmtx) { fatal_error(EINVAL, "erl_drv_cond_wait()"); } @@ -348,13 +303,11 @@ erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx) break; } } -#endif } ErlDrvRWLock * erl_drv_rwlock_create(char *name) { -#ifdef USE_THREADS ErlDrvRWLock *drwlck = erts_alloc_fnf(ERTS_ALC_T_DRV_RWLCK, (sizeof(ErlDrvRWLock) + (name ? sys_strlen(name) + 1 : 0))); @@ -375,15 +328,11 @@ erl_drv_rwlock_create(char *name) #endif } return drwlck; -#else - return (ErlDrvRWLock *) NULL; -#endif } void erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_uninstall(&drwlck->lcnt); @@ -392,23 +341,17 @@ erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck) if (res != 0) fatal_error(res, "erl_drv_rwlock_destroy()"); erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck); -#endif } char * erl_drv_rwlock_name(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS return drwlck ? 
drwlck->name : NULL; -#else - return NULL; -#endif } int erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS int res; if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()"); @@ -417,15 +360,11 @@ erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck) erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_READ); #endif return res; -#else - return 0; -#endif } void erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_rlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT @@ -435,26 +374,22 @@ erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post(&drwlck->lcnt); #endif -#endif } void erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_runlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ); #endif ethr_rwmutex_runlock(&drwlck->rwmtx); -#endif } int erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS int res; if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()"); @@ -463,15 +398,11 @@ erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck) erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_RDWR); #endif return res; -#else - return 0; -#endif } void erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_rwlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT @@ -481,20 +412,17 @@ erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post(&drwlck->lcnt); #endif -#endif } void erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR); #endif ethr_rwmutex_rwunlock(&drwlck->rwmtx); -#endif } int @@ -588,20 +516,13 @@ erl_drv_tsd_key_destroy(ErlDrvTSDKey key) } -#ifdef USE_THREADS #define ERL_DRV_TSD__ (dtid->tsd) #define ERL_DRV_TSD_LEN__ (dtid->tsd_len) -#else -#define ERL_DRV_TSD__ (tsd) -#define ERL_DRV_TSD_LEN__ (tsd_len) -#endif void erl_drv_tsd_set(ErlDrvTSDKey key, void *data) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) erl_drv_thread_self(); -#endif if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key]) fatal_error(EINVAL, "erl_drv_tsd_set()"); @@ -629,15 +550,11 @@ erl_drv_tsd_set(ErlDrvTSDKey key, void *data) void * erl_drv_tsd_get(ErlDrvTSDKey key) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key); -#endif if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key]) fatal_error(EINVAL, "erl_drv_tsd_get()"); -#ifdef USE_THREADS if (!dtid) return NULL; -#endif if (ERL_DRV_TSD_LEN__ <= key) return NULL; return ERL_DRV_TSD__[key]; @@ -672,7 +589,6 @@ erl_drv_thread_create(char *name, void* arg, ErlDrvThreadOpts *opts) { -#ifdef USE_THREADS int res; struct ErlDrvTid_ *dtid; ethr_thr_opts ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER; @@ -714,27 +630,19 @@ erl_drv_thread_create(char *name, *tid = (ErlDrvTid) dtid; return 0; -#else - return ENOTSUP; -#endif } char * erl_drv_thread_name(ErlDrvTid tid) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid; return dtid ? 
dtid->name : NULL; -#else - return NULL; -#endif } ErlDrvTid erl_drv_thread_self(void) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key); if (!dtid) { int res; @@ -753,15 +661,11 @@ erl_drv_thread_self(void) fatal_error(res, "erl_drv_thread_self()"); } return (ErlDrvTid) dtid; -#else - return (ErlDrvTid) NULL; -#endif } int erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2) { -#ifdef USE_THREADS int res; struct ErlDrvTid_ *dtid1 = (struct ErlDrvTid_ *) tid1; struct ErlDrvTid_ *dtid2 = (struct ErlDrvTid_ *) tid2; @@ -775,28 +679,22 @@ erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2) : !ethr_equal_tids(dtid1->tid, dtid2->tid)); return res; -#else - return 1; -#endif } void erl_drv_thread_exit(void *res) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key); if (dtid && dtid->drv_thr) { ethr_thr_exit(res); fatal_error(0, "erl_drv_thread_exit()"); } -#endif fatal_error(EACCES, "erl_drv_thread_exit()"); } int erl_drv_thread_join(ErlDrvTid tid, void **respp) { -#ifdef USE_THREADS int res; struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid; @@ -809,12 +707,9 @@ erl_drv_thread_join(ErlDrvTid tid, void **respp) if (res == 0) erts_free(ERTS_ALC_T_DRV_TID, dtid); return res; -#else - return ENOTSUP; -#endif } -#if defined(__DARWIN__) && defined(USE_THREADS) && defined(ERTS_SMP) +#if defined(__DARWIN__) extern int erts_darwin_main_thread_pipe[2]; extern int erts_darwin_main_thread_result_pipe[2]; diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c index 535f677bb3..9c866250bb 100644 --- a/erts/emulator/beam/erl_fun.c +++ b/erts/emulator/beam/erl_fun.c @@ -30,17 +30,16 @@ static Hash erts_fun_table; -#include "erl_smp.h" #ifdef HIPE # include "hipe_mode_switch.h" #endif -static erts_smp_rwmtx_t erts_fun_table_lock; +static erts_rwmtx_t erts_fun_table_lock; -#define erts_fun_read_lock() erts_smp_rwmtx_rlock(&erts_fun_table_lock) -#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock) -#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock) -#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock) +#define erts_fun_read_lock() erts_rwmtx_rlock(&erts_fun_table_lock) +#define erts_fun_read_unlock() erts_rwmtx_runlock(&erts_fun_table_lock) +#define erts_fun_write_lock() erts_rwmtx_rwlock(&erts_fun_table_lock) +#define erts_fun_write_unlock() erts_rwmtx_rwunlock(&erts_fun_table_lock) static HashValue fun_hash(ErlFunEntry* obj); static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2); @@ -59,11 +58,11 @@ void erts_init_fun_table(void) { HashFunctions f; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL, + erts_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); f.hash = (H_FUN) fun_hash; @@ -114,9 +113,9 @@ erts_put_fun_entry(Eterm mod, int uniq, int index) fe = (ErlFunEntry *) hash_put(&erts_fun_table, (void*) &template); sys_memset(fe->uniq, 0, sizeof(fe->uniq)); fe->index = 0; - refc = erts_smp_refc_inctest(&fe->refc, 0); + refc = erts_refc_inctest(&fe->refc, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&fe->refc, 1); + 
erts_refc_inc(&fe->refc, 1); erts_fun_write_unlock(); return fe; } @@ -138,9 +137,9 @@ erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index, sys_memcpy(fe->uniq, uniq, sizeof(fe->uniq)); fe->index = index; fe->arity = arity; - refc = erts_smp_refc_inctest(&fe->refc, 0); + refc = erts_refc_inctest(&fe->refc, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&fe->refc, 1); + erts_refc_inc(&fe->refc, 1); erts_fun_write_unlock(); return fe; } @@ -165,9 +164,9 @@ erts_get_fun_entry(Eterm mod, int uniq, int index) erts_fun_read_lock(); ret = (ErlFunEntry *) hash_get(&erts_fun_table, (void*) &template); if (ret) { - erts_aint_t refc = erts_smp_refc_inctest(&ret->refc, 1); + erts_aint_t refc = erts_refc_inctest(&ret->refc, 1); if (refc < 2) /* Pending delete */ - erts_smp_refc_inc(&ret->refc, 1); + erts_refc_inc(&ret->refc, 1); } erts_fun_read_unlock(); return ret; @@ -183,13 +182,11 @@ void erts_erase_fun_entry(ErlFunEntry* fe) { erts_fun_write_lock(); -#ifdef ERTS_SMP /* * We have to check refc again since someone might have looked up * the fun entry and incremented refc after last check. */ - if (erts_smp_refc_dectest(&fe->refc, -1) <= 0) -#endif + if (erts_refc_dectest(&fe->refc, -1) <= 0) { if (fe->address != unloaded_fun) erts_exit(ERTS_ERROR_EXIT, @@ -221,7 +218,7 @@ erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end) if (start <= addr && addr < end) { fe->pend_purge_address = addr; - ERTS_SMP_WRITE_MEMORY_BARRIER; + ERTS_THR_WRITE_MEMORY_BARRIER; fe->address = unloaded_fun; #ifdef HIPE fe->pend_purge_native_address = fe->native_address; @@ -275,10 +272,10 @@ erts_fun_purge_complete(ErlFunEntry **funs, Uint no) #ifdef HIPE fe->pend_purge_native_address = NULL; #endif - if (erts_smp_refc_dectest(&fe->refc, 0) == 0) + if (erts_refc_dectest(&fe->refc, 0) == 0) erts_erase_fun_entry(fe); } - ERTS_SMP_WRITE_MEMORY_BARRIER; + ERTS_THR_WRITE_MEMORY_BARRIER; } void @@ -307,7 +304,7 @@ erts_dump_fun_entries(fmtfn_t to, void *to_arg) #ifdef HIPE erts_print(to, to_arg, "Native_address: %p\n", fe->native_address); #endif - erts_print(to, to_arg, "Refc: %ld\n", erts_smp_refc_read(&fe->refc, 1)); + erts_print(to, to_arg, "Refc: %ld\n", erts_refc_read(&fe->refc, 1)); b = b->next; } } @@ -338,7 +335,7 @@ fun_alloc(ErlFunEntry* template) obj->old_uniq = template->old_uniq; obj->old_index = template->old_index; obj->module = template->module; - erts_smp_refc_init(&obj->refc, -1); + erts_refc_init(&obj->refc, -1); obj->address = unloaded_fun; obj->pend_purge_address = NULL; #ifdef HIPE diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h index 289d0d0b28..fb2901d866 100644 --- a/erts/emulator/beam/erl_fun.h +++ b/erts/emulator/beam/erl_fun.h @@ -21,7 +21,7 @@ #ifndef __ERLFUNTABLE_H__ #define __ERLFUNTABLE_H__ -#include "erl_smp.h" +#include "erl_threads.h" /* * Fun entry. @@ -42,7 +42,7 @@ typedef struct erl_fun_entry { Uint arity; /* The arity of the fun. */ Eterm module; /* Tagged atom for module. */ - erts_smp_refc_t refc; /* Reference count: One for code + one for each + erts_refc_t refc; /* Reference count: One for code + one for each fun object in each process. 
*/ BeamInstr *pend_purge_address; /* address stored during a pending purge */ #ifdef HIPE diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index 8cb977a7f3..8344c164fa 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -180,15 +180,13 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Uint req_sched; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsGCInfoReq; -#ifdef ERTS_DIRTY_SCHEDULERS static struct { erts_mtx_t mtx; ErtsGCInfo info; } dirty_gc; -#endif static ERTS_INLINE int gc_cost(Uint gc_moved_live_words, Uint resize_moved_words) @@ -273,11 +271,9 @@ erts_init_gc(void) init_gc_info(&esdp->gc_info); } -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL, + erts_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); init_gc_info(&dirty_gc.info); -#endif init_gcireq_alloc(); } @@ -481,12 +477,10 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int } if (need == 0) { -#ifdef ERTS_DIRTY_SCHEDULERS if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) { ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p))); goto force_reschedule; } -#endif return 1; } /* @@ -541,9 +535,7 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int p->heap_hfrag = hfrag; #endif -#ifdef ERTS_DIRTY_SCHEDULERS force_reschedule: -#endif /* Make sure that we do a proper GC as soon as possible... */ p->flags |= F_FORCE_GC; @@ -616,7 +608,6 @@ young_gen_usage(Process *p) } \ } while (0) -#ifdef ERTS_DIRTY_SCHEDULERS static ERTS_INLINE void check_for_possibly_long_gc(Process *p, Uint ygen_usage) @@ -640,7 +631,6 @@ check_for_possibly_long_gc(Process *p, Uint ygen_usage) } } -#endif /* * Garbage collect a process. @@ -672,24 +662,20 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end, ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls) >= esdp->virtual_reds); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if ((p->flags & (F_DISABLE_GC|F_DELAY_GC)) || state & ERTS_PSFLG_EXITING) { -#ifdef ERTS_DIRTY_SCHEDULERS delay_gc_before_start: -#endif return delay_garbage_collection(p, live_hf_end, need, fcalls); } ygen_usage = max_young_gen_usage ? 
max_young_gen_usage : young_gen_usage(p); -#ifdef ERTS_DIRTY_SCHEDULERS if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { check_for_possibly_long_gc(p, ygen_usage); if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) goto delay_gc_before_start; } -#endif if (p->abandoned_heap) live_hf_end = ERTS_INVALID_HFRAG_PTR; @@ -698,7 +684,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end, ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC); - erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); + erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); if (erts_system_monitor_long_gc != 0) start_time = erts_get_monotonic_time(esdp); @@ -731,14 +717,12 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end, if (IS_TRACED_FL(p, F_TRACE_GC)) { trace_gc(p, am_gc_minor_end, reclaimed_now, THE_NON_VALUE); } -#ifdef ERTS_DIRTY_SCHEDULERS if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { p->flags |= F_NEED_FULLSWEEP; check_for_possibly_long_gc(p, ygen_usage); if (p->flags & F_DIRTY_MAJOR_GC) goto delay_gc_after_start; } -#endif goto do_major_collection; } if (ERTS_SCHEDULER_IS_DIRTY(esdp)) @@ -779,17 +763,15 @@ do_major_collection: ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL; int res; - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); erts_send_exit_signal(p, p->common.id, p, &locks, am_kill, NIL, NULL, 0); - erts_smp_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR); -#ifdef ERTS_DIRTY_SCHEDULERS delay_gc_after_start: -#endif /* erts_send_exit_signal looks for ERTS_PSFLG_GC, so we have to remove it after the signal is sent */ - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); /* We have to make sure that we have space for need on the heap */ res = delay_garbage_collection(p, live_hf_end, need, fcalls); @@ -797,7 +779,7 @@ do_major_collection: return res; } - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); if (IS_TRACED_FL(p, F_TRACE_GC)) { trace_gc(p, gc_trace_end_tag, reclaimed_now, THE_NON_VALUE); @@ -821,7 +803,6 @@ do_major_collection: monitor_large_heap(p); } -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { erts_mtx_lock(&dirty_gc.mtx); dirty_gc.info.garbage_cols++; @@ -829,7 +810,6 @@ do_major_collection: erts_mtx_unlock(&dirty_gc.mtx); } else -#endif { esdp->gc_info.garbage_cols++; esdp->gc_info.reclaimed += reclaimed_now; @@ -907,7 +887,6 @@ garbage_collect_hibernate(Process* p, int check_long_gc) if (p->flags & F_DISABLE_GC) ERTS_INTERNAL_ERROR("GC disabled"); -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p))) p->flags &= ~(F_DIRTY_GC_HIBERNATE|F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC); else if (check_long_gc) { @@ -920,11 +899,10 @@ garbage_collect_hibernate(Process* p, int check_long_gc) } p->flags = flags; } -#endif /* * Preliminaries. */ - erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); + erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); ErtsGcQuickSanityCheck(p); ASSERT(p->stop == p->hend); /* Stack must be empty. 
*/ @@ -1015,7 +993,7 @@ garbage_collect_hibernate(Process* p, int check_long_gc) p->flags |= F_HIBERNATED; - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); reds = gc_cost(actual_size, actual_size); return reds; @@ -1110,7 +1088,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, p->flags |= F_NEED_FULLSWEEP; -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p))) p->flags &= ~F_DIRTY_CLA; else { @@ -1126,7 +1103,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, return 10; } } -#endif reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0, p->arg_reg, p->arity, fcalls, @@ -1137,7 +1113,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, /* * Set GC state. */ - erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); + erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); /* * Just did a major collection (which has discarded the old heap), @@ -1284,7 +1260,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, /* * Restore status. */ - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); reds += (Sint64) gc_cost((p->htop - p->heap) + byte_lit_size/sizeof(Uint), 0); @@ -2914,7 +2890,7 @@ sweep_off_heap(Process *p, int fullsweep) case FUN_SUBTAG: { ErlFunEntry* fe = ((ErlFunThing*)ptr)->fe; - if (erts_smp_refc_dectest(&fe->refc, 0) == 0) { + if (erts_refc_dectest(&fe->refc, 0) == 0) { erts_erase_fun_entry(fe); } break; @@ -3230,7 +3206,6 @@ reply_gc_info(void *vgcirp) reclaimed = esdp->gc_info.reclaimed; garbage_cols = esdp->gc_info.garbage_cols; -#ifdef ERTS_DIRTY_SCHEDULERS /* * Add dirty schedulers info on requesting * schedulers info @@ -3241,7 +3216,6 @@ reply_gc_info(void *vgcirp) garbage_cols += dirty_gc.info.garbage_cols; erts_mtx_unlock(&dirty_gc.mtx); } -#endif sz = 0; hpp = NULL; @@ -3274,11 +3248,11 @@ reply_gc_info(void *vgcirp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); - if (erts_smp_atomic32_dec_read_nob(&gcirp->refc) == 0) + if (erts_atomic32_dec_read_nob(&gcirp->refc) == 0) gcireq_free(vgcirp); } @@ -3330,18 +3304,16 @@ erts_gc_info_request(Process *c_p) gcirp->proc = c_p; gcirp->ref = STORE_NC(&hp, NULL, ref); gcirp->req_sched = esdp->no; - erts_smp_atomic32_init_nob(&gcirp->refc, + erts_atomic32_init_nob(&gcirp->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(c_p, (Sint) erts_no_schedulers); -#ifdef ERTS_SMP if (erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_gc_info, (void *) gcirp); -#endif reply_gc_info((void *) gcirp); @@ -3628,12 +3600,12 @@ erts_check_off_heap2(Process *p, Eterm *htop) refc = erts_refc_read(&u.pb->val->intern.refc, 1); break; case FUN_SUBTAG: - refc = erts_smp_refc_read(&u.fun->fe->refc, 1); + refc = erts_refc_read(&u.fun->fe->refc, 1); break; case EXTERNAL_PID_SUBTAG: case EXTERNAL_PORT_SUBTAG: case EXTERNAL_REF_SUBTAG: - refc = erts_smp_refc_read(&u.ext->node->refc, 1); + refc = erts_refc_read(&u.ext->node->refc, 1); break; case REF_SUBTAG: ASSERT(is_magic_ref_thing(u.hdr)); diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c index 6e5cc7b801..bda2c9b94d 100644 --- a/erts/emulator/beam/erl_hl_timer.c +++ b/erts/emulator/beam/erl_hl_timer.c @@ -96,13 +96,6 @@ typedef enum { #define ERTS_BIF_TIMER_SHORT_TIME 5000 -#ifdef ERTS_SMP -# define 
ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore \ - ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore) -#else -# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore -#endif - /* Bit 0 to 10 contains scheduler id (see mask below) */ #define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 11) #define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 12) @@ -159,7 +152,7 @@ typedef struct { typedef struct { Uint32 roflgs; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; union { void *arg; erts_atomic_t next; @@ -200,7 +193,7 @@ struct ErtsBifTimer_ { ErtsTWTimer twt; } type; struct { - erts_smp_atomic32_t state; + erts_atomic32_t state; #ifdef ERTS_MAGIC_REF_BIF_TIMERS ErtsMagicBinary *mbin; ErtsHLTimerList proc_list; @@ -269,7 +262,6 @@ typedef struct { erts_atomic_t last; } ErtsHLTCncldTmrQTail; -#ifdef ERTS_SMP typedef struct { /* @@ -301,7 +293,6 @@ typedef struct { } head; } ErtsHLTCncldTmrQ; -#endif /* ERTS_SMP */ typedef struct { ErtsHLTimer *root; @@ -309,9 +300,7 @@ typedef struct { } ErtsYieldingTimeoutState; struct ErtsHLTimerService_ { -#ifdef ERTS_SMP ErtsHLTCncldTmrQ canceled_queue; -#endif ErtsHLTimer *time_tree; #ifndef ERTS_MAGIC_REF_BIF_TIMERS ErtsBifTimer *btm_tree; @@ -720,9 +709,7 @@ proc_btm_list_foreach_destroy_yielding(ErtsBifTimer **list, #endif /* !ERTS_MAGIC_REF_BIF_TIMERS */ -#ifdef ERTS_SMP static void init_canceled_queue(ErtsHLTCncldTmrQ *cq); -#endif void erts_hl_timer_init(void) @@ -747,9 +734,7 @@ erts_create_timer_service(void) srv->yield = init_yield; erts_twheel_init_timer(&srv->service_timer); -#ifdef ERTS_SMP init_canceled_queue(&srv->canceled_queue); -#endif return srv; } @@ -791,13 +776,13 @@ get_time_left(ErtsSchedulerData *esdp, ErtsMonotonicTime timeout_pos) static ERTS_INLINE int proc_timeout_common(Process *proc, void *tmr) { - if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&proc->common.timer, + if (tmr == (void *) erts_atomic_cmpxchg_mb(&proc->common.timer, ERTS_PTMR_TIMEDOUT, (erts_aint_t) tmr)) { erts_aint32_t state; - erts_smp_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); - state = erts_smp_atomic32_read_acqb(&proc->state); - erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); + state = erts_atomic32_read_acqb(&proc->state); + erts_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); if (!(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_EXITING))) erts_schedule_process(proc, state, 0); return 1; @@ -808,7 +793,7 @@ proc_timeout_common(Process *proc, void *tmr) static ERTS_INLINE int port_timeout_common(Port *port, void *tmr) { - if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&port->common.timer, + if (tmr == (void *) erts_atomic_cmpxchg_mb(&port->common.timer, ERTS_PTMR_TIMEDOUT, (erts_aint_t) tmr)) { erts_port_task_schedule(port->common.id, @@ -821,24 +806,24 @@ port_timeout_common(Port *port, void *tmr) #ifdef ERTS_MAGIC_REF_BIF_TIMERS -static erts_smp_atomic_t * +static erts_atomic_t * mbin_to_btmref__(ErtsMagicBinary *mbin) { - return erts_smp_binary_to_magic_indirection((Binary *) mbin); + return erts_binary_to_magic_indirection((Binary *) mbin); } static ERTS_INLINE void magic_binary_init(ErtsMagicBinary *mbin, ErtsBifTimer *tmr) { - erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin); - erts_smp_atomic_init_nob(aptr, (erts_aint_t) tmr); + erts_atomic_t *aptr = mbin_to_btmref__(mbin); + erts_atomic_init_nob(aptr, (erts_aint_t) tmr); } static ERTS_INLINE ErtsBifTimer * magic_binary_to_btm(ErtsMagicBinary *mbin) { - erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin); - ErtsBifTimer *tmr = (ErtsBifTimer *) erts_smp_atomic_read_nob(aptr); + 
erts_atomic_t *aptr = mbin_to_btmref__(mbin); + ErtsBifTimer *tmr = (ErtsBifTimer *) erts_atomic_read_nob(aptr); ERTS_HLT_ASSERT(!tmr || tmr->btm.mbin == mbin); return tmr; } @@ -884,7 +869,7 @@ init_btm_specifics(ErtsSchedulerData *esdp, btm_rbt_insert(&esdp->timer_service->btm_tree, tmr); #endif - erts_smp_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE); + erts_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE); return refc; /* refc from magic binary... */ } @@ -917,10 +902,10 @@ timer_pre_dec_refc(ErtsTimer *tmr) { #ifdef ERTS_HLT_DEBUG erts_aint_t refc; - refc = erts_smp_atomic32_dec_read_nob(&tmr->head.refc); + refc = erts_atomic32_dec_read_nob(&tmr->head.refc); ERTS_HLT_ASSERT(refc > 0); #else - erts_smp_atomic32_dec_nob(&tmr->head.refc); + erts_atomic32_dec_nob(&tmr->head.refc); #endif } @@ -969,8 +954,8 @@ schedule_tw_timer_destroy(ErtsTWTimer *tmr) static ERTS_INLINE void tw_timer_dec_refc(ErtsTWTimer *tmr) { - if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) { - ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore; + if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) { + ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); schedule_tw_timer_destroy(tmr); } } @@ -1114,7 +1099,7 @@ create_tw_timer(ErtsSchedulerData *esdp, return NULL; } - erts_smp_atomic32_init_nob(&tmr->head.refc, refc); + erts_atomic32_init_nob(&tmr->head.refc, refc); erts_twheel_set_timer(esdp->timer_wheel, &tmr->u.tw_tmr, @@ -1147,7 +1132,7 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs) * at once... */ - ERTS_HLT_ASSERT(erts_smp_atomic32_read_nob(&tmr->head.refc) == 0); + ERTS_HLT_ASSERT(erts_atomic32_read_nob(&tmr->head.refc) == 0); if (roflgs & ERTS_TMR_ROFLG_REG_NAME) { ERTS_HLT_ASSERT(is_atom(tmr->head.receiver.name)); @@ -1179,14 +1164,13 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs) static ERTS_INLINE void hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs) { - if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) { - ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore; + if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) { + ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); schedule_hl_timer_destroy(tmr, roflgs); } } static void hlt_service_timeout(void *vesdp); -#ifdef ERTS_SMP static void handle_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTCncldTmrQ *cq, int use_limit, @@ -1194,12 +1178,11 @@ static void handle_canceled_queue(ErtsSchedulerData *esdp, int *need_thr_progress, ErtsThrPrgrVal *thr_prgr_p, int *need_more_work); -#endif static ERTS_INLINE void check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv) { -#if defined(ERTS_SMP) && ERTS_TMR_CHECK_CANCEL_ON_CREATE +#if ERTS_TMR_CHECK_CANCEL_ON_CREATE ErtsHLTCncldTmrQ *cq = &srv->canceled_queue; if (cq->head.first != cq->head.unref_end) handle_canceled_queue(esdp, cq, 1, @@ -1219,14 +1202,14 @@ bif_timer_ref_destructor(Binary *unused) static ERTS_INLINE void btm_clear_magic_binary(ErtsBifTimer *tmr) { - erts_smp_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin); + erts_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin); Uint32 roflgs = tmr->type.head.roflgs; #ifdef ERTS_HLT_DEBUG - erts_aint_t tval = erts_smp_atomic_xchg_nob(aptr, + erts_aint_t tval = erts_atomic_xchg_nob(aptr, (erts_aint_t) NULL); ERTS_HLT_ASSERT(tval == (erts_aint_t) tmr); #else - erts_smp_atomic_set_nob(aptr, (erts_aint_t) NULL); + erts_atomic_set_nob(aptr, (erts_aint_t) NULL); #endif if (roflgs & ERTS_TMR_ROFLG_HLT) hl_timer_dec_refc(&tmr->type.hlt, roflgs); @@ -1246,7 +1229,7 @@ bif_timer_timeout(ErtsHLTimerService *srv, 
ERTS_HLT_ASSERT(tmr->type.head.roflgs == roflgs); ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR); - state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state, + state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state, ERTS_TMR_STATE_TIMED_OUT, ERTS_TMR_STATE_ACTIVE); @@ -1279,7 +1262,7 @@ bif_timer_timeout(ErtsHLTimerService *srv, tmr->btm.bp = NULL; erts_queue_message(proc, 0, mp, tmr->btm.message, am_clock_service); - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_lock(proc, ERTS_PROC_LOCK_BTM); /* If the process is exiting do not disturb the cleanup... */ if (!ERTS_PROC_IS_EXITING(proc)) { #ifdef ERTS_MAGIC_REF_BIF_TIMERS @@ -1295,7 +1278,7 @@ bif_timer_timeout(ErtsHLTimerService *srv, } #endif } - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM); if (dec_refc) timer_pre_dec_refc((ErtsTimer *) tmr); } @@ -1432,7 +1415,7 @@ create_hl_timer(ErtsSchedulerData *esdp, } tmr->head.roflgs = roflgs; - erts_smp_atomic32_init_nob(&tmr->head.refc, refc); + erts_atomic32_init_nob(&tmr->head.refc, refc); if (!srv->next_timeout || tmr->timeout < srv->next_timeout->timeout) { @@ -1664,7 +1647,6 @@ cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp, } } -#ifdef ERTS_SMP static void init_canceled_queue(ErtsHLTCncldTmrQ *cq) @@ -1794,7 +1776,7 @@ cq_check_incoming(ErtsSchedulerData *esdp, ErtsHLTCncldTmrQ *cq) cq->head.next.thr_progress_reached = 1; /* Move unreferenced end pointer forward... */ - ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore; + ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); cq->head.unref_end = cq->head.next.unref_end; @@ -1887,31 +1869,24 @@ erts_handle_canceled_timers(void *vesdp, need_more_work); } -#endif /* ERTS_SMP */ static void queue_canceled_timer(ErtsSchedulerData *esdp, int rsched_id, ErtsTimer *tmr) { -#ifdef ERTS_SMP ErtsHLTCncldTmrQ *cq; cq = &ERTS_SCHEDULER_IX(rsched_id-1)->timer_service->canceled_queue; if (cq_enqueue(cq, tmr, rsched_id - (int) esdp->no)) erts_notify_canceled_timer(esdp, rsched_id); -#else - ERTS_INTERNAL_ERROR("Unexpected enqueue of canceled timer"); -#endif } static void continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr) { -#ifdef ERTS_SMP Uint32 sid = (tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK); if (esdp->no != sid) queue_canceled_timer(esdp, sid, tmr); else -#endif cleanup_sched_local_canceled_timer(esdp, tmr); } @@ -1997,7 +1972,7 @@ setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos, #else proc_btm_rbt_insert(&proc->bif_timers, tmr); #endif - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM); tmr->type.head.receiver.proc = proc; } } @@ -2018,7 +1993,7 @@ cancel_bif_timer(ErtsBifTimer *tmr) Uint32 roflgs; int res; - state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state, + state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state, ERTS_TMR_STATE_CANCELED, ERTS_TMR_STATE_ACTIVE); if (state != ERTS_TMR_STATE_ACTIVE) @@ -2040,7 +2015,7 @@ cancel_bif_timer(ErtsBifTimer *tmr) proc = tmr->type.head.receiver.proc; ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)); - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_lock(proc, ERTS_PROC_LOCK_BTM); /* * If process is exiting, let it clean up * the btm tree by itself (it may be in @@ -2059,7 +2034,7 @@ cancel_bif_timer(ErtsBifTimer *tmr) res = 1; } #endif - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM); } return res; @@ -2082,7 +2057,7 @@ access_btm(ErtsBifTimer *tmr, Uint32 sid, ErtsSchedulerData *esdp, int cancel) : 
erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr)); if (!cancel) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->btm.state); + erts_aint32_t state = erts_atomic32_read_acqb(&tmr->btm.state); if (state == ERTS_TMR_STATE_ACTIVE) return get_time_left(esdp, timeout); return -1; @@ -2176,7 +2151,7 @@ send_async_info(Process *proc, ErtsProcLocks initial_locks, locks &= ~initial_locks; if (locks) - erts_smp_proc_unlock(proc, locks); + erts_proc_unlock(proc, locks); return am_ok; } @@ -2262,7 +2237,7 @@ send_sync_info(Process *proc, ErtsProcLocks initial_locks, locks &= ~initial_locks; if (locks) - erts_smp_proc_unlock(proc, locks); + erts_proc_unlock(proc, locks); return am_ok; } @@ -2376,9 +2351,9 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp, * Check if the timer is aimed at current * process... */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_BTM); + erts_proc_lock(c_p, ERTS_PROC_LOCK_BTM); tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_BTM); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_BTM); if (!tmr) return 0; @@ -2419,7 +2394,7 @@ no_timer_result(Process *c_p, Eterm tref, int cancel, int async, int info) erts_queue_message(c_p, locks, mp, msg, am_clock_service); locks &= ~ERTS_PROC_LOCK_MAIN; if (locks) - erts_smp_proc_unlock(c_p, locks); + erts_proc_unlock(c_p, locks); return am_ok; } @@ -2495,7 +2470,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info) req->rrefn[1] = rrefn[1]; req->rrefn[2] = rrefn[2]; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); if (ERTS_PROC_PENDING_EXIT(c_p)) ERTS_VBUMP_ALL_REDS(c_p); @@ -2513,10 +2488,10 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info) * otherwise, next receive will *not* work * as expected! 
*/ - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); c_p->msg.save = c_p->msg.last; } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, rref); } @@ -2606,7 +2581,7 @@ exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp) erts_aint_t state; int is_hlt; - state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state, + state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state, ERTS_TMR_STATE_CANCELED, ERTS_TMR_STATE_ACTIVE); @@ -2992,7 +2967,7 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo, ERTS_TMR_PROC, (void *) c_p, c_p->common.id, THE_NON_VALUE, NULL, NULL, NULL); - erts_smp_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr); + erts_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr); } } @@ -3003,7 +2978,7 @@ erts_set_proc_timer_term(Process *c_p, Eterm etmo) ErtsMonotonicTime tmo, timeout_pos; int short_time, tres; - ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer) + ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer) == ERTS_PTMR_NONE); tres = parse_timeout_pos(esdp, etmo, &tmo, 0, @@ -3023,7 +2998,7 @@ erts_set_proc_timer_uword(Process *c_p, UWord tmo) { ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); - ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer) + ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer) == ERTS_PTMR_NONE); #ifndef ARCH_32 @@ -3046,13 +3021,13 @@ void erts_cancel_proc_timer(Process *c_p) { erts_aint_t tval; - tval = erts_smp_atomic_xchg_acqb(&c_p->common.timer, + tval = erts_atomic_xchg_acqb(&c_p->common.timer, ERTS_PTMR_NONE); c_p->flags &= ~(F_INSLPQUEUE|F_TIMO); if (tval == ERTS_PTMR_NONE) return; if (tval == ERTS_PTMR_TIMEDOUT) { - erts_smp_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE); + erts_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE); return; } continue_cancel_ptimer(erts_proc_sched_data(c_p), @@ -3067,7 +3042,7 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo) ErtsMonotonicTime timeout_pos; ErtsCreateTimerFunc create_timer; - if (erts_smp_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE) + if (erts_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE) erts_cancel_port_timer(c_prt); check_canceled_queue(esdp, esdp->timer_service); @@ -3080,14 +3055,14 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo) tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT, (void *) c_prt, c_prt->common.id, THE_NON_VALUE, NULL, NULL, NULL); - erts_smp_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr); + erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr); } void erts_cancel_port_timer(Port *c_prt) { erts_aint_t tval; - tval = erts_smp_atomic_xchg_acqb(&c_prt->common.timer, + tval = erts_atomic_xchg_acqb(&c_prt->common.timer, ERTS_PTMR_NONE); if (tval == ERTS_PTMR_NONE) return; @@ -3095,7 +3070,7 @@ erts_cancel_port_timer(Port *c_prt) while (!erts_port_task_is_scheduled(&c_prt->timeout_task)) erts_thr_yield(); erts_port_task_abort(&c_prt->timeout_task); - erts_smp_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE); + erts_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE); return; } continue_cancel_ptimer(erts_get_scheduler_data(), @@ -3109,7 +3084,7 @@ erts_read_port_timer(Port *c_prt) erts_aint_t itmr; ErtsMonotonicTime timeout_pos; - itmr = erts_smp_atomic_read_acqb(&c_prt->common.timer); + itmr = erts_atomic_read_acqb(&c_prt->common.timer); if (itmr == ERTS_PTMR_NONE) return (Sint64) -1; if (itmr == ERTS_PTMR_TIMEDOUT) @@ -3246,7 +3221,7 @@ 
debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd) if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR)) return; #endif - if (erts_smp_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) { + if (erts_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) { ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd; Eterm id = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME) ? tmr->type.head.receiver.name @@ -3284,7 +3259,7 @@ erts_debug_bif_timer_foreach(void (*func)(Eterm, btmfd.func = func; btmfd.arg = arg; - if (!erts_smp_thr_progress_is_blocking()) + if (!erts_thr_progress_is_blocking()) ERTS_INTERNAL_ERROR("Not blocking thread progress"); for (six = 0; six < erts_no_schedulers; six++) { @@ -3375,7 +3350,7 @@ erts_debug_callback_timer_foreach(void (*tclbk)(void *), dfct.func = func; dfct.arg = arg; - if (!erts_smp_thr_progress_is_blocking()) + if (!erts_thr_progress_is_blocking()) ERTS_INTERNAL_ERROR("Not blocking thread progress"); for (six = 0; six < erts_no_schedulers; six++) { diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h index ff31f04cb9..e6f5e8b67d 100644 --- a/erts/emulator/beam/erl_hl_timer.h +++ b/erts/emulator/beam/erl_hl_timer.h @@ -36,16 +36,16 @@ typedef struct ErtsHLTimerService_ ErtsHLTimerService; #define ERTS_PTMR_TIMEDOUT (ERTS_PTMR_NONE + ((erts_aint_t) 1)) #define ERTS_PTMR_INIT(P) \ - erts_smp_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE) + erts_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE) #define ERTS_PTMR_IS_SET(P) \ - (ERTS_PTMR_NONE != erts_smp_atomic_read_nob(&(P)->common.timer)) + (ERTS_PTMR_NONE != erts_atomic_read_nob(&(P)->common.timer)) #define ERTS_PTMR_IS_TIMED_OUT(P) \ - (ERTS_PTMR_TIMEDOUT == erts_smp_atomic_read_nob(&(P)->common.timer)) + (ERTS_PTMR_TIMEDOUT == erts_atomic_read_nob(&(P)->common.timer)) #define ERTS_PTMR_CLEAR(P) \ do { \ ASSERT(ERTS_PTMR_IS_TIMED_OUT((P))); \ - erts_smp_atomic_set_nob(&(P)->common.timer, \ + erts_atomic_set_nob(&(P)->common.timer, \ ERTS_PTMR_NONE); \ } while (0) @@ -63,13 +63,11 @@ void erts_hl_timer_init(void); void erts_start_timer_callback(ErtsMonotonicTime, void (*)(void *), void *); -#ifdef ERTS_SMP void erts_handle_canceled_timers(void *vesdp, int *need_thr_progress, ErtsThrPrgrVal *thr_prgr_p, int *need_more_work); -#endif Uint erts_bif_timer_memory_size(void); void erts_print_bif_timer_info(fmtfn_t to, void *to_arg); diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 5206d7564f..8c14c86219 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -69,16 +69,8 @@ * The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands * only. Do not remove even though they aren't used elsewhere in the emulator! 
*/ -#ifdef ERTS_SMP const int etp_smp_compiled = 1; -#else -const int etp_smp_compiled = 0; -#endif -#ifdef USE_THREADS const int etp_thread_compiled = 1; -#else -const int etp_thread_compiled = 0; -#endif const char etp_erts_version[] = ERLANG_VERSION; const char etp_otp_release[] = ERLANG_OTP_RELEASE; const char etp_compile_date[] = ERLANG_COMPILE_DATE; @@ -156,17 +148,10 @@ static void erl_init(int ncpu, static erts_atomic_t exiting; -#ifdef ERTS_SMP -erts_smp_atomic32_t erts_writing_erl_crash_dump; +erts_atomic32_t erts_writing_erl_crash_dump; erts_tsd_key_t erts_is_crash_dumping_key; -#else -volatile int erts_writing_erl_crash_dump = 0; -#endif int erts_initialized = 0; -#if defined(USE_THREADS) && !defined(ERTS_SMP) -erts_tid_t erts_main_thread; -#endif int erts_use_sender_punish; @@ -185,7 +170,7 @@ int erts_backtrace_depth; /* How many functions to show in a backtrace * in error codes. */ -erts_smp_atomic32_t erts_max_gen_gcs; +erts_atomic32_t erts_max_gen_gcs; Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error, am_info or am_warning, am_error is @@ -195,11 +180,9 @@ int erts_compat_rel; static int no_schedulers; static int no_schedulers_online; -#ifdef ERTS_DIRTY_SCHEDULERS static int no_dirty_cpu_schedulers; static int no_dirty_cpu_schedulers_online; static int no_dirty_io_schedulers; -#endif #ifdef DEBUG Uint32 verbose; /* See erl_debug.h for information about verbose */ @@ -328,11 +311,9 @@ erl_init(int ncpu, erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab); erts_init_scheduling(no_schedulers, no_schedulers_online -#ifdef ERTS_DIRTY_SCHEDULERS , no_dirty_cpu_schedulers, no_dirty_cpu_schedulers_online, no_dirty_io_schedulers -#endif ); erts_late_init_time_sup(); erts_init_cpu_topology(); /* Must be after init_scheduling */ @@ -410,7 +391,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** */ erts_init_empty_process(&parent); - erts_smp_proc_lock(&parent, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(&parent, ERTS_PROC_LOCK_MAIN); hp = HAlloc(&parent, argc*2 + 4); args = NIL; for (i = argc-1; i >= 0; i--) { @@ -425,7 +406,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC; res = erl_create_process(&parent, start_mod, am_start, args, &so); - erts_smp_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN); erts_cleanup_empty_process(&parent); return res; } @@ -450,7 +431,7 @@ erl_system_process_otp(Eterm parent_pid, char* modname, int off_heap_msgq) if (off_heap_msgq) so.flags |= SPO_OFF_HEAP_MSGQ; res = erl_create_process(parent, start_mod, am_start, NIL, &so); - erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(parent, ERTS_PROC_LOCK_MAIN); return res; } @@ -624,7 +605,6 @@ void erts_usage(void) ERTS_SCHED_THREAD_MIN_STACK_SIZE, ERTS_SCHED_THREAD_MAX_STACK_SIZE, ERTS_DEFAULT_SCHED_STACK_SIZE); -#ifdef ERTS_DIRTY_SCHEDULERS erts_fprintf(stderr, "-sssdcpu size suggested stack size in kilo words for dirty CPU scheduler\n"); erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n", ERTS_SCHED_THREAD_MIN_STACK_SIZE, @@ -635,7 +615,6 @@ void erts_usage(void) ERTS_SCHED_THREAD_MIN_STACK_SIZE, ERTS_SCHED_THREAD_MAX_STACK_SIZE, ERTS_DEFAULT_DIO_SCHED_STACK_SIZE); -#endif erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n"); erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n"); erts_fprintf(stderr, " schedulers online (n2), maximum for 
both\n"); @@ -644,7 +623,6 @@ void erts_usage(void) erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n"); erts_fprintf(stderr, " as percentages of logical processors configured and logical\n"); erts_fprintf(stderr, " processors available, respectively\n"); -#ifdef ERTS_DIRTY_SCHEDULERS erts_fprintf(stderr, "-SDcpu n1:n2 set number of dirty CPU schedulers (n1), and number of\n"); erts_fprintf(stderr, " dirty CPU schedulers online (n2), valid range for both\n"); erts_fprintf(stderr, " numbers is [1-%d], and n2 must be less than or equal to n1\n", @@ -654,7 +632,6 @@ void erts_usage(void) erts_fprintf(stderr, " and logical processors available, respectively\n"); erts_fprintf(stderr, "-SDio n set number of dirty I/O schedulers, valid range is [0-%d]\n", ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS); -#endif erts_fprintf(stderr, "-t size set the maximum number of atoms the emulator can handle\n"); erts_fprintf(stderr, " valid range is [%d-%d]\n", MIN_ATOM_TABLE_SIZE, MAX_ATOM_TABLE_SIZE); @@ -682,7 +659,6 @@ void erts_usage(void) erts_exit(1, ""); } -#ifdef USE_THREADS /* * allocators for thread lib */ @@ -724,7 +700,6 @@ static void ethr_ll_free(void *ptr) erts_free(ERTS_ALC_T_ETHR_LL, ptr); } -#endif static int early_init(int *argc, char **argv) /* @@ -742,21 +717,16 @@ early_init(int *argc, char **argv) /* int schdlrs_percentage = 100; int schdlrs_onln_percentage = 100; int max_main_threads; -#ifdef ERTS_DIRTY_SCHEDULERS int dirty_cpu_scheds; int dirty_cpu_scheds_online; int dirty_cpu_scheds_pctg = 100; int dirty_cpu_scheds_onln_pctg = 100; int dirty_io_scheds; -#endif int max_reader_groups; int reader_groups; char envbuf[21]; /* enough for any 64-bit integer */ size_t envbufsz; -#if defined(USE_THREADS) && !defined(ERTS_SMP) - erts_main_thread = erts_thr_self(); -#endif erts_save_emu_args(*argc, argv); @@ -781,11 +751,6 @@ early_init(int *argc, char **argv) /* &ncpu, &ncpuonln, &ncpuavail); -#ifndef ERTS_SMP - ncpu = 1; - ncpuonln = 1; - ncpuavail = 1; -#endif ignore_break = 0; replace_intr = 0; @@ -797,18 +762,12 @@ early_init(int *argc, char **argv) /* erts_sys_pre_init(); erts_atomic_init_nob(&exiting, 0); -#ifdef ERTS_SMP erts_thr_progress_pre_init(); -#endif -#ifdef ERTS_SMP - erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L); + erts_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L); erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key"); -#else - erts_writing_erl_crash_dump = 0; -#endif - erts_smp_atomic32_init_nob(&erts_max_gen_gcs, + erts_atomic32_init_nob(&erts_max_gen_gcs, (erts_aint32_t) ((Uint16) -1)); erts_pre_init_process(); @@ -825,11 +784,9 @@ early_init(int *argc, char **argv) /* schdlrs = no_schedulers; schdlrs_onln = no_schedulers_online; -#ifdef ERTS_DIRTY_SCHEDULERS dirty_cpu_scheds = no_schedulers; dirty_cpu_scheds_online = no_schedulers_online; dirty_io_scheds = 10; -#endif envbufsz = sizeof(envbuf); @@ -922,7 +879,6 @@ early_init(int *argc, char **argv) /* ("using %d:%d scheduler percentages\n", schdlrs_percentage, schdlrs_onln_percentage)); } -#ifdef ERTS_DIRTY_SCHEDULERS else if (argv[i][2] == 'D') { char *arg; char *type = argv[i]+3; @@ -1034,7 +990,6 @@ early_init(int *argc, char **argv) /* break; } } -#endif else { int tot, onln; char *arg = get_arg(argv[i]+2, argv[i+1], &i); @@ -1093,7 +1048,6 @@ early_init(int *argc, char **argv) /* i++; } -#ifdef ERTS_SMP /* apply any scheduler percentages */ if (schdlrs_percentage != 100 || schdlrs_onln_percentage != 100) { schdlrs = schdlrs * schdlrs_percentage / 
100; @@ -1117,12 +1071,6 @@ early_init(int *argc, char **argv) /* erts_usage(); } } -#else - /* Silence gcc warnings */ - (void)schdlrs_percentage; - (void)schdlrs_onln_percentage; -#endif -#ifdef ERTS_DIRTY_SCHEDULERS /* apply any dirty scheduler precentages */ if (dirty_cpu_scheds_pctg != 100 || dirty_cpu_scheds_onln_pctg != 100) { dirty_cpu_scheds = dirty_cpu_scheds * dirty_cpu_scheds_pctg / 100; @@ -1136,33 +1084,22 @@ early_init(int *argc, char **argv) /* dirty_cpu_scheds_online = schdlrs_onln; if (dirty_cpu_scheds_online < 1) dirty_cpu_scheds_online = 1; -#endif } -#ifndef USE_THREADS - erts_async_max_threads = 0; -#endif -#ifdef ERTS_SMP no_schedulers = schdlrs; no_schedulers_online = schdlrs_onln; erts_no_schedulers = (Uint) no_schedulers; -#else - erts_no_schedulers = 1; -#endif -#ifdef ERTS_DIRTY_SCHEDULERS erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds; no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online; erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds; -#endif erts_early_init_scheduling(no_schedulers); alloc_opts.ncpu = ncpu; erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes) -M flags. */ /* Require allocators */ -#ifdef ERTS_SMP /* * Thread progress management: * @@ -1177,15 +1114,10 @@ early_init(int *argc, char **argv) /* */ erts_thr_progress_init(no_schedulers, no_schedulers+2, -#ifndef ERTS_DIRTY_SCHEDULERS - erts_async_max_threads -#else erts_async_max_threads + erts_no_dirty_cpu_schedulers + erts_no_dirty_io_schedulers -#endif ); -#endif erts_thr_q_init(); erts_init_utils(); erts_early_init_cpu_topology(no_schedulers, @@ -1193,7 +1125,6 @@ early_init(int *argc, char **argv) /* max_reader_groups, &reader_groups); -#ifdef USE_THREADS { erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER; elid.mem.std.alloc = ethr_std_alloc; @@ -1210,7 +1141,6 @@ early_init(int *argc, char **argv) /* erts_thr_late_init(&elid); } -#endif erts_msacc_early_init(); #ifdef ERTS_ENABLE_LOCK_CHECK @@ -1237,40 +1167,6 @@ early_init(int *argc, char **argv) /* return ncpu; } -#ifndef ERTS_SMP - -void *erts_scheduler_stack_limit; - - -static void set_main_stack_size(void) -{ - char c; - UWord stacksize; -# if HAVE_DECL_GETRLIMIT && HAVE_DECL_SETRLIMIT && HAVE_DECL_RLIMIT_STACK - struct rlimit rl; - int bytes; - stacksize = erts_sched_thread_suggested_stack_size * sizeof(Uint) * 1024; - /* Add some extra pages... neede by some systems... */ - bytes = (int) stacksize + 3*erts_sys_get_page_size(); - if (getrlimit(RLIMIT_STACK, &rl) != 0 || - (rl.rlim_cur = bytes, setrlimit(RLIMIT_STACK, &rl) != 0)) { - erts_fprintf(stderr, "failed to set stack size for scheduler " - "thread to %d bytes\n", bytes); - erts_usage(); - } -# else - if (modified_sched_thread_suggested_stack_size) { - erts_fprintf(stderr, "no OS support for dynamic stack size limit\n"); - erts_usage(); - } - /* Be conservative and hope it is not more than 64 kWords... */ - stacksize = 64*1024*sizeof(void *); -# endif - - erts_scheduler_stack_limit = erts_calc_stacklimit(&c, stacksize); -} - -#endif void erl_start(int argc, char **argv) @@ -1304,7 +1200,7 @@ erl_start(int argc, char **argv) envbufsz = sizeof(envbuf); if (erts_sys_getenv_raw("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) { Uint16 max_gen_gcs = atoi(envbuf); - erts_smp_atomic32_set_nob(&erts_max_gen_gcs, + erts_atomic32_set_nob(&erts_max_gen_gcs, (erts_aint32_t) max_gen_gcs); } @@ -1319,10 +1215,8 @@ erl_start(int argc, char **argv) * a lot of stack. 
*/ erts_sched_thread_suggested_stack_size = ERTS_DEFAULT_SCHED_STACK_SIZE; -#ifdef ERTS_DIRTY_SCHEDULERS erts_dcpu_sched_thread_suggested_stack_size = ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE; erts_dio_sched_thread_suggested_stack_size = ERTS_DEFAULT_DIO_SCHED_STACK_SIZE; -#endif #ifdef DEBUG verbose = DEBUG_DEFAULT; @@ -1490,12 +1384,8 @@ erl_start(int argc, char **argv) #ifdef DEBUG strcat(tmp, ",DEBUG"); #endif -#ifdef ERTS_SMP strcat(tmp, ",SMP"); -#endif -#ifdef USE_THREADS strcat(tmp, ",ASYNC_THREADS"); -#endif #ifdef HIPE strcat(tmp, ",HIPE"); #endif @@ -1941,7 +1831,6 @@ erl_start(int argc, char **argv) VERBOSE(DEBUG_SYSTEM, ("scheduler wakeup threshold: %s\n", arg)); } -#ifdef ERTS_DIRTY_SCHEDULERS else if (has_prefix("ssdcpu", sub_param)) { /* suggested stack size (Kilo Words) for dirty CPU scheduler threads */ arg = get_arg(sub_param+6, argv[i+1], &i); @@ -1976,7 +1865,6 @@ erl_start(int argc, char **argv) ("suggested dirty IO scheduler thread stack size %d kilo words\n", erts_dio_sched_thread_suggested_stack_size)); } -#endif else if (has_prefix("ss", sub_param)) { /* suggested stack size (Kilo Words) for scheduler threads */ arg = get_arg(sub_param+2, argv[i+1], &i); @@ -2007,9 +1895,7 @@ erl_start(int argc, char **argv) arg); erts_usage(); } -#ifdef ERTS_SMP erts_runq_supervision_interval = val; -#endif } else { erts_fprintf(stderr, "bad scheduling option %s\n", argv[i]); @@ -2293,12 +2179,10 @@ erl_start(int argc, char **argv) if (erts_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE) erts_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE; -#ifdef ERTS_DIRTY_SCHEDULERS if (erts_dcpu_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE) erts_dcpu_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE; if (erts_dio_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE) erts_dio_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE; -#endif erl_init(ncpu, proc_tab_sz, @@ -2343,7 +2227,6 @@ erl_start(int argc, char **argv) && erts_literal_area_collector->common.id == pid); erts_proc_inc_refc(erts_literal_area_collector); -#ifdef ERTS_DIRTY_SCHEDULERS pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker", !0); erts_dirty_process_code_checker = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, @@ -2351,11 +2234,9 @@ erl_start(int argc, char **argv) ASSERT(erts_dirty_process_code_checker && erts_dirty_process_code_checker->common.id == pid); erts_proc_inc_refc(erts_dirty_process_code_checker); -#endif } -#ifdef ERTS_SMP erts_start_schedulers(); #ifdef ERTS_ENABLE_LOCK_COUNT @@ -2364,31 +2245,9 @@ erl_start(int argc, char **argv) /* Let system specific code decide what to do with the main thread... */ erts_sys_main_thread(); /* May or may not return! 
*/ -#else - { - ErtsSchedulerData *esdp = erts_get_scheduler_data(); - erts_msacc_init_thread("scheduler", 1, 1); - erts_thr_set_main_status(1, 1); -#if ERTS_USE_ASYNC_READY_Q - esdp->aux_work_data.async_ready.queue - = erts_get_async_ready_queue(1); -#endif - set_main_stack_size(); - erts_sched_init_time_sup(esdp); - erts_ets_sched_spec_data_init(esdp); - erts_aux_work_timeout_late_init(esdp); - -#ifdef ERTS_ENABLE_LOCK_COUNT - erts_lcnt_post_startup(); -#endif - - process_main(esdp->x_reg_array, esdp->f_reg_array); - } -#endif } -#ifdef USE_THREADS __decl_noreturn void erts_thr_fatal_error(int err, char *what) { @@ -2402,7 +2261,6 @@ __decl_noreturn void erts_thr_fatal_error(int err, char *what) abort(); } -#endif static void system_cleanup(int flush_async) @@ -2415,7 +2273,6 @@ system_cleanup(int flush_async) * Another thread is currently exiting the system; * wait for it to do its job. */ -#ifdef ERTS_SMP if (erts_thr_progress_is_managed_thread()) { /* * The exiting thread might be waiting for @@ -2424,7 +2281,6 @@ system_cleanup(int flush_async) erts_thr_progress_active(NULL, 0); erts_thr_progress_prepare_wait(NULL); } -#endif /* Wait forever... */ while (1) erts_milli_sleep(10000000); @@ -2439,17 +2295,12 @@ system_cleanup(int flush_async) if (!flush_async || !erts_initialized -#if defined(USE_THREADS) && !defined(ERTS_SMP) - || !erts_equal_tids(erts_main_thread, erts_thr_self()) -#endif ) return; -#ifdef ERTS_SMP #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_check_exact(NULL, 0); #endif -#endif erts_exit_flush_async(); } diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index 189c88ac4a..f81c90818f 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -75,12 +75,9 @@ static erts_lc_lock_order_t erts_lock_order[] = { * if only one lock use * the lock name)" */ -#ifdef ERTS_SMP { "driver_lock", "driver_name" }, { "port_lock", "port_id" }, -#endif { "port_data_lock", "address" }, -#ifdef ERTS_SMP { "bif_timers", NULL }, { "reg_tab", NULL }, { "proc_main", "pid" }, @@ -89,9 +86,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "hipe_mfait_lock", NULL }, #endif { "nodes_monitors", NULL }, -#ifdef ERTS_SMP { "resource_monitors", "address" }, -#endif { "driver_list", NULL }, { "proc_link", "pid" }, { "proc_msgq", "pid" }, @@ -114,7 +109,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "fun_tab", NULL }, { "environ", NULL }, { "release_literal_areas", NULL }, -#endif { "efile_drv", "address" }, { "drv_ev_state_grow", NULL, }, { "drv_ev_state", "address" }, @@ -125,11 +119,9 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "schdlr_sspnd", NULL }, { "migration_info_update", NULL }, { "run_queue", "address" }, -#ifdef ERTS_DIRTY_SCHEDULERS { "dirty_run_queue_sleep_list", "address" }, { "dirty_gc_info", NULL }, { "dirty_break_point_index", NULL }, -#endif { "process_table", NULL }, { "cpu_info", NULL }, { "pollset", "address" }, @@ -144,7 +136,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "async_enq_mtx", NULL }, { "msacc_list_mutex", NULL }, { "msacc_unmanaged_mutex", NULL }, -#ifdef ERTS_SMP { "atom_tab", NULL }, { "misc_op_list_pre_alloc_lock", "address" }, { "message_pre_alloc_lock", "address" }, @@ -155,17 +146,13 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "sys_msg_q", NULL }, { "tracer_mtx", NULL }, { "port_table", NULL }, -#endif { "magic_ref_table", "address" }, { "mtrace_op", NULL }, { "instr_x", NULL }, { "instr", NULL }, -#ifdef ERTS_SMP { "pollsets_lock", NULL }, -#endif 
{ "alcu_allocator", "index" }, { "mseg", NULL }, -#ifdef ERTS_SMP { "port_task_pre_alloc_lock", "address" }, { "proclist_pre_alloc_lock", "address" }, { "xports_list_pre_alloc_lock", "address" }, @@ -178,7 +165,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "pix_lock", "address" }, { "run_queues_lists", NULL }, { "sched_stat", NULL }, -#endif { "async_init_mtx", NULL }, #ifdef __WIN32__ #ifdef DEBUG @@ -189,9 +175,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "efile_drv dtrace mutex", NULL }, #endif { "mtrace_buf", NULL }, -#ifdef ERTS_SMP { "os_monotonic_time", NULL }, -#endif { "erts_alloc_hard_debug", NULL }, { "hard_dbg_mseg", NULL }, { "erts_mmap", NULL } diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h index 8c754a8dfa..5c2c38e8f2 100644 --- a/erts/emulator/beam/erl_lock_check.h +++ b/erts/emulator/beam/erl_lock_check.h @@ -96,13 +96,7 @@ int erts_lc_is_emu_thr(void); #define ERTS_LC_ASSERT(A) \ ((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A))) -#ifdef ERTS_SMP -#define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A) -#else -#define ERTS_SMP_LC_ASSERT(A) ((void) 1) -#endif #else /* #ifdef ERTS_ENABLE_LOCK_CHECK */ -#define ERTS_SMP_LC_ASSERT(A) ((void) 1) #define ERTS_LC_ASSERT(A) ((void) 1) #endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */ diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index c1af70592a..3418a7f4df 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -170,7 +170,7 @@ erts_cleanup_offheap(ErlOffHeap *offheap) erts_bin_release(u.pb->val); break; case FUN_SUBTAG: - if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) { + if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) { erts_erase_fun_entry(u.fun->fe); } break; @@ -265,11 +265,9 @@ erts_queue_dist_message(Process *rcvr, Sint tok_lastcnt = 0; Sint tok_serial = 0; #endif -#ifdef ERTS_SMP erts_aint_t state; -#endif - ERTS_SMP_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr)); + ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr)); mp = erts_alloc_message(0, NULL); mp->data.dist_ext = dist_ext; @@ -283,36 +281,34 @@ erts_queue_dist_message(Process *rcvr, #endif ERL_MESSAGE_TOKEN(mp) = token; -#ifdef ERTS_SMP if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) { - if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) { + if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) { ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ; ErtsProcLocks unlocks = rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ); if (unlocks) { - erts_smp_proc_unlock(rcvr, unlocks); + erts_proc_unlock(rcvr, unlocks); need_locks |= unlocks; } - erts_smp_proc_lock(rcvr, need_locks); + erts_proc_lock(rcvr, need_locks); } } - state = erts_smp_atomic32_read_acqb(&rcvr->state); + state = erts_atomic32_read_acqb(&rcvr->state); if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) { if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) - erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); + erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); /* Drop message if receiver is exiting or has a pending exit ... */ erts_cleanup_messages(mp); } else -#endif if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) { if (from == am_Empty) from = dist_ext->dep->sysname; /* Ahh... need to decode it in order to trace it... 
*/ if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) - erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); + erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); if (!erts_decode_dist_message(rcvr, rcvr_locks, mp, 0)) erts_free_message(mp); else { @@ -361,14 +357,10 @@ erts_queue_dist_message(Process *rcvr, LINK_MESSAGE(rcvr, mp, &mp->next, 1); if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) - erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); + erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); erts_proc_notify_new_message(rcvr, -#ifdef ERTS_SMP rcvr_locks -#else - 0 -#endif ); } } @@ -393,50 +385,45 @@ queue_messages(Process* receiver, ERL_MESSAGE_TOKEN(first) == NIL || is_tuple(ERL_MESSAGE_TOKEN(first))); -#ifdef ERTS_SMP #ifdef ERTS_ENABLE_LOCK_CHECK - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ || + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ || receiver_locks == erts_proc_lc_my_proc_locks(receiver)); #endif if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) { - if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) { + if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) { ErtsProcLocks need_locks; if (receiver_state) state = *receiver_state; else - state = erts_smp_atomic32_read_nob(&receiver->state); + state = erts_atomic32_read_nob(&receiver->state); if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) goto exiting; need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ); if (need_locks) { - erts_smp_proc_unlock(receiver, need_locks); + erts_proc_unlock(receiver, need_locks); } need_locks |= ERTS_PROC_LOCK_MSGQ; - erts_smp_proc_lock(receiver, need_locks); + erts_proc_lock(receiver, need_locks); } locked_msgq = 1; } -#endif - state = erts_smp_atomic32_read_nob(&receiver->state); + state = erts_atomic32_read_nob(&receiver->state); if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) { -#ifdef ERTS_SMP exiting: -#endif /* Drop message if receiver is exiting or has a pending exit... */ if (locked_msgq) - erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); + erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); erts_cleanup_messages(first); return 0; } res = receiver->msg.len; -#ifdef ERTS_SMP if (receiver_locks & ERTS_PROC_LOCK_MAIN) { /* * We move 'in queue' to 'private queue' and place @@ -447,11 +434,10 @@ queue_messages(Process* receiver, * the root set when garbage collecting. 
*/ res += receiver->msg_inq.len; - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver); + ERTS_MSGQ_MV_INQ2PRIVQ(receiver); LINK_MESSAGE_PRIVQ(receiver, first, last, len); } else -#endif { LINK_MESSAGE(receiver, first, last, len); } @@ -489,14 +475,10 @@ queue_messages(Process* receiver, } if (locked_msgq) { - erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); + erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); } -#ifdef ERTS_SMP erts_proc_notify_new_message(receiver, receiver_locks); -#else - erts_proc_notify_new_message(receiver, 0); -#endif return res; } @@ -597,9 +579,7 @@ erts_try_alloc_message_on_heap(Process *pp, ErlOffHeap **ohpp, int *on_heap_p) { -#ifdef ERTS_SMP int locked_main = 0; -#endif ErtsMessage *mp; ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ)); @@ -607,15 +587,9 @@ erts_try_alloc_message_on_heap(Process *pp, if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP) goto in_message_fragment; else if ( -#if defined(ERTS_SMP) *plp & ERTS_PROC_LOCK_MAIN -#else - pp -#endif ) { -#ifdef ERTS_SMP try_on_heap: -#endif if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP) || (pp->flags & F_DISABLE_GC) || HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) { @@ -623,12 +597,10 @@ erts_try_alloc_message_on_heap(Process *pp, * The heap is either potentially in an inconsistent * state, or not large enough. */ -#ifdef ERTS_SMP if (locked_main) { *plp &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(pp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(pp, ERTS_PROC_LOCK_MAIN); } -#endif goto in_message_fragment; } @@ -639,14 +611,12 @@ erts_try_alloc_message_on_heap(Process *pp, mp->data.attached = NULL; *on_heap_p = !0; } -#ifdef ERTS_SMP - else if (pp && erts_smp_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) { + else if (pp && erts_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) { locked_main = 1; - *psp = erts_smp_atomic32_read_nob(&pp->state); + *psp = erts_atomic32_read_nob(&pp->state); *plp |= ERTS_PROC_LOCK_MAIN; goto try_on_heap; } -#endif else { in_message_fragment: if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) { @@ -715,7 +685,7 @@ erts_send_message(Process* sender, } #endif - receiver_state = erts_smp_atomic32_read_nob(&receiver->state); + receiver_state = erts_atomic32_read_nob(&receiver->state); if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) { Eterm* hp; @@ -964,7 +934,7 @@ erts_move_messages_off_heap(Process *c_p) reds += c_p->msg.len / 10; - ASSERT(erts_smp_atomic32_read_nob(&c_p->state) + ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ); ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG); @@ -1029,9 +999,9 @@ erts_complete_off_heap_message_queue_change(Process *c_p) { int reds = 1; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG); - ASSERT(erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ); + ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ); /* * This job was first initiated when the process changed to off heap @@ -1043,13 +1013,13 @@ erts_complete_off_heap_message_queue_change(Process *c_p) */ if (!(c_p->flags & F_OFF_HEAP_MSGQ)) - erts_smp_atomic32_read_band_nob(&c_p->state, + erts_atomic32_read_band_nob(&c_p->state, ~ERTS_PSFLG_OFF_HEAP_MSGQ); else { reds += 2; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); reds += 
erts_move_messages_off_heap(c_p); } c_p->flags &= ~F_OFF_HEAP_MSGQ_CHNG; @@ -1086,16 +1056,16 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state) #ifdef DEBUG if (c_p->flags & F_OFF_HEAP_MSGQ) { - ASSERT(erts_smp_atomic32_read_nob(&c_p->state) + ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ); } else { if (c_p->flags & F_OFF_HEAP_MSGQ_CHNG) { - ASSERT(erts_smp_atomic32_read_nob(&c_p->state) + ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ); } else { - ASSERT(!(erts_smp_atomic32_read_nob(&c_p->state) + ASSERT(!(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ)); } } @@ -1112,7 +1082,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state) case am_on_heap: c_p->flags |= F_ON_HEAP_MSGQ; c_p->flags &= ~F_OFF_HEAP_MSGQ; - erts_smp_atomic32_read_bor_nob(&c_p->state, + erts_atomic32_read_bor_nob(&c_p->state, ERTS_PSFLG_ON_HEAP_MSGQ); /* * We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ @@ -1121,7 +1091,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state) */ if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) { /* Safe to clear ERTS_PSFLG_OFF_HEAP_MSGQ... */ - erts_smp_atomic32_read_band_nob(&c_p->state, + erts_atomic32_read_band_nob(&c_p->state, ~ERTS_PSFLG_OFF_HEAP_MSGQ); } break; @@ -1139,7 +1109,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state) break; case am_off_heap: c_p->flags &= ~F_ON_HEAP_MSGQ; - erts_smp_atomic32_read_band_nob(&c_p->state, + erts_atomic32_read_band_nob(&c_p->state, ~ERTS_PSFLG_ON_HEAP_MSGQ); goto change_to_off_heap; default: @@ -1174,7 +1144,7 @@ change_to_off_heap: * change has completed, GC does not need to inspect * the message queue at all. */ - erts_smp_atomic32_read_bor_nob(&c_p->state, + erts_atomic32_read_bor_nob(&c_p->state, ERTS_PSFLG_OFF_HEAP_MSGQ); c_p->flags |= F_OFF_HEAP_MSGQ_CHNG; cohmq = erts_alloc(ERTS_ALC_T_MSGQ_CHNG, @@ -1455,7 +1425,7 @@ erts_factory_message_create(ErtsHeapFactory* factory, int on_heap; erts_aint32_t state; - state = proc ? erts_smp_atomic32_read_nob(&proc->state) : 0; + state = proc ? 
erts_atomic32_read_nob(&proc->state) : 0; if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) { msgp = erts_alloc_message(sz, &hp); @@ -1470,7 +1440,7 @@ erts_factory_message_create(ErtsHeapFactory* factory, } if (on_heap) { - ERTS_SMP_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN); + ERTS_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN); ASSERT(ohp == &proc->off_heap); factory->mode = FACTORY_HALLOC; factory->p = proc; diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h index 42ed14e69c..9c8cf84e43 100644 --- a/erts/emulator/beam/erl_message.h +++ b/erts/emulator/beam/erl_message.h @@ -174,7 +174,6 @@ typedef struct { ErtsMessage** saved_last; /* saved last pointer */ } ErlMessageQueue; -#ifdef ERTS_SMP typedef struct { ErtsMessage* first; @@ -190,7 +189,6 @@ typedef struct erl_trace_message_queue__ { Sint len; /* queue length */ } ErlTraceMessageQueue; -#endif /* Get "current" message */ #define PEEK_MESSAGE(p) (*(p)->msg.save) @@ -207,7 +205,6 @@ typedef struct erl_trace_message_queue__ { (p)->where.len += (num_msgs); \ } while(0) -#ifdef ERTS_SMP /* Add message last in private message queue */ #define LINK_MESSAGE_PRIVQ(p, first_msg, last_msg, len) \ @@ -219,7 +216,7 @@ typedef struct erl_trace_message_queue__ { #define LINK_MESSAGE(p, first_msg, last_msg, len) \ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg_inq) -#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p) \ +#define ERTS_MSGQ_MV_INQ2PRIVQ(p) \ do { \ if (p->msg_inq.first) { \ *p->msg.last = p->msg_inq.first; \ @@ -231,17 +228,6 @@ typedef struct erl_trace_message_queue__ { } \ } while (0) -#else - -#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p) - -/* Add message last_msg in message queue */ -#define LINK_MESSAGE(p, first_msg, last_msg, len) \ - do { \ - LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg); \ - } while(0) - -#endif /* Unlink current message */ #define UNLINK_MESSAGE(p,msgp) do { \ diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c index 3994800ba7..1c840d89f6 100644 --- a/erts/emulator/beam/erl_monitors.c +++ b/erts/emulator/beam/erl_monitors.c @@ -54,7 +54,7 @@ #define DIR_RIGHT 1 #define DIR_END 2 -static erts_smp_atomic_t tot_link_lh_size; +static erts_atomic_t tot_link_lh_size; /* Implements the sort order in monitor trees, which is different from the ordinary term order. 
@@ -123,7 +123,7 @@ do { \ (*((Hp)++)) = boxed_val((From))[i__]; \ if (is_external((To))) { \ external_thing_ptr((To))->next = NULL; \ - erts_smp_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\ + erts_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\ } \ } \ } while (0) @@ -145,7 +145,7 @@ static ErtsMonitor *create_monitor(Uint type, Eterm ref, UWord entity, Eterm nam } else { n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH, mon_size*sizeof(Uint)); - erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint)); + erts_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint)); } hp = n->heap; @@ -179,7 +179,7 @@ static ErtsLink *create_link(Uint type, Eterm pid) } else { n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH, lnk_size*sizeof(Uint)); - erts_smp_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint)); + erts_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint)); } hp = n->heap; @@ -214,13 +214,13 @@ static ErtsSuspendMonitor *create_suspend_monitor(Eterm pid) void erts_init_monitors(void) { - erts_smp_atomic_init_nob(&tot_link_lh_size, 0); + erts_atomic_init_nob(&tot_link_lh_size, 0); } Uint erts_tot_link_lh_size(void) { - return (Uint) erts_smp_atomic_read_nob(&tot_link_lh_size); + return (Uint) erts_atomic_read_nob(&tot_link_lh_size); } void erts_destroy_monitor(ErtsMonitor *mon) @@ -245,7 +245,7 @@ void erts_destroy_monitor(ErtsMonitor *mon) erts_free(ERTS_ALC_T_MONITOR_SH, (void *) mon); } else { erts_free(ERTS_ALC_T_MONITOR_LH, (void *) mon); - erts_smp_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint)); + erts_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint)); } } @@ -267,7 +267,7 @@ void erts_destroy_link(ErtsLink *lnk) erts_free(ERTS_ALC_T_NLINK_SH, (void *) lnk); } else { erts_free(ERTS_ALC_T_NLINK_LH, (void *) lnk); - erts_smp_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint)); + erts_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint)); } } @@ -985,15 +985,14 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1) DistEntry *dep; rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK); if (!rp) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(p); + ERTS_ASSERT_IS_NOT_EXITING(p); if (is_atom(pid) && is_node_name_atom(pid) && (dep = erts_find_dist_entry(pid)) != NULL) { erts_printf("Dumping dist monitors-------------------\n"); - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_dump_monitors(dep->monitors,0); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); erts_printf("Monitors dumped-------------------------\n"); - erts_deref_dist_entry(dep); BIF_RET(am_true); } else { BIF_ERROR(p,BADARG); @@ -1002,7 +1001,7 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1) erts_printf("Dumping pid monitors--------------------\n"); erts_dump_monitors(ERTS_P_MONITORS(rp),0); erts_printf("Monitors dumped-------------------------\n"); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); } } @@ -1030,15 +1029,14 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1) } else { rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK); if (!rp) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(p); + ERTS_ASSERT_IS_NOT_EXITING(p); if (is_atom(pid) && is_node_name_atom(pid) && (dep = erts_find_dist_entry(pid)) != NULL) { erts_printf("Dumping dist links----------------------\n"); - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_dump_links(dep->nlinks,0); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); erts_printf("Links 
dumped----------------------------\n"); - erts_deref_dist_entry(dep); BIF_RET(am_true); } else { BIF_ERROR(p,BADARG); @@ -1048,7 +1046,7 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1) erts_printf("Dumping pid links-----------------------\n"); erts_dump_links(ERTS_P_LINKS(rp), 0); erts_printf("Links dumped----------------------------\n"); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); } } diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c index 6c477be615..d659842b7e 100644 --- a/erts/emulator/beam/erl_msacc.c +++ b/erts/emulator/beam/erl_msacc.c @@ -48,11 +48,7 @@ static Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory) static void erts_msacc_reset(ErtsMsAcc *msacc); static ErtsMsAcc* get_msacc(void); -#ifdef USE_THREADS erts_tsd_key_t ERTS_WRITE_UNLIKELY(erts_msacc_key); -#else -ErtsMsAcc *ERTS_WRITE_UNLIKELY(erts_msacc) = NULL; -#endif #ifndef ERTS_MSACC_ALWAYS_ON int ERTS_WRITE_UNLIKELY(erts_msacc_enabled); #endif @@ -60,10 +56,8 @@ int ERTS_WRITE_UNLIKELY(erts_msacc_enabled); static Eterm *erts_msacc_state_atoms = NULL; static erts_rwmtx_t msacc_mutex; static ErtsMsAcc *msacc_managed = NULL; -#ifdef USE_THREADS static ErtsMsAcc *msacc_unmanaged = NULL; static Uint msacc_unmanaged_count = 0; -#endif #if ERTS_MSACC_STATE_COUNT < MAP_SMALL_MAP_LIMIT #define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + ERTS_REF_THING_SIZE) @@ -78,11 +72,7 @@ void erts_msacc_early_init(void) { #endif erts_rwmtx_init(&msacc_mutex, "msacc_list_mutex", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); -#ifdef USE_THREADS erts_tsd_key_create(&erts_msacc_key,"erts_msacc_key"); -#else - erts_msacc = NULL; -#endif } void erts_msacc_init(void) { @@ -107,7 +97,6 @@ void erts_msacc_init_thread(char *type, int id, int managed) { msacc->tid = erts_thr_self(); msacc->perf_counter = 0; -#ifdef USE_THREADS erts_rwmtx_rwlock(&msacc_mutex); if (!managed) { erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex", NIL, @@ -121,9 +110,6 @@ void erts_msacc_init_thread(char *type, int id, int managed) { msacc_managed = msacc; } erts_rwmtx_rwunlock(&msacc_mutex); -#else - msacc_managed = msacc; -#endif erts_msacc_reset(msacc); @@ -216,7 +202,7 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Uint req_sched; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsMSAccReq; static ErtsMsAcc* get_msacc(void) { @@ -267,7 +253,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) { rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } @@ -303,7 +289,7 @@ reply_msacc(void *vmsaccrp) erts_proc_dec_refc(msaccrp->proc); - if (erts_smp_atomic32_dec_read_nob(&msaccrp->refc) == 0) + if (erts_atomic32_dec_read_nob(&msaccrp->refc) == 0) erts_free(ERTS_ALC_T_MSACC, vmsaccrp); } @@ -370,14 +356,10 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads) msaccrp->ref = STORE_NC(&hp, NULL, ref); msaccrp->req_sched = esdp->no; -#ifdef ERTS_SMP *threads = erts_no_schedulers; *threads += 1; /* aux thread */ -#else - *threads = 1; -#endif - erts_smp_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads); + erts_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads); erts_proc_add_refc(c_p, *threads); @@ -386,12 +368,9 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads) erts_no_schedulers, reply_msacc, (void *) msaccrp); -#ifdef ERTS_SMP /* aux thread */ 
erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp); -#endif -#ifdef USE_THREADS /* Manage unmanaged threads */ switch (action) { case ERTS_MSACC_GATHER: { @@ -468,7 +447,6 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads) default: { ASSERT(0); } } -#endif *threads = make_small(*threads); diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h index d64ef8c8b9..8349a7e297 100644 --- a/erts/emulator/beam/erl_msacc.h +++ b/erts/emulator/beam/erl_msacc.h @@ -159,11 +159,7 @@ struct erl_msacc_t_ { #ifdef ERTS_ENABLE_MSACC -#ifdef USE_THREADS extern erts_tsd_key_t erts_msacc_key; -#else -extern ErtsMsAcc *erts_msacc; -#endif #ifdef ERTS_MSACC_ALWAYS_ON #define erts_msacc_enabled 1 @@ -171,13 +167,8 @@ extern ErtsMsAcc *erts_msacc; extern int erts_msacc_enabled; #endif -#ifdef USE_THREADS #define ERTS_MSACC_TSD_GET() erts_tsd_get(erts_msacc_key) #define ERTS_MSACC_TSD_SET(tsd) erts_tsd_set(erts_msacc_key,tsd) -#else -#define ERTS_MSACC_TSD_GET() erts_msacc -#define ERTS_MSACC_TSD_SET(tsd) erts_msacc = tsd -#endif void erts_msacc_early_init(void); void erts_msacc_init(void); diff --git a/erts/emulator/beam/erl_nfunc_sched.c b/erts/emulator/beam/erl_nfunc_sched.c index 1bebc1eda4..f97e86bf95 100644 --- a/erts/emulator/beam/erl_nfunc_sched.c +++ b/erts/emulator/beam/erl_nfunc_sched.c @@ -113,7 +113,7 @@ erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc, NifExport* nep; int i; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); if (dirty_shadow_proc) { diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h index 55a3a6dbf6..9be0e6f7c7 100644 --- a/erts/emulator/beam/erl_nfunc_sched.h +++ b/erts/emulator/beam/erl_nfunc_sched.h @@ -144,9 +144,9 @@ ERTS_GLB_INLINE void erts_nif_export_restore(Process *c_p, NifExport *ep, Eterm result) { ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())); - ERTS_SMP_LC_ASSERT(!(c_p->static_flags + ERTS_LC_ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); c_p->current = ep->current; @@ -193,7 +193,6 @@ erts_nif_export_check_save_trace(Process *c_p, Eterm result, ERTS_GLB_INLINE Process * erts_proc_shadow2real(Process *c_p) { -#ifdef ERTS_DIRTY_SCHEDULERS if (c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC) { Process *real_c_p = c_p->next; ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())); @@ -201,7 +200,6 @@ erts_proc_shadow2real(Process *c_p) return real_c_p; } ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())); -#endif return c_p; } @@ -217,7 +215,6 @@ erts_proc_shadow2real(Process *c_p) || BeamOp(op_call_nif) == (BeamInstr *) (*(I))), \ ((NifExport *) (((char *) (I)) - offsetof(NifExport, exp.beam[0])))) -#ifdef ERTS_DIRTY_SCHEDULERS #include "erl_message.h" #include <stddef.h> @@ -235,7 +232,7 @@ erts_flush_dirty_shadow_proc(Process *sproc) Process *c_p = sproc->next; ASSERT(sproc->common.id == c_p->common.id); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); ASSERT(c_p->stop == sproc->stop); @@ -283,7 +280,7 @@ erts_cache_dirty_shadow_proc(Process *sproc) Process *c_p = sproc->next; ASSERT(c_p); ASSERT(sproc->common.id == c_p->common.id); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); 
sproc->htop = c_p->htop; @@ -311,7 +308,7 @@ erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p) sproc = esdp->dirty_shadow_process; ASSERT(sproc); ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC); - ASSERT(erts_smp_atomic32_read_nob(&sproc->state) + ASSERT(erts_atomic32_read_nob(&sproc->state) == (ERTS_PSFLG_ACTIVE | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_PROXY)); @@ -326,7 +323,6 @@ erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p) #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#endif /* ERTS_DIRTY_SCHEDULERS */ #endif /* defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__) */ diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index ac4ecd77e5..43441e0228 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -138,7 +138,7 @@ execution_state(ErlNifEnv *env, Process **c_pp, int *schedp) Process *c_p = env->proc; if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) { - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); } else { @@ -220,7 +220,7 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, ASSERT(esdp); if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = erts_atomic32_read_nob(&p->state); ASSERT(p->scheduler_data == esdp); ASSERT((state & (ERTS_PSFLG_RUNNING @@ -287,7 +287,7 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp, else dirty_shadow_proc = env->proc; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); ep = erts_nif_export_schedule(c_p, dirty_shadow_proc, c_p->current, @@ -304,7 +304,6 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp, return (ERL_NIF_TERM) THE_NON_VALUE; } -#ifdef ERTS_DIRTY_SCHEDULERS static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); @@ -320,7 +319,7 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * ErlNifEnv env; ERL_NIF_TERM result; #ifdef DEBUG - erts_aint32_t state = erts_smp_atomic32_read_nob(&c_p->state); + erts_aint32_t state = erts_atomic32_read_nob(&c_p->state); ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p)); @@ -343,14 +342,14 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))); - erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC + erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC)); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC); ASSERT(env.proc->next == c_p); @@ -394,24 +393,19 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * return exiting; } -#endif static void full_flush_env(ErlNifEnv* env) { flush_env(env); -#ifdef ERTS_DIRTY_SCHEDULERS if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) /* Dirty nif call using shadow process struct */ erts_flush_dirty_shadow_proc(env->proc); 
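erts_call_dirty_nif above releases the main process lock (erts_proc_unlock) just before running the dirty NIF and re-acquires it afterwards, so a potentially long native call never holds a lock that other threads may need. A minimal sketch of that discipline with a plain pthread mutex; proc_like and run_long_callback are invented stand-ins, not ERTS APIs.

    /* Sketch: drop a lock around a potentially long-running callback and
     * re-acquire it before touching the protected state again. */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct proc_like {
        pthread_mutex_t main_lock;
        int state;                  /* protected by main_lock */
    };

    static int run_long_callback(int arg)
    {
        sleep(0);                   /* stands in for the dirty NIF body */
        return arg * 2;
    }

    static int call_dirty(struct proc_like *p, int arg)
    {
        int result;

        pthread_mutex_lock(&p->main_lock);
        p->state = arg;                       /* prepare under the lock */
        pthread_mutex_unlock(&p->main_lock);  /* do NOT hold it across the call */

        result = run_long_callback(arg);      /* may run for a long time */

        pthread_mutex_lock(&p->main_lock);    /* re-acquire before using state */
        p->state = result;
        pthread_mutex_unlock(&p->main_lock);
        return result;
    }

    int main(void)
    {
        struct proc_like p = { PTHREAD_MUTEX_INITIALIZER, 0 };
        int r = call_dirty(&p, 21);
        printf("result=%d state=%d\n", r, p.state);
        return 0;
    }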
-#endif } static void full_cache_env(ErlNifEnv* env) { -#ifdef ERTS_DIRTY_SCHEDULERS if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) erts_cache_dirty_shadow_proc(env->proc); -#endif cache_env(env); } @@ -564,7 +558,6 @@ void enif_clear_env(ErlNifEnv* env) free_tmp_objs(env); } -#ifdef ERTS_SMP #ifdef DEBUG static int enif_send_delay = 0; #define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0) @@ -590,9 +583,9 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) /* Only one thread at a time is allowed to flush trace messages, so we require the main lock to be held when doing the flush */ - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); + erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); msgq = c_p->trace_msg_q; @@ -611,7 +604,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) msgq->first = NULL; msgq->last = &msgq->first; msgq->len = 0; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); ASSERT(len != 0); @@ -624,13 +617,13 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) if (rp->common.id == c_p->common.id) rp_locks &= ~c_p_locks; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); reds += len; } else { erts_cleanup_messages(first); } reds += 1; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); + erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); msgq = msgq->next; } while (msgq); @@ -647,21 +640,18 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) } error: - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); return reds; } -#endif int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, ErlNifEnv* msg_env, ERL_NIF_TERM msg) { struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env; ErtsProcLocks rp_locks = 0; -#ifdef ERTS_SMP ErtsProcLocks lc_locks = 0; -#endif Process* rp; Process* c_p; ErtsMessage *mp; @@ -670,13 +660,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, execution_state(env, &c_p, &scheduler); -#ifndef ERTS_SMP - if (!scheduler) { - erts_exit(ERTS_ABORT_EXIT, - "enif_send: called from non-scheduler thread on non-SMP VM"); - return 0; - } -#endif if (scheduler > 0) { /* Normal scheduler */ rp = erts_proc_lookup(receiver); @@ -690,7 +673,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, return 0; if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } } @@ -699,7 +682,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, ERTS_P2P_FLG_INC_REFC); if (!rp) { if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); return 0; } } @@ -734,7 +717,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, full_cache_env(env); } else { - erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint_t state = erts_atomic32_read_nob(&rp->state); if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) { mp = erts_alloc_message(sz, &hp); ohp = sz == 0 ? NULL : &mp->hfrag.off_heap; @@ -760,7 +743,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, full_cache_env(env); } } -#ifdef ERTS_SMP else { /* This clause is taken when the nif is called in the context of a traced process. 
We do not know which locks we have @@ -771,7 +753,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, Process *t_p = env->tracee; - erts_smp_proc_lock(t_p, ERTS_PROC_LOCK_TRACE); + erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE); msgq = t_p->trace_msg_q; @@ -788,7 +770,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, #endif if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq || rp_locks & ERTS_PROC_LOCK_MSGQ || - erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) { + erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) { if (!msgq) { msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE, @@ -802,36 +784,33 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, msgq->next = t_p->trace_msg_q; t_p->trace_msg_q = msgq; - erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); erts_schedule_flush_trace_messages(t_p, 0); } else { msgq->len++; *msgq->last = mp; msgq->last = &mp->next; - erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); } goto done; } else { - erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); rp_locks &= ~ERTS_PROC_LOCK_TRACE; rp_locks |= ERTS_PROC_LOCK_MSGQ; } } -#endif /* ERTS_SMP */ erts_queue_message(rp, rp_locks, mp, msg, c_p ? c_p->common.id : am_undefined); -#ifdef ERTS_SMP done: if (c_p == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks & ~lc_locks) - erts_smp_proc_unlock(rp, rp_locks & ~lc_locks); + erts_proc_unlock(rp, rp_locks & ~lc_locks); if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); -#endif + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); if (scheduler <= 0) erts_proc_dec_refc(rp); @@ -861,15 +840,9 @@ enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port, if (scheduler > 0) prt = erts_port_lookup(to_port->port_id, iflags); else { -#ifdef ERTS_SMP if (ERTS_PROC_IS_EXITING(c_p)) return 0; prt = erts_thr_port_lookup(to_port->port_id, iflags); -#else - erts_exit(ERTS_ABORT_EXIT, - "enif_port_command: called from non-scheduler " - "thread on non-SMP VM"); -#endif } if (!prt) @@ -1852,18 +1825,11 @@ int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc) if (scheduler > 0) return !!erts_proc_lookup(proc->pid); else { -#ifdef ERTS_SMP Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0, ERTS_P2P_FLG_INC_REFC); if (rp) erts_proc_dec_refc(rp); return !!rp; -#else - erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: " - "called from non-scheduler thread " - "in non-smp emulator"); - return 0; -#endif } } @@ -1879,17 +1845,10 @@ int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port) if (scheduler > 0) return !!erts_port_lookup(port->port_id, iflags); else { -#ifdef ERTS_SMP Port *prt = erts_thr_port_lookup(port->port_id, iflags); if (prt) erts_port_dec_refc(prt); return !!prt; -#else - erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: " - "called from non-scheduler thread " - "in non-smp emulator"); - return 0; -#endif } } @@ -2097,7 +2056,7 @@ ErlNifResourceType* open_resource_type(ErlNifEnv* env, ErlNifResourceFlags op = flags; Eterm module_am, name_am; - ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(erts_thr_progress_is_blocking()); module_am = make_atom(env->mod_nif->mod->module); name_am = enif_make_atom(env, name_str); @@ -2236,19 +2195,14 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context) rp = erts_proc_lookup(mon->u.pid); } else { -#ifdef ERTS_SMP rp = erts_proc_lookup_inc_refc(mon->u.pid); -#else - ASSERT(!"nif monitor destruction in 
non-scheduler thread"); - rp = NULL; -#endif } if (!rp) { is_exiting = 1; } if (rp) { - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { is_exiting = 1; } else { @@ -2256,11 +2210,9 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context) ASSERT(rmon); is_exiting = 0; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); -#ifdef ERTS_SMP + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (ctx->scheduler <= 0) erts_proc_dec_refc(rp); -#endif } if (is_exiting) { ctx->resource->monitors->pending_failed_fire++; @@ -2286,34 +2238,7 @@ static void destroy_all_monitors(ErtsMonitor* monitors, ErtsResource* resource) } -#ifdef ERTS_SMP # define NIF_RESOURCE_DTOR &nif_resource_dtor -#else -# define NIF_RESOURCE_DTOR &nosmp_nif_resource_dtor_prologue - -/* - * NO-SMP: Always run resource destructor on scheduler thread - * as we may have to remove process monitors. - */ -static int nif_resource_dtor(Binary*); - -static void nosmp_nif_resource_dtor_scheduled(void* vbin) -{ - erts_bin_free((Binary*)vbin); -} - -static int nosmp_nif_resource_dtor_prologue(Binary* bin) -{ - if (is_scheduler()) { - return nif_resource_dtor(bin); - } - else { - erts_schedule_misc_aux_work(1, nosmp_nif_resource_dtor_scheduled, bin); - return 0; /* do not free */ - } -} - -#endif /* !ERTS_SMP */ static int nif_resource_dtor(Binary* bin) { @@ -2325,7 +2250,7 @@ static int nif_resource_dtor(Binary* bin) ErtsResourceMonitors* rm = resource->monitors; ASSERT(type->down); - erts_smp_mtx_lock(&rm->lock); + erts_mtx_lock(&rm->lock); ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0); if (rm->root) { ASSERT(!rm->is_dying); @@ -2347,11 +2272,11 @@ static int nif_resource_dtor(Binary* bin) */ ASSERT(!rm->is_dying); rm->is_dying = 1; - erts_smp_mtx_unlock(&rm->lock); + erts_mtx_unlock(&rm->lock); return 0; } - erts_smp_mtx_unlock(&rm->lock); - erts_smp_mtx_destroy(&rm->lock); + erts_mtx_unlock(&rm->lock); + erts_mtx_destroy(&rm->lock); } if (type->dtor != NULL) { @@ -2392,12 +2317,12 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref) ASSERT(rmp); ASSERT(resource->type->down); - erts_smp_mtx_lock(&rmp->lock); + erts_mtx_lock(&rmp->lock); rmon = erts_remove_monitor(&rmp->root, ref); if (!rmon) { int free_me = (--rmp->pending_failed_fire == 0) && rmp->is_dying; ASSERT(rmp->pending_failed_fire >= 0); - erts_smp_mtx_unlock(&rmp->lock); + erts_mtx_unlock(&rmp->lock); if (free_me) { ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) == 0); @@ -2413,10 +2338,10 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref) * we avoid calling 'down' and just silently remove the monitor. * This can happen even for non smp as destructor calls may be scheduled. 
*/ - erts_smp_mtx_unlock(&rmp->lock); + erts_mtx_unlock(&rmp->lock); } else { - erts_smp_mtx_unlock(&rmp->lock); + erts_mtx_unlock(&rmp->lock); ASSERT(rmon->u.pid == pid); erts_ref_to_driver_monitor(ref, &nif_monitor); @@ -2461,7 +2386,7 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz) erts_refc_inc(&resource->type->refc, 2); if (type->down) { resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs); - erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors", NIL, + erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL, ERTS_LOCK_FLAGS_CATEGORY_GENERIC); resource->monitors->root = NULL; resource->monitors->pending_failed_fire = 0; @@ -2660,7 +2585,6 @@ nif_export_restore(Process *c_p, NifExport *ep, Eterm res) } -#ifdef ERTS_DIRTY_SCHEDULERS /* * Finalize a dirty NIF call. This function is scheduled to cause the VM to @@ -2730,7 +2654,7 @@ schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp, execution_state(env, &proc, NULL); - (void) erts_smp_atomic32_read_bset_nob(&proc->state, + (void) erts_atomic32_read_bset_nob(&proc->state, (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC), (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND @@ -2768,7 +2692,7 @@ static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg, ASSERT(is_atom(mod) && is_atom(func)); ASSERT(fp); - (void) erts_smp_atomic32_read_bset_nob(&proc->state, + (void) erts_atomic32_read_bset_nob(&proc->state, (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC), dirty_psflg); @@ -2788,7 +2712,6 @@ static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv); } -#endif /* ERTS_DIRTY_SCHEDULERS */ /* * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. 
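schedule_dirty_nif and static_schedule_dirty_nif above mark the process with erts_atomic32_read_bset_nob which, as I read it, atomically replaces the bits selected by a mask with the corresponding bits of a new value and returns the previous state word. A compare-and-exchange loop is one straightforward way to express that; the sketch below uses C11 atomics and invented PSFLG_* constants.

    /* Sketch of an atomic "set these bits under this mask" update, as used
     * to flip the DIRTY_CPU/DIRTY_IO flags in the process state word above.
     * Illustration only, assuming read_bset means: atomically replace the
     * bits selected by `mask` with the matching bits of `val`. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PSFLG_DIRTY_CPU (1u << 0)
    #define PSFLG_DIRTY_IO  (1u << 1)
    #define PSFLG_ACTIVE    (1u << 2)

    static uint32_t atomic_read_bset(_Atomic uint32_t *var,
                                     uint32_t mask, uint32_t val)
    {
        uint32_t old = atomic_load(var);
        uint32_t new_;

        do {
            new_ = (old & ~mask) | (val & mask);
        } while (!atomic_compare_exchange_weak(var, &old, new_));
        return old;     /* previous value, like the ERTS helper returns */
    }

    int main(void)
    {
        _Atomic uint32_t state = PSFLG_ACTIVE | PSFLG_DIRTY_IO;

        /* Reschedule as CPU-bound: within the dirty-flag mask keep only the
         * CPU bit; bits outside the mask are untouched. */
        uint32_t prev = atomic_read_bset(&state,
                                         PSFLG_DIRTY_CPU | PSFLG_DIRTY_IO,
                                         PSFLG_DIRTY_CPU);
        printf("prev=%#x now=%#x\n", prev, atomic_load(&state));
        return 0;
    }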
It @@ -2862,24 +2785,20 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, if (scheduler <= 0) { if (scheduler == 0) enif_make_badarg(env); - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); } if (flags == 0) result = schedule(env, execute_nif, fp, proc->current->module, fun_name_atom, argc, argv); else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) { -#ifdef ERTS_DIRTY_SCHEDULERS result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv); -#else - result = enif_raise_exception(env, am_notsup); -#endif } else result = enif_make_badarg(env); if (scheduler < 0) - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); return result; } @@ -2895,12 +2814,10 @@ enif_thread_type(void) switch (esdp->type) { case ERTS_SCHED_NORMAL: return ERL_NIF_THR_NORMAL_SCHEDULER; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: return ERL_NIF_THR_DIRTY_CPU_SCHEDULER; case ERTS_SCHED_DIRTY_IO: return ERL_NIF_THR_DIRTY_IO_SCHEDULER; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); return -1; @@ -3221,27 +3138,19 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid, execution_state(env, NULL, &scheduler); -#ifdef ERTS_SMP if (scheduler > 0) /* Normal scheduler */ rp = erts_proc_lookup_raw(target_pid->pid); else rp = erts_proc_lookup_raw_inc_refc(target_pid->pid); -#else - if (scheduler <= 0) { - erts_exit(ERTS_ABORT_EXIT, "enif_monitor_process: called from " - "non-scheduler thread on non-SMP VM"); - } - rp = erts_proc_lookup(target_pid->pid); -#endif if (!rp) return 1; ref = erts_make_ref_in_buffer(tmp); - erts_smp_mtx_lock(&rsrc->monitors->lock); - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); - if (ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state)) { + erts_mtx_lock(&rsrc->monitors->lock); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); + if (ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state)) { retval = 1; } else { @@ -3249,13 +3158,11 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid, erts_add_monitor(&ERTS_P_MONITORS(rp), MON_NIF_TARGET, ref, (UWord)rsrc, NIL); retval = 0; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); - erts_smp_mtx_unlock(&rsrc->monitors->lock); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_mtx_unlock(&rsrc->monitors->lock); -#ifdef ERTS_SMP if (scheduler <= 0) erts_proc_dec_refc(rp); -#endif if (monitor) erts_ref_to_driver_monitor(ref,monitor); @@ -3283,35 +3190,27 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit ref = erts_driver_monitor_to_ref(ref_heap, monitor); - erts_smp_mtx_lock(&rsrc->monitors->lock); + erts_mtx_lock(&rsrc->monitors->lock); mon = erts_remove_monitor(&rsrc->monitors->root, ref); if (mon == NULL) { - erts_smp_mtx_unlock(&rsrc->monitors->lock); + erts_mtx_unlock(&rsrc->monitors->lock); return 1; } ASSERT(mon->type == MON_ORIGIN); ASSERT(is_internal_pid(mon->u.pid)); -#ifdef ERTS_SMP if (scheduler > 0) /* Normal scheduler */ rp = erts_proc_lookup(mon->u.pid); else rp = erts_proc_lookup_inc_refc(mon->u.pid); -#else - if (scheduler <= 0) { - erts_exit(ERTS_ABORT_EXIT, "enif_demonitor_process: called from " - "non-scheduler thread on non-SMP VM"); - } - rp = erts_proc_lookup(mon->u.pid); -#endif if (!rp) { is_exiting = 1; } else { - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { is_exiting = 1; } else { @@ -3319,17 +3218,15 @@ 
int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit ASSERT(rmon); is_exiting = 0; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); -#ifdef ERTS_SMP if (scheduler <= 0) erts_proc_dec_refc(rp); -#endif } if (is_exiting) { rsrc->monitors->pending_failed_fire++; } - erts_smp_mtx_unlock(&rsrc->monitors->lock); + erts_mtx_unlock(&rsrc->monitors->lock); if (rmon) { ASSERT(rmon->type == MON_NIF_TARGET); @@ -3912,8 +3809,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } /* Block system (is this the right place to do it?) */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); /* Find calling module */ ASSERT(BIF_P->current != NULL); @@ -4018,14 +3915,9 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) * dirty scheduler support, treat a non-zero flags field as * a load error. */ -#ifdef ERTS_DIRTY_SCHEDULERS if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND) ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u", f->flags, mod_atom, f->name, f->arity); -#else - ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.", - mod_atom, f->name, f->arity); -#endif } else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0]) < BEAM_NIF_MIN_FUNC_SZ) @@ -4098,7 +3990,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) (BeamInstr) BeamOp(op_i_generic_breakpoint)); g->orig_instr = (BeamInstr) BeamOp(op_call_nif); } -#ifdef ERTS_DIRTY_SCHEDULERS if (f->flags) { code_ptr[3] = (BeamInstr) f->fptr; code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ? @@ -4106,7 +3997,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) (BeamInstr) static_schedule_dirty_cpu_nif; } else -#endif code_ptr[1] = (BeamInstr) f->fptr; code_ptr[2] = (BeamInstr) lib; } @@ -4124,8 +4014,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) erts_sys_ddll_free_error(&errdesc); } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); erts_free(ERTS_ALC_T_TMP, lib_name); @@ -4138,7 +4028,7 @@ erts_unload_nif(struct erl_module_nif* lib) { ErlNifResourceType* rt; ErlNifResourceType* next; - ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(erts_thr_progress_is_blocking()); ASSERT(lib != NULL); ASSERT(lib->mod != NULL); @@ -4210,8 +4100,8 @@ Eterm erts_nif_call_function(Process *p, Process *tracee, break; ASSERT(i < mod->entry.num_of_funcs); if (p) - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN - || erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN + || erts_thr_progress_is_blocking()); #endif if (p) { /* This is almost a normal nif call like in beam_emu, diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index deadf435e9..0f3dfa797c 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -29,11 +29,13 @@ #include "error.h" #include "erl_thr_progress.h" #include "dtrace-wrapper.h" +#include "erl_binary.h" +#include "erl_bif_unique.h" Hash erts_dist_table; Hash erts_node_table; -erts_smp_rwmtx_t erts_dist_table_rwmtx; -erts_smp_rwmtx_t erts_node_table_rwmtx; +erts_rwmtx_t erts_dist_table_rwmtx; +erts_rwmtx_t erts_node_table_rwmtx; DistEntry *erts_hidden_dist_entries; DistEntry 
*erts_visible_dist_entries; @@ -57,6 +59,58 @@ static ErtsMonotonicTime node_tab_delete_delay; /* -- The distribution table ---------------------------------------------- */ +#define ErtsBin2DistEntry(B) \ + ((DistEntry *) ERTS_MAGIC_BIN_DATA((B))) +#define ErtsDistEntry2Bin(DEP) \ + ((Binary *) ERTS_MAGIC_BIN_FROM_DATA((DEP))) + +static ERTS_INLINE erts_aint_t +de_refc_read(DistEntry *dep, erts_aint_t min) +{ + return erts_refc_read(&ErtsDistEntry2Bin(dep)->intern.refc, min); +} + +static ERTS_INLINE erts_aint_t +de_refc_inc_read(DistEntry *dep, erts_aint_t min) +{ + return erts_refc_inctest(&ErtsDistEntry2Bin(dep)->intern.refc, min); +} + +static ERTS_INLINE void +de_refc_inc(DistEntry *dep, erts_aint_t min) +{ + erts_refc_inc(&ErtsDistEntry2Bin(dep)->intern.refc, min); +} + +static ERTS_INLINE void +de_refc_dec(DistEntry *dep, erts_aint_t min) +{ +#ifdef DEBUG + (void) erts_refc_read(&ErtsDistEntry2Bin(dep)->intern.refc, min+1); +#endif + erts_bin_release(ErtsDistEntry2Bin(dep)); +} + +static ERTS_INLINE erts_aint_t +de_refc_dec_read(DistEntry *dep, erts_aint_t min) +{ + return erts_refc_dectest(&ErtsDistEntry2Bin(dep)->intern.refc, min); +} + +void +erts_ref_dist_entry(DistEntry *dep) +{ + ASSERT(dep); + de_refc_inc(dep, 1); +} + +void +erts_deref_dist_entry(DistEntry *dep) +{ + ASSERT(dep); + de_refc_dec(dep, 0); +} + #ifdef DEBUG static int is_in_de_list(DistEntry *dep, DistEntry *dep_list) @@ -85,45 +139,66 @@ dist_table_cmp(void *dep1, void *dep2) static void* dist_table_alloc(void *dep_tmpl) { +#ifdef DEBUG + erts_aint_t refc; +#endif Eterm sysname; + Binary *bin; DistEntry *dep; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; sysname = ((DistEntry *) dep_tmpl)->sysname; - dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry)); + + bin = erts_create_magic_binary_x(sizeof(DistEntry), + erts_dist_entry_destructor, + ERTS_ALC_T_DIST_ENTRY, + 0); + dep = ErtsBin2DistEntry(bin); dist_entries++; +#ifdef DEBUG + refc = +#else + (void) +#endif + de_refc_dec_read(dep, -1); + ASSERT(refc == -1); + dep->prev = NULL; - erts_smp_refc_init(&dep->refc, -1); - erts_smp_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname, + erts_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname, ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); dep->sysname = sysname; dep->cid = NIL; + erts_atomic_init_nob(&dep->input_handler, (erts_aint_t) NIL); dep->connection_id = 0; dep->status = 0; dep->flags = 0; dep->version = 0; - erts_smp_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname, + erts_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname, ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); dep->node_links = NULL; dep->nlinks = NULL; dep->monitors = NULL; - erts_smp_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname, + erts_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname, ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); - dep->qflgs = 0; - dep->qsize = 0; + erts_atomic32_init_nob(&dep->qflgs, 0); + erts_atomic_init_nob(&dep->qsize, 0); + erts_atomic64_init_nob(&dep->in, 0); + erts_atomic64_init_nob(&dep->out, 0); dep->out_queue.first = NULL; dep->out_queue.last = NULL; dep->suspended = NULL; + dep->tmp_out_queue.first = NULL; + dep->tmp_out_queue.last = NULL; dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; - erts_smp_atomic_init_nob(&dep->dist_cmd_scheduled, 0); + 
erts_atomic_init_nob(&dep->dist_cmd_scheduled, 0); erts_port_task_handle_init(&dep->dist_cmd); dep->send = NULL; dep->cache = NULL; @@ -174,14 +249,14 @@ dist_table_free(void *vdep) erts_no_of_not_connected_dist_entries--; ASSERT(!dep->cache); - erts_smp_rwmtx_destroy(&dep->rwmtx); - erts_smp_mtx_destroy(&dep->lnk_mtx); - erts_smp_mtx_destroy(&dep->qlock); + erts_rwmtx_destroy(&dep->rwmtx); + erts_mtx_destroy(&dep->lnk_mtx); + erts_mtx_destroy(&dep->qlock); #ifdef DEBUG sys_memset(vdep, 0x77, sizeof(DistEntry)); #endif - erts_free(ERTS_ALC_T_DIST_ENTRY, (void *) dep); + erts_bin_free(ErtsDistEntry2Bin(dep)); ASSERT(dist_entries > 0); dist_entries--; @@ -193,25 +268,58 @@ erts_dist_table_info(fmtfn_t to, void *to_arg) { int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); hash_info(to, to_arg, &erts_dist_table); if (lock) - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); +} + +static ERTS_INLINE DistEntry *find_dist_entry(Eterm sysname, + int inc_refc, + int connected_only) +{ + DistEntry *res; + DistEntry de; + de.sysname = sysname; + erts_rwmtx_rlock(&erts_dist_table_rwmtx); + res = hash_get(&erts_dist_table, (void *) &de); + if (res) { + if (connected_only && is_nil(res->cid)) + res = NULL; + else { + int pend_delete; + erts_aint_t refc; + if (inc_refc) { + refc = de_refc_inc_read(res, 1); + pend_delete = refc < 2; + } + else { + refc = de_refc_read(res, 0); + pend_delete = refc < 1; + } + if (pend_delete) /* Pending delete */ + de_refc_inc(res, 1); + } + } + erts_rwmtx_runlock(&erts_dist_table_rwmtx); + return res; } DistEntry * erts_channel_no_to_dist_entry(Uint cno) { + /* + * Does NOT increase reference count! + */ + /* * For this node (and previous incarnations of this node), * ERST_INTERNAL_CHANNEL_NO (will always be 0 I guess) is used as * channel no. For other nodes, the atom index of the atom corresponding * to the node name is used as channel no. */ - if(cno == ERST_INTERNAL_CHANNEL_NO) { - erts_smp_refc_inc(&erts_this_dist_entry->refc, 2); + if (cno == ERST_INTERNAL_CHANNEL_NO) return erts_this_dist_entry; - } if((cno > MAX_ATOM_INDEX) || (cno >= atom_table_size()) @@ -220,83 +328,100 @@ erts_channel_no_to_dist_entry(Uint cno) /* cno is a valid atom index; find corresponding dist entry (if there is one) */ - return erts_find_dist_entry(make_atom(cno)); + return find_dist_entry(make_atom(cno), 0, 0); } - DistEntry * erts_sysname_to_connected_dist_entry(Eterm sysname) { - DistEntry de; - DistEntry *res_dep; - de.sysname = sysname; - - if(erts_this_dist_entry->sysname == sysname) { - erts_smp_refc_inc(&erts_this_dist_entry->refc, 2); + /* + * Does NOT increase reference count! 
+ */ + if(erts_this_dist_entry->sysname == sysname) return erts_this_dist_entry; - } - - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); - res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de); - if (res_dep) { - erts_aint_t refc = erts_smp_refc_inctest(&res_dep->refc, 1); - if (refc < 2) /* Pending delete */ - erts_smp_refc_inc(&res_dep->refc, 1); - } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); - if (res_dep) { - int deref; - erts_smp_rwmtx_rlock(&res_dep->rwmtx); - deref = is_nil(res_dep->cid); - erts_smp_rwmtx_runlock(&res_dep->rwmtx); - if (deref) { - erts_deref_dist_entry(res_dep); - res_dep = NULL; - } - } - return res_dep; + return find_dist_entry(sysname, 0, 1); } DistEntry *erts_find_or_insert_dist_entry(Eterm sysname) { + /* + * This function DOES increase reference count! + */ DistEntry *res; DistEntry de; erts_aint_t refc; - res = erts_find_dist_entry(sysname); + res = find_dist_entry(sysname, 1, 0); if (res) return res; de.sysname = sysname; - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + erts_rwmtx_rwlock(&erts_dist_table_rwmtx); res = hash_put(&erts_dist_table, (void *) &de); - refc = erts_smp_refc_inctest(&res->refc, 0); + refc = de_refc_inc_read(res, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&res->refc, 1); - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + de_refc_inc(res, 1); + erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); return res; } DistEntry *erts_find_dist_entry(Eterm sysname) { - DistEntry *res; - DistEntry de; - de.sysname = sysname; - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); - res = hash_get(&erts_dist_table, (void *) &de); - if (res) { - erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 1); - if (refc < 2) /* Pending delete */ - erts_smp_refc_inc(&res->refc, 1); - } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); - return res; + /* + * Does NOT increase reference count! + */ + return find_dist_entry(sysname, 0, 0); } -static void try_delete_dist_entry(void *vdep) +DistEntry * +erts_dhandle_to_dist_entry(Eterm dhandle) { - DistEntry *dep = (DistEntry *) vdep; + Binary *bin; + if (!is_internal_magic_ref(dhandle)) + return NULL; + bin = erts_magic_ref2bin(dhandle); + if (ERTS_MAGIC_BIN_DESTRUCTOR(bin) != erts_dist_entry_destructor) + return NULL; + return ErtsBin2DistEntry(bin); +} + +Eterm +erts_make_dhandle(Process *c_p, DistEntry *dep) +{ + Binary *bin; + Eterm *hp; + + bin = ErtsDistEntry2Bin(dep); + ASSERT(bin); + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_dist_entry_destructor); + hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE); + return erts_mk_magic_ref(&hp, &c_p->off_heap, bin); +} + +static void try_delete_dist_entry(void *vbin); + +static void +prepare_try_delete_dist_entry(void *vbin) +{ + Binary *bin = (Binary *) vbin; + DistEntry *dep = ErtsBin2DistEntry(bin); + Uint size; erts_aint_t refc; - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + refc = de_refc_read(dep, 0); + if (refc > 0) + return; + + size = ERTS_MAGIC_BIN_SIZE(sizeof(DistEntry)); + erts_schedule_thr_prgr_later_cleanup_op(try_delete_dist_entry, + vbin, &dep->later_op, size); +} + +static void try_delete_dist_entry(void *vbin) +{ + Binary *bin = (Binary *) vbin; + DistEntry *dep = ErtsBin2DistEntry(bin); + erts_aint_t refc; + + erts_rwmtx_rwlock(&erts_dist_table_rwmtx); /* * Another thread might have looked up this dist entry after * we decided to delete it (refc became zero). If so, the other @@ -312,26 +437,39 @@ static void try_delete_dist_entry(void *vdep) * * If refc > 0, the entry is in use. Keep the entry. 
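The dist-table changes above drop the dedicated refc field from DistEntry: the entry is allocated inside a magic binary (erts_create_magic_binary_x), ErtsBin2DistEntry and ErtsDistEntry2Bin convert between payload and container, and a dhandle is just a magic reference to that binary, so any term that holds the handle keeps the entry alive. A minimal sketch of the container pattern follows; magic_bin, dist_entry and the helpers are invented, and a bare refcount header stands in for the ERTS binary machinery.

    /* Sketch: a payload embedded in a refcounted container, with macro
     * conversions in both directions. Illustration only. */
    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct magic_bin {              /* stands in for Binary + magic header */
        atomic_long refc;
        void (*destructor)(struct magic_bin *);
        char data[];                /* payload starts here */
    };

    struct dist_entry {             /* stands in for DistEntry */
        const char *sysname;
        int connected;
    };

    #define BIN2ENTRY(B)  ((struct dist_entry *) (B)->data)
    #define ENTRY2BIN(E)  ((struct magic_bin *) \
                           ((char *) (E) - offsetof(struct magic_bin, data)))

    static void entry_destructor(struct magic_bin *bin)
    {
        printf("freeing entry for %s\n", BIN2ENTRY(bin)->sysname);
        free(bin);
    }

    static struct dist_entry *entry_create(const char *sysname)
    {
        struct magic_bin *bin = malloc(sizeof(*bin) + sizeof(struct dist_entry));
        atomic_init(&bin->refc, 1);
        bin->destructor = entry_destructor;
        BIN2ENTRY(bin)->sysname = sysname;
        BIN2ENTRY(bin)->connected = 0;
        return BIN2ENTRY(bin);
    }

    static void entry_ref(struct dist_entry *e)   /* cf. erts_ref_dist_entry */
    {
        atomic_fetch_add(&ENTRY2BIN(e)->refc, 1);
    }

    static void entry_deref(struct dist_entry *e) /* cf. erts_deref_dist_entry */
    {
        struct magic_bin *bin = ENTRY2BIN(e);
        if (atomic_fetch_sub(&bin->refc, 1) == 1)
            bin->destructor(bin);
    }

    int main(void)
    {
        struct dist_entry *e = entry_create("foo@bar");
        entry_ref(e);       /* e.g. a handle term keeps it alive */
        entry_deref(e);
        entry_deref(e);     /* last deref runs the destructor */
        return 0;
    }

The payload-to-container step is the usual offsetof arithmetic, which is what the ErtsDistEntry2Bin macro above appears to rely on as well.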
*/ - refc = erts_smp_refc_dectest(&dep->refc, -1); + refc = de_refc_dec_read(dep, -1); if (refc == -1) (void) hash_erase(&erts_dist_table, (void *) dep); - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); - - if (refc == 0) - erts_schedule_delete_dist_entry(dep); + erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); + + if (refc == 0) { + if (node_tab_delete_delay == 0) + prepare_try_delete_dist_entry(vbin); + else if (node_tab_delete_delay > 0) + erts_start_timer_callback(node_tab_delete_delay, + prepare_try_delete_dist_entry, + vbin); + } } -void erts_schedule_delete_dist_entry(DistEntry *dep) +int erts_dist_entry_destructor(Binary *bin) { - ASSERT(dep != erts_this_dist_entry); - if (dep != erts_this_dist_entry) { - if (node_tab_delete_delay == 0) - try_delete_dist_entry((void *) dep); - else if (node_tab_delete_delay > 0) - erts_start_timer_callback(node_tab_delete_delay, - try_delete_dist_entry, - (void *) dep); - } + DistEntry *dep = ErtsBin2DistEntry(bin); + erts_aint_t refc; + + refc = de_refc_read(dep, -1); + + if (refc == -1) + return 1; /* Allow deallocation of structure... */ + + if (node_tab_delete_delay == 0) + prepare_try_delete_dist_entry((void *) bin); + else if (node_tab_delete_delay > 0) + erts_start_timer_callback(node_tab_delete_delay, + prepare_try_delete_dist_entry, + (void *) bin); + + return 0; } Uint @@ -346,7 +484,7 @@ erts_dist_table_size(void) int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); #ifdef DEBUG hash_get_info(&hi, &erts_dist_table); ASSERT(dist_entries == hi.objs); @@ -373,18 +511,18 @@ erts_dist_table_size(void) + dist_entries*sizeof(DistEntry) + erts_dist_cache_size()); if (lock) - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); return res; } void erts_set_dist_entry_not_connected(DistEntry *dep) { - ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); + erts_rwmtx_rwlock(&erts_dist_table_rwmtx); ASSERT(dep != erts_this_dist_entry); - ASSERT(is_internal_port(dep->cid)); + ASSERT(is_internal_port(dep->cid) || is_internal_pid(dep->cid)); if(dep->flags & DFLAG_PUBLISHED) { if(dep->prev) { @@ -428,18 +566,18 @@ erts_set_dist_entry_not_connected(DistEntry *dep) } erts_not_connected_dist_entries = dep; erts_no_of_not_connected_dist_entries++; - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); } void erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags) { - ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); + erts_rwmtx_rwlock(&erts_dist_table_rwmtx); ASSERT(dep != erts_this_dist_entry); ASSERT(is_nil(dep->cid)); - ASSERT(is_internal_port(cid)); + ASSERT(is_internal_port(cid) || is_internal_pid(cid)); if(dep->prev) { ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries)); @@ -459,10 +597,19 @@ erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags) dep->status |= ERTS_DE_SFLG_CONNECTED; dep->flags = flags; dep->cid = cid; + erts_atomic_set_nob(&dep->input_handler, + (erts_aint_t) cid); + dep->connection_id++; dep->connection_id &= ERTS_DIST_EXT_CON_ID_MASK; dep->prev = NULL; + erts_atomic64_set_nob(&dep->in, 0); + erts_atomic64_set_nob(&dep->out, 0); + erts_atomic32_set_nob(&dep->qflgs, + (is_internal_port(cid) + ? 
ERTS_DE_QFLG_PORT_CTRL + : ERTS_DE_QFLG_PROC_CTRL)); if(flags & DFLAG_PUBLISHED) { dep->next = erts_visible_dist_entries; if(erts_visible_dist_entries) { @@ -481,7 +628,7 @@ erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags) erts_hidden_dist_entries = dep; erts_no_of_hidden_dist_entries++; } - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); } /* -- Node table --------------------------------------------------------- */ @@ -519,7 +666,7 @@ node_table_alloc(void *venp_tmpl) node_entries++; - erts_smp_refc_init(&enp->refc, -1); + erts_refc_init(&enp->refc, -1); enp->creation = ((ErlNode *) venp_tmpl)->creation; enp->sysname = ((ErlNode *) venp_tmpl)->sysname; enp->dist_entry = erts_find_or_insert_dist_entry(((ErlNode *) venp_tmpl)->sysname); @@ -532,7 +679,7 @@ node_table_free(void *venp) { ErlNode *enp = (ErlNode *) venp; - ERTS_SMP_LC_ASSERT(enp != erts_this_node || erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(enp != erts_this_node || erts_thr_progress_is_blocking()); erts_deref_dist_entry(enp->dist_entry); #ifdef DEBUG @@ -553,14 +700,14 @@ erts_node_table_size(void) #endif int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); + erts_rwmtx_rlock(&erts_node_table_rwmtx); #ifdef DEBUG hash_get_info(&hi, &erts_node_table); ASSERT(node_entries == hi.objs); #endif res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode); if (lock) - erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); + erts_rwmtx_runlock(&erts_node_table_rwmtx); return res; } @@ -569,10 +716,10 @@ erts_node_table_info(fmtfn_t to, void *to_arg) { int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); + erts_rwmtx_rlock(&erts_node_table_rwmtx); hash_info(to, to_arg, &erts_node_table); if (lock) - erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); + erts_rwmtx_runlock(&erts_node_table_rwmtx); } @@ -583,26 +730,26 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation) ne.sysname = sysname; ne.creation = creation; - erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); + erts_rwmtx_rlock(&erts_node_table_rwmtx); res = hash_get(&erts_node_table, (void *) &ne); if (res && res != erts_this_node) { - erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 0); + erts_aint_t refc = erts_refc_inctest(&res->refc, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&res->refc, 1); + erts_refc_inc(&res->refc, 1); } - erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); + erts_rwmtx_runlock(&erts_node_table_rwmtx); if (res) return res; - erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx); + erts_rwmtx_rwlock(&erts_node_table_rwmtx); res = hash_put(&erts_node_table, (void *) &ne); ASSERT(res); if (res != erts_this_node) { - erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 0); + erts_aint_t refc = erts_refc_inctest(&res->refc, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&res->refc, 1); + erts_refc_inc(&res->refc, 1); } - erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx); + erts_rwmtx_rwunlock(&erts_node_table_rwmtx); return res; } @@ -611,7 +758,7 @@ static void try_delete_node(void *venp) ErlNode *enp = (ErlNode *) venp; erts_aint_t refc; - erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx); + erts_rwmtx_rwlock(&erts_node_table_rwmtx); /* * Another thread might have looked up this node after we * decided to delete it (refc became zero). If so, the other @@ -627,10 +774,10 @@ static void try_delete_node(void *venp) * * If refc > 0, the entry is in use. Keep the entry. 
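The comment just above (try_delete_node) describes the same resurrect check that try_delete_dist_entry performs earlier in this file: take the table's write lock, decrement the reference count all the way to -1, and erase the entry only if nothing looked it up in the meantime; a lookup that observes a pending delete bumps the count one extra step to compensate. A compact sketch of that check, with a plain mutex standing in for the rwlock and an in_table flag standing in for the hash table; all names are invented.

    /* Sketch of "decrement to -1 under the table lock, skip the erase if
     * the entry was resurrected". Illustration only. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct entry {
        atomic_long refc;   /* number of external references */
        int in_table;
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Lookup under the table lock; a pre-increment count below 1 means a
     * delete is already pending, so add one more to compensate for it. */
    static struct entry *lookup(struct entry *e)
    {
        struct entry *res = NULL;
        pthread_mutex_lock(&table_lock);
        if (e->in_table) {
            if (atomic_fetch_add(&e->refc, 1) < 1)   /* pending delete */
                atomic_fetch_add(&e->refc, 1);
            res = e;
        }
        pthread_mutex_unlock(&table_lock);
        return res;
    }

    /* Scheduled earlier, when the count dropped to zero. */
    static void try_delete(struct entry *e)
    {
        pthread_mutex_lock(&table_lock);
        /* Old value 0 means the new value is -1: nobody resurrected it. */
        if (atomic_fetch_sub(&e->refc, 1) == 0) {
            e->in_table = 0;                 /* erase from the table */
            printf("erased\n");
        } else {
            printf("resurrected, kept (refc=%ld)\n",
                   (long) atomic_load(&e->refc));
        }
        pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
        struct entry e;
        e.in_table = 1;
        atomic_init(&e.refc, 0);   /* unreferenced; a delete was scheduled */
        lookup(&e);                /* concurrent lookup resurrects it */
        try_delete(&e);            /* sees a positive count and keeps it */
        return 0;
    }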
*/ - refc = erts_smp_refc_dectest(&enp->refc, -1); + refc = erts_refc_dectest(&enp->refc, -1); if (refc == -1) (void) hash_erase(&erts_node_table, (void *) enp); - erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx); + erts_rwmtx_rwunlock(&erts_node_table_rwmtx); if (refc == 0) erts_schedule_delete_node(enp); @@ -673,7 +820,7 @@ static void print_node(void *venp, void *vpndp) erts_print(pndp->to, pndp->to_arg, " %d", enp->creation); #ifdef DEBUG erts_print(pndp->to, pndp->to_arg, " (refc=%ld)", - erts_smp_refc_read(&enp->refc, 0)); + erts_refc_read(&enp->refc, 0)); #endif pndp->no_sysname++; } @@ -696,13 +843,13 @@ void erts_print_node_info(fmtfn_t to, pnd.no_total = 0; if (lock) - erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); + erts_rwmtx_rlock(&erts_node_table_rwmtx); hash_foreach(&erts_node_table, print_node, (void *) &pnd); if (pnd.no_sysname != 0) { erts_print(to, to_arg, "\n"); } if (lock) - erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); + erts_rwmtx_runlock(&erts_node_table_rwmtx); if(no_sysname) *no_sysname = pnd.no_sysname; @@ -715,20 +862,19 @@ void erts_print_node_info(fmtfn_t to, void erts_set_this_node(Eterm sysname, Uint creation) { - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking()); - ASSERT(erts_smp_refc_read(&erts_this_dist_entry->refc, 2)); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); + ASSERT(2 <= de_refc_read(erts_this_dist_entry, 2)); - if (erts_smp_refc_dectest(&erts_this_node->refc, 0) == 0) + if (erts_refc_dectest(&erts_this_node->refc, 0) == 0) try_delete_node(erts_this_node); - if (erts_smp_refc_dectest(&erts_this_dist_entry->refc, 0) == 0) - try_delete_dist_entry(erts_this_dist_entry); + erts_deref_dist_entry(erts_this_dist_entry); erts_this_node = NULL; /* to make sure refc is bumped for this node */ erts_this_node = erts_find_or_insert_node(sysname, creation); erts_this_dist_entry = erts_this_node->dist_entry; - erts_smp_refc_inc(&erts_this_dist_entry->refc, 2); + erts_ref_dist_entry(erts_this_dist_entry); erts_this_node_sysname = erts_this_node_sysname_BUFFER; erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER), @@ -747,7 +893,7 @@ erts_delayed_node_table_gc(void) void erts_init_node_tables(int dd_sec) { - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; HashFunctions f; ErlNode node_tmpl; @@ -758,12 +904,12 @@ void erts_init_node_tables(int dd_sec) orig_node_tab_delete_delay = node_tab_delete_delay; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL, + erts_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); - erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL, + erts_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); f.hash = (H_FUN) dist_table_hash; @@ -792,14 +938,14 @@ void erts_init_node_tables(int dd_sec) node_tmpl.creation = 0; erts_this_node = hash_put(&erts_node_table, &node_tmpl); /* +1 for erts_this_node */ - erts_smp_refc_init(&erts_this_node->refc, 1); + erts_refc_init(&erts_this_node->refc, 1); ASSERT(erts_this_node->dist_entry != NULL); erts_this_dist_entry = erts_this_node->dist_entry; /* +1 for 
erts_this_dist_entry */ - /* +1 for erts_this_node->dist_entry */ - erts_smp_refc_init(&erts_this_dist_entry->refc, 2); + erts_ref_dist_entry(erts_this_dist_entry); + ASSERT(2 == de_refc_read(erts_this_dist_entry, 2)); erts_this_node_sysname = erts_this_node_sysname_BUFFER; erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER), @@ -808,18 +954,16 @@ void erts_init_node_tables(int dd_sec) references_atoms_need_init = 1; } -#ifdef ERTS_SMP #ifdef ERTS_ENABLE_LOCK_CHECK int erts_lc_is_de_rwlocked(DistEntry *dep) { - return erts_smp_lc_rwmtx_is_rwlocked(&dep->rwmtx); + return erts_lc_rwmtx_is_rwlocked(&dep->rwmtx); } int erts_lc_is_de_rlocked(DistEntry *dep) { - return erts_smp_lc_rwmtx_is_rlocked(&dep->rwmtx); + return erts_lc_rwmtx_is_rlocked(&dep->rwmtx); } #endif -#endif #ifdef ERTS_ENABLE_LOCK_COUNT @@ -841,10 +985,10 @@ static void erts_lcnt_enable_dist_lock_count(void *dep_raw, void *enable) { } void erts_lcnt_update_distribution_locks(int enable) { - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); hash_foreach(&erts_dist_table, erts_lcnt_enable_dist_lock_count, (void*)(UWord)enable); - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); } #endif @@ -878,6 +1022,7 @@ static Eterm AM_node_references; static Eterm AM_system; static Eterm AM_timer; static Eterm AM_delayed_delete_timer; +static Eterm AM_thread_progress_delete_timer; static void setup_reference_table(void); static Eterm reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp); @@ -946,8 +1091,8 @@ erts_get_node_and_dist_references(struct process *proc) Uint *endp; #endif - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); /* No need to lock any thing since we are alone... 
*/ if (references_atoms_need_init) { @@ -967,6 +1112,7 @@ erts_get_node_and_dist_references(struct process *proc) INIT_AM(timer); INIT_AM(system); INIT_AM(delayed_delete_timer); + INIT_AM(thread_progress_delete_timer); references_atoms_need_init = 0; } @@ -989,8 +1135,8 @@ erts_get_node_and_dist_references(struct process *proc) delete_reference_table(); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); return res; } @@ -1150,6 +1296,10 @@ insert_offheap2(ErlOffHeap *oh, void *arg) insert_offheap(oh, a->type, a->id); } +#define ErtsIsDistEntryBinary(Bin) \ + (((Bin)->intern.flags & BIN_FLAG_MAGIC) \ + && ERTS_MAGIC_BIN_DESTRUCTOR((Bin)) == erts_dist_entry_destructor) + static void insert_offheap(ErlOffHeap *oh, int type, Eterm id) { @@ -1160,7 +1310,10 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id) for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) { switch (thing_subtag(u.hdr->thing_word)) { case REF_SUBTAG: - if(IsMatchProgBinary(u.mref->mb)) { + if (ErtsIsDistEntryBinary(u.mref->mb)) + insert_dist_entry(ErtsBin2DistEntry(u.mref->mb), + type, id, 0); + else if(IsMatchProgBinary(u.mref->mb)) { InsertedBin *ib; int insert_bin = 1; for (ib = inserted_bins; ib; ib = ib->next) @@ -1292,39 +1445,45 @@ init_referred_dist(void *dist, void *unused) no_referred_dists++; } -#ifdef ERTS_SMP static void insert_sys_msg(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp) { insert_offheap(&bp->off_heap, HEAP_REF, to); } -#endif static void insert_delayed_delete_node(void *state, ErtsMonotonicTime timeout_pos, void *vnp) { - DeclareTmpHeapNoproc(heap,3); - UseTmpHeapNoproc(3); + Eterm heap[3]; insert_node((ErlNode *) vnp, SYSTEM_REF, TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer)); - UnUseTmpHeapNoproc(3); +} + +static void +insert_thr_prgr_delete_dist_entry(void *arg, ErtsThrPrgrVal thr_prgr, void *vbin) +{ + DistEntry *dep = ErtsBin2DistEntry(vbin); + Eterm heap[3]; + insert_dist_entry(dep, + SYSTEM_REF, + TUPLE2(&heap[0], AM_system, AM_thread_progress_delete_timer), + 0); } static void insert_delayed_delete_dist_entry(void *state, ErtsMonotonicTime timeout_pos, - void *vdep) + void *vbin) { - DeclareTmpHeapNoproc(heap,3); - UseTmpHeapNoproc(3); - insert_dist_entry((DistEntry *) vdep, + DistEntry *dep = ErtsBin2DistEntry(vbin); + Eterm heap[3]; + insert_dist_entry(dep, SYSTEM_REF, TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer), 0); - UnUseTmpHeapNoproc(3); } static void @@ -1358,9 +1517,12 @@ setup_reference_table(void) erts_debug_callback_timer_foreach(try_delete_node, insert_delayed_delete_node, NULL); - erts_debug_callback_timer_foreach(try_delete_dist_entry, + erts_debug_callback_timer_foreach(prepare_try_delete_dist_entry, insert_delayed_delete_dist_entry, NULL); + erts_debug_later_op_foreach(try_delete_dist_entry, + insert_thr_prgr_delete_dist_entry, + NULL); UseTmpHeapNoproc(3); insert_node(erts_this_node, @@ -1381,9 +1543,7 @@ setup_reference_table(void) int mli; ErtsMessage *msg_list[] = { proc->msg.first, -#ifdef ERTS_SMP proc->msg_inq.first, -#endif proc->msg_frag}; /* Insert Heap */ @@ -1427,12 +1587,18 @@ setup_reference_table(void) insert_links(ERTS_P_LINKS(proc), proc->common.id); if (ERTS_P_MONITORS(proc)) insert_monitors(ERTS_P_MONITORS(proc), proc->common.id); + { + DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc); + if (dep) + insert_dist_entry(dep, + CTRL_REF, + proc->common.id, + 0); + } } } -#ifdef ERTS_SMP erts_foreach_sys_msg_in_q(insert_sys_msg); 
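insert_offheap above now spots dist-entry handles while walking a process's off-heap list: ErtsIsDistEntryBinary checks BIN_FLAG_MAGIC and compares the binary's destructor against erts_dist_entry_destructor, in effect using the destructor pointer as a type tag. A small illustration of that idea with invented names:

    /* Sketch: use the destructor function pointer as a type tag when
     * walking a heterogeneous list of containers. Illustration only. */
    #include <stdio.h>
    #include <stdlib.h>

    struct box {
        void (*destructor)(struct box *);
        struct box *next;
        int payload;
    };

    static void dist_like_destructor(struct box *b)  { free(b); }
    static void other_destructor(struct box *b)      { free(b); }

    #define IS_DIST_LIKE(B) ((B)->destructor == dist_like_destructor)

    static struct box *mk_box(void (*d)(struct box *), int payload,
                              struct box *next)
    {
        struct box *b = malloc(sizeof(*b));
        b->destructor = d;
        b->next = next;
        b->payload = payload;
        return b;
    }

    int main(void)
    {
        struct box *list = mk_box(other_destructor, 1,
                           mk_box(dist_like_destructor, 2,
                           mk_box(other_destructor, 3, NULL)));

        for (struct box *b = list; b; b = b->next)
            if (IS_DIST_LIKE(b))
                printf("found dist-like box, payload=%d\n", b->payload);

        for (struct box *b = list; b; ) {       /* clean up the whole list */
            struct box *next = b->next;
            b->destructor(b);
            b = next;
        }
        return 0;
    }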
-#endif /* Insert all ports */ max = erts_ptab_max(&erts_port); @@ -1666,7 +1832,7 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp) tup = MK_2TUP(referred_nodes[i].node->sysname, MK_UINT(referred_nodes[i].node->creation)); - tup = MK_3TUP(tup, MK_UINT(erts_smp_refc_read(&referred_nodes[i].node->refc, 0)), nril); + tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 0)), nril); nl = MK_CONS(tup, nl); } @@ -1727,7 +1893,7 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp) /* DistList = [{Dist, Refc, ReferenceIdList}] */ tup = MK_3TUP(referred_dists[i].dist->sysname, - MK_UINT(erts_smp_refc_read(&referred_dists[i].dist->refc, 0)), + MK_UINT(de_refc_read(referred_dists[i].dist, 0)), dril); dl = MK_CONS(tup, dl); } @@ -1786,12 +1952,12 @@ delete_reference_table(void) void erts_debug_test_node_tab_delayed_delete(Sint64 millisecs) { - erts_smp_thr_progress_block(); + erts_thr_progress_block(); if (millisecs < 0) node_tab_delete_delay = orig_node_tab_delete_delay; else node_tab_delete_delay = millisecs; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); } diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h index 91bcb4fce1..3bba673435 100644 --- a/erts/emulator/beam/erl_node_tables.h +++ b/erts/emulator/beam/erl_node_tables.h @@ -44,10 +44,12 @@ #include "erl_alloc.h" #include "erl_process.h" #include "erl_monitors.h" -#include "erl_smp.h" #define ERTS_PORT_TASK_ONLY_BASIC_TYPES__ #include "erl_port_task.h" #undef ERTS_PORT_TASK_ONLY_BASIC_TYPES__ +#define ERTS_BINARY_TYPES_ONLY__ +#include "erl_binary.h" +#undef ERTS_BINARY_TYPES_ONLY__ #define ERTS_NODE_TAB_DELAY_GC_DEFAULT (60) #define ERTS_NODE_TAB_DELAY_GC_MAX (100*1000*1000) @@ -61,11 +63,17 @@ #define ERTS_DE_SFLGS_ALL (ERTS_DE_SFLG_CONNECTED \ | ERTS_DE_SFLG_EXITING) -#define ERTS_DE_QFLG_BUSY (((Uint32) 1) << 0) -#define ERTS_DE_QFLG_EXIT (((Uint32) 1) << 1) +#define ERTS_DE_QFLG_BUSY (((erts_aint32_t) 1) << 0) +#define ERTS_DE_QFLG_EXIT (((erts_aint32_t) 1) << 1) +#define ERTS_DE_QFLG_REQ_INFO (((erts_aint32_t) 1) << 2) +#define ERTS_DE_QFLG_PORT_CTRL (((erts_aint32_t) 1) << 3) +#define ERTS_DE_QFLG_PROC_CTRL (((erts_aint32_t) 1) << 4) #define ERTS_DE_QFLGS_ALL (ERTS_DE_QFLG_BUSY \ - | ERTS_DE_QFLG_EXIT) + | ERTS_DE_QFLG_EXIT \ + | ERTS_DE_QFLG_REQ_INFO \ + | ERTS_DE_QFLG_PORT_CTRL \ + | ERTS_DE_QFLG_PROC_CTRL) #if defined(ARCH_64) #define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713f713f713UL) @@ -107,12 +115,13 @@ typedef struct dist_entry_ { HashBucket hash_bucket; /* Hash bucket */ struct dist_entry_ *next; /* Next entry in dist_table (not sorted) */ struct dist_entry_ *prev; /* Previous entry in dist_table (not sorted) */ - erts_smp_refc_t refc; /* Reference count */ - erts_smp_rwmtx_t rwmtx; /* Protects all fields below until lck_mtx. */ + erts_rwmtx_t rwmtx; /* Protects all fields below until lck_mtx. 
*/ Eterm sysname; /* name@host atom for efficiency */ Uint32 creation; /* creation of connected node */ - Eterm cid; /* connection handler (pid or port), NIL == free */ + erts_atomic_t input_handler; /* Input handler */ + Eterm cid; /* connection handler (pid or port), + NIL == free */ Uint32 connection_id; /* Connection id incremented on connect */ Uint32 status; /* Slot status, like exiting reserved etc */ Uint32 flags; /* Distribution flags, like hidden, @@ -120,7 +129,7 @@ typedef struct dist_entry_ { unsigned long version; /* Protocol version */ - erts_smp_mtx_t lnk_mtx; /* Protects node_links, nlinks, and + erts_mtx_t lnk_mtx; /* Protects node_links, nlinks, and monitors. */ ErtsLink *node_links; /* In a dist entry, node links are kept in a separate tree, while they are @@ -132,24 +141,29 @@ typedef struct dist_entry_ { ErtsLink *nlinks; /* Link tree with subtrees */ ErtsMonitor *monitors; /* Monitor tree */ - erts_smp_mtx_t qlock; /* Protects qflgs and out_queue */ - Uint32 qflgs; - Sint qsize; + erts_mtx_t qlock; /* Protects qflgs and out_queue */ + erts_atomic32_t qflgs; + erts_atomic_t qsize; + erts_atomic64_t in; + erts_atomic64_t out; ErtsDistOutputQueue out_queue; struct ErtsProcList_ *suspended; + ErtsDistOutputQueue tmp_out_queue; ErtsDistOutputQueue finalized_out_queue; - erts_smp_atomic_t dist_cmd_scheduled; + erts_atomic_t dist_cmd_scheduled; ErtsPortTaskHandle dist_cmd; Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf); struct cache* cache; /* The atom cache */ + + ErtsThrPrgrLaterOp later_op; } DistEntry; typedef struct erl_node_ { HashBucket hash_bucket; /* Hash bucket */ - erts_smp_refc_t refc; /* Reference count */ + erts_refc_t refc; /* Reference count */ Eterm sysname; /* name@host atom for efficiency */ Uint32 creation; /* Creation */ DistEntry *dist_entry; /* Corresponding dist entry */ @@ -158,8 +172,8 @@ typedef struct erl_node_ { extern Hash erts_dist_table; extern Hash erts_node_table; -extern erts_smp_rwmtx_t erts_dist_table_rwmtx; -extern erts_smp_rwmtx_t erts_node_table_rwmtx; +extern erts_rwmtx_t erts_dist_table_rwmtx; +extern erts_rwmtx_t erts_node_table_rwmtx; extern DistEntry *erts_hidden_dist_entries; extern DistEntry *erts_visible_dist_entries; @@ -190,76 +204,68 @@ void erts_init_node_tables(int); void erts_node_table_info(fmtfn_t, void *); void erts_print_node_info(fmtfn_t, void *, Eterm, int*, int*); Eterm erts_get_node_and_dist_references(struct process *); -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int erts_lc_is_de_rwlocked(DistEntry *); int erts_lc_is_de_rlocked(DistEntry *); #endif +int erts_dist_entry_destructor(Binary *bin); +DistEntry *erts_dhandle_to_dist_entry(Eterm dhandle); +Eterm erts_make_dhandle(Process *c_p, DistEntry *dep); +void erts_ref_dist_entry(DistEntry *dep); +void erts_deref_dist_entry(DistEntry *dep); -#ifdef ERTS_ENABLE_LOCK_COUNT -void erts_lcnt_update_distribution_locks(int enable); -#endif - -ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep); ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np); -ERTS_GLB_INLINE void erts_smp_de_rlock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_runlock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_rwlock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_rwunlock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_links_lock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_links_unlock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_rlock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_runlock(DistEntry *dep); 
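The DistEntry layout above turns the queue bookkeeping (qflgs, qsize and the new in/out counters) into atomics, so a status check such as "is the channel busy?" no longer needs dep->qlock, while the queue structure itself stays lock-protected. A minimal sketch of that split; out_queue, QFLG_BUSY and BUSY_LIMIT are invented names and the real flag handling is more involved.

    /* Sketch: keep the queue under a mutex, but publish size/flags as
     * atomics so readers can check status lock-free. Illustration only. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QFLG_BUSY  (1u << 0)
    #define BUSY_LIMIT 1024

    struct out_queue {
        pthread_mutex_t qlock;      /* protects the (omitted) queue itself */
        atomic_uint     qflgs;      /* lock-free status bits */
        atomic_long     qsize;      /* bytes currently queued */
        atomic_uint_least64_t out;  /* total bytes ever enqueued */
    };

    static void enqueue(struct out_queue *q, long nbytes)
    {
        pthread_mutex_lock(&q->qlock);
        /* ... link the buffer into the real queue here ... */
        long sz = atomic_fetch_add(&q->qsize, nbytes) + nbytes;
        atomic_fetch_add(&q->out, (uint_least64_t) nbytes);
        if (sz >= BUSY_LIMIT)
            atomic_fetch_or(&q->qflgs, QFLG_BUSY);
        pthread_mutex_unlock(&q->qlock);
    }

    /* Reader: no lock needed for a status check. */
    static int is_busy(struct out_queue *q)
    {
        return (atomic_load(&q->qflgs) & QFLG_BUSY) != 0;
    }

    int main(void)
    {
        struct out_queue q = { PTHREAD_MUTEX_INITIALIZER };
        atomic_init(&q.qflgs, 0);
        atomic_init(&q.qsize, 0);
        atomic_init(&q.out, 0);

        enqueue(&q, 2000);
        printf("busy=%d qsize=%ld out=%llu\n", is_busy(&q),
               (long) atomic_load(&q.qsize),
               (unsigned long long) atomic_load(&q.out));
        return 0;
    }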
+ERTS_GLB_INLINE void erts_de_rwlock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_rwunlock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_links_lock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_links_unlock(DistEntry *dep); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE void -erts_deref_dist_entry(DistEntry *dep) -{ - ASSERT(dep); - if (erts_smp_refc_dectest(&dep->refc, 0) == 0) - erts_schedule_delete_dist_entry(dep); -} - -ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np) { ASSERT(np); - if (erts_smp_refc_dectest(&np->refc, 0) == 0) + if (erts_refc_dectest(&np->refc, 0) == 0) erts_schedule_delete_node(np); } ERTS_GLB_INLINE void -erts_smp_de_rlock(DistEntry *dep) +erts_de_rlock(DistEntry *dep) { - erts_smp_rwmtx_rlock(&dep->rwmtx); + erts_rwmtx_rlock(&dep->rwmtx); } ERTS_GLB_INLINE void -erts_smp_de_runlock(DistEntry *dep) +erts_de_runlock(DistEntry *dep) { - erts_smp_rwmtx_runlock(&dep->rwmtx); + erts_rwmtx_runlock(&dep->rwmtx); } ERTS_GLB_INLINE void -erts_smp_de_rwlock(DistEntry *dep) +erts_de_rwlock(DistEntry *dep) { - erts_smp_rwmtx_rwlock(&dep->rwmtx); + erts_rwmtx_rwlock(&dep->rwmtx); } ERTS_GLB_INLINE void -erts_smp_de_rwunlock(DistEntry *dep) +erts_de_rwunlock(DistEntry *dep) { - erts_smp_rwmtx_rwunlock(&dep->rwmtx); + erts_rwmtx_rwunlock(&dep->rwmtx); } ERTS_GLB_INLINE void -erts_smp_de_links_lock(DistEntry *dep) +erts_de_links_lock(DistEntry *dep) { - erts_smp_mtx_lock(&dep->lnk_mtx); + erts_mtx_lock(&dep->lnk_mtx); } ERTS_GLB_INLINE void -erts_smp_de_links_unlock(DistEntry *dep) +erts_de_links_unlock(DistEntry *dep) { - erts_smp_mtx_unlock(&dep->lnk_mtx); + erts_mtx_unlock(&dep->lnk_mtx); } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h index b64de624dd..9117eb1f72 100644 --- a/erts/emulator/beam/erl_port.h +++ b/erts/emulator/beam/erl_port.h @@ -119,9 +119,7 @@ typedef struct { void *data[ERTS_PRTSD_SIZE]; } ErtsPrtSD; -#ifdef ERTS_SMP typedef struct ErtsXPortsList_ ErtsXPortsList; -#endif /* * Port locking: @@ -146,17 +144,12 @@ struct _erl_drv_port { ErtsPortTaskSched sched; ErtsPortTaskHandle timeout_task; -#ifdef ERTS_SMP erts_mtx_t *lock; ErtsXPortsList *xports; - erts_smp_atomic_t run_queue; -#else - erts_atomic32_t refc; - int cleanup; -#endif + erts_atomic_t run_queue; erts_atomic_t connected; /* A connected process */ Eterm caller; /* Current caller. */ - erts_smp_atomic_t data; /* Data associated with port. */ + erts_atomic_t data; /* Data associated with port. 
*/ Uint bytes_in; /* Number of bytes read */ Uint bytes_out; /* Number of bytes written */ @@ -173,7 +166,7 @@ struct _erl_drv_port { int control_flags; /* Flags for port_control() */ ErlDrvPDL port_data_lock; - erts_smp_atomic_t psd; /* Port specific data */ + erts_atomic_t psd; /* Port specific data */ int reds; /* Only used while executing driver callbacks */ struct { @@ -209,24 +202,20 @@ ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt); ERTS_GLB_INLINE ErtsRunQueue * erts_port_runq(Port *prt) { -#ifdef ERTS_SMP ErtsRunQueue *rq1, *rq2; - rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); + rq1 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue); if (!rq1) return NULL; while (1) { - erts_smp_runq_lock(rq1); - rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); + erts_runq_lock(rq1); + rq2 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue); if (rq1 == rq2) return rq1; - erts_smp_runq_unlock(rq1); + erts_runq_unlock(rq1); rq1 = rq2; if (!rq1) return NULL; } -#else - return ERTS_RUNQ_IX(0); -#endif } #endif @@ -240,10 +229,10 @@ ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new); ERTS_GLB_INLINE void * erts_prtsd_get(Port *prt, int ix) { - ErtsPrtSD *psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd); + ErtsPrtSD *psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd); if (!psd) return NULL; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; return psd->data[ix]; } @@ -254,16 +243,14 @@ erts_prtsd_set(Port *prt, int ix, void *data) void *old; int i; - psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd); + psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd); if (psd) { -#ifdef ERTS_SMP #ifdef ETHR_ORDERED_READ_DEPEND ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore); #else ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore); #endif -#endif old = psd->data[ix]; psd->data[ix] = data; return old; @@ -275,7 +262,7 @@ erts_prtsd_set(Port *prt, int ix, void *data) new_psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD)); for (i = 0; i < ERTS_PRTSD_SIZE; i++) new_psd->data[i] = NULL; - psd = (ErtsPrtSD *) erts_smp_atomic_cmpxchg_mb(&prt->psd, + psd = (ErtsPrtSD *) erts_atomic_cmpxchg_mb(&prt->psd, (erts_aint_t) new_psd, (erts_aint_t) NULL); if (psd) @@ -359,15 +346,10 @@ Eterm erts_request_io_bytes(Process *c_p); void print_port_info(Port *, fmtfn_t, void *); void erts_port_free(Port *); -#ifndef ERTS_SMP -void erts_port_cleanup(Port *); -#endif void erts_fire_port_monitor(Port *prt, Eterm ref); -#ifdef ERTS_SMP int erts_port_handle_xports(Port *); -#endif -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int erts_lc_is_port_locked(Port *); #endif @@ -376,9 +358,9 @@ ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt); ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc); ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt); -ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt); -ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt); -ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt); +ERTS_GLB_INLINE int erts_port_trylock(Port *prt); +ERTS_GLB_INLINE void erts_port_lock(Port *prt); +ERTS_GLB_INLINE void erts_port_unlock(Port *prt); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -407,35 +389,27 @@ ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt) } ERTS_GLB_INLINE int -erts_smp_port_trylock(Port *prt) +erts_port_trylock(Port *prt) { -#ifdef ERTS_SMP /* *Need* to be a managed thread */ - 
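erts_port_runq above is now unconditionally the lock-and-revalidate loop: load the port's run-queue pointer, lock that queue, then re-read the pointer to make sure the port was not migrated in between, and chase it if it was. The same loop written as a compilable sketch with C11 atomics and pthreads (types and names are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct run_queue { pthread_mutex_t mtx; /* ... */ };
    struct port      { _Atomic(struct run_queue *) run_queue; /* ... */ };

    /* Returns the port's current run queue, locked, or NULL if it has none. */
    static struct run_queue *port_runq(struct port *prt)
    {
        struct run_queue *rq1, *rq2;

        rq1 = atomic_load(&prt->run_queue);
        if (!rq1)
            return NULL;
        for (;;) {
            pthread_mutex_lock(&rq1->mtx);
            rq2 = atomic_load(&prt->run_queue);
            if (rq1 == rq2)
                return rq1;             /* still on this queue: done */
            pthread_mutex_unlock(&rq1->mtx);
            rq1 = rq2;                  /* port migrated; chase it   */
            if (!rq1)
                return NULL;
        }
    }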
ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); return erts_mtx_trylock(prt->lock); -#else - return 0; -#endif } ERTS_GLB_INLINE void -erts_smp_port_lock(Port *prt) +erts_port_lock(Port *prt) { -#ifdef ERTS_SMP /* *Need* to be a managed thread */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); erts_mtx_lock(prt->lock); -#endif } ERTS_GLB_INLINE void -erts_smp_port_unlock(Port *prt) +erts_port_unlock(Port *prt) { -#ifdef ERTS_SMP /* *Need* to be a managed thread */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); erts_mtx_unlock(prt->lock); -#endif } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ @@ -466,9 +440,7 @@ extern const Port erts_invalid_port; int erts_is_port_ioq_empty(Port *); void erts_terminate_port(Port *); -#ifdef ERTS_SMP Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks); -#endif ERTS_GLB_INLINE Port *erts_pix2port(int); ERTS_GLB_INLINE Port *erts_port_lookup_raw(Eterm); @@ -476,11 +448,9 @@ ERTS_GLB_INLINE Port *erts_port_lookup(Eterm, Uint32); ERTS_GLB_INLINE Port*erts_id2port(Eterm id); ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32); ERTS_GLB_INLINE void erts_port_release(Port *); -#ifdef ERTS_SMP ERTS_GLB_INLINE Port *erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs); ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs); ERTS_GLB_INLINE void erts_thr_port_release(Port *prt); -#endif ERTS_GLB_INLINE Port *erts_thr_drvport2port(ErlDrvPort, int); ERTS_GLB_INLINE Port *erts_drvport2port_state(ErlDrvPort, erts_aint32_t *); ERTS_GLB_INLINE Eterm erts_drvport2id(ErlDrvPort); @@ -506,7 +476,7 @@ erts_port_lookup_raw(Eterm id) { Port *prt; - ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying()); + ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying()); if (is_not_internal_port(id)) return NULL; @@ -535,7 +505,7 @@ erts_id2port(Eterm id) Port *prt; /* Only allowed to be called from managed threads */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); if (is_not_internal_port(id)) return NULL; @@ -546,10 +516,10 @@ erts_id2port(Eterm id) if (!prt || prt->common.id != id) return NULL; - erts_smp_port_lock(prt); + erts_port_lock(prt); state = erts_atomic32_read_nob(&prt->state); if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) { - erts_smp_port_unlock(prt); + erts_port_unlock(prt); return NULL; } @@ -562,14 +532,12 @@ erts_id2port_sflgs(Eterm id, Process *c_p, ErtsProcLocks c_p_locks, Uint32 invalid_sflgs) { -#ifdef ERTS_SMP int no_proc_locks = !c_p || !c_p_locks; -#endif erts_aint32_t state; Port *prt; /* Only allowed to be called from managed threads */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); if (is_not_internal_port(id)) return NULL; @@ -580,21 +548,17 @@ erts_id2port_sflgs(Eterm id, if (!prt || prt->common.id != id) return NULL; -#ifdef ERTS_SMP if (no_proc_locks) - erts_smp_port_lock(prt); - else if (erts_smp_port_trylock(prt) == EBUSY) { + erts_port_lock(prt); + else if (erts_port_trylock(prt) == EBUSY) { /* Unlock process locks, and acquire locks in lock order... 
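erts_id2port above follows the usual lookup discipline: find the port, take its lock, then re-check the state word, since the port can become invalid between the lookup and the lock. A small sketch of that re-validation step; the table lookup is stubbed out and the flag value is made up for the example:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stddef.h>

    #define STATE_INVALID_LOOKUP 0x1u     /* closing/freed/... bits */

    struct port {
        pthread_mutex_t lock;
        atomic_uint state;
    };

    /* stub: the real lookup hashes 'id' into the port table */
    static struct port *table_lookup(uint64_t id) { (void) id; return NULL; }

    static struct port *id2port(uint64_t id)
    {
        struct port *prt = table_lookup(id);
        if (!prt)
            return NULL;
        pthread_mutex_lock(&prt->lock);
        /* the port may have been invalidated between lookup and lock */
        if (atomic_load(&prt->state) & STATE_INVALID_LOOKUP) {
            pthread_mutex_unlock(&prt->lock);
            return NULL;
        }
        return prt;                        /* returned locked */
    }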
*/ - erts_smp_proc_unlock(c_p, c_p_locks); - erts_smp_port_lock(prt); - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_unlock(c_p, c_p_locks); + erts_port_lock(prt); + erts_proc_lock(c_p, c_p_locks); } -#endif state = erts_atomic32_read_nob(&prt->state); if (state & invalid_sflgs) { -#ifdef ERTS_SMP - erts_smp_port_unlock(prt); -#endif + erts_port_unlock(prt); return NULL; } @@ -605,18 +569,10 @@ ERTS_GLB_INLINE void erts_port_release(Port *prt) { /* Only allowed to be called from managed threads */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); -#ifdef ERTS_SMP - erts_smp_port_unlock(prt); -#else - if (prt->cleanup) { - prt->cleanup = 0; - erts_port_cleanup(prt); - } -#endif + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); + erts_port_unlock(prt); } -#ifdef ERTS_SMP /* * erts_thr_id2port_sflgs() and erts_port_dec_refc(prt) can * be used by unmanaged threads in the SMP case. @@ -702,13 +658,10 @@ ERTS_GLB_INLINE void erts_thr_port_release(Port *prt) { erts_mtx_unlock(prt->lock); -#ifdef ERTS_SMP if (!erts_thr_progress_is_managed_thread()) erts_port_dec_refc(prt); -#endif } -#endif ERTS_GLB_INLINE Port * erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl) @@ -724,7 +677,7 @@ erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl) #ifdef ERTS_ENABLE_LOCK_CHECK if (!ERTS_IS_CRASH_DUMPING) { if (erts_lc_is_emu_thr()) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ERTS_LC_ASSERT(!prt->port_data_lock || erts_lc_mtx_is_locked(&prt->port_data_lock->mtx)); } @@ -753,7 +706,7 @@ erts_drvport2port_state(ErlDrvPort drvport, erts_aint32_t *statep) // ERTS_LC_ASSERT(erts_lc_is_emu_thr()); if (prt == ERTS_INVALID_ERL_DRV_PORT) return ERTS_INVALID_ERL_DRV_PORT; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt) || ERTS_IS_CRASH_DUMPING); /* * This state check is only needed since a driver callback @@ -810,23 +763,21 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep) int reds = 0; erts_aint32_t state; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); state = erts_atomic32_read_nob(&prt->state); if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(prt)) { reds += ERTS_PORT_REDS_TERMINATE; erts_terminate_port(prt); state = erts_atomic32_read_nob(&prt->state); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); } -#ifdef ERTS_SMP if (prt->xports) { reds += erts_port_handle_xports(prt); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT(!prt->xports); } -#endif if (statep) *statep = state; diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index 4d7a86398a..14977dfa17 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -83,14 +83,14 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q #define LTTNG_DRIVER(TRACEPOINT, PP) do {} while(0) #endif -#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \ +#define ERTS_LC_VERIFY_RQ(RQ, PP) \ do { \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \ - ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \ - erts_smp_atomic_read_nob(&(PP)->run_queue))); \ + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); \ + ERTS_LC_ASSERT((RQ) == ((ErtsRunQueue *) \ + erts_atomic_read_nob(&(PP)->run_queue))); \ } while (0) -erts_smp_atomic_t erts_port_task_outstanding_io_tasks; +erts_atomic_t 
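The erts_id2port_sflgs hunk above is the classic lock-ordering fallback, now compiled in unconditionally: try the port lock while still holding the process locks; if that returns EBUSY, drop the process locks, take the port lock blocking, and then re-acquire the process locks. A self-contained sketch with two pthread mutexes standing in for the process and port locks:

    #include <pthread.h>
    #include <errno.h>

    /* Lock order: port lock before process lock.  A caller that already
     * holds the process lock must therefore not block on the port lock. */
    static void lock_port_keeping_proc(pthread_mutex_t *proc_lock,
                                       pthread_mutex_t *port_lock)
    {
        if (pthread_mutex_trylock(port_lock) == EBUSY) {
            /* back off: release what we hold, then take both in order */
            pthread_mutex_unlock(proc_lock);
            pthread_mutex_lock(port_lock);
            pthread_mutex_lock(proc_lock);
            /* anything guarded by proc_lock may have changed while it was
             * released, so callers must re-validate afterwards */
        }
    }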
erts_port_task_outstanding_io_tasks; #define ERTS_PT_STATE_SCHEDULED 0 #define ERTS_PT_STATE_ABORTED 1 @@ -108,7 +108,7 @@ typedef union { } ErtsPortTaskTypeData; struct ErtsPortTask_ { - erts_smp_atomic32_t state; + erts_atomic32_t state; ErtsPortTaskType type; union { struct { @@ -126,9 +126,7 @@ struct ErtsPortTaskHandleList_ { ErtsPortTaskHandle handle; union { ErtsPortTaskHandleList *next; -#ifdef ERTS_SMP ErtsThrPrgrLaterOp release; -#endif } u; }; @@ -161,25 +159,19 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(busy_caller_table, 50, ERTS_ALC_T_BUSY_CALLER_TAB) -#ifdef ERTS_SMP static void call_port_task_free(void *vptp) { port_task_free((ErtsPortTask *) vptp); } -#endif static ERTS_INLINE void schedule_port_task_free(ErtsPortTask *ptp) { -#ifdef ERTS_SMP erts_schedule_thr_prgr_later_cleanup_op(call_port_task_free, (void *) ptp, &ptp->u.release, sizeof(ErtsPortTask)); -#else - port_task_free(ptp); -#endif } static ERTS_INLINE ErtsPortTask * @@ -199,7 +191,7 @@ p2p_sig_data_init(ErtsPortTask *ptp) ptp->type = ERTS_PORT_TASK_PROC_SIG; ptp->u.alive.flags = ERTS_PT_FLG_SIG_DEP; - erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); + erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); ASSERT(ptp == p2p_sig_data_to_task(&ptp->u.alive.td.psig.data)); @@ -290,7 +282,7 @@ popped_from_busy_queue(Port *pp, ErtsPortTask *ptp, int last) #ifdef DEBUG erts_aint32_t flags = #endif - erts_smp_atomic32_read_band_nob( + erts_atomic32_read_band_nob( &pp->sched.flags, ~ERTS_PTS_FLG_HAVE_BUSY_TASKS); ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS); @@ -337,7 +329,7 @@ busy_wait_move_to_busy_queue(Port *pp, ErtsPortTask *ptp) #ifdef DEBUG flags = #endif - erts_smp_atomic32_read_bor_nob(&pp->sched.flags, + erts_atomic32_read_bor_nob(&pp->sched.flags, ERTS_PTS_FLG_HAVE_BUSY_TASKS); ASSERT(!(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS)); @@ -477,7 +469,7 @@ no_sig_dep_move_from_busyq(Port *pp) int bix; erts_aint32_t flags = #endif - erts_smp_atomic32_read_band_nob( + erts_atomic32_read_band_nob( &pp->sched.flags, ~ERTS_PTS_FLG_HAVE_BUSY_TASKS); ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS); @@ -510,11 +502,11 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue) if (!first) { ASSERT(!tabp); ASSERT(!pp->sched.taskq.local.busy.last); - ASSERT(!(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS)); + ASSERT(!(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS)); return; } - ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS); + ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS); ASSERT(tabp); tot_count = 0; @@ -570,13 +562,13 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue) static ERTS_INLINE void reset_port_task_handle(ErtsPortTaskHandle *pthp) { - erts_smp_atomic_set_relb(pthp, (erts_aint_t) NULL); + erts_atomic_set_relb(pthp, (erts_aint_t) NULL); } static ERTS_INLINE ErtsPortTask * handle2task(ErtsPortTaskHandle *pthp) { - return (ErtsPortTask *) erts_smp_atomic_read_acqb(pthp); + return (ErtsPortTask *) erts_atomic_read_acqb(pthp); } static ERTS_INLINE void @@ -603,7 +595,7 @@ set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp) { ptp->u.alive.handle = pthp; if (pthp) { - erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp); + erts_atomic_set_relb(pthp, (erts_aint_t) ptp); ASSERT(ptp == handle2task(ptp->u.alive.handle)); } } @@ -617,7 +609,7 @@ set_tmp_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp) * IMPORTANT! 
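schedule_port_task_free above now always routes the free through erts_schedule_thr_prgr_later_cleanup_op: the task is not freed inline, and the deferral bookkeeping (ptp->u.release) is embedded in the task itself, so no extra allocation is needed. The sketch below shows only that shape with a plain deferred-cleanup list; it does not model the thread-progress machinery, and all names are illustrative:

    #include <stdlib.h>

    /* bookkeeping for one deferred cleanup, embedded in the object to free */
    struct later_op {
        void (*func)(void *);
        void *arg;
        struct later_op *next;
    };

    struct port_task {
        int type;
        /* ... */
        struct later_op release;        /* reused once the task is done */
    };

    static struct later_op *later_ops;  /* drained "later", i.e. once it is
                                           known to be safe to reclaim */

    static void schedule_later_cleanup(struct later_op *lop,
                                       void (*func)(void *), void *arg)
    {
        lop->func = func;
        lop->arg = arg;
        lop->next = later_ops;
        later_ops = lop;
    }

    static void task_free(void *vptp) { free(vptp); }

    static void schedule_task_free(struct port_task *ptp)
    {
        schedule_later_cleanup(&ptp->release, task_free, ptp);
    }

    static void run_later_ops(void)
    {
        while (later_ops) {
            struct later_op *lop = later_ops;
            later_ops = lop->next;      /* read next before the free */
            lop->func(lop->arg);
        }
    }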
Task either need to be aborted, or task handle * need to be detached before thread progress has been made. */ - erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp); + erts_atomic_set_relb(pthp, (erts_aint_t) ptp); } } @@ -635,20 +627,20 @@ check_unset_busy_port_q(Port *pp, int resume_procs = 0; ASSERT(bpq); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); erts_port_task_sched_lock(&pp->sched); - qsize = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size); - low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low); + qsize = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size); + low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low); if (qsize < low) { erts_aint32_t mask = ~(ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q | ERTS_PTS_FLG_BUSY_PORT_Q); - flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, mask); + flags = erts_atomic32_read_band_relb(&pp->sched.flags, mask); if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q) resume_procs = 1; } else if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) { - flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, + flags = erts_atomic32_read_band_relb(&pp->sched.flags, ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); flags &= ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q; } @@ -673,16 +665,16 @@ aborted_proc2port_data(Port *pp, ErlDrvSizeT size) bpq = pp->sched.taskq.bpq; - qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size, + qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size, (erts_aint_t) -size); ASSERT(qsz + size > qsz); - flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + flags = erts_atomic32_read_nob(&pp->sched.flags); ASSERT(pp->sched.taskq.bpq); if ((flags & (ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q | ERTS_PTS_FLG_BUSY_PORT_Q)) != ERTS_PTS_FLG_BUSY_PORT_Q) return; - if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) - erts_smp_atomic32_read_bor_nob(&pp->sched.flags, + if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low)) + erts_atomic32_read_bor_nob(&pp->sched.flags, ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); } @@ -700,13 +692,13 @@ dequeued_proc2port_data(Port *pp, ErlDrvSizeT size) bpq = pp->sched.taskq.bpq; - qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size, + qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size, (erts_aint_t) -size); ASSERT(qsz + size > qsz); - flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + flags = erts_atomic32_read_nob(&pp->sched.flags); if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q)) return; - if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->low)) + if (qsz < (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->low)) check_unset_busy_port_q(pp, flags, bpq); } @@ -719,19 +711,19 @@ enqueue_proc2port_data(Port *pp, if (sigdp && bpq) { ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp); if (size) { - erts_aint_t asize = erts_smp_atomic_add_read_acqb(&bpq->size, + erts_aint_t asize = erts_atomic_add_read_acqb(&bpq->size, (erts_aint_t) size); ErlDrvSizeT qsz = (ErlDrvSizeT) asize; ASSERT(qsz - size < qsz); if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q) && qsz > bpq->high) { - flags = erts_smp_atomic32_read_bor_acqb(&pp->sched.flags, + flags = erts_atomic32_read_bor_acqb(&pp->sched.flags, ERTS_PTS_FLG_BUSY_PORT_Q); flags |= ERTS_PTS_FLG_BUSY_PORT_Q; - qsz = (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->size); - if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) { - flags = (erts_smp_atomic32_read_bor_relb( + qsz = (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->size); + if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low)) { + flags = 
(erts_atomic32_read_bor_relb( &pp->sched.flags, ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)); flags |= ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q; @@ -779,18 +771,18 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp erts_aint32_t flags; pp->sched.taskq.bpq = NULL; flags = ~(ERTS_PTS_FLG_BUSY_PORT_Q|ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); - flags = erts_smp_atomic32_read_band_acqb(&pp->sched.flags, flags); + flags = erts_atomic32_read_band_acqb(&pp->sched.flags, flags); if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q) resume_procs = 1; } else { if (!low) - low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low); + low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low); else { if (bpq->high < low) bpq->high = low; - erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low); + erts_atomic_set_relb(&bpq->low, (erts_aint_t) low); written = 1; } @@ -799,19 +791,19 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp else { if (low > high) { low = high; - erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low); + erts_atomic_set_relb(&bpq->low, (erts_aint_t) low); } bpq->high = high; written = 1; } if (written) { - ErlDrvSizeT size = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size); + ErlDrvSizeT size = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size); if (size > high) - erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + erts_atomic32_read_bor_relb(&pp->sched.flags, ERTS_PTS_FLG_BUSY_PORT_Q); else if (size < low) - erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + erts_atomic32_read_bor_relb(&pp->sched.flags, ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); } } @@ -830,25 +822,19 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp * No-suspend handles. */ -#ifdef ERTS_SMP static void free_port_task_handle_list(void *vpthlp) { erts_free(ERTS_ALC_T_PT_HNDL_LIST, vpthlp); } -#endif static void schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp) { -#ifdef ERTS_SMP erts_schedule_thr_prgr_later_cleanup_op(free_port_task_handle_list, (void *) pthlp, &pthlp->u.release, sizeof(ErtsPortTaskHandleList)); -#else - erts_free(ERTS_ALC_T_PT_HNDL_LIST, pthlp); -#endif } static ERTS_INLINE void @@ -891,7 +877,7 @@ get_free_nosuspend_handles(Port *pp) { ErtsPortTaskHandleList *nshp, *last_nshp = NULL; - ERTS_SMP_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched)); + ERTS_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched)); nshp = pp->sched.taskq.local.busy.nosuspend; @@ -907,7 +893,7 @@ get_free_nosuspend_handles(Port *pp) pp->sched.taskq.local.busy.nosuspend = last_nshp->u.next; last_nshp->u.next = NULL; if (!pp->sched.taskq.local.busy.nosuspend) - erts_smp_atomic32_read_band_nob(&pp->sched.flags, + erts_atomic32_read_band_nob(&pp->sched.flags, ~ERTS_PTS_FLG_HAVE_NS_TASKS); } return nshp; @@ -930,7 +916,7 @@ free_nosuspend_handles(ErtsPortTaskHandleList *free_nshp) static ERTS_INLINE void enqueue_port(ErtsRunQueue *runq, Port *pp) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); pp->sched.next = NULL; if (runq->ports.end) { ASSERT(runq->ports.start); @@ -944,19 +930,17 @@ enqueue_port(ErtsRunQueue *runq, Port *pp) runq->ports.end = pp; ASSERT(runq->ports.start && runq->ports.end); - erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); + erts_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); -#ifdef ERTS_SMP if (ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_HALTING) erts_non_empty_runq(runq); -#endif } static ERTS_INLINE Port * 
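The proc2port enqueue/dequeue hunks above do the driver busy-message-queue accounting on (formerly smp_) atomics: adding data can push the size above the high watermark and raise the busy flag, while draining below the low watermark triggers the check that clears the busy state again at a convenient synchronization point. A reduced sketch of that accounting, with the two flags as separate booleans rather than bits packed into sched.flags:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct busy_q {
        size_t high;               /* written rarely, cf. erl_drv_busy_msgq_limits */
        atomic_size_t low;
        atomic_size_t size;
        atomic_bool busy;          /* stands in for ERTS_PTS_FLG_BUSY_PORT_Q       */
        atomic_bool chk_unset;     /* stands in for ..._CHK_UNSET_BUSY_PORT_Q      */
    };

    static void on_enqueue(struct busy_q *q, size_t bytes)
    {
        size_t sz = atomic_fetch_add(&q->size, bytes) + bytes;
        if (!atomic_load(&q->busy) && sz > q->high)
            atomic_store(&q->busy, true);       /* senders should suspend */
    }

    static void on_dequeue(struct busy_q *q, size_t bytes)
    {
        size_t sz = atomic_fetch_sub(&q->size, bytes) - bytes;
        if (atomic_load(&q->busy) && sz < atomic_load(&q->low))
            atomic_store(&q->chk_unset, true);  /* clear 'busy' later, under
                                                   the port scheduling lock */
    }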
pop_port(ErtsRunQueue *runq) { Port *pp = runq->ports.start; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); if (!pp) { ASSERT(!runq->ports.end); } @@ -966,7 +950,7 @@ pop_port(ErtsRunQueue *runq) ASSERT(runq->ports.end == pp); runq->ports.end = NULL; } - erts_smp_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); + erts_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); } ASSERT(runq->ports.start || !runq->ports.end); @@ -993,7 +977,7 @@ enqueue_task(Port *pp, if (ns_pthlp) fail_flags |= ERTS_PTS_FLG_BUSY_PORT; erts_port_task_sched_lock(&pp->sched); - flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + flags = erts_atomic32_read_nob(&pp->sched.flags); if (flags & fail_flags) res = 0; else { @@ -1024,7 +1008,7 @@ enqueue_task(Port *pp, static ERTS_INLINE void prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) { - erts_aint32_t act = erts_smp_atomic32_read_nob(&pp->sched.flags); + erts_aint32_t act = erts_atomic32_read_nob(&pp->sched.flags); if (!pp->sched.taskq.local.busy.first || (act & ERTS_PTS_FLG_BUSY_PORT)) { *execqp = pp->sched.taskq.local.first; @@ -1045,7 +1029,7 @@ prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) new &= ~ERTS_PTS_FLG_IN_RUNQ; new |= ERTS_PTS_FLG_EXEC; - act = erts_smp_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp); ASSERT(act & ERTS_PTS_FLG_IN_RUNQ); @@ -1072,7 +1056,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) *execq = NULL; - act = erts_smp_atomic32_read_nob(&pp->sched.flags); + act = erts_atomic32_read_nob(&pp->sched.flags); if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq); @@ -1089,7 +1073,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) if (act & ERTS_PTS_FLG_HAVE_TASKS) new |= ERTS_PTS_FLG_IN_RUNQ; - act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ)); ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM)); @@ -1115,7 +1099,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) static ERTS_INLINE erts_aint32_t select_queue_for_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) { - erts_aint32_t flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + erts_aint32_t flags = erts_atomic32_read_nob(&pp->sched.flags); if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) flags = check_unset_busy_port_q(pp, flags, pp->sched.taskq.bpq); @@ -1225,7 +1209,7 @@ fetch_in_queue(Port *pp, ErtsPortTask **execqp) if (ptp) *execqp = ptp->u.alive.next; else - erts_smp_atomic32_read_band_nob(&pp->sched.flags, + erts_atomic32_read_band_nob(&pp->sched.flags, ~ERTS_PTS_FLG_HAVE_TASKS); @@ -1288,7 +1272,7 @@ erl_drv_consume_timeslice(ErlDrvPort dprt, int percent) void erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *pthp) { - ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying()); + ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying()); reset_port_task_handle(pthp); } @@ -1301,9 +1285,7 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp) { int res; ErtsPortTask *ptp; -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay(); -#endif ptp = handle2task(pthp); if (!ptp) @@ -1313,14 +1295,14 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp) #ifdef DEBUG ErtsPortTaskHandle *saved_pthp = ptp->u.alive.handle; - 
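prepare_exec and finalize_exec above move a port between its in-run-queue and executing states with a compare-and-exchange loop on the flags word; only the thread whose exchange succeeds owns the transition. A minimal sketch of such a loop (the flag values are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    #define FLG_IN_RUNQ  (1u << 0)
    #define FLG_EXEC     (1u << 1)

    /* claim the port for execution: atomically clear IN_RUNQ and set EXEC */
    static void claim_for_exec(atomic_uint_fast32_t *flags)
    {
        uint_fast32_t act = atomic_load(flags);
        for (;;) {
            uint_fast32_t exp = act;
            uint_fast32_t new = (act & ~(uint_fast32_t) FLG_IN_RUNQ) | FLG_EXEC;
            if (atomic_compare_exchange_weak(flags, &exp, new))
                break;              /* we made the transition            */
            act = exp;              /* word changed under us; recompute  */
        }
    }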
ERTS_SMP_READ_MEMORY_BARRIER; - old_state = erts_smp_atomic32_read_nob(&ptp->state); + ERTS_THR_READ_MEMORY_BARRIER; + old_state = erts_atomic32_read_nob(&ptp->state); if (old_state == ERTS_PT_STATE_SCHEDULED) { ASSERT(!saved_pthp || saved_pthp == pthp); } #endif - old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + old_state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_ABORTED, ERTS_PT_STATE_SCHEDULED); if (old_state != ERTS_PT_STATE_SCHEDULED) @@ -1333,9 +1315,9 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp) case ERTS_PORT_TASK_INPUT: case ERTS_PORT_TASK_OUTPUT: case ERTS_PORT_TASK_EVENT: - ASSERT(erts_smp_atomic_read_nob( + ASSERT(erts_atomic_read_nob( &erts_port_task_outstanding_io_tasks) > 0); - erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks); + erts_atomic_dec_relb(&erts_port_task_outstanding_io_tasks); break; default: break; @@ -1345,9 +1327,7 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp) } } -#ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); -#endif return res; } @@ -1356,12 +1336,10 @@ void erts_port_task_abort_nosuspend_tasks(Port *pp) { ErtsPortTaskHandleList *abort_list; -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl = ERTS_THR_PRGR_DHANDLE_INVALID; -#endif erts_port_task_sched_lock(&pp->sched); - erts_smp_atomic32_read_band_nob(&pp->sched.flags, + erts_atomic32_read_band_nob(&pp->sched.flags, ~ERTS_PTS_FLG_HAVE_NS_TASKS); abort_list = pp->sched.taskq.local.busy.nosuspend; pp->sched.taskq.local.busy.nosuspend = NULL; @@ -1381,40 +1359,34 @@ erts_port_task_abort_nosuspend_tasks(Port *pp) pthlp = abort_list; abort_list = pthlp->u.next; -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) dhndl = erts_thr_progress_unmanaged_delay(); -#endif pthp = &pthlp->handle; ptp = handle2task(pthp); if (!ptp) { -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_thr_progress_unmanaged_continue(dhndl); -#endif schedule_port_task_handle_list_free(pthlp); continue; } #ifdef DEBUG saved_pthp = ptp->u.alive.handle; - ERTS_SMP_READ_MEMORY_BARRIER; - old_state = erts_smp_atomic32_read_nob(&ptp->state); + ERTS_THR_READ_MEMORY_BARRIER; + old_state = erts_atomic32_read_nob(&ptp->state); if (old_state == ERTS_PT_STATE_SCHEDULED) { ASSERT(saved_pthp == pthp); } #endif - old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + old_state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_ABORTED, ERTS_PT_STATE_SCHEDULED); if (old_state != ERTS_PT_STATE_SCHEDULED) { /* Task already aborted, executing, or executed */ -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_thr_progress_unmanaged_continue(dhndl); -#endif schedule_port_task_handle_list_free(pthlp); continue; } @@ -1424,10 +1396,8 @@ erts_port_task_abort_nosuspend_tasks(Port *pp) type = ptp->type; td = ptp->u.alive.td; -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_thr_progress_unmanaged_continue(dhndl); -#endif schedule_port_task_handle_list_free(pthlp); abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL); @@ -1446,10 +1416,8 @@ erts_port_task_schedule(Eterm id, { ErtsProc2PortSigData *sigdp = NULL; ErtsPortTaskHandleList *ns_pthlp = NULL; -#ifdef ERTS_SMP ErtsRunQueue *xrunq; ErtsThrPrgrDelayHandle dhndl; -#endif ErtsRunQueue *runq; Port *pp; ErtsPortTask *ptp = NULL; @@ -1460,19 +1428,15 @@ erts_port_task_schedule(Eterm id, ASSERT(is_internal_port(id)); -#ifdef ERTS_SMP dhndl = erts_thr_progress_unmanaged_delay(); -#endif pp = erts_port_lookup_raw(id); -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) { if (pp) 
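erts_port_task_abort above races against the executor, and both sides leave the SCHEDULED state with a compare-and-exchange, so exactly one of them wins and the loser backs off. A small sketch of that single-winner state machine, using the state values from the defines earlier in this file's hunks:

    #include <stdatomic.h>

    enum { PT_SCHEDULED = 0, PT_ABORTED = 1, PT_EXECUTING = 2 };

    struct task { atomic_int state; /* ... */ };

    /* returns 0 if we aborted the task, -1 if it was already taken */
    static int task_abort(struct task *t)
    {
        int expected = PT_SCHEDULED;
        return atomic_compare_exchange_strong(&t->state, &expected, PT_ABORTED)
            ? 0 : -1;
    }

    /* executor side: only run the task if it is still SCHEDULED */
    static int task_try_execute(struct task *t)
    {
        int expected = PT_SCHEDULED;
        return atomic_compare_exchange_strong(&t->state, &expected, PT_EXECUTING)
            ? 0 : -1;
    }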
erts_port_inc_refc(pp); erts_thr_progress_unmanaged_continue(dhndl); } -#endif if (type != ERTS_PORT_TASK_PROC_SIG) { if (!pp) @@ -1483,7 +1447,7 @@ erts_port_task_schedule(Eterm id, ptp->type = type; ptp->u.alive.flags = 0; - erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); + erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); set_handle(ptp, pthp); } @@ -1495,7 +1459,7 @@ erts_port_task_schedule(Eterm id, va_start(argp, type); ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent); va_end(argp); - erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); + erts_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); break; } case ERTS_PORT_TASK_EVENT: { @@ -1504,7 +1468,7 @@ erts_port_task_schedule(Eterm id, ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent); ptp->u.alive.td.io.event_data = va_arg(argp, ErlDrvEventData); va_end(argp); - erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); + erts_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); break; } case ERTS_PORT_TASK_PROC_SIG: { @@ -1559,7 +1523,7 @@ erts_port_task_schedule(Eterm id, if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) new |= ERTS_PTS_FLG_IN_RUNQ; - act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); if (exp == act) { if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) @@ -1584,44 +1548,38 @@ erts_port_task_schedule(Eterm id, if (!runq) ERTS_INTERNAL_ERROR("Missing run-queue"); -#ifdef ERTS_SMP xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); - ERTS_SMP_LC_ASSERT(runq != xrunq); - ERTS_SMP_LC_VERIFY_RQ(runq, pp); + ERTS_LC_ASSERT(runq != xrunq); + ERTS_LC_VERIFY_RQ(runq, pp); if (xrunq) { /* Emigrate port ... */ - erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); - erts_smp_runq_unlock(runq); + erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); + erts_runq_unlock(runq); runq = erts_port_runq(pp); if (!runq) ERTS_INTERNAL_ERROR("Missing run-queue"); } -#endif enqueue_port(runq, pp); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); - erts_smp_notify_inc_runq(runq); + erts_notify_inc_runq(runq); done: if (prof_runnable_ports) erts_port_task_sched_unlock(&pp->sched); -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_port_dec_refc(pp); -#endif return 0; abort_nosuspend: -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_port_dec_refc(pp); -#endif abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0); @@ -1635,10 +1593,8 @@ abort_nosuspend: fail: -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_port_dec_refc(pp); -#endif if (ptp) { abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT, @@ -1658,14 +1614,14 @@ erts_port_task_free_port(Port *pp) erts_aint32_t flags; ErtsRunQueue *runq; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD)); runq = erts_port_runq(pp); if (!runq) ERTS_INTERNAL_ERROR("Missing run-queue"); erts_port_task_sched_lock(&pp->sched); - flags = erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + flags = erts_atomic32_read_bor_relb(&pp->sched.flags, ERTS_PTS_FLG_EXIT); erts_port_task_sched_unlock(&pp->sched); erts_atomic32_read_bset_relb(&pp->state, @@ -1675,7 +1631,7 @@ erts_port_task_free_port(Port *pp) | ERTS_PORT_SFLG_FREE), ERTS_PORT_SFLG_FREE); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); if (!(flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) 
begin_port_cleanup(pp, NULL, NULL); @@ -1705,7 +1661,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) ErtsSchedulerData *esdp = runq->scheduler; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); pp = pop_port(runq); if (!pp) { @@ -1713,9 +1669,9 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) goto done; } - ERTS_SMP_LC_VERIFY_RQ(runq, pp); + ERTS_LC_VERIFY_RQ(runq, pp); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); *curr_port_pp = pp; @@ -1723,19 +1679,19 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no); int migrated = old && old != esdp->no; - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++; erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++; if (migrated) { erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++; erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++; } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); } prepare_exec(pp, &execq, &processing_busy_q); - erts_smp_port_lock(pp); + erts_port_lock(pp); /* trace port scheduling, in */ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { @@ -1757,7 +1713,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) if (!ptp) break; - task_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + task_state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_EXECUTING, ERTS_PT_STATE_SCHEDULED); if (task_state != ERTS_PT_STATE_SCHEDULED) { @@ -1769,8 +1725,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) start_time = erts_timestamp_millis(); } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_CHK_NO_PROC_LOCKS; ASSERT(pp->drv_ptr); switch (ptp->type) { @@ -1889,15 +1845,13 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) if (io_tasks_executed) { - ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) + ASSERT(erts_atomic_read_nob(&erts_port_task_outstanding_io_tasks) >= io_tasks_executed); - erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks, + erts_atomic_add_relb(&erts_port_task_outstanding_io_tasks, -1*io_tasks_executed); } -#ifdef ERTS_SMP - ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); -#endif + ASSERT(runq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); active = finalize_exec(pp, &execq, processing_busy_q); @@ -1907,54 +1861,47 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) *curr_port_pp = NULL; - erts_smp_runq_lock(runq); + erts_runq_lock(runq); if (active) { -#ifdef ERTS_SMP ErtsRunQueue *xrunq; -#endif ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD)); -#ifdef ERTS_SMP xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); - ERTS_SMP_LC_ASSERT(runq != xrunq); - ERTS_SMP_LC_VERIFY_RQ(runq, pp); + ERTS_LC_ASSERT(runq != xrunq); + ERTS_LC_VERIFY_RQ(runq, pp); if (!xrunq) { -#endif enqueue_port(runq, pp); /* No need to notify ourselves about inc in runq. */ -#ifdef ERTS_SMP } else { /* Emigrate port... 
*/ - erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); - erts_smp_runq_unlock(runq); + erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); + erts_runq_unlock(runq); xrunq = erts_port_runq(pp); ASSERT(xrunq); enqueue_port(xrunq, pp); - erts_smp_runq_unlock(xrunq); - erts_smp_notify_inc_runq(xrunq); + erts_runq_unlock(xrunq); + erts_notify_inc_runq(xrunq); - erts_smp_runq_lock(runq); + erts_runq_lock(runq); } -#endif } done: - res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) + res = (erts_atomic_read_nob(&erts_port_task_outstanding_io_tasks) != (erts_aint_t) 0); runq->scheduler->reductions += reds; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); ERTS_PORT_REDUCTIONS_EXECUTED(esdp, runq, reds); return res; } -#ifdef ERTS_SMP static void release_port(void *vport) { @@ -1970,7 +1917,6 @@ schedule_release_port(void *vport) { &pp->common.u.release); } -#endif static void begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) @@ -1981,7 +1927,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) ErtsPortTaskHandleList *free_nshp = NULL; ErtsProcList *plp; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); /* * Abort remaining tasks... @@ -2054,11 +2000,11 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) qs[i] = ptp->u.alive.next; /* Normal case here is aborted tasks... */ - state = erts_smp_atomic32_read_nob(&ptp->state); + state = erts_atomic32_read_nob(&ptp->state); if (state == ERTS_PT_STATE_ABORTED) goto aborted_port_task; - state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_EXECUTING, ERTS_PT_STATE_SCHEDULED); if (state != ERTS_PT_STATE_SCHEDULED) { @@ -2122,7 +2068,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) } } - erts_smp_atomic32_read_band_nob(&pp->sched.flags, + erts_atomic32_read_band_nob(&pp->sched.flags, ~(ERTS_PTS_FLG_HAVE_BUSY_TASKS |ERTS_PTS_FLG_HAVE_TASKS |ERTS_PTS_FLGS_BUSY)); @@ -2164,7 +2110,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) /* * Schedule cleanup of port structure... */ -#ifdef ERTS_SMP /* We might not be a scheduler, eg. 
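erts_port_task_outstanding_io_tasks, visible throughout the hunks above, is a single atomic counter: incremented when an input/output/event task is scheduled, decremented when one is aborted, and reduced in bulk after a batch has executed; erts_port_task_execute returns whether any such tasks remain outstanding. A compact sketch of that counting discipline:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_long outstanding_io_tasks;

    static void io_task_scheduled(void)   { atomic_fetch_add(&outstanding_io_tasks, 1); }
    static void io_task_aborted(void)     { atomic_fetch_sub(&outstanding_io_tasks, 1); }
    static void io_tasks_executed(long n) { atomic_fetch_sub(&outstanding_io_tasks, n); }

    /* e.g. consulted when deciding whether I/O still has pending work */
    static bool have_outstanding_io_tasks(void)
    {
        return atomic_load(&outstanding_io_tasks) != 0;
    }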
traceing to port we are sys_msg_dispatcher */ if (!erts_get_scheduler_data()) { erts_schedule_misc_aux_work(1, schedule_release_port, (void*)pp); @@ -2174,19 +2119,15 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) (void *) pp, &pp->common.u.release); } -#else - pp->cleanup = 1; -#endif } -#ifdef ERTS_SMP void erts_enqueue_port(ErtsRunQueue *rq, Port *pp) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); - ASSERT(rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); - ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); + ASSERT(rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); + ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ); enqueue_port(rq, pp); } @@ -2194,16 +2135,15 @@ Port * erts_dequeue_port(ErtsRunQueue *rq) { Port *pp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); pp = pop_port(rq); ASSERT(!pp - || rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); - ASSERT(!pp || (erts_smp_atomic32_read_nob(&pp->sched.flags) + || rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); + ASSERT(!pp || (erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ)); return pp; } -#endif /* * Initialize the module. @@ -2211,7 +2151,7 @@ erts_dequeue_port(ErtsRunQueue *rq) void erts_port_task_init(void) { - erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks, + erts_atomic_init_nob(&erts_port_task_outstanding_io_tasks, (erts_aint_t) 0); init_port_task_alloc(); init_busy_caller_table_alloc(); diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h index 39f403b443..561f4ca936 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -27,11 +27,11 @@ #ifndef ERTS_PORT_TASK_H_BASIC_TYPES__ #define ERTS_PORT_TASK_H_BASIC_TYPES__ #include "erl_sys_driver.h" -#include "erl_smp.h" +#include "erl_threads.h" #define ERL_PORT_GET_PORT_TYPE_ONLY__ #include "erl_port.h" #undef ERL_PORT_GET_PORT_TYPE_ONLY__ -typedef erts_smp_atomic_t ErtsPortTaskHandle; +typedef erts_atomic_t ErtsPortTaskHandle; #endif #ifndef ERTS_PORT_TASK_ONLY_BASIC_TYPES__ @@ -64,7 +64,7 @@ typedef enum { #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS /* NOTE: Do not access any of the exported variables directly */ -extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; +extern erts_atomic_t erts_port_task_outstanding_io_tasks; #endif #define ERTS_PTS_FLG_IN_RUNQ (((erts_aint32_t) 1) << 0) @@ -98,8 +98,8 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; typedef struct { ErlDrvSizeT high; - erts_smp_atomic_t low; - erts_smp_atomic_t size; + erts_atomic_t low; + erts_atomic_t size; } ErtsPortTaskBusyPortQ; typedef struct ErtsPortTask_ ErtsPortTask; @@ -124,10 +124,8 @@ typedef struct { } in; ErtsPortTaskBusyPortQ *bpq; } taskq; - erts_smp_atomic32_t flags; -#ifdef ERTS_SMP + erts_atomic32_t flags; erts_mtx_t mtx; -#endif } ErtsPortTaskSched; ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp); @@ -151,13 +149,13 @@ ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void); ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp) { - erts_smp_atomic_init_nob(pthp, (erts_aint_t) NULL); + erts_atomic_init_nob(pthp, (erts_aint_t) NULL); } ERTS_GLB_INLINE int erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp) { - return ((void *) erts_smp_atomic_read_acqb(pthp)) != NULL; + return 
((void *) erts_atomic_read_acqb(pthp)) != NULL; } ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, @@ -165,9 +163,9 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, { if (bpq) { erts_aint_t low = (erts_aint_t) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW; - erts_smp_atomic_init_nob(&bpq->low, low); + erts_atomic_init_nob(&bpq->low, low); bpq->high = (ErlDrvSizeT) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH; - erts_smp_atomic_init_nob(&bpq->size, (erts_aint_t) 0); + erts_atomic_init_nob(&bpq->size, (erts_aint_t) 0); } ptsp->taskq.bpq = bpq; } @@ -175,9 +173,7 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id) { -#ifdef ERTS_SMP char *lock_str = "port_sched_lock"; -#endif ptsp->next = NULL; ptsp->taskq.local.busy.first = NULL; ptsp->taskq.local.busy.last = NULL; @@ -186,32 +182,26 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id) ptsp->taskq.local.first = NULL; ptsp->taskq.in.first = NULL; ptsp->taskq.in.last = NULL; - erts_smp_atomic32_init_nob(&ptsp->flags, 0); -#ifdef ERTS_SMP + erts_atomic32_init_nob(&ptsp->flags, 0); erts_mtx_init(&ptsp->mtx, lock_str, instr_id, ERTS_LOCK_FLAGS_CATEGORY_IO); -#endif } ERTS_GLB_INLINE void erts_port_task_sched_lock(ErtsPortTaskSched *ptsp) { -#ifdef ERTS_SMP erts_mtx_lock(&ptsp->mtx); -#endif } ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp) { -#ifdef ERTS_SMP erts_mtx_unlock(&ptsp->mtx); -#endif } ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp) { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) return erts_lc_mtx_is_locked(&ptsp->mtx); #else return 0; @@ -222,15 +212,13 @@ erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp) ERTS_GLB_INLINE void erts_port_task_fini_sched(ErtsPortTaskSched *ptsp) { -#ifdef ERTS_SMP erts_mtx_destroy(&ptsp->mtx); -#endif } ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp) { - erts_smp_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING); + erts_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING); } #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS @@ -238,7 +226,7 @@ erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp) ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void) { - return (erts_smp_atomic_read_acqb(&erts_port_task_outstanding_io_tasks) + return (erts_atomic_read_acqb(&erts_port_task_outstanding_io_tasks) != 0); } @@ -265,10 +253,8 @@ ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data(void); ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data_extra(size_t extra, void **extra_ptr); void erts_port_task_free_p2p_sig_data(ErtsProc2PortSigData *sigdp); -#ifdef ERTS_SMP void erts_enqueue_port(ErtsRunQueue *rq, Port *pp); Port *erts_dequeue_port(ErtsRunQueue *rq); -#endif #undef ERTS_INCLUDE_SCHEDULER_INTERNALS #endif /* ERL_PORT_TASK_H__ */ #endif /* ERTS_PORT_TASK_ONLY_BASIC_TYPES__ */ diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index b72bac00c1..1f696f7ba4 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -127,18 +127,16 @@ runq_got_work_to_execute_flags(Uint32 flags) return !ERTS_IS_RUNQ_EMPTY_FLGS(flags); } -#ifdef ERTS_SMP static ERTS_INLINE int runq_got_work_to_execute(ErtsRunQueue *rq) { return runq_got_work_to_execute_flags(ERTS_RUNQ_FLGS_GET_NOB(rq)); } -#endif #undef 
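With the smp typedef gone, ErtsPortTaskHandle in erl_port_task.h is just an atomic word holding a task pointer: set_handle publishes it with a release store, handle2task and erts_port_task_is_scheduled read it with an acquire load, and reset stores NULL. A stand-alone C11 sketch of that publish/observe pattern:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct task;                               /* opaque here */
    typedef _Atomic(struct task *) task_handle;

    static void handle_init(task_handle *h) { atomic_init(h, NULL); }

    /* publish: everything written to *t before this is visible to readers */
    static void handle_set(task_handle *h, struct task *t)
    {
        atomic_store_explicit(h, t, memory_order_release);
    }

    static struct task *handle_get(task_handle *h)
    {
        return atomic_load_explicit(h, memory_order_acquire);
    }

    static bool handle_is_scheduled(task_handle *h) { return handle_get(h) != NULL; }

    static void handle_reset(task_handle *h)
    {
        atomic_store_explicit(h, NULL, memory_order_release);
    }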
RUNQ_READ_RQ #undef RUNQ_SET_RQ -#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_smp_atomic_read_nob((X))) -#define RUNQ_SET_RQ(X, RQ) erts_smp_atomic_set_nob((X), (erts_aint_t) (RQ)) +#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_atomic_read_nob((X))) +#define RUNQ_SET_RQ(X, RQ) erts_atomic_set_nob((X), (erts_aint_t) (RQ)) #ifdef DEBUG # if defined(ARCH_64) @@ -194,10 +192,8 @@ static UWord thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_O ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE); int erts_sched_thread_suggested_stack_size = -1; -#ifdef ERTS_DIRTY_SCHEDULERS int erts_dcpu_sched_thread_suggested_stack_size = -1; int erts_dio_sched_thread_suggested_stack_size = -1; -#endif #ifdef ERTS_ENABLE_LOCK_CHECK ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE]; #endif @@ -208,7 +204,6 @@ static struct { int sys_schedule; } sched_busy_wait; -#ifdef ERTS_SMP int erts_disable_proc_not_running_opt; static ErtsAuxWorkData *aux_thread_aux_work_data; @@ -226,25 +221,21 @@ typedef struct { typedef struct { Uint32 normal; -#ifdef ERTS_DIRTY_SCHEDULERS Uint32 dirty_cpu; Uint32 dirty_io; -#endif } ErtsSchedTypeCounters; static struct { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; ErtsSchedTypeCounters online; ErtsSchedTypeCounters curr_online; ErtsSchedTypeCounters active; - erts_smp_atomic32_t changing; + erts_atomic32_t changing; ErtsProcList *chngq; Eterm changer; ErtsMultiSchedulingBlock nmsb; /* Normal multi Scheduling Block */ ErtsMultiSchedulingBlock msb; /* Multi Scheduling Block */ -#ifdef ERTS_DIRTY_SCHEDULERS ErtsSchedType last_msb_dirty_type; -#endif } schdlr_sspnd; static void init_scheduler_suspend(void); @@ -253,10 +244,8 @@ static ERTS_INLINE Uint32 schdlr_sspnd_eq_nscheds(ErtsSchedTypeCounters *val1p, ErtsSchedTypeCounters *val2p) { int res = val1p->normal == val2p->normal; -#ifdef ERTS_DIRTY_SCHEDULERS res &= val1p->dirty_cpu == val2p->dirty_cpu; res &= val1p->dirty_io == val2p->dirty_io; -#endif return res; } @@ -267,16 +256,10 @@ schdlr_sspnd_get_nscheds(ErtsSchedTypeCounters *valp, switch (type) { case ERTS_SCHED_NORMAL: return valp->normal; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: return valp->dirty_cpu; case ERTS_SCHED_DIRTY_IO: return valp->dirty_io; -#else - case ERTS_SCHED_DIRTY_CPU: - case ERTS_SCHED_DIRTY_IO: - return 0; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); return 0; @@ -288,10 +271,8 @@ static ERTS_INLINE Uint32 schdlr_sspnd_get_nscheds_tot(ErtsSchedTypeCounters *valp) { Uint32 res = valp->normal; -#ifdef ERTS_DIRTY_SCHEDULERS res += valp->dirty_cpu; res += valp->dirty_io; -#endif return res; } #endif @@ -306,14 +287,12 @@ schdlr_sspnd_dec_nscheds(ErtsSchedTypeCounters *valp, case ERTS_SCHED_NORMAL: valp->normal--; break; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: valp->dirty_cpu--; break; case ERTS_SCHED_DIRTY_IO: valp->dirty_io--; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); } @@ -327,14 +306,12 @@ schdlr_sspnd_inc_nscheds(ErtsSchedTypeCounters *valp, case ERTS_SCHED_NORMAL: valp->normal++; break; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: valp->dirty_cpu++; break; case ERTS_SCHED_DIRTY_IO: valp->dirty_io++; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); } @@ -348,25 +325,23 @@ schdlr_sspnd_set_nscheds(ErtsSchedTypeCounters *valp, case ERTS_SCHED_NORMAL: valp->normal = no; break; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: valp->dirty_cpu = no; break; case ERTS_SCHED_DIRTY_IO: valp->dirty_io = no; break; 
-#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); } } static struct { - erts_smp_mtx_t update_mtx; - erts_smp_atomic32_t no_runqs; + erts_mtx_t update_mtx; + erts_atomic32_t no_runqs; int last_active_runqs; int forced_check_balance; - erts_smp_atomic32_t checking_balance; + erts_atomic32_t checking_balance; int halftime; int full_reds_history_index; struct { @@ -384,51 +359,41 @@ do { \ balance_info.prev_rise.reds = (REDS); \ } while (0) -#endif erts_sched_stat_t erts_sched_stat; -#ifdef USE_THREADS static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key); -#endif -static erts_smp_atomic32_t function_calls; +static erts_atomic32_t function_calls; -#ifdef ERTS_SMP -static erts_smp_atomic32_t doing_sys_schedule; -static erts_smp_atomic32_t no_empty_run_queues; +static erts_atomic32_t doing_sys_schedule; +static erts_atomic32_t no_empty_run_queues; long erts_runq_supervision_interval = 0; static ethr_event runq_supervision_event; static erts_tid_t runq_supervisor_tid; static erts_atomic_t runq_supervisor_sleeping; -#else /* !ERTS_SMP */ -ErtsSchedulerData *erts_scheduler_data; -#endif ErtsAlignedRunQueue * ERTS_WRITE_UNLIKELY(erts_aligned_run_queues); Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues); -#ifdef ERTS_DIRTY_SCHEDULERS struct { union { - erts_smp_atomic32_t active; + erts_atomic32_t active; char align__[ERTS_CACHE_LINE_SIZE]; } cpu; union { - erts_smp_atomic32_t active; + erts_atomic32_t active; char align__[ERTS_CACHE_LINE_SIZE]; } io; } dirty_count erts_align_attribute(ERTS_CACHE_LINE_SIZE); -#endif static ERTS_INLINE void dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add) { -#ifdef ERTS_DIRTY_SCHEDULERS erts_aint32_t val; - erts_smp_atomic32_t *ap; + erts_atomic32_t *ap; switch (esdp->type) { case ERTS_SCHED_DIRTY_CPU: ap = &dirty_count.cpu.active; @@ -446,23 +411,20 @@ dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add) * All updates done under run-queue lock, so * no inc or dec needed... 
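With dirty schedulers always built, the schdlr_sspnd counters above carry one field per scheduler type unconditionally, and the inc/dec/get helpers reduce to a switch over the type. A trivial sketch of that bookkeeping (enum and struct names are invented for the example):

    typedef enum { SCHED_NORMAL, SCHED_DIRTY_CPU, SCHED_DIRTY_IO } sched_type;

    typedef struct {
        unsigned normal;
        unsigned dirty_cpu;
        unsigned dirty_io;
    } sched_type_counters;

    static unsigned *slot(sched_type_counters *c, sched_type t)
    {
        switch (t) {
        case SCHED_NORMAL:    return &c->normal;
        case SCHED_DIRTY_CPU: return &c->dirty_cpu;
        case SCHED_DIRTY_IO:  return &c->dirty_io;
        }
        return &c->normal;   /* unreachable; the real code raises an internal error */
    }

    static void inc_nscheds(sched_type_counters *c, sched_type t) { (*slot(c, t))++; }
    static void dec_nscheds(sched_type_counters *c, sched_type t) { (*slot(c, t))--; }
    static unsigned get_nscheds(sched_type_counters *c, sched_type t) { return *slot(c, t); }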
*/ - ERTS_SMP_ASSERT(erts_smp_lc_runq_is_locked(esdp->run_queue)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue)); - val = erts_smp_atomic32_read_nob(ap); + val = erts_atomic32_read_nob(ap); val += add; - erts_smp_atomic32_set_nob(ap, val); -#endif + erts_atomic32_set_nob(ap, val); } ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_scheduler_data); -#ifdef ERTS_DIRTY_SCHEDULERS ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_cpu_scheduler_data); ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_io_scheduler_data); typedef union { Process dsp; char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(Process))]; } ErtsAlignedDirtyShadowProcess; -#endif typedef union { ErtsSchedulerSleepInfo ssi; @@ -470,12 +432,8 @@ typedef union { } ErtsAlignedSchedulerSleepInfo; static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info; -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP static ErtsAlignedSchedulerSleepInfo *aligned_dirty_cpu_sched_sleep_info; static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info; -#endif -#endif static Uint last_reductions; static Uint last_exact_reductions; @@ -547,7 +505,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist, (ASSERT(-1 <= ((int) (IX)) \ && ((int) (IX)) < ((int) erts_no_schedulers)), \ &aligned_sched_sleep_info[(IX)].ssi) -#ifdef ERTS_DIRTY_SCHEDULERS #define ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(IX) \ (ASSERT(0 <= ((int) (IX)) \ && ((int) (IX)) < ((int) erts_no_dirty_cpu_schedulers)), \ @@ -556,7 +513,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist, (ASSERT(0 <= ((int) (IX)) \ && ((int) (IX)) < ((int) erts_no_dirty_io_schedulers)), \ &aligned_dirty_io_sched_sleep_info[(IX)].ssi) -#endif #define ERTS_FOREACH_RUNQ(RQVAR, DO) \ do { \ @@ -564,9 +520,9 @@ do { \ int ix__; \ for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \ RQVAR = ERTS_RUNQ_IX(ix__); \ - erts_smp_runq_lock(RQVAR); \ + erts_runq_lock(RQVAR); \ { DO; } \ - erts_smp_runq_unlock(RQVAR); \ + erts_runq_unlock(RQVAR); \ } \ } while (0) @@ -576,12 +532,12 @@ do { \ int ix__; \ int online__ = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, \ ERTS_SCHED_NORMAL); \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \ + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \ for (ix__ = 0; ix__ < online__; ix__++) { \ RQVAR = ERTS_RUNQ_IX(ix__); \ - erts_smp_runq_lock(RQVAR); \ + erts_runq_lock(RQVAR); \ { DO; } \ - erts_smp_runq_unlock(RQVAR); \ + erts_runq_unlock(RQVAR); \ } \ } while (0) @@ -592,12 +548,12 @@ do { \ int ix__; \ for (ix__ = 0; ix__ < nrqs; ix__++) { \ RQVAR = ERTS_RUNQ_IX(ix__); \ - erts_smp_runq_lock(RQVAR); \ + erts_runq_lock(RQVAR); \ { DO; } \ } \ { DOX; } \ for (ix__ = 0; ix__ < nrqs; ix__++) \ - erts_smp_runq_unlock(ERTS_RUNQ_IX(ix__)); \ + erts_runq_unlock(ERTS_RUNQ_IX(ix__)); \ } while (0) #define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \ @@ -638,11 +594,8 @@ dbg_chk_aux_work_val(erts_aint32_t value) valid |= ERTS_SSI_AUX_WORK_MISC; valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM; valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC; -#if ERTS_USE_ASYNC_READY_Q valid |= ERTS_SSI_AUX_WORK_ASYNC_READY; valid |= ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN; -#endif -#ifdef ERTS_SMP valid |= ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP; valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR; valid |= ERTS_SSI_AUX_WORK_DD; @@ -651,7 +604,6 @@ dbg_chk_aux_work_val(erts_aint32_t value) valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR; valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP; valid |= ERTS_SSI_AUX_WORK_PENDING_EXITERS; -#endif #if 
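dirty_active above is a slightly unusual atomic: as its comment says, all updates happen under the run-queue lock, so the writer can use a plain read followed by a plain store instead of an atomic increment; the atomic type exists only so other threads can read the value without the lock. A short sketch of that convention:

    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int dirty_active;    /* written under runq_lock, read anywhere */

    /* caller must hold runq_lock, which serializes all writers */
    static void dirty_active_add(int add)
    {
        int val = atomic_load_explicit(&dirty_active, memory_order_relaxed);
        atomic_store_explicit(&dirty_active, val + add, memory_order_relaxed);
    }

    /* lock-free readers just load the current value */
    static int dirty_active_read(void)
    {
        return atomic_load(&dirty_active);
    }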
HAVE_ERTS_MSEG valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK; #endif @@ -673,16 +625,14 @@ dbg_chk_aux_work_val(erts_aint32_t value) #define ERTS_DBG_CHK_SSI_AUX_WORK(SSI) #endif -#ifdef ERTS_SMP static void do_handle_pending_exiters(ErtsProcList *); static void wake_scheduler(ErtsRunQueue *rq); -#endif -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int -erts_smp_lc_runq_is_locked(ErtsRunQueue *runq) +erts_lc_runq_is_locked(ErtsRunQueue *runq) { - return erts_smp_lc_mtx_is_locked(&runq->mtx); + return erts_lc_mtx_is_locked(&runq->mtx); } #endif @@ -690,13 +640,13 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq) static ERTS_INLINE Uint64 ensure_later_proc_interval(Uint64 interval) { - return erts_smp_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval); + return erts_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval); } Uint64 erts_get_proc_interval(void) { - return erts_smp_current_interval_nob(erts_ptab_interval(&erts_proc)); + return erts_current_interval_nob(erts_ptab_interval(&erts_proc)); } Uint64 @@ -708,15 +658,13 @@ erts_ensure_later_proc_interval(Uint64 interval) Uint64 erts_step_proc_interval(void) { - return erts_smp_step_interval_nob(erts_ptab_interval(&erts_proc)); + return erts_step_interval_nob(erts_ptab_interval(&erts_proc)); } void erts_pre_init_process(void) { -#ifdef USE_THREADS erts_tsd_key_create(&sched_data_key, "erts_sched_data_key"); -#endif erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX] = "DELAYED_AW_WAKEUP"; @@ -796,6 +744,11 @@ erts_pre_init_process(void) = ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS; erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].set_locks = ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS; + + erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks + = ERTS_PSD_DIST_ENTRY_GET_LOCKS; + erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks + = ERTS_PSD_DIST_ENTRY_SET_LOCKS; #endif } @@ -810,10 +763,8 @@ void erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab) { -#ifdef ERTS_SMP erts_disable_proc_not_running_opt = 0; erts_init_proc_lock(ncpu); -#endif init_proclist_alloc(); @@ -825,11 +776,7 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab) sizeof(Process), "process_table", legacy_proc_tab, -#ifdef ERTS_SMP 1 -#else - 0 -#endif ); last_reductions = 0; @@ -841,7 +788,7 @@ erts_late_init_process(void) { int ix; - erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL, + erts_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) { @@ -883,7 +830,6 @@ erts_late_init_process(void) static void init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp) { -#ifdef ERTS_DIRTY_SCHEDULERS if (esdp->type != ERTS_SCHED_NORMAL) { erts_atomic32_init_nob(&esdp->sched_wall_time.u.mod, 0); esdp->sched_wall_time.enabled = 1; @@ -892,7 +838,6 @@ init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp) esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE; } else -#endif { esdp->sched_wall_time.u.need = erts_sched_balance_util; esdp->sched_wall_time.enabled = 0; @@ -1041,14 +986,14 @@ erts_get_sched_util(ErtsRunQueue *rq, int initially_locked, int short_interval) if (!locked) { if (++try >= ERTS_GET_AVG_MAX_UNLOCKED_TRY) { /* Writer will eventually block on runq-lock */ - erts_smp_runq_lock(rq); + erts_runq_lock(rq); locked = 1; } } } if (!initially_locked && locked) - erts_smp_runq_unlock(rq); + 
        erts_runq_unlock(rq);
    now = sched_wall_time_ts();
    worktime = calc_sched_worktime(is_working, now, last, interval, old_worktime);
@@ -1090,7 +1035,6 @@ init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled)
 #endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */
-#ifdef ERTS_DIRTY_SCHEDULERS
 typedef struct {
     Uint64 working;
@@ -1143,9 +1087,7 @@ read_dirty_sched_wall_time(ErtsSchedulerData *esdp, ErtsDirtySchedWallTime *info
     info->working = info->total;
 }
-#endif
-#ifdef ERTS_SMP
 static void
 dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working)
@@ -1202,7 +1144,6 @@ dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working)
 #endif
 }
-#endif /* ERTS_SMP */
 static void
 sched_wall_time_change(ErtsSchedulerData *esdp, int working)
@@ -1247,11 +1188,9 @@ typedef struct {
     Eterm ref;
     Eterm ref_heap[ERTS_REF_THING_SIZE];
     Uint req_sched;
-    erts_smp_atomic32_t refc;
-#ifdef ERTS_DIRTY_SCHEDULERS
+    erts_atomic32_t refc;
     int want_dirty_cpu;
     int want_dirty_io;
-#endif
 } ErtsSchedWallTimeReq;
 typedef struct {
@@ -1259,7 +1198,7 @@ typedef struct {
     Eterm ref;
     Eterm ref_heap[ERTS_REF_THING_SIZE];
     Uint req_sched;
-    erts_smp_atomic32_t refc;
+    erts_atomic32_t refc;
 } ErtsSystemCheckReq;
@@ -1291,10 +1230,8 @@ reply_sched_wall_time(void *vswtrp)
     ErlOffHeap *ohp = NULL;
     ErtsMessage *mp = NULL;
-    ASSERT(esdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
-    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
+    ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
     if (swtrp->set) {
        if (!swtrp->enable && esdp->sched_wall_time.enabled) {
            esdp->sched_wall_time.u.need = erts_sched_balance_util;
@@ -1324,7 +1261,6 @@ reply_sched_wall_time(void *vswtrp)
     hpp = NULL;
     szp = &sz;
-#ifdef ERTS_DIRTY_SCHEDULERS
     if (esdp->sched_wall_time.enabled
        && swtrp->req_sched == esdp->no
        && (swtrp->want_dirty_cpu || swtrp->want_dirty_io)) {
@@ -1406,7 +1342,6 @@ reply_sched_wall_time(void *vswtrp)
            erts_free(ERTS_ALC_T_TMP, dswt);
     }
     else
-#endif
     {
        /* Reply with info about this scheduler only...
        */
@@ -1443,11 +1378,11 @@ reply_sched_wall_time(void *vswtrp)
     rp_locks &= ~ERTS_PROC_LOCK_MAIN;
     if (rp_locks)
-        erts_smp_proc_unlock(rp, rp_locks);
+        erts_proc_unlock(rp, rp_locks);
     erts_proc_dec_refc(rp);
-    if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0)
+    if (erts_atomic32_dec_read_nob(&swtrp->refc) == 0)
        swtreq_free(vswtrp);
 }
@@ -1460,11 +1395,10 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable,
     ErtsSchedWallTimeReq *swtrp;
     Eterm *hp;
+    ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
+
     if (!set && !esdp->sched_wall_time.enabled)
        return THE_NON_VALUE;
-#ifdef ERTS_DIRTY_SCHEDULERS
-    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
     swtrp = swtreq_alloc();
     ref = erts_make_ref(c_p);
@@ -1475,22 +1409,18 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable,
     swtrp->proc = c_p;
     swtrp->ref = STORE_NC(&hp, NULL, ref);
     swtrp->req_sched = esdp->no;
-#ifdef ERTS_DIRTY_SCHEDULERS
     swtrp->want_dirty_cpu = want_dirty_cpu;
     swtrp->want_dirty_io = want_dirty_io;
-#endif
-    erts_smp_atomic32_init_nob(&swtrp->refc,
+    erts_atomic32_init_nob(&swtrp->refc,
                            (erts_aint32_t) erts_no_schedulers);
     erts_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
-#ifdef ERTS_SMP
     if (erts_no_schedulers > 1)
        erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_sched_wall_time, (void *) swtrp);
-#endif
     reply_sched_wall_time((void *) swtrp);
@@ -1511,10 +1441,7 @@ reply_system_check(void *vscrp)
     ErlOffHeap *ohp = NULL;
     ErtsMessage *mp = NULL;
-    ASSERT(esdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
-    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
+    ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp));
     sz = ERTS_REF_THING_SIZE;
     mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
@@ -1527,11 +1454,11 @@ reply_system_check(void *vscrp)
     rp_locks &= ~ERTS_PROC_LOCK_MAIN;
     if (rp_locks)
-        erts_smp_proc_unlock(rp, rp_locks);
+        erts_proc_unlock(rp, rp_locks);
     erts_proc_dec_refc(rp);
-    if (erts_smp_atomic32_dec_read_nob(&scrp->refc) == 0)
+    if (erts_atomic32_dec_read_nob(&scrp->refc) == 0)
        screq_free(vscrp);
 }
@@ -1549,17 +1476,15 @@ Eterm erts_system_check_request(Process *c_p) {
     scrp->proc = c_p;
     scrp->ref = STORE_NC(&hp, NULL, ref);
     scrp->req_sched = esdp->no;
-    erts_smp_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers);
+    erts_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers);
     erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
     if (erts_no_schedulers > 1)
        erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_system_check, (void *) scrp);
-#endif
     reply_system_check((void *) scrp);
@@ -1620,7 +1545,7 @@ erts_psd_set_init(Process *p, int ix, void *data)
     for (i = 0; i < ERTS_PSD_SIZE; i++)
        new_psd->data[i] = NULL;
-    psd = (ErtsPSD *) erts_smp_atomic_cmpxchg_mb(&p->psd,
+    psd = (ErtsPSD *) erts_atomic_cmpxchg_mb(&p->psd,
                                             (erts_aint_t) new_psd,
                                             (erts_aint_t) NULL);
     if (psd)
@@ -1632,7 +1557,6 @@ erts_psd_set_init(Process *p, int ix, void *data)
     return old;
 }
-#ifdef ERTS_SMP
 void
 erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
@@ -1660,7 +1584,6 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
     }
 }
-#endif
 static ERTS_INLINE void
 set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
@@ -1676,11 +1599,7 @@ set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
        old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
        if ((old_flgs & flgs) != flgs) {
-#ifdef ERTS_SMP
            erts_sched_poke(ssi);
-#else
-            erts_sys_schedule_interrupt(1);
-#endif
        }
     }
 }
@@ -1696,11 +1615,7 @@
set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi, old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs); if ((old_flgs & flgs) != flgs) { -#ifdef ERTS_SMP erts_sched_poke(ssi); -#else - erts_sys_schedule_interrupt(1); -#endif } } @@ -1716,7 +1631,6 @@ unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs) return erts_atomic32_read_band_nob(&ssi->aux_work, ~flgs); } -#ifdef ERTS_SMP static ERTS_INLINE void haw_chk_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val) @@ -1786,9 +1700,9 @@ static ERTS_INLINE void haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp) { ErtsThrPrgrVal current = awdp->current_thr_prgr; -#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + if (current != ERTS_THR_PRGR_INVALID && !erts_thr_progress_equal(current, erts_thr_progress_current())) { /* @@ -1805,9 +1719,7 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, in { int jix, max_jix; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY); @@ -1865,7 +1777,6 @@ schedule_aux_work_wakeup(ErtsAuxWorkData *awdp, } } -#endif typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t; struct erts_misc_aux_work_t_ { @@ -1906,11 +1817,7 @@ init_misc_aux_work(void) sizeof(erts_algnd_misc_aux_work_q_t) * (erts_no_schedulers+1)); -#ifdef ERTS_SMP ix = 0; /* aux_thread + schedulers */ -#else - ix = 1; /* scheduler only */ -#endif for (; ix <= erts_no_schedulers; ix++) { qinit.arg = (void *) ERTS_SCHED_SLEEP_INFO_IX(ix-1); @@ -1928,10 +1835,8 @@ misc_aux_work_clean(ErtsThrQ_t *q, set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC); return aux_work | ERTS_SSI_AUX_WORK_MISC; case ERTS_THR_Q_NEED_THR_PRGR: -#ifdef ERTS_SMP set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR); haw_thr_prgr_soft_wakeup(awdp, erts_thr_q_need_thr_progress(q)); -#endif case ERTS_THR_Q_CLEAN: break; } @@ -1957,16 +1862,14 @@ handle_misc_aux_work(ErtsAuxWorkData *awdp, return misc_aux_work_clean(q, awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC); } -#ifdef ERTS_SMP static ERTS_INLINE erts_aint32_t handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), awdp->misc.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR; @@ -1978,7 +1881,6 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR); } -#endif static ERTS_INLINE void schedule_misc_aux_work(int sched_id, @@ -1988,11 +1890,7 @@ schedule_misc_aux_work(int sched_id, ErtsThrQ_t *q; erts_misc_aux_work_t *mawp; -#ifdef ERTS_SMP ASSERT(0 <= sched_id && sched_id <= erts_no_schedulers); -#else - ASSERT(sched_id == 1); -#endif q = &misc_aux_work_queues[sched_id].q; mawp = misc_aux_work_alloc(); @@ -2036,7 +1934,6 @@ erts_schedule_multi_misc_aux_work(int ignore_self, } } -#if ERTS_USE_ASYNC_READY_Q void erts_notify_check_async_ready_queue(void *vno) @@ -2052,9 +1949,9 @@ handle_async_ready(ErtsAuxWorkData *awdp, int waiting) { ErtsSchedulerSleepInfo *ssi = awdp->ssi; -#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY); if (erts_check_async_ready(awdp->async_ready.queue)) { if (set_aux_work_flags(ssi, 
ERTS_SSI_AUX_WORK_ASYNC_READY) @@ -2064,9 +1961,7 @@ handle_async_ready(ErtsAuxWorkData *awdp, } return aux_work; } -#ifdef ERTS_SMP awdp->async_ready.need_thr_prgr = 0; -#endif set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN); return ((aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY) | ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN); @@ -2079,10 +1974,8 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp, { void *thr_prgr_p; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif -#ifdef ERTS_SMP + if (awdp->async_ready.need_thr_prgr && !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), awdp->async_ready.thr_prgr)) { @@ -2091,26 +1984,20 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp, awdp->async_ready.need_thr_prgr = 0; thr_prgr_p = (void *) &awdp->async_ready.thr_prgr; -#else - thr_prgr_p = NULL; -#endif switch (erts_async_ready_clean(awdp->async_ready.queue, thr_prgr_p)) { case ERTS_ASYNC_READY_CLEAN: unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN); return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN; -#ifdef ERTS_SMP case ERTS_ASYNC_READY_NEED_THR_PRGR: haw_thr_prgr_soft_wakeup(awdp, awdp->async_ready.thr_prgr); awdp->async_ready.need_thr_prgr = 1; return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN; -#endif default: return aux_work; } } -#endif /* ERTS_USE_ASYNC_READY_Q */ static ERTS_INLINE erts_aint32_t @@ -2119,9 +2006,8 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) ErtsSchedulerSleepInfo *ssi = awdp->ssi; erts_aint32_t res; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC)); aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM @@ -2135,7 +2021,6 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) return aux_work; } -#ifdef ERTS_SMP void erts_alloc_notify_delayed_dealloc(int ix) @@ -2169,9 +2054,9 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; int more_work = 0; ERTS_MSACC_PUSH_STATE_M_X(); -#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD); ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ALLOC); erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp, @@ -2208,9 +2093,8 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_DD_THR_PRGR; @@ -2267,9 +2151,8 @@ handle_canceled_timers(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; int more_work = 0; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS); erts_handle_canceled_timers((void *) awdp->esdp, &need_thr_progress, @@ -2303,9 +2186,8 @@ handle_canceled_timers_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp 
|| !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + if (!erts_thr_progress_has_reached_this(current, awdp->cncld_tmrs.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR; @@ -2348,9 +2230,8 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait int lops; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) { ErtsThrPrgrLaterOp *lop = awdp->later_op.first; @@ -2380,7 +2261,7 @@ enqueue_later_op(ErtsSchedulerData *esdp, ErtsThrPrgrLaterOp *lop) { ErtsThrPrgrVal later = erts_thr_progress_later(esdp); - ASSERT(esdp); + ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)); lop->func = later_func; lop->data = later_data; @@ -2396,20 +2277,15 @@ enqueue_later_op(ErtsSchedulerData *esdp, return later; } -#endif /* ERTS_SMP */ void erts_schedule_thr_prgr_later_op(void (*later_func)(void *), void *later_data, ErtsThrPrgrLaterOp *lop) { -#ifndef ERTS_SMP - later_func(later_data); -#else ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop); haw_thr_prgr_wakeup(&esdp->aux_work_data, later); -#endif } void @@ -2418,13 +2294,9 @@ erts_schedule_thr_prgr_later_cleanup_op(void (*later_func)(void *), ErtsThrPrgrLaterOp *lop, UWord size) { -#ifndef ERTS_SMP - later_func(later_data); -#else ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop); haw_thr_prgr_later_cleanup_op_wakeup(&esdp->aux_work_data, later, size); -#endif } static ERTS_INLINE erts_aint32_t @@ -2433,9 +2305,7 @@ handle_debug_wait_completed(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int w ErtsSchedulerSleepInfo *ssi = awdp->ssi; erts_aint32_t saved_aux_work, flags; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif flags = awdp->debug.wait_completed.flags; @@ -2476,11 +2346,7 @@ setup_thr_debug_wait_completed(void *vproc) ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsAuxWorkData *awdp; erts_aint32_t wait_flags, aux_work_flags; -#ifdef ERTS_SMP awdp = esdp ? 
&esdp->aux_work_data : aux_thread_aux_work_data; -#else - awdp = &esdp->aux_work_data; -#endif wait_flags = 0; aux_work_flags = ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; @@ -2489,18 +2355,14 @@ setup_thr_debug_wait_completed(void *vproc) erts_alloc_fix_alloc_shrink(awdp->sched_id, 0); wait_flags |= (ERTS_SSI_AUX_WORK_DD | ERTS_SSI_AUX_WORK_DD_THR_PRGR); -#ifdef ERTS_SMP aux_work_flags |= ERTS_SSI_AUX_WORK_DD; -#endif } if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS) { wait_flags |= (ERTS_SSI_AUX_WORK_CNCLD_TMRS | ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR); -#ifdef ERTS_SMP if (awdp->esdp && !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)) aux_work_flags |= ERTS_SSI_AUX_WORK_CNCLD_TMRS; -#endif } set_aux_work_flags_wakeup_nob(awdp->ssi, aux_work_flags); @@ -2519,21 +2381,17 @@ static void later_thr_debug_wait_completed(void *vlop) { struct debug_lop *lop = vlop; erts_aint32_t count = (erts_aint32_t) erts_no_schedulers; -#ifdef ERTS_SMP count += 1; /* aux thread */ -#endif if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == count) { /* scheduler threads */ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers, setup_thr_debug_wait_completed, lop->proc); -#ifdef ERTS_SMP /* aux_thread */ erts_schedule_misc_aux_work(0, setup_thr_debug_wait_completed, lop->proc); -#endif } erts_free(ERTS_ALC_T_DEBUG, lop); } @@ -2554,9 +2412,7 @@ erts_debug_wait_completed(Process *c_p, int flags) { /* Only one process at a time can do this */ erts_aint32_t count = (erts_aint32_t) (2*erts_no_schedulers); -#ifdef ERTS_SMP count += 1; /* aux thread */ -#endif if (0 == erts_atomic32_cmpxchg_mb(&debug_wait_completed_count, count, 0)) { @@ -2585,7 +2441,7 @@ notify_reap_ports_relb(void) } } -erts_smp_atomic32_t erts_halt_progress; +erts_atomic32_t erts_halt_progress; int erts_halt_code; static ERTS_INLINE erts_aint32_t @@ -2594,9 +2450,9 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS); ERTS_RUNQ_FLGS_SET(awdp->esdp->run_queue, ERTS_RUNQ_FLG_HALTING); - if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) { + if (erts_atomic32_dec_read_acqb(&erts_halt_progress) == 0) { int i, max = erts_ptab_max(&erts_port); - erts_smp_atomic32_set_nob(&erts_halt_progress, 1); + erts_atomic32_set_nob(&erts_halt_progress, 1); for (i = 0; i < max; i++) { erts_aint32_t state; Port *prt = erts_pix2port(i); @@ -2609,21 +2465,21 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) /* We need to set the halt flag - get the port lock */ - erts_smp_port_lock(prt); + erts_port_lock(prt); state = erts_atomic32_read_nob(&prt->state); if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP | ERTS_PORT_SFLG_HALT))) { state = erts_atomic32_read_bor_relb(&prt->state, ERTS_PORT_SFLG_HALT); - erts_smp_atomic32_inc_nob(&erts_halt_progress); + erts_atomic32_inc_nob(&erts_halt_progress); if (!(state & (ERTS_PORT_SFLG_EXITING|ERTS_PORT_SFLG_CLOSING))) erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1); } erts_port_release(prt); } - if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) { + if (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0) { erts_flush_async_exit(erts_halt_code, ""); } } @@ -2684,7 +2540,6 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiti #endif -#ifdef ERTS_SMP static ERTS_INLINE erts_aint32_t handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) @@ -2695,10 +2550,10 @@ 
handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin rq = awdp->esdp->run_queue; unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); pnd_xtrs = rq->procs.pending_exiters; rq->procs.pending_exiters = NULL; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (erts_proclist_fetch(&pnd_xtrs, NULL)) do_handle_pending_exiters(pnd_xtrs); @@ -2706,7 +2561,6 @@ handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin return aux_work & ~ERTS_SSI_AUX_WORK_PENDING_EXITERS; } -#endif static ERTS_INLINE erts_aint32_t handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) @@ -2738,9 +2592,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_AUX); ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#ifdef ERTS_SMP haw_thr_prgr_current_reset(awdp); -#endif ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); ASSERT(aux_work); @@ -2759,7 +2611,6 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative * eachother. Most frequent first. */ -#ifdef ERTS_SMP HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP, handle_delayed_aux_work_wakeup); HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD, @@ -2767,13 +2618,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) /* DD must be before DD_THR_PRGR */ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD_THR_PRGR, handle_delayed_dealloc_thr_prgr); -#endif HANDLE_AUX_WORK((ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC), handle_fix_alloc); -#ifdef ERTS_SMP HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP, handle_thr_prgr_later_op); HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS, @@ -2781,28 +2630,21 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) /* CNCLD_TMRS must be before CNCLD_TMRS_THR_PRGR */ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR, handle_canceled_timers_thr_prgr); -#endif -#if ERTS_USE_ASYNC_READY_Q HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY, handle_async_ready); /* ASYNC_READY must be before ASYNC_READY_CLEAN */ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN, handle_async_ready_clean); -#endif -#ifdef ERTS_SMP HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC_THR_PRGR, handle_misc_aux_work_thr_prgr); -#endif /* MISC_THR_PRGR must be before MISC */ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC, handle_misc_aux_work); -#ifdef ERTS_SMP HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_PENDING_EXITERS, handle_pending_exiters); -#endif HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO, handle_setup_aux_work_timer); @@ -2828,10 +2670,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); -#ifdef ERTS_SMP if (waiting && !aux_work) haw_thr_prgr_current_check_progress(awdp); -#endif ERTS_MSACC_UPDATE_CACHE(); ERTS_MSACC_POP_STATE_M(); @@ -2930,11 +2770,7 @@ aux_work_timeout(void *vesdp) ASSERT(esdp == (ErtsSchedulerData *) vesdp); #endif -#ifdef ERTS_SMP i = 0; -#else - i = 1; -#endif for (; i <= erts_no_schedulers; i++) { erts_aint32_t type; @@ -2968,9 +2804,6 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable) { erts_aint32_t old, refc; -#ifndef ERTS_SMP - ix = 1; -#endif ERTS_DBG_CHK_AUX_WORK_VAL(type); ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix])); @@ -3002,7 +2835,7 @@ 
erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable) static ERTS_INLINE void sched_waiting_sys(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); ASSERT(rq->waiting >= 0); (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); @@ -3016,10 +2849,10 @@ sched_waiting_sys(Uint no, ErtsRunQueue *rq) static ERTS_INLINE void sched_active_sys(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); -#ifdef ERTS_DIRTY_SCHEDULERS + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); + ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); -#endif + ASSERT(rq->waiting < 0); rq->waiting *= -1; rq->waiting--; @@ -3037,34 +2870,27 @@ erts_active_schedulers(void) return as; } -#ifdef ERTS_SMP static ERTS_INLINE void clear_sys_scheduling(void) { - erts_smp_atomic32_set_mb(&doing_sys_schedule, 0); + erts_atomic32_set_mb(&doing_sys_schedule, 0); } static ERTS_INLINE int try_set_sys_scheduling(void) { - return 0 == erts_smp_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0); + return 0 == erts_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0); } -#endif static ERTS_INLINE int prepare_for_sys_schedule(int non_blocking) { if (non_blocking && erts_eager_check_io) { -#ifdef ERTS_SMP return try_set_sys_scheduling(); -#else - return 1; -#endif } else { -#ifdef ERTS_SMP while (!erts_port_task_have_outstanding_io_tasks() && try_set_sys_scheduling()) { if (!erts_port_task_have_outstanding_io_tasks()) @@ -3072,21 +2898,17 @@ prepare_for_sys_schedule(int non_blocking) clear_sys_scheduling(); } return 0; -#else - return !erts_port_task_have_outstanding_io_tasks(); -#endif } } -#ifdef ERTS_SMP static ERTS_INLINE void sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); -#ifdef ERTS_DIRTY_SCHEDULERS + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); + ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); -#endif + ASSERT(rq->waiting < 0); rq->waiting *= -1; } @@ -3094,7 +2916,7 @@ sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq) static ERTS_INLINE void sched_waiting(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); if (rq->waiting < 0) @@ -3109,7 +2931,7 @@ sched_waiting(Uint no, ErtsRunQueue *rq) static ERTS_INLINE void sched_active(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); if (rq->waiting < 0) rq->waiting++; else @@ -3123,7 +2945,7 @@ empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags) { if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && old_flags & ERTS_RUNQ_FLG_NONEMPTY) { #ifdef DEBUG - erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); + erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues); /* * For a short period of time no_empty_run_queues may have * been increased twice for a specific run queue. 
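The hunks above also strip the SMP wrappers from the system-scheduling gate: try_set_sys_scheduling() claims the slot by flipping doing_sys_schedule from 0 to 1 with an acquire compare-and-swap ("_acqb"), and clear_sys_scheduling() releases it with a full-barrier store ("_mb"), with prepare_for_sys_schedule() re-checking outstanding I/O tasks after winning the flag. The following standalone C11 sketch is illustrative only, not OTP code; the flag name and the exact memory orders are assumptions chosen to mirror those suffixes:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the doing_sys_schedule flag used above. */
    static atomic_int doing_sys_schedule = 0;

    /* Mirrors try_set_sys_scheduling(): 0 -> 1 with acquire semantics. */
    static int try_set_sys_scheduling_sketch(void)
    {
        int expected = 0;
        return atomic_compare_exchange_strong_explicit(&doing_sys_schedule,
                                                       &expected, 1,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }

    /* Mirrors clear_sys_scheduling(): release the slot with a full barrier. */
    static void clear_sys_scheduling_sketch(void)
    {
        atomic_store_explicit(&doing_sys_schedule, 0, memory_order_seq_cst);
    }

    int main(void)
    {
        if (try_set_sys_scheduling_sketch()) {
            /* only the winning thread would go on to poll the system here */
            puts("claimed system scheduling");
            clear_sys_scheduling_sketch();
        }
        return 0;
    }

At most one scheduler wins the exchange at a time, which is why the loop in prepare_for_sys_schedule() can afford to re-test erts_port_task_have_outstanding_io_tasks() and back out with clear_sys_scheduling() before sleeping in the poll set.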
@@ -3131,9 +2953,9 @@ empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags) ASSERT(0 <= empty && empty < 2*erts_no_run_queues); #endif if (!erts_runq_supervision_interval) - erts_smp_atomic32_inc_relb(&no_empty_run_queues); + erts_atomic32_inc_relb(&no_empty_run_queues); else { - erts_smp_atomic32_inc_mb(&no_empty_run_queues); + erts_atomic32_inc_mb(&no_empty_run_queues); if (erts_atomic_read_nob(&runq_supervisor_sleeping)) ethr_event_set(&runq_supervision_event); } @@ -3163,7 +2985,7 @@ non_empty_runq(ErtsRunQueue *rq) Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY); if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY))) { #ifdef DEBUG - erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); + erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues); /* * For a short period of time no_empty_run_queues may have * been increased twice for a specific run queue. @@ -3171,10 +2993,10 @@ non_empty_runq(ErtsRunQueue *rq) ASSERT(0 < empty && empty <= 2*erts_no_run_queues); #endif if (!erts_runq_supervision_interval) - erts_smp_atomic32_dec_relb(&no_empty_run_queues); + erts_atomic32_dec_relb(&no_empty_run_queues); else { erts_aint32_t no; - no = erts_smp_atomic32_dec_read_mb(&no_empty_run_queues); + no = erts_atomic32_dec_read_mb(&no_empty_run_queues); if (no > 0 && erts_atomic_read_nob(&runq_supervisor_sleeping)) ethr_event_set(&runq_supervision_event); } @@ -3203,7 +3025,7 @@ sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi) do { nflgs = (xflgs & ERTS_SSI_FLG_MSB_EXEC); nflgs |= ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING; - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -3220,7 +3042,7 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi) erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING; do { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -3237,7 +3059,7 @@ sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount) erts_aint32_t flgs; do { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) { break; @@ -3266,7 +3088,7 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type) } while (1) { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) @@ -3293,7 +3115,7 @@ static void thr_prgr_prep_wait(void *vssi) { ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi; - erts_smp_atomic32_read_bor_acqb(&ssi->flags, + erts_atomic32_read_bor_acqb(&ssi->flags, ERTS_SSI_FLG_SLEEPING); } @@ -3308,7 +3130,7 @@ thr_prgr_wait(void *vssi) while (1) { erts_aint32_t aflgs, nflgs; nflgs = xflgs | ERTS_SSI_FLG_TSE_SLEEPING; - aflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + aflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (aflgs == xflgs) { erts_tse_wait(ssi->event); break; @@ -3323,7 +3145,7 @@ static void thr_prgr_fin_wait(void *vssi) { ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi; - erts_smp_atomic32_read_band_nob(&ssi->flags, + erts_atomic32_read_band_nob(&ssi->flags, 
~(ERTS_SSI_FLG_SLEEPING | ERTS_SSI_FLG_TSE_SLEEPING)); } @@ -3404,7 +3226,6 @@ aux_thread(void *unused) static void suspend_scheduler(ErtsSchedulerData *esdp); -#endif /* ERTS_SMP */ static void scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) @@ -3413,40 +3234,31 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) ErtsSchedulerSleepInfo *ssi = esdp->ssi; int spincount; erts_aint32_t aux_work = 0; -#ifdef ERTS_SMP int thr_prgr_active = 1; erts_aint32_t flgs; -#endif ERTS_MSACC_PUSH_STATE_M(); -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) - erts_smp_spin_lock(&rq->sleepers.lock); -#endif + erts_spin_lock(&rq->sleepers.lock); flgs = sched_prep_spin_wait(ssi); if (flgs & ERTS_SSI_FLG_SUSPENDED) { /* Go suspend instead... */ -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) - erts_smp_spin_unlock(&rq->sleepers.lock); -#endif + erts_spin_unlock(&rq->sleepers.lock); return; } -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { ssi->prev = NULL; ssi->next = rq->sleepers.list; if (rq->sleepers.list) rq->sleepers.list->prev = ssi; rq->sleepers.list = ssi; - erts_smp_spin_unlock(&rq->sleepers.lock); + erts_spin_unlock(&rq->sleepers.lock); dirty_active(esdp, -1); } -#endif /* * If all schedulers are waiting, one of them *should* @@ -3457,7 +3269,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sched_waiting(esdp->no, rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); spincount = sched_busy_wait.tse; @@ -3485,7 +3297,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (aux_work) { if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); current_time = erts_get_monotonic_time(esdp); if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { if (!thr_prgr_active) { @@ -3573,7 +3385,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) } if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) - erts_smp_atomic32_read_band_nob(&ssi->flags, + erts_atomic32_read_band_nob(&ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC)); @@ -3584,23 +3396,21 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sched_wall_time_change(esdp, 1); } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); sched_active(esdp->no, rq); } else -#endif { - erts_smp_atomic32_set_relb(&function_calls, 0); + erts_atomic32_set_relb(&function_calls, 0); *fcalls = 0; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); -#endif + sched_waiting_sys(esdp->no, rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); ASSERT(working); sched_wall_time_change(esdp, working = 0); @@ -3632,31 +3442,20 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) } sys_aux_work: -#ifndef ERTS_SMP - erts_sys_schedule_interrupt(0); -#endif aux_work = erts_atomic32_read_acqb(&ssi->aux_work); if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) { if (!working) sched_wall_time_change(esdp, working = 1); -#ifdef ERTS_SMP if (!thr_prgr_active) erts_thr_progress_active(esdp, thr_prgr_active = 1); -#endif aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1); ERTS_MSACC_UPDATE_CACHE(); -#ifdef ERTS_SMP if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); -#endif } -#ifndef ERTS_SMP - if 
(erts_smp_atomic32_read_dirty(&rq->len) != 0 || rq->misc.start) - goto sys_woken; -#else - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if (!(flgs & ERTS_SSI_FLG_WAITING)) { ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); goto sys_woken; @@ -3677,12 +3476,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) goto tse_wait; } } -#endif } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); -#ifdef ERTS_SMP /* * If we got new I/O tasks we aren't allowed to * sleep in erl_sys_schedule(). @@ -3700,24 +3497,22 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) * do tse wait instead... */ sched_change_waiting_sys_to_waiting(esdp->no, rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); spincount = 0; goto tse_wait; } } -#endif if (aux_work) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); goto sys_poll_aux_work; } -#ifdef ERTS_SMP flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING); if (!(flgs & ERTS_SSI_FLG_SLEEPING)) { if (!(flgs & ERTS_SSI_FLG_WAITING)) { ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); goto sys_locked_woken; } - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); flgs = sched_prep_cont_spin_wait(ssi); if (!(flgs & ERTS_SSI_FLG_WAITING)) { ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); @@ -3729,17 +3524,14 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING); ASSERT(flgs & ERTS_SSI_FLG_WAITING); -#endif - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (working) sched_wall_time_change(esdp, working = 0); -#ifdef ERTS_SMP if (thr_prgr_active) erts_thr_progress_active(esdp, thr_prgr_active = 0); -#endif ASSERT(!erts_port_task_have_outstanding_io_tasks()); @@ -3756,11 +3548,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erts_bump_timers(esdp->timer_wheel, current_time); } -#ifndef ERTS_SMP - if (erts_smp_atomic32_read_dirty(&rq->len) == 0 && !rq->misc.start) - goto sys_aux_work; - sys_woken: -#else flgs = sched_prep_cont_spin_wait(ssi); if (flgs & ERTS_SSI_FLG_WAITING) goto sys_aux_work; @@ -3768,19 +3555,18 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sys_woken: if (!thr_prgr_active) erts_thr_progress_active(esdp, thr_prgr_active = 1); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); sys_locked_woken: if (!thr_prgr_active) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); erts_thr_progress_active(esdp, thr_prgr_active = 1); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } clear_sys_scheduling(); if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) - erts_smp_atomic32_read_band_nob(&ssi->flags, + erts_atomic32_read_band_nob(&ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC)); -#endif if (!working) sched_wall_time_change(esdp, working = 1); sched_active_sys(esdp->no, rq); @@ -3789,10 +3575,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (ERTS_SCHEDULER_IS_DIRTY(esdp)) dirty_active(esdp, 1); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); } -#ifdef ERTS_SMP static ERTS_INLINE erts_aint32_t ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi) @@ -3802,7 +3587,7 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi) erts_aint32_t nflgs = 0; erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING; while (1) { - oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return 
oflgs; nflgs = oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC); @@ -3816,13 +3601,12 @@ ssi_wake(ErtsSchedulerSleepInfo *ssi) erts_sched_finish_poke(ssi, ssi_flags_set_wake(ssi)); } -#ifdef ERTS_DIRTY_SCHEDULERS static void dcpu_sched_ix_suspend_wake(Uint ix) { ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); - erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); ssi_wake(ssi); } @@ -3830,7 +3614,7 @@ static void dio_sched_ix_suspend_wake(Uint ix) { ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix); - erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); ssi_wake(ssi); } @@ -3848,7 +3632,6 @@ dio_sched_ix_wake(Uint ix) } #endif -#endif static void wake_scheduler(ErtsRunQueue *rq) @@ -3861,13 +3644,12 @@ wake_scheduler(ErtsRunQueue *rq) * so all code *should* handle this without having * the lock on the run queue. */ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq) + ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq) || ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); ssi_wake(rq->scheduler->ssi); } -#ifdef ERTS_DIRTY_SCHEDULERS static void wake_dirty_schedulers(ErtsRunQueue *rq, int one) { @@ -3877,10 +3659,10 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one) ASSERT(ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); sl = &rq->sleepers; - erts_smp_spin_lock(&sl->lock); + erts_spin_lock(&sl->lock); ssi = sl->list; if (!ssi) { - erts_smp_spin_unlock(&sl->lock); + erts_spin_unlock(&sl->lock); if (one) wake_scheduler(rq); } else if (one) { @@ -3894,14 +3676,14 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one) if (ssi->next) ssi->next->prev = ssi->prev; - erts_smp_spin_unlock(&sl->lock); + erts_spin_unlock(&sl->lock); ERTS_THR_MEMORY_BARRIER; flgs = ssi_flags_set_wake(ssi); erts_sched_finish_poke(ssi, flgs); } else { sl->list = NULL; - erts_smp_spin_unlock(&sl->lock); + erts_spin_unlock(&sl->lock); ERTS_THR_MEMORY_BARRIER; do { @@ -3918,7 +3700,6 @@ wake_dirty_scheduler(ErtsRunQueue *rq) wake_dirty_schedulers(rq, 1); } -#endif #define ERTS_NO_USED_RUNQS_SHIFT 16 #define ERTS_NO_RUNQS_MASK 0xffffU @@ -3932,13 +3713,13 @@ init_no_runqs(int active, int used) { erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK); no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT); - erts_smp_atomic32_init_nob(&balance_info.no_runqs, no_runqs); + erts_atomic32_init_nob(&balance_info.no_runqs, no_runqs); } static ERTS_INLINE void get_no_runqs(int *active, int *used) { - erts_aint32_t no_runqs = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t no_runqs = erts_atomic32_read_nob(&balance_info.no_runqs); if (active) *active = (int) (no_runqs & ERTS_NO_RUNQS_MASK); if (used) @@ -3948,12 +3729,12 @@ get_no_runqs(int *active, int *used) static ERTS_INLINE void set_no_used_runqs(int used) { - erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs); while (1) { erts_aint32_t act, new; new = (used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT; new |= exp & ERTS_NO_RUNQS_MASK; - act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); + act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) break; exp = act; @@ -3963,14 +3744,14 @@ set_no_used_runqs(int used) static ERTS_INLINE void set_no_active_runqs(int active) { - erts_aint32_t exp = 
erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs); while (1) { erts_aint32_t act, new; if ((exp & ERTS_NO_RUNQS_MASK) == active) break; new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT); new |= active & ERTS_NO_RUNQS_MASK; - act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); + act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) break; exp = act; @@ -3980,14 +3761,14 @@ set_no_active_runqs(int active) static ERTS_INLINE int try_inc_no_active_runqs(int active) { - erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs); if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active) return 0; if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) { erts_aint32_t new, act; new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT); new |= active & ERTS_NO_RUNQS_MASK; - act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); + act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) return 1; } @@ -4049,25 +3830,20 @@ wake_scheduler_on_empty_runq(ErtsRunQueue *crq) } } -#endif /* ERTS_SMP */ static ERTS_INLINE void smp_notify_inc_runq(ErtsRunQueue *runq) { -#ifdef ERTS_SMP if (runq) { -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) wake_dirty_scheduler(runq); else -#endif wake_scheduler(runq); } -#endif } void -erts_smp_notify_inc_runq(ErtsRunQueue *runq) +erts_notify_inc_runq(ErtsRunQueue *runq) { smp_notify_inc_runq(runq); } @@ -4075,16 +3851,12 @@ erts_smp_notify_inc_runq(ErtsRunQueue *runq) void erts_sched_notify_check_cpu_bind(void) { -#ifdef ERTS_SMP int ix; for (ix = 0; ix < erts_no_run_queues; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND); wake_scheduler(rq); } -#else - erts_sched_check_cpu_bind(erts_get_scheduler_data()); -#endif } @@ -4093,9 +3865,9 @@ enqueue_process(ErtsRunQueue *runq, int prio, Process *p) { ErtsRunPrioQueue *rpq; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); - erts_smp_inc_runq_len(runq, &runq->procs.prio_info[prio], prio); + erts_inc_runq_len(runq, &runq->procs.prio_info[prio], prio); if (prio == PRIORITY_LOW) { p->schedule_count = RESCHEDULE_LOW; @@ -4123,7 +3895,7 @@ unqueue_process(ErtsRunQueue *runq, Process *prev_proc, Process *proc) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); if (prev_proc) prev_proc->next = proc->next; @@ -4135,7 +3907,7 @@ unqueue_process(ErtsRunQueue *runq, if (!rpq->first) rpq->last = NULL; - erts_smp_dec_runq_len(runq, rqi, prio); + erts_dec_runq_len(runq, rqi, prio); } @@ -4148,7 +3920,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep) ErtsRunQueueInfo *rqi; Process *p; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); ASSERT(PRIORITY_NORMAL == prio_q || PRIORITY_HIGH == prio_q @@ -4159,9 +3931,9 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep) if (!p) return NULL; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (statep) *statep = state; @@ -4194,11 +3966,10 @@ check_requeue_process(ErtsRunQueue *rq, int prio_q) static ERTS_INLINE void 
free_proxy_proc(Process *proxy) { - ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); + ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); erts_free(ERTS_ALC_T_PROC, proxy); } -#ifdef ERTS_SMP static ErtsRunQueue * check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio) @@ -4251,7 +4022,7 @@ static void immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) { Uint32 iflags, iflag; - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); ASSERT(erts_thr_progress_is_managed_thread()); @@ -4292,13 +4063,13 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) rq = check_immigration_need(c_rq, mp, prio); if (rq) { - erts_smp_runq_lock(rq); + erts_runq_lock(rq); if (prio == ERTS_PORT_PRIO_LEVEL) { Port *prt; prt = erts_dequeue_port(rq); if (prt) RUNQ_SET_RQ(&prt->run_queue, c_rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (prt) { /* port might terminate while we have no lock... */ rq = erts_port_runq(prt); @@ -4310,7 +4081,7 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) erts_enqueue_port(c_rq, prt); if (!iflag) return; /* done */ - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); } } } @@ -4324,38 +4095,38 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) while (proc) { erts_aint32_t state; - state = erts_smp_atomic32_read_acqb(&proc->state); + state = erts_atomic32_read_acqb(&proc->state); if (!(ERTS_PSFLG_BOUND & state) && (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) { ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio]; unqueue_process(rq, rpq, rqi, prio, prev_proc, proc); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); RUNQ_SET_RQ(&proc->run_queue, c_rq); rq_locked = 0; - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); enqueue_process(c_rq, prio, proc); if (!iflag) return; /* done */ - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); break; } prev_proc = proc; proc = proc->next; } if (rq_locked) - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } } } - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); } static ERTS_INLINE void suspend_run_queue(ErtsRunQueue *rq) { - erts_smp_atomic32_read_bor_nob(&rq->scheduler->ssi->flags, + erts_atomic32_read_bor_nob(&rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED); (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED); @@ -4372,7 +4143,7 @@ resume_run_queue(ErtsRunQueue *rq) ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); oflgs = ERTS_RUNQ_FLGS_READ_BSET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK @@ -4387,19 +4158,19 @@ resume_run_queue(ErtsRunQueue *rq) rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { - len = erts_smp_atomic32_read_dirty(&rq->procs.prio_info[pix].len); + len = erts_atomic32_read_dirty(&rq->procs.prio_info[pix].len); rq->procs.prio_info[pix].max_len = len; rq->procs.prio_info[pix].reds = 0; } - len = erts_smp_atomic32_read_dirty(&rq->ports.info.len); + len = erts_atomic32_read_dirty(&rq->ports.info.len); rq->ports.info.max_len = len; rq->ports.info.reds = 0; - len = erts_smp_atomic32_read_dirty(&rq->len); + len = erts_atomic32_read_dirty(&rq->len); rq->max_len = len; } - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); nrml_sched_ix_resume_wake(rq->ix); } @@ -4414,18 +4185,17 @@ schedule_bound_processes(ErtsRunQueue *rq, ErtsStuckBoundProcesses *sbpp) { Process *proc, *next; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); proc = sbpp->first; while (proc) { - erts_aint32_t state = 
erts_smp_atomic32_read_acqb(&proc->state); + erts_aint32_t state = erts_atomic32_read_acqb(&proc->state); next = proc->next; enqueue_process(rq, (int) ERTS_PSFLGS_GET_PRQ_PRIO(state), proc); proc = next; } } -#ifdef ERTS_DIRTY_SCHEDULERS static ERTS_INLINE void clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit) @@ -4445,11 +4215,10 @@ clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit) #else (void) #endif - erts_smp_atomic32_read_band_mb(&p->dirty_state, ~qb); + erts_atomic32_read_band_mb(&p->dirty_state, ~qb); ASSERT(old & qb); } -#endif /* ERTS_DIRTY_SCHEDULERS */ static void @@ -4461,7 +4230,7 @@ evacuate_run_queue(ErtsRunQueue *rq, ErtsMigrationPaths *mps; ErtsMigrationPath *mp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); @@ -4484,9 +4253,9 @@ evacuate_run_queue(ErtsRunQueue *rq, rq->misc.start = NULL; rq->misc.end = NULL; ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); - erts_smp_runq_lock(to_rq); + erts_runq_lock(to_rq); if (to_rq->misc.end) to_rq->misc.end->next = start; else @@ -4496,9 +4265,9 @@ evacuate_run_queue(ErtsRunQueue *rq, non_empty_runq(to_rq); - erts_smp_runq_unlock(to_rq); + erts_runq_unlock(to_rq); smp_notify_inc_runq(to_rq); - erts_smp_runq_lock(to_rq); + erts_runq_lock(to_rq); } if (rq->ports.start) { @@ -4514,7 +4283,7 @@ evacuate_run_queue(ErtsRunQueue *rq, ErtsRunQueue *prt_rq; prt = erts_dequeue_port(rq); RUNQ_SET_RQ(&prt->run_queue, to_rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); /* * The port might terminate while * we have no lock on it... @@ -4526,9 +4295,9 @@ evacuate_run_queue(ErtsRunQueue *rq, "%s:%d:%s() internal error\n", __FILE__, __LINE__, __func__); erts_enqueue_port(to_rq, prt); - erts_smp_runq_unlock(to_rq); + erts_runq_unlock(to_rq); } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); prt = rq->ports.start; } smp_notify_inc_runq(to_rq); @@ -4565,7 +4334,7 @@ evacuate_run_queue(ErtsRunQueue *rq, free_proxy_proc(proc); goto handle_next_proc; } - real_state = erts_smp_atomic32_read_acqb(&real_proc->state); + real_state = erts_atomic32_read_acqb(&real_proc->state); } max_qbit = (state >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET); @@ -4591,7 +4360,7 @@ evacuate_run_queue(ErtsRunQueue *rq, #else (void) #endif - erts_smp_atomic32_read_band_mb(&proc->state, + erts_atomic32_read_band_mb(&proc->state, ~clr_bits); ASSERT((old & clr_bits) == clr_bits); @@ -4611,17 +4380,17 @@ evacuate_run_queue(ErtsRunQueue *rq, } else { int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); to_rq = mp->prio[prio].runq; RUNQ_SET_RQ(&proc->run_queue, to_rq); - erts_smp_runq_lock(to_rq); + erts_runq_lock(to_rq); enqueue_process(to_rq, prio, proc); - erts_smp_runq_unlock(to_rq); + erts_runq_unlock(to_rq); notify = 1; - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } handle_next_proc: @@ -4640,13 +4409,13 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, ErtsRunPrioQueue *rpq; if (*rq_lockedp) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); *rq_lockedp = 0; } - ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq)); - erts_smp_runq_lock(vrq); + erts_runq_lock(vrq); if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_HALTING) goto no_procs; @@ -4682,16 +4451,16 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, proc = 
rpq->first; while (proc) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); + erts_aint32_t state = erts_atomic32_read_acqb(&proc->state); if (!(ERTS_PSFLG_BOUND & state)) { /* Steal process */ int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio]; unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc); - erts_smp_runq_unlock(vrq); + erts_runq_unlock(vrq); RUNQ_SET_RQ(&proc->run_queue, rq); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); *rq_lockedp = 1; enqueue_process(rq, prio, proc); return !0; @@ -4705,7 +4474,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, no_procs: - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(vrq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(vrq)); /* * Check for a runnable port to steal... @@ -4715,7 +4484,7 @@ no_procs: ErtsRunQueue *prt_rq; Port *prt = erts_dequeue_port(vrq); RUNQ_SET_RQ(&prt->run_queue, rq); - erts_smp_runq_unlock(vrq); + erts_runq_unlock(vrq); /* * The port might terminate while @@ -4736,7 +4505,7 @@ no_procs: } } - erts_smp_runq_unlock(vrq); + erts_runq_unlock(vrq); return 0; } @@ -4768,7 +4537,7 @@ try_steal_task(ErtsRunQueue *rq) res = 0; rq_locked = 1; - ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked); + ERTS_LC_CHK_RUNQ_LOCK(rq, rq_locked); get_no_runqs(&active_rqs, &blnc_rqs); @@ -4781,7 +4550,7 @@ try_steal_task(ErtsRunQueue *rq) if (active_rqs < blnc_rqs) { int no = blnc_rqs - active_rqs; int stop_ix = vix = active_rqs + rq->ix % no; - while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { + while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { res = check_possible_steal_victim(rq, &rq_locked, vix); if (res) goto done; @@ -4796,7 +4565,7 @@ try_steal_task(ErtsRunQueue *rq) vix = rq->ix; /* ... then try to steal a job from another active queue... 
*/ - while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { + while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { vix++; if (vix >= active_rqs) vix = 0; @@ -4813,7 +4582,7 @@ try_steal_task(ErtsRunQueue *rq) done: if (!rq_locked) - erts_smp_runq_lock(rq); + erts_runq_lock(rq); if (res) return res; @@ -4939,7 +4708,7 @@ alloc_mpaths(void) { void *block; ErtsMigrationPaths *res; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx)); res = mpaths.freelist; if (res) { @@ -4962,7 +4731,7 @@ retire_mpaths(ErtsMigrationPaths *mps) { ErtsThrPrgrVal current; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx)); current = erts_thr_progress_current(); @@ -5008,7 +4777,7 @@ check_balance(ErtsRunQueue *c_rq) int sched_util_balancing; #endif - if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) { + if (erts_atomic32_xchg_nob(&balance_info.checking_balance, 1)) { c_rq->check_balance_reds = INT_MAX; return; } @@ -5016,15 +4785,15 @@ check_balance(ErtsRunQueue *c_rq) get_no_runqs(NULL, &blnc_no_rqs); if (blnc_no_rqs == 1) { c_rq->check_balance_reds = INT_MAX; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); return; } - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); if (balance_info.halftime) { balance_info.halftime = 0; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); ERTS_FOREACH_RUNQ(rq, { if (rq->waiting) @@ -5034,7 +4803,7 @@ check_balance(ErtsRunQueue *c_rq) rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; }); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); return; } @@ -5047,7 +4816,7 @@ check_balance(ErtsRunQueue *c_rq) * is manipulated. Such updates of the migration information * might clash with balancing. 
*/ - erts_smp_mtx_lock(&balance_info.update_mtx); + erts_mtx_lock(&balance_info.update_mtx); forced = balance_info.forced_check_balance; balance_info.forced_check_balance = 0; @@ -5055,10 +4824,10 @@ check_balance(ErtsRunQueue *c_rq) get_no_runqs(¤t_active, &blnc_no_rqs); if (blnc_no_rqs == 1) { - erts_smp_mtx_unlock(&balance_info.update_mtx); - erts_smp_runq_lock(c_rq); + erts_mtx_unlock(&balance_info.update_mtx); + erts_runq_lock(c_rq); c_rq->check_balance_reds = INT_MAX; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); return; } @@ -5074,7 +4843,7 @@ check_balance(ErtsRunQueue *c_rq) /* Read balance information for all run queues */ for (qix = 0; qix < blnc_no_rqs; qix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(qix); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); run_queue_info[qix].flags = ERTS_RUNQ_FLGS_GET_NOB(rq); for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { @@ -5102,7 +4871,7 @@ check_balance(ErtsRunQueue *c_rq) run_queue_info[qix].sched_util = erts_get_sched_util(rq, 1, 0); #endif - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } full_scheds = 0; @@ -5541,7 +5310,7 @@ erts_fprintf(stderr, "--------------------------------\n"); Uint32 flags = run_queue_info[qix].flags; ErtsRunQueue *rq = ERTS_RUNQ_IX(qix); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK)); if (rq->waiting) flags |= ERTS_RUNQ_FLG_OUT_OF_WORK; @@ -5556,27 +5325,27 @@ erts_fprintf(stderr, "--------------------------------\n"); rq->out_of_work_count = 0; (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags); - rq->max_len = erts_smp_atomic32_read_dirty(&rq->len); + rq->max_len = erts_atomic32_read_dirty(&rq->len); for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { ErtsRunQueueInfo *rqi; rqi = (pix == ERTS_PORT_PRIO_LEVEL ? &rq->ports.info : &rq->procs.prio_info[pix]); - erts_smp_reset_max_len(rq, rqi); + erts_reset_max_len(rq, rqi); rqi->reds = 0; } rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); balance_info.n++; retire_mpaths(old_mpaths); - erts_smp_mtx_unlock(&balance_info.update_mtx); + erts_mtx_unlock(&balance_info.update_mtx); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); } static void @@ -5584,7 +5353,7 @@ change_no_used_runqs(int used) { ErtsMigrationPaths *new_mpaths, *old_mpaths; int qix; - erts_smp_mtx_lock(&balance_info.update_mtx); + erts_mtx_lock(&balance_info.update_mtx); set_no_used_runqs(used); old_mpaths = erts_get_migration_paths_managed(); @@ -5631,28 +5400,23 @@ change_no_used_runqs(int used) /* Make sure that we balance soon... 
*/ balance_info.forced_check_balance = 1; - erts_smp_mtx_unlock(&balance_info.update_mtx); + erts_mtx_unlock(&balance_info.update_mtx); - erts_smp_runq_lock(ERTS_RUNQ_IX(0)); + erts_runq_lock(ERTS_RUNQ_IX(0)); ERTS_RUNQ_IX(0)->check_balance_reds = 0; - erts_smp_runq_unlock(ERTS_RUNQ_IX(0)); + erts_runq_unlock(ERTS_RUNQ_IX(0)); } -#endif /* #ifdef ERTS_SMP */ Uint erts_debug_nbalance(void) { -#ifdef ERTS_SMP Uint n; - erts_smp_mtx_lock(&balance_info.update_mtx); + erts_mtx_lock(&balance_info.update_mtx); n = balance_info.n; - erts_smp_mtx_unlock(&balance_info.update_mtx); + erts_mtx_unlock(&balance_info.update_mtx); return n; -#else - return 0; -#endif } /* Wakeup other schedulers */ @@ -5698,7 +5462,6 @@ typedef enum { #define ERTS_WAKEUP_OTHER_DEC_LEGACY 10 #define ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY (CONTEXT_REDS/10) -#ifdef ERTS_SMP static struct { ErtsSchedWakeupOtherThreshold threshold; @@ -5714,7 +5477,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { - int left_len = erts_smp_atomic32_read_dirty(&rq->len) - 1; + int left_len = erts_atomic32_read_dirty(&rq->len) - 1; if (left_len < 1) { int wo_reduce = wo_reds << wakeup_other.dec_shift; wo_reduce &= wakeup_other.dec_mask; @@ -5726,16 +5489,14 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) rq->wakeup_other += (left_len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC); if (rq->wakeup_other > wakeup_other.limit) { -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { if (rq->waiting) { wake_dirty_scheduler(rq); } } else -#endif { int empty_rqs = - erts_smp_atomic32_read_acqb(&no_empty_run_queues); + erts_atomic32_read_acqb(&no_empty_run_queues); if (flags & ERTS_RUNQ_FLG_PROTECTED) (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); if (empty_rqs != 0) @@ -5787,7 +5548,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { - erts_aint32_t len = erts_smp_atomic32_read_dirty(&rq->len); + erts_aint32_t len = erts_atomic32_read_dirty(&rq->len); if (len < 2) { rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds; if (rq->wakeup_other < 0) @@ -5798,7 +5559,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags) else { if (flags & ERTS_RUNQ_FLG_PROTECTED) (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) { + if (erts_atomic32_read_acqb(&no_empty_run_queues) != 0) { wake_scheduler_on_empty_runq(rq); rq->wakeup_other = 0; } @@ -5849,7 +5610,7 @@ static int no_runqs_to_supervise(void) { int used; - erts_aint32_t nerq = erts_smp_atomic32_read_acqb(&no_empty_run_queues); + erts_aint32_t nerq = erts_atomic32_read_acqb(&no_empty_run_queues); if (nerq <= 0) return 0; get_no_runqs(NULL, &used); @@ -5882,26 +5643,23 @@ runq_supervisor(void *unused) for (ix = 0; ix < no_rqs; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); if (ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) { - erts_smp_runq_lock(rq); - if (erts_smp_atomic32_read_dirty(&rq->len) != 0) + erts_runq_lock(rq); + if (erts_atomic32_read_dirty(&rq->len) != 0) wake_scheduler_on_empty_runq(rq); /* forced wakeup... 
*/ - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } } } return NULL; } -#endif void erts_early_init_scheduling(int no_schedulers) { aux_work_timeout_early_init(no_schedulers); -#ifdef ERTS_SMP wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM; wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT; -#endif sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM; sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT); @@ -5912,7 +5670,6 @@ erts_early_init_scheduling(int no_schedulers) int erts_sched_set_wakeup_other_thresold(char *str) { -#ifdef ERTS_SMP ErtsSchedWakeupOtherThreshold threshold; if (sys_strcmp(str, "very_high") == 0) threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH; @@ -5929,20 +5686,11 @@ erts_sched_set_wakeup_other_thresold(char *str) wakeup_other.threshold = threshold; set_wakeup_other_data(); return 0; -#else - if (sys_strcmp(str, "very_high") == 0 || sys_strcmp(str, "high") == 0 || - sys_strcmp(str, "medium") == 0 || sys_strcmp(str, "low") == 0 || - sys_strcmp(str, "very_low") == 0) { - return 0; - } - return EINVAL; -#endif } int erts_sched_set_wakeup_other_type(char *str) { -#ifdef ERTS_SMP ErtsSchedWakeupOtherType type; if (sys_strcmp(str, "default") == 0) type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT; @@ -5952,12 +5700,6 @@ erts_sched_set_wakeup_other_type(char *str) return EINVAL; wakeup_other.type = type; return 0; -#else - if (sys_strcmp(str, "default") == 0 || sys_strcmp(str, "legacy") == 0) { - return 0; - } - return EINVAL; -#endif } int @@ -6028,7 +5770,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) case ERTS_SCHED_NORMAL: id = (int) esdp->no; break; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: id = (int) erts_no_schedulers; id += (int) esdp->dirty_no; @@ -6038,7 +5779,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) id += (int) erts_no_dirty_cpu_schedulers; id += (int) esdp->dirty_no; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); break; @@ -6048,7 +5788,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->sched_id = id; awdp->esdp = esdp; awdp->ssi = esdp ? 
esdp->ssi : NULL; -#ifdef ERTS_SMP awdp->latest_wakeup = ERTS_THR_PRGR_VAL_FIRST; awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; @@ -6057,15 +5796,9 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->later_op.size = 0; awdp->later_op.first = NULL; awdp->later_op.last = NULL; -#endif -#ifdef ERTS_USE_ASYNC_READY_Q -#ifdef ERTS_SMP awdp->async_ready.need_thr_prgr = 0; awdp->async_ready.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; -#endif awdp->async_ready.queue = NULL; -#endif -#ifdef ERTS_SMP awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY; if (!dawwp) { awdp->delayed_wakeup.job = NULL; @@ -6081,7 +5814,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) for (i = 0; i <= erts_no_schedulers; i++) awdp->delayed_wakeup.sched2jix[i] = -1; } -#endif awdp->debug.wait_completed.flags = 0; awdp->debug.wait_completed.callback = NULL; awdp->debug.wait_completed.arg = NULL; @@ -6096,11 +5828,9 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, Uint64 time_stamp) { esdp->timer_wheel = NULL; -#ifdef ERTS_SMP erts_bits_init_state(&esdp->erl_bits_state); esdp->match_pseudo_process = NULL; esdp->free_process = NULL; -#endif esdp->x_reg_array = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER, ERTS_X_REGS_ALLOCATED * @@ -6108,7 +5838,6 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->f_reg_array = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER, MAX_REG * sizeof(FloatDef)); -#ifdef ERTS_DIRTY_SCHEDULERS esdp->run_queue = runq; if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) { esdp->no = 0; @@ -6136,18 +5865,12 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->dirty_shadow_process = shadow_proc; if (shadow_proc) { erts_init_empty_process(shadow_proc); - erts_smp_atomic32_init_nob(&shadow_proc->state, + erts_atomic32_init_nob(&shadow_proc->state, (ERTS_PSFLG_ACTIVE | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_PROXY)); shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC; } -#else - runq->scheduler = esdp; - esdp->run_queue = runq; - esdp->no = (Uint) num; - esdp->type = ERTS_SCHED_NORMAL; -#endif esdp->ssi = ssi; esdp->current_process = NULL; @@ -6169,9 +5892,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, if (daww_ptr) { init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr); -#ifdef ERTS_SMP *daww_ptr += daww_sz; -#endif } esdp->reductions = 0; @@ -6182,26 +5903,20 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, void erts_init_scheduling(int no_schedulers, int no_schedulers_online -#ifdef ERTS_DIRTY_SCHEDULERS , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online, int no_dirty_io_schedulers -#endif ) { int ix, n, no_ssi, tot_rqs; char *daww_ptr; size_t daww_sz; size_t size_runqs; -#ifdef ERTS_SMP erts_aint32_t set_schdlr_sspnd_change_flags; -#endif init_misc_op_list_alloc(); init_proc_sys_task_queues_alloc(); -#ifdef ERTS_SMP set_wakeup_other_data(); -#endif #if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT if (erts_sched_balance_util) @@ -6211,12 +5926,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online ASSERT(no_schedulers_online <= no_schedulers); ASSERT(no_schedulers_online >= 1); ASSERT(no_schedulers >= 1); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(no_dirty_cpu_schedulers <= no_schedulers); ASSERT(no_dirty_cpu_schedulers >= 1); ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online); ASSERT(no_dirty_cpu_schedulers_online >= 1); -#endif /* Create and initialize run queues */ @@ -6225,9 +5938,7 @@ 
erts_init_scheduling(int no_schedulers, int no_schedulers_online size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs; erts_aligned_run_queues = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs); -#ifdef ERTS_SMP - erts_smp_atomic32_init_nob(&no_empty_run_queues, 0); -#endif + erts_atomic32_init_nob(&no_empty_run_queues, 0); erts_no_run_queues = n; @@ -6241,20 +5952,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online * id if the esdp->no <-> ix+1 mapping change. */ - erts_smp_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1), + erts_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); - erts_smp_cnd_init(&rq->cnd); + erts_cnd_init(&rq->cnd); -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP if (ERTS_RUNQ_IX_IS_DIRTY(ix)) { - erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list", + erts_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list", make_small(ix + 1), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); } rq->sleepers.list = NULL; -#endif -#endif rq->waiting = 0; rq->woken = 0; @@ -6267,7 +5974,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online } rq->out_of_work_count = 0; rq->max_len = 0; - erts_smp_atomic32_set_nob(&rq->len, 0); + erts_atomic32_set_nob(&rq->len, 0); rq->wakeup_other = 0; rq->wakeup_other_reds = 0; @@ -6276,7 +5983,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online rq->procs.reductions = 0; for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { - erts_smp_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0); + erts_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0); rq->procs.prio_info[pix].max_len = 0; rq->procs.prio_info[pix].reds = 0; if (pix < ERTS_NO_PROC_PRIO_LEVELS - 1) { @@ -6288,7 +5995,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online rq->misc.start = NULL; rq->misc.end = NULL; - erts_smp_atomic32_init_nob(&rq->ports.info.len, 0); + erts_atomic32_init_nob(&rq->ports.info.len, 0); rq->ports.info.max_len = 0; rq->ports.info.reds = 0; rq->ports.start = NULL; @@ -6300,7 +6007,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online } -#ifdef ERTS_SMP if (erts_no_run_queues != 1) { run_queue_info = erts_alloc(ERTS_ALC_T_RUNQ_BLNS, @@ -6311,52 +6017,41 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online * erts_no_run_queues)); } -#endif n = (int) no_schedulers; erts_no_schedulers = n; erts_no_total_schedulers = n; -#ifdef ERTS_DIRTY_SCHEDULERS erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers; erts_no_total_schedulers += no_dirty_cpu_schedulers; erts_no_dirty_io_schedulers = no_dirty_io_schedulers; erts_no_total_schedulers += no_dirty_io_schedulers; -#endif /* Create and initialize scheduler sleep info */ -#ifdef ERTS_SMP no_ssi = n+1; -#else - no_ssi = 1; -#endif aligned_sched_sleep_info = erts_alloc_permanent_cache_aligned( ERTS_ALC_T_SCHDLR_SLP_INFO, no_ssi*sizeof(ErtsAlignedSchedulerSleepInfo)); for (ix = 0; ix < no_ssi; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_sched_sleep_info[ix].ssi; -#ifdef ERTS_SMP #if 0 /* no need to initialize these... 
*/ ssi->next = NULL; ssi->prev = NULL; #endif - erts_smp_atomic32_init_nob(&ssi->flags, 0); + erts_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_thread_func */ -#endif erts_atomic32_init_nob(&ssi->aux_work, 0); } -#ifdef ERTS_SMP aligned_sched_sleep_info++; -#ifdef ERTS_DIRTY_SCHEDULERS aligned_dirty_cpu_sched_sleep_info = erts_alloc_permanent_cache_aligned( ERTS_ALC_T_SCHDLR_SLP_INFO, no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi; - erts_smp_atomic32_init_nob(&ssi->flags, 0); + erts_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */ erts_atomic32_init_nob(&ssi->aux_work, 0); } @@ -6366,24 +6061,17 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); for (ix = 0; ix < no_dirty_io_schedulers; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi; - erts_smp_atomic32_init_nob(&ssi->flags, 0); + erts_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */ erts_atomic32_init_nob(&ssi->aux_work, 0); } -#endif -#endif /* Create and initialize scheduler specific data */ -#ifdef ERTS_SMP daww_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE((sizeof(ErtsDelayedAuxWorkWakeupJob) + sizeof(int))*(n+1)); daww_ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, daww_sz*n); -#else - daww_sz = 0; - daww_ptr = NULL; -#endif erts_aligned_scheduler_data = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, @@ -6396,7 +6084,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online NULL, 0); } -#ifdef ERTS_DIRTY_SCHEDULERS { Uint64 ts = sched_wall_time_ts(); int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers; @@ -6427,7 +6114,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online &adsp[adspix++].dsp, ts); } } -#endif init_misc_aux_work(); init_swtreq_alloc(); @@ -6436,7 +6122,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */ debug_wait_completed_flags = 0; -#ifdef ERTS_SMP aux_thread_aux_work_data = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, @@ -6444,12 +6129,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online init_no_runqs(no_schedulers_online, no_schedulers_online); balance_info.last_active_runqs = no_schedulers; - erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL, + erts_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); balance_info.forced_check_balance = 0; balance_info.halftime = 1; balance_info.full_reds_history_index = 0; - erts_smp_atomic32_init_nob(&balance_info.checking_balance, 0); + erts_atomic32_init_nob(&balance_info.checking_balance, 0); balance_info.prev_rise.active_runqs = 0; balance_info.prev_rise.max_len = 0; balance_info.prev_rise.reds = 0; @@ -6480,7 +6165,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online suspend_run_queue(ERTS_RUNQ_IX(ix)); } -#ifdef ERTS_DIRTY_SCHEDULERS schdlr_sspnd_set_nscheds(&schdlr_sspnd.online, ERTS_SCHED_DIRTY_CPU, @@ -6497,7 +6181,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online set_schdlr_sspnd_change_flags |= ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN; for (ix = 
no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) { ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); - erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED); } } @@ -6511,54 +6195,30 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online ERTS_SCHED_DIRTY_IO, no_dirty_io_schedulers); - erts_smp_atomic32_init_nob(&dirty_count.cpu.active, + erts_atomic32_init_nob(&dirty_count.cpu.active, (erts_aint32_t) no_dirty_cpu_schedulers); - erts_smp_atomic32_init_nob(&dirty_count.io.active, + erts_atomic32_init_nob(&dirty_count.io.active, (erts_aint32_t) no_dirty_io_schedulers); -#endif if (set_schdlr_sspnd_change_flags) - erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, + erts_atomic32_set_nob(&schdlr_sspnd.changing, set_schdlr_sspnd_change_flags); - erts_smp_atomic32_init_nob(&doing_sys_schedule, 0); + erts_atomic32_init_nob(&doing_sys_schedule, 0); init_misc_aux_work(); -#else /* !ERTS_SMP */ - { - ErtsSchedulerData *esdp; - esdp = ERTS_SCHEDULER_IX(0); - erts_scheduler_data = esdp; -#ifdef USE_THREADS - erts_tsd_set(sched_data_key, (void *) esdp); -#endif - } - erts_no_dirty_cpu_schedulers = 0; - erts_no_dirty_io_schedulers = 0; -#endif - erts_smp_atomic32_init_nob(&function_calls, 0); + erts_atomic32_init_nob(&function_calls, 0); /* init port tasks */ erts_port_task_init(); -#ifndef ERTS_SMP -#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC - erts_scheduler_data->verify_unused_temp_alloc - = erts_alloc_get_verify_unused_temp_alloc( - &erts_scheduler_data->verify_unused_temp_alloc_data); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL); -#endif -#endif - erts_smp_atomic32_init_relb(&erts_halt_progress, -1); + erts_atomic32_init_relb(&erts_halt_progress, -1); erts_halt_code = 0; -#if !defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - erts_lc_set_thread_name("scheduler 1"); -#endif } @@ -6571,7 +6231,6 @@ erts_schedid2runq(Uint id) return ERTS_RUNQ_IX(ix); } -#ifdef USE_THREADS ErtsSchedulerData * erts_get_scheduler_data(void) @@ -6579,16 +6238,13 @@ erts_get_scheduler_data(void) return (ErtsSchedulerData *) erts_tsd_get(sched_data_key); } -#endif static Process * make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) { erts_aint32_t state; Process *proxy; -#ifdef ERTS_SMP ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue); -#endif state = (ERTS_PSFLG_PROXY | ERTS_PSFLG_IN_RUNQ @@ -6599,11 +6255,9 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) if (prev_proxy) { proxy = prev_proxy; - ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); - erts_smp_atomic32_set_nob(&proxy->state, state); -#ifdef ERTS_SMP + ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); + erts_atomic32_set_nob(&proxy->state, state); RUNQ_SET_RQ(&proc->run_queue, rq); -#endif } else { proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process)); @@ -6615,11 +6269,9 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) ui32[i] = (Uint32) 0xdeadbeef; } #endif - erts_smp_atomic32_init_nob(&proxy->state, state); -#ifdef ERTS_SMP - erts_smp_atomic_init_nob(&proxy->run_queue, - erts_smp_atomic_read_nob(&proc->run_queue)); -#endif + erts_atomic32_init_nob(&proxy->state, state); + erts_atomic_init_nob(&proxy->run_queue, + erts_atomic_read_nob(&proc->run_queue)); } proxy->common.id = proc->common.id; @@ -6632,7 +6284,6 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) #define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2 #define 
ERTS_ENQUEUE_DIRTY_IO_QUEUE 3 -#ifdef ERTS_DIRTY_SCHEDULERS static int check_dirty_enqueue_in_prio_queue(Process *c_p, @@ -6663,7 +6314,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p, if ((*newp) & ERTS_PSFLG_ACTIVE_SYS) return ERTS_ENQUEUE_NORMAL_QUEUE; - dact = erts_smp_atomic32_read_mb(&c_p->dirty_state); + dact = erts_atomic32_read_mb(&c_p->dirty_state); if (actual & (ERTS_PSFLG_DIRTY_ACTIVE_SYS | ERTS_PSFLG_DIRTY_CPU_PROC)) { max_qbit = ((dact >> ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET) @@ -6708,7 +6359,7 @@ fin_dirty_enq_s_change(Process *p, erts_aint32_t qbit = 1 << enq_prio; qbit <<= qmask_offset; - if (qbit & erts_smp_atomic32_read_bor_mb(&p->dirty_state, qbit)) { + if (qbit & erts_atomic32_read_bor_mb(&p->dirty_state, qbit)) { /* Already enqueue by someone else... */ if (pstruct_reserved) { /* We reserved process struct for enqueue; clear it... */ @@ -6717,7 +6368,7 @@ fin_dirty_enq_s_change(Process *p, #else (void) #endif - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ); ASSERT(old & ERTS_PSFLG_IN_RUNQ); } return 0; @@ -6726,7 +6377,6 @@ fin_dirty_enq_s_change(Process *p, return !0; } -#endif /* ERTS_DIRTY_SCHEDULERS */ static ERTS_INLINE int check_enqueue_in_prio_queue(Process *c_p, @@ -6741,14 +6391,12 @@ check_enqueue_in_prio_queue(Process *c_p, *prq_prio_p = aprio; -#ifdef ERTS_DIRTY_SCHEDULERS if (actual & ERTS_PSFLGS_DIRTY_WORK) { int res = check_dirty_enqueue_in_prio_queue(c_p, newp, actual, aprio, qbit); if (res != ERTS_ENQUEUE_NORMAL_QUEUE) return res; } -#endif max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK; max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS; @@ -6791,7 +6439,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st return NULL; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_ENQUEUE_DIRTY_CPU_QUEUE: case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE: @@ -6812,7 +6459,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st return NULL; -#endif default: { ErtsRunQueue* runq; @@ -6822,7 +6468,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st runq = erts_get_runq_proc(p); -#ifdef ERTS_SMP if (!(ERTS_PSFLG_BOUND & state)) { ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio); if (new_runq) { @@ -6830,7 +6475,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st runq = new_runq; } } -#endif ASSERT(runq); @@ -6858,7 +6502,6 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS; else { running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS; -#ifdef ERTS_DIRTY_SCHEDULERS if (state & ERTS_PSFLG_DIRTY_ACTIVE_SYS && (p->flags & (F_DELAY_GC|F_DISABLE_GC))) { /* @@ -6872,11 +6515,10 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, */ ASSERT(!(p->flags & (F_DIRTY_CLA | F_DIRTY_GC_HIBERNATE))); - state = erts_smp_atomic32_read_band_nob(&p->state, + state = erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS); state &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS; } -#endif } a = state; @@ -6893,7 +6535,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) { enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); } - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } @@ 
-6905,7 +6547,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, if (erts_system_profile_flags.runnable_procs) { /* Status lock prevents out of order "runnable proc" trace msgs */ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); if (!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS)) && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) { @@ -6917,15 +6559,10 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, if (proxy) free_proxy_proc(proxy); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); -#if !defined(ERTS_SMP) - /* Decrement refc if process struct is free... */ - return !!(n & ERTS_PSFLG_FREE); -#else /* Decrement refc if scheduled out from dirty scheduler... */ return !is_normal_sched; -#endif } else { Process* sched_p; @@ -6944,7 +6581,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, ASSERT(runq); - erts_smp_runq_lock(runq); + erts_runq_lock(runq); if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */ @@ -6955,11 +6592,11 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, if (runq == c_rq) return 0; - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); smp_notify_inc_runq(runq); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); /* * Decrement refc if process is scheduled out by a @@ -7007,12 +6644,12 @@ add2runq(int enqueue, erts_aint32_t prio, sched_p = make_proxy_proc(pxy, proc, prio); } - erts_smp_runq_lock(runq); + erts_runq_lock(runq); /* Enqueue the process */ enqueue_process(runq, (int) prio, sched_p); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); smp_notify_inc_runq(runq); } } @@ -7037,7 +6674,7 @@ change_proc_schedule_state(Process *p, unsigned int lock_status = (prof_runnable_procs && !(locks & ERTS_PROC_LOCK_STATUS)); - ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p)); ASSERT(!(a & ERTS_PSFLG_PROXY)); ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING @@ -7052,7 +6689,7 @@ change_proc_schedule_state(Process *p, | ERTS_PSFLG_ACTIVE_SYS)) == 0); if (lock_status) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); while (1) { erts_aint32_t e; @@ -7076,11 +6713,9 @@ change_proc_schedule_state(Process *p, | ERTS_PSFLG_DIRTY_RUNNING_SYS | ERTS_PSFLG_IN_RUNQ | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE -#ifdef ERTS_DIRTY_SCHEDULERS || (n & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING -#endif ) { /* * Active and seemingly need to be enqueued, but @@ -7090,7 +6725,7 @@ change_proc_schedule_state(Process *p, enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a); } - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; if (enqueue == ERTS_ENQUEUE_NOT && n == a) @@ -7115,7 +6750,7 @@ change_proc_schedule_state(Process *p, } if (lock_status) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } @@ -7159,7 +6794,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, res = 1; /* prepare for success */ st->next = st->prev = st; /* Prep for empty prio queue */ - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); prof_runnable_procs = 
erts_system_profile_flags.runnable_procs; locked = 0; free_stqs = NULL; @@ -7179,9 +6814,9 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, if (!locked) { locked = 1; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (state & fail_state) { *fail_state_p = (state & fail_state); free_stqs = stqs; @@ -7225,7 +6860,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, n = e = a; n &= ~ERTS_PSFLGS_ACT_PRIO_MASK; n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET); - a = erts_smp_atomic32_cmpxchg_nob(&p->state, n, e); + a = erts_atomic32_cmpxchg_nob(&p->state, n, e); } while (a != e); state = n; } @@ -7235,10 +6870,10 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, enq_prio = -1; /* Status lock prevents out of order "runnable proc" trace msgs */ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); if (!prof_runnable_procs) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); locked = 0; } @@ -7258,7 +6893,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS))) enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; if (a == n && enqueue == ERTS_ENQUEUE_NOT) @@ -7277,7 +6912,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, profile_runnable_proc(p, am_active); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); locked = 0; } @@ -7286,12 +6921,12 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, cleanup: if (locked) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); if (free_stqs) proc_sys_task_queues_free(free_stqs); - ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p))); + ERTS_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p))); return res; } @@ -7301,15 +6936,15 @@ suspend_process(Process *c_p, Process *p) { erts_aint32_t state; int suspended = 0; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if ((state & ERTS_PSFLG_SUSPENDED)) suspended = -1; else { if (c_p == p) { - state = erts_smp_atomic32_read_bor_relb(&p->state, + state = erts_atomic32_read_bor_relb(&p->state, ERTS_PSFLG_SUSPENDED); ASSERT(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS @@ -7325,7 +6960,7 @@ suspend_process(Process *c_p, Process *p) n = e = state; n |= ERTS_PSFLG_SUSPENDED; - state = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e); + state = erts_atomic32_cmpxchg_relb(&p->state, n, e); if (state == e) { suspended = 1; break; @@ -7370,14 +7005,14 @@ resume_process(Process *p, ErtsProcLocks locks) erts_aint32_t state, enq_prio = -1; int enqueue; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); ASSERT(p->rcount > 0); if (--p->rcount > 0) /* multiple suspend 
*/ return; - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); enqueue = change_proc_schedule_state(p, ERTS_PSFLG_SUSPENDED, 0, @@ -7387,7 +7022,6 @@ resume_process(Process *p, ErtsProcLocks locks) add2runq(enqueue, enq_prio, p, state, NULL); } -#ifdef ERTS_SMP static ERTS_INLINE void sched_resume_wake__(ErtsSchedulerSleepInfo *ssi) @@ -7398,7 +7032,7 @@ sched_resume_wake__(ErtsSchedulerSleepInfo *ssi) | ERTS_SSI_FLG_SUSPENDED); erts_aint32_t oflgs; do { - oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs); + oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs); if (oflgs == xflgs) { erts_sched_finish_poke(ssi, oflgs); break; @@ -7413,7 +7047,6 @@ nrml_sched_ix_resume_wake(Uint ix) sched_resume_wake__(ERTS_SCHED_SLEEP_INFO_IX(ix)); } -#ifdef ERTS_DIRTY_SCHEDULERS static void dcpu_sched_ix_resume_wake(Uint ix) @@ -7427,7 +7060,6 @@ dio_sched_ix_resume_wake(Uint ix) sched_resume_wake__(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix)); } -#endif static erts_aint32_t sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct) @@ -7439,7 +7071,7 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct) erts_aint32_t xflgs = xpct; do { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -7456,7 +7088,7 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount) erts_aint32_t flgs; do { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if ((flgs & (ERTS_SSI_FLG_SLEEPING | ERTS_SSI_FLG_WAITING | ERTS_SSI_FLG_SUSPENDED)) @@ -7489,7 +7121,7 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi) erts_tse_reset(ssi->event); while (1) { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; if ((oflgs & (ERTS_SSI_FLG_SLEEPING @@ -7507,12 +7139,11 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi) static void init_scheduler_suspend(void) { - erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL, + erts_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); schdlr_sspnd.online.normal = 1; schdlr_sspnd.curr_online.normal = 1; schdlr_sspnd.active.normal = 1; -#ifdef ERTS_DIRTY_SCHEDULERS schdlr_sspnd.online.dirty_cpu = 0; schdlr_sspnd.curr_online.dirty_cpu = 0; schdlr_sspnd.active.dirty_cpu = 0; @@ -7520,8 +7151,7 @@ init_scheduler_suspend(void) schdlr_sspnd.curr_online.dirty_io = 0; schdlr_sspnd.active.dirty_io = 0; schdlr_sspnd.last_msb_dirty_type = ERTS_SCHED_DIRTY_IO; -#endif - erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0); + erts_atomic32_init_nob(&schdlr_sspnd.changing, 0); schdlr_sspnd.chngq = NULL; schdlr_sspnd.changer = am_false; schdlr_sspnd.nmsb.ongoing = 0; @@ -7552,7 +7182,7 @@ schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid) : 0)); if (p) { resume_process(p, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); if (sched_type != ERTS_SCHED_NORMAL) erts_proc_dec_refc(p); } @@ -7581,7 +7211,6 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type, } } -#ifdef ERTS_DIRTY_SCHEDULERS static ERTS_INLINE int have_dirty_work(void) @@ -7751,7 +7380,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, calls = ERTS_MODIFIED_TIMING_INPUT_REDS + 1; else 
calls = INPUT_REDUCTIONS + 1; - erts_smp_atomic32_set_nob(&function_calls, calls); + erts_atomic32_set_nob(&function_calls, calls); if ((nrml_prio == ERTS_MSB_NONE_PRIO_BIT) & ((dcpu_prio != ERTS_MSB_NONE_PRIO_BIT) @@ -7786,7 +7415,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, #else (void) #endif - erts_smp_atomic32_read_bset_mb(&esdp->ssi->flags, + erts_atomic32_read_bset_mb(&esdp->ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC), ERTS_SSI_FLG_SUSPENDED); @@ -7813,7 +7442,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, #else (void) #endif - erts_smp_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags, + erts_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC), ERTS_SSI_FLG_MSB_EXEC); @@ -7825,7 +7454,6 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, } -#endif static void suspend_scheduler(ErtsSchedulerData *esdp) @@ -7853,14 +7481,6 @@ suspend_scheduler(ErtsSchedulerData *esdp) */ -#if !defined(ERTS_DIRTY_SCHEDULERS) - - sched_type = ERTS_SCHED_NORMAL; - online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN; - no = esdp->no; - ASSERT(no != 1); - -#else sched_type = esdp->type; switch (sched_type) { @@ -7881,25 +7501,24 @@ suspend_scheduler(ErtsSchedulerData *esdp) return; } - if (erts_smp_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) { + if (erts_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) { ASSERT(no == 1); if (!msb_scheduler_type_switch(sched_type, esdp, no)) return; /* Suspend and let scheduler 1 of another type execute... */ } -#endif if (sched_type != ERTS_SCHED_NORMAL) { dirty_active(esdp, -1); - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); dirty_sched_wall_time_change(esdp, 0); } else { if (no != 1) evacuate_run_queue(esdp->run_queue, &sbp); - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); erts_sched_check_cpu_bind_prep_suspend(esdp); @@ -7907,14 +7526,14 @@ suspend_scheduler(ErtsSchedulerData *esdp) profile_scheduler(make_small(esdp->no), am_inactive); } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED); if (flgs & ERTS_SSI_FLG_SUSPENDED) { schdlr_sspnd_dec_nscheds(&schdlr_sspnd.active, sched_type); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); while (1) { @@ -7946,7 +7565,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) if (clr_flg) { ErtsProcList *plp, *end_plp; - changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~clr_flg); changing &= ~clr_flg; (void) erts_proclist_fetch(&msb[i]->chngq, &end_plp); @@ -7992,7 +7611,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) == schdlr_sspnd_get_nscheds(&schdlr_sspnd.curr_online, sched_type))) { ErtsProcList *plp; - changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~online_flag); changing &= ~online_flag; if (sched_type == ERTS_SCHED_NORMAL) { @@ -8014,11 +7633,11 @@ suspend_scheduler(ErtsSchedulerData *esdp) } if (curr_online) { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) break; } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); schdlr_sspnd_resume_procs(sched_type, &resume); @@ -8046,9 +7665,9 @@ suspend_scheduler(ErtsSchedulerData 
*esdp) if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); if (evacuate) { - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); evacuate_run_queue(esdp->run_queue, &sbp); - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); } } @@ -8146,23 +7765,23 @@ suspend_scheduler(ErtsSchedulerData *esdp) | ERTS_SSI_FLG_SUSPENDED)); if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) break; - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); if (changing) break; } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + erts_mtx_lock(&schdlr_sspnd.mtx); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); } schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); if (changing) { if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB) && !schdlr_sspnd.msb.ongoing && schdlr_sspnd_eq_nscheds(&schdlr_sspnd.online, &schdlr_sspnd.active)) { - erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_MSB); } if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB) @@ -8171,14 +7790,14 @@ suspend_scheduler(ErtsSchedulerData *esdp) ERTS_SCHED_NORMAL) == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL))) { - erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_NMSB); } } ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type)); } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); schdlr_sspnd_resume_procs(sched_type, &resume); @@ -8197,7 +7816,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) } } - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); non_empty_runq(esdp->run_queue); if (sched_type != ERTS_SCHED_NORMAL) @@ -8221,7 +7840,7 @@ erts_schedulers_state(Uint *total, { if (active || online || dirty_cpu_online || dirty_cpu_active || dirty_io_active) { - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (active) *active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL); @@ -8237,7 +7856,7 @@ erts_schedulers_state(Uint *total, if (dirty_io_active) *dirty_io_active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_DIRTY_IO); - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); } if (total) @@ -8253,7 +7872,7 @@ abort_sched_onln_chng_waitq(Process *p) { Eterm resume = NIL; - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); #ifdef DEBUG { @@ -8303,7 +7922,7 @@ abort_sched_onln_chng_waitq(Process *p) } } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); if (is_internal_pid(resume)) schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume); @@ -8320,11 +7939,7 @@ erts_set_schedulers_online(Process *p, erts_aint32_t changing = 0, change_flags; int online, increase; ErtsProcList *plp; -#ifdef ERTS_DIRTY_SCHEDULERS int dirty_no, change_dirty, dirty_online; -#else - ASSERT(!dirty_only); -#endif if (new_no < 1) return ERTS_SCHDLR_SSPND_EINVAL; @@ -8333,11 +7948,9 @@ erts_set_schedulers_online(Process *p, else if (erts_no_schedulers < new_no) return ERTS_SCHDLR_SSPND_EINVAL; -#ifdef ERTS_DIRTY_SCHEDULERS if (dirty_only) resume_proc = 0; else -#endif { 
resume_proc = 1; /* @@ -8346,23 +7959,21 @@ erts_set_schedulers_online(Process *p, * race... */ if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); suspend_process(p, p); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); change_flags = 0; have_unlocked_plocks = 0; no = (int) new_no; -#ifdef ERTS_DIRTY_SCHEDULERS if (!dirty_only) -#endif { - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) { enqueue_wait: p->flags |= F_SCHDLR_ONLN_WAITQ; @@ -8388,12 +7999,6 @@ erts_set_schedulers_online(Process *p, *old_no = online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, ERTS_SCHED_NORMAL); -#ifndef ERTS_DIRTY_SCHEDULERS - if (no == online) { - res = ERTS_SCHDLR_SSPND_DONE; - goto done; - } -#else /* ERTS_DIRTY_SCHEDULERS */ dirty_online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, ERTS_SCHED_DIRTY_CPU); if (dirty_only) @@ -8445,7 +8050,6 @@ erts_set_schedulers_online(Process *p, if (dirty_only) increase = (dirty_no > dirty_online); else -#endif /* ERTS_DIRTY_SCHEDULERS */ { change_flags |= ERTS_SCHDLR_SSPND_CHNG_ONLN; schdlr_sspnd_set_nscheds(&schdlr_sspnd.online, @@ -8454,12 +8058,11 @@ erts_set_schedulers_online(Process *p, increase = (no > online); } - erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, change_flags); + erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, change_flags); res = ERTS_SCHDLR_SSPND_DONE; if (increase) { int ix; -#ifdef ERTS_DIRTY_SCHEDULERS if (change_dirty) { ErtsSchedulerSleepInfo* ssi; if (schdlr_sspnd.msb.ongoing) { @@ -8473,7 +8076,6 @@ erts_set_schedulers_online(Process *p, } } if (!dirty_only) -#endif { if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) { for (ix = online; ix < no; ix++) @@ -8482,7 +8084,7 @@ erts_set_schedulers_online(Process *p, else { if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } change_no_used_runqs(no); @@ -8495,7 +8097,6 @@ erts_set_schedulers_online(Process *p, } } else /* if decrease */ { -#ifdef ERTS_DIRTY_SCHEDULERS if (change_dirty) { if (schdlr_sspnd.msb.ongoing) { for (ix = dirty_no; ix < dirty_online; ix++) @@ -8513,7 +8114,6 @@ erts_set_schedulers_online(Process *p, } } if (!dirty_only) -#endif { if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) { for (ix = no; ix < online; ix++) @@ -8522,7 +8122,7 @@ erts_set_schedulers_online(Process *p, else { if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } change_no_used_runqs(no); @@ -8551,17 +8151,17 @@ done: <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, ERTS_SCHED_NORMAL)); - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); if (have_unlocked_plocks) - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); if (resume_proc) { if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); resume_process(p, plocks|ERTS_PROC_LOCK_STATUS); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } return res; @@ -8599,13 +8199,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal else { 
resume_proc = 1; if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); suspend_process(p, p); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (on) { /* ------ BLOCK ------ */ if (msbp->chngq) { ASSERT(msbp->ongoing); @@ -8635,12 +8235,12 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal p->flags |= have_blckd_flg; if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } ASSERT(!msbp->ongoing); msbp->ongoing = 1; - erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, + erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, chng_flg); change_no_used_runqs(1); for (ix = 1; ix < erts_no_run_queues; ix++) @@ -8651,21 +8251,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal wake_scheduler(rq); } -#ifdef ERTS_DIRTY_SCHEDULERS if (!normal) { ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0), ERTS_RUNQ_FLG_MSB_EXEC); - erts_smp_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags, + erts_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags, ERTS_SSI_FLG_MSB_EXEC); for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) dcpu_sched_ix_suspend_wake(ix); for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) dio_sched_ix_suspend_wake(ix); } -#endif wait_until_msb: - ASSERT(chng_flg & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)); + ASSERT(chng_flg & erts_atomic32_read_nob(&schdlr_sspnd.changing)); plp = proclist_create(p); erts_proclist_store_last(&msbp->chngq, plp); @@ -8706,14 +8304,14 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal } if (!msbp->blckrs && !msbp->chngq) { int online; - erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, + erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, chng_flg); p->flags &= ~have_blckd_flg; msbp->ongoing = 0; if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) { if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, @@ -8727,7 +8325,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal for (ix = online; ix < erts_no_run_queues; ix++) suspend_run_queue(ERTS_RUNQ_IX(ix)); } -#ifdef ERTS_DIRTY_SCHEDULERS if (!schdlr_sspnd.msb.ongoing) { /* Get rid of msb-exec flag in run-queue of scheduler 1 */ resume_run_queue(ERTS_RUNQ_IX(0)); @@ -8738,7 +8335,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) dio_sched_ix_resume_wake(ix); } -#endif } unblock_res: @@ -8750,17 +8346,17 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal res = ERTS_SCHDLR_SSPND_DONE; } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); if (have_unlocked_plocks) - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); if (resume_proc) { if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); resume_process(p, plocks|ERTS_PROC_LOCK_STATUS); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } return res; @@ -8770,14 +8366,14 @@ int 
erts_is_multi_scheduling_blocked(void) { int res; - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (schdlr_sspnd.msb.blckrs) res = 1; else if (schdlr_sspnd.nmsb.blckrs) res = -1; else res = 0; - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); return res; } @@ -8789,7 +8385,7 @@ erts_multi_scheduling_blockers(Process *p, int normal) msbp = normal ? &schdlr_sspnd.nmsb : &schdlr_sspnd.msb; - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (!erts_proclist_is_empty(msbp->blckrs)) { Eterm *hp, *hp_end; ErtsProcList *plp1, *plp2; @@ -8817,7 +8413,7 @@ erts_multi_scheduling_blockers(Process *p, int normal) } HRelease(p, hp_end, hp); } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); return res; } @@ -8827,9 +8423,7 @@ sched_thread_func(void *vesdp) ErtsThrPrgrCallbacks callbacks; ErtsSchedulerData *esdp = vesdp; Uint no = esdp->no; -#ifdef ERTS_SMP erts_tse_t *tse; -#endif erts_sched_init_time_sup(esdp); @@ -8839,7 +8433,6 @@ sched_thread_func(void *vesdp) (void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue, ERTS_RUNQ_FLG_EXEC); -#ifdef ERTS_SMP tse = erts_tse_fetch(); erts_tse_prepare_timed(tse); ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = tse; @@ -8853,7 +8446,6 @@ sched_thread_func(void *vesdp) erts_thr_progress_register_managed_thread(esdp, &callbacks, 0); erts_alloc_register_scheduler(vesdp); -#endif #ifdef ERTS_ENABLE_LOCK_CHECK { char buf[31]; @@ -8862,18 +8454,14 @@ sched_thread_func(void *vesdp) } #endif erts_tsd_set(sched_data_key, vesdp); -#ifdef ERTS_SMP #if HAVE_ERTS_MSEG erts_mseg_late_init(); #endif -#if ERTS_USE_ASYNC_READY_Q esdp->aux_work_data.async_ready.queue = erts_get_async_ready_queue(no); -#endif erts_sched_init_check_cpu_bind(esdp); erts_proc_lock_prepare_proc_lock_waiter(); -#endif #ifdef HIPE hipe_thread_signal_init(); @@ -8898,8 +8486,6 @@ sched_thread_func(void *vesdp) return NULL; } -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP static void* sched_dirty_cpu_thread_func(void *vesdp) { @@ -8929,9 +8515,7 @@ sched_dirty_cpu_thread_func(void *vesdp) } #endif erts_tsd_set(sched_data_key, vesdp); -#if ERTS_USE_ASYNC_READY_Q esdp->aux_work_data.async_ready.queue = NULL; -#endif erts_proc_lock_prepare_proc_lock_waiter(); @@ -8977,9 +8561,7 @@ sched_dirty_io_thread_func(void *vesdp) } #endif erts_tsd_set(sched_data_key, vesdp); -#if ERTS_USE_ASYNC_READY_Q esdp->aux_work_data.async_ready.queue = NULL; -#endif erts_proc_lock_prepare_proc_lock_waiter(); @@ -8995,8 +8577,6 @@ sched_dirty_io_thread_func(void *vesdp) no); return NULL; } -#endif -#endif static ethr_tid aux_tid; @@ -9014,7 +8594,6 @@ erts_start_schedulers(void) opts.name = name; -#ifdef ERTS_SMP if (erts_runq_supervision_interval) { opts.suggested_stack_size = 16; erts_snprintf(opts.name, 16, "runq_supervisor"); @@ -9028,7 +8607,6 @@ erts_start_schedulers(void) erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread\n"); } -#endif opts.suggested_stack_size = erts_sched_thread_suggested_stack_size; @@ -9054,8 +8632,6 @@ erts_start_schedulers(void) } erts_no_schedulers = actual; -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP { int ix; for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) { @@ -9075,8 +8651,6 @@ erts_start_schedulers(void) erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d\n", ix); } } -#endif -#endif ERTS_THR_MEMORY_BARRIER; @@ -9103,9 +8677,7 @@ erts_start_schedulers(void) } } -#endif /* ERTS_SMP */ -#ifdef ERTS_SMP static void add_pend_suspend(Process 
*suspendee, @@ -9141,7 +8713,7 @@ handle_pending_suspend(Process *p, ErtsProcLocks p_locks) ErtsPendingSuspend *psp; int is_alive = !ERTS_PROC_IS_EXITING(p); - ERTS_SMP_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS); /* * New pending suspenders might appear while we are processing @@ -9167,15 +8739,15 @@ cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks) if (is_not_nil(p->suspendee)) { Process *rp; if (!(p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS, p->suspendee, ERTS_PROC_LOCK_STATUS); if (rp) { erts_resume(rp, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); } if (!(p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); p->suspendee = NIL; } } @@ -9188,7 +8760,7 @@ handle_pend_sync_suspend(Process *suspendee, { Process *suspender; - ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); suspender = erts_pid2proc(suspendee, suspendee_locks, @@ -9204,7 +8776,7 @@ handle_pend_sync_suspend(Process *suspendee, resume suspender */ ASSERT(suspendee != suspender); resume_process(suspender, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); } } @@ -9215,10 +8787,10 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, Process *rp; int unlock_c_p_status; - ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); - ERTS_SMP_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)); + ERTS_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)); if (c_p->common.id == pid) return erts_pid2proc(c_p, c_p_locks, pid, pid_locks); @@ -9227,7 +8799,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, unlock_c_p_status = 0; else { unlock_c_p_status = 1; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); } if (c_p->suspendee == pid) { @@ -9266,21 +8838,19 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, /* Other process running */ ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING) - & erts_smp_atomic32_read_nob(&rp->state)); + & erts_atomic32_read_nob(&rp->state)); -#ifdef ERTS_DIRTY_SCHEDULERS if (!suspend - && (erts_smp_atomic32_read_nob(&rp->state) + && (erts_atomic32_read_nob(&rp->state) & ERTS_PSFLG_DIRTY_RUNNING)) { ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; - if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) { + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, pid, pid_locks|ERTS_PROC_LOCK_STATUS); } goto done; } -#endif running: @@ -9300,19 +8870,19 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, c_p->flags |= F_P2PNR_RESCHED; } /* Yield (caller is assumed to yield immediately in bif). 
*/ - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); rp = ERTS_PROC_LOCK_BUSY; } else { ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; - if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { + if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) { if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS) - & erts_smp_atomic32_read_nob(&rp->state)) { + & erts_atomic32_read_nob(&rp->state)) { /* Executing system task... */ resume_process(rp, ERTS_PROC_LOCK_STATUS); goto running; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); /* * If we are unlucky, the process just got selected for * execution of a system task. In this case we may be @@ -9336,7 +8906,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, #ifdef DEBUG { erts_aint32_t state; - state = erts_smp_atomic32_read_nob(&rp->state); + state = erts_atomic32_read_nob(&rp->state); ASSERT((state & ERTS_PSFLG_PENDING_EXIT) || !(state & ERTS_PSFLG_RUNNING)); } @@ -9350,9 +8920,9 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, done: if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); if (unlock_c_p_status) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); return rp; } @@ -9398,7 +8968,7 @@ do_bif_suspend_process(Process *c_p, { ASSERT(suspendee); ASSERT(!ERTS_PROC_IS_EXITING(suspendee)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(suspendee)); if (smon) { if (!smon->active) { @@ -9421,7 +8991,7 @@ handle_pend_bif_sync_suspend(Process *suspendee, { Process *suspender; - ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); suspender = erts_pid2proc(suspendee, suspendee_locks, @@ -9450,7 +9020,7 @@ handle_pend_bif_sync_suspend(Process *suspendee, resume suspender */ ASSERT(suspender != suspendee); resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspender, + erts_proc_unlock(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); } } @@ -9464,7 +9034,7 @@ handle_pend_bif_async_suspend(Process *suspendee, Process *suspender; - ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); suspender = erts_pid2proc(suspendee, suspendee_locks, @@ -9488,11 +9058,10 @@ handle_pend_bif_async_suspend(Process *suspendee, do_bif_suspend_process(suspendee, smon, suspendee); ASSERT(!smon || res != 0); } - erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(suspender, ERTS_PROC_LOCK_LINK); } } -#endif /* ERTS_SMP */ /* * The erlang:suspend_process/2 BIF @@ -9541,7 +9110,7 @@ suspend_process_2(BIF_ALIST_2) ? (ErtsProcLocks) 0 : ERTS_PROC_LOCK_STATUS); - erts_smp_proc_lock(BIF_P, xlocks); + erts_proc_lock(BIF_P, xlocks); suspendee = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN|xlocks, @@ -9552,34 +9121,15 @@ suspend_process_2(BIF_ALIST_2) smon = erts_add_or_lookup_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); -#ifndef ERTS_SMP /* no ERTS_SMP */ - - /* This is really a piece of cake without SMP support... 
*/ - if (!smon->active) { - erts_smp_atomic32_read_bor_nob(&suspendee->state, ERTS_PSFLG_SUSPENDED); - suspend_process(BIF_P, suspendee); - smon->active++; - res = am_true; - } - else if (unless_suspending) - res = am_false; - else if (smon->active == INT_MAX) - goto system_limit; - else { - smon->active++; - res = am_true; - } - -#else /* ERTS_SMP */ /* ... but a little trickier with SMP support ... */ if (asynchronous) { /* --- Asynchronous suspend begin ---------------------------------- */ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_LINK + ERTS_LC_ASSERT(ERTS_PROC_LOCK_LINK & erts_proc_lc_my_proc_locks(BIF_P)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS == erts_proc_lc_my_proc_locks(suspendee)); if (smon->active) { @@ -9619,10 +9169,10 @@ suspend_process_2(BIF_ALIST_2) else /* if (!asynchronous) */ { /* --- Synchronous suspend begin ----------------------------------- */ - ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS) + ERTS_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS) & erts_proc_lc_my_proc_locks(BIF_P)) == (ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS == erts_proc_lc_my_proc_locks(suspendee)); if (BIF_P->suspendee == BIF_ARG_1) { @@ -9688,10 +9238,9 @@ suspend_process_2(BIF_ALIST_2) /* --- Synchronous suspend end ------------------------------------- */ } -#endif /* ERTS_SMP */ #ifdef DEBUG { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&suspendee->state); + erts_aint32_t state = erts_atomic32_read_acqb(&suspendee->state); ASSERT((state & ERTS_PSFLG_SUSPENDED) || (asynchronous && smon->pending)); ASSERT((state & ERTS_PSFLG_SUSPENDED) @@ -9699,8 +9248,8 @@ suspend_process_2(BIF_ALIST_2) } #endif - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(BIF_P, xlocks); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, xlocks); BIF_RET(res); system_limit: @@ -9708,26 +9257,22 @@ suspend_process_2(BIF_ALIST_2) goto do_return; no_suspendee: -#ifdef ERTS_SMP BIF_P->suspendee = NIL; -#endif erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); badarg: ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG); -#ifdef ERTS_SMP goto do_return; yield: ERTS_BIF_PREP_YIELD2(res, bif_export[BIF_suspend_process_2], BIF_P, BIF_ARG_1, BIF_ARG_2); -#endif do_return: if (suspendee) - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); if (xlocks) - erts_smp_proc_unlock(BIF_P, xlocks); + erts_proc_unlock(BIF_P, xlocks); return res; } @@ -9747,7 +9292,7 @@ resume_process_1(BIF_ALIST_1) if (BIF_P->common.id == BIF_ARG_1) BIF_ERROR(BIF_P, BADARG); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); smon = erts_lookup_suspend_monitor(BIF_P->suspend_monitors, BIF_ARG_1); if (!smon) { @@ -9793,17 +9338,17 @@ resume_process_1(BIF_ALIST_1) goto no_suspendee; ASSERT(ERTS_PSFLG_SUSPENDED - & erts_smp_atomic32_read_nob(&suspendee->state)); + & erts_atomic32_read_nob(&suspendee->state)); ASSERT(BIF_P != suspendee); resume_process(suspendee, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } if (!smon->active && !smon->pending) erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); @@ -9812,7 +9357,7 @@ 
resume_process_1(BIF_ALIST_1) erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); error: - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_ERROR(BIF_P, BADARG); } @@ -9821,18 +9366,16 @@ erts_internal_is_process_executing_dirty_1(BIF_ALIST_1) { if (is_not_internal_pid(BIF_ARG_1)) BIF_ERROR(BIF_P, BADARG); -#ifdef ERTS_DIRTY_SCHEDULERS else { Process *rp = erts_proc_lookup(BIF_ARG_1); if (rp) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t state = erts_atomic32_read_nob(&rp->state); if (state & (ERTS_PSFLG_DIRTY_RUNNING |ERTS_PSFLG_DIRTY_RUNNING_SYS)) { BIF_RET(am_true); } } } -#endif BIF_RET(am_false); } @@ -9842,28 +9385,26 @@ run_queues_len_aux(ErtsRunQueue *rq, Uint *tot_len, Uint *qlen, int *ip, int inc Sint rq_len; if (locked) - rq_len = (Sint) erts_smp_atomic32_read_dirty(&rq->len); + rq_len = (Sint) erts_atomic32_read_dirty(&rq->len); else - rq_len = (Sint) erts_smp_atomic32_read_nob(&rq->len); + rq_len = (Sint) erts_atomic32_read_nob(&rq->len); ASSERT(rq_len >= 0); if (incl_active_sched) { -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { erts_aint32_t dcnt; if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(rq)) { - dcnt = erts_smp_atomic32_read_nob(&dirty_count.cpu.active); + dcnt = erts_atomic32_read_nob(&dirty_count.cpu.active); ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_cpu_schedulers); } else { ASSERT(ERTS_RUNQ_IS_DIRTY_IO_RUNQ(rq)); - dcnt = erts_smp_atomic32_read_nob(&dirty_count.io.active); + dcnt = erts_atomic32_read_nob(&dirty_count.io.active); ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_io_schedulers); } rq_len += (Sint) dcnt; } else -#endif { if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC) rq_len++; @@ -9882,12 +9423,10 @@ erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched, Uint len = 0; int no_rqs = erts_no_run_queues; -#ifdef ERTS_DIRTY_SCHEDULERS if (incl_dirty_io) no_rqs += ERTS_NUM_DIRTY_RUNQS; else no_rqs += ERTS_NUM_DIRTY_CPU_RUNQS; -#endif if (atomic_queues_read) { ERTS_ATOMIC_FOREACH_RUNQ_X(rq, no_rqs, @@ -9941,27 +9480,25 @@ erts_process_status(Process *rp, Eterm rpid) Process *p = rp ? 
rp : erts_proc_lookup_raw(rpid); if (p) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + erts_aint32_t state = erts_atomic32_read_acqb(&p->state); res = erts_process_state2status(state); } -#ifdef ERTS_SMP else { int i; ErtsSchedulerData *esdp; for (i = 0; i < erts_no_schedulers; i++) { esdp = ERTS_SCHEDULER_IX(i); - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); if (esdp->free_process && esdp->free_process->common.id == rpid) { res = am_free; - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); break; } - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); } } -#endif return res; } @@ -9977,9 +9514,9 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) int suspend; ASSERT(c_p == erts_get_current_process()); - ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); if (busy_port) suspend = erts_save_suspend_process_on_port(busy_port, c_p); @@ -9995,7 +9532,7 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) } if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); if (suspend && busy_port && erts_system_monitor_flags.busy_port) monitor_generic(c_p, am_busy_port, busy_port->common.id); @@ -10004,12 +9541,12 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) void erts_resume(Process* process, ErtsProcLocks process_locks) { - ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process)); + ERTS_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process)); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(process, ERTS_PROC_LOCK_STATUS); resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(process, ERTS_PROC_LOCK_STATUS); } int @@ -10029,7 +9566,7 @@ erts_resume_processes(ErtsProcList *list) resume_process(proc, ERTS_PROC_LOCK_STATUS); nresumed++; } - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); } fplp = plp; plp = plp->next; @@ -10041,7 +9578,7 @@ erts_resume_processes(ErtsProcList *list) Eterm erts_get_process_priority(Process *p) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = erts_atomic32_read_nob(&p->state); switch (ERTS_PSFLGS_GET_USR_PRIO(state)) { case PRIORITY_MAX: return am_max; case PRIORITY_HIGH: return am_high; @@ -10064,7 +9601,7 @@ erts_set_process_priority(Process *p, Eterm value) default: return THE_NON_VALUE; break; } - a = erts_smp_atomic32_read_nob(&p->state); + a = erts_atomic32_read_nob(&p->state); if (nprio == ERTS_PSFLGS_GET_USR_PRIO(a)) oprio = nprio; else { @@ -10072,7 +9609,7 @@ erts_set_process_priority(Process *p, Eterm value) erts_aint32_t e, n, aprio; if (a & ERTS_PSFLG_ACTIVE_SYS) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); slocked = 1; } @@ -10086,7 +9623,7 @@ erts_set_process_priority(Process *p, Eterm value) int max_qbit; if (!slocked) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); slocked = 1; } @@ -10127,11 +9664,11 
@@ erts_set_process_priority(Process *p, Eterm value) n |= ((nprio << ERTS_PSFLGS_USR_PRIO_OFFSET) | (aprio << ERTS_PSFLGS_ACT_PRIO_OFFSET)); - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); } while (a != e); if (slocked) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } @@ -10216,14 +9753,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) input_reductions = INPUT_REDUCTIONS; } - ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()) + ERTS_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()) || !erts_thr_progress_is_blocking()); /* * Clean up after the process being scheduled out. */ if (!p) { /* NULL in the very first schedule() call */ -#ifdef ERTS_DIRTY_SCHEDULERS is_normal_sched = !esdp; if (is_normal_sched) { esdp = erts_get_scheduler_data(); @@ -10232,18 +9768,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) else { ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)); } -#else - esdp = erts_get_scheduler_data(); - is_normal_sched = 1; -#endif rq = erts_get_runq_current(esdp); ASSERT(esdp); - fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls); + fcalls = (int) erts_atomic32_read_acqb(&function_calls); actual_reds = reds = 0; - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } else { -#ifdef ERTS_SMP -#ifdef ERTS_DIRTY_SCHEDULERS is_normal_sched = !esdp; if (is_normal_sched) { esdp = p->scheduler_data; @@ -10252,21 +9782,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) else { ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)); } -#else - esdp = p->scheduler_data; - is_normal_sched = 1; -#endif ASSERT(esdp->current_process == p || esdp->free_process == p); -#else - esdp = erts_scheduler_data; - ASSERT(esdp->current_process == p); - is_normal_sched = 1; -#endif sched_out_proc: - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); reds = actual_reds = calls - esdp->virtual_reds; @@ -10275,14 +9796,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST; esdp->virtual_reds = 0; - fcalls = (int) erts_smp_atomic32_add_read_acqb(&function_calls, reds); + fcalls = (int) erts_atomic32_add_read_acqb(&function_calls, reds); ASSERT(esdp && esdp == erts_get_scheduler_data()); rq = erts_get_runq_current(esdp); p->reds += actual_reds; - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (IS_TRACED(p)) { if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE)) @@ -10301,20 +9822,17 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); -#ifdef ERTS_SMP if (p->trace_msg_q) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); erts_schedule_flush_trace_messages(p, 1); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); } -#endif /* have to re-read state after taking lock */ - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); -#ifdef ERTS_SMP if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT)) erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_TRACE @@ -10323,7 +9841,6 @@ Process 
*erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_TRACE | ERTS_PROC_LOCK_STATUS)); -#endif esdp->reductions += reds; @@ -10341,18 +9858,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) actual_reds); esdp->current_process = NULL; -#ifdef ERTS_SMP if (is_normal_sched) p->scheduler_data = NULL; -#endif - erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN + erts_proc_unlock(p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_STATUS | ERTS_PROC_LOCK_TRACE)); ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER); -#ifdef ERTS_SMP if (state & ERTS_PSFLG_FREE) { if (!is_normal_sched) { ASSERT(p->flags & F_DELAYED_DEL_PROC); @@ -10362,36 +9876,32 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) esdp->free_process = NULL; } } -#endif if (dec_refc) erts_proc_dec_refc(p); } -#ifdef ERTS_SMP ASSERT(!esdp->free_process); -#endif ASSERT(!esdp->current_process); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_normal_sched) { if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS) (void) erts_get_monotonic_time(esdp); if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } } } - ERTS_SMP_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking()); check_activities_to_run: { erts_aint32_t psflg_running, psflg_running_sys; -#ifdef ERTS_SMP ErtsMigrationPaths *mps; ErtsMigrationPath *mp; @@ -10399,7 +9909,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (rq->check_balance_reds <= 0) check_balance(rq); - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); mps = erts_get_migration_paths_managed(); mp = &mps->mpath[rq->ix]; @@ -10408,14 +9918,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) immigrate(rq, mp); } - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); continue_check_activities_to_run: flags = ERTS_RUNQ_FLGS_GET_NOB(rq); continue_check_activities_to_run_known_flags: ASSERT(!is_normal_sched || (flags & ERTS_RUNQ_FLG_NONEMPTY)); if (!is_normal_sched) { - if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) + if (erts_atomic32_read_acqb(&esdp->ssi->flags) & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) { suspend_scheduler(esdp); } @@ -10445,26 +9955,18 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) leader_update = erts_thr_progress_update(esdp); aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); if (aux_work | leader_update) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (leader_update) erts_thr_progress_leader_update(esdp); if (aux_work) handle_aux_work(&esdp->aux_work_data, aux_work, 0); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); } - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); -#else /* ERTS_SMP */ - { - erts_aint32_t aux_work; - aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); - if (aux_work) - handle_aux_work(&esdp->aux_work_data, aux_work, 0); - } -#endif /* ERTS_SMP */ flags = ERTS_RUNQ_FLGS_GET_NOB(rq); @@ -10475,8 +9977,7 @@ Process 
*erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } else if (!runq_got_work_to_execute_flags(flags)) { /* Prepare for scheduler wait */ -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); rq->wakeup_other = 0; rq->wakeup_other_reds = 0; @@ -10490,7 +9991,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) ASSERT(!runq_got_work_to_execute(rq)); if (!is_normal_sched) { /* Dirty scheduler */ - if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) + if (erts_atomic32_read_acqb(&esdp->ssi->flags) & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) { /* Go suspend... */ goto continue_check_activities_to_run_known_flags; @@ -10506,7 +10007,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) */ flags = ERTS_RUNQ_FLGS_GET_NOB(rq); if ((flags & ERTS_RUNQ_FLG_SUSPENDED) -#ifdef ERTS_DIRTY_SCHEDULERS /* If multi scheduling block and we have * dirty work, suspend and let dirty * scheduler handle work... */ @@ -10514,7 +10014,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) | ERTS_RUNQ_FLG_MSB_EXEC)) == ERTS_RUNQ_FLG_MSB_EXEC)) && have_dirty_work()) -#endif ) { non_empty_runq(rq); flags |= ERTS_RUNQ_FLG_NONEMPTY; @@ -10526,16 +10025,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } empty_runq(rq); } -#endif (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_EXEC); scheduler_wait(&fcalls, esdp, rq); flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC); flags |= ERTS_RUNQ_FLG_EXEC; ERTS_MSACC_UPDATE_CACHE(); -#ifdef ERTS_SMP non_empty_runq(rq); -#endif goto check_activities_to_run; } @@ -10549,13 +10045,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) ERTS_MSACC_PUSH_STATE_CACHED_M(); - erts_smp_atomic32_set_relb(&function_calls, 0); + erts_atomic32_set_relb(&function_calls, 0); fcalls = 0; #if 0 /* Not needed since we wont wait in sys schedule */ erts_sys_schedule_interrupt(0); #endif - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO); LTTNG2(scheduler_poll, esdp->no, 1); @@ -10567,21 +10063,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) erts_bump_timers(esdp->timer_wheel, current_time); -#ifdef ERTS_SMP - erts_smp_runq_lock(rq); + erts_runq_lock(rq); clear_sys_scheduling(); goto continue_check_activities_to_run; -#else - goto check_activities_to_run; -#endif } if (flags & ERTS_RUNQ_FLG_MISC_OP) exec_misc_ops(rq); -#ifdef ERTS_SMP wakeup_other.check(rq, flags); -#endif /* * Find a new port to run. 
@@ -10679,13 +10169,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) proxy_p = NULL; goto pick_next_process; } - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); } -#ifdef ERTS_DIRTY_SCHEDULERS if (!is_normal_sched) clear_proc_dirty_queue_bit(p, rq, qbit); -#endif while (1) { erts_aint32_t exp, new; @@ -10703,7 +10191,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS | ERTS_PSFLG_FREE))) -#ifdef ERTS_DIRTY_SCHEDULERS | (((state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_FREE @@ -10712,7 +10199,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) & (!!is_normal_sched)) -#endif ) & ((state & (ERTS_PSFLG_SUSPENDED | ERTS_PSFLG_EXITING @@ -10721,11 +10207,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) | ERTS_PSFLG_ACTIVE_SYS | ERTS_PSFLG_DIRTY_ACTIVE_SYS)) != ERTS_PSFLG_SUSPENDED) -#ifdef ERTS_DIRTY_SCHEDULERS & (!(state & (ERTS_PSFLG_EXITING | ERTS_PSFLG_PENDING_EXIT)) | (!!is_normal_sched)) -#endif ); if (run_process) { @@ -10735,7 +10219,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) else new |= psflg_running; } - state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp); + state = erts_atomic32_cmpxchg_relb(&p->state, new, exp); if (state == exp) { if (!run_process) { if (proxy_p) { @@ -10762,22 +10246,21 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) calls = 0; reds = context_reds; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR); -#ifdef ERTS_SMP if (flags & ERTS_RUNQ_FLG_PROTECTED) (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (erts_sched_stat.enabled) { int prio; @@ -10788,28 +10271,24 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state); - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); erts_sched_stat.prio[prio].total_executed++; erts_sched_stat.prio[prio].executed++; if (migrated) { erts_sched_stat.prio[prio].total_migrated++; erts_sched_stat.prio[prio].migrated++; } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); } - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); -#ifndef ERTS_DIRTY_SCHEDULERS - ASSERT(!p->scheduler_data); - p->scheduler_data = esdp; -#else /* ERTS_DIRTY_SCHEDULERS */ if (is_normal_sched) { if ((!!(state & ERTS_PSFLGS_DIRTY_WORK)) & (!(state & ERTS_PSFLG_ACTIVE_SYS))) { /* Migrate to dirty scheduler... 
*/ sunlock_sched_out_proc: - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); goto sched_out_proc; } ASSERT(!p->scheduler_data); @@ -10839,17 +10318,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) : (rq == ERTS_DIRTY_IO_RUNQ && (state & ERTS_PSFLG_DIRTY_IO_PROC))); } -#endif if (state & ERTS_PSFLG_PENDING_EXIT) { erts_handle_pending_exit(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); } -#endif /* ERTS_SMP */ - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); /* Clear tracer if it has been removed */ if (IS_TRACED(p) && erts_is_tracer_proc_enabled( @@ -10885,10 +10362,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) reds -= cost; if (reds <= 0) goto sched_out_proc; -#ifdef ERTS_DIRTY_SCHEDULERS if (state & ERTS_PSFLGS_DIRTY_WORK) goto sched_out_proc; -#endif } ASSERT(state & psflg_running_sys); @@ -10907,7 +10382,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) n &= ~psflg_running_sys; n |= psflg_running; - state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + state = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (state == e) { state = n; break; @@ -10926,10 +10401,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) reds -= cost; if (reds <= 0) goto sched_out_proc; -#ifdef ERTS_DIRTY_SCHEDULERS if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) goto sched_out_proc; -#endif } } } @@ -10941,12 +10414,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) p->fcalls = reds; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); /* Never run a suspended process */ #ifdef DEBUG { - erts_aint32_t dstate = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t dstate = erts_atomic32_read_nob(&p->state); ASSERT(!(ERTS_PSFLG_SUSPENDED & dstate) || (ERTS_PSFLG_DIRTY_RUNNING_SYS & dstate)); } @@ -10956,9 +10429,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (!(state & ERTS_PSFLG_EXITING) && ERTS_PTMR_IS_TIMED_OUT(p)) { BeamInstr** pi; -#ifdef ERTS_SMP ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); -#endif pi = (BeamInstr **) p->def_arg_reg; p->i = *pi; p->flags &= ~F_INSLPQUEUE; @@ -10975,13 +10446,11 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result, int normal_sched) { Process *rp; -#ifdef ERTS_DIRTY_SCHEDULERS if (!normal_sched) rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, st->requester, 0, ERTS_P2P_FLG_INC_REFC); else -#endif rp = erts_proc_lookup(st->requester); if (rp) { ErtsProcLocks rp_locks; @@ -11029,12 +10498,10 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); -#ifdef ERTS_DIRTY_SCHEDULERS if (!normal_sched) erts_proc_dec_refc(rp); -#endif } erts_cleanup_offheap(&st->off_heap); @@ -11053,7 +10520,7 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop) *priop = -1; /* Shut up annoying erroneous warning */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); if (!c_p->sys_task_qs) { qmask = 0; @@ -11173,13 +10640,13 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop) if (a == n) break; - a = erts_smp_atomic32_cmpxchg_nob(&c_p->state, n, e); + a = 
erts_atomic32_cmpxchg_nob(&c_p->state, n, e); } while (a != e); } done: - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); if (unused_qs) proc_sys_task_queues_free(unused_qs); @@ -11190,9 +10657,7 @@ done: } static void save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio); -#ifdef ERTS_DIRTY_SCHEDULERS static void save_dirty_task(Process *c_p, ErtsProcSysTask *st); -#endif static int execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) @@ -11203,7 +10668,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) int qmask = 0; ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); do { ErtsProcSysTaskType type; @@ -11212,10 +10677,8 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) Eterm st_res; if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(c_p, ERTS_PROC_LOCK_MAIN); -#endif ASSERT(ERTS_PROC_IS_EXITING(c_p)); break; } @@ -11242,13 +10705,11 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) FLAGS(c_p) |= F_NEED_FULLSWEEP; } reds -= scheduler_gc_proc(c_p, reds); -#ifdef ERTS_DIRTY_SCHEDULERS if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) { save_dirty_task(c_p, st); st = NULL; break; } -#endif if (type == ERTS_PSTT_GC_MAJOR) minor_gc = major_gc = 1; else @@ -11290,13 +10751,11 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) fcalls, do_gc); reds -= cla_reds; if (is_non_value(st_res)) { -#ifdef ERTS_DIRTY_SCHEDULERS if (c_p->flags & F_DIRTY_CLA) { save_dirty_task(c_p, st); st = NULL; break; } -#endif /* Needed gc, but gc was disabled */ save_gc_task(c_p, st, st_prio); st = NULL; @@ -11310,18 +10769,14 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) reds -= erts_complete_off_heap_message_queue_change(c_p); st_res = am_true; break; -#ifdef ERTS_SMP case ERTS_PSTT_FTMQ: reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN); st_res = am_true; break; -#endif -#ifdef ERTS_SMP case ERTS_PSTT_ETS_FREE_FIXATION: reds -= erts_db_execute_free_fixation(c_p, (DbFixation*)st->arg[0]); st_res = am_true; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid process sys task type"); st_res = am_false; @@ -11330,7 +10785,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) if (st) reds += notify_sys_task_executed(c_p, st, st_res, 1); - state = erts_smp_atomic32_read_acqb(&c_p->state); + state = erts_atomic32_read_acqb(&c_p->state); } while (qmask && reds > 0); *statep = state; @@ -11351,20 +10806,18 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) * are dirty tasks. 
*/ - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); do { ErtsProcSysTask *st; Eterm st_res; int st_prio; -#ifdef ERTS_DIRTY_SCHEDULERS if (c_p->dirty_sys_tasks) { st = c_p->dirty_sys_tasks; c_p->dirty_sys_tasks = st->next; } else -#endif { st = fetch_sys_task(c_p, state, &qmask, &st_prio); if (!st) @@ -11382,12 +10835,10 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) case ERTS_PSTT_CLA: st_res = am_ok; break; -#ifdef ERTS_SMP case ERTS_PSTT_FTMQ: reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN); st_res = am_true; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid process sys task type"); st_res = am_false; @@ -11396,13 +10847,12 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) reds += notify_sys_task_executed(c_p, st, st_res, 1); - state = erts_smp_atomic32_read_acqb(&c_p->state); + state = erts_atomic32_read_acqb(&c_p->state); } while (qmask && reds < max_reds); return reds; } -#ifdef ERTS_DIRTY_SCHEDULERS void erts_execute_dirty_system_task(Process *c_p) @@ -11431,19 +10881,19 @@ erts_execute_dirty_system_task(Process *c_p) } if (c_p->flags & F_DIRTY_GC_HIBERNATE) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); if (c_p->msg.len) c_p->flags &= ~F_DIRTY_GC_HIBERNATE; /* operation aborted... */ else { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->fvalue = NIL; erts_garbage_collect_hibernate(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); } if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) { @@ -11487,7 +10937,7 @@ erts_execute_dirty_system_task(Process *c_p) } - erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS); + erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS); } static BIF_RETTYPE @@ -11540,12 +10990,11 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state, erts_queue_message(rp, rp_locks, mp, msg, st->requester); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); return ret; } -#endif static BIF_RETTYPE request_system_task(Process *c_p, Eterm requester, Eterm target, @@ -11650,7 +11099,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, st->type = ERTS_PSTT_CPC; if (!rp) goto noproc; -#ifdef ERTS_DIRTY_SCHEDULERS /* * If the process should start executing dirty * code it is important that this task is @@ -11658,7 +11106,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, */ fail_state |= (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS); -#endif break; case am_copy_literals: @@ -11680,14 +11127,12 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, noproc: failure = noproc_res; } -#ifdef ERTS_DIRTY_SCHEDULERS else if (fail_state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { ret = dispatch_system_task(c_p, fail_state, st, target, priority, operation); goto cleanup_return; } -#endif else { 
ERTS_INTERNAL_ERROR("Unknown failure schedule_process_sys_task()"); failure = am_internal_error; @@ -11703,9 +11148,7 @@ badarg: ERTS_BIF_PREP_ERROR(ret, c_p, BADARG); -#ifdef ERTS_DIRTY_SCHEDULERS cleanup_return: -#endif if (st) { erts_cleanup_offheap(&st->off_heap); @@ -11746,7 +11189,7 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type, void* arg) st->req_id_sz = 0; st->arg[0] = (Eterm)arg; ERTS_INIT_OFF_HEAP(&st->off_heap); - state = erts_smp_atomic32_read_nob(&rp->state); + state = erts_atomic32_read_nob(&rp->state); fail_state = ERTS_PSFLG_EXITING; @@ -11769,7 +11212,6 @@ erts_schedule_ets_free_fixation(Eterm pid, DbFixation* fix) erts_schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION, fix); } -#ifdef ERTS_DIRTY_SCHEDULERS static void flush_dirty_trace_messages(void *vpid) @@ -11786,45 +11228,35 @@ flush_dirty_trace_messages(void *vpid) proc = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_MAIN, 0); if (proc) { (void) erts_flush_trace_messages(proc, ERTS_PROC_LOCK_MAIN); - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); } } -#endif /* ERTS_DIRTY_SCHEDULERS */ void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc) { -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl; -#endif Eterm pid = proc->common.id; -#ifdef ERTS_DIRTY_SCHEDULERS erts_aint32_t state; if (!force_on_proc) { - state = erts_smp_atomic32_read_nob(&proc->state); + state = erts_atomic32_read_nob(&proc->state); if (state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { goto sched_flush_dirty; } } -#endif -#ifdef ERTS_SMP dhndl = erts_thr_progress_unmanaged_delay(); -#endif erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, NULL); -#ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); -#endif -#ifdef ERTS_DIRTY_SCHEDULERS if (!force_on_proc) { - state = erts_smp_atomic32_read_mb(&proc->state); + state = erts_atomic32_read_mb(&proc->state); if (state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { void *vargp; @@ -11852,7 +11284,6 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc) erts_schedule_misc_aux_work(1, flush_dirty_trace_messages, vargp); } } -#endif } static void @@ -11861,7 +11292,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) erts_aint32_t state; ErtsProcSysTaskQs *qs; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p); if (!qs) { @@ -11891,7 +11322,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) } } - state = erts_smp_atomic32_read_nob(&c_p->state); + state = erts_atomic32_read_nob(&c_p->state); ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_DIRTY_RUNNING @@ -11907,20 +11338,18 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) n &= ~ERTS_PSFLGS_ACT_PRIO_MASK; n |= prio << ERTS_PSFLGS_ACT_PRIO_OFFSET; } - state = erts_smp_atomic32_cmpxchg_relb(&c_p->state, n, e); + state = erts_atomic32_cmpxchg_relb(&c_p->state, n, e); if (state == e) break; } } -#ifdef ERTS_DIRTY_SCHEDULERS static void save_dirty_task(Process *c_p, ErtsProcSysTask *st) { st->next = c_p->dirty_sys_tasks; c_p->dirty_sys_tasks = st; } -#endif int erts_set_gc_state(Process *c_p, int enable) @@ -11928,8 +11357,8 @@ erts_set_gc_state(Process *c_p, int enable) ErtsProcSysTaskQs *dgc_tsk_qs; ASSERT(c_p == erts_get_current_process()); ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) - & 
erts_smp_atomic32_read_nob(&c_p->state)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + & erts_atomic32_read_nob(&c_p->state)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); if (!enable) { c_p->flags |= F_DISABLE_GC; @@ -11944,7 +11373,7 @@ erts_set_gc_state(Process *c_p, int enable) /* Move delayed gc tasks into sys tasks queues. */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); if (!c_p->sys_task_qs) { c_p->sys_task_qs = dgc_tsk_qs; @@ -12017,7 +11446,7 @@ erts_set_gc_state(Process *c_p, int enable) erts_aint32_t aprio, state = #endif - erts_smp_atomic32_read_bset_nob(&c_p->state, + erts_atomic32_read_bset_nob(&c_p->state, (ERTS_PSFLG_DELAYED_SYS | ERTS_PSFLG_ACTIVE_SYS), ERTS_PSFLG_ACTIVE_SYS); @@ -12031,7 +11460,7 @@ erts_set_gc_state(Process *c_p, int enable) } #endif - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, NULL); @@ -12047,24 +11476,24 @@ erts_sched_stat_modify(int what) int ix; switch (what) { case ERTS_SCHED_STAT_MODIFY_ENABLE: - erts_smp_thr_progress_block(); + erts_thr_progress_block(); erts_sched_stat.enabled = 1; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case ERTS_SCHED_STAT_MODIFY_DISABLE: - erts_smp_thr_progress_block(); + erts_thr_progress_block(); erts_sched_stat.enabled = 0; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case ERTS_SCHED_STAT_MODIFY_CLEAR: - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) { erts_sched_stat.prio[ix].total_executed = 0; erts_sched_stat.prio[ix].executed = 0; erts_sched_stat.prio[ix].total_migrated = 0; erts_sched_stat.prio[ix].migrated = 0; } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); break; } } @@ -12078,7 +11507,7 @@ erts_sched_stat_term(Process *p, int total) Uint executed[ERTS_NO_PRIO_LEVELS]; Uint migrated[ERTS_NO_PRIO_LEVELS]; - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); if (total) { int i; for (i = 0; i < ERTS_NO_PRIO_LEVELS; i++) { @@ -12097,7 +11526,7 @@ erts_sched_stat_term(Process *p, int total) erts_sched_stat.prio[i].migrated = 0; } } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); sz = 0; (void) erts_bld_atom_2uint_3tup_list(NULL, &sz, ERTS_NO_PRIO_LEVELS, @@ -12117,7 +11546,6 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsRunQueue *rq = esdp ? 
esdp->run_queue : ERTS_RUNQ_IX(0); ErtsMiscOpList *molp = misc_op_list_alloc(); -#ifdef ERTS_SMP ErtsMigrationPaths *mpaths = erts_get_migration_paths(); if (!mpaths) @@ -12127,9 +11555,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) if (erq) rq = erq; } -#endif - erts_smp_runq_lock(rq); + erts_runq_lock(rq); molp->next = NULL; molp->func = func; @@ -12140,13 +11567,11 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) rq->misc.start = molp; rq->misc.end = molp; -#ifdef ERTS_SMP non_empty_runq(rq); -#endif ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); smp_notify_inc_runq(rq); } @@ -12179,7 +11604,7 @@ exec_misc_ops(ErtsRunQueue *rq) if (!rq->misc.start) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); while (molp) { tmp_molp = molp; @@ -12188,7 +11613,7 @@ exec_misc_ops(ErtsRunQueue *rq) misc_op_list_free(tmp_molp); } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } Uint @@ -12219,12 +11644,12 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp) { Uint reds = erts_current_reductions(c_p, c_p); int ix; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); /* * Wait for other schedulers to schedule out their processes * and update 'reductions'. */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); for (reds = 0, ix = 0; ix < erts_no_run_queues; ix++) reds += ERTS_RUNQ_IX(ix)->procs.reductions; if (redsp) @@ -12232,8 +11657,8 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp) if (diffp) *diffp = reds - last_exact_reductions; last_exact_reductions = reds; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } static void delete_process(Process* p); @@ -12241,10 +11666,8 @@ static void delete_process(Process* p); void erts_free_proc(Process *p) { -#ifdef ERTS_SMP erts_proc_lock_fin(p); -#endif - ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE); + ASSERT(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE); ASSERT(0 == erts_proc_read_refc(p)); if (p->flags & F_DELAYED_DEL_PROC) delete_process(p); @@ -12263,17 +11686,13 @@ static void early_init_process_struct(void *varg, Eterm data) Process *proc = arg->proc; proc->common.id = make_internal_pid(data); -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_atomic32_init_nob(&proc->dirty_state, 0); + erts_atomic32_init_nob(&proc->dirty_state, 0); proc->dirty_sys_tasks = NULL; -#endif - erts_smp_atomic32_init_relb(&proc->state, arg->state); + erts_atomic32_init_relb(&proc->state, arg->state); -#ifdef ERTS_SMP RUNQ_SET_RQ(&proc->run_queue, arg->run_queue); erts_proc_lock_init(proc); /* All locks locked */ -#endif } @@ -12346,7 +11765,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). INITIALIZE_LITERAL_PURGE_AREA(litarea); #endif - erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); /* * Check for errors. @@ -12394,9 +11813,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
goto error; } - ASSERT((erts_smp_atomic32_read_nob(&p->state) + ASSERT((erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_ON_HEAP_MSGQ) - || (erts_smp_atomic32_read_nob(&p->state) + || (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ)); #ifdef SHCOPY_SPAWN @@ -12422,7 +11841,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->min_vheap_size = BIN_VH_MIN_SIZE; MAX_HEAP_SIZE_SET(p, H_MAX_SIZE); MAX_HEAP_SIZE_FLAGS_SET(p, H_MAX_FLAGS); - p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + p->max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs); } p->schedule_count = 0; ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0)); @@ -12448,10 +11867,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #ifdef HIPE hipe_init_process(&p->hipe); -#ifdef ERTS_SMP hipe_init_process_smp(&p->hipe_smp); #endif -#endif p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz); p->old_hend = p->old_htop = p->old_heap = NULL; p->high_water = p->heap; @@ -12520,16 +11937,14 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->msg.save = &p->msg.first; p->msg.saved_last = &p->msg.first; p->msg.len = 0; -#ifdef ERTS_SMP p->msg_inq.first = NULL; p->msg_inq.last = &p->msg_inq.first; p->msg_inq.len = 0; -#endif p->bif_timers = NULL; p->mbuf = NULL; p->msg_frag = NULL; p->mbuf_sz = 0; - erts_smp_atomic_init_nob(&p->psd, (erts_aint_t) NULL); + erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL); p->dictionary = NULL; p->seq_trace_lastcnt = 0; p->seq_trace_clock = 0; @@ -12547,14 +11962,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->last_old_htop = NULL; #endif -#ifdef ERTS_SMP p->trace_msg_q = NULL; p->scheduler_data = NULL; p->suspendee = NIL; p->pending_suspenders = NULL; p->pending_exit.reason = THE_NON_VALUE; p->pending_exit.bp = NULL; -#endif #if !defined(NO_FPE_SIGNALS) || defined(HIPE) p->fp_exception = 0; @@ -12582,8 +11995,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). } if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) { locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); trace_proc_spawn(parent, am_spawn, p->common.id, mod, func, args); if (so->flags & SPO_LINK) trace_proc(parent, locks, parent, am_link, p->common.id); @@ -12595,8 +12008,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) { /* This happens when parent was not traced, but child is */ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); } trace_proc_spawn(p, am_spawned, parent->common.id, mod, func, args); if (so->flags & SPO_LINK) @@ -12635,7 +12048,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
so->mref = mref; } - erts_smp_proc_unlock(p, locks); + erts_proc_unlock(p, locks); res = p->common.id; @@ -12643,7 +12056,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). * Schedule process for execution. */ - erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); schedule_process(p, state, 0); @@ -12663,7 +12076,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). error: - erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); return res; } @@ -12714,7 +12127,7 @@ void erts_init_empty_process(Process *p) p->mbuf = NULL; p->msg_frag = NULL; p->mbuf_sz = 0; - erts_smp_atomic_init_nob(&p->psd, (erts_aint_t) NULL); + erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL); ERTS_P_MONITORS(p) = NULL; ERTS_P_LINKS(p) = NULL; /* List of links */ p->nodes_monitors = NULL; @@ -12757,23 +12170,18 @@ void erts_init_empty_process(Process *p) #ifdef HIPE hipe_init_process(&p->hipe); -#ifdef ERTS_SMP hipe_init_process_smp(&p->hipe_smp); #endif -#endif INIT_HOLE_CHECK(p); #ifdef DEBUG p->last_old_htop = NULL; #endif -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_atomic32_init_nob(&p->dirty_state, 0); + erts_atomic32_init_nob(&p->dirty_state, 0); p->dirty_sys_tasks = NULL; -#endif - erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL); + erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL); -#ifdef ERTS_SMP p->scheduler_data = NULL; p->msg_inq.first = NULL; p->msg_inq.last = &p->msg_inq.first; @@ -12783,9 +12191,8 @@ void erts_init_empty_process(Process *p) p->pending_exit.reason = THE_NON_VALUE; p->pending_exit.bp = NULL; erts_proc_lock_init(p); - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0)); -#endif #if !defined(NO_FPE_SIGNALS) || defined(HIPE) p->fp_exception = 0; @@ -12832,14 +12239,12 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->parent == NIL); -#ifdef ERTS_SMP ASSERT(p->msg_inq.first == NULL); ASSERT(p->msg_inq.len == 0); ASSERT(p->suspendee == NIL); ASSERT(p->pending_suspenders == NULL); ASSERT(p->pending_exit.reason == THE_NON_VALUE); ASSERT(p->pending_exit.bp == NULL); -#endif /* Thing that erts_cleanup_empty_process() cleans up */ @@ -12864,9 +12269,7 @@ erts_cleanup_empty_process(Process* p) free_message_buffer(p->mbuf); p->mbuf = NULL; } -#ifdef ERTS_SMP erts_proc_lock_fin(p); -#endif #ifdef DEBUG erts_debug_verify_clean_empty_process(p); #endif @@ -12898,10 +12301,10 @@ delete_process(Process* p) /* Cleanup psd */ - psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); + psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd); if (psd) { - erts_smp_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... */ + erts_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... 
*/ erts_free(ERTS_ALC_T_PSD, psd); } @@ -12959,7 +12362,7 @@ set_proc_exiting(Process *p, { erts_aint32_t state = in_state, enq_prio = -1; int enqueue; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL); enqueue = change_proc_schedule_state(p, (ERTS_PSFLG_SUSPENDED @@ -12982,22 +12385,6 @@ set_proc_exiting(Process *p, KILL_CATCHES(p); p->i = (BeamInstr *) beam_exit; -#ifndef ERTS_SMP - if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) - && !(state & ERTS_PSFLG_GC)) { - /* - * I non smp case: - * - * Currently executing process might be sent an exit - * signal if it is traced by a port that it also is - * linked to, and the port terminates during the - * trace. In this case we want schedule out the - * process as quickly as possible in order to detect - * the event as fast as possible. - */ - ERTS_VBUMP_ALL_REDS(p); - } -#endif add2runq(enqueue, enq_prio, p, state, NULL); } @@ -13010,9 +12397,9 @@ set_proc_self_exiting(Process *c_p) #endif erts_aint32_t state, enq_prio = -1; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL); - state = erts_smp_atomic32_read_nob(&c_p->state); + state = erts_atomic32_read_nob(&c_p->state); ASSERT(state & (ERTS_PSFLG_RUNNING |ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_DIRTY_RUNNING @@ -13032,38 +12419,37 @@ set_proc_self_exiting(Process *c_p) return state; } -#ifdef ERTS_SMP void erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks) { ErtsProcLocks xlocks; ASSERT(is_value(c_p->pending_exit.reason)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks); - ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE) - & erts_smp_atomic32_read_nob(&c_p->state))); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks); + ERTS_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE) + & erts_atomic32_read_nob(&c_p->state))); /* Ensure that all locks on c_p are locked before proceeding... */ if (locks == ERTS_PROC_LOCKS_ALL) xlocks = 0; else { xlocks = ~locks & ERTS_PROC_LOCKS_ALL; - if (erts_smp_proc_trylock(c_p, xlocks) == EBUSY) { - erts_smp_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + if (erts_proc_trylock(c_p, xlocks) == EBUSY) { + erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } } set_proc_exiting(c_p, - erts_smp_atomic32_read_acqb(&c_p->state), + erts_atomic32_read_acqb(&c_p->state), c_p->pending_exit.reason, c_p->pending_exit.bp); c_p->pending_exit.reason = THE_NON_VALUE; c_p->pending_exit.bp = NULL; if (xlocks) - erts_smp_proc_unlock(c_p, xlocks); + erts_proc_unlock(c_p, xlocks); } static void save_pending_exiter(Process *p, ErtsProcList *plp); @@ -13084,9 +12470,9 @@ do_handle_pending_exiters(ErtsProcList *pnd_xtrs) * pending exit will soon be detected and handled by the * scheduler running the process (at schedule in/out). 
*/ - if (erts_smp_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) { + if (erts_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) { if (erts_proclist_same(plp, p)) { - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_EXITING))) { @@ -13094,12 +12480,12 @@ do_handle_pending_exiters(ErtsProcList *pnd_xtrs) erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL); } } - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); } else { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (erts_proclist_same(plp, p)) { - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_EXITING))) { @@ -13111,7 +12497,7 @@ do_handle_pending_exiters(ErtsProcList *pnd_xtrs) plp = NULL; } } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } } if (plp) @@ -13126,7 +12512,7 @@ save_pending_exiter(Process *p, ErtsProcList *plp) ErtsSchedulerSleepInfo *ssi; ErtsRunQueue *rq; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); rq = RUNQ_READ_RQ(&p->run_queue); ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); @@ -13134,7 +12520,7 @@ save_pending_exiter(Process *p, ErtsProcList *plp) if (!plp) plp = proclist_create(p); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); erts_proclist_store_last(&rq->procs.pending_exiters, plp); @@ -13142,12 +12528,11 @@ save_pending_exiter(Process *p, ErtsProcList *plp) ssi = rq->scheduler->ssi; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS); } -#endif /* * This function delivers an EXIT message to a process @@ -13283,11 +12668,11 @@ send_exit_signal(Process *c_p, /* current process if and only Uint32 flags /* flags */ ) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t state = erts_atomic32_read_nob(&rp->state); Eterm rsn = reason == am_kill ? 
am_killed : reason; - ERTS_SMP_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp)); - ERTS_SMP_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) + ERTS_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp)); + ERTS_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) == ERTS_PROC_LOCKS_XSIG_SEND); ASSERT(reason != THE_NON_VALUE); @@ -13308,7 +12693,7 @@ send_exit_signal(Process *c_p, /* current process if and only if ((state & ERTS_PSFLG_TRAP_EXIT) && (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) { /* have to release the status lock in order to send the exit message */ - erts_smp_proc_unlock(rp, *rp_locks & ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, *rp_locks & ERTS_PROC_LOCKS_XSIG_SEND); *rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; if (have_seqtrace(token) && token_update) seq_trace_update_send(token_update); @@ -13319,7 +12704,6 @@ send_exit_signal(Process *c_p, /* current process if and only return 1; /* Receiver will get a message */ } else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) { -#ifdef ERTS_SMP if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) { ASSERT(!rp->pending_exit.bp); @@ -13329,10 +12713,10 @@ send_exit_signal(Process *c_p, /* current process if and only if (*rp_locks != ERTS_PROC_LOCKS_ALL) { ErtsProcLocks need_locks = (~(*rp_locks) & ERTS_PROC_LOCKS_ALL); - if (erts_smp_proc_trylock(c_p, need_locks) == EBUSY) { - erts_smp_proc_unlock(c_p, + if (erts_proc_trylock(c_p, need_locks) == EBUSY) { + erts_proc_unlock(c_p, *rp_locks & ~ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } *rp_locks = ERTS_PROC_LOCKS_ALL; } @@ -13346,7 +12730,7 @@ send_exit_signal(Process *c_p, /* current process if and only ErlHeapFragment *bp = NULL; Eterm rsn_cpy; if (need_locks - && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { + && erts_proc_trylock(rp, need_locks) == EBUSY) { /* ... but we havn't got all locks on it ... */ save_pending_exiter(rp, NULL); /* @@ -13358,7 +12742,7 @@ send_exit_signal(Process *c_p, /* current process if and only /* ...and we have all locks on it... */ *rp_locks = ERTS_PROC_LOCKS_ALL; - state = erts_smp_atomic32_read_nob(&rp->state); + state = erts_atomic32_read_nob(&rp->state); if (is_immed(rsn)) rsn_cpy = rsn; @@ -13366,14 +12750,12 @@ send_exit_signal(Process *c_p, /* current process if and only Eterm *hp; ErlOffHeap *ohp; Uint rsn_sz = size_object(rsn); -#ifdef ERTS_DIRTY_SCHEDULERS if (state & ERTS_PSFLG_DIRTY_RUNNING) { bp = new_message_buffer(rsn_sz); ohp = &bp->off_heap; hp = &bp->mem[0]; } else -#endif { hp = HAlloc(rp, rsn_sz); ohp = &rp->off_heap; @@ -13413,12 +12795,8 @@ send_exit_signal(Process *c_p, /* current process if and only * has been scheduled, we may need to add it to a normal run * queue... 
*/ -#ifndef ERTS_DIRTY_SCHEDULERS - (void) erts_smp_atomic32_read_bor_relb(&rp->state, - ERTS_PSFLG_PENDING_EXIT); -#else { - erts_aint32_t a = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t a = erts_atomic32_read_nob(&rp->state); while (1) { erts_aint32_t n, e; int dwork; @@ -13426,7 +12804,7 @@ send_exit_signal(Process *c_p, /* current process if and only n |= ERTS_PSFLG_PENDING_EXIT; dwork = !!(n & ERTS_PSFLGS_DIRTY_WORK); n &= ~ERTS_PSFLGS_DIRTY_WORK; - a = erts_smp_atomic32_cmpxchg_mb(&rp->state, n, e); + a = erts_atomic32_cmpxchg_mb(&rp->state, n, e); if (a == e) { if (dwork) erts_schedule_process(rp, n, *rp_locks); @@ -13434,7 +12812,6 @@ send_exit_signal(Process *c_p, /* current process if and only } } } -#endif } } /* else: @@ -13446,17 +12823,6 @@ send_exit_signal(Process *c_p, /* current process if and only * that the receiver *will* exit; either on the pending * exit or by itself before seeing the pending exit. */ -#else /* !ERTS_SMP */ - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); - if (!(state & ERTS_PSFLG_EXITING)) { - set_proc_exiting(rp, - state, - (is_immed(rsn) || c_p == rp - ? rsn - : copy_object(rsn, rp)), - NULL); - } -#endif return -1; /* Receiver will exit */ } @@ -13504,9 +12870,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) ASSERT(is_node_name_atom(mon->u.pid)); dep = erts_sysname_to_connected_dist_entry(mon->u.pid); if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rmon = erts_remove_monitor(&(dep->monitors), mon->ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; int code = erts_dsig_prepare(&dsd, dep, NULL, @@ -13521,7 +12887,6 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) } erts_destroy_monitor(rmon); } - erts_deref_dist_entry(dep); } } else { ASSERT(is_pid(mon->u.pid) || is_port(mon->u.pid)); @@ -13532,7 +12897,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) goto done; } rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon == NULL) { goto done; } @@ -13551,9 +12916,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) dep = external_pid_dist_entry(mon->u.pid); ASSERT(dep != NULL); if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rmon = erts_remove_monitor(&(dep->monitors), mon->ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; int code = erts_dsig_prepare(&dsd, dep, NULL, @@ -13604,15 +12969,15 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) } UnUseTmpHeapNoproc(3); /* else: demonitor while we exited, i.e. do nothing... 
*/ - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } else { /* external by pid or name */ ASSERT(is_external_pid(mon->u.pid)); dep = external_pid_dist_entry(mon->u.pid); ASSERT(dep != NULL); if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rmon = erts_remove_monitor(&(dep->monitors), mon->ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; int code = erts_dsig_prepare(&dsd, dep, NULL, @@ -13720,7 +13085,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) /* We didn't exit the process and it is traced */ if (IS_TRACED_FL(rp, F_TRACE_PROCS)) { if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; } trace_proc(NULL, 0, rp, am_getting_unlinked, p->common.id); @@ -13728,7 +13093,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) } } ASSERT(rp != p); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } else if (is_external_pid(item)) { @@ -13738,14 +13103,14 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) int code; ErtsDistLinkData dld; erts_remove_dist_link(&dld, p->common.id, item, dep); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0); if (code == ERTS_DSIG_PREP_CONNECTED) { code = erts_dsig_send_exit_tt(&dsd, p->common.id, item, reason, SEQ_TRACE_TOKEN(p)); ASSERT(code == ERTS_DSIG_SEND_OK); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); erts_destroy_dist_link(&dld); } } @@ -13756,12 +13121,11 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) if(dep) { /* dist entries have node links in a separate structure to avoid confusion */ - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rlnk = erts_remove_link(&(dep->node_links), p->common.id); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rlnk) erts_destroy_link(rlnk); - erts_deref_dist_entry(dep); } break; @@ -13781,7 +13145,7 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p) ASSERT(suspendee != vc_p); if (smon->active) resume_process(suspendee, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } erts_destroy_suspend_monitor(smon); } @@ -13810,18 +13174,13 @@ erts_do_exit_process(Process* p, Eterm reason) erts_exit(ERTS_DUMP_EXIT, "System process %T terminated: %T\n", p->common.id, reason); -#ifdef ERTS_SMP - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); /* By locking all locks (main lock is already locked) when going to exiting state (ERTS_PSFLG_EXITING), it is enough to take any lock when looking up a process (erts_pid2proc()) to prevent the looked up process from exiting until the lock has been released. */ - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); -#endif + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); -#ifndef ERTS_SMP - set_proc_self_exiting(p); -#else if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) { /* Process exited before pending exit was received... 
*/ p->pending_exit.reason = THE_NON_VALUE; @@ -13833,8 +13192,7 @@ erts_do_exit_process(Process* p, Eterm reason) cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); -#endif + ERTS_MSGQ_MV_INQ2PRIVQ(p); if (IS_TRACED(p)) { if (IS_TRACED_FL(p, F_TRACE_CALLS)) @@ -13853,7 +13211,7 @@ erts_do_exit_process(Process* p, Eterm reason) ASSERT(erts_proc_read_refc(p) > 0); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); if (IS_TRACED_FL(p,F_TRACE_PROCS)) trace_proc(p, ERTS_PROC_LOCK_MAIN, p, am_exit, reason); @@ -13875,7 +13233,7 @@ erts_continue_exit_process(Process *p) ErtsMonitor *mon; ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN; Eterm reason = p->fvalue; - DistEntry *dep; + DistEntry *dep = NULL; erts_aint32_t state; int delay_del_proc = 0; @@ -13883,7 +13241,7 @@ erts_continue_exit_process(Process *p) int yield_allowed = 1; #endif - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); ASSERT(ERTS_PROC_IS_EXITING(p)); @@ -13897,7 +13255,6 @@ erts_continue_exit_process(Process *p) p->bif_timers = NULL; } -#ifdef ERTS_SMP if (p->flags & F_SCHDLR_ONLN_WAITQ) abort_sched_onln_chng_waitq(p); @@ -13941,7 +13298,6 @@ erts_continue_exit_process(Process *p) __FILE__, __LINE__, (int) ssr); } } -#endif if (p->flags & F_USING_DB) { if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN)) @@ -13950,24 +13306,20 @@ erts_continue_exit_process(Process *p) } erts_set_gc_state(p, 1); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (state & ERTS_PSFLG_ACTIVE_SYS -#ifdef ERTS_DIRTY_SCHEDULERS || p->dirty_sys_tasks -#endif ) { if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2) goto yield; } #ifdef DEBUG - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); ASSERT(p->sys_task_qs == NULL); ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(p->dirty_sys_tasks == NULL); -#endif - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); #endif if (p->flags & F_USING_DDLL) { @@ -14000,7 +13352,7 @@ erts_continue_exit_process(Process *p) if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT)) trace_sched(p, curr_locks, am_out_exited); - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); curr_locks = ERTS_PROC_LOCKS_ALL; /* @@ -14023,23 +13375,19 @@ erts_continue_exit_process(Process *p) ErtsRunQueue *rq; rq = erts_get_runq_current(erts_proc_sched_data(p)); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); -#ifdef ERTS_SMP ASSERT(p->scheduler_data); ASSERT(p->scheduler_data->current_process == p); ASSERT(p->scheduler_data->free_process == NULL); p->scheduler_data->current_process = NULL; p->scheduler_data->free_process = p; -#else - erts_proc_inc_refc(p); /* Decremented in schedule() */ -#endif /* Time of death! 
*/ erts_ptab_delete_element(&erts_proc, &p->common); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } /* @@ -14051,7 +13399,7 @@ erts_continue_exit_process(Process *p) { /* Inactivate and notify free */ - erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t n, e, a = erts_atomic32_read_nob(&p->state); int refc_inced = 0; while (1) { n = e = a; @@ -14062,12 +13410,11 @@ erts_continue_exit_process(Process *p) erts_proc_inc_refc(p); refc_inced = 1; } - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } -#ifdef ERTS_DIRTY_SCHEDULERS if (a & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { p->flags |= F_DELAYED_DEL_PROC; @@ -14077,18 +13424,20 @@ erts_continue_exit_process(Process *p) * when done with the process... */ } -#endif if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ)) erts_proc_dec_refc(p); } - - dep = (p->flags & F_DISTRIBUTION) ? erts_this_dist_entry : NULL; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + dep = ((p->flags & F_DISTRIBUTION) + ? ERTS_PROC_SET_DIST_ENTRY(p, NULL) + : NULL); + + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); if (dep) { - erts_do_net_exits(dep, reason); + erts_do_net_exits(dep, (reason == am_kill) ? am_killed : reason); + erts_deref_dist_entry(dep); } /* @@ -14120,12 +13469,10 @@ erts_continue_exit_process(Process *p) have none here */ } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); -#ifdef ERTS_SMP erts_flush_trace_messages(p, ERTS_PROC_LOCK_MAIN); -#endif ERTS_TRACER_CLEAR(&ERTS_TRACER(p)); @@ -14140,20 +13487,20 @@ erts_continue_exit_process(Process *p) ASSERT(yield_allowed); #endif - ERTS_SMP_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks); + ERTS_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks); p->i = (BeamInstr *) beam_continue_exit; if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); curr_locks |= ERTS_PROC_LOCK_STATUS; } if (curr_locks != ERTS_PROC_LOCK_MAIN) - erts_smp_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks); + erts_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); BUMP_ALL_REDS(p); } @@ -14189,7 +13536,7 @@ erts_program_counter_info(fmtfn_t to, void *to_arg, Process *p) erts_print(to, to_arg, "CP: %p (", p->cp); print_function_from_pc(to, to_arg, p->cp); erts_print(to, to_arg, ")\n"); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_GC))) { @@ -14270,8 +13617,7 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { erts_print(to, to_arg, "=scheduler:%u\n", esdp->no); -#ifdef ERTS_SMP - flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags); + flg = erts_atomic32_read_dirty(&esdp->ssi->flags); erts_print(to, to_arg, "Scheduler Sleep Info Flags: "); for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) { erts_aint32_t chk = (1 << i); @@ -14298,7 +13644,6 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { } } erts_print(to, to_arg, "\n"); -#endif flg = 
erts_atomic32_read_dirty(&esdp->ssi->aux_work); erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: "); @@ -14341,12 +13686,12 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { break; } erts_print(to, to_arg, "Length: %d\n", - erts_smp_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len)); + erts_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len)); } erts_print(to, to_arg, "Run Queue Port Length: %d\n", - erts_smp_atomic32_read_dirty(&esdp->run_queue->ports.info.len)); + erts_atomic32_read_dirty(&esdp->run_queue->ports.info.len)); - flg = erts_smp_atomic32_read_dirty(&esdp->run_queue->flags); + flg = erts_atomic32_read_dirty(&esdp->run_queue->flags); erts_print(to, to_arg, "Run Queue Flags: "); for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) { erts_aint32_t chk = (1 << i); @@ -14418,7 +13763,7 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { p = esdp->current_process; erts_print(to, to_arg, "Current Process: "); if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) { - flg = erts_smp_atomic32_read_dirty(&p->state); + flg = erts_atomic32_read_dirty(&p->state); erts_print(to, to_arg, "%T\n", p->common.id); erts_print(to, to_arg, "Current Process State: "); @@ -14468,19 +13813,17 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { */ void erts_halt(int code) { - if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress, + if (-1 == erts_atomic32_cmpxchg_acqb(&erts_halt_progress, erts_no_schedulers, -1)) { -#ifdef ERTS_DIRTY_SCHEDULERS ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_CPU_RUNQ, ERTS_RUNQ_FLG_HALTING); ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_IO_RUNQ, ERTS_RUNQ_FLG_HALTING); -#endif erts_halt_code = code; notify_reap_ports_relb(); } } -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int erts_dbg_check_halloc_lock(Process *p) { @@ -14500,3 +13843,24 @@ erts_dbg_check_halloc_lock(Process *p) return 0; } #endif + +void +erts_debug_later_op_foreach(void (*callback)(void*), + void (*func)(void *, ErtsThrPrgrVal, void *), + void *arg) +{ + int six; + if (!erts_thr_progress_is_blocking()) + ERTS_INTERNAL_ERROR("Not blocking thread progress"); + + for (six = 0; six < erts_no_schedulers; six++) { + ErtsSchedulerData *esdp = &erts_aligned_scheduler_data[six].esd; + ErtsThrPrgrLaterOp *lop = esdp->aux_work_data.later_op.first; + + while (lop) { + if (lop->func == callback) + func(arg, lop->later, lop->data); + lop = lop->next; + } + } +} diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 639818c20c..e63da2d9db 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -47,7 +47,6 @@ typedef struct process Process; #include "erl_port.h" #undef ERL_PORT_GET_PORT_TYPE_ONLY__ #include "erl_vm.h" -#include "erl_smp.h" #include "erl_message.h" #include "erl_process_dict.h" #include "erl_node_container_utils.h" @@ -111,22 +110,16 @@ extern int erts_sched_compact_load; extern int erts_sched_balance_util; extern Uint erts_no_schedulers; extern Uint erts_no_total_schedulers; -#ifdef ERTS_DIRTY_SCHEDULERS extern Uint erts_no_dirty_cpu_schedulers; extern Uint erts_no_dirty_io_schedulers; -#endif extern Uint erts_no_run_queues; extern int erts_sched_thread_suggested_stack_size; -#ifdef ERTS_DIRTY_SCHEDULERS extern int erts_dcpu_sched_thread_suggested_stack_size; extern int erts_dio_sched_thread_suggested_stack_size; -#endif #define ERTS_SCHED_THREAD_MIN_STACK_SIZE 20 /* Kilo words */ #define 
ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */ -#ifdef ERTS_SMP #include "erl_bits.h" -#endif /* process priorities */ #define PRIORITY_MAX 0 @@ -224,31 +217,31 @@ extern int erts_dio_sched_thread_suggested_stack_size; ((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO))) #define ERTS_RUNQ_FLGS_INIT(RQ, INIT) \ - erts_smp_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT)) + erts_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT)) #define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bor_relb(&(RQ)->flags, \ (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_SET_NOB(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bor_nob(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bor_nob(&(RQ)->flags, \ (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_BSET(RQ, MSK, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \ (erts_aint32_t) (MSK), \ (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_band_relb(&(RQ)->flags, \ (erts_aint32_t) ~(FLGS))) #define ERTS_RUNQ_FLGS_UNSET_NOB(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_band_nob(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_band_nob(&(RQ)->flags, \ (erts_aint32_t) ~(FLGS))) #define ERTS_RUNQ_FLGS_GET(RQ) \ - ((Uint32) erts_smp_atomic32_read_acqb(&(RQ)->flags)) + ((Uint32) erts_atomic32_read_acqb(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_GET_NOB(RQ) \ - ((Uint32) erts_smp_atomic32_read_nob(&(RQ)->flags)) + ((Uint32) erts_atomic32_read_nob(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_GET_MB(RQ) \ - ((Uint32) erts_smp_atomic32_read_mb(&(RQ)->flags)) + ((Uint32) erts_atomic32_read_mb(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_READ_BSET(RQ, MSK, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \ (erts_aint32_t) (MSK), \ (erts_aint32_t) (FLGS))) @@ -365,20 +358,16 @@ typedef enum { typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo; -#ifdef ERTS_DIRTY_SCHEDULERS typedef struct { - erts_smp_spinlock_t lock; + erts_spinlock_t lock; ErtsSchedulerSleepInfo *list; } ErtsSchedulerSleepList; -#endif struct ErtsSchedulerSleepInfo_ { -#ifdef ERTS_SMP ErtsSchedulerSleepInfo *next; ErtsSchedulerSleepInfo *prev; - erts_smp_atomic32_t flags; + erts_atomic32_t flags; erts_tse_t *event; -#endif erts_atomic32_t aux_work; }; @@ -422,7 +411,7 @@ typedef struct ErtsSchedulerData_ ErtsSchedulerData; typedef struct ErtsRunQueue_ ErtsRunQueue; typedef struct { - erts_smp_atomic32_t len; + erts_atomic32_t len; erts_aint32_t max_len; int reds; } ErtsRunQueueInfo; @@ -433,7 +422,6 @@ typedef struct { # define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 1 #endif -#ifdef ERTS_SMP #undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT #define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT @@ -476,30 +464,25 @@ struct ErtsMigrationPaths_ { ErtsMigrationPath mpath[1]; }; -#endif /* ERTS_SMP */ struct ErtsRunQueue_ { int ix; - erts_smp_mtx_t mtx; - erts_smp_cnd_t cnd; + erts_mtx_t mtx; + erts_cnd_t cnd; -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP ErtsSchedulerSleepList sleepers; -#endif -#endif ErtsSchedulerData *scheduler; int waiting; /* < 0 in sys schedule; > 0 on cnd variable */ int woken; - erts_smp_atomic32_t flags; + erts_atomic32_t flags; int check_balance_reds; int full_reds_history_sum; int 
full_reds_history[ERTS_FULL_REDS_HISTORY_SIZE]; int out_of_work_count; erts_aint32_t max_len; - erts_smp_atomic32_t len; + erts_atomic32_t len; int wakeup_other; int wakeup_other_reds; @@ -518,7 +501,7 @@ struct ErtsRunQueue_ { struct { ErtsMiscOpList *start; ErtsMiscOpList *end; - erts_smp_atomic_t evac_runq; + erts_atomic_t evac_runq; } misc; struct { @@ -531,9 +514,7 @@ struct ErtsRunQueue_ { #endif }; -#ifdef ERTS_SMP extern long erts_runq_supervision_interval; -#endif typedef union { ErtsRunQueue runq; @@ -581,17 +562,12 @@ typedef struct { int sched_id; ErtsSchedulerData *esdp; ErtsSchedulerSleepInfo *ssi; -#ifdef ERTS_SMP ErtsThrPrgrVal current_thr_prgr; ErtsThrPrgrVal latest_wakeup; -#endif struct { int ix; -#ifdef ERTS_SMP ErtsThrPrgrVal thr_prgr; -#endif } misc; -#ifdef ERTS_SMP struct { ErtsThrPrgrVal thr_prgr; } dd; @@ -604,24 +580,17 @@ typedef struct { ErtsThrPrgrLaterOp *first; ErtsThrPrgrLaterOp *last; } later_op; -#endif -#ifdef ERTS_USE_ASYNC_READY_Q struct { -#ifdef ERTS_SMP int need_thr_prgr; ErtsThrPrgrVal thr_prgr; -#endif void *queue; } async_ready; -#endif -#ifdef ERTS_SMP struct { Uint64 next; int *sched2jix; int jix; ErtsDelayedAuxWorkWakeupJob *job; } delayed_wakeup; -#endif struct { ErtsEtsAllYieldData ets_all; /* Other yielding operations... */ @@ -639,13 +608,11 @@ typedef struct { (&(ESDP)->aux_work_data.yield.NAME) void erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp); -#ifdef ERTS_DIRTY_SCHEDULERS typedef enum { ERTS_DIRTY_CPU_SCHEDULER, ERTS_DIRTY_IO_SCHEDULER } ErtsDirtySchedulerType; -#endif struct ErtsSchedulerData_ { /* @@ -659,21 +626,17 @@ struct ErtsSchedulerData_ { ErtsTimerWheel *timer_wheel; ErtsNextTimeoutRef next_tmo_ref; ErtsHLTimerService *timer_service; -#ifdef ERTS_SMP ethr_tid tid; /* Thread id */ struct erl_bits_state erl_bits_state; /* erl_bits.c state */ void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */ Process *free_process; ErtsThrPrgrData thr_progress_data; -#endif ErtsSchedulerSleepInfo *ssi; Process *current_process; ErtsSchedType type; Uint no; /* Scheduler number for normal schedulers */ -#ifdef ERTS_DIRTY_SCHEDULERS Uint dirty_no; /* Scheduler number for dirty schedulers */ Process *dirty_shadow_process; -#endif Port *current_port; ErtsRunQueue *run_queue; int virtual_reds; @@ -712,25 +675,23 @@ typedef union { } ErtsAlignedSchedulerData; extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data; -#ifdef ERTS_DIRTY_SCHEDULERS extern ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data; extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data; -#endif -#ifndef ERTS_SMP -extern ErtsSchedulerData *erts_scheduler_data; -#endif -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) -int erts_smp_lc_runq_is_locked(ErtsRunQueue *); +#if defined(ERTS_ENABLE_LOCK_CHECK) +int erts_lc_runq_is_locked(ErtsRunQueue *); #endif +void +erts_debug_later_op_foreach(void (*callback)(void*), + void (*func)(void *, ErtsThrPrgrVal, void *), + void *arg); + #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS -#ifdef ERTS_SMP void erts_empty_runq(ErtsRunQueue *rq); void erts_non_empty_runq(ErtsRunQueue *rq); -#endif /* @@ -738,86 +699,84 @@ void erts_non_empty_runq(ErtsRunQueue *rq); * other threads peek at values without run queue lock. 
*/ -ERTS_GLB_INLINE void erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); -ERTS_GLB_INLINE void erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); -ERTS_GLB_INLINE void erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi); +ERTS_GLB_INLINE void erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); +ERTS_GLB_INLINE void erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); +ERTS_GLB_INLINE void erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE void -erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) +erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) { erts_aint32_t len; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - len = erts_smp_atomic32_read_dirty(&rq->len); + len = erts_atomic32_read_dirty(&rq->len); -#ifdef ERTS_SMP if (len == 0) erts_non_empty_runq(rq); -#endif len++; if (rq->max_len < len) rq->max_len = len; ASSERT(len > 0); - erts_smp_atomic32_set_nob(&rq->len, len); + erts_atomic32_set_nob(&rq->len, len); - len = erts_smp_atomic32_read_dirty(&rqi->len); + len = erts_atomic32_read_dirty(&rqi->len); ASSERT(len >= 0); if (len == 0) { - ASSERT((erts_smp_atomic32_read_nob(&rq->flags) + ASSERT((erts_atomic32_read_nob(&rq->flags) & ((erts_aint32_t) (1 << prio))) == 0); - erts_smp_atomic32_read_bor_nob(&rq->flags, + erts_atomic32_read_bor_nob(&rq->flags, (erts_aint32_t) (1 << prio)); } len++; if (rqi->max_len < len) rqi->max_len = len; - erts_smp_atomic32_set_relb(&rqi->len, len); + erts_atomic32_set_relb(&rqi->len, len); } ERTS_GLB_INLINE void -erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) +erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) { erts_aint32_t len; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - len = erts_smp_atomic32_read_dirty(&rq->len); + len = erts_atomic32_read_dirty(&rq->len); len--; ASSERT(len >= 0); - erts_smp_atomic32_set_nob(&rq->len, len); + erts_atomic32_set_nob(&rq->len, len); - len = erts_smp_atomic32_read_dirty(&rqi->len); + len = erts_atomic32_read_dirty(&rqi->len); len--; ASSERT(len >= 0); if (len == 0) { - ASSERT((erts_smp_atomic32_read_nob(&rq->flags) + ASSERT((erts_atomic32_read_nob(&rq->flags) & ((erts_aint32_t) (1 << prio)))); - erts_smp_atomic32_read_band_nob(&rq->flags, + erts_atomic32_read_band_nob(&rq->flags, ~((erts_aint32_t) (1 << prio))); } - erts_smp_atomic32_set_relb(&rqi->len, len); + erts_atomic32_set_relb(&rqi->len, len); } ERTS_GLB_INLINE void -erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) +erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) { erts_aint32_t len; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - len = erts_smp_atomic32_read_dirty(&rqi->len); + len = erts_atomic32_read_dirty(&rqi->len); ASSERT(rqi->max_len >= len); rqi->max_len = len; } #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#define RUNQ_READ_LEN(X) erts_smp_atomic32_read_nob((X)) +#define RUNQ_READ_LEN(X) erts_atomic32_read_nob((X)) #endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */ @@ -835,14 +794,15 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) #define ERTS_PSD_NIF_TRAP_EXPORT 5 #define ERTS_PSD_ETS_OWNED_TABLES 6 #define ERTS_PSD_ETS_FIXED_TABLES 7 -#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 8 +#define ERTS_PSD_DIST_ENTRY 8 +#define 
ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 9 /* keep last... */ -#define ERTS_PSD_SIZE 9 +#define ERTS_PSD_SIZE 10 #if !defined(HIPE) # undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF # undef ERTS_PSD_SIZE -# define ERTS_PSD_SIZE 8 +# define ERTS_PSD_SIZE 9 #endif typedef struct { @@ -876,6 +836,9 @@ typedef struct { #define ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS ERTS_PROC_LOCK_MAIN #define ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN + typedef struct { ErtsProcLocks get_locks; ErtsProcLocks set_locks; @@ -890,7 +853,7 @@ extern ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE]; #define ERTS_SCHED_STAT_MODIFY_CLEAR 3 typedef struct { - erts_smp_spinlock_t lock; + erts_spinlock_t lock; int enabled; struct { Eterm name; @@ -911,7 +874,6 @@ typedef struct { typedef struct ErtsProcSysTask_ ErtsProcSysTask; typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs; -#ifdef ERTS_SMP typedef struct ErtsPendingSuspend_ ErtsPendingSuspend; struct ErtsPendingSuspend_ { @@ -924,7 +886,6 @@ struct ErtsPendingSuspend_ { Eterm pid); }; -#endif /* Defines to ease the change of memory architecture */ @@ -1076,23 +1037,18 @@ struct process { ErlHeapFragment* live_hf_end; ErtsMessage *msg_frag; /* Pointer to message fragment list */ Uint mbuf_sz; /* Total size of heap fragments and message fragments */ - erts_smp_atomic_t psd; /* Rarely used process specific data */ + erts_atomic_t psd; /* Rarely used process specific data */ Uint64 bin_vheap_sz; /* Virtual heap block size for binaries */ Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */ Uint64 bin_old_vheap; /* Virtual old heap size for binaries */ ErtsProcSysTaskQs *sys_task_qs; -#ifdef ERTS_DIRTY_SCHEDULERS ErtsProcSysTask *dirty_sys_tasks; -#endif - erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */ -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */ -#endif + erts_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */ + erts_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */ -#ifdef ERTS_SMP ErlMessageInQueue msg_inq; ErlTraceMessageQueue *trace_msg_q; ErtsPendExit pending_exit; @@ -1100,11 +1056,10 @@ struct process { ErtsSchedulerData *scheduler_data; Eterm suspendee; ErtsPendingSuspend *pending_suspenders; - erts_smp_atomic_t run_queue; + erts_atomic_t run_queue; #ifdef HIPE struct hipe_process_state_smp hipe_smp; #endif -#endif #ifdef CHECK_FOR_HOLES Eterm* last_htop; /* No need to scan the heap below this point. */ @@ -1249,7 +1204,6 @@ void erts_check_for_holes(Process* p); #define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \ (((PSFLGS) >> ERTS_PSFLGS_PRQ_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK) -#ifdef ERTS_DIRTY_SCHEDULERS /* * Flags in the dirty_state field. @@ -1276,7 +1230,6 @@ void erts_check_for_holes(Process* p); | ERTS_PDSFLG_IN_CPU_PRQ_HIGH \ | ERTS_PDSFLG_IN_CPU_PRQ_NORMAL\ | ERTS_PDSFLG_IN_CPU_PRQ_LOW) -#endif /* @@ -1367,7 +1320,7 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra); Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz); #endif -extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx; +extern erts_rwmtx_t erts_cpu_bind_rwmtx; /* If any of the erts_system_monitor_* variables are set (enabled), ** erts_system_monitor must be != NIL, to allow testing on just ** the erts_system_monitor_* variables. 
@@ -1541,20 +1494,14 @@ extern int erts_system_profile_ts_type; } \ } while (0) -#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP) #define ERTS_NUM_DIRTY_CPU_RUNQS 1 #define ERTS_NUM_DIRTY_IO_RUNQS 1 -#else -#define ERTS_NUM_DIRTY_CPU_RUNQS 0 -#define ERTS_NUM_DIRTY_IO_RUNQS 0 -#endif #define ERTS_NUM_DIRTY_RUNQS (ERTS_NUM_DIRTY_CPU_RUNQS+ERTS_NUM_DIRTY_IO_RUNQS) #define ERTS_RUNQ_IX(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \ &erts_aligned_run_queues[(IX)].runq) -#ifdef ERTS_DIRTY_SCHEDULERS #define ERTS_RUNQ_IX_IS_DIRTY(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \ (erts_no_run_queues <= (IX))) @@ -1565,13 +1512,9 @@ extern int erts_system_profile_ts_type; #define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[erts_no_run_queues+1].runq) #define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ) == ERTS_DIRTY_CPU_RUNQ) #define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ) == ERTS_DIRTY_IO_RUNQ) -#else -#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0 -#endif #define ERTS_SCHEDULER_IX(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_schedulers), \ &erts_aligned_scheduler_data[(IX)].esd) -#ifdef ERTS_DIRTY_SCHEDULERS #define ERTS_DIRTY_CPU_SCHEDULER_IX(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_dirty_cpu_schedulers), \ &erts_aligned_dirty_cpu_scheduler_data[(IX)].esd) @@ -1584,24 +1527,14 @@ extern int erts_system_profile_ts_type; ((ESDP)->type == ERTS_SCHED_DIRTY_CPU) #define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \ ((ESDP)->type == ERTS_SCHED_DIRTY_IO) -#else /* !ERTS_DIRTY_SCHEDULERS */ -#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0 -#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0 -#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0 -#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0 -#endif void erts_pre_init_process(void); void erts_late_init_process(void); void erts_early_init_scheduling(int); void erts_init_scheduling(int, int -#ifdef ERTS_DIRTY_SCHEDULERS , int, int, int -#endif ); -#ifdef ERTS_DIRTY_SCHEDULERS void erts_execute_dirty_system_task(Process *c_p); -#endif int erts_set_gc_state(Process *c_p, int enable); Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable, int dirty_cpu, int want_dirty_io); @@ -1799,14 +1732,11 @@ void erts_schedule_ets_free_fixation(Eterm pid, struct db_fixation*); void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc); int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks); -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int erts_dbg_check_halloc_lock(Process *p); #endif -#if defined(ERTS_SMP) || defined(ERTS_DIRTY_SCHEDULERS) void erts_schedulers_state(Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *); -#endif -#ifdef ERTS_SMP ErtsSchedSuspendResult erts_set_schedulers_online(Process *p, ErtsProcLocks plocks, @@ -1821,14 +1751,9 @@ void erts_start_schedulers(void); void erts_alloc_notify_delayed_dealloc(int); void erts_alloc_ensure_handle_delayed_dealloc_call(int); void erts_notify_canceled_timer(ErtsSchedulerData *, int); -#endif -#if ERTS_USE_ASYNC_READY_Q void erts_notify_check_async_ready_queue(void *); -#endif -#ifdef ERTS_SMP void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later); void erts_notify_finish_breakpointing(Process* p); -#endif void erts_schedule_misc_aux_work(int sched_id, void (*func)(void *), void *arg); @@ -1897,13 +1822,9 @@ int erts_send_exit_signal(Process *, Eterm, Process *, Uint32); -#ifdef ERTS_SMP void erts_handle_pending_exit(Process *, ErtsProcLocks); #define ERTS_PROC_PENDING_EXIT(P) \ - 
(ERTS_PSFLG_PENDING_EXIT & erts_smp_atomic32_read_acqb(&(P)->state)) -#else -#define ERTS_PROC_PENDING_EXIT(P) 0 -#endif + (ERTS_PSFLG_PENDING_EXIT & erts_atomic32_read_acqb(&(P)->state)) void erts_deep_process_dump(fmtfn_t, void *); @@ -1933,19 +1854,7 @@ do { \ # define ERTS_VERIFY_UNUSED_TEMP_ALLOC(ESDP) #endif -#if defined(ERTS_SMP) || defined(USE_THREADS) ErtsSchedulerData *erts_get_scheduler_data(void); -#else -ERTS_GLB_INLINE ErtsSchedulerData *erts_get_scheduler_data(void); -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE -ErtsSchedulerData *erts_get_scheduler_data(void) -{ - return erts_scheduler_data; -} -#endif -#endif void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks); @@ -1957,7 +1866,7 @@ ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks) { /* No barrier needed, due to msg lock */ - erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = erts_atomic32_read_nob(&p->state); if (!(state & ERTS_PSFLG_ACTIVE)) erts_schedule_process(p, state, locks); } @@ -1967,7 +1876,7 @@ erts_schedule_dirty_sys_execution(Process *c_p) { erts_aint32_t a, n, e; - a = erts_smp_atomic32_read_nob(&c_p->state); + a = erts_atomic32_read_nob(&c_p->state); /* * Only a currently executing process schedules @@ -1983,7 +1892,7 @@ erts_schedule_dirty_sys_execution(Process *c_p) | ERTS_PSFLG_PENDING_EXIT))) { e = a; n = a | ERTS_PSFLG_DIRTY_ACTIVE_SYS; - a = erts_smp_atomic32_cmpxchg_mb(&c_p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&c_p->state, n, e); if (a == e) break; /* dirty-active-sys set */ } @@ -1991,21 +1900,21 @@ erts_schedule_dirty_sys_execution(Process *c_p) #endif -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) #define ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__ #include "erl_process_lock.h" #undef ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__ -#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) \ +#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L) \ do { \ if ((L)) \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked((RQ))); \ + ERTS_LC_ASSERT(erts_lc_runq_is_locked((RQ))); \ else \ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked((RQ))); \ + ERTS_LC_ASSERT(!erts_lc_runq_is_locked((RQ))); \ } while (0) #else -#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) +#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L) #endif void *erts_psd_set_init(Process *p, int ix, void *data); @@ -2021,22 +1930,22 @@ ERTS_GLB_INLINE void * erts_psd_get(Process *p, int ix) { ErtsPSD *psd; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p); if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].get_locks) - ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking()); else { locks &= erts_psd_required_locks[ix].get_locks; - ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks + ERTS_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks || erts_thr_progress_is_blocking()); } #endif - psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); + psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd); ASSERT(0 <= ix && ix < ERTS_PSD_SIZE); if (!psd) return NULL; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; return psd->data[ix]; } @@ -2044,30 +1953,28 @@ ERTS_GLB_INLINE void * erts_psd_set(Process *p, int ix, void *data) { ErtsPSD *psd; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) 
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p); - erts_aint32_t state = state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = state = erts_atomic32_read_nob(&p->state); if (!(state & ERTS_PSFLG_FREE)) { if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks) - ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking()); else { locks &= erts_psd_required_locks[ix].set_locks; - ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks + ERTS_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks || erts_thr_progress_is_blocking()); } } #endif - psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); + psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd); ASSERT(0 <= ix && ix < ERTS_PSD_SIZE); if (psd) { void *old; -#ifdef ERTS_SMP #ifdef ETHR_ORDERED_READ_DEPEND ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore); #else ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore); #endif -#endif old = psd->data[ix]; psd->data[ix] = data; return old; @@ -2104,6 +2011,11 @@ erts_psd_set(Process *p, int ix, void *data) #define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, NTE) \ erts_psd_set((P), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE)) +#define ERTS_PROC_GET_DIST_ENTRY(P) \ + ((DistEntry *) erts_psd_get((P), ERTS_PSD_DIST_ENTRY)) +#define ERTS_PROC_SET_DIST_ENTRY(P, DE) \ + ((DistEntry *) erts_psd_set((P), ERTS_PSD_DIST_ENTRY, (void *) (DE))) + #ifdef HIPE #define ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(P) \ ((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF)) @@ -2147,7 +2059,6 @@ erts_proc_set_error_handler(Process *p, Eterm handler) #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS -#ifdef ERTS_SMP #include "erl_thr_progress.h" @@ -2249,7 +2160,6 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio) #endif -#endif #endif @@ -2260,13 +2170,13 @@ ERTS_GLB_INLINE Eterm erts_get_current_pid(void); ERTS_GLB_INLINE Uint erts_get_scheduler_id(void); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp); -ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq); -ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq); -ERTS_GLB_INLINE void erts_smp_runq_unlock(ErtsRunQueue *rq); -ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq); -ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq); -ERTS_GLB_INLINE void erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); -ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); +ERTS_GLB_INLINE void erts_runq_lock(ErtsRunQueue *rq); +ERTS_GLB_INLINE int erts_runq_trylock(ErtsRunQueue *rq); +ERTS_GLB_INLINE void erts_runq_unlock(ErtsRunQueue *rq); +ERTS_GLB_INLINE void erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq); +ERTS_GLB_INLINE void erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq); +ERTS_GLB_INLINE void erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); +ERTS_GLB_INLINE void erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); ERTS_GLB_INLINE ErtsMessage *erts_alloc_message_heap_state(Process *pp, erts_aint32_t *psp, @@ -2291,11 +2201,7 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p) { ErtsSchedulerData *esdp; ASSERT(c_p); -#if !defined(ERTS_SMP) - esdp = erts_get_scheduler_data(); -#else esdp = c_p->scheduler_data; -# if defined(ERTS_DIRTY_SCHEDULERS) if (esdp) { ASSERT(esdp == erts_get_scheduler_data()); ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); @@ -2305,8 
+2211,6 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p) ASSERT(esdp); ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)); } -# endif -#endif ASSERT(esdp); return esdp; } @@ -2337,124 +2241,94 @@ Eterm erts_get_current_pid(void) ERTS_GLB_INLINE Uint erts_get_scheduler_id(void) { -#ifdef ERTS_SMP ErtsSchedulerData *esdp = erts_get_scheduler_data(); -#ifdef ERTS_DIRTY_SCHEDULERS if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp)) return 0; else -#endif return esdp ? esdp->no : (Uint) 0; -#else - return erts_get_scheduler_data() ? (Uint) 1 : (Uint) 0; -#endif } ERTS_GLB_INLINE ErtsRunQueue * erts_get_runq_proc(Process *p) { -#ifdef ERTS_SMP ASSERT(ERTS_AINT_NULL != erts_atomic_read_nob(&p->run_queue)); return (ErtsRunQueue *) erts_atomic_read_nob(&p->run_queue); -#else - return ERTS_RUNQ_IX(0); -#endif } ERTS_GLB_INLINE ErtsRunQueue * erts_get_runq_current(ErtsSchedulerData *esdp) { ASSERT(!esdp || esdp == erts_get_scheduler_data()); -#ifdef ERTS_SMP if (!esdp) esdp = erts_get_scheduler_data(); return esdp->run_queue; -#else - return ERTS_RUNQ_IX(0); -#endif } ERTS_GLB_INLINE void -erts_smp_runq_lock(ErtsRunQueue *rq) +erts_runq_lock(ErtsRunQueue *rq) { -#ifdef ERTS_SMP - erts_smp_mtx_lock(&rq->mtx); -#endif + erts_mtx_lock(&rq->mtx); } ERTS_GLB_INLINE int -erts_smp_runq_trylock(ErtsRunQueue *rq) +erts_runq_trylock(ErtsRunQueue *rq) { -#ifdef ERTS_SMP - return erts_smp_mtx_trylock(&rq->mtx); -#else - return 0; -#endif + return erts_mtx_trylock(&rq->mtx); } ERTS_GLB_INLINE void -erts_smp_runq_unlock(ErtsRunQueue *rq) +erts_runq_unlock(ErtsRunQueue *rq) { -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&rq->mtx); -#endif + erts_mtx_unlock(&rq->mtx); } ERTS_GLB_INLINE void -erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) +erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&rq->mtx)); if (xrq != rq) { - if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) { + if (erts_mtx_trylock(&xrq->mtx) == EBUSY) { if (rq < xrq) - erts_smp_mtx_lock(&xrq->mtx); + erts_mtx_lock(&xrq->mtx); else { - erts_smp_mtx_unlock(&rq->mtx); - erts_smp_mtx_lock(&xrq->mtx); - erts_smp_mtx_lock(&rq->mtx); + erts_mtx_unlock(&rq->mtx); + erts_mtx_lock(&xrq->mtx); + erts_mtx_lock(&rq->mtx); } } } -#endif } ERTS_GLB_INLINE void -erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq) +erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { -#ifdef ERTS_SMP if (xrq != rq) - erts_smp_mtx_unlock(&xrq->mtx); -#endif + erts_mtx_unlock(&xrq->mtx); } ERTS_GLB_INLINE void -erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) +erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) { -#ifdef ERTS_SMP ASSERT(rq1 && rq2); if (rq1 == rq2) - erts_smp_mtx_lock(&rq1->mtx); + erts_mtx_lock(&rq1->mtx); else if (rq1 < rq2) { - erts_smp_mtx_lock(&rq1->mtx); - erts_smp_mtx_lock(&rq2->mtx); + erts_mtx_lock(&rq1->mtx); + erts_mtx_lock(&rq2->mtx); } else { - erts_smp_mtx_lock(&rq2->mtx); - erts_smp_mtx_lock(&rq1->mtx); + erts_mtx_lock(&rq2->mtx); + erts_mtx_lock(&rq1->mtx); } -#endif } ERTS_GLB_INLINE void -erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) +erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) { -#ifdef ERTS_SMP ASSERT(rq1 && rq2); - erts_smp_mtx_unlock(&rq1->mtx); + erts_mtx_unlock(&rq1->mtx); if (rq1 != rq2) - erts_smp_mtx_unlock(&rq2->mtx); -#endif + erts_mtx_unlock(&rq2->mtx); } ERTS_GLB_INLINE ErtsMessage * @@ -2486,7 +2360,7 @@ erts_alloc_message_heap(Process *pp, Eterm **hpp, ErlOffHeap **ohpp) { - erts_aint32_t 
state = pp ? erts_smp_atomic32_read_nob(&pp->state) : 0; + erts_aint32_t state = pp ? erts_atomic32_read_nob(&pp->state) : 0; return erts_alloc_message_heap_state(pp, &state, plp, sz, hpp, ohpp); } @@ -2500,7 +2374,7 @@ erts_shrink_message_heap(ErtsMessage **msgpp, Process *pp, *msgpp = erts_shrink_message(*msgpp, used_hp - start_hp, brefs, brefs_size); else if (!(*msgpp)->data.attached) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(pp)); HRelease(pp, end_hp, used_hp); } @@ -2562,7 +2436,6 @@ ERTS_TIME2REDS_IMPL__(ErtsMonotonicTime start, ErtsMonotonicTime end) } #endif -#ifdef ERTS_SMP Process *erts_pid2proc_not_running(Process *, ErtsProcLocks, @@ -2575,35 +2448,26 @@ Process *erts_pid2proc_nropt(Process *c_p, extern int erts_disable_proc_not_running_opt; #ifdef DEBUG -#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) \ +#define ERTS_ASSERT_IS_NOT_EXITING(P) \ do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0) #else -#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) +#define ERTS_ASSERT_IS_NOT_EXITING(P) #endif -#else /* !ERTS_SMP */ - -#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) - -#define erts_pid2proc_not_running erts_pid2proc -#define erts_pid2proc_nropt erts_pid2proc - -#endif #define ERTS_PROC_IS_EXITING(P) \ - (ERTS_PSFLG_EXITING & erts_smp_atomic32_read_acqb(&(P)->state)) + (ERTS_PSFLG_EXITING & erts_atomic32_read_acqb(&(P)->state)) /* Minimum NUMBER of processes for a small system to start */ #define ERTS_MIN_PROCESSES 1024 -#if defined(ERTS_SMP) && ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS +#if ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS #undef ERTS_MIN_PROCESSES #define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS #endif -void erts_smp_notify_inc_runq(ErtsRunQueue *runq); +void erts_notify_inc_runq(ErtsRunQueue *runq); -#ifdef ERTS_SMP void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t); ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi); @@ -2614,9 +2478,9 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi) { erts_aint32_t flags; ERTS_THR_MEMORY_BARRIER; - flags = erts_smp_atomic32_read_nob(&ssi->flags); + flags = erts_atomic32_read_nob(&ssi->flags); if (flags & ERTS_SSI_FLG_SLEEPING) { - flags = erts_smp_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP); + flags = erts_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP); erts_sched_finish_poke(ssi, flags); } } @@ -2624,7 +2488,6 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#endif /* #ifdef ERTS_SMP */ #include "erl_process_lock.h" @@ -2634,5 +2497,5 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi) void erts_halt(int code); -extern erts_smp_atomic32_t erts_halt_progress; +extern erts_atomic32_t erts_halt_progress; extern int erts_halt_code; diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index b826e6c5d3..5a2c262ff1 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -69,7 +69,7 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg) for (i = 0; i < max; i++) { Process *p = erts_pix2proc(i); if (p && p->i != ENULL) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + erts_aint32_t state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC))) dump_process_info(to, to_arg, p); } @@ -85,7 +85,7 @@ Uint erts_process_memory(Process *p, int incl_msg_inq) { size += sizeof(Process); if (incl_msg_inq) - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); 
erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size); erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size); @@ -106,7 +106,7 @@ Uint erts_process_memory(Process *p, int incl_msg_inq) { size += p->arity * sizeof(p->arg_reg[0]); } - if (erts_smp_atomic_read_nob(&p->psd) != (erts_aint_t) NULL) + if (erts_atomic_read_nob(&p->psd) != (erts_aint_t) NULL) size += sizeof(ErtsPSD); scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p); @@ -126,7 +126,7 @@ dump_process_info(fmtfn_t to, void *to_arg, Process *p) ErtsMessage* mp; int yreg = -1; - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) { erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id); diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index ff124d5ba7..431867f27e 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -56,9 +56,9 @@ * Note that wait flags may be read without the pix lock, but * it is important that wait flags only are modified when the pix * lock is held. - * This implementation assumes that erts_smp_atomic_or_retold() + * This implementation assumes that erts_atomic_or_retold() * provides necessary memorybarriers for a lock operation, and that - * erts_smp_atomic_and_retold() provides necessary memorybarriers + * erts_atomic_and_retold() provides necessary memorybarriers * for an unlock operation. */ @@ -69,7 +69,6 @@ #include "erl_process.h" #include "erl_thr_progress.h" -#ifdef ERTS_SMP #if ERTS_PROC_LOCK_OWN_IMPL @@ -464,7 +463,7 @@ wait_for_locks(Process *p, } /* - * erts_proc_lock_failed() is called when erts_smp_proc_lock() + * erts_proc_lock_failed() is called when erts_proc_lock() * wasn't able to lock all locks. We may need to transfer locks * to waiters and wait for our turn on locks. * @@ -543,7 +542,7 @@ erts_proc_lock_failed(Process *p, } /* - * erts_proc_unlock_failed() is called when erts_smp_proc_unlock() + * erts_proc_unlock_failed() is called when erts_proc_unlock() * wasn't able to unlock all locks. We may need to transfer locks * to waiters. */ @@ -709,7 +708,7 @@ proc_safelock(int is_managed, refc1 = 1; erts_proc_inc_refc(p1); } - erts_smp_proc_unlock(p1, unlock_locks); + erts_proc_unlock(p1, unlock_locks); } unlock_locks = unlock_mask & have_locks2; if (unlock_locks) { @@ -719,7 +718,7 @@ proc_safelock(int is_managed, refc2 = 1; erts_proc_inc_refc(p2); } - erts_smp_proc_unlock(p2, unlock_locks); + erts_proc_unlock(p2, unlock_locks); } } @@ -750,7 +749,7 @@ proc_safelock(int is_managed, if (need_locks2 & lock) lock_no--; locks = need_locks1 & lock_mask; - erts_smp_proc_lock(p1, locks); + erts_proc_lock(p1, locks); have_locks1 |= locks; need_locks1 &= ~locks; } @@ -761,7 +760,7 @@ proc_safelock(int is_managed, lock = (1 << ++lock_no); } locks = need_locks2 & lock_mask; - erts_smp_proc_lock(p2, locks); + erts_proc_lock(p2, locks); have_locks2 |= locks; need_locks2 &= ~locks; } @@ -898,7 +897,7 @@ erts_pid2proc_opt(Process *c_p, #endif /* ERTS_PROC_LOCK_OWN_IMPL */ { /* Try a quick trylock to grab all the locks we need. 
*/ - busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks); + busy = (int) erts_proc_raw_trylock__(proc, need_locks); #if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK) erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__); @@ -976,7 +975,7 @@ erts_pid2proc_opt(Process *c_p, : (proc != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) { - erts_smp_proc_unlock(proc, need_locks); + erts_proc_unlock(proc, need_locks); if (flags & ERTS_P2P_FLG_INC_REFC) dec_refc_proc = proc; @@ -1002,11 +1001,9 @@ static ERTS_INLINE Process *proc_lookup_inc_refc(Eterm pid, int allow_exit) { Process *proc; -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl; dhndl = erts_thr_progress_unmanaged_delay(); -#endif proc = erts_proc_lookup_raw(pid); if (proc) { @@ -1016,9 +1013,7 @@ Process *proc_lookup_inc_refc(Eterm pid, int allow_exit) erts_proc_inc_refc(proc); } -#ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); -#endif return proc; } @@ -1042,7 +1037,7 @@ erts_proc_lock_init(Process *p) #if ERTS_PROC_LOCK_OWN_IMPL /* We always start with all locks locked */ #if ERTS_PROC_LOCK_ATOMIC_IMPL - erts_smp_atomic32_init_nob(&p->lock.flags, + erts_atomic32_init_nob(&p->lock.flags, (erts_aint32_t) ERTS_PROC_LOCKS_ALL); #else p->lock.flags = ERTS_PROC_LOCKS_ALL; @@ -1093,7 +1088,7 @@ erts_proc_lock_init(Process *p) #endif #ifdef ERTS_PROC_LOCK_DEBUG for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) - erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1); + erts_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1); #endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_proc_lock_init(p); @@ -1113,7 +1108,7 @@ erts_proc_lock_fin(Process *p) erts_mtx_destroy(&p->lock.status); erts_mtx_destroy(&p->lock.trace); #endif -#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_COUNT) erts_lcnt_proc_lock_destroy(p); #endif } @@ -1785,4 +1780,3 @@ check_queue(erts_proc_lock_t *lck) } #endif -#endif /* ERTS_SMP */ diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h index 023ba4d4ae..9d5691d3c4 100644 --- a/erts/emulator/beam/erl_process_lock.h +++ b/erts/emulator/beam/erl_process_lock.h @@ -36,7 +36,7 @@ #include "erl_lock_count.h" #endif -#include "erl_smp.h" +#include "erl_threads.h" #if defined(VALGRIND) || defined(ETHR_DISABLE_NATIVE_IMPLS) # define ERTS_PROC_LOCK_OWN_IMPL 0 @@ -73,7 +73,7 @@ typedef erts_aint32_t ErtsProcLocks; typedef struct erts_proc_lock_t_ { #if ERTS_PROC_LOCK_OWN_IMPL #if ERTS_PROC_LOCK_ATOMIC_IMPL - erts_smp_atomic32_t flags; + erts_atomic32_t flags; #else ErtsProcLocks flags; #endif @@ -103,7 +103,7 @@ typedef struct erts_proc_lock_t_ { # error "no implementation" #endif #ifdef ERTS_PROC_LOCK_DEBUG - erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1]; + erts_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1]; #endif } erts_proc_lock_t; @@ -243,11 +243,11 @@ typedef struct erts_proc_lock_t_ { /* Lock counter implemetation */ #ifdef ERTS_ENABLE_LOCK_POSITION -#define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__) -#define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__) +#define erts_proc_lock__(P,I,L) erts_proc_lock_x__(P,I,L,__FILE__,__LINE__) +#define erts_proc_lock(P,L) erts_proc_lock_x(P,L,__FILE__,__LINE__) #endif -#if defined(ERTS_SMP) && defined (ERTS_ENABLE_LOCK_COUNT) +#if defined (ERTS_ENABLE_LOCK_COUNT) void erts_lcnt_proc_lock_init(Process *p); void erts_lcnt_proc_lock_destroy(Process *p); @@ -421,10 +421,10 @@ void 
erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res /* --- Process lock checking ----------------------------------------------- */ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) -#define ERTS_SMP_CHK_NO_PROC_LOCKS \ +#if defined(ERTS_ENABLE_LOCK_CHECK) +#define ERTS_CHK_NO_PROC_LOCKS \ erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__) -#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \ +#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \ erts_proc_lc_chk_only_proc_main((P)) void erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line); @@ -443,8 +443,8 @@ void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char* file, unsigned int line); void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks); #else -#define ERTS_SMP_CHK_NO_PROC_LOCKS -#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) +#define ERTS_CHK_NO_PROC_LOCKS +#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) #endif #endif /* #ifndef ERTS_PROC_LOCK_LOCK_CHECK__ */ @@ -455,7 +455,6 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks); #ifndef ERTS_PROCESS_LOCK_H__ #define ERTS_PROCESS_LOCK_H__ -#ifdef ERTS_SMP typedef struct { union { @@ -472,21 +471,21 @@ typedef struct { #if ERTS_PROC_LOCK_ATOMIC_IMPL #define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \ - ((ErtsProcLocks) erts_smp_atomic32_read_band_nob(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_read_band_nob(&(L)->flags, \ (erts_aint32_t) (MSK))) #define ERTS_PROC_LOCK_FLGS_BOR_ACQB_(L, MSK) \ - ((ErtsProcLocks) erts_smp_atomic32_read_bor_acqb(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_read_bor_acqb(&(L)->flags, \ (erts_aint32_t) (MSK))) #define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \ - ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_acqb(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_cmpxchg_acqb(&(L)->flags, \ (erts_aint32_t) (NEW), \ (erts_aint32_t) (EXPECTED))) #define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \ - ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_relb(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_cmpxchg_relb(&(L)->flags, \ (erts_aint32_t) (NEW), \ (erts_aint32_t) (EXPECTED))) #define ERTS_PROC_LOCK_FLGS_READ_(L) \ - ((ErtsProcLocks) erts_smp_atomic32_read_nob(&(L)->flags)) + ((ErtsProcLocks) erts_atomic32_read_nob(&(L)->flags)) #else /* no opt atomic ops */ @@ -557,22 +556,22 @@ ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *); ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *); ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *); -ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p, +ERTS_GLB_INLINE ErtsProcLocks erts_proc_raw_trylock__(Process *p, ErtsProcLocks locks); #ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *, +ERTS_GLB_INLINE void erts_proc_lock_x__(Process *, erts_pix_lock_t *, ErtsProcLocks, char *file, unsigned int line); #else -ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *, +ERTS_GLB_INLINE void erts_proc_lock__(Process *, erts_pix_lock_t *, ErtsProcLocks); #endif -ERTS_GLB_INLINE void erts_smp_proc_unlock__(Process *, +ERTS_GLB_INLINE void erts_proc_unlock__(Process *, erts_pix_lock_t *, ErtsProcLocks); -ERTS_GLB_INLINE int erts_smp_proc_trylock__(Process *, +ERTS_GLB_INLINE int erts_proc_trylock__(Process *, erts_pix_lock_t *, ErtsProcLocks); @@ -600,7 +599,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck) } /* - * Helper function for erts_smp_proc_lock__ and erts_smp_proc_trylock__. 
+ * Helper function for erts_proc_lock__ and erts_proc_trylock__. * * Attempts to grab all of 'locks' simultaneously. * @@ -613,7 +612,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck) * Does not release the pix lock. */ ERTS_GLB_INLINE ErtsProcLocks -erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks) +erts_proc_raw_trylock__(Process *p, ErtsProcLocks locks) { #if ERTS_PROC_LOCK_OWN_IMPL ErtsProcLocks expct_lflgs = 0; @@ -682,12 +681,12 @@ busy_main: ERTS_GLB_INLINE void #ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_proc_lock_x__(Process *p, +erts_proc_lock_x__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks, char *file, unsigned int line) #else -erts_smp_proc_lock__(Process *p, +erts_proc_lock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) #endif @@ -709,7 +708,7 @@ erts_smp_proc_lock__(Process *p, erts_proc_lc_lock(p, locks, file, line); #endif - old_lflgs = erts_smp_proc_raw_trylock__(p, locks); + old_lflgs = erts_proc_raw_trylock__(p, locks); if (old_lflgs != 0) { /* @@ -761,7 +760,7 @@ erts_smp_proc_lock__(Process *p, } ERTS_GLB_INLINE void -erts_smp_proc_unlock__(Process *p, +erts_proc_unlock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) { @@ -854,7 +853,7 @@ erts_smp_proc_unlock__(Process *p, } ERTS_GLB_INLINE int -erts_smp_proc_trylock__(Process *p, +erts_proc_trylock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) { @@ -875,7 +874,7 @@ erts_smp_proc_trylock__(Process *p, erts_pix_lock(pix_lck); #endif - if (erts_smp_proc_raw_trylock__(p, locks) != 0) { + if (erts_proc_raw_trylock__(p, locks) != 0) { /* Didn't get all locks... */ res = EBUSY; @@ -912,7 +911,7 @@ erts_smp_proc_trylock__(Process *p, return res; #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL - if (erts_smp_proc_raw_trylock__(p, locks) != 0) + if (erts_proc_raw_trylock__(p, locks) != 0) return EBUSY; else { #ifdef ERTS_PROC_LOCK_DEBUG @@ -933,11 +932,11 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked) if (locks & lock) { erts_aint32_t lock_count; if (locked) { - lock_count = erts_smp_atomic32_inc_read_nob(&p->lock.locked[i]); + lock_count = erts_atomic32_inc_read_nob(&p->lock.locked[i]); ERTS_LC_ASSERT(lock_count == 1); } else { - lock_count = erts_smp_atomic32_dec_read_nob(&p->lock.locked[i]); + lock_count = erts_atomic32_dec_read_nob(&p->lock.locked[i]); ERTS_LC_ASSERT(lock_count == 0); } } @@ -947,15 +946,14 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#endif /* ERTS_SMP */ #ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line); +ERTS_GLB_INLINE void erts_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line); #else -ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks); +ERTS_GLB_INLINE void erts_proc_lock(Process *, ErtsProcLocks); #endif -ERTS_GLB_INLINE void erts_smp_proc_unlock(Process *, ErtsProcLocks); -ERTS_GLB_INLINE int erts_smp_proc_trylock(Process *, ErtsProcLocks); +ERTS_GLB_INLINE void erts_proc_unlock(Process *, ErtsProcLocks); +ERTS_GLB_INLINE int erts_proc_trylock(Process *, ErtsProcLocks); ERTS_GLB_INLINE void erts_proc_inc_refc(Process *); ERTS_GLB_INLINE void erts_proc_dec_refc(Process *); @@ -966,79 +964,65 @@ ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *); ERTS_GLB_INLINE void #ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line) 
+erts_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line) #else -erts_smp_proc_lock(Process *p, ErtsProcLocks locks) +erts_proc_lock(Process *p, ErtsProcLocks locks) #endif { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_smp_proc_lock_x__(p, +#if defined(ERTS_ENABLE_LOCK_POSITION) + erts_proc_lock_x__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else ERTS_PID2PIXLOCK(p->common.id), #endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/ locks, file, line); -#elif defined(ERTS_SMP) - erts_smp_proc_lock__(p, +#else + erts_proc_lock__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else ERTS_PID2PIXLOCK(p->common.id), #endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/ locks); -#endif /*ERTS_SMP*/ +#endif /*ERTS_ENABLE_LOCK_POSITION*/ } ERTS_GLB_INLINE void -erts_smp_proc_unlock(Process *p, ErtsProcLocks locks) +erts_proc_unlock(Process *p, ErtsProcLocks locks) { -#ifdef ERTS_SMP - erts_smp_proc_unlock__(p, + erts_proc_unlock__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else ERTS_PID2PIXLOCK(p->common.id), #endif locks); -#endif } ERTS_GLB_INLINE int -erts_smp_proc_trylock(Process *p, ErtsProcLocks locks) +erts_proc_trylock(Process *p, ErtsProcLocks locks) { -#ifndef ERTS_SMP - return 0; -#else - return erts_smp_proc_trylock__(p, + return erts_proc_trylock__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else ERTS_PID2PIXLOCK(p->common.id), #endif locks); -#endif } ERTS_GLB_INLINE void erts_proc_inc_refc(Process *p) { - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); -#ifdef ERTS_SMP + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); erts_ptab_atmc_inc_refc(&p->common); -#else - erts_ptab_inc_refc(&p->common); -#endif } ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p) { Sint referred; - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); -#ifdef ERTS_SMP + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); referred = erts_ptab_atmc_dec_test_refc(&p->common); -#else - referred = erts_ptab_dec_test_refc(&p->common); -#endif if (!referred) { ASSERT(ERTS_PROC_IS_EXITING(p)); erts_free_proc(p); @@ -1048,12 +1032,8 @@ ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p) ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc) { Sint referred; - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); -#ifdef ERTS_SMP + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); referred = erts_ptab_atmc_add_test_refc(&p->common, add_refc); -#else - referred = erts_ptab_add_test_refc(&p->common, add_refc); -#endif if (!referred) { ASSERT(ERTS_PROC_IS_EXITING(p)); erts_free_proc(p); @@ -1062,17 +1042,12 @@ ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc) ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *p) { - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); -#ifdef ERTS_SMP + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); return erts_ptab_atmc_read_refc(&p->common); -#else - return erts_ptab_read_refc(&p->common); -#endif } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#ifdef ERTS_SMP void erts_proc_lock_init(Process *); void erts_proc_lock_fin(Process *); void erts_proc_safelock(Process *a_proc, @@ -1081,7 +1056,6 @@ void erts_proc_safelock(Process *a_proc, Process *b_proc, ErtsProcLocks b_have_locks, ErtsProcLocks b_need_locks); -#endif /* * --- Process table lookup ------------------------------------------------ @@ -1113,9 +1087,6 @@ ERTS_GLB_INLINE Process *erts_pix2proc(int ix); ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid); 
ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid); -#ifndef ERTS_SMP -ERTS_GLB_INLINE -#endif Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -1132,7 +1103,7 @@ ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid) { Process *proc; - ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying()); + ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying()); if (is_not_internal_pid(pid)) return NULL; @@ -1152,25 +1123,6 @@ ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid) return proc; } -#ifndef ERTS_SMP -ERTS_GLB_INLINE Process * -erts_pid2proc_opt(Process *c_p_unused, - ErtsProcLocks c_p_have_locks_unused, - Eterm pid, - ErtsProcLocks pid_need_locks_unused, - int flags) -{ - Process *proc = erts_proc_lookup_raw(pid); - if (!proc) - return NULL; - if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) - && ERTS_PROC_IS_EXITING(proc)) - return NULL; - if (flags & ERTS_P2P_FLG_INC_REFC) - erts_proc_inc_refc(proc); - return proc; -} -#endif /* !ERTS_SMP */ #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c index b3bcb3af3f..38c095fb4a 100644 --- a/erts/emulator/beam/erl_ptab.c +++ b/erts/emulator/beam/erl_ptab.c @@ -284,31 +284,31 @@ struct ErtsPTabListBifData_ { static ERTS_INLINE void last_data_init_nob(ErtsPTab *ptab, Uint64 val) { - erts_smp_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val); + erts_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val); } static ERTS_INLINE void last_data_set_relb(ErtsPTab *ptab, Uint64 val) { - erts_smp_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val); + erts_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val); } static ERTS_INLINE Uint64 last_data_read_nob(ErtsPTab *ptab) { - return (Uint64) erts_smp_atomic64_read_nob(&ptab->vola.tile.last_data); + return (Uint64) erts_atomic64_read_nob(&ptab->vola.tile.last_data); } static ERTS_INLINE Uint64 last_data_read_acqb(ErtsPTab *ptab) { - return (Uint64) erts_smp_atomic64_read_acqb(&ptab->vola.tile.last_data); + return (Uint64) erts_atomic64_read_acqb(&ptab->vola.tile.last_data); } static ERTS_INLINE Uint64 last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp) { - return (Uint64) erts_smp_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data, + return (Uint64) erts_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data, (erts_aint64_t) new, (erts_aint64_t) exp); } @@ -346,9 +346,9 @@ ix_to_free_id_data_ix(ErtsPTab *ptab, Uint32 ix) UWord erts_ptab_mem_size(ErtsPTab *ptab) { - UWord size = ptab->r.o.max*sizeof(erts_smp_atomic_t); + UWord size = ptab->r.o.max*sizeof(erts_atomic_t); if (ptab->r.o.free_id_data) - size += ptab->r.o.max*sizeof(erts_smp_atomic32_t); + size += ptab->r.o.max*sizeof(erts_atomic32_t); return size; } @@ -367,14 +367,14 @@ erts_ptab_init_table(ErtsPTab *ptab, size_t tab_sz, alloc_sz; Uint32 bits, cl, cli, ix, ix_per_cache_line, tab_cache_lines; char *tab_end; - erts_smp_atomic_t *tab_entry; - erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_atomic_t *tab_entry; + erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL, + erts_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL, 
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0); + erts_atomic32_init_nob(&ptab->vola.tile.count, 0); last_data_init_nob(ptab, ~((Uint64) 0)); /* A size that is a power of 2 is to prefer performance wise */ @@ -388,20 +388,20 @@ erts_ptab_init_table(ErtsPTab *ptab, ptab->r.o.element_size = element_size; ptab->r.o.max = size; - tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t)); + tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic_t)); alloc_sz = tab_sz; if (!legacy) - alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t)); + alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t)); ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, alloc_sz); tab_end = ((char *) ptab->r.o.tab) + tab_sz; tab_entry = ptab->r.o.tab; while (tab_end > ((char *) tab_entry)) { - erts_smp_atomic_init_nob(tab_entry, ERTS_AINT_NULL); + erts_atomic_init_nob(tab_entry, ERTS_AINT_NULL); tab_entry++; } tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE; - ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t)); + ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic_t)); ASSERT((ptab->r.o.max & (ptab->r.o.max - 1)) == 0); /* power of 2 */ ASSERT((ix_per_cache_line & (ix_per_cache_line - 1)) == 0); /* power of 2 */ ASSERT((tab_cache_lines & (tab_cache_lines - 1)) == 0); /* power of 2 */ @@ -429,11 +429,11 @@ erts_ptab_init_table(ErtsPTab *ptab, } else { - tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t)); - ptab->r.o.free_id_data = (erts_smp_atomic32_t *) tab_end; + tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t)); + ptab->r.o.free_id_data = (erts_atomic32_t *) tab_end; tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE; - ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic32_t)); + ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic32_t)); ptab->r.o.dix_cl_mask = tab_cache_lines-1; ptab->r.o.dix_cl_shift = erts_fit_in_bits_int32(ix_per_cache_line-1); @@ -448,19 +448,19 @@ erts_ptab_init_table(ErtsPTab *ptab, ix = 0; for (cl = 0; cl < tab_cache_lines; cl++) { for (cli = 0; cli < ix_per_cache_line; cli++) { - erts_smp_atomic32_init_nob(&ptab->r.o.free_id_data[ix], + erts_atomic32_init_nob(&ptab->r.o.free_id_data[ix], cli*tab_cache_lines+cl); - ASSERT(erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data); + ASSERT(erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data); ix++; } } - erts_smp_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1); - erts_smp_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1); + erts_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1); + erts_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1); } - erts_smp_interval_init(&ptab->list.data.interval); + erts_interval_init(&ptab->list.data.interval); ptab->list.data.deleted.start = NULL; ptab->list.data.deleted.end = NULL; ptab->list.data.chunks = (((ptab->r.o.max - 1) @@ -480,9 +480,9 @@ erts_ptab_init_table(ErtsPTab *ptab, * have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while * still having a table size of the power of 2. 
*/ - erts_smp_atomic32_inc_nob(&ptab->vola.tile.count); + erts_atomic32_inc_nob(&ptab->vola.tile.count); pix = erts_ptab_data2pix(ptab, ptab->r.o.invalid_data); - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], + erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab->r.o.invalid_element); } @@ -506,12 +506,12 @@ erts_ptab_new_element(ErtsPTab *ptab, erts_ptab_rlock(ptab); - count = erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.count); + count = erts_atomic32_inc_read_acqb(&ptab->vola.tile.count); if (count > ptab->r.o.max) { while (1) { erts_aint32_t act_count; - act_count = erts_smp_atomic32_cmpxchg_relb(&ptab->vola.tile.count, + act_count = erts_atomic32_cmpxchg_relb(&ptab->vola.tile.count, count-1, count); if (act_count == count) { @@ -525,14 +525,14 @@ erts_ptab_new_element(ErtsPTab *ptab, } ptab_el->u.alive.started_interval - = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + = erts_current_interval_nob(erts_ptab_interval(ptab)); if (ptab->r.o.free_id_data) { do { - ix = (Uint32) erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix); + ix = (Uint32) erts_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix); ix = ix_to_free_id_data_ix(ptab, ix); - data = erts_smp_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix], + data = erts_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix], (erts_aint32_t)ptab->r.o.invalid_data); }while ((Eterm)data == ptab->r.o.invalid_data); @@ -546,10 +546,10 @@ erts_ptab_new_element(ErtsPTab *ptab, pix = erts_ptab_data2pix(ptab, (Eterm) data); #ifdef DEBUG - ASSERT(ERTS_AINT_NULL == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix], + ASSERT(ERTS_AINT_NULL == erts_atomic_xchg_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el)); #else - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); + erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); #endif erts_ptab_runlock(ptab); @@ -563,7 +563,7 @@ erts_ptab_new_element(ErtsPTab *ptab, restart: ptab_el->u.alive.started_interval - = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + = erts_current_interval_nob(erts_ptab_interval(ptab)); ld = last_data_read_acqb(ptab); @@ -571,10 +571,10 @@ erts_ptab_new_element(ErtsPTab *ptab, while (1) { ld++; pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld)); - if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix]) + if (erts_atomic_read_nob(&ptab->r.o.tab[pix]) == ERTS_AINT_NULL) { erts_aint_t val; - val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix], + val = erts_atomic_cmpxchg_relb(&ptab->r.o.tab[pix], invalid, ERTS_AINT_NULL); @@ -621,10 +621,10 @@ erts_ptab_new_element(ErtsPTab *ptab, /* Move into slot reserved */ #ifdef DEBUG - ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix], + ASSERT(invalid == erts_atomic_xchg_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el)); #else - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); + erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); #endif if (rlocked) @@ -644,7 +644,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el) sizeof(ErtsPTabDeletedElement)); ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start && ptab->list.data.deleted.end); - ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab)); ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); @@ -654,7 +654,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el) ptdep->u.element.id = ptab_el->id; ptdep->u.element.inserted = ptab_el->u.alive.started_interval; ptdep->u.element.deleted = - 
erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + erts_current_interval_nob(erts_ptab_interval(ptab)); ptab->list.data.deleted.end->next = ptdep; ptab->list.data.deleted.end = ptdep; @@ -678,7 +678,7 @@ erts_ptab_delete_element(ErtsPTab *ptab, pix = erts_ptab_id2pix(ptab, ptab_el->id); /* *Need* to be an managed thread */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); erts_ptab_rlock(ptab); maybe_save = ptab->list.data.deleted.end != NULL; @@ -687,7 +687,7 @@ erts_ptab_delete_element(ErtsPTab *ptab, erts_ptab_rwlock(ptab); } - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL); + erts_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL); if (ptab->r.o.free_id_data) { Uint32 prev_data; @@ -703,17 +703,17 @@ erts_ptab_delete_element(ErtsPTab *ptab, ASSERT(pix == erts_ptab_data2pix(ptab, data)); do { - ix = (Uint32) erts_smp_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix); + ix = (Uint32) erts_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix); ix = ix_to_free_id_data_ix(ptab, ix); - prev_data = erts_smp_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix], + prev_data = erts_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix], data, ptab->r.o.invalid_data); }while ((Eterm)prev_data != ptab->r.o.invalid_data); } - ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0); - erts_smp_atomic32_dec_relb(&ptab->vola.tile.count); + ASSERT(erts_atomic32_read_nob(&ptab->vola.tile.count) > 0); + erts_atomic32_dec_relb(&ptab->vola.tile.count); if (!maybe_save) erts_ptab_runlock(ptab); @@ -927,7 +927,7 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp) sizeof(ErtsPTabDeletedElement)); ptlbdp->bif_invocation->ix = -1; ptlbdp->bif_invocation->u.bif_invocation.interval - = erts_smp_step_interval_nob(erts_ptab_interval(ptab)); + = erts_step_interval_nob(erts_ptab_interval(ptab)); ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); ptlbdp->bif_invocation->next = NULL; @@ -968,12 +968,12 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp) locked = 1; } - ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab)); ERTS_PTAB_LIST_DBG_TRACE(p->common.id, insp_table); if (cix != 0) ptlbdp->chunk[cix].interval - = erts_smp_step_interval_nob(erts_ptab_interval(ptab)); + = erts_step_interval_nob(erts_ptab_interval(ptab)); else if (ptlbdp->bif_invocation) ptlbdp->chunk[0].interval = *invocation_interval_p; /* else: interval is irrelevant */ @@ -1331,18 +1331,18 @@ static void assert_ptab_consistency(ErtsPTab *ptab) int null_slots = 0; for (ix=0; ix < ptab->r.o.max; ix++) { - if (erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) { + if (erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) { ++free_pids; - data = erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]); + data = erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]); pix = erts_ptab_data2pix(ptab, (Eterm) data); ASSERT(erts_ptab_pix2intptr_nob(ptab, pix) == ERTS_AINT_NULL); } - if (erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) { + if (erts_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) { ++null_slots; } } ASSERT(free_pids == null_slots); - ASSERT(free_pids == ptab->r.o.max - erts_smp_atomic32_read_nob(&ptab->vola.tile.count)); + ASSERT(free_pids == ptab->r.o.max - erts_atomic32_read_nob(&ptab->vola.tile.count)); } #endif } @@ -1366,7 +1366,7 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next) Uint32 i, max_ix, 
num, stop_id_ix; max_ix = ptab->r.o.max - 1; num = next; - id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix); + id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix); for (i=0; i <= max_ix; ++i) { Uint32 pix; @@ -1380,26 +1380,26 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next) if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) { ++id_ix; dix = ix_to_free_id_data_ix(ptab, id_ix); - erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num); + erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num); ASSERT(pix == erts_ptab_data2pix(ptab, num)); } } - erts_smp_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix); + erts_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix); /* Write invalid_data in rest of free_id_data[]: */ - stop_id_ix = (1 + erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix; + stop_id_ix = (1 + erts_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix; while (1) { id_ix = (id_ix+1) & max_ix; if (id_ix == stop_id_ix) break; dix = ix_to_free_id_data_ix(ptab, id_ix); - erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix], + erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix], ptab->r.o.invalid_data); } } - id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1; + id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1; dix = ix_to_free_id_data_ix(ptab, id_ix); - res = (Sint) erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[dix]); + res = (Sint) erts_atomic32_read_nob(&ptab->r.o.free_id_data[dix]); } else { /* Deprecated legacy algorithm... */ @@ -1616,11 +1616,11 @@ debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp) static void debug_ptab_list_check_del_list(ErtsPTab *ptab) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab)); if (!ptab->list.data.deleted.start) ERTS_PTAB_LIST_ASSERT(!ptab->list.data.deleted.end); else { - Uint64 curr_interval = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + Uint64 curr_interval = erts_current_interval_nob(erts_ptab_interval(ptab)); Uint64 *prev_x_interval_p = NULL; ErtsPTabDeletedElement *ptdep; diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h index fecfd96ab0..4858cc8ab8 100644 --- a/erts/emulator/beam/erl_ptab.h +++ b/erts/emulator/beam/erl_ptab.h @@ -60,7 +60,7 @@ typedef struct { } refc; ErtsTracer tracer; Uint trace_flags; - erts_smp_atomic_t timer; + erts_atomic_t timer; union { /* --- While being alive --- */ struct { @@ -78,7 +78,7 @@ typedef struct { typedef struct ErtsPTabDeletedElement_ ErtsPTabDeletedElement; typedef struct { - erts_smp_rwmtx_t rwmtx; + erts_rwmtx_t rwmtx; erts_interval_t interval; struct { ErtsPTabDeletedElement *start; @@ -88,15 +88,15 @@ typedef struct { } ErtsPTabListData; typedef struct { - erts_smp_atomic64_t last_data; - erts_smp_atomic32_t count; - erts_smp_atomic32_t aid_ix; - erts_smp_atomic32_t fid_ix; + erts_atomic64_t last_data; + erts_atomic32_t count; + erts_atomic32_t aid_ix; + erts_atomic32_t fid_ix; } ErtsPTabVolatileData; typedef struct { - erts_smp_atomic_t *tab; - erts_smp_atomic32_t *free_id_data; + erts_atomic_t *tab; + erts_atomic32_t *free_id_data; Uint32 max; Uint32 pix_mask; Uint32 pix_cl_mask; @@ -223,8 +223,8 @@ ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab); ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab); ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab); ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab); -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab 
*ptab); -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -245,7 +245,7 @@ ERTS_GLB_INLINE int erts_ptab_count(ErtsPTab *ptab) { int max = ptab->r.o.max; - erts_aint32_t res = erts_smp_atomic32_read_nob(&ptab->vola.tile.count); + erts_aint32_t res = erts_atomic32_read_nob(&ptab->vola.tile.count); if (max == ERTS_PTAB_MAX_SIZE) { max--; res--; @@ -352,25 +352,25 @@ erts_ptab_id2data(ErtsPTab *ptab, Eterm id) ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]); + return erts_atomic_read_nob(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_ddrb(&ptab->r.o.tab[ix]); + return erts_atomic_read_ddrb(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_rb(&ptab->r.o.tab[ix]); + return erts_atomic_read_rb(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]); + return erts_atomic_read_acqb(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el) @@ -386,11 +386,9 @@ ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el) ERTS_GLB_INLINE Sint erts_ptab_atmc_dec_test_refc(ErtsPTabElementCommon *ptab_el) { erts_aint_t refc = erts_atomic_dec_read_relb(&ptab_el->refc.atmc); - ERTS_SMP_LC_ASSERT(refc >= 0); -#ifdef ERTS_SMP + ERTS_LC_ASSERT(refc >= 0); if (refc == 0) ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); -#endif return (Sint) refc; } @@ -399,7 +397,7 @@ ERTS_GLB_INLINE Sint erts_ptab_atmc_add_test_refc(ErtsPTabElementCommon *ptab_el { erts_aint_t refc = erts_atomic_add_read_mb(&ptab_el->refc.atmc, (erts_aint_t) add_refc); - ERTS_SMP_LC_ASSERT(refc >= 0); + ERTS_LC_ASSERT(refc >= 0); return (Sint) refc; } @@ -417,7 +415,7 @@ ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el) ERTS_GLB_INLINE Sint erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el) { Sint refc = --ptab_el->refc.sint; - ERTS_SMP_LC_ASSERT(refc >= 0); + ERTS_LC_ASSERT(refc >= 0); return refc; } @@ -425,7 +423,7 @@ ERTS_GLB_INLINE Sint erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el, Sint add_refc) { ptab_el->refc.sint += add_refc; - ERTS_SMP_LC_ASSERT(ptab_el->refc.sint >= 0); + ERTS_LC_ASSERT(ptab_el->refc.sint >= 0); return (Sint) ptab_el->refc.sint; } @@ -436,42 +434,42 @@ ERTS_GLB_INLINE Sint erts_ptab_read_refc(ErtsPTabElementCommon *ptab_el) ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab) { - erts_smp_rwmtx_rlock(&ptab->list.data.rwmtx); + erts_rwmtx_rlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab) { - return erts_smp_rwmtx_tryrlock(&ptab->list.data.rwmtx); + return erts_rwmtx_tryrlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab) { - erts_smp_rwmtx_runlock(&ptab->list.data.rwmtx); + erts_rwmtx_runlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab) { - erts_smp_rwmtx_rwlock(&ptab->list.data.rwmtx); + erts_rwmtx_rwlock(&ptab->list.data.rwmtx); } 
ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab) { - return erts_smp_rwmtx_tryrwlock(&ptab->list.data.rwmtx); + return erts_rwmtx_tryrwlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab) { - erts_smp_rwmtx_rwunlock(&ptab->list.data.rwmtx); + erts_rwmtx_rwunlock(&ptab->list.data.rwmtx); } -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab) +ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab) { - return erts_smp_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx); + return erts_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx); } -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab) +ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab) { - return erts_smp_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx); + return erts_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx); } #endif diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c index 96238318c9..6cb7ccab8d 100644 --- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c +++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c @@ -32,7 +32,6 @@ # include "config.h" #endif -#ifdef ERTS_SMP #include "erl_process.h" #include "erl_thr_progress.h" @@ -325,4 +324,3 @@ erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr, return res; } -#endif /* ERTS_SMP */ diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h index 7808d7d438..1307e65962 100644 --- a/erts/emulator/beam/erl_sched_spec_pre_alloc.h +++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h @@ -31,7 +31,6 @@ #ifndef ERTS_SCHED_SPEC_PRE_ALLOC_H__ #define ERTS_SCHED_SPEC_PRE_ALLOC_H__ -#ifdef ERTS_SMP #undef ERL_THR_PROGRESS_TSD_TYPE_ONLY #define ERL_THR_PROGRESS_TSD_TYPE_ONLY @@ -236,6 +235,5 @@ erts_sspa_free(erts_sspa_data_t *data, int cix, char *cblk) #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#endif /* ERTS_SMP */ #endif /* ERTS_SCHED_SPEC_PRE_ALLOC_H__ */ diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h deleted file mode 100644 index 696bdbdaf1..0000000000 --- a/erts/emulator/beam/erl_smp.h +++ /dev/null @@ -1,1585 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2005-2017. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * %CopyrightEnd% - */ -/* - * SMP interface to ethread library. - * This is essentially "sed s/erts_/erts_smp_/g < erl_threads.h > erl_smp.h", - * plus changes to NOP operations when ERTS_SMP is disabled. 
- * Author: Mikael Pettersson - */ -#ifndef ERL_SMP_H -#define ERL_SMP_H -#include "erl_threads.h" - -#ifdef ERTS_ENABLE_LOCK_POSITION -#define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__) -#define erts_smp_mtx_trylock(L) erts_smp_mtx_trylock_x(L, __FILE__, __LINE__) -#define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_tryrlock(L) erts_smp_rwmtx_tryrlock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_tryrwlock(L) erts_smp_rwmtx_tryrwlock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__) -#define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__) -#define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__) -#endif - - -#ifdef ERTS_SMP -#define ERTS_SMP_THR_OPTS_DEFAULT_INITER ERTS_THR_OPTS_DEFAULT_INITER -typedef erts_thr_opts_t erts_smp_thr_opts_t; -typedef erts_thr_init_data_t erts_smp_thr_init_data_t; -typedef erts_tid_t erts_smp_tid_t; -typedef erts_mtx_t erts_smp_mtx_t; -typedef erts_cnd_t erts_smp_cnd_t; -#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER -#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL -#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ -#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \ - ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ -#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED -#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED -#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED -typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t; -typedef erts_rwmtx_t erts_smp_rwmtx_t; -typedef erts_tsd_key_t erts_smp_tsd_key_t; -#define erts_smp_dw_atomic_t erts_dw_atomic_t -#define erts_smp_atomic_t erts_atomic_t -#define erts_smp_atomic32_t erts_atomic32_t -#define erts_smp_atomic64_t erts_atomic64_t -typedef erts_spinlock_t erts_smp_spinlock_t; -typedef erts_rwlock_t erts_smp_rwlock_t; -void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */ - -#define ERTS_SMP_MEMORY_BARRIER ERTS_THR_MEMORY_BARRIER -#define ERTS_SMP_WRITE_MEMORY_BARRIER ERTS_THR_WRITE_MEMORY_BARRIER -#define ERTS_SMP_READ_MEMORY_BARRIER ERTS_THR_READ_MEMORY_BARRIER -#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER - -#else /* #ifdef ERTS_SMP */ - -#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0} -typedef int erts_smp_thr_opts_t; -typedef int erts_smp_thr_init_data_t; -typedef int erts_smp_tid_t; -typedef int erts_smp_mtx_t; -typedef int erts_smp_cnd_t; -#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0} -#define ERTS_SMP_RWMTX_TYPE_NORMAL 0 -#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0 -#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0 -#define ERTS_SMP_RWMTX_LONG_LIVED 0 -#define ERTS_SMP_RWMTX_SHORT_LIVED 0 -#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0 -typedef struct { - char type; - char lived; - int main_spincount; - int aux_spincount; -} erts_smp_rwmtx_opt_t; -typedef int erts_smp_rwmtx_t; -typedef int erts_smp_tsd_key_t; -#define erts_smp_dw_atomic_t erts_no_dw_atomic_t -#define erts_smp_atomic_t erts_no_atomic_t -#define erts_smp_atomic32_t erts_no_atomic32_t -#define erts_smp_atomic64_t erts_no_atomic64_t -#if __GNUC__ > 2 -typedef struct { } erts_smp_spinlock_t; -typedef struct { } erts_smp_rwlock_t; -#else -typedef struct { int gcc_is_buggy; } erts_smp_spinlock_t; -typedef struct { int gcc_is_buggy; } erts_smp_rwlock_t; -#endif - -#define 
ERTS_SMP_MEMORY_BARRIER -#define ERTS_SMP_WRITE_MEMORY_BARRIER -#define ERTS_SMP_READ_MEMORY_BARRIER -#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER - -#endif /* #ifdef ERTS_SMP */ - -ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id); -ERTS_GLB_INLINE void erts_smp_thr_create(erts_smp_tid_t *tid, - void * (*func)(void *), - void *arg, - erts_smp_thr_opts_t *opts); -ERTS_GLB_INLINE void erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res); -ERTS_GLB_INLINE void erts_smp_thr_detach(erts_smp_tid_t tid); -ERTS_GLB_INLINE void erts_smp_thr_exit(void *res); -ERTS_GLB_INLINE void erts_smp_install_exit_handler(void (*exit_handler)(void)); -ERTS_GLB_INLINE erts_smp_tid_t erts_smp_thr_self(void); -ERTS_GLB_INLINE int erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y); -#ifdef ERTS_HAVE_REC_MTX_INIT -#define ERTS_SMP_HAVE_REC_MTX_INIT 1 -ERTS_GLB_INLINE void erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx); -#endif -ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line); -#else -ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx); -#endif -ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE int erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_cnd_init(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_cnd_destroy(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd, - erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no); -ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx, - erts_smp_rwmtx_opt_t *opt, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -#else -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx); -#endif -ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx); -ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx); 
-ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock); -ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line); -#else -ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock); -#endif -ERTS_GLB_INLINE int erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock); -ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line); -#else -ERTS_GLB_INLINE void erts_smp_read_lock(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock); -#endif -ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, - char *keyname); -ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key); -ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value); -ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key); - -#ifdef ERTS_THR_HAVE_SIG_FUNCS -#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1 -ERTS_GLB_INLINE void erts_smp_thr_sigmask(int how, - const sigset_t *set, - sigset_t *oset); -ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); -#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */ - -/* - * See "Documentation of atomics and memory barriers" at the top - * of erl_threads.h for info on atomics. 
- */ - -#ifdef ERTS_SMP - -/* Double word size atomics */ - -#define erts_smp_dw_atomic_init_nob erts_dw_atomic_init_nob -#define erts_smp_dw_atomic_set_nob erts_dw_atomic_set_nob -#define erts_smp_dw_atomic_read_nob erts_dw_atomic_read_nob -#define erts_smp_dw_atomic_cmpxchg_nob erts_dw_atomic_cmpxchg_nob - -#define erts_smp_dw_atomic_init_mb erts_dw_atomic_init_mb -#define erts_smp_dw_atomic_set_mb erts_dw_atomic_set_mb -#define erts_smp_dw_atomic_read_mb erts_dw_atomic_read_mb -#define erts_smp_dw_atomic_cmpxchg_mb erts_dw_atomic_cmpxchg_mb - -#define erts_smp_dw_atomic_init_acqb erts_dw_atomic_init_acqb -#define erts_smp_dw_atomic_set_acqb erts_dw_atomic_set_acqb -#define erts_smp_dw_atomic_read_acqb erts_dw_atomic_read_acqb -#define erts_smp_dw_atomic_cmpxchg_acqb erts_dw_atomic_cmpxchg_acqb - -#define erts_smp_dw_atomic_init_relb erts_dw_atomic_init_relb -#define erts_smp_dw_atomic_set_relb erts_dw_atomic_set_relb -#define erts_smp_dw_atomic_read_relb erts_dw_atomic_read_relb -#define erts_smp_dw_atomic_cmpxchg_relb erts_dw_atomic_cmpxchg_relb - -#define erts_smp_dw_atomic_init_ddrb erts_dw_atomic_init_ddrb -#define erts_smp_dw_atomic_set_ddrb erts_dw_atomic_set_ddrb -#define erts_smp_dw_atomic_read_ddrb erts_dw_atomic_read_ddrb -#define erts_smp_dw_atomic_cmpxchg_ddrb erts_dw_atomic_cmpxchg_ddrb - -#define erts_smp_dw_atomic_init_rb erts_dw_atomic_init_rb -#define erts_smp_dw_atomic_set_rb erts_dw_atomic_set_rb -#define erts_smp_dw_atomic_read_rb erts_dw_atomic_read_rb -#define erts_smp_dw_atomic_cmpxchg_rb erts_dw_atomic_cmpxchg_rb - -#define erts_smp_dw_atomic_init_wb erts_dw_atomic_init_wb -#define erts_smp_dw_atomic_set_wb erts_dw_atomic_set_wb -#define erts_smp_dw_atomic_read_wb erts_dw_atomic_read_wb -#define erts_smp_dw_atomic_cmpxchg_wb erts_dw_atomic_cmpxchg_wb - -#define erts_smp_dw_atomic_set_dirty erts_dw_atomic_set_dirty -#define erts_smp_dw_atomic_read_dirty erts_dw_atomic_read_dirty - -/* Word size atomics */ - -#define erts_smp_atomic_init_nob erts_atomic_init_nob -#define erts_smp_atomic_set_nob erts_atomic_set_nob -#define erts_smp_atomic_read_nob erts_atomic_read_nob -#define erts_smp_atomic_inc_read_nob erts_atomic_inc_read_nob -#define erts_smp_atomic_dec_read_nob erts_atomic_dec_read_nob -#define erts_smp_atomic_inc_nob erts_atomic_inc_nob -#define erts_smp_atomic_dec_nob erts_atomic_dec_nob -#define erts_smp_atomic_add_read_nob erts_atomic_add_read_nob -#define erts_smp_atomic_add_nob erts_atomic_add_nob -#define erts_smp_atomic_read_bor_nob erts_atomic_read_bor_nob -#define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob -#define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob -#define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob -#define erts_smp_atomic_read_bset_nob erts_atomic_read_bset_nob - -#define erts_smp_atomic_init_mb erts_atomic_init_mb -#define erts_smp_atomic_set_mb erts_atomic_set_mb -#define erts_smp_atomic_read_mb erts_atomic_read_mb -#define erts_smp_atomic_inc_read_mb erts_atomic_inc_read_mb -#define erts_smp_atomic_dec_read_mb erts_atomic_dec_read_mb -#define erts_smp_atomic_inc_mb erts_atomic_inc_mb -#define erts_smp_atomic_dec_mb erts_atomic_dec_mb -#define erts_smp_atomic_add_read_mb erts_atomic_add_read_mb -#define erts_smp_atomic_add_mb erts_atomic_add_mb -#define erts_smp_atomic_read_bor_mb erts_atomic_read_bor_mb -#define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb -#define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb -#define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb -#define 
erts_smp_atomic_read_bset_mb erts_atomic_read_bset_mb - -#define erts_smp_atomic_init_acqb erts_atomic_init_acqb -#define erts_smp_atomic_set_acqb erts_atomic_set_acqb -#define erts_smp_atomic_read_acqb erts_atomic_read_acqb -#define erts_smp_atomic_inc_read_acqb erts_atomic_inc_read_acqb -#define erts_smp_atomic_dec_read_acqb erts_atomic_dec_read_acqb -#define erts_smp_atomic_inc_acqb erts_atomic_inc_acqb -#define erts_smp_atomic_dec_acqb erts_atomic_dec_acqb -#define erts_smp_atomic_add_read_acqb erts_atomic_add_read_acqb -#define erts_smp_atomic_add_acqb erts_atomic_add_acqb -#define erts_smp_atomic_read_bor_acqb erts_atomic_read_bor_acqb -#define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb -#define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb -#define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb -#define erts_smp_atomic_read_bset_acqb erts_atomic_read_bset_acqb - -#define erts_smp_atomic_init_relb erts_atomic_init_relb -#define erts_smp_atomic_set_relb erts_atomic_set_relb -#define erts_smp_atomic_read_relb erts_atomic_read_relb -#define erts_smp_atomic_inc_read_relb erts_atomic_inc_read_relb -#define erts_smp_atomic_dec_read_relb erts_atomic_dec_read_relb -#define erts_smp_atomic_inc_relb erts_atomic_inc_relb -#define erts_smp_atomic_dec_relb erts_atomic_dec_relb -#define erts_smp_atomic_add_read_relb erts_atomic_add_read_relb -#define erts_smp_atomic_add_relb erts_atomic_add_relb -#define erts_smp_atomic_read_bor_relb erts_atomic_read_bor_relb -#define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb -#define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb -#define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb -#define erts_smp_atomic_read_bset_relb erts_atomic_read_bset_relb - -#define erts_smp_atomic_init_ddrb erts_atomic_init_ddrb -#define erts_smp_atomic_set_ddrb erts_atomic_set_ddrb -#define erts_smp_atomic_read_ddrb erts_atomic_read_ddrb -#define erts_smp_atomic_inc_read_ddrb erts_atomic_inc_read_ddrb -#define erts_smp_atomic_dec_read_ddrb erts_atomic_dec_read_ddrb -#define erts_smp_atomic_inc_ddrb erts_atomic_inc_ddrb -#define erts_smp_atomic_dec_ddrb erts_atomic_dec_ddrb -#define erts_smp_atomic_add_read_ddrb erts_atomic_add_read_ddrb -#define erts_smp_atomic_add_ddrb erts_atomic_add_ddrb -#define erts_smp_atomic_read_bor_ddrb erts_atomic_read_bor_ddrb -#define erts_smp_atomic_read_band_ddrb erts_atomic_read_band_ddrb -#define erts_smp_atomic_xchg_ddrb erts_atomic_xchg_ddrb -#define erts_smp_atomic_cmpxchg_ddrb erts_atomic_cmpxchg_ddrb -#define erts_smp_atomic_read_bset_ddrb erts_atomic_read_bset_ddrb - -#define erts_smp_atomic_init_rb erts_atomic_init_rb -#define erts_smp_atomic_set_rb erts_atomic_set_rb -#define erts_smp_atomic_read_rb erts_atomic_read_rb -#define erts_smp_atomic_inc_read_rb erts_atomic_inc_read_rb -#define erts_smp_atomic_dec_read_rb erts_atomic_dec_read_rb -#define erts_smp_atomic_inc_rb erts_atomic_inc_rb -#define erts_smp_atomic_dec_rb erts_atomic_dec_rb -#define erts_smp_atomic_add_read_rb erts_atomic_add_read_rb -#define erts_smp_atomic_add_rb erts_atomic_add_rb -#define erts_smp_atomic_read_bor_rb erts_atomic_read_bor_rb -#define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb -#define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb -#define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb -#define erts_smp_atomic_read_bset_rb erts_atomic_read_bset_rb - -#define erts_smp_atomic_init_wb erts_atomic_init_wb -#define erts_smp_atomic_set_wb erts_atomic_set_wb -#define erts_smp_atomic_read_wb 
erts_atomic_read_wb -#define erts_smp_atomic_inc_read_wb erts_atomic_inc_read_wb -#define erts_smp_atomic_dec_read_wb erts_atomic_dec_read_wb -#define erts_smp_atomic_inc_wb erts_atomic_inc_wb -#define erts_smp_atomic_dec_wb erts_atomic_dec_wb -#define erts_smp_atomic_add_read_wb erts_atomic_add_read_wb -#define erts_smp_atomic_add_wb erts_atomic_add_wb -#define erts_smp_atomic_read_bor_wb erts_atomic_read_bor_wb -#define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb -#define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb -#define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb -#define erts_smp_atomic_read_bset_wb erts_atomic_read_bset_wb - -#define erts_smp_atomic_set_dirty erts_atomic_set_dirty -#define erts_smp_atomic_read_dirty erts_atomic_read_dirty - -/* 32-bit atomics */ - -#define erts_smp_atomic32_init_nob erts_atomic32_init_nob -#define erts_smp_atomic32_set_nob erts_atomic32_set_nob -#define erts_smp_atomic32_read_nob erts_atomic32_read_nob -#define erts_smp_atomic32_inc_read_nob erts_atomic32_inc_read_nob -#define erts_smp_atomic32_dec_read_nob erts_atomic32_dec_read_nob -#define erts_smp_atomic32_inc_nob erts_atomic32_inc_nob -#define erts_smp_atomic32_dec_nob erts_atomic32_dec_nob -#define erts_smp_atomic32_add_read_nob erts_atomic32_add_read_nob -#define erts_smp_atomic32_add_nob erts_atomic32_add_nob -#define erts_smp_atomic32_read_bor_nob erts_atomic32_read_bor_nob -#define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob -#define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob -#define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob -#define erts_smp_atomic32_read_bset_nob erts_atomic32_read_bset_nob - -#define erts_smp_atomic32_init_mb erts_atomic32_init_mb -#define erts_smp_atomic32_set_mb erts_atomic32_set_mb -#define erts_smp_atomic32_read_mb erts_atomic32_read_mb -#define erts_smp_atomic32_inc_read_mb erts_atomic32_inc_read_mb -#define erts_smp_atomic32_dec_read_mb erts_atomic32_dec_read_mb -#define erts_smp_atomic32_inc_mb erts_atomic32_inc_mb -#define erts_smp_atomic32_dec_mb erts_atomic32_dec_mb -#define erts_smp_atomic32_add_read_mb erts_atomic32_add_read_mb -#define erts_smp_atomic32_add_mb erts_atomic32_add_mb -#define erts_smp_atomic32_read_bor_mb erts_atomic32_read_bor_mb -#define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb -#define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb -#define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb -#define erts_smp_atomic32_read_bset_mb erts_atomic32_read_bset_mb - -#define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb -#define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb -#define erts_smp_atomic32_read_acqb erts_atomic32_read_acqb -#define erts_smp_atomic32_inc_read_acqb erts_atomic32_inc_read_acqb -#define erts_smp_atomic32_dec_read_acqb erts_atomic32_dec_read_acqb -#define erts_smp_atomic32_inc_acqb erts_atomic32_inc_acqb -#define erts_smp_atomic32_dec_acqb erts_atomic32_dec_acqb -#define erts_smp_atomic32_add_read_acqb erts_atomic32_add_read_acqb -#define erts_smp_atomic32_add_acqb erts_atomic32_add_acqb -#define erts_smp_atomic32_read_bor_acqb erts_atomic32_read_bor_acqb -#define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb -#define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb -#define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb -#define erts_smp_atomic32_read_bset_acqb erts_atomic32_read_bset_acqb - -#define erts_smp_atomic32_init_relb erts_atomic32_init_relb -#define erts_smp_atomic32_set_relb erts_atomic32_set_relb 
-#define erts_smp_atomic32_read_relb erts_atomic32_read_relb -#define erts_smp_atomic32_inc_read_relb erts_atomic32_inc_read_relb -#define erts_smp_atomic32_dec_read_relb erts_atomic32_dec_read_relb -#define erts_smp_atomic32_inc_relb erts_atomic32_inc_relb -#define erts_smp_atomic32_dec_relb erts_atomic32_dec_relb -#define erts_smp_atomic32_add_read_relb erts_atomic32_add_read_relb -#define erts_smp_atomic32_add_relb erts_atomic32_add_relb -#define erts_smp_atomic32_read_bor_relb erts_atomic32_read_bor_relb -#define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb -#define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb -#define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb -#define erts_smp_atomic32_read_bset_relb erts_atomic32_read_bset_relb - -#define erts_smp_atomic32_init_ddrb erts_atomic32_init_ddrb -#define erts_smp_atomic32_set_ddrb erts_atomic32_set_ddrb -#define erts_smp_atomic32_read_ddrb erts_atomic32_read_ddrb -#define erts_smp_atomic32_inc_read_ddrb erts_atomic32_inc_read_ddrb -#define erts_smp_atomic32_dec_read_ddrb erts_atomic32_dec_read_ddrb -#define erts_smp_atomic32_inc_ddrb erts_atomic32_inc_ddrb -#define erts_smp_atomic32_dec_ddrb erts_atomic32_dec_ddrb -#define erts_smp_atomic32_add_read_ddrb erts_atomic32_add_read_ddrb -#define erts_smp_atomic32_add_ddrb erts_atomic32_add_ddrb -#define erts_smp_atomic32_read_bor_ddrb erts_atomic32_read_bor_ddrb -#define erts_smp_atomic32_read_band_ddrb erts_atomic32_read_band_ddrb -#define erts_smp_atomic32_xchg_ddrb erts_atomic32_xchg_ddrb -#define erts_smp_atomic32_cmpxchg_ddrb erts_atomic32_cmpxchg_ddrb -#define erts_smp_atomic32_read_bset_ddrb erts_atomic32_read_bset_ddrb - -#define erts_smp_atomic32_init_rb erts_atomic32_init_rb -#define erts_smp_atomic32_set_rb erts_atomic32_set_rb -#define erts_smp_atomic32_read_rb erts_atomic32_read_rb -#define erts_smp_atomic32_inc_read_rb erts_atomic32_inc_read_rb -#define erts_smp_atomic32_dec_read_rb erts_atomic32_dec_read_rb -#define erts_smp_atomic32_inc_rb erts_atomic32_inc_rb -#define erts_smp_atomic32_dec_rb erts_atomic32_dec_rb -#define erts_smp_atomic32_add_read_rb erts_atomic32_add_read_rb -#define erts_smp_atomic32_add_rb erts_atomic32_add_rb -#define erts_smp_atomic32_read_bor_rb erts_atomic32_read_bor_rb -#define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb -#define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb -#define erts_smp_atomic32_cmpxchg_rb erts_atomic32_cmpxchg_rb -#define erts_smp_atomic32_read_bset_rb erts_atomic32_read_bset_rb - -#define erts_smp_atomic32_init_wb erts_atomic32_init_wb -#define erts_smp_atomic32_set_wb erts_atomic32_set_wb -#define erts_smp_atomic32_read_wb erts_atomic32_read_wb -#define erts_smp_atomic32_inc_read_wb erts_atomic32_inc_read_wb -#define erts_smp_atomic32_dec_read_wb erts_atomic32_dec_read_wb -#define erts_smp_atomic32_inc_wb erts_atomic32_inc_wb -#define erts_smp_atomic32_dec_wb erts_atomic32_dec_wb -#define erts_smp_atomic32_add_read_wb erts_atomic32_add_read_wb -#define erts_smp_atomic32_add_wb erts_atomic32_add_wb -#define erts_smp_atomic32_read_bor_wb erts_atomic32_read_bor_wb -#define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb -#define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb -#define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb -#define erts_smp_atomic32_read_bset_wb erts_atomic32_read_bset_wb - -#define erts_smp_atomic32_set_dirty erts_atomic32_set_dirty -#define erts_smp_atomic32_read_dirty erts_atomic32_read_dirty - -/* 64-bit atomics */ - -#define 
erts_smp_atomic64_init_nob erts_atomic64_init_nob -#define erts_smp_atomic64_set_nob erts_atomic64_set_nob -#define erts_smp_atomic64_read_nob erts_atomic64_read_nob -#define erts_smp_atomic64_inc_read_nob erts_atomic64_inc_read_nob -#define erts_smp_atomic64_dec_read_nob erts_atomic64_dec_read_nob -#define erts_smp_atomic64_inc_nob erts_atomic64_inc_nob -#define erts_smp_atomic64_dec_nob erts_atomic64_dec_nob -#define erts_smp_atomic64_add_read_nob erts_atomic64_add_read_nob -#define erts_smp_atomic64_add_nob erts_atomic64_add_nob -#define erts_smp_atomic64_read_bor_nob erts_atomic64_read_bor_nob -#define erts_smp_atomic64_read_band_nob erts_atomic64_read_band_nob -#define erts_smp_atomic64_xchg_nob erts_atomic64_xchg_nob -#define erts_smp_atomic64_cmpxchg_nob erts_atomic64_cmpxchg_nob -#define erts_smp_atomic64_read_bset_nob erts_atomic64_read_bset_nob - -#define erts_smp_atomic64_init_mb erts_atomic64_init_mb -#define erts_smp_atomic64_set_mb erts_atomic64_set_mb -#define erts_smp_atomic64_read_mb erts_atomic64_read_mb -#define erts_smp_atomic64_inc_read_mb erts_atomic64_inc_read_mb -#define erts_smp_atomic64_dec_read_mb erts_atomic64_dec_read_mb -#define erts_smp_atomic64_inc_mb erts_atomic64_inc_mb -#define erts_smp_atomic64_dec_mb erts_atomic64_dec_mb -#define erts_smp_atomic64_add_read_mb erts_atomic64_add_read_mb -#define erts_smp_atomic64_add_mb erts_atomic64_add_mb -#define erts_smp_atomic64_read_bor_mb erts_atomic64_read_bor_mb -#define erts_smp_atomic64_read_band_mb erts_atomic64_read_band_mb -#define erts_smp_atomic64_xchg_mb erts_atomic64_xchg_mb -#define erts_smp_atomic64_cmpxchg_mb erts_atomic64_cmpxchg_mb -#define erts_smp_atomic64_read_bset_mb erts_atomic64_read_bset_mb - -#define erts_smp_atomic64_init_acqb erts_atomic64_init_acqb -#define erts_smp_atomic64_set_acqb erts_atomic64_set_acqb -#define erts_smp_atomic64_read_acqb erts_atomic64_read_acqb -#define erts_smp_atomic64_inc_read_acqb erts_atomic64_inc_read_acqb -#define erts_smp_atomic64_dec_read_acqb erts_atomic64_dec_read_acqb -#define erts_smp_atomic64_inc_acqb erts_atomic64_inc_acqb -#define erts_smp_atomic64_dec_acqb erts_atomic64_dec_acqb -#define erts_smp_atomic64_add_read_acqb erts_atomic64_add_read_acqb -#define erts_smp_atomic64_add_acqb erts_atomic64_add_acqb -#define erts_smp_atomic64_read_bor_acqb erts_atomic64_read_bor_acqb -#define erts_smp_atomic64_read_band_acqb erts_atomic64_read_band_acqb -#define erts_smp_atomic64_xchg_acqb erts_atomic64_xchg_acqb -#define erts_smp_atomic64_cmpxchg_acqb erts_atomic64_cmpxchg_acqb -#define erts_smp_atomic64_read_bset_acqb erts_atomic64_read_bset_acqb - -#define erts_smp_atomic64_init_relb erts_atomic64_init_relb -#define erts_smp_atomic64_set_relb erts_atomic64_set_relb -#define erts_smp_atomic64_read_relb erts_atomic64_read_relb -#define erts_smp_atomic64_inc_read_relb erts_atomic64_inc_read_relb -#define erts_smp_atomic64_dec_read_relb erts_atomic64_dec_read_relb -#define erts_smp_atomic64_inc_relb erts_atomic64_inc_relb -#define erts_smp_atomic64_dec_relb erts_atomic64_dec_relb -#define erts_smp_atomic64_add_read_relb erts_atomic64_add_read_relb -#define erts_smp_atomic64_add_relb erts_atomic64_add_relb -#define erts_smp_atomic64_read_bor_relb erts_atomic64_read_bor_relb -#define erts_smp_atomic64_read_band_relb erts_atomic64_read_band_relb -#define erts_smp_atomic64_xchg_relb erts_atomic64_xchg_relb -#define erts_smp_atomic64_cmpxchg_relb erts_atomic64_cmpxchg_relb -#define erts_smp_atomic64_read_bset_relb erts_atomic64_read_bset_relb - -#define 
erts_smp_atomic64_init_ddrb erts_atomic64_init_ddrb -#define erts_smp_atomic64_set_ddrb erts_atomic64_set_ddrb -#define erts_smp_atomic64_read_ddrb erts_atomic64_read_ddrb -#define erts_smp_atomic64_inc_read_ddrb erts_atomic64_inc_read_ddrb -#define erts_smp_atomic64_dec_read_ddrb erts_atomic64_dec_read_ddrb -#define erts_smp_atomic64_inc_ddrb erts_atomic64_inc_ddrb -#define erts_smp_atomic64_dec_ddrb erts_atomic64_dec_ddrb -#define erts_smp_atomic64_add_read_ddrb erts_atomic64_add_read_ddrb -#define erts_smp_atomic64_add_ddrb erts_atomic64_add_ddrb -#define erts_smp_atomic64_read_bor_ddrb erts_atomic64_read_bor_ddrb -#define erts_smp_atomic64_read_band_ddrb erts_atomic64_read_band_ddrb -#define erts_smp_atomic64_xchg_ddrb erts_atomic64_xchg_ddrb -#define erts_smp_atomic64_cmpxchg_ddrb erts_atomic64_cmpxchg_ddrb -#define erts_smp_atomic64_read_bset_ddrb erts_atomic64_read_bset_ddrb - -#define erts_smp_atomic64_init_rb erts_atomic64_init_rb -#define erts_smp_atomic64_set_rb erts_atomic64_set_rb -#define erts_smp_atomic64_read_rb erts_atomic64_read_rb -#define erts_smp_atomic64_inc_read_rb erts_atomic64_inc_read_rb -#define erts_smp_atomic64_dec_read_rb erts_atomic64_dec_read_rb -#define erts_smp_atomic64_inc_rb erts_atomic64_inc_rb -#define erts_smp_atomic64_dec_rb erts_atomic64_dec_rb -#define erts_smp_atomic64_add_read_rb erts_atomic64_add_read_rb -#define erts_smp_atomic64_add_rb erts_atomic64_add_rb -#define erts_smp_atomic64_read_bor_rb erts_atomic64_read_bor_rb -#define erts_smp_atomic64_read_band_rb erts_atomic64_read_band_rb -#define erts_smp_atomic64_xchg_rb erts_atomic64_xchg_rb -#define erts_smp_atomic64_cmpxchg_rb erts_atomic64_cmpxchg_rb -#define erts_smp_atomic64_read_bset_rb erts_atomic64_read_bset_rb - -#define erts_smp_atomic64_init_wb erts_atomic64_init_wb -#define erts_smp_atomic64_set_wb erts_atomic64_set_wb -#define erts_smp_atomic64_read_wb erts_atomic64_read_wb -#define erts_smp_atomic64_inc_read_wb erts_atomic64_inc_read_wb -#define erts_smp_atomic64_dec_read_wb erts_atomic64_dec_read_wb -#define erts_smp_atomic64_inc_wb erts_atomic64_inc_wb -#define erts_smp_atomic64_dec_wb erts_atomic64_dec_wb -#define erts_smp_atomic64_add_read_wb erts_atomic64_add_read_wb -#define erts_smp_atomic64_add_wb erts_atomic64_add_wb -#define erts_smp_atomic64_read_bor_wb erts_atomic64_read_bor_wb -#define erts_smp_atomic64_read_band_wb erts_atomic64_read_band_wb -#define erts_smp_atomic64_xchg_wb erts_atomic64_xchg_wb -#define erts_smp_atomic64_cmpxchg_wb erts_atomic64_cmpxchg_wb -#define erts_smp_atomic64_read_bset_wb erts_atomic64_read_bset_wb - -#define erts_smp_atomic64_set_dirty erts_atomic64_set_dirty -#define erts_smp_atomic64_read_dirty erts_atomic64_read_dirty - -#else /* !ERTS_SMP */ - -/* Double word size atomics */ - -#define erts_smp_dw_atomic_init_nob erts_no_dw_atomic_set -#define erts_smp_dw_atomic_set_nob erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_nob erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_mb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_mb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_mb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_acqb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_acqb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_acqb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg - -#define 
erts_smp_dw_atomic_init_relb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_relb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_relb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_ddrb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_ddrb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_ddrb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_rb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_rb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_rb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_wb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_wb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_wb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_set_dirty erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_dirty erts_no_dw_atomic_read - -/* Word size atomics */ - -#define erts_smp_atomic_init_nob erts_no_atomic_set -#define erts_smp_atomic_set_nob erts_no_atomic_set -#define erts_smp_atomic_read_nob erts_no_atomic_read -#define erts_smp_atomic_inc_read_nob erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_nob erts_no_atomic_dec_read -#define erts_smp_atomic_inc_nob erts_no_atomic_inc -#define erts_smp_atomic_dec_nob erts_no_atomic_dec -#define erts_smp_atomic_add_read_nob erts_no_atomic_add_read -#define erts_smp_atomic_add_nob erts_no_atomic_add -#define erts_smp_atomic_read_bor_nob erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_nob erts_no_atomic_read_band -#define erts_smp_atomic_xchg_nob erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_nob erts_no_atomic_read_bset - -#define erts_smp_atomic_init_mb erts_no_atomic_set -#define erts_smp_atomic_set_mb erts_no_atomic_set -#define erts_smp_atomic_read_mb erts_no_atomic_read -#define erts_smp_atomic_inc_read_mb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_mb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_mb erts_no_atomic_inc -#define erts_smp_atomic_dec_mb erts_no_atomic_dec -#define erts_smp_atomic_add_read_mb erts_no_atomic_add_read -#define erts_smp_atomic_add_mb erts_no_atomic_add -#define erts_smp_atomic_read_bor_mb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_mb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_mb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_mb erts_no_atomic_read_bset - -#define erts_smp_atomic_init_acqb erts_no_atomic_set -#define erts_smp_atomic_set_acqb erts_no_atomic_set -#define erts_smp_atomic_read_acqb erts_no_atomic_read -#define erts_smp_atomic_inc_read_acqb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_acqb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_acqb erts_no_atomic_inc -#define erts_smp_atomic_dec_acqb erts_no_atomic_dec -#define erts_smp_atomic_add_read_acqb erts_no_atomic_add_read -#define erts_smp_atomic_add_acqb erts_no_atomic_add -#define erts_smp_atomic_read_bor_acqb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_acqb erts_no_atomic_read_bset - 
-#define erts_smp_atomic_init_relb erts_no_atomic_set -#define erts_smp_atomic_set_relb erts_no_atomic_set -#define erts_smp_atomic_read_relb erts_no_atomic_read -#define erts_smp_atomic_inc_read_relb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_relb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_relb erts_no_atomic_inc -#define erts_smp_atomic_dec_relb erts_no_atomic_dec -#define erts_smp_atomic_add_read_relb erts_no_atomic_add_read -#define erts_smp_atomic_add_relb erts_no_atomic_add -#define erts_smp_atomic_read_bor_relb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_relb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_relb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_relb erts_no_atomic_read_bset - -#define erts_smp_atomic_init_ddrb erts_no_atomic_set -#define erts_smp_atomic_set_ddrb erts_no_atomic_set -#define erts_smp_atomic_read_ddrb erts_no_atomic_read -#define erts_smp_atomic_inc_read_ddrb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_ddrb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_ddrb erts_no_atomic_inc -#define erts_smp_atomic_dec_ddrb erts_no_atomic_dec -#define erts_smp_atomic_add_read_ddrb erts_no_atomic_add_read -#define erts_smp_atomic_add_ddrb erts_no_atomic_add -#define erts_smp_atomic_read_bor_ddrb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_ddrb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_ddrb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_ddrb erts_no_atomic_read_bset - -#define erts_smp_atomic_init_rb erts_no_atomic_set -#define erts_smp_atomic_set_rb erts_no_atomic_set -#define erts_smp_atomic_read_rb erts_no_atomic_read -#define erts_smp_atomic_inc_read_rb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_rb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_rb erts_no_atomic_inc -#define erts_smp_atomic_dec_rb erts_no_atomic_dec -#define erts_smp_atomic_add_read_rb erts_no_atomic_add_read -#define erts_smp_atomic_add_rb erts_no_atomic_add -#define erts_smp_atomic_read_bor_rb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_rb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_rb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_rb erts_no_atomic_read_bset - -#define erts_smp_atomic_init_wb erts_no_atomic_set -#define erts_smp_atomic_set_wb erts_no_atomic_set -#define erts_smp_atomic_read_wb erts_no_atomic_read -#define erts_smp_atomic_inc_read_wb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_wb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_wb erts_no_atomic_inc -#define erts_smp_atomic_dec_wb erts_no_atomic_dec -#define erts_smp_atomic_add_read_wb erts_no_atomic_add_read -#define erts_smp_atomic_add_wb erts_no_atomic_add -#define erts_smp_atomic_read_bor_wb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_wb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_wb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_wb erts_no_atomic_read_bset - -#define erts_smp_atomic_set_dirty erts_no_atomic_set -#define erts_smp_atomic_read_dirty erts_no_atomic_read - -/* 32-bit atomics */ - -#define erts_smp_atomic32_init_nob erts_no_atomic32_set -#define erts_smp_atomic32_set_nob erts_no_atomic32_set -#define erts_smp_atomic32_read_nob erts_no_atomic32_read -#define 
erts_smp_atomic32_inc_read_nob erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_nob erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_nob erts_no_atomic32_inc -#define erts_smp_atomic32_dec_nob erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_nob erts_no_atomic32_add_read -#define erts_smp_atomic32_add_nob erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_nob erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_nob erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_mb erts_no_atomic32_set -#define erts_smp_atomic32_set_mb erts_no_atomic32_set -#define erts_smp_atomic32_read_mb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_mb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_mb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_mb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_mb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_mb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_mb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_mb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_mb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_acqb erts_no_atomic32_set -#define erts_smp_atomic32_set_acqb erts_no_atomic32_set -#define erts_smp_atomic32_read_acqb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_acqb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_acqb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_acqb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_acqb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_acqb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_acqb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_acqb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_acqb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_relb erts_no_atomic32_set -#define erts_smp_atomic32_set_relb erts_no_atomic32_set -#define erts_smp_atomic32_read_relb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_relb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_relb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_relb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_relb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_relb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_relb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_relb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_relb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_ddrb erts_no_atomic32_set -#define erts_smp_atomic32_set_ddrb erts_no_atomic32_set -#define erts_smp_atomic32_read_ddrb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_ddrb erts_no_atomic32_inc_read -#define 
erts_smp_atomic32_dec_read_ddrb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_ddrb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_ddrb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_ddrb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_ddrb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_ddrb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_ddrb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_ddrb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_ddrb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_rb erts_no_atomic32_set -#define erts_smp_atomic32_set_rb erts_no_atomic32_set -#define erts_smp_atomic32_read_rb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_rb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_rb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_rb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_rb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_rb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_rb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_rb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_rb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_wb erts_no_atomic32_set -#define erts_smp_atomic32_set_wb erts_no_atomic32_set -#define erts_smp_atomic32_read_wb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_wb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_wb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_wb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_wb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_wb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_wb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_wb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_wb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_set_dirty erts_no_atomic32_set -#define erts_smp_atomic32_read_dirty erts_no_atomic32_read - -/* 64-bit atomics */ - -#define erts_smp_atomic64_init_nob erts_no_atomic64_set -#define erts_smp_atomic64_set_nob erts_no_atomic64_set -#define erts_smp_atomic64_read_nob erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_nob erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_nob erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_nob erts_no_atomic64_inc -#define erts_smp_atomic64_dec_nob erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_nob erts_no_atomic64_add_read -#define erts_smp_atomic64_add_nob erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_nob erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_nob erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_nob erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_nob erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_mb erts_no_atomic64_set -#define erts_smp_atomic64_set_mb erts_no_atomic64_set -#define erts_smp_atomic64_read_mb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_mb 
erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_mb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_mb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_mb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_mb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_mb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_mb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_mb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_mb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_mb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_acqb erts_no_atomic64_set -#define erts_smp_atomic64_set_acqb erts_no_atomic64_set -#define erts_smp_atomic64_read_acqb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_acqb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_acqb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_acqb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_acqb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_acqb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_acqb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_acqb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_acqb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_acqb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_acqb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_relb erts_no_atomic64_set -#define erts_smp_atomic64_set_relb erts_no_atomic64_set -#define erts_smp_atomic64_read_relb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_relb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_relb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_relb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_relb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_relb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_relb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_relb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_relb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_relb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_relb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_ddrb erts_no_atomic64_set -#define erts_smp_atomic64_set_ddrb erts_no_atomic64_set -#define erts_smp_atomic64_read_ddrb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_ddrb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_ddrb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_ddrb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_ddrb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_ddrb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_ddrb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_ddrb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_ddrb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_ddrb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_ddrb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_rb erts_no_atomic64_set -#define erts_smp_atomic64_set_rb erts_no_atomic64_set -#define erts_smp_atomic64_read_rb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_rb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_rb 
erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_rb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_rb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_rb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_rb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_rb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_rb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_rb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_rb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_wb erts_no_atomic64_set -#define erts_smp_atomic64_set_wb erts_no_atomic64_set -#define erts_smp_atomic64_read_wb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_wb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_wb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_wb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_wb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_wb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_wb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_wb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_wb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_wb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_wb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_set_dirty erts_no_atomic64_set -#define erts_smp_atomic64_read_dirty erts_no_atomic64_read - -#endif /* !ERTS_SMP */ - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE void -erts_smp_thr_init(erts_smp_thr_init_data_t *id) -{ -#ifdef ERTS_SMP - erts_thr_init(id); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_thr_create(erts_smp_tid_t *tid, void * (*func)(void *), void *arg, - erts_smp_thr_opts_t *opts) -{ -#ifdef ERTS_SMP - erts_thr_create(tid, func, arg, opts); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res) -{ -#ifdef ERTS_SMP - erts_thr_join(tid, thr_res); -#endif -} - - -ERTS_GLB_INLINE void -erts_smp_thr_detach(erts_smp_tid_t tid) -{ -#ifdef ERTS_SMP - erts_thr_detach(tid); -#endif -} - - -ERTS_GLB_INLINE void -erts_smp_thr_exit(void *res) -{ -#ifdef ERTS_SMP - erts_thr_exit(res); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_install_exit_handler(void (*exit_handler)(void)) -{ -#ifdef ERTS_SMP - erts_thr_install_exit_handler(exit_handler); -#endif -} - -ERTS_GLB_INLINE erts_smp_tid_t -erts_smp_thr_self(void) -{ -#ifdef ERTS_SMP - return erts_thr_self(); -#else - return 0; -#endif -} - - -ERTS_GLB_INLINE int -erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y) -{ -#ifdef ERTS_SMP - return erts_equal_tids(x, y); -#else - return 1; -#endif -} - - -#ifdef ERTS_HAVE_REC_MTX_INIT -ERTS_GLB_INLINE void -erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx) -{ -#ifdef ERTS_SMP - erts_rec_mtx_init(mtx); -#endif -} -#endif - -ERTS_GLB_INLINE void -erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_mtx_init(mtx, name, extra, flags); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_mtx_init_locked(mtx, name, extra, flags); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_mtx_destroy(erts_smp_mtx_t *mtx) -{ -#ifdef ERTS_SMP - erts_mtx_destroy(mtx); -#endif -} - -ERTS_GLB_INLINE int -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char 
*file, unsigned int line) -#else -erts_smp_mtx_trylock(erts_smp_mtx_t *mtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - return erts_mtx_trylock_x(mtx,file,line); -#elif defined(ERTS_SMP) - return erts_mtx_trylock(mtx); -#else - return 0; -#endif - -} - - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line) -#else -erts_smp_mtx_lock(erts_smp_mtx_t *mtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_mtx_lock_x(mtx, file, line); -#elif defined(ERTS_SMP) - erts_mtx_lock(mtx); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_mtx_unlock(erts_smp_mtx_t *mtx) -{ -#ifdef ERTS_SMP - erts_mtx_unlock(mtx); -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx) -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_mtx_is_locked(mtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_cnd_init(erts_smp_cnd_t *cnd) -{ -#ifdef ERTS_SMP - erts_cnd_init(cnd); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_cnd_destroy(erts_smp_cnd_t *cnd) -{ -#ifdef ERTS_SMP - erts_cnd_destroy(cnd); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx) -{ -#ifdef ERTS_SMP - erts_cnd_wait(cnd, mtx); -#endif -} - -/* - * IMPORTANT note about erts_smp_cnd_signal() and erts_smp_cnd_broadcast() - * - * POSIX allow a call to `pthread_cond_signal' or `pthread_cond_broadcast' - * even though the associated mutex/mutexes isn't/aren't locked by the - * caller. Our implementation do not allow that in order to avoid a - * performance penalty. That is, all associated mutexes *need* to be - * locked by the caller of erts_smp_cnd_signal()/erts_smp_cnd_broadcast()! 
- */ - -ERTS_GLB_INLINE void -erts_smp_cnd_signal(erts_smp_cnd_t *cnd) -{ -#ifdef ERTS_SMP - erts_cnd_signal(cnd); -#endif -} - - -ERTS_GLB_INLINE void -erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd) -{ -#ifdef ERTS_SMP - erts_cnd_broadcast(cnd); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_set_reader_group(int no) -{ -#ifdef ERTS_SMP - erts_rwmtx_set_reader_group(no); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, - char *name, - Eterm extra, - erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_smp_rwmtx_init_opt(rwmtx, NULL, name, extra, flags); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx, - erts_smp_rwmtx_opt_t *opt, - char *name, - Eterm extra, - erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_rwmtx_init_opt(rwmtx, opt, name, extra, flags); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx) -{ -#ifdef ERTS_SMP - erts_rwmtx_destroy(rwmtx); -#endif -} - -ERTS_GLB_INLINE int -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - return erts_rwmtx_tryrlock_x(rwmtx, file, line); -#elif defined(ERTS_SMP) - return erts_rwmtx_tryrlock(rwmtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_rwmtx_rlock_x(rwmtx, file, line); -#elif defined(ERTS_SMP) - erts_rwmtx_rlock(rwmtx); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx) -{ -#ifdef ERTS_SMP - erts_rwmtx_runlock(rwmtx); -#endif -} - - -ERTS_GLB_INLINE int -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - return erts_rwmtx_tryrwlock_x(rwmtx, file, line); -#elif defined(ERTS_SMP) - return erts_rwmtx_tryrwlock(rwmtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_rwmtx_rwlock_x(rwmtx, file, line); -#elif defined(ERTS_SMP) - erts_rwmtx_rwlock(rwmtx); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx) -{ -#ifdef ERTS_SMP - erts_rwmtx_rwunlock(rwmtx); -#endif -} - -#if 0 /* The following rwmtx function names are - reserved for potential future use. 
*/ - -/* Try upgrade from r-locked state to rw-locked state */ -ERTS_GLB_INLINE int -erts_smp_rwmtx_trywlock(erts_smp_rwmtx_t *rwmtx) -{ - return 0; -} - -/* Upgrade from r-locked state to rw-locked state */ -ERTS_GLB_INLINE void -erts_smp_rwmtx_wlock(erts_smp_rwmtx_t *rwmtx) -{ - -} - -/* Downgrade from rw-locked state to r-locked state */ -ERTS_GLB_INLINE void -erts_smp_rwmtx_wunlock(erts_smp_rwmtx_t *rwmtx) -{ - -} - -#endif - -ERTS_GLB_INLINE int -erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx) -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwmtx_is_rlocked(mtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx) -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwmtx_is_rwlocked(mtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_spinlock_init(lock, name, extra, flags); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock) -{ -#ifdef ERTS_SMP - erts_spinlock_destroy(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_spin_unlock(erts_smp_spinlock_t *lock) -{ -#ifdef ERTS_SMP - erts_spin_unlock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line) -#else -erts_smp_spin_lock(erts_smp_spinlock_t *lock) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_spin_lock_x(lock, file, line); -#elif defined(ERTS_SMP) - erts_spin_lock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock) -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_spinlock_is_locked(lock); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_rwlock_init(lock, name, extra, flags); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock) -{ -#ifdef ERTS_SMP - erts_rwlock_destroy(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_read_unlock(erts_smp_rwlock_t *lock) -{ -#ifdef ERTS_SMP - erts_read_unlock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line) -#else -erts_smp_read_lock(erts_smp_rwlock_t *lock) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) && defined(ERTS_SMP) - erts_read_lock_x(lock, file, line); -#elif defined(ERTS_SMP) - erts_read_lock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_write_unlock(erts_smp_rwlock_t *lock) -{ -#ifdef ERTS_SMP - erts_write_unlock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line) -#else -erts_smp_write_lock(erts_smp_rwlock_t *lock) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_write_lock_x(lock, file, line); -#elif defined(ERTS_SMP) - erts_write_lock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock) -{ -#if defined(ERTS_SMP) && 
defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwlock_is_rlocked(lock); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock) -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwlock_is_rwlocked(lock); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char* keyname) -{ -#ifdef ERTS_SMP - erts_tsd_key_create(keyp,keyname); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_tsd_key_delete(erts_smp_tsd_key_t key) -{ -#ifdef ERTS_SMP - erts_tsd_key_delete(key); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value) -{ -#ifdef ERTS_SMP - erts_tsd_set(key, value); -#endif -} - -ERTS_GLB_INLINE void * -erts_smp_tsd_get(erts_smp_tsd_key_t key) -{ -#ifdef ERTS_SMP - return erts_tsd_get(key); -#else - return NULL; -#endif -} - -#ifdef ERTS_THR_HAVE_SIG_FUNCS -#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1 - -ERTS_GLB_INLINE void -erts_smp_thr_sigmask(int how, const sigset_t *set, sigset_t *oset) -{ -#ifdef ERTS_SMP - erts_thr_sigmask(how, set, oset); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_thr_sigwait(const sigset_t *set, int *sig) -{ -#ifdef ERTS_SMP - erts_thr_sigwait(set, sig); -#endif -} - -#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */ - -#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ - -#endif /* ERL_SMP_H */ - -#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS - -/* Deprecated functions to replace */ - -#undef erts_smp_atomic_init -#undef erts_smp_atomic_set -#undef erts_smp_atomic_read -#undef erts_smp_atomic_inctest -#undef erts_smp_atomic_dectest -#undef erts_smp_atomic_inc -#undef erts_smp_atomic_dec -#undef erts_smp_atomic_addtest -#undef erts_smp_atomic_add -#undef erts_smp_atomic_xchg -#undef erts_smp_atomic_cmpxchg -#undef erts_smp_atomic_bor -#undef erts_smp_atomic_band - -#undef erts_smp_atomic32_init -#undef erts_smp_atomic32_set -#undef erts_smp_atomic32_read -#undef erts_smp_atomic32_inctest -#undef erts_smp_atomic32_dectest -#undef erts_smp_atomic32_inc -#undef erts_smp_atomic32_dec -#undef erts_smp_atomic32_addtest -#undef erts_smp_atomic32_add -#undef erts_smp_atomic32_xchg -#undef erts_smp_atomic32_cmpxchg -#undef erts_smp_atomic32_bor -#undef erts_smp_atomic32_band - -#endif diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c index 2a9f276e02..96824dc06e 100644 --- a/erts/emulator/beam/erl_thr_progress.c +++ b/erts/emulator/beam/erl_thr_progress.c @@ -80,7 +80,6 @@ #include "erl_thr_progress.h" #include "global.h" -#ifdef ERTS_SMP #define ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE 0 @@ -1513,4 +1512,3 @@ void erts_thr_progress_dbg_print_state(void) } -#endif diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h index b2894ba1fe..fa936b5707 100644 --- a/erts/emulator/beam/erl_thr_progress.h +++ b/erts/emulator/beam/erl_thr_progress.h @@ -33,18 +33,6 @@ #include "sys.h" -#ifndef ERTS_SMP - -#define erts_smp_thr_progress_block() ((void) 0) -#define erts_smp_thr_progress_unblock() ((void) 0) -#define erts_smp_thr_progress_is_blocking() 1 - -#else /* ERTS_SMP */ - -#define erts_smp_thr_progress_block erts_thr_progress_block -#define erts_smp_thr_progress_unblock erts_thr_progress_unblock -#define erts_smp_thr_progress_is_blocking erts_thr_progress_is_blocking - void erts_thr_progress_block(void); void erts_thr_progress_unblock(void); int erts_thr_progress_is_blocking(void); @@ -87,13 +75,10 @@ typedef struct { int 
erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp); void erts_thr_progress_fatal_error_wait(SWord timeout); -#endif /* ERTS_SMP */ typedef struct ErtsThrPrgrLaterOp_ ErtsThrPrgrLaterOp; struct ErtsThrPrgrLaterOp_ { -#ifdef ERTS_SMP ErtsThrPrgrVal later; -#endif void (*func)(void *); void *data; ErtsThrPrgrLaterOp *next; @@ -107,7 +92,6 @@ struct ErtsThrPrgrLaterOp_ { #include "erl_threads.h" #include "erl_process.h" -#ifdef ERTS_SMP /* ERTS_THR_PRGR_VAL_FIRST should only be used when initializing... */ #define ERTS_THR_PRGR_VAL_FIRST ((ErtsThrPrgrVal) 0) @@ -324,6 +308,5 @@ erts_thr_progress_has_reached(ErtsThrPrgrVal val) #endif -#endif /* ERTS_SMP */ #endif diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c index f56d0828dd..548c2768e5 100644 --- a/erts/emulator/beam/erl_thr_queue.c +++ b/erts/emulator/beam/erl_thr_queue.c @@ -87,32 +87,10 @@ #define ERTS_THR_Q_MAX_FINI_DEQ_OPS 50 -#ifdef ERTS_SMP ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(sl_element, ErtsThrQElement_t, 1000, ERTS_ALC_T_THR_Q_EL_SL) -#else - -static void -init_sl_element_alloc(void) -{ -} - -static ErtsThrQElement_t * -sl_element_alloc(void) -{ - return erts_alloc(ERTS_ALC_T_THR_Q_EL_SL, - sizeof(ErtsThrQElement_t)); -} - -static void -sl_element_free(ErtsThrQElement_t *p) -{ - erts_free(ERTS_ALC_T_THR_Q_EL_SL, p); -} - -#endif #define ErtsThrQDirtyReadEl(A) \ ((ErtsThrQElement_t *) erts_atomic_read_dirty((A))) @@ -135,14 +113,6 @@ static void noop_callback(void *arg) { } void erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi) { -#ifndef USE_THREADS - q->init = *qi; - if (!q->init.notify) - q->init.notify = noop_callback; - q->first = NULL; - q->last = NULL; - q->q.blk = NULL; -#else erts_atomic_init_nob(&q->tail.data.marker.next, ERTS_AINT_NULL); q->tail.data.marker.data.ptr = NULL; erts_atomic_init_nob(&q->tail.data.last, @@ -164,10 +134,8 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi) q->head.deq_fini.automatic = qi->auto_finalize_dequeue; q->head.deq_fini.start = NULL; q->head.deq_fini.end = NULL; -#ifdef ERTS_SMP q->head.next.thr_progress = erts_thr_progress_current(); q->head.next.thr_progress_reached = 1; -#endif q->head.next.um_refc_ix = 1; q->head.next.unref_end = &q->tail.data.marker; q->head.used_marker = 1; @@ -176,15 +144,12 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi) q->q.finalizing = 0; q->q.live = qi->live.queue; q->q.blk = NULL; -#endif } ErtsThrQCleanState_t erts_thr_q_finalize(ErtsThrQ_t *q) { -#ifdef USE_THREADS q->q.finalizing = 1; -#endif while (erts_thr_q_dequeue(q)); return erts_thr_q_clean(q); } @@ -229,7 +194,6 @@ erts_thr_q_destroy(ErtsThrQ_t *q) return erts_thr_q_finalize(q); } -#ifdef USE_THREADS static void destroy(ErtsThrQ_t *q) @@ -249,7 +213,6 @@ destroy(ErtsThrQ_t *q) erts_free(atype, q->q.blk); } -#endif static ERTS_INLINE ErtsThrQElement_t * element_live_alloc(ErtsThrQLive_t live) @@ -267,11 +230,7 @@ static ERTS_INLINE ErtsThrQElement_t * element_alloc(ErtsThrQ_t *q) { ErtsThrQLive_t live; -#ifdef USE_THREADS live = q->tail.data.live; -#else - live = q->init.live.objects; -#endif return element_live_alloc(live); } @@ -291,15 +250,10 @@ static ERTS_INLINE void element_free(ErtsThrQ_t *q, ErtsThrQElement_t *el) { ErtsThrQLive_t live; -#ifdef USE_THREADS live = q->head.live; -#else - live = q->init.live.objects; -#endif element_live_free(live, el); } -#ifdef USE_THREADS static ERTS_INLINE ErtsThrQElement_t * enqueue_managed(ErtsThrQ_t *q, ErtsThrQElement_t *this) @@ -423,11 +377,9 @@ clean(ErtsThrQ_t *q, int 
max_ops, int do_notify) return ERTS_THR_Q_CLEAN; } -#ifdef ERTS_SMP if (q->head.next.thr_progress_reached || erts_thr_progress_has_reached(q->head.next.thr_progress)) { q->head.next.thr_progress_reached = 1; -#endif um_refc_ix = q->head.next.um_refc_ix; if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) { /* Move unreferenced end pointer forward... */ @@ -439,23 +391,17 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify) ilast = (erts_aint_t) enqueue_marker(q, NULL); if (q->head.unref_end == (ErtsThrQElement_t *) ilast) - ERTS_SMP_MEMORY_BARRIER; + ERTS_THR_MEMORY_BARRIER; else { q->head.next.unref_end = (ErtsThrQElement_t *) ilast; -#ifdef ERTS_SMP q->head.next.thr_progress = erts_thr_progress_later(NULL); -#endif erts_atomic32_set_relb(&q->tail.data.um_refc_ix, um_refc_ix); q->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0; -#ifdef ERTS_SMP q->head.next.thr_progress_reached = 0; -#endif } } -#ifdef ERTS_SMP } -#endif head = ErtsThrQDirtyReadEl(&q->head.head); if (q->head.first == head) { @@ -489,9 +435,7 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify) check_thr_progress: -#ifdef ERTS_SMP if (q->head.next.thr_progress_reached) -#endif { int um_refc_ix = q->head.next.um_refc_ix; if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) { @@ -505,24 +449,16 @@ check_thr_progress: return ERTS_THR_Q_NEED_THR_PRGR; } -#endif ErtsThrQCleanState_t erts_thr_q_clean(ErtsThrQ_t *q) { -#ifdef USE_THREADS return clean(q, ERTS_THR_Q_MAX_SCHED_CLEAN_OPS, 0); -#else - return ERTS_THR_Q_CLEAN; -#endif } ErtsThrQCleanState_t erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty) { -#ifndef USE_THREADS - return ERTS_THR_Q_CLEAN; -#else ErtsThrQElement_t *head = ErtsThrQDirtyReadEl(&q->head.head); if (ensure_empty) { erts_aint_t inext; @@ -553,39 +489,21 @@ erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty) if (q->head.first != q->head.unref_end) return ERTS_THR_Q_DIRTY; -#ifdef ERTS_SMP if (q->head.next.thr_progress_reached) -#endif { int um_refc_ix = q->head.next.um_refc_ix; if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) return ERTS_THR_Q_DIRTY; } return ERTS_THR_Q_NEED_THR_PRGR; -#endif } static void enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this) { -#ifndef USE_THREADS - ASSERT(data); - - this->next = NULL; - this->data.ptr = data; - - if (q->last) - q->last->next = this; - else { - q->first = q->last = this; - q->init.notify(q->init.arg); - } -#else int notify; int um_refc_ix = 0; -#ifdef ERTS_SMP int unmanaged_thread; -#endif #if ERTS_THR_Q_DBG_CHK_DATA if (!data) @@ -596,10 +514,8 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this) this->data.ptr = data; -#ifdef ERTS_SMP unmanaged_thread = !erts_thr_progress_is_managed_thread(); if (unmanaged_thread) -#endif { um_refc_ix = erts_atomic32_read_acqb(&q->tail.data.um_refc_ix); while (1) { @@ -616,9 +532,7 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this) notify = this == enqueue_managed(q, this); -#ifdef ERTS_SMP if (unmanaged_thread) -#endif { if (notify) erts_atomic_dec_relb(&q->tail.data.um_refc[um_refc_ix]); @@ -627,7 +541,6 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this) } if (notify) q->tail.data.notify(q->tail.data.arg); -#endif } void @@ -645,9 +558,6 @@ erts_thr_q_prepare_enqueue(ErtsThrQ_t *q) int erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp) { -#ifndef USE_THREADS - return 0; -#else #ifdef DEBUG if (!q->head.deq_fini.start) { ASSERT(!q->head.deq_fini.end); @@ -670,14 +580,12 @@ 
erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp) q->head.deq_fini.start = NULL; q->head.deq_fini.end = NULL; return fdp->start != NULL; -#endif } void erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0, ErtsThrQFinDeQ_t *fdp1) { -#ifdef USE_THREADS if (fdp1->start) { if (fdp0->end) ErtsThrQDirtySetEl(&fdp0->end->next, fdp1->start); @@ -685,13 +593,11 @@ erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0, fdp0->start = fdp1->start; fdp0->end = fdp1->end; } -#endif } int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state) { -#ifdef USE_THREADS ErtsThrQElement_t *start = state->start; if (start) { ErtsThrQLive_t live; @@ -710,17 +616,14 @@ int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state) return 1; /* More to do */ state->end = NULL; } -#endif return 0; } void erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *state) { -#ifdef USE_THREADS state->start = NULL; state->end = NULL; -#endif } @@ -734,22 +637,6 @@ erts_thr_q_enqueue_prepared(ErtsThrQ_t *q, void *data, ErtsThrQPrepEnQ_t *prep) void * erts_thr_q_dequeue(ErtsThrQ_t *q) { -#ifndef USE_THREADS - void *res; - ErtsThrQElement_t *tmp; - - if (!q->first) - return NULL; - tmp = q->first; - res = tmp->data.ptr; - q->first = tmp->next; - if (!q->first) - q->last = NULL; - - element_free(q, tmp); - - return res; -#else ErtsThrQElement_t *head; erts_aint_t inext; void *res; @@ -778,7 +665,6 @@ erts_thr_q_dequeue(ErtsThrQ_t *q) ? ERTS_THR_Q_MAX_DEQUEUE_CLEAN_OPS : ERTS_THR_Q_MAX_SCHED_CLEAN_OPS), 1); return res; -#endif } #ifdef USE_LTTNG_VM_TRACEPOINTS @@ -786,14 +672,6 @@ int erts_thr_q_length_dirty(ErtsThrQ_t *q) { int n = 0; -#ifndef USE_THREADS - void *res; - ErtsThrQElement_t *tmp; - - for (tmp = q->first; tmp != NULL; tmp = tmp->next) { - n++; - } -#else ErtsThrQElement_t *e; erts_aint_t inext; @@ -808,7 +686,6 @@ erts_thr_q_length_dirty(ErtsThrQ_t *q) } inext = erts_atomic_read_acqb(&e->next); } -#endif return n; } #endif diff --git a/erts/emulator/beam/erl_thr_queue.h b/erts/emulator/beam/erl_thr_queue.h index 705a67af4c..163a25318d 100644 --- a/erts/emulator/beam/erl_thr_queue.h +++ b/erts/emulator/beam/erl_thr_queue.h @@ -78,11 +78,7 @@ typedef struct ErtsThrQElement_t_ ErtsThrQElement_t; typedef struct ErtsThrQElement_t ErtsThrQPrepEnQ_t; struct ErtsThrQElement_t_ { -#ifdef USE_THREADS erts_atomic_t next; -#else - ErtsThrQElement_t *next; -#endif union { erts_atomic_t atmc; void *ptr; @@ -100,7 +96,6 @@ typedef enum { ERTS_THR_Q_DIRTY, } ErtsThrQCleanState_t; -#ifdef USE_THREADS typedef struct { ErtsThrQElement_t marker; @@ -108,9 +103,7 @@ typedef struct { erts_atomic_t um_refc[2]; erts_atomic32_t um_refc_ix; ErtsThrQLive_t live; -#ifdef ERTS_SMP erts_atomic32_t thr_prgr_clean_scheduled; -#endif void *arg; void (*notify)(void *); } ErtsThrQTail_t; @@ -141,10 +134,8 @@ struct ErtsThrQ_t_ { ErtsThrQElement_t *end; } deq_fini; struct { -#ifdef ERTS_SMP ErtsThrPrgrVal thr_progress; int thr_progress_reached; -#endif int um_refc_ix; ErtsThrQElement_t *unref_end; } next; @@ -159,18 +150,6 @@ struct ErtsThrQ_t_ { } q; }; -#else /* !USE_THREADS */ - -struct ErtsThrQ_t_ { - ErtsThrQInit_t init; - ErtsThrQElement_t *first; - ErtsThrQElement_t *last; - struct { - void *blk; - } q; -}; - -#endif void erts_thr_q_init(void); void erts_thr_q_initialize(ErtsThrQ_t *, ErtsThrQInit_t *); @@ -194,19 +173,15 @@ void erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *); int erts_thr_q_length_dirty(ErtsThrQ_t *); #endif -#ifdef ERTS_SMP ERTS_GLB_INLINE ErtsThrPrgrVal 
erts_thr_q_need_thr_progress(ErtsThrQ_t *q); -#endif #if ERTS_GLB_INLINE_INCL_FUNC_DEF -#ifdef ERTS_SMP ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_q_need_thr_progress(ErtsThrQ_t *q) { return q->head.next.thr_progress; } -#endif #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h index 8b5c17d739..e306df818d 100644 --- a/erts/emulator/beam/erl_threads.h +++ b/erts/emulator/beam/erl_threads.h @@ -45,11 +45,6 @@ * Data dependency read barrier. Orders *only* loads * according to data dependency across the barrier. * - * If thread support has been disabled, these barriers will become no-ops. - * - * If the prefix ERTS_THR_ is replaced with ERTS_SMP_, the barriers will - * be enabled only in the SMP enabled runtime system. - * * --- Atomic operations --- * * Atomics operations exist for 32-bit, word size, and double word size @@ -86,20 +81,6 @@ * barrier. Load in atomic operation is ordered * before the barrier. * - * If thread support has been disabled, these functions are mapped to - * functions that performs the same operation, but aren't atomic - * and don't imply any memory barriers. - * - * If the atomic operations are prefixed with erts_smp_ instead of only - * erts_ the atomic operations will only be atomic in the SMP enabled - * runtime system, and will be mapped to non-atomic operations without - * memory barriers in the runtime system without SMP support. Atomic - * operations with erts_smp_ prefix should use the atomic types - * erts_smp_atomic32_t, erts_smp_atomic_t, and erts_smp_dw_atomic_t - * instead of erts_atomic32_t, erts_atomic_t, and erts_dw_atomic_t. The - * integer data types erts_aint32_t, erts_aint_t, and erts_dw_atomic_t - * are the same. - * * --- 32-bit atomic operations --- * * The following 32-bit atomic operations exist. 
<B> should be @@ -262,7 +243,6 @@ #include "erl_lock_flags.h" #include "erl_term.h" -#ifdef USE_THREADS #define ETHR_TRY_INLINE_FUNCS #include "ethread.h" @@ -405,76 +385,6 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *); # define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT #endif -#else /* #ifdef USE_THREADS */ - -#define ERTS_THR_MEMORY_BARRIER -#define ERTS_THR_WRITE_MEMORY_BARRIER -#define ERTS_THR_READ_MEMORY_BARRIER -#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER - -#define ERTS_THR_OPTS_DEFAULT_INITER 0 -typedef int erts_thr_opts_t; -typedef int erts_thr_init_data_t; -typedef int erts_thr_late_init_data_t; -typedef int erts_tid_t; -typedef int erts_mtx_t; -typedef int erts_cnd_t; -#define ERTS_RWMTX_OPT_DEFAULT_INITER {0} -#define ERTS_RWMTX_TYPE_NORMAL 0 -#define ERTS_RWMTX_TYPE_FREQUENT_READ 0 -#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0 -#define ERTS_RWMTX_LONG_LIVED 0 -#define ERTS_RWMTX_SHORT_LIVED 0 -#define ERTS_RWMTX_UNKNOWN_LIVED 0 -typedef struct { - char type; - char lived; - int main_spincount; - int aux_spincount; -} erts_rwmtx_opt_t; -typedef int erts_rwmtx_t; -typedef int erts_tsd_key_t; -typedef int erts_tse_t; - -typedef struct { SWord sint[2]; } erts_dw_aint_t; -typedef SWord erts_aint_t; -typedef Sint32 erts_aint32_t; -typedef Sint64 erts_aint64_t; - -#define erts_dw_atomic_t erts_dw_aint_t -#define erts_atomic_t erts_aint_t -#define erts_atomic32_t erts_aint32_t -#define erts_atomic64_t erts_aint64_t - -#if __GNUC__ > 2 -typedef struct { } erts_spinlock_t; -typedef struct { } erts_rwlock_t; -#else -typedef struct { int gcc_is_buggy; } erts_spinlock_t; -typedef struct { int gcc_is_buggy; } erts_rwlock_t; -#endif - -#ifdef WORDS_BIGENDIAN -#define ERTS_DW_AINT_LOW_WORD 1 -#define ERTS_DW_AINT_HIGH_WORD 0 -#else -#define ERTS_DW_AINT_LOW_WORD 0 -#define ERTS_DW_AINT_HIGH_WORD 1 -#endif - -#define ERTS_MTX_INITER 0 -#define ERTS_CND_INITER 0 -#define ERTS_THR_INIT_DATA_DEF_INITER 0 - -#define ERTS_HAVE_REC_MTX_INIT 1 - -#endif /* #ifdef USE_THREADS */ - -#define erts_no_dw_atomic_t erts_dw_aint_t -#define erts_no_atomic_t erts_aint_t -#define erts_no_atomic32_t erts_aint32_t -#define erts_no_atomic64_t erts_aint64_t - #define ERTS_AINT_NULL ((erts_aint_t) NULL) #define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1))) @@ -545,78 +455,6 @@ ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx); ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx); ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx); -ERTS_GLB_INLINE void erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val); -ERTS_GLB_INLINE void erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val); -ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var, - erts_no_dw_atomic_t *val, - erts_no_dw_atomic_t *old_val); -ERTS_GLB_INLINE void erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read(erts_no_atomic_t *var); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_inc_read(erts_no_atomic_t *incp); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_dec_read(erts_no_atomic_t *decp); -ERTS_GLB_INLINE void erts_no_atomic_inc(erts_no_atomic_t *incp); -ERTS_GLB_INLINE void erts_no_atomic_dec(erts_no_atomic_t *decp); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_add_read(erts_no_atomic_t *addp, - erts_aint_t i); -ERTS_GLB_INLINE void erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i); -ERTS_GLB_INLINE erts_aint_t 
erts_no_atomic_read_bor(erts_no_atomic_t *var, - erts_aint_t mask); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_band(erts_no_atomic_t *var, - erts_aint_t mask); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_xchg(erts_no_atomic_t *xchgp, - erts_aint_t new); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t expected); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bset(erts_no_atomic_t *var, - erts_aint_t mask, - erts_aint_t set); -ERTS_GLB_INLINE void erts_no_atomic32_set(erts_no_atomic32_t *var, - erts_aint32_t i); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read(erts_no_atomic32_t *var); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_inc_read(erts_no_atomic32_t *incp); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_dec_read(erts_no_atomic32_t *decp); -ERTS_GLB_INLINE void erts_no_atomic32_inc(erts_no_atomic32_t *incp); -ERTS_GLB_INLINE void erts_no_atomic32_dec(erts_no_atomic32_t *decp); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_add_read(erts_no_atomic32_t *addp, - erts_aint32_t i); -ERTS_GLB_INLINE void erts_no_atomic32_add(erts_no_atomic32_t *addp, - erts_aint32_t i); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bor(erts_no_atomic32_t *var, - erts_aint32_t mask); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_band(erts_no_atomic32_t *var, - erts_aint32_t mask); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, - erts_aint32_t new); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t expected); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bset(erts_no_atomic32_t *var, - erts_aint32_t mask, - erts_aint32_t set); -ERTS_GLB_INLINE void erts_no_atomic64_set(erts_no_atomic64_t *var, - erts_aint64_t i); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read(erts_no_atomic64_t *var); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_inc_read(erts_no_atomic64_t *incp); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_dec_read(erts_no_atomic64_t *decp); -ERTS_GLB_INLINE void erts_no_atomic64_inc(erts_no_atomic64_t *incp); -ERTS_GLB_INLINE void erts_no_atomic64_dec(erts_no_atomic64_t *decp); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_add_read(erts_no_atomic64_t *addp, - erts_aint64_t i); -ERTS_GLB_INLINE void erts_no_atomic64_add(erts_no_atomic64_t *addp, - erts_aint64_t i); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bor(erts_no_atomic64_t *var, - erts_aint64_t mask); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_band(erts_no_atomic64_t *var, - erts_aint64_t mask); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp, - erts_aint64_t new); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp, - erts_aint64_t new, - erts_aint64_t expected); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bset(erts_no_atomic64_t *var, - erts_aint64_t mask, - erts_aint64_t set); ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, @@ -670,13 +508,10 @@ ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset); ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); -#ifdef USE_THREADS ERTS_GLB_INLINE void erts_thr_kill(erts_tid_t tid, int sig); -#endif #endif /* #ifdef HAVE_ETHR_SIG_FUNCS */ -#ifdef USE_THREADS ERTS_GLB_INLINE erts_aint_t erts_atomic_read_bset_nob(erts_atomic_t *var, @@ -1684,379 +1519,6 @@ erts_atomic64_read_dirty(erts_atomic64_t *var) #endif /* ARCH_32 */ 
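
The hunk above begins the matching `!USE_THREADS` fallback in erl_threads.h, and the header comment earlier in this file documents the barrier suffixes carried by all of these names: `_nob` (no barrier), `_mb` (full memory barrier), `_acqb` (acquire), `_relb` (release), plus the read (`_rb`), write (`_wb`), and data-dependency-read (`_ddrb`) variants. A rough analogue of those orderings in portable C11 atomics, offered only as an illustration; the real operations are provided by the ethread layer this header includes:

    /* Illustrative mapping (not ERTS code): _nob ~ relaxed, _acqb ~ acquire,
     * _relb ~ release, _mb ~ full/sequentially consistent ordering. */
    #include <stdatomic.h>
    #include <stdio.h>

    static long inc_read_relb(atomic_long *v)   /* cf. erts_atomic_inc_read_relb */
    {
        /* release ordering: earlier stores become visible before this update;
         * returning the updated value mirrors the inc_read convention. */
        return atomic_fetch_add_explicit(v, 1, memory_order_release) + 1;
    }

    static long read_acqb(atomic_long *v)       /* cf. erts_atomic_read_acqb */
    {
        /* acquire ordering: later loads are not reordered before this read */
        return atomic_load_explicit(v, memory_order_acquire);
    }

    int main(void)
    {
        atomic_long counter = 0;
        printf("%ld\n", inc_read_relb(&counter)); /* 1 */
        printf("%ld\n", read_acqb(&counter));     /* 1 */
        return 0;
    }
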
-#else /* !USE_THREADS */ - -/* Double word size atomics */ - -#define erts_dw_atomic_init_nob erts_no_dw_atomic_set -#define erts_dw_atomic_set_nob erts_no_dw_atomic_set -#define erts_dw_atomic_read_nob erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_mb erts_no_dw_atomic_init -#define erts_dw_atomic_set_mb erts_no_dw_atomic_set -#define erts_dw_atomic_read_mb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_acqb erts_no_dw_atomic_init -#define erts_dw_atomic_set_acqb erts_no_dw_atomic_set -#define erts_dw_atomic_read_acqb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_relb erts_no_dw_atomic_init -#define erts_dw_atomic_set_relb erts_no_dw_atomic_set -#define erts_dw_atomic_read_relb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_ddrb erts_no_dw_atomic_init -#define erts_dw_atomic_set_ddrb erts_no_dw_atomic_set -#define erts_dw_atomic_read_ddrb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_rb erts_no_dw_atomic_init -#define erts_dw_atomic_set_rb erts_no_dw_atomic_set -#define erts_dw_atomic_read_rb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_wb erts_no_dw_atomic_init -#define erts_dw_atomic_set_wb erts_no_dw_atomic_set -#define erts_dw_atomic_read_wb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_set_dirty erts_no_dw_atomic_set -#define erts_dw_atomic_read_dirty erts_no_dw_atomic_read - -/* Word size atomics */ - -#define erts_atomic_init_nob erts_no_atomic_set -#define erts_atomic_set_nob erts_no_atomic_set -#define erts_atomic_read_nob erts_no_atomic_read -#define erts_atomic_inc_read_nob erts_no_atomic_inc_read -#define erts_atomic_dec_read_nob erts_no_atomic_dec_read -#define erts_atomic_inc_nob erts_no_atomic_inc -#define erts_atomic_dec_nob erts_no_atomic_dec -#define erts_atomic_add_read_nob erts_no_atomic_add_read -#define erts_atomic_add_nob erts_no_atomic_add -#define erts_atomic_read_bor_nob erts_no_atomic_read_bor -#define erts_atomic_read_band_nob erts_no_atomic_read_band -#define erts_atomic_xchg_nob erts_no_atomic_xchg -#define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_nob erts_no_atomic_read_bset - -#define erts_atomic_init_mb erts_no_atomic_set -#define erts_atomic_set_mb erts_no_atomic_set -#define erts_atomic_read_mb erts_no_atomic_read -#define erts_atomic_inc_read_mb erts_no_atomic_inc_read -#define erts_atomic_dec_read_mb erts_no_atomic_dec_read -#define erts_atomic_inc_mb erts_no_atomic_inc -#define erts_atomic_dec_mb erts_no_atomic_dec -#define erts_atomic_add_read_mb erts_no_atomic_add_read -#define erts_atomic_add_mb erts_no_atomic_add -#define erts_atomic_read_bor_mb erts_no_atomic_read_bor -#define erts_atomic_read_band_mb erts_no_atomic_read_band -#define erts_atomic_xchg_mb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_mb erts_no_atomic_read_bset - -#define erts_atomic_init_acqb erts_no_atomic_set -#define erts_atomic_set_acqb erts_no_atomic_set -#define erts_atomic_read_acqb erts_no_atomic_read -#define erts_atomic_inc_read_acqb erts_no_atomic_inc_read -#define erts_atomic_dec_read_acqb 
erts_no_atomic_dec_read -#define erts_atomic_inc_acqb erts_no_atomic_inc -#define erts_atomic_dec_acqb erts_no_atomic_dec -#define erts_atomic_add_read_acqb erts_no_atomic_add_read -#define erts_atomic_add_acqb erts_no_atomic_add -#define erts_atomic_read_bor_acqb erts_no_atomic_read_bor -#define erts_atomic_read_band_acqb erts_no_atomic_read_band -#define erts_atomic_xchg_acqb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_acqb erts_no_atomic_read_bset - -#define erts_atomic_init_relb erts_no_atomic_set -#define erts_atomic_set_relb erts_no_atomic_set -#define erts_atomic_read_relb erts_no_atomic_read -#define erts_atomic_inc_read_relb erts_no_atomic_inc_read -#define erts_atomic_dec_read_relb erts_no_atomic_dec_read -#define erts_atomic_inc_relb erts_no_atomic_inc -#define erts_atomic_dec_relb erts_no_atomic_dec -#define erts_atomic_add_read_relb erts_no_atomic_add_read -#define erts_atomic_add_relb erts_no_atomic_add -#define erts_atomic_read_bor_relb erts_no_atomic_read_bor -#define erts_atomic_read_band_relb erts_no_atomic_read_band -#define erts_atomic_xchg_relb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_relb erts_no_atomic_read_bset - -#define erts_atomic_init_ddrb erts_no_atomic_set -#define erts_atomic_set_ddrb erts_no_atomic_set -#define erts_atomic_read_ddrb erts_no_atomic_read -#define erts_atomic_inc_read_ddrb erts_no_atomic_inc_read -#define erts_atomic_dec_read_ddrb erts_no_atomic_dec_read -#define erts_atomic_inc_ddrb erts_no_atomic_inc -#define erts_atomic_dec_ddrb erts_no_atomic_dec -#define erts_atomic_add_read_ddrb erts_no_atomic_add_read -#define erts_atomic_add_ddrb erts_no_atomic_add -#define erts_atomic_read_bor_ddrb erts_no_atomic_read_bor -#define erts_atomic_read_band_ddrb erts_no_atomic_read_band -#define erts_atomic_xchg_ddrb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_ddrb erts_no_atomic_read_bset - -#define erts_atomic_init_rb erts_no_atomic_set -#define erts_atomic_set_rb erts_no_atomic_set -#define erts_atomic_read_rb erts_no_atomic_read -#define erts_atomic_inc_read_rb erts_no_atomic_inc_read -#define erts_atomic_dec_read_rb erts_no_atomic_dec_read -#define erts_atomic_inc_rb erts_no_atomic_inc -#define erts_atomic_dec_rb erts_no_atomic_dec -#define erts_atomic_add_read_rb erts_no_atomic_add_read -#define erts_atomic_add_rb erts_no_atomic_add -#define erts_atomic_read_bor_rb erts_no_atomic_read_bor -#define erts_atomic_read_band_rb erts_no_atomic_read_band -#define erts_atomic_xchg_rb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_rb erts_no_atomic_read_bset - -#define erts_atomic_init_wb erts_no_atomic_set -#define erts_atomic_set_wb erts_no_atomic_set -#define erts_atomic_read_wb erts_no_atomic_read -#define erts_atomic_inc_read_wb erts_no_atomic_inc_read -#define erts_atomic_dec_read_wb erts_no_atomic_dec_read -#define erts_atomic_inc_wb erts_no_atomic_inc -#define erts_atomic_dec_wb erts_no_atomic_dec -#define erts_atomic_add_read_wb erts_no_atomic_add_read -#define erts_atomic_add_wb erts_no_atomic_add -#define erts_atomic_read_bor_wb erts_no_atomic_read_bor -#define erts_atomic_read_band_wb erts_no_atomic_read_band -#define erts_atomic_xchg_wb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_wb erts_no_atomic_read_bset - -#define 
erts_atomic_set_dirty erts_no_atomic_set -#define erts_atomic_read_dirty erts_no_atomic_read - -/* 32-bit atomics */ - -#define erts_atomic32_init_nob erts_no_atomic32_set -#define erts_atomic32_set_nob erts_no_atomic32_set -#define erts_atomic32_read_nob erts_no_atomic32_read -#define erts_atomic32_inc_read_nob erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_nob erts_no_atomic32_dec_read -#define erts_atomic32_inc_nob erts_no_atomic32_inc -#define erts_atomic32_dec_nob erts_no_atomic32_dec -#define erts_atomic32_add_read_nob erts_no_atomic32_add_read -#define erts_atomic32_add_nob erts_no_atomic32_add -#define erts_atomic32_read_bor_nob erts_no_atomic32_read_bor -#define erts_atomic32_read_band_nob erts_no_atomic32_read_band -#define erts_atomic32_xchg_nob erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_nob erts_no_atomic32_read_bset - -#define erts_atomic32_init_mb erts_no_atomic32_set -#define erts_atomic32_set_mb erts_no_atomic32_set -#define erts_atomic32_read_mb erts_no_atomic32_read -#define erts_atomic32_inc_read_mb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_mb erts_no_atomic32_dec_read -#define erts_atomic32_inc_mb erts_no_atomic32_inc -#define erts_atomic32_dec_mb erts_no_atomic32_dec -#define erts_atomic32_add_read_mb erts_no_atomic32_add_read -#define erts_atomic32_add_mb erts_no_atomic32_add -#define erts_atomic32_read_bor_mb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_mb erts_no_atomic32_read_band -#define erts_atomic32_xchg_mb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_mb erts_no_atomic32_read_bset - -#define erts_atomic32_init_acqb erts_no_atomic32_set -#define erts_atomic32_set_acqb erts_no_atomic32_set -#define erts_atomic32_read_acqb erts_no_atomic32_read -#define erts_atomic32_inc_read_acqb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_acqb erts_no_atomic32_dec_read -#define erts_atomic32_inc_acqb erts_no_atomic32_inc -#define erts_atomic32_dec_acqb erts_no_atomic32_dec -#define erts_atomic32_add_read_acqb erts_no_atomic32_add_read -#define erts_atomic32_add_acqb erts_no_atomic32_add -#define erts_atomic32_read_bor_acqb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_acqb erts_no_atomic32_read_band -#define erts_atomic32_xchg_acqb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_acqb erts_no_atomic32_read_bset - -#define erts_atomic32_init_relb erts_no_atomic32_set -#define erts_atomic32_set_relb erts_no_atomic32_set -#define erts_atomic32_read_relb erts_no_atomic32_read -#define erts_atomic32_inc_read_relb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_relb erts_no_atomic32_dec_read -#define erts_atomic32_inc_relb erts_no_atomic32_inc -#define erts_atomic32_dec_relb erts_no_atomic32_dec -#define erts_atomic32_add_read_relb erts_no_atomic32_add_read -#define erts_atomic32_add_relb erts_no_atomic32_add -#define erts_atomic32_read_bor_relb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_relb erts_no_atomic32_read_band -#define erts_atomic32_xchg_relb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_relb erts_no_atomic32_read_bset - -#define erts_atomic32_init_ddrb erts_no_atomic32_set -#define erts_atomic32_set_ddrb erts_no_atomic32_set -#define erts_atomic32_read_ddrb erts_no_atomic32_read -#define 
erts_atomic32_inc_read_ddrb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_ddrb erts_no_atomic32_dec_read -#define erts_atomic32_inc_ddrb erts_no_atomic32_inc -#define erts_atomic32_dec_ddrb erts_no_atomic32_dec -#define erts_atomic32_add_read_ddrb erts_no_atomic32_add_read -#define erts_atomic32_add_ddrb erts_no_atomic32_add -#define erts_atomic32_read_bor_ddrb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_ddrb erts_no_atomic32_read_band -#define erts_atomic32_xchg_ddrb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_ddrb erts_no_atomic32_read_bset - -#define erts_atomic32_init_rb erts_no_atomic32_set -#define erts_atomic32_set_rb erts_no_atomic32_set -#define erts_atomic32_read_rb erts_no_atomic32_read -#define erts_atomic32_inc_read_rb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_rb erts_no_atomic32_dec_read -#define erts_atomic32_inc_rb erts_no_atomic32_inc -#define erts_atomic32_dec_rb erts_no_atomic32_dec -#define erts_atomic32_add_read_rb erts_no_atomic32_add_read -#define erts_atomic32_add_rb erts_no_atomic32_add -#define erts_atomic32_read_bor_rb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_rb erts_no_atomic32_read_band -#define erts_atomic32_xchg_rb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_rb erts_no_atomic32_read_bset - -#define erts_atomic32_init_wb erts_no_atomic32_set -#define erts_atomic32_set_wb erts_no_atomic32_set -#define erts_atomic32_read_wb erts_no_atomic32_read -#define erts_atomic32_inc_read_wb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_wb erts_no_atomic32_dec_read -#define erts_atomic32_inc_wb erts_no_atomic32_inc -#define erts_atomic32_dec_wb erts_no_atomic32_dec -#define erts_atomic32_add_read_wb erts_no_atomic32_add_read -#define erts_atomic32_add_wb erts_no_atomic32_add -#define erts_atomic32_read_bor_wb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_wb erts_no_atomic32_read_band -#define erts_atomic32_xchg_wb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_wb erts_no_atomic32_read_bset - -#define erts_atomic32_set_dirty erts_no_atomic32_set -#define erts_atomic32_read_dirty erts_no_atomic32_read - -/* 64-bit atomics */ - -#define erts_atomic64_init_nob erts_no_atomic64_set -#define erts_atomic64_set_nob erts_no_atomic64_set -#define erts_atomic64_read_nob erts_no_atomic64_read -#define erts_atomic64_inc_read_nob erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_nob erts_no_atomic64_dec_read -#define erts_atomic64_inc_nob erts_no_atomic64_inc -#define erts_atomic64_dec_nob erts_no_atomic64_dec -#define erts_atomic64_add_read_nob erts_no_atomic64_add_read -#define erts_atomic64_add_nob erts_no_atomic64_add -#define erts_atomic64_read_bor_nob erts_no_atomic64_read_bor -#define erts_atomic64_read_band_nob erts_no_atomic64_read_band -#define erts_atomic64_xchg_nob erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_nob erts_no_atomic64_read_bset - -#define erts_atomic64_init_mb erts_no_atomic64_set -#define erts_atomic64_set_mb erts_no_atomic64_set -#define erts_atomic64_read_mb erts_no_atomic64_read -#define erts_atomic64_inc_read_mb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_mb erts_no_atomic64_dec_read -#define erts_atomic64_inc_mb erts_no_atomic64_inc -#define erts_atomic64_dec_mb 
erts_no_atomic64_dec -#define erts_atomic64_add_read_mb erts_no_atomic64_add_read -#define erts_atomic64_add_mb erts_no_atomic64_add -#define erts_atomic64_read_bor_mb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_mb erts_no_atomic64_read_band -#define erts_atomic64_xchg_mb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_mb erts_no_atomic64_read_bset - -#define erts_atomic64_init_acqb erts_no_atomic64_set -#define erts_atomic64_set_acqb erts_no_atomic64_set -#define erts_atomic64_read_acqb erts_no_atomic64_read -#define erts_atomic64_inc_read_acqb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_acqb erts_no_atomic64_dec_read -#define erts_atomic64_inc_acqb erts_no_atomic64_inc -#define erts_atomic64_dec_acqb erts_no_atomic64_dec -#define erts_atomic64_add_read_acqb erts_no_atomic64_add_read -#define erts_atomic64_add_acqb erts_no_atomic64_add -#define erts_atomic64_read_bor_acqb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_acqb erts_no_atomic64_read_band -#define erts_atomic64_xchg_acqb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_acqb erts_no_atomic64_read_bset - -#define erts_atomic64_init_relb erts_no_atomic64_set -#define erts_atomic64_set_relb erts_no_atomic64_set -#define erts_atomic64_read_relb erts_no_atomic64_read -#define erts_atomic64_inc_read_relb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_relb erts_no_atomic64_dec_read -#define erts_atomic64_inc_relb erts_no_atomic64_inc -#define erts_atomic64_dec_relb erts_no_atomic64_dec -#define erts_atomic64_add_read_relb erts_no_atomic64_add_read -#define erts_atomic64_add_relb erts_no_atomic64_add -#define erts_atomic64_read_bor_relb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_relb erts_no_atomic64_read_band -#define erts_atomic64_xchg_relb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_relb erts_no_atomic64_read_bset - -#define erts_atomic64_init_ddrb erts_no_atomic64_set -#define erts_atomic64_set_ddrb erts_no_atomic64_set -#define erts_atomic64_read_ddrb erts_no_atomic64_read -#define erts_atomic64_inc_read_ddrb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_ddrb erts_no_atomic64_dec_read -#define erts_atomic64_inc_ddrb erts_no_atomic64_inc -#define erts_atomic64_dec_ddrb erts_no_atomic64_dec -#define erts_atomic64_add_read_ddrb erts_no_atomic64_add_read -#define erts_atomic64_add_ddrb erts_no_atomic64_add -#define erts_atomic64_read_bor_ddrb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_ddrb erts_no_atomic64_read_band -#define erts_atomic64_xchg_ddrb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_ddrb erts_no_atomic64_read_bset - -#define erts_atomic64_init_rb erts_no_atomic64_set -#define erts_atomic64_set_rb erts_no_atomic64_set -#define erts_atomic64_read_rb erts_no_atomic64_read -#define erts_atomic64_inc_read_rb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_rb erts_no_atomic64_dec_read -#define erts_atomic64_inc_rb erts_no_atomic64_inc -#define erts_atomic64_dec_rb erts_no_atomic64_dec -#define erts_atomic64_add_read_rb erts_no_atomic64_add_read -#define erts_atomic64_add_rb erts_no_atomic64_add -#define erts_atomic64_read_bor_rb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_rb erts_no_atomic64_read_band -#define erts_atomic64_xchg_rb 
erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_rb erts_no_atomic64_read_bset - -#define erts_atomic64_init_wb erts_no_atomic64_set -#define erts_atomic64_set_wb erts_no_atomic64_set -#define erts_atomic64_read_wb erts_no_atomic64_read -#define erts_atomic64_inc_read_wb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_wb erts_no_atomic64_dec_read -#define erts_atomic64_inc_wb erts_no_atomic64_inc -#define erts_atomic64_dec_wb erts_no_atomic64_dec -#define erts_atomic64_add_read_wb erts_no_atomic64_add_read -#define erts_atomic64_add_wb erts_no_atomic64_add -#define erts_atomic64_read_bor_wb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_wb erts_no_atomic64_read_band -#define erts_atomic64_xchg_wb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_wb erts_no_atomic64_read_bset - -#define erts_atomic64_set_dirty erts_no_atomic64_set -#define erts_atomic64_read_dirty erts_no_atomic64_read - -#endif /* !USE_THREADS */ #include "erl_msacc.h" @@ -2065,110 +1527,83 @@ erts_atomic64_read_dirty(erts_atomic64_t *var) ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id) { -#ifdef USE_THREADS int res = ethr_init(id); if (res) erts_thr_fatal_error(res, "initialize thread library"); -#endif } ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id) { -#ifdef USE_THREADS int res = ethr_late_init(id); if (res) erts_thr_fatal_error(res, "complete initialization of thread library"); -#endif } ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg, erts_thr_opts_t *opts) { -#ifdef USE_THREADS int res = ethr_thr_create(tid, func, arg, opts); if (res) erts_thr_fatal_error(res, "create thread"); -#endif } ERTS_GLB_INLINE void erts_thr_join(erts_tid_t tid, void **thr_res) { -#ifdef USE_THREADS int res = ethr_thr_join(tid, thr_res); if (res) erts_thr_fatal_error(res, "join thread"); -#endif } ERTS_GLB_INLINE void erts_thr_detach(erts_tid_t tid) { -#ifdef USE_THREADS int res = ethr_thr_detach(tid); if (res) erts_thr_fatal_error(res, "detach thread"); -#endif } ERTS_GLB_INLINE void erts_thr_exit(void *res) { -#ifdef USE_THREADS ethr_thr_exit(res); erts_thr_fatal_error(0, "terminate thread"); -#endif } ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void)) { -#ifdef USE_THREADS int res = ethr_install_exit_handler(exit_handler); if (res != 0) erts_thr_fatal_error(res, "install thread exit handler"); -#endif } ERTS_GLB_INLINE erts_tid_t erts_thr_self(void) { -#ifdef USE_THREADS return ethr_self(); -#else - return 0; -#endif } ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len) { -#ifdef USE_THREADS return ethr_getname(tid, buf, len); -#else - return -1; -#endif } ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y) { -#ifdef USE_THREADS return ethr_equal_tids(x, y); -#else - return 1; -#endif } ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) { -#ifdef USE_THREADS int res = ethr_mutex_init(&mtx->mtx); if (res) { erts_thr_fatal_error(res, "initialize mutex"); @@ -2185,13 +1620,11 @@ erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_init_ref_x(&mtx->lcnt, name, extra, flags); #endif -#endif /* USE_THREADS */ } ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) { -#ifdef USE_THREADS 
erts_mtx_init(mtx, name, extra, flags); ethr_mutex_lock(&mtx->mtx); @@ -2201,13 +1634,11 @@ erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_trylock(&mtx->lcnt, 1); #endif -#endif } ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx) { -#ifdef USE_THREADS int res; ASSERT(!(mtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC)); @@ -2230,7 +1661,6 @@ erts_mtx_destroy(erts_mtx_t *mtx) #endif erts_thr_fatal_error(res, "destroy mutex"); } -#endif } ERTS_GLB_INLINE int @@ -2240,7 +1670,6 @@ erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, unsigned int line) erts_mtx_trylock(erts_mtx_t *mtx) #endif { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_CHECK @@ -2262,9 +1691,6 @@ erts_mtx_trylock(erts_mtx_t *mtx) erts_lcnt_trylock(&mtx->lcnt, res); #endif return res; -#else - return 0; -#endif } @@ -2275,7 +1701,6 @@ erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line) erts_mtx_lock(erts_mtx_t *mtx) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_x(&mtx->lc, file, line); @@ -2290,13 +1715,11 @@ erts_mtx_lock(erts_mtx_t *mtx) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&mtx->lcnt, file, line); #endif -#endif } ERTS_GLB_INLINE void erts_mtx_unlock(erts_mtx_t *mtx) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock(&mtx->lc); #endif @@ -2304,13 +1727,12 @@ erts_mtx_unlock(erts_mtx_t *mtx) erts_lcnt_unlock(&mtx->lcnt); #endif ethr_mutex_unlock(&mtx->mtx); -#endif } ERTS_GLB_INLINE int erts_lc_mtx_is_locked(erts_mtx_t *mtx) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = mtx->lc; lc.flags = ERTS_LOCK_FLAGS_TYPE_MUTEX; @@ -2325,17 +1747,14 @@ erts_lc_mtx_is_locked(erts_mtx_t *mtx) ERTS_GLB_INLINE void erts_cnd_init(erts_cnd_t *cnd) { -#ifdef USE_THREADS int res = ethr_cond_init(cnd); if (res) erts_thr_fatal_error(res, "initialize condition variable"); -#endif } ERTS_GLB_INLINE void erts_cnd_destroy(erts_cnd_t *cnd) { -#ifdef USE_THREADS int res = ethr_cond_destroy(cnd); if (res != 0) { #ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG @@ -2348,13 +1767,11 @@ erts_cnd_destroy(erts_cnd_t *cnd) #endif erts_thr_fatal_error(res, "destroy condition variable"); } -#endif } ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx) { -#ifdef USE_THREADS int res; ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); #ifdef ERTS_ENABLE_LOCK_CHECK @@ -2376,7 +1793,6 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx) if (res != 0 && res != EINTR) erts_thr_fatal_error(res, "wait on condition variable"); ERTS_MSACC_POP_STATE(); -#endif } /* @@ -2392,18 +1808,14 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx) ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd) { -#ifdef USE_THREADS ethr_cond_signal(cnd); -#endif } ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd) { -#ifdef USE_THREADS ethr_cond_broadcast(cnd); -#endif } /* rwmutex */ @@ -2411,7 +1823,6 @@ erts_cnd_broadcast(erts_cnd_t *cnd) ERTS_GLB_INLINE void erts_rwmtx_set_reader_group(int no) { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_check_no_locked_of_type(ERTS_LOCK_TYPE_RWMUTEX); @@ -2419,13 +1830,11 @@ erts_rwmtx_set_reader_group(int no) res = ethr_rwmutex_set_reader_group(no); if (res != 0) erts_thr_fatal_error(res, "set reader group"); -#endif } ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt, char *name, Eterm extra, erts_lock_flags_t flags) { 
-#ifdef USE_THREADS int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt); if (res != 0) { erts_thr_fatal_error(res, "initialize rwmutex"); @@ -2442,7 +1851,6 @@ erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt, #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_init_ref_x(&rwmtx->lcnt, name, extra, flags); #endif -#endif /* USE_THREADS */ } ERTS_GLB_INLINE void @@ -2454,7 +1862,6 @@ erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name, Eterm extra, ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx) { -#ifdef USE_THREADS int res; ASSERT(!(rwmtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC)); @@ -2477,7 +1884,6 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx) #endif erts_thr_fatal_error(res, "destroy rwmutex"); } -#endif } ERTS_GLB_INLINE int @@ -2487,7 +1893,6 @@ erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx) #endif { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_CHECK @@ -2510,9 +1915,6 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx) #endif return res; -#else - return 0; -#endif } ERTS_GLB_INLINE void @@ -2522,7 +1924,6 @@ erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) erts_rwmtx_rlock(erts_rwmtx_t *rwmtx) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line); @@ -2537,13 +1938,11 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line); #endif -#endif } ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ); #endif @@ -2551,7 +1950,6 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx) erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ); #endif ethr_rwmutex_runlock(&rwmtx->rwmtx); -#endif } @@ -2562,7 +1960,6 @@ erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx) #endif { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_CHECK @@ -2585,9 +1982,6 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx) #endif return res; -#else - return 0; -#endif } ERTS_GLB_INLINE void @@ -2597,7 +1991,6 @@ erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line); @@ -2612,13 +2005,11 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line); #endif -#endif } ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR); #endif @@ -2626,7 +2017,6 @@ erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx) erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR); #endif ethr_rwmutex_rwunlock(&rwmtx->rwmtx); -#endif } #if 0 /* The following rwmtx function names are @@ -2658,7 +2048,7 @@ erts_rwmtx_wunlock(erts_rwmtx_t *rwmtx) ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = mtx->lc; lc.flags = ERTS_LOCK_TYPE_RWMUTEX; @@ -2673,7 +2063,7 @@ erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx) ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx) { -#if defined(USE_THREADS) && 
defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = mtx->lc; lc.flags = ERTS_LOCK_TYPE_RWMUTEX; @@ -2685,334 +2075,11 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx) #endif } -/* No atomic ops */ - -ERTS_GLB_INLINE void -erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val) -{ - var->sint[0] = val->sint[0]; - var->sint[1] = val->sint[1]; -} - -ERTS_GLB_INLINE void -erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val) -{ - val->sint[0] = var->sint[0]; - val->sint[1] = var->sint[1]; -} - -ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var, - erts_no_dw_atomic_t *new_val, - erts_no_dw_atomic_t *old_val) -{ - if (var->sint[0] != old_val->sint[0] || var->sint[1] != old_val->sint[1]) { - erts_no_dw_atomic_read(var, old_val); - return 0; - } - else { - erts_no_dw_atomic_set(var, new_val); - return !0; - } -} - -ERTS_GLB_INLINE void -erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i) -{ - *var = i; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_read(erts_no_atomic_t *var) -{ - return *var; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_inc_read(erts_no_atomic_t *incp) -{ - return ++(*incp); -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_dec_read(erts_no_atomic_t *decp) -{ - return --(*decp); -} - -ERTS_GLB_INLINE void -erts_no_atomic_inc(erts_no_atomic_t *incp) -{ - ++(*incp); -} - -ERTS_GLB_INLINE void -erts_no_atomic_dec(erts_no_atomic_t *decp) -{ - --(*decp); -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_add_read(erts_no_atomic_t *addp, erts_aint_t i) -{ - return *addp += i; -} - -ERTS_GLB_INLINE void -erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i) -{ - *addp += i; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_read_bor(erts_no_atomic_t *var, erts_aint_t mask) -{ - erts_aint_t old; - old = *var; - *var |= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_read_band(erts_no_atomic_t *var, erts_aint_t mask) -{ - erts_aint_t old; - old = *var; - *var &= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_xchg(erts_no_atomic_t *xchgp, erts_aint_t new) -{ - erts_aint_t old = *xchgp; - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t expected) -{ - erts_aint_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_read_bset(erts_no_atomic_t *var, - erts_aint_t mask, - erts_aint_t set) -{ - erts_aint_t old = *var; - *var &= ~mask; - *var |= (mask & set); - return old; -} - -/* atomic32 */ - -ERTS_GLB_INLINE void -erts_no_atomic32_set(erts_no_atomic32_t *var, erts_aint32_t i) -{ - *var = i; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_read(erts_no_atomic32_t *var) -{ - return *var; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_inc_read(erts_no_atomic32_t *incp) -{ - return ++(*incp); -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_dec_read(erts_no_atomic32_t *decp) -{ - return --(*decp); -} - -ERTS_GLB_INLINE void -erts_no_atomic32_inc(erts_no_atomic32_t *incp) -{ - ++(*incp); -} - -ERTS_GLB_INLINE void -erts_no_atomic32_dec(erts_no_atomic32_t *decp) -{ - --(*decp); -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_add_read(erts_no_atomic32_t *addp, erts_aint32_t i) -{ - return *addp += i; -} - -ERTS_GLB_INLINE void -erts_no_atomic32_add(erts_no_atomic32_t *addp, erts_aint32_t i) -{ - *addp += i; -} - -ERTS_GLB_INLINE erts_aint32_t 
-erts_no_atomic32_read_bor(erts_no_atomic32_t *var, erts_aint32_t mask) -{ - erts_aint32_t old; - old = *var; - *var |= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_read_band(erts_no_atomic32_t *var, erts_aint32_t mask) -{ - erts_aint32_t old; - old = *var; - *var &= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, erts_aint32_t new) -{ - erts_aint32_t old = *xchgp; - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t expected) -{ - erts_aint32_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_read_bset(erts_no_atomic32_t *var, - erts_aint32_t mask, - erts_aint32_t set) -{ - erts_aint32_t old = *var; - *var &= ~mask; - *var |= (mask & set); - return old; -} - -/* atomic64 */ - -ERTS_GLB_INLINE void -erts_no_atomic64_set(erts_no_atomic64_t *var, erts_aint64_t i) -{ - *var = i; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_read(erts_no_atomic64_t *var) -{ - return *var; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_inc_read(erts_no_atomic64_t *incp) -{ - return ++(*incp); -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_dec_read(erts_no_atomic64_t *decp) -{ - return --(*decp); -} - -ERTS_GLB_INLINE void -erts_no_atomic64_inc(erts_no_atomic64_t *incp) -{ - ++(*incp); -} - -ERTS_GLB_INLINE void -erts_no_atomic64_dec(erts_no_atomic64_t *decp) -{ - --(*decp); -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_add_read(erts_no_atomic64_t *addp, erts_aint64_t i) -{ - return *addp += i; -} - -ERTS_GLB_INLINE void -erts_no_atomic64_add(erts_no_atomic64_t *addp, erts_aint64_t i) -{ - *addp += i; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_read_bor(erts_no_atomic64_t *var, erts_aint64_t mask) -{ - erts_aint64_t old; - old = *var; - *var |= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_read_band(erts_no_atomic64_t *var, erts_aint64_t mask) -{ - erts_aint64_t old; - old = *var; - *var &= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp, erts_aint64_t new) -{ - erts_aint64_t old = *xchgp; - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp, - erts_aint64_t new, - erts_aint64_t expected) -{ - erts_aint64_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_read_bset(erts_no_atomic64_t *var, - erts_aint64_t mask, - erts_aint64_t set) -{ - erts_aint64_t old = *var; - *var &= ~mask; - *var |= (mask & set); - return old; -} - /* spinlock */ ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) { -#ifdef USE_THREADS int res = ethr_spinlock_init(&lock->slck); if (res) { erts_thr_fatal_error(res, "init spinlock"); @@ -3029,13 +2096,11 @@ erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_fla #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags); #endif -#endif /* USE_THREADS */ } ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock) { -#ifdef USE_THREADS int res; ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC)); @@ -3058,15 +2123,11 @@ erts_spinlock_destroy(erts_spinlock_t *lock) #endif erts_thr_fatal_error(res, "destroy rwlock"); } -#else - (void)lock; -#endif } 
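For readers following the removal above: the deleted erts_no_atomic* fallbacks implemented the atomic API as plain memory operations, which was safe only while the non-threaded emulator ran a single scheduler thread. A minimal standalone sketch, using C11 <stdatomic.h> rather than the ethread atomics (an assumption purely for illustration, not part of the patch), contrasts the removed non-atomic compare-and-exchange with a genuinely atomic one:

    /* Illustrative only -- not part of the patch and not the ERTS types. */
    #include <stdatomic.h>
    #include <stdio.h>

    /* Plain, non-atomic compare-and-exchange: returns the old value and
     * stores the new value only if the old value matched what the caller
     * expected. This mirrors the removed erts_no_atomic_cmpxchg(). */
    static long plain_cmpxchg(long *var, long new_val, long expected)
    {
        long old = *var;
        if (old == expected)
            *var = new_val;
        return old;
    }

    int main(void)
    {
        long plain = 40;
        atomic_long real = 40;
        long exp = 40;

        /* Single-threaded use: both behave the same. */
        printf("plain: old=%ld now=%ld\n",
               plain_cmpxchg(&plain, 42, 40), plain);

        /* C11 atomic compare-exchange: thread-safe; on failure it writes
         * the current value back into 'exp'. */
        if (atomic_compare_exchange_strong(&real, &exp, 42))
            printf("atomic: swapped, now=%ld\n", (long) atomic_load(&real));
        return 0;
    }

The point of the contrast is only that the fallback relied on there being no concurrent writers, which is exactly the assumption this patch removes.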
ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock(&lock->lc); #endif @@ -3074,9 +2135,6 @@ erts_spin_unlock(erts_spinlock_t *lock) erts_lcnt_unlock(&lock->lcnt); #endif ethr_spin_unlock(&lock->slck); -#else - (void)lock; -#endif } ERTS_GLB_INLINE void @@ -3086,7 +2144,6 @@ erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line) erts_spin_lock(erts_spinlock_t *lock) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_x(&lock->lc,file,line); @@ -3101,15 +2158,12 @@ erts_spin_lock(erts_spinlock_t *lock) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&lock->lcnt, file, line); #endif -#else - (void)lock; -#endif } ERTS_GLB_INLINE int erts_lc_spinlock_is_locked(erts_spinlock_t *lock) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = lock->lc; lc.flags = ERTS_LOCK_TYPE_SPINLOCK; @@ -3126,7 +2180,6 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock) ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) { -#ifdef USE_THREADS int res = ethr_rwlock_init(&lock->rwlck); if (res) { erts_thr_fatal_error(res, "init rwlock"); @@ -3143,13 +2196,11 @@ erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags); #endif -#endif /* USE_THREADS */ } ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock) { -#ifdef USE_THREADS int res; ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC)); @@ -3172,15 +2223,11 @@ erts_rwlock_destroy(erts_rwlock_t *lock) #endif erts_thr_fatal_error(res, "destroy rwlock"); } -#else - (void)lock; -#endif } ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ); #endif @@ -3188,9 +2235,6 @@ erts_read_unlock(erts_rwlock_t *lock) erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ); #endif ethr_read_unlock(&lock->rwlck); -#else - (void)lock; -#endif } ERTS_GLB_INLINE void @@ -3200,7 +2244,6 @@ erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line) erts_read_lock(erts_rwlock_t *lock) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_READ,file,line); @@ -3215,15 +2258,11 @@ erts_read_lock(erts_rwlock_t *lock) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&lock->lcnt, file, line); #endif -#else - (void)lock; -#endif } ERTS_GLB_INLINE void erts_write_unlock(erts_rwlock_t *lock) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR); #endif @@ -3231,9 +2270,6 @@ erts_write_unlock(erts_rwlock_t *lock) erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR); #endif ethr_write_unlock(&lock->rwlck); -#else - (void)lock; -#endif } ERTS_GLB_INLINE void @@ -3243,7 +2279,6 @@ erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line) erts_write_lock(erts_rwlock_t *lock) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_RDWR,file,line); @@ -3258,15 +2293,12 @@ erts_write_lock(erts_rwlock_t *lock) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&lock->lcnt, file, line); #endif -#else - (void)lock; -#endif } ERTS_GLB_INLINE int 
erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = lock->lc; lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK; @@ -3281,7 +2313,7 @@ erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock) ERTS_GLB_INLINE int erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = lock->lc; lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK; @@ -3296,125 +2328,90 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock) ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname) { -#ifdef USE_THREADS int res = ethr_tsd_key_create(keyp, keyname); if (res) erts_thr_fatal_error(res, "create thread specific data key"); -#endif } ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key) { -#ifdef USE_THREADS int res = ethr_tsd_key_delete(key); if (res) erts_thr_fatal_error(res, "delete thread specific data key"); -#endif } ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value) { -#ifdef USE_THREADS int res = ethr_tsd_set(key, value); if (res) erts_thr_fatal_error(res, "set thread specific data"); -#endif } ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key) { -#ifdef USE_THREADS return ethr_tsd_get(key); -#else - return NULL; -#endif } ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void) { -#ifdef USE_THREADS return (erts_tse_t *) ethr_get_ts_event(); -#else - return (erts_tse_t *) NULL; -#endif } ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep) { -#ifdef USE_THREADS ethr_leave_ts_event(ep); -#endif } ERTS_GLB_INLINE void erts_tse_prepare_timed(erts_tse_t *ep) { -#ifdef USE_THREADS int res = ethr_event_prepare_timed(&((ethr_ts_event *) ep)->event); if (res != 0) erts_thr_fatal_error(res, "prepare timed"); -#endif } ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep) { -#ifdef USE_THREADS ethr_event_set(&((ethr_ts_event *) ep)->event); -#endif } ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep) { -#ifdef USE_THREADS ethr_event_reset(&((ethr_ts_event *) ep)->event); -#endif } ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep) { -#ifdef USE_THREADS int res; ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); res = ethr_event_wait(&((ethr_ts_event *) ep)->event); ERTS_MSACC_POP_STATE(); return res; -#else - return ENOTSUP; -#endif } ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount) { -#ifdef USE_THREADS int res; ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); res = ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount); ERTS_MSACC_POP_STATE(); return res; -#else - return ENOTSUP; -#endif } ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo) { -#ifdef USE_THREADS int res; ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); res = ethr_event_twait(&((ethr_ts_event *) ep)->event, (ethr_sint64_t) tmo); ERTS_MSACC_POP_STATE(); return res; -#else - return ENOTSUP; -#endif } ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo) { -#ifdef USE_THREADS int res; ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); res = ethr_event_stwait(&((ethr_ts_event *) ep)->event, @@ -3422,49 +2419,34 @@ ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo) (ethr_sint64_t) tmo); ERTS_MSACC_POP_STATE(); return res; -#else - return ENOTSUP; -#endif } ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep) { -#ifdef USE_THREADS return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP; -#else - return 0; -#endif } 
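The erts_tsd_* and erts_tse_* wrappers above now call straight into the ethr_* layer and treat any nonzero result as fatal. A rough standalone analogy of that wrap-and-abort pattern, using POSIX thread-specific data instead of the ethread API (an assumption for illustration only, not part of the patch):

    /* Illustrative analogy only -- POSIX keys, not ethr_tsd_*. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_key_t key;

    /* Same "wrap and abort on error" style as erts_thr_fatal_error(). */
    static void fatal(int res, const char *what)
    {
        if (res != 0) {
            fprintf(stderr, "fatal: %s (%d)\n", what, res);
            abort();
        }
    }

    static void *worker(void *arg)
    {
        /* Each thread sees its own value under the same key. */
        fatal(pthread_setspecific(key, arg), "set thread specific data");
        printf("worker sees: %s\n", (const char *) pthread_getspecific(key));
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        fatal(pthread_key_create(&key, NULL),
              "create thread specific data key");
        fatal(pthread_create(&tid, NULL, worker, "hello"), "create thread");
        fatal(pthread_join(tid, NULL), "join thread");
        fatal(pthread_key_delete(key), "delete thread specific data key");
        return 0;
    }

Compile with -pthread; the sketch only mirrors the error-handling shape of the wrappers, not their semantics.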
ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no) { -#ifdef USE_THREADS int res = ethr_set_main_thr_status(on, no); if (res != 0) erts_thr_fatal_error(res, "set thread main status"); -#endif } ERTS_GLB_INLINE int erts_thr_get_main_status(void) { -#ifdef USE_THREADS int main_status; int res = ethr_get_main_thr_status(&main_status); if (res != 0) erts_thr_fatal_error(res, "get thread main status"); return main_status; -#else - return 1; -#endif } ERTS_GLB_INLINE void erts_thr_yield(void) { -#ifdef USE_THREADS int res = ETHR_YIELD(); if (res != 0) erts_thr_fatal_error(res, "yield"); -#endif } @@ -3472,34 +2454,28 @@ ERTS_GLB_INLINE void erts_thr_yield(void) ERTS_GLB_INLINE void erts_thr_kill(erts_tid_t tid, int sig) { -#ifdef USE_THREADS int res = ethr_kill((ethr_tid)tid, sig); if (res) erts_thr_fatal_error(res, "killing thread"); -#endif } ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset) { -#ifdef USE_THREADS int res = ethr_sigmask(how, set, oset); if (res) erts_thr_fatal_error(res, "get or set signal mask"); -#endif } ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig) { -#ifdef USE_THREADS int res; do { res = ethr_sigwait(set, sig); } while (res == EINTR); if (res) erts_thr_fatal_error(res, "to wait for signal"); -#endif } #endif /* #ifdef HAVE_ETHR_SIG_FUNCS */ @@ -3507,37 +2483,3 @@ erts_thr_sigwait(const sigset_t *set, int *sig) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ #endif /* #ifndef ERL_THREAD_H__ */ - -#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS - -/* Deprecated functions to replace */ - -#undef erts_atomic_init -#undef erts_atomic_set -#undef erts_atomic_read -#undef erts_atomic_inctest -#undef erts_atomic_dectest -#undef erts_atomic_inc -#undef erts_atomic_dec -#undef erts_atomic_addtest -#undef erts_atomic_add -#undef erts_atomic_xchg -#undef erts_atomic_cmpxchg -#undef erts_atomic_bor -#undef erts_atomic_band - -#undef erts_atomic32_init -#undef erts_atomic32_set -#undef erts_atomic32_read -#undef erts_atomic32_inctest -#undef erts_atomic32_dectest -#undef erts_atomic32_inc -#undef erts_atomic32_dec -#undef erts_atomic32_addtest -#undef erts_atomic32_add -#undef erts_atomic32_xchg -#undef erts_atomic32_cmpxchg -#undef erts_atomic32_bor -#undef erts_atomic32_band - -#endif diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c index 979c03fd43..f2e0900fec 100644 --- a/erts/emulator/beam/erl_time_sup.c +++ b/erts/emulator/beam/erl_time_sup.c @@ -36,11 +36,11 @@ #include "erl_driver.h" #include "erl_nif.h" -static erts_smp_mtx_t erts_get_time_mtx; +static erts_mtx_t erts_get_time_mtx; /* used by erts_runtime_elapsed_both */ typedef struct { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; ErtsMonotonicTime user; ErtsMonotonicTime sys; } ErtsRunTimePrevData; @@ -51,13 +51,13 @@ static union { } runtime_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE); static union { - erts_smp_atomic64_t time; - char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_atomic64_t))]; + erts_atomic64_t time; + char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic64_t))]; } wall_clock_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE); static union { - erts_smp_atomic64_t time; - char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_atomic64_t))]; + erts_atomic64_t time; + char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic64_t))]; } now_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE); static ErtsMonitor *time_offset_monitors = NULL; @@ -157,7 +157,7 @@ typedef struct { struct 
time_sup_infrequently_changed__ { #ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT struct { - erts_smp_rwmtx_t rwmtx; + erts_rwmtx_t rwmtx; ErtsTWheelTimer timer; ErtsMonotonicCorrectionData cdata; } parmon; @@ -165,9 +165,9 @@ struct time_sup_infrequently_changed__ { #endif ErtsSystemTime sinit; ErtsMonotonicTime not_corrected_moffset; - erts_smp_atomic64_t offset; + erts_atomic64_t offset; ErtsMonotonicTime shadow_offset; - erts_smp_atomic32_t preliminary_offset; + erts_atomic32_t preliminary_offset; }; struct time_sup_frequently_changed__ { @@ -205,19 +205,19 @@ erts_get_approx_time(void) static ERTS_INLINE void init_time_offset(ErtsMonotonicTime offset) { - erts_smp_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset); + erts_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset); } static ERTS_INLINE void set_time_offset(ErtsMonotonicTime offset) { - erts_smp_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset); + erts_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset); } static ERTS_INLINE ErtsMonotonicTime get_time_offset(void) { - return (ErtsMonotonicTime) erts_smp_atomic64_read_acqb(&time_sup.inf.c.offset); + return (ErtsMonotonicTime) erts_atomic64_read_acqb(&time_sup.inf.c.offset); } static ERTS_INLINE void @@ -298,7 +298,7 @@ read_corrected_time(int os_drift_corrected) ErtsMonotonicTime os_mtime; ErtsMonotonicCorrectionInstance ci; - erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); os_mtime = erts_os_monotonic_time(); @@ -311,7 +311,7 @@ read_corrected_time(int os_drift_corrected) ci = time_sup.inf.c.parmon.cdata.insts.prev; } - erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); return calc_corrected_erl_mtime(os_mtime, &ci, NULL, os_drift_corrected); @@ -389,13 +389,13 @@ check_time_correction(void *vesdp) int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time; int set_new_correction = 0, begin_short_intervals = 0; - erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); erts_os_times(&os_mtime, &os_stime); ci = time_sup.inf.c.parmon.cdata.insts.curr; - erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); if (os_mtime < ci.os_mtime) erts_exit(ERTS_ABORT_EXIT, @@ -410,7 +410,7 @@ check_time_correction(void *vesdp) if (time_sup.inf.c.shadow_offset) { ERTS_TIME_ASSERT(time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE); - if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) + if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) sdiff += time_sup.inf.c.shadow_offset; else time_sup.inf.c.shadow_offset = 0; @@ -433,7 +433,7 @@ check_time_correction(void *vesdp) } } else if ((time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE - && erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) + && erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) && (sdiff < -2*time_sup.r.o.adj.small_diff || 2*time_sup.r.o.adj.small_diff < sdiff)) { /* @@ -658,7 +658,7 @@ check_time_correction(void *vesdp) #endif if (set_new_correction) { - erts_smp_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx); os_mtime = erts_os_monotonic_time(); @@ -686,7 +686,7 @@ check_time_correction(void *vesdp) time_sup.inf.c.parmon.cdata.insts.curr.os_mtime = os_mtime; time_sup.inf.c.parmon.cdata.insts.curr.correction = new_correction; - 
erts_smp_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx); } if (!esdp) @@ -804,13 +804,13 @@ finalize_corrected_time_offset(ErtsSystemTime *stimep) ErtsMonotonicCorrectionInstance ci; int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time; - erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); erts_os_times(&os_mtime, stimep); ci = time_sup.inf.c.parmon.cdata.insts.curr; - erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); if (os_mtime < ci.os_mtime) erts_exit(ERTS_ABORT_EXIT, @@ -863,7 +863,7 @@ static ErtsMonotonicTime get_not_corrected_time(void) { ErtsMonotonicTime stime, mtime; - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); stime = erts_os_system_time(); @@ -889,7 +889,7 @@ static ErtsMonotonicTime get_not_corrected_time(void) ASSERT(stime == mtime + time_sup.inf.c.not_corrected_moffset); - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); return mtime; } @@ -971,9 +971,9 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX); - erts_smp_mtx_init(&erts_get_time_mtx, "get_time", NIL, + erts_mtx_init(&erts_get_time_mtx, "get_time", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_mtx_init(&runtime_prev.data.mtx, "runtime", NIL, + erts_mtx_init(&runtime_prev.data.mtx, "runtime", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); runtime_prev.data.user = 0; runtime_prev.data.sys = 0; @@ -982,9 +982,9 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) time_sup.r.o.warp_mode = time_warp_mode; if (time_warp_mode == ERTS_SINGLE_TIME_WARP_MODE) - erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1); + erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1); else - erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0); + erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0); time_sup.inf.c.shadow_offset = 0; #if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT @@ -1128,7 +1128,7 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) if (time_sup.r.o.correction) { ErtsMonotonicCorrectionData *cdatap; - erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; ErtsMonotonicTime offset; erts_os_times(&time_sup.inf.c.minit, &time_sup.inf.c.sinit); @@ -1138,10 +1138,10 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) offset -= ERTS_MONOTONIC_BEGIN; init_time_offset(offset); - rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts, + erts_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts, "get_corrected_time", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); @@ -1176,10 +1176,10 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) time_sup.f.c.last_not_corrected_time = 0; } - erts_smp_atomic64_init_nob(&wall_clock_prev.time, - (erts_aint64_t) 0); + erts_atomic64_init_nob(&wall_clock_prev.time, + (erts_aint64_t) 0); - erts_smp_atomic64_init_nob( + erts_atomic64_init_nob( &now_prev.time, 
(erts_aint64_t) ERTS_MONOTONIC_TO_USEC(get_time_offset())); @@ -1223,7 +1223,7 @@ ErtsTimeOffsetState erts_time_offset_state(void) case ERTS_NO_TIME_WARP_MODE: return ERTS_TIME_OFFSET_FINAL; case ERTS_SINGLE_TIME_WARP_MODE: - if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) + if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) return ERTS_TIME_OFFSET_PRELIMINARY; return ERTS_TIME_OFFSET_FINAL; case ERTS_MULTI_TIME_WARP_MODE: @@ -1256,9 +1256,9 @@ erts_finalize_time_offset(void) case ERTS_SINGLE_TIME_WARP_MODE: { ErtsTimeOffsetState res = ERTS_TIME_OFFSET_FINAL; - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); - if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) { + if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) { ErtsMonotonicTime mtime, new_offset; #ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT @@ -1295,11 +1295,11 @@ erts_finalize_time_offset(void) set_time_offset(new_offset); schedule_send_time_offset_changed_notifications(new_offset); - erts_smp_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0); + erts_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0); res = ERTS_TIME_OFFSET_PRELIMINARY; } - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); return res; } @@ -1358,14 +1358,14 @@ erts_runtime_elapsed_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys, if (ms_user_diff || ms_sys_diff) { - erts_smp_mtx_lock(&runtime_prev.data.mtx); + erts_mtx_lock(&runtime_prev.data.mtx); prev_user = runtime_prev.data.user; prev_sys = runtime_prev.data.sys; runtime_prev.data.user = user; runtime_prev.data.sys = sys; - erts_smp_mtx_unlock(&runtime_prev.data.mtx); + erts_mtx_unlock(&runtime_prev.data.mtx); if (ms_user_diff) *ms_user_diff = user - prev_user; @@ -1394,8 +1394,8 @@ erts_wall_clock_elapsed_both(ErtsMonotonicTime *ms_total, ErtsMonotonicTime *ms_ ErtsMonotonicTime prev; prev = ((ErtsMonotonicTime) - erts_smp_atomic64_xchg_mb(&wall_clock_prev.time, - (erts_aint64_t) elapsed)); + erts_atomic64_xchg_mb(&wall_clock_prev.time, + (erts_aint64_t) elapsed)); *ms_diff = elapsed - prev; } @@ -1784,15 +1784,15 @@ get_now(Uint* megasec, Uint* sec, Uint* microsec) now = ERTS_MONOTONIC_TO_USEC(mtime + time_offset); /* Make sure now time is later than last time */ - prev = erts_smp_atomic64_read_nob(&now_prev.time); + prev = erts_atomic64_read_nob(&now_prev.time); while (1) { ErtsMonotonicTime act; if (now <= prev) now = prev + 1; act = ((ErtsMonotonicTime) - erts_smp_atomic64_cmpxchg_mb(&now_prev.time, - (erts_aint64_t) now, - (erts_aint64_t) prev)); + erts_atomic64_cmpxchg_mb(&now_prev.time, + (erts_aint64_t) now, + (erts_aint64_t) prev)); if (act == prev) break; prev = act; @@ -1883,10 +1883,10 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) { void erts_monitor_time_offset(Eterm id, Eterm ref) { - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); erts_add_monitor(&time_offset_monitors, MON_TIME_OFFSET, ref, id, NIL); no_time_offset_monitors++; - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); } int @@ -1895,7 +1895,7 @@ erts_demonitor_time_offset(Eterm ref) int res; ErtsMonitor *mon; ASSERT(is_internal_ref(ref)); - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); if (is_internal_ordinary_ref(ref)) mon = erts_remove_monitor(&time_offset_monitors, ref); else @@ -1907,7 +1907,7 @@ erts_demonitor_time_offset(Eterm ref) no_time_offset_monitors--; res = 1; } - 
erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); if (res) erts_destroy_monitor(mon); return res; @@ -1965,7 +1965,7 @@ send_time_offset_changed_notifications(void *new_offsetp) #endif new_offset -= ERTS_MONOTONIC_OFFSET_NATIVE; - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); no_monitors = no_time_offset_monitors; if (no_monitors) { @@ -1990,7 +1990,7 @@ send_time_offset_changed_notifications(void *new_offsetp) ASSERT(cntxt.ix == no_monitors); } - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); if (no_monitors) { Eterm *hp, *patch_refp, new_offset_term, message_template; @@ -2023,7 +2023,7 @@ send_time_offset_changed_notifications(void *new_offsetp) if (rp) { Eterm ref = to_mon_info[mix].ref; ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK; - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (erts_lookup_monitor(ERTS_P_MONITORS(rp), ref)) { ErtsMessage *mp; ErlOffHeap *ohp; @@ -2036,7 +2036,7 @@ send_time_offset_changed_notifications(void *new_offsetp) message = copy_struct(message_template, hsz, &hp, ohp); erts_queue_message(rp, rp_locks, mp, message, am_clock_service); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index db7d0ac449..a07e3642f6 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -77,8 +77,8 @@ static Eterm system_profile; int erts_cpu_timestamp; #endif -static erts_smp_mtx_t smq_mtx; -static erts_smp_rwmtx_t sys_trace_rwmtx; +static erts_mtx_t smq_mtx; +static erts_rwmtx_t sys_trace_rwmtx; enum ErtsSysMsgType { SYS_MSG_TYPE_UNDEFINED, @@ -237,7 +237,6 @@ write_timestamp(ErtsTraceTimeStamp *tsp, Eterm **hpp) } } -#ifdef ERTS_SMP static ERTS_INLINE Uint patch_ts_size(int ts_type) @@ -257,7 +256,6 @@ patch_ts_size(int ts_type) return 0; } } -#endif /* ERTS_SMP */ /* * Write a timestamp. 
The timestamp MUST be the last @@ -298,18 +296,11 @@ write_ts(int ts_type, Eterm *hp, ErlHeapFragment *bp, Process *tracer) if (shrink) { if (bp) bp->used_size -= shrink; -#ifndef ERTS_SMP - else if (tracer) { - Eterm *endp = ts_hp + shrink; - HRelease(tracer, endp, ts_hp); - } -#endif } return res; } -#ifdef ERTS_SMP static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type, Eterm from, Eterm to, @@ -321,7 +312,6 @@ static void enqueue_sys_msg(enum ErtsSysMsgType type, Eterm msg, ErlHeapFragment *bp); static void init_sys_msg_dispatcher(void); -#endif static void init_tracer_nif(void); static int tracer_cmp_fun(void*, void*); @@ -332,11 +322,11 @@ static void tracer_free_fun(void*); typedef struct ErtsTracerNif_ ErtsTracerNif; void erts_init_trace(void) { - erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL, + erts_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); #ifdef HAVE_ERTS_NOW_CPU @@ -350,9 +340,7 @@ void erts_init_trace(void) { default_port_trace_flags = F_INITIAL_TRACE_FLAGS; default_port_tracer = erts_tracer_nil; system_seq_tracer = erts_tracer_nil; -#ifdef ERTS_SMP init_sys_msg_dispatcher(); -#endif init_tracer_nif(); } @@ -412,43 +400,35 @@ static Uint active_sched; void erts_system_profile_setup_active_schedulers(void) { - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); active_sched = erts_active_schedulers(); } static void exiting_reset(Eterm exiting) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); if (exiting == system_monitor) { -#ifdef ERTS_SMP system_monitor = NIL; /* Let the trace message dispatcher clear flags, etc */ -#else - erts_system_monitor_clear(NULL); -#endif } if (exiting == system_profile) { -#ifdef ERTS_SMP system_profile = NIL; /* Let the trace message dispatcher clear flags, etc */ -#else - erts_system_profile_clear(NULL); -#endif } - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_trace_check_exiting(Eterm exiting) { int reset = 0; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); if (exiting == system_monitor) reset = 1; else if (exiting == system_profile) reset = 1; - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); if (reset) exiting_reset(exiting); } @@ -468,7 +448,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new } } - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); old = system_seq_tracer; system_seq_tracer = erts_tracer_nil; erts_tracer_update(&system_seq_tracer, new); @@ -476,7 +456,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old); #endif - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); return old; } @@ -484,12 +464,12 @@ ErtsTracer erts_get_system_seq_tracer(void) { ErtsTracer st; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); st = 
system_seq_tracer; #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "get seq tracer %T\n", st); #endif - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); if (st != erts_tracer_nil && call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED, @@ -522,8 +502,8 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp, ErtsTracer curr_default_tracer = *default_tracer; if (tracerp) { /* we only have a rlock, so we have to unlock and then rwlock */ - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); } /* check if someone else changed default tracer while we got the write lock, if so we don't do @@ -533,8 +513,8 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp, ERTS_TRACER_CLEAR(default_tracer); } if (tracerp) { - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); } } } @@ -567,81 +547,81 @@ void erts_change_default_proc_tracing(int setflags, Uint flagsp, const ErtsTracer tracer) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); erts_change_default_tracing( setflags, flagsp, tracer, &default_proc_trace_flags, &default_proc_tracer); - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_change_default_port_tracing(int setflags, Uint flagsp, const ErtsTracer tracer) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); erts_change_default_tracing( setflags, flagsp, tracer, &default_port_trace_flags, &default_port_tracer); - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_get_default_proc_tracing(Uint *flagsp, ErtsTracer *tracerp) { - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); *tracerp = erts_tracer_nil; /* initialize */ get_default_tracing( flagsp, tracerp, &default_proc_trace_flags, &default_proc_tracer); - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); } void erts_get_default_port_tracing(Uint *flagsp, ErtsTracer *tracerp) { - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); *tracerp = erts_tracer_nil; /* initialize */ get_default_tracing( flagsp, tracerp, &default_port_trace_flags, &default_port_tracer); - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); } void erts_set_system_monitor(Eterm monitor) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); system_monitor = monitor; - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } Eterm erts_get_system_monitor(void) { Eterm monitor; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); monitor = system_monitor; - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); return monitor; } /* Performance monitoring */ void erts_set_system_profile(Eterm profile) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); system_profile = profile; - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } Eterm erts_get_system_profile(void) { Eterm profile; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); profile = system_profile; - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); return 
profile; } @@ -679,71 +659,11 @@ write_sys_msg_to_port(Eterm unused_to, erts_exit(ERTS_ERROR_EXIT, "Internal error in do_send_to_port: %d\n", ptr-buffer); } -#ifndef ERTS_SMP - if (!INVALID_TRACER_PORT(trace_port, trace_port->common.id)) -#endif erts_raw_port_command(trace_port, buffer, ptr-buffer); erts_free(ERTS_ALC_T_TMP, (void *) buffer); } -#ifndef ERTS_SMP -/* Profile send - * Checks if profiler is port or process - * Eterm msg is local, need copying. - */ - -static void -profile_send(Eterm from, Eterm message) { - Uint sz = 0; - Uint *hp = NULL; - Eterm msg = NIL; - Process *profile_p = NULL; - - Eterm profiler = erts_get_system_profile(); - - /* do not profile profiler pid */ - if (from == profiler) return; - - if (is_internal_port(profiler)) { - Port *profiler_port = NULL; - - /* not smp */ - - profiler_port = erts_id2port_sflgs(profiler, - NULL, - 0, - ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); - if (profiler_port) { - write_sys_msg_to_port(profiler, - profiler_port, - NIL, /* or current process->common.id */ - SYS_MSG_TYPE_SYSPROF, - message); - erts_port_release(profiler_port); - } - - } else { - ErtsMessage *mp; - ASSERT(is_internal_pid(profiler)); - - profile_p = erts_proc_lookup(profiler); - - if (!profile_p) - return; - - sz = size_object(message); - mp = erts_alloc_message(sz, &hp); - if (sz == 0) - msg = message; - else - msg = copy_struct(message, sz, &hp, &mp->hfrag.off_heap); - - erts_queue_message(profile_p, 0, mp, msg, from); - } -} - -#endif static void trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what) @@ -815,9 +735,7 @@ trace_send(Process *p, Eterm to, Eterm msg) ErtsTracerNif *tnif = NULL; ErtsTracingEvent* te; Eterm pam_result; -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl; -#endif ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND)); @@ -842,9 +760,7 @@ trace_send(Process *p, Eterm to, Eterm msg) } else pam_result = am_true; -#ifdef ERTS_SMP dhndl = erts_thr_progress_unmanaged_delay(); -#endif if (is_internal_pid(to)) { if (!erts_proc_lookup(to)) @@ -862,9 +778,7 @@ trace_send(Process *p, Eterm to, Eterm msg) operation, msg, to, pam_result); } -#ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); -#endif erts_match_set_release_result_trace(p, pam_result); } @@ -1179,7 +1093,7 @@ erts_call_trace(Process* p, ErtsCodeInfo *info, Binary *match_spec, Eterm transformed_args[MAX_ARG]; ErtsTracer pre_ms_tracer = erts_tracer_nil; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN); ASSERT(tracer); if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) { @@ -1467,21 +1381,11 @@ monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp, { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Uint hsz; Eterm *hp, list, in_mfa = am_undefined, out_mfa = am_undefined; Eterm in_tpl, out_tpl, tmo_tpl, tmo, msg; -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p || p == monitor_p) { - return; - } -#endif /* * Size: {monitor, pid, long_schedule, [{timeout, T}, {in, {M,F,A}},{out,{M,F,A}}]} -> * 5 (top tuple of 4), (3 (elements) * 2 (cons)) + 3 (timeout tuple of 2) + size of Timeout + @@ -1517,36 +1421,18 @@ monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp, hp += 2; msg = TUPLE4(hp, am_monitor, p->common.id, am_long_schedule, list); hp += 5; -#ifdef ERTS_SMP enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = 
erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } void monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time) { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Uint hsz; Eterm *hp, list, op; Eterm op_tpl, tmo_tpl, tmo, msg; -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p) { - return; - } -#endif /* * Size: {monitor, port, long_schedule, [{timeout, T}, {op, Operation}]} -> * 5 (top tuple of 4), (2 (elements) * 2 (cons)) + 3 (timeout tuple of 2) @@ -1582,24 +1468,13 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time) hp += 2; msg = TUPLE4(hp, am_monitor, pp->common.id, am_long_schedule, list); hp += 5; -#ifdef ERTS_SMP enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, pp->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } void monitor_long_gc(Process *p, Uint time) { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Uint hsz; Eterm *hp, list, msg; Eterm tags[] = { @@ -1624,12 +1499,6 @@ monitor_long_gc(Process *p, Uint time) { Eterm *hp_end; #endif -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p || p == monitor_p) - return; -#endif hsz = 0; (void) erts_bld_atom_uword_2tup_list(NULL, @@ -1657,24 +1526,13 @@ monitor_long_gc(Process *p, Uint time) { ASSERT(hp == hp_end); #endif -#ifdef ERTS_SMP enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } void monitor_large_heap(Process *p) { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Uint hsz; Eterm *hp, list, msg; Eterm tags[] = { @@ -1698,13 +1556,6 @@ monitor_large_heap(Process *p) { #endif -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p || p == monitor_p) { - return; - } -#endif hsz = 0; (void) erts_bld_atom_uword_2tup_list(NULL, @@ -1732,47 +1583,22 @@ monitor_large_heap(Process *p) { ASSERT(hp == hp_end); #endif -#ifdef ERTS_SMP enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } void monitor_generic(Process *p, Eterm type, Eterm spec) { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Eterm *hp, msg; -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p || p == monitor_p) - return; -#endif hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p); msg = TUPLE4(hp, am_monitor, p->common.id, type, spec); hp += 5; -#ifdef ERTS_SMP enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } @@ -1785,21 +1611,14 @@ profile_scheduler(Eterm scheduler_id, Eterm state) { Eterm *hp, msg; ErlHeapFragment *bp = NULL; -#ifndef ERTS_SMP -#define LOCAL_HEAP_SIZE (7 + 
ERTS_TRACE_PATCH_TS_MAX_SIZE) - DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); - UseTmpHeapNoproc(LOCAL_HEAP_SIZE); - hp = local_heap; -#else Uint hsz; hsz = 7 + patch_ts_size(erts_system_profile_ts_type)-1; bp = new_message_buffer(hsz); hp = bp->mem; -#endif - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); switch (state) { case am_active: @@ -1821,14 +1640,8 @@ profile_scheduler(Eterm scheduler_id, Eterm state) { /* Write timestamp in element 6 of the 'msg' tuple */ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL); -#ifndef ERTS_SMP - profile_send(NIL, msg); - UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); -#undef LOCAL_HEAP_SIZE -#else enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp); -#endif - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } @@ -1837,7 +1650,7 @@ profile_scheduler(Eterm scheduler_id, Eterm state) { void trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &p->common, &tnif, TRACE_FUN_E_PORTS, am_open)) send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PORTS, am_open, calling_pid, drv_name, am_true); @@ -1854,9 +1667,9 @@ void trace_port(Port *t_p, Eterm what, Eterm data) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_PORTS, what)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PORTS, what, data, THE_NON_VALUE, am_true); @@ -1899,9 +1712,9 @@ void trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_RECEIVE, am_receive)) { /* We can use a stack heap here, as the nif is called in the context of a port */ @@ -2016,9 +1829,9 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists) { ErtsTracerNif *tnif = NULL; Eterm op = exists ? 
am_send : am_send_to_non_existing_process; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, op)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND, op, msg, receiver, am_true); @@ -2027,9 +1840,9 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists) void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, am_send)) { Eterm msg; Binary* bptr = NULL; @@ -2075,9 +1888,9 @@ trace_sched_ports(Port *p, Eterm what) { void trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SCHED_PORT, what)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SCHED_PORT, @@ -2092,24 +1905,14 @@ profile_runnable_port(Port *p, Eterm status) { ErlHeapFragment *bp = NULL; Eterm count = make_small(0); -#ifndef ERTS_SMP -#define LOCAL_HEAP_SIZE (6 + ERTS_TRACE_PATCH_TS_MAX_SIZE) - - DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); - UseTmpHeapNoproc(LOCAL_HEAP_SIZE); - - hp = local_heap; - -#else Uint hsz; hsz = 6 + patch_ts_size(erts_system_profile_ts_type)-1; bp = new_message_buffer(hsz); hp = bp->mem; -#endif - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); msg = TUPLE5(hp, am_profile, p->common.id, status, count, NIL /* Will be overwritten by timestamp */); @@ -2118,14 +1921,8 @@ profile_runnable_port(Port *p, Eterm status) { /* Write timestamp in element 5 of the 'msg' tuple */ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL); -#ifndef ERTS_SMP - profile_send(p->common.id, msg); - UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); -#undef LOCAL_HEAP_SIZE -#else enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp); -#endif - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } /* Process profiling */ @@ -2136,23 +1933,13 @@ profile_runnable_proc(Process *p, Eterm status){ ErlHeapFragment *bp = NULL; ErtsCodeMFA *cmfa = NULL; -#ifndef ERTS_SMP -#define LOCAL_HEAP_SIZE (4 + 6 + ERTS_TRACE_PATCH_TS_MAX_SIZE) - DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); - UseTmpHeapNoproc(LOCAL_HEAP_SIZE); - - hp = local_heap; -#else ErtsThrPrgrDelayHandle dhndl; Uint hsz = 4 + 6 + patch_ts_size(erts_system_profile_ts_type)-1; -#endif /* Assumptions: * We possibly don't have the MAIN_LOCK for the process p here. 
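With the non-SMP local-heap path removed, the profile_* and monitor_* functions in this hunk always build the message in its own heap fragment and hand it to the system-message dispatcher: producers append under smq_mtx and signal smq_cnd, and sys_msg_dispatcher_func (further down) detaches the whole queue under the same lock before delivering outside it. A minimal sketch of that hand-off, using plain pthreads instead of the erts_mtx/erts_cnd wrappers; all names below are illustrative only:

    #include <pthread.h>
    #include <stddef.h>

    struct sys_msg { struct sys_msg *next; /* payload lives in its own heap fragment */ };

    static pthread_mutex_t smq_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  smq_cnd = PTHREAD_COND_INITIALIZER;
    static struct sys_msg *queue_head, *queue_tail;

    static void enqueue_sys_msg_sketch(struct sys_msg *m)
    {
        pthread_mutex_lock(&smq_mtx);
        m->next = NULL;
        if (queue_tail)
            queue_tail->next = m;
        else
            queue_head = m;
        queue_tail = m;
        pthread_cond_signal(&smq_cnd);           /* wake the dispatcher thread */
        pthread_mutex_unlock(&smq_mtx);
    }

    static struct sys_msg *dispatcher_fetch_all_sketch(void)
    {
        struct sys_msg *local;
        pthread_mutex_lock(&smq_mtx);
        while (!queue_head)
            pthread_cond_wait(&smq_cnd, &smq_mtx);
        local = queue_head;                      /* detach the whole queue ...      */
        queue_head = queue_tail = NULL;          /* ... and process it off the lock */
        pthread_mutex_unlock(&smq_mtx);
        return local;
    }

Detaching the entire list keeps the critical section short, which is the same design the dispatcher loop uses when it swaps sys_message_queue into local_sys_message_queue.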
* We assume that we can read from p->current and p->i atomically */ -#ifdef ERTS_SMP dhndl = erts_thr_progress_unmanaged_delay(); /* suspend purge operations */ -#endif if (!ERTS_PROC_IS_EXITING(p)) { if (p->current) { @@ -2162,14 +1949,12 @@ profile_runnable_proc(Process *p, Eterm status){ } } -#ifdef ERTS_SMP if (!cmfa) { hsz -= 4; } bp = new_message_buffer(hsz); hp = bp->mem; -#endif if (cmfa) { where = TUPLE3(hp, cmfa->module, cmfa->function, @@ -2179,11 +1964,9 @@ profile_runnable_proc(Process *p, Eterm status){ where = make_small(0); } -#ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); -#endif - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); msg = TUPLE5(hp, am_profile, p->common.id, status, where, NIL /* Will be overwritten by timestamp */); @@ -2192,20 +1975,13 @@ profile_runnable_proc(Process *p, Eterm status){ /* Write timestamp in element 5 of the 'msg' tuple */ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL); -#ifndef ERTS_SMP - profile_send(p->common.id, msg); - UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); -#undef LOCAL_HEAP_SIZE -#else enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp); -#endif - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } /* End system_profile tracing */ -#ifdef ERTS_SMP typedef struct ErtsSysMsgQ_ ErtsSysMsgQ; struct ErtsSysMsgQ_ { @@ -2251,7 +2027,7 @@ enqueue_sys_msg_unlocked(enum ErtsSysMsgType type, sys_message_queue = smqp; } sys_message_queue_end = smqp; - erts_smp_cnd_signal(&smq_cnd); + erts_cnd_signal(&smq_cnd); } static void @@ -2261,9 +2037,9 @@ enqueue_sys_msg(enum ErtsSysMsgType type, Eterm msg, ErlHeapFragment *bp) { - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); enqueue_sys_msg_unlocked(type, from, to, msg, bp); - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } void @@ -2315,10 +2091,10 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver) && !erts_system_monitor_flags.busy_port && !erts_system_monitor_flags.busy_dist_port) break; /* Everything is disabled */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); if (system_monitor == receiver || receiver == NIL) erts_system_monitor_clear(NULL); - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case SYS_MSG_TYPE_SYSPROF: if (receiver == NIL @@ -2328,11 +2104,11 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver) && !erts_system_profile_flags.scheduler) break; /* Block system to clear flags */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); if (system_profile == receiver || receiver == NIL) { erts_system_profile_clear(NULL); } - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case SYS_MSG_TYPE_ERRLGR: { char *no_elgger = "(no error logger present)"; @@ -2377,38 +2153,38 @@ static void sys_msg_dispatcher_wakeup(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); *wait_p = 0; - erts_smp_cnd_signal(&smq_cnd); - erts_smp_mtx_unlock(&smq_mtx); + erts_cnd_signal(&smq_cnd); + erts_mtx_unlock(&smq_mtx); } static void sys_msg_dispatcher_prep_wait(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); *wait_p = 1; - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void sys_msg_dispatcher_fin_wait(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); *wait_p = 0; - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void 
sys_msg_dispatcher_wait(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); while (*wait_p) - erts_smp_cnd_wait(&smq_cnd, &smq_mtx); - erts_smp_mtx_unlock(&smq_mtx); + erts_cnd_wait(&smq_cnd, &smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void * @@ -2434,9 +2210,9 @@ sys_msg_dispatcher_func(void *unused) int end_wait = 0; ErtsSysMsgQ *smqp; - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); /* Free previously used queue ... */ while (local_sys_message_queue) { @@ -2447,21 +2223,21 @@ sys_msg_dispatcher_func(void *unused) /* Fetch current trace message queue ... */ if (!sys_message_queue) { - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); end_wait = 1; erts_thr_progress_active(NULL, 0); erts_thr_progress_prepare_wait(NULL); - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); } while (!sys_message_queue) - erts_smp_cnd_wait(&smq_cnd, &smq_mtx); + erts_cnd_wait(&smq_cnd, &smq_mtx); local_sys_message_queue = sys_message_queue; sys_message_queue = NULL; sys_message_queue_end = NULL; - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); if (end_wait) { erts_thr_progress_finalize_wait(NULL); @@ -2535,7 +2311,7 @@ sys_msg_dispatcher_func(void *unused) #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "delivered\n"); #endif - erts_smp_proc_unlock(proc, proc_locks); + erts_proc_unlock(proc, proc_locks); } } else if (receiver == am_error_logger) { @@ -2573,7 +2349,7 @@ sys_msg_dispatcher_func(void *unused) sys_msg_disp_failure(smqp, receiver); drop_sys_msg: if (proc) - erts_smp_proc_unlock(proc, proc_locks); + erts_proc_unlock(proc, proc_locks); if (smqp->bp) free_message_buffer(smqp->bp); #ifdef DEBUG_PRINTOUTS @@ -2593,7 +2369,7 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm, ErlHeapFragment *)) { ErtsSysMsgQ *sm; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); for (sm = sys_message_queue; sm; sm = sm->next) { Eterm to; switch (sm->type) { @@ -2612,29 +2388,28 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm, } (*func)(sm->from, to, sm->msg, sm->bp); } - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void init_sys_msg_dispatcher(void) { - erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER; + erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER; thr_opts.detached = 1; thr_opts.name = "sys_msg_dispatcher"; init_smq_element_alloc(); sys_message_queue = NULL; sys_message_queue_end = NULL; - erts_smp_cnd_init(&smq_cnd); - erts_smp_mtx_init(&smq_mtx, "sys_msg_q", NIL, + erts_cnd_init(&smq_cnd); + erts_mtx_init(&smq_mtx, "sys_msg_q", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); - erts_smp_thr_create(&sys_msg_dispatcher_tid, + erts_thr_create(&sys_msg_dispatcher_tid, sys_msg_dispatcher_func, NULL, &thr_opts); } -#endif #include "erl_nif.h" @@ -2730,7 +2505,7 @@ static void init_tracer_template(ErtsTracerNif *tnif) { } static Hash *tracer_hash = NULL; -static erts_smp_rwmtx_t tracer_mtx; +static erts_rwmtx_t tracer_mtx; static ErtsTracerNif * load_tracer_nif(const ErtsTracer tracer) @@ -2770,9 +2545,9 @@ load_tracer_nif(const ErtsTracer tracer) return NULL; } - erts_smp_rwmtx_rwlock(&tracer_mtx); + erts_rwmtx_rwlock(&tracer_mtx); tnif = hash_put(tracer_hash, &tnif_tmpl); - erts_smp_rwmtx_rwunlock(&tracer_mtx); + erts_rwmtx_rwunlock(&tracer_mtx); return tnif; } @@ -2783,14 +2558,14 @@ lookup_tracer_nif(const ErtsTracer tracer) ErtsTracerNif 
tnif_tmpl; ErtsTracerNif *tnif; tnif_tmpl.module = ERTS_TRACER_MODULE(tracer); - erts_smp_rwmtx_rlock(&tracer_mtx); + erts_rwmtx_rlock(&tracer_mtx); if ((tnif = hash_get(tracer_hash, &tnif_tmpl)) == NULL) { - erts_smp_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); tnif = load_tracer_nif(tracer); ASSERT(!tnif || tnif->nif_mod); return tnif; } - erts_smp_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); ASSERT(tnif->nif_mod); return tnif; } @@ -2928,17 +2703,17 @@ send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p, Eterm t_p_id, ErtsTracerNif *tnif, enum ErtsTracerOpt topt, Eterm tag, Eterm msg, Eterm extra, Eterm pam_result) { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) if (c_p) { /* We have to hold the main lock of the currently executing process */ erts_proc_lc_chk_have_proc_locks(c_p, ERTS_PROC_LOCK_MAIN); } if (is_internal_pid(t_p->id)) { /* We have to have at least one lock */ - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL); } else { ASSERT(is_internal_port(t_p->id)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)); + ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)); } #endif @@ -2981,17 +2756,17 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, enum ErtsTracerOpt topt, Eterm tag) { Eterm nif_result; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) if (c_p) - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks || erts_thr_progress_is_blocking()); if (is_internal_pid(t_p->id)) { /* We have to have at least one lock */ - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL || erts_thr_progress_is_blocking()); } else { ASSERT(is_internal_port(t_p->id)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p) || erts_thr_progress_is_blocking()); } #endif @@ -3013,12 +2788,12 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) { ErtsProcLocks c_p_xlocks = 0; if (is_internal_pid(t_p->id)) { - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); if (c_p_locks != ERTS_PROC_LOCKS_ALL) { c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL; - if (erts_smp_proc_trylock(c_p, c_p_xlocks) == EBUSY) { - erts_smp_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) { + erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } } } @@ -3026,7 +2801,7 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, t_p->trace_flags &= ~TRACEE_FLAGS; if (c_p_xlocks) - erts_smp_proc_unlock(c_p, c_p_xlocks); + erts_proc_unlock(c_p, c_p_xlocks); } return 0; @@ -3066,7 +2841,7 @@ int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks, void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer) { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) if (is_internal_pid(t_p->id) && !erts_thr_progress_is_blocking()) { 
erts_proc_lc_chk_have_proc_locks((Process*)t_p, ERTS_PROC_LOCKS_ALL); } else if (is_internal_port(t_p->id)) { @@ -3184,11 +2959,11 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer) static void init_tracer_nif() { - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL, + erts_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); erts_tracer_nif_clear(); @@ -3198,7 +2973,7 @@ static void init_tracer_nif() int erts_tracer_nif_clear() { - erts_smp_rwmtx_rlock(&tracer_mtx); + erts_rwmtx_rlock(&tracer_mtx); if (!tracer_hash || tracer_hash->nobjs) { HashFunctions hf; @@ -3210,19 +2985,19 @@ int erts_tracer_nif_clear() hf.meta_free = (HMFREE_FUN) erts_free; hf.meta_print = (HMPRINT_FUN) erts_print; - erts_smp_rwmtx_runlock(&tracer_mtx); - erts_smp_rwmtx_rwlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_rwlock(&tracer_mtx); if (tracer_hash) hash_delete(tracer_hash); tracer_hash = hash_new(ERTS_ALC_T_TRACER_NIF, "tracer_hash", 10, hf); - erts_smp_rwmtx_rwunlock(&tracer_mtx); + erts_rwmtx_rwunlock(&tracer_mtx); return 1; } - erts_smp_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); return 0; } diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h index 01fe1e5e23..dbf7ebd2a1 100644 --- a/erts/emulator/beam/erl_trace.h +++ b/erts/emulator/beam/erl_trace.h @@ -87,7 +87,6 @@ void erts_set_system_monitor(Eterm monitor); Eterm erts_get_system_monitor(void); int erts_is_tracer_valid(Process* p); -#ifdef ERTS_SMP void erts_check_my_tracer_proc(Process *); void erts_block_sys_msg_dispatcher(void); void erts_release_sys_msg_dispatcher(void); @@ -97,7 +96,6 @@ void erts_foreach_sys_msg_in_q(void (*func)(Eterm, ErlHeapFragment *)); void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *); void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *); -#endif void trace_send(Process*, Eterm, Eterm); void trace_receive(Process*, Eterm, Eterm, ErtsTracingEvent*); @@ -149,16 +147,12 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying, Uint32 flags_meta, BeamInstr* I, ErtsTracer meta_tracer); -#ifdef ERTS_SMP void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp); -#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \ +#define ERTS_CHK_PEND_TRACE_MSGS(ESDP) \ do { \ if ((ESDP)->pending_trace_msgs) \ erts_send_pending_trace_msgs((ESDP)); \ } while (0) -#else -#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) -#endif #define seq_trace_output(token, msg, type, receiver, process) \ seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL) diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h index 3d28b05752..44d8c85867 100644 --- a/erts/emulator/beam/erl_utils.h +++ b/erts/emulator/beam/erl_utils.h @@ -22,15 +22,11 @@ #define ERL_UTILS_H__ #include "sys.h" -#include "erl_smp.h" #include "erl_printf.h" struct process; typedef struct { -#ifdef DEBUG - int smp_api; -#endif union { Uint64 not_atomic; erts_atomic64_t atomic; @@ -38,70 +34,25 @@ typedef struct { } erts_interval_t; void erts_interval_init(erts_interval_t *); -void 
erts_smp_interval_init(erts_interval_t *); Uint64 erts_step_interval_nob(erts_interval_t *); Uint64 erts_step_interval_relb(erts_interval_t *); -Uint64 erts_smp_step_interval_nob(erts_interval_t *); -Uint64 erts_smp_step_interval_relb(erts_interval_t *); Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64); Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64); -Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64); -Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64); -ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *); ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *); ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE Uint64 -erts_current_interval_nob__(erts_interval_t *icp) -{ - return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic); -} - -ERTS_GLB_INLINE Uint64 -erts_current_interval_acqb__(erts_interval_t *icp) -{ - return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic); -} - -ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *icp) { - ASSERT(!icp->smp_api); - return erts_current_interval_nob__(icp); + return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic); } ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *icp) { - ASSERT(!icp->smp_api); - return erts_current_interval_acqb__(icp); -} - -ERTS_GLB_INLINE Uint64 -erts_smp_current_interval_nob(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return erts_current_interval_nob__(icp); -#else - return icp->counter.not_atomic; -#endif -} - -ERTS_GLB_INLINE Uint64 -erts_smp_current_interval_acqb(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return erts_current_interval_acqb__(icp); -#else - return icp->counter.not_atomic; -#endif + return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic); } #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index 0b8d78c469..076767c7cd 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -55,7 +55,7 @@ #define CP_SIZE 1 #define ErtsHAllocLockCheck(P) \ - ERTS_SMP_LC_ASSERT(erts_dbg_check_halloc_lock((P))) + ERTS_LC_ASSERT(erts_dbg_check_halloc_lock((P))) #ifdef DEBUG @@ -102,9 +102,11 @@ if ((ptr) == (endp)) { \ ; \ } else if (HEAP_START(p) <= (ptr) && (ptr) < HEAP_TOP(p)) { \ + ASSERT(HEAP_TOP(p) == (endp)); \ HEAP_TOP(p) = (ptr); \ } else { \ - erts_heap_frag_shrink(p, ptr); \ + ASSERT(MBUF(p)->mem + MBUF(p)->used_size == (endp)); \ + erts_heap_frag_shrink(p, ptr); \ } #define HeapWordsLeft(p) (HEAP_LIMIT(p) - HEAP_TOP(p)) @@ -157,6 +159,7 @@ typedef struct op_entry { Uint32 mask[3]; /* Signature mask. */ unsigned involves_r; /* Needs special attention when matching. */ int sz; /* Number of loaded words. */ + int adjust; /* Adjustment for start of instruction. */ char* pack; /* Instructions for packing engine. */ char* sign; /* Signature string. 
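The erl_utils.h hunk above drops the separate erts_smp_* interval API; the inline accessors now read the 64-bit atomic counter directly. A rough C11 sketch of what the remaining step/ensure-later operations are meant to do (the real implementations use the erts_atomic64_* wrappers with the _nob/_relb/_acqb ordering variants; treat this as an assumption-laden illustration):

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct { _Atomic uint64_t counter; } interval_sketch_t;

    /* erts_step_interval_*: advance the interval and return the new value */
    static uint64_t step_interval(interval_sketch_t *icp)
    {
        return atomic_fetch_add_explicit(&icp->counter, 1, memory_order_relaxed) + 1;
    }

    /* erts_ensure_later_interval_*: make sure the counter ends up past `ic` */
    static uint64_t ensure_later_interval(interval_sketch_t *icp, uint64_t ic)
    {
        uint64_t cur = atomic_load_explicit(&icp->counter, memory_order_relaxed);
        while (cur <= ic) {
            if (atomic_compare_exchange_weak_explicit(&icp->counter, &cur, ic + 1,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                return ic + 1;
            /* cur was reloaded by the failed CAS; re-check and retry */
        }
        return cur;
    }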
*/ } OpEntry; diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c index 828c833ffc..c81503f722 100644 --- a/erts/emulator/beam/export.c +++ b/erts/emulator/beam/export.c @@ -41,14 +41,12 @@ static IndexTable export_tables[ERTS_NUM_CODE_IX]; /* Active not locked */ -static erts_smp_atomic_t total_entries_bytes; - -#include "erl_smp.h" +static erts_atomic_t total_entries_bytes; /* This lock protects the staging export table from concurrent access * AND it protects the staging table from becoming active. */ -erts_smp_mtx_t export_staging_lock; +erts_mtx_t export_staging_lock; extern BeamInstr* em_call_error_handler; extern BeamInstr* em_call_traced_function; @@ -85,17 +83,13 @@ static struct export_blob* entry_to_blob(struct export_entry* ee) void export_info(fmtfn_t to, void *to_arg) { -#ifdef ERTS_SMP int lock = !ERTS_IS_CRASH_DUMPING; if (lock) export_staging_lock(); -#endif index_info(to, to_arg, &export_tables[erts_active_code_ix()]); hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable); -#ifdef ERTS_SMP if (lock) export_staging_unlock(); -#endif } @@ -129,7 +123,7 @@ export_alloc(struct export_entry* tmpl_e) Export* obj; blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob)); - erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob)); + erts_atomic_add_nob(&total_entries_bytes, sizeof(*blob)); obj = &blob->exp; obj->info.op = 0; obj->info.u.gen_bp = NULL; @@ -173,7 +167,7 @@ export_free(struct export_entry* obj) } DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export blob deallocation at %p", &blob->exp); erts_free(ERTS_ALC_T_EXPORT, blob); - erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob)); + erts_atomic_add_nob(&total_entries_bytes, -sizeof(*blob)); } void @@ -182,9 +176,9 @@ init_export_table(void) HashFunctions f; int i; - erts_smp_mtx_init(&export_staging_lock, "export_tab", NIL, + erts_mtx_init(&export_staging_lock, "export_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_atomic_init_nob(&total_entries_bytes, 0); + erts_atomic_init_nob(&total_entries_bytes, 0); f.hash = (H_FUN) export_hash; f.cmp = (HCMP_FUN) export_cmp; @@ -373,7 +367,7 @@ int export_table_sz(void) } int export_entries_sz(void) { - return erts_smp_atomic_read_nob(&total_entries_bytes); + return erts_atomic_read_nob(&total_entries_bytes); } Export *export_get(Export *e) { diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h index 7c812b306c..be6cce07bf 100644 --- a/erts/emulator/beam/export.h +++ b/erts/emulator/beam/export.h @@ -66,9 +66,9 @@ Export *export_get(Export*); void export_start_staging(void); void export_end_staging(int commit); -extern erts_smp_mtx_t export_staging_lock; -#define export_staging_lock() erts_smp_mtx_lock(&export_staging_lock) -#define export_staging_unlock() erts_smp_mtx_unlock(&export_staging_lock) +extern erts_mtx_t export_staging_lock; +#define export_staging_lock() erts_mtx_lock(&export_staging_lock) +#define export_staging_unlock() erts_mtx_unlock(&export_staging_lock) #include "beam_load.h" /* For em_* extern declarations */ #define ExportIsBuiltIn(EntryPtr) \ diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index c0a3838d42..60cf09dc07 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -616,7 +616,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize) sys_memcpy((void *) ep, (void *) edep, dist_ext_sz); ep += dist_ext_sz; if (new_edep->dep) - erts_smp_refc_inc(&new_edep->dep->refc, 1); + 
erts_ref_dist_entry(new_edep->dep); new_edep->extp = ep; new_edep->ext_endp = ep + ext_sz; new_edep->heap_size = -1; @@ -629,7 +629,8 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, byte *ext, Uint size, DistEntry *dep, - ErtsAtomCache *cache) + ErtsAtomCache *cache, + Uint32 *connection_id) { #undef ERTS_EXT_FAIL #undef ERTS_EXT_HDR_FAIL @@ -650,33 +651,36 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, if (size < 2) ERTS_EXT_FAIL; + if (!dep) + ERTS_INTERNAL_ERROR("Invalid use"); + if (ep[0] != VERSION_MAGIC) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); - if (dep) - erts_dsprintf(dsbufp, - "** Got message from incompatible erlang on " - "channel %d\n", - dist_entry_channel_no(dep)); - else - erts_dsprintf(dsbufp, - "** Attempt to convert old incompatible " - "binary %d\n", - *ep); + erts_dsprintf(dsbufp, + "** Got message from incompatible erlang on " + "channel %d\n", + dist_entry_channel_no(dep)); erts_send_error_to_logger_nogl(dsbufp); ERTS_EXT_FAIL; } edep->flags = 0; edep->dep = dep; - if (dep) { - erts_smp_de_rlock(dep); - if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE) - edep->flags |= ERTS_DIST_EXT_DFLAG_HDR; - - edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK); - erts_smp_de_runlock(dep); + + erts_de_rlock(dep); + + if ((dep->status & (ERTS_DE_SFLG_EXITING|ERTS_DE_SFLG_CONNECTED)) + != ERTS_DE_SFLG_CONNECTED) { + erts_de_runlock(dep); + return ERTS_PREP_DIST_EXT_CLOSED; } + if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE) + edep->flags |= ERTS_DIST_EXT_DFLAG_HDR; + + *connection_id = dep->connection_id; + edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK); + if (ep[1] != DIST_HEADER) { if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) ERTS_EXT_HDR_FAIL; @@ -835,14 +839,15 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, ERTS_EXT_FAIL; #endif - return 0; + erts_de_runlock(dep); + + return ERTS_PREP_DIST_EXT_SUCCESS; #undef CHKSIZE #undef ERTS_EXT_FAIL #undef ERTS_EXT_HDR_FAIL - bad_hdr: - if (dep) { + bad_hdr: { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "%T got a corrupted distribution header from %T " @@ -855,10 +860,11 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, erts_dsprintf(dsbufp, ">>"); erts_send_warning_to_logger_nogl(dsbufp); } - fail: - if (dep) - erts_kill_dist_connection(dep, dep->connection_id); - return -1; + fail: { + erts_de_runlock(dep); + erts_kill_dist_connection(dep, *connection_id); + } + return ERTS_PREP_DIST_EXT_FAILED; } static void diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h index f00426cc16..3c61d013da 100644 --- a/erts/emulator/beam/external.h +++ b/erts/emulator/beam/external.h @@ -185,8 +185,13 @@ ERTS_GLB_INLINE void *erts_dist_ext_trailer(ErtsDistExternal *); ErtsDistExternal *erts_make_dist_ext_copy(ErtsDistExternal *, Uint); void *erts_dist_ext_trailer(ErtsDistExternal *); void erts_destroy_dist_ext_copy(ErtsDistExternal *); + +#define ERTS_PREP_DIST_EXT_FAILED (-1) +#define ERTS_PREP_DIST_EXT_SUCCESS (0) +#define ERTS_PREP_DIST_EXT_CLOSED (1) + int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint, - DistEntry *, ErtsAtomCache *); + DistEntry *, ErtsAtomCache *, Uint32 *); Sint erts_decode_dist_ext_size(ErtsDistExternal *); Eterm erts_decode_dist_ext(ErtsHeapFactory* factory, ErtsDistExternal *); diff --git a/erts/emulator/beam/float_instrs.tab b/erts/emulator/beam/float_instrs.tab new file mode 100644 index 0000000000..3d4db77892 --- /dev/null +++ b/erts/emulator/beam/float_instrs.tab @@ -0,0 +1,88 @@ +// -*- c -*- +// +// %CopyrightBegin% 
+// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +LOAD_DOUBLE(Src, Dst) { + GET_DOUBLE($Src, *(FloatDef *) &$Dst); +} + +fload(Reg, Dst) { + $LOAD_DOUBLE($Reg, $Dst); +} + +fstore(Float, Dst) { + PUT_DOUBLE(*((FloatDef *) &$Float), HTOP); + $Dst = make_float(HTOP); + HTOP += FLOAT_SIZE_OBJECT; +} + +fconv(Src, Dst) { + Eterm src = $Src; + + if (is_small(src)) { + $Dst = (double) signed_val(src); + } else if (is_big(src)) { + if (big_to_double(src, &$Dst) < 0) { + $BADARITH0(); + } + } else if (is_float(src)) { + $LOAD_DOUBLE(src, $Dst); + } else { + $BADARITH0(); + } +} + +FLOAT_OP(Src1, OP, Src2, Dst) { + ERTS_NO_FPE_CHECK_INIT(c_p); + $Dst = $Src1 $OP $Src2; + ERTS_NO_FPE_ERROR(c_p, $Dst, $BADARITH0()); +} + +i_fadd(Src1, Src2, Dst) { + $FLOAT_OP($Src1, +, $Src2, $Dst); +} + +i_fsub(Src1, Src2, Dst) { + $FLOAT_OP($Src1, -, $Src2, $Dst); +} + +i_fmul(Src1, Src2, Dst) { + $FLOAT_OP($Src1, *, $Src2, $Dst); +} + +i_fdiv(Src1, Src2, Dst) { + $FLOAT_OP($Src1, /, $Src2, $Dst); +} + +i_fnegate(Src, Dst) { + ERTS_NO_FPE_CHECK_INIT(c_p); + $Dst = -$Src; + ERTS_NO_FPE_ERROR(c_p, $Dst, $BADARITH0()); +} + +%unless NO_FPE_SIGNALS +fclearerror() { + ERTS_FP_CHECK_INIT(c_p); +} + +i_fcheckerror() { + ERTS_FP_ERROR(c_p, freg[0].fd, $BADARITH0()); +} +%endif diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 182d3aa44e..2b0ad0b98a 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -85,7 +85,7 @@ struct enif_resource_type_t typedef struct { - erts_smp_mtx_t lock; + erts_mtx_t lock; ErtsMonitor* root; int pending_failed_fire; int is_dying; @@ -128,10 +128,8 @@ extern Eterm erts_nif_call_function(Process *p, Process *tracee, struct enif_func_t *, int argc, Eterm *argv); -#ifdef ERTS_DIRTY_SCHEDULERS int erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg); -#endif /* ERTS_DIRTY_SCHEDULERS */ /* Driver handle (wrapper for old plain handle) */ @@ -183,9 +181,9 @@ typedef struct { void *handle; /* Handle for DLL or SO (for dyn. drivers). 
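The refc field above is now a plain erts_refc_t: ports and processes bump it while they hold a reference to the dynamically loaded driver, and the last release may tear the handle down. The generic shape of that pattern in bare C11, not the ERTS helpers themselves (names and the free() stand-in are assumptions):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {
        _Atomic long refc;   /* starts at 1 when the handle is created */
    } de_handle_sketch_t;

    static void de_ref(de_handle_sketch_t *dh)
    {
        atomic_fetch_add_explicit(&dh->refc, 1, memory_order_relaxed);
    }

    static void de_unref(de_handle_sketch_t *dh)
    {
        if (atomic_fetch_sub_explicit(&dh->refc, 1, memory_order_release) == 1) {
            /* acquire fence so all prior uses are visible before teardown */
            atomic_thread_fence(memory_order_acquire);
            free(dh);        /* the real code would unload the driver here */
        }
    }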
*/ DE_ProcEntry *procs; /* List of pids that have loaded this driver, or that wait for it to change state */ - erts_smp_refc_t refc; /* Number of ports/processes having + erts_refc_t refc; /* Number of ports/processes having references to the driver */ - erts_smp_atomic32_t port_count; /* Number of ports using the driver */ + erts_atomic32_t port_count; /* Number of ports using the driver */ Uint flags; /* ERL_DE_FL_KILL_PORTS */ int status; /* ERL_DE_xxx */ char *full_path; /* Full path of the driver */ @@ -209,9 +207,7 @@ struct erts_driver_t_ { } version; int flags; DE_Handle *handle; -#ifdef ERTS_SMP - erts_smp_mtx_t *lock; -#endif + erts_mtx_t *lock; ErlDrvEntry *entry; ErlDrvData (*start)(ErlDrvPort port, char *command, SysDriverOpts* opts); void (*stop)(ErlDrvData drv_data); @@ -238,7 +234,7 @@ struct erts_driver_t_ { }; extern erts_driver_t *driver_list; -extern erts_smp_rwmtx_t erts_driver_list_lock; +extern erts_rwmtx_t erts_driver_list_lock; extern void erts_ddll_init(void); extern void erts_ddll_lock_driver(DE_Handle *dh, char *name); @@ -299,7 +295,7 @@ extern Eterm node_cookie; extern Uint display_items; /* no of items to display in traces etc */ extern int erts_backtrace_depth; -extern erts_smp_atomic32_t erts_max_gen_gcs; +extern erts_atomic32_t erts_max_gen_gcs; extern int bif_reductions; /* reductions + fcalls (when doing call_bif) */ extern int stackdump_on_exit; @@ -909,13 +905,11 @@ typedef struct ErtsLiteralArea_ { #define ERTS_LITERAL_AREA_ALLOC_SIZE(N) \ (sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1)) -extern erts_smp_atomic_t erts_copy_literal_area__; +extern erts_atomic_t erts_copy_literal_area__; #define ERTS_COPY_LITERAL_AREA() \ - ((ErtsLiteralArea *) erts_smp_atomic_read_nob(&erts_copy_literal_area__)) + ((ErtsLiteralArea *) erts_atomic_read_nob(&erts_copy_literal_area__)) extern Process *erts_literal_area_collector; -#ifdef ERTS_DIRTY_SCHEDULERS extern Process *erts_dirty_process_code_checker; -#endif extern Process *erts_code_purger; @@ -1127,18 +1121,12 @@ extern ErtsModifiedTimings erts_modified_timings[]; extern int erts_no_line_info; extern Eterm erts_error_logger_warnings; extern int erts_initialized; -#if defined(USE_THREADS) && !defined(ERTS_SMP) -extern erts_tid_t erts_main_thread; -#endif extern int erts_compat_rel; extern int erts_use_sender_punish; void erl_start(int, char**); void erts_usage(void); Eterm erts_preloaded(Process* p); -#ifndef ERTS_SMP -extern void *erts_scheduler_stack_limit; -#endif /* erl_md5.c */ @@ -1182,7 +1170,7 @@ void erts_emergency_close_ports(void); void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon); Eterm erts_driver_monitor_to_ref(Eterm* hp, const ErlDrvMonitor *mon); -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) +#if defined(ERTS_ENABLE_LOCK_COUNT) void erts_lcnt_update_driver_locks(int enable); void erts_lcnt_update_port_locks(int enable); #endif @@ -1394,7 +1382,7 @@ Uint erts_current_reductions(Process* current, Process *p); int erts_print_system_version(fmtfn_t to, void *arg, Process *c_p); -int erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg); +int erts_hibernate(Process* c_p, Eterm* reg); ERTS_GLB_FORCE_INLINE int erts_is_literal(Eterm tptr, Eterm *ptr); diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c index a1f6f54543..93d1111904 100644 --- a/erts/emulator/beam/index.c +++ b/erts/emulator/beam/index.c @@ -98,7 +98,7 @@ index_put_entry(IndexTable* t, void* tmpl) * Do a write barrier here to allow readers to do lock free iteration. 
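The write barrier mentioned in the comment above (now ERTS_THR_WRITE_MEMORY_BARRIER) pairs with the read barrier in erts_index_num_entries() in index.h below, letting readers iterate the index table without taking a lock. The same publish/observe pairing expressed with C11 fences, with illustrative names:

    #include <stdatomic.h>

    static void       *entry_slots[1024];
    static _Atomic int nentries;

    static void publish_entry(int i, void *p)       /* index_put_entry side */
    {
        entry_slots[i] = p;                         /* fully initialize the entry first */
        atomic_thread_fence(memory_order_release);  /* ERTS_THR_WRITE_MEMORY_BARRIER */
        atomic_store_explicit(&nentries, i + 1, memory_order_relaxed);
    }

    static int snapshot_entries(void)               /* erts_index_num_entries side */
    {
        int n = atomic_load_explicit(&nentries, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);  /* ERTS_THR_READ_MEMORY_BARRIER */
        return n;                                   /* entries [0, n) are safe to read */
    }

This only works because entries are never erased, which is exactly the caveat the index.h comment states.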
* erts_index_num_entries() does matching read barrier. */ - ERTS_SMP_WRITE_MEMORY_BARRIER; + ERTS_THR_WRITE_MEMORY_BARRIER; t->entries++; return p; diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h index 6c07571df6..30bc6a1121 100644 --- a/erts/emulator/beam/index.h +++ b/erts/emulator/beam/index.h @@ -88,7 +88,7 @@ ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t) * on tables where entries are never erased. * index_put_entry() does matching write barrier. */ - ERTS_SMP_READ_MEMORY_BARRIER; + ERTS_THR_READ_MEMORY_BARRIER; return ret; } diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab new file mode 100644 index 0000000000..7ea9dee299 --- /dev/null +++ b/erts/emulator/beam/instrs.tab @@ -0,0 +1,921 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +// Stack manipulation instructions + +allocate(NeedStack, Live) { + $AH($NeedStack, 0, $Live); +} + +allocate_heap(NeedStack, NeedHeap, Live) { + $AH($NeedStack, $NeedHeap, $Live); +} + +allocate_init(NeedStack, Live, Y) { + $AH($NeedStack, 0, $Live); + make_blank($Y); +} + +allocate_zero(NeedStack, Live) { + Eterm* ptr; + int i = $NeedStack; + $AH(i, 0, $Live); + for (ptr = E + i; ptr > E; ptr--) { + make_blank(*ptr); + } +} + +allocate_heap_zero(NeedStack, NeedHeap, Live) { + Eterm* ptr; + int i = $NeedStack; + $AH(i, $NeedHeap, $Live); + for (ptr = E + i; ptr > E; ptr--) { + make_blank(*ptr); + } +} + +// This instruction is probably never used (because it is combined with a +// a return). However, a future compiler might for some reason emit a +// deallocate not followed by a return, and that should work. 
+ +deallocate(Deallocate) { + //| -no_prefetch + SET_CP(c_p, (BeamInstr *) cp_val(*E)); + E = ADD_BYTE_OFFSET(E, $Deallocate); +} + +deallocate_return(Deallocate) { + //| -no_next + int words_to_pop = $Deallocate; + SET_I((BeamInstr *) cp_val(*E)); + E = ADD_BYTE_OFFSET(E, words_to_pop); + CHECK_TERM(x(0)); + DispatchReturn; +} + +move_deallocate_return(Src, Deallocate) { + x(0) = $Src; + $deallocate_return($Deallocate); +} + +// Call instructions + +DISPATCH_REL(CallDest) { + //| -no_next + $SET_I_REL($CallDest); + DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); + Dispatch(); +} + +DISPATCH_ABS(CallDest) { + //| -no_next + SET_I((BeamInstr *) $CallDest); + DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); + Dispatch(); +} + +i_call(CallDest) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_REL($CallDest); +} + +move_call(Src, CallDest) { + x(0) = $Src; + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_REL($CallDest); +} + +i_call_last(CallDest, Deallocate) { + $deallocate($Deallocate); + $DISPATCH_REL($CallDest); +} + +move_call_last(Src, CallDest, Deallocate) { + x(0) = $Src; + $i_call_last($CallDest, $Deallocate); +} + +i_call_only(CallDest) { + $DISPATCH_REL($CallDest); +} + +move_call_only(Src, CallDest) { + x(0) = $Src; + $i_call_only($CallDest); +} + +DISPATCHX(Dest) { + //| -no_next + DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, $Dest); + // Dispatchx assumes the Export* is in Arg(0) + I = (&$Dest) - 1; + Dispatchx(); +} + +i_call_ext(Dest) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCHX($Dest); +} + +i_move_call_ext(Src, Dest) { + x(0) = $Src; + $i_call_ext($Dest); +} + +i_call_ext_only(Dest) { + $DISPATCHX($Dest); +} + +i_move_call_ext_only(Dest, Src) { + x(0) = $Src; + $i_call_ext_only($Dest); +} + +i_call_ext_last(Dest, Deallocate) { + $deallocate($Deallocate); + $DISPATCHX($Dest); +} + +i_move_call_ext_last(Dest, StackOffset, Src) { + x(0) = $Src; + $i_call_ext_last($Dest, $StackOffset); +} + +APPLY(I, Deallocate, Next) { + //| -no_next + HEAVY_SWAPOUT; + $Next = apply(c_p, reg, $I, $Deallocate); + HEAVY_SWAPIN; +} + +HANDLE_APPLY_ERROR() { + I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); + goto post_error_handling; +} + +i_apply() { + BeamInstr *next; + $APPLY(NULL, 0, next); + if (ERTS_LIKELY(next != NULL)) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +i_apply_last(Deallocate) { + BeamInstr *next; + $APPLY(I, $Deallocate, next); + if (ERTS_LIKELY(next != NULL)) { + $deallocate($Deallocate); + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +i_apply_only() { + BeamInstr *next; + $APPLY(I, 0, next); + if (ERTS_LIKELY(next != NULL)) { + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +FIXED_APPLY(Arity, I, Deallocate, Next) { + //| -no_next + HEAVY_SWAPOUT; + $Next = fixed_apply(c_p, reg, $Arity, $I, $Deallocate); + HEAVY_SWAPIN; +} + +apply(Arity) { + BeamInstr *next; + $FIXED_APPLY($Arity, NULL, 0, next); + if (ERTS_LIKELY(next != NULL)) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +apply_last(Arity, Deallocate) { + BeamInstr *next; + $FIXED_APPLY($Arity, I, $Deallocate, next); + if (ERTS_LIKELY(next != NULL)) { + $deallocate($Deallocate); + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +APPLY_FUN(Next) { + HEAVY_SWAPOUT; + $Next = apply_fun(c_p, r(0), x(1), reg); + HEAVY_SWAPIN; +} + +HANDLE_APPLY_FUN_ERROR() { + goto find_func_info; +} + +DISPATCH_FUN(I) { + SET_I($I); + Dispatchfun(); +} + +i_apply_fun() { + BeamInstr *next; + $APPLY_FUN(next); + if 
(ERTS_LIKELY(next != NULL)) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +i_apply_fun_last(Deallocate) { + BeamInstr *next; + $APPLY_FUN(next); + if (ERTS_LIKELY(next != NULL)) { + $deallocate($Deallocate); + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +i_apply_fun_only() { + BeamInstr *next; + $APPLY_FUN(next); + if (ERTS_LIKELY(next != NULL)) { + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +CALL_FUN(Fun, Next) { + //| -no_next + HEAVY_SWAPOUT; + $Next = call_fun(c_p, $Fun, reg, THE_NON_VALUE); + HEAVY_SWAPIN; +} + +i_call_fun(Fun) { + BeamInstr *next; + $CALL_FUN($Fun, next); + if (ERTS_LIKELY(next != NULL)) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +i_call_fun_last(Fun, Deallocate) { + BeamInstr *next; + $CALL_FUN($Fun, next); + if (ERTS_LIKELY(next != NULL)) { + $deallocate($Deallocate); + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +return() { + SET_I(c_p->cp); + DTRACE_RETURN_FROM_PC(c_p); + + /* + * We must clear the CP to make sure that a stale value do not + * create a false module dependcy preventing code upgrading. + * It also means that we can use the CP in stack backtraces. + */ + c_p->cp = 0; + CHECK_TERM(r(0)); + HEAP_SPACE_VERIFIED(0); + DispatchReturn; +} + +get_list(Src, Hd, Tl) { + Eterm* tmp_ptr = list_val($Src); + Eterm hd, tl; + hd = CAR(tmp_ptr); + tl = CDR(tmp_ptr); + $Hd = hd; + $Tl = tl; +} + +i_get(Src, Dst) { + $Dst = erts_pd_hash_get(c_p, $Src); +} + +i_get_hash(Src, Hash, Dst) { + $Dst = erts_pd_hash_get_with_hx(c_p, $Hash, $Src); +} + +i_get_tuple_element(Src, Element, Dst) { + Eterm* src = ADD_BYTE_OFFSET(tuple_val($Src), $Element); + $Dst = *src; +} + +i_get_tuple_element2(Src, Element, Dst) { + Eterm* src; + Eterm* dst; + Eterm E1, E2; + src = ADD_BYTE_OFFSET(tuple_val($Src), $Element); + dst = &($Dst); + E1 = src[0]; + E2 = src[1]; + dst[0] = E1; + dst[1] = E2; +} + +i_get_tuple_element2y(Src, Element, D1, D2) { + Eterm* src; + Eterm E1, E2; + src = ADD_BYTE_OFFSET(tuple_val($Src), $Element); + E1 = src[0]; + E2 = src[1]; + $D1 = E1; + $D2 = E2; +} + +i_get_tuple_element3(Src, Element, Dst) { + Eterm* src; + Eterm* dst; + Eterm E1, E2, E3; + src = ADD_BYTE_OFFSET(tuple_val($Src), $Element); + dst = &($Dst); + E1 = src[0]; + E2 = src[1]; + E3 = src[2]; + dst[0] = E1; + dst[1] = E2; + dst[2] = E3; +} + +i_element := element_group.fetch.execute; + + +element_group.head() { + Eterm element_index; + Eterm element_tuple; +} + +element_group.fetch(Src) { + element_tuple = $Src; +} + +element_group.execute(Fail, Index, Dst) { + element_index = $Index; + if (ERTS_LIKELY(is_small(element_index) && is_tuple(element_tuple))) { + Eterm* tp = tuple_val(element_tuple); + + if ((signed_val(element_index) >= 1) && + (signed_val(element_index) <= arityval(*tp))) { + $Dst = tp[signed_val(element_index)]; + $NEXT0(); + } + } + c_p->freason = BADARG; + $BIF_ERROR_ARITY_2($Fail, BIF_element_2, element_index, element_tuple); +} + +i_fast_element := fast_element_group.fetch.execute; + +fast_element_group.head() { + Eterm fast_element_tuple; +} + +fast_element_group.fetch(Src) { + fast_element_tuple = $Src; +} + +fast_element_group.execute(Fail, Index, Dst) { + if (ERTS_LIKELY(is_tuple(fast_element_tuple))) { + Eterm* tp = tuple_val(fast_element_tuple); + Eterm pos = $Index; /* Untagged integer >= 1 */ + if (pos <= arityval(*tp)) { + $Dst = tp[pos]; + $NEXT0(); + } + } + c_p->freason = BADARG; + $BIF_ERROR_ARITY_2($Fail, 
BIF_element_2, make_small($Index), fast_element_tuple); +} + +init(Y) { + make_blank($Y); +} + +init2(Y1, Y2) { + make_blank($Y1); + make_blank($Y2); +} + +init3(Y1, Y2, Y3) { + make_blank($Y1); + make_blank($Y2); + make_blank($Y3); +} + +i_make_fun(FunP, NumFree) { + HEAVY_SWAPOUT; + x(0) = new_fun(c_p, reg, (ErlFunEntry *) $FunP, $NumFree); + HEAVY_SWAPIN; +} + +i_trim(Words) { + Uint cp = E[0]; + E += $Words; + E[0] = cp; +} + +move(Src, Dst) { + $Dst = $Src; +} + +move3(S1, D1, S2, D2, S3, D3) { + $D1 = $S1; + $D2 = $S2; + $D3 = $S3; +} + +move_dup(Src, D1, D2) { + $D1 = $D2 = $Src; +} + +move2_par(S1, D1, S2, D2) { + Eterm V1, V2; + V1 = $S1; + V2 = $S2; + $D1 = V1; + $D2 = V2; +} + +move_shift(Src, SD, D) { + Eterm V; + V = $Src; + $D = $SD; + $SD = V; +} + +move_window3(S1, S2, S3, D) { + Eterm xt0, xt1, xt2; + Eterm* y = &$D; + xt0 = $S1; + xt1 = $S2; + xt2 = $S3; + y[0] = xt0; + y[1] = xt1; + y[2] = xt2; +} + +move_window4(S1, S2, S3, S4, D) { + Eterm xt0, xt1, xt2, xt3; + Eterm* y = &$D; + xt0 = $S1; + xt1 = $S2; + xt2 = $S3; + xt3 = $S4; + y[0] = xt0; + y[1] = xt1; + y[2] = xt2; + y[3] = xt3; +} + +move_window5(S1, S2, S3, S4, S5, D) { + Eterm xt0, xt1, xt2, xt3, xt4; + Eterm *y = &$D; + xt0 = $S1; + xt1 = $S2; + xt2 = $S3; + xt3 = $S4; + xt4 = $S5; + y[0] = xt0; + y[1] = xt1; + y[2] = xt2; + y[3] = xt3; + y[4] = xt4; +} + +move_return(Src) { + //| -no_next + x(0) = $Src; + SET_I(c_p->cp); + c_p->cp = 0; + DispatchReturn; +} + +move_x1(Src) { + x(1) = $Src; +} + +move_x2(Src) { + x(2) = $Src; +} + +node(Dst) { + $Dst = erts_this_node->sysname; +} + +put_list(Hd, Tl, Dst) { + HTOP[0] = $Hd; + HTOP[1] = $Tl; + $Dst = make_list(HTOP); + HTOP += 2; +} + +i_put_tuple := i_put_tuple.make.fill; + +i_put_tuple.make(Dst) { + $Dst = make_tuple(HTOP); +} + +i_put_tuple.fill(Arity) { + Eterm* hp = HTOP; + Eterm arity = $Arity; + + //| -no_next + *hp++ = make_arityval(arity); + I = $NEXT_INSTRUCTION; + do { + Eterm term = *I++; + switch (loader_tag(term)) { + case LOADER_X_REG: + *hp++ = x(loader_x_reg_index(term)); + break; + case LOADER_Y_REG: + *hp++ = y(loader_y_reg_index(term)); + break; + default: + *hp++ = term; + break; + } + } while (--arity != 0); + HTOP = hp; + ASSERT(VALID_INSTR(* (Eterm *)I)); + Goto(*I); +} + +self(Dst) { + $Dst = c_p->common.id; +} + +set_tuple_element(Element, Tuple, Offset) { + Eterm* p; + + ASSERT(is_tuple($Tuple)); + p = (Eterm *) ((unsigned char *) tuple_val($Tuple) + $Offset); + *p = $Element; +} + +swap(R1, R2) { + Eterm V = $R1; + $R1 = $R2; + $R2 = V; +} + +swap_temp(R1, R2, Tmp) { + Eterm V = $R1; + $R1 = $R2; + $R2 = $Tmp = V; +} + +test_heap(Nh, Live) { + $GC_TEST(0, $Nh, $Live); +} + +test_heap_1_put_list(Nh, Reg) { + $test_heap($Nh, 1); + $put_list($Reg, x(0), x(0)); +} + +is_integer_allocate(Fail, Src, NeedStack, Live) { + //| -no_prefetch + $is_integer($Fail, $Src); + $AH($NeedStack, 0, $Live); +} + +is_nonempty_list(Fail, Src) { + //| -no_prefetch + if (is_not_list($Src)) { + $FAIL($Fail); + } +} + +is_nonempty_list_test_heap(Fail, Need, Live) { + //| -no_prefetch + $is_nonempty_list($Fail, x(0)); + $test_heap($Need, $Live); +} + +is_nonempty_list_allocate(Fail, Src, Need, Live) { + //| -no_prefetch + $is_nonempty_list($Fail, $Src); + $AH($Need, 0, $Live); +} + +is_nonempty_list_get_list(Fail, Src, Hd, Tl) { + //| -no_prefetch + $is_nonempty_list($Fail, $Src); + $get_list($Src, $Hd, $Tl); +} + +jump(Fail) { + $JUMP($Fail); +} + +move_jump(Fail, Src) { + x(0) = $Src; + $jump($Fail); +} + +// +// Test instructions. 
+// + +is_atom(Fail, Src) { + if (is_not_atom($Src)) { + $FAIL($Fail); + } +} + +is_boolean(Fail, Src) { + if (($Src) != am_true && ($Src) != am_false) { + $FAIL($Fail); + } +} + +is_binary(Fail, Src) { + if (is_not_binary($Src) || binary_bitsize($Src) != 0) { + $FAIL($Fail); + } +} + +is_bitstring(Fail, Src) { + if (is_not_binary($Src)) { + $FAIL($Fail); + } +} + +is_float(Fail, Src) { + if (is_not_float($Src)) { + $FAIL($Fail); + } +} + +is_function(Fail, Src) { + if ( !(is_any_fun($Src)) ) { + $FAIL($Fail); + } +} + +is_function2(Fail, Fun, Arity) { + if (erl_is_function(c_p, $Fun, $Arity) != am_true ) { + $FAIL($Fail); + } +} + +is_integer(Fail, Src) { + if (is_not_integer($Src)) { + $FAIL($Fail); + } +} + +is_list(Fail, Src) { + if (is_not_list($Src) && is_not_nil($Src)) { + $FAIL($Fail); + } +} + +is_map(Fail, Src) { + if (is_not_map($Src)) { + $FAIL($Fail); + } +} + +is_nil(Fail, Src) { + if (is_not_nil($Src)) { + $FAIL($Fail); + } +} + +is_number(Fail, Src) { + if (is_not_integer($Src) && is_not_float($Src)) { + $FAIL($Fail); + } +} + +is_pid(Fail, Src) { + if (is_not_pid($Src)) { + $FAIL($Fail); + } +} + +is_port(Fail, Src) { + if (is_not_port($Src)) { + $FAIL($Fail); + } +} + +is_reference(Fail, Src) { + if (is_not_ref($Src)) { + $FAIL($Fail); + } +} + +is_tagged_tuple(Fail, Src, Arityval, Tag) { + Eterm term = $Src; + if (!(BEAM_IS_TUPLE(term) && + (tuple_val(term))[0] == $Arityval && + (tuple_val(term))[1] == $Tag)) { + $FAIL($Fail); + } +} + +is_tuple(Fail, Src) { + if (is_not_tuple($Src)) { + $FAIL($Fail); + } +} + +is_tuple_of_arity(Fail, Src, Arityval) { + Eterm term = $Src; + if (!(BEAM_IS_TUPLE(term) && *tuple_val(term) == $Arityval)) { + $FAIL($Fail); + } +} + +test_arity(Fail, Pointer, Arity) { + if (*tuple_val($Pointer) != $Arity) { + $FAIL($Fail); + } +} + +i_is_eq_exact_immed(Fail, X, Y) { + if ($X != $Y) { + $FAIL($Fail); + } +} + +i_is_ne_exact_immed(Fail, X, Y) { + if ($X == $Y) { + $FAIL($Fail); + } +} + +is_eq_exact(Fail, X, Y) { + if (!EQ($X, $Y)) { + $FAIL($Fail); + } +} + +i_is_eq_exact_literal(Fail, Src, Literal) { + if (!eq($Src, $Literal)) { + $FAIL($Fail); + } +} + +is_ne_exact(Fail, X, Y) { + if (EQ($X, $Y)) { + $FAIL($Fail); + } +} + +i_is_ne_exact_literal(Fail, Src, Literal) { + if (eq($Src, $Literal)) { + $FAIL($Fail); + } +} + +is_eq(Fail, X, Y) { + CMP_EQ_ACTION($X, $Y, $FAIL($Fail)); +} + +is_ne(Fail, X, Y) { + CMP_NE_ACTION($X, $Y, $FAIL($Fail)); +} + +is_lt(Fail, X, Y) { + CMP_LT_ACTION($X, $Y, $FAIL($Fail)); +} + +is_ge(Fail, X, Y) { + CMP_GE_ACTION($X, $Y, $FAIL($Fail)); +} + +badarg(Fail) { + $BADARG($Fail); + //| -no_next; +} + +badmatch(Src) { + c_p->fvalue = $Src; + c_p->freason = BADMATCH; + goto find_func_info; +} + +case_end(Src) { + c_p->fvalue = $Src; + c_p->freason = EXC_CASE_CLAUSE; + goto find_func_info; +} + +if_end() { + c_p->freason = EXC_IF_CLAUSE; + goto find_func_info; + //| -no_next; +} + +system_limit(Fail) { + $SYSTEM_LIMIT($Fail); + //| -no_next; +} + +catch(Y, Fail) { + c_p->catches++; + $Y = $Fail; +} + +catch_end(Y) { + c_p->catches--; + make_blank($Y); + if (is_non_value(r(0))) { + c_p->fvalue = NIL; + if (x(1) == am_throw) { + r(0) = x(2); + } else { + if (x(1) == am_error) { + SWAPOUT; + x(2) = add_stacktrace(c_p, x(2), x(3)); + SWAPIN; + } + /* only x(2) is included in the rootset here */ + if (E - HTOP < 3) { + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1, FCALLS); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + } + r(0) = 
TUPLE2(HTOP, am_EXIT, x(2)); + HTOP += 3; + } + } + CHECK_TERM(r(0)); +} + +try_end(Y) { + c_p->catches--; + make_blank($Y); + if (is_non_value(r(0))) { + c_p->fvalue = NIL; + r(0) = x(1); + x(1) = x(2); + x(2) = x(3); + } +} + +try_case_end(Src) { + c_p->fvalue = $Src; + c_p->freason = EXC_TRY_CLAUSE; + goto find_func_info; + //| -no_next; +} + +i_raise() { + Eterm raise_trace = x(2); + Eterm raise_value = x(1); + struct StackTrace *s; + + c_p->fvalue = raise_value; + c_p->ftrace = raise_trace; + s = get_trace_from_exc(raise_trace); + if (s == NULL) { + c_p->freason = EXC_ERROR; + } else { + c_p->freason = PRIMARY_EXCEPTION(s->freason); + } + goto find_func_info; + //| -no_next +} + diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index 3a5ddde5f4..bc1b9b6ef4 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -63,10 +63,10 @@ extern ErlDrvEntry forker_driver_entry; extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */ erts_driver_t *driver_list; /* List of all drivers, static and dynamic. */ -erts_smp_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */ -static erts_smp_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling +erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */ +static erts_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling driver init */ -static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a +static erts_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a per thread basis (for BC interfaces) */ ErtsPTab erts_port erts_align_attribute(ERTS_CACHE_LINE_SIZE); /* The port table */ @@ -94,17 +94,11 @@ static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *); static void terminate_port(Port *p); static void pdl_init(void); static int driver_failure_term(ErlDrvPort ix, Eterm term, int eof); -#ifdef ERTS_SMP static void driver_monitor_lock_pdl(Port *p); static void driver_monitor_unlock_pdl(Port *p); #define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 1) #define DRV_MONITOR_LOCK_PDL(Port) driver_monitor_lock_pdl(Port) #define DRV_MONITOR_UNLOCK_PDL(Port) driver_monitor_unlock_pdl(Port) -#else -#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 0) -#define DRV_MONITOR_LOCK_PDL(Port) /* nothing */ -#define DRV_MONITOR_UNLOCK_PDL(Port) /* nothing */ -#endif #define ERL_SMALL_IO_BIN_LIMIT (4*ERL_ONHEAP_BIN_LIMIT) #define SMALL_WRITE_VEC 16 @@ -122,7 +116,7 @@ static ERTS_INLINE int is_port_ioq_empty(Port *pp) { int res; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); if (!pp->port_data_lock) res = (erts_ioq_size(&pp->ioq) == 0); else { @@ -144,7 +138,7 @@ Uint erts_port_ioq_size(Port *pp) { ErlDrvSizeT res; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); if (!pp->port_data_lock) res = erts_ioq_size(&pp->ioq); else { @@ -212,14 +206,13 @@ dtrace_drvport_str(ErlDrvPort drvport, char *port_buf) static ERTS_INLINE void kill_port(Port *pp) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); ERTS_TRACER_CLEAR(&ERTS_TRACER(pp)); erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */ erts_port_task_free_port(pp); /* In non-smp case the port structure may have been deallocated now */ } -#ifdef ERTS_SMP #ifdef ERTS_ENABLE_LOCK_CHECK int @@ -227,12 +220,11 @@ erts_lc_is_port_locked(Port *prt) { if 
(!prt) return 0; - ERTS_SMP_LC_ASSERT(prt->lock); - return erts_smp_lc_mtx_is_locked(prt->lock); + ERTS_LC_ASSERT(prt->lock); + return erts_lc_mtx_is_locked(prt->lock); } #endif -#endif /* #ifdef ERTS_SMP */ static void initq(Port* prt); @@ -256,25 +248,21 @@ static ERTS_INLINE void port_init_instr(Port *prt * Stuff that need to be initialized with the port id * in the instrumented case, but not in the normal case. */ -#ifdef ERTS_SMP ASSERT(prt->drv_ptr && prt->lock); if (!prt->drv_ptr->lock) { erts_mtx_init_locked(prt->lock, "port_lock", id, ERTS_LOCK_FLAGS_CATEGORY_IO); } -#endif erts_port_task_init_sched(&prt->sched, id); } #if !ERTS_PORT_INIT_INSTR_NEED_ID static ERTS_INLINE void port_init_instr_abort(Port *prt) { -#ifdef ERTS_SMP ASSERT(prt->drv_ptr && prt->lock); if (!prt->drv_ptr->lock) { erts_mtx_unlock(prt->lock); erts_mtx_destroy(prt->lock); } -#endif erts_port_task_fini_sched(&prt->sched); } #endif @@ -310,7 +298,6 @@ static Port *create_port(char *name, erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED; erts_aint32_t x_pts_flgs = 0; -#ifdef ERTS_SMP ErtsRunQueue *runq; if (!driver_lock) { /* Align size for mutex following port struct */ @@ -318,7 +305,6 @@ static Port *create_port(char *name, size += sizeof(erts_mtx_t); } else -#endif port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port)); #ifdef DEBUG @@ -352,7 +338,6 @@ static Port *create_port(char *name, p += busy_port_queue_size; } -#ifdef ERTS_SMP if (driver_lock) { prt->lock = driver_lock; erts_mtx_lock(driver_lock); @@ -366,13 +351,9 @@ static Port *create_port(char *name, runq = erts_get_runq_current(NULL); else runq = ERTS_RUNQ_IX(0); - erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq); + erts_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq); prt->xports = NULL; -#else - erts_atomic32_init_nob(&prt->refc, 1); - prt->cleanup = 0; -#endif erts_port_task_pre_init_sched(&prt->sched, busy_port_queue); @@ -393,7 +374,7 @@ static Port *create_port(char *name, prt->common.u.alive.reg = NULL; ERTS_PTMR_INIT(prt); erts_port_task_handle_init(&prt->timeout_task); - erts_smp_atomic_init_nob(&prt->psd, (erts_aint_t) NULL); + erts_atomic_init_nob(&prt->psd, (erts_aint_t) NULL); prt->async_open_port = NULL; prt->drv_data = (SWord) 0; prt->os_pid = -1; @@ -420,10 +401,8 @@ static Port *create_port(char *name, #if !ERTS_PORT_INIT_INSTR_NEED_ID port_init_instr_abort(prt); #endif -#ifdef ERTS_SMP if (driver_lock) erts_mtx_unlock(driver_lock); -#endif if (enop) *enop = 0; erts_free(ERTS_ALC_T_PORT, prt); @@ -436,7 +415,7 @@ static Port *create_port(char *name, initq(prt); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (erts_port_schedule_all_ops) x_pts_flgs |= ERTS_PTS_FLG_FORCE_SCHED; @@ -445,29 +424,17 @@ static Port *create_port(char *name, x_pts_flgs |= ERTS_PTS_FLG_PARALLELISM; if (x_pts_flgs) - erts_smp_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs); + erts_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs); erts_atomic32_set_relb(&prt->state, state); return prt; } -#ifndef ERTS_SMP -void -erts_port_cleanup(Port *prt) -{ - if (prt->drv_ptr && prt->drv_ptr->handle) - erts_ddll_dereference_driver(prt->drv_ptr->handle); - prt->drv_ptr = NULL; - erts_port_dec_refc(prt); -} -#endif void erts_port_free(Port *prt) { -#if defined(ERTS_SMP) || defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK) erts_aint32_t state = erts_atomic32_read_nob(&prt->state); -#endif ERTS_LC_ASSERT(state & (ERTS_PORT_SFLG_INITIALIZING | ERTS_PORT_SFLG_FREE)); ASSERT(state & 
ERTS_PORT_SFLG_PORT_DEBUG); @@ -481,7 +448,6 @@ erts_port_free(Port *prt) prt->async_open_port = NULL; } -#ifdef ERTS_SMP ASSERT(prt->lock); if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) erts_mtx_destroy(prt->lock); @@ -498,7 +464,6 @@ erts_port_free(Port *prt) */ if (prt->drv_ptr->handle) erts_ddll_dereference_driver(prt->drv_ptr->handle); -#endif erts_free(ERTS_ALC_T_PORT, prt); } @@ -533,7 +498,7 @@ erts_save_suspend_process_on_port(Port *prt, Process *process) int saved; erts_aint32_t flags; erts_port_task_sched_lock(&prt->sched); - flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + flags = erts_atomic32_read_nob(&prt->sched.flags); saved = (flags & ERTS_PTS_FLGS_BUSY) && !(flags & ERTS_PTS_FLG_EXIT); if (saved) erts_proclist_store_last(&prt->suspended, erts_proclist_create(process)); @@ -577,16 +542,16 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ erts_mtx_t *driver_lock = NULL; int cprt_flgs = 0; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); if (!driver) { for (driver = driver_list; driver; driver = driver->next) { if (sys_strcmp(driver->name, name) == 0) break; } if (!driver) { - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG); } } @@ -631,19 +596,17 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ } if (driver == NULL || (driver != &spawn_driver && opts->exit_status)) { - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG); } -#ifdef ERTS_SMP driver_lock = driver->lock; -#endif if (driver->handle != NULL) { erts_ddll_increment_port_count(driver->handle); erts_ddll_reference_driver(driver->handle); } - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); /* * We'll set up the port before calling the start function, @@ -656,9 +619,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ port = create_port(name, driver, driver_lock, cprt_flgs, pid, &port_errno); if (!port) { if (driver->handle) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); erts_ddll_dereference_driver(driver->handle); } if (port_errno) @@ -726,11 +689,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) { trace_sched_ports_where(port, am_out, am_open); } -#ifdef ERTS_SMP if (port->xports) erts_port_handle_xports(port); ASSERT(!port->xports); -#endif } if (error_type) { @@ -745,9 +706,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ port->linebuf = NULL; } if (driver->handle != NULL) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); } kill_port(port); erts_port_release(port); @@ -759,7 +720,6 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. 
*/ #undef ERTS_OPEN_DRIVER_RET } -#ifdef ERTS_SMP struct ErtsXPortsList_ { ErtsXPortsList *next; @@ -768,7 +728,6 @@ struct ErtsXPortsList_ { ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(xports_list, ErtsXPortsList, 50, ERTS_ALC_T_XPORTS_LIST) -#endif /* * Driver function to create new instances of a driver @@ -788,7 +747,7 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ Process *rp; erts_mtx_t *driver_lock = NULL; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; /* Need to be called from a scheduler thread */ if (!erts_get_scheduler_id()) @@ -802,12 +761,12 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ if (!rp) return ERTS_INVALID_ERL_DRV_PORT; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(creator_port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(creator_port)); driver = creator_port->drv_ptr; - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); if (!erts_ddll_driver_ok(driver->handle)) { - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); return ERTS_INVALID_ERL_DRV_PORT; } @@ -816,35 +775,33 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ erts_ddll_reference_referenced_driver(driver->handle); } -#ifdef ERTS_SMP driver_lock = driver->lock; -#endif - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); /* Inherit parallelism flag from parent */ if (ERTS_PTS_FLG_PARALLELISM & - erts_smp_atomic32_read_nob(&creator_port->sched.flags)) + erts_atomic32_read_nob(&creator_port->sched.flags)) cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM; port = create_port(name, driver, driver_lock, cprt_flgs, pid, NULL); if (!port) { if (driver->handle) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); erts_ddll_dereference_driver(driver->handle); } return ERTS_INVALID_ERL_DRV_PORT; } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(port)); - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (driver->handle) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); } kill_port(port); erts_port_release(port); @@ -853,23 +810,20 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ erts_add_link(&ERTS_P_LINKS(port), LINK_PID, pid); erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, port->common.id); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); -#ifdef ERTS_SMP if (!driver_lock) { ErtsXPortsList *xplp = xports_list_alloc(); xplp->port = port; xplp->next = creator_port->xports; creator_port->xports = xplp; } -#endif port->drv_data = (UWord) drv_data; return ERTS_Port2ErlDrvPort(port); } -#ifdef ERTS_SMP int erts_port_handle_xports(Port *prt) { int reds = 0; @@ -898,7 +852,6 @@ int erts_port_handle_xports(Port *prt) prt->xports = NULL; return reds; } -#endif typedef enum { ERTS_TRY_IMM_DRV_CALL_OK, @@ -945,12 +898,12 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) invalid_sched_flags |= ERTS_PTS_FLG_PARALLELISM; 
if (sp->pre_chk_sched_flags) { - sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sp->sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sp->sched_flags & invalid_sched_flags) return ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS; } - if (erts_smp_port_trylock(prt) == EBUSY) + if (erts_port_trylock(prt) == EBUSY) return ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK; invalid_state = sp->state; @@ -964,7 +917,7 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) if (prof_runnable_ports) erts_port_task_sched_lock(&prt->sched); - act = erts_smp_atomic32_read_nob(&prt->sched.flags); + act = erts_atomic32_read_nob(&prt->sched.flags); do { erts_aint32_t new; @@ -976,7 +929,7 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) } exp = act; new = act | ERTS_PTS_FLG_EXEC_IMM; - act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp); } while (act != exp); sp->sched_flags = act; @@ -998,14 +951,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) profile_runnable_proc(c_p, am_inactive); reds_left_in = ERTS_BIF_REDS_LEFT(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); } ASSERT(0 <= reds_left_in && reds_left_in <= CONTEXT_REDS); sp->reds_left_in = reds_left_in; prt->reds = CONTEXT_REDS - reds_left_in; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) @@ -1043,9 +996,9 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) if (prof_runnable_ports) erts_port_task_sched_lock(&prt->sched); - act = erts_smp_atomic32_read_band_mb(&prt->sched.flags, + act = erts_atomic32_read_band_mb(&prt->sched.flags, ~ERTS_PTS_FLG_EXEC_IMM); - ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM); + ERTS_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM); if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) @@ -1060,7 +1013,7 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) erts_port_release(prt); if (c_p) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); if (reds != (CONTEXT_REDS - sp->reds_left_in)) { int bump_reds = reds - (CONTEXT_REDS - sp->reds_left_in); @@ -1171,7 +1124,7 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg, Port* prt) prt); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } @@ -1189,7 +1142,7 @@ erts_schedule_proc2port_signal(Process *c_p, int sched_res; if (!refp) { if (c_p) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); } else { ASSERT(c_p); @@ -1210,20 +1163,20 @@ erts_schedule_proc2port_signal(Process *c_p, * otherwise, next receive will *not* work * as expected! 
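`try_imm_drv_call()` above implements a two-path scheme: if the port lock can be taken immediately and the scheduling flags allow it, the driver callback runs synchronously in the calling process; otherwise the caller falls back to scheduling the work as a port task via `erts_schedule_proc2port_signal()`. A generic, self-contained sketch of that idea using POSIX threads (illustrative names only, not ERTS API):

    #include <pthread.h>
    #include <stdbool.h>

    struct port_like {
        pthread_mutex_t lock;
        bool busy;                   /* stands in for ERTS_PTS_FLG_BUSY_PORT */
    };

    /* Returns true if fn ran inline; false means "schedule it instead". */
    static bool call_immediately(struct port_like *p,
                                 void (*fn)(struct port_like *))
    {
        if (pthread_mutex_trylock(&p->lock) != 0)
            return false;                    /* lock contended             */
        if (p->busy) {
            pthread_mutex_unlock(&p->lock);  /* port busy: queue instead   */
            return false;
        }
        fn(p);                               /* the "immediate" call       */
        pthread_mutex_unlock(&p->lock);
        return true;
    }

The ERTS version additionally publishes the fact that an immediate call is in progress by setting `ERTS_PTS_FLG_EXEC_IMM` in a cmpxchg loop, which `finalize_imm_drv_call()` clears again afterwards.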
*/ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); if (ERTS_PROC_PENDING_EXIT(c_p)) { /* need to exit caller instead */ - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); KILL_CATCHES(c_p); c_p->freason = EXC_EXIT; return ERTS_PORT_OP_CALLER_EXIT; } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); c_p->msg.save = c_p->msg.last; - erts_smp_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE + erts_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_MAIN)); } @@ -1239,7 +1192,7 @@ erts_schedule_proc2port_signal(Process *c_p, task_flags); if (c_p) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); if (sched_res != 0) { if (refp) { @@ -1250,9 +1203,9 @@ erts_schedule_proc2port_signal(Process *c_p, * containing the reference created above... */ ASSERT(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); JOIN_MESSAGE(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); *refp = NIL; } return ERTS_PORT_OP_DROPPED; @@ -1285,14 +1238,14 @@ send_badsig(Port *prt) { ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; Process* rp; Eterm connected = ERTS_PORT_GET_CONNECTED(prt); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; ERTS_LC_ASSERT(erts_get_scheduler_id()); ASSERT(is_internal_pid(connected)); rp = erts_proc_lookup_raw(connected); if (rp) { - erts_smp_proc_lock(rp, rp_locks); + erts_proc_lock(rp, rp_locks); if (!ERTS_PROC_IS_EXITING(rp)) (void) erts_send_exit_signal(NULL, prt->common.id, @@ -1303,7 +1256,7 @@ send_badsig(Port *prt) { NULL, 0); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } /* exit sent */ } /* send_badsig */ @@ -1426,7 +1379,7 @@ call_driver_outputv(int bang_op, ErlDrvSizeT size = evp->size; ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt) || ERTS_IS_CRASH_DUMPING); @@ -1483,7 +1436,7 @@ port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s case ERTS_PROC2PORT_SIG_EXEC: /* Execution of a scheduled outputv() call */ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) reply = am_badarg; @@ -1539,7 +1492,7 @@ call_driver_output(int bang_op, else { ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt) || ERTS_IS_CRASH_DUMPING); #ifdef USE_VM_PROBES @@ -1590,7 +1543,7 @@ port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si case ERTS_PROC2PORT_SIG_EXEC: /* Execution of a scheduled output() call */ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) reply = am_badarg; @@ -1805,7 +1758,7 @@ erts_port_output(Process *c_p, * Assumes caller have checked that port is valid... */ - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT)) return ((sched_flags & ERTS_PTS_FLG_EXIT) ? 
ERTS_PORT_OP_DROPPED @@ -2188,7 +2141,7 @@ erts_port_output(Process *c_p, } if (!(flags & ERTS_PORT_SIG_FLG_FORCE)) { - sched_flags = erts_smp_atomic32_read_acqb(&prt->sched.flags); + sched_flags = erts_atomic32_read_acqb(&prt->sched.flags); if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)) { if (async_nosuspend) erts_port_task_tmp_handle_detach(ns_pthp); @@ -2413,9 +2366,9 @@ set_port_connected(int bang_op, Process *rp = erts_proc_lookup_raw(connect); if (!rp) return ERTS_PORT_OP_DROPPED; - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); return ERTS_PORT_OP_DROPPED; } @@ -2427,7 +2380,7 @@ set_port_connected(int bang_op, ERTS_PORT_SET_CONNECTED(prt, connect); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (IS_TRACED_FL(prt, F_TRACE_PORTS)) trace_port(prt, am_getting_linked, connect); @@ -2620,7 +2573,7 @@ port_link_failure(Eterm port_id, Eterm linker) trace_proc(NULL, 0, rp, am_getting_unlinked, port_id); } if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } } @@ -2706,7 +2659,7 @@ port_monitor_failure(Eterm port_id, Eterm origin, Eterm ref_DOWN) * caller has never seen it yet. */ erts_queue_monitor_message(origin_p, &p_locks, ref_DOWN, am_port, port_id, am_noproc); - erts_smp_proc_unlock(origin_p, p_locks); + erts_proc_unlock(origin_p, p_locks); } /* Origin wants to monitor port Prt. State contains possible error, which has @@ -2734,7 +2687,7 @@ port_monitor(Port *prt, erts_aint32_t state, Eterm origin, erts_add_monitor(&ERTS_P_MONITORS(prt), MON_TARGET, ref, origin, name_or_nil); - erts_smp_proc_unlock(origin_p, p_locks); + erts_proc_unlock(origin_p, p_locks); } else { failure: port_monitor_failure(prt->common.id, origin, ref); @@ -2825,7 +2778,7 @@ port_demonitor_failure(Eterm port_id, Eterm origin, Eterm ref) erts_destroy_monitor(mon1); } - erts_smp_proc_unlock(origin_p, rp_locks); + erts_proc_unlock(origin_p, rp_locks); } /* Origin wants to demonitor port Prt. 
State contains possible error, which has @@ -2855,7 +2808,7 @@ port_demonitor(Port *port, erts_aint32_t state, Eterm origin, Eterm ref) } } if (origin_p) { /* when origin is dying, it won't be found */ - erts_smp_proc_unlock(origin_p, p_locks); + erts_proc_unlock(origin_p, p_locks); } } else { port_demonitor_failure(port->common.id, origin, ref); @@ -2942,10 +2895,10 @@ init_ack_send_reply(Port *port, Eterm resp) if (!is_internal_port(resp)) { Process *rp = erts_proc_lookup_raw(port->async_open_port->to); - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); erts_remove_link(&ERTS_P_LINKS(port), port->async_open_port->to); erts_remove_link(&ERTS_P_LINKS(rp), port->common.id); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } port_sched_op_reply(port->async_open_port->to, port->async_open_port->ref, @@ -3009,9 +2962,9 @@ void erts_init_io(int port_tab_size, { ErlDrvEntry** dp; UWord common_element_size; - erts_smp_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; + drv_list_rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + drv_list_rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; erts_atomic64_init_nob(&bytes_in, 0); erts_atomic64_init_nob(&bytes_out, 0); @@ -3019,11 +2972,9 @@ void erts_init_io(int port_tab_size, common_element_size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port)); common_element_size += ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ)); common_element_size += 10; /* name */ -#ifdef ERTS_SMP common_element_size += sizeof(erts_mtx_t); init_xports_list_alloc(); -#endif pdl_init(); @@ -3038,12 +2989,12 @@ void erts_init_io(int port_tab_size, else if (port_tab_size < ERTS_MIN_PORTS) port_tab_size = ERTS_MIN_PORTS; - erts_smp_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL, + erts_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); driver_list = NULL; - erts_smp_tsd_key_create(&driver_list_lock_status_key, + erts_tsd_key_create(&driver_list_lock_status_key, "erts_driver_list_lock_status_key"); - erts_smp_tsd_key_create(&driver_list_last_error_key, + erts_tsd_key_create(&driver_list_last_error_key, "erts_driver_list_last_error_key"); erts_ptab_init_table(&erts_port, @@ -3058,8 +3009,8 @@ void erts_init_io(int port_tab_size, sys_init_io(); - erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1); - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_tsd_set(driver_list_lock_status_key, (void *) 1); + erts_rwmtx_rwlock(&erts_driver_list_lock); init_driver(&fd_driver, &fd_driver_entry, NULL); init_driver(&vanilla_driver, &vanilla_driver_entry, NULL); @@ -3071,11 +3022,11 @@ void erts_init_io(int port_tab_size, for (dp = driver_tab; *dp != NULL; dp++) erts_add_driver_entry(*dp, NULL, 1); - erts_smp_tsd_set(driver_list_lock_status_key, NULL); - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_tsd_set(driver_list_lock_status_key, NULL); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } -#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_COUNT) static void lcnt_enable_driver_lock_count(erts_driver_t *dp, int enable) { if (dp->lock) { @@ -3164,7 +3115,8 @@ void erts_lcnt_update_port_locks(int enable) { } } -#endif /* 
defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) */ +#endif /* defined(ERTS_ENABLE_LOCK_COUNT) */ + /* * Buffering of data when using line oriented I/O on ports */ @@ -3343,12 +3295,10 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res) ErtsProcLocks rp_locks = 0; int scheduler = erts_get_scheduler_id() != 0; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; ASSERT(!prt || prt->common.id == sender); -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - ASSERT(!prt || erts_lc_is_port_locked(prt)); -#endif + ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); ASSERT(is_internal_port(sender) && is_internal_pid(pid)); @@ -3376,7 +3326,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res) erts_queue_message(rp, rp_locks, mp, tuple, sender); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); @@ -3407,8 +3357,8 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to, int scheduler = erts_get_scheduler_id() != 0; int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; need = 3 + 3 + 2*hlen; @@ -3475,7 +3425,7 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to, ERL_MESSAGE_TOKEN(mp) = am_undefined; erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -3510,7 +3460,7 @@ static void flush_linebuf_messages(Port *prt, erts_aint32_t state) LineBufContext lc; int ret; - ERTS_SMP_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); if (!prt) return; @@ -3554,8 +3504,8 @@ deliver_vec_message(Port* prt, /* Port */ erts_aint32_t state; int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; /* * Check arguments for validity. 
@@ -3646,7 +3596,7 @@ deliver_vec_message(Port* prt, /* Port */ ERL_MESSAGE_TOKEN(mp) = am_undefined; erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -3681,8 +3631,8 @@ static void flush_port(Port *p) { int fpe_was_unmasked; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(p)); if (p->drv_ptr->flush != NULL) { ERTS_MSACC_PUSH_STATE_M(); @@ -3714,11 +3664,9 @@ static void flush_port(Port *p) if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) { trace_sched_ports_where(p, am_out, am_flush); } -#ifdef ERTS_SMP if (p->xports) erts_port_handle_xports(p); ASSERT(!p->xports); -#endif } if ((erts_atomic32_read_nob(&p->state) & ERTS_PORT_SFLGS_DEAD) == 0 && is_port_ioq_empty(p)) { @@ -3736,8 +3684,8 @@ terminate_port(Port *prt) erts_aint32_t state; ErtsPrtSD *psd; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT(!ERTS_P_LINKS(prt)); ASSERT(!ERTS_P_MONITORS(prt)); @@ -3779,11 +3727,9 @@ terminate_port(Port *prt) (*drv->stop)((ErlDrvData)prt->drv_data); erts_unblock_fpe(fpe_was_unmasked); ERTS_MSACC_POP_STATE_M(); -#ifdef ERTS_SMP if (prt->xports) erts_port_handle_xports(prt); ASSERT(!prt->xports); -#endif } if (is_internal_port(send_closed_port_id) @@ -3791,9 +3737,9 @@ terminate_port(Port *prt) trace_port_send(prt, connected_id, am_closed, 1); if(drv->handle != NULL) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(drv->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); } stopq(prt); /* clear queue memory */ if(prt->linebuf != NULL){ @@ -3803,7 +3749,7 @@ terminate_port(Port *prt) erts_cleanup_port_data(prt); - psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd); + psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd); if (psd) erts_free(ERTS_ALC_T_PRTSD, psd); @@ -3816,7 +3762,7 @@ terminate_port(Port *prt) * port has been removed from the port table (in kill_port()). 
*/ if ((state & ERTS_PORT_SFLG_HALT) - && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) { + && (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0)) { erts_port_release(prt); /* We will exit and never return */ erts_flush_async_exit(erts_halt_code, ""); } @@ -3844,7 +3790,7 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc) goto done; } rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon == NULL) { goto done; } @@ -3918,7 +3864,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc) 0); if (xres >= 0) { if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; } /* We didn't exit the process and it is traced */ @@ -3928,7 +3874,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc) erts_destroy_link(rlnk); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } erts_destroy_link(lnk); @@ -3963,7 +3909,7 @@ port_fire_one_monitor(ErtsMonitor *mon, void *ctx0) UnUseTmpHeapNoproc(3); rmon = erts_remove_monitor(&ERTS_P_MONITORS(origin), mon->ref); - erts_smp_proc_unlock(origin, origin_locks); + erts_proc_unlock(origin, origin_locks); if (rmon) { erts_destroy_monitor(rmon); @@ -3989,8 +3935,8 @@ erts_deliver_port_exit(Port *prt, Eterm from, Eterm reason, int send_closed, Eterm modified_reason; erts_aint32_t state, set_state_flags; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); modified_reason = (reason == am_kill) ? am_killed : reason; @@ -4351,7 +4297,7 @@ port_sig_control(Port *prt, prt); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); goto done; } } @@ -4404,7 +4350,7 @@ erts_port_control(Process* c_p, int copy; ErtsProc2PortSigData *sigdp; - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sched_flags & ERTS_PTS_FLG_EXIT) return ERTS_PORT_OP_BADARG; @@ -4716,11 +4662,11 @@ port_sig_call(Port *prt, prt); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); goto done; } if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } } @@ -4754,7 +4700,7 @@ erts_port_call(Process* c_p, erts_aint32_t sched_flags; ErtsProc2PortSigData *sigdp; - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sched_flags & ERTS_PTS_FLG_EXIT) { return ERTS_PORT_OP_BADARG; } @@ -4972,7 +4918,7 @@ port_sig_info(Port *prt, prt); } if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } return ERTS_PORT_REDS_INFO; } @@ -5041,7 +4987,7 @@ typedef struct { Uint sched_id; Eterm pid; Uint32 refn[ERTS_REF_NUMBERS]; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsIOBytesReq; static void @@ -5091,10 +5037,10 @@ reply_io_bytes(void *vreq) if (req->sched_id == sched_id) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } - if (erts_smp_atomic32_dec_read_nob(&req->refc) == 0) + if (erts_atomic32_dec_read_nob(&req->refc) == 0) erts_free(ERTS_ALC_T_IOB_REQ, req); } @@ -5117,16 +5063,14 @@ erts_request_io_bytes(Process *c_p) req->refn[0] = refn[0]; req->refn[1] = refn[1]; 
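`reply_io_bytes()` above runs once on every scheduler, and the shared `ErtsIOBytesReq` block is freed by whichever scheduler decrements `refc` to zero; `erts_request_io_bytes()` (continuing below) initialises `refc` to `erts_no_schedulers` before fanning the work out. The same last-one-out-frees pattern in portable C11, as a sketch (illustrative names, not ERTS API; error handling omitted):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct shared_req {
        atomic_int refc;
        /* ...request payload... */
    };

    struct shared_req *req_create(int nworkers)
    {
        struct shared_req *req = malloc(sizeof *req);
        atomic_init(&req->refc, nworkers);   /* one reference per worker  */
        return req;
    }

    void req_worker_done(struct shared_req *req)
    {
        /* fetch_sub returns the old value; 1 means this was the last user. */
        if (atomic_fetch_sub(&req->refc, 1) == 1)
            free(req);
    }

Note that the `_nob` suffix on the ERTS calls means no memory barrier is implied; the sketch's default sequentially consistent atomics are stronger than what the emulator relies on here.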
req->refn[2] = refn[2]; - erts_smp_atomic32_init_nob(&req->refc, + erts_atomic32_init_nob(&req->refc, (erts_aint32_t) erts_no_schedulers); -#ifdef ERTS_SMP if (erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_io_bytes, (void *) req); -#endif reply_io_bytes((void *) req); @@ -5211,14 +5155,14 @@ set_busy_port(ErlDrvPort dprt, int on) DTRACE_CHARBUF(port_str, 16); #endif - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; prt = erts_drvport2port(dprt); if (prt == ERTS_INVALID_ERL_DRV_PORT) return; if (on) { - flags = erts_smp_atomic32_read_bor_acqb(&prt->sched.flags, + flags = erts_atomic32_read_bor_acqb(&prt->sched.flags, ERTS_PTS_FLG_BUSY_PORT); if (flags & ERTS_PTS_FLG_BUSY_PORT) return; /* Already busy */ @@ -5234,7 +5178,7 @@ set_busy_port(ErlDrvPort dprt, int on) } #endif } else { - flags = erts_smp_atomic32_read_band_acqb(&prt->sched.flags, + flags = erts_atomic32_read_band_acqb(&prt->sched.flags, ~ERTS_PTS_FLG_BUSY_PORT); if (!(flags & ERTS_PTS_FLG_BUSY_PORT)) return; /* Already non-busy */ @@ -5328,7 +5272,7 @@ int get_port_flags(ErlDrvPort ix) if (prt == ERTS_INVALID_ERL_DRV_PORT) return 0; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); flags = 0; if (state & ERTS_PORT_SFLG_BINARY_IO) @@ -5344,8 +5288,8 @@ void erts_raw_port_command(Port* p, byte* buf, Uint len) int fpe_was_unmasked; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(p)); if (len > (Uint) INT_MAX) erts_exit(ERTS_ABORT_EXIT, @@ -5374,10 +5318,10 @@ int async_ready(Port *p, void* data) { int need_free = 1; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (p) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(p)); if (p->drv_ptr->ready_async != NULL) { ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT); #ifdef USE_VM_PROBES @@ -5562,8 +5506,8 @@ void driver_report_exit(ErlDrvPort ix, int status) if (prt == ERTS_INVALID_ERL_DRV_PORT) return; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); pid = ERTS_PORT_GET_CONNECTED(prt); ASSERT(is_internal_pid(pid)); @@ -5586,7 +5530,7 @@ void driver_report_exit(ErlDrvPort ix, int status) ERL_MESSAGE_TOKEN(mp) = am_undefined; erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -6226,7 +6170,7 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len) } if (rp) { if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -6241,9 +6185,7 @@ static ERTS_INLINE int deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p, Port **trace_prt) { -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay(); -#endif erts_aint32_t state; int res = 1; Port *prt = erts_port_lookup_raw((Eterm) port_id); @@ -6261,24 +6203,20 @@ deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p, goto done; } if (connected_p) { -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) ETHR_MEMBAR(ETHR_LoadLoad); -#endif *connected_p = ERTS_PORT_GET_CONNECTED(prt); } done: -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) { - ERTS_SMP_LC_ASSERT(!prt || 
!erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(!prt || !erts_lc_is_port_locked(prt)); erts_thr_progress_unmanaged_continue(dhndl); ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); } else -#endif if (res == 1) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); *trace_prt = prt; } return res; @@ -6306,13 +6244,13 @@ driver_output_term(ErlDrvPort drvport, ErlDrvTermData* data, int len) erts_aint32_t state; Port* prt; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; /* NOTE! It *not* safe to access 'drvport' from unmanaged threads. */ prt = erts_drvport2port_state(drvport, &state); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; /* invalid (dead) */ - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6349,16 +6287,14 @@ driver_send_term(ErlDrvPort drvport, * internal data representation for ErlDrvPort. */ Port* prt = NULL; - ERTS_SMP_CHK_NO_PROC_LOCKS; -#ifdef ERTS_SMP + ERTS_CHK_NO_PROC_LOCKS; if (erts_thr_progress_is_managed_thread()) -#endif { erts_aint32_t state; prt = erts_drvport2port_state(drvport, &state); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; /* invalid (dead) */ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; } @@ -6378,11 +6314,11 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, Port* prt = erts_drvport2port_state(ix, &state); ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6392,6 +6328,7 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, else erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len)); if (state & ERTS_PORT_SFLG_DISTRIBUTION) { + erts_atomic64_inc_nob(&prt->dist_entry->in); return erts_net_message(prt, prt->dist_entry, (byte*) hbuf, hlen, @@ -6417,12 +6354,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, Port* prt = erts_drvport2port_state(ix, &state); ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6432,6 +6369,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, else erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len)); if (state & ERTS_PORT_SFLG_DISTRIBUTION) { + erts_atomic64_inc_nob(&prt->dist_entry->in); if (len == 0) return erts_net_message(prt, prt->dist_entry, @@ -6456,7 +6394,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, int driver_output(ErlDrvPort ix, char* buf, ErlDrvSizeT len) { - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return driver_output2(ix, NULL, 0, buf, len); } @@ -6472,7 +6410,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, erts_aint32_t state; ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; ASSERT(vec->size >= skip); if (vec->size <= skip) @@ -6483,7 +6421,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT 
hlen, if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6697,7 +6635,6 @@ static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl) erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl); } -#ifdef ERTS_SMP static void driver_monitor_lock_pdl(Port *p) { if (p->port_data_lock) { @@ -6706,7 +6643,7 @@ static void driver_monitor_lock_pdl(Port *p) { /* Now we either have the port lock or the port_data_lock */ ERTS_LC_ASSERT(!p->port_data_lock || erts_lc_mtx_is_locked(&(p->port_data_lock->mtx))); - ERTS_SMP_LC_ASSERT(p->port_data_lock + ERTS_LC_ASSERT(p->port_data_lock || erts_lc_is_port_locked(p)); } @@ -6714,14 +6651,13 @@ static void driver_monitor_unlock_pdl(Port *p) { /* We should either have the port lock or the port_data_lock */ ERTS_LC_ASSERT(!p->port_data_lock || erts_lc_mtx_is_locked(&(p->port_data_lock->mtx))); - ERTS_SMP_LC_ASSERT(p->port_data_lock + ERTS_LC_ASSERT(p->port_data_lock || erts_lc_is_port_locked(p)); if (p->port_data_lock) { driver_pdl_unlock(p->port_data_lock); } } -#endif /* * exported driver_pdl_* functions ... @@ -6924,7 +6860,7 @@ int driver_set_timer(ErlDrvPort ix, unsigned long t) { Port* prt = erts_drvport2port(ix); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; @@ -6941,7 +6877,7 @@ int driver_cancel_timer(ErlDrvPort ix) Port* prt = erts_drvport2port(ix); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); erts_cancel_port_timer(prt); return 0; } @@ -6952,11 +6888,11 @@ driver_read_timer(ErlDrvPort ix, unsigned long* t) Port* prt = erts_drvport2port(ix); Sint64 left; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); left = erts_read_port_timer(prt); if (left < 0) @@ -6971,7 +6907,7 @@ int driver_get_now(ErlDrvNowData *now_data) { Uint mega,secs,micro; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (now_data == NULL) { return -1; @@ -7045,7 +6981,7 @@ static int do_driver_monitor_process(Port *prt, erts_add_monitor(&ERTS_P_MONITORS(prt), MON_ORIGIN, ref, rp->common.id, NIL); erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); erts_ref_to_driver_monitor(ref,monitor); return 0; } @@ -7059,7 +6995,7 @@ int driver_monitor_process(ErlDrvPort drvport, { Port *prt; int ret; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) ErtsSchedulerData *sched = erts_get_scheduler_data(); #endif @@ -7069,7 +7005,7 @@ int driver_monitor_process(ErlDrvPort drvport, /* Now (in SMP) we should have either the port lock (if we have a scheduler) or the port data lock (if we're a driver thread) */ - ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock)); + ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock)); ret = do_driver_monitor_process(prt,process,monitor); DRV_MONITOR_UNLOCK_PDL(prt); return ret; @@ -7104,7 +7040,7 @@ static int do_driver_demonitor_process(Port *prt, const ErlDrvMonitor *monitor) if (rp) { ErtsMonitor *rmon; rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon 
!= NULL) { erts_destroy_monitor(rmon); } @@ -7117,7 +7053,7 @@ int driver_demonitor_process(ErlDrvPort drvport, { Port *prt; int ret; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) ErtsSchedulerData *sched = erts_get_scheduler_data(); #endif @@ -7127,7 +7063,7 @@ int driver_demonitor_process(ErlDrvPort drvport, /* Now we should have either the port lock (if we have a scheduler) or the port data lock (if we're a driver thread) */ - ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock)); + ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock)); ret = do_driver_demonitor_process(prt,monitor); DRV_MONITOR_UNLOCK_PDL(prt); return ret; @@ -7158,7 +7094,7 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport, { Port *prt; ErlDrvTermData ret; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) ErtsSchedulerData *sched = erts_get_scheduler_data(); #endif @@ -7168,7 +7104,7 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport, /* Now we should have either the port lock (if we have a scheduler) or the port data lock (if we're a driver thread) */ - ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock)); + ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock)); ret = do_driver_get_monitored_process(prt,monitor); DRV_MONITOR_UNLOCK_PDL(prt); return ret; @@ -7189,7 +7125,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref) int fpe_was_unmasked; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT(prt->drv_ptr != NULL); DRV_MONITOR_LOCK_PDL(prt); if (erts_lookup_monitor(ERTS_P_MONITORS(prt), ref) == NULL) { @@ -7236,11 +7172,11 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof) erts_aint32_t state; Port* prt = erts_drvport2port_state(ix, &state); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (prt->async_open_port) init_ack_send_reply(prt, prt->common.id); @@ -7275,7 +7211,7 @@ int driver_exit(ErlDrvPort ix, int err) ErtsLink *lnk, *rlnk = NULL; Eterm connected; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; @@ -7288,10 +7224,8 @@ int driver_exit(ErlDrvPort ix, int err) lnk = erts_remove_link(&ERTS_P_LINKS(prt), connected); -#ifdef ERTS_SMP if (rp) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); -#endif + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rlnk != NULL) { erts_destroy_link(rlnk); @@ -7345,7 +7279,7 @@ ErlDrvTermData driver_mk_atom(char* string) sys_strlen(string), ERTS_ATOM_ENC_LATIN1, 1); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return (ErlDrvTermData) am; } @@ -7354,27 +7288,27 @@ ErlDrvTermData driver_mk_port(ErlDrvPort ix) Port* prt = erts_drvport2port(ix); if (prt == ERTS_INVALID_ERL_DRV_PORT) return (ErlDrvTermData) NIL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); return (ErlDrvTermData) prt->common.id; } ErlDrvTermData driver_connected(ErlDrvPort ix) { Port* prt = erts_drvport2port(ix); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return NIL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); return ERTS_PORT_GET_CONNECTED(prt); } ErlDrvTermData driver_caller(ErlDrvPort ix) { Port* prt = 
erts_drvport2port(ix); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return NIL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); return prt->caller; } @@ -7383,20 +7317,20 @@ int driver_lock_driver(ErlDrvPort ix) Port* prt = erts_drvport2port(ix); DE_Handle* dh; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); return -1; } erts_ddll_lock_driver(dh, prt->drv_ptr->name); - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); return 0; } @@ -7404,9 +7338,9 @@ int driver_lock_driver(ErlDrvPort ix) static int maybe_lock_driver_list(void) { void *rec_lock; - rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); + rec_lock = erts_tsd_get(driver_list_lock_status_key); if (rec_lock == 0) { - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); return 1; } return 0; @@ -7414,7 +7348,7 @@ static int maybe_lock_driver_list(void) static void maybe_unlock_driver_list(int doit) { if (doit) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } } /* @@ -7437,7 +7371,7 @@ void *driver_dl_open(char * path) { void *ptr; int res; - int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key); + int *last_error_p = erts_tsd_get(driver_list_last_error_key); int locked = maybe_lock_driver_list(); if ((res = erts_sys_ddll_open(path, &ptr, NULL)) == 0) { maybe_unlock_driver_list(locked); @@ -7445,7 +7379,7 @@ void *driver_dl_open(char * path) } else { if (!last_error_p) { last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int)); - erts_smp_tsd_set(driver_list_last_error_key,last_error_p); + erts_tsd_set(driver_list_last_error_key,last_error_p); } *last_error_p = res; maybe_unlock_driver_list(locked); @@ -7457,7 +7391,7 @@ void *driver_dl_sym(void * handle, char *func_name) { void *ptr; int res; - int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key); + int *last_error_p = erts_tsd_get(driver_list_lock_status_key); int locked = maybe_lock_driver_list(); if ((res = erts_sys_ddll_sym(handle, func_name, &ptr)) == 0) { maybe_unlock_driver_list(locked); @@ -7465,7 +7399,7 @@ void *driver_dl_sym(void * handle, char *func_name) } else { if (!last_error_p) { last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int)); - erts_smp_tsd_set(driver_list_lock_status_key,last_error_p); + erts_tsd_set(driver_list_lock_status_key,last_error_p); } *last_error_p = res; maybe_unlock_driver_list(locked); @@ -7485,7 +7419,7 @@ int driver_dl_close(void *handle) char *driver_dl_error(void) { char *res; - int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key); + int *last_error_p = erts_tsd_get(driver_list_lock_status_key); int locked = maybe_lock_driver_list(); res = erts_ddll_error((last_error_p != NULL) ? 
(*last_error_p) : ERL_DE_ERROR_UNSPECIFIED); maybe_unlock_driver_list(locked); @@ -7523,20 +7457,8 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size) sip->driver_minor_version = ERL_DRV_EXTENDED_MINOR_VERSION; sip->erts_version = ERLANG_VERSION; sip->otp_release = ERLANG_OTP_RELEASE; - sip->thread_support = -#ifdef USE_THREADS - 1 -#else - 0 -#endif - ; - sip->smp_support = -#ifdef ERTS_SMP - 1 -#else - 0 -#endif - ; + sip->thread_support = 1; + sip->smp_support = 1; } @@ -7562,11 +7484,7 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size) */ if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) { sip->dirty_scheduler_support = -#ifdef ERTS_DIRTY_SCHEDULERS 1 -#else - 0 -#endif ; } @@ -7646,7 +7564,6 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) drv->version.minor = de->minor_version; drv->flags = de->driver_flags; drv->handle = handle; -#ifdef ERTS_SMP if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) { drv->lock = NULL; } else { @@ -7658,7 +7575,6 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) erts_mtx_init(drv->lock, "driver_lock", driver_id, ERTS_LOCK_FLAGS_CATEGORY_IO); } -#endif drv->entry = de; drv->start = de->start; @@ -7701,12 +7617,10 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) void erts_destroy_driver(erts_driver_t *drv) { -#ifdef ERTS_SMP if (drv->lock) { - erts_smp_mtx_destroy(drv->lock); + erts_mtx_destroy(drv->lock); erts_free(ERTS_ALC_T_DRIVER_LOCK, drv->lock); } -#endif erts_free(ERTS_ALC_T_DRIVER, drv); } @@ -7717,7 +7631,7 @@ erts_destroy_driver(erts_driver_t *drv) void add_driver_entry(ErlDrvEntry *drv){ void *rec_lock; - rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); + rec_lock = erts_tsd_get(driver_list_lock_status_key); /* * Ignore result of erts_add_driver_entry, the init is not * allowed to fail when drivers are added by drivers. 
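The `driver_list_lock_status_key` logic above guards against self-deadlock: `erts_init_io()` and `erts_add_driver_entry()` set a thread-specific flag while they hold `erts_driver_list_lock`, so that `maybe_lock_driver_list()` (used by `driver_dl_open()` and friends, which a driver's init callback may call back into) skips taking the rwlock a second time on the same thread. A condensed, generic version of that guard with POSIX threads (sketch only; key creation is assumed to happen once at start-up, and here the flag is set inside the helper rather than by the outer caller):

    #include <pthread.h>

    static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_key_t holds_lock_key;   /* pthread_key_create()d at init */

    /* Returns 1 if this call took the lock and the caller must release it. */
    static int maybe_lock(void)
    {
        if (pthread_getspecific(holds_lock_key) != NULL)
            return 0;                        /* already held by this thread */
        pthread_rwlock_wrlock(&list_lock);
        pthread_setspecific(holds_lock_key, (void *) 1);
        return 1;
    }

    static void maybe_unlock(int did_lock)
    {
        if (did_lock) {
            pthread_setspecific(holds_lock_key, NULL);
            pthread_rwlock_unlock(&list_lock);
        }
    }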
@@ -7731,7 +7645,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo int res; if (!driver_list_locked) { - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); } dp->next = driver_list; @@ -7742,7 +7656,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo driver_list = dp; if (!driver_list_locked) { - erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1); + erts_tsd_set(driver_list_lock_status_key, (void *) 1); } res = init_driver(dp, de, handle); @@ -7759,8 +7673,8 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo } if (!driver_list_locked) { - erts_smp_tsd_set(driver_list_lock_status_key, NULL); - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_tsd_set(driver_list_lock_status_key, NULL); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return res; } @@ -7771,9 +7685,9 @@ int remove_driver_entry(ErlDrvEntry *drv) erts_driver_t *dp; void *rec_lock; - rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); + rec_lock = erts_tsd_get(driver_list_lock_status_key); if (rec_lock == NULL) { - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); } dp = driver_list; while (dp && dp->entry != drv) @@ -7781,7 +7695,7 @@ int remove_driver_entry(ErlDrvEntry *drv) if (dp) { if (dp->handle) { if (rec_lock == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return -1; } @@ -7795,12 +7709,12 @@ int remove_driver_entry(ErlDrvEntry *drv) } erts_destroy_driver(dp); if (rec_lock == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return 1; } if (rec_lock == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return 0; } diff --git a/erts/emulator/beam/macros.tab b/erts/emulator/beam/macros.tab new file mode 100644 index 0000000000..6f9b78af6f --- /dev/null +++ b/erts/emulator/beam/macros.tab @@ -0,0 +1,165 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +// +// Use if there is a garbage collection before storing to a +// general destination (either X or Y register). 
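The reason for the `REFRESH_GEN_DEST()` macro that this comment introduces (its one-line body follows just below) is that a garbage collection may move the combined heap/stack block, so a destination pointer computed before the collection can no longer be trusted and has to be recomputed from the register number. The same hazard exists with any allocator that may relocate its storage, as in this self-contained C sketch (hypothetical names, not BEAM code; error handling omitted):

    #include <stdlib.h>

    struct frame { long *slots; size_t nslots; };

    static void grow(struct frame *f)       /* may move f->slots           */
    {
        f->slots = realloc(f->slots, 2 * f->nslots * sizeof *f->slots);
        f->nslots *= 2;
    }

    static void store(struct frame *f, size_t dst, long value)
    {
        long *dst_ptr = &f->slots[dst];     /* computed before the move    */
        grow(f);                            /* may invalidate dst_ptr      */
        dst_ptr = &f->slots[dst];           /* "refresh" the pointer       */
        *dst_ptr = value;
    }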
+// + +REFRESH_GEN_DEST() { + dst_ptr = REG_TARGET_PTR(dst); +} + +SET_I_REL(Offset) { + ASSERT(VALID_INSTR(*(I + ($Offset)))); + I += $Offset; +} + +SET_CP_I_ABS(Target) { + c_p->i = $Target; + ASSERT(VALID_INSTR(*c_p->i)); +} + +SET_REL_I(Dst, Offset) { + $Dst = I + ($Offset); + ASSERT(VALID_INSTR(*$Dst)); +} + +FAIL(Fail) { + //| -no_prefetch + $SET_I_REL($Fail); + Goto(*I); +} + +JUMP(Fail) { + //| -no_next + $SET_I_REL($Fail); + Goto(*I); +} + +GC_TEST(Ns, Nh, Live) { + Uint need = $Nh + $Ns; + if (ERTS_UNLIKELY(E - HTOP < need)) { + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + } + HEAP_SPACE_VERIFIED($Nh); +} + +GC_TEST_PRESERVE(NeedHeap, Live, PreserveTerm) { + Uint need = $NeedHeap; + if (ERTS_UNLIKELY(E - HTOP < need)) { + SWAPOUT; + reg[$Live] = $PreserveTerm; + PROCESS_MAIN_CHK_LOCKS(c_p); + FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live+1, FCALLS); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + $PreserveTerm = reg[$Live]; + SWAPIN; + } + HEAP_SPACE_VERIFIED($NeedHeap); +} + + +// Make sure that there are NeedStack + NeedHeap + 1 words available +// on the combined heap/stack segment, then allocates NeedHeap + 1 +// words on the stack and saves CP. +AH(NeedStack, NeedHeap, Live) { + unsigned needed = $NeedStack + 1; + $GC_TEST(needed, $NeedHeap, $Live); + E -= needed; + *E = make_cp(c_p->cp); + c_p->cp = 0; +} + +NEXT0() { + //| -no_next + SET_I((BeamInstr *) $NEXT_INSTRUCTION); + Goto(*I); +} + +NEXT(Addr) { + //| -no_next + SET_I((BeamInstr *) $Addr); + Goto(*I); +} + +FAIL_BODY() { + //| -no_prefetch + goto find_func_info; +} + +FAIL_HEAD_OR_BODY(Fail) { + //| -no_prefetch + + /* + * In a correctly working program, we expect failures in + * guards to be more likely than failures in bodies. + */ + + if (ERTS_LIKELY($Fail)) { + $FAIL($Fail); + } + goto find_func_info; +} + +BADARG(Fail) { + c_p->freason = BADARG; + $FAIL_HEAD_OR_BODY($Fail); +} + +BADARITH0() { + c_p->freason = BADARITH; + goto find_func_info; +} + +SYSTEM_LIMIT(Fail) { + c_p->freason = SYSTEM_LIMIT; + $FAIL_HEAD_OR_BODY($Fail); +} + +BIF_ERROR_ARITY_1(Fail, BIF, Op1) { + //| -no_prefetch + if (ERTS_LIKELY($Fail)) { + $FAIL($Fail); + } + reg[0] = $Op1; + SWAPOUT; + I = handle_error(c_p, I, reg, &bif_export[$BIF]->info.mfa); + goto post_error_handling; +} + +BIF_ERROR_ARITY_2(Fail, BIF, Op1, Op2) { + //| -no_prefetch + if (ERTS_LIKELY($Fail)) { + $FAIL($Fail); + } + reg[0] = $Op1; + reg[1] = $Op2; + SWAPOUT; + I = handle_error(c_p, I, reg, &bif_export[$BIF]->info.mfa); + goto post_error_handling; +} diff --git a/erts/emulator/beam/map_instrs.tab b/erts/emulator/beam/map_instrs.tab new file mode 100644 index 0000000000..bbb2f49b66 --- /dev/null +++ b/erts/emulator/beam/map_instrs.tab @@ -0,0 +1,159 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
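`GC_TEST`/`GC_TEST_PRESERVE` above encode the allocation discipline used throughout these instruction bodies: free space is the gap between the stack top `E` (growing down) and the heap top `HTOP` (growing up), and an instruction must either find the words it needs in that gap or trigger a collection before it writes anything. Reduced to its essentials in plain C (sketch only; `collect()` is an assumed external that makes at least `need` words available):

    #include <stddef.h>

    struct arena {
        long *htop;     /* heap top, grows upwards (cf. HTOP)   */
        long *stop;     /* stack top, grows downwards (cf. E)   */
    };

    void collect(struct arena *a, size_t need);  /* assumed: frees >= need */

    static long *reserve(struct arena *a, size_t need)
    {
        if ((size_t)(a->stop - a->htop) < need)  /* cf. E - HTOP < need    */
            collect(a, need);
        long *p = a->htop;
        a->htop += need;                         /* words are now owned    */
        return p;
    }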
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +ensure_map(Map) { + if (is_not_map($Map)) { + c_p->freason = BADMAP; + c_p->fvalue = $Map; + $FAIL_BODY(); + } +} + +new_map(Dst, Live, N) { + Eterm res; + + HEAVY_SWAPOUT; + res = new_map(c_p, reg, $Live, $N, $NEXT_INSTRUCTION); + HEAVY_SWAPIN; + $REFRESH_GEN_DEST(); + $Dst = res; + $NEXT($NEXT_INSTRUCTION+$N); +} + +i_new_small_map_lit(Dst, Live, Keys) { + Eterm res; + Uint n; + Eterm keys = $Keys; + + HEAVY_SWAPOUT; + res = new_small_map_lit(c_p, reg, keys, $Live, $NEXT_INSTRUCTION); + HEAVY_SWAPIN; + $REFRESH_GEN_DEST(); + $Dst = res; + n = arityval(*tuple_val(keys)); + $NEXT($NEXT_INSTRUCTION+n); +} + +i_get_map_element(Fail, Src, Key, Dst) { + Eterm res = get_map_element($Src, $Key); + if (is_non_value(res)) { + $FAIL($Fail); + } + $Dst = res; +} + +i_get_map_element_hash(Fail, Src, Key, Hx, Dst) { + Eterm res = get_map_element_hash($Src, $Key, $Hx); + if (is_non_value(res)) { + $FAIL($Fail); + } + $Dst = res; +} + +i_get_map_elements(Fail, Src, N) { + Eterm map; + BeamInstr *fs; + Uint sz, n; + + map = $Src; + + /* This instruction assumes Arg1 is a map, + * i.e. that it follows a test is_map if needed. + */ + + n = (Uint)$N / 3; + fs = $NEXT_INSTRUCTION; + + if (is_flatmap(map)) { + flatmap_t *mp; + Eterm *ks; + Eterm *vs; + + mp = (flatmap_t *)flatmap_val(map); + sz = flatmap_get_size(mp); + + if (sz == 0) { + $FAIL($Fail); + } + + ks = flatmap_get_keys(mp); + vs = flatmap_get_values(mp); + + while(sz) { + if (EQ((Eterm) fs[0], *ks)) { + PUT_TERM_REG(*vs, fs[1]); + n--; + fs += 3; + /* no more values to fetch, we are done */ + if (n == 0) { + $NEXT(fs); + } + } + ks++, sz--, vs++; + } + $FAIL($Fail); + } else { + const Eterm *v; + Uint32 hx; + ASSERT(is_hashmap(map)); + while(n--) { + hx = fs[2]; + ASSERT(hx == hashmap_make_hash((Eterm)fs[0])); + if ((v = erts_hashmap_get(hx, (Eterm)fs[0], map)) == NULL) { + $FAIL($Fail); + } + PUT_TERM_REG(*v, fs[1]); + fs += 3; + } + $NEXT(fs); + } +} + +update_map_assoc(Src, Dst, Live, N) { + Eterm res; + Uint live = $Live; + + reg[live] = $Src; + HEAVY_SWAPOUT; + res = update_map_assoc(c_p, reg, live, $N, $NEXT_INSTRUCTION); + HEAVY_SWAPIN; + ASSERT(is_value(res)); + $REFRESH_GEN_DEST(); + $Dst = res; + $NEXT($NEXT_INSTRUCTION+$N); +} + +update_map_exact(Fail, Src, Dst, Live, N) { + Eterm res; + Uint live = $Live; + + reg[live] = $Src; + HEAVY_SWAPOUT; + res = update_map_exact(c_p, reg, live, $N, $NEXT_INSTRUCTION); + HEAVY_SWAPIN; + if (is_value(res)) { + $REFRESH_GEN_DEST(); + $Dst = res; + $NEXT($NEXT_INSTRUCTION+$N); + } else { + $FAIL_HEAD_OR_BODY($Fail); + } +} diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c index 7987cb2eb5..baeec115ea 100644 --- a/erts/emulator/beam/module.c +++ b/erts/emulator/beam/module.c @@ -39,9 +39,9 @@ static IndexTable module_tables[ERTS_NUM_CODE_IX]; -erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; +erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; -static erts_smp_atomic_t tot_module_bytes; +static erts_atomic_t tot_module_bytes; /* SMP note: Active module table lookup and current module instance can be * read without any locks. Old module instances are protected by @@ -49,8 +49,6 @@ static erts_smp_atomic_t tot_module_bytes; * Staging table is protected by the "code_ix lock". 
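`i_get_map_elements` above distinguishes the two map representations: small ("flat") maps keep a key tuple and a parallel value array that are scanned linearly, while larger maps are hash-based and are probed with the precomputed hash carried in `fs[2]`. A simplified model of the flatmap side, with strings standing in for Eterm keys (illustration only, not the emulator's term representation):

    #include <stddef.h>
    #include <string.h>

    struct flatmap {
        size_t size;
        const char **keys;      /* small-map keys           */
        const char **values;    /* values, in the same order */
    };

    /* Returns the matching value or NULL, like the $FAIL path above. */
    static const char *flatmap_get(const struct flatmap *m, const char *key)
    {
        for (size_t i = 0; i < m->size; i++)
            if (strcmp(m->keys[i], key) == 0)
                return m->values[i];
        return NULL;
    }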
*/ -#include "erl_smp.h" - void module_info(fmtfn_t to, void *to_arg) { index_info(to, to_arg, &module_tables[erts_active_code_ix()]); @@ -84,7 +82,7 @@ void erts_module_instance_init(struct erl_module_instance* modi) static Module* module_alloc(Module* tmpl) { Module* obj = (Module*) erts_alloc(ERTS_ALC_T_MODULE, sizeof(Module)); - erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module)); + erts_atomic_add_nob(&tot_module_bytes, sizeof(Module)); obj->module = tmpl->module; obj->slot.index = -1; @@ -98,7 +96,7 @@ static Module* module_alloc(Module* tmpl) static void module_free(Module* mod) { erts_free(ERTS_ALC_T_MODULE, mod); - erts_smp_atomic_add_nob(&tot_module_bytes, -sizeof(Module)); + erts_atomic_add_nob(&tot_module_bytes, -sizeof(Module)); } void init_module_table(void) @@ -120,10 +118,10 @@ void init_module_table(void) } for (i=0; i<ERTS_NUM_CODE_IX; i++) { - erts_smp_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i), + erts_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); } - erts_smp_atomic_init_nob(&tot_module_bytes, 0); + erts_atomic_init_nob(&tot_module_bytes, 0); } @@ -159,14 +157,14 @@ static Module* put_module(Eterm mod, IndexTable* mod_tab) oldsz = index_table_sz(mod_tab); res = (Module*) index_put_entry(mod_tab, (void*) &e); newsz = index_table_sz(mod_tab); - erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); return res; } Module* erts_put_module(Eterm mod) { - ERTS_SMP_LC_ASSERT(erts_initialized == 0 + ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission()); return put_module(mod, &module_tables[erts_staging_code_ix()]); @@ -184,7 +182,7 @@ int module_code_size(ErtsCodeIndex code_ix) int module_table_sz(void) { - return erts_smp_atomic_read_nob(&tot_module_bytes); + return erts_atomic_read_nob(&tot_module_bytes); } #ifdef DEBUG @@ -233,7 +231,7 @@ void module_start_staging(void) copy_module(dst_mod, src_mod); } newsz = index_table_sz(dst); - erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); entries_at_start_staging = dst->entries; IF_DEBUG(dbg_load_code_ix = erts_staging_code_ix()); @@ -251,7 +249,7 @@ void module_end_staging(int commit) oldsz = index_table_sz(tab); index_erase_latest_from(tab, entries_at_start_staging); newsz = index_table_sz(tab); - erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); } IF_DEBUG(dbg_load_code_ix = -1); diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h index 9d258d5dbf..9a81e6035b 100644 --- a/erts/emulator/beam/module.h +++ b/erts/emulator/beam/module.h @@ -72,29 +72,29 @@ int erts_is_old_code_rlocked(ErtsCodeIndex); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -extern erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; +extern erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]); + erts_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]); } ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]); + erts_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]); } ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_rlock(&the_old_code_rwlocks[code_ix]); + 
erts_rwmtx_rlock(&the_old_code_rwlocks[code_ix]); } ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_runlock(&the_old_code_rwlocks[code_ix]); + erts_rwmtx_runlock(&the_old_code_rwlocks[code_ix]); } #ifdef ERTS_ENABLE_LOCK_CHECK ERTS_GLB_INLINE int erts_is_old_code_rlocked(ErtsCodeIndex code_ix) { - return erts_smp_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]); + return erts_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]); } #endif diff --git a/erts/emulator/beam/msg_instrs.tab b/erts/emulator/beam/msg_instrs.tab new file mode 100644 index 0000000000..8055a8616f --- /dev/null +++ b/erts/emulator/beam/msg_instrs.tab @@ -0,0 +1,390 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +// /* +// * Skeleton for receive statement: +// * +// * recv_mark L1 Optional +// * call make_ref/monitor Optional +// * ... +// * recv_set L1 Optional +// * L1: <-------------------+ +// * <-----------+ | +// * | | +// * loop_rec L2 ------+---+ | +// * ... | | | +// * remove_message | | | +// * jump L3 | | | +// * ... | | | +// * loop_rec_end L1 --+ | | +// * L2: <---------------+ | +// * wait L1 -------------------+ or wait_timeout +// * timeout +// * +// * L3: Code after receive... +// * +// */ + +recv_mark(Dest) { + /* + * Save the current position in the message buffer and the + * label for the loop_rec/2 instruction for the + * receive statement. + */ + $SET_REL_I(c_p->msg.mark, $Dest); + c_p->msg.saved_last = c_p->msg.last; +} + +i_recv_set() { + /* + * If the mark is valid (points to the loop_rec/2 + * instruction that follows), we know that the saved + * position points to the first message that could + * possibly be matched out. + * + * If the mark is invalid, we do nothing, meaning that + * we will look through all messages in the message queue. + */ + if (c_p->msg.mark == (BeamInstr *) ($NEXT_INSTRUCTION)) { + c_p->msg.save = c_p->msg.saved_last; + } + SET_I($NEXT_INSTRUCTION); + goto loop_rec_top__; + //| -no_next +} + +i_loop_rec(Dest) { + //| -no_prefetch + + /* + * Pick up the next message and place it in x(0). + * If no message, jump to a wait or wait_timeout instruction. + */ + + ErtsMessage* msgp; + + /* Entry point from recv_set */ + loop_rec_top__: + ; + + /* + * We need to disable GC while matching messages + * in the queue. This is because messages with data outside + * the heap would be corrupted by a GC. + */ + ASSERT(!(c_p->flags & F_DELAY_GC)); + c_p->flags |= F_DELAY_GC; + + /* Entry point from loop_rec_end */ + loop_rec__: + + PROCESS_MAIN_CHK_LOCKS(c_p); + + msgp = PEEK_MESSAGE(c_p); + + if (!msgp) { + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + /* Make sure messages won't pass exit signals... 
*/ + if (ERTS_PROC_PENDING_EXIT(c_p)) { + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + SWAPOUT; + c_p->flags &= ~F_DELAY_GC; + c_p->arity = 0; + goto do_schedule; /* Will be rescheduled for exit */ + } + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); + msgp = PEEK_MESSAGE(c_p); + if (msgp) { + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + } else { + c_p->flags &= ~F_DELAY_GC; + $SET_I_REL($Dest); + Goto(*I); /* Jump to a wait or wait_timeout instruction */ + } + } + if (is_non_value(ERL_MESSAGE_TERM(msgp))) { + SWAPOUT; /* erts_decode_dist_message() may write to heap... */ + if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN, msgp, 0)) { + /* + * A corrupt distribution message that we weren't able to decode; + * remove it... + */ + /* No swapin should be needed */ + ASSERT(HTOP == c_p->htop && E == c_p->stop); + /* TODO: Add DTrace probe for this bad message situation? */ + UNLINK_MESSAGE(c_p, msgp); + msgp->next = NULL; + erts_cleanup_messages(msgp); + goto loop_rec__; + } + SWAPIN; + } + r(0) = ERL_MESSAGE_TERM(msgp); +} + +remove_message() { + //| -no_prefetch + + /* + * Remove a (matched) message from the message queue. + */ + + ErtsMessage* msgp; + PROCESS_MAIN_CHK_LOCKS(c_p); + + ERTS_CHK_MBUF_SZ(c_p); + + msgp = PEEK_MESSAGE(c_p); + + if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) { + save_calls(c_p, &exp_receive); + } + if (ERL_MESSAGE_TOKEN(msgp) == NIL) { +#ifdef USE_VM_PROBES + if (DT_UTAG(c_p) != NIL) { + if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) { + SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag; + } else { + DT_UTAG(c_p) = NIL; + SEQ_TRACE_TOKEN(c_p) = NIL; + } + } else { +#endif + SEQ_TRACE_TOKEN(c_p) = NIL; +#ifdef USE_VM_PROBES + } + DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING; +#endif + } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) { + Eterm msg; + SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp); +#ifdef USE_VM_PROBES + if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) { + if (DT_UTAG(c_p) == NIL) { + DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp); + } + DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING; + } else { +#endif + ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p))); + ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5); + ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p))); + ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p))); + ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p))); + ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p))); + c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p)); + if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) { + c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p)); + } + msg = ERL_MESSAGE_TERM(msgp); + seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE, + c_p->common.id, c_p); +#ifdef USE_VM_PROBES + } +#endif + } +#ifdef USE_VM_PROBES + if (DTRACE_ENABLED(message_receive)) { + Eterm token2 = NIL; + DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE); + Sint tok_label = 0; + Sint tok_lastcnt = 0; + Sint tok_serial = 0; + + dtrace_proc_str(c_p, receiver_name); + token2 = SEQ_TRACE_TOKEN(c_p); + if (have_seqtrace(token2)) { + tok_label = signed_val(SEQ_TRACE_T_LABEL(token2)); + tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2)); + tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2)); + } + DTRACE6(message_receive, + receiver_name, size_object(ERL_MESSAGE_TERM(msgp)), + c_p->msg.len - 1, tok_label, tok_lastcnt, tok_serial); + } +#endif + UNLINK_MESSAGE(c_p, msgp); + JOIN_MESSAGE(c_p); + CANCEL_TIMER(c_p); + + erts_save_message_in_proc(c_p, msgp); + c_p->flags &= ~F_DELAY_GC; + + if (ERTS_IS_GC_DESIRED_INTERNAL(c_p, HTOP, E)) { + /* + * We want to 
GC soon but we leave a few + * reductions giving the message some time + * to turn into garbage. + */ + ERTS_VBUMP_LEAVE_REDS_INTERNAL(c_p, 5, FCALLS); + } + + ERTS_DBG_CHK_REDS(c_p, FCALLS); + ERTS_CHK_MBUF_SZ(c_p); + + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); +} + +loop_rec_end(Dest) { + //| -no_next + /* + * Advance the save pointer to the next message (the current + * message didn't match), then jump to the loop_rec instruction. + */ + + ASSERT(c_p->flags & F_DELAY_GC); + + $SET_I_REL($Dest); + SAVE_MESSAGE(c_p); + if (FCALLS > 0 || FCALLS > neg_o_reds) { + FCALLS--; + goto loop_rec__; + } + + c_p->flags &= ~F_DELAY_GC; + $SET_CP_I_ABS(I); + SWAPOUT; + c_p->arity = 0; + c_p->current = NULL; + goto do_schedule; +} + +timeout_locked() { + /* + * A timeout has occurred. Reset the save pointer so that the next + * receive statement will examine the first message first. + */ + + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + $timeout(); +} + +timeout() { + if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) { + trace_receive(c_p, am_clock_service, am_timeout, NULL); + } + if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) { + save_calls(c_p, &exp_timeout); + } + c_p->flags &= ~F_TIMO; + JOIN_MESSAGE(c_p); +} + +TIMEOUT_VALUE() { + c_p->freason = EXC_TIMEOUT_VALUE; + goto find_func_info; + //| -no_next +} + +i_wait_error_locked() { + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + $TIMEOUT_VALUE(); +} + +i_wait_error() { + $TIMEOUT_VALUE(); +} + +wait_timeout_unlocked_int := wait.lock.int.execute; +wait_timeout_locked_int := wait.int.execute; + +wait_timeout_unlocked := wait.lock.src.execute; +wait_timeout_locked := wait.src.execute; + +wait_unlocked := wait.lock.execute; +wait_locked := wait.unlocked.execute; + +wait.lock() { + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); +} + +wait.unlocked() { +} + +wait.int(Int) { + /* + * If we have already set the timer, we must NOT set it again. Therefore, + * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag. + */ + if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) { + BeamInstr** pi = (BeamInstr **) c_p->def_arg_reg; + *pi = $NEXT_INSTRUCTION; + erts_set_proc_timer_uword(c_p, $Int); + } +} + +wait.src(Src) { + /* + * If we have already set the timer, we must NOT set it again. Therefore, + * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag. + */ + if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) { + Eterm timeout_value = $Src; + if (timeout_value == make_small(0)) { + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + $NEXT0(); + } else if (timeout_value == am_infinity) { + c_p->flags |= F_TIMO; + } else { + int tres = erts_set_proc_timer_term(c_p, timeout_value); + if (tres == 0) { + /* + * The timer routine will set c_p->i to the value in + * c_p->def_arg_reg[0]. Note that it is safe to use this + * location because there are no living x registers in + * a receive statement. + */ + BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg; + *pi = $NEXT_INSTRUCTION; + } else { /* Wrong time */ + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + c_p->freason = EXC_TIMEOUT_VALUE; + goto find_func_info; + } + } + } +} + +// +// Prepare to wait indefinitely for a new message to arrive +// (or for the time set above, if falling through from above). +// When a new message arrives, control will be transferred to +// the loop_rec instruction (at label L1). In case of a +// timeout, control will be transferred to the timeout +// instruction following the wait_timeout instruction. 
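//
// Editorial illustration (not part of this file): the timeout handling in
// wait.src above boils down to the following plain-C sketch. The enum and
// the classify_timeout() helper are invented names for this example only;
// they do not exist in the emulator.
//
//     enum wait_kind {
//         SKIP_WAIT,     /* after 0: fall straight through               */
//         WAIT_FOREVER,  /* after infinity: just set F_TIMO, no timer    */
//         WAIT_ON_TIMER, /* valid timeout term: start a process timer    */
//         BAD_TIMEOUT    /* anything else: raise EXC_TIMEOUT_VALUE       */
//     };
//
//     enum wait_kind classify_timeout(int is_zero, int is_infinity, int timer_ok)
//     {
//         if (is_zero)     return SKIP_WAIT;
//         if (is_infinity) return WAIT_FOREVER;
//         return timer_ok ? WAIT_ON_TIMER : BAD_TIMEOUT;
//     }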
+// + +wait.execute(JumpTarget) { + $SET_REL_I(c_p->i, $JumpTarget); /* L1 */ + SWAPOUT; + c_p->arity = 0; + + if (!ERTS_PTMR_IS_TIMED_OUT(c_p)) { + erts_atomic32_read_band_relb(&c_p->state, + ~ERTS_PSFLG_ACTIVE); + } + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + c_p->current = NULL; + goto do_schedule; + //| -no_next +} diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab index 44613c7d85..87ff92d354 100644 --- a/erts/emulator/beam/ops.tab +++ b/erts/emulator/beam/ops.tab @@ -59,6 +59,7 @@ put_tuple u==0 d => too_old_compiler # All the other instructions. # +%cold label L i_func_info I a a I int_code_end @@ -68,6 +69,8 @@ i_debug_breakpoint i_return_time_trace i_return_to_trace i_yield +trace_jump W +%hot return @@ -96,22 +99,19 @@ line Loc | func_info M F A => func_info M F A | line Loc line I - -%macro: allocate Allocate -pack -%macro: allocate_zero AllocateZero -pack -%macro: allocate_heap AllocateHeap -pack -%macro: allocate_heap_zero AllocateHeapZero -pack -%macro: test_heap TestHeap -pack - allocate t t allocate_heap t I t -deallocate I + +%cold +deallocate Q +%hot + init y allocate_zero t t allocate_heap_zero t I t trim N Remaining => i_trim N -i_trim I +i_trim t test_heap I t @@ -122,8 +122,6 @@ init2 y y init3 y y y init Y1 | init Y2 | init Y3 => init3 Y1 Y2 Y3 init Y1 | init Y2 => init2 Y1 Y2 -%macro: init2 Init2 -pack -%macro: init3 Init3 -pack # Selecting values @@ -160,28 +158,20 @@ is_tuple Fail=f S | select_tuple_arity S=d Fail=f Size=u Rest=* => \ select_tuple_arity S=d Fail=f Size=u Rest=* => \ gen_select_tuple_arity(S, Fail, Size, Rest) -i_select_val_bins x f I -i_select_val_bins y f I +i_select_val_bins xy f I -i_select_val_lins x f I -i_select_val_lins y f I +i_select_val_lins xy f I -i_select_val2 x f c c f f -i_select_val2 y f c c f f +i_select_val2 xy f c c -i_select_tuple_arity x f I -i_select_tuple_arity y f I +i_select_tuple_arity xy f I -i_select_tuple_arity2 x f A A f f -i_select_tuple_arity2 y f A A f f +i_select_tuple_arity2 xy f A A -i_jump_on_val_zero x f I -i_jump_on_val_zero y f I +i_jump_on_val_zero xy f I -i_jump_on_val x f I I -i_jump_on_val y f I I +i_jump_on_val xy f I W -%macro: get_list GetList -pack get_list xy xy xy # The following get_list instructions using x(0) are frequently used. @@ -201,31 +191,27 @@ try Y F => catch Y F try_case Y => try_end Y try_end y +%cold try_case_end s +%hot # Destructive set tuple element -set_tuple_element s d P +set_tuple_element s S P # Get tuple element -%macro: i_get_tuple_element GetTupleElement -pack i_get_tuple_element xy P x %cold i_get_tuple_element xy P y %hot -%macro: i_get_tuple_element2 GetTupleElement2 -pack i_get_tuple_element2 x P x - -%macro: i_get_tuple_element2y GetTupleElement2Y -pack i_get_tuple_element2y x P y y -%macro: i_get_tuple_element3 GetTupleElement3 -pack i_get_tuple_element3 x P x -%macro: is_number IsNumber -fail_action %cold is_number f x is_number f y @@ -236,6 +222,11 @@ is_number Fail Literal=q => move Literal x | is_number Fail x jump f +# +# Exception raising instructions. Infrequently executed. +# + +%cold case_end NotInX=cy => move NotInX x | case_end x badmatch NotInX=cy => move NotInX x | badmatch x @@ -257,9 +248,14 @@ i_raise badarg j system_limit j +%hot + +# +# Move instructions. 
+# + move C=cxy x==0 | jump Lbl => move_jump Lbl C -%macro: move_jump MoveJump -nonext move_jump f ncxy # Movement to and from the stack is common @@ -283,10 +279,6 @@ move_window X1=x X2=x X3=x X4=x Y1=y Y4=y | move X5=x Y5=y | succ(Y4,Y5) => \ move_window X1=x X2=x X3=x Y1=y Y3=y => move_window3 X1 X2 X3 Y1 move_window X1=x X2=x X3=x X4=x Y1=y Y4=y => move_window4 X1 X2 X3 X4 Y1 -%macro: move_window3 MoveWindow3 -pack -%macro: move_window4 MoveWindow4 -pack -%macro: move_window5 MoveWindow5 -pack - move_window3 x x x y move_window4 x x x x y move_window5 x x x x x y @@ -311,10 +303,8 @@ swap_temp R1 R2 Tmp | line Loc | call_ext_only Live Addr | \ swap_temp R1 R2 Tmp | line Loc | call_ext_last Live Addr D | \ is_killed(Tmp, Live) => swap R1 R2 | line Loc | call_ext_last Live Addr D -%macro: swap_temp SwapTemp -pack swap_temp x xy x -%macro: swap Swap -pack swap x xy move Src=x D1=x | move Src=x D2=x => move_dup Src D1 D2 @@ -358,17 +348,13 @@ move C=aiq X=x==2 => move_x2 C move_x1 c move_x2 c -%macro: move_shift MoveShift -pack move_shift x x x move_shift y x x move_shift x y x move_shift x x y -%macro: move_dup MoveDup -pack move_dup xy x xy -%macro: move2_par Move2Par -pack - move2_par x y x y move2_par y x y x move2_par x x x x @@ -380,7 +366,6 @@ move2_par y x x y move2_par x x y x move2_par y x x x -%macro: move3 Move3 -pack move3 x y x y x y move3 y x y x y x move3 x x x x x x @@ -390,7 +375,6 @@ move3 x x x x x x move S=n D=y => init D move S=c D=y => move S x | move x D -%macro:move Move -pack move x x move x y move y x @@ -410,13 +394,15 @@ move r y loop_rec Fail x==0 | smp_mark_target_label(Fail) => i_loop_rec Fail -label L | wait_timeout Fail Src | smp_already_locked(L) => label L | i_wait_timeout_locked Fail Src -wait_timeout Fail Src => i_wait_timeout Fail Src -i_wait_timeout Fail Src=aiq => gen_literal_timeout(Fail, Src) -i_wait_timeout_locked Fail Src=aiq => gen_literal_timeout_locked(Fail, Src) +label L | wait_timeout Fail Src | smp_already_locked(L) => \ + label L | wait_timeout_locked Src Fail +wait_timeout Fail Src => wait_timeout_unlocked Src Fail + +wait_timeout_unlocked Src=aiq Fail => gen_literal_timeout(Fail, Src) +wait_timeout_locked Src=aiq Fail => gen_literal_timeout_locked(Fail, Src) label L | wait Fail | smp_already_locked(L) => label L | wait_locked Fail -wait Fail | smp() => wait_unlocked Fail +wait Fail => wait_unlocked Fail label L | timeout | smp_already_locked(L) => label L | timeout_locked @@ -425,15 +411,19 @@ timeout timeout_locked i_loop_rec f loop_rec_end f -wait f wait_locked f wait_unlocked f -i_wait_timeout f I -i_wait_timeout f s -i_wait_timeout_locked f I -i_wait_timeout_locked f s + +# Note that a timeout value must fit in 32 bits. +wait_timeout_unlocked_int I f +wait_timeout_unlocked s f +wait_timeout_locked_int I f +wait_timeout_locked s f + +%cold i_wait_error i_wait_error_locked +%hot send @@ -441,33 +431,35 @@ send # Optimized comparisons with one immediate/literal operand. 
# -is_eq_exact Lbl R=xy C=ian => i_is_eq_exact_immed Lbl R C +is_eq_exact Lbl S S => +is_eq_exact Lbl C1=c C2=c => move C1 x | is_eq_exact Lbl x C2 +is_eq_exact Lbl C=c R=xy => is_eq_exact Lbl R C + +is_eq_exact Lbl R=xy n => is_nil Lbl R +is_eq_exact Lbl R=xy C=ia => i_is_eq_exact_immed Lbl R C is_eq_exact Lbl R=xy C=q => i_is_eq_exact_literal Lbl R C +is_ne_exact Lbl S S => jump Lbl +is_ne_exact Lbl C1=c C2=c => move C1 x | is_ne_exact Lbl x C2 +is_ne_exact Lbl C=c R=xy => is_ne_exact Lbl R C + is_ne_exact Lbl R=xy C=ian => i_is_ne_exact_immed Lbl R C is_ne_exact Lbl R=xy C=q => i_is_ne_exact_literal Lbl R C -%macro: i_is_eq_exact_immed EqualImmed -fail_action -i_is_eq_exact_immed f r c -i_is_eq_exact_immed f x c -i_is_eq_exact_immed f y c +i_is_eq_exact_immed f rxy c -i_is_eq_exact_literal f x c -i_is_eq_exact_literal f y c +i_is_eq_exact_literal f xy c -%macro: i_is_ne_exact_immed NotEqualImmed -fail_action -i_is_ne_exact_immed f x c -i_is_ne_exact_immed f y c +i_is_ne_exact_immed f xy c -i_is_ne_exact_literal f x c -i_is_ne_exact_literal f y c +i_is_ne_exact_literal f xy c is_eq_exact Lbl Y=y X=x => is_eq_exact Lbl X Y -%macro: is_eq_exact EqualExact -fail_action -pack is_eq_exact f x xy -is_eq_exact f s s +is_eq_exact f y y + +is_ne_exact f S S -%macro: is_lt IsLessThan -fail_action is_lt f x x is_lt f x c is_lt f c x @@ -475,7 +467,6 @@ is_lt f c x is_lt f s s %hot -%macro: is_ge IsGreaterEqual -fail_action is_ge f x x is_ge f x c is_ge f c x @@ -483,13 +474,8 @@ is_ge f c x is_ge f s s %hot -%macro: is_ne_exact NotEqualExact -fail_action -is_ne_exact f s s - -%macro: is_eq Equal -fail_action is_eq f s s -%macro: is_ne NotEqual -fail_action is_ne f s s # @@ -507,9 +493,7 @@ i_put_tuple Dst Arity Puts=* | put S => \ i_put_tuple/2 -%macro:i_put_tuple PutTuple -pack -goto:do_put_tuple -i_put_tuple x I -i_put_tuple y I +i_put_tuple xy I # # The instruction "put_list Const [] Dst" were generated in rare @@ -518,8 +502,6 @@ i_put_tuple y I # put_list Const=c n Dst => move Const x | put_list x n Dst -%macro:put_list PutList -pack - put_list x n x put_list y n x put_list x x x @@ -560,6 +542,7 @@ put_list s s d # Some more only used by the emulator # +%cold normal_exit continue_exit apply_bif @@ -567,6 +550,7 @@ call_nif call_error_handler error_action_code return_trace +%hot # # Instruction transformations & folded instructions. 
@@ -577,27 +561,18 @@ return_trace move S x==0 | return => move_return S -%macro: move_return MoveReturn -nonext -move_return x -move_return c -move_return n +move_return xcn move S x==0 | deallocate D | return => move_deallocate_return S D -%macro: move_deallocate_return MoveDeallocateReturn -pack -nonext -move_deallocate_return x Q -move_deallocate_return y Q -move_deallocate_return c Q -move_deallocate_return n Q +move_deallocate_return xycn Q deallocate D | return => deallocate_return D -%macro: deallocate_return DeallocateReturn -nonext deallocate_return Q test_heap Need u==1 | put_list Y=y x==0 x==0 => test_heap_1_put_list Need Y -%macro: test_heap_1_put_list TestHeapPutList -pack test_heap_1_put_list I y # @@ -608,8 +583,6 @@ is_tagged_tuple Fail Literal=q Arity Atom => \ move Literal x | is_tagged_tuple Fail x Arity Atom is_tagged_tuple Fail=f c Arity Atom => jump Fail -%macro:is_tagged_tuple IsTaggedTuple -fail_action - is_tagged_tuple f rxy A a # Test tuple & arity (head) @@ -618,17 +591,13 @@ is_tuple Fail Literal=q => move Literal x | is_tuple Fail x is_tuple Fail=f c => jump Fail is_tuple Fail=f S=xy | test_arity Fail=f S=xy Arity => is_tuple_of_arity Fail S Arity -%macro:is_tuple_of_arity IsTupleOfArity -fail_action - is_tuple_of_arity f rxy A -%macro: is_tuple IsTuple -fail_action is_tuple f rxy test_arity Fail Literal=q Arity => move Literal x | test_arity Fail x Arity test_arity Fail=f c Arity => jump Fail -%macro: test_arity IsArity -fail_action test_arity f xy A get_tuple_element Reg=x P1 D1=x | get_tuple_element Reg=x P2 D2=x | \ @@ -650,16 +619,13 @@ is_integer Fail Literal=q => move Literal x | is_integer Fail x is_integer Fail=f S=x | allocate Need Regs => is_integer_allocate Fail S Need Regs -%macro: is_integer_allocate IsIntegerAllocate -fail_action -is_integer_allocate f x I I +is_integer_allocate f x t t -%macro: is_integer IsInteger -fail_action is_integer f xy is_list Fail=f n => is_list Fail Literal=q => move Literal x | is_list Fail x is_list Fail=f c => jump Fail -%macro: is_list IsList -fail_action is_list f x %cold is_list f y @@ -667,24 +633,16 @@ is_list f y is_nonempty_list Fail=f S=x | allocate Need Rs => is_nonempty_list_allocate Fail S Need Rs -%macro:is_nonempty_list_allocate IsNonemptyListAllocate -fail_action -pack -is_nonempty_list_allocate f rx I t - -is_nonempty_list F=f x==0 | test_heap I1 I2 => is_non_empty_list_test_heap F I1 I2 - -%macro: is_non_empty_list_test_heap IsNonemptyListTestHeap -fail_action -pack -is_non_empty_list_test_heap f I t +is_nonempty_list F=f x==0 | test_heap I1 I2 => is_nonempty_list_test_heap F I1 I2 is_nonempty_list Fail=f S=x | get_list S D1=x D2=x => \ is_nonempty_list_get_list Fail S D1 D2 -%macro: is_nonempty_list_get_list IsNonemptyListGetList -fail_action -pack +is_nonempty_list_allocate f rx t t +is_nonempty_list_test_heap f I t is_nonempty_list_get_list f rx x x - -%macro: is_nonempty_list IsNonemptyList -fail_action is_nonempty_list f xy -%macro: is_atom IsAtom -fail_action is_atom f x %cold is_atom f y @@ -692,7 +650,6 @@ is_atom f y is_atom Fail=f a => is_atom Fail=f niq => jump Fail -%macro: is_float IsFloat -fail_action is_float f x %cold is_float f y @@ -703,12 +660,10 @@ is_float Fail Literal=q => move Literal x | is_float Fail x is_nil Fail=f n => is_nil Fail=f qia => jump Fail -%macro: is_nil IsNil -fail_action is_nil f xy is_binary Fail Literal=q => move Literal x | is_binary Fail x is_binary Fail=f c => jump Fail -%macro: is_binary IsBinary -fail_action is_binary f x %cold is_binary f y @@ -719,28 
+674,24 @@ is_bitstr Fail Term => is_bitstring Fail Term is_bitstring Fail Literal=q => move Literal x | is_bitstring Fail x is_bitstring Fail=f c => jump Fail -%macro: is_bitstring IsBitstring -fail_action is_bitstring f x %cold is_bitstring f y %hot is_reference Fail=f cq => jump Fail -%macro: is_reference IsRef -fail_action is_reference f x %cold is_reference f y %hot is_pid Fail=f cq => jump Fail -%macro: is_pid IsPid -fail_action is_pid f x %cold is_pid f y %hot is_port Fail=f cq => jump Fail -%macro: is_port IsPort -fail_action is_port f x %cold is_port f y @@ -751,22 +702,19 @@ is_boolean Fail=f a==am_false => is_boolean Fail=f ac => jump Fail %cold -%macro: is_boolean IsBoolean -fail_action is_boolean f xy %hot is_function2 Fail=f acq Arity => jump Fail is_function2 Fail=f Fun a => jump Fail -is_function2 f s s -%macro: is_function2 IsFunction2 -fail_action +is_function2 f S s # Allocating & initializing. allocate Need Regs | init Y => allocate_init Need Regs Y init Y1 | init Y2 => init2 Y1 Y2 -%macro: allocate_init AllocateInit -pack -allocate_init t I y +allocate_init t t y ################################################################# # External function and bif calls. @@ -1020,9 +968,11 @@ i_apply_fun i_apply_fun_last P i_apply_fun_only +%cold i_hibernate i_perf_counter +%hot call_bif e @@ -1045,19 +995,18 @@ bif2 Fail Bif S1 S2 Dst => i_bif2 Fail Bif S1 S2 Dst i_get_hash c I d i_get s d -%macro: self Self self xy -%macro: node Node node x %cold node y %hot -i_fast_element j x I d -i_fast_element j y I d +# Note: 'I' is sufficient because this instruction will only be used +# if the arity fits in 24 bits. +i_fast_element xy j I d -i_element j xy s d +i_element xy j s d bif1 f b s d bif1_body b s d @@ -1068,50 +1017,35 @@ i_bif2_body b s s d # Internal calls. # -move S=c x==0 | call Ar P=f => i_move_call S P -move S=s x==0 | call Ar P=f => move_call S P - -i_move_call c f +move S=cxy x==0 | call Ar P=f => move_call S P -%macro:move_call MoveCall -arg_f -size -nonext move_call/2 +move_call cxy f -move_call xy f - -move S=c x==0 | call_last Ar P=f D => i_move_call_last P D S move S x==0 | call_last Ar P=f D => move_call_last S P D -i_move_call_last f P c - -%macro:move_call_last MoveCallLast -arg_f -nonext -pack - move_call_last/3 -move_call_last xy f Q +move_call_last cxy f Q -move S=c x==0 | call_only Ar P=f => i_move_call_only P S -move S=x x==0 | call_only Ar P=f => move_call_only S P +move S=cx x==0 | call_only Ar P=f => move_call_only S P -i_move_call_only f c - -%macro:move_call_only MoveCallOnly -arg_f -nonext move_call_only/2 - -move_call_only x f +move_call_only cx f call Ar Func => i_call Func call_last Ar Func D => i_call_last Func D call_only Ar Func => i_call_only Func i_call f -i_call_last f P +i_call_last f Q i_call_only f i_call_ext e -i_call_ext_last e P +i_call_ext_last e Q i_call_ext_only e i_move_call_ext c e -i_move_call_ext_last e P c +i_move_call_ext_last e Q c i_move_call_ext_only e c # Fun calls. @@ -1119,17 +1053,15 @@ i_move_call_ext_only e c call_fun Arity | deallocate D | return => i_call_fun_last Arity D call_fun Arity => i_call_fun Arity -i_call_fun I -i_call_fun_last I P +i_call_fun t +i_call_fun_last t Q make_fun2 OldIndex=u => gen_make_fun2(OldIndex) -%macro: i_make_fun MakeFun -pack %cold -i_make_fun I t +i_make_fun W t %hot -%macro: is_function IsFunction -fail_action is_function f xy is_function Fail=f c => jump Fail @@ -1139,45 +1071,44 @@ func_info M F A => i_func_info u M F A # New bit syntax matching (R11B). 
# ================================================================ -%cold +%warm bs_start_match2 Fail=f ica X Y D => jump Fail bs_start_match2 Fail Bin X Y D => i_bs_start_match2 Bin Fail X Y D -i_bs_start_match2 xy f I I d +i_bs_start_match2 xy f t t x bs_save2 Reg Index => gen_bs_save(Reg, Index) -i_bs_save2 x I +i_bs_save2 x t bs_restore2 Reg Index => gen_bs_restore(Reg, Index) -i_bs_restore2 x I +i_bs_restore2 x t # Matching integers bs_match_string Fail Ms Bits Val => i_bs_match_string Ms Fail Bits Val -i_bs_match_string x f I I +i_bs_match_string x f W W # Fetching integers from binaries. bs_get_integer2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \ gen_get_integer2(Fail, Ms, Live, Sz, Unit, Flags, Dst) -i_bs_get_integer_small_imm x I f I d -i_bs_get_integer_imm x I I f I d -i_bs_get_integer f I I s s d -i_bs_get_integer_8 x f d -i_bs_get_integer_16 x f d -i_bs_get_integer_32 x f I d +i_bs_get_integer_small_imm x W f t x +i_bs_get_integer_imm x W t f t x +i_bs_get_integer f t t x s x +i_bs_get_integer_8 x f x +i_bs_get_integer_16 x f x + +%if ARCH_64 +i_bs_get_integer_32 x f x +%endif # Fetching binaries from binaries. bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \ gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst) -%macro: i_bs_get_binary_imm2 BsGetBinaryImm_2 -fail_action -%macro: i_bs_get_binary2 BsGetBinary_2 -fail_action -%macro: i_bs_get_binary_all2 BsGetBinaryAll_2 -fail_action - -i_bs_get_binary_imm2 f x I I I x -i_bs_get_binary2 f x I s I x -i_bs_get_binary_all2 f x I I x -i_bs_get_binary_all_reuse x f I +i_bs_get_binary_imm2 f x t W t x +i_bs_get_binary2 f x t s t x +i_bs_get_binary_all2 f x t t x +i_bs_get_binary_all_reuse x f t # Fetching float from binaries. bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \ @@ -1185,30 +1116,24 @@ bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \ bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail -%macro: i_bs_get_float2 BsGetFloat2 -fail_action -i_bs_get_float2 f x I s I x +i_bs_get_float2 f x t s t x # Miscellanous bs_skip_bits2 Fail=f Ms=x Sz=sq Unit=u Flags=u => \ gen_skip_bits2(Fail, Ms, Sz, Unit, Flags) -%macro: i_bs_skip_bits_imm2 BsSkipBitsImm2 -fail_action -i_bs_skip_bits_imm2 f x I - -%macro: i_bs_skip_bits2 BsSkipBits2 -fail_action -i_bs_skip_bits2 f x xy I - -%macro: i_bs_skip_bits_all2 BsSkipBitsAll2 -fail_action -i_bs_skip_bits_all2 f x I +i_bs_skip_bits_imm2 f x W +i_bs_skip_bits2 f x xy t +i_bs_skip_bits_all2 f x t bs_test_tail2 Fail=f Ms=x Bits=u==0 => bs_test_zero_tail2 Fail Ms bs_test_tail2 Fail=f Ms=x Bits=u => bs_test_tail_imm2 Fail Ms Bits bs_test_zero_tail2 f x -bs_test_tail_imm2 f x I +bs_test_tail_imm2 f x W bs_test_unit F Ms Unit=u==8 => bs_test_unit8 F Ms -bs_test_unit f x I +bs_test_unit f x t bs_test_unit8 f x # An y register operand for bs_context_to_binary is rare, @@ -1222,14 +1147,14 @@ bs_context_to_binary x # Utf8/utf16/utf32 support. 
(R12B-5) # bs_get_utf8 Fail=f Ms=x u u Dst=d => i_bs_get_utf8 Ms Fail Dst -i_bs_get_utf8 x f d +i_bs_get_utf8 x f x bs_skip_utf8 Fail=f Ms=x u u => i_bs_get_utf8 Ms Fail x bs_get_utf16 Fail=f Ms=x u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst bs_skip_utf16 Fail=f Ms=x u Flags=u => i_bs_get_utf16 Ms Fail Flags x -i_bs_get_utf16 x f I d +i_bs_get_utf16 x f t x bs_get_utf32 Fail=f Ms=x Live=u Flags=u Dst=d => \ bs_get_integer2 Fail Ms Live i=32 u=1 Flags Dst | \ @@ -1238,22 +1163,18 @@ bs_skip_utf32 Fail=f Ms=x Live=u Flags=u => \ bs_get_integer2 Fail Ms Live i=32 u=1 Flags x | \ i_bs_validate_unicode_retract Fail x Ms -i_bs_validate_unicode_retract j s s +i_bs_validate_unicode_retract j s S %hot # # Constructing binaries # -%cold +%warm bs_init2 Fail Sz Words Regs Flags Dst | binary_too_big(Sz) => system_limit Fail -bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst | should_gen_heap_bin(Sz) => \ - i_bs_init_heap_bin Sz Regs Dst bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init Sz Regs Dst -bs_init2 Fail Sz=u Words Regs Flags Dst | should_gen_heap_bin(Sz) => \ - i_bs_init_heap_bin_heap Sz Words Regs Dst bs_init2 Fail Sz=u Words Regs Flags Dst => \ i_bs_init_heap Sz Words Regs Dst @@ -1262,15 +1183,13 @@ bs_init2 Fail Sz Words=u==0 Regs Flags Dst => \ bs_init2 Fail Sz Words Regs Flags Dst => \ i_bs_init_fail_heap Sz Words Fail Regs Dst -i_bs_init_fail xy j I d +i_bs_init_fail xy j t x -i_bs_init_fail_heap s I j I d +i_bs_init_fail_heap s I j t x -i_bs_init I I d -i_bs_init_heap_bin I I d +i_bs_init W t x -i_bs_init_heap I I I d -i_bs_init_heap_bin_heap I I I d +i_bs_init_heap W I t x bs_init_bits Fail Sz=o Words Regs Flags Dst => system_limit Fail @@ -1283,16 +1202,16 @@ bs_init_bits Fail Sz Words=u==0 Regs Flags Dst => \ bs_init_bits Fail Sz Words Regs Flags Dst => \ i_bs_init_bits_fail_heap Sz Words Fail Regs Dst -i_bs_init_bits_fail xy j I d +i_bs_init_bits_fail xy j t x -i_bs_init_bits_fail_heap s I j I d +i_bs_init_bits_fail_heap s I j t x -i_bs_init_bits I I d -i_bs_init_bits_heap I I I d +i_bs_init_bits W t x +i_bs_init_bits_heap W I t x bs_add Fail S1=i==0 S2 Unit=u==1 D => move S2 D -bs_add j s s I d +bs_add j s s t x bs_append Fail Size Extra Live Unit Bin Flags Dst => \ move Bin x | i_bs_append Fail Extra Live Unit Size Dst @@ -1302,8 +1221,8 @@ bs_private_append Fail Size Unit Bin Flags Dst => \ bs_init_writable -i_bs_append j I I I s d -i_bs_private_append j I s s d +i_bs_append j I t t s x +i_bs_private_append j t s S x # # Storing integers into binaries. @@ -1312,11 +1231,8 @@ i_bs_private_append j I s s d bs_put_integer Fail=j Sz=sq Unit=u Flags=u Src=s => \ gen_put_integer(Fail, Sz, Unit, Flags, Src) -%macro: i_new_bs_put_integer NewBsPutInteger -%macro: i_new_bs_put_integer_imm NewBsPutIntegerImm - -i_new_bs_put_integer j s I s -i_new_bs_put_integer_imm j I I s +i_new_bs_put_integer j s t s +i_new_bs_put_integer_imm j W t s # # Utf8/utf16/utf32 support. 
(R12B-5) @@ -1324,17 +1240,17 @@ i_new_bs_put_integer_imm j I I s bs_utf8_size j Src=s Dst=d => i_bs_utf8_size Src Dst -i_bs_utf8_size s d +i_bs_utf8_size s x bs_utf16_size j Src=s Dst=d => i_bs_utf16_size Src Dst -i_bs_utf16_size s d +i_bs_utf16_size s x bs_put_utf8 Fail u Src=s => i_bs_put_utf8 Fail Src i_bs_put_utf8 j s -bs_put_utf16 j I s +bs_put_utf16 j t s bs_put_utf32 Fail=j Flags=u Src=s => \ i_bs_validate_unicode Fail Src | bs_put_integer Fail i=32 u=1 Flags Src @@ -1349,11 +1265,8 @@ bs_put_float Fail Sz=q Unit Flags Val => badarg Fail bs_put_float Fail=j Sz=s Unit=u Flags=u Src=s => \ gen_put_float(Fail, Sz, Unit, Flags, Src) -%macro: i_new_bs_put_float NewBsPutFloat -%macro: i_new_bs_put_float_imm NewBsPutFloatImm - -i_new_bs_put_float j s I s -i_new_bs_put_float_imm j I I s +i_new_bs_put_float j s t s +i_new_bs_put_float_imm j W t s # # Storing binaries into binaries. @@ -1362,14 +1275,9 @@ i_new_bs_put_float_imm j I I s bs_put_binary Fail=j Sz=s Unit=u Flags=u Src=s => \ gen_put_binary(Fail, Sz, Unit, Flags, Src) -%macro: i_new_bs_put_binary NewBsPutBinary -i_new_bs_put_binary j s I s - -%macro: i_new_bs_put_binary_imm NewBsPutBinaryImm -i_new_bs_put_binary_imm j I s - -%macro: i_new_bs_put_binary_all NewBsPutBinaryAll -i_new_bs_put_binary_all j s I +i_new_bs_put_binary j s t s +i_new_bs_put_binary_imm j W s +i_new_bs_put_binary_all j s t # # Warning: The i_bs_put_string and i_new_bs_put_string instructions @@ -1377,9 +1285,7 @@ i_new_bs_put_binary_all j s I # Don't change the instruction format unless you change the loader too. # -bs_put_string I I - -%hot +bs_put_string W W # # New floating point instructions (R8). @@ -1393,11 +1299,13 @@ fnegate p FR1 FR2 => i_fnegate FR1 FR2 fconv Arg=iqan Dst=l => move Arg x | fconv x Dst -fmove q l -fmove d l -fmove l d +fmove Arg=l Dst=d => fstore Arg Dst +fmove Arg=dq Dst=l => fload Arg Dst -fconv d l +fstore l d +fload Sq l + +fconv S l i_fadd l l l i_fsub l l l @@ -1407,50 +1315,87 @@ i_fnegate l l fclearerror | no_fpe_signals() => fcheckerror p | no_fpe_signals() => + +%unless NO_FPE_SIGNALS fcheckerror p => i_fcheckerror i_fcheckerror fclearerror +%endif + +%hot # # New apply instructions in R10B. # -apply I -apply_last I P +apply t +apply_last t Q # -# Map instructions in R17. +# Handle compatibility with OTP 17 here. # -sorted_put_map_assoc/5 -put_map_assoc F Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \ - sorted_put_map_assoc F Map Dst Live Size Rest +i_put_map_assoc/4 + +# We KNOW that in OTP 20 (actually OTP 18 and higher), a put_map_assoc instruction +# is always preceded by an is_map test. That means that put_map_assoc can never +# fail and does not need any failure label. + +put_map_assoc Fail Map Dst Live Size Rest=* | compiled_with_otp_20_or_higher() => \ + i_put_map_assoc Map Dst Live Size Rest + +# Translate the put_map_assoc instruction if the module was compiled by a compiler +# before 20. This is only necessary if the OTP 17 compiler was used, but we +# have no safe and relatively easy way to know whether OTP 18/19 was used. + +put_map_assoc Fail=p Map Dst Live Size Rest=* => \ + ensure_map Map | i_put_map_assoc Map Dst Live Size Rest +put_map_assoc Fail=f Map Dst Live Size Rest=* => \ + is_map Fail Map | i_put_map_assoc Map Dst Live Size Rest + +ensure_map Lit=q | literal_is_map(Lit) => +ensure_map Src=cqy => move Src x | ensure_map x + +%cold +ensure_map x +%hot + +# +# Map instructions. First introduced in R17. 
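#
# Editorial aside (not part of ops.tab): the three put_map_assoc rules in
# the compatibility section above pick a prelude as sketched below in plain
# C. compiled_with_otp_20_or_higher() is the predicate used by the first
# rule; choose_prelude() and its string results are invented names for this
# sketch only.
#
#     const char *choose_prelude(int otp20_or_newer, int has_fail_label)
#     {
#         if (otp20_or_newer)  return "";            /* is_map test already emitted */
#         if (has_fail_label)  return "is_map";      /* branch to Fail if not a map */
#         return "ensure_map";                       /* no label: raise badmap      */
#     }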
+# + +sorted_put_map_assoc/4 +i_put_map_assoc Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \ + sorted_put_map_assoc Map Dst Live Size Rest sorted_put_map_exact/5 put_map_exact F Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \ sorted_put_map_exact F Map Dst Live Size Rest -sorted_put_map_assoc j Map Dst Live Size Rest=* | is_empty_map(Map) => \ +sorted_put_map_assoc Map Dst Live Size Rest=* | is_empty_map(Map) => \ new_map Dst Live Size Rest -sorted_put_map_assoc F Src=s Dst Live Size Rest=* => \ - update_map_assoc F Src Dst Live Size Rest -sorted_put_map_assoc F Src Dst Live Size Rest=* => \ - move Src x | update_map_assoc F x Dst Live Size Rest +sorted_put_map_assoc Src=s Dst Live Size Rest=* => \ + update_map_assoc Src Dst Live Size Rest +sorted_put_map_assoc Src Dst Live Size Rest=* => \ + move Src x | update_map_assoc x Dst Live Size Rest sorted_put_map_exact F Src=s Dst Live Size Rest=* => \ update_map_exact F Src Dst Live Size Rest sorted_put_map_exact F Src Dst Live Size Rest=* => \ move Src x | update_map_exact F x Dst Live Size Rest -new_map d I I -update_map_assoc j s d I I -update_map_exact j s d I I +new_map Dst Live Size Rest=* | is_small_map_literal_keys(Size, Rest) => \ + gen_new_small_map_lit(Dst, Live, Size, Rest) + +new_map d t I +i_new_small_map_lit d t q +update_map_assoc s d t I +update_map_exact j s d t I is_map Fail Lit=q | literal_is_map(Lit) => is_map Fail cq => jump Fail -%macro: is_map IsMap -fail_action is_map f xy ## Transform has_map_fields #{ K1 := _, K2 := _ } to has_map_elements @@ -1470,10 +1415,8 @@ i_get_map_elements f s I i_get_map_element Fail Src=xy Key=y Dst => \ move Key x | i_get_map_element Fail Src x Dst -%macro: i_get_map_element_hash GetMapElementHash -fail_action i_get_map_element_hash f xy c I xy -%macro: i_get_map_element GetMapElement -fail_action i_get_map_element f xy x xy # @@ -1509,9 +1452,9 @@ gen_minus p Live Reg=d Int=i Dst | negation_is_small(Int) => \ # GCing arithmetic instructions. 
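#
# Editorial aside (not part of ops.tab): "GCing" means that these
# instructions may allocate on the heap, and may therefore trigger a
# garbage collection, e.g. when a result no longer fits in a small
# integer. A minimal C sketch of that fast-path-or-fallback shape, with
# invented names and no term tagging:
#
#     #include <stdbool.h>
#
#     /* true:  *out holds the result, no allocation and hence no GC
#        false: caller must take the slow path, which may build a bignum
#               on the heap and may garbage collect */
#     static bool small_add(long x, long y, long *out)
#     {
#         return !__builtin_add_overflow(x, y, out);
#     }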
# -gen_plus Fail Live S1 S2 Dst => i_plus Fail Live S1 S2 Dst +gen_plus Fail Live S1 S2 Dst => i_plus S1 S2 Fail Live Dst -gen_minus Fail Live S1 S2 Dst => i_minus Fail Live S1 S2 Dst +gen_minus Fail Live S1 S2 Dst => i_minus S1 S2 Fail Live Dst gc_bif2 Fail Live u$bif:erlang:stimes/2 S1 S2 Dst => \ i_times Fail Live S1 S2 Dst @@ -1522,15 +1465,15 @@ gc_bif2 Fail Live u$bif:erlang:intdiv/2 S1 S2 Dst => \ i_int_div Fail Live S1 S2 Dst gc_bif2 Fail Live u$bif:erlang:rem/2 S1 S2 Dst => \ - i_rem Fail Live S1 S2 Dst + i_rem S1 S2 Fail Live Dst gc_bif2 Fail Live u$bif:erlang:bsl/2 S1 S2 Dst => \ - i_bsl Fail Live S1 S2 Dst + i_bsl S1 S2 Fail Live Dst gc_bif2 Fail Live u$bif:erlang:bsr/2 S1 S2 Dst => \ - i_bsr Fail Live S1 S2 Dst + i_bsr S1 S2 Fail Live Dst gc_bif2 Fail Live u$bif:erlang:band/2 S1 S2 Dst => \ - i_band Fail Live S1 S2 Dst + i_band S1 S2 Fail Live Dst gc_bif2 Fail Live u$bif:erlang:bor/2 S1 S2 Dst => \ i_bor Fail Live S1 S2 Dst @@ -1540,32 +1483,34 @@ gc_bif2 Fail Live u$bif:erlang:bxor/2 S1 S2 Dst => \ gc_bif1 Fail I u$bif:erlang:bnot/1 Src Dst=d => i_int_bnot Fail Src I Dst -i_increment rxy I I d +i_increment rxy W t d -i_plus j I x xy d -i_plus j I s s d +i_plus x xy j t d +i_plus s s j t d -i_minus j I x x d -i_minus j I s s d +i_minus x x j t d +i_minus s s j t d -i_times j I s s d +i_times j t s s d -i_m_div j I s s d -i_int_div j I s s d +i_m_div j t s s d +i_int_div j t s s d -i_rem j I x x d -i_rem j I s s d +i_rem x x j t d +i_rem s s j t d -i_bsl j I s s d -i_bsr j I s s d +i_bsl s s j t d +i_bsr s s j t d -i_band j I x c d -i_band j I s s d +i_band x c j t d +i_band s s j t d i_bor j I s s d i_bxor j I s s d -i_int_bnot j s I d +i_int_bnot Fail Src=c Live Dst => move Src x | i_int_bnot Fail x Live Dst + +i_int_bnot j S t d # # Old guard BIFs that creates heap fragments are no longer allowed. 
@@ -1589,9 +1534,9 @@ gc_bif2 Fail I Bif S1 S2 Dst => \ gc_bif3 Fail I Bif S1 S2 S3 Dst => \ gen_guard_bif3(Fail, I, Bif, S1, S2, S3, Dst) -i_gc_bif1 j I s I d +i_gc_bif1 j W s t d -i_gc_bif2 j I I s s d +i_gc_bif2 j W t s s d ii_gc_bif3/7 @@ -1600,7 +1545,7 @@ ii_gc_bif3/7 ii_gc_bif3 Fail Bif Live S1 S2 S3 Dst => \ move S1 x | i_gc_bif3 Fail Bif Live S2 S3 Dst -i_gc_bif3 j I I s s d +i_gc_bif3 j W t s s d # # The following instruction is specially handled in beam_load.c diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c index bf3267cff1..92a0854ad3 100644 --- a/erts/emulator/beam/register.c +++ b/erts/emulator/beam/register.c @@ -38,16 +38,15 @@ static Hash process_reg; #define REG_HASH(term) ((HashValue) atom_val(term)) -static erts_smp_rwmtx_t regtab_rwmtx; +static erts_rwmtx_t regtab_rwmtx; -#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(®tab_rwmtx) -#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(®tab_rwmtx) -#define reg_read_lock() erts_smp_rwmtx_rlock(®tab_rwmtx) -#define reg_write_lock() erts_smp_rwmtx_rwlock(®tab_rwmtx) -#define reg_read_unlock() erts_smp_rwmtx_runlock(®tab_rwmtx) -#define reg_write_unlock() erts_smp_rwmtx_rwunlock(®tab_rwmtx) +#define reg_try_read_lock() erts_rwmtx_tryrlock(®tab_rwmtx) +#define reg_try_write_lock() erts_rwmtx_tryrwlock(®tab_rwmtx) +#define reg_read_lock() erts_rwmtx_rlock(®tab_rwmtx) +#define reg_write_lock() erts_rwmtx_rwlock(®tab_rwmtx) +#define reg_read_unlock() erts_rwmtx_runlock(®tab_rwmtx) +#define reg_write_unlock() erts_rwmtx_rwunlock(®tab_rwmtx) -#ifdef ERTS_SMP static ERTS_INLINE void reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks) { @@ -64,7 +63,7 @@ reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks) } /* Release process locks in order to avoid deadlock */ - erts_smp_proc_unlock(c_p, *c_p_locks); + erts_proc_unlock(c_p, *c_p_locks); *c_p_locks = 0; } @@ -87,14 +86,13 @@ reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks) } /* Release process locks in order to avoid deadlock */ - erts_smp_proc_unlock(c_p, *c_p_locks); + erts_proc_unlock(c_p, *c_p_locks); *c_p_locks = 0; } reg_write_lock(); } -#endif static ERTS_INLINE int is_proc_alive(Process *p) @@ -141,11 +139,11 @@ static void reg_free(RegProc *obj) void init_register_table(void) { HashFunctions f; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(®tab_rwmtx, &rwmtx_opt, "reg_tab", NIL, + erts_rwmtx_init_opt(®tab_rwmtx, &rwmtx_opt, "reg_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); f.hash = (H_FUN) reg_hash; @@ -175,7 +173,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) Process *proc = NULL; Port *port = NULL; RegProc r, *rp; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); if (is_not_atom(name) || name == am_undefined) return res; @@ -185,7 +183,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) else { if (is_not_internal_pid(id) && is_not_internal_port(id)) return res; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); if (is_internal_port(id)) { port = erts_id2port(id); if (!port) @@ -193,15 +191,13 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) } } -#ifdef 
ERTS_SMP { ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0; reg_safe_write_lock(proc, &proc_locks); if (proc && !proc_locks) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } -#endif if (is_internal_pid(id)) { if (!proc) @@ -215,7 +211,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) } else { ASSERT(!INVALID_PORT(port, id)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(port)); r.pt = port; if (r.pt->common.u.alive.reg) goto done; @@ -250,8 +246,8 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) erts_port_release(port); if (c_p != proc) { if (proc) - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } return res; } @@ -272,17 +268,15 @@ erts_whereis_name_to_id(Process *c_p, Eterm name) HashValue hval; int ix; HashBucket* b; -#ifdef ERTS_SMP ErtsProcLocks c_p_locks = 0; if (c_p) { c_p_locks = ERTS_PROC_LOCK_MAIN; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); } reg_safe_read_lock(c_p, &c_p_locks); if (c_p && !c_p_locks) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); -#endif + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); hval = REG_HASH(name); ix = hval % process_reg.size; @@ -331,7 +325,6 @@ erts_whereis_name(Process *c_p, HashValue hval; int ix; HashBucket* b; -#ifdef ERTS_SMP ErtsProcLocks current_c_p_locks; Port *pending_port = NULL; @@ -348,7 +341,6 @@ erts_whereis_name(Process *c_p, * - read reg lock * - current_c_p_locks (either c_p_locks or 0) on c_p */ -#endif hval = REG_HASH(name); ix = hval % process_reg.size; @@ -370,7 +362,6 @@ erts_whereis_name(Process *c_p, if (!rp) *proc = NULL; else { -#ifdef ERTS_SMP if (!rp->p) *proc = NULL; else { @@ -387,17 +378,10 @@ erts_whereis_name(Process *c_p, *proc = rp->p; else { if (need_locks) - erts_smp_proc_unlock(rp->p, need_locks); + erts_proc_unlock(rp->p, need_locks); *proc = NULL; } } -#else - if (rp->p - && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p))) - *proc = rp->p; - else - *proc = NULL; -#endif if (*proc && (flags & ERTS_P2P_FLG_INC_REFC)) erts_proc_inc_refc(*proc); } @@ -407,7 +391,6 @@ erts_whereis_name(Process *c_p, if (!rp || !rp->pt) *port = NULL; else { -#ifdef ERTS_SMP if (lock_port) { if (pending_port == rp->pt) pending_port = NULL; @@ -419,11 +402,11 @@ erts_whereis_name(Process *c_p, pending_port = NULL; } - if (erts_smp_port_trylock(rp->pt) == EBUSY) { + if (erts_port_trylock(rp->pt) == EBUSY) { Eterm id = rp->pt->common.id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... 
*/ if (current_c_p_locks) { - erts_smp_proc_unlock(c_p, current_c_p_locks); + erts_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_read_unlock(); @@ -431,19 +414,16 @@ erts_whereis_name(Process *c_p, goto restart; } } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(rp->pt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(rp->pt)); } -#endif *port = rp->pt; } } -#ifdef ERTS_SMP if (c_p && !current_c_p_locks) - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_lock(c_p, c_p_locks); if (pending_port) erts_port_release(pending_port); -#endif reg_read_unlock(); } @@ -476,7 +456,6 @@ int erts_unregister_name(Process *c_p, RegProc r, *rp; Port *port = c_prt; ErtsProcLocks current_c_p_locks = 0; -#ifdef ERTS_SMP /* * SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name', @@ -492,18 +471,15 @@ int erts_unregister_name(Process *c_p, restart: reg_safe_write_lock(c_p, ¤t_c_p_locks); -#endif r.name = name; if (is_non_value(name)) { /* Unregister current process name */ ASSERT(c_p); -#ifdef ERTS_SMP if (current_c_p_locks != c_p_locks) { - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_lock(c_p, c_p_locks); current_c_p_locks = c_p_locks; } -#endif if (c_p->common.u.alive.reg) { r.name = c_p->common.u.alive.reg->name; } else { @@ -516,36 +492,34 @@ int erts_unregister_name(Process *c_p, if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) { if (rp->pt) { if (port != rp->pt) { -#ifdef ERTS_SMP if (port) { ASSERT(port != c_prt); erts_port_release(port); port = NULL; } - if (erts_smp_port_trylock(rp->pt) == EBUSY) { + if (erts_port_trylock(rp->pt) == EBUSY) { Eterm id = rp->pt->common.id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... */ if (current_c_p_locks) { - erts_smp_proc_unlock(c_p, current_c_p_locks); + erts_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_write_unlock(); port = erts_id2port(id); goto restart; } -#endif port = rp->pt; } ASSERT(rp->pt == port); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(port)); rp->pt->common.u.alive.reg = NULL; if (IS_TRACED_FL(port, F_TRACE_PORTS)) { if (current_c_p_locks) { - erts_smp_proc_unlock(c_p, current_c_p_locks); + erts_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } trace_port(port, am_unregister, r.name); @@ -553,7 +527,6 @@ int erts_unregister_name(Process *c_p, } else if (rp->p) { -#ifdef ERTS_SMP erts_proc_safelock(c_p, current_c_p_locks, c_p_locks, @@ -561,17 +534,14 @@ int erts_unregister_name(Process *c_p, (c_p == rp->p) ? current_c_p_locks : 0, ERTS_PROC_LOCK_MAIN); current_c_p_locks = c_p_locks; -#endif rp->p->common.u.alive.reg = NULL; if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) { trace_proc(rp->p, (c_p == rp->p) ? 
c_p_locks : ERTS_PROC_LOCK_MAIN, rp->p, am_unregister, r.name); } -#ifdef ERTS_SMP if (rp->p != c_p) { - erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN); } -#endif } hash_erase(&process_reg, (void*) &r); res = 1; @@ -585,14 +555,12 @@ int erts_unregister_name(Process *c_p, erts_port_release(port); } if (c_prt) { - erts_smp_port_lock(c_prt); + erts_port_lock(c_prt); } } -#ifdef ERTS_SMP if (c_p && !current_c_p_locks) { - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_lock(c_p, c_p_locks); } -#endif return res; } @@ -633,14 +601,12 @@ BIF_RETTYPE registered_0(BIF_ALIST_0) Uint need; Eterm* hp; HashBucket **bucket; -#ifdef ERTS_SMP ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P); reg_safe_read_lock(BIF_P, &proc_locks); if (!proc_locks) - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); bucket = process_reg.bucket; diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c index 527c9efeca..ac9ebd4714 100644 --- a/erts/emulator/beam/safe_hash.c +++ b/erts/emulator/beam/safe_hash.c @@ -62,7 +62,7 @@ static ERTS_INLINE int align_up_pow2(int val) */ static void rehash(SafeHash* h, int grow_limit) { - if (erts_smp_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) { + if (erts_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) { return; /* already in progress */ } if (h->grow_limit == grow_limit) { @@ -77,7 +77,7 @@ static void rehash(SafeHash* h, int grow_limit) sys_memzero(new_tab, bytes); for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */ - erts_smp_mtx_lock(&h->lock_vec[i].mtx); + erts_mtx_lock(&h->lock_vec[i].mtx); } h->tab = new_tab; @@ -95,12 +95,12 @@ static void rehash(SafeHash* h, int grow_limit) } for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { - erts_smp_mtx_unlock(&h->lock_vec[i].mtx); + erts_mtx_unlock(&h->lock_vec[i].mtx); } erts_free(h->type, (void *) old_tab); } /*else already done */ - erts_smp_atomic_set_relb(&h->is_rehashing, 0); + erts_atomic_set_relb(&h->is_rehashing, 0); } @@ -115,7 +115,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h) int objects = 0; for (lock_ix=0; lock_ix<SAFE_HASH_LOCK_CNT; lock_ix++) { - erts_smp_mtx_lock(&h->lock_vec[lock_ix].mtx); + erts_mtx_lock(&h->lock_vec[lock_ix].mtx); size = h->size_mask + 1; for (i = lock_ix; i < size; i += SAFE_HASH_LOCK_CNT) { int depth = 0; @@ -128,7 +128,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h) if (depth > max_depth) max_depth = depth; } - erts_smp_mtx_unlock(&h->lock_vec[lock_ix].mtx); + erts_mtx_unlock(&h->lock_vec[lock_ix].mtx); } hi->name = h->name; @@ -145,9 +145,9 @@ int safe_hash_table_sz(SafeHash *h) int i, size; for(i=0; h->name[i]; i++); i++; - erts_smp_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */ + erts_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */ size = h->size_mask + 1; - erts_smp_mtx_unlock(&h->lock_vec[0].mtx); + erts_mtx_unlock(&h->lock_vec[0].mtx); return sizeof(SafeHash) + size*sizeof(SafeHashBucket*) + i; } @@ -168,10 +168,10 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, erts_lock_ h->name = name; h->fun = fun; set_size(h,size); - erts_smp_atomic_init_nob(&h->is_rehashing, 0); - erts_smp_atomic_init_nob(&h->nitems, 0); + erts_atomic_init_nob(&h->is_rehashing, 0); + erts_atomic_init_nob(&h->nitems, 0); for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { - erts_smp_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL, + 
erts_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL, flags); } return h; @@ -185,8 +185,8 @@ void* safe_hash_get(SafeHash* h, void* tmpl) { SafeHashValue hval = h->fun.hash(tmpl); SafeHashBucket* b; - erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; - erts_smp_mtx_lock(lock); + erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; + erts_mtx_lock(lock); b = h->tab[hval & h->size_mask]; while(b != NULL) { @@ -194,7 +194,7 @@ void* safe_hash_get(SafeHash* h, void* tmpl) break; b = b->next; } - erts_smp_mtx_unlock(lock); + erts_mtx_unlock(lock); return (void*) b; } @@ -207,13 +207,13 @@ void* safe_hash_put(SafeHash* h, void* tmpl) SafeHashValue hval = h->fun.hash(tmpl); SafeHashBucket* b; SafeHashBucket** head; - erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; - erts_smp_mtx_lock(lock); + erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; + erts_mtx_lock(lock); head = &h->tab[hval & h->size_mask]; b = *head; while(b != NULL) { if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) { - erts_smp_mtx_unlock(lock); + erts_mtx_unlock(lock); return b; } b = b->next; @@ -224,8 +224,8 @@ void* safe_hash_put(SafeHash* h, void* tmpl) b->next = *head; *head = b; grow_limit = h->grow_limit; - erts_smp_mtx_unlock(lock); - if (erts_smp_atomic_inc_read_nob(&h->nitems) > grow_limit) { + erts_mtx_unlock(lock); + if (erts_atomic_inc_read_nob(&h->nitems) > grow_limit) { rehash(h, grow_limit); } return (void*) b; @@ -240,22 +240,22 @@ void* safe_hash_erase(SafeHash* h, void* tmpl) SafeHashValue hval = h->fun.hash(tmpl); SafeHashBucket* b; SafeHashBucket** prevp; - erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; - erts_smp_mtx_lock(lock); + erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; + erts_mtx_lock(lock); prevp = &h->tab[hval & h->size_mask]; b = *prevp; while(b != NULL) { if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) { *prevp = b->next; - erts_smp_mtx_unlock(lock); - erts_smp_atomic_dec_nob(&h->nitems); + erts_mtx_unlock(lock); + erts_atomic_dec_nob(&h->nitems); h->fun.free((void*)b); return tmpl; } prevp = &b->next; b = b->next; } - erts_smp_mtx_unlock(lock); + erts_mtx_unlock(lock); return NULL; } @@ -280,7 +280,7 @@ void erts_lcnt_enable_hash_lock_count(SafeHash *h, erts_lock_flags_t flags, int int i; for(i = 0; i < SAFE_HASH_LOCK_CNT; i++) { - erts_smp_mtx_t *lock = &h->lock_vec[i].mtx; + erts_mtx_t *lock = &h->lock_vec[i].mtx; if(enable) { erts_lcnt_install_new_lock_info(&lock->lcnt, "safe_hash", NIL, diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h index dde48a6de8..259c58cff9 100644 --- a/erts/emulator/beam/safe_hash.h +++ b/erts/emulator/beam/safe_hash.h @@ -73,11 +73,11 @@ typedef struct int size_mask; /* (RW) Number of slots - 1 */ SafeHashBucket** tab; /* (RW) Vector of bucket pointers (objects) */ int grow_limit; /* (RW) Threshold for growing table */ - erts_smp_atomic_t nitems; /* (A) Number of items in table */ - erts_smp_atomic_t is_rehashing; /* (A) Table rehashing in progress */ + erts_atomic_t nitems; /* (A) Number of items in table */ + erts_atomic_t is_rehashing; /* (A) Table rehashing in progress */ union { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; byte __cache_line__[64]; }lock_vec[SAFE_HASH_LOCK_CNT]; diff --git a/erts/emulator/beam/select_instrs.tab b/erts/emulator/beam/select_instrs.tab new file mode 100644 index 0000000000..2951949d38 --- /dev/null +++ b/erts/emulator/beam/select_instrs.tab @@ -0,0 +1,190 @@ +// -*- c -*- +// +// 
%CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +i_select_val_bins := select_val_bins.fetch.select; + +select_val_bins.head() { + Eterm select_val; +} + +select_val_bins.fetch(Src) { + select_val = $Src; +} + +select_val_bins.select(Fail, NumElements) { + struct Singleton { + BeamInstr val; + }; + struct Singleton* low; + struct Singleton* high; + struct Singleton* mid; + int bdiff; /* int not long because the arrays aren't that large */ + + low = (struct Singleton *) ($NEXT_INSTRUCTION); + high = low + $NumElements; + + /* The pointer subtraction (high-low) below must produce + * a signed result, because high could be < low. That + * requires the compiler to insert quite a bit of code. + * + * However, high will be > low so the result will be + * positive. We can use that knowledge to optimise the + * entire sequence, from the initial comparison to the + * computation of mid. + * + * -- Mikael Pettersson, Acumem AB + * + * Original loop control code: + * + * while (low < high) { + * mid = low + (high-low) / 2; + * + */ + while ((bdiff = (int)((char*)high - (char*)low)) > 0) { + unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Singleton)-1); + + mid = (struct Singleton*)((char*)low + boffset); + if (select_val < mid->val) { + high = mid; + } else if (select_val > mid->val) { + low = mid + 1; + } else { + Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION + $NumElements); + Sint32 offset = jump_tab[mid - (struct Singleton *)($NEXT_INSTRUCTION)]; + $JUMP(offset); + } + } + $JUMP($Fail); +} + +i_select_tuple_arity2 := select_val2.src.get_arity.execute; +i_select_val2 := select_val2.src.execute; + +select_val2.head() { + Eterm select_val2; +} + +select_val2.src(Src) { + select_val2 = $Src; +} + +select_val2.get_arity() { + if (ERTS_LIKELY(is_tuple(select_val2))) { + select_val2 = *tuple_val(select_val2); + } else { + select_val2 = NIL; + } +} + +select_val2.execute(Fail, T1, T2) { + Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION); + + if (select_val2 == $T1) { + $JUMP(jump_tab[0]); + } else if (select_val2 == $T2) { + $JUMP(jump_tab[1]); + } else { + $FAIL($Fail); + } +} + +i_select_tuple_arity := select_val_lin.fetch.get_arity.execute; +i_select_val_lins := select_val_lin.fetch.execute; + +select_val_lin.head() { + Eterm select_val; +} + +select_val_lin.fetch(Src) { + select_val = $Src; +} + +select_val_lin.get_arity() { + if (ERTS_LIKELY(is_tuple(select_val))) { + select_val = *tuple_val(select_val); + } else { + select_val = NIL; + } +} + +select_val_lin.execute(Fail, N) { + BeamInstr* vs = $NEXT_INSTRUCTION; + int ix = 0; + + for (;;) { + if (vs[ix+0] >= select_val) { + ix += 0; + break; + } + if (vs[ix+1] >= select_val) { + ix += 1; + break; + } + ix += 2; + } + + if (vs[ix] == select_val) { + Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION + $N); + Eterm offset = jump_tab[ix]; + $JUMP(offset); + } else { + $JUMP($Fail); + } +} + +JUMP_ON_VAL(Fail, Index, N, 
Base) { + if (is_small($Index)) { + $Index = (Uint) (signed_val($Index) - $Base); + if ($Index < $N) { + Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION); + $JUMP(jump_tab[$Index]); + } + } + $FAIL($Fail); +} + +i_jump_on_val_zero := jump_on_val_zero.fetch.execute; + +jump_on_val_zero.head() { + Eterm index; +} + +jump_on_val_zero.fetch(Src) { + index = $Src; +} + +jump_on_val_zero.execute(Fail, N) { + $JUMP_ON_VAL($Fail, index, $N, 0); +} + +i_jump_on_val := jump_on_val.fetch.execute; + +jump_on_val.head() { + Eterm index; +} + +jump_on_val.fetch(Src) { + index = $Src; +} + +jump_on_val.execute(Fail, N, Base) { + $JUMP_ON_VAL($Fail, index, $N, $Base); +} diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 704d567337..68ef0a23f3 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -21,7 +21,7 @@ #ifndef __SYS_H__ #define __SYS_H__ -#if !defined(__GNUC__) +#if !defined(__GNUC__) || defined(__e2k__) # define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0 #elif !defined(__GNUC_MINOR__) # define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ @@ -34,9 +34,6 @@ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) #endif -#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ERTS_SMP) -# error "Dirty schedulers not supported without smp support" -#endif #ifdef ERTS_INLINE # ifndef ERTS_CAN_INLINE @@ -221,12 +218,6 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f # define ASSERT(e) ((void) 1) #endif -#ifdef ERTS_SMP -# define ERTS_SMP_ASSERT(e) ASSERT(e) -#else -# define ERTS_SMP_ASSERT(e) ((void)1) -#endif - /* ERTS_UNDEF can be used to silence false warnings about * "variable may be used uninitialized" while keeping the variable * marked as undefined by valgrind. 
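For reference, the midpoint trick in select_val_bins.select above is equivalent to the following standalone binary search. This is a minimal sketch with illustrative names (Key, find_index); only the unsigned byte-offset computation mirrors the patch, the rest is ordinary C.

/* Binary search over a sorted array of word-sized keys. The midpoint is
 * derived from an unsigned byte offset rounded down to the element size,
 * so no signed pointer-difference division is needed. */
typedef unsigned long Key;                     /* stands in for BeamInstr */

static int find_index(const Key *tab, int n, Key want)
{
    const Key *low = tab;
    const Key *high = tab + n;
    int bdiff;                                 /* byte distance low..high */

    while ((bdiff = (int)((const char *)high - (const char *)low)) > 0) {
        unsigned boffset = ((unsigned)bdiff >> 1) & ~(sizeof(Key) - 1);
        const Key *mid = (const Key *)((const char *)low + boffset);

        if (want < *mid)
            high = mid;
        else if (want > *mid)
            low = mid + 1;
        else
            return (int)(mid - tab);           /* index into the jump table */
    }
    return -1;                                 /* not found: take the fail label */
}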
@@ -327,9 +318,9 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f #endif #if SIZEOF_VOID_P == SIZEOF_LONG -typedef unsigned long Eterm; -typedef unsigned long Uint; -typedef long Sint; +typedef unsigned long Eterm erts_align_attribute(sizeof(long)); +typedef unsigned long Uint erts_align_attribute(sizeof(long)); +typedef long Sint erts_align_attribute(sizeof(long)); #define SWORD_CONSTANT(Const) Const##L #define UWORD_CONSTANT(Const) Const##UL #define ERTS_UWORD_MAX ULONG_MAX @@ -337,9 +328,9 @@ typedef long Sint; #define ERTS_SIZEOF_ETERM SIZEOF_LONG #define ErtsStrToSint strtol #elif SIZEOF_VOID_P == SIZEOF_INT -typedef unsigned int Eterm; -typedef unsigned int Uint; -typedef int Sint; +typedef unsigned int Eterm erts_align_attribute(sizeof(int)); +typedef unsigned int Uint erts_align_attribute(sizeof(int)); +typedef int Sint erts_align_attribute(sizeof(int)); #define SWORD_CONSTANT(Const) Const #define UWORD_CONSTANT(Const) Const##U #define ERTS_UWORD_MAX UINT_MAX @@ -347,9 +338,9 @@ typedef int Sint; #define ERTS_SIZEOF_ETERM SIZEOF_INT #define ErtsStrToSint strtol #elif SIZEOF_VOID_P == SIZEOF_LONG_LONG -typedef unsigned long long Eterm; -typedef unsigned long long Uint; -typedef long long Sint; +typedef unsigned long long Eterm erts_align_attribute(sizeof(long long)); +typedef unsigned long long Uint erts_align_attribute(sizeof(long long)); +typedef long long Sint erts_align_attribute(sizeof(long long)); #define SWORD_CONSTANT(Const) Const##LL #define UWORD_CONSTANT(Const) Const##ULL #define ERTS_UWORD_MAX ULLONG_MAX @@ -470,41 +461,25 @@ typedef union { #include "erl_lock_check.h" -/* needed by erl_smp.h */ +/* needed by erl_threads.h */ int erts_send_warning_to_logger_str_nogl(char *); -#include "erl_smp.h" +#include "erl_threads.h" #ifdef ERTS_WANT_BREAK_HANDLING -# ifdef ERTS_SMP -extern erts_smp_atomic32_t erts_break_requested; +extern erts_atomic32_t erts_break_requested; # define ERTS_BREAK_REQUESTED \ - ((int) erts_smp_atomic32_read_nob(&erts_break_requested)) -# else -extern volatile int erts_break_requested; -# define ERTS_BREAK_REQUESTED erts_break_requested -# endif + ((int) erts_atomic32_read_nob(&erts_break_requested)) void erts_do_break_handling(void); #endif -#if !defined(ERTS_SMP) && !defined(__WIN32__) -extern volatile Uint erts_signal_state; -#define ERTS_SIGNAL_STATE erts_signal_state -void erts_handle_signal_state(void); -#endif -#ifdef ERTS_SMP -extern erts_smp_atomic32_t erts_writing_erl_crash_dump; +extern erts_atomic32_t erts_writing_erl_crash_dump; extern erts_tsd_key_t erts_is_crash_dumping_key; #define ERTS_SOMEONE_IS_CRASH_DUMPING \ - ((int) erts_smp_atomic32_read_mb(&erts_writing_erl_crash_dump)) + ((int) erts_atomic32_read_mb(&erts_writing_erl_crash_dump)) #define ERTS_IS_CRASH_DUMPING \ ((int) (SWord) erts_tsd_get(erts_is_crash_dumping_key)) -#else -extern volatile int erts_writing_erl_crash_dump; -#define ERTS_SOMEONE_IS_CRASH_DUMPING erts_writing_erl_crash_dump -#define ERTS_IS_CRASH_DUMPING erts_writing_erl_crash_dump -#endif /* Deal with memcpy() vs bcopy() etc. 
We want to use the mem*() functions, but be able to fall back on bcopy() etc on systems that don't have @@ -643,7 +618,7 @@ int erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *); int erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *); int erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *); int erts_send_info_to_logger_str_nogl(char *); -/* needed by erl_smp.h (declared above) +/* needed by erl_threads.h (declared above) int erts_send_warning_to_logger_str_nogl(char *); */ int erts_send_error_to_logger_str_nogl(char *); @@ -765,10 +740,8 @@ extern char *erts_sys_ddll_error(int code); * System interfaces for startup. */ void erts_sys_schedule_interrupt(int set); -#ifdef ERTS_SMP void erts_sys_schedule_interrupt_timed(int, ErtsMonotonicTime); void erts_sys_main_thread(void); -#endif extern int erts_sys_prepare_crash_dump(int secs); extern void erts_sys_pre_init(void); @@ -855,13 +828,11 @@ int erts_sys_unsetenv(char *key); char *erts_read_env(char *key); void erts_free_read_env(void *value); -#if defined(ERTS_SMP) #if defined(ERTS_THR_HAVE_SIG_FUNCS) && !defined(ETHR_UNUSABLE_SIGUSRX) extern void sys_thr_resume(erts_tid_t tid); extern void sys_thr_suspend(erts_tid_t tid); #define ERTS_SYS_SUSPEND_SIGNAL SIGUSR2 #endif -#endif /* utils.c */ @@ -1025,140 +996,6 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -typedef erts_smp_atomic_t erts_smp_refc_t; - -ERTS_GLB_INLINE void erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val); -ERTS_GLB_INLINE void erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inc_unless(erts_smp_refc_t *refcp, - erts_aint_t unless_val, - erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inctest(erts_smp_refc_t *refcp, - erts_aint_t min_val); -ERTS_GLB_INLINE void erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_dectest(erts_smp_refc_t *refcp, - erts_aint_t min_val); -ERTS_GLB_INLINE void erts_smp_refc_add(erts_smp_refc_t *refcp, erts_aint_t diff, - erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_read(erts_smp_refc_t *refcp, - erts_aint_t min_val); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE void -erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val) -{ - erts_smp_atomic_init_nob((erts_smp_atomic_t *) refcp, val); -} - -ERTS_GLB_INLINE void -erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ -#ifdef ERTS_REFC_DEBUG - erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp); - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_inc(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#else - erts_smp_atomic_inc_nob((erts_smp_atomic_t *) refcp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_inc_unless(erts_smp_refc_t *refcp, - erts_aint_t unless_val, - erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp); - while (1) { - erts_aint_t exp, new; -#ifdef ERTS_REFC_DEBUG - if (val < 0) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_inc_unless(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - if (val == unless_val) - return val; - new = val + 1; - exp = val; - val = erts_smp_atomic_cmpxchg_nob((erts_smp_atomic_t *) refcp, new, exp); - if (val == exp) - return new; - } -} - - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_inctest(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ - erts_aint_t val = 
erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp); -#ifdef ERTS_REFC_DEBUG - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_inctest(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - return val; -} - -ERTS_GLB_INLINE void -erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ -#ifdef ERTS_REFC_DEBUG - erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp); - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_dec(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#else - erts_smp_atomic_dec_nob((erts_smp_atomic_t *) refcp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_dectest(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp); -#ifdef ERTS_REFC_DEBUG - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_dectest(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - return val; -} - -ERTS_GLB_INLINE void -erts_smp_refc_add(erts_smp_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val) -{ -#ifdef ERTS_REFC_DEBUG - erts_aint_t val = erts_smp_atomic_add_read_nob((erts_smp_atomic_t *) refcp, diff); - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n", - diff, val, min_val); -#else - erts_smp_atomic_add_nob((erts_smp_atomic_t *) refcp, diff); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_read(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp); -#ifdef ERTS_REFC_DEBUG - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_read(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - return val; -} - -#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ - - #ifdef ERTS_ENABLE_KERNEL_POLL extern int erts_use_kernel_poll; #endif diff --git a/erts/emulator/beam/trace_instrs.tab b/erts/emulator/beam/trace_instrs.tab new file mode 100644 index 0000000000..b10442c5e7 --- /dev/null +++ b/erts/emulator/beam/trace_instrs.tab @@ -0,0 +1,168 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// %CopyrightEnd% +// + +return_trace() { + ErtsCodeMFA* mfa = (ErtsCodeMFA *)(E[0]); + + SWAPOUT; /* Needed for shared heap */ + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + erts_trace_return(c_p, mfa, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + SWAPIN; + c_p->cp = NULL; + SET_I((BeamInstr *) cp_val(E[2])); + E += 3; + Goto(*I); + //| -no_next +} + +i_generic_breakpoint() { + BeamInstr real_I; + HEAVY_SWAPOUT; + real_I = erts_generic_breakpoint(c_p, erts_code_to_codeinfo(I), reg); + HEAVY_SWAPIN; + ASSERT(VALID_INSTR(real_I)); + Goto(real_I); + //| -no_next +} + +i_return_time_trace() { + BeamInstr *pc = (BeamInstr *) (UWord) E[0]; + SWAPOUT; + erts_trace_time_return(c_p, erts_code_to_codeinfo(pc)); + SWAPIN; + c_p->cp = NULL; + SET_I((BeamInstr *) cp_val(E[1])); + E += 2; + Goto(*I); + //| -no_next +} + +i_return_to_trace() { + if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) { + Uint *cpp = (Uint*) E; + for(;;) { + ASSERT(is_CP(*cpp)); + if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) { + do + ++cpp; + while (is_not_CP(*cpp)); + cpp += 2; + } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) { + do + ++cpp; + while (is_not_CP(*cpp)); + } else { + break; + } + } + SWAPOUT; /* Needed for shared heap */ + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + erts_trace_return_to(c_p, cp_val(*cpp)); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + SWAPIN; + } + c_p->cp = NULL; + SET_I((BeamInstr *) cp_val(E[0])); + E += 1; + Goto(*I); + //| -no_next +} + +i_yield() { + /* This is safe as long as REDS_IN(c_p) is never stored + * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5]. + */ + c_p->arg_reg[0] = am_true; + c_p->arity = 1; /* One living register (the 'true' return value) */ + SWAPOUT; + $SET_CP_I_ABS($NEXT_INSTRUCTION); + c_p->current = NULL; + goto do_schedule; + //| -no_next +} + +i_hibernate() { + HEAVY_SWAPOUT; + if (erts_hibernate(c_p, reg)) { + FCALLS = c_p->fcalls; + c_p->flags &= ~F_HIBERNATE_SCHED; + goto do_schedule; + } else { + HEAVY_SWAPIN; + I = handle_error(c_p, I, reg, &bif_export[BIF_hibernate_3]->info.mfa); + goto post_error_handling; + } + //| -no_next +} + +// This is optimised as an instruction because +// it has to be very very fast. + +i_perf_counter() { + ErtsSysPerfCounter ts; + + ts = erts_sys_perf_counter(); + if (IS_SSMALL(ts)) { + r(0) = make_small((Sint)ts); + } else { + $GC_TEST(0, ERTS_SINT64_HEAP_SIZE(ts), 0); + r(0) = make_big(HTOP); +#if defined(ARCH_32) + if (ts >= (((Uint64) 1) << 32)) { + *HTOP = make_pos_bignum_header(2); + BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff)); + BIG_DIGIT(HTOP, 1) = (Uint) ((ts >> 32) & ((Uint) 0xffffffff)); + HTOP += 3; + } + else +#endif + { + *HTOP = make_pos_bignum_header(1); + BIG_DIGIT(HTOP, 0) = (Uint) ts; + HTOP += 2; + } + } +} + +i_debug_breakpoint() { + HEAVY_SWAPOUT; + I = call_error_handler(c_p, erts_code_to_codemfa(I), reg, am_breakpoint); + HEAVY_SWAPIN; + if (I) { + Goto(*I); + } + goto handle_error; + //| -no_next +} + + + +// +// Special jump instruction used for tracing. Takes an absolute +// failure address. 
+// + +trace_jump(Fail) { + //| -no_next + SET_I((BeamInstr *) $Fail); + Goto(*I); +} diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index d7116bd2c3..993585be10 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -43,7 +43,6 @@ #include "erl_printf.h" #include "erl_threads.h" #include "erl_lock_count.h" -#include "erl_smp.h" #include "erl_time.h" #include "erl_thr_progress.h" #include "erl_thr_queue.h" @@ -1932,27 +1931,6 @@ do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp, gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader); sz = sz + gl_sz; -#ifndef ERTS_SMP -#ifdef USE_THREADS - if (!erts_get_scheduler_data()) /* Must be scheduler thread */ - *p = NULL; - else -#endif - { - *p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0); - if (*p) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&(*p)->state); - if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)) - *p = NULL; - } - } - - if (!*p) { - return NIL; - } - - /* So we have an error logger, lets build the message */ -#endif *bp = new_message_buffer(sz); *ohp = &(*bp)->off_heap; *hp = (*bp)->mem; @@ -1970,20 +1948,12 @@ static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment * #ifdef HARDDEBUG erts_fprintf(stderr, "%T\n", message); #endif -#ifdef ERTS_SMP { Eterm from = erts_get_current_pid(); if (is_not_internal_pid(from)) from = NIL; erts_queue_error_logger_message(from, message, bp); } -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(p, 0, mp, message, am_system); - } -#endif } /* error_logger ! @@ -3557,7 +3527,7 @@ store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns) if (is_external_header(*from_hp)) { ExternalThing *etp = (ExternalThing *) from_hp; ASSERT(is_external(ns)); - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } else if (is_ordinary_ref_thing(from_hp)) return make_internal_ref(to_hp); @@ -4712,22 +4682,6 @@ void erts_interval_init(erts_interval_t *icp) { erts_atomic64_init_nob(&icp->counter.atomic, 0); -#ifdef DEBUG - icp->smp_api = 0; -#endif -} - -void -erts_smp_interval_init(erts_interval_t *icp) -{ -#ifdef ERTS_SMP - erts_interval_init(icp); -#else - icp->counter.not_atomic = 0; -#endif -#ifdef DEBUG - icp->smp_api = 1; -#endif } static ERTS_INLINE Uint64 @@ -4767,79 +4721,25 @@ ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) Uint64 erts_step_interval_nob(erts_interval_t *icp) { - ASSERT(!icp->smp_api); return step_interval_nob(icp); } Uint64 erts_step_interval_relb(erts_interval_t *icp) { - ASSERT(!icp->smp_api); return step_interval_relb(icp); } Uint64 -erts_smp_step_interval_nob(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return step_interval_nob(icp); -#else - return ++icp->counter.not_atomic; -#endif -} - -Uint64 -erts_smp_step_interval_relb(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return step_interval_relb(icp); -#else - return ++icp->counter.not_atomic; -#endif -} - -Uint64 erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic) { - ASSERT(!icp->smp_api); return ensure_later_interval_nob(icp, ic); } Uint64 erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) { - ASSERT(!icp->smp_api); - return ensure_later_interval_acqb(icp, ic); -} - -Uint64 -erts_smp_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return ensure_later_interval_nob(icp, ic); -#else - if 
(icp->counter.not_atomic > ic) - return icp->counter.not_atomic; - else - return ++icp->counter.not_atomic; -#endif -} - -Uint64 -erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP return ensure_later_interval_acqb(icp, ic); -#else - if (icp->counter.not_atomic > ic) - return icp->counter.not_atomic; - else - return ++icp->counter.not_atomic; -#endif } /* diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c index 3a7b3bb50c..a1b15a2199 100644 --- a/erts/emulator/drivers/common/efile_drv.c +++ b/erts/emulator/drivers/common/efile_drv.c @@ -167,7 +167,6 @@ dt_private *get_dt_private(int); #endif -#ifdef USE_THREADS #define THRDS_AVAILABLE (sys_info.async_threads > 0) #ifdef HARDDEBUG /* HARDDEBUG in io.c is expected too */ #define TRACE_DRIVER fprintf(stderr, "Efile: ") @@ -177,12 +176,6 @@ dt_private *get_dt_private(int); #define MUTEX_INIT(m, p) do { IF_THRDS { TRACE_DRIVER; (m = driver_pdl_create(p)); } } while (0) #define MUTEX_LOCK(m) do { IF_THRDS { TRACE_DRIVER; driver_pdl_lock(m); } } while (0) #define MUTEX_UNLOCK(m) do { IF_THRDS { TRACE_DRIVER; driver_pdl_unlock(m); } } while (0) -#else -#define THRDS_AVAILABLE (0) -#define MUTEX_INIT(m, p) -#define MUTEX_LOCK(m) -#define MUTEX_UNLOCK(m) -#endif #define IF_THRDS if (THRDS_AVAILABLE) @@ -2715,7 +2708,6 @@ file_output(ErlDrvData e, char* buf, ErlDrvSizeT count) } case FILE_READDIR: -#ifdef USE_THREADS if (sys_info.async_threads > 0) { d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1 + FILENAME_BYTELEN(name) + @@ -2736,7 +2728,6 @@ file_output(ErlDrvData e, char* buf, ErlDrvSizeT count) goto done; } else -#endif { size_t resbufsize; size_t n = 0, total = 0; diff --git a/erts/emulator/drivers/unix/ttsl_drv.c b/erts/emulator/drivers/unix/ttsl_drv.c index 2a508b02eb..7355df6059 100644 --- a/erts/emulator/drivers/unix/ttsl_drv.c +++ b/erts/emulator/drivers/unix/ttsl_drv.c @@ -892,8 +892,8 @@ static void ttysl_from_tty(ErlDrvData ttysl_data, ErlDrvEvent fd) tpos = 0; } } - } else { - DEBUGLOG(("ttysl_from_tty: driver failure in read(%d,..) = %d\n", (int)(SWord)fd, i)); + } else if (errno != EAGAIN && errno != EWOULDBLOCK) { + DEBUGLOG(("ttysl_from_tty: driver failure in read(%d,..) 
= %d (errno = %d)\n", (int)(SWord)fd, i, errno)); driver_failure(ttysl_port, -1); } } diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4 index dca3887564..b3c9a460bb 100644 --- a/erts/emulator/hipe/hipe_amd64_bifs.m4 +++ b/erts/emulator/hipe/hipe_amd64_bifs.m4 @@ -39,7 +39,7 @@ define(HANDLE_GOT_MBUF,` 3: call nbif_$1_gc_after_bif /* `HANDLE_GOT_MBUF' */ jmp 2b') -`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +`#if defined(ERTS_ENABLE_LOCK_CHECK) # define CALL_BIF(F) \ movq CSYM(nbif_impl_##F)@GOTPCREL(%rip), %r11; \ movq %r11, P_BIF_CALLEE(P); \ diff --git a/erts/emulator/hipe/hipe_arm_bifs.m4 b/erts/emulator/hipe/hipe_arm_bifs.m4 index a9097dabde..554faa2567 100644 --- a/erts/emulator/hipe/hipe_arm_bifs.m4 +++ b/erts/emulator/hipe/hipe_arm_bifs.m4 @@ -29,7 +29,7 @@ include(`hipe/hipe_arm_asm.m4') .p2align 2 .arm -`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +`#if defined(ERTS_ENABLE_LOCK_CHECK) # define CALL_BIF(F) ldr r14, =nbif_impl_##F; str r14, [r0, #P_BIF_CALLEE]; bl hipe_debug_bif_wrapper #else # define CALL_BIF(F) bl nbif_impl_##F diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c index 94bc563fda..05663648e9 100644 --- a/erts/emulator/hipe/hipe_bif0.c +++ b/erts/emulator/hipe/hipe_bif0.c @@ -1000,7 +1000,7 @@ BIF_RETTYPE hipe_bifs_set_native_address_in_fe_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); fe->native_address = native_address; - if (erts_smp_refc_dectest(&fe->refc, 0) == 0) + if (erts_refc_dectest(&fe->refc, 0) == 0) erts_erase_fun_entry(fe); BIF_RET(am_true); } @@ -1048,7 +1048,7 @@ static struct { * they create a new stub for the mfa, which forces locking. * XXX: Redesign apply et al to avoid those updates. */ - erts_smp_rwmtx_t lock; + erts_rwmtx_t lock; } hipe_mfa_info_table; Hash mod2mfa_tab; /* map from module atom to list of hipe_mfa_info */ @@ -1129,28 +1129,28 @@ struct hipe_ref { static inline void hipe_mfa_info_table_init_lock(void) { - erts_smp_rwmtx_init(&hipe_mfa_info_table.lock, "hipe_mfait_lock", NIL, + erts_rwmtx_init(&hipe_mfa_info_table.lock, "hipe_mfait_lock", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); } static inline void hipe_mfa_info_table_rlock(void) { - erts_smp_rwmtx_rlock(&hipe_mfa_info_table.lock); + erts_rwmtx_rlock(&hipe_mfa_info_table.lock); } static inline void hipe_mfa_info_table_runlock(void) { - erts_smp_rwmtx_runlock(&hipe_mfa_info_table.lock); + erts_rwmtx_runlock(&hipe_mfa_info_table.lock); } static inline void hipe_mfa_info_table_rwlock(void) { - erts_smp_rwmtx_rwlock(&hipe_mfa_info_table.lock); + erts_rwmtx_rwlock(&hipe_mfa_info_table.lock); } static inline void hipe_mfa_info_table_rwunlock(void) { - erts_smp_rwmtx_rwunlock(&hipe_mfa_info_table.lock); + erts_rwmtx_rwunlock(&hipe_mfa_info_table.lock); } static ERTS_INLINE @@ -1636,7 +1636,7 @@ void hipe_purge_refs(struct hipe_ref* first_ref, Eterm caller_module, { struct hipe_ref* ref = first_ref; - ERTS_SMP_LC_ASSERT(is_blocking == erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(is_blocking == erts_thr_progress_is_blocking()); while (ref) { struct hipe_ref* free_ref = ref; @@ -1682,9 +1682,9 @@ void hipe_purge_sdescs(struct hipe_sdesc* first_sdesc, Eterm module, { struct hipe_sdesc* sdesc = first_sdesc; - ERTS_SMP_LC_ASSERT(is_blocking == erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(is_blocking == erts_thr_progress_is_blocking()); - ERTS_SMP_LC_ASSERT(is_blocking); /*XXX Fix safe sdesc destruction */ + ERTS_LC_ASSERT(is_blocking); /*XXX Fix safe 
sdesc destruction */ while (sdesc) { struct hipe_sdesc* free_sdesc = sdesc; @@ -1702,7 +1702,7 @@ void hipe_purge_module(Module* modp, int is_blocking) { ASSERT(modp); - ERTS_SMP_LC_ASSERT(is_blocking == erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(is_blocking == erts_thr_progress_is_blocking()); DBG_TRACE_MFA(make_atom(modp->module), 0, 0, "hipe_purge_module"); @@ -1711,7 +1711,7 @@ void hipe_purge_module(Module* modp, int is_blocking) * Remove all hipe_ref's (external calls) from the old module instance */ if (modp->old.hipe_code->first_hipe_ref) { - ERTS_SMP_LC_ASSERT(is_blocking); + ERTS_LC_ASSERT(is_blocking); hipe_purge_refs(modp->old.hipe_code->first_hipe_ref, make_atom(modp->module), is_blocking); @@ -1722,7 +1722,7 @@ void hipe_purge_module(Module* modp, int is_blocking) * Remove all hipe_sdesc's for the old module instance */ if (modp->old.hipe_code->first_hipe_sdesc) { - ERTS_SMP_LC_ASSERT(is_blocking); + ERTS_LC_ASSERT(is_blocking); hipe_purge_sdescs(modp->old.hipe_code->first_hipe_sdesc, make_atom(modp->module), is_blocking); @@ -1773,7 +1773,7 @@ void hipe_redirect_to_module(Module* modp) struct hipe_mfa_info *p; struct hipe_ref_head* refh; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); for (p = mod2mfa_get(modp); p; p = p->next_in_mod) { if (p->new_address) { diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c index e04d3d32d1..9ebbb22846 100644 --- a/erts/emulator/hipe/hipe_bif2.c +++ b/erts/emulator/hipe/hipe_bif2.c @@ -45,7 +45,7 @@ static void proc_unlock(Process* c_p, Process* rp) locks &= ~ERTS_PROC_LOCK_MAIN; } if (rp && locks) { - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); } } @@ -153,14 +153,14 @@ BIF_RETTYPE hipe_bifs_modeswitch_debug_off_0(BIF_ALIST_0) BIF_RET(am_true); } -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_CHECK) BIF_RETTYPE hipe_debug_bif_wrapper(NBIF_ALIST_1); -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\ __FILE__, __LINE__) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) BIF_RETTYPE hipe_debug_bif_wrapper(NBIF_ALIST_1) @@ -168,13 +168,13 @@ BIF_RETTYPE hipe_debug_bif_wrapper(NBIF_ALIST_1) typedef BIF_RETTYPE nBif(NBIF_ALIST_1); nBif* fp = (nBif*) (BIF_P->hipe.bif_callee); BIF_RETTYPE res; - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(BIF_P); + ERTS_UNREQ_PROC_MAIN_LOCK(BIF_P); res = (*fp)(NBIF_CALL_ARGS); - ERTS_SMP_REQ_PROC_MAIN_LOCK(BIF_P); + ERTS_REQ_PROC_MAIN_LOCK(BIF_P); return res; } -#endif /* ERTS_ENABLE_LOCK_CHECK && ERTS_SMP */ +#endif /* ERTS_ENABLE_LOCK_CHECK*/ BIF_RETTYPE hipe_bifs_debug_native_called_2(BIF_ALIST_2) diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4 index f034c4700c..bebe20a18e 100644 --- a/erts/emulator/hipe/hipe_bif_list.m4 +++ b/erts/emulator/hipe/hipe_bif_list.m4 @@ -262,18 +262,12 @@ noproc_primop_interface_2(nbif_bs_get_utf16, erts_bs_get_utf16) noproc_primop_interface_2(nbif_bs_validate_unicode_retract, hipe_bs_validate_unicode_retract) /* - * Bit-syntax primops. The ERTS_SMP runtime system requires P, + * Bit-syntax primops. The runtime system requires P, * hence the use of nocons_nofail_primop_interface_N(). - * When ERTS_SMP is disabled, noproc_primop_interface_N() - * should be used instead. 
*/ nocons_nofail_primop_interface_5(nbif_bs_put_small_float, hipe_bs_put_small_float) noproc_primop_interface_5(nbif_bs_put_bits, hipe_bs_put_bits) -ifelse(ERTS_SMP,1,` nocons_nofail_primop_interface_5(nbif_bs_put_big_integer, hipe_bs_put_big_integer) -',` -noproc_primop_interface_5(nbif_bs_put_big_integer, hipe_bs_put_big_integer) -')dnl nofail_primop_interface_0(nbif_check_get_msg, hipe_check_get_msg) @@ -283,13 +277,8 @@ nocons_nofail_primop_interface_0(nbif_emulate_fpe, hipe_emulate_fpe) noproc_primop_interface_1(nbif_emasculate_binary, hipe_emasculate_binary) -/* - * SMP-specific stuff - */ -ifelse(ERTS_SMP,1,` nocons_nofail_primop_interface_0(nbif_clear_timeout, hipe_clear_timeout) noproc_primop_interface_1(nbif_atomic_inc, hipe_atomic_inc) -',)dnl /* * BIFs that disable GC while trapping are called via a wrapper diff --git a/erts/emulator/hipe/hipe_instrs.tab b/erts/emulator/hipe/hipe_instrs.tab new file mode 100644 index 0000000000..bcce196a1d --- /dev/null +++ b/erts/emulator/hipe/hipe_instrs.tab @@ -0,0 +1,141 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + + +HIPE_MODE_SWITCH(Cmd) { + SWAPOUT; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS; + c_p->def_arg_reg[4] = -neg_o_reds; + c_p = hipe_mode_switch(c_p, $Cmd, reg); +} + +hipe_trap_call := hipe_trap.call.post; +hipe_trap_call_closure := hipe_trap.call_closure.post; +hipe_trap_return := hipe_trap.return.post; +hipe_trap_throw := hipe_trap.throw.post; +hipe_trap_resume := hipe_trap.resume.post; + +hipe_trap.call() { + /* + * I[-5]: &&lb_i_func_info_IaaI + * I[-4]: Native code callee (inserted by HiPE) + * I[-3]: Module (tagged atom) + * I[-2]: Function (tagged atom) + * I[-1]: Arity (untagged integer) + * I[ 0]: &&lb_hipe_trap_call + * ... remainder of original BEAM code + */ + ErtsCodeInfo *ci = erts_code_to_codeinfo(I); + ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI)); + c_p->hipe.u.ncallee = ci->u.ncallee; + ++hipe_trap_count; + $HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL | (ci->mfa.arity << 8)); +} + +hipe_trap.call_closure() { + ErtsCodeInfo *ci = erts_code_to_codeinfo(I); + ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI)); + c_p->hipe.u.ncallee = ci->u.ncallee; + ++hipe_trap_count; + $HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (ci->mfa.arity << 8)); +} + +hipe_trap.return() { + $HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RETURN); +} + +hipe_trap.throw() { + $HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_THROW); +} + +hipe_trap.resume() { + $HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RESUME); +} + +hipe_trap.post() { +#ifdef DEBUG + pid = c_p->common.id; /* may have switched process... */ +#endif + reg = erts_proc_sched_data(c_p)->x_reg_array; + freg = erts_proc_sched_data(c_p)->f_reg_array; + ERL_BITS_RELOAD_STATEP(c_p); + /* XXX: this abuse of def_arg_reg[] is horrid! 
*/ + neg_o_reds = -c_p->def_arg_reg[4]; + FCALLS = c_p->fcalls; + SWAPIN; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + switch( c_p->def_arg_reg[3] ) { + case HIPE_MODE_SWITCH_RES_RETURN: + ASSERT(is_value(reg[0])); + SET_I(c_p->cp); + c_p->cp = 0; + Goto(*I); + case HIPE_MODE_SWITCH_RES_CALL_EXPORTED: + c_p->i = c_p->hipe.u.callee_exp->addressv[erts_active_code_ix()]; + /*fall through*/ + case HIPE_MODE_SWITCH_RES_CALL_BEAM: + SET_I(c_p->i); + Dispatch(); + case HIPE_MODE_SWITCH_RES_CALL_CLOSURE: + /* This can be used to call any function value, but currently + it's only used to call closures referring to unloaded + modules. */ + { + BeamInstr *next; + + next = call_fun(c_p, c_p->arity - 1, reg, THE_NON_VALUE); + HEAVY_SWAPIN; + if (next != NULL) { + SET_I(next); + Dispatchfun(); + } + goto find_func_info; + } + case HIPE_MODE_SWITCH_RES_THROW: + c_p->cp = NULL; + I = handle_error(c_p, I, reg, NULL); + goto post_error_handling; + default: + erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: result %u\n", c_p->def_arg_reg[3]); + } + //| -no_next; +} + +hipe_call_count() { + /* + * I[-5]: &&lb_i_func_info_IaaI + * I[-4]: pointer to struct hipe_call_count (inserted by HiPE) + * I[-3]: Module (tagged atom) + * I[-2]: Function (tagged atom) + * I[-1]: Arity (untagged integer) + * I[ 0]: &&lb_hipe_call_count + * ... remainder of original BEAM code + */ + ErtsCodeInfo *ci = erts_code_to_codeinfo(I); + struct hipe_call_count *hcc = ci->u.hcc; + ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI)); + ASSERT(hcc != NULL); + ASSERT(VALID_INSTR(hcc->opcode)); + ++(hcc->count); + Goto(hcc->opcode); + //| -no_next; +} diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c index 4573980e1e..6ea120c65c 100644 --- a/erts/emulator/hipe/hipe_mkliterals.c +++ b/erts/emulator/hipe/hipe_mkliterals.c @@ -441,9 +441,7 @@ static const struct rts_param rts_params[] = { { 11, "ERL_FUN_SIZE", 1, ERL_FUN_SIZE }, { 12, "P_SCHED_DATA", -#ifdef ERTS_SMP 1, offsetof(struct process, scheduler_data) -#endif }, { 14, "P_FP_EXCEPTION", #if !defined(NO_FPE_SIGNALS) || defined(HIPE) @@ -453,11 +451,7 @@ static const struct rts_param rts_params[] = { /* This flag is always defined, but its value is configuration-dependent. */ { 15, "ERTS_IS_SMP", 1, -#if defined(ERTS_SMP) 1 -#else - 0 -#endif }, /* This flag is always defined, but its value is configuration-dependent. 
*/ { 16, "ERTS_NO_FPE_SIGNALS", @@ -513,7 +507,7 @@ static const struct rts_param rts_params[] = { #endif }, { 48, "P_BIF_CALLEE", -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_CHECK) 1, offsetof(struct process, hipe.bif_callee) #endif }, diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c index ba7ae1e6a8..b7f81fc4a6 100644 --- a/erts/emulator/hipe/hipe_mode_switch.c +++ b/erts/emulator/hipe/hipe_mode_switch.c @@ -36,15 +36,15 @@ #include "hipe_stack.h" #include "hipe_bif0.h" /* hipe_mfa_info_table_init() */ -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ +#if defined(ERTS_ENABLE_LOCK_CHECK) +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \ __FILE__, __LINE__) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) #else -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) +# define ERTS_REQ_PROC_MAIN_LOCK(P) +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) #endif @@ -394,7 +394,7 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[]) goto do_schedule; } - if (!(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_ACTIVE)) { + if (!(erts_atomic32_read_acqb(&p->state) & ERTS_PSFLG_ACTIVE)) { for (i = 0; i < p->arity; ++i) p->arg_reg[i] = reg[i]; goto do_schedule; @@ -490,19 +490,17 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[]) case HIPE_MODE_SWITCH_RES_WAIT: case HIPE_MODE_SWITCH_RES_WAIT_TIMEOUT: { /* same semantics, different debug trace messages */ -#ifdef ERTS_SMP /* XXX: BEAM has different entries for the locked and unlocked cases. HiPE doesn't, so we must check dynamically. 
*/ if (p->hipe_smp.have_receive_locks) p->hipe_smp.have_receive_locks = 0; else - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); -#endif + erts_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); p->i = hipe_beam_pc_resume; p->arity = 0; - erts_smp_atomic32_read_band_relb(&p->state, + erts_atomic32_read_band_relb(&p->state, ~ERTS_PSFLG_ACTIVE); - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); do_schedule: { struct saved_calls *scb; @@ -513,21 +511,19 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[]) /* The process may have died while it was executing, if so we return out from native code to the interpreter */ - if (erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING) + if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING) p->i = beam_exit; #ifdef DEBUG ASSERT(p->debug_reds_in == reds_in); #endif p->flags &= ~F_HIPE_MODE; - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(p); + ERTS_UNREQ_PROC_MAIN_LOCK(p); p = erts_schedule(NULL, p, reds_in - p->fcalls); - ERTS_SMP_REQ_PROC_MAIN_LOCK(p); + ERTS_REQ_PROC_MAIN_LOCK(p); ASSERT(!(p->flags & F_HIPE_MODE)); -#ifdef ERTS_SMP p->hipe_smp.have_receive_locks = 0; reg = p->scheduler_data->x_reg_array; -#endif } { Eterm *argp; @@ -651,10 +647,10 @@ void hipe_inc_nstack(Process *p) p->hipe.nsp = new_nstack + (p->hipe.nsp - old_nstack); p->hipe.nstack = new_nstack; if (p->hipe.nstgraylim) - p->hipe.nstgraylim = + p->hipe.nstgraylim = new_nstack + (p->hipe.nstgraylim - old_nstack); if (p->hipe.nstblacklim) - p->hipe.nstblacklim = + p->hipe.nstblacklim = new_nstack + (p->hipe.nstblacklim - old_nstack); } } diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c index d8044fe6da..23f64a6991 100644 --- a/erts/emulator/hipe/hipe_native_bif.c +++ b/erts/emulator/hipe/hipe_native_bif.c @@ -143,12 +143,10 @@ BIF_RETTYPE nbif_impl_hipe_set_timeout(NBIF_ALIST_1) else { int tres = erts_set_proc_timer_term(p, timeout_value); if (tres != 0) { /* Wrong time */ -#ifdef ERTS_SMP if (p->hipe_smp.have_receive_locks) { p->hipe_smp.have_receive_locks = 0; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); } -#endif BIF_ERROR(p, EXC_TIMEOUT_VALUE); } } @@ -335,9 +333,7 @@ Binary *hipe_bs_reallocate(Binary* oldbptr, int newsize) } int hipe_bs_put_big_integer( -#ifdef ERTS_SMP Process *p, -#endif Eterm arg, Uint num_bits, byte* base, unsigned offset, unsigned flags) { byte *save_bin_buf; @@ -530,26 +526,22 @@ Eterm hipe_check_get_msg(Process *c_p) msgp = PEEK_MESSAGE(c_p); if (!msgp) { -#ifdef ERTS_SMP - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); /* Make sure messages wont pass exit signals... */ if (ERTS_PROC_PENDING_EXIT(c_p)) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); return THE_NON_VALUE; /* Will be rescheduled for exit */ } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); msgp = PEEK_MESSAGE(c_p); if (msgp) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); else { /* XXX: BEAM doesn't need this */ c_p->hipe_smp.have_receive_locks = 1; -#endif c_p->flags &= ~F_DELAY_GC; return THE_NON_VALUE; -#ifdef ERTS_SMP } -#endif } if (is_non_value(ERL_MESSAGE_TERM(msgp)) @@ -573,7 +565,6 @@ Eterm hipe_check_get_msg(Process *c_p) /* * SMP-specific stuff */ -#ifdef ERTS_SMP /* * This is like the timeout BEAM instruction. 
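The receive-queue lock handling above follows one recurring shape: BEAM knows statically whether ERTS_PROC_LOCKS_MSG_RECEIVE is already held, while native code records that fact in hipe_smp.have_receive_locks and has to check it at run time (see the RES_WAIT case and hipe_check_get_msg). A minimal sketch of that pattern; the helper name hipe_ensure_receive_locked is illustrative and not part of the patch:

/* Take the message-receive lock unless native code already holds it. */
static void hipe_ensure_receive_locked(Process *p)
{
    if (p->hipe_smp.have_receive_locks)
        p->hipe_smp.have_receive_locks = 0;    /* already held, just consume the flag */
    else
        erts_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
}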
@@ -584,14 +575,12 @@ void hipe_clear_timeout(Process *c_p) * A timeout has occurred. Reset the save pointer so that the next * receive statement will examine the first message first. */ -#ifdef ERTS_SMP /* XXX: BEAM has different entries for the locked and unlocked cases. HiPE doesn't, so we must check dynamically. */ if (c_p->hipe_smp.have_receive_locks) { c_p->hipe_smp.have_receive_locks = 0; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); } -#endif if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) { trace_receive(c_p, am_clock_service, am_timeout, NULL); } @@ -601,7 +590,6 @@ void hipe_clear_timeout(Process *c_p) void hipe_atomic_inc(int *counter) { - erts_smp_atomic_inc_nob((erts_smp_atomic_t*)counter); + erts_atomic_inc_nob((erts_atomic_t*)counter); } -#endif diff --git a/erts/emulator/hipe/hipe_native_bif.h b/erts/emulator/hipe/hipe_native_bif.h index 38f874888b..cbc7ab8dc6 100644 --- a/erts/emulator/hipe/hipe_native_bif.h +++ b/erts/emulator/hipe/hipe_native_bif.h @@ -107,11 +107,7 @@ void hipe_emasculate_binary(Eterm); /* * Stuff that is different in SMP and non-SMP. */ -#ifdef ERTS_SMP int hipe_bs_put_big_integer(Process*, Eterm, Uint, byte*, unsigned, unsigned); -#else -int hipe_bs_put_big_integer(Eterm, Uint, byte*, unsigned, unsigned); -#endif AEXTERN(Eterm,nbif_check_get_msg,(Process*)); Eterm hipe_check_get_msg(Process*); @@ -122,12 +118,10 @@ BIF_RETTYPE hipe_bifs_debug_native_called_2(BIF_ALIST_2); /* * SMP-specific stuff */ -#ifdef ERTS_SMP AEXTERN(void,nbif_atomic_inc,(void)); AEXTERN(void,nbif_clear_timeout,(Process*)); void hipe_atomic_inc(int*); void hipe_clear_timeout(Process*); -#endif #define BIF_LIST(M,F,A,B,C,I) AEXTERN(Eterm,nbif_##C,(void)); #include "erl_bif_list.h" diff --git a/erts/emulator/hipe/hipe_ops.tab b/erts/emulator/hipe/hipe_ops.tab index 96e4c0da91..19a3820a6a 100644 --- a/erts/emulator/hipe/hipe_ops.tab +++ b/erts/emulator/hipe/hipe_ops.tab @@ -23,4 +23,7 @@ hipe_trap_call_closure hipe_trap_return hipe_trap_throw hipe_trap_resume + +%cold hipe_call_count +%hot diff --git a/erts/emulator/hipe/hipe_ppc_bifs.m4 b/erts/emulator/hipe/hipe_ppc_bifs.m4 index 79a8bef77d..283fbbb200 100644 --- a/erts/emulator/hipe/hipe_ppc_bifs.m4 +++ b/erts/emulator/hipe/hipe_ppc_bifs.m4 @@ -25,7 +25,7 @@ include(`hipe/hipe_ppc_asm.m4') #`include' "config.h" #`include' "hipe_literals.h" -`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +`#if defined(ERTS_ENABLE_LOCK_CHECK) # define CALL_BIF(F) STORE_IA(CSYM(nbif_impl_##F), P_BIF_CALLEE(P), r29); bl CSYM(hipe_debug_bif_wrapper) #else # define CALL_BIF(F) bl CSYM(nbif_impl_##F) diff --git a/erts/emulator/hipe/hipe_primops.h b/erts/emulator/hipe/hipe_primops.h index 4fcbc9df38..6aac5e6205 100644 --- a/erts/emulator/hipe/hipe_primops.h +++ b/erts/emulator/hipe/hipe_primops.h @@ -41,10 +41,8 @@ PRIMOP_LIST(am_bnot, &nbif_bnot_1) PRIMOP_LIST(am_gc_1, &nbif_gc_1) PRIMOP_LIST(am_check_get_msg, &nbif_check_get_msg) -#ifdef ERTS_SMP PRIMOP_LIST(am_atomic_inc, &nbif_atomic_inc) PRIMOP_LIST(am_clear_timeout, &nbif_clear_timeout) -#endif PRIMOP_LIST(am_select_msg, &nbif_select_msg) PRIMOP_LIST(am_set_timeout, &nbif_set_timeout) PRIMOP_LIST(am_rethrow, &nbif_rethrow) diff --git a/erts/emulator/hipe/hipe_process.h b/erts/emulator/hipe/hipe_process.h index cc92bf653c..ef14c75f6c 100644 --- a/erts/emulator/hipe/hipe_process.h +++ b/erts/emulator/hipe/hipe_process.h @@ -49,7 +49,7 @@ struct hipe_process_state { #ifdef NO_FPE_SIGNALS double float_result; /* to be checked for 
inf/NaN by hipe_emulate_fpe */ #endif -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_CHECK) void (*bif_callee)(void); /* When calling BIF's via debug wrapper */ #endif #ifdef DEBUG @@ -82,7 +82,6 @@ static __inline__ void hipe_delete_process(struct hipe_process_state *p) erts_free(ERTS_ALC_T_HIPE_STK, (void*)p->nstack); } -#ifdef ERTS_SMP struct hipe_process_state_smp { int have_receive_locks; }; @@ -91,6 +90,5 @@ static __inline__ void hipe_init_process_smp(struct hipe_process_state_smp *p) { p->have_receive_locks = 0; } -#endif #endif /* HIPE_PROCESS_H */ diff --git a/erts/emulator/hipe/hipe_signal.h b/erts/emulator/hipe/hipe_signal.h index 5d8621135b..524def11a4 100644 --- a/erts/emulator/hipe/hipe_signal.h +++ b/erts/emulator/hipe/hipe_signal.h @@ -27,13 +27,9 @@ #if defined(__i386__) || defined(__x86_64__) extern void hipe_signal_init(void); -#else -static __inline__ void hipe_signal_init(void) { } -#endif - -#if defined(ERTS_SMP) && (defined(__i386__) || defined(__x86_64__)) extern void hipe_thread_signal_init(void); #else +static __inline__ void hipe_signal_init(void) { } static __inline__ void hipe_thread_signal_init(void) { } #endif diff --git a/erts/emulator/hipe/hipe_sparc_bifs.m4 b/erts/emulator/hipe/hipe_sparc_bifs.m4 index 14330c2f1c..1b49fa57fd 100644 --- a/erts/emulator/hipe/hipe_sparc_bifs.m4 +++ b/erts/emulator/hipe/hipe_sparc_bifs.m4 @@ -28,7 +28,7 @@ include(`hipe/hipe_sparc_asm.m4') .section ".text" .align 4 -`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +`#if defined(ERTS_ENABLE_LOCK_CHECK) # define CALL_BIF(F) set nbif_impl_##F, %o7; st %o7, [%o0+P_BIF_CALLEE]; call hipe_debug_bif_wrapper #else # define CALL_BIF(F) call nbif_impl_##F diff --git a/erts/emulator/hipe/hipe_x86_bifs.m4 b/erts/emulator/hipe/hipe_x86_bifs.m4 index aecf67dc1b..9cb343d067 100644 --- a/erts/emulator/hipe/hipe_x86_bifs.m4 +++ b/erts/emulator/hipe/hipe_x86_bifs.m4 @@ -31,7 +31,7 @@ include(`hipe/hipe_x86_asm.m4') #define TEST_GOT_EXN cmpl $THE_NON_VALUE,%eax #endif' -`#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +`#if defined(ERTS_ENABLE_LOCK_CHECK) # define CALL_BIF(F) movl $CSYM(nbif_impl_##F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper) #else # define CALL_BIF(F) call CSYM(nbif_impl_##F) diff --git a/erts/emulator/hipe/hipe_x86_signal.c b/erts/emulator/hipe/hipe_x86_signal.c index be68d7d463..d3b6933155 100644 --- a/erts/emulator/hipe/hipe_x86_signal.c +++ b/erts/emulator/hipe/hipe_x86_signal.c @@ -45,10 +45,8 @@ #include <signal.h> #include <stdio.h> #include <stdlib.h> -#ifdef ERTS_SMP #include "sys.h" #include "erl_alloc.h" -#endif #include "hipe_signal.h" #if defined(__GLIBC__) && __GLIBC__ == 2 && (__GLIBC_MINOR__ >= 3) @@ -259,7 +257,6 @@ static void hipe_sigaltstack(void *ss_sp) } } -#ifdef ERTS_SMP /* * Set up alternate signal stack for an Erlang process scheduler thread. 
*/ @@ -269,7 +266,6 @@ void hipe_thread_signal_init(void) We use it to suppress false leak report from valgrind */ hipe_sigaltstack(erts_alloc_permanent_cache_aligned(ERTS_ALC_T_HIPE_LL, SIGSTKSZ)); } -#endif /* * Set up alternate signal stack for the main thread, @@ -277,10 +273,6 @@ void hipe_thread_signal_init(void) */ static void hipe_sigaltstack_init(void) { -#if !defined(ERTS_SMP) - static unsigned long my_sigstack[SIGSTKSZ/sizeof(long)]; - hipe_sigaltstack(my_sigstack); -#endif } /* diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index 799f67fc45..834b77eb58 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -84,9 +84,6 @@ typedef char EventStateFlags; #define ERTS_CIO_POLL_CTL ERTS_POLL_EXPORT(erts_poll_control) #define ERTS_CIO_POLL_CTLV ERTS_POLL_EXPORT(erts_poll_controlv) #define ERTS_CIO_POLL_WAIT ERTS_POLL_EXPORT(erts_poll_wait) -#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -#define ERTS_CIO_POLL_AS_INTR ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt) -#endif #define ERTS_CIO_POLL_INTR ERTS_POLL_EXPORT(erts_poll_interrupt) #define ERTS_CIO_POLL_INTR_TMD ERTS_POLL_EXPORT(erts_poll_interrupt_timed) #define ERTS_CIO_NEW_POLLSET ERTS_POLL_EXPORT(erts_poll_create_pollset) @@ -100,18 +97,16 @@ typedef char EventStateFlags; static struct pollset_info { ErtsPollSet ps; - erts_smp_atomic_t in_poll_wait; /* set while doing poll */ + erts_atomic_t in_poll_wait; /* set while doing poll */ struct { int six; /* start index */ int eix; /* end index */ - erts_smp_atomic32_t no; + erts_atomic32_t no; int size; ErtsSysFdType *array; } active_fd; -#ifdef ERTS_SMP struct removed_fd* removed_list; /* list of deselected fd's*/ - erts_smp_spinlock_t removed_list_lock; -#endif + erts_spinlock_t removed_list_lock; }pollset; #define NUM_OF_POLLSETS 1 @@ -137,7 +132,6 @@ typedef struct { EventStateFlags flags; } ErtsDrvEventState; -#ifdef ERTS_SMP struct removed_fd { struct removed_fd *next; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS @@ -150,19 +144,17 @@ struct removed_fd { #endif }; -#endif #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS static int max_fds = -1; #endif #define DRV_EV_STATE_LOCK_CNT 16 static union { - erts_smp_mtx_t lck; + erts_mtx_t lck; byte _cache_line_alignment[64]; }drv_ev_state_locks[DRV_EV_STATE_LOCK_CNT]; -#ifdef ERTS_SMP -static ERTS_INLINE erts_smp_mtx_t* fd_mtx(ErtsSysFdType fd) +static ERTS_INLINE erts_mtx_t* fd_mtx(ErtsSysFdType fd) { int hash = (int)fd; # ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS @@ -170,21 +162,18 @@ static ERTS_INLINE erts_smp_mtx_t* fd_mtx(ErtsSysFdType fd) # endif return &drv_ev_state_locks[hash % DRV_EV_STATE_LOCK_CNT].lck; } -#else -# define fd_mtx(fd) NULL -#endif #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS -static erts_smp_atomic_t drv_ev_state_len; +static erts_atomic_t drv_ev_state_len; static ErtsDrvEventState *drv_ev_state; -static erts_smp_mtx_t drv_ev_state_grow_lock; /* prevent lock-hogging of racing growers */ +static erts_mtx_t drv_ev_state_grow_lock; /* prevent lock-hogging of racing growers */ #else static SafeHash drv_ev_state_tab; static int num_state_prealloc; static ErtsDrvEventState *state_prealloc_first; -erts_smp_spinlock_t state_prealloc_lock; +erts_spinlock_t state_prealloc_lock; static ERTS_INLINE ErtsDrvEventState *hash_get_drv_ev_state(ErtsSysFdType fd) { @@ -249,15 +238,13 @@ static void steal_pending_stop_nif(erts_dsprintf_buf_t *dsbufp, ErtsResource*, ErtsDrvEventState *state, int mode, int on); -#ifdef ERTS_SMP 
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_FD_LIST) -#endif static ERTS_INLINE void init_iotask(ErtsIoTask *io_task) { erts_port_task_handle_init(&io_task->task); - erts_smp_atomic_init_nob(&io_task->executed_time, ~((erts_aint_t) 0)); + erts_atomic_init_nob(&io_task->executed_time, ~((erts_aint_t) 0)); } static ERTS_INLINE int @@ -265,7 +252,7 @@ is_iotask_active(ErtsIoTask *io_task, erts_aint_t current_cio_time) { if (erts_port_task_is_scheduled(&io_task->task)) return 1; - if (erts_smp_atomic_read_nob(&io_task->executed_time) == current_cio_time) + if (erts_atomic_read_nob(&io_task->executed_time) == current_cio_time) return 1; return 0; } @@ -337,10 +324,9 @@ free_drv_event_data(ErtsDrvEventDataState *dep) static ERTS_INLINE void remember_removed(ErtsDrvEventState *state, struct pollset_info* psi) { -#ifdef ERTS_SMP struct removed_fd *fdlp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd))); - if (erts_smp_atomic_read_nob(&psi->in_poll_wait)) { + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(fd_mtx(state->fd))); + if (erts_atomic_read_nob(&psi->in_poll_wait)) { state->remove_cnt++; ASSERT(state->remove_cnt > 0); fdlp = removed_fd_alloc(); @@ -350,62 +336,56 @@ remember_removed(ErtsDrvEventState *state, struct pollset_info* psi) #ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS fdlp->state = state; #endif - erts_smp_spin_lock(&psi->removed_list_lock); + erts_spin_lock(&psi->removed_list_lock); fdlp->next = psi->removed_list; psi->removed_list = fdlp; - erts_smp_spin_unlock(&psi->removed_list_lock); + erts_spin_unlock(&psi->removed_list_lock); } -#endif } static ERTS_INLINE int is_removed(ErtsDrvEventState *state) { -#ifdef ERTS_SMP /* Note that there is a possible race here, where an fd is removed (increasing remove_cnt) and then added again just before erts_poll_wait is called by erts_check_io. Any polled event on the re-added fd will then be falsely ignored. But that does not matter, as the event will trigger again next time erl_check_io is called. 
*/ return state->remove_cnt > 0; -#else - return 0; -#endif } static void forget_removed(struct pollset_info* psi) { -#ifdef ERTS_SMP struct removed_fd* fdlp; struct removed_fd* tofree; /* Fast track: if (atomic_ptr(removed_list)==NULL) return; */ - erts_smp_spin_lock(&psi->removed_list_lock); + erts_spin_lock(&psi->removed_list_lock); fdlp = psi->removed_list; psi->removed_list = NULL; - erts_smp_spin_unlock(&psi->removed_list_lock); + erts_spin_unlock(&psi->removed_list_lock); while (fdlp) { ErtsResource* resource = NULL; erts_driver_t* drv_ptr = NULL; - erts_smp_mtx_t* mtx; + erts_mtx_t* mtx; ErtsSysFdType fd; ErtsDrvEventState *state; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS fd = fdlp->fd; mtx = fd_mtx(fd); - erts_smp_mtx_lock(mtx); + erts_mtx_lock(mtx); state = &drv_ev_state[(int) fd]; #else state = fdlp->state; fd = state->fd; ASSERT(fd == fdlp->fd); mtx = fd_mtx(fd); - erts_smp_mtx_lock(mtx); + erts_mtx_lock(mtx); #endif ASSERT(state->remove_cnt > 0); if (--state->remove_cnt == 0) { @@ -440,7 +420,7 @@ forget_removed(struct pollset_info* psi) ASSERT(0); } } - erts_smp_mtx_unlock(mtx); + erts_mtx_unlock(mtx); if (drv_ptr) { int was_unmasked = erts_block_fpe(); DTRACE1(driver_stop_select, drv_ptr->name); @@ -460,7 +440,6 @@ forget_removed(struct pollset_info* psi) fdlp = fdlp->next; removed_fd_free(tofree); } -#endif /* ERTS_SMP */ } #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS @@ -471,15 +450,15 @@ grow_drv_ev_state(int min_ix) int old_len; int new_len; - erts_smp_mtx_lock(&drv_ev_state_grow_lock); - old_len = erts_smp_atomic_read_nob(&drv_ev_state_len); + erts_mtx_lock(&drv_ev_state_grow_lock); + old_len = erts_atomic_read_nob(&drv_ev_state_len); if (min_ix >= old_len) { new_len = erts_poll_new_table_len(old_len, min_ix + 1); if (new_len > max_fds) new_len = max_fds; for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { /* lock all fd's */ - erts_smp_mtx_lock(&drv_ev_state_locks[i].lck); + erts_mtx_lock(&drv_ev_state_locks[i].lck); } drv_ev_state = (drv_ev_state ? 
erts_realloc(ERTS_ALC_T_DRV_EV_STATE, @@ -500,14 +479,14 @@ grow_drv_ev_state(int min_ix) drv_ev_state[i].type = ERTS_EV_TYPE_NONE; drv_ev_state[i].flags = 0; } - erts_smp_atomic_set_nob(&drv_ev_state_len, new_len); + erts_atomic_set_nob(&drv_ev_state_len, new_len); for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { - erts_smp_mtx_unlock(&drv_ev_state_locks[i].lck); + erts_mtx_unlock(&drv_ev_state_locks[i].lck); } } /*else already grown by racing thread */ - erts_smp_mtx_unlock(&drv_ev_state_grow_lock); + erts_mtx_unlock(&drv_ev_state_grow_lock); } #endif /* ERTS_SYS_CONTINOUS_FD_NUMBERS */ @@ -565,7 +544,7 @@ deselect(ErtsDrvEventState *state, int mode) { int do_wake = 0; ErtsPollEvents rm_events; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd))); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(fd_mtx(state->fd))); ASSERT(state->events); abort_tasks(state, mode); @@ -637,9 +616,9 @@ check_fd_cleanup(ErtsDrvEventState *state, { erts_aint_t current_cio_time; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd))); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(fd_mtx(state->fd))); - current_cio_time = erts_smp_atomic_read_acqb(&erts_check_io_time); + current_cio_time = erts_atomic_read_acqb(&erts_check_io_time); *free_select = NULL; if (state->driver.select && (state->type != ERTS_EV_TYPE_DRV_SEL) @@ -692,7 +671,7 @@ check_cleanup_active_fd(ErtsSysFdType fd, { ErtsDrvEventState *state; int active = 0; - erts_smp_mtx_t *mtx = fd_mtx(fd); + erts_mtx_t *mtx = fd_mtx(fd); void *free_select = NULL; void *free_nif = NULL; #if ERTS_CIO_HAVE_DRV_EVENT @@ -702,7 +681,7 @@ check_cleanup_active_fd(ErtsSysFdType fd, ErtsPollEvents evon = 0, evoff = 0; #endif - erts_smp_mtx_lock(mtx); + erts_mtx_lock(mtx); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS state = &drv_ev_state[(int) fd]; @@ -823,7 +802,7 @@ check_cleanup_active_fd(ErtsSysFdType fd, } - erts_smp_mtx_unlock(mtx); + erts_mtx_unlock(mtx); if (free_select) free_drv_select_data(free_select); @@ -857,7 +836,7 @@ check_cleanup_active_fds(erts_aint_t current_cio_time, int may_sleep) { int six = pollset.active_fd.six; int eix = pollset.active_fd.eix; - erts_aint32_t no = erts_smp_atomic32_read_dirty(&pollset.active_fd.no); + erts_aint32_t no = erts_atomic32_read_dirty(&pollset.active_fd.no); int size = pollset.active_fd.size; int ix = six; #if ERTS_CIO_DEFER_ACTIVE_EVENTS @@ -912,7 +891,7 @@ check_cleanup_active_fds(erts_aint_t current_cio_time, int may_sleep) pollset.active_fd.six = six; pollset.active_fd.eix = eix; - erts_smp_atomic32_set_relb(&pollset.active_fd.no, no); + erts_atomic32_set_relb(&pollset.active_fd.no, no); } static void grow_active_fds(void) @@ -941,8 +920,8 @@ add_active_fd(ErtsSysFdType fd) pollset.active_fd.array[eix] = fd; - erts_smp_atomic32_set_relb(&pollset.active_fd.no, - (erts_smp_atomic32_read_dirty(&pollset.active_fd.no) + erts_atomic32_set_relb(&pollset.active_fd.no, + (erts_atomic32_read_dirty(&pollset.active_fd.no) + 1)); eix++; @@ -982,10 +961,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) { + if ((unsigned)fd >= (unsigned)erts_atomic_read_nob(&drv_ev_state_len)) { if (fd < 0) { return -1; } @@ -997,7 +976,7 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, } #endif - erts_smp_mtx_lock(fd_mtx(fd)); + erts_mtx_lock(fd_mtx(fd)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS 
state = &drv_ev_state[(int) fd]; @@ -1178,7 +1157,7 @@ done: &free_nif); done_unknown: - erts_smp_mtx_unlock(fd_mtx(fd)); + erts_mtx_unlock(fd_mtx(fd)); if (stop_select_fn) { int was_unmasked = erts_block_fpe(); DTRACE1(driver_stop_select, name); @@ -1227,7 +1206,7 @@ ERTS_CIO_EXPORT(enif_select)(ErlNifEnv* env, ASSERT(!(resource->monitors && resource->monitors->is_dying)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) { + if ((unsigned)fd >= (unsigned)erts_atomic_read_nob(&drv_ev_state_len)) { if (fd < 0) { return INT_MIN | ERL_NIF_SELECT_INVALID_EVENT; } @@ -1239,7 +1218,7 @@ ERTS_CIO_EXPORT(enif_select)(ErlNifEnv* env, } #endif - erts_smp_mtx_lock(fd_mtx(fd)); + erts_mtx_lock(fd_mtx(fd)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS state = &drv_ev_state[(int) fd]; @@ -1433,7 +1412,7 @@ done: &free_nif); done_unknown: - erts_smp_mtx_unlock(fd_mtx(fd)); + erts_mtx_unlock(fd_mtx(fd)); if (call_stop) { erts_resource_stop(resource, (ErlNifEvent)fd, 1); if (call_stop == CALL_STOP_AND_RELEASE) { @@ -1479,10 +1458,10 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix, if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) { + if ((unsigned)fd >= (unsigned)erts_atomic_read_nob(&drv_ev_state_len)) { if (fd < 0) return -1; if (fd >= max_fds) { @@ -1493,7 +1472,7 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix, } #endif - erts_smp_mtx_lock(fd_mtx(fd)); + erts_mtx_lock(fd_mtx(fd)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS state = &drv_ev_state[(int) fd]; @@ -1586,7 +1565,7 @@ done: &free_select, &free_nif); - erts_smp_mtx_unlock(fd_mtx(fd)); + erts_mtx_unlock(fd_mtx(fd)); if (free_select) free_drv_select_data(free_select); @@ -2016,7 +1995,7 @@ iready(Eterm id, ErtsDrvEventState *state, erts_aint_t current_cio_time) ERTS_PORT_TASK_INPUT, current_cio_time)) { ErtsIoTask *iotask = &state->driver.select->iniotask; - erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time); + erts_atomic_set_nob(&iotask->executed_time, current_cio_time); if (erts_port_task_schedule(id, &iotask->task, ERTS_PORT_TASK_INPUT, @@ -2034,7 +2013,7 @@ oready(Eterm id, ErtsDrvEventState *state, erts_aint_t current_cio_time) ERTS_PORT_TASK_OUTPUT, current_cio_time)) { ErtsIoTask *iotask = &state->driver.select->outiotask; - erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time); + erts_atomic_set_nob(&iotask->executed_time, current_cio_time); if (erts_port_task_schedule(id, &iotask->task, ERTS_PORT_TASK_OUTPUT, @@ -2090,7 +2069,7 @@ send_event_tuple(struct erts_nif_select_event* e, ErtsResource* resource, erts_queue_message(rp, rp_locks, mp, tuple, am_system); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } @@ -2103,7 +2082,7 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data, ERTS_PORT_TASK_EVENT, current_cio_time)) { ErtsIoTask *iotask = &state->driver.event->iotask; - erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time); + erts_atomic_set_nob(&iotask->executed_time, current_cio_time); if (erts_port_task_schedule(id, &iotask->task, ERTS_PORT_TASK_EVENT, @@ -2118,13 +2097,6 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data, static void bad_fd_in_pollset(ErtsDrvEventState *, Eterm inport, Eterm outport); -#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -void 
-ERTS_CIO_EXPORT(erts_check_io_async_sig_interrupt)(void) -{ - ERTS_CIO_POLL_AS_INTR(pollset.ps); -} -#endif void ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set) @@ -2185,9 +2157,9 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) * erts_check_io_time, since only one thread can * check io at a time. */ - current_cio_time = erts_smp_atomic_read_dirty(&erts_check_io_time); + current_cio_time = erts_atomic_read_dirty(&erts_check_io_time); current_cio_time++; - erts_smp_atomic_set_relb(&erts_check_io_time, current_cio_time); + erts_atomic_set_relb(&erts_check_io_time, current_cio_time); check_cleanup_active_fds(current_cio_time, timeout_time != ERTS_POLL_NO_TIMEOUT); @@ -2196,11 +2168,11 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) erts_lc_check_exact(NULL, 0); /* No locks should be locked */ #endif - pollres_len = erts_smp_atomic32_read_dirty(&pollset.active_fd.no) + ERTS_CHECK_IO_POLL_RES_LEN; + pollres_len = erts_atomic32_read_dirty(&pollset.active_fd.no) + ERTS_CHECK_IO_POLL_RES_LEN; pollres = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollResFd)*pollres_len); - erts_smp_atomic_set_nob(&pollset.in_poll_wait, 1); + erts_atomic_set_nob(&pollset.in_poll_wait, 1); poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, timeout_time); @@ -2222,7 +2194,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) if (poll_ret != 0) { - erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0); + erts_atomic_set_nob(&pollset.in_poll_wait, 0); forget_removed(&pollset); erts_free(ERTS_ALC_T_TMP, pollres); if (poll_ret == EAGAIN) { @@ -2248,7 +2220,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) ErtsSysFdType fd = (ErtsSysFdType) pollres[i].fd; ErtsDrvEventState *state; - erts_smp_mtx_lock(fd_mtx(fd)); + erts_mtx_lock(fd_mtx(fd)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS state = &drv_ev_state[ (int) fd]; @@ -2355,9 +2327,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) add_active_fd(state->fd); } -#ifdef ERTS_SMP - erts_smp_mtx_unlock(fd_mtx(fd)); -#endif + erts_mtx_unlock(fd_mtx(fd)); if (is_not_nil(in.pid)) { send_event_tuple(&in, resource, am_ready_input); } @@ -2404,13 +2374,11 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) } next_pollres:; -#ifdef ERTS_SMP - erts_smp_mtx_unlock(fd_mtx(fd)); -#endif + erts_mtx_unlock(fd_mtx(fd)); next_pollres_unlocked:; } - erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0); + erts_atomic_set_nob(&pollset.in_poll_wait, 0); erts_free(ERTS_ALC_T_TMP, pollres); forget_removed(&pollset); } @@ -2506,16 +2474,16 @@ static int drv_ev_state_cmp(void *des1, void *des2) static void *drv_ev_state_alloc(void *des_tmpl) { ErtsDrvEventState *evstate; - erts_smp_spin_lock(&state_prealloc_lock); + erts_spin_lock(&state_prealloc_lock); if (state_prealloc_first == NULL) { - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); evstate = (ErtsDrvEventState *) erts_alloc(ERTS_ALC_T_DRV_EV_STATE, sizeof(ErtsDrvEventState)); } else { evstate = state_prealloc_first; state_prealloc_first = (ErtsDrvEventState *) evstate->hb.next; --num_state_prealloc; - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); } /* XXX: Already valid data if prealloced, could ignore template! 
*/ *evstate = *((ErtsDrvEventState *) des_tmpl); @@ -2525,11 +2493,11 @@ static void *drv_ev_state_alloc(void *des_tmpl) static void drv_ev_state_free(void *des) { - erts_smp_spin_lock(&state_prealloc_lock); + erts_spin_lock(&state_prealloc_lock); ((ErtsDrvEventState *) des)->hb.next = &state_prealloc_first->hb; state_prealloc_first = (ErtsDrvEventState *) des; ++num_state_prealloc; - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); } #endif @@ -2540,15 +2508,15 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void) ERL_NIF_SELECT_STOP_SCHEDULED | ERL_NIF_SELECT_INVALID_EVENT | ERL_NIF_SELECT_FAILED)) == 0); - erts_smp_atomic_init_nob(&erts_check_io_time, 0); - erts_smp_atomic_init_nob(&pollset.in_poll_wait, 0); + erts_atomic_init_nob(&erts_check_io_time, 0); + erts_atomic_init_nob(&pollset.in_poll_wait, 0); ERTS_CIO_POLL_INIT(); pollset.ps = ERTS_CIO_NEW_POLLSET(); pollset.active_fd.six = 0; pollset.active_fd.eix = 0; - erts_smp_atomic32_init_nob(&pollset.active_fd.no, 0); + erts_atomic32_init_nob(&pollset.active_fd.no, 0); pollset.active_fd.size = ERTS_ACTIVE_FD_INC; pollset.active_fd.array = erts_alloc(ERTS_ALC_T_ACTIVE_FD_ARR, sizeof(ErtsSysFdType)*ERTS_ACTIVE_FD_INC); @@ -2561,24 +2529,22 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void) #endif -#ifdef ERTS_SMP init_removed_fd_alloc(); pollset.removed_list = NULL; - erts_smp_spinlock_init(&pollset.removed_list_lock, "pollset_rm_list", NIL, + erts_spinlock_init(&pollset.removed_list_lock, "pollset_rm_list", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); { int i; for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { - erts_smp_mtx_init(&drv_ev_state_locks[i].lck, "drv_ev_state", make_small(i), + erts_mtx_init(&drv_ev_state_locks[i].lck, "drv_ev_state", make_small(i), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); } } -#endif #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS max_fds = ERTS_CIO_POLL_MAX_FDS(); - erts_smp_atomic_init_nob(&drv_ev_state_len, 0); + erts_atomic_init_nob(&drv_ev_state_len, 0); drv_ev_state = NULL; - erts_smp_mtx_init(&drv_ev_state_grow_lock, "drv_ev_state_grow", NIL, + erts_mtx_init(&drv_ev_state_grow_lock, "drv_ev_state_grow", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); #else { @@ -2589,7 +2555,7 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void) hf.free = &drv_ev_state_free; num_state_prealloc = 0; state_prealloc_first = NULL; - erts_smp_spinlock_init(&state_prealloc_lock,"state_prealloc", NIL, + erts_spinlock_init(&state_prealloc_lock,"state_prealloc", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); safe_hash_init(ERTS_ALC_T_DRV_EV_STATE, &drv_ev_state_tab, "drv_ev_state_tab", @@ -2616,7 +2582,7 @@ ERTS_CIO_EXPORT(erts_check_io_size)(void) ERTS_CIO_POLL_INFO(pollset.ps, &pi); res = pi.memory_size; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - res += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len); + res += sizeof(ErtsDrvEventState) * erts_atomic_read_nob(&drv_ev_state_len); #else res += safe_hash_table_sz(&drv_ev_state_tab); { @@ -2624,9 +2590,9 @@ ERTS_CIO_EXPORT(erts_check_io_size)(void) safe_hash_get_info(&hi, &drv_ev_state_tab); res += hi.objs * sizeof(ErtsDrvEventState); } - erts_smp_spin_lock(&state_prealloc_lock); + erts_spin_lock(&state_prealloc_lock); res += num_state_prealloc * sizeof(ErtsDrvEventState); - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); #endif return res; } @@ -2639,8 +2605,8 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) Uint sz, *szp, *hp, **hpp, 
memory_size; Sint i; ErtsPollInfo pi; - erts_aint_t cio_time = erts_smp_atomic_read_acqb(&erts_check_io_time); - int active_fds = (int) erts_smp_atomic32_read_acqb(&pollset.active_fd.no); + erts_aint_t cio_time = erts_atomic_read_acqb(&erts_check_io_time); + int active_fds = (int) erts_atomic32_read_acqb(&pollset.active_fd.no); while (1) { erts_aint_t post_cio_time; @@ -2648,8 +2614,8 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) ERTS_CIO_POLL_INFO(pollset.ps, &pi); - post_cio_time = erts_smp_atomic_read_mb(&erts_check_io_time); - post_active_fds = (int) erts_smp_atomic32_read_acqb(&pollset.active_fd.no); + post_cio_time = erts_atomic_read_mb(&erts_check_io_time); + post_active_fds = (int) erts_atomic32_read_acqb(&pollset.active_fd.no); if (cio_time == post_cio_time && active_fds == post_active_fds) break; cio_time = post_cio_time; @@ -2658,7 +2624,7 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) memory_size = pi.memory_size; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len); + memory_size += sizeof(ErtsDrvEventState) * erts_atomic_read_nob(&drv_ev_state_len); #else memory_size += safe_hash_table_sz(&drv_ev_state_tab); { @@ -2666,9 +2632,9 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) safe_hash_get_info(&hi, &drv_ev_state_tab); memory_size += hi.objs * sizeof(ErtsDrvEventState); } - erts_smp_spin_lock(&state_prealloc_lock); + erts_spin_lock(&state_prealloc_lock); memory_size += num_state_prealloc * sizeof(ErtsDrvEventState); - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); #endif hpp = NULL; @@ -3076,11 +3042,11 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip) erts_printf("--- fds in pollset --------------------------------------\n"); -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) erts_lc_check_exact(NULL, 0); /* No locks should be locked */ #endif - erts_smp_thr_progress_block(); /* stop the world to avoid messy locking */ + erts_thr_progress_block(); /* stop the world to avoid messy locking */ #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS counters.epep = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollEvents)*max_fds); @@ -3093,7 +3059,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip) counters.no_driver_event_structs = 0; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - len = erts_smp_atomic_read_nob(&drv_ev_state_len); + len = erts_atomic_read_nob(&drv_ev_state_len); for (fd = 0; fd < len; fd++) { doit_erts_check_io_debug((void *) &drv_ev_state[fd], (void *) &counters); } @@ -3105,7 +3071,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip) safe_hash_for_each(&drv_ev_state_tab, &doit_erts_check_io_debug, (void *) &counters); #endif - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); ciodip->no_used_fds = counters.used_fds; ciodip->no_driver_select_structs = counters.no_driver_select_structs; diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h index 2d3bb98afa..777942a473 100644 --- a/erts/emulator/sys/common/erl_check_io.h +++ b/erts/emulator/sys/common/erl_check_io.h @@ -44,10 +44,6 @@ Eterm erts_check_io_info_kp(void *); Eterm erts_check_io_info_nkp(void *); int erts_check_io_max_files_kp(void); int erts_check_io_max_files_nkp(void); -#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -void erts_check_io_async_sig_interrupt_kp(void); -void erts_check_io_async_sig_interrupt_nkp(void); -#endif void 
erts_check_io_interrupt_kp(int); void erts_check_io_interrupt_nkp(int); void erts_check_io_interrupt_timed_kp(int, ErtsMonotonicTime); @@ -69,9 +65,6 @@ void erts_lcnt_update_cio_locks_nkp(int enable); Uint erts_check_io_size(void); Eterm erts_check_io_info(void *); int erts_check_io_max_files(void); -#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -void erts_check_io_async_sig_interrupt(void); -#endif void erts_check_io_interrupt(int); void erts_check_io_interrupt_timed(int, ErtsMonotonicTime); void erts_check_io(int); @@ -83,11 +76,11 @@ void erts_lcnt_update_cio_locks(int enable); #endif -extern erts_smp_atomic_t erts_check_io_time; +extern erts_atomic_t erts_check_io_time; typedef struct { ErtsPortTaskHandle task; - erts_smp_atomic_t executed_time; + erts_atomic_t executed_time; } ErtsIoTask; ERTS_GLB_INLINE void erts_io_notify_port_task_executed(ErtsPortTaskHandle *pthp); @@ -98,8 +91,8 @@ ERTS_GLB_INLINE void erts_io_notify_port_task_executed(ErtsPortTaskHandle *pthp) { ErtsIoTask *itp = (ErtsIoTask *) (((char *) pthp) - offsetof(ErtsIoTask, task)); - erts_aint_t ci_time = erts_smp_atomic_read_acqb(&erts_check_io_time); - erts_smp_atomic_set_relb(&itp->executed_time, ci_time); + erts_aint_t ci_time = erts_atomic_read_acqb(&erts_check_io_time); + erts_atomic_set_relb(&itp->executed_time, ci_time); } #endif diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c index 214ed01c82..a9c6e72c5f 100644 --- a/erts/emulator/sys/common/erl_mmap.c +++ b/erts/emulator/sys/common/erl_mmap.c @@ -24,7 +24,6 @@ #define ERTS_WANT_MEM_MAPPERS #include "sys.h" #include "erl_process.h" -#include "erl_smp.h" #include "atom.h" #include "erl_mmap.h" #include <stddef.h> @@ -62,11 +61,11 @@ (((UWord) (PTR)) - ((UWord) mm->sa.bot) \ < ((UWord) mm->sua.top) - ((UWord) mm->sa.bot)) #define ERTS_MMAP_IN_SUPERALIGNED_AREA(PTR) \ - (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ + (ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ (((UWord) (PTR)) - ((UWord) mm->sa.bot) \ < ((UWord) mm->sa.top) - ((UWord) mm->sa.bot))) #define ERTS_MMAP_IN_SUPERUNALIGNED_AREA(PTR) \ - (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ + (ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ (((UWord) (PTR)) - ((UWord) mm->sua.bot) \ < ((UWord) mm->sua.top) - ((UWord) mm->sua.bot))) @@ -199,10 +198,10 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; #define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ) \ do { \ - erts_smp_mtx_lock(&mm->mtx); \ + erts_mtx_lock(&mm->mtx); \ ERTS_MMAP_OP_START((IN_SZ)); \ ERTS_MMAP_OP_END((RES), (OUT_SZ)); \ - erts_smp_mtx_unlock(&mm->mtx); \ + erts_mtx_unlock(&mm->mtx); \ } while (0) #define ERTS_MUNMAP_OP(PTR, SZ) \ @@ -221,9 +220,9 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; #define ERTS_MUNMAP_OP_LCK(PTR, SZ) \ do { \ - erts_smp_mtx_lock(&mm->mtx); \ + erts_mtx_lock(&mm->mtx); \ ERTS_MUNMAP_OP((PTR), (SZ)); \ - erts_smp_mtx_unlock(&mm->mtx); \ + erts_mtx_unlock(&mm->mtx); \ } while (0) #define ERTS_MREMAP_OP_START(OLD_PTR, OLD_SZ, IN_SZ) \ @@ -249,10 +248,10 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; #define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ) \ do { \ - erts_smp_mtx_lock(&mm->mtx); \ + erts_mtx_lock(&mm->mtx); \ ERTS_MREMAP_OP_START((OLD_PTR), (OLD_SZ), (IN_SZ)); \ ERTS_MREMAP_OP_END((RES), (OUT_SZ)); \ - erts_smp_mtx_unlock(&mm->mtx); \ + erts_mtx_unlock(&mm->mtx); \ } while (0) #define ERTS_MMAP_OP_ABORT() \ @@ -321,7 +320,7 @@ struct ErtsMemMapper_ { #if HAVE_MMAP && (!defined(MAP_ANON) && 
!defined(MAP_ANONYMOUS)) int mmap_fd; #endif - erts_smp_mtx_t mtx; + erts_mtx_t mtx; struct { char *free_list; char *unused_start; @@ -1536,7 +1535,7 @@ erts_mmap(ErtsMemMapper* mm, Uint32 flags, UWord *sizep) ErtsFreeSegDesc *desc; Uint32 superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); ERTS_MMAP_OP_START(*sizep); @@ -1660,7 +1659,7 @@ erts_mmap(ErtsMemMapper* mm, Uint32 flags, UWord *sizep) } ERTS_MMAP_OP_ABORT(); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); } #if ERTS_HAVE_OS_MMAP @@ -1724,13 +1723,13 @@ supercarrier_success: #endif ERTS_MMAP_OP_END(seg, asize); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); *sizep = asize; return (void *) seg; supercarrier_reserve_failure: - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); *sizep = 0; return NULL; } @@ -1760,7 +1759,7 @@ erts_munmap(ErtsMemMapper* mm, Uint32 flags, void *ptr, UWord size) start = (char *) ptr; end = start + size; - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); ERTS_MUNMAP_OP(ptr, size); @@ -1829,7 +1828,7 @@ erts_munmap(ErtsMemMapper* mm, Uint32 flags, void *ptr, UWord size) if (unres_sz) mm->unreserve_physical(((char *) ptr) + ad_sz, unres_sz); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); } } } @@ -1948,12 +1947,12 @@ erts_mremap(ErtsMemMapper* mm, ? ERTS_SUPERALIGNED_CEILING(*sizep) : ERTS_PAGEALIGNED_CEILING(*sizep)); - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr) ? (!superaligned && lookup_free_seg(&mm->sua.map, asize)) : (superaligned && lookup_free_seg(&mm->sa.map, asize))) { - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); /* * Segment currently in wrong area (due to a previous memory * shortage), move it to the right area. @@ -2068,7 +2067,7 @@ erts_mremap(ErtsMemMapper* mm, } ERTS_MMAP_OP_ABORT(); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); /* Failed to resize... 
*/ } @@ -2090,14 +2089,14 @@ supercarrier_resize_success: #endif ERTS_MREMAP_OP_END(new_ptr, asize); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); *sizep = asize; return new_ptr; supercarrier_reserve_failure: ERTS_MREMAP_OP_END(NULL, old_size); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); *sizep = old_size; return NULL; @@ -2212,7 +2211,7 @@ erts_mmap_init(ErtsMemMapper* mm, ErtsMMapInit *init, int executable) erts_exit(1, "erts_mmap: Failed to open /dev/zero\n"); #endif - erts_smp_mtx_init(&mm->mtx, "erts_mmap", NIL, + erts_mtx_init(&mm->mtx, "erts_mmap", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); if (is_first_call) { erts_mtx_init(&am.init_mutex, "mmap_init_atoms", NIL, @@ -2407,7 +2406,7 @@ Eterm erts_mmap_info(ErtsMemMapper* mm, Eterm res = THE_NON_VALUE; if (!hpp) { - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); emis->sizes[0] = mm->size.supercarrier.total; emis->sizes[1] = mm->sa.top - mm->sa.bot; emis->sizes[2] = mm->sua.top - mm->sua.bot; @@ -2423,7 +2422,7 @@ Eterm erts_mmap_info(ErtsMemMapper* mm, emis->segs[5] = mm->sua.map.nseg; emis->os_used = mm->size.os.used; - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); } list[lix] = erts_mmap_info_options(mm, "option ", print_to_p, print_to_arg, @@ -2543,14 +2542,14 @@ Eterm erts_mmap_debug_info(Process* p) Eterm *hp, *hp_end; Uint may_need; - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); values[0] = (UWord)mm->sa.bot; values[1] = (UWord)mm->sa.top; values[2] = (UWord)mm->sua.bot; values[3] = (UWord)mm->sua.top; sa_list = build_free_seg_list(p, &mm->sa.map); sua_list = build_free_seg_list(p, &mm->sua.map); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); may_need = 4*(2+3+2) + 2*(2+3); hp = HAlloc(p, may_need); diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index d69a79dc2a..bf6de9b13a 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -188,7 +188,6 @@ typedef union { static int no_mseg_allocators; static ErtsAlgndMsegAllctr_t *aligned_mseg_allctr; -#ifdef ERTS_SMP #define ERTS_MSEG_ALLCTR_IX(IX) \ (&aligned_mseg_allctr[(IX)].mseg_alloc) @@ -199,18 +198,6 @@ static ErtsAlgndMsegAllctr_t *aligned_mseg_allctr; #define ERTS_MSEG_ALLCTR_OPT(OPT) \ ((OPT)->sched_spec ? 
ERTS_MSEG_ALLCTR_SS() : ERTS_MSEG_ALLCTR_IX(0)) -#else - -#define ERTS_MSEG_ALLCTR_IX(IX) \ - (&aligned_mseg_allctr[0].mseg_alloc) - -#define ERTS_MSEG_ALLCTR_SS() \ - (&aligned_mseg_allctr[0].mseg_alloc) - -#define ERTS_MSEG_ALLCTR_OPT(OPT) \ - (&aligned_mseg_allctr[0].mseg_alloc) - -#endif #define ERTS_MSEG_LOCK(MA) \ do { \ @@ -352,11 +339,11 @@ mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, void *old_seg, UWord old_size, U do { \ if ((MA)->is_thread_safe) \ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&(MA)->mtx) \ - || erts_smp_thr_progress_is_blocking() \ + || erts_thr_progress_is_blocking() \ || ERTS_IS_CRASH_DUMPING); \ else \ ERTS_LC_ASSERT((MA)->ix == (int) erts_get_scheduler_id() \ - || erts_smp_thr_progress_is_blocking() \ + || erts_thr_progress_is_blocking() \ || ERTS_IS_CRASH_DUMPING); \ } while (0) #else @@ -1404,11 +1391,7 @@ erts_mseg_init(ErtsMsegInit_t *init) int i; UWord x; -#ifdef ERTS_SMP no_mseg_allocators = init->nos + 1; -#else - no_mseg_allocators = 1; -#endif x = (UWord) malloc(sizeof(ErtsAlgndMsegAllctr_t) *no_mseg_allocators diff --git a/erts/emulator/sys/common/erl_os_monotonic_time_extender.c b/erts/emulator/sys/common/erl_os_monotonic_time_extender.c index d53190fdd5..341845cc2a 100644 --- a/erts/emulator/sys/common/erl_os_monotonic_time_extender.c +++ b/erts/emulator/sys/common/erl_os_monotonic_time_extender.c @@ -23,7 +23,6 @@ #endif #include "erl_os_monotonic_time_extender.h" -#ifdef USE_THREADS static void *os_monotonic_time_extender(void *vstatep) { @@ -49,30 +48,22 @@ static void *os_monotonic_time_extender(void *vstatep) } static erts_tid_t os_monotonic_extender_tid; -#endif void erts_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep, Uint32 (*raw_os_monotonic_time)(void), int check_seconds) { -#ifdef USE_THREADS statep->raw_os_monotonic_time = raw_os_monotonic_time; erts_atomic32_init_nob(&statep->extend[0], (erts_aint32_t) 0); erts_atomic32_init_nob(&statep->extend[1], (erts_aint32_t) 0); statep->check_interval = check_seconds; -#else - statep->extend[0] = (Uint32) 0; - statep->extend[1] = (Uint32) 0; - statep->last_msb = (ErtsMonotonicTime) 0; -#endif } void erts_late_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep) { -#ifdef USE_THREADS erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER; thr_opts.detached = 1; thr_opts.suggested_stack_size = 4; @@ -85,5 +76,4 @@ erts_late_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep os_monotonic_time_extender, (void*) statep, &thr_opts); -#endif } diff --git a/erts/emulator/sys/common/erl_os_monotonic_time_extender.h b/erts/emulator/sys/common/erl_os_monotonic_time_extender.h index 8089c9aed9..53c32579d5 100644 --- a/erts/emulator/sys/common/erl_os_monotonic_time_extender.h +++ b/erts/emulator/sys/common/erl_os_monotonic_time_extender.h @@ -25,36 +25,16 @@ #include "erl_threads.h" typedef struct { -#ifdef USE_THREADS Uint32 (*raw_os_monotonic_time)(void); erts_atomic32_t extend[2]; int check_interval; -#else - Uint32 extend[2]; - ErtsMonotonicTime last_msb; -#endif } ErtsOsMonotonicTimeExtendState; -#ifdef USE_THREADS -# define ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(S, RT) ((void) 1) # define ERTS_EXTEND_OS_MONOTONIC_TIME(S, RT) \ ((((ErtsMonotonicTime) \ erts_atomic32_read_nob(&((S)->extend[((int) ((RT) >> 31)) & 1]))) \ << 32) \ + (RT)) -#else -# define ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(S, RT) \ - do { \ - Uint32 msb__ = (RT) & (((Uint32) 1) << 31); \ - if (msb__ != (S)->last_msb) { \ - int ix__ = ((int) ((S)->last_msb >> 31)) & 1; \ - 
(S)->extend[ix__]++; \ - (S)->last_msb = msb; \ - } \ - } while (0) -# define ERTS_EXTEND_OS_MONOTONIC_TIME(S, RT) \ - ((((ErtsMonotonicTime) (S)->extend[((int) ((RT) >> 31)) & 1]) << 32) + (RT)) -#endif void erts_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep, diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 52a8b6a53f..7d26839b0f 100644 --- a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -95,9 +95,6 @@ #define ERTS_POLL_DEBUG_PRINT #endif -#if defined(DEBUG) && 0 -#define HARD_DEBUG -#endif #ifdef _DARWIN_UNLIMITED_SELECT typedef struct { @@ -147,29 +144,15 @@ int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds, #define ERTS_POLL_USE_BATCH_UPDATE_POLLSET (ERTS_POLL_USE_DEVPOLL \ || ERTS_POLL_USE_KQUEUE) -#define ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE \ - (defined(ERTS_SMP) || ERTS_POLL_USE_KERNEL_POLL || ERTS_POLL_USE_POLL) -#define ERTS_POLL_USE_CONCURRENT_UPDATE \ - (defined(ERTS_SMP) && ERTS_POLL_USE_EPOLL) +#define ERTS_POLL_USE_CONCURRENT_UPDATE ERTS_POLL_USE_EPOLL #define ERTS_POLL_COALESCE_KP_RES (ERTS_POLL_USE_KQUEUE || ERTS_POLL_USE_EPOLL) -#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -# define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 1 -#else -# define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 0 -#endif - -#define ERTS_POLL_USE_WAKEUP_PIPE \ - (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(USE_THREADS)) - -#ifdef ERTS_SMP - #define ERTS_POLLSET_LOCK(PS) \ - erts_smp_mtx_lock(&(PS)->mtx) + erts_mtx_lock(&(PS)->mtx) #define ERTS_POLLSET_UNLOCK(PS) \ - erts_smp_mtx_unlock(&(PS)->mtx) + erts_mtx_unlock(&(PS)->mtx) #define ERTS_POLLSET_SET_POLLED_CHK(PS) \ ((int) erts_atomic32_xchg_nob(&(PS)->polled, (erts_aint32_t) 1)) @@ -178,28 +161,13 @@ int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds, #define ERTS_POLLSET_IS_POLLED(PS) \ ((int) erts_atomic32_read_nob(&(PS)->polled)) -#else - -#define ERTS_POLLSET_LOCK(PS) -#define ERTS_POLLSET_UNLOCK(PS) -#define ERTS_POLLSET_SET_POLLED_CHK(PS) 0 -#define ERTS_POLLSET_UNSET_POLLED(PS) -#define ERTS_POLLSET_IS_POLLED(PS) 0 -#endif - -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE #define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \ - erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 1) + erts_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 1) #define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \ - erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 0) + erts_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 0) #define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \ - ((int) erts_smp_atomic32_read_nob(&(PS)->have_update_requests)) -#else -#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) -#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) -#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) 0 -#endif + ((int) erts_atomic32_read_nob(&(PS)->have_update_requests)) #if ERTS_POLL_USE_FALLBACK # if ERTS_POLL_USE_POLL @@ -212,7 +180,6 @@ int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds, * --- Data types ------------------------------------------------------------ */ -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE #define ERTS_POLLSET_UPDATE_REQ_BLOCK_SIZE 128 typedef struct ErtsPollSetUpdateRequestsBlock_ ErtsPollSetUpdateRequestsBlock; @@ -222,19 +189,14 @@ struct ErtsPollSetUpdateRequestsBlock_ { int fds[ERTS_POLLSET_UPDATE_REQ_BLOCK_SIZE]; }; -#endif -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE # define ERTS_POLL_FD_FLG_INURQ (((unsigned short) 1) << 0) -#endif #if 
ERTS_POLL_USE_FALLBACK # define ERTS_POLL_FD_FLG_INFLBCK (((unsigned short) 1) << 1) # define ERTS_POLL_FD_FLG_USEFLBCK (((unsigned short) 1) << 2) #endif -#if ERTS_POLL_USE_KERNEL_POLL || defined(ERTS_SMP) # define ERTS_POLL_FD_FLG_RST (((unsigned short) 1) << 3) -#endif typedef struct { #if ERTS_POLL_USE_POLL int pix; @@ -244,9 +206,7 @@ typedef struct { #if ERTS_POLL_COALESCE_KP_RES unsigned short res_ev_ix; #endif -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE || ERTS_POLL_USE_FALLBACK unsigned short flags; -#endif } ErtsFdStatus; @@ -272,7 +232,7 @@ struct ErtsPollSet_ { ErtsPollSet next; int internal_fd_limit; ErtsFdStatus *fds_status; - erts_smp_atomic_t no_of_user_fds; + erts_atomic_t no_of_user_fds; int fds_status_len; #if ERTS_POLL_USE_KERNEL_POLL int kp_fd; @@ -301,32 +261,24 @@ struct ErtsPollSet_ { ERTS_fd_set output_fds; ERTS_fd_set res_output_fds; #endif -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE ErtsPollSetUpdateRequestsBlock update_requests; ErtsPollSetUpdateRequestsBlock *curr_upd_req_block; - erts_smp_atomic32_t have_update_requests; -#endif -#ifdef ERTS_SMP + erts_atomic32_t have_update_requests; erts_atomic32_t polled; - erts_smp_mtx_t mtx; -#endif -#if ERTS_POLL_USE_WAKEUP_PIPE + erts_mtx_t mtx; int wake_fds[2]; -#endif #if ERTS_POLL_USE_TIMERFD int timer_fd; #endif #if ERTS_POLL_USE_FALLBACK int fallback_used; #endif -#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT erts_atomic32_t wakeup_state; -#endif erts_atomic64_t timeout_time; #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS - erts_smp_atomic_t no_avoided_wakeups; - erts_smp_atomic_t no_avoided_interrupts; - erts_smp_atomic_t no_interrupt_timed; + erts_atomic_t no_avoided_wakeups; + erts_atomic_t no_avoided_interrupts; + erts_atomic_t no_interrupt_timed; #endif }; @@ -336,7 +288,7 @@ static void fatal_error_async_signal_safe(char *error_str); static int max_fds = -1; static ErtsPollSet pollsets; -static erts_smp_mtx_t pollsets_lock; +static erts_mtx_t pollsets_lock; #if ERTS_POLL_USE_POLL @@ -413,50 +365,37 @@ get_timeout_time(ErtsPollSet ps) static ERTS_INLINE void reset_wakeup_state(ErtsPollSet ps) { -#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT erts_atomic32_set_mb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN); -#endif } static ERTS_INLINE int is_woken(ErtsPollSet ps) { -#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN; -#else - return 0; -#endif } static ERTS_INLINE int is_interrupted_reset(ErtsPollSet ps) { -#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT return (erts_atomic32_xchg_acqb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN) == ERTS_POLL_WOKEN_INTR); -#else - return 0; -#endif } static ERTS_INLINE void woke_up(ErtsPollSet ps) { -#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT erts_aint32_t wakeup_state = erts_atomic32_read_acqb(&ps->wakeup_state); if (wakeup_state == ERTS_POLL_NOT_WOKEN) (void) erts_atomic32_cmpxchg_nob(&ps->wakeup_state, ERTS_POLL_WOKEN, ERTS_POLL_NOT_WOKEN); ASSERT(erts_atomic32_read_nob(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN); -#endif } /* * --- Wakeup pipe ----------------------------------------------------------- */ -#if ERTS_POLL_USE_WAKEUP_PIPE static ERTS_INLINE void wake_poller(ErtsPollSet ps, int interrupted, int async_signal_safe) @@ -507,18 +446,11 @@ wake_poller(ErtsPollSet ps, int interrupted, int async_signal_safe) static ERTS_INLINE void cleanup_wakeup_pipe(ErtsPollSet ps) { -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - int intr = 0; -#endif int fd = 
ps->wake_fds[0]; int res; do { char buf[32]; res = read(fd, buf, sizeof(buf)); -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - if (res > 0) - intr = 1; -#endif } while (res > 0 || (res < 0 && errno == EINTR)); if (res < 0 && errno != ERRNO_BLOCK) { fatal_error("%s:%d:cleanup_wakeup_pipe(): " @@ -528,10 +460,6 @@ cleanup_wakeup_pipe(ErtsPollSet ps) fd, erl_errno_id(errno), errno); } -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - if (intr) - erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR); -#endif } static void @@ -574,7 +502,6 @@ create_wakeup_pipe(ErtsPollSet ps) ps->wake_fds[1] = wake_fds[1]; } -#endif /* ERTS_POLL_USE_WAKEUP_PIPE */ /* * --- timer fd ----------------------------------------------------------- @@ -648,7 +575,6 @@ timerfd_clear(ErtsPollSet ps, int res, int max_res) { /* * --- Poll set update requests ---------------------------------------------- */ -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE static ERTS_INLINE void enqueue_update_request(ErtsPollSet ps, int fd) @@ -691,7 +617,6 @@ free_update_requests_block(ErtsPollSet ps, } } -#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */ /* * --- Growing poll set structures ------------------------------------------- @@ -819,9 +744,7 @@ grow_fds_status(ErtsPollSet ps, int min_fd) #if ERTS_POLL_COALESCE_KP_RES ps->fds_status[i].res_ev_ix = (unsigned short) ERTS_POLL_MAX_RES; #endif -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE || ERTS_POLL_USE_FALLBACK ps->fds_status[i].flags = (unsigned short) 0; -#endif } ps->fds_status_len = new_len; } @@ -849,7 +772,7 @@ need_update(ErtsPollSet ps, int fd) ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_RST; reset = 0; } -#elif defined(ERTS_SMP) +#else ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_RST; #endif @@ -1032,7 +955,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp) ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK; ASSERT(ps->fds_status[fd].used_events); ps->fds_status[fd].used_events = 0; - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); update_fallback_pollset(ps, fd); ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK); break; @@ -1082,11 +1005,11 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp) events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events); if (!events) { buf[buf_len].events = POLLREMOVE; - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); } else if (!ps->fds_status[fd].used_events) { buf[buf_len].events = events; - erts_smp_atomic_inc_nob(&ps->no_of_user_fds); + erts_atomic_inc_nob(&ps->no_of_user_fds); } else { if ((ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST) @@ -1176,12 +1099,12 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp) } if (used_events) { if (!events) { - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); } } else { if (events) - erts_smp_atomic_inc_nob(&ps->no_of_user_fds); + erts_atomic_inc_nob(&ps->no_of_user_fds); } ASSERT((events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0); ASSERT((used_events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0); @@ -1255,7 +1178,7 @@ update_pollset(ErtsPollSet ps, int fd) epe.data.fd = epe_templ.data.fd; res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe); } while (res != 0 && errno == EINTR); - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); ps->fds_status[fd].used_events = 0; } @@ -1263,11 +1186,11 @@ update_pollset(ErtsPollSet ps, int fd) /* A note on EPOLL_CTL_DEL: linux kernel versions before 2.6.9 need a 
non-NULL event pointer even though it is ignored... */ op = EPOLL_CTL_DEL; - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); } else if (!ps->fds_status[fd].used_events) { op = EPOLL_CTL_ADD; - erts_smp_atomic_inc_nob(&ps->no_of_user_fds); + erts_atomic_inc_nob(&ps->no_of_user_fds); } else { op = EPOLL_CTL_MOD; @@ -1317,7 +1240,7 @@ update_pollset(ErtsPollSet ps, int fd) /* Fall through ... */ case EPOLL_CTL_ADD: { ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK; - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); #if ERTS_POLL_USE_CONCURRENT_UPDATE if (!*update_fallback) { *update_fallback = 1; @@ -1405,7 +1328,7 @@ static int update_pollset(ErtsPollSet ps, int fd) #if ERTS_POLL_USE_FALLBACK ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK); #endif - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); last_pix = --ps->no_poll_fds; if (pix != last_pix) { /* Move last pix to this pix */ @@ -1432,7 +1355,7 @@ static int update_pollset(ErtsPollSet ps, int fd) ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK) || fd == ps->kp_fd); #endif - erts_smp_atomic_inc_nob(&ps->no_of_user_fds); + erts_atomic_inc_nob(&ps->no_of_user_fds); ps->fds_status[fd].pix = pix = ps->no_poll_fds++; if (pix >= ps->poll_fds_len) grow_poll_fds(ps, pix); @@ -1484,7 +1407,7 @@ static int update_pollset(ErtsPollSet ps, int fd) if (!ps->fds_status[fd].used_events) { ASSERT(events); - erts_smp_atomic_inc_nob(&ps->no_of_user_fds); + erts_atomic_inc_nob(&ps->no_of_user_fds); #if ERTS_POLL_USE_FALLBACK ps->no_select_fds++; ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK; @@ -1492,7 +1415,7 @@ static int update_pollset(ErtsPollSet ps, int fd) } else if (!events) { ASSERT(ps->fds_status[fd].used_events); - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); ps->fds_status[fd].events = events; #if ERTS_POLL_USE_FALLBACK ps->no_select_fds--; @@ -1518,7 +1441,6 @@ static int update_pollset(ErtsPollSet ps, int fd) #endif /* ERTS_POLL_USE_POLL || ERTS_POLL_USE_SELECT || ERTS_POLL_USE_FALLBACK */ -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE static void handle_update_requests(ErtsPollSet ps) @@ -1565,7 +1487,6 @@ handle_update_requests(ErtsPollSet ps) ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(ps); } -#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */ static ERTS_INLINE ErtsPollEvents poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake) @@ -1583,12 +1504,10 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake goto done; } #endif -#if ERTS_POLL_USE_WAKEUP_PIPE if (fd == ps->wake_fds[0] || fd == ps->wake_fds[1]) { new_events = ERTS_POLL_EV_NVAL; goto done; } -#endif #if ERTS_POLL_USE_TIMERFD if (fd == ps->timer_fd) { new_events = ERTS_POLL_EV_NVAL; @@ -1615,9 +1534,7 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake new_events &= ~events; if (new_events == (ErtsPollEvents) 0) { -#if ERTS_POLL_USE_KERNEL_POLL || defined(ERTS_SMP) ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_RST; -#endif #if ERTS_POLL_USE_FALLBACK ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_USEFLBCK; #endif @@ -1626,18 +1543,12 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake ps->fds_status[fd].events = new_events; if (new_events == ps->fds_status[fd].used_events -#if ERTS_POLL_USE_KERNEL_POLL || defined(ERTS_SMP) && !(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST) 
-#endif ) { *do_wake = 0; goto done; } -#if !ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE - if (update_pollset(ps, fd) != 0) - new_events = ERTS_POLL_EV_ERR; -#else /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */ #if ERTS_POLL_USE_CONCURRENT_UPDATE if (ERTS_POLLSET_IS_POLLED(ps)) { @@ -1652,7 +1563,6 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake enqueue_update_request(ps, fd); -#ifdef ERTS_SMP /* * If new events have been added, we need to wake up the * polling thread, but if events have been removed we don't. @@ -1660,9 +1570,7 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake if ((new_events && (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)) || (~ps->fds_status[fd].used_events & new_events)) *do_wake = 1; -#endif /* ERTS_SMP */ -#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */ done: #ifdef ERTS_POLL_DEBUG_PRINT @@ -1695,10 +1603,8 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps, ERTS_POLLSET_UNLOCK(ps); -#ifdef ERTS_SMP if (final_do_wake) wake_poller(ps, 0, 0); -#endif /* ERTS_SMP */ } @@ -1718,11 +1624,9 @@ ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps, ERTS_POLLSET_UNLOCK(ps); -#ifdef ERTS_SMP if (*do_wake) { wake_poller(ps, 0, 0); } -#endif /* ERTS_SMP */ return res; } @@ -1739,9 +1643,7 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res) int res = 0; int i; int n = chk_fds_res < max_res ? chk_fds_res : max_res; -#if ERTS_POLL_USE_WAKEUP_PIPE int wake_fd = ps->wake_fds[0]; -#endif #if ERTS_POLL_USE_TIMERFD int timer_fd = ps->timer_fd; #endif @@ -1754,12 +1656,10 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res) int fd = ps->res_events[i].data.fd; int ix; ErtsPollEvents revents; -#if ERTS_POLL_USE_WAKEUP_PIPE if (fd == wake_fd) { cleanup_wakeup_pipe(ps); continue; } -#endif #if ERTS_POLL_USE_TIMERFD if (fd == timer_fd) { continue; @@ -1805,12 +1705,10 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res) } if (ev->filter == EVFILT_READ) { -#if ERTS_POLL_USE_WAKEUP_PIPE if (fd == wake_fd) { cleanup_wakeup_pipe(ps); continue; } -#endif pr[ix].events |= ERTS_POLL_EV_IN; } else if (ev->filter == EVFILT_WRITE) @@ -1833,12 +1731,10 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res) if (ps->res_events[i].revents) { int fd = ps->res_events[i].fd; ErtsPollEvents revents; -#if ERTS_POLL_USE_WAKEUP_PIPE if (fd == wake_fd) { cleanup_wakeup_pipe(ps); continue; } -#endif #if ERTS_POLL_USE_TIMERFD if (fd == timer_fd) { continue; @@ -1913,7 +1809,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, #if ERTS_POLL_USE_POLL /* --- poll -------------------------------- */ int res = 0; -#if ERTS_POLL_USE_WAKEUP_PIPE && !ERTS_POLL_USE_FALLBACK +#if !ERTS_POLL_USE_FALLBACK int wake_fd = ps->wake_fds[0]; #endif int i, first_ix, end_ix; @@ -1938,7 +1834,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, i++; continue; } -#elif ERTS_POLL_USE_WAKEUP_PIPE +#else if (fd == wake_fd) { cleanup_wakeup_pipe(ps); i++; @@ -1964,7 +1860,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, #elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */ int res = 0; -#if ERTS_POLL_USE_WAKEUP_PIPE && !ERTS_POLL_USE_FALLBACK +#if !ERTS_POLL_USE_FALLBACK int wake_fd = ps->wake_fds[0]; #endif int fd, first_fd, end_fd; @@ -1988,7 +1884,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, fd++; continue; } -#elif ERTS_POLL_USE_WAKEUP_PIPE +#else if 
(fd == wake_fd) { cleanup_wakeup_pipe(ps); fd++; @@ -2051,7 +1947,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, fd++; continue; } -#elif ERTS_POLL_USE_WAKEUP_PIPE +#else if (fd == wake_fd) { cleanup_wakeup_pipe(ps); fd++; @@ -2073,7 +1969,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, fd++; continue; } -#elif ERTS_POLL_USE_WAKEUP_PIPE +#else if (fd == wake_fd) { cleanup_wakeup_pipe(ps); fd++; @@ -2243,7 +2139,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) { int res; ERTS_MSACC_PUSH_STATE_M(); - if (erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0 + if (erts_atomic_read_nob(&ps->no_of_user_fds) == 0 && timeout_time == ERTS_POLL_NO_TIMEOUT) { /* Nothing to poll and zero timeout; done... */ return 0; @@ -2261,9 +2157,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) struct itimerspec its; timeout = get_timeout_itimerspec(ps, &its, timeout_time); if (timeout) { -#ifdef ERTS_SMP erts_thr_progress_prepare_wait(NULL); -#endif ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); timerfd_set(ps, &its); res = epoll_wait(ps->kp_fd, ps->res_events, max_res, -1); @@ -2275,9 +2169,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) #else /* !ERTS_POLL_USE_TIMERFD */ timeout = (int) get_timeout(ps, 1000, timeout_time); if (timeout) { -#ifdef ERTS_SMP erts_thr_progress_prepare_wait(NULL); -#endif ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); } res = epoll_wait(ps->kp_fd, ps->res_events, max_res, timeout); @@ -2288,9 +2180,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) grow_res_events(ps, max_res); timeout = get_timeout_timespec(ps, &ts, timeout_time); if (timeout) { -#ifdef ERTS_SMP erts_thr_progress_prepare_wait(NULL); -#endif ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); } res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts); @@ -2307,19 +2197,15 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) * the maximum number of file descriptors in the poll set. */ struct dvpoll poll_res; - int nfds = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds); -#if ERTS_POLL_USE_WAKEUP_PIPE + int nfds = (int) erts_atomic_read_nob(&ps->no_of_user_fds); nfds++; /* Wakeup pipe */ -#endif timeout = (int) get_timeout(ps, 1000, timeout_time); poll_res.dp_nfds = nfds < max_res ? 
nfds : max_res; if (poll_res.dp_nfds > ps->res_events_len) grow_res_events(ps, poll_res.dp_nfds); poll_res.dp_fds = ps->res_events; if (timeout) { -#ifdef ERTS_SMP erts_thr_progress_prepare_wait(NULL); -#endif ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); } poll_res.dp_timeout = timeout; @@ -2328,9 +2214,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) struct timespec ts; timeout = get_timeout_timespec(ps, &ts, timeout_time); if (timeout) { -#ifdef ERTS_SMP erts_thr_progress_prepare_wait(NULL); -#endif ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); } res = ppoll(ps->poll_fds, ps->no_poll_fds, &ts, NULL); @@ -2338,9 +2222,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) timeout = (int) get_timeout(ps, 1000, timeout_time); if (timeout) { -#ifdef ERTS_SMP erts_thr_progress_prepare_wait(NULL); -#endif ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); } res = poll(ps->poll_fds, ps->no_poll_fds, timeout); @@ -2352,9 +2234,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) ERTS_FD_COPY(&ps->output_fds, &ps->res_output_fds); if (timeout) { -#ifdef ERTS_SMP erts_thr_progress_prepare_wait(NULL); -#endif ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); } res = ERTS_SELECT(ps->max_fd + 1, @@ -2362,7 +2242,6 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) &ps->res_output_fds, NULL, &to); -#ifdef ERTS_SMP if (timeout) { erts_thr_progress_finalize_wait(NULL); ERTS_MSACC_POP_STATE_M(); @@ -2397,14 +2276,11 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) res = -1; } } -#endif /* ERTS_SMP */ return res; #endif /* ----------------------------------------- */ } if (timeout) { -#ifdef ERTS_SMP erts_thr_progress_finalize_wait(NULL); -#endif ERTS_MSACC_POP_STATE_M(); } return res; @@ -2420,9 +2296,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, ErtsMonotonicTime to; int res, no_fds; int ebadf = 0; -#ifdef ERTS_SMP int ps_locked = 0; -#endif no_fds = *len; #ifdef ERTS_POLL_MAX_RES @@ -2447,13 +2321,11 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, ? ERTS_POLL_NO_TIMEOUT /* Use zero timeout */ : timeout_time); -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE if (ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) { ERTS_POLLSET_LOCK(ps); handle_update_requests(ps); ERTS_POLLSET_UNLOCK(ps); } -#endif while (1) { res = check_fd_events(ps, to, no_fds); @@ -2484,10 +2356,8 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, save_results: #endif -#ifdef ERTS_SMP ps_locked = 1; ERTS_POLLSET_LOCK(ps); -#endif no_fds = save_poll_result(ps, pr, no_fds, res, ebadf); @@ -2499,11 +2369,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, *len = no_fds; } -#ifdef ERTS_SMP if (ps_locked) ERTS_POLLSET_UNLOCK(ps); ERTS_POLLSET_UNSET_POLLED(ps); -#endif done: set_timeout_time(ps, ERTS_MONOTONIC_TIME_MAX); @@ -2522,25 +2390,12 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set) { -#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT if (!set) reset_wakeup_state(ps); else wake_poller(ps, 1, 0); -#endif } -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT -void -ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet ps) -{ - /* - * NOTE: This function is called from signal handlers, it, - * therefore, it has to be async-signal safe. 
- */ - wake_poller(ps, 1, 1); -} -#endif /* * erts_poll_interrupt_timed(): @@ -2552,7 +2407,6 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps, int set, ErtsMonotonicTime timeout_time) { -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP) if (!set) reset_wakeup_state(ps); else { @@ -2562,13 +2416,12 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps, #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS else { if (ERTS_POLLSET_IS_POLLED(ps)) - erts_smp_atomic_inc_nob(&ps->no_avoided_wakeups); - erts_smp_atomic_inc_nob(&ps->no_avoided_interrupts); + erts_atomic_inc_nob(&ps->no_avoided_wakeups); + erts_atomic_inc_nob(&ps->no_avoided_interrupts); } - erts_smp_atomic_inc_nob(&ps->no_interrupt_timed); + erts_atomic_inc_nob(&ps->no_interrupt_timed); #endif } -#endif } int @@ -2583,7 +2436,7 @@ ERTS_POLL_EXPORT(erts_poll_max_fds)(void) void ERTS_POLL_EXPORT(erts_poll_init)(void) { - erts_smp_mtx_init(&pollsets_lock, "pollsets_lock", NIL, + erts_mtx_init(&pollsets_lock, "pollsets_lock", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); pollsets = NULL; @@ -2623,7 +2476,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void) ps->internal_fd_limit = 0; ps->fds_status = NULL; ps->fds_status_len = 0; - erts_smp_atomic_init_nob(&ps->no_of_user_fds, 0); + erts_atomic_init_nob(&ps->no_of_user_fds, 0); #if ERTS_POLL_USE_KERNEL_POLL ps->kp_fd = -1; #if ERTS_POLL_USE_EPOLL @@ -2682,22 +2535,14 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void) ERTS_FD_ZERO(&ps->res_output_fds); #endif #endif -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE ps->update_requests.next = NULL; ps->update_requests.len = 0; ps->curr_upd_req_block = &ps->update_requests; - erts_smp_atomic32_init_nob(&ps->have_update_requests, 0); -#endif -#ifdef ERTS_SMP + erts_atomic32_init_nob(&ps->have_update_requests, 0); erts_atomic32_init_nob(&ps->polled, 0); - erts_smp_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO); -#endif -#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + erts_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO); erts_atomic32_init_nob(&ps->wakeup_state, (erts_aint32_t) 0); -#endif -#if ERTS_POLL_USE_WAKEUP_PIPE create_wakeup_pipe(ps); -#endif #if ERTS_POLL_USE_TIMERFD create_timerfd(ps); #endif @@ -2720,22 +2565,20 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void) #endif init_timeout_time(ps); #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS - erts_smp_atomic_init_nob(&ps->no_avoided_wakeups, 0); - erts_smp_atomic_init_nob(&ps->no_avoided_interrupts, 0); - erts_smp_atomic_init_nob(&ps->no_interrupt_timed, 0); + erts_atomic_init_nob(&ps->no_avoided_wakeups, 0); + erts_atomic_init_nob(&ps->no_avoided_interrupts, 0); + erts_atomic_init_nob(&ps->no_interrupt_timed, 0); #endif -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE handle_update_requests(ps); -#endif #if ERTS_POLL_USE_FALLBACK ps->fallback_used = 0; #endif - erts_smp_atomic_set_nob(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */ + erts_atomic_set_nob(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */ - erts_smp_mtx_lock(&pollsets_lock); + erts_mtx_lock(&pollsets_lock); ps->next = pollsets; pollsets = ps; - erts_smp_mtx_unlock(&pollsets_lock); + erts_mtx_unlock(&pollsets_lock); return ps; } @@ -2772,7 +2615,6 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps) erts_free(ERTS_ALC_T_SELECT_FDS, (void *) ps->res_output_fds.ptr); #endif #endif -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE { ErtsPollSetUpdateRequestsBlock *urqbp = ps->update_requests.next; while (urqbp) { 
@@ -2781,22 +2623,17 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps) free_update_requests_block(ps, free_urqbp); } } -#endif -#ifdef ERTS_SMP - erts_smp_mtx_destroy(&ps->mtx); -#endif -#if ERTS_POLL_USE_WAKEUP_PIPE + erts_mtx_destroy(&ps->mtx); if (ps->wake_fds[0] >= 0) close(ps->wake_fds[0]); if (ps->wake_fds[1] >= 0) close(ps->wake_fds[1]); -#endif #if ERTS_POLL_USE_TIMERFD if (ps->timer_fd >= 0) close(ps->timer_fd); #endif - erts_smp_mtx_lock(&pollsets_lock); + erts_mtx_lock(&pollsets_lock); if (ps == pollsets) pollsets = pollsets->next; else { @@ -2806,7 +2643,7 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps) ASSERT(ps == prev_ps->next); prev_ps->next = ps->next; } - erts_smp_mtx_unlock(&pollsets_lock); + erts_mtx_unlock(&pollsets_lock); erts_free(ERTS_ALC_T_POLLSET, (void *) ps); } @@ -2818,9 +2655,7 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps) void ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip) { -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE int pending_updates; -#endif Uint size = 0; ERTS_POLLSET_LOCK(ps); @@ -2845,7 +2680,6 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip) #endif #endif -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE { ErtsPollSetUpdateRequestsBlock *urqbp = ps->update_requests.next; pending_updates = ps->update_requests.len; @@ -2855,7 +2689,6 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip) urqbp = urqbp->next; } } -#endif pip->primary = #if ERTS_POLL_USE_KQUEUE @@ -2895,10 +2728,8 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip) pip->memory_size = size; - pip->poll_set_size = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds); -#if ERTS_POLL_USE_WAKEUP_PIPE + pip->poll_set_size = (int) erts_atomic_read_nob(&ps->no_of_user_fds); pip->poll_set_size++; /* Wakeup pipe */ -#endif #if ERTS_POLL_USE_TIMERFD pip->poll_set_size++; /* timerfd */ #endif @@ -2922,19 +2753,11 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip) #endif pip->lazy_updates = -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE 1 -#else - 0 -#endif ; pip->pending_updates = -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE pending_updates -#else - 0 -#endif ; pip->batch_updates = @@ -2956,9 +2779,9 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip) pip->max_fds = max_fds; #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS - pip->no_avoided_wakeups = erts_smp_atomic_read_nob(&ps->no_avoided_wakeups); - pip->no_avoided_interrupts = erts_smp_atomic_read_nob(&ps->no_avoided_interrupts); - pip->no_interrupt_timed = erts_smp_atomic_read_nob(&ps->no_interrupt_timed); + pip->no_avoided_wakeups = erts_atomic_read_nob(&ps->no_avoided_wakeups); + pip->no_avoided_interrupts = erts_atomic_read_nob(&ps->no_avoided_interrupts); + pip->no_interrupt_timed = erts_atomic_read_nob(&ps->no_interrupt_timed); #endif ERTS_POLLSET_UNLOCK(ps); @@ -3031,9 +2854,7 @@ ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet ps, else { ev[fd] = ps->fds_status[fd].events; if ( -#if ERTS_POLL_USE_WAKEUP_PIPE fd == ps->wake_fds[0] || fd == ps->wake_fds[1] || -#endif #if ERTS_POLL_USE_TIMERFD fd == ps->timer_fd || #endif @@ -3121,11 +2942,7 @@ print_misc_debug_info(void) "select" #endif , -#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE "true" -#else - "false" -#endif , #if ERTS_POLL_USE_BATCH_UPDATE_POLLSET "true" @@ -3163,12 +2980,12 @@ static void erts_lcnt_enable_pollset_lock_count(ErtsPollSet pollset, int enable) void ERTS_POLL_EXPORT(erts_lcnt_update_pollset_locks)(int enable) { ErtsPollSet iterator; - 
erts_smp_mtx_lock(&pollsets_lock); + erts_mtx_lock(&pollsets_lock); for(iterator = pollsets; iterator != NULL; iterator = iterator->next) { erts_lcnt_enable_pollset_lock_count(iterator, enable); } - erts_smp_mtx_unlock(&pollsets_lock); + erts_mtx_unlock(&pollsets_lock); } #endif diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h index b3b4d79984..a57dc51e5b 100644 --- a/erts/emulator/sys/common/erl_poll.h +++ b/erts/emulator/sys/common/erl_poll.h @@ -227,9 +227,6 @@ typedef struct { #endif } ErtsPollInfo; -#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -void ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet); -#endif void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet, int); void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet, diff --git a/erts/emulator/sys/common/erl_sys_common_misc.c b/erts/emulator/sys/common/erl_sys_common_misc.c index 79f87eb3a9..09237c81ce 100644 --- a/erts/emulator/sys/common/erl_sys_common_misc.c +++ b/erts/emulator/sys/common/erl_sys_common_misc.c @@ -51,7 +51,7 @@ * (often) exist two versions of erl_check_io (kernel-poll and * non-kernel-poll), and we dont want two versions of this variable. */ -erts_smp_atomic_t erts_check_io_time; +erts_atomic_t erts_check_io_time; /* Written once and only once */ diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h index b83837a7d2..b6f5b319ee 100644 --- a/erts/emulator/sys/unix/erl_unix_sys.h +++ b/erts/emulator/sys/unix/erl_unix_sys.h @@ -132,10 +132,6 @@ /* File descriptors are numbers anc consecutively allocated on Unix */ #define ERTS_SYS_CONTINOUS_FD_NUMBERS -#ifndef ERTS_SMP -# undef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -# define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -#endif typedef void *GETENV_STATE; @@ -358,9 +354,7 @@ extern void erts_sys_unix_later_init(void); #ifdef NO_FPE_SIGNALS #define erts_get_current_fp_exception() NULL -#ifdef ERTS_SMP #define erts_thread_init_fp_exception() do{}while(0) -#endif # define __ERTS_FP_CHECK_INIT(fpexnp) do {} while (0) # define __ERTS_FP_ERROR(fpexnp, f, Action) if (!isfinite(f)) { Action; } else {} # define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) __ERTS_FP_ERROR(fpexnp, f, Action) @@ -373,9 +367,7 @@ extern void erts_sys_unix_later_init(void); #else /* !NO_FPE_SIGNALS */ extern volatile unsigned long *erts_get_current_fp_exception(void); -#ifdef ERTS_SMP extern void erts_thread_init_fp_exception(void); -#endif # if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__) # define erts_fwait(fpexnp,f) \ __asm__ __volatile__("fwait" : "=m"(*(fpexnp)) : "m"(f)) @@ -442,10 +434,8 @@ void erts_sys_unblock_fpe(int); /* Threads */ -#ifdef USE_THREADS extern int init_async(int); extern int exit_async(void); -#endif #define ERTS_EXIT_AFTER_DUMP _exit diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c index c1fa660cf9..d05028cabc 100644 --- a/erts/emulator/sys/unix/sys.c +++ b/erts/emulator/sys/unix/sys.c @@ -58,14 +58,12 @@ #define __DARWIN__ 1 #endif -#ifdef USE_THREADS #include "erl_threads.h" -#endif #include "erl_mseg.h" extern char **environ; -erts_smp_rwmtx_t environ_rwmtx; +erts_rwmtx_t environ_rwmtx; #define MAX_VSIZE 16 /* Max number of entries allowed in an I/O * vector sock_sendv(). 
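The erl_poll.c hunks above drop ERTS_POLL_USE_WAKEUP_PIPE and its #if guards, so create_wakeup_pipe(), wake_poller() and cleanup_wakeup_pipe() are now always compiled in. A minimal sketch of the underlying self-pipe pattern, assuming POSIX pipe()/read()/write(); wake_fds, wake() and drain_wakeup() are illustrative names, not the erl_poll.c API. The read end would be registered in the pollset like any other fd, and the drain loop mirrors the read loop in cleanup_wakeup_pipe() above.

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    static int wake_fds[2];               /* [0] = read end in the pollset, [1] = write end */

    static int create_wakeup(void)
    {
        if (pipe(wake_fds) < 0)
            return -1;
        fcntl(wake_fds[0], F_SETFL, O_NONBLOCK);
        fcntl(wake_fds[1], F_SETFL, O_NONBLOCK);
        return 0;
    }

    static void wake(void)                /* called from another thread */
    {
        ssize_t r;
        do {
            r = write(wake_fds[1], "!", 1);
        } while (r < 0 && errno == EINTR);
        /* A full pipe (EAGAIN) is fine: the poller is already being woken. */
    }

    static void drain_wakeup(void)        /* called by the poller after it wakes up */
    {
        char buf[32];
        ssize_t r;
        do {
            r = read(wake_fds[0], buf, sizeof(buf));
        } while (r > 0 || (r < 0 && errno == EINTR));
    }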
@@ -94,19 +92,12 @@ extern void erts_sys_init_float(void); static int debug_log = 0; #endif -#ifdef ERTS_SMP -static erts_smp_atomic32_t have_prepared_crash_dump; -#define ERTS_PREPARED_CRASH_DUMP \ - ((int) erts_smp_atomic32_xchg_nob(&have_prepared_crash_dump, 1)) -#else -static volatile int have_prepared_crash_dump; +static erts_atomic32_t have_prepared_crash_dump; #define ERTS_PREPARED_CRASH_DUMP \ - (have_prepared_crash_dump++) -#endif + ((int) erts_atomic32_xchg_nob(&have_prepared_crash_dump, 1)) -erts_smp_atomic_t sys_misc_mem_sz; +erts_atomic_t sys_misc_mem_sz; -#if defined(ERTS_SMP) static void smp_sig_notify(int signum); static int sig_notify_fds[2] = {-1, -1}; @@ -114,7 +105,6 @@ static int sig_notify_fds[2] = {-1, -1}; static int sig_suspend_fds[2] = {-1, -1}; #endif -#endif jmp_buf erts_sys_sigsegv_jmp; @@ -128,38 +118,12 @@ static int max_files = -1; /* * a few variables used by the break handler */ -#ifdef ERTS_SMP -erts_smp_atomic32_t erts_break_requested; +erts_atomic32_t erts_break_requested; #define ERTS_SET_BREAK_REQUESTED \ - erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1) + erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1) #define ERTS_UNSET_BREAK_REQUESTED \ - erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0) -#else -volatile int erts_break_requested = 0; -#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1) -#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0) -#endif + erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0) -#ifndef ERTS_SMP -static Eterm signalstate_sigterm[] = { - am_sigint, /* 0 */ - am_sighup, /* 1 */ - am_sigquit, /* 2 */ - am_sigabrt, /* 3 */ - am_sigalrm, /* 4 */ - am_sigterm, /* 5 */ - am_sigusr1, /* 6 */ - am_sigusr2, /* 7 */ - am_sigchld, /* 8 */ - am_sigstop, /* 9 */ - am_sigtstp /* 10 */ -}; - -volatile Uint erts_signal_state = 0; -#define ERTS_SET_SIGNAL_STATE(S) (erts_signal_state |= signum_to_signalstate(S)) -#define ERTS_CLEAR_SIGNAL_STATE (erts_signal_state = 0) -static ERTS_INLINE Uint signum_to_signalstate(int signum); -#endif /* set early so the break handler has access to initial mode */ static struct termios initial_tty_mode; @@ -223,9 +187,6 @@ init_check_io(void) io_func.select = driver_select_kp; io_func.enif_select = enif_select_kp; io_func.event = driver_event_kp; -#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT - io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_kp; -#endif io_func.check_io_interrupt = erts_check_io_interrupt_kp; io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_kp; io_func.check_io = erts_check_io_kp; @@ -239,9 +200,6 @@ init_check_io(void) io_func.select = driver_select_nkp; io_func.enif_select = enif_select_nkp; io_func.event = driver_event_nkp; -#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT - io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_nkp; -#endif io_func.check_io_interrupt = erts_check_io_interrupt_nkp; io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_nkp; io_func.check_io = erts_check_io_nkp; @@ -253,11 +211,7 @@ init_check_io(void) } } -#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_as_interrupt)() -#else #define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_interrupt)(1) -#endif #define ERTS_CHK_IO_INTR (*io_func.check_io_interrupt) #define ERTS_CHK_IO_INTR_TMD (*io_func.check_io_interrupt_tmd) #define ERTS_CHK_IO (*io_func.check_io) @@ -272,11 +226,7 @@ init_check_io(void) max_files = erts_check_io_max_files(); } 
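In the sys.c hunk above, the non-SMP variant of ERTS_PREPARED_CRASH_DUMP (a plain volatile counter) is removed and the atomic-exchange form is kept unconditionally, so only the first thread that asks to prepare a crash dump reads back a zero and does the work. A minimal standalone C11 sketch of the same first-caller-wins idiom, with stdatomic standing in for erts_atomic32_xchg_nob (prepare_crash_dump_once is an invented name):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int have_prepared_crash_dump;   /* zero-initialized */

/* Returns nonzero only for the first caller; later callers see the flag already set. */
static int prepare_crash_dump_once(void)
{
    return atomic_exchange(&have_prepared_crash_dump, 1) == 0;
}

int main(void)
{
    printf("%d\n", prepare_crash_dump_once());   /* 1: this caller prepares the dump */
    printf("%d\n", prepare_crash_dump_once());   /* 0: someone already did           */
    return 0;
}
```

The same swap shows up for erts_break_requested just below: setting and clearing an erts_atomic32_t replaces the volatile int that the non-threaded emulator used.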
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT -#define ERTS_CHK_IO_AS_INTR() erts_check_io_async_sig_interrupt() -#else #define ERTS_CHK_IO_AS_INTR() erts_check_io_interrupt(1) -#endif #define ERTS_CHK_IO_INTR erts_check_io_interrupt #define ERTS_CHK_IO_INTR_TMD erts_check_io_interrupt_timed #define ERTS_CHK_IO erts_check_io @@ -290,13 +240,11 @@ erts_sys_schedule_interrupt(int set) ERTS_CHK_IO_INTR(set); } -#ifdef ERTS_SMP void erts_sys_schedule_interrupt_timed(int set, ErtsMonotonicTime timeout_time) { ERTS_CHK_IO_INTR_TMD(set, timeout_time); } -#endif UWord erts_sys_get_page_size(void) @@ -314,7 +262,7 @@ Uint erts_sys_misc_mem_sz(void) { Uint res = ERTS_CHK_IO_SZ(); - res += erts_smp_atomic_read_mb(&sys_misc_mem_sz); + res += erts_atomic_read_mb(&sys_misc_mem_sz); return res; } @@ -339,7 +287,6 @@ MALLOC_USE_HASH(1); #endif #endif -#ifdef USE_THREADS #ifdef ERTS_THR_HAVE_SIG_FUNCS @@ -418,19 +365,15 @@ thr_create_prepare_child(void *vtcdp) erts_sched_bind_atthrcreate_child(tcdp->sched_bind_data); } -#endif /* #ifdef USE_THREADS */ void erts_sys_pre_init(void) { -#ifdef USE_THREADS erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER; -#endif erts_printf_add_cr_to_stdout = 1; erts_printf_add_cr_to_stderr = 1; -#ifdef USE_THREADS eid.thread_create_child_func = thr_create_prepare_child; /* Before creation in parent */ @@ -452,23 +395,15 @@ erts_sys_pre_init(void) erts_lc_init(); #endif -#endif /* USE_THREADS */ erts_init_sys_time_sup(); -#ifdef USE_THREADS -#ifdef ERTS_SMP - erts_smp_atomic32_init_nob(&erts_break_requested, 0); - erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0); -#else - erts_break_requested = 0; - have_prepared_crash_dump = 0; -#endif + erts_atomic32_init_nob(&erts_break_requested, 0); + erts_atomic32_init_nob(&have_prepared_crash_dump, 0); -#endif /* USE_THREADS */ - erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0); + erts_atomic_init_nob(&sys_misc_mem_sz, 0); { /* @@ -531,10 +466,8 @@ SIGFUNC sys_signal(int sig, SIGFUNC func) return(oact.sa_handler); } -#ifdef USE_THREADS #undef sigprocmask #define sigprocmask erts_thr_sigmask -#endif void sys_sigblock(int sig) { @@ -672,7 +605,7 @@ static void signal_notify_requested(Eterm type) { erts_queue_message(p, locks, msgp, msg, am_system); if (locks) - erts_smp_proc_unlock(p, locks); + erts_proc_unlock(p, locks); erts_proc_dec_refc(p); } } @@ -697,11 +630,7 @@ break_requested(void) static RETSIGTYPE request_break(int signum) { -#ifdef ERTS_SMP smp_sig_notify(signum); -#else - break_requested(); -#endif } #ifdef ETHR_UNUSABLE_SIGUSRX @@ -789,8 +718,6 @@ signalterm_to_signum(Eterm signal) } } -#ifdef ERTS_SMP - static ERTS_INLINE Eterm signum_to_signalterm(int signum) { @@ -812,37 +739,9 @@ signum_to_signalterm(int signum) } } -#endif - -#ifndef ERTS_SMP -static ERTS_INLINE Uint -signum_to_signalstate(int signum) -{ - switch (signum) { - case SIGINT: return (1 << 0); - case SIGHUP: return (1 << 1); - case SIGQUIT: return (1 << 2); - case SIGABRT: return (1 << 3); - case SIGALRM: return (1 << 4); - case SIGTERM: return (1 << 5); - case SIGUSR1: return (1 << 6); - case SIGUSR2: return (1 << 7); - case SIGCHLD: return (1 << 8); - case SIGSTOP: return (1 << 9); - case SIGTSTP: return (1 << 10); - default: return 0; - } -} -#endif - static RETSIGTYPE generic_signal_handler(int signum) { -#ifdef ERTS_SMP smp_sig_notify(signum); -#else - ERTS_SET_SIGNAL_STATE(signum); - ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */ -#endif } int erts_set_signal(Eterm signal, Eterm type) { @@ -969,7 +868,7 @@ void 
os_version(int *pMajor, int *pMinor, int *pBuild) { void init_getenv_state(GETENV_STATE *state) { - erts_smp_rwmtx_rlock(&environ_rwmtx); + erts_rwmtx_rlock(&environ_rwmtx); *state = NULL; } @@ -978,7 +877,7 @@ char *getenv_string(GETENV_STATE *state0) char **state = (char **) *state0; char *cp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&environ_rwmtx)); if (state == NULL) state = environ; @@ -992,7 +891,7 @@ char *getenv_string(GETENV_STATE *state0) void fini_getenv_state(GETENV_STATE *state) { *state = NULL; - erts_smp_rwmtx_runlock(&environ_rwmtx); + erts_rwmtx_runlock(&environ_rwmtx); } void erts_do_break_handling(void) @@ -1005,7 +904,7 @@ void erts_do_break_handling(void) * therefore, make sure that all threads but this one are blocked before * proceeding! */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); /* during break we revert to initial settings */ /* this is done differently for oldshell */ @@ -1033,25 +932,9 @@ void erts_do_break_handling(void) tcsetattr(0,TCSANOW,&temp_mode); } - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); } -#ifdef ERTS_SIGNAL_STATE -void erts_handle_signal_state(void) { - Uint signal_state = ERTS_SIGNAL_STATE; - Uint i = 0; - - ERTS_CLEAR_SIGNAL_STATE; - - while (signal_state) { - if (signal_state & 0x1) { - signal_notify_requested(signalstate_sigterm[i]); - } - i++; - signal_state = signal_state >> 1; - } -} -#endif /* Fills in the systems representation of the jam/beam process identifier. ** The Pid is put in STRING representation in the supplied buffer, @@ -1079,14 +962,14 @@ erts_sys_putenv(char *key, char *value) env = erts_alloc(ERTS_ALC_T_TMP, need); #else env = erts_alloc(ERTS_ALC_T_PUTENV_STR, need); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, need); + erts_atomic_add_nob(&sys_misc_mem_sz, need); #endif strcpy(env,key); strcat(env,"="); strcat(env,value); - erts_smp_rwmtx_rwlock(&environ_rwmtx); + erts_rwmtx_rwlock(&environ_rwmtx); res = putenv(env); - erts_smp_rwmtx_rwunlock(&environ_rwmtx); + erts_rwmtx_rwunlock(&environ_rwmtx); #ifdef HAVE_COPYING_PUTENV erts_free(ERTS_ALC_T_TMP, env); #endif @@ -1133,9 +1016,9 @@ int erts_sys_getenv(char *key, char *value, size_t *size) { int res; - erts_smp_rwmtx_rlock(&environ_rwmtx); + erts_rwmtx_rlock(&environ_rwmtx); res = erts_sys_getenv__(key, value, size); - erts_smp_rwmtx_runlock(&environ_rwmtx); + erts_rwmtx_runlock(&environ_rwmtx); return res; } @@ -1143,9 +1026,9 @@ int erts_sys_unsetenv(char *key) { int res; - erts_smp_rwmtx_rwlock(&environ_rwmtx); + erts_rwmtx_rwlock(&environ_rwmtx); res = unsetenv(key); - erts_smp_rwmtx_rwunlock(&environ_rwmtx); + erts_rwmtx_rwunlock(&environ_rwmtx); return res; } @@ -1286,16 +1169,6 @@ erl_assert_error(const char* expr, const char* func, const char* file, int line) fprintf(stderr, "%s:%d:%s() Assertion failed: %s\n", file, line, func, expr); fflush(stderr); -#if !defined(ERTS_SMP) && 0 - /* Writing a crashdump from a failed assertion when smp support - * is enabled almost a guaranteed deadlocking, don't even bother. - * - * It could maybe be useful (but I'm not convinced) to write the - * crashdump if smp support is disabled... 
- */ - if (erts_initialized) - erl_crash_dump(file, line, "Assertion failed: %s\n", expr); -#endif abort(); } @@ -1326,13 +1199,12 @@ void erl_sys_schedule(int runnable) { ERTS_CHK_IO(!runnable); - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); } -#ifdef ERTS_SMP -static erts_smp_tid_t sig_dispatcher_tid; +static erts_tid_t sig_dispatcher_tid; static void smp_sig_notify(int signum) @@ -1406,7 +1278,7 @@ signal_dispatcher_thread_func(void *unused) } signal_notify_requested(signal); } - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); } return NULL; } @@ -1414,7 +1286,7 @@ signal_dispatcher_thread_func(void *unused) static void init_smp_sig_notify(void) { - erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER; + erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER; thr_opts.detached = 1; thr_opts.name = "sys_sig_dispatcher"; @@ -1426,7 +1298,7 @@ init_smp_sig_notify(void) } /* Start signal handler thread */ - erts_smp_thr_create(&sig_dispatcher_tid, + erts_thr_create(&sig_dispatcher_tid, signal_dispatcher_thread_func, NULL, &thr_opts); @@ -1519,7 +1391,6 @@ erts_sys_main_thread(void) } } -#endif /* ERTS_SMP */ #ifdef ERTS_ENABLE_KERNEL_POLL /* get_value() is currently only used when kernel-poll is enabled */ @@ -1553,7 +1424,7 @@ erl_sys_args(int* argc, char** argv) { int i, j; - erts_smp_rwmtx_init(&environ_rwmtx, "environ", NIL, + erts_rwmtx_init(&environ_rwmtx, "environ", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); i = 1; @@ -1606,10 +1477,8 @@ erl_sys_args(int* argc, char** argv) init_check_io(); -#ifdef ERTS_SMP init_smp_sig_notify(); init_smp_sig_suspend(); -#endif /* Handled arguments have been marked with NULL. Slide arguments not handled towards the beginning of argv. 
*/ diff --git a/erts/emulator/sys/unix/sys_drivers.c b/erts/emulator/sys/unix/sys_drivers.c index 834706d86f..7c9a532fed 100644 --- a/erts/emulator/sys/unix/sys_drivers.c +++ b/erts/emulator/sys/unix/sys_drivers.c @@ -53,14 +53,12 @@ #define WANT_NONBLOCKING /* must define this to pull in defs from sys.h */ #include "sys.h" -#ifdef USE_THREADS #include "erl_threads.h" -#endif extern char **environ; -extern erts_smp_rwmtx_t environ_rwmtx; +extern erts_rwmtx_t environ_rwmtx; -extern erts_smp_atomic_t sys_misc_mem_sz; +extern erts_atomic_t sys_misc_mem_sz; static Eterm forker_port; @@ -86,12 +84,6 @@ static Eterm forker_port; #define MAXIOV 16 #endif -#ifdef USE_THREADS -# define FDBLOCK 1 -#else -# define FDBLOCK 0 -#endif - /* Used by the fd driver iff the fd could not be set to non-blocking */ typedef struct ErtsSysBlocking_ { ErlDrvPDL pdl; @@ -178,9 +170,7 @@ void erl_sys_late_init(void) { SysDriverOpts opts; -#ifdef ERTS_SMP Port *port; -#endif sys_signal(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */ @@ -197,13 +187,9 @@ erl_sys_late_init(void) opts.argv = NULL; opts.parallelism = erts_port_parallelism; -#ifdef ERTS_SMP port = -#endif erts_open_driver(&forker_driver, make_internal_pid(0), "forker", &opts, NULL, NULL); -#ifdef ERTS_SMP erts_mtx_unlock(port->lock); -#endif erts_sys_unix_later_init(); /* Need to be called after forker has been started */ } @@ -220,10 +206,8 @@ static ErlDrvData vanilla_start(ErlDrvPort, char*, SysDriverOpts*); /* II.III FD prototypes */ static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*); -#if FDBLOCK static void fd_async(void *); static void fd_ready_async(ErlDrvData drv_data, ErlDrvThreadData thread_data); -#endif static ErlDrvSSizeT fd_control(ErlDrvData, unsigned int, char *, ErlDrvSizeT, char **, ErlDrvSizeT); static void fd_stop(ErlDrvData); @@ -287,11 +271,7 @@ struct erl_drv_entry fd_driver_entry = { fd_control, NULL, outputv, -#if FDBLOCK fd_ready_async, /* ready_async */ -#else - NULL, -#endif fd_flush, /* flush */ NULL, /* call */ NULL, /* event */ @@ -363,7 +343,7 @@ static int set_blocking_data(ErtsSysDriverData *dd) { dd->blocking = erts_alloc(ERTS_ALC_T_SYS_BLOCKING, sizeof(ErtsSysBlocking)); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, sizeof(ErtsSysBlocking)); + erts_atomic_add_nob(&sys_misc_mem_sz, sizeof(ErtsSysBlocking)); dd->blocking->pdl = driver_pdl_create(dd->port_num); dd->blocking->res = 0; @@ -406,7 +386,7 @@ create_driver_data(ErlDrvPort port_num, size += sizeof(ErtsSysFdData); data = erts_alloc(ERTS_ALC_T_DRV_TAB,size); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, size); + erts_atomic_add_nob(&sys_misc_mem_sz, size); driver_data = (ErtsSysDriverData*)data; data += sizeof(*driver_data); @@ -441,7 +421,7 @@ create_driver_data(ErlDrvPort port_num, data += sizeof(*driver_data->ofd); init_fd_data(driver_data->ofd, ofd); } - if (is_blocking && FDBLOCK) + if (is_blocking) if (!set_blocking_data(driver_data)) { erts_free(ERTS_ALC_T_DRV_TAB, driver_data); return NULL; @@ -472,7 +452,7 @@ static char **build_unix_environment(char *block) char **cpp; char** old_env; - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&environ_rwmtx)); cp = block; len = 0; @@ -620,12 +600,12 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, len = CMD_LINE_PREFIX_STR_SZ + len + 1; } - erts_smp_rwmtx_rlock(&environ_rwmtx); + erts_rwmtx_rlock(&environ_rwmtx); if (opts->envir == NULL) { new_environ = environ; } else if ((new_environ = 
build_unix_environment(opts->envir)) == NULL) { - erts_smp_rwmtx_runlock(&environ_rwmtx); + erts_rwmtx_runlock(&environ_rwmtx); close_pipes(ifd, ofd); erts_free(ERTS_ALC_T_TMP, (void *) cmd_line); errno = ENOMEM; @@ -641,7 +621,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, erts_free(ERTS_ALC_T_TMP, (void *) cmd_line); if (new_environ != environ) erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ); - erts_smp_rwmtx_runlock(&environ_rwmtx); + erts_rwmtx_runlock(&environ_rwmtx); errno = err; return ERL_DRV_ERROR_ERRNO; } @@ -681,7 +661,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, if (!io_vector) { close_pipes(ifd, ofd); - erts_smp_rwmtx_runlock(&environ_rwmtx); + erts_rwmtx_runlock(&environ_rwmtx); erts_free(ERTS_ALC_T_TMP, (void *) cmd_line); if (new_environ != environ) erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ); @@ -766,7 +746,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, erts_free(ERTS_ALC_T_TMP, io_vector); if (new_environ != environ) erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ); - erts_smp_rwmtx_runlock(&environ_rwmtx); + erts_rwmtx_runlock(&environ_rwmtx); erts_free(ERTS_ALC_T_TMP, (void *) cmd_line); errno = err; return ERL_DRV_ERROR_ERRNO; @@ -795,7 +775,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name, if (new_environ != environ) erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ); - erts_smp_rwmtx_runlock(&environ_rwmtx); + erts_rwmtx_runlock(&environ_rwmtx); dd = create_driver_data(port_num, ifd[0], ofd[1], opts->packet_bytes, DO_WRITE | DO_READ, opts->exit_status, @@ -1068,8 +1048,8 @@ static void clear_fd_data(ErtsSysFdData *fdd) { if (fdd->sz > 0) { erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fdd->buf); - ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= fdd->sz); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*fdd->sz); + ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= fdd->sz); + erts_atomic_add_nob(&sys_misc_mem_sz, -1*fdd->sz); } fdd->buf = NULL; fdd->sz = 0; @@ -1092,13 +1072,11 @@ static void fd_stop(ErlDrvData ev) /* Does not close the fds */ ErlDrvPort prt = dd->port_num; int sz = sizeof(ErtsSysDriverData); -#if FDBLOCK if (dd->blocking) { erts_free(ERTS_ALC_T_SYS_BLOCKING, dd->blocking); dd->blocking = NULL; sz += sizeof(ErtsSysBlocking); } -#endif if (dd->ifd) { sz += sizeof(ErtsSysFdData); @@ -1110,7 +1088,7 @@ static void fd_stop(ErlDrvData ev) /* Does not close the fds */ } erts_free(ERTS_ALC_T_DRV_TAB, dd); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, -sz); + erts_atomic_add_nob(&sys_misc_mem_sz, -sz); } static void fd_flush(ErlDrvData ev) @@ -1191,19 +1169,19 @@ static void outputv(ErlDrvData e, ErlIOVec* ev) ev->iov[0].iov_len = pb; ev->size += pb; - if (dd->blocking && FDBLOCK) + if (dd->blocking) driver_pdl_lock(dd->blocking->pdl); if ((sz = driver_sizeq(ix)) > 0) { driver_enqv(ix, ev, 0); - if (dd->blocking && FDBLOCK) + if (dd->blocking) driver_pdl_unlock(dd->blocking->pdl); if (sz + ev->size >= (1 << 13)) set_busy_port(ix, 1); } - else if (!dd->blocking || !FDBLOCK) { + else if (!dd->blocking) { /* We try to write directly if the fd in non-blocking */ int vsize = ev->vsize > MAX_VSIZE ? 
MAX_VSIZE : ev->vsize; @@ -1220,7 +1198,6 @@ static void outputv(ErlDrvData e, ErlIOVec* ev) driver_enqv(ix, ev, n); /* n is the skip value */ driver_select(ix, ofd, ERL_DRV_WRITE|ERL_DRV_USE, 1); } -#if FDBLOCK else { if (ev->size != 0) { driver_enqv(ix, ev, 0); @@ -1231,7 +1208,6 @@ static void outputv(ErlDrvData e, ErlIOVec* ev) driver_pdl_unlock(dd->blocking->pdl); } } -#endif /* return 0;*/ } @@ -1303,7 +1279,7 @@ static int port_inp_failure(ErtsSysDriverData *dd, int res) clear_fd_data(dd->ifd); } - if (dd->blocking && FDBLOCK) { + if (dd->blocking) { driver_pdl_lock(dd->blocking->pdl); if (driver_sizeq(dd->port_num) > 0) { driver_pdl_unlock(dd->blocking->pdl); @@ -1408,7 +1384,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd) if (dd->ifd->fd < 0) { driver_select(port_num, abs(dd->ifd->fd), ERL_DRV_READ|ERL_DRV_USE, 0); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, -sizeof(ErtsSysFdData)); + erts_atomic_add_nob(&sys_misc_mem_sz, -sizeof(ErtsSysFdData)); dd->ifd = NULL; } @@ -1514,7 +1490,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd) port_inp_failure(dd, -1); } else { - erts_smp_atomic_add_nob(&sys_misc_mem_sz, h); + erts_atomic_add_nob(&sys_misc_mem_sz, h); sys_memcpy(buf, cpos, bytes_left); dd->ifd->buf = buf; dd->ifd->sz = h; @@ -1549,7 +1525,7 @@ static void ready_output(ErlDrvData e, ErlDrvEvent ready_fd) should close the output fd as soon as the command has been sent. */ driver_select(ix, ready_fd, ERL_DRV_WRITE|ERL_DRV_USE, 0); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, -sizeof(ErtsSysFdData)); + erts_atomic_add_nob(&sys_misc_mem_sz, -sizeof(ErtsSysFdData)); dd->ofd = NULL; } if (dd->terminating) @@ -1579,7 +1555,6 @@ static void stop_select(ErlDrvEvent fd, void* _) close((int)fd); } -#if FDBLOCK static void fd_async(void *async_data) @@ -1658,7 +1633,6 @@ void fd_ready_async(ErlDrvData drv_data, return; /* 0; */ } -#endif /* Forker driver */ diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c index 6435da086f..a82c15bd32 100644 --- a/erts/emulator/sys/unix/sys_float.c +++ b/erts/emulator/sys/unix/sys_float.c @@ -39,7 +39,6 @@ erts_sys_init_float(void) #else /* !NO_FPE_SIGNALS */ -#ifdef ERTS_SMP static erts_tsd_key_t fpe_key; /* once-only initialisation early in the main thread (via erts_sys_init_float()) */ @@ -61,11 +60,6 @@ static ERTS_INLINE volatile unsigned long *erts_thread_get_fp_exception(void) { return (volatile unsigned long*)erts_tsd_get(fpe_key); } -#else /* !SMP */ -#define erts_init_fp_exception() /*empty*/ -static volatile unsigned long fp_exception; -#define erts_thread_get_fp_exception() (&fp_exception) -#endif /* SMP */ volatile unsigned long *erts_get_current_fp_exception(void) { @@ -659,11 +653,9 @@ void erts_sys_init_float(void) void erts_thread_init_float(void) { -#ifdef ERTS_SMP /* This allows Erlang schedulers to leave Erlang-process context and still have working FP exceptions. XXX: is this needed? 
*/ erts_thread_init_fp_exception(); -#endif #ifndef NO_FPE_SIGNALS /* NOTE: diff --git a/erts/emulator/sys/unix/sys_time.c b/erts/emulator/sys/unix/sys_time.c index 102ef7bebf..ef05380d17 100644 --- a/erts/emulator/sys/unix/sys_time.c +++ b/erts/emulator/sys/unix/sys_time.c @@ -160,7 +160,7 @@ struct sys_time_internal_state_read_mostly__ { #ifdef ERTS_SYS_TIME_INTERNAL_STATE_WRITE_FREQ__ struct sys_time_internal_state_write_freq__ { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; #if defined(__linux__) && defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME) ErtsMonotonicTime last_delivered; #endif @@ -304,7 +304,7 @@ sys_init_time(ErtsSysInitTimeResult *init_resp) erts_sys_time_data__.r.o.os_times = clock_gettime_times_verified; #endif - erts_smp_mtx_init(&internal_state.w.f.mtx, "os_monotonic_time", NIL, + erts_mtx_init(&internal_state.w.f.mtx, "os_monotonic_time", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); internal_state.w.f.last_delivered = clock_gettime_monotonic(); @@ -525,12 +525,12 @@ static ErtsMonotonicTime clock_gettime_monotonic_verified(void) mtime = (ErtsMonotonicTime) posix_clock_gettime(MONOTONIC_CLOCK_ID, MONOTONIC_CLOCK_ID_STR); - erts_smp_mtx_lock(&internal_state.w.f.mtx); + erts_mtx_lock(&internal_state.w.f.mtx); if (mtime < internal_state.w.f.last_delivered) mtime = internal_state.w.f.last_delivered; else internal_state.w.f.last_delivered = mtime; - erts_smp_mtx_unlock(&internal_state.w.f.mtx); + erts_mtx_unlock(&internal_state.w.f.mtx); return mtime; } @@ -547,12 +547,12 @@ static void clock_gettime_times_verified(ErtsMonotonicTime *mtimep, WALL_CLOCK_ID_STR, stimep); - erts_smp_mtx_lock(&internal_state.w.f.mtx); + erts_mtx_lock(&internal_state.w.f.mtx); if (*mtimep < internal_state.w.f.last_delivered) *mtimep = internal_state.w.f.last_delivered; else internal_state.w.f.last_delivered = *mtimep; - erts_smp_mtx_unlock(&internal_state.w.f.mtx); + erts_mtx_unlock(&internal_state.w.f.mtx); } #endif /* defined(OS_SYSTEM_TIME_USING_CLOCK_GETTIME) */ @@ -878,8 +878,6 @@ ErtsMonotonicTime erts_os_monotonic_time(void) { Uint32 ticks = get_tick_count(); - ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(&internal_state.wr.m.os_mtime_xtnd, - ticks); return ERTS_EXTEND_OS_MONOTONIC_TIME(&internal_state.wr.m.os_mtime_xtnd, ticks) << internal_state.r.o.times_shift; } diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c index 8743f83a50..0bd43bb4fb 100644 --- a/erts/emulator/sys/win32/erl_poll.c +++ b/erts/emulator/sys/win32/erl_poll.c @@ -286,41 +286,26 @@ struct ErtsPollSet_ { CRITICAL_SECTION standby_crit; /* CS to guard the counter */ HANDLE standby_wait_event; /* Event signalled when counte == 0 */ erts_atomic32_t wakeup_state; -#ifdef ERTS_SMP - erts_smp_mtx_t mtx; -#endif + erts_mtx_t mtx; erts_atomic64_t timeout_time; }; -#ifdef ERTS_SMP #define ERTS_POLLSET_LOCK(PS) \ - erts_smp_mtx_lock(&(PS)->mtx) + erts_mtx_lock(&(PS)->mtx) #define ERTS_POLLSET_UNLOCK(PS) \ - erts_smp_mtx_unlock(&(PS)->mtx) - -#else + erts_mtx_unlock(&(PS)->mtx) -#define ERTS_POLLSET_LOCK(PS) -#define ERTS_POLLSET_UNLOCK(PS) - -#endif /* * Communication with sys_interrupt */ -#ifdef ERTS_SMP -extern erts_smp_atomic32_t erts_break_requested; +extern erts_atomic32_t erts_break_requested; #define ERTS_SET_BREAK_REQUESTED \ - erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1) + erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1) #define ERTS_UNSET_BREAK_REQUESTED \ - erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0) -#else -extern 
volatile int erts_break_requested; -#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1) -#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0) -#endif + erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0) static erts_mtx_t break_waiter_lock; static HANDLE break_happened_event; @@ -1193,14 +1178,10 @@ int erts_poll_wait(ErtsPollSet ps, HARDDEBUGF(("Start waiting %d [%d]",num_h, (int) timeout)); ERTS_POLLSET_UNLOCK(ps); -#ifdef ERTS_SMP erts_thr_progress_prepare_wait(NULL); -#endif ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); WaitForMultipleObjects(num_h, harr, FALSE, timeout); -#ifdef ERTS_SMP erts_thr_progress_finalize_wait(NULL); -#endif ERTS_MSACC_POP_STATE_M(); ERTS_POLLSET_LOCK(ps); HARDDEBUGF(("Stop waiting %d [%d]",num_h, (int) timeout)); @@ -1359,9 +1340,7 @@ ErtsPollSet erts_poll_create_pollset(void) ps->restore_events = 0; erts_atomic32_init_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN); -#ifdef ERTS_SMP - erts_smp_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO); -#endif + erts_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO); init_timeout_time(ps); HARDTRACEF(("Out erts_poll_create_pollset")); @@ -1391,9 +1370,7 @@ void erts_poll_destroy_pollset(ErtsPollSet ps) CloseHandle(ps->event_io_ready); CloseHandle(ps->standby_wait_event); ERTS_POLLSET_UNLOCK(ps); -#ifdef ERTS_SMP - erts_smp_mtx_destroy(&ps->mtx); -#endif + erts_mtx_destroy(&ps->mtx); SEL_FREE(ERTS_ALC_T_POLLSET, (void *) ps); HARDTRACEF(("Out erts_poll_destroy_pollset")); } diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h index 78005aada9..1f53452d17 100644 --- a/erts/emulator/sys/win32/erl_win_sys.h +++ b/erts/emulator/sys/win32/erl_win_sys.h @@ -311,10 +311,8 @@ typedef long ssize_t; #endif /* Threads */ -#ifdef USE_THREADS int init_async(int); int exit_async(void); -#endif #define ERTS_HAVE_TRY_CATCH 1 diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c index 15c59109b1..b23dbecbac 100644 --- a/erts/emulator/sys/win32/sys.c +++ b/erts/emulator/sys/win32/sys.c @@ -80,11 +80,9 @@ static int application_type(const wchar_t* originalName, wchar_t fullPath[MAX_PA HANDLE erts_service_event; -#ifdef ERTS_SMP -static erts_smp_tsd_key_t win32_errstr_key; -#endif +static erts_tsd_key_t win32_errstr_key; -static erts_smp_atomic_t pipe_creation_counter; +static erts_atomic_t pipe_creation_counter; /* Results from application_type(_w) is one of */ #define APPL_NONE 0 @@ -94,10 +92,8 @@ static erts_smp_atomic_t pipe_creation_counter; static int driver_write(long, HANDLE, byte*, int); static int create_file_thread(struct async_io* aio, int mode); -#ifdef ERTS_SMP static void close_active_handle(DriverData *, HANDLE handle); static DWORD WINAPI threaded_handle_closer(LPVOID param); -#endif static DWORD WINAPI threaded_reader(LPVOID param); static DWORD WINAPI threaded_writer(LPVOID param); static DWORD WINAPI threaded_exiter(LPVOID param); @@ -136,7 +132,7 @@ static OSVERSIONINFO int_os_version; /* Version information for Win32. */ Disabled the use of CancelIoEx as its been seen to cause problem with some drivers. Not sure what to blame; faulty drivers or some form of invalid use. 
*/ -#if defined(ERTS_SMP) && defined(USE_CANCELIOEX) +#if defined(USE_CANCELIOEX) static BOOL (WINAPI *fpCancelIoEx)(HANDLE,LPOVERLAPPED); #endif @@ -145,7 +141,7 @@ static BOOL (WINAPI *fpCancelIoEx)(HANDLE,LPOVERLAPPED); - call erl_start() to parse arguments and do other init */ -static erts_smp_atomic_t sys_misc_mem_sz; +static erts_atomic_t sys_misc_mem_sz; HMODULE beam_module = NULL; @@ -196,7 +192,7 @@ Uint erts_sys_misc_mem_sz(void) { Uint res = (Uint) erts_check_io_size(); - res += (Uint) erts_smp_atomic_read_mb(&sys_misc_mem_sz); + res += (Uint) erts_atomic_read_mb(&sys_misc_mem_sz); return res; } @@ -450,9 +446,7 @@ typedef struct async_io { * the console for Windows NT). */ HANDLE fd; /* Handle for file or pipe. */ -#ifdef ERTS_SMP int async_io_active; /* if true, a close of the file will signal the event in ov */ -#endif OVERLAPPED ov; /* Control structure for overlapped reading. * When overlapped reading is simulated with * a thread, the fields are used as follows: @@ -665,7 +659,7 @@ new_driver_data(ErlDrvPort port_num, int packet_bytes, int wait_objs_required, i dp->inbuf = DRV_BUF_ALLOC(dp->inBufSize); if (dp->inbuf == NULL) goto buf_alloc_error; - erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize); + erts_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize); dp->outBufSize = 0; dp->outbuf = NULL; dp->port_num = port_num; @@ -691,7 +685,6 @@ buf_alloc_error: static void release_driver_data(DriverData* dp) { -#ifdef ERTS_SMP #ifdef USE_CANCELIOEX if (fpCancelIoEx != NULL) { if (dp->in.thread == (HANDLE) -1 && dp->in.fd != INVALID_HANDLE_VALUE) { @@ -734,18 +727,10 @@ release_driver_data(DriverData* dp) DEBUGF(("...done\n")); } } -#else - if (dp->in.thread == (HANDLE) -1 && dp->in.fd != INVALID_HANDLE_VALUE) { - CancelIo(dp->in.fd); - } - if (dp->out.thread == (HANDLE) -1 && dp->out.fd != INVALID_HANDLE_VALUE) { - CancelIo(dp->out.fd); - } -#endif if (dp->inbuf != NULL) { - ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->inBufSize); + ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize); + erts_atomic_add_nob(&sys_misc_mem_sz, -1*dp->inBufSize); DRV_BUF_FREE(dp->inbuf); dp->inBufSize = 0; dp->inbuf = NULL; @@ -753,8 +738,8 @@ release_driver_data(DriverData* dp) ASSERT(dp->inBufSize == 0); if (dp->outbuf != NULL) { - ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize); + ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize); + erts_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize); DRV_BUF_FREE(dp->outbuf); dp->outBufSize = 0; dp->outbuf = NULL; @@ -777,7 +762,6 @@ release_driver_data(DriverData* dp) unrefer_driver_data(dp); } -#ifdef ERTS_SMP struct handles_to_be_closed { HANDLE handles[MAXIMUM_WAIT_OBJECTS]; @@ -870,7 +854,6 @@ threaded_handle_closer(LPVOID param) DEBUGF(("threaded_handle_closer %p terminating\r\n", htbc)); return 0; } -#endif /* ERTS_SMP */ /* * Stores input and output file descriptors in the DriverData structure, @@ -946,9 +929,7 @@ init_async_io(DriverData *dp, AsyncIo* aio, int use_threads) aio->flushReplyEvent = NULL; aio->pendingError = 0; aio->bytesTransferred = 0; -#ifdef ERTS_SMP aio->async_io_active = 0; -#endif aio->ov.hEvent = CreateManualEvent(FALSE); if (aio->ov.hEvent == NULL) return -1; @@ -1029,9 +1010,7 @@ async_read_file(AsyncIo* aio, LPVOID buf, DWORD numToRead) ResetEvent(aio->ov.hEvent); SetEvent(aio->ioAllowed); } else { -#ifdef ERTS_SMP aio->async_io_active = 
1; /* Will get 0 when the event actually happened */ -#endif if (ReadFile(aio->fd, buf, numToRead, &aio->bytesTransferred, &aio->ov)) { DEBUGF(("async_read_file: ReadFile() suceeded: %d bytes\n", @@ -1079,16 +1058,12 @@ async_write_file(AsyncIo* aio, /* Pointer to async control block. */ ResetEvent(aio->ov.hEvent); SetEvent(aio->ioAllowed); } else { -#ifdef ERTS_SMP aio->async_io_active = 1; /* Will get 0 when the event actually happened */ -#endif if (WriteFile(aio->fd, buf, numToWrite, &aio->bytesTransferred, &aio->ov)) { DEBUGF(("async_write_file: WriteFile() suceeded: %d bytes\n", aio->bytesTransferred)); -#ifdef ERTS_SMP aio->async_io_active = 0; /* The event will not be signalled */ -#endif ResetEvent(aio->ov.hEvent); return TRUE; } else { @@ -1190,7 +1165,7 @@ static int spawn_init(void) { int i; -#if defined(ERTS_SMP) && defined(USE_CANCELIOEX) +#if defined(USE_CANCELIOEX) HMODULE module = GetModuleHandle("kernel32"); fpCancelIoEx = (BOOL (WINAPI *)(HANDLE,LPOVERLAPPED)) ((module != NULL) ? GetProcAddress(module,"CancelIoEx") : NULL); @@ -1762,7 +1737,7 @@ static int create_pipe(HANDLE *phRead, HANDLE *phWrite, BOOL inheritRead, BOOL o * Otherwise, create named pipes. */ - calls = (UWord) erts_smp_atomic_inc_read_nob(&pipe_creation_counter); + calls = (UWord) erts_atomic_inc_read_nob(&pipe_creation_counter); erts_snprintf(pipe_name, sizeof(pipe_name), "\\\\.\\pipe\\erlang44_%d_%bpu", getpid(), calls); @@ -2447,7 +2422,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len) } dp->outBufSize = pb+len; - erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->outBufSize); + erts_atomic_add_nob(&sys_misc_mem_sz, dp->outBufSize); /* * Store header bytes (if any). @@ -2476,8 +2451,8 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len) } else { dp->out.ov.Offset += pb+len; /* For vanilla driver. */ /* XXX OffsetHigh should be changed too. */ - ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize); + ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize); + erts_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize); DRV_BUF_FREE(dp->outbuf); dp->outBufSize = 0; dp->outbuf = NULL; @@ -2511,11 +2486,9 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event) int pb; pb = dp->packet_bytes; -#ifdef ERTS_SMP if(dp->in.thread == (HANDLE) -1) { dp->in.async_io_active = 0; } -#endif DEBUGF(("ready_input: dp %p, event 0x%x\n", dp, ready_event)); /* @@ -2590,8 +2563,8 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event) error = ERROR_NOT_ENOUGH_MEMORY; break; /* Break out of loop into error handler. */ } - ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, + ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize); + erts_atomic_add_nob(&sys_misc_mem_sz, dp->totalNeeded - dp->inBufSize); dp->inBufSize = dp->totalNeeded; dp->inbuf = new_buf; @@ -2680,11 +2653,9 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event) DriverData *dp = (DriverData *) drv_data; int error; -#ifdef ERTS_SMP if(dp->out.thread == (HANDLE) -1) { dp->out.async_io_active = 0; } -#endif DEBUGF(("ready_output(%p, 0x%x)\n", drv_data, ready_event)); set_busy_port(dp->port_num, 0); if (!(dp->outbuf)) { @@ -2692,8 +2663,8 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event) write... 
*/ return; } - ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize); + ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize); + erts_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize); DRV_BUF_FREE(dp->outbuf); dp->outBufSize = 0; dp->outbuf = NULL; @@ -2743,7 +2714,6 @@ sys_init_io(void) max_files = 2*erts_ptab_max(&erts_port); } -#ifdef ERTS_SMP void erts_sys_main_thread(void) { @@ -2756,7 +2726,6 @@ erts_sys_main_thread(void) WaitForSingleObject(dummy, INFINITE); } } -#endif void erts_sys_alloc_init(void) { @@ -2843,7 +2812,7 @@ Preload* sys_preloaded(void) (num_preloaded+1)*sizeof(Preload)); res_name = erts_alloc(ERTS_ALC_T_PRELOADED, (num_preloaded+1)*sizeof(unsigned)); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, + erts_atomic_add_nob(&sys_misc_mem_sz, (num_preloaded+1)*sizeof(Preload) + (num_preloaded+1)*sizeof(unsigned)); for (i = 0; i < num_preloaded; i++) { @@ -2856,7 +2825,7 @@ Preload* sys_preloaded(void) n = GETWORD(data); data += 2; preloaded[i].name = erts_alloc(ERTS_ALC_T_PRELOADED, n+1); - erts_smp_atomic_add_nob(&sys_misc_mem_sz, n+1); + erts_atomic_add_nob(&sys_misc_mem_sz, n+1); sys_memcpy(preloaded[i].name, data, n); preloaded[i].name[n] = '\0'; data += n; @@ -2938,11 +2907,7 @@ sys_get_key(int fd) char* win32_errorstr(int error) { -#ifdef SMP - LPTSTR lpBufPtr = erts_smp_tsd_get(win32_errstr_key); -#else - static LPTSTR lpBufPtr = NULL; -#endif + LPTSTR lpBufPtr = erts_tsd_get(win32_errstr_key); if (lpBufPtr) { LocalFree(lpBufPtr); } @@ -2956,9 +2921,7 @@ char* win32_errorstr(int error) 0, NULL); SetLastError(error); -#ifdef ERTS_SMP - erts_smp_tsd_set(win32_errstr_key,lpBufPtr); -#endif + erts_tsd_set(win32_errstr_key,lpBufPtr); return lpBufPtr; } @@ -3131,7 +3094,6 @@ check_supported_os_version(void) #endif } -#ifdef USE_THREADS typedef struct { int sched_bind_data; @@ -3176,19 +3138,15 @@ thr_create_prepare_child(void *vtcdp) erts_sched_bind_atthrcreate_child(tcdp->sched_bind_data); } -#endif /* USE_THREADS */ void erts_sys_pre_init(void) { -#ifdef USE_THREADS erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER; -#endif int_os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); GetVersionEx(&int_os_version); check_supported_os_version(); -#ifdef USE_THREADS eid.thread_create_child_func = thr_create_prepare_child; /* Before creation in parent */ eid.thread_create_prepare_func = thr_create_prepare; @@ -3209,11 +3167,10 @@ erts_sys_pre_init(void) erts_lc_init(); #endif -#endif /* USE_THREADS */ erts_init_sys_time_sup(); - erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0); + erts_atomic_init_nob(&sys_misc_mem_sz, 0); } void noinherit_std_handle(DWORD type) @@ -3233,11 +3190,9 @@ void erl_sys_init(void) noinherit_std_handle(STD_INPUT_HANDLE); noinherit_std_handle(STD_ERROR_HANDLE); -#ifdef ERTS_SMP - erts_smp_tsd_key_create(&win32_errstr_key,"win32_errstr_key"); + erts_tsd_key_create(&win32_errstr_key,"win32_errstr_key"); InitializeCriticalSection(&htbc_lock); -#endif - erts_smp_atomic_init_nob(&pipe_creation_counter,0); + erts_atomic_init_nob(&pipe_creation_counter,0); /* * Test if we have named pipes or not. 
*/ @@ -3299,13 +3254,11 @@ erts_sys_schedule_interrupt(int set) erts_check_io_interrupt(set); } -#ifdef ERTS_SMP void erts_sys_schedule_interrupt_timed(int set, ErtsMonotonicTime timeout_time) { erts_check_io_interrupt_timed(set, timeout_time); } -#endif /* * Called from schedule() when it runs out of runnable processes, @@ -3316,6 +3269,6 @@ void erl_sys_schedule(int runnable) { erts_check_io(!runnable); - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); } diff --git a/erts/emulator/sys/win32/sys_env.c b/erts/emulator/sys/win32/sys_env.c index 8fcee1cbb6..5792816267 100644 --- a/erts/emulator/sys/win32/sys_env.c +++ b/erts/emulator/sys/win32/sys_env.c @@ -32,12 +32,12 @@ static WCHAR **env_to_arg(WCHAR *env); static WCHAR **find_arg(WCHAR **arg, WCHAR *str);
static int compare(const void *a, const void *b);
-static erts_smp_rwmtx_t environ_rwmtx;
+static erts_rwmtx_t environ_rwmtx; void
erts_sys_env_init(void)
{
- erts_smp_rwmtx_init(&environ_rwmtx, "environ", NIL,
+ erts_rwmtx_init(&environ_rwmtx, "environ", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
@@ -45,10 +45,10 @@ int erts_sys_putenv_raw(char *key, char *value)
{
int res;
- erts_smp_rwmtx_rwlock(&environ_rwmtx);
+ erts_rwmtx_rwlock(&environ_rwmtx); res = (SetEnvironmentVariable((LPCTSTR) key,
(LPCTSTR) value) ? 0 : 1);
- erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+ erts_rwmtx_rwunlock(&environ_rwmtx); return res;
}
@@ -58,10 +58,10 @@ erts_sys_putenv(char *key, char *value) int res;
WCHAR *wkey = (WCHAR *) key;
WCHAR *wvalue = (WCHAR *) value;
- erts_smp_rwmtx_rwlock(&environ_rwmtx);
+ erts_rwmtx_rwlock(&environ_rwmtx); res = (SetEnvironmentVariableW(wkey,
wvalue) ? 0 : 1);
- erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+ erts_rwmtx_rwunlock(&environ_rwmtx); return res;
}
@@ -76,12 +76,12 @@ erts_sys_getenv(char *key, char *value, size_t *size) DWORD wsize = *size / (sizeof(WCHAR) / sizeof(char));
SetLastError(0);
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+ erts_rwmtx_rlock(&environ_rwmtx); new_size = GetEnvironmentVariableW(wkey,
wvalue,
(DWORD) wsize);
res = !new_size && GetLastError() == ERROR_ENVVAR_NOT_FOUND ? -1 : 0;
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+ erts_rwmtx_runlock(&environ_rwmtx); if (res < 0)
return res;
res = new_size > wsize ? 1 : 0;
@@ -111,22 +111,22 @@ int erts_sys_getenv_raw(char *key, char *value, size_t *size)
{
int res;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+ erts_rwmtx_rlock(&environ_rwmtx); res = erts_sys_getenv__(key, value, size);
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+ erts_rwmtx_runlock(&environ_rwmtx); return res;
}
void init_getenv_state(GETENV_STATE *state)
{
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+ erts_rwmtx_rlock(&environ_rwmtx); state->environment_strings = GetEnvironmentStringsW();
state->next_string = state->environment_strings;
}
char *getenv_string(GETENV_STATE *state)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&environ_rwmtx)); if (state->next_string[0] == L'\0') {
return NULL;
} else {
@@ -140,7 +140,7 @@ void fini_getenv_state(GETENV_STATE *state) {
FreeEnvironmentStringsW(state->environment_strings);
state->environment_strings = state->next_string = NULL;
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+ erts_rwmtx_runlock(&environ_rwmtx); }
int erts_sys_unsetenv(char *key)
@@ -149,7 +149,7 @@ int erts_sys_unsetenv(char *key) WCHAR *wkey = (WCHAR *) key;
SetLastError(0);
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+ erts_rwmtx_rlock(&environ_rwmtx); GetEnvironmentVariableW(wkey,
NULL,
0);
@@ -157,7 +157,7 @@ int erts_sys_unsetenv(char *key) res = (SetEnvironmentVariableW(wkey,
NULL) ? 0 : 1);
}
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+ erts_rwmtx_runlock(&environ_rwmtx); return res;
}
@@ -171,12 +171,12 @@ win_build_environment(char* new_env) tmp_new = (WCHAR *) new_env;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+ erts_rwmtx_rlock(&environ_rwmtx); tmp = GetEnvironmentStringsW();
merged = merge_environment(tmp, tmp_new);
FreeEnvironmentStringsW(tmp);
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+ erts_rwmtx_runlock(&environ_rwmtx); return (char *) merged;
}
}
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c index df838960eb..02aa50500f 100644 --- a/erts/emulator/sys/win32/sys_interrupt.c +++ b/erts/emulator/sys/win32/sys_interrupt.c @@ -35,17 +35,11 @@ # define WIN_SYS_INLINE __forceinline #endif -#ifdef ERTS_SMP -erts_smp_atomic32_t erts_break_requested; +erts_atomic32_t erts_break_requested; #define ERTS_SET_BREAK_REQUESTED \ - erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1) + erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1) #define ERTS_UNSET_BREAK_REQUESTED \ - erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0) -#else -volatile int erts_break_requested = 0; -#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1) -#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0) -#endif + erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0) extern int nohup; HANDLE erts_sys_break_event = NULL; @@ -57,14 +51,14 @@ void erts_do_break_handling(void) * therefore, make sure that all threads but this one are blocked before * proceeding! */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); /* call the break handling function, reset the flag */ do_break(); ResetEvent(erts_sys_break_event); ERTS_UNSET_BREAK_REQUESTED; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); } diff --git a/erts/emulator/sys/win32/sys_time.c b/erts/emulator/sys/win32/sys_time.c index 88131aaa6a..25c2ad385c 100644 --- a/erts/emulator/sys/win32/sys_time.c +++ b/erts/emulator/sys/win32/sys_time.c @@ -95,7 +95,7 @@ struct sys_time_internal_state_read_mostly__ { }; struct sys_time_internal_state_write_freq__ { - erts_smp_mtx_t mtime_mtx; + erts_mtx_t mtime_mtx; ULONGLONG wrap; ULONGLONG last_tick_count; }; @@ -187,8 +187,6 @@ os_monotonic_time_gtc32(void) { ErtsMonotonicTime mtime; Uint32 ticks = (Uint32) GetTickCount(); - ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(&internal_state.wr.m.os_mtime_xtnd, - ticks); mtime = ERTS_EXTEND_OS_MONOTONIC_TIME(&internal_state.wr.m.os_mtime_xtnd, ticks); mtime <<= ERTS_GET_TICK_COUNT_TIME_UNIT_SHIFT; @@ -205,8 +203,6 @@ os_times_gtc32(ErtsMonotonicTime *mtimep, ErtsSystemTime *stimep) ticks = (Uint32) GetTickCount(); GetSystemTime(&st); - ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(&internal_state.wr.m.os_mtime_xtnd, - ticks); mtime = ERTS_EXTEND_OS_MONOTONIC_TIME(&internal_state.wr.m.os_mtime_xtnd, ticks); mtime <<= ERTS_GET_TICK_COUNT_TIME_UNIT_SHIFT; @@ -265,8 +261,6 @@ sys_hrtime_gtc32(void) { ErtsSysHrTime time; Uint32 ticks = (Uint32) GetTickCount(); - ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(&internal_state.wr.m.os_mtime_xtnd, - tick_count); time = (ErtsSysHrTime) ERTS_EXTEND_OS_MONOTONIC_TIME(&internal_state.wr.m.os_mtime_xtnd, ticks); time *= (ErtsSysHrTime) (1000 * 1000); @@ -300,7 +294,7 @@ sys_init_time(ErtsSysInitTimeResult *init_resp) module = GetModuleHandle(kernel_dll_name); if (!module) { get_tick_count: - erts_smp_mtx_init(&internal_state.w.f.mtime_mtx, "os_monotonic_time", NIL, + erts_mtx_init(&internal_state.w.f.mtime_mtx, "os_monotonic_time", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); internal_state.w.f.wrap = 0; internal_state.w.f.last_tick_count = 0; diff --git a/erts/emulator/test/alloc_SUITE.erl b/erts/emulator/test/alloc_SUITE.erl index 3a721095e2..f0871ead7d 100644 --- a/erts/emulator/test/alloc_SUITE.erl +++ b/erts/emulator/test/alloc_SUITE.erl @@ -65,12 +65,7 @@ mseg_clear_cache(Cfg) -> drv_case(Cfg). cpool(Cfg) -> drv_case(Cfg). 
migration(Cfg) -> - case erlang:system_info(smp_support) of - true -> - drv_case(Cfg, concurrent, "+MZe true"); - false -> - {skipped, "No smp"} - end. + drv_case(Cfg, concurrent, "+MZe true"). erts_mmap(Config) when is_list(Config) -> case {os:type(), mmsc_flags()} of diff --git a/erts/emulator/test/big_SUITE.erl b/erts/emulator/test/big_SUITE.erl index c308760211..5939d024ae 100644 --- a/erts/emulator/test/big_SUITE.erl +++ b/erts/emulator/test/big_SUITE.erl @@ -339,6 +339,13 @@ system_limit(Config) when is_list(Config) -> {'EXIT',{system_limit,_}} = (catch apply(erlang, id('bsl'), [Maxbig,2])), {'EXIT',{system_limit,_}} = (catch id(1) bsl (1 bsl 45)), {'EXIT',{system_limit,_}} = (catch id(1) bsl (1 bsl 69)), + + %% There should be no system_limit exception when shifting a zero. + 0 = id(0) bsl (1 bsl 128), + 0 = id(0) bsr -(1 bsl 128), + Erlang = id(erlang), + 0 = Erlang:'bsl'(id(0), 1 bsl 128), + 0 = Erlang:'bsr'(id(0), -(1 bsl 128)), ok. maxbig() -> diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl index b4ec99f902..28be4bfe37 100644 --- a/erts/emulator/test/distribution_SUITE.erl +++ b/erts/emulator/test/distribution_SUITE.erl @@ -418,18 +418,20 @@ make_busy(Node, Time) when is_integer(Time) -> Own = 500, freeze_node(Node, Time+Own), Data = make_busy_data(), + DCtrl = dctrl(Node), %% first make port busy Pid = spawn_link(fun () -> forever(fun () -> - dport_reg_send(Node, - '__noone__', - Data) + dctrl_dop_reg_send(Node, + '__noone__', + Data) end) end), receive after Own -> ok end, until(fun () -> - case process_info(Pid, status) of - {status, suspended} -> true; + case {DCtrl, process_info(Pid, status)} of + {DPrt, {status, suspended}} when is_port(DPrt) -> true; + {DPid, {status, waiting}} when is_pid(DPid) -> true; _ -> false end end), @@ -1703,37 +1705,38 @@ bad_dist_ext_check_msgs([M|Ms]) -> bad_dist_ext_check_msgs(Ms) end. +ensure_dctrl(Node) -> + case dctrl(Node) of + undefined -> + pong = net_adm:ping(Node), + dctrl(Node); + DCtrl -> + DCtrl + end. -dport_reg_send(Node, Name, Msg) -> - DPrt = case dport(Node) of - undefined -> - pong = net_adm:ping(Node), - dport(Node); - Prt -> - Prt - end, - port_command(DPrt, [dmsg_hdr(), - dmsg_ext({?DOP_REG_SEND, - self(), - ?COOKIE, - Name}), - dmsg_ext(Msg)]). - - -dport_send(To, Msg) -> +dctrl_send(DPrt, Data) when is_port(DPrt) -> + port_command(DPrt, Data); +dctrl_send(DPid, Data) when is_pid(DPid) -> + Ref = make_ref(), + DPid ! {send, self(), Ref, Data}, + receive {Ref, Res} -> Res end. + +dctrl_dop_reg_send(Node, Name, Msg) -> + dctrl_send(ensure_dctrl(Node), + [dmsg_hdr(), + dmsg_ext({?DOP_REG_SEND, + self(), + ?COOKIE, + Name}), + dmsg_ext(Msg)]). + +dctrl_dop_send(To, Msg) -> Node = node(To), - DPrt = case dport(Node) of - undefined -> - pong = net_adm:ping(Node), - dport(Node); - Prt -> - Prt - end, - port_command(DPrt, [dmsg_hdr(), - dmsg_ext({?DOP_SEND, - ?COOKIE, - To}), - dmsg_ext(Msg)]). + dctrl_send(ensure_dctrl(Node), + [dmsg_hdr(), + dmsg_ext({?DOP_SEND, ?COOKIE, To}), + dmsg_ext(Msg)]). + send_bad_structure(Offender,Victim,Bad,WhereToPutSelf) -> send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,[]). 
send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,PayLoad) -> @@ -1743,7 +1746,7 @@ send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,PayLoad) -> fun () -> Node = node(Victim), pong = net_adm:ping(Node), - DPrt = dport(Node), + DCtrl = dctrl(Node), Bad1 = case WhereToPutSelf of 0 -> Bad; @@ -1756,7 +1759,7 @@ send_bad_structure(Offender,Victim,Bad,WhereToPutSelf,PayLoad) -> [] -> []; _Other -> [dmsg_ext(PayLoad)] end, - port_command(DPrt, DData), + dctrl_send(DCtrl, DData), Parent ! {DData,Done} end), receive @@ -1784,11 +1787,11 @@ send_bad_msgs(BadNode, To, Repeat) when is_atom(BadNode), fun () -> Node = node(To), pong = net_adm:ping(Node), - DPrt = dport(Node), + DCtrl = dctrl(Node), DData = [dmsg_hdr(), dmsg_ext({?DOP_SEND, ?COOKIE, To}), dmsg_bad_atom_cache_ref()], - repeat(fun () -> port_command(DPrt, DData) end, Repeat), + repeat(fun () -> dctrl_send(DCtrl, DData) end, Repeat), Parent ! Done end), receive Done -> ok end. @@ -1810,11 +1813,12 @@ send_bad_ctl(BadNode, ToNode) when is_atom(BadNode), is_atom(ToNode) -> replace}), CtlBeginSize = size(Ctl) - size(Replace), <<CtlBegin:CtlBeginSize/binary, Replace/binary>> = Ctl, - port_command(dport(ToNode), - [dmsg_fake_hdr2(), - CtlBegin, - dmsg_bad_atom_cache_ref(), - dmsg_ext({a, message})]), + DCtrl = dctrl(ToNode), + Data = [dmsg_fake_hdr2(), + CtlBegin, + dmsg_bad_atom_cache_ref(), + dmsg_ext({a, message})], + dctrl_send(DCtrl, Data), Parent ! Done end), receive Done -> ok end. @@ -1827,17 +1831,17 @@ send_bad_dhdr(BadNode, ToNode) when is_atom(BadNode), is_atom(ToNode) -> spawn_link(BadNode, fun () -> pong = net_adm:ping(ToNode), - port_command(dport(ToNode), dmsg_bad_hdr()), + dctrl_send(dctrl(ToNode), dmsg_bad_hdr()), Parent ! Done end), receive Done -> ok end. -dport(Node) when is_atom(Node) -> +dctrl(Node) when is_atom(Node) -> case catch erts_debug:get_internal_state(available_internal_state) of true -> true; _ -> erts_debug:set_internal_state(available_internal_state, true) end, - erts_debug:get_internal_state({dist_port, Node}). + erts_debug:get_internal_state({dist_ctrl, Node}). dmsg_hdr() -> [131, % Version Magic @@ -1979,7 +1983,7 @@ freeze_node(Node, MS) -> fun () -> erts_debug:set_internal_state(available_internal_state, true), - dport_send(Freezer, DoingIt), + dctrl_dop_send(Freezer, DoingIt), receive after Own -> ok end, erts_debug:set_internal_state(block, MS+Own) end), diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl index 6810729285..5fca191679 100644 --- a/erts/emulator/test/driver_SUITE.erl +++ b/erts/emulator/test/driver_SUITE.erl @@ -1144,8 +1144,6 @@ check_si_res(["thread", "false"]) -> false = erlang:system_info(threads); check_si_res(["smp", "true"]) -> true = erlang:system_info(smp_support); -check_si_res(["smp", "false"]) -> - false = erlang:system_info(smp_support); %% Data added in second version of driver_system_info() (driver version 1.1) check_si_res(["async_thrs", Value]) -> @@ -1944,44 +1942,39 @@ thr_msg_blast_receiver_proc(Port, Max, Parent, Done) -> end. 
thr_msg_blast(Config) when is_list(Config) -> - case erlang:system_info(smp_support) of - false -> - {skipped, "Non-SMP emulator; nothing to test..."}; - true -> - Path = proplists:get_value(data_dir, Config), - erl_ddll:start(), - ok = load_driver(Path, thr_msg_blast_drv), - MemBefore = driver_alloc_size(), - Start = os:timestamp(), - Port = open_port({spawn, thr_msg_blast_drv}, []), - true = is_port(Port), - Done = make_ref(), - Me = self(), - spawn(fun () -> - thr_msg_blast_receiver_proc(Port, 1, Me, Done) - end), - receive - Done -> ok - end, - ok = thr_msg_blast_receiver(Port, 0, 32*10000), - port_close(Port), - End = os:timestamp(), - receive - Garbage -> - ct:fail({received_garbage, Port, Garbage}) - after 2000 -> - ok - end, - MemAfter = driver_alloc_size(), - io:format("MemBefore=~p, MemAfter=~p~n", - [MemBefore, MemAfter]), - ThrMsgBlastTime = timer:now_diff(End,Start)/1000000, - io:format("ThrMsgBlastTime=~p~n", [ThrMsgBlastTime]), - MemBefore = MemAfter, - Res = {thr_msg_blast_time, ThrMsgBlastTime}, - erlang:display(Res), - Res - end. + Path = proplists:get_value(data_dir, Config), + erl_ddll:start(), + ok = load_driver(Path, thr_msg_blast_drv), + MemBefore = driver_alloc_size(), + Start = os:timestamp(), + Port = open_port({spawn, thr_msg_blast_drv}, []), + true = is_port(Port), + Done = make_ref(), + Me = self(), + spawn(fun () -> + thr_msg_blast_receiver_proc(Port, 1, Me, Done) + end), + receive + Done -> ok + end, + ok = thr_msg_blast_receiver(Port, 0, 32*10000), + port_close(Port), + End = os:timestamp(), + receive + Garbage -> + ct:fail({received_garbage, Port, Garbage}) + after 2000 -> + ok + end, + MemAfter = driver_alloc_size(), + io:format("MemBefore=~p, MemAfter=~p~n", + [MemBefore, MemAfter]), + ThrMsgBlastTime = timer:now_diff(End,Start)/1000000, + io:format("ThrMsgBlastTime=~p~n", [ThrMsgBlastTime]), + MemBefore = MemAfter, + Res = {thr_msg_blast_time, ThrMsgBlastTime}, + erlang:display(Res), + Res. -define(IN_RANGE(LoW_, VaLuE_, HiGh_), case in_range(LoW_, VaLuE_, HiGh_) of @@ -2488,14 +2481,6 @@ wait_deallocations() -> end. driver_alloc_size() -> - case erlang:system_info(smp_support) of - true -> - ok; - false -> - %% driver_alloc also used by elements in lock-free queues, - %% give these some time to be deallocated... 
- receive after 100 -> ok end - end, wait_deallocations(), case erlang:system_info({allocator_sizes, driver_alloc}) of false -> diff --git a/erts/emulator/test/erl_link_SUITE.erl b/erts/emulator/test/erl_link_SUITE.erl index 5622cce980..d8c5b663e3 100644 --- a/erts/emulator/test/erl_link_SUITE.erl +++ b/erts/emulator/test/erl_link_SUITE.erl @@ -533,7 +533,7 @@ freeze_node(Node, MS) -> fun () -> erts_debug:set_internal_state(available_internal_state, true), - dport_send(Freezer, DoingIt), + dctrl_dop_send(Freezer, DoingIt), receive after Own -> ok end, erts_debug:set_internal_state(block, MS+Own) end), @@ -544,20 +544,22 @@ make_busy(Node, Time) when is_integer(Time) -> Own = 500, freeze_node(Node, Time+Own), Data = busy_data(), + DCtrl = dctrl(Node), %% first make port busy Pid = spawn_link(fun () -> forever(fun () -> - dport_reg_send(Node, - '__noone__', - Data) + dctrl_dop_reg_send(Node, + '__noone__', + Data) end) end), receive after Own -> ok end, wait_until(fun () -> - case process_info(Pid, status) of - {status, suspended} -> true; - _ -> false - end + case {DCtrl, process_info(Pid, status)} of + {DPrt, {status, suspended}} when is_port(DPrt) -> true; + {DPid, {status, waiting}} when is_pid(DPid) -> true; + _ -> false + end end), %% then dist entry make_busy(Node, [nosuspend], Data), @@ -1048,42 +1050,45 @@ stop_node(Node) -> -define(DOP_DEMONITOR_P, 20). -define(DOP_MONITOR_P_EXIT, 21). -dport_send(To, Msg) -> - Node = node(To), - DPrt = case dport(Node) of - undefined -> - pong = net_adm:ping(Node), - dport(Node); - Prt -> - Prt - end, - port_command(DPrt, [dmsg_hdr(), - dmsg_ext({?DOP_SEND, - ?COOKIE, - To}), - dmsg_ext(Msg)]). - -dport_reg_send(Node, Name, Msg) -> - DPrt = case dport(Node) of - undefined -> - pong = net_adm:ping(Node), - dport(Node); - Prt -> - Prt - end, - port_command(DPrt, [dmsg_hdr(), - dmsg_ext({?DOP_REG_SEND, - self(), - ?COOKIE, - Name}), - dmsg_ext(Msg)]). - -dport(Node) when is_atom(Node) -> +ensure_dctrl(Node) -> + case dctrl(Node) of + undefined -> + pong = net_adm:ping(Node), + dctrl(Node); + DCtrl -> + DCtrl + end. + +dctrl_send(DPrt, Data) when is_port(DPrt) -> + port_command(DPrt, Data); +dctrl_send(DPid, Data) when is_pid(DPid) -> + Ref = make_ref(), + DPid ! {send, self(), Ref, Data}, + receive {Ref, Res} -> Res end. + +dctrl_dop_send(To, Msg) -> + dctrl_send(ensure_dctrl(node(To)), + [dmsg_hdr(), + dmsg_ext({?DOP_SEND, + ?COOKIE, + To}), + dmsg_ext(Msg)]). + +dctrl_dop_reg_send(Node, Name, Msg) -> + dctrl_send(ensure_dctrl(Node), + [dmsg_hdr(), + dmsg_ext({?DOP_REG_SEND, + self(), + ?COOKIE, + Name}), + dmsg_ext(Msg)]). + +dctrl(Node) when is_atom(Node) -> case catch erts_debug:get_internal_state(available_internal_state) of true -> true; _ -> erts_debug:set_internal_state(available_internal_state, true) end, - erts_debug:get_internal_state({dist_port, Node}). + erts_debug:get_internal_state({dist_ctrl, Node}). dmsg_hdr() -> [131, % Version Magic diff --git a/erts/emulator/test/estone_SUITE.erl b/erts/emulator/test/estone_SUITE.erl index 8b336b366d..c9c1867049 100644 --- a/erts/emulator/test/estone_SUITE.erl +++ b/erts/emulator/test/estone_SUITE.erl @@ -20,7 +20,7 @@ -module(estone_SUITE). %% Test functions -export([all/0, suite/0, groups/0, - estone/1, estone_bench/1]). + estone/1, estone_bench/1, pgo/0]). %% Internal exports for EStone tests -export([lists/1, @@ -44,9 +44,9 @@ links/1,lproc/1, run_micro/3,p1/1,ppp/3,macro/2,micros/0]). - --include_lib("common_test/include/ct.hrl"). +-ifndef(PGO). 
-include_lib("common_test/include/ct_event.hrl"). +-endif. %% EStone defines -define(TOTAL, (3000 * 1000 * 100)). %% 300 secs @@ -85,13 +85,28 @@ estone(Config) when is_list(Config) -> estone_bench(Config) -> DataDir = proplists:get_value(data_dir,Config), L = ?MODULE:macro(?MODULE:micros(),DataDir), - [ct_event:notify( - #event{name = benchmark_data, - data = [{name,proplists:get_value(title,Mark)}, - {value,proplists:get_value(estones,Mark)}]}) - || Mark <- L], + {Total, Stones} = sum_micros(L, 0, 0), + notify([[{title,"ESTONES"}, {estones, Stones}] | L]), L. +-ifndef(PGO). +notify(Marks) -> + [ct_event:notify( + #event{name = benchmark_data, + data = [{name,proplists:get_value(title, Mark)}, + {value,proplists:get_value(estones, Mark)}]}) + || Mark <- Marks]. +-else. +notify(_) -> + ok. +-endif. + +%% The benchmarks to run in order to guide PGO (profile guided optimisation) +pgo() -> + %% We run all benchmarks except the port_io as we don't want to + %% have to build a custom port. + Micros = ?MODULE:micros() -- [micro(port_io)], + ?MODULE:macro(Micros,[]). %% %% Calculate CPU speed @@ -364,7 +379,7 @@ monotonic_time() -> try erlang:monotonic_time() catch error:undef -> erlang:now() end. subtr(Before, After) when is_integer(Before), is_integer(After) -> - erlang:convert_time_unit(After-Before, native, microsecond); + erlang:convert_time_unit(After-Before, native, 1000000); subtr({_,_,_}=Before, {_,_,_}=After) -> timer:now_diff(After, Before). diff --git a/erts/emulator/test/exception_SUITE.erl b/erts/emulator/test/exception_SUITE.erl index aaca522da6..e473a10be7 100644 --- a/erts/emulator/test/exception_SUITE.erl +++ b/erts/emulator/test/exception_SUITE.erl @@ -21,7 +21,7 @@ -module(exception_SUITE). -export([all/0, suite/0, - badmatch/1, pending_errors/1, nil_arith/1, + badmatch/1, pending_errors/1, nil_arith/1, top_of_stacktrace/1, stacktrace/1, nested_stacktrace/1, raise/1, gunilla/1, per/1, exception_with_heap_frag/1, line_numbers/1]). @@ -36,8 +36,8 @@ suite() -> {timetrap, {minutes, 1}}]. all() -> - [badmatch, pending_errors, nil_arith, stacktrace, - nested_stacktrace, raise, gunilla, per, + [badmatch, pending_errors, nil_arith, top_of_stacktrace, + stacktrace, nested_stacktrace, raise, gunilla, per, exception_with_heap_frag, line_numbers]. -define(try_match(E), @@ -241,7 +241,54 @@ ba_bnot(A) -> io:format("bnot ~p", [A]), {'EXIT', {badarith, _}} = (catch bnot A). +%% Test that BIFs are added to the top of the stacktrace. 
+ +top_of_stacktrace(Conf) when is_list(Conf) -> + %% Arithmetic operators + {'EXIT', {badarith, [{erlang, '+', [1, ok], _} | _]}} = (catch my_add(1, ok)), + {'EXIT', {badarith, [{erlang, '-', [1, ok], _} | _]}} = (catch my_minus(1, ok)), + {'EXIT', {badarith, [{erlang, '*', [1, ok], _} | _]}} = (catch my_times(1, ok)), + {'EXIT', {badarith, [{erlang, 'div', [1, ok], _} | _]}} = (catch my_div(1, ok)), + {'EXIT', {badarith, [{erlang, 'div', [1, 0], _} | _]}} = (catch my_div(1, 0)), + {'EXIT', {badarith, [{erlang, 'rem', [1, ok], _} | _]}} = (catch my_rem(1, ok)), + {'EXIT', {badarith, [{erlang, 'rem', [1, 0], _} | _]}} = (catch my_rem(1, 0)), + + %% Bit operators + {'EXIT', {badarith, [{erlang, 'band', [1, ok], _} | _]}} = (catch my_band(1, ok)), + {'EXIT', {badarith, [{erlang, 'bor', [1, ok], _} | _]}} = (catch my_bor(1, ok)), + {'EXIT', {badarith, [{erlang, 'bsl', [1, ok], _} | _]}} = (catch my_bsl(1, ok)), + {'EXIT', {badarith, [{erlang, 'bsr', [1, ok], _} | _]}} = (catch my_bsr(1, ok)), + {'EXIT', {badarith, [{erlang, 'bxor', [1, ok], _} | _]}} = (catch my_bxor(1, ok)), + {'EXIT', {badarith, [{erlang, 'bnot', [ok], _} | _]}} = (catch my_bnot(ok)), + + %% Tuples + {'EXIT', {badarg, [{erlang, element, [1, ok], _} | _]}} = (catch my_element(1, ok)), + {'EXIT', {badarg, [{erlang, element, [ok, {}], _} | _]}} = (catch my_element(ok, {})), + {'EXIT', {badarg, [{erlang, element, [1, {}], _} | _]}} = (catch my_element(1, {})), + {'EXIT', {badarg, [{erlang, element, [1, {}], _} | _]}} = (catch element(1, erlang:make_tuple(0, ok))), + + %% System limits + Maxbig = maxbig(), + MinusMaxbig = -Maxbig, + {'EXIT', {system_limit, [{erlang, '+', [Maxbig, 1], _} | _]}} = (catch my_add(Maxbig, 1)), + {'EXIT', {system_limit, [{erlang, '+', [Maxbig, 1], _} | _]}} = (catch my_add(maxbig_gc(), 1)), + {'EXIT', {system_limit, [{erlang, '-', [MinusMaxbig, 1], _} | _]}} = (catch my_minus(-Maxbig, 1)), + {'EXIT', {system_limit, [{erlang, '-', [MinusMaxbig, 1], _} | _]}} = (catch my_minus(-maxbig_gc(), 1)), + {'EXIT', {system_limit, [{erlang, '*', [Maxbig, 2], _} | _]}} = (catch my_times(Maxbig, 2)), + {'EXIT', {system_limit, [{erlang, '*', [Maxbig, 2], _} | _]}} = (catch my_times(maxbig_gc(), 2)), + {'EXIT', {system_limit, [{erlang, 'bnot', [Maxbig], _} | _]}} = (catch my_bnot(Maxbig)), + {'EXIT', {system_limit, [{erlang, 'bnot', [Maxbig], _} | _]}} = (catch my_bnot(maxbig_gc())), + ok. + +maxbig() -> + %% We assume that the maximum arity is (1 bsl 19) - 1. + Ws = erlang:system_info(wordsize), + (((1 bsl ((16777184 * (Ws div 4))-1)) - 1) bsl 1) + 1. +maxbig_gc() -> + Maxbig = maxbig(), + erlang:garbage_collect(), + Maxbig. 
stacktrace(Conf) when is_list(Conf) -> Tag = make_ref(), @@ -253,9 +300,9 @@ stacktrace(Conf) when is_list(Conf) -> St1 = erase(stacktrace1), St1 = erase(stacktrace2), St1 = erlang:get_stacktrace(), - {caught2,{error,badarith},[{?MODULE,my_add,2,_}|_]=St2} = + {caught2,{error,badarith},[{erlang,'+',[0,a],_},{?MODULE,my_add,2,_}|_]=St2} = stacktrace_1({'div',{1,0}}, error, {'add',{0,a}}), - [{?MODULE,my_div,2,_}|_] = erase(stacktrace1), + [{erlang,'div',[1,0],_},{?MODULE,my_div,2,_}|_] = erase(stacktrace1), St2 = erase(stacktrace2), St2 = erlang:get_stacktrace(), {caught2,{error,{try_clause,V}},[{?MODULE,stacktrace_1,3,_}|_]=St3} = @@ -308,13 +355,13 @@ nested_stacktrace(Conf) when is_list(Conf) -> nested_stacktrace_1({{value,{V,x1}},void,{V,x1}}, {void,void,void}), {caught1, - [{?MODULE,my_add,2,_}|_], + [{erlang,'+',[V,x1],_},{?MODULE,my_add,2,_}|_], value2, - [{?MODULE,my_add,2,_}|_]} = + [{erlang,'+',[V,x1],_},{?MODULE,my_add,2,_}|_]} = nested_stacktrace_1({{'add',{V,x1}},error,badarith}, {{value,{V,x2}},void,{V,x2}}), {caught1, - [{?MODULE,my_add,2,_}|_], + [{erlang,'+',[V,x1],_},{?MODULE,my_add,2,_}|_], {caught2,[{erlang,abs,[V],_}|_]}, [{erlang,abs,[V],_}|_]} = nested_stacktrace_1({{'add',{V,x1}},error,badarith}, @@ -355,7 +402,7 @@ raise(Conf) when is_list(Conf) -> end, A = erlang:get_stacktrace(), A = get(raise), - [{?MODULE,my_div,2,_}|_] = A, + [{erlang,'div',[1, 0], _},{?MODULE,my_div,2,_}|_] = A, %% N = 8, % Must be even N = erlang:system_flag(backtrace_depth, N), @@ -404,11 +451,20 @@ foo({raise,{Class,Reason,Stacktrace}}) -> erlang:raise(Class, Reason, Stacktrace). %%foo(function_clause) -> % must not be defined! -my_div(A, B) -> - A div B. +my_add(A, B) -> A + B. +my_minus(A, B) -> A - B. +my_times(A, B) -> A * B. +my_div(A, B) -> A div B. +my_rem(A, B) -> A rem B. + +my_band(A, B) -> A band B. +my_bor(A, B) -> A bor B. +my_bsl(A, B) -> A bsl B. +my_bsr(A, B) -> A bsr B. +my_bxor(A, B) -> A bxor B. +my_bnot(A) -> bnot A. -my_add(A, B) -> - A + B. +my_element(A, B) -> element(A, B). my_abs(X) -> abs(X). diff --git a/erts/emulator/test/map_SUITE_data/badmap_17.beam b/erts/emulator/test/map_SUITE_data/badmap_17.beam Binary files differindex 277fc34b94..6f79bb8c2c 100644 --- a/erts/emulator/test/map_SUITE_data/badmap_17.beam +++ b/erts/emulator/test/map_SUITE_data/badmap_17.beam diff --git a/erts/emulator/test/map_SUITE_data/badmap_17.erl b/erts/emulator/test/map_SUITE_data/badmap_17.erl index 0ec65e0e33..887fc2e5e3 100644 --- a/erts/emulator/test/map_SUITE_data/badmap_17.erl +++ b/erts/emulator/test/map_SUITE_data/badmap_17.erl @@ -1,7 +1,7 @@ -module(badmap_17). -export([update/1]). -%% Compile this source file with OTP 17. +%% Compile this source file with OTP 17.0. update(Map) -> try @@ -17,10 +17,42 @@ update(Map) -> catch error:{badmap,Map} -> ok - end. + end, + try + update_3(Map), + error(update_did_not_fail) + catch + error:{badmap,Map} -> + ok + end, + ok = update_4(Map), + ok = update_5(Map), + ok. update_1(M) -> M#{a=>42}. update_2(M) -> M#{a:=42}. + +update_3(M) -> + id(M), + M#{a=>42}. + +update_4(M) when M#{a=>b} =:= M -> + did_not_fail; +update_4(_) -> + ok. + +update_5(M) -> + id(M), + case id(true) of + true when M#{a=>b} =:= M -> + did_not_fail; + true -> + ok + end. + +id(I) -> + I. 
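The update_4/1 and update_5/1 cases added to badmap_17.erl above rely on ordinary guard semantics: a map update that would raise {badmap, Term} in a function body merely makes a guard fail, so the clause is skipped rather than an exception being raised. A minimal sketch of that behaviour, not part of the patch (the module and function names here are invented for illustration):

-module(badmap_guard_example).
-export([demo/0]).

%% In a function body, updating a non-map raises error:{badmap, Term}.
in_body(Term) ->
    try Term#{a => 1} of
        _ -> no_error
    catch
        error:{badmap, Term} -> got_badmap
    end.

%% In a guard, the same update only makes the guard fail, so the
%% first clause is skipped and the catch-all clause matches.
in_guard(Term) when Term#{a => 1} =/= undefined -> guard_succeeded;
in_guard(_) -> guard_failed.

demo() ->
    got_badmap = in_body(not_a_map),
    guard_failed = in_guard(not_a_map),
    guard_succeeded = in_guard(#{}),
    ok.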
+ diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl index 4811244b98..223bd7d586 100644 --- a/erts/emulator/test/nif_SUITE.erl +++ b/erts/emulator/test/nif_SUITE.erl @@ -1723,14 +1723,9 @@ send2(Config) when is_list(Config) -> %% Send msg from user thread send_threaded(Config) when is_list(Config) -> - case erlang:system_info(smp_support) of - true -> - send2_do1(fun(ME,To) -> send_blob_thread_dbg(ME,To,join) end), - send2_do1(fun(ME,To) -> send_blob_thread_and_join(ME,To) end), - ok; - false -> - {skipped,"No threaded send on non-SMP"} - end. + send2_do1(fun(ME,To) -> send_blob_thread_dbg(ME,To,join) end), + send2_do1(fun(ME,To) -> send_blob_thread_and_join(ME,To) end), + ok. send2_do1(SendBlobF) -> diff --git a/erts/emulator/test/node_container_SUITE.erl b/erts/emulator/test/node_container_SUITE.erl index 8e9e3cb05a..be90f929df 100644 --- a/erts/emulator/test/node_container_SUITE.erl +++ b/erts/emulator/test/node_container_SUITE.erl @@ -405,6 +405,7 @@ node_table_gc(Config) when is_list(Config) -> PreKnown = nodes(known), io:format("PreKnown = ~p~n", [PreKnown]), make_node_garbage(0, 200000, 1000, []), + receive after 1000 -> ok end, %% Wait for thread progress... PostKnown = nodes(known), PostAreas = erlang:system_info(allocated_areas), io:format("PostKnown = ~p~n", [PostKnown]), diff --git a/erts/emulator/test/port_trace_SUITE.erl b/erts/emulator/test/port_trace_SUITE.erl index c78dc754a9..a1986397a8 100644 --- a/erts/emulator/test/port_trace_SUITE.erl +++ b/erts/emulator/test/port_trace_SUITE.erl @@ -78,13 +78,6 @@ end_per_group(_GroupName, Config) -> Config. -init_per_testcase(driver_remote_send_term, Config) -> - case erlang:system_info(smp_support) of - false -> - {skip,"Only supported on smp systems"}; - true -> - init_per_testcase(driver_remote_send_term_smp, Config) - end; init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> erlang:trace(all, false, [all]), os:unsetenv("OUTPUTV"), diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl index af33de237c..12e26671c2 100644 --- a/erts/emulator/test/scheduler_SUITE.erl +++ b/erts/emulator/test/scheduler_SUITE.erl @@ -1083,7 +1083,6 @@ sbt_test(Config, CpuTCmd, ClBt, Bt, LP) -> ok. scheduler_threads(Config) when is_list(Config) -> - SmpSupport = erlang:system_info(smp_support), {Sched, SchedOnln, _} = get_sstate(Config, ""), %% Configure half the number of both the scheduler threads and %% the scheduler threads online. @@ -1095,10 +1094,7 @@ scheduler_threads(Config) when is_list(Config) -> %% setting using +SP to 50% scheduler threads and 25% scheduler %% threads online. The result should be 2x scheduler threads and %% 1x scheduler threads online. - TwiceSched = case SmpSupport of - false -> 1; - true -> Sched*2 - end, + TwiceSched = Sched*2, FourSched = integer_to_list(Sched*4), FourSchedOnln = integer_to_list(SchedOnln*4), CombinedCmd1 = "+S "++FourSched++":"++FourSchedOnln++" +SP50:25", @@ -1121,8 +1117,8 @@ scheduler_threads(Config) when is_list(Config) -> ResetCmd = "+S "++FourSched++":"++FourSchedOnln++" +S 0:0", {LProc, LProcAvail, _} = get_sstate(Config, ResetCmd), %% Test negative +S settings, but only for SMP-enabled emulators - case {SmpSupport, LProc > 1, LProcAvail > 1} of - {true, true, true} -> + case {LProc > 1, LProcAvail > 1} of + {true, true} -> SchedMinus1 = LProc-1, SchedOnlnMinus1 = LProcAvail-1, {SchedMinus1, SchedOnlnMinus1, _} = get_sstate(Config, "+S -1"), @@ -1157,9 +1153,6 @@ dirty_scheduler_threads_test(Config) -> ok. 
dirty_schedulers_online_test() -> - dirty_schedulers_online_test(erlang:system_info(smp_support)). -dirty_schedulers_online_test(false) -> ok; -dirty_schedulers_online_test(true) -> dirty_schedulers_online_smp_test(erlang:system_info(schedulers_online)). dirty_schedulers_online_smp_test(SchedOnln) when SchedOnln < 4 -> ok; dirty_schedulers_online_smp_test(SchedOnln) -> diff --git a/erts/emulator/test/signal_SUITE.erl b/erts/emulator/test/signal_SUITE.erl index f1d11d1814..61a8617165 100644 --- a/erts/emulator/test/signal_SUITE.erl +++ b/erts/emulator/test/signal_SUITE.erl @@ -139,71 +139,66 @@ pending_exit_gc(Config) when is_list(Config) -> pending_exit_test(self(), gc). pending_exit_test(From, Type) -> - case catch erlang:system_info(smp_support) of - true -> - OTE = process_flag(trap_exit, true), - Ref = make_ref(), - Master = self(), - ExitBySignal = case Type of - gc -> - lists:duplicate(10000, - exit_by_signal); - _ -> - exit_by_signal - end, - Pid = spawn_link( - fun () -> - receive go -> ok end, - false = have_pending_exit(), - exit = fake_exit(From, - self(), - ExitBySignal), - true = have_pending_exit(), - Master ! {self(), Ref, Type}, - case Type of - gc -> - force_gc(), - erlang:yield(); - unlink -> - unlink(From); - trap_exit -> - process_flag(trap_exit, true); - 'receive' -> - receive _ -> ok - after 0 -> ok - end; - exit -> - ok - end, - exit(exit_by_myself) - end), - Mon = erlang:monitor(process, Pid), - Pid ! go, - Reason = receive - {'DOWN', Mon, process, Pid, R} -> - receive - {Pid, Ref, Type} -> - ok - after 0 -> - ct:fail(premature_exit) - end, - case Type of - exit -> - exit_by_myself = R; - _ -> - ExitBySignal = R - end + OTE = process_flag(trap_exit, true), + Ref = make_ref(), + Master = self(), + ExitBySignal = case Type of + gc -> + lists:duplicate(10000, + exit_by_signal); + _ -> + exit_by_signal + end, + Pid = spawn_link( + fun () -> + receive go -> ok end, + false = have_pending_exit(), + exit = fake_exit(From, + self(), + ExitBySignal), + true = have_pending_exit(), + Master ! {self(), Ref, Type}, + case Type of + gc -> + force_gc(), + erlang:yield(); + unlink -> + unlink(From); + trap_exit -> + process_flag(trap_exit, true); + 'receive' -> + receive _ -> ok + after 0 -> ok + end; + exit -> + ok + end, + exit(exit_by_myself) + end), + Mon = erlang:monitor(process, Pid), + Pid ! go, + Reason = receive + {'DOWN', Mon, process, Pid, R} -> + receive + {Pid, Ref, Type} -> + ok + after 0 -> + ct:fail(premature_exit) end, - receive - {'EXIT', Pid, R2} -> - Reason = R2 - end, - process_flag(trap_exit, OTE), - ok, - {comment, "Test only valid with current SMP emulator."}; - _ -> - {skipped, "SMP support not enabled. Test only valid with current SMP emulator."} - end. + case Type of + exit -> + exit_by_myself = R; + _ -> + ExitBySignal = R + end + end, + receive + {'EXIT', Pid, R2} -> + Reason = R2 + end, + process_flag(trap_exit, OTE), + ok, + {comment, "Test only valid with current SMP emulator."}. 
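The signal_SUITE change above is one instance of a pattern applied throughout these test suites: with the non-SMP runtime system gone, the erlang:system_info(smp_support) checks and their skip branches are dropped and the test bodies run unconditionally. A minimal before/after sketch, not part of the patch (some_case_old/1, some_case_new/1 and run_case/1 are hypothetical names):

-module(smp_guard_example).
-export([some_case_old/1, some_case_new/1]).

%% Old style: skip the test case on a non-SMP emulator.
some_case_old(Config) when is_list(Config) ->
    case erlang:system_info(smp_support) of
        false -> {skipped, "Non-SMP emulator; nothing to test..."};
        true  -> run_case(Config)
    end.

%% New style: only the SMP runtime system remains, so the guard is gone.
some_case_new(Config) when is_list(Config) ->
    run_case(Config).

run_case(_Config) ->
    ok.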
diff --git a/erts/emulator/test/smoke_test_SUITE.erl b/erts/emulator/test/smoke_test_SUITE.erl index 41bb07b84c..adc6f56c06 100644 --- a/erts/emulator/test/smoke_test_SUITE.erl +++ b/erts/emulator/test/smoke_test_SUITE.erl @@ -88,11 +88,9 @@ native_atomics(Config) when is_list(Config) -> {value,{NA32Key, NA32, _}} = lists:keysearch(NA32Key, 1, EthreadInfo), {value,{NA64Key, NA64, _}} = lists:keysearch(NA64Key, 1, EthreadInfo), {value,{DWNAKey, DWNA, _}} = lists:keysearch(DWNAKey, 1, EthreadInfo), - case {erlang:system_info(build_type), erlang:system_info(smp_support), NA32, NA64, DWNA} of - {opt, true, "no", "no", _} -> + case {erlang:system_info(build_type), NA32, NA64, DWNA} of + {opt, "no", "no", _} -> ct:fail(optimized_smp_runtime_without_native_atomics); - {_, false, "no", "no", _} -> - {comment, "No native atomics"}; _ -> {comment, NA32 ++ " 32-bit, " diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl index 40cc940a94..7a396d273c 100644 --- a/erts/emulator/test/statistics_SUITE.erl +++ b/erts/emulator/test/statistics_SUITE.erl @@ -638,9 +638,7 @@ msacc(Config) -> (aux, 0) -> %% aux will be zero if we do not have smp support %% or no async threads - case erlang:system_info(smp_support) orelse - erlang:system_info(thread_pool_size) > 0 - of + case erlang:system_info(thread_pool_size) > 0 of false -> ok; true -> diff --git a/erts/emulator/test/system_profile_SUITE.erl b/erts/emulator/test/system_profile_SUITE.erl index 9b678fcff9..c9be54f668 100644 --- a/erts/emulator/test/system_profile_SUITE.erl +++ b/erts/emulator/test/system_profile_SUITE.erl @@ -146,9 +146,8 @@ do_runnable_ports({TsType, TsTypeFlag}, Config) -> %% Tests system_profiling with scheduler. scheduler(Config) when is_list(Config) -> - case {erlang:system_info(smp_support), erlang:system_info(schedulers_online)} of - {false,_} -> {skipped, "No need for scheduler test when smp support is disabled."}; - {_, 1} -> {skipped, "No need for scheduler test when only one scheduler online."}; + case erlang:system_info(schedulers_online) of + 1 -> {skipped, "No need for scheduler test when only one scheduler online."}; _ -> Nodes = 10, lists:foreach(fun (TsType) -> diff --git a/erts/emulator/test/tuple_SUITE.erl b/erts/emulator/test/tuple_SUITE.erl index 79b681b4d1..baf41180e0 100644 --- a/erts/emulator/test/tuple_SUITE.erl +++ b/erts/emulator/test/tuple_SUITE.erl @@ -134,6 +134,13 @@ t_element(Config) when is_list(Config) -> {'EXIT', {badarg, _}} = (catch element(1, id(42))), {'EXIT', {badarg, _}} = (catch element(id(1.5), id({a,b}))), + %% Make sure that the loader does not reject the module when + %% huge literal index values are used. + {'EXIT', {badarg, _}} = (catch element((1 bsl 24)-1, id({a,b,c}))), + {'EXIT', {badarg, _}} = (catch element(1 bsl 24, id({a,b,c}))), + {'EXIT', {badarg, _}} = (catch element(1 bsl 32, id({a,b,c}))), + {'EXIT', {badarg, _}} = (catch element(1 bsl 64, id({a,b,c}))), + ok. get_elements([Element|Rest], Tuple, Pos) -> diff --git a/erts/emulator/utils/beam_makeops b/erts/emulator/utils/beam_makeops index 0a30553f71..a9b2c8861c 100755 --- a/erts/emulator/utils/beam_makeops +++ b/erts/emulator/utils/beam_makeops @@ -20,13 +20,16 @@ # use strict; use vars qw($BEAM_FORMAT_NUMBER); +use constant COLD => 0; +use constant WARM => 1; +use constant HOT => 2; $BEAM_FORMAT_NUMBER = undef; my $target = \&emulator_output; my $outdir = "."; # Directory for output files. 
my $verbose = 0; -my $hot = 1; +my $hotness = 1; my $num_file_opcodes = 0; my $wordsize = 32; my %defs; # Defines (from command line). @@ -54,11 +57,6 @@ $pack_mask[4] = ['BEAM_LOOSE_MASK', # Only for 64 bit wordsize 'BEAM_LOOSE_MASK', $WHOLE_WORD]; -# Mapping from packagable arguments to number of packed arguments per -# word. Initialized after the wordsize is known. - -my @args_per_word; - # There are two types of instructions: generic and specific. # The generic instructions are those generated by the Beam compiler. # Corresponding to each generic instruction, there is generally a @@ -83,6 +81,10 @@ my %num_specific; my %gen_to_spec; my %specific_op; +# Information about each specific operator. Key is the print name (e.g. get_list_xxy). +# Value is a hash. +my %spec_op_info; + my %gen_arity; my @gen_arity; @@ -91,17 +93,22 @@ my @op_to_name; my @obsolete; -my %macro; -my %macro_flags; +# Instructions and micro instructions implemented in C. +my %c_code; # C code block, location, arguments. +my %c_code_used; # Used or not. + +# Definitions for instructions combined from micro instructions. +my %combined_instrs; -my %hot_code; -my %cold_code; +my @generated_code; # Generated code. +my %sort_order; my @unnumbered_generic; my %unnumbered; my %is_transformed; + # # Pre-processor. # @@ -138,13 +145,15 @@ my %arg_size = ('r' => 0, # x(0) - x register zero 'n' => 0, # NIL (implicit) 'c' => 1, # tagged constant (integer, atom, nil) 's' => 1, # tagged source; any of the above + 'S' => 1, # tagged source register (x or y) 'd' => 1, # tagged destination register (r, x, y) 'f' => 1, # failure label 'j' => 1, # either 'f' or 'p' 'e' => 1, # pointer to export entry 'L' => 0, # label - 'I' => 1, # untagged integer - 't' => 1, # untagged integer -- can be packed + 't' => 1, # untagged integer (12 bits) -- can be packed + 'I' => 1, # untagged integer (32 bits) -- can be packed + 'W' => 1, # untagged integer/pointer (one word) 'b' => 1, # pointer to bif 'A' => 1, # arity value 'P' => 1, # byte offset into tuple or stack @@ -186,16 +195,16 @@ sub define_type_bit { define_type_bit('s', $type_bit{'d'} | $type_bit{'i'} | $type_bit{'a'} | $type_bit{'n'} | $type_bit{'q'}); + define_type_bit('S', $type_bit{'d'}); define_type_bit('j', $type_bit{'f'} | $type_bit{'p'}); # Aliases (for matching purposes). - define_type_bit('I', $type_bit{'u'}); define_type_bit('t', $type_bit{'u'}); + define_type_bit('I', $type_bit{'u'}); + define_type_bit('W', $type_bit{'u'}); define_type_bit('A', $type_bit{'u'}); define_type_bit('L', $type_bit{'u'}); define_type_bit('b', $type_bit{'u'}); - define_type_bit('N', $type_bit{'u'}); - define_type_bit('U', $type_bit{'u'}); define_type_bit('e', $type_bit{'u'}); define_type_bit('P', $type_bit{'u'}); define_type_bit('Q', $type_bit{'u'}); @@ -222,6 +231,12 @@ $match_engine_ops{'TOP_fail'} = 1; sanity("tag '$tag': primitive tags must be named with lowercase letters") unless $tag =~ /^[a-z]$/; } + + foreach my $tag (keys %arg_size) { + defined $type_bit{$tag} or + sanity("the tag '$tag' has a size in %arg_size, " . + "but has no defined bit pattern"); + } } # @@ -240,27 +255,56 @@ while (@ARGV && $ARGV[0] =~ /^-(.*)/) { die "$0: Bad option: -$_\n"; } +if ($wordsize == 32) { + $defs{'ARCH_32'} = 1; + $defs{'ARCH_64'} = 0; +} elsif ($wordsize == 64) { + $defs{'ARCH_32'} = 0; + $defs{'ARCH_64'} = 1; +} + # # Initialize number of arguments per packed word. 
# -$args_per_word[2] = 2; -$args_per_word[3] = 3; -$args_per_word[4] = 2; -$args_per_word[5] = 3; -$args_per_word[6] = 3; - if ($wordsize == 64) { $pack_mask[3] = ['BEAM_TIGHT_MASK', 'BEAM_TIGHT_MASK', $WHOLE_WORD]; - $args_per_word[4] = 4; +} + +# +# Add placeholders for built-in macros. +# + +$c_code{'IS_PACKED'} = ['$Expr',"built-in macro",('Expr')]; +$c_code{'ARG_POSITION'} = ['$Expr',"built-in macro",('Expr')]; +foreach my $name (keys %c_code) { + $c_code_used{$name} = 1; } # # Parse the input files. # +my $in_c_code = ''; +my $c_code_block; +my $c_code_loc; +my @c_args; + while (<>) { my($op_num); + if ($in_c_code) { + if (/^\}/) { + my $name = $in_c_code; + my $block = $c_code_block; + $in_c_code = ''; + $block =~ s/^ //mg; + chomp $block; + $c_code{$name} = [$block,$c_code_loc,@c_args]; + } else { + $c_code_block .= $_; + } + next; + } chomp; if (s/\\$//) { $_ .= <>; @@ -268,6 +312,7 @@ while (<>) { } next if /^\s*$/; next if /^\#/; + next if m@^//@; # # Handle %if. @@ -316,30 +361,16 @@ while (<>) { } # - # Handle %hot/%cold. + # Handle %hot, %warm, and %cold. # if (/^\%hot/) { - $hot = 1; + $hotness = HOT; next; + } elsif (/^\%warm/) { + $hotness = WARM; + next; } elsif (/^\%cold/) { - $hot = 0; - next; - } - - # - # Handle macro definitions. - # - if (/^\%macro:(.*)/) { - my($op, $macro, @flags) = split(' ', $1); - defined($macro) and $macro =~ /^-/ and - error("A macro must not start with a hyphen"); - foreach (@flags) { - /^-/ or error("Flags for macros should start with a hyphen"); - } - error("Macro for '$op' is already defined") - if defined $macro{$op}; - $macro{$op} = $macro; - $macro_flags{$op} = join('', @flags); + $hotness = COLD; next; } @@ -352,6 +383,31 @@ while (<>) { } # + # Handle C code blocks. + # + if (/^(\w[\w.]*)\(([^\)]*)\)\s*{/) { + my $name = $1; + $in_c_code = $name; + $c_code_block = ''; + @c_args = parse_c_args($2); + $c_code_loc = "$ARGV($.)"; + if (defined $c_code{$name}) { + my $where = $c_code{$name}->[1]; + error("$name: already defined at $where"); + } + next; + } + + # + # Handle definition of instructions in terms of + # micro instructions. + # + if (/^(\w+)\s*:=\s*([\w.]+)\s*;\s*$/) { + $combined_instrs{$1} = ["$ARGV($.)",$2]; + next; + } + + # # Parse off the number of the operation. # $op_num = undef; @@ -402,7 +458,7 @@ while (<>) { if (defined $gen_opnum{$name,$arity} and $obsolete[$gen_opnum{$name,$arity}]) { error("specific instructions may not be specified for obsolete instructions"); } - save_specific_ops($name, $arity, $hot, @args); + save_specific_ops($name, $arity, $hotness, @args); if (defined $op_num) { error("specific instructions must not be numbered"); } elsif (!defined($gen_arity{$name}) && !defined($unnumbered{$name,$arity})) { @@ -449,6 +505,18 @@ $num_file_opcodes = @gen_opname; &$target(); # +# Ensure that all C code implementations have been used. +# +{ + my(@unused) = grep(!$c_code_used{$_}, keys %c_code); + foreach my $unused (@unused) { + my(undef,$where) = @{$c_code{$unused}}; + warn "$where: $unused is unused\n"; + } + die "\n" if @unused; +} + +# # Produce output needed by the emulator/loader. # @@ -458,6 +526,37 @@ sub emulator_output { my $key; # Loop variable. # + # Generate code and meta information for all instructions. 
+ # + foreach $key (keys %specific_op) { + foreach (@{$specific_op{$key}}) { + my($name, $hotness, @args) = @$_; + my $sign = join('', @args); + my $print_name = print_name($name, @args); + + my($size, $code, $pack_spec) = cg_basic($name, @args); + if (defined $code) { + $code = "OpCase($print_name):\n$code"; + push @generated_code, [$hotness,$code,($print_name)]; + } + + # Note: Some of the information below will be modified + # for combined instructions. + my %info = ('size' => $size, + 'pack_spec' => $pack_spec, + 'adj' => 0, + 'args' => \@args); + $spec_op_info{$print_name} = \%info; + } + } + + # + # Combine micro instruction into instruction blocks and generate + # code for them. + # + combine_micro_instructions(); + + # # Information about opcodes (beam_opcodes.c). # $name = "$outdir/beam_opcodes.c"; @@ -488,7 +587,7 @@ sub emulator_output { # # Generate code for specific ops. # - my($spec_opnum) = 0; + my $spec_opnum = 0; print "const OpEntry opc[] = {\n"; foreach $key (sort keys %specific_op) { $gen_to_spec{$key} = $spec_opnum; @@ -506,37 +605,21 @@ sub emulator_output { # The primitive types should sort before other types. - my($sort_key) = $sign; + my $sort_key = $sign; eval "\$sort_key =~ tr/$genop_types/./"; $sort_key .= ":$sign"; - $items{$sort_key} = [$name, $hot, $sign, @args]; + my $print_name = print_name($name, @args); + $items{$sort_key} = $print_name; } # # Now call the generator for the sorted result. # - foreach (sort keys %items) { - my($name, $hot, $sign, @args) = @{$items{$_}}; + foreach my $sort_key (sort keys %items) { + my $print_name = $items{$sort_key}; + my $info = $spec_op_info{$print_name}; + my(@args) = @{$info->{'args'}}; my $arity = @args; - my($instr) = "${name}_$sign"; - $instr =~ s/_$//; - - # - # Call a generator to calculate size and generate macros - # for the emulator. - # - my($size, $code, $pack) = basic_generator($name, $hot, @args); - - # - # Save the generated $code for later. - # - if (defined $code) { - if ($hot) { - push(@{$hot_code{$code}}, $instr); - } else { - push(@{$cold_code{$code}}, $instr); - } - } # # Calculate the bit mask which should be used to match this @@ -558,7 +641,6 @@ sub emulator_output { } printf "/* %3d */ ", $spec_opnum; - my $print_name = $sign ne '' ? 
"${name}_$sign" : $name; my $init = "{"; my $sep = ""; foreach (@bits) { @@ -566,8 +648,12 @@ sub emulator_output { $sep = ","; } $init .= "}"; - init_item($print_name, $init, $involves_r, $size, $pack, $sign); - $op_to_name[$spec_opnum] = $instr; + my $adj = $info->{'adj'}; + my $size = $info->{'size'}; + my $pack_spec = $info->{'pack_spec'}; + my $sign = join '', @args; + init_item($print_name, $init, $involves_r, $size, $adj, $pack_spec, $sign); + $op_to_name[$spec_opnum] = $print_name; $spec_opnum++; } } @@ -646,9 +732,9 @@ sub emulator_output { print "#if !defined(ARCH_64)\n"; print qq[ #error "64-bit architecture assumed, but ARCH_64 not defined"\n]; print "#endif\n"; - print "#define BEAM_WIDE_MASK 0xFFFFUL\n"; - print "#define BEAM_LOOSE_MASK 0xFFFFUL\n"; - print "#define BEAM_TIGHT_MASK 0xFFFFUL\n"; + print "#define BEAM_WIDE_MASK 0xFFFFFFFFull\n"; + print "#define BEAM_LOOSE_MASK 0xFFFFull\n"; + print "#define BEAM_TIGHT_MASK 0xFFFFull\n"; print "#define BEAM_WIDE_SHIFT 32\n"; print "#define BEAM_LOOSE_SHIFT 16\n"; print "#define BEAM_TIGHT_SHIFT 16\n"; @@ -750,13 +836,23 @@ sub emulator_output { $name = "$outdir/beam_hot.h"; open(STDOUT, ">$name") || die "Failed to open $name for writing: $!\n"; comment('C'); - print_code(\%hot_code); + print_code(HOT); + + $name = "$outdir/beam_warm.h"; + open(STDOUT, ">$name") || die "Failed to open $name for writing: $!\n"; + comment('C'); + print_code(WARM); $name = "$outdir/beam_cold.h"; open(STDOUT, ">$name") || die "Failed to open $name for writing: $!\n"; comment('C'); - print_code(\%cold_code); + print_code(COLD); +} +sub print_name { + my($name,@args) = @_; + my $sign = join '', @args; + $sign ne '' ? "${name}_$sign" : $name; } sub init_item { @@ -784,29 +880,47 @@ sub q { } sub print_code { - my($ref) = @_; - my(%sorted); - my($key, $label); # Loop variables. - - foreach $key (keys %$ref) { - my($sort_key); - my($code) = ''; - foreach $label (@{$ref->{$key}}) { - $code .= "OpCase($label):\n"; - $sort_key = $label; - } - foreach (split("\n", $key)) { - $code .= " $_\n"; - } - $code .= "\n"; - $sorted{$sort_key} = $code; + my($include_hot) = @_; + my %sorted; + + foreach my $ref (@generated_code) { + my($hot,$code,@labels) = @$ref; + next unless $hot == $include_hot; + my($sort_key) = @labels; # Use the first label as sort key. + $sorted{$sort_key} = $code; } foreach (sort keys %sorted) { - print $sorted{$_}; + print_indented_code($sorted{$_}); + } +} + +sub print_indented_code { + my(@code) = @_; + + foreach my $chunk (@code) { + my $indent = 0; + foreach (split "\n", $chunk) { + s/^\s*//; + if (/\}/) { + $indent -= 2; + } + if ($_ eq '') { + print "\n"; + } elsif (/^#/) { + print $_, "\n"; + } else { + print ' ' x $indent, $_, "\n"; + } + if (/\{/) { + $indent += 2; + } + } + print "\n"; } } + # # Produce output needed by the compiler back-end (assembler). # @@ -893,6 +1007,18 @@ sub save_specific_ops { } } +sub parse_c_args { + local($_) = @_; + my @res; + + while (s/^(\w[\w\d]*)\s*//) { + push @res, $1; + s/^,\s*// or last; + } + $_ eq '' or error("garbage in argument list: $_"); + @res; +} + sub error { my(@message) = @_; my($where) = $. ? "$ARGV($.): " : ""; @@ -934,58 +1060,272 @@ sub comment { } # -# Basic implementation of instruction in emulator loop -# (assuming no packing). +# Combine micro instruction into instruction blocks. # +sub combine_micro_instructions { + my %groups; + + # Sanity check, normalize micro instructions. 
+ foreach my $instr (keys %combined_instrs) { + my $ref = $combined_instrs{$instr}; + my($def_loc,$def) = @$ref; + my($group,@subs) = split /[.]/, $def; + my $arity = 0; + @subs = map { "$group.$_" } @subs; + foreach my $s (@subs) { + my $code = $c_code{$s}; + defined $code or + error("$def_loc: no definition of $s"); + $c_code_used{$s} = 1; + my(undef,undef,@c_args) = @{$code}; + $arity += scalar(@c_args); + } + push @{$groups{$group}}, [$instr,$arity,@subs]; + } -sub basic_generator { - my($name, $hot, @args) = @_; - my($size) = 0; - my($macro) = ''; - my($flags) = ''; - my(@f); - my(@f_types); - my($fail_type); - my($prefix) = ''; - my($tmp_arg_num) = 1; - my($pack_spec) = ''; - my($var_decls) = ''; - my($i); - my($no_prefetch) = 0; + # Now generate code for each group. + foreach my $group (sort keys %groups) { + my($hotness,$code,@labels) = + combine_instruction_group($group, @{$groups{$group}}); + push @generated_code, [$hotness,$code,@labels]; + } +} + +sub combine_instruction_group { + my($group,@in_instrs) = @_; + my $gcode = ''; # Code for the entire group. + my $group_hotness = COLD; + + # Get code for the head of the group (if any). + my $head_name = "$group.head"; + $c_code_used{$head_name} = 1; + my $head_code_ref = $c_code{$head_name}; + if (defined $head_code_ref) { + my($head_code,$where,@c_args) = @{$head_code_ref}; + @c_args and error("$where: no arguments allowed for " . + "head function '$head_name()'"); + $gcode = $head_code . "\n"; + } + + # Variables. + my %offsets; + my @instrs; + my %num_references; + my $group_size = 0; + + # Do basic error checking. Associate operands of instructions + # with the correct micro instructions. Calculate offsets for micro + # instructions. + foreach my $ref_instr (@in_instrs) { + my($specific,$arity,@subs) = @$ref_instr; + my $specific_key = "$specific/$arity"; + my $specific_op_ref = $specific_op{$specific_key}; + error("no $specific_key instruction") + unless defined $specific_op_ref; + foreach my $specific_op (@$specific_op_ref) { + my($name, $hotness, @args) = @{$specific_op}; + $group_hotness = $hotness unless $group_hotness >= $hotness; + my $offset = 0; + my @rest = @args; + my @new_subs; + my $print_name = print_name($specific, @args); + my $opcase = $print_name; + my $last = $subs[$#subs]; + foreach my $s (@subs) { + my $code = $c_code{$s}; + my(undef,undef,@c_args) = @{$code}; + my @first; + foreach (0..$#c_args) { + push @first, shift @rest; + } + my $size = cg_combined_size($s, 1, @first); + $offsets{$s} = $offset + unless defined $offsets{$s} and $offsets{$s} >= $offset; + $offset += $size - 1; + my $label = micro_label($s); + $num_references{$label} = 0; + push @new_subs, [$opcase,$label,$s,$size-1,@first]; + $opcase = ''; + } + $spec_op_info{$print_name}->{'size'} = $offset + 1; + $group_size = $offset if $group_size < $offset; + push @instrs, [$specific_key,@new_subs]; + } + } - # The following argument types should be included as macro arguments. - my(%incl_arg) = ('c' => 1, - 'i' => 1, - 'a' => 1, - 'A' => 1, - 'N' => 1, - 'U' => 1, - 'I' => 1, - 't' => 1, - 'P' => 1, - 'Q' => 1, - ); + # Link the sub instructions for each instructions to each + # other. 
+ my @all_instrs; + foreach my $instr (@instrs) { + my($specific_key,@subs) = @{$instr}; + for (my $i = 0; $i < @subs; $i++) { + my($opcase,$label,$s,$size,@args) = @{$subs[$i]}; + my $next = ''; + (undef,$next) = @{$subs[$i+1]} if $i < $#subs; + $num_references{$next}++ if $next; + my $instr_info = "$opcase:$label:$next:$s:$size:@args"; + push @all_instrs, [$label,$offsets{$s},$instr_info]; + } + } - # Pick up the macro to use and its flags (if any). + my %order_to_instrs; + my %label_to_offset; + my %order_to_offset; + foreach my $instr (@all_instrs) { + my($label,$offset,$instr_info) = @$instr; + my $sort_key = sprintf("%02d.%02d", $offset, $num_references{$label}); + push @{$order_to_instrs{$sort_key}}, $instr_info; + $label_to_offset{$label} = $offset; + $order_to_offset{$sort_key} = $offset; + } + + my(@slots) = sort {$a <=> $b} keys %order_to_instrs; + + # Now generate the code for the entire group. + my $offset = 0; + my @opcase_labels; + my %down; + my %up; + for(my $i = 0; $i < @slots; $i++) { + my $key = $slots[$i]; + + # Sort micro-instructions with OpCase before other micro-instructions. + my(@instrs) = @{$order_to_instrs{$key}}; + my $order_func = sub { + my $a_key = ($a =~ /^:/) ? "1$a" : "0$a"; + my $b_key = ($b =~ /^:/) ? "1$b" : "0$b"; + $a_key cmp $b_key; + }; + @instrs = sort $order_func @instrs; + + my %seen; + foreach my $instr (@instrs) { + my($opcase,$label,$next,$s,$size,$args) = split ":", $instr; + my(@first) = split " ", $args; + + my $seen_key = "$label:$next:" . scalar(@first); + next if $opcase eq '' and $seen{$seen_key}; + $seen{$seen_key} = 1; + $seen_key .= $opcase; + + if ($opcase ne '') { + $gcode .= "OpCase($opcase):\n"; + push @opcase_labels, $opcase; + } + if ($num_references{$label}) { + $gcode .= "$label:\n"; + } + + my $flags = ''; + my $transfer_to_next = ''; + my $dec = 0; + + unless ($i == $#slots) { + $flags = "-no_next"; + my $next_offset = $label_to_offset{$next}; + $dec = $next_offset - ($offset + $size); + $transfer_to_next = "I -= $dec;\n" if $dec; + $transfer_to_next .= "goto $next;\n\n"; + } + + my($gen_code,$down,$up) = + cg_combined_code($s, 1, $flags, $offset, + $group_size-$offset-$dec, @first); + my $spec_label = "$opcase$label"; + $down{$spec_label} = $down; + $up{$spec_label} = $up; + $gcode .= $gen_code . $transfer_to_next; + } + $offset = $order_to_offset{$slots[$i+1]} if $i < $#slots; + } - $macro = $macro{$name} if defined $macro{$name}; - $flags = $macro_flags{$name} if defined $macro_flags{$name}; + foreach my $print_name (@opcase_labels) { + my $info = $spec_op_info{$print_name}; + $info->{'adj'} = $info->{'size'} - $group_size - 1; + } # - # Add any arguments to be included as macro arguments (for instance, - # 'p' is usually not an argument, except for calls). + # Assemble pack specifications for all instructions in the group. # + foreach my $instr (@instrs) { + my(undef,@subs) = @{$instr}; + my $down = ''; + my $up = ''; + for (my $i = 0; $i < @subs; $i++) { + my($opcase,$label) = @{$subs[$i]}; + my $spec_label = "$opcase$label"; + if (defined $down{$spec_label}) { + $down = $down{$spec_label} . $down; + $up = $up . $up{$spec_label}; + } + } + my $print_name = $subs[0]->[0]; + my $info = $spec_op_info{$print_name}; + $info->{'pack_spec'} = build_pack_spec("$down:$up"); + } - while ($flags =~ /-arg_(\w)/g) { - $incl_arg{$1} = 1; - }; + ($group_hotness,"{\n$gcode\n}\n\n",@opcase_labels); +} + +sub micro_label { + my $label = shift; + $label =~ s/[.]/__/g; + $label; +} + + +# +# Basic code generation for one instruction. 
+# + +sub cg_basic { + my($name,@args) = @_; + my($size,$code,$pack_spec) = code_gen($name, 1, '', 0, undef, @args); + $pack_spec = build_pack_spec($pack_spec); + ($size,$code,$pack_spec); +} + +# +# Calculate size for a micro instruction. +# + +sub cg_combined_size { + my($name,$pack,@args) = @_; + my($size) = code_gen($name, $pack, '', 0, undef, @args); + $size; +} + +# +# Generate code for a micro instruction. +# + +sub cg_combined_code { + my($size,$code,$pack_spec) = code_gen(@_); + if ($pack_spec eq '') { + ($code,'',''); + } else { + my($down,$up) = split /:/, $pack_spec; + ($code,$down,$up); + } +} + +sub code_gen { + my($name,$pack,$extra_comments,$offset,$group_size,@args) = @_; + my $size = 0; + my $flags = ''; + my @f; + my $prefix = ''; + my $tmp_arg_num = 1; + my $pack_spec = ''; + my $var_decls = ''; # - # Pack arguments if requested. + # Pack arguments for hot code with an implementation. # - if ($flags =~ /-pack/ && $hot) { - ($prefix, $pack_spec, @args) = do_pack(@args); + my $c_code_ref = $c_code{$name}; + if ($pack and defined $c_code_ref and $name ne 'catch') { + ($var_decls, $pack_spec, @args) = do_pack($offset, @args); } # @@ -993,259 +1333,503 @@ sub basic_generator { # the macro. # + my $need_block = 0; + my $arg_offset = $offset; foreach (@args) { my($this_size) = $arg_size{$_}; SWITCH: { - /^pack:(\d):(.*)/ and do { push(@f, $2); - push(@f_types, 'packed'); - $this_size = $1; - last SWITCH; - }; - /r/ and do { push(@f, "r(0)"); push(@f_types, $_); last SWITCH }; - /[xy]/ and do { push(@f, "$_" . "b(Arg($size))"); - push(@f_types, $_); - last SWITCH; - }; - /n/ and do { push(@f, "NIL"); push(@f_types, $_); last SWITCH }; - /s/ and do { my($tmp) = "targ$tmp_arg_num"; - $var_decls .= "Eterm $tmp; "; - $tmp_arg_num++; - push(@f, $tmp); - push(@f_types, $_); - $prefix .= "GetR($size, $tmp);\n"; - last SWITCH; }; - /d/ and do { $var_decls .= "Eterm dst; Eterm* dst_ptr; "; - push(@f, "*dst_ptr"); - push(@f_types, $_); - $prefix .= "dst = Arg($size);\n"; - $prefix .= "dst_ptr = REG_TARGET_PTR(dst);\n"; - last SWITCH; - }; - defined($incl_arg{$_}) - and do { push(@f, "Arg($size)"); - push(@f_types, $_); - last SWITCH; - }; - - /[fp]/ and do { $fail_type = $_; last SWITCH }; - - /[eLIFEbASjPowlq]/ and do { last SWITCH; }; + /^packed:d:(\d):(.*)/ and do { + $var_decls .= "Eterm dst = $2;\n" . + "Eterm* dst_ptr = REG_TARGET_PTR(dst);\n"; + push(@f, "*dst_ptr"); + $this_size = $1; + last SWITCH; + }; + /^packed:[a-zA-z]:(\d):(.*)/ and do { + push(@f, $2); + $this_size = $1; + last SWITCH; + }; + /r/ and do { + push(@f, "r(0)"); + last SWITCH; + }; + /[lxyS]/ and do { + push(@f, $_ . "b(" . arg_offset($arg_offset) . ")"); + last SWITCH; + }; + /n/ and do { + push(@f, "NIL"); + last SWITCH; + }; + /s/ and do { + my($tmp) = "targ$tmp_arg_num"; + $var_decls .= "Eterm $tmp;\n"; + $tmp_arg_num++; + push(@f, $tmp); + $prefix .= "GetR($arg_offset, $tmp);\n"; + $need_block = 1; + last SWITCH; + }; + /d/ and do { + $var_decls .= "Eterm dst = " . arg_offset($arg_offset) . ";\n" . + "Eterm* dst_ptr = REG_TARGET_PTR(dst);\n"; + push(@f, "*dst_ptr"); + last SWITCH; + }; + defined $arg_size{$_} and do { + push @f, arg_offset($arg_offset); + last SWITCH; + }; die "$name: The generator can't handle $_, at"; } $size += $this_size; + $arg_offset += $this_size; } # - # Add a fail action macro if requested. + # If the implementation is in beam_emu.c, there is nothing + # more to do. 
# + unless (defined $c_code_ref) { + return ($size+1, undef, ''); + } - $flags =~ /-fail_action/ and do { - $no_prefetch = 1; - if (!defined $fail_type) { - my($i); - for ($i = 0; $i < @f_types; $i++) { - local($_) = $f_types[$i]; - /[rxycians]/ and do { push(@f, "Badmatch($f[$i])"); next }; - } - } elsif ($fail_type eq 'f') { - push(@f, "ClauseFail()"); - } else { - my($i); - for ($i = 0; $i < @f_types; $i++) { - local($_) = $f_types[$i]; - /[rxycians]/ and do { push(@f, "Badmatch($f[$i])"); next }; - } - } - }; + $group_size = $size unless defined $group_size; # - # Add a size argument if requested. + # Generate main body of the implementation. # + my($c_code,$where,@c_args) = @{$c_code_ref}; + my %bindings; + $c_code_used{$name} = 1; - $flags =~ /-size/ and do { - push(@f, $size); - }; + if (@f != @c_args) { + error("$where: defining '$name' with ", scalar(@c_args), + " arguments instead of expected ", scalar(@f), " arguments"); + } - # Generate the macro if requested. - my($code); - if (defined $macro{$name}) { - my($macro_code) = "$prefix$macro(" . join(', ', @f) . ");"; - $var_decls .= "BeamInstr tmp_packed1;" - if $macro_code =~ /tmp_packed1/; - $var_decls .= "BeamInstr tmp_packed2;" - if $macro_code =~ /tmp_packed2/; - if ($flags =~ /-nonext/) { - $code = join("\n", - "{ $var_decls", - $macro_code, - "}"); - } elsif ($flags =~ /-goto:(\S*)/) { - my $goto = $1; - $code = join("\n", - "{ $var_decls", - $macro_code, - "I += $size + 1;", - "goto $goto;", - "}"); - } elsif ($no_prefetch) { - $code = join("\n", - "{ $var_decls", - $macro_code, - "Next($size);", - "}", ""); - } else { - $code = join("\n", - "{ $var_decls", - "BeamInstr* next;", - "PreFetch($size, next);", - "$macro_code", - "NextPF($size, next);", - "}", ""); - } + for (my $i = 0; $i < @f; $i++) { + my $var = $c_args[$i]; + $bindings{$var} = $f[$i]; + } + $bindings{'NEXT_INSTRUCTION'} = "I+" . ($group_size+$offset+1); + $c_code = eval { expand_all($c_code, \%bindings) }; + unless (defined $c_code) { + warn $@; + error("... from the body of $name at $where"); + } + my(@comments) = $c_code =~ m@//[|]\s*(.*)@g; + $c_code =~ s@//[|]\s*(.*)\n?@@g; + $flags = "@comments $extra_comments"; + + # + # Generate code for transferring to the next instruction. + # + my $dispatch_next; + my $instr_offset = $group_size + $offset + 1; + + if ($flags =~ /-no_next/) { + $dispatch_next = ""; + } elsif ($flags =~ /-no_prefetch/) { + $dispatch_next = "\nI += $instr_offset;\n" . + "ASSERT(VALID_INSTR(*I));\n" . + "Goto(*I);"; + } else { + $var_decls .= "BeamInstr* _nextpf = " . + "(BeamInstr *) I[$instr_offset];\n"; + $dispatch_next = "\nI += $instr_offset;\n" . + "ASSERT(VALID_INSTR(_nextpf));\n" . + "Goto(_nextpf);"; + } + + # + # Assemble the complete code for the instruction. + # + my $body = "$c_code$dispatch_next"; + if ($need_block) { + $body = "$prefix\{\n$body\n}"; + } else { + $body = "$prefix$body"; + } + my $code = join("\n", + "{", + "$var_decls$body", + "}", ""); + ($size+1, $code, $pack_spec); +} + +sub arg_offset { + my $offset = shift; + "I[" . ($offset+1) . "]"; +} + +sub expand_all { + my($code,$bindings_ref) = @_; + my %bindings = %{$bindings_ref}; + + # Expand all $Var occurrences. + $code =~ s/[\$](\w[\w\d]*)(?!\()/defined $bindings{$1} ? $bindings{$1} : "\$$1"/ge; + + # Find calls to macros, $name(...), and expand them. 
+ my $res = ""; + while ($code =~ /[\$](\w[\w\d]*)\(/) { + my $macro_name = $1; + my $keep = substr($code, 0, $-[0]); + my $after = substr($code, $+[0]); + + my $body; + ($body,$code) = expand_macro($macro_name, $after, \%bindings); + $res .= "$keep$body"; + } + + $res . $code; +} + +sub expand_macro { + my($name,$rest,$bindings_ref) = @_; + + my $c_code = $c_code{$name}; + defined $c_code or + error("calling undefined macro '$name'..."); + $c_code_used{$name} = 1; + my ($body,$where,@vars) = @{$c_code}; + + # Separate the arguments into @args; + my @args; + my $level = 1; + my %inc = ('(' => 1, ')' => -1, + '[' => 1, ']' => -1, + '{' => 1, '}' => -1); + my $arg = undef; + while ($rest =~ /([,\(\[\{\}\]\)]|([^,\(\[\{\}\]\)]*))/g) { + my $token = $1; + my $inc = $inc{$token} || 0; + $level += $inc; + if ($level == 0) { + $rest = substr($rest, pos($rest)); + push @args, $arg if defined $arg; + last; + } + if ($token eq ',') { + if ($level == 1) { + push @args, $arg; + $arg = ""; + } + next; + } + $arg .= $token; + } + + # Trim leading whitespace from each argument. + foreach my $arg (@args) { + $arg =~ s/^\s*//; + } + + # Make sure that the number of arguments are correct. + if (@vars != @args) { + error("calling $name with ", scalar(@args), + " arguments instead of expected ", scalar(@vars), " arguments..."); + } + + # Now combine bindings from the parameter names and arguments. + my %bindings = %{$bindings_ref}; + my %new_bindings; + + # Keep the special, pre-defined bindings. + foreach my $key (qw(NEXT_INSTRUCTION)) { + $new_bindings{$key} = $bindings{$key}; + } + + for (my $i = 0; $i < @vars; $i++) { + my $arg = $args[$i]; + $arg = eval { expand_all($arg, \%bindings) }; + unless (defined $arg) { + warn $@; + die "... from the body of $name at $where\n"; + } + $new_bindings{$vars[$i]} = $arg; + } + + $body = eval { expand_all($body, \%new_bindings) }; + unless (defined $body) { + warn $@; + die "... from the body of $name at $where\n"; + } + + # Handle built-in macros. + if ($name eq 'ARG_POSITION') { + if ($body =~ /^I\[(\d+)\]$/) { + $body = $1; + } else { + $body = 0; + } + } elsif ($name eq 'IS_PACKED') { + $body = ($body =~ /^I\[\d+\]$/) ? 0 : 1; } - # Return the size and code for the macro (if any). - $size++; - ($size, $code, $pack_spec); + # Wrap body if needed and return resul.t + $body = "do {\n$body\n} while (0)" + if needs_do_wrapper($body); + ($body,$rest); +} + +# Conservative heuristic to determine whether a do { ... } while(0) +# wrapper is needed. +sub needs_do_wrapper { + local $_ = shift; + + s@^//[|][^\n]*\n@@; + s@^\s*@@s; + s@^/[*].*[*]/\s*@@s; + return 1 if /^(Eterm|Uint|Sint|int|unsigned)/; # Definitely needed. + return 0 if /^do/; + return 0 if /^SET_I/; + return 0 if /^SET_CP/; + return 0 if /^ERTS_NO_FPE_CHECK_INIT/; + return 0 if /^ASSERT/; + return 0 if /^DTRACE/; + return 0 if /^[A-Za-z_]*\s*=/; + return 0 if /^c_p->/; + return 0 if /^[A-Z_]*SWAPOUT/; + return 0 if /^if\s*[(]/; + return 0 if /^goto\b/; + return 0 if /^\d+/; + return 1; # Not sure, say that it is needed. } sub do_pack { - my(@args) = @_; + my($offset,@args) = @_; my($packable_args) = 0; - my @is_packable; # Packability (boolean) for each argument. - my $wide_packing = 0; - my(@orig_args) = @args; + my @bits_needed; # Bits needed for each argument. # - # Count the number of packable arguments. If we encounter any 's' or 'd' - # arguments, packing is not possible. + # Define the minimum number of bits needed for the packable argument types. 
+ # + my %bits_needed = ('x' => 10, + 'y' => 10, + 'Q' => 10, + 'l' => 10, + 'S' => 16, + 'd' => 16, + 't' => 16); + if ($wordsize == 64) { + $bits_needed{'I'} = 32; + } + + # + # Count the number of packable arguments. # - my $packable_types = "xytQ"; foreach my $arg (@args) { - if ($arg =~ /^[$packable_types]/) { + if (defined $bits_needed{$arg}) { $packable_args++; - push @is_packable, 1; - } elsif ($arg =~ /^I/ and $wordsize == 64 and $packable_args < 2) { - $wide_packing = 1; - push @is_packable, 1; - if (++$packable_args == 2) { - # We can only pack two arguments. Turn off packing - # for the rest of the arguments. - $packable_types = "\xFF"; - } - } elsif ($arg =~ /^[sd]/) { - return ('', '', @args); - } elsif ($arg =~ /^[scq]/ and $packable_args > 0) { - # When packing, this operand will be picked up from the - # code array, put onto the packing stack, and later put - # back into a different location in the code. The problem - # is that if this operand is a literal, the original - # location in the code would have been remembered in a - # literal patch. For packing to work, we would have to - # adjust the position in the literal patch. For the - # moment, adding additional instructions to the packing - # engine to handle this does not seem worth it, so we will - # just turn off packing. - return ('', '', @args); + push @bits_needed, $bits_needed{$arg}; } else { - push @is_packable, 0; + push @bits_needed, 0; } } # - # Get out of here if too few or too many arguments. + # Try to pack 'f' and 'j', but not at expense at worse packing + # for other operands. For example, given the arguments "f x x", we + # want the 'x' operands to be packed, not 'f' and 'x' packed and + # the final 'x' not packed. # - return ('', '', @args) if $packable_args < 2; - my($size) = 0; - my($pack_prefix) = ''; - my($down) = ''; # Pack commands (towards instruction - # beginning). - my($up) = ''; # Pack commands (storing back while - # moving forward). + if ($wordsize == 64 and $packable_args == 1) { + for (my $i = 0; $i < @args; $i++) { + if ($args[$i] =~ /^[fj]$/) { + $bits_needed[$i] = 32; + $packable_args++; + last; + } + } + } - my $args_per_word = $args_per_word[$packable_args]; - my @shift; - my @mask; - my @instr; + # + # Nothing to pack unless there are at least 2 packable arguments. + # + return ('', ':', @args) if $packable_args < 2; - if ($wide_packing) { - @shift = ('0', 'BEAM_WIDE_SHIFT'); - @mask = ('BEAM_WIDE_MASK', $WHOLE_WORD); - @instr = ('w', 'i'); - } else { - @shift = @{$pack_shift[$args_per_word]}; - @mask = @{$pack_mask[$args_per_word]}; - @instr = @{$pack_instr[$args_per_word]}; + # + # Determine how many arguments we should pack into each word. + # + my @args_per_word; + my @need_wide_mask; + my $bits = 0; + my $word = 0; + $args_per_word[0] = 0; + $need_wide_mask[0] = 0; + for (my $i = 0; $i < @args; $i++) { + if ($bits_needed[$i]) { + my $needed = $bits_needed[$i]; + + my $next_word = sub { + $word++; + $args_per_word[$word] = 0; + $need_wide_mask[$word] = 0; + $bits = 0; + }; + + if ($bits+$needed > $wordsize) { # Does not fit. + $next_word->(); + } + if ($args_per_word[$word] == 4) { # Can't handle more than 4 args. + $next_word->(); + } + if ($needed == 32 and $args_per_word[$word] > 1) { + # Must only pack two arguments in this word, and there + # are already at least two arguments here. 
+ $next_word->(); + } + $args_per_word[$word]++; + $bits += $needed; + if ($needed == 32) { + $need_wide_mask[$word]++; + } + if ($need_wide_mask[$word] and $bits > 32) { + # Can only pack two things in a word where one + # item is 32 bits. Force the next item into + # the next word. + $bits = $wordsize; + } + } } # + # Try to balance packing between words. + # + if ($args_per_word[$#args_per_word] == 1) { + if ($args_per_word[$#args_per_word-1] < 3) { + pop @args_per_word; + } else { + $args_per_word[$#args_per_word-1]--; + $args_per_word[$#args_per_word]++; + } + } elsif (@args_per_word == 2 and + $args_per_word[0] == 4 and + $args_per_word[1] == 2) { + $args_per_word[0] = 3; + $args_per_word[1] = 3; + } elsif (@args_per_word == 2 and + $args_per_word[0] == 3 and + $args_per_word[1] == 1) { + $args_per_word[0] = 2; + $args_per_word[1] = 2; + } + + my $size = 0; + my $pack_prefix = ''; + my $down = ''; # Pack commands (towards instruction + # beginning). + my $up = ''; # Pack commands (storing back while + # moving forward). + + # Skip an unpackable argument. + my $skip_unpackable = sub { + my($arg) = @_; + + if ($arg_size{$arg}) { + # Save the argument on the pack engine's stack. + my $push = 'g'; + if ($type_bit{$arg} & $type_bit{'q'}) { + # The operand may be a literal. + $push = 'q'; + } elsif ($type_bit{$arg} & $type_bit{'f'}) { + # The operand may be a failure label. + $push = 'f'; + } + $down = "$push${down}"; + $up = "${up}p"; + } + }; + + # # Now generate the packing instructions. One complication is that # the packing engine works from right-to-left, but we must generate # the instructions from left-to-right because we must calculate # instruction sizes from left-to-right. - # - # XXX Packing 3 't's in one word won't work. Sorry. - my $did_some_packing = 0; # Nothing packed yet. - my($ap) = 0; # Argument number within word. - my($tmpnum) = 1; # Number of temporary variable. - my($expr) = ''; - for (my $i = 0; $i < @args; $i++) { - my($reg) = $args[$i]; - my($this_size) = $arg_size{$reg}; - if ($is_packable[$i]) { - $this_size = 0; - $did_some_packing = 1; - - if ($ap == 0) { - $pack_prefix .= "tmp_packed$tmpnum = Arg($size);\n"; - $up .= "p"; - $down = "P$down"; - $this_size = 1; - } - - $down = "$instr[$ap]$down"; - my($unpack) = make_unpack($tmpnum, $shift[$ap], $mask[$ap]); - $args[$i] = "pack:$this_size:$reg" . "b($unpack)"; + my $arg_num = 0; + for (my $word = 0; $word < @args_per_word; $word++) { + my $ap = 0; # Argument number within word. + my $packed_var = "tmp_packed" . ($word+1); + my $args_per_word = $args_per_word[$word]; + my @shift; + my @mask; + my @instr; + + if ($need_wide_mask[$word]) { + @shift = ('0', 'BEAM_WIDE_SHIFT'); + @mask = ('BEAM_WIDE_MASK', $WHOLE_WORD); + @instr = ('w', 'w'); + } else { + @shift = @{$pack_shift[$args_per_word]}; + @mask = @{$pack_mask[$args_per_word]}; + @instr = @{$pack_instr[$args_per_word]}; + } - if (++$ap == $args_per_word) { - $ap = 0; - $tmpnum++; - } - } elsif ($arg_size{$reg} && $did_some_packing) { - # - # This is an argument that can't be packed. Normally, we must - # save it on the pack engine's stack, unless: - # - # 1. The argument has zero size (e.g. r(0)). Such arguments - # will not be loaded. They disappear. - # 2. If the argument is on the left of the first packed argument, - # the packing engine will never access it (because the engine - # operates from right-to-left). 
- # + while ($ap < $args_per_word) { + my $reg = $args[$arg_num]; + my $this_size = $arg_size{$reg}; + if ($bits_needed[$arg_num]) { + $this_size = 0; + + if ($ap == 0) { + $pack_prefix .= "Eterm $packed_var = " . + arg_offset($size+$offset) . ";\n"; + $up .= "p"; + $down = "P$down"; + $this_size = 1; + } + + $down = "$instr[$ap]$down"; + my $unpack = make_unpack($packed_var, $shift[$ap], $mask[$ap]); + $args[$arg_num] = "packed:$reg:$this_size:$reg" . "b($unpack)"; + + $ap++; + } else { + $skip_unpackable->($reg); + } + $size += $this_size; + $arg_num++; + } + } - $down = "g${down}"; - $up = "${up}p"; - } - $size += $this_size; + # + # Skip any unpackable arguments at the end. + # + while ($arg_num < @args) { + $skip_unpackable->($args[$arg_num]); + $arg_num++; } - my $pack_spec = $down . $up; + my $pack_spec = "$down:$up"; return ($pack_prefix, $pack_spec, @args); } sub make_unpack { - my($tmpnum, $shift, $mask) = @_; + my($packed_var, $shift, $mask) = @_; - my($e) = "tmp_packed$tmpnum"; + my $e = $packed_var; $e = "($e>>$shift)" if $shift; $e .= "&$mask" unless $mask eq $WHOLE_WORD; $e; } +sub build_pack_spec { + my $pack_spec = shift; + return '' if $pack_spec eq ''; + my($down,$up) = split /:/, $pack_spec; + while ($down =~ /[gfq]$/ and $up =~ /^p/) { + $down = substr($down, 0, -1); + $up = substr($up, 1); + } + "$down$up"; +} + sub quote { local($_) = @_; return "'$_'" if $_ eq 'try'; @@ -1286,8 +1870,11 @@ sub parse_transformation { # my @to; - if ($to =~ /^(\w+)\((.*?)\)/) { - my($name, $arglist) = ($1, $2); + if ($to =~ /^(\w+)\((.*?)\)(.*)/) { + my($name, $arglist, $garbage) = ($1, $2, $3); + if ($garbage =~ /\S/) { + error("garbage after call to '$name()'"); + } @to = (compile_transform_function($name, split(/\s*,\s*/, $arglist))); } else { @to = split(/\s*\|\s*/, $to); diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables index 47e1528958..094a35ae4b 100755 --- a/erts/emulator/utils/make_tables +++ b/erts/emulator/utils/make_tables @@ -59,7 +59,6 @@ my %dirty_bif_tab; my @bif; my @bif_info; -my $dirty_schedulers = 'no'; my $dirty_schedulers_test = 'no'; my $hipe = 'no'; @@ -73,10 +72,6 @@ while (@ARGV && $ARGV[0] =~ /^-(\w+)/) { $include = shift; die "No directory for -include argument specified" unless defined $include; - } elsif($opt eq '-ds') { - $dirty_schedulers = shift; - die "No -ds argument specified" - unless defined $dirty_schedulers; } elsif($opt eq '-dst') { $dirty_schedulers_test = shift; die "No -dst argument specified" @@ -140,21 +135,19 @@ while (<>) { push(@bif_info, [$type, $sched_type, $alias3, $alias]); } elsif ($type eq 'dirty-cpu' or $type eq 'dirty-io' or $type eq 'dirty-cpu-test' or $type eq 'dirty-io-test') { - if ($dirty_schedulers eq 'yes') { - my($bif,$other) = (@args); - $bif =~ m@^([a-z_.'0-9]+):(.*)/(\d)$@ or error("invalid BIF"); - my($mod,$name,$arity) = ($1,$2,$3); - my $mfa = "$mod:$name/$arity"; - if (($type eq 'dirty-cpu') - or (($dirty_schedulers_test eq 'yes') - and ($type eq 'dirty-cpu-test'))) { - $dirty_bif_tab{$mfa} = 'dirty_cpu'; - } elsif (($type eq 'dirty-io') - or (($dirty_schedulers_test eq 'yes') - and ($type eq 'dirty-io-test'))) { - $dirty_bif_tab{$mfa} = 'dirty_io'; - } - } + my($bif,$other) = (@args); + $bif =~ m@^([a-z_.'0-9]+):(.*)/(\d)$@ or error("invalid BIF"); + my($mod,$name,$arity) = ($1,$2,$3); + my $mfa = "$mod:$name/$arity"; + if (($type eq 'dirty-cpu') + or (($dirty_schedulers_test eq 'yes') + and ($type eq 'dirty-cpu-test'))) { + $dirty_bif_tab{$mfa} = 'dirty_cpu'; + } elsif (($type eq 
'dirty-io') + or (($dirty_schedulers_test eq 'yes') + and ($type eq 'dirty-io-test'))) { + $dirty_bif_tab{$mfa} = 'dirty_io'; + } } else { error("invalid line"); } diff --git a/erts/etc/common/Makefile.in b/erts/etc/common/Makefile.in index 5b1b9119ce..1f35cef669 100644 --- a/erts/etc/common/Makefile.in +++ b/erts/etc/common/Makefile.in @@ -56,7 +56,7 @@ ERTS_INCL = -I$(ERL_TOP)/erts/include \ CC = @CC@ WFLAGS = @WFLAGS@ -CFLAGS = @CFLAGS@ @DEFS@ $(TYPE_FLAGS) @WFLAGS@ -I$(SYSDIR) -I$(EMUDIR) \ +CFLAGS = @CFLAGS@ @DEFS@ $(TYPE_FLAGS) @WFLAGS@ -I$(SYSOSDIR) -I$(EMUDIR) -I. \ -I$(COMSYSDIR) $(ERTS_INCL) -DOTP_SYSTEM_VERSION=\"$(SYSTEM_VSN)\" LD = @LD@ LIBS = @LIBS@ @@ -69,9 +69,9 @@ endif ifeq ($(TARGET),win32) ifeq ($(TYPE),debug) -CFLAGS = $(subst -O2,-g,@CFLAGS@ @DEFS@ $(TYPE_FLAGS) @WFLAGS@ -I$(SYSDIR) \ - -I$(EMUDIR) -I$(COMSYSDIR) $(ERTS_INCL) \ - -DOTP_SYSTEM_VERSION=\"$(SYSTEM_VSN)\") +CFLAGS = $(subst -O2,-g,@CFLAGS@ @DEFS@ $(TYPE_FLAGS) @WFLAGS@ -I$(SYSOSDIR) \ + -I$(EMUDIR) -I$(COMSYSDIR) $(ERTS_INCL) \ + -DOTP_SYSTEM_VERSION=\"$(SYSTEM_VSN)\") LDFLAGS += -g endif endif @@ -81,7 +81,8 @@ OBJDIR = $(ERL_TOP)/erts/obj$(TYPEMARKER)/$(TARGET) EMUDIR = $(ERL_TOP)/erts/emulator/beam COMSYSDIR = $(ERL_TOP)/erts/emulator/sys/common EMUOSDIR = $(ERL_TOP)/erts/emulator/@ERLANG_OSTYPE@ -SYSDIR = $(ERL_TOP)/erts/emulator/sys/@ERLANG_OSTYPE@ +SYSDIR = $(ERL_TOP)/erts/emulator/sys/common +SYSOSDIR = $(ERL_TOP)/erts/emulator/sys/@ERLANG_OSTYPE@ DRVDIR = $(ERL_TOP)/erts/emulator/drivers/@ERLANG_OSTYPE@ UXETC = ../unix WINETC = ../win32 diff --git a/erts/etc/common/ct_run.c b/erts/etc/common/ct_run.c index 6639c83778..efa7ac3493 100644 --- a/erts/etc/common/ct_run.c +++ b/erts/etc/common/ct_run.c @@ -20,16 +20,7 @@ /* * Purpose: Common Test front-end. */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "sys.h" -#ifdef __WIN32__ -#include <winbase.h> -#endif - -#include <ctype.h> +#include "etc_common.h" #define NO 0 #define YES 1 diff --git a/erts/etc/common/dialyzer.c b/erts/etc/common/dialyzer.c index c8d977f6de..0e74eb065b 100644 --- a/erts/etc/common/dialyzer.c +++ b/erts/etc/common/dialyzer.c @@ -20,16 +20,8 @@ /* * Purpose: Dialyzer front-end. */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "sys.h" -#ifdef __WIN32__ -#include <winbase.h> -#endif -#include <ctype.h> +#include "etc_common.h" #define NO 0 #define YES 1 diff --git a/erts/etc/common/erlc.c b/erts/etc/common/erlc.c index cbbd2a37cd..8cfd98bcc4 100644 --- a/erts/etc/common/erlc.c +++ b/erts/etc/common/erlc.c @@ -20,19 +20,7 @@ /* * Purpose: Common compiler front-end. */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "sys.h" -#ifdef __WIN32__ -#include <winbase.h> -/* FIXE ME config_win32.h? */ -#define HAVE_STRERROR 1 -#define snprintf _snprintf -#endif - -#include <ctype.h> +#include "etc_common.h" #define NO 0 #define YES 1 diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c index 51ed2d0dff..6b194e25da 100644 --- a/erts/etc/common/erlexec.c +++ b/erts/etc/common/erlexec.c @@ -23,14 +23,9 @@ * additions required for Windows NT. */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif +#include "etc_common.h" -#include "sys.h" #include "erl_driver.h" -#include <stdlib.h> -#include <stdarg.h> #include "erl_misc_utils.h" #ifdef __WIN32__ @@ -194,9 +189,7 @@ void error(char* format, ...); * Local functions. 
*/ -#if !defined(ERTS_HAVE_SMP_EMU) || !defined(ERTS_HAVE_PLAIN_EMU) static void usage_notsup(const char *switchname, const char *alt); -#endif static char **build_args_from_env(char *env_var); static char **build_args_from_string(char *env_var); static void initial_argv_massage(int *argc, char ***argv); @@ -244,7 +237,7 @@ static int verbose = 0; /* If non-zero, print some extra information. */ static int start_detached = 0; /* If non-zero, the emulator should be * started detached (in the background). */ -static int start_smp_emu = 0; /* Start the smp emulator. */ +static int start_smp_emu = 1; /* Start the smp emulator. */ static const char* emu_type = 0; /* Type of emulator (lcnt, valgrind, etc) */ #ifdef __WIN32__ @@ -467,10 +460,6 @@ int main(int argc, char **argv) * Construct the path of the executable. */ cpuinfo = erts_cpu_info_create(); - /* '-smp auto' is default */ -#ifdef ERTS_HAVE_SMP_EMU - start_smp_emu = 1; -#endif #if defined(__WIN32__) && defined(WIN32_ALWAYS_DEBUG) emu_type = "debug"; @@ -502,23 +491,13 @@ int main(int argc, char **argv) i++; smp_enable: ; -#if !defined(ERTS_HAVE_SMP_EMU) - usage_notsup("-smp enable", ""); -#endif } else if (strcmp(argv[i+1], "disable") == 0) { i++; smp_disable: -#ifdef ERTS_HAVE_PLAIN_EMU - start_smp_emu = 0; -#else usage_notsup("-smp disable", " Use \"+S 1\" instead."); -#endif } else { smp: ; -#if !defined(ERTS_HAVE_SMP_EMU) - usage_notsup("-smp", ""); -#endif } } else if (strcmp(argv[i], "-smpenable") == 0) { goto smp_enable; @@ -886,8 +865,8 @@ int main(int argc, char **argv) case 'c': argv[i][0] = '-'; if (argv[i][2] == '\0' && i+1 < argc) { - if (sys_strcmp(argv[i+1], "true") == 0 - || sys_strcmp(argv[i+1], "false") == 0) { + if (strcmp(argv[i+1], "true") == 0 + || strcmp(argv[i+1], "false") == 0) { add_Eargs(argv[i]); add_Eargs(argv[i+1]); i++; @@ -1159,15 +1138,6 @@ usage_aux(void) #ifdef __WIN32__ "[-start_erl [datafile]] " #endif - "[-smp [auto" -#ifdef ERTS_HAVE_SMP_EMU - "|enable" -#endif -#ifdef ERTS_HAVE_PLAIN_EMU - "|disable" -#endif - "]" - "] " "[-make] [-man [manopts] MANPAGE] [-x] [-emu_args] [-start_epmd BOOLEAN] " "[-args_file FILENAME] [+A THREADS] [+a SIZE] [+B[c|d|i]] [+c [BOOLEAN]] " "[+C MODE] [+h HEAP_SIZE_OPTION] [+K BOOLEAN] " @@ -1188,14 +1158,12 @@ usage(const char *switchname) usage_aux(); } -#if !defined(ERTS_HAVE_SMP_EMU) || !defined(ERTS_HAVE_PLAIN_EMU) static void usage_notsup(const char *switchname, const char *alt) { fprintf(stderr, "Argument \'%s\' not supported.%s\n", switchname, alt); usage_aux(); } -#endif static void usage_format(char *format, ...) 
@@ -2195,18 +2163,18 @@ static WCHAR *utf8_to_utf16(unsigned char *bytes) res = target = emalloc((num + 1) * sizeof(WCHAR)); while (*bytes) { if (((*bytes) & ((unsigned char) 0x80)) == 0) { - unipoint = (Uint) *bytes; + unipoint = (unsigned int) *bytes; ++bytes; } else if (((*bytes) & ((unsigned char) 0xE0)) == 0xC0) { unipoint = - (((Uint) ((*bytes) & ((unsigned char) 0x1F))) << 6) | - ((Uint) (bytes[1] & ((unsigned char) 0x3F))); + (((unsigned int) ((*bytes) & ((unsigned char) 0x1F))) << 6) | + ((unsigned int) (bytes[1] & ((unsigned char) 0x3F))); bytes += 2; } else if (((*bytes) & ((unsigned char) 0xF0)) == 0xE0) { unipoint = - (((Uint) ((*bytes) & ((unsigned char) 0xF))) << 12) | - (((Uint) (bytes[1] & ((unsigned char) 0x3F))) << 6) | - ((Uint) (bytes[2] & ((unsigned char) 0x3F))); + (((unsigned int) ((*bytes) & ((unsigned char) 0xF))) << 12) | + (((unsigned int) (bytes[1] & ((unsigned char) 0x3F))) << 6) | + ((unsigned int) (bytes[2] & ((unsigned char) 0x3F))); if (unipoint > 0xFFFF) { unipoint = (unsigned int) '?'; } @@ -2225,7 +2193,7 @@ static WCHAR *utf8_to_utf16(unsigned char *bytes) static int put_utf8(WCHAR ch, unsigned char *target, int sz, int *pos) { - Uint x = (Uint) ch; + unsigned int x = (unsigned int) ch; if (x < 0x80) { if (*pos >= sz) { return -1; diff --git a/erts/etc/common/escript.c b/erts/etc/common/escript.c index 9cd5dd3fab..8241675200 100644 --- a/erts/etc/common/escript.c +++ b/erts/etc/common/escript.c @@ -20,16 +20,8 @@ /* * Purpose: escript front-end. */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "sys.h" -#ifdef __WIN32__ -#include <winbase.h> -#endif -#include <ctype.h> +#include "etc_common.h" static int debug = 0; /* Bit flags for debug printouts. */ diff --git a/erts/etc/common/etc_common.h b/erts/etc/common/etc_common.h new file mode 100644 index 0000000000..3f26064a9e --- /dev/null +++ b/erts/etc/common/etc_common.h @@ -0,0 +1,65 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2017. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * %CopyrightEnd% + */ +/* + * Purpose: common includes for all etc programs + */ +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#if !defined(__WIN32__) +# include <dirent.h> +# include <limits.h> +# include <sys/stat.h> +# include <sys/types.h> +# include <unistd.h> +#else +# include <windows.h> +# include <io.h> +# include <winbase.h> +# include <process.h> +#endif + +#include <errno.h> +#include <fcntl.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <ctype.h> + +/* + * Make sure that MAXPATHLEN is defined. + */ +#ifndef MAXPATHLEN +# ifdef PATH_MAX +# define MAXPATHLEN PATH_MAX +# else +# define MAXPATHLEN 2048 +# endif +#endif + +#include "erl_printf.h" + +#ifdef __WIN32__ +/* FIXE ME config_win32.h? 
*/ +#define HAVE_STRERROR 1 +#define snprintf _snprintf +#endif diff --git a/erts/etc/common/typer.c b/erts/etc/common/typer.c index 6bae9f96b7..b64cbb4a92 100644 --- a/erts/etc/common/typer.c +++ b/erts/etc/common/typer.c @@ -20,16 +20,8 @@ /* * Purpose: Typer front-end. */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "sys.h" -#ifdef __WIN32__ -#include <winbase.h> -#endif -#include <ctype.h> +#include "etc_common.h" #define NO 0 #define YES 1 diff --git a/erts/etc/unix/cerl.src b/erts/etc/unix/cerl.src index 30f2d831b5..8d5882cf32 100644 --- a/erts/etc/unix/cerl.src +++ b/erts/etc/unix/cerl.src @@ -139,29 +139,6 @@ while [ $# -gt 0 ]; do shift unset DISPLAY ;; - "-smp") - shift - if [ $# -le 0 ]; then - eeargs_add -smp - else - case $1 in - disable) - shift - eeargs_add -smpdisable - ;; - enable) - shift - eeargs_add -smp - ;; - *) - eeargs_add -smp - esac - fi - ;; - "-smpdisable") - shift - eeargs_add -smpdisable - ;; "-lcnt") shift cargs="$cargs -lcnt" diff --git a/erts/etc/unix/dyn_erl.c b/erts/etc/unix/dyn_erl.c index d6d2201648..5c7c3cad38 100644 --- a/erts/etc/unix/dyn_erl.c +++ b/erts/etc/unix/dyn_erl.c @@ -22,13 +22,7 @@ * This is a C version of the erl Bourne shell script */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "sys.h" -#include <stdlib.h> -#include <stdarg.h> +#include "etc_common.h" #define BOOL int #define TRUE 1 diff --git a/erts/etc/unix/run_erl.c b/erts/etc/unix/run_erl.c index f05c729eeb..f928163705 100644 --- a/erts/etc/unix/run_erl.c +++ b/erts/etc/unix/run_erl.c @@ -627,12 +627,14 @@ static void pass_on(pid_t childpid) status("Pty master read; "); #endif if ((len = sf_read(mfd, buf, BUFSIZ)) <= 0) { + int saved_errno = errno; sf_close(rfd); if(wfd) sf_close(wfd); sf_close(mfd); unlink(fifo1); unlink(fifo2); if (len < 0) { + errno = saved_errno; if(errno == EIO) ERROR0(LOG_ERR,"Erlang closed the connection."); else @@ -1342,13 +1344,15 @@ static int sf_open(const char *path, int type, mode_t mode) { return fd; } + static int sf_close(int fd) { int res = 0; - do { res = close(fd); } while(fd < 0 && errno == EINTR); + do { res = close(fd); } while(res < 0 && errno == EINTR); return res; } + /* Extract any control sequences that are ment only for run_erl * and should not be forwarded to the pty. 
*/ diff --git a/erts/lib_src/common/erl_printf.c b/erts/lib_src/common/erl_printf.c index 7781fc2196..9031a4c5b7 100644 --- a/erts/lib_src/common/erl_printf.c +++ b/erts/lib_src/common/erl_printf.c @@ -63,7 +63,7 @@ void (*erts_printf_unblock_fpe)(int) = NULL; #undef FWRITE #undef PUTC_ON_SMALL_WRITES -#if defined(USE_THREADS) && defined(HAVE_FLOCKFILE) +#if defined(HAVE_FLOCKFILE) # define FLOCKFILE(FP) flockfile(FP) # define FUNLOCKFILE(FP) funlockfile(FP) # ifdef HAVE_PUTC_UNLOCKED @@ -73,11 +73,7 @@ void (*erts_printf_unblock_fpe)(int) = NULL; # ifdef HAVE_FWRITE_UNLOCKED # define FWRITE fwrite_unlocked # endif -#endif -#if !defined(USE_THREADS) && defined(putc) && !defined(fwrite) -# define PUTC_ON_SMALL_WRITES -#endif -#if !defined(FLOCKFILE) || !defined(FUNLOCKFILE) +#else # define FLOCKFILE(FP) # define FUNLOCKFILE(FP) #endif diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam Binary files differindex 6fa48e8582..e258ff5480 100644 --- a/erts/preloaded/ebin/erlang.beam +++ b/erts/preloaded/ebin/erlang.beam diff --git a/erts/preloaded/ebin/erts_internal.beam b/erts/preloaded/ebin/erts_internal.beam Binary files differindex 6691749dcb..5416826f19 100644 --- a/erts/preloaded/ebin/erts_internal.beam +++ b/erts/preloaded/ebin/erts_internal.beam diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl index f796ea64d3..d43fdc454c 100644 --- a/erts/preloaded/src/erlang.erl +++ b/erts/preloaded/src/erlang.erl @@ -48,6 +48,12 @@ await_sched_wall_time_modifications/2, gather_gc_info_result/1]). +-export([dist_ctrl_input_handler/2, + dist_ctrl_put_data/2, + dist_ctrl_get_data/1, + dist_ctrl_get_data_notification/1, + dist_get_stat/1]). + -deprecated([now/0]). %% Get rid of autoimports of spawn to avoid clashes with ourselves. @@ -87,6 +93,10 @@ -export_type([prepared_code/0]). +-opaque dist_handle() :: atom(). + +-export_type([dist_handle/0]). + -type iovec() :: [binary()]. -export_type([iovec/0]). @@ -1651,7 +1661,7 @@ setnode(_P1, _P2) -> erlang:nif_error(undefined). %% setnode/3 --spec erlang:setnode(P1, P2, P3) -> true when +-spec erlang:setnode(P1, P2, P3) -> dist_handle() when P1 :: atom(), P2 :: port(), P3 :: {term(), term(), term(), term()}. @@ -3214,6 +3224,47 @@ port_get_data(_Port) -> erlang:nif_error(undefined). %% +%% Distribution channel management +%% + +-spec erlang:dist_ctrl_input_handler(DHandle, InputHandler) -> 'ok' when + DHandle :: dist_handle(), + InputHandler :: pid(). + +dist_ctrl_input_handler(_DHandle, _InputHandler) -> + erlang:nif_error(undefined). + +-spec erlang:dist_ctrl_put_data(DHandle, Data) -> 'ok' when + DHandle :: dist_handle(), + Data :: iodata(). + +dist_ctrl_put_data(_DHandle, _Data) -> + erlang:nif_error(undefined). + +-spec erlang:dist_ctrl_get_data(DHandle) -> Data | 'none' when + DHandle :: dist_handle(), + Data :: iodata(). + +dist_ctrl_get_data(_DHandle) -> + erlang:nif_error(undefined). + +-spec erlang:dist_ctrl_get_data_notification(DHandle) -> 'ok' when + DHandle :: dist_handle(). + +dist_ctrl_get_data_notification(_DHandle) -> + erlang:nif_error(undefined). + +-spec erlang:dist_get_stat(DHandle) -> Res when + DHandle :: dist_handle(), + InputPackets :: non_neg_integer(), + OutputPackets :: non_neg_integer(), + PendingOutputPackets :: boolean(), + Res :: {'ok', InputPackets, OutputPackets, PendingOutputPackets}. + +dist_get_stat(_DHandle) -> + erlang:nif_error(undefined). 
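The five BIFs above make up the process-level side of the new distribution-channel API; erlang:setnode/3 now returns the dist_handle() they operate on. A minimal sketch, not part of this patch, of how a controller process might drive such a channel follows; the dist_data notification message and the my_transport send/recv calls are assumptions used only for illustration.

    %% Output direction: ask for a notification, wait for it, then drain
    %% the channel and hand each packet to the (assumed) transport.
    output_loop(DHandle, Sock) ->
        ok = erlang:dist_ctrl_get_data_notification(DHandle),
        receive
            dist_data -> ok                           %% assumed notification message
        end,
        ok = drain(DHandle, Sock),
        output_loop(DHandle, Sock).

    drain(DHandle, Sock) ->
        case erlang:dist_ctrl_get_data(DHandle) of
            none ->
                ok;
            Data ->
                ok = my_transport:send(Sock, Data),   %% hypothetical transport
                drain(DHandle, Sock)
        end.

    %% Input direction: feed data received from the transport into the VM.
    %% A dedicated input process would first register itself with
    %% erlang:dist_ctrl_input_handler(DHandle, self()).
    input_loop(DHandle, Sock) ->
        {ok, Packet} = my_transport:recv(Sock),       %% hypothetical transport
        ok = erlang:dist_ctrl_put_data(DHandle, Packet),
        input_loop(DHandle, Sock).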
+ +%% %% If the emulator wants to perform a distributed command and %% a connection is not established to the actual node the following %% functions are called in order to set up the connection and then diff --git a/erts/preloaded/src/erts_internal.erl b/erts/preloaded/src/erts_internal.erl index 26fb1458af..bb1824ecd4 100644 --- a/erts/preloaded/src/erts_internal.erl +++ b/erts/preloaded/src/erts_internal.erl @@ -61,6 +61,8 @@ -export([trace/3, trace_pattern/3]). +-export([dist_ctrl_put_data/2]). + %% Auto import name clash -export([check_process_code/1]). @@ -461,3 +463,38 @@ trace(_PidSpec, _How, _FlagList) -> FlagList :: [ ]. trace_pattern(_MFA, _MatchSpec, _FlagList) -> erlang:nif_error(undefined). + +-spec dist_ctrl_put_data(DHandle, Data) -> 'ok' when + DHandle :: erlang:dist_handle(), + Data :: iolist(). + +dist_ctrl_put_data(DHandle, IoList) -> + %% + %% Helper for erlang:dist_ctrl_put_data/2 + %% + %% erlang:dist_ctrl_put_data/2 traps to + %% this function if second argument is + %% a list... + %% + try + Binary = erlang:iolist_to_binary(IoList), + %% Restart erlang:dist_ctrl_put_data/2 + %% with the iolist converted to a binary... + erlang:dist_ctrl_put_data(DHandle, Binary) + catch + Class : Reason -> + %% Throw exception as if thrown from + %% erlang:dist_ctrl_put_data/2 ... + RootST = try erlang:error(Reason) + catch + error:Reason -> + case erlang:get_stacktrace() of + [] -> []; + ST -> tl(ST) + end + end, + StackTrace = [{erlang, dist_ctrl_put_data, + [DHandle, IoList], []} + | RootST], + erlang:raise(Class, Reason, StackTrace) + end. diff --git a/lib/common_test/src/ct_slave.erl b/lib/common_test/src/ct_slave.erl index 4188bd7c3b..61e6446df8 100644 --- a/lib/common_test/src/ct_slave.erl +++ b/lib/common_test/src/ct_slave.erl @@ -38,7 +38,8 @@ -record(options, {username, password, boot_timeout, init_timeout, startup_timeout, startup_functions, monitor_master, - kill_if_fail, erl_flags, env, ssh_port, ssh_opts}). + kill_if_fail, erl_flags, env, ssh_port, ssh_opts, + stop_timeout}). %%%----------------------------------------------------------------- %%% @spec start(Node) -> Result @@ -198,6 +199,7 @@ start(Host, Node, Opts) -> end end. +%%%----------------------------------------------------------------- %%% @spec stop(Node) -> Result %%% Node = atom() %%% Result = {ok, NodeName} | @@ -205,16 +207,41 @@ start(Host, Node, Opts) -> %%% Reason = not_started | %%% not_connected | %%% stop_timeout - %%% NodeName = atom() %%% @doc Stops the running Erlang node with name <code>Node</code> on %%% the localhost. stop(Node) -> stop(gethostname(), Node). -%%% @spec stop(Host, Node) -> Result +%%%----------------------------------------------------------------- +%%% @spec stop(HostOrNode, NodeOrOpts) -> Result +%%% HostOrNode = atom() +%%% NodeOrOpts = atom() | list() +%%% Result = {ok, NodeName} | +%%% {error, Reason, NodeName} +%%% Reason = not_started | +%%% not_connected | +%%% stop_timeout +%%% NodeName = atom() +%%% @doc Stops the running Erlang node with default options on a specified +%%% host, or on the local host with specified options. That is, +%%% the call is interpreted as <code>stop(Host, Node)</code> when the +%%% second argument is atom-valued and <code>stop(Node, Opts)</code> +%%% when it's list-valued. +%%% @see stop/3 +stop(_HostOrNode = Node, _NodeOrOpts = Opts) %% match to satiate edoc + when is_list(Opts) -> + stop(gethostname(), Node, Opts); + +stop(Host, Node) -> + stop(Host, Node, []). 
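For reference, the extended ct_slave stop API reads as below at the call site. The node and host atoms are placeholders; each call returns {ok, ENode} with the full node name, or {error, Reason, ENode}, and stop_timeout is given in seconds (on expiry the result is {error, stop_timeout, ENode}).

    ct_slave:stop(my_node),                               %% local host, default 5 s timeout
    ct_slave:stop(my_node, [{stop_timeout, 30}]),         %% local host, wait up to 30 s
    ct_slave:stop(my_host, my_node),                      %% explicit host, default options
    ct_slave:stop(my_host, my_node, [{stop_timeout, 30}]).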
+ +%%% @spec stop(Host, Node, Opts) -> Result %%% Host = atom() %%% Node = atom() +%%% Opts = [OptTuples] +%%% OptTuples = {stop_timeout, StopTimeout} +%%% StopTimeout = integer() %%% Result = {ok, NodeName} | %%% {error, Reason, NodeName} %%% Reason = not_started | @@ -222,12 +249,19 @@ stop(Node) -> %%% stop_timeout %%% NodeName = atom() %%% @doc Stops the running Erlang node with name <code>Node</code> on -%%% host <code>Host</code>. -stop(Host, Node) -> +%%% host <code>Host</code> as specified by options <code>Opts</code>. +%%% +%%% <p>Option <code>stop_timeout</code> specifies, in seconds, +%%% the time to wait until the node is disconnected. +%%% Defaults to 5 seconds. If this timeout occurs, +%%% the result <code>{error, stop_timeout, NodeName}</code> is returned. +%%% +stop(Host, Node, Opts) -> ENode = enodename(Host, Node), case is_started(ENode) of {true, connected}-> - do_stop(ENode); + OptionsRec = fetch_options(Opts), + do_stop(ENode, OptionsRec); {true, not_connected}-> {error, not_connected, ENode}; false-> @@ -257,11 +291,13 @@ fetch_options(Options) -> EnvVars = get_option_value(env, Options, []), SSHPort = get_option_value(ssh_port, Options, []), SSHOpts = get_option_value(ssh_opts, Options, []), + StopTimeout = get_option_value(stop_timeout, Options, 5), #options{username=UserName, password=Password, boot_timeout=BootTimeout, init_timeout=InitTimeout, startup_timeout=StartupTimeout, startup_functions=StartupFunctions, monitor_master=Monitor, kill_if_fail=KillIfFail, - erl_flags=ErlFlags, env=EnvVars, ssh_port=SSHPort, ssh_opts=SSHOpts}. + erl_flags=ErlFlags, env=EnvVars, ssh_port=SSHPort, ssh_opts=SSHOpts, + stop_timeout=StopTimeout}. % send a message when slave node is started % @hidden @@ -461,6 +497,8 @@ wait_for_node_alive(Node, N) -> % call init:stop on a remote node do_stop(ENode) -> + do_stop(ENode, fetch_options([])). +do_stop(ENode, Options) -> {Cover,MainCoverNode} = case test_server:is_cover() of true -> @@ -471,7 +509,8 @@ do_stop(ENode) -> {false,undefined} end, spawn(ENode, init, stop, []), - case wait_for_node_dead(ENode, 5) of + StopTimeout = Options#options.stop_timeout, + case wait_for_node_dead(ENode, StopTimeout) of {ok,ENode} -> if Cover -> %% To avoid that cover is started again if a node diff --git a/lib/common_test/test_server/ts_install.erl b/lib/common_test/test_server/ts_install.erl index c4e0223ac7..c5631fb9c3 100644 --- a/lib/common_test/test_server/ts_install.erl +++ b/lib/common_test/test_server/ts_install.erl @@ -408,17 +408,13 @@ off_heap_msgq() -> end. schedulers() -> - case catch erlang:system_info(smp_support) of - true -> - case {erlang:system_info(schedulers), - erlang:system_info(schedulers_online)} of - {S,S} -> - "/S"++integer_to_list(S); - {S,O} -> - "/S"++integer_to_list(S) ++ ":" ++ - integer_to_list(O) - end; - _ -> "" + case {erlang:system_info(schedulers), + erlang:system_info(schedulers_online)} of + {S,S} -> + "/S"++integer_to_list(S); + {S,O} -> + "/S"++integer_to_list(S) ++ ":" ++ + integer_to_list(O) end. 
bind_type() -> diff --git a/lib/common_test/test_server/ts_run.erl b/lib/common_test/test_server/ts_run.erl index e22fa8d196..2736010551 100644 --- a/lib/common_test/test_server/ts_run.erl +++ b/lib/common_test/test_server/ts_run.erl @@ -207,11 +207,7 @@ make_command(Vars, Spec, State) -> _ -> ok end, - "cerl -valgrind" ++ - case erlang:system_info(smp_support) of - true -> " -smp"; - false -> "" - end + "cerl -valgrind" end, Naming = case ts_lib:var(longnames, Vars) of diff --git a/lib/compiler/doc/src/compile.xml b/lib/compiler/doc/src/compile.xml index 10164890f2..b398871ddf 100644 --- a/lib/compiler/doc/src/compile.xml +++ b/lib/compiler/doc/src/compile.xml @@ -123,6 +123,17 @@ in the Efficiency Guide.</p> </item> + <tag><c>{compile_info, [{atom(), term()}]}</c></tag> + <item> + <p>Allows compilers built on top of <c>compile</c> to attach + extra compilation metadata to the <c>compile_info</c> chunk + in the generated beam file.</p> + + <p>It is advised for compilers to remove all non-deterministic + information if the <c>deterministic</c> option is supported and + it was supplied by the user.</p> + </item> + <tag><c>compressed</c></tag> <item> <p>The compiler will compress the generated object code, diff --git a/lib/compiler/src/Makefile b/lib/compiler/src/Makefile index ef6db66ff6..9b22e5197b 100644 --- a/lib/compiler/src/Makefile +++ b/lib/compiler/src/Makefile @@ -83,6 +83,7 @@ MODULES = \ core_scan \ erl_bifs \ rec_env \ + sys_core_alias \ sys_core_bsm \ sys_core_dsetel \ sys_core_fold \ @@ -194,6 +195,7 @@ $(EBIN)/core_lib.beam: core_parse.hrl $(EBIN)/core_lint.beam: core_parse.hrl $(EBIN)/core_parse.beam: core_parse.hrl $(EGEN)/core_parse.erl $(EBIN)/core_pp.beam: core_parse.hrl +$(EBIN)/sys_core_alias.beam: core_parse.hrl $(EBIN)/sys_core_dsetel.beam: core_parse.hrl $(EBIN)/sys_core_fold.beam: core_parse.hrl $(EBIN)/sys_core_fold_lists.beam: core_parse.hrl diff --git a/lib/compiler/src/beam_asm.erl b/lib/compiler/src/beam_asm.erl index c35efdfc9d..9ecbb7884c 100644 --- a/lib/compiler/src/beam_asm.erl +++ b/lib/compiler/src/beam_asm.erl @@ -21,7 +21,7 @@ -module(beam_asm). --export([module/5]). +-export([module/4]). -export([encode/2]). -export_type([fail/0,label/0,reg/0,src/0,module_code/0,function_name/0]). @@ -55,20 +55,20 @@ -type module_code() :: {module(),[_],[_],[asm_function()],pos_integer()}. --spec module(module_code(), [{binary(), binary()}], [_], [compile:option()], [compile:option()]) -> +-spec module(module_code(), [{binary(), binary()}], [{atom(),term()}], [compile:option()]) -> {'ok',binary()}. -module(Code, ExtraChunks, SourceFile, Opts, CompilerOpts) -> - {ok,assemble(Code, ExtraChunks, SourceFile, Opts, CompilerOpts)}. +module(Code, ExtraChunks, CompileInfo, CompilerOpts) -> + {ok,assemble(Code, ExtraChunks, CompileInfo, CompilerOpts)}. -assemble({Mod,Exp0,Attr0,Asm0,NumLabels}, ExtraChunks, SourceFile, Opts, CompilerOpts) -> +assemble({Mod,Exp0,Attr0,Asm0,NumLabels}, ExtraChunks, CompileInfo, CompilerOpts) -> {1,Dict0} = beam_dict:atom(Mod, beam_dict:new()), {0,Dict1} = beam_dict:fname(atom_to_list(Mod) ++ ".erl", Dict0), NumFuncs = length(Asm0), {Asm,Attr} = on_load(Asm0, Attr0), Exp = cerl_sets:from_list(Exp0), {Code,Dict2} = assemble_1(Asm, Exp, Dict1, []), - build_file(Code, Attr, Dict2, NumLabels, NumFuncs, ExtraChunks, SourceFile, Opts, CompilerOpts). + build_file(Code, Attr, Dict2, NumLabels, NumFuncs, ExtraChunks, CompileInfo, CompilerOpts). 
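The CompileInfo list threaded through assemble/4 above ultimately comes from the new {compile_info, ...} option documented earlier in this patch. A hedged caller-side example, where the frontend keys are invented and Forms is assumed to hold the module's abstract forms; the extra pairs land in the CInf chunk next to the compiler version and can be read back with beam_lib:

    {ok, Mod, Beam} =
        compile:forms(Forms, [binary,
                              {compile_info, [{frontend, my_lang},
                                              {frontend_vsn, "1.0"}]}]),
    {ok, {Mod, [{compile_info, Info}]}} = beam_lib:chunks(Beam, [compile_info]),
    {frontend, my_lang} = lists:keyfind(frontend, 1, Info).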
on_load(Fs0, Attr0) -> case proplists:get_value(on_load, Attr0) of @@ -111,7 +111,7 @@ assemble_function([H|T], Acc, Dict0) -> assemble_function([], Code, Dict) -> {Code, Dict}. -build_file(Code, Attr, Dict, NumLabels, NumFuncs, ExtraChunks, SourceFile, Opts, CompilerOpts) -> +build_file(Code, Attr, Dict, NumLabels, NumFuncs, ExtraChunks, CompileInfo, CompilerOpts) -> %% Create the code chunk. CodeChunk = chunk(<<"Code">>, @@ -182,7 +182,7 @@ build_file(Code, Attr, Dict, NumLabels, NumFuncs, ExtraChunks, SourceFile, Opts, Essentials1 = [iolist_to_binary(C) || C <- Essentials0], MD5 = module_md5(Essentials1), Essentials = finalize_fun_table(Essentials1, MD5), - {Attributes,Compile} = build_attributes(Opts, SourceFile, Attr, MD5), + {Attributes,Compile} = build_attributes(Attr, CompileInfo, MD5), AttrChunk = chunk(<<"Attr">>, Attributes), CompileChunk = chunk(<<"CInf">>, Compile), @@ -192,7 +192,7 @@ build_file(Code, Attr, Dict, NumLabels, NumFuncs, ExtraChunks, SourceFile, Opts, %% Create IFF chunk. - Chunks = case member(slim, Opts) of + Chunks = case member(slim, CompilerOpts) of true -> [Essentials,AttrChunk]; false -> @@ -264,22 +264,10 @@ flatten_exports(Exps) -> flatten_imports(Imps) -> list_to_binary(map(fun({M,F,A}) -> <<M:32,F:32,A:32>> end, Imps)). -build_attributes(Opts, SourceFile, Attr, MD5) -> - Misc0 = case SourceFile of - [] -> []; - [_|_] -> [{source,SourceFile}] - end, - Misc = case member(slim, Opts) of - false -> Misc0; - true -> [] - end, - Compile = case member(deterministic, Opts) of - false -> - [{options,Opts},{version,?COMPILER_VSN}|Misc]; - true -> - [{version,?COMPILER_VSN}] - end, - {term_to_binary(set_vsn_attribute(Attr, MD5)),term_to_binary(Compile)}. +build_attributes(Attr, Compile, MD5) -> + AttrBinary = term_to_binary(set_vsn_attribute(Attr, MD5)), + CompileBinary = term_to_binary([{version,?COMPILER_VSN}|Compile]), + {AttrBinary,CompileBinary}. build_line_table(Dict) -> {NumLineInstrs,NumFnames0,Fnames0,NumLines,Lines0} = diff --git a/lib/compiler/src/beam_clean.erl b/lib/compiler/src/beam_clean.erl index b736d39f9c..e094c2c320 100644 --- a/lib/compiler/src/beam_clean.erl +++ b/lib/compiler/src/beam_clean.erl @@ -24,7 +24,7 @@ -export([module/2]). -export([bs_clean_saves/1]). -export([clean_labels/1]). --import(lists, [map/2,foldl/3,reverse/1,filter/2]). +-import(lists, [foldl/3,reverse/1,filter/2]). -spec module(beam_utils:module_code(), [compile:option()]) -> {'ok',beam_utils:module_code()}. @@ -118,7 +118,7 @@ add_to_work_list(F, {Fs,Used}=Sets) -> clean_labels(Fs0) -> St0 = #st{lmap=[],entry=1,lc=1}, {Fs1,#st{lmap=Lmap0,lc=Lc}} = function_renumber(Fs0, St0, []), - Lmap = gb_trees:from_orddict(ordsets:from_list(Lmap0)), + Lmap = maps:from_list(Lmap0), Fs = function_replace(Fs1, Lmap, []), {Fs,Lc}. @@ -187,7 +187,8 @@ is_record_tuple(_, _, _) -> no. function_replace([{function,Name,Arity,Entry,Asm0}|Fs], Dict, Acc) -> Asm = try - replace(Asm0, [], Dict) + Fb = fun(Old) -> throw({error,{undefined_label,Old}}) end, + beam_utils:replace_labels(Asm0, [], Dict, Fb) catch throw:{error,{undefined_label,Lbl}=Reason} -> io:format("Function ~s/~w refers to undefined label ~w\n", @@ -197,57 +198,6 @@ function_replace([{function,Name,Arity,Entry,Asm0}|Fs], Dict, Acc) -> function_replace(Fs, Dict, [{function,Name,Arity,Entry,Asm}|Acc]); function_replace([], _, Acc) -> Acc. 
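The rewrite above and the one in beam_jump later in this patch both funnel into the new beam_utils:replace_labels/4: the map holds the label renamings and the fallback fun decides what happens to a label that is not in the map. Roughly, the two idioms used here are (variable names illustrative):

    %% Strict: an unmapped label is a compiler bug (as in beam_clean above).
    Fb = fun(Old) -> throw({error,{undefined_label,Old}}) end,
    beam_utils:replace_labels(Asm0, [], LabelMap, Fb),

    %% Permissive: keep unmapped labels unchanged (as in beam_jump below).
    beam_utils:replace_labels(Acc, Tail, LabelMap, fun(Old) -> Old end)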
-replace([{test,Test,{f,Lbl},Ops}|Is], Acc, D) -> - replace(Is, [{test,Test,{f,label(Lbl, D)},Ops}|Acc], D); -replace([{test,Test,{f,Lbl},Live,Ops,Dst}|Is], Acc, D) -> - replace(Is, [{test,Test,{f,label(Lbl, D)},Live,Ops,Dst}|Acc], D); -replace([{select,I,R,{f,Fail0},Vls0}|Is], Acc, D) -> - Vls = map(fun ({f,L}) -> {f,label(L, D)}; - (Other) -> Other - end, Vls0), - Fail = label(Fail0, D), - replace(Is, [{select,I,R,{f,Fail},Vls}|Acc], D); -replace([{'try',R,{f,Lbl}}|Is], Acc, D) -> - replace(Is, [{'try',R,{f,label(Lbl, D)}}|Acc], D); -replace([{'catch',R,{f,Lbl}}|Is], Acc, D) -> - replace(Is, [{'catch',R,{f,label(Lbl, D)}}|Acc], D); -replace([{jump,{f,Lbl}}|Is], Acc, D) -> - replace(Is, [{jump,{f,label(Lbl, D)}}|Acc], D); -replace([{loop_rec,{f,Lbl},R}|Is], Acc, D) -> - replace(Is, [{loop_rec,{f,label(Lbl, D)},R}|Acc], D); -replace([{loop_rec_end,{f,Lbl}}|Is], Acc, D) -> - replace(Is, [{loop_rec_end,{f,label(Lbl, D)}}|Acc], D); -replace([{wait,{f,Lbl}}|Is], Acc, D) -> - replace(Is, [{wait,{f,label(Lbl, D)}}|Acc], D); -replace([{wait_timeout,{f,Lbl},To}|Is], Acc, D) -> - replace(Is, [{wait_timeout,{f,label(Lbl, D)},To}|Acc], D); -replace([{bif,Name,{f,Lbl},As,R}|Is], Acc, D) when Lbl =/= 0 -> - replace(Is, [{bif,Name,{f,label(Lbl, D)},As,R}|Acc], D); -replace([{gc_bif,Name,{f,Lbl},Live,As,R}|Is], Acc, D) when Lbl =/= 0 -> - replace(Is, [{gc_bif,Name,{f,label(Lbl, D)},Live,As,R}|Acc], D); -replace([{call,Ar,{f,Lbl}}|Is], Acc, D) -> - replace(Is, [{call,Ar,{f,label(Lbl,D)}}|Acc], D); -replace([{make_fun2,{f,Lbl},U1,U2,U3}|Is], Acc, D) -> - replace(Is, [{make_fun2,{f,label(Lbl, D)},U1,U2,U3}|Acc], D); -replace([{bs_init,{f,Lbl},Info,Live,Ss,Dst}|Is], Acc, D) when Lbl =/= 0 -> - replace(Is, [{bs_init,{f,label(Lbl, D)},Info,Live,Ss,Dst}|Acc], D); -replace([{bs_put,{f,Lbl},Info,Ss}|Is], Acc, D) when Lbl =/= 0 -> - replace(Is, [{bs_put,{f,label(Lbl, D)},Info,Ss}|Acc], D); -replace([{put_map=I,{f,Lbl},Op,Src,Dst,Live,List}|Is], Acc, D) - when Lbl =/= 0 -> - replace(Is, [{I,{f,label(Lbl, D)},Op,Src,Dst,Live,List}|Acc], D); -replace([{get_map_elements=I,{f,Lbl},Src,List}|Is], Acc, D) when Lbl =/= 0 -> - replace(Is, [{I,{f,label(Lbl, D)},Src,List}|Acc], D); -replace([I|Is], Acc, D) -> - replace(Is, [I|Acc], D); -replace([], Acc, _) -> Acc. - -label(Old, D) -> - case gb_trees:lookup(Old, D) of - {value,Val} -> Val; - none -> throw({error,{undefined_label,Old}}) - end. - %%% %%% Final fixup of bs_start_match2/5,bs_save2/bs_restore2 instructions for %%% new bit syntax matching (introduced in R11B). diff --git a/lib/compiler/src/beam_jump.erl b/lib/compiler/src/beam_jump.erl index 4365451356..0bcec9ce19 100644 --- a/lib/compiler/src/beam_jump.erl +++ b/lib/compiler/src/beam_jump.erl @@ -71,9 +71,9 @@ %%% %%% jump L2 %%% . . . -%%% L1: %%% L2: ... %%% +%%% and all preceding uses of L1 renamed to L2. %%% If the jump is unreachable, it will be removed according to (1). %%% %%% (5) In @@ -156,41 +156,46 @@ function({function,Name,Arity,CLabel,Asm0}) -> %%% share(Is0) -> - %% We will get more sharing if we never fall through to a label. - Is = eliminate_fallthroughs(Is0, []), - share_1(Is, #{}, [], []). + Is1 = eliminate_fallthroughs(Is0, []), + Is2 = find_fixpoint(fun(Is) -> + share_1(Is, #{}, #{}, [], []) + end, Is1), + reverse(Is2). 
-share_1([{label,L}=Lbl|Is], Dict0, [_|_]=Seq, Acc) -> +share_1([{label,L}=Lbl|Is], Dict0, Lbls0, [_|_]=Seq, Acc) -> case maps:find(Seq, Dict0) of error -> Dict = maps:put(Seq, L, Dict0), - share_1(Is, Dict, [], [Lbl|Seq ++ Acc]); + share_1(Is, Dict, Lbls0, [], [Lbl|Seq ++ Acc]); {ok,Label} -> - share_1(Is, Dict0, [], [Lbl,{jump,{f,Label}}|Acc]) + Lbls = maps:put(L, Label, Lbls0), + share_1(Is, Dict0, Lbls, [], [Lbl,{jump,{f,Label}}|Acc]) end; -share_1([{func_info,_,_,_}=I|Is], _, [], Acc) -> - reverse(Is, [I|Acc]); -share_1([{'catch',_,_}=I|Is], Dict0, Seq, Acc) -> - Dict = clean_non_sharable(Dict0), - share_1(Is, Dict, [I|Seq], Acc); -share_1([{'try',_,_}=I|Is], Dict0, Seq, Acc) -> - Dict = clean_non_sharable(Dict0), - share_1(Is, Dict, [I|Seq], Acc); -share_1([{try_case,_}=I|Is], Dict0, Seq, Acc) -> - Dict = clean_non_sharable(Dict0), - share_1(Is, Dict, [I|Seq], Acc); -share_1([{catch_end,_}=I|Is], Dict0, Seq, Acc) -> - Dict = clean_non_sharable(Dict0), - share_1(Is, Dict, [I|Seq], Acc); -share_1([I|Is], Dict, Seq, Acc) -> +share_1([{func_info,_,_,_}|_]=Is, _, Lbls, [], Acc) when Lbls =/= #{} -> + beam_utils:replace_labels(Acc, Is, Lbls, fun(Old) -> Old end); +share_1([{func_info,_,_,_}|_]=Is, _, Lbls, [], Acc) when Lbls =:= #{} -> + reverse(Acc, Is); +share_1([{'catch',_,_}=I|Is], Dict0, Lbls0, Seq, Acc) -> + {Dict,Lbls} = clean_non_sharable(Dict0, Lbls0), + share_1(Is, Dict, Lbls, [I|Seq], Acc); +share_1([{'try',_,_}=I|Is], Dict0, Lbls0, Seq, Acc) -> + {Dict,Lbls} = clean_non_sharable(Dict0, Lbls0), + share_1(Is, Dict, Lbls, [I|Seq], Acc); +share_1([{try_case,_}=I|Is], Dict0, Lbls0, Seq, Acc) -> + {Dict,Lbls} = clean_non_sharable(Dict0, Lbls0), + share_1(Is, Dict, Lbls, [I|Seq], Acc); +share_1([{catch_end,_}=I|Is], Dict0, Lbls0, Seq, Acc) -> + {Dict,Lbls} = clean_non_sharable(Dict0, Lbls0), + share_1(Is, Dict, Lbls, [I|Seq], Acc); +share_1([I|Is], Dict, Lbls, Seq, Acc) -> case is_unreachable_after(I) of false -> - share_1(Is, Dict, [I|Seq], Acc); + share_1(Is, Dict, Lbls, [I|Seq], Acc); true -> - share_1(Is, Dict, [I], Acc) + share_1(Is, Dict, Lbls, [I], Acc) end. -clean_non_sharable(Dict) -> +clean_non_sharable(Dict0, Lbls0) -> %% We are passing in or out of a 'catch' or 'try' block. Remove %% sequences that should not be shared over the boundaries of the %% block. Since the end of the sequence must match, the only @@ -198,7 +203,17 @@ clean_non_sharable(Dict) -> %% the 'catch'/'try' block is a sequence that ends with an %% instruction that causes an exception. Any sequence that causes %% an exception must contain a line/1 instruction. - maps:filter(fun(K, _V) -> sharable_with_try(K) end, Dict). + Dict1 = maps:to_list(Dict0), + Lbls1 = maps:to_list(Lbls0), + {Dict2,Lbls2} = foldl(fun({K, V}, {Dict,Lbls}) -> + case sharable_with_try(K) of + true -> + {[{K,V}|Dict],lists:keydelete(V, 2, Lbls)}; + false -> + {Dict,Lbls} + end + end, {[],Lbls1}, Dict1), + {maps:from_list(Dict2),maps:from_list(Lbls2)}. sharable_with_try([{line,_}|_]) -> %% This sequence may cause an exception and may potentially @@ -275,14 +290,15 @@ extract_seq_1(_, _) -> no. -record(st, { entry :: beam_asm:label(), %Entry label (must not be moved). - mlbl :: #{beam_asm:label() := [beam_asm:label()]}, %Moved labels. - labels :: cerl_sets:set() %Set of referenced labels. + replace :: #{beam_asm:label() := beam_asm:label()}, %Labels to replace. + labels :: cerl_sets:set(), %Set of referenced labels. + index :: beam_utils:code_index() | {lazy,[beam_utils:instruction()]} %Index built lazily only if needed }). 
opt(Is0, CLabel) -> find_fixpoint(fun(Is) -> Lbls = initial_labels(Is), - St = #st{entry=CLabel,mlbl=#{},labels=Lbls}, + St = #st{entry=CLabel,replace=#{},labels=Lbls,index={lazy,Is}}, opt(Is, [], St) end, Is0). @@ -292,7 +308,7 @@ find_fixpoint(OptFun, Is0) -> Is -> find_fixpoint(OptFun, Is) end. -opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc, St) -> +opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc0, St0) -> %% We have %% Test Label Ops %% jump Label @@ -301,10 +317,34 @@ opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc, St) -> case beam_utils:is_pure_test(I) of false -> %% Test is not pure; we must keep it. - opt(Is, [I|Acc], label_used(Lbl, St)); + opt(Is, [I|Acc0], label_used(Lbl, St0)); true -> %% The test is pure and its failure label is the same %% as in the jump that follows -- thus it is not needed. + %% Check if any of the previous instructions could also be eliminated. + {Acc,St} = opt_useless_loads(Acc0, L, St0), + opt(Is, Acc, St) + end; +opt([{test,_,{f,L}=Lbl,_}=I|[{label,L}|_]=Is], Acc0, St0) -> + %% Similar to the above, except we have a fall-through rather than jump + %% Test Label Ops + %% label Label + case beam_utils:is_pure_test(I) of + false -> + opt(Is, [I|Acc0], label_used(Lbl, St0)); + true -> + {Acc,St} = opt_useless_loads(Acc0, L, St0), + opt(Is, Acc, St) + end; +opt([{test,_,{f,L}=Lbl,_}=I|[{label,L}|_]=Is], Acc0, St0) -> + %% Similar to the above, except we have a fall-through rather than jump + %% Test Label Ops + %% label Label + case beam_utils:is_pure_test(I) of + false -> + opt(Is, [I|Acc0], label_used(Lbl, St0)); + true -> + {Acc,St} = opt_useless_loads(Acc0, L, St0), opt(Is, Acc, St) end; opt([{test,Test0,{f,L}=Lbl,Ops}=I|[{jump,To}|Is]=Is0], Acc, St) -> @@ -326,30 +366,16 @@ opt([{test,_,{f,_}=Lbl,_,_,_}=I|Is], Acc, St) -> opt(Is, [I|Acc], label_used(Lbl, St)); opt([{select,_,_R,Fail,Vls}=I|Is], Acc, St) -> skip_unreachable(Is, [I|Acc], label_used([Fail|Vls], St)); -opt([{label,Lbl}=I|Is], Acc, #st{mlbl=Mlbl}=St0) -> - case maps:find(Lbl, Mlbl) of - {ok,Lbls} -> - %% Essential to remove the list of labels from the dictionary, - %% since we will rescan the inserted labels. We MUST rescan. - St = St0#st{mlbl=maps:remove(Lbl, Mlbl)}, - insert_labels([Lbl|Lbls], Is, Acc, St); - error -> - opt(Is, [I|Acc], St0) - end; +opt([{label,From}=I,{label,To}|Is], Acc, #st{replace=Replace}=St) -> + opt([I|Is], Acc, St#st{replace=Replace#{To => From}}); opt([{jump,{f,_}=X}|[{label,_},{jump,X}|_]=Is], Acc, St) -> opt(Is, Acc, St); opt([{jump,{f,Lbl}}|[{label,Lbl}|_]=Is], Acc, St) -> opt(Is, Acc, St); -opt([{jump,{f,L}=Lbl}=I|Is], Acc0, #st{mlbl=Mlbl0}=St0) -> - %% All labels before this jump instruction should now be - %% moved to the location of the jump's target. - {Lbls,Acc} = collect_labels(Acc0, St0), - St = case Lbls of - [] -> St0; - [_|_] -> - Mlbl = maps_append_list(L, Lbls, Mlbl0), - St0#st{mlbl=Mlbl} - end, +opt([{jump,{f,L}=Lbl}=I|Is], Acc0, St0) -> + %% Replace all labels before this jump instruction into the + %% location of the jump's target. + {Acc,St} = collect_labels(Acc0, L, St0), skip_unreachable(Is, [I|Acc], label_used(Lbl, St)); %% Optimization: quickly handle some common instructions that don't %% have any failure labels and where is_unreachable_after(I) =:= false. @@ -369,36 +395,72 @@ opt([I|Is], Acc, #st{labels=Used0}=St0) -> true -> skip_unreachable(Is, [I|Acc], St); false -> opt(Is, [I|Acc], St) end; -opt([], Acc, #st{mlbl=Mlbl}) -> - Code = reverse(Acc), - insert_fc_labels(Code, Mlbl). 
- -insert_fc_labels([{label,L}=I|Is0], Mlbl) -> - case maps:find(L, Mlbl) of - error -> - [I|insert_fc_labels(Is0, Mlbl)]; - {ok,Lbls} -> - Is = [{label,Lb} || Lb <- Lbls] ++ Is0, - [I|insert_fc_labels(Is, maps:remove(L, Mlbl))] +opt([], Acc, #st{replace=Replace0}) when Replace0 =/= #{} -> + Replace = normalize_replace(maps:to_list(Replace0), Replace0, []), + beam_utils:replace_labels(Acc, [], Replace, fun(Old) -> Old end); +opt([], Acc, #st{replace=Replace}) when Replace =:= #{} -> + reverse(Acc). + +normalize_replace([{From,To0}|Rest], Replace, Acc) -> + case Replace of + #{To0 := To} -> + normalize_replace([{From,To}|Rest], Replace, Acc); + _ -> + normalize_replace(Rest, Replace, [{From,To0}|Acc]) end; -insert_fc_labels([_|_]=Is, _) -> Is. - -maps_append_list(K,Vs,M) -> - case M of - #{K:=Vs0} -> M#{K:=Vs0++Vs}; % same order as dict - _ -> M#{K => Vs} - end. +normalize_replace([], _Replace, Acc) -> + maps:from_list(Acc). + +%% After eliminating a test, it might happen, that a register was only used +%% in this test. Let's check if that was the case and if it was so, we can +%% eliminate the load into the register completely. +opt_useless_loads([{block,_}|_]=Is, L, #st{index={lazy,FIs}}=St) -> + opt_useless_loads(Is, L, St#st{index=beam_utils:index_labels(FIs)}); +opt_useless_loads([{block,Block0}|Is], L, #st{index=Index}=St) -> + case opt_useless_block_loads(Block0, L, Index) of + [] -> + opt_useless_loads(Is, L, St); + [_|_]=Block -> + {[{block,Block}|Is],St} + end; +%% After eliminating the test and useless blocks, it might happen, +%% that the previous test could also be eliminated. +%% It might be that the label was already marked as used, even if ultimately, +%% it never will be - we can't do much about it at that point, though +opt_useless_loads([{test,_,{f,L},_}=I|Is], L, St) -> + case beam_utils:is_pure_test(I) of + false -> + {[I|Is],St}; + true -> + opt_useless_loads(Is, L, St) + end; +opt_useless_loads(Is, _L, St) -> + {Is,St}. + +opt_useless_block_loads([{set,[Dst],_,_}=I|Is], L, Index) -> + BlockJump = [{block,Is},{jump,{f,L}}], + case beam_utils:is_killed(Dst, BlockJump, Index) of + true -> + %% The register is killed and not used, we can remove the load + opt_useless_block_loads(Is, L, Index); + false -> + [I|opt_useless_block_loads(Is, L, Index)] + end; +opt_useless_block_loads([I|Is], L, Index) -> + [I|opt_useless_block_loads(Is, L, Index)]; +opt_useless_block_loads([], _L, _Index) -> + []. -collect_labels(Is, #st{entry=Entry}) -> - collect_labels_1(Is, Entry, []). +collect_labels(Is, Label, #st{entry=Entry,replace=Replace} = St) -> + collect_labels_1(Is, Label, Entry, Replace, St). -collect_labels_1([{label,Entry}|_]=Is, Entry, Acc) -> +collect_labels_1([{label,Entry}|_]=Is, _Label, Entry, Acc, St) -> %% Never move the entry label. - {Acc,Is}; -collect_labels_1([{label,L}|Is], Entry, Acc) -> - collect_labels_1(Is, Entry, [L|Acc]); -collect_labels_1(Is, _Entry, Acc) -> - {Acc,Is}. + {Is,St#st{replace=Acc}}; +collect_labels_1([{label,L}|Is], Label, Entry, Acc, St) -> + collect_labels_1(Is, Label, Entry, Acc#{L => Label}, St); +collect_labels_1(Is, _Label, _Entry, Acc, St) -> + {Is,St#st{replace=Acc}}. %% label_defined(Is, Label) -> true | false. %% Test whether the label Label is defined at the start of the instruction @@ -418,13 +480,6 @@ invert_test(is_eq_exact) -> is_ne_exact; invert_test(is_ne_exact) -> is_eq_exact; invert_test(_) -> not_possible. 
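Illustrative only, with invented labels and under the assumption that {x,1} is not used at label 42 or afterwards: once the pure is_eq_exact test is dropped because it branches to the same label the following jump targets, opt_useless_loads/3 notices that {x,1} was loaded solely to feed that test and removes the move from the preceding block as well.

    [{block,[{set,[{x,1}],[{atom,error}],move}]},
     {test,is_eq_exact,{f,42},[{x,0},{x,1}]},
     {jump,{f,42}} | More]

    %% would conceptually be reduced to

    [{jump,{f,42}} | More]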
-insert_labels([L|Ls], Is, [{jump,{f,L}}|Acc], St) -> - insert_labels(Ls, [{label,L}|Is], Acc, St); -insert_labels([L|Ls], Is, Acc, St) -> - insert_labels(Ls, [{label,L}|Is], Acc, St); -insert_labels([], Is, Acc, St) -> - opt(Is, Acc, St). - %% skip_unreachable([Instruction], St). %% Remove all instructions (including definitions of labels %% that have not been referenced yet) up to the next diff --git a/lib/compiler/src/beam_peep.erl b/lib/compiler/src/beam_peep.erl index 6df5c02334..9436c20b36 100644 --- a/lib/compiler/src/beam_peep.erl +++ b/lib/compiler/src/beam_peep.erl @@ -89,15 +89,37 @@ peep([{gc_bif,_,_,_,_,Dst}=I|Is], SeenTests0, Acc) -> peep([{jump,{f,L}},{label,L}=I|Is], _, Acc) -> %% Sometimes beam_jump has missed this optimization. peep(Is, gb_sets:empty(), [I|Acc]); -peep([{select,Op,R,F,Vls0}|Is], _, Acc) -> +peep([{select,Op,R,F,Vls0}|Is], SeenTests0, Acc0) -> case prune_redundant_values(Vls0, F) of [] -> %% No values left. Must convert to plain jump. I = {jump,F}, - peep(Is, gb_sets:empty(), [I|Acc]); + peep([I|Is], gb_sets:empty(), Acc0); + [{atom,_}=Value,Lbl] when Op =:= select_val -> + %% Single value left. Convert to regular test and pop redundant tests. + Is1 = [{test,is_eq_exact,F,[R,Value]},{jump,Lbl}|Is], + case Acc0 of + [{test,is_atom,F,[R]}|Acc] -> + peep(Is1, SeenTests0, Acc); + _ -> + peep(Is1, SeenTests0, Acc0) + end; + [{integer,_}=Value,Lbl] when Op =:= select_val -> + %% Single value left. Convert to regular test and pop redundant tests. + Is1 = [{test,is_eq_exact,F,[R,Value]},{jump,Lbl}|Is], + case Acc0 of + [{test,is_integer,F,[R]}|Acc] -> + peep(Is1, SeenTests0, Acc); + _ -> + peep(Is1, SeenTests0, Acc0) + end; + [Arity,Lbl] when Op =:= select_tuple_arity -> + %% Single value left. Convert to regular test + Is1 = [{test,test_arity,F,[R,Arity]},{jump,Lbl}|Is], + peep(Is1, SeenTests0, Acc0); [_|_]=Vls -> I = {select,Op,R,F,Vls}, - peep(Is, gb_sets:empty(), [I|Acc]) + peep(Is, gb_sets:empty(), [I|Acc0]) end; peep([{test,Op,_,Ops}=I|Is], SeenTests0, Acc) -> case beam_utils:is_pure_test(I) of diff --git a/lib/compiler/src/beam_utils.erl b/lib/compiler/src/beam_utils.erl index e39fbdc3b7..a4c65397df 100644 --- a/lib/compiler/src/beam_utils.erl +++ b/lib/compiler/src/beam_utils.erl @@ -23,14 +23,19 @@ -module(beam_utils). -export([is_killed_block/2,is_killed/3,is_killed_at/3, is_not_used/3, - empty_label_index/0,index_label/3,index_labels/1, + empty_label_index/0,index_label/3,index_labels/1,replace_labels/4, code_at/2,bif_to_test/3,is_pure_test/1, live_opt/1,delete_live_annos/1,combine_heap_needs/2, split_even/1]). -export_type([code_index/0,module_code/0,instruction/0]). --import(lists, [member/2,sort/1,reverse/1,splitwith/2]). +-import(lists, [map/2,member/2,sort/1,reverse/1,splitwith/2]). + +-define(is_const(Val), (element(1, Val) =:= integer orelse + element(1, Val) =:= float orelse + element(1, Val) =:= atom orelse + element(1, Val) =:= literal)). %% instruction() describes all instructions that are used during optimzation %% (from beam_a to beam_z). @@ -160,6 +165,18 @@ index_label(Lbl, Is0, Acc) -> code_at(L, Ll) -> gb_trees:get(L, Ll). +%% replace_labels(FunctionIs, Tail, ReplaceDb, Fallback) -> FunctionIs. +%% Replace all labels in instructions according to the ReplaceDb. +%% If label is not found the Fallback is called with the label to +%% produce a new one. + +-spec replace_labels([instruction()], + [instruction()], + #{beam_asm:label() => beam_asm:label()}, + fun((beam_asm:label()) -> term())) -> [instruction()]. 
+replace_labels(Is, Acc, D, Fb) -> + replace_labels_1(Is, Acc, D, Fb). + %% bif_to_test(Bif, [Op], Fail) -> {test,Test,Fail,[Op]} %% Convert a BIF to a test. Fail if not possible. @@ -185,10 +202,20 @@ bif_to_test('>', [A,B], Fail) -> {test,is_lt,Fail,[B,A]}; bif_to_test('<', [_,_]=Ops, Fail) -> {test,is_lt,Fail,Ops}; bif_to_test('>=', [_,_]=Ops, Fail) -> {test,is_ge,Fail,Ops}; bif_to_test('==', [A,nil], Fail) -> {test,is_nil,Fail,[A]}; +bif_to_test('==', [nil,A], Fail) -> {test,is_nil,Fail,[A]}; +bif_to_test('==', [C,A], Fail) when ?is_const(C) -> + {test,is_eq,Fail,[A,C]}; bif_to_test('==', [_,_]=Ops, Fail) -> {test,is_eq,Fail,Ops}; +bif_to_test('/=', [C,A], Fail) when ?is_const(C) -> + {test,is_ne,Fail,[A,C]}; bif_to_test('/=', [_,_]=Ops, Fail) -> {test,is_ne,Fail,Ops}; bif_to_test('=:=', [A,nil], Fail) -> {test,is_nil,Fail,[A]}; +bif_to_test('=:=', [nil,A], Fail) -> {test,is_nil,Fail,[A]}; +bif_to_test('=:=', [C,A], Fail) when ?is_const(C) -> + {test,is_eq_exact,Fail,[A,C]}; bif_to_test('=:=', [_,_]=Ops, Fail) -> {test,is_eq_exact,Fail,Ops}; +bif_to_test('=/=', [C,A], Fail) when ?is_const(C) -> + {test,is_ne_exact,Fail,[A,C]}; bif_to_test('=/=', [_,_]=Ops, Fail) -> {test,is_ne_exact,Fail,Ops}; bif_to_test(is_record, [_,_,_]=Ops, Fail) -> {test,is_record,Fail,Ops}. @@ -643,6 +670,58 @@ index_labels_1([], Acc) -> gb_trees:from_orddict(sort(Acc)). drop_labels([{label,_}|Is]) -> drop_labels(Is); drop_labels(Is) -> Is. + +replace_labels_1([{test,Test,{f,Lbl},Ops}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{test,Test,{f,label(Lbl, D, Fb)},Ops}|Acc], D, Fb); +replace_labels_1([{test,Test,{f,Lbl},Live,Ops,Dst}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{test,Test,{f,label(Lbl, D, Fb)},Live,Ops,Dst}|Acc], D, Fb); +replace_labels_1([{select,I,R,{f,Fail0},Vls0}|Is], Acc, D, Fb) -> + Vls = map(fun ({f,L}) -> {f,label(L, D, Fb)}; + (Other) -> Other + end, Vls0), + Fail = label(Fail0, D, Fb), + replace_labels_1(Is, [{select,I,R,{f,Fail},Vls}|Acc], D, Fb); +replace_labels_1([{'try',R,{f,Lbl}}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{'try',R,{f,label(Lbl, D, Fb)}}|Acc], D, Fb); +replace_labels_1([{'catch',R,{f,Lbl}}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{'catch',R,{f,label(Lbl, D, Fb)}}|Acc], D, Fb); +replace_labels_1([{jump,{f,Lbl}}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{jump,{f,label(Lbl, D, Fb)}}|Acc], D, Fb); +replace_labels_1([{loop_rec,{f,Lbl},R}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{loop_rec,{f,label(Lbl, D, Fb)},R}|Acc], D, Fb); +replace_labels_1([{loop_rec_end,{f,Lbl}}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{loop_rec_end,{f,label(Lbl, D, Fb)}}|Acc], D, Fb); +replace_labels_1([{wait,{f,Lbl}}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{wait,{f,label(Lbl, D, Fb)}}|Acc], D, Fb); +replace_labels_1([{wait_timeout,{f,Lbl},To}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{wait_timeout,{f,label(Lbl, D, Fb)},To}|Acc], D, Fb); +replace_labels_1([{bif,Name,{f,Lbl},As,R}|Is], Acc, D, Fb) when Lbl =/= 0 -> + replace_labels_1(Is, [{bif,Name,{f,label(Lbl, D, Fb)},As,R}|Acc], D, Fb); +replace_labels_1([{gc_bif,Name,{f,Lbl},Live,As,R}|Is], Acc, D, Fb) when Lbl =/= 0 -> + replace_labels_1(Is, [{gc_bif,Name,{f,label(Lbl, D, Fb)},Live,As,R}|Acc], D, Fb); +replace_labels_1([{call,Ar,{f,Lbl}}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{call,Ar,{f,label(Lbl, D, Fb)}}|Acc], D, Fb); +replace_labels_1([{make_fun2,{f,Lbl},U1,U2,U3}|Is], Acc, D, Fb) -> + replace_labels_1(Is, [{make_fun2,{f,label(Lbl, D, Fb)},U1,U2,U3}|Acc], D, Fb); +replace_labels_1([{bs_init,{f,Lbl},Info,Live,Ss,Dst}|Is], 
Acc, D, Fb) when Lbl =/= 0 -> + replace_labels_1(Is, [{bs_init,{f,label(Lbl, D, Fb)},Info,Live,Ss,Dst}|Acc], D, Fb); +replace_labels_1([{bs_put,{f,Lbl},Info,Ss}|Is], Acc, D, Fb) when Lbl =/= 0 -> + replace_labels_1(Is, [{bs_put,{f,label(Lbl, D, Fb)},Info,Ss}|Acc], D, Fb); +replace_labels_1([{put_map=I,{f,Lbl},Op,Src,Dst,Live,List}|Is], Acc, D, Fb) + when Lbl =/= 0 -> + replace_labels_1(Is, [{I,{f,label(Lbl, D, Fb)},Op,Src,Dst,Live,List}|Acc], D, Fb); +replace_labels_1([{get_map_elements=I,{f,Lbl},Src,List}|Is], Acc, D, Fb) when Lbl =/= 0 -> + replace_labels_1(Is, [{I,{f,label(Lbl, D, Fb)},Src,List}|Acc], D, Fb); +replace_labels_1([I|Is], Acc, D, Fb) -> + replace_labels_1(Is, [I|Acc], D, Fb); +replace_labels_1([], Acc, _, _) -> Acc. + +label(Old, D, Fb) -> + case D of + #{Old := New} -> New; + _ -> Fb(Old) + end. + %% Help functions for combine_heap_needs. combine_alloc_lists(Al1, Al2) -> diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl index aa2d224bb4..1b359d1e59 100644 --- a/lib/compiler/src/compile.erl +++ b/lib/compiler/src/compile.erl @@ -706,14 +706,16 @@ core_passes() -> [{unless,no_copt, [{core_old_inliner,fun test_old_inliner/1,fun core_old_inliner/2}, {iff,doldinline,{listing,"oldinline"}}, - {pass,sys_core_fold}, + {unless,no_fold,{pass,sys_core_fold}}, {iff,dcorefold,{listing,"corefold"}}, {core_inline_module,fun test_core_inliner/1,fun core_inline_module/2}, {iff,dinline,{listing,"inline"}}, {core_fold_after_inlining,fun test_any_inliner/1, fun core_fold_module_after_inlining/2}, + {iff,dcopt,{listing,"copt"}}, + {unless,no_alias,{pass,sys_core_alias}}, + {iff,dalias,{listing,"core_alias"}}, ?pass(core_transforms)]}, - {iff,dcopt,{listing,"copt"}}, {iff,'to_core',{done,"core"}}]} | kernel_passes()]. @@ -1446,15 +1448,33 @@ save_core_code(Code, St) -> beam_asm(Code0, #compile{ifile=File,extra_chunks=ExtraChunks,options=CompilerOpts}=St) -> case debug_info(St) of {ok,DebugInfo,Opts0} -> - Source = paranoid_absname(File), Opts1 = [O || O <- Opts0, effects_code_generation(O)], Chunks = [{<<"Dbgi">>, DebugInfo} | ExtraChunks], - {ok,Code} = beam_asm:module(Code0, Chunks, Source, Opts1, CompilerOpts), + CompileInfo = compile_info(File, Opts1), + {ok,Code} = beam_asm:module(Code0, Chunks, CompileInfo, CompilerOpts), {ok,Code,St#compile{abstract_code=[]}}; {error,Es} -> {error,St#compile{errors=St#compile.errors ++ [{File,Es}]}} end. +compile_info(File, Opts) -> + IsSlim = member(slim, Opts), + IsDeterministic = member(deterministic, Opts), + Info0 = proplists:get_value(compile_info, Opts, []), + Info1 = + case paranoid_absname(File) of + [_|_] = Source when not IsSlim, not IsDeterministic -> + [{source,Source} | Info0]; + _ -> + Info0 + end, + Info2 = + case IsDeterministic of + false -> [{options,proplists:delete(compile_info, Opts)} | Info1]; + true -> Info1 + end, + Info2. 
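Earlier in this hunk the Core pass list gained no_fold and no_alias switches plus a dalias listing, so the new pass has the usual escape hatches. At the call site these read roughly as follows (file name invented); the dalias listing should end up in foo.core_alias:

    compile:file("foo.erl", [no_alias]),   %% skip the new sys_core_alias pass
    compile:file("foo.erl", [no_fold]),    %% skip sys_core_fold
    compile:file("foo.erl", [dalias]).     %% list the Core code after aliasing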
+ paranoid_absname(""=File) -> File; paranoid_absname(File) -> @@ -1921,6 +1941,7 @@ pre_load() -> erl_lint, erl_parse, erl_scan, + sys_core_alias, sys_core_bsm, sys_core_dsetel, sys_core_fold, diff --git a/lib/compiler/src/compiler.app.src b/lib/compiler/src/compiler.app.src index 3139d68902..703cf1d1b8 100644 --- a/lib/compiler/src/compiler.app.src +++ b/lib/compiler/src/compiler.app.src @@ -58,6 +58,7 @@ core_lib, erl_bifs, rec_env, + sys_core_alias, sys_core_bsm, sys_core_dsetel, sys_core_fold, diff --git a/lib/compiler/src/sys_core_alias.erl b/lib/compiler/src/sys_core_alias.erl new file mode 100644 index 0000000000..63e2f7488e --- /dev/null +++ b/lib/compiler/src/sys_core_alias.erl @@ -0,0 +1,308 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 1999-2016. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +%% Purpose : Replace values by aliases from patterns optimisation for Core + +%% Replace expressions by aliases from patterns. For example: +%% +%% example({ok, Val}) -> +%% {ok, Val}. +%% +%% will become: +%% +%% example({ok, Val} = Tuple) -> +%% Tuple. +%% +%% Currently this pass aliases tuple and cons nodes made of literals, +%% variables and other cons. The tuple/cons may appear anywhere in the +%% pattern and it will be aliased if used later on. +%% +%% Notice a tuple/cons made only of literals is not aliased as it may +%% be part of the literal pool. + +-module(sys_core_alias). + +-export([module/2]). + +-include("core_parse.hrl"). + +-define(NOTSET, 0). + +-record(sub, {p=#{} :: #{term() => ?NOTSET | atom()}, %% Found pattern substitutions + v=cerl_sets:new() :: cerl_sets:set(cerl:var_name()), %% Variables used by patterns + t=undefined :: term()}). %% Temporary information from pre to post + +-type sub() :: #sub{}. + +-spec module(cerl:c_module(), [compile:option()]) -> + {'ok',cerl:c_module(),[]}. + +module(#c_module{defs=Ds0}=Mod, _Opts) -> + Ds1 = [def(D) || D <- Ds0], + {ok,Mod#c_module{defs=Ds1},[]}. + +def({#c_var{name={F,Arity}}=Name,B0}) -> + try + put(new_var_num, 0), + {B1,_} = cerl_trees:mapfold(fun pre/2, fun post/2, sub_new(undefined), B0), + erase(new_var_num), + {Name,B1} + catch + Class:Error -> + Stack = erlang:get_stacktrace(), + io:fwrite("Function: ~w/~w\n", [F,Arity]), + erlang:raise(Class, Error, Stack) + end. + +pre(#c_let{vars=Vars}=Node, Sub) -> + {Node,sub_fold(get_variables(Vars), Sub)}; + +pre(#c_fun{vars=Vars}=Node, Sub) -> + {Node,sub_fold(get_variables(Vars), Sub)}; + +pre(#c_clause{pats=Pats}=Node, Sub0) -> + VarNames = get_variables(Pats), + Sub1 = sub_fold(VarNames, Sub0), + Keys = get_pattern_keys(Pats), + Sub2 = sub_add_keys(Keys, Sub1), + + #sub{v=SubNames,t=Temp} = Sub2, + Sub3 = Sub2#sub{v=merge_variables(VarNames, SubNames), + t={clause,Pats,Keys,SubNames,Temp}}, + + {Node#c_clause{pats=[]},Sub3}; + +pre(Node, Sub0) -> + %% We cache only tuples and cons. 
+ case cerl:is_data(Node) andalso not cerl:is_literal(Node) of + false -> + {Node,Sub0}; + true -> + Kind = cerl:data_type(Node), + Es = cerl:data_es(Node), + case sub_cache_nodes(Kind, Es, Sub0) of + {Name,Sub1} -> + {cerl:ann_c_var(cerl:get_ann(Node), Name),Sub1}; + error -> + {Node,Sub0} + end + end. + +post(#c_let{}=Node, Sub) -> + {Node,sub_unfold(Sub)}; + +post(#c_fun{}=Node, Sub) -> + {Node,sub_unfold(Sub)}; + +post(#c_clause{}=Node, #sub{t={clause,Pats0,Keys,V,T}}=Sub0) -> + {Sub1,PostKeys} = sub_take_keys(Keys, Sub0), + Pats1 = put_pattern_keys(Pats0, PostKeys), + Sub2 = sub_unfold(Sub1#sub{v=V,t=T}), + {Node#c_clause{pats=Pats1},Sub2}; + +post(Node, Sub) -> + {Node,Sub}. + +%% sub_new/1 +%% sub_add_keys/2 +%% sub_take_keys/3 +%% sub_cache_nodes/3 +%% +%% Manages the substitutions record. + +%% Builds a new sub. +-spec sub_new(term()) -> sub(). +sub_new(Temp) -> + #sub{t=Temp}. + +%% Folds the sub into a new one if the variables in nodes are not disjoint +sub_fold(VarNames, #sub{v=SubNames}=Sub) -> + case is_disjoint_variables(VarNames, SubNames) of + true -> Sub#sub{t={temp,Sub#sub.t}}; + false -> sub_new({sub,Sub}) + end. + +%% Unfolds the sub in case one was folded in the previous step +sub_unfold(#sub{t={temp,Temp}}=Sub) -> + Sub#sub{t=Temp}; +sub_unfold(#sub{t={sub,Sub}}) -> + Sub. + +%% Adds the keys extracted from patterns to the state. +-spec sub_add_keys([term()], sub()) -> sub(). +sub_add_keys(Keys, #sub{p=Pat0}=Sub) -> + Pat1 = + lists:foldl(fun(Key, Acc) -> + false = maps:is_key(Key, Acc), %Assertion. + maps:put(Key, ?NOTSET, Acc) + end, Pat0, Keys), + Sub#sub{p=Pat1}. + +%% Take the keys from the map taking into account the keys +%% that have changed as those must become aliases in the pattern. +-spec sub_take_keys([term()], sub()) -> {sub(), [{term(), atom()}]}. +sub_take_keys(Keys, #sub{p=Pat0}=Sub) -> + {Pat1,Acc} = sub_take_keys(Keys, Pat0, []), + {Sub#sub{p=Pat1},Acc}. + +sub_take_keys([K|T], Sub0, Acc) -> + case maps:take(K, Sub0) of + {?NOTSET,Sub1} -> + sub_take_keys(T, Sub1, Acc); + {Name,Sub1} -> + sub_take_keys(T, Sub1, [{K,Name}|Acc]) + end; +sub_take_keys([], Sub, Acc) -> + {Sub,Acc}. + +%% Check if the node can be cached based on the state information. +%% If it can be cached and it does not have an alias for it, we +%% build one. +-spec sub_cache_nodes(atom(), [cerl:cerl()], sub()) -> {atom(), sub()} | error. +sub_cache_nodes(Kind, Nodes, #sub{p=Pat}=Sub) -> + case nodes_to_key(Kind, Nodes) of + {ok, Key} -> + case Pat of + #{Key := ?NOTSET} -> + new_var_name(Key, Sub); + #{Key := Name} -> + {Name,Sub}; + #{} -> + error + end; + error -> + error + end. + +new_var_name(Key, #sub{p=Pat}=Sub) -> + Counter = get(new_var_num), + Name = list_to_atom("@r" ++ integer_to_list(Counter)), + put(new_var_num, Counter + 1), + {Name,Sub#sub{p=maps:put(Key, Name, Pat)}}. + +%% get_variables/1 +%% is_disjoint_variables/2 +%% merge_variables/2 + +get_variables(NodesList) -> + cerl_sets:from_list([Var || Node <- NodesList, Var <- cerl_trees:variables(Node)]). + +is_disjoint_variables(Vars1, Vars2) -> + cerl_sets:is_disjoint(Vars1, Vars2). + +merge_variables(Vars1, Vars2) -> + cerl_sets:union(Vars1, Vars2). + +%% get_pattern_keys/2 +%% put_pattern_keys/2 +%% +%% Gets keys from patterns or add them as aliases. + +get_pattern_keys(Patterns) -> + lists:foldl(fun get_pattern_keys/2, [], Patterns). 
+ +get_pattern_keys(#c_tuple{es=Es}, Acc0) -> + Acc1 = accumulate_pattern_keys(tuple, Es, Acc0), + lists:foldl(fun get_pattern_keys/2, Acc1, Es); +get_pattern_keys(#c_cons{hd=Hd,tl=Tl}, Acc0) -> + Acc1 = accumulate_pattern_keys(cons, [Hd, Tl], Acc0), + get_pattern_keys(Tl, get_pattern_keys(Hd, Acc1)); +get_pattern_keys(#c_alias{pat=Pat}, Acc0) -> + get_pattern_keys(Pat, Acc0); +get_pattern_keys(#c_map{es=Es}, Acc0) -> + lists:foldl(fun get_pattern_keys/2, Acc0, Es); +get_pattern_keys(#c_map_pair{val=Val}, Acc0) -> + get_pattern_keys(Val, Acc0); +get_pattern_keys(_, Acc) -> + Acc. + +accumulate_pattern_keys(Kind, Nodes, Acc) -> + case nodes_to_key(Kind, Nodes) of + {ok,Key} -> [Key|Acc]; + error -> Acc + end. + +put_pattern_keys(Patterns, []) -> + Patterns; +put_pattern_keys(Patterns, Keys) -> + {NewPatterns,Map} = + lists:mapfoldl(fun alias_pattern_keys/2, maps:from_list(Keys), Patterns), + %% Check all aliases have been consumed from the map. + 0 = map_size(Map), + NewPatterns. + +alias_pattern_keys(#c_tuple{anno=Anno,es=Es0}=Node, Acc0) -> + {Es1,Acc1} = lists:mapfoldl(fun alias_pattern_keys/2, Acc0, Es0), + nodes_to_alias(tuple, Es0, Anno, Node#c_tuple{es=Es1}, Acc1); +alias_pattern_keys(#c_cons{anno=Anno,hd=Hd0,tl=Tl0}=Node, Acc0) -> + {Hd1,Acc1} = alias_pattern_keys(Hd0, Acc0), + {Tl1,Acc2} = alias_pattern_keys(Tl0, Acc1), + nodes_to_alias(cons, [Hd0, Tl0], Anno, Node#c_cons{hd=Hd1,tl=Tl1}, Acc2); +alias_pattern_keys(#c_alias{pat=Pat0}=Node, Acc0) -> + {Pat1,Acc1} = alias_pattern_keys(Pat0, Acc0), + {Node#c_alias{pat=Pat1}, Acc1}; +alias_pattern_keys(#c_map{es=Es0}=Node, Acc0) -> + {Es1,Acc1} = lists:mapfoldl(fun alias_pattern_keys/2, Acc0, Es0), + {Node#c_map{es=Es1}, Acc1}; +alias_pattern_keys(#c_map_pair{val=Val0}=Node, Acc0) -> + {Val1,Acc1} = alias_pattern_keys(Val0, Acc0), + {Node#c_map_pair{val=Val1}, Acc1}; +alias_pattern_keys(Pattern, Acc) -> + {Pattern,Acc}. + +%% Check if a node must become an alias because +%% its pattern was used later on as an expression. +nodes_to_alias(Kind, Inner, Anno, Node, Keys0) -> + case nodes_to_key(Kind, Inner) of + {ok,Key} -> + case maps:take(Key, Keys0) of + {Name,Keys1} -> + Var = cerl:ann_c_var(Anno, Name), + {cerl:ann_c_alias(Anno, Var, Node), Keys1}; + error -> + {Node,Keys0} + end; + error -> + {Node,Keys0} + end. + +%% Builds the key used to check if a value can be +%% replaced by an alias. It considers literals, +%% aliases, variables, tuples and cons recursively. +nodes_to_key(Kind, Nodes) -> + nodes_to_key(Nodes, [], Kind). + +nodes_to_key([#c_alias{var=Var}|T], Acc, Kind) -> + nodes_to_key([Var|T], Acc, Kind); +nodes_to_key([#c_var{name=Name}|T], Acc, Kind) -> + nodes_to_key(T, [[var,Name]|Acc], Kind); +nodes_to_key([Node|T], Acc0, Kind) -> + case cerl:is_data(Node) of + false -> + error; + true -> + case nodes_to_key(cerl:data_es(Node), [], cerl:data_type(Node)) of + {ok,Key} -> + nodes_to_key(T, [Key|Acc0], Kind); + error -> + error + end + end; +nodes_to_key([], Acc, Kind) -> + {ok,[Kind|Acc]}. 
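
To make the effect of the new pass concrete, here is a minimal sketch (module and helper names are invented; it mirrors the core_alias_SUITE test suite added further down in this diff) of how the sharing introduced by sys_core_alias can be observed with erts_debug:same/2, which checks that two values are the very same term rather than merely equal:

    -module(alias_demo).
    -export([run/0]).

    %% With sys_core_alias this clause is compiled as if it had been
    %% written roundtrip({ok,Val}=Tuple) -> Tuple, so no new tuple is built.
    roundtrip({ok, Val}) -> {ok, Val}.

    id(X) -> X.

    run() ->
        T = {ok, id(value)},                     %% built at runtime, not a literal
        true = erts_debug:same(T, roundtrip(T)), %% the matched tuple itself is returned
        ok.
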
diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl index d73060fb7e..f3f315935a 100644 --- a/lib/compiler/src/sys_core_fold.erl +++ b/lib/compiler/src/sys_core_fold.erl @@ -2422,16 +2422,10 @@ move_let_into_expr(#c_let{vars=InnerVs0,body=InnerBody0}=Inner, Outer#c_let{vars=OuterVs,arg=Arg, body=Inner#c_let{vars=InnerVs,arg=OuterBody,body=InnerBody}}; move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let, - #c_case{arg=Cexpr0,clauses=[Ca0,Cb0|Cs]}=Case, Sub0) -> - %% Test if there are no more clauses than Ca0 and Cb0, or if - %% Cb0 is guaranteed to match. - TwoClauses = Cs =:= [] orelse - case Cb0 of - #c_clause{pats=[#c_var{}],guard=#c_literal{val=true}} -> true; - _ -> false - end, - case {TwoClauses,is_failing_clause(Ca0),is_failing_clause(Cb0)} of - {true,false,true} -> + #c_case{arg=Cexpr0,clauses=[Ca0|Cs0]}=Case, Sub0) -> + case not is_failing_clause(Ca0) andalso + are_all_failing_clauses(Cs0) of + true -> %% let <Lvars> = case <Case-expr> of %% <Cpats> -> <Clause-body>; %% <OtherCpats> -> erlang:error(...) @@ -2467,8 +2461,8 @@ move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let, body=Lbody}, Ca = Ca0#c_clause{pats=CaPats,guard=G,body=B}, - Cb = clause(Cb0, Cexpr, value, Sub0), - Case#c_case{arg=Cexpr,clauses=[Ca,Cb]} + Cs = [clause(C, Cexpr, value, Sub0) || C <- Cs0], + Case#c_case{arg=Cexpr,clauses=[Ca|Cs]} catch nomatch -> %% This is not a defeat. The code will eventually @@ -2476,7 +2470,7 @@ move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let, %% optimizations done in this module. impossible end; - {_,_,_} -> impossible + false -> impossible end; move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let, #c_seq{arg=Sarg0,body=Sbody0}=Seq, Sub0) -> @@ -2499,6 +2493,9 @@ move_let_into_expr(#c_let{vars=Lvs0,body=Lbody0}=Let, body=Lbody}}; move_let_into_expr(_Let, _Expr, _Sub) -> impossible. +are_all_failing_clauses(Cs) -> + all(fun is_failing_clause/1, Cs). + is_failing_clause(#c_clause{body=B}) -> will_fail(B). diff --git a/lib/compiler/src/v3_codegen.erl b/lib/compiler/src/v3_codegen.erl index 47c1567f10..0ae3289103 100644 --- a/lib/compiler/src/v3_codegen.erl +++ b/lib/compiler/src/v3_codegen.erl @@ -884,12 +884,19 @@ select_extract_bin([{var,Hd}], Size, Unit, binary, Flags, Vf, %% calculcated by v3_life is too conservative to be useful for this purpose.) %% 'true' means that the code that follows will definitely not use the context %% again (because it is a block, not guard or matching code); 'false' that we -%% are not sure (there is either a guard, or more matching, either which may -%% reference the context again). - -is_context_unused(#l{ke=Ke}) -> is_context_unused(Ke); -is_context_unused({block,_}) -> true; -is_context_unused(_) -> false. +%% are not sure (there could be more matching). + +is_context_unused(#l{ke=Ke}) -> + is_context_unused(Ke); +is_context_unused({alt,_First,Then}) -> + %% {alt,First,Then} can be used for different purposes. If the Then part + %% is a block, it means that matching has finished and is used for a guard + %% to choose between the matched clauses. + is_context_unused(Then); +is_context_unused({block,_}) -> + true; +is_context_unused(_) -> + false. 
select_bin_end(#l{ke={val_clause,{bin_end,Ctx},B}}, Ivar, Tf, Bef, St0) -> diff --git a/lib/compiler/src/v3_core.erl b/lib/compiler/src/v3_core.erl index ae650546e5..20cb3343fb 100644 --- a/lib/compiler/src/v3_core.erl +++ b/lib/compiler/src/v3_core.erl @@ -2505,8 +2505,46 @@ cexpr(#ifun{anno=#a{us=Us0}=A0,name={named,Name},fc=#iclause{pats=Ps}}=Fun0, end; cexpr(#iapply{anno=A,op=Op,args=Args}, _As, St) -> {#c_apply{anno=A#a.anno,op=Op,args=Args},[],A#a.us,St}; -cexpr(#icall{anno=A,module=Mod,name=Name,args=Args}, _As, St) -> - {#c_call{anno=A#a.anno,module=Mod,name=Name,args=Args},[],A#a.us,St}; +cexpr(#icall{anno=A,module=Mod,name=Name,args=Args}, _As, St0) -> + Anno = A#a.anno, + case (not cerl:is_c_atom(Mod)) andalso member(tuple_calls, St0#core.opts) of + true -> + GenAnno = [compiler_generated|Anno], + + %% Generate the clause that matches on the tuple + {TupleVar,St1} = new_var(GenAnno, St0), + {TupleSizeVar, St2} = new_var(GenAnno, St1), + {TupleModVar, St3} = new_var(GenAnno, St2), + {TupleArgsVar, St4} = new_var(GenAnno, St3), + TryVar = cerl:c_var('Try'), + + TupleGuardExpr = + cerl:c_let([TupleSizeVar], + c_call_erl(tuple_size, [TupleVar]), + c_call_erl('>', [TupleSizeVar, cerl:c_int(0)])), + + TupleGuard = + cerl:c_try(TupleGuardExpr, [TryVar], TryVar, + [cerl:c_var('T'),cerl:c_var('R')], cerl:c_atom(false)), + + TupleApply = + cerl:c_let([TupleModVar], + c_call_erl(element, [cerl:c_int(1),TupleVar]), + cerl:c_let([TupleArgsVar], + cerl:make_list(Args ++ [TupleVar]), + c_call_erl(apply, [TupleModVar,Name,TupleArgsVar]))), + + TupleClause = cerl:ann_c_clause(GenAnno, [TupleVar], TupleGuard, TupleApply), + + %% Generate the fallback clause + {OtherVar,St5} = new_var(GenAnno, St4), + OtherApply = cerl:ann_c_call(GenAnno, OtherVar, Name, Args), + OtherClause = cerl:ann_c_clause(GenAnno, [OtherVar], OtherApply), + + {cerl:ann_c_case(GenAnno, Mod, [TupleClause,OtherClause]),[],A#a.us,St5}; + false -> + {#c_call{anno=Anno,module=Mod,name=Name,args=Args},[],A#a.us,St0} + end; cexpr(#iprimop{anno=A,name=Name,args=Args}, _As, St) -> {#c_primop{anno=A#a.anno,name=Name,args=Args},[],A#a.us,St}; cexpr(#iprotect{anno=A,body=Es}, _As, St0) -> @@ -2536,6 +2574,9 @@ cfun(#ifun{anno=A,id=Id,vars=Args,clauses=Lcs,fc=Lfc}, _As, St0) -> clauses=Ccs ++ [Cfc]}}, [],A#a.us,St2}. +c_call_erl(Fun, Args) -> + cerl:c_call(cerl:c_atom(erlang), cerl:c_atom(Fun), Args). + %% lit_vars(Literal) -> [Var]. lit_vars(Lit) -> lit_vars(Lit, []). diff --git a/lib/compiler/test/Makefile b/lib/compiler/test/Makefile index 63763f31b2..da5d207db9 100644 --- a/lib/compiler/test/Makefile +++ b/lib/compiler/test/Makefile @@ -22,6 +22,7 @@ MODULES= \ bs_construct_SUITE \ bs_match_SUITE \ bs_utf_SUITE \ + core_alias_SUITE \ core_fold_SUITE \ compile_SUITE \ compilation_SUITE \ diff --git a/lib/compiler/test/bs_match_SUITE.erl b/lib/compiler/test/bs_match_SUITE.erl index 0ec05456ec..1da7f68dab 100644 --- a/lib/compiler/test/bs_match_SUITE.erl +++ b/lib/compiler/test/bs_match_SUITE.erl @@ -39,7 +39,7 @@ match_string_opt/1,select_on_integer/1, map_and_binary/1,unsafe_branch_caching/1, bad_literals/1,good_literals/1,constant_propagation/1, - parse_xml/1,get_payload/1]). + parse_xml/1,get_payload/1,escape/1]). -export([coverage_id/1,coverage_external_ignore/2]). @@ -71,7 +71,7 @@ groups() -> match_string_opt,select_on_integer, map_and_binary,unsafe_branch_caching, bad_literals,good_literals,constant_propagation,parse_xml, - get_payload]}]. + get_payload,escape]}]. 
init_per_suite(Config) -> @@ -1524,6 +1524,21 @@ do_get_payload(ExtHdr) -> <<_:13,_:35>> = ExtHdr#ext_header.ext_hdr_opts, ExtHdrOptions. +escape(_Config) -> + 0 = escape(<<>>, 0), + 1 = escape(<<128>>, 0), + 2 = escape(<<128,255>>, 0), + 42 = escape(<<42>>, 0), + 50 = escape(<<42,8>>, 0), + ok. + +escape(<<Byte, Rest/bits>>, Pos) when Byte >= 127 -> + escape(Rest, Pos + 1); +escape(<<Byte, Rest/bits>>, Pos) -> + escape(Rest, Pos + Byte); +escape(<<_Rest/bits>>, Pos) -> + Pos. + check(F, R) -> R = F(). diff --git a/lib/compiler/test/compilation_SUITE_data/opt_crash.erl b/lib/compiler/test/compilation_SUITE_data/opt_crash.erl index f1607cca68..c65ec31593 100644 --- a/lib/compiler/test/compilation_SUITE_data/opt_crash.erl +++ b/lib/compiler/test/compilation_SUITE_data/opt_crash.erl @@ -33,7 +33,7 @@ test() -> {userinfo,nil}, fun() -> nil end}, nil}, - {'query',nil}}}, + {query,nil}}}, {absoluteURI, {scheme,_}, @@ -43,7 +43,7 @@ test() -> {userinfo,nil}, HostportBefore}, nil}, - {'query',nil}}} = URI_Before, + {query,nil}}} = URI_Before, %% ... some funky code ommitted, not relevant ... @@ -55,7 +55,7 @@ test() -> {userinfo,nil}, HostportAfter}, nil}, - {'query',nil}}} = URI_Before, + {query,nil}}} = URI_Before, %% NOTE: I intended to write URI_After instead of URI_Before %% but the accident revealed that when you add the line below, %% it causes internal error in v3_codegen on compilation diff --git a/lib/compiler/test/compile_SUITE.erl b/lib/compiler/test/compile_SUITE.erl index f647a4030d..25983c6012 100644 --- a/lib/compiler/test/compile_SUITE.erl +++ b/lib/compiler/test/compile_SUITE.erl @@ -27,12 +27,12 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, app_test/1,appup_test/1, - debug_info/4, custom_debug_info/1, + debug_info/4, custom_debug_info/1, custom_compile_info/1, file_1/1, forms_2/1, module_mismatch/1, big_file/1, outdir/1, binary/1, makedep/1, cond_and_ifdef/1, listings/1, listings_big/1, other_output/1, kernel_listing/1, encrypted_abstr/1, strict_record/1, utf8_atoms/1, utf8_functions/1, extra_chunks/1, - cover/1, env/1, core_pp/1, + cover/1, env/1, core_pp/1, tuple_calls/1, core_roundtrip/1, asm/1, optimized_guards/1, sys_pre_attributes/1, dialyzer/1, warnings/1, pre_load_check/1, env_compiler_options/1, @@ -49,11 +49,12 @@ all() -> test_lib:recompile(?MODULE), [app_test, appup_test, file_1, forms_2, module_mismatch, big_file, outdir, binary, makedep, cond_and_ifdef, listings, listings_big, - other_output, kernel_listing, encrypted_abstr, + other_output, kernel_listing, encrypted_abstr, tuple_calls, strict_record, utf8_atoms, utf8_functions, extra_chunks, cover, env, core_pp, core_roundtrip, asm, optimized_guards, sys_pre_attributes, dialyzer, warnings, pre_load_check, - env_compiler_options, custom_debug_info, bc_options]. + env_compiler_options, custom_debug_info, bc_options, + custom_compile_info]. groups() -> []. @@ -649,6 +650,23 @@ custom_debug_info(Config) when is_list(Config) -> {ok,{simple,[{debug_info,{debug_info_v1,?MODULE,error}}]}} = beam_lib:chunks(ErrorBin, [debug_info]). 
+custom_compile_info(Config) when is_list(Config) -> + Anno = erl_anno:new(1), + Forms = [{attribute,Anno,module,custom_compile_info}], + Opts = [binary,{compile_info,[{another,version}]}], + + {ok,custom_compile_info,Bin} = compile:forms(Forms, Opts), + {ok,{custom_compile_info,[{compile_info,CompileInfo}]}} = + beam_lib:chunks(Bin, [compile_info]), + version = proplists:get_value(another, CompileInfo), + CompileOpts = proplists:get_value(options, CompileInfo), + undefined = proplists:get_value(compile_info, CompileOpts), + + {ok,custom_compile_info,DetBin} = compile:forms(Forms, [deterministic|Opts]), + {ok,{custom_compile_info,[{compile_info,DetInfo}]}} = + beam_lib:chunks(DetBin, [compile_info]), + version = proplists:get_value(another, DetInfo). + cover(Config) when is_list(Config) -> io:format("~p\n", [compile:options()]), ok. @@ -781,6 +799,37 @@ extra_chunks(Config) when is_list(Config) -> {ok,{extra_chunks,[{"ExCh",<<"Contents">>}]}} = beam_lib:chunks(ExtraChunksBinary, ["ExCh"]). +tuple_calls(Config) when is_list(Config) -> + Anno = erl_anno:new(1), + Forms = [{attribute,Anno,export,[{size,1},{store,1}]}, + {function,Anno,size,1, + [{clause,Anno,[{var,[],mod}],[], + [{call,[],{remote,[],{var,[],mod},{atom,[],size}},[]}]}]}, + {function,Anno,store,1, + [{clause,Anno,[{var,[],mod}],[], + [{call,[],{remote,[],{var,[],mod},{atom,[],store}},[{atom,[],key},{atom,[],value}]}]}]}], + + TupleCallsFalse = [{attribute,Anno,module,tuple_calls_false}|Forms], + {ok,_,TupleCallsFalseBinary} = compile:forms(TupleCallsFalse, [binary]), + code:load_binary(tuple_calls_false, "compile_SUITE.erl", TupleCallsFalseBinary), + {'EXIT',{badarg,_}} = (catch tuple_calls_false:store(dict())), + {'EXIT',{badarg,_}} = (catch tuple_calls_false:size(dict())), + {'EXIT',{badarg,_}} = (catch tuple_calls_false:size(empty_tuple())), + + TupleCallsTrue = [{attribute,Anno,module,tuple_calls_true}|Forms], + {ok,_,TupleCallsTrueBinary} = compile:forms(TupleCallsTrue, [binary,tuple_calls]), + code:load_binary(tuple_calls_true, "compile_SUITE.erl", TupleCallsTrueBinary), + Dict = tuple_calls_true:store(dict()), + 1 = tuple_calls_true:size(Dict), + {'EXIT',{badarg,_}} = (catch tuple_calls_true:size(empty_tuple())), + + ok. + +dict() -> + dict:new(). +empty_tuple() -> + {}. + env(Config) when is_list(Config) -> {Simple,Target} = get_files(Config, simple, env), {ok,Cwd} = file:get_cwd(), diff --git a/lib/compiler/test/core_alias_SUITE.erl b/lib/compiler/test/core_alias_SUITE.erl new file mode 100644 index 0000000000..f3f15ef0f8 --- /dev/null +++ b/lib/compiler/test/core_alias_SUITE.erl @@ -0,0 +1,195 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2007-2016. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(core_alias_SUITE). + +-export([all/0, suite/0, groups/0,init_per_suite/1, end_per_suite/1, + init_per_group/2, end_per_group/2, + tuples/1, cons/1]). + +-include_lib("common_test/include/ct.hrl"). + +suite() -> [{ct_hooks,[ts_install_cth]}]. 
+ +all() -> + test_lib:recompile(?MODULE), + [{group,p}]. + +groups() -> + [{p,[parallel], + [tuples, cons]}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_GroupName, Config) -> + Config. + +end_per_group(_GroupName, Config) -> + Config. + + +id(X) -> X. + +tuples(Config) when is_list(Config) -> + Tuple = {ok,id(value)}, + + true = erts_debug:same(Tuple, simple_tuple(Tuple)), + true = erts_debug:same(Tuple, simple_tuple_in_map(#{hello => Tuple})), + true = erts_debug:same(Tuple, simple_tuple_case_repeated(Tuple, Tuple)), + true = erts_debug:same(Tuple, simple_tuple_fun_repeated(Tuple, Tuple)), + true = erts_debug:same(Tuple, simple_tuple_twice_head(Tuple, Tuple)), + + {Tuple1, Tuple2} = simple_tuple_twice_body(Tuple), + true = erts_debug:same(Tuple, Tuple1), + true = erts_debug:same(Tuple, Tuple2), + + Nested = {nested,Tuple}, + true = erts_debug:same(Tuple, nested_tuple_part(Nested)), + true = erts_debug:same(Nested, nested_tuple_whole(Nested)), + true = erts_debug:same(Nested, nested_tuple_with_alias(Nested)), + + true = erts_debug:same(Tuple, tuple_rebinding_after(Tuple)), + + Tuple = unaliased_tuple_rebinding_before(Tuple), + false = erts_debug:same(Tuple, unaliased_tuple_rebinding_before(Tuple)), + Nested = unaliased_literal_tuple_head(Nested), + false = erts_debug:same(Nested, unaliased_literal_tuple_head(Nested)), + Nested = unaliased_literal_tuple_body(Nested), + false = erts_debug:same(Nested, unaliased_literal_tuple_body(Nested)), + Nested = unaliased_different_var_tuple(Nested, Tuple), + false = erts_debug:same(Nested, unaliased_different_var_tuple(Nested, Tuple)). + +simple_tuple({ok,X}) -> + {ok,X}. +simple_tuple_twice_head({ok,X}, {ok,X}) -> + {ok,X}. +simple_tuple_twice_body({ok,X}) -> + {{ok,X},{ok,X}}. +simple_tuple_in_map(#{hello := {ok,X}}) -> + {ok,X}. +simple_tuple_fun_repeated({ok,X}, Y) -> + io:format("~p~n", [X]), + (fun({ok,X}) -> {ok,X} end)(Y). +simple_tuple_case_repeated({ok,X}, Y) -> + io:format("~p~n", [X]), + case Y of {ok,X} -> {ok,X} end. + +nested_tuple_part({nested,{ok,X}}) -> + {ok,X}. +nested_tuple_whole({nested,{ok,X}}) -> + {nested,{ok,X}}. +nested_tuple_with_alias({nested,{ok,_}=Y}) -> + {nested,Y}. + +tuple_rebinding_after(Y) -> + (fun(X) -> {ok,X} end)(Y), + case Y of {ok,X} -> {ok,X} end. +unaliased_tuple_rebinding_before({ok,X}) -> + io:format("~p~n", [X]), + (fun(X) -> {ok,X} end)(value). +unaliased_literal_tuple_head({nested,{ok,value}=X}) -> + io:format("~p~n", [X]), + {nested,{ok,value}}. +unaliased_literal_tuple_body({nested,{ok,value}=X}) -> + Res = {nested,Y={ok,value}}, + io:format("~p~n", [[X,Y]]), + Res. +unaliased_different_var_tuple({nested,{ok,value}=X}, Y) -> + io:format("~p~n", [X]), + {nested,Y}. 
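+%% Note on the unaliased_* helpers above and their cons counterparts below:
+%% as described in sys_core_alias.erl, a tuple or cons made up only of
+%% literals is not aliased (it may come from the literal pool), and the
+%% remaining unaliased_* cases rebuild a term that differs from, or can no
+%% longer safely reuse, the matched one. These helpers are therefore
+%% expected to return a freshly built term, hence the 'false' assertions.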
+ +cons(Config) when is_list(Config) -> + Cons = [ok|id(value)], + + true = erts_debug:same(Cons, simple_cons(Cons)), + true = erts_debug:same(Cons, simple_cons_in_map(#{hello => Cons})), + true = erts_debug:same(Cons, simple_cons_case_repeated(Cons, Cons)), + true = erts_debug:same(Cons, simple_cons_fun_repeated(Cons, Cons)), + true = erts_debug:same(Cons, simple_cons_twice_head(Cons, Cons)), + + {Cons1,Cons2} = simple_cons_twice_body(Cons), + true = erts_debug:same(Cons, Cons1), + true = erts_debug:same(Cons, Cons2), + + Nested = [nested,Cons], + true = erts_debug:same(Cons, nested_cons_part(Nested)), + true = erts_debug:same(Nested, nested_cons_whole(Nested)), + true = erts_debug:same(Nested, nested_cons_with_alias(Nested)), + true = erts_debug:same(Cons, cons_rebinding_after(Cons)), + + Unstripped = id([a,b]), + Stripped = cons_with_binary([<<>>|Unstripped]), + true = erts_debug:same(Unstripped, Stripped), + + Cons = unaliased_cons_rebinding_before(Cons), + false = erts_debug:same(Cons, unaliased_cons_rebinding_before(Cons)), + Nested = unaliased_literal_cons_head(Nested), + false = erts_debug:same(Nested, unaliased_literal_cons_head(Nested)), + Nested = unaliased_literal_cons_body(Nested), + false = erts_debug:same(Nested, unaliased_literal_cons_body(Nested)), + Nested = unaliased_different_var_cons(Nested, Cons), + false = erts_debug:same(Nested, unaliased_different_var_cons(Nested, Cons)). + +simple_cons([ok|X]) -> + [ok|X]. +simple_cons_twice_head([ok|X], [ok|X]) -> + [ok|X]. +simple_cons_twice_body([ok|X]) -> + {[ok|X],[ok|X]}. +simple_cons_in_map(#{hello := [ok|X]}) -> + [ok|X]. +simple_cons_fun_repeated([ok|X], Y) -> + io:format("~p~n", [X]), + (fun([ok|X]) -> [ok|X] end)(Y). +simple_cons_case_repeated([ok|X], Y) -> + io:format("~p~n", [X]), + case Y of [ok|X] -> [ok|X] end. + +nested_cons_part([nested,[ok|X]]) -> + [ok|X]. +nested_cons_whole([nested,[ok|X]]) -> + [nested,[ok|X]]. +nested_cons_with_alias([nested,[ok|_]=Y]) -> + [nested,Y]. + +cons_with_binary([<<>>,X|Y]) -> + cons_with_binary([X|Y]); +cons_with_binary(A) -> + A. + +cons_rebinding_after(Y) -> + (fun(X) -> [ok|X] end)(Y), + case Y of [ok|X] -> [ok|X] end. +unaliased_cons_rebinding_before([ok|X]) -> + io:format("~p~n", [X]), + (fun(X) -> [ok|X] end)(value). +unaliased_literal_cons_head([nested,[ok|value]=X]) -> + io:format("~p~n", [X]), + [nested,[ok|value]]. +unaliased_literal_cons_body([nested,[ok|value]=X]) -> + Res = [nested,Y=[ok|value]], + io:format("~p~n", [[X, Y]]), + Res. +unaliased_different_var_cons([nested,[ok|value]=X], Y) -> + io:format("~p~n", [X]), + [nested,Y]. diff --git a/lib/compiler/test/core_fold_SUITE.erl b/lib/compiler/test/core_fold_SUITE.erl index 0097e28d4d..262967d03d 100644 --- a/lib/compiler/test/core_fold_SUITE.erl +++ b/lib/compiler/test/core_fold_SUITE.erl @@ -26,7 +26,8 @@ unused_multiple_values_error/1,unused_multiple_values/1, multiple_aliases/1,redundant_boolean_clauses/1, mixed_matching_clauses/1,unnecessary_building/1, - no_no_file/1,configuration/1,supplies/1]). + no_no_file/1,configuration/1,supplies/1, + redundant_stack_frame/1]). -export([foo/0,foo/1,foo/2,foo/3]). @@ -45,7 +46,8 @@ groups() -> unused_multiple_values_error,unused_multiple_values, multiple_aliases,redundant_boolean_clauses, mixed_matching_clauses,unnecessary_building, - no_no_file,configuration,supplies]}]. + no_no_file,configuration,supplies, + redundant_stack_frame]}]. 
init_per_suite(Config) -> @@ -527,4 +529,26 @@ supplies(_Config) -> do_supplies(#{1 := Value}) when byte_size(Value), byte_size(kg) -> working. +redundant_stack_frame(_Config) -> + {1,2} = do_redundant_stack_frame(#{x=>1,y=>2}), + {'EXIT',{{badkey,_,x},_}} = (catch do_redundant_stack_frame(#{y=>2})), + {'EXIT',{{badkey,_,y},_}} = (catch do_redundant_stack_frame(#{x=>1})), + ok. + +do_redundant_stack_frame(Map) -> + %% There should not be a stack frame for this function. + X = case Map of + #{x := X0} -> + X0; + #{} -> + erlang:error({badkey, Map, x}) + end, + Y = case Map of + #{y := Y0} -> + Y0; + #{} -> + erlang:error({badkey, Map, y}) + end, + {X, Y}. + id(I) -> I. diff --git a/lib/compiler/test/guard_SUITE.erl b/lib/compiler/test/guard_SUITE.erl index ccb9b58225..d96cfdb7ac 100644 --- a/lib/compiler/test/guard_SUITE.erl +++ b/lib/compiler/test/guard_SUITE.erl @@ -1291,6 +1291,10 @@ rel_ops(Config) when is_list(Config) -> true = any_atom /= id(42), true = [] /= id(42), + %% Coverage of beam_utils:bif_to_test/3 + Empty = id([]), + ?T(==, [], Empty), + ok. -undef(TestOp). diff --git a/lib/compiler/test/match_SUITE.erl b/lib/compiler/test/match_SUITE.erl index 52b2da05f7..c31695be24 100644 --- a/lib/compiler/test/match_SUITE.erl +++ b/lib/compiler/test/match_SUITE.erl @@ -23,7 +23,7 @@ init_per_group/2,end_per_group/2, pmatch/1,mixed/1,aliases/1,non_matching_aliases/1, match_in_call/1,untuplify/1,shortcut_boolean/1,letify_guard/1, - selectify/1,underscore/1,match_map/1,map_vars_used/1, + selectify/1,deselectify/1,underscore/1,match_map/1,map_vars_used/1, coverage/1,grab_bag/1,literal_binary/1]). -include_lib("common_test/include/ct.hrl"). @@ -38,7 +38,7 @@ groups() -> [{p,[parallel], [pmatch,mixed,aliases,non_matching_aliases, match_in_call,untuplify, - shortcut_boolean,letify_guard,selectify, + shortcut_boolean,letify_guard,selectify,deselectify, underscore,match_map,map_vars_used,coverage, grab_bag,literal_binary]}]. @@ -466,6 +466,66 @@ sel_same_value2(V) when V =:= 42; V =:= 43 -> sel_same_value2(_) -> error. +%% Test deconstruction of select_val instructions in beam_peep into +%% regular tests with just one possible value left. Hitting proper cases +%% in beam_peep relies on unification of labels by beam_jump. + +deselectify(Config) when is_list(Config) -> + one_or_other = desel_tuple_arity({1}), + two = desel_tuple_arity({1,1}), + one_or_other = desel_tuple_arity({1,1,1}), + + one_or_other = dsel_integer(1), + two = dsel_integer(2), + one_or_other = dsel_integer(3), + + one_or_other = dsel_integer_typecheck(1), + two = dsel_integer_typecheck(2), + one_or_other = dsel_integer_typecheck(3), + + one_or_other = dsel_atom(one), + two = dsel_atom(two), + one_or_other = dsel_atom(three), + + one_or_other = dsel_atom_typecheck(one), + two = dsel_atom_typecheck(two), + one_or_other = dsel_atom_typecheck(three). + +desel_tuple_arity(Tuple) when is_tuple(Tuple) -> + case Tuple of + {_} -> one_or_other; + {_,_} -> two; + _ -> one_or_other + end. + +dsel_integer(Val) -> + case Val of + 1 -> one_or_other; + 2 -> two; + _ -> one_or_other + end. + +dsel_integer_typecheck(Val) when is_integer(Val) -> + case Val of + 1 -> one_or_other; + 2 -> two; + _ -> one_or_other + end. + +dsel_atom(Val) -> + case Val of + one -> one_or_other; + two -> two; + _ -> one_or_other + end. + +dsel_atom_typecheck(Val) when is_atom(Val) -> + case Val of + one -> one_or_other; + two -> two; + _ -> one_or_other + end. 
+ underscore(Config) when is_list(Config) -> case Config of [] -> diff --git a/lib/compiler/test/misc_SUITE.erl b/lib/compiler/test/misc_SUITE.erl index 4bd884d86b..ea4aaf40a9 100644 --- a/lib/compiler/test/misc_SUITE.erl +++ b/lib/compiler/test/misc_SUITE.erl @@ -161,11 +161,12 @@ md5_1(Beam) -> %% Cover some code that handles internal errors. silly_coverage(Config) when is_list(Config) -> - %% sys_core_fold, sys_core_bsm, sys_core_setel, v3_kernel + %% sys_core_fold, sys_core_alias, sys_core_bsm, sys_core_setel, v3_kernel BadCoreErlang = {c_module,[], name,[],[], [{{c_var,[],{foo,2}},seriously_bad_body}]}, expect_error(fun() -> sys_core_fold:module(BadCoreErlang, []) end), + expect_error(fun() -> sys_core_alias:module(BadCoreErlang, []) end), expect_error(fun() -> sys_core_bsm:module(BadCoreErlang, []) end), expect_error(fun() -> sys_core_dsetel:module(BadCoreErlang, []) end), expect_error(fun() -> v3_kernel:module(BadCoreErlang, []) end), diff --git a/lib/compiler/test/trycatch_SUITE.erl b/lib/compiler/test/trycatch_SUITE.erl index a591d6cc93..42dbf7d5f0 100644 --- a/lib/compiler/test/trycatch_SUITE.erl +++ b/lib/compiler/test/trycatch_SUITE.erl @@ -324,11 +324,11 @@ eclectic(Conf) when is_list(Conf) -> {{error,{exit,V},{'EXIT',V}},V} = eclectic_1({foo,{error,{exit,V}}}, error, {value,V}), {{value,{value,V},V}, - {'EXIT',{badarith,[{?MODULE,my_add,2,_}|_]}}} = + {'EXIT',{badarith,[{erlang,'+',[0,a],_},{?MODULE,my_add,2,_}|_]}}} = eclectic_1({foo,{value,{value,V}}}, undefined, {'add',{0,a}}), {{'EXIT',V},V} = eclectic_1({catch_foo,{exit,V}}, undefined, {throw,V}), - {{error,{'div',{1,0}},{'EXIT',{badarith,[{?MODULE,my_div,2,_}|_]}}}, + {{error,{'div',{1,0}},{'EXIT',{badarith,[{erlang,'div',[1,0],_},{?MODULE,my_div,2,_}|_]}}}, {'EXIT',V}} = eclectic_1({foo,{error,{'div',{1,0}}}}, error, {exit,V}), {{{error,V},{'EXIT',{V,[{?MODULE,foo,1,_}|_]}}}, @@ -345,7 +345,7 @@ eclectic(Conf) when is_list(Conf) -> eclectic_2({error,{value,V}}, throw, {error,V}), {{caught,{'EXIT',{badarg,[{erlang,abs,[V],_}|_]}}},V} = eclectic_2({value,{'abs',V}}, undefined, {value,V}), - {{caught,{'EXIT',{badarith,[{?MODULE,my_add,2,_}|_]}}},V} = + {{caught,{'EXIT',{badarith,[{erlang,'+',[0,a],_},{?MODULE,my_add,2,_}|_]}}},V} = eclectic_2({exit,{'add',{0,a}}}, exit, {value,V}), {{caught,{'EXIT',V}},undefined} = eclectic_2({value,{error,V}}, undefined, {exit,V}), diff --git a/lib/dialyzer/src/dialyzer_utils.erl b/lib/dialyzer/src/dialyzer_utils.erl index 9b8fbc67eb..abd89034f3 100644 --- a/lib/dialyzer/src/dialyzer_utils.erl +++ b/lib/dialyzer/src/dialyzer_utils.erl @@ -120,92 +120,10 @@ get_core_from_beam(File, Opts) -> {error, " Could not get Core Erlang code for: " ++ File ++ "\n"} end; _ -> - deprecated_get_core_from_beam(File, Opts) + {error, " Could not get Core Erlang code for: " ++ File ++ "\n" ++ + " Recompile with +debug_info or analyze starting from source code"} end. -deprecated_get_core_from_beam(File, Opts) -> - case get_abstract_code_from_beam(File) of - error -> - {error, " Could not get abstract code for: " ++ File ++ "\n" ++ - " Recompile with +debug_info or analyze starting from source code"}; - {ok, AbstrCode} -> - case get_compile_options_from_beam(File) of - error -> - {error, " Could not get compile options for: " ++ File ++ "\n" ++ - " Recompile or analyze starting from source code"}; - {ok, CompOpts} -> - case get_core_from_abstract_code(AbstrCode, Opts ++ CompOpts) of - error -> - {error, " Could not get core Erlang code for: " ++ File}; - {ok, _} = Core -> - Core - end - end - end. 
- -get_abstract_code_from_beam(File) -> - case beam_lib:chunks(File, [abstract_code]) of - {ok, {_, List}} -> - case lists:keyfind(abstract_code, 1, List) of - {abstract_code, {raw_abstract_v1, Abstr}} -> {ok, Abstr}; - _ -> error - end; - _ -> - %% No or unsuitable abstract code. - error - end. - -get_compile_options_from_beam(File) -> - case beam_lib:chunks(File, [compile_info]) of - {ok, {_, List}} -> - case lists:keyfind(compile_info, 1, List) of - {compile_info, CompInfo} -> compile_info_to_options(CompInfo); - _ -> error - end; - _ -> - %% No or unsuitable compile info. - error - end. - -compile_info_to_options(CompInfo) -> - case lists:keyfind(options, 1, CompInfo) of - {options, CompOpts} -> {ok, CompOpts}; - _ -> error - end. - -get_core_from_abstract_code(AbstrCode, Opts) -> - %% We do not want the parse_transforms around since we already - %% performed them. In some cases we end up in trouble when - %% performing them again. - AbstrCode1 = cleanup_parse_transforms(AbstrCode), - %% Remove parse_transforms (and other options) from compile options. - Opts2 = cleanup_compile_options(Opts), - try compile:noenv_forms(AbstrCode1, Opts2 ++ src_compiler_opts()) of - {ok, _, Core} -> {ok, Core}; - _What -> error - catch - error:_ -> error - end. - -cleanup_parse_transforms([{attribute, _, compile, {parse_transform, _}}|Left]) -> - cleanup_parse_transforms(Left); -cleanup_parse_transforms([Other|Left]) -> - [Other|cleanup_parse_transforms(Left)]; -cleanup_parse_transforms([]) -> - []. - -cleanup_compile_options(Opts) -> - lists:filter(fun keep_compile_option/1, Opts). - -%% Using abstract, not asm or core. -keep_compile_option(from_asm) -> false; -keep_compile_option(from_core) -> false; -%% The parse transform will already have been applied, may cause -%% problems if it is re-applied. -keep_compile_option({parse_transform, _}) -> false; -keep_compile_option(warnings_as_errors) -> false; -keep_compile_option(_) -> true. 
- %% ============================================================================ %% %% Typed Records diff --git a/lib/dialyzer/test/behaviour_SUITE_data/results/gen_server_incorrect_args b/lib/dialyzer/test/behaviour_SUITE_data/results/gen_server_incorrect_args index 1eb8cd455b..1be0ce0d8c 100644 --- a/lib/dialyzer/test/behaviour_SUITE_data/results/gen_server_incorrect_args +++ b/lib/dialyzer/test/behaviour_SUITE_data/results/gen_server_incorrect_args @@ -1,5 +1,5 @@ gen_server_incorrect_args.erl:3: Undefined callback function handle_cast/2 (behaviour gen_server) gen_server_incorrect_args.erl:3: Undefined callback function init/1 (behaviour gen_server) -gen_server_incorrect_args.erl:7: The inferred return type of handle_call/3 ({'no'} | {'ok'}) has nothing in common with {'noreply',_} | {'noreply',_,'hibernate' | 'infinity' | non_neg_integer()} | {'reply',_,_} | {'stop',_,_} | {'reply',_,_,'hibernate' | 'infinity' | non_neg_integer()} | {'stop',_,_,_}, which is the expected return type for the callback of the gen_server behaviour +gen_server_incorrect_args.erl:7: The inferred return type of handle_call/3 ({'no'} | {'ok'}) has nothing in common with {'noreply',_} | {'noreply',_,'hibernate' | 'infinity' | non_neg_integer() | {'continue',_}} | {'reply',_,_} | {'stop',_,_} | {'reply',_,_,'hibernate' | 'infinity' | non_neg_integer() | {'continue',_}} | {'stop',_,_,_}, which is the expected return type for the callback of the gen_server behaviour gen_server_incorrect_args.erl:7: The inferred type for the 2nd argument of handle_call/3 ('boo' | 'foo') is not a supertype of {pid(),_}, which is expected type for this argument in the callback of the gen_server behaviour diff --git a/lib/kernel/doc/src/inet_res.xml b/lib/kernel/doc/src/inet_res.xml index 4ada4203c0..3454e3c6f9 100644 --- a/lib/kernel/doc/src/inet_res.xml +++ b/lib/kernel/doc/src/inet_res.xml @@ -130,7 +130,7 @@ dns_header() = DnsHeader inet_dns:header(DnsHeader) -> [ {id, integer()} | {qr, boolean()} - | {opcode, 'query' | iquery | status | integer()} + | {opcode, query | iquery | status | integer()} | {aa, boolean()} | {tc, boolean()} | {rd, boolean()} diff --git a/lib/kernel/examples/Makefile b/lib/kernel/examples/Makefile index 26ec58f571..f86e662838 100644 --- a/lib/kernel/examples/Makefile +++ b/lib/kernel/examples/Makefile @@ -45,7 +45,7 @@ RELSYSDIR = $(RELEASE_PATH)/lib/kernel-$(KERNEL_VSN)/examples # Pack and install the complete directory structure from # here (CWD) and down, for all examples. 
-EXAMPLES = uds_dist +EXAMPLES = uds_dist gen_tcp_dist release_spec: $(INSTALL_DIR) "$(RELSYSDIR)" diff --git a/lib/kernel/examples/gen_tcp_dist/Makefile b/lib/kernel/examples/gen_tcp_dist/Makefile new file mode 100644 index 0000000000..65513a1729 --- /dev/null +++ b/lib/kernel/examples/gen_tcp_dist/Makefile @@ -0,0 +1,20 @@ +RM=rm -f +CP=cp +EBIN=ebin +ERLC=erlc +# Works if building in open source source tree +KERNEL_INCLUDE=$(ERL_TOP)/lib/kernel/include +ERLCFLAGS+= -W -I$(KERNEL_INCLUDE) + +MODULES=gen_tcp_dist + +TARGET_FILES=$(MODULES:%=$(EBIN)/%.beam) + +opt: $(TARGET_FILES) + +$(EBIN)/%.beam: src/%.erl + $(ERLC) $(ERLCFLAGS) -o$(EBIN) $< + +clean: + $(RM) $(TARGET_FILES) + diff --git a/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore b/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore diff --git a/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl b/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl new file mode 100644 index 0000000000..98554ed805 --- /dev/null +++ b/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl @@ -0,0 +1,781 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(gen_tcp_dist). + +%% +%% This is an example of how to plug in an arbitrary distribution +%% carrier for Erlang using distribution processes. +%% +%% This example uses gen_tcp for transportation of data, but +%% you can use whatever underlying protocol you want as long +%% as your implementation reliably delivers data chunks to the +%% receiving VM in the order they were sent from the sending +%% VM. +%% +%% This code is a rewrite of the lib/kernel/src/inet_tcp_dist.erl +%% distribution impementation for TCP used by default. That +%% implementation use distribution ports instead of distribution +%% processes and is more efficient compared to this implementation. +%% This since this implementation more or less gets the +%% distribution processes in between the VM and the ports without +%% any gain specific gain. +%% + +-export([listen/1, accept/1, accept_connection/5, + setup/5, close/1, select/1, is_node_name/1]). + +%% Optional +-export([setopts/2, getopts/2]). + +%% internal exports + +-export([dist_cntrlr_setup/1, dist_cntrlr_input_setup/3, + dist_cntrlr_tick_handler/1]). + +-export([accept_loop/2,do_accept/6,do_setup/6]). + +-import(error_logger,[error_msg/2]). + +-include("net_address.hrl"). + +-include("dist.hrl"). +-include("dist_util.hrl"). + +%% ------------------------------------------------------------ +%% Select this protocol based on node name +%% select(Node) => Bool +%% ------------------------------------------------------------ + +select(Node) -> + case split_node(atom_to_list(Node), $@, []) of + [_, Host] -> + case inet:getaddr(Host, inet) of + {ok,_} -> true; + _ -> false + end; + _ -> false + end. 
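+
+%% To try this example out (an illustrative sketch; paths and node names
+%% are arbitrary): compile gen_tcp_dist.erl, put its ebin directory on the
+%% code path, and select the module with the -proto_dist flag (net_kernel
+%% appends "_dist" to the given name), for instance:
+%%
+%%   erl -pa lib/kernel/examples/gen_tcp_dist/ebin -proto_dist gen_tcp -sname alt
+%%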
+ +%% ------------------------------------------------------------ +%% Create the listen socket, i.e. the port that this erlang +%% node is accessible through. +%% ------------------------------------------------------------ + +listen(Name) -> + case do_listen([binary, {active, false}, {packet,2}, {reuseaddr, true}]) of + {ok, Socket} -> + TcpAddress = get_tcp_address(Socket), + {_,Port} = TcpAddress#net_address.address, + ErlEpmd = net_kernel:epmd_module(), + case ErlEpmd:register_node(Name, Port) of + {ok, Creation} -> + {ok, {Socket, TcpAddress, Creation}}; + Error -> + Error + end; + Error -> + Error + end. + +do_listen(Options) -> + {First,Last} = case application:get_env(kernel,inet_dist_listen_min) of + {ok,N} when is_integer(N) -> + case application:get_env(kernel, + inet_dist_listen_max) of + {ok,M} when is_integer(M) -> + {N,M}; + _ -> + {N,N} + end; + _ -> + {0,0} + end, + do_listen(First, Last, listen_options([{backlog,128}|Options])). + +do_listen(First,Last,_) when First > Last -> + {error,eaddrinuse}; +do_listen(First,Last,Options) -> + case gen_tcp:listen(First, Options) of + {error, eaddrinuse} -> + do_listen(First+1,Last,Options); + Other -> + Other + end. + +listen_options(Opts0) -> + Opts1 = + case application:get_env(kernel, inet_dist_use_interface) of + {ok, Ip} -> + [{ip, Ip} | Opts0]; + _ -> + Opts0 + end, + case application:get_env(kernel, inet_dist_listen_options) of + {ok,ListenOpts} -> + ListenOpts ++ Opts1; + _ -> + Opts1 + end. + + +%% ------------------------------------------------------------ +%% Accepts new connection attempts from other Erlang nodes. +%% ------------------------------------------------------------ + +accept(Listen) -> + spawn_opt(?MODULE, accept_loop, [self(), Listen], [link, {priority, max}]). + +accept_loop(Kernel, Listen) -> + ?trace("~p~n",[{?MODULE, accept_loop, self()}]), + case gen_tcp:accept(Listen) of + {ok, Socket} -> + DistCtrl = spawn_dist_cntrlr(Socket), + ?trace("~p~n",[{?MODULE, accept_loop, accepted, Socket, DistCtrl, self()}]), + flush_controller(DistCtrl, Socket), + gen_tcp:controlling_process(Socket, DistCtrl), + flush_controller(DistCtrl, Socket), + Kernel ! {accept,self(),DistCtrl,inet,tcp}, + receive + {Kernel, controller, Pid} -> + call_ctrlr(DistCtrl, {supervisor, Pid}), + Pid ! {self(), controller}; + {Kernel, unsupported_protocol} -> + exit(unsupported_protocol) + end, + accept_loop(Kernel, Listen); + Error -> + exit(Error) + end. + +flush_controller(Pid, Socket) -> + receive + {tcp, Socket, Data} -> + Pid ! {tcp, Socket, Data}, + flush_controller(Pid, Socket); + {tcp_closed, Socket} -> + Pid ! {tcp_closed, Socket}, + flush_controller(Pid, Socket) + after 0 -> + ok + end. + +%% ------------------------------------------------------------ +%% Accepts a new connection attempt from another Erlang node. +%% Performs the handshake with the other side. +%% ------------------------------------------------------------ + +accept_connection(AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) -> + spawn_opt(?MODULE, do_accept, + [self(), AcceptPid, DistCtrl, MyNode, Allowed, SetupTime], + [link, {priority, max}]). 
+ +do_accept(Kernel, AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) -> + ?trace("~p~n",[{?MODULE, do_accept, self(), MyNode}]), + receive + {AcceptPid, controller} -> + Timer = dist_util:start_timer(SetupTime), + case check_ip(DistCtrl) of + true -> + HSData0 = hs_data_common(DistCtrl), + HSData = HSData0#hs_data{kernel_pid = Kernel, + this_node = MyNode, + socket = DistCtrl, + timer = Timer, + this_flags = 0, + allowed = Allowed}, + dist_util:handshake_other_started(HSData); + {false,IP} -> + error_msg("** Connection attempt from " + "disallowed IP ~w ** ~n", [IP]), + ?shutdown(no_node) + end + end. + +%% we may not always want the nodelay behaviour +%% for performance reasons + +nodelay() -> + case application:get_env(kernel, dist_nodelay) of + undefined -> + {nodelay, true}; + {ok, true} -> + {nodelay, true}; + {ok, false} -> + {nodelay, false}; + _ -> + {nodelay, true} + end. + +%% ------------------------------------------------------------ +%% Setup a new connection to another Erlang node. +%% Performs the handshake with the other side. +%% ------------------------------------------------------------ + +setup(Node, Type, MyNode, LongOrShortNames,SetupTime) -> + spawn_opt(?MODULE, do_setup, + [self(), Node, Type, MyNode, LongOrShortNames, SetupTime], + [link, {priority, max}]). + +do_setup(Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) -> + ?trace("~p~n",[{?MODULE, do_setup, self(), Node}]), + [Name, Address] = splitnode(Node, LongOrShortNames), + case inet:getaddr(Address, inet) of + {ok, Ip} -> + Timer = dist_util:start_timer(SetupTime), + ErlEpmd = net_kernel:epmd_module(), + case ErlEpmd:port_please(Name, Ip) of + {port, TcpPort, Version} -> + ?trace("port_please(~p) -> version ~p~n", + [Node,Version]), + dist_util:reset_timer(Timer), + case + gen_tcp:connect( + Ip, TcpPort, + connect_options([binary, {active, false}, {packet, 2}])) + of + {ok, Socket} -> + DistCtrl = spawn_dist_cntrlr(Socket), + call_ctrlr(DistCtrl, {supervisor, self()}), + flush_controller(DistCtrl, Socket), + gen_tcp:controlling_process(Socket, DistCtrl), + flush_controller(DistCtrl, Socket), + HSData0 = hs_data_common(DistCtrl), + HSData = HSData0#hs_data{kernel_pid = Kernel, + other_node = Node, + this_node = MyNode, + socket = DistCtrl, + timer = Timer, + this_flags = 0, + other_version = Version, + request_type = Type}, + dist_util:handshake_we_started(HSData); + _ -> + %% Other Node may have closed since + %% port_please ! + ?trace("other node (~p) " + "closed since port_please.~n", + [Node]), + ?shutdown(Node) + end; + _ -> + ?trace("port_please (~p) " + "failed.~n", [Node]), + ?shutdown(Node) + end; + _Other -> + ?trace("inet_getaddr(~p) " + "failed (~p).~n", [Node,_Other]), + ?shutdown(Node) + end. + +connect_options(Opts) -> + case application:get_env(kernel, inet_dist_connect_options) of + {ok,ConnectOpts} -> + ConnectOpts ++ Opts; + _ -> + Opts + end. + +%% +%% Close a socket. +%% +close(Listen) -> + gen_tcp:close(Listen). + + +%% If Node is illegal terminate the connection setup!! 
+splitnode(Node, LongOrShortNames) -> + case split_node(atom_to_list(Node), $@, []) of + [Name|Tail] when Tail =/= [] -> + Host = lists:append(Tail), + case split_node(Host, $., []) of + [_] when LongOrShortNames =:= longnames -> + case inet:parse_address(Host) of + {ok, _} -> + [Name, Host]; + _ -> + error_msg("** System running to use " + "fully qualified " + "hostnames **~n" + "** Hostname ~ts is illegal **~n", + [Host]), + ?shutdown(Node) + end; + L when length(L) > 1, LongOrShortNames =:= shortnames -> + error_msg("** System NOT running to use fully qualified " + "hostnames **~n" + "** Hostname ~ts is illegal **~n", + [Host]), + ?shutdown(Node); + _ -> + [Name, Host] + end; + [_] -> + error_msg("** Nodename ~p illegal, no '@' character **~n", + [Node]), + ?shutdown(Node); + _ -> + error_msg("** Nodename ~p illegal **~n", [Node]), + ?shutdown(Node) + end. + +split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])]; +split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]); +split_node([], _, Ack) -> [lists:reverse(Ack)]. + +%% ------------------------------------------------------------ +%% Fetch local information about a Socket. +%% ------------------------------------------------------------ +get_tcp_address(Socket) -> + {ok, Address} = inet:sockname(Socket), + {ok, Host} = inet:gethostname(), + #net_address { + address = Address, + host = Host, + protocol = tcp, + family = inet + }. + +%% ------------------------------------------------------------ +%% Do only accept new connection attempts from nodes at our +%% own LAN, if the check_ip environment parameter is true. +%% ------------------------------------------------------------ +check_ip(DistCtrl) -> + case application:get_env(check_ip) of + {ok, true} -> + case get_ifs(DistCtrl) of + {ok, IFs, IP} -> + check_ip(IFs, IP); + _ -> + ?shutdown(no_node) + end; + _ -> + true + end. + +get_ifs(DistCtrl) -> + Socket = call_ctrlr(DistCtrl, socket), + case inet:peername(Socket) of + {ok, {IP, _}} -> + case inet:getif(Socket) of + {ok, IFs} -> {ok, IFs, IP}; + Error -> Error + end; + Error -> + Error + end. + +check_ip([{OwnIP, _, Netmask}|IFs], PeerIP) -> + case {inet_tcp:mask(Netmask, PeerIP), inet_tcp:mask(Netmask, OwnIP)} of + {M, M} -> true; + _ -> check_ip(IFs, PeerIP) + end; +check_ip([], PeerIP) -> + {false, PeerIP}. + +is_node_name(Node) when is_atom(Node) -> + case split_node(atom_to_list(Node), $@, []) of + [_, _Host] -> true; + _ -> false + end; +is_node_name(_Node) -> + false. + +hs_data_common(DistCtrl) -> + TickHandler = call_ctrlr(DistCtrl, tick_handler), + Socket = call_ctrlr(DistCtrl, socket), + #hs_data{f_send = send_fun(), + f_recv = recv_fun(), + f_setopts_pre_nodeup = setopts_pre_nodeup_fun(), + f_setopts_post_nodeup = setopts_post_nodeup_fun(), + f_getll = getll_fun(), + f_handshake_complete = handshake_complete_fun(), + f_address = address_fun(), + mf_setopts = setopts_fun(DistCtrl, Socket), + mf_getopts = getopts_fun(DistCtrl, Socket), + mf_getstat = getstat_fun(DistCtrl, Socket), + mf_tick = tick_fun(DistCtrl, TickHandler)}. + +%%% ------------------------------------------------------------ +%%% Distribution controller processes +%%% ------------------------------------------------------------ + +%% +%% There will be five parties working together when the +%% connection is up: +%% - The gen_tcp socket. Providing a tcp/ip connection +%% to the other node. +%% - The output handler. It will dispatch all outgoing +%% traffic from the VM to the gen_tcp socket. 
This +%% process is registered as distribution controller +%% for this channel with the VM. +%% - The input handler. It will dispatch all incoming +%% traffic from the gen_tcp socket to the VM. This +%% process is also the socket owner and receives +%% incoming traffic using active-N. +%% - The tick handler. Dispatches asynchronous tick +%% requests to the socket. It executes on max priority +%% since it is important to get ticks through to the +%% other end. +%% - The channel supervisor (provided by dist_util). It +%% monitors traffic. Issue tick requests to the tick +%% handler when no outgoing traffic is seen and bring +%% the connection down if no incoming traffic is seen. +%% This process also executes on max priority. +%% +%% These parties are linked togheter so should one +%% of them fail, all of them are terminated and the +%% connection is taken down. +%% + +%% In order to avoid issues with lingering signal binaries +%% we enable off-heap message queue data as well as fullsweep +%% after 0. The fullsweeps will be cheap since we have more +%% or less no live data. +-define(DIST_CNTRL_COMMON_SPAWN_OPTS, + [{message_queue_data, off_heap}, + {fullsweep_after, 0}]). + +tick_fun(DistCtrl, TickHandler) -> + fun (Ctrl) when Ctrl == DistCtrl -> + TickHandler ! tick + end. + +getstat_fun(DistCtrl, Socket) -> + fun (Ctrl) when Ctrl == DistCtrl -> + case inet:getstat(Socket, [recv_cnt, send_cnt, send_pend]) of + {ok, Stat} -> + split_stat(Stat,0,0,0); + Error -> + Error + end + end. + +split_stat([{recv_cnt, R}|Stat], _, W, P) -> + split_stat(Stat, R, W, P); +split_stat([{send_cnt, W}|Stat], R, _, P) -> + split_stat(Stat, R, W, P); +split_stat([{send_pend, P}|Stat], R, W, _) -> + split_stat(Stat, R, W, P); +split_stat([], R, W, P) -> + {ok, R, W, P}. + +setopts_fun(DistCtrl, Socket) -> + fun (Ctrl, Opts) when Ctrl == DistCtrl -> + setopts(Socket, Opts) + end. + +getopts_fun(DistCtrl, Socket) -> + fun (Ctrl, Opts) when Ctrl == DistCtrl -> + getopts(Socket, Opts) + end. + +setopts(S, Opts) -> + case [Opt || {K,_}=Opt <- Opts, + K =:= active orelse K =:= deliver orelse K =:= packet] of + [] -> inet:setopts(S,Opts); + Opts1 -> {error, {badopts,Opts1}} + end. + +getopts(S, Opts) -> + inet:getopts(S, Opts). + +send_fun() -> + fun (Ctrlr, Packet) -> + call_ctrlr(Ctrlr, {send, Packet}) + end. + +recv_fun() -> + fun (Ctrlr, Length, Timeout) -> + case call_ctrlr(Ctrlr, {recv, Length, Timeout}) of + {ok, Bin} when is_binary(Bin) -> + {ok, binary_to_list(Bin)}; + Other -> + Other + end + end. + +getll_fun() -> + fun (Ctrlr) -> + call_ctrlr(Ctrlr, getll) + end. + +address_fun() -> + fun (Ctrlr, Node) -> + case call_ctrlr(Ctrlr, {address, Node}) of + {error, no_node} -> %% No '@' or more than one '@' in node name. + ?shutdown(no_node); + Res -> + Res + end + end. + +setopts_pre_nodeup_fun() -> + fun (Ctrlr) -> + call_ctrlr(Ctrlr, pre_nodeup) + end. + +setopts_post_nodeup_fun() -> + fun (Ctrlr) -> + call_ctrlr(Ctrlr, post_nodeup) + end. + +handshake_complete_fun() -> + fun (Ctrlr, Node, DHandle) -> + call_ctrlr(Ctrlr, {handshake_complete, Node, DHandle}) + end. + +call_ctrlr(Ctrlr, Msg) -> + Ref = erlang:monitor(process, Ctrlr), + Ctrlr ! {Ref, self(), Msg}, + receive + {Ref, Res} -> + erlang:demonitor(Ref, [flush]), + Res; + {'DOWN', Ref, process, Ctrlr, Reason} -> + exit({dist_controller_exit, Reason}) + end. + +%% +%% The tick handler process writes a tick to the +%% socket when it receives a 'tick' message from +%% the connection supervisor. 
+%% +%% We are not allowed to block the connection +%% superviser when writing a tick and we also want +%% the tick to go through even during a heavily +%% loaded system. gen_tcp does not have a +%% non-blocking send operation exposed in its API +%% and we don't want to run the distribution +%% controller under high priority. Therefore this +%% sparate process with max prio that dispatches +%% ticks. +%% +dist_cntrlr_tick_handler(Socket) -> + receive + tick -> + %% May block due to busy port... + sock_send(Socket, ""); + _ -> + ok + end, + dist_cntrlr_tick_handler(Socket). + +spawn_dist_cntrlr(Socket) -> + spawn_opt(?MODULE, dist_cntrlr_setup, [Socket], + [{priority, max}] ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS). + +dist_cntrlr_setup(Socket) -> + TickHandler = spawn_opt(?MODULE, dist_cntrlr_tick_handler, + [Socket], + [link, {priority, max}] + ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS), + dist_cntrlr_setup_loop(Socket, TickHandler, undefined). + +%% +%% During the handshake phase we loop in dist_cntrlr_setup(). +%% When the connection is up we spawn an input handler and +%% continue as output handler. +%% +dist_cntrlr_setup_loop(Socket, TickHandler, Sup) -> + receive + {tcp_closed, Socket} -> + exit(connection_closed); + + {Ref, From, {supervisor, Pid}} -> + Res = link(Pid), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Pid); + + {Ref, From, tick_handler} -> + From ! {Ref, TickHandler}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, socket} -> + From ! {Ref, Socket}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, {send, Packet}} -> + Res = gen_tcp:send(Socket, Packet), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, {recv, Length, Timeout}} -> + Res = gen_tcp:recv(Socket, Length, Timeout), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, getll} -> + From ! {Ref, {ok, self()}}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, {address, Node}} -> + Res = case inet:peername(Socket) of + {ok, Address} -> + case split_node(atom_to_list(Node), $@, []) of + [_,Host] -> + #net_address{address=Address,host=Host, + protocol=tcp, family=inet}; + _ -> + {error, no_node} + end + end, + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, pre_nodeup} -> + Res = inet:setopts(Socket, + [{active, false}, + {packet, 4}, + nodelay()]), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, post_nodeup} -> + Res = inet:setopts(Socket, + [{active, false}, + {packet, 4}, + nodelay()]), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, {handshake_complete, _Node, DHandle}} -> + From ! {Ref, ok}, + %% Handshake complete! Begin dispatching traffic... + + %% We use separate process for dispatching input. This + %% is not necessary, but it enables parallel execution + %% of independent work loads at the same time as it + %% simplifies the the implementation... + InputHandler = spawn_opt(?MODULE, dist_cntrlr_input_setup, + [DHandle, Socket, Sup], + [link] ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS), + + flush_controller(InputHandler, Socket), + gen_tcp:controlling_process(Socket, InputHandler), + flush_controller(InputHandler, Socket), + + ok = erlang:dist_ctrl_input_handler(DHandle, InputHandler), + + InputHandler ! 
DHandle, + + %% From now on we execute on normal priority + process_flag(priority, normal), + erlang:dist_ctrl_get_data_notification(DHandle), + dist_cntrlr_output_loop(DHandle, Socket) + end. + +%% We use active 10 for good throughput while still +%% maintaining back-pressure if the input controller +%% isn't able to handle all incoming messages... +-define(ACTIVE_INPUT, 10). + +dist_cntrlr_input_setup(DHandle, Socket, Sup) -> + link(Sup), + %% Ensure we don't try to put data before registerd + %% as input handler... + receive + DHandle -> + dist_cntrlr_input_loop(DHandle, Socket, 0) + end. + +dist_cntrlr_input_loop(DHandle, Socket, N) when N =< ?ACTIVE_INPUT/2 -> + inet:setopts(Socket, [{active, ?ACTIVE_INPUT - N}]), + dist_cntrlr_input_loop(DHandle, Socket, ?ACTIVE_INPUT); +dist_cntrlr_input_loop(DHandle, Socket, N) -> + receive + {tcp_closed, Socket} -> + %% Connection to remote node terminated... + exit(connection_closed); + + {tcp, Socket, Data} -> + %% Incoming data from remote node... + try erlang:dist_ctrl_put_data(DHandle, Data) + catch _ : _ -> death_row() + end, + dist_cntrlr_input_loop(DHandle, Socket, N-1); + + _ -> + %% Ignore... + dist_cntrlr_input_loop(DHandle, Socket, N) + end. + +dist_cntrlr_send_data(DHandle, Socket) -> + case erlang:dist_ctrl_get_data(DHandle) of + none -> + erlang:dist_ctrl_get_data_notification(DHandle); + Data -> + sock_send(Socket, Data), + dist_cntrlr_send_data(DHandle, Socket) + end. + + +dist_cntrlr_output_loop(DHandle, Socket) -> + receive + dist_data -> + %% Outgoing data from this node... + try dist_cntrlr_send_data(DHandle, Socket) + catch _ : _ -> death_row() + end, + dist_cntrlr_output_loop(DHandle, Socket); + + {send, From, Ref, Data} -> + %% This is for testing only! + %% + %% Needed by some OTP distribution + %% test suites... + sock_send(Socket, Data), + From ! {Ref, ok}, + dist_cntrlr_output_loop(DHandle, Socket); + + _ -> + %% Drop garbage message... + dist_cntrlr_output_loop(DHandle, Socket) + + end. + +sock_send(Socket, Data) -> + try gen_tcp:send(Socket, Data) of + ok -> ok; + {error, Reason} -> death_row({send_error, Reason}) + catch + Type : Reason -> death_row({send_error, {Type, Reason}}) + end. + +death_row() -> + death_row(connection_closed). + +death_row(normal) -> + %% We do not want to exit with normal + %% exit reason since it wont bring down + %% linked processes... + death_row(); +death_row(Reason) -> + %% When the connection is on its way down operations + %% begin to fail. We catch the failures and call + %% this function waiting for termination. We should + %% be terminated by one of our links to the other + %% involved parties that began bringing the + %% connection down. By waiting for termination we + %% avoid altering the exit reason for the connection + %% teardown. We however limit the wait to 5 seconds + %% and bring down the connection ourselves if not + %% terminated... + receive after 5000 -> exit(Reason) end. diff --git a/lib/kernel/include/dist.hrl b/lib/kernel/include/dist.hrl index d6bccdf474..db4a5eaebc 100644 --- a/lib/kernel/include/dist.hrl +++ b/lib/kernel/include/dist.hrl @@ -40,3 +40,33 @@ -define(DFLAG_UTF8_ATOMS, 16#10000). -define(DFLAG_MAP_TAG, 16#20000). -define(DFLAG_BIG_CREATION, 16#40000). +-define(DFLAG_SEND_SENDER, 16#80000). + +%% DFLAGs that require strict ordering or:ed together... +-define(DFLAGS_STRICT_ORDER_DELIVERY, + ?DFLAG_DIST_HDR_ATOM_CACHE). + + +%% Also update dflag2str() in ../src/dist_util.erl +%% when adding flags... 
+ +-define(DFLAGS_ALL, + (?DFLAG_PUBLISHED + bor ?DFLAG_ATOM_CACHE + bor ?DFLAG_EXTENDED_REFERENCES + bor ?DFLAG_DIST_MONITOR + bor ?DFLAG_FUN_TAGS + bor ?DFLAG_DIST_MONITOR_NAME + bor ?DFLAG_HIDDEN_ATOM_CACHE + bor ?DFLAG_NEW_FUN_TAGS + bor ?DFLAG_EXTENDED_PIDS_PORTS + bor ?DFLAG_EXPORT_PTR_TAG + bor ?DFLAG_BIT_BINARIES + bor ?DFLAG_NEW_FLOATS + bor ?DFLAG_UNICODE_IO + bor ?DFLAG_DIST_HDR_ATOM_CACHE + bor ?DFLAG_SMALL_ATOM_TAGS + bor ?DFLAG_UTF8_ATOMS + bor ?DFLAG_MAP_TAG + bor ?DFLAG_BIG_CREATION + bor ?DFLAG_SEND_SENDER)). diff --git a/lib/kernel/include/dist_util.hrl b/lib/kernel/include/dist_util.hrl index e3d2fe0eb6..eeb0f8dd43 100644 --- a/lib/kernel/include/dist_util.hrl +++ b/lib/kernel/include/dist_util.hrl @@ -29,9 +29,9 @@ -endif. -ifdef(dist_trace). --define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:timestamp(),node(),lists:flatten(io_lib:format(Fmt, Args))])). +-define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])). % Use the one below for config-file (early boot) connection tracing -%-define(trace(Fmt,Args), erlang:display([erlang:now(),node(),lists:flatten(io_lib:format(Fmt, Args))])). +%-define(trace(Fmt,Args), erlang:display([erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])). -define(trace_factor,8). -else. -define(trace(Fmt,Args), ok). @@ -78,7 +78,13 @@ %% New in kernel-5.1 (OTP 19.1): mf_setopts, %% netkernel:setopts on active connection - mf_getopts %% netkernel:getopts on active connection + mf_getopts, %% netkernel:getopts on active connection + + %% New in kernel-6.0 (OTP 21.0) + f_handshake_complete, %% Notify handshake complete + add_flags, %% dflags to add + reject_flags, %% dflags not to use (not all can be rejected) + require_flags %% dflags that are required }). diff --git a/lib/kernel/src/dist_util.erl b/lib/kernel/src/dist_util.erl index b3507e5d13..08bd5946cd 100644 --- a/lib/kernel/src/dist_util.erl +++ b/lib/kernel/src/dist_util.erl @@ -74,6 +74,48 @@ ticked = 0 }). +dflag2str(?DFLAG_PUBLISHED) -> + "PUBLISHED"; +dflag2str(?DFLAG_ATOM_CACHE) -> + "ATOM_CACHE"; +dflag2str(?DFLAG_EXTENDED_REFERENCES) -> + "EXTENDED_REFERENCES"; +dflag2str(?DFLAG_DIST_MONITOR) -> + "DIST_MONITOR"; +dflag2str(?DFLAG_FUN_TAGS) -> + "FUN_TAGS"; +dflag2str(?DFLAG_DIST_MONITOR_NAME) -> + "DIST_MONITOR_NAME"; +dflag2str(?DFLAG_HIDDEN_ATOM_CACHE) -> + "HIDDEN_ATOM_CACHE"; +dflag2str(?DFLAG_NEW_FUN_TAGS) -> + "NEW_FUN_TAGS"; +dflag2str(?DFLAG_EXTENDED_PIDS_PORTS) -> + "EXTENDED_PIDS_PORTS"; +dflag2str(?DFLAG_EXPORT_PTR_TAG) -> + "EXPORT_PTR_TAG"; +dflag2str(?DFLAG_BIT_BINARIES) -> + "BIT_BINARIES"; +dflag2str(?DFLAG_NEW_FLOATS) -> + "NEW_FLOATS"; +dflag2str(?DFLAG_UNICODE_IO) -> + "UNICODE_IO"; +dflag2str(?DFLAG_DIST_HDR_ATOM_CACHE) -> + "DIST_HDR_ATOM_CACHE"; +dflag2str(?DFLAG_SMALL_ATOM_TAGS) -> + "SMALL_ATOM_TAGS"; +dflag2str(?DFLAG_UTF8_ATOMS) -> + "UTF8_ATOMS"; +dflag2str(?DFLAG_MAP_TAG) -> + "MAP_TAG"; +dflag2str(?DFLAG_BIG_CREATION) -> + "BIG_CREATION"; +dflag2str(?DFLAG_SEND_SENDER) -> + "SEND_SENDER"; +dflag2str(_) -> + "UNKNOWN". + + remove_flag(Flag, Flags) -> case Flags band Flag of 0 -> @@ -82,13 +124,13 @@ remove_flag(Flag, Flags) -> Flags - Flag end. 
-adjust_flags(ThisFlags, OtherFlags) -> +adjust_flags(ThisFlags, OtherFlags, RejectFlags) -> case (?DFLAG_PUBLISHED band ThisFlags) band OtherFlags of 0 -> {remove_flag(?DFLAG_PUBLISHED, ThisFlags), remove_flag(?DFLAG_PUBLISHED, OtherFlags)}; _ -> - {ThisFlags, OtherFlags} + {ThisFlags, OtherFlags band (bnot RejectFlags)} end. publish_flag(hidden, _) -> @@ -101,36 +143,71 @@ publish_flag(_, OtherNode) -> 0 end. -make_this_flags(RequestType, OtherNode) -> - publish_flag(RequestType, OtherNode) bor - %% The parenthesis below makes the compiler generate better code. - (?DFLAG_EXPORT_PTR_TAG bor - ?DFLAG_EXTENDED_PIDS_PORTS bor - ?DFLAG_EXTENDED_REFERENCES bor - ?DFLAG_DIST_MONITOR bor - ?DFLAG_FUN_TAGS bor - ?DFLAG_DIST_MONITOR_NAME bor - ?DFLAG_HIDDEN_ATOM_CACHE bor - ?DFLAG_NEW_FUN_TAGS bor - ?DFLAG_BIT_BINARIES bor - ?DFLAG_NEW_FLOATS bor - ?DFLAG_UNICODE_IO bor - ?DFLAG_DIST_HDR_ATOM_CACHE bor - ?DFLAG_SMALL_ATOM_TAGS bor - ?DFLAG_UTF8_ATOMS bor - ?DFLAG_MAP_TAG bor - ?DFLAG_BIG_CREATION). - -handshake_other_started(#hs_data{request_type=ReqType}=HSData0) -> +-define(DFLAGS_REMOVABLE, + (?DFLAG_DIST_HDR_ATOM_CACHE + bor ?DFLAG_HIDDEN_ATOM_CACHE + bor ?DFLAG_ATOM_CACHE)). + +-define(DFLAGS_ADDABLE, + (?DFLAGS_ALL + band (bnot (?DFLAG_PUBLISHED + bor ?DFLAG_HIDDEN_ATOM_CACHE + bor ?DFLAG_ATOM_CACHE)))). + +-define(DFLAGS_THIS_DEFAULT, + (?DFLAG_EXPORT_PTR_TAG + bor ?DFLAG_EXTENDED_PIDS_PORTS + bor ?DFLAG_EXTENDED_REFERENCES + bor ?DFLAG_DIST_MONITOR + bor ?DFLAG_FUN_TAGS + bor ?DFLAG_DIST_MONITOR_NAME + bor ?DFLAG_NEW_FUN_TAGS + bor ?DFLAG_BIT_BINARIES + bor ?DFLAG_NEW_FLOATS + bor ?DFLAG_UNICODE_IO + bor ?DFLAG_DIST_HDR_ATOM_CACHE + bor ?DFLAG_SMALL_ATOM_TAGS + bor ?DFLAG_UTF8_ATOMS + bor ?DFLAG_MAP_TAG + bor ?DFLAG_BIG_CREATION + bor ?DFLAG_SEND_SENDER)). + +make_this_flags(RequestType, AddFlags, RemoveFlags, OtherNode) -> + case RemoveFlags band (bnot ?DFLAGS_REMOVABLE) of + 0 -> ok; + Rerror -> exit({"Rejecting non rejectable flags", Rerror}) + end, + case AddFlags band (bnot ?DFLAGS_ADDABLE) of + 0 -> ok; + Aerror -> exit({"Adding non addable flags", Aerror}) + end, + Flgs0 = ?DFLAGS_THIS_DEFAULT, + Flgs1 = Flgs0 bor publish_flag(RequestType, OtherNode), + Flgs2 = Flgs1 bor AddFlags, + Flgs3 = Flgs2 band (bnot (?DFLAG_HIDDEN_ATOM_CACHE + bor ?DFLAG_ATOM_CACHE)), + Flgs3 band (bnot RemoveFlags). + +handshake_other_started(#hs_data{request_type=ReqType, + add_flags=AddFlgs0, + reject_flags=RejFlgs0, + require_flags=ReqFlgs0}=HSData0) -> + AddFlgs = convert_flags(AddFlgs0), + RejFlgs = convert_flags(RejFlgs0), + ReqFlgs = convert_flags(ReqFlgs0), {PreOtherFlags,Node,Version} = recv_name(HSData0), - PreThisFlags = make_this_flags(ReqType, Node), + PreThisFlags = make_this_flags(ReqType, AddFlgs, RejFlgs, Node), {ThisFlags, OtherFlags} = adjust_flags(PreThisFlags, - PreOtherFlags), + PreOtherFlags, + RejFlgs), HSData = HSData0#hs_data{this_flags=ThisFlags, other_flags=OtherFlags, other_version=Version, other_node=Node, - other_started=true}, + other_started=true, + add_flags=AddFlgs, + reject_flags=RejFlgs, + require_flags=ReqFlgs}, check_dflags(HSData), is_allowed(HSData), ?debug({"MD5 connection from ~p (V~p)~n", @@ -165,23 +242,18 @@ is_allowed(#hs_data{other_node = Node, end. %% -%% Check that both nodes can handle the same types of extended -%% node containers. If they can not, abort the connection. +%% Check mandatory flags... 
%% check_dflags(#hs_data{other_node = Node, other_flags = OtherFlags, - other_started = OtherStarted} = HSData) -> - - Mandatory = [{?DFLAG_EXTENDED_REFERENCES, "EXTENDED_REFERENCES"}, - {?DFLAG_EXTENDED_PIDS_PORTS, "EXTENDED_PIDS_PORTS"}, - {?DFLAG_UTF8_ATOMS, "UTF8_ATOMS"}], - Missing = lists:filtermap(fun({Bit, Str}) -> - case Bit band OtherFlags of - Bit -> false; - 0 -> {true, Str} - end - end, - Mandatory), + other_started = OtherStarted, + require_flags = RequiredFlags} = HSData) -> + Mandatory = ((?DFLAG_EXTENDED_REFERENCES + bor ?DFLAG_EXTENDED_PIDS_PORTS + bor ?DFLAG_UTF8_ATOMS) + bor RequiredFlags), + Missing = check_mandatory(0, ?DFLAGS_ALL, Mandatory, + OtherFlags, []), case Missing of [] -> ok; @@ -201,6 +273,22 @@ check_dflags(#hs_data{other_node = Node, ?shutdown2(Node, {check_dflags_failed, Missing}) end. +check_mandatory(_Bit, 0, _Mandatory, _OtherFlags, Missing) -> + Missing; +check_mandatory(Bit, Left, Mandatory, OtherFlags, Missing) -> + DFlag = (1 bsl Bit), + NewLeft = Left band (bnot DFlag), + NewMissing = case {DFlag band Mandatory, + DFlag band OtherFlags} of + {DFlag, 0} -> + %% Mandatory and missing... + [dflag2str(DFlag) | Missing]; + _ -> + %% Not mandatory or present... + Missing + end, + check_mandatory(Bit+1, NewLeft, Mandatory, OtherFlags, NewMissing). + %% No nodedown will be sent if we fail before this process has %% succeeded to mark the node as pending. @@ -314,13 +402,24 @@ flush_down() -> end. handshake_we_started(#hs_data{request_type=ReqType, - other_node=Node}=PreHSData) -> - PreThisFlags = make_this_flags(ReqType, Node), - HSData = PreHSData#hs_data{this_flags=PreThisFlags}, + other_node=Node, + add_flags=AddFlgs0, + reject_flags=RejFlgs0, + require_flags=ReqFlgs0}=PreHSData) -> + AddFlgs = convert_flags(AddFlgs0), + RejFlgs = convert_flags(RejFlgs0), + ReqFlgs = convert_flags(ReqFlgs0), + PreThisFlags = make_this_flags(ReqType, AddFlgs, RejFlgs, Node), + HSData = PreHSData#hs_data{this_flags = PreThisFlags, + add_flags = AddFlgs, + reject_flags = RejFlgs, + require_flags = ReqFlgs}, send_name(HSData), recv_status(HSData), {PreOtherFlags,ChallengeA} = recv_challenge(HSData), - {ThisFlags,OtherFlags} = adjust_flags(PreThisFlags, PreOtherFlags), + {ThisFlags,OtherFlags} = adjust_flags(PreThisFlags, + PreOtherFlags, + RejFlgs), NewHSData = HSData#hs_data{this_flags = ThisFlags, other_flags = OtherFlags, other_started = false}, @@ -336,15 +435,16 @@ handshake_we_started(#hs_data{request_type=ReqType, handshake_we_started(OldHsData) when element(1,OldHsData) =:= hs_data -> handshake_we_started(convert_old_hsdata(OldHsData)). -convert_old_hsdata({hs_data, KP, ON, TN, S, T, TF, A, OV, OF, OS, FS, FR, - FS_PRE, FS_POST, FG, FA, MFT, MFG, RT}) -> - #hs_data{ - kernel_pid = KP, other_node = ON, this_node = TN, socket = S, timer = T, - this_flags = TF, allowed = A, other_version = OV, other_flags = OF, - other_started = OS, f_send = FS, f_recv = FR, f_setopts_pre_nodeup = FS_PRE, - f_setopts_post_nodeup = FS_POST, f_getll = FG, f_address = FA, - mf_tick = MFT, mf_getstat = MFG, request_type = RT}. +convert_old_hsdata(OldHsData) -> + OHSDL = tuple_to_list(OldHsData), + NoMissing = tuple_size(#hs_data{}) - tuple_size(OldHsData), + true = NoMissing > 0, + list_to_tuple(OHSDL ++ lists:duplicate(NoMissing, undefined)). +convert_flags(Flags) when is_integer(Flags) -> + Flags; +convert_flags(_Undefined) -> + 0. %% -------------------------------------------------------------- %% The connection has been established. 
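The three new handshake fields (add_flags, reject_flags, require_flags) are plain flag masks; when a distribution module leaves them unset, convert_flags/1 above maps undefined to 0. As a purely illustrative sketch (the setup_hs_data/3 helper and the chosen flag values are assumptions, not part of this change), a callback module could fill them in like this when building its #hs_data{} record:

    %% Hypothetical helper in a custom distribution module.
    setup_hs_data(Kernel, Node, Socket) ->
        #hs_data{kernel_pid    = Kernel,
                 other_node    = Node,
                 socket        = Socket,
                 %% extra optional capabilities to advertise
                 add_flags     = ?DFLAG_SEND_SENDER,
                 %% capabilities we refuse even if the peer offers them
                 reject_flags  = ?DFLAG_DIST_HDR_ATOM_CACHE,
                 %% capabilities the peer must support, on top of the
                 %% hard-coded mandatory flags in check_dflags/1
                 require_flags = ?DFLAG_BIG_CREATION}.
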
@@ -359,15 +459,20 @@ connection(#hs_data{other_node = Node, PType = publish_type(HSData#hs_data.other_flags), case FPreNodeup(Socket) of ok -> - do_setnode(HSData), % Succeeds or exits the process. + DHandle = do_setnode(HSData), % Succeeds or exits the process. Address = FAddress(Socket,Node), mark_nodeup(HSData,Address), case FPostNodeup(Socket) of ok -> + case HSData#hs_data.f_handshake_complete of + undefined -> ok; + HsComplete -> HsComplete(Socket, Node, DHandle) + end, con_loop({HSData#hs_data.kernel_pid, Node, Socket, PType, + DHandle, HSData#hs_data.mf_tick, HSData#hs_data.mf_getstat, HSData#hs_data.mf_setopts, @@ -425,18 +530,16 @@ do_setnode(#hs_data{other_node = Node, socket = Socket, [Node, Port, {publish_type(Flags), '(', Flags, ')', Version}]), - case (catch - erlang:setnode(Node, Port, - {Flags, Version, '', ''})) of - {'EXIT', {system_limit, _}} -> + try + erlang:setnode(Node, Port, {Flags, Version, '', ''}) + catch + error:system_limit -> error_msg("** Distribution system limit reached, " "no table space left for node ~w ** ~n", [Node]), ?shutdown(Node); - {'EXIT', Other} -> - exit(Other); - _Else -> - ok + error:Other -> + exit({Other, erlang:get_stacktrace()}) end; _ -> error_msg("** Distribution connection error, " @@ -468,7 +571,13 @@ mark_nodeup(#hs_data{kernel_pid = Kernel, ?shutdown(Node) end. -con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=ConData, +getstat(DHandle, _Socket, undefined) -> + erlang:dist_get_stat(DHandle); +getstat(_DHandle, Socket, MFGetstat) -> + MFGetstat(Socket). + +con_loop({Kernel, Node, Socket, Type, DHandle, MFTick, MFGetstat, + MFSetOpts, MFGetOpts}=ConData, Tick) -> receive {tcp_closed, Socket} -> @@ -476,7 +585,7 @@ con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=C {Kernel, disconnect} -> ?shutdown2(Node, disconnected); {Kernel, aux_tick} -> - case MFGetstat(Socket) of + case getstat(DHandle, Socket, MFGetstat) of {ok, _, _, PendWrite} -> send_tick(Socket, PendWrite, MFTick); _ -> @@ -484,7 +593,7 @@ con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=C end, con_loop(ConData, Tick); {Kernel, tick} -> - case send_tick(Socket, Tick, Type, + case send_tick(DHandle, Socket, Tick, Type, MFTick, MFGetstat) of {ok, NewTick} -> con_loop(ConData, NewTick); @@ -497,7 +606,7 @@ con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=C ?shutdown2(Node, send_net_tick_failed) end; {From, get_status} -> - case MFGetstat(Socket) of + case getstat(DHandle, Socket, MFGetstat) of {ok, Read, Write, _} -> From ! {self(), get_status, {ok, Read, Write}}, con_loop(ConData, Tick); @@ -735,14 +844,14 @@ send_status(#hs_data{socket = Socket, other_node = Node, %% we haven't read anything as a hidden node only ticks when it receives %% a TICK !! -send_tick(Socket, Tick, Type, MFTick, MFGetstat) -> +send_tick(DHandle, Socket, Tick, Type, MFTick, MFGetstat) -> #tick{tick = T0, read = Read, write = Write, ticked = Ticked} = Tick, T = T0 + 1, T1 = T rem 4, - case MFGetstat(Socket) of + case getstat(DHandle, Socket, MFGetstat) of {ok, Read, _, _} when Ticked =:= T -> {error, not_responding}; {ok, Read, W, Pend} when Type =:= hidden -> @@ -771,11 +880,10 @@ send_tick(Socket, Tick, Type, MFTick, MFGetstat) -> Error end. -send_tick(Socket, 0, MFTick) -> - MFTick(Socket); -send_tick(_, _Pend, _) -> - %% Dont send tick if pending write. - ok. +send_tick(_, Pend, _) when Pend /= false, Pend /= 0 -> + ok; %% Dont send tick if pending write. 
+send_tick(Socket, _Pend, MFTick) -> + MFTick(Socket). %% ------------------------------------------------------------ %% Connection setup timeout timer. diff --git a/lib/kernel/src/erl_epmd.erl b/lib/kernel/src/erl_epmd.erl index 7bc9e2ede3..f96bc88913 100644 --- a/lib/kernel/src/erl_epmd.erl +++ b/lib/kernel/src/erl_epmd.erl @@ -79,7 +79,13 @@ port_please(Node, EpmdAddr, Timeout) -> port_please1(Node,HostName, Timeout) -> - case inet:gethostbyname(HostName, inet, Timeout) of + Family = case inet_db:res_option(inet6) of + true -> + inet6; + false -> + inet + end, + case inet:gethostbyname(HostName, Family, Timeout) of {ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} -> get_port(Node, EpmdAddr, Timeout); Else -> diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src index f1ef70a373..fc5417597f 100644 --- a/lib/kernel/src/kernel.appup.src +++ b/lib/kernel/src/kernel.appup.src @@ -18,7 +18,7 @@ %% %CopyrightEnd% {"%VSN%", %% Up from - max one major revision back - [{<<"5\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-19.*, OTP-20.0 + [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.* %% Down to - max one major revision back - [{<<"5\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-19.*, OTP-20.0 + [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.* }. diff --git a/lib/kernel/src/net_kernel.erl b/lib/kernel/src/net_kernel.erl index 7da89dd7cb..f36b4f1e6a 100644 --- a/lib/kernel/src/net_kernel.erl +++ b/lib/kernel/src/net_kernel.erl @@ -423,8 +423,8 @@ handle_call({connect, Type, Node}, From, State) -> {ok, SetupPid} -> Owners = [{SetupPid, Node} | State#state.conn_owners], {noreply,State#state{conn_owners=Owners}}; - _ -> - ?connect_failure(Node, {setup_call, failed}), + _Error -> + ?connect_failure(Node, {setup_call, failed, _Error}), async_reply({reply, false, State}, From) end end; diff --git a/lib/kernel/test/gen_udp_SUITE.erl b/lib/kernel/test/gen_udp_SUITE.erl index aa616d43d6..8364fe7cdc 100644 --- a/lib/kernel/test/gen_udp_SUITE.erl +++ b/lib/kernel/test/gen_udp_SUITE.erl @@ -295,21 +295,9 @@ bad_address(Config) when is_list(Config) -> %% are received per in/out scheduling, which should be %% the same as the read_packets parameter. %% -%% What happens on the SMP emulator remains to be seen... -%% %% OTP-6249 UDP option for number of packet reads. read_packets(Config) when is_list(Config) -> - case erlang:system_info(smp_support) of - false -> - read_packets_1(); - true -> - %% We would need some new sort of tracing to test this - %% option reliably in an SMP emulator. - {skip,"SMP emulator"} - end. - -read_packets_1() -> N1 = 5, N2 = 7, {ok,R} = gen_udp:open(0, [{read_packets,N1}]), diff --git a/lib/kernel/test/zlib_SUITE.erl b/lib/kernel/test/zlib_SUITE.erl index e246276262..ae00d75460 100644 --- a/lib/kernel/test/zlib_SUITE.erl +++ b/lib/kernel/test/zlib_SUITE.erl @@ -995,32 +995,27 @@ sub_heap_binaries(Config) when is_list(Config) -> %% Check concurrent access to zlib driver. smp(Config) -> - case erlang:system_info(smp_support) of - true -> - NumOfProcs = lists:min([8,erlang:system_info(schedulers)]), - io:format("smp starting ~p workers\n",[NumOfProcs]), - - %% Tests to run in parallel. - Funcs = - [zip_usage, gz_usage, compress_usage, dictionary_usage, - crc, adler], - - %% We get all function arguments here to avoid repeated parallel - %% file read access. 
- UsageArgs = - list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]), - Parent = self(), - - WorkerFun = - fun() -> - worker(rand:uniform(9999), UsageArgs, Parent) - end, - - Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)], - wait_pids(Pids); - false -> - {skipped,"No smp support"} - end. + NumOfProcs = lists:min([8,erlang:system_info(schedulers)]), + io:format("smp starting ~p workers\n",[NumOfProcs]), + + %% Tests to run in parallel. + Funcs = + [zip_usage, gz_usage, compress_usage, dictionary_usage, + crc, adler], + + %% We get all function arguments here to avoid repeated parallel + %% file read access. + UsageArgs = + list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]), + Parent = self(), + + WorkerFun = + fun() -> + worker(rand:uniform(9999), UsageArgs, Parent) + end, + + Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)], + wait_pids(Pids). worker(Seed, FnATpl, Parent) -> io:format("smp worker ~p, seed=~p~n",[self(),Seed]), diff --git a/lib/mnesia/doc/src/Mnesia_chap5.xmlsrc b/lib/mnesia/doc/src/Mnesia_chap5.xmlsrc index 62759c624b..0265e0efa0 100644 --- a/lib/mnesia/doc/src/Mnesia_chap5.xmlsrc +++ b/lib/mnesia/doc/src/Mnesia_chap5.xmlsrc @@ -226,8 +226,10 @@ not known beforehand, all fragments are searched for matching records.</p> <p>Notice that in <c>ordered_set</c> tables, the records - are ordered per fragment, and the the order is undefined in - results returned by <c>select</c> and <c>match_object</c>.</p> + are ordered per fragment, and the order is undefined in + results returned by <c>select</c> and <c>match_object</c>, + as well as <c>first</c>, <c>next</c>, <c>prev</c> and + <c>last</c>.</p> <p>The following code illustrates how a <c>Mnesia</c> table is converted to be a fragmented table and how more fragments are added later:</p> diff --git a/lib/observer/src/observer_traceoptions_wx.erl b/lib/observer/src/observer_traceoptions_wx.erl index 4f46426cf6..fbcf6d7fe9 100644 --- a/lib/observer/src/observer_traceoptions_wx.erl +++ b/lib/observer/src/observer_traceoptions_wx.erl @@ -619,7 +619,7 @@ create_styled_txtctrl(Parent) -> keyWords() -> L = ["after","begin","case","try","cond","catch","andalso","orelse", - "end","fun","if","let","of","query","receive","when","bnot","not", + "end","fun","if","let","of","receive","when","bnot","not", "div","rem","band","and","bor","bxor","bsl","bsr","or","xor"], lists:flatten([K ++ " " || K <- L] ++ [0]). diff --git a/lib/os_mon/src/disksup.erl b/lib/os_mon/src/disksup.erl index 044604b000..aeec335ba7 100644 --- a/lib/os_mon/src/disksup.erl +++ b/lib/os_mon/src/disksup.erl @@ -32,7 +32,7 @@ terminate/2, code_change/3]). %% Other exports --export([format_status/2]). +-export([format_status/2, parse_df/2]). -record(state, {threshold, timeout, os, diskdata = [],port}). @@ -294,8 +294,8 @@ check_disks_solaris("", _Threshold) -> check_disks_solaris("\n", _Threshold) -> []; check_disks_solaris(Str, Threshold) -> - case io_lib:fread("~s~d~d~d~d%~s", Str) of - {ok, [_FS, KB, _Used, _Avail, Cap, MntOn], RestStr} -> + case parse_df(Str, posix) of + {ok, {KB, Cap, MntOn}, RestStr} -> if Cap >= Threshold -> set_alarm({disk_almost_full, MntOn}, []); @@ -308,14 +308,102 @@ check_disks_solaris(Str, Threshold) -> check_disks_solaris(skip_to_eol(Str),Threshold) end. 
+%% @private +%% @doc Predicate to take a word from the input string until a space or +%% a percent '%' sign (the Capacity field is followed by a %) +parse_df_is_not_space($ ) -> false; +parse_df_is_not_space($%) -> false; +parse_df_is_not_space(_) -> true. + +%% @private +%% @doc Predicate to take spaces away from string. Stops on a non-space +parse_df_is_space($ ) -> true; +parse_df_is_space(_) -> false. + +%% @private +%% @doc Predicate to consume remaining characters until end of line. +parse_df_is_not_eol($\r) -> false; +parse_df_is_not_eol($\n) -> false; +parse_df_is_not_eol(_) -> true. + +%% @private +%% @doc Trims leading non-spaces (the word) from the string then trims spaces. +parse_df_skip_word(Input) -> + Remaining = lists:dropwhile(fun parse_df_is_not_space/1, Input), + lists:dropwhile(fun parse_df_is_space/1, Remaining). + +%% @private +%% @doc Takes all non-spaces and then drops following spaces. +parse_df_take_word(Input) -> + {Word, Remaining0} = lists:splitwith(fun parse_df_is_not_space/1, Input), + Remaining1 = lists:dropwhile(fun parse_df_is_space/1, Remaining0), + {Word, Remaining1}. + +%% @private +%% @doc Takes all non-spaces and then drops the % after it and the spaces. +parse_df_take_word_percent(Input) -> + {Word, Remaining0} = lists:splitwith(fun parse_df_is_not_space/1, Input), + %% Drop the leading % or do nothing + Remaining1 = case Remaining0 of + [$% | R1] -> R1; + _ -> Remaining0 % Might be no % or empty list even + end, + Remaining2 = lists:dropwhile(fun parse_df_is_space/1, Remaining1), + {Word, Remaining2}. + +%% @private +%% @doc Given a line of 'df' POSIX/SUSv3 output split it into fields: +%% a string (mounted device), 4 integers (kilobytes, used, available +%% and capacity), skip % sign, (optionally for susv3 can also skip IUsed, IFree +%% and ICap% fields) then take remaining characters as the mount path +-spec parse_df(string(), posix | susv3) -> + {error, parse_df} | {ok, {integer(), integer(), integer()}, string()}. +parse_df(Input0, Flavor) -> + %% Format of Posix/Linux df output looks like Header + Lines + %% Filesystem 1024-blocks Used Available Capacity Mounted on + %% udev 2467108 0 2467108 0% /dev + Input1 = parse_df_skip_word(Input0), % skip device path field + {KbStr, Input2} = parse_df_take_word(Input1), % take Kb field + Input3 = parse_df_skip_word(Input2), % skip Used field + Input4 = parse_df_skip_word(Input3), % skip Avail field + + % take Capacity% field; drop a % sign following the capacity + {CapacityStr, Input5} = parse_df_take_word_percent(Input4), + + %% Format of OS X/SUSv3 df looks similar to POSIX but has 3 extra columns + %% Filesystem 1024-blocks Used Available Capacity iused ifree %iused Mounted + %% /dev/disk1 243949060 2380 86690680 65% 2029724 37555 0% / + Input6 = case Flavor of + posix -> Input5; + susv3 -> % there are 3 extra integers we want to skip + Input5a = parse_df_skip_word(Input5), % skip IUsed field + Input5b = parse_df_skip_word(Input5a), % skip IFree field + %% skip the value of ICap + '%' field + {_, Input5c} = parse_df_take_word_percent(Input5b), + Input5c + end, + + % path is the remaining string till end of line + {MountPath, Input7} = lists:splitwith(fun parse_df_is_not_eol/1, Input6), + % Trim the newlines + Remaining = lists:dropwhile(fun(X) -> not parse_df_is_not_eol(X) end, + Input7), + try + Kb = erlang:list_to_integer(KbStr), + Capacity = erlang:list_to_integer(CapacityStr), + {ok, {Kb, Capacity, MountPath}, Remaining} + catch error:badarg -> + {error, parse_df} + end. 
+ % Parse per SUSv3 specification, notably recent OS X check_disks_susv3("", _Threshold) -> []; check_disks_susv3("\n", _Threshold) -> []; check_disks_susv3(Str, Threshold) -> - case io_lib:fread("~s~d~d~d~d%~d~d~d%~s", Str) of - {ok, [_FS, KB, _Used, _Avail, Cap, _IUsed, _IFree, _ICap, MntOn], RestStr} -> + case parse_df(Str, susv3) of + {ok, {KB, Cap, MntOn}, RestStr} -> if Cap >= Threshold -> set_alarm({disk_almost_full, MntOn}, []); diff --git a/lib/os_mon/test/disksup_SUITE.erl b/lib/os_mon/test/disksup_SUITE.erl index ad61985014..d7f2626160 100644 --- a/lib/os_mon/test/disksup_SUITE.erl +++ b/lib/os_mon/test/disksup_SUITE.erl @@ -30,7 +30,7 @@ -export([port/1]). -export([terminate/1, unavailable/1, restart/1]). -export([otp_5910/1]). --export([posix_only/1]). +-export([posix_only/1, parse_df_output_posix/1, parse_df_output_susv3/1]). init_per_suite(Config) when is_list(Config) -> ok = application:start(os_mon), @@ -59,7 +59,8 @@ suite() -> all() -> Bugs = [otp_5910], - Always = [api, config, alarm, port, posix_only, unavailable] ++ Bugs, + Always = [api, config, alarm, port, posix_only, unavailable, + parse_df_output_posix, parse_df_output_susv3] ++ Bugs, case test_server:os_type() of {unix, _OSname} -> Always; {win32, _OSname} -> Always; @@ -413,3 +414,36 @@ get_disk_data([{"none",0,0}=E]) -> [E]; get_disk_data([{_,_,0}|Es]) -> get_disk_data(Es); get_disk_data([E|Es]) -> [E|get_disk_data(Es)]; get_disk_data([]) -> []. + +%% @doc Test various expected inputs to 'df' command output (Linux/POSIX) +parse_df_output_posix(Config) when is_list(Config) -> + PosixHdr = "Filesystem 1K-blocks Used Available Use% Mounted on\n", + {error, _} = disksup:parse_df(PosixHdr, posix), + {error, _} = disksup:parse_df("", posix), + {error, _} = disksup:parse_df("\n\n", posix), + + %% Have a simple example with no funny spaces in mount path + Posix1 = "tmpfs 498048 7288 490760 2% /run\n", + {ok, {498048, 2, "/run"}, ""} = disksup:parse_df(Posix1, posix), + + %% Have a mount path with some spaces in it + Posix2 = "tmpfs 498048 7288 490760 2% /spaces 1 2\n", + {ok, {498048, 2, "/spaces 1 2"}, ""} = disksup:parse_df(Posix2, posix). + +%% @doc Test various expected inputs to 'df' command output (Darwin/SUSv3) +parse_df_output_susv3(Config) when is_list(Config) -> + DarwinHdr = "Filesystem 1024-blocks Used Available Capacity " ++ + "iused ifree %iused Mounted on", + {error, _} = disksup:parse_df(DarwinHdr, susv3), + {error, _} = disksup:parse_df("", susv3), + {error, _} = disksup:parse_df("\n\n", susv3), + + %% Have a simple example with no funny spaces in mount path + Darwin1 = "/dev/disk1 243949060 157002380 86690680 65% 2029724 " ++ + "4292937555 0% /\n", + {ok, {243949060, 65, "/"}, ""} = disksup:parse_df(Darwin1, susv3), + + %% Have a mount path with some spaces in it + Darwin2 = "/dev/disk1 243949060 157002380 86690680 65% 2029724 " ++ + "4292937555 0% /spaces 1 2\n", + {ok, {243949060, 65, "/spaces 1 2"}, ""} = disksup:parse_df(Darwin2, susv3). 
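Since parse_df/2 is now exported from disksup, it can also be exercised directly in a shell; the input below is simply the POSIX sample line from the test above (assuming os_mon is available in the code path):

    1> disksup:parse_df("tmpfs 498048 7288 490760 2% /run\n", posix).
    {ok,{498048,2,"/run"},[]}
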
diff --git a/lib/reltool/src/reltool_mod_win.erl b/lib/reltool/src/reltool_mod_win.erl index 2d56d74563..894d6e2ecb 100644 --- a/lib/reltool/src/reltool_mod_win.erl +++ b/lib/reltool/src/reltool_mod_win.erl @@ -833,7 +833,7 @@ load_code(Ed, Code) when is_binary(Code) -> keyWords() -> L = ["after","begin","case","try","cond","catch","andalso","orelse", - "end","fun","if","let","of","query","receive","when","bnot","not", + "end","fun","if","let","of","receive","when","bnot","not", "div","rem","band","and","bor","bxor","bsl","bsr","or","xor"], lists:flatten([K ++ " " || K <- L] ++ [0]). diff --git a/lib/runtime_tools/test/dbg_SUITE.erl b/lib/runtime_tools/test/dbg_SUITE.erl index 4b0864858c..cfe8412e33 100644 --- a/lib/runtime_tools/test/dbg_SUITE.erl +++ b/lib/runtime_tools/test/dbg_SUITE.erl @@ -23,7 +23,7 @@ -export([all/0, suite/0, big/1, tiny/1, simple/1, message/1, distributed/1, port/1, send/1, recv/1, - ip_port/1, file_port/1, file_port2/1, file_port_schedfix/1, + ip_port/1, file_port/1, file_port2/1, ip_port_busy/1, wrap_port/1, wrap_port_time/1, with_seq_trace/1, dead_suspend/1, local_trace/1, saved_patterns/1, tracer_exit_on_stop/1, @@ -41,7 +41,7 @@ suite() -> all() -> [big, tiny, simple, message, distributed, port, ip_port, send, recv, - file_port, file_port2, file_port_schedfix, ip_port_busy, + file_port, file_port2, ip_port_busy, wrap_port, wrap_port_time, with_seq_trace, dead_suspend, local_trace, saved_patterns, tracer_exit_on_stop, erl_tracer, distributed_erl_tracer]. @@ -623,99 +623,6 @@ file_port2(Config) when is_list(Config) -> end, ok. -%% Test that the scheduling timestamp fix for trace flag 'running' works. -file_port_schedfix(Config) when is_list(Config) -> - case (catch erlang:system_info(smp_support)) of - true -> - {skip, "No schedule fix on SMP"}; - _ -> - try - file_port_schedfix1(Config) - after - dbg:stop() - end - end. -file_port_schedfix1(Config) when is_list(Config) -> - stop(), - {A,B,C} = erlang:now(), - FTMP = atom_to_list(?MODULE) ++ integer_to_list(A) ++ - "-" ++ integer_to_list(B) ++ "-" ++ integer_to_list(C), - FName = filename:join([proplists:get_value(data_dir, Config), FTMP]), - %% - Port = dbg:trace_port(file, {FName, wrap, ".wraplog", 8*1024, 4}), - {ok, _} = dbg:tracer(port, Port), - {ok,[{matched,_node,0}]} = dbg:p(new,[running,procs,send,timestamp]), - %% - %% Generate the trace data - %% - %% This starts 3 processes that sends a message to each other in a ring, - %% 4 laps. Prior to sending the message to the next in the ring, each - %% process send 8 messages to itself, just to generate some trace data, - %% and to lower the possibility that the trace log wraps just after - %% a schedule out message (which would not burden any process and hence - %% not show up in the result) - %% - %% The wrap file trace is used because it burns a lot of time when the - %% driver swaps files, a lot more than the regular file trace. The test - %% case is dimensioned so that the log fills two files and just starts - %% on the third (out of four wrap files). This gives two file swaps, - %% and there are three processes, so one process will NOT be burdened. - %% The criterion for trace success is then that the max process - %% execution time must not be more than twice the min process - %% execution time. Wallclock. A normal result is about 10 times more - %% without schedule in - schedule out compensation (OTP-3938). 
- %% - ok = token_volleyball(3, 4, 8), - %% - {ok,[{matched,_,_}]} = dbg:p(all, [clear]), - stop(), - %% - %% Get the trace result - %% - Tag = make_ref(), - dbg:trace_client(file, {FName, wrap, ".wraplog"}, - {fun schedstat_handler/2, {self(), Tag, []}}), - Result = - receive - {Tag, D} -> - lists:map( - fun({Pid, {A1, B1, C1}}) -> - {Pid, C1/1000000 + B1 + A1*1000000} - end, - D) - end, - ok = io:format("Result=~p", [Result]), - % erlang:display({?MODULE, ?LINE, Result}), - %% - %% Analyze the result - %% - {Min, Max} = lists:foldl(fun({_Pid, M}, {Mi, Ma}) -> - {if M < Mi -> M; true -> Mi end, - if M > Ma -> M; true -> Ma end} - end, - {void, 0}, - Result), - % More PaN debug - io:format("Min = ~f, Max = ~f~n",[Min,Max]), - %% - %% Cleanup - %% - ToBeDeleted = filelib:wildcard(FName++"*"++".wraplog"), - lists:map(fun file:delete/1, ToBeDeleted), - % io:format("ToBeDeleted=~p", [ToBeDeleted]), - %% - %% Present the result - %% - P = (Max / Min - 1) * 100, - BottomLine = lists:flatten(io_lib:format("~.2f %", [P])), - if P > 100 -> - Reason = {BottomLine, '>', "100%"}, - erlang:display({file_port_schedfix, fail, Reason}), - ct:fail(Reason); - true -> - {comment, BottomLine} - end. - %% Test tracing to wrapping file port wrap_port(Config) when is_list(Config) -> Self = self(), diff --git a/lib/runtime_tools/test/dyntrace_SUITE.erl b/lib/runtime_tools/test/dyntrace_SUITE.erl index 7be2f49a8b..7ffbe54446 100644 --- a/lib/runtime_tools/test/dyntrace_SUITE.erl +++ b/lib/runtime_tools/test/dyntrace_SUITE.erl @@ -51,11 +51,7 @@ init_per_suite(Config) -> case erlang:system_info(debug_compiled) of false -> ""; true -> ".debug" - end ++ - case erlang:system_info(smp_support) of - false -> ""; - true -> ".smp" - end, + end ++ ".smp", [{emu_name,N}|Config]. end_per_suite(_Config) -> diff --git a/lib/sasl/src/Makefile b/lib/sasl/src/Makefile index ac7ee51100..45cd814bf8 100644 --- a/lib/sasl/src/Makefile +++ b/lib/sasl/src/Makefile @@ -37,7 +37,7 @@ RELSYSDIR = $(RELEASE_PATH)/lib/sasl-$(VSN) MODULES= alarm_handler sasl sasl_report \ sasl_report_file_h sasl_report_tty_h format_lib_supp \ misc_supp rb rb_format_supp release_handler \ - release_handler_1 si si_sasl_supp systools \ + release_handler_1 systools \ systools_make systools_rc systools_relup systools_lib \ erlsrv diff --git a/lib/sasl/src/sasl.app.src b/lib/sasl/src/sasl.app.src index 633cdfa070..d75543a91b 100644 --- a/lib/sasl/src/sasl.app.src +++ b/lib/sasl/src/sasl.app.src @@ -32,8 +32,6 @@ sasl_report, sasl_report_tty_h, sasl_report_file_h, - si, - si_sasl_supp, systools, systools_make, systools_rc, diff --git a/lib/sasl/src/sasl.appup.src b/lib/sasl/src/sasl.appup.src index 7f866507a0..94af164b20 100644 --- a/lib/sasl/src/sasl.appup.src +++ b/lib/sasl/src/sasl.appup.src @@ -18,7 +18,7 @@ %% %CopyrightEnd% {"%VSN%", %% Up from - max one major revision back - [{<<"3\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-19.* + [{<<"3\\.0\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.* %% Down to - max one major revision back - [{<<"3\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-19.* + [{<<"3\\.0\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.* }. diff --git a/lib/sasl/src/si.erl b/lib/sasl/src/si.erl deleted file mode 100644 index 275c6d508b..0000000000 --- a/lib/sasl/src/si.erl +++ /dev/null @@ -1,169 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. 
-%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% -%%----------------------------------------------------------------- -%% l(format_lib_supp), l(si_sasl_supp), l(si), l(si_ms_aos_supp), l(misc_supp). -%% c(format_lib_supp), c(si_sasl_supp), c(si), c(si_ms_aos_supp), c(misc_supp). -%%----------------------------------------------------------------- - - -%%-------------------------------------------------- -%% Description: -%% Status Inspection, main module. -%%-------------------------------------------------- - --module(si). - - -%% External exports --export([h/0, help/0, start/0, start/1, start_log/1, stop_log/0, - abbrevs/0, pi/1, pi/2, pi/3, pi/4, ppi/1, ppi/3, stop/0]). - -%% Internal exports --export([pi_impl/2, test/0]). - - -%%-------------------------------------------------- -%% Table of contents -%% 1. Interface -%% 2. Implementation - - --import(si_sasl_supp, [status_info/1, make_pid/1, p/1]). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% 1. Interface -%%-------------------------------------------------- - -h() -> print_help(). -help() -> print_help(). - -start() -> si_sasl_supp:start(). -start(Options) -> si_sasl_supp:start(Options). - -stop() -> si_sasl_supp:stop(). - -start_log(FileName) -> - si_sasl_supp:start_log(FileName). - -stop_log() -> - si_sasl_supp:stop_log(). - -%%%----------------------------------------------------------------- -%%% All functions can be called with an option 'normal' or 'all'; -%%% default is 'normal'. -%%%----------------------------------------------------------------- - -abbrevs() -> - io:format("~p", [lists:append(si_sasl_supp:process_abbrevs(), - process_abbrevs())]). - -%%----------------------------------------------------------------- -%% Process Info that tries to determine processtype (=Module), then -%% it uses this Module:format_info to format data from status_info/1. -%%----------------------------------------------------------------- -pi(XPid) -> - si_sasl_supp:si_exec({si, pi_impl}, [normal, XPid]). - -pi(Opt, XPid) -> - si_sasl_supp:si_exec({si, pi_impl}, [si_sasl_supp:valid_opt(Opt), XPid]). - -pi(A, B, C) when is_integer(A), is_integer(B), is_integer(C) -> - si_sasl_supp:si_exec({si, pi_impl}, [normal, {A, B, C}]). - -pi(Opt, A, B, C) when is_integer(A), is_integer(B), is_integer(C) -> - si_sasl_supp:si_exec({si, pi_impl}, [si_sasl_supp:valid_opt(Opt), {A, B, C}]). - -%%----------------------------------------------------------------- -%% Pretty print Process_Info. -%%----------------------------------------------------------------- -ppi(XPid) -> - si_sasl_supp:ppi(XPid). -ppi(A, B, C) -> - si_sasl_supp:ppi(A, B, C). - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% 2. 
Implementation -%%-------------------------------------------------- - -print_help() -> - p("~nStatus Inspection tool - usage"), - p("=============================="), - p(" For all these functions, Opt is an optional argument"), - p(" which can be 'normal' or 'all'; default is 'normal'."), - p(" If 'all', all information will be printed."), - p(" A Pid can be: \"<A.B.C>\", {A, B, C}, B, a registered_name or an abbrev."), - p("ANY PROCESS"), - p("si:pi([Opt,] Pid) - Formatted information about any process that"), - p(" SI recognises."), - p("si:pi([Opt,] A,B,C) - Same as si:pi({A, B, C})."), - p("si:ppi(Pid) - Pretty formating of process_info."), - p(" Works for any process."), - p("MISC"), - p("si:abbrevs() - Lists valid abbreviations."), - p("si:start_log(Filename) - Logging to file."), - p("si:stop_log()"), - p("si:start() - Starts Status Inspection (the si_server)."), - p("si:start([{start_log, FileName}])"), - p("si:stop() - Shut down SI."). - - -%%-------------------------------------------------- -%% Copied (and modified) code from si_sasl_supp. -%%-------------------------------------------------- -pi_impl(Opt, XPid) -> - case make_pid(try_local_expand_abbrev(XPid)) of - Pid when is_pid(Pid) -> - case status_info(Pid) of - {status_info, Pid, {module, Module}, Data} -> - si_sasl_supp:do_best_printout(Opt, Pid, Module, Data); - {error, Reason} -> - _ = si_sasl_supp:ppi_impl(Pid), - {error, {"can not get status info from process:", - XPid, - Reason}}; - Else -> - {error, {"unknown status info", Else}} - end; - {error, Reason} -> - {error, Reason} - end. - -%%-------------------------------------------------- -%% Functions for handling of abbreviations -%%-------------------------------------------------- -try_local_expand_abbrev(Abbrev) -> - case si_sasl_supp:expand_abbrev(Abbrev, process_abbrevs()) of - {value, {_, RealName}} -> RealName; - _ -> Abbrev - end. - -process_abbrevs() -> - []. - -%% Test get_status_info/format_status_info for all implemented servers. -test() -> - lists:foreach(fun test_all_registered/1, - lists:append(si_sasl_supp:process_abbrevs(), - process_abbrevs())). - -test_all_registered({Al, _Ful}) -> - si:pi(all, Al). diff --git a/lib/sasl/src/si_sasl_supp.erl b/lib/sasl/src/si_sasl_supp.erl deleted file mode 100644 index cce628f8c4..0000000000 --- a/lib/sasl/src/si_sasl_supp.erl +++ /dev/null @@ -1,380 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%% -%% %CopyrightEnd% -%% --module(si_sasl_supp). - --behaviour(gen_server). - -%%%--------------------------------------------------------------------------- -%%% Description: -%%% This module contains the BOS specific parts of the Status Inspection Tool. -%%%--------------------------------------------------------------------------- - - -%% user interface --export([h/0, help/0, start_log/1, stop_log/0, abbrevs/0, pi/1, pi/2, pi/3, - pi/4, ppi/1, ppi/3, start/0, start/1, stop/0, start_link/1]). 
- -%% intermodule exports --export([make_pid/1, make_pid/3, process_abbrevs/0, expand_abbrev/2, - status_info/1, valid_opt/1, p/1, do_best_printout/4, - si_exec/2, handle_call/3, terminate/2]). - -%% exports for use within module --export([init/1, start_log_impl/1, pi_impl/2, ppi_impl/1]). - -%% other gen_server callbacks (not used) --export([handle_cast/2, handle_info/2, code_change/3]). - -%%-------------------------------------------------- -%% Table of contents -%% 1. Interface -%% 2. SI - Server -%% 3. Code -%% 4. Selectors -%%-------------------------------------------------- - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% 1. Interface -%% ----------------------------------------------------- - -h() -> print_help(). -help() -> print_help(). - -si_exec(Fun, Args) -> call({si_exec, Fun, Args}). - -start_log(FileName) -> - call({start_log, FileName}). - -stop_log() -> - call(stop_log). - -abbrevs() -> - io:format("~p", [process_abbrevs()]). - -%%----------------------------------------------------------------- -%% All functions can be called with an option 'normal' or 'all'; -%% default is 'normal'. -%%----------------------------------------------------------------- -%% Process Info that tries to determine processtype (=Module), then -%% it uses this Module:format_info to format data from status_info/1. -%%----------------------------------------------------------------- -pi(XPid) -> - si_exec({si_sasl_supp, pi_impl}, [normal, XPid]). - -pi(Opt, XPid) -> - si_exec({si_sasl_supp, pi_impl}, [valid_opt(Opt), XPid]). - -pi(A, B, C) when is_integer(A), is_integer(B), is_integer(C) -> - si_exec({si_sasl_supp, pi_impl}, [normal, {A, B, C}]). - -pi(Opt, A, B, C) when is_integer(A), is_integer(B), is_integer(C) -> - si_exec({si_sasl_supp, pi_impl}, [valid_opt(Opt), {A, B, C}]). - -%%----------------------------------------------------------------- -%% Pretty print Process_Info. -%%----------------------------------------------------------------- -ppi(XPid) -> - case whereis(si_server) of - undefined -> % You can always run ppi. - ppi_impl(XPid); % if si_server is down, use standard_io - _ -> - si_exec({si_sasl_supp, ppi_impl}, [XPid]) - end. -ppi(A, B, C) -> - case whereis(si_server) of - undefined -> % You can always run ppi. - ppi_impl({A, B, C}); % if si_server is down, use standard_io - _ -> - si_exec({si_sasl_supp, ppi_impl}, [{A, B, C}]) - end. - - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% 2. SI - Server -%%-------------------------------------------------- --record(state, {}). - -start() -> start([]). -start(Options) -> - supervisor:start_child(sasl_sup, - {si_server, {si_sasl_supp, start_link, [Options]}, - temporary, brutal_kill, worker, [si_sasl_supp]}). - -start_link(_Options) -> - gen_server:start_link({local, si_server}, si_sasl_supp, [], []). - -stop() -> - call(stop), - supervisor:delete_child(sasl_sup, si_server). - - -init(Options) -> - process_flag(trap_exit, true), - start_log_impl(get_option(Options, start_log, standard_io)), - {ok, #state{}}. - -%%----------------------------------------------------------------- -%% If an error occurs and we're logging to file: write the error -%% to the file. -%% Always return the error. -%% The only data held by the si_server is the device in its process dictionary. 
-%%----------------------------------------------------------------- -handle_call({si_exec, Fun, Args}, _From, State) -> - case catch apply(Fun, Args) of - {'EXIT', Reason} -> - print_error(get(device), - "SI internal error. Reason: ~w~n", - [Reason]), - {stop, shutdown, {internal_error, Reason}, State}; - {error, Reason} -> - print_error(get(device), "~nSI error: ~w~n", [Reason]), - {reply, {error, Reason}, State}; - X -> - {reply, X, State} - end; -handle_call({start_log, FileName}, _From, State) -> - start_log_impl(FileName), - {reply, ok, State}; -handle_call(stop_log, _From, State) -> - start_log_impl(standard_io), - {reply, ok, State}; -handle_call(stop, _From, State) -> - start_log_impl(standard_io), - {stop, normal, stopped, State}. - -terminate(_Reason, _State) -> - _ = close_device(get(device)), - ok. - -handle_cast(_Msg, State) -> - {noreply, State}. -handle_info(_Info, State) -> - {noreply, State}. -code_change(_OldVsn, State, _Extra) -> - {ok, State}. - -close_device(standard_io) -> ok; -close_device(Fd) -> file:close(Fd). - -print_error(standard_io, _, _) -> ok; -print_error(Device, Format, Args) -> - io:format(Device, Format, Args). - -get_option(Options, Key, Default) -> - case lists:keysearch(Key, 1, Options) of - {value, {_Key, Value}} -> Value; - _ -> Default - end. - -open_log_file(undefined, NewFile) -> - open_log_file(NewFile); -open_log_file(standard_io, NewFile) -> - open_log_file(NewFile); -open_log_file(OldFile, NewFile) -> - _ = file:close(OldFile), - open_log_file(NewFile). - -open_log_file(standard_io) -> standard_io; -open_log_file(FileName) -> - case file:open(FileName, [write]) of - {ok, Fd} -> Fd; - Error -> - io:format("si_sasl_supp: Cannot open file '~s' (~w).~n", - [FileName, Error]), - io:format("si_sasl_supp: Using standard_io~n"), - standard_io - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%% 3. Code -%%-------------------------------------------------- - -%%----------------------------------------------------------------- -%% call(Request) -> Term -%%----------------------------------------------------------------- -call(Req) -> - gen_server:call(si_server, Req, infinity). - -%%-------------------------------------------------- -%% Makes a Pid of almost anything. -%% Returns: Pid|{error, Reason} -%% Fails: Never. -%%-------------------------------------------------- -make_pid(A,B,C) when is_integer(A), is_integer(B), is_integer(C) -> - list_to_pid(lists:concat(["<",A,".",B,".",C,">"])). -make_pid(P) when is_pid(P) -> P; -make_pid(undefined) -> undefined; -make_pid(P) when is_atom(P) -> - case whereis(P) of - undefined -> - case expand_abbrev(P, process_abbrevs()) of - {error, Reason} -> {error, Reason}; - {value, {_Abbrev, FullName}} -> - case whereis(FullName) of - undefined -> - {error, {'process not registered', P}}; - Pid -> Pid - end - end; - Pid -> Pid - end; -make_pid(P) when is_list(P) -> list_to_pid(P); -make_pid({A, B, C}) -> make_pid(A, B, C); -make_pid(X) -> {error, {'can not make a pid of', X}}. - -process_abbrevs() -> - [{init, init}, - {fs, file_server}]. 
- -%%-------------------------------------------------- -%% Args: Abbrevs is an assoc-list of {Abbrev, RealName} -%% Returns: {value, {Abbrev, FullName}}|{error, Reason} -%%-------------------------------------------------- -expand_abbrev(ProcessName, Abbrevs) -> - case lists:keysearch(ProcessName, 1, Abbrevs) of - {value, {Abbrev, FullName}} -> - {value, {Abbrev, FullName}}; - _ -> - case lists:keysearch(ProcessName, 2, Abbrevs) of - {value, {Abbrev, FullName}} -> - {value, {Abbrev, FullName}}; - _ -> - {error, {'invalid process name', ProcessName}} - end - end. - -%%----------------------------------------------------------------- -%% This is the function that actually gets the information out -%% of the agent/server/... -%% Returns: {status_info, Pid, Type, Data} -%% | {error, Reason} -%%----------------------------------------------------------------- -status_info(Pid) when is_pid(Pid) -> - case catch sys:get_status(Pid, 5000) of - {status, Pid, Type, Info} -> - {status_info, Pid, Type, Info}; - _ -> - {error, {'process does not respond', Pid}} - end; - -status_info(X) -> - {error, {'not a pid', X}}. - -%%-------------------------------------------------- -%% Implementation starts here. -%%-------------------------------------------------- -start_log_impl(FileName) -> - put(device, open_log_file(get(device), FileName)). - -valid_opt(all) -> all; -valid_opt(_Opt) -> normal. - - -print_help() -> - p("- - - - - - - - PROCESSES - - - - - - - - - "), - p("si_sasl_supp:pi([Opt,] Pid) - Formatted information about any process that"), - p(" SI recognises."), - p("si_sasl_supp:pi([Opt,] A,B,C) - Same as si_sasl_supp:pi({A, B, C})."), - p("si_sasl_supp:ppi(Pid) - Pretty formating of process_info."), - p(" Works for any process."), - p("- - - - - - - - MISC - - - - - - - - - - - "), - p("si_sasl_supp:abbrevs() - Lists valid abbreviations."), - p("si_sasl_supp:start_log(FileNname)"), - p("si_sasl_supp:stop_log()"), - p("si_sasl_supp:start() - Starts Status Inspection (the si_server)."), - p("si_sasl_supp:start([{start_log, FileName}])"), - p("si_sasl_supp:stop() - Shut down SI."). - - - -%% Convenient shorthand -p(X) -> - io:format(lists:append(X, "~n")). - -pi_impl(Opt, XPid) -> - case make_pid(XPid) of - Pid when is_pid(Pid) -> - case status_info(Pid) of - {status_info, Pid, {module, Module}, Data} -> - do_best_printout(Opt, Pid, Module, Data); - {error, Reason} -> - _ = ppi_impl(Pid), - {error, {"can not get status info from process:", - XPid, - Reason}} - end; - {error, Reason} -> - {error, Reason} - end. - -%%-------------------------------------------------- -%% Is there a format_info for this process? In that case, run it! -%% Return ok|{error, Reason} -%% Fails: Never. -%%-------------------------------------------------- -do_best_printout(Opt, Pid, Mod, Data) when is_pid(Pid) -> - case print_info(get(device), Pid, {Mod, format_status}, Opt, Data) of - ok -> ok; - {error, Reason} -> - _ = ppi_impl(Pid), - {error, Reason} - end. - -ppi_impl(XPid) -> - case make_pid(XPid) of - P when is_pid(P) -> - case process_info(P) of - undefined -> - {error, {'dead process', P}}; - PI -> - Device = case get(device) of - undefined -> standard_io; - X -> X - end, - io:format(Device, "~nPretty Process Info~n", []), - io:format(Device, "-------------------~n", []), - io:format(Device, "~p~n", [PI]) - end; - _ -> {error, {no_pid, XPid}} - end. 
- -print_info(Device, Pid, {Module, Func}, Opt, Data) -> - case erlang:function_exported(Module, Func, 2) of - true -> - case catch apply(Module, Func, [Opt, Data]) of - Format when is_list(Format) -> - format_lib_supp:print_info(Device, 79, - add_pid_to_format(Pid, Format)), - ok; - Other -> {error, {'invalid format', Other}} - end; - _ -> - {error, {no_such_function, Module, Func}} - end. - -add_pid_to_format(Pid, [{header, H} | T]) -> - [{header, H}, {data, [{"Pid", Pid}]} | T]; -add_pid_to_format(Pid, List) -> - [{data, [{"Pid", Pid}]} | List]. - - diff --git a/lib/sasl/test/test_lib.hrl b/lib/sasl/test/test_lib.hrl index 9a54937f96..f5210d4f27 100644 --- a/lib/sasl/test/test_lib.hrl +++ b/lib/sasl/test/test_lib.hrl @@ -1,3 +1,3 @@ -define(ertsvsn,"4.4"). --define(kernelvsn,"5.0"). --define(stdlibvsn,"3.0"). +-define(kernelvsn,"5.3"). +-define(stdlibvsn,"3.4"). diff --git a/lib/ssl/src/dtls_udp_listener.erl b/lib/ssl/src/dtls_udp_listener.erl index c789a32087..c9e04767aa 100644 --- a/lib/ssl/src/dtls_udp_listener.erl +++ b/lib/ssl/src/dtls_udp_listener.erl @@ -35,7 +35,7 @@ -record(state, {port, - listner, + listener, dtls_options, emulated_options, dtls_msq_queues = kv_new(), @@ -81,7 +81,7 @@ init([Port, EmOpts, InetOptions, DTLSOptions]) -> first = true, dtls_options = DTLSOptions, emulated_options = EmOpts, - listner = Socket, + listener = Socket, close = false}} catch _:_ -> {error, closed} @@ -91,7 +91,7 @@ handle_call({accept, _}, _, #state{close = true} = State) -> handle_call({accept, Accepter}, From, #state{first = true, accepters = Accepters, - listner = Socket} = State0) -> + listener = Socket} = State0) -> next_datagram(Socket), State = State0#state{first = false, accepters = queue:in({Accepter, From}, Accepters)}, @@ -100,7 +100,7 @@ handle_call({accept, Accepter}, From, #state{first = true, handle_call({accept, Accepter}, From, #state{accepters = Accepters} = State0) -> State = State0#state{accepters = queue:in({Accepter, From}, Accepters)}, {noreply, State}; -handle_call(sockname, _, #state{listner = Socket} = State) -> +handle_call(sockname, _, #state{listener = Socket} = State) -> Reply = inet:sockname(Socket), {reply, Reply, State}; handle_call(close, _, #state{dtls_processes = Processes, @@ -114,7 +114,7 @@ handle_call(close, _, #state{dtls_processes = Processes, end, queue:to_list(Accepters)), {reply, ok, State#state{close = true, accepters = queue:new()}} end; -handle_call({get_sock_opts, {SocketOptNames, EmOptNames}}, _, #state{listner = Socket, +handle_call({get_sock_opts, {SocketOptNames, EmOptNames}}, _, #state{listener = Socket, emulated_options = EmOpts} = State) -> case get_socket_opts(Socket, SocketOptNames) of {ok, Opts} -> @@ -125,7 +125,7 @@ handle_call({get_sock_opts, {SocketOptNames, EmOptNames}}, _, #state{listner = S handle_call(get_all_opts, _, #state{dtls_options = DTLSOptions, emulated_options = EmOpts} = State) -> {reply, {ok, EmOpts, DTLSOptions}, State}; -handle_call({set_sock_opts, {SocketOpts, NewEmOpts}}, _, #state{listner = Socket, emulated_options = EmOpts0} = State) -> +handle_call({set_sock_opts, {SocketOpts, NewEmOpts}}, _, #state{listener = Socket, emulated_options = EmOpts0} = State) -> set_socket_opts(Socket, SocketOpts), EmOpts = do_set_emulated_opts(NewEmOpts, EmOpts0), {reply, ok, State#state{emulated_options = EmOpts}}. @@ -134,7 +134,7 @@ handle_cast({active_once, Client, Pid}, State0) -> State = handle_active_once(Client, Pid, State0), {noreply, State}. 
-handle_info({udp, Socket, IP, InPortNo, _} = Msg, #state{listner = Socket} = State0) -> +handle_info({udp, Socket, IP, InPortNo, _} = Msg, #state{listener = Socket} = State0) -> State = handle_datagram({IP, InPortNo}, Msg, State0), next_datagram(Socket), {noreply, State}; @@ -142,11 +142,11 @@ handle_info({udp, Socket, IP, InPortNo, _} = Msg, #state{listner = Socket} = Sta %% UDP socket does not have a connection and should not receive an econnreset %% This does however happens on on some windows versions. Just ignoring it %% appears to make things work as expected! -handle_info({udp_error, Socket, econnreset = Error}, #state{listner = Socket} = State) -> +handle_info({udp_error, Socket, econnreset = Error}, #state{listener = Socket} = State) -> Report = io_lib:format("Ignore SSL UDP Listener: Socket error: ~p ~n", [Error]), error_logger:info_report(Report), {noreply, State}; -handle_info({udp_error, Socket, Error}, #state{listner = Socket} = State) -> +handle_info({udp_error, Socket, Error}, #state{listener = Socket} = State) -> Report = io_lib:format("SSL UDP Listener shutdown: Socket error: ~p ~n", [Error]), error_logger:info_report(Report), {noreply, State#state{close=true}}; @@ -225,10 +225,10 @@ setup_new_connection(User, From, Client, Msg, #state{dtls_processes = Processes, dtls_msq_queues = MsgQueues, dtls_options = DTLSOpts, port = Port, - listner = Socket, + listener = Socket, emulated_options = EmOpts} = State) -> ConnArgs = [server, "localhost", Port, {self(), {Client, Socket}}, - {DTLSOpts, EmOpts, udp_listner}, User, dtls_socket:default_cb_info()], + {DTLSOpts, EmOpts, udp_listener}, User, dtls_socket:default_cb_info()], case dtls_connection_sup:start_child(ConnArgs) of {ok, Pid} -> erlang:monitor(process, Pid), diff --git a/lib/ssl/src/ssl_cipher.erl b/lib/ssl/src/ssl_cipher.erl index 50c5f0d755..b6cd22dd13 100644 --- a/lib/ssl/src/ssl_cipher.erl +++ b/lib/ssl/src/ssl_cipher.erl @@ -375,30 +375,38 @@ psk_suites({3, N}) -> psk_suites(N) when N >= 3 -> [ + ?TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384, ?TLS_DHE_PSK_WITH_AES_256_GCM_SHA384, ?TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, ?TLS_PSK_WITH_AES_256_GCM_SHA384, + ?TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, ?TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, ?TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, ?TLS_PSK_WITH_AES_256_CBC_SHA384, + ?TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256, ?TLS_DHE_PSK_WITH_AES_128_GCM_SHA256, ?TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, ?TLS_PSK_WITH_AES_128_GCM_SHA256, + ?TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, ?TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, ?TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, ?TLS_PSK_WITH_AES_128_CBC_SHA256 ] ++ psk_suites(0); psk_suites(_) -> - [?TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + [?TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + ?TLS_DHE_PSK_WITH_AES_256_CBC_SHA, ?TLS_RSA_PSK_WITH_AES_256_CBC_SHA, ?TLS_PSK_WITH_AES_256_CBC_SHA, + ?TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, ?TLS_DHE_PSK_WITH_AES_128_CBC_SHA, ?TLS_RSA_PSK_WITH_AES_128_CBC_SHA, ?TLS_PSK_WITH_AES_128_CBC_SHA, + ?TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, ?TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, ?TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, ?TLS_PSK_WITH_3DES_EDE_CBC_SHA, + ?TLS_ECDHE_PSK_WITH_RC4_128_SHA, ?TLS_DHE_PSK_WITH_RC4_128_SHA, ?TLS_RSA_PSK_WITH_RC4_128_SHA, ?TLS_PSK_WITH_RC4_128_SHA]. 
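The hunks that follow register the matching suite_definition/1 and suite/1 clauses for the new ECDHE-PSK cipher suites. A small illustrative sanity check (a sketch only, assuming the ?TLS_ECDHE_PSK_* macros from ssl_cipher.hrl are in scope):

    %% Round trip one of the newly added suites.
    {ecdhe_psk, aes_256_cbc, sha384, default_prf} =
        ssl_cipher:suite_definition(?TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384),
    ?TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 =
        ssl_cipher:suite({ecdhe_psk, aes_256_cbc, sha384}).
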
@@ -565,6 +573,15 @@ suite_definition(?TLS_RSA_PSK_WITH_AES_128_CBC_SHA) -> suite_definition(?TLS_RSA_PSK_WITH_AES_256_CBC_SHA) -> {rsa_psk, aes_256_cbc, sha, default_prf}; +%%% PSK NULL Cipher Suites RFC 4785 + +suite_definition(?TLS_PSK_WITH_NULL_SHA) -> + {psk, null, sha, default_prf}; +suite_definition(?TLS_DHE_PSK_WITH_NULL_SHA) -> + {dhe_psk, null, sha, default_prf}; +suite_definition(?TLS_RSA_PSK_WITH_NULL_SHA) -> + {rsa_psk, null, sha, default_prf}; + %%% TLS 1.2 PSK Cipher Suites RFC 5487 suite_definition(?TLS_PSK_WITH_AES_128_GCM_SHA256) -> @@ -606,6 +623,36 @@ suite_definition(?TLS_RSA_PSK_WITH_NULL_SHA256) -> suite_definition(?TLS_RSA_PSK_WITH_NULL_SHA384) -> {rsa_psk, null, sha384, default_prf}; +%%% ECDHE PSK Cipher Suites RFC 5489 + +suite_definition(?TLS_ECDHE_PSK_WITH_RC4_128_SHA) -> + {ecdhe_psk, rc4_128, sha, default_prf}; +suite_definition(?TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA) -> + {ecdhe_psk, '3des_ede_cbc', sha, default_prf}; +suite_definition(?TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA) -> + {ecdhe_psk, aes_128_cbc, sha, default_prf}; +suite_definition(?TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA) -> + {ecdhe_psk, aes_256_cbc, sha, default_prf}; +suite_definition(?TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256) -> + {ecdhe_psk, aes_128_cbc, sha256, default_prf}; +suite_definition(?TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384) -> + {ecdhe_psk, aes_256_cbc, sha384, default_prf}; +suite_definition(?TLS_ECDHE_PSK_WITH_NULL_SHA256) -> + {ecdhe_psk, null, sha256, default_prf}; +suite_definition(?TLS_ECDHE_PSK_WITH_NULL_SHA384) -> + {ecdhe_psk, null, sha384, default_prf}; + +%%% ECDHE_PSK with AES-GCM and AES-CCM Cipher Suites, draft-ietf-tls-ecdhe-psk-aead-05 + +suite_definition(?TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256) -> + {ecdhe_psk, aes_128_gcm, null, sha256}; +suite_definition(?TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384) -> + {ecdhe_psk, aes_256_gcm, null, sha384}; +%% suite_definition(?TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256) -> +%% {ecdhe_psk, aes_128_ccm, null, sha256}; +%% suite_definition(?TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256) -> +%% {ecdhe_psk, aes_256_ccm, null, sha256}; + %%% SRP Cipher Suites RFC 5054 suite_definition(?TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) -> @@ -867,6 +914,15 @@ suite({rsa_psk, aes_128_cbc,sha}) -> suite({rsa_psk, aes_256_cbc,sha}) -> ?TLS_RSA_PSK_WITH_AES_256_CBC_SHA; +%%% PSK NULL Cipher Suites RFC 4785 + +suite({psk, null, sha}) -> + ?TLS_PSK_WITH_NULL_SHA; +suite({dhe_psk, null, sha}) -> + ?TLS_DHE_PSK_WITH_NULL_SHA; +suite({rsa_psk, null, sha}) -> + ?TLS_RSA_PSK_WITH_NULL_SHA; + %%% TLS 1.2 PSK Cipher Suites RFC 5487 suite({psk, aes_128_gcm, null, sha256}) -> @@ -908,6 +964,36 @@ suite({rsa_psk, null, sha256}) -> suite({rsa_psk, null, sha384}) -> ?TLS_RSA_PSK_WITH_NULL_SHA384; +%%% ECDHE PSK Cipher Suites RFC 5489 + +suite({ecdhe_psk, rc4_128,sha}) -> + ?TLS_ECDHE_PSK_WITH_RC4_128_SHA; +suite({ecdhe_psk, '3des_ede_cbc',sha}) -> + ?TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA; +suite({ecdhe_psk, aes_128_cbc,sha}) -> + ?TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA; +suite({ecdhe_psk, aes_256_cbc,sha}) -> + ?TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA; +suite({ecdhe_psk, aes_128_cbc, sha256}) -> + ?TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256; +suite({ecdhe_psk, aes_256_cbc, sha384}) -> + ?TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384; +suite({ecdhe_psk, null, sha256}) -> + ?TLS_ECDHE_PSK_WITH_NULL_SHA256; +suite({ecdhe_psk, null, sha384}) -> + ?TLS_ECDHE_PSK_WITH_NULL_SHA384; + +%%% ECDHE_PSK with AES-GCM and AES-CCM Cipher Suites, draft-ietf-tls-ecdhe-psk-aead-05 + +suite({ecdhe_psk, aes_128_gcm, null, sha256}) -> + 
?TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256; +suite({ecdhe_psk, aes_256_gcm, null, sha384}) -> + ?TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384; +%% suite({ecdhe_psk, aes_128_ccm, null, sha256}) -> +%% ?TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256; +%% suite({ecdhe_psk, aes_256_ccm, null, sha256}) -> +%% ?TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256; + %%% SRP Cipher Suites RFC 5054 suite({srp_anon, '3des_ede_cbc', sha}) -> @@ -1467,7 +1553,8 @@ is_acceptable_keyexchange(dhe_dss, Algos) -> is_acceptable_keyexchange(dhe_rsa, Algos) -> proplists:get_bool(dh, Algos) andalso proplists:get_bool(rsa, Algos); -is_acceptable_keyexchange(ecdh_anon, Algos) -> +is_acceptable_keyexchange(KeyExchange, Algos) when KeyExchange == ecdh_anon; + KeyExchange == ecdhe_psk -> proplists:get_bool(ecdh, Algos); is_acceptable_keyexchange(KeyExchange, Algos) when KeyExchange == ecdh_ecdsa; KeyExchange == ecdhe_ecdsa -> diff --git a/lib/ssl/src/ssl_cipher.hrl b/lib/ssl/src/ssl_cipher.hrl index 8e8f3d9c67..e5462d8402 100644 --- a/lib/ssl/src/ssl_cipher.hrl +++ b/lib/ssl/src/ssl_cipher.hrl @@ -399,6 +399,17 @@ %% TLS_RSA_PSK_WITH_AES_256_CBC_SHA = { 0x00, 0x95 }; -define(TLS_RSA_PSK_WITH_AES_256_CBC_SHA, <<?BYTE(16#00), ?BYTE(16#95)>>). +%%% PSK NULL Cipher Suites RFC 4785 + +%% TLS_PSK_WITH_NULL_SHA = { 0x00, 0x2C }; +-define(TLS_PSK_WITH_NULL_SHA, <<?BYTE(16#00), ?BYTE(16#2C)>>). + +%% TLS_DHE_PSK_WITH_NULL_SHA = { 0x00, 0x2D }; +-define(TLS_DHE_PSK_WITH_NULL_SHA, <<?BYTE(16#00), ?BYTE(16#2D)>>). + +%% TLS_RSA_PSK_WITH_NULL_SHA = { 0x00, 0x2E }; +-define(TLS_RSA_PSK_WITH_NULL_SHA, <<?BYTE(16#00), ?BYTE(16#2E)>>). + %%% TLS 1.2 PSK Cipher Suites RFC 5487 %% TLS_PSK_WITH_AES_128_GCM_SHA256 = {0x00,0xA8}; @@ -455,6 +466,46 @@ %% TLS_RSA_PSK_WITH_NULL_SHA384 = {0x00,0xB9}; -define(TLS_RSA_PSK_WITH_NULL_SHA384, <<?BYTE(16#00), ?BYTE(16#B9)>>). +%%% ECDHE PSK Cipher Suites RFC 5489 + +%% TLS_ECDHE_PSK_WITH_RC4_128_SHA = {0xC0,0x33}; +-define(TLS_ECDHE_PSK_WITH_RC4_128_SHA, <<?BYTE(16#C0), ?BYTE(16#33)>>). + +%% TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = {0xC0,0x34}; +-define(TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, <<?BYTE(16#C0), ?BYTE(16#34)>>). + +%% TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = {0xC0,0x35}; +-define(TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, <<?BYTE(16#C0), ?BYTE(16#35)>>). + +%% TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = {0xC0,0x36}; +-define(TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, <<?BYTE(16#C0), ?BYTE(16#36)>>). + +%% TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = {0xC0,0x37}; +-define(TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, <<?BYTE(16#C0), ?BYTE(16#37)>>). + +%% TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = {0xC0,0x38}; +-define(TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, <<?BYTE(16#C0), ?BYTE(16#38)>>). + +%% TLS_ECDHE_PSK_WITH_NULL_SHA256 = {0xC0,0x3A}; +-define(TLS_ECDHE_PSK_WITH_NULL_SHA256, <<?BYTE(16#C0), ?BYTE(16#3A)>>). + +%% TLS_ECDHE_PSK_WITH_NULL_SHA384 = {0xC0,0x3B}; +-define(TLS_ECDHE_PSK_WITH_NULL_SHA384, <<?BYTE(16#C0), ?BYTE(16#3B)>>). + +%%% ECDHE_PSK with AES-GCM and AES-CCM Cipher Suites, draft-ietf-tls-ecdhe-psk-aead-05 + +%% TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256 = {0xTBD; 0xTBD} {0xD0,0x01}; +-define(TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256, <<?BYTE(16#D0), ?BYTE(16#01)>>). + +%% TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384 = {0xTBD; 0xTBD} {0xD0,0x02}; +-define(TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384, <<?BYTE(16#D0), ?BYTE(16#02)>>). + +%% TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256 = {0xTBD; 0xTBD} {0xD0,0x03}; +-define(TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256, <<?BYTE(16#D0), ?BYTE(16#03)>>). 
+ +%% TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256 = {0xTBD; 0xTBD} {0xD0,0x05}; +-define(TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256, <<?BYTE(16#D0), ?BYTE(16#05)>>). + %%% SRP Cipher Suites RFC 5054 %% TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = { 0xC0,0x1A }; diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl index b031d3d47b..5d48325719 100644 --- a/lib/ssl/src/ssl_connection.erl +++ b/lib/ssl/src/ssl_connection.erl @@ -146,8 +146,8 @@ socket_control(Connection, Socket, Pid, Transport) -> -spec socket_control(tls_connection | dtls_connection, port(), pid(), atom(), pid()| undefined) -> {ok, #sslsocket{}} | {error, reason()}. %%-------------------------------------------------------------------- -socket_control(Connection, Socket, Pid, Transport, udp_listner) -> - %% dtls listner process must have the socket control +socket_control(Connection, Socket, Pid, Transport, udp_listener) -> + %% dtls listener process must have the socket control {ok, Connection:socket(Pid, Transport, Socket, Connection, undefined)}; socket_control(tls_connection = Connection, Socket, Pid, Transport, ListenTracker) -> @@ -517,7 +517,7 @@ certify(internal, #server_key_exchange{exchange_keys = Keys}, when Alg == dhe_dss; Alg == dhe_rsa; Alg == ecdhe_rsa; Alg == ecdhe_ecdsa; Alg == dh_anon; Alg == ecdh_anon; - Alg == psk; Alg == dhe_psk; Alg == rsa_psk; + Alg == psk; Alg == dhe_psk; Alg == ecdhe_psk; Alg == rsa_psk; Alg == srp_dss; Alg == srp_rsa; Alg == srp_anon -> Params = ssl_handshake:decode_server_key(Keys, Alg, ssl:tls_version(Version)), @@ -542,6 +542,15 @@ certify(internal, #server_key_exchange{exchange_keys = Keys}, end end; +certify(internal, #certificate_request{}, + #state{role = client, negotiated_version = Version, + key_algorithm = Alg} = State, _) + when Alg == dh_anon; Alg == ecdh_anon; + Alg == psk; Alg == dhe_psk; Alg == ecdhe_psk; Alg == rsa_psk; + Alg == srp_dss; Alg == srp_rsa; Alg == srp_anon -> + handle_own_alert(?ALERT_REC(?FATAL, ?HANDSHAKE_FAILURE), + Version, certify, State); + certify(internal, #certificate_request{} = CertRequest, #state{session = #session{own_certificate = Cert}, role = client, @@ -1399,6 +1408,16 @@ certify_client_key_exchange(#client_dhe_psk_identity{} = ClientKey, PremasterSecret = ssl_handshake:premaster_secret(ClientKey, ServerDhPrivateKey, Params, PSKLookup), calculate_master_secret(PremasterSecret, State0, Connection, certify, cipher); + +certify_client_key_exchange(#client_ecdhe_psk_identity{} = ClientKey, + #state{diffie_hellman_keys = ServerEcDhPrivateKey, + ssl_options = + #ssl_options{user_lookup_fun = PSKLookup}} = State, + Connection) -> + PremasterSecret = + ssl_handshake:premaster_secret(ClientKey, ServerEcDhPrivateKey, PSKLookup), + calculate_master_secret(PremasterSecret, State, Connection, certify, cipher); + certify_client_key_exchange(#client_rsa_psk_identity{} = ClientKey, #state{private_key = Key, ssl_options = @@ -1418,6 +1437,7 @@ certify_server(#state{key_algorithm = Algo} = State, _) when Algo == dh_anon; Algo == ecdh_anon; Algo == psk; Algo == dhe_psk; + Algo == ecdhe_psk; Algo == srp_anon -> State; @@ -1524,6 +1544,28 @@ key_exchange(#state{role = server, key_algorithm = dhe_psk, State = Connection:queue_handshake(Msg, State0), State#state{diffie_hellman_keys = DHKeys}; +key_exchange(#state{role = server, key_algorithm = ecdhe_psk, + ssl_options = #ssl_options{psk_identity = PskIdentityHint}, + hashsign_algorithm = HashSignAlgo, + private_key = PrivateKey, + session = #session{ecc = ECCCurve}, + connection_states = ConnectionStates0, 
+ negotiated_version = Version + } = State0, Connection) -> + ECDHKeys = public_key:generate_key(ECCCurve), + #{security_parameters := SecParams} = + ssl_record:pending_connection_state(ConnectionStates0, read), + #security_parameters{client_random = ClientRandom, + server_random = ServerRandom} = SecParams, + Msg = ssl_handshake:key_exchange(server, ssl:tls_version(Version), + {ecdhe_psk, + PskIdentityHint, ECDHKeys, + HashSignAlgo, ClientRandom, + ServerRandom, + PrivateKey}), + State = Connection:queue_handshake(Msg, State0), + State#state{diffie_hellman_keys = ECDHKeys}; + key_exchange(#state{role = server, key_algorithm = rsa_psk, ssl_options = #ssl_options{psk_identity = undefined}} = State, _) -> State; @@ -1622,6 +1664,17 @@ key_exchange(#state{role = client, {dhe_psk, SslOpts#ssl_options.psk_identity, DhPubKey}), Connection:queue_handshake(Msg, State0); + +key_exchange(#state{role = client, + ssl_options = SslOpts, + key_algorithm = ecdhe_psk, + negotiated_version = Version, + diffie_hellman_keys = ECDHKeys} = State0, Connection) -> + Msg = ssl_handshake:key_exchange(client, ssl:tls_version(Version), + {ecdhe_psk, + SslOpts#ssl_options.psk_identity, ECDHKeys}), + Connection:queue_handshake(Msg, State0); + key_exchange(#state{role = client, ssl_options = SslOpts, key_algorithm = rsa_psk, @@ -1677,6 +1730,12 @@ rsa_psk_key_exchange(Version, PskIdentity, PremasterSecret, rsa_psk_key_exchange(_, _, _, _) -> throw (?ALERT_REC(?FATAL,?HANDSHAKE_FAILURE, pub_key_is_not_rsa)). +request_client_cert(#state{key_algorithm = Alg} = State, _) + when Alg == dh_anon; Alg == ecdh_anon; + Alg == psk; Alg == dhe_psk; Alg == ecdhe_psk; Alg == rsa_psk; + Alg == srp_dss; Alg == srp_rsa; Alg == srp_anon -> + State; + request_client_cert(#state{ssl_options = #ssl_options{verify = verify_peer, signature_algs = SupportedHashSigns}, connection_states = ConnectionStates0, @@ -1798,6 +1857,18 @@ calculate_secret(#server_dhe_psk_params{ calculate_master_secret(PremasterSecret, State#state{diffie_hellman_keys = Keys}, Connection, certify, certify); +calculate_secret(#server_ecdhe_psk_params{ + dh_params = #server_ecdh_params{curve = ECCurve}} = ServerKey, + #state{ssl_options = #ssl_options{user_lookup_fun = PSKLookup}} = + State=#state{session=Session}, Connection) -> + ECDHKeys = public_key:generate_key(ECCurve), + + PremasterSecret = ssl_handshake:premaster_secret(ServerKey, ECDHKeys, PSKLookup), + calculate_master_secret(PremasterSecret, + State#state{diffie_hellman_keys = ECDHKeys, + session = Session#session{ecc = ECCurve}}, + Connection, certify, certify); + calculate_secret(#server_srp_params{srp_n = Prime, srp_g = Generator} = ServerKey, #state{ssl_options = #ssl_options{srp_identity = SRPId}} = State, Connection) -> @@ -1882,6 +1953,7 @@ is_anonymous(Algo) when Algo == dh_anon; Algo == ecdh_anon; Algo == psk; Algo == dhe_psk; + Algo == ecdhe_psk; Algo == rsa_psk; Algo == srp_anon -> true; diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl index b1661624b5..fc4181a760 100644 --- a/lib/ssl/src/ssl_handshake.erl +++ b/lib/ssl/src/ssl_handshake.erl @@ -227,6 +227,7 @@ certificate_request(CipherSuite, CertDbHandle, CertDbRef, HashSigns, Version) -> {ecdh, #'ECPrivateKey'{}} | {psk, binary()} | {dhe_psk, binary(), binary()} | + {ecdhe_psk, binary(), #'ECPrivateKey'{}} | {srp, {binary(), binary()}, #srp_user{}, {HashAlgo::atom(), SignAlgo::atom()}, binary(), binary(), public_key:private_key()}) -> #client_key_exchange{} | #server_key_exchange{}. 
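For ecdhe_psk the premaster secret is formed as in RFC 4279, with the ECDH shared value taking the place of other_secret (RFC 5489); the ssl_handshake:key_exchange/3 and premaster_secret/3 clauses added below combine the ECDH result with the looked-up PSK via the existing psk_secret helper. A hedged sketch of the resulting layout, using crypto directly and illustrative values only:

    %% Not OTP internals -- just the RFC 4279/5489 premaster layout:
    %% uint16 length of Z, Z (the ECDH shared value), uint16 length of PSK, PSK.
    {_ClientPub, ClientPriv} = crypto:generate_key(ecdh, secp256r1),
    {ServerPub, _ServerPriv} = crypto:generate_key(ecdh, secp256r1),
    Z = crypto:compute_key(ecdh, ServerPub, ClientPriv, secp256r1),
    Psk = <<"shared-secret">>,
    PremasterSecret = <<(byte_size(Z)):16, Z/binary,
                        (byte_size(Psk)):16, Psk/binary>>.
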
@@ -264,6 +265,13 @@ key_exchange(client, _Version, {dhe_psk, Identity, PublicKey}) -> dh_public = PublicKey} }; +key_exchange(client, _Version, {ecdhe_psk, Identity, #'ECPrivateKey'{publicKey = ECPublicKey}}) -> + #client_key_exchange{ + exchange_keys = #client_ecdhe_psk_identity{ + identity = Identity, + dh_public = ECPublicKey} + }; + key_exchange(client, _Version, {psk_premaster_secret, PskIdentity, Secret, {_, PublicKey, _}}) -> EncPremasterSecret = encrypted_premaster_secret(Secret, PublicKey), @@ -310,6 +318,16 @@ key_exchange(server, Version, {dhe_psk, PskIdentityHint, {PublicKey, _}, enc_server_key_exchange(Version, ServerEDHPSKParams, HashSign, ClientRandom, ServerRandom, PrivateKey); +key_exchange(server, Version, {ecdhe_psk, PskIdentityHint, + #'ECPrivateKey'{publicKey = ECPublicKey, + parameters = ECCurve}, + HashSign, ClientRandom, ServerRandom, PrivateKey}) -> + ServerECDHEPSKParams = #server_ecdhe_psk_params{ + hint = PskIdentityHint, + dh_params = #server_ecdh_params{curve = ECCurve, public = ECPublicKey}}, + enc_server_key_exchange(Version, ServerECDHEPSKParams, HashSign, + ClientRandom, ServerRandom, PrivateKey); + key_exchange(server, Version, {srp, {PublicKey, _}, #srp_user{generator = Generator, prime = Prime, salt = Salt}, @@ -532,14 +550,31 @@ premaster_secret(#server_dhe_psk_params{ LookupFun) -> PremasterSecret = premaster_secret(PublicDhKey, PrivateDhKey, Params), psk_secret(IdentityHint, LookupFun, PremasterSecret); + +premaster_secret(#server_ecdhe_psk_params{ + hint = IdentityHint, + dh_params = #server_ecdh_params{ + public = ECServerPubKey}}, + PrivateEcDhKey, + LookupFun) -> + PremasterSecret = premaster_secret(#'ECPoint'{point = ECServerPubKey}, PrivateEcDhKey), + psk_secret(IdentityHint, LookupFun, PremasterSecret); + premaster_secret({rsa_psk, PSKIdentity}, PSKLookup, RSAPremasterSecret) -> - psk_secret(PSKIdentity, PSKLookup, RSAPremasterSecret). + psk_secret(PSKIdentity, PSKLookup, RSAPremasterSecret); + +premaster_secret(#client_ecdhe_psk_identity{ + identity = PSKIdentity, + dh_public = PublicEcDhPoint}, PrivateEcDhKey, PSKLookup) -> + PremasterSecret = premaster_secret(#'ECPoint'{point = PublicEcDhPoint}, PrivateEcDhKey), + psk_secret(PSKIdentity, PSKLookup, PremasterSecret). premaster_secret(#client_dhe_psk_identity{ identity = PSKIdentity, dh_public = PublicDhKey}, PrivateKey, #'DHParameter'{} = Params, PSKLookup) -> PremasterSecret = premaster_secret(PublicDhKey, PrivateKey, Params), psk_secret(PSKIdentity, PSKLookup, PremasterSecret). + premaster_secret(#client_psk_identity{identity = PSKIdentity}, PSKLookup) -> psk_secret(PSKIdentity, PSKLookup); premaster_secret({psk, PSKIdentity}, PSKLookup) -> @@ -887,6 +922,7 @@ enc_server_key_exchange(Version, Params, {HashAlgo, SignAlgo}, | #client_ec_diffie_hellman_public{} | #client_psk_identity{} | #client_dhe_psk_identity{} + | #client_ecdhe_psk_identity{} | #client_rsa_psk_identity{} | #client_srp_public{}. 
%% @@ -1048,6 +1084,7 @@ dec_server_key(<<?UINT16(Len), PskIdentityHint:Len/binary, _/binary>> = KeyStruc params_bin = BinMsg, hashsign = HashSign, signature = Signature}; + dec_server_key(<<?UINT16(Len), IdentityHint:Len/binary, ?UINT16(PLen), P:PLen/binary, ?UINT16(GLen), G:GLen/binary, @@ -1062,6 +1099,22 @@ dec_server_key(<<?UINT16(Len), IdentityHint:Len/binary, params_bin = BinMsg, hashsign = HashSign, signature = Signature}; +dec_server_key(<<?UINT16(Len), IdentityHint:Len/binary, + ?BYTE(?NAMED_CURVE), ?UINT16(CurveID), + ?BYTE(PointLen), ECPoint:PointLen/binary, + _/binary>> = KeyStruct, + ?KEY_EXCHANGE_EC_DIFFIE_HELLMAN_PSK, Version) -> + DHParams = #server_ecdh_params{ + curve = {namedCurve, tls_v1:enum_to_oid(CurveID)}, + public = ECPoint}, + Params = #server_ecdhe_psk_params{ + hint = IdentityHint, + dh_params = DHParams}, + {BinMsg, HashSign, Signature} = dec_server_key_params(Len + 2 + PointLen + 4, KeyStruct, Version), + #server_key_params{params = Params, + params_bin = BinMsg, + hashsign = HashSign, + signature = Signature}; dec_server_key(<<?UINT16(NLen), N:NLen/binary, ?UINT16(GLen), G:GLen/binary, ?BYTE(SLen), S:SLen/binary, @@ -1132,7 +1185,8 @@ filter_hashsigns([Suite | Suites], [{KeyExchange,_,_,_} | Algos], HashSigns, Acc KeyExchange == ecdh_anon; KeyExchange == srp_anon; KeyExchange == psk; - KeyExchange == dhe_psk -> + KeyExchange == dhe_psk; + KeyExchange == ecdhe_psk -> %% In this case hashsigns is not used as the kexchange is anonaymous filter_hashsigns(Suites, Algos, HashSigns, [Suite| Acc]). @@ -1496,6 +1550,8 @@ advertises_ec_ciphers([{ecdhe_rsa, _,_,_} | _]) -> true; advertises_ec_ciphers([{ecdh_anon, _,_,_} | _]) -> true; +advertises_ec_ciphers([{ecdhe_psk, _,_,_} | _]) -> + true; advertises_ec_ciphers([_| Rest]) -> advertises_ec_ciphers(Rest). 
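The encode_server_key/1 clause added below and the dec_server_key/3 clause above agree on the ECDHE_PSK ServerKeyExchange parameter layout: a length-prefixed identity hint, the ECCurveType byte for named_curve (3), a 16-bit named-curve id and a one-byte-length-prefixed EC point. A self-contained round-trip sketch of that layout (curve id 23 is secp256r1 in the TLS registry; nothing below is taken from the patch itself):

    {Point, _Priv} = crypto:generate_key(ecdh, secp256r1),
    Hint = <<"hint">>,
    Params = <<(byte_size(Hint)):16, Hint/binary,
               3, 23:16,                       %% named_curve, secp256r1
               (byte_size(Point)):8, Point/binary>>,
    %% The matching decode is an equality check against the bound values.
    <<HLen:16, Hint:HLen/binary, 3, 23:16,
      PLen:8, Point:PLen/binary>> = Params.
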
@@ -1790,6 +1846,18 @@ encode_server_key(#server_dhe_psk_params{ YLen = byte_size(Y), <<?UINT16(Len), PskIdentityHint/binary, ?UINT16(PLen), P/binary, ?UINT16(GLen), G/binary, ?UINT16(YLen), Y/binary>>; +encode_server_key(Params = #server_ecdhe_psk_params{hint = undefined}) -> + encode_server_key(Params#server_ecdhe_psk_params{hint = <<>>}); +encode_server_key(#server_ecdhe_psk_params{ + hint = PskIdentityHint, + dh_params = #server_ecdh_params{ + curve = {namedCurve, ECCurve}, public = ECPubKey}}) -> + %%TODO: support arbitrary keys + Len = byte_size(PskIdentityHint), + KLen = size(ECPubKey), + <<?UINT16(Len), PskIdentityHint/binary, + ?BYTE(?NAMED_CURVE), ?UINT16((tls_v1:oid_to_enum(ECCurve))), + ?BYTE(KLen), ECPubKey/binary>>; encode_server_key(#server_srp_params{srp_n = N, srp_g = G, srp_s = S, srp_b = B}) -> NLen = byte_size(N), GLen = byte_size(G), @@ -1822,6 +1890,12 @@ encode_client_key(#client_dhe_psk_identity{identity = Id, dh_public = DHPublic}, Len = byte_size(Id), DHLen = byte_size(DHPublic), <<?UINT16(Len), Id/binary, ?UINT16(DHLen), DHPublic/binary>>; +encode_client_key(Identity = #client_ecdhe_psk_identity{identity = undefined}, Version) -> + encode_client_key(Identity#client_ecdhe_psk_identity{identity = <<"psk_identity">>}, Version); +encode_client_key(#client_ecdhe_psk_identity{identity = Id, dh_public = DHPublic}, _) -> + Len = byte_size(Id), + DHLen = byte_size(DHPublic), + <<?UINT16(Len), Id/binary, ?BYTE(DHLen), DHPublic/binary>>; encode_client_key(Identity = #client_rsa_psk_identity{identity = undefined}, Version) -> encode_client_key(Identity#client_rsa_psk_identity{identity = <<"psk_identity">>}, Version); encode_client_key(#client_rsa_psk_identity{identity = Id, exchange_keys = ExchangeKeys}, Version) -> @@ -1873,6 +1947,10 @@ dec_client_key(<<?UINT16(Len), Id:Len/binary, ?UINT16(DH_YLen), DH_Y:DH_YLen/binary>>, ?KEY_EXCHANGE_DHE_PSK, _) -> #client_dhe_psk_identity{identity = Id, dh_public = DH_Y}; +dec_client_key(<<?UINT16(Len), Id:Len/binary, + ?BYTE(DH_YLen), DH_Y:DH_YLen/binary>>, + ?KEY_EXCHANGE_EC_DIFFIE_HELLMAN_PSK, _) -> + #client_ecdhe_psk_identity{identity = Id, dh_public = DH_Y}; dec_client_key(<<?UINT16(Len), Id:Len/binary, PKEPMS/binary>>, ?KEY_EXCHANGE_RSA_PSK, {3, 0}) -> #client_rsa_psk_identity{identity = Id, @@ -2050,6 +2128,8 @@ key_exchange_alg(psk) -> ?KEY_EXCHANGE_PSK; key_exchange_alg(dhe_psk) -> ?KEY_EXCHANGE_DHE_PSK; +key_exchange_alg(ecdhe_psk) -> + ?KEY_EXCHANGE_EC_DIFFIE_HELLMAN_PSK; key_exchange_alg(rsa_psk) -> ?KEY_EXCHANGE_RSA_PSK; key_exchange_alg(Alg) @@ -2308,6 +2388,7 @@ is_acceptable_hash_sign({_, ecdsa} = Algos, ecdsa, ecdsa, ecdhe_ecdsa, Supported is_acceptable_hash_sign(_, _, _, KeyExAlgo, _) when KeyExAlgo == psk; KeyExAlgo == dhe_psk; + KeyExAlgo == ecdhe_psk; KeyExAlgo == srp_anon; KeyExAlgo == dh_anon; KeyExAlgo == ecdhe_anon diff --git a/lib/ssl/src/ssl_handshake.hrl b/lib/ssl/src/ssl_handshake.hrl index 324b7dbde3..a191fcf766 100644 --- a/lib/ssl/src/ssl_handshake.hrl +++ b/lib/ssl/src/ssl_handshake.hrl @@ -133,6 +133,7 @@ -define(KEY_EXCHANGE_DIFFIE_HELLMAN, 1). -define(KEY_EXCHANGE_EC_DIFFIE_HELLMAN, 6). -define(KEY_EXCHANGE_PSK, 2). +-define(KEY_EXCHANGE_EC_DIFFIE_HELLMAN_PSK, 7). -define(KEY_EXCHANGE_DHE_PSK, 3). -define(KEY_EXCHANGE_RSA_PSK, 4). -define(KEY_EXCHANGE_SRP, 5). @@ -162,6 +163,11 @@ dh_params }). +-record(server_ecdhe_psk_params, { + hint, + dh_params + }). + -record(server_srp_params, { srp_n, %% opaque srp_N<1..2^16-1> srp_g, %% opaque srp_g<1..2^16-1> @@ -254,6 +260,11 @@ dh_public }). 
+-record(client_ecdhe_psk_identity, { + identity, + dh_public + }). + -record(client_rsa_psk_identity, { identity, exchange_keys diff --git a/lib/ssl/test/ssl_test_lib.erl b/lib/ssl/test/ssl_test_lib.erl index aae2927575..a5f03a1f15 100644 --- a/lib/ssl/test/ssl_test_lib.erl +++ b/lib/ssl/test/ssl_test_lib.erl @@ -1482,10 +1482,14 @@ is_psk_anon_suite({psk, _,_}) -> true; is_psk_anon_suite({dhe_psk,_,_}) -> true; +is_psk_anon_suite({ecdhe_psk,_,_}) -> + true; is_psk_anon_suite({psk, _,_,_}) -> true; is_psk_anon_suite({dhe_psk, _,_,_}) -> true; +is_psk_anon_suite({ecdhe_psk, _,_,_}) -> + true; is_psk_anon_suite(_) -> false. diff --git a/lib/stdlib/doc/src/gen_server.xml b/lib/stdlib/doc/src/gen_server.xml index 7d137fc772..da74e793e6 100644 --- a/lib/stdlib/doc/src/gen_server.xml +++ b/lib/stdlib/doc/src/gen_server.xml @@ -60,6 +60,8 @@ gen_server:abcast -----> Module:handle_cast/2 - -----> Module:handle_info/2 +- -----> Module:handle_continue/2 + - -----> Module:terminate/2 - -----> Module:code_change/3</pre> @@ -88,6 +90,13 @@ gen_server:abcast -----> Module:handle_cast/2 implies at least two garbage collections (when hibernating and shortly after waking up) and is not something you want to do between each call to a busy server.</p> + + <p>If the <c>gen_server</c> process needs to perform an action + immediately after initialization or to break the execution of a + callback into multiple steps, it can return <c>{continue,Continue}</c> + in place of the time-out or hibernation value, which will immediately + invoke the <c>handle_continue/2</c> callback.</p> + </description> <funcs> @@ -610,12 +619,15 @@ gen_server:abcast -----> Module:handle_cast/2 <v>State = term()</v> <v>Result = {reply,Reply,NewState} | {reply,Reply,NewState,Timeout}</v> <v> | {reply,Reply,NewState,hibernate}</v> + <v> | {reply,Reply,NewState,{continue,Continue}}</v> <v> | {noreply,NewState} | {noreply,NewState,Timeout}</v> <v> | {noreply,NewState,hibernate}</v> + <v> | {noreply,NewState,{continue,Continue}}</v> <v> | {stop,Reason,Reply,NewState} | {stop,Reason,NewState}</v> <v> Reply = term()</v> <v> NewState = term()</v> <v> Timeout = int()>=0 | infinity</v> + <v> Continue = term()</v> <v> Reason = term()</v> </type> <desc> @@ -673,9 +685,11 @@ gen_server:abcast -----> Module:handle_cast/2 <v>State = term()</v> <v>Result = {noreply,NewState} | {noreply,NewState,Timeout}</v> <v> | {noreply,NewState,hibernate}</v> + <v> | {noreply,NewState,{continue,Continue}}</v> <v> | {stop,Reason,NewState}</v> <v> NewState = term()</v> <v> Timeout = int()>=0 | infinity</v> + <v> Continue = term()</v> <v> Reason = term()</v> </type> <desc> @@ -690,6 +704,41 @@ gen_server:abcast -----> Module:handle_cast/2 </func> <func> + <name>Module:handle_continue(Continue, State) -> Result</name> + <fsummary>Handle a continue instruction.</fsummary> + <type> + <v>Continue = term()</v> + <v>State = term()</v> + <v>Result = {noreply,NewState} | {noreply,NewState,Timeout}</v> + <v> | {noreply,NewState,hibernate}</v> + <v> | {noreply,NewState,{continue,Continue}}</v> + <v> | {stop,Reason,NewState}</v> + <v> NewState = term()</v> + <v> Timeout = int()>=0 | infinity</v> + <v> Continue = term()</v> + <v> Reason = normal | term()</v> + </type> + <desc> + <note> + <p>This callback is optional, so callback modules need to + export it only if they return <c>{continue,Continue}</c> + from another callback. 
If continue is used and the callback + is not implemented, the process will exit with <c>undef</c> + error.</p> + </note> + <p>This function is called by a <c>gen_server</c> process whenever + a previous callback returns <c>{continue, Continue}</c>. + <c>handle_continue/2</c> is invoked immediately after the previous + callback, which makes it useful for performing work after + initialization or for splitting the work in a callback in + multiple steps, updating the process state along the way.</p> + <p>For a description of the other arguments and possible return values, + see <seealso marker="#Module:handle_call/3"> + <c>Module:handle_call/3</c></seealso>.</p> + </desc> + </func> + + <func> <name>Module:handle_info(Info, State) -> Result</name> <fsummary>Handle an incoming message.</fsummary> <type> @@ -697,6 +746,7 @@ gen_server:abcast -----> Module:handle_cast/2 <v>State = term()</v> <v>Result = {noreply,NewState} | {noreply,NewState,Timeout}</v> <v> | {noreply,NewState,hibernate}</v> + <v> | {noreply,NewState,{continue,Continue}}</v> <v> | {stop,Reason,NewState}</v> <v> NewState = term()</v> <v> Timeout = int()>=0 | infinity</v> @@ -726,7 +776,7 @@ gen_server:abcast -----> Module:handle_cast/2 <type> <v>Args = term()</v> <v>Result = {ok,State} | {ok,State,Timeout} | {ok,State,hibernate}</v> - <v> | {stop,Reason} | ignore</v> + <v> | {ok,State,{continue,Continue}} | {stop,Reason} | ignore</v> <v> State = term()</v> <v> Timeout = int()>=0 | infinity</v> <v> Reason = term()</v> diff --git a/lib/stdlib/doc/src/gen_statem.xml b/lib/stdlib/doc/src/gen_statem.xml index a7caa71dcb..8de6ed754f 100644 --- a/lib/stdlib/doc/src/gen_statem.xml +++ b/lib/stdlib/doc/src/gen_statem.xml @@ -1851,7 +1851,7 @@ handle_event(_, _, State, Data) -> </p> <note> <p> - Note that if the <c>gen_statem</c> is started trough + Note that if the <c>gen_statem</c> is started through <seealso marker="proc_lib"><c>proc_lib</c></seealso> and <seealso marker="#enter_loop/4"><c>enter_loop/4-6</c></seealso>, diff --git a/lib/stdlib/doc/src/notes.xml b/lib/stdlib/doc/src/notes.xml index bdd5b39cd3..604d758db3 100644 --- a/lib/stdlib/doc/src/notes.xml +++ b/lib/stdlib/doc/src/notes.xml @@ -432,7 +432,7 @@ marker="erts:erl"><c>erl</c></seealso> command.</p> <p> See <url - href="http://pcre.org/original/changelog.txt"><c>http://pcre.org/original/changelog.txt</c></url> + href="http://pcre.org/original/changelog.txt">http://pcre.org/original/changelog.txt</url> for information about changes made to PCRE between the versions 8.33 and 8.40.</p> <p> diff --git a/lib/stdlib/src/erl_pp.erl b/lib/stdlib/src/erl_pp.erl index ee5e7a11bf..b0064aadb8 100644 --- a/lib/stdlib/src/erl_pp.erl +++ b/lib/stdlib/src/erl_pp.erl @@ -598,8 +598,6 @@ lexpr({'fun',_,{clauses,Cs},Extra}, _Prec, Opts) -> lexpr({named_fun,_,Name,Cs,Extra}, _Prec, Opts) -> {force_nl,fun_info(Extra), {list,[{first,['fun', " "],fun_clauses(Cs, Opts, {named, Name})},'end']}}; -lexpr({'query',_,Lc}, _Prec, Opts) -> - {list,[{step,leaf("query"),lexpr(Lc, 0, Opts)},'end']}; lexpr({call,_,{remote,_,{atom,_,M},{atom,_,F}=N}=Name,Args}, Prec, Opts) -> case erl_internal:bif(M, F, length(Args)) of true -> diff --git a/lib/stdlib/src/gen_server.erl b/lib/stdlib/src/gen_server.erl index 7daa7a9fe4..ac172325b5 100644 --- a/lib/stdlib/src/gen_server.erl +++ b/lib/stdlib/src/gen_server.erl @@ -116,23 +116,27 @@ %%%========================================================================= -callback init(Args :: term()) -> - {ok, State :: term()} | {ok, State :: term(), timeout() | 
hibernate} | + {ok, State :: term()} | {ok, State :: term(), timeout() | hibernate | {continue, term()}} | {stop, Reason :: term()} | ignore. -callback handle_call(Request :: term(), From :: {pid(), Tag :: term()}, State :: term()) -> {reply, Reply :: term(), NewState :: term()} | - {reply, Reply :: term(), NewState :: term(), timeout() | hibernate} | + {reply, Reply :: term(), NewState :: term(), timeout() | hibernate | {continue, term()}} | {noreply, NewState :: term()} | - {noreply, NewState :: term(), timeout() | hibernate} | + {noreply, NewState :: term(), timeout() | hibernate | {continue, term()}} | {stop, Reason :: term(), Reply :: term(), NewState :: term()} | {stop, Reason :: term(), NewState :: term()}. -callback handle_cast(Request :: term(), State :: term()) -> {noreply, NewState :: term()} | - {noreply, NewState :: term(), timeout() | hibernate} | + {noreply, NewState :: term(), timeout() | hibernate | {continue, term()}} | {stop, Reason :: term(), NewState :: term()}. -callback handle_info(Info :: timeout | term(), State :: term()) -> {noreply, NewState :: term()} | - {noreply, NewState :: term(), timeout() | hibernate} | + {noreply, NewState :: term(), timeout() | hibernate | {continue, term()}} | + {stop, Reason :: term(), NewState :: term()}. +-callback handle_continue(Info :: term(), State :: term()) -> + {noreply, NewState :: term()} | + {noreply, NewState :: term(), timeout() | hibernate | {continue, term()}} | {stop, Reason :: term(), NewState :: term()}. -callback terminate(Reason :: (normal | shutdown | {shutdown, term()} | term()), @@ -149,7 +153,7 @@ Status :: term(). -optional_callbacks( - [handle_info/2, terminate/2, code_change/3, format_status/2]). + [handle_info/2, handle_continue/2, terminate/2, code_change/3, format_status/2]). %%% ----------------------------------------------------------------- %%% Starts a generic server. @@ -309,7 +313,7 @@ enter_loop(Mod, Options, State, ServerName, Timeout) -> Name = gen:get_proc_name(ServerName), Parent = gen:get_parent(), Debug = gen:debug_options(Name, Options), - HibernateAfterTimeout = gen:hibernate_after(Options), + HibernateAfterTimeout = gen:hibernate_after(Options), loop(Parent, Name, State, Mod, Timeout, HibernateAfterTimeout, Debug). %%%======================================================================== @@ -374,6 +378,19 @@ init_it(Mod, Args) -> %%% --------------------------------------------------- %%% The MAIN loop. 
%%% --------------------------------------------------- + +loop(Parent, Name, State, Mod, {continue, Continue} = Msg, HibernateAfterTimeout, Debug) -> + Reply = try_dispatch(Mod, handle_continue, Continue, State), + case Debug of + [] -> + handle_common_reply(Reply, Parent, Name, undefined, Msg, Mod, + HibernateAfterTimeout, State); + _ -> + Debug1 = sys:handle_debug(Debug, fun print_event/3, Name, Msg), + handle_common_reply(Reply, Parent, Name, undefined, Msg, Mod, + HibernateAfterTimeout, State, Debug1) + end; + loop(Parent, Name, State, Mod, hibernate, HibernateAfterTimeout, Debug) -> proc_lib:hibernate(?MODULE,wake_hib,[Parent, Name, State, Mod, HibernateAfterTimeout, Debug]); diff --git a/lib/stdlib/src/stdlib.appup.src b/lib/stdlib/src/stdlib.appup.src index 3100504a80..e4e3fb83e9 100644 --- a/lib/stdlib/src/stdlib.appup.src +++ b/lib/stdlib/src/stdlib.appup.src @@ -18,7 +18,7 @@ %% %CopyrightEnd% {"%VSN%", %% Up from - max one major revision back - [{<<"3\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-19.* + [{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.* %% Down to - max one major revision back - [{<<"3\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-19.* + [{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.* }. diff --git a/lib/stdlib/test/erl_internal_SUITE.erl b/lib/stdlib/test/erl_internal_SUITE.erl index 789a9d4363..7d9df1f989 100644 --- a/lib/stdlib/test/erl_internal_SUITE.erl +++ b/lib/stdlib/test/erl_internal_SUITE.erl @@ -80,7 +80,7 @@ callbacks(application) -> callbacks(gen_server) -> [{init,1}, {handle_call,3}, {handle_cast,2}, {handle_info,2}, {terminate,2}, {code_change,3}, - {format_status,2}]; + {format_status,2}, {handle_continue, 2}]; callbacks(gen_fsm) -> [{init,1}, {handle_event,3}, {handle_sync_event,4}, {handle_info,3}, {terminate,3}, {code_change,4}, @@ -101,7 +101,7 @@ callbacks(supervisor) -> optional_callbacks(application) -> []; optional_callbacks(gen_server) -> - [{handle_info, 2}, {terminate, 2}, {code_change, 3}, {format_status, 2}]; + [{handle_info, 2}, {handle_continue, 2}, {terminate, 2}, {code_change, 3}, {format_status, 2}]; optional_callbacks(gen_fsm) -> [{handle_info, 3}, {terminate, 3}, {code_change, 4}, {format_status, 2}]; optional_callbacks(gen_event) -> diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl index 05451a83fb..5a5e282998 100644 --- a/lib/stdlib/test/ets_SUITE.erl +++ b/lib/stdlib/test/ets_SUITE.erl @@ -2283,13 +2283,8 @@ write_concurrency(Config) when is_list(Config) -> NoHashMem = ets:info(No7,memory), NoHashMem = ets:info(No8,memory), - case erlang:system_info(smp_support) of - true -> - true = YesMem > NoHashMem, - true = YesMem > NoTreeMem; - false -> - true = YesMem =:= NoHashMem - end, + true = YesMem > NoHashMem, + true = YesMem > NoTreeMem, {'EXIT',{badarg,_}} = (catch ets_new(foo,[public,{write_concurrency,foo}])), {'EXIT',{badarg,_}} = (catch ets_new(foo,[public,{write_concurrency}])), @@ -5912,16 +5907,11 @@ add_lists([E1|T1], [E2|T2], Acc) -> run_smp_workers(InitF,ExecF,FiniF,Laps) -> run_smp_workers(InitF,ExecF,FiniF,Laps, 0). 
run_smp_workers(InitF,ExecF,FiniF,Laps, Exclude) -> - case erlang:system_info(smp_support) of - true -> - case erlang:system_info(schedulers_online) of - N when N > Exclude -> - run_workers_do(InitF,ExecF,FiniF,Laps, N - Exclude); - _ -> - {skipped, "Too few schedulers online"} - end; - false -> - {skipped,"No smp support"} + case erlang:system_info(schedulers_online) of + N when N > Exclude -> + run_workers_do(InitF,ExecF,FiniF,Laps, N - Exclude); + _ -> + {skipped, "Too few schedulers online"} end. run_sched_workers(InitF,ExecF,FiniF,Laps) -> @@ -6231,11 +6221,9 @@ spawn_monitor_with_pid(Pid, Fun, N) -> only_if_smp(Func) -> only_if_smp(2, Func). only_if_smp(Schedulers, Func) -> - case {erlang:system_info(smp_support), - erlang:system_info(schedulers_online)} of - {false,_} -> {skip,"No smp support"}; - {true,N} when N < Schedulers -> {skip,"Too few schedulers online"}; - {true,_} -> Func() + case erlang:system_info(schedulers_online) of + N when N < Schedulers -> {skip,"Too few schedulers online"}; + _ -> Func() end. %% Copy-paste from emulator/test/binary_SUITE.erl diff --git a/lib/stdlib/test/gen_server_SUITE.erl b/lib/stdlib/test/gen_server_SUITE.erl index 2e9dc4d4fb..2bc220fef2 100644 --- a/lib/stdlib/test/gen_server_SUITE.erl +++ b/lib/stdlib/test/gen_server_SUITE.erl @@ -27,7 +27,7 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2]). -export([start/1, crash/1, call/1, cast/1, cast_fast/1, - info/1, abcast/1, multicall/1, multicall_down/1, + continue/1, info/1, abcast/1, multicall/1, multicall_down/1, call_remote1/1, call_remote2/1, call_remote3/1, call_remote_n1/1, call_remote_n2/1, call_remote_n3/1, spec_init/1, spec_init_local_registered_parent/1, @@ -37,7 +37,8 @@ get_state/1, replace_state/1, call_with_huge_message_queue/1, undef_handle_call/1, undef_handle_cast/1, undef_handle_info/1, undef_init/1, undef_code_change/1, undef_terminate1/1, - undef_terminate2/1, undef_in_terminate/1, undef_in_handle_info/1 + undef_terminate2/1, undef_in_terminate/1, undef_in_handle_info/1, + undef_handle_continue/1 ]). -export([stop1/1, stop2/1, stop3/1, stop4/1, stop5/1, stop6/1, stop7/1, @@ -52,7 +53,7 @@ %% The gen_server behaviour --export([init/1, handle_call/3, handle_cast/2, +-export([init/1, handle_call/3, handle_cast/2, handle_continue/2, handle_info/2, code_change/3, terminate/2, format_status/2]). suite() -> @@ -61,7 +62,7 @@ suite() -> all() -> [start, {group,stop}, crash, call, cast, cast_fast, info, abcast, - multicall, multicall_down, call_remote1, call_remote2, + continue, multicall, multicall_down, call_remote1, call_remote2, call_remote3, call_remote_n1, call_remote_n2, call_remote_n3, spec_init, spec_init_local_registered_parent, @@ -76,7 +77,7 @@ groups() -> [{stop, [], [stop1, stop2, stop3, stop4, stop5, stop6, stop7, stop8, stop9, stop10]}, {undef_callbacks, [], - [undef_handle_call, undef_handle_cast, undef_handle_info, + [undef_handle_call, undef_handle_cast, undef_handle_info, undef_handle_continue, undef_init, undef_code_change, undef_terminate1, undef_terminate2]}]. @@ -458,6 +459,47 @@ call(Config) when is_list(Config) -> ok. %% -------------------------------------- +%% Test handle_continue. 
+%% -------------------------------------- + +continue(Config) when is_list(Config) -> + {ok, Pid} = gen_server:start_link(gen_server_SUITE, {continue, self()}, []), + [{Pid, continue}, {Pid, after_continue}] = read_replies(Pid), + + gen_server:call(Pid, {continue_reply, self()}), + [{Pid, continue}, {Pid, after_continue}] = read_replies(Pid), + + gen_server:call(Pid, {continue_noreply, self()}), + [{Pid, continue}, {Pid, after_continue}] = read_replies(Pid), + + gen_server:cast(Pid, {continue_noreply, self()}), + [{Pid, continue}, {Pid, after_continue}] = read_replies(Pid), + + Pid ! {continue_noreply, self()}, + [{Pid, continue}, {Pid, after_continue}] = read_replies(Pid), + + Pid ! {continue_continue, self()}, + [{Pid, before_continue}, {Pid, continue}, {Pid, after_continue}] = read_replies(Pid), + + Ref = monitor(process, Pid), + Pid ! continue_stop, + verify_down_reason(Ref, Pid, normal). + +read_replies(Pid) -> + receive + {Pid, ack} -> read_replies() + after + 1000 -> ct:fail({continue, ack}) + end. + +read_replies() -> + receive + Msg -> [Msg | read_replies()] + after + 0 -> [] + end. + +%% -------------------------------------- %% Test call to nonexisting processes on remote nodes %% -------------------------------------- @@ -1346,7 +1388,7 @@ echo_loop() -> %% Test the default implementation of terminate if the callback module %% does not export it undef_terminate1(Config) when is_list(Config) -> - {ok, Server} = gen_server:start(oc_server, [], []), + {ok, Server} = oc_server:start(), MRef = monitor(process, Server), ok = gen_server:stop(Server), ok = verify_down_reason(MRef, Server, normal). @@ -1354,7 +1396,7 @@ undef_terminate1(Config) when is_list(Config) -> %% Test the default implementation of terminate if the callback module %% does not export it undef_terminate2(Config) when is_list(Config) -> - {ok, Server} = gen_server:start(oc_server, [], []), + {ok, Server} = oc_server:start(), MRef = monitor(process, Server), ok = gen_server:stop(Server, {error, test}, infinity), ok = verify_down_reason(MRef, Server, {error, test}). @@ -1377,7 +1419,7 @@ undef_init(_Config) -> %% The upgrade should fail if code_change is expected in the callback module %% but not exported, but the server should continue with the old code undef_code_change(Config) when is_list(Config) -> - {ok, Server} = gen_server:start(oc_server, [], []), + {ok, Server} = oc_server:start(), {error, {'EXIT', {undef, [{oc_server, code_change, [_, _, _], _}|_]}}} = fake_upgrade(Server, ?MODULE), true = is_process_alive(Server). @@ -1385,7 +1427,7 @@ undef_code_change(Config) when is_list(Config) -> %% The server should crash if the handle_call callback is %% not exported in the callback module undef_handle_call(_Config) -> - {ok, Server} = gen_server:start(oc_server, [], []), + {ok, Server} = oc_server:start(), try gen_server:call(Server, call_msg), ct:fail(should_crash) @@ -1397,17 +1439,25 @@ undef_handle_call(_Config) -> %% The server should crash if the handle_cast callback is %% not exported in the callback module undef_handle_cast(_Config) -> - {ok, Server} = gen_server:start(oc_server, [], []), + {ok, Server} = oc_server:start(), MRef = monitor(process, Server), gen_server:cast(Server, cast_msg), verify_undef_down(MRef, Server, oc_server, handle_cast), ok. 
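The continue/1 test above exercises the new {continue, Term} return value and the optional handle_continue/2 callback documented in gen_server.xml, and the undef_handle_continue case below checks the error path when the callback is missing. As a minimal hedged sketch (module and function names are illustrative, not part of this patch), a server that returns quickly from init/1 and defers its expensive setup to handle_continue/2 looks like this:

    -module(cache_server).
    -behaviour(gen_server).
    -export([start_link/0]).
    -export([init/1, handle_continue/2, handle_call/3, handle_cast/2]).

    start_link() ->
        gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

    init([]) ->
        %% Return at once so the caller of start_link/0 is not blocked;
        %% the actual loading happens in handle_continue/2 right after.
        {ok, #{table => undefined}, {continue, load}}.

    handle_continue(load, State) ->
        Tab = ets:new(?MODULE, [named_table, public]),
        %% ... populate the table here ...
        {noreply, State#{table := Tab}}.

    handle_call(table, _From, #{table := Tab} = State) ->
        {reply, Tab, State};
    handle_call(_Req, _From, State) ->
        {reply, ok, State}.

    handle_cast(_Msg, State) ->
        {noreply, State}.
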
+%% The server should crash if the handle_continue callback is +%% not exported in the callback module +undef_handle_continue(_Config) -> + {ok, Server} = oc_server:start(continue), + MRef = monitor(process, Server), + verify_undef_down(MRef, Server, oc_server, handle_continue), + ok. + %% The server should log but not crash if the handle_info callback is %% calling an undefined function undef_handle_info(Config) when is_list(Config) -> error_logger_forwarder:register(), - {ok, Server} = gen_server:start(oc_server, [], []), + {ok, Server} = oc_server:start(), Server ! hej, wait_until_processed(Server, hej, 10), true = is_process_alive(Server), @@ -1570,8 +1620,11 @@ init(hibernate) -> init(sleep) -> ct:sleep(1000), {ok, []}; +init({continue, Pid}) -> + self() ! {after_continue, Pid}, + {ok, [], {continue, {message, Pid}}}; init({state,State}) -> - {ok, State}. + {ok,State}. handle_call(started_p, _From, State) -> io:format("FROZ"), @@ -1604,6 +1657,12 @@ handle_call(shutdown_reason, _From, _State) -> handle_call({call_undef_fun, Mod, Fun}, _From, State) -> Mod:Fun(), {reply, ok, State}; +handle_call({continue_reply, Pid}, _From, State) -> + self() ! {after_continue, Pid}, + {reply, ok, State, {continue, {message, Pid}}}; +handle_call({continue_noreply, Pid}, From, State) -> + self() ! {after_continue, Pid}, + {noreply, State, {continue, {message, Pid, From}}}; handle_call(stop_shutdown_reason, _From, State) -> {stop,{shutdown,stop_reason},State}. @@ -1620,6 +1679,9 @@ handle_cast(hibernate_later, _State) -> handle_cast({call_undef_fun, Mod, Fun}, State) -> Mod:Fun(), {noreply, State}; +handle_cast({continue_noreply, Pid}, State) -> + self() ! {after_continue, Pid}, + {noreply, State, {continue, {message, Pid}}}; handle_cast({From, stop}, State) -> io:format("BAZ"), {stop, {From,stopped}, State}. @@ -1657,9 +1719,34 @@ handle_info(continue, From) -> {noreply, []}; handle_info({From, stop}, State) -> {stop, {From,stopped_info}, State}; +handle_info({after_continue, Pid}, State) -> + Pid ! {self(), after_continue}, + Pid ! {self(), ack}, + {noreply, State}; +handle_info({continue_noreply, Pid}, State) -> + self() ! {after_continue, Pid}, + {noreply, State, {continue, {message, Pid}}}; +handle_info({continue_continue, Pid}, State) -> + {noreply, State, {continue, {continue, Pid}}}; +handle_info(continue_stop, State) -> + {noreply, State, {continue, stop}}; handle_info(_Info, State) -> {noreply, State}. +handle_continue({continue, Pid}, State) -> + Pid ! {self(), before_continue}, + self() ! {after_continue, Pid}, + {noreply, State, {continue, {message, Pid}}}; +handle_continue(stop, State) -> + {stop, normal, State}; +handle_continue({message, Pid}, State) -> + Pid ! {self(), continue}, + {noreply, State}; +handle_continue({message, Pid, From}, State) -> + Pid ! {self(), continue}, + gen_server:reply(From, ok), + {noreply, State}. + code_change(_OldVsn, {new, {undef_in_code_change, {Mod, Fun}}} = State, _Extra) -> diff --git a/lib/stdlib/test/gen_server_SUITE_data/oc_server.erl b/lib/stdlib/test/gen_server_SUITE_data/oc_server.erl index 4ba37987f3..7b92a49bf6 100644 --- a/lib/stdlib/test/gen_server_SUITE_data/oc_server.erl +++ b/lib/stdlib/test/gen_server_SUITE_data/oc_server.erl @@ -22,7 +22,7 @@ -behaviour(gen_server). %% API --export([start/0]). +-export([start/0, start/1]). %% gen_server callbacks -export([init/1]). @@ -30,8 +30,12 @@ -record(state, {}). start() -> - gen_server:start({local, ?MODULE}, ?MODULE, [], []). + gen_server:start(?MODULE, ok, []). -init([]) -> - {ok, #state{}}. 
+start(continue) -> + gen_server:start(?MODULE, continue, []). +init(ok) -> + {ok, #state{}}; +init(continue) -> + {ok, #state{}, {continue, continue}}. diff --git a/lib/stdlib/test/unicode_util_SUITE_data/GraphemeBreakTest.txt b/lib/stdlib/test/unicode_util_SUITE_data/GraphemeBreakTest.txt index 4bb4b1369b..d7d8f90de0 100644 --- a/lib/stdlib/test/unicode_util_SUITE_data/GraphemeBreakTest.txt +++ b/lib/stdlib/test/unicode_util_SUITE_data/GraphemeBreakTest.txt @@ -1,23 +1,24 @@ -# GraphemeBreakTest-9.0.0.txt -# Date: 2016-06-02, 18:28:17 GMT -# © 2016 Unicode®, Inc. +# GraphemeBreakTest-10.0.0.txt +# Date: 2017-04-14, 05:40:29 GMT +# © 2017 Unicode®, Inc. # Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. # For terms of use, see http://www.unicode.org/terms_of_use.html # # Unicode Character Database # For documentation, see http://www.unicode.org/reports/tr44/ # -# Default Grapheme Break Test +# Default Grapheme_Cluster_Break Test # # Format: -# <string> (# <comment>)? -# <string> contains hex Unicode code points, with -# ÷ wherever there is a break opportunity, and +# <string> (# <comment>)? +# <string> contains hex Unicode code points, with +# ÷ wherever there is a break opportunity, and # × wherever there is not. # <comment> the format can change, but currently it shows: # - the sample character name # - (x) the Grapheme_Cluster_Break property value for the sample character -# - [x] the rule that determines whether there is a break or not +# - [x] the rule that determines whether there is a break or not, +# as listed in the Rules section of GraphemeBreakTest.html # # These samples may be extended or changed in the future. # @@ -53,8 +54,8 @@ ÷ 0020 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 0020 × 200D ÷ # ÷ [0.2] SPACE (Other) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 0020 × 0308 × 200D ÷ # ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 0020 ÷ 2764 ÷ # ÷ [0.2] SPACE (Other) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 0020 × 0308 ÷ 2764 ÷ # ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 0020 ÷ 2640 ÷ # ÷ [0.2] SPACE (Other) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 0020 × 0308 ÷ 2640 ÷ # ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 0020 ÷ 1F466 ÷ # ÷ [0.2] SPACE (Other) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0020 × 0308 ÷ 1F466 ÷ # ÷ [0.2] SPACE (Other) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0020 ÷ 0378 ÷ # ÷ [0.2] SPACE (Other) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -93,8 +94,8 @@ ÷ 000D ÷ 0308 ÷ 1F3FB ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 000D ÷ 200D ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 000D ÷ 0308 × 200D ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 000D ÷ 2764 ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 000D ÷ 0308 ÷ 2764 ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 000D ÷ 2640 ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] FEMALE SIGN 
(Glue_After_Zwj) ÷ [0.3] +÷ 000D ÷ 0308 ÷ 2640 ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 000D ÷ 1F466 ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] BOY (EBG) ÷ [0.3] ÷ 000D ÷ 0308 ÷ 1F466 ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 000D ÷ 0378 ÷ # ÷ [0.2] <CARRIAGE RETURN (CR)> (CR) ÷ [4.0] <reserved-0378> (Other) ÷ [0.3] @@ -133,8 +134,8 @@ ÷ 000A ÷ 0308 ÷ 1F3FB ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 000A ÷ 200D ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 000A ÷ 0308 × 200D ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 000A ÷ 2764 ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 000A ÷ 0308 ÷ 2764 ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 000A ÷ 2640 ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 000A ÷ 0308 ÷ 2640 ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 000A ÷ 1F466 ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] BOY (EBG) ÷ [0.3] ÷ 000A ÷ 0308 ÷ 1F466 ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 000A ÷ 0378 ÷ # ÷ [0.2] <LINE FEED (LF)> (LF) ÷ [4.0] <reserved-0378> (Other) ÷ [0.3] @@ -173,8 +174,8 @@ ÷ 0001 ÷ 0308 ÷ 1F3FB ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 0001 ÷ 200D ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 0001 ÷ 0308 × 200D ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 0001 ÷ 2764 ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 0001 ÷ 0308 ÷ 2764 ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 0001 ÷ 2640 ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 0001 ÷ 0308 ÷ 2640 ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 0001 ÷ 1F466 ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] BOY (EBG) ÷ [0.3] ÷ 0001 ÷ 0308 ÷ 1F466 ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0001 ÷ 0378 ÷ # ÷ [0.2] <START OF HEADING> (Control) ÷ [4.0] <reserved-0378> (Other) ÷ [0.3] @@ -213,8 +214,8 @@ ÷ 0300 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 0300 × 200D ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 0300 × 0308 × 200D ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 0300 ÷ 2764 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 0300 × 0308 ÷ 2764 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART 
(Glue_After_Zwj) ÷ [0.3] +÷ 0300 ÷ 2640 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 0300 × 0308 ÷ 2640 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 0300 ÷ 1F466 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0300 × 0308 ÷ 1F466 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0300 ÷ 0378 ÷ # ÷ [0.2] COMBINING GRAVE ACCENT (Extend) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -253,8 +254,8 @@ ÷ 0600 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 0600 × 200D ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 0600 × 0308 × 200D ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 0600 × 2764 ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 0600 × 0308 ÷ 2764 ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 0600 × 2640 ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 0600 × 0308 ÷ 2640 ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 0600 × 1F466 ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] BOY (EBG) ÷ [0.3] ÷ 0600 × 0308 ÷ 1F466 ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0600 × 0378 ÷ # ÷ [0.2] ARABIC NUMBER SIGN (Prepend) × [9.2] <reserved-0378> (Other) ÷ [0.3] @@ -293,8 +294,8 @@ ÷ 0903 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 0903 × 200D ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 0903 × 0308 × 200D ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 0903 ÷ 2764 ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 0903 × 0308 ÷ 2764 ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 0903 ÷ 2640 ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 0903 × 0308 ÷ 2640 ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 0903 ÷ 1F466 ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0903 × 0308 ÷ 1F466 ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0903 ÷ 0378 ÷ # ÷ [0.2] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -333,8 +334,8 @@ ÷ 1100 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 1100 × 200D ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 1100 × 0308 × 200D ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO 
WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 1100 ÷ 2764 ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 1100 × 0308 ÷ 2764 ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 1100 ÷ 2640 ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 1100 × 0308 ÷ 2640 ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 1100 ÷ 1F466 ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1100 × 0308 ÷ 1F466 ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1100 ÷ 0378 ÷ # ÷ [0.2] HANGUL CHOSEONG KIYEOK (L) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -373,8 +374,8 @@ ÷ 1160 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 1160 × 200D ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 1160 × 0308 × 200D ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 1160 ÷ 2764 ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 1160 × 0308 ÷ 2764 ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 1160 ÷ 2640 ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 1160 × 0308 ÷ 2640 ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 1160 ÷ 1F466 ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1160 × 0308 ÷ 1F466 ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1160 ÷ 0378 ÷ # ÷ [0.2] HANGUL JUNGSEONG FILLER (V) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -413,8 +414,8 @@ ÷ 11A8 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 11A8 × 200D ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 11A8 × 0308 × 200D ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 11A8 ÷ 2764 ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 11A8 × 0308 ÷ 2764 ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 11A8 ÷ 2640 ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 11A8 × 0308 ÷ 2640 ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 11A8 ÷ 1F466 ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 11A8 × 0308 ÷ 1F466 ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 11A8 ÷ 0378 ÷ # ÷ [0.2] HANGUL JONGSEONG KIYEOK (T) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -453,8 +454,8 @@ ÷ AC00 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ AC00 × 200D ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] ZERO WIDTH 
JOINER (ZWJ) ÷ [0.3] ÷ AC00 × 0308 × 200D ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ AC00 ÷ 2764 ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ AC00 × 0308 ÷ 2764 ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ AC00 ÷ 2640 ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ AC00 × 0308 ÷ 2640 ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ AC00 ÷ 1F466 ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ AC00 × 0308 ÷ 1F466 ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ AC00 ÷ 0378 ÷ # ÷ [0.2] HANGUL SYLLABLE GA (LV) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -493,8 +494,8 @@ ÷ AC01 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ AC01 × 200D ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ AC01 × 0308 × 200D ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ AC01 ÷ 2764 ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ AC01 × 0308 ÷ 2764 ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ AC01 ÷ 2640 ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ AC01 × 0308 ÷ 2640 ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ AC01 ÷ 1F466 ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ AC01 × 0308 ÷ 1F466 ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ AC01 ÷ 0378 ÷ # ÷ [0.2] HANGUL SYLLABLE GAG (LVT) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -533,8 +534,8 @@ ÷ 1F1E6 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 1F1E6 × 200D ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 1F1E6 × 0308 × 200D ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 1F1E6 ÷ 2764 ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 1F1E6 × 0308 ÷ 2764 ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 1F1E6 ÷ 2640 ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 1F1E6 × 0308 ÷ 2640 ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 1F1E6 ÷ 1F466 ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1F1E6 × 0308 ÷ 1F466 ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1F1E6 ÷ 0378 ÷ # ÷ [0.2] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -573,8 +574,8 @@ ÷ 261D × 0308 × 
1F3FB ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) × [9.0] COMBINING DIAERESIS (Extend) × [10.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 261D × 200D ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 261D × 0308 × 200D ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 261D ÷ 2764 ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 261D × 0308 ÷ 2764 ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 261D ÷ 2640 ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 261D × 0308 ÷ 2640 ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 261D ÷ 1F466 ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 261D × 0308 ÷ 1F466 ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 261D ÷ 0378 ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -613,8 +614,8 @@ ÷ 1F3FB × 0308 ÷ 1F3FB ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 1F3FB × 200D ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 1F3FB × 0308 × 200D ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 1F3FB ÷ 2764 ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 1F3FB × 0308 ÷ 2764 ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 1F3FB ÷ 2640 ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 1F3FB × 0308 ÷ 2640 ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 1F3FB ÷ 1F466 ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1F3FB × 0308 ÷ 1F466 ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1F3FB ÷ 0378 ÷ # ÷ [0.2] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -653,54 +654,54 @@ ÷ 200D × 0308 ÷ 1F3FB ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 200D × 200D ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 200D × 0308 × 200D ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 200D × 2764 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [11.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 200D × 0308 ÷ 2764 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 200D × 2640 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [11.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 200D × 0308 ÷ 2640 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [9.0] COMBINING DIAERESIS (Extend) ÷ 
[999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 200D × 1F466 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [11.0] BOY (EBG) ÷ [0.3] ÷ 200D × 0308 ÷ 1F466 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 200D ÷ 0378 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] ÷ 200D × 0308 ÷ 0378 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] ÷ 200D ÷ D800 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) ÷ [5.0] <surrogate-D800> (Control) ÷ [0.3] ÷ 200D × 0308 ÷ D800 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [9.0] COMBINING DIAERESIS (Extend) ÷ [5.0] <surrogate-D800> (Control) ÷ [0.3] -÷ 2764 ÷ 0020 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] SPACE (Other) ÷ [0.3] -÷ 2764 × 0308 ÷ 0020 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] SPACE (Other) ÷ [0.3] -÷ 2764 ÷ 000D ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [5.0] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3] -÷ 2764 × 0308 ÷ 000D ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [5.0] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3] -÷ 2764 ÷ 000A ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [5.0] <LINE FEED (LF)> (LF) ÷ [0.3] -÷ 2764 × 0308 ÷ 000A ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [5.0] <LINE FEED (LF)> (LF) ÷ [0.3] -÷ 2764 ÷ 0001 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [5.0] <START OF HEADING> (Control) ÷ [0.3] -÷ 2764 × 0308 ÷ 0001 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [5.0] <START OF HEADING> (Control) ÷ [0.3] -÷ 2764 × 0300 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING GRAVE ACCENT (Extend) ÷ [0.3] -÷ 2764 × 0308 × 0300 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] COMBINING GRAVE ACCENT (Extend) ÷ [0.3] -÷ 2764 ÷ 0600 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] -÷ 2764 × 0308 ÷ 0600 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] -÷ 2764 × 0903 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] -÷ 2764 × 0308 × 0903 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] -÷ 2764 ÷ 1100 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] -÷ 2764 × 0308 ÷ 1100 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] -÷ 2764 ÷ 1160 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] -÷ 2764 × 0308 ÷ 1160 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] -÷ 2764 ÷ 11A8 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] -÷ 2764 × 0308 ÷ 11A8 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] -÷ 2764 ÷ AC00 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] -÷ 2764 × 0308 ÷ AC00 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] -÷ 2764 ÷ AC01 ÷ # ÷ [0.2] 
HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] -÷ 2764 × 0308 ÷ AC01 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] -÷ 2764 ÷ 1F1E6 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] -÷ 2764 × 0308 ÷ 1F1E6 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] -÷ 2764 ÷ 261D ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] WHITE UP POINTING INDEX (E_Base) ÷ [0.3] -÷ 2764 × 0308 ÷ 261D ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] WHITE UP POINTING INDEX (E_Base) ÷ [0.3] -÷ 2764 ÷ 1F3FB ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] -÷ 2764 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] -÷ 2764 × 200D ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 2764 × 0308 × 200D ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 2764 ÷ 2764 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 2764 × 0308 ÷ 2764 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 2764 ÷ 1F466 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] BOY (EBG) ÷ [0.3] -÷ 2764 × 0308 ÷ 1F466 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] -÷ 2764 ÷ 0378 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] -÷ 2764 × 0308 ÷ 0378 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] -÷ 2764 ÷ D800 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [5.0] <surrogate-D800> (Control) ÷ [0.3] -÷ 2764 × 0308 ÷ D800 ÷ # ÷ [0.2] HEAVY BLACK HEART (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [5.0] <surrogate-D800> (Control) ÷ [0.3] +÷ 2640 ÷ 0020 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] SPACE (Other) ÷ [0.3] +÷ 2640 × 0308 ÷ 0020 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] SPACE (Other) ÷ [0.3] +÷ 2640 ÷ 000D ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [5.0] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3] +÷ 2640 × 0308 ÷ 000D ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [5.0] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3] +÷ 2640 ÷ 000A ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [5.0] <LINE FEED (LF)> (LF) ÷ [0.3] +÷ 2640 × 0308 ÷ 000A ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [5.0] <LINE FEED (LF)> (LF) ÷ [0.3] +÷ 2640 ÷ 0001 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [5.0] <START OF HEADING> (Control) ÷ [0.3] +÷ 2640 × 0308 ÷ 0001 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [5.0] <START OF HEADING> (Control) ÷ [0.3] +÷ 2640 × 0300 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING GRAVE ACCENT (Extend) ÷ [0.3] +÷ 2640 × 0308 × 0300 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] COMBINING GRAVE ACCENT (Extend) ÷ [0.3] +÷ 2640 ÷ 0600 ÷ 
# ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] +÷ 2640 × 0308 ÷ 0600 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] ARABIC NUMBER SIGN (Prepend) ÷ [0.3] +÷ 2640 × 0903 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] +÷ 2640 × 0308 × 0903 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) × [9.1] DEVANAGARI SIGN VISARGA (SpacingMark) ÷ [0.3] +÷ 2640 ÷ 1100 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] +÷ 2640 × 0308 ÷ 1100 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL CHOSEONG KIYEOK (L) ÷ [0.3] +÷ 2640 ÷ 1160 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] +÷ 2640 × 0308 ÷ 1160 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL JUNGSEONG FILLER (V) ÷ [0.3] +÷ 2640 ÷ 11A8 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] +÷ 2640 × 0308 ÷ 11A8 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL JONGSEONG KIYEOK (T) ÷ [0.3] +÷ 2640 ÷ AC00 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] +÷ 2640 × 0308 ÷ AC00 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL SYLLABLE GA (LV) ÷ [0.3] +÷ 2640 ÷ AC01 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] +÷ 2640 × 0308 ÷ AC01 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HANGUL SYLLABLE GAG (LVT) ÷ [0.3] +÷ 2640 ÷ 1F1E6 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] +÷ 2640 × 0308 ÷ 1F1E6 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] REGIONAL INDICATOR SYMBOL LETTER A (RI) ÷ [0.3] +÷ 2640 ÷ 261D ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] WHITE UP POINTING INDEX (E_Base) ÷ [0.3] +÷ 2640 × 0308 ÷ 261D ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] WHITE UP POINTING INDEX (E_Base) ÷ [0.3] +÷ 2640 ÷ 1F3FB ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] +÷ 2640 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] +÷ 2640 × 200D ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] +÷ 2640 × 0308 × 200D ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] +÷ 2640 ÷ 2640 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 2640 × 0308 ÷ 2640 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 2640 ÷ 1F466 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] BOY (EBG) ÷ [0.3] +÷ 2640 × 0308 ÷ 1F466 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] +÷ 2640 ÷ 0378 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] +÷ 2640 × 0308 ÷ 0378 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] +÷ 2640 ÷ D800 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) ÷ [5.0] 
<surrogate-D800> (Control) ÷ [0.3] +÷ 2640 × 0308 ÷ D800 ÷ # ÷ [0.2] FEMALE SIGN (Glue_After_Zwj) × [9.0] COMBINING DIAERESIS (Extend) ÷ [5.0] <surrogate-D800> (Control) ÷ [0.3] ÷ 1F466 ÷ 0020 ÷ # ÷ [0.2] BOY (EBG) ÷ [999.0] SPACE (Other) ÷ [0.3] ÷ 1F466 × 0308 ÷ 0020 ÷ # ÷ [0.2] BOY (EBG) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] SPACE (Other) ÷ [0.3] ÷ 1F466 ÷ 000D ÷ # ÷ [0.2] BOY (EBG) ÷ [5.0] <CARRIAGE RETURN (CR)> (CR) ÷ [0.3] @@ -733,8 +734,8 @@ ÷ 1F466 × 0308 × 1F3FB ÷ # ÷ [0.2] BOY (EBG) × [9.0] COMBINING DIAERESIS (Extend) × [10.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 1F466 × 200D ÷ # ÷ [0.2] BOY (EBG) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 1F466 × 0308 × 200D ÷ # ÷ [0.2] BOY (EBG) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 1F466 ÷ 2764 ÷ # ÷ [0.2] BOY (EBG) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 1F466 × 0308 ÷ 2764 ÷ # ÷ [0.2] BOY (EBG) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 1F466 ÷ 2640 ÷ # ÷ [0.2] BOY (EBG) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 1F466 × 0308 ÷ 2640 ÷ # ÷ [0.2] BOY (EBG) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 1F466 ÷ 1F466 ÷ # ÷ [0.2] BOY (EBG) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1F466 × 0308 ÷ 1F466 ÷ # ÷ [0.2] BOY (EBG) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 1F466 ÷ 0378 ÷ # ÷ [0.2] BOY (EBG) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -773,8 +774,8 @@ ÷ 0378 × 0308 ÷ 1F3FB ÷ # ÷ [0.2] <reserved-0378> (Other) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 0378 × 200D ÷ # ÷ [0.2] <reserved-0378> (Other) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ 0378 × 0308 × 200D ÷ # ÷ [0.2] <reserved-0378> (Other) × [9.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ 0378 ÷ 2764 ÷ # ÷ [0.2] <reserved-0378> (Other) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ 0378 × 0308 ÷ 2764 ÷ # ÷ [0.2] <reserved-0378> (Other) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 0378 ÷ 2640 ÷ # ÷ [0.2] <reserved-0378> (Other) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ 0378 × 0308 ÷ 2640 ÷ # ÷ [0.2] <reserved-0378> (Other) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 0378 ÷ 1F466 ÷ # ÷ [0.2] <reserved-0378> (Other) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0378 × 0308 ÷ 1F466 ÷ # ÷ [0.2] <reserved-0378> (Other) × [9.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ 0378 ÷ 0378 ÷ # ÷ [0.2] <reserved-0378> (Other) ÷ [999.0] <reserved-0378> (Other) ÷ [0.3] @@ -813,8 +814,8 @@ ÷ D800 ÷ 0308 ÷ 1F3FB ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ D800 ÷ 200D ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] ÷ D800 ÷ 0308 × 200D ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] COMBINING DIAERESIS (Extend) × [9.0] ZERO WIDTH JOINER (ZWJ) ÷ [0.3] -÷ D800 ÷ 2764 ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] -÷ D800 ÷ 0308 ÷ 2764 ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ D800 ÷ 2640 ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] +÷ D800 ÷ 0308 ÷ 2640 ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] COMBINING DIAERESIS 
(Extend) ÷ [999.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ D800 ÷ 1F466 ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] BOY (EBG) ÷ [0.3] ÷ D800 ÷ 0308 ÷ 1F466 ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] COMBINING DIAERESIS (Extend) ÷ [999.0] BOY (EBG) ÷ [0.3] ÷ D800 ÷ 0378 ÷ # ÷ [0.2] <surrogate-D800> (Control) ÷ [4.0] <reserved-0378> (Other) ÷ [0.3] @@ -840,7 +841,7 @@ ÷ 261D × 1F3FB ÷ 261D ÷ # ÷ [0.2] WHITE UP POINTING INDEX (E_Base) × [10.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [999.0] WHITE UP POINTING INDEX (E_Base) ÷ [0.3] ÷ 1F466 × 1F3FB ÷ # ÷ [0.2] BOY (EBG) × [10.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] ÷ 200D × 1F466 × 1F3FB ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [11.0] BOY (EBG) × [10.0] EMOJI MODIFIER FITZPATRICK TYPE-1-2 (E_Modifier) ÷ [0.3] -÷ 200D × 2764 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [11.0] HEAVY BLACK HEART (Glue_After_Zwj) ÷ [0.3] +÷ 200D × 2640 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [11.0] FEMALE SIGN (Glue_After_Zwj) ÷ [0.3] ÷ 200D × 1F466 ÷ # ÷ [0.2] ZERO WIDTH JOINER (ZWJ) × [11.0] BOY (EBG) ÷ [0.3] ÷ 1F466 ÷ 1F466 ÷ # ÷ [0.2] BOY (EBG) ÷ [999.0] BOY (EBG) ÷ [0.3] # diff --git a/lib/stdlib/test/unicode_util_SUITE_data/LineBreakTest.txt b/lib/stdlib/test/unicode_util_SUITE_data/LineBreakTest.txt index 05efcf5a44..6715446aba 100644 --- a/lib/stdlib/test/unicode_util_SUITE_data/LineBreakTest.txt +++ b/lib/stdlib/test/unicode_util_SUITE_data/LineBreakTest.txt @@ -1,25 +1,28 @@ -# LineBreakTest-9.0.0.txt -# Date: 2016-06-18, 00:42:06 GMT -# © 2016 Unicode®, Inc. +# LineBreakTest-10.0.0.txt +# Date: 2017-04-14, 05:40:30 GMT +# © 2017 Unicode®, Inc. # Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. # For terms of use, see http://www.unicode.org/terms_of_use.html # # Unicode Character Database # For documentation, see http://www.unicode.org/reports/tr44/ # -# Default Line Break Test +# Default Line_Break Test # # Format: -# <string> (# <comment>)? -# <string> contains hex Unicode code points, with -# ÷ wherever there is a break opportunity, and +# <string> (# <comment>)? +# <string> contains hex Unicode code points, with +# ÷ wherever there is a break opportunity, and # × wherever there is not. # <comment> the format can change, but currently it shows: # - the sample character name # - (x) the Line_Break property value for the sample character -# - [x] the rule that determines whether there is a break or not -# Note: The Line Break tests use tailoring of numbers described in Example 7 of Section 8.2 Examples of Customization. -# They also differ from the results produced by a pair table implementation in sequences like: ZW SP CL. +# - [x] the rule that determines whether there is a break or not, +# as listed in the Rules section of LineBreakTest.html +# +# Note: +# The Line_Break tests use tailoring of numbers described in +# Example 7 of Section 8.2, "Examples of Customization" of UAX #14. # # These samples may be extended or changed in the future. # diff --git a/lib/stdlib/test/unicode_util_SUITE_data/NormalizationTest.txt b/lib/stdlib/test/unicode_util_SUITE_data/NormalizationTest.txt index e133fa8a78..71f2371c5e 100644 --- a/lib/stdlib/test/unicode_util_SUITE_data/NormalizationTest.txt +++ b/lib/stdlib/test/unicode_util_SUITE_data/NormalizationTest.txt @@ -1,6 +1,6 @@ -# NormalizationTest-9.0.0.txt -# Date: 2016-04-04, 11:41:55 GMT -# © 2016 Unicode®, Inc. +# NormalizationTest-10.0.0.txt +# Date: 2017-03-08, 08:41:55 GMT +# © 2017 Unicode®, Inc. 
# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. # For terms of use, see http://www.unicode.org/terms_of_use.html # @@ -17653,6 +17653,10 @@ FFEE;FFEE;FFEE;25CB;25CB; # (○; ○; ○; ○; ○; ) HALFWIDTH WHITE CIRCLE 0061 0CBC 3099 093C 0334 0062;0061 0334 0CBC 093C 3099 0062;0061 0334 0CBC 093C 3099 0062;0061 0334 0CBC 093C 3099 0062;0061 0334 0CBC 093C 3099 0062; # (a◌಼◌゙◌़◌̴b; a◌̴◌಼◌़◌゙b; a◌̴◌಼◌़◌゙b; a◌̴◌಼◌़◌゙b; a◌̴◌಼◌़◌゙b; ) LATIN SMALL LETTER A, KANNADA SIGN NUKTA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, DEVANAGARI SIGN NUKTA, COMBINING TILDE OVERLAY, LATIN SMALL LETTER B 0061 05B0 094D 3099 0CCD 0062;0061 3099 094D 0CCD 05B0 0062;0061 3099 094D 0CCD 05B0 0062;0061 3099 094D 0CCD 05B0 0062;0061 3099 094D 0CCD 05B0 0062; # (a◌ְ◌्◌゙◌್b; a◌゙◌्◌್◌ְb; a◌゙◌्◌್◌ְb; a◌゙◌्◌್◌ְb; a◌゙◌्◌್◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, KANNADA SIGN VIRAMA, LATIN SMALL LETTER B 0061 0CCD 05B0 094D 3099 0062;0061 3099 0CCD 094D 05B0 0062;0061 3099 0CCD 094D 05B0 0062;0061 3099 0CCD 094D 05B0 0062;0061 3099 0CCD 094D 05B0 0062; # (a◌್◌ְ◌्◌゙b; a◌゙◌್◌्◌ְb; a◌゙◌್◌्◌ְb; a◌゙◌್◌्◌ְb; a◌゙◌್◌्◌ְb; ) LATIN SMALL LETTER A, KANNADA SIGN VIRAMA, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B +0061 05B0 094D 3099 0D3B 0062;0061 3099 094D 0D3B 05B0 0062;0061 3099 094D 0D3B 05B0 0062;0061 3099 094D 0D3B 05B0 0062;0061 3099 094D 0D3B 05B0 0062; # (a◌ְ◌्◌゙◌഻b; a◌゙◌्◌഻◌ְb; a◌゙◌्◌഻◌ְb; a◌゙◌्◌഻◌ְb; a◌゙◌्◌഻◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, MALAYALAM SIGN VERTICAL BAR VIRAMA, LATIN SMALL LETTER B +0061 0D3B 05B0 094D 3099 0062;0061 3099 0D3B 094D 05B0 0062;0061 3099 0D3B 094D 05B0 0062;0061 3099 0D3B 094D 05B0 0062;0061 3099 0D3B 094D 05B0 0062; # (a◌഻◌ְ◌्◌゙b; a◌゙◌഻◌्◌ְb; a◌゙◌഻◌्◌ְb; a◌゙◌഻◌्◌ְb; a◌゙◌഻◌्◌ְb; ) LATIN SMALL LETTER A, MALAYALAM SIGN VERTICAL BAR VIRAMA, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B +0061 05B0 094D 3099 0D3C 0062;0061 3099 094D 0D3C 05B0 0062;0061 3099 094D 0D3C 05B0 0062;0061 3099 094D 0D3C 05B0 0062;0061 3099 094D 0D3C 05B0 0062; # (a◌ְ◌्◌゙◌഼b; a◌゙◌्◌഼◌ְb; a◌゙◌्◌഼◌ְb; a◌゙◌्◌഼◌ְb; a◌゙◌्◌഼◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, MALAYALAM SIGN CIRCULAR VIRAMA, LATIN SMALL LETTER B +0061 0D3C 05B0 094D 3099 0062;0061 3099 0D3C 094D 05B0 0062;0061 3099 0D3C 094D 05B0 0062;0061 3099 0D3C 094D 05B0 0062;0061 3099 0D3C 094D 05B0 0062; # (a◌഼◌ְ◌्◌゙b; a◌゙◌഼◌्◌ְb; a◌゙◌഼◌्◌ְb; a◌゙◌഼◌्◌ְb; a◌゙◌഼◌्◌ְb; ) LATIN SMALL LETTER A, MALAYALAM SIGN CIRCULAR VIRAMA, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B 0061 05B0 094D 3099 0D4D 0062;0061 3099 094D 0D4D 05B0 0062;0061 3099 094D 0D4D 05B0 0062;0061 3099 094D 0D4D 05B0 0062;0061 3099 094D 0D4D 05B0 0062; # (a◌ְ◌्◌゙◌്b; a◌゙◌्◌്◌ְb; a◌゙◌्◌്◌ְb; a◌゙◌्◌്◌ְb; a◌゙◌्◌്◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, MALAYALAM SIGN VIRAMA, LATIN SMALL LETTER B 0061 0D4D 05B0 094D 3099 0062;0061 3099 0D4D 094D 05B0 0062;0061 3099 0D4D 094D 05B0 0062;0061 3099 0D4D 094D 05B0 0062;0061 3099 0D4D 094D 05B0 0062; # (a◌്◌ְ◌्◌゙b; a◌゙◌്◌्◌ְb; a◌゙◌്◌्◌ְb; a◌゙◌്◌्◌ְb; a◌゙◌്◌्◌ְb; ) LATIN SMALL LETTER A, MALAYALAM SIGN 
VIRAMA, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B 0061 05B0 094D 3099 0DCA 0062;0061 3099 094D 0DCA 05B0 0062;0061 3099 094D 0DCA 05B0 0062;0061 3099 094D 0DCA 05B0 0062;0061 3099 094D 0DCA 05B0 0062; # (a◌ְ◌्◌゙◌්b; a◌゙◌्◌්◌ְb; a◌゙◌्◌්◌ְb; a◌゙◌्◌්◌ְb; a◌゙◌्◌්◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, SINHALA SIGN AL-LAKUNA, LATIN SMALL LETTER B @@ -17999,6 +18003,14 @@ FFEE;FFEE;FFEE;25CB;25CB; # (○; ○; ○; ○; ○; ) HALFWIDTH WHITE CIRCLE 0061 1DF4 0315 0300 05AE 0062;0061 05AE 1DF4 0300 0315 0062;0061 05AE 1DF4 0300 0315 0062;0061 05AE 1DF4 0300 0315 0062;0061 05AE 1DF4 0300 0315 0062; # (a◌ᷴ◌̕◌̀◌֮b; a◌֮◌ᷴ◌̀◌̕b; a◌֮◌ᷴ◌̀◌̕b; a◌֮◌ᷴ◌̀◌̕b; a◌֮◌ᷴ◌̀◌̕b; ) LATIN SMALL LETTER A, COMBINING LATIN SMALL LETTER U WITH DIAERESIS, COMBINING COMMA ABOVE RIGHT, COMBINING GRAVE ACCENT, HEBREW ACCENT ZINOR, LATIN SMALL LETTER B 0061 0315 0300 05AE 1DF5 0062;00E0 05AE 1DF5 0315 0062;0061 05AE 0300 1DF5 0315 0062;00E0 05AE 1DF5 0315 0062;0061 05AE 0300 1DF5 0315 0062; # (a◌̕◌̀◌֮◌᷵b; à◌֮◌᷵◌̕b; a◌֮◌̀◌᷵◌̕b; à◌֮◌᷵◌̕b; a◌֮◌̀◌᷵◌̕b; ) LATIN SMALL LETTER A, COMBINING COMMA ABOVE RIGHT, COMBINING GRAVE ACCENT, HEBREW ACCENT ZINOR, COMBINING UP TACK ABOVE, LATIN SMALL LETTER B 0061 1DF5 0315 0300 05AE 0062;0061 05AE 1DF5 0300 0315 0062;0061 05AE 1DF5 0300 0315 0062;0061 05AE 1DF5 0300 0315 0062;0061 05AE 1DF5 0300 0315 0062; # (a◌᷵◌̕◌̀◌֮b; a◌֮◌᷵◌̀◌̕b; a◌֮◌᷵◌̀◌̕b; a◌֮◌᷵◌̀◌̕b; a◌֮◌᷵◌̀◌̕b; ) LATIN SMALL LETTER A, COMBINING UP TACK ABOVE, COMBINING COMMA ABOVE RIGHT, COMBINING GRAVE ACCENT, HEBREW ACCENT ZINOR, LATIN SMALL LETTER B +0061 035C 0315 0300 1DF6 0062;00E0 0315 1DF6 035C 0062;0061 0300 0315 1DF6 035C 0062;00E0 0315 1DF6 035C 0062;0061 0300 0315 1DF6 035C 0062; # (a◌͜◌̕◌̀◌᷶b; à◌̕◌᷶◌͜b; a◌̀◌̕◌᷶◌͜b; à◌̕◌᷶◌͜b; a◌̀◌̕◌᷶◌͜b; ) LATIN SMALL LETTER A, COMBINING DOUBLE BREVE BELOW, COMBINING COMMA ABOVE RIGHT, COMBINING GRAVE ACCENT, COMBINING KAVYKA ABOVE RIGHT, LATIN SMALL LETTER B +0061 1DF6 035C 0315 0300 0062;00E0 1DF6 0315 035C 0062;0061 0300 1DF6 0315 035C 0062;00E0 1DF6 0315 035C 0062;0061 0300 1DF6 0315 035C 0062; # (a◌᷶◌͜◌̕◌̀b; à◌᷶◌̕◌͜b; a◌̀◌᷶◌̕◌͜b; à◌᷶◌̕◌͜b; a◌̀◌᷶◌̕◌͜b; ) LATIN SMALL LETTER A, COMBINING KAVYKA ABOVE RIGHT, COMBINING DOUBLE BREVE BELOW, COMBINING COMMA ABOVE RIGHT, COMBINING GRAVE ACCENT, LATIN SMALL LETTER B +0061 0300 05AE 1D16D 1DF7 0062;00E0 1D16D 05AE 1DF7 0062;0061 1D16D 05AE 1DF7 0300 0062;00E0 1D16D 05AE 1DF7 0062;0061 1D16D 05AE 1DF7 0300 0062; # (a◌̀◌𝅭֮◌᷷b; à𝅭◌֮◌᷷b; a𝅭◌֮◌᷷◌̀b; à𝅭◌֮◌᷷b; a𝅭◌֮◌᷷◌̀b; ) LATIN SMALL LETTER A, COMBINING GRAVE ACCENT, HEBREW ACCENT ZINOR, MUSICAL SYMBOL COMBINING AUGMENTATION DOT, COMBINING KAVYKA ABOVE LEFT, LATIN SMALL LETTER B +0061 1DF7 0300 05AE 1D16D 0062;00E0 1D16D 1DF7 05AE 0062;0061 1D16D 1DF7 05AE 0300 0062;00E0 1D16D 1DF7 05AE 0062;0061 1D16D 1DF7 05AE 0300 0062; # (a◌᷷◌̀◌𝅭֮b; à𝅭◌᷷◌֮b; a𝅭◌᷷◌֮◌̀b; à𝅭◌᷷◌֮b; a𝅭◌᷷◌֮◌̀b; ) LATIN SMALL LETTER A, COMBINING KAVYKA ABOVE LEFT, COMBINING GRAVE ACCENT, HEBREW ACCENT ZINOR, MUSICAL SYMBOL COMBINING AUGMENTATION DOT, LATIN SMALL LETTER B +0061 0300 05AE 1D16D 1DF8 0062;00E0 1D16D 05AE 1DF8 0062;0061 1D16D 05AE 1DF8 0300 0062;00E0 1D16D 05AE 1DF8 0062;0061 1D16D 05AE 1DF8 0300 0062; # (a◌̀◌𝅭֮◌᷸b; à𝅭◌֮◌᷸b; a𝅭◌֮◌᷸◌̀b; à𝅭◌֮◌᷸b; a𝅭◌֮◌᷸◌̀b; ) LATIN SMALL LETTER A, COMBINING GRAVE ACCENT, HEBREW ACCENT ZINOR, MUSICAL SYMBOL COMBINING AUGMENTATION DOT, COMBINING DOT ABOVE LEFT, LATIN SMALL LETTER B +0061 1DF8 0300 05AE 1D16D 0062;00E0 1D16D 1DF8 05AE 0062;0061 1D16D 1DF8 05AE 
0300 0062;00E0 1D16D 1DF8 05AE 0062;0061 1D16D 1DF8 05AE 0300 0062; # (a◌᷸◌̀◌𝅭֮b; à𝅭◌᷸◌֮b; a𝅭◌᷸◌֮◌̀b; à𝅭◌᷸◌֮b; a𝅭◌᷸◌֮◌̀b; ) LATIN SMALL LETTER A, COMBINING DOT ABOVE LEFT, COMBINING GRAVE ACCENT, HEBREW ACCENT ZINOR, MUSICAL SYMBOL COMBINING AUGMENTATION DOT, LATIN SMALL LETTER B +0061 059A 0316 302A 1DF9 0062;0061 302A 0316 1DF9 059A 0062;0061 302A 0316 1DF9 059A 0062;0061 302A 0316 1DF9 059A 0062;0061 302A 0316 1DF9 059A 0062; # (a◌֚◌̖◌〪◌᷹b; a◌〪◌̖◌᷹◌֚b; a◌〪◌̖◌᷹◌֚b; a◌〪◌̖◌᷹◌֚b; a◌〪◌̖◌᷹◌֚b; ) LATIN SMALL LETTER A, HEBREW ACCENT YETIV, COMBINING GRAVE ACCENT BELOW, IDEOGRAPHIC LEVEL TONE MARK, COMBINING WIDE INVERTED BRIDGE BELOW, LATIN SMALL LETTER B +0061 1DF9 059A 0316 302A 0062;0061 302A 1DF9 0316 059A 0062;0061 302A 1DF9 0316 059A 0062;0061 302A 1DF9 0316 059A 0062;0061 302A 1DF9 0316 059A 0062; # (a◌᷹◌֚◌̖◌〪b; a◌〪◌᷹◌̖◌֚b; a◌〪◌᷹◌̖◌֚b; a◌〪◌᷹◌̖◌֚b; a◌〪◌᷹◌̖◌֚b; ) LATIN SMALL LETTER A, COMBINING WIDE INVERTED BRIDGE BELOW, HEBREW ACCENT YETIV, COMBINING GRAVE ACCENT BELOW, IDEOGRAPHIC LEVEL TONE MARK, LATIN SMALL LETTER B 0061 0315 0300 05AE 1DFB 0062;00E0 05AE 1DFB 0315 0062;0061 05AE 0300 1DFB 0315 0062;00E0 05AE 1DFB 0315 0062;0061 05AE 0300 1DFB 0315 0062; # (a◌̕◌̀◌֮◌᷻b; à◌֮◌᷻◌̕b; a◌֮◌̀◌᷻◌̕b; à◌֮◌᷻◌̕b; a◌֮◌̀◌᷻◌̕b; ) LATIN SMALL LETTER A, COMBINING COMMA ABOVE RIGHT, COMBINING GRAVE ACCENT, HEBREW ACCENT ZINOR, COMBINING DELETION MARK, LATIN SMALL LETTER B 0061 1DFB 0315 0300 05AE 0062;0061 05AE 1DFB 0300 0315 0062;0061 05AE 1DFB 0300 0315 0062;0061 05AE 1DFB 0300 0315 0062;0061 05AE 1DFB 0300 0315 0062; # (a◌᷻◌̕◌̀◌֮b; a◌֮◌᷻◌̀◌̕b; a◌֮◌᷻◌̀◌̕b; a◌֮◌᷻◌̀◌̕b; a◌֮◌᷻◌̀◌̕b; ) LATIN SMALL LETTER A, COMBINING DELETION MARK, COMBINING COMMA ABOVE RIGHT, COMBINING GRAVE ACCENT, HEBREW ACCENT ZINOR, LATIN SMALL LETTER B 0061 035D 035C 0315 1DFC 0062;0061 0315 035C 1DFC 035D 0062;0061 0315 035C 1DFC 035D 0062;0061 0315 035C 1DFC 035D 0062;0061 0315 035C 1DFC 035D 0062; # (a◌͝◌͜◌̕◌᷼b; a◌̕◌͜◌᷼◌͝b; a◌̕◌͜◌᷼◌͝b; a◌̕◌͜◌᷼◌͝b; a◌̕◌͜◌᷼◌͝b; ) LATIN SMALL LETTER A, COMBINING DOUBLE BREVE, COMBINING DOUBLE BREVE BELOW, COMBINING COMMA ABOVE RIGHT, COMBINING DOUBLE INVERTED BREVE BELOW, LATIN SMALL LETTER B @@ -18397,8 +18409,20 @@ FFEE;FFEE;FFEE;25CB;25CB; # (○; ○; ○; ○; ○; ) HALFWIDTH WHITE CIRCLE 0061 116B7 3099 093C 0334 0062;0061 0334 116B7 093C 3099 0062;0061 0334 116B7 093C 3099 0062;0061 0334 116B7 093C 3099 0062;0061 0334 116B7 093C 3099 0062; # (a◌𑚷◌゙◌़◌̴b; a◌̴◌𑚷◌़◌゙b; a◌̴◌𑚷◌़◌゙b; a◌̴◌𑚷◌़◌゙b; a◌̴◌𑚷◌़◌゙b; ) LATIN SMALL LETTER A, TAKRI SIGN NUKTA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, DEVANAGARI SIGN NUKTA, COMBINING TILDE OVERLAY, LATIN SMALL LETTER B 0061 05B0 094D 3099 1172B 0062;0061 3099 094D 1172B 05B0 0062;0061 3099 094D 1172B 05B0 0062;0061 3099 094D 1172B 05B0 0062;0061 3099 094D 1172B 05B0 0062; # (a◌ְ◌्◌゙◌𑜫b; a◌゙◌्◌𑜫◌ְb; a◌゙◌्◌𑜫◌ְb; a◌゙◌्◌𑜫◌ְb; a◌゙◌्◌𑜫◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, AHOM SIGN KILLER, LATIN SMALL LETTER B 0061 1172B 05B0 094D 3099 0062;0061 3099 1172B 094D 05B0 0062;0061 3099 1172B 094D 05B0 0062;0061 3099 1172B 094D 05B0 0062;0061 3099 1172B 094D 05B0 0062; # (a◌𑜫◌ְ◌्◌゙b; a◌゙◌𑜫◌्◌ְb; a◌゙◌𑜫◌्◌ְb; a◌゙◌𑜫◌्◌ְb; a◌゙◌𑜫◌्◌ְb; ) LATIN SMALL LETTER A, AHOM SIGN KILLER, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B +0061 05B0 094D 3099 11A34 0062;0061 3099 094D 11A34 05B0 0062;0061 3099 094D 11A34 05B0 0062;0061 3099 094D 11A34 05B0 0062;0061 3099 094D 11A34 05B0 0062; # (a◌ְ◌्◌゙◌𑨴b; a◌゙◌्◌𑨴◌ְb; a◌゙◌्◌𑨴◌ְb; a◌゙◌्◌𑨴◌ְb; 
a◌゙◌्◌𑨴◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, ZANABAZAR SQUARE SIGN VIRAMA, LATIN SMALL LETTER B +0061 11A34 05B0 094D 3099 0062;0061 3099 11A34 094D 05B0 0062;0061 3099 11A34 094D 05B0 0062;0061 3099 11A34 094D 05B0 0062;0061 3099 11A34 094D 05B0 0062; # (a◌𑨴◌ְ◌्◌゙b; a◌゙◌𑨴◌्◌ְb; a◌゙◌𑨴◌्◌ְb; a◌゙◌𑨴◌्◌ְb; a◌゙◌𑨴◌्◌ְb; ) LATIN SMALL LETTER A, ZANABAZAR SQUARE SIGN VIRAMA, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B +0061 05B0 094D 3099 11A47 0062;0061 3099 094D 11A47 05B0 0062;0061 3099 094D 11A47 05B0 0062;0061 3099 094D 11A47 05B0 0062;0061 3099 094D 11A47 05B0 0062; # (a◌ְ◌्◌゙◌𑩇b; a◌゙◌्◌𑩇◌ְb; a◌゙◌्◌𑩇◌ְb; a◌゙◌्◌𑩇◌ְb; a◌゙◌्◌𑩇◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, ZANABAZAR SQUARE SUBJOINER, LATIN SMALL LETTER B +0061 11A47 05B0 094D 3099 0062;0061 3099 11A47 094D 05B0 0062;0061 3099 11A47 094D 05B0 0062;0061 3099 11A47 094D 05B0 0062;0061 3099 11A47 094D 05B0 0062; # (a◌𑩇◌ְ◌्◌゙b; a◌゙◌𑩇◌्◌ְb; a◌゙◌𑩇◌्◌ְb; a◌゙◌𑩇◌्◌ְb; a◌゙◌𑩇◌्◌ְb; ) LATIN SMALL LETTER A, ZANABAZAR SQUARE SUBJOINER, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B +0061 05B0 094D 3099 11A99 0062;0061 3099 094D 11A99 05B0 0062;0061 3099 094D 11A99 05B0 0062;0061 3099 094D 11A99 05B0 0062;0061 3099 094D 11A99 05B0 0062; # (a◌ְ◌्◌゙◌𑪙b; a◌゙◌्◌𑪙◌ְb; a◌゙◌्◌𑪙◌ְb; a◌゙◌्◌𑪙◌ְb; a◌゙◌्◌𑪙◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, SOYOMBO SUBJOINER, LATIN SMALL LETTER B +0061 11A99 05B0 094D 3099 0062;0061 3099 11A99 094D 05B0 0062;0061 3099 11A99 094D 05B0 0062;0061 3099 11A99 094D 05B0 0062;0061 3099 11A99 094D 05B0 0062; # (a◌𑪙◌ְ◌्◌゙b; a◌゙◌𑪙◌्◌ְb; a◌゙◌𑪙◌्◌ְb; a◌゙◌𑪙◌्◌ְb; a◌゙◌𑪙◌्◌ְb; ) LATIN SMALL LETTER A, SOYOMBO SUBJOINER, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B 0061 05B0 094D 3099 11C3F 0062;0061 3099 094D 11C3F 05B0 0062;0061 3099 094D 11C3F 05B0 0062;0061 3099 094D 11C3F 05B0 0062;0061 3099 094D 11C3F 05B0 0062; # (a◌ְ◌्◌゙◌𑰿b; a◌゙◌्◌𑰿◌ְb; a◌゙◌्◌𑰿◌ְb; a◌゙◌्◌𑰿◌ְb; a◌゙◌्◌𑰿◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, BHAIKSUKI SIGN VIRAMA, LATIN SMALL LETTER B 0061 11C3F 05B0 094D 3099 0062;0061 3099 11C3F 094D 05B0 0062;0061 3099 11C3F 094D 05B0 0062;0061 3099 11C3F 094D 05B0 0062;0061 3099 11C3F 094D 05B0 0062; # (a◌𑰿◌ְ◌्◌゙b; a◌゙◌𑰿◌्◌ְb; a◌゙◌𑰿◌्◌ְb; a◌゙◌𑰿◌्◌ְb; a◌゙◌𑰿◌्◌ְb; ) LATIN SMALL LETTER A, BHAIKSUKI SIGN VIRAMA, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B +0061 3099 093C 0334 11D42 0062;0061 0334 093C 11D42 3099 0062;0061 0334 093C 11D42 3099 0062;0061 0334 093C 11D42 3099 0062;0061 0334 093C 11D42 3099 0062; # (a◌゙◌़◌̴◌𑵂b; a◌̴◌़◌𑵂◌゙b; a◌̴◌़◌𑵂◌゙b; a◌̴◌़◌𑵂◌゙b; a◌̴◌़◌𑵂◌゙b; ) LATIN SMALL LETTER A, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, DEVANAGARI SIGN NUKTA, COMBINING TILDE OVERLAY, MASARAM GONDI SIGN NUKTA, LATIN SMALL LETTER B +0061 11D42 3099 093C 0334 0062;0061 0334 11D42 093C 3099 0062;0061 0334 11D42 093C 3099 0062;0061 0334 11D42 093C 3099 0062;0061 0334 11D42 093C 3099 0062; # (a◌𑵂◌゙◌़◌̴b; a◌̴◌𑵂◌़◌゙b; a◌̴◌𑵂◌़◌゙b; a◌̴◌𑵂◌़◌゙b; a◌̴◌𑵂◌़◌゙b; ) LATIN SMALL LETTER A, MASARAM GONDI SIGN NUKTA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, DEVANAGARI 
SIGN NUKTA, COMBINING TILDE OVERLAY, LATIN SMALL LETTER B +0061 05B0 094D 3099 11D44 0062;0061 3099 094D 11D44 05B0 0062;0061 3099 094D 11D44 05B0 0062;0061 3099 094D 11D44 05B0 0062;0061 3099 094D 11D44 05B0 0062; # (a◌ְ◌्◌゙◌𑵄b; a◌゙◌्◌𑵄◌ְb; a◌゙◌्◌𑵄◌ְb; a◌゙◌्◌𑵄◌ְb; a◌゙◌्◌𑵄◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, MASARAM GONDI SIGN HALANTA, LATIN SMALL LETTER B +0061 11D44 05B0 094D 3099 0062;0061 3099 11D44 094D 05B0 0062;0061 3099 11D44 094D 05B0 0062;0061 3099 11D44 094D 05B0 0062;0061 3099 11D44 094D 05B0 0062; # (a◌𑵄◌ְ◌्◌゙b; a◌゙◌𑵄◌्◌ְb; a◌゙◌𑵄◌्◌ְb; a◌゙◌𑵄◌्◌ְb; a◌゙◌𑵄◌्◌ְb; ) LATIN SMALL LETTER A, MASARAM GONDI SIGN HALANTA, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B +0061 05B0 094D 3099 11D45 0062;0061 3099 094D 11D45 05B0 0062;0061 3099 094D 11D45 05B0 0062;0061 3099 094D 11D45 05B0 0062;0061 3099 094D 11D45 05B0 0062; # (a◌ְ◌्◌゙◌𑵅b; a◌゙◌्◌𑵅◌ְb; a◌゙◌्◌𑵅◌ְb; a◌゙◌्◌𑵅◌ְb; a◌゙◌्◌𑵅◌ְb; ) LATIN SMALL LETTER A, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, MASARAM GONDI VIRAMA, LATIN SMALL LETTER B +0061 11D45 05B0 094D 3099 0062;0061 3099 11D45 094D 05B0 0062;0061 3099 11D45 094D 05B0 0062;0061 3099 11D45 094D 05B0 0062;0061 3099 11D45 094D 05B0 0062; # (a◌𑵅◌ְ◌्◌゙b; a◌゙◌𑵅◌्◌ְb; a◌゙◌𑵅◌्◌ְb; a◌゙◌𑵅◌्◌ְb; a◌゙◌𑵅◌्◌ְb; ) LATIN SMALL LETTER A, MASARAM GONDI VIRAMA, HEBREW POINT SHEVA, DEVANAGARI SIGN VIRAMA, COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK, LATIN SMALL LETTER B 0061 093C 0334 16AF0 0062;0061 0334 16AF0 093C 0062;0061 0334 16AF0 093C 0062;0061 0334 16AF0 093C 0062;0061 0334 16AF0 093C 0062; # (a◌़◌̴◌𖫰b; a◌̴◌𖫰◌़b; a◌̴◌𖫰◌़b; a◌̴◌𖫰◌़b; a◌̴◌𖫰◌़b; ) LATIN SMALL LETTER A, DEVANAGARI SIGN NUKTA, COMBINING TILDE OVERLAY, BASSA VAH COMBINING HIGH TONE, LATIN SMALL LETTER B 0061 16AF0 093C 0334 0062;0061 16AF0 0334 093C 0062;0061 16AF0 0334 093C 0062;0061 16AF0 0334 093C 0062;0061 16AF0 0334 093C 0062; # (a◌𖫰◌़◌̴b; a◌𖫰◌̴◌़b; a◌𖫰◌̴◌़b; a◌𖫰◌̴◌़b; a◌𖫰◌̴◌़b; ) LATIN SMALL LETTER A, BASSA VAH COMBINING HIGH TONE, DEVANAGARI SIGN NUKTA, COMBINING TILDE OVERLAY, LATIN SMALL LETTER B 0061 093C 0334 16AF1 0062;0061 0334 16AF1 093C 0062;0061 0334 16AF1 093C 0062;0061 0334 16AF1 093C 0062;0061 0334 16AF1 093C 0062; # (a◌़◌̴◌𖫱b; a◌̴◌𖫱◌़b; a◌̴◌𖫱◌़b; a◌̴◌𖫱◌़b; a◌̴◌𖫱◌़b; ) LATIN SMALL LETTER A, DEVANAGARI SIGN NUKTA, COMBINING TILDE OVERLAY, BASSA VAH COMBINING LOW TONE, LATIN SMALL LETTER B diff --git a/lib/stdlib/uc_spec/CaseFolding.txt b/lib/stdlib/uc_spec/CaseFolding.txt index 372ee68bd8..efdf18e441 100644 --- a/lib/stdlib/uc_spec/CaseFolding.txt +++ b/lib/stdlib/uc_spec/CaseFolding.txt @@ -1,6 +1,6 @@ -# CaseFolding-9.0.0.txt -# Date: 2016-03-02, 18:54:54 GMT -# © 2016 Unicode®, Inc. +# CaseFolding-10.0.0.txt +# Date: 2017-04-14, 05:40:18 GMT +# © 2017 Unicode®, Inc. # Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. # For terms of use, see http://www.unicode.org/terms_of_use.html # @@ -24,7 +24,7 @@ # # NOTE: case folding does not preserve normalization formats! # -# For information on case folding, including how to have case folding +# For information on case folding, including how to have case folding # preserve normalization formats, see Section 3.13 Default Case Algorithms in # The Unicode Standard. 
# diff --git a/lib/stdlib/uc_spec/CompositionExclusions.txt b/lib/stdlib/uc_spec/CompositionExclusions.txt index 1999ed1328..ff42508686 100644 --- a/lib/stdlib/uc_spec/CompositionExclusions.txt +++ b/lib/stdlib/uc_spec/CompositionExclusions.txt @@ -1,6 +1,6 @@ -# CompositionExclusions-9.0.0.txt -# Date: 2016-01-21, 22:00:00 GMT [KW, LI] -# © 2016 Unicode®, Inc. +# CompositionExclusions-10.0.0.txt +# Date: 2017-02-15, 00:00:00 GMT [KW, LI] +# © 2017 Unicode®, Inc. # For terms of use, see http://www.unicode.org/terms_of_use.html # # Unicode Character Database diff --git a/lib/stdlib/uc_spec/GraphemeBreakProperty.txt b/lib/stdlib/uc_spec/GraphemeBreakProperty.txt index c5e94a3762..32bb12e47e 100644 --- a/lib/stdlib/uc_spec/GraphemeBreakProperty.txt +++ b/lib/stdlib/uc_spec/GraphemeBreakProperty.txt @@ -1,6 +1,6 @@ -# GraphemeBreakProperty-9.0.0.txt -# Date: 2016-06-03, 22:23:55 GMT -# © 2016 Unicode®, Inc. +# GraphemeBreakProperty-10.0.0.txt +# Date: 2017-03-12, 07:03:41 GMT +# © 2017 Unicode®, Inc. # Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. # For terms of use, see http://www.unicode.org/terms_of_use.html # @@ -25,8 +25,11 @@ 0D4E ; Prepend # Lo MALAYALAM LETTER DOT REPH 110BD ; Prepend # Cf KAITHI NUMBER SIGN 111C2..111C3 ; Prepend # Lo [2] SHARADA SIGN JIHVAMULIYA..SHARADA SIGN UPADHMANIYA +11A3A ; Prepend # Lo ZANABAZAR SQUARE CLUSTER-INITIAL LETTER RA +11A86..11A89 ; Prepend # Lo [4] SOYOMBO CLUSTER-INITIAL LETTER RA..SOYOMBO CLUSTER-INITIAL LETTER SA +11D46 ; Prepend # Lo MASARAM GONDI REPHA -# Total code points: 13 +# Total code points: 19 # ================================================ @@ -126,6 +129,7 @@ E01F0..E0FFF ; Control # Cn [3600] <reserved-E01F0>..<reserved-E0FFF> 0AC7..0AC8 ; Extend # Mn [2] GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN AI 0ACD ; Extend # Mn GUJARATI SIGN VIRAMA 0AE2..0AE3 ; Extend # Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL +0AFA..0AFF ; Extend # Mn [6] GUJARATI SIGN SUKUN..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE 0B01 ; Extend # Mn ORIYA SIGN CANDRABINDU 0B3C ; Extend # Mn ORIYA SIGN NUKTA 0B3E ; Extend # Mc ORIYA VOWEL SIGN AA @@ -154,7 +158,8 @@ E01F0..E0FFF ; Control # Cn [3600] <reserved-E01F0>..<reserved-E0FFF> 0CCC..0CCD ; Extend # Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA 0CD5..0CD6 ; Extend # Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK 0CE2..0CE3 ; Extend # Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL -0D01 ; Extend # Mn MALAYALAM SIGN CANDRABINDU +0D00..0D01 ; Extend # Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU +0D3B..0D3C ; Extend # Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA 0D3E ; Extend # Mc MALAYALAM VOWEL SIGN AA 0D41..0D44 ; Extend # Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM VOWEL SIGN VOCALIC RR 0D4D ; Extend # Mn MALAYALAM SIGN VIRAMA @@ -243,7 +248,7 @@ E01F0..E0FFF ; Control # Cn [3600] <reserved-E01F0>..<reserved-E0FFF> 1CED ; Extend # Mn VEDIC SIGN TIRYAK 1CF4 ; Extend # Mn VEDIC TONE CANDRA ABOVE 1CF8..1CF9 ; Extend # Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE DOUBLE RING ABOVE -1DC0..1DF5 ; Extend # Mn [54] COMBINING DOTTED GRAVE ACCENT..COMBINING UP TACK ABOVE +1DC0..1DF9 ; Extend # Mn [58] COMBINING DOTTED GRAVE ACCENT..COMBINING WIDE INVERTED BRIDGE BELOW 1DFB..1DFF ; Extend # Mn [5] COMBINING DELETION MARK..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW 200C ; Extend # Cf ZERO WIDTH NON-JOINER 20D0..20DC ; Extend # Mn [13] COMBINING LEFT HARPOON 
ABOVE..COMBINING FOUR DOTS ABOVE @@ -353,6 +358,15 @@ FF9E..FF9F ; Extend # Lm [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDT 1171D..1171F ; Extend # Mn [3] AHOM CONSONANT SIGN MEDIAL LA..AHOM CONSONANT SIGN MEDIAL LIGATING RA 11722..11725 ; Extend # Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL SIGN UU 11727..1172B ; Extend # Mn [5] AHOM VOWEL SIGN AW..AHOM SIGN KILLER +11A01..11A06 ; Extend # Mn [6] ZANABAZAR SQUARE VOWEL SIGN I..ZANABAZAR SQUARE VOWEL SIGN O +11A09..11A0A ; Extend # Mn [2] ZANABAZAR SQUARE VOWEL SIGN REVERSED I..ZANABAZAR SQUARE VOWEL LENGTH MARK +11A33..11A38 ; Extend # Mn [6] ZANABAZAR SQUARE FINAL CONSONANT MARK..ZANABAZAR SQUARE SIGN ANUSVARA +11A3B..11A3E ; Extend # Mn [4] ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA..ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA +11A47 ; Extend # Mn ZANABAZAR SQUARE SUBJOINER +11A51..11A56 ; Extend # Mn [6] SOYOMBO VOWEL SIGN I..SOYOMBO VOWEL SIGN OE +11A59..11A5B ; Extend # Mn [3] SOYOMBO VOWEL SIGN VOCALIC R..SOYOMBO VOWEL LENGTH MARK +11A8A..11A96 ; Extend # Mn [13] SOYOMBO FINAL CONSONANT SIGN G..SOYOMBO SIGN ANUSVARA +11A98..11A99 ; Extend # Mn [2] SOYOMBO GEMINATION MARK..SOYOMBO SUBJOINER 11C30..11C36 ; Extend # Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSUKI VOWEL SIGN VOCALIC L 11C38..11C3D ; Extend # Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSUKI SIGN ANUSVARA 11C3F ; Extend # Mn BHAIKSUKI SIGN VIRAMA @@ -360,6 +374,11 @@ FF9E..FF9F ; Extend # Lm [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDT 11CAA..11CB0 ; Extend # Mn [7] MARCHEN SUBJOINED LETTER RA..MARCHEN VOWEL SIGN AA 11CB2..11CB3 ; Extend # Mn [2] MARCHEN VOWEL SIGN U..MARCHEN VOWEL SIGN E 11CB5..11CB6 ; Extend # Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN SIGN CANDRABINDU +11D31..11D36 ; Extend # Mn [6] MASARAM GONDI VOWEL SIGN AA..MASARAM GONDI VOWEL SIGN VOCALIC R +11D3A ; Extend # Mn MASARAM GONDI VOWEL SIGN E +11D3C..11D3D ; Extend # Mn [2] MASARAM GONDI VOWEL SIGN AI..MASARAM GONDI VOWEL SIGN O +11D3F..11D45 ; Extend # Mn [7] MASARAM GONDI VOWEL SIGN AU..MASARAM GONDI VIRAMA +11D47 ; Extend # Mn MASARAM GONDI RA-KARA 16AF0..16AF4 ; Extend # Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE 16B30..16B36 ; Extend # Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM 16F8F..16F92 ; Extend # Mn [4] MIAO TONE RIGHT..MIAO TONE BELOW @@ -387,7 +406,7 @@ FF9E..FF9F ; Extend # Lm [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDT E0020..E007F ; Extend # Cf [96] TAG SPACE..CANCEL TAG E0100..E01EF ; Extend # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 -# Total code points: 1828 +# Total code points: 1901 # ================================================ @@ -472,6 +491,7 @@ E0100..E01EF ; Extend # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 1C34..1C35 ; SpacingMark # Mc [2] LEPCHA CONSONANT SIGN NYIN-DO..LEPCHA CONSONANT SIGN KANG 1CE1 ; SpacingMark # Mc VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA 1CF2..1CF3 ; SpacingMark # Mc [2] VEDIC SIGN ARDHAVISARGA..VEDIC SIGN ROTATED ARDHAVISARGA +1CF7 ; SpacingMark # Mc VEDIC SIGN ATIKRAMA A823..A824 ; SpacingMark # Mc [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI NAGRI VOWEL SIGN I A827 ; SpacingMark # Mc SYLOTI NAGRI VOWEL SIGN OO A880..A881 ; SpacingMark # Mc [2] SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VISARGA @@ -529,6 +549,10 @@ ABEC ; SpacingMark # Mc MEETEI MAYEK LUM IYEK 116B6 ; SpacingMark # Mc TAKRI SIGN VIRAMA 11720..11721 ; SpacingMark # Mc [2] AHOM VOWEL SIGN A..AHOM VOWEL SIGN AA 11726 ; SpacingMark # Mc AHOM VOWEL SIGN E +11A07..11A08 ; SpacingMark # Mc [2] ZANABAZAR SQUARE VOWEL SIGN 
AI..ZANABAZAR SQUARE VOWEL SIGN AU +11A39 ; SpacingMark # Mc ZANABAZAR SQUARE SIGN VISARGA +11A57..11A58 ; SpacingMark # Mc [2] SOYOMBO VOWEL SIGN AI..SOYOMBO VOWEL SIGN AU +11A97 ; SpacingMark # Mc SOYOMBO SIGN VISARGA 11C2F ; SpacingMark # Mc BHAIKSUKI VOWEL SIGN AA 11C3E ; SpacingMark # Mc BHAIKSUKI SIGN VISARGA 11CA9 ; SpacingMark # Mc MARCHEN SUBJOINED LETTER YA @@ -538,7 +562,7 @@ ABEC ; SpacingMark # Mc MEETEI MAYEK LUM IYEK 1D166 ; SpacingMark # Mc MUSICAL SYMBOL COMBINING SPRECHGESANG STEM 1D16D ; SpacingMark # Mc MUSICAL SYMBOL COMBINING AUGMENTATION DOT -# Total code points: 341 +# Total code points: 348 # ================================================ @@ -1375,8 +1399,9 @@ D789..D7A3 ; LVT # Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH 26F9 ; E_Base # So PERSON WITH BALL 270A..270D ; E_Base # So [4] RAISED FIST..WRITING HAND 1F385 ; E_Base # So FATHER CHRISTMAS -1F3C3..1F3C4 ; E_Base # So [2] RUNNER..SURFER -1F3CA..1F3CB ; E_Base # So [2] SWIMMER..WEIGHT LIFTER +1F3C2..1F3C4 ; E_Base # So [3] SNOWBOARDER..SURFER +1F3C7 ; E_Base # So HORSE RACING +1F3CA..1F3CC ; E_Base # So [3] SWIMMER..GOLFER 1F442..1F443 ; E_Base # So [2] EAR..NOSE 1F446..1F450 ; E_Base # So [11] WHITE UP POINTING BACKHAND INDEX..OPEN HANDS SIGN 1F46E ; E_Base # So POLICE OFFICER @@ -1385,7 +1410,7 @@ D789..D7A3 ; LVT # Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH 1F481..1F483 ; E_Base # So [3] INFORMATION DESK PERSON..DANCER 1F485..1F487 ; E_Base # So [3] NAIL POLISH..HAIRCUT 1F4AA ; E_Base # So FLEXED BICEPS -1F575 ; E_Base # So SLEUTH OR SPY +1F574..1F575 ; E_Base # So [2] MAN IN BUSINESS SUIT LEVITATING..SLEUTH OR SPY 1F57A ; E_Base # So MAN DANCING 1F590 ; E_Base # So RAISED HAND WITH FINGERS SPLAYED 1F595..1F596 ; E_Base # So [2] REVERSED HAND WITH MIDDLE FINGER EXTENDED..RAISED HAND WITH PART BETWEEN MIDDLE AND RING FINGERS @@ -1394,13 +1419,15 @@ D789..D7A3 ; LVT # Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH 1F6A3 ; E_Base # So ROWBOAT 1F6B4..1F6B6 ; E_Base # So [3] BICYCLIST..PEDESTRIAN 1F6C0 ; E_Base # So BATH -1F918..1F91E ; E_Base # So [7] SIGN OF THE HORNS..HAND WITH INDEX AND MIDDLE FINGERS CROSSED +1F6CC ; E_Base # So SLEEPING ACCOMMODATION +1F918..1F91C ; E_Base # So [5] SIGN OF THE HORNS..RIGHT-FACING FIST +1F91E..1F91F ; E_Base # So [2] HAND WITH INDEX AND MIDDLE FINGERS CROSSED..I LOVE YOU HAND SIGN 1F926 ; E_Base # So FACE PALM -1F930 ; E_Base # So PREGNANT WOMAN -1F933..1F939 ; E_Base # So [7] SELFIE..JUGGLING -1F93C..1F93E ; E_Base # So [3] WRESTLERS..HANDBALL +1F930..1F939 ; E_Base # So [10] PREGNANT WOMAN..JUGGLING +1F93D..1F93E ; E_Base # So [2] WATER POLO..HANDBALL +1F9D1..1F9DD ; E_Base # So [13] ADULT..ELF -# Total code points: 79 +# Total code points: 98 # ================================================ @@ -1416,11 +1443,28 @@ D789..D7A3 ; LVT # Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH # ================================================ +2640 ; Glue_After_Zwj # So FEMALE SIGN +2642 ; Glue_After_Zwj # So MALE SIGN +2695..2696 ; Glue_After_Zwj # So [2] STAFF OF AESCULAPIUS..SCALES +2708 ; Glue_After_Zwj # So AIRPLANE 2764 ; Glue_After_Zwj # So HEAVY BLACK HEART +1F308 ; Glue_After_Zwj # So RAINBOW +1F33E ; Glue_After_Zwj # So EAR OF RICE +1F373 ; Glue_After_Zwj # So COOKING +1F393 ; Glue_After_Zwj # So GRADUATION CAP +1F3A4 ; Glue_After_Zwj # So MICROPHONE +1F3A8 ; Glue_After_Zwj # So ARTIST PALETTE +1F3EB ; Glue_After_Zwj # So SCHOOL +1F3ED ; Glue_After_Zwj # So FACTORY 1F48B ; Glue_After_Zwj # So KISS MARK +1F4BB..1F4BC ; Glue_After_Zwj # So [2] PERSONAL 
COMPUTER..BRIEFCASE +1F527 ; Glue_After_Zwj # So WRENCH +1F52C ; Glue_After_Zwj # So MICROSCOPE 1F5E8 ; Glue_After_Zwj # So LEFT SPEECH BUBBLE +1F680 ; Glue_After_Zwj # So ROCKET +1F692 ; Glue_After_Zwj # So FIRE ENGINE -# Total code points: 3 +# Total code points: 22 # ================================================ diff --git a/lib/stdlib/uc_spec/PropList.txt b/lib/stdlib/uc_spec/PropList.txt index a8c0da7135..9a2d0e4b1c 100644 --- a/lib/stdlib/uc_spec/PropList.txt +++ b/lib/stdlib/uc_spec/PropList.txt @@ -1,6 +1,6 @@ -# PropList-9.0.0.txt -# Date: 2016-06-01, 10:34:30 GMT -# © 2016 Unicode®, Inc. +# PropList-10.0.0.txt +# Date: 2017-03-10, 08:25:30 GMT +# © 2017 Unicode®, Inc. # Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. # For terms of use, see http://www.unicode.org/terms_of_use.html # @@ -199,6 +199,9 @@ FF64 ; Terminal_Punctuation # Po HALFWIDTH IDEOGRAPHIC COMMA 115C9..115D7 ; Terminal_Punctuation # Po [15] SIDDHAM END OF TEXT MARK..SIDDHAM SECTION MARK WITH CIRCLES AND FOUR ENCLOSURES 11641..11642 ; Terminal_Punctuation # Po [2] MODI DANDA..MODI DOUBLE DANDA 1173C..1173E ; Terminal_Punctuation # Po [3] AHOM SIGN SMALL SECTION..AHOM SIGN RULAI +11A42..11A43 ; Terminal_Punctuation # Po [2] ZANABAZAR SQUARE MARK SHAD..ZANABAZAR SQUARE MARK DOUBLE SHAD +11A9B..11A9C ; Terminal_Punctuation # Po [2] SOYOMBO MARK SHAD..SOYOMBO MARK DOUBLE SHAD +11AA1..11AA2 ; Terminal_Punctuation # Po [2] SOYOMBO TERMINAL MARK-1..SOYOMBO TERMINAL MARK-2 11C41..11C43 ; Terminal_Punctuation # Po [3] BHAIKSUKI DANDA..BHAIKSUKI WORD SEPARATOR 11C71 ; Terminal_Punctuation # Po MARCHEN MARK SHAD 12470..12474 ; Terminal_Punctuation # Po [5] CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD DIVIDER..CUNEIFORM PUNCTUATION SIGN DIAGONAL QUADCOLON @@ -209,7 +212,7 @@ FF64 ; Terminal_Punctuation # Po HALFWIDTH IDEOGRAPHIC COMMA 1BC9F ; Terminal_Punctuation # Po DUPLOYAN PUNCTUATION CHINOOK FULL STOP 1DA87..1DA8A ; Terminal_Punctuation # Po [4] SIGNWRITING COMMA..SIGNWRITING COLON -# Total code points: 246 +# Total code points: 252 # ================================================ @@ -471,6 +474,7 @@ FF41..FF46 ; Hex_Digit # L& [6] FULLWIDTH LATIN SMALL LETTER A..FULLWIDTH L 0AC9 ; Other_Alphabetic # Mc GUJARATI VOWEL SIGN CANDRA O 0ACB..0ACC ; Other_Alphabetic # Mc [2] GUJARATI VOWEL SIGN O..GUJARATI VOWEL SIGN AU 0AE2..0AE3 ; Other_Alphabetic # Mn [2] GUJARATI VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL +0AFA..0AFC ; Other_Alphabetic # Mn [3] GUJARATI SIGN SUKUN..GUJARATI SIGN MADDAH 0B01 ; Other_Alphabetic # Mn ORIYA SIGN CANDRABINDU 0B02..0B03 ; Other_Alphabetic # Mc [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VISARGA 0B3E ; Other_Alphabetic # Mc ORIYA VOWEL SIGN AA @@ -508,7 +512,7 @@ FF41..FF46 ; Hex_Digit # L& [6] FULLWIDTH LATIN SMALL LETTER A..FULLWIDTH L 0CCC ; Other_Alphabetic # Mn KANNADA VOWEL SIGN AU 0CD5..0CD6 ; Other_Alphabetic # Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK 0CE2..0CE3 ; Other_Alphabetic # Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL -0D01 ; Other_Alphabetic # Mn MALAYALAM SIGN CANDRABINDU +0D00..0D01 ; Other_Alphabetic # Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU 0D02..0D03 ; Other_Alphabetic # Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA 0D3E..0D40 ; Other_Alphabetic # Mc [3] MALAYALAM VOWEL SIGN AA..MALAYALAM VOWEL SIGN II 0D41..0D44 ; Other_Alphabetic # Mn [4] MALAYALAM VOWEL SIGN U..MALAYALAM VOWEL SIGN VOCALIC RR @@ -726,6 +730,17 @@ FB1E ; 
Other_Alphabetic # Mn HEBREW POINT JUDEO-SPANISH VARIKA 11722..11725 ; Other_Alphabetic # Mn [4] AHOM VOWEL SIGN I..AHOM VOWEL SIGN UU 11726 ; Other_Alphabetic # Mc AHOM VOWEL SIGN E 11727..1172A ; Other_Alphabetic # Mn [4] AHOM VOWEL SIGN AW..AHOM VOWEL SIGN AM +11A01..11A06 ; Other_Alphabetic # Mn [6] ZANABAZAR SQUARE VOWEL SIGN I..ZANABAZAR SQUARE VOWEL SIGN O +11A07..11A08 ; Other_Alphabetic # Mc [2] ZANABAZAR SQUARE VOWEL SIGN AI..ZANABAZAR SQUARE VOWEL SIGN AU +11A09..11A0A ; Other_Alphabetic # Mn [2] ZANABAZAR SQUARE VOWEL SIGN REVERSED I..ZANABAZAR SQUARE VOWEL LENGTH MARK +11A35..11A38 ; Other_Alphabetic # Mn [4] ZANABAZAR SQUARE SIGN CANDRABINDU..ZANABAZAR SQUARE SIGN ANUSVARA +11A39 ; Other_Alphabetic # Mc ZANABAZAR SQUARE SIGN VISARGA +11A3B..11A3E ; Other_Alphabetic # Mn [4] ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA..ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA +11A51..11A56 ; Other_Alphabetic # Mn [6] SOYOMBO VOWEL SIGN I..SOYOMBO VOWEL SIGN OE +11A57..11A58 ; Other_Alphabetic # Mc [2] SOYOMBO VOWEL SIGN AI..SOYOMBO VOWEL SIGN AU +11A59..11A5B ; Other_Alphabetic # Mn [3] SOYOMBO VOWEL SIGN VOCALIC R..SOYOMBO VOWEL LENGTH MARK +11A8A..11A96 ; Other_Alphabetic # Mn [13] SOYOMBO FINAL CONSONANT SIGN G..SOYOMBO SIGN ANUSVARA +11A97 ; Other_Alphabetic # Mc SOYOMBO SIGN VISARGA 11C2F ; Other_Alphabetic # Mc BHAIKSUKI VOWEL SIGN AA 11C30..11C36 ; Other_Alphabetic # Mn [7] BHAIKSUKI VOWEL SIGN I..BHAIKSUKI VOWEL SIGN VOCALIC L 11C38..11C3D ; Other_Alphabetic # Mn [6] BHAIKSUKI VOWEL SIGN E..BHAIKSUKI SIGN ANUSVARA @@ -737,6 +752,12 @@ FB1E ; Other_Alphabetic # Mn HEBREW POINT JUDEO-SPANISH VARIKA 11CB2..11CB3 ; Other_Alphabetic # Mn [2] MARCHEN VOWEL SIGN U..MARCHEN VOWEL SIGN E 11CB4 ; Other_Alphabetic # Mc MARCHEN VOWEL SIGN O 11CB5..11CB6 ; Other_Alphabetic # Mn [2] MARCHEN SIGN ANUSVARA..MARCHEN SIGN CANDRABINDU +11D31..11D36 ; Other_Alphabetic # Mn [6] MASARAM GONDI VOWEL SIGN AA..MASARAM GONDI VOWEL SIGN VOCALIC R +11D3A ; Other_Alphabetic # Mn MASARAM GONDI VOWEL SIGN E +11D3C..11D3D ; Other_Alphabetic # Mn [2] MASARAM GONDI VOWEL SIGN AI..MASARAM GONDI VOWEL SIGN O +11D3F..11D41 ; Other_Alphabetic # Mn [3] MASARAM GONDI VOWEL SIGN AU..MASARAM GONDI SIGN VISARGA +11D43 ; Other_Alphabetic # Mn MASARAM GONDI SIGN CANDRA +11D47 ; Other_Alphabetic # Mn MASARAM GONDI RA-KARA 16B30..16B36 ; Other_Alphabetic # Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM 16F51..16F7E ; Other_Alphabetic # Mc [46] MIAO SIGN ASPIRATION..MIAO VOWEL SIGN NG 1BC9E ; Other_Alphabetic # Mn DUPLOYAN DOUBLE MARK @@ -750,7 +771,7 @@ FB1E ; Other_Alphabetic # Mn HEBREW POINT JUDEO-SPANISH VARIKA 1F150..1F169 ; Other_Alphabetic # So [26] NEGATIVE CIRCLED LATIN CAPITAL LETTER A..NEGATIVE CIRCLED LATIN CAPITAL LETTER Z 1F170..1F189 ; Other_Alphabetic # So [26] NEGATIVE SQUARED LATIN CAPITAL LETTER A..NEGATIVE SQUARED LATIN CAPITAL LETTER Z -# Total code points: 1238 +# Total code points: 1300 # ================================================ @@ -759,18 +780,20 @@ FB1E ; Other_Alphabetic # Mn HEBREW POINT JUDEO-SPANISH VARIKA 3021..3029 ; Ideographic # Nl [9] HANGZHOU NUMERAL ONE..HANGZHOU NUMERAL NINE 3038..303A ; Ideographic # Nl [3] HANGZHOU NUMERAL TEN..HANGZHOU NUMERAL THIRTY 3400..4DB5 ; Ideographic # Lo [6582] CJK UNIFIED IDEOGRAPH-3400..CJK UNIFIED IDEOGRAPH-4DB5 -4E00..9FD5 ; Ideographic # Lo [20950] CJK UNIFIED IDEOGRAPH-4E00..CJK UNIFIED IDEOGRAPH-9FD5 +4E00..9FEA ; Ideographic # Lo [20971] CJK UNIFIED IDEOGRAPH-4E00..CJK UNIFIED IDEOGRAPH-9FEA F900..FA6D ; Ideographic # Lo [366] CJK 
COMPATIBILITY IDEOGRAPH-F900..CJK COMPATIBILITY IDEOGRAPH-FA6D FA70..FAD9 ; Ideographic # Lo [106] CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COMPATIBILITY IDEOGRAPH-FAD9 17000..187EC ; Ideographic # Lo [6125] TANGUT IDEOGRAPH-17000..TANGUT IDEOGRAPH-187EC 18800..18AF2 ; Ideographic # Lo [755] TANGUT COMPONENT-001..TANGUT COMPONENT-755 +1B170..1B2FB ; Ideographic # Lo [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB 20000..2A6D6 ; Ideographic # Lo [42711] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6D6 2A700..2B734 ; Ideographic # Lo [4149] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B734 2B740..2B81D ; Ideographic # Lo [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D 2B820..2CEA1 ; Ideographic # Lo [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 +2CEB0..2EBE0 ; Ideographic # Lo [7473] CJK UNIFIED IDEOGRAPH-2CEB0..CJK UNIFIED IDEOGRAPH-2EBE0 2F800..2FA1D ; Ideographic # Lo [542] CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPATIBILITY IDEOGRAPH-2FA1D -# Total code points: 88284 +# Total code points: 96174 # ================================================ @@ -826,12 +849,14 @@ FA70..FAD9 ; Ideographic # Lo [106] CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COM 0A4D ; Diacritic # Mn GURMUKHI SIGN VIRAMA 0ABC ; Diacritic # Mn GUJARATI SIGN NUKTA 0ACD ; Diacritic # Mn GUJARATI SIGN VIRAMA +0AFD..0AFF ; Diacritic # Mn [3] GUJARATI SIGN THREE-DOT NUKTA ABOVE..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE 0B3C ; Diacritic # Mn ORIYA SIGN NUKTA 0B4D ; Diacritic # Mn ORIYA SIGN VIRAMA 0BCD ; Diacritic # Mn TAMIL SIGN VIRAMA 0C4D ; Diacritic # Mn TELUGU SIGN VIRAMA 0CBC ; Diacritic # Mn KANNADA SIGN NUKTA 0CCD ; Diacritic # Mn KANNADA SIGN VIRAMA +0D3B..0D3C ; Diacritic # Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA 0D4D ; Diacritic # Mn MALAYALAM SIGN VIRAMA 0DCA ; Diacritic # Mn SINHALA SIGN AL-LAKUNA 0E47..0E4C ; Diacritic # Mn [6] THAI CHARACTER MAITAIKHU..THAI CHARACTER THANTHAKHAT @@ -871,10 +896,11 @@ FA70..FAD9 ; Ideographic # Lo [106] CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COM 1CE2..1CE8 ; Diacritic # Mn [7] VEDIC SIGN VISARGA SVARITA..VEDIC SIGN VISARGA ANUDATTA WITH TAIL 1CED ; Diacritic # Mn VEDIC SIGN TIRYAK 1CF4 ; Diacritic # Mn VEDIC TONE CANDRA ABOVE +1CF7 ; Diacritic # Mc VEDIC SIGN ATIKRAMA 1CF8..1CF9 ; Diacritic # Mn [2] VEDIC TONE RING ABOVE..VEDIC TONE DOUBLE RING ABOVE 1D2C..1D6A ; Diacritic # Lm [63] MODIFIER LETTER CAPITAL A..GREEK SUBSCRIPT SMALL LETTER CHI 1DC4..1DCF ; Diacritic # Mn [12] COMBINING MACRON-ACUTE..COMBINING ZIGZAG BELOW -1DF5 ; Diacritic # Mn COMBINING UP TACK ABOVE +1DF5..1DF9 ; Diacritic # Mn [5] COMBINING UP TACK ABOVE..COMBINING WIDE INVERTED BRIDGE BELOW 1DFD..1DFF ; Diacritic # Mn [3] COMBINING ALMOST EQUAL TO BELOW..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW 1FBD ; Diacritic # Sk GREEK KORONIS 1FBF..1FC1 ; Diacritic # Sk [3] GREEK PSILI..GREEK DIALYTIKA AND PERISPOMENI @@ -947,7 +973,12 @@ FFE3 ; Diacritic # Sk FULLWIDTH MACRON 116B6 ; Diacritic # Mc TAKRI SIGN VIRAMA 116B7 ; Diacritic # Mn TAKRI SIGN NUKTA 1172B ; Diacritic # Mn AHOM SIGN KILLER +11A34 ; Diacritic # Mn ZANABAZAR SQUARE SIGN VIRAMA +11A47 ; Diacritic # Mn ZANABAZAR SQUARE SUBJOINER +11A99 ; Diacritic # Mn SOYOMBO SUBJOINER 11C3F ; Diacritic # Mn BHAIKSUKI SIGN VIRAMA +11D42 ; Diacritic # Mn MASARAM GONDI SIGN NUKTA +11D44..11D45 ; Diacritic # Mn [2] MASARAM GONDI SIGN HALANTA..MASARAM GONDI VIRAMA 16AF0..16AF4 ; Diacritic # Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE 16F8F..16F92 ; Diacritic # Mn 
[4] MIAO TONE RIGHT..MIAO TONE BELOW 16F93..16F9F ; Diacritic # Lm [13] MIAO LETTER TONE-2..MIAO LETTER REFORMED TONE-8 @@ -960,7 +991,7 @@ FFE3 ; Diacritic # Sk FULLWIDTH MACRON 1E944..1E946 ; Diacritic # Mn [3] ADLAM ALIF LENGTHENER..ADLAM GEMINATION MARK 1E948..1E94A ; Diacritic # Mn [3] ADLAM CONSONANT MODIFIER..ADLAM NUKTA -# Total code points: 782 +# Total code points: 798 # ================================================ @@ -989,11 +1020,12 @@ AAF3..AAF4 ; Extender # Lm [2] MEETEI MAYEK SYLLABLE REPETITION MARK..MEETE FF70 ; Extender # Lm HALFWIDTH KATAKANA-HIRAGANA PROLONGED SOUND MARK 1135D ; Extender # Lo GRANTHA SIGN PLUTA 115C6..115C8 ; Extender # Po [3] SIDDHAM REPETITION MARK-1..SIDDHAM REPETITION MARK-3 +11A98 ; Extender # Mn SOYOMBO GEMINATION MARK 16B42..16B43 ; Extender # Lm [2] PAHAWH HMONG SIGN VOS NRUA..PAHAWH HMONG SIGN IB YAM -16FE0 ; Extender # Lm TANGUT ITERATION MARK +16FE0..16FE1 ; Extender # Lm [2] TANGUT ITERATION MARK..NUSHU ITERATION MARK 1E944..1E946 ; Extender # Mn [3] ADLAM ALIF LENGTHENER..ADLAM GEMINATION MARK -# Total code points: 42 +# Total code points: 44 # ================================================ @@ -1105,7 +1137,7 @@ E0020..E007F ; Other_Grapheme_Extend # Cf [96] TAG SPACE..CANCEL TAG # ================================================ 3400..4DB5 ; Unified_Ideograph # Lo [6582] CJK UNIFIED IDEOGRAPH-3400..CJK UNIFIED IDEOGRAPH-4DB5 -4E00..9FD5 ; Unified_Ideograph # Lo [20950] CJK UNIFIED IDEOGRAPH-4E00..CJK UNIFIED IDEOGRAPH-9FD5 +4E00..9FEA ; Unified_Ideograph # Lo [20971] CJK UNIFIED IDEOGRAPH-4E00..CJK UNIFIED IDEOGRAPH-9FEA FA0E..FA0F ; Unified_Ideograph # Lo [2] CJK COMPATIBILITY IDEOGRAPH-FA0E..CJK COMPATIBILITY IDEOGRAPH-FA0F FA11 ; Unified_Ideograph # Lo CJK COMPATIBILITY IDEOGRAPH-FA11 FA13..FA14 ; Unified_Ideograph # Lo [2] CJK COMPATIBILITY IDEOGRAPH-FA13..CJK COMPATIBILITY IDEOGRAPH-FA14 @@ -1117,8 +1149,9 @@ FA27..FA29 ; Unified_Ideograph # Lo [3] CJK COMPATIBILITY IDEOGRAPH-FA27..C 2A700..2B734 ; Unified_Ideograph # Lo [4149] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B734 2B740..2B81D ; Unified_Ideograph # Lo [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D 2B820..2CEA1 ; Unified_Ideograph # Lo [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 +2CEB0..2EBE0 ; Unified_Ideograph # Lo [7473] CJK UNIFIED IDEOGRAPH-2CEB0..CJK UNIFIED IDEOGRAPH-2EBE0 -# Total code points: 80388 +# Total code points: 87882 # ================================================ @@ -1277,6 +1310,8 @@ FF61 ; Sentence_Terminal # Po HALFWIDTH IDEOGRAPHIC FULL STOP 115C9..115D7 ; Sentence_Terminal # Po [15] SIDDHAM END OF TEXT MARK..SIDDHAM SECTION MARK WITH CIRCLES AND FOUR ENCLOSURES 11641..11642 ; Sentence_Terminal # Po [2] MODI DANDA..MODI DOUBLE DANDA 1173C..1173E ; Sentence_Terminal # Po [3] AHOM SIGN SMALL SECTION..AHOM SIGN RULAI +11A42..11A43 ; Sentence_Terminal # Po [2] ZANABAZAR SQUARE MARK SHAD..ZANABAZAR SQUARE MARK DOUBLE SHAD +11A9B..11A9C ; Sentence_Terminal # Po [2] SOYOMBO MARK SHAD..SOYOMBO MARK DOUBLE SHAD 11C41..11C42 ; Sentence_Terminal # Po [2] BHAIKSUKI DANDA..BHAIKSUKI DOUBLE DANDA 16A6E..16A6F ; Sentence_Terminal # Po [2] MRO DANDA..MRO DOUBLE DANDA 16AF5 ; Sentence_Terminal # Po BASSA VAH FULL STOP @@ -1285,7 +1320,7 @@ FF61 ; Sentence_Terminal # Po HALFWIDTH IDEOGRAPHIC FULL STOP 1BC9F ; Sentence_Terminal # Po DUPLOYAN PUNCTUATION CHINOOK FULL STOP 1DA88 ; Sentence_Terminal # Po SIGNWRITING FULL STOP -# Total code points: 124 +# Total code points: 128 # 
================================================ @@ -1402,9 +1437,7 @@ E0100..E01EF ; Variation_Selector # Mn [240] VARIATION SELECTOR-17..VARIATION S 239B..23B3 ; Pattern_Syntax # Sm [25] LEFT PARENTHESIS UPPER HOOK..SUMMATION BOTTOM 23B4..23DB ; Pattern_Syntax # So [40] TOP SQUARE BRACKET..FUSE 23DC..23E1 ; Pattern_Syntax # Sm [6] TOP PARENTHESIS..BOTTOM TORTOISE SHELL BRACKET -23E2..23FE ; Pattern_Syntax # So [29] WHITE TRAPEZIUM..POWER SLEEP SYMBOL -23FF ; Pattern_Syntax # Cn <reserved-23FF> -2400..2426 ; Pattern_Syntax # So [39] SYMBOL FOR NULL..SYMBOL FOR SUBSTITUTE FORM TWO +23E2..2426 ; Pattern_Syntax # So [69] WHITE TRAPEZIUM..SYMBOL FOR SUBSTITUTE FORM TWO 2427..243F ; Pattern_Syntax # Cn [25] <reserved-2427>..<reserved-243F> 2440..244A ; Pattern_Syntax # So [11] OCR HOOK..OCR DOUBLE BACKSLASH 244B..245F ; Pattern_Syntax # Cn [21] <reserved-244B>..<reserved-245F> @@ -1492,8 +1525,8 @@ E0100..E01EF ; Variation_Selector # Mn [240] VARIATION SELECTOR-17..VARIATION S 2BBA..2BBC ; Pattern_Syntax # Cn [3] <reserved-2BBA>..<reserved-2BBC> 2BBD..2BC8 ; Pattern_Syntax # So [12] BALLOT BOX WITH LIGHT X..BLACK MEDIUM RIGHT-POINTING TRIANGLE CENTRED 2BC9 ; Pattern_Syntax # Cn <reserved-2BC9> -2BCA..2BD1 ; Pattern_Syntax # So [8] TOP HALF BLACK CIRCLE..UNCERTAINTY SIGN -2BD2..2BEB ; Pattern_Syntax # Cn [26] <reserved-2BD2>..<reserved-2BEB> +2BCA..2BD2 ; Pattern_Syntax # So [9] TOP HALF BLACK CIRCLE..GROUP MARK +2BD3..2BEB ; Pattern_Syntax # Cn [25] <reserved-2BD3>..<reserved-2BEB> 2BEC..2BEF ; Pattern_Syntax # So [4] LEFTWARDS TWO-HEADED ARROW WITH TRIANGLE ARROWHEADS..DOWNWARDS TWO-HEADED ARROW WITH TRIANGLE ARROWHEADS 2BF0..2BFF ; Pattern_Syntax # Cn [16] <reserved-2BF0>..<reserved-2BFF> 2E00..2E01 ; Pattern_Syntax # Po [2] RIGHT ANGLE SUBSTITUTION MARKER..RIGHT ANGLE DOTTED SUBSTITUTION MARKER @@ -1533,8 +1566,8 @@ E0100..E01EF ; Variation_Selector # Mn [240] VARIATION SELECTOR-17..VARIATION S 2E40 ; Pattern_Syntax # Pd DOUBLE HYPHEN 2E41 ; Pattern_Syntax # Po REVERSED COMMA 2E42 ; Pattern_Syntax # Ps DOUBLE LOW-REVERSED-9 QUOTATION MARK -2E43..2E44 ; Pattern_Syntax # Po [2] DASH WITH LEFT UPTURN..DOUBLE SUSPENSION MARK -2E45..2E7F ; Pattern_Syntax # Cn [59] <reserved-2E45>..<reserved-2E7F> +2E43..2E49 ; Pattern_Syntax # Po [7] DASH WITH LEFT UPTURN..DOUBLE STACKED COMMA +2E4A..2E7F ; Pattern_Syntax # Cn [54] <reserved-2E4A>..<reserved-2E7F> 3001..3003 ; Pattern_Syntax # Po [3] IDEOGRAPHIC COMMA..DITTO MARK 3008 ; Pattern_Syntax # Ps LEFT ANGLE BRACKET 3009 ; Pattern_Syntax # Pe RIGHT ANGLE BRACKET @@ -1576,4 +1609,10 @@ FE45..FE46 ; Pattern_Syntax # Po [2] SESAME DOT..WHITE SESAME DOT # Total code points: 10 +# ================================================ + +1F1E6..1F1FF ; Regional_Indicator # So [26] REGIONAL INDICATOR SYMBOL LETTER A..REGIONAL INDICATOR SYMBOL LETTER Z + +# Total code points: 26 + # EOF diff --git a/lib/stdlib/uc_spec/SpecialCasing.txt b/lib/stdlib/uc_spec/SpecialCasing.txt index b23fa7f768..b9ba0d81c1 100644 --- a/lib/stdlib/uc_spec/SpecialCasing.txt +++ b/lib/stdlib/uc_spec/SpecialCasing.txt @@ -1,6 +1,6 @@ -# SpecialCasing-9.0.0.txt -# Date: 2016-03-02, 18:55:13 GMT -# © 2016 Unicode®, Inc. +# SpecialCasing-10.0.0.txt +# Date: 2017-04-14, 05:40:43 GMT +# © 2017 Unicode®, Inc. # Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. 
# For terms of use, see http://www.unicode.org/terms_of_use.html # @@ -197,7 +197,7 @@ FB17; FB17; 0544 056D; 0544 053D; # ARMENIAN SMALL LIGATURE MEN XEH # ================================================================================ # Conditional Mappings -# The remainder of this file provides conditional casing data used to produce +# The remainder of this file provides conditional casing data used to produce # full case mappings. # ================================================================================ # Language-Insensitive Mappings diff --git a/lib/stdlib/uc_spec/UnicodeData.txt b/lib/stdlib/uc_spec/UnicodeData.txt index a756976461..d89c64f526 100644 --- a/lib/stdlib/uc_spec/UnicodeData.txt +++ b/lib/stdlib/uc_spec/UnicodeData.txt @@ -2072,6 +2072,17 @@ 085A;MANDAIC VOCALIZATION MARK;Mn;220;NSM;;;;;N;;;;; 085B;MANDAIC GEMINATION MARK;Mn;220;NSM;;;;;N;;;;; 085E;MANDAIC PUNCTUATION;Po;0;R;;;;;N;;;;; +0860;SYRIAC LETTER MALAYALAM NGA;Lo;0;AL;;;;;N;;;;; +0861;SYRIAC LETTER MALAYALAM JA;Lo;0;AL;;;;;N;;;;; +0862;SYRIAC LETTER MALAYALAM NYA;Lo;0;AL;;;;;N;;;;; +0863;SYRIAC LETTER MALAYALAM TTA;Lo;0;AL;;;;;N;;;;; +0864;SYRIAC LETTER MALAYALAM NNA;Lo;0;AL;;;;;N;;;;; +0865;SYRIAC LETTER MALAYALAM NNNA;Lo;0;AL;;;;;N;;;;; +0866;SYRIAC LETTER MALAYALAM BHA;Lo;0;AL;;;;;N;;;;; +0867;SYRIAC LETTER MALAYALAM RA;Lo;0;AL;;;;;N;;;;; +0868;SYRIAC LETTER MALAYALAM LLA;Lo;0;AL;;;;;N;;;;; +0869;SYRIAC LETTER MALAYALAM LLLA;Lo;0;AL;;;;;N;;;;; +086A;SYRIAC LETTER MALAYALAM SSA;Lo;0;AL;;;;;N;;;;; 08A0;ARABIC LETTER BEH WITH SMALL V BELOW;Lo;0;AL;;;;;N;;;;; 08A1;ARABIC LETTER BEH WITH HAMZA ABOVE;Lo;0;AL;;;;;N;;;;; 08A2;ARABIC LETTER JEEM WITH TWO DOTS ABOVE;Lo;0;AL;;;;;N;;;;; @@ -2366,6 +2377,8 @@ 09F9;BENGALI CURRENCY DENOMINATOR SIXTEEN;No;0;L;;;;16;N;;;;; 09FA;BENGALI ISSHAR;So;0;L;;;;;N;;;;; 09FB;BENGALI GANDA MARK;Sc;0;ET;;;;;N;;;;; +09FC;BENGALI LETTER VEDIC ANUSVARA;Lo;0;L;;;;;N;;;;; +09FD;BENGALI ABBREVIATION SIGN;Po;0;L;;;;;N;;;;; 0A01;GURMUKHI SIGN ADAK BINDI;Mn;0;NSM;;;;;N;;;;; 0A02;GURMUKHI SIGN BINDI;Mn;0;NSM;;;;;N;;;;; 0A03;GURMUKHI SIGN VISARGA;Mc;0;L;;;;;N;;;;; @@ -2530,6 +2543,12 @@ 0AF0;GUJARATI ABBREVIATION SIGN;Po;0;L;;;;;N;;;;; 0AF1;GUJARATI RUPEE SIGN;Sc;0;ET;;;;;N;;;;; 0AF9;GUJARATI LETTER ZHA;Lo;0;L;;;;;N;;;;; +0AFA;GUJARATI SIGN SUKUN;Mn;0;NSM;;;;;N;;;;; +0AFB;GUJARATI SIGN SHADDA;Mn;0;NSM;;;;;N;;;;; +0AFC;GUJARATI SIGN MADDAH;Mn;0;NSM;;;;;N;;;;; +0AFD;GUJARATI SIGN THREE-DOT NUKTA ABOVE;Mn;0;NSM;;;;;N;;;;; +0AFE;GUJARATI SIGN CIRCLE NUKTA ABOVE;Mn;0;NSM;;;;;N;;;;; +0AFF;GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE;Mn;0;NSM;;;;;N;;;;; 0B01;ORIYA SIGN CANDRABINDU;Mn;0;NSM;;;;;N;;;;; 0B02;ORIYA SIGN ANUSVARA;Mc;0;L;;;;;N;;;;; 0B03;ORIYA SIGN VISARGA;Mc;0;L;;;;;N;;;;; @@ -2876,6 +2895,7 @@ 0CEF;KANNADA DIGIT NINE;Nd;0;L;;9;9;9;N;;;;; 0CF1;KANNADA SIGN JIHVAMULIYA;Lo;0;L;;;;;N;;;;; 0CF2;KANNADA SIGN UPADHMANIYA;Lo;0;L;;;;;N;;;;; +0D00;MALAYALAM SIGN COMBINING ANUSVARA ABOVE;Mn;0;NSM;;;;;N;;;;; 0D01;MALAYALAM SIGN CANDRABINDU;Mn;0;NSM;;;;;N;;;;; 0D02;MALAYALAM SIGN ANUSVARA;Mc;0;L;;;;;N;;;;; 0D03;MALAYALAM SIGN VISARGA;Mc;0;L;;;;;N;;;;; @@ -2931,6 +2951,8 @@ 0D38;MALAYALAM LETTER SA;Lo;0;L;;;;;N;;;;; 0D39;MALAYALAM LETTER HA;Lo;0;L;;;;;N;;;;; 0D3A;MALAYALAM LETTER TTTA;Lo;0;L;;;;;N;;;;; +0D3B;MALAYALAM SIGN VERTICAL BAR VIRAMA;Mn;9;NSM;;;;;N;;;;; +0D3C;MALAYALAM SIGN CIRCULAR VIRAMA;Mn;9;NSM;;;;;N;;;;; 0D3D;MALAYALAM SIGN AVAGRAHA;Lo;0;L;;;;;N;;;;; 0D3E;MALAYALAM VOWEL SIGN AA;Mc;0;L;;;;;N;;;;; 0D3F;MALAYALAM VOWEL SIGN I;Mc;0;L;;;;;N;;;;; @@ -6413,6 +6435,7 @@ 1CF4;VEDIC TONE CANDRA 
ABOVE;Mn;230;NSM;;;;;N;;;;; 1CF5;VEDIC SIGN JIHVAMULIYA;Lo;0;L;;;;;N;;;;; 1CF6;VEDIC SIGN UPADHMANIYA;Lo;0;L;;;;;N;;;;; +1CF7;VEDIC SIGN ATIKRAMA;Mc;0;L;;;;;N;;;;; 1CF8;VEDIC TONE RING ABOVE;Mn;230;NSM;;;;;N;;;;; 1CF9;VEDIC TONE DOUBLE RING ABOVE;Mn;230;NSM;;;;;N;;;;; 1D00;LATIN LETTER SMALL CAPITAL A;Ll;0;L;;;;;N;;;;; @@ -6661,6 +6684,10 @@ 1DF3;COMBINING LATIN SMALL LETTER O WITH DIAERESIS;Mn;230;NSM;;;;;N;;;;; 1DF4;COMBINING LATIN SMALL LETTER U WITH DIAERESIS;Mn;230;NSM;;;;;N;;;;; 1DF5;COMBINING UP TACK ABOVE;Mn;230;NSM;;;;;N;;;;; +1DF6;COMBINING KAVYKA ABOVE RIGHT;Mn;232;NSM;;;;;N;;;;; +1DF7;COMBINING KAVYKA ABOVE LEFT;Mn;228;NSM;;;;;N;;;;; +1DF8;COMBINING DOT ABOVE LEFT;Mn;228;NSM;;;;;N;;;;; +1DF9;COMBINING WIDE INVERTED BRIDGE BELOW;Mn;220;NSM;;;;;N;;;;; 1DFB;COMBINING DELETION MARK;Mn;230;NSM;;;;;N;;;;; 1DFC;COMBINING DOUBLE INVERTED BREVE BELOW;Mn;233;NSM;;;;;N;;;;; 1DFD;COMBINING ALMOST EQUAL TO BELOW;Mn;220;NSM;;;;;N;;;;; @@ -7339,6 +7366,7 @@ 20BC;MANAT SIGN;Sc;0;ET;;;;;N;;;;; 20BD;RUBLE SIGN;Sc;0;ET;;;;;N;;;;; 20BE;LARI SIGN;Sc;0;ET;;;;;N;;;;; +20BF;BITCOIN SIGN;Sc;0;ET;;;;;N;;;;; 20D0;COMBINING LEFT HARPOON ABOVE;Mn;230;NSM;;;;;N;NON-SPACING LEFT HARPOON ABOVE;;;; 20D1;COMBINING RIGHT HARPOON ABOVE;Mn;230;NSM;;;;;N;NON-SPACING RIGHT HARPOON ABOVE;;;; 20D2;COMBINING LONG VERTICAL LINE OVERLAY;Mn;1;NSM;;;;;N;NON-SPACING LONG VERTICAL BAR OVERLAY;;;; @@ -8135,6 +8163,7 @@ 23FC;POWER ON-OFF SYMBOL;So;0;ON;;;;;N;;;;; 23FD;POWER ON SYMBOL;So;0;ON;;;;;N;;;;; 23FE;POWER SLEEP SYMBOL;So;0;ON;;;;;N;;;;; +23FF;OBSERVER EYE SYMBOL;So;0;ON;;;;;N;;;;; 2400;SYMBOL FOR NULL;So;0;ON;;;;;N;GRAPHIC FOR NULL;;;; 2401;SYMBOL FOR START OF HEADING;So;0;ON;;;;;N;GRAPHIC FOR START OF HEADING;;;; 2402;SYMBOL FOR START OF TEXT;So;0;ON;;;;;N;GRAPHIC FOR START OF TEXT;;;; @@ -10083,6 +10112,7 @@ 2BCF;ROTATED WHITE FOUR POINTED CUSP;So;0;ON;;;;;N;;;;; 2BD0;SQUARE POSITION INDICATOR;So;0;ON;;;;;N;;;;; 2BD1;UNCERTAINTY SIGN;So;0;ON;;;;;N;;;;; +2BD2;GROUP MARK;So;0;ON;;;;;N;;;;; 2BEC;LEFTWARDS TWO-HEADED ARROW WITH TRIANGLE ARROWHEADS;So;0;ON;;;;;N;;;;; 2BED;UPWARDS TWO-HEADED ARROW WITH TRIANGLE ARROWHEADS;So;0;ON;;;;;N;;;;; 2BEE;RIGHTWARDS TWO-HEADED ARROW WITH TRIANGLE ARROWHEADS;So;0;ON;;;;;N;;;;; @@ -10615,6 +10645,11 @@ 2E42;DOUBLE LOW-REVERSED-9 QUOTATION MARK;Ps;0;ON;;;;;N;;;;; 2E43;DASH WITH LEFT UPTURN;Po;0;ON;;;;;N;;;;; 2E44;DOUBLE SUSPENSION MARK;Po;0;ON;;;;;N;;;;; +2E45;INVERTED LOW KAVYKA;Po;0;ON;;;;;N;;;;; +2E46;INVERTED LOW KAVYKA WITH KAVYKA ABOVE;Po;0;ON;;;;;N;;;;; +2E47;LOW KAVYKA;Po;0;ON;;;;;N;;;;; +2E48;LOW KAVYKA WITH DOT;Po;0;ON;;;;;N;;;;; +2E49;DOUBLE STACKED COMMA;Po;0;ON;;;;;N;;;;; 2E80;CJK RADICAL REPEAT;So;0;ON;;;;;N;;;;; 2E81;CJK RADICAL CLIFF;So;0;ON;;;;;N;;;;; 2E82;CJK RADICAL SECOND ONE;So;0;ON;;;;;N;;;;; @@ -11250,6 +11285,7 @@ 312B;BOPOMOFO LETTER NG;Lo;0;L;;;;;N;;;;; 312C;BOPOMOFO LETTER GN;Lo;0;L;;;;;N;;;;; 312D;BOPOMOFO LETTER IH;Lo;0;L;;;;;N;;;;; +312E;BOPOMOFO LETTER O WITH DOT ABOVE;Lo;0;L;;;;;N;;;;; 3131;HANGUL LETTER KIYEOK;Lo;0;L;<compat> 1100;;;;N;HANGUL LETTER GIYEOG;;;; 3132;HANGUL LETTER SSANGKIYEOK;Lo;0;L;<compat> 1101;;;;N;HANGUL LETTER SSANG GIYEOG;;;; 3133;HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; @@ -12016,7 +12052,7 @@ 4DFE;HEXAGRAM FOR AFTER COMPLETION;So;0;ON;;;;;N;;;;; 4DFF;HEXAGRAM FOR BEFORE COMPLETION;So;0;ON;;;;;N;;;;; 4E00;<CJK Ideograph, First>;Lo;0;L;;;;;N;;;;; -9FD5;<CJK Ideograph, Last>;Lo;0;L;;;;;N;;;;; +9FEA;<CJK Ideograph, Last>;Lo;0;L;;;;;N;;;;; A000;YI SYLLABLE IT;Lo;0;L;;;;;N;;;;; A001;YI SYLLABLE 
IX;Lo;0;L;;;;;N;;;;; A002;YI SYLLABLE I;Lo;0;L;;;;;N;;;;; @@ -17093,6 +17129,9 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 10321;OLD ITALIC NUMERAL FIVE;No;0;L;;;;5;N;;;;; 10322;OLD ITALIC NUMERAL TEN;No;0;L;;;;10;N;;;;; 10323;OLD ITALIC NUMERAL FIFTY;No;0;L;;;;50;N;;;;; +1032D;OLD ITALIC LETTER YE;Lo;0;L;;;;;N;;;;; +1032E;OLD ITALIC LETTER NORTHERN TSE;Lo;0;L;;;;;N;;;;; +1032F;OLD ITALIC LETTER SOUTHERN TSE;Lo;0;L;;;;;N;;;;; 10330;GOTHIC LETTER AHSA;Lo;0;L;;;;;N;;;;; 10331;GOTHIC LETTER BAIRKAN;Lo;0;L;;;;;N;;;;; 10332;GOTHIC LETTER GIBA;Lo;0;L;;;;;N;;;;; @@ -20068,6 +20107,158 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 118F1;WARANG CITI NUMBER EIGHTY;No;0;L;;;;80;N;;;;; 118F2;WARANG CITI NUMBER NINETY;No;0;L;;;;90;N;;;;; 118FF;WARANG CITI OM;Lo;0;L;;;;;N;;;;; +11A00;ZANABAZAR SQUARE LETTER A;Lo;0;L;;;;;N;;;;; +11A01;ZANABAZAR SQUARE VOWEL SIGN I;Mn;0;NSM;;;;;N;;;;; +11A02;ZANABAZAR SQUARE VOWEL SIGN UE;Mn;0;NSM;;;;;N;;;;; +11A03;ZANABAZAR SQUARE VOWEL SIGN U;Mn;0;NSM;;;;;N;;;;; +11A04;ZANABAZAR SQUARE VOWEL SIGN E;Mn;0;NSM;;;;;N;;;;; +11A05;ZANABAZAR SQUARE VOWEL SIGN OE;Mn;0;NSM;;;;;N;;;;; +11A06;ZANABAZAR SQUARE VOWEL SIGN O;Mn;0;NSM;;;;;N;;;;; +11A07;ZANABAZAR SQUARE VOWEL SIGN AI;Mc;0;L;;;;;N;;;;; +11A08;ZANABAZAR SQUARE VOWEL SIGN AU;Mc;0;L;;;;;N;;;;; +11A09;ZANABAZAR SQUARE VOWEL SIGN REVERSED I;Mn;0;NSM;;;;;N;;;;; +11A0A;ZANABAZAR SQUARE VOWEL LENGTH MARK;Mn;0;NSM;;;;;N;;;;; +11A0B;ZANABAZAR SQUARE LETTER KA;Lo;0;L;;;;;N;;;;; +11A0C;ZANABAZAR SQUARE LETTER KHA;Lo;0;L;;;;;N;;;;; +11A0D;ZANABAZAR SQUARE LETTER GA;Lo;0;L;;;;;N;;;;; +11A0E;ZANABAZAR SQUARE LETTER GHA;Lo;0;L;;;;;N;;;;; +11A0F;ZANABAZAR SQUARE LETTER NGA;Lo;0;L;;;;;N;;;;; +11A10;ZANABAZAR SQUARE LETTER CA;Lo;0;L;;;;;N;;;;; +11A11;ZANABAZAR SQUARE LETTER CHA;Lo;0;L;;;;;N;;;;; +11A12;ZANABAZAR SQUARE LETTER JA;Lo;0;L;;;;;N;;;;; +11A13;ZANABAZAR SQUARE LETTER NYA;Lo;0;L;;;;;N;;;;; +11A14;ZANABAZAR SQUARE LETTER TTA;Lo;0;L;;;;;N;;;;; +11A15;ZANABAZAR SQUARE LETTER TTHA;Lo;0;L;;;;;N;;;;; +11A16;ZANABAZAR SQUARE LETTER DDA;Lo;0;L;;;;;N;;;;; +11A17;ZANABAZAR SQUARE LETTER DDHA;Lo;0;L;;;;;N;;;;; +11A18;ZANABAZAR SQUARE LETTER NNA;Lo;0;L;;;;;N;;;;; +11A19;ZANABAZAR SQUARE LETTER TA;Lo;0;L;;;;;N;;;;; +11A1A;ZANABAZAR SQUARE LETTER THA;Lo;0;L;;;;;N;;;;; +11A1B;ZANABAZAR SQUARE LETTER DA;Lo;0;L;;;;;N;;;;; +11A1C;ZANABAZAR SQUARE LETTER DHA;Lo;0;L;;;;;N;;;;; +11A1D;ZANABAZAR SQUARE LETTER NA;Lo;0;L;;;;;N;;;;; +11A1E;ZANABAZAR SQUARE LETTER PA;Lo;0;L;;;;;N;;;;; +11A1F;ZANABAZAR SQUARE LETTER PHA;Lo;0;L;;;;;N;;;;; +11A20;ZANABAZAR SQUARE LETTER BA;Lo;0;L;;;;;N;;;;; +11A21;ZANABAZAR SQUARE LETTER BHA;Lo;0;L;;;;;N;;;;; +11A22;ZANABAZAR SQUARE LETTER MA;Lo;0;L;;;;;N;;;;; +11A23;ZANABAZAR SQUARE LETTER TSA;Lo;0;L;;;;;N;;;;; +11A24;ZANABAZAR SQUARE LETTER TSHA;Lo;0;L;;;;;N;;;;; +11A25;ZANABAZAR SQUARE LETTER DZA;Lo;0;L;;;;;N;;;;; +11A26;ZANABAZAR SQUARE LETTER DZHA;Lo;0;L;;;;;N;;;;; +11A27;ZANABAZAR SQUARE LETTER ZHA;Lo;0;L;;;;;N;;;;; +11A28;ZANABAZAR SQUARE LETTER ZA;Lo;0;L;;;;;N;;;;; +11A29;ZANABAZAR SQUARE LETTER -A;Lo;0;L;;;;;N;;;;; +11A2A;ZANABAZAR SQUARE LETTER YA;Lo;0;L;;;;;N;;;;; +11A2B;ZANABAZAR SQUARE LETTER RA;Lo;0;L;;;;;N;;;;; +11A2C;ZANABAZAR SQUARE LETTER LA;Lo;0;L;;;;;N;;;;; +11A2D;ZANABAZAR SQUARE LETTER VA;Lo;0;L;;;;;N;;;;; +11A2E;ZANABAZAR SQUARE LETTER SHA;Lo;0;L;;;;;N;;;;; +11A2F;ZANABAZAR SQUARE LETTER SSA;Lo;0;L;;;;;N;;;;; +11A30;ZANABAZAR SQUARE LETTER SA;Lo;0;L;;;;;N;;;;; +11A31;ZANABAZAR SQUARE LETTER HA;Lo;0;L;;;;;N;;;;; +11A32;ZANABAZAR SQUARE LETTER KSSA;Lo;0;L;;;;;N;;;;; +11A33;ZANABAZAR SQUARE FINAL 
CONSONANT MARK;Mn;0;NSM;;;;;N;;;;; +11A34;ZANABAZAR SQUARE SIGN VIRAMA;Mn;9;NSM;;;;;N;;;;; +11A35;ZANABAZAR SQUARE SIGN CANDRABINDU;Mn;0;NSM;;;;;N;;;;; +11A36;ZANABAZAR SQUARE SIGN CANDRABINDU WITH ORNAMENT;Mn;0;NSM;;;;;N;;;;; +11A37;ZANABAZAR SQUARE SIGN CANDRA WITH ORNAMENT;Mn;0;NSM;;;;;N;;;;; +11A38;ZANABAZAR SQUARE SIGN ANUSVARA;Mn;0;NSM;;;;;N;;;;; +11A39;ZANABAZAR SQUARE SIGN VISARGA;Mc;0;L;;;;;N;;;;; +11A3A;ZANABAZAR SQUARE CLUSTER-INITIAL LETTER RA;Lo;0;L;;;;;N;;;;; +11A3B;ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA;Mn;0;NSM;;;;;N;;;;; +11A3C;ZANABAZAR SQUARE CLUSTER-FINAL LETTER RA;Mn;0;NSM;;;;;N;;;;; +11A3D;ZANABAZAR SQUARE CLUSTER-FINAL LETTER LA;Mn;0;NSM;;;;;N;;;;; +11A3E;ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA;Mn;0;NSM;;;;;N;;;;; +11A3F;ZANABAZAR SQUARE INITIAL HEAD MARK;Po;0;L;;;;;N;;;;; +11A40;ZANABAZAR SQUARE CLOSING HEAD MARK;Po;0;L;;;;;N;;;;; +11A41;ZANABAZAR SQUARE MARK TSHEG;Po;0;L;;;;;N;;;;; +11A42;ZANABAZAR SQUARE MARK SHAD;Po;0;L;;;;;N;;;;; +11A43;ZANABAZAR SQUARE MARK DOUBLE SHAD;Po;0;L;;;;;N;;;;; +11A44;ZANABAZAR SQUARE MARK LONG TSHEG;Po;0;L;;;;;N;;;;; +11A45;ZANABAZAR SQUARE INITIAL DOUBLE-LINED HEAD MARK;Po;0;L;;;;;N;;;;; +11A46;ZANABAZAR SQUARE CLOSING DOUBLE-LINED HEAD MARK;Po;0;L;;;;;N;;;;; +11A47;ZANABAZAR SQUARE SUBJOINER;Mn;9;NSM;;;;;N;;;;; +11A50;SOYOMBO LETTER A;Lo;0;L;;;;;N;;;;; +11A51;SOYOMBO VOWEL SIGN I;Mn;0;NSM;;;;;N;;;;; +11A52;SOYOMBO VOWEL SIGN UE;Mn;0;NSM;;;;;N;;;;; +11A53;SOYOMBO VOWEL SIGN U;Mn;0;NSM;;;;;N;;;;; +11A54;SOYOMBO VOWEL SIGN E;Mn;0;NSM;;;;;N;;;;; +11A55;SOYOMBO VOWEL SIGN O;Mn;0;NSM;;;;;N;;;;; +11A56;SOYOMBO VOWEL SIGN OE;Mn;0;NSM;;;;;N;;;;; +11A57;SOYOMBO VOWEL SIGN AI;Mc;0;L;;;;;N;;;;; +11A58;SOYOMBO VOWEL SIGN AU;Mc;0;L;;;;;N;;;;; +11A59;SOYOMBO VOWEL SIGN VOCALIC R;Mn;0;NSM;;;;;N;;;;; +11A5A;SOYOMBO VOWEL SIGN VOCALIC L;Mn;0;NSM;;;;;N;;;;; +11A5B;SOYOMBO VOWEL LENGTH MARK;Mn;0;NSM;;;;;N;;;;; +11A5C;SOYOMBO LETTER KA;Lo;0;L;;;;;N;;;;; +11A5D;SOYOMBO LETTER KHA;Lo;0;L;;;;;N;;;;; +11A5E;SOYOMBO LETTER GA;Lo;0;L;;;;;N;;;;; +11A5F;SOYOMBO LETTER GHA;Lo;0;L;;;;;N;;;;; +11A60;SOYOMBO LETTER NGA;Lo;0;L;;;;;N;;;;; +11A61;SOYOMBO LETTER CA;Lo;0;L;;;;;N;;;;; +11A62;SOYOMBO LETTER CHA;Lo;0;L;;;;;N;;;;; +11A63;SOYOMBO LETTER JA;Lo;0;L;;;;;N;;;;; +11A64;SOYOMBO LETTER JHA;Lo;0;L;;;;;N;;;;; +11A65;SOYOMBO LETTER NYA;Lo;0;L;;;;;N;;;;; +11A66;SOYOMBO LETTER TTA;Lo;0;L;;;;;N;;;;; +11A67;SOYOMBO LETTER TTHA;Lo;0;L;;;;;N;;;;; +11A68;SOYOMBO LETTER DDA;Lo;0;L;;;;;N;;;;; +11A69;SOYOMBO LETTER DDHA;Lo;0;L;;;;;N;;;;; +11A6A;SOYOMBO LETTER NNA;Lo;0;L;;;;;N;;;;; +11A6B;SOYOMBO LETTER TA;Lo;0;L;;;;;N;;;;; +11A6C;SOYOMBO LETTER THA;Lo;0;L;;;;;N;;;;; +11A6D;SOYOMBO LETTER DA;Lo;0;L;;;;;N;;;;; +11A6E;SOYOMBO LETTER DHA;Lo;0;L;;;;;N;;;;; +11A6F;SOYOMBO LETTER NA;Lo;0;L;;;;;N;;;;; +11A70;SOYOMBO LETTER PA;Lo;0;L;;;;;N;;;;; +11A71;SOYOMBO LETTER PHA;Lo;0;L;;;;;N;;;;; +11A72;SOYOMBO LETTER BA;Lo;0;L;;;;;N;;;;; +11A73;SOYOMBO LETTER BHA;Lo;0;L;;;;;N;;;;; +11A74;SOYOMBO LETTER MA;Lo;0;L;;;;;N;;;;; +11A75;SOYOMBO LETTER TSA;Lo;0;L;;;;;N;;;;; +11A76;SOYOMBO LETTER TSHA;Lo;0;L;;;;;N;;;;; +11A77;SOYOMBO LETTER DZA;Lo;0;L;;;;;N;;;;; +11A78;SOYOMBO LETTER ZHA;Lo;0;L;;;;;N;;;;; +11A79;SOYOMBO LETTER ZA;Lo;0;L;;;;;N;;;;; +11A7A;SOYOMBO LETTER -A;Lo;0;L;;;;;N;;;;; +11A7B;SOYOMBO LETTER YA;Lo;0;L;;;;;N;;;;; +11A7C;SOYOMBO LETTER RA;Lo;0;L;;;;;N;;;;; +11A7D;SOYOMBO LETTER LA;Lo;0;L;;;;;N;;;;; +11A7E;SOYOMBO LETTER VA;Lo;0;L;;;;;N;;;;; +11A7F;SOYOMBO LETTER SHA;Lo;0;L;;;;;N;;;;; +11A80;SOYOMBO LETTER SSA;Lo;0;L;;;;;N;;;;; +11A81;SOYOMBO LETTER SA;Lo;0;L;;;;;N;;;;; 
+11A82;SOYOMBO LETTER HA;Lo;0;L;;;;;N;;;;; +11A83;SOYOMBO LETTER KSSA;Lo;0;L;;;;;N;;;;; +11A86;SOYOMBO CLUSTER-INITIAL LETTER RA;Lo;0;L;;;;;N;;;;; +11A87;SOYOMBO CLUSTER-INITIAL LETTER LA;Lo;0;L;;;;;N;;;;; +11A88;SOYOMBO CLUSTER-INITIAL LETTER SHA;Lo;0;L;;;;;N;;;;; +11A89;SOYOMBO CLUSTER-INITIAL LETTER SA;Lo;0;L;;;;;N;;;;; +11A8A;SOYOMBO FINAL CONSONANT SIGN G;Mn;0;NSM;;;;;N;;;;; +11A8B;SOYOMBO FINAL CONSONANT SIGN K;Mn;0;NSM;;;;;N;;;;; +11A8C;SOYOMBO FINAL CONSONANT SIGN NG;Mn;0;NSM;;;;;N;;;;; +11A8D;SOYOMBO FINAL CONSONANT SIGN D;Mn;0;NSM;;;;;N;;;;; +11A8E;SOYOMBO FINAL CONSONANT SIGN N;Mn;0;NSM;;;;;N;;;;; +11A8F;SOYOMBO FINAL CONSONANT SIGN B;Mn;0;NSM;;;;;N;;;;; +11A90;SOYOMBO FINAL CONSONANT SIGN M;Mn;0;NSM;;;;;N;;;;; +11A91;SOYOMBO FINAL CONSONANT SIGN R;Mn;0;NSM;;;;;N;;;;; +11A92;SOYOMBO FINAL CONSONANT SIGN L;Mn;0;NSM;;;;;N;;;;; +11A93;SOYOMBO FINAL CONSONANT SIGN SH;Mn;0;NSM;;;;;N;;;;; +11A94;SOYOMBO FINAL CONSONANT SIGN S;Mn;0;NSM;;;;;N;;;;; +11A95;SOYOMBO FINAL CONSONANT SIGN -A;Mn;0;NSM;;;;;N;;;;; +11A96;SOYOMBO SIGN ANUSVARA;Mn;0;NSM;;;;;N;;;;; +11A97;SOYOMBO SIGN VISARGA;Mc;0;L;;;;;N;;;;; +11A98;SOYOMBO GEMINATION MARK;Mn;0;NSM;;;;;N;;;;; +11A99;SOYOMBO SUBJOINER;Mn;9;NSM;;;;;N;;;;; +11A9A;SOYOMBO MARK TSHEG;Po;0;L;;;;;N;;;;; +11A9B;SOYOMBO MARK SHAD;Po;0;L;;;;;N;;;;; +11A9C;SOYOMBO MARK DOUBLE SHAD;Po;0;L;;;;;N;;;;; +11A9E;SOYOMBO HEAD MARK WITH MOON AND SUN AND TRIPLE FLAME;Po;0;L;;;;;N;;;;; +11A9F;SOYOMBO HEAD MARK WITH MOON AND SUN AND FLAME;Po;0;L;;;;;N;;;;; +11AA0;SOYOMBO HEAD MARK WITH MOON AND SUN;Po;0;L;;;;;N;;;;; +11AA1;SOYOMBO TERMINAL MARK-1;Po;0;L;;;;;N;;;;; +11AA2;SOYOMBO TERMINAL MARK-2;Po;0;L;;;;;N;;;;; 11AC0;PAU CIN HAU LETTER PA;Lo;0;L;;;;;N;;;;; 11AC1;PAU CIN HAU LETTER KA;Lo;0;L;;;;;N;;;;; 11AC2;PAU CIN HAU LETTER LA;Lo;0;L;;;;;N;;;;; @@ -20290,6 +20481,81 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 11CB4;MARCHEN VOWEL SIGN O;Mc;0;L;;;;;N;;;;; 11CB5;MARCHEN SIGN ANUSVARA;Mn;0;NSM;;;;;N;;;;; 11CB6;MARCHEN SIGN CANDRABINDU;Mn;0;NSM;;;;;N;;;;; +11D00;MASARAM GONDI LETTER A;Lo;0;L;;;;;N;;;;; +11D01;MASARAM GONDI LETTER AA;Lo;0;L;;;;;N;;;;; +11D02;MASARAM GONDI LETTER I;Lo;0;L;;;;;N;;;;; +11D03;MASARAM GONDI LETTER II;Lo;0;L;;;;;N;;;;; +11D04;MASARAM GONDI LETTER U;Lo;0;L;;;;;N;;;;; +11D05;MASARAM GONDI LETTER UU;Lo;0;L;;;;;N;;;;; +11D06;MASARAM GONDI LETTER E;Lo;0;L;;;;;N;;;;; +11D08;MASARAM GONDI LETTER AI;Lo;0;L;;;;;N;;;;; +11D09;MASARAM GONDI LETTER O;Lo;0;L;;;;;N;;;;; +11D0B;MASARAM GONDI LETTER AU;Lo;0;L;;;;;N;;;;; +11D0C;MASARAM GONDI LETTER KA;Lo;0;L;;;;;N;;;;; +11D0D;MASARAM GONDI LETTER KHA;Lo;0;L;;;;;N;;;;; +11D0E;MASARAM GONDI LETTER GA;Lo;0;L;;;;;N;;;;; +11D0F;MASARAM GONDI LETTER GHA;Lo;0;L;;;;;N;;;;; +11D10;MASARAM GONDI LETTER NGA;Lo;0;L;;;;;N;;;;; +11D11;MASARAM GONDI LETTER CA;Lo;0;L;;;;;N;;;;; +11D12;MASARAM GONDI LETTER CHA;Lo;0;L;;;;;N;;;;; +11D13;MASARAM GONDI LETTER JA;Lo;0;L;;;;;N;;;;; +11D14;MASARAM GONDI LETTER JHA;Lo;0;L;;;;;N;;;;; +11D15;MASARAM GONDI LETTER NYA;Lo;0;L;;;;;N;;;;; +11D16;MASARAM GONDI LETTER TTA;Lo;0;L;;;;;N;;;;; +11D17;MASARAM GONDI LETTER TTHA;Lo;0;L;;;;;N;;;;; +11D18;MASARAM GONDI LETTER DDA;Lo;0;L;;;;;N;;;;; +11D19;MASARAM GONDI LETTER DDHA;Lo;0;L;;;;;N;;;;; +11D1A;MASARAM GONDI LETTER NNA;Lo;0;L;;;;;N;;;;; +11D1B;MASARAM GONDI LETTER TA;Lo;0;L;;;;;N;;;;; +11D1C;MASARAM GONDI LETTER THA;Lo;0;L;;;;;N;;;;; +11D1D;MASARAM GONDI LETTER DA;Lo;0;L;;;;;N;;;;; +11D1E;MASARAM GONDI LETTER DHA;Lo;0;L;;;;;N;;;;; +11D1F;MASARAM GONDI LETTER NA;Lo;0;L;;;;;N;;;;; +11D20;MASARAM GONDI LETTER PA;Lo;0;L;;;;;N;;;;; 
+11D21;MASARAM GONDI LETTER PHA;Lo;0;L;;;;;N;;;;; +11D22;MASARAM GONDI LETTER BA;Lo;0;L;;;;;N;;;;; +11D23;MASARAM GONDI LETTER BHA;Lo;0;L;;;;;N;;;;; +11D24;MASARAM GONDI LETTER MA;Lo;0;L;;;;;N;;;;; +11D25;MASARAM GONDI LETTER YA;Lo;0;L;;;;;N;;;;; +11D26;MASARAM GONDI LETTER RA;Lo;0;L;;;;;N;;;;; +11D27;MASARAM GONDI LETTER LA;Lo;0;L;;;;;N;;;;; +11D28;MASARAM GONDI LETTER VA;Lo;0;L;;;;;N;;;;; +11D29;MASARAM GONDI LETTER SHA;Lo;0;L;;;;;N;;;;; +11D2A;MASARAM GONDI LETTER SSA;Lo;0;L;;;;;N;;;;; +11D2B;MASARAM GONDI LETTER SA;Lo;0;L;;;;;N;;;;; +11D2C;MASARAM GONDI LETTER HA;Lo;0;L;;;;;N;;;;; +11D2D;MASARAM GONDI LETTER LLA;Lo;0;L;;;;;N;;;;; +11D2E;MASARAM GONDI LETTER KSSA;Lo;0;L;;;;;N;;;;; +11D2F;MASARAM GONDI LETTER JNYA;Lo;0;L;;;;;N;;;;; +11D30;MASARAM GONDI LETTER TRA;Lo;0;L;;;;;N;;;;; +11D31;MASARAM GONDI VOWEL SIGN AA;Mn;0;NSM;;;;;N;;;;; +11D32;MASARAM GONDI VOWEL SIGN I;Mn;0;NSM;;;;;N;;;;; +11D33;MASARAM GONDI VOWEL SIGN II;Mn;0;NSM;;;;;N;;;;; +11D34;MASARAM GONDI VOWEL SIGN U;Mn;0;NSM;;;;;N;;;;; +11D35;MASARAM GONDI VOWEL SIGN UU;Mn;0;NSM;;;;;N;;;;; +11D36;MASARAM GONDI VOWEL SIGN VOCALIC R;Mn;0;NSM;;;;;N;;;;; +11D3A;MASARAM GONDI VOWEL SIGN E;Mn;0;NSM;;;;;N;;;;; +11D3C;MASARAM GONDI VOWEL SIGN AI;Mn;0;NSM;;;;;N;;;;; +11D3D;MASARAM GONDI VOWEL SIGN O;Mn;0;NSM;;;;;N;;;;; +11D3F;MASARAM GONDI VOWEL SIGN AU;Mn;0;NSM;;;;;N;;;;; +11D40;MASARAM GONDI SIGN ANUSVARA;Mn;0;NSM;;;;;N;;;;; +11D41;MASARAM GONDI SIGN VISARGA;Mn;0;NSM;;;;;N;;;;; +11D42;MASARAM GONDI SIGN NUKTA;Mn;7;NSM;;;;;N;;;;; +11D43;MASARAM GONDI SIGN CANDRA;Mn;0;NSM;;;;;N;;;;; +11D44;MASARAM GONDI SIGN HALANTA;Mn;9;NSM;;;;;N;;;;; +11D45;MASARAM GONDI VIRAMA;Mn;9;NSM;;;;;N;;;;; +11D46;MASARAM GONDI REPHA;Lo;0;L;;;;;N;;;;; +11D47;MASARAM GONDI RA-KARA;Mn;0;NSM;;;;;N;;;;; +11D50;MASARAM GONDI DIGIT ZERO;Nd;0;L;;0;0;0;N;;;;; +11D51;MASARAM GONDI DIGIT ONE;Nd;0;L;;1;1;1;N;;;;; +11D52;MASARAM GONDI DIGIT TWO;Nd;0;L;;2;2;2;N;;;;; +11D53;MASARAM GONDI DIGIT THREE;Nd;0;L;;3;3;3;N;;;;; +11D54;MASARAM GONDI DIGIT FOUR;Nd;0;L;;4;4;4;N;;;;; +11D55;MASARAM GONDI DIGIT FIVE;Nd;0;L;;5;5;5;N;;;;; +11D56;MASARAM GONDI DIGIT SIX;Nd;0;L;;6;6;6;N;;;;; +11D57;MASARAM GONDI DIGIT SEVEN;Nd;0;L;;7;7;7;N;;;;; +11D58;MASARAM GONDI DIGIT EIGHT;Nd;0;L;;8;8;8;N;;;;; +11D59;MASARAM GONDI DIGIT NINE;Nd;0;L;;9;9;9;N;;;;; 12000;CUNEIFORM SIGN A;Lo;0;L;;;;;N;;;;; 12001;CUNEIFORM SIGN A TIMES A;Lo;0;L;;;;;N;;;;; 12002;CUNEIFORM SIGN A TIMES BAD;Lo;0;L;;;;;N;;;;; @@ -24087,6 +24353,7 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 16F9E;MIAO LETTER REFORMED TONE-6;Lm;0;L;;;;;N;;;;; 16F9F;MIAO LETTER REFORMED TONE-8;Lm;0;L;;;;;N;;;;; 16FE0;TANGUT ITERATION MARK;Lm;0;L;;;;;N;;;;; +16FE1;NUSHU ITERATION MARK;Lm;0;L;;;;;N;;;;; 17000;<Tangut Ideograph, First>;Lo;0;L;;;;;N;;;;; 187EC;<Tangut Ideograph, Last>;Lo;0;L;;;;;N;;;;; 18800;TANGUT COMPONENT-001;Lo;0;L;;;;;N;;;;; @@ -24846,6 +25113,687 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 18AF2;TANGUT COMPONENT-755;Lo;0;L;;;;;N;;;;; 1B000;KATAKANA LETTER ARCHAIC E;Lo;0;L;;;;;N;;;;; 1B001;HIRAGANA LETTER ARCHAIC YE;Lo;0;L;;;;;N;;;;; +1B002;HENTAIGANA LETTER A-1;Lo;0;L;;;;;N;;;;; +1B003;HENTAIGANA LETTER A-2;Lo;0;L;;;;;N;;;;; +1B004;HENTAIGANA LETTER A-3;Lo;0;L;;;;;N;;;;; +1B005;HENTAIGANA LETTER A-WO;Lo;0;L;;;;;N;;;;; +1B006;HENTAIGANA LETTER I-1;Lo;0;L;;;;;N;;;;; +1B007;HENTAIGANA LETTER I-2;Lo;0;L;;;;;N;;;;; +1B008;HENTAIGANA LETTER I-3;Lo;0;L;;;;;N;;;;; +1B009;HENTAIGANA LETTER I-4;Lo;0;L;;;;;N;;;;; +1B00A;HENTAIGANA LETTER U-1;Lo;0;L;;;;;N;;;;; +1B00B;HENTAIGANA LETTER U-2;Lo;0;L;;;;;N;;;;; +1B00C;HENTAIGANA LETTER 
U-3;Lo;0;L;;;;;N;;;;; +1B00D;HENTAIGANA LETTER U-4;Lo;0;L;;;;;N;;;;; +1B00E;HENTAIGANA LETTER U-5;Lo;0;L;;;;;N;;;;; +1B00F;HENTAIGANA LETTER E-2;Lo;0;L;;;;;N;;;;; +1B010;HENTAIGANA LETTER E-3;Lo;0;L;;;;;N;;;;; +1B011;HENTAIGANA LETTER E-4;Lo;0;L;;;;;N;;;;; +1B012;HENTAIGANA LETTER E-5;Lo;0;L;;;;;N;;;;; +1B013;HENTAIGANA LETTER E-6;Lo;0;L;;;;;N;;;;; +1B014;HENTAIGANA LETTER O-1;Lo;0;L;;;;;N;;;;; +1B015;HENTAIGANA LETTER O-2;Lo;0;L;;;;;N;;;;; +1B016;HENTAIGANA LETTER O-3;Lo;0;L;;;;;N;;;;; +1B017;HENTAIGANA LETTER KA-1;Lo;0;L;;;;;N;;;;; +1B018;HENTAIGANA LETTER KA-2;Lo;0;L;;;;;N;;;;; +1B019;HENTAIGANA LETTER KA-3;Lo;0;L;;;;;N;;;;; +1B01A;HENTAIGANA LETTER KA-4;Lo;0;L;;;;;N;;;;; +1B01B;HENTAIGANA LETTER KA-5;Lo;0;L;;;;;N;;;;; +1B01C;HENTAIGANA LETTER KA-6;Lo;0;L;;;;;N;;;;; +1B01D;HENTAIGANA LETTER KA-7;Lo;0;L;;;;;N;;;;; +1B01E;HENTAIGANA LETTER KA-8;Lo;0;L;;;;;N;;;;; +1B01F;HENTAIGANA LETTER KA-9;Lo;0;L;;;;;N;;;;; +1B020;HENTAIGANA LETTER KA-10;Lo;0;L;;;;;N;;;;; +1B021;HENTAIGANA LETTER KA-11;Lo;0;L;;;;;N;;;;; +1B022;HENTAIGANA LETTER KA-KE;Lo;0;L;;;;;N;;;;; +1B023;HENTAIGANA LETTER KI-1;Lo;0;L;;;;;N;;;;; +1B024;HENTAIGANA LETTER KI-2;Lo;0;L;;;;;N;;;;; +1B025;HENTAIGANA LETTER KI-3;Lo;0;L;;;;;N;;;;; +1B026;HENTAIGANA LETTER KI-4;Lo;0;L;;;;;N;;;;; +1B027;HENTAIGANA LETTER KI-5;Lo;0;L;;;;;N;;;;; +1B028;HENTAIGANA LETTER KI-6;Lo;0;L;;;;;N;;;;; +1B029;HENTAIGANA LETTER KI-7;Lo;0;L;;;;;N;;;;; +1B02A;HENTAIGANA LETTER KI-8;Lo;0;L;;;;;N;;;;; +1B02B;HENTAIGANA LETTER KU-1;Lo;0;L;;;;;N;;;;; +1B02C;HENTAIGANA LETTER KU-2;Lo;0;L;;;;;N;;;;; +1B02D;HENTAIGANA LETTER KU-3;Lo;0;L;;;;;N;;;;; +1B02E;HENTAIGANA LETTER KU-4;Lo;0;L;;;;;N;;;;; +1B02F;HENTAIGANA LETTER KU-5;Lo;0;L;;;;;N;;;;; +1B030;HENTAIGANA LETTER KU-6;Lo;0;L;;;;;N;;;;; +1B031;HENTAIGANA LETTER KU-7;Lo;0;L;;;;;N;;;;; +1B032;HENTAIGANA LETTER KE-1;Lo;0;L;;;;;N;;;;; +1B033;HENTAIGANA LETTER KE-2;Lo;0;L;;;;;N;;;;; +1B034;HENTAIGANA LETTER KE-3;Lo;0;L;;;;;N;;;;; +1B035;HENTAIGANA LETTER KE-4;Lo;0;L;;;;;N;;;;; +1B036;HENTAIGANA LETTER KE-5;Lo;0;L;;;;;N;;;;; +1B037;HENTAIGANA LETTER KE-6;Lo;0;L;;;;;N;;;;; +1B038;HENTAIGANA LETTER KO-1;Lo;0;L;;;;;N;;;;; +1B039;HENTAIGANA LETTER KO-2;Lo;0;L;;;;;N;;;;; +1B03A;HENTAIGANA LETTER KO-3;Lo;0;L;;;;;N;;;;; +1B03B;HENTAIGANA LETTER KO-KI;Lo;0;L;;;;;N;;;;; +1B03C;HENTAIGANA LETTER SA-1;Lo;0;L;;;;;N;;;;; +1B03D;HENTAIGANA LETTER SA-2;Lo;0;L;;;;;N;;;;; +1B03E;HENTAIGANA LETTER SA-3;Lo;0;L;;;;;N;;;;; +1B03F;HENTAIGANA LETTER SA-4;Lo;0;L;;;;;N;;;;; +1B040;HENTAIGANA LETTER SA-5;Lo;0;L;;;;;N;;;;; +1B041;HENTAIGANA LETTER SA-6;Lo;0;L;;;;;N;;;;; +1B042;HENTAIGANA LETTER SA-7;Lo;0;L;;;;;N;;;;; +1B043;HENTAIGANA LETTER SA-8;Lo;0;L;;;;;N;;;;; +1B044;HENTAIGANA LETTER SI-1;Lo;0;L;;;;;N;;;;; +1B045;HENTAIGANA LETTER SI-2;Lo;0;L;;;;;N;;;;; +1B046;HENTAIGANA LETTER SI-3;Lo;0;L;;;;;N;;;;; +1B047;HENTAIGANA LETTER SI-4;Lo;0;L;;;;;N;;;;; +1B048;HENTAIGANA LETTER SI-5;Lo;0;L;;;;;N;;;;; +1B049;HENTAIGANA LETTER SI-6;Lo;0;L;;;;;N;;;;; +1B04A;HENTAIGANA LETTER SU-1;Lo;0;L;;;;;N;;;;; +1B04B;HENTAIGANA LETTER SU-2;Lo;0;L;;;;;N;;;;; +1B04C;HENTAIGANA LETTER SU-3;Lo;0;L;;;;;N;;;;; +1B04D;HENTAIGANA LETTER SU-4;Lo;0;L;;;;;N;;;;; +1B04E;HENTAIGANA LETTER SU-5;Lo;0;L;;;;;N;;;;; +1B04F;HENTAIGANA LETTER SU-6;Lo;0;L;;;;;N;;;;; +1B050;HENTAIGANA LETTER SU-7;Lo;0;L;;;;;N;;;;; +1B051;HENTAIGANA LETTER SU-8;Lo;0;L;;;;;N;;;;; +1B052;HENTAIGANA LETTER SE-1;Lo;0;L;;;;;N;;;;; +1B053;HENTAIGANA LETTER SE-2;Lo;0;L;;;;;N;;;;; +1B054;HENTAIGANA LETTER SE-3;Lo;0;L;;;;;N;;;;; +1B055;HENTAIGANA LETTER SE-4;Lo;0;L;;;;;N;;;;; +1B056;HENTAIGANA LETTER 
SE-5;Lo;0;L;;;;;N;;;;; +1B057;HENTAIGANA LETTER SO-1;Lo;0;L;;;;;N;;;;; +1B058;HENTAIGANA LETTER SO-2;Lo;0;L;;;;;N;;;;; +1B059;HENTAIGANA LETTER SO-3;Lo;0;L;;;;;N;;;;; +1B05A;HENTAIGANA LETTER SO-4;Lo;0;L;;;;;N;;;;; +1B05B;HENTAIGANA LETTER SO-5;Lo;0;L;;;;;N;;;;; +1B05C;HENTAIGANA LETTER SO-6;Lo;0;L;;;;;N;;;;; +1B05D;HENTAIGANA LETTER SO-7;Lo;0;L;;;;;N;;;;; +1B05E;HENTAIGANA LETTER TA-1;Lo;0;L;;;;;N;;;;; +1B05F;HENTAIGANA LETTER TA-2;Lo;0;L;;;;;N;;;;; +1B060;HENTAIGANA LETTER TA-3;Lo;0;L;;;;;N;;;;; +1B061;HENTAIGANA LETTER TA-4;Lo;0;L;;;;;N;;;;; +1B062;HENTAIGANA LETTER TI-1;Lo;0;L;;;;;N;;;;; +1B063;HENTAIGANA LETTER TI-2;Lo;0;L;;;;;N;;;;; +1B064;HENTAIGANA LETTER TI-3;Lo;0;L;;;;;N;;;;; +1B065;HENTAIGANA LETTER TI-4;Lo;0;L;;;;;N;;;;; +1B066;HENTAIGANA LETTER TI-5;Lo;0;L;;;;;N;;;;; +1B067;HENTAIGANA LETTER TI-6;Lo;0;L;;;;;N;;;;; +1B068;HENTAIGANA LETTER TI-7;Lo;0;L;;;;;N;;;;; +1B069;HENTAIGANA LETTER TU-1;Lo;0;L;;;;;N;;;;; +1B06A;HENTAIGANA LETTER TU-2;Lo;0;L;;;;;N;;;;; +1B06B;HENTAIGANA LETTER TU-3;Lo;0;L;;;;;N;;;;; +1B06C;HENTAIGANA LETTER TU-4;Lo;0;L;;;;;N;;;;; +1B06D;HENTAIGANA LETTER TU-TO;Lo;0;L;;;;;N;;;;; +1B06E;HENTAIGANA LETTER TE-1;Lo;0;L;;;;;N;;;;; +1B06F;HENTAIGANA LETTER TE-2;Lo;0;L;;;;;N;;;;; +1B070;HENTAIGANA LETTER TE-3;Lo;0;L;;;;;N;;;;; +1B071;HENTAIGANA LETTER TE-4;Lo;0;L;;;;;N;;;;; +1B072;HENTAIGANA LETTER TE-5;Lo;0;L;;;;;N;;;;; +1B073;HENTAIGANA LETTER TE-6;Lo;0;L;;;;;N;;;;; +1B074;HENTAIGANA LETTER TE-7;Lo;0;L;;;;;N;;;;; +1B075;HENTAIGANA LETTER TE-8;Lo;0;L;;;;;N;;;;; +1B076;HENTAIGANA LETTER TE-9;Lo;0;L;;;;;N;;;;; +1B077;HENTAIGANA LETTER TO-1;Lo;0;L;;;;;N;;;;; +1B078;HENTAIGANA LETTER TO-2;Lo;0;L;;;;;N;;;;; +1B079;HENTAIGANA LETTER TO-3;Lo;0;L;;;;;N;;;;; +1B07A;HENTAIGANA LETTER TO-4;Lo;0;L;;;;;N;;;;; +1B07B;HENTAIGANA LETTER TO-5;Lo;0;L;;;;;N;;;;; +1B07C;HENTAIGANA LETTER TO-6;Lo;0;L;;;;;N;;;;; +1B07D;HENTAIGANA LETTER TO-RA;Lo;0;L;;;;;N;;;;; +1B07E;HENTAIGANA LETTER NA-1;Lo;0;L;;;;;N;;;;; +1B07F;HENTAIGANA LETTER NA-2;Lo;0;L;;;;;N;;;;; +1B080;HENTAIGANA LETTER NA-3;Lo;0;L;;;;;N;;;;; +1B081;HENTAIGANA LETTER NA-4;Lo;0;L;;;;;N;;;;; +1B082;HENTAIGANA LETTER NA-5;Lo;0;L;;;;;N;;;;; +1B083;HENTAIGANA LETTER NA-6;Lo;0;L;;;;;N;;;;; +1B084;HENTAIGANA LETTER NA-7;Lo;0;L;;;;;N;;;;; +1B085;HENTAIGANA LETTER NA-8;Lo;0;L;;;;;N;;;;; +1B086;HENTAIGANA LETTER NA-9;Lo;0;L;;;;;N;;;;; +1B087;HENTAIGANA LETTER NI-1;Lo;0;L;;;;;N;;;;; +1B088;HENTAIGANA LETTER NI-2;Lo;0;L;;;;;N;;;;; +1B089;HENTAIGANA LETTER NI-3;Lo;0;L;;;;;N;;;;; +1B08A;HENTAIGANA LETTER NI-4;Lo;0;L;;;;;N;;;;; +1B08B;HENTAIGANA LETTER NI-5;Lo;0;L;;;;;N;;;;; +1B08C;HENTAIGANA LETTER NI-6;Lo;0;L;;;;;N;;;;; +1B08D;HENTAIGANA LETTER NI-7;Lo;0;L;;;;;N;;;;; +1B08E;HENTAIGANA LETTER NI-TE;Lo;0;L;;;;;N;;;;; +1B08F;HENTAIGANA LETTER NU-1;Lo;0;L;;;;;N;;;;; +1B090;HENTAIGANA LETTER NU-2;Lo;0;L;;;;;N;;;;; +1B091;HENTAIGANA LETTER NU-3;Lo;0;L;;;;;N;;;;; +1B092;HENTAIGANA LETTER NE-1;Lo;0;L;;;;;N;;;;; +1B093;HENTAIGANA LETTER NE-2;Lo;0;L;;;;;N;;;;; +1B094;HENTAIGANA LETTER NE-3;Lo;0;L;;;;;N;;;;; +1B095;HENTAIGANA LETTER NE-4;Lo;0;L;;;;;N;;;;; +1B096;HENTAIGANA LETTER NE-5;Lo;0;L;;;;;N;;;;; +1B097;HENTAIGANA LETTER NE-6;Lo;0;L;;;;;N;;;;; +1B098;HENTAIGANA LETTER NE-KO;Lo;0;L;;;;;N;;;;; +1B099;HENTAIGANA LETTER NO-1;Lo;0;L;;;;;N;;;;; +1B09A;HENTAIGANA LETTER NO-2;Lo;0;L;;;;;N;;;;; +1B09B;HENTAIGANA LETTER NO-3;Lo;0;L;;;;;N;;;;; +1B09C;HENTAIGANA LETTER NO-4;Lo;0;L;;;;;N;;;;; +1B09D;HENTAIGANA LETTER NO-5;Lo;0;L;;;;;N;;;;; +1B09E;HENTAIGANA LETTER HA-1;Lo;0;L;;;;;N;;;;; +1B09F;HENTAIGANA LETTER HA-2;Lo;0;L;;;;;N;;;;; +1B0A0;HENTAIGANA 
LETTER HA-3;Lo;0;L;;;;;N;;;;; +1B0A1;HENTAIGANA LETTER HA-4;Lo;0;L;;;;;N;;;;; +1B0A2;HENTAIGANA LETTER HA-5;Lo;0;L;;;;;N;;;;; +1B0A3;HENTAIGANA LETTER HA-6;Lo;0;L;;;;;N;;;;; +1B0A4;HENTAIGANA LETTER HA-7;Lo;0;L;;;;;N;;;;; +1B0A5;HENTAIGANA LETTER HA-8;Lo;0;L;;;;;N;;;;; +1B0A6;HENTAIGANA LETTER HA-9;Lo;0;L;;;;;N;;;;; +1B0A7;HENTAIGANA LETTER HA-10;Lo;0;L;;;;;N;;;;; +1B0A8;HENTAIGANA LETTER HA-11;Lo;0;L;;;;;N;;;;; +1B0A9;HENTAIGANA LETTER HI-1;Lo;0;L;;;;;N;;;;; +1B0AA;HENTAIGANA LETTER HI-2;Lo;0;L;;;;;N;;;;; +1B0AB;HENTAIGANA LETTER HI-3;Lo;0;L;;;;;N;;;;; +1B0AC;HENTAIGANA LETTER HI-4;Lo;0;L;;;;;N;;;;; +1B0AD;HENTAIGANA LETTER HI-5;Lo;0;L;;;;;N;;;;; +1B0AE;HENTAIGANA LETTER HI-6;Lo;0;L;;;;;N;;;;; +1B0AF;HENTAIGANA LETTER HI-7;Lo;0;L;;;;;N;;;;; +1B0B0;HENTAIGANA LETTER HU-1;Lo;0;L;;;;;N;;;;; +1B0B1;HENTAIGANA LETTER HU-2;Lo;0;L;;;;;N;;;;; +1B0B2;HENTAIGANA LETTER HU-3;Lo;0;L;;;;;N;;;;; +1B0B3;HENTAIGANA LETTER HE-1;Lo;0;L;;;;;N;;;;; +1B0B4;HENTAIGANA LETTER HE-2;Lo;0;L;;;;;N;;;;; +1B0B5;HENTAIGANA LETTER HE-3;Lo;0;L;;;;;N;;;;; +1B0B6;HENTAIGANA LETTER HE-4;Lo;0;L;;;;;N;;;;; +1B0B7;HENTAIGANA LETTER HE-5;Lo;0;L;;;;;N;;;;; +1B0B8;HENTAIGANA LETTER HE-6;Lo;0;L;;;;;N;;;;; +1B0B9;HENTAIGANA LETTER HE-7;Lo;0;L;;;;;N;;;;; +1B0BA;HENTAIGANA LETTER HO-1;Lo;0;L;;;;;N;;;;; +1B0BB;HENTAIGANA LETTER HO-2;Lo;0;L;;;;;N;;;;; +1B0BC;HENTAIGANA LETTER HO-3;Lo;0;L;;;;;N;;;;; +1B0BD;HENTAIGANA LETTER HO-4;Lo;0;L;;;;;N;;;;; +1B0BE;HENTAIGANA LETTER HO-5;Lo;0;L;;;;;N;;;;; +1B0BF;HENTAIGANA LETTER HO-6;Lo;0;L;;;;;N;;;;; +1B0C0;HENTAIGANA LETTER HO-7;Lo;0;L;;;;;N;;;;; +1B0C1;HENTAIGANA LETTER HO-8;Lo;0;L;;;;;N;;;;; +1B0C2;HENTAIGANA LETTER MA-1;Lo;0;L;;;;;N;;;;; +1B0C3;HENTAIGANA LETTER MA-2;Lo;0;L;;;;;N;;;;; +1B0C4;HENTAIGANA LETTER MA-3;Lo;0;L;;;;;N;;;;; +1B0C5;HENTAIGANA LETTER MA-4;Lo;0;L;;;;;N;;;;; +1B0C6;HENTAIGANA LETTER MA-5;Lo;0;L;;;;;N;;;;; +1B0C7;HENTAIGANA LETTER MA-6;Lo;0;L;;;;;N;;;;; +1B0C8;HENTAIGANA LETTER MA-7;Lo;0;L;;;;;N;;;;; +1B0C9;HENTAIGANA LETTER MI-1;Lo;0;L;;;;;N;;;;; +1B0CA;HENTAIGANA LETTER MI-2;Lo;0;L;;;;;N;;;;; +1B0CB;HENTAIGANA LETTER MI-3;Lo;0;L;;;;;N;;;;; +1B0CC;HENTAIGANA LETTER MI-4;Lo;0;L;;;;;N;;;;; +1B0CD;HENTAIGANA LETTER MI-5;Lo;0;L;;;;;N;;;;; +1B0CE;HENTAIGANA LETTER MI-6;Lo;0;L;;;;;N;;;;; +1B0CF;HENTAIGANA LETTER MI-7;Lo;0;L;;;;;N;;;;; +1B0D0;HENTAIGANA LETTER MU-1;Lo;0;L;;;;;N;;;;; +1B0D1;HENTAIGANA LETTER MU-2;Lo;0;L;;;;;N;;;;; +1B0D2;HENTAIGANA LETTER MU-3;Lo;0;L;;;;;N;;;;; +1B0D3;HENTAIGANA LETTER MU-4;Lo;0;L;;;;;N;;;;; +1B0D4;HENTAIGANA LETTER ME-1;Lo;0;L;;;;;N;;;;; +1B0D5;HENTAIGANA LETTER ME-2;Lo;0;L;;;;;N;;;;; +1B0D6;HENTAIGANA LETTER ME-MA;Lo;0;L;;;;;N;;;;; +1B0D7;HENTAIGANA LETTER MO-1;Lo;0;L;;;;;N;;;;; +1B0D8;HENTAIGANA LETTER MO-2;Lo;0;L;;;;;N;;;;; +1B0D9;HENTAIGANA LETTER MO-3;Lo;0;L;;;;;N;;;;; +1B0DA;HENTAIGANA LETTER MO-4;Lo;0;L;;;;;N;;;;; +1B0DB;HENTAIGANA LETTER MO-5;Lo;0;L;;;;;N;;;;; +1B0DC;HENTAIGANA LETTER MO-6;Lo;0;L;;;;;N;;;;; +1B0DD;HENTAIGANA LETTER YA-1;Lo;0;L;;;;;N;;;;; +1B0DE;HENTAIGANA LETTER YA-2;Lo;0;L;;;;;N;;;;; +1B0DF;HENTAIGANA LETTER YA-3;Lo;0;L;;;;;N;;;;; +1B0E0;HENTAIGANA LETTER YA-4;Lo;0;L;;;;;N;;;;; +1B0E1;HENTAIGANA LETTER YA-5;Lo;0;L;;;;;N;;;;; +1B0E2;HENTAIGANA LETTER YA-YO;Lo;0;L;;;;;N;;;;; +1B0E3;HENTAIGANA LETTER YU-1;Lo;0;L;;;;;N;;;;; +1B0E4;HENTAIGANA LETTER YU-2;Lo;0;L;;;;;N;;;;; +1B0E5;HENTAIGANA LETTER YU-3;Lo;0;L;;;;;N;;;;; +1B0E6;HENTAIGANA LETTER YU-4;Lo;0;L;;;;;N;;;;; +1B0E7;HENTAIGANA LETTER YO-1;Lo;0;L;;;;;N;;;;; +1B0E8;HENTAIGANA LETTER YO-2;Lo;0;L;;;;;N;;;;; +1B0E9;HENTAIGANA LETTER YO-3;Lo;0;L;;;;;N;;;;; 
+1B0EA;HENTAIGANA LETTER YO-4;Lo;0;L;;;;;N;;;;; +1B0EB;HENTAIGANA LETTER YO-5;Lo;0;L;;;;;N;;;;; +1B0EC;HENTAIGANA LETTER YO-6;Lo;0;L;;;;;N;;;;; +1B0ED;HENTAIGANA LETTER RA-1;Lo;0;L;;;;;N;;;;; +1B0EE;HENTAIGANA LETTER RA-2;Lo;0;L;;;;;N;;;;; +1B0EF;HENTAIGANA LETTER RA-3;Lo;0;L;;;;;N;;;;; +1B0F0;HENTAIGANA LETTER RA-4;Lo;0;L;;;;;N;;;;; +1B0F1;HENTAIGANA LETTER RI-1;Lo;0;L;;;;;N;;;;; +1B0F2;HENTAIGANA LETTER RI-2;Lo;0;L;;;;;N;;;;; +1B0F3;HENTAIGANA LETTER RI-3;Lo;0;L;;;;;N;;;;; +1B0F4;HENTAIGANA LETTER RI-4;Lo;0;L;;;;;N;;;;; +1B0F5;HENTAIGANA LETTER RI-5;Lo;0;L;;;;;N;;;;; +1B0F6;HENTAIGANA LETTER RI-6;Lo;0;L;;;;;N;;;;; +1B0F7;HENTAIGANA LETTER RI-7;Lo;0;L;;;;;N;;;;; +1B0F8;HENTAIGANA LETTER RU-1;Lo;0;L;;;;;N;;;;; +1B0F9;HENTAIGANA LETTER RU-2;Lo;0;L;;;;;N;;;;; +1B0FA;HENTAIGANA LETTER RU-3;Lo;0;L;;;;;N;;;;; +1B0FB;HENTAIGANA LETTER RU-4;Lo;0;L;;;;;N;;;;; +1B0FC;HENTAIGANA LETTER RU-5;Lo;0;L;;;;;N;;;;; +1B0FD;HENTAIGANA LETTER RU-6;Lo;0;L;;;;;N;;;;; +1B0FE;HENTAIGANA LETTER RE-1;Lo;0;L;;;;;N;;;;; +1B0FF;HENTAIGANA LETTER RE-2;Lo;0;L;;;;;N;;;;; +1B100;HENTAIGANA LETTER RE-3;Lo;0;L;;;;;N;;;;; +1B101;HENTAIGANA LETTER RE-4;Lo;0;L;;;;;N;;;;; +1B102;HENTAIGANA LETTER RO-1;Lo;0;L;;;;;N;;;;; +1B103;HENTAIGANA LETTER RO-2;Lo;0;L;;;;;N;;;;; +1B104;HENTAIGANA LETTER RO-3;Lo;0;L;;;;;N;;;;; +1B105;HENTAIGANA LETTER RO-4;Lo;0;L;;;;;N;;;;; +1B106;HENTAIGANA LETTER RO-5;Lo;0;L;;;;;N;;;;; +1B107;HENTAIGANA LETTER RO-6;Lo;0;L;;;;;N;;;;; +1B108;HENTAIGANA LETTER WA-1;Lo;0;L;;;;;N;;;;; +1B109;HENTAIGANA LETTER WA-2;Lo;0;L;;;;;N;;;;; +1B10A;HENTAIGANA LETTER WA-3;Lo;0;L;;;;;N;;;;; +1B10B;HENTAIGANA LETTER WA-4;Lo;0;L;;;;;N;;;;; +1B10C;HENTAIGANA LETTER WA-5;Lo;0;L;;;;;N;;;;; +1B10D;HENTAIGANA LETTER WI-1;Lo;0;L;;;;;N;;;;; +1B10E;HENTAIGANA LETTER WI-2;Lo;0;L;;;;;N;;;;; +1B10F;HENTAIGANA LETTER WI-3;Lo;0;L;;;;;N;;;;; +1B110;HENTAIGANA LETTER WI-4;Lo;0;L;;;;;N;;;;; +1B111;HENTAIGANA LETTER WI-5;Lo;0;L;;;;;N;;;;; +1B112;HENTAIGANA LETTER WE-1;Lo;0;L;;;;;N;;;;; +1B113;HENTAIGANA LETTER WE-2;Lo;0;L;;;;;N;;;;; +1B114;HENTAIGANA LETTER WE-3;Lo;0;L;;;;;N;;;;; +1B115;HENTAIGANA LETTER WE-4;Lo;0;L;;;;;N;;;;; +1B116;HENTAIGANA LETTER WO-1;Lo;0;L;;;;;N;;;;; +1B117;HENTAIGANA LETTER WO-2;Lo;0;L;;;;;N;;;;; +1B118;HENTAIGANA LETTER WO-3;Lo;0;L;;;;;N;;;;; +1B119;HENTAIGANA LETTER WO-4;Lo;0;L;;;;;N;;;;; +1B11A;HENTAIGANA LETTER WO-5;Lo;0;L;;;;;N;;;;; +1B11B;HENTAIGANA LETTER WO-6;Lo;0;L;;;;;N;;;;; +1B11C;HENTAIGANA LETTER WO-7;Lo;0;L;;;;;N;;;;; +1B11D;HENTAIGANA LETTER N-MU-MO-1;Lo;0;L;;;;;N;;;;; +1B11E;HENTAIGANA LETTER N-MU-MO-2;Lo;0;L;;;;;N;;;;; +1B170;NUSHU CHARACTER-1B170;Lo;0;L;;;;;N;;;;; +1B171;NUSHU CHARACTER-1B171;Lo;0;L;;;;;N;;;;; +1B172;NUSHU CHARACTER-1B172;Lo;0;L;;;;;N;;;;; +1B173;NUSHU CHARACTER-1B173;Lo;0;L;;;;;N;;;;; +1B174;NUSHU CHARACTER-1B174;Lo;0;L;;;;;N;;;;; +1B175;NUSHU CHARACTER-1B175;Lo;0;L;;;;;N;;;;; +1B176;NUSHU CHARACTER-1B176;Lo;0;L;;;;;N;;;;; +1B177;NUSHU CHARACTER-1B177;Lo;0;L;;;;;N;;;;; +1B178;NUSHU CHARACTER-1B178;Lo;0;L;;;;;N;;;;; +1B179;NUSHU CHARACTER-1B179;Lo;0;L;;;;;N;;;;; +1B17A;NUSHU CHARACTER-1B17A;Lo;0;L;;;;;N;;;;; +1B17B;NUSHU CHARACTER-1B17B;Lo;0;L;;;;;N;;;;; +1B17C;NUSHU CHARACTER-1B17C;Lo;0;L;;;;;N;;;;; +1B17D;NUSHU CHARACTER-1B17D;Lo;0;L;;;;;N;;;;; +1B17E;NUSHU CHARACTER-1B17E;Lo;0;L;;;;;N;;;;; +1B17F;NUSHU CHARACTER-1B17F;Lo;0;L;;;;;N;;;;; +1B180;NUSHU CHARACTER-1B180;Lo;0;L;;;;;N;;;;; +1B181;NUSHU CHARACTER-1B181;Lo;0;L;;;;;N;;;;; +1B182;NUSHU CHARACTER-1B182;Lo;0;L;;;;;N;;;;; +1B183;NUSHU CHARACTER-1B183;Lo;0;L;;;;;N;;;;; +1B184;NUSHU CHARACTER-1B184;Lo;0;L;;;;;N;;;;; +1B185;NUSHU 
CHARACTER-1B185;Lo;0;L;;;;;N;;;;; +1B186;NUSHU CHARACTER-1B186;Lo;0;L;;;;;N;;;;; +1B187;NUSHU CHARACTER-1B187;Lo;0;L;;;;;N;;;;; +1B188;NUSHU CHARACTER-1B188;Lo;0;L;;;;;N;;;;; +1B189;NUSHU CHARACTER-1B189;Lo;0;L;;;;;N;;;;; +1B18A;NUSHU CHARACTER-1B18A;Lo;0;L;;;;;N;;;;; +1B18B;NUSHU CHARACTER-1B18B;Lo;0;L;;;;;N;;;;; +1B18C;NUSHU CHARACTER-1B18C;Lo;0;L;;;;;N;;;;; +1B18D;NUSHU CHARACTER-1B18D;Lo;0;L;;;;;N;;;;; +1B18E;NUSHU CHARACTER-1B18E;Lo;0;L;;;;;N;;;;; +1B18F;NUSHU CHARACTER-1B18F;Lo;0;L;;;;;N;;;;; +1B190;NUSHU CHARACTER-1B190;Lo;0;L;;;;;N;;;;; +1B191;NUSHU CHARACTER-1B191;Lo;0;L;;;;;N;;;;; +1B192;NUSHU CHARACTER-1B192;Lo;0;L;;;;;N;;;;; +1B193;NUSHU CHARACTER-1B193;Lo;0;L;;;;;N;;;;; +1B194;NUSHU CHARACTER-1B194;Lo;0;L;;;;;N;;;;; +1B195;NUSHU CHARACTER-1B195;Lo;0;L;;;;;N;;;;; +1B196;NUSHU CHARACTER-1B196;Lo;0;L;;;;;N;;;;; +1B197;NUSHU CHARACTER-1B197;Lo;0;L;;;;;N;;;;; +1B198;NUSHU CHARACTER-1B198;Lo;0;L;;;;;N;;;;; +1B199;NUSHU CHARACTER-1B199;Lo;0;L;;;;;N;;;;; +1B19A;NUSHU CHARACTER-1B19A;Lo;0;L;;;;;N;;;;; +1B19B;NUSHU CHARACTER-1B19B;Lo;0;L;;;;;N;;;;; +1B19C;NUSHU CHARACTER-1B19C;Lo;0;L;;;;;N;;;;; +1B19D;NUSHU CHARACTER-1B19D;Lo;0;L;;;;;N;;;;; +1B19E;NUSHU CHARACTER-1B19E;Lo;0;L;;;;;N;;;;; +1B19F;NUSHU CHARACTER-1B19F;Lo;0;L;;;;;N;;;;; +1B1A0;NUSHU CHARACTER-1B1A0;Lo;0;L;;;;;N;;;;; +1B1A1;NUSHU CHARACTER-1B1A1;Lo;0;L;;;;;N;;;;; +1B1A2;NUSHU CHARACTER-1B1A2;Lo;0;L;;;;;N;;;;; +1B1A3;NUSHU CHARACTER-1B1A3;Lo;0;L;;;;;N;;;;; +1B1A4;NUSHU CHARACTER-1B1A4;Lo;0;L;;;;;N;;;;; +1B1A5;NUSHU CHARACTER-1B1A5;Lo;0;L;;;;;N;;;;; +1B1A6;NUSHU CHARACTER-1B1A6;Lo;0;L;;;;;N;;;;; +1B1A7;NUSHU CHARACTER-1B1A7;Lo;0;L;;;;;N;;;;; +1B1A8;NUSHU CHARACTER-1B1A8;Lo;0;L;;;;;N;;;;; +1B1A9;NUSHU CHARACTER-1B1A9;Lo;0;L;;;;;N;;;;; +1B1AA;NUSHU CHARACTER-1B1AA;Lo;0;L;;;;;N;;;;; +1B1AB;NUSHU CHARACTER-1B1AB;Lo;0;L;;;;;N;;;;; +1B1AC;NUSHU CHARACTER-1B1AC;Lo;0;L;;;;;N;;;;; +1B1AD;NUSHU CHARACTER-1B1AD;Lo;0;L;;;;;N;;;;; +1B1AE;NUSHU CHARACTER-1B1AE;Lo;0;L;;;;;N;;;;; +1B1AF;NUSHU CHARACTER-1B1AF;Lo;0;L;;;;;N;;;;; +1B1B0;NUSHU CHARACTER-1B1B0;Lo;0;L;;;;;N;;;;; +1B1B1;NUSHU CHARACTER-1B1B1;Lo;0;L;;;;;N;;;;; +1B1B2;NUSHU CHARACTER-1B1B2;Lo;0;L;;;;;N;;;;; +1B1B3;NUSHU CHARACTER-1B1B3;Lo;0;L;;;;;N;;;;; +1B1B4;NUSHU CHARACTER-1B1B4;Lo;0;L;;;;;N;;;;; +1B1B5;NUSHU CHARACTER-1B1B5;Lo;0;L;;;;;N;;;;; +1B1B6;NUSHU CHARACTER-1B1B6;Lo;0;L;;;;;N;;;;; +1B1B7;NUSHU CHARACTER-1B1B7;Lo;0;L;;;;;N;;;;; +1B1B8;NUSHU CHARACTER-1B1B8;Lo;0;L;;;;;N;;;;; +1B1B9;NUSHU CHARACTER-1B1B9;Lo;0;L;;;;;N;;;;; +1B1BA;NUSHU CHARACTER-1B1BA;Lo;0;L;;;;;N;;;;; +1B1BB;NUSHU CHARACTER-1B1BB;Lo;0;L;;;;;N;;;;; +1B1BC;NUSHU CHARACTER-1B1BC;Lo;0;L;;;;;N;;;;; +1B1BD;NUSHU CHARACTER-1B1BD;Lo;0;L;;;;;N;;;;; +1B1BE;NUSHU CHARACTER-1B1BE;Lo;0;L;;;;;N;;;;; +1B1BF;NUSHU CHARACTER-1B1BF;Lo;0;L;;;;;N;;;;; +1B1C0;NUSHU CHARACTER-1B1C0;Lo;0;L;;;;;N;;;;; +1B1C1;NUSHU CHARACTER-1B1C1;Lo;0;L;;;;;N;;;;; +1B1C2;NUSHU CHARACTER-1B1C2;Lo;0;L;;;;;N;;;;; +1B1C3;NUSHU CHARACTER-1B1C3;Lo;0;L;;;;;N;;;;; +1B1C4;NUSHU CHARACTER-1B1C4;Lo;0;L;;;;;N;;;;; +1B1C5;NUSHU CHARACTER-1B1C5;Lo;0;L;;;;;N;;;;; +1B1C6;NUSHU CHARACTER-1B1C6;Lo;0;L;;;;;N;;;;; +1B1C7;NUSHU CHARACTER-1B1C7;Lo;0;L;;;;;N;;;;; +1B1C8;NUSHU CHARACTER-1B1C8;Lo;0;L;;;;;N;;;;; +1B1C9;NUSHU CHARACTER-1B1C9;Lo;0;L;;;;;N;;;;; +1B1CA;NUSHU CHARACTER-1B1CA;Lo;0;L;;;;;N;;;;; +1B1CB;NUSHU CHARACTER-1B1CB;Lo;0;L;;;;;N;;;;; +1B1CC;NUSHU CHARACTER-1B1CC;Lo;0;L;;;;;N;;;;; +1B1CD;NUSHU CHARACTER-1B1CD;Lo;0;L;;;;;N;;;;; +1B1CE;NUSHU CHARACTER-1B1CE;Lo;0;L;;;;;N;;;;; +1B1CF;NUSHU CHARACTER-1B1CF;Lo;0;L;;;;;N;;;;; +1B1D0;NUSHU 
CHARACTER-1B1D0;Lo;0;L;;;;;N;;;;; +1B1D1;NUSHU CHARACTER-1B1D1;Lo;0;L;;;;;N;;;;; +1B1D2;NUSHU CHARACTER-1B1D2;Lo;0;L;;;;;N;;;;; +1B1D3;NUSHU CHARACTER-1B1D3;Lo;0;L;;;;;N;;;;; +1B1D4;NUSHU CHARACTER-1B1D4;Lo;0;L;;;;;N;;;;; +1B1D5;NUSHU CHARACTER-1B1D5;Lo;0;L;;;;;N;;;;; +1B1D6;NUSHU CHARACTER-1B1D6;Lo;0;L;;;;;N;;;;; +1B1D7;NUSHU CHARACTER-1B1D7;Lo;0;L;;;;;N;;;;; +1B1D8;NUSHU CHARACTER-1B1D8;Lo;0;L;;;;;N;;;;; +1B1D9;NUSHU CHARACTER-1B1D9;Lo;0;L;;;;;N;;;;; +1B1DA;NUSHU CHARACTER-1B1DA;Lo;0;L;;;;;N;;;;; +1B1DB;NUSHU CHARACTER-1B1DB;Lo;0;L;;;;;N;;;;; +1B1DC;NUSHU CHARACTER-1B1DC;Lo;0;L;;;;;N;;;;; +1B1DD;NUSHU CHARACTER-1B1DD;Lo;0;L;;;;;N;;;;; +1B1DE;NUSHU CHARACTER-1B1DE;Lo;0;L;;;;;N;;;;; +1B1DF;NUSHU CHARACTER-1B1DF;Lo;0;L;;;;;N;;;;; +1B1E0;NUSHU CHARACTER-1B1E0;Lo;0;L;;;;;N;;;;; +1B1E1;NUSHU CHARACTER-1B1E1;Lo;0;L;;;;;N;;;;; +1B1E2;NUSHU CHARACTER-1B1E2;Lo;0;L;;;;;N;;;;; +1B1E3;NUSHU CHARACTER-1B1E3;Lo;0;L;;;;;N;;;;; +1B1E4;NUSHU CHARACTER-1B1E4;Lo;0;L;;;;;N;;;;; +1B1E5;NUSHU CHARACTER-1B1E5;Lo;0;L;;;;;N;;;;; +1B1E6;NUSHU CHARACTER-1B1E6;Lo;0;L;;;;;N;;;;; +1B1E7;NUSHU CHARACTER-1B1E7;Lo;0;L;;;;;N;;;;; +1B1E8;NUSHU CHARACTER-1B1E8;Lo;0;L;;;;;N;;;;; +1B1E9;NUSHU CHARACTER-1B1E9;Lo;0;L;;;;;N;;;;; +1B1EA;NUSHU CHARACTER-1B1EA;Lo;0;L;;;;;N;;;;; +1B1EB;NUSHU CHARACTER-1B1EB;Lo;0;L;;;;;N;;;;; +1B1EC;NUSHU CHARACTER-1B1EC;Lo;0;L;;;;;N;;;;; +1B1ED;NUSHU CHARACTER-1B1ED;Lo;0;L;;;;;N;;;;; +1B1EE;NUSHU CHARACTER-1B1EE;Lo;0;L;;;;;N;;;;; +1B1EF;NUSHU CHARACTER-1B1EF;Lo;0;L;;;;;N;;;;; +1B1F0;NUSHU CHARACTER-1B1F0;Lo;0;L;;;;;N;;;;; +1B1F1;NUSHU CHARACTER-1B1F1;Lo;0;L;;;;;N;;;;; +1B1F2;NUSHU CHARACTER-1B1F2;Lo;0;L;;;;;N;;;;; +1B1F3;NUSHU CHARACTER-1B1F3;Lo;0;L;;;;;N;;;;; +1B1F4;NUSHU CHARACTER-1B1F4;Lo;0;L;;;;;N;;;;; +1B1F5;NUSHU CHARACTER-1B1F5;Lo;0;L;;;;;N;;;;; +1B1F6;NUSHU CHARACTER-1B1F6;Lo;0;L;;;;;N;;;;; +1B1F7;NUSHU CHARACTER-1B1F7;Lo;0;L;;;;;N;;;;; +1B1F8;NUSHU CHARACTER-1B1F8;Lo;0;L;;;;;N;;;;; +1B1F9;NUSHU CHARACTER-1B1F9;Lo;0;L;;;;;N;;;;; +1B1FA;NUSHU CHARACTER-1B1FA;Lo;0;L;;;;;N;;;;; +1B1FB;NUSHU CHARACTER-1B1FB;Lo;0;L;;;;;N;;;;; +1B1FC;NUSHU CHARACTER-1B1FC;Lo;0;L;;;;;N;;;;; +1B1FD;NUSHU CHARACTER-1B1FD;Lo;0;L;;;;;N;;;;; +1B1FE;NUSHU CHARACTER-1B1FE;Lo;0;L;;;;;N;;;;; +1B1FF;NUSHU CHARACTER-1B1FF;Lo;0;L;;;;;N;;;;; +1B200;NUSHU CHARACTER-1B200;Lo;0;L;;;;;N;;;;; +1B201;NUSHU CHARACTER-1B201;Lo;0;L;;;;;N;;;;; +1B202;NUSHU CHARACTER-1B202;Lo;0;L;;;;;N;;;;; +1B203;NUSHU CHARACTER-1B203;Lo;0;L;;;;;N;;;;; +1B204;NUSHU CHARACTER-1B204;Lo;0;L;;;;;N;;;;; +1B205;NUSHU CHARACTER-1B205;Lo;0;L;;;;;N;;;;; +1B206;NUSHU CHARACTER-1B206;Lo;0;L;;;;;N;;;;; +1B207;NUSHU CHARACTER-1B207;Lo;0;L;;;;;N;;;;; +1B208;NUSHU CHARACTER-1B208;Lo;0;L;;;;;N;;;;; +1B209;NUSHU CHARACTER-1B209;Lo;0;L;;;;;N;;;;; +1B20A;NUSHU CHARACTER-1B20A;Lo;0;L;;;;;N;;;;; +1B20B;NUSHU CHARACTER-1B20B;Lo;0;L;;;;;N;;;;; +1B20C;NUSHU CHARACTER-1B20C;Lo;0;L;;;;;N;;;;; +1B20D;NUSHU CHARACTER-1B20D;Lo;0;L;;;;;N;;;;; +1B20E;NUSHU CHARACTER-1B20E;Lo;0;L;;;;;N;;;;; +1B20F;NUSHU CHARACTER-1B20F;Lo;0;L;;;;;N;;;;; +1B210;NUSHU CHARACTER-1B210;Lo;0;L;;;;;N;;;;; +1B211;NUSHU CHARACTER-1B211;Lo;0;L;;;;;N;;;;; +1B212;NUSHU CHARACTER-1B212;Lo;0;L;;;;;N;;;;; +1B213;NUSHU CHARACTER-1B213;Lo;0;L;;;;;N;;;;; +1B214;NUSHU CHARACTER-1B214;Lo;0;L;;;;;N;;;;; +1B215;NUSHU CHARACTER-1B215;Lo;0;L;;;;;N;;;;; +1B216;NUSHU CHARACTER-1B216;Lo;0;L;;;;;N;;;;; +1B217;NUSHU CHARACTER-1B217;Lo;0;L;;;;;N;;;;; +1B218;NUSHU CHARACTER-1B218;Lo;0;L;;;;;N;;;;; +1B219;NUSHU CHARACTER-1B219;Lo;0;L;;;;;N;;;;; +1B21A;NUSHU CHARACTER-1B21A;Lo;0;L;;;;;N;;;;; +1B21B;NUSHU 
CHARACTER-1B21B;Lo;0;L;;;;;N;;;;; +1B21C;NUSHU CHARACTER-1B21C;Lo;0;L;;;;;N;;;;; +1B21D;NUSHU CHARACTER-1B21D;Lo;0;L;;;;;N;;;;; +1B21E;NUSHU CHARACTER-1B21E;Lo;0;L;;;;;N;;;;; +1B21F;NUSHU CHARACTER-1B21F;Lo;0;L;;;;;N;;;;; +1B220;NUSHU CHARACTER-1B220;Lo;0;L;;;;;N;;;;; +1B221;NUSHU CHARACTER-1B221;Lo;0;L;;;;;N;;;;; +1B222;NUSHU CHARACTER-1B222;Lo;0;L;;;;;N;;;;; +1B223;NUSHU CHARACTER-1B223;Lo;0;L;;;;;N;;;;; +1B224;NUSHU CHARACTER-1B224;Lo;0;L;;;;;N;;;;; +1B225;NUSHU CHARACTER-1B225;Lo;0;L;;;;;N;;;;; +1B226;NUSHU CHARACTER-1B226;Lo;0;L;;;;;N;;;;; +1B227;NUSHU CHARACTER-1B227;Lo;0;L;;;;;N;;;;; +1B228;NUSHU CHARACTER-1B228;Lo;0;L;;;;;N;;;;; +1B229;NUSHU CHARACTER-1B229;Lo;0;L;;;;;N;;;;; +1B22A;NUSHU CHARACTER-1B22A;Lo;0;L;;;;;N;;;;; +1B22B;NUSHU CHARACTER-1B22B;Lo;0;L;;;;;N;;;;; +1B22C;NUSHU CHARACTER-1B22C;Lo;0;L;;;;;N;;;;; +1B22D;NUSHU CHARACTER-1B22D;Lo;0;L;;;;;N;;;;; +1B22E;NUSHU CHARACTER-1B22E;Lo;0;L;;;;;N;;;;; +1B22F;NUSHU CHARACTER-1B22F;Lo;0;L;;;;;N;;;;; +1B230;NUSHU CHARACTER-1B230;Lo;0;L;;;;;N;;;;; +1B231;NUSHU CHARACTER-1B231;Lo;0;L;;;;;N;;;;; +1B232;NUSHU CHARACTER-1B232;Lo;0;L;;;;;N;;;;; +1B233;NUSHU CHARACTER-1B233;Lo;0;L;;;;;N;;;;; +1B234;NUSHU CHARACTER-1B234;Lo;0;L;;;;;N;;;;; +1B235;NUSHU CHARACTER-1B235;Lo;0;L;;;;;N;;;;; +1B236;NUSHU CHARACTER-1B236;Lo;0;L;;;;;N;;;;; +1B237;NUSHU CHARACTER-1B237;Lo;0;L;;;;;N;;;;; +1B238;NUSHU CHARACTER-1B238;Lo;0;L;;;;;N;;;;; +1B239;NUSHU CHARACTER-1B239;Lo;0;L;;;;;N;;;;; +1B23A;NUSHU CHARACTER-1B23A;Lo;0;L;;;;;N;;;;; +1B23B;NUSHU CHARACTER-1B23B;Lo;0;L;;;;;N;;;;; +1B23C;NUSHU CHARACTER-1B23C;Lo;0;L;;;;;N;;;;; +1B23D;NUSHU CHARACTER-1B23D;Lo;0;L;;;;;N;;;;; +1B23E;NUSHU CHARACTER-1B23E;Lo;0;L;;;;;N;;;;; +1B23F;NUSHU CHARACTER-1B23F;Lo;0;L;;;;;N;;;;; +1B240;NUSHU CHARACTER-1B240;Lo;0;L;;;;;N;;;;; +1B241;NUSHU CHARACTER-1B241;Lo;0;L;;;;;N;;;;; +1B242;NUSHU CHARACTER-1B242;Lo;0;L;;;;;N;;;;; +1B243;NUSHU CHARACTER-1B243;Lo;0;L;;;;;N;;;;; +1B244;NUSHU CHARACTER-1B244;Lo;0;L;;;;;N;;;;; +1B245;NUSHU CHARACTER-1B245;Lo;0;L;;;;;N;;;;; +1B246;NUSHU CHARACTER-1B246;Lo;0;L;;;;;N;;;;; +1B247;NUSHU CHARACTER-1B247;Lo;0;L;;;;;N;;;;; +1B248;NUSHU CHARACTER-1B248;Lo;0;L;;;;;N;;;;; +1B249;NUSHU CHARACTER-1B249;Lo;0;L;;;;;N;;;;; +1B24A;NUSHU CHARACTER-1B24A;Lo;0;L;;;;;N;;;;; +1B24B;NUSHU CHARACTER-1B24B;Lo;0;L;;;;;N;;;;; +1B24C;NUSHU CHARACTER-1B24C;Lo;0;L;;;;;N;;;;; +1B24D;NUSHU CHARACTER-1B24D;Lo;0;L;;;;;N;;;;; +1B24E;NUSHU CHARACTER-1B24E;Lo;0;L;;;;;N;;;;; +1B24F;NUSHU CHARACTER-1B24F;Lo;0;L;;;;;N;;;;; +1B250;NUSHU CHARACTER-1B250;Lo;0;L;;;;;N;;;;; +1B251;NUSHU CHARACTER-1B251;Lo;0;L;;;;;N;;;;; +1B252;NUSHU CHARACTER-1B252;Lo;0;L;;;;;N;;;;; +1B253;NUSHU CHARACTER-1B253;Lo;0;L;;;;;N;;;;; +1B254;NUSHU CHARACTER-1B254;Lo;0;L;;;;;N;;;;; +1B255;NUSHU CHARACTER-1B255;Lo;0;L;;;;;N;;;;; +1B256;NUSHU CHARACTER-1B256;Lo;0;L;;;;;N;;;;; +1B257;NUSHU CHARACTER-1B257;Lo;0;L;;;;;N;;;;; +1B258;NUSHU CHARACTER-1B258;Lo;0;L;;;;;N;;;;; +1B259;NUSHU CHARACTER-1B259;Lo;0;L;;;;;N;;;;; +1B25A;NUSHU CHARACTER-1B25A;Lo;0;L;;;;;N;;;;; +1B25B;NUSHU CHARACTER-1B25B;Lo;0;L;;;;;N;;;;; +1B25C;NUSHU CHARACTER-1B25C;Lo;0;L;;;;;N;;;;; +1B25D;NUSHU CHARACTER-1B25D;Lo;0;L;;;;;N;;;;; +1B25E;NUSHU CHARACTER-1B25E;Lo;0;L;;;;;N;;;;; +1B25F;NUSHU CHARACTER-1B25F;Lo;0;L;;;;;N;;;;; +1B260;NUSHU CHARACTER-1B260;Lo;0;L;;;;;N;;;;; +1B261;NUSHU CHARACTER-1B261;Lo;0;L;;;;;N;;;;; +1B262;NUSHU CHARACTER-1B262;Lo;0;L;;;;;N;;;;; +1B263;NUSHU CHARACTER-1B263;Lo;0;L;;;;;N;;;;; +1B264;NUSHU CHARACTER-1B264;Lo;0;L;;;;;N;;;;; +1B265;NUSHU CHARACTER-1B265;Lo;0;L;;;;;N;;;;; +1B266;NUSHU 
CHARACTER-1B266;Lo;0;L;;;;;N;;;;; +1B267;NUSHU CHARACTER-1B267;Lo;0;L;;;;;N;;;;; +1B268;NUSHU CHARACTER-1B268;Lo;0;L;;;;;N;;;;; +1B269;NUSHU CHARACTER-1B269;Lo;0;L;;;;;N;;;;; +1B26A;NUSHU CHARACTER-1B26A;Lo;0;L;;;;;N;;;;; +1B26B;NUSHU CHARACTER-1B26B;Lo;0;L;;;;;N;;;;; +1B26C;NUSHU CHARACTER-1B26C;Lo;0;L;;;;;N;;;;; +1B26D;NUSHU CHARACTER-1B26D;Lo;0;L;;;;;N;;;;; +1B26E;NUSHU CHARACTER-1B26E;Lo;0;L;;;;;N;;;;; +1B26F;NUSHU CHARACTER-1B26F;Lo;0;L;;;;;N;;;;; +1B270;NUSHU CHARACTER-1B270;Lo;0;L;;;;;N;;;;; +1B271;NUSHU CHARACTER-1B271;Lo;0;L;;;;;N;;;;; +1B272;NUSHU CHARACTER-1B272;Lo;0;L;;;;;N;;;;; +1B273;NUSHU CHARACTER-1B273;Lo;0;L;;;;;N;;;;; +1B274;NUSHU CHARACTER-1B274;Lo;0;L;;;;;N;;;;; +1B275;NUSHU CHARACTER-1B275;Lo;0;L;;;;;N;;;;; +1B276;NUSHU CHARACTER-1B276;Lo;0;L;;;;;N;;;;; +1B277;NUSHU CHARACTER-1B277;Lo;0;L;;;;;N;;;;; +1B278;NUSHU CHARACTER-1B278;Lo;0;L;;;;;N;;;;; +1B279;NUSHU CHARACTER-1B279;Lo;0;L;;;;;N;;;;; +1B27A;NUSHU CHARACTER-1B27A;Lo;0;L;;;;;N;;;;; +1B27B;NUSHU CHARACTER-1B27B;Lo;0;L;;;;;N;;;;; +1B27C;NUSHU CHARACTER-1B27C;Lo;0;L;;;;;N;;;;; +1B27D;NUSHU CHARACTER-1B27D;Lo;0;L;;;;;N;;;;; +1B27E;NUSHU CHARACTER-1B27E;Lo;0;L;;;;;N;;;;; +1B27F;NUSHU CHARACTER-1B27F;Lo;0;L;;;;;N;;;;; +1B280;NUSHU CHARACTER-1B280;Lo;0;L;;;;;N;;;;; +1B281;NUSHU CHARACTER-1B281;Lo;0;L;;;;;N;;;;; +1B282;NUSHU CHARACTER-1B282;Lo;0;L;;;;;N;;;;; +1B283;NUSHU CHARACTER-1B283;Lo;0;L;;;;;N;;;;; +1B284;NUSHU CHARACTER-1B284;Lo;0;L;;;;;N;;;;; +1B285;NUSHU CHARACTER-1B285;Lo;0;L;;;;;N;;;;; +1B286;NUSHU CHARACTER-1B286;Lo;0;L;;;;;N;;;;; +1B287;NUSHU CHARACTER-1B287;Lo;0;L;;;;;N;;;;; +1B288;NUSHU CHARACTER-1B288;Lo;0;L;;;;;N;;;;; +1B289;NUSHU CHARACTER-1B289;Lo;0;L;;;;;N;;;;; +1B28A;NUSHU CHARACTER-1B28A;Lo;0;L;;;;;N;;;;; +1B28B;NUSHU CHARACTER-1B28B;Lo;0;L;;;;;N;;;;; +1B28C;NUSHU CHARACTER-1B28C;Lo;0;L;;;;;N;;;;; +1B28D;NUSHU CHARACTER-1B28D;Lo;0;L;;;;;N;;;;; +1B28E;NUSHU CHARACTER-1B28E;Lo;0;L;;;;;N;;;;; +1B28F;NUSHU CHARACTER-1B28F;Lo;0;L;;;;;N;;;;; +1B290;NUSHU CHARACTER-1B290;Lo;0;L;;;;;N;;;;; +1B291;NUSHU CHARACTER-1B291;Lo;0;L;;;;;N;;;;; +1B292;NUSHU CHARACTER-1B292;Lo;0;L;;;;;N;;;;; +1B293;NUSHU CHARACTER-1B293;Lo;0;L;;;;;N;;;;; +1B294;NUSHU CHARACTER-1B294;Lo;0;L;;;;;N;;;;; +1B295;NUSHU CHARACTER-1B295;Lo;0;L;;;;;N;;;;; +1B296;NUSHU CHARACTER-1B296;Lo;0;L;;;;;N;;;;; +1B297;NUSHU CHARACTER-1B297;Lo;0;L;;;;;N;;;;; +1B298;NUSHU CHARACTER-1B298;Lo;0;L;;;;;N;;;;; +1B299;NUSHU CHARACTER-1B299;Lo;0;L;;;;;N;;;;; +1B29A;NUSHU CHARACTER-1B29A;Lo;0;L;;;;;N;;;;; +1B29B;NUSHU CHARACTER-1B29B;Lo;0;L;;;;;N;;;;; +1B29C;NUSHU CHARACTER-1B29C;Lo;0;L;;;;;N;;;;; +1B29D;NUSHU CHARACTER-1B29D;Lo;0;L;;;;;N;;;;; +1B29E;NUSHU CHARACTER-1B29E;Lo;0;L;;;;;N;;;;; +1B29F;NUSHU CHARACTER-1B29F;Lo;0;L;;;;;N;;;;; +1B2A0;NUSHU CHARACTER-1B2A0;Lo;0;L;;;;;N;;;;; +1B2A1;NUSHU CHARACTER-1B2A1;Lo;0;L;;;;;N;;;;; +1B2A2;NUSHU CHARACTER-1B2A2;Lo;0;L;;;;;N;;;;; +1B2A3;NUSHU CHARACTER-1B2A3;Lo;0;L;;;;;N;;;;; +1B2A4;NUSHU CHARACTER-1B2A4;Lo;0;L;;;;;N;;;;; +1B2A5;NUSHU CHARACTER-1B2A5;Lo;0;L;;;;;N;;;;; +1B2A6;NUSHU CHARACTER-1B2A6;Lo;0;L;;;;;N;;;;; +1B2A7;NUSHU CHARACTER-1B2A7;Lo;0;L;;;;;N;;;;; +1B2A8;NUSHU CHARACTER-1B2A8;Lo;0;L;;;;;N;;;;; +1B2A9;NUSHU CHARACTER-1B2A9;Lo;0;L;;;;;N;;;;; +1B2AA;NUSHU CHARACTER-1B2AA;Lo;0;L;;;;;N;;;;; +1B2AB;NUSHU CHARACTER-1B2AB;Lo;0;L;;;;;N;;;;; +1B2AC;NUSHU CHARACTER-1B2AC;Lo;0;L;;;;;N;;;;; +1B2AD;NUSHU CHARACTER-1B2AD;Lo;0;L;;;;;N;;;;; +1B2AE;NUSHU CHARACTER-1B2AE;Lo;0;L;;;;;N;;;;; +1B2AF;NUSHU CHARACTER-1B2AF;Lo;0;L;;;;;N;;;;; +1B2B0;NUSHU CHARACTER-1B2B0;Lo;0;L;;;;;N;;;;; +1B2B1;NUSHU 
CHARACTER-1B2B1;Lo;0;L;;;;;N;;;;; +1B2B2;NUSHU CHARACTER-1B2B2;Lo;0;L;;;;;N;;;;; +1B2B3;NUSHU CHARACTER-1B2B3;Lo;0;L;;;;;N;;;;; +1B2B4;NUSHU CHARACTER-1B2B4;Lo;0;L;;;;;N;;;;; +1B2B5;NUSHU CHARACTER-1B2B5;Lo;0;L;;;;;N;;;;; +1B2B6;NUSHU CHARACTER-1B2B6;Lo;0;L;;;;;N;;;;; +1B2B7;NUSHU CHARACTER-1B2B7;Lo;0;L;;;;;N;;;;; +1B2B8;NUSHU CHARACTER-1B2B8;Lo;0;L;;;;;N;;;;; +1B2B9;NUSHU CHARACTER-1B2B9;Lo;0;L;;;;;N;;;;; +1B2BA;NUSHU CHARACTER-1B2BA;Lo;0;L;;;;;N;;;;; +1B2BB;NUSHU CHARACTER-1B2BB;Lo;0;L;;;;;N;;;;; +1B2BC;NUSHU CHARACTER-1B2BC;Lo;0;L;;;;;N;;;;; +1B2BD;NUSHU CHARACTER-1B2BD;Lo;0;L;;;;;N;;;;; +1B2BE;NUSHU CHARACTER-1B2BE;Lo;0;L;;;;;N;;;;; +1B2BF;NUSHU CHARACTER-1B2BF;Lo;0;L;;;;;N;;;;; +1B2C0;NUSHU CHARACTER-1B2C0;Lo;0;L;;;;;N;;;;; +1B2C1;NUSHU CHARACTER-1B2C1;Lo;0;L;;;;;N;;;;; +1B2C2;NUSHU CHARACTER-1B2C2;Lo;0;L;;;;;N;;;;; +1B2C3;NUSHU CHARACTER-1B2C3;Lo;0;L;;;;;N;;;;; +1B2C4;NUSHU CHARACTER-1B2C4;Lo;0;L;;;;;N;;;;; +1B2C5;NUSHU CHARACTER-1B2C5;Lo;0;L;;;;;N;;;;; +1B2C6;NUSHU CHARACTER-1B2C6;Lo;0;L;;;;;N;;;;; +1B2C7;NUSHU CHARACTER-1B2C7;Lo;0;L;;;;;N;;;;; +1B2C8;NUSHU CHARACTER-1B2C8;Lo;0;L;;;;;N;;;;; +1B2C9;NUSHU CHARACTER-1B2C9;Lo;0;L;;;;;N;;;;; +1B2CA;NUSHU CHARACTER-1B2CA;Lo;0;L;;;;;N;;;;; +1B2CB;NUSHU CHARACTER-1B2CB;Lo;0;L;;;;;N;;;;; +1B2CC;NUSHU CHARACTER-1B2CC;Lo;0;L;;;;;N;;;;; +1B2CD;NUSHU CHARACTER-1B2CD;Lo;0;L;;;;;N;;;;; +1B2CE;NUSHU CHARACTER-1B2CE;Lo;0;L;;;;;N;;;;; +1B2CF;NUSHU CHARACTER-1B2CF;Lo;0;L;;;;;N;;;;; +1B2D0;NUSHU CHARACTER-1B2D0;Lo;0;L;;;;;N;;;;; +1B2D1;NUSHU CHARACTER-1B2D1;Lo;0;L;;;;;N;;;;; +1B2D2;NUSHU CHARACTER-1B2D2;Lo;0;L;;;;;N;;;;; +1B2D3;NUSHU CHARACTER-1B2D3;Lo;0;L;;;;;N;;;;; +1B2D4;NUSHU CHARACTER-1B2D4;Lo;0;L;;;;;N;;;;; +1B2D5;NUSHU CHARACTER-1B2D5;Lo;0;L;;;;;N;;;;; +1B2D6;NUSHU CHARACTER-1B2D6;Lo;0;L;;;;;N;;;;; +1B2D7;NUSHU CHARACTER-1B2D7;Lo;0;L;;;;;N;;;;; +1B2D8;NUSHU CHARACTER-1B2D8;Lo;0;L;;;;;N;;;;; +1B2D9;NUSHU CHARACTER-1B2D9;Lo;0;L;;;;;N;;;;; +1B2DA;NUSHU CHARACTER-1B2DA;Lo;0;L;;;;;N;;;;; +1B2DB;NUSHU CHARACTER-1B2DB;Lo;0;L;;;;;N;;;;; +1B2DC;NUSHU CHARACTER-1B2DC;Lo;0;L;;;;;N;;;;; +1B2DD;NUSHU CHARACTER-1B2DD;Lo;0;L;;;;;N;;;;; +1B2DE;NUSHU CHARACTER-1B2DE;Lo;0;L;;;;;N;;;;; +1B2DF;NUSHU CHARACTER-1B2DF;Lo;0;L;;;;;N;;;;; +1B2E0;NUSHU CHARACTER-1B2E0;Lo;0;L;;;;;N;;;;; +1B2E1;NUSHU CHARACTER-1B2E1;Lo;0;L;;;;;N;;;;; +1B2E2;NUSHU CHARACTER-1B2E2;Lo;0;L;;;;;N;;;;; +1B2E3;NUSHU CHARACTER-1B2E3;Lo;0;L;;;;;N;;;;; +1B2E4;NUSHU CHARACTER-1B2E4;Lo;0;L;;;;;N;;;;; +1B2E5;NUSHU CHARACTER-1B2E5;Lo;0;L;;;;;N;;;;; +1B2E6;NUSHU CHARACTER-1B2E6;Lo;0;L;;;;;N;;;;; +1B2E7;NUSHU CHARACTER-1B2E7;Lo;0;L;;;;;N;;;;; +1B2E8;NUSHU CHARACTER-1B2E8;Lo;0;L;;;;;N;;;;; +1B2E9;NUSHU CHARACTER-1B2E9;Lo;0;L;;;;;N;;;;; +1B2EA;NUSHU CHARACTER-1B2EA;Lo;0;L;;;;;N;;;;; +1B2EB;NUSHU CHARACTER-1B2EB;Lo;0;L;;;;;N;;;;; +1B2EC;NUSHU CHARACTER-1B2EC;Lo;0;L;;;;;N;;;;; +1B2ED;NUSHU CHARACTER-1B2ED;Lo;0;L;;;;;N;;;;; +1B2EE;NUSHU CHARACTER-1B2EE;Lo;0;L;;;;;N;;;;; +1B2EF;NUSHU CHARACTER-1B2EF;Lo;0;L;;;;;N;;;;; +1B2F0;NUSHU CHARACTER-1B2F0;Lo;0;L;;;;;N;;;;; +1B2F1;NUSHU CHARACTER-1B2F1;Lo;0;L;;;;;N;;;;; +1B2F2;NUSHU CHARACTER-1B2F2;Lo;0;L;;;;;N;;;;; +1B2F3;NUSHU CHARACTER-1B2F3;Lo;0;L;;;;;N;;;;; +1B2F4;NUSHU CHARACTER-1B2F4;Lo;0;L;;;;;N;;;;; +1B2F5;NUSHU CHARACTER-1B2F5;Lo;0;L;;;;;N;;;;; +1B2F6;NUSHU CHARACTER-1B2F6;Lo;0;L;;;;;N;;;;; +1B2F7;NUSHU CHARACTER-1B2F7;Lo;0;L;;;;;N;;;;; +1B2F8;NUSHU CHARACTER-1B2F8;Lo;0;L;;;;;N;;;;; +1B2F9;NUSHU CHARACTER-1B2F9;Lo;0;L;;;;;N;;;;; +1B2FA;NUSHU CHARACTER-1B2FA;Lo;0;L;;;;;N;;;;; +1B2FB;NUSHU CHARACTER-1B2FB;Lo;0;L;;;;;N;;;;; 1BC00;DUPLOYAN LETTER H;Lo;0;L;;;;;N;;;;; 
1BC01;DUPLOYAN LETTER X;Lo;0;L;;;;;N;;;;; 1BC02;DUPLOYAN LETTER P;Lo;0;L;;;;;N;;;;; @@ -28269,6 +29217,12 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 1F248;TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-6557;So;0;L;<compat> 3014 6557 3015;;;;N;;;;; 1F250;CIRCLED IDEOGRAPH ADVANTAGE;So;0;L;<circle> 5F97;;;;N;;;;; 1F251;CIRCLED IDEOGRAPH ACCEPT;So;0;L;<circle> 53EF;;;;N;;;;; +1F260;ROUNDED SYMBOL FOR FU;So;0;ON;;;;;N;;;;; +1F261;ROUNDED SYMBOL FOR LU;So;0;ON;;;;;N;;;;; +1F262;ROUNDED SYMBOL FOR SHOU;So;0;ON;;;;;N;;;;; +1F263;ROUNDED SYMBOL FOR XI;So;0;ON;;;;;N;;;;; +1F264;ROUNDED SYMBOL FOR SHUANGXI;So;0;ON;;;;;N;;;;; +1F265;ROUNDED SYMBOL FOR CAI;So;0;ON;;;;;N;;;;; 1F300;CYCLONE;So;0;ON;;;;;N;;;;; 1F301;FOGGY;So;0;ON;;;;;N;;;;; 1F302;CLOSED UMBRELLA;So;0;ON;;;;;N;;;;; @@ -29248,6 +30202,8 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 1F6D0;PLACE OF WORSHIP;So;0;ON;;;;;N;;;;; 1F6D1;OCTAGONAL SIGN;So;0;ON;;;;;N;;;;; 1F6D2;SHOPPING TROLLEY;So;0;ON;;;;;N;;;;; +1F6D3;STUPA;So;0;ON;;;;;N;;;;; +1F6D4;PAGODA;So;0;ON;;;;;N;;;;; 1F6E0;HAMMER AND WRENCH;So;0;ON;;;;;N;;;;; 1F6E1;SHIELD;So;0;ON;;;;;N;;;;; 1F6E2;OIL DRUM;So;0;ON;;;;;N;;;;; @@ -29268,6 +30224,8 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 1F6F4;SCOOTER;So;0;ON;;;;;N;;;;; 1F6F5;MOTOR SCOOTER;So;0;ON;;;;;N;;;;; 1F6F6;CANOE;So;0;ON;;;;;N;;;;; +1F6F7;SLED;So;0;ON;;;;;N;;;;; +1F6F8;FLYING SAUCER;So;0;ON;;;;;N;;;;; 1F700;ALCHEMICAL SYMBOL FOR QUINTESSENCE;So;0;ON;;;;;N;;;;; 1F701;ALCHEMICAL SYMBOL FOR AIR;So;0;ON;;;;;N;;;;; 1F702;ALCHEMICAL SYMBOL FOR FIRE;So;0;ON;;;;;N;;;;; @@ -29617,6 +30575,18 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 1F8AB;RIGHTWARDS FRONT-TILTED SHADOWED WHITE ARROW;So;0;ON;;;;;N;;;;; 1F8AC;WHITE ARROW SHAFT WIDTH ONE;So;0;ON;;;;;N;;;;; 1F8AD;WHITE ARROW SHAFT WIDTH TWO THIRDS;So;0;ON;;;;;N;;;;; +1F900;CIRCLED CROSS FORMEE WITH FOUR DOTS;So;0;ON;;;;;N;;;;; +1F901;CIRCLED CROSS FORMEE WITH TWO DOTS;So;0;ON;;;;;N;;;;; +1F902;CIRCLED CROSS FORMEE;So;0;ON;;;;;N;;;;; +1F903;LEFT HALF CIRCLE WITH FOUR DOTS;So;0;ON;;;;;N;;;;; +1F904;LEFT HALF CIRCLE WITH THREE DOTS;So;0;ON;;;;;N;;;;; +1F905;LEFT HALF CIRCLE WITH TWO DOTS;So;0;ON;;;;;N;;;;; +1F906;LEFT HALF CIRCLE WITH DOT;So;0;ON;;;;;N;;;;; +1F907;LEFT HALF CIRCLE;So;0;ON;;;;;N;;;;; +1F908;DOWNWARD FACING HOOK;So;0;ON;;;;;N;;;;; +1F909;DOWNWARD FACING NOTCHED HOOK;So;0;ON;;;;;N;;;;; +1F90A;DOWNWARD FACING HOOK WITH DOT;So;0;ON;;;;;N;;;;; +1F90B;DOWNWARD FACING NOTCHED HOOK WITH DOT;So;0;ON;;;;;N;;;;; 1F910;ZIPPER-MOUTH FACE;So;0;ON;;;;;N;;;;; 1F911;MONEY-MOUTH FACE;So;0;ON;;;;;N;;;;; 1F912;FACE WITH THERMOMETER;So;0;ON;;;;;N;;;;; @@ -29632,6 +30602,7 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 1F91C;RIGHT-FACING FIST;So;0;ON;;;;;N;;;;; 1F91D;HANDSHAKE;So;0;ON;;;;;N;;;;; 1F91E;HAND WITH INDEX AND MIDDLE FINGERS CROSSED;So;0;ON;;;;;N;;;;; +1F91F;I LOVE YOU HAND SIGN;So;0;ON;;;;;N;;;;; 1F920;FACE WITH COWBOY HAT;So;0;ON;;;;;N;;;;; 1F921;CLOWN FACE;So;0;ON;;;;;N;;;;; 1F922;NAUSEATED FACE;So;0;ON;;;;;N;;;;; @@ -29640,7 +30611,17 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 1F925;LYING FACE;So;0;ON;;;;;N;;;;; 1F926;FACE PALM;So;0;ON;;;;;N;;;;; 1F927;SNEEZING FACE;So;0;ON;;;;;N;;;;; +1F928;FACE WITH ONE EYEBROW RAISED;So;0;ON;;;;;N;;;;; +1F929;GRINNING FACE WITH STAR EYES;So;0;ON;;;;;N;;;;; +1F92A;GRINNING FACE WITH ONE LARGE AND ONE SMALL EYE;So;0;ON;;;;;N;;;;; +1F92B;FACE WITH FINGER COVERING CLOSED LIPS;So;0;ON;;;;;N;;;;; +1F92C;SERIOUS FACE WITH SYMBOLS COVERING MOUTH;So;0;ON;;;;;N;;;;; +1F92D;SMILING FACE WITH SMILING EYES AND HAND COVERING 
MOUTH;So;0;ON;;;;;N;;;;; +1F92E;FACE WITH OPEN MOUTH VOMITING;So;0;ON;;;;;N;;;;; +1F92F;SHOCKED FACE WITH EXPLODING HEAD;So;0;ON;;;;;N;;;;; 1F930;PREGNANT WOMAN;So;0;ON;;;;;N;;;;; +1F931;BREAST-FEEDING;So;0;ON;;;;;N;;;;; +1F932;PALMS UP TOGETHER;So;0;ON;;;;;N;;;;; 1F933;SELFIE;So;0;ON;;;;;N;;;;; 1F934;PRINCE;So;0;ON;;;;;N;;;;; 1F935;MAN IN TUXEDO;So;0;ON;;;;;N;;;;; @@ -29665,6 +30646,7 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 1F949;THIRD PLACE MEDAL;So;0;ON;;;;;N;;;;; 1F94A;BOXING GLOVE;So;0;ON;;;;;N;;;;; 1F94B;MARTIAL ARTS UNIFORM;So;0;ON;;;;;N;;;;; +1F94C;CURLING STONE;So;0;ON;;;;;N;;;;; 1F950;CROISSANT;So;0;ON;;;;;N;;;;; 1F951;AVOCADO;So;0;ON;;;;;N;;;;; 1F952;CUCUMBER;So;0;ON;;;;;N;;;;; @@ -29680,6 +30662,19 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 1F95C;PEANUTS;So;0;ON;;;;;N;;;;; 1F95D;KIWIFRUIT;So;0;ON;;;;;N;;;;; 1F95E;PANCAKES;So;0;ON;;;;;N;;;;; +1F95F;DUMPLING;So;0;ON;;;;;N;;;;; +1F960;FORTUNE COOKIE;So;0;ON;;;;;N;;;;; +1F961;TAKEOUT BOX;So;0;ON;;;;;N;;;;; +1F962;CHOPSTICKS;So;0;ON;;;;;N;;;;; +1F963;BOWL WITH SPOON;So;0;ON;;;;;N;;;;; +1F964;CUP WITH STRAW;So;0;ON;;;;;N;;;;; +1F965;COCONUT;So;0;ON;;;;;N;;;;; +1F966;BROCCOLI;So;0;ON;;;;;N;;;;; +1F967;PIE;So;0;ON;;;;;N;;;;; +1F968;PRETZEL;So;0;ON;;;;;N;;;;; +1F969;CUT OF MEAT;So;0;ON;;;;;N;;;;; +1F96A;SANDWICH;So;0;ON;;;;;N;;;;; +1F96B;CANNED FOOD;So;0;ON;;;;;N;;;;; 1F980;CRAB;So;0;ON;;;;;N;;;;; 1F981;LION FACE;So;0;ON;;;;;N;;;;; 1F982;SCORPION;So;0;ON;;;;;N;;;;; @@ -29698,7 +30693,36 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 1F98F;RHINOCEROS;So;0;ON;;;;;N;;;;; 1F990;SHRIMP;So;0;ON;;;;;N;;;;; 1F991;SQUID;So;0;ON;;;;;N;;;;; +1F992;GIRAFFE FACE;So;0;ON;;;;;N;;;;; +1F993;ZEBRA FACE;So;0;ON;;;;;N;;;;; +1F994;HEDGEHOG;So;0;ON;;;;;N;;;;; +1F995;SAUROPOD;So;0;ON;;;;;N;;;;; +1F996;T-REX;So;0;ON;;;;;N;;;;; +1F997;CRICKET;So;0;ON;;;;;N;;;;; 1F9C0;CHEESE WEDGE;So;0;ON;;;;;N;;;;; +1F9D0;FACE WITH MONOCLE;So;0;ON;;;;;N;;;;; +1F9D1;ADULT;So;0;ON;;;;;N;;;;; +1F9D2;CHILD;So;0;ON;;;;;N;;;;; +1F9D3;OLDER ADULT;So;0;ON;;;;;N;;;;; +1F9D4;BEARDED PERSON;So;0;ON;;;;;N;;;;; +1F9D5;PERSON WITH HEADSCARF;So;0;ON;;;;;N;;;;; +1F9D6;PERSON IN STEAMY ROOM;So;0;ON;;;;;N;;;;; +1F9D7;PERSON CLIMBING;So;0;ON;;;;;N;;;;; +1F9D8;PERSON IN LOTUS POSITION;So;0;ON;;;;;N;;;;; +1F9D9;MAGE;So;0;ON;;;;;N;;;;; +1F9DA;FAIRY;So;0;ON;;;;;N;;;;; +1F9DB;VAMPIRE;So;0;ON;;;;;N;;;;; +1F9DC;MERPERSON;So;0;ON;;;;;N;;;;; +1F9DD;ELF;So;0;ON;;;;;N;;;;; +1F9DE;GENIE;So;0;ON;;;;;N;;;;; +1F9DF;ZOMBIE;So;0;ON;;;;;N;;;;; +1F9E0;BRAIN;So;0;ON;;;;;N;;;;; +1F9E1;ORANGE HEART;So;0;ON;;;;;N;;;;; +1F9E2;BILLED CAP;So;0;ON;;;;;N;;;;; +1F9E3;SCARF;So;0;ON;;;;;N;;;;; +1F9E4;GLOVES;So;0;ON;;;;;N;;;;; +1F9E5;COAT;So;0;ON;;;;;N;;;;; +1F9E6;SOCKS;So;0;ON;;;;;N;;;;; 20000;<CJK Ideograph Extension B, First>;Lo;0;L;;;;;N;;;;; 2A6D6;<CJK Ideograph Extension B, Last>;Lo;0;L;;;;;N;;;;; 2A700;<CJK Ideograph Extension C, First>;Lo;0;L;;;;;N;;;;; @@ -29707,6 +30731,8 @@ FFFD;REPLACEMENT CHARACTER;So;0;ON;;;;;N;;;;; 2B81D;<CJK Ideograph Extension D, Last>;Lo;0;L;;;;;N;;;;; 2B820;<CJK Ideograph Extension E, First>;Lo;0;L;;;;;N;;;;; 2CEA1;<CJK Ideograph Extension E, Last>;Lo;0;L;;;;;N;;;;; +2CEB0;<CJK Ideograph Extension F, First>;Lo;0;L;;;;;N;;;;; +2EBE0;<CJK Ideograph Extension F, Last>;Lo;0;L;;;;;N;;;;; 2F800;CJK COMPATIBILITY IDEOGRAPH-2F800;Lo;0;L;4E3D;;;;N;;;;; 2F801;CJK COMPATIBILITY IDEOGRAPH-2F801;Lo;0;L;4E38;;;;N;;;;; 2F802;CJK COMPATIBILITY IDEOGRAPH-2F802;Lo;0;L;4E41;;;;N;;;;; diff --git a/lib/stdlib/uc_spec/gen_unicode_mod.escript b/lib/stdlib/uc_spec/gen_unicode_mod.escript index 
fefd7d3b70..5b8763f576 100755
--- a/lib/stdlib/uc_spec/gen_unicode_mod.escript
+++ b/lib/stdlib/uc_spec/gen_unicode_mod.escript
@@ -186,7 +186,7 @@ gen_static(Fd) ->
 " {U,L} -> #{upper=>U,lower=>L,title=>U,fold=>L};\n"
 " {U,L,T,F} -> #{upper=>U,lower=>L,title=>T,fold=>F}\n"
 " end.\n\n"),
- io:put_chars(Fd, "spec_version() -> {9,0}.\n\n\n"),
+ io:put_chars(Fd, "spec_version() -> {10,0}.\n\n\n"),
 io:put_chars(Fd, "class(Codepoint) -> {CCC,_,_} = unicode_table(Codepoint),\n CCC.\n\n"),
 io:put_chars(Fd, "-spec uppercase(unicode:chardata()) -> "
 "maybe_improper_list(gc(),unicode:chardata()).\n"),
diff --git a/lib/syntax_tools/src/Makefile b/lib/syntax_tools/src/Makefile
index 8325db45a8..c21d2f49c8 100644
--- a/lib/syntax_tools/src/Makefile
+++ b/lib/syntax_tools/src/Makefile
@@ -75,7 +75,7 @@ $(EBIN)/%.$(EMULATOR):%.erl
 # special rules and dependencies to apply the transform to itself
 $(EBIN)/merl_transform.beam: $(EBIN)/merl.beam ./merl_transform.beam \
- ../include/merl.hrl \
+ ../include/merl.hrl $(EBIN)/erl_comment_scan.beam \
 $(EBIN)/erl_syntax.beam $(EBIN)/erl_syntax_lib.beam
 ./merl_transform.beam: ./merl_transform.erl $(EBIN)/merl.beam \
 ../include/merl.hrl
diff --git a/lib/tools/emacs/erlang.el b/lib/tools/emacs/erlang.el
index 411e0e13df..429188b028 100644
--- a/lib/tools/emacs/erlang.el
+++ b/lib/tools/emacs/erlang.el
@@ -900,6 +900,11 @@ resulting regexp is surrounded by \\_< and \\_>."
 "display"
 "display_nl"
 "display_string"
+ "dist_get_stat"
+ "dist_ctrl_get_data"
+ "dist_ctrl_get_data_notification"
+ "dist_ctrl_input_handler"
+ "dist_ctrl_put_data"
 "dist_exit"
 "dlink"
 "dmonitor_node"
diff --git a/lib/wx/examples/demo/demo.erl b/lib/wx/examples/demo/demo.erl
index 8fb70ad7b0..8f3291305b 100644
--- a/lib/wx/examples/demo/demo.erl
+++ b/lib/wx/examples/demo/demo.erl
@@ -411,7 +411,7 @@ find(Ed) ->
 keyWords() ->
 L = ["after","begin","case","try","cond","catch","andalso","orelse",
- "end","fun","if","let","of","query","receive","when","bnot","not",
+ "end","fun","if","let","of","receive","when","bnot","not",
 "div","rem","band","and","bor","bxor","bsl","bsr","or","xor"],
 lists:flatten([K ++ " " || K <- L] ++ [0]).
diff --git a/make/otp.mk.in b/make/otp.mk.in
index 83bab7065d..4232d37c2e 100644
--- a/make/otp.mk.in
+++ b/make/otp.mk.in
@@ -47,9 +47,9 @@ CROSS_COMPILING = @CROSS_COMPILING@
 # ----------------------------------------------------
 DEFAULT_TARGETS = opt debug release release_docs clean docs
-DEFAULT_FLAVOR=@DEFAULT_FLAVOR@
-FLAVORS=@FLAVORS@
-TYPES=@TYPES@
+TYPES = @TYPES@
+
+USE_PGO = @USE_PGO@
 # Slash separated list of return values from $(origin VAR)
 # that are untrusted - set default in this file instead.
@@ -62,8 +62,8 @@ DUBIOUS_ORIGINS = /undefined/environment/
 # HiPE
 # ----------------------------------------------------
-HIPE_ENABLED=@HIPE_ENABLED@
-NATIVE_LIBS_ENABLED=@NATIVE_LIBS_ENABLED@
+HIPE_ENABLED = @HIPE_ENABLED@
+NATIVE_LIBS_ENABLED = @NATIVE_LIBS_ENABLED@
 # ----------------------------------------------------
 # Command macros
@@ -85,6 +85,7 @@ LD = @LD@
 RANLIB = @RANLIB@
 AR = @AR@
 PERL = @PERL@
+LLVM_PROFDATA = @LLVM_PROFDATA@
 BITS64 = @BITS64@
diff --git a/make/output.mk.in b/make/output.mk.in
index 171d2456aa..7c6533fddd 100644
--- a/make/output.mk.in
+++ b/make/output.mk.in
@@ -139,3 +139,7 @@ vsn_verbose = $(vsn_verbose_$(V))
 yecc_verbose_0 = @echo " YECC "$@;
 yecc_verbose = $(yecc_verbose_$(V))
+
+llvm_profdata_verbose_0 = @echo " LLVM_PROFDATA "$@;
+llvm_profdata_verbose = $(llvm_profdata_verbose_$(V))
+V_LLVM_PROFDATA = $(llvm_profdata_verbose)$(LLVM_PROFDATA)
diff --git a/scripts/build-otp b/scripts/build-otp
index 92a866a0a9..92031c79c8 100755
--- a/scripts/build-otp
+++ b/scripts/build-otp
@@ -37,7 +37,7 @@ if [ ! -d "logs" ]; then
 fi
 do_and_log "Autoconfing" autoconf
-do_and_log "Configuring" configure --enable-plain-emulator
+do_and_log "Configuring" configure
 do_and_log "Building OTP" boot -a
 exit 0
diff --git a/scripts/run-smoke-tests b/scripts/run-smoke-tests
index 5a850c7107..b3d26f1fce 100755
--- a/scripts/run-smoke-tests
+++ b/scripts/run-smoke-tests
@@ -17,5 +17,3 @@ function run_smoke_tests {
 }
 run_smoke_tests
-ERL_FLAGS="-smp disable" run_smoke_tests
-
diff --git a/system/doc/efficiency_guide/advanced.xml b/system/doc/efficiency_guide/advanced.xml
index e1760d0ded..896eda5f1c 100644
--- a/system/doc/efficiency_guide/advanced.xml
+++ b/system/doc/efficiency_guide/advanced.xml
@@ -255,30 +255,36 @@
 <cell><marker id="unique_references"/>Unique References on a Runtime System Instance</cell>
 <cell>Each scheduler thread has its own set of references, and all
 other threads have a shared set of references. Each set of references
- consist of <c>2⁶⁴ - 1</c> unique references. That is the total
+ consists of <c>2⁶⁴ - 1</c> unique references. That is, the total
 amount of unique references that can be produced on a runtime
- system instance is <c>(NoSchedulers + 1) * (2⁶⁴ - 1)</c>. If a
- scheduler thread create a new reference each nano second,
+ system instance is <c>(NoSchedulers + 1) × (2⁶⁴ - 1)</c>.
+ <br/><br/>
+ If a scheduler thread creates a new reference each nanosecond,
 references will at earliest be reused after more than 584 years.
 That is, for the foreseeable future they are unique enough.</cell>
 </row>
 <row>
 <cell><marker id="unique_integers"/>Unique Integers on a Runtime System Instance</cell>
- <cell>There are two types of unique integers both created using the
- <seealso marker="erts:erlang#unique_integer/1">erlang:unique_integer()</seealso>
- BIF. Unique integers created:
- <taglist>
- <tag>with the <c>monotonic</c> modifier</tag>
- <item>consist of a set of <c>2⁶⁴ - 1</c> unique integers.</item>
- <tag>without the <c>monotonic</c> modifier</tag>
- <item>consist of a set of <c>2⁶⁴ - 1</c> unique integers per scheduler
- thread and a set of <c>2⁶⁴ - 1</c> unique integers shared by
- other threads. That is the total amount of unique integers without
- the <c>monotonic</c> modifier is <c>(NoSchedulers + 1) * (2⁶⁴ - 1)</c></item>
- </taglist>
- If a unique integer is created each nano second, unique integers
- will at earliest be reused after more than 584 years. That is, for
- the foreseeable future they are unique enough.</cell>
+ <cell>
+ There are two types of unique integers both created using the
+ <seealso marker="erts:erlang#unique_integer/1">erlang:unique_integer()</seealso>
+ BIF:
+ <br/><br/>
+ <em>1.</em> Unique integers created <em>with</em> the
+ <c>monotonic</c> modifier consist of a set of <c>2⁶⁴ - 1</c>
+ unique integers.
+ <br/><br/>
+ <em>2.</em> Unique integers created <em>without</em> the
+ <c>monotonic</c> modifier consist of a set of <c>2⁶⁴ - 1</c>
+ unique integers per scheduler thread and a set of <c>2⁶⁴ - 1</c>
+ unique integers shared by other threads. That is, the total
+ amount of unique integers without the <c>monotonic</c> modifier
+ is <c>(NoSchedulers + 1) × (2⁶⁴ - 1)</c>.
+ <br/><br/>
+ If a unique integer is created each nanosecond, unique integers
+ will at earliest be reused after more than 584 years. That is, for
+ the foreseeable future they are unique enough.
+ </cell>
 </row>
 <tcaption>System Limits</tcaption>
 </table>
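To make the documented limits concrete, here is a minimal, hypothetical Erlang sketch (it is not part of the patch above); the module name unique_demo and the function example/0 are invented for illustration, while erlang:unique_integer/1, erlang:make_ref/0 and erlang:system_info(schedulers) are the standard BIFs the updated text refers to:

    %% Sketch only: shows the two kinds of unique integers described in the
    %% efficiency guide text, plus the documented upper bound
    %% (NoSchedulers + 1) * (2^64 - 1) computed for the running instance.
    -module(unique_demo).
    -export([example/0]).

    example() ->
        Monotonic = erlang:unique_integer([monotonic]), %% drawn from one set of 2^64 - 1 integers
        Plain = erlang:unique_integer(),                %% drawn from per-scheduler sets plus one shared set
        Ref = erlang:make_ref(),                        %% references have the same per-scheduler structure
        Bound = (erlang:system_info(schedulers) + 1) * ((1 bsl 64) - 1),
        {Monotonic, Plain, Ref, Bound}.

Calling unique_demo:example() on a node with, for example, 8 schedulers yields a bound of 9 × (2⁶⁴ - 1), matching the (NoSchedulers + 1) × (2⁶⁴ - 1) figure quoted in the table.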